Dataset columns: code (string, 20 to 4.93k characters), docstring (string, 33 to 1.27k characters), source (string, 3 classes).
def __call__(self, stream, content_type=CONTENT_TYPE_NPY): try: if content_type == CONTENT_TYPE_CSV: return np.genfromtxt(codecs.getreader('utf-8')(stream), delimiter=',', dtype=self.dtype) elif content_type == CONTENT_TYPE_JSON: return np.array(json.load(codecs.getreader('utf-8')(stream)), dtype=self.dtype) elif content_type == CONTENT_TYPE_NPY: return np.load(BytesIO(stream.read())) finally: stream.close()
Decode from serialized data into a Numpy array. Args: stream (stream): The response stream to be deserialized. content_type (str): The content type of the response. Can accept CSV, JSON, or NPY data. Returns: object: Body of the response deserialized into a Numpy array.
juraj-google-style
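For illustration, a minimal sketch of how the CSV branch above behaves, assuming a plain BytesIO object stands in for the response stream (numpy and the standard library only):

import codecs
from io import BytesIO
import numpy as np

stream = BytesIO(b"1.0,2.0,3.0\n4.0,5.0,6.0")  # stand-in for the HTTP response stream
arr = np.genfromtxt(codecs.getreader("utf-8")(stream), delimiter=",")
print(arr.shape)  # (2, 3)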
def info_gen(self, code, message, compressed=False): if "COMPRESS=GZIP" in message: return self.__info_gzip_gen() if compressed: return self.__info_yenczlib_gen() return self.__info_plain_gen()
Dispatcher for the info generators. Determines which __info_*_gen() should be used based on the supplied parameters. Args: code: The status code for the command response. message: The status message for the command response. compressed: Force decompression. Useful for xz* commands. Returns: An info generator.
juraj-google-style
def concretize(self): dfa = DFA(self.alphabet) for state in self.states: for arc in state.arcs: for char in arc.guard: dfa.add_arc(arc.src_state, arc.dst_state, char) for i in xrange(len(self.states)): if self.states[i].final: dfa[i].final = True return dfa
Transforms the SFA into a DFA Args: None Returns: DFA: The generated DFA
juraj-google-style
def add_file(self, filename, file_content): self._group_data['fileName'] = filename self._file_content = file_content
Add a file for Document and Report types. Example:: document = tcex.batch.group('Document', 'My Document') document.add_file('my_file.txt', 'my contents') Args: filename (str): The name of the file. file_content (bytes|method|str): The contents of the file or callback to get contents.
codesearchnet
def mockenv_context(*remove, **update): env = os.environ update = update or {} remove = remove or [] stomped = (set(update.keys()) | set(remove)) & set(env.keys()) update_after = {k: env[k] for k in stomped} remove_after = frozenset((k for k in update if k not in env)) try: env.update(update) [env.pop(k, None) for k in remove] yield finally: env.update(update_after) [env.pop(k) for k in remove_after]
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations. Args: remove: Environment variables to remove. update: Dictionary of environment variables and values to add/update.
github-repos
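As a usage sketch (assuming the generator above is wrapped with contextlib.contextmanager, which the flattened listing does not show), the same pattern can be demonstrated in a self-contained form:

import os
from contextlib import contextmanager

@contextmanager
def _tmp_env(**update):
    # Minimal stand-in for mockenv_context: apply updates, then restore.
    saved = {k: os.environ.get(k) for k in update}
    os.environ.update(update)
    try:
        yield
    finally:
        for k, v in saved.items():
            if v is None:
                os.environ.pop(k, None)
            else:
                os.environ[k] = v

with _tmp_env(_MOCKENV_DEMO_FLAG="1"):
    assert os.environ["_MOCKENV_DEMO_FLAG"] == "1"
assert "_MOCKENV_DEMO_FLAG" not in os.environ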
async def ping(self, conversation_id: uuid.UUID=None) -> float: cmd = convo.Ping(conversation_id=(conversation_id or uuid.uuid4())) result = (await self.dispatcher.start_conversation(cmd)) return (await result)
Send a message to the remote server to check liveness. Returns: The round-trip time to receive a Pong message in fractional seconds Examples: >>> async with connect() as conn: >>> print("Sending a PING to the server") >>> time_secs = await conn.ping() >>> print("Received a PONG after {} secs".format(time_secs))
codesearchnet
def create_inputs(inspecs): ret = [] for i in inspecs: v = nn.Variable(i.shape, need_grad=i.need_grad) v.d = i.init(v.shape) ret.append(v) return ret
Create input :obj:`nnabla.Variable` from :obj:`Inspec`. Args: inspecs (:obj:`list` of :obj:`Inspec`): A list of ``Inspec``. Returns: :obj:`list` of :obj:`nnabla.Variable`: Input variables.
juraj-google-style
def infeed_dequeue_tuple(dtypes, shapes, name=None): for dtype in dtypes: if dtype not in _SUPPORTED_INFEED_DTYPES: raise TypeError('{} is not a supported TPU infeed type. Supported types are: {}'.format(dtype, list(_SUPPORTED_INFEED_DTYPES))) return gen_tpu_ops.infeed_dequeue_tuple(dtypes, shapes, name=name)
A placeholder op for values fed into the TPU simultaneously as a tuple. Args: dtypes: A list of `tf.DType`s that has length `>= 1`. The element types of each element in `outputs`. shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). The shapes of each tensor in `outputs`. name: A name for the operation (optional). Returns: A list of `Tensor` objects of type `dtypes`. A list of tensors that will be provided using the infeed mechanism. Raises: TypeError: If a type in `dtypes` is not a supported infeed type.
github-repos
def _list_objects(self, client_kwargs, path, max_request_entries): kwargs = dict() if max_request_entries: kwargs['max_keys'] = max_request_entries bucket = self._get_bucket(client_kwargs) while True: with _handle_oss_error(): response = bucket.list_objects(prefix=path, **kwargs) if not response.object_list: raise _ObjectNotFoundError('Not found: %s' % path) for obj in response.object_list: yield obj.key, self._model_to_dict(obj, ('key',)) if response.next_marker: client_kwargs['marker'] = response.next_marker else: break
Lists objects. Args: client_kwargs (dict): Client arguments. path (str): Path relative to current locator. max_request_entries (int): If specified, maximum entries returned by request. Returns: generator of tuple: object name str, object header dict
juraj-google-style
def _CreateStyleForRoute(self, doc, route): style_id = ('route_%s' % route.route_id) style = ET.SubElement(doc, 'Style', {'id': style_id}) linestyle = ET.SubElement(style, 'LineStyle') width = ET.SubElement(linestyle, 'width') type_to_width = {0: '3', 1: '3', 2: '5', 3: '1'} width.text = type_to_width.get(route.route_type, '1') if route.route_color: color = ET.SubElement(linestyle, 'color') red = route.route_color[0:2].lower() green = route.route_color[2:4].lower() blue = route.route_color[4:6].lower() color.text = ('ff%s%s%s' % (blue, green, red)) return style_id
Create a KML Style element for the route. The style sets the line colour if the route colour is specified. The line thickness is set depending on the vehicle type. Args: doc: The KML Document ElementTree.Element instance. route: The transitfeed.Route to create the style for. Returns: The id of the style as a string.
codesearchnet
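A quick illustration of the colour handling above: KML expects colours as aabbggrr, so an RRGGBB route colour such as "FF0000" (red) becomes "ff0000ff".

route_color = "FF0000"  # hypothetical route colour in RRGGBB form
red, green, blue = route_color[0:2], route_color[2:4], route_color[4:6]
print("ff%s%s%s" % (blue.lower(), green.lower(), red.lower()))  # ff0000ff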
def __init__(self, **kwargs): super(_Merge, self).__init__(**kwargs) self.supports_masking = True
Initializes a Merge layer. Args: **kwargs: standard layer keyword arguments.
github-repos
def _find_matching_instance(cache_key): infos = get_all() candidates = [info for info in infos if (info.cache_key == cache_key)] for candidate in sorted(candidates, key=(lambda x: x.port)): return candidate return None
Find a running TensorBoard instance compatible with the cache key. Returns: A `TensorBoardInfo` object, or `None` if none matches the cache key.
codesearchnet
def get_decoder_config(self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str='default') -> OnnxConfig: decoder_config.encoder_hidden_size = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
Returns ONNX decoder config for `VisionEncoderDecoder` model. Args: encoder_config (`PretrainedConfig`): The encoder model's configuration to use when exporting to ONNX. decoder_config (`PretrainedConfig`): The decoder model's configuration to use when exporting to ONNX feature (`str`, *optional*): The type of feature to export the model with. Returns: [`VisionEncoderDecoderDecoderOnnxConfig`]: An instance of the ONNX configuration object.
github-repos
def set_address(self, name, value=None, default=False, disable=False): commands = [('interface %s' % name)] commands.append(self.command_builder('ip address', value=value, default=default, disable=disable)) return self.configure(commands)
Configures the interface IP address Args: name (string): The interface identifier to apply the interface config to value (string): The IP address and mask to set the interface to. The value should be in the format of A.B.C.D/E default (bool): Configures the address parameter to its default value using the EOS CLI default command disable (bool): Negates the address parameter value using the EOS CLI no command Returns: True if the operation succeeds otherwise False.
codesearchnet
def verify(self, message, signature): message = _helpers._to_bytes(message, encoding='utf-8') signature = _helpers._to_bytes(signature, encoding='utf-8') try: crypto.verify(self._pubkey, signature, message, 'sha256') return True except crypto.Error: return False
Verifies a message against a signature. Args: message: string or bytes, The message to verify. If string, will be encoded to bytes as utf-8. signature: string or bytes, The signature on the message. If string, will be encoded to bytes as utf-8. Returns: True if message was signed by the private key associated with the public key that this object was constructed with.
juraj-google-style
def __init__(self, identifier, configuration): super(SampleFileProfiler, self).__init__() self._identifier = identifier self._path = configuration.directory self._profile_measurements = {} self._sample_file = None self._start_time = None
Initializes a sample file profiler. Sample files are gzip compressed UTF-8 encoded CSV files. Args: identifier (str): identifier of the profiling session used to create the sample filename. configuration (ProfilingConfiguration): profiling configuration.
juraj-google-style
def get_mode_group(self, group): hmodegroup = self._libinput.libinput_device_tablet_pad_get_mode_group( self._handle, group) if hmodegroup: return TabletPadModeGroup(hmodegroup, self._libinput) return None
Return the mode group with the given index. While a reference is kept by the caller, the returned mode group will compare equal with the mode group returned by each subsequent call of this method with the same index and the mode group returned from :attr:`~libinput.event.TabletPadEvent.mode_group`, provided the event was generated by this mode group. Args: group (int): A mode group index. Returns: ~libinput.define.TabletPadModeGroup: The mode group with the given index or :obj:`None` if an invalid index is given.
juraj-google-style
def _run(self, num_iters): graph = ops.Graph() with graph.as_default(): init, output = self._build_graph() with session_lib.Session(graph=graph) as session: init.run() _ = session.run(output) start_time = time.time() for _ in range(num_iters): _ = session.run(output) duration = time.time() - start_time print('%f secs per enqueue-dequeue' % (duration / num_iters)) self.report_benchmark(name='fifo_queue', iters=num_iters, wall_time=duration / num_iters) return duration
Benchmarks enqueueing and dequeueing from a FIFOQueue. Args: num_iters: The number of iterations to run. Returns: The duration of the run in seconds.
github-repos
def __add__(self, other): copy = self.__class__(self.copy()) return copy.merge(other)
Add other in self and return new dict Args: other: dict to add in self Returns: Merged dict Example: >>> from ww import d >>> current_dict = d({1: 1, 2: 2, 3: 3}) >>> to_merge_dict = {3: 4, 4: 5} >>> current_dict + to_merge_dict {1: 1, 2: 2, 3: 4, 4: 5}
juraj-google-style
def master_key_from_entropy(passphrase='', strength=128): if ((strength % 32) != 0): raise ValueError('strength must be a multiple of 32') if ((strength < 128) or (strength > 256)): raise ValueError('strength should be >= 128 and <= 256') entropy = rand_bytes(strength // 8) m = Mnemonic(language='english') n = m.to_mnemonic(entropy) return (HDPrivateKey.master_key_from_seed(Mnemonic.to_seed(n, passphrase)), n)
Generates a master key from system entropy. Args: strength (int): Amount of entropy desired. This should be a multiple of 32 between 128 and 256. passphrase (str): An optional passphrase for the generated mnemonic string. Returns: HDPrivateKey, str: a tuple consisting of the master private key and a mnemonic string from which the seed can be recovered.
codesearchnet
def denyMapIdentity(self, subject, vendorSpecific=None): response = self.denyMapIdentityResponse(subject, vendorSpecific) return self._read_boolean_response(response)
See Also: denyMapIdentityResponse() Args: subject: vendorSpecific: Returns:
juraj-google-style
def GetProcessedTaskByIdentifier(self, task_identifier): with self._lock: task = self._tasks_processing.get(task_identifier, None) if (not task): task = self._tasks_queued.get(task_identifier, None) if (not task): task = self._tasks_abandoned.get(task_identifier, None) if (not task): raise KeyError('Status of task {0:s} is unknown'.format(task_identifier)) return task
Retrieves a task that has been processed. Args: task_identifier (str): unique identifier of the task. Returns: Task: a task that has been processed. Raises: KeyError: if the task was not processing, queued or abandoned.
codesearchnet
def compute_ssim(image1, image2, gaussian_kernel_sigma=1.5, gaussian_kernel_width=11): gaussian_kernel_1d = get_gaussian_kernel( gaussian_kernel_width, gaussian_kernel_sigma) return SSIM(image1, gaussian_kernel_1d).ssim_value(image2)
Computes SSIM. Args: image1: First PIL Image object to compare. image2: Second PIL Image object to compare. gaussian_kernel_sigma: Sigma of the Gaussian kernel used for the comparison. gaussian_kernel_width: Width of the Gaussian kernel used for the comparison. Returns: SSIM float value.
juraj-google-style
def __init__(self, endpoint_name, sagemaker_session=None): super(SKLearnPredictor, self).__init__(endpoint_name, sagemaker_session, npy_serializer, numpy_deserializer)
Initialize an ``SKLearnPredictor``. Args: endpoint_name (str): The name of the endpoint to perform inference on. sagemaker_session (sagemaker.session.Session): Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one using the default AWS configuration chain.
juraj-google-style
def convert_debug_info_func(saved_debug_info): def f(original_nodes): del original_nodes return saved_debug_info return f
Returns a method to retrieve the `GraphDebugInfo` from the original graph. Args: saved_debug_info: The `GraphDebugInfo` containing all the debug info. Returns: A function which retrieves the stack traces from the original graph and converts them to a `GraphDebugInfo` for a given set of nodes.
github-repos
def __handle_variable(self, shell_entry, output): if ('variable' in shell_entry): variable_name = shell_entry['variable'] self.pipeline.variables[variable_name] = '\n'.join(output)
Save output for the configured variable name. Args: shell_entry (dict): shell based configuration (shell, docker container or Python). output (list): list of strings representing the output of the last shell command.
codesearchnet
def __init__(self, func, lower_control_flow, aggressive_inlining, variable_names_allowlist=None, variable_names_denylist=None, session=None): self._session = session session.run(variables.global_variables_initializer()) for op in ops.get_default_graph().get_collection(VAR_ASSIGN_COLLECTION): session.run(op) super(_FunctionConverterDataInGraph, self).__init__(func, lower_control_flow, aggressive_inlining, variable_names_allowlist, variable_names_denylist)
Creates the conversion data for the given function. Args: func: ConcreteFunction. lower_control_flow: Boolean indicating whether or not to lower control flow ops such as If and While. aggressive_inlining: Boolean indicating whether or not to do aggressive function inlining (might be unsafe if function has stateful ops, not properly connected to control outputs). variable_names_allowlist: The set of variable names to convert (by default, all variables are converted). variable_names_denylist: The set of variable names to omit converting to constants. session: Session object.
github-repos
def func_callsig(func, with_name=True): import inspect argspec = inspect.getargspec(func) (args, varargs, varkw, defaults) = argspec callsig = inspect.formatargspec(*argspec[0:3]) if with_name: callsig = get_callable_name(func) + callsig return callsig
String of function call signature Args: func (function): live python function Returns: str: callsig CommandLine: python -m utool.util_str --exec-func_callsig Example: >>> # ENABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> func = func_str >>> callsig = func_callsig(func) >>> result = str(callsig) >>> print(result) func_str(func, args, kwargs, type_aliases, packed, packkw, truncate)
juraj-google-style
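Note that inspect.getargspec and inspect.formatargspec were removed in Python 3.11; a rough modern equivalent of the helper above (output formatting differs slightly) could look like this:

import inspect

def func_callsig_sketch(func, with_name=True):
    # Approximation of func_callsig using inspect.signature.
    sig = str(inspect.signature(func))
    return (func.__name__ + sig) if with_name else sig

def example(a, b=1, *args, **kwargs):
    return a

print(func_callsig_sketch(example))  # example(a, b=1, *args, **kwargs)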
def build_recursive_localize_env(destination, inputs): export_input_dirs = '\n'.join([ 'export {0}={1}/{2}'.format(var.name, destination.rstrip('/'), var.docker_path.rstrip('/')) for var in inputs if var.recursive and var.docker_path ]) return export_input_dirs
Return a multi-line string with export statements for the variables. Arguments: destination: Folder where the data will be put. For example /mnt/data inputs: a list of InputFileParam Returns: a multi-line string with a shell script that sets environment variables corresponding to the inputs.
juraj-google-style
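A usage sketch with a namedtuple standing in for InputFileParam (only the attributes the helper reads are modelled: name, docker_path and recursive; the real class has more fields):

from collections import namedtuple

FakeInput = namedtuple("FakeInput", ["name", "docker_path", "recursive"])
inputs = [FakeInput("INPUT_0", "input/data/", True),
          FakeInput("INPUT_1", "other/", False)]
print(build_recursive_localize_env("/mnt/data/", inputs))
# export INPUT_0=/mnt/data/input/data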
def to_title_caps(underscore_case): words = underscore_case.split('_') words2 = [word[0].upper() + word[1:] for word in words] title_str = ' '.join(words2) return title_str
r""" Args: underscore_case (?): Returns: str: title_str CommandLine: python -m utool.util_str --exec-to_title_caps Example: >>> # DISABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> underscore_case = 'the_foo_bar_func' >>> title_str = to_title_caps(underscore_case) >>> result = ('title_str = %s' % (str(title_str),)) >>> print(result) title_str = The Foo Bar Func
juraj-google-style
def parse(input_string, prefix=''): tree = parser.parse(input_string) visitor = ChatlVisitor(prefix) visit_parse_tree(tree, visitor) return visitor.parsed
Parses the given DSL string and returns parsed results. Args: input_string (str): DSL string prefix (str): Optional prefix to add to every element name, useful to namespace things Returns: dict: Parsed content
juraj-google-style
def list_workflows(config): workflows = [] for path in config.workflows: filenames = glob.glob(os.path.join(os.path.abspath(path), '*.py')) for filename in filenames: module_name = os.path.splitext(os.path.basename(filename))[0] workflow = Workflow() try: workflow.load(module_name, validate_arguments=False, strict_dag=True) workflows.append(workflow) except DirectedAcyclicGraphInvalid as e: raise WorkflowDefinitionError(workflow_name=module_name, graph_name=e.graph_name) except WorkflowImportError: continue return workflows
List all available workflows. Returns a list of all workflows that are available from the paths specified in the config. A workflow is defined as a Python file with at least one DAG. Args: config (Config): Reference to the configuration object from which the settings are retrieved. Returns: list: A list of workflows.
juraj-google-style
def detect_format(program, attributes) -> str: def fmt(attr): return attr.array_length * attr.dimension, attr.shape return ' '.join('%d%s' % fmt(program[a]) for a in attributes)
Detect format for vertex attributes. The format returned does not contain padding. Args: program (Program): The program. attributes (list): A list of attribute names. Returns: str
juraj-google-style
def on_heartbeat(self, message): logger.info('Got a heartbeat') logger.info('Heartbeat message: {}'.format(message)) self.heartbeat_thread.update_sequence(message['d']) return
Runs on a heartbeat event from the websocket connection. Args: message (dict): Full message from Discord websocket connection
codesearchnet
def Pack(cls, obj, version): if isinstance(obj, ServiceQuery): return str(obj) return obj
Pack the given object using AdWords-specific logic. Args: obj: an object to be packed for SOAP using AdWords-specific logic, if applicable. version: the version of the current API, e.g. 'v201809' Returns: The given object packed with AdWords-specific logic for SOAP, if applicable. Otherwise, returns the given object unmodified.
juraj-google-style
def get_attribute(self, attribute: str) -> 'Node': matches = [value_node for (key_node, value_node) in self.yaml_node.value if (key_node.value == attribute)] if (len(matches) != 1): raise SeasoningError('Attribute not found, or found multiple times: {}'.format(matches)) return Node(matches[0])
Returns the node representing the given attribute's value. Use only if is_mapping() returns true. Args: attribute: The name of the attribute to retrieve. Raises: SeasoningError: If the attribute does not exist or is found multiple times. Returns: A node representing the value.
codesearchnet
def __build_config_block(self, config_block_node): node_lists = [] for line_node in config_block_node: if isinstance(line_node, pegnode.ConfigLine): node_lists.append(self.__build_config(line_node)) elif isinstance(line_node, pegnode.OptionLine): node_lists.append(self.__build_option(line_node)) elif isinstance(line_node, pegnode.ServerLine): node_lists.append(self.__build_server(line_node)) elif isinstance(line_node, pegnode.BindLine): node_lists.append(self.__build_bind(line_node)) elif isinstance(line_node, pegnode.AclLine): node_lists.append(self.__build_acl(line_node)) elif isinstance(line_node, pegnode.BackendLine): node_lists.append(self.__build_usebackend(line_node)) elif isinstance(line_node, pegnode.UserLine): node_lists.append(self.__build_user(line_node)) elif isinstance(line_node, pegnode.GroupLine): node_lists.append(self.__build_group(line_node)) else: pass return node_lists
parse `config_block` in each section Args: config_block_node (TreeNode): Description Returns: [line_node1, line_node2, ...]
codesearchnet
def pickle_load(cls, filepath, spectator_mode=True, remove_lock=False): if os.path.isdir(filepath): for (dirpath, dirnames, filenames) in os.walk(filepath): fnames = [f for f in filenames if (f == cls.PICKLE_FNAME)] if fnames: if (len(fnames) == 1): filepath = os.path.join(dirpath, fnames[0]) break else: err_msg = ('Found multiple databases:\n %s' % str(fnames)) raise RuntimeError(err_msg) else: err_msg = ('Cannot find %s inside directory %s' % (cls.PICKLE_FNAME, filepath)) raise ValueError(err_msg) if (remove_lock and os.path.exists((filepath + '.lock'))): try: os.remove((filepath + '.lock')) except: pass with FileLock(filepath): with open(filepath, 'rb') as fh: flow = pmg_pickle_load(fh) if (flow.VERSION != cls.VERSION): msg = ('File flow version %s != latest version %s\n.Regenerate the flow to solve the problem ' % (flow.VERSION, cls.VERSION)) warnings.warn(msg) flow.set_spectator_mode(spectator_mode) flow.check_status() return flow
Loads the object from a pickle file and performs initial setup. Args: filepath: Filename or directory name. If filepath is a directory, we scan the directory tree starting from filepath and we read the first pickle database. Raises RuntimeError if multiple databases are found. spectator_mode: If True, the nodes of the flow are not connected by signals. This option is usually used when we want to read a flow in read-only mode and we want to avoid callbacks that can change the flow. remove_lock: True to remove the file lock if any (use it carefully).
codesearchnet
def _get_attrs_items(obj): attrs = getattr(obj.__class__, '__attrs_attrs__') attr_names = (a.name for a in attrs) return [(attr_name, getattr(obj, attr_name)) for attr_name in attr_names]
Returns a list of (name, value) pairs from an attrs instance. TODO(b/268078256): check if this comment is valid, and if so, ensure it's handled in the function below. The list will be sorted by name. Args: obj: an object. Returns: A list of (attr_name, attr_value) pairs, sorted by attr_name.
github-repos
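A usage sketch with the attrs package (assumed to be installed), mirroring what the helper above reads from __attrs_attrs__:

import attr

@attr.s
class Point:
    x = attr.ib()
    y = attr.ib()

print(_get_attrs_items(Point(1, 2)))  # [('x', 1), ('y', 2)]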
def FormatArtifacts(self, artifacts): artifact_definitions = [artifact.AsDict() for artifact in artifacts] json_data = json.dumps(artifact_definitions) return json_data
Formats artifacts to desired output format. Args: artifacts (list[ArtifactDefinition]): artifact definitions. Returns: str: formatted string of artifact definition.
juraj-google-style
def dist_point_line(p, l1, l2): cross_prod = np.cross(l2 - l1, p - l1) return np.linalg.norm(cross_prod) / np.linalg.norm(l2 - l1)
Compute the orthogonal distance between the point p and the line that goes through the points l1 and l2. Args: p, l1, l2: iterables of cartesian coordinates for the point and the two points defining the line.
juraj-google-style
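A quick sanity check of the distance function above: the point (0, 1, 0) lies at distance 1 from the x-axis through (0, 0, 0) and (1, 0, 0).

import numpy as np

p = np.array([0.0, 1.0, 0.0])
l1 = np.array([0.0, 0.0, 0.0])
l2 = np.array([1.0, 0.0, 0.0])
print(dist_point_line(p, l1, l2))  # 1.0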
def plot_real_feature(df, feature_name, bins=50, figsize=(15, 15)): ix_negative_target = df[(df.target == 0)].index ix_positive_target = df[(df.target == 1)].index plt.figure(figsize=figsize) ax_overall_dist = plt.subplot2grid((3, 2), (0, 0), colspan=2) ax_target_conditional_dist = plt.subplot2grid((3, 2), (1, 0), colspan=2) ax_botplot = plt.subplot2grid((3, 2), (2, 0)) ax_violin_plot = plt.subplot2grid((3, 2), (2, 1)) ax_overall_dist.set_title('Distribution of {}'.format(feature_name), fontsize=16) sns.distplot(df[feature_name], bins=50, ax=ax_overall_dist) sns.distplot(df.loc[ix_positive_target][feature_name], bins=bins, ax=ax_target_conditional_dist, label='Positive Target') sns.distplot(df.loc[ix_negative_target][feature_name], bins=bins, ax=ax_target_conditional_dist, label='Negative Target') ax_target_conditional_dist.legend(loc='upper right', prop={'size': 14}) sns.boxplot(y=feature_name, x='target', data=df, ax=ax_botplot) sns.violinplot(y=feature_name, x='target', data=df, ax=ax_violin_plot) plt.show()
Plot the distribution of a real-valued feature conditioned by the target. Examples: `plot_real_feature(X, 'emb_mean_euclidean')` Args: df: Pandas dataframe containing the target column (named 'target'). feature_name: The name of the feature to plot. bins: The number of histogram bins for the distribution plot. figsize: The size of the plotted figure.
codesearchnet
def _PrintExtractionStatusUpdateLinear(self, processing_status): for worker_status in processing_status.workers_status: status_line = ( '{0:s} (PID: {1:d}) - events produced: {2:d} - file: {3:s} ' '- running: {4!s}\n').format( worker_status.identifier, worker_status.pid, worker_status.number_of_produced_events, worker_status.display_name, worker_status.status not in definitions.ERROR_STATUS_INDICATORS) self._output_writer.Write(status_line)
Prints an extraction status update in linear mode. Args: processing_status (ProcessingStatus): processing status.
juraj-google-style
def sign_adaptation(control: FloatNest, output: FloatTensor, set_point: FloatTensor, adaptation_rate: FloatTensor=0.01) -> FloatNest: def _get_new_control(control, output, set_point): new_control = mcmc_util.choose((output > set_point), (control * (1.0 + adaptation_rate)), (control / (1.0 + adaptation_rate))) return new_control output = maybe_broadcast_structure(output, control) set_point = maybe_broadcast_structure(set_point, control) return tf.nest.map_structure(_get_new_control, control, output, set_point)
A function to do simple sign-based control of a variable. ``` control = control * (1. + adaptation_rate) ** sign(output - set_point) ``` Args: control: The control variable. output: The output variable. set_point: The set point for `output`. This function will adjust `control` so that `output` matches `set_point`. adaptation_rate: Adaptation rate. Returns: control: New control.
codesearchnet
def principal_direction_extent(points): points = np.copy(points) points -= np.mean(points, axis=0) (_, eigv) = pca(points) extent = np.zeros(3) for i in range(eigv.shape[1]): scalar_projs = np.sort(np.array([np.dot(p, eigv[:, i]) for p in points])) extent[i] = scalar_projs[-1] if (scalar_projs[0] < 0.0): extent -= scalar_projs[0] return extent
Calculate the extent of a set of 3D points. The extent is defined as the maximum distance between the projections on the principal directions of the covariance matrix of the points. Args: points: a 2D numpy array of points. Returns: extents: the extents along each of the eigenvectors of the covariance matrix.
codesearchnet
def get_gdns_publisher(config, metrics, **kwargs): builder = gdns_publisher.GDNSPublisherBuilder( config, metrics, **kwargs) return builder.build_publisher()
Get a GDNSPublisher client. A factory function that validates configuration and returns a publisher client (:interface:`gordon.interfaces.IMessageHandler`) provider. Args: config (dict): Google Cloud DNS API related configuration. metrics (obj): :interface:`IMetricRelay` implementation. kwargs (dict): Additional keyword arguments to pass to the publisher. Returns: A :class:`GDNSPublisher` instance.
juraj-google-style
def not_implemented(cls, errors=None): if cls.expose_status: cls.response.content_type = 'application/json' cls.response._status_line = '501 Not Implemented' return cls(501, None, errors).to_json
Shortcut API for HTTP 501 `Not Implemented` response. Args: errors (list): Response key/value data. Returns: WSResponse Instance.
codesearchnet
def word_probability(self, word, total_words=None): if (total_words is None): total_words = self._word_frequency.total_words return (self._word_frequency.dictionary[word] / total_words)
Calculate the probability of the `word` being the desired, correct word Args: word (str): The word for which the word probability is \ calculated total_words (int): The total number of words to use in the \ calculation; use the default for using the whole word \ frequency Returns: float: The probability that the word is the correct word
codesearchnet
def _restore_volume(self, fade): self.device.mute = self.mute if self.volume == 100: fixed_vol = self.device.renderingControl.GetOutputFixed( [('InstanceID', 0)])['CurrentFixed'] else: fixed_vol = False if not fixed_vol: self.device.bass = self.bass self.device.treble = self.treble self.device.loudness = self.loudness if fade: self.device.volume = 0 self.device.ramp_to_volume(self.volume) else: self.device.volume = self.volume
Reinstate volume. Args: fade (bool): Whether volume should be faded up on restore.
juraj-google-style
def eval(self, amplstatements, **kwargs): if (self._langext is not None): amplstatements = self._langext.translate(amplstatements, **kwargs) lock_and_call((lambda : self._impl.eval(amplstatements)), self._lock) self._errorhandler_wrapper.check()
Parses AMPL code and evaluates it as a possibly empty sequence of AMPL declarations and statements. As a side effect, it invalidates all entities (as the passed statements can contain any arbitrary command); the lists of entities will be re-populated lazily (at first access) The output of interpreting the statements is passed to the current OutputHandler (see getOutputHandler and setOutputHandler). By default, errors and warnings are printed on stdout. This behavior can be changed reassigning an ErrorHandler using setErrorHandler. Args: amplstatements: A collection of AMPL statements and declarations to be passed to the interpreter. Raises: RuntimeError: if the input is not a complete AMPL statement (e.g. if it does not end with semicolon) or if the underlying interpreter is not running.
codesearchnet
def load_and_save_resfile(filename, outfile=None, outdir=None, mass=1.0): d = CellpyData() if (not outdir): outdir = prms.Paths['cellpydatadir'] if (not outfile): outfile = (os.path.basename(filename).split('.')[0] + '.h5') outfile = os.path.join(outdir, outfile) print('filename:', filename) print('outfile:', outfile) print('outdir:', outdir) print('mass:', mass, 'mg') d.from_raw(filename) d.set_mass(mass) d.make_step_table() d.make_summary() d.save(filename=outfile) d.to_csv(datadir=outdir, cycles=True, raw=True, summary=True) return outfile
Load a raw data file and save it as cellpy-file. Args: mass (float): active material mass [mg]. outdir (path): optional, path to directory for saving the hdf5-file. outfile (str): optional, name of hdf5-file. filename (str): name of the resfile. Returns: out_file_name (str): name of saved file.
codesearchnet
def dag_to_circuit(dag): qregs = collections.OrderedDict() for qreg in dag.qregs.values(): qreg_tmp = QuantumRegister(qreg.size, name=qreg.name) qregs[qreg.name] = qreg_tmp cregs = collections.OrderedDict() for creg in dag.cregs.values(): creg_tmp = ClassicalRegister(creg.size, name=creg.name) cregs[creg.name] = creg_tmp name = dag.name or None circuit = QuantumCircuit(*qregs.values(), *cregs.values(), name=name) for node in dag.topological_op_nodes(): qubits = [] for qubit in node.qargs: qubits.append(qregs[qubit[0].name][qubit[1]]) clbits = [] for clbit in node.cargs: clbits.append(cregs[clbit[0].name][clbit[1]]) if node.condition is None: control = None else: control = (node.condition[0], node.condition[1]) inst = node.op.copy() inst.control = control circuit.append(inst, qubits, clbits) return circuit
Build a ``QuantumCircuit`` object from a ``DAGCircuit``. Args: dag (DAGCircuit): the input dag. Return: QuantumCircuit: the circuit representing the input dag.
juraj-google-style
def call(self, input_ids: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, token_type_ids: Optional[tf.Tensor]=None, inputs_embeds: Optional[tf.Tensor]=None, training: bool=False) -> tf.Tensor: assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: input_shape = shape_list(input_ids) else: input_shape = shape_list(inputs_embeds)[:-1] seq_length = input_shape[1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape + [self.number_of_token_type_embeddings], value=0) if position_ids is None: position_ids = tf.expand_dims(tf.range(start=0, limit=seq_length), axis=0) position_ids = tf.broadcast_to(position_ids, shape=input_shape) if self.reset_position_index_per_cell: col_index = IndexMap(token_type_ids[:, :, 1], self.config.type_vocab_sizes[1], batch_dims=1) row_index = IndexMap(token_type_ids[:, :, 2], self.config.type_vocab_sizes[2], batch_dims=1) full_index = ProductIndexMap(col_index, row_index) first_position_per_segment = reduce_min(position_ids, full_index)[0] first_position = gather(first_position_per_segment, full_index) position = tf.expand_dims(tf.range(start=0, limit=seq_length), axis=0) position_ids = tf.math.minimum(self.max_position_embeddings - 1, position - first_position) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) position_embeddings = tf.gather(self.position_embeddings, indices=position_ids) final_embeddings = inputs_embeds + position_embeddings for i in range(self.number_of_token_type_embeddings): name = f'token_type_embeddings_{i}' final_embeddings += tf.gather(params=getattr(self, name), indices=token_type_ids[:, :, i]) final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings
Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor.
github-repos
def get_countries(is_legacy_xml=False): countries = {} if sys.platform == 'win32' and getattr(sys, 'frozen', False): data_dir = path.dirname(sys.executable) else: data_dir = path.dirname(__file__) if is_legacy_xml: log.debug('Opening country code legacy XML: {0}'.format( str(data_dir) + '/data/iso_3166-1_list_en.xml')) f = io.open(str(data_dir) + '/data/iso_3166-1_list_en.xml', 'r', encoding='ISO-8859-1') data = f.read() if not data: return {} dom = parseString(data) entries = dom.getElementsByTagName('ISO_3166-1_Entry') for entry in entries: code = entry.getElementsByTagName( 'ISO_3166-1_Alpha-2_Code_element')[0].firstChild.data name = entry.getElementsByTagName( 'ISO_3166-1_Country_name')[0].firstChild.data countries[code] = name.title() else: log.debug('Opening country code CSV: {0}'.format( str(data_dir) + '/data/iso_3166-1_list_en.xml')) f = io.open(str(data_dir) + '/data/iso_3166-1.csv', 'r', encoding='utf-8') csv_reader = csv.reader(f, delimiter=',', quotechar='"') for row in csv_reader: code = row[0] name = row[1] countries[code] = name return countries
The function to generate a dictionary containing ISO_3166-1 country codes to names. Args: is_legacy_xml (:obj:`bool`): Whether to use the older country code list (iso_3166-1_list_en.xml). Returns: dict: A mapping of country codes as the keys to the country names as the values.
juraj-google-style
def get_variant_by_name(self, name): try: geno = self.df.loc[:, name].values info = self.map_info.loc[name, :] except KeyError: logging.variant_name_not_found(name) return [] else: return [Genotypes( Variant(info.name, info.chrom, info.pos, [info.a1, info.a2]), geno, reference=info.a2, coded=info.a1, multiallelic=False, )]
Get the genotypes for a given variant (by name). Args: name (str): The name of the variant to retrieve the genotypes. Returns: list: A list of Genotypes. This is a list in order to keep the same behaviour as the other functions.
juraj-google-style
def write_seq_as_temp_fasta(seq): sr = ssbio.protein.sequence.utils.cast_to_seq_record(seq, id='tempfasta') return write_fasta_file(seq_records=sr, outname='temp', outdir=tempfile.gettempdir(), force_rerun=True)
Write a sequence as a temporary FASTA file Args: seq (str, Seq, SeqRecord): Sequence string, Biopython Seq or SeqRecord object Returns: str: Path to temporary FASTA file (located in system temporary files directory)
codesearchnet
def get_parts_of_url_path(url): parsed = urlparse(url) path = unquote(parsed.path).lstrip('/') parts = path.split('/') return parts
Given a url, take out the path part and split it by '/'. Args: url (str): the url to slice. Returns: list: parts after the domain name of the URL
juraj-google-style
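A usage sketch (the helper relies on urlparse and unquote being imported at module level, e.g. from urllib.parse in Python 3):

from urllib.parse import urlparse, unquote

print(get_parts_of_url_path("https://example.com/a/b%20c/d?x=1"))
# ['a', 'b c', 'd']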
def as_dataframe(self, pattern='*', max_rows=None): data = [] for (i, group) in enumerate(self.list(pattern)): if ((max_rows is not None) and (i >= max_rows)): break parent = self._group_dict.get(group.parent_id) parent_display_name = ('' if (parent is None) else parent.display_name) data.append([group.id, group.display_name, group.parent_id, parent_display_name, group.is_cluster, group.filter]) return pandas.DataFrame(data, columns=self._DISPLAY_HEADERS)
Creates a pandas dataframe from the groups that match the filters. Args: pattern: An optional pattern to further filter the groups. This can include Unix shell-style wildcards. E.g. ``"Production *"``, ``"*-backend"``. max_rows: The maximum number of groups to return. If None, return all. Returns: A pandas dataframe containing matching groups.
codesearchnet
def create_releasenotes(project_dir=os.curdir, bugtracker_url=''): pkg_info_file = os.path.join(project_dir, 'PKG-INFO') if os.path.exists(pkg_info_file): return with open('RELEASE_NOTES', 'wb') as releasenotes_fd: releasenotes_fd.write( get_releasenotes( project_dir=project_dir, bugtracker_url=bugtracker_url, ).encode('utf-8') + b'\n' )
Creates the release notes file, if not in a package. Args: project_dir(str): Path to the git repo of the project. bugtracker_url(str): Url to the bug tracker for the issues. Returns: None Raises: RuntimeError: If the release notes could not be retrieved
juraj-google-style
def post_create_app(cls, app, **settings): register_errorhandler = settings.pop('register_errorhandler', True) if register_errorhandler: AppException.register_errorhandler(app) return app
Register the errorhandler for the AppException to the passed in App. Args: app (fleaker.base.BaseApplication): A Flask application that extends the Fleaker Base Application, such that the hooks are implemented. Kwargs: register_errorhandler (bool): A boolean indicating if we want to automatically register an errorhandler for the :class:`AppException` exception class after we create this App. Pass ``False`` to prevent registration. Default is ``True``. Returns: fleaker.base.BaseApplication: Returns the app it was given.
codesearchnet
def _AggregatedGrads(grads, op, gradient_uid, loop_state, aggregation_method=None): if aggregation_method is None: aggregation_method = AggregationMethod.DEFAULT valid_aggregation_methods = [AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE, AggregationMethod.EXPERIMENTAL_ACCUMULATE_N] if aggregation_method not in valid_aggregation_methods: raise ValueError(f'Invalid `aggregation_method` specified {aggregation_method}. Accepted values are {valid_aggregation_methods}.') out_grads = _GetGrads(grads, op) for i, out_grad in enumerate(out_grads): if loop_state: if isinstance(out_grad, (tensor_lib.Tensor, indexed_slices.IndexedSlices)): assert control_flow_util.IsLoopSwitch(op) continue if isinstance(out_grad, collections_abc.Sequence) and (not all((isinstance(g, (tensor_lib.Tensor, indexed_slices.IndexedSlices)) for g in out_grad if g is not None))): raise TypeError(f'Invalid gradient {out_grad} [index = {i}]. Gradients have to be either all Tensors or all IndexedSlices') if out_grad: if len(out_grad) < 2: used = 'nop' out_grads[i] = out_grad[0] elif all((isinstance(g, tensor_lib.Tensor) for g in out_grad if g is not None)): tensor_shape = _AccumulatorShape(out_grad) if aggregation_method in [AggregationMethod.EXPERIMENTAL_TREE, AggregationMethod.EXPERIMENTAL_ACCUMULATE_N]: used = 'tree' with ops.name_scope(op.name + '_gradient_sum'): running_sum = out_grad[0] for grad in out_grad[1:]: running_sum = math_ops.add_n([running_sum, grad]) out_grads[i] = running_sum else: used = 'add_n' out_grads[i] = _MultiDeviceAddN(out_grad, gradient_uid) logging.vlog(2, ' _AggregatedGrads %d x %s using %s', len(out_grad), tensor_shape, used) else: out_grads[i] = backprop_util.AggregateIndexedSlicesGradients(out_grad) else: out_grads[i] = None return out_grads
Get the aggregated gradients for op. Args: grads: The map of memoized gradients. op: The op to get gradients for. gradient_uid: A unique identifier within the graph indicating which invocation of gradients is being executed. Used to cluster ops for compilation. loop_state: An object for maintaining the state of the while loops in the graph. It is of type ControlFlowState. None if the graph contains no while loops. aggregation_method: Specifies the method used to combine gradient terms. Accepted values are constants defined in the class `AggregationMethod`. Returns: A list of gradients, one per each output of `op`. If the gradients for a particular output is a list, this function aggregates it before returning. Raises: TypeError: if the incoming grads are not Tensors or IndexedSlices. ValueError: if the arguments are invalid.
github-repos
def url(self, endpoint=''): if not endpoint.startswith('/'): endpoint = "/" + endpoint return self.protocol + "://" + self.host + endpoint  # tail reconstructed from a truncated source line; a 'host' attribute on the Remote is assumed
Get the base URL of the Remote. Arguments: None Returns: `str` base URL
juraj-google-style
def _sort_course_modes(self, modes): def slug_weight(mode): sorting_slugs = COURSE_MODE_SORT_ORDER sorting_slugs_size = len(sorting_slugs) if mode['slug'] in sorting_slugs: return sorting_slugs_size - sorting_slugs.index(mode['slug']) return 0 return sorted(modes, key=slug_weight, reverse=True)
Sort the course mode dictionaries by slug according to the COURSE_MODE_SORT_ORDER constant. Arguments: modes (list): A list of course mode dictionaries. Returns: list: A list with the course modes dictionaries sorted by slug.
juraj-google-style
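A self-contained sketch of the slug-weight ordering, using a hypothetical COURSE_MODE_SORT_ORDER (the real constant is defined elsewhere in the project): slugs that appear earlier in the list sort first, unknown slugs sort last.

COURSE_MODE_SORT_ORDER = ["verified", "professional", "audit"]  # hypothetical order

def sort_modes_sketch(modes):
    size = len(COURSE_MODE_SORT_ORDER)
    def weight(mode):
        slug = mode["slug"]
        return size - COURSE_MODE_SORT_ORDER.index(slug) if slug in COURSE_MODE_SORT_ORDER else 0
    return sorted(modes, key=weight, reverse=True)

print(sort_modes_sketch([{"slug": "audit"}, {"slug": "honor"}, {"slug": "verified"}]))
# [{'slug': 'verified'}, {'slug': 'audit'}, {'slug': 'honor'}]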
def sg_reuse(tensor, **opt): opt = tf.sg_opt(opt) assert hasattr(tensor, '_sugar'), 'cannot reuse this node.' assert (opt.input is not None), 'input is mandatory.' (nodes, prev) = ([tensor], tensor._sugar.prev) while (prev is not None): nodes = ([prev] + nodes) prev = (prev._sugar.prev if hasattr(prev, '_sugar') else None) out = opt.input for node in nodes[1:]: if node._sugar.is_layer: fn = tf.sg_layer_func(node._sugar.func) if node._sugar.arg.scope_name: with tf.variable_scope(node._sugar.arg.scope_name): out = fn(out, **(node._sugar.arg + tf.sg_opt(name=node._sugar.name, reuse=True))) else: out = fn(out, **(node._sugar.arg + tf.sg_opt(name=node._sugar.name, reuse=True))) else: out = node._sugar.func(out, node._sugar.arg) return out
r""" Reconstruct computational graph of `tensor` so all the parameters can be reused and replace its input tensor with `opt.input`. Args: tensor: A `Tensor` (automatically given by chaining). **opt: input: A `Tensor` that will replace the original input tensor. Returns: Reconstructed tensor nodes.
codesearchnet
def _GetExpectedFractionalMaxPoolResult(self, input_tensor, row_seq, col_seq, overlapping): input_shape = input_tensor.shape output_shape = (input_shape[0], len(row_seq) - 1, len(col_seq) - 1, input_shape[3]) output_tensor = np.zeros(shape=output_shape, dtype=input_tensor.dtype) for batch in range(input_shape[0]): for channel in range(input_shape[3]): two_dim_slice = input_tensor[batch, :, :, channel] tmp = self._MaxPoolAlongRows(two_dim_slice, row_seq, overlapping) output_tensor[batch, :, :, channel] = self._MaxPoolAlongCols(tmp, col_seq, overlapping) return output_tensor
Get expected fractional max pool result. row_seq and col_seq together defines the fractional pooling region. Args: input_tensor: Original input tensor, assuming it is a 4-D tensor, with dimension as [batch, height/row, width/column, channels/depth]. row_seq: Cumulative pooling sequence along row. col_seq: Cumulative pooling sequence along column. overlapping: Use overlapping when doing pooling. Returns: A 4-D tensor that is the result of max pooling on input_tensor based on pooling region defined by row_seq and col_seq, conditioned on whether or not overlapping is used.
github-repos
def authenticate(self): basic_auth = request.authorization is_valid = False user = None if basic_auth: is_valid, user = self.check_basic_auth( basic_auth.username, basic_auth.password ) else: token = request.headers.get('Authorization', None) param_token = request.args.get('access_token') if token or param_token: if token: token = token[6:] else: token = param_token log.debug('Received token: %s', token) is_valid, user = self.check_token_auth(token) return (is_valid, user)
Authenticate user by any means and return either true or false. Args: None. Returns: tuple (is_valid, user): True if the user is valid, False if not
juraj-google-style
def __init__(self, name: str, object_id: int, timestamp: int, pid: int, allocator: str, num_bytes: int) -> None: self._name = name self._pid = pid self._object_id = object_id self._create_time = timestamp self._allocator = allocator self._num_bytes = num_bytes self._ref_times = [] self._unref_times = []
Creates an object to track tensor references. This class is not thread safe and is intended only for internal use by the 'Timeline' class in this file. Args: name: The name of the Tensor as a string. object_id: Chrome Trace object identifier assigned for this Tensor. timestamp: The creation timestamp of this event as a long integer. pid: Process identifier of the associated device, as an integer. allocator: Name of the allocator used to create the Tensor. num_bytes: Number of bytes allocated (long integer). Returns: A 'TensorTracker' object.
github-repos
def get_broadcast_shape(*tensors): s_shape = tensors[0].shape for t in tensors[1:]: s_shape = tf.broadcast_static_shape(s_shape, t.shape) if tensorshape_util.is_fully_defined(s_shape): return tensorshape_util.as_list(s_shape) d_shape = tf.shape(input=tensors[0]) for t in tensors[1:]: d_shape = tf.broadcast_dynamic_shape(d_shape, tf.shape(input=t)) return d_shape
Get broadcast shape as a Python list of integers (preferred) or `Tensor`. Args: *tensors: One or more `Tensor` objects (already converted!). Returns: broadcast shape: Python list (if shapes determined statically), otherwise an `int32` `Tensor`.
juraj-google-style
def get_timestamp(self, cycle=None, dataset_number=None, in_minutes=False, full=True): dataset_number = self._validate_dataset_number(dataset_number) if dataset_number is None: self._report_empty_dataset() return cycle_index_header = self.headers_normal.cycle_index_txt timestamp_header = self.headers_normal.test_time_txt v = pd.Series() test = self.datasets[dataset_number].dfdata if cycle: c = test[(test[cycle_index_header] == cycle)] if not self.is_empty(c): v = c[timestamp_header] else: if not full: self.logger.debug("getting timestamp for all cycles") v = [] no_cycles = np.amax(test[cycle_index_header]) for j in range(1, no_cycles + 1): txt = "Cycle %i: " % j self.logger.debug(txt) c = test[(test[cycle_index_header] == j)] v.append(c[timestamp_header]) else: self.logger.debug("returning full timestamp col") v = test[timestamp_header] if in_minutes and v is not None: v /= 60.0 return v
Returns timestamps (in sec or minutes (if in_minutes==True)). Args: cycle: cycle number (all if None) dataset_number: first dataset if None in_minutes: return values in minutes instead of seconds if True full: valid only for cycle=None (i.e. all cycles), returns the full pandas.Series if True, else a list of pandas.Series Returns: pandas.Series (or list of pandas.Series if cycle=None og full=False)
juraj-google-style
def __init__(self, **kwargs): try: arguments = Adapter(CollectorStage.schema_complete().validate(kwargs)) self.stage = arguments.stage self.status = arguments.status self.events = arguments.events except SchemaError as exception: Logger.get_logger(__name__).error(exception) raise RuntimeError(str(exception))
Initializing and validating fields. Args: kwargs (dict): application command line options. Raises: RuntimeError: when validation of parameters has failed.
juraj-google-style
def dotcase(text, acronyms=None): words, _case, _sep = case_parse.parse_case(text, acronyms) return '.'.join([w.lower() for w in words])
Return text in dot.case style. Args: text: input string to convert case acronyms: a list of acronyms to detect >>> dotcase("hello world") 'hello.world' >>> dotcase("helloHTMLWorld", ["HTML"]) 'hello.html.world'
juraj-google-style
def get_aws_session(account): from cloud_inquisitor.config import dbconfig from cloud_inquisitor.plugins.types.accounts import AWSAccount if not isinstance(account, AWSAccount): raise InquisitorError('Non AWSAccount passed to get_aws_session, got {}'.format(account.__class__.__name__)) session = get_local_aws_session() if session.get_credentials().method in ['iam-role', 'env', 'explicit']: sts = session.client('sts') else: temp_sts = session.client('sts') audit_sts_role = temp_sts.assume_role( RoleArn=app_config.aws_api.instance_role_arn, RoleSessionName='inquisitor' ) sts = boto3.session.Session( audit_sts_role['Credentials']['AccessKeyId'], audit_sts_role['Credentials']['SecretAccessKey'], audit_sts_role['Credentials']['SessionToken'] ).client('sts') role = sts.assume_role( RoleArn='arn:aws:iam::{}:role/{}'.format( account.account_number, dbconfig.get('role_name', default='cinq_role') ), RoleSessionName='inquisitor' ) sess = boto3.session.Session( role['Credentials']['AccessKeyId'], role['Credentials']['SecretAccessKey'], role['Credentials']['SessionToken'] ) return sess
Function to return a boto3 Session based on the account passed in the first argument. Args: account (:obj:`Account`): Account to create the session object for Returns: :obj:`boto3:boto3.session.Session`
juraj-google-style
def resnet50(pretrained=False, **kwargs): model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) return model
Constructs a ResNet-50 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
juraj-google-style
def reverse_transform(self, column): self.check_data_type() return pd.DataFrame({self.col_name: np.log(column[self.col_name])})
Applies the natural logarithm function to turn positive values into real ranged values. Args: column (pandas.DataFrame): Data to transform. Returns: pd.DataFrame
juraj-google-style
def _get_table_names(statement): parts = statement.to_unicode().split() tables = set() for (i, token) in enumerate(parts): if ((token.lower() == 'from') or token.lower().endswith('join')): tables.add(parts[(i + 1)].rstrip(';')) return list(tables)
Returns table names found in the query. NOTE. This routine would use the sqlparse parse tree, but vnames don't parse very well. Args: statement (sqlparse.sql.Statement): parsed by sqlparse sql statement. Returns: list of str
codesearchnet
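A usage sketch with sqlparse (assumed available); statement.to_unicode() is an older sqlparse API, and str(statement) plays the same role in recent versions:

import sqlparse

sql = "SELECT o.id FROM orders o JOIN users u ON o.user_id = u.id;"
statement = sqlparse.parse(sql)[0]
parts = str(statement).split()
tables = {parts[i + 1].rstrip(";")
          for i, token in enumerate(parts)
          if token.lower() == "from" or token.lower().endswith("join")}
print(sorted(tables))  # ['orders', 'users']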
def setup_model(x, y, model_type='random_forest', seed=None, **kwargs): assert ((len(x) > 1) and (len(y) > 1)), 'Not enough data objects to train on (minimum is at least two, you have (x: {0}) and (y: {1}))'.format(len(x), len(y)) sets = namedtuple('Datasets', ['train', 'test']) (x_train, x_test, y_train, y_test) = train_test_split(x, y, random_state=seed, shuffle=False) x = sets(x_train, x_test) y = sets(y_train, y_test) if ((model_type == 'random_forest') or (model_type == 'rf')): model = rf.RandomForest(x, y, random_state=seed, **kwargs) elif ((model_type == 'deep_neural_network') or (model_type == 'dnn')): model = dnn.DeepNeuralNetwork(x, y, **kwargs) else: raise ValueError('Invalid model type kwarg') return model
Initializes a machine learning model Args: x: Pandas DataFrame, X axis of features y: Pandas Series, Y axis of targets model_type: Machine Learning model to use Valid values: 'random_forest' seed: Random state to use when splitting sets and creating the model **kwargs: Scikit Learn's RandomForestClassifier kwargs Returns: Trained model instance of model_type
codesearchnet
def emit(self, value): if not self._tstate.output_writer: logging.error("emit is called, but no output writer is set.") return self._tstate.output_writer.write(value)
Emits a value to output writer. Args: value: a value of type expected by the output writer.
juraj-google-style
def _set_read_only_resource_inputs_attr(op, branch_graphs): read_only_indices = set(range(len(op.inputs) - 1)) for branch_graph in branch_graphs: assert len(branch_graph.inputs) == len(op.inputs) - 1, 'should never happen' if not read_only_indices: break branch_read_only_indices = acd.get_read_only_resource_input_indices_graph(branch_graph) read_only_indices = read_only_indices.intersection(branch_read_only_indices) read_only_indices = [i + 1 for i in read_only_indices] ops.set_int_list_attr(op, acd.READ_ONLY_RESOURCE_INPUTS_ATTR, sorted(read_only_indices))
Sets the list of resource inputs which are read-only. This is used by AutomaticControlDependencies. Args: op: If or Case Operation. branch_graphs: List of branch FuncGraphs.
github-repos
def get_javascript_error(self, return_type='string'): if BROME_CONFIG['proxy_driver']['intercept_javascript_error']: js_errors = self._driver.execute_script('return window.jsErrors; window.jsErrors = [];') if (not js_errors): js_errors = [] if (return_type == 'list'): if len(js_errors): return js_errors else: return [] elif len(js_errors): return os.linesep.join(js_errors) else: return self.no_javascript_error_string elif (return_type == 'list'): return [] else: return self.no_javascript_error_string
Return the gathered javascript error Args: return_type: 'string' | 'list'; default: 'string'
codesearchnet
def inflate_nd_checker(identifier, definition): if isinstance(definition, bool): return Checker(name=identifier, passes=definition) elif isinstance(definition, dict): return Checker(definition.pop('name', identifier), **definition) else: raise ValueError(('%s type is not supported for no-data checkers, use bool or dict' % type(definition)))
Inflate a no-data checker from a basic definition. Args: identifier (str): the no-data checker identifier / name. definition (bool/dict): a boolean acting as "passes" or a full dict definition with "passes" and "allow_failure". Returns: Checker: a checker instance. Raises: ValueError: when the definition type is not bool or dict.
codesearchnet
def get_all_threads(self, expand=False): if (not expand): return self._request_threads(self._url.catalog()) thread_ids = self.get_all_thread_ids() threads = [self.get_thread(id, raise_404=False) for id in thread_ids] return filter(None, threads)
Return every thread on this board. If not expanded, result is same as get_threads run across all board pages, with last 3-5 replies included. Uses the catalog when not expanding, and uses the flat thread ID listing at /{board}/threads.json when expanding for more efficient resource usage. If expanded, all data of all threads is returned with no omitted posts. Args: expand (bool): Whether to download every single post of every thread. If enabled, this option can be very slow and bandwidth-intensive. Returns: list of :mod:`basc_py4chan.Thread`: List of Thread objects representing every thread on this board.
codesearchnet
def init(self, force_deploy=False, client=None): _force_deploy = self.provider_conf.force_deploy self.provider_conf.force_deploy = _force_deploy or force_deploy self._provider_conf = self.provider_conf.to_dict() r = api.Resources(self._provider_conf, client=client) r.launch() roles = r.get_roles() networks = r.get_networks() return (_to_enos_roles(roles), _to_enos_networks(networks))
Reserve and deploys the nodes according to the resources section In comparison to the vagrant provider, networks must be characterized as in the networks key. Args: force_deploy (bool): True iff the environment must be redeployed Raises: MissingNetworkError: If one network is missing in comparison to what is claimed. NotEnoughNodesError: If the `min` constraints can't be met.
juraj-google-style
def identity(self):
    return self._implementation.identity()
Returns a TensorArray with the same content and properties. Returns: A new TensorArray object with flow that ensures the control dependencies from the contexts will become control dependencies for writes, reads, etc. Use this object for all subsequent operations.
github-repos
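A minimal sketch through the public `tf.TensorArray` wrapper, which exposes this `identity` method:

import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=2)
ta = ta.write(0, 1.0)
ta_copy = ta.identity()      # same contents; control dependencies from the current context apply
value = ta_copy.read(0)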
def __init__(self, name, description, *labels):
    super(IntGauge, self).__init__('IntGauge', _int_gauge_methods, len(labels), name, description, *labels)
Creates a new IntGauge. Args: name: name of the new metric. description: description of the new metric. *labels: The label list of the new metric.
github-repos
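A hedged sketch against TensorFlow's internal monitoring module, where this class is defined; the metric name and label value are examples:

# tensorflow.python.eager.monitoring is not a public API and may change.
from tensorflow.python.eager import monitoring

gauge = monitoring.IntGauge('/test/my_gauge', 'An example gauge.', 'status')
gauge.get_cell('ready').set(1)
current = gauge.get_cell('ready').value()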
def _ExtractRequestSummaryFields(document):
    headers = document.childAtPath('Header/RequestHeader')
    body = document.childAtPath('Body')
    summary_fields = {'methodName': body.getChildren()[0].name}
    client_customer_id = headers.getChild('clientCustomerId')
    if client_customer_id is not None:
        summary_fields['clientCustomerId'] = client_customer_id.text
    network_code = headers.getChild('networkCode')
    if network_code is not None:
        summary_fields['networkCode'] = network_code.text
    return summary_fields
Extract logging fields from the request's suds.sax.element.Element. Args: document: A suds.sax.element.Element instance containing the API request. Returns: A dict mapping logging field names to their corresponding value.
codesearchnet
def remove_node(self, node):
    if node not in self.node_list:
        return
    self.node_list.remove(node)
    for n in self.node_list:
        n.link_list = [link for link in n.link_list if link.target != node]
Remove a node from ``self.node_list`` and links pointing to it. If ``node`` is not in the graph, do nothing. Args: node (Node): The node to be removed Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> graph = Graph([node_1]) >>> graph.remove_node(node_1) >>> len(graph.node_list) 0
codesearchnet
def _prefix_from_prefix_int(self, prefixlen):
    if not isinstance(prefixlen, (int, long)):
        raise NetmaskValueError('%r is not an integer' % prefixlen)
    prefixlen = int(prefixlen)
    if not (0 <= prefixlen <= self._max_prefixlen):
        raise NetmaskValueError('%d is not a valid prefix length' % prefixlen)
    return prefixlen
Validate and return a prefix length integer. Args: prefixlen: An integer containing the prefix length. Returns: The input, possibly converted from long to int. Raises: NetmaskValueError: If the input is not an integer, or out of range.
codesearchnet
def matches(self, other, **kwargs):
    from pymatgen.analysis.structure_matcher import StructureMatcher
    m = StructureMatcher(**kwargs)
    return m.fit(Structure.from_sites(self), Structure.from_sites(other))
Check whether this structure is similar to another structure. Basically a convenience method that calls structure matcher fitting. Args: other (IStructure/Structure): Another structure. **kwargs: Same **kwargs as in :class:`pymatgen.analysis.structure_matcher.StructureMatcher`. Returns: (bool) True if the structures are similar under some affine transformation.
juraj-google-style
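A usage sketch with pymatgen; the lattice, species, and tolerance below are illustrative:

from pymatgen.core import Lattice, Structure

lattice = Lattice.cubic(3.0)
s1 = Structure(lattice, ['Na', 'Cl'], [[0, 0, 0], [0.5, 0.5, 0.5]])
s2 = s1.copy()
print(s1.matches(s2))             # True under default StructureMatcher settings
print(s1.matches(s2, ltol=0.1))   # kwargs are forwarded to StructureMatcher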
def are_equal_elements(a_el, b_el):
    if a_el.tagName != b_el.tagName:
        return False
    if sorted(a_el.attributes.items()) != sorted(b_el.attributes.items()):
        return False
    if len(a_el.childNodes) != len(b_el.childNodes):
        return False
    for a_child_el, b_child_el in zip(a_el.childNodes, b_el.childNodes):
        if a_child_el.nodeType != b_child_el.nodeType:
            return False
        if a_child_el.nodeType == a_child_el.TEXT_NODE and a_child_el.data != b_child_el.data:
            return False
        if a_child_el.nodeType == a_child_el.ELEMENT_NODE and not are_equal_elements(a_child_el, b_child_el):
            return False
    return True
Normalize and compare ElementTrees for equality. Args: a_el: ElementTree b_el: ElementTree ElementTrees to compare for equality. Returns: bool: ``True`` if the ElementTrees are semantically equivalent.
juraj-google-style
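A small sketch using xml.dom.minidom, which provides the tagName/attributes/childNodes interface the helper relies on:

from xml.dom.minidom import parseString

a = parseString('<r x="1"><c>t</c></r>').documentElement
b = parseString('<r x="1"><c>t</c></r>').documentElement
c = parseString('<r x="2"><c>t</c></r>').documentElement

print(are_equal_elements(a, b))   # True: same tag, attributes, and children
print(are_equal_elements(a, c))   # False: attribute value differs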
def build_constraint(cls, fhir_path_expression: str, key: str='key-1', severity: codes_pb2.ConstraintSeverityCode.Value=codes_pb2.ConstraintSeverityCode.ERROR) -> datatypes_pb2.ElementDefinition.Constraint:
    return datatypes_pb2.ElementDefinition.Constraint(
        key=datatypes_pb2.Id(value=key),
        expression=datatypes_pb2.String(value=fhir_path_expression),
        severity=datatypes_pb2.ElementDefinition.Constraint.SeverityCode(value=severity))
Returns an `ElementDefinition.Constraint` for a FHIRPath expression. Args: fhir_path_expression: The raw FHIRPath expression. key: The FHIRPath constraint unique identifier. Defaults to 'key-1'. severity: The constraint severity. Defaults to ERROR. Returns: An instance of `ElementDefinition.Constraint` capturing the raw underlying `fhir_path_expression`.
github-repos
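A hedged call sketch; the owning class name, FHIRPath expression, and key are assumptions, and the proto modules are the google-fhir packages already imported by the method above:

# `ConstraintBuilder` is a placeholder for whatever class defines this classmethod.
constraint = ConstraintBuilder.build_constraint(
    'name.exists()',          # example FHIRPath expression
    key='pat-1',              # example constraint key
)
# constraint.severity defaults to ERROR per the signature above.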
def _do_pass(self, pass_, dag, options):
    if not options['ignore_requires']:
        for required_pass in pass_.requires:
            dag = self._do_pass(required_pass, dag, options)
    if pass_ not in self.valid_passes:
        if pass_.is_transformation_pass:
            pass_.property_set = self.fenced_property_set
            new_dag = pass_.run(dag)
            if not isinstance(new_dag, DAGCircuit):
                raise TranspilerError('Transformation passes should return a transformed dag.The pass %s is returning a %s' % (type(pass_).__name__, type(new_dag)))
            dag = new_dag
        elif pass_.is_analysis_pass:
            pass_.property_set = self.property_set
            pass_.run(FencedDAGCircuit(dag))
        else:
            raise TranspilerError('I dont know how to handle this type of pass')
        self._update_valid_passes(pass_, options['ignore_preserves'])
    return dag
Do a pass and its "requires". Args: pass_ (BasePass): Pass to do. dag (DAGCircuit): The dag on which the pass is ran. options (dict): PassManager options. Returns: DAGCircuit: The transformed dag in case of a transformation pass. The same input dag in case of an analysis pass. Raises: TranspilerError: If the pass is not a proper pass instance.
codesearchnet
def _query(self, key_pos: int, src: Any, use_inferred: bool) -> Any:
    if key_pos == len(self._keys):
        return src
    key = self.keys[key_pos]
    if hasattr(src, 'sym_hasattr'):
        if src.sym_hasattr(key):
            if use_inferred:
                v = src.sym_inferred(key)
            else:
                v = src.sym_getattr(key)
            return self._query(key_pos + 1, v, use_inferred)
    elif hasattr(src, '__getitem__'):
        if isinstance(key, int):
            if not hasattr(src, '__len__'):
                raise KeyError(f"Cannot query index ({key}) on object ({src!r}): '__len__' does not exist.")
            if key < len(src):
                return self._query(key_pos + 1, src[key], use_inferred)
        else:
            if not hasattr(src, '__contains__'):
                raise KeyError(f"Cannot query key ({key!r}) on object ({src!r}): '__contains__' does not exist.")
            if key in src:
                return self._query(key_pos + 1, src[key], use_inferred)
    else:
        raise KeyError(f"Cannot query sub-key {key!r} of object ({src!r}): '__getitem__' does not exist. (path={KeyPath(self.keys[:key_pos])})")
    raise KeyError(f'Path {KeyPath(self._keys[:key_pos + 1])!r} does not exist: key {key!r} is absent from innermost value {src!r}.')
Query the value of current path up to key_pos from an object. Args: key_pos: Start position in self._keys. src: Source value to query. use_inferred: If True, infer `pg.Inferential` values. Otherwise returns their symbolic form. Applicable only for symbolic values. Returns: Value from src if path exists. Raises: KeyError: Path doesn't exist in src.
github-repos
def start(self, request: Request) -> Response:
    if self._session_state != SessionState.ready:
        raise RuntimeError('Session already started')
    assert not self._request
    self._request = request
    _logger.debug(__('Client fetch request {0}.', request))
    connection = yield from self._acquire_request_connection(request)
    full_url = connection.proxied and not connection.tunneled
    self._stream = stream = self._stream_factory(connection)
    yield from self._stream.reconnect()
    request.address = connection.address
    self.event_dispatcher.notify(self.Event.begin_request, request)
    write_callback = functools.partial(self.event_dispatcher.notify, self.Event.request_data)
    stream.data_event_dispatcher.add_write_listener(write_callback)
    yield from stream.write_request(request, full_url=full_url)
    if request.body:
        assert 'Content-Length' in request.fields
        length = int(request.fields['Content-Length'])
        yield from stream.write_body(request.body, length=length)
    stream.data_event_dispatcher.remove_write_listener(write_callback)
    self.event_dispatcher.notify(self.Event.end_request, request)
    read_callback = functools.partial(self.event_dispatcher.notify, self.Event.response_data)
    stream.data_event_dispatcher.add_read_listener(read_callback)
    self._response = response = yield from stream.read_response()
    response.request = request
    self.event_dispatcher.notify(self.Event.begin_response, response)
    self._session_state = SessionState.request_sent
    return response
Begin an HTTP request. Args: request: Request information. Returns: A response populated with the HTTP headers. Once the headers are received, call :meth:`download`. Coroutine.
juraj-google-style
def true_events(network, previous_state, current_state, next_state, indices=None, major_complex=None):
    if major_complex:
        nodes = major_complex.subsystem.node_indices
    elif indices:
        nodes = indices
    else:
        major_complex = compute.major_complex(network, current_state)
        nodes = major_complex.subsystem.node_indices
    return events(network, previous_state, current_state, next_state, nodes)
Return all mechanisms that have true causes and true effects within the complex. Args: network (Network): The network to analyze. previous_state (tuple[int]): The state of the network at ``t - 1``. current_state (tuple[int]): The state of the network at ``t``. next_state (tuple[int]): The state of the network at ``t + 1``. Keyword Args: indices (tuple[int]): The indices of the major complex. major_complex (AcSystemIrreducibilityAnalysis): The major complex. If ``major_complex`` is given then ``indices`` is ignored. Returns: tuple[Event]: List of true events in the major complex.
codesearchnet
def Search(self, artifact, os_name=None, cpe=None, label=None):
    hit = lambda x: x[0] == x[1] or not x[0]
    seq = [(artifact, self.artifact), (os_name, self.os_name), (cpe, self.cpe), (label, self.label)]
    return all(map(hit, seq))
Whether the condition contains the specified values. Args: artifact: A string identifier for the artifact. os_name: An OS string. cpe: A CPE string. label: A label string. Returns: True if the values match the non-empty query attributes. Empty query attributes are ignored in the comparison.
codesearchnet
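A behavioural sketch of the matching rule (empty query attributes act as wildcards); the `Condition` stand-in below is hypothetical and only mirrors the four attributes the method reads:

# Hypothetical container; attach the Search function above for illustration.
class Condition:
    def __init__(self, artifact, os_name=None, cpe=None, label=None):
        self.artifact = artifact
        self.os_name = os_name
        self.cpe = cpe
        self.label = label
    Search = Search   # assumes Search is available as a module-level function

cond = Condition('WMILogicalDisks', os_name='Windows')
cond.Search('WMILogicalDisks', os_name='Windows')   # True: both queried values match
cond.Search('WMILogicalDisks')                      # True: unqueried attributes are ignored
cond.Search('WMILogicalDisks', os_name='Linux')     # False: queried os_name differs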
def __init__(self, parent, isolated=True, function_name=None):
    self.parent = parent
    self.isolated = isolated
    self.function_name = function_name
    self.isolated_names = set()
    self.read = set()
    self.modified = set()
    self.deleted = set()
    self.bound = set()
    self.globals = set()
    self.nonlocals = set()
    self.annotations = set()
    self.params = weakref.WeakValueDictionary()
    self.is_final = False
Create a new scope. Args: parent: A Scope or None. isolated: Whether the scope is isolated, that is, whether variables modified in this scope should be considered modified in the parent scope. function_name: Name of the function owning this scope.
github-repos
def _is_tensor(x):
    return isinstance(x, (tensor_lib.Tensor, variables.Variable))
Returns `True` if `x` is a symbolic tensor-like object. Args: x: A python object to check. Returns: `True` if `x` is a `tf.Tensor` or `tf.Variable`, otherwise `False`.
github-repos
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple[int, int]]]=None) -> 'torch.Tensor':
    class_queries_logits = outputs.class_queries_logits
    masks_queries_logits = outputs.masks_queries_logits
    masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
    masks_probs = masks_queries_logits.sigmoid()
    segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs)
    batch_size = class_queries_logits.shape[0]
    if target_sizes is not None:
        if batch_size != len(target_sizes):
            raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
        semantic_segmentation = []
        for idx in range(batch_size):
            resized_logits = torch.nn.functional.interpolate(segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
            semantic_map = resized_logits[0].argmax(dim=0)
            semantic_segmentation.append(semantic_map)
    else:
        semantic_segmentation = segmentation.argmax(dim=1)
        semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
    return semantic_segmentation
Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`MaskFormerForInstanceSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id.
github-repos
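A typical call pattern with the transformers library; the checkpoint name and image path are illustrative, and pretrained weights are downloaded on first use:

import torch
from PIL import Image
from transformers import MaskFormerImageProcessor, MaskFormerForInstanceSegmentation

processor = MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-base-ade')
model = MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-base-ade')

image = Image.open('scene.jpg')                       # example input
inputs = processor(images=image, return_tensors='pt')
with torch.no_grad():
    outputs = model(**inputs)

# One (height, width) tuple per image in the batch.
seg_maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])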
def dew_point_temperature(self, value=99.9):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float ' 'for field `dew_point_temperature`'.format(value))
        if value <= -70.0:
            raise ValueError('value need to be greater -70.0 ' 'for field `dew_point_temperature`')
        if value >= 70.0:
            raise ValueError('value need to be smaller 70.0 ' 'for field `dew_point_temperature`')
    self._dew_point_temperature = value
Corresponds to IDD Field `dew_point_temperature` Args: value (float): value for IDD Field `dew_point_temperature` Unit: C value > -70.0 value < 70.0 Missing value: 99.9 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
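A sketch of the validation behaviour; `record` stands for an instance of the EPW weather-data class that defines this setter (in the full library it may be wrapped as a property setter rather than called directly):

record.dew_point_temperature(12.5)     # accepted and stored
record.dew_point_temperature(None)     # treated as missing, stored without range checks
try:
    record.dew_point_temperature(-80.0)
except ValueError as err:
    print(err)                         # outside the allowed (-70.0, 70.0) range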
async def enqueue(self, query, queue_index=None, stop_current=False, shuffle=False):
    if query is None or query == '':
        return
    self.statuslog.info('Parsing {}'.format(query))
    self.logger.debug('Enqueueing from query')
    indexnum = None
    if queue_index is not None:
        try:
            indexnum = int(queue_index) - 1
        except TypeError:
            self.statuslog.error('Play index argument must be a number')
            return
        except ValueError:
            self.statuslog.error('Play index argument must be a number')
            return
    if not self.vready:
        self.parse_query(query, indexnum, stop_current, shuffle)
    else:
        parse_thread = threading.Thread(target=self.parse_query, args=[query, indexnum, stop_current, shuffle])
        parse_thread.start()
Queues songs based on either a YouTube search or a link Args: query (str): Either a search term or a link queue_index (str): The queue index to enqueue at (None for end) stop_current (bool): Whether to stop the current song after the songs are queued shuffle (bool): Whether to shuffle the added songs
codesearchnet