code: string (lengths 20 to 4.93k)
docstring: string (lengths 33 to 1.27k)
source: string (3 classes)
def compile(self, container: Container, verbose: bool = False ) -> CompilationOutcome: bug = self.__installation.bugs[container.bug] return bug.compiler.compile(self, container, verbose=verbose)
Attempts to compile the program inside a given container. Params: verbose: if `True`, the stdout and stderr produced by the compilation command are printed to standard output. Returns: a summary of the outcome of the compilation attempt.
juraj-google-style
def add(self, pattern_txt): self.patterns[len(pattern_txt)] = pattern_txt low = 0 high = (len(pattern_txt) - 1) while (not pattern_txt[low]): low += 1 while (not pattern_txt[high]): high -= 1 min_pattern = pattern_txt[low:(high + 1)] self.min_patterns[len(min_pattern)] = min_pattern
Add a pattern to the list. Args: pattern_txt (str list): the pattern, as a list of lines.
codesearchnet
def quote_identifier(identifier: str, mixed: Union[SQLCompiler, Engine, Dialect]) -> str: return get_preparer(mixed).quote(identifier)
Converts an SQL identifier to a quoted version, via the SQL dialect in use. Args: identifier: the identifier to be quoted mixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or :class:`Dialect` object Returns: the quoted identifier
juraj-google-style
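A minimal usage sketch for the `quote_identifier` helper above; it assumes that function (and the `get_preparer` helper from the same module) is importable, and uses a throwaway in-memory SQLite engine purely for illustration:

```python
from sqlalchemy import create_engine

engine = create_engine("sqlite://")  # in-memory engine, illustration only
# "select" is a reserved word, so the dialect's preparer quotes it.
print(quote_identifier("select", engine))  # -> "select"
```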
def _make_inputs_match(branch_graphs, branch_inputs): assert len(branch_graphs) == len(branch_inputs) added_inputs = set() new_inputs = [] for branch_in in branch_inputs: for tensor in branch_in: tensor_id = ops.tensor_id(tensor) if tensor_id not in added_inputs: added_inputs.add(tensor_id) new_inputs.append(tensor) for branch_graph, branch_in in zip(branch_graphs, branch_inputs): input_ids = [ops.tensor_id(t) for t in branch_in] branch_input_to_param = dict(zip(input_ids, branch_graph.inputs)) input_list = [] for in_t in new_inputs: param = branch_input_to_param.get(ops.tensor_id(in_t)) if param is None: param = _create_dummy_input(branch_graph, in_t) input_list.append(param) branch_graph.inputs = input_list branch_graph.function_captures.reset_captures(new_inputs, branch_graph.inputs) return new_inputs
Modifies branch_graphs so they have the same input signature. This method reorders and/or adds parameters to each graph in branch_graphs so they have the same input signature, and updates the 'inputs' and 'captured' fields of each graph accordingly. It uses the input tensors from the outer graph to avoid duplicating shared arguments. Args: branch_graphs: a `list` of `FuncGraph` branch_inputs: a `list` of `list`s of `Tensor`s in the outer graph. The inputs for the corresponding graph in `branch_graphs`. Returns: A new list of Tensors from the outer graph that are the new inputs for each branch_graph. This is a deduped version of `sum(branch_inputs)`.
github-repos
def create(self, data={}, **kwargs): url = self.base_url return self.post_url(url, data, **kwargs)
Create a Virtual Account from the given dict. Args: data: Parameters for creating the Virtual Account. Returns: Virtual Account dict
codesearchnet
class MeanAbsolutePercentageError(reduction_metrics.MeanMetricWrapper): def __init__(self, name='mean_absolute_percentage_error', dtype=None): super().__init__(mean_absolute_percentage_error, name, dtype=dtype) self._direction = 'down' def get_config(self): return {'name': self.name, 'dtype': self.dtype}
Computes mean absolute percentage error between `y_true` and `y_pred`. Formula: ```python loss = 100 * mean(abs((y_true - y_pred) / y_true)) ``` Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Examples: >>> m = keras.metrics.MeanAbsolutePercentageError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result() 250000000.0 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... sample_weight=[1, 0]) >>> m.result() 500000000.0 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[keras.metrics.MeanAbsolutePercentageError()]) ```
github-repos
def anti_clobber_dir_path(dir_path, suffix='.d'): dir_path = os.path.normpath(dir_path) parts = dir_path.split(os.sep) for index in range(len(parts)): test_path = os.sep.join(parts[:index + 1]) if os.path.isfile(test_path): parts[index] += suffix return os.sep.join(parts) return dir_path
Return a directory path free of filenames. Args: dir_path (str): A directory path. suffix (str): The suffix to append to the part of the path that is a file. Returns: str
juraj-google-style
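A small, hypothetical demonstration of `anti_clobber_dir_path` above (the function is assumed to be in scope); it creates a temporary *file* whose name clashes with a directory component of the requested path:

```python
import os
import tempfile

tmp = tempfile.mkdtemp()
open(os.path.join(tmp, "data"), "w").close()   # "data" already exists as a file
wanted = os.path.join(tmp, "data", "output")   # path wants "data" as a directory
print(anti_clobber_dir_path(wanted))           # .../data.d/output (POSIX separators)
```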
def _try_recover(self, trial, error_msg): try: self.trial_executor.stop_trial(trial, error=(error_msg is not None), error_msg=error_msg, stop_logger=False) trial.result_logger.flush() if self.trial_executor.has_resources(trial.resources): logger.info('Attempting to recover trial state from last checkpoint.') self.trial_executor.start_trial(trial) if (trial.status == Trial.ERROR): raise RuntimeError('Trial did not start correctly.') else: logger.debug('Notifying Scheduler and requeueing trial.') self._requeue_trial(trial) except Exception: logger.exception('Error recovering trial from checkpoint, abort.') self._scheduler_alg.on_trial_error(self, trial) self._search_alg.on_trial_complete(trial.trial_id, error=True)
Tries to recover trial. Notifies SearchAlgorithm and Scheduler if failure to recover. Args: trial (Trial): Trial to recover. error_msg (str): Error message from prior to invoking this method.
codesearchnet
def GetRelativePath(self, path_spec): location = getattr(path_spec, 'location', None) if (location is None): raise errors.PathSpecError('Path specification missing location.') if path_spec_factory.Factory.IsSystemLevelTypeIndicator(self._file_system.type_indicator): if (not location.startswith(self._mount_point.location)): raise errors.PathSpecError('Path specification does not contain mount point.') else: if (not hasattr(path_spec, 'parent')): raise errors.PathSpecError('Path specification missing parent.') if (path_spec.parent != self._mount_point): raise errors.PathSpecError('Path specification does not contain mount point.') path_segments = self._file_system.SplitPath(location) if path_spec_factory.Factory.IsSystemLevelTypeIndicator(self._file_system.type_indicator): mount_point_path_segments = self._file_system.SplitPath(self._mount_point.location) path_segments = path_segments[len(mount_point_path_segments):] return '{0:s}{1:s}'.format(self._file_system.PATH_SEPARATOR, self._file_system.PATH_SEPARATOR.join(path_segments))
Returns the relative path based on a resolved path specification. The relative path is the location of the uppermost path specification. The location of the mount point is stripped off if relevant. Args: path_spec (PathSpec): path specification. Returns: str: corresponding relative path or None if the relative path could not be determined. Raises: PathSpecError: if the path specification is incorrect.
codesearchnet
def recipients(self, notification_type, recipients, priority='Low'): self._notification_type = notification_type self._recipients = recipients self._priority = priority self._is_organization = False
Set vars for the passed in data. Used for one or more recipient notification. .. code-block:: javascript { "notificationType": notification_type, "priority": priority, "isOrganization": false, "recipients": recipients } Args: notification_type (str): The type of notification being sent. recipients (str): A comma delimited string of recipients. priority (str): The priority: Low, Medium, High.
codesearchnet
def apply(self, data, path=None, applicator=None): if applicator: applicator.pset = self else: applicator = Applicator(self) return applicator.apply(data, path=path)
Apply permissions in this set to the provided data, effectively removing all keys from it that are not permitted to be viewed. Arguments: data -- dict of data Returns: Cleaned data
juraj-google-style
def _convert_metadata(data): def compose(val, arguments=None): if val is None: return None if not arguments: return val arguments["#text"] = val return arguments conspect = data.get("conspect", {}) author_name = data.get("author", {}).get("name") author_code = data.get("author", {}).get("code") metadata = odict[ "dc:title": data.get("title"), "dcterms:alternative": data.get("subtitle"), "dc:creator": compose(author_name, {"@id": author_code}), "dc:publisher": data.get("publisher"), "dc:description": data.get("annotation"), "dc:coverage": compose(data.get("place"), {"@xml:lang": "cze"}), "dc:language": compose(data.get("language"), {"@schema": "ISO 639-2"}), "dcterms:created": data.get("from_year"), "dcterms:accrualperiodicity": compose( data.get("periodicity"), {"@xml:lang": "cze"} ), "dc:identifier": [ {"@rdf:resource": data["url"]}, compose(data.get("issn"), {"@xsi:type": "ISSN"}), compose(conspect.get("mdt"), {"@xsi:type": "MDT"}), compose(conspect.get("ddc"), {"@xsi:type": "DDC"}), ], "dc:subject": [ compose(conspect.get("mdt"), {"@xsi:type": "dcterms:UDC"}), compose(conspect.get("ddc"), {"@xsi:type": "dcterms:DDC"}), ], ] def pick_keywords(data, source): return [ x["zahlavi"] for x in data.get(source, []) if x.get("zahlavi") ] cz_keywords = pick_keywords(data, "cz_keywords") en_keywords = pick_keywords(data, "en_keywords") if cz_keywords: metadata["dc:subject"].append({ "@xml:lang": "cz", "#text": ", ".join(cz_keywords) }) if en_keywords: metadata["dc:subject"].append({ "@xml:lang": "en", "#text": ", ".join(en_keywords) }) metadata["dc:identifier"] = [x for x in metadata["dc:identifier"] if x] metadata["dc:subject"] = [x for x in metadata["dc:subject"] if x] return metadata
Convert metadata from WA-KAT to a Dublin Core dictionary-like structure, which may be easily converted to XML using the :mod:`xmltodict` module. Args: data (dict): Nested WA-KAT data. See tests for example. Returns: dict: Dict in Dublin Core format.
juraj-google-style
def _InstallRpm(self, path): pid = os.fork() if pid == 0: cmd = "/bin/rpm" cmd_args = [cmd, "-U", "--replacepkgs", "--replacefiles", path] env = os.environ.copy() env.pop("LD_LIBRARY_PATH", None) env.pop("PYTHON_PATH", None) os.execve(cmd, cmd_args, env) else: time.sleep(1000)
Client update for rpm based distros. Upgrading rpms is a bit more tricky than upgrading deb packages since there is a preinstall script that kills the running GRR daemon and, thus, also the installer process. We need to make sure we detach the child process properly and therefore cannot use client_utils_common.Execute(). Args: path: Path to the .rpm.
juraj-google-style
def visit_membership(self, relation: _evaluation.MembershipRelationNode) -> _sql_data_types.Select: lhs_result = self.visit(relation.left) rhs_result = self.visit(relation.right) in_lhs = lhs_result if isinstance(relation, _evaluation.InNode) else rhs_result in_rhs = rhs_result if isinstance(relation, _evaluation.InNode) else lhs_result sql_expr = f'({in_lhs.as_operand()}) IN ({in_rhs.as_operand()})' return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(sql_expr, _sql_data_type=_sql_data_types.Boolean, _sql_alias='mem_'), from_part=None, sql_dialect=_sql_data_types.SqlDialect.SPARK)
Translates a FHIRPath membership relation to Spark SQL. For the `IN` relation, the LHS operand is assumed to be a collection of a single value. For 'CONTAINS', the RHS operand is assumed to be a collection of a single value. Equality is handled in the visit_equality function. Args: relation: The FHIRPath AST `MembershipRelation` node. Returns: A compiled Spark SQL expression.
github-repos
def is_coord_subset(subset, superset, atol=1e-8): c1 = np.array(subset) c2 = np.array(superset) is_close = np.all(np.abs(c1[:, None, :] - c2[None, :, :]) < atol, axis=-1) any_close = np.any(is_close, axis=-1) return np.all(any_close)
Tests if all coords in subset are contained in superset. Doesn't use periodic boundary conditions Args: subset, superset: List of coords Returns: True if all of subset is in superset.
juraj-google-style
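A quick check of `is_coord_subset` above, assuming the function is in scope (numpy is its only dependency):

```python
subset = [[0.0, 0.0, 0.0]]
superset = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]
print(is_coord_subset(subset, superset))             # True
print(is_coord_subset([[0.1, 0.0, 0.0]], superset))  # False (no match within atol)
```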
def pop(self): if not self.layers: raise TypeError('There are no layers in the model.') layer = self._self_tracked_trackables.pop() self._layer_call_argspecs.pop(layer) if not self.layers: self.outputs = None self.inputs = None self.built = False self._inferred_input_shape = None self._has_explicit_input_shape = False self._graph_initialized = False elif self._graph_initialized: self.layers[-1]._outbound_nodes = [] self.outputs = [self.layers[-1].output] self._init_graph_network(self.inputs, self.outputs) self.built = True
Removes the last layer in the model. Raises: TypeError: if there are no layers in the model.
github-repos
def sg_queue_context(sess=None): sess = tf.get_default_session() if sess is None else sess coord = tf.train.Coordinator() try: threads = tf.train.start_queue_runners(sess, coord) yield finally: coord.request_stop() coord.join(threads)
r"""Context helper for queue routines. Args: sess: A session to open queues. If not specified, a new session is created. Returns: None
juraj-google-style
def from_string(cls, string_input): directives = [] tasks = [] charge = None spin_multiplicity = None title = None basis_set = None basis_set_option = None theory_directives = {} geom_options = None symmetry_options = None memory_options = None lines = string_input.strip().split("\n") while len(lines) > 0: l = lines.pop(0).strip() if l == "": continue toks = l.split() if toks[0].lower() == "geometry": geom_options = toks[1:] l = lines.pop(0).strip() toks = l.split() if toks[0].lower() == "symmetry": symmetry_options = toks[1:] l = lines.pop(0).strip() species = [] coords = [] while l.lower() != "end": toks = l.split() species.append(toks[0]) coords.append([float(i) for i in toks[1:]]) l = lines.pop(0).strip() mol = Molecule(species, coords) elif toks[0].lower() == "charge": charge = int(toks[1]) elif toks[0].lower() == "title": title = l[5:].strip().strip("\"") elif toks[0].lower() == "basis": l = lines.pop(0).strip() basis_set = {} while l.lower() != "end": toks = l.split() basis_set[toks[0]] = toks[-1].strip("\"") l = lines.pop(0).strip() elif toks[0].lower() in NwTask.theories: if len(toks) > 1: basis_set_option = toks[1] theory = toks[0].lower() l = lines.pop(0).strip() theory_directives[theory] = {} while l.lower() != "end": toks = l.split() theory_directives[theory][toks[0]] = toks[-1] if toks[0] == "mult": spin_multiplicity = float(toks[1]) l = lines.pop(0).strip() elif toks[0].lower() == "task": tasks.append( NwTask(charge=charge, spin_multiplicity=spin_multiplicity, title=title, theory=toks[1], operation=toks[2], basis_set=basis_set, basis_set_option=basis_set_option, theory_directives=theory_directives.get(toks[1]))) elif toks[0].lower() == "memory": memory_options = ' '.join(toks[1:]) else: directives.append(l.strip().split()) return NwInput(mol, tasks=tasks, directives=directives, geometry_options=geom_options, symmetry_options=symmetry_options, memory_options=memory_options)
Read an NwInput from a string. Currently tested to work with files generated from this class itself. Args: string_input: string_input to parse. Returns: NwInput object
juraj-google-style
def discretize(self, data): ret = data.copy() for feature in self.lambdas: if len(data.shape) == 1: ret[feature] = int(self.lambdas[feature](ret[feature])) else: ret[:, feature] = self.lambdas[feature]( ret[:, feature]).astype(int) return ret
Discretizes the data. Args: data: numpy 2d or 1d array Returns: numpy array of same dimension, discretized.
juraj-google-style
def add_message(self, message_type): name = self.__normalized_name(message_type) if name not in self.__schemas: self.__schemas[name] = None schema = self.__message_to_schema(message_type) self.__schemas[name] = schema return name
Add a new message. Args: message_type: protorpc.message.Message class to be parsed. Returns: string, The JSON Schema id. Raises: KeyError if the Schema id for this message_type would collide with the Schema id of a different message_type that was already added.
juraj-google-style
def _CheckStorageMetadata(cls, metadata_values, check_readable_only=False): format_version = metadata_values.get('format_version', None) if (not format_version): raise IOError('Missing format version.') try: format_version = int(format_version, 10) except (TypeError, ValueError): raise IOError('Invalid format version: {0!s}.'.format(format_version)) if ((not check_readable_only) and (format_version != cls._FORMAT_VERSION)): raise IOError('Format version: {0:d} is not supported.'.format(format_version)) if (format_version < cls._COMPATIBLE_FORMAT_VERSION): raise IOError('Format version: {0:d} is too old and no longer supported.'.format(format_version)) if (format_version > cls._FORMAT_VERSION): raise IOError('Format version: {0:d} is too new and not yet supported.'.format(format_version)) metadata_values['format_version'] = format_version compression_format = metadata_values.get('compression_format', None) if (compression_format not in definitions.COMPRESSION_FORMATS): raise IOError('Unsupported compression format: {0:s}'.format(compression_format)) serialization_format = metadata_values.get('serialization_format', None) if (serialization_format != definitions.SERIALIZER_FORMAT_JSON): raise IOError('Unsupported serialization format: {0:s}'.format(serialization_format)) storage_type = metadata_values.get('storage_type', None) if (storage_type not in definitions.STORAGE_TYPES): raise IOError('Unsupported storage type: {0:s}'.format(storage_type))
Checks the storage metadata. Args: metadata_values (dict[str, str]): metadata values per key. check_readable_only (Optional[bool]): whether the store should only be checked to see if it can be read. If False, the store will be checked to see if it can be read and written to. Raises: IOError: if the format version or the serializer format is not supported. OSError: if the format version or the serializer format is not supported.
codesearchnet
def __write_to_hdf5_light(self, filename_out, *args, **kwargs): block_size = 0 with h5py.File(filename_out, 'w') as h5: h5.attrs[b'CLASS'] = b'FILTERBANK' h5.attrs[b'VERSION'] = b'1.0' if HAS_BITSHUFFLE: bs_compression = bitshuffle.h5.H5FILTER bs_compression_opts = (block_size, bitshuffle.h5.H5_COMPRESS_LZ4) else: bs_compression = None bs_compression_opts = None logger.warning('Warning: bitshuffle not found. No compression applied.') dset = h5.create_dataset('data', data=self.data, compression=bs_compression, compression_opts=bs_compression_opts) dset_mask = h5.create_dataset('mask', shape=self.file_shape, compression=bs_compression, compression_opts=bs_compression_opts, dtype='uint8') dset.dims[0].label = b'frequency' dset.dims[1].label = b'feed_id' dset.dims[2].label = b'time' dset_mask.dims[0].label = b'frequency' dset_mask.dims[1].label = b'feed_id' dset_mask.dims[2].label = b'time' for (key, value) in self.header.items(): dset.attrs[key] = value
Write data to HDF5 file in one go. Args: filename_out (str): Name of output file
codesearchnet
def set_vocabulary(self, vocabulary, idf_weights=None): if self.output_mode == 'tf_idf': if idf_weights is None: raise ValueError("`idf_weights` must be set if output_mode is 'tf_idf'.") elif idf_weights is not None: raise ValueError(f"`idf_weights` should only be set if output_mode is `'tf_idf'`. Received: output_mode={self.output_mode} and idf_weights={idf_weights}") if isinstance(vocabulary, str): if not tf.io.gfile.exists(vocabulary): raise ValueError(f'Vocabulary file {vocabulary} does not exist.') if self.output_mode == 'tf_idf': raise ValueError("output_mode `'tf_idf'` does not support loading a vocabulary from file.") self.lookup_table = self._lookup_table_from_file(vocabulary) self._record_vocabulary_size() return if not tf.executing_eagerly() and (tf.is_tensor(vocabulary) or tf.is_tensor(idf_weights)): raise RuntimeError(f'Cannot set a tensor vocabulary on layer {self.name} when not executing eagerly. Create this layer or call `set_vocabulary()` outside of any traced function.') if tf.is_tensor(vocabulary): vocabulary = self._tensor_vocab_to_numpy(vocabulary) elif isinstance(vocabulary, (list, tuple)): vocabulary = np.array(vocabulary) if tf.is_tensor(idf_weights): idf_weights = idf_weights.numpy() elif isinstance(idf_weights, (list, tuple)): idf_weights = np.array(idf_weights) if vocabulary.size == 0: raise ValueError(f'Cannot set an empty vocabulary. Received: vocabulary={vocabulary}') oov_start = self._oov_start_index() token_start = self._token_start_index() special_tokens = [self.mask_token] * oov_start + [self.oov_token] * self.num_oov_indices found_special_tokens = np.array_equal(special_tokens, vocabulary[:token_start]) if found_special_tokens: tokens = vocabulary[token_start:] else: tokens = vocabulary repeated_tokens = self._find_repeated_tokens(tokens) if repeated_tokens: raise ValueError(f'The passed vocabulary has at least one repeated term. Please uniquify your dataset. The repeated terms are: {repeated_tokens}') if self.mask_token is not None and self.mask_token in tokens: mask_index = np.argwhere(vocabulary == self.mask_token)[-1] raise ValueError(f'Found reserved mask token at unexpected location in `vocabulary`. Note that passed `vocabulary` does not need to include the OOV and mask tokens. Either remove all mask and OOV tokens, or include them only at the start of the vocabulary in precisely this order: {special_tokens}. Received: mask_token={self.mask_token} at vocabulary index {mask_index}') if self.oov_token is not None and self.invert and (self.oov_token in tokens): oov_index = np.argwhere(vocabulary == self.oov_token)[-1] raise ValueError(f'Found reserved OOV token at unexpected location in `vocabulary`. Note that passed `vocabulary` does not need to include the OOV and mask tokens. Either remove all mask and OOV tokens, or include them only at the start of the vocabulary in precisely this order: {special_tokens}. Received: oov_token={self.oov_token} at vocabulary index {oov_index}') new_vocab_size = token_start + len(tokens) if self.max_tokens is not None and new_vocab_size > self.max_tokens: raise ValueError(f'Attempted to set a vocabulary larger than the maximum vocab size. Received vocabulary size is {new_vocab_size}; `max_tokens` is {self.max_tokens}.') self.lookup_table = self._lookup_table_from_tokens(tokens) self._record_vocabulary_size() if self.output_mode == 'tf_idf' and idf_weights is not None: if len(vocabulary) != len(idf_weights): raise ValueError(f'`idf_weights` must be the same length as vocabulary. 
len(idf_weights) is {len(idf_weights)}; len(vocabulary) is {len(vocabulary)}') idf_weights = self._convert_to_ndarray(idf_weights) if idf_weights.ndim != 1: raise ValueError(f'TF-IDF data must be a 1-index array. Received: type(idf_weights)={type(idf_weights)}') if found_special_tokens: front_padding = 0 front_padding_value = 0 else: front_padding = token_start front_padding_value = np.average(idf_weights) back_padding_value = 0 if self.pad_to_max_tokens and self.max_tokens is not None: back_padding = self.max_tokens - front_padding - len(idf_weights) else: back_padding = 0 weights = np.pad(idf_weights, (front_padding, back_padding), 'constant', constant_values=(front_padding_value, back_padding_value)) weights = tf.convert_to_tensor(weights, dtype=backend.floatx()) self.idf_weights = tf.Variable(weights, trainable=False) self.idf_weights_const = self.idf_weights.value()
Sets vocabulary (and optionally document frequency) for this layer. This method sets the vocabulary and idf weights for this layer directly, instead of analyzing a dataset through `adapt`. It should be used whenever the vocab (and optionally document frequency) information is already known. If vocabulary data is already present in the layer, this method will replace it. Args: vocabulary: Either an array or a string path to a text file. If passing an array, can pass a tuple, list, 1D numpy array, or 1D tensor containing the vocabulary terms. If passing a file path, the file should contain one line per term in the vocabulary. idf_weights: A tuple, list, 1D numpy array, or 1D tensor of inverse document frequency weights with equal length to vocabulary. Must be set if `output_mode` is `"tf_idf"`. Should not be set otherwise.
github-repos
def _set_default_attr(self, default_attr): for attr, val in six.iteritems(default_attr): if getattr(self, attr, None) is None: setattr(self, attr, val)
Sets default attributes when None. Args: default_attr: dict. Key-val of attr, default-value.
juraj-google-style
def convert(recursive=False, optional_features=None, user_requested=True, conversion_ctx=ag_ctx.NullCtx()): def decorator(f): def wrapper(*args, **kwargs): options = converter.ConversionOptions(recursive=recursive, user_requested=user_requested, optional_features=optional_features) try: with conversion_ctx: return converted_call(f, args, kwargs, options=options) except Exception as e: if hasattr(e, 'ag_error_metadata'): raise e.ag_error_metadata.to_exception(e) else: raise if inspect.isfunction(f) or inspect.ismethod(f): wrapper = functools.update_wrapper(wrapper, f) decorated_wrapper = tf_decorator.make_decorator(f, wrapper) return autograph_artifact(decorated_wrapper) return decorator
Decorator that compiles a function to use TensorFlow ops. The decorator is dynamic - it recompiles the target whenever the decorated function is called. This means the parameter values are known at conversion. It also means that repeated calls with different types of parameters will be correctly processed. Args: recursive: bool, whether to recursively convert any functions or classes that the converted function may use. optional_features: converted.Feature, allows toggling optional or experimental features. When set to None, only the core features are enabled. user_requested: bool, whether this is a function that the user explicitly asked to be converted. See ConversionOptions.user_requested. conversion_ctx: Optional ag_ctx.ControlStatusCtx, the Autograph context in which `f` is used. Returns: Callable, a decorator that converts the given function into an equivalent function that uses TensorFlow ops.
github-repos
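A hedged usage sketch of the `convert` decorator above, as it might be applied inside the TensorFlow autograph package where it is defined; the function name and body below are illustrative only:

```python
@convert(recursive=True)
def abs_value(x):
    # Data-dependent control flow: autograph rewrites this branch into TF ops
    # when `x` is a tensor.
    if x > 0:
        return x
    return -x
```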
def load_yaml_by_relpath(cls, directories, rel_path, log_debug=False): for d in directories: if (d.startswith(os.path.expanduser('~')) and (not os.path.exists(d))): os.makedirs(d) possible_path = os.path.join(d, rel_path) if os.path.exists(possible_path): loaded = cls.load_yaml_by_path(possible_path, log_debug=log_debug) if (loaded is not None): return (possible_path, cls.load_yaml_by_path(possible_path)) return None
Load a yaml file with path that is relative to one of the given directories. Args: directories: list of directories to search rel_path: relative path of the yaml file to load log_debug: log all messages as debug Returns: tuple (fullpath, loaded yaml structure) or None if not found
codesearchnet
def format_tasks(tasks): return ['%d : %s (%s)' % (task.key.id(), task.description, ('done' if task.done else 'created %s' % task.created)) for task in tasks]
Converts a list of tasks to a list of string representations. Args: tasks: A list of the tasks to convert. Returns: A list of string formatted tasks.
juraj-google-style
def check_panels(adapter, panels, default_panels=None): default_panels = default_panels or [] panels_exist = True for panel in default_panels: if panel not in panels: log.warning("Default panels have to be defined in panels") panels_exist = False for panel in panels: if not adapter.gene_panel(panel): log.warning("Panel {} does not exist in database".format(panel)) panels_exist = False return panels_exist
Make sure that the gene panels exist in the database Also check if the default panels are defined in gene panels Args: adapter(MongoAdapter) panels(list(str)): A list with panel names Returns: panels_exists(bool)
juraj-google-style
def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None=None, encoder_hidden_states: tf.Tensor | None=None, encoder_attention_mask: tf.Tensor | None=None, layer_head_mask: tf.Tensor | None=None, cross_attn_layer_head_mask: tf.Tensor | None=None, past_key_value: Tuple[tf.Tensor] | None=None, training: Optional[bool]=False) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states present_key_value = present_key_value + cross_attn_present_key_value residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states return (hidden_states, self_attn_weights, cross_attn_weights, present_key_value)
Args: hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)* attention_mask (`tf.Tensor`): attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. encoder_hidden_states (`tf.Tensor`): cross attention input to the layer of shape *(batch, seq_len, embed_dim)* encoder_attention_mask (`tf.Tensor`): encoder attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size *(decoder_attention_heads,)* cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module. *(decoder_attention_heads,)* past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
github-repos
def callsign(msg): if common.typecode(msg) < 1 or common.typecode(msg) > 4: raise RuntimeError("%s: Not a identification message" % msg) chars = '#ABCDEFGHIJKLMNOPQRSTUVWXYZ#####_###############0123456789######' msgbin = common.hex2bin(msg) csbin = msgbin[40:96] cs = '' cs += chars[common.bin2int(csbin[0:6])] cs += chars[common.bin2int(csbin[6:12])] cs += chars[common.bin2int(csbin[12:18])] cs += chars[common.bin2int(csbin[18:24])] cs += chars[common.bin2int(csbin[24:30])] cs += chars[common.bin2int(csbin[30:36])] cs += chars[common.bin2int(csbin[36:42])] cs += chars[common.bin2int(csbin[42:48])] cs = cs.replace('#', '') return cs
Aircraft callsign Args: msg (string): 28 bytes hexadecimal message string Returns: string: callsign
juraj-google-style
def save_yaml(dictionary, path, pretty=False, sortkeys=False): if sortkeys: dictionary = dict(dictionary) with open(path, 'w') as f: if pretty: pyaml.dump(dictionary, f) else: yaml.dump(dictionary, f, default_flow_style=None, Dumper=yamlloader.ordereddict.CDumper)
Save dictionary to YAML file preserving order if it is an OrderedDict Args: dictionary (Dict): Python dictionary to save path (str): Path to YAML file pretty (bool): Whether to pretty print. Defaults to False. sortkeys (bool): Whether to sort dictionary keys. Defaults to False. Returns: None
juraj-google-style
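A minimal sketch of calling `save_yaml` above, assuming the function and its pyaml/yamlloader dependencies are importable; the file names are placeholders:

```python
from collections import OrderedDict

config = OrderedDict([("name", "demo"), ("threshold", 0.5)])
save_yaml(config, "config.yml")                      # key order preserved
save_yaml(config, "config_pretty.yml", pretty=True)  # pretty-printed variant
```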
def pretty_print_counters(counters): totals = collections.defaultdict(int) for (name, val) in counters: prefixes = ([name[:i] for i in xrange(len(name)) if (name[i] == '/')] + [name]) for p in prefixes: totals[p] += val parts = [] for (name, val) in sorted(six.iteritems(totals)): parts.append(((' ' * name.count('/')) + ('%s: %.3g' % (name, val)))) return '\n'.join(parts)
print counters hierarchically. Each counter is a pair of a string and a number. The string can have slashes, meaning that the number also counts towards each prefix. e.g. "parameters/trainable" counts towards both "parameters" and "parameters/trainable". Args: counters: a list of (string, number) pairs Returns: a string
codesearchnet
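An illustrative call to `pretty_print_counters` above; note the original uses `xrange` and `six`, so it targets Python 2 (on Python 3 it would need `from six.moves import xrange`):

```python
counters = [("parameters/trainable", 10), ("parameters/frozen", 5)]
print(pretty_print_counters(counters))
# parameters: 15
#  parameters/frozen: 5
#  parameters/trainable: 10
```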
def get_latex_figure_str(fpath_list, caption_str=None, label_str=None, width_str=r'\textwidth', height_str=None, nCols=None, dpath=None, colpos_sep=' ', nlsep='', use_sublbls=None, use_frame=False): r import utool as ut if nCols is None: nCols = len(fpath_list) USE_SUBFIGURE = True if width_str is not None: colwidth = (1.0 / nCols) if USE_SUBFIGURE: colwidth *= .95 graphics_sizestr = ('%.2f' % (colwidth,)) + width_str else: graphics_sizestr = '[width=%.1f%s]' % (colwidth, width_str) elif height_str is not None: graphics_sizestr = '[height=%s]' % (height_str) else: graphics_sizestr = '' if dpath is not None: fpath_list = [ut.relpath_unix(fpath_, dpath) for fpath_ in fpath_list] if USE_SUBFIGURE: graphics_list = [] sublbl_prefix = label_str if label_str is not None else '' for count, fpath in enumerate(fpath_list): CHRLBLS = True if CHRLBLS: subchar = chr(65 + count) else: subchar = str(count) parts = [] subfigure_str = '' if len(fpath_list) > 1: parts.append('\\begin{subfigure}[h]{' + graphics_sizestr + '}') parts.append('\\centering') graphics_part = '\\includegraphics[width=%s]{%s}' % (width_str, fpath,) if use_frame: parts.append('\\fbox{%s}' % (graphics_part,)) else: parts.append(graphics_part) if use_sublbls is True or use_sublbls is None and len(fpath_list) > 1: parts.append('\\caption{}\\label{sub:' + sublbl_prefix + subchar + '}') if len(fpath_list) > 1: parts.append('\\end{subfigure}') subfigure_str = ''.join(parts) graphics_list.append(subfigure_str) else: if True: graphics_list = [ r'\includegraphics%s{%s}\captionof{figure}{%s}' % ( graphics_sizestr, fpath, 'fd', ) for count, fpath in enumerate(fpath_list)] else: graphics_list = [r'\includegraphics%s{%s}' % (graphics_sizestr, fpath,) for fpath in fpath_list] NL = '\n' if USE_SUBFIGURE: col_spacer_mid = NL + '~~' + '% --' + NL col_spacer_end = NL + r'\\' + '% --' + NL else: col_spacer_mid = NL + '&' + NL col_spacer_end = NL + r'\\' + nlsep + NL sep_list = [ col_spacer_mid if count % nCols > 0 else col_spacer_end for count in range(1, len(graphics_list) + 1) ] if len(sep_list) > 0: sep_list[-1] = '' graphics_list_ = [graphstr + sep for graphstr, sep in zip(graphics_list, sep_list)] graphics_body = ''.join(graphics_list_) header_str = colpos_sep.join(['c'] * nCols) if USE_SUBFIGURE: figure_body = graphics_body else: figure_body = ut.codeblock( r ) % (header_str, graphics_body) if caption_str is not None: if label_str is not None: figure_body += '\n\caption[%s]{%s}' % (label_str, caption_str,) else: figure_body += '\n\caption{%s}' % (caption_str,) if label_str is not None: figure_body += '\n\label{fig:%s}' % (label_str,) figure_fmtstr = ut.codeblock( r ) figure_str = figure_fmtstr % (figure_body) return figure_str
r""" Args: fpath_list (list): dpath (str): directory relative to main tex file Returns: str: figure_str CommandLine: python -m utool.util_latex --test-get_latex_figure_str Example: >>> # DISABLE_DOCTEST >>> from utool.util_latex import * # NOQA >>> fpath_list = ['figures/foo.png'] >>> figure_str = get_latex_figure_str(fpath_list) >>> result = str(figure_str) >>> print(result)
juraj-google-style
def send_msg_to_webhook(self, message): payload = {'content': message} header = {'Content-Type': 'application/json'} try: request = requests.post(self.api_url, headers=header, json=payload) request.raise_for_status() except Exception as error_msg: warning_msg = (('EXCEPTION: UNABLE TO COMMIT LOG MESSAGE' + '\n\texception={0}'.format(repr(error_msg))) + '\n\tmessage={0}'.format(message)) warnings.warn(warning_msg, exceptions.WebhookFailedEmitWarning)
separated Requests logic for easier testing Args: message (str): actual logging string to be passed to REST endpoint Todo: * Requests.text/json return for better testing options
codesearchnet
def instantiate_resolver(self, name, args): if name not in self._known_resolvers: raise ArgumentError("Attempting to instantiate unknown dependency resolver", name=name) return self._known_resolvers[name](args)
Directly instantiate a dependency resolver by name with the given arguments Args: name (string): The name of the class that we want to instantiate args (dict): The arguments to pass to the resolver factory Returns: DependencyResolver
juraj-google-style
def export_node(self, n) -> Dict[str, Union[str, List[str]]]: node_dict = { "name": n[0], "units": _get_units(n[0]), "dtype": _get_dtype(n[0]), "arguments": list(self.predecessors(n[0])), } if not n[1].get("indicators") is None: for indicator in n[1]["indicators"].values(): if "dataset" in indicator.__dict__: del indicator.__dict__["dataset"] node_dict["indicators"] = [ _process_datetime(indicator.__dict__) for indicator in n[1]["indicators"].values() ] else: node_dict["indicators"] = None return node_dict
Return dict suitable for exporting to JSON. Args: n: A dict representing the data in a networkx AnalysisGraph node. Returns: The node dict with additional fields for name, units, dtype, and arguments.
juraj-google-style
def _CreateOutputFileHandles(self, output_type): gzip_filehandle_parent = tempfile.NamedTemporaryFile(suffix=output_type) gzip_filehandle = gzip.GzipFile(gzip_filehandle_parent.name, 'wb', self.GZIP_COMPRESSION_LEVEL, gzip_filehandle_parent) self.temp_output_trackers[output_type] = TempOutputTracker(output_type=output_type, gzip_filehandle=gzip_filehandle, gzip_filehandle_parent=gzip_filehandle_parent) return self.temp_output_trackers[output_type]
Creates a new gzipped output tempfile for the output type. We write JSON data to gzip_filehandle to get compressed data. We hold a reference to the original filehandle (gzip_filehandle_parent) so we can pass the gzip data to bigquery. Args: output_type: string of export type to be used in filename. e.g. ExportedFile Returns: A TempOutputTracker object
codesearchnet
def __init__(self, match_type=MatchType.OFPMT_OXM, oxm_match_fields=None): super().__init__() self.match_type = match_type self.oxm_match_fields = oxm_match_fields or OxmMatchFields() self._update_match_length()
Describe the flow match header structure. Args: match_type (MatchType): One of OFPMT_* (MatchType) items. length (int): Length of Match (excluding padding) followed by Exactly (length - 4) (possibly 0) bytes containing OXM TLVs, then exactly ((length + 7)/8*8 - length) (between 0 and 7) bytes of all-zero bytes. oxm_fields (OxmMatchFields): Sample description.
juraj-google-style
class MaskFormerSwinBackbone(MaskFormerSwinPreTrainedModel, BackboneMixin): def __init__(self, config: MaskFormerSwinConfig): super().__init__(config) super()._init_backbone(config) self.model = MaskFormerSwinModel(config) if 'stem' in self.out_features: raise ValueError("This backbone does not support 'stem' in the `out_features`.") self.num_features = [config.embed_dim] + [int(config.embed_dim * 2 ** i) for i in range(len(config.depths))] self.hidden_states_norms = nn.ModuleList([nn.LayerNorm(num_channels) for num_channels in self.num_features[1:]]) self.post_init() def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions outputs = self.model(pixel_values, output_hidden_states=True, output_attentions=output_attentions, return_dict=True) hidden_states = outputs.hidden_states[1:] spatial_dimensions: Tuple[Tuple[int, int]] = outputs.hidden_states_spatial_dimensions feature_maps = () for i, (hidden_state, stage, (height, width)) in enumerate(zip(hidden_states, self.stage_names[1:], spatial_dimensions)): norm = self.hidden_states_norms[i] hidden_state_unpolled = hidden_state[-1] hidden_state_norm = norm(hidden_state_unpolled) batch_size, _, hidden_size = hidden_state_norm.shape hidden_state_permuted = hidden_state_norm.permute(0, 2, 1).view((batch_size, hidden_size, height, width)).contiguous() if stage in self.out_features: feature_maps += (hidden_state_permuted,) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) if output_attentions: output += (outputs.attentions,) return output return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions)
MaskFormerSwin backbone, designed especially for the MaskFormer framework. This class reshapes `hidden_states` from `(batch_size, sequence_length, hidden_size)` to `(batch_size, num_channels, height, width)`. It also adds additional layernorms after each stage. Args: config (`MaskFormerSwinConfig`): The configuration used by [`MaskFormerSwinModel`].
github-repos
def normalize_full_name_false(decl): if decl.cache.normalized_full_name_false is None: decl.cache.normalized_full_name_false = normalize( declaration_utils.full_name(decl, with_defaults=False)) return decl.cache.normalized_full_name_false
Cached variant of normalize Args: decl (declaration.declaration_t): the declaration Returns: str: normalized name
juraj-google-style
def step(self, actions, step_mul=None): if self._state == environment.StepType.LAST: return self.reset() skip = not self._ensure_available_actions self._parallel.run( (c.act, f.transform_action(o.observation, a, skip_available=skip)) for c, f, o, a in zip( self._controllers, self._features, self._obs, actions)) self._state = environment.StepType.MID return self._step(step_mul)
Apply actions, step the world forward, and return observations. Args: actions: A list of actions meeting the action spec, one per agent. step_mul: If specified, use this rather than the environment's default. Returns: A tuple of TimeStep namedtuples, one per agent.
juraj-google-style
def parse_result(line): if line.startswith("Problem"): raise RuntimeError("Login credentials seems to be wrong") result = { 'p_value': None, 'gene_symbols': [], 'disease_nr': None, 'disease_source': None, 'description': None, 'raw_line': line } result['raw_line'] = line.rstrip() result_line = line.rstrip().split('\t') try: result['p_value'] = float(result_line[0]) except ValueError: pass try: medical_litterature = result_line[2].split(':') result['disease_source'] = medical_litterature[0] result['disease_nr'] = int(medical_litterature[1]) except IndexError: pass try: description = result_line[3] result['description'] = description except IndexError: pass if len(result_line) > 4: for gene_symbol in result_line[4].split(','): result['gene_symbols'].append(gene_symbol.strip()) return result
Parse the result line of a phenomizer request. Arguments: line (str): A raw output line from phenomizer Returns: result (dict): A dictionary with the phenomizer info: { 'p_value': float, 'gene_symbols': list(str), 'disease_nr': int, 'disease_source': str, 'description': str, 'raw_line': str }
juraj-google-style
def screenshot(self, filename=None): image = self.d.screenshot() if self.rotation: method = getattr(Image, 'ROTATE_{}'.format(self.rotation*90)) image = image.transpose(method) if filename: image.save(filename) return image
Take ios screenshot Args: - filename(string): optional Returns: PIL.Image object
juraj-google-style
def extend_webfont_settings(webfont_settings): if (not webfont_settings.get('fontdir_path', False)): raise IcomoonSettingsError("Webfont settings miss the required key item 'fontdir_path'") if (not webfont_settings.get('csspart_path', False)): webfont_settings['csspart_path'] = None return webfont_settings
Validate webfont settings and optionally fill in the missing ``csspart_path`` option. Args: webfont_settings (dict): Webfont settings (an item value from ``settings.ICOMOON_WEBFONTS``). Returns: dict: Webfont settings
codesearchnet
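A short usage sketch for `extend_webfont_settings` above (function assumed in scope); a missing `csspart_path` is filled with `None`, while a missing `fontdir_path` raises:

```python
settings = {"fontdir_path": "fonts/icomoon"}
print(extend_webfont_settings(settings))
# {'fontdir_path': 'fonts/icomoon', 'csspart_path': None}
```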
def _get_break_loop_node(break_node): loop_nodes = (astroid.For, astroid.While) parent = break_node.parent while ((not isinstance(parent, loop_nodes)) or (break_node in getattr(parent, 'orelse', []))): break_node = parent parent = parent.parent if (parent is None): break return parent
Returns the loop node that holds the break node in arguments. Args: break_node (astroid.Break): the break node of interest. Returns: astroid.For or astroid.While: the loop node holding the break node.
codesearchnet
def long_id(self, sample): if self.grid == 'WAC': lon = self.CENTER_LONGITUDE + (sample - self.SAMPLE_PROJECTION_OFFSET - 1)\ * self.MAP_SCALE * 1e-3 / (self.A_AXIS_RADIUS * np.cos(self.CENTER_LATITUDE * np.pi / 180.0)) return lon * 180 / np.pi else: lon = float(self.CENTER_LONGITUDE) + \ (sample - float(self.SAMPLE_PROJECTION_OFFSET) - 1)\ / float(self.MAP_RESOLUTION) return lon
Return the corresponding longitude Args: sample (int): sample number on a line Returns: Corresponding longitude in degrees
juraj-google-style
def add_user_to_template(self, template_id, account_id=None, email_address=None): return self._add_remove_user_template(self.TEMPLATE_ADD_USER_URL, template_id, account_id, email_address)
Gives the specified Account access to the specified Template Args: template_id (str): The id of the template to give the account access to account_id (str): The id of the account to give access to the template. The account id prevails if both account_id and email_address are provided. email_address (str): The email address of the account to give access to. Returns: A Template object
juraj-google-style
def getlines(self, bufnr=None): buf = self._vim.buffers[bufnr] if bufnr else self._vim.current.buffer return buf[:]
Get all lines of a buffer as a list. Args: bufnr (Optional[int]): A Vim buffer number, current if ``None``. Returns: List[str]
juraj-google-style
def convert_segmentation_to_rle(segmentation): segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings
Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id.
github-repos
def properties(self): props = {} for line in self.adb_shell(['getprop']).splitlines(): m = _PROP_PATTERN.match(line) if m: props[m.group('key')] = m.group('value') return props
Android Properties, extracted from `adb shell getprop` Returns: dict of props, for example: {'ro.bluetooth.dun': 'true'}
codesearchnet
def _get_audios_and_audio_lengths(self, audios: AudioInput) -> Sequence['torch.Tensor', Sequence[int]]: requires_backends(self, ['torch']) if isinstance(audios, np.ndarray): audios = torch.from_numpy(audios) elif isinstance(audios, Sequence) and isinstance(audios[0], np.ndarray): audios = [torch.from_numpy(arr) for arr in audios] if isinstance(audios, torch.Tensor): if audios.ndim == 1: audios = audios.unsqueeze(0) if not torch.is_floating_point(audios): raise ValueError('Invalid audio provided. Audio should be a floating point between 0 and 1') if audios.shape[0] > 1: logger.warning('Audio samples are already collated; assuming they all have the same length') lengths = [audios.shape[-1]] * audios.shape[0] return (audios, lengths) elif isinstance(audios, Sequence) and isinstance(audios[0], torch.Tensor): if not torch.is_floating_point(audios[0]): raise ValueError('Invalid audio provided. Audio should be a floating point between 0 and 1') lengths = [audio.shape[-1] for audio in audios] padding = [max(lengths) - length for length in lengths] audios = [audio.view(1, -1) for audio in audios] padded = [torch.nn.functional.pad(audio, (0, pad)) for audio, pad in zip(audios, padding)] audios = torch.cat(padded, dim=0) return (audios, lengths) raise TypeError('Invalid audio provided. Audio should be a one or more torch tensors or numpy arrays')
Coerces audio inputs to torch tensors and extracts audio lengths prior to stacking. Args: audios (`AudioInput`): Audio sequence, numpy array, or torch tensor.
github-repos
def get_neighbors(self, site, r): nn = self.get_sites_in_sphere(site.coords, r) return [(s, dist) for (s, dist) in nn if site != s]
Get all neighbors to a site within a sphere of radius r. Excludes the site itself. Args: site (Site): Site at the center of the sphere. r (float): Radius of sphere. Returns: [(site, dist) ...] since most of the time, subsequent processing requires the distance.
juraj-google-style
def get_message(routing_key, properties, body): if (properties.headers is None): _log.error('Message (body=%r) arrived without headers. A publisher is misbehaving!', body) properties.headers = {} try: MessageClass = get_class(properties.headers['fedora_messaging_schema']) except KeyError: _log.error('Message (headers=%r, body=%r) arrived without a schema header. A publisher is misbehaving!', properties.headers, body) MessageClass = Message try: severity = properties.headers['fedora_messaging_severity'] except KeyError: _log.error('Message (headers=%r, body=%r) arrived without a severity. A publisher is misbehaving! Defaulting to INFO.', properties.headers, body) severity = INFO if (properties.content_encoding is None): _log.error('Message arrived without a content encoding') properties.content_encoding = 'utf-8' try: body = body.decode(properties.content_encoding) except UnicodeDecodeError as e: _log.error('Unable to decode message body %r with %s content encoding', body, properties.content_encoding) raise ValidationError(e) try: body = json.loads(body) except ValueError as e: _log.error('Failed to load message body %r, %r', body, e) raise ValidationError(e) message = MessageClass(body=body, topic=routing_key, properties=properties, severity=severity) try: message.validate() _log.debug('Successfully validated message %r', message) except jsonschema.exceptions.ValidationError as e: _log.error('Message validation of %r failed: %r', message, e) raise ValidationError(e) return message
Construct a Message instance given the routing key, the properties and the body received from the AMQP broker. Args: routing_key (str): The AMQP routing key (will become the message topic) properties (pika.BasicProperties): the AMQP properties body (bytes): The encoded message body Raises: ValidationError: If Message validation failed or message body decoding/loading is impossible.
codesearchnet
def Value(self, p): if p < 0 or p > 1: raise ValueError('Probability p must be in range [0, 1]') if p == 0: return self.xs[0] if p == 1: return self.xs[-1] index = bisect.bisect(self.ps, p) if p == self.ps[index - 1]: return self.xs[index - 1] else: return self.xs[index]
Returns InverseCDF(p), the value that corresponds to probability p. Args: p: number in the range [0, 1] Returns: number value
juraj-google-style
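A standalone sketch of the bisect lookup that `Value` above performs, using plain lists in place of the CDF object's `xs`/`ps` attributes (illustrative values only):

```python
import bisect

xs = [1, 2, 3, 4]            # sorted values
ps = [0.25, 0.5, 0.75, 1.0]  # cumulative probabilities for xs

p = 0.6
index = bisect.bisect(ps, p)
print(xs[index])             # 3 -> smallest value whose CDF is >= 0.6
```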
def compose(f, *fs): rfs = list(chain([f], fs)) rfs.reverse() def composed(*args, **kwargs): return reduce((lambda result, fn: fn(result)), rfs[1:], rfs[0](*args, **kwargs)) return composed
Compose functions right to left. compose(f, g, h)(x) -> f(g(h(x))) Args: f, *fs: The head and rest of a sequence of callables. The rightmost function passed can accept any arguments and the returned function will have the same signature as this last provided function. All preceding functions must be unary. Returns: The composition of the argument functions. The returned function will accept the same arguments as the rightmost passed in function.
codesearchnet
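A quick demonstration of `compose` above, showing the right-to-left application order:

```python
add_one = lambda x: x + 1
double = lambda x: x * 2

f = compose(add_one, double)   # f(x) == add_one(double(x))
print(f(10))                   # 21
```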
def change_kernel(self, kernel, return_dict=True): if (type(kernel) != Kernel): raise BadKernelObject('Use Kernel object') return self._perform_action({'type': 'change_kernel', 'kernel': kernel.id}, return_dict)
Change the kernel to a new one Args: kernel : instance of digitalocean.Kernel.Kernel Optional Args: return_dict (bool): Return a dict when True (default), otherwise return an Action. Returns dict or Action
codesearchnet
def disable(self): self.client.api.disable_plugin(self.name) self.reload()
Disable the plugin. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def has_all_finite_radius_neurites(data_wrapper, threshold=0.0): db = data_wrapper.data_block neurite_ids = np.in1d(db[:, COLS.TYPE], POINT_TYPE.NEURITES) zero_radius_ids = (db[:, COLS.R] <= threshold) bad_pts = np.array(db[neurite_ids & zero_radius_ids][:, COLS.ID], dtype=int).tolist() return CheckResult((len(bad_pts) == 0), bad_pts)
Check that all points with neurite type have a finite radius Returns: CheckResult with result and list of IDs of neurite points with zero radius
codesearchnet
def build(X_df=None, y_df=None): if X_df is None: X_df, _ = load_data() if y_df is None: _, y_df = load_data() features = get_contrib_features() mapper_X = ballet.feature.make_mapper(features) X = mapper_X.fit_transform(X_df) encoder_y = get_target_encoder() y = encoder_y.fit_transform(y_df) return { 'X_df': X_df, 'features': features, 'mapper_X': mapper_X, 'X': X, 'y_df': y_df, 'encoder_y': encoder_y, 'y': y, }
Build features and target Args: X_df (DataFrame): raw variables y_df (DataFrame): raw target Returns: dict with keys X_df, features, mapper_X, X, y_df, encoder_y, y
juraj-google-style
def find1(self, kw: YangIdentifier, arg: str = None, pref: YangIdentifier = None, required: bool = False) -> Optional["Statement"]: for sub in self.substatements: if (sub.keyword == kw and sub.prefix == pref and (arg is None or sub.argument == arg)): return sub if required: raise StatementNotFound(str(self), kw)
Return first substatement with the given parameters. Args: kw: Statement keyword (local part for extensions). arg: Argument (all arguments will match if ``None``). pref: Keyword prefix (``None`` for built-in statements). required: Should an exception be raised on failure? Raises: StatementNotFound: If `required` is ``True`` and the statement is not found.
juraj-google-style
def _validate_first_message(cls, msg): data = cls._unpack_message(msg) logger.debug(data) if (data != cls.RTM_HANDSHAKE): raise SlackApiError('Unexpected response: {!r}'.format(data)) logger.info('Joined real-time messaging.')
Check the first message matches the expected handshake. Note: The handshake is provided as :py:attr:`RTM_HANDSHAKE`. Arguments: msg (:py:class:`aiohttp.Message`): The message to validate. Raises: :py:class:`SlackApiError`: If the data doesn't match the expected handshake.
codesearchnet
def attribute(self, attr_type, attr_value, displayed=False, source=None, unique=True, formatter=None): attr = Attribute(attr_type, attr_value, displayed, source, formatter) if (unique == 'Type'): for attribute_data in self._attributes: if (attribute_data.type == attr_type): attr = attribute_data break else: self._attributes.append(attr) elif (unique is True): for attribute_data in self._attributes: if ((attribute_data.type == attr_type) and (attribute_data.value == attr.value)): attr = attribute_data break else: self._attributes.append(attr) elif (unique is False): self._attributes.append(attr) return attr
Return instance of Attribute unique: * False - Attribute type:value can be duplicated. * Type - Attribute type has to be unique (e.g., only 1 Description Attribute). * True - Attribute type:value combo must be unique. Args: attr_type (str): The ThreatConnect defined attribute type. attr_value (str): The value for this attribute. displayed (bool, default:false): If True the supported attribute will be marked for display. source (str, optional): The source value for this attribute. unique (bool|string, optional): Control attribute creation. formatter (method, optional): A method that takes a single attribute value and returns a single formatted value. Returns: obj: An instance of Attribute.
codesearchnet
def DownloadDir(aff4_path, output_dir, bufsize=8192, preserve_path=True): if not os.path.isdir(output_dir): os.makedirs(output_dir) fd = aff4.FACTORY.Open(aff4_path) for child in fd.OpenChildren(): if preserve_path: full_dir = utils.JoinPath(output_dir, child.urn.Path()) full_dir = os.path.dirname(full_dir) if not os.path.isdir(full_dir): os.makedirs(full_dir) outfile = os.path.join(full_dir, child.urn.Basename()) else: outfile = os.path.join(output_dir, child.urn.Basename()) logging.info(u"Downloading %s to %s", child.urn, outfile) with open(outfile, "wb") as out_fd: try: buf = child.Read(bufsize) while buf: out_fd.write(buf) buf = child.Read(bufsize) except IOError as e: logging.error("Failed to read %s. Err: %s", child.urn, e)
Take an aff4 path and download all files in it to output_dir.

Args:
    aff4_path: Any aff4 path as a string
    output_dir: A local directory to write to, will be created if not there.
    bufsize: Buffer size to use.
    preserve_path: If set, all paths will be created.

Note that this works for collections as well. It will download all
files in the collection.

This only downloads files that are already in the datastore; it doesn't
queue anything on the client.
juraj-google-style
def get_enterprise_user_id(self, obj): enterprise_learner = EnterpriseCustomerUser.objects.filter(user_id=obj.id).first() return (enterprise_learner and enterprise_learner.id)
Get enterprise user id from user object.

Arguments:
    obj (User): Django User object

Returns:
    (int): Primary key identifier for the enterprise user object, or None
        if the user is not linked to an enterprise customer.
codesearchnet
def get(issue_id, issue_type_id): return db.Issue.find_one( Issue.issue_id == issue_id, Issue.issue_type_id == issue_type_id )
Return issue by ID Args: issue_id (str): Unique Issue identifier issue_type_id (str): Type of issue to get Returns: :obj:`Issue`: Returns Issue object if found, else None
juraj-google-style
def which(self, cmd, parent_environ=None, fallback=False): env = self.get_environ(parent_environ=parent_environ) path = which(cmd, env=env) if (fallback and (path is None)): path = which(cmd) return path
Find a program in the resolved environment. Args: cmd: String name of the program to find. parent_environ: Environment to interpret the context within, defaults to os.environ if None. fallback: If True, and the program is not found in the context, the current environment will then be searched. Returns: Path to the program, or None if the program was not found.
codesearchnet
def bytes_to_readable_str(num_bytes, include_b=False): if num_bytes is None: return str(num_bytes) if num_bytes < 1024: result = "%d" % num_bytes elif num_bytes < 1048576: result = "%.2fk" % (num_bytes / float(1 << 10)) elif num_bytes < 1073741824: result = "%.2fM" % (num_bytes / float(1 << 20)) else: result = "%.2fG" % (num_bytes / float(1 << 30)) if include_b: result += "B" return result
Generate a human-readable string representing number of bytes. The units B, kB, MB and GB are used. Args: num_bytes: (`int` or None) Number of bytes. include_b: (`bool`) Include the letter B at the end of the unit. Returns: (`str`) A string representing the number of bytes in a human-readable way, including a unit at the end.
juraj-google-style
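A minimal usage sketch, assuming the bytes_to_readable_str function above is in scope; the expected strings in the comments follow directly from the thresholds in its body.

# Usage sketch: assumes bytes_to_readable_str() from the snippet above is importable.
print(bytes_to_readable_str(None))                            # "None"
print(bytes_to_readable_str(500))                             # "500"
print(bytes_to_readable_str(2048))                            # "2.00k"
print(bytes_to_readable_str(5 * 1024 ** 2, include_b=True))   # "5.00MB"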
def pack(value, nbits=None): if (nbits is None): nbits = (pack_size(value) * BITS_PER_BYTE) elif (nbits <= 0): raise ValueError('Given number of bits must be greater than 0.') buf_size = int(math.ceil((nbits / float(BITS_PER_BYTE)))) buf = (ctypes.c_uint8 * buf_size)() for (idx, _) in enumerate(buf): buf[idx] = ((value >> (idx * BITS_PER_BYTE)) & 255) return buf
Packs a given value into an array of 8-bit unsigned integers.

If ``nbits`` is not present, calculates the minimal number of bits required to
represent the given ``value``. The result is little endian.

Args:
    value (int): the integer value to pack
    nbits (int): optional number of bits to use to represent the value

Returns:
    An array of ``ctypes.c_uint8`` representing the packed ``value``.

Raises:
    ValueError: if ``nbits`` is given and ``nbits <= 0``, or if ``value < 0``
        and ``nbits`` is ``None``.
    TypeError: if ``nbits`` or ``value`` are not numbers.
codesearchnet
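A short usage sketch for pack, assuming the function above (together with its BITS_PER_BYTE and pack_size helpers) is importable; the byte values in the comments follow from the little-endian loop in its body.

# Usage sketch: assumes pack() from the snippet above is importable.
buf = pack(0xABCD, nbits=16)       # 16 bits -> a 2-byte buffer
print(len(buf))                    # 2
print(hex(buf[0]), hex(buf[1]))    # 0xcd 0xab -- little endian: low byte first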
def _ReadPropertySet(self, property_set): for property_section in property_set.sections: if (property_section.class_identifier != self._CLASS_IDENTIFIER): continue for property_value in property_section.properties: property_name = self._PROPERTY_NAMES.get(property_value.identifier, None) if (not property_name): property_name = '0x{0:04}'.format(property_value.identifier) value = self._GetValueAsObject(property_value) if self._PROPERTY_VALUE_MAPPINGS: value_callback_name = self._PROPERTY_VALUE_MAPPINGS.get(property_name, None) if value_callback_name: value_callback_method = getattr(self, value_callback_name, None) if value_callback_method: value = value_callback_method(value) if (property_name in self._DATE_TIME_PROPERTIES): properties_dict = self.date_time_properties value = dfdatetime_filetime.Filetime(timestamp=value) else: properties_dict = self._properties if (property_name not in properties_dict): properties_dict[property_name] = value
Reads properties from a property set. Args: property_set (pyolecf.property_set): OLECF property set.
codesearchnet
def _to_backend_layout(tensor_layout): if tensor_layout.device_mesh is None: raise ValueError('Cannot create sharding when device mesh is not set for TensorLayout.') partition_spec = jax.sharding.PartitionSpec(*tensor_layout.axes) jax_mesh = tensor_layout.device_mesh.backend_mesh return jax.sharding.NamedSharding(jax_mesh, partition_spec)
Convert the TensorLayout to JAX backend specific Sharding. Args: tensor_layout: TensorLayout instance to convert. Returns: A `jax.sharding.NamedSharding` instance.
github-repos
def _GetScanner(self, specification_store, signature_identifiers): if not specification_store: return None scanner_object = pysigscan.scanner() for format_specification in specification_store.specifications: if format_specification.identifier not in signature_identifiers: continue for signature in format_specification.signatures: pattern_offset = signature.offset if pattern_offset is None: signature_flags = pysigscan.signature_flags.NO_OFFSET elif pattern_offset < 0: pattern_offset *= -1 signature_flags = pysigscan.signature_flags.RELATIVE_FROM_END else: signature_flags = pysigscan.signature_flags.RELATIVE_FROM_START scanner_object.add_signature( signature.identifier, pattern_offset, signature.pattern, signature_flags) self._signature_identifiers.append(format_specification.identifier) return scanner_object
Initializes the scanner from the specification store.

Args:
    specification_store (FormatSpecificationStore): a specification store.
    signature_identifiers (list[str]): signature identifiers.

Returns:
    pysigscan.scanner: signature scanner or None.
juraj-google-style
def ReadArtifactDefinitionValues(self, artifact_definition_values): if not artifact_definition_values: raise errors.FormatError('Missing artifact definition values.') different_keys = ( set(artifact_definition_values) - definitions.TOP_LEVEL_KEYS) if different_keys: different_keys = ', '.join(different_keys) raise errors.FormatError('Undefined keys: {0:s}'.format(different_keys)) name = artifact_definition_values.get('name', None) if not name: raise errors.FormatError('Invalid artifact definition missing name.') description = artifact_definition_values.get('doc', None) if not description: raise errors.FormatError( 'Invalid artifact definition: {0:s} missing description.'.format( name)) artifact_definition = artifact.ArtifactDefinition( name, description=description) if artifact_definition_values.get('collectors', []): raise errors.FormatError( 'Invalid artifact definition: {0:s} still uses collectors.'.format( name)) urls = artifact_definition_values.get('urls', []) if not isinstance(urls, list): raise errors.FormatError( 'Invalid artifact definition: {0:s} urls is not a list.'.format( name)) artifact_definition.conditions = artifact_definition_values.get( 'conditions', []) artifact_definition.provides = artifact_definition_values.get( 'provides', []) self._ReadLabels(artifact_definition_values, artifact_definition, name) self._ReadSupportedOS(artifact_definition_values, artifact_definition, name) artifact_definition.urls = urls self._ReadSources(artifact_definition_values, artifact_definition, name) return artifact_definition
Reads an artifact definition from a dictionary. Args: artifact_definition_values (dict[str, object]): artifact definition values. Returns: ArtifactDefinition: an artifact definition. Raises: FormatError: if the format of the artifact definition is not set or incorrect.
juraj-google-style
def update_fitness(objective_function, particle): fitness = objective_function(particle.position) best_fitness = particle.best_fitness cmp = comparator(fitness) if best_fitness is None or cmp(fitness, best_fitness): best_position = particle.position return particle._replace(fitness=fitness, best_fitness=fitness, best_position=best_position) else: return particle._replace(fitness=fitness)
Calculates and updates the fitness and best_fitness of a particle.

Fitness is calculated using the given objective function; the personal best
is updated when the new fitness is an improvement according to the
module-level comparator.

Args:
    objective_function: Callable evaluating the fitness of a position.
    particle: cipy.algorithms.pso.Particle: Particle to update the fitness
        for.

Returns:
    cipy.algorithms.pso.Particle: A new particle with the updated fitness.
juraj-google-style
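A self-contained sketch of the same personal-best update idea, using a hypothetical Particle namedtuple and a minimisation comparator as stand-ins for the cipy types and the module-level comparator helper.

from collections import namedtuple
import operator

# Hypothetical stand-ins for the library pieces update_fitness relies on.
Particle = namedtuple('Particle', 'position fitness best_fitness best_position')
comparator = lambda fitness: operator.lt   # minimisation: lower fitness is better

def sphere(position):
    return sum(x * x for x in position)

p = Particle(position=(1.0, 2.0), fitness=None, best_fitness=9.0, best_position=(3.0, 0.0))
fitness = sphere(p.position)   # 5.0
if p.best_fitness is None or comparator(fitness)(fitness, p.best_fitness):
    # New personal best: record both the fitness and the position that achieved it.
    p = p._replace(fitness=fitness, best_fitness=fitness, best_position=p.position)
else:
    p = p._replace(fitness=fitness)
print(p.best_fitness, p.best_position)   # 5.0 (1.0, 2.0)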
def delete_storage_account(access_token, subscription_id, rgname, account_name): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.Storage/storageAccounts/', account_name, '?api-version=', STORAGE_API]) return do_delete(endpoint, access_token)
Delete a storage account in the specified resource group.

Args:
    access_token (str): A valid Azure authentication token.
    subscription_id (str): Azure subscription id.
    rgname (str): Azure resource group name.
    account_name (str): Name of the storage account to delete.

Returns:
    HTTP response.
juraj-google-style
def validate_split(query): if query.order: raise SplitNotPossibleError('Query cannot have any sort orders.') if query.limit is not None: raise SplitNotPossibleError('Query cannot have a limit set.') for filter in query.filters: if isinstance(filter[1], ValueProvider): filter_operator = filter[1].get() else: filter_operator = filter[1] if filter_operator in ['<', '<=', '>', '>=']: raise SplitNotPossibleError('Query cannot have any inequality filters.')
Verifies that the given query can be properly scattered.

Note that equality and ancestor filters are allowed, however they may result
in inefficient sharding.

Args:
    query: The Datastore query to validate for splitting.

Raises:
    SplitNotPossibleError: if the split could not be performed owing to query
        parameters (sort orders, a limit, or inequality filters).
github-repos
def FormatTree(tree, style_config=None, lines=None): style.SetGlobalStyle(style.CreateStyleFromConfig(style_config)) comment_splicer.SpliceComments(tree) continuation_splicer.SpliceContinuations(tree) subtype_assigner.AssignSubtypes(tree) identify_container.IdentifyContainers(tree) split_penalty.ComputeSplitPenalties(tree) blank_line_calculator.CalculateBlankLines(tree) llines = pytree_unwrapper.UnwrapPyTree(tree) for lline in llines: lline.CalculateFormattingInformation() lines = _LineRangesToSet(lines) _MarkLinesToFormat(llines, lines) return reformatter.Reformat(_SplitSemicolons(llines), lines)
Format a parsed lib2to3 pytree. This provides an alternative entry point to YAPF. Arguments: tree: (pytree.Node) The root of the pytree to format. style_config: (string) Either a style name or a path to a file that contains formatting style settings. If None is specified, use the default style as set in style.DEFAULT_STYLE_FACTORY lines: (list of tuples of integers) A list of tuples of lines, [start, end], that we want to format. The lines are 1-based indexed. It can be used by third-party code (e.g., IDEs) when reformatting a snippet of code rather than a whole file. Returns: The source formatted according to the given formatting style.
github-repos
def merkleroot(hashes): if not hashes: return sha3_256(b'').hexdigest() if len(hashes) == 1: return hexlify(hashes[0]).decode() if len(hashes) % 2 == 1: hashes.append(hashes[-1]) parent_hashes = [ sha3_256(hashes[i] + hashes[i+1]).digest() for i in range(0, len(hashes)-1, 2) ] return merkleroot(parent_hashes)
Computes the merkle root for a given list. Args: hashes (:obj:`list` of :obj:`bytes`): The leaves of the tree. Returns: str: Merkle root in hexadecimal form.
juraj-google-style
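A usage sketch, assuming merkleroot from the snippet above is importable; it relies on sha3_256 and hexlify, which its module imports elsewhere.

from binascii import hexlify
from hashlib import sha3_256

# Leaves are raw digests; the root of a single leaf is simply its hex form.
leaf_a = sha3_256(b'tx-a').digest()
leaf_b = sha3_256(b'tx-b').digest()

print(merkleroot([leaf_a]) == hexlify(leaf_a).decode())                       # True
print(merkleroot([leaf_a, leaf_b]) == sha3_256(leaf_a + leaf_b).hexdigest())  # True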
def resnet18(pretrained=False, **kwargs): model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model
Constructs a ResNet-18 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
codesearchnet
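A brief usage sketch, assuming the module above is importable and that ResNet forwards num_classes through **kwargs as in torchvision; pretrained=True would instead download ImageNet weights via model_zoo.

import torch

# Usage sketch: assumes resnet18() from the snippet above is importable.
model = resnet18(pretrained=False, num_classes=10)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))   # one RGB image, 224x224
print(logits.shape)                               # torch.Size([1, 10])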
def source_lines(self, host_name, file_path): offset = self._host_name_file_path_to_offset[host_name, file_path] return list(self._reader.read_source_files_event(offset).source_file.lines)
Read the line-by-line content of a source file. Args: host_name: Host name on which the source file is located. file_path: File path at which the source file is located. Returns: Lines of the source file as a `list` of `str`s.
github-repos
def update_acmg(self, institute_obj, case_obj, user_obj, link, variant_obj, acmg_str): self.create_event(institute=institute_obj, case=case_obj, user=user_obj, link=link, category='variant', verb='acmg', variant=variant_obj, subject=variant_obj['display_name']) LOG.info('Setting ACMG to {} for: {}'.format(acmg_str, variant_obj['display_name'])) if (acmg_str is None): updated_variant = self.variant_collection.find_one_and_update({'_id': variant_obj['_id']}, {'$unset': {'acmg_classification': 1}}, return_document=pymongo.ReturnDocument.AFTER) else: updated_variant = self.variant_collection.find_one_and_update({'_id': variant_obj['_id']}, {'$set': {'acmg_classification': REV_ACMG_MAP[acmg_str]}}, return_document=pymongo.ReturnDocument.AFTER) LOG.debug('Variant updated') return updated_variant
Update the ACMG classification of a variant and create a corresponding event.

If `acmg_str` is None, the existing classification is cleared instead.

Arguments:
    institute_obj (dict): An Institute object
    case_obj (dict): Case object
    user_obj (dict): A User object
    link (str): The url to be used in the event
    variant_obj (dict): A variant object
    acmg_str (str): The new ACMG classification string

Returns:
    updated_variant
codesearchnet
def _wrap_and_check_outputs(self, outputs, single_output_default_name, error_label=None): if not isinstance(outputs, dict): outputs = {single_output_default_name: outputs} output_dict = {} for key, value in outputs.items(): error_name = error_label or single_output_default_name key = self._check_output_key(key, error_name) if not isinstance(value, tensor.Tensor): raise ValueError('{} output value must be a Tensor; got {}.'.format(error_name, value)) output_dict[key] = value return output_dict
Wraps raw tensors as dicts and checks type. Note that we create a new dict here so that we can overwrite the keys if necessary. Args: outputs: A `Tensor` or a dict of string to `Tensor`. single_output_default_name: A string key for use in the output dict if the provided `outputs` is a raw tensor. error_label: descriptive string for use in error messages. If none, single_output_default_name will be used. Returns: A dict of tensors Raises: ValueError: if the outputs dict keys are not strings or tuples of strings or the values are not Tensors.
github-repos
def traverse_inorder(self, leaves=True, internal=True): c = self; s = deque(); done = False while not done: if c is None: if len(s) == 0: done = True else: c = s.pop() if (leaves and c.is_leaf()) or (internal and not c.is_leaf()): yield c if len(c.children) == 0: c = None elif len(c.children) == 2: c = c.children[1] else: raise RuntimeError(INORDER_NONBINARY) else: s.append(c) if len(c.children) == 0: c = None elif len(c.children) == 2: c = c.children[0] else: raise RuntimeError(INORDER_NONBINARY)
Perform an inorder traversal starting at this ``Node`` object Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
juraj-google-style
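A self-contained sketch of the same iterative in-order traversal, using a minimal stand-in Node (0 or 2 children) instead of the library's tree classes.

from collections import deque

class Node:
    # Minimal stand-in: a node holds a label and either 0 or 2 children.
    def __init__(self, label, children=()):
        self.label = label
        self.children = list(children)

def inorder(node):
    stack, c = deque(), node
    while True:
        if c is None:
            if not stack:
                return
            c = stack.pop()
            yield c                                     # visit after the left subtree
            c = c.children[1] if c.children else None   # then descend right
        else:
            stack.append(c)
            c = c.children[0] if c.children else None   # descend left first

root = Node('root', [Node('a'), Node('b')])
print([n.label for n in inorder(root)])   # ['a', 'root', 'b']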
def find_module_id_defining_flag(self, flagname, default=None): registered_flag = self._flags().get(flagname) if registered_flag is None: return default for module_id, flags in six.iteritems(self.flags_by_module_id_dict()): for flag in flags: if (flag.name == registered_flag.name and flag.short_name == registered_flag.short_name): return module_id return default
Return the ID of the module defining this flag, or default. Args: flagname: str, name of the flag to lookup. default: Value to return if flagname is not defined. Defaults to None. Returns: The ID of the module which registered the flag with this name. If no such module exists (i.e. no flag with this name exists), we return default.
juraj-google-style
def exec_python(attr, src, executable="python"): import subprocess if isinstance(src, basestring): src = [src] p = popen([executable, "-c", "; ".join(src)], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() if p.returncode: from rez.exceptions import InvalidPackageError raise InvalidPackageError( "Error determining package attribute '%s':\n%s" % (attr, err)) return out.strip()
Runs a python subproc to calculate a package attribute.

Args:
    attr (str): Name of package attribute being created.
    src (list of str): Python code to execute, will be converted into a
        semicolon-delimited single line of code.
    executable (str): Python executable to run. Defaults to "python".

Returns:
    str: Output of python process.
juraj-google-style
class RunScoreAndLearn(beam.PTransform[beam.PCollection[NestedKeyedInputT], beam.PCollection[NestedKeyedOutputT]]): def __init__(self, detector: AnomalyDetector): self._detector = detector def expand(self, input: beam.PCollection[NestedKeyedInputT]) -> beam.PCollection[NestedKeyedOutputT]: return input | beam.ParDo(_ScoreAndLearnDoFn(self._detector.to_spec()))
Applies the _ScoreAndLearnDoFn to a PCollection of data. This PTransform scores and learns from data points using an anomaly detection model. Args: detector: The anomaly detection model to use.
github-repos
def _attempt_shard_retry(self, shard_state, tstate): shard_attempts = (shard_state.retries + 1) if (shard_attempts >= parameters.config.SHARD_MAX_ATTEMPTS): logging.warning('Shard attempt %s exceeded %s max attempts.', shard_attempts, parameters.config.SHARD_MAX_ATTEMPTS) return self._TASK_DIRECTIVE.FAIL_TASK if (tstate.output_writer and (not tstate.output_writer._supports_shard_retry(tstate))): logging.warning('Output writer %s does not support shard retry.', tstate.output_writer.__class__.__name__) return self._TASK_DIRECTIVE.FAIL_TASK shard_state.reset_for_retry() logging.warning('Shard %s attempt %s failed with up to %s attempts.', shard_state.shard_id, shard_state.retries, parameters.config.SHARD_MAX_ATTEMPTS) output_writer = None if tstate.output_writer: output_writer = tstate.output_writer.create(tstate.mapreduce_spec, shard_state.shard_number, (shard_attempts + 1)) tstate.reset_for_retry(output_writer) return self._TASK_DIRECTIVE.RETRY_SHARD
Whether to retry shard. This method may modify shard_state and tstate to prepare for retry or fail. Args: shard_state: model.ShardState for current shard. tstate: model.TransientShardState for current shard. Returns: A _TASK_DIRECTIVE enum. RETRY_SHARD if shard should be retried. FAIL_TASK otherwise.
codesearchnet
def update(w: jax.Array, scores: jax.Array, rows: jax.Array, cols: jax.Array, Y: jax.Array) -> typing.Tuple[jax.Array, jax.Array, int, float]: N = w.shape[0] M = scores.shape[0] res = w.dot(Y) - jax.ops.segment_sum((w * (2 * Y - 1)).take(rows), cols, M) err = 0.5 - jnp.abs(res - 0.5) best_feature_index: int = err.argmin() positivity: bool = res.at[best_feature_index].get() < 0.5 err_min = err.at[best_feature_index].get() amount: float = jnp.log((1 - err_min) / (err_min + EPS)) X_best = jnp.zeros(N, dtype=bool).at[jnp.where(cols == best_feature_index, rows, N)].set(True, mode='drop') w = w * jnp.exp(amount * (Y ^ X_best == positivity)) w = w / w.sum() score = amount * (2 * positivity - 1) scores = scores.at[best_feature_index].add(score) return (w, scores, best_feature_index, score)
Calculates the new weight vector and the contribution scores.

Args:
    w (jax.Array): A weight vector.
    scores (jax.Array): Contribution scores of features.
    rows (jax.Array): Row indices of True values in the input data.
    cols (jax.Array): Column indices of True values in the input data.
    Y (jax.Array): The target output.

Returns:
    A tuple of following items:
    - w (jax.Array): The new weight vector.
    - scores (jax.Array): The new contribution scores.
    - best_feature_index (int): The index of the best feature.
    - score (float): The newly added score for the best feature.
github-repos
def blocks(self, name): b = self._blocks(name) if b: return b return self._blocks(name.replace('?>?', ' '))
Search for defined blocks recursively. Allow '>' to be ignored. '.a .b' == '.a > .b' Args: name (string): Search term Returns: Block object OR False
juraj-google-style
def __init__(self, z=None, x=None, label=None): r if label is not None: a = Pauli.from_label(label) self._z = a.z self._x = a.x else: self._init_from_bool(z, x)
r"""Make the Pauli object. Note that, for the qubit index: - Order of z, x vectors is q_0 ... q_{n-1}, - Order of pauli label is q_{n-1} ... q_0 E.g., - z and x vectors: z = [z_0 ... z_{n-1}], x = [x_0 ... x_{n-1}] - a pauli is $P_{n-1} \otimes ... \otimes P_0$ Args: z (numpy.ndarray): boolean, z vector x (numpy.ndarray): boolean, x vector label (str): pauli label
juraj-google-style
def DumpMany(objs): precondition.AssertIterableType(objs, object) text = yaml.safe_dump_all(objs, default_flow_style=False, allow_unicode=True) if compatibility.PY2: text = text.decode("utf-8") return text
Stringifies a sequence of Python objects to a multi-document YAML. Args: objs: An iterable of Python objects to convert to YAML. Returns: A multi-document YAML representation of the given objects.
juraj-google-style
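A short sketch of the multi-document dump that DumpMany wraps, showing the underlying yaml.safe_dump_all call; the precondition and compatibility helpers are assumed to be library-internal.

import yaml

# Two Python objects become two YAML documents separated by '---'.
text = yaml.safe_dump_all([{'a': 1}, {'b': 2}], default_flow_style=False, allow_unicode=True)
print(text)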
def build(cls, value: object, binary: bool = False, fallback: object = None) -> Union[Nil, 'String']: if value is None: if fallback is None: return Nil() else: return cls.build(fallback, binary) elif not value: return QuotedString(b'') elif isinstance(value, bytes): ascii_ = value elif isinstance(value, memoryview): ascii_ = bytes(value) elif hasattr(value, '__bytes__'): ascii_ = bytes(cast(SupportsBytes, value)) elif isinstance(value, str) or hasattr(value, '__str__'): value = str(value) try: ascii_ = bytes(value, 'ascii') except UnicodeEncodeError: ascii_ = bytes(value, 'utf-8', 'replace') return LiteralString(ascii_, binary) else: raise TypeError(value) if not binary and len(ascii_) < 64 \ and b'\n' not in ascii_ \ and b'\x00' not in ascii_: return QuotedString(ascii_) else: return LiteralString(ascii_, binary)
Produce either a :class:`QuotedString` or :class:`LiteralString` based on the contents of ``data``. This is useful to improve readability of response data. Args: value: The string to serialize. binary: True if the string should be transmitted as binary. fallback: The default value to use if ``value`` is None.
juraj-google-style
def token_network_connect( self, registry_address: PaymentNetworkID, token_address: TokenAddress, funds: TokenAmount, initial_channel_target: int = 3, joinable_funds_target: float = 0.4, ) -> None: if not is_binary_address(registry_address): raise InvalidAddress('registry_address must be a valid address in binary') if not is_binary_address(token_address): raise InvalidAddress('token_address must be a valid address in binary') token_network_identifier = views.get_token_network_identifier_by_token_address( chain_state=views.state_from_raiden(self.raiden), payment_network_id=registry_address, token_address=token_address, ) connection_manager = self.raiden.connection_manager_for_token_network( token_network_identifier, ) has_enough_reserve, estimated_required_reserve = has_enough_gas_reserve( raiden=self.raiden, channels_to_open=initial_channel_target, ) if not has_enough_reserve: raise InsufficientGasReserve(( 'The account balance is below the estimated amount necessary to ' 'finish the lifecycles of all active channels. A balance of at ' f'least {estimated_required_reserve} wei is required.' )) connection_manager.connect( funds=funds, initial_channel_target=initial_channel_target, joinable_funds_target=joinable_funds_target, )
Automatically maintain channels open for the given token network.

Args:
    registry_address: registry address of the payment network the token
        belongs to.
    token_address: the ERC20 token network to connect to.
    funds: the amount of funds that can be used by the ConnectionManager.
    initial_channel_target: number of channels to open proactively.
    joinable_funds_target: fraction of the funds that will be used to join
        channels opened by other participants.
juraj-google-style
def send_email_message(self, recipient, subject, html_message, text_message, sender_email, sender_name): if not current_app.testing: from flask_sendmail import Message message = Message( subject, recipients=[recipient], html=html_message, body=text_message) self.mail.send(message)
Send email message via Flask-Sendmail. Args: recipient: Email address or tuple of (Name, Email-address). subject: Subject line. html_message: The message body in HTML. text_message: The message body in plain text.
juraj-google-style
def save_scan_plot(self, filename='scan.pdf', img_format='pdf', coords=None): plt = self.get_scan_plot(coords) plt.savefig(filename, format=img_format)
Save matplotlib plot of the potential energy surface to a file.

Args:
    filename: Filename to write to.
    img_format: Image format to use. Defaults to PDF.
    coords: internal coordinate name to use as abscissa.
codesearchnet
def _set_textarea(el, value): if isinstance(value, dict): el.text = value["val"] elif type(value) in [list, tuple]: el.text = "\n\n".join( "-- %s --\n%s" % (item["source"], item["val"]) for item in value ) else: el.text = value
Set content of given textarea element `el` to `value`. Args: el (obj): Reference to textarea element you wish to set. value (obj/list): Value to which the `el` will be set.
juraj-google-style
def testRaggedOneHotMatchesArrayOpsOneHot(self, indices_shape, depth, on_value=None, off_value=None, axis=None, dtype=None): indices_shape = tensor_shape.as_shape(indices_shape) indices = np.random.randint(depth + 1, size=indices_shape) expected = array_ops.one_hot(indices, depth, on_value=on_value, off_value=off_value, axis=axis, dtype=dtype) for ragged_rank in range(1, len(indices_shape)): if axis is not None and 0 <= axis <= ragged_rank: continue ragged_indices = ragged_tensor.RaggedTensor.from_tensor(indices, ragged_rank=ragged_rank) result = ragged_array_ops.ragged_one_hot(ragged_indices, depth, on_value=on_value, off_value=off_value, axis=axis, dtype=dtype) self.assertAllEqual(result.to_tensor(), expected)
Tests that tf.one_hot gives the same result for ragged & uniform tensors. Runs tf.one_hot with a uniform tensor, and compares the output with the results of calling tf.one_hot with ragged version of that tensor with varying ragged ranks. Args: indices_shape: Shape for `indices` arg to `tf.one_hot` depth: `depth` arg to `tf.one_hot` on_value: `on_value` arg to `tf.one_hot` off_value: `off_value` arg to `tf.one_hot` axis: `axis` arg to `tf.one_hot` dtype: `dtype` arg to `tf.one_hot`
github-repos
def __call__(self, request: beam.Row, *args, **kwargs): if self.entity_row_fn: entity_dict = self.entity_row_fn(request) else: request_dict = request._asdict() entity_dict = {self.entity_id: request_dict[self.entity_id]} feature_values = self.store.get_online_features(features=self.features, entity_rows=[entity_dict], full_feature_names=self.full_feature_names).to_dict() response_dict = {k: v[0] for k, v in feature_values.items()} return (request, beam.Row(**response_dict))
Fetches feature values for an entity-id from the Feast feature store. Args: request: the input `beam.Row` to enrich.
github-repos
def _PreprocessSources(self, extraction_engine): logger.debug('Starting preprocessing.') try: artifacts_registry = engine.BaseEngine.BuildArtifactsRegistry( self._artifact_definitions_path, self._custom_artifacts_path) extraction_engine.PreprocessSources( artifacts_registry, self._source_path_specs, resolver_context=self._resolver_context) except IOError as exception: logger.error('Unable to preprocess with error: {0!s}'.format(exception)) logger.debug('Preprocessing done.')
Preprocesses the sources. Args: extraction_engine (BaseEngine): extraction engine to preprocess the sources.
juraj-google-style
def set_volume(percentage): if ((percentage > 100) or (percentage < 0)): raise ValueError('percentage must be an integer between 0 and 100') if (system.get_name() == 'windows'): pass elif (system.get_name() == 'mac'): volume_int = (percentage / 10) sp.Popen(['osascript', '-e', ('set Volume %d' % volume_int)]).wait() else: formatted = (str(percentage) + '%') sp.Popen(['amixer', '--quiet', 'sset', 'Master', formatted]).wait()
Set the volume. Sets the volume to a given percentage (integer between 0 and 100). Args: percentage (int): The percentage (as a 0 to 100 integer) to set the volume to. Raises: ValueError: if the percentage is >100 or <0.
codesearchnet
def authenticate(self, user, password): request = Request(AUTH_URL) request.add_header('X-Simperium-API-Key', API_KEY) if (sys.version_info < (3, 3)): request.add_data(json.dumps({'username': user, 'password': password})) else: request.data = json.dumps({'username': user, 'password': password}).encode() try: res = urllib2.urlopen(request).read() token = json.loads(res.decode('utf-8'))['access_token'] except HTTPError: raise SimplenoteLoginFailed('Login to Simplenote API failed!') except IOError: token = None return token
Method to get simplenote auth token Arguments: - user (string): simplenote email address - password (string): simplenote password Returns: Simplenote API token as string
codesearchnet