Columns: code (string, lengths 20–4.93k), docstring (string, lengths 33–1.27k), source (3 classes)
def get_new_profile_template(self): uri = '{}/new-profile-template'.format(self.data['uri']) return self._helper.do_get(uri)
Retrieves the profile template for a given server profile. Returns: dict: Server profile template.
codesearchnet
def dump(self, content, filepath, indent=4): with open(filepath, 'w') as fp: json.dump(content, fp, indent=indent)
Dump settings content to filepath. Args: content (str): Settings content. filepath (str): Settings file location.
juraj-google-style
def get_range(self, start=None, stop=None): return self.from_iterable(self.ranges(start, stop))
Return a RangeMap for the range start to stop. Returns: A RangeMap
codesearchnet
def get_module(module_abs_import): logger.debug('starting') logger.debug(f'loading module {module_abs_import}') try: imported_module = importlib.import_module(module_abs_import) logger.debug('done') return imported_module except ModuleNotFoundError as err: msg = f"The module doesn't exist. Looking for a file like this: {module_abs_import}" extended_msg = f"{msg} {err}" logger.error(msg) raise PyModuleNotFoundError(extended_msg) from err
Use importlib to get the module dynamically. Get instance of the module specified by the module_abs_import. This means that module_abs_import must be resolvable from this package. Args: module_abs_import: string. Absolute name of module to import. Raises: PyModuleNotFoundError: if module not found.
codesearchnet
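A minimal, self-contained sketch of the same dynamic-import pattern using only the standard library; the RuntimeError here stands in for the package-specific PyModuleNotFoundError above and is an assumption of this example.

```python
import importlib
import logging

logger = logging.getLogger(__name__)

def load_module(module_abs_import):
    """Dynamically import a module given its absolute dotted path."""
    logger.debug('loading module %s', module_abs_import)
    try:
        return importlib.import_module(module_abs_import)
    except ModuleNotFoundError as err:
        # The original wraps this in PyModuleNotFoundError; RuntimeError is a stand-in.
        raise RuntimeError(
            f"The module doesn't exist. Looking for a file like this: {module_abs_import}"
        ) from err

# Usage: returns the stdlib json module.
print(load_module('json').dumps({'ok': True}))
```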
def migrate_indexes(aggregate_indexes=None, forensic_indexes=None): version = 2 if (aggregate_indexes is None): aggregate_indexes = [] if (forensic_indexes is None): forensic_indexes = [] for aggregate_index_name in aggregate_indexes: if (not Index(aggregate_index_name).exists()): continue aggregate_index = Index(aggregate_index_name) doc = 'doc' fo_field = 'published_policy.fo' fo = 'fo' fo_mapping = aggregate_index.get_field_mapping(fields=[fo_field]) fo_mapping = fo_mapping[list(fo_mapping.keys())[0]]['mappings'] if (doc not in fo_mapping): continue fo_mapping = fo_mapping[doc][fo_field]['mapping'][fo] fo_type = fo_mapping['type'] if (fo_type == 'long'): new_index_name = '{0}-v{1}'.format(aggregate_index_name, version) body = {'properties': {'published_policy.fo': {'type': 'text', 'fields': {'keyword': {'type': 'keyword', 'ignore_above': 256}}}}} Index(new_index_name).create() Index(new_index_name).put_mapping(doc_type=doc, body=body) reindex(connections.get_connection(), aggregate_index_name, new_index_name) Index(aggregate_index_name).delete() for forensic_index in forensic_indexes: pass
Updates index mappings Args: aggregate_indexes (list): A list of aggregate index names forensic_indexes (list): A list of forensic index names
codesearchnet
def CheckGlobalStatic(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] if (((linenum + 1) < clean_lines.NumLines()) and (not Search('[;({]', line))): line += clean_lines.elided[(linenum + 1)].strip() match = Match('((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\\b(.*)', line) if (match and (not Search('\\bstring\\b(\\s+const)?\\s*\\*\\s*(const\\s+)?\\w', line)) and (not Search('\\boperator\\W', line)) and (not Match('\\s*(<.*>)?(::[a-zA-Z0-9_]+)*\\s*\\(([^"]|$)', match.group(3)))): error(filename, linenum, 'runtime/string', 4, ('For a static/global string constant, use a C style string instead: "%schar %s[]".' % (match.group(1), match.group(2)))) if Search('\\b([A-Za-z0-9_]*_)\\(\\1\\)', line): error(filename, linenum, 'runtime/init', 4, 'You seem to be initializing a member variable with itself.')
Check for unsafe global or static objects. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
codesearchnet
def _grad_variance(self): grad_var_ops = [] tensor_to_avg = [] for (t, g) in zip(self._vars, self._grad): if isinstance(g, tf.IndexedSlices): tensor_to_avg.append(tf.reshape(tf.unsorted_segment_sum(g.values, g.indices, g.dense_shape[0]), shape=t.get_shape())) else: tensor_to_avg.append(g) avg_op = self._moving_averager.apply(tensor_to_avg) grad_var_ops.append(avg_op) with tf.control_dependencies([avg_op]): self._grad_avg = [self._moving_averager.average(val) for val in tensor_to_avg] self._grad_avg_squared = [tf.square(val) for val in self._grad_avg] self._grad_var = tf.maximum(tf.constant(1e-06, dtype=self._grad_norm_squared_avg.dtype), (self._grad_norm_squared_avg - tf.add_n([tf.reduce_sum(val) for val in self._grad_avg_squared]))) if self._sparsity_debias: self._grad_var *= self._sparsity_avg return grad_var_ops
Estimate of gradient Variance. Returns: C_t ops.
codesearchnet
def ReadFileObject(self, definitions_registry, file_object): last_definition_object = None error_location = None error_message = None try: yaml_generator = yaml.safe_load_all(file_object) for yaml_definition in yaml_generator: definition_object = self._ReadDefinition( definitions_registry, yaml_definition) if not definition_object: error_location = self._GetFormatErrorLocation( yaml_definition, last_definition_object) error_message = '{0:s} Missing definition object.'.format( error_location) raise errors.FormatError(error_message) definitions_registry.RegisterDefinition(definition_object) last_definition_object = definition_object except errors.DefinitionReaderError as exception: error_message = 'in: {0:s} {1:s}'.format( exception.name or '<NAMELESS>', exception.message) raise errors.FormatError(error_message) except (yaml.reader.ReaderError, yaml.scanner.ScannerError) as exception: error_location = self._GetFormatErrorLocation({}, last_definition_object) error_message = '{0:s} {1!s}'.format(error_location, exception) raise errors.FormatError(error_message)
Reads data type definitions from a file-like object into the registry. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. file_object (file): file-like object to read from. Raises: FormatError: if the definitions values are missing or if the format is incorrect.
juraj-google-style
def _build_select_and_next_from_expressions(self, builders: Tuple[column_expression_builder.ColumnExpressionBuilder, ...], child_builders: MutableSequence[column_expression_builder.ColumnExpressionBuilder], columns_selected: MutableSequence[str]) -> Tuple[MutableSequence[str], MutableSequence[str]]: select_expressions, next_from_expressions = ([], []) for column_name in columns_selected: select_expressions.append(f'(SELECT {column_name}) AS {column_name}') for builder in builders: child_builders.extend(builder.children) column_alias = _get_column_alias(builder) if builder.column_name: columns_selected.append(column_alias) needs_unnest = builder.needs_unnest or builder.children select_expression = self._encode(builder=builder, select_scalars_as_array=needs_unnest) if needs_unnest: select_expression = f'{select_expression} AS {column_alias}_needs_unnest_' next_from_expressions.append(self._build_next_from_expression(column_alias)) else: select_expression = f'{select_expression} AS {column_alias}' select_expressions.append(select_expression) return (select_expressions, next_from_expressions)
Build select expressions and next from expressions from the builders. Args: builders: the immutable current builders to compute select expressions. child_builders: collects the current given builders' children for the next round. columns_selected: cumulatively collects columns which have already been handled completely. Returns: The select expressions and next from expressions computed from the given builders.
github-repos
def node_attributes(self, node_name, device_name=None): if not self._debug_graphs: raise LookupError('No partition graphs have been loaded.') device_name = self._infer_device_name(device_name, node_name) return self._debug_graphs[device_name].node_attributes[node_name]
Get the attributes of a node. Args: node_name: Name of the node in question. device_name: (`str`) name of the device. If there is only one device or if node_name exists on only one device, this argument is optional. Returns: Attributes of the node. Raises: LookupError: If no partition graphs have been loaded.
github-repos
def cudnn_bi_gru(units, n_hidden, seq_lengths=None, n_layers=1, trainable_initial_states=False, name='cudnn_bi_gru', reuse=False): with tf.variable_scope(name, reuse=reuse): if (seq_lengths is None): seq_lengths = (tf.ones([tf.shape(units)[0]], dtype=tf.int32) * tf.shape(units)[1]) with tf.variable_scope('Forward'): (h_fw, h_last_fw) = cudnn_gru_wrapper(units, n_hidden, n_layers=n_layers, trainable_initial_states=trainable_initial_states, seq_lengths=seq_lengths, reuse=reuse) with tf.variable_scope('Backward'): reversed_units = tf.reverse_sequence(units, seq_lengths=seq_lengths, seq_dim=1, batch_dim=0) (h_bw, h_last_bw) = cudnn_gru_wrapper(reversed_units, n_hidden, n_layers=n_layers, trainable_initial_states=trainable_initial_states, seq_lengths=seq_lengths, reuse=reuse) h_bw = tf.reverse_sequence(h_bw, seq_lengths=seq_lengths, seq_dim=1, batch_dim=0) return ((h_fw, h_bw), (h_last_fw, h_last_bw))
Fast CuDNN Bi-GRU implementation Args: units: tf.Tensor with dimensions [B x T x F], where B - batch size T - number of tokens F - features n_hidden: dimensionality of hidden state seq_lengths: number of tokens in each sample in the batch n_layers: number of layers trainable_initial_states: whether to create a special trainable variable to initialize the hidden states of the network or use just zeros name: name of the variable scope to use reuse: whether to reuse already initialized variables Returns: h - all hidden states along T dimension, tf.Tensor with dimensionality [B x T x F] h_last - last hidden state, tf.Tensor with dimensionality [B x H * 2] where H - number of hidden units
codesearchnet
def print_args(output=sys.stdout): def decorator(func): @wraps(func) def _(*args, **kwargs): output.write( "Args: {0}, KwArgs: {1}\n".format(str(args), str(kwargs))) return func(*args, **kwargs) return _ return decorator
Decorate a function so that its arguments are printed before it is called. Args: output: writable to print args. (Default: sys.stdout)
juraj-google-style
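A short usage sketch for the decorator above; the decorated add function is made up for illustration.

```python
import sys
from functools import wraps

def print_args(output=sys.stdout):
    def decorator(func):
        @wraps(func)
        def _(*args, **kwargs):
            output.write("Args: {0}, KwArgs: {1}\n".format(str(args), str(kwargs)))
            return func(*args, **kwargs)
        return _
    return decorator

@print_args()              # defaults to sys.stdout
def add(a, b):
    return a + b

print(add(2, b=3))         # prints "Args: (2,), KwArgs: {'b': 3}" and then 5
```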
def _AnalyzeEvents(self, storage_writer, analysis_plugins, event_filter=None): self._status = definitions.STATUS_INDICATOR_RUNNING self._number_of_consumed_events = 0 self._number_of_consumed_reports = 0 self._number_of_consumed_sources = 0 self._number_of_consumed_warnings = 0 self._number_of_produced_events = 0 self._number_of_produced_reports = 0 self._number_of_produced_sources = 0 self._number_of_produced_warnings = 0 number_of_filtered_events = 0 logger.debug('Processing events.') filter_limit = getattr(event_filter, 'limit', None) for event in storage_writer.GetSortedEvents(): event_data_identifier = event.GetEventDataIdentifier() if event_data_identifier: event_data = storage_writer.GetEventDataByIdentifier( event_data_identifier) if event_data: for attribute_name, attribute_value in event_data.GetAttributes(): setattr(event, attribute_name, attribute_value) event_identifier = event.GetIdentifier() event.tag = self._event_tag_index.GetEventTagByIdentifier( storage_writer, event_identifier) if event_filter: filter_match = event_filter.Match(event) else: filter_match = None if filter_match == False: number_of_filtered_events += 1 continue for event_queue in self._event_queues.values(): event_queue.PushItem(event) self._number_of_consumed_events += 1 if (event_filter and filter_limit and filter_limit == self._number_of_consumed_events): break logger.debug('Finished pushing events to analysis plugins.') for event_queue in self._event_queues.values(): event_queue.PushItem(plaso_queue.QueueAbort(), block=False) logger.debug('Processing analysis plugin results.') plugin_names = [plugin_name for plugin_name in analysis_plugins.keys()] while plugin_names: for plugin_name in list(plugin_names): if self._abort: break task = tasks.Task() task.identifier = plugin_name merge_ready = storage_writer.CheckTaskReadyForMerge(task) if merge_ready: storage_writer.PrepareMergeTaskStorage(task) self._status = definitions.STATUS_INDICATOR_MERGING event_queue = self._event_queues[plugin_name] del self._event_queues[plugin_name] event_queue.Close() storage_merge_reader = storage_writer.StartMergeTaskStorage(task) storage_merge_reader.MergeAttributeContainers( callback=self._MergeEventTag) plugin_names.remove(plugin_name) self._status = definitions.STATUS_INDICATOR_RUNNING self._number_of_produced_event_tags = ( storage_writer.number_of_event_tags) self._number_of_produced_reports = ( storage_writer.number_of_analysis_reports) try: storage_writer.StopTaskStorage(abort=self._abort) except (IOError, OSError) as exception: logger.error('Unable to stop task storage with error: {0!s}'.format( exception)) if self._abort: logger.debug('Processing aborted.') else: logger.debug('Processing completed.') events_counter = collections.Counter() events_counter['Events filtered'] = number_of_filtered_events events_counter['Events processed'] = self._number_of_consumed_events return events_counter
Analyzes events in a plaso storage. Args: storage_writer (StorageWriter): storage writer. analysis_plugins (dict[str, AnalysisPlugin]): analysis plugins that should be run and their names. event_filter (Optional[FilterObject]): event filter. Returns: collections.Counter: counter containing information about the events processed and filtered. Raises: RuntimeError: if a non-recoverable situation is encountered.
juraj-google-style
def submodules(self): submodules = [] submodules.extend(self.modules) for p in self.packages: submodules.extend(p.submodules) return submodules
Property to return all sub-modules of the node, recursively. Returns: list of Module: the sub-modules.
codesearchnet
def MakePmfFromList(t, name=''): hist = MakeHistFromList(t) d = hist.GetDict() pmf = Pmf(d, name) pmf.Normalize() return pmf
Makes a PMF from an unsorted sequence of values. Args: t: sequence of numbers name: string name for this PMF Returns: Pmf object
juraj-google-style
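MakePmfFromList relies on the Pmf and MakeHistFromList helpers from Think Stats, which are not shown here; the sketch below illustrates the same idea (count values, then normalize to probabilities) with only the standard library.

```python
from collections import Counter

def make_pmf_from_list(t):
    """Return a dict mapping each value in t to its probability."""
    counts = Counter(t)               # histogram: value -> frequency
    total = sum(counts.values())
    return {value: freq / total for value, freq in counts.items()}

print(make_pmf_from_list([1, 2, 2, 3, 5]))  # {1: 0.2, 2: 0.4, 3: 0.2, 5: 0.2}
```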
def send_command(self, command, arg=None): if arg is not None: command = '%s:%s' % (command, arg) self._write(six.StringIO(command), len(command))
Sends a command to the device. Args: command: The command to send. arg: Optional argument to the command.
juraj-google-style
def delete_items_by_index(list_, index_list, copy=False): if copy: list_ = list_[:] index_list_ = [((len(list_) + x) if (x < 0) else x) for x in index_list] index_list_ = sorted(index_list_, reverse=True) for index in index_list_: del list_[index] return list_
Remove items from ``list_`` at positions specified in ``index_list`` The original ``list_`` is preserved if ``copy`` is True Args: list_ (list): index_list (list): copy (bool): preserves original list if True Example: >>> # ENABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> list_ = [8, 1, 8, 1, 6, 6, 3, 4, 4, 5, 6] >>> index_list = [2, -1] >>> result = delete_items_by_index(list_, index_list) >>> print(result) [8, 1, 1, 6, 6, 3, 4, 4, 5]
codesearchnet
def __optimize_deconvolution_layer(self, learning_rate, epoch): params_list = [] grads_list = [] for i in range(len(self.__deconvolution_layer_list)): if self.__deconvolution_layer_list[i].delta_weight_arr.shape[0] > 0: params_list.append(self.__deconvolution_layer_list[i].graph.weight_arr) grads_list.append(self.__deconvolution_layer_list[i].delta_weight_arr) for i in range(len(self.__deconvolution_layer_list)): if self.__deconvolution_layer_list[i].delta_bias_arr.shape[0] > 0: params_list.append(self.__deconvolution_layer_list[i].graph.bias_arr) grads_list.append(self.__deconvolution_layer_list[i].delta_bias_arr) params_list = self.__opt_params.optimize( params_list, grads_list, learning_rate ) i = 0 for i in range(len(self.__deconvolution_layer_list)): if self.__deconvolution_layer_list[i].delta_weight_arr.shape[0] > 0: self.__deconvolution_layer_list[i].graph.weight_arr = params_list.pop(0) if ((epoch + 1) % self.__attenuate_epoch == 0): self.__deconvolution_layer_list[i].graph.weight_arr = self.__opt_params.constrain_weight( self.__deconvolution_layer_list[i].graph.weight_arr ) for i in range(len(self.__deconvolution_layer_list)): if self.__deconvolution_layer_list[i].delta_bias_arr.shape[0] > 0: self.__deconvolution_layer_list[i].graph.bias_arr = params_list.pop(0) for i in range(len(self.__deconvolution_layer_list)): if self.__deconvolution_layer_list[i].delta_weight_arr.shape[0] > 0: if self.__deconvolution_layer_list[i].delta_bias_arr.shape[0] > 0: self.__deconvolution_layer_list[i].reset_delta()
Back propagation for Deconvolution layer. Args: learning_rate: Learning rate. epoch: Current epoch.
juraj-google-style
def convert_attribute_name_to_tag(value): if (not isinstance(value, six.string_types)): raise ValueError('The attribute name must be a string.') for entry in attribute_name_tag_table: if (value == entry[0]): return entry[1] raise ValueError("Unrecognized attribute name: '{}'".format(value))
A utility function that converts an attribute name string into the corresponding attribute tag. For example: 'State' -> enums.Tags.STATE Args: value (string): The string name of the attribute. Returns: enum: The Tags enumeration value that corresponds to the attribute name string. Raises: ValueError: if the attribute name string is not a string or if it is an unrecognized attribute name
codesearchnet
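The attribute_name_tag_table and Tags enumeration come from the surrounding KMIP library and are not reproduced; the sketch below uses a small hypothetical table to show the same lookup-or-raise pattern.

```python
import enum

class Tags(enum.Enum):
    # Hypothetical stand-in for the library's Tags enumeration.
    STATE = 0x42008D
    NAME = 0x420053

# Hypothetical subset of attribute_name_tag_table.
attribute_name_tag_table = [
    ('State', Tags.STATE),
    ('Name', Tags.NAME),
]

def convert_attribute_name_to_tag(value):
    if not isinstance(value, str):
        raise ValueError('The attribute name must be a string.')
    for name, tag in attribute_name_tag_table:
        if value == name:
            return tag
    raise ValueError("Unrecognized attribute name: '{}'".format(value))

print(convert_attribute_name_to_tag('State'))  # Tags.STATE
```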
def aggregate_groups(self, ct_agg, nr_groups, skip_key, carray_factor, groupby_cols, agg_ops, dtype_dict, bool_arr=None): for col in groupby_cols: result_array = ctable_ext.groupby_value(self[col], carray_factor, nr_groups, skip_key) if (bool_arr is not None): result_array = np.delete(result_array, skip_key) ct_agg.addcol(result_array, name=col) del result_array for (input_col_name, output_col_name, agg_op) in agg_ops: input_col = self[input_col_name] output_col_dtype = dtype_dict[output_col_name] input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype) output_buffer = np.zeros(nr_groups, dtype=output_col_dtype) if (agg_op == 'sum'): ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups, skip_key, input_buffer, output_buffer) elif (agg_op == 'mean'): ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups, skip_key, input_buffer, output_buffer) elif (agg_op == 'std'): ctable_ext.aggregate_std(input_col, carray_factor, nr_groups, skip_key, input_buffer, output_buffer) elif (agg_op == 'count'): ctable_ext.aggregate_count(input_col, carray_factor, nr_groups, skip_key, input_buffer, output_buffer) elif (agg_op == 'count_distinct'): ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups, skip_key, input_buffer, output_buffer) elif (agg_op == 'sorted_count_distinct'): ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups, skip_key, input_buffer, output_buffer) else: raise KeyError(('Unknown aggregation operation ' + str(agg_op))) if (bool_arr is not None): output_buffer = np.delete(output_buffer, skip_key) ct_agg.addcol(output_buffer, name=output_col_name) del output_buffer ct_agg.delcol('tmp_col_bquery__')
Perform aggregation and place the result in the given ctable. Args: ct_agg (ctable): the table to hold the aggregation nr_groups (int): the number of groups (number of rows in output table) skip_key (int): index of the output row to remove from results (used for filtering) carray_factor: the carray for each row in the table a reference to the unique group index groupby_cols: the list of 'dimension' columns that are used to perform the groupby over agg_ops (list): list of tuples of the form: (input_col, agg_op) input_col (string): name of the column to act on agg_op (int): aggregation operation to perform bool_arr: a boolean array containing the filter
codesearchnet
def Decompress(self, compressed_data): try: uncompressed_data = self._zlib_decompressor.decompress(compressed_data) remaining_compressed_data = getattr( self._zlib_decompressor, 'unused_data', b'') except zlib.error as exception: raise errors.BackEndError(( 'Unable to decompress zlib compressed stream with error: ' '{0!s}.').format(exception)) return uncompressed_data, remaining_compressed_data
Decompresses the compressed data. Args: compressed_data (bytes): compressed data. Returns: tuple(bytes, bytes): uncompressed data and remaining compressed data. Raises: BackEndError: if the zlib compressed stream cannot be decompressed.
juraj-google-style
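A self-contained sketch of the same streaming decompression with the standard zlib module: decompress a chunk and keep any bytes found after the end of the compressed stream.

```python
import zlib

def decompress_chunk(decompressor, compressed_data):
    """Return (uncompressed_data, remaining_compressed_data)."""
    try:
        uncompressed_data = decompressor.decompress(compressed_data)
        remaining_compressed_data = getattr(decompressor, 'unused_data', b'')
    except zlib.error as exception:
        raise RuntimeError(
            'Unable to decompress zlib compressed stream with error: {0!s}.'.format(exception))
    return uncompressed_data, remaining_compressed_data

decompressor = zlib.decompressobj()
data = zlib.compress(b'hello world') + b'trailing bytes'
print(decompress_chunk(decompressor, data))  # (b'hello world', b'trailing bytes')
```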
def emulate(self, context=None, start=None, end=None, arch_mode=None, hooks=None, max_instrs=None, print_asm=False): if (arch_mode is not None): self._load(arch_mode=arch_mode) context = (context if context else {}) start_addr = (start if start else self.binary.ea_start) end_addr = (end if end else self.binary.ea_end) hooks = (hooks if hooks else {}) for (reg, val) in context.get('registers', {}).items(): self.ir_emulator.registers[reg] = val for (addr, val) in context.get('memory', {}).items(): self.ir_emulator.memory.write(addr, 4, val) self.emulator.emulate(start_addr, end_addr, hooks, max_instrs, print_asm) context_out = {'registers': {}, 'memory': {}} for (reg, val) in self.ir_emulator.registers.items(): context_out['registers'][reg] = val return context_out
Emulate native code. Args: context (dict): Processor context (register and/or memory). start (int): Start address. end (int): End address. arch_mode (int): Architecture mode. hooks (dict): Hooks by address. max_instrs (int): Maximum number of instructions to execute. print_asm (bool): Print asm. Returns: dict: Processor context.
codesearchnet
def learn(self, grad_arr): encoder_delta_arr, _, encoder_grads_list = self.__encoder_decoder_controller.encoder.hidden_back_propagate( grad_arr[:, -1] ) encoder_grads_list.insert(0, None) encoder_grads_list.insert(0, None) self.__encoder_decoder_controller.encoder.optimize( encoder_grads_list, self.__learning_rate, 1 ) return encoder_delta_arr
Update this Discriminator by ascending its stochastic gradient. Args: grad_arr: `np.ndarray` of gradients. Returns: `np.ndarray` of delta or gradients.
juraj-google-style
def _txn_is_in_valid_batch(self, txn_id): batch = self._batches_by_txn_id[txn_id] return all((self._txn_results[sig].is_valid for sig in set(self._txn_results).intersection((txn.header_signature for txn in batch.transactions))))
Returns whether the transaction is in a valid batch. Args: txn_id (str): The transaction header signature. Returns: (bool): True if the txn's batch is valid, False otherwise.
codesearchnet
def _get_next_empty_bitmap(self): for (i, byte) in enumerate(self._bitmap): if (byte != 255): for offset in range(8): if (not (byte & (1 << offset))): return ((i * 8) + offset)
Returns the next empty entry. Returns: int: The index of the next empty entry.
codesearchnet
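A standalone sketch of the same bit scan, assuming the bitmap is a bytes-like object in which a set bit marks an occupied entry.

```python
def next_empty_bitmap_entry(bitmap):
    """Return the index of the first clear bit, or None if the bitmap is full."""
    for i, byte in enumerate(bitmap):
        if byte != 0xFF:                        # this byte has at least one free bit
            for offset in range(8):
                if not byte & (1 << offset):    # bit `offset` is clear
                    return i * 8 + offset
    return None

print(next_empty_bitmap_entry(bytearray([0xFF, 0b00000111])))  # 11
```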
def get_wf_from_path(self, path): with open(path) as fp: content = fp.read() return [(os.path.basename(os.path.splitext(path)[0]), content), ]
Load XML from the given path. Args: path: diagram path Returns: list containing a single (diagram name, xml content) tuple.
juraj-google-style
def chhome(name, home, **kwargs): if six.PY2: name = _to_unicode(name) home = _to_unicode(home) kwargs = salt.utils.args.clean_kwargs(**kwargs) persist = kwargs.pop('persist', False) if kwargs: salt.utils.args.invalid_kwargs(kwargs) if persist: log.info('Ignoring unsupported \'persist\' argument to user.chhome') pre_info = info(name) if not pre_info: return False if home == pre_info['home']: return True if not update(name=name, home=home): return False post_info = info(name) if post_info['home'] != pre_info['home']: return post_info['home'] == home return False
Change the home directory of the user, pass True for persist to move files to the new home directory if the old home directory exists. Args: name (str): The name of the user whose home directory you wish to change home (str): The new location of the home directory Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' user.chhome foo \\\\fileserver\\home\\foo True
juraj-google-style
def tf_step( self, time, variables, arguments, fn_loss, **kwargs ): unperturbed_loss = fn_loss(**arguments) perturbations = [tf.random_normal(shape=util.shape(variable)) * self.learning_rate for variable in variables] applied = self.apply_step(variables=variables, deltas=perturbations) with tf.control_dependencies(control_inputs=(applied,)): perturbed_loss = fn_loss(**arguments) direction = tf.sign(x=(unperturbed_loss - perturbed_loss)) deltas_sum = [direction * perturbation for perturbation in perturbations] if self.unroll_loop: previous_perturbations = perturbations for sample in xrange(self.num_samples): with tf.control_dependencies(control_inputs=deltas_sum): perturbations = [tf.random_normal(shape=util.shape(variable)) * self.learning_rate for variable in variables] perturbation_deltas = [ pert - prev_pert for pert, prev_pert in zip(perturbations, previous_perturbations) ] applied = self.apply_step(variables=variables, deltas=perturbation_deltas) previous_perturbations = perturbations with tf.control_dependencies(control_inputs=(applied,)): perturbed_loss = fn_loss(**arguments) direction = tf.sign(x=(unperturbed_loss - perturbed_loss)) deltas_sum = [delta + direction * perturbation for delta, perturbation in zip(deltas_sum, perturbations)] else: def body(iteration, deltas_sum, previous_perturbations): with tf.control_dependencies(control_inputs=deltas_sum): perturbations = [tf.random_normal(shape=util.shape(variable)) * self.learning_rate for variable in variables] perturbation_deltas = [ pert - prev_pert for pert, prev_pert in zip(perturbations, previous_perturbations) ] applied = self.apply_step(variables=variables, deltas=perturbation_deltas) with tf.control_dependencies(control_inputs=(applied,)): perturbed_loss = fn_loss(**arguments) direction = tf.sign(x=(unperturbed_loss - perturbed_loss)) deltas_sum = [delta + direction * perturbation for delta, perturbation in zip(deltas_sum, perturbations)] return iteration + 1, deltas_sum, perturbations def cond(iteration, deltas_sum, previous_perturbation): return iteration < self.num_samples - 1 _, deltas_sum, perturbations = tf.while_loop(cond=cond, body=body, loop_vars=(0, deltas_sum, perturbations)) with tf.control_dependencies(control_inputs=deltas_sum): deltas = [delta / self.num_samples for delta in deltas_sum] perturbation_deltas = [delta - pert for delta, pert in zip(deltas, perturbations)] applied = self.apply_step(variables=variables, deltas=perturbation_deltas) with tf.control_dependencies(control_inputs=(applied,)): return [delta + 0.0 for delta in deltas]
Creates the TensorFlow operations for performing an optimization step. Args: time: Time tensor. variables: List of variables to optimize. arguments: Dict of arguments for callables, like fn_loss. fn_loss: A callable returning the loss of the current model. **kwargs: Additional arguments, not used. Returns: List of delta tensors corresponding to the updates for each optimized variable.
juraj-google-style
def get_all(self, attrs: Iterable[FetchAttribute]) -> Sequence[Tuple[(FetchAttribute, MaybeBytes)]]: ret: List[Tuple[(FetchAttribute, MaybeBytes)]] = [] for attr in attrs: try: ret.append((attr.for_response, self.get(attr))) except NotFetchable: pass return ret
Return a list of tuples containing the attribute itself and the bytes representation of that attribute from the message. Args: attrs: The fetch attributes.
codesearchnet
def top_rated(self, **kwargs): path = self._get_path('top_rated') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the list of top rated movies. By default, this list will only include movies that have 10 or more votes. This list refreshes every day. Args: page: (optional) Minimum value of 1. Expected value is an integer. language: (optional) ISO 639-1 code. Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def get_registry_data(self, name, auth_config=None): return RegistryData(image_name=name, attrs=self.client.api.inspect_distribution(name, auth_config), client=self.client, collection=self)
Gets the registry data for an image. Args: name (str): The name of the image. auth_config (dict): Override the credentials that are found in the config for this request. ``auth_config`` should contain the ``username`` and ``password`` keys to be valid. Returns: (:py:class:`RegistryData`): The data object. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def gru_feedfwd(a_t, h_prev, filters, name=None): with tf.variable_scope(name, default_name='GRU', values=[a_t, h_prev]): z_t = tf.sigmoid((tpu_conv1d(a_t, filters, 1, padding='SAME', name='W_z') + tpu_conv1d(h_prev, filters, 1, padding='SAME', name='U_z'))) r_t = tf.sigmoid((tpu_conv1d(a_t, filters, 1, padding='SAME', name='W_r') + tpu_conv1d(h_prev, filters, 1, padding='SAME', name='U_r'))) h_tilde = tf.tanh((tpu_conv1d(a_t, filters, 1, padding='SAME', name='W') + tpu_conv1d((r_t * h_prev), filters, 1, padding='SAME', name='U'))) h_t = (((1.0 - z_t) * h_prev) + (z_t * h_tilde)) return h_t
Position-wise feed-forward GRU gates following the MPNN. Args: a_t: Tensor of shape [batch, length, depth] of current input h_prev: Tensor of shape [batch, length, depth] of prev input filters: an integer specifying number of dimensions of the filters name: A string Returns: h_t: [batch, length, filters] hidden state
codesearchnet
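The function above evaluates the standard GRU gate equations with 1-D convolutions; a NumPy sketch of the same equations on plain [batch, length, depth] arrays, with random matrices standing in for the learned W/U projections, looks like this.

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gru_step(a_t, h_prev, params):
    """Position-wise GRU update; a_t and h_prev are [batch, length, depth]."""
    W_z, U_z, W_r, U_r, W, U = params
    z_t = sigmoid(a_t @ W_z + h_prev @ U_z)            # update gate
    r_t = sigmoid(a_t @ W_r + h_prev @ U_r)            # reset gate
    h_tilde = np.tanh(a_t @ W + (r_t * h_prev) @ U)    # candidate state
    return (1.0 - z_t) * h_prev + z_t * h_tilde

depth = 4
rng = np.random.default_rng(0)
params = [rng.normal(size=(depth, depth)) for _ in range(6)]
a_t = rng.normal(size=(2, 5, depth))
h_prev = np.zeros((2, 5, depth))
print(gru_step(a_t, h_prev, params).shape)  # (2, 5, 4)
```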
def node(self, force_new_node: bool=False) -> EventSetNode: if self._internal_node is not None and (not force_new_node): return self._internal_node self._internal_node = create_node_with_new_reference(schema=self._schema, name=self._name) return self._internal_node
Creates an [`EventSetNode`][temporian.EventSetNode] able to consume this EventSet. If called multiple times with `force_new_node=False` (default), the same node is returned. Usage example: ```python >>> my_evset = tp.event_set( ... timestamps=[1, 2, 3, 4], ... features={ ... "feature_1": [0.5, 0.6, np.nan, 0.9], ... "feature_2": ["red", "blue", "red", "blue"], ... }, ... ) >>> my_node = my_evset.node() ``` Args: force_new_node: If false (default), return the same node each time `node` is called. If true, a new node is created each time. Returns: An EventSetNode able to consume this EventSet.
github-repos
def acquire(self, blocking=True, timeout=-1): result = self.lock.acquire(blocking, timeout) return result
Acquire the :attr:`lock` Args: blocking (bool): See :meth:`threading.Lock.acquire` timeout (float): See :meth:`threading.Lock.acquire` Returns: bool: :obj:`True` if the lock was acquired, otherwise :obj:`False`
juraj-google-style
def module_import(module_path): try: module = __import__(module_path) components = module_path.split('.') for component in components[1:]: module = getattr(module, component) return module except ImportError: raise BadModulePathError(('Unable to find module "%s".' % (module_path,)))
Imports the module indicated by module_path Args: module_path: string representing a module path such as 'app.config' or 'app.extras.my_module' Returns: the module matching the name of the last component, i.e. for 'app.extras.my_module' it returns a reference to my_module Raises: BadModulePathError if the module is not found
codesearchnet
def unpause(self, container): url = self._url('/containers/{0}/unpause', container) res = self._post(url) self._raise_for_status(res)
Unpause all processes within a container. Args: container (str): The container to unpause
codesearchnet
def _in_gae_environment(): if (SETTINGS.env_name is not None): return (SETTINGS.env_name in ('GAE_PRODUCTION', 'GAE_LOCAL')) try: import google.appengine except ImportError: pass else: server_software = os.environ.get(_SERVER_SOFTWARE, '') if server_software.startswith('Google App Engine/'): SETTINGS.env_name = 'GAE_PRODUCTION' return True elif server_software.startswith('Development/'): SETTINGS.env_name = 'GAE_LOCAL' return True return False
Detects if the code is running in the App Engine environment. Returns: True if running in the GAE environment, False otherwise.
codesearchnet
async def destroy_tournament(self, t: Tournament): (await self.connection('DELETE', 'tournaments/{}'.format(t.id))) if (t in self.tournaments): self.tournaments.remove(t)
completely removes a tournament from Challonge |methcoro| Note: |from_api| Deletes a tournament along with all its associated records. There is no undo, so use with care! Raises: APIException
codesearchnet
def index_last_dim_with_indices(x, indices): assert (len(x.shape) == (len(indices.shape) + 1)) x_shape = shape_list(x) vocab_size = x_shape[(- 1)] flat_x = tf.reshape(x, [list_product(x_shape[:(- 1)]), vocab_size]) flat_indices = tf.reshape(indices, [list_product(x_shape[:(- 1)])]) idx = tf.stack([tf.range(tf.to_int64(shape_list(flat_indices)[0])), tf.to_int64(flat_indices)], axis=1) flat_x_idx = tf.gather_nd(flat_x, idx) x_idx = tf.reshape(flat_x_idx, x_shape[:(- 1)]) return x_idx
Use indices to index into the last axis of x. This can be useful for recovering the actual probabilities of a sample from a probability distribution. Args: x: Tensor, n-d. indices: Tensor, (n-1)-d, where the dimension sizes match the first (n-1) dimensions of x. The values of indices will be used to index into the last axis of x. Returns: Tensor, (n-1)-d.
codesearchnet
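A NumPy sketch of the same last-axis gather; np.take_along_axis handles the flatten/stack bookkeeping that the TensorFlow version does by hand.

```python
import numpy as np

def index_last_dim_with_indices(x, indices):
    """x: (..., vocab); indices: (...) -> the values of x at those last-axis positions."""
    gathered = np.take_along_axis(x, indices[..., np.newaxis], axis=-1)
    return np.squeeze(gathered, axis=-1)

probs = np.array([[0.1, 0.7, 0.2],
                  [0.5, 0.3, 0.2]])
samples = np.array([1, 0])
print(index_last_dim_with_indices(probs, samples))  # [0.7 0.5]
```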
def derive_field_name(self, field_name): cls = type(self) return cls( self[0], self[1], self[2], field_name, self[4], self[5] )
Derives a new event from this one setting the ``field_name`` attribute. Args: field_name (Union[amazon.ion.symbols.SymbolToken, unicode]): The field name to set. Returns: IonEvent: The newly generated event.
juraj-google-style
def call(self, input_ids: TFModelInputType=None, attention_mask: tf.Tensor | None=None, decoder_input_ids: tf.Tensor | None=None, decoder_attention_mask: tf.Tensor | None=None, decoder_position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, decoder_head_mask: tf.Tensor | None=None, cross_attn_head_mask: tf.Tensor | None=None, encoder_outputs: Optional[TFBaseModelOutput]=None, past_key_values: Optional[Tuple[Tuple[tf.Tensor]]]=None, inputs_embeds: tf.Tensor | None=None, decoder_inputs_embeds: tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]: if labels is not None: labels = tf.where(labels == self.config.pad_token_id, tf.cast(tf.fill(shape_list(labels), -100), labels.dtype), labels) use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id) outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True) lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: output = (lm_logits,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return TFSeq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns:
github-repos
def validate_gcs_path(path, require_object): bucket, key = datalab.storage._bucket.parse_name(path) if bucket is None: raise Exception('Invalid GCS path "%s"' % path) if require_object and key is None: raise Exception('It appears the GCS path "%s" is a bucket path but not an object path' % path)
Check whether a given path is a valid GCS path. Args: path: the GCS path to check. require_object: if True, the path has to be an object path but not a bucket path. Raises: Exception if the path is invalid
juraj-google-style
def ensure_app_config_dir(appname, *args): from ubelt import util_path dpath = get_app_config_dir(appname, *args) util_path.ensuredir(dpath) return dpath
Calls `get_app_config_dir` but ensures the directory exists. Args: appname (str): the name of the application *args: any other subdirectories may be specified SeeAlso: get_app_config_dir Example: >>> import ubelt as ub >>> dpath = ub.ensure_app_config_dir('ubelt') >>> assert exists(dpath)
juraj-google-style
def read_graph_from_string(txt): if (not txt.startswith('{')): return read_dot(txt) def conv(value): if isinstance(value, basestring): return (('"' + value) + '"') else: return value doc = literal_eval(txt) g = digraph() for (attrs, values) in doc.get('nodes', []): attrs = [(k, conv(v)) for (k, v) in attrs] for value in values: if isinstance(value, basestring): node_name = value attrs_ = attrs else: (node_name, label) = value attrs_ = (attrs + [('label', conv(label))]) g.add_node(node_name, attrs=attrs_) for (attrs, values) in doc.get('edges', []): attrs_ = [(k, conv(v)) for (k, v) in attrs] for value in values: if (len(value) == 3): edge = value[:2] label = value[(- 1)] else: edge = value label = '' g.add_edge(edge, label=label, attrs=attrs_) return g
Read a graph from a string, either in dot format, or our own compressed format. Returns: `pygraph.digraph`: Graph object.
codesearchnet
def to_json_file(self, json_file_path: Union[str, os.PathLike]): with open(json_file_path, 'w', encoding='utf-8') as writer: writer.write(self.to_json_string())
Save this instance to a JSON file. Args: json_file_path (`str` or `os.PathLike`): Path to the JSON file in which this processor instance's parameters will be saved.
github-repos
def run(cls, **kwargs): err_pointer, tmp_pointer, new_bytes = 0, 0, 0 print_logs_live = kwargs.pop("print_logs_live", None) cmd = cls.create(**kwargs) sighandler = SignalHandler() while not Command.is_done(cmd.status): if sighandler.received_term_signal: logging.warning("Received signal {}. Canceling Qubole Command ID: {}".format(sighandler.last_signal, cmd.id)) cls.cancel(cmd) exit() time.sleep(Qubole.poll_interval) cmd = cls.find(cmd.id) if print_logs_live is True: log, err_length, tmp_length = cmd.get_log_partial(err_pointer, tmp_pointer) if err_length != "0": err_pointer += int(err_length) new_bytes = int(err_length) + int(tmp_length) - tmp_pointer tmp_pointer = int(tmp_length) else: tmp_pointer += int(tmp_length) new_bytes = int(tmp_length) if len(log) > 0 and new_bytes > 0: print(log[-new_bytes:], file=sys.stderr) return cmd
Create a command object by issuing a POST request to the /command endpoint Waits until the command is complete. Repeatedly polls to check status Args: `**kwargs`: keyword arguments specific to command type Returns: Command object
juraj-google-style
def get_all_if_deleted(self): with self._lock: results = {} for (add, fut) in self._state.items(): if self._contains_and_deleted(add): results[add] = fut.result() return results
Return all the addresses deleted in the context. Useful in the squash method. Returns: (dict of str to bytes): The addresses and bytes that have been deleted in the context.
codesearchnet
def normalize(self, inplace=False): if inplace: nrm = self.norm() self.data /= nrm return None nrm = self.norm() data_copy = np.array(self.data, copy=True) data_copy /= nrm return Quaternion(data_copy)
Normalizes a Quaternion to unit length so that it represents a valid rotation. Args: inplace (bool): Do an inplace normalization. Returns: Quaternion: Normalized quaternion.
codesearchnet
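A minimal NumPy sketch of the same normalization, treating the quaternion as a plain 4-vector; the Quaternion class itself is not reproduced.

```python
import numpy as np

def normalize_quaternion(q):
    """Return q scaled to unit length so it represents a valid rotation."""
    q = np.asarray(q, dtype=float)
    return q / np.linalg.norm(q)

print(normalize_quaternion([0.0, 0.0, 0.0, 2.0]))  # [0. 0. 0. 1.]
```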
def get_contacts(self): for (jid, item) in self.roster.items.items(): try: self._contacts[jid.bare()].update(item.export_as_json()) except KeyError: self._contacts[jid.bare()] = item.export_as_json() return self._contacts
Returns list of contacts Returns: dict: the roster of contacts
codesearchnet
def absl_to_standard(level): if (not isinstance(level, int)): raise TypeError('Expect an int level, found {}'.format(type(level))) if (level < ABSL_FATAL): level = ABSL_FATAL if (level <= ABSL_DEBUG): return ABSL_TO_STANDARD[level] return ((STANDARD_DEBUG - level) + 1)
Converts an integer level from the absl value to the standard value. Args: level: int, an absl.logging level. Raises: TypeError: Raised when level is not an integer. Returns: The corresponding integer level for use in standard logging.
codesearchnet
def create_asset_delivery_policy(access_token, ams_account, key_delivery_url): path = '/AssetDeliveryPolicies' endpoint = ''.join([ams_rest_endpoint, path]) body = (('{ \t\t"Name":"AssetDeliveryPolicy", \t\t"AssetDeliveryProtocol":"4", \t\t"AssetDeliveryPolicyType":"3", \t\t"AssetDeliveryConfiguration":"[{ \t\t\t\\"Key\\":\\"2\\", \t\t\t\\"Value\\":\\"' + key_delivery_url) + '\\"}]" \t}') return do_ams_post(endpoint, path, body, access_token)
Create Media Service Asset Delivery Policy. Args: access_token (str): A valid Azure authentication token. ams_account (str): Media Service Account. key_delivery_url (str): Key delivery URL. Returns: HTTP response. JSON body.
codesearchnet
def authenticate(json_path=None): msg = 'budou.authentication() is deprecated. Please use budou.get_parser() to obtain a parser instead.' warnings.warn(msg, DeprecationWarning) parser = get_parser('nlapi', credentials_path=json_path) return parser
Gets a Natural Language API parser by authenticating the API. **This method is deprecated.** Please use :obj:`budou.get_parser` to obtain a parser instead. Args: json_path (:obj:`str`, optional): The file path to the service account's credentials. Returns: Parser. (:obj:`budou.parser.NLAPIParser`)
codesearchnet
def get_compiler_ir(self, device_name, platform_name, function_name, flat_args, captured_inputs, stage='hlo'): return pywrap_tfe.TF_GetCompilerIr(self._context_handle, function_name, stage, device_name, flat_args, captured_inputs, platform_name)
Get the compiler IR bytes. Args: device_name: The name of the device with the form as "/job:localhost/replica:0/task:0/device:CPU:0", "/device:TPU:0" etc. When this is used, actual device is needed for getting the compiler IR. platform_name: The name of the platform, e.g. "TPU". When this is used, first we find a device whose name contains the platform, if it is found we get the compiler IR by device. Otherwise the compiler IR is obtained as if using that device. The former logic of falling back to device is necessary, as there are cases of TF variables that need to access devices, but the upper layer may generally choose platform for getting compiler IR in a device-agnostic way. function_name: The name of the function to get the compiler IR. flat_args: The flat argument inputs. captured_inputs: The inputs that are captured. stage: The exported stage for the given function. Returns: The compiler IR bytes.
github-repos
def copy_scoped_meta_graph(from_scope, to_scope, from_graph=None, to_graph=None): from_graph = from_graph or ops.get_default_graph() to_graph = to_graph or ops.get_default_graph() if from_graph == to_graph and from_scope == to_scope: raise ValueError(f"'from_scope' and 'to_scope' need to be different when performing copy in the same graph. Received: 'from_graph': {from_graph}, 'to_graph': {to_graph}, 'from_scope': {from_scope}, 'to_scope': {to_scope}.") orig_meta_graph, var_list = export_scoped_meta_graph(export_scope=from_scope, graph=from_graph) var_list = import_scoped_meta_graph(orig_meta_graph, graph=to_graph, import_scope=to_scope) return var_list
Copies a sub-meta_graph from one scope to another. Args: from_scope: `String` name scope containing the subgraph to be copied. to_scope: `String` name scope under which the copied subgraph will reside. from_graph: Optional `Graph` from which to copy the subgraph. If `None`, the default graph is use. to_graph: Optional `Graph` to which to copy the subgraph. If `None`, the default graph is used. Returns: A dictionary of `Variables` that has been copied into `to_scope`. Raises: ValueError: If `from_scope` and `to_scope` are the same while `from_graph` and `to_graph` are also the same.
github-repos
def df(self): import pandas as pd return pd.concat([w.df(uwi=True) for w in self])
Makes a pandas DataFrame containing Curve data for all the wells in the Project. The DataFrame has a dual index of well UWI and curve Depths. Requires `pandas`. Args: No arguments. Returns: `pandas.DataFrame`.
juraj-google-style
def eval_algorithm(curr, prev): if curr['close'] > prev['close']: v = curr['volume'] elif curr['close'] < prev['close']: v = curr['volume'] * -1 else: v = 0 return prev['obv'] + v
Evaluates OBV Args: curr: Dict of current volume and close prev: Dict of previous OBV and close Returns: Float of OBV
juraj-google-style
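A self-contained sketch that applies the same update rule across a list of bars; the field names follow the dicts used above, and the starting OBV of 0 is a common convention rather than something stated in the snippet.

```python
def on_balance_volume(bars):
    """bars: list of dicts with 'close' and 'volume'. Returns the OBV series."""
    obv = [0.0]                           # assumed starting value
    for prev, curr in zip(bars, bars[1:]):
        if curr['close'] > prev['close']:
            v = curr['volume']
        elif curr['close'] < prev['close']:
            v = -curr['volume']
        else:
            v = 0
        obv.append(obv[-1] + v)
    return obv

bars = [{'close': 10, 'volume': 100},
        {'close': 11, 'volume': 150},
        {'close': 10, 'volume': 120}]
print(on_balance_volume(bars))  # [0.0, 150.0, 30.0]
```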
def create(self, name, nopassword=None, secret=None, encryption=None): if (secret is not None): return self.create_with_secret(name, secret, encryption) elif (nopassword is True): return self.create_with_nopassword(name) else: raise TypeError('either "nopassword" or "secret" must be specified to create a user')
Creates a new user on the local system. Creating users requires either a secret (password) or the nopassword keyword to be specified. Args: name (str): The name of the user to create nopassword (bool): Configures the user to be able to authenticate without a password challenge secret (str): The secret (password) to assign to this user encryption (str): Specifies how the secret is encoded. Valid values are "cleartext", "md5", "sha512". The default is "cleartext" Returns: True if the operation was successful otherwise False Raises: TypeError: if the required arguments are not satisfied
codesearchnet
def parse_mapping(mapping_file: Optional[str]) -> configparser.ConfigParser: LOGGER.debug('Parsing mapping file. Command line: %s', mapping_file) def parse(mapping_file): config = configparser.ConfigParser() config.read_file(mapping_file) return config if (mapping_file is not None): LOGGER.debug('Parsing command line mapping file') return parse(mapping_file) xdg_config_dir = xdg.BaseDirectory.load_first_config('pass-git-helper') if (xdg_config_dir is None): raise RuntimeError('No mapping configured so far at any XDG config location. Please create {config_file}'.format(config_file=DEFAULT_CONFIG_FILE)) mapping_file = os.path.join(xdg_config_dir, CONFIG_FILE_NAME) LOGGER.debug('Parsing mapping file %s', mapping_file) with open(mapping_file, 'r') as file_handle: return parse(file_handle)
Parse the file containing the mappings from hosts to pass entries. Args: mapping_file: Name of the file to parse. If ``None``, the default file from the XDG location is used.
codesearchnet
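A reduced sketch of the mapping-file parsing, reading an INI-style mapping from an in-memory string instead of the XDG config location; the section and option names are made up for illustration.

```python
import configparser
import io

def parse_mapping(file_handle):
    """Parse a host -> pass-entry mapping from a file-like object."""
    config = configparser.ConfigParser()
    config.read_file(file_handle)
    return config

example = io.StringIO(
    "[github.com]\n"
    "target = dev/github\n"
)
mapping = parse_mapping(example)
print(mapping['github.com']['target'])  # dev/github
```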
def identity(n, dtype=None): return backend.numpy.identity(n, dtype=dtype)
Return the identity tensor. The identity tensor is a square tensor with ones on the main diagonal and zeros elsewhere. Args: n: Number of rows (and columns) in the `n x n` output tensor. dtype: Data type of the output tensor. Returns: The identity tensor.
github-repos
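For reference, the plain NumPy equivalent of the call above:

```python
import numpy as np

eye3 = np.identity(3, dtype='float32')   # 3x3 identity matrix
print(eye3)
# [[1. 0. 0.]
#  [0. 1. 0.]
#  [0. 0. 1.]]
```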
def get(self, file_path, ref, **kwargs): file_path = file_path.replace('/', '%2F') return GetMixin.get(self, file_path, ref=ref, **kwargs)
Retrieve a single file. Args: file_path (str): Path of the file to retrieve ref (str): Name of the branch, tag or commit **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabGetError: If the file could not be retrieved Returns: object: The generated RESTObject
juraj-google-style
def _copy_hdxobjects(self, hdxobjects, hdxobjectclass, attribute_to_copy=None): newhdxobjects = list() for hdxobject in hdxobjects: newhdxobjectdata = copy.deepcopy(hdxobject.data) newhdxobject = hdxobjectclass(newhdxobjectdata, configuration=self.configuration) if attribute_to_copy: value = getattr(hdxobject, attribute_to_copy) setattr(newhdxobject, attribute_to_copy, value) newhdxobjects.append(newhdxobject) return newhdxobjects
Helper function to make a deep copy of a supplied list of HDX objects Args: hdxobjects (List[T <= HDXObject]): list of HDX objects to copy hdxobjectclass (type): Type of the HDX Objects to be copied attribute_to_copy (Optional[str]): An attribute to copy over from the HDX object. Defaults to None. Returns: List[T <= HDXObject]: Deep copy of list of HDX objects
juraj-google-style
def _get_ami_dict(json_url): LOG.info("Getting AMI from %s", json_url) response = requests.get(json_url) assert response.ok, "Error getting ami info from {}".format(json_url) ami_dict = response.json() LOG.debug('AMI json contents: %s', ami_dict) return ami_dict
Get AMI from a web URL. Args: json_url (str): URL of the AMI JSON document. Returns: dict: Contents in dictionary format.
juraj-google-style
def __init__(self, maxsize=0): self._maxsize = maxsize self._queue = collections.deque() self._closed = False self._mutex = threading.Lock() self._not_empty = threading.Condition(self._mutex) self._not_full = threading.Condition(self._mutex)
Create a queue object with a given maximum size. Args: maxsize: int size of queue. If <= 0, the queue size is infinite.
github-repos
def _future_command_unlocked(self, cmd): future = self._loop.create_future() asyncio_loop = self._loop.get_loop() def _done_callback(result): retval = result['return_value'] if not result['result']: future.set_exception(HardwareError("Error executing synchronous command", command=cmd, return_value=retval)) else: future.set_result(retval) callback = functools.partial(asyncio_loop.call_soon_threadsafe, _done_callback) self._commands.put((cmd, callback, True, None)) return future
Run command as a coroutine and return a future. Args: cmd (list): The command and arguments that we wish to call. Returns: asyncio.Future: An awaitable future with the result of the operation.
juraj-google-style
def _get_tables(self, base_dir): table_dict = {} for table in self.metadata['tables']: if table['use']: relative_path = os.path.join(base_dir, self.metadata['path'], table['path']) data_table = pd.read_csv(relative_path) pii_fields = self._get_pii_fields(table) data_table = self._anonymize_table(data_table, pii_fields) table_dict[table['name']] = (data_table, table) return table_dict
Load the contents of meta_file and the corresponding data. If fields containing Personally Identifiable Information are detected in the metadata they are anonymized before being assigned into `table_dict`. Args: base_dir(str): Root folder of the dataset files. Returns: dict: Mapping str -> tuple(pandas.DataFrame, dict)
codesearchnet
def initialize_environments(self, batch_size=1): assert batch_size >= 1 self._batch_size = batch_size self._envs = [gym.make(self.base_env_name) for _ in range(batch_size)] if self._env_wrapper_fn is not None: self._envs = list(map(self._env_wrapper_fn, self._envs)) if self._observation_space: assert str(self._observation_space) == str( self._envs[0].observation_space) else: self._observation_space = self._envs[0].observation_space if self._action_space: assert str(self._action_space) == str(self._envs[0].action_space) else: self._action_space = self._envs[0].action_space self._verify_same_spaces() if self.reward_range is None: self._reward_range = self._envs[0].reward_range self._trajectories = trajectory.BatchTrajectory(batch_size=batch_size)
Initializes the environments and trajectories. Subclasses can override this if they don't want a default implementation which initializes `batch_size` environments, but must take care to initialize self._trajectories (this is checked in __init__ anyways). Args: batch_size: (int) Number of `self.base_env_name` envs to initialize.
juraj-google-style
def files_comments_add(self, *, comment: str, file: str, **kwargs) -> SlackResponse: kwargs.update({"comment": comment, "file": file}) return self.api_call("files.comments.add", json=kwargs)
Add a comment to an existing file. Args: comment (str): The body of the comment. e.g. 'Everyone should take a moment to read this file.' file (str): The file id. e.g. 'F1234467890'
juraj-google-style
def rjust_text(text, width=80, indent=0, subsequent=None): text = re.sub(r"\s+", " ", text).strip() if subsequent is None: subsequent = indent wrapper = TextWrapper( width=width, break_long_words=False, replace_whitespace=True, initial_indent=" " * (indent + subsequent), subsequent_indent=" " * subsequent, ) return wrapper.fill(text)[subsequent:]
Wrap text and adjust it to right border. Same as L{wrap_text} with the difference that the text is aligned against the right text border. Args: text (str): Text to wrap and align. width (int): Maximum number of characters per line. indent (int): Indentation of the first line. subsequent (int or None): Indentation of all other lines, if it is ``None``, then the indentation will be same as for the first line.
juraj-google-style
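The wrapping itself comes from the standard library's TextWrapper; a minimal demo of the two indent parameters the function relies on:

```python
from textwrap import TextWrapper

wrapper = TextWrapper(
    width=30,
    initial_indent='    ',      # indent applied only to the first line
    subsequent_indent='  ',     # indent applied to every following line
    break_long_words=False,
)
print(wrapper.fill('the quick brown fox jumps over the lazy dog ' * 2))
```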
def _CheckWindowsRegistryKeyPath(self, filename, artifact_definition, key_path): result = True key_path_segments = key_path.lower().split('\\') if (key_path_segments[0] == '%%current_control_set%%'): result = False logging.warning('Artifact definition: {0:s} in file: {1:s} contains Windows Registry key path that starts with %%CURRENT_CONTROL_SET%%. Replace %%CURRENT_CONTROL_SET%% with HKEY_LOCAL_MACHINE\\System\\CurrentControlSet'.format(artifact_definition.name, filename)) for (segment_index, key_path_segment) in enumerate(key_path_segments): if (key_path_segment.startswith('%%') and key_path_segment.endswith('%%')): if ((segment_index == 1) and (key_path_segment == '%%users.sid%%') and (key_path_segments[0] == 'hkey_users')): continue if key_path_segment.startswith('%%environ_'): result = False logging.warning('Artifact definition: {0:s} in file: {1:s} contains Windows Registry key path that contains an environment variable: "{2:s}". Usage of environment variables in key paths is not encouraged at this time.'.format(artifact_definition.name, filename, key_path_segment)) elif key_path_segment.startswith('%%users.'): result = False logging.warning('Artifact definition: {0:s} in file: {1:s} contains Windows Registry key path that contains a users variable: "{2:s}". Usage of users variables in key paths, except for "HKEY_USERS\\%%users.sid%%", is not encouraged at this time.'.format(artifact_definition.name, filename, key_path_segment)) return result
Checks if a path is a valid Windows Registry key path. Args: filename (str): name of the artifacts definition file. artifact_definition (ArtifactDefinition): artifact definition. key_path (str): Windows Registry key path to validate. Returns: bool: True if the Windows Registry key path is valid.
codesearchnet
def parse_uri(self, uri=None): if (not uri): return rdflib.term.URIRef(self.root) elif (type(uri) == str): if ((type(uri) == str) and (not uri.startswith('http'))): return rdflib.term.URIRef(('%s%s' % (self.root, uri))) else: return rdflib.term.URIRef(uri) elif (type(uri) == rdflib.term.URIRef): return uri else: raise TypeError('invalid URI input')
Parses and cleans up possible URI inputs, returning an instance of rdflib.term.URIRef. Args: uri (rdflib.term.URIRef, str): input URI Returns: rdflib.term.URIRef
codesearchnet
def deep_update(original, new_dict, new_keys_allowed, whitelist): for (k, value) in new_dict.items(): if (k not in original): if (not new_keys_allowed): raise Exception('Unknown config parameter `{}` '.format(k)) if isinstance(original.get(k), dict): if (k in whitelist): deep_update(original[k], value, True, []) else: deep_update(original[k], value, new_keys_allowed, []) else: original[k] = value return original
Updates original dict with values from new_dict recursively. If new key is introduced in new_dict, then if new_keys_allowed is not True, an error will be thrown. Further, for sub-dicts, if the key is in the whitelist, then new subkeys can be introduced. Args: original (dict): Dictionary with default values. new_dict (dict): Dictionary with values to be updated new_keys_allowed (bool): Whether new keys are allowed. whitelist (list): List of keys that correspond to dict values where new subkeys can be introduced. This is only at the top level.
codesearchnet
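A small, self-contained sketch of how deep_update behaves, assuming the function above is in scope; the config dictionaries are made up for illustration.

defaults = {"env": "CartPole-v0", "model": {"fcnet_hiddens": [256, 256]}}
overrides = {"model": {"fcnet_hiddens": [64], "extra_key": True}}

# 'model' is whitelisted, so new subkeys such as 'extra_key' are accepted;
# an unknown top-level key would raise because new_keys_allowed is False.
merged = deep_update(defaults, overrides, new_keys_allowed=False, whitelist=["model"])
print(merged["model"])  # {'fcnet_hiddens': [64], 'extra_key': True}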
def List(self, request, global_params=None):
    config = self.GetMethodConfig('List')
    return self._RunMethod(config, request, global_params=global_params)
Lists all projects to which you have been granted any project role. Args: request: (BigqueryProjectsListRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ProjectList) The response message.
github-repos
def tf_step(self, time, variables, **kwargs):
    fn_loss = kwargs['fn_loss']
    if (variables is None):
        # tf.trainable_variables is a function; it must be called to obtain the list.
        variables = tf.trainable_variables()
    return tf.gradients(fn_loss, variables)
Creates the TensorFlow operations for performing an optimization step on the given variables, including actually changing the values of the variables. Args: time: Time tensor. Not used for this optimizer. variables: List of variables to optimize. **kwargs: fn_loss : loss function tensor to differentiate. Returns: List of delta tensors corresponding to the updates for each optimized variable.
codesearchnet
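A hedged sketch of the underlying tf.gradients call in TF1-style graph mode; the surrounding optimizer class is not reproduced and the toy loss stands in for fn_loss.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.Variable(3.0)
loss = tf.square(x)              # simple stand-in for fn_loss
grads = tf.gradients(loss, [x])  # the same call the method above relies on

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(grads))  # [6.0], since d(x^2)/dx = 2x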
def compute_average_oxidation_state(site):
    try:
        avg_oxi = sum([sp.oxi_state * occu for sp, occu in site.species.items() if sp is not None])
        return avg_oxi
    except AttributeError:
        pass
    try:
        return site.charge
    except AttributeError:
        raise ValueError("Ewald summation can only be performed on structures "
                         "that are either oxidation state decorated or have "
                         "site charges.")
Calculates the average oxidation state of a site Args: site: Site to compute average oxidation state Returns: Average oxidation state of site.
juraj-google-style
def max_zoom(self):
    zoom_levels = [map_layer.max_zoom for map_layer in self.layers]
    return max(zoom_levels)
Get the maximal zoom level of all layers. Returns: int: the maximum of all zoom levels of all layers Raises: ValueError: if no layers exist
codesearchnet
def get(self, name):
    name = str(name)
    if (name not in self._properties):
        raise ArgumentError('Unknown property in DeviceModel', name=name)
    return self._properties[name]
Get a device model property. Args: name (str): The name of the property to get Returns: The value of the property. Raises: ArgumentError: if no property with the given name exists.
codesearchnet
def __init__(self, key, committed, attempted):
    self.key = key
    self.committed = committed
    self.attempted = attempted
Initializes ``MetricResult``. Args: key: A ``MetricKey`` object. committed: Metric data that has been committed (e.g. logical updates) attempted: Metric data that has been attempted (e.g. physical updates)
github-repos
def extract_certs(certs_txt: str) -> List[crypto.X509]:
    pattern = r'-----BEGIN CERTIFICATE-----.+?-----END CERTIFICATE-----'
    certs_txt = re.findall(pattern, certs_txt, flags=re.DOTALL)
    certs = [crypto.load_certificate(crypto.FILETYPE_PEM, cert_txt) for cert_txt in certs_txt]
    return certs
Extracts pycrypto X509 objects from SSL certificates chain string. Args: certs_txt: SSL certificates chain string. Returns: result: List of pycrypto X509 objects.
juraj-google-style
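A usage sketch assuming pyOpenSSL is installed, the extract_certs function above is importable, and chain.pem is a hypothetical file containing one or more PEM certificates.

from OpenSSL import crypto  # pyOpenSSL

with open("chain.pem", "r") as handle:   # hypothetical certificate chain file
    chain_text = handle.read()

certs = extract_certs(chain_text)
for cert in certs:
    # X509 objects expose the subject and validity fields directly.
    print(cert.get_subject().CN, cert.get_notAfter())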
def _pad_batch(self, images: list['torch.Tensor'], return_tensors: Optional[Union[str, TensorType]]) -> tuple: max_size = get_max_height_width(images) grouped_images, grouped_images_index = group_images_by_shape(images) processed_images = {} processed_masks = {} for shape, stacked_images in grouped_images.items(): if return_tensors == 'pt' and len(stacked_images) > 0: device = stacked_images.device mask_template = torch.zeros(max_size, dtype=torch.int64, device=device) original_size = stacked_images.shape[-2:] needs_padding = original_size[0] != max_size[0] or original_size[1] != max_size[1] if needs_padding: padding_bottom = max_size[0] - original_size[0] padding_right = max_size[1] - original_size[1] padding = [0, 0, padding_right, padding_bottom] padded_images = F.pad(stacked_images, padding, fill=0) pixel_mask = mask_template.clone() pixel_mask[:original_size[0], :original_size[1]].fill_(1) pixel_masks = pixel_mask.unsqueeze(0).repeat(stacked_images.shape[0], 1, 1) else: padded_images = stacked_images pixel_masks = torch.ones((stacked_images.shape[0], max_size[0], max_size[1]), dtype=torch.int64, device=stacked_images.device) processed_images[shape] = padded_images processed_masks[shape] = pixel_masks padded_images = reorder_images(processed_images, grouped_images_index) pixel_masks = reorder_images(processed_masks, grouped_images_index) if return_tensors == 'pt' and padded_images: padded_images = torch.stack(padded_images) pixel_masks = torch.stack(pixel_masks) return (padded_images, pixel_masks)
Pad a batch of images to the same size based on the maximum dimensions. Args: images (`list[torch.Tensor]`): List of images to pad. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Returns: `tuple`: Tuple containing padded images and pixel masks.
github-repos
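The core padding-and-mask idea from the method above in isolation, using only public torch ops rather than the processor's internal helpers; the shapes are illustrative.

import torch
import torch.nn.functional as F

image = torch.rand(3, 200, 150)   # C, H, W
max_h, max_w = 224, 224           # target size after padding

pad_bottom = max_h - image.shape[-2]
pad_right = max_w - image.shape[-1]
# F.pad pads the last dim first: (left, right, top, bottom).
padded = F.pad(image, [0, pad_right, 0, pad_bottom], value=0)

mask = torch.zeros(max_h, max_w, dtype=torch.int64)
mask[: image.shape[-2], : image.shape[-1]] = 1   # 1 marks real pixels, 0 marks padding

print(padded.shape, int(mask.sum()))  # torch.Size([3, 224, 224]) 30000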
def geotiff(self, **kwargs):
    if ('proj' not in kwargs):
        kwargs['proj'] = self.proj
    return to_geotiff(self, **kwargs)
Creates a geotiff on the filesystem Args: path (str): optional, path to write the geotiff file to, default is ./output.tif proj (str): optional, EPSG string of projection to reproject to spec (str): optional, if set to 'rgb', write out color-balanced 8-bit RGB tif bands (list): optional, list of bands to export. If spec='rgb' will default to RGB bands, otherwise will export all bands Returns: str: path the geotiff was written to
codesearchnet
def _GetUsernameFromProfilePath(self, path):
    while path and path[-1] == '\\':
        path = path[:-1]
    if path:
        _, _, path = path.rpartition('\\')
    return path
Retrieves the username from a Windows profile path. Trailing path segment separators are ignored. Args: path (str): a Windows path with '\\' as path segment separator. Returns: str: basename, which is the last path segment.
juraj-google-style
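A quick check of the path-splitting behaviour, reproduced as a plain function for illustration since the original is a method on a parser class.

def username_from_profile_path(path):
    # Same logic as the method above, without the class wrapper.
    while path and path[-1] == "\\":
        path = path[:-1]
    if path:
        _, _, path = path.rpartition("\\")
    return path

print(username_from_profile_path("C:\\Users\\Administrator\\"))  # Administrator
print(username_from_profile_path("Administrator"))               # Administrator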
def on_value_event(self, event):
    raise NotImplementedError('on_value_event() is not implemented in the base servicer class')
Callback for Event proto received through the gRPC stream. This Event proto carries a Tensor in its summary.value[0] field. Args: event: The Event proto from the stream to be processed.
github-repos
def multi_rouge_n(sequences, scores_ids, n=2):
    ngrams = [_get_word_ngrams(n, sequence) for sequence in sequences]
    counts = [len(ngram) for ngram in ngrams]
    scores = []
    for (hyp_id, ref_id) in scores_ids:
        evaluated_ngrams = ngrams[hyp_id]
        evaluated_count = counts[hyp_id]
        reference_ngrams = ngrams[ref_id]
        reference_count = counts[ref_id]
        overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
        overlapping_count = len(overlapping_ngrams)
        scores += [f_r_p_rouge_n(evaluated_count, reference_count, overlapping_count)]
    return scores
Efficient way to compute highly repetitive scoring, i.e. when sequences are involved multiple times. Args: sequences (list[str]): list of sequences (either hyp or ref) scores_ids (list[tuple(int)]): list of pairs (hyp_id, ref_id), i.e. scores[i] = rouge_n(scores_ids[i][0], scores_ids[i][1]) Returns: scores: list of length `len(scores_ids)` containing rouge `n` scores as a dict with 'f', 'r', 'p' Raises: KeyError: if there's a value of i in scores_ids that is not in [0, len(sequences))
codesearchnet
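A sketch of the expected call shape, assuming the surrounding ROUGE helpers (_get_word_ngrams, f_r_p_rouge_n) are importable from the same module as multi_rouge_n; the sentences are made up.

hypotheses = ["the cat sat on the mat", "a cat was sitting on the mat"]
reference = "the cat was sitting on the mat"

sequences = hypotheses + [reference]
# Score each hypothesis (index 0 and 1) against the single reference (index 2).
scores_ids = [(0, 2), (1, 2)]

scores = multi_rouge_n(sequences, scores_ids, n=2)
for pair, score in zip(scores_ids, scores):
    print(pair, score["f"], score["r"], score["p"])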
def decode(self, encoded):
    encoded = super().decode(encoded)
    return self.tokenizer.decode([self.itos[index] for index in encoded])
Decodes a tensor into a sequence. Args: encoded (torch.Tensor): Encoded sequence. Returns: str: Sequence decoded from ``encoded``.
juraj-google-style
def phase_uniquizer(all_phases): measurement_name_maker = UniqueNameMaker( itertools.chain.from_iterable( phase.measurements.keys() for phase in all_phases if phase.measurements)) attachment_names = list(itertools.chain.from_iterable( phase.attachments.keys() for phase in all_phases)) attachment_names.extend(itertools.chain.from_iterable([ 'multidim_' + name for name, meas in phase.measurements.items() if meas.dimensions is not None ] for phase in all_phases if phase.measurements)) attachment_name_maker = UniqueNameMaker(attachment_names) for phase in all_phases: for name, _ in sorted(phase.measurements.items()): old_name = name name = measurement_name_maker.make_unique(name) phase.measurements[old_name].name = name phase.measurements[name] = phase.measurements.pop(old_name) for name, _ in sorted(phase.attachments.items()): old_name = name name = attachment_name_maker.make_unique(name) phase.attachments[old_name].name = name phase.attachments[name] = phase.attachments.pop(old_name) return all_phases
Makes the names of phase measurement and attachments unique. This function will make the names of measurements and attachments unique. It modifies the input all_phases. Args: all_phases: the phases to make unique Returns: the phases now modified.
juraj-google-style
def _shape_invariant_to_type_spec(self, shape):
    raise NotImplementedError(f'{type(self).__name__}._shape_invariant_to_type_spec')
Returns a TypeSpec given a shape invariant (used by `tf.while_loop`). Args: shape: A `tf.TensorShape` object. The shape invariant for this `CompositeTensor`, or `None` if a default shape invariant should be used (based on the value of this `CompositeTensor`). Returns: A nested structure whose values are `tf.TensorShape` objects, specifying the shape invariants for the tensors that comprise this `CompositeTensor`.
github-repos
def determine_action(self, issue): resource_type = self.resource_types[issue.resource.resource_type_id] issue_alert_schedule = self.alert_schedule[resource_type] if \ resource_type in self.alert_schedule \ else self.alert_schedule['*'] action_item = { 'action': None, 'action_description': None, 'last_alert': issue.last_alert, 'issue': issue, 'resource': self.resource_classes[self.resource_types[issue.resource.resource_type_id]](issue.resource), 'owners': [], 'stop_after': issue_alert_schedule['stop'], 'remove_after': issue_alert_schedule['remove'], 'notes': issue.notes, 'missing_tags': issue.missing_tags } time_elapsed = time.time() - issue.created stop_schedule = pytimeparse.parse(issue_alert_schedule['stop']) remove_schedule = pytimeparse.parse(issue_alert_schedule['remove']) if self.collect_only: action_item['action'] = AuditActions.IGNORE elif remove_schedule and time_elapsed >= remove_schedule: action_item['action'] = AuditActions.REMOVE action_item['action_description'] = 'Resource removed' action_item['last_alert'] = remove_schedule if issue.update({'last_alert': remove_schedule}): db.session.add(issue.issue) elif stop_schedule and time_elapsed >= stop_schedule: action_item['action'] = AuditActions.STOP action_item['action_description'] = 'Resource stopped' action_item['last_alert'] = stop_schedule if issue.update({'last_alert': stop_schedule}): db.session.add(issue.issue) else: alert_selection = self.determine_alert( issue_alert_schedule['alert'], issue.get_property('created').value, issue.get_property('last_alert').value ) if alert_selection: action_item['action'] = AuditActions.ALERT action_item['action_description'] = '{} alert'.format(alert_selection) action_item['last_alert'] = alert_selection if issue.update({'last_alert': alert_selection}): db.session.add(issue.issue) else: action_item['action'] = AuditActions.IGNORE db.session.commit() return action_item
Determine the action we should take for the issue Args: issue: Issue to determine action for Returns: `dict`
juraj-google-style
def navbar(self):
    window = BaseWindow(self.selenium, self.selenium.current_window_handle)
    with self.selenium.context(self.selenium.CONTEXT_CHROME):
        el = self.selenium.find_element(*self._nav_bar_locator)
    return NavBar(window, el)
Provide access to the Navigation Bar. Returns: :py:class:`NavBar`: FoxPuppet NavBar object.
codesearchnet
def save_image(tensor, filename, nrow=8, padding=2, pad_value=0):
    from PIL import Image
    grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value)
    im = Image.fromarray(pre_pillow_float_img_process(grid))
    im.save(filename)
Save a given Tensor into an image file. Args: tensor (Tensor or list): Image to be saved. If given a mini-batch tensor, saves the tensor as a grid of images by calling ``make_grid``. nrow, padding, pad_value: Grid layout options; these are documented in ``make_grid``.
codesearchnet
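The final PIL step of the function above in isolation — converting an 8-bit array to an image and writing it to disk; the gradient array is synthetic and stands in for the processed grid.

import numpy as np
from PIL import Image

# A synthetic 64x64 RGB gradient in place of the real image grid.
grid = np.linspace(0, 255, 64 * 64 * 3, dtype=np.uint8).reshape(64, 64, 3)
Image.fromarray(grid).save("output.png")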
def from_stat_file(cls, statfile, timestep=1, is_leap_year=False): stat = STAT(statfile) def check_missing(opt_data, data_name): if (opt_data == []): raise ValueError('Stat file contains no optical data.') for (i, x) in enumerate(opt_data): if (x is None): raise ValueError('Missing optical depth data for {} at month {}'.format(data_name, i)) check_missing(stat.monthly_tau_beam, 'monthly_tau_beam') check_missing(stat.monthly_tau_diffuse, 'monthly_tau_diffuse') return cls.from_ashrae_revised_clear_sky(stat.location, stat.monthly_tau_beam, stat.monthly_tau_diffuse, timestep, is_leap_year)
Create an ASHRAE Revised Clear Sky wea object from the monthly sky optical depths in a .stat file. Args: statfile: Full path to the .stat file. timestep: An optional integer to set the number of time steps per hour. Default is 1 for one value per hour. is_leap_year: A boolean to indicate if values are representing a leap year. Default is False.
codesearchnet
def params(self):
    payload = self.payload
    d = {}
    for (i, p) in enumerate(payload['currentConfiguration']):
        type_name = p['typeName']
        cp = payload['configurationParameters'][i]['message']
        name = cp['parameterName']
        if (type_name == 'BTMParameterQuantity'):
            try:
                v = q(p['message']['expression'])
            except:
                v = q(p['message']['value'], p['message']['units'])
        elif (type_name == 'BTMParameterBoolean'):
            v = p['message']['value']
        elif (type_name == 'BTMParameterEnum'):
            enum = p['message']['value']
            enum_map = {d['message']['option']: i for (i, d) in enumerate(cp['options'])}
            v = cp['options'][enum_map[enum]]['message']['optionName']
        d[name] = v
    return d
Get the params of response data from the API. Returns: - d (dict): Dictionary mapping of all configuration values
codesearchnet
def conv_json(self, uri_format='sparql_uri', add_ids=False): def convert_item(ivalue): ' converts an idividual value to a json value\n\n Args:\n ivalue: value of the item to convert\n\n Returns:\n JSON serializable value\n ' nvalue = ivalue if isinstance(ivalue, BaseRdfDataType): if (ivalue.type == 'uri'): if (ivalue.startswith('pyuri') and (uri_format == 'pyuri')): nvalue = getattr(ivalue, 'sparql') else: nvalue = getattr(ivalue, uri_format) else: nvalue = ivalue.to_json elif isinstance(ivalue, RdfClassBase): if (ivalue.subject.type == 'uri'): nvalue = ivalue.conv_json(uri_format, add_ids) elif (ivalue.subject.type == 'bnode'): nvalue = ivalue.conv_json(uri_format, add_ids) elif isinstance(ivalue, list): nvalue = [] for item in ivalue: temp = convert_item(item) nvalue.append(temp) return nvalue rtn_val = {key: convert_item(value) for (key, value) in self.items()} if add_ids: if (self.subject.type == 'uri'): rtn_val['uri'] = self.subject.sparql_uri rtn_val['id'] = sha1(rtn_val['uri'].encode()).hexdigest() return rtn_val
Converts the class to a JSON-compatible Python dictionary. Args: uri_format ('sparql_uri', 'pyuri'): the format in which URI values will be returned Returns: dict: a JSON-compatible Python dictionary
codesearchnet
def declare(self, name, description=None, **kwargs):
    if (not self._is_valid_key(name)):
        raise self.InvalidKeyError('Invalid key name, must begin with a lowercase letter', name)
    if (name in self._declarations):
        raise self.KeyAlreadyDeclaredError('Configuration key already declared', name)
    self._declarations[name] = self.Declaration(name, description=description, **kwargs)
Declare a configuration key with the given name. Args: name: Configuration key to declare, must not have been already declared. description: If provided, use this as the description for this key. **kwargs: Other kwargs to pass to the Declaration, only default_value is currently supported.
codesearchnet
def __contains__(self, k):
    chain = ChainMap(self.scopes, self.globals)
    return chain.__contains__(k)
Check whether a variable has been assigned to. This is **not** the same kind of element-of as described in the class documentation. Args: k (str): The name of the variable to check. Returns: bool: Whether or not the variable has been assigned to.
juraj-google-style
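ChainMap-based membership in miniature — this is the stdlib behaviour the method above leans on; the two mappings are made up.

from collections import ChainMap

scopes = {"x": 1}            # locally assigned names
globals_ = {"print": print}  # globally known names

chain = ChainMap(scopes, globals_)
print("x" in chain)      # True  (found in the first mapping)
print("print" in chain)  # True  (falls through to the second mapping)
print("y" in chain)      # False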
def put(value):
    worker = global_worker
    worker.check_connected()
    with profiling.profile("ray.put"):
        if worker.mode == LOCAL_MODE:
            return value
        object_id = ray._raylet.compute_put_id(
            worker.current_task_id,
            worker.task_context.put_index,
        )
        worker.put_object(object_id, value)
        worker.task_context.put_index += 1
        return object_id
Store an object in the object store. Args: value: The Python object to be stored. Returns: The object ID assigned to this value.
juraj-google-style
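A typical round-trip with the public Ray API; cluster configuration is omitted and ray.init() defaults (local mode) are assumed.

import ray

ray.init()                      # a local instance is enough for the example
object_id = ray.put([1, 2, 3])  # store the value once in the object store
print(ray.get(object_id))       # [1, 2, 3] — fetch it back, possibly from another worker
ray.shutdown()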
def convert_one(self, op: ops.Operation) -> ops.OP_TREE: if not isinstance(op, ops.GateOperation): raise TypeError("{!r} is not a gate operation.".format(op)) if is_native_ion_gate(op.gate): return [op] if isinstance(op.gate, ops.HPowGate) and op.gate.exponent == 1: return [ops.Rx(np.pi).on(op.qubits[0]), ops.Ry(-1 * np.pi/2).on(op.qubits[0])] if isinstance(op.gate, ops.CNotPowGate) and op.gate.exponent == 1: return [ops.Ry(np.pi/2).on(op.qubits[0]), MS(np.pi/4).on(op.qubits[0], op.qubits[1]), ops.Rx(-1*np.pi/2).on(op.qubits[0]), ops.Rx(-1*np.pi/2).on(op.qubits[1]), ops.Ry(-1*np.pi/2).on(op.qubits[0])] mat = protocols.unitary(op, None) if len( op.qubits) <= 2 else None if mat is not None and len(op.qubits) == 1: gates = optimizers.single_qubit_matrix_to_phased_x_z(mat) return [g.on(op.qubits[0]) for g in gates] elif mat is not None and len(op.qubits) == 2: return two_qubit_matrix_to_ion_operations( op.qubits[0], op.qubits[1], mat) else: if self.ignore_failures: return [op] else: raise TypeError( "Don't know how to work with {!r}. " "It isn't a native Ion Trap operation, " "a 1 or 2 qubit gate with a known unitary, " "or composite.".format(op.gate))
Convert a single (one- or two-qubit) operation into ion trap native gates Args: op: gate operation to be converted Returns: the desired operation implemented with ion trap gates
juraj-google-style
def get_project_id():
    if (os.name == 'nt'):
        command = _CLOUD_SDK_WINDOWS_COMMAND
    else:
        command = _CLOUD_SDK_POSIX_COMMAND
    try:
        output = subprocess.check_output(((command,) + _CLOUD_SDK_CONFIG_COMMAND), stderr=subprocess.STDOUT)
    except (subprocess.CalledProcessError, OSError, IOError):
        return None
    try:
        configuration = json.loads(output.decode('utf-8'))
    except ValueError:
        return None
    try:
        return configuration['configuration']['properties']['core']['project']
    except KeyError:
        return None
Gets the project ID from the Cloud SDK. Returns: Optional[str]: The project ID.
codesearchnet
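A sketch of the JSON path being read, using a hard-coded payload rather than invoking the gcloud CLI (the exact config subcommand is an implementation detail of the SDK wrapper above); the project name is illustrative.

import json

# Shape assumed by the lookup above; values are made up.
output = '{"configuration": {"properties": {"core": {"project": "my-sample-project"}}}}'

configuration = json.loads(output)
try:
    project = configuration["configuration"]["properties"]["core"]["project"]
except KeyError:
    project = None
print(project)  # my-sample-project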
def get_metadata_attribute(self, metaname):
    metadata_value = self.metadata.get(metaname, None)
    if metadata_value is None:
        raise NoMetadataException(
            "No metadata attribute named %s" % metaname)
    if not isinstance(metadata_value, list):
        raise TypeError("Metadata is not a list and it should be.")
    if len(metadata_value) > 1:
        return metadata_value
    else:
        return metadata_value[0]
Get the metadata attribute by the name. Args: metaname (:obj:`str`): Name of the attribute Returns: :obj:`list` or :obj:`str`: Value(s) of the requested metadata attribute Raises: NoMetadataException: Attribute error TypeError: Metadata should be a list
juraj-google-style
def has_basal_dendrite(neuron, min_number=1, treefun=_read_neurite_type):
    types = [treefun(n) for n in neuron.neurites]
    return CheckResult((types.count(NeuriteType.basal_dendrite) >= min_number))
Check if a neuron has basal dendrites Arguments: neuron(Neuron): The neuron object to test min_number: minimum number of basal dendrites required treefun: Optional function to calculate the tree type of neuron's neurites Returns: CheckResult with result
codesearchnet
def get_all_profiles(store='local'):
    return {'Domain Profile': get_all_settings(profile='domain', store=store),
            'Private Profile': get_all_settings(profile='private', store=store),
            'Public Profile': get_all_settings(profile='public', store=store)}
Gets all properties for all profiles in the specified store. Args: store (str): The store to use. This is either the local firewall policy or the policy defined by local group policy. Valid options are ``lgpo`` and ``local``. Default is ``local``. Returns: dict: A dictionary containing the specified settings for each profile.
codesearchnet