Dataset columns: code (string, length 20 to 4.93k), docstring (string, length 33 to 1.27k), source (string, 3 classes).
def create_deferred(self, func, input_layer, deferred_args, deferred_kwargs, name):
    my_defaults = _defaults

    def _with_method_complete(*args, **kwargs):
        input_layer = args[0]
        with input_layer.g.as_default(), defaults_scope(**my_defaults), \
                tf.name_scope(name):
            return input_layer._method_complete(func(*args, **kwargs))

    full_args = [input_layer]
    full_args.extend(deferred_args)
    partial_context = {}
    if isinstance(input_layer, _DeferredLayer):
        partial_context = input_layer._partial_context
    return _DeferredLayer(input_layer.bookkeeper,
                          scopes.Template(None, _with_method_complete),
                          full_args,
                          deferred_kwargs,
                          scope=input_layer._scope,
                          defaults=input_layer.defaults,
                          partial_context=partial_context)
Creates a deferred node with captured scope. Args: func: The original function to call. input_layer: The input_layer. deferred_args: The arguments that will be used by the deferred function. deferred_kwargs: The keyword args for the deferred function. name: The name of this layer. Returns: A _DeferredLayer that will execute func in the correct scopes.
juraj-google-style
def get_frame(self, index=None, onset=None):
    if onset:
        index = int(onset * self.fps)
    return super(VideoStim, self).get_frame(index)
Overrides the default behavior by giving access to the onset argument. Args: index (int): Positional index of the desired frame. onset (float): Onset (in seconds) of the desired frame.
codesearchnet
def is_edge_change_point(change_point_index,
                         data_size,
                         edge_segment_size=constants._EDGE_SEGMENT_SIZE):
    return change_point_index > data_size - edge_segment_size
Determines whether a change point lies at the edge of the data so that it can be removed. Args: change_point_index: Index of the change point. data_size: Size of the data. edge_segment_size: Size of the edge segment. Returns: True if the change point falls within the trailing edge segment.
github-repos
def decodes(self, s: str) -> BioCCollection:
    tree = etree.parse(io.BytesIO(bytes(s, encoding='UTF-8')))
    collection = self.__parse_collection(tree.getroot())
    collection.encoding = tree.docinfo.encoding
    collection.standalone = tree.docinfo.standalone
    collection.version = tree.docinfo.xml_version
    return collection
Deserialize ``s`` to a BioC collection object. Args: s: a "str" instance containing a BioC collection Returns: an object of BioCollection
codesearchnet
def delete(self):
    try:
        self._api.table_delete(self._name_parts)
    except google.datalab.utils.RequestException:
        pass
    except Exception as e:
        raise e
    return not self.exists()
Delete the table. Returns: True if the Table no longer exists; False otherwise.
codesearchnet
def open_phrase(self, string, pos):
    if string[pos - 1] == "\\":
        string = string[:pos - 1] + string[pos:]
        pos -= 1
        if pos == 0 or string[pos - 1] != "\\":
            tag = self.meta.search(string, pos + 1)
            return string, None, tag
    child = Phrase(pos)
    escaped, child = self.parse(string[pos + 1:], child)
    string = string[:pos + 1] + escaped
    tag = self.meta.search(string, child.closing + 1)
    return string, child, tag
Helper function of self.parse() handling opening tags. Arguments: string (str): The string being parsed. pos (int): The index/position of the opening tag in the string. Returns: The (possibly) escaped string, a child phrase if the opening tag was not escaped and otherwise None, and a new tag match, either starting at one index past the escaped tag or one index past the closing tag of the child.
juraj-google-style
def _retrieve_object(output_dict: Dict[str, Any], obj: Any) -> None:
    import ROOT
    if isinstance(obj, ROOT.TH1) or isinstance(obj, ROOT.THnBase):
        if isinstance(obj, ROOT.TH1):
            obj.SetDirectory(0)
        ROOT.SetOwnership(obj, False)
        output_dict[obj.GetName()] = obj
    if isinstance(obj, ROOT.TCollection):
        output_dict[obj.GetName()] = {}
        for obj_temp in list(obj):
            _retrieve_object(output_dict[obj.GetName()], obj_temp)
Function to recursively retrieve histograms from a list in a ROOT file. ``SetDirectory(True)`` is applied to TH1 derived hists and python is explicitly given ownership of the retrieved objects. Args: output_dict (dict): Dict under which hists should be stored. obj (ROOT.TObject derived): Object(s) to be stored. If it is a collection, it will be recursed through. Returns: None: Changes in the dict are reflected in the output_dict which was passed.
codesearchnet
def check_network_role(self, public_key):
    state_root = self._current_root_func()
    if state_root == INIT_ROOT_KEY:
        LOGGER.debug("Chain head is not set yet. Permit all.")
        return True
    self._cache.update_view(state_root)
    role = self._cache.get_role("network", state_root)
    if role is None:
        policy_name = "default"
    else:
        policy_name = role.policy_name
    policy = self._cache.get_policy(policy_name, state_root)
    if policy is not None:
        if not self._allowed(public_key, policy):
            LOGGER.debug("Node is not permitted: %s.", public_key)
            return False
    return True
Check the public key of a node on the network to see if they are permitted to participate. The roles being checked are the following, from first to last: "network" "default" The first role that is set will be the one used to enforce if the node is allowed. Args: public_key (string): The public key belonging to a node on the network
juraj-google-style
def _InitializeGraph(self, os_name, artifact_list):
    dependencies = artifact_registry.REGISTRY.SearchDependencies(
        os_name, artifact_list)
    artifact_names, attribute_names = dependencies
    self._AddAttributeNodes(attribute_names)
    self._AddArtifactNodesAndEdges(artifact_names)
Creates the nodes and directed edges of the dependency graph. Args: os_name: String specifying the OS name. artifact_list: List of requested artifact names.
juraj-google-style
def AddStorageMediaImageOptions(self, argument_group): argument_group.add_argument('--partitions', '--partition', dest='partitions', action='store', type=str, default=None, help='Define partitions to be processed. A range of partitions can be defined as: "3..5". Multiple partitions can be defined as: "1,3,5" (a list of comma separated values). Ranges and lists can also be combined as: "1,3..5". The first partition is 1. All partitions can be specified with: "all".') argument_group.add_argument('--volumes', '--volume', dest='volumes', action='store', type=str, default=None, help='Define volumes to be processed. A range of volumes can be defined as: "3..5". Multiple volumes can be defined as: "1,3,5" (a list of comma separated values). Ranges and lists can also be combined as: "1,3..5". The first volume is 1. All volumes can be specified with: "all".')
Adds the storage media image options to the argument group. Args: argument_group (argparse._ArgumentGroup): argparse argument group.
codesearchnet
def _ParseProcessingOptions(self, options):
    argument_helper_names = [
        'process_resources', 'temporary_directory', 'zeromq']
    helpers_manager.ArgumentHelperManager.ParseOptions(
        options, self, names=argument_helper_names)
    worker_memory_limit = getattr(options, 'worker_memory_limit', None)
    if worker_memory_limit and worker_memory_limit < 0:
        raise errors.BadConfigOption(
            'Invalid worker memory limit value cannot be negative.')
    self._worker_memory_limit = worker_memory_limit
Parses the processing options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
juraj-google-style
def deprocess_input(input_array, input_range=(0, 255)):
    input_array = input_array.copy()
    input_array -= input_array.mean()
    input_array /= (input_array.std() + K.epsilon())
    input_array *= 0.1
    input_array += 0.5
    input_array = np.clip(input_array, 0, 1)
    return (input_range[1] - input_range[0]) * input_array + input_range[0]
Utility function to scale the `input_array` to `input_range` throwing away high frequency artifacts. Args: input_array: An N-dim numpy array. input_range: Specifies the input range as a `(min, max)` tuple to rescale the `input_array`. Returns: The rescaled `input_array`.
juraj-google-style
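For illustration, a minimal sketch of how deprocess_input above might be called, assuming numpy is imported and a Keras backend providing K.epsilon() is available; the input array here is made up:

import numpy as np

# Hypothetical usage: rescale a gradient-like array into a displayable 0-255 range.
raw = np.random.randn(224, 224, 3)
img = deprocess_input(raw, input_range=(0, 255))
assert img.min() >= 0 and img.max() <= 255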
def _run_graph(self, device, input_shape, perm, num_iters, datatype): graph = ops.Graph() with graph.as_default(): outputs = build_graph(device, input_shape, perm, datatype, num_iters) with session_lib.Session(graph=graph) as session: variables.global_variables_initializer().run() session.run(outputs) start_time = time.time() session.run(outputs) duration = (time.time() - start_time) / num_iters throughput = np.prod(np.array(input_shape)) * datatype().itemsize * 2 / duration / 1000000000.0 print('%s %s inputshape:%s perm:%s %d %.6fsec, %.4fGB/s.' % (device, str(datatype), str(input_shape).replace(' ', ''), str(perm).replace(' ', ''), num_iters, duration, throughput)) name_template = 'transpose_{device}_{dtype}_input_shape_{inputshape}_perm_{perm}' self.report_benchmark(name=name_template.format(device=device, dtype=str(datatype).replace(' ', ''), inputshape=str(input_shape).replace(' ', ''), perm=str(perm).replace(' ', '')).replace(' ', ''), iters=num_iters, wall_time=duration) return duration
Runs the graph and prints its execution time. Args: device: String, the device to run on. input_shape: Shape of the input tensor. perm: A list of ints with the same length as input tensor's dimension. num_iters: Number of iterations to run the benchmark. datatype: numpy data type of the input tensor. Returns: The duration of the run in seconds.
github-repos
def start_txn(self, txn_name=None):
    if not txn_name:
        txn_name = uuid.uuid4().hex
    txn_response = self.api.http_request(
        'POST', '%s/fcr:tx' % self.root, data=None, headers=None)
    if txn_response.status_code == 201:
        txn_uri = txn_response.headers['Location']
        logger.debug('spawning transaction: %s' % txn_uri)
        txn = Transaction(
            self, txn_name, txn_uri,
            expires=txn_response.headers['Expires'])
        self.txns[txn_name] = txn
        return txn
Request a new transaction from the repository, init a new Transaction, and store it in self.txns. Args: txn_name (str): human name for transaction Returns: (Transaction): returns instance of newly created transaction
codesearchnet
def hist_axis_func(axis_type: enum.Enum) -> Callable[[Hist], Axis]:
    def axis_func(hist: Hist) -> Axis:
        try:
            hist_axis_type = axis_type.value
        except AttributeError:
            hist_axis_type = axis_type
        if hasattr(hist, "ProjectionND") and hasattr(hist, "Projection"):
            return hist.GetAxis(hist_axis_type)
        else:
            axis_function_map = {
                TH1AxisType.x_axis.value: hist.GetXaxis,
                TH1AxisType.y_axis.value: hist.GetYaxis,
                TH1AxisType.z_axis.value: hist.GetZaxis,
            }
            return_func = axis_function_map[hist_axis_type]
            return return_func()

    return axis_func
Wrapper to retrieve the axis of a given histogram. This can be convenient outside of just projections, so it's made available in the API. Args: axis_type: The type of axis to retrieve. Returns: Callable to retrieve the specified axis when given a hist.
juraj-google-style
def _prevent_2nd_derivative(x):
    def grad(dy):
        return array_ops.prevent_gradient(
            dy, message='Second derivative is not implemented.')

    return tf.identity(x), grad
Disables computation of the second derivatives for a tensor. NB: you need to apply a non-identity function to the output tensor for the exception to be raised. Arguments: x: A tensor. Returns: A tensor with the same value and the same derivative as x, but that raises LookupError when trying to compute the second derivatives.
codesearchnet
def get_unprocessed_data(self, how_many, model_settings, mode): candidates = self.data_index[mode] if how_many == -1: sample_count = len(candidates) else: sample_count = how_many desired_samples = model_settings['desired_samples'] words_list = self.words_list data = np.zeros((sample_count, desired_samples)) labels = [] with tf.compat.v1.Session(graph=tf.Graph()) as sess: wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, []) wav_loader = io_ops.read_file(wav_filename_placeholder) wav_decoder = tf.audio.decode_wav(wav_loader, desired_channels=1, desired_samples=desired_samples) foreground_volume_placeholder = tf.compat.v1.placeholder(tf.float32, []) scaled_foreground = tf.multiply(wav_decoder.audio, foreground_volume_placeholder) for i in range(sample_count): if how_many == -1: sample_index = i else: sample_index = np.random.randint(len(candidates)) sample = candidates[sample_index] input_dict = {wav_filename_placeholder: sample['file']} if sample['label'] == SILENCE_LABEL: input_dict[foreground_volume_placeholder] = 0 else: input_dict[foreground_volume_placeholder] = 1 data[i, :] = sess.run(scaled_foreground, feed_dict=input_dict).flatten() label_index = self.word_to_index[sample['label']] labels.append(words_list[label_index]) return (data, labels)
Retrieve sample data for the given partition, with no transformations. Args: how_many: Desired number of samples to return. -1 means the entire contents of this partition. model_settings: Information about the current model being trained. mode: Which partition to use, must be 'training', 'validation', or 'testing'. Returns: List of sample data for the samples, and list of labels in one-hot form.
github-repos
def kill(container, rm=True):
    container = get_container(container)
    if not container:
        raise Exception('No such container: %s' % container)
    unbind_all(container['ip'])
    sudo('docker kill %s' % container['name'])
    if rm:
        sudo('docker rm %s' % container['name'])
Kill a container Args: * container: Container name or ID * rm=True: Remove the container or not
juraj-google-style
def ParseFileObject(self, parser_mediator, file_object): file_header_map = self._GetDataTypeMap('systemd_journal_file_header') try: file_header, _ = self._ReadStructureFromFileObject( file_object, 0, file_header_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile( 'Unable to parse file header with error: {0!s}'.format( exception)) if file_header.signature != self._FILE_SIGNATURE: raise errors.UnableToParseFile('Invalid file signature.') if file_header.header_size not in self._SUPPORTED_FILE_HEADER_SIZES: raise errors.UnableToParseFile( 'Unsupported file header size: {0:d}.'.format( file_header.header_size)) data_hash_table_end_offset = ( file_header.data_hash_table_offset + file_header.data_hash_table_size) field_hash_table_end_offset = ( file_header.field_hash_table_offset + file_header.field_hash_table_size) self._maximum_journal_file_offset = max( data_hash_table_end_offset, field_hash_table_end_offset) entry_object_offsets = self._ParseEntryObjectOffsets( file_object, file_header.entry_array_offset) for entry_object_offset in entry_object_offsets: if entry_object_offset == 0: continue try: fields = self._ParseJournalEntry(file_object, entry_object_offset) except errors.ParseError as exception: parser_mediator.ProduceExtractionWarning(( 'Unable to parse journal entry at offset: 0x{0:08x} with ' 'error: {1!s}').format(entry_object_offset, exception)) return event_data = SystemdJournalEventData() event_data.body = fields.get('MESSAGE', None) event_data.hostname = fields.get('_HOSTNAME', None) event_data.reporter = fields.get('SYSLOG_IDENTIFIER', None) if event_data.reporter and event_data.reporter != 'kernel': event_data.pid = fields.get('_PID', fields.get('SYSLOG_PID', None)) date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=fields['real_time']) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a Systemd journal file-like object. Args: parser_mediator (ParserMediator): parser mediator. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the header cannot be parsed.
juraj-google-style
def __enter__(self): self._old = self._var_scope_store.current_scope if isinstance(self._name_or_scope, VariableScope): self._var_scope_store.open_variable_scope(self._new_name) self._old_subscopes = copy.copy(self._var_scope_store.variable_scopes_count) variable_scope_object = self._cached_variable_scope_object else: self._new_name = self._old.name + '/' + self._name_or_scope if self._old.name else self._name_or_scope self._reuse = self._reuse or self._old.reuse if self._old_name_scope is None: name_scope = self._name_or_scope else: name_scope = self._old_name_scope variable_scope_object = VariableScope(self._reuse, name=self._new_name, initializer=self._old.initializer, regularizer=self._old.regularizer, caching_device=self._old.caching_device, partitioner=self._old.partitioner, dtype=self._old.dtype, use_resource=self._old.use_resource, custom_getter=self._old.custom_getter, name_scope=name_scope, constraint=self._constraint) if self._initializer is not None: variable_scope_object.set_initializer(self._initializer) if self._regularizer is not None: variable_scope_object.set_regularizer(self._regularizer) if self._caching_device is not None: variable_scope_object.set_caching_device(self._caching_device) if self._partitioner is not None: variable_scope_object.set_partitioner(self._partitioner) if self._custom_getter is not None: variable_scope_object.set_custom_getter(_maybe_wrap_custom_getter(self._custom_getter, self._old.custom_getter)) if self._dtype is not None: variable_scope_object.set_dtype(self._dtype) if self._use_resource is not None: variable_scope_object.set_use_resource(self._use_resource) self._var_scope_store.open_variable_scope(self._new_name) self._var_scope_store.current_scope = variable_scope_object self._last_variable_scope_object = variable_scope_object return variable_scope_object
Begins the scope block. Returns: A VariableScope. Raises: ValueError: when trying to reuse within a create scope, or create within a reuse scope, or if reuse is not `None` or `True`. TypeError: when the types of some arguments are not appropriate.
github-repos
def flick(self, x, y, speed):
    self._driver.flick(self, x, y, speed)
Deprecated, use touch('drag', { fromX, fromY, toX, toY, duration(s) }) instead. Flick on the touch screen using finger motion events. This flick command starts at a particular screen location. Support: iOS Args: x(float): The x offset in pixels to flick by. y(float): The y offset in pixels to flick by. speed(float): The speed in pixels per second. Returns: WebElement object.
juraj-google-style
def _callable_func(self, func, axis, *args, **kwargs):
    def callable_apply_builder(df, axis=0):
        if not axis:
            df.index = index
            df.columns = pandas.RangeIndex(len(df.columns))
        else:
            df.columns = index
            df.index = pandas.RangeIndex(len(df.index))
        result = df.apply(func, *args, axis=axis, **kwargs)
        return result

    index = self.index if not axis else self.columns
    func_prepared = self._build_mapreduce_func(callable_apply_builder, axis=axis)
    result_data = self._map_across_full_axis(axis, func_prepared)
    return self._post_process_apply(result_data, axis)
Apply callable functions across given axis. Args: func: The functions to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler.
codesearchnet
def __init__(self, config=None, namespace=None):
    self.driver = get_database_instance(config)
    self.user = generate_key_pair(get_value('secret', 'SECRET', None, config))
    self.namespace = get_value('db.namespace', 'DB_NAMESPACE',
                               'namespace' if not namespace else namespace,
                               config)
    self.logger = logging.getLogger('Plugin')
    logging.basicConfig(level=logging.INFO)
Initialize a :class:`~.Plugin` instance and connect to BigchainDB. Args: *nodes (str): One or more URLs of BigchainDB nodes to connect to as the persistence layer
juraj-google-style
def _process_health_pill_event(self, node_name_set, mapping, target_step, file_path): events_loader = event_file_loader.EventFileLoader(file_path) for event in events_loader.Load(): if (not event.HasField('summary')): logger.warn('An event in a debugger events file lacks a summary.') continue if (event.step < target_step): continue if (event.step > target_step): return True for value in event.summary.value: summary_metadata = value.metadata plugin_data = summary_metadata.plugin_data if (plugin_data.plugin_name == constants.DEBUGGER_PLUGIN_NAME): try: content = json.loads(tf.compat.as_text(summary_metadata.plugin_data.content)) except ValueError as err: logger.warn('Could not parse the JSON string containing data for the debugger plugin: %r, %r', content, err) continue device_name = content['device'] output_slot = content['outputSlot'] else: logger.error('No debugger plugin data found for event with tag %s and node name %s.', value.tag, value.node_name) continue if (not value.HasField('tensor')): logger.warn('An event in a debugger events file lacks a tensor value.') continue match = re.match('^(.*):(\\d+):DebugNumericSummary$', value.node_name) if (not match): logger.warn('A event with a health pill has an invalid watch, (i.e., an unexpected debug op): %r', value.node_name) return None health_pill = self._process_health_pill_value(wall_time=event.wall_time, step=event.step, device_name=device_name, output_slot=output_slot, node_name=match.group(1), tensor_proto=value.tensor, node_name_set=node_name_set) if (not health_pill): continue mapping[health_pill.node_name].append(health_pill) return False
Creates health pills out of data in an event. Creates health pills out of the event and adds them to the mapping. Args: node_name_set: A set of node names that are relevant. mapping: The mapping from node name to HealthPillEvents. This object may be destructively modified. target_step: The target step at which to obtain health pills. file_path: The path to the file with health pill events. Returns: Whether we should stop reading events because future events are no longer relevant.
codesearchnet
def single_qubit_matrix_to_pauli_rotations( mat: np.ndarray, atol: float = 0 ) -> List[Tuple[ops.Pauli, float]]: def is_clifford_rotation(half_turns): return near_zero_mod(half_turns, 0.5, atol=atol) def to_quarter_turns(half_turns): return round(2 * half_turns) % 4 def is_quarter_turn(half_turns): return (is_clifford_rotation(half_turns) and to_quarter_turns(half_turns) % 2 == 1) def is_half_turn(half_turns): return (is_clifford_rotation(half_turns) and to_quarter_turns(half_turns) == 2) def is_no_turn(half_turns): return (is_clifford_rotation(half_turns) and to_quarter_turns(half_turns) == 0) z_rad_before, y_rad, z_rad_after = ( linalg.deconstruct_single_qubit_matrix_into_angles(mat)) z_ht_before = z_rad_before / np.pi - 0.5 m_ht = y_rad / np.pi m_pauli = ops.pauli_gates.X z_ht_after = z_rad_after / np.pi + 0.5 if is_clifford_rotation(z_ht_before): if ((is_quarter_turn(z_ht_before) or is_quarter_turn(z_ht_after)) ^ (is_half_turn(m_ht) and is_no_turn(z_ht_before-z_ht_after))): z_ht_before += 0.5 z_ht_after -= 0.5 m_pauli = ops.pauli_gates.Y if is_half_turn(z_ht_before) or is_half_turn(z_ht_after): z_ht_before -= 1 z_ht_after += 1 m_ht = -m_ht if is_no_turn(m_ht): z_ht_before += z_ht_after z_ht_after = 0 elif is_half_turn(m_ht): z_ht_after -= z_ht_before z_ht_before = 0 rotation_list = [ (ops.pauli_gates.Z, z_ht_before), (m_pauli, m_ht), (ops.pauli_gates.Z, z_ht_after)] return [(pauli, ht) for pauli, ht in rotation_list if not is_no_turn(ht)]
Implements a single-qubit operation with few rotations. Args: mat: The 2x2 unitary matrix of the operation to implement. atol: A limit on the amount of absolute error introduced by the construction. Returns: A list of (Pauli, half_turns) tuples that, when applied in order, perform the desired operation.
juraj-google-style
def SummaryMetadata(self, run, tag):
    accumulator = self.GetAccumulator(run)
    return accumulator.SummaryMetadata(tag)
Return the summary metadata for the given tag on the given run. Args: run: A string name of the run for which summary metadata is to be retrieved. tag: A string name of the tag whose summary metadata is to be retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: A `SummaryMetadata` protobuf.
codesearchnet
def confirm(statement):
    prompt = "{statement} [y/n]".format(statement=statement)
    answer = _ask(prompt, limited_to=["yes", "no", "y", "n"])
    return answer and answer.startswith("y")
Ask the user for confirmation about the specified statement. Args: statement (unicode): statement to ask the user confirmation about. Returns: bool: whether or not specified statement was confirmed.
juraj-google-style
def sonority_from_fts(self, seg):
    def match(m):
        return self.fm.match(fts(m), seg)

    minusHi = BoolTree(match('-hi'), 9, 8)
    minusNas = BoolTree(match('-nas'), 6, 5)
    plusVoi1 = BoolTree(match('+voi'), 4, 3)
    plusVoi2 = BoolTree(match('+voi'), 2, 1)
    plusCont = BoolTree(match('+cont'), plusVoi1, plusVoi2)
    plusSon = BoolTree(match('+son'), minusNas, plusCont)
    minusCons = BoolTree(match('-cons'), 7, plusSon)
    plusSyl = BoolTree(match('+syl'), minusHi, minusCons)
    return plusSyl.get_value()
Given a segment as features, returns the sonority on a scale of 1 to 9. Args: seg (list): collection of (value, feature) pairs representing a segment (vowel or consonant) Returns: int: sonority of `seg` between 1 and 9
codesearchnet
def depth_may_average_ground_temperature(self, value=None):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float '
                'for field `depth_may_average_ground_temperature`'.format(value))
    self._depth_may_average_ground_temperature = value
Corresponds to IDD Field `depth_may_average_ground_temperature` Args: value (float): value for IDD Field `depth_may_average_ground_temperature` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def __init__(self, name=None):
    if not name or name[-1] != '/':
        with tf.compat.v1.name_scope(name or type(self).__name__) as name:
            pass
    self._name = name
Creates the ExponentialFamily. Args: name: Python `str` used as TF namescope for ops created by member functions. Default value: `None` (i.e., the subclass name).
juraj-google-style
def __init__(self, name, combine_fn):
    self.name = name
    self.combine_fn = combine_fn
    self.accumulator = combine_fn.create_accumulator()
    self._add_input = self.combine_fn.add_input
Creates a Counter object. Args: name: the name of this counter. It may be a string, or a CounterName object. combine_fn: the CombineFn to use for aggregation
github-repos
def all(script, face=True, vert=True):
    filter_xml = ''.join([
        ' <filter name="Select All">\n',
        ' <Param name="allFaces" ',
        'value="{}" '.format(str(face).lower()),
        'description="DSelect all Faces" ',
        'type="RichBool" ',
        '/>\n',
        ' <Param name="allVerts" ',
        'value="{}" '.format(str(vert).lower()),
        'description="Select all Vertices" ',
        'type="RichBool" ',
        '/>\n',
        ' </filter>\n'])
    util.write_filter(script, filter_xml)
    return None
Select all the faces of the current mesh Args: script: the FilterScript object or script filename to write the filter to. face (bool): If True the filter will select all the faces. vert (bool): If True the filter will select all the vertices. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
codesearchnet
def list_groups(refresh=False):
    if 'group.list_groups' in __context__ and not refresh:
        return __context__['group.list_groups']
    results = _get_all_groups()
    ret = []
    for result in results:
        ret.append(result.Name)
    __context__['group.list_groups'] = ret
    return ret
Return a list of groups Args: refresh (bool): Refresh the info for all groups in ``__context__``. If False only the groups in ``__context__`` will be returned. If True, the ``__context__`` will be refreshed with current data and returned. Default is False Returns: list: A list of groups on the machine CLI Example: .. code-block:: bash salt '*' group.list_groups
codesearchnet
def set_size(self, mode):
    return len(self.data_index[mode])
Calculates the number of samples in the dataset partition. Args: mode: Which partition, must be 'training', 'validation', or 'testing'. Returns: Number of samples in the partition.
github-repos
def add_step(self, step, run_meta):
    op_log = tfprof_logger.merge_default_with_oplog(self._graph, run_meta=run_meta)
    self._coverage = print_mdl.AddStep(step, _graph_string(self._graph),
                                       run_meta.SerializeToString(),
                                       op_log.SerializeToString())
Add statistics of a step. Args: step: int, An id used to group one or more different `run_meta` together. When profiling with the profile_xxx APIs, user can use the `step` id in the `options` to profile these `run_meta` together. run_meta: RunMetadata proto that contains statistics of a session run.
github-repos
def _get_bases(type_):
    try:
        class _(type_):
            'Check if type_ is subclassable.'
        BaseClass = type_
    except TypeError:
        BaseClass = object

    class MetaClass(_ValidationMeta, BaseClass.__class__):
        'Use the type_ meta and include base validation functionality.'

    return BaseClass, MetaClass
Get the base and meta classes to use in creating a subclass. Args: type_: The type to subclass. Returns: A tuple containing two values: a base class, and a metaclass.
codesearchnet
def is_sequence(value):
    return (hasattr(value, '__iter__') and
            not isinstance(value, (six.string_types, six.binary_type)))
Determine if a value is a sequence type. Returns: ``True`` if `value` is a sequence type (e.g., ``list``, or ``tuple``). String types will return ``False``. NOTE: On Python 3, strings have the __iter__ defined, so a simple hasattr check is insufficient.
codesearchnet
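A quick sketch of the behaviour the is_sequence docstring describes, assuming six is installed and the function above is in scope:

assert is_sequence([1, 2, 3])    # lists are sequences
assert is_sequence((1, 2))       # tuples are sequences
assert not is_sequence('abc')    # strings are excluded despite defining __iter__
assert not is_sequence(b'abc')   # bytes are excluded as well
assert not is_sequence(42)       # ints have no __iter__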
def sg_reverse_seq(tensor, opt):
    opt += tf.sg_opt(axis=1)
    seq_len = tf.not_equal(tensor, tf.zeros_like(tensor)).sg_int().sg_sum(axis=opt.axis)
    return tf.reverse_sequence(tensor, seq_len, opt.axis, name=opt.name)
r"""Reverses variable length slices. Before applying the pure tensorflow function tf.reverse_sequence, this function calculates sequence lengths by counting non-zeros. For example, ``` tensor = [[1, 2, 3, 0, 0], [4, 5, 0, 0, 0]] tensor.sg_reverse_seq() => [[3 2 1 0 0] [5 4 0 0 0]] ``` Args: tensor: A 2-D `Tensor` (automatically given by chain). opt: axis: Axis to reverse. Default is 1. name : If provided, it replaces current tensor's name. Returns: A `Tensor` with the same shape and type as `tensor`.
codesearchnet
def greedy_set_cover(universe, subsets, costs):
    elements = set(e for s in subsets.keys() for e in subsets[s])
    if elements != universe:
        return None
    covered = set()
    cover_sets = []
    while covered != universe:
        min_cost_elem_ratio = float("inf")
        min_set = None
        for s, elements in subsets.items():
            new_elements = len(elements - covered)
            if new_elements != 0:
                cost_elem_ratio = costs[s] / new_elements
                if cost_elem_ratio < min_cost_elem_ratio:
                    min_cost_elem_ratio = cost_elem_ratio
                    min_set = s
        cover_sets.append(min_set)
        covered |= subsets[min_set]
    return cover_sets
Approximate greedy algorithm for set-covering. Can be used on large inputs - though not an optimal solution. Args: universe (list): Universe of elements subsets (dict): Subsets of U {S1:elements,S2:elements} costs (dict): Costs of each subset in S - {S1:cost, S2:cost...}
juraj-google-style
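A small worked example of the greedy set-cover heuristic above; the universe, subsets and costs here are made up for illustration and assume the function above is in scope:

universe = {1, 2, 3, 4, 5}
subsets = {'S1': {1, 2, 3}, 'S2': {2, 4}, 'S3': {3, 4, 5}, 'S4': {4, 5}}
costs = {'S1': 5, 'S2': 10, 'S3': 3, 'S4': 1}
# S4 is picked first (cost per new element 1/2), then S1 covers the remaining 1, 2, 3.
print(greedy_set_cover(universe, subsets, costs))  # ['S4', 'S1']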
def save(self, project): if (('id' in project) and (project['id'] is not None)): self.logger.debug(('Updating existing project: ' + json.dumps(project))) url = ('%(base_url)s/%(project_id)s' % {'base_url': self.base_url, 'project_id': project['id']}) r = self.gbdx_connection.put(url, json=project) try: r.raise_for_status() except: print(r.text) raise return project['id'] else: self.logger.debug(('Creating new project: ' + json.dumps(project))) url = self.base_url r = self.gbdx_connection.post(url, json=project) try: r.raise_for_status() except: print(r.text) raise project_json = r.json() return project_json['id']
Saves an AnswerFactory Project Args: project (dict): Dictionary specifying an AnswerFactory Project. Returns: AnswerFactory Project id
codesearchnet
def write_temp_file(self, content, filename=None, mode='w'):
    if filename is None:
        filename = str(uuid.uuid4())
    fqpn = os.path.join(self.tcex.default_args.tc_temp_path, filename)
    with open(fqpn, mode) as fh:
        fh.write(content)
    return fqpn
Write content to a temporary file. Args: content (bytes|str): The file content. If passing binary data the mode needs to be set to 'wb'. filename (str, optional): The filename to use when writing the file. mode (str, optional): The file write mode which could be either 'w' or 'wb'. Returns: str: Fully qualified path name for the file.
codesearchnet
def get_copy_folder_location():
    copy_settings_path = 'Library/Application Support/Copy Agent/config.db'
    copy_home = None
    copy_settings = os.path.join(os.environ['HOME'], copy_settings_path)
    if os.path.isfile(copy_settings):
        database = sqlite3.connect(copy_settings)
        if database:
            cur = database.cursor()
            query = "SELECT value FROM config2 WHERE option = 'csmRootPath';"
            cur.execute(query)
            data = cur.fetchone()
            copy_home = str(data[0])
            cur.close()
    if not copy_home:
        error('Unable to find your Copy install =(')
    return copy_home
Try to locate the Copy folder. Returns: (str) Full path to the current Copy folder
codesearchnet
def _write_to_zip(self, path, contents):
    if isinstance(path, list):
        path = os.path.sep.join(path)
    self.zf.writestr(path, contents)
_write_to_zip: Write file to zip Args: path: (str) where in zip to write file contents: (str) contents of file to write Returns: None
juraj-google-style
def get_module_by_id(module_id: str) -> Union[EFBChannel, EFBMiddleware]:
    try:
        if master.channel_id == module_id:
            return master
    except NameError:
        pass
    if module_id in slaves:
        return slaves[module_id]
    for i in middlewares:
        if i.middleware_id == module_id:
            return i
    raise NameError("Module ID {} is not found".format(module_id))
Return the module instance of a provided module ID Args: module_id: Module ID, with instance ID if available. Returns: Module instance requested. Raises: NameError: When the module is not found.
juraj-google-style
def get_tensors_by_names(names):
    ret = []
    G = tfv1.get_default_graph()
    for n in names:
        opn, varn = get_op_tensor_name(n)
        ret.append(G.get_tensor_by_name(varn))
    return ret
Get a list of tensors in the default graph by a list of names. Args: names (list):
juraj-google-style
def update_model_path(self, model_path: Optional[str] = None):
    self._model = model_path if model_path else self._model
Updates the pretrained model used by the Hugging Face Pipeline task. Make sure that the new model does the same task as initial model. Args: model_path (str): (Optional) Path to the new trained model from Hugging Face. Defaults to None.
github-repos
def get_mim_phenotypes(genemap_lines):
    phenotype_mims = set()
    phenotypes_found = {}
    for entry in parse_genemap2(genemap_lines):
        hgnc_symbol = entry['hgnc_symbol']
        for phenotype in entry['phenotypes']:
            mim_nr = phenotype['mim_number']
            if mim_nr in phenotypes_found:
                phenotype_entry = phenotypes_found[mim_nr]
                phenotype_entry['inheritance'] = phenotype_entry['inheritance'].union(
                    phenotype['inheritance'])
                phenotype_entry['hgnc_symbols'].add(hgnc_symbol)
            else:
                phenotype['hgnc_symbols'] = set([hgnc_symbol])
                phenotypes_found[mim_nr] = phenotype
    return phenotypes_found
Get a dictionary with phenotypes Use the mim numbers for phenotypes as keys and phenotype information as values. Args: genemap_lines(iterable(str)) Returns: phenotypes_found(dict): A dictionary with mim_numbers as keys and dictionaries with phenotype information as values. { 'description': str, # Description of the phenotype 'hgnc_symbols': set(), # Associated hgnc symbols 'inheritance': set(), # Associated phenotypes 'mim_number': int, # mim number of phenotype }
codesearchnet
def offset(self, num_to_skip):
    query = query_mod.Query(self)
    return query.offset(num_to_skip)
Skip to an offset in a query with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.offset` for more information on this method. Args: num_to_skip (int): The number of results to skip at the beginning of query results. (Must be non-negative.) Returns: ~.firestore_v1beta1.query.Query: An offset query.
codesearchnet
def stSpectogram(signal, fs, win, step, PLOT=False): win = int(win) step = int(step) signal = numpy.double(signal) signal = signal / (2.0 ** 15) DC = signal.mean() MAX = (numpy.abs(signal)).max() signal = (signal - DC) / (MAX - DC) N = len(signal) cur_p = 0 count_fr = 0 nfft = int(win / 2) specgram = numpy.array([], dtype=numpy.float64) while (cur_p + win - 1 < N): count_fr += 1 x = signal[cur_p:cur_p+win] cur_p = cur_p + step X = abs(fft(x)) X = X[0:nfft] X = X / len(X) if count_fr == 1: specgram = X ** 2 else: specgram = numpy.vstack((specgram, X)) FreqAxis = [float((f + 1) * fs) / (2 * nfft) for f in range(specgram.shape[1])] TimeAxis = [float(t * step) / fs for t in range(specgram.shape[0])] if (PLOT): fig, ax = plt.subplots() imgplot = plt.imshow(specgram.transpose()[::-1, :]) fstep = int(nfft / 5.0) FreqTicks = range(0, int(nfft) + fstep, fstep) FreqTicksLabels = [str(fs / 2 - int((f * fs) / (2 * nfft))) for f in FreqTicks] ax.set_yticks(FreqTicks) ax.set_yticklabels(FreqTicksLabels) TStep = int(count_fr/3) TimeTicks = range(0, count_fr, TStep) TimeTicksLabels = ['%.2f' % (float(t * step) / fs) for t in TimeTicks] ax.set_xticks(TimeTicks) ax.set_xticklabels(TimeTicksLabels) ax.set_xlabel('time (secs)') ax.set_ylabel('freq (Hz)') imgplot.set_cmap('jet') plt.colorbar() plt.show() return (specgram, TimeAxis, FreqAxis)
Short-term FFT magnitude for spectrogram estimation. Args: signal: the input signal samples fs: the sampling freq (in Hz) win: the short-term window size (in samples) step: the short-term window step (in samples) PLOT: flag, 1 if results are to be plotted Returns: a numpy array (nFFT x numOfShortTermWindows) with the spectrogram, plus the time axis and the frequency axis.
juraj-google-style
def _compose_output_rep(lhs_rep, rhs_rep, lhs_contraction, rhs_contraction,
                        lhs_batch, rhs_batch):
    output_rep = []
    for dim in lhs_batch:
        output_rep.append(lhs_rep[dim])
    for i in _minus(range(len(lhs_rep)), lhs_batch + lhs_contraction):
        output_rep.append(lhs_rep[i])
    for i in _minus(range(len(rhs_rep)), rhs_batch + rhs_contraction):
        output_rep.append(rhs_rep[i])
    return ''.join(output_rep)
Compose the output string representation. e.g., ij, jk, (((1,), (0,)), ((), ())) -> ik aij, ajk, (((2,), (1,)), ((0,), (0,))) -> aik Args: lhs_rep: A string representation for the left-hand side input array rhs_rep: A string representation for the right-hand side input array lhs_contraction: Sequence[int] (the contraction dimensions of lhs) rhs_contraction: Sequence[int] (the contraction dimensions of rhs) lhs_batch: Sequence[int] (the batch dimensions of lhs) rhs_batch: Sequence[int] (the batch dimensions of rhs) Returns: A string representation of the result array.
github-repos
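A sketch of the string composition performed by _compose_output_rep, reproducing the einsum-style examples from its docstring; the _minus helper is assumed to drop one index sequence from another:

# ij, jk with contraction dims (1,), (0,) and no batch dims -> 'ik'
print(_compose_output_rep('ij', 'jk', (1,), (0,), (), ()))        # ik
# aij, ajk with contraction dims (2,), (1,) and batch dims (0,), (0,) -> 'aik'
print(_compose_output_rep('aij', 'ajk', (2,), (1,), (0,), (0,)))  # aik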
def _ConvertValueBinaryDataToUBInt64(self, value):
    if not value:
        return None
    integer_map = self._GetDataTypeMap('uint64be')
    try:
        return self._ReadStructureFromByteStream(value, 0, integer_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError(
            'Unable to parse integer value with error: {0!s}'.format(exception))
Converts a binary data value into an integer. Args: value (bytes): binary data value containing an unsigned 64-bit big-endian integer. Returns: int: integer representation of binary data value or None if value is not set. Raises: ParseError: if the integer value cannot be parsed.
codesearchnet
def debug(self, status=None, nids=None): nrows, ncols = get_terminal_size() sched_excfile = os.path.join(self.workdir, "_exceptions") if os.path.exists(sched_excfile): with open(sched_excfile, "r") as fh: cprint("Found exceptions raised by the scheduler", "red") cprint(fh.read(), color="red") return if status is not None: tasks = list(self.iflat_tasks(status=status, nids=nids)) else: errors = list(self.iflat_tasks(status=self.S_ERROR, nids=nids)) qcriticals = list(self.iflat_tasks(status=self.S_QCRITICAL, nids=nids)) abicriticals = list(self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids)) tasks = errors + qcriticals + abicriticals ntasks = 0 for task in tasks: print(make_banner(str(task), width=ncols, mark="=")) ntasks += 1 for efname in ["qerr_file", "stderr_file",]: err_file = getattr(task, efname) if err_file.exists: s = err_file.read() if not s: continue print(make_banner(str(err_file), width=ncols, mark="=")) cprint(s, color="red") try: report = task.get_event_report() if report and report.num_errors: print(make_banner(os.path.basename(report.filename), width=ncols, mark="=")) s = "\n".join(str(e) for e in report.errors) else: s = None except Exception as exc: s = str(exc) count = 0 if s is not None: cprint(s, color="red") count += 1 if not count: log_files = task.tmpdir.list_filepaths(wildcard="*LOG_*") if not log_files: cprint("No *LOG_* file in tmpdir. This usually happens if you are running with many CPUs", color="magenta") for log_file in log_files: try: report = EventsParser().parse(log_file) if report.errors: print(report) count += 1 break except Exception as exc: cprint(str(exc), color="red") count += 1 break if not count: cprint("Houston, we could not find any error message that can explain the problem", color="magenta") print("Number of tasks analyzed: %d" % ntasks)
This method is usually used when the flow didn't complete successfully. It analyzes the files produced by the tasks to facilitate debugging. Info is printed to stdout. Args: status: If not None, only the tasks with this status are selected nids: optional list of node identifiers used to filter the tasks.
juraj-google-style
def create_string(self, key, value):
    data = None
    if key is not None and value is not None:
        if isinstance(value, (bool, list, int, dict)):
            value = u'{}'.format(value)
        data = self.db.create(key.strip(), u'{}'.format(json.dumps(value)))
    else:
        self.tcex.log.warning(u'The key or value field was None.')
    return data
Create method of CRUD operation for string data. Args: key (string): The variable to write to the DB. value (any): The data to write to the DB. Returns: (string): Result of DB write.
juraj-google-style
def _build_zmat(self, construction_table):
    c_table = construction_table
    default_cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']
    optional_cols = list(set(self.columns) - {'atom', 'x', 'y', 'z'})
    zmat_frame = pd.DataFrame(columns=default_cols + optional_cols,
                              dtype='float', index=c_table.index)
    zmat_frame.loc[:, optional_cols] = self.loc[c_table.index, optional_cols]
    zmat_frame.loc[:, 'atom'] = self.loc[c_table.index, 'atom']
    zmat_frame.loc[:, ['b', 'a', 'd']] = c_table
    zmat_values = self._calculate_zmat_values(c_table)
    zmat_frame.loc[:, ['bond', 'angle', 'dihedral']] = zmat_values
    zmatrix = Zmat(zmat_frame, metadata=self.metadata,
                   _metadata={'last_valid_cartesian': self.copy()})
    return zmatrix
Create the Zmatrix from a construction table. Args: Construction table (pd.DataFrame): Returns: Zmat: A new instance of :class:`Zmat`.
codesearchnet
def _load_activations(self, filename): logger.info(('Loading activation data from %s...' % filename)) activations = pd.read_csv(filename, sep='\t') activations.columns = [col.lower() for col in list(activations.columns)] mc = ['x', 'y', 'z', 'id', 'space'] if (set(mc) - set(list(activations.columns))): logger.error('At least one of mandatory columns (x, y, z, id, and space) is missing from input file.') return spaces = activations['space'].unique() xyz = activations[['x', 'y', 'z']].values for s in spaces: if (s != self.transformer.target): inds = (activations['space'] == s) xyz[inds] = self.transformer.apply(s, xyz[inds]) activations[['x', 'y', 'z']] = xyz ijk = pd.DataFrame(transformations.xyz_to_mat(xyz), columns=['i', 'j', 'k']) activations = pd.concat([activations, ijk], axis=1) return activations
Load activation data from a text file. Args: filename (str): a string pointing to the location of the txt file to read from.
codesearchnet
def add_to_query(self, query):
    self.handle = win32pdh.AddCounter(query, self.path)
Add the current path to the query Args: query (obj): The handle to the query to add the counter
codesearchnet
def matches_kv(pcoll, regex, keyGroup, valueGroup=0):
    regex = Regex._regex_compile(regex)

    def _process(element):
        match = regex.match(element)
        if match:
            yield (match.group(keyGroup), match.group(valueGroup))

    return pcoll | FlatMap(_process)
Returns the KV pairs if the string matches the regular expression, deriving the key & value from the specified group of the regular expression. Args: regex: the regular expression string or (re.compile) pattern. keyGroup: The Regex group to use as the key. Can be int or str. valueGroup: (optional) Regex group to use the value. Can be int or str. The default value "0" returns entire matched string.
github-repos
def _init_boto3_clients(self): try: profile = self._config.get('environment', {}).get('profile') region = self._config.get('environment', {}).get('region') if profile: self._b3Sess = boto3.session.Session(profile_name=profile) else: self._b3Sess = boto3.session.Session() self._s3 = self._b3Sess.client('s3') self._cloudFormation = self._b3Sess.client('cloudformation', region_name=region) self._ssm = self._b3Sess.client('ssm', region_name=region) return True except Exception as wtf: logging.error('Exception caught in intialize_session(): {}'.format(wtf)) traceback.print_exc(file=sys.stdout) return False
The utility requires boto3 clients to Cloud Formation and S3. Here is where we make them. Args: None Returns: Good or Bad; True or False
codesearchnet
def __init__(self, fraction_of_second=None, time_elements_tuple=None):
    if fraction_of_second is not None:
        if fraction_of_second < 0.0 or fraction_of_second >= 1.0:
            raise ValueError(
                'Fraction of second value: {0:f} out of bounds.'.format(
                    fraction_of_second))
    super(TimeElementsWithFractionOfSecond, self).__init__(
        time_elements_tuple=time_elements_tuple)
    self._precision = None
    self.fraction_of_second = fraction_of_second
Initializes time elements. Args: fraction_of_second (Optional[decimal.Decimal]): fraction of second, which must be a value between 0.0 and 1.0. time_elements_tuple (Optional[tuple[int, int, int, int, int, int]]): time elements, contains year, month, day of month, hours, minutes and seconds. Raises: ValueError: if the time elements tuple is invalid or fraction of second value is out of bounds.
juraj-google-style
def qc_curve_group(self, tests, alias=None):
    keys = [k for k, v in self.data.items() if isinstance(v, Curve)]
    if not keys:
        return {}
    all_tests = tests.get('all', tests.get('All', tests.get('ALL', [])))
    data = {test.__name__: test(self, keys, alias) for test in all_tests}
    results = {}
    for i, key in enumerate(keys):
        this = {}
        for test, result in data.items():
            this[test] = result[i]
        results[key] = this
    return results
Run tests on a cohort of curves. Args: alias (dict): an alias dictionary, mapping mnemonics to lists of mnemonics. Returns: dict.
juraj-google-style
def Get(self, request, global_params=None):
    config = self.GetMethodConfig('Get')
    return self._RunMethod(config, request, global_params=global_params)
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service. Args: request: (CloudbuildProjectsLocationsOperationsGetRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Operation) The response message.
github-repos
def replace(self, **kw):
    if 'tzinfo' in kw:
        if kw['tzinfo'] is None:
            raise TypeError('Can not remove the timezone use asdatetime()')
        else:
            tzinfo = kw['tzinfo']
            del kw['tzinfo']
    else:
        tzinfo = None
    is_dst = None
    if 'is_dst' in kw:
        is_dst = kw['is_dst']
        del kw['is_dst']
    else:
        is_dst = self.is_dst
    replaced = self.asdatetime().replace(**kw)
    return type(self)(replaced, tzinfo=(tzinfo or self.tzinfo.zone), is_dst=is_dst)
Return datetime with new specified fields given as arguments. For example, dt.replace(days=4) would return a new datetime_tz object with exactly the same as dt but with the days attribute equal to 4. Any attribute can be replaced, but tzinfo can not be set to None. Args: Any datetime_tz attribute. Returns: A datetime_tz object with the attributes replaced. Raises: TypeError: If the given replacement is invalid.
codesearchnet
def plot_entropy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
    temperatures = np.linspace(tmin, tmax, ntemp)
    if self.structure:
        ylabel = '$S$ (J/K/mol)'
    else:
        ylabel = '$S$ (J/K/mol-c)'
    fig = self._plot_thermo(self.dos.entropy, temperatures, ylabel=ylabel,
                            ylim=ylim, **kwargs)
    return fig
Plots the vibrational entropy in a temperature range. Args: tmin: minimum temperature tmax: maximum temperature ntemp: number of steps ylim: tuple specifying the y-axis limits. kwargs: kwargs passed to the matplotlib function 'plot'. Returns: matplotlib figure
codesearchnet
def merge_value(self, json_value: Any, target: message.Message) -> None: target_descriptor = target.DESCRIPTOR if annotation_utils.is_primitive_type(target_descriptor): if isinstance(json_value, dict): self._merge_message(json_value, target) extension_field = target_descriptor.fields_by_name.get('extension') if extension_field is None: raise ValueError(f"Invalid primitive. No 'extension' field exists on {target_descriptor.full_name}.") primitive_has_no_value = extensions.create_primitive_has_no_value(extension_field.message_type) proto_utils.append_value_at_field(target, extension_field, primitive_has_no_value) else: wrapper = self.primitive_handler.primitive_wrapper_from_json_value(json_value, type(target), default_timezone=self.default_timezone) wrapper.merge_into(target) elif annotation_utils.is_reference(target_descriptor): self._merge_message(json_value, target) references.split_if_relative_reference(target) elif isinstance(json_value, dict): self._merge_message(json_value, target) elif isinstance(json_value, (tuple, list)) and len(json_value) == 1: self._merge_message(json_value[0], target) else: raise ValueError(f'Expected a JSON object for field of type: {target_descriptor.full_name}.')
Merges the provided json_value into the target Message. Args: json_value: A Python-native representation of JSON data. target: The target Message to merge the JSON data into.
github-repos
def for_model(self, fn):
    return ray.get(self.workers[0].for_model.remote(fn))
Apply the given function to a single model replica. Returns: Result from applying the function.
codesearchnet
def __init__(self, envs, blocking):
    self._envs = envs
    self._blocking = blocking
    observ_space = self._envs[0].observation_space
    if not all(env.observation_space == observ_space for env in self._envs):
        raise ValueError('All environments must use the same observation space.')
    action_space = self._envs[0].action_space
    if not all(env.action_space == action_space for env in self._envs):
        raise ValueError('All environments must use the same observation space.')
Combine multiple environments to step them in batch. To step environments in parallel, environments must support a `blocking=False` argument to their step and reset functions that makes them return callables instead to receive the result at a later time. Args: envs: List of environments. blocking: Step environments after another rather than in parallel. Raises: ValueError: Environments have different observation or action spaces.
juraj-google-style
def raster(self, path, size, bandtype=gdal.GDT_Byte):
    path = getattr(path, 'name', path)
    try:
        is_multiband = len(size) > 2
        nx, ny, nbands = size if is_multiband else size + (1,)
    except (TypeError, ValueError) as exc:
        exc.args = ('Size must be 2 or 3-item sequence',)
        raise
    if nx < 1 or ny < 1:
        raise ValueError('Invalid raster size %s' % (size,))
    if not self._is_empty(path):
        raise IOError('%s already exists, open with Raster()' % path)
    ds = self.Create(path, nx, ny, nbands, bandtype)
    if not ds:
        raise ValueError(
            'Could not create %s using %s' % (path, str(self)))
    return Raster(ds)
Returns a new Raster instance. gdal.Driver.Create() does not support all formats. Arguments: path -- file object or path as str size -- two or three-tuple of (xsize, ysize, bandcount) bandtype -- GDAL pixel data type
juraj-google-style
def as_date(dat):
    LOGGER.debug('as_date(%s)', dat)
    return strict_rfc3339.timestamp_to_rfc3339_utcoffset(
        calendar.timegm(dat.timetuple()))
Return the RFC3339 UTC string representation of the given date and time. Args: dat (:py:class:`datetime.date`): the object/type to be serialized. Raises: TypeError: when ``dat`` is not an instance of ``datetime.date``. Returns: (str) JSON serializable type for the given object.
juraj-google-style
def rated_movies(self, **kwargs):
    path = self._get_guest_session_id_path('rated_movies')
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
Get a list of rated movies for a specific guest session id. Args: page: (optional) Minimum 1, maximum 1000. sort_by: (optional) 'created_at.asc' | 'created_at.desc' language: (optional) ISO 639-1 code. Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def ws010c(self, value=None):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float for field `ws010c`'.format(value))
    self._ws010c = value
Corresponds to IDD Field `ws010c` Wind speed corresponding to 1.0% cumulative frequency of occurrence for coldest month; Args: value (float): value for IDD Field `ws010c` Unit: m/s if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def get_variantid(variant_obj, family_id):
    new_id = parse_document_id(
        chrom=variant_obj['chromosome'],
        pos=str(variant_obj['position']),
        ref=variant_obj['reference'],
        alt=variant_obj['alternative'],
        variant_type=variant_obj['variant_type'],
        case_id=family_id)
    return new_id
Create a new variant id. Args: variant_obj(dict) family_id(str) Returns: new_id(str): The new variant id
codesearchnet
def get_stored_hash(self, temp_ver):
    with open(self._prefixed('%s.hash' % temp_ver.name)) as f:
        return f.read().strip()
Retrieves the hash for the given template version from the store Args: temp_ver (TemplateVersion): template version to retrieve the hash for Returns: str: hash of the given template version
codesearchnet
def __init__(self, path, recursive=True, ignoreErrors=True):
    self._path = os.path.normpath(path)
    self._recursive = recursive
    self._ignoreErrors = ignoreErrors
    self._indexLoaded = False
    self._mibIndex = None
Create an instance of *FileReader* serving a directory. Args: path (str): directory to search MIB files Keyword Args: recursive (bool): whether to include subdirectories ignoreErrors (bool): ignore filesystem access errors
juraj-google-style
def _get_setting(self, key, default_value=None, value_type=str):
    try:
        state_entry = self._state_view.get(
            SettingsView.setting_address(key))
    except KeyError:
        return default_value
    if state_entry is not None:
        setting = Setting()
        setting.ParseFromString(state_entry)
        for setting_entry in setting.entries:
            if setting_entry.key == key:
                return value_type(setting_entry.value)
    return default_value
Get the setting stored at the given key. Args: key (str): the setting key default_value (str, optional): The default value, if none is found. Defaults to None. value_type (function, optional): The type of a setting value. Defaults to `str`. Returns: str: The value of the setting if found, default_value otherwise.
juraj-google-style
def clear_operations_touching(self, qubits: Iterable[ops.Qid],
                              moment_indices: Iterable[int]):
    qubits = frozenset(qubits)
    for k in moment_indices:
        if 0 <= k < len(self._moments):
            self._moments[k] = self._moments[k].without_operations_touching(qubits)
Clears operations that are touching given qubits at given moments. Args: qubits: The qubits to check for operations on. moment_indices: The indices of moments to check for operations within.
codesearchnet
def main(argv=None):
    if argv is None:
        argv = sys.argv
    args = parse_args(argv)
    logging.basicConfig(level=50 - args.verbosity * 10)
    if args.diff:
        mode = merge_pyi.Mode.DIFF
    elif args.in_place:
        mode = merge_pyi.Mode.OVERWRITE
    else:
        mode = merge_pyi.Mode.PRINT
    backup = args.backup or None
    changed = merge_pyi.merge_files(py_path=args.py, pyi_path=args.pyi,
                                    mode=mode, backup=backup)
    if mode == merge_pyi.Mode.OVERWRITE:
        if changed:
            print(f'Merged types to {args.py} from {args.pyi}')
        else:
            print(f'No new types for {args.py} in {args.pyi}')
Merge a source file and a pyi file. Args: argv: Flags and files to process.
github-repos
def get_config_files():
    apps_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), APPS_DIR)
    custom_apps_dir = os.path.join(os.environ['HOME'], CUSTOM_APPS_DIR)
    config_files = set()
    custom_files = set()
    if os.path.isdir(custom_apps_dir):
        for filename in os.listdir(custom_apps_dir):
            if filename.endswith('.cfg'):
                config_files.add(os.path.join(custom_apps_dir, filename))
                custom_files.add(filename)
    for filename in os.listdir(apps_dir):
        if filename.endswith('.cfg') and filename not in custom_files:
            config_files.add(os.path.join(apps_dir, filename))
    return config_files
Return the application configuration files. Return a list of configuration files describing the apps supported by Mackup. The files return are absolute full path to those files. e.g. /usr/lib/mackup/applications/bash.cfg Only one config file per application should be returned, custom config having a priority over stock config. Returns: set of strings.
codesearchnet
def write_string(self, registeraddress, textstring, numberOfRegisters=16):
    _checkInt(numberOfRegisters, minvalue=1,
              description='number of registers for write string')
    _checkString(textstring, 'input string', minlength=1,
                 maxlength=2 * numberOfRegisters)
    self._genericCommand(16, registeraddress, textstring,
                         numberOfRegisters=numberOfRegisters,
                         payloadformat='string')
Write a string to the slave. Each 16-bit register in the slave is interpreted as two characters (1 byte = 8 bits). For example 16 consecutive registers can hold 32 characters (32 bytes). Uses Modbus function code 16. Args: * registeraddress (int): The slave register start address (use decimal numbers, not hex). * textstring (str): The string to store in the slave * numberOfRegisters (int): The number of registers allocated for the string. If the textstring is longer than 2*numberOfRegisters, an error is raised. Shorter strings are padded with spaces. Returns: None Raises: ValueError, TypeError, IOError
codesearchnet
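A usage sketch with minimalmodbus; the serial port, slave address, and register address below are placeholders for the actual device configuration:

import minimalmodbus

instrument = minimalmodbus.Instrument('/dev/ttyUSB0', slaveaddress=1)

# Store a short device name in 4 registers (2 characters per 16-bit register);
# strings shorter than 8 characters are padded with spaces.
instrument.write_string(registeraddress=16, textstring='PUMP-01', numberOfRegisters=4)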
def create_volume(self, volume_name: str, driver_spec: str = None):
    if driver_spec:
        driver = driver_spec
    else:
        driver = 'local'

    if not self._manager:
        raise RuntimeError('Volumes can only be created '
                           'on swarm manager nodes')

    self._client.volumes.create(name=volume_name, driver=driver)
Create a new docker volume. Only manager nodes can create a volume. Args: volume_name (string): Name for the new docker volume driver_spec (string): Driver for the docker volume
juraj-google-style
def exponential(data):
    data = np.hstack(([0.0], np.array(data)))
    cumm = np.cumsum(data)

    def cost(s, t):
        """Cost function for exponential distribution with changing mean

        Args:
            s (int): start index
            t (int): end index
        Returns:
            float: Cost, from start to end
        """
        return -1 * (t - s) * (np.log(t - s) - np.log(cumm[t] - cumm[s]))

    return cost
Creates a segment cost function for a time series with an exponential distribution with changing mean. Args: data (:obj:`list` of float): 1D time series data Returns: function: Function with signature (int, int) -> float where the first argument is the start index and the second is the end index; it returns the cost of that segment.
codesearchnet
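A worked sketch of how the returned cost function behaves, assuming the exponential factory above is in scope:

import numpy as np

# Toy series whose mean changes halfway through.
series = np.concatenate([np.random.exponential(1.0, 50),
                         np.random.exponential(5.0, 50)])
cost = exponential(series)

# Cost of one segment over the whole series vs. splitting at the true change point.
print(cost(0, 100))
print(cost(0, 50) + cost(50, 100))   # typically lower, favouring the split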
def correct_entry(self, entry):
    entry.correction.update(self.get_correction(entry))
    return entry
Corrects a single entry. Args: entry: A DefectEntry object. Returns: A processed entry. Raises: CompatibilityError if entry is not compatible.
juraj-google-style
def abs_path(rel_path):
    return os.path.abspath(
        os.path.join(os.path.dirname(sys._getframe(1).f_code.co_filename), rel_path)
    )
Convert a path that is relative to the module from which this function is called, to an absolute path. Args: rel_path: str Path relative to the location of the module file from which this function is called. Returns: str : Absolute path to the location specified by ``rel_path``.
juraj-google-style
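A short usage sketch, assuming the function above is imported into a module at a hypothetical path:

# Hypothetical layout: this call sits in /project/pkg/config.py.
schema_path = abs_path('data/schema.json')
# -> '/project/pkg/data/schema.json', regardless of the process's working directory.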
def add_to_collections(self, names, value) -> None:
    names = (names,) if isinstance(names, str) else set(names)
    for name in names:
        self.add_to_collection(name, value)
Stores `value` in the collections given by `names`. Note that collections are not sets, so it is possible to add a value to a collection several times. This function makes sure that duplicates in `names` are ignored, but it will not check for pre-existing membership of `value` in any of the collections in `names`. `names` can be any iterable, but if `names` is a string, it is treated as a single collection name. Args: names: The keys for the collections to add to. The `GraphKeys` class contains many standard names for collections. value: The value to add to the collections.
github-repos
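A usage sketch assuming this is the method on a TF1-style Graph (the collection names below are arbitrary):

import tensorflow.compat.v1 as tf

g = tf.Graph()
with g.as_default():
    c = tf.constant(42, name='answer')

g.add_to_collections(['constants', 'answers'], c)   # one value, two collections
print(g.get_collection('constants'), g.get_collection('answers'))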
def check_url_filetoupload(self):
    if self.file_to_upload is None:
        if 'url' in self.data:
            if 'resource_type' not in self.data:
                self.data['resource_type'] = 'api'
            if 'url_type' not in self.data:
                self.data['url_type'] = 'api'
        else:
            raise HDXError('Either a url or a file to upload must be supplied!')
    else:
        if 'url' in self.data:
            if self.data['url'] != hdx.data.dataset.Dataset.temporary_url:
                raise HDXError('Either a url or a file to upload must be supplied not both!')
        if 'resource_type' not in self.data:
            self.data['resource_type'] = 'file.upload'
        if 'url_type' not in self.data:
            self.data['url_type'] = 'upload'
        if 'tracking_summary' in self.data:
            del self.data['tracking_summary']
Check that either a url or a file to upload is provided for the resource, and add resource_type and url_type if not supplied. Returns: None
codesearchnet
def matchall(text, patterns):
    ret = []
    for pattern in patterns:
        match = re.findall(pattern, text)
        ret += match

    return ret
Scan through a string for substrings matching any of the given patterns. Args: text: A string to be scanned. patterns: a list of regex patterns. Returns: a list of all matches; empty if nothing matched.
codesearchnet
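A small usage sketch, assuming the function above is in scope:

text = 'Contact alice@example.com or visit https://example.org for details.'
patterns = [r'[\w.]+@[\w.]+', r'https?://\S+']
print(matchall(text, patterns))
# ['alice@example.com', 'https://example.org']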
def FindFileContainingSymbol(self, symbol):
    symbol = _NormalizeFullyQualifiedName(symbol)
    try:
        return self._descriptors[symbol].file
    except KeyError:
        pass

    try:
        return self._enum_descriptors[symbol].file
    except KeyError:
        pass

    try:
        return self._FindFileContainingSymbolInDb(symbol)
    except KeyError:
        pass

    try:
        return self._file_desc_by_toplevel_extension[symbol]
    except KeyError:
        pass

    message_name, _, extension_name = symbol.rpartition('.')
    try:
        message = self.FindMessageTypeByName(message_name)
        assert message.extensions_by_name[extension_name]
        return message.file
    except KeyError:
        raise KeyError('Cannot find a file containing %s' % symbol)
Gets the FileDescriptor for the file containing the specified symbol. Args: symbol: The name of the symbol to search for. Returns: A FileDescriptor that contains the specified symbol. Raises: KeyError: if the file cannot be found in the pool.
codesearchnet
def get_definition(self, task_name):
    r = self.gbdx_connection.get(self._base_url + '/' + task_name)
    raise_for_status(r)

    return r.json()
Gets definition of a registered GBDX task. Args: task_name (str): Task name. Returns: Dictionary representing the task definition.
juraj-google-style
def bundle_for_objs_and_resources(objs, resources):
    if isinstance(resources, BaseResources):
        js_resources = css_resources = resources
    elif isinstance(resources, tuple) and len(resources) == 2 and all(r is None or isinstance(r, BaseResources) for r in resources):
        js_resources, css_resources = resources
        if js_resources and not css_resources:
            warn('No Bokeh CSS Resources provided to template. If required you will need to provide them manually.')
        if css_resources and not js_resources:
            warn('No Bokeh JS Resources provided to template. If required you will need to provide them manually.')
    else:
        raise ValueError("expected Resources or a pair of optional Resources, got %r" % resources)

    from copy import deepcopy

    use_widgets = _use_widgets(objs) if objs else True
    use_tables = _use_tables(objs) if objs else True
    use_gl = _use_gl(objs) if objs else True

    if js_resources:
        js_resources = deepcopy(js_resources)
        if not use_widgets and "bokeh-widgets" in js_resources.js_components:
            js_resources.js_components.remove("bokeh-widgets")
        if not use_tables and "bokeh-tables" in js_resources.js_components:
            js_resources.js_components.remove("bokeh-tables")
        if not use_gl and "bokeh-gl" in js_resources.js_components:
            js_resources.js_components.remove("bokeh-gl")
        bokeh_js = js_resources.render_js()
    else:
        bokeh_js = None

    models = [obj.__class__ for obj in _all_objs(objs)] if objs else None
    custom_bundle = bundle_models(models)
    if custom_bundle is not None:
        custom_bundle = wrap_in_script_tag(custom_bundle)
        if bokeh_js is not None:
            bokeh_js += "\n" + custom_bundle
        else:
            bokeh_js = custom_bundle

    if css_resources:
        css_resources = deepcopy(css_resources)
        if not use_widgets and "bokeh-widgets" in css_resources.css_components:
            css_resources.css_components.remove("bokeh-widgets")
        if not use_tables and "bokeh-tables" in css_resources.css_components:
            css_resources.css_components.remove("bokeh-tables")
        bokeh_css = css_resources.render_css()
    else:
        bokeh_css = None

    return bokeh_js, bokeh_css
Generate rendered CSS and JS resources suitable for the given collection of Bokeh objects Args: objs (seq[Model or Document]) : resources (BaseResources or tuple[BaseResources]) Returns: tuple
juraj-google-style
def GetAllSubClasses(ast):
    hierarchy = ast.Visit(pytd_visitors.ExtractSuperClasses())
    hierarchy = {cls: list(superclasses)
                 for cls, superclasses in hierarchy.items()}
    return utils.invert_dict(hierarchy)
Compute a class->subclasses mapping. Args: ast: Parsed PYTD. Returns: A dictionary, mapping instances of pytd.Type (types) to lists of pytd.Class (the derived classes).
github-repos
def calculate_row_format(columns, keys=None):
    row_format = ''
    if keys is None:
        keys = columns.keys()
    else:
        keys = [key for key in keys if key in columns]

    for key in keys:
        if len(row_format) > 0:
            row_format += '|'
        row_format += '%%(%s)-%ds' % (key, columns[key])

    return '|' + row_format + '|'
Calculate row format. Args: columns (dict): the keys are the column names and the values the maximum lengths. keys (list): optional list of keys used to order the columns as well as to filter for them. Returns: str: format for table row
codesearchnet
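A worked example of the format string this produces, assuming the function above is in scope:

columns = {'name': 10, 'age': 3}
fmt = calculate_row_format(columns, keys=['name', 'age'])
print(fmt)                                # |%(name)-10s|%(age)-3s|
print(fmt % {'name': 'Ada', 'age': 36})   # |Ada       |36 |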
def adjust_brightness(img, brightness_factor):
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Brightness(img)
    img = enhancer.enhance(brightness_factor)
    return img
Adjust brightness of an Image. Args: img (PIL Image): PIL Image to be adjusted. brightness_factor (float): How much to adjust the brightness. Can be any non-negative number. 0 gives a black image, 1 gives the original image, while 2 increases the brightness by a factor of 2. Returns: PIL Image: Brightness adjusted image.
codesearchnet
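A usage sketch, assuming the function above and its PIL helpers (_is_pil_image, ImageEnhance) are in scope:

from PIL import Image

img = Image.new('RGB', (64, 64), color=(100, 100, 100))
brighter = adjust_brightness(img, 1.5)   # 1.5x brighter
darker = adjust_brightness(img, 0.5)     # half the brightness
print(brighter.getpixel((0, 0)), darker.getpixel((0, 0)))   # roughly (150, 150, 150) and (50, 50, 50)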
def load(self, filename, offset):
    self.offset = offset
    self.filename = filename
    self.bootsector = BootSector(filename=filename,
                                 length=NTFS_BOOTSECTOR_SIZE,
                                 offset=self.offset)
    self.mft_table = MftTable(mft_entry_size=self.bootsector.mft_record_size,
                              filename=self.filename,
                              offset=self.mft_table_offset)
    self.mft_table.preload_entries(NUM_SYSTEM_ENTRIES)
    self._load_volume_information()
Loads NTFS volume information Args: filename (str): Path to file/device to read the volume information from. offset (uint): Valid NTFS partition offset from the beginning of the file/device. Raises: IOError: If source file/device does not exist or is not readable
codesearchnet
def save_attributes_to_hdf5_group(group, name, data):
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    if bad_attributes:
        raise RuntimeError(
            'The following attributes cannot be saved to HDF5 file because '
            f'they are larger than {HDF5_OBJECT_HEADER_LIMIT} bytes: '
            f'{bad_attributes}')

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs['%s%d' % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
Saves attributes (data) of the specified name into the HDF5 group. This method deals with an inherent problem of HDF5 file which is not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Args: group: A pointer to a HDF5 group. name: A name of the attributes to save. data: Attributes data to store. Raises: RuntimeError: If any single attribute is too large to be saved.
github-repos
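A usage sketch, assuming the helper above is importable and h5py is installed; the group and attribute names below are illustrative:

import h5py

names = [s.encode('utf8') for s in ['dense_1', 'dense_2']]   # byte strings, as weight files store them
with h5py.File('weights_demo.h5', 'w') as f:
    layer_group = f.create_group('model_weights')
    save_attributes_to_hdf5_group(layer_group, 'layer_names', names)
    print(list(layer_group.attrs['layer_names']))   # [b'dense_1', b'dense_2']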
def random_subset_by_duration(self, relative_duration, balance_labels=False, label_list_ids=None):
    total_duration = self.corpus.total_duration
    subset_duration = relative_duration * total_duration
    utterance_durations = {utt_idx: utt.duration for utt_idx, utt in self.corpus.utterances.items()}

    if balance_labels:
        all_label_values = self.corpus.all_label_values(label_list_ids=label_list_ids)

        label_durations = {}
        for utt_idx, utt in self.corpus.utterances.items():
            label_durations[utt_idx] = utt.label_total_duration(label_list_ids)

        subset_utterance_ids = utils.select_balanced_subset(
            label_durations,
            subset_duration,
            list(all_label_values),
            select_count_values=utterance_durations,
            seed=self.rand.random())

    else:
        dummy_weights = {utt_idx: {'w': 1} for utt_idx in self.corpus.utterances.keys()}
        subset_utterance_ids = utils.select_balanced_subset(
            dummy_weights,
            subset_duration,
            ['w'],
            select_count_values=utterance_durations,
            seed=self.rand.random())

    filter = subview.MatchingUtteranceIdxFilter(utterance_idxs=set(subset_utterance_ids))
    return subview.Subview(self.corpus, filter_criteria=[filter])
Create a subview of random utterances with an approximate duration relative to the full corpus. Random utterances are selected so that the sum of all utterance durations approximately equals the chosen fraction of the full corpus duration. Args: relative_duration (float): A value between 0 and 1. (e.g. 0.5 will create a subset with approximately 50% of the full corpus duration) balance_labels (bool): If True, the labels of the selected utterances are balanced as far as possible, so that the count/duration of every label within the subset is equal. label_list_ids (list): List of label-list ids. If none is given, all label-lists are considered for balancing. Otherwise only the ones that are in the list are considered. Returns: Subview: The subview representing the subset.
codesearchnet
def get_provider_fn_decorations(provider_fn, default_arg_names):
    if hasattr(provider_fn, _IS_WRAPPER_ATTR):
        provider_decorations = getattr(provider_fn, _PROVIDER_DECORATIONS_ATTR)
        if provider_decorations:
            expanded_provider_decorations = []
            for provider_decoration in provider_decorations:
                if provider_decoration.in_scope_id is None:
                    provider_decoration.in_scope_id = scoping.DEFAULT_SCOPE
                if provider_decoration.arg_name is not None:
                    expanded_provider_decorations.append(provider_decoration)
                else:
                    expanded_provider_decorations.extend(
                        [ProviderDecoration(default_arg_name,
                                            provider_decoration.annotated_with,
                                            provider_decoration.in_scope_id)
                         for default_arg_name in default_arg_names])
            return expanded_provider_decorations
    return [ProviderDecoration(default_arg_name,
                               annotated_with=None,
                               in_scope_id=scoping.DEFAULT_SCOPE)
            for default_arg_name in default_arg_names]
Retrieves the provider method-relevant info set by decorators. If any info wasn't set by decorators, then defaults are returned. Args: provider_fn: a (possibly decorated) provider function default_arg_names: the (possibly empty) arg names to use if none were specified via @provides() Returns: a sequence of ProviderDecoration
codesearchnet
def __init__(self, x=0, y=0):
    self._ptr = ffi.new('SDL_Point *', [x, y])
Construct a new point. Args: x (int): The x position of the point. y (int): The y position of the point.
juraj-google-style
def merge_summaries(prev_summary, next_summary, epsilon):
    merged = np.concatenate((prev_summary, next_summary), axis=1)
    merged = np.take(merged, np.argsort(merged[0]), axis=1)
    return compress_summary(merged, epsilon)
Weighted merge sort of summaries. Given two summaries of distinct data, this function merges (and compresses) them to stay within `epsilon` error tolerance. Args: prev_summary: 2D `np.ndarray` summary to be merged with `next_summary`. next_summary: 2D `np.ndarray` summary to be merged with `prev_summary`. epsilon: A float that determines the approximate desired precision. Returns: A 2-D `np.ndarray` that is a merged summary. First column is the interpolated partition values, the second is the weights (counts).
github-repos
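A worked sketch of the input layout, assuming the function above and its companion compress_summary helper from the same module are in scope:

import numpy as np

# Two summaries over disjoint data: row 0 holds partition values, row 1 holds weights.
prev = np.array([[1.0, 3.0, 5.0],
                 [2.0, 2.0, 2.0]])
nxt = np.array([[2.0, 4.0],
                [1.0, 1.0]])

merged = merge_summaries(prev, nxt, epsilon=0.01)
# Partition values from both inputs, sorted, then compressed to the epsilon budget.
print(merged)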
def backend():
    return _BACKEND
Publicly accessible method for determining the current backend. Returns: String, the name of the backend Keras is currently using. One of `"tensorflow"`, `"torch"`, or `"jax"`. Example: >>> keras.config.backend() 'tensorflow'
github-repos
def write_zip_data(self, temp_parfile, stored_resources):
    logging.debug('Storing Files...')
    with contextlib.closing(zipfile.ZipFile(temp_parfile, 'w', self.compression)) as z:
        items = sorted(stored_resources.items())
        for relative_path, resource in items:
            assert resource.zipinfo.filename == relative_path
            resource.store(z)
Write the second part of a parfile, consisting of ZIP data. Args: temp_parfile: The open file object to write the ZIP data to. stored_resources: A dictionary mapping relative path to the content to store at that path.
github-repos
def add_map(self, counters_map):
    for counter_name in counters_map.counters:
        self.increment(counter_name, counters_map.counters[counter_name])
Add all counters from the map. For each counter in the passed map, adds its value to the counter in this map. Args: counters_map: CounterMap instance to add.
codesearchnet
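A self-contained sketch of the merge behaviour, using a minimal stand-in for the real CountersMap class (which additionally provides its own counters dict and increment method):

class CountersMap:
    def __init__(self):
        self.counters = {}

    def increment(self, name, delta):
        self.counters[name] = self.counters.get(name, 0) + delta

    def add_map(self, counters_map):
        for counter_name in counters_map.counters:
            self.increment(counter_name, counters_map.counters[counter_name])

shard_a, shard_b = CountersMap(), CountersMap()
shard_a.increment('rows_read', 10)
shard_b.increment('rows_read', 7)
shard_b.increment('rows_skipped', 1)
shard_a.add_map(shard_b)
print(shard_a.counters)   # {'rows_read': 17, 'rows_skipped': 1}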