Columns:
  code       string, lengths 20 to 4.93k
  docstring  string, lengths 33 to 1.27k
  source     string, 3 classes
def morph_dict(d, convert_function):
    new = {}
    for k, v in six.iteritems(d):
        new_v = v
        if isinstance(v, dict):
            new_v = morph_dict(v, convert_function)
        elif isinstance(v, list):
            new_v = list()
            for x in v:
                new_v.append(morph_dict(x, convert_function))
        new[convert_function(k)] = new_v
    return new
Convert a nested dictionary from one convention to another. Args: d (dict): dictionary (nested or not) to be converted. convert_function (func): function that takes the string in one convention and returns it in the other one. Returns: Dictionary with the new keys.
juraj-google-style
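A minimal usage sketch (assumes `six` is installed; the key converter here is only an example):

import six  # required by morph_dict

def upper_keys(key):
    # Example convert_function: upper-case every key.
    return key.upper()

nested = {"a": 1, "b": {"c": [{"d": 2}]}}
print(morph_dict(nested, upper_keys))  # {'A': 1, 'B': {'C': [{'D': 2}]}}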
def CleanVacuousVersions(clients=None, dry_run=True): if not clients: index = client_index.CreateClientIndex() clients = index.LookupClients(["."]) clients.sort() with data_store.DB.GetMutationPool() as pool: logging.info("checking %d clients", len(clients)) for batch in collection.Batch(clients, 10000): client_infos = data_store.DB.MultiResolvePrefix( batch, ["aff4:", "aff4:"], data_store.DB.ALL_TIMESTAMPS) for client, type_list in client_infos: cleared = 0 kept = 0 updates = [] for a, _, ts in type_list: if ts != 0: updates.append((ts, a)) updates = sorted(updates) dirty = True for ts, a in updates: if a == "aff4:type": if dirty: kept += 1 dirty = False else: cleared += 1 if not dry_run: pool.DeleteAttributes(client, ["aff4:type"], start=ts, end=ts) if pool.Size() > 1000: pool.Flush() else: dirty = True logging.info("%s: kept %d and cleared %d", client, kept, cleared)
A script to remove no-op client versions. This script removes versions of a client when it is identical to the previous, in the sense that no versioned attributes were changed since the previous client version. Args: clients: A list of ClientURN, if empty cleans all clients. dry_run: whether this is a dry run
juraj-google-style
def get_object(cls, api_token, droplet_id):
    droplet = cls(token=api_token, id=droplet_id)
    droplet.load()
    return droplet
Class method that will return a Droplet object by ID. Args: api_token (str): token droplet_id (int): droplet id
juraj-google-style
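An illustrative call, assuming the function is bound as a classmethod on a DigitalOcean-style `Droplet` class (token and ID below are placeholders, not real values):

# Hypothetical values for illustration only.
droplet = Droplet.get_object(api_token="YOUR_API_TOKEN", droplet_id=12345)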
def add(self, email):
    if email not in self._collaborators:
        self._collaborators[email] = ShareRequestValue.Add
        self._dirty = True
Add a collaborator. Args: email (str): Collaborator email address.
juraj-google-style
def convertTimestamps(column):
    tempColumn = column
    try:
        # Probe a random row; this raises if it cannot be parsed as a datetime.
        tempValue = np.datetime64(column[randint(0, len(column.index) - 1)])
        tempColumn = column.apply(to_datetime)
    except Exception:
        pass
    return tempColumn
Convert a dtype of a given column to a datetime. This method tries to do this by brute force. Args: column (pandas.Series): A Series object with all rows. Returns: column: Converted to datetime if no errors occurred, else the original column will be returned.
juraj-google-style
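A quick usage sketch, assuming pandas and numpy are available and that `np`, `randint` and `to_datetime` have been imported under the names the snippet expects:

import numpy as np
import pandas as pd
from pandas import to_datetime
from random import randint

dates = pd.Series(["2021-01-01", "2021-02-15", "2021-03-30"])
print(convertTimestamps(dates).dtype)  # datetime64[ns] if parsing succeeds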
async def check_in(self):
    res = await self.connection(
        'POST',
        'tournaments/{}/participants/{}/check_in'.format(self._tournament_id, self._id))
    self._refresh_from_json(res)
Checks this participant in |methcoro| Warning: |unstable| Raises: APIException
codesearchnet
def UploadUsers(self, hash_algorithm, hash_key, accounts):
    return self.rpc_helper.UploadAccount(
        hash_algorithm,
        base64.urlsafe_b64encode(hash_key),
        [GitkitUser.ToRequest(i) for i in accounts])
Uploads multiple users to Gitkit server. Args: hash_algorithm: string, the hash algorithm. hash_key: array, raw key of the hash algorithm. accounts: list of GitkitUser. Returns: A dict of failed accounts. The key is the index of the 'accounts' list, starting from 0.
juraj-google-style
def create(self, path, mime_type='application/octet-stream',
           compression_type=CompressionTypes.AUTO):
    return self._path_open(path, 'wb', mime_type, compression_type)
Returns a write channel for the given file path. Args: path: string path of the file object to be written to the system mime_type: MIME type to specify the type of content in the file object compression_type: Type of compression to be used for this object Returns: file handle with a close function for the user to use
github-repos
def get_yielded_type(type_hint):
    if isinstance(type_hint, typing.TypeVar):
        return typing.Any
    if isinstance(type_hint, AnyTypeConstraint):
        return type_hint
    if is_consistent_with(type_hint, Iterator[Any]):
        return type_hint.yielded_type
    if is_consistent_with(type_hint, Tuple[Any, ...]):
        if isinstance(type_hint, TupleConstraint):
            return Union[type_hint.tuple_types]
        else:
            return type_hint.inner_type
    if is_consistent_with(type_hint, Iterable[Any]):
        if isinstance(type_hint, UnionConstraint):
            yielded_types = set()
            for typ in type_hint.inner_types():
                yielded_types.add(get_yielded_type(typ))
            return Union[yielded_types]
        return type_hint.inner_type
    raise ValueError('%s is not iterable' % type_hint)
Obtains the type of elements yielded by an iterable. Note that "iterable" here means: can be iterated over in a for loop, excluding strings and dicts. Args: type_hint: (TypeConstraint) The iterable in question. Must be normalize()-d. Returns: Yielded type of the iterable. Raises: ValueError if not iterable.
github-repos
def accepts(regex, negate, *values):
    return any(v and regex.search(v) for v in values) != negate
Given a compiled regex and a negate flag, find whether any of the values match. Args: regex (Pattern): compiled regular expression to search for. negate (bool): invert the result when True. *values (str): strings to test against the regex. Returns: bool: True if any value matches (inverted when negate is True).
juraj-google-style
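A self-contained example:

import re

pattern = re.compile(r"\d+")
print(accepts(pattern, False, "abc", "x42"))  # True: "x42" matches
print(accepts(pattern, True, "abc", "x42"))   # False: result is negated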
def decompose(miz_file: Path, output_folder: Path):
    mission_folder, assets_folder = NewMiz._get_subfolders(output_folder)
    NewMiz._wipe_folders(mission_folder, assets_folder)
    LOGGER.info('unzipping mission file')
    with Miz(miz_file) as miz:
        version = miz.mission.d['version']
        LOGGER.debug('mission version: "%s"', version)
        LOGGER.info('copying assets to: "%s"', assets_folder)
        ignore = shutil.ignore_patterns('mission')
        shutil.copytree(str(miz.temp_dir), str(assets_folder), ignore=ignore)
        NewMiz._reorder_warehouses(assets_folder)
        LOGGER.info('decomposing mission table into: "%s" (this will take a while)', mission_folder)
        NewMiz._decompose_dict(miz.mission.d, 'base_info', mission_folder, version, miz)
Decompose this Miz into a JSON structure. Args: miz_file: MIZ file path, as a Path output_folder: folder to output the JSON structure to, as a Path
juraj-google-style
def register_auth_system(self, auth_system):
    auth_system_settings = dbconfig.get('auth_system')
    if auth_system.name not in auth_system_settings['available']:
        auth_system_settings['available'].append(auth_system.name)
        dbconfig.set('default', 'auth_system', DBCChoice(auth_system_settings))
    if auth_system.name == auth_system_settings['enabled'][0]:
        self.active_auth_system = auth_system
        auth_system().bootstrap()
        logger.debug('Registered {} as the active auth system'.format(auth_system.name))
        return True
    else:
        logger.debug('Not trying to load the {} auth system as it is disabled by config'.format(auth_system.name))
        return False
Register a given authentication system with the framework. Returns `True` if the `auth_system` is registered as the active auth system, else `False` Args: auth_system (:obj:`BaseAuthPlugin`): A subclass of the `BaseAuthPlugin` class to register Returns: `bool`
juraj-google-style
def enable_argscope_for_module(module, log_shape=True):
    if is_tfv2() and module == tf.layers:
        module = tf.compat.v1.layers
    for name, obj in getmembers(module):
        if isfunction(obj):
            setattr(module, name,
                    enable_argscope_for_function(obj, log_shape=log_shape))
Overwrite all functions of a given module to support argscope. Note that this function monkey-patches the module and therefore could have unexpected consequences. It has been only tested to work well with ``tf.layers`` module. Example: .. code-block:: python import tensorflow as tf enable_argscope_for_module(tf.layers) Args: log_shape (bool): print input/output shapes of each function.
codesearchnet
def set_file_to_upload(self, file_to_upload):
    if 'url' in self.data:
        del self.data['url']
    self.file_to_upload = file_to_upload
Delete any existing url and set the file uploaded to the local path provided Args: file_to_upload (str): Local path to file to upload Returns: None
codesearchnet
def _handle_metrics(self, outputs, targets=None, skip_target_masks=None, sample_weights=None, masks=None, return_weighted_metrics=False, return_weighted_and_unweighted_metrics=False): skip_target_masks = skip_target_masks or [False] * len(outputs) metric_results = [] with backend.name_scope('metrics'): for i in range(len(outputs)): if skip_target_masks[i]: continue output = outputs[i] if outputs else None target = targets[i] if targets else None output_mask = masks[i] if masks else None if return_weighted_and_unweighted_metrics or not return_weighted_metrics: metric_results.extend(self._handle_per_output_metrics(self._per_output_metrics[i], target, output, output_mask)) if return_weighted_and_unweighted_metrics or return_weighted_metrics: metric_results.extend(self._handle_per_output_metrics(self._per_output_weighted_metrics[i], target, output, output_mask, weights=sample_weights[i] if sample_weights else None)) return metric_results
Handles calling metric functions. Args: outputs: List of outputs (predictions). targets: List of targets. skip_target_masks: Optional. List of boolean for whether the corresponding target should be ignored or not. sample_weights: Optional list of sample weight arrays. masks: List of computed output mask values. return_weighted_metrics: Flag that indicates whether weighted metrics should be computed instead of unweighted metrics. This flag is ignored when `return_weighted_and_unweighted_metrics` is enabled. return_weighted_and_unweighted_metrics: Flag that is used to indicate whether both weighted and unweighted metrics should be computed. When this is not enabled, we use `return_weighted_metrics` param to indicate whether weighted or unweighted metrics should be returned. Returns: A list of metric result tensors.
github-repos
def _replace_oov(original_vocab, line):
    return u" ".join(
        [word if word in original_vocab else u"UNK" for word in line.split()])
Replace out-of-vocab words with "UNK". This maintains compatibility with published results. Args: original_vocab: a set of strings (The standard vocabulary for the dataset) line: a unicode string - a space-delimited sequence of words. Returns: a unicode string - a space-delimited sequence of words.
juraj-google-style
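A self-contained example:

vocab = {u"the", u"cat", u"sat"}
print(_replace_oov(vocab, u"the dog sat"))  # "the UNK sat"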
def wait_for_notification(self, notification_class=BaseNotification):
    if notification_class:
        if notification_class is BaseNotification:
            message = "No notification was shown."
        else:
            message = "{0} was not shown.".format(notification_class.__name__)
        self.wait.until(
            lambda _: isinstance(self.notification, notification_class),
            message=message,
        )
        return self.notification
    else:
        self.wait.until(
            lambda _: self.notification is None,
            message="Unexpected notification shown.",
        )
Wait for the specified notification to be displayed. Args: notification_class (:py:class:`BaseNotification`, optional): The notification class to wait for. If `None` is specified it will wait for any notification to be closed. Defaults to `BaseNotification`. Returns: :py:class:`BaseNotification`: Firefox notification.
juraj-google-style
def __call__(self, *args, **kwargs): if not hasattr(self, '_thread_local'): raise RuntimeError('You must call `super().__init__()` in the layer constructor.') inputs, args, kwargs = self._split_out_first_arg(args, kwargs) input_list = nest.flatten(inputs) if _in_functional_construction_mode(self, inputs, args, kwargs, input_list): return self._functional_construction_call(inputs, args, kwargs, input_list) call_context = base_layer_utils.call_context() if any((isinstance(x, (np_arrays.ndarray, np.ndarray, float, int)) for x in input_list)): inputs = nest.map_structure(_convert_numpy_or_python_types, inputs) input_list = nest.flatten(inputs) input_masks, mask_is_implicit = self._get_input_masks(inputs, input_list, args, kwargs) if self._expects_mask_arg and mask_is_implicit: kwargs['mask'] = input_masks args, kwargs, training_mode = self._set_training_mode(args, kwargs, call_context) if not call_context.in_call: self._clear_losses() eager = context.executing_eagerly() with call_context.enter(layer=self, inputs=inputs, build_graph=not eager, training=training_mode): input_spec.assert_input_compatibility(self.input_spec, inputs, self.name) if eager: call_fn = self.call name_scope = self._name else: name_scope = self._name_scope() call_fn = self._autographed_call() with ops.name_scope_v2(name_scope): if not self.built: self._maybe_build(inputs) if self._autocast: inputs = self._maybe_cast_inputs(inputs, input_list) with autocast_variable.enable_auto_cast_variables(self._compute_dtype_object): outputs = call_fn(inputs, *args, **kwargs) if self._activity_regularizer: self._handle_activity_regularization(inputs, outputs) if self._supports_masking: self._set_mask_metadata(inputs, outputs, input_masks, not eager) if self._saved_model_inputs_spec is None: self._set_save_spec(inputs) return outputs
Wraps `call`, applying pre- and post-processing steps. Args: *args: Positional arguments to be passed to `self.call`. **kwargs: Keyword arguments to be passed to `self.call`. Returns: Output tensor(s). Note: - The following optional keyword arguments are reserved for specific uses: * `training`: Boolean scalar tensor of Python boolean indicating whether the `call` is meant for training or inference. * `mask`: Boolean input mask. - If the layer's `call` method takes a `mask` argument (as some Keras layers do), its default value will be set to the mask generated for `inputs` by the previous layer (if `input` did come from a layer that generated a corresponding mask, i.e. if it came from a Keras layer with masking support. - If the layer is not built, the method will call `build`. Raises: ValueError: if the layer's `call` method returns None (an invalid value). RuntimeError: if `super().__init__()` was not called in the constructor.
github-repos
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode('utf8') if hasattr(n, 'decode') else n
                for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while '%s%d' % (name, chunk_id) in group.attrs:
            data.extend([n.decode('utf8') if hasattr(n, 'decode') else n
                         for n in group.attrs['%s%d' % (name, chunk_id)]])
            chunk_id += 1
    return data
Loads attributes of the specified name from the HDF5 group. This method deals with an inherent problem of HDF5 file which is not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Args: group: A pointer to a HDF5 group. name: A name of the attributes to load. Returns: data: Attributes data. Copied from Keras to Transformers to avoid versioning issues.
github-repos
def _replace_ragged_with_flat_values(value, partition_lists, flat_values_nrows):
    if ragged_tensor.is_ragged(value):
        value = ragged_tensor.convert_to_tensor_or_ragged_tensor(value)
        partition_lists.append(value._nested_row_partitions)
        nrows = tensor_shape.dimension_at_index(value.flat_values.shape, 0).value
        if nrows is not None:
            flat_values_nrows.append(nrows)
        return value.flat_values

    def recurse(v):
        return _replace_ragged_with_flat_values(v, partition_lists, flat_values_nrows)

    if isinstance(value, list):
        return [recurse(v) for v in value]
    elif isinstance(value, tuple):
        return tuple(recurse(v) for v in value)
    elif isinstance(value, dict):
        return {k: recurse(v) for k, v in value.items()}
    else:
        return value
Replace RaggedTensors with their flat_values, and record their partitions. Returns a copy of `value`, with any nested `RaggedTensor`s replaced by their `flat_values` tensor. Looks inside lists, tuples, and dicts. Appends each `RaggedTensor`'s `RowPartition`s to `partition_lists`. Args: value: The value that should be transformed by replacing `RaggedTensors`. partition_lists: An output parameter used to record the row partitions for any `RaggedTensors` that were replaced. flat_values_nrows: An output parameter used to record the outer dimension size for each replacement `flat_values` (when known). Contains a list of int. Returns: A copy of `value` with nested `RaggedTensors` replaced by their `values`.
github-repos
def ParseFileObject(self, parser_mediator, file_object): regf_file = pyregf.file() try: regf_file.open_file_object(file_object) except IOError: return root_key = regf_file.get_root_key() if (root_key is None): regf_file.close() return root_file_key = root_key.get_sub_key_by_path(self._AMCACHE_ROOT_FILE_KEY) if (root_file_key is None): regf_file.close() return for volume_key in root_file_key.sub_keys: for am_entry in volume_key.sub_keys: self._ProcessAMCacheFileKey(am_entry, parser_mediator) root_program_key = root_key.get_sub_key_by_path(self._AMCACHE_ROOT_PROGRAM_KEY) if (root_program_key is None): regf_file.close() return for am_entry in root_program_key.sub_keys: self._ProcessAMCacheProgramKey(am_entry, parser_mediator) regf_file.close()
Parses an Amcache.hve file for events. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object.
codesearchnet
def __init__(self, *nodes, timeout=None):
    self.nodes = nodes
    self.timeout = timeout
    self.connection_pool = Pool([
        Connection(node_url=node['endpoint'], headers=node['headers'])
        for node in nodes
    ])
Initializes an instance of :class:`~bigchaindb_driver.transport.Transport`. Args: nodes: each node is a dictionary with the keys `endpoint` and `headers` timeout (int): Optional timeout in seconds.
juraj-google-style
def etm_supported(self):
    res = self._dll.JLINKARM_ETM_IsPresent()
    if res == 1:
        return True
    # Fall back to checking the debug info for an ETM entry in the ROM table.
    info = ctypes.c_uint32(0)
    index = enums.JLinkROMTable.ETM
    res = self._dll.JLINKARM_GetDebugInfo(index, ctypes.byref(info))
    if res == 1:
        return False
    return True
Returns if the CPU core supports ETM. Args: self (JLink): the ``JLink`` instance. Returns: ``True`` if the CPU has the ETM unit, otherwise ``False``.
codesearchnet
def getlines(self, bufnr=None):
    buf = self._vim.buffers[bufnr] if bufnr else self._vim.current.buffer
    return buf[:]
Get all lines of a buffer as a list. Args: bufnr (Optional[int]): A Vim buffer number, current if ``None``. Returns: List[str]
codesearchnet
def compute_writer_results(results):
    if not results:
        return
    sources, targets, delayeds = split_results(results)
    if targets:
        delayeds.append(da.store(sources, targets, compute=False))
    if delayeds:
        da.compute(delayeds)
    if targets:
        for target in targets:
            if hasattr(target, 'close'):
                target.close()
Compute all the given dask graphs `results` so that the files are saved. Args: results (iterable): Iterable of dask graphs resulting from calls to `scn.save_datasets(..., compute=False)`
juraj-google-style
def clean_doctest_list(doctest_file: str, overwrite: bool=False): non_existent_paths = [] all_paths = [] with open(doctest_file, 'r', encoding='utf-8') as f: for line in f: line = line.strip().split(' ')[0] path = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(line) if len(non_existent_paths) > 0: non_existent_paths = '\n'.join([f'- {f}' for f in non_existent_paths]) raise ValueError(f'`{doctest_file}` contains non-existent paths:\n{non_existent_paths}') sorted_paths = sorted(all_paths) if all_paths != sorted_paths: if not overwrite: raise ValueError(f'Files in `{doctest_file}` are not in alphabetical order, run `make fix-copies` to fix this automatically.') with open(doctest_file, 'w', encoding='utf-8') as f: f.write('\n'.join(sorted_paths) + '\n')
Cleans the doctest in a given file. Args: doctest_file (`str`): The path to the doctest file to check or clean. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to fix problems. If `False`, will error when the file is not clean.
github-repos
def register_macro(name: str, func: Callable, allow_overwrite: bool=False) -> None:
    if hasattr(Circuit, name):
        if allow_overwrite:
            warnings.warn(f'Circuit has attribute `{name}`.')
        else:
            raise ValueError(f'Circuit has attribute `{name}`.')
    if name.startswith('run_with_'):
        if allow_overwrite:
            warnings.warn(f'Gate name `{name}` may conflict with run of backend.')
        else:
            raise ValueError(f"Gate name `{name}` shall not start with 'run_with_'.")
    if not allow_overwrite:
        if name in GATE_SET:
            raise ValueError(f"Gate '{name}' is already exists in gate set.")
        if name in GLOBAL_MACROS:
            raise ValueError(f"Macro '{name}' is already exists.")
    GLOBAL_MACROS[name] = func
Register new macro to Circuit. Args: name (str): The name of macro. func (callable): The function to be called. allow_overwrite (bool, optional): If True, allow to overwrite the existing macro. Otherwise, raise the ValueError. Raises: ValueError: The name is duplicated with existing macro, gate or method. When `allow_overwrite=True`, this error is not raised.
codesearchnet
def update_hash(src_file):
    hash_file = local.path(src_file) + ".hash"
    new_hash = 0
    with open(hash_file, 'w') as h_file:
        new_hash = get_hash_of_dirs(src_file)
        h_file.write(str(new_hash))
    return new_hash
Update the hash for the given file. Args: src_file: The file whose hash should be recomputed and written to its sidecar ".hash" file. Returns: The newly computed hash.
juraj-google-style
def save(self, file_prefix, checkpoint_number=None, session=None, options=None): options = options or checkpoint_options.CheckpointOptions() feed_dict = {} use_session = not context.executing_eagerly() and (not ops.inside_function()) if checkpoint_number: file_prefix = '%s-%d' % (file_prefix, checkpoint_number) if use_session: if self._object_graph_feed_tensor is None: with ops.device('/cpu:0'): self._object_graph_feed_tensor = constant_op.constant('', dtype=dtypes.string) self._file_prefix_feed_tensor = constant_op.constant('', dtype=dtypes.string) object_graph_tensor = self._object_graph_feed_tensor file_prefix_tensor = self._file_prefix_feed_tensor feed_dict[file_prefix_tensor] = file_prefix else: with ops.device('/cpu:0'): file_prefix_tensor = ops.convert_to_tensor(file_prefix, dtype=dtypes.string) object_graph_tensor = None if not tensor_util.is_tensor(file_prefix): file_io.recursive_create_dir(os.path.dirname(file_prefix)) save_path, new_feed_additions = self._save_cached_when_graph_building(file_prefix_tensor, object_graph_tensor, options) if new_feed_additions: feed_dict.update(new_feed_additions) if not use_session: session = None elif session is None: session = get_session() if session: return session.run(save_path, feed_dict=feed_dict) elif use_session: raise RuntimeError(f'Unable to save checkpoint to "{file_prefix}" in graph mode without a default session. Please use `with tf.Session():` to create a session.') else: return save_path
Save a training checkpoint. The saved checkpoint includes variables created by this object and any Trackable objects it depends on at the time `Saver.save()` is called. Args: file_prefix: A prefix to use for the checkpoint filenames (/path/to/directory/and_a_prefix). Names are generated based on this prefix and `checkpoint_number`, if provided. checkpoint_number: An integer variable or Tensor, used to number checkpoints. Typically this value is saved along with other variables in training checkpoints, which will happen automatically if it was created by `root_trackable` or one of its dependencies (via `Trackable._add_variable`). session: The session to evaluate variables in. Ignored when executing eagerly. If not provided when graph building, the default session is used. options: Optional `tf.train.CheckpointOptions` object. Returns: The full path to the checkpoint. Raises: RuntimeError: if called in V1 Graph mode without a default session.
github-repos
def get_data(self, how_many, offset, model_settings, background_frequency, background_volume_range, time_shift, mode, sess): candidates = self.data_index[mode] if how_many == -1: sample_count = len(candidates) else: sample_count = max(0, min(how_many, len(candidates) - offset)) data = np.zeros((sample_count, model_settings['fingerprint_size'])) labels = np.zeros(sample_count) desired_samples = model_settings['desired_samples'] use_background = self.background_data and mode == 'training' pick_deterministically = mode != 'training' for i in range(offset, offset + sample_count): if how_many == -1 or pick_deterministically: sample_index = i else: sample_index = np.random.randint(len(candidates)) sample = candidates[sample_index] if time_shift > 0: time_shift_amount = np.random.randint(-time_shift, time_shift) else: time_shift_amount = 0 if time_shift_amount > 0: time_shift_padding = [[time_shift_amount, 0], [0, 0]] time_shift_offset = [0, 0] else: time_shift_padding = [[0, -time_shift_amount], [0, 0]] time_shift_offset = [-time_shift_amount, 0] input_dict = {self.wav_filename_placeholder_: sample['file'], self.time_shift_padding_placeholder_: time_shift_padding, self.time_shift_offset_placeholder_: time_shift_offset} if use_background or sample['label'] == SILENCE_LABEL: background_index = np.random.randint(len(self.background_data)) background_samples = self.background_data[background_index] if len(background_samples) <= model_settings['desired_samples']: raise ValueError('Background sample is too short! Need more than %d samples but only %d were found' % (model_settings['desired_samples'], len(background_samples))) background_offset = np.random.randint(0, len(background_samples) - model_settings['desired_samples']) background_clipped = background_samples[background_offset:background_offset + desired_samples] background_reshaped = background_clipped.reshape([desired_samples, 1]) if sample['label'] == SILENCE_LABEL: background_volume = np.random.uniform(0, 1) elif np.random.uniform(0, 1) < background_frequency: background_volume = np.random.uniform(0, background_volume_range) else: background_volume = 0 else: background_reshaped = np.zeros([desired_samples, 1]) background_volume = 0 input_dict[self.background_data_placeholder_] = background_reshaped input_dict[self.background_volume_placeholder_] = background_volume if sample['label'] == SILENCE_LABEL: input_dict[self.foreground_volume_placeholder_] = 0 else: input_dict[self.foreground_volume_placeholder_] = 1 summary, data_tensor = sess.run([self.merged_summaries_, self.output_], feed_dict=input_dict) self.summary_writer_.add_summary(summary) data[i - offset, :] = data_tensor.flatten() label_index = self.word_to_index[sample['label']] labels[i - offset] = label_index return (data, labels)
Gather samples from the data set, applying transformations as needed. When the mode is 'training', a random selection of samples will be returned, otherwise the first N clips in the partition will be used. This ensures that validation always uses the same samples, reducing noise in the metrics. Args: how_many: Desired number of samples to return. -1 means the entire contents of this partition. offset: Where to start when fetching deterministically. model_settings: Information about the current model being trained. background_frequency: How many clips will have background noise, 0.0 to 1.0. background_volume_range: How loud the background noise will be. time_shift: How much to randomly shift the clips by in time. mode: Which partition to use, must be 'training', 'validation', or 'testing'. sess: TensorFlow session that was active when processor was created. Returns: List of sample data for the transformed samples, and list of label indexes Raises: ValueError: If background samples are too short.
github-repos
def __init__(self, byte_size, is_complete=False):
    super(DataTypeMapSizeHint, self).__init__()
    self.byte_size = byte_size
    self.is_complete = is_complete
Initializes a data type map size hint. Args: byte_size (int): byte size. is_complete (optional[bool]): True if the size is the complete size of the data type.
juraj-google-style
def delete(self, filename):
    folder = 'Packages' if is_package(filename) else 'Scripts'
    path = os.path.join(self.connection['mount_point'], folder, filename)
    if os.path.isdir(path):
        shutil.rmtree(path)
    elif os.path.isfile(path):
        os.remove(path)
Delete a file from the repository. This method will not delete a script from a migrated JSS. Please remove migrated scripts with jss.Script.delete. Args: filename: String filename only (i.e. no path) of file to delete. Will handle deleting scripts vs. packages automatically.
codesearchnet
def remove_redistribution(self, protocol):
    protocols = ['bgp', 'rip', 'static', 'connected']
    if protocol not in protocols:
        # Note: a space was missing between the two concatenated string
        # literals in the original message ("must bebgp...").
        raise ValueError('redistributed protocol must be '
                         'bgp, connected, rip or static')
    cmd = 'no redistribute {}'.format(protocol)
    return self.configure_ospf(cmd)
Removes a protocol redistribution from OSPF. Args: protocol (str): protocol to stop redistributing. Returns: bool: True if the command completes successfully Raises: ValueError: This will be raised if the protocol passed is not one of the following: [rip, bgp, static, connected]
juraj-google-style
def base_list_parser():
    base_parser = ArgumentParser(add_help=False)
    base_parser.add_argument('-F', '--format', action='store', default='default',
                             choices=['csv', 'json', 'yaml', 'default'],
                             help='choose the output format')
    return base_parser
Creates a parser with arguments specific to formatting lists of resources. Returns: {ArgumentParser}: Base parser with default list args
codesearchnet
def _allocate_ips_to_nics(self, conf): for dom_name, dom_spec in conf.get('domains', {}).items(): for idx, nic in enumerate(dom_spec.get('nics', [])): if 'ip' in nic: continue net = self._get_net(conf, dom_name, nic) if net['type'] != 'nat': continue allocated = net['mapping'].values() vacant = _create_ip( net['gw'], set(range(2, 255)).difference( set([int(ip.split('.')[-1]) for ip in allocated]) ).pop() ) nic['ip'] = vacant self._add_nic_to_mapping(net, dom_spec, nic)
For all the nics of all the domains in the conf that have dynamic ip, allocate one and addit to the network mapping Args: conf (dict): Configuration spec to extract the domains from Returns: None
juraj-google-style
def get_calendar(self, **kwargs):
    start_date = util.date_string(kwargs.get('start_day', '01'),
                                  kwargs.get('start_month', '01'),
                                  kwargs.get('start_year', '1970'))
    end_date = util.date_string(kwargs.get('end_day', '01'),
                                kwargs.get('end_month', '01'),
                                kwargs.get('end_year', '1970'))
    params = {'SelectDateBegin': start_date, 'SelectDateEnd': end_date}
    result = self.make_request('bus', 'get_calendar', **params)
    if not util.check_result(result):
        return (False, result.get('resultDescription', 'UNKNOWN ERROR'))
    values = util.response_list(result, 'resultValues')
    return (True, [emtype.CalendarItem(**a) for a in values])
Obtain EMT calendar for a range of dates. Args: start_day (int): Starting day of the month in format DD. The number is automatically padded if it only has one digit. start_month (int): Starting month number in format MM. The number is automatically padded if it only has one digit. start_year (int): Starting year number in format YYYY. end_day (int): Ending day of the month in format DD. The number is automatically padded if it only has one digit. end_month (int): Ending month number in format MM. The number is automatically padded if it only has one digit. end_year (int): Ending year number in format YYYY. Returns: Status boolean and parsed response (list[CalendarItem]), or message string in case of error.
codesearchnet
def write_markdown_to_file(self, f):
    print("---", file=f)
    print("---", file=f)
    print("<!-- This file is machine generated: DO NOT EDIT! -->", file=f)
    print("", file=f)
    # NOTE: the original page heading printed here was lost in extraction;
    # a generic index heading is assumed.
    print("# Index", file=f)
    print("", file=f)
    fullname_f = lambda name: self._members[name][0]
    anchor_f = lambda name: _get_anchor(self._module_to_name, fullname_f(name))
    for filename, library in self._filename_to_library_map:
        sorted_names = sorted(library.mentioned, key=lambda x: (str.lower(x), x))
        member_names = [n for n in sorted_names if n in self._members]
        full_filename = self._path_prefix + filename
        # Link format reconstructed: each member is assumed to link to its
        # anchor in the library page (the anchor fragment was lost in extraction).
        links = ["[`%s`](%s#%s)" % (name, full_filename, anchor_f(name))
                 for name in member_names]
        if links:
            print("* **[%s](%s)**:" % (library.title, full_filename[:-3]), file=f)
            for link in links:
                print("  * %s" % link, file=f)
            print("", file=f)
Writes this index to file `f`. The output is formatted as an unordered list. Each list element contains the title of the library, followed by a list of symbols in that library hyperlinked to the corresponding anchor in that library. Args: f: The output file.
juraj-google-style
def Artifacts(self, os_name=None, cpe=None, label=None):
    hit = lambda x: x[0] == x[1] or not x[0]
    seq = [(self.os_name, os_name), (self.cpe, cpe), (self.label, label)]
    return all(map(hit, seq))
Whether the conditions applies, modulo host data. Args: os_name: An OS string. cpe: A CPE string. label: A label string. Returns: True if os_name, cpe or labels match. Empty values are ignored.
juraj-google-style
def get_v2_optimizer(name, **kwargs):
    try:
        return _V2_OPTIMIZER_MAP[name](**kwargs)
    except KeyError:
        raise ValueError(
            'Could not find requested v2 optimizer: {}\nValid choices: {}'.format(
                name, list(_V2_OPTIMIZER_MAP.keys())))
Get the v2 optimizer requested. This is only necessary until v2 are the default, as we are testing in Eager, and Eager + v1 optimizers fail tests. When we are in v2, the strings alone should be sufficient, and this mapping can theoretically be removed. Args: name: string name of Keras v2 optimizer. **kwargs: any kwargs to pass to the optimizer constructor. Returns: Initialized Keras v2 optimizer. Raises: ValueError: if an unknown name was passed.
github-repos
def add_action_to(cls, parser, action, subactions, level):
    p = parser.add_parser(action.name,
                          description=action.description,
                          argument_default=argparse.SUPPRESS)
    for arg in action.args:
        arg.add_argument_to(p)
    if subactions:
        subparsers = cls._add_subparsers_required(
            p,
            dest=settings.SUBASSISTANT_N_STRING.format(level),
            title=cls.subactions_str,
            description=cls.subactions_desc)
        for subact, subsubacts in sorted(subactions.items(), key=lambda x: x[0].name):
            cls.add_action_to(subparsers, subact, subsubacts, level + 1)
Adds given action to given parser Args: parser: instance of devassistant_argparse.ArgumentParser action: devassistant.actions.Action subclass subactions: dict with subactions - {SubA: {SubB: {}}, SubC: {}}
codesearchnet
def signature_cert_chain_url(url):
    r = urlparse(url)
    if not r.scheme.lower() == 'https':
        warnings.warn('Certificate URL scheme is invalid.')
        return False
    if not r.hostname.lower() == 's3.amazonaws.com':
        warnings.warn('Certificate URL hostname is invalid.')
        return False
    if not os.path.normpath(r.path).startswith('/echo.api/'):
        warnings.warn('Certificate URL path is invalid.')
        return False
    if r.port and not r.port == 443:
        warnings.warn('Certificate URL port is invalid.')
        return False
    return True
Validate URL specified by SignatureCertChainUrl. See `validate.request` for additional info. Args: url: str. SignatureCertChainUrl header value sent by request. Returns: bool: True if valid, False otherwise.
juraj-google-style
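A self-contained check; the imports shown are the ones the snippet itself relies on:

import os
import warnings
from urllib.parse import urlparse

ok = signature_cert_chain_url(
    "https://s3.amazonaws.com/echo.api/echo-api-cert.pem")
print(ok)  # True: scheme, host, path and port all pass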
def import_from_xml(xml, edx_video_id, resource_fs, static_dir, external_transcripts=dict(), course_id=None): if (xml.tag != 'video_asset'): raise ValCannotCreateError('Invalid XML') try: if (not edx_video_id): raise Video.DoesNotExist video = Video.objects.get(edx_video_id=edx_video_id) logger.info("edx_video_id '%s' present in course '%s' not imported because it exists in VAL.", edx_video_id, course_id) if (course_id and (video.status != EXTERNAL_VIDEO_STATUS)): (course_video, __) = CourseVideo.get_or_create_with_validation(video=video, course_id=course_id) image_file_name = xml.get('image', '').strip() if image_file_name: VideoImage.create_or_update(course_video, image_file_name) return edx_video_id except ValidationError as err: logger.exception(err.message) raise ValCannotCreateError(err.message_dict) except Video.DoesNotExist: pass if edx_video_id: data = {'edx_video_id': edx_video_id, 'client_video_id': xml.get('client_video_id'), 'duration': xml.get('duration'), 'status': 'imported', 'encoded_videos': [], 'courses': ([{course_id: xml.get('image')}] if course_id else [])} for encoded_video_el in xml.iterfind('encoded_video'): profile_name = encoded_video_el.get('profile') try: Profile.objects.get(profile_name=profile_name) except Profile.DoesNotExist: logger.info("Imported edx_video_id '%s' contains unknown profile '%s'.", edx_video_id, profile_name) continue data['encoded_videos'].append({'profile': profile_name, 'url': encoded_video_el.get('url'), 'file_size': encoded_video_el.get('file_size'), 'bitrate': encoded_video_el.get('bitrate')}) if (not data['encoded_videos']): data['status'] = EXTERNAL_VIDEO_STATUS data['courses'] = [] edx_video_id = create_video(data) else: edx_video_id = create_external_video('External Video') create_transcript_objects(xml, edx_video_id, resource_fs, static_dir, external_transcripts) return edx_video_id
Imports data from a video_asset element about the given video_id. If the edx_video_id already exists, then no changes are made. If an unknown profile is referenced by an encoded video, that encoding will be ignored. Arguments: xml (Element): An lxml video_asset element containing import data edx_video_id (str): val video id resource_fs (OSFS): Import file system. static_dir (str): The Directory to retrieve transcript file. external_transcripts (dict): A dict containing the list of names of the external transcripts. Example: { 'en': ['The_Flash.srt', 'Harry_Potter.srt'], 'es': ['Green_Arrow.srt'] } course_id (str): The ID of a course to associate the video with Raises: ValCannotCreateError: if there is an error importing the video Returns: edx_video_id (str): val video id.
codesearchnet
def log_correction(self, event, action):
    action = str(action)
    self.history.info(action)
    self._corrections.append(dict(
        event=event.as_dict(),
        action=action,
    ))
This method should be called once we have fixed the problem associated to this event. It adds a new entry in the correction history of the node. Args: event: :class:`AbinitEvent` that triggered the correction. action (str): Human-readable string with info on the action perfomed to solve the problem.
juraj-google-style
def bottleneck_block_v1(cnn, depth, depth_bottleneck, stride): input_layer = cnn.top_layer in_size = cnn.top_size name_key = "resnet_v1" name = name_key + str(cnn.counts[name_key]) cnn.counts[name_key] += 1 with tf.variable_scope(name): if depth == in_size: if stride == 1: shortcut = input_layer else: shortcut = cnn.apool( 1, 1, stride, stride, input_layer=input_layer, num_channels_in=in_size) else: shortcut = cnn.conv( depth, 1, 1, stride, stride, activation=None, use_batch_norm=True, input_layer=input_layer, num_channels_in=in_size, bias=None) cnn.conv( depth_bottleneck, 1, 1, stride, stride, input_layer=input_layer, num_channels_in=in_size, use_batch_norm=True, bias=None) cnn.conv( depth_bottleneck, 3, 3, 1, 1, mode="SAME_RESNET", use_batch_norm=True, bias=None) res = cnn.conv( depth, 1, 1, 1, 1, activation=None, use_batch_norm=True, bias=None) output = tf.nn.relu(shortcut + res) cnn.top_layer = output cnn.top_size = depth
Bottleneck block with identity short-cut for ResNet v1. Args: cnn: the network to append bottleneck blocks. depth: the number of output filters for this bottleneck block. depth_bottleneck: the number of bottleneck filters for this block. stride: Stride used in the first layer of the bottleneck block.
juraj-google-style
def _CreateRouteTripsFolder(self, parent, route, style_id=None, schedule=None): if not route.trips: return None trips = list(route.trips) trips.sort(key=lambda x: x.trip_id) trips_folder = self._CreateFolder(parent, 'Trips', visible=False) for trip in trips: if (self.date_filter and not trip.service_period.IsActiveOn(self.date_filter)): continue if trip.trip_headsign: description = 'Headsign: %s' % trip.trip_headsign else: description = None coordinate_list = [] for secs, stoptime, tp in trip.GetTimeInterpolatedStops(): if self.altitude_per_sec > 0: coordinate_list.append((stoptime.stop.stop_lon, stoptime.stop.stop_lat, (secs - 3600 * 4) * self.altitude_per_sec)) else: coordinate_list.append((stoptime.stop.stop_lon, stoptime.stop.stop_lat)) placemark = self._CreatePlacemark(trips_folder, trip.trip_id, style_id=style_id, visible=False, description=description) self._CreateLineString(placemark, coordinate_list) return trips_folder
Create a KML Folder containing all the trips in the route. The folder contains a placemark for each of these trips. If there are no trips in the route, no folder is created and None is returned. Args: parent: The parent ElementTree.Element instance. route: The transitfeed.Route instance. style_id: A style id string for the placemarks or None. Returns: The Folder ElementTree.Element instance or None.
juraj-google-style
def service(self, block, service_name):
    declaration = block.service_declaration(service_name)
    if declaration is None:
        raise NoSuchServiceError('Service {!r} was not requested.'.format(service_name))
    service = self._services.get(service_name)
    if service is None and declaration == 'need':
        raise NoSuchServiceError('Service {!r} is not available.'.format(service_name))
    return service
Return a service, or None. Services are objects implementing arbitrary other interfaces. They are requested by agreed-upon names, see [XXX TODO] for a list of possible services. The object returned depends on the service requested. XBlocks must announce their intention to request services with the `XBlock.needs` or `XBlock.wants` decorators. Use `needs` if you assume that the service is available, or `wants` if your code is flexible and can accept a None from this method. Runtimes can override this method if they have different techniques for finding and delivering services. Arguments: block (XBlock): this block's class will be examined for service decorators. service_name (str): the name of the service requested. Returns: An object implementing the requested service, or None.
codesearchnet
def nPr(n, r):
    f = math.factorial
    return int(f(n) / f(n - r))
Calculates nPr. Args: n (int): total number of items. r (int): items to permute Returns: nPr.
codesearchnet
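A self-contained example (note the snippet uses float division, which can lose precision for large n; `math` is assumed imported at module level):

import math

print(nPr(5, 2))  # 20 ways to arrange 2 of 5 items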
def _use_tables(objs):
    from ..models.widgets import TableWidget
    return _any(objs, lambda obj: isinstance(obj, TableWidget))
Whether a collection of Bokeh objects contains a TableWidget Args: objs (seq[Model or Document]) : Returns: bool
codesearchnet
def attach_stream(self, stream): (curr_stream, count, prev) = self._allocated_streams[stream] if (count == (self.model.get(u'max_node_outputs') - 1)): new_stream = self.allocate_stream(curr_stream.stream_type, previous=curr_stream) copy_desc = u'({} always) => {} using copy_all_a'.format(curr_stream, new_stream) self.sensor_graph.add_node(copy_desc) self._allocated_streams[stream] = (new_stream, 1, curr_stream) if ((curr_stream.stream_type == DataStream.ConstantType) and (curr_stream in self.sensor_graph.constant_database)): self.sensor_graph.add_constant(new_stream, self.sensor_graph.constant_database[curr_stream]) return new_stream self._allocated_streams[stream] = (curr_stream, (count + 1), prev) return curr_stream
Notify that we would like to attach a node input to this stream. The return value from this function is the DataStream that should be attached to since this function may internally allocate a new SGNode that copies the stream if there is no space in the output list to hold another input. This function should be called once for every node input before allocated a new sensor graph node that attaches to a stream that is managed by the StreamAllocator. Args: stream (DataStream): The stream (originally returned from allocate_stream) that we want to attach to. Returns: Datastream: A data stream, possible the same as stream, that should be attached to a node input.
codesearchnet
def GetArtifactParserDependencies(rdf_artifact):
    deps = set()
    processors = parser.Parser.GetClassesByArtifact(rdf_artifact.name)
    for p in processors:
        deps.update(p.knowledgebase_dependencies)
    return deps
Return the set of knowledgebase path dependencies required by the parser. Args: rdf_artifact: RDF artifact object. Returns: A set of strings for the required kb objects e.g. ["users.appdata", "systemroot"]
codesearchnet
def EvalGeneric(self, hashers=None):
    if hashers is None:
        hashers = Fingerprinter.GENERIC_HASH_CLASSES
    hashfuncs = [x() for x in hashers]
    finger = Finger(hashfuncs, [Range(0, self.filelength)], {'name': 'generic'})
    self.fingers.append(finger)
    return True
Causes the entire file to be hashed by the given hash functions. This sets up a 'finger' for fingerprinting, where the entire file is passed through a pre-defined (or user defined) set of hash functions. Args: hashers: An iterable of hash classes (e.g. out of hashlib) which will be instantiated for use. If hashers is not provided, or is provided as 'None', the default hashers will get used. To invoke this without hashers, provide an empty list. Returns: Always True, as all files are 'generic' files.
codesearchnet
def merge_entries(self, source_entry):
    for list_attr in source_entry.attrs.values():
        for attr in list_attr:
            self.attrs[attr.header.attr_type_id].append(attr)
    for stream in source_entry.data_streams:
        dest_stream = self._find_datastream(stream.name)
        if dest_stream is not None:
            dest_stream.add_from_datastream(stream)
        else:
            self.data_streams.append(stream)
Merge two entries. Allow the merging of two MFTEntries copying the attributes to the correct place and the datastreams. Args: source_entry (:obj:`MFTEntry`) - Source entry where the data will be copied from
juraj-google-style
def get(self, name_or_uri):
    name_or_uri = quote(name_or_uri)
    return self._client.get(name_or_uri)
Get the role by its URI or Name. Args: name_or_uri: Can be either the Name or the URI. Returns: dict: Role
juraj-google-style
def _MergeSameAgency(self, a_agency_id, b_agency_id):
    a_agency_id = (a_agency_id or
                   self.feed_merger.a_schedule.GetDefaultAgency().agency_id)
    b_agency_id = (b_agency_id or
                   self.feed_merger.b_schedule.GetDefaultAgency().agency_id)
    a_agency = self.feed_merger.a_schedule.GetAgency(a_agency_id)._migrated_entity
    b_agency = self.feed_merger.b_schedule.GetAgency(b_agency_id)._migrated_entity
    if a_agency != b_agency:
        raise MergeError('agency must be the same')
    return a_agency.agency_id
Merge agency ids to the corresponding agency id in the merged schedule. Args: a_agency_id: an agency id from the old schedule b_agency_id: an agency id from the new schedule Returns: The agency id of the corresponding merged agency. Raises: MergeError: If a_agency_id and b_agency_id do not correspond to the same merged agency. KeyError: Either aaid or baid is not a valid agency id.
codesearchnet
def get_config_string_option(parser: ConfigParser,
                             section: str,
                             option: str,
                             default: str = None) -> str:
    if not parser.has_section(section):
        raise ValueError("config missing section: " + section)
    return parser.get(section, option, fallback=default)
Retrieves a string value from a parser. Args: parser: instance of :class:`ConfigParser` section: section name within config file option: option (variable) name within that section default: value to return if option is absent Returns: string value Raises: ValueError: if the section is absent
juraj-google-style
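A self-contained example:

from configparser import ConfigParser

cfg = ConfigParser()
cfg.read_string("[server]\nhost = example.org\n")
print(get_config_string_option(cfg, "server", "host"))           # example.org
print(get_config_string_option(cfg, "server", "port", "8080"))   # 8080 (fallback)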
def nrows(self, out_type=None, name=None):
    with ops.name_scope(name, 'RaggedNRows', [self]):
        if out_type is None:
            return self._row_partition.nrows()
        else:
            return math_ops.cast(self._row_partition.nrows(), dtype=out_type)
Returns the number of rows in this ragged tensor. I.e., the size of the outermost dimension of the tensor. Args: out_type: `dtype` for the returned tensor. Defaults to `self.row_splits.dtype`. name: A name prefix for the returned tensor (optional). Returns: A scalar `Tensor` with dtype `out_type`. #### Example: >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) >>> print(rt.nrows()) # rt has 5 rows. tf.Tensor(5, shape=(), dtype=int64)
github-repos
def _get_command_and_argv(argv):
    command_name = argv[0]
    if not command_name:
        argv = argv[1:]
    elif command_name == settings.command:
        argv.remove(command_name)
    return command_name, argv
Extract the command name and arguments to pass to docopt. Args: argv: The argument list being used to run the command. Returns: A tuple containing the name of the command and the arguments to pass to docopt.
juraj-google-style
def __init__(self, loop_var, loop_len, pfor_ops, fallback_to_while_loop, all_indices=None, all_indices_partitioned=False, pfor_config=None, warn=False): assert isinstance(loop_var, tensor_lib.Tensor) assert loop_var.op.type == 'PlaceholderWithDefault' self._loop_var = loop_var loop_len_value = tensor_util.constant_value(loop_len) if loop_len_value is not None: loop_len = loop_len_value self._loop_len_vector = ops.convert_to_tensor([loop_len]) else: self._loop_len_vector = array_ops.reshape(loop_len, [1]) self._all_indices_partitioned = all_indices_partitioned if all_indices_partitioned: assert all_indices is not None if all_indices is None: self.all_indices = math_ops.range(loop_len, dtype=dtypes.int32, name='all_indices') else: self.all_indices = all_indices self._conversion_map = object_identity.ObjectIdentityDictionary() self._conversion_map[loop_var] = wrap(self.all_indices, True) self._pfor_ops = set(pfor_ops) self._pfor_op_ids = set((x._id for x in pfor_ops)) self._fallback_to_while_loop = fallback_to_while_loop self._warn = warn self._pfor_config = pfor_config
Creates an object to rewrite a parallel-for loop. Args: loop_var: Tensor output of a Placeholder operation. The value should be an int32 scalar representing the loop iteration number. loop_len: A scalar or scalar Tensor representing the number of iterations the loop is run for. pfor_ops: List of all ops inside the loop body. fallback_to_while_loop: If True, on failure to vectorize an op, a while loop is used to sequentially execute that op. all_indices: If not None, an int32 vector with size `loop_len` representing the iteration ids that are still active. These values should be unique and sorted. However they may not be contiguous. This is typically the case when inside a control flow construct which has partitioned the indices of the iterations that are being converted. all_indices_partitioned: If True, this object is being constructed from a control flow construct where not all the pfor iterations are guaranteed to be active. pfor_config: PForConfig object used while constructing the loop body. warn: Whether or not to warn on while loop conversions.
github-repos
def vstack(xs):
    if any_symbolic_tensors((xs,)):
        return Vstack().symbolic_call(xs)
    return backend.numpy.vstack(xs)
Stack tensors in sequence vertically (row wise). Args: xs: Sequence of tensors. Returns: Tensor formed by stacking the given tensors.
github-repos
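An illustrative call, assuming this is the Keras 3 `keras.ops.vstack` op (that mapping is an assumption from the surrounding names):

from keras import ops

print(ops.vstack([[1, 2], [3, 4]]))  # tensor of shape (2, 2)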
def create_chapter_from_string(self, html_string, url=None, title=None):
    clean_html_string = self.clean_function(html_string)
    clean_xhtml_string = clean.html_to_xhtml(clean_html_string)
    if not title:
        # Try to infer the title from the document's <title> node.
        try:
            root = BeautifulSoup(html_string, 'html.parser')
            title_node = root.title
            if title_node is not None:
                title = unicode(title_node.string)
            else:
                raise ValueError
        except (IndexError, ValueError):
            title = 'Ebook Chapter'
    return Chapter(clean_xhtml_string, title, url)
Creates a Chapter object from a string. Sanitizes the string using the clean_function method, and saves it as the content of the created chapter. Args: html_string (string): The html or xhtml content of the created Chapter url (Option[string]): A url to infer the title of the chapter from title (Option[string]): The title of the created Chapter. By default, this is None, in which case the title will try to be inferred from the webpage at the url. Returns: Chapter: A chapter object whose content is the given string and whose title is that provided or inferred from the url
codesearchnet
def _ExtractInterfaceMetadata(self, metadata): interfaces = [] for network_interface in metadata: mac_address = network_interface.get('mac') interface = self.network_utils.GetNetworkInterface(mac_address) ip_addresses = [] if interface: ip_addresses.extend(network_interface.get('forwardedIps', [])) if self.ip_aliases: ip_addresses.extend(network_interface.get('ipAliases', [])) if self.target_instance_ips: ip_addresses.extend(network_interface.get('targetInstanceIps', [])) interfaces.append(NetworkDaemon.NetworkInterface(interface, ip_addresses, network_interface.get('ip', []))) else: message = 'Network interface not found for MAC address: %s.' self.logger.warning(message, mac_address) return interfaces
Extracts network interface metadata. Args: metadata: dict, the metadata response with the new network interfaces. Returns: list, a list of NetworkInterface objects.
codesearchnet
def create(self, key, value):
    key = quote(key, safe='~')
    headers = {'content-type': 'application/octet-stream'}
    url = '/internal/playbooks/keyValue/{}'.format(key)
    r = self.tcex.session.put(url, data=value, headers=headers)
    return r.content
Create key/value pair in remote KV store. Args: key (string): The key to create in remote KV store. value (any): The value to store in remote KV store. Returns: (string): The response from the API call.
juraj-google-style
def _get_array(self, handle: int) -> np.ndarray:
    tup = self._arrays[handle]
    assert tup is not None
    c_arr, shape = tup
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', RuntimeWarning)
        result = np.ctypeslib.as_array(c_arr)
    result.shape = shape
    return result
Returns the array with the given handle. Args: handle: The handle of the array to retrieve; it must come from the _create_array method. Returns: The numpy ndarray associated with the given handle.
juraj-google-style
class GraniteMoeSharedMoE(nn.Module): def __init__(self, config: GraniteMoeSharedConfig): super(GraniteMoeSharedMoE, self).__init__() self.input_size = config.hidden_size self.hidden_size = config.intermediate_size self.activation = ACT2FN[config.hidden_act] self.input_linear = GraniteMoeSharedParallelExperts(config.num_local_experts, self.input_size, self.hidden_size * 2) self.output_linear = GraniteMoeSharedParallelExperts(config.num_local_experts, self.hidden_size, self.input_size) self.router = GraniteMoeSharedTopKGating(input_size=self.input_size, num_experts=config.num_local_experts, top_k=config.num_experts_per_tok) def forward(self, layer_input): bsz, length, emb_size = layer_input.size() layer_input = layer_input.reshape(-1, emb_size) _, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input) expert_inputs = layer_input[batch_index] hidden_states = self.input_linear(expert_inputs, expert_size) chunked_hidden_states = hidden_states.chunk(2, dim=-1) hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1] expert_outputs = self.output_linear(hidden_states, expert_size) expert_outputs = expert_outputs * batch_gates[:, None] zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device) layer_output = zeros.index_add(0, batch_index, expert_outputs) layer_output = layer_output.view(bsz, length, self.input_size) return (layer_output, router_logits)
A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts. Args: config: Configuration object with model hyperparameters.
github-repos
def to_genai_part(part_content: content_api.ProcessorPartTypes, mimetype: str | None=None) -> genai_types.Part: if isinstance(part_content, str): return genai_types.Part(text=part_content) elif isinstance(part_content, bytes): if mimetype is None: raise ValueError('Mimetype must be specified for bytes to_genai_part conversion.') p = ProcessorPart(part_content, mimetype=mimetype) return p.part elif isinstance(part_content, Image.Image): p = ProcessorPart(part_content) return p.part elif isinstance(part_content, ProcessorPart): return part_content.part elif isinstance(part_content, genai_types.Part): return part_content else: raise ValueError(f'Unsupported type for to_genai_part: {type(part_content)}')
Converts object of type `ProcessorPartTypes` to a Genai Part. Args: part_content: The content to convert. mimetype: (Optional) The mimetype of the content. Must be specified if part_content is bytes. Returns: The Genai Part representation of the content.
github-repos
def set(self, name: str, value: Any) -> None:
    self.agent.set(name, value)
Stores a knowledge item in the agent knowledge base. Args: name (str): name of the item value (Any): value of the item
codesearchnet
def update(self, **kwargs):
    to_remove = []
    for key, value in kwargs.items():
        if hasattr(self, key):
            setattr(self, key, value)
            to_remove.append(key)
    unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}
    return unused_kwargs
Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes, returning all the unused kwargs. Args: kwargs (`Dict[str, Any]`): Dictionary of attributes to tentatively update this class. Returns: `Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance.
github-repos
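A self-contained sketch of the update-and-return-leftovers pattern, using a hypothetical Options class with the same update logic:

class Options:
    def __init__(self):
        self.depth = 2
        self.width = 8

    def update(self, **kwargs):
        # Same logic as above: consume matching attributes, return the rest.
        used = []
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
                used.append(key)
        return {k: v for k, v in kwargs.items() if k not in used}

opts = Options()
leftover = opts.update(depth=4, colour="red")
# opts.depth == 4, opts.width == 8, leftover == {"colour": "red"}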
def update_uid_state(self, id_or_uri, refresh_state_data): uri = self._client.build_uri(id_or_uri) + "/uidState" return self._client.update(refresh_state_data, uri)
Sets the unit identification (UID) light state of the specified power delivery device. The device must be an HP iPDU component with a locator light (HP Intelligent Load Segment, HP AC Module, HP Intelligent Outlet Bar, or HP Intelligent Outlet) Args: id_or_uri: Can be either the power device id or the uri refresh_state_data: Power device refresh request Returns: str: The UID state
juraj-google-style
def get_text(obj) -> Tuple[int, str]: from bioc.bioc import BioCDocument, BioCPassage, BioCSentence if isinstance(obj, BioCSentence): return obj.offset, obj.text if isinstance(obj, BioCPassage): if obj.text: return obj.offset, obj.text text = '' for sentence in obj.sentences: try: text = pad_char(text, sentence.offset - obj.offset, ' ') assert sentence.text, f'BioC sentence has no text: {sentence.offset}' text += sentence.text except ValueError: raise ValueError(f'Overlapping sentences {sentence.offset}') return obj.offset, text if isinstance(obj, BioCDocument): text = '' for passage in obj.passages: try: text = pad_char(text, passage.offset) text += get_text(passage)[1] except ValueError: raise ValueError(f'{obj.id}: overlapping passages {passage.offset}') return 0, text raise TypeError(f'Object of type {obj.__class__.__name__} must be BioCCollection, ' f'BioCDocument, BioCPassage, or BioCSentence')
Return text with its offset in the document Args: obj: BioCDocument, BioCPassage, or BioCSentence Returns: offset, text
juraj-google-style
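A hedged sketch of passage-level reconstruction; the no-argument BioCPassage/BioCSentence constructors and the import path are assumptions about the bioc package.

from bioc.bioc import BioCPassage, BioCSentence

passage = BioCPassage()
passage.offset = 0                 # passage.text stays None, forcing sentence reconstruction

sentence = BioCSentence()
sentence.offset = 0
sentence.text = "BRCA1 is a tumour suppressor."
passage.sentences.append(sentence)

offset, text = get_text(passage)   # (0, "BRCA1 is a tumour suppressor.")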
def run_query_series(queries, conn): results = [] for item in queries: qry = item kwargs = {} if isinstance(item, tuple): qry = item[0] kwargs = item[1] result = conn.update_query(qry, **kwargs) results.append(result) return results
Iterates through a list of queries and runs them through the connection. Args: queries: list of strings or tuples containing (query_string, kwargs) conn: the triplestore connection to use Returns: list of results, one per query
codesearchnet
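A self-contained sketch with a stand-in connection object; any object exposing update_query(query, **kwargs) would work the same way.

class FakeConnection:
    def update_query(self, qry, **kwargs):
        return {"query": qry, "kwargs": kwargs}

queries = [
    "DELETE WHERE { ?s ?p ?o }",                                   # plain string
    ("INSERT DATA { <s> <p> <o> }", {"graph": "http://example.org/g"}),  # (query, kwargs) tuple
]
results = run_query_series(queries, FakeConnection())
# results[1]["kwargs"] == {"graph": "http://example.org/g"}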
def step(self, action, blocking=True): promise = self.call('step', action) if blocking: return promise() else: return promise
Step the environment. Args: action: The action to apply to the environment. blocking: Whether to wait for the result. Returns: Transition tuple when blocking, otherwise callable that returns the transition tuple.
codesearchnet
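Usage sketch; env stands for an instance of the proxy class that defines step() and call() above, and action is whatever the wrapped environment expects.

# Blocking: wait for the remote environment and get the transition directly.
transition = env.step(action, blocking=True)

# Non-blocking: start the call, do other work, then resolve the promise.
promise = env.step(action, blocking=False)
transition = promise()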
def help(route): help_text = getRouteHelp(route.split('/') if route else []) if help_text is None: err('Can\'t help :(') else: print('\n%s' % help_text)
r"""Displays help for the given route. Args: route (str): A route that resolves a member.
juraj-google-style
def forward(self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor]=None) -> torch.FloatTensor: if self.summary_type == 'last': output = hidden_states[:, -1] elif self.summary_type == 'first': output = hidden_states[:, 0] elif self.summary_type == 'mean': output = hidden_states.mean(dim=1) elif self.summary_type == 'cls_index': if cls_index is None: cls_index = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long) else: cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),)) output = hidden_states.gather(-2, cls_index).squeeze(-2) elif self.summary_type == 'attn': raise NotImplementedError output = self.first_dropout(output) output = self.summary(output) output = self.activation(output) output = self.last_dropout(output) return output
Compute a single vector summary of a sequence hidden states. Args: hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`): The hidden states of the last layer. cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*): Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token. Returns: `torch.FloatTensor`: The summary of the sequence hidden states.
github-repos
def __getitem__(self, key): if key in self.patterns: return ScreenPattern(self.patterns[key], self.field_registry) for shorter in range(key, 0, -1): if shorter in self.min_patterns: pattern = self.min_patterns[shorter] prefix = [''] * ((key - shorter) // 2) return ScreenPattern(prefix + pattern, self.field_registry) return ScreenPattern([], self.field_registry)
Retrieve the best pattern for a given size. The algorithm is: - If a pattern is registered for the size, use it - Otherwise, find the longest registered pattern shorter than size, add some blank lines before, and return it - If no shorter pattern exists, return a blank pattern. Args: key (int): the target size Returns: ScreenPattern: the best pattern available for that size
juraj-google-style
def continue_abort(self, root_pipeline_key, cursor=None, max_to_notify=_MAX_ABORTS_TO_BEGIN): if not isinstance(root_pipeline_key, db.Key): root_pipeline_key = db.Key(root_pipeline_key) query = ( _PipelineRecord.all(cursor=cursor) .filter('root_pipeline =', root_pipeline_key)) results = query.fetch(max_to_notify) task_list = [] for pipeline_record in results: if pipeline_record.status not in ( _PipelineRecord.RUN, _PipelineRecord.WAITING): continue pipeline_key = pipeline_record.key() task_list.append(taskqueue.Task( name='%s-%s-abort' % (self.task_name, pipeline_key.name()), url=self.abort_handler_path, params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.ABORT), headers={'X-Ae-Pipeline-Key': pipeline_key})) if len(results) == max_to_notify: the_match = re.match('(.*)-([0-9]+)', self.task_name) if the_match: prefix = the_match.group(1) end = int(the_match.group(2)) + 1 else: prefix = self.task_name end = 0 task_list.append(taskqueue.Task( name='%s-%d' % (prefix, end), url=self.fanout_abort_handler_path, params=dict(root_pipeline_key=root_pipeline_key, cursor=query.cursor()))) if task_list: try: taskqueue.Queue(self.queue_name).add(task_list) except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError): pass
Sends the abort signal to all children for a root pipeline. Args: root_pipeline_key: db.Key of the root pipeline to abort. cursor: The query cursor for enumerating _PipelineRecords when inserting tasks to cause child pipelines to terminate. max_to_notify: Used for testing.
juraj-google-style
def iter_processed_text(self, file, encoding=None, base_url=None): for (text, is_link) in self.iter_text(file, encoding): if (is_link and base_url): new_link = urljoin_safe(base_url, text, allow_fragments=False) if new_link: (yield (new_link, is_link)) else: (yield (new_link, False)) else: (yield (text, is_link))
Return the file text and processed absolute links. Args: file: A file object containing the document. encoding (str): The encoding of the document. base_url (str): The URL at which the document is located. Returns: iterator: Each item is a tuple: 1. str: The text 2. bool: Whether the text is a link
codesearchnet
def process_filter_directive(filter_operation_info, location, context): (op_name, operator_params) = _get_filter_op_name_and_values(filter_operation_info.directive) non_comparison_filters = {u'name_or_alias': _process_name_or_alias_filter_directive, u'between': _process_between_filter_directive, u'in_collection': _process_in_collection_filter_directive, u'has_substring': _process_has_substring_filter_directive, u'contains': _process_contains_filter_directive, u'intersects': _process_intersects_filter_directive, u'has_edge_degree': _process_has_edge_degree_filter_directive} all_recognized_filters = (frozenset(non_comparison_filters.keys()) | COMPARISON_OPERATORS) if (all_recognized_filters != ALL_OPERATORS): unrecognized_filters = (ALL_OPERATORS - all_recognized_filters) raise AssertionError(u'Some filtering operators are defined but do not have an associated processing function. This is a bug: {}'.format(unrecognized_filters)) if (op_name in COMPARISON_OPERATORS): process_func = partial(_process_comparison_filter_directive, operator=op_name) else: process_func = non_comparison_filters.get(op_name, None) if (process_func is None): raise GraphQLCompilationError(u'Unknown op_name for filter directive: {}'.format(op_name)) if ((filter_operation_info.field_name is None) and (op_name not in INNER_SCOPE_VERTEX_FIELD_OPERATORS)): raise GraphQLCompilationError(u'The filter with op_name "{}" must be applied on a field. It may not be applied on a type coercion.'.format(op_name)) fields = ((filter_operation_info.field_name,) if (op_name != 'name_or_alias') else ('name', 'alias')) context['metadata'].record_filter_info(location, FilterInfo(fields=fields, op_name=op_name, args=tuple(operator_params))) return process_func(filter_operation_info, location, context, operator_params)
Return a Filter basic block that corresponds to the filter operation in the directive. Args: filter_operation_info: FilterOperationInfo object, containing the directive and field info of the field where the filter is to be applied. location: Location where this filter is used. context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function! Returns: a Filter basic block that performs the requested filtering operation
codesearchnet
def write(self, x: int, y: int, text: str, transposed_text: 'Optional[str]' = None): entry = self.entries.get((x, y), _DiagramText('', '')) self.entries[(x, y)] = _DiagramText( entry.text + text, entry.transposed_text + (transposed_text if transposed_text else text))
Adds text to the given location. Args: x: The column in which to write the text. y: The row in which to write the text. text: The text to write at location (x, y). transposed_text: Optional text to write instead, if the text diagram is transposed.
juraj-google-style
def map_concepts_to_indicators(self, n: int=1, min_temporal_res: Optional[str]=None): for node in self.nodes(data=True): query_parts = ['select Indicator from concept_to_indicator_mapping', f"where `Concept` like '{node[0]}'"] query = ' '.join(query_parts) results = engine.execute(query) if (min_temporal_res is not None): if (min_temporal_res not in ['month']): raise ValueError("min_temporal_res must be 'month'") vars_with_required_temporal_resolution = [r[0] for r in engine.execute(f'select distinct `Variable` from indicator where `{min_temporal_res.capitalize()}` is not null')] results = [r for r in results if (r[0] in vars_with_required_temporal_resolution)] node[1]['indicators'] = {x: Indicator(x, 'MITRE12') for x in [r[0] for r in take(n, results)]}
Map each concept node in the AnalysisGraph instance to one or more tangible quantities, known as 'indicators'. Args: n: Number of matches to keep min_temporal_res: Minimum temporal resolution that the indicators must have data for.
codesearchnet
def alltoall(self, x, mesh_axis, split_axis, concat_axis): x = x.to_laid_out_tensor() t = x.one_slice group_assignment = self._create_group_assignment([mesh_axis]) dtype = t.dtype if dtype == tf.float32: t = tf.to_bfloat16(t) t = tpu_ops.all_to_all( t, concat_dimension=concat_axis, split_dimension=split_axis, split_count=len(group_assignment[0]), group_assignment=group_assignment) t = tf.cast(t, dtype) x = self.LaidOutTensor([t]) return x
Grouped alltoall (like MPI alltoall with splitting and concatenation). Args: x: a LaidOutTensor mesh_axis: an integer the mesh axis along which to group split_axis: an integer (the Tensor axis along which to split) concat_axis: an integer (the Tensor axis along which to concatenate) Returns: a LaidOutTensor
juraj-google-style
def precompute_edge_matrices(adjacency, hparams): (batch_size, num_nodes, _, edge_dim) = common_layers.shape_list(adjacency) with tf.variable_scope('edge_network'): x = tf.reshape(adjacency, [((batch_size * num_nodes) * num_nodes), edge_dim], name='adj_reshape_in') for ip_layer in range(hparams.edge_network_layers): name = ('edge_network_layer_%d' % ip_layer) x = tf.layers.dense(common_layers.layer_preprocess(x, hparams), hparams.edge_network_hidden_size, activation=tf.nn.relu, name=name) x = tf.layers.dense(common_layers.layer_preprocess(x, hparams), (hparams.hidden_size ** 2), activation=None, name='edge_network_output') edge_matrices_flat = tf.reshape(x, [batch_size, num_nodes, num_nodes, hparams.hidden_size, hparams.hidden_size]) edge_matrices = tf.reshape(tf.transpose(edge_matrices_flat, [0, 1, 3, 2, 4]), [(- 1), (num_nodes * hparams.hidden_size), (num_nodes * hparams.hidden_size)], name='edge_matrices') return edge_matrices
Precompute the a_in and a_out tensors. (we don't want to add to the graph every time _fprop is called) Args: adjacency: placeholder of real valued vectors of shape [B, L, L, E] hparams: HParams object Returns: edge_matrices: [batch, L * D, L * D] the dense matrix for message passing viewed as a block matrix (L,L) blocks of size (D,D). Each block is a function of the edge vector of the adjacency matrix at that spot.
codesearchnet
def find_nearest_color_index(r, g, b, color_table=None, method='euclid'): shortest_distance = 257*257*3 index = 0 if not color_table: if not color_table8: build_color_tables() color_table = color_table8 for i, values in enumerate(color_table): rd = r - values[0] gd = g - values[1] bd = b - values[2] this_distance = (rd * rd) + (gd * gd) + (bd * bd) if this_distance < shortest_distance: index = i shortest_distance = this_distance return index
Given three integers representing R, G, and B, return the nearest color index. Arguments: r: int - of range 0…255 g: int - of range 0…255 b: int - of range 0…255 Returns: int, None: index, or None on error.
juraj-google-style
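A self-contained call with an explicit palette, so the module-level color_table8 fallback is not needed:

palette = [(0, 0, 0), (255, 0, 0), (255, 255, 255)]        # black, red, white
idx = find_nearest_color_index(200, 30, 30, color_table=palette)
# idx == 1: (200, 30, 30) is closest to pure red in squared-RGB distance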
def be2le_state_by_state(tpm): le = np.empty(tpm.shape) N = tpm.shape[0] n = int(log2(N)) for i in range(N): le[i, :] = tpm[be2le(i, n), :] return le
Convert a state-by-state TPM from big-endian to little-endian or vice versa. Args: tpm (np.ndarray): A state-by-state TPM. Returns: np.ndarray: The state-by-state TPM in the other indexing format. Example: >>> tpm = np.arange(16).reshape([4, 4]) >>> be2le_state_by_state(tpm) array([[ 0., 1., 2., 3.], [ 8., 9., 10., 11.], [ 4., 5., 6., 7.], [12., 13., 14., 15.]])
juraj-google-style
def cluster_spec(self): tf_config = _load_tf_config() if 'cluster' not in tf_config: return ClusterSpec({}) return ClusterSpec(tf_config['cluster'])
Returns a ClusterSpec based on the TF_CONFIG environment variable. Returns: A ClusterSpec with information from the TF_CONFIG environment variable.
github-repos
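A sketch of the TF_CONFIG layout this method consumes; the JSON values below are illustrative.

import json
import os

os.environ["TF_CONFIG"] = json.dumps({
    "cluster": {
        "chief": ["host0:2222"],
        "worker": ["host1:2222", "host2:2222"],
    },
    "task": {"type": "worker", "index": 0},
})
# cluster_spec() now returns a ClusterSpec with "chief" and "worker" jobs;
# without a "cluster" key (or with TF_CONFIG unset) it returns ClusterSpec({}).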
def _InternalUnpackAny(msg): type_url = msg.type_url db = symbol_database.Default() if (not type_url): return None type_name = type_url.split('/')[(- 1)] descriptor = db.pool.FindMessageTypeByName(type_name) if (descriptor is None): return None message_class = db.GetPrototype(descriptor) message = message_class() message.ParseFromString(msg.value) return message
Unpacks Any message and returns the unpacked message. This internal method is different from the public Any Unpack method, which takes the target message as an argument. _InternalUnpackAny does not have the target message type and needs to find the message type in the descriptor pool. Args: msg: An Any message to be unpacked. Returns: The unpacked message.
codesearchnet
def v_cross(u, v): i = '(({u1})*({v2}) - ({u2})*({v1}))'.format(u1=u[1], u2=u[2], v1=v[1], v2=v[2]) j = '(({u2})*({v0}) - ({u0})*({v2}))'.format(u0=u[0], u2=u[2], v0=v[0], v2=v[2]) k = '(({u0})*({v1}) - ({u1})*({v0}))'.format(u0=u[0], u1=u[1], v0=v[0], v1=v[1]) return [i, j, k]
muparser cross product function. Computes the cross product of two 3x1 vectors. Args: u (list or tuple of 3 strings): first vector v (list or tuple of 3 strings): second vector Returns: A list of three muparser strings, one per component of the cross product
juraj-google-style
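A quick self-contained call showing the generated muparser strings:

u = ["ux", "uy", "uz"]
v = ["vx", "vy", "vz"]
i, j, k = v_cross(u, v)
# i == '((uy)*(vz) - (uz)*(vy))'
# j == '((uz)*(vx) - (ux)*(vz))'
# k == '((ux)*(vy) - (uy)*(vx))'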
def _replacer(self, match: re.Match[str], is_verbatim: bool, is_global: bool) -> str: symbol_name = match.group(0) if symbol_name in self._local_symbol_replacement_cache: return self._local_symbol_replacement_cache[symbol_name] if symbol_name in self._global_symbol_replacement_cache: return self._global_symbol_replacement_cache[symbol_name] if is_verbatim: declaration_replacement = symbol_name reference_replacement = symbol_name else: capture_name = self._generate_unique_name(symbol_name) capture_pattern = '[^ ]+' maybe_global_flag = '$' if is_global else '' declaration_replacement = f'[[{maybe_global_flag}{capture_name}:{capture_pattern}]]' reference_replacement = f'[[{maybe_global_flag}{capture_name}]]' if is_global: self._global_symbol_replacement_cache[symbol_name] = reference_replacement else: self._local_symbol_replacement_cache[symbol_name] = reference_replacement return declaration_replacement
A symbol-name replacement function for use in `re.sub`. Args: match: The match object produced by `self._SYMBOL_NAME_REGEX`. is_verbatim: Whether the newly matched symbol appears in a "CHECK-LABEL" directive, in which case it should be checked verbatim (not replaced with a regex capture). is_global: Whether the newly matched symbol appears in a declaration at global scope, i.e. whether it's a function name. If so, it should be remembered across function boundaries. Returns: The replacement string for the symbol name.
github-repos
def from_string(contents): if contents[-1] != "\n": contents += "\n" white_space = r"[ \t\r\f\v]" natoms_line = white_space + r"*\d+" + white_space + r"*\n" comment_line = r"[^\n]*\n" coord_lines = r"(\s*\w+\s+[0-9\-\+\.eEdD]+\s+[0-9\-\+\.eEdD]+\s+[0-9\-\+\.eEdD]+\s*\n)+" frame_pattern_text = natoms_line + comment_line + coord_lines pat = re.compile(frame_pattern_text, re.MULTILINE) mols = [] for xyz_match in pat.finditer(contents): xyz_text = xyz_match.group(0) mols.append(XYZ._from_frame_string(xyz_text)) return XYZ(mols)
Creates XYZ object from a string. Args: contents: String representing an XYZ file. Returns: XYZ object
juraj-google-style
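A usage sketch for the pymatgen-style XYZ parser; the coordinates are an arbitrary water geometry.

contents = """3
water
O  0.000  0.000  0.000
H  0.757  0.586  0.000
H -0.757  0.586  0.000
"""
xyz = XYZ.from_string(contents)   # one frame -> XYZ wrapping a single Molecule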
def visualize_instance_html(self, exp, label, div_name, exp_object_name, show_table=True, show_all=False): if (not show_table): return '' weights = ([0] * len(self.feature_names)) for x in exp: weights[x[0]] = x[1] out_list = list(zip(self.exp_feature_names, self.feature_values, weights)) if (not show_all): out_list = [out_list[x[0]] for x in exp] ret = (u'\n %s.show_raw_tabular(%s, %d, %s);\n ' % (exp_object_name, json.dumps(out_list, ensure_ascii=False), label, div_name)) return ret
Shows the current example in a table format. Args: exp: list of tuples [(id, weight), (id, weight)] label: label id (integer) div_name: name of div object to be used for rendering (in js) exp_object_name: name of js explanation object show_table: if False, don't show table visualization. show_all: if True, show zero-weighted features in the table.
codesearchnet
def edit_distance(x, y): ret = layers_distance(x.layers, y.layers) ret += Constant.KERNEL_LAMBDA * skip_connections_distance( x.skip_connections, y.skip_connections ) return ret
The distance between two neural networks. Args: x: An instance of NetworkDescriptor. y: An instance of NetworkDescriptor Returns: The edit-distance between x and y.
juraj-google-style
def download(url): headers = {"User-Agent": USER_AGENT} resp = requests.get( url, timeout=REQUEST_TIMEOUT, headers=headers, allow_redirects=True, verify=False, ) def decode(st, alt_encoding=None): encodings = ['ascii', 'utf-8', 'iso-8859-1', 'iso-8859-15'] if alt_encoding: if isinstance(alt_encoding, basestring): encodings.append(alt_encoding) else: encodings.extend(alt_encoding) for encoding in encodings: try: return st.encode(encoding).decode("utf-8") except (UnicodeEncodeError, UnicodeDecodeError): pass raise UnicodeError('Could not find encoding.') return decode(resp.text, resp.encoding)
Download `url` and return it as utf-8 encoded text. Args: url (str): What should be downloaded? Returns: str: Content of the page.
juraj-google-style
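Usage is a single call; note that the request above disables certificate verification (verify=False), so it is best pointed only at trusted hosts.

page_text = download("https://example.com/")   # decoded text of the page
print(page_text[:200])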
def __init__(self, node_def, op, message, error_code): super(OpError, self).__init__() self._message = message self._node_def = node_def self._op = op self._error_code = error_code
Creates a new `OpError` indicating that a particular op failed. Args: node_def: The `node_def_pb2.NodeDef` proto representing the op that failed, if known; otherwise None. op: The `ops.Operation` that failed, if known; otherwise None. message: The message string describing the failure. error_code: The `error_codes.Code` describing the error.
juraj-google-style
def apply(self, func, **kwargs): oid = self.oid self.call_queue.append((func, kwargs)) def call_queue_closure(oid_obj, call_queues): for (func, kwargs) in call_queues: if isinstance(func, ray.ObjectID): func = ray.get(func) if isinstance(kwargs, ray.ObjectID): kwargs = ray.get(kwargs) oid_obj = func(oid_obj, **kwargs) return oid_obj oid = deploy_ray_func.remote(call_queue_closure, oid, kwargs={'call_queues': self.call_queue}) self.call_queue = [] return PyarrowOnRayFramePartition(oid)
Apply a function to the object stored in this partition. Note: It does not matter if func is callable or an ObjectID. Ray will handle it correctly either way. The keyword arguments are sent as a dictionary. Args: func: The function to apply. Returns: A PyarrowOnRayFramePartition containing the result of the call.
codesearchnet
def __init__(self, resolver_context): super(EncryptedStreamFileSystem, self).__init__(resolver_context) self._encryption_method = None
Initializes an encrypted file system. Args: resolver_context (Context): a resolver context.
juraj-google-style
def raisefrom(exc_type, message, exc): if sys.version_info[:2] >= (3, 2): six.raise_from(exc_type(message), exc) else: six.reraise(exc_type, '%s - %s' % (message, exc), sys.exc_info()[2])
Call Python 3 raise from or emulate it for Python 2 Args: exc_type (Any): Type of Exception message (str): Error message to display exc (BaseException): original exception Returns: None
juraj-google-style
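A small self-contained example chaining an exception across Python 2 and 3:

try:
    int("not-a-number")
except ValueError as exc:
    raisefrom(RuntimeError, "config value must be an integer", exc)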
def watch_printer(watch, value): print("({: 8} s) {}: {}".format(value.raw_time, watch, value.value))
Print a watched value. Args: watch (DataStream): The stream that was watched value (IOTileReading): The value that was seen
juraj-google-style
def list_parking(self, **kwargs): url_args = {'lang': util.language_code(kwargs.get('lang'))} result = self.make_request('list_parking', url_args) if not util.check_result(result): return False, result.get('message', 'UNKNOWN ERROR') values = util.response_list(result, 'Data') return True, [emtype.Parking(**a) for a in values]
Obtain a list of parkings. Args: lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[Parking]), or message string in case of error.
juraj-google-style
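Usage sketch; emt_client stands for an authenticated instance of the EMT API wrapper that defines list_parking and make_request.

ok, payload = emt_client.list_parking(lang="en")
if ok:
    for parking in payload:          # emtype.Parking instances
        print(parking)
else:
    print("EMT error:", payload)     # payload is the error message string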
def get_distrib(): key = 'distrib' out, err = run_shell_cmd(cmds_all[PLATFORM][key]) if err and FLAGS.debug: print('Error in detecting distribution:\n %s' % str(err)) return out.strip(b'\n')
Retrieves distribution name of the operating system. Returns: String that is the name of distribution. e.g. 'Ubuntu'
github-repos
def CopyRecord(record, **field_overrides): fields = field_overrides for field in record.__slots__: if field in field_overrides: continue value = getattr(record, field) if isinstance(value, RecordClass): new_value = CopyRecord(value) else: new_value = copy.copy(value) fields[field] = new_value return type(record)(**fields)
Copies a record and its fields, recurses for any field that is a Record. For records that have nested mutable fields, use copy.deepcopy. Args: record: A Record instance to be copied. **field_overrides: Fields and their values to override in the new copy. Returns: A copy of the given record with any fields overridden.
juraj-google-style
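A sketch with a hypothetical slotted Point type standing in for a real Record subclass; it assumes the surrounding module (which defines RecordClass) is importable.

class Point(object):
    __slots__ = ("x", "y")

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

p = Point(1, 2)
q = CopyRecord(p, y=5)
# q.x == 1 (shallow-copied), q.y == 5 (overridden), p is untouched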
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line) if match: error(filename, linenum, 'build/explicit_make_pair', 4, 'For C++11-compatibility, omit template arguments from make_pair OR use pair directly OR if appropriate, construct a pair directly')
Check that make_pair's template arguments are deduced. G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are specified explicitly, and such use isn't intended in any case. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
codesearchnet
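A cpplint-style sketch with stand-in clean_lines and error objects; it assumes the module-level _RE_PATTERN_EXPLICIT_MAKEPAIR regex that the checker relies on.

class FakeCleansedLines(object):
    def __init__(self, lines):
        self.elided = lines

def collect_error(filename, linenum, category, confidence, message):
    print("%s:%d: [%s] %s" % (filename, linenum, category, message))

lines = ["auto p = std::make_pair<int, int>(1, 2);"]
CheckMakePairUsesDeduction("foo.cc", FakeCleansedLines(lines), 0, collect_error)
# Emits a build/explicit_make_pair warning for the explicit template arguments.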