Columns: code (string, lengths 20 to 4.93k) · docstring (string, lengths 33 to 1.27k) · source (3 classes)
def minimum(self, vars_list: List[str]) -> 'TensorFluent': return self._aggregation_op(tf.reduce_min, self, vars_list)
Returns the TensorFluent for the minimum aggregation function. Args: vars_list: The list of variables to be aggregated over. Returns: A TensorFluent wrapping the minimum aggregation function.
juraj-google-style
def recover_session(self, master: str, saver: saver_lib.Saver=None, checkpoint_dir: str=None, checkpoint_filename_with_path: str=None, wait_for_checkpoint=False, max_wait_secs=7200, config=None) -> Tuple[session.Session, bool]:
    sess, is_loaded_from_checkpoint = self._restore_checkpoint(master, saver, checkpoint_dir=checkpoint_dir, checkpoint_filename_with_path=checkpoint_filename_with_path, wait_for_checkpoint=wait_for_checkpoint, max_wait_secs=max_wait_secs, config=config)
    local_init_success, msg = self._try_run_local_init_op(sess)
    if not is_loaded_from_checkpoint:
        return (sess, False)
    restoring_file = checkpoint_dir or checkpoint_filename_with_path
    if not local_init_success:
        logging.info('Restoring model from %s did not make model ready for local init: %s', restoring_file, msg)
        return (sess, False)
    is_ready, msg = self._model_ready(sess)
    if not is_ready:
        logging.info('Restoring model from %s did not make model ready: %s', restoring_file, msg)
        return (sess, False)
    logging.info('Restored model from %s', restoring_file)
    return (sess, is_loaded_from_checkpoint)
Creates a `Session`, recovering if possible. Creates a new session on 'master'. If the session is not initialized and can be recovered from a checkpoint, recover it. Args: master: `String` representation of the TensorFlow master to use. saver: A `Saver` object used to restore a model. checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the dir will be used to restore. checkpoint_filename_with_path: Full file name path to the checkpoint file. wait_for_checkpoint: Whether to wait for checkpoint to become available. max_wait_secs: Maximum time to wait for checkpoints to become available. config: Optional `ConfigProto` proto used to configure the session. Returns: A pair (sess, initialized) where 'initialized' is `True` if the session could be recovered and initialized, `False` otherwise. Raises: ValueError: If both checkpoint_dir and checkpoint_filename_with_path are set.
github-repos
def GetCustomerIDs(client): managed_customer_service = client.GetService('ManagedCustomerService', version='v201809') offset = 0 selector = { 'fields': ['CustomerId'], 'predicates': [{ 'field': 'CanManageClients', 'operator': 'EQUALS', 'values': [False] }], 'paging': { 'startIndex': str(offset), 'numberResults': str(PAGE_SIZE) } } queue = multiprocessing.Queue() more_pages = True while more_pages: page = managed_customer_service.get(selector) if page and 'entries' in page and page['entries']: for entry in page['entries']: queue.put(entry['customerId']) else: raise Exception('Can\'t retrieve any customer ID.') offset += PAGE_SIZE selector['paging']['startIndex'] = str(offset) more_pages = offset < int(page['totalNumEntries']) return queue
Retrieves all CustomerIds in the account hierarchy. Note that your configuration file must specify a client_customer_id belonging to an AdWords manager account. Args: client: an AdWordsClient instance. Raises: Exception: if no CustomerIds could be found. Returns: A Queue instance containing all CustomerIds in the account hierarchy.
juraj-google-style
def l1_normalize(x, dim, epsilon=1e-12, name=None):
    with tf.name_scope(name, 'l1_normalize', [x]) as scope:
        x = tf.convert_to_tensor(x, name='x')
        x = tf.verify_tensor_all_finite(x, 'Error at input %s' % scope)
        x_norm = tf.maximum(tf.reduce_sum(tf.abs(x), [dim], keep_dims=True), epsilon)
        return tf.div(x, x_norm, name=scope)
l1 normalizes x. Args: x: The tensor to normalize. dim: The dimension to normalize along. epsilon: Lower bound on the norm, used to avoid exploding gradients as the norm approaches 0. name: Optional name for this op. Returns: x normalized along dim.
juraj-google-style
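The computation above reduces to dividing by the per-axis sum of absolute values, clipped below by epsilon. A minimal NumPy sketch of the same math, independent of the TF1 API used in the record:

import numpy as np

def l1_normalize_np(x, dim, epsilon=1e-12):
    # Divide by the L1 norm along `dim`, clipped below by epsilon.
    norm = np.maximum(np.sum(np.abs(x), axis=dim, keepdims=True), epsilon)
    return x / norm

print(l1_normalize_np(np.array([[1.0, -2.0, 3.0]]), dim=1))
# [[ 0.16666667 -0.33333333  0.5       ]]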
def timestamp(stamp, tolerance=150):
    try:
        tolerance = datetime.timedelta(0, tolerance)
        timestamp_low = dateutil.parser.parse(stamp)
        timestamp_high = timestamp_low + tolerance
        now = datetime.datetime.now(timestamp_low.tzinfo)
    except ValueError:
        return False
    return (now >= timestamp_low) and (now <= timestamp_high)
Validate timestamp specified by request. See `validate.request` for additional info. Args: stamp: str. Time request was made as ISO 8601 timestamp. tolerance: int. Number of seconds request remains valid from timestamp. Returns bool: True if valid, False otherwise.
codesearchnet
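A small usage sketch of the validator above, assuming `dateutil` is installed; a timestamp a few seconds old passes, one outside the tolerance window does not:

import datetime

now = datetime.datetime.now(datetime.timezone.utc)
recent = (now - datetime.timedelta(seconds=10)).isoformat()
stale = (now - datetime.timedelta(seconds=600)).isoformat()

print(timestamp(recent))                 # True: inside the default 150 s window
print(timestamp(stale))                  # False: request too old
print(timestamp(stale, tolerance=900))   # True: widened tolerance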
def datetime_from_isoformat(value: str):
    if sys.version_info >= (3, 7):
        return datetime.fromisoformat(value)
    return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f')
Return a datetime object from an isoformat string. Args: value (str): Datetime string in isoformat.
codesearchnet
def get_lang_tags(index_page):
    dom = dhtmlparser.parseString(index_page)
    lang_tags = [
        get_html_lang_tags(dom),
        get_dc_lang_tags(dom),
        [detect_language(dom)],
        get_html_tag_lang_params(dom),
    ]
    return list(sorted(set(
        SourceString(normalize(lang), source=lang.source)
        for lang in sum(lang_tags, [])
    )))
Collect information about the language of the page from HTML and Dublin Core tags and langdetect guesses. Args: index_page (str): HTML content of the page you wish to analyze. Returns: list: List of :class:`.SourceString` objects.
juraj-google-style
def _ragged_tensor_binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0, axis=-1):
    fn = functools.partial(binary_crossentropy, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis)
    return _ragged_tensor_apply_loss(fn, y_true, y_pred)
Implements support for handling RaggedTensors. Args: y_true: Tensor of one-hot true targets. y_pred: Tensor of predicted targets. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For example, if `0.1`, use `0.1 / num_classes` for non-target labels and `0.9 + 0.1 / num_classes` for target labels. axis: Axis along which to compute crossentropy. Returns: Binary crossentropy loss value. Expected shape: (batch, sequence_len) with sequence_len being variable per batch. Return shape: (batch,); returns the per batch mean of the loss values. When used by BinaryCrossentropy() with the default reduction (SUM_OVER_BATCH_SIZE), the reduction averages the per batch losses over the number of batches.
github-repos
def _loop_exits_early(loop):
    loop_nodes = (astroid.For, astroid.While)
    definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
    inner_loop_nodes = [
        _node for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
        if _node != loop
    ]
    return any(
        _node for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
        if _get_break_loop_node(_node) not in inner_loop_nodes
    )
Returns true if a loop may end in a break statement. Args: loop (astroid.For, astroid.While): the loop node inspected. Returns: bool: True if the loop may end in a break statement, False otherwise.
codesearchnet
def _run_dnb_normalization(self, dnb_data, sza_data): dnb_data = xr.DataArray(dnb_data, dims=('y', 'x')) sza_data = xr.DataArray(sza_data, dims=('y', 'x')) good_mask = (~ (dnb_data.isnull() | sza_data.isnull())) output_dataset = dnb_data.where(good_mask) output_dataset = output_dataset.values.copy() dnb_data = dnb_data.values sza_data = sza_data.values (day_mask, mixed_mask, night_mask) = make_day_night_masks(sza_data, good_mask.values, self.high_angle_cutoff, self.low_angle_cutoff, stepsDegrees=self.mixed_degree_step) did_equalize = False has_multi_times = (len(mixed_mask) > 0) if day_mask.any(): did_equalize = True if ((self.adaptive_day == 'always') or (has_multi_times and (self.adaptive_day == 'multiple'))): LOG.debug('Adaptive histogram equalizing DNB day data...') local_histogram_equalization(dnb_data, day_mask, valid_data_mask=good_mask.values, local_radius_px=self.day_radius_pixels, out=output_dataset) else: LOG.debug('Histogram equalizing DNB day data...') histogram_equalization(dnb_data, day_mask, out=output_dataset) if mixed_mask: for mask in mixed_mask: if mask.any(): did_equalize = True if ((self.adaptive_mixed == 'always') or (has_multi_times and (self.adaptive_mixed == 'multiple'))): LOG.debug('Adaptive histogram equalizing DNB mixed data...') local_histogram_equalization(dnb_data, mask, valid_data_mask=good_mask.values, local_radius_px=self.mixed_radius_pixels, out=output_dataset) else: LOG.debug('Histogram equalizing DNB mixed data...') histogram_equalization(dnb_data, day_mask, out=output_dataset) if night_mask.any(): did_equalize = True if ((self.adaptive_night == 'always') or (has_multi_times and (self.adaptive_night == 'multiple'))): LOG.debug('Adaptive histogram equalizing DNB night data...') local_histogram_equalization(dnb_data, night_mask, valid_data_mask=good_mask.values, local_radius_px=self.night_radius_pixels, out=output_dataset) else: LOG.debug('Histogram equalizing DNB night data...') histogram_equalization(dnb_data, night_mask, out=output_dataset) if (not did_equalize): raise RuntimeError('No valid data found to histogram equalize') return output_dataset
Scale the DNB data using a adaptive histogram equalization method. Args: dnb_data (ndarray): Day/Night Band data array sza_data (ndarray): Solar Zenith Angle data array
codesearchnet
def clarke_thermalcond(self, structure):
    nsites = structure.num_sites
    volume = structure.volume
    tot_mass = sum([e.atomic_mass for e in structure.species])
    natoms = structure.composition.num_atoms
    weight = float(structure.composition.weight)
    avg_mass = 1.6605e-27 * tot_mass / natoms
    mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
    return 0.87 * 1.3806e-23 * avg_mass**(-2./3.) \
        * mass_density**(1./6.) * self.y_mod**0.5
Calculates Clarke's thermal conductivity (in SI units) Args: structure: pymatgen structure object Returns: Clarke's thermal conductivity (in SI units)
juraj-google-style
def valid_paths(self, *args):
    for i, path in enumerate(args, start=0):
        cp = list(args)
        current = cp.pop(i)
        if current in cp:
            raise SettingsInvalidError("Multiple occurences finded for "
                                       "path: {}".format(current))
    return True
Validate that given paths are not the same. Args: (string): Path to validate. Raises: boussole.exceptions.SettingsInvalidError: If there is more than one occurrence of the same path. Returns: bool: ``True`` if paths are validated.
juraj-google-style
def table_exists(client, table_reference):
    from google.cloud.exceptions import NotFound
    try:
        client.get_table(table_reference)
        return True
    except NotFound:
        return False
Return if a table exists. Args: client (google.cloud.bigquery.client.Client): A client to connect to the BigQuery API. table_reference (google.cloud.bigquery.table.TableReference): A reference to the table to look for. Returns: bool: ``True`` if the table exists, ``False`` otherwise.
juraj-google-style
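A hypothetical usage sketch of the helper above; it assumes configured Google Cloud credentials and a made-up table path:

from google.cloud import bigquery

client = bigquery.Client()  # requires credentials for the target project
ref = bigquery.TableReference.from_string("my-project.my_dataset.my_table")
print(table_exists(client, ref))  # True or False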
def average_over_unit_sphere(self, quad=None):
    quad = quad or DEFAULT_QUAD
    weights, points = quad['weights'], quad['points']
    return sum([w * self.project(n) for w, n in zip(weights, points)])
Method for averaging the tensor projection over the unit sphere, with the option of custom quadrature. Args: quad (dict): quadrature for integration, should be dictionary with "points" and "weights" keys defaults to quadpy.sphere.Lebedev(19) as read from file Returns: Average of tensor projected into vectors on the unit sphere
codesearchnet
def etm_register_write(self, register_index, value, delay=False):
    self._dll.JLINKARM_ETM_WriteReg(int(register_index), int(value), int(delay))
    return None
Writes a value to an ETM register. Args: self (JLink): the ``JLink`` instance. register_index (int): the register to write to. value (int): the value to write to the register. delay (bool): boolean specifying if the write should be buffered. Returns: ``None``
juraj-google-style
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
Create a mask from the two sequences passed to be used in a sequence-pair classification task. CamemBERT, like RoBERTa, does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros.
github-repos
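Because CamemBERT ignores token type ids, the returned mask is all zeros, sized to the special-token layout described above. A small arithmetic sketch with hypothetical token ids:

token_ids_0 = [10, 11, 12]   # hypothetical ids for sequence A
token_ids_1 = [20, 21]       # hypothetical ids for sequence B
single = [0] * (1 + len(token_ids_0) + 1)                        # <s> A </s>
pair = [0] * (1 + len(token_ids_0) + 2 + len(token_ids_1) + 1)   # <s> A </s></s> B </s>
print(len(single), len(pair))  # 5 9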
def hset(self, key, value): return self.r.hset(self.hash, key, value)
Create key/value pair in Redis. Args: key (string): The key to create in Redis. value (any): The value to store in Redis. Returns: (string): The response from Redis.
juraj-google-style
def composite_tensor_to_variants(value, type_spec=None, name=None):
    if not isinstance(value, composite_tensor.CompositeTensor):
        raise TypeError(f'Expected `value` to be a CompositeTensor. Received {type(value)}.')
    if type_spec is None:
        type_spec = value._type_spec
    if not type_spec.is_compatible_with(value):
        raise ValueError(f'`type_spec` {type_spec} is not compatible with `value` {value!r}.')
    metadata = composite_tensor_variant_pb2.CompositeTensorVariantMetadata()
    metadata.type_spec_proto.CopyFrom(nested_structure_coder.encode_structure(type_spec).type_spec_value)
    return gen_composite_tensor_ops.CompositeTensorVariantFromComponents(
        components=nest.flatten(value, expand_composites=True),
        metadata=metadata.SerializeToString(),
        name=name)
Encodes `value` as a scalar variant tensor. Args: value: The `ExtensionType` value to encode. type_spec: Information about the value's type that should be included in the encoding. name: Optional name for the operation. Returns: A Tensor with shape=`()` and dtype=`tf.variant`. Raises: ValueError: If `type_spec` is not compatible with `value`.
github-repos
def permute(self, ordering: np.ndarray, axis: int) -> None:
    if self._file.__contains__('tiles'):
        del self._file['tiles']
    ordering = list(np.array(ordering).flatten())
    self.layers._permute(ordering, axis=axis)
    if axis == 0:
        self.row_attrs._permute(ordering)
        self.row_graphs._permute(ordering)
    if axis == 1:
        self.col_attrs._permute(ordering)
        self.col_graphs._permute(ordering)
Permute the dataset along the indicated axis. Args: ordering (list of int): The desired order along the axis axis (int): The axis along which to permute Returns: Nothing.
codesearchnet
def generate_poisson_lineage(n_states, n_cells_per_cluster, n_genes, means=300):
    M = np.random.random((n_genes, n_states)) * means
    center = M.mean(1)
    W = np.zeros((n_states, n_cells_per_cluster * n_states))
    index = 0
    means = np.array([1.0 / n_states] * n_states)
    for c in range(n_states):
        for i in range(n_cells_per_cluster):
            w = np.copy(means)
            new_value = w[c] + (i * (1.0 - 1.0 / n_states)) / n_cells_per_cluster
            w[:] = (1.0 - new_value) / (n_states - 1.0)
            w[c] = new_value
            W[:, index] = w
            index += 1
    return (M, W)
Generates a lineage for each state; assumes that each state has a common ancestor. Returns: M - genes x clusters W - clusters x cells
codesearchnet
def import_image_from_image(self, image, repository=None, tag=None, changes=None):
    return self.import_image(
        image=image, repository=repository, tag=tag, changes=changes
    )
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only supports importing from another image, like the ``FROM`` Dockerfile parameter. Args: image (str): Image name to import from repository (str): The repository to create tag (str): The tag to apply
juraj-google-style
def HasWarnings(self):
    has_errors = self._HasAttributeContainers(self._CONTAINER_TYPE_EXTRACTION_ERROR)
    if has_errors:
        return True
    return self._HasAttributeContainers(self._CONTAINER_TYPE_EXTRACTION_WARNING)
Determines if a store contains extraction warnings. Returns: bool: True if the store contains extraction warnings.
codesearchnet
def util_pattern_space(time_series, lag, dim):
    n = len(time_series)
    if lag * dim > n:
        raise Exception('Result matrix exceeded size limit, try to change lag or dim.')
    elif lag < 1:
        raise Exception('Lag should be greater or equal to 1.')
    pattern_space = np.empty((n - lag * (dim - 1), dim))
    for i in range(n - lag * (dim - 1)):
        for j in range(dim):
            pattern_space[i][j] = time_series[i + j * lag]
    return pattern_space
Create a set of sequences with given lag and dimension Args: time_series: Vector or string of the sample data lag: Lag between beginning of sequences dim: Dimension (number of patterns) Returns: 2D array of vectors
juraj-google-style
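A worked example of the embedding above, reusing util_pattern_space from this record (NumPy assumed available in its module): with lag 1 and dimension 2 the rows are consecutive pairs of the series.

ts = [1, 2, 3, 4, 5]
print(util_pattern_space(ts, lag=1, dim=2))
# [[1. 2.]
#  [2. 3.]
#  [3. 4.]
#  [4. 5.]]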
def get(self, type: Type[T], query: Mapping[str, Any]) -> T: LOGGER.info("Getting SourceHandlers for \"{type}\"".format(type=type.__name__)) try: handlers = self._get_types[type] except KeyError: try: LOGGER.info("Building new SourceHandlers for \"{type}\"".format(type=type.__name__)) handlers = self._get_handlers(type) except NoConversionError: handlers = None self._get_types[type] = handlers if handlers is None: raise NoConversionError("No source can provide \"{type}\"".format(type=type.__name__)) LOGGER.info("Creating new PipelineContext") context = self._new_context() LOGGER.info("Querying SourceHandlers for \"{type}\"".format(type=type.__name__)) for handler in handlers: try: return handler.get(query, context) except NotFoundError: pass raise NotFoundError("No source returned a query result!")
Gets a query from the data pipeline. 1) Extracts the query from the sequence of data sources. 2) Inserts the result into the data sinks (if appropriate). 3) Transforms the result into the requested type if it wasn't already. 4) Inserts the transformed result into any data sinks. Args: query: The query being requested. context: The context for the extraction (mutable). Returns: The requested object.
juraj-google-style
def write_dirpath(dirpath, strategy):
    if strategy is None:
        strategy = distribute_lib.get_strategy()
    if strategy is None:
        return dirpath
    if not strategy.extended._in_multi_worker_mode():
        return dirpath
    if strategy.extended.should_checkpoint:
        return dirpath
    return _get_temp_dir(dirpath, strategy)
Returns the writing dir that should be used to save file distributedly. `dirpath` would be created if it doesn't exist. Args: dirpath: Original dirpath that would be used without distribution. strategy: The tf.distribute strategy object currently used. Returns: The writing dir path that should be used to save with distribution.
github-repos
def determine_final_config(config_module):
    config = Config(
        DEFAULT_LIBRARY_RC_ADDITIONS, DEFAULT_LIBRARY_RC_REPLACEMENTS,
        DEFAULT_TEST_RC_ADDITIONS, DEFAULT_TEST_RC_REPLACEMENTS)
    for field in config._fields:
        if hasattr(config_module, field):
            config = config._replace(**{field: getattr(config_module, field)})
    return config
Determines the final additions and replacements. Combines the config module with the defaults. Args: config_module: The loaded local configuration module. Returns: Config: the final configuration.
juraj-google-style
def count_weights(scope=None, exclude=None, graph=None):
    if scope:
        scope = scope if scope.endswith('/') else scope + '/'
    graph = graph or tf.get_default_graph()
    vars_ = graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    if scope:
        vars_ = [var for var in vars_ if var.name.startswith(scope)]
    if exclude:
        exclude = re.compile(exclude)
        vars_ = [var for var in vars_ if not exclude.match(var.name)]
    shapes = [var.get_shape().as_list() for var in vars_]
    return int(sum(np.prod(shape) for shape in shapes))
Count learnable parameters. Args: scope: Restrict the count to a variable scope. exclude: Regex to match variable names to exclude. graph: Operate on a graph other than the current default graph. Returns: Number of learnable parameters as integer.
juraj-google-style
def _format_collection_name(self):
    base_uri = self._format_resource_name()
    if base_uri[-2:] == '_s':
        endind = 2
    else:
        endind = 1
    return base_uri[:-endind]
Formats a name from Collection format Collections are of two name formats based on their actual URI representation in the REST service. 1. For cases where the actual URI of a collection is singular, for example, /mgmt/tm/ltm/node The name of the collection, as exposed to the user, will be made plural. For example, mgmt.tm.ltm.nodes 2. For cases where the actual URI of a collection is plural, for example, /mgmt/cm/shared/licensing/pools/ The name of the collection, as exposed to the user, will remain plural, but will have an `_s` appended to it. For example, mgmt.cm.shared.licensing.pools_s This method is responsible for undoing the user provided plurality. It ensures that the URI that is being sent to the REST service is correctly plural, or plural plus. Returns: A string representation of the user formatted Collection with its plurality identifier removed appropriately.
codesearchnet
def _ParseToken(self, file_object, file_offset):
    token_type = self._ParseTokenType(file_object, file_offset)
    token_data = None
    token_data_map_name = self._DATA_TYPE_MAP_PER_TOKEN_TYPE.get(token_type, None)
    if token_data_map_name:
        token_data_map = self._GetDataTypeMap(token_data_map_name)
        token_data, _ = self._ReadStructureFromFileObject(
            file_object, file_offset + 1, token_data_map)
    return token_type, token_data
Parses a token. Args: file_object (dfvfs.FileIO): file-like object. file_offset (int): offset of the token relative to the start of the file-like object. Returns: tuple: containing: int: token type object: token data or None if the token type is not supported.
juraj-google-style
def register_flag_by_module_id(self, module_id, flag):
    flags_by_module_id = self.flags_by_module_id_dict()
    flags_by_module_id.setdefault(module_id, []).append(flag)
Records the module that defines a specific flag. Args: module_id: int, the ID of the Python module. flag: Flag, the Flag instance that is key to the module.
juraj-google-style
def __init__(self, nmr_items, ctype):
    self._ctype = ctype
    self._mot_float_dtype = None
    self._nmr_items = nmr_items
Adds a private memory array of the indicated size to the kernel data elements. This is useful if you want to have private memory arrays in kernel data structs. Args: nmr_items (int): the size of the private memory array ctype (str): the desired c-type for this local memory object, like ``int``, ``float`` or ``mot_float_type``.
juraj-google-style
def handle_intermediate_response(self, item_session: ItemSession) -> Actions:
    self._waiter.reset()
    action = self.handle_response(item_session)
    return action
Callback for successful intermediate responses. Returns: A value from :class:`.hook.Actions`.
codesearchnet
def filter_pyfqn(cls, value, relative_to=0):
    def collect_packages(element, packages):
        parent = element.eContainer()
        if parent:
            collect_packages(parent, packages)
        packages.append(element.name)

    packages = []
    collect_packages(value, packages)
    if relative_to < 0 or relative_to > len(packages):
        raise ValueError('relative_to not in range of number of packages')
    fqn = '.'.join(packages[relative_to:])
    if relative_to:
        fqn = '.' + fqn
    return cls.module_path_map.get(fqn, fqn)
Returns Python form of fully qualified name. Args: relative_to: If greater 0, the returned path is relative to the first n directories.
codesearchnet
async def get_run_error(self, pipeline_uuid: str) -> str:
    self._verify_pipeline_uuid(pipeline_uuid)
    request = api_pb2.GetRunErrorRequest(pipeline_uuid=pipeline_uuid)
    response = await self._stub.GetRunError(request, **self._kwargs)
    return response.output
Get the error of pipeline execution. Args: pipeline_uuid: uuid of the pipeline Returns: output: contain an error of pipeline execution
github-repos
def __init__(self, rdfclass=None, **kwargs):
    super(RDFValueType, self).__init__(**kwargs)
    self._type = self.rdfclass = rdfclass
An arg which must be an RDFValue. Args: rdfclass: The RDFValue class that this arg must be. **kwargs: Passthrough to base class.
juraj-google-style
def update(self, data, timeout=-1, force=False):
    uri = self.data["uri"]
    self.data = self._helper.update(data, uri=uri, timeout=timeout, force=force)
    return self
Updates one or more attributes for a server hardware type resource. Args: data (dict): Object to update. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. force: Flag to force the operation. Returns: dict: Updated server hardware type.
juraj-google-style
def repr_assist(obj, remap=None): if not remap: remap = {} data = [] for arg in inspect.getargspec(getattr(obj.__class__, '__init__'))[0]: if arg == 'self': continue elif arg in remap: value = remap[arg] else: try: value = getattr(obj, arg) except AttributeError: value = getattr(obj, '_%s' % arg) if isinstance(value, (type(None), list, basestring, datetime.date, datetime.time)): data.append(repr(value)) else: data.append(str(value)) return "%s(%s)" % (obj.__class__.__name__, ', '.join(data))
Helper function to simplify ``__repr__`` methods. Args: obj: Object to pull argument values for remap (dict): Argument pairs to remap before output Returns: str: Self-documenting representation of ``value``
juraj-google-style
def ask_stories(self, raw=False, limit=None):
    ask_stories = self._get_stories('askstories', limit)
    if raw:
        ask_stories = [story.raw for story in ask_stories]
    return ask_stories
Returns list of item ids of latest Ask HN stories Args: limit (int): specifies the number of stories to be returned. raw (bool): Flag to indicate whether to transform all objects into raw json. Returns: `list` object containing ids of Ask HN stories.
juraj-google-style
def set(self, namespace, key, value, description=None): if isinstance(value, DBCChoice): vtype = 'choice' elif isinstance(value, DBCString): vtype = 'string' elif isinstance(value, DBCFloat): vtype = 'float' elif isinstance(value, DBCInt): vtype = 'int' elif isinstance(value, DBCArray): vtype = 'array' elif isinstance(value, DBCJSON): vtype = 'json' elif isinstance(value, bool): vtype = 'bool' else: raise ValueError('Invalid config item type: {}'.format(type(value))) if namespace in self.__data and key in self.__data[namespace]: itm = db.ConfigItem.find_one( ConfigItem.namespace_prefix == namespace, ConfigItem.key == key ) if not itm: raise KeyError(key) itm.value = value itm.type = vtype if description: itm.description = description else: itm = ConfigItem() itm.key = key itm.value = value itm.type = vtype itm.description = description itm.namespace_prefix = namespace db.session.add(itm) db.session.commit() if namespace in self.__data: self.__data[namespace][key] = value else: self.__data[namespace] = {key: value}
Set (create/update) a configuration item Args: namespace (`str`): Namespace for the item key (`str`): Key of the item value (`Any`): Value of the type, must by one of `DBCString`, `DBCFloat`, `DBCInt`, `DBCArray`, `DBCJSON` or `bool` description (`str`): Description of the configuration item Returns: `None`
juraj-google-style
def CopyFromStringTuple(self, time_elements_tuple): if len(time_elements_tuple) < 7: raise ValueError(( 'Invalid time elements tuple at least 7 elements required,' 'got: {0:d}').format(len(time_elements_tuple))) year, month, day_of_month, hours, minutes, seconds, milliseconds = ( time_elements_tuple) try: milliseconds = int(milliseconds, 10) except (TypeError, ValueError): raise ValueError('Invalid millisecond value: {0!s}'.format(milliseconds)) if milliseconds < 0 or milliseconds >= definitions.MILLISECONDS_PER_SECOND: raise ValueError('Invalid number of milliseconds.') fraction_of_second = ( decimal.Decimal(milliseconds) / definitions.MILLISECONDS_PER_SECOND) time_elements_tuple = ( year, month, day_of_month, hours, minutes, seconds, str(fraction_of_second)) super(TimeElementsInMilliseconds, self).CopyFromStringTuple( time_elements_tuple)
Copies time elements from string-based time elements tuple. Args: time_elements_tuple (Optional[tuple[str, str, str, str, str, str, str]]): time elements, contains year, month, day of month, hours, minutes, seconds and milliseconds. Raises: ValueError: if the time elements tuple is invalid.
juraj-google-style
def __init__(self, parameter, value, valid_values=None):
    msg = 'Invalid value "{value}" supplied to {parameter}.'.format(
        parameter=parameter, value=value)
    if valid_values:
        msg += ' Valid options are: {}'.format(', '.join(valid_values))
    super(InvalidCliValueError, self).__init__(msg)
Instantiate the exception with a descriptive message. Args: parameter: The CLI parameter with the invalid value. value: The invalid value passed to the CLI parameter. valid_values: The values that would have been accepted by the parameter.
juraj-google-style
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
    vision_data = {}
    if image_sizes is not None:
        num_image_tokens = [self.image_seq_length + 2] * len(image_sizes)
        num_image_patches = [1] * len(image_sizes)
        vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})
    return MultiModalData(**vision_data)
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (`List[List[int]]`, *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data.
github-repos
def as_signature_def(self, receiver_tensors): pass
Generate a SignatureDef proto for inclusion in a MetaGraphDef. The SignatureDef will specify outputs as described in this ExportOutput, and will use the provided receiver_tensors as inputs. Args: receiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying input nodes that will be fed.
github-repos
def get_product_version(path: typing.Union[str, Path]) -> VersionInfo: path = Path(path).absolute() pe_info = pefile.PE(str(path)) try: for file_info in pe_info.FileInfo: if isinstance(file_info, list): result = _parse_file_info(file_info) if result: return result else: result = _parse_file_info(pe_info.FileInfo) if result: return result raise RuntimeError(f'unable to obtain version from {path}') except (KeyError, AttributeError) as exc: traceback.print_exc() raise RuntimeError(f'unable to obtain version from {path}') from exc
Get version info from executable Args: path: path to the executable Returns: VersionInfo
juraj-google-style
def binary_guesser(handle, num_bytes=512):
    text_chars = ''.join(map(chr, range(32, 127))) + '\n\r\t\x08'
    byte_chars = text_chars.encode()
    handle_location = handle.tell()
    first_block = handle.read(num_bytes)
    if type(first_block) is str:
        first_block = first_block.encode()
    filtered_block = first_block.translate(None, delete=byte_chars)
    handle.seek(handle_location)
    if (float(len(filtered_block)) / float(len(first_block))) > 0.3:
        pass
    else:
        msg = '{0} is probably not a binary file'.format(handle.name)
        raise FormatError(message=msg)
Raise error if file not likely binary Guesses if a file is binary, raises error if file is not likely binary, then returns to location in file when handle passed to binary_guesser. Args: handle (file): File handle of file thought to be binary num_bytes (int): Bytes of file to read to guess binary, more bytes is often better but takes longer Raises: FormatError: Error raised if file is not likely binary Example: The following example demonstrate how to use binary_guesser. Note: These doctests will not pass, examples are only in doctest format as per convention. bio_utils uses pytests for testing. >>> binary_guesser(open('test.binary'))
codesearchnet
def range(self, start_row=0, max_rows=None):
    fetcher = self._get_row_fetcher(start_row=start_row, max_rows=max_rows)
    return iter(datalab.utils.Iterator(fetcher))
Get an iterator to iterate through a set of table rows. Args: start_row: the row of the table at which to start the iteration (default 0) max_rows: an upper limit on the number of rows to iterate through (default None) Returns: A row iterator.
codesearchnet
def fib(n):
    assert n > 0
    a, b = 1, 1
    for i in range(n - 1):
        a, b = b, a + b
    return a
Fibonacci example function Args: n (int): integer Returns: int: n-th Fibonacci number
juraj-google-style
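A quick check of the 1-indexed convention used by fib above: fib(1) and fib(2) are both 1.

print([fib(n) for n in range(1, 8)])  # [1, 1, 2, 3, 5, 8, 13]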
def __init__(self, channel): self.Range = channel.unary_unary( '/etcdserverpb.KV/Range', request_serializer=rpc__pb2.RangeRequest.SerializeToString, response_deserializer=rpc__pb2.RangeResponse.FromString, ) self.Put = channel.unary_unary( '/etcdserverpb.KV/Put', request_serializer=rpc__pb2.PutRequest.SerializeToString, response_deserializer=rpc__pb2.PutResponse.FromString, ) self.DeleteRange = channel.unary_unary( '/etcdserverpb.KV/DeleteRange', request_serializer=rpc__pb2.DeleteRangeRequest.SerializeToString, response_deserializer=rpc__pb2.DeleteRangeResponse.FromString, ) self.Txn = channel.unary_unary( '/etcdserverpb.KV/Txn', request_serializer=rpc__pb2.TxnRequest.SerializeToString, response_deserializer=rpc__pb2.TxnResponse.FromString, ) self.Compact = channel.unary_unary( '/etcdserverpb.KV/Compact', request_serializer=rpc__pb2.CompactionRequest.SerializeToString, response_deserializer=rpc__pb2.CompactionResponse.FromString, )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def set_status(self, status):
    text = ""
    colour = "#000000"   # placeholder; the original colour value was truncated in this record
    if status == 0:
        text = "OFFLINE"
        colour = "#FF0000"   # placeholder; original value truncated
    elif status == 1:
        text = "STARTING"
        colour = "#FFFF00"   # placeholder; original value truncated
    elif status == 2:
        text = "ONLINE"
        colour = "#00FF00"   # placeholder; original value truncated
    self.status.set(text)
    self.statusbar.config(background=colour)
Updates the status text Args: status (int): The offline/starting/online status of Modis 0: offline, 1: starting, 2: online
juraj-google-style
def get_registered_name(obj):
    if obj in _GLOBAL_CUSTOM_NAMES:
        return _GLOBAL_CUSTOM_NAMES[obj]
    else:
        return obj.__name__
Returns the name registered to an object within the Keras framework. This function is part of the Keras serialization and deserialization framework. It maps objects to the string names associated with those objects for serialization/deserialization. Args: obj: The object to look up. Returns: The name associated with the object, or the default Python name if the object is not registered.
github-repos
def _check_model_use_buffer_offset(model_object):
    if not model_object.metadata:
        return False
    for meta in model_object.metadata:
        if meta.name.decode('utf-8') == 'buffer_location':
            return True
    return False
Checks if a model object uses buffer offsets to store constant buffers. Args: model_object: tflite model, a python object Returns: True if the model_object has the metadata entry "buffer_location" False otherwise
github-repos
def from_parquet(path: str, timestamps: str='timestamp', indexes: Optional[List[str]]=None, **kwargs) -> EventSet:
    import pandas as pd
    if indexes is None:
        indexes = []
    df = pd.read_parquet(path, **kwargs)
    return from_pandas(df, indexes=indexes, timestamps=timestamps)
Reads an [`EventSet`][temporian.EventSet] from a parquet file. Example: ```python >>> temp_file = str(tmp_dir / "temporal_data.parquet") >>> og_eventset = tp.event_set(timestamps=[1,], features={"f1": [0.1]}) >>> tp.to_parquet(og_eventset, temp_file) >>> evset = tp.from_parquet(temp_file) >>> evset indexes: [] features: [('f1', float64)] events: (1 events): timestamps: [1.] 'f1': [0.1] ... ``` Args: path: Path to the file. timestamps: Name of the column to be used as timestamps for the EventSet. indexes: Names of the columns to be used as indexes for the EventSet. If None, a flat EventSet will be created. Returns: EventSet read from file.
github-repos
def load_json(filename, **kwargs):
    with open(filename, 'r', encoding='utf-8') as f:
        return json.load(f, **kwargs)
Load a JSON object from the specified file. Args: filename: Path to the input JSON file. **kwargs: Additional arguments to `json.load`. Returns: The object deserialized from JSON.
codesearchnet
def parse_url(cls, string):
    match = cls.URL_RE.match(string)
    if not match:
        raise InvalidKeyError(cls, string)
    return match.groupdict()
If it can be parsed as a version_guid with no preceding org + offering, returns a dict with key 'version_guid' and the value, If it can be parsed as a org + offering, returns a dict with key 'id' and optional keys 'branch' and 'version_guid'. Raises: InvalidKeyError: if string cannot be parsed -or- string ends with a newline.
codesearchnet
def _randomize_speed(base_speed: int, sigma: int=None) -> int:
    if sigma is None:
        int_sigma = int(base_speed / 4)
    else:
        int_sigma = sigma
    val = MissionWeather._gauss(base_speed, int_sigma)
    if val < 0:
        return 0
    return min(val, 50)
Creates a variation in wind speed Args: base_speed: base wind speed sigma: sigma value for gaussian variation Returns: random wind speed
codesearchnet
def run_inference(self, batch: Sequence[Union[tf.Tensor, torch.Tensor]], model: Union[AutoModel, TFAutoModel], inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]: inference_args = {} if not inference_args else inference_args if not self._framework: if isinstance(batch[0], tf.Tensor): self._framework = 'tf' else: self._framework = 'pt' if self._framework == 'pt' and self._device == 'GPU' and is_gpu_available_torch(): model.to(torch.device('cuda')) if self._inference_fn: return self._inference_fn(batch, model, inference_args, inference_args, self._model_uri) if self._framework == 'tf': return _default_inference_fn_tensorflow(batch, model, self._device, inference_args, self._model_uri) else: return _default_inference_fn_torch(batch, model, self._device, inference_args, self._model_uri)
Runs inferences on a batch of Tensors and returns an Iterable of Tensors Predictions. This method stacks the list of Tensors in a vectorized format to optimize the inference call. Args: batch: A sequence of Tensors. These Tensors should be batchable, as this method will call `tf.stack()`/`torch.stack()` and pass in batched Tensors with dimensions (batch_size, n_features, etc.) into the model's predict() function. model: A Tensorflow/PyTorch model. inference_args (dict[str, Any]): Non-batchable arguments required as inputs to the model's inference function. Unlike Tensors in `batch`, these parameters will not be dynamically batched. Returns: An Iterable of type PredictionResult.
github-repos
def best_training_job(self):
    self._ensure_last_tuning_job()
    tuning_job_describe_result = self.estimator.sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job(
        HyperParameterTuningJobName=self.latest_tuning_job.name)
    try:
        return tuning_job_describe_result['BestTrainingJob']['TrainingJobName']
    except KeyError:
        raise Exception('Best training job not available for tuning job: {}'.format(self.latest_tuning_job.name))
Return name of the best training job for the latest hyperparameter tuning job. Raises: Exception: If there is no best training job available for the hyperparameter tuning job.
codesearchnet
def _ffn_layer_multi_inputs(inputs_list, hparams, ffn_layer_type='dense', name='ffn', kernel_initializer=None, bias_initializer=None, activation=None, pad_remover=None, preprocess=False, postprocess=False): num_inputs = len(inputs_list) assert (num_inputs > 0) if (preprocess and (num_inputs == 1)): inputs_list[0] = common_layers.layer_preprocess(inputs_list[0], hparams) if postprocess: original_inputs = inputs_list[0] main_input = inputs_list[0] original_shape = common_layers.shape_list(main_input) assert (hparams.hidden_size == common_layers.shape_list(main_input)[(- 1)]) for inputs in inputs_list: main_input.get_shape().assert_is_compatible_with(inputs.get_shape()) def remove_pads(x): original_shape = common_layers.shape_list(x) x = tf.reshape(x, tf.concat([[(- 1)], original_shape[2:]], axis=0)) x = tf.expand_dims(pad_remover.remove(x), axis=0) return x if pad_remover: for (i, inputs) in enumerate(inputs_list): inputs_list[i] = remove_pads(inputs) ffn_inputs = inputs_list[0] if (len(inputs_list) != 1): ffn_inputs = tf.concat(inputs_list, axis=(- 1)) if (ffn_layer_type == 'dense'): output = common_layers.dense(ffn_inputs, hparams.hidden_size, name=name, activation=activation, use_bias=True, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer) elif (ffn_layer_type == 'dense_dropconnect'): output = common_layers.dense_dropconnect(ffn_inputs, hparams.hidden_size, name=name, dropconnect_dropout=hparams.dropconnect_dropout, output_activation=activation) postprocess = False elif (ffn_layer_type == 'dense_relu_dense'): output = common_layers.dense_relu_dense(ffn_inputs, hparams.filter_size, hparams.hidden_size, name=name, dropout=hparams.relu_dropout, output_activation=activation) else: raise ValueError(('Unknown ffn_layer type: %s' % ffn_layer_type)) if pad_remover: output = tf.reshape(pad_remover.restore(tf.squeeze(output, axis=0)), original_shape) if postprocess: if (num_inputs == 1): output = common_layers.layer_postprocess(original_inputs, output, hparams) else: hp = copy.copy(hparams) hp.layer_postprocess_sequence = hp.layer_postprocess_sequence.replace('a', '') output = common_layers.layer_postprocess(original_inputs, output, hp) return output
Implements a Feed-forward layer with multiple inputs, pad-removing, etc. Args: inputs_list: list of input tensors hparams: hyper-parameters ffn_layer_type: dense / dense_dropconnect/ dense_relu_dense name: name kernel_initializer: kernel initializer bias_initializer: bias initializer activation: activation function pad_remover: pad remover preprocess: if preprocess the input postprocess: if postprocess the output Returns: a tensor Raises: ValueError: Unknown ffn_layer type.
codesearchnet
def from_json(json): return Point(lat=json['lat'], lon=json['lon'], time=isostr_to_datetime(json['time']))
Creates Point instance from JSON representation Args: json (:obj:`dict`): Must have at least the following keys: lat (float), lon (float), time (string in iso format). Example, { "lat": 9.3470298, "lon": 3.79274, "time": "2016-07-15T15:27:53.574110" } json: map representation of Point instance Returns: :obj:`Point`
codesearchnet
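A usage sketch matching the documented JSON shape; `Point` and `isostr_to_datetime` come from the surrounding library, and the exact attribute exposure (e.g. `Point.from_json`) is assumed rather than confirmed here.

data = {
    "lat": 9.3470298,
    "lon": 3.79274,
    "time": "2016-07-15T15:27:53.574110",
}
p = from_json(data)   # typically exposed as Point.from_json(data)
print(p.lat, p.lon)   # 9.3470298 3.79274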
def has_current_path(self, path, **kwargs):
    try:
        return self.assert_current_path(path, **kwargs)
    except ExpectationNotMet:
        return False
Checks if the page has the given path. Args: path (str | RegexObject): The string or regex that the current "path" should match. **kwargs: Arbitrary keyword arguments for :class:`CurrentPathQuery`. Returns: bool: Whether it matches.
codesearchnet
def make_parts_for(self, field_name, field_data): typ = field_data.field_type subtyp = field_data.field_subtype if typ in ("read", "xadc"): writeable = False else: writeable = True if typ == "time" or typ in ("param", "read") and subtyp == "time": self._make_time_parts(field_name, field_data, writeable) elif typ == "write" and subtyp == "action": self._make_action_part(field_name, field_data) elif typ in ("param", "read", "write", "xadc"): self._make_param_part(field_name, field_data, writeable) elif typ == "bit_out": self._make_out(field_name, field_data, "bit") elif typ == "pos_out": self._make_out(field_name, field_data, "pos") self._make_scale_offset(field_name) self._make_out_capture(field_name, field_data) elif typ == "ext_out": self._make_out_capture(field_name, field_data) elif typ == "bit_mux": self._make_mux(field_name, field_data, "bit") self._make_mux_delay(field_name) elif typ == "pos_mux": self._make_mux(field_name, field_data, "pos") elif typ == "table": self._make_table(field_name, field_data) else: raise ValueError("Unknown type %r subtype %r" % (typ, subtyp))
Create the relevant parts for this field Args: field_name (str): Short field name, e.g. VAL field_data (FieldData): Field data object
juraj-google-style
def create_remoteckan(cls, site_url, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, session=None, **kwargs):
    if not session:
        session = get_session(user_agent, user_agent_config_yaml, user_agent_lookup,
                              prefix=Configuration.prefix,
                              method_whitelist=frozenset(['HEAD', 'TRACE', 'GET', 'POST', 'PUT', 'OPTIONS', 'DELETE']),
                              **kwargs)
        ua = session.headers['User-Agent']
    else:
        ua = kwargs.get('full_agent')
        if not ua:
            ua = UserAgent.get(user_agent, user_agent_config_yaml, user_agent_lookup,
                               prefix=Configuration.prefix, **kwargs)
    return ckanapi.RemoteCKAN(site_url, user_agent=ua, session=session)
Create remote CKAN instance from configuration Args: site_url (str): Site url. user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. session (requests.Session): requests Session object to use. Defaults to calling hdx.utilities.session.get_session() Returns: ckanapi.RemoteCKAN: Remote CKAN instance
codesearchnet
def set_attribute(self, attribute: str, value: Union[(ScalarType, yaml.Node)]) -> None: start_mark = StreamMark('generated node', 0, 0, 0) end_mark = StreamMark('generated node', 0, 0, 0) if isinstance(value, str): value_node = yaml.ScalarNode('tag:yaml.org,2002:str', value, start_mark, end_mark) elif isinstance(value, bool): value_str = ('true' if value else 'false') value_node = yaml.ScalarNode('tag:yaml.org,2002:bool', value_str, start_mark, end_mark) elif isinstance(value, int): value_node = yaml.ScalarNode('tag:yaml.org,2002:int', str(value), start_mark, end_mark) elif isinstance(value, float): value_node = yaml.ScalarNode('tag:yaml.org,2002:float', str(value), start_mark, end_mark) elif (value is None): value_node = yaml.ScalarNode('tag:yaml.org,2002:null', '', start_mark, end_mark) elif isinstance(value, yaml.Node): value_node = value else: raise TypeError('Invalid kind of value passed to set_attribute()') attr_index = self.__attr_index(attribute) if (attr_index is not None): key_node = self.yaml_node.value[attr_index][0] self.yaml_node.value[attr_index] = (key_node, value_node) else: key_node = yaml.ScalarNode('tag:yaml.org,2002:str', attribute, start_mark, end_mark) self.yaml_node.value.append((key_node, value_node))
Sets the attribute to the given value. Use only if is_mapping() returns True. If the attribute does not exist, this adds a new attribute, \ if it does, it will be overwritten. If value is a str, int, float, bool or None, the attribute will \ be set to this value. If you want to set the value to a list or \ dict containing other values, build a yaml.Node and pass it here. Args: attribute: Name of the attribute whose value to change. value: The value to set.
codesearchnet
async def set_start_date(self, date: str, time: str, check_in_duration: int = None):
    date_time = datetime.strptime(date + ' ' + time, '%Y/%m/%d %H:%M')
    res = await self.connection('PUT', 'tournaments/{}'.format(self._id), 'tournament',
                                start_at=date_time, check_in_duration=check_in_duration or 0)
    self._refresh_from_json(res)
set the tournament start date (and check in duration) |methcoro| Args: date: fomatted date as YYYY/MM/DD (2017/02/14) time: fromatted time as HH:MM (20:15) check_in_duration (optional): duration in minutes Raises: APIException
juraj-google-style
def get_densities(self, spin=None):
    if self.densities is None:
        result = None
    elif spin is None:
        if Spin.down in self.densities:
            result = self.densities[Spin.up] + self.densities[Spin.down]
        else:
            result = self.densities[Spin.up]
    else:
        result = self.densities[spin]
    return result
Returns the density of states for a particular spin. Args: spin: Spin Returns: Returns the density of states for a particular spin. If Spin is None, the sum of all spins is returned.
juraj-google-style
def clear_values(self, red=0.0, green=0.0, blue=0.0, alpha=0.0, depth=1.0):
    self.clear_color = (red, green, blue, alpha)
    self.clear_depth = depth
Sets the clear values for the window buffer. Args: red (float): red component green (float): green component blue (float): blue component alpha (float): alpha component depth (float): depth value
juraj-google-style
def ParseLines(self, input_lines): current_macro = None for line in input_lines: if line.startswith('PDDM-'): directive = line.split(' ', 1)[0] if (directive == 'PDDM-DEFINE'): (name, args) = self._ParseDefineLine(line) if self._macros.get(name): raise PDDMError(('Attempt to redefine macro: "%s"' % line)) current_macro = self.MacroDefinition(name, args) self._macros[name] = current_macro continue if (directive == 'PDDM-DEFINE-END'): if (not current_macro): raise PDDMError(('Got DEFINE-END directive without an active macro: "%s"' % line)) current_macro = None continue raise PDDMError(('Hit a line with an unknown directive: "%s"' % line)) if current_macro: current_macro.AppendLine(line) continue if (line.strip() == ''): continue raise PDDMError(('Hit a line that wasn\'t a directive and no open macro definition: "%s"' % line))
Parses list of lines. Args: input_lines: A list of strings of input to parse (no newlines on the strings). Raises: PDDMError if there are any issues.
codesearchnet
def _ParseLogonApplications(self, parser_mediator, registry_key): for application in self._LOGON_APPLICATIONS: command_value = registry_key.GetValueByName(application) if not command_value: continue values_dict = { 'Application': application, 'Command': command_value.GetDataAsObject(), 'Trigger': 'Logon'} event_data = windows_events.WindowsRegistryEventData() event_data.key_path = registry_key.path event_data.offset = registry_key.offset event_data.regvalue = values_dict event_data.source_append = ': Winlogon' event = time_events.DateTimeValuesEvent( registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses the registered logon applications. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
juraj-google-style
def regression_errors(y, y_hat, smoothing_window=0.01, smooth=True):
    errors = np.abs(y - y_hat)[:, 0]
    if not smooth:
        return errors
    smoothing_window = int(smoothing_window * len(y))
    return pd.Series(errors).ewm(span=smoothing_window).mean().values
Compute an array of absolute errors comparing predictions and expected output. If smooth is True, apply EWMA to the resulting array of errors. Args: y (array): Ground truth. y_hat (array): Predictions array. smoothing_window (float): Size of the smoothing window, expressed as a proportion of the total length of y. smooth (bool): whether the returned errors should be smoothed with EWMA. Returns: (array): errors
juraj-google-style
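A small numeric sketch of the unsmoothed path of regression_errors above, assuming NumPy and pandas are imported as in its module:

import numpy as np

y = np.array([[1.0], [2.0], [3.0]])
y_hat = np.array([[0.5], [2.5], [3.0]])
print(regression_errors(y, y_hat, smooth=False))  # [0.5 0.5 0. ]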
def do_ams_delete(endpoint, path, access_token):
    headers = {"DataServiceVersion": dsversion_min,
               "MaxDataServiceVersion": dsversion_max,
               "Accept": json_acceptformat,
               "Accept-Charset": charset,
               "Authorization": 'Bearer ' + access_token,
               "x-ms-version": xmsversion}
    response = requests.delete(endpoint, headers=headers, allow_redirects=False)
    if response.status_code == 301:
        redirected_url = ''.join([response.headers['location'], path])
        response = requests.delete(redirected_url, headers=headers)
    return response
Do a AMS DELETE request and return JSON. Args: endpoint (str): Azure Media Services Initial Endpoint. path (str): Azure Media Services Endpoint Path. access_token (str): A valid Azure authentication token. Returns: HTTP response. JSON body.
juraj-google-style
def _validate_path(path):
    if not path:
        raise ValueError('Path is empty')
    if not isinstance(path, basestring):
        raise TypeError('Path should be a string but is %s (%s).' % (path.__class__, path))
Basic validation of Google Storage paths. Args: path: a Google Storage path. It should have form '/bucket/filename' or '/bucket'. Raises: ValueError: if path is invalid. TypeError: if path is not of type basestring.
codesearchnet
def create(self, **kwargs):
    resource = self.resource.create(kwargs)
    if 'admin_token' in kwargs:
        resource.context.authorize('Gem-Application',
                                   api_token=resource.api_token,
                                   admin_token=kwargs['admin_token'])
    app = self.wrap(resource)
    return self.add(app)
Create a new Application. Args: **kwargs: Arbitrary keyword arguments, including: name (str): A name for the new Application. Returns: A round.Application object if successful.
codesearchnet
def signature(array):
    length = len(array)
    index = _NUM_SIGNATURE_BYTES if length > _NUM_SIGNATURE_BYTES else length
    return array[:index]
Returns the first 262 bytes of the given bytearray as part of the file header signature. Args: array: bytearray to extract the header signature. Returns: First 262 bytes of the file content as bytearray type.
codesearchnet
def scoped_state(self, name_context: Union[str, 'common.NameContext'], state_name: str, io_target=None, metrics_container: Optional['MetricsContainer']=None) -> statesampler_impl.ScopedState:
    if not isinstance(name_context, common.NameContext):
        name_context = common.NameContext(name_context)
    counter_name = CounterName(state_name + '-msecs',
                               stage_name=self._prefix,
                               step_name=name_context.metrics_name(),
                               io_target=io_target)
    if counter_name in self._states_by_name:
        return self._states_by_name[counter_name]
    else:
        output_counter = self._counter_factory.get_counter(counter_name, Counter.SUM)
        self._states_by_name[counter_name] = super()._scoped_state(
            counter_name, name_context, output_counter, metrics_container)
        return self._states_by_name[counter_name]
Returns a ScopedState object associated to a Step and a State. Args: name_context: common.NameContext. It is the step name information. state_name: str. It is the state name (e.g. process / start / finish). io_target: metrics_container: MetricsContainer. The step's metrics container. Returns: A ScopedState that keeps the execution context and is able to switch it for the execution thread.
github-repos
def _run_graph(self, device, dtype, data_format, input_shape, filter_shape, strides, padding, num_iters, warmup_iters): graph = ops.Graph() with graph.as_default(): warmup_outputs, outputs = build_graph(device, dtype, data_format, input_shape, filter_shape, strides, padding, num_iters, warmup_iters) config = config_pb2.ConfigProto() config.graph_options.optimizer_options.opt_level = -1 rewrite_options = config.graph_options.rewrite_options rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON if FLAGS.enable_layout_optimizer else rewriter_config_pb2.RewriterConfig.OFF rewrite_options.dependency_optimization = rewriter_config_pb2.RewriterConfig.OFF with session_lib.Session(graph=graph, config=config) as session: variables.global_variables_initializer().run() session.run(warmup_outputs) start_time = time.time() session.run(outputs) duration = (time.time() - start_time) / num_iters print('%s %s %s inputshape:%s filtershape:%s strides:%s padding:%s %d iters: %.8f sec' % (device, str(dtype), data_format, str(input_shape).replace(' ', ''), str(filter_shape).replace(' ', ''), str(strides).replace(' ', ''), padding, num_iters, duration)) name_template = 'conv2d_{device}_{datatype}_{data_format}_input_shape_{inputshape}_filter_shape_{filtershape}_strides_{strides}_padding_{padding}' self.report_benchmark(name=name_template.format(device=device, datatype=str(dtype), data_format=str(data_format), inputshape=str(input_shape).replace(' ', ''), filtershape=str(filter_shape).replace(' ', ''), strides=str(strides).replace(' ', ''), padding=padding).replace(' ', ''), iters=num_iters, wall_time=duration) return duration
Runs the graph and prints its execution time. Args: device: String, the device to run on. dtype: Data type for the convolution. data_format: A string from: "NHWC" or "NCHW". Data format for input and output data. input_shape: Shape of the input tensor. filter_shape: Shape of the filter tensor. strides: A list of ints. 1-D of length 4. The stride of sliding window for each dimension of input. padding: A string from: "SAME", "VALID". The type of padding algorithm to use. num_iters: Number of iterations to run the conv2d benchmark. warmup_iters: Number of iterations for warmup runs. Returns: The duration of the run in seconds.
github-repos
def get_release_data(self): previous_package = self.get_previous_release() if previous_package: previous_version = previous_package.version previous_revision = previous_package.revision else: previous_version = None previous_revision = None if (self.vcs is None): return dict(vcs='None', previous_version=previous_version) revision = None with self.repo_operation(): revision = self.vcs.get_current_revision() changelog = self.get_changelog() maxlen = config.max_package_changelog_chars if (maxlen and changelog and (len(changelog) > (maxlen + 3))): changelog = (changelog[:maxlen] + '...') return dict(vcs=self.vcs.name(), revision=revision, changelog=changelog, previous_version=previous_version, previous_revision=previous_revision)
Get release data for this release. Returns: dict.
codesearchnet
def unwrap_arguments(xml_response):
    xml_response = xml_response.encode('utf-8')
    try:
        tree = XML.fromstring(xml_response)
    except XML.ParseError:
        # Strip illegal XML characters and retry.
        filtered = illegal_xml_re.sub('', xml_response.decode('utf-8'))\
            .encode('utf-8')
        tree = XML.fromstring(filtered)
    # The first child of the SOAP <Body> element is the <*Response> element;
    # its children are the returned arguments.
    action_response = tree.find(
        "{http://schemas.xmlsoap.org/soap/envelope/}Body")[0]
    return dict((i.tag, i.text or "") for i in action_response)
Extract arguments and their values from a SOAP response. Args: xml_response (str): SOAP/xml response text (unicode, not utf-8). Returns: dict: a dict of ``{argument_name: value}`` items.
juraj-google-style
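A minimal standalone sketch of the same extraction using only the standard library; the GetVolume envelope below is invented for illustration, and the parse mirrors the function body above.

import xml.etree.ElementTree as XML

response = (
    '<?xml version="1.0"?>'
    '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">'
    '<s:Body>'
    '<u:GetVolumeResponse '
    'xmlns:u="urn:schemas-upnp-org:service:RenderingControl:1">'
    '<CurrentVolume>25</CurrentVolume>'
    '</u:GetVolumeResponse>'
    '</s:Body>'
    '</s:Envelope>'
)

tree = XML.fromstring(response.encode('utf-8'))
# First child of <Body> is the <...Response> element; its children are the
# returned arguments.
action_response = tree.find(
    "{http://schemas.xmlsoap.org/soap/envelope/}Body")[0]
print(dict((i.tag, i.text or "") for i in action_response))
# -> {'CurrentVolume': '25'}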
def main(argv=None): if (argv is None): argv = sys.argv[1:] try: executor = None parser = build_args() args = parser.parse_args(args=argv) model = DeviceModel() parser = SensorGraphFileParser() parser.parse_file(args.sensor_graph) parser.compile(model) if (not args.disable_optimizer): opt = SensorGraphOptimizer() opt.optimize(parser.sensor_graph, model=model) graph = parser.sensor_graph sim = SensorGraphSimulator(graph) for stop in args.stop: sim.stop_condition(stop) for watch in args.watch: watch_sel = DataStreamSelector.FromString(watch) graph.sensor_log.watch(watch_sel, watch_printer) if (args.semihost_device is not None): executor = SemihostedRPCExecutor(args.port, args.semihost_device) sim.rpc_executor = executor for mock in args.mock_rpc: (slot, rpc_id, value) = process_mock_rpc(mock) sim.rpc_executor.mock(slot, rpc_id, value) for stim in args.stimulus: sim.stimulus(stim) graph.load_constants() if (args.trace is not None): sim.record_trace() try: if args.connected: sim.step(user_connected, 8) sim.run(accelerated=(not args.realtime)) except KeyboardInterrupt: pass if (args.trace is not None): sim.trace.save(args.trace) finally: if (executor is not None): executor.hw.close() return 0
Main entry point for iotile sensorgraph simulator. This is the iotile-sgrun command line program. It takes an optional set of command line parameters to allow for testing. Args: argv (list of str): An optional set of command line parameters. If not passed, these are taken from sys.argv.
codesearchnet
def DocumentVersionsRow(self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) version_path = self._GetRowValue(query_hash, row, 'version_path') path = self._GetRowValue(query_hash, row, 'path') paths = version_path.split('/') if ((len(paths) < 2) or (not paths[1].isdigit())): user_sid = '' else: user_sid = paths[1] version_path = (self.ROOT_VERSION_PATH + version_path) (path, _, _) = path.rpartition('/') event_data = MacDocumentVersionsEventData() event_data.last_time = self._GetRowValue(query_hash, row, 'last_time') event_data.name = self._GetRowValue(query_hash, row, 'name') event_data.path = path event_data.query = query event_data.user_sid = '{0!s}'.format(user_sid) event_data.version_path = version_path timestamp = self._GetRowValue(query_hash, row, 'version_time') date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a document versions row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
codesearchnet
def docx_text_from_xml(xml: str, config: TextProcessingConfig) -> str: root = ElementTree.fromstring(xml) return docx_text_from_xml_node(root, 0, config)
Converts an XML tree of a DOCX file to string contents. Args: xml: raw XML text config: :class:`TextProcessingConfig` control object Returns: contents as a string
juraj-google-style
def get_topics_strings(topics_words, alpha, vocabulary, topics_to_print=10, words_per_topic=10): alpha = np.squeeze(alpha, axis=0) highest_weight_topics = np.argsort(-alpha, kind="mergesort") top_words = np.argsort(-topics_words, axis=1) res = [] for topic_idx in highest_weight_topics[:topics_to_print]: l = ["index={} alpha={:.2f}".format(topic_idx, alpha[topic_idx])] l += [vocabulary[word] for word in top_words[topic_idx, :words_per_topic]] res.append(" ".join(l)) return np.array(res)
Returns the summary of the learned topics. Arguments: topics_words: KxV tensor with topics as rows and words as columns. alpha: 1xK tensor of prior Dirichlet concentrations for the topics. vocabulary: A mapping of word's integer index to the corresponding string. topics_to_print: The number of topics with highest prior weight to summarize. words_per_topic: Number of words per topic to return. Returns: summary: A np.array with strings.
juraj-google-style
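A small usage sketch, assuming get_topics_strings as defined above is importable; the vocabulary and the random topic-word matrix are invented for illustration.

import numpy as np

np.random.seed(0)
vocabulary = ["apple", "banana", "cherry", "date", "elderberry"]
K, V = 3, len(vocabulary)
topics_words = np.random.dirichlet(np.ones(V), size=K)  # K x V word weights
alpha = np.random.uniform(0.1, 2.0, size=(1, K))        # 1 x K topic priors

for line in get_topics_strings(topics_words, alpha, vocabulary,
                               topics_to_print=2, words_per_topic=3):
    print(line)
# e.g. index=1 alpha=1.53 banana elderberry cherry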
def set_parameter_vector(self, vector, include_frozen=False): v = self.parameter_vector if include_frozen: v[:] = vector else: v[self.unfrozen_mask] = vector self.parameter_vector = v self.dirty = True
Set the parameter values to the given vector Args: vector (array[vector_size] or array[full_size]): The target parameter vector. This must be in the same order as ``parameter_names`` and it should only include frozen parameters if ``include_frozen`` is ``True``. include_frozen (Optional[bool]): Should the frozen parameters be included in the supplied vector? (default: ``False``)
codesearchnet
class HerbertTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = HerbertTokenizer def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', sep_token='</s>', **kwargs): super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs) def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: cls = [self.cls_token_id] sep = [self.sep_token_id] if token_ids_1 is None: return cls + token_ids_0 + sep return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files)
Construct a "Fast" BPE tokenizer for HerBERT (backed by HuggingFace's *tokenizers* library). Peculiarities: - uses BERT's pre-tokenizer: BertPreTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of a punctuation character will be treated separately. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the methods. Users should refer to the superclass for more information regarding methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file.
github-repos
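A short usage sketch via the transformers API; the checkpoint name is an assumption and can be replaced by any published HerBERT checkpoint.

from transformers import HerbertTokenizerFast

# assumed checkpoint name; any HerBERT checkpoint works here
tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
encoded = tokenizer("Kod jest poezją.")
print(encoded["input_ids"])                                  # wrapped in <s> ... </s>
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))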
def __init__(self, context): self._logdir = context.logdir self._multiplexer = context.multiplexer self._plugin_name_to_instance = context.plugin_name_to_instance
Instantiates ScalarsPlugin via TensorBoard core. Args: context: A base_plugin.TBContext instance.
juraj-google-style
def allocate(self, workdir=None, use_smartio=False): if workdir is not None: self.set_workdir(workdir) for i, work in enumerate(self): work.set_workdir(os.path.join(self.workdir, "w" + str(i))) if not hasattr(self, "workdir"): raise RuntimeError("You must call flow.allocate(workdir) if the workdir is not passed to __init__") for work in self: work.allocate(manager=self.manager) work.set_flow(self) for task in work: task.set_work(work) self.check_dependencies() if not hasattr(self, "_allocated"): self._allocated = 0 self._allocated += 1 if use_smartio: self.use_smartio() return self
Allocate the `Flow` i.e. assign the `workdir` and (optionally) the :class:`TaskManager` to the different tasks in the Flow. Args: workdir: Working directory of the flow. Must be specified here if we haven't initialized the workdir in the __init__. Return: self
juraj-google-style
def hardware_version(self):
    version = self._dll.JLINKARM_GetHardwareVersion()
    major = (version // 10000) % 100
    minor = (version // 100) % 100
    return ('%d.%02d' % (major, minor))
Returns the hardware version of the connected J-Link as a major.minor string. Args: self (JLink): the ``JLink`` instance Returns: Hardware version string.
codesearchnet
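A standalone sketch of the digit extraction on an invented raw value, showing how the major and minor parts fall out of the integer returned by the DLL.

version = 94205                      # invented raw value from the DLL
major = (version // 10000) % 100
minor = (version // 100) % 100
print('%d.%02d' % (major, minor))    # -> 9.42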
def convert(in_file, out_file, in_fmt='', out_fmt=''):
    in_file = os.path.expanduser(in_file)
    out_file = os.path.expanduser(out_file)
    if not os.path.exists(in_file):
        raise IOError('Input file {0} does not exist, stopping...'.format(in_file))
    # Guess formats from file extensions when not given explicitly.
    in_fmt = in_fmt.lower() or _guess_format_from_extension(in_file.split('.')[-1].lower())
    out_fmt = out_fmt.lower() or _guess_format_from_extension(out_file.split('.')[-1].lower())
    if not in_fmt or not out_fmt:
        raise ValueError('Cannot determine conversion formats.')
    if in_fmt == out_fmt:
        # Same format on both ends: just copy the file.
        shutil.copyfile(in_file, out_file)
        return out_file
    if in_fmt == 'hdf5':
        from . import hdf5
        data = hdf5.load(in_file)
    elif in_fmt == 'tiff':
        from . import tiff
        data = tiff.load(in_file)
    elif in_fmt == 'png':
        from . import png
        data = png.load(in_file)
    else:
        return _fail_pair_conversion(in_fmt, out_fmt)
    if out_fmt == 'hdf5':
        from . import hdf5
        return hdf5.save(out_file, data)
    elif out_fmt == 'tiff':
        from . import tiff
        return tiff.save(out_file, data)
    elif out_fmt == 'png':
        from . import png
        return png.export_png(out_file, data)
    else:
        return _fail_pair_conversion(in_fmt, out_fmt)
Converts in_file to out_file, guessing datatype in the absence of in_fmt and out_fmt. Arguments: in_file: The name of the (existing) datafile to read out_file: The name of the file to create with converted data in_fmt: Optional. The format of incoming data, if not guessable out_fmt: Optional. The format of outgoing data, if not guessable Returns: String. Output filename
codesearchnet
def sigmoid_accuracy_one_hot(logits, labels, weights_fn=None): with tf.variable_scope("sigmoid_accuracy_one_hot", values=[logits, labels]): del weights_fn predictions = tf.nn.sigmoid(logits) labels = tf.argmax(labels, -1) predictions = tf.argmax(predictions, -1) _, accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions) return accuracy, tf.constant(1.0)
Calculate accuracy for a set, given one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: accuracy (scalar), weights
juraj-google-style
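A minimal check of the metric on two examples, assuming a TF1-style runtime in which the function above is importable; the logits and one-hot labels are invented.

import tensorflow.compat.v1 as tf   # graph-mode API assumed by the metric
tf.disable_eager_execution()

logits = tf.constant([[[[3.0, -2.0]]], [[[-1.0, 4.0]]]])   # [2, 1, 1, 2]
labels = tf.constant([[[[1.0, 0.0]]], [[[0.0, 1.0]]]])     # one-hot targets

acc, weight = sigmoid_accuracy_one_hot(logits, labels)
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())   # metric vars are local vars
    print(sess.run([acc, weight]))               # both rows correct -> [1.0, 1.0]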
def text(self) -> str: if not mime_types.is_text(self.mimetype) and (not mime_types.is_json(self.mimetype)): raise ValueError('Part is not text.') return self.part.text or ''
Returns part text as string. Returns: The text of the part. Raises: ValueError: If the part's mimetype is neither text nor JSON.
github-repos
def signmessage(self, address, message): signature = self.rpc.call("signmessage", address, message) self.logger.debug("Signature: %s" % signature) return signature
Sign a message with the private key of an address. Cryptographically signs a message using ECDSA. Since this requires an address's private key, the wallet must be unlocked first. Args: address (str): address used to sign the message message (str): plaintext message to which apply the signature Returns: str: ECDSA signature over the message
juraj-google-style
def remove(self, source: Source) -> None: self.unload(source) if isinstance(source, RemoteSource): shutil.rmtree(source.location, ignore_errors=True) self.save()
Unregisters a given source with this server. If the given source is a remote source, then its local copy will be removed from disk. Raises: KeyError: if the given source is not registered with this server.
codesearchnet
def RetryUpload(self, job, job_id, error): if self.IsErrorRetryable(error): retry_count = 0 sleep_interval = config.CONFIG["BigQuery.retry_interval"] while retry_count < config.CONFIG["BigQuery.retry_max_attempts"]: time.sleep(sleep_interval.seconds) logging.info("Retrying job_id: %s", job_id) retry_count += 1 try: response = job.execute() return response except errors.HttpError as e: if self.IsErrorRetryable(e): sleep_interval *= config.CONFIG["BigQuery.retry_multiplier"] logging.exception("Error with job: %s, will retry in %s", job_id, sleep_interval) else: raise BigQueryJobUploadError( "Can't retry error code %s. Giving up" " on job: %s." % (e.resp.status, job_id)) else: raise BigQueryJobUploadError("Can't retry error code %s. Giving up on " "job: %s." % (error.resp.status, job_id)) raise BigQueryJobUploadError( "Giving up on job:%s after %s retries." % (job_id, retry_count))
Retry the BigQuery upload job. Using the same job id protects us from duplicating data on the server. If we fail all of our retries we raise. Args: job: BigQuery job object job_id: ID string for this upload job error: errors.HttpError object from the first error Returns: API response object on success, None on failure Raises: BigQueryJobUploadError: if we can't get the bigquery job started after retry_max_attempts
juraj-google-style
def _get_ngrams(n, text): ngram_set = set() text_length = len(text) max_index_ngram_start = (text_length - n) for i in range((max_index_ngram_start + 1)): ngram_set.add(tuple(text[i:(i + n)])) return ngram_set
Calculates n-grams. Args: n: which n-grams to calculate text: An array of tokens Returns: A set of n-grams
codesearchnet
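A quick illustration, assuming _get_ngrams as defined above is importable.

tokens = "the quick brown fox".split()
print(_get_ngrams(2, tokens))
# -> {('the', 'quick'), ('quick', 'brown'), ('brown', 'fox')}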
def correction(self, word): return max(self.candidates(word), key=self.word_probability)
The most probable correct spelling for the word Args: word (str): The word to correct Returns: str: The most likely candidate
juraj-google-style
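A hypothetical usage sketch; it assumes the pyspellchecker-style SpellChecker class that exposes this correction() method is installed under the package name shown.

from spellchecker import SpellChecker   # assumed package / class name

checker = SpellChecker()
print(checker.correction("speling"))     # most probable fix, e.g. "spelling"
print(checker.correction("korrectud"))   # e.g. "corrected"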
def _is_named_tuple(instance): if not isinstance(instance, tuple): return False return hasattr(instance, '_fields') and isinstance(instance._fields, collections_abc.Sequence) and all((isinstance(f, str) for f in instance._fields))
Returns True iff `instance` is a `namedtuple`. Args: instance: An instance of a Python object. Returns: True if `instance` is a `namedtuple`.
github-repos
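A quick illustration, assuming _is_named_tuple as defined above is importable.

import collections

Point = collections.namedtuple("Point", ["x", "y"])
print(_is_named_tuple(Point(1, 2)))   # True
print(_is_named_tuple((1, 2)))        # False
print(_is_named_tuple([1, 2]))        # False (not even a tuple)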
def unwrap_or(self, default: U) -> Union[(T, U)]: return self.unwrap_or_else((lambda : default))
Returns the contained value or ``default``. Args: default: The default value. Returns: The contained value if the :py:class:`Option` is ``Some``, otherwise ``default``. Notes: If you wish to use a result of a function call as the default, it is recommended to use :py:meth:`unwrap_or_else` instead. Examples: >>> Some(0).unwrap_or(3) 0 >>> NONE.unwrap_or(0) 0
codesearchnet
def write_file(self, path, contents): path = self._get_dist_path(path) if (not os.path.isdir(os.path.dirname(path))): os.makedirs(os.path.dirname(path)) if isinstance(contents, bytes): mode = 'wb+' else: mode = 'w' with open(path, mode) as file: file.write(contents)
Write a file of any type to the destination path. Useful for files like robots.txt, manifest.json, and so on. Args: path (str): The name of the file to write to. contents (str or bytes): The contents to write.
codesearchnet
def ack(self, items): for item in items: time_to_ack = item.time_to_ack if time_to_ack is not None: self._manager.ack_histogram.add(time_to_ack) ack_ids = [item.ack_id for item in items] request = types.StreamingPullRequest(ack_ids=ack_ids) self._manager.send(request) self.drop(items)
Acknowledge the given messages. Args: items(Sequence[AckRequest]): The items to acknowledge.
juraj-google-style
def run(self, dag): coupling_map = self._coupling_map ordered_virtual_gates = list(dag.serial_layers()) if (self.initial_layout is None): if self.property_set['layout']: self.initial_layout = self.property_set['layout'] else: self.initial_layout = Layout.generate_trivial_layout(*dag.qregs.values()) if (len(dag.qubits()) != len(self.initial_layout)): raise TranspilerError('The layout does not match the amount of qubits in the DAG') if (len(self._coupling_map.physical_qubits) != len(self.initial_layout)): raise TranspilerError('Mappers require to have the layout to be the same size as the coupling map') mapped_gates = [] layout = self.initial_layout.copy() gates_remaining = ordered_virtual_gates.copy() while gates_remaining: best_step = _search_forward_n_swaps(layout, gates_remaining, coupling_map) layout = best_step['layout'] gates_mapped = best_step['gates_mapped'] gates_remaining = best_step['gates_remaining'] mapped_gates.extend(gates_mapped) mapped_dag = _copy_circuit_metadata(dag, coupling_map) for node in mapped_gates: mapped_dag.apply_operation_back(op=node.op, qargs=node.qargs, cargs=node.cargs) return mapped_dag
Run one pass of the lookahead mapper on the provided DAG. Args: dag (DAGCircuit): the directed acyclic graph to be mapped Returns: DAGCircuit: A dag mapped to be compatible with the coupling_map in the property_set. Raises: TranspilerError: if the coupling map or the layout are not compatible with the DAG
codesearchnet
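A hypothetical usage sketch via Qiskit's pass manager; the module and class names are assumed from the Qiskit release this pass ships with.

from qiskit import QuantumCircuit
from qiskit.transpiler import CouplingMap, PassManager
from qiskit.transpiler.passes import LookaheadSwap   # assumed import path

circuit = QuantumCircuit(3)
circuit.h(0)
circuit.cx(0, 2)                       # not allowed on a linear 0-1-2 device
pm = PassManager()
pm.append(LookaheadSwap(CouplingMap([[0, 1], [1, 2]])))
print(pm.run(circuit))                 # swaps inserted to satisfy the coupling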
def forward(self, inputs, expert_size): input_list = inputs.split(expert_size, dim=0) output_list = [] for i in range(self.num_experts): output_list.append(F.linear(input_list[i], self.weight[i])) results = torch.cat(output_list, dim=0) return results
Forward pass of the GraniteMoeHybridParallelExperts module. Args: inputs (Tensor): Input tensor with tokens grouped by expert along dim 0. expert_size: Number of tokens routed to each expert, used to split `inputs` along dim 0. Returns: Tensor: Output tensor.
github-repos
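A standalone sketch of the split -> per-expert linear -> concat pattern used in the forward pass above; the shapes and expert weights are invented stand-ins.

import torch
import torch.nn.functional as F

num_experts, in_dim, out_dim = 2, 4, 3
weight = torch.randn(num_experts, out_dim, in_dim)   # one matrix per expert
inputs = torch.randn(5, in_dim)                      # tokens grouped by expert
expert_size = [2, 3]                                 # tokens per expert

chunks = inputs.split(expert_size, dim=0)
outputs = torch.cat(
    [F.linear(chunk, weight[i]) for i, chunk in enumerate(chunks)], dim=0)
print(outputs.shape)   # torch.Size([5, 3])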