Columns: code (string, 20–4.93k characters), docstring (string, 33–1.27k characters), source (string, 3 classes)
def convert_pil_frames_to_video(videos: List[VideoInput]) -> List[Union['np.ndarray', 'torch.Tensor']]:
    if not isinstance(videos[0], (list, tuple)):
        return videos
    video_converted = []
    for video in videos:
        # Convert each PIL frame to an array and stack frames into a 4D array.
        video = [np.array(frame) for frame in video]
        video = np.stack(video)
        video_converted.append(video)
    return video_converted
Given a batch of videos, converts each video to a 4D array. If video is already in array type, it is simply returned. We assume that all inputs in the list are in the same format, based on the type of the first element. Args: videos (`VideoInput`): Video inputs to turn into a list of videos.
github-repos
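A minimal usage sketch of the per-video conversion above, assuming only numpy and using plain arrays as stand-ins for PIL frames (the frame contents are hypothetical):

import numpy as np

# Eight fake RGB frames of size 32x32 standing in for PIL images.
frames = [np.zeros((32, 32, 3), dtype=np.uint8) for _ in range(8)]

# Same conversion convert_pil_frames_to_video applies to each video:
video = np.stack([np.array(frame) for frame in frames])
print(video.shape)  # (8, 32, 32, 3): frames, height, width, channels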
def get_string(self, byte_count=_MAX_INT): return self.fdp.ConsumeString(byte_count)
Consume a string from the fuzzing input, up to the given byte count. Args: byte_count: Byte count that defaults to _MAX_INT. Returns: Consumed string based on input bytes and constraints.
github-repos
def resolve(self, reference_path_or_paths: Optional[Union[str, List[str]]]=None) -> Union[Tuple[symbolic.Symbolic, utils.KeyPath], List[Tuple[symbolic.Symbolic, utils.KeyPath]]]: single_input = False if reference_path_or_paths is None: reference_paths = self.reference_paths elif isinstance(reference_path_or_paths, str): reference_paths = [utils.KeyPath.parse(reference_path_or_paths)] single_input = True elif isinstance(reference_path_or_paths, utils.KeyPath): reference_paths = [reference_path_or_paths] single_input = True elif isinstance(reference_path_or_paths, list): paths = [] for path in reference_path_or_paths: if isinstance(path, str): path = utils.KeyPath.parse(path) elif not isinstance(path, utils.KeyPath): raise ValueError("Argument 'reference_path_or_paths' must be None, a string, KeyPath object, a list of strings, or a list of KeyPath objects.") paths.append(path) reference_paths = paths else: raise ValueError("Argument 'reference_path_or_paths' must be None, a string, KeyPath object, a list of strings, or a list of KeyPath objects.") resolved_paths = [] for reference_path in reference_paths: parent = self.sym_parent while parent is not None and (not reference_path.exists(parent)): parent = getattr(parent, 'sym_parent', None) if parent is None: raise ValueError(f"Cannot resolve '{reference_path}': parent not found.") resolved_paths.append((parent, parent.sym_path + reference_path)) return resolved_paths if not single_input else resolved_paths[0]
Resolve reference paths based on the location of this node. Args: reference_path_or_paths: (Optional) a string or KeyPath as a reference path or a list of strings or KeyPath objects as a list of reference paths. If this argument is not provided, prebound reference paths of this object will be used. Returns: A tuple (or list of tuple) of (resolved parent, resolved full path)
github-repos
def _populate_quantization_component_spec(quant_method: _QuantizationMethod) -> None: updated_component_spec = dict() if quant_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_INT8 or quant_method.preset_method == _PresetMethod.METHOD_DYNAMIC_RANGE_INT8: updated_component_spec[_QuantizationComponent.COMPONENT_ACTIVATION] = _QuantizationComponentSpec(quantization_component=_QuantizationComponent.COMPONENT_ACTIVATION, tensor_type=_TensorType.TENSORTYPE_INT_8) updated_component_spec[_QuantizationComponent.COMPONENT_WEIGHT] = _QuantizationComponentSpec(quantization_component=_QuantizationComponent.COMPONENT_WEIGHT, tensor_type=_TensorType.TENSORTYPE_INT_8) updated_component_spec[_QuantizationComponent.COMPONENT_BIAS] = _QuantizationComponentSpec(quantization_component=_QuantizationComponent.COMPONENT_BIAS, tensor_type=_TensorType.TENSORTYPE_INT_32) elif quant_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8: updated_component_spec[_QuantizationComponent.COMPONENT_WEIGHT] = _QuantizationComponentSpec(quantization_component=_QuantizationComponent.COMPONENT_WEIGHT, tensor_type=_TensorType.TENSORTYPE_INT_8) if quant_method.quantization_component_specs: for component_spec in quant_method.quantization_component_specs: if component_spec.quantization_component in [_QuantizationComponent.COMPONENT_WEIGHT, _QuantizationComponent.COMPONENT_ACTIVATION]: if component_spec.tensor_type != _TensorType.TENSORTYPE_INT_8: raise ValueError('Only int8 precision is supported for input operands.') elif component_spec.tensor_type != _TensorType.TENSORTYPE_INT_32: raise ValueError('Only int32 precision is supported for bias.') updated_component_spec[component_spec.quantization_component] = component_spec del quant_method.quantization_component_specs[:] quant_method.quantization_component_specs.extend(updated_component_spec.values()) if (quant_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_INT8 or quant_method.preset_method == _PresetMethod.METHOD_DYNAMIC_RANGE_INT8) and len(quant_method.quantization_component_specs) != 3: raise ValueError('Only 3 components are needed for', quant_method) elif quant_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8 and len(quant_method.quantization_component_specs) != 1: raise ValueError('At least one component spec needs to be specified.')
Populates default values for QuantizationComponentSpec. Args: quant_method: The quantization method to be updated.
github-repos
def render_template(template_name, info, out_path=None):
    env = Environment(loader=PackageLoader('iotile.build', 'config/templates'),
                      trim_blocks=True, lstrip_blocks=True)
    template = env.get_template(template_name)
    result = template.render(info)
    if out_path is not None:
        with open(out_path, 'wb') as outfile:
            outfile.write(result.encode('utf-8'))
    return result
Render a template using the variables in info. You can optionally render to a file by passing out_path. Args: template_name (str): The name of the template to load. This must be a file in config/templates inside this package out_path (str): An optional path of where to save the output file, otherwise it is just returned as a string. info (dict): A dictionary of variables passed into the template to perform substitutions. Returns: string: The rendered template data.
codesearchnet
def parse_device_list(device_list_str, key):
    clean_lines = new_str(device_list_str, 'utf-8').strip().split('\n')
    results = []
    for line in clean_lines:
        tokens = line.strip().split('\t')
        if len(tokens) == 2 and tokens[1] == key:
            results.append(tokens[0])
    return results
Parses a byte string representing a list of devices. The string is generated by calling either adb or fastboot. The tokens in each line are tab-separated. Args: device_list_str: Output of adb or fastboot. key: The token that signifies a device in device_list_str. Returns: A list of android device serial numbers.
juraj-google-style
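An illustrative call mirroring the parsing logic above; the serial numbers are hypothetical, and since the `new_str` decoding helper is not shown here, plain `bytes.decode` is used instead:

raw = b"0123456789ABCDEF\tdevice\nFEDCBA9876543210\tunauthorized\n"

serials = []
for line in raw.decode('utf-8').strip().split('\n'):
    tokens = line.strip().split('\t')
    if len(tokens) == 2 and tokens[1] == 'device':   # key == 'device'
        serials.append(tokens[0])
print(serials)  # ['0123456789ABCDEF']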
def __init__(self, uploader, mode='wb'):
    self._uploader = uploader
    self.mode = mode
    self._position = 0
Initializes the stream. Args: uploader: (Uploader) Filesystem dependent implementation. mode: (string) Python mode attribute for this stream.
github-repos
class SampleTSPredictionOutput(ModelOutput): sequences: Optional[torch.FloatTensor] = None
Base class for time series models' prediction outputs that contains the sampled values from the chosen distribution. Args: sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, prediction_length)` or `(batch_size, num_samples, prediction_length, input_size)`): Sampled values from the chosen distribution.
github-repos
def fts_match_all(self, fts, inv): return all([self.fts_match(fts, s) for s in inv])
Return `True` if all segments in `inv` match the features in `fts`. Args: fts (list): a collection of (value, feature) tuples inv (list): a collection of IPA segments represented as Unicode strings Returns: bool: `True` if all segments in `inv` match the features in `fts`
juraj-google-style
def clean_for_storage(self, data):
    data = self.data_to_unicode(data)
    if isinstance(data, dict):
        for k in dict(data).keys():
            if (k == '_id'):
                del data[k]
                continue
            if ('.' in k):
                # Dots are not allowed in keys by some stores; rename the key.
                new_k = k.replace('.', '_')
                data[new_k] = data[k]
                del data[k]
                k = new_k
            if isinstance(data[k], dict):
                data[k] = self.clean_for_storage(data[k])
            elif isinstance(data[k], list):
                data[k] = [self.clean_for_storage(item) for item in data[k]]
    return data
Clean data in preparation for storage. Removes items whose key is '_id' and replaces '.' in keys with '_'. Dictionary and list values are cleaned recursively. Args: data: Sample data dictionary to be cleaned. Returns: Cleaned data dictionary.
codesearchnet
def resume(self, email, master_token, state=None, sync=True):
    auth = APIAuth(self.OAUTH_SCOPES)
    ret = auth.load(email, master_token, android_id=get_mac())
    if ret:
        self.load(auth, state, sync)
    return ret
Authenticate to Google with the provided master token & sync. Args: email (str): The account to use. master_token (str): The master token. state (dict): Serialized state to load. Raises: LoginException: If there was a problem logging in.
codesearchnet
def merge(tup):
    if not all(tuple(ts.shape[1:] == tup[0].shape[1:] for ts in tup[1:])):
        raise ValueError('Timeseries to merge must have compatible shapes')
    indices = np.vstack(tuple(ts.tspan for ts in tup)).argsort()
    return np.vstack((tup))[indices]
Merge several timeseries Arguments: tup: sequence of Timeseries, with the same shape except for axis 0 Returns: Resulting merged timeseries which can have duplicate time points.
juraj-google-style
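A sketch of the underlying concatenate-and-sort-by-time step with plain numpy arrays; the `tspan` attribute of the real Timeseries class is simulated here with bare arrays of time points:

import numpy as np

a = np.array([[0.0, 1.0], [2.0, 1.5]])   # rows observed at times 0.0 and 2.0
b = np.array([[1.0, 0.5]])               # row observed at time 1.0
tspans = (np.array([0.0, 2.0]), np.array([1.0]))

indices = np.hstack(tspans).argsort()     # order rows by their time points
merged = np.vstack((a, b))[indices]
print(merged)                             # rows ordered by time: 0.0, 1.0, 2.0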
def softsign(x): return ops.softsign(x)
Softsign activation function. Softsign is defined as: `softsign(x) = x / (abs(x) + 1)`. Args: x: Input tensor.
github-repos
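A plain numpy sketch of the softsign formula quoted in the docstring, independent of the ops backend used above:

import numpy as np

def softsign_np(x):
    # softsign(x) = x / (|x| + 1), squashes values into (-1, 1)
    return x / (np.abs(x) + 1.0)

x = np.array([-10.0, -1.0, 0.0, 1.0, 10.0])
print(softsign_np(x))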
def token_request(self, authorization_code): if (not self._client.token_endpoint): return None request = {'grant_type': 'authorization_code', 'code': authorization_code, 'redirect_uri': self._redirect_uri} logger.debug('making token request: %s', request) client_auth_method = self._client.registration_response.get('token_endpoint_auth_method', 'client_secret_basic') auth_header = _ClientAuthentication(self._client.client_id, self._client.client_secret)(client_auth_method, request) resp = self._provider_configuration.requests_session.post(self._client.token_endpoint, data=request, headers=auth_header).json() logger.debug('received token response: %s', json.dumps(resp)) if ('error' in resp): token_resp = TokenErrorResponse(**resp) else: token_resp = AccessTokenResponse(**resp) token_resp.verify(keyjar=self._client.keyjar) if ('id_token' in resp): token_resp['id_token_jwt'] = resp['id_token'] return token_resp
Makes a token request. If the 'token_endpoint' is not configured in the provider metadata, no request will be made. Args: authorization_code (str): authorization code issued to client after user authorization Returns: Union[AccessTokenResponse, TokenErrorResponse, None]: The parsed token response, or None if no token request was performed.
codesearchnet
def instantiate(config):
    for handle, cfg in list(config["apps"].items()):
        if not cfg.get("enabled", True):
            continue
        app = get_application(handle)
        instances[app.handle] = app(cfg)
instantiate all registered vodka applications Args: config (dict or MungeConfig): configuration object
juraj-google-style
class MeanSquaredLogarithmicError(MeanMetricWrapper):

    def __init__(self, name='mean_squared_logarithmic_error', dtype=None):
        super(MeanSquaredLogarithmicError, self).__init__(
            mean_squared_logarithmic_error, name, dtype=dtype)
Computes the mean squared logarithmic error between `y_true` and `y_pred`. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.MeanSquaredLogarithmicError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result().numpy() 0.12011322 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... sample_weight=[1, 0]) >>> m.result().numpy() 0.24022643 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.MeanSquaredLogarithmicError()]) ```
github-repos
def extract_lookups(value):
    lookups = set()
    if isinstance(value, basestring):
        lookups = lookups.union(extract_lookups_from_string(value))
    elif isinstance(value, list):
        for v in value:
            lookups = lookups.union(extract_lookups(v))
    elif isinstance(value, dict):
        for v in value.values():
            lookups = lookups.union(extract_lookups(v))
    return lookups
Recursively extracts any stack lookups within the data structure. Args: value (one of str, list, dict): a structure that contains lookups to output values Returns: set: set of lookups, if any
codesearchnet
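The same recursive set-union pattern in a self-contained form; the `${...}` lookup syntax below is only an assumption for illustration and is not necessarily what extract_lookups_from_string matches:

import re

def extract_refs(value):
    refs = set()
    if isinstance(value, str):
        refs |= set(re.findall(r'\$\{([^}]+)\}', value))
    elif isinstance(value, list):
        for v in value:
            refs |= extract_refs(v)
    elif isinstance(value, dict):
        for v in value.values():
            refs |= extract_refs(v)
    return refs

print(extract_refs({'a': ['${stack.output}', 'plain'], 'b': '${other.ref}'}))
# two references found: 'stack.output' and 'other.ref'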
def get_identifier(identifier, module_globals, module_name):
    if isinstance(identifier, six.string_types):
        fn = module_globals.get(identifier)
        if fn is None:
            raise ValueError('Unknown {}: {}'.format(module_name, identifier))
        return fn
    elif callable(identifier):
        return identifier
    else:
        raise ValueError('Could not interpret identifier')
Helper utility to retrieve the callable function associated with a string identifier. Args: identifier: The identifier. Could be a string or function. module_globals: The global objects of the module. module_name: The module name Returns: The callable associated with the identifier.
juraj-google-style
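A hedged usage sketch of resolving a string to a callable, mirroring the helper above; here a plain dict stands in for a module's globals:

def relu(x):
    return max(0, x)

module_globals = {'relu': relu}

fn = module_globals.get('relu')
if fn is None:
    raise ValueError('Unknown activation: relu')
print(fn(-3), fn(5))  # 0 5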
def value_matrix(self):
    if self.__value_matrix:
        return self.__value_matrix
    self.__value_matrix = [
        [value_dp.data for value_dp in value_dp_list]
        for value_dp_list in self.value_dp_matrix
    ]
    return self.__value_matrix
Converted rows of tabular data. Returns: |list| or |tuple|: Table rows.
codesearchnet
def _is_test_class(obj): return tf_inspect.isclass(obj) and 'TestCase' in (p.__name__ for p in tf_inspect.getmro(obj))
Check if arbitrary object is a test class (not a test object!). Args: obj: An arbitrary object from within a module. Returns: True iff obj is a test class inheriting at some point from a class named "TestCase". This is because we write tests using different underlying test libraries.
github-repos
def setup_and_load_epoch(hparams, data_dir, which_epoch_data=None): t2t_env = rl_utils.setup_env(hparams, batch_size=hparams.real_batch_size, max_num_noops=hparams.max_num_noops) if (which_epoch_data is not None): if (which_epoch_data == 'last'): which_epoch_data = infer_last_epoch_num(data_dir) assert isinstance(which_epoch_data, int), '{}'.format(type(which_epoch_data)) t2t_env.start_new_epoch(which_epoch_data, data_dir) else: t2t_env.start_new_epoch((- 999)) return t2t_env
Load T2TGymEnv with data from one epoch. Args: hparams: hparams. data_dir: data directory. which_epoch_data: data from which epoch to load. Returns: env.
codesearchnet
def _parse(json_str: str, primitive_cls: Type[Date], *, default_timezone: str) -> Date: try: dt = datetime.datetime.strptime(json_str, '%Y') return _primitive_time_utils.build_date_like(dt, default_timezone, _primitive_time_utils.DateTimePrecision.YEAR, primitive_cls) except ValueError: pass try: dt = datetime.datetime.strptime(json_str, '%Y-%m') return _primitive_time_utils.build_date_like(dt, default_timezone, _primitive_time_utils.DateTimePrecision.MONTH, primitive_cls) except ValueError: pass try: dt = datetime.datetime.strptime(json_str, '%Y-%m-%d') return _primitive_time_utils.build_date_like(dt, default_timezone, _primitive_time_utils.DateTimePrecision.DAY, primitive_cls) except ValueError: pass raise fhir_errors.InvalidFhirError('Invalid Date.')
Parses the json_str into a Date FHIR primitive. Args: json_str: The raw JSON string to parse. primitive_cls: The FHIR primitive to parse into. default_timezone: The default timezone to use when parsing in the event that no timezone information is present. Returns: A FHIR primitive Date. Raises: fhir_errors.InvalidFhirError: In the event that no datetime format was able to properly parse the json_str.
github-repos
def get_flux_biases_from_cache(cur, chains, system_name, chain_strength, max_age=3600): select = '\n SELECT\n flux_bias\n FROM flux_bias_view WHERE\n chain_length = :chain_length AND\n nodes = :nodes AND\n chain_strength = :chain_strength AND\n system_name = :system_name AND\n insert_time >= :time_limit;\n ' encoded_data = {'chain_strength': _encode_real(chain_strength), 'system_name': system_name, 'time_limit': (datetime.datetime.now() + datetime.timedelta(seconds=(- max_age)))} flux_biases = {} for chain in chains: encoded_data['chain_length'] = len(chain) encoded_data['nodes'] = json.dumps(sorted(chain), separators=(',', ':')) row = cur.execute(select, encoded_data).fetchone() if (row is None): raise MissingFluxBias flux_bias = _decode_real(*row) if (flux_bias == 0): continue flux_biases.update({v: flux_bias for v in chain}) return flux_biases
Determine the flux biases for all of the given chains, system and chain strength. Args: cur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function is meant to be run within a :obj:`with` statement. chains (iterable): An iterable of chains. Each chain is a collection of nodes. Chains in embedding act as one node. system_name (str): The unique name of a system. chain_strength (float): The magnitude of the negative quadratic bias that induces the given chain in an Ising problem. max_age (int, optional, default=3600): The maximum age (in seconds) for the flux_bias offsets. Returns: dict: A dict where the keys are the nodes in the chains and the values are the flux biases.
codesearchnet
def _with_tensor_ranks_only(self) -> 'TypeSpec': def relax(value): if isinstance(value, TypeSpec): return value._with_tensor_ranks_only() elif isinstance(value, tensor_shape.TensorShape) and value.rank is not None: return tensor_shape.TensorShape([None] * value.rank) else: return value return self._deserialize(nest.map_structure(relax, self._serialize()))
Returns a TypeSpec compatible with `self`, with tensor shapes relaxed. Returns: A `TypeSpec` that is compatible with `self`, where any `TensorShape` information has been relaxed to include only tensor rank (and not the dimension sizes for individual axes).
github-repos
def delete_pipeline(self, pipeline_key):
    if pipeline_key:
        uri = '/'.join([
            self.api_uri,
            self.pipelines_suffix,
            pipeline_key
        ])
        return self._req('delete', uri)
    else:
        return requests.codes.bad_request, None
Deletes the pipeline specified by the key. Args: pipeline_key: key of the pipeline to delete. Returns: (status code for the DELETE request, success message dict); expect (200, {'success': 'true'}) for successful execution.
juraj-google-style
def read(self, size=None): if not self._is_open: raise IOError('Not opened.') if self._current_offset < 0: raise IOError( 'Invalid current offset: {0:d} value less than zero.'.format( self._current_offset)) if self._file_data is None or self._current_offset >= self._size: return b'' if size is None: size = self._size if self._current_offset + size > self._size: size = self._size - self._current_offset start_offset = self._current_offset self._current_offset += size return self._file_data[start_offset:self._current_offset]
Reads a byte string from the file-like object at the current offset. The function will read a byte string of the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed.
juraj-google-style
def operate(self, point):
    affine_point = np.array([point[0], point[1], point[2], 1])
    return np.dot(self.affine_matrix, affine_point)[0:3]
Apply the operation on a point. Args: point: Cartesian coordinate. Returns: Coordinates of point after operation.
codesearchnet
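A self-contained numpy sketch of the same homogeneous-coordinate trick, using a hypothetical 4x4 affine matrix that translates a point by (1, 2, 3):

import numpy as np

affine_matrix = np.array([[1, 0, 0, 1],
                          [0, 1, 0, 2],
                          [0, 0, 1, 3],
                          [0, 0, 0, 1]], dtype=float)

point = (0.5, 0.5, 0.5)
affine_point = np.array([point[0], point[1], point[2], 1])
print(np.dot(affine_matrix, affine_point)[0:3])  # [1.5 2.5 3.5]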
def read_string(self, key, embedded=True): data = None if (key is not None): key_type = self.variable_type(key) data = self.db.read(key.strip()) if (data is not None): try: data = json.loads(data) if embedded: data = self.read_embedded(data, key_type) if (data is not None): data = u'{}'.format(data) except ValueError as e: err = u'Failed loading JSON data ({}). Error: ({})'.format(data, e) self.tcex.log.error(err) else: self.tcex.log.warning(u'The key field was None.') return data
Read method of CRUD operation for string data. Args: key (string): The variable to read from the DB. embedded (boolean): Resolve embedded variables. Returns: (string): Results retrieved from DB.
codesearchnet
def unicode_convert(obj): try: if isinstance(obj, dict): return {unicode_convert(key): unicode_convert(value) for key, value in obj.items()} elif isinstance(obj, list): return [unicode_convert(element) for element in obj] elif isinstance(obj, str): return obj elif isinstance(obj, six.text_type): return obj.encode('utf-8') elif isinstance(obj, six.integer_types): return obj else: return obj except: return obj
Converts unicode objects to ascii. Args: obj (object): The object to convert. Returns: The object converted to ascii, if possible. For ``dict`` and ``list``, the object type is maintained.
juraj-google-style
def extend(self, other):
    orig_num_lines = self.num_lines()
    self._lines.extend(other.lines)
    for line_index in other.font_attr_segs:
        self._font_attr_segs[orig_num_lines + line_index] = other.font_attr_segs[line_index]
    for key in other.annotations:
        if isinstance(key, int):
            self._annotations[orig_num_lines + key] = other.annotations[key]
        else:
            self._annotations[key] = other.annotations[key]
Extend this instance of RichTextLines with another instance. The extension takes effect on the text lines, the font attribute segments, as well as the annotations. The line indices in the font attribute segments and the annotations are adjusted to account for the existing lines. If there are duplicate, non-line-index fields in the annotations, the value from the input argument "other" will override that in this instance. Args: other: (RichTextLines) The other RichTextLines instance to be appended at the end of this instance.
github-repos
def inverse(self):
    inverse_circ = self.copy(name=(self.name + '_dg'))
    inverse_circ.data = []
    for (inst, qargs, cargs) in reversed(self.data):
        inverse_circ.data.append((inst.inverse(), qargs, cargs))
    return inverse_circ
Invert this circuit. This is done by recursively inverting all gates. Returns: QuantumCircuit: the inverted circuit Raises: QiskitError: if the circuit cannot be inverted.
codesearchnet
def run_schedule(inputs: Dict[EventSetNode, EventSet], schedule: Schedule, verbose: int, check_execution: bool, force_garbage_collector_interval: Optional[float]=10) -> Dict[EventSetNode, EventSet]: data = {**inputs} gc_begin_time = time.time() num_steps = len(schedule.steps) for step_idx, step in enumerate(schedule.steps): operator_def = step.op.definition implementation_cls = implementation_lib.get_implementation_class(operator_def.key) implementation = implementation_cls(step.op) if verbose == 1: print(f' {step_idx + 1} / {num_steps}: {step.op.operator_key()}', file=sys.stderr, end='', flush=True) elif verbose >= 2: print('=============================', file=sys.stderr) print(f'{step_idx + 1} / {num_steps}: Run {step.op}', file=sys.stderr, flush=True) operator_inputs = {input_key: data[input_node] for input_key, input_node in step.op.inputs.items()} if verbose >= 2: print(f'Inputs:\n{operator_inputs}\n', file=sys.stderr, flush=True) begin_time = time.perf_counter() if check_execution: operator_outputs = implementation.call(**operator_inputs) else: operator_outputs = implementation(**operator_inputs) end_time = time.perf_counter() if verbose == 1: print(f' [{end_time - begin_time:.5f} s]', file=sys.stderr, flush=True) elif verbose >= 2: print(f'Outputs:\n{operator_outputs}\n', file=sys.stderr) print(f'Duration: {end_time - begin_time} s', file=sys.stderr, flush=True) for output_key, output_node in step.op.outputs.items(): output_evset = operator_outputs[output_key] output_evset._internal_node = output_node data[output_node] = output_evset for node in step.released_nodes: assert node in data del data[node] if force_garbage_collector_interval is not None and time.time() - gc_begin_time >= force_garbage_collector_interval: begin_gc = time.time() if verbose >= 2: print('Garbage collection', file=sys.stderr, flush=True, end='') gc.collect() gc_begin_time = time.time() if verbose >= 2: print(f' [{gc_begin_time - begin_gc:.5f} s]', file=sys.stderr, flush=True) return data
Evaluates a schedule on a dictionary of input [`EventSets`][temporian.EventSet]. Args: inputs: Mapping of EventSetNodes to materialized EventSets. schedule: Sequence of operators to apply on the data. verbose: If >0, prints details about the execution on the standard error output. The larger the number, the more information is displayed. check_execution: If `True`, data of the intermediate results of the operators is checked against its expected structure and raises if it differs. force_garbage_collector_interval: If set, triggers the garbage collection every "force_garbage_collector_interval" seconds.
github-repos
def add_outputs(self, *args, **kwargs):
    if 'names' in kwargs:
        return [self._outputs.add(arg, name=name)
                for arg, name in zip(args, kwargs['names'])]
    else:
        return [self._outputs.add(arg) for arg in args]
Add a sequence of outputs to the function invocation. Args: *args: List of outputs to be converted (should be tf.Tensor). **kwargs: Optionally, 'names' gives a list of names for the outputs. Returns: Wrapped outputs (identity standins that have additional metadata). These are also tf.Tensor's.
github-repos
def add_ephemeral_listener(self, callback, event_type=None):
    listener_id = uuid4()
    self.ephemeral_listeners.append(
        {
            'uid': listener_id,
            'callback': callback,
            'event_type': event_type
        }
    )
    return listener_id
Add a callback handler for ephemeral events going to this room. Args: callback (func(room, event)): Callback called when an ephemeral event arrives. event_type (str): The event_type to filter for. Returns: uuid.UUID: Unique id of the listener, can be used to identify the listener.
juraj-google-style
def find_element_by_class(self, class_, update=False) -> Elements: return self.find_element(by=By.CLASS, value=class_, update=update)
Finds an element by class. Args: class_: The class of the element to be found. update: If the interface has changed, this option should be True. Returns: The element if it was found. Raises: NoSuchElementException - If the element wasn't found. Usage: element = driver.find_element_by_class('foo')
codesearchnet
def _remove_files(files):
    logger.debug('Request for file removal (_remove_files()).')
    for fn in files:
        if os.path.exists(fn):
            logger.debug(("Removing '%s'." % fn))
            os.remove(fn)
Remove all given files. Args: files (list): List of filenames, which will be removed.
codesearchnet
def outer_horizontal_border_top(self): return u'{lm}{lv}{hz}{rv}'.format(lm=(' ' * self.margins.left), lv=self.border_style.top_left_corner, rv=self.border_style.top_right_corner, hz=self.outer_horizontals())
The complete outer top horizontal border section, including left and right margins. Returns: str: The top menu border.
codesearchnet
def _inchi_labels(mol): obconv = ob.OBConversion() obconv.SetOutFormat(str('inchi')) obconv.AddOption(str('a'), ob.OBConversion.OUTOPTIONS) obconv.AddOption(str('X'), ob.OBConversion.OUTOPTIONS, str('DoNotAddH')) inchi_text = obconv.WriteString(mol) match = re.search('InChI=(?P<inchi>.+)\\nAuxInfo=.+/N:(?P<labels>[0-9,;]+)/(E:(?P<eq_atoms>[0-9,;\\(\\)]*)/)?', inchi_text) inchi = match.group('inchi') label_text = match.group('labels') eq_atom_text = match.group('eq_atoms') heavy_atom_labels = tuple([int(i) for i in label_text.replace(';', ',').split(',')]) eq_atoms = [] if (eq_atom_text is not None): eq_tokens = re.findall('\\(((?:[0-9]+,)+[0-9]+)\\)', eq_atom_text.replace(';', ',')) eq_atoms = tuple([tuple([int(i) for i in t.split(',')]) for t in eq_tokens]) return (heavy_atom_labels, eq_atoms, inchi)
Get the inchi canonical labels of the heavy atoms in the molecule Args: mol: The molecule. OpenBabel OBMol object Returns: The label mappings. List of tuple of canonical label, original label List of equivalent atoms.
codesearchnet
def __init__(self, rfc2579_date_time_tuple=None): super(RFC2579DateTime, self).__init__() self._number_of_seconds = None self._precision = definitions.PRECISION_100_MILLISECONDS self.day_of_month = None self.hours = None self.deciseconds = None self.minutes = None self.month = None self.seconds = None self.year = None if rfc2579_date_time_tuple: if len(rfc2579_date_time_tuple) < 10: raise ValueError( 'Invalid RFC2579 date-time tuple 10 elements required.') if rfc2579_date_time_tuple[0] < 0 or rfc2579_date_time_tuple[0] > 65536: raise ValueError('Year value out of bounds.') if rfc2579_date_time_tuple[1] not in range(1, 13): raise ValueError('Month value out of bounds.') days_per_month = self._GetDaysPerMonth( rfc2579_date_time_tuple[0], rfc2579_date_time_tuple[1]) if (rfc2579_date_time_tuple[2] < 1 or rfc2579_date_time_tuple[2] > days_per_month): raise ValueError('Day of month value out of bounds.') if rfc2579_date_time_tuple[3] not in range(0, 24): raise ValueError('Hours value out of bounds.') if rfc2579_date_time_tuple[4] not in range(0, 60): raise ValueError('Minutes value out of bounds.') if rfc2579_date_time_tuple[5] not in range(0, 60): raise ValueError('Seconds value out of bounds.') if rfc2579_date_time_tuple[6] < 0 or rfc2579_date_time_tuple[6] > 9: raise ValueError('Deciseconds value out of bounds.') if rfc2579_date_time_tuple[7] not in ('+', '-'): raise ValueError('Direction from UTC value out of bounds.') if rfc2579_date_time_tuple[8] not in range(0, 14): raise ValueError('Hours from UTC value out of bounds.') if rfc2579_date_time_tuple[9] not in range(0, 60): raise ValueError('Minutes from UTC value out of bounds.') time_zone_offset = ( (rfc2579_date_time_tuple[8] * 60) + rfc2579_date_time_tuple[9]) if rfc2579_date_time_tuple[7] != '-': time_zone_offset = -time_zone_offset self.year, self.month, self.day_of_month, self.hours, self.minutes = ( self._AdjustForTimeZoneOffset( rfc2579_date_time_tuple[0], rfc2579_date_time_tuple[1], rfc2579_date_time_tuple[2], rfc2579_date_time_tuple[3], rfc2579_date_time_tuple[4], time_zone_offset)) self.deciseconds = rfc2579_date_time_tuple[6] self.seconds = rfc2579_date_time_tuple[5] self._number_of_seconds = self._GetNumberOfSecondsFromElements( self.year, self.month, self.day_of_month, self.hours, self.minutes, self.seconds)
Initializes a RFC2579 date-time. Args: rfc2579_date_time_tuple: (Optional[tuple[int, int, int, int, int, int, int]]): RFC2579 date-time time, contains year, month, day of month, hours, minutes, seconds and deciseconds. Raises: ValueError: if the system time is invalid.
juraj-google-style
def convert_fields(fields, field_values): _convert_fields(fields, field_values, context=_ConversionContext.VALUE)
Type-checks and converts each field in `field_values` (in place). Args: fields: A list of `ExtensionTypeField` objects. field_values: A `dict` mapping field names to values. Must contain an entry for each field. I.e., `set(field_values.keys())` must be equal to `set([f.name for f in fields])`. Raises: ValueError: If the keys of `field_values` do not match the names of the fields in `fields`. TypeError: If any value in `field_values` does not have the type indicated by the corresponding `ExtensionTypeField` object.
github-repos
def _get_credentials(vcap_services, service_name=None): service_name = (service_name or os.environ.get('STREAMING_ANALYTICS_SERVICE_NAME', None)) services = vcap_services['streaming-analytics'] creds = None for service in services: if (service['name'] == service_name): creds = service['credentials'] break if (creds is None): raise ValueError((('Streaming Analytics service ' + str(service_name)) + ' was not found in VCAP_SERVICES')) return creds
Retrieves the credentials of the VCAP Service of the specified `service_name`. If `service_name` is not specified, it takes the information from STREAMING_ANALYTICS_SERVICE_NAME environment variable. Args: vcap_services (dict): A dict representation of the VCAP Services information. service_name (str): One of the service name stored in `vcap_services` Returns: dict: A dict representation of the credentials. Raises: ValueError: Cannot find `service_name` in `vcap_services`
codesearchnet
def build_list(self, title=None, items=None):
    list_card = _ListSelector(self._speech, display_text=self._display_text,
                              title=title, items=items)
    return list_card
Presents the user with a vertical list of multiple items. Allows the user to select a single item. Selection generates a user query containing the title of the list item *Note* Returns a completely new object, and does not modify the existing response object Therefore, to add items, must be assigned to new variable or call the method directly after initializing list example usage: simple = ask('I speak this text') mylist = simple.build_list('List Title') mylist.add_item('Item1', 'key1') mylist.add_item('Item2', 'key2') return mylist Arguments: title {str} -- Title displayed at top of list card Returns: _ListSelector -- [_Response object exposing the add_item method]
codesearchnet
def edit_distance_1(self, word):
    word = word.lower()
    if self._check_if_should_check(word) is False:
        return {word}
    letters = self._word_frequency.letters
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = [L + R[1:] for L, R in splits if R]
    transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
    replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
    inserts = [L + c + R for L, R in splits for c in letters]
    return set(deletes + transposes + replaces + inserts)
Compute all strings that are one edit away from `word` using only the letters in the corpus Args: word (str): The word for which to calculate the edit distance Returns: set: The set of strings that are edit distance one from the \ provided word
juraj-google-style
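A standalone sketch of the same candidate generation, assuming a fixed lowercase alphabet in place of the corpus-derived letters used above:

import string

def edits1(word, letters=string.ascii_lowercase):
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = [L + R[1:] for L, R in splits if R]
    transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
    replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
    inserts = [L + c + R for L, R in splits for c in letters]
    return set(deletes + transposes + replaces + inserts)

candidates = edits1('cat')   # set of strings one edit away from 'cat'
print('cst' in candidates, 'cats' in candidates)  # True True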
def DEFINE_alias(name, original_name, flag_values=FLAGS, module_name=None): if (original_name not in flag_values): raise UnrecognizedFlagError(original_name) flag = flag_values[original_name] class _Parser(ArgumentParser): 'The parser for the alias flag calls the original flag parser.' def parse(self, argument): flag.parse(argument) return flag.value class _FlagAlias(Flag): 'Overrides Flag class so alias value is copy of original flag value.' @property def value(self): return flag.value @value.setter def value(self, value): flag.value = value help_msg = ('Alias for --%s.' % flag.name) DEFINE_flag(_FlagAlias(_Parser(), flag.serializer, name, flag.default, help_msg, boolean=flag.boolean), flag_values, module_name)
Defines an alias flag for an existing one. Args: name: A string, name of the alias flag. original_name: A string, name of the original flag. flag_values: FlagValues object with which the flag will be registered. module_name: A string, the name of the module that defines this flag. Raises: gflags.FlagError: UnrecognizedFlagError: if the referenced flag doesn't exist. DuplicateFlagError: if the alias name has been used by some existing flag.
codesearchnet
def __densify_border(self): if isinstance(self._input_geom, MultiPolygon): polygons = [polygon for polygon in self._input_geom] else: polygons = [self._input_geom] points = [] for polygon in polygons: if (len(polygon.interiors) == 0): exterior = LineString(polygon.exterior) points += self.__fixed_interpolation(exterior) else: exterior = LineString(polygon.exterior) points += self.__fixed_interpolation(exterior) for j in range(len(polygon.interiors)): interior = LineString(polygon.interiors[j]) points += self.__fixed_interpolation(interior) return points
Densify the border of a polygon. The border is densified by a given factor (by default: 0.5). The complexity of the polygon's geometry is evaluated in order to densify the borders of its interior rings as well. Returns: list: a list of points where each point is represented by a list of its reduced coordinates Example: [[X1, Y1], [X2, Y2], ..., [Xn, Yn]
codesearchnet
def _FormatOtherFileToken(self, token_data):
    timestamp = token_data.microseconds + (
        token_data.timestamp * definitions.MICROSECONDS_PER_SECOND)
    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
        timestamp=timestamp)
    date_time_string = date_time.CopyToDateTimeString()
    return {
        'string': token_data.name.rstrip('\x00'),
        'timestamp': date_time_string}
Formats an other file token as a dictionary of values. Args: token_data (bsm_token_data_other_file32): AUT_OTHER_FILE32 token data. Returns: dict[str, str]: token values.
juraj-google-style
def get_examples(self, compact=False): examples = copy.deepcopy(self._examples) if not compact: return examples def make_compact(d): if not isinstance(d, dict): return for key in d: if isinstance(d[key], dict): inner_d = d[key] if len(inner_d) == 1 and '.tag' in inner_d: d[key] = inner_d['.tag'] else: make_compact(inner_d) if isinstance(d[key], list): for item in d[key]: make_compact(item) for example in examples.values(): if (isinstance(example.value, dict) and len(example.value) == 1 and '.tag' in example.value): example.value = example.value['.tag'] else: make_compact(example.value) return examples
Returns an OrderedDict mapping labels to Example objects. Args: compact (bool): If True, union members of void type are converted to their compact representation: no ".tag" key or containing dict, just the tag as a string.
juraj-google-style
def wait_all(jobs, timeout=None): return Job._wait(jobs, timeout, concurrent.futures.ALL_COMPLETED)
Return when all of the specified jobs have completed or timeout expires. Args: jobs: a Job or list of Jobs to wait on. timeout: a timeout in seconds to wait for. None (the default) means no timeout. Returns: A list of the jobs that have now completed or None if there were no jobs.
codesearchnet
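The standard-library analogue of waiting for all jobs, using concurrent.futures directly; this is a sketch of the underlying mechanism, not of the Job wrapper class used above:

import concurrent.futures as cf

def work(n):
    return n * n

with cf.ThreadPoolExecutor(max_workers=2) as pool:
    futures = [pool.submit(work, n) for n in range(4)]
    done, not_done = cf.wait(futures, timeout=None,
                             return_when=cf.ALL_COMPLETED)
    print(sorted(f.result() for f in done))  # [0, 1, 4, 9]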
def create(self):
    input_params = {'type': self.type, 'data': self.data, 'name': self.name,
                    'priority': self.priority, 'port': self.port, 'ttl': self.ttl,
                    'weight': self.weight, 'flags': self.flags, 'tags': self.tags}
    data = self.get_data(('domains/%s/records' % self.domain), type=POST,
                         params=input_params)
    if data:
        self.id = data['domain_record']['id']
Creates a new record for a domain. Args: type (str): The type of the DNS record (e.g. A, CNAME, TXT). name (str): The host name, alias, or service being defined by the record. data (int): Variable data depending on record type. priority (int): The priority for SRV and MX records. port (int): The port for SRV records. ttl (int): The time to live for the record, in seconds. weight (int): The weight for SRV records. flags (int): An unsigned integer between 0-255 used for CAA records. tags (string): The parameter tag for CAA records. Valid values are "issue", "wildissue", or "iodef"
codesearchnet
def mark_all_as_done(self, **kwargs):
    result = self.gitlab.http_post('/todos/mark_as_done', **kwargs)
    try:
        return int(result)
    except ValueError:
        return 0
Mark all the todos as done. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabTodoError: If the server failed to perform the request Returns: int: The number of todos maked done
juraj-google-style
def get_group_key(self, devices):
    with self._lock:
        devices_key = ','.join(devices)
        if devices_key not in self._known_groups:
            self._known_groups[devices_key] = self._get_new_group_key(devices)
        return self._known_groups[devices_key]
Returns a group key for the list of local devices. The same group key is returned if the list of local devices is the same. Args: devices: a list of local canonical device strings in a collective group. Returns: a group key.
github-repos
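A minimal sketch of the same lock-protected memoization pattern, with a simple counter standing in for the real group-key allocation:

import threading
import itertools

class GroupKeyCache:
    def __init__(self):
        self._lock = threading.Lock()
        self._known_groups = {}
        self._counter = itertools.count(1)

    def get_group_key(self, devices):
        with self._lock:
            devices_key = ','.join(devices)
            if devices_key not in self._known_groups:
                self._known_groups[devices_key] = next(self._counter)
            return self._known_groups[devices_key]

cache = GroupKeyCache()
print(cache.get_group_key(['/gpu:0', '/gpu:1']))  # 1
print(cache.get_group_key(['/gpu:0', '/gpu:1']))  # 1 again (same devices)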
def array_to_base64_png(array): array = np.array(array, dtype=np.float32) if len(array.shape) != 2: raise ValueError( "Expected rank-2 array; received rank-%d array." % len(array.shape)) if not np.size(array): raise ValueError( "Cannot encode an empty array (size: %s) as image." % (array.shape,)) is_infinity = np.isinf(array) is_positive = array > 0.0 is_positive_infinity = np.logical_and(is_infinity, is_positive) is_negative_infinity = np.logical_and(is_infinity, np.logical_not(is_positive)) is_nan = np.isnan(array) finite_indices = np.where(np.logical_and(np.logical_not(is_infinity), np.logical_not(is_nan))) if np.size(finite_indices): minval = np.min(array[finite_indices]) maxval = np.max(array[finite_indices]) scaled = np.array((array - minval) / (maxval - minval) * 255, dtype=np.uint8) rgb = np.repeat(np.expand_dims(scaled, -1), IMAGE_COLOR_CHANNELS, axis=-1) else: rgb = np.zeros(array.shape + (IMAGE_COLOR_CHANNELS,), dtype=np.uint8) rgb[is_positive_infinity] = POSITIVE_INFINITY_RGB rgb[is_negative_infinity] = NEGATIVE_INFINITY_RGB rgb[is_nan] = NAN_RGB image_encoded = base64.b64encode(encoder.encode_png(rgb)) return image_encoded
Convert an array into base64-enoded PNG image. Args: array: A 2D np.ndarray or nested list of items. Returns: A base64-encoded string the image. The image is grayscale if the array is 2D. The image is RGB color if the image is 3D with lsat dimension equal to 3. Raises: ValueError: If the input `array` is not rank-2, or if the rank-2 `array` is empty.
juraj-google-style
def act_on_cloned_repo(self, path: Union[str, pathlib.Path], api) -> Optional[HookResult]:
Do something with a cloned repo. Args: path: Path to the repo. api: An instance of :py:class:`repobee.github_api.GitHubAPI`. Returns: optionally returns a HookResult namedtuple for reporting the outcome of the hook. May also return None, in which case no reporting will be performed for the hook.
juraj-google-style
def plot_efficiency(self, key='wall_time', what='good+bad', nmax=5, ax=None, **kwargs): (ax, fig, plt) = get_ax_fig_plt(ax=ax) lw = kwargs.pop('linewidth', 2.0) msize = kwargs.pop('markersize', 10) what = what.split('+') timers = self.timers() peff = self.pefficiency() n = len(timers) xx = np.arange(n) ax.set_prop_cycle(color=['g', 'b', 'c', 'm', 'y', 'k']) (lines, legend_entries) = ([], []) if ('good' in what): good = peff.good_sections(key=key, nmax=nmax) for g in good: yy = peff[g][key] (line,) = ax.plot(xx, yy, '-->', linewidth=lw, markersize=msize) lines.append(line) legend_entries.append(g) if ('bad' in what): bad = peff.bad_sections(key=key, nmax=nmax) for b in bad: yy = peff[b][key] (line,) = ax.plot(xx, yy, '-.<', linewidth=lw, markersize=msize) lines.append(line) legend_entries.append(b) if ('total' not in legend_entries): yy = peff['total'][key] (total_line,) = ax.plot(xx, yy, 'r', linewidth=lw, markersize=msize) lines.append(total_line) legend_entries.append('total') ax.legend(lines, legend_entries, loc='best', shadow=True) ax.set_xlabel('Total_NCPUs') ax.set_ylabel('Efficiency') ax.grid(True) labels = [('MPI=%d, OMP=%d' % (t.mpi_nprocs, t.omp_nthreads)) for t in timers] ax.set_xticks(xx) ax.set_xticklabels(labels, fontdict=None, minor=False, rotation=15) return fig
Plot the parallel efficiency Args: key: Parallel efficiency is computed using the wall_time. what: Specifies what to plot: `good` for sections with good parallel efficiency. `bad` for sections with bad efficiency. Options can be concatenated with `+`. nmax: Maximum number of entries in plot ax: matplotlib :class:`Axes` or None if a new figure should be created. ================ ==================================================== kwargs Meaning ================ ==================================================== linewidth matplotlib linewidth. Default: 2.0 markersize matplotlib markersize. Default: 10 ================ ==================================================== Returns: `matplotlib` figure
codesearchnet
def disable_plugin(self, name):
    url = self._url('/plugins/{0}/disable', name)
    res = self._post(url)
    self._raise_for_status(res)
    return True
Disable an installed plugin. Args: name (string): The name of the plugin. The ``:latest`` tag is optional, and is the default if omitted. Returns: ``True`` if successful
codesearchnet
def add_vcenter(self, **kwargs): config = ET.Element('config') vcenter = ET.SubElement(config, 'vcenter', xmlns='urn:brocade.com:mgmt:brocade-vswitch') id = ET.SubElement(vcenter, 'id') id.text = kwargs.pop('id') credentials = ET.SubElement(vcenter, 'credentials') url = ET.SubElement(credentials, 'url') url.text = kwargs.pop('url') username = ET.SubElement(credentials, 'username') username.text = kwargs.pop('username') password = ET.SubElement(credentials, 'password') password.text = kwargs.pop('password') try: self._callback(config) return True except Exception as error: logging.error(error) return False
Add vCenter on the switch Args: id(str) : Name of an established vCenter url (bool) : vCenter URL username (str): Username of the vCenter password (str): Password of the vCenter callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: None
codesearchnet
def _to_tensor(self, array, min_dim, return_tensors):
    if return_tensors == 'pt':
        array = torch.from_numpy(array)
        return array.unsqueeze(1) if array.ndim < min_dim else array
    return array
Convert numpy array to tensor and ensure proper dimensionality. Args: array: The numpy array to convert min_dim: The minimum number of dimensions the result should have return_tensors: The type of tensors to return (e.g., "pt" for PyTorch tensors) Returns: The converted array or tensor with proper dimensions
github-repos
def modify_model_backprop(model, backprop_modifier): modified_model = _MODIFIED_MODEL_CACHE.get((model, backprop_modifier)) if (modified_model is not None): return modified_model model_path = os.path.join(tempfile.gettempdir(), (next(tempfile._get_candidate_names()) + '.h5')) try: model.save(model_path) modifier_fn = _BACKPROP_MODIFIERS.get(backprop_modifier) if (modifier_fn is None): raise ValueError("'{}' modifier is not supported".format(backprop_modifier)) modifier_fn(backprop_modifier) with tf.get_default_graph().gradient_override_map({'Relu': backprop_modifier}): modified_model = load_model(model_path) _MODIFIED_MODEL_CACHE[(model, backprop_modifier)] = modified_model return modified_model finally: os.remove(model_path)
Creates a copy of model by modifying all activations to use a custom op to modify the backprop behavior. Args: model: The `keras.models.Model` instance. backprop_modifier: One of `{'guided', 'rectified'}` Returns: A copy of model with modified activations for backwards pass.
codesearchnet
def remove(path):
    if os.path.isdir(path):
        return __rmtree(path)
    else:
        return __rmfile(path)
Delete a file or directory. Args: path (str): Path to the file or directory that needs to be deleted. Returns: bool: True if the operation is successful, False otherwise.
codesearchnet
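A self-contained equivalent using the standard library (os/shutil) rather than the module-private __rmtree/__rmfile helpers, which are not shown in this entry:

import os
import shutil

def remove_path(path):
    try:
        if os.path.isdir(path):
            shutil.rmtree(path)      # recursively delete a directory
        else:
            os.remove(path)          # delete a single file
        return True
    except OSError:
        return False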
def _create_stage_submission_env_dependencies(temp_dir): try: local_dependency_file_path = os.path.join(temp_dir, SUBMISSION_ENV_DEPENDENCIES_FILE) dependencies = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']) local_python_path = f'Python Path: {sys.executable}\n' with open(local_dependency_file_path, 'w') as f: f.write(local_python_path + str(dependencies)) return [Stager._create_file_stage_to_artifact(local_dependency_file_path, SUBMISSION_ENV_DEPENDENCIES_FILE)] except Exception as e: _LOGGER.warning("Couldn't stage a list of installed dependencies in submission environment. Got exception: %s", e) return []
Create and stage a file with list of dependencies installed in the submission environment. This list can be used at runtime to compare against the dependencies in the runtime environment. This allows runners to warn users about any potential dependency mismatches and help debug issues related to environment mismatches. Args: temp_dir: path to temporary location where the file should be downloaded. Returns: A list of ArtifactInformation of local file path that will be staged to the staging location.
github-repos
def __init__(self, channel):
    self.ReportErrorEvent = channel.unary_unary(
        "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent",
        request_serializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_report__errors__service__pb2.ReportErrorEventRequest.SerializeToString,
        response_deserializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_report__errors__service__pb2.ReportErrorEventResponse.FromString,
    )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def get_sleep_timer(self):
    resp = self.avTransport.GetRemainingSleepTimerDuration([('InstanceID', 0)])
    if resp['RemainingSleepTimerDuration']:
        times = resp['RemainingSleepTimerDuration'].split(':')
        return (int(times[0]) * 3600) + (int(times[1]) * 60) + int(times[2])
    else:
        return None
Retrieves remaining sleep time, if any Returns: int or NoneType: Number of seconds left in timer. If there is no sleep timer currently set it will return None.
codesearchnet
def heightmap_has_land_on_border(hm: np.ndarray, waterlevel: float) -> bool: return bool( lib.TCOD_heightmap_has_land_on_border(_heightmap_cdata(hm), waterlevel) )
Returns True if the map edges are below ``waterlevel``, otherwise False. Args: hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions. waterLevel (float): The water level to use. Returns: bool: True if the map edges are below ``waterlevel``, otherwise False.
juraj-google-style
def add_dr(self, dr):
    this_bin = int(dr / self.dr)
    if this_bin > self.number_of_bins:
        raise IndexError('dr is larger than rdf max_r')
    self.data[this_bin] += 1
Add an observed interatomic distance to the g(r) data at dr. Args: dr (Float): the interatomic distance, dr. Returns: None
juraj-google-style
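A sketch of the same fixed-width binning used to accumulate a g(r) histogram; the bin width, cutoff, and distances below are hypothetical:

import numpy as np

dr, max_r = 0.1, 10.0
number_of_bins = int(max_r / dr)
data = np.zeros(number_of_bins + 1, dtype=int)

for distance in (0.05, 0.14, 0.16, 9.99):
    this_bin = int(distance / dr)
    if this_bin > number_of_bins:
        raise IndexError('dr is larger than rdf max_r')
    data[this_bin] += 1

print(data[0], data[1], int(data.sum()))  # 1 2 4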
def assert_child_key_has_value(self, parent, child, caller): assert parent, ("parent parameter must be specified.") assert child, ("child parameter must be specified.") self.assert_key_has_value(parent, caller) try: child_exists = child in self[parent] except TypeError as err: raise ContextError( f"context['{parent}'] must be iterable and contain '{child}' " f"for {caller}. {err}") from err if child_exists: if self[parent][child] is None: raise KeyInContextHasNoValueError( f"context['{parent}']['{child}'] must have a value for " f"{caller}.") else: raise KeyNotInContextError( f"context['{parent}']['{child}'] doesn't " f"exist. It must exist for {caller}.")
Assert that context contains key that has child which has a value. Args: parent: parent key child: validate this sub-key of parent exists AND isn't None. caller: string. calling function name - this used to construct error messages Raises: KeyNotInContextError: Key doesn't exist KeyInContextHasNoValueError: context[key] is None AssertionError: if key is None
juraj-google-style
def list_media_services(access_token, subscription_id):
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/providers/microsoft.media/mediaservices?api-version=', MEDIA_API])
    return do_get(endpoint, access_token)
List the media services in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body.
codesearchnet
def get_all_subclasses(asts):
    hierarchy = {}
    for ast in asts:
        hierarchy.update(ast.Visit(visitors.ExtractSuperClasses()))

    def filter_superclasses(superclasses):
        return [superclass for superclass in superclasses if is_complete(superclass)]

    hierarchy = {cls: filter_superclasses(superclasses)
                 for cls, superclasses in hierarchy.items() if is_complete(cls)}
    return utils.invert_dict(hierarchy)
Compute a class->subclasses mapping. Args: asts: A list of ASTs. Returns: A dictionary, mapping instances of pytd.Type (types) to lists of pytd.Class (the derived classes).
github-repos
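The class-to-subclasses inversion in isolation, with a plain dict standing in for the pytd hierarchy and for utils.invert_dict:

def invert_mapping(class_to_supers):
    subclasses = {}
    for cls, supers in class_to_supers.items():
        for sup in supers:
            subclasses.setdefault(sup, []).append(cls)
    return subclasses

hierarchy = {'Dog': ['Animal'], 'Cat': ['Animal'], 'Animal': ['object']}
print(invert_mapping(hierarchy))
# {'Animal': ['Dog', 'Cat'], 'object': ['Animal']}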
def format_tasks(tasks): return [('%d : %s (%s)' % (task.key.id(), task.description, ('done' if task.done else ('created %s' % task.created)))) for task in tasks]
Converts a list of tasks to a list of string representations. Args: tasks: A list of the tasks to convert. Returns: A list of string formatted tasks.
codesearchnet
def double_width(k, v, p):
    if isinstance(p, Conv2D) and k.key == 'filters':
        return 2 * v
    return v
A rebind rule for doubling the filters for Conv2D layers. Args: k: A `pg.KeyPath` object representing the location of current node. v: The value of current node. p: The parent of current node. Returns: The output value for current node.
github-repos
def __init__(self, site=None):
    super(FileFormatError, self).__init__()
    self.site = site
Initialise a new ``FileFormatError`` object. Args: site (str): Remote site name to display in error message
juraj-google-style
def call_for_each_replica(strategy, fn, args=None, kwargs=None): if args is None: args = () if kwargs is None: kwargs = {} if isinstance(fn, def_function.Function): if fn._jit_compile and all([_is_gpu_device(d) for d in strategy.extended.worker_devices]): return _call_for_each_replica(strategy, fn, args, kwargs) if strategy not in _cfer_fn_cache: _cfer_fn_cache[strategy] = weakref.WeakKeyDictionary() wrapped = _cfer_fn_cache[strategy].get(fn) if wrapped is None: def wrapped_fn(*args, **kwargs): return call_for_each_replica(strategy, fn.python_function, args, kwargs) wrapped = fn._clone(python_function=wrapped_fn) _cfer_fn_cache[strategy][fn] = wrapped return wrapped(*args, **kwargs) if context.executing_eagerly(): logging.log_first_n(logging.WARN, 'Using %s eagerly has significant overhead currently. We will be working on improving this in the future, but for now please wrap `call_for_each_replica` or `experimental_run` or `run` inside a tf.function to get the best performance.' % strategy.__class__.__name__, 5) else: fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx()) return _call_for_each_replica(strategy, fn, args, kwargs)
Call `fn` on each worker devices(replica). It's highly recommended to wrap the call to this function inside a `tf.function`, otherwise the performance is poor. Args: strategy: `tf.distribute.Strategy`. fn: function to call on each worker devices. args: positional arguments to `fn`. kwargs: keyword arguments to `fn`. Returns: Wrapped returned value of `fn` from all replicas.
github-repos
def smash(self): self._initialize_smash() try: stack_name = self._config.get('environment', {}).get('stack_name', None) response = self._cloudFormation.describe_stacks(StackName=stack_name) logging.debug('smash pre-flight returned: {}'.format(json.dumps(response, indent=4, default=json_util.default))) except ClientError as wtf: logging.warning('your stack is in another castle [0].') return False except Exception as wtf: logging.error('failed to find intial status of smash candidate: {}'.format(wtf)) return False response = self._cloudFormation.delete_stack(StackName=stack_name) logging.info('delete started for stack: {}'.format(stack_name)) logging.debug('delete_stack returned: {}'.format(json.dumps(response, indent=4))) return self.poll_stack()
Smash the given stack Args: None Returns: True if True Todo: Figure out what could go wrong and take steps to hanlde problems.
codesearchnet
def select_charset(self, charset): charsets = {'USA':0, 'France':1, 'Germany':2, 'UK':3, 'Denmark':4, 'Sweden':5, 'Italy':6, 'Spain':7, 'Japan':8, 'Norway':9, 'Denmark II':10, 'Spain II':11, 'Latin America':12, 'South Korea':13, 'Legal':64, } if charset in charsets: self.send(chr(27)+'R'+chr(charsets[charset])) else: raise RuntimeError('Invalid charset.')
Select international character set and changes codes in code table accordingly Args: charset: String. The character set we want. Returns: None Raises: RuntimeError: Invalid charset.
juraj-google-style
def load_examples(tmp_dir, prop_train=0.09, prop_val=0.01): infile = generator_utils.maybe_download(tmp_dir, _TAR, _URL) tf.logging.info('Loading examples') all_examples = [] for i, d in enumerate(csv.DictReader(gzip.open(infile), delimiter='\t')): if i % 100000 == 0: tf.logging.info('%d examples have been loaded....' % i) ex = {x: int(y) if y.isdigit() else y for x, y in d.items()} all_examples.append(ex) random.seed(1) random.shuffle(all_examples) n_train = int(len(all_examples) * prop_train) n_val = n_train + int(len(all_examples) * prop_val) train = all_examples[:n_train] val = all_examples[n_train:n_val] test = [] for e in all_examples[n_val:]: if e['n_intervening'] == e['n_diff_intervening']: test.append(e) return all_examples, train, val, test
Loads exampls from the tsv file. Args: tmp_dir: temp directory. prop_train: proportion of the train data prop_val: proportion of the validation data Returns: All examples in the dataset pluse train, test, and development splits.
juraj-google-style
def on_train_batch_begin(self, batch, logs=None): self.on_batch_begin(batch, logs=logs)
Called at the beginning of a training batch in `fit` methods. Subclasses should override for any actions to run. Note that if the `steps_per_execution` argument to `compile` in `tf.keras.Model` is set to `N`, this method will only be called every `N` batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of `model.train_step`. Typically, the values of the `Model`'s metrics are returned. Example: `{'loss': 0.2, 'accuracy': 0.7}`.
github-repos
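A short sketch of the intended override pattern, using a toy model and zero-valued data:

import tensorflow as tf

class BatchLogger(tf.keras.callbacks.Callback):
    def on_train_batch_begin(self, batch, logs=None):
        # Runs before each training batch (every N batches when
        # steps_per_execution=N is passed to compile()).
        print('starting batch', batch)

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer='sgd', loss='mse')
model.fit(tf.zeros((8, 4)), tf.zeros((8, 1)), epochs=1, callbacks=[BatchLogger()])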
def _sd_of_runs(stats, mean, key='runs'): num_runs = len(stats[key]) first = stats[key][0] standard_deviation = {} for stat_key in first: if isinstance(first[stat_key], numbers.Number): standard_deviation[stat_key] = math.sqrt((sum((((run[stat_key] - mean[stat_key]) ** 2) for run in stats[key])) / float(num_runs))) return standard_deviation
Obtain the standard deviation of stats. Args: stats: dict; A set of stats, structured as above. mean: dict; Mean for each key in stats. key: str; Optional key to determine where the list of runs is found in stats. Returns: dict; Standard deviation for each numeric key across the runs.
codesearchnet
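A tiny illustration of the expected `stats` structure (values made up); non-numeric entries are skipped:

stats = {'runs': [{'time': 1.0, 'score': 10, 'label': 'a'},
                  {'time': 3.0, 'score': 14, 'label': 'b'}]}
mean = {'time': 2.0, 'score': 12.0}
print(_sd_of_runs(stats, mean))  # {'time': 1.0, 'score': 2.0}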
def add(self, rid, data, raise_on_error=True): cache_data = {'cache-date': self._dt_to_epoch(datetime.now()), 'cache-data': data} return self.ds.post(rid, cache_data, raise_on_error)
Write cache data to the data store. Args: rid (str): The record identifier. data (dict): The record data. raise_on_error (bool): If True and not r.ok this method will raise a RuntimeError. Returns: object: Python requests response.
juraj-google-style
def sparse_categorical_accuracy(y_true, y_pred): y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred) y_true = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_true) y_pred_rank = y_pred.shape.ndims y_true_rank = y_true.shape.ndims if y_true_rank is not None and y_pred_rank is not None and (len(backend.int_shape(y_true)) == len(backend.int_shape(y_pred))): y_true = array_ops.squeeze(y_true, [-1]) y_pred = math_ops.argmax(y_pred, axis=-1) if backend.dtype(y_pred) != backend.dtype(y_true): y_pred = math_ops.cast(y_pred, backend.dtype(y_true)) return math_ops.cast(math_ops.equal(y_true, y_pred), backend.floatx())
Calculates how often predictions match integer labels. Standalone usage: >>> y_true = [2, 1] >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] >>> m = tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred) >>> assert m.shape == (2,) >>> m.numpy() array([0., 1.], dtype=float32) You can provide logits of classes as `y_pred`, since argmax of logits and probabilities are same. Args: y_true: Integer ground truth values. y_pred: The prediction values. Returns: Sparse categorical accuracy values.
github-repos
def shutdown(cluster_info, queues=['input']): def _shutdown(iter): host = util.get_ip_address() executor_id = util.read_executor_id() mgr = _get_manager(cluster_info, host, executor_id) for node in cluster_info: if ((node['host'] == host) and (node['executor_id'] == executor_id)): tb_pid = node['tb_pid'] if (tb_pid != 0): logging.info('Stopping tensorboard (pid={0})'.format(tb_pid)) subprocess.Popen(['kill', str(tb_pid)]) logging.info('Stopping all queues') for q in queues: try: queue = mgr.get_queue(q) logging.info('Feeding None into {0} queue'.format(q)) queue.put(None, block=True) except (AttributeError, KeyError): msg = "Queue '{}' not found on this node, check for exceptions on other nodes.".format(q) raise Exception(msg) logging.info("Setting mgr.state to 'stopped'") mgr.set('state', 'stopped') return [True] return _shutdown
Stops all TensorFlow nodes by feeding ``None`` into the multiprocessing.Queues. Args: :cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc). :queues: *INTERNAL_USE* Returns: A nodeRDD.mapPartitions() function
codesearchnet
def get_unique_directives(ast): if (not ast.directives): return dict() result = dict() for directive_obj in ast.directives: directive_name = directive_obj.name.value if (directive_name in ALLOWED_DUPLICATED_DIRECTIVES): pass elif (directive_name in result): raise GraphQLCompilationError(u'Directive was unexpectedly applied twice in the same location: {} {}'.format(directive_name, ast.directives)) else: result[directive_name] = directive_obj return result
Return a dict of directive name to directive object for the given AST node. Any directives that are allowed to exist more than once on any AST node are ignored. For any directives that may appear at most once, we verify that they are not duplicated, raising GraphQLCompilationError if we find them more than once on the AST node. Args: ast: GraphQL AST node, obtained from the graphql library Returns: dict of string to directive object
codesearchnet
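A quick sketch using the graphql-core parser; this assumes get_unique_directives and its module-level names (ALLOWED_DUPLICATED_DIRECTIVES, GraphQLCompilationError) are importable from the compiler package:

from graphql import parse

document = parse('{ name @output(out_name: "name") }')
field = document.definitions[0].selection_set.selections[0]
print(get_unique_directives(field))  # {'output': <DirectiveNode for @output>}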
def get_local_current_sample(ip):
    valid_ip_pat = re.compile(
        r"^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
    )
    if not valid_ip_pat.match(ip):
        raise ValueError("ip address invalid")
    # NOTE: the endpoint path after "http:" was truncated in the source; the
    # device is queried directly at the given IP address.
    url = "http://" + ip
    headers = {
        "Content-Type": "application/json"
    }
    r = requests.get(url, headers=headers)
    return r.json()
Gets current sample from *local* Neurio device IP address. This is a static method. It doesn't require a token to authenticate. Note, call get_user_information to determine local Neurio IP addresses. Args: ip (string): address of local Neurio device Returns: dictionary object containing current sample information
juraj-google-style
def _PrintTSKPartitionIdentifiersOverview(self, volume_system, volume_identifiers): header = 'The following partitions were found:\n' self._output_writer.Write(header) column_names = ['Identifier', 'Offset (in bytes)', 'Size (in bytes)'] table_view = views.CLITabularTableView(column_names=column_names) for volume_identifier in sorted(volume_identifiers): volume = volume_system.GetVolumeByIdentifier(volume_identifier) if (not volume): raise errors.SourceScannerError('Partition missing for identifier: {0:s}.'.format(volume_identifier)) volume_extent = volume.extents[0] volume_offset = '{0:d} (0x{0:08x})'.format(volume_extent.offset) volume_size = self._FormatHumanReadableSize(volume_extent.size) table_view.AddRow([volume.identifier, volume_offset, volume_size]) self._output_writer.Write('\n') table_view.Write(self._output_writer) self._output_writer.Write('\n')
Prints an overview of TSK partition identifiers. Args: volume_system (dfvfs.TSKVolumeSystem): volume system. volume_identifiers (list[str]): allowed volume identifiers. Raises: SourceScannerError: if a volume cannot be resolved from the volume identifier.
codesearchnet
def model_variables(scope=None): return ops.get_collection(ops.GraphKeys.MODEL_VARIABLES, scope)
Returns all variables in the MODEL_VARIABLES collection. Args: scope: (Optional.) A string. If supplied, the resulting list is filtered to include only items whose `name` attribute matches `scope` using `re.match`. Items without a `name` attribute are never returned if a scope is supplied. The choice of `re.match` means that a `scope` without special tokens filters by prefix. Returns: A list of model Variable objects.
github-repos
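A small compat.v1 sketch showing a variable added to the MODEL_VARIABLES collection and retrieved:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

w = tf.get_variable(
    'w', shape=[2],
    collections=[tf.GraphKeys.GLOBAL_VARIABLES, tf.GraphKeys.MODEL_VARIABLES])
print(tf.model_variables())  # [<tf.Variable 'w:0' shape=(2,) dtype=float32>]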
def add_stickiness(self): stickiness_dict = {} env = boto3.session.Session(profile_name=self.env, region_name=self.region) elbclient = env.client('elb') elb_settings = self.properties['elb'] for listener in elb_settings.get('ports'): if listener.get('stickiness'): sticky_type = listener['stickiness']['type'].lower() externalport = int(listener['loadbalancer'].split(':')[(- 1)]) policyname_tmp = '{0}-{1}-{2}-{3}' if (sticky_type == 'app'): cookiename = listener['stickiness']['cookie_name'] policy_key = cookiename.replace('.', '') policyname = policyname_tmp.format(self.app, sticky_type, externalport, policy_key) elbclient.create_app_cookie_stickiness_policy(LoadBalancerName=self.app, PolicyName=policyname, CookieName=cookiename) stickiness_dict[externalport] = policyname elif (sticky_type == 'elb'): cookie_ttl = listener['stickiness'].get('cookie_ttl', None) policyname = policyname_tmp.format(self.app, sticky_type, externalport, cookie_ttl) if cookie_ttl: elbclient.create_lb_cookie_stickiness_policy(LoadBalancerName=self.app, PolicyName=policyname, CookieExpirationPeriod=cookie_ttl) else: elbclient.create_lb_cookie_stickiness_policy(LoadBalancerName=self.app, PolicyName=policyname) stickiness_dict[externalport] = policyname return stickiness_dict
Adds stickiness policies to the created ELB. Returns: dict: A dict mapping external ports to stickiness policy names:: example: { 80: "$policy_name" }
codesearchnet
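A sketch of the `properties['elb']` layout this method reads; the values are illustrative and inferred from the lookups above:

properties = {
    'elb': {
        'ports': [
            {   # ELB-generated cookie stickiness on port 80, 300 second TTL
                'loadbalancer': 'HTTP:80',
                'stickiness': {'type': 'elb', 'cookie_ttl': 300},
            },
            {   # application cookie stickiness on port 443
                'loadbalancer': 'HTTPS:443',
                'stickiness': {'type': 'app', 'cookie_name': 'JSESSIONID'},
            },
        ],
    },
}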
def check_and_update_resources(num_cpus, num_gpus, resources): if (resources is None): resources = {} resources = resources.copy() assert ('CPU' not in resources) assert ('GPU' not in resources) if (num_cpus is not None): resources['CPU'] = num_cpus if (num_gpus is not None): resources['GPU'] = num_gpus if ('CPU' not in resources): resources['CPU'] = multiprocessing.cpu_count() gpu_ids = ray.utils.get_cuda_visible_devices() if (('GPU' in resources) and (gpu_ids is not None) and (resources['GPU'] > len(gpu_ids))): raise Exception('Attempting to start raylet with {} GPUs, but CUDA_VISIBLE_DEVICES contains {}.'.format(resources['GPU'], gpu_ids)) if ('GPU' not in resources): resources['GPU'] = _autodetect_num_gpus() if (gpu_ids is not None): resources['GPU'] = min(resources['GPU'], len(gpu_ids)) resources = {resource_label: resource_quantity for (resource_label, resource_quantity) in resources.items() if (resource_quantity != 0)} for (_, resource_quantity) in resources.items(): assert (isinstance(resource_quantity, int) or isinstance(resource_quantity, float)) if (isinstance(resource_quantity, float) and (not resource_quantity.is_integer())): raise ValueError('Resource quantities must all be whole numbers. Received {}.'.format(resources)) if (resource_quantity < 0): raise ValueError('Resource quantities must be nonnegative. Received {}.'.format(resources)) if (resource_quantity > ray_constants.MAX_RESOURCE_QUANTITY): raise ValueError('Resource quantities must be at most {}.'.format(ray_constants.MAX_RESOURCE_QUANTITY)) return resources
Sanity check a resource dictionary and add sensible defaults. Args: num_cpus: The number of CPUs. num_gpus: The number of GPUs. resources: A dictionary mapping resource names to resource quantities. Returns: A new resource dictionary.
codesearchnet
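An illustrative call; the exact output depends on CUDA_VISIBLE_DEVICES and GPU autodetection on the machine:

resources = check_and_update_resources(
    num_cpus=4, num_gpus=1, resources={'custom_resource': 2})
# e.g. {'CPU': 4, 'GPU': 1, 'custom_resource': 2}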
def _GetSignatureMatchParserNames(self, file_object): parser_names = [] scan_state = pysigscan.scan_state() self._file_scanner.scan_file_object(scan_state, file_object) for scan_result in iter(scan_state.scan_results): format_specification = self._formats_with_signatures.GetSpecificationBySignature(scan_result.identifier) if (format_specification.identifier not in parser_names): parser_names.append(format_specification.identifier) return parser_names
Determines the parsers for which a file-like object matches their known signatures. Args: file_object (file): file-like object whose contents will be checked for known signatures. Returns: list[str]: parser names for which the contents of the file-like object match their known signatures.
codesearchnet
def _GetSignatureScanner(cls, specification_store): signature_scanner = pysigscan.scanner() signature_scanner.set_scan_buffer_size(cls._SCAN_BUFFER_SIZE) for format_specification in specification_store.specifications: for signature in format_specification.signatures: pattern_offset = signature.offset if pattern_offset is None: signature_flags = pysigscan.signature_flags.NO_OFFSET elif pattern_offset < 0: pattern_offset *= -1 signature_flags = pysigscan.signature_flags.RELATIVE_FROM_END else: signature_flags = pysigscan.signature_flags.RELATIVE_FROM_START signature_scanner.add_signature( signature.identifier, pattern_offset, signature.pattern, signature_flags) return signature_scanner
Initializes a signature scanner based on a specification store. Args: specification_store (FormatSpecificationStore): specification store. Returns: pysigscan.scanner: signature scanner.
juraj-google-style
def Verify(self, written_keys): self.log.debug('verification starting on %r', self.temp_cache_filename) cache_data = self.GetMap(self.temp_cache_filename) map_entry_count = len(cache_data) self.log.debug('entry count: %d', map_entry_count) if map_entry_count <= 0: self.log.error('The files cache being verified "%r" is empty.', self.temp_cache_filename) raise error.EmptyMap(self.temp_cache_filename + ' is empty') cache_keys = set() try: while 1: entry = cache_data.PopItem() cache_keys.update(self._ExpectedKeysForEntry(entry)) except KeyError: pass missing_from_cache = written_keys - cache_keys if missing_from_cache: self.log.warning('verify failed: %d missing from the on-disk cache', len(missing_from_cache)) if len(missing_from_cache) < 1000: self.log.debug('keys missing from the on-disk cache: %r', missing_from_cache) else: self.log.debug('More than 1000 keys missing from cache. Not printing.') self._Rollback() return False missing_from_map = cache_keys - written_keys if missing_from_map: self.log.warning('verify failed: %d keys found, unexpected in the on-disk cache', len(missing_from_map)) if len(missing_from_map) < 1000: self.log.debug('keys missing from map: %r', missing_from_map) else: self.log.debug('More than 1000 keys missing from map. Not printing.') self._Rollback() return False return True
Verify that the cache is correct. Perform some unit tests on the written data, such as reading it back and verifying that it parses and has the entries we expect. Args: written_keys: a set of keys that should have been written to disk. Returns: a boolean indicating success. Raises: EmptyMap: The cache being verified is empty.
github-repos
def mementoweb_api_tags(url):
    # NOTE: the API base URL after "http:" was truncated in the source;
    # substitute the real MementoWeb endpoint before use.
    memento_url = "http://"
    r = requests.get(memento_url + url)
    if r.status_code != 200:
        return []
    data = r.json().get("mementos", {}).get("list", [])
    if not data:
        return []
    resources = (
        TimeResource(
            url=item.get("uri", ""),
            date=item.get("datetime", ""),
            val=item.get("datetime", "").split("-")[0],
            source="MementoWeb.org",
        )
        for item in data
    )
    resource_dict = {
        res.val: res
        for res in resources
    }
    return sorted(resource_dict.values(), key=lambda x: x.val)
Parse a list of :class:`TimeResource` objects from the mementoweb.org API. Args: url (str): Any url. Returns: list: :class:`TimeResource` objects.
juraj-google-style
def create_project(self, resource): self.project_service.set_auth(self._token_project) return self.project_service.create(resource)
Create the entity described by the given resource. Args: resource (intern.resource.boss.BossResource) Returns: (intern.resource.boss.BossResource): Returns resource of type requested on success. Raises: requests.HTTPError on failure.
codesearchnet
def add_node(self, node_name): graph = self.graph if (node_name in graph): raise KeyError(('node %s already exists' % node_name)) graph[node_name] = set()
Add a node if it does not exist yet, or error out. Args: node_name (str): The unique name of the node to add. Raises: KeyError: Raised if a node with the same name already exists in the graph
codesearchnet
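A usage sketch, assuming `DAG` is the (hypothetical name for the) class that defines this method and initializes `self.graph` to a dict:

dag = DAG()
dag.add_node('a')
dag.add_node('b')
print(dag.graph)   # {'a': set(), 'b': set()}
dag.add_node('a')  # KeyError: 'node a already exists'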
def handle(self, message): opcode = message['op'] if (opcode == 10): self.on_hello(message) elif (opcode == 11): self.on_heartbeat(message) elif (opcode == 0): self.on_message(message) else: logger.debug('Not a message we handle: OPCODE {}'.format(opcode)) return
Dispatches messages to the appropriate handler based on opcode. Args: message (dict): Full message from Discord websocket connection
codesearchnet
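A small sketch of the dispatch, assuming `client` is an instance of the class defining handle; the payload shape follows the Discord gateway Hello message:

client.handle({'op': 10, 'd': {'heartbeat_interval': 41250}})  # routed to on_hello
client.handle({'op': 11})                                      # routed to on_heartbeat
client.handle({'op': 7})                                       # logged and ignored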
def _ParseAnalysisPluginOptions(self, options): analysis_plugin_info = self._analysis_manager.GetAllPluginInformation() analysis_plugin_names = { name.lower() for name, _, _ in analysis_plugin_info} analysis_plugins = self.ParseStringOption(options, 'analysis_plugins') if not analysis_plugins: return requested_plugin_names = { name.strip().lower() for name in analysis_plugins.split(',')} difference = requested_plugin_names.difference(analysis_plugin_names) if difference: raise errors.BadConfigOption( 'Non-existent analysis plugins specified: {0:s}'.format( ' '.join(difference))) self._analysis_plugins = self._GetAnalysisPlugins(analysis_plugins) for analysis_plugin in self._analysis_plugins: helpers_manager.ArgumentHelperManager.ParseOptions( options, analysis_plugin)
Parses the analysis plugin options. Args: options (argparse.Namespace): command line arguments.
juraj-google-style
def lookup(self, obj): return self._registered_map[self.get_registered_name(obj)]
Looks up the registered object using the predicate. Args: obj: Object to pass to each of the registered predicates to look up the registered object. Returns: The object registered with the first passing predicate. Raises: LookupError if the object does not match any of the predicate functions.
github-repos
def rgstr_stamps_root(rgstr_stamps): rgstr_stamps = sanitize_rgstr_stamps(rgstr_stamps) f.root.rgstr_stamps = rgstr_stamps return rgstr_stamps
Register stamps with the root timer (see subdivision()). Args: rgstr_stamps (list, tuple): Collection of identifiers, passed through set(), then each is passed through str(). Returns: list: Implemented registered stamp collection.
juraj-google-style
def _mean_of_runs(stats, key='runs'): num_runs = len(stats[key]) first = stats[key][0] mean = {} for stat_key in first: if isinstance(first[stat_key], numbers.Number): mean[stat_key] = sum(run[stat_key] for run in stats[key]) / float(num_runs) return mean
Obtain the mean of stats. Args: stats: dict; A set of stats, structured as above. key: str; Optional key to determine where the list of runs is found in stats. Returns: dict; Mean for each numeric key across the runs.
juraj-google-style
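The mean counterpart pairs naturally with _sd_of_runs above; a tiny example with made-up numbers:

stats = {'runs': [{'time': 1.0, 'score': 10}, {'time': 3.0, 'score': 14}]}
mean = _mean_of_runs(stats)
print(mean)  # {'time': 2.0, 'score': 12.0}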
def decode_field(self, field, value): for decoder in _GetFieldCodecs(field, 'decoder'): result = decoder(field, value) value = result.value if result.complete: return value if isinstance(field, messages.MessageField): field_value = self.decode_message( field.message_type, json.dumps(value)) elif isinstance(field, messages.EnumField): value = GetCustomJsonEnumMapping( field.type, json_name=value) or value try: field_value = super( _ProtoJsonApiTools, self).decode_field(field, value) except messages.DecodeError: if not isinstance(value, six.string_types): raise field_value = None else: field_value = super( _ProtoJsonApiTools, self).decode_field(field, value) return field_value
Decode the given JSON value. Args: field: a messages.Field for the field we're decoding. value: a python value we'd like to decode. Returns: A value suitable for assignment to field.
juraj-google-style
class MimiEncoderOutput(ModelOutput): audio_codes: Optional[torch.LongTensor] = None encoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None
Args: audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*): Discrete code embeddings computed using `model.encode`. encoder_past_key_values (`Cache`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the encoder transformer. This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. The model will output the same cache format that is fed as input. If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't have their past key value states given to this model).
github-repos
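A tiny construction sketch (shapes illustrative: batch of 1, 8 quantizers, 125 code frames); this assumes the @dataclass decorator that transformers applies to ModelOutput subclasses:

import torch

out = MimiEncoderOutput(audio_codes=torch.zeros(1, 8, 125, dtype=torch.long))
print(out.audio_codes.shape)        # torch.Size([1, 8, 125])
print(out.encoder_past_key_values)  # None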
def GetSubFileEntryByName(self, name, case_sensitive=True): name_lower = name.lower() matching_sub_file_entry = None for sub_file_entry in self.sub_file_entries: if (sub_file_entry.name == name): return sub_file_entry if ((not case_sensitive) and (sub_file_entry.name.lower() == name_lower)): if (not matching_sub_file_entry): matching_sub_file_entry = sub_file_entry return matching_sub_file_entry
Retrieves a sub file entry by name. Args: name (str): name of the file entry. case_sensitive (Optional[bool]): True if the name is case sensitive. Returns: FileEntry: a file entry or None if not available.
codesearchnet
def get_capture_handler_config_by_name(self, name): handler_confs = [] for address, stream_capturer in self._stream_capturers.iteritems(): handler_data = stream_capturer[0].dump_handler_config_data() for h in handler_data: if h['handler']['name'] == name: handler_confs.append(h) return handler_confs
Return data for handlers of a given name. Args: name: Name of the capture handler(s) to return config data for. Returns: A list of config dictionaries for the named capture handler(s), as given by the :func:`SocketStreamCapturer.dump_handler_config_data` method.
juraj-google-style