Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes)
def _on_disconnect(self):
    self._logger.info('Connection to device %s was interrupted', self.connection_string)
    self.connection_interrupted = True
Callback when a device is disconnected unexpectedly. Args: adapter_id (int): An ID for the adapter that was connected to the device connection_id (int): An ID for the connection that has become disconnected
codesearchnet
def ParseRecord(self, parser_mediator, key, structure): if key != 'line': raise errors.ParseError( 'Unable to parse record, unknown structure: {0:s}'.format(key)) try: date_time = dfdatetime_time_elements.TimeElements( time_elements_tuple=structure.date_time) except ValueError: parser_mediator.ProduceExtractionWarning( 'invalid date time value: {0!s}'.format(structure.date_time)) return body_text = structure.body if not body_text: parser_mediator.ProduceExtractionWarning( 'invalid body {0:s}'.format(structure.body)) return event_data = DpkgEventData() event_data.body = body_text event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_ADDED) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a structure of tokens derived from a line of a text file. Args: parser_mediator (ParserMediator): parser mediator. key (str): identifier of the structure of tokens. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file. Raises: ParseError: when the structure type is unknown.
juraj-google-style
def _ExtractDataStream( self, file_entry, data_stream_name, destination_path, output_writer, skip_duplicates=True): if not data_stream_name and not file_entry.IsFile(): return display_name = path_helper.PathHelper.GetDisplayNameForPathSpec( file_entry.path_spec) if skip_duplicates: try: digest = self._CalculateDigestHash(file_entry, data_stream_name) except (IOError, dfvfs_errors.BackEndError) as exception: output_writer.Write(( '[skipping] unable to read content of file entry: {0:s} ' 'with error: {1!s}\n').format(display_name, exception)) return if not digest: output_writer.Write( '[skipping] unable to read content of file entry: {0:s}\n'.format( display_name)) return duplicate_display_name = self._digests.get(digest, None) if duplicate_display_name: output_writer.Write(( '[skipping] file entry: {0:s} is a duplicate of: {1:s} with ' 'digest: {2:s}\n').format( display_name, duplicate_display_name, digest)) return self._digests[digest] = display_name target_directory, target_filename = self._CreateSanitizedDestination( file_entry, file_entry.path_spec, data_stream_name, destination_path) if not os.path.isdir(target_directory): os.makedirs(target_directory) target_path = os.path.join(target_directory, target_filename) if os.path.exists(target_path): output_writer.Write(( '[skipping] unable to export contents of file entry: {0:s} ' 'because exported file: {1:s} already exists.\n').format( display_name, target_path)) return try: self._WriteFileEntry(file_entry, data_stream_name, target_path) except (IOError, dfvfs_errors.BackEndError) as exception: output_writer.Write(( '[skipping] unable to export contents of file entry: {0:s} ' 'with error: {1!s}\n').format(display_name, exception)) try: os.remove(target_path) except (IOError, OSError): pass
Extracts a data stream. Args: file_entry (dfvfs.FileEntry): file entry containing the data stream. data_stream_name (str): name of the data stream. destination_path (str): path where the extracted files should be stored. output_writer (CLIOutputWriter): output writer. skip_duplicates (Optional[bool]): True if files with duplicate content should be skipped.
juraj-google-style
def __init__(self, output_mediator):
    hostname = output_mediator.GetStoredHostname()
    if hostname:
        logger.debug('Hostname: {0:s}'.format(hostname))
    super(TimesketchOutputModule, self).__init__(output_mediator)
    self._timeline_name = hostname
    self._timeline_owner = None
    self._timesketch = timesketch.create_app()
Initializes a Timesketch output module. Args: output_mediator (OutputMediator): mediates interactions between output modules and other components, such as storage and dfvfs.
juraj-google-style
def _ParseHeader(self, parser_mediator, file_object): header_map = self._GetDataTypeMap('cups_ipp_header') try: header, _ = self._ReadStructureFromFileObject(file_object, 0, header_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile( '[{0:s}] Unable to parse header with error: {1!s}'.format( self.NAME, exception)) format_version = '{0:d}.{1:d}'.format( header.major_version, header.minor_version) if format_version not in self._SUPPORTED_FORMAT_VERSIONS: raise errors.UnableToParseFile( '[{0:s}] Unsupported format version {1:s}.'.format( self.NAME, format_version)) if header.operation_identifier != 5: display_name = parser_mediator.GetDisplayName() logger.debug(( '[{0:s}] Non-standard operation identifier: 0x{1:08x} in file header ' 'of: {2:s}.').format( self.NAME, header.operation_identifier, display_name))
Parses a CUPS IPP header from a file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. Raises: UnableToParseFile: when the header cannot be parsed.
juraj-google-style
def SampleMemoryUsage(self, parser_name):
    if self._memory_profiler:
        used_memory = self._process_information.GetUsedMemory() or 0
        self._memory_profiler.Sample(parser_name, used_memory)
Takes a sample of the memory usage for profiling. Args: parser_name (str): name of the parser.
juraj-google-style
def get_average_along_axis(self, ind):
    m = self.data['total']
    ng = self.dim
    if ind == 0:
        total = np.sum(np.sum(m, axis=1), 1)
    elif ind == 1:
        total = np.sum(np.sum(m, axis=0), 1)
    else:
        total = np.sum(np.sum(m, axis=0), 0)
    return total / ng[(ind + 1) % 3] / ng[(ind + 2) % 3]
Get the averaged total of the volumetric data along a certain axis direction. For example, useful for visualizing Hartree Potentials from a LOCPOT file. Args: ind (int): Index of axis. Returns: Average total along axis
codesearchnet
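For ind == 2 the reduction above is simply a mean over the first two grid axes. A minimal NumPy sketch (a toy array standing in for the LOCPOT grid; the array values are an assumption for illustration) shows the equivalence:

import numpy as np

# Toy stand-in for self.data['total'] and self.dim.
m = np.arange(2 * 3 * 4, dtype=float).reshape(2, 3, 4)
ng = m.shape

ind = 2
total = np.sum(np.sum(m, axis=0), 0)                  # same reduction as the method above for ind == 2
avg = total / ng[(ind + 1) % 3] / ng[(ind + 2) % 3]   # divide by the sizes of the two reduced axes

assert np.allclose(avg, m.mean(axis=(0, 1)))          # i.e. an average over the other two axes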
def with_hot_key_fanout(self, fanout):
    from apache_beam.transforms.combiners import curry_combine_fn
    if fanout is None:
        return self
    else:
        return _CombinePerKeyWithHotKeyFanout(
            curry_combine_fn(self.fn, self.args, self.kwargs), fanout)
A per-key combine operation like self but with two levels of aggregation. If a given key is produced by too many upstream bundles, the final reduction can become a bottleneck despite partial combining being lifted pre-GroupByKey. In these cases it can be helpful to perform intermediate partial aggregations in parallel and then re-group to perform a final (per-key) combine. This is also useful for high-volume keys in streaming where combiners are not generally lifted for latency reasons. Note that a fanout greater than 1 requires the data to be sent through two GroupByKeys, and a high fanout can also result in more shuffle data due to less per-bundle combining. Setting the fanout for a key at 1 or less places values on the "cold key" path that skip the intermediate level of aggregation. Args: fanout: either None, for no fanout, an int, for a constant-degree fanout, or a callable mapping keys to a key-specific degree of fanout. Returns: A per-key combining PTransform with the specified fanout.
github-repos
def _serialize(self, entity, pb, prefix='', parent_repeated=False, projection=None): values = self._get_base_value_unwrapped_as_list(entity) name = (prefix + self._name) if (projection and (name not in projection)): return if self._indexed: create_prop = (lambda : pb.add_property()) else: create_prop = (lambda : pb.add_raw_property()) if (self._repeated and (not values) and self._write_empty_list): p = create_prop() p.set_name(name) p.set_multiple(False) p.set_meaning(entity_pb.Property.EMPTY_LIST) p.mutable_value() else: for val in values: p = create_prop() p.set_name(name) p.set_multiple((self._repeated or parent_repeated)) v = p.mutable_value() if (val is not None): self._db_set_value(v, p, val) if projection: new_p = entity_pb.Property() new_p.set_name(p.name()) new_p.set_meaning(entity_pb.Property.INDEX_VALUE) new_p.set_multiple(False) new_p.mutable_value().CopyFrom(v) p.CopyFrom(new_p)
Internal helper to serialize this property to a protocol buffer. Subclasses may override this method. Args: entity: The entity, a Model (subclass) instance. pb: The protocol buffer, an EntityProto instance. prefix: Optional name prefix used for StructuredProperty (if present, must end in '.'). parent_repeated: True if the parent (or an earlier ancestor) is a repeated Property. projection: A list or tuple of strings representing the projection for the model instance, or None if the instance is not a projection.
codesearchnet
def set_working_directory(working_directory):
    logger.debug('starting')
    logger.debug(f'adding {working_directory} to sys.paths')
    sys.path.append(working_directory)
    logger.debug('done')
Add working_directory to sys.paths. This allows dynamic loading of arbitrary python modules in cwd. Args: working_directory: string. path to add to sys.paths
codesearchnet
def visualize(G, settings, filename="dependencies", no_graphviz=False): error = settings["error"] if no_graphviz: write_dot_file(G, filename) return 0 write_dot_file(G, "tempdot") renderer = "svg" if re.search("\.jpg$", filename, re.IGNORECASE): renderer = "jpg" elif re.search("\.jpeg$", filename, re.IGNORECASE): renderer = "jpg" elif re.search("\.svg$", filename, re.IGNORECASE): renderer = "svg" elif re.search("\.png$", filename, re.IGNORECASE): renderer = "png" elif re.search("\.gif$", filename, re.IGNORECASE): renderer = "gif" elif re.search("\.ps$", filename, re.IGNORECASE): renderer = "ps" elif re.search("\.pdf$", filename, re.IGNORECASE): renderer = "pdf" else: renderer = "svg" filename += ".svg" command = "dot -T{} tempdot -o {}".format(renderer, filename) p = Popen(command, shell=True) p.communicate() if p.returncode: errmes = "Either graphviz is not installed, or its not on PATH" os.remove("tempdot") error(errmes) sys.exit(1) os.remove("tempdot") return 0
Uses networkX to draw a graphviz dot file and either (a) calls the graphviz command "dot" to turn it into an SVG and removes the dot file (default), or (b) if no_graphviz is True, just outputs the graphviz dot file. Args: a NetworkX DiGraph; the settings dictionary; a filename (a default is provided); a flag indicating whether graphviz should *not* be called. Returns: 0 if everything worked; a fatal error is raised on failure.
juraj-google-style
def export_as_code(self, cv_source): rand_value = ''.join((random.choice((string.ascii_uppercase + string.digits)) for _ in range(25))) base_learner_code = '' base_learner_code += 'base_learner_list_{} = []\n'.format(rand_value) base_learner_code += 'meta_feature_generators_list_{} = []\n\n'.format(rand_value) for (idx, base_learner) in enumerate(self.base_learners): base_learner_code += ' base_learner_code += ' base_learner_code += ' base_learner_code += base_learner.base_learner_origin.source base_learner_code += '\n\n' base_learner_code += 'base_learner.set_params(**{})\n'.format(base_learner.hyperparameters) base_learner_code += 'base_learner_list_{}.append(base_learner)\n'.format(rand_value) base_learner_code += 'meta_feature_generators_list_{}.append("{}")\n'.format(rand_value, base_learner.base_learner_origin.meta_feature_generator) base_learner_code += '\n\n' base_learner_code += ' base_learner_code += ' base_learner_code += ' base_learner_code += self.base_learner_origin.source base_learner_code += '\n\n' base_learner_code += 'base_learner.set_params(**{})\n'.format(self.secondary_learner_hyperparameters) base_learner_code += 'secondary_learner_{} = base_learner\n'.format(rand_value) base_learner_code += '\n\n' base_learner_code += ' base_learner_code += ' base_learner_code += ' base_learner_code += cv_source base_learner_code += '\n\n' base_learner_code += ' base_learner_code += ' base_learner_code += ' stacker_file_loc = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'stacker.py') with open(stacker_file_loc) as f2: base_learner_code += f2.read() base_learner_code += '\n\n def {}(self, X):\n return self._process_using_meta_feature_generator(X, "{}")\n\n'.format(self.base_learner_origin.meta_feature_generator, self.base_learner_origin.meta_feature_generator) base_learner_code += '\n\n' base_learner_code += 'base_learner = XcessivStackedEnsemble(base_learners=base_learner_list_{}, meta_feature_generators=meta_feature_generators_list_{}, secondary_learner=secondary_learner_{}, cv_function=return_splits_iterable)\n'.format(rand_value, rand_value, rand_value) return base_learner_code
Returns a string value that contains the Python code for the ensemble Args: cv_source (str, unicode): String containing actual code for base learner cross-validation used to generate secondary meta-features. Returns: base_learner_code (str, unicode): String that can be used as Python code
codesearchnet
def writeTable(self, tableName):
    lock_and_call(
        lambda: self._impl.writeTable(tableName),
        self._lock
    )
Write the table corresponding to the specified name, equivalent to the AMPL statement .. code-block:: ampl write table tableName; Args: tableName: Name of the table to be written.
juraj-google-style
def _add_variable_proxy_methods(var, proxy_tensor):
    proxy_tensor.read_value = lambda: tf.identity(proxy_tensor)
    proxy_tensor.assign_sub = var.assign_sub
    proxy_tensor.assign = var.assign
    proxy_tensor.initialized_value = var.initialized_value
Proxy methods of underlying variable. This enables our custom getters to still work with, e.g., batch norm. Args: var: Variable to proxy proxy_tensor: Tensor that is identity of var
juraj-google-style
def events(config):
    celery_app = create_app(config)
    for event in event_stream(celery_app, filter_by_prefix='task'):
        try:
            yield create_event_model(event)
        except JobEventTypeUnsupported:
            pass
Return a generator that yields workflow events. For every workflow event that is sent from celery this generator yields an event object. Args: config (Config): Reference to the configuration object from which the settings are retrieved. Returns: generator: A generator that returns workflow events.
codesearchnet
def sum(x, axis=None, keepdims=False):
    from .function_bases import sum as sum_base
    if axis is None:
        axis = range(x.ndim)
    elif not hasattr(axis, '__iter__'):
        axis = [axis]
    return sum_base(x, axis, keepdims)
Reduction along axes with sum operation. Args: x (Variable): An input variable. axis (None, int or tuple of ints): Axis or axes along which the sum is calculated. Passing the default value `None` will reduce all dimensions. keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element. Returns: ~nnabla.Variable: N-D array.
codesearchnet
def matches(self, address, name=None):
    if self.controller:
        return address == 8
    return self.address == address
Check if this slot identifier matches the given tile. Matching can happen either by address or by module name (not currently implemented). Returns: bool: True if there is a match, otherwise False.
codesearchnet
def create_nanopubs_fh(output_fn: str): json_flag, jsonl_flag, yaml_flag = False, False, False if output_fn: if re.search("gz$", output_fn): out_fh = gzip.open(output_fn, "wt") else: out_fh = click.open_file(output_fn, mode="wt") if re.search("ya?ml", output_fn): yaml_flag = True elif "jsonl" in output_fn or "-" == output_fn: jsonl_flag = True elif "json" in output_fn: json_flag = True else: out_fh = sys.stdout return (out_fh, yaml_flag, jsonl_flag, json_flag)
Create Nanopubs output filehandle. \b If output fn is '-', JSONLines will be written to STDOUT. If output fn has *.gz, it will be written as a gzip file. If output fn has *.jsonl*, it will be written as a JSONLines file. If output fn has *.json*, it will be written as a JSON file. If output fn has *.yaml* or *.yml*, it will be written as a YAML file. Args: output_fn: Name of output file Returns: (filehandle, yaml_flag, jsonl_flag, json_flag)
juraj-google-style
def emit(self, record): record.task = self.cur_task if ((record.levelno >= self.dump_level) and self.cur_task): self.tasks[self.cur_task].failed = True self.tasks[self.cur_task].force_show = True is_start = START_TASK_REG.match(str(record.msg)) if is_start: self.handle_new_task(is_start.groupdict()['task_name'], record) return is_end = END_TASK_REG.match(str(record.msg)) if is_end: self.handle_closed_task(is_end.groupdict()['task_name'], record) return force_show_record = ALWAYS_SHOW_REG.match(str(record.msg)) if force_show_record: record.msg = force_show_record.groupdict()['message'] self.pretty_emit(record) if ((not force_show_record) and self.should_show_by_level(record) and self.should_show_by_depth()): self.pretty_emit(record) return if self.cur_task: self.tasks[self.cur_task].append(record)
Handle the given record, this is the entry point from the python logging facility Params: record (logging.LogRecord): log record to handle Returns: None
codesearchnet
def attach_bytes(key, the_bytes):
    tf_v1.add_to_collection(
        _ATTACHMENT_COLLECTION_INTERNAL,
        module_attachment_pb2.ModuleAttachment(key=key, value=the_bytes))
Adds a ModuleAttachment to the current graph. Args: key: A string with the unique key of the attachment. the_bytes: A bytes object with the serialized attachment.
juraj-google-style
def delete(self, url, params=None, **kwargs):
    return self.call_api('DELETE', url, params=params, **kwargs)
Call the API with a DELETE request. Args: url (str): Resource location relative to the base URL. params (dict or None): Query-string parameters. Returns: ResultParser or ErrorParser.
codesearchnet
def fetch_credential(self, credential=None, profile=None):
    q = self.db.get(self.query.profile == profile)
    if q is not None:
        return q.get(credential)
Fetch credential from credentials file. Args: credential (str): Credential to fetch. profile (str): Credentials profile. Defaults to ``'default'``. Returns: str, None: Fetched credential or ``None``.
codesearchnet
def resize_video(in_file, out_file, size=None, ratio=None, keep_ar=False, log_level='info', print_cmd=False, **kwargs): if size is None and ratio is None: raise ValueError('expected size or ratio must be specified') elif size is not None and ratio is not None: raise ValueError('size and ratio cannot be specified at the same time') options = {'log_level': log_level} if size: if not keep_ar: options['vf'] = 'scale={}:{}'.format(size[0], size[1]) else: options['vf'] = ('scale=w={}:h={}:force_original_aspect_ratio' '=decrease'.format(size[0], size[1])) else: if not isinstance(ratio, tuple): ratio = (ratio, ratio) options['vf'] = 'scale="trunc(iw*{}):trunc(ih*{})"'.format( ratio[0], ratio[1]) convert_video(in_file, out_file, print_cmd, **options)
Resize a video. Args: in_file (str): Input video filename. out_file (str): Output video filename. size (tuple): Expected size (w, h), eg, (320, 240) or (320, -1). ratio (tuple or float): Expected resize ratio, (2, 0.5) means (w*2, h*0.5). keep_ar (bool): Whether to keep original aspect ratio. log_level (str): Logging level of ffmpeg. print_cmd (bool): Whether to print the final ffmpeg command.
juraj-google-style
def _generate_image_and_label_batch(image, label, min_queue_examples, batch_size, shuffle): num_preprocess_threads = 16 if shuffle: (images, label_batch) = tf.train.shuffle_batch([image, label], batch_size=batch_size, num_threads=num_preprocess_threads, capacity=(min_queue_examples + (3 * batch_size)), min_after_dequeue=min_queue_examples) else: (images, label_batch) = tf.train.batch([image, label], batch_size=batch_size, num_threads=num_preprocess_threads, capacity=(min_queue_examples + (3 * batch_size))) tf.summary.image('images', images) return (images, tf.reshape(label_batch, [batch_size]))
Construct a queued batch of images and labels. Args: image: 3-D Tensor of [height, width, 3] of type float32. label: 1-D Tensor of type int32. min_queue_examples: int32, minimum number of samples to retain in the queue that provides batches of examples. batch_size: Number of images per batch. shuffle: boolean indicating whether to use a shuffling queue. Returns: images: Images. 4D tensor of [batch_size, height, width, 3] size. labels: Labels. 1D tensor of [batch_size] size.
codesearchnet
def __init__(self, scope, parent, name):
    CodeStatement.__init__(self, scope, parent)
    self.name = name
    self.value = None
Constructor for jump statements. Args: scope (CodeEntity): The program scope where this object belongs. parent (CodeEntity): This object's parent in the program tree. name (str): The name of the statement in the program.
juraj-google-style
def get_group(self, group_id):
    group = self.group_id_map.get(group_id)
    if group:
        return group
    self.logger.error('Group ID "%s" is not in datafile.' % group_id)
    self.error_handler.handle_error(
        exceptions.InvalidGroupException(enums.Errors.INVALID_GROUP_ID_ERROR))
    return None
Get group for the provided group ID. Args: group_id: Group ID for which group is to be determined. Returns: Group corresponding to the provided group ID.
codesearchnet
def NotEqualTo(self, value):
    self._awql = self._CreateSingleValueCondition(value, '!=')
    return self._query_builder
Sets the type of the WHERE clause as "not equal to". Args: value: The value to be used in the WHERE condition. Returns: The query builder that this WHERE builder links to.
juraj-google-style
def instantiate(self, input_types): key = _type_list_to_str(input_types) defined = self._overload.get(key) if not defined: name = self._func_name if name is not None: name = '_'.join([name, key]) defined = _DefinedFunction(self._func, self._argnames, input_types, name, None, self._python_grad_func, out_names=self._out_names, **self._extra_kwargs) _ = defined.name if self._grad_func: output_types = [dtypes.DType(_.type) for _ in defined._signature.output_arg] defined._grad_func = self._grad_func.instantiate(input_types + output_types) self._overload[key] = defined return defined
Instantiate this function given input argument types. Args: input_types: A list of data types for the inputs. Returns: _DefinedFunction for the given input types.
github-repos
def broadcast_dimension(self, axis, lengths): lengths = ragged_util.convert_to_int_tensor(lengths, name='lengths', dtype=self.dim_size_dtype) if lengths.shape.ndims is None: raise ValueError('lengths must have a known rank.') elif lengths.shape.ndims > 1: raise ValueError('lengths must be a scalar or vector') else: lengths_is_scalar = lengths.shape.ndims == 0 if self.is_ragged(axis): if lengths_is_scalar: condition = math_ops.equal(lengths, 1) else: condition = math_ops.reduce_all(math_ops.equal(lengths, self.dimension_size(axis))) else: axis_dim_size = self.dimension_size(axis) if lengths_is_scalar: condition = math_ops.equal(lengths, 1) | math_ops.equal(axis_dim_size, 1) | math_ops.equal(axis_dim_size, lengths) else: condition = math_ops.equal(axis_dim_size, 1) broadcast_err = ['Unable to broadcast: dimension size mismatch in dimension', axis, 'lengths=', lengths, 'dim_size=', self.dimension_size(axis)] broadcast_check = control_flow_assert.Assert(condition, data=broadcast_err, summarize=10) with ops.control_dependencies([broadcast_check]): if axis < self.num_partitioned_dimensions: if self.is_ragged(axis): return RaggedTensorDynamicShape(self._partitioned_dim_sizes, array_ops.identity(self.inner_dim_sizes), self.dim_size_dtype) else: return self._broadcast_uniform_partitioned_dimension(axis, lengths) elif lengths_is_scalar: return self._broadcast_inner_dimension_to_uniform(axis, lengths) else: if axis == 0: raise ValueError('Unable to broadcast: outermost dimension must be uniform.') return self._broadcast_inner_dimension_to_ragged(axis, lengths)
Returns a shape that is broadcast-compatible with self & lengths. * If dimension[axis] is uniform and lengths is a scalar, the check that either lengths==1 or axis==1 or lengths==axis, and tile dimension[axis] with tf.where(lengths==axis, 1, axis) repeats. * If dimension[axis] is uniform and lengths is a vector, then check that dimension[axis]==1, and raggedly tile dimension[axis] with lengths repeats. (we can skip tiling if we statically know that slice_lengths == 1??) * If dimension[axis] is ragged and lengths is a scalar, then check that lengths==1. * If dimension[axis] is ragged and lengths is a vector, then check that self.dimension_size(axis) == lengths. Args: axis: `int`. The dimension to broadcast. lengths: 0-D or 1-D integer `Tensor`. Returns: A `RaggedTensorDynamicShape`.
github-repos
def write_info_file(resource, path, dataset_name, original_fname): info_path = _get_info_path(path) info = (_read_info(info_path) or {}) urls = set((info.get('urls', []) + [resource.url])) dataset_names = info.get('dataset_names', []) if dataset_name: dataset_names.append(dataset_name) if (('original_fname' in info) and (info['original_fname'] != original_fname)): raise AssertionError(('`original_fname` "%s" stored in %s does NOT match "%s".' % (info['original_fname'], info_path, original_fname))) info = dict(urls=list(urls), dataset_names=list(set(dataset_names)), original_fname=original_fname) with py_utils.atomic_write(info_path, 'w') as info_f: json.dump(info, info_f, sort_keys=True)
Write the INFO file next to local file. Although the method is synchronized, there is still a risk two processes running at the same time overlap here. Risk accepted, since potentially lost data (`dataset_name`) is only for human consumption. Args: resource: resource for which to write the INFO file. path: path of downloaded file. dataset_name: data used to dl the file. original_fname: name of file as downloaded.
codesearchnet
def _einsum_helper(input_shapes, output_shape, mesh_impl): input_shape_union = _shape_union(input_shapes) total_num_dims = input_shape_union.ndims full_shapes = [ s for s in input_shapes + [output_shape] if s.ndims == total_num_dims] full_shape = full_shapes[0] if full_shapes else input_shape_union reduce_slice_fn, reduced_mesh_axes = _reduce_helper( full_shape, output_shape, mesh_impl.tensor_layout(full_shape)) def einsum_slice_fn_naive(*slices): return reduce_slice_fn(functools.reduce(tf.multiply, [ _expand_dims(x, input_shape, full_shape) for x, input_shape in zip(slices, input_shapes)])) if full_shapes: einsum_slice_fn = einsum_slice_fn_naive else: equation = _einsum_equation(input_shapes, output_shape) def einsum_slice_fn(*slices): if slices[0].dtype.is_floating: return mesh_impl.einsum(equation, *slices) else: return einsum_slice_fn_naive(*slices) return einsum_slice_fn, reduced_mesh_axes
Returns slicewise function and reduced mesh dimensions. Assumes the output shape contains no new dimensions. Args: input_shapes: a list of Shapes output_shape: a Shape mesh_impl: a MeshImpl Returns: einsum_slice_fn: a function from tf.Tensors to tf.Tensor reduced_mesh_axes: a list of integers
juraj-google-style
def decode(self, tx):
    if not isinstance(self._service, BitcoinBlockrService):
        raise NotImplementedError('Currently only supported for "blockr.io"')
    return self._service.decode(tx)
Decodes the given transaction. Args: tx: hex of transaction Returns: decoded transaction .. note:: Only supported for blockr.io at the moment.
juraj-google-style
def _bitResponseToValue(bytestring):
    _checkString(bytestring, description='bytestring', minlength=1, maxlength=1)
    RESPONSE_ON = '\x01'
    RESPONSE_OFF = '\x00'
    if bytestring == RESPONSE_ON:
        return 1
    elif bytestring == RESPONSE_OFF:
        return 0
    else:
        raise ValueError('Could not convert bit response to a value. Input: {0!r}'.format(bytestring))
Convert a response string to a numerical value. Args: bytestring (str): A string of length 1. Can be for example ``\\x01``. Returns: The converted value (int). Raises: TypeError, ValueError
juraj-google-style
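A standalone sketch of the same byte-to-bit mapping, with the minimalmodbus _checkString validation omitted (that helper is not shown here, so this is an illustrative re-implementation rather than the library function):

def bit_response_to_value(bytestring):
    # '\x01' means coil ON, '\x00' means coil OFF in a Modbus bit response.
    if bytestring == '\x01':
        return 1
    if bytestring == '\x00':
        return 0
    raise ValueError('Could not convert bit response to a value. Input: {0!r}'.format(bytestring))

assert bit_response_to_value('\x01') == 1
assert bit_response_to_value('\x00') == 0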
def FetchBlobsForSignedBinary( binary_urn, token = None ): if _ShouldUseLegacyDatastore(): try: aff4_stream = aff4.FACTORY.Open( binary_urn, aff4_type=collects.GRRSignedBlob, mode="r", token=token) except aff4.InstantiationError: raise SignedBinaryNotFoundError(binary_urn) timestamp = aff4_stream.Get(aff4_stream.Schema.TYPE).age return (blob for blob in aff4_stream), timestamp else: try: references, timestamp = data_store.REL_DB.ReadSignedBinaryReferences( _SignedBinaryIDFromURN(binary_urn)) except db.UnknownSignedBinaryError: raise SignedBinaryNotFoundError(binary_urn) blob_ids = [r.blob_id for r in references.items] raw_blobs = (data_store.BLOBS.ReadBlob(blob_id) for blob_id in blob_ids) blobs = ( rdf_crypto.SignedBlob.FromSerializedString(raw_blob) for raw_blob in raw_blobs) return blobs, timestamp
Retrieves blobs for the given binary from the datastore. Args: binary_urn: RDFURN that uniquely identifies the binary. token: ACL token to use with the legacy (non-relational) datastore. Returns: A tuple containing an iterator for all the binary's blobs and an RDFDatetime representing when the binary's contents were saved to the datastore. Raises: SignedBinaryNotFoundError: If no signed binary with the given URN exists.
juraj-google-style
def minimize(self, time, variables, **kwargs):
    loss = kwargs["fn_loss"]
    sampled_loss = kwargs["sampled_loss"]
    min_op, _ = self.minimize_(loss, sampled_loss, var_list=variables)
    return min_op
Performs an optimization step. Args: time: Time tensor. Not used for this variables: List of variables to optimize. **kwargs: fn_loss : loss function tensor that is differentiated sampled_loss : the sampled loss from running the model. Returns: The optimization operation.
juraj-google-style
def reraise_modify(caught_exc, append_msg, prepend=False):
    ExceptClass = type(caught_exc)
    traceback = sys.exc_info()[2]
    if not caught_exc.args:
        arg_list = [append_msg]
    else:
        arg_list = list(caught_exc.args[:-1])
        last_arg = caught_exc.args[-1]
        if isinstance(last_arg, str):
            if prepend:
                arg_list.append(append_msg + last_arg)
            else:
                arg_list.append(last_arg + append_msg)
        else:
            arg_list += [last_arg, append_msg]
    caught_exc.args = tuple(arg_list)
    six.reraise(ExceptClass, caught_exc, traceback)
Append message to exception while preserving attributes. Preserves exception class, and exception traceback. Note: This function needs to be called inside an except because `sys.exc_info()` requires the exception context. Args: caught_exc(Exception): The caught exception object append_msg(str): The message to append to the caught exception prepend(bool): If True prepend the message to args instead of appending Returns: None Side Effects: Re-raises the exception with the preserved data / trace but modified message
codesearchnet
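A minimal Python-3 sketch of the same idea, appending context to an exception's args while preserving its class and traceback (the original relies on six.reraise for Python 2 compatibility; the add_context helper below is hypothetical, not part of the library):

def add_context(exc, append_msg):
    last = exc.args[-1] if exc.args else ''
    if isinstance(last, str):
        exc.args = exc.args[:-1] + (last + append_msg,)
    else:
        exc.args = exc.args + (append_msg,)

try:
    try:
        int('not a number')
    except ValueError as exc:
        add_context(exc, ' (while parsing the config file)')
        raise  # bare raise keeps the original traceback
except ValueError as exc:
    print(exc)  # invalid literal for int() ... (while parsing the config file)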
def __init__(self, name, filterString='', dataFrame=pd.DataFrame()):
    self._filterString = filterString
    self._dataFrame = dataFrame
    self.name = name
Constructs a `DataSearch` object from the given attributes. Args: name (str): The name of the filter. filterString (str, optional): A python expression as string. Defaults to an empty string. dataFrame (pandas.DataFrame, optional): The object to filter. Defaults to an empty `DataFrame`.
juraj-google-style
def truncated_normal_ll_gradient(params, low, high, data):
    if params[1] == 0:
        return np.array([np.inf, np.inf])
    return np.array([
        _TruncatedNormalFitter.partial_derivative_mu(params[0], params[1], low, high, data),
        _TruncatedNormalFitter.partial_derivative_sigma(params[0], params[1], low, high, data)])
Return the gradient of the log likelihood of the truncated normal at the given position. Args: params: tuple with (mean, std), the parameters under which we evaluate the model low (float): the lower truncation bound high (float): the upper truncation bound data (ndarray): the one dimension list of data points for which we want to calculate the likelihood Returns: tuple: the gradient of the log likelihood given as a tuple with (mean, std)
codesearchnet
def handle_no_document(self, item_session: ItemSession) -> Actions:
    self._waiter.reset()
    action = self.handle_response(item_session)
    if action == Actions.NORMAL:
        item_session.set_status(Status.skipped)
    return action
Callback for successful responses containing no useful document. Returns: A value from :class:`.hook.Actions`.
codesearchnet
def tf_solve(self, fn_x, x_init, b):
    return super(ConjugateGradient, self).tf_solve(fn_x, x_init, b)
Iteratively solves the system of linear equations $A x = b$. Args: fn_x: A callable returning the left-hand side $A x$ of the system of linear equations. x_init: Initial solution guess $x_0$, zero vector if None. b: The right-hand side $b$ of the system of linear equations. Returns: A solution $x$ to the problem as given by the solver.
juraj-google-style
def when(self, key):
    ctx = Context(key, self)
    self.context.append(ctx)
    return ctx
Specify context, i.e. condition that must be met. Arguments: key (str): Name of the context whose value you want to query. Returns: Context:
codesearchnet
def _serve_plugins_listing(self, request): response = {} for plugin in self._plugins: start = time.time() response[plugin.plugin_name] = plugin.is_active() elapsed = time.time() - start logger.info( 'Plugin listing: is_active() for %s took %0.3f seconds', plugin.plugin_name, elapsed) return http_util.Respond(request, response, 'application/json')
Serves an object mapping plugin name to whether it is enabled. Args: request: The werkzeug.Request object. Returns: A werkzeug.Response object.
juraj-google-style
def _bond_option_variance(model, option_expiry, bond_maturity): if model._sample_with_generic: raise ValueError('The paramerization of `mean_reversion` and/or `volatility` does not support analytic computation of bond option variance.') mean_reversion = model.mean_reversion(option_expiry) volatility = model.volatility(option_expiry) var_between_vol_knots = model._variance_int(model._padded_knots, model._jump_locations, model._jump_values_vol, model._jump_values_mr)[0] varx_at_vol_knots = tf.concat([tf.zeros([1], dtype=var_between_vol_knots.dtype), utils.cumsum_using_matvec(var_between_vol_knots)], axis=-1) time_index = tf.searchsorted(model._jump_locations[0], option_expiry) vn = tf.concat([model._zero_padding, model._jump_locations], axis=-1) var_expiry = model._variance_int(tf.gather(vn, time_index, axis=-1), option_expiry, volatility, mean_reversion)[0] var_expiry = var_expiry + tf.gather(varx_at_vol_knots, time_index) var_expiry = var_expiry * (tf.math.exp(-mean_reversion * option_expiry) - tf.math.exp(-mean_reversion * bond_maturity)) ** 2 / mean_reversion ** 2 return var_expiry
Computes black equivalent variance for bond options. Black equivalent variance is defined as the variance to use in the Black formula to obtain the model implied price of European bond options. Args: model: An instance of `VectorHullWhiteModel`. option_expiry: A rank 1 `Tensor` of real dtype specifying the time to expiry of each option. bond_maturity: A rank 1 `Tensor` of real dtype specifying the time to maturity of underlying zero coupon bonds. Returns: A rank 1 `Tensor` of same dtype and shape as the inputs with computed Black-equivalent variance for the underlying options.
github-repos
def run(self, *args, **kwargs):
    accounts = list(AWSAccount.get_all(include_disabled=False).values())
    self.manage_policies(accounts)
Iterate through all AWS accounts and apply roles and policies from Github Args: *args: Optional list of arguments **kwargs: Optional list of keyword arguments Returns: `None`
juraj-google-style
def traverse_data(obj, use_numpy=True, buffers=None):
    if use_numpy and all(isinstance(el, np.ndarray) for el in obj):
        return [transform_array(el, buffers=buffers) for el in obj]
    obj_copy = []
    for item in obj:
        if type(item) is float:
            if math.isnan(item):
                item = 'NaN'
            elif math.isinf(item):
                if item > 0:
                    item = 'Infinity'
                else:
                    item = '-Infinity'
            obj_copy.append(item)
        elif isinstance(item, (list, tuple)):
            obj_copy.append(traverse_data(item))
        else:
            obj_copy.append(item)
    return obj_copy
Recursively traverse an object until a flat list is found. If NumPy is available, the flat list is converted to a numpy array and passed to transform_array() to handle ``nan``, ``inf``, and ``-inf``. Otherwise, iterate through all items, converting non-JSON items to JSON-compatible values. Args: obj (list) : a list of values or lists use_numpy (bool, optional): toggle NumPy as a dependency for testing. This argument is only useful for testing (default: True)
juraj-google-style
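The non-finite-float handling can be exercised on its own; a self-contained sketch of the same traversal (Bokeh's transform_array and the buffers machinery are omitted, so this is an illustration, not the library code):

import math

def json_safe(obj):
    # Recursively replace nan/inf floats with the JSON-friendly strings used above.
    out = []
    for item in obj:
        if isinstance(item, float):
            if math.isnan(item):
                item = 'NaN'
            elif math.isinf(item):
                item = 'Infinity' if item > 0 else '-Infinity'
            out.append(item)
        elif isinstance(item, (list, tuple)):
            out.append(json_safe(item))
        else:
            out.append(item)
    return out

print(json_safe([1.0, float('nan'), [float('inf'), -float('inf')]]))
# [1.0, 'NaN', ['Infinity', '-Infinity']]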
def handle_incoming_message(self, msg):
    if msg.type == MessageType.START_JOB:
        job = msg.message['job']
        self.schedule_job(job)
    elif msg.type == MessageType.CANCEL_JOB:
        job_id = msg.message['job_id']
        self.cancel(job_id)
Start or cancel a job, based on the msg. If msg.type == MessageType.START_JOB, then start the job given by msg.job. If msg.type == MessageType.CANCEL_JOB, then try to cancel the job given by msg.job.job_id. Args: msg (barbequeue.messaging.classes.Message): Returns: None
codesearchnet
def __init__(self, ctx):
    member_map = collections_overlay.copy()
    ast = ctx.loader.import_name('collections')
    super().__init__(ctx, 'collections', member_map, ast)
Initializes the CollectionsOverlay. This function loads the AST for the collections module, which is used to access type information for any members that are not explicitly provided by the overlay. See get_attribute in attribute.py for how it's used. Args: ctx: An instance of context.Context.
github-repos
def get_assistants_from_file_hierarchy(cls, file_hierarchy, superassistant, role=settings.DEFAULT_ASSISTANT_ROLE): result = [] warn_msg = 'Failed to load assistant {source}, skipping subassistants.' for (name, attrs) in file_hierarchy.items(): loaded_yaml = yaml_loader.YamlLoader.load_yaml_by_path(attrs['source']) if (loaded_yaml is None): logger.warning(warn_msg.format(source=attrs['source'])) continue try: ass = cls.assistant_from_yaml(attrs['source'], loaded_yaml, superassistant, role=role) except exceptions.YamlError as e: logger.warning(e) continue ass._subassistants = cls.get_assistants_from_file_hierarchy(attrs['subhierarchy'], ass, role=role) result.append(ass) return result
Accepts file_hierarchy as returned by cls.get_assistants_file_hierarchy and returns instances of YamlAssistant for loaded files Args: file_hierarchy: structure as described in cls.get_assistants_file_hierarchy role: role of all assistants in this hierarchy (we could find this out dynamically but it's not worth the pain) Returns: list of top level assistants from given hierarchy; these assistants contain references to instances of their subassistants (and their subassistants, ...)
codesearchnet
def _get_cl_dependency_code(self):
    code = ''
    for d in self._dependencies:
        code += d.get_cl_code() + '\n'
    return code
Get the CL code for all the dependencies. Returns: str: The CL code with the actual code.
codesearchnet
def _isbn_cleanse(isbn, checksum=True): if (not isinstance(isbn, string_types)): raise TypeError(('ISBN must be a string, received %r' % isbn)) if (PY2 and isinstance(isbn, str)): isbn = unicode(isbn) uni_input = False else: uni_input = True for dash in DASHES: isbn = isbn.replace(dash, unicode()) if checksum: if (not isbn[:(- 1)].isdigit()): raise IsbnError('non-digit parts') if (len(isbn) == 9): isbn = ('0' + isbn) if (len(isbn) == 10): if (not (isbn[(- 1)].isdigit() or (isbn[(- 1)] in 'Xx'))): raise IsbnError('non-digit or X checksum') elif (len(isbn) == 13): if (not isbn[(- 1)].isdigit()): raise IsbnError('non-digit checksum') if (not isbn.startswith(('978', '979'))): raise IsbnError('invalid Bookland region') else: raise IsbnError('ISBN must be either 10 or 13 characters long') else: if (len(isbn) == 8): isbn = ('0' + isbn) elif ((len(isbn) == 12) and (not isbn[:3].startswith(('978', '979')))): raise IsbnError('invalid Bookland region') if (not isbn.isdigit()): raise IsbnError('non-digit parts') if (not (len(isbn) in (9, 12))): raise IsbnError('ISBN must be either 9 or 12 characters long without checksum') if (PY2 and (not uni_input)): return str(isbn) else: return isbn
Check ISBN is a string, and passes basic sanity checks. Args: isbn (str): SBN, ISBN-10 or ISBN-13 checksum (bool): ``True`` if ``isbn`` includes checksum character Returns: ``str``: ISBN with hyphenation removed, including when called with a SBN Raises: TypeError: ``isbn`` is not a ``str`` type IsbnError: Incorrect length for ``isbn`` IsbnError: Incorrect SBN or ISBN formatting
codesearchnet
def summarize_variables(variables=None): variable_counts = count_variables_by_type(variables=variables) total_num_scalars = 0 total_num_bytes = 0 for dtype in sorted(variable_counts, key=(lambda dtype: ('%r' % dtype))): var_info_for_type = variable_counts[dtype] num_bytes = (var_info_for_type['num_scalars'] * dtype.size) total_num_scalars += var_info_for_type['num_scalars'] total_num_bytes += num_bytes tf.logging.info('%r: %d variables comprising %d scalars, %s', dtype, var_info_for_type['num_variables'], var_info_for_type['num_scalars'], _num_bytes_to_human_readable(num_bytes))
Logs a summary of variable information. This function groups Variables by dtype and prints out the number of Variables and the total number of scalar values for each datatype, as well as the total memory consumed. For Variables of type tf.string, the memory usage cannot be accurately calculated from the Graph as the memory requirements change based on what strings are actually stored, which can only be determined inside a session. In this case, the amount of memory used to stored the pointers to the strings is logged, along with a warning. Args: variables: iterable of variables; if not provided, then all variables (in the default graph) are summarized.
codesearchnet
def add_listener_policy(self, json_data): env = boto3.session.Session(profile_name=self.env, region_name=self.region) elbclient = env.client('elb') stickiness = {} elb_settings = self.properties['elb'] if elb_settings.get('ports'): ports = elb_settings['ports'] for listener in ports: if listener.get("stickiness"): stickiness = self.add_stickiness() LOG.info('Stickiness Found: %s', stickiness) break for job in json.loads(json_data)['job']: for listener in job['listeners']: policies = [] ext_port = listener['externalPort'] if listener['listenerPolicies']: policies.extend(listener['listenerPolicies']) if stickiness.get(ext_port): policies.append(stickiness.get(ext_port)) if policies: LOG.info('Adding listener policies: %s', policies) elbclient.set_load_balancer_policies_of_listener( LoadBalancerName=self.app, LoadBalancerPort=ext_port, PolicyNames=policies)
Attaches listener policies to an ELB. Args: json_data (json): return data from ELB upsert
juraj-google-style
def _ProcessPathSpec(self, extraction_worker, parser_mediator, path_spec): self._current_display_name = parser_mediator.GetDisplayNameForPathSpec( path_spec) try: extraction_worker.ProcessPathSpec(parser_mediator, path_spec) except dfvfs_errors.CacheFullError: self._abort = True logger.error(( 'ABORT: detected cache full error while processing path spec: ' '{0:s}').format(self._current_display_name)) except Exception as exception: parser_mediator.ProduceExtractionWarning(( 'unable to process path specification with error: ' '{0!s}').format(exception), path_spec=path_spec) if self._processing_configuration.debug_output: logger.warning(( 'Unhandled exception while processing path specification: ' '{0:s}.').format(self._current_display_name)) logger.exception(exception)
Processes a path specification. Args: extraction_worker (worker.ExtractionWorker): extraction worker. parser_mediator (ParserMediator): parser mediator. path_spec (dfvfs.PathSpec): path specification.
juraj-google-style
def triangle(duration: int, amp: complex, period: float = None,
             phase: float = 0, name: str = None) -> SamplePulse:
    if period is None:
        period = duration
    return _sampled_triangle_pulse(duration, amp, period, phase=phase, name=name)
Generates triangle wave `SamplePulse`. Applies `left` sampling strategy to generate discrete pulse from continuous function. Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude. Wave range is [-amp, amp]. period: Pulse period, units of dt. If `None` defaults to single cycle. phase: Pulse phase. name: Name of pulse.
juraj-google-style
def register_ops_if_needed(graph_ops): missing_ops = graph_ops - set(op_def_registry.get_registered_ops().keys()) if not missing_ops: return p_buffer = c_api.TF_GetAllOpList() cpp_op_list = op_def_pb2.OpList() cpp_op_list.ParseFromString(c_api.TF_GetBuffer(p_buffer)) cpp_registry_ops = {op.name: op for op in cpp_op_list.op} missing_op_list = op_def_pb2.OpList() for missing_op in missing_ops: if missing_op not in cpp_registry_ops: logging.info( "Op %s is missing from both the python and C++ registry.", missing_op) else: missing_op_list.op.extend([cpp_registry_ops[missing_op]]) logging.info( "Adding op %s from c++ registry to python registry.", missing_op) op_def_registry.register_op_list(missing_op_list) if not missing_ops <= set(cpp_registry_ops.keys()): raise RuntimeError( "Graph ops missing from the python registry (%s) are also absent from " "the c++ registry." % missing_ops.difference(set(cpp_registry_ops.keys())))
Register graph ops absent in op_def_registry, if present in c++ registry. Args: graph_ops: set with graph op names to register. Raises: RuntimeError: if `graph_ops` contains ops that are not in either python or c++ registry.
juraj-google-style
def RemoveUser(self, user): self.logger.info('Removing user %s.', user) if self.remove: command = self.userdel_cmd.format(user=user) try: subprocess.check_call(command.split(' ')) except subprocess.CalledProcessError as e: self.logger.warning('Could not remove user %s. %s.', user, str(e)) else: self.logger.info('Removed user account %s.', user) self._RemoveAuthorizedKeys(user) self._UpdateSudoer(user, sudoer=False)
Remove a Linux user account. Args: user: string, the Linux user account to remove.
codesearchnet
def CheckCheck(filename, clean_lines, linenum, error): lines = clean_lines.elided (check_macro, start_pos) = FindCheckMacro(lines[linenum]) if not check_macro: return (last_line, end_line, end_pos) = CloseExpression( clean_lines, linenum, start_pos) if end_pos < 0: return if not Match(r'\s*;', last_line[end_pos:]): return if linenum == end_line: expression = lines[linenum][start_pos + 1:end_pos - 1] else: expression = lines[linenum][start_pos + 1:] for i in xrange(linenum + 1, end_line): expression += lines[i] expression += last_line[0:end_pos - 1] lhs = '' rhs = '' operator = None while expression: matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||' r'==|!=|>=|>|<=|<|\()(.*)$', expression) if matched: token = matched.group(1) if token == '(': expression = matched.group(2) (end, _) = FindEndOfExpressionInLine(expression, 0, ['(']) if end < 0: return lhs += '(' + expression[0:end] expression = expression[end:] elif token in ('&&', '||'): return elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'): lhs += token expression = matched.group(2) else: operator = token rhs = matched.group(2) break else: matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression) if not matched: matched = Match(r'^(\s*\S)(.*)$', expression) if not matched: break lhs += matched.group(1) expression = matched.group(2) if not (lhs and operator and rhs): return if rhs.find('&&') > -1 or rhs.find('||') > -1: return lhs = lhs.strip() rhs = rhs.strip() match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$' if Match(match_constant, lhs) or Match(match_constant, rhs): error(filename, linenum, 'readability/check', 2, 'Consider using %s instead of %s(a %s b)' % ( _CHECK_REPLACEMENT[check_macro][operator], check_macro, operator))
Checks the use of CHECK and EXPECT macros. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
juraj-google-style
def create(self, name, description='', private=False, runs_executable_tasks=True, runs_docker_container_tasks=True, runs_singularity_container_tasks=True, active=True, whitelists=None): if (whitelists is None): whitelists = [] request_url = (self._client.base_api_url + self.list_url) data_to_post = {'name': name, 'description': description, 'private': private, 'runs_executable_tasks': runs_executable_tasks, 'runs_docker_container_tasks': runs_docker_container_tasks, 'runs_singularity_container_tasks': runs_singularity_container_tasks, 'active': active, 'whitelists': whitelists} response = self._client.session.post(request_url, data=data_to_post) self.validate_request_success(response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_201_CREATED) return self.response_data_to_model_instance(response.json())
Create a task queue. Args: name (str): The name of the task queue. description (str, optional): A description of the task queue. private (bool, optional): A boolean specifying whether the queue is exclusive to its creator. Defaults to False. runs_executable_tasks (bool, optional): A Boolean specifying whether the queue runs executable tasks. Defaults to True. runs_docker_container_tasks (bool, optional): A Boolean specifying whether the queue runs container tasks that run in Docker containers. Defaults to True. runs_singularity_container_tasks (bool, optional): A Boolean specifying whether the queue runs container tasks that run in Singularity containers. Defaults to True. active (bool, optional): A boolean specifying whether the queue is active. Default to True. whitelists (list, optional): A list of task whitelist IDs. Defaults to None (which gets translated to []). Returns: :class:`saltant.models.task_queue.TaskQueue`: A task queue model instance representing the task queue just created.
codesearchnet
def is_node_inside_try_except(node: astroid.Raise) -> bool:
    context = find_try_except_wrapper_node(node)
    return isinstance(context, astroid.TryExcept)
Check if the node is directly under a Try/Except statement. (but not under an ExceptHandler!) Args: node (astroid.Raise): the node raising the exception. Returns: bool: True if the node is inside a try/except statement, False otherwise.
juraj-google-style
def LogHttpFrontendAccess(self, request, source=None, message_count=None):
    event_id = self.GetNewEventId()
    log_msg = '%s-%s [%s]: %s %s %s %s (%d)' % (
        event_id, request.source_ip, source or '<unknown>', request.method,
        request.url, request.user_agent, request.user, message_count or 0)
    logging.info(log_msg)
Write a log entry for a Frontend or UI Request. Args: request: A HttpRequest protobuf. source: Client id of the client initiating the request. Optional. message_count: Number of messages received from the client. Optional.
codesearchnet
def to_pb(self):
    return policy_pb2.Policy(
        etag=self.etag,
        version=self.version or 0,
        bindings=[policy_pb2.Binding(role=role, members=sorted(self[role]))
                  for role in self])
Render a protobuf message. Returns: google.iam.policy_pb2.Policy: a message to be passed to the ``set_iam_policy`` gRPC API.
codesearchnet
def remove_overlap(self, also_remove_contiguous: bool = False) -> None:
    overlap = True
    while overlap:
        overlap = self._remove_overlap_sub(also_remove_contiguous)
    self._sort()
Merges any overlapping intervals. Args: also_remove_contiguous: treat contiguous (as well as overlapping) intervals as worthy of merging?
codesearchnet
def _lookup_global(self, symbol):
    assert symbol.parts
    namespace = self.namespaces
    if len(symbol.parts) == 1:
        namespace = self.namespaces[None]
    try:
        return self._lookup_namespace(symbol, namespace)
    except Error as orig_exc:
        try:
            namespace = self.namespaces[None]
            return self._lookup_namespace(symbol, namespace)
        except Error:
            raise orig_exc
Helper for lookup_symbol that only looks up global variables. Args: symbol: Symbol
codesearchnet
def get_mailcap_entry(self, url): for parser in mime_parsers.parsers: if parser.pattern.match(url): try: (modified_url, content_type) = parser.get_mimetype(url) except Exception as e: _logger.warning('parser %s raised an exception', parser) _logger.exception(e) raise exceptions.MailcapEntryNotFound() if (not content_type): _logger.info('Content type could not be determined') raise exceptions.MailcapEntryNotFound() elif (content_type == 'text/html'): _logger.info('Content type text/html, deferring to browser') raise exceptions.MailcapEntryNotFound() (command, entry) = mailcap.findmatch(self._mailcap_dict, content_type, filename=modified_url) if (not entry): _logger.info('Could not find a valid mailcap entry') raise exceptions.MailcapEntryNotFound() return (command, entry) raise exceptions.MailcapEntryNotFound()
Search through the mime handlers list and attempt to find the appropriate command to open the provided url with. Will raise a MailcapEntryNotFound exception if no valid command exists. Params: url (text): URL that will be checked Returns: command (text): The string of the command that should be executed in a subprocess to open the resource. entry (dict): The full mailcap entry for the corresponding command
codesearchnet
def inquire_by_mech(self, mech, name=True, init_lifetime=True,
                    accept_lifetime=True, usage=True):
    res = rcreds.inquire_cred_by_mech(self, mech, name, init_lifetime,
                                      accept_lifetime, usage)
    if res.name is not None:
        res_name = names.Name(res.name)
    else:
        res_name = None
    return tuples.InquireCredByMechResult(res_name, res.init_lifetime,
                                          res.accept_lifetime, res.usage)
Inspect these credentials for per-mechanism information. This method inspects these credentials for per-mechanism information about them. Args: mech (OID): the mechanism for which to retrieve the information name (bool): get the name associated with the credentials init_lifetime (bool): get the remaining initiate lifetime for the credentials accept_lifetime (bool): get the remaining accept lifetime for the credentials usage (bool): get the usage for the credentials Returns: InquireCredByMechResult: the information about the credentials, with None used when the corresponding argument was False
codesearchnet
def update(self, resource, id_or_uri):
    return self._client.update(resource=resource, uri=id_or_uri)
Updates a registered Device Manager. Args: resource (dict): Object to update. id_or_uri: Can be either the Device manager ID or URI. Returns: dict: The device manager resource.
juraj-google-style
def __init__(self, corpus):
    self.words = corpus
    self.floor = log10(0.01 / len(self.words))
Build function with set of words from a corpus. Args: corpus (collection): collection of words to use
juraj-google-style
def get_range_tracker(self, start_position: Optional[Any],
                      stop_position: Optional[Any]) -> 'RangeTracker':
    raise NotImplementedError
Returns a RangeTracker for a given position range. Framework may invoke ``read()`` method with the RangeTracker object returned here to read data from the source. Args: start_position: starting position of the range. If 'None' default start position of the source must be used. stop_position: ending position of the range. If 'None' default stop position of the source must be used. Returns: a ``RangeTracker`` for the given position range.
github-repos
def save(self, filething=None, deleteid3=False, padding=None):
    self._save(filething, self.metadata_blocks, deleteid3, padding)
Save metadata blocks to a file. Args: filething (filething) deleteid3 (bool): delete id3 tags while at it padding (:obj:`mutagen.PaddingFunction`) If no filename is given, the one most recently loaded is used.
codesearchnet
def hugepage_support(user, group='hugetlb', nr_hugepages=256, max_map_count=65536, mnt_point='/run/hugepages/kvm', pagesize='2MB', mount=True, set_shmmax=False): group_info = add_group(group) gid = group_info.gr_gid add_user_to_group(user, group) if (max_map_count < (2 * nr_hugepages)): max_map_count = (2 * nr_hugepages) sysctl_settings = {'vm.nr_hugepages': nr_hugepages, 'vm.max_map_count': max_map_count, 'vm.hugetlb_shm_group': gid} if set_shmmax: shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) shmmax_minsize = (bytes_from_string(pagesize) * nr_hugepages) if (shmmax_minsize > shmmax_current): sysctl_settings['kernel.shmmax'] = shmmax_minsize sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') mkdir(mnt_point, owner='root', group='root', perms=493, force=False) lfstab = fstab.Fstab() fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) if fstab_entry: lfstab.remove_entry(fstab_entry) entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) lfstab.add_entry(entry) if mount: fstab_mount(mnt_point)
Enable hugepages on system. Args: user (str) -- Username to allow access to hugepages to group (str) -- Group name to own hugepages nr_hugepages (int) -- Number of pages to reserve max_map_count (int) -- Number of Virtual Memory Areas a process can own mnt_point (str) -- Directory to mount hugepages on pagesize (str) -- Size of hugepages mount (bool) -- Whether to Mount hugepages
codesearchnet
def from_scf_input(cls, workdir, scf_input, manager=None, allocate=True):
    flow = cls(workdir, manager=manager)
    flow.register_scf_task(scf_input)
    scf_task = flow[0][0]
    nl_work = DteWork.from_scf_task(scf_task)
    flow.register_work(nl_work)
    if allocate:
        flow.allocate()
    return flow
Create a `NonlinearFlow` for second order susceptibility calculations from an `AbinitInput` defining a ground-state run.
Args:
    workdir: Working directory of the flow.
    scf_input: :class:`AbinitInput` object with the parameters for the GS-SCF run.
    manager: :class:`TaskManager` object. Read from `manager.yml` if None.
    allocate: True if the flow should be allocated before returning.
Return:
    :class:`NonlinearFlow` object.
codesearchnet
def soft_shrink(x, threshold=0.5):
    return ops.soft_shrink(x, threshold=threshold)
Soft Shrink activation function.
It is defined as:
`soft_shrink(x) = x - threshold` if `x > threshold`,
`soft_shrink(x) = x + threshold` if `x < -threshold`,
`soft_shrink(x) = 0` otherwise.
Args:
    x: Input tensor.
    threshold: Threshold value. Defaults to 0.5.
github-repos
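A minimal NumPy sketch of the same piecewise rule as `soft_shrink` above, for intuition only; it does not use the Keras `ops` backend that the function delegates to:

import numpy as np

def soft_shrink_reference(x, threshold=0.5):
    # Shrink values toward zero by `threshold`; zero out the band [-threshold, threshold].
    x = np.asarray(x, dtype=float)
    return np.where(x > threshold, x - threshold,
                    np.where(x < -threshold, x + threshold, 0.0))

print(soft_shrink_reference([-2.0, -0.3, 0.0, 0.4, 1.5]))  # -> [-1.5, 0.0, 0.0, 0.0, 1.0]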
def image_needs_pushing(image):
    d = docker_client()
    try:
        d.images.get_registry_data(image)
    except docker.errors.APIError:
        return True
    else:
        return False
Return whether an image needs pushing.
Args:
    image (str): the `repository:tag` image to be built.
Returns:
    True: if image needs to be pushed (not on registry)
    False: if not (already present on registry)
juraj-google-style
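A hedged usage sketch for `image_needs_pushing` above; the image reference and the push step are illustrative, and `docker_client()` is assumed to return a `docker.DockerClient` as in the snippet:

image = 'example.org/builds/app:latest'  # hypothetical image reference
if image_needs_pushing(image):
    client = docker_client()
    client.images.push(image)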
def is_scalar_event(self, name='is_scalar_event'):
    with self._name_scope(name):
        return ops.convert_to_tensor(
            self._is_scalar_helper(self.event_shape, self.event_shape_tensor),
            name='is_scalar_event')
Indicates that `event_shape == []`.
Args:
    name: Python `str` prepended to names of ops created by this function.
Returns:
    is_scalar_event: `bool` scalar `Tensor`.
github-repos
class _PruneReindexingLMHead(nn.Module):

    def __init__(self, original_lm_head, assistant_overlap_token_ids):
        super().__init__()
        self.pruned_lm_head = prune_linear_layer(
            original_lm_head, assistant_overlap_token_ids
        ).to(original_lm_head.weight.dtype)

    def forward(self, hidden_states):
        pruned_logits = self.pruned_lm_head(hidden_states)
        return pruned_logits
A class to prune and reindex the language model head.
This class prunes the language model head to only include the specified token IDs and reindexes the logits to map back to the original vocabulary.
Args:
    original_lm_head (nn.Module): The original language model head.
    assistant_overlap_token_ids (list[int]): The list of token IDs to keep.
github-repos
def QA_fetch_risk(message={}, params={"_id": 0, 'assets': 0, 'timeindex': 0,
                                      'totaltimeindex': 0, 'benchmark_assets': 0,
                                      'month_profit': 0}, db=DATABASE):
    collection = DATABASE.risk
    return [res for res in collection.find(message, params)]
Get the risk message.
Arguments:
    message (dict) -- MongoDB query document used to filter risk records
Keyword Arguments:
    params (dict) -- field projection applied to the query
    db -- database handle (default: {DATABASE})
Returns:
    list -- matching risk documents
juraj-google-style
def flatten_dict(x):
    out = {}
    for k, v in x.items():
        out = _recur_flatten(k, v, out)
    return out
Flatten a dict.
Flatten an arbitrarily nested dict as output by to_dict.
.. note:: Keys in the flattened dict may get very long.
Args:
    x (dict): Arbitrarily nested dict (maybe resembling a tree) with literal/scalar leaf values
Returns:
    dict: flattened 1D dict
juraj-google-style
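The `_recur_flatten` helper used by `flatten_dict` above is not shown; a minimal sketch of one plausible implementation, assuming nested keys are joined with a dot (the real separator may differ):

def _recur_flatten(key, value, out):
    # Leaves are stored under the accumulated key; nested dicts recurse with extended keys.
    if not isinstance(value, dict):
        out[key] = value
        return out
    for k, v in value.items():
        out = _recur_flatten('{}.{}'.format(key, k), v, out)
    return out

flatten_dict({'a': 1, 'b': {'c': 2, 'd': {'e': 3}}})  # -> {'a': 1, 'b.c': 2, 'b.d.e': 3}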
def ncx2cdf_and_gradient(x, k, l, truncation=10):
    g = 0.0
    dg = 0.0
    factorial = 1.0
    for j in range(truncation + 1):
        factorial *= j if j > 0 else 1
        h = (1 - tf.math.igammac((k + 2 * j) / 2.0, x / 2.0)) / factorial
        g += h * (l * 0.5) ** j
        dg += h * 0.5 * j * (l * 0.5) ** (j - 1)
    f = tf.math.exp(-0.5 * l)
    df = -0.5 * f
    p = f * g
    dp = df * g + f * dg
    return (p, dp)
Returns the CDF of noncentral X2 distribution and its gradient over l.
Args:
    x: Values of the random variable following a noncentral X2 distribution. A real `Tensor`.
    k: Degrees of freedom. A positive real `Tensor` of same shape as `x`.
    l: Non-centrality parameter. A positive real `Tensor` of same shape as `x`.
    truncation: A positive integer. When computing the CDF of a noncentral X2 distribution, it needs to calculate the sum of an expression from 0 to infinity. In practice, it needs to be truncated to compute an approximate value. This argument is the index of the last term that will be included in the sum. Default value: 10.
Returns:
    A tuple of two `Tensor`s. The first `Tensor` is the CDF. The second `Tensor` is the gradient of the CDF over l. Both `Tensor`s have the same shape as `x`.
github-repos
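For reference, the truncated series that `ncx2cdf_and_gradient` evaluates can be written as follows, with P(a, x) the regularized lower incomplete gamma function (i.e. 1 - igammac) and J the `truncation` argument:

F(x; k, \lambda) \approx e^{-\lambda/2} \sum_{j=0}^{J} \frac{(\lambda/2)^{j}}{j!}\, P\!\left(\tfrac{k+2j}{2}, \tfrac{x}{2}\right),
\qquad
\frac{\partial F}{\partial \lambda} \approx -\tfrac{1}{2} e^{-\lambda/2} \sum_{j=0}^{J} \frac{(\lambda/2)^{j}}{j!}\, P\!\left(\tfrac{k+2j}{2}, \tfrac{x}{2}\right)
+ e^{-\lambda/2} \sum_{j=0}^{J} \frac{j\,(\lambda/2)^{j-1}}{2\, j!}\, P\!\left(\tfrac{k+2j}{2}, \tfrac{x}{2}\right).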
def defer(target, args=None, kwargs=None, callback=None):
    obj = _defer(target, args, kwargs, callback)
    obj.finished.connect(lambda: _defer_cleanup(obj))
    obj.start()
    _defer_threads.append(obj)
    return obj
Perform operation in thread with callback.
Instances are cached until finished, at which point they are garbage collected. If we didn't do this, Python would step in and garbage collect the thread before having had time to finish, resulting in an exception.
Arguments:
    target (callable): Method or function to call
    args (list, optional): Positional arguments passed to `target`
    kwargs (dict, optional): Keyword arguments passed to `target`
    callback (callable, optional): Method or function to call once `target` has finished.
Returns:
    The deferred thread object, which stays cached until it has finished.
codesearchnet
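A hedged usage sketch for `defer` above; `_defer` appears to wrap a Qt-style thread object (it exposes `finished.connect` and `start`), and the callables here are illustrative only:

def fetch_data():
    return expensive_io_call()  # hypothetical long-running work

def on_done(result):
    print('finished with', result)

thread = defer(fetch_data, callback=on_done)
# The returned thread stays cached in _defer_threads until it finishes and is cleaned up.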
def on_persist_completed(self, block):
    if len(self._events_to_write):
        addr_db = self.db.prefixed_db(NotificationPrefix.PREFIX_ADDR)
        block_db = self.db.prefixed_db(NotificationPrefix.PREFIX_BLOCK)
        contract_db = self.db.prefixed_db(NotificationPrefix.PREFIX_CONTRACT)
        block_write_batch = block_db.write_batch()
        contract_write_batch = contract_db.write_batch()
        block_count = 0
        block_bytes = self._events_to_write[0].block_number.to_bytes(4, 'little')
        for evt in self._events_to_write:
            write_both = True
            hash_data = evt.ToByteArray()
            bytes_to = bytes(evt.addr_to.Data)
            bytes_from = bytes(evt.addr_from.Data)
            if bytes_to == bytes_from:
                write_both = False
            total_bytes_to = addr_db.get(bytes_to + NotificationPrefix.PREFIX_COUNT)
            total_bytes_from = addr_db.get(bytes_from + NotificationPrefix.PREFIX_COUNT)
            if not total_bytes_to:
                total_bytes_to = b'\x00'
            if not total_bytes_from:
                total_bytes_from = b'\x00'
            addr_to_key = bytes_to + total_bytes_to
            addr_from_key = bytes_from + total_bytes_from
            with addr_db.write_batch() as b:
                b.put(addr_to_key, hash_data)
                if write_both:
                    b.put(addr_from_key, hash_data)
                total_bytes_to = int.from_bytes(total_bytes_to, 'little') + 1
                total_bytes_from = int.from_bytes(total_bytes_from, 'little') + 1
                new_bytes_to = total_bytes_to.to_bytes(4, 'little')
                new_bytes_from = total_bytes_from.to_bytes(4, 'little')
                b.put(bytes_to + NotificationPrefix.PREFIX_COUNT, new_bytes_to)
                if write_both:
                    b.put(bytes_from + NotificationPrefix.PREFIX_COUNT, new_bytes_from)
            per_block_key = block_bytes + block_count.to_bytes(4, 'little')
            block_write_batch.put(per_block_key, hash_data)
            block_count += 1
            contract_bytes = bytes(evt.contract_hash.Data)
            count_for_contract = contract_db.get(contract_bytes + NotificationPrefix.PREFIX_COUNT)
            if not count_for_contract:
                count_for_contract = b'\x00'
            contract_event_key = contract_bytes + count_for_contract
            contract_count_int = int.from_bytes(count_for_contract, 'little') + 1
            new_contract_count = contract_count_int.to_bytes(4, 'little')
            contract_write_batch.put(contract_bytes + NotificationPrefix.PREFIX_COUNT, new_contract_count)
            contract_write_batch.put(contract_event_key, hash_data)
        block_write_batch.write()
        contract_write_batch.write()
        self._events_to_write = []
    if len(self._new_contracts_to_write):
        token_db = self.db.prefixed_db(NotificationPrefix.PREFIX_TOKEN)
        token_write_batch = token_db.write_batch()
        for token_event in self._new_contracts_to_write:
            try:
                hash_data = token_event.ToByteArray()
                hash_key = token_event.contract.Code.ScriptHash().ToBytes()
                token_write_batch.put(hash_key, hash_data)
            except Exception as e:
                logger.debug(f"Failed to write new contract, reason: {e}")
        token_write_batch.write()
        self._new_contracts_to_write = []
Called when a block has been persisted to disk. Used as a hook to persist notification data.
Args:
    block (neo.Core.Block): the currently persisting block
juraj-google-style
def get_variant(self, index=None):
    for variant in self.iter_variants():
        if variant.index == index:
            return variant
Get the variant with the associated index.
Returns:
    `Variant` object, or None if no variant with the given index exists.
codesearchnet
def _ParseRecordExtraField(self, byte_stream, file_offset):
    extra_field_map = self._GetDataTypeMap('asl_record_extra_field')

    try:
        record_extra_field = self._ReadStructureFromByteStream(
            byte_stream, file_offset, extra_field_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError((
            'Unable to parse record extra field at offset: 0x{0:08x} with error: '
            '{1!s}').format(file_offset, exception))

    return record_extra_field
Parses a record extra field.
Args:
    byte_stream (bytes): byte stream.
    file_offset (int): offset of the record extra field relative to the start of the file.
Returns:
    asl_record_extra_field: record extra field.
Raises:
    ParseError: if the record extra field cannot be parsed.
juraj-google-style
def assert_iter(**kw):
    for name, value in kw.items():
        if not isiter(value):
            raise TypeError('paco: {} must be an iterable object'.format(name))
Asserts that the given values implement a valid iterable interface.
Arguments:
    **kw (mixed): values to check for iterability.
Raises:
    TypeError: if the assertion fails.
codesearchnet
def nne(dim_red, true_labels):
    bt = BallTree(dim_red.T)
    correct = 0
    for i, l in enumerate(true_labels):
        dist, ind = bt.query([dim_red[:, i]], k=2)
        closest_cell = ind[0, 1]
        if true_labels[closest_cell] == l:
            correct += 1
    return float(correct) / len(true_labels)
Calculates the nearest neighbor accuracy (basically leave-one-out cross validation with a 1NN classifier).
Args:
    dim_red (array): dimensions (k, cells)
    true_labels (array): 1d array of integers
Returns:
    Nearest neighbor accuracy - fraction of points for which the 1NN classifier returns the correct value.
juraj-google-style
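A small self-contained check of `nne` above using random data; the exact accuracy value depends on the seed:

import numpy as np

rng = np.random.RandomState(0)
dim_red = rng.rand(2, 100)            # (k dimensions, cells)
true_labels = rng.randint(0, 3, 100)  # integer cluster labels
print(nne(dim_red, true_labels))      # fraction in [0, 1]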
def export_json(data, status, headers):
    dumped = json.dumps(data, ensure_ascii=False)
    resp = current_app.response_class(
        dumped, status=status, headers=headers,
        content_type='application/json; charset=utf-8')
    return resp
Creates a JSON response.
JSON content is encoded as UTF-8 rather than with Unicode escapes.
Args:
    data: any type object that can dump to json
    status (int): http status code
    headers (dict): http headers
juraj-google-style
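A hedged usage sketch for `export_json` above inside a Flask view (the function needs an application/request context); the route and payload are illustrative:

from flask import Flask

app = Flask(__name__)

@app.route('/ping')
def ping():
    return export_json({'message': 'pong', 'ok': True}, 200, {'X-Example': '1'})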
def walk_dependencies(root, visitor):

    def visit(parent, visitor):
        for d in get_dependencies(parent):
            visitor(d, parent)
            visit(d, visitor)

    visitor(root, None)
    visit(root, visitor)
Call visitor on root and all dependencies reachable from it, in depth-first (pre-order) order.
Args:
    root (component): component function or class
    visitor (function): signature is `func(component, parent)`. The call on root is `visitor(root, None)`.
codesearchnet
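A hedged usage sketch for `walk_dependencies` above; it assumes `get_dependencies(component)` resolves each component's direct dependencies, so the visitor only illustrates the call signature:

def print_edge(component, parent):
    # parent is None exactly once, for the root component.
    print(parent, '->', component)

walk_dependencies(my_root_component, print_edge)  # my_root_component is hypothetical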
def get(url, max_backoff=32, verbose=False, **kwargs):
    sleep_seconds = 1
    while sleep_seconds <= max_backoff:
        try:
            response = requests.get(url, **{**{'timeout': 30}, **kwargs})
            if 400 <= response.status_code < 500:
                return None
            if 200 <= response.status_code < 400:
                return response
        except RequestException as e:
            if verbose:
                print(str(e))
        time.sleep(sleep_seconds)
        sleep_seconds *= 2
    return None
Adding retries to requests.get with exponential backoff.
Args:
    url (str): The URL to fetch
    max_backoff (int): The maximum number of seconds to sleep between retries
    verbose (bool): Whether to print exceptions.
Returns:
    Response: For successful requests return requests' response. `None` otherwise.
juraj-google-style
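A brief usage sketch for the retrying `get` above; the URL is illustrative:

response = get('https://example.org/api/items', max_backoff=8, verbose=True)
if response is not None:
    items = response.json()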
def from_rfc3339(cls, rfc3339: str) -> 'Timestamp':
    try:
        dt = dateutil.parser.isoparse(rfc3339).astimezone(pytz.UTC)
    except ValueError as e:
        raise ValueError(
            "Could not parse RFC 3339 string '{}' due to error: '{}'.".format(rfc3339, e))
    return cls.from_utc_datetime(dt)
Create a ``Timestamp`` instance from an RFC 3339 compliant string.
.. note:: All timezones are implicitly converted to UTC.
Args:
    rfc3339: String in RFC 3339 form.
github-repos
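A brief usage sketch, assuming this classmethod lives on Beam's `Timestamp` type:

ts = Timestamp.from_rfc3339('2021-03-01T12:00:00.5+02:00')
# Parsed with dateutil and normalized to UTC, i.e. 2021-03-01 10:00:00.5 UTC.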
def set_cn_energies(self, cn_energies):
    for site in self.sites:
        site.set_cn_occupation_energies(cn_energies[site.label])
    self.cn_energies = cn_energies
Set the coordination number dependent energies for this lattice.
Args:
    cn_energies (Dict(Str:Dict(Int:Float))): Dictionary of dictionaries specifying the coordination number dependent energies for each site type. e.g.::
        { 'A' : { 0 : 0.0, 1 : 1.0, 2 : 2.0 }, 'B' : { 0 : 0.0, 1 : 2.0 } }
Returns:
    None
codesearchnet
def _actor_method_call(self, method_name, args=None, kwargs=None, num_return_vals=None):
    worker = ray.worker.get_global_worker()
    worker.check_connected()
    function_signature = self._ray_method_signatures[method_name]
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    args = signature.extend_args(function_signature, args, kwargs)
    if worker.mode == ray.LOCAL_MODE:
        return getattr(worker.actors[self._ray_actor_id], method_name)(*copy.deepcopy(args))
    function_descriptor = FunctionDescriptor(
        self._ray_module_name, method_name, self._ray_class_name)
    with self._ray_actor_lock:
        object_ids = worker.submit_task(
            function_descriptor,
            args,
            actor_id=self._ray_actor_id,
            actor_handle_id=self._ray_actor_handle_id,
            actor_counter=self._ray_actor_counter,
            actor_creation_dummy_object_id=self._ray_actor_creation_dummy_object_id,
            execution_dependencies=[self._ray_actor_cursor],
            new_actor_handles=self._ray_new_actor_handles,
            num_return_vals=num_return_vals + 1,
            resources={'CPU': self._ray_actor_method_cpus},
            placement_resources={},
            driver_id=self._ray_actor_driver_id)
        self._ray_actor_counter += 1
        self._ray_actor_cursor = object_ids.pop()
        self._ray_new_actor_handles = []
    if len(object_ids) == 1:
        object_ids = object_ids[0]
    elif len(object_ids) == 0:
        object_ids = None
    return object_ids
Method execution stub for an actor handle.
This is the function that executes when `actor.method_name.remote(*args, **kwargs)` is called. Instead of executing locally, the method is packaged as a task and scheduled to the remote actor instance.
Args:
    method_name: The name of the actor method to execute.
    args: A list of arguments for the actor method.
    kwargs: A dictionary of keyword arguments for the actor method.
    num_return_vals (int): The number of return values for the method.
Returns:
    object_ids: A list of object IDs returned by the remote actor method.
codesearchnet
def __parse_entry(entry_line):
    if entry_line.startswith("!"):
        entry_line = sub(r"!\w*?_", '', entry_line)
    else:
        entry_line = entry_line.strip()[1:]
    try:
        entry_type, entry_name = [i.strip() for i in entry_line.split("=", 1)]
    except ValueError:
        entry_type = [i.strip() for i in entry_line.split("=", 1)][0]
        entry_name = ''
    return entry_type, entry_name
Parse the SOFT file entry name line that starts with '^', '!' or '#'.
Args:
    entry_line (:obj:`str`): Line from SOFT to be parsed.
Returns:
    :obj:`2-tuple`: Type of entry, value of entry.
juraj-google-style
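A brief usage sketch for the private `__parse_entry` helper above; the example lines follow the SOFT format it handles:

__parse_entry('^SERIES = GSE1563')        # -> ('SERIES', 'GSE1563')
__parse_entry('!Series_title = Example')  # -> ('title', 'Example'); the '!Series_' prefix is stripped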
def build(self, input_shape):
    if self._is_graph_network:
        super(Model, self).build(input_shape)
        return
    if input_shape is None:
        raise ValueError('Input shape must be defined when calling build on a model subclass network.')
    valid_types = (tuple, list, tensor_shape.TensorShape, dict)
    if not isinstance(input_shape, valid_types):
        raise ValueError('Specified input shape is not one of the valid types. Please specify a batch input shape of type tuple or list of input shapes. User provided input type: {}'.format(type(input_shape)))
    if input_shape and (not self.inputs):
        if context.executing_eagerly():
            graph = func_graph.FuncGraph('build_graph')
        else:
            graph = backend.get_graph()
        with graph.as_default():
            if isinstance(input_shape, list) and all((d is None or isinstance(d, int) for d in input_shape)):
                input_shape = tuple(input_shape)
            if isinstance(input_shape, list):
                x = [base_layer_utils.generate_placeholders_from_shape(shape) for shape in input_shape]
            elif isinstance(input_shape, dict):
                x = {k: base_layer_utils.generate_placeholders_from_shape(shape) for k, shape in input_shape.items()}
            else:
                x = base_layer_utils.generate_placeholders_from_shape(input_shape)
            kwargs = {}
            call_signature = self._call_full_argspec
            call_args = call_signature.args
            if len(call_args) > 2:
                if call_signature.defaults:
                    call_args = call_args[2:-len(call_signature.defaults)]
                else:
                    call_args = call_args[2:]
                for arg in call_args:
                    if arg == 'training':
                        kwargs['training'] = False
                    else:
                        raise ValueError('Currently, you cannot build your model if it has positional or keyword arguments that are not inputs to the model, but are required for its `call` method. Instead, in order to instantiate and build your model, `call` your model on real tensor data with all expected call arguments.')
            elif len(call_args) < 2:
                raise ValueError('You can only call `build` on a model if its `call` method accepts an `inputs` argument.')
            try:
                self.call(x, **kwargs)
            except (errors.InvalidArgumentError, TypeError):
                raise ValueError('You cannot build your model by calling `build` if your layers do not support float type inputs. Instead, in order to instantiate and build your model, `call` your model on real tensor data (of the correct dtype).')
    super(Model, self).build(input_shape)
Builds the model based on input shapes received.
This is to be used for subclassed models, which do not know at instantiation time what their inputs look like.
This method only exists for users who want to call `model.build()` in a standalone way (as a substitute for calling the model on real data to build it). It will never be called by the framework (and thus it will never throw unexpected errors in an unrelated workflow).
Args:
    input_shape: Single tuple, TensorShape, or list/dict of shapes, where shapes are tuples, integers, or TensorShapes.
Raises:
    ValueError:
        1. In case of invalid user-provided data (not of type tuple, list, TensorShape, or dict).
        2. If the model requires call arguments that are agnostic to the input shapes (positional or kwarg in call signature).
        3. If not all layers were properly built.
        4. If float type inputs are not supported within the layers.
        In each of these cases, the user should build their model by calling it on real tensor data.
github-repos
def handle_closed_task(self, task_name, record):
    if task_name not in self.tasks:
        return
    if self.main_failed:
        self.mark_parent_tasks_as_failed(self.cur_task)
    if self.tasks[task_name].failed:
        record.msg = ColorFormatter.colored('red', END_TASK_ON_ERROR_MSG)
    else:
        record.msg = ColorFormatter.colored('green', END_TASK_MSG)
    record.msg += ' (in %s)' % self.tasks[task_name].elapsed_time()
    if self.should_show_by_depth() or self.tasks[task_name].force_show:
        if self.tasks[task_name].force_show:
            self.handle_error()
        self.pretty_emit(record, is_header=True)
    self.close_children_tasks(task_name)
    self.tasks.pop(task_name)
Do everything needed when a task is closed.
Params:
    task_name (str): name of the task that is finishing
    record (logging.LogRecord): log record with all the info
Returns:
    None
juraj-google-style
def add_vtep(self, name, vtep, vlan=None):
    if not vlan:
        cmd = 'vxlan flood vtep add {}'.format(vtep)
    else:
        cmd = 'vxlan vlan {} flood vtep add {}'.format(vlan, vtep)
    return self.configure_interface(name, cmd)
Adds a new VTEP endpoint to the global or local flood list.
EosVersion:
    4.13.7M
Args:
    name (str): The name of the interface to configure
    vtep (str): The IP address of the remote VTEP endpoint to add
    vlan (str): The VLAN ID associated with this VTEP. If the VLAN keyword is used, then the VTEP is configured as a local flood endpoint
Returns:
    True if the command completes successfully
codesearchnet
def hrs_84_and_db12_8_or_20_6(self, value=None):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float '
                'for field `hrs_84_and_db12_8_or_20_6`'.format(value))
    self._hrs_84_and_db12_8_or_20_6 = value
Corresponds to IDD Field `hrs_84_and_db12_8_or_20_6`.
Number of hours between 8 AM and 4 PM (inclusive) with dry-bulb temperature between 12.8 and 20.6 C.
Args:
    value (float): value for IDD Field `hrs_84_and_db12_8_or_20_6`
        if `value` is None it will not be checked against the specification and is assumed to be a missing value
Raises:
    ValueError: if `value` is not a valid value
juraj-google-style
def trade_day(dt, cal='US'):
    from xone import calendar
    dt = pd.Timestamp(dt).date()
    return calendar.trading_dates(start=dt - pd.Timedelta('10D'), end=dt, calendar=cal)[-1]
Latest trading day w.r.t given dt.
Args:
    dt: date of reference
    cal: trading calendar
Returns:
    pd.Timestamp: last trading day
Examples:
    >>> trade_day('2018-12-25').strftime('%Y-%m-%d')
    '2018-12-24'
codesearchnet
def plot(self, freq=None, figsize=(15, 5), title=None, logy=False, **kwargs):
    if title is None:
        title = self._get_default_plot_title(freq, 'Equity Progression')
    ser = self._get_series(freq).rebase()
    return ser.plot(figsize=figsize, logy=logy, title=title, **kwargs)
Helper function for plotting the series.
Args:
    * freq (str): Data frequency used for display purposes. Refer to pandas docs for valid freq strings.
    * figsize ((x,y)): figure size
    * title (str): Title if default not appropriate
    * logy (bool): log-scale for y axis
    * kwargs: passed to pandas' plot method
juraj-google-style
def add_variants(self, variants):
    operations = []
    nr_inserted = 0
    for i, variant in enumerate(variants, 1):
        if not variant:
            continue
        nr_inserted += 1
        update = self._get_update(variant)
        operations.append(
            UpdateOne(
                {'_id': variant['_id']},
                update,
                upsert=True
            )
        )
        if i % 10000 == 0:
            self.db.variant.bulk_write(operations, ordered=False)
            operations = []
    if len(operations) > 0:
        self.db.variant.bulk_write(operations, ordered=False)
    return nr_inserted
Add variants in bulk.
This can be used for faster inserts.
Args:
    variants (iterable(dict))
Returns:
    nr_inserted (int): number of variants added
juraj-google-style
def from_response(self, response_data):
    return HSAccessTokenAuth(
        response_data['access_token'],
        response_data['token_type'],
        response_data['refresh_token'],
        response_data['expires_in'],
        response_data.get('state'))
Builds a new HSAccessTokenAuth straight from response data.
Args:
    response_data (dict): Response data to use
Returns:
    A HSAccessTokenAuth object
codesearchnet
def escalation_date(self, escalation_date):
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])

    escalation_date = self._utils.format_datetime(
        escalation_date, date_format='%Y-%m-%dT%H:%M:%SZ'
    )
    self._data['escalationDate'] = escalation_date
    request = {'escalationDate': escalation_date}
    return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)
Sets the task escalation_date.
Args:
    escalation_date: Converted to %Y-%m-%dT%H:%M:%SZ date format
juraj-google-style