Dataset columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes)
def ReleaseObject(self, identifier): if identifier not in self._values: raise KeyError('Missing cached object for identifier: {0:s}'.format( identifier)) cache_value = self._values[identifier] if not cache_value: raise RuntimeError('Missing cache value for identifier: {0:s}'.format( identifier)) cache_value.DecrementReferenceCount()
Releases a cached object based on the identifier. This method decrements the cache value reference count. Args: identifier (str): VFS object identifier. Raises: KeyError: if the VFS object is not found in the cache. RuntimeError: if the cache value is missing.
juraj-google-style
def __init__(self, application, a_service): if not isinstance(a_service, sm_messages.Service): raise ValueError(u"service is None or not an instance of Service") self._application = application self._service = a_service method_registry, reporting_rules = self._configure() self._method_registry = method_registry self._reporting_rules = reporting_rules
Initializes a new Middleware instance. Args: application: the wrapped WSGI application a_service (:class:`endpoints_management.gen.servicemanagement_v1_messages.Service`): a service instance
juraj-google-style
def follow(id, edges, directed=False, _visited=None): if _visited is None: _visited = set() _visited.add(id) for row in edges[edges.ix[:, 0] == id].values: if(row[1] not in _visited): follow(row[1], edges, directed, _visited) if not directed: for row in edges[edges.ix[:, 1] == id].values: if(row[0] not in _visited): follow(row[0], edges, directed, _visited) return _visited
Follow a graph to find the nodes connected to a given node. Args: id: the id of the starting node edges: a pandas DataFrame of edges. Each row is an edge with two columns containing the ids of the vertices. directed: If True, edges are directed from first column to second column. Otherwise edges are undirected. _visited: used internally for recursion Returns: the set of all nodes connected to the starting node.
juraj-google-style
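A minimal usage sketch for `follow` above (an illustration, not from the original source); it assumes a pandas version where the deprecated `DataFrame.ix` indexer used inside the function is still available.

import pandas as pd

# Edge list: each row connects the node in the first column to the one in the second.
edges = pd.DataFrame([[1, 2], [2, 3], [4, 5]], columns=['src', 'dst'])

# Undirected traversal starting from node 1; expected result: {1, 2, 3}.
connected = follow(1, edges)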
def apply_masks(tensor: torch.Tensor, masks: List[torch.Tensor]) -> torch.Tensor: all_masked_tensors = [] for mask in masks: mask = mask.to(tensor.device) mask_keep = mask.unsqueeze(-1).repeat(1, 1, tensor.size(-1)) all_masked_tensors += [torch.gather(tensor, dim=1, index=mask_keep)] return torch.cat(all_masked_tensors, dim=0)
Gathers, for each mask, the patches of `tensor` whose indices are listed in that mask and concatenates the masked copies along the batch dimension. Args: tensor (`torch.Tensor`): Tensor of shape [batch_size, num_patches, feature_dim] masks (`List[torch.Tensor]`): List of tensors of shape [batch_size, num_patches] containing indices of patches to keep Returns: `torch.Tensor`: The masked tensors concatenated along dimension 0.
github-repos
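A small illustrative sketch (an assumption, not taken from the source) showing how a single mask of patch indices selects patches from each sequence.

import torch

# [batch_size=2, num_patches=4, feature_dim=3]
tensor = torch.arange(24, dtype=torch.float32).reshape(2, 4, 3)

# Keep patches 0 and 2 of the first sequence, 1 and 3 of the second.
mask = torch.tensor([[0, 2], [1, 3]])

# One mask in the list, so the output has shape [2, 2, 3].
out = apply_masks(tensor, [mask])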
def retrieve_review(self, reviewer, product): if (not isinstance(reviewer, self._reviewer_cls)): raise TypeError("Type of given reviewer isn't acceptable:", reviewer, ', expected:', self._reviewer_cls) elif (not isinstance(product, self._product_cls)): raise TypeError("Type of given product isn't acceptable:", product, ', expected:', self._product_cls) try: return self.graph[reviewer][product]['review'] except TypeError: raise KeyError('{0} does not review {1}.'.format(reviewer, product))
Retrieve the review that the given reviewer gave to the given product. Args: reviewer: An instance of Reviewer. product: An instance of Product. Returns: A review object. Raises: TypeError: when the given reviewer and product aren't instances of the reviewer and product classes specified when this graph was constructed. KeyError: When the reviewer does not review the product.
codesearchnet
def in_to_out(self, in_path, out_path=None): is_in_place_edit = False if is_same_file(in_path, out_path): logger.debug('in path and out path are the same file. writing to temp file and then replacing in path with the temp file.') out_path = None is_in_place_edit = True logger.debug(f'opening source file: {in_path}') with open(in_path) as infile: if out_path: logger.debug(f'opening destination file for writing: {out_path}') ensure_dir(out_path) with open(out_path, 'w') as outfile: outfile.writelines(self.formatter(infile)) return else: logger.debug('opening temp file for writing...') with NamedTemporaryFile(mode='w+t', dir=os.path.dirname(in_path), delete=False) as outfile: outfile.writelines(self.formatter(infile)) is_in_place_edit = True if is_in_place_edit: logger.debug(f'moving temp file to: {in_path}') move_temp_file(outfile.name, infile.name)
Write a single file in to out, running self.formatter on each line. If in_path and out_path point to the same thing it will in-place edit and overwrite the in path. Even easier, if you do want to edit a file in place, don't specify out_path, or set it to None. Args: in_path: str or path-like. Must refer to a single existing file. out_path: str or path-like. Must refer to a single destination file location. Will create the directory structure if it doesn't exist. If out_path is not specified or None, will in-place edit and overwrite the input file. Returns: None.
codesearchnet
def __init__(self, out_stream=None, hide_cursor=True): BaseWindow.__init__(self, out_stream=out_stream, hide_cursor=hide_cursor) self.fullscreen_ctx = self.t.fullscreen()
Constructs a FullscreenWindow Args: out_stream (file): Defaults to sys.__stdout__ hide_cursor (bool): Hides cursor while in context
juraj-google-style
def _build_graph(self, tags): graph = SimpleGraph() for tag_index in xrange(len(tags)): for entity_index in xrange(len(tags[tag_index].get('entities'))): a_entity_name = graph_key_from_tag(tags[tag_index], entity_index) tokens = self.tokenizer.tokenize(tags[tag_index].get('entities', [])[entity_index].get('match')) for tag in tags[(tag_index + 1):]: start_token = tag.get('start_token') if (start_token >= (tags[tag_index].get('start_token') + len(tokens))): for b_entity_index in xrange(len(tag.get('entities'))): b_entity_name = graph_key_from_tag(tag, b_entity_index) graph.add_edge(a_entity_name, b_entity_name) return graph
Builds a graph from the entities included in the tags. Note this is used internally. Args: tags (list): A list of the tags to include in graph Returns: graph: the resulting graph of the tagged entities.
codesearchnet
def add(self, *dic): dicList = list(flatten(dic)) for d in dicList: di = [] for k in d: di.append(Pair(k, IntegerSingle(d[k]))) dictSingle = DictSingle(di) self._add([dictSingle], self.l)
Add a config to StartCalendarInterval. Args: *dic (dict): dictionary with format {'Day': 12, 'Hour': 34} Available keys are Month, Day, Weekday, Hour, Minute. *Note the uppercase.* You can use gen(), genMix() to generate complex config dictionaries.
juraj-google-style
def _StructPackEncoder(wire_type, format): value_size = struct.calcsize(format) def SpecificEncoder(field_number, is_repeated, is_packed): local_struct_pack = struct.pack if is_packed: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint def EncodePackedField(write, value): write(tag_bytes) local_EncodeVarint(write, (len(value) * value_size)) for element in value: write(local_struct_pack(format, element)) return EncodePackedField elif is_repeated: tag_bytes = TagBytes(field_number, wire_type) def EncodeRepeatedField(write, value): for element in value: write(tag_bytes) write(local_struct_pack(format, element)) return EncodeRepeatedField else: tag_bytes = TagBytes(field_number, wire_type) def EncodeField(write, value): write(tag_bytes) return write(local_struct_pack(format, value)) return EncodeField return SpecificEncoder
Return a constructor for an encoder for a fixed-width field. Args: wire_type: The field's wire type, for encoding tags. format: The format string to pass to struct.pack().
codesearchnet
def ask_for_approval(full_changeset=None, params_diff=None, include_verbose=False): approval_options = ['y', 'n'] if include_verbose: approval_options.append('v') approve = ui.ask('Execute the above changes? [{}] '.format('/'.join(approval_options))).lower() if (include_verbose and (approve == 'v')): if params_diff: logger.info('Full changeset:\n\n%s\n%s', format_params_diff(params_diff), yaml.safe_dump(full_changeset)) else: logger.info('Full changeset:\n%s', yaml.safe_dump(full_changeset)) return ask_for_approval() elif (approve != 'y'): raise exceptions.CancelExecution
Prompt the user for approval to execute a change set. Args: full_changeset (list, optional): A list of the full changeset that will be output if the user specifies verbose. params_diff (list, optional): A list of DictValue detailing the differences between two parameters returned by :func:`stacker.actions.diff.diff_dictionaries` include_verbose (bool, optional): Boolean for whether or not to include the verbose option
codesearchnet
def arange(start, stop=None, step=1, dtype=None): if not step: raise ValueError('step must be non-zero.') if dtype: dtype = np_utils.result_type(dtype) elif stop is None: dtype = np_utils.result_type(start, step) else: dtype = np_utils.result_type(start, step, stop) if step > 0 and (stop is not None and start > stop or (stop is None and start < 0)): return array([], dtype=dtype) if step < 0 and (stop is not None and start < stop or (stop is None and start > 0)): return array([], dtype=dtype) return math_ops.cast(math_ops.range(start, limit=stop, delta=step), dtype=dtype)
Returns `step`-separated values in the range [start, stop). Args: start: Start of the interval. Included in the range. stop: End of the interval. If not specified, the range starts at 0 and the `start` value is used as `stop`. If specified, it is not included in the range if `step` is an integer. When `step` is floating point, it may or may not be included. step: The difference between 2 consecutive values in the output range. It is recommended to use `linspace` instead of using non-integer values for `step`. dtype: Optional. Type of the resulting ndarray. Could be a python type, a NumPy type or a TensorFlow `DType`. If not provided, the largest type of `start`, `stop`, `step` is used. Raises: ValueError: If step is zero.
github-repos
def common_vector_root(vec1, vec2): root = [] for (v1, v2) in zip(vec1, vec2): if (v1 == v2): root.append(v1) else: return root return root
Return common root of the two vectors. Args: vec1 (list/tuple): First vector. vec2 (list/tuple): Second vector. Usage example:: >>> common_vector_root([1, 2, 3, 4, 5], [1, 2, 8, 9, 0]) [1, 2] Returns: list: Common part of two vectors or blank list.
codesearchnet
def to_add_skip_model(self, start_id, end_id): self.operation_history.append(("to_add_skip_model", start_id, end_id)) filters_end = self.layer_list[end_id].output.shape[-1] filters_start = self.layer_list[start_id].output.shape[-1] start_node_id = self.layer_id_to_output_node_ids[start_id][0] pre_end_node_id = self.layer_id_to_input_node_ids[end_id][0] end_node_id = self.layer_id_to_output_node_ids[end_id][0] skip_output_id = self._insert_pooling_layer_chain(start_node_id, end_node_id) new_conv_layer = get_conv_class(self.n_dim)(filters_start, filters_end, 1) skip_output_id = self.add_layer(new_conv_layer, skip_output_id) add_input_node_id = self._add_node(deepcopy(self.node_list[end_node_id])) add_layer = StubAdd() self._redirect_edge(pre_end_node_id, end_node_id, add_input_node_id) self._add_edge(add_layer, add_input_node_id, end_node_id) self._add_edge(add_layer, skip_output_id, end_node_id) add_layer.input = [ self.node_list[add_input_node_id], self.node_list[skip_output_id], ] add_layer.output = self.node_list[end_node_id] self.node_list[end_node_id].shape = add_layer.output_shape if self.weighted: filter_shape = (1,) * self.n_dim weights = np.zeros((filters_end, filters_start) + filter_shape) bias = np.zeros(filters_end) new_conv_layer.set_weights( (add_noise(weights, np.array([0, 1])), add_noise(bias, np.array([0, 1]))) )
Add a weighted add skip-connection from after start node to end node. Args: start_id: The convolutional layer ID, after which to start the skip-connection. end_id: The convolutional layer ID, after which to end the skip-connection.
juraj-google-style
def calculate_sleep_time(attempt, delay_factor=5.0, randomization_factor=0.5, max_delay=120): if (attempt <= 0): return 0 delay = (float((2 ** (attempt - 1))) * float(delay_factor)) delay = (delay * ((randomization_factor * random.random()) + 1)) return min(delay, max_delay)
Calculate the sleep time between retries, in seconds. Based off of `taskcluster.utils.calculateSleepTime`, but with kwargs instead of constant `delay_factor`/`randomization_factor`/`max_delay`. The taskcluster function generally slept for less than a second, which didn't always get past server issues. Args: attempt (int): the retry attempt number delay_factor (float, optional): a multiplier for the delay time. Defaults to 5. randomization_factor (float, optional): a randomization multiplier for the delay time. Defaults to .5. max_delay (float, optional): the max delay to sleep. Defaults to 120 (seconds). Returns: float: the time to sleep, in seconds.
codesearchnet
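A quick usage sketch (illustration only): printing the backoff schedule for the first few attempts with the default factors. Each delay is roughly double the previous one, randomized upward by up to 50% and capped at `max_delay`.

import random
random.seed(0)  # hypothetical seed, only to make the illustration repeatable

for attempt in range(1, 6):
    # attempt 1 falls in [5.0, 7.5), attempt 2 in [10.0, 15.0), and so on.
    print(attempt, calculate_sleep_time(attempt))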
def get_data(self, file_path=sys.stdin, delimiter=',', categories_delimiter=None): if (file_path == sys.stdin): logger.info('Read data from standard input') lines = [line.replace('\n', '') for line in file_path] else: logger.info(('Read data from file ' + file_path)) with open(file_path) as file: lines = list(file) columns = lines[0].rstrip('\n').split(delimiter)[1:] categories = None if categories_delimiter: (columns, categories) = zip(*[c.split(categories_delimiter, 1) for c in columns]) size = len(columns) data = [list(map(int, l.split(delimiter)[1:])) for l in lines[1:(size + 1)]] return DesignStructureMatrix(data, columns, categories)
Implement get_dsm method from Provider class. Parse CSV to return an instance of DSM. Args: file_path (str/fd): path or file descriptor. delimiter (str): character(s) used as delimiter for columns. categories_delimiter (str): character(s) used as delimiter for categories and keys (first column). Returns: DSM: instance of DSM.
codesearchnet
def plot_feature_correlation_heatmap(df, features, font_size=9, figsize=(15, 15), save_filename=None): features = features[:] features += ['target'] mcorr = df[features].corr() mask = np.zeros_like(mcorr, dtype=np.bool) mask[np.triu_indices_from(mask)] = True cmap = sns.diverging_palette(220, 10, as_cmap=True) fig = plt.figure(figsize=figsize) heatmap = sns.heatmap(mcorr, mask=mask, cmap=cmap, square=True, annot=True, fmt='0.2f', annot_kws={'size': font_size}) heatmap.tick_params(axis='both', which='major', labelsize=font_size) heatmap.tick_params(axis='both', which='minor', labelsize=font_size) heatmap.set_xticklabels(features, rotation=90) heatmap.set_yticklabels(reversed(features)) plt.show() if (save_filename is not None): fig.savefig(save_filename, dpi=300)
Plot a correlation heatmap between every feature pair. Args: df: Pandas dataframe containing the target column (named 'target'). features: The list of features to include in the correlation plot. font_size: Font size for heatmap cells and axis labels. figsize: The size of the plot. save_filename: (Optional) The path of the file to save a high-res version of the plot to.
codesearchnet
def _setup(self, delete=True): if delete: self.clear() with nn.context_scope(self.ctx): outputs = self.func(*(self.inputs_f + self.func_args), **self.func_kwargs) if (not hasattr(outputs, '__iter__')): self.outputs = [outputs] else: self.outputs = outputs self.func_ins = self.outputs[0].parent self.inputs = self.func_ins.inputs
Create a function instance and execute setup. Args: delete (bool): Delete buffered variables.
codesearchnet
def fit(self, x_tr, y_tr, epochs=50, batchsize=32, learning_rate=0.01, verbose=None, device=None): if batchsize > len(x_tr): batchsize = len(x_tr) verbose, device = SETTINGS.get_default(('verbose', verbose), ('device', device)) self.model = NCC_model() opt = th.optim.Adam(self.model.parameters(), lr=learning_rate) criterion = th.nn.BCEWithLogitsLoss() y = y_tr.values if isinstance(y_tr, pd.DataFrame) else y_tr y = th.Tensor(y)/2 + .5 self.model = self.model.to(device) y = y.to(device) dataset = [] for i, (idx, row) in enumerate(x_tr.iterrows()): a = row['A'].reshape((len(row['A']), 1)) b = row['B'].reshape((len(row['B']), 1)) m = np.hstack((a, b)) m = m.astype('float32') m = th.from_numpy(m).t().unsqueeze(0) dataset.append(m) dataset = [m.to(device) for m in dataset] acc = [0] da = th.utils.data.DataLoader(Dataset(dataset, y), batch_size=batchsize, shuffle=True) data_per_epoch = (len(dataset) // batchsize) with trange(epochs, desc="Epochs", disable=not verbose) as te: for epoch in te: with trange(data_per_epoch, desc="Batches of {}".format(batchsize), disable=not (verbose and batchsize == len(dataset))) as t: output = [] labels = [] for (batch, label), i in zip(da, t): opt.zero_grad() out = th.stack([self.model(m) for m in batch], 0).squeeze(2) loss = criterion(out, label) loss.backward() t.set_postfix(loss=loss.item()) opt.step() output.append(out) labels.append(label) acc = th.where(th.cat(output, 0) > .5, th.ones(len(output)), th.zeros(len(output))) - th.cat(labels, 0) te.set_postfix(Acc=1-acc.abs().mean().item())
Fit the NCC model. Args: x_tr (pd.DataFrame): CEPC format dataframe containing the pairs y_tr (pd.DataFrame or np.ndarray): labels associated to the pairs epochs (int): number of train epochs learning_rate (float): learning rate of Adam verbose (bool): verbosity (defaults to ``cdt.SETTINGS.verbose``) device (str): cuda or cpu device (defaults to ``cdt.SETTINGS.default_device``)
juraj-google-style
def put_many(self, type: Type[T], items: Iterable[T], context: PipelineContext=None) -> None: pass
Puts multiple objects of the same type into the data sink. Args: type: The type of the objects being inserted. items: The objects to be inserted. context: The context of the insertion (mutable).
codesearchnet
def get(self, key=None, **kwargs): clone = copy.deepcopy(self) if self._start: clone.adapter.set_params(start=self._start) if self._rows: clone.adapter.set_params(rows=self._rows) if key: (data, key) = clone.adapter.get(key) elif kwargs: (data, key) = clone.filter(**kwargs).adapter.get() else: (data, key) = clone.adapter.get() if (clone._cfg['rtype'] == ReturnType.Object): return (data, key) return self._make_model(data, key)
Ensures that only one result is returned from DB and raises an exception otherwise. Can work in 3 different ways. - If no argument is given, only does the "ensuring about one and only object" job. - If key is given as the only argument, retrieves the object from DB. - If query filters are given, implicitly calls filter() method. Raises: MultipleObjectsReturned: If more than one (1) record is returned.
codesearchnet
def _compute_hparam_info_from_values(self, name, values): result = api_pb2.HParamInfo(name=name, type=api_pb2.DATA_TYPE_UNSET) distinct_values = set( _protobuf_value_to_string(v) for v in values if _protobuf_value_type(v)) for v in values: v_type = _protobuf_value_type(v) if not v_type: continue if result.type == api_pb2.DATA_TYPE_UNSET: result.type = v_type elif result.type != v_type: result.type = api_pb2.DATA_TYPE_STRING if result.type == api_pb2.DATA_TYPE_STRING: break if result.type == api_pb2.DATA_TYPE_UNSET: return None if (result.type == api_pb2.DATA_TYPE_STRING and len(distinct_values) <= self._max_domain_discrete_len): result.domain_discrete.extend(distinct_values) return result
Builds an HParamInfo message from the hparam name and list of values. Args: name: string. The hparam name. values: list of google.protobuf.Value messages. The list of values for the hparam. Returns: An api_pb2.HParamInfo message.
juraj-google-style
def underlying_variable_ref(t): while t.op.type in ["Identity", "ReadVariableOp", "Enter"]: t = t.op.inputs[0] op_type = t.op.type if "Variable" in op_type or "VarHandle" in op_type: return t else: return None
Find the underlying variable ref. Traverses through Identity, ReadVariableOp, and Enter ops. Stops when op type has Variable or VarHandle in name. Args: t: a Tensor Returns: a Tensor that is a variable ref, or None on error.
juraj-google-style
def is_commutable(expr1, expr2, eps=0.00000001): return sum((x * x.conjugate()).real for x in commutator(expr1, expr2).coeffs()) < eps
Test whether expr1 and expr2 are commutable. Args: expr1 (Expr, Term or Pauli operator): Pauli's expression. expr2 (Expr, Term or Pauli operator): Pauli's expression. eps (float, optional): Machine epsilon. If |[expr1, expr2]| < eps, consider it is commutable. Returns: bool: if expr1 and expr2 are commutable, returns True, otherwise False.
juraj-google-style
def get_last_checkpoint(rundir='runinfo'): if (not os.path.isdir(rundir)): return [] dirs = sorted(os.listdir(rundir)) if (len(dirs) == 0): return [] last_runid = dirs[(- 1)] last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid)) if (not os.path.isdir(last_checkpoint)): return [] return [last_checkpoint]
Find the checkpoint from the last run, if one exists. Note that checkpoints are incremental, and this helper will not find previous checkpoints from earlier than the most recent run. It probably should be made to do so. Kwargs: - rundir(str) : Path to the runinfo directory Returns: - a list suitable for checkpointFiles parameter of DataFlowKernel constructor, with 0 or 1 elements
codesearchnet
def max(self, value): if value == self._defaults['max'] and 'max' in self._values: del self._values['max'] else: self._values['max'] = value
The max property. Args: value (float): The property value.
juraj-google-style
def DecoderLayer(feature_depth, feedforward_depth, num_heads, dropout, mode): return layers.Serial( layers.Residual( layers.LayerNorm(), layers.Branch(), layers.Parallel(layers.Identity(), layers.CausalMask(axis=-2)), layers.MultiHeadedAttention(feature_depth, num_heads=num_heads, dropout=dropout, mode=mode), layers.Dropout(rate=dropout, mode=mode) ), ResidualFeedForward(feature_depth, feedforward_depth, dropout, mode=mode) )
Transformer decoder layer. Args: feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) mode: str: 'train' or 'eval' Returns: the layer.
juraj-google-style
def is_noncopyable(class_, already_visited_cls_vars=None): logger = utils.loggers.cxx_parser class_decl = class_traits.get_declaration(class_) true_header = "is_noncopyable(TRUE) - %s - " % class_.decl_string if is_union(class_): return False if class_decl.is_abstract: logger.debug(true_header + "abstract client") return True copy_ = find_copy_constructor(class_decl) if copy_ and copy_.access_type == 'public' and not copy_.is_artificial: return False if already_visited_cls_vars is None: already_visited_cls_vars = [] for base_desc in class_decl.recursive_bases: assert isinstance(base_desc, class_declaration.hierarchy_info_t) if base_desc.related_class.decl_string in \ ('::boost::noncopyable', '::boost::noncopyable_::noncopyable'): logger.debug(true_header + "derives from boost::noncopyable") return True if not has_copy_constructor(base_desc.related_class): base_copy_ = find_copy_constructor(base_desc.related_class) if base_copy_ and base_copy_.access_type == 'private': logger.debug( true_header + "there is private copy constructor") return True elif __is_noncopyable_single( base_desc.related_class, already_visited_cls_vars): logger.debug( true_header + "__is_noncopyable_single returned True") return True if __is_noncopyable_single( base_desc.related_class, already_visited_cls_vars): logger.debug( true_header + "__is_noncopyable_single returned True") return True if not has_copy_constructor(class_decl): logger.debug(true_header + "does not have trivial copy constructor") return True elif not has_public_constructor(class_decl): logger.debug(true_header + "does not have a public constructor") return True elif has_destructor(class_decl) and not has_public_destructor(class_decl): logger.debug(true_header + "has private destructor") return True return __is_noncopyable_single(class_decl, already_visited_cls_vars)
Checks if a class is noncopyable. Args: class_ (declarations.class_t): the class to be checked already_visited_cls_vars (list): optional list of vars that should not be checked a second time, to prevent infinite recursion. In general you can ignore this argument, it is mainly used during recursive calls of is_noncopyable() done by pygccxml. Returns: bool: True if the class is noncopyable
juraj-google-style
def google_api_append(schema, values, rows): for row in rows: for s in schema: row[s['name']] = values[s['name']] yield row
Append columns to the rows containing the kwargs used to call the API. Args: schema (list): schema entries whose 'name' keys identify the API arguments to copy values (dict): the kwargs used to call the API rows (list): the rows to append the values to Returns (list): A generator yielding the augmented rows
github-repos
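A minimal sketch of how the helper might be called (the argument values below are made up for illustration); only the 'name' key of each schema entry is used.

schema = [{'name': 'accountId'}, {'name': 'profileId'}]
values = {'accountId': 1234, 'profileId': 5678}
rows = [{'clicks': 10}, {'clicks': 20}]

# The generator yields each row augmented with the API call arguments.
annotated = list(google_api_append(schema, values, rows))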
def count_resource_variables(model): if not isinstance(model, schema_fb.ModelT): model = convert_bytearray_to_object(model) unique_shared_names = set() for subgraph in model.subgraphs: if subgraph.operators is None: continue for op in subgraph.operators: builtin_code = schema_util.get_builtin_code_from_operator_code(model.operatorCodes[op.opcodeIndex]) if builtin_code == schema_fb.BuiltinOperator.VAR_HANDLE: unique_shared_names.add(op.builtinOptions.sharedName) return len(unique_shared_names)
Calculates the number of unique resource variables in a model. Args: model: the input tflite model, either as bytearray or object. Returns: An integer number representing the number of unique resource variables.
github-repos
def bulkDetails(self, packageNames): params = {'au': '1'} req = googleplay_pb2.BulkDetailsRequest() req.docid.extend(packageNames) data = req.SerializeToString() message = self.executeRequestApi2(BULK_URL, post_data=data.decode('utf-8'), content_type=CONTENT_TYPE_PROTO, params=params) response = message.payload.bulkDetailsResponse return [(None if (not utils.hasDoc(entry)) else utils.parseProtobufObj(entry.doc)) for entry in response.entry]
Get several apps details from a list of package names. This is much more efficient than calling N times details() since it requires only one request. If an item is not found it returns an empty object instead of throwing a RequestError('Item not found') like the details() function Args: packageNames (list): a list of app IDs (usually starting with 'com.'). Returns: a list of dictionaries containing docv2 data, or None if the app doesn't exist
codesearchnet
def ParseFileEntry(self, parser_mediator, file_entry): filename = parser_mediator.GetFilename() database = SQLiteDatabase(filename, temporary_directory=parser_mediator.temporary_directory) file_object = file_entry.GetFileObject() try: database.Open(file_object) except (IOError, ValueError, sqlite3.DatabaseError) as exception: parser_mediator.ProduceExtractionWarning('unable to open SQLite database with error: {0!s}'.format(exception)) file_object.close() return (database_wal, wal_file_entry) = self._OpenDatabaseWithWAL(parser_mediator, file_entry, file_object, filename) file_object.close() cache = SQLiteCache() try: table_names = frozenset(database.tables) for plugin in self._plugins: if (not plugin.REQUIRED_TABLES.issubset(table_names)): continue schema_match = plugin.CheckSchema(database) if (plugin.REQUIRES_SCHEMA_MATCH and (not schema_match)): parser_mediator.ProduceExtractionWarning('plugin: {0:s} found required tables but not a matching schema'.format(plugin.NAME)) continue parser_mediator.SetFileEntry(file_entry) parser_mediator.AddEventAttribute('schema_match', schema_match) try: plugin.UpdateChainAndProcess(parser_mediator, cache=cache, database=database, database_wal=database_wal, wal_file_entry=wal_file_entry) except Exception as exception: parser_mediator.ProduceExtractionWarning('plugin: {0:s} unable to parse SQLite database with error: {1!s}'.format(plugin.NAME, exception)) finally: parser_mediator.RemoveEventAttribute('schema_match') if (not database_wal): continue schema_match = plugin.CheckSchema(database) parser_mediator.SetFileEntry(wal_file_entry) parser_mediator.AddEventAttribute('schema_match', schema_match) try: plugin.UpdateChainAndProcess(parser_mediator, cache=cache, database=database, database_wal=database_wal, wal_file_entry=wal_file_entry) except Exception as exception: parser_mediator.ProduceExtractionWarning('plugin: {0:s} unable to parse SQLite database and WAL with error: {1!s}'.format(plugin.NAME, exception)) finally: parser_mediator.RemoveEventAttribute('schema_match') finally: database.Close()
Parses a SQLite database file entry. Args: parser_mediator (ParserMediator): parser mediator. file_entry (dfvfs.FileEntry): file entry to be parsed. Raises: UnableToParseFile: when the file cannot be parsed.
codesearchnet
def ExtractCredentialsFromPathSpec(self, path_spec): credentials = manager.CredentialsManager.GetCredentials(path_spec) for identifier in credentials.CREDENTIALS: value = getattr(path_spec, identifier, None) if value is None: continue self.SetCredential(path_spec, identifier, value)
Extracts credentials from a path specification. Args: path_spec (PathSpec): path specification to extract credentials from.
juraj-google-style
def filter_line(self, line): filtered = [] warn_msg = [] splited = line.split('\n') if not line and len(splited) < 1: warn_msg = '[Warning] Empty line detected while filtering lines.' logging.warning(warn_msg) self.warning_msg.append(warn_msg) if splited[0] == '[': filtered = splited[1:] elif '[' in splited[0]: splited = splited[0].replace('[', '') filtered = splited else: warn_msg = '[Warning] Format error. `[` could be missing in ' warn_msg += 'the config (.ini) file. (line = %s)' % str(line) logging.warning(warn_msg) self.warning_msg.append(warn_msg) if filtered[-1] == ']': filtered = filtered[:-1] elif ']' in filtered[-1]: filtered[-1] = filtered[-1].replace(']', '') else: warn_msg = '[Warning] Format error. `]` could be missing in ' warn_msg += 'the config (.ini) file. (line = %s)' % str(line) logging.warning(warn_msg) self.warning_msg.append(warn_msg) return filtered
Removes `[` or `]` from the input line. Args: line: String that is a compatibility specification line from the `.ini` config file. Returns: String that is a compatibility specification line without `[` and `]`.
github-repos
def clean_pdb(pdb_file, out_suffix='_clean', outdir=None, force_rerun=False, remove_atom_alt=True, keep_atom_alt_id='A', remove_atom_hydrogen=True, add_atom_occ=True, remove_res_hetero=True, keep_chemicals=None, keep_res_only=None, add_chain_id_if_empty='X', keep_chains=None): outfile = ssbio.utils.outfile_maker(inname=pdb_file, append_to_name=out_suffix, outdir=outdir, outext='.pdb') if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile): my_pdb = StructureIO(pdb_file) my_cleaner = CleanPDB(remove_atom_alt=remove_atom_alt, remove_atom_hydrogen=remove_atom_hydrogen, keep_atom_alt_id=keep_atom_alt_id, add_atom_occ=add_atom_occ, remove_res_hetero=remove_res_hetero, keep_res_only=keep_res_only, add_chain_id_if_empty=add_chain_id_if_empty, keep_chains=keep_chains, keep_chemicals=keep_chemicals) my_clean_pdb = my_pdb.write_pdb(out_suffix=out_suffix, out_dir=outdir, custom_selection=my_cleaner, force_rerun=force_rerun) return my_clean_pdb else: return outfile
Clean a PDB file. Args: pdb_file (str): Path to input PDB file out_suffix (str): Suffix to append to original filename outdir (str): Path to output directory force_rerun (bool): If structure should be re-cleaned if a clean file exists already remove_atom_alt (bool): Remove alternate positions keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep remove_atom_hydrogen (bool): Remove hydrogen atoms add_atom_occ (bool): Add atom occupancy fields if not present remove_res_hetero (bool): Remove all HETATMs keep_chemicals (str, list): If removing HETATMs, keep specified chemical names keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else! add_chain_id_if_empty (str): Add a chain ID if not present keep_chains (str, list): Keep only these chains Returns: str: Path to cleaned PDB file
codesearchnet
def vec_to_surface(vec): miller = ([None] * 3) index = [] for (i, value) in enumerate(vec): if (abs(value) < 1e-08): miller[i] = 0 else: index.append(i) if (len(index) == 1): miller[index[0]] = 1 else: min_index = np.argmin([i for i in vec if (i != 0)]) true_index = index[min_index] index.pop(min_index) frac = [] for (i, value) in enumerate(index): frac.append(Fraction((vec[value] / vec[true_index])).limit_denominator(100)) if (len(index) == 1): miller[true_index] = frac[0].denominator miller[index[0]] = frac[0].numerator else: com_lcm = lcm(frac[0].denominator, frac[1].denominator) miller[true_index] = com_lcm miller[index[0]] = (frac[0].numerator * int(round((com_lcm / frac[0].denominator)))) miller[index[1]] = (frac[1].numerator * int(round((com_lcm / frac[1].denominator)))) return miller
Transform a float vector to a surface miller index with integers. Args: vec (1 by 3 array float vector): input float vector Return: the surface miller index of the input vector.
codesearchnet
def flatten(weights, start=0, stop=2): for key, val in weights.items(): new_shape = val.shape[0:start] + (-1, ) + val.shape[stop:] weights[key] = val.reshape(new_shape) return weights
This method reshapes all values in a dictionary. The indices from start to stop will be flattened into a single index. Args: weights: A dictionary mapping keys to numpy arrays. start: The starting index. stop: The ending index.
juraj-google-style
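A worked sketch of the reshape (illustration only): with the default start=0 and stop=2, the first two axes of every array are merged.

import numpy as np

weights = {'w': np.zeros((3, 4, 5))}
flatten(weights)

# The (3, 4) leading axes collapse into a single axis of length 12.
print(weights['w'].shape)    # (12, 5)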
def compile_sgf(in_path, optimize=True, model=None): if (model is None): model = DeviceModel() parser = SensorGraphFileParser() parser.parse_file(in_path) parser.compile(model) if optimize: opt = SensorGraphOptimizer() opt.optimize(parser.sensor_graph, model=model) return parser.sensor_graph
Compile and optionally optimize an SGF file. Args: in_path (str): The input path to the sgf file to compile. optimize (bool): Whether to optimize the compiled result, defaults to True if not passed. model (DeviceModel): Optional device model if we are compiling for a nonstandard device. Normally you should leave this blank. Returns: SensorGraph: The compiled sensorgraph object
codesearchnet
def validate_tag(self, key, value): if key == 'owner': return validate_email(value, self.partial_owner_match) elif key == self.gdpr_tag: return value in self.gdpr_tag_values else: return True
Check whether a tag value is valid Args: key: A tag key value: A tag value Returns: `(True or False)` A boolean indicating whether or not the value is valid
juraj-google-style
def handle_config_change(self, new_config): if self.user_handler: self.user_handler(self.current_config, new_config) self._call_spec_handlers(new_config) self.current_config = copy.deepcopy(new_config)
Handle the new configuration. Args: new_config (dict): The new configuration
juraj-google-style
def tokenize(self, text: str, pair: Optional[str]=None, add_special_tokens: bool=False, **kwargs) -> List[str]: raise NotImplementedError
Converts a string into a sequence of tokens, replacing unknown tokens with the `unk_token`. Args: text (`str`): The sequence to be encoded. pair (`str`, *optional*): A second sequence to be encoded with the first. add_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to add the special tokens associated with the corresponding model. kwargs (additional keyword arguments, *optional*): Will be passed to the underlying model specific encode method. See details in [`~PreTrainedTokenizerBase.__call__`] Returns: `List[str]`: The list of tokens.
github-repos
def get_bond_length(sp1, sp2, bond_order=1): sp1 = (Element(sp1) if isinstance(sp1, str) else sp1) sp2 = (Element(sp2) if isinstance(sp2, str) else sp2) try: all_lengths = obtain_all_bond_lengths(sp1, sp2) return all_lengths[bond_order] except (ValueError, KeyError): warnings.warn(('No order %d bond lengths between %s and %s found in database. Returning sum of atomic radius.' % (bond_order, sp1, sp2))) return (sp1.atomic_radius + sp2.atomic_radius)
Get the bond length between two species. Args: sp1 (Specie): First specie. sp2 (Specie): Second specie. bond_order: For species with different possible bond orders, this allows one to obtain the bond length for a particular bond order. For example, to get the C=C bond length instead of the C-C bond length, this should be set to 2. Defaults to 1. Returns: Bond length in Angstrom. If no data is available, the sum of the atomic radius is used.
codesearchnet
def Serialize(self, writer: BinaryWriter): byt = None if self.Type == StateType.Account: byt = b'\x40' elif self.Type == StateType.Validator: byt = b'\x48' writer.WriteByte(byt) writer.WriteVarBytes(self.Key) writer.WriteVarString(self.Field) writer.WriteVarBytes(self.Value)
Serialize full object. Args: writer (neo.IO.BinaryWriter): the binary writer to serialize the object to.
juraj-google-style
def handle_partial_sample_weights(outputs, sample_weights, sample_weight_modes, check_all_flat=False): any_sample_weight = sample_weights is not None and any((w is not None for w in sample_weights)) partial_sample_weight = any_sample_weight and any((w is None for w in sample_weights)) if not any_sample_weight: return (None, any_sample_weight, partial_sample_weight) if not partial_sample_weight: return (sample_weights, any_sample_weight, partial_sample_weight) if check_all_flat: nest.assert_same_structure(list_to_tuple(sample_weights), list_to_tuple(nest.flatten(sample_weights))) nest.assert_same_structure(list_to_tuple(outputs), list_to_tuple(nest.flatten(outputs))) if sample_weight_modes is not None: nest.assert_same_structure(sample_weight_modes, nest.flatten(sample_weight_modes)) new_sample_weights = [] for i, sw in enumerate(sample_weights): if sw is None: as_numpy = isinstance(outputs[i], np.ndarray) output = outputs[i] output_shape = output.shape if as_numpy else array_ops.shape(output) is_temporal = sample_weight_modes is not None and sample_weight_modes[i] == 'temporal' sw_shape = (output_shape[0], output_shape[1]) if is_temporal else (output_shape[0],) new_sample_weights.append(np.ones(sw_shape) if as_numpy else array_ops.ones(sw_shape)) else: new_sample_weights.append(sw) return (list_to_tuple(new_sample_weights), any_sample_weight, partial_sample_weight)
Adds 1.0 as sample weights for the outputs for which there is no weight. Args: outputs: List of model outputs. sample_weights: List of sample weight inputs. sample_weight_modes: List of sample weight modes or None. check_all_flat: Ensure that inputs are not nested structures. This is not a free check, so we may not want to run it eagerly every iteration. Returns: Tuple of sample weights, one sample weight for every output, and booleans describing the raw sample weights.
github-repos
def __init__(self, path, delimiter=b','): self._path = path self._delimiter = delimiter
Initializes a Csv instance. Args: path: path of the Csv file. delimiter: the separator used to parse a Csv line.
juraj-google-style
def get_metrics_namespace(self) -> str: return 'BeamML_Sklearn'
Returns: A namespace for metrics collected by the RunInference transform.
github-repos
def __init__(self, size): super(CircularBuffer, self).__init__() self._index = 0 self._list = [] self._size = size
Initializes a circular buffer object. Args: size (int): number of elements in the buffer.
juraj-google-style
def CheckGradConfigsToTest(): def Config(input_size, filter_size, out_size, stride=1, padding='SAME', dilations=None): return (input_size, filter_size, out_size, stride, padding, dilations) return [Config([2, 5, 8, 1], [4, 4, 1, 2], [2, 5, 8, 2]), Config([4, 5, 5, 1], [2, 2, 1, 2], [4, 2, 2, 2], 2, padding='VALID'), Config([2, 4, 4, 2], [3, 1, 2, 2], [2, 4, 4, 4]), Config([1, 15, 15, 2], [1, 3, 2, 1], [1, 15, 15, 2]), Config([2, 15, 16, 1], [3, 3, 1, 2], [2, 5, 5, 2], 3, padding='VALID'), Config([2, 5, 8, 1], [4, 3, 1, 2], [2, 5, 8, 2], dilations=[1, 2]), Config([1, 3, 1, 2], [2, 1, 2, 1], [1, 3, 1, 2]), Config([2, 2, 3, 2], [2, 1, 2, 1], [2, 2, 3, 2]), Config([2, 2, 3, 1], [2, 2, 1, 1], [2, 2, 3, 1])]
Returns a list of configurations covering different convolution shapes, strides and paddings. compute_gradient_error() is very expensive, so the configs should be relatively small. Returns: List of tuples (input_size, filter_size, out_size, stride, padding, dilations), the depthwise convolution parameters.
github-repos
async def getPropNorm(self, prop, valu): pobj = self.model.prop(prop) if (pobj is None): raise s_exc.NoSuchProp(mesg=f'The property {prop} does not exist.', prop=prop) (norm, info) = pobj.type.norm(valu) return (norm, info)
Get the normalized property value based on the Cortex data model. Args: prop (str): The property to normalize. valu: The value to normalize. Returns: (tuple): A two item tuple, containing the normed value and the info dictionary. Raises: s_exc.NoSuchProp: If the prop does not exist. s_exc.BadTypeValu: If the value fails to normalize.
codesearchnet
def extract_jtl_string_pairs_from_text_file(results_dict, file_path): result_pairs = re.findall(JTL_REGEX, open(file_path).read()) for (result_key, result_comment) in result_pairs: results_dict[result_key] = result_comment return results_dict
Extracts all string pairs matching the JTL pattern from a given text file. This can be used as an "extract_func" argument in the extract_string_pairs_in_directory method. Args: results_dict (dict): The dict to add the string pairs to. file_path (str): The path of the file from which to extract the string pairs.
codesearchnet
def stations_listeners(stations): stations = stations if isinstance(stations, (list, tuple)) else [stations] listeners = [] for sta in stations: listeners.append(StationSignalListener(sta)) listeners.append(StationMaxListener(sta)) if sta.mask is not None: listeners.append(StationMaskListener(sta)) return listeners
Function for creating listeners for a list of stations. Args: stations (iterable): List of TopocentricFrame Returns: list of Listener
juraj-google-style
def __init__(self, score, related_data=None): self.check_score(score) if related_data is None: related_data = {} self.score, self.related_data = score, related_data if isinstance(score, Exception): self.__class__ = ErrorScore super(Score, self).__init__()
Abstract base class for scores. Args: score (int, float, bool): A raw value to wrap in a Score class. related_data (dict, optional): Artifacts to store with the score.
juraj-google-style
def has_button(self, button): rc = self._libinput.libinput_device_pointer_has_button( self._handle, button) assert rc >= 0, 'This device is not a pointer device' return bool(rc)
Check if this device has a given button. Args: button (int): Button to check for, see ``input.h`` for button definitions. Returns: bool: :obj:`True` if the device has this button, :obj:`False` if it does not. Raises: AssertionError
juraj-google-style
def format_checksum(checksum_pyxb): return '{}/{}'.format( checksum_pyxb.algorithm.upper().replace('-', ''), checksum_pyxb.value().lower() )
Create string representation of a PyXB Checksum object. Args: PyXB Checksum object Returns: str : Combined hexadecimal value and algorithm name.
juraj-google-style
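A tiny sketch with a hypothetical stand-in object (the real argument is a PyXB Checksum; only the `algorithm` attribute and `value()` method are exercised here).

class _FakeChecksum:
    # Hypothetical stand-in, not a real PyXB class.
    algorithm = 'SHA-1'

    def value(self):
        return 'ABCDEF0123'

print(format_checksum(_FakeChecksum()))    # 'SHA1/abcdef0123'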
def readToken(self): if (not self.tokenFile): raise SkypeAuthException('No token file specified') try: with open(self.tokenFile, 'r') as f: lines = f.read().splitlines() except OSError: raise SkypeAuthException("Token file doesn't exist or not readable") try: (user, skypeToken, skypeExpiry, regToken, regExpiry, msgsHost) = lines skypeExpiry = datetime.fromtimestamp(int(skypeExpiry)) regExpiry = datetime.fromtimestamp(int(regExpiry)) except ValueError: raise SkypeAuthException('Token file is malformed') if (datetime.now() >= skypeExpiry): raise SkypeAuthException('Token file has expired') self.userId = user self.tokens['skype'] = skypeToken self.tokenExpiry['skype'] = skypeExpiry if (datetime.now() < regExpiry): self.tokens['reg'] = regToken self.tokenExpiry['reg'] = regExpiry self.msgsHost = msgsHost else: self.getRegToken()
Attempt to re-establish a connection using previously acquired tokens. If the Skype token is valid but the registration token is invalid, a new endpoint will be registered. Raises: .SkypeAuthException: if the token file cannot be used to authenticate
codesearchnet
def __init__(self, profile_datum_list, time_unit=cli_shared.TIME_UNIT_US): self._profile_datum_list = profile_datum_list self.formatted_start_time = [datum.start_time for datum in profile_datum_list] self.formatted_op_time = [cli_shared.time_to_readable_str(datum.op_time, force_time_unit=time_unit) for datum in profile_datum_list] self.formatted_exec_time = [cli_shared.time_to_readable_str(datum.node_exec_stats.all_end_rel_micros, force_time_unit=time_unit) for datum in profile_datum_list] self._column_names = ['Node', 'Op Type', 'Start Time (us)', 'Op Time (%s)' % time_unit, 'Exec Time (%s)' % time_unit, 'Filename:Lineno(function)'] self._column_sort_ids = [SORT_OPS_BY_OP_NAME, SORT_OPS_BY_OP_TYPE, SORT_OPS_BY_START_TIME, SORT_OPS_BY_OP_TIME, SORT_OPS_BY_EXEC_TIME, SORT_OPS_BY_LINE]
Constructor. Args: profile_datum_list: List of `ProfileDatum` objects. time_unit: must be in cli_shared.TIME_UNITS.
github-repos
def __get_all_scrapers(self): modules_strings = self.__get_all_scrapers_modules() modules = [] for module_string in modules_strings: module = importlib.import_module(('nyawc.scrapers.' + module_string)) modules.append(getattr(module, module_string)) return modules
Find all available scraper references. Returns: list(obj): The scraper references.
codesearchnet
def swo_supported_speeds(self, cpu_speed, num_speeds=3): buf_size = num_speeds buf = (ctypes.c_uint32 * buf_size)() res = self._dll.JLINKARM_SWO_GetCompatibleSpeeds(cpu_speed, 0, buf, buf_size) if res < 0: raise errors.JLinkException(res) return list(buf)[:res]
Retrieves a list of SWO speeds supported by both the target and the connected J-Link. The supported speeds are returned in order from highest to lowest. Args: self (JLink): the ``JLink`` instance cpu_speed (int): the target's CPU speed in Hz num_speeds (int): the number of compatible speeds to return Returns: A list of compatible SWO speeds in Hz in order from highest to lowest.
juraj-google-style
def _get_next_date_from_partial_date(partial_date): relativedelta_arg = 'years' if partial_date.month: relativedelta_arg = 'months' if partial_date.day: relativedelta_arg = 'days' next_date = parse(partial_date.dumps()) + relativedelta(**{relativedelta_arg: 1}) return PartialDate.from_parts( next_date.year, next_date.month if partial_date.month else None, next_date.day if partial_date.day else None )
Calculates the next date from the given partial date. Args: partial_date (inspire_utils.date.PartialDate): The partial date whose next date should be calculated. Returns: PartialDate: The next date from the given partial date.
juraj-google-style
def has_resource(self, feature_column, resource_name): return resource_name in self._cols_to_resources_map[feature_column]
Returns true iff a resource with the same name exists. Resources can be things such as tables, variables, trackables, etc. Args: feature_column: A `FeatureColumn` object this variable corresponds to. resource_name: Name of the resource.
github-repos
def WrapTypeDeclUnit(name, items): functions = {} classes = {} constants = collections.defaultdict(TypeBuilder) aliases = {} typevars = {} for item in items: if isinstance(item, pytd.Function): if item.name in functions: if item.kind != functions[item.name].kind: raise ValueError(f"Can't combine {item.kind} and {functions[item.name].kind}") functions[item.name] = pytd.Function(item.name, functions[item.name].signatures + item.signatures, item.kind) else: functions[item.name] = item elif isinstance(item, pytd.Class): if item.name in classes: raise NameError(f'Duplicate top level class: {item.name!r}') classes[item.name] = item elif isinstance(item, pytd.Constant): constants[item.name].add_type(item.type) elif isinstance(item, pytd.Alias): if item.name in aliases: raise NameError(f'Duplicate top level alias or import: {item.name!r}') aliases[item.name] = item elif isinstance(item, pytd.TypeParameter): if item.name in typevars: raise NameError(f'Duplicate top level type parameter: {item.name!r}') typevars[item.name] = item else: raise ValueError(f'Invalid top level pytd item: {type(item)!r}') categories = {'function': functions, 'class': classes, 'constant': constants, 'alias': aliases, 'typevar': typevars} for c1, c2 in itertools.combinations(categories, 2): _check_intersection(categories[c1], categories[c2], c1, c2) return pytd.TypeDeclUnit(name=name, constants=tuple((pytd.Constant(name, t.build()) for name, t in sorted(constants.items()))), type_params=tuple(typevars.values()), classes=tuple(classes.values()), functions=tuple(functions.values()), aliases=tuple(aliases.values()))
Given a list (classes, functions, etc.), wrap a pytd around them. Args: name: The name attribute of the resulting TypeDeclUnit. items: A list of items. Can contain pytd.Class, pytd.Function and pytd.Constant. Returns: A pytd.TypeDeclUnit. Raises: ValueError: In case of an invalid item in the list. NameError: For name conflicts.
github-repos
def create_latest_log_alias(actual_path, alias): alias_path = os.path.join(os.path.dirname(actual_path), alias) utils.create_alias(actual_path, alias_path)
Creates a symlink to the latest test run logs. Args: actual_path: string, the source directory where the latest test run's logs are. alias: string, the name of the directory to contain the latest log files.
github-repos
def eval_from_json(json): close = json[-1]['close'] low = min(poloniex.get_attribute(json, 'low')) high = max(poloniex.get_attribute(json, 'high')) return SO.eval_algorithm(close, low, high)
Evaluates the stochastic oscillator (SO) from JSON (typically a Poloniex API response). Args: json: List of dates where each entry is a dict of raw market data. Returns: Float SO between 0 and 100.
juraj-google-style
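An illustrative computation of the underlying indicator (this assumes `SO.eval_algorithm` implements the standard stochastic oscillator %K; the Poloniex parsing done by `eval_from_json` is omitted).

# %K = 100 * (close - low) / (high - low), bounded between 0 and 100.
close, low, high = 105.0, 100.0, 110.0
so_value = 100 * (close - low) / (high - low)    # 50.0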
def get_open_file(self, file_des): if (not is_int_type(file_des)): raise TypeError('an integer is required') if ((file_des >= len(self.open_files)) or (self.open_files[file_des] is None)): self.raise_os_error(errno.EBADF, str(file_des)) return self.open_files[file_des][0]
Return an open file. Args: file_des: File descriptor of the open file. Raises: OSError: an invalid file descriptor. TypeError: file_des is not an integer. Returns: Open file object.
codesearchnet
def when_matches(self, path, good_value, bad_values=None, timeout=None, event_timeout=None): future = self.when_matches_async(path, good_value, bad_values) self.wait_all_futures(future, timeout=timeout, event_timeout=event_timeout)
Resolve when a path value equals the given value. Args: path (list): The path to wait on good_value (object): the value to wait for bad_values (list): values to raise an error on timeout (float): time in seconds to wait for responses, wait forever if None event_timeout: maximum time in seconds to wait between each response event, wait forever if None
codesearchnet
def overlay(array1, array2, alpha=0.5): if ((alpha < 0.0) or (alpha > 1.0)): raise ValueError('`alpha` needs to be between [0, 1]') if (array1.shape != array2.shape): raise ValueError('`array1` and `array2` must have the same shapes') return ((array1 * alpha) + (array2 * (1.0 - alpha))).astype(array1.dtype)
Overlays `array1` onto `array2` with `alpha` blending. Args: array1: The first numpy array. array2: The second numpy array. alpha: The alpha value of `array1` as overlayed onto `array2`. This value needs to be between [0, 1], with 0 being `array2` only to 1 being `array1` only (Default value = 0.5). Returns: The `array1`, overlayed with `array2` using `alpha` blending.
codesearchnet
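A quick sketch of the blend (illustration only): float inputs are used because the output is cast back to `array1`'s dtype.

import numpy as np

a = np.full((2, 2), 10.0)
b = np.full((2, 2), 20.0)

# With the default alpha=0.5 every element becomes 15.0.
print(overlay(a, b))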
def random_masking(inputs: torch.Tensor, mask_ratio: float, unmasked_channel_indices: Optional[list]=None, channel_consistent_masking: bool=False, mask_value: int=0): if mask_ratio < 0 or mask_ratio >= 1: raise ValueError(f'Mask ratio {mask_ratio} has to be between 0 and 1.') batch_size, num_channels, sequence_length, num_features = inputs.shape device = inputs.device len_keep = int(sequence_length * (1 - mask_ratio)) if channel_consistent_masking: noise = torch.rand(batch_size, 1, sequence_length, device=device) noise = noise.repeat(1, num_channels, 1) else: noise = torch.rand(batch_size, num_channels, sequence_length, device=device) mask = torch.ones(batch_size, num_channels, sequence_length, device=device) mask[:, :, :len_keep] = 0 ids_shuffle = torch.argsort(noise, dim=-1) ids_restore = torch.argsort(ids_shuffle, dim=-1) mask = torch.gather(mask, dim=-1, index=ids_restore) mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features) if unmasked_channel_indices is not None: mask[:, unmasked_channel_indices, :, :] = 0 inputs_mask = inputs.masked_fill(mask.bool(), mask_value) return (inputs_mask, mask[..., 0])
random_masking: Mask the input considering the control variables. Args: inputs (`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, num_features)`): The input tensor to mask. mask_ratio (`float`): Masking ratio applied to mask the input data during random pretraining. It is a number between 0 and 1. unmasked_channel_indices (list, *optional*): Indices of channels that will not be masked. channel_consistent_masking (bool, *optional*, defaults to `False`): When true, masking will be the same across all channels of a timeseries. Otherwise, masking positions will vary across channels. mask_value (int, *optional*, defaults to 0): Define the value of masked patches for pretraining. Returns: `tuple(torch.Tensor)`: inputs_mask, masked input, same shape as input Tensor and mask tensor of shape [bs x c x n]
github-repos
def set_mode(path, mode): func_name = '{0}.set_mode'.format(__virtualname__) if __opts__.get('fun', '') == func_name: log.info('The function %s should not be used on Windows systems; ' 'see function docs for details. The value returned is ' 'always None. Use set_perms instead.', func_name) return get_mode(path)
Set the mode of a file This just calls get_mode, which returns None because we don't use mode on Windows Args: path: The path to the file or directory mode: The mode (not used) Returns: None CLI Example: .. code-block:: bash salt '*' file.set_mode /etc/passwd 0644
juraj-google-style
def GetDecompressor(cls, compression_method): compression_method = compression_method.lower() decompressor = cls._decompressors.get(compression_method, None) if not decompressor: return None return decompressor()
Retrieves the decompressor object for a specific compression method. Args: compression_method (str): compression method identifier. Returns: Decompressor: decompressor or None if the compression method does not exist.
juraj-google-style
def _render_objects(self, items, attributes=None, datatype='object'): if not items: return if datatype == 'chartdata': if not attributes: attributes = [items['cols'][i]['label'] for i in range(0, len(items['cols']))] items = items['rows'] indices = {attributes[i]: i for i in range(0, len(attributes))} num_segments = len(self._segments) self._segments.append('<table>') first = True for o in items: if first: first = False if datatype == 'dict' and not attributes: attributes = list(o.keys()) if attributes is not None: self._segments.append('<tr>') for attr in attributes: self._segments.append('<th>%s</th>' % attr) self._segments.append('</tr>') self._segments.append('<tr>') if attributes is None: self._segments.append('<td>%s</td>' % HtmlBuilder._format(o)) else: for attr in attributes: if datatype == 'dict': self._segments.append('<td>%s</td>' % HtmlBuilder._format(o.get(attr, None), nbsp=True)) elif datatype == 'chartdata': self._segments.append('<td>%s</td>' % HtmlBuilder._format(o['c'][indices[attr]]['v'], nbsp=True)) else: self._segments.append('<td>%s</td>' % HtmlBuilder._format(o.__getattribute__(attr), nbsp=True)) self._segments.append('</tr>') self._segments.append('</table>') if first: self._segments = self._segments[:num_segments]
Renders an HTML table with the specified list of objects. Args: items: the iterable collection of objects to render. attributes: the optional list of properties or keys to render. datatype: the type of data; one of 'object' for Python objects, 'dict' for a list of dictionaries, or 'chartdata' for Google chart data.
juraj-google-style
def _are_scopes_sufficient(authorized_scopes, sufficient_scopes): for sufficient_scope_set in sufficient_scopes: if sufficient_scope_set.issubset(authorized_scopes): return True return False
Check if a list of authorized scopes satisfies any set of sufficient scopes. Args: authorized_scopes: a list of strings, return value from oauth.get_authorized_scopes sufficient_scopes: a set of sets of strings, return value from _process_scopes
codesearchnet
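A minimal sketch with illustrative values: the first sufficient set is fully covered by the authorized scopes, so the check passes.

authorized = {'email', 'profile'}
sufficient = [{'email'}, {'calendar', 'drive'}]

print(_are_scopes_sufficient(authorized, sufficient))    # True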
def get_dilation_rates(hparams, width): allowed_dilations = [([1] * 5)] apply_dilations = hparams.get('latent_apply_dilations', False) dilation_rates = hparams.get('latent_dilation_rates', [1, 3]) if apply_dilations: for rate in dilation_rates: filter_size = (3 + (2 * rate)) if (filter_size <= width): curr_dilation = [1, 1, (rate + 1), (rate + 1), 1] allowed_dilations.append(curr_dilation) return allowed_dilations
Get a list of valid dilation rates. Args: hparams: HParams. width: spatial dimension. Ensures that the effective filter size is not larger than the spatial dimension. Returns: allowed_dilations: A list of dilation rates.
codesearchnet
def create_latest_log_alias(actual_path): alias_path = os.path.join(os.path.dirname(actual_path), 'latest') utils.create_alias(actual_path, alias_path)
Creates a symlink to the latest test run logs. Args: actual_path: The source directory where the latest test run's logs are.
juraj-google-style
def save_vocabulary(self, save_directory, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, 'wb') as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,)
Save the vocabulary and special tokens file to a directory. Args: save_directory (`str`): The directory in which to save the vocabulary. Returns: `Tuple(str)`: Paths to the files saved.
github-repos
def one_hot_class_label_loss(top_out, targets, model_hparams, vocab_size, weights_fn): del model_hparams, vocab_size loss_scale = tf.losses.softmax_cross_entropy(onehot_labels=targets, logits=top_out) weights = weights_fn(targets) loss_denom = tf.reduce_sum(weights) return (loss_scale, loss_denom)
Apply softmax cross-entropy between outputs and targets. Args: top_out: logits Tensor with shape [batch, ?, ?, num_classes] targets: one-hot encoding Tensor with shape [batch, ?, ?, num_classes] model_hparams: HParams, model hyperparameters. vocab_size: int, vocabulary size. weights_fn: function mapping targets to a weights Tensor used to normalize the loss. Returns: loss_scale (cross-entropy), loss_denom
codesearchnet
def write_seq_as_temp_fasta(seq): sr = ssbio.protein.sequence.utils.cast_to_seq_record(seq, id='tempfasta') return write_fasta_file(seq_records=sr, outname='temp', outdir=tempfile.gettempdir(), force_rerun=True)
Write a sequence as a temporary FASTA file Args: seq (str, Seq, SeqRecord): Sequence string, Biopython Seq or SeqRecord object Returns: str: Path to temporary FASTA file (located in system temporary files directory)
juraj-google-style
def _get_time(header, keys, name): for key in keys: try: return _to_timestamp(header.pop(key)) except KeyError: continue raise _UnsupportedOperation(name)
Get time from header Args: header (dict): Object header. keys (tuple of str): Header keys. name (str): Method name. Returns: float: The number of seconds since the epoch
juraj-google-style
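The helper above just tries a sequence of header keys and converts the first hit; a stand-alone sketch, with an assumed RFC 2822 date format and a standard exception in place of the project-specific one:

import calendar
from email.utils import parsedate

def to_timestamp(value):
    # assumption: header dates are RFC 2822 strings, as object storage typically returns
    return calendar.timegm(parsedate(value))

def get_time(header, keys, name):
    for key in keys:
        try:
            return to_timestamp(header.pop(key))
        except KeyError:
            continue
    raise NotImplementedError(name)  # stands in for _UnsupportedOperation

header = {"Last-Modified": "Wed, 01 Jan 2020 00:00:00 GMT"}
print(get_time(header, ("Last-Modified", "X-Timestamp"), "getmtime"))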
def _break_ties(self, Y_s, break_ties='random'): (n, k) = Y_s.shape Y_h = np.zeros(n) diffs = np.abs((Y_s - Y_s.max(axis=1).reshape((- 1), 1))) TOL = 1e-05 for i in range(n): max_idxs = np.where((diffs[(i, :)] < TOL))[0] if (len(max_idxs) == 1): Y_h[i] = (max_idxs[0] + 1) elif (break_ties == 'random'): Y_h[i] = (np.random.choice(max_idxs) + 1) elif (break_ties == 'abstain'): Y_h[i] = 0 elif isinstance(break_ties, int): Y_h[i] = break_ties else: raise ValueError(f'break_ties={break_ties} policy not recognized.') return Y_h
Break ties in each row of a tensor according to the specified policy Args: Y_s: An [n, k] np.ndarray of probabilities break_ties: A tie-breaking policy: "abstain": return an abstain vote (0) "random": randomly choose among the tied options NOTE: if break_ties="random", repeated runs may have slightly different results due to difference in broken ties [int]: ties will be broken by using this label
codesearchnet
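A trimmed-down, runnable version of the tie-breaking logic using only NumPy; labels are 1-indexed and 0 means abstain, as in the docstring:

import numpy as np

def break_ties(Y_s, policy="random"):
    n, k = Y_s.shape
    Y_h = np.zeros(n)
    # distance of each score from the row maximum; ties are within tolerance of 0
    diffs = np.abs(Y_s - Y_s.max(axis=1, keepdims=True))
    for i in range(n):
        max_idxs = np.where(diffs[i] < 1e-5)[0]
        if len(max_idxs) == 1:
            Y_h[i] = max_idxs[0] + 1
        elif policy == "random":
            Y_h[i] = np.random.choice(max_idxs) + 1
        elif policy == "abstain":
            Y_h[i] = 0
        elif isinstance(policy, int):
            Y_h[i] = policy
    return Y_h

Y_s = np.array([[0.5, 0.5], [0.9, 0.1]])
print(break_ties(Y_s, policy="abstain"))  # [0., 1.]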
def MakePmfFromHist(hist, name=None): if name is None: name = hist.name d = dict(hist.GetDict()) pmf = Pmf(d, name) pmf.Normalize() return pmf
Makes a normalized PMF from a Hist object. Args: hist: Hist object name: string name Returns: Pmf object
juraj-google-style
def _GetISO8601String(self, structure): fraction_of_second_length = len(structure.fraction_of_second) if fraction_of_second_length not in (3, 6, 7): raise ValueError( 'unsupported time fraction of second length: {0:d}'.format( fraction_of_second_length)) try: fraction_of_second = int(structure.fraction_of_second, 10) except (TypeError, ValueError) as exception: raise ValueError( 'unable to determine fraction of second with error: {0!s}'.format( exception)) if fraction_of_second_length == 7: fraction_of_second, _ = divmod(fraction_of_second, 10) date_time_string = '{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}'.format( structure.year, structure.month, structure.day, structure.hour, structure.minute, structure.second) if fraction_of_second_length > 0: date_time_string = '{0:s}.{1:d}'.format( date_time_string, fraction_of_second) utc_offset_minutes = structure.get('utc_offset_minutes', None) if utc_offset_minutes is not None: try: time_zone_offset = int(utc_offset_minutes[1:], 10) except (IndexError, ValueError) as exception: raise ValueError( 'Unable to parse time zone offset with error: {0!s}.'.format( exception)) time_zone_hours, time_zone_minutes = divmod(time_zone_offset, 60) date_time_string = '{0:s}{1:s}{2:02d}:{3:02d}'.format( date_time_string, utc_offset_minutes[0], time_zone_hours, time_zone_minutes) return date_time_string
Retrieves an ISO8601 date time string from the structure. The date and time values in the SCCM log are formatted as: time="19:33:19.766-330" date="11-28-2014" Args: structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file. Returns: str: ISO 8601 date time string. Raises: ValueError: if the structure cannot be converted into a date time string.
juraj-google-style
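The same assembly of an ISO 8601 string can be sketched without the parser plumbing; the default values below are invented to mirror the SCCM example in the docstring:

def iso8601_string(year, month, day, hour, minute, second,
                   fraction_of_second="766", utc_offset_minutes="-330"):
    text = "{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}.{6:d}".format(
        year, month, day, hour, minute, second, int(fraction_of_second, 10))
    if utc_offset_minutes is not None:
        # offset is a signed number of minutes, e.g. "-330" -> -05:30
        hours, minutes = divmod(int(utc_offset_minutes[1:], 10), 60)
        text = "{0:s}{1:s}{2:02d}:{3:02d}".format(
            text, utc_offset_minutes[0], hours, minutes)
    return text

print(iso8601_string(2014, 11, 28, 19, 33, 19))  # 2014-11-28T19:33:19.766-05:30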
def has_intersection(self, other): return bool(lib.SDL_HasIntersection(self._ptr, other._ptr))
Return whether this rectangle intersects with another rectangle. Args: other (Rect): The rectangle to test intersection with. Returns: bool: True if there is an intersection, False otherwise.
juraj-google-style
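SDL does the geometry in C; a pure-Python equivalent of the same axis-aligned intersection test is short enough to show here.

from collections import namedtuple

Rect = namedtuple("Rect", "x y w h")

def has_intersection(a, b):
    # no overlap on either axis means no intersection
    return (a.x < b.x + b.w and b.x < a.x + a.w and
            a.y < b.y + b.h and b.y < a.y + a.h)

print(has_intersection(Rect(0, 0, 10, 10), Rect(5, 5, 10, 10)))   # True
print(has_intersection(Rect(0, 0, 10, 10), Rect(20, 20, 5, 5)))   # False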
def equal(mol, query, largest_only=True, ignore_hydrogen=True): m = molutil.clone(mol) q = molutil.clone(query) if largest_only: m = molutil.largest_graph(m) q = molutil.largest_graph(q) if ignore_hydrogen: m = molutil.make_Hs_implicit(m) q = molutil.make_Hs_implicit(q) if molutil.mw(m) == molutil.mw(q): gm = GraphMatcher(q.graph, m.graph, node_match=atom_match) return gm.is_isomorphic() return False
Return True if mol has exactly the same structure as the query, False otherwise. Args: mol: Compound query: Compound
juraj-google-style
def initialize_or_restore(self, session=None): if context.executing_eagerly(): return if session is None: session = get_session() all_objects = util.list_objects(self._object_graph_view) already_initialized_objects = object_identity.ObjectIdentitySet(self._checkpoint.object_by_proto_id.values()) initializers_for_non_restored_variables = [c.initializer for c in all_objects if hasattr(c, 'initializer') and c not in already_initialized_objects and (getattr(c, '_update_uid', self._checkpoint.restore_uid - 1) < self._checkpoint.restore_uid)] self.run_restore_ops(session=session) session.run(initializers_for_non_restored_variables)
Run operations to initialize or restore objects in the dependency graph. Any objects in the dependency graph which have initializers but are not in the checkpoint will have those initializers run, unless those variables are being restored by a later call to `tf.train.Checkpoint.restore()`. This method has a sibling in `InitializationOnlyStatus` which instead initializes variables. That type is returned if no checkpoint is specified in `Saver.restore`. Args: session: The session to run init/restore ops in. If `None`, uses the default session.
github-repos
def _pare_down_model(self, strain_gempro, genes_to_remove): strain_genes = [x.id for x in strain_gempro.genes] genes_to_remove.extend(self.missing_in_orthology_matrix) genes_to_remove = list(set(genes_to_remove).intersection(set(strain_genes))) if (len(genes_to_remove) == 0): log.info('{}: no genes marked non-functional'.format(strain_gempro.id)) return else: log.debug('{}: {} genes to be marked non-functional'.format(strain_gempro.id, len(genes_to_remove))) if strain_gempro.model: strain_gempro.model._trimmed = False strain_gempro.model._trimmed_genes = [] strain_gempro.model._trimmed_reactions = {} cobra.manipulation.delete_model_genes(strain_gempro.model, genes_to_remove) if strain_gempro.model._trimmed: log.info('{}: marked {} genes as non-functional, deactivating {} reactions'.format(strain_gempro.id, len(strain_gempro.model._trimmed_genes), len(strain_gempro.model._trimmed_reactions))) else: for g in genes_to_remove: strain_gempro.genes.get_by_id(g).functional = False log.info('{}: marked {} genes as non-functional'.format(strain_gempro.id, len(genes_to_remove)))
Mark genes as non-functional in a GEM-PRO. If there is a COBRApy model associated with it, the COBRApy method delete_model_genes is utilized to delete genes. Args: strain_gempro (GEMPRO): GEMPRO object genes_to_remove (list): List of gene IDs to remove from the model
codesearchnet
def _generate_altered_sql_dependencies(self, dep_changed_keys): for key, removed_deps, added_deps in dep_changed_keys: app_label, sql_name = key operation = AlterSQLState(sql_name, add_dependencies=tuple(added_deps), remove_dependencies=tuple(removed_deps)) sql_deps = [key] self.add_sql_operation(app_label, sql_name, operation, sql_deps)
Generate forward operations for changing/creating SQL item dependencies. Dependencies are only in-memory and should reflect database dependencies, so changing them in the SQL config does not alter the database. Such changes are persisted in a separate operation type - `AlterSQLState`. Args: dep_changed_keys (list): Data about keys whose dependencies changed. List of tuples (key, removed_dependencies, added_dependencies).
juraj-google-style
def resource_json(self, resource: str) -> dict: resource = self.api_document['schemas'][resource]['properties'] return self.to_json(from_api=resource)
Return Discovery API Document json for a resource. Expands all the references. Args: resource: the name of the Google API resource Returns: A dictionary representation of the resource.
github-repos
def _AddPathSegments(self, path, ignore_list): path_segments = path.split(self._path_segment_separator) for path_segment_index, path_segment in enumerate(path_segments): if path_segment_index not in self.path_segments_per_index: self.path_segments_per_index[path_segment_index] = {} if path_segment_index not in ignore_list: path_segments = self.path_segments_per_index[path_segment_index] if path_segment not in path_segments: path_segments[path_segment] = [] paths_per_segment_list = path_segments[path_segment] paths_per_segment_list.append(path)
Adds the path segments to the table. Args: path: a string containing the path. ignore_list: a list of path segment indexes to ignore, where 0 is the index of the first path segment relative from the root.
juraj-google-style
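Roughly, the method builds an index of which paths contain which segment at position i; a stripped-down, stand-alone version with an assumed '/' separator and an optional ignore list:

def add_path_segments(table, path, ignore_list=(), separator="/"):
    for index, segment in enumerate(path.split(separator)):
        # always register the index, but only record segments that are not ignored
        table.setdefault(index, {})
        if index not in ignore_list:
            table[index].setdefault(segment, []).append(path)

table = {}
add_path_segments(table, "/usr/local/bin")
add_path_segments(table, "/usr/share/doc")
print(table[2])  # {'local': ['/usr/local/bin'], 'share': ['/usr/share/doc']}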
def login(self, email, password): response = FlightData.session.post(url=LOGIN_URL, data={'email': email, 'password': password, 'remember': 'true', 'type': 'web'}, headers={'Origin': 'https: response = (self._fr24.json_loads_byteified(response.content) if (response.status_code == 200) else None) if response: token = response['userData']['subscriptionKey'] self.AUTH_TOKEN = token
Login to the flightradar24 session The API currently uses flightradar24 as the primary data source. The site provides different levels of data based on user plans. For users who have signed up for a plan, this method allows to login with the credentials from flightradar24. The API obtains a token that will be passed on all the requests; this obtains the data as per the plan limits. Args: email (str): The email ID which is used to login to flightradar24 password (str): The password for the user ID Example:: from pyflightdata import FlightData f=FlightData() f.login(myemail,mypassword)
codesearchnet
def document(self, document_tree, backend=None): return self.template(document_tree, configuration=self, backend=backend)
Create a :class:`DocumentTemplate` object based on the given document tree and this template configuration Args: document_tree (DocumentTree): tree of the document's contents backend: the backend to use when rendering the document
juraj-google-style
def _compute_initial_out_degree(self): out_degree = collections.defaultdict(int) for tensor_name in self.get_all_tensor_names(): if self.is_tensor_final(tensor_name): out_degree[tensor_name] = 1 for operation_name in self.get_all_operation_names(): for input_name in self.get_operation_input_names(operation_name): out_degree[input_name] += 1 return out_degree
The number of operations which use each tensor as input. Returns: a {string, int} mapping tensor name to the number of operations which use it as input, or one plus that quantity if the tensor is final.
codesearchnet
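The out-degree bookkeeping amounts to counting consumers per tensor, plus one for tensors that must survive to the end; a generic sketch over a made-up graph description:

import collections

def compute_out_degree(operations, final_tensors):
    # operations: mapping op_name -> list of input tensor names
    out_degree = collections.defaultdict(int)
    for name in final_tensors:
        out_degree[name] = 1
    for inputs in operations.values():
        for name in inputs:
            out_degree[name] += 1
    return out_degree

ops = {"matmul": ["x", "w"], "add": ["matmul_out", "b"]}
print(dict(compute_out_degree(ops, final_tensors={"add_out"})))
# {'add_out': 1, 'x': 1, 'w': 1, 'matmul_out': 1, 'b': 1}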
def delete(self, start=None, stop=None): _check_start_stop(start, stop) start_loc = (self._bisect_right(start) - 1) if (stop is None): stop_loc = len(self._keys) else: stop_loc = self._bisect_left(stop) for value in self._values[start_loc:stop_loc]: if (value is NOT_SET): raise KeyError((start, stop)) self.set(NOT_SET, start=start, stop=stop)
Delete the range from start to stop from self. Raises: KeyError: If part of the passed range isn't mapped.
codesearchnet
def set_triple(self, p, o, auto_refresh=True): self.rdf.graph.set((self.uri, p, self._handle_object(o))) self._handle_triple_refresh(auto_refresh)
Assuming the predicate or object matches a single triple, sets the other for that triple. Args: p (rdflib.term.URIRef): predicate o (): object auto_refresh (bool): whether or not to update object-like self.rdf.triples Returns: None: modifies pre-existing triple in self.rdf.graph
juraj-google-style
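The call eventually lands on rdflib's Graph.set, which removes any existing (subject, predicate, *) triples before adding the new one; a usage sketch with invented URIs:

from rdflib import Graph, Literal, URIRef

graph = Graph()
subject = URIRef("http://example.org/resource/1")
predicate = URIRef("http://purl.org/dc/terms/title")

graph.add((subject, predicate, Literal("old title")))
graph.set((subject, predicate, Literal("new title")))  # replaces the old object

print(list(graph.objects(subject, predicate)))  # [rdflib.term.Literal('new title')]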
def __init__(self, context): self._logdir = context.logdir self._has_auth_group = (context.flags and 'authorized_groups' in context.flags and context.flags.authorized_groups != '')
Constructs an interactive inference plugin for TensorBoard. Args: context: A base_plugin.TBContext instance.
juraj-google-style
def sheets_url(config, auth, url_or_name): sheet_id = sheets_id(config, auth, url_or_name) return 'https:
Normalizes a full sheet URL from some key. Args: config - see starthinker/util/configuration.py auth - user or service url_or_name - one of: URL, document title, or id Returns: URL of sheet.
github-repos
def create_dir_v2(path): _pywrap_file_io.CreateDir(compat.path_to_bytes(path))
Creates a directory with the name given by `path`. Args: path: string, name of the directory to be created Notes: The parent directories need to exist. Use `tf.io.gfile.makedirs` instead if there is the possibility that the parent dirs don't exist. Raises: errors.OpError: If the operation fails.
github-repos
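In user code this functionality is exposed through tf.io.gfile; a brief hedged example (the paths are placeholders, and the parent directory must already exist for the single-level create, as the note says):

import tensorflow as tf

parent = "/tmp/example_logs"            # placeholder path
tf.io.gfile.makedirs(parent)            # creates parent directories as needed
tf.io.gfile.mkdir(parent + "/run_01")   # single-level create, like create_dir_v2
print(tf.io.gfile.isdir(parent + "/run_01"))  # True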
def WritePathStatHistory(self, client_path, stat_entries): client_path_history = ClientPathHistory() for timestamp, stat_entry in iteritems(stat_entries): client_path_history.AddStatEntry(timestamp, stat_entry) self.MultiWritePathHistory({client_path: client_path_history})
Writes a collection of `StatEntry` observed for particular path. Args: client_path: A `ClientPath` instance. stat_entries: A dictionary with timestamps as keys and `StatEntry` instances as values.
juraj-google-style
def download(timestamp, dataset, path=None, products=None, levels=None, offset=0): if (path is None): path = DATA_PATH closest = ((timestamp.hour // 6) * 6) filename = dataset(closest, offset) gfs_timestamp = ('%s%02d' % (timestamp.strftime('%Y%m%d'), closest)) url = baseurl(gfs_timestamp, filename) index = (url + '.idx') messages = message_index(index) segments = _filter_messages(messages, products, levels) dl_path = (path + ('/%s/' % gfs_timestamp)) _verify_path(dl_path) _download_segments((path + filename), url, segments)
Save a GFS grib file to DATA_PATH. Args: dataset(function): naming convention function, e.g. pgrb2. timestamp(datetime): date and time used to pick the closest GFS run. path(str): if None defaults to DATA_PATH. products(list): TMP, etc. if None downloads all. levels(list): surface, etc. if None downloads all. offset(int): should be multiple of 3
codesearchnet
def _hat_integral(self, x): x = tf.cast(x, self.power.dtype) t = self.power - 1. return tf.exp((-t) * tf.math.log1p(x) - tf.math.log(t))
Integral of the `hat` function, used for sampling. We choose a `hat` function, h(x) = x^(-power), which is a continuous (unnormalized) density touching each positive integer at the (unnormalized) pmf. This function implements `hat` integral: H(x) = int_x^inf h(t) dt; which is needed for sampling purposes. Arguments: x: A Tensor of points x at which to evaluate H(x). Returns: A Tensor containing evaluation H(x) at x.
juraj-google-style
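Numerically, exp(-t*log1p(x) - log(t)) with t = power - 1 is just (1 + x)^(1 - power) / (power - 1); a NumPy check of that identity, independent of TensorFlow:

import numpy as np

def hat_integral(x, power):
    t = power - 1.0
    return np.exp(-t * np.log1p(x) - np.log(t))

x, power = 3.0, 2.5
closed_form = (1.0 + x) ** (1.0 - power) / (power - 1.0)
print(np.isclose(hat_integral(x, power), closed_form))  # True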
def tarfile_extract(fileobj, dest_path): tar = tarfile.open(mode='r|', fileobj=fileobj, bufsize=pipebuf.PIPE_BUF_BYTES) dest_path = os.path.realpath(dest_path) extracted_files = [] for member in tar: assert not member.name.startswith('/') relpath = os.path.join(dest_path, member.name) if member.issym(): target_path = os.path.join(dest_path, member.name) try: os.symlink(member.linkname, target_path) except OSError as e: if e.errno == errno.EEXIST: os.remove(target_path) os.symlink(member.linkname, target_path) else: raise continue if member.isreg() and member.size >= pipebuf.PIPE_BUF_BYTES: cat_extract(tar, member, relpath) else: tar.extract(member, path=dest_path) filename = os.path.realpath(relpath) extracted_files.append(filename) if len(extracted_files) > 1000: _fsync_files(extracted_files) del extracted_files[:] tar.close() _fsync_files(extracted_files)
Extract a tarfile described by a file object to a specified path. Args: fileobj (file): File object wrapping the target tarfile. dest_path (str): Path to extract the contents of the tarfile to.
juraj-google-style
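Most of the function above is about fsync batching and a fast path for large members; the core streaming extraction can be sketched with nothing but the standard library (paths here are placeholders):

import os
import tarfile

def stream_extract(fileobj, dest_path):
    dest_path = os.path.realpath(dest_path)
    # mode='r|' reads the archive as a forward-only stream, no seeking required
    with tarfile.open(mode="r|", fileobj=fileobj) as tar:
        for member in tar:
            assert not member.name.startswith("/")
            tar.extract(member, path=dest_path)

with open("/tmp/example.tar", "rb") as f:   # placeholder archive path
    stream_extract(f, "/tmp/extracted")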
def add(self, spec): for limit in spec.limit_to: if (limit not in self.limit_to): self.limit_to.append(limit)
Add the limitations of the given spec to this spec's own. Args: spec (PackageSpec): another spec.
codesearchnet