Dataset columns: code — string (lengths 20 to 4.93k) · docstring — string (lengths 33 to 1.27k) · source — string (3 classes)
def get_image_features(self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, vision_feature_layer: Optional[Union[int, List[int]]]=None, vision_feature_select_strategy: Optional[str]=None): vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy image_num_patches = [image_size_to_num_patches(image_size=imsize, grid_pinpoints=self.config.image_grid_pinpoints, patch_size=self.config.vision_config.image_size) for imsize in image_sizes] if pixel_values.dim() == 5: _pixel_values_list = [pix_val[:num_patch] for pix_val, num_patch in zip(pixel_values, image_num_patches)] pixel_values = torch.cat(_pixel_values_list, dim=0) elif pixel_values.dim() != 4: raise ValueError(f'pixel_values of shape {pixel_values.shape}, expect to be of 4 or 5 dimensions') image_features = self.vision_tower(pixel_values, output_hidden_states=True) if isinstance(vision_feature_layer, int): selected_image_feature = image_features.hidden_states[vision_feature_layer] else: hs_pool = [image_features.hidden_states[layer_idx] for layer_idx in vision_feature_layer] selected_image_feature = torch.cat(hs_pool, dim=-1) if vision_feature_select_strategy == 'default': selected_image_feature = selected_image_feature[:, 1:] elif vision_feature_select_strategy == 'full': selected_image_feature = selected_image_feature image_features = self.multi_modal_projector(selected_image_feature) image_features = torch.split(image_features, image_num_patches, dim=0) image_features, feature_lens = self.pack_image_features(image_features, image_sizes, vision_feature_select_strategy, image_newline=self.image_newline) return image_features
Obtains image last hidden states from the vision tower and applies multimodal projection. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_patches, channels, height, width)`): The tensors corresponding to the input images. image_sizes (`torch.Tensor` of shape `(num_images, 2)`): Actual image size of each image (H, W). vision_feature_layer (`Union[int, List[int]]`, *optional*): The index of the layer to select the vision feature. If multiple indices are provided, the vision features of the corresponding indices will be concatenated to form the vision features. vision_feature_select_strategy (`str`, *optional*): The feature selection strategy used to select the vision feature from the vision backbone. Can be one of `"default"` or `"full"`. Returns: image_features (List[`torch.Tensor`]): List of image feature tensors, each containing all the visual features of all patches, of shape `(num_patches, image_length, embed_dim)`.
github-repos
def merge_config( config: Mapping[str, Any], override_config: Mapping[str, Any] = None, override_config_fn: str = None, ) -> Mapping[str, Any]: if override_config_fn: with open(override_config_fn, "r") as f: override_config = yaml.load(f, Loader=yaml.SafeLoader) if not override_config: log.info("Missing override_config") return functools.reduce(rec_merge, (config, override_config))
Override config with additional configuration from override_config or override_config_fn. Used in scripts to merge CLI options with the config. Args: config: original configuration override_config: new configuration to override/extend the current config override_config_fn: filename of a YAML file containing the new configuration
juraj-google-style
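A minimal usage sketch of merge_config above; the rec_merge helper shown here is a hypothetical stand-in for the one the function imports.
import functools

def rec_merge(d1, d2):
    # Hypothetical recursive merge: values in d2 win; nested dicts are merged.
    out = dict(d1)
    for k, v in d2.items():
        if isinstance(v, dict) and isinstance(out.get(k), dict):
            out[k] = rec_merge(out[k], v)
        else:
            out[k] = v
    return out

config = {"train": {"lr": 0.1, "epochs": 5}}
override = {"train": {"lr": 0.01}}
merged = functools.reduce(rec_merge, (config, override))
# merged == {"train": {"lr": 0.01, "epochs": 5}}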
def replace(self, i, species, coords=None, coords_are_cartesian=False, properties=None): if (coords is None): frac_coords = self[i].frac_coords elif coords_are_cartesian: frac_coords = self._lattice.get_fractional_coords(coords) else: frac_coords = coords new_site = PeriodicSite(species, frac_coords, self._lattice, properties=properties) self._sites[i] = new_site
Replace a single site. Takes either a species or a dict of species and occupations. Args: i (int): Index of the site in the _sites list. species (species-like): Species of replacement site coords (3x1 array): Coordinates of replacement site. If None, the current coordinates are assumed. coords_are_cartesian (bool): Whether coordinates are cartesian. Defaults to False. properties (dict): Properties associated with the site.
codesearchnet
def rename_next_state_fluent(name: str) -> str: i = name.index('/') functor = name[:(i - 1)] arity = name[(i + 1):] return '{}/{}'.format(functor, arity)
Returns the current state fluent canonical name corresponding to a next state fluent (the trailing prime on the functor is dropped). Args: name (str): The next state fluent name. Returns: str: The current state fluent canonical name.
codesearchnet
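Illustrative call for the row above (the fluent name is made up). The slice name[:(i - 1)] drops the character just before '/', i.e. the prime of a primed fluent name.
# Assuming rename_next_state_fluent from the row above is in scope.
assert rename_next_state_fluent("location'/1") == 'location/1'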
def add_update(self, updates, inputs=None): if inputs is not None: tf_logging.warning('`add_update` `inputs` kwarg has been deprecated. You no longer need to pass a value to `inputs` as it is being automatically inferred.') call_context = base_layer_utils.call_context() if distribute_lib.has_strategy() and distribute_lib.in_cross_replica_context() and (not call_context.saving): return updates = generic_utils.to_list(updates) if call_context.in_call: relevant_inputs = call_context.inputs else: inbound_nodes = getattr(self, '_inbound_nodes', []) relevant_inputs = [node.input_tensors for node in inbound_nodes] def process_update(x): if callable(x): update = lambda: process_update(x()) return update() elif isinstance(x, ops.Operation): update = x elif hasattr(x, 'op'): update = x.op else: update = tensor_conversion.convert_to_tensor_v2_with_dispatch(x) reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update]) update._unconditional_update = update not in reachable return update updates = [process_update(x) for x in updates] self._updates.extend(updates)
Add update op(s), potentially dependent on layer inputs. Weight updates (for instance, the updates of the moving mean and variance in a BatchNormalization layer) may be dependent on the inputs passed when calling a layer. Hence, when reusing the same layer on different inputs `a` and `b`, some entries in `layer.updates` may be dependent on `a` and some on `b`. This method automatically keeps track of dependencies. The `get_updates_for` method allows retrieving the updates relevant to a specific set of inputs. This call is ignored when eager execution is enabled (in that case, variable updates are run on the fly and thus do not need to be tracked for later execution). Args: updates: Update op, or list/tuple of update ops, or zero-arg callable that returns an update op. A zero-arg callable should be passed in order to disable running the updates by setting `trainable=False` on this Layer, when executing in Eager mode. inputs: Deprecated, will be automatically inferred.
github-repos
def get_module_functions(modules): module_fns = set() for module in modules: for key in dir(module): attr = getattr(module, key) if isinstance(attr, (types.BuiltinFunctionType, types.FunctionType, numpy.ufunc)): module_fns.add(attr) return module_fns
Collects the functions defined in a list of modules. Args: modules: A list of Python modules. Attributes of these modules that are plain functions, builtins or NumPy ufuncs are gathered into the returned set. Returns: module_fns: A set of functions, builtins or ufuncs found in `modules`.
codesearchnet
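A small usage sketch for get_module_functions above; it only assumes the standard library plus numpy are available.
import math
import numpy
# Assuming get_module_functions from the row above is importable.
fns = get_module_functions([math, numpy])
assert math.sqrt in fns          # builtin function
assert numpy.add in fns          # numpy ufunc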
def unarchive_user(self, user_id): url = (self.record_url + '/unarchive') res = requests.patch(url=url, json={'user_id': user_id}, headers=HEADERS, verify=False) self.write_response_html_to_file(res, 'bob.html') res.raise_for_status()
Unarchives the user with the specified user ID. Args: user_id: `int`. The ID of the user to unarchive. Returns: `NoneType`: None.
codesearchnet
def numeric_summary(tensor): def _counts_summary(counts, skip_zeros=True, total_count=None): if skip_zeros: counts = [(count_key, count_val) for count_key, count_val in counts if count_val] max_common_len = 0 for count_key, count_val in counts: count_val_str = str(count_val) common_len = max(len(count_key) + 1, len(count_val_str) + 1) max_common_len = max(common_len, max_common_len) key_line = debugger_cli_common.RichLine('|') val_line = debugger_cli_common.RichLine('|') for count_key, count_val in counts: count_val_str = str(count_val) key_line += _pad_string_to_length(count_key, max_common_len) val_line += _pad_string_to_length(count_val_str, max_common_len) key_line += ' |' val_line += ' |' if total_count is not None: total_key_str = 'total' total_val_str = str(total_count) max_common_len = max(len(total_key_str) + 1, len(total_val_str)) total_key_str = _pad_string_to_length(total_key_str, max_common_len) total_val_str = _pad_string_to_length(total_val_str, max_common_len) key_line += total_key_str + ' |' val_line += total_val_str + ' |' return debugger_cli_common.rich_text_lines_from_rich_line_list([key_line, val_line]) if not isinstance(tensor, np.ndarray) or not np.size(tensor): return debugger_cli_common.RichTextLines(['No numeric summary available due to empty tensor.']) elif np.issubdtype(tensor.dtype, np.floating) or np.issubdtype(tensor.dtype, np.complexfloating) or np.issubdtype(tensor.dtype, np.integer): counts = [('nan', np.sum(np.isnan(tensor))), ('-inf', np.sum(np.isneginf(tensor))), ('-', np.sum(np.logical_and(tensor < 0.0, np.logical_not(np.isneginf(tensor))))), ('0', np.sum(tensor == 0.0)), ('+', np.sum(np.logical_and(tensor > 0.0, np.logical_not(np.isposinf(tensor))))), ('+inf', np.sum(np.isposinf(tensor)))] output = _counts_summary(counts, total_count=np.size(tensor)) valid_array = tensor[np.logical_not(np.logical_or(np.isinf(tensor), np.isnan(tensor)))] if np.size(valid_array): stats = [('min', np.min(valid_array)), ('max', np.max(valid_array)), ('mean', np.mean(valid_array)), ('std', np.std(valid_array))] output.extend(_counts_summary(stats, skip_zeros=False)) return output elif tensor.dtype == np.bool_: counts = [('False', np.sum(tensor == 0)), ('True', np.sum(tensor > 0))] return _counts_summary(counts, total_count=np.size(tensor)) else: return debugger_cli_common.RichTextLines(['No numeric summary available due to tensor dtype: %s.' % tensor.dtype])
Get a text summary of a numeric tensor. This summary is only available for numeric (int*, float*, complex*) and Boolean tensors. Args: tensor: (`numpy.ndarray`) the tensor value object to be summarized. Returns: The summary text as a `RichTextLines` object. If the type of `tensor` is not numeric or Boolean, a single-line `RichTextLines` object containing a warning message will reflect that.
github-repos
def ReadPathInfoHistory(self, client_id, path_type, components): histories = self.ReadPathInfosHistories(client_id, path_type, [components]) return histories[components]
Reads a collection of hash and stat entries for a given path. Args: client_id: An identifier string for a client. path_type: A type of a path to retrieve path history for. components: A tuple of path components corresponding to the path to retrieve information for. Returns: A list of `rdf_objects.PathInfo` ordered by timestamp in ascending order.
codesearchnet
def _data_from_df(df): _df = df.copy() if isinstance(df.columns, pd.MultiIndex): try: _df.columns = ['_'.join(col) for col in _df.columns.values] except TypeError: raise TypeError('Could not flatten MultiIndex columns. use string column names or flatten manually') if isinstance(df.columns, pd.CategoricalIndex): _df.columns = df.columns.tolist() index_name = ColumnDataSource._df_index_name(df) if (index_name == 'index'): _df.index = pd.Index(_df.index.values) else: _df.index = pd.Index(_df.index.values, name=index_name) _df.reset_index(inplace=True) tmp_data = {c: v.values for (c, v) in _df.iteritems()} new_data = {} for (k, v) in tmp_data.items(): new_data[k] = v return new_data
Create a ``dict`` of columns from a Pandas ``DataFrame``, suitable for creating a ColumnDataSource. Args: df (DataFrame) : data to convert Returns: dict[str, np.array]
codesearchnet
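The reset-index-then-columns idea at the core of _data_from_df, shown without bokeh as a rough pandas-only analogue (not the exact ColumnDataSource code path).
import pandas as pd
df = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
data = {c: v.values for c, v in df.reset_index().items()}
# {'index': array([0, 1]), 'a': array([1, 2]), 'b': array([3., 4.])}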
def DotProductAttention(query, key, value, mask, dropout, mode, rng): depth = np.shape(query)[(- 1)] dots = (np.matmul(query, np.swapaxes(key, (- 1), (- 2))) / np.sqrt(depth)) if (mask is not None): dots = np.where(mask, dots, (- 1000000000.0)) dots = np.exp((dots - backend.logsumexp(dots, axis=(- 1), keepdims=True))) if (dropout >= 1.0): raise ValueError('Dropout rates must be lower than 1.') if ((dropout is not None) and (dropout > 0.0) and (mode == 'train')): keep = backend.random.bernoulli(rng, (1.0 - dropout), dots.shape) dots = np.where(keep, (dots / (1.0 - dropout)), 0) out = np.matmul(dots, value) return out
Core dot product self-attention. Args: query: array of representations key: array of representations value: array of representations mask: attention-mask, gates attention dropout: float: dropout rate mode: 'eval' or 'train': whether to use dropout rng: JAX PRNGKey: subkey for disposable use Returns: Self attention for q, k, v arrays.
codesearchnet
def __init__(self, intervals: List[Interval] = None, no_overlap: bool = True, no_contiguous: bool = True) -> None: self.intervals = [] if intervals is None else list(intervals) self.no_overlap = no_overlap self.no_contiguous = no_contiguous for i in self.intervals: if not isinstance(i, Interval): raise TypeError( "IntervalList creation failed: contents are not all " "Interval: {}".format(repr(self.intervals))) self._tidy()
Creates the :class:`IntervalList`. Args: intervals: optional list of :class:`Interval` objects to incorporate into the :class:`IntervalList` no_overlap: merge intervals that overlap (now and on subsequent addition)? no_contiguous: if ``no_overlap`` is set, merge intervals that are contiguous too?
juraj-google-style
def rebuild_tree(cls, session, tree_id): session.query(cls).filter_by(tree_id=tree_id).update({cls.left: 0, cls.right: 0, cls.level: 0}) top = session.query(cls).filter_by(parent_id=None).filter_by(tree_id=tree_id).one() top.left = left = 1 top.right = right = 2 top.level = level = cls.get_default_level() def recursive(children, left, right, level): level = (level + 1) for (i, node) in enumerate(children): same_level_right = children[(i - 1)].right left = (left + 1) if (i > 0): left = (left + 1) if same_level_right: left = (same_level_right + 1) right = (left + 1) node.left = left node.right = right parent = node.parent j = 0 while parent: parent.right = ((right + 1) + j) parent = parent.parent j += 1 node.level = level recursive(node.children, left, right, level) recursive(top.children, left, right, level)
This method rebuilds the tree. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session tree_id (int or str): id of tree Example: * :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_rebuild`
codesearchnet
def Format(self, format_string, rdf): result = [] for (literal_text, field_name, _, _) in self.parse(format_string): if literal_text: result.append(literal_text) if (field_name is not None): rslts = [] objs = self.expander(rdf, field_name) for o in objs: rslts.extend(self.FanOut(o)) result.append(','.join(rslts)) return ''.join(result)
Apply string formatting templates to rdf data. Uses some heuristics to coerce rdf values into a form compatible with string formatter rules. Repeated items are condensed into a single comma separated list. Unlike regular string.Formatter operations, we use objectfilter expansion to fully acquire the target attribute in one pass, rather than recursing down each element of the attribute tree. Args: format_string: A format string specification. rdf: The rdf value to be formatted. Returns: A string of formatted data.
codesearchnet
def path_in_cache(self, filename, metahash): cpath = self._genpath(filename, metahash) if os.path.exists(cpath): return cpath else: raise CacheMiss
Generates the path to a file in the metahash cache. Args: filename: Filename relative to buildroot metahash: hash object Returns: The cache path if the file exists. Raises: CacheMiss: if the file is not present in the cache.
juraj-google-style
def build_from_token_counts(self, token_counts, min_count, num_iterations=4): self._init_alphabet_from_tokens(six.iterkeys(token_counts)) self._init_subtokens_from_list(list(self._alphabet)) if (min_count < 1): min_count = 1 for i in xrange(num_iterations): subtoken_counts = collections.defaultdict(int) for (token, count) in six.iteritems(token_counts): escaped_token = _escape_token(token, self._alphabet) subtokens = self._escaped_token_to_subtoken_strings(escaped_token) start = 0 for subtoken in subtokens: for end in xrange((start + 1), (len(escaped_token) + 1)): new_subtoken = escaped_token[start:end] subtoken_counts[new_subtoken] += count start += len(subtoken) len_to_subtoken_strings = [] for (subtoken_string, count) in six.iteritems(subtoken_counts): lsub = len(subtoken_string) if (count >= min_count): while (len(len_to_subtoken_strings) <= lsub): len_to_subtoken_strings.append(set()) len_to_subtoken_strings[lsub].add(subtoken_string) new_subtoken_strings = [] for lsub in xrange((len(len_to_subtoken_strings) - 1), 0, (- 1)): subtoken_strings = len_to_subtoken_strings[lsub] for subtoken_string in subtoken_strings: count = subtoken_counts[subtoken_string] if (count >= min_count): if (subtoken_string not in self._alphabet): new_subtoken_strings.append((count, subtoken_string)) for l in xrange(1, lsub): subtoken_counts[subtoken_string[:l]] -= count new_subtoken_strings.extend(((subtoken_counts.get(a, 0), a) for a in self._alphabet)) new_subtoken_strings.sort(reverse=True) self._init_subtokens_from_list([subtoken for (_, subtoken) in new_subtoken_strings])
Train a SubwordTextTokenizer based on a dictionary of word counts. Args: token_counts: a dictionary of Unicode strings to int. min_count: an integer - discard subtokens with lower counts. num_iterations: an integer; how many iterations of refinement.
codesearchnet
def interruptRead(self, endpoint, size, timeout=100): return self.dev.read(endpoint, size, timeout)
Performs an interrupt read request to the endpoint specified. Arguments: endpoint: endpoint number. size: number of bytes to read. timeout: operation timeout in milliseconds. (default: 100) Returns a tuple with the data read.
juraj-google-style
def get_values(js_dict, value='value'): values = js_dict[value] if type(values) is list: if type(values[0]) is not dict or tuple: return values values = {int(key): value for (key, value) in values.items()} if js_dict.get('size'): max_val = np.prod(np.array((js_dict['size']))) else: max_val = np.prod(np.array((js_dict['dimension']['size']))) vals = max_val * [None] for (key, value) in values.items(): vals[key] = value values = vals return values
Get values from input data. Args: js_dict (dict): dictionary containing dataset data and metadata. value (string, optional): name of the value column. Defaults to 'value'. Returns: values (list): list of dataset values.
juraj-google-style
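Two illustrative inputs for get_values above, assuming the function and numpy are importable; the second shows the sparse JSON-stat form being expanded.
# Assuming get_values from the row above is in scope.
get_values({'value': [1, 2, 3]})   # -> [1, 2, 3]
# When 'value' is a dict keyed by flat index (JSON-stat sparse form), it is
# expanded into a full list of length prod(size), e.g.
# {'value': {'0': 1, '2': 3}, 'size': [3]} -> [1, None, 3]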
def run(func, keys, max_procs=None, show_proc=False, affinity=None, **kwargs): if max_procs is None: max_procs = cpu_count() kw_arr = saturate_kwargs(keys=keys, **kwargs) if len(kw_arr) == 0: return if isinstance(affinity, int): win32process.SetProcessAffinityMask(win32api.GetCurrentProcess(), affinity) task_queue = queue.Queue() while len(kw_arr) > 0: for _ in range(max_procs): if len(kw_arr) == 0: break kw = kw_arr.pop(0) p = Process(target=func, kwargs=kw) p.start() sys.stdout.flush() task_queue.put(p) if show_proc: signature = ', '.join([f'{k}={v}' for k, v in kw.items()]) print(f'[{func.__name__}] ({signature})') while not task_queue.empty(): p = task_queue.get() p.join()
Provide interface for multiprocessing Args: func: callable functions keys: keys in kwargs that want to use process max_procs: max number of processes show_proc: whether to show process affinity: CPU affinity **kwargs: kwargs for func
juraj-google-style
class PerceiverOneHotPreprocessor(AbstractPreprocessor): def __init__(self, config: PerceiverConfig) -> None: super().__init__() self.config: PerceiverConfig = config @property def num_channels(self) -> int: return self.config.num_labels def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor]=None, network_input_is_1d: bool=True): inputs = inputs[:, None, :] return (inputs, None, inputs)
One-hot preprocessor for Perceiver Encoder. Can be used to add a dummy index dimension to the input. Args: config ([`PerceiverConfig`]): Model configuration.
github-repos
def copy_count_a(input_a, *other_inputs, **kwargs): count = input_a.count() input_a.skip_all() for input_x in other_inputs: input_x.skip_all() return [IOTileReading(0, 0, count)]
Copy the count of readings in input a into the output. All other inputs are skipped so that after this function runs there are no readings left in any of the input walkers even if no output is generated. Returns: list(IOTileReading)
codesearchnet
def ParseShadowEntry(self, line): fields = ("login", "passwd", "last_change", "min_age", "max_age", "warn_time", "inactivity", "expire", "reserved") if line: rslt = dict(zip(fields, line.split(":"))) pw_entry = self.shadow.setdefault(rslt["login"], rdf_client.PwEntry()) pw_entry.store = self.shadow_store pw_entry.hash_type = self.GetHashType(rslt["passwd"]) last_change = rslt.get("last_change") if last_change: pw_entry.age = int(last_change) max_age = rslt.get("max_age") if max_age: pw_entry.max_age = int(max_age)
Extract the user accounts in /etc/shadow. Identifies the users in /etc/shadow and several attributes of their account, including how their password is crypted and password aging characteristics. Args: line: An entry of the shadow file.
juraj-google-style
def try_get_column(column_name, node, context): selectable = get_node_selectable(node, context) if not hasattr(selectable, 'c'): raise AssertionError( u'Selectable "{}" does not have a column collection. Context is {}.'.format( selectable, context)) return selectable.c.get(column_name, None)
Attempt to get a column by name from the selectable. Args: column_name: str, name of the column to retrieve. node: SqlNode, the node the column is being retrieved for. context: CompilationContext, compilation specific metadata. Returns: Optional[column], the SQLAlchemy column if found, None otherwise.
juraj-google-style
def _testMultipleReduceJoin(self, input_array, axis, separator=' '): with self.cached_session(): output = string_ops.reduce_join(inputs=input_array, axis=axis, keep_dims=False, separator=separator) output_keep_dims = string_ops.reduce_join(inputs=input_array, axis=axis, keep_dims=True, separator=separator) truth = input_array for index in axis: truth = string_ops.reduce_join(inputs=truth, axis=index, keep_dims=True, separator=separator) if not axis: truth = constant_op.constant(truth) truth_squeezed = array_ops.squeeze(truth, axis=axis) output_array = self.evaluate(output) output_keep_dims_array = self.evaluate(output_keep_dims) truth_array = self.evaluate(truth) truth_squeezed_array = self.evaluate(truth_squeezed) self.assertAllEqualUnicode(truth_array, output_keep_dims_array) self.assertAllEqualUnicode(truth_squeezed_array, output_array) self.assertAllEqual(truth.get_shape(), output_keep_dims.get_shape()) self.assertAllEqual(truth_squeezed.get_shape(), output.get_shape())
Tests reduce_join for one input and multiple axes. Does so by comparing the output to that from nested reduce_string_joins. The correctness of single-dimension reduce_join is verified by other tests below using _testReduceJoin. Args: input_array: The input to test. axis: The indices to reduce. separator: The separator to use when joining.
github-repos
def _convert_template_option(template): option = {} extraction_method = template.get('extraction_method') if extraction_method == 'guess': option['guess'] = True elif extraction_method == 'lattice': option['lattice'] = True elif extraction_method == 'stream': option['stream'] = True option['pages'] = template.get('page') option['area'] = [round(template['y1'], 3), round(template['x1'], 3), round(template['y2'], 3), round(template['x2'], 3)] return option
Convert Tabula app template to tabula-py option Args: template (dict): Tabula app template Returns: `obj`:dict: tabula-py option
juraj-google-style
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros.
github-repos
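A standalone illustration of the mask lengths produced above; the IDs here are made up, whereas the real tokenizer takes cls/sep from cls_token_id/sep_token_id.
cls, sep = [0], [2]                      # hypothetical special-token IDs
token_ids_0, token_ids_1 = [10, 11, 12], [20, 21]
single = len(cls + token_ids_0 + sep) * [0]                          # 5 zeros
pair = len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]  # 9 zeros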
def iter_compress(item_iter, flag_iter): true_items = (item for (item, flag) in zip(item_iter, flag_iter) if flag) return true_items
iter_compress - like numpy compress Args: item_iter (list): flag_iter (list): of bools Returns: list: true_items Example: >>> # ENABLE_DOCTEST >>> from utool.util_iter import * # NOQA >>> item_iter = [1, 2, 3, 4, 5] >>> flag_iter = [False, True, True, False, True] >>> true_items = iter_compress(item_iter, flag_iter) >>> result = list(true_items) >>> print(result) [2, 3, 5]
codesearchnet
def CheckComment(line, filename, linenum, next_line_start, error): commentpos = line.find('//') if (commentpos != (- 1)): if ((re.sub('\\\\.', '', line[0:commentpos]).count('"') % 2) == 0): if ((not (Match('^.*{ *//', line) and (next_line_start == commentpos))) and (((commentpos >= 1) and (line[(commentpos - 1)] not in string.whitespace)) or ((commentpos >= 2) and (line[(commentpos - 2)] not in string.whitespace)))): error(filename, linenum, 'whitespace/comments', 2, 'At least two spaces is best between code and comments') comment = line[commentpos:] match = _RE_PATTERN_TODO.match(comment) if match: leading_whitespace = match.group(1) if (len(leading_whitespace) > 1): error(filename, linenum, 'whitespace/todo', 2, 'Too many spaces before TODO') username = match.group(2) if (not username): error(filename, linenum, 'readability/todo', 2, 'Missing username in TODO; it should look like "// TODO(my_username): Stuff."') middle_whitespace = match.group(3) if ((middle_whitespace != ' ') and (middle_whitespace != '')): error(filename, linenum, 'whitespace/todo', 2, 'TODO(my_username) should be followed by a space') if (Match('//[^ ]*\\w', comment) and (not Match('(///|//\\!)(\\s+|$)', comment))): error(filename, linenum, 'whitespace/comments', 4, 'Should have a space between // and comment')
Checks for common mistakes in comments. Args: line: The line in question. filename: The name of the current file. linenum: The number of the line to check. next_line_start: The first non-whitespace column of the next line. error: The function to call with any errors found.
codesearchnet
def _parse_authors(authors): link = authors.find('a') link = (link[0].params.get('href') if link else None) author_list = _strip_content(authors) if ('(' in author_list): author_list = author_list.split('(')[0] if (not author_list.strip()): return [] return map((lambda author: Author(author.strip(), link)), author_list.strip().split(','))
Parse information about the authors of the book. Args: dom (obj): HTMLElement containing slice of the page with details. Returns: list: List of :class:`.Author` objects. Blank if no author is found.
codesearchnet
def main(args): if not args: raise Exception('Please specify at least one JSON config path') inputs = [] program = [] outputs = [] for arg in args: with open(arg) as fd: config = json.load(fd) inputs.extend(config.get('inputs', [])) program.extend(config.get('program', [])) outputs.extend(config.get('outputs', [])) if not program: raise Exception('Please specify a program') return run(inputs, program, outputs)
Invokes run function using a JSON file config. Args: args: CLI args, which can be a JSON file containing an object whose attributes are the parameters to the run function. If multiple JSON files are passed, their contents are concatenated. Returns: 0 if succeeded or nonzero if failed. Raises: Exception: If input data is missing.
juraj-google-style
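A hedged sketch of a config file that main() above would accept; run() and the temporary file name are assumptions about the surrounding module.
import json, tempfile
cfg = {"inputs": ["in.txt"], "program": ["step1", "step2"], "outputs": ["out.txt"]}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fd:
    json.dump(cfg, fd)
# main([fd.name]) would then call run(["in.txt"], ["step1", "step2"], ["out.txt"])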
def slice_hidden(x, hidden_size, num_blocks): batch_size, latent_dim, _ = common_layers.shape_list(x) block_dim = hidden_size x_sliced = tf.reshape(x, shape=[batch_size, latent_dim, num_blocks, block_dim]) return x_sliced
Slice encoder hidden state under num_blocks. Args: x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size]. hidden_size: Dimension of the latent space. num_blocks: Number of blocks in DVQ. Returns: Sliced states of shape [batch_size, latent_dim, num_blocks, block_dim].
juraj-google-style
def http_exception(channel, title): gui = ui_embed.UI( channel, "Too much help", "{} is too helpful! Try trimming some of the help messages.".format(title), modulename=modulename ) return gui
Creates an embed UI containing the 'too long' error message Args: channel (discord.Channel): The Discord channel to bind the embed to title (str): The title of the embed Returns: ui (ui_embed.UI): The embed UI object
juraj-google-style
def get_cohp(self, spin=None, integrated=False): if (not integrated): populations = self.cohp else: populations = self.icohp if (populations is None): return None elif (spin is None): return populations else: if isinstance(spin, int): spin = Spin(spin) elif isinstance(spin, str): s = {'up': 1, 'down': (- 1)}[spin.lower()] spin = Spin(s) return {spin: populations[spin]}
Returns the COHP or ICOHP for a particular spin. Args: spin: Spin. Can be parsed as spin object, integer (-1/1) or str ("up"/"down") integrated: Return COHP (False) or ICOHP (True) Returns: Returns the COHP or ICOHP for the input spin. If Spin is None and both spins are present, both spins will be returned as a dictionary.
codesearchnet
def _get_input_readers(self, state): serialized_input_readers_key = (self._SERIALIZED_INPUT_READERS_KEY % state.key().id_or_name()) serialized_input_readers = model._HugeTaskPayload.get_by_key_name( serialized_input_readers_key, parent=state) input_reader_class = state.mapreduce_spec.mapper.input_reader_class() split_param = state.mapreduce_spec.mapper if issubclass(input_reader_class, map_job.InputReader): split_param = map_job.JobConfig._to_map_job_config( state.mapreduce_spec, os.environ.get("HTTP_X_APPENGINE_QUEUENAME")) if serialized_input_readers is None: readers = input_reader_class.split_input(split_param) else: readers = [input_reader_class.from_json_str(_json) for _json in json.loads(zlib.decompress( serialized_input_readers.payload))] if not readers: return None, None state.mapreduce_spec.mapper.shard_count = len(readers) state.active_shards = len(readers) if serialized_input_readers is None: serialized_input_readers = model._HugeTaskPayload( key_name=serialized_input_readers_key, parent=state) readers_json_str = [i.to_json_str() for i in readers] serialized_input_readers.payload = zlib.compress(json.dumps( readers_json_str)) return readers, serialized_input_readers
Get input readers. Args: state: a MapreduceState model. Returns: A tuple: (a list of input readers, a model._HugeTaskPayload entity). The payload entity contains the json serialized input readers. (None, None) when input reader splitting returned no data to process.
juraj-google-style
def _url_dirname(self, url_or_path): scheme, path = self._split_scheme(url_or_path) return self._combine_scheme(scheme, posixpath.dirname(path))
Like posixpath.dirname, but preserves scheme:// prefix. Args: url_or_path: A string in the form of scheme://some/path OR /some/path.
github-repos
def phase_flip(p: Optional[float]=None) -> Union[(common_gates.ZPowGate, PhaseFlipChannel)]: if (p is None): return _phase_flip_Z() return _phase_flip(p)
Returns a PhaseFlipChannel that flips a qubit's phase with probability p. If p is None, returns a guaranteed phase flip in the form of a Z operation. This channel evolves a density matrix via: $$ \rho \rightarrow M_0 \rho M_0^\dagger + M_1 \rho M_1^\dagger $$ With: $$ \begin{aligned} M_0 =& \sqrt{p} \begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix} \\ M_1 =& \sqrt{1-p} \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} \end{aligned} $$ Args: p: the probability of a phase flip. Raises: ValueError: if p is not a valid probability.
codesearchnet
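A brief usage sketch, assuming this row is the cirq.phase_flip factory and cirq is installed.
import cirq
q = cirq.LineQubit(0)
noisy = cirq.Circuit([cirq.phase_flip(0.2).on(q)])   # PhaseFlipChannel with p=0.2
certain = cirq.Circuit([cirq.phase_flip().on(q)])    # no p: a plain Z operation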
def to_file(self, filename): d = {"mass_info": self.mass_info, "nonbond_coeffs": self.nonbond_coeffs, "topo_coeffs": self.topo_coeffs} yaml = YAML(typ="safe") with open(filename, "w") as f: yaml.dump(d, f)
Saves object to a file in YAML format. Args: filename (str): Filename.
juraj-google-style
def do_load(self, design, init=False): if design: filename = self._validated_config_filename(design) with open(filename, "r") as f: text = f.read() structure = json_decode(text) else: structure = {} attributes = structure.get("attributes", structure) children = structure.get("children", structure) name, mri, x, y, visible = [], [], [], [], [] for part_name, d in attributes.get("layout", {}).items(): name.append(part_name) mri.append("") x.append(d["x"]) y.append(d["y"]) visible.append(d["visible"]) self.set_layout(LayoutTable(name, mri, x, y, visible)) source, export = [], [] for source_name, export_name in attributes.get("exports", {}).items(): source.append(source_name) export.append(export_name) self.exports.set_value(ExportTable(source, export)) our_values = {k: v for k, v in attributes.items() if k in self.our_config_attributes} block = self.block_view() block.put_attribute_values(our_values) self.run_hooks( LoadHook(p, c, children.get(p.name, {}), init) for p, c in self.create_part_contexts(only_visible=False).items()) self._mark_clean(design, init)
Load a design name, running the child LoadHooks. Args: design: Name of the design json file, without extension init: Passed to the LoadHook to tell the children if this is being run at Init or not
juraj-google-style
def __init__(self, key_spec: Union[KeySpec, str], value_spec: ValueSpec, description: Optional[str]=None, metadata: Optional[Dict[str, Any]]=None, origin: Optional[Type[Any]]=None) -> None: if isinstance(key_spec, str): key_spec = KeySpec.from_str(key_spec) assert isinstance(key_spec, KeySpec), key_spec self._key = key_spec self._value = value_spec self._description = description self._origin = origin if metadata and (not isinstance(metadata, dict)): raise ValueError('metadata must be a dict.') self._metadata = metadata or {}
Constructor. Args: key_spec: Key specification of the field. Can be a string or a KeySpec instance. value_spec: Value specification of the field. description: Description of the field. metadata: A dict of objects as metadata for the field. origin: The class that this field originates from. Raises: ValueError: metadata is not a dict.
github-repos
def _version_from_file( path_to_version, default_version=DEFAULT_VERSION, ): version_filepath = os.path.join(path_to_version, 'version.txt') if not os.path.isfile(version_filepath): warnings.warn( 'Unable to resolve current version', exceptions.ProsperDefaultVersionWarning) return default_version with open(version_filepath, 'r') as v_fh: data = v_fh.read() return data
for PyPI installed versions, just get data from file Args: path_to_version (str): abspath to dir where version.txt exists default_version (str): fallback version in case of error Returns: str: current working version
juraj-google-style
def _CheckLine(self, line): for rule in self._cur_state: matched = self._CheckRule(rule, line) if matched: for value in matched.groupdict(): self._AssignVar(matched, value) if self._Operations(rule): if rule.new_state: if (rule.new_state not in ('End', 'EOF')): self._cur_state = self.states[rule.new_state] self._cur_state_name = rule.new_state break
Passes the line through each rule until a match is made. Args: line: A string, the current input line.
codesearchnet
def __init__(self): super(JLinkTraceRegion, self).__init__() self.SizeOfStruct = ctypes.sizeof(self)
Initializes the trace region. Sets the size of the structure. Args: self (JLinkTraceRegion): the ``JLinkTraceRegion`` instance. Returns: ``None``
juraj-google-style
def set_ylim(self, ylims, dy, yscale, reverse=False): self._set_axis_limits('y', ylims, dy, yscale, reverse) return
Set y limits for plot. This will set the limits for the y axis for the specific plot. Args: ylims (len-2 list of floats): The limits for the axis. dy (float): Amount to increment by between the limits. yscale (str): Scale of the axis. Either `log` or `lin`. reverse (bool, optional): If True, reverse the axis tick marks. Default is False.
juraj-google-style
def get_model(self): model_cls = get_connected_model_for_table_name(self.table_name) return model_cls._default_manager.filter(id=self.record_id).first()
Fetch the instance of the connected model referenced by this log record. Returns: The connected instance, or ``None`` if it does not exist.
codesearchnet
def _augment_observation(self, ob, reward, cumulative_reward): img = PIL_Image().new("RGB", (ob.shape[1], self.HEADER_HEIGHT,)) draw = PIL_ImageDraw().Draw(img) draw.text( (1, 0), "c:{:3}, r:{:3}".format(int(cumulative_reward), int(reward)), fill=(255, 0, 0) ) draw.text( (1, 15), "fc:{:3}".format(int(self._frame_counter)), fill=(255, 0, 0) ) header = np.asarray(img) del img header.setflags(write=1) if self._wait: pixel_fill = (0, 255, 0) else: pixel_fill = (255, 0, 0) header[0, :, :] = pixel_fill return np.concatenate([header, ob], axis=0)
Expand observation array with additional information header (top rows). Args: ob: observation reward: reward to be included in header. cumulative_reward: total cumulated reward to be included in header. Returns: Expanded observation array.
juraj-google-style
def clean_value(self): result = [] for mdl in self: result.append(super(ListNode, mdl).clean_value()) return result
Populates json serialization ready data. This is the method used to serialize and store the object data into the DB. Returns: List of dicts.
codesearchnet
def execute(self, correlation_id, args): if self._schema != None: self.validate_and_throw_exception(correlation_id, args) try: return self._function(correlation_id, args) except Exception as ex: raise InvocationException( correlation_id, "EXEC_FAILED", "Execution " + self._name + " failed: " + str(ex) ).with_details("command", self._name).wrap(ex)
Executes the command given specific arguments as an input. Args: correlation_id: a unique correlation/transaction id args: command arguments Returns: an execution result. Raises: ApplicationException: when execution fails for whatever reason.
juraj-google-style
def request(self, method, params=None): msg_id = self._id_generator() log.debug('Sending request with id %s: %s %s', msg_id, method, params) message = { 'jsonrpc': JSONRPC_VERSION, 'id': msg_id, 'method': method, } if params is not None: message['params'] = params request_future = futures.Future() request_future.add_done_callback(self._cancel_callback(msg_id)) self._server_request_futures[msg_id] = request_future self._consumer(message) return request_future
Send a JSON RPC request to the client. Args: method (str): The method name of the message to send params (any): The payload of the message Returns: Future that will resolve once a response has been received
juraj-google-style
def trace_format(self): cmd = enums.JLinkTraceCommand.GET_FORMAT data = ctypes.c_uint32(0) res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data)) if (res == 1): raise errors.JLinkException('Failed to get trace format.') return data.value
Retrieves the current format the trace buffer is using. Args: self (JLink): the ``JLink`` instance. Returns: The current format the trace buffer is using. This is one of the attributes of ``JLinkTraceFormat``.
juraj-google-style
def _read_and_batch_from_files(file_pattern, batch_size, max_length, num_cpu_cores, shuffle, repeat): dataset = tf.data.Dataset.list_files(file_pattern) if shuffle: mlperf_log.transformer_print(key=mlperf_log.INPUT_ORDER) dataset = dataset.shuffle(buffer_size=_FILE_SHUFFLE_BUFFER) dataset = dataset.apply(tf.contrib.data.parallel_interleave(_load_records, sloppy=shuffle, cycle_length=num_cpu_cores)) dataset = dataset.map(_parse_example, num_parallel_calls=num_cpu_cores) dataset = dataset.filter((lambda x, y: _filter_max_length((x, y), max_length))) mlperf_log.transformer_print(key=mlperf_log.INPUT_BATCH_SIZE, value=batch_size) mlperf_log.transformer_print(key=mlperf_log.INPUT_MAX_LENGTH, value=max_length) dataset = _batch_examples(dataset, batch_size, max_length) dataset = dataset.repeat(repeat) dataset = dataset.prefetch(1) return dataset
Create dataset where each item is a dict of "inputs" and "targets". Args: file_pattern: String used to match the input TFRecord files. batch_size: Maximum number of tokens per batch of examples max_length: Maximum number of tokens per example num_cpu_cores: Number of cpu cores for parallel input processing. shuffle: If true, randomizes order of elements. repeat: Number of times to repeat the dataset. If None, the dataset is repeated forever. Returns: tf.data.Dataset object containing examples loaded from the files.
codesearchnet
def finalise(self): assert self._is_solved() g = self._get_minimal_graph() scopes = dict(((x.package_name, x) for x in self.scopes if (not x.package_request.conflict))) fam_cycle = find_cycle(g) if fam_cycle: cycle = [] for fam in fam_cycle: scope = scopes[fam] variant = scope._get_solved_variant() stmt = VersionedObject.construct(fam, variant.version) cycle.append(stmt) phase = copy.copy(self) phase.scopes = scopes.values() phase.failure_reason = Cycle(cycle) phase.status = SolverStatus.cyclic return phase fams = [x.name for x in self.solver.request_list] ordered_fams = _get_dependency_order(g, fams) scopes_ = [] for fam in ordered_fams: scope = scopes[fam] if (not scope.package_request.conflict): scopes_.append(scope) phase = copy.copy(self) phase.scopes = scopes_ return phase
Remove conflict requests, detect cyclic dependencies, and reorder packages wrt dependency and then request order. Returns: A new copy of the phase with conflict requests removed and packages correctly ordered; or, if cyclic dependencies were detected, a new phase marked as cyclic.
codesearchnet
def label_count(self, label_list_ids=None): count = collections.defaultdict(int) for utterance in self.utterances.values(): for label_value, utt_count in utterance.label_count(label_list_ids=label_list_ids).items(): count[label_value] += utt_count return count
Return a dictionary containing the number of times every label-value occurs in this corpus. Args: label_list_ids (list): If not None, only labels from label-lists with an id contained in this list are considered. Returns: dict: A dictionary containing the number of occurrences with the label-value as key.
juraj-google-style
def delete_devices(self, auth_body, devices): content = { "auth": auth_body, "devices": devices } return self._send("POST", "/delete_devices", content=content)
Bulk deletion of devices. NOTE: This endpoint uses the User-Interactive Authentication API. Args: auth_body (dict): Authentication params. devices (list): List of device IDs to delete.
juraj-google-style
def get_disk_usage(self, path=None): DiskUsage = namedtuple('usage', 'total, used, free') if path is None: mount_point = self.mount_points[self.root.name] else: mount_point = self._mount_point_for_path(path) if mount_point and mount_point['total_size'] is not None: return DiskUsage(mount_point['total_size'], mount_point['used_size'], mount_point['total_size'] - mount_point['used_size']) return DiskUsage( 1024 * 1024 * 1024 * 1024, 0, 1024 * 1024 * 1024 * 1024)
Return the total, used and free disk space in bytes as named tuple, or placeholder values simulating unlimited space if not set. .. note:: This matches the return value of shutil.disk_usage(). Args: path: The disk space is returned for the file system device where `path` resides. Defaults to the root path (e.g. '/' on Unix systems).
juraj-google-style
def __init__(self, version_string, first_matched_type, second_matched_type): super(TooManyTypesError, self).__init__( 'Release "{}" cannot match types "{}" and "{}"'.format( version_string, first_matched_type, second_matched_type ) )
Constructor. Args: version_string (str): The string that gave too many types. first_matched_type (str): The name of the first detected type. second_matched_type (str): The name of the second detected type
juraj-google-style
def all_subnets_shorter_prefix(ip_net, cidr, include_default=False): subnets_list = list() if include_default: while int(cidr) >= 0: try: subnets_list.append('%s/%s' % (whole_subnet_maker(ip_net, cidr), cidr)) except Exception as e: LOGGER.critical('Function all_subnets_shorter_prefix {item}'.format(item=e)) cidr = str(int(cidr) - 1) else: while int(cidr) > 0: try: subnets_list.append('%s/%s' % (whole_subnet_maker(ip_net, cidr), cidr)) except Exception as e: LOGGER.critical('Function all_subnets_shorter_prefix {item}'.format(item=e)) cidr = str(int(cidr) - 1) return subnets_list
Function to return every subnet an IP can belong to with a shorter prefix Args: ip_net: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1 cidr: CIDR value of 0 to 32 include_default: If you want the list to include the default route set to True Returns: returns a list of subnets
juraj-google-style
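The same enumeration can be sanity-checked with the standard library; this is an analogue using ipaddress, not the whole_subnet_maker helper the row above relies on.
import ipaddress
# Every containing network of 192.168.1.1 from /24 up to /1:
nets = [str(ipaddress.ip_interface('192.168.1.1/%d' % p).network) for p in range(24, 0, -1)]
# ['192.168.1.0/24', '192.168.0.0/23', '192.168.0.0/22', ..., '128.0.0.0/1']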
def findall(self, title=None): if (title is None): return list(self) files = backend.iterfiles(self._drive, name=title) return [self[id] for (id, _) in files]
Fetch and return a list of spreadsheets with the given title. Args: title(str): title/name of the spreadsheets to return, or ``None`` for all Returns: list: list of new SpreadSheet instances (possibly empty)
codesearchnet
def set_working_directory(working_directory): logger.debug("starting") logger.debug(f"adding {working_directory} to sys.paths") sys.path.append(working_directory) logger.debug("done")
Add working_directory to sys.paths. This allows dynamic loading of arbitrary python modules in cwd. Args: working_directory: string. path to add to sys.paths
juraj-google-style
def pymmh3_hash64(key: Union[(bytes, bytearray)], seed: int=0, x64arch: bool=True) -> Tuple[(int, int)]: hash_128 = pymmh3_hash128(key, seed, x64arch) unsigned_val1 = (hash_128 & 18446744073709551615) if ((unsigned_val1 & 9223372036854775808) == 0): signed_val1 = unsigned_val1 else: signed_val1 = (- ((unsigned_val1 ^ 18446744073709551615) + 1)) unsigned_val2 = ((hash_128 >> 64) & 18446744073709551615) if ((unsigned_val2 & 9223372036854775808) == 0): signed_val2 = unsigned_val2 else: signed_val2 = (- ((unsigned_val2 ^ 18446744073709551615) + 1)) return (signed_val1, signed_val2)
Implements 64bit murmur3 hash, as per ``pymmh3``. Returns a tuple. Args: key: data to hash seed: seed x64arch: is a 64-bit architecture available? Returns: tuple: tuple of integers, ``(signed_val1, signed_val2)``
codesearchnet
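The unsigned-to-signed conversion used twice in the row above, shown in isolation (64-bit two's complement).
def to_signed64(u):
    # Interpret an unsigned 64-bit value as a signed 64-bit integer.
    u &= 0xFFFFFFFFFFFFFFFF
    return u if u < 0x8000000000000000 else -((u ^ 0xFFFFFFFFFFFFFFFF) + 1)

assert to_signed64(5) == 5
assert to_signed64(2**64 - 1) == -1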
def augment(self, dct: NonAugmentedDict, document: Optional[YamlDocument] = None) -> AugmentedDict: Validator.instance_of(dict, raise_ex=True, dct=dct) for instance in self._extensions: nodes = list(dict_find_pattern(dct, **instance.config())) for parent, k, val in nodes: parent.pop(k) fragment = instance.apply( ExtensionContext( mentor=self, document=document or dct, dct=dct, parent_node=parent, node=(k, val) ) ) if fragment is not None: parent.update(fragment) return dct
Augments the given dictionary by using all the bound extensions. Args: dct: Dictionary to augment. document: The document the dictionary was loaded from. Returns: The augmented dictionary.
juraj-google-style
def _custom_diag_normal_kl(lhs, rhs, name=None): with tf.name_scope((name or 'kl_divergence')): mean0 = lhs.mean() mean1 = rhs.mean() logstd0 = tf.log(lhs.stddev()) logstd1 = tf.log(rhs.stddev()) (logstd0_2, logstd1_2) = ((2 * logstd0), (2 * logstd1)) return (0.5 * ((((tf.reduce_sum(tf.exp((logstd0_2 - logstd1_2)), (- 1)) + tf.reduce_sum((((mean1 - mean0) ** 2) / tf.exp(logstd1_2)), (- 1))) + tf.reduce_sum(logstd1_2, (- 1))) - tf.reduce_sum(logstd0_2, (- 1))) - mean0.shape[(- 1)].value))
Empirical KL divergence of two normals with diagonal covariance. Args: lhs: Diagonal Normal distribution. rhs: Diagonal Normal distribution. name: Name scope for the op. Returns: KL divergence from lhs to rhs.
codesearchnet
def get_module_object_and_name(globals_dict): name = globals_dict.get('__name__', None) module = sys.modules.get(name, None) return _ModuleObjectAndName(module, (sys.argv[0] if name == '__main__' else name))
Returns the module that defines a global environment, and its name. Args: globals_dict: A dictionary that should correspond to an environment providing the values of the globals. Returns: _ModuleObjectAndName - pair of module object & module name. Returns (None, None) if the module could not be identified.
juraj-google-style
def waitForEvent(self, event_name, predicate, timeout=DEFAULT_TIMEOUT): deadline = time.perf_counter() + timeout while time.perf_counter() <= deadline: rpc_timeout = deadline - time.perf_counter() if rpc_timeout < 0: break rpc_timeout = min(rpc_timeout, MAX_TIMEOUT) try: event = self.waitAndGet(event_name, rpc_timeout) except TimeoutError: break if predicate(event): return event raise TimeoutError(self._ad, 'Timed out after %ss waiting for an "%s" event that satisfies the predicate "%s".' % (timeout, event_name, predicate.__name__))
Wait for an event of a specific name that satisfies the predicate. This call will block until the expected event has been received or time out. The predicate function defines the condition the event is expected to satisfy. It takes an event and returns True if the condition is satisfied, False otherwise. Note all events of the same name that are received but don't satisfy the predicate will be discarded and not be available for further consumption. Args: event_name: string, the name of the event to wait for. predicate: function, a function that takes an event (dictionary) and returns a bool. timeout: float, default is 120s. Returns: dictionary, the event that satisfies the predicate if received. Raises: TimeoutError: raised if no event that satisfies the predicate is received after timeout seconds.
github-repos
def export_template(access_token, subscription_id, rgname): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/exportTemplate', '?api-version=', RESOURCE_API]) rg_body = {'options': 'IncludeParameterDefaultValue', 'resources': ['*']} body = json.dumps(rg_body) return do_post(endpoint, body, access_token)
Capture the specified resource group as a template Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. Returns: HTTP response. JSON body.
codesearchnet
def test(cls, test_expr: bool = True) -> None: cls.test_dialect_specific_1() cls.test_identifiers() if test_expr: cls.test_expr() cls.test_sql_core() cls.test_dialect_specific_2()
Runs self-tests. Args: test_expr: include tests of expressions (which can be slow).
juraj-google-style
def __init__(self, room_id, api): self.room_id = room_id self.api = api
Instantiates MatrixRoom object. Args: room_id(str): Matrix room id (e.g. !1234567:example.com) api(MatrixASHttpAPI): Api for calls to the server.
juraj-google-style
def upcoming(self, **kwargs): path = self._get_path('upcoming') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the list of upcoming movies. This list refreshes every day. The maximum number of items this list will include is 100. Args: page: (optional) Minimum value of 1. Expected value is an integer. language: (optional) ISO 639-1 code. Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def PyParseRangeCheck(lower_bound, upper_bound): def CheckRange(string, location, tokens): 'Parse the arguments.\n\n Args:\n string (str): original string.\n location (int): location in the string where the match was made\n tokens (list[str]): tokens.\n ' try: check_number = tokens[0] except IndexError: check_number = (- 1) if (check_number < lower_bound): raise pyparsing.ParseException('Value: {0:d} precedes lower bound: {1:d}'.format(check_number, lower_bound)) if (check_number > upper_bound): raise pyparsing.ParseException('Value: {0:d} exceeds upper bound: {1:d}'.format(check_number, upper_bound)) return CheckRange
Verify that a number is within a defined range. This is a callback method for pyparsing setParseAction that verifies that a read number is within a certain range. To use this method it needs to be defined as a callback method in setParseAction with the upper and lower bound set as parameters. Args: lower_bound (int): lower bound of the range. upper_bound (int): upper bound of the range. Returns: Function: callback method that can be used by pyparsing setParseAction.
codesearchnet
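A hedged pyparsing sketch of how the returned callback is attached; the int-cast action mirrors how plaso pairs it with PyParseIntCast, and the element name here is an assumption.
import pyparsing
minutes = pyparsing.Word(pyparsing.nums, exact=2)
# Cast the token to int first, then range-check with the factory above.
minutes.setParseAction(lambda s, l, t: int(t[0]), PyParseRangeCheck(0, 59))
minutes.parseString('42')    # parses fine
# minutes.parseString('75')  # raises pyparsing.ParseException (exceeds upper bound)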
def __init__(self, lmda, theta, phi) -> None: self.lmda = lmda % 2 self.theta = theta % 2 self.phi = phi % 2
A QASM gate representing any single qubit unitary with a series of three rotations, Z, Y, and Z. The angles are normalized to the range [0, 2) half_turns. Args: lmda: Half turns to rotate about Z (applied first). theta: Half turns to rotate about Y. phi: Half turns to rotate about Z (applied last).
juraj-google-style
def join_sources(source_module: DeploymentModule, contract_name: str): joined_file = Path(__file__).parent.joinpath('joined.sol') remapping = {module: str(path) for (module, path) in contracts_source_path().items()} command = ['./utils/join-contracts.py', '--import-map', json.dumps(remapping), str(contracts_source_path_of_deployment_module(source_module).joinpath((contract_name + '.sol'))), str(joined_file)] working_dir = Path(__file__).parent.parent try: subprocess.check_call(command, cwd=working_dir) except subprocess.CalledProcessError as ex: print(f'cd {str(working_dir)}; {subprocess.list2cmdline(command)} failed.') raise ex return joined_file.read_text()
Use join-contracts.py to concatenate all imported Solidity files. Args: source_module: a module name to look up contracts_source_path() contract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc.
codesearchnet
def block_start(self, previous_block): previous_header_bytes = previous_block.header previous_header = BlockHeader() previous_header.ParseFromString(previous_header_bytes) block_info = BlockInfo(block_num=previous_header.block_num, previous_block_id=previous_header.previous_block_id, signer_public_key=previous_header.signer_public_key, header_signature=previous_block.header_signature, timestamp=int(time.time())) return [self.create_batch(block_info)]
Returns an ordered list of batches to inject at the beginning of the block. Can also return None if no batches should be injected. Args: previous_block (Block): The previous block. Returns: A list of batches to inject.
codesearchnet
def format_snippet(sensor_graph): output = [] output.append('disable') output.append('clear') output.append('reset') for node in sensor_graph.dump_nodes(): output.append('add_node "{}"'.format(node)) for streamer in sensor_graph.streamers: line = "add_streamer '{}' '{}' {} {} {}".format(streamer.selector, streamer.dest, streamer.automatic, streamer.format, streamer.report_type) if (streamer.with_other is not None): line += ' --withother {}'.format(streamer.with_other) output.append(line) for (stream, value) in sorted(sensor_graph.constant_database.items(), key=(lambda x: x[0].encode())): output.append("set_constant '{}' {}".format(stream, value)) output.append('persist') output.append('back') app_tag = sensor_graph.metadata_database.get('app_tag') app_version = sensor_graph.metadata_database.get('app_version') if (app_tag is not None): if (app_version is None): app_version = '0.0' output.append('test_interface') output.append(("set_version app %d --version '%s'" % (app_tag, app_version))) output.append('back') output.append('config_database') output.append('clear_variables') for (slot, conf_vars) in sensor_graph.config_database.items(): for (conf_var, conf_def) in conf_vars.items(): (conf_type, conf_val) = conf_def if (conf_type == 'binary'): conf_val = ('hex:' + hexlify(conf_val)) elif isinstance(conf_val, str): conf_val = ('"%s"' % conf_val) output.append("set_variable '{}' {} {} {}".format(slot, conf_var, conf_type, conf_val)) output.append('back') output.append('reset') return ('\n'.join(output) + '\n')
Format this sensor graph as iotile command snippets. This includes commands to reset and clear previously stored sensor graphs. Args: sensor_graph (SensorGraph): the sensor graph that we want to format
codesearchnet
def validate_functions(ast: BELAst, bo): if isinstance(ast, Function): log.debug(f"Validating: {ast.name}, {ast.function_type}, {ast.args}") function_signatures = bo.spec["functions"]["signatures"][ast.name]["signatures"] function_name = ast.name (valid_function, messages) = check_function_args( ast.args, function_signatures, function_name ) if not valid_function: message = ", ".join(messages) bo.validation_messages.append( ( "ERROR", "Invalid BEL Statement function {} - problem with function signatures: {}".format( ast.to_string(), message ), ) ) bo.parse_valid = False if hasattr(ast, "args"): for arg in ast.args: validate_functions(arg, bo) return bo
Recursively validate function signatures

Determine if function matches one of the available signatures. Also,

1. Add entity types to AST NSArg, e.g. Abundance, ...
2. Add optional to AST Arg (optional means it is not a fixed, required argument and needs to be sorted for canonicalization, e.g. reactants(A, B, C))

Args:
    ast: BEL AST or sub-AST function node to validate
    bo: bel object

Returns:
    bel object
juraj-google-style
def _addConfig(instance, config, parent_section):
    try:
        section_name = "{p}/{n}".format(p=parent_section, n=instance.NAME.lower())
        config.add_section(section_name)
        for k in instance.CONFIG.keys():
            config.set(section_name, k, instance.CONFIG[k])
    except Exception as e:
        print("[!] %s" % e)
Writes a section for a plugin.

Args:
    instance (object): Class instance for plugin
    config (object): ConfigParser object which holds the current config
    parent_section (str): Parent section for plugin. Usually 'checkers' or 'reports'
juraj-google-style
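A minimal usage sketch, assuming a hypothetical plugin class that exposes the NAME and CONFIG attributes the helper reads (class name and values are illustrative, not from the source):

import configparser

class DummyChecker(object):
    NAME = "DummyChecker"
    CONFIG = {"enabled": "True", "timeout": "30"}

config = configparser.ConfigParser()
_addConfig(DummyChecker(), config, "checkers")
# config now contains a "checkers/dummychecker" section holding the plugin's keys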
def _find_docstring_line(self, start, end): for i in range(start, (end + 1)): if (i in self._tokenized_triple_quotes): return i return None
Find the row where a docstring starts in a function or class. This will search for the first match of a triple quote token in row sequence from the start of the class or function. Args: start: the row where the class / function starts. end: the row where the class / function ends. Returns: int: the row number where the docstring is found.
codesearchnet
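The method reduces to a membership test against a pre-tokenized set of triple-quote rows; a standalone sketch of the same logic (the row numbers are illustrative):

def find_docstring_line(start, end, triple_quote_rows):
    # Return the first row in [start, end] that opens a docstring, else None.
    for row in range(start, end + 1):
        if row in triple_quote_rows:
            return row
    return None

assert find_docstring_line(10, 20, {11, 35}) == 11  # class at row 10, docstring at row 11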
def __init__(cls, name, bases, dictionary): super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary) if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary: return descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY] service_stub_builder = _ServiceStubBuilder(descriptor) service_stub_builder.BuildServiceStub(cls)
Creates a message service stub class. Args: name: Name of the class (ignored, here). bases: Base classes of the class being constructed. dictionary: The class dictionary of the class being constructed. dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object describing this protocol service type.
juraj-google-style
def _merge_field(self, json_value: Any, field: descriptor.FieldDescriptor, parent: message.Message) -> None:
    if not annotation_utils.is_primitive_type(field.message_type) and proto_utils.field_is_set(parent, field):
        raise ValueError(f'Target field {field.full_name} is already set.')
    if field.containing_oneof is not None:
        oneof_field = parent.DESCRIPTOR.oneofs_by_name[field.containing_oneof.name]
        if annotation_utils.is_primitive_type(field.message_type) and oneof_field.full_name == field.full_name:
            raise ValueError(f'Cannot set field {field.full_name} since oneof field {oneof_field.full_name} is already set.')
    existing_field_size = proto_utils.field_content_length(parent, field)
    if proto_utils.field_is_repeated(field):
        if not isinstance(json_value, list):
            raise ValueError(f'Attempted to merge a repeated field, {field.name}, a json_value with type {type(json_value)} instead of a list.')
        if existing_field_size != 0 and existing_field_size != len(json_value):
            raise ValueError(f'Repeated primitive list length does not match extension list for field: {field.full_name!r}.')
    json_value = json_value if proto_utils.field_is_repeated(field) else [json_value]
    for i, value in enumerate(json_value):
        parsed_value = self._parse_field_value(field, value)
        if existing_field_size > 0:
            field_value = proto_utils.get_value_at_field_index(parent, field, i)
            field_value.MergeFrom(parsed_value)
            extensions.clear_fhir_extensions_with_url(field_value, extensions.PRIMITIVE_HAS_NO_VALUE_URL)
        else:
            field_value = proto_utils.set_in_parent_or_add(parent, field)
            field_value.MergeFrom(parsed_value)
Merges the json_value into the provided field of the parent Message. Args: json_value: The JSON value to set. field: The FieldDescriptor of the field to set in parent. parent: The parent Message to set the value on. Raises: ValueError: In the event that a non-primitive field has already been set. ValueError: In the event that a oneof field has already been set.
github-repos
def _get_scopes(state, names: Sequence[str], ctx) -> Sequence[abstract.InterpreterClass | abstract.InterpreterFunction]: scopes = [] for name in names: prev = scopes[-1] if scopes else None if not prev: try: _, var = ctx.vm.load_global(state, name) except KeyError: break elif isinstance(prev, abstract.InterpreterClass): if name in prev.members: var = prev.members[name] else: break else: assert isinstance(prev, abstract.InterpreterFunction) if prev.last_frame and name in prev.last_frame.f_locals.pyval: var = prev.last_frame.f_locals.pyval[name] else: break try: scopes.append(abstract_utils.get_atomic_value(var, (abstract.InterpreterClass, abstract.InterpreterFunction))) except abstract_utils.ConversionError: break return scopes
Gets the class or function objects for a sequence of nested scope names. For example, if the code under analysis is: class Foo: def f(self): def g(): ... then when called with ['Foo', 'f', 'g'], this method returns [InterpreterClass(Foo), InterpreterFunction(f), InterpreterFunction(g)]. Arguments: state: The current state. names: A sequence of names for consecutive nested scopes in the module under analysis. Must start with a module-level name. ctx: The current context. Returns: The class or function object corresponding to each name in 'names'.
github-repos
def add_loss(loss, loss_collection=ops.GraphKeys.LOSSES): if loss_collection and (not context.executing_eagerly()): ops.add_to_collection(loss_collection, loss)
Adds an externally defined loss to the collection of losses.

Args:
    loss: A loss `Tensor`.
    loss_collection: Optional collection to add the loss to.
github-repos
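A graph-mode usage sketch, assuming the function is exposed as tf.compat.v1.losses.add_loss (the tensors are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

predictions = tf.placeholder(tf.float32, shape=[None])
targets = tf.placeholder(tf.float32, shape=[None])
custom_loss = tf.reduce_mean(tf.abs(predictions - targets))

tf.losses.add_loss(custom_loss)          # registers the tensor in GraphKeys.LOSSES
regular_losses = tf.losses.get_losses()  # [custom_loss]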
def in_same_dir(as_file, target_file): return os.path.abspath(os.path.join(os.path.dirname(as_file), target_file))
Return an absolute path to a target file that is located in the same directory as as_file

Args:
    as_file: File name (typically __file__); the directory path of this file is used
    target_file: Name of the target file
juraj-google-style
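For example, resolving a data file that ships next to the calling module (the filename is illustrative):

config_path = in_same_dir(__file__, "settings.yaml")
# e.g. "/abs/path/to/this/package/settings.yaml"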
def fail_steamid(channel): gui = ui_embed.UI( channel, "That SteamID doesn't exist.", "You can get your SteamID by going to your profile page and looking at the url, " "or you can set a custom ID by going to edit profile on your profile page.", modulename=modulename, colour=0x0088FF ) return gui
Creates an embed UI for invalid SteamIDs Args: channel (discord.Channel): The Discord channel to bind the embed to Returns: ui (ui_embed.UI): The embed UI object
juraj-google-style
def _build(self, one_hot_input_sequence): input_shape = one_hot_input_sequence.get_shape() batch_size = input_shape[1] batch_embed_module = snt.BatchApply(self._embed_module) input_sequence = batch_embed_module(one_hot_input_sequence) input_sequence = tf.nn.relu(input_sequence) initial_state = self._core.initial_state(batch_size) if self._use_dynamic_rnn: output_sequence, final_state = tf.nn.dynamic_rnn( cell=self._core, inputs=input_sequence, time_major=True, initial_state=initial_state) else: rnn_input_sequence = tf.unstack(input_sequence) output, final_state = tf.contrib.rnn.static_rnn( cell=self._core, inputs=rnn_input_sequence, initial_state=initial_state) output_sequence = tf.stack(output) batch_output_module = snt.BatchApply(self._output_module) output_sequence_logits = batch_output_module(output_sequence) return output_sequence_logits, final_state
Builds the deep LSTM model sub-graph.

Args:
    one_hot_input_sequence: A Tensor with the input sequence encoded as a one-hot representation. Its dimensions should be `[truncation_length, batch_size, output_size]`.

Returns:
    Tuple of the Tensor of output logits for the batch, with dimensions `[truncation_length, batch_size, output_size]`, and the final state of the unrolled core.
juraj-google-style
def xw_plus_b(x, weights, biases, name=None): with ops.name_scope(name, 'xw_plus_b', [x, weights, biases]) as name: x = ops.convert_to_tensor(x, name='x') weights = ops.convert_to_tensor(weights, name='weights') biases = ops.convert_to_tensor(biases, name='biases') mm = math_ops.matmul(x, weights) return bias_add(mm, biases, name=name)
Computes matmul(x, weights) + biases. Args: x: a 2D tensor. Dimensions typically: batch, in_units weights: a 2D tensor. Dimensions typically: in_units, out_units biases: a 1D tensor. Dimensions: out_units name: A name for the operation (optional). If not specified "xw_plus_b" is used. Returns: A 2-D Tensor computing matmul(x, weights) + biases. Dimensions typically: batch, out_units.
github-repos
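A small usage sketch with the shapes from the docstring, assuming the op is reachable as tf.compat.v1.nn.xw_plus_b (values are illustrative):

import tensorflow as tf

x = tf.constant([[1.0, 2.0]])            # [batch=1, in_units=2]
w = tf.constant([[1.0], [1.0]])          # [in_units=2, out_units=1]
b = tf.constant([0.5])                   # [out_units=1]
y = tf.compat.v1.nn.xw_plus_b(x, w, b)   # [[3.5]]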
def add_argument_to(self, parser): from devassistant.cli.devassistant_argparse import DefaultIffUsedActionFactory if isinstance(self.kwargs.get('action', ''), list): if self.kwargs['action'][0] == 'default_iff_used': self.kwargs['action'] = DefaultIffUsedActionFactory.generate_action( self.kwargs['action'][1]) self.kwargs.pop('preserved', None) try: parser.add_argument(*self.flags, **self.kwargs) except Exception as ex: problem = "Error while adding argument '{name}': {error}".\ format(name=self.name, error=repr(ex)) raise exceptions.ExecutionException(problem)
Used by the CLI to add this as an argument to an argparse parser.

Args:
    parser: parser to add this argument to
juraj-google-style
def grow(self, times=1): self.nodes.append([]) for (n, node) in enumerate(self.nodes[self.age]): if (self.age == 0): p_node = Node(self.pos[:2]) else: p_node = self._get_node_parent((self.age - 1), n) angle = node.get_node_angle(p_node) for i in range(self.comp): tot_angle = self.__get_total_angle(angle, i) length = self.__get_total_length((self.age + 1), i) self.nodes[(self.age + 1)].append(node.make_new_node(length, tot_angle)) self.age += 1 if (times > 1): self.grow((times - 1))
Let the tree grow.

Args:
    times (integer): Indicates how many times the tree will grow.
codesearchnet
def logged(level=logging.DEBUG): def wrap(f): _logger = logging.getLogger('{}.{}'.format(f.__module__, f.__name__)) def wrapped_f(*args, **kwargs): _logger.log(level, 'Called at {} with args = {} and kwargs = {}'.format(datetime.datetime.now(), args, kwargs)) data = f(*args, **kwargs) _logger.log(level, 'Done at {} with args = {} and kwargs = {}'.format(datetime.datetime.now(), args, kwargs)) return data return wrapped_f return wrap
Useful logging decorator. If a method is logged, the beginning and end of the method call will be logged at a pre-specified level. Args: level: Level to log method at. Defaults to DEBUG.
codesearchnet
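Typical use as a decorator; entry and exit are logged at the chosen level (the function and level here are illustrative):

import logging
logging.basicConfig(level=logging.DEBUG)

@logged(level=logging.INFO)
def add(a, b):
    return a + b

add(2, 3)  # logs 'Called at ...' and 'Done at ...' around the call, returns 5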
def _GetUnsortedNotifications(self, queue_shard, notifications_by_session_id=None): if (notifications_by_session_id is None): notifications_by_session_id = {} end_time = (self.frozen_timestamp or rdfvalue.RDFDatetime.Now()) for notification in self.data_store.GetNotifications(queue_shard, end_time): existing = notifications_by_session_id.get(notification.session_id) if existing: if (notification.first_queued > existing.first_queued): notifications_by_session_id[notification.session_id] = notification elif ((notification.first_queued == existing.first_queued) and (notification.last_status > existing.last_status)): logging.warning('Notifications with equal first_queued fields detected: %s %s', notification, existing) notifications_by_session_id[notification.session_id] = notification else: notifications_by_session_id[notification.session_id] = notification return notifications_by_session_id
Returns all the available notifications for a queue_shard.

Args:
    queue_shard: urn of queue shard
    notifications_by_session_id: store notifications in this dict rather than creating a new one

Returns:
    dict of notifications. Keys are session ids.
codesearchnet
def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: residual = hidden_states hidden_states, attn_weights, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
github-repos
def parse_flags_with_usage(args): try: return FLAGS(args) except flags.Error as error: sys.stderr.write('FATAL Flags parsing error: %s\n' % error) sys.stderr.write('Pass --helpshort or --helpfull to see help on flags.\n') sys.exit(1)
Tries to parse the flags, print usage, and exit if unparseable. Args: args: [str], a non-empty list of the command line arguments including program name. Returns: [str], a non-empty list of remaining command line arguments after parsing flags, including program name.
juraj-google-style
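A usage sketch, assuming the surrounding module binds FLAGS = flags.FLAGS in the usual absl style (the flag name and default are illustrative):

import sys
from absl import flags

flags.DEFINE_integer("workers", 4, "Number of worker threads.")

def main():
    argv = parse_flags_with_usage(sys.argv)  # exits with a usage hint on bad flags
    print("remaining argv:", argv[1:], "workers:", flags.FLAGS.workers)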
def __init__(self, campfire, data): dataType = type(data) if dataType == types.StringType or dataType == types.UnicodeType: messageType = self._TYPE_PASTE if data.find("\n") >= 0 else self._TYPE_TEXT if messageType == self._TYPE_TEXT: matches = re.match("^https?: if matches: messageType = self._TYPE_TWEET data = { "type": messageType, "body": data } super(Message, self).__init__(campfire) self.set_data(data, ["created_at"]) self.user = None self.room = None if "user_id" in data and data["user_id"]: self.user = self._campfire.get_user(data["user_id"]) if "room_id" in data and data["room_id"]: self.room = self._campfire.get_room(data["room_id"]) if self.is_upload(): self.upload = self._connection.get("room/%s/messages/%s/upload" % (self.room.id, self.id), key="upload") if "full_url" in self.upload: self.upload["url"] = self.upload["full_url"] del self.upload["full_url"] if self.is_tweet(): matches = re.match("(.+)\s+--\s+@([^,]+),\s*(.+)$", self.body) if matches: self.tweet = { "tweet": matches.group(1), "user": matches.group(2), "url": matches.group(3) } else: tweet_data = {} if re.match("^---", self.body): for line in self.body.split("\n")[1:]: matches = re.match('^:([^:]+):\s*"?(.+)"?$', line) if matches: tweet_data[matches.group(1)] = matches.group(2) if tweet_data and "author_username" in tweet_data and "message" in tweet_data and "id" in tweet_data: self.tweet = { "tweet": tweet_data["message"], "user": tweet_data["author_username"], "url": "http: } else: self.type = self._TYPE_TEXT
Initialize. Args: campfire (:class:`Campfire`): Campfire instance data (dict or str): If string, message type will be set to either paste or text
juraj-google-style
def select_embedding_from_source(cur, source_nodelist, source_edgelist, target_nodelist, target_edgelist): encoded_data = {'target_num_nodes': len(target_nodelist), 'target_num_edges': len(target_edgelist), 'target_edges': json.dumps(target_edgelist, separators=(',', ':')), 'source_num_nodes': len(source_nodelist), 'source_num_edges': len(source_edgelist), 'source_edges': json.dumps(source_edgelist, separators=(',', ':'))} select = '\n SELECT\n source_node,\n chain\n FROM\n embedding_component_view\n WHERE\n source_num_edges = :source_num_edges AND\n source_edges = :source_edges AND\n source_num_nodes = :source_num_nodes AND\n\n target_num_edges = :target_num_edges AND\n target_edges = :target_edges AND\n target_num_nodes = :target_num_nodes\n ' embedding = {v: json.loads(chain) for (v, chain) in cur.execute(select, encoded_data)} return embedding
Select an embedding from the source graph and target graph.

Args:
    cur (:class:`sqlite3.Cursor`):
        An sqlite3 cursor. This function is meant to be run within a :obj:`with` statement.
    source_nodelist (list): The nodes in the source graph. Should be integer valued.
    source_edgelist (list): The edges in the source graph.
    target_nodelist (list): The nodes in the target graph. Should be integer valued.
    target_edgelist (list): The edges in the target graph.

Returns:
    dict: The mapping from the source graph to the target graph. In the form {v: {s, ...}, ...} where v is a variable in the source model and s is a variable in the target model.
codesearchnet
def Substitute(self, pattern): if isinstance(pattern, bytes): substs = [re.escape(subst.encode("ascii")) for subst in self._substs] regex = re.compile(b"|".join(substs)) def Replacement(match): key = match.group(0).decode("ascii") return self._substs[key].encode("utf-8") elif isinstance(pattern, Text): substs = [re.escape(subst) for subst in self._substs] regex = re.compile("|".join(substs)) def Replacement(match): key = match.group(0) return self._substs[key] else: raise TypeError("Unexpected pattern type '{}'".format(type(pattern))) if not substs: return pattern else: return regex.sub(Replacement, pattern)
Formats given pattern with this substitution environment.

A pattern can contain placeholders for variables (`%%foo%%`) and scopes (`%%bar.baz%%`) that are replaced with concrete values in this substitution environment (specified in the constructor).

Args:
    pattern: A pattern with placeholders to substitute.

Returns:
    A pattern with placeholders substituted with concrete values.
juraj-google-style
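A simplified text-only sketch of the same substitution idea (the placeholders and values are illustrative):

import re

substs = {"%%foo%%": "bar", "%%scope.baz%%": "qux"}
regex = re.compile("|".join(re.escape(key) for key in substs))
result = regex.sub(lambda match: substs[match.group(0)], "path/%%foo%%/%%scope.baz%%")
# result == "path/bar/qux"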
def __init__(self, server_address, username, password): _wtflog.info( "connecting to %s, using %s:%s", server_address, username, password) self._mail = imaplib.IMAP4_SSL(server_address) self._mail.login(username, password) _wtflog.info("connected.")
Constructor Args: server_address (str): Email Server address. username (str): Username password (str): Password
juraj-google-style
def invoke(self, line): finished = True while (len(line) > 0): (val, line, finished) = self.invoke_one(line) if (val is not None): iprint(val) return finished
Invoke one or more functions given a list of arguments.

The functions are searched for using the current context on the context stack and their annotated type information is used to convert all of the string parameters passed in line to appropriate python types.

Args:
    line (list): The list of command line arguments.

Returns:
    bool: Whether the last invoked function finished without creating a new context (False if a new context was created).
codesearchnet
def jsoned(struct, wrap=True, meta=None, struct_key='result', pre_render_callback=None): return _json.dumps(structured(struct, wrap=wrap, meta=meta, struct_key=struct_key, pre_render_callback=pre_render_callback), default=json_encoder)
Provides a json dump of the struct

Args:
    struct: The data to dump
    wrap (bool, optional): Specify whether to wrap the struct in an enclosing dict
    struct_key (str, optional): The string key which will contain the struct in the result dict
    meta (dict, optional): An optional dictionary to merge with the output dictionary.

Examples:
    >>> jsoned([3,4,5])
    ... '{"status": "success", "result": [3, 4, 5]}'
    >>> jsoned([3,4,5], wrap=False)
    ... '[3, 4, 5]'
codesearchnet
def tracker(obj): import types as typ global oids, uuids import six from inspect import isclass untracked = (six.string_types, six.integer_types, float, complex, six.text_type) semitrack = (list, dict, set, tuple) if six.PY3: semitrack = semitrack + (range, filter, map) if (isinstance(obj, semitrack) and all([isinstance(t, untracked) for t in obj])): if len(obj) > 0: semiform = "{0} len={1:d} min={2} max={3}" return semiform.format(type(obj), len(obj), min(obj), max(obj)) else: semiform = "{0} len={1:d}" return semiform.format(type(obj), len(obj)) elif isinstance(obj, semitrack): result = [] for o in obj[0:min((len(obj), 5))]: track = tracker(o) if isinstance(track, Instance): result.append(track.uuid) else: result.append(track) if len(obj) > 5: result.append("... ({0:d} items)".format(len(obj))) return tuple(result) elif isinstance(obj, slice): return "slice({}, {}, {})".format(obj.start, obj.stop, obj.step) elif type(obj) is type: return obj.__name__ elif type(obj) is typ.LambdaType: if hasattr(obj, "__fqdn__"): return obj.__fqdn__ else: if six.PY2: _code = obj.func_code else: _code = obj.__code__ return "lambda ({})".format(', '.join(_code.co_varnames)) elif type(obj) in [typ.FunctionType, typ.MethodType]: return obj.__name__ elif not isinstance(obj, untracked): oid = id(obj) if oid in oids: result = oids[oid] else: result = Instance(oid, obj) oids[oid] = result uuids[result.uuid] = result return result else: return None
Returns the :class:`Instance` of the specified object if it is one that we track by default. Args: obj (object): any python object passed as an argument to a method. Returns: Instance: if the object is trackable, the Instance instance of that object; else None.
juraj-google-style
def with_rank(self, rank): try: return self.merge_with(unknown_shape(rank=rank)) except ValueError: raise ValueError('Shape %s must have rank %d' % (self, rank))
Returns a shape based on `self` with the given rank. This method promotes a completely unknown shape to one with a known rank. Args: rank: An integer. Returns: A shape that is at least as specific as `self` with the given rank. Raises: ValueError: If `self` does not represent a shape with the given `rank`.
github-repos
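Usage with tf.TensorShape (a sketch; the shapes are illustrative):

import tensorflow as tf

unknown = tf.TensorShape(None)
print(unknown.with_rank(2))                 # (None, None): rank is now known, dims still unknown
print(tf.TensorShape([4, 3]).with_rank(2))  # (4, 3): already compatible, returned as-is
# tf.TensorShape([4, 3]).with_rank(3) raises ValueError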
def sanity_check_actions(actions_spec): actions = copy.deepcopy(actions_spec) is_unique = ('type' in actions) if is_unique: actions = dict(action=actions) for name, action in actions.items(): if 'type' not in action: action['type'] = 'int' if action['type'] == 'int': if 'num_actions' not in action: raise TensorForceError("Action requires value 'num_actions' set!") elif action['type'] == 'float': if ('min_value' in action) != ('max_value' in action): raise TensorForceError("Action requires both values 'min_value' and 'max_value' set!") if 'shape' not in action: action['shape'] = () if isinstance(action['shape'], int): action['shape'] = (action['shape'],) return actions, is_unique
Sanity checks an actions dict, used to define the action space for an MDP. Throws an error or warns if mismatches are found. Args: actions_spec (Union[None,dict]): The spec-dict to check (or None). Returns: Tuple of 1) the action space desc and 2) whether there is only one component in the action space.
juraj-google-style
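A quick check of the normalization on a single ('unique') int action spec (values are illustrative):

spec = {'type': 'int', 'num_actions': 4}
actions, is_unique = sanity_check_actions(spec)
# is_unique == True
# actions == {'action': {'type': 'int', 'num_actions': 4, 'shape': ()}}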
def CreateDataTypeMapByType(cls, data_type_definition): data_type_map_class = cls._MAP_PER_DEFINITION.get(data_type_definition.TYPE_INDICATOR, None) if (not data_type_map_class): return None return data_type_map_class(data_type_definition)
Creates a specific data type map by type indicator.

Args:
    data_type_definition (DataTypeDefinition): data type definition.

Returns:
    DataTypeMap: data type map or None if the data type definition is not available.
codesearchnet
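The class method is a plain dictionary dispatch on TYPE_INDICATOR; a generic sketch of the pattern (the names here are illustrative, not the real map classes):

class IntegerMap:
    def __init__(self, data_type_definition):
        self.definition = data_type_definition

_MAP_PER_DEFINITION = {"integer": IntegerMap}

def create_data_type_map(data_type_definition):
    # Look up the map class registered for this definition's type indicator.
    map_class = _MAP_PER_DEFINITION.get(data_type_definition.TYPE_INDICATOR)
    return map_class(data_type_definition) if map_class else None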
def _TensorArrayConcatGrad(op: ops.Operation, grad, unused_lengths_grad): handle = op.inputs[0] flow = op.inputs[1] lengths = op.outputs[1] dtype = op.get_attr('dtype') grad_source = _GetGradSource(grad) g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow, colocate_with_first_write_call=False).grad(source=grad_source, flow=flow) u_g = g.split(grad, lengths=lengths) return [None, u_g.flow]
Gradient for TensorArrayConcat. Args: op: Forward TensorArrayConcat op. grad: Gradient `Tensor` to TensorArrayConcat. Returns: A flow `Tensor`, which can be used in control dependencies to force the write of `grad` to the gradient `TensorArray`.
github-repos