def publish(msg="checkpoint: publish package"):
    test = check()
    if test.succeeded:
        sdist = local("python setup.py sdist")
        if sdist.succeeded:
            build = local('python setup.py build && python setup.py bdist_egg')
            if build.succeeded:
                upload = local("twine upload dist/*")
                if upload.succeeded:
                    tag()
Deploy the app to PyPI. Args: msg (str, optional): Checkpoint message for the release.
juraj-google-style
def forward(self, hidden_states: torch.Tensor, position_embeddings: Optional[torch.Tensor] = None,
            reference_points=None, spatial_shapes=None, level_start_index=None,
            encoder_hidden_states: Optional[torch.Tensor] = None,
            encoder_attention_mask: Optional[torch.Tensor] = None,
            output_attentions: Optional[bool] = False):
    # Self attention
    residual = hidden_states
    hidden_states, self_attn_weights = self.self_attn(
        hidden_states=hidden_states,
        position_embeddings=position_embeddings,
        output_attentions=output_attentions)
    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = residual + hidden_states
    hidden_states = self.self_attn_layer_norm(hidden_states)
    # Cross attention
    second_residual = hidden_states
    cross_attn_weights = None
    hidden_states, cross_attn_weights = self.encoder_attn(
        hidden_states=hidden_states,
        attention_mask=encoder_attention_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
        position_embeddings=position_embeddings,
        reference_points=reference_points,
        spatial_shapes=spatial_shapes,
        level_start_index=level_start_index,
        output_attentions=output_attentions)
    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = second_residual + hidden_states
    hidden_states = self.encoder_attn_layer_norm(hidden_states)
    # Feed-forward network
    residual = hidden_states
    hidden_states = self.activation_fn(self.fc1(hidden_states))
    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
    hidden_states = self.fc2(hidden_states)
    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = residual + hidden_states
    hidden_states = self.final_layer_norm(hidden_states)
    outputs = (hidden_states,)
    if output_attentions:
        outputs += (self_attn_weights, cross_attn_weights)
    return outputs
Args: hidden_states (`torch.FloatTensor`): Input to the layer of shape `(batch, seq_len, embed_dim)`. position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings that are added to the queries and keys in the self-attention layer. reference_points (`torch.FloatTensor`, *optional*): Reference points. spatial_shapes (`torch.LongTensor`, *optional*): Spatial shapes. level_start_index (`torch.LongTensor`, *optional*): Level start index. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
github-repos
def get_model_indexes(model):
    indexes = []
    for index in get_index_names():
        for app_model in get_index_models(index):
            if app_model == model:
                indexes.append(index)
    return indexes
Return list of all indexes in which a model is configured. A model may be configured to appear in multiple indexes. This function will return the names of the indexes as a list of strings. This is useful if you want to know which indexes need updating when a model is saved. Args: model: a Django model class.
codesearchnet
def _add_row_partitions(self, flat_values, validate=False):
    if self.row_partitions:
        if validate:
            flat_values = self._validate_flat_values(flat_values)
        return ragged_tensor.RaggedTensor._from_nested_row_partitions(
            flat_values, self.row_partitions, validate=False)
    else:
        return flat_values
Add row partitions to flat_values, if necessary. If the shape is truly ragged, then this adds the row_partitions. If the shape is dense, then this just returns flat_values. Args: flat_values: the flat_values of a ragged tensor with this shape, or a dense tensor with this shape. validate: validate the flat_values have the right first dimension. Returns: flat_values reshaped to have row_partitions.
github-repos
def symbol_top(body_output, targets, model_hparams, vocab_size):
    del targets  # unused
    if model_hparams.shared_embedding_and_softmax_weights:
        scope_name = "shared"
        reuse = tf.AUTO_REUSE
    else:
        scope_name = "softmax"
        reuse = False
    with tf.variable_scope(scope_name, reuse=reuse):
        body_output_shape = common_layers.shape_list(body_output)
        var = get_weights(model_hparams, vocab_size, body_output_shape[-1])
        if (model_hparams.factored_logits and
                model_hparams.mode == tf.estimator.ModeKeys.TRAIN):
            body_output = tf.expand_dims(body_output, 3)
            return common_layers.FactoredTensor(body_output, var)
        else:
            body_output = tf.reshape(body_output, [-1, body_output_shape[-1]])
            logits = tf.matmul(body_output, var, transpose_b=True)
            return tf.reshape(logits, body_output_shape[:-1] + [1, vocab_size])
Generate logits. Args: body_output: A Tensor with shape [batch, p0, p1, model_hparams.hidden_size]. targets: Unused. model_hparams: HParams, model hyperparameters. vocab_size: int, vocabulary size. Returns: logits: A Tensor with shape [batch, p0, p1, ?, vocab_size].
juraj-google-style
def _get_event_id(object_type: str) -> str:
    key = _keys.event_counter(object_type)
    DB.watch(key, pipeline=True)
    count = DB.get_value(key)
    DB.increment(key)
    DB.execute()
    if count is None:
        count = 0
    return '{}_event_{:08d}'.format(object_type, int(count))
Return an event key for the event on the object type. This must be a unique event id for the object. Args: object_type (str): Type of object Returns: str, event id
juraj-google-style
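For illustration, the zero-padded id produced by the format string above, with a hypothetical counter value of 41 for object type 'pb':

>>> '{}_event_{:08d}'.format('pb', 41)
'pb_event_00000041'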
def convert(self, point):
    x, y = point
    x1, y1 = x - self.x_offset, y - self.y_offset
    logger.debug('converted {} {} ==> {} {}'.format(x, y, x1, y1))
    return (x1, y1)
Convert a point from one coordinate system to another. Args: point: tuple(int x, int y) The point in the original coordinate system. Returns: converted_point: tuple(int x, int y) The point in the new coordinate system. Example: convert coordinate from original image into a pixel location within a cutout image.
codesearchnet
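A minimal sketch of the arithmetic behind convert above, assuming hypothetical offsets x_offset=100 and y_offset=50:

x_offset, y_offset = 100, 50          # hypothetical offsets
x, y = 110, 55                        # point in the original image
print((x - x_offset, y - y_offset))   # -> (10, 5), the pixel inside the cutout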
def users_setPresence(self, *, presence: str, **kwargs) -> SlackResponse:
    kwargs.update({"presence": presence})
    return self.api_call("users.setPresence", json=kwargs)
Manually sets user presence. Args: presence (str): Either 'auto' or 'away'.
juraj-google-style
def convert_relu(params, w_name, scope_name, inputs, layers, weights, names):
    print('Converting relu ...')
    if names == 'short':
        tf_name = 'RELU' + random_string(4)
    elif names == 'keep':
        tf_name = w_name
    else:
        tf_name = w_name + str(random.random())
    relu = keras.layers.Activation('relu', name=tf_name)
    layers[scope_name] = relu(layers[inputs[0]])
Convert relu layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
juraj-google-style
def aggregate_and_return_name_for_input(self, out_graphdef):
    flattened = self.flatten_nodes()
    if (self.aggregation == OpHint.AGGREGATE_FIRST or
            self.aggregation == OpHint.AGGREGATE_LAST):
        assert len(flattened) == 1
    if len(flattened) == 1 and self.aggregation != OpHint.AGGREGATE_STACK:
        return _tensor_name_base(flattened[0].name)
    else:
        new_node = _node_def_pb2.NodeDef()
        new_node.op = 'Pack'
        new_node.name = 'OpHintStack-%s' % flattened[0].name
        new_node.attr['N'].i = len(flattened)
        new_node.attr['T'].type = flattened[0].attr['T'].type
        for discrete in flattened:
            new_node.input.append(_tensor_name_base(discrete.name))
        out_graphdef.node.extend([new_node])
        return new_node.name
This adds the nodes to out_graphdef and returns an aggregated output. In particular, if you have 4 inputs to a hint stub, this will be the node that you can use as an output. E.g., if you have 4 timesteps from a static RNN, then a fused UnidirectionalLSTM will expect 1 input with all 4 time steps. So here we make a pack and return the output name of that pack. Args: out_graphdef: A graphdef that is ready to have this input added. Returns: The name of a pack that aggregates this node.
github-repos
def get(self, name):
    return self.prepare_model(self.client.api.inspect_image(name))
Gets an image. Args: name (str): The name of the image. Returns: (:py:class:`Image`): The image. Raises: :py:class:`docker.errors.ImageNotFound` If the image does not exist. :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def _parse_rd(self, config):
    match = RD_RE.search(config)
    if match:
        value = match.group('value')
    else:
        value = match
    return dict(rd=value)
_parse_rd scans the provided configuration block and extracts the vrf rd. The return dict is intended to be merged into the response dict. Args: config (str): The vrf configuration block from the nodes running configuration Returns: dict: resource dict attribute
juraj-google-style
def _build_mac_signature_key_information(self, value):
    if value is None:
        return None
    if not isinstance(value, dict):
        raise TypeError('MAC/signature key information must be a dictionary.')
    cryptographic_parameters = value.get('cryptographic_parameters')
    if cryptographic_parameters:
        cryptographic_parameters = self._build_cryptographic_parameters(
            cryptographic_parameters)
    mac_signature_key_information = cobjects.MACSignatureKeyInformation(
        unique_identifier=value.get('unique_identifier'),
        cryptographic_parameters=cryptographic_parameters)
    return mac_signature_key_information
Build a MACSignatureKeyInformation struct from a dictionary. Args: value (dict): A dictionary containing the key/value pairs for a MACSignatureKeyInformation struct. Returns: MACSignatureKeyInformation: a MACSignatureKeyInformation struct Raises: TypeError: if the input argument is invalid
codesearchnet
def prefix(self, imod: YangIdentifier, mid: ModuleId) -> YangIdentifier:
    try:
        did = (imod, self.implement[imod])
    except KeyError:
        raise ModuleNotImplemented(imod) from None
    try:
        pmap = self.modules[mid].prefix_map
    except KeyError:
        raise ModuleNotRegistered(*mid) from None
    for p in pmap:
        if pmap[p] == did:
            return p
    raise ModuleNotImported(imod, mid)
Return the prefix corresponding to an implemented module. Args: imod: Name of an implemented module. mid: Identifier of the context module. Raises: ModuleNotImplemented: If `imod` is not implemented. ModuleNotRegistered: If `mid` is not registered in YANG library. ModuleNotImported: If `imod` is not imported in `mid`.
codesearchnet
def delete(self, key):
    path = self.object_path(key)
    if os.path.exists(path):
        os.remove(path)
Removes the object named by `key`. Args: key: Key naming the object to remove.
juraj-google-style
def _import_and_bind(self, imp):
    with self.block.alloc_temp() as mod, \
            self.block.alloc_temp('[]*πg.Object') as mod_slice:
        self.writer.write_checked_call2(
            mod_slice, 'πg.ImportModule(πF, {})', util.go_str(imp.name))
        for binding in imp.bindings:
            if binding.bind_type == imputil.Import.MODULE:
                self.writer.write('{} = {}[{}]'.format(
                    mod.name, mod_slice.expr, binding.value))
                self.block.bind_var(self.writer, binding.alias, mod.expr)
            else:
                self.writer.write('{} = {}[{}]'.format(
                    mod.name, mod_slice.expr, imp.name.count('.')))
                with self.block.alloc_temp() as member:
                    self.writer.write_checked_call2(
                        member, 'πg.GetAttr(πF, {}, {}, nil)',
                        mod.expr, self.block.root.intern(binding.value))
                    self.block.bind_var(self.writer, binding.alias, member.expr)
Generates code that imports a module and binds it to a variable. Args: imp: Import object representing an import of the form "import x.y.z" or "from x.y import z". Expects only a single binding.
juraj-google-style
def from_comm(cls, pub):
    filename = None
    if pub.b64_data:
        filename = cls._save_to_unique_filename(pub)
    return cls(
        title=pub.title,
        author=pub.author,
        pub_year=pub.pub_year,
        isbn=pub.isbn,
        urnnbn=pub.urnnbn,
        uuid=pub.uuid,
        aleph_id=pub.aleph_id,
        producent_id=pub.producent_id,
        is_public=pub.is_public,
        filename=pub.filename,
        is_periodical=pub.is_periodical,
        path=pub.path,
        file_pointer=filename
    )
Convert communication namedtuple to this class. Args: pub (obj): :class:`.Publication` instance which will be converted. Returns: obj: :class:`DBPublication` instance.
juraj-google-style
async def setup_round_robin_points(self, match_win: float = None, match_tie: float = None,
                                   game_win: float = None, game_tie: float = None):
    params = {}
    if match_win is not None:
        params['rr_pts_for_match_win'] = match_win
    if match_tie is not None:
        params['rr_pts_for_match_tie'] = match_tie
    if game_win is not None:
        params['rr_pts_for_game_win'] = game_win
    if game_tie is not None:
        params['rr_pts_for_game_tie'] = game_tie
    assert_or_raise(len(params) > 0, ValueError, 'At least one of the points must be given')
    await self.update(**params)
|methcoro| Args: match_win (float, optional): points awarded for a match win match_tie (float, optional): points awarded for a match tie game_win (float, optional): points awarded for a game win game_tie (float, optional): points awarded for a game tie Raises: ValueError: if none of the points is given APIException
juraj-google-style
def value_instance_to_pytd_type(self, node, v, instance, seen, view):
    if abstract_utils.is_recursive_annotation(v):
        return pytd.LateType(v.unflatten_expr() if self._detailed else v.expr)
    elif isinstance(v, abstract.Union):
        return pytd.UnionType(tuple(
            self.value_instance_to_pytd_type(node, t, instance, seen, view)
            for t in v.options))
    elif isinstance(v, abstract.AnnotationContainer):
        return self.value_instance_to_pytd_type(node, v.base_cls, instance, seen, view)
    elif isinstance(v, abstract.LiteralClass):
        if isinstance(v.value, abstract.Instance) and v.value.cls.is_enum:
            typ = pytd_utils.NamedTypeWithModule(
                v.value.cls.official_name or v.value.cls.name, v.value.cls.module)
            value = pytd.Constant(v.value.name, typ)
        elif isinstance(v.value.pyval, (str, bytes)):
            value = repr(v.value.pyval)
        elif isinstance(v.value.pyval, bool):
            value = self.ctx.loader.lookup_pytd('builtins', v.value.pyval)
        else:
            assert isinstance(v.value.pyval, int), v.value.pyval
            value = v.value.pyval
        return pytd.Literal(value)
    elif isinstance(v, typed_dict.TypedDictClass):
        return pytd.NamedType(v.name)
    elif isinstance(v, fiddle_overlay.BuildableType):
        param = self.value_instance_to_pytd_type(node, v.underlying, None, seen, view)
        return pytd.GenericType(
            base_type=pytd.NamedType(f'fiddle.{v.fiddle_type_name}'),
            parameters=(param,))
    elif isinstance(v, abstract.Class):
        if not self._detailed and v.official_name is None:
            return pytd.AnythingType()
        if seen is None:
            seen = frozenset()
        if instance in seen:
            type_params = ()
        else:
            type_params = tuple(t.name for t in v.template)
        if instance is not None:
            seen |= {instance}
        type_arguments = self._value_to_parameter_types(
            node, v, instance, type_params, seen, view)
        base = pytd_utils.NamedTypeWithModule(v.official_name or v.name, v.module)
        if self._is_tuple(v, instance):
            homogeneous = False
        elif v.full_name == 'typing.Callable':
            homogeneous = not isinstance(v, abstract.CallableClass)
        else:
            homogeneous = len(type_arguments) == 1
        return pytd_utils.MakeClassOrContainerType(base, type_arguments, homogeneous)
    elif isinstance(v, abstract.TYPE_VARIABLE_TYPES):
        return self._type_variable_to_def(node, v, v.name)
    elif isinstance(v, typing_overlay.Never):
        return pytd.NothingType()
    elif isinstance(v, abstract.Concatenate):
        params = tuple(
            self.value_instance_to_pytd_type(node, t, instance, seen, view)
            for t in v.args + [v.paramspec])
        return pytd.Concatenate(pytd.NamedType('typing.Concatenate'), parameters=params)
    else:
        log.info('Using Any for instance of %s', v.name)
        return pytd.AnythingType()
Get the PyTD type an instance of this object would have. Args: node: The node. v: The object. instance: The instance. seen: Already seen instances. view: A Variable -> binding map. Returns: A PyTD type.
github-repos
def bresenham(x1, y1, x2, y2):
    points = []
    issteep = abs(y2 - y1) > abs(x2 - x1)
    if issteep:
        x1, y1 = y1, x1
        x2, y2 = y2, x2
    rev = False
    if x1 > x2:
        x1, x2 = x2, x1
        y1, y2 = y2, y1
        rev = True
    deltax = x2 - x1
    deltay = abs(y2 - y1)
    error = int(deltax / 2)
    y = y1
    ystep = 1 if y1 < y2 else -1
    for x in range(x1, x2 + 1):
        if issteep:
            points.append((y, x))
        else:
            points.append((x, y))
        error -= deltay
        if error < 0:
            y += ystep
            error += deltax
    if rev:
        points.reverse()
    return points
Return a list of points in a bresenham line. Implementation hastily copied from RogueBasin. Returns: List[Tuple[int, int]]: A list of (x, y) points, including both the start and end-points.
codesearchnet
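A quick hand-checked call of bresenham above:

>>> bresenham(0, 0, 4, 2)
[(0, 0), (1, 0), (2, 1), (3, 1), (4, 2)]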
def ping(self, url, endpoint=''):
    r = self.get_url(url + "/" + endpoint)
    return r.status_code
Ping the server to make sure that you can access the base URL. Args: url (str): Base URL of the server. endpoint (str, optional): Endpoint appended to the base URL. Returns: int: HTTP status code of the response.
juraj-google-style
def _AddDSATargeting(client, ad_group_id, label_name):
    ad_group_criterion_service = client.GetService('AdGroupCriterionService', version='v201809')
    operation = {
        'operand': {
            'xsi_type': 'BiddableAdGroupCriterion',
            'adGroupId': ad_group_id,
            'criterion': {
                'xsi_type': 'Webpage',
                'parameter': {
                    'criterionName': 'Test criterion',
                    'conditions': [{
                        'operand': 'CUSTOM_LABEL',
                        'argument': label_name
                    }],
                }
            },
            'biddingStrategyConfiguration': {
                'bids': [{
                    'xsi_type': 'CpcBid',
                    'bid': {
                        'microAmount': 1500000
                    }
                }]
            }
        },
        'operator': 'ADD'
    }
    criterion = ad_group_criterion_service.mutate([operation])['value'][0]
    print('Web page criterion with ID "%d" and status "%s" was created.'
          % (criterion['criterion']['id'], criterion['userStatus']))
    return criterion
Set custom targeting for the page feed URLs based on a list of labels. Args: client: an AdWordsClient instance. ad_group_id: a str AdGroup ID. label_name: a str label name. Returns: A suds.sudsobject.Object representing the newly created webpage criterion.
juraj-google-style
def DeregisterSourceType(cls, source_type_class):
    if source_type_class.TYPE_INDICATOR not in cls._source_type_classes:
        raise KeyError(
            'Source type not set for type: {0:s}.'.format(
                source_type_class.TYPE_INDICATOR))
    del cls._source_type_classes[source_type_class.TYPE_INDICATOR]
Deregisters a source type. Source types are identified based on their type indicator. Args: source_type_class (type): source type. Raises: KeyError: if a source type is not set for the corresponding type indicator.
juraj-google-style
def GetDataByPath(self, path):
    _, path_data = self._paths.get(path, (None, None))
    return path_data
Retrieves the data associated to a path. Args: path (str): path of the file entry. Returns: bytes: data or None if not available.
codesearchnet
def on_modified(self, event):
    if not self._event_error:
        self.logger.info(u'Change detected from an edit on: %s', event.src_path)
        self.compile_dependencies(event.src_path)
Called when a file or directory is modified. Args: event: Watchdog event, ``watchdog.events.DirModifiedEvent`` or ``watchdog.events.FileModifiedEvent``.
codesearchnet
def skip(self, count=1):
    if self.closed():
        raise ValueError('Attempt to call skip() on a closed Queryable.')
    count = max(0, count)
    if count == 0:
        return self
    if hasattr(self._iterable, '__getitem__'):
        try:
            stop = len(self._iterable)
            return self._create(self._generate_optimized_skip_result(count, stop))
        except TypeError:
            pass
    return self._create(self._generate_skip_result(count))
Skip the first count contiguous elements of the source sequence. If the source sequence contains fewer than count elements returns an empty sequence and does not raise an exception. Note: This method uses deferred execution. Args: count: The number of elements to skip from the beginning of the sequence. If omitted defaults to one. If count is less than one the result sequence will be empty. Returns: A Queryable over the elements of source excluding the first count elements. Raises: ValueError: If the Queryable is closed().
codesearchnet
def enum(cls):
    assert cls.__bases__ == (object,)
    d = dict(cls.__dict__)
    new_type = type(cls.__name__, (int,), d)
    new_type.__module__ = cls.__module__
    map_ = {}
    for key, value in iteritems(d):
        if key.upper() == key and isinstance(value, integer_types):
            value_instance = new_type(value)
            setattr(new_type, key, value_instance)
            map_[value] = key

    def str_(self):
        if self in map_:
            return "%s.%s" % (type(self).__name__, map_[self])
        return "%d" % int(self)

    def repr_(self):
        if self in map_:
            return "<%s.%s: %d>" % (type(self).__name__, map_[self], int(self))
        return "%d" % int(self)

    setattr(new_type, "__repr__", repr_)
    setattr(new_type, "__str__", str_)
    return new_type
A decorator for creating an int enum class. Makes the values a subclass of the type and implements repr/str. The new class will be a subclass of int. Args: cls (type): The class to convert to an enum Returns: type: A new class :: @enum class Foo(object): FOO = 1 BAR = 2
juraj-google-style
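Extending the docstring's Foo example, the decorator above gives int-backed values with readable str/repr:

>>> @enum
... class Foo(object):
...     FOO = 1
...     BAR = 2
>>> str(Foo.FOO)
'Foo.FOO'
>>> repr(Foo.BAR)
'<Foo.BAR: 2>'
>>> Foo.FOO + 1  # values still behave as plain ints
2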
def _callable_func(self, func, axis, *args, **kwargs):
    def callable_apply_builder(df, axis=0):
        if not axis:
            df.index = index
            df.columns = pandas.RangeIndex(len(df.columns))
        else:
            df.columns = index
            df.index = pandas.RangeIndex(len(df.index))
        result = df.apply(func, axis=axis, *args, **kwargs)
        return result

    index = self.index if not axis else self.columns
    func_prepared = self._build_mapreduce_func(callable_apply_builder, axis=axis)
    result_data = self._map_across_full_axis(axis, func_prepared)
    return self._post_process_apply(result_data, axis)
Apply callable functions across given axis. Args: func: The functions to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler.
juraj-google-style
def _read_hopopt_options(self, length):
    counter = 0
    optkind = list()
    options = dict()
    while counter < length:
        code = self._read_unpack(1)
        if not code:
            break
        abbr, desc = _HOPOPT_OPT.get(code, ('none', 'Unassigned'))
        data = _HOPOPT_PROC(abbr)(self, code, desc=desc)
        enum = _OPT_TYPE.get(code)
        counter += data['length']
        if enum in optkind:
            if isinstance(options[abbr], tuple):
                options[abbr] += (Info(data),)
            else:
                options[abbr] = (Info(options[abbr]), Info(data))
        else:
            optkind.append(enum)
            options[abbr] = data
    if counter != length:
        raise ProtocolError(f'{self.alias}: invalid format')
    return (tuple(optkind), options)
Read HOPOPT options. Positional arguments: * length -- int, length of options Returns: * dict -- extracted HOPOPT options
codesearchnet
def bench(image, thread_count):
    threads = [threading.Thread(target=lambda: encoder.encode_png(image))
               for _ in xrange(thread_count)]
    start_time = datetime.datetime.now()
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    end_time = datetime.datetime.now()
    delta = (end_time - start_time).total_seconds()
    return delta
Encode `image` to PNG on `thread_count` threads in parallel. Returns: A `float` representing the number of seconds it takes all threads to finish encoding `image`.
codesearchnet
def _get_manager(cluster_info, host, executor_id):
    for node in cluster_info:
        if node['host'] == host and node['executor_id'] == executor_id:
            addr = node['addr']
            authkey = node['authkey']
            TFSparkNode.mgr = TFManager.connect(addr, authkey)
            break
    if TFSparkNode.mgr is None:
        msg = "No TFManager found on this node, please ensure that:\n" + \
              "1. Spark num_executors matches TensorFlow cluster_size\n" + \
              "2. Spark cores/tasks per executor is 1.\n" + \
              "3. Spark dynamic allocation is disabled."
        raise Exception(msg)
    logging.info("Connected to TFSparkNode.mgr on {0}, executor={1}, state={2}".format(
        host, executor_id, str(TFSparkNode.mgr.get('state'))))
    return TFSparkNode.mgr
Returns this executor's "singleton" instance of the multiprocessing.Manager, reconnecting per python-worker if needed. Args: :cluster_info: cluster node reservations :host: host IP address :executor_id: unique id per executor (created during initial call to run()) Returns: TFManager instance for this executor/python-worker
juraj-google-style
def output_mask(self):
    output = self.output
    if isinstance(output, list):
        return [getattr(x, '_keras_mask', None) for x in output]
    else:
        return getattr(output, '_keras_mask', None)
Retrieves the output mask tensor(s) of a layer. Only applicable if the layer has exactly one inbound node, i.e. if it is connected to one incoming layer. Returns: Output mask tensor (potentially None) or list of output mask tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers.
github-repos
def _get_config_instance(group_or_term, session, **kwargs):
    path = group_or_term._get_path()
    cached = group_or_term._top._cached_configs.get(path)
    if cached:
        config = cached
        created = False
    else:
        config, created = get_or_create(session, Config, **kwargs)
    return config, created
Finds appropriate config instance and returns it. Args: group_or_term (Group or Term): session (Sqlalchemy session): kwargs (dict): kwargs to pass to get_or_create. Returns: tuple of (Config, bool): the config instance and whether it was newly created.
juraj-google-style
def __init__(self, conf_path=ZEO_CLIENT_PATH, project_key=PROJECT_KEY):
    super(self.__class__, self).__init__(
        conf_path=conf_path,
        project_key=project_key
    )
    self.name_db_key = "name_db"
    self.name_db = self._get_key_or_create(self.name_db_key)
    self.aleph_id_db_key = "aleph_id_db"
    self.aleph_id_db = self._get_key_or_create(self.aleph_id_db_key)
    self.issn_db_key = "issn_db"
    self.issn_db = self._get_key_or_create(self.issn_db_key)
    self.path_db_key = "path_db"
    self.path_db = self._get_key_or_create(self.path_db_key)
    self.parent_db_key = "parent_db"
    self.parent_db = self._get_key_or_create(self.parent_db_key)
Constructor. Args: conf_path (str): Path to the ZEO configuration file. Default :attr:`~storage.settings.ZEO_CLIENT_PATH`. project_key (str): Project key, which is used for lookups into ZEO. Default :attr:`~storage.settings.TREE_PROJECT_KEY`.
juraj-google-style
def _get_connection(self, conn_or_int_id):
    key = conn_or_int_id
    if isinstance(key, str):
        table = self._int_connections
    elif isinstance(key, int):
        table = self._connections
    else:
        return None
    try:
        data = table[key]
    except KeyError:
        return None
    return data
Get the data for a connection by either conn_id or internal_id Args: conn_or_int_id (int, string): The external integer connection id or an internal string connection id Returns: dict: The context data associated with that connection or None if it cannot be found. Raises: ArgumentError: When the key is not found in the list of active connections or is invalid.
codesearchnet
def WriteTimestamp(timestamp, filename):
    if timestamp is None:
        return True
    timestamp_dir = os.path.dirname(filename)
    filedesc, temp_filename = tempfile.mkstemp(prefix='nsscache-update-', dir=timestamp_dir)
    time_string = time.strftime('%Y-%m-%dT%H:%M:%SZ', timestamp)
    try:
        os.write(filedesc, b'%s\n' % time_string.encode())
        os.fsync(filedesc)
        os.close(filedesc)
    except OSError:
        os.unlink(temp_filename)
        logging.warning('writing timestamp failed!')
        return False
    os.chmod(temp_filename,
             stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
    os.rename(temp_filename, filename)
    logging.debug('wrote timestamp %s to file %r', time_string, filename)
    return True
Write a given timestamp out to a file, converting to the ISO-8601 format. We convert internal timestamp format (epoch) to ISO-8601 format, i.e. YYYY-MM-DDThh:mm:ssZ which is basically UTC time, then write it out to a file. Args: timestamp: A struct time.struct_time or time tuple. filename: A String naming the file to write to. Returns: A boolean indicating success of write.
github-repos
def finish(self, end='\n', dirty=False):
    if not dirty:
        self.end_time = datetime.now()
        self.update(self.max_value, force=True)
    StdRedirectMixin.finish(self, end=end)
    ResizableMixin.finish(self)
    ProgressBarBase.finish(self)
Puts the ProgressBar bar in the finished state. Also flushes and disables output buffering if this was the last progressbar running. Args: end (str): The string to end the progressbar with, defaults to a newline dirty (bool): When True the progressbar keeps its current state and won't be set to 100 percent
juraj-google-style
def _init_from_args(self, queue=None, enqueue_ops=None, close_op=None,
                    cancel_op=None, queue_closed_exception_types=None):
    if not queue or not enqueue_ops:
        raise ValueError('Must provide queue and enqueue_ops.')
    self._queue = queue
    self._enqueue_ops = enqueue_ops
    self._close_op = close_op
    self._cancel_op = cancel_op
    if queue_closed_exception_types is not None:
        if (not isinstance(queue_closed_exception_types, tuple) or
                not queue_closed_exception_types or
                not all(issubclass(t, errors.OpError)
                        for t in queue_closed_exception_types)):
            raise TypeError(
                'queue_closed_exception_types, when provided, must be a tuple of '
                'tf.error types, but saw: %s' % queue_closed_exception_types)
    self._queue_closed_exception_types = queue_closed_exception_types
    if self._close_op is None:
        self._close_op = self._queue.close()
    if self._cancel_op is None:
        self._cancel_op = self._queue.close(cancel_pending_enqueues=True)
    if not self._queue_closed_exception_types:
        self._queue_closed_exception_types = (errors.OutOfRangeError,)
    else:
        self._queue_closed_exception_types = tuple(self._queue_closed_exception_types)
Create a QueueRunner from arguments. Args: queue: A `Queue`. enqueue_ops: List of enqueue ops to run in threads later. close_op: Op to close the queue. Pending enqueue ops are preserved. cancel_op: Op to close the queue and cancel pending enqueue ops. queue_closed_exception_types: Tuple of exception types, which indicate the queue has been safely closed. Raises: ValueError: If `queue` or `enqueue_ops` are not provided when not restoring from `queue_runner_def`. TypeError: If `queue_closed_exception_types` is provided, but is not a non-empty tuple of error types (subclasses of `tf.errors.OpError`).
github-repos
def _get_python_exe_version(python_exe: list[str]):
    try:
        python_exe_version = subprocess.check_output(
            python_exe + ['-V'], stderr=subprocess.STDOUT).decode()
    except (subprocess.CalledProcessError, FileNotFoundError):
        return None
    return _parse_exe_version_string(python_exe_version)
Determine the major and minor version of given Python executable. Arguments: python_exe: the command used to invoke the Python executable, as a list of strings Returns: Version as (major, minor) tuple, or None if it could not be determined.
github-repos
def add(self, term):
    if isinstance(term, Conjunction):
        for term_ in term.terms:
            self.add(term_)
    elif isinstance(term, Term):
        self._terms.append(term)
    else:
        raise TypeError('Not a Term or Conjunction')
Add a term to the conjunction. Args: term (:class:`Term`, :class:`Conjunction`): term to add; if a :class:`Conjunction`, all of its terms are added to the current conjunction. Raises: :class:`TypeError`: when *term* is an invalid type
juraj-google-style
def delete(self, *names: str, pipeline=False):
    if pipeline:
        self._pipeline.delete(*names)
    else:
        self._db.delete(*names)
Delete one or more keys specified by names. Args: names (str): Names of keys to delete pipeline (bool): True, start a transaction block. Default false.
codesearchnet
def _extract_cell_info(self, structure, site_idx, sites, targets, voro,
                       compute_adj_neighbors=False):
    all_vertices = voro.vertices
    center_coords = sites[site_idx].coords
    results = {}
    for nn, vind in voro.ridge_dict.items():
        if site_idx in nn:
            other_site = nn[0] if nn[1] == site_idx else nn[1]
            if -1 in vind:
                if self.allow_pathological:
                    continue
                else:
                    raise RuntimeError('This structure is pathological, '
                                       'infinite vertex in the voronoi construction')
            facets = [all_vertices[i] for i in vind]
            angle = solid_angle(center_coords, facets)
            volume = 0
            for j, k in zip(vind[1:], vind[2:]):
                volume += vol_tetra(center_coords, all_vertices[vind[0]],
                                    all_vertices[j], all_vertices[k])
            face_dist = np.linalg.norm(center_coords - sites[other_site].coords) / 2
            face_area = 3 * volume / face_dist
            normal = np.subtract(sites[other_site].coords, center_coords)
            normal /= np.linalg.norm(normal)
            results[other_site] = {
                'site': sites[other_site],
                'normal': normal,
                'solid_angle': angle,
                'volume': volume,
                'face_dist': face_dist,
                'area': face_area,
                'n_verts': len(vind)
            }
            if compute_adj_neighbors:
                results[other_site]['verts'] = vind
    resultweighted = {}
    for nn_index, nstats in results.items():
        nn = nstats['site']
        if nn.is_ordered:
            if nn.specie in targets:
                resultweighted[nn_index] = nstats
        else:
            for disordered_sp in nn.species.keys():
                if disordered_sp in targets:
                    resultweighted[nn_index] = nstats
    if compute_adj_neighbors:
        adj_neighbors = dict((i, []) for i in resultweighted.keys())
        for a_ind, a_nninfo in resultweighted.items():
            a_verts = set(a_nninfo['verts'])
            for b_ind, b_nninfo in resultweighted.items():
                if b_ind > a_ind:
                    continue
                if len(a_verts.intersection(b_nninfo['verts'])) == 2:
                    adj_neighbors[a_ind].append(b_ind)
                    adj_neighbors[b_ind].append(a_ind)
        for key, neighbors in adj_neighbors.items():
            resultweighted[key]['adj_neighbors'] = neighbors
    return resultweighted
Get the information about a certain atom from the results of a tessellation Args: structure (Structure) - Structure being assessed site_idx (int) - Index of the atom in question sites ([Site]) - List of all sites in the tessellation targets ([Element]) - Target elements voro - Output of qvoronoi compute_adj_neighbors (boolean) - Whether to compute which neighbors are adjacent Returns: A dict of sites sharing a common Voronoi facet. Key is facet id (not useful) and values are dictionaries containing statistics about the facet: - site: Pymatgen site - solid_angle - Solid angle subtended by face - angle_normalized - Solid angle normalized such that the face with the largest area subtends an angle of 1 - area - Area of the facet - face_dist - Distance between site n and the facet - volume - Volume of Voronoi cell for this face - n_verts - Number of vertices on the facet - adj_neighbors - Facet id's for the adjacent neighbors
codesearchnet
def is_http_running_on(port):
    try:
        conn = httplib.HTTPConnection('127.0.0.1:' + str(port))
        conn.connect()
        conn.close()
        return True
    except Exception:
        return False
Check if an http server runs on a given port. Args: port (int): The port to check. Returns: True if it is used by an http server. False otherwise.
codesearchnet
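A minimal sketch of how is_http_running_on might be used to wait for a local server to come up (the port is arbitrary):

import time

while not is_http_running_on(8080):
    time.sleep(0.5)  # poll until something answers on 127.0.0.1:8080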
def _to_dict(self, include=None, exclude=None):
    if include is not None and not isinstance(include, (list, tuple, set, frozenset)):
        raise TypeError('include should be a list, tuple or set')
    if exclude is not None and not isinstance(exclude, (list, tuple, set, frozenset)):
        raise TypeError('exclude should be a list, tuple or set')
    values = {}
    for prop in self._properties.itervalues():
        name = prop._code_name
        if include is not None and name not in include:
            continue
        if exclude is not None and name in exclude:
            continue
        try:
            values[name] = prop._get_for_dict(self)
        except UnprojectedPropertyError:
            pass
    return values
Return a dict containing the entity's property values. Args: include: Optional set of property names to include, default all. exclude: Optional set of property names to skip, default none. A name contained in both include and exclude is excluded.
juraj-google-style
def append_transformed_structures(self, tstructs_or_transmuter):
    if isinstance(tstructs_or_transmuter, self.__class__):
        self.transformed_structures.extend(tstructs_or_transmuter.transformed_structures)
    else:
        for ts in tstructs_or_transmuter:
            assert isinstance(ts, TransformedStructure)
        self.transformed_structures.extend(tstructs_or_transmuter)
Method is overloaded to accept either a list of transformed structures or a transmuter, in which case it appends the second transmuter's structures. Args: tstructs_or_transmuter: A list of transformed structures or a transmuter.
codesearchnet
async def peers(self):
    response = await self._api.get('/v1/status/peers')
    if response.status == 200:
        return set(response.body)
Returns the current Raft peer set Returns: Collection: addresses of peers This endpoint retrieves the Raft peers for the datacenter in which the agent is running. It returns a collection of addresses, such as:: [ "10.1.10.12:8300", "10.1.10.11:8300", "10.1.10.10:8300" ] This list of peers is strongly consistent and can be useful in determining when a given server has successfully joined the cluster.
codesearchnet
def ensemble_center(self, site_list, indices, cartesian=True):
    if cartesian:
        return np.average([site_list[i].coords for i in indices], axis=0)
    else:
        return np.average([site_list[i].frac_coords for i in indices], axis=0)
Finds the center of an ensemble of sites selected from a list of sites. Helper method for the find_adsorption_sites algorithm. Args: site_list (list of sites): list of sites indices (list of ints): list of ints from which to select sites from site list cartesian (bool): whether to get average fractional or cartesian coordinate
codesearchnet
def scale_vmss(access_token, subscription_id, resource_group, vmss_name, capacity):
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourceGroups/', resource_group,
                        '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
                        '?api-version=', COMP_API])
    body = '{"sku":{"capacity":"' + str(capacity) + '"}}'
    return do_patch(endpoint, body, access_token)
Change the instance count of an existing VM Scale Set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. capacity (int): New number of VMs. Returns: HTTP response.
juraj-google-style
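For capacity=5, the PATCH body assembled above is simply:

{"sku":{"capacity":"5"}}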
def _make_output_composite_tensors_match(op_type, branch_graphs):
    assert branch_graphs
    branch_outputs = [g.structured_outputs for g in branch_graphs]
    outputs_per_branch = list(len(outs) for outs in branch_outputs)
    assert len(set(outputs_per_branch)) == 1, outputs_per_branch
    for output_idx, branch_outs in enumerate(zip(*branch_outputs)):
        if len(set(type(out) for out in branch_outs)) == 1:
            continue
        if not any(isinstance(out, indexed_slices.IndexedSlices) for out in branch_outs):
            continue
        for branch_idx, branch_out in enumerate(branch_outs):
            if isinstance(branch_out, indexed_slices.IndexedSlices):
                continue
            elif isinstance(branch_out, tensor_lib.Tensor):
                with branch_graphs[branch_idx].as_default():
                    branch_outputs[branch_idx][output_idx] = math_ops._as_indexed_slices(branch_out)
            else:
                raise TypeError(
                    'Cannot reconcile {op_name} {output_idx}-th outputs:\n'
                    '  outputs from all branches: {outputs}'.format(
                        op_name='tf.cond' if op_type == _COND else 'tf.switch_case',
                        output_idx=output_idx, outputs=branch_outs))
    for branch_graph, branch_outs in zip(branch_graphs, branch_outputs):
        branch_graph.structured_outputs = branch_outs
        branch_graph.outputs = [
            t for t in func_graph_module.flatten(branch_outs) if t is not None]
Modifies each branch_graph's outputs to have the same output signature. Currently the only transformation implemented is turning a Tensor into an equivalent IndexedSlices if the other branch returns an IndexedSlices. Updates branch_graph.{outputs,structured_outputs} for each branch_graph in branch_graphs. Args: op_type: _COND or _CASE branch_graphs: `list` of `FuncGraph` Raises: TypeError: if a set of outputs cannot be rewritten.
github-repos
def get_arrays(self, type_img):
    if type_img.lower() == 'lola':
        return LolaMap(self.ppdlola, *self.window,
                       path_pdsfile=self.path_pdsfiles).image()
    elif type_img.lower() == 'wac':
        return WacMap(self.ppdwac, *self.window,
                      path_pdsfile=self.path_pdsfiles).image()
    else:
        raise ValueError('The img type has to be either "Lola" or "Wac"')
Return arrays for the region of interest Args: type_img (str): Either lola or wac. Returns: A tuple of three arrays ``(X,Y,Z)`` where ``X`` contains the longitudes, ``Y`` contains the latitudes and ``Z`` the values extracted for the region of interest. Note: The argument has to be either lola or wac; it is not case-sensitive. All return arrays have the same size. All coordinates are in degree.
codesearchnet
def _validate_symbol_names(self) -> None:
    all_symbol_names = set(self._names) | set(self._names_v1)
    if self._api_name == TENSORFLOW_API_NAME:
        for subpackage in SUBPACKAGE_NAMESPACES:
            if any(n.startswith(subpackage) for n in all_symbol_names):
                raise InvalidSymbolNameError(
                    '@tf_export is not allowed to export symbols under %s.*' % subpackage)
    elif not all(n.startswith(self._api_name) for n in all_symbol_names):
        raise InvalidSymbolNameError(
            'Can only export symbols under package name of component.')
Validate you are exporting symbols under an allowed package. We need to ensure things exported by tf_export, etc. export symbols under disjoint top-level package names. For TensorFlow, we check that it does not export anything under subpackage names used by components (keras, etc.). For each component, we check that it exports everything under its own subpackage. Raises: InvalidSymbolNameError: If you try to export symbol under disallowed name.
github-repos
def add_resource(self, resource, *class_args, **class_kwargs):
    name = resource.__name__.lower()
    meta_resource = parse_docs(resource.__doc__, ["$shared"])
    self.meta[name] = meta_resource
    shared = self.meta["$shared"].copy()
    shared.update(meta_resource.get("$shared", {}))
    with MarkKey("%s.$shared" % resource.__name__):
        sp = SchemaParser(validators=self.validators, shared=shared)
    with MarkKey(resource.__name__):
        resource = resource(*class_args, **class_kwargs)
        actions = defaultdict(lambda: {})
        for action in dir(resource):
            find = PATTERN_ACTION.findall(action)
            if not find:
                continue
            httpmethod, action_name = find[0]
            action_group = actions[action_name]
            fn = getattr(resource, action)
            meta_action = parse_docs(fn.__doc__, ["$input", "$output", "$error"])
            meta_resource[action] = meta_action
            with MarkKey(fn.__name__):
                action_group[httpmethod] = self.make_action(fn, sp, meta_action)
    for action_name in actions:
        if action_name == "":
            url = "/" + name
            endpoint = name
        else:
            url = "/{0}/{1}".format(name, action_name)
            endpoint = "{0}@{1}".format(name, action_name)
        action_group = actions[action_name]
        self.app.add_url_rule(
            url, endpoint=endpoint,
            view_func=self.make_view(action_group),
            methods=set(action_group)
        )
Add a resource. Parse the resource and its actions, and route the actions by naming rule. Args: resource: resource class class_args: positional arguments passed to the resource constructor class_kwargs: keyword arguments passed to the resource constructor
juraj-google-style
def _ParseInternetPasswordRecord(self, parser_mediator, record):
    key = record.get('_key_', None)
    if not key or not key.startswith(b'ssgp'):
        raise errors.ParseError(
            'Unsupported Internet password record key value does not start '
            'with: "ssgp".')
    protocol_string = codecs.decode('{0:08x}'.format(record['ptcl']), 'hex')
    protocol_string = codecs.decode(protocol_string, 'utf-8')
    event_data = KeychainInternetRecordEventData()
    event_data.account_name = self._ParseBinaryDataAsString(parser_mediator, record['acct'])
    event_data.comments = self._ParseBinaryDataAsString(parser_mediator, record['crtr'])
    event_data.entry_name = self._ParseBinaryDataAsString(parser_mediator, record['PrintName'])
    event_data.protocol = self._PROTOCOL_TRANSLATION_DICT.get(protocol_string, protocol_string)
    ssgp_hash = codecs.encode(key[4:], 'hex')
    event_data.ssgp_hash = codecs.decode(ssgp_hash, 'utf-8')
    event_data.text_description = self._ParseBinaryDataAsString(parser_mediator, record['desc'])
    event_data.type_protocol = self._ParseBinaryDataAsString(parser_mediator, record['atyp'])
    event_data.where = self._ParseBinaryDataAsString(parser_mediator, record['srvr'])
    date_time = self._ParseDateTimeValue(parser_mediator, record['cdat'])
    if date_time:
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_CREATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)
    date_time = self._ParseDateTimeValue(parser_mediator, record['mdat'])
    if date_time:
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts the information from an Internet password record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. record (dict[str, object]): database record. Raises: ParseError: if Internet password record cannot be parsed.
codesearchnet
def normal_mean(data, variance):
    if not isinstance(data, np.ndarray):
        data = np.array(data)
    i_variance_2 = 1 / (variance ** 2)
    cmm = [0.0]
    cmm.extend(np.cumsum(data))
    cmm2 = [0.0]
    cmm2.extend(np.cumsum(np.abs(data)))

    def cost(start, end):
        cmm2_diff = cmm2[end] - cmm2[start]
        cmm_diff = pow(cmm[end] - cmm[start], 2)
        i_diff = end - start
        diff = cmm2_diff - cmm_diff
        return (diff / i_diff) * i_variance_2

    return cost
Creates a segment cost function for a time series with a Normal distribution with changing mean Args: data (:obj:`list` of float): 1D time series data variance (float): variance Returns: function: Function with signature (int, int) -> float where the first arg is the starting index and the second is the ending index. Returns the cost of that segment
juraj-google-style
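A small worked example of the returned cost function (hand-computed, variance=1.0):

cost = normal_mean([0.1, 0.2, 1.9, 2.1], variance=1.0)
print(cost(0, 2))  # (0.3 - 0.3**2) / 2 * 1.0 = 0.105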
def read_passwd_file(pass_file):
    with open(pass_file) as fin:
        passwd = fin.read().strip()
    return passwd
Read password from external file and return it as a string. The file should contain just a single line. Prevents hard-coding the password anywhere in this script. IMPORTANT! Password is stored as plain text! Do NOT use with your personal account! Args: pass_file (str): /path/to/pass_file
codesearchnet
def tpu_core_ids_to_locations(self, tpu_core_ids):
    return _pywrap_dtensor_device.TPUCoreIDsToLocations(
        context.context()._handle, self._device_info, tpu_core_ids)
Translates TPU core IDs to TPU core locations. Args: tpu_core_ids: A list of TPU core IDs. Each one is an unsigned integer. Returns: A list of corresponding TPU core locations.
github-repos
def crossCombine(l):
    resultList = []
    firstList = l[0]
    rest = l[1:]
    if len(rest) == 0:
        return firstList
    for e in firstList:
        for e1 in crossCombine(rest):
            resultList.append(combinteDict(e, e1))
    return resultList
Takes a list of lists and returns a big list of lists containing all the possibilities of elements of sublists combining together. It is basically a Combinatorics of list. For example: >>> crossCombine([[a,a1,a2,...], [b,b1,b2,...]]) >>> [[a,b], [a,b1], [a,b2], [a1,b], [a1,b1], [a1, b2], [a2,b], [a2,b1], [a2,b2], ...] For using in StartCalendarInterval, the syntax of ``l`` is like below: ``l: [[dic of month], [dict of day]]`` such as: ``l: [[{'month': 1}, {'month': 2}], [{'day': 2}, {'day': 3}, {'day': 4}]]`` Args: l (list[list]): the list of lists you want to crossCombine with. Returns: list: crossCombined list
codesearchnet
def __init__(self, retry_params, retriable_exceptions=_RETRIABLE_EXCEPTIONS,
             should_retry=lambda r: False):
    self.retry_params = retry_params
    self.retriable_exceptions = retriable_exceptions
    self.should_retry = should_retry
Init. Args: retry_params: a RetryParams instance. retriable_exceptions: a list of exception classes that are retriable. should_retry: a function that takes a result from the tasklet and returns a boolean. True if the result should be retried.
juraj-google-style
def get_task(config):
    path = os.path.join(config['work_dir'], "task.json")
    message = "Can't read task from {}!\n%(exc)s".format(path)
    contents = load_json_or_yaml(path, is_path=True, message=message)
    return contents
Read the task.json from work_dir. Args: config (dict): the running config, to find work_dir. Returns: dict: the contents of task.json Raises: ScriptWorkerTaskException: on error.
juraj-google-style
def member_command(self, member_id, command):
    server_id = self._servers.host_to_server_id(
        self.member_id_to_host(member_id))
    return self._servers.command(server_id, command)
Apply a command (start/stop/restart) to a member instance of the replica set. Args: member_id - member index command - string command (start/stop/restart) Returns: True if the operation succeeded, otherwise False
juraj-google-style
def make_preprocessing_fn(frequency_threshold):
    def preprocessing_fn(inputs):
        result = {'clicked': inputs['clicked']}
        for name in _INTEGER_COLUMN_NAMES:
            feature = inputs[name]
            feature = tft.sparse_tensor_to_dense_with_shape(feature, [None, 1], default_value=-1)
            feature = tf.squeeze(feature, axis=1)
            result[name] = feature
            result[name + '_bucketized'] = tft.bucketize(feature, _NUM_BUCKETS)
        for name in _CATEGORICAL_COLUMN_NAMES:
            feature = inputs[name]
            feature = tft.sparse_tensor_to_dense_with_shape(feature, [None, 1], default_value='')
            feature = tf.squeeze(feature, axis=1)
            result[get_transformed_categorical_column_name(name)] = \
                tft.compute_and_apply_vocabulary(feature, frequency_threshold=frequency_threshold)
        return result

    return preprocessing_fn
Creates a preprocessing function for criteo. Args: frequency_threshold: The frequency_threshold used when generating vocabularies for the categorical features. Returns: A preprocessing function.
github-repos
def __init__(self, format_string):
    try:
        struct_object = struct.Struct(format_string)
    except (TypeError, struct.error) as exception:
        raise errors.FormatError((
            'Unable to create struct object from data type definition '
            'with error: {0!s}').format(exception))
    super(StructOperation, self).__init__()
    self._struct = struct_object
    self._struct_format_string = format_string
Initializes a Python struct-base byte stream operation. Args: format_string (str): format string as used by Python struct. Raises: FormatError: if the struct operation cannot be determined from the data type definition.
juraj-google-style
def copy_and_move_messages(from_channel, to_channel):
    with BlockSave(Message, query_dict={'channel_id': to_channel.key}):
        for message in Message.objects.filter(channel=from_channel, typ=15):
            message.key = ''
            message.channel = to_channel
            message.save()
While splitting channel and moving chosen subscribers to new channel, old channel's messages are copied and moved to new channel. Args: from_channel (Channel object): move messages from channel to_channel (Channel object): move messages to channel
codesearchnet
def propagate(self, date):
    if type(date) is timedelta:
        date = self.orbit.date + date
    _date = [float(x) for x in '{:%Y %m %d %H %M %S.%f}'.format(date).split()]
    p, v = self.tle.propagate(*_date)
    result = [x * 1000 for x in (p + v)]
    return self.orbit.__class__(date, result, 'cartesian', 'TEME',
                                self.__class__(), **self.orbit.complements)
Propagate the initialized orbit Args: date (Date or datetime.timedelta) Return: Orbit
codesearchnet
def insert(self, i, species, coords, validate_proximity=False, properties=None):
    new_site = Site(species, coords, properties=properties)
    if validate_proximity:
        for site in self:
            if site.distance(new_site) < self.DISTANCE_TOLERANCE:
                raise ValueError("New site is too close to an existing site!")
    self._sites.insert(i, new_site)
Insert a site to the molecule. Args: i (int): Index to insert site species: species of inserted site coords (3x1 array): coordinates of inserted site validate_proximity (bool): Whether to check if inserted site is too close to an existing site. Defaults to False. properties (dict): Dict of properties for the Site. Note: The site is inserted in place; nothing is returned.
juraj-google-style
def with_step(self, step):
    self._options['step'] = step
    return self
Which profile step to use for profiling. The 'step' here refers to the step defined by `Profiler.add_step()` API. Args: step: When multiple steps of profiles are available, select which step's profile to use. If -1, use average of all available steps. Returns: self
github-repos
def setup_low_rank_optimizer(optimizer_name: str,
                             optimizer_mapping: dict[str, Any],
                             optim_kwargs: dict[str, Any],
                             is_layerwise_supported: bool = True) -> tuple[Any, Any]:
    is_layerwise = optimizer_name.lower().endswith('layerwise')
    if is_layerwise and args.parallel_mode == ParallelMode.DISTRIBUTED and is_layerwise_supported:
        raise NotImplementedError(
            f'Layer-wise {optimizer_name} does not support DDP at this time')
    optimizer_cls = optimizer_mapping[optimizer_name]
    if args.optim_target_modules is None:
        raise ValueError(
            f'You need to define `optim_target_modules` to use {optimizer_name} optimizers')
    if not isinstance(args.optim_target_modules, (list, str)):
        raise ValueError(
            f"`optim_target_modules` must be a list of strings, a regex string, "
            f"or 'all-linear'. Got: {args.optim_target_modules}")
    if model is None:
        raise ValueError(f'You need to pass a model to initialize {optimizer_name} optimizer.')
    all_linear = (isinstance(args.optim_target_modules, str) and
                  args.optim_target_modules.replace('_', '-') == 'all-linear')
    target_params_names = []
    for module_name, module in model.named_modules():
        target_module_exists, is_regex = check_target_module_exists(
            args.optim_target_modules, module_name, return_is_regex=True)
        if not isinstance(module, nn.Linear):
            if target_module_exists and not is_regex:
                logger.warning(
                    f'{module_name} matched but ignored. {optimizer_name} only supports linear layers.')
            continue
        if not target_module_exists and not all_linear:
            continue
        target_params_names.append(module_name + '.weight')
    if len(target_params_names) == 0:
        raise ValueError(
            f'No target modules found for {optimizer_name} ({args.optim_target_modules}).')
    target_params = [p for n, p in model.named_parameters() if n in target_params_names]
    non_target_params = [p for n, p in model.named_parameters() if n not in target_params_names]
    optim_kwargs.update(optim_args)
    param_groups = [{'params': non_target_params},
                    {'params': target_params, **optim_kwargs}]
    if is_layerwise:
        if args.gradient_accumulation_steps != 1:
            raise ValueError(f'Layerwise {optimizer_name} does not support gradient accumulation!')
        optimizer_dict = {}
        for param in non_target_params:
            optimizer_dict[param] = optimizer_cls([{'params': [param]}], **optimizer_kwargs)
        for param in target_params:
            optimizer_dict[param] = optimizer_cls(
                [{'params': [param], **optim_kwargs}], **optimizer_kwargs)

        def optimizer_hook(param):
            if param.grad is not None:
                optimizer_dict[param].step()
                optimizer_dict[param].zero_grad()

        for param in model.parameters():
            if param.requires_grad:
                param.register_post_accumulate_grad_hook(optimizer_hook)
        optimizer_cls = LayerWiseDummyOptimizer
        optimizer_kwargs.update({'optimizer_dict': optimizer_dict})
    optimizer_kwargs.update({'params': param_groups})
    return (optimizer_cls, optimizer_kwargs)
Helper function to set up low-rank optimizers like GaLore and Apollo. Args: optimizer_name (str): Name of the optimizer. optimizer_mapping (dict): Mapping of optimizer names to their classes. optim_kwargs (dict): Keyword arguments for the optimizer. is_layerwise_supported (bool): Whether layerwise optimization is supported. Returns: Tuple[Any, Any]: Optimizer class and updated optimizer kwargs.
github-repos
def indent(lines, amount=2, char=' '):
    lines = str(lines)
    padding = amount * char
    return padding + ('\n' + padding).join(lines.split('\n'))
r"""Indent a string. Prepends whitespace to every line in the passed string. (Lines are separated by newline characters.) Args: lines (str): The string to indent. Keyword Args: amount (int): The number of columns to indent by. char (str): The character to to use as the indentation. Returns: str: The indented string. Example: >>> print(indent('line1\nline2', char='*')) **line1 **line2
codesearchnet
def mat2quat(rmat, precise=False):
    M = np.array(rmat, dtype=np.float32, copy=False)[:3, :3]
    if precise:
        q = np.empty((4,))
        t = np.trace(M)
        if t > M[3, 3]:
            q[0] = t
            q[3] = M[1, 0] - M[0, 1]
            q[2] = M[0, 2] - M[2, 0]
            q[1] = M[2, 1] - M[1, 2]
        else:
            i, j, k = 0, 1, 2
            if M[1, 1] > M[0, 0]:
                i, j, k = 1, 2, 0
            if M[2, 2] > M[i, i]:
                i, j, k = 2, 0, 1
            t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
            q[i] = t
            q[j] = M[i, j] + M[j, i]
            q[k] = M[k, i] + M[i, k]
            q[3] = M[k, j] - M[j, k]
            q = q[[3, 0, 1, 2]]
        q *= 0.5 / math.sqrt(t * M[3, 3])
    else:
        m00, m01, m02 = M[0, 0], M[0, 1], M[0, 2]
        m10, m11, m12 = M[1, 0], M[1, 1], M[1, 2]
        m20, m21, m22 = M[2, 0], M[2, 1], M[2, 2]
        K = np.array([
            [m00 - m11 - m22, 0.0, 0.0, 0.0],
            [m01 + m10, m11 - m00 - m22, 0.0, 0.0],
            [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],
            [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22],
        ])
        K /= 3.0
        w, V = np.linalg.eigh(K)
        q = V[[3, 0, 1, 2], np.argmax(w)]
    if q[0] < 0.0:
        np.negative(q, q)
    return q[[1, 2, 3, 0]]
Converts given rotation matrix to quaternion. Args: rmat: 3x3 rotation matrix precise: If precise is True, the input matrix is assumed to be a precise rotation matrix and a faster algorithm is used. Returns: vec4 float quaternion angles
juraj-google-style
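A quick sanity check of mat2quat above: the identity rotation should map to the identity quaternion in (x, y, z, w) order.

import numpy as np
print(mat2quat(np.eye(3)))  # -> [0. 0. 0. 1.]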
def replace_iterable_params(args, kwargs, iterable_params):
    args = list(args)
    for name, index in iterable_params:
        if index < len(args):
            args[index] = list(args[index])
        elif name in kwargs:
            kwargs[name] = list(kwargs[name])
    return (tuple(args), kwargs)
Returns (args, kwargs) with any iterable parameters converted to lists. Args: args: Positional arguments to a function kwargs: Keyword arguments to a function. iterable_params: A list of (name, index) tuples for iterable parameters. Returns: A tuple (args, kwargs), where any positional or keyword parameters in `iterable_params` have their value converted to a `list`.
github-repos
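A small usage sketch; lazy views such as `range` become plain lists in both the positional and keyword positions:

```python
# "xs" is positional argument 0, "ys" is passed as a keyword argument here.
args, kwargs = replace_iterable_params(
    (range(3),), {"ys": (4, 5)}, iterable_params=[("xs", 0), ("ys", 1)]
)
assert args == ([0, 1, 2],)
assert kwargs == {"ys": [4, 5]}
```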
def init(config, workdir=None, logfile=None, loglevel=logging.INFO, **kwargs): setup_sdk_logging(logfile, loglevel) defaults = lago_config.get_section('init') if (workdir is None): workdir = os.path.abspath('.lago') defaults['workdir'] = workdir defaults['virt_config'] = config defaults.update(kwargs) (workdir, prefix) = cmd.do_init(**defaults) return SDK(workdir, prefix)
Initialize the Lago environment

Args:
    config(str): Path to LagoInitFile
    workdir(str): Path to initialize the workdir, defaults to "$PWD/.lago"
    **kwargs(dict): Pass arguments to :func:`~lago.cmd.do_init`
    logfile(str): A path to setup a log file.
    loglevel(int): :mod:`logging` log level.

Returns:
    :class:`~lago.sdk.SDK`: Initialized Lago environment

Raises:
    :exc:`~lago.utils.LagoException`: If initialization failed
codesearchnet
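A hedged usage sketch; the init-file and workdir paths are illustrative, and `start()` is assumed to be available on the returned SDK object:

```python
import logging
from lago import sdk

env = sdk.init(
    "LagoInitFile",                  # illustrative path to the init file
    workdir="/tmp/lago-demo/.lago",  # illustrative workdir
    logfile="/tmp/lago-demo/lago.log",
    loglevel=logging.DEBUG,
)
env.start()
```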
def adjoint(matrix, name=None): with ops.name_scope(name, 'adjoint', [matrix]): matrix = ops.convert_to_tensor(matrix, name='matrix') return array_ops.matrix_transpose(matrix, conjugate=True)
Transposes the last two dimensions of and conjugates tensor `matrix`. For example: ```python x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j], [4 + 4j, 5 + 5j, 6 + 6j]]) tf.linalg.adjoint(x) # [[1 - 1j, 4 - 4j], # [2 - 2j, 5 - 5j], # [3 - 3j, 6 - 6j]] ``` Args: matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`, or `complex128` with shape `[..., M, M]`. name: A name to give this `Op` (optional). Returns: The adjoint (a.k.a. Hermitian transpose a.k.a. conjugate transpose) of matrix.
github-repos
def get_structure_by_id(self, cod_id, **kwargs):
    r = requests.get('http://www.crystallography.net/cod/%s.cif' % cod_id)
    return Structure.from_str(r.text, fmt='cif', **kwargs)
Queries the COD for a structure by id. Args: cod_id (int): COD id. kwargs: All kwargs supported by :func:`pymatgen.core.structure.Structure.from_str`. Returns: A Structure.
codesearchnet
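Typical usage through pymatgen's COD wrapper; the entry id below is illustrative:

```python
from pymatgen.ext.cod import COD

cod = COD()
structure = cod.get_structure_by_id(1010064)  # illustrative COD id
print(structure.composition)
```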
def fts_count(self, fts, inv): return len(list(filter((lambda s: self.fts_match(fts, s)), inv)))
Return the count of segments in an inventory matching a given feature mask. Args: fts (set): feature mask given as a set of (value, feature) tuples inv (set): inventory of segments (as Unicode IPA strings) Returns: int: number of segments in `inv` that match feature mask `fts`
codesearchnet
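A hedged sketch assuming a panphon-style feature table, where a mask is a set of (value, feature) pairs; the mask and inventory are illustrative:

```python
import panphon

ft = panphon.FeatureTable()
voiced = {("+", "voi")}                 # feature mask: voiced segments
inventory = ["p", "b", "t", "d", "a"]   # illustrative inventory
print(ft.fts_count(voiced, inventory))  # -> 3 (b, d, and a are voiced)
```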
def load_default(self): path = ctypes_util.find_library(self._sdk) if (path is None): if (self._windows or self._cygwin): path = next(self.find_library_windows(), None) elif sys.platform.startswith('linux'): path = next(self.find_library_linux(), None) elif sys.platform.startswith('darwin'): path = next(self.find_library_darwin(), None) if (path is not None): return self.load(path) return False
Loads the default J-Link SDK DLL. The default J-Link SDK is determined by first checking if ``ctypes`` can find the DLL, then by searching the platform-specific paths. Args: self (Library): the ``Library`` instance Returns: ``True`` if the DLL was loaded, otherwise ``False``.
codesearchnet
def classify_format(f):
    l0, l1 = _get_two_lines(f)
    if loader.glove.check_valid(l0, l1):
        return _glove
    elif loader.word2vec_text.check_valid(l0, l1):
        return _word2vec_text
    elif loader.word2vec_bin.check_valid(l0, l1):
        return _word2vec_bin
    else:
        raise OSError("Invalid format")
Determine the format of a word embedding file from its content.

This operation only looks at the first two lines and does not check
the sanity of the input file.

Args:
    f (Filelike):

Returns:
    class
juraj-google-style
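A usage sketch; the file is opened in binary mode since the loaders must also recognize word2vec's binary format (the path is illustrative):

```python
with open("vectors.bin", "rb") as f:  # illustrative embedding file
    fmt = classify_format(f)          # one of the registered loader classes
```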
def distinct(l): seen = set() seen_add = seen.add return (_ for _ in l if (not ((_ in seen) or seen_add(_))))
Return a generator over the list with duplicates removed.

Args:
    l (list): the list to filter.

Returns:
    generator: the items of `l` in their original order, without duplicates.
codesearchnet
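Note that the result is lazy, so materialize it with `list` when needed:

```python
items = [1, 2, 2, 3, 1]
print(list(distinct(items)))  # -> [1, 2, 3]; first occurrences kept in order
```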
def Search(self, search_base, search_filter, search_scope, attrs): self._last_search_params = (search_base, search_filter, search_scope, attrs) self.log.debug('searching for base=%r, filter=%r, scope=%r, attrs=%r', search_base, search_filter, search_scope, attrs) if 'dn' in attrs: self._dn_requested = True self.message_id = self.conn.search_ext(base=search_base, filterstr=search_filter, scope=search_scope, attrlist=attrs, serverctrls=[self.ldap_controls])
Search the data source. The search is asynchronous; data should be retrieved by iterating over the source object itself (see __iter__() below). Args: search_base: the base of the tree being searched search_filter: a filter on the objects to be returned search_scope: the scope of the search from ldap.SCOPE_* attrs: a list of attributes to be returned Returns: nothing.
github-repos
def initializer(self): if self._initializer is not None: return self._initializer else: raise ValueError('The iterator does not have an initializer. This means it was likely created using `tf.data.Dataset.make_one_shot_iterator()`. For an initializable iterator, use `tf.data.Dataset.make_initializable_iterator()` instead.')
A `tf.Operation` that should be run to initialize this iterator. Returns: A `tf.Operation` that should be run to initialize this iterator Raises: ValueError: If this iterator initializes itself automatically.
github-repos
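Standard TF1-style usage: run the initializer op before pulling elements from the iterator.

```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

dataset = tf.data.Dataset.range(3)
iterator = tf.data.make_initializable_iterator(dataset)
next_element = iterator.get_next()

with tf.Session() as sess:
    sess.run(iterator.initializer)  # must run before get_next() yields values
    print(sess.run(next_element))   # -> 0
```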
def Serialize(self, writer): writer.WriteUInt32(self.Version) writer.WriteUInt64(self.Services) writer.WriteUInt32(self.Timestamp) writer.WriteUInt16(self.Port) writer.WriteUInt32(self.Nonce) writer.WriteVarString(self.UserAgent) writer.WriteUInt32(self.StartHeight) writer.WriteBool(self.Relay)
Serialize object. Args: writer (neo.IO.BinaryWriter):
juraj-google-style
def is_artifact_optional(chain, task_id, path): upstream_artifacts = chain.task['payload'].get('upstreamArtifacts', []) optional_artifacts_per_task_id = get_optional_artifacts_per_task_id(upstream_artifacts) return path in optional_artifacts_per_task_id.get(task_id, [])
Tells whether an artifact is flagged as optional or not.

Args:
    chain (ChainOfTrust): the chain of trust object
    task_id (str): the id of the aforementioned task
    path (str): the path of the artifact within the task

Returns:
    bool: True if the artifact is optional
juraj-google-style
async def import_image(self, data, stream: bool = False): headers = {"Content-Type": "application/x-tar"} response = await self.docker._query_chunked_post( "images/load", "POST", data=data, headers=headers ) return await json_stream_result(response, stream=stream)
Import a tarball of an image into docker.

Args:
    data: tarball data of the image to be imported
    stream: if True, return the response as a stream of JSON
        progress messages

Returns:
    The JSON-decoded result of the import operation
juraj-google-style
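A hedged async usage sketch with aiodocker; the tarball path is illustrative:

```python
import asyncio
import aiodocker

async def main():
    docker = aiodocker.Docker()
    with open("image.tar", "rb") as f:  # illustrative tarball path
        result = await docker.images.import_image(data=f.read())
    print(result)
    await docker.close()

asyncio.run(main())
```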
def list_datasets(self, get_global_public):
    appending = ""
    if get_global_public:
        appending = "public"
    url = self.url() + "/resource/{}dataset/".format(appending)
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))
    else:
        return req.json()
Lists datasets in resources. Setting 'get_global_public' to 'True'
will retrieve all public datasets in the cloud. 'False' will get
only the user's public datasets.

Arguments:
    get_global_public (bool): True if user wants all public datasets in
        the cloud. False if user wants only their public datasets.

Returns:
    dict: Returns datasets in JSON format
juraj-google-style
def _AddShardedRestoreOps(self, filename_tensor, per_device, restore_sequentially, reshape): sharded_restores = [] for shard, (device, saveables) in enumerate(per_device): with ops.device(device): sharded_restores.append(self._AddRestoreOps(filename_tensor, saveables, restore_sequentially, reshape, preferred_shard=shard, name='restore_shard')) return control_flow_ops.group(*sharded_restores, name='restore_all')
Add Ops to restore variables from multiple devices. Args: filename_tensor: Tensor for the path of the file to load. per_device: A list of (device, SaveableObject) pairs, as returned by _GroupByDevices(). restore_sequentially: True if we want to restore variables sequentially within a shard. reshape: True if we want to reshape loaded tensors to the shape of the corresponding variable. Returns: An Operation that restores the variables.
github-repos
def desc_from_uri(uri): if (':' in uri): (_, uri) = uri.split(':', 1) query_string = parse_qs(urlparse(uri, 'http').query) if query_string.get('sn'): account_serial_number = query_string['sn'][0] try: account = Account.get_accounts()[account_serial_number] desc = 'SA_RINCON{}_{}'.format(account.service_type, account.username) return desc except KeyError: pass if query_string.get('sid'): service_id = query_string['sid'][0] for service in MusicService._get_music_services_data().values(): if (service_id == service['ServiceID']): service_type = service['ServiceType'] account = Account.get_accounts_for_service(service_type) if (not account): break account = account[0] desc = 'SA_RINCON{}_{}'.format(account.service_type, account.username) return desc desc = 'RINCON_AssociatedZPUDN' return desc
Create the content of DIDL desc element from a uri. Args: uri (str): A uri, eg: ``'x-sonos-http:track%3a3402413.mp3?sid=2&amp;flags=32&amp;sn=4'`` Returns: str: The content of a desc element for that uri, eg ``'SA_RINCON519_email@example.com'``
codesearchnet
def getOption(self, name): try: value = lock_and_call((lambda : self._impl.getOption(name).value()), self._lock) except RuntimeError: return None else: try: return int(value) except ValueError: try: return float(value) except ValueError: return value
Get the current value of the specified option. If the option does not
exist, returns None.

Args:
    name: Option name.

Returns:
    Value of the option.

Raises:
    InvalidArgument: if the option name is not valid.
codesearchnet
def get_query_columns(engine, query):
    # Use a context manager so the connection is released after the probe row
    # is fetched (the original left it open).
    with engine.connect() as con:
        result = con.execute(query).fetchone()
        values = list(result)
        cols_names = result.keys()
    cols = OrderedDict()
    for name, value in zip(cols_names, values):
        cols[name] = type(value).__name__
    return cols
Extract column names and Python types from a query

Args:
    engine: SQLAlchemy connection engine
    query: SQL query

Returns:
    dict with column names and Python types
juraj-google-style
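A usage sketch assuming SQLAlchemy 1.x-style string execution, which the function itself relies on; the database path is illustrative:

```python
from sqlalchemy import create_engine

engine = create_engine("sqlite:///demo.db")  # illustrative path
with engine.connect() as con:
    con.execute("CREATE TABLE t (id INTEGER, name TEXT)")
    con.execute("INSERT INTO t VALUES (1, 'a')")

print(get_query_columns(engine, "SELECT id, name FROM t"))
# -> OrderedDict([('id', 'int'), ('name', 'str')])
```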
def heightmap_get_minmax(hm: np.ndarray) -> Tuple[float, float]: mi = ffi.new('float *') ma = ffi.new('float *') lib.TCOD_heightmap_get_minmax(_heightmap_cdata(hm), mi, ma) return (mi[0], ma[0])
Return the min and max values of this heightmap. Args: hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions. Returns: Tuple[float, float]: The (min, max) values. .. deprecated:: 2.0 Use ``hm.min()`` or ``hm.max()`` instead.
codesearchnet
def stop_replace(self, accountID, orderID, **kwargs): return self.replace( accountID, orderID, order=StopOrderRequest(**kwargs) )
Shortcut to replace a pending Stop Order in an Account Args: accountID : The ID of the Account orderID : The ID of the Stop Order to replace kwargs : The arguments to create a StopOrderRequest Returns: v20.response.Response containing the results from submitting the request
juraj-google-style
def implement(self, implementation, for_type=None, for_types=None): unbound_implementation = self.__get_unbound_function(implementation) for_types = self.__get_types(for_type, for_types) for t in for_types: self._write_lock.acquire() try: self.implementations.append((t, unbound_implementation)) finally: self._write_lock.release()
Registers an implementing function for for_type. Arguments: implementation: Callable implementation for this type. for_type: The type this implementation applies to. for_types: Same as for_type, but takes a tuple of types. for_type and for_types cannot both be passed (for obvious reasons.) Raises: ValueError
juraj-google-style
def _to_map_job_config(cls, mr_spec, queue_name): mapper_spec = mr_spec.mapper api_version = mr_spec.params.get('api_version', 0) old_api = (api_version == 0) input_reader_cls = mapper_spec.input_reader_class() input_reader_params = input_readers._get_params(mapper_spec) if issubclass(input_reader_cls, input_reader.InputReader): input_reader_params = input_reader_cls.params_from_json(input_reader_params) output_writer_cls = mapper_spec.output_writer_class() output_writer_params = output_writers._get_params(mapper_spec) return cls(_lenient=old_api, job_name=mr_spec.name, job_id=mr_spec.mapreduce_id, mapper=util.for_name(mapper_spec.handler_spec), input_reader_cls=input_reader_cls, input_reader_params=input_reader_params, output_writer_cls=output_writer_cls, output_writer_params=output_writer_params, shard_count=mapper_spec.shard_count, queue_name=queue_name, user_params=mr_spec.params.get('user_params'), shard_max_attempts=mr_spec.params.get('shard_max_attempts'), done_callback_url=mr_spec.params.get('done_callback'), _force_writes=mr_spec.params.get('force_writes'), _base_path=mr_spec.params['base_path'], _task_max_attempts=mr_spec.params.get('task_max_attempts'), _task_max_data_processing_attempts=mr_spec.params.get('task_max_data_processing_attempts'), _hooks_cls=util.for_name(mr_spec.hooks_class_name), _app=mr_spec.params.get('app_id'), _api_version=api_version)
Converts model.MapreduceSpec back to JobConfig. This method allows our internal methods to use JobConfig directly. This method also allows us to expose JobConfig as an API during execution, despite that it is not saved into datastore. Args: mr_spec: model.MapreduceSpec. queue_name: queue name. Returns: The JobConfig object for this job.
codesearchnet
def configure_interface(self, name, commands): commands = make_iterable(commands) commands.insert(0, ('interface %s' % name)) return self.configure(commands)
Configures the specified interface with the commands Args: name (str): The interface name to configure commands: The commands to configure in the interface Returns: True if the commands completed successfully
codesearchnet
def query(self, query): if (str(query.key) in self._items): return query(self._items[str(query.key)].values()) else: return query([])
Returns an iterable of objects matching criteria expressed in `query`

Naively applies the query operations on the objects within the
namespaced collection corresponding to ``query.key.path``.

Args:
    query: Query object describing the objects to return.

Returns:
    iterable cursor with all objects matching criteria
codesearchnet
def calculate_part_visibility(self, ports): source_port_lookup = {} for (part_name, port_infos) in SourcePortInfo.filter_parts(ports).items(): for port_info in port_infos: source_port_lookup[port_info.connected_value] = (part_name, port_info.port) for (part_name, port_infos) in SinkPortInfo.filter_parts(ports).items(): for port_info in port_infos: if (port_info.value != port_info.disconnected_value): (conn_part, port) = source_port_lookup.get(port_info.value, (None, None)) if (conn_part and (port == port_info.port)): if (conn_part not in self.part_visibility): self.part_visibility[conn_part] = True if (part_name not in self.part_visibility): self.part_visibility[part_name] = True
Calculate what is connected to what Args: ports: {part_name: [PortInfo]} from other ports
codesearchnet
def passgen(length=12, punctuation=False, digits=True, letters=True, case='both', **kwargs): p_min = punctuation p_max = (0 if (punctuation is False) else length) d_min = digits d_max = (0 if (digits is False) else length) a_min = letters a_max = (0 if (letters is False) else length) if (((d_min + p_min) + a_min) > length): raise ValueError('Minimum punctuation and digits number cannot be greater than length') if ((not digits) and (not letters)): raise ValueError('digits and letters cannot be False at the same time') if (length < 1): raise ValueError('length must be greater than zero') if letters: if (case == 'both'): alpha = (string.ascii_uppercase + string.ascii_lowercase) elif (case == 'upper'): alpha = string.ascii_uppercase elif (case == 'lower'): alpha = string.ascii_lowercase else: raise ValueError("case can only be 'both', 'upper' or 'lower'") else: alpha = (string.ascii_uppercase + string.ascii_lowercase) if punctuation: limit_punctuation = kwargs.get('limit_punctuation', '') if (limit_punctuation == ''): punctuation_set = string.punctuation else: punctuation_set = ''.join([p for p in limit_punctuation if (p in string.punctuation)]) else: punctuation_set = string.punctuation srandom = random.SystemRandom() p_generator = Generator(punctuation_set, srandom, p_min, p_max) d_generator = Generator(string.digits, srandom, d_min, d_max) a_generator = Generator(alpha, srandom, a_min, a_max) main_generator = SuperGenerator(srandom, length, length) main_generator.add(p_generator) main_generator.add(a_generator) main_generator.add(d_generator) chars = [] for i in main_generator: chars.append(i) # Shuffle with the CSPRNG directly. The old call passed srandom as # shuffle's second positional argument (a 0-1 random function, removed in # Python 3.11), so it raised and always fell back to the non-secure # module-level random.shuffle. srandom.shuffle(chars) return ''.join(chars)
Generate random password.

Args:
    length (int): The length of the password. Must be greater than
        zero. Defaults to 12.

    punctuation (bool): Whether to use punctuation or not. Defaults
        to False.

    limit_punctuation (str): Limits the allowed punctuation to defined
        characters.

    digits (bool): Whether to use digits or not. Defaults
        to True. One of *digits* and *letters* must be True.

    letters (bool): Whether to use letters or not. Defaults
        to True. One of *digits* and *letters* must be True.

    case (str): Letter case to use. Accepts 'upper' for upper case,
        'lower' for lower case, and 'both' for both. Defaults to 'both'.

Returns:
    str. The generated password.

Raises:
    ValueError

Below are some basic examples.

>>> passgen()
z7GlutdEEbnk
>>> passgen(case='upper')
Q81J9DOAMBRN
>>> passgen(length=6)
EzJMRX
codesearchnet
def results(self, use_cache=True, dialect=None, billing_tier=None): if ((not use_cache) or (self._results is None)): self.execute(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier) return self._results.results
Retrieves table of results for the query. May block if the query must be executed first. Args: use_cache: whether to use cached results or not. Ignored if append is specified. dialect : {'legacy', 'standard'}, default 'legacy' 'legacy' : Use BigQuery's legacy SQL dialect. 'standard' : Use BigQuery's standard SQL (beta), which is compliant with the SQL 2011 standard. billing_tier: Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default. This can also be used to override your project-wide default billing tier on a per-query basis. Returns: A QueryResultsTable containing the result set. Raises: Exception if the query could not be executed or query response was malformed.
codesearchnet
def get_log_file_name(level=INFO): if level not in converter.ABSL_LEVELS: raise ValueError('Invalid absl.logging level {}'.format(level)) stream = get_absl_handler().python_handler.stream if (stream == sys.stderr or stream == sys.stdout or not hasattr(stream, 'name')): return '' else: return stream.name
Returns the name of the log file. For Python logging, only one file is used and level is ignored. And it returns empty string if it logs to stderr/stdout or the log stream has no `name` attribute. Args: level: int, the absl.logging level. Raises: ValueError: Raised when `level` has an invalid value.
juraj-google-style
def rate_to_mcs(rate, bw=20, long_gi=True): if bw not in [20, 40, 80, 160]: raise Exception("Unknown bandwidth: %d MHz" % (bw)) idx = int((math.log(bw/10, 2)-1)*2) if not long_gi: idx += 1 for mcs, rates in MCS_TABLE.items(): if abs(rates[idx] - rate) < 1e-3: return mcs for idx, r in enumerate(DOT11A_RATES): if abs(r-rate) < 1e-3: return idx raise Exception("MCS not found: rate=%f, bw=%d, long_gi=%s" % (rate, bw, long_gi))
Convert bit rate to MCS index. Args: rate (float): bit rate in Mbps bw (int): bandwidth, 20, 40, 80, ... long_gi (bool): True if long GI is used. Returns: mcs (int): MCS index >>> rate_to_mcs(120, bw=40, long_gi=False) 5
juraj-google-style
def call(self, inputs, training=None, mask=None): raise NotImplementedError('When subclassing the `Model` class, you should implement a `call` method.')
Calls the model on new inputs. In this case `call` just reapplies all ops in the graph to the new inputs (e.g. build a new computational graph from the provided inputs). Note: This method should not be called directly. It is only meant to be overridden when subclassing `tf.keras.Model`. To call a model on an input, always use the `__call__` method, i.e. `model(inputs)`, which relies on the underlying `call` method. Args: inputs: Input tensor, or dict/list/tuple of input tensors. training: Boolean or boolean scalar tensor, indicating whether to run the `Network` in training mode or inference mode. mask: A mask or list of masks. A mask can be either a tensor or None (no mask). Returns: A tensor if there is a single output, or a list of tensors if there are more than one outputs.
github-repos
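The canonical subclassing pattern overrides `call` and invokes the model through `__call__`:

```python
import tensorflow as tf

class TwoLayerNet(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.dense1 = tf.keras.layers.Dense(32, activation="relu")
        self.dense2 = tf.keras.layers.Dense(1)

    def call(self, inputs, training=None):
        x = self.dense1(inputs)
        return self.dense2(x)

model = TwoLayerNet()
out = model(tf.zeros([4, 8]))  # use __call__, never model.call(...) directly
print(out.shape)               # -> (4, 1)
```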
def _build_cryptographic_parameters(self, value): if value is None: return None elif not isinstance(value, dict): raise TypeError("Cryptographic parameters must be a dictionary.") cryptographic_parameters = CryptographicParameters( block_cipher_mode=value.get('block_cipher_mode'), padding_method=value.get('padding_method'), hashing_algorithm=value.get('hashing_algorithm'), key_role_type=value.get('key_role_type'), digital_signature_algorithm=value.get( 'digital_signature_algorithm' ), cryptographic_algorithm=value.get('cryptographic_algorithm'), random_iv=value.get('random_iv'), iv_length=value.get('iv_length'), tag_length=value.get('tag_length'), fixed_field_length=value.get('fixed_field_length'), invocation_field_length=value.get('invocation_field_length'), counter_length=value.get('counter_length'), initial_counter_value=value.get('initial_counter_value') ) return cryptographic_parameters
Build a CryptographicParameters struct from a dictionary. Args: value (dict): A dictionary containing the key/value pairs for a CryptographicParameters struct. Returns: None: if value is None CryptographicParameters: a CryptographicParameters struct Raises: TypeError: if the input argument is invalid
juraj-google-style
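A hedged sketch of the expected dictionary, assuming PyKMIP's enums; `client` stands in for whatever object exposes this builder, and the parameter choices are illustrative:

```python
from kmip.core import enums

params = client._build_cryptographic_parameters({  # client is hypothetical
    'block_cipher_mode': enums.BlockCipherMode.CBC,
    'padding_method': enums.PaddingMethod.PKCS5,
    'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
    'random_iv': True,
})
```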