Columns:
code: string, lengths 20 to 4.93k
docstring: string, lengths 33 to 1.27k
source: string, 3 classes (juraj-google-style, codesearchnet, github-repos)
def parse_fs_url(fs_url): match = _RE_FS_URL.match(fs_url) if match is None: raise ParseError("{!r} is not a fs2 url".format(fs_url)) fs_name, credentials, url1, url2, path = match.groups() if not credentials: username = None password = None url = url2 else: username, _, password = credentials.partition(":") username = unquote(username) password = unquote(password) url = url1 url, has_qs, qs = url.partition("?") resource = unquote(url) if has_qs: _params = parse_qs(qs, keep_blank_values=True) params = {k: unquote(v[0]) for k, v in six.iteritems(_params)} else: params = {} return ParseResult(fs_name, username, password, resource, params, path)
Parse a Filesystem URL and return a `ParseResult`. Arguments: fs_url (str): A filesystem URL. Returns: ~fs.opener.parse.ParseResult: a parse result instance. Raises: ~fs.errors.ParseError: if the FS URL is not valid.
juraj-google-style
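The row above is easier to follow next to a stdlib-only sketch of the same splitting steps. The URL below and the expected values in the comments are illustrative, not taken from the library's test suite.

```python
# Minimal sketch of what the parser above extracts from a typical FS URL.
from urllib.parse import unquote, parse_qs

fs_url = "ftp://user:p%40ss@ftp.example.org/pub?timeout=30"
scheme, _, rest = fs_url.partition("://")           # 'ftp', 'user:p%40ss@ftp.example.org/pub?timeout=30'
creds, _, location = rest.rpartition("@")           # 'user:p%40ss', 'ftp.example.org/pub?timeout=30'
username, _, password = creds.partition(":")
resource, _, qs = location.partition("?")

print(scheme, unquote(username), unquote(password))         # ftp user p@ss
print(resource)                                             # ftp.example.org/pub
print({k: unquote(v[0]) for k, v in parse_qs(qs).items()})  # {'timeout': '30'}
```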
def has_basal_dendrite(neuron, min_number=1, treefun=_read_neurite_type): types = [treefun(n) for n in neuron.neurites] return CheckResult(types.count(NeuriteType.basal_dendrite) >= min_number)
Check if a neuron has basal dendrites Arguments: neuron(Neuron): The neuron object to test min_number: minimum number of basal dendrites required treefun: Optional function to calculate the tree type of neuron's neurites Returns: CheckResult with result
juraj-google-style
def add_inputs(self, mutable_accumulator, elements, *args, **kwargs): for element in elements: mutable_accumulator = self.add_input(mutable_accumulator, element, *args, **kwargs) return mutable_accumulator
Returns the result of folding each element in elements into accumulator. This is provided in case the implementation affords more efficient bulk addition of elements. The default implementation simply loops over the inputs invoking add_input for each one. Args: mutable_accumulator: the current accumulator, may be modified and returned for efficiency elements: the elements to add, should not be mutated *args: Additional arguments and side inputs. **kwargs: Additional arguments and side inputs.
github-repos
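A minimal, self-contained illustration of the fold described above; the SumFn class is a hypothetical stand-in, not part of Apache Beam's API.

```python
# Hedged sketch of the accumulate-then-fold pattern the method implements.
class SumFn:
    def create_accumulator(self):
        return 0

    def add_input(self, acc, element):
        return acc + element

    def add_inputs(self, acc, elements):
        # Default bulk addition: loop over inputs, folding each into the accumulator.
        for element in elements:
            acc = self.add_input(acc, element)
        return acc

fn = SumFn()
print(fn.add_inputs(fn.create_accumulator(), [1, 2, 3]))  # 6
```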
def Sign(self, data, signing_key, verify_key=None): if (signing_key.KeyLen() < 2048): logging.warning('signing key is too short.') self.signature = signing_key.Sign(data) self.signature_type = self.SignatureType.RSA_PKCS1v15 self.digest = hashlib.sha256(data).digest() self.digest_type = self.HashType.SHA256 self.data = data if (verify_key is None): verify_key = signing_key.GetPublicKey() self.Verify(verify_key) return self
Use the data to sign this blob. Args: data: String containing the blob data. signing_key: The key to sign with. verify_key: Key to verify with. If None we assume the signing key also contains the public key. Returns: self for call chaining.
codesearchnet
def str2dict_keys(str_in): tmp_dict = str2dict(str_in) if (tmp_dict is None): return None return sorted([k for k in tmp_dict])
Extracts the keys from a string that represents a dict and returns them sorted by key. Args: str_in (string) that contains python dict Returns: (list) with keys or None if no valid dict was found Raises: -
codesearchnet
def gather_dilated_memory_blocks(x, num_memory_blocks, gap_size, query_block_size, memory_block_size, gather_indices, direction='left'): gathered_blocks = [] for block_id in range(num_memory_blocks): block_end_index = (- ((query_block_size + (gap_size * (block_id + 1))) + (memory_block_size * block_id))) block_start_index = ((memory_block_size + gap_size) * (num_memory_blocks - (block_id + 1))) if (direction != 'left'): [block_end_index, block_start_index] = [(- block_start_index), (- block_end_index)] if (block_end_index == 0): x_block = x[block_start_index:] else: x_block = x[block_start_index:block_end_index] def gather_dilated_1d_blocks(x, gather_indices): x_new = tf.gather(x, gather_indices) return tf.transpose(x_new, [2, 3, 0, 1, 4]) gathered_blocks.append(gather_dilated_1d_blocks(x_block, gather_indices)) return tf.concat(gathered_blocks, 3)
Gathers blocks with gaps in between. Args: x: Tensor of shape [length, batch, heads, depth] num_memory_blocks: how many memory blocks to look in "direction". Each will be separated by gap_size. gap_size: an integer indicating the gap size query_block_size: an integer indicating size of query block memory_block_size: an integer indicating the size of a memory block. gather_indices: The indices to gather from. direction: left or right Returns: Tensor of shape [batch, heads, blocks, block_length, depth]
codesearchnet
def normalize_bytes2str(x): if isinstance(x, str): return x if isinstance(x, bytes): return x.decode('utf8') elif is_array_str(x): return _to_str_array(x) else: return x
Normalize `bytes` array to `str` (UTF-8). Example of usage: ```python for ex in tfds.as_numpy(ds): # tf.data returns `tf.string` as `bytes` ex = tf.nest.map_structure(enp.normalize_bytes2str, ex) ``` Args: x: Any array Returns: x: `bytes` array are decoded as `str`
github-repos
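A small sketch of the scalar branch of this normalization; the array branch (is_array_str / _to_str_array) is internal to the library and is not reproduced here.

```python
# Decode bytes values to str, leave everything else untouched (illustrative only).
def normalize_scalar(x):
    return x.decode("utf8") if isinstance(x, bytes) else x

example = {"label": b"cat", "split": "train", "score": 0.9}
print({k: normalize_scalar(v) for k, v in example.items()})
# {'label': 'cat', 'split': 'train', 'score': 0.9}
```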
def load(cls, config: Optional[Config]=None): if (cls._dfk is not None): raise RuntimeError('Config has already been loaded') if (config is None): cls._dfk = DataFlowKernel(Config()) else: cls._dfk = DataFlowKernel(config) return cls._dfk
Load a DataFlowKernel. Args: - config (Config) : Configuration to load. This config will be passed to a new DataFlowKernel instantiation which will be set as the active DataFlowKernel. Returns: - DataFlowKernel : The loaded DataFlowKernel object.
codesearchnet
def GetValidHostsForCert(cert): if 'subjectAltName' in cert: return [x[1] for x in cert['subjectAltName'] if x[0].lower() == 'dns'] else: return [x[0][1] for x in cert['subject'] if x[0][0].lower() == 'commonname']
Returns a list of valid host globs for an SSL certificate. Args: cert: A dictionary representing an SSL certificate. Returns: list: A list of valid host globs.
juraj-google-style
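A quick check against the nested-tuple shape returned by ssl.SSLSocket.getpeercert(); the certificate fields below are made-up values for illustration.

```python
# Mirror of the host extraction above, run on a fabricated certificate dict.
cert = {
    "subject": ((("commonName", "example.com"),),),
    "subjectAltName": (("DNS", "example.com"), ("DNS", "*.example.com")),
}

if "subjectAltName" in cert:
    hosts = [value for key, value in cert["subjectAltName"] if key.lower() == "dns"]
else:
    hosts = [entry[0][1] for entry in cert["subject"] if entry[0][0].lower() == "commonname"]

print(hosts)  # ['example.com', '*.example.com']
```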
def _iflat_tasks_wti(self, status=None, op='==', nids=None, with_wti=True): nids = as_set(nids) if (status is None): for (wi, work) in enumerate(self): for (ti, task) in enumerate(work): if (nids and (task.node_id not in nids)): continue if with_wti: (yield (task, wi, ti)) else: (yield task) else: op = operator_from_str(op) status = Status.as_status(status) for (wi, work) in enumerate(self): for (ti, task) in enumerate(work): if (nids and (task.node_id not in nids)): continue if op(task.status, status): if with_wti: (yield (task, wi, ti)) else: (yield task)
Generators that produces a flat sequence of task. if status is not None, only the tasks with the specified status are selected. nids is an optional list of node identifiers used to filter the tasks. Returns: (task, work_index, task_index) if with_wti is True else task
codesearchnet
def execute_workflow(self, directory: str) -> None: thread = current_thread() print(f'Executing directory {directory} on thread {thread.name}...') for path, subdirs, files in os.walk(directory): s = os.path.join(path, 'service.json') if os.path.isfile(s): project = self.get_project_from_service(s) service = s else: project = self.get_project_from_vm() service = 'DEFAULT' for filename in files: if filename != 'service.json': workflow = os.path.join(path, filename) if os.path.isfile(workflow): command = f'python3 starthinker/tool/recipe.py {workflow} -s {service} -p {project} --verbose' self.execute_command(command) print(f'Finished executing workflows in directory {directory}.')
Executes workflows in the provided directory, one per thread Args: - directory: (string) The directory with the workflow JSON files to execute
github-repos
def __call__(self, token, device, args): func = self.get(token, None) if func is None: raise ValueError(f'Could not find callback with key={token} in the registry.') if isinstance(func, EagerFunc): return func(device, token, args) else: ret = func(*args) if isinstance(ret, bytes): ret = [ret] if isinstance(ret, (tuple, list)): return [self._convert(x) for x in ret] else: return self._convert(ret)
Calls the registered function for `token` with args. Args: token: A key into this `FuncRegistry` identifying which function to call. device: Name of the device on which outputs of `token`'s corresponding operation should be placed. Used iff the function registered for `token` is an EagerPyFunc. args: The arguments to pass to the function registered for `token`. Returns: The output of the function registered for `token`. Raises: ValueError: if no function is registered for `token`.
github-repos
def __init__(self, zslgen=ZSLGenerator(), film_max_miller=1, substrate_max_miller=1): self.zsl = zslgen self.film_max_miller = film_max_miller self.substrate_max_miller = substrate_max_miller
Initializes the substrate analyzer. Args: zslgen(ZSLGenerator): Defaults to a ZSLGenerator with standard tolerances, but can be fed one with custom tolerances film_max_miller(int): maximum Miller index to generate for film surfaces substrate_max_miller(int): maximum Miller index to generate for substrate surfaces
juraj-google-style
def get_board(self, **query_params): board_json = self.get_board_json(self.base_uri, query_params=query_params) return self.create_board(board_json)
Get board information for this card. Returns a Board object. Returns: Board: The board this card is attached to
codesearchnet
def add_annotation(self, subj: URIRef, pred: URIRef, obj: Union[(Literal, URIRef)], a_p: URIRef, a_o: Union[(Literal, URIRef)]) -> BNode: bnode: BNode = self.triple2annotation_bnode.get((subj, pred, obj)) if (not bnode): a_s: BNode = BNode() self.triple2annotation_bnode[(subj, pred, obj)]: BNode = a_s self.g.add((a_s, RDF.type, OWL.Axiom)) self.g.add((a_s, OWL.annotatedSource, self.process_subj_or_pred(subj))) self.g.add((a_s, OWL.annotatedProperty, self.process_subj_or_pred(pred))) self.g.add((a_s, OWL.annotatedTarget, self.process_obj(obj))) else: a_s: BNode = bnode self.g.add((a_s, self.process_subj_or_pred(a_p), self.process_obj(a_o))) return bnode
Adds an annotation to the rdflib graph. The annotation axiom will be filled in if this is a new annotation for the triple. Args: subj: Entity subject to be annotated pred: Entity predicate anchor to be annotated obj: Entity object anchor to be annotated a_p: Annotation predicate a_o: Annotation object Returns: A BNode which is an address to the location in the RDF graph that is storing the annotation information.
codesearchnet
def EnableNetworkInterfaces( self, interfaces, logger, dhclient_script=None): helpers.CallDhclient(interfaces, logger, dhclient_script=dhclient_script)
Enable the list of network interfaces. Args: interfaces: list of string, the output device names to enable. logger: logger object, used to write to SysLog and serial port. dhclient_script: string, the path to a dhclient script used by dhclient.
juraj-google-style
def decode(self, image_tokens: torch.LongTensor) -> torch.FloatTensor: if image_tokens.shape[1] != self.quantize.quant_state_dims[0] * self.quantize.quant_state_dims[1]: raise ValueError(f'Expected `image_tokens` to have shape `(batch_size, {self.quantize.quant_state_dims[0] * self.quantize.quant_state_dims[1]})`, but got shape `{image_tokens.shape}`.') codebook_entry = self.quantize.get_codebook_entry(image_tokens) hidden_states = self.post_quant_conv(codebook_entry) pixel_values = self.decoder(hidden_states) return pixel_values
Decodes quantized token IDs into pixel values. Args: image_tokens (torch.LongTensor): Batch of token IDs. Returns: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): Pixel values decoded from the token IDs.
github-repos
def wrap_http_for_auth(credentials, http): orig_request_method = http.request def new_request(uri, method='GET', body=None, headers=None, redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None): if not credentials.access_token: _LOGGER.info('Attempting refresh to obtain ' 'initial access_token') credentials._refresh(orig_request_method) headers = _initialize_headers(headers) credentials.apply(headers) _apply_user_agent(headers, credentials.user_agent) body_stream_position = None if all(getattr(body, stream_prop, None) for stream_prop in _STREAM_PROPERTIES): body_stream_position = body.tell() resp, content = request(orig_request_method, uri, method, body, clean_headers(headers), redirections, connection_type) max_refresh_attempts = 2 for refresh_attempt in range(max_refresh_attempts): if resp.status not in REFRESH_STATUS_CODES: break _LOGGER.info('Refreshing due to a %s (attempt %s/%s)', resp.status, refresh_attempt + 1, max_refresh_attempts) credentials._refresh(orig_request_method) credentials.apply(headers) if body_stream_position is not None: body.seek(body_stream_position) resp, content = request(orig_request_method, uri, method, body, clean_headers(headers), redirections, connection_type) return resp, content http.request = new_request http.request.credentials = credentials
Prepares an HTTP object's request method for auth. Wraps HTTP requests with logic to catch auth failures (typically identified via a 401 status code). In the event of failure, tries to refresh the token used and then retry the original request. Args: credentials: Credentials, the credentials used to identify the authenticated user. http: httplib2.Http, an http object to be used to make auth requests.
juraj-google-style
def get_common_register(start, end): registers = defaultdict(int) for line in lines(start, end): insn = line.insn for operand in insn.operands: if (not operand.type.has_phrase): continue if (not operand.base): continue register_name = operand.base registers[register_name] += 1 return max(registers.iteritems(), key=operator.itemgetter(1))[0]
Get the register most commonly used in accessing structs. Access is considered for every opcode that accesses memory at an offset from a register:: mov eax, [ebx + 5] For every access, the struct-referencing register, in this case `ebx`, is counted. The most used one is returned. Args: start: The address to start at end: The address to finish at
codesearchnet
def replace_batch_norm(model): for name, module in model.named_children(): if isinstance(module, nn.BatchNorm2d): new_module = TableTransformerFrozenBatchNorm2d(module.num_features) if not module.weight.device == torch.device('meta'): new_module.weight.data.copy_(module.weight) new_module.bias.data.copy_(module.bias) new_module.running_mean.data.copy_(module.running_mean) new_module.running_var.data.copy_(module.running_var) model._modules[name] = new_module if len(list(module.children())) > 0: replace_batch_norm(module)
Recursively replace all `torch.nn.BatchNorm2d` with `TableTransformerFrozenBatchNorm2d`. Args: model (torch.nn.Module): input model
github-repos
def save_data(data, file_fmt, append=False, drop_dups=None, info=None, **kwargs): d_file = data_file(file_fmt=file_fmt, info=info, **kwargs) if (append and files.exists(d_file)): data = pd.DataFrame(pd.concat([pd.read_parquet(d_file), data], sort=False)) if (drop_dups is not None): data.drop_duplicates(subset=utils.tolist(drop_dups), inplace=True) if (not data.empty): data.to_parquet(d_file) return data
Save data to file Args: data: pd.DataFrame file_fmt: data file format in terms of f-strings append: whether to append data to existing data drop_dups: list, drop duplicates in columns info: dict, information to be hashed and passed to f-strings **kwargs: additional parameters for f-strings Examples: >>> data = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b']) >>> # save_data( >>> # data, '{ROOT}/daily/{typ}.parq', >>> # ROOT='tests/data', typ='earnings' >>> # )
codesearchnet
def _finalize_namespaces(self, ns_dict=None): if ns_dict: for (ns, alias) in six.iteritems(ns_dict): self._collected_namespaces.add_namespace_uri(ns, alias) self._collected_namespaces.add_namespace_uri(ns_uri=idgen.get_id_namespace(), prefix=idgen.get_id_namespace_alias()) self._fix_example_namespace() for (prefix, uri) in six.iteritems(self._input_namespaces): self._collected_namespaces.add_namespace_uri(uri, prefix) self._collected_namespaces.import_from(namespaces.XML_NAMESPACES) for ns_uri in self._collected_namespaces.namespace_uris: preferred_prefix = self._collected_namespaces.preferred_prefix_for_namespace(ns_uri) if preferred_prefix: continue prefixes = self._collected_namespaces.get_prefixes(ns_uri) if prefixes: prefix = next(iter(prefixes)) else: prefix = namespaces.lookup_name(ns_uri) if (prefix is None): raise namespaces.NoPrefixesError(ns_uri) self._collected_namespaces.set_preferred_prefix_for_namespace(ns_uri=ns_uri, prefix=prefix, add_if_not_exist=True)
Returns a dictionary of namespaces to be exported with an XML document. This loops over all the namespaces that were discovered and built during the execution of ``collect()`` and ``_parse_collected_classes()`` and attempts to merge them all. Raises: .namespaces.DuplicatePrefixError: If namespace prefix was mapped to more than one namespace. .namespaces.NoPrefixError: If a namespace was collected that is not mapped to a prefix.
codesearchnet
def get_central_coors(self, row, col): if row < 0 or row >= self.nRows or col < 0 or col >= self.nCols: raise ValueError("The row (%d) or col (%d) must be >=0 and less than " "nRows (%d) or nCols (%d)!" % (row, col, self.nRows, self.nCols)) else: tmpx = self.xMin + (col + 0.5) * self.dx tmpy = self.yMax - (row + 0.5) * self.dx return tmpx, tmpy
Get the coordinates of central grid. Args: row: row number, range from 0 to (nRows - 1). col: col number, range from 0 to (nCols - 1). Returns: XY coordinates. If the row or col are invalid, raise ValueError.
juraj-google-style
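A worked instance of the cell-center arithmetic above, assuming a 10 m grid with its upper-left corner at (500000, 4100000); the coordinates are arbitrary.

```python
# Arithmetic check of the cell-center formula for row 2, column 3.
xMin, yMax, dx = 500000.0, 4100000.0, 10.0
row, col = 2, 3
center_x = xMin + (col + 0.5) * dx   # 500035.0
center_y = yMax - (row + 0.5) * dx   # 4099975.0
print(center_x, center_y)
```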
def _track_trackable(self, trackable, name, overwrite=False): self._maybe_initialize_trackable() if not isinstance(trackable, Trackable): raise TypeError(f'Trackable._track_trackable() can only be used to track objects of type Trackable. Got type {type(trackable)}.') if not getattr(self, '_manual_tracking', True): return trackable new_reference = TrackableReference(name=name, ref=trackable) current_object = self._lookup_dependency(name) if current_object is not None and current_object is not trackable: if not overwrite: raise ValueError(f"Called Trackable._track_trackable() with name='{name}', but a Trackable with this name is already declared as a dependency. Names must be unique (or overwrite=True).") for index, (old_name, _) in enumerate(self._self_unconditional_checkpoint_dependencies): if name == old_name: self._self_unconditional_checkpoint_dependencies[index] = new_reference elif current_object is None: self._self_unconditional_checkpoint_dependencies.append(new_reference) self._handle_deferred_dependencies(name=name, trackable=trackable) self._self_unconditional_dependency_names[name] = trackable return trackable
Declare a dependency on another `Trackable` object. Indicates that checkpoints for this object should include variables from `trackable`. Variables in a checkpoint are mapped to `Trackable`s based on the names provided when the checkpoint was written. To avoid breaking existing checkpoints when modifying a class, neither variable names nor dependency names (the names passed to `_track_trackable`) may change. Args: trackable: A `Trackable` which this object depends on. name: A local name for `trackable`, used for loading checkpoints into the correct objects. overwrite: Boolean, whether silently replacing dependencies is OK. Used for __setattr__, where throwing an error on attribute reassignment would be inappropriate. Returns: `trackable`, for convenience when declaring a dependency and assigning to a member variable in one statement. Raises: TypeError: If `trackable` does not inherit from `Trackable`. ValueError: If another object is already tracked by this name.
github-repos
def _Open(self, path_spec=None, mode='rb'): if not path_spec: raise ValueError('Missing path specification.') data_stream = getattr(path_spec, 'data_stream', None) self._file_system = resolver.Resolver.OpenFileSystem( path_spec, resolver_context=self._resolver_context) file_entry = self._file_system.GetFileEntryByPathSpec(path_spec) if not file_entry: raise IOError('Unable to open file entry.') fsntfs_data_stream = None fsntfs_file_entry = file_entry.GetNTFSFileEntry() if not fsntfs_file_entry: raise IOError('Unable to open NTFS file entry.') if data_stream: fsntfs_data_stream = fsntfs_file_entry.get_alternate_data_stream_by_name( data_stream) if not fsntfs_data_stream: raise IOError('Unable to open data stream: {0:s}.'.format( data_stream)) elif not fsntfs_file_entry.has_default_data_stream(): raise IOError('Missing default data stream.') self._fsntfs_data_stream = fsntfs_data_stream self._fsntfs_file_entry = fsntfs_file_entry
Opens the file-like object defined by path specification. Args: path_spec (PathSpec): path specification. mode (Optional[str]): file access mode. Raises: AccessError: if the access to open the file was denied. IOError: if the file-like object could not be opened. OSError: if the file-like object could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
juraj-google-style
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0): super(GetAttributesResponsePayload, self).read(input_buffer, kmip_version=kmip_version) local_buffer = utils.BytearrayStream(input_buffer.read(self.length)) if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_buffer): unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER) unique_identifier.read(local_buffer, kmip_version=kmip_version) self.unique_identifier = unique_identifier.value else: raise exceptions.InvalidKmipEncoding('The GetAttributes response payload encoding is missing the unique identifier.') if (kmip_version < enums.KMIPVersion.KMIP_2_0): self._attributes = list() while self.is_tag_next(enums.Tags.ATTRIBUTE, local_buffer): attribute = objects.Attribute() attribute.read(local_buffer, kmip_version=kmip_version) self._attributes.append(attribute) elif self.is_tag_next(enums.Tags.ATTRIBUTES, local_buffer): attributes = objects.Attributes() attributes.read(local_buffer, kmip_version=kmip_version) temp_attr = objects.convert_attributes_to_template_attribute(attributes) self._attributes = temp_attr.attributes else: raise exceptions.InvalidKmipEncoding('The GetAttributes response payload encoding is missing the attributes structure.') self.is_oversized(local_buffer)
Read the data encoding the GetAttributes response payload and decode it into its constituent parts. Args: input_buffer (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0.
codesearchnet
def __init__(self, scope, parent, name): CodeEntity.__init__(self, scope, parent) self.name = name self.children = []
Constructor for namespaces. Args: scope (CodeEntity): The program scope where this object belongs. parent (CodeEntity): This object's parent in the program tree. name (str): The name of the namespace in the program.
juraj-google-style
def _full_reduce(nodes): (was_reduced, nodes) = maybe_reduce(nodes) while was_reduced: (was_reduced, nodes) = maybe_reduce(nodes) return nodes
Apply degree reduction to ``nodes`` until it can no longer be reduced. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Args: nodes (numpy.ndarray): The nodes in the curve. Returns: numpy.ndarray: The fully degree-reduced nodes.
codesearchnet
async def _get_person_json(self, id_, url_params=None): url = self.url_builder('person/{person_id}', dict(person_id=id_), url_params=(url_params or OrderedDict())) data = (await self.get_data(url)) return data
Retrieve raw person JSON by ID. Arguments: id_ (:py:class:`int`): The person's TMDb ID. url_params (:py:class:`dict`): Any additional URL parameters. Returns: :py:class:`dict`: The JSON data.
codesearchnet
def add_record_references(self, app_id, record_id, field_id, target_record_ids): self._swimlane.request('post', 'app/{0}/record/{1}/add-references'.format(app_id, record_id), json={'fieldId': field_id, 'targetRecordIds': target_record_ids})
Bulk operation to directly add record references without making any additional requests Warnings: Does not perform any app, record, or target app/record validation Args: app_id (str): Full App ID string record_id (str): Full parent Record ID string field_id (str): Full field ID to target reference field on parent Record string target_record_ids (List(str)): List of full target reference Record ID strings
codesearchnet
def check_cache(resource_type): def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): try: adapter = args[0] key, val = list(kwargs.items())[0] except IndexError: logger.warning("Couldn't generate full index key, skipping cache") else: index_key = (resource_type, key, val) try: cached_record = adapter._swimlane.resources_cache[index_key] except KeyError: logger.debug('Cache miss: `{!r}`'.format(index_key)) else: logger.debug('Cache hit: `{!r}`'.format(cached_record)) return cached_record return func(*args, **kwargs) return wrapper return decorator
Decorator for adapter methods to check cache for resource before normally sending requests to retrieve data Only works with single kwargs, almost always used with @one_of_keyword_only decorator Args: resource_type (type(APIResource)): Subclass of APIResource of cache to be checked when called
juraj-google-style
def lf_empirical_accuracies(L, Y): Y = arraylike_to_numpy(Y) L = L.toarray() X = np.where(L == 0, 0, np.where(L == np.vstack([Y] * L.shape[1]).T, 1, -1)) return 0.5 * (X.sum(axis=0) / (L != 0).sum(axis=0) + 1)
Return the **empirical accuracy** against a set of labels Y (e.g. dev set) for each LF. Args: L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the jth LF to the ith candidate Y: an [n] or [n, 1] np.ndarray of gold labels
juraj-google-style
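A dense NumPy rendering of the same formula with toy labels (0 means the LF abstained); the sparse-to-dense step from the original is skipped.

```python
import numpy as np

L = np.array([[1, 0],
              [2, 2],
              [1, 1]])        # n=3 candidates, m=2 labeling functions
Y = np.array([1, 2, 2])       # gold labels

# +1 where the LF agrees with Y, -1 where it disagrees, 0 where it abstains.
X = np.where(L == 0, 0, np.where(L == Y[:, None], 1, -1))
accs = 0.5 * (X.sum(axis=0) / (L != 0).sum(axis=0) + 1)
print(accs)                   # LF1: 2/3 correct, LF2: 1/2 correct -> [0.666..., 0.5]
```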
def GetTokenBalance(self, token, watch_only=0): total = Decimal(0) if (watch_only > 0): for addr in self._watch_only: balance = token.GetBalance(self, addr) total += balance else: for contract in self._contracts.values(): balance = token.GetBalance(self, contract.Address) total += balance return total
Get the balance of the specified token. Args: token (NEP5Token): an instance of type neo.Wallets.NEP5Token to get the balance from. watch_only (bool): True, to limit to watch only wallets. Returns: Decimal: total balance for `token`.
codesearchnet
def add_param_summary(*summary_lists, **kwargs): collections = kwargs.pop('collections', None) assert (len(kwargs) == 0), ('Unknown kwargs: ' + str(kwargs)) ctx = get_current_tower_context() if ((ctx is not None) and (not ctx.is_main_training_tower)): return params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) with cached_name_scope('param-summary'): for p in params: name = p.op.name for (rgx, actions) in summary_lists: if (not rgx.endswith('$')): rgx = (rgx + '$') if re.match(rgx, name): add_tensor_summary(p, actions, name=name, collections=collections)
Add summary ops for all trainable variables matching the regex, under a reused 'param-summary' name scope. This function is a no-op if not calling from main training tower. Args: summary_lists (list): each is (regex, [list of summary type]). Summary type is defined in :func:`add_tensor_summary`. collections (list[str]): collections of the summary ops. Example: .. code-block:: python add_param_summary( ('.*/W', ['histogram', 'rms']), ('.*/gamma', ['scalar']), )
codesearchnet
def forbidden(cls, errors=None): if cls.expose_status: cls.response.content_type = 'application/json' cls.response._status_line = '403 Forbidden' return cls(403, errors=errors).to_json
Shortcut API for HTTP 403 `Forbidden` response. Args: errors (list): Response key/value data. Returns: WSResponse Instance.
juraj-google-style
def _create_distributed_tensor_spec(strategy, tensor_spec): num_replicas = len(strategy.extended.worker_devices) if not _always_wrap(strategy): return tensor_spec def _get_value_per_replica(tensor_spec_per_input): value_specs = [tensor_spec_per_input for _ in range(num_replicas)] return values.PerReplicaSpec(*value_specs) return nest.map_structure(_get_value_per_replica, tensor_spec)
Create a `tf.TypeSpec` for a given strategy and input `tensor_spec`. Args: strategy: The given `tf.distribute` strategy. tensor_spec: `tf.TensorSpec` of a given value. The batch dimension of the shape should be None if you have partial batches. Returns: A `tf.TypeSpec` that matches the values produced by a given strategy. This can be a `tf.TensorSpec` or a `PerRelicaSpec`.
github-repos
def DeregisterDecrypter(cls, decrypter): encryption_method = decrypter.ENCRYPTION_METHOD.lower() if encryption_method not in cls._decrypters: raise KeyError( 'Decrypter for encryption method: {0:s} not set.'.format( decrypter.ENCRYPTION_METHOD)) del cls._decrypters[encryption_method]
Deregisters a decrypter for a specific encryption method. Args: decrypter (type): decrypter class. Raises: KeyError: if the corresponding decrypter is not set.
juraj-google-style
def __init__(self, proxy: T, reference: Any=None): super().__init__('placeholder', proxy) self._reference = reference
Initialize a placeholder expression. Args: proxy: A proxy object with the type expected to be bound to this expression. Used for type checking at pipeline construction time.
github-repos
def get_all_supported_aspect_ratios(max_image_tiles: int) -> List[Tuple[int, int]]: aspect_ratios = [] for width in range(1, max_image_tiles + 1): for height in range(1, max_image_tiles + 1): if width * height <= max_image_tiles: aspect_ratios.append((width, height)) return aspect_ratios
Computes all allowed aspect ratios for a given maximum number of input tiles. This function calculates all possible arrangements of tiles that can be formed within the constraint of the maximum number of tiles. Each arrangement is represented by its aspect ratio (width/height) and the corresponding tile configuration. Args: max_image_tiles (`int`): The maximum number of tiles allowed. Returns: `List[Tuple[int, int]]`: A list of tuples, each tuple representing a valid (width, height) configuration in terms of number of tiles. Example: >>> get_all_supported_aspect_ratios(4) [(1, 1), (1, 2), (1, 3), (1, 4), (2, 1), (2, 2), (3, 1), (4, 1)]
github-repos
def ParseBookmarkAnnotationRow( self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) event_data = FirefoxPlacesBookmarkAnnotationEventData() event_data.content = self._GetRowValue(query_hash, row, 'content') event_data.offset = self._GetRowValue(query_hash, row, 'id') event_data.query = query event_data.title = self._GetRowValue(query_hash, row, 'title') event_data.url = self._GetRowValue(query_hash, row, 'url') timestamp = self._GetRowValue(query_hash, row, 'dateAdded') if timestamp: date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_ADDED) parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'lastModified') if timestamp: date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a bookmark annotation row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
juraj-google-style
def _parse_title(dom, details):
    title = details.find("h1")
    if not title:
        title = dom.find("title")
        assert title, "Can't find <title> tag!"
        return title[0].getContent().split("|")[0].strip()
    return title[0].getContent().strip()
Parse title/name of the book. Args: dom (obj): HTMLElement containing whole HTML page. details (obj): HTMLElement containing slice of the page with details. Returns: str: Book's title. Raises: AssertionError: If title not found.
juraj-google-style
def _rapply(input_layer, operation, *op_args, **op_kwargs): op_args = list(op_args) op_args.append(input_layer.tensor) return input_layer.with_tensor(operation(*op_args, **op_kwargs))
Applies the given operation to this after expanding op_args. Args: input_layer: The input layer for this op. operation: An operation that takes a tensor and the supplied args. *op_args: Extra arguments for operation. **op_kwargs: Keyword arguments for the operation. Returns: A new layer with operation applied.
codesearchnet
def add_case(self, case_obj, vtype='snv', mode='vcf', ped_svg=None): new_case = Case(case_id=case_obj.case_id, name=case_obj.name, variant_source=case_obj.variant_source, variant_type=vtype, variant_mode=mode, pedigree=ped_svg, compressed=case_obj.compressed, tabix_index=case_obj.tabix_index) inds = [Individual( ind_id=ind.ind_id, name=ind.name, mother=ind.mother, father=ind.father, sex=ind.sex, phenotype=ind.phenotype, ind_index=ind.ind_index, variant_source=ind.variant_source, bam_path=ind.bam_path, ) for ind in case_obj.individuals] new_case.individuals = inds if self.case(new_case.case_id): logger.warning("Case already exists in database!") else: self.session.add(new_case) self.save() return new_case
Load a case with individuals. Args: case_obj (puzzle.models.Case): initialized case model
juraj-google-style
def _assert_valid_dtypes(self, tensors): valid_dtypes = self._valid_dtypes() for t in tensors: dtype = t.dtype.base_dtype if dtype not in valid_dtypes: raise ValueError('Invalid type %r for %s, expected: %s.' % (dtype, t.name, [v for v in valid_dtypes]))
Asserts tensors are all valid types (see `_valid_dtypes`). Args: tensors: Tensors to check. Raises: ValueError: If any tensor is not a valid type.
github-repos
def __getattr__(self, name): if not str(name) in ['_initialized', '_settings']: try: xx = self.read_probes(name) return xx except: print(('class ' + type(self).__name__ + ' has no attribute ' + str(name))) raise AttributeError('class ' + type(self).__name__ + ' has no attribute ' + str(name))
Allows reading instrument inputs in the form value = instrument.input Args: name: name of input channel Returns: value of input channel
juraj-google-style
def enable_beacon(name, **kwargs): ret = {'comment': [], 'result': True} if (not name): ret['comment'] = 'Beacon name is required.' ret['result'] = False return ret if (('test' in kwargs) and kwargs['test']): ret['comment'] = 'Beacon {0} would be enabled.'.format(name) else: _beacons = list_(return_yaml=False, **kwargs) if (name not in _beacons): ret['comment'] = 'Beacon {0} is not currently configured.'.format(name) ret['result'] = False return ret try: eventer = salt.utils.event.get_event('minion', opts=__opts__) res = __salt__['event.fire']({'func': 'enable_beacon', 'name': name}, 'manage_beacons') if res: event_ret = eventer.get_event(tag='/salt/minion/minion_beacon_enabled_complete', wait=kwargs.get('timeout', 30)) if (event_ret and event_ret['complete']): beacons = event_ret['beacons'] beacon_config_dict = _get_beacon_config_dict(beacons[name]) if (('enabled' in beacon_config_dict) and beacon_config_dict['enabled']): ret['result'] = True ret['comment'] = 'Enabled beacon {0} on minion.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to enable beacon {0} on minion.'.format(name) elif event_ret: ret['result'] = False ret['comment'] = event_ret['comment'] else: ret['result'] = False ret['comment'] = 'Did not receive the manage event before the timeout of {0}s'.format(kwargs.get('timeout', 30)) return ret except KeyError: ret['result'] = False ret['comment'] = 'Event module not available. Beacon enable job failed.' return ret
Enable a beacon on the minion. Args: name (str): Name of the beacon to enable. Returns: dict: Boolean and status message on success or failure of enable. CLI Example: .. code-block:: bash salt '*' beacons.enable_beacon ps
codesearchnet
def releases(self, **kwargs): path = self._get_id_path('releases') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the release date and certification information by country for a specific movie id. Args: append_to_response: (optional) Comma separated, any movie method. Returns: A dict representation of the JSON returned from the API.
codesearchnet
def UploadAccount(self, hash_algorithm, hash_key, accounts): param = {'hashAlgorithm': hash_algorithm, 'signerKey': hash_key, 'users': accounts} return self._InvokeGitkitApi('uploadAccount', param)
Uploads multiple accounts to Gitkit server. Args: hash_algorithm: string, algorithm to hash password. hash_key: string, base64-encoded key of the algorithm. accounts: array of accounts to be uploaded. Returns: Response of the API.
codesearchnet
def _pad_for_batching(self, pixel_values: List['torch.Tensor']) -> List['torch.Tensor']: max_patch = max((len(x) for x in pixel_values)) pixel_values = [torch.nn.functional.pad(image, pad=[0, 0, 0, 0, 0, 0, 0, max_patch - image.shape[0]]) for image in pixel_values] return pixel_values
Pads images on the `num_of_patches` dimension with zeros to form a batch of same number of patches. Args: pixel_values (`List[torch.Tensor]`): An array of pixel values of each images of shape (`batch_size`, `num_patches`, `image_in_3D`) Returns: List[`torch.Tensor`]: The padded images.
github-repos
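A hedged sketch of the zero-padding on the patch dimension with small 3-D tensors; the shapes are arbitrary and PyTorch is assumed to be installed.

```python
import torch
import torch.nn.functional as F

images = [torch.ones(2, 3, 4), torch.ones(5, 3, 4)]   # differing num_patches
max_patch = max(img.shape[0] for img in images)
# F.pad pads from the last dimension backwards, so the final pair pads dim 0.
padded = [F.pad(img, pad=[0, 0, 0, 0, 0, max_patch - img.shape[0]]) for img in images]
print([tuple(p.shape) for p in padded])               # [(5, 3, 4), (5, 3, 4)]
```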
def GetMessageStrings(cls, formatter_mediator, event): formatter_object = cls.GetFormatterObject(event.data_type) return formatter_object.GetMessages(formatter_mediator, event)
Retrieves the formatted message strings for a specific event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: list[str, str]: long and short version of the message string.
juraj-google-style
def list_locations(access_token, subscription_id): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/locations?api-version=', BASE_API]) return do_get(endpoint, access_token)
List available locations for a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON list of locations.
codesearchnet
def __init__(self, n=65, radius=1, port_distance_from_surface=.07): super(Sphere, self).__init__() particle = mb.Particle(name='np') particle.add(mb.Port(anchor=particle), label='out') pattern = mb.SpherePattern(n) pattern.scale(radius) particles = pattern.apply(particle, orientation='normal', compound_port='out') self.add(particles, label='np_[$]') for i, pos in enumerate(pattern.points): particle = mb.Particle(name="np", pos=pos) self.add(particle, "np_{}".format(i)) port = mb.Port(anchor=particle) self.add(port, "port_{}".format(i)) port.spin(-pi/2, [0, 0, 1]) port.spin(-arcsin(pos[2]/radius), [0, 1, 0]) port.spin(arctan2(pos[1], pos[0]), [0, 0, 1]) port.translate(pos/radius * port_distance_from_surface)
Initialize a Sphere object. Args: n (int): Number of points used to construct the Sphere. radius (float): Radius of the Sphere. port_distance_from_surface (float): Distance of Ports from Sphere.
juraj-google-style
def scatterplot_matrix(df, features, downsample_frac=None, figsize=(15, 15)): if downsample_frac: df = df.sample(frac=downsample_frac) plt.figure(figsize=figsize) sns.pairplot(df[features], hue='target') plt.show()
Plot a scatterplot matrix for a list of features, colored by target value. Example: `scatterplot_matrix(X, X.columns.tolist(), downsample_frac=0.01)` Args: df: Pandas dataframe containing the target column (named 'target'). features: The list of features to include in the correlation plot. downsample_frac: Dataframe downsampling rate (0.1 to include 10% of the dataset). figsize: The size of the plot.
codesearchnet
def __init__(self, channel): self.NotifyReviewEvent = channel.unary_unary( '/pb.Analyzer/NotifyReviewEvent', request_serializer=lookout_dot_sdk_dot_event__pb2.ReviewEvent.SerializeToString, response_deserializer=lookout_dot_sdk_dot_service__analyzer__pb2.EventResponse.FromString, ) self.NotifyPushEvent = channel.unary_unary( '/pb.Analyzer/NotifyPushEvent', request_serializer=lookout_dot_sdk_dot_event__pb2.PushEvent.SerializeToString, response_deserializer=lookout_dot_sdk_dot_service__analyzer__pb2.EventResponse.FromString, )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def write_asc_file(filename, data, xsize, ysize, geotransform, nodata_value): UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(filename))) header = ('NCOLS %d\nNROWS %d\nXLLCENTER %f\nYLLCENTER %f\nCELLSIZE %f\nNODATA_VALUE %f' % (xsize, ysize, (geotransform[0] + (0.5 * geotransform[1])), (geotransform[3] - ((ysize - 0.5) * geotransform[1])), geotransform[1], nodata_value)) with open(filename, 'w', encoding='utf-8') as f: f.write(header) for i in range(0, ysize): for j in range(0, xsize): f.write(('%s\t' % repr(data[i][j]))) f.write('\n') f.close()
Output Raster to ASCII file. Args: filename: output ASCII filename. data: 2D array data. xsize: Col count. ysize: Row count. geotransform: geographic transformation. nodata_value: nodata_flow value.
codesearchnet
def saturate_cast(value, dtype, name=None): with ops.name_scope(name, 'saturate_cast', [value]) as name: value = ops.convert_to_tensor(value, name='value') dtype = dtypes.as_dtype(dtype).base_dtype in_dtype = value.dtype if in_dtype.is_complex: if dtype.is_complex: real_in_dtype = in_dtype.real_dtype real_out_dtype = dtype.real_dtype if real_in_dtype.min < real_out_dtype.min or real_in_dtype.max > real_out_dtype.max: value = gen_math_ops._clip_by_value(value, ops.convert_to_tensor(builtins.complex(real_out_dtype.min, real_out_dtype.min), dtype=in_dtype), ops.convert_to_tensor(builtins.complex(real_out_dtype.max, real_out_dtype.max), dtype=in_dtype), name='clamp') return cast(value, dtype, name=name) else: value = real(value) logging.warn('Casting complex to real discards imaginary part.') in_dtype = in_dtype.real_dtype out_real_dtype = dtype.real_dtype if forward_compat.forward_compatible(2024, 11, 1) or in_dtype.min < out_real_dtype.min or in_dtype.max > out_real_dtype.max: np_dtype = in_dtype.as_numpy_dtype try: promoted_type = np.promote_types(np_dtype, out_real_dtype.as_numpy_dtype) except TypeError: promoted_type = float min_limit = np_dtype(np.maximum(in_dtype.min, out_real_dtype.min)) promoted = np.array([min_limit, out_real_dtype.min], dtype=promoted_type) if promoted[0] < promoted[1]: min_limit = np.nextafter(min_limit, np_dtype(0), dtype=np_dtype) max_limit = np_dtype(np.minimum(in_dtype.max, out_real_dtype.max)) promoted = np.array([max_limit, out_real_dtype.max], dtype=promoted_type) if promoted[0] > promoted[1]: max_limit = np.nextafter(max_limit, np_dtype(0), dtype=np_dtype) value = gen_math_ops._clip_by_value(value, ops.convert_to_tensor(min_limit, dtype=in_dtype), ops.convert_to_tensor(max_limit, dtype=in_dtype), name='clamp') return cast(value, dtype, name=name)
Performs a safe saturating cast of `value` to `dtype`. This function casts the input to `dtype` without overflow. If there is a danger that values would over or underflow in the cast, this op applies the appropriate clamping before the cast. See `tf.cast` for more details. Args: value: A `Tensor`. dtype: The desired output `DType`. name: A name for the operation (optional). Returns: `value` safely cast to `dtype`.
github-repos
def leak(self): (capacity, last_leak) = self.storage.mget(self.key_amount, self.key_last_leak, coherent=True) now = time.time() if last_leak: elapsed = (now - last_leak) decrement = (elapsed * self.rate) new_capacity = max(int((capacity - decrement)), 0) else: new_capacity = 0 self.storage.mset({self.key_amount: new_capacity, self.key_last_leak: now}) return new_capacity
Leak the adequate amount of data from the bucket. This should be called before any consumption takes place. Returns: int: the new capacity of the bucket
codesearchnet
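The same leak arithmetic, restated as a self-contained class where plain attributes stand in for the key/value storage backend used above; the rate and sleep values are arbitrary.

```python
import time

class Bucket:
    def __init__(self, rate):
        self.rate = rate          # units drained per second
        self.amount = 0
        self.last_leak = None

    def leak(self):
        now = time.time()
        if self.last_leak:
            elapsed = now - self.last_leak
            self.amount = max(int(self.amount - elapsed * self.rate), 0)
        else:
            self.amount = 0
        self.last_leak = now
        return self.amount

bucket = Bucket(rate=10)
bucket.leak()                 # first call zeroes the bucket and stamps the time
bucket.amount = 100
time.sleep(0.2)
print(bucket.leak())          # close to 98: 100 minus ~0.2 s of leakage at 10 units/s
```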
def _validate_measure_sampling(self, experiment): if (self._shots <= 1): self._sample_measure = False return if hasattr(experiment.config, 'allows_measure_sampling'): self._sample_measure = experiment.config.allows_measure_sampling else: measure_flag = False for instruction in experiment.instructions: if (instruction.name == 'reset'): self._sample_measure = False return if measure_flag: if (instruction.name not in ['measure', 'barrier', 'id', 'u0']): self._sample_measure = False return elif (instruction.name == 'measure'): measure_flag = True self._sample_measure = True
Determine if measure sampling is allowed for an experiment Args: experiment (QobjExperiment): a qobj experiment.
codesearchnet
def process_openxml_file(filename: str, print_good: bool, delete_if_bad: bool) -> None: print_bad = (not print_good) try: file_good = is_openxml_good(filename) file_bad = (not file_good) if ((print_good and file_good) or (print_bad and file_bad)): print(filename) if (delete_if_bad and file_bad): log.warning('Deleting: {}', filename) os.remove(filename) except Exception as e: log.critical('Uncaught error in subprocess: {!r}\n{}', e, traceback.format_exc()) raise
Prints the filename of, or deletes, an OpenXML file depending on whether it is corrupt or not. Args: filename: filename to check print_good: if ``True``, then prints the filename if the file appears good. delete_if_bad: if ``True``, then deletes the file if the file appears corrupt.
codesearchnet
def random_masking(self, sequence, noise=None): batch_size, seq_length, dim = sequence.shape len_keep = int(seq_length * (1 - self.config.mask_ratio)) if noise is None: noise = torch.rand(batch_size, seq_length, device=sequence.device) ids_shuffle = torch.argsort(noise, dim=1).to(sequence.device) ids_restore = torch.argsort(ids_shuffle, dim=1).to(sequence.device) ids_keep = ids_shuffle[:, :len_keep] sequence_unmasked = torch.gather(sequence, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, dim)) mask = torch.ones([batch_size, seq_length], device=sequence.device) mask[:, :len_keep] = 0 mask = torch.gather(mask, dim=1, index=ids_restore) return (sequence_unmasked, mask, ids_restore)
Perform per-sample random masking by per-sample shuffling. Per-sample shuffling is done by argsort random noise. Args: sequence (`torch.LongTensor` of shape `(batch_size, sequence_length, dim)`) noise (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*) which is mainly used for testing purposes to control randomness and maintain the reproducibility
github-repos
def verified(self, institute_id): query = {'verb': 'validate', 'institute': institute_id} res = [] validate_events = self.event_collection.find(query) for validated in list(validate_events): case_id = validated['case'] var_obj = self.variant(case_id=case_id, document_id=validated['variant_id']) case_obj = self.case(case_id=case_id) if ((not case_obj) or (not var_obj)): continue var_obj['case_obj'] = {'display_name': case_obj['display_name'], 'individuals': case_obj['individuals']} res.append(var_obj) return res
Return all verified variants for a given institute Args: institute_id(str): institute id Returns: res(list): a list with validated variants
codesearchnet
def add_router(self, path, router): if self.strict_router_check and not isinstance(router, Router): raise TypeError("Expected object of type Router, found %r" % type(router)) log.info("{} Adding router {} on path {}", id(self), router, path) self.middleware.add(path=path, func=router, method_mask=HTTPMethod.ALL,)
Adds a router to the list of routers Args: path (str or regex): The path on which the router binds router (growler.Router): The router which will respond to requests Raises: TypeError: If `strict_router_check` attribute is True and the router is not an instance of growler.Router.
juraj-google-style
def get_metadata_attribute(self, metaname): metadata_value = self.metadata.get(metaname, None) if (metadata_value is None): raise NoMetadataException(('No metadata attribute named %s' % metaname)) if (not isinstance(metadata_value, list)): raise TypeError('Metadata is not a list and it should be.') if (len(metadata_value) > 1): return metadata_value else: return metadata_value[0]
Get the metadata attribute by the name. Args: metaname (:obj:`str`): Name of the attribute Returns: :obj:`list` or :obj:`str`: Value(s) of the requested metadata attribute Raises: NoMetadataException: Attribute error TypeError: Metadata should be a list
codesearchnet
def OpenFileEntry(cls, path_spec_object, resolver_context=None): file_system = cls.OpenFileSystem(path_spec_object, resolver_context=resolver_context) if (resolver_context is None): resolver_context = cls._resolver_context file_entry = file_system.GetFileEntryByPathSpec(path_spec_object) resolver_context.ReleaseFileSystem(file_system) return file_entry
Opens a file entry object defined by path specification. Args: path_spec_object (PathSpec): path specification. resolver_context (Optional[Context]): resolver context, where None represents the built in context which is not multi process safe. Returns: FileEntry: file entry or None if the path specification could not be resolved.
codesearchnet
def _parse_exchange_token_response(content): resp = {} content = _helpers._from_bytes(content) try: resp = json.loads(content) except Exception: resp = _helpers.parse_unique_urlencoded(content) if resp and 'expires' in resp: resp['expires_in'] = resp.pop('expires') return resp
Parses response of an exchange token request. Most providers return JSON but some (e.g. Facebook) return a url-encoded string. Args: content: The body of a response Returns: Content as a dictionary object. Note that the dict could be empty, i.e. {}. That basically indicates a failure.
juraj-google-style
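Both response shapes mentioned in the docstring, run through a simplified stand-in parser; the token bodies are fabricated examples.

```python
import json
from urllib.parse import parse_qsl

def parse_token_response(content):
    # Most providers return JSON; some (e.g. Facebook) return a url-encoded string.
    try:
        resp = json.loads(content)
    except ValueError:
        resp = dict(parse_qsl(content))
    if "expires" in resp:
        resp["expires_in"] = resp.pop("expires")
    return resp

print(parse_token_response('{"access_token": "abc", "expires_in": 3600}'))
print(parse_token_response("access_token=abc&expires=5184000"))   # Facebook-style
```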
def secondary_training_status_message(job_description, prev_description): if job_description is None or job_description.get('SecondaryStatusTransitions') is None\ or len(job_description.get('SecondaryStatusTransitions')) == 0: return '' prev_description_secondary_transitions = prev_description.get('SecondaryStatusTransitions')\ if prev_description is not None else None prev_transitions_num = len(prev_description['SecondaryStatusTransitions'])\ if prev_description_secondary_transitions is not None else 0 current_transitions = job_description['SecondaryStatusTransitions'] if len(current_transitions) == prev_transitions_num: transitions_to_print = current_transitions[-1:] else: transitions_to_print = current_transitions[prev_transitions_num - len(current_transitions):] status_strs = [] for transition in transitions_to_print: message = transition['StatusMessage'] time_str = datetime.utcfromtimestamp( time.mktime(job_description['LastModifiedTime'].timetuple())).strftime('%Y-%m-%d %H:%M:%S') status_strs.append('{} {} - {}'.format(time_str, transition['Status'], message)) return '\n'.join(status_strs)
Returns a string contains last modified time and the secondary training job status message. Args: job_description: Returned response from DescribeTrainingJob call prev_description: Previous job description from DescribeTrainingJob call Returns: str: Job status string to be printed.
juraj-google-style
def set_requestable(self, requestable=True): self.data['is_requestdata_type'] = requestable if requestable: self.data['private'] = False
Set the dataset to be of type requestable or not Args: requestable (bool): Set whether dataset is requestable. Defaults to True. Returns: None
juraj-google-style
def make_initial_frame_chooser(real_env, frame_stack_size, simulation_random_starts, simulation_flip_first_random_for_beginning, split=tf.estimator.ModeKeys.TRAIN): initial_frame_rollouts = real_env.current_epoch_rollouts(split=split, minimal_rollout_frames=frame_stack_size) def initial_frame_chooser(batch_size): 'Frame chooser.' deterministic_initial_frames = initial_frame_rollouts[0][:frame_stack_size] if (not simulation_random_starts): initial_frames = ([deterministic_initial_frames] * batch_size) else: initial_frames = random_rollout_subsequences(initial_frame_rollouts, batch_size, frame_stack_size) if simulation_flip_first_random_for_beginning: initial_frames[0] = deterministic_initial_frames return np.stack([[frame.observation.decode() for frame in initial_frame_stack] for initial_frame_stack in initial_frames]) return initial_frame_chooser
Make frame chooser. Args: real_env: T2TEnv to take initial frames from. frame_stack_size (int): Number of consecutive frames to extract. simulation_random_starts (bool): Whether to choose frames at random. simulation_flip_first_random_for_beginning (bool): Whether to flip the first frame stack in every batch for the frames at the beginning. split (tf.estimator.ModeKeys or None): Data split to take the frames from, None means use all frames. Returns: Function batch_size -> initial_frames.
codesearchnet
def conjugate(self): return self.__class__(scalar=self.scalar, vector=(- self.vector))
Quaternion conjugate, encapsulated in a new instance. For a unit quaternion, this is the same as the inverse. Returns: A new Quaternion object clone with its vector part negated
codesearchnet
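A numeric check of the definition, using a plain (scalar, vector) pair rather than the library's Quaternion class; the example quaternion is arbitrary but unit-norm.

```python
import numpy as np

scalar, vector = 0.5, np.array([0.5, 0.5, 0.5])      # a unit quaternion
conj_scalar, conj_vector = scalar, -vector           # conjugate: negate the vector part

# For a unit quaternion the conjugate is also the inverse: |q|^2 = 1.
norm_sq = scalar**2 + np.dot(vector, vector)
print(conj_scalar, conj_vector, norm_sq)             # 0.5 [-0.5 -0.5 -0.5] 1.0
```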
def requestMapIdentity(self, subject, vendorSpecific=None): response = self.requestMapIdentityResponse(subject, vendorSpecific) return self._read_boolean_response(response)
See Also: requestMapIdentityResponse() Args: subject: vendorSpecific: Returns:
juraj-google-style
def run_config_diagnostics(config_path=CONFIG_PATH): config = read_config(config_path) missing_sections = set() malformed_entries = defaultdict(set) for section, expected_section_keys in SECTION_KEYS.items(): section_content = config.get(section) if not section_content: missing_sections.add(section) else: for option in expected_section_keys: option_value = section_content.get(option) if not option_value: malformed_entries[section].add(option) return config_path, missing_sections, malformed_entries
Run diagnostics on the configuration file. Args: config_path (str): Path to the configuration file. Returns: str, Set[str], dict(str, Set[str]): The path to the configuration file, a set of missing sections and a dict that maps each section to the entries that have either missing or empty options.
juraj-google-style
def save_data_files(bs, prefix=None, directory=None): filename = 'phonon_band.dat' filename = ('{}_phonon_band.dat'.format(prefix) if prefix else filename) directory = (directory if directory else '.') filename = os.path.join(directory, filename) with open(filename, 'w') as f: header = ' f.write(header) for band in bs.bands: for (d, e) in zip(bs.distance, band): f.write('{:.8f} {:.8f}\n'.format(d, e)) f.write('\n') return filename
Write the phonon band structure data files to disk. Args: bs (:obj:`~pymatgen.phonon.bandstructure.PhononBandStructureSymmLine`): The phonon band structure. prefix (:obj:`str`, optional): Prefix for data file. directory (:obj:`str`, optional): Directory in which to save the data. Returns: str: The filename of the written data file.
codesearchnet
def _einsum_matmul_index_helper(gate_indices, number_of_qubits): if ((len(gate_indices) + number_of_qubits) > 26): raise QiskitError('Total number of free indexes limited to 26') tens_in = ascii_lowercase[:number_of_qubits] tens_out = list(tens_in) mat_left = '' mat_right = '' for (pos, idx) in enumerate(reversed(gate_indices)): mat_left += ascii_lowercase[((- 1) - pos)] mat_right += tens_in[((- 1) - idx)] tens_out[((- 1) - idx)] = ascii_lowercase[((- 1) - pos)] tens_out = ''.join(tens_out) return (mat_left, mat_right, tens_in, tens_out)
Return the index string for Numpy.einsum matrix multiplication. The returned indices are to perform a matrix multiplication A.v where the matrix A is an M-qubit matrix, matrix v is an N-qubit vector, and M <= N, and identity matrices are implied on the subsystems where A has no support on v. Args: gate_indices (list[int]): the indices of the right matrix subsystems to contract with the left matrix. number_of_qubits (int): the total number of qubits for the right matrix. Returns: tuple: (mat_left, mat_right, tens_in, tens_out) of index strings for that may be combined into a Numpy.einsum function string. Raises: QiskitError: if the total number of qubits plus the number of contracted indices is greater than 26.
codesearchnet
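A hand-written einsum of the kind these index strings drive: applying a single-qubit X gate to qubit 0 of a 2-qubit statevector, with qubit 0 on the last axis (an assumption about the layout, consistent with the helper's right-to-left indexing).

```python
import numpy as np

x_gate = np.array([[0, 1], [1, 0]], dtype=complex)
psi = np.zeros((2, 2), dtype=complex)
psi[0, 0] = 1.0                                   # state |00>

new_psi = np.einsum("ij,aj->ai", x_gate, psi)     # contract the gate with qubit 0
print(new_psi.reshape(4))                         # amplitude 1 on |01>
```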
def diet_expert(x, hidden_size, params): @fn_with_diet_vars(params) def diet_expert_internal(x): dim = x.get_shape().as_list()[-1] h = tf.layers.dense(x, hidden_size, activation=tf.nn.relu, use_bias=False) y = tf.layers.dense(h, dim, use_bias=False) y *= tf.rsqrt(tf.to_float(dim * hidden_size)) return y return diet_expert_internal(x)
A two-layer feed-forward network with relu activation on hidden layer. Uses diet variables. Recomputes hidden layer on backprop to save activation memory. Args: x: a Tensor with shape [batch, io_size] hidden_size: an integer params: a diet variable HParams object. Returns: a Tensor with shape [batch, io_size]
juraj-google-style
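The forward pass is easy to restate in plain NumPy; this sketch mirrors the two bias-free dense layers and the rsqrt(dim * hidden_size) scaling, but deliberately leaves out the diet-variable and recompute-on-backprop machinery:

import numpy as np

def expert_forward(x, w1, w2):
    # x: [batch, io_size]; w1: [io_size, hidden]; w2: [hidden, io_size]
    h = np.maximum(x @ w1, 0.0)                       # relu hidden layer, no bias
    y = h @ w2
    return y / np.sqrt(w1.shape[0] * w1.shape[1])     # rsqrt(dim * hidden_size)

rng = np.random.default_rng(0)
x = rng.normal(size=(4, 8))
out = expert_forward(x, rng.normal(size=(8, 16)), rng.normal(size=(16, 8)))
print(out.shape)  # (4, 8)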
def extend(self, *bindings):
    self._bindings.extend(self._preprocess(bindings))
    return self
Append the given bindings to this keymap. Arguments: *bindings (Binding): Bindings to be added. Returns: Keymap: self
juraj-google-style
def _obj_to_path(obj):
    if obj is None:
        return obj
    if inspect.isclass(obj) or inspect.isfunction(obj):
        fetched = getattr(sys.modules[obj.__module__], obj.__name__, None)
        if fetched is None:
            raise ValueError(
                'Object %r must be defined on the top level of a module.' % obj)
        return '%s.%s' % (obj.__module__, obj.__name__)
    raise TypeError('Unexpected type %s.' % type(obj))
Returns the fully qualified path to the object. Args: obj: obj must be a new style top level class, or a top level function. No inner function or static method. Returns: Fully qualified path to the object. Raises: TypeError: when argument obj has unsupported type. ValueError: when obj can't be discovered on the top level.
codesearchnet
def _retrieve_all_filtered_nodes(self):
    if self._node_filters is None:
        return None
    all_filtered_nodes = set()
    nodes_to_visit = list(self._node_filters)
    while nodes_to_visit:
        node_path = nodes_to_visit.pop(0)
        node_id = self._node_path_to_id[node_path]
        if node_id in all_filtered_nodes:
            continue
        all_filtered_nodes.add(node_id)
        node, setter = self._loaded_nodes.get(node_id, (None, None))
        if node is not None:
            if not isinstance(node, base.Trackable):
                raise TypeError(
                    'Error when processing dictionary values passed to nodes_to_load.'
                    f'Object at {node_path} is expected to be a checkpointable (i.e. '
                    "'trackable') TensorFlow object (e.g. tf.Variable, tf.Module or "
                    'Keras layer).')
            node._maybe_initialize_trackable()
        for reference in self._proto.nodes[node_id].children:
            child_object, _ = self._loaded_nodes.get(reference.node_id, (None, None))
            if child_object is None and node is not None:
                child_object = node._lookup_dependency(reference.local_name)
                if isinstance(child_object, data_structures.TrackableDataStructure):
                    setter = lambda *args: None
                self._loaded_nodes[reference.node_id] = (child_object, setter)
            child_path = '{}.{}'.format(node_path, reference.local_name)
            self._node_path_to_id[child_path] = reference.node_id
            nodes_to_visit.append(child_path)
    if 0 in all_filtered_nodes:
        return None
    return all_filtered_nodes
Traverses through the object graph to get the IDs of all nodes to load. As a side-effect, if node_filters is a dictionary that contains already- created objects, then the children tracked by those objects will be added to node_filters. Returns: List of all nodes to load, or None if all nodes should be loaded.
github-repos
def _select_class_id(ids, selected_id):
    ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids)
    if isinstance(ids, sparse_tensor.SparseTensor):
        return sparse_ops.sparse_retain(ids, math_ops.equal(ids.values, selected_id))
    ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
    ids_last_dim = array_ops.size(ids_shape) - 1
    filled_selected_id_shape = math_ops.reduced_shape(
        ids_shape, array_ops.reshape(ids_last_dim, [1]))
    filled_selected_id = array_ops.fill(
        filled_selected_id_shape, math_ops.cast(selected_id, dtypes.int64))
    result = sets.set_intersection(filled_selected_id, ids)
    return sparse_tensor.SparseTensor(
        indices=result.indices, values=result.values, dense_shape=ids_shape)
Filter all but `selected_id` out of `ids`. Args: ids: `int64` `Tensor` or `SparseTensor` of IDs. selected_id: Int id to select. Returns: `SparseTensor` of same dimensions as `ids`. This contains only the entries equal to `selected_id`.
github-repos
def assertAllLessEqual(self, a, comparison_target):
    a, comparison_target = self.evaluate_if_both_tensors(a, comparison_target)
    a = self._GetNdArray(a)
    self.assertLessEqual(np.max(a), comparison_target)
Assert element values are all less than or equal to a target value. Args: a: The numpy `ndarray`, or anything that can be converted into a numpy `ndarray` (including Tensor). comparison_target: The target value of comparison.
github-repos
def parse(self, string, root=None):
    phrases = []
    meta = self.meta.search(string)
    while meta:
        pos = meta.start()
        if meta.group() == '<':
            string, child, meta = self.open_phrase(string, pos)
            if child and root:
                root.nested.append(child)
            elif child:
                phrases.append(child)
            continue
        elif root:
            if meta.group() == '(':
                meta = self.meta.search(string, pos + 1)
                if meta.group() == ')':
                    string, root, meta = self.handle_arguments(
                        string, root, pos, meta.start())
                    continue
            elif meta.group() == '>':
                string, phrase, meta = self.close_phrase(string, root, pos)
                if phrase:
                    return string, phrase
                continue
        string, meta = self.escape_meta(string, pos)
    if not root:
        return string, phrases
    word = re.search(r'([\w\s]+)(?![\d]*>[\w\s]+>)', string)
    what = 'No closing tag found for opening tag'
    if word:
        what += " after expression '{0}'".format(word.group())
    raise errors.ParseError(what + '!')
Parses a string to handle escaped tags and retrieve phrases. This method works recursively to parse nested tags. When escaped tags are found, those are removed from the string. Also argument sequences are removed from the string. The string returned can thus be quite different from the string passed. Arguments: string (str): The string to parse. root (Phrase): If in a recursive call, the root/parent phrase. Returns: For one, the escaped string (without escape characters and phrase arguments). For the other, it depends on the stack-depth. If this is the lowest recursion depth/level (i.e. the stack call resulting from the first function call in self.beautify()), it will return a list of phrases. For higher stack levels ( i.e. resulting from recursive function calls from with self.parse(), for nested phrases), it returns exactly one Phrase instance. Raises: errors.ParseError: If no closing tag could be found for an opening tag.
codesearchnet
def words_string(fake: Faker, n: int) -> str: return ' '.join(fake.words(n))
Provide Faker words as a joined string. Args: * fake: Faker instance * n: number of words Returns: * string of n words joined by spaces
github-repos
def get_newest(blocks, layout_blocks):
    layout_temp = list(layout_blocks)
    for i in range(0, len(layout_temp)):
        for k in range(0, len(layout_blocks)):
            if blocks[layout_temp[i]].ec_hdr.image_seq != blocks[layout_blocks[k]].ec_hdr.image_seq:
                continue
            if blocks[layout_temp[i]].leb_num != blocks[layout_blocks[k]].leb_num:
                continue
            if blocks[layout_temp[i]].vid_hdr.sqnum > blocks[layout_blocks[k]].vid_hdr.sqnum:
                del layout_blocks[k]
                break
    return layout_blocks
Filter out old layout blocks from list Arguments: List:blocks -- List of block objects List:layout_blocks -- List of layout block indexes Returns: List -- Newest layout blocks in list
codesearchnet
def push(self, key, value, *, section=DataStoreDocumentSection.Data):
    key_notation = '.'.join([section, key])
    result = self._collection.update_one(
        {'_id': ObjectId(self._workflow_id)},
        {'$push': {key_notation: self._encode_value(value)},
         '$currentDate': {'lastModified': True}})
    return result.modified_count == 1
Appends a value to a list in the specified section of the document. Args: key (str): The key pointing to the value that should be stored/updated. It supports MongoDB's dot notation for nested fields. value: The value that should be appended to a list in the data store. section (DataStoreDocumentSection): The section from which the data should be retrieved. Returns: bool: ``True`` if the value could be appended, otherwise ``False``.
codesearchnet
def uninstall(pkg):
    ret = {'result': None, 'output': ''}
    out = __salt__['cmd.run_all'](FLATPAK_BINARY_NAME + ' uninstall ' + pkg)
    if out['retcode'] and out['stderr']:
        ret['stderr'] = out['stderr'].strip()
        ret['result'] = False
    else:
        ret['stdout'] = out['stdout'].strip()
        ret['result'] = True
    return ret
Uninstall the specified package. Args: pkg (str): The package name. Returns: dict: The ``result`` and ``output``. CLI Example: .. code-block:: bash salt '*' flatpak.uninstall org.gimp.GIMP
juraj-google-style
def get_corrections_dict(self, entry):
    corrections = {}
    for c in self.corrections:
        val = c.get_correction(entry)
        if val != 0:
            corrections[str(c)] = val
    return corrections
Returns the corrections applied to a particular entry. Args: entry: A ComputedEntry object. Returns: ({correction_name: value})
juraj-google-style
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3):
    if kmip_version < enums.KMIPVersion.KMIP_1_3:
        raise exceptions.VersionNotSupported(
            'KMIP {} does not support the ProfileInformation object.'.format(
                kmip_version.value))
    local_buffer = BytearrayStream()
    if self._profile_name:
        self._profile_name.write(local_buffer, kmip_version=kmip_version)
    else:
        raise exceptions.InvalidField(
            'The ProfileInformation structure is missing the profile name field.')
    if self._server_uri:
        self._server_uri.write(local_buffer, kmip_version=kmip_version)
    if self._server_port:
        self._server_port.write(local_buffer, kmip_version=kmip_version)
    self.length = local_buffer.length()
    super(ProfileInformation, self).write(output_buffer, kmip_version=kmip_version)
    output_buffer.write(local_buffer.buffer)
Write the ProfileInformation structure encoding to the data stream. Args: output_buffer (stream): A data stream in which to encode ProfileInformation structure data, supporting a write method. kmip_version (enum): A KMIPVersion enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 2.0. Raises: InvalidField: Raised if the profile name field is not defined. VersionNotSupported: Raised when a KMIP version is provided that does not support the ProfileInformation structure.
codesearchnet
def multiply(x1, x2, output_shape=None, name=None):
    if not isinstance(x2, Tensor):
        return ScalarMultiplyOperation(x1, x2).outputs[0]
    with tf.name_scope(name, default_name="mul"):
        x1, x2 = binary_arguments_to_tensors(x1, x2)
        return einsum(
            [x1, x2],
            output_shape=_infer_binary_broadcast_shape(
                x1.shape, x2.shape, output_shape))
Binary multiplication with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor
juraj-google-style
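The broadcasting behaviour follows the usual elementwise rules; a rough NumPy analogue (not the mesh-tensorflow op itself) with the same scalar fast path:

import numpy as np

def broadcast_multiply(x1, x2):
    if np.isscalar(x2):
        return x1 * x2          # scalar fast path
    return np.multiply(x1, x2)  # shapes broadcast per normal rules

a = np.ones((2, 3))
print(broadcast_multiply(a, 2.0).sum())           # 12.0
print(broadcast_multiply(a, np.arange(3)).sum())  # 6.0 (row vector broadcast over rows)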
def register_instance(self, instance: '_instance_base.Instance') -> None:
Treating self as a class definition, register an instance of it. This is used for keeping merging call records on instances when generating the formal definition of a class. See InterpreterClass and TupleClass. Args: instance: An instance of this class (as a BaseValue)
github-repos
def delete(self, collector_id=None):
    cid = self.collector_id
    if collector_id:
        cid = collector_id
    url = '{0}/{1}'.format(self.url, cid)
    request = requests.delete(url, auth=self.auth)
    try:
        response = request.json()
    except ValueError:
        response = {
            u'message': u'The request completed successfully.',
            u'status': 200,
        }
    return response
Delete a collector from inventory. Args: collector_id (int): id of collector (optional)
juraj-google-style
def calculate_parity(n):
    if not is_natural(n):
        raise ValueError('Expected n to be a positive integer.')
    y = 0
    n = abs(n)
    while n:
        y += n & 1
        n = n >> 1
    return y & 1
Calculates and returns the parity of a number. The parity of a number is ``1`` if the number has an odd number of ones in its binary representation, otherwise ``0``. Args: n (int): the number whose parity to calculate Returns: ``1`` if the number has an odd number of ones, otherwise ``0``. Raises: ValueError: if ``n`` is less than ``0``.
codesearchnet
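A quick cross-check of the same definition using Python's binary representation (this standalone helper skips the is_natural validation used above):

def parity(n: int) -> int:
    # parity of the popcount: 1 for an odd number of ones, else 0
    return bin(n).count("1") & 1

for n in (1, 2, 3, 7, 10):
    print(n, parity(n))   # 1->1, 2->1, 3->0, 7->1, 10->0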
def numeric_function_clean_dataframe(self, axis):
    result = None
    query_compiler = self
    if not axis and len(self.index) == 0:
        result = pandas.Series(dtype=np.int64)
    nonnumeric = [
        col for col, dtype in zip(self.columns, self.dtypes)
        if not is_numeric_dtype(dtype)
    ]
    if len(nonnumeric) == len(self.columns):
        if axis:
            result = pandas.Series([np.nan for _ in self.index])
        else:
            result = pandas.Series([0 for _ in self.index])
    else:
        query_compiler = self.drop(columns=nonnumeric)
    return result, query_compiler
Preprocesses numeric functions to clean dataframe and pick numeric indices. Args: axis: '0' if columns and '1' if rows. Returns: Tuple with return value(if any), indices to apply func to & cleaned Manager.
juraj-google-style
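The non-numeric-column filtering step corresponds to this plain pandas idiom, shown here on a small throwaway frame:

import pandas as pd
from pandas.api.types import is_numeric_dtype

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"], "c": [0.5, 1.5]})
nonnumeric = [col for col in df.columns if not is_numeric_dtype(df[col])]
print(df.drop(columns=nonnumeric).columns.tolist())  # ['a', 'c']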
def gui(discord_token, discord_client_id):
    logger.info("Starting Modis in GUI")

    import tkinter as tk

    logger.debug("Loading packages")
    from modis.discord_modis import gui as discord_modis_gui
    from modis.reddit_modis import gui as reddit_modis_gui
    from modis.facebook_modis import gui as facebook_modis_gui

    logger.debug("Initialising window")
    root = tk.Tk()
    root.minsize(width=800, height=400)
    root.geometry("800x600")
    root.title("Modis Control Panel")
    root.iconbitmap(r"{}/assets/modis.ico".format(file_dir))

    discord = discord_modis_gui.Frame(root, discord_token, discord_client_id)
    discord.grid(column=0, row=0, padx=0, pady=0, sticky="W E N S")

    root.columnconfigure(0, weight=1)
    root.rowconfigure(0, weight=1)
    discord.columnconfigure(0, weight=1)
    discord.rowconfigure(0, weight=1)

    logger.debug("GUI initialised")
    root.mainloop()
Start Modis in gui format. Args: discord_token (str): The bot token for your Discord application discord_client_id: The bot's client ID
juraj-google-style
async def connect(self, client_id, conn_string):
    conn_id = self.adapter.unique_conn_id()
    self._client_info(client_id)
    await self.adapter.connect(conn_id, conn_string)
    self._hook_connect(conn_string, conn_id, client_id)
Connect to a device on behalf of a client. See :meth:`AbstractDeviceAdapter.connect`. Args: client_id (str): The client we are working for. conn_string (str): A connection string that will be passed to the underlying device adapter to connect. Raises: DeviceServerError: There is an issue with your client_id. DeviceAdapterError: The adapter had an issue connecting.
codesearchnet
def make_timebar(progress=0, duration=0):
    duration_string = api_music.duration_to_string(duration)
    if duration <= 0:
        return '---'
    time_counts = int(round((progress / duration) * TIMEBAR_LENGTH))
    if time_counts > TIMEBAR_LENGTH:
        time_counts = TIMEBAR_LENGTH
    if duration > 0:
        bar = ('│' + (TIMEBAR_PCHAR * time_counts)
               + (TIMEBAR_ECHAR * (TIMEBAR_LENGTH - time_counts)) + '│')
        time_bar = '{} {}'.format(bar, duration_string)
    else:
        time_bar = duration_string
    return time_bar
Makes a new time bar string Args: progress: How far through the current song we are (in seconds) duration: The duration of the current song (in seconds) Returns: timebar (str): The time bar string
codesearchnet
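A self-contained variant of the same bar construction; the width, fill characters, and the minutes:seconds suffix are illustrative choices, not the module's constants:

def time_bar(progress: float, duration: float, width: int = 20) -> str:
    if duration <= 0:
        return "---"
    filled = min(width, int(round(progress / duration * width)))
    minutes, seconds = divmod(int(duration), 60)
    return "│{}{}│ {}:{:02d}".format("█" * filled, "─" * (width - filled),
                                     minutes, seconds)

print(time_bar(30, 120))  # five of twenty cells filled, followed by "2:00"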
def end(self, session): pass
Called at the end of session. The `session` argument can be used in case the hook wants to run final ops, such as saving a last checkpoint. If `session.run()` raises exception other than OutOfRangeError or StopIteration then `end()` is not called. Note the difference between `end()` and `after_run()` behavior when `session.run()` raises OutOfRangeError or StopIteration. In that case `end()` is called but `after_run()` is not called. Args: session: A TensorFlow Session that will be soon closed.
github-repos
def __init__(self, log_path, config_path, output_path):
    if FLAGS.phantomjs_timeout is not None:
        logging.info(
            'Using FLAGS.phantomjs_timeout which is deprecated in favor '
            'of FLAGS.capture_timeout - please update your config')
        capture_timeout = FLAGS.phantomjs_timeout
    else:
        capture_timeout = FLAGS.capture_timeout
    process_worker.ProcessWorkflow.__init__(
        self, log_path, timeout_seconds=capture_timeout)
    self.config_path = config_path
    self.output_path = output_path
Initializer. Args: log_path: Where to write the verbose logging output. config_path: Path to the screenshot config file to pass to PhantomJs. output_path: Where the output screenshot should be written.
juraj-google-style
def augment(self, dct: NonAugmentedDict, document: Optional[YamlDocument] = None) -> AugmentedDict:
    Validator.instance_of(dict, raise_ex=True, dct=dct)
    for instance in self._extensions:
        nodes = list(dict_find_pattern(dct, **instance.config()))
        for parent, k, val in nodes:
            parent.pop(k)
            fragment = instance.apply(ExtensionContext(
                mentor=self,
                document=document or dct,
                dct=dct,
                parent_node=parent,
                node=(k, val)))
            if fragment is not None:
                parent.update(fragment)
    return dct
Augments the given dictionary by using all the bound extensions. Args: dct: Dictionary to augment. document: The document the dictionary was loaded from. Returns: The augmented dictionary.
codesearchnet
def get_distrib_version():
    key = 'distrib_ver'
    out, err = run_shell_cmd(cmds_all[PLATFORM][key])
    if err and FLAGS.debug:
        print('Error in detecting distribution version:\n %s' % str(err))
    return out.strip(b'\n')
Retrieves distribution version of the operating system. Returns: String that is the distribution version. e.g. '14.04'
github-repos
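On most modern Linux systems the same information is available without shelling out; this sketch assumes Python 3.10+ for platform.freedesktop_os_release and falls back gracefully elsewhere:

import platform

try:
    release = platform.freedesktop_os_release()      # parses /etc/os-release (Python 3.10+)
    print(release.get("VERSION_ID", "unknown"))      # e.g. '22.04'
except (AttributeError, OSError):
    print("unknown")                                 # older interpreter or no os-release file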
def parse_simple_id(chrom, pos, ref, alt): return '_'.join([chrom, pos, ref, alt])
Parse the simple id for a variant Simple id is used as a human readable reference for a position, it is in no way unique. Args: chrom(str) pos(str) ref(str) alt(str) Returns: simple_id(str): The simple human readable variant id
codesearchnet
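Usage is a plain underscore join of the four fields, for example:

def parse_simple_id(chrom, pos, ref, alt):
    return '_'.join([chrom, pos, ref, alt])

print(parse_simple_id('1', '880086', 'T', 'C'))  # 1_880086_T_C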
def auto_forward(auto=True):
    global __auto_forward_state
    prev = __auto_forward_state
    __auto_forward_state = auto
    yield
    __auto_forward_state = prev
Context for dynamic graph execution mode. Args: auto (bool): Whether forward computation is executed during a computation graph construction. Returns: bool
codesearchnet
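The pattern is the familiar contextlib-based flag toggle; a self-contained sketch with an illustrative flag name, which additionally restores the flag in a finally block (the record above does not):

from contextlib import contextmanager

_auto_forward_state = False

@contextmanager
def auto_forward(auto=True):
    global _auto_forward_state
    prev = _auto_forward_state
    _auto_forward_state = auto
    try:
        yield
    finally:
        _auto_forward_state = prev   # restore even if the body raises

with auto_forward(True):
    print(_auto_forward_state)  # True
print(_auto_forward_state)      # False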