code: string (lengths 20 to 4.93k)
docstring: string (lengths 33 to 1.27k)
source: string (3 classes)
def read_header(self, return_idxs=False): self.header = sigproc.read_header(self.filename, return_idxs=return_idxs) return self.header
Read blimpy header and return a Python dictionary of key:value pairs Args: filename (str): name of file to open Optional args: return_idxs (bool): Default False. If true, returns the file offset indexes for values Returns: Python dict of key:value pairs, OR returns file offset indexes for values.
juraj-google-style
def add_or_update(data, item, value): data = data.splitlines() data = map((lambda x: bytearray(x)), data) conf = filter((lambda x: (x.strip() and (x.strip().split()[0] == item))), data) if conf: conf[0][:] = ((conf[0].strip().split()[0] + ' ') + value) else: comments = filter((lambda x: (x.strip().startswith('#') and (len(x.split('#')) >= 2) and x.split('#')[1].split() and (x.split('#')[1].split()[0] == item))), data) if comments: comments[0][:] = ((comments[0].split('#')[1].split()[0] + ' ') + value) else: data.append((((item + ' ') + value) + '\n')) return '\n'.join(map((lambda x: str(x)), data))
Add or update value in configuration file format used by proftpd. Args: data (str): Configuration file as string. item (str): What option will be added/updated. value (str): Value of option. Returns: str: updated configuration
codesearchnet
def set_number_of_atoms( self, n, selected_sites=None ): self.number_of_atoms = n self.atoms = species.Species( self.lattice.populate_sites( self.number_of_atoms, selected_sites=selected_sites ) )
Set the number of atoms for the simulation, and populate the simulation lattice. Args: n (Int): Number of atoms for this simulation. selected_sites (:obj:(List|Set|String), optional): Selects a subset of site types to be populated with atoms. Defaults to None. Returns: None
juraj-google-style
def sparse_embedding_aggregate_slice(params, values_and_values_mask, combiner='mean', name='sparse_embedding_aggregate_slice'): values, values_mask = values_and_values_mask with ops.name_scope(name): _, embedding_dimension = params.get_shape().as_list() n_batch, n_indices_padded = values.get_shape().as_list() if not n_batch: n_batch = -1 emb_lookup = array_ops.reshape(embedding_ops.embedding_lookup(params, array_ops.reshape(values, [n_batch, n_indices_padded])), [n_batch, n_indices_padded, embedding_dimension]) values_mask_broadcast = array_ops.reshape(values_mask, [n_batch, n_indices_padded, 1]) aggregate_emb = math_ops.reduce_sum(emb_lookup * values_mask_broadcast, axis=1) if combiner == 'sum': return aggregate_emb elif combiner == 'mean': return aggregate_emb / math_ops.maximum(math_ops.reduce_sum(values_mask_broadcast, axis=1), 1.0) else: raise ValueError('Dense TPU Embedding does not support combiner other than sum and mean.')
Uses XLA's dynamic slice operations to perform embedding lookups. From third_party/cloud_tpu/models/movielens/tpu_embedding.py Args: params: Tensor of embedding table. Rank 2 (table_size x embedding dim) values_and_values_mask: is a two-tuple that contains: values - Tensor of embedding indices. Rank 2 (batch x n_indices) values_mask - Tensor of mask / weights. Rank 2 (batch x n_indices) combiner: The combiner to use for the embedding lookup. Currently supports 'sum' and 'mean'. name: Optional name scope for created ops Returns: Rank 2 tensor of aggregated (per batch element) embedding vectors. Raises: ValueError: Combiner is not supported.
github-repos
def add_omim_info(genes, alias_genes, genemap_lines, mim2gene_lines): LOG.info("Add omim info") omim_genes = get_mim_genes(genemap_lines, mim2gene_lines) for hgnc_symbol in omim_genes: omim_info = omim_genes[hgnc_symbol] inheritance = omim_info.get('inheritance', set()) for hgnc_id in get_correct_ids(hgnc_symbol, alias_genes): gene_info = genes[hgnc_id] gene_info['omim_id'] = omim_info['mim_number'] gene_info['inheritance_models'] = list(inheritance) gene_info['phenotypes'] = omim_info.get('phenotypes', [])
Add OMIM information. We collect information on which phenotypes are associated with a gene, which inheritance models apply, and the correct OMIM id. Args: genes(dict): Dictionary with all genes alias_genes(dict): Genes mapped to all aliases genemap_lines(iterable): Iterable with raw OMIM info mim2gene_lines(iterable): Iterable with raw OMIM info
juraj-google-style
def ParseLeakFilesTable(self, parser_mediator, database=None, table=None, **unused_kwargs): if (database is None): raise ValueError('Missing database value.') if (table is None): raise ValueError('Missing table value.') for esedb_record in table.records: if parser_mediator.abort: break record_values = self._GetRecordValues(parser_mediator, table.name, esedb_record) event_data = MsieWebCacheLeakFilesEventData() event_data.cached_filename = record_values.get('Filename', None) event_data.leak_identifier = record_values.get('LeakId', None) timestamp = record_values.get('CreationTime', None) if timestamp: date_time = dfdatetime_filetime.Filetime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses the LeakFiles table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. database (Optional[pyesedb.file]): ESE database. table (Optional[pyesedb.table]): table. Raises: ValueError: if the database or table value is missing.
codesearchnet
def ExtractEvents(self, parser_mediator, registry_key, **kwargs): values_dict = {} for registry_value in registry_key.GetValues(): if not registry_value.name or not registry_value.data: continue if registry_value.name == 'UpdateKey': self._ParseUpdateKeyValue( parser_mediator, registry_value, registry_key.path) else: values_dict[registry_value.name] = registry_value.GetDataAsObject() event_data = windows_events.WindowsRegistryEventData() event_data.key_path = registry_key.path event_data.offset = registry_key.offset event_data.regvalue = values_dict event_data.source_append = self._SOURCE_APPEND event_data.urls = self.URLS event = time_events.DateTimeValuesEvent( registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
juraj-google-style
def validate(obj, schema): if isinstance(obj, str): obj = json.loads(obj) return JsonValidator(schema)._validate(obj)
Validate an object against a schema Args: obj (dict): schema (dict):
codesearchnet
def match_as_dict(self, film_sl_vectors, substrate_sl_vectors, film_vectors, substrate_vectors, match_area): d = {} d['film_sl_vecs'] = np.asarray(film_sl_vectors) d['sub_sl_vecs'] = np.asarray(substrate_sl_vectors) d['match_area'] = match_area d['film_vecs'] = np.asarray(film_vectors) d['sub_vecs'] = np.asarray(substrate_vectors) return d
Returns dict which contains the ZSL match. Args: film_sl_vectors(array) substrate_sl_vectors(array) film_vectors(array) substrate_vectors(array) match_area(float)
codesearchnet
def get_by_name(self, name): managed_sans = self.get_all() result = [x for x in managed_sans if x['name'] == name] resource = result[0] if result else None if resource: resource = self.new(self._connection, resource) return resource
Gets a Managed SAN by name. Args: name: Name of the Managed SAN Returns: dict: Managed SAN.
juraj-google-style
def add(self, virtual_bit, physical_bit=None): if physical_bit is None: physical_candidate = len(self) while physical_candidate in self._p2v: physical_candidate += 1 physical_bit = physical_candidate self[virtual_bit] = physical_bit
Adds a map element between `bit` and `physical_bit`. If `physical_bit` is not defined, `bit` will be mapped to a new physical bit (extending the length of the layout by one.) Args: virtual_bit (tuple): A (qu)bit. For example, (QuantumRegister(3, 'qr'), 2). physical_bit (int): A physical bit. For example, 3.
juraj-google-style
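A minimal, self-contained sketch of the "next free physical bit" search used in the row above, with a plain dict standing in for the layout's internal _p2v map (a hypothetical stand-in, not Qiskit's API):

p2v = {0: 'qr[0]', 2: 'qr[2]'}   # physical bits already assigned

def next_free_physical(p2v):
    candidate = len(p2v)         # start at the current layout length
    while candidate in p2v:      # skip physical bits that are already taken
        candidate += 1
    return candidate

print(next_free_physical(p2v))   # -> 3 (len() == 2 is taken, so the search moves on)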
def DeserializeUnsigned(self, reader): self.Version = reader.ReadUInt32() self.PrevHash = reader.ReadUInt256() self.MerkleRoot = reader.ReadUInt256() self.Timestamp = reader.ReadUInt32() self.Index = reader.ReadUInt32() self.ConsensusData = reader.ReadUInt64() self.NextConsensus = reader.ReadUInt160()
Deserialize unsigned data only. Args: reader (neo.IO.BinaryReader):
juraj-google-style
def jsbuild_prompt(): print(BOKEHJS_BUILD_PROMPT) mapping = {'1': True, '2': False} value = input('Choice? ') while (value not in mapping): print(("Input '%s' not understood. Valid choices: 1, 2\n" % value)) value = input('Choice? ') return mapping[value]
Prompt users whether to build a new BokehJS or install an existing one. Returns: bool : True, if a new build is requested, False otherwise
codesearchnet
def get_object_from_name(name): dot = name.rindex(".") mod_name, property_name = name[:dot], name[dot + 1:] __import__(mod_name) return getattr(sys.modules[mod_name], property_name)
Returns the named object. Arguments: name (str): A string of form `package.subpackage.etc.module.property`. This function will import `package.subpackage.etc.module` and return `property` from that module.
juraj-google-style
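A hedged usage sketch: the helper above needs `import sys`, which the flattened snippet omits, so the function is repeated here to keep the example self-contained.

import sys

def get_object_from_name(name):
    dot = name.rindex('.')
    mod_name, property_name = name[:dot], name[dot + 1:]
    __import__(mod_name)                       # import the module part
    return getattr(sys.modules[mod_name], property_name)

join = get_object_from_name('os.path.join')   # resolve a dotted path to the object it names
print(join('pkg', 'file.py'))                 # -> 'pkg/file.py' on POSIX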
def add_showcases(self, showcases, showcases_to_check=None): if showcases_to_check is None: showcases_to_check = self.get_showcases() allshowcasesadded = True for showcase in showcases: if not self.add_showcase(showcase, showcases_to_check=showcases_to_check): allshowcasesadded = False return allshowcasesadded
Add dataset to multiple showcases Args: showcases (List[Union[Showcase,Dict,str]]): A list of either showcase ids or showcase metadata from Showcase objects or dictionaries showcases_to_check (List[Showcase]): list of showcases against which to check existence of showcase. Defaults to showcases containing dataset. Returns: bool: True if all showcases added or False if any already present
juraj-google-style
def cpu_halt_reasons(self): buf_size = self.MAX_NUM_MOES buf = (structs.JLinkMOEInfo * buf_size)() num_reasons = self._dll.JLINKARM_GetMOEs(buf, buf_size) if num_reasons < 0: raise errors.JLinkException(num_reasons) return list(buf)[:num_reasons]
Retrieves the reasons that the CPU was halted. Args: self (JLink): the ``JLink`` instance Returns: A list of ``JLinkMOEInfo`` instances specifying the reasons for which the CPU was halted. This list may be empty in the case that the CPU is not halted. Raises: JLinkException: on hardware error.
juraj-google-style
def getmethodclass(m): if not hasattr(m, '__name__') and hasattr(m, '__class__') and hasattr(m, '__call__'): if isinstance(m.__class__, type): return m.__class__ m_self = getattr(m, '__self__', None) if m_self is not None: if inspect.isclass(m_self): return m_self return m_self.__class__ owners = [] caller_frame = tf_inspect.currentframe().f_back try: for v in itertools.chain(caller_frame.f_locals.values(), caller_frame.f_globals.values()): if hasattr(v, m.__name__): candidate = getattr(v, m.__name__) if hasattr(candidate, 'im_func'): candidate = candidate.im_func if hasattr(m, 'im_func'): m = m.im_func if candidate is m: owners.append(v) finally: del caller_frame if owners: if len(owners) == 1: return owners[0] owner_types = tuple((o if tf_inspect.isclass(o) else type(o) for o in owners)) for o in owner_types: if tf_inspect.isclass(o) and issubclass(o, tuple(owner_types)): return o raise ValueError('Found too many owners of %s: %s' % (m, owners)) return None
Resolves a function's owner, e.g. a method's class. Note that this returns the object that the function was retrieved from, not necessarily the class where it was defined. This function relies on Python stack frame support in the interpreter, and has the same limitations as inspect.currentframe. Limitations: this function will only work correctly if the owned class is visible in the caller's global or local variables. Args: m: A user-defined function Returns: The class that this function was retrieved from, or None if the function is not an object or class method, or the class that owns the object or method is not visible to m. Raises: ValueError: if the class could not be resolved for any unexpected reason.
github-repos
def conformPadding(cls, chars): pad = chars if (pad and (pad[0] not in PAD_MAP)): pad = cls.getPaddingChars(cls.getPaddingNum(pad)) return pad
Ensure alternate input padding formats are conformed to formats defined in PAD_MAP If chars is already a format defined in PAD_MAP, then it is returned unmodified. Example:: '#' -> '#' '@@@@' -> '@@@@' '%04d' -> '#' Args: chars (str): input padding chars Returns: str: conformed padding chars Raises: ValueError: If chars contains invalid padding characters
codesearchnet
def upload(self, file_path, timeout=(- 1)): return self._client.upload(file_path, timeout=timeout)
Upload an SPP ISO image file or a hotfix file to the appliance. The API supports upload of one hotfix at a time into the system. For the successful upload of a hotfix, ensure its original name and extension are not altered. Args: file_path: Full path to firmware. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: Information about the updated firmware bundle.
codesearchnet
def download_and_extract(uri, name, path): if not os.path.exists(path): os.makedirs(path) if not os.listdir(path): with tmpdir() as tmp: if uri.startswith('s3://'): dst = os.path.join(tmp, 'tar_file') s3_download(uri, dst) with tarfile.open(name=dst, mode='r:gz') as t: t.extractall(path=path) elif os.path.isdir(uri): if uri == path: return if os.path.exists(path): shutil.rmtree(path) shutil.move(uri, path) else: shutil.copy2(uri, os.path.join(path, name))
Download, prepare and install a compressed tar file from S3 or a local directory as an entry point. SageMaker Python SDK saves the user-provided entry points as compressed tar files in S3. Args: name (str): name of the entry point. uri (str): the location of the entry point. path (str): The path where the script will be installed. It will not download and install it if the path already has the user entry point.
juraj-google-style
def get_cosmosdb_account_keys(access_token, subscription_id, rgname, account_name): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.DocumentDB/databaseAccounts/', account_name, '/listKeys', '?api-version=', COSMOSDB_API]) return do_post(endpoint, '', access_token)
Get the access keys for the specified Cosmos DB account. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. account_name (str): Name of the Cosmos DB account. Returns: HTTP response. JSON body of Cosmos DB account keys.
juraj-google-style
def __init__(self, tensor_proto, initialized=True): self._tensor_proto = tensor_proto self._initialized = initialized
Constructor. Args: tensor_proto: the `TensorProto` object that cannot be represented as a `np.ndarray` object. initialized: (`bool`) whether the Tensor is initialized.
github-repos
def HasTable(self, table_name): if not self._connection: raise RuntimeError( 'Cannot determine if table exists database not opened.') sql_query = self._HAS_TABLE_QUERY.format(table_name) self._cursor.execute(sql_query) if self._cursor.fetchone(): return True return False
Determines if a specific table exists. Args: table_name (str): table name. Returns: bool: True if the table exists. Raises: RuntimeError: if the database is not opened.
juraj-google-style
def set_documents(self, documents, fully_formed=False): def add_id(document, id): def make_id_tag(root, rel_path, max_depth): if (max_depth < 0): raise ParameterError('document_id_xpath too deep!') if (not rel_path): return root else: child = root.find(rel_path[0]) if (child is None): child = ET.Element(rel_path[0]) root.append(child) return make_id_tag(child, rel_path[1:], (max_depth - 1)) make_id_tag(document, doc_id_xpath, 10).text = str(id) if fully_formed: if (not isinstance(documents, list)): documents = [documents] else: doc_root_tag = self.connection.document_root_xpath doc_id_xpath = self.connection.document_id_xpath.split('/') documents = dict([(id, to_etree((document if (document is not None) else query.term('', doc_root_tag)), doc_root_tag)) for (id, document) in documents.items()]) for (id, document) in documents.items(): if (document.tag != doc_root_tag): documents[id] = ET.Element(doc_root_tag) documents[id].append(document) for (id, document) in documents.items(): add_id(document, id) documents = documents.values() self._documents = map(to_raw_xml, documents)
Wrap documents in the correct root tags, add id fields and convert them to xml strings. Args: documents -- If fully_formed is False (default), accepts dict where keys are document ids and values can be either xml string, etree.ElementTree or dict representation of an xml document (see dict_to_etree()). If fully_formed is True, accepts list or single document where ids are integrated in the document or not needed and the document has the right root tag. Keyword args: fully_formed -- If documents are fully formed (contain the right root tags and id fields) set to True to avoid the overhead of documents being parsed at all. If set to True only a list of documents or a single document can be passed as 'documents', not a dict of documents. Default is False.
codesearchnet
def _get_package_name(prefix=settings.TEMP_DIR, book_id=None): if book_id is None: book_id = str(uuid.uuid4()) return os.path.join(prefix, book_id)
Return package path. Use uuid to generate package's directory name. Args: book_id (str, default None): UUID of the book. prefix (str, default settings.TEMP_DIR): Where the package will be stored. Default :attr:`settings.TEMP_DIR`. Returns: str: Path to the root directory.
juraj-google-style
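A hedged usage sketch; '/tmp/packages' stands in for settings.TEMP_DIR, which is not shown in this row.

import os
import uuid

def _get_package_name(prefix='/tmp/packages', book_id=None):
    if book_id is None:
        book_id = str(uuid.uuid4())            # random directory name
    return os.path.join(prefix, book_id)

print(_get_package_name())                     # e.g. /tmp/packages/8c1f0a7e-...
print(_get_package_name(book_id='my-book'))    # /tmp/packages/my-book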
def range_index_map(batch_shape, num_segments, name='range_index_map'): batch_shape = tf.convert_to_tensor(batch_shape) batch_shape.shape.assert_has_rank(1) num_segments = tf.convert_to_tensor(num_segments) num_segments.shape.assert_has_rank(0) indices = tf.range(num_segments) shape = tf.concat([tf.ones_like(batch_shape, dtype=tf.int32), tf.expand_dims(num_segments, axis=0)], axis=0) indices = tf.reshape(indices, shape) multiples = tf.concat([batch_shape, [1]], axis=0) indices = tf.tile(indices, multiples) return IndexMap(indices=indices, num_segments=num_segments, batch_dims=batch_shape.shape.as_list()[0])
Constructs an index map equal to range(num_segments). Args: batch_shape (`tf.Tensor`): Batch shape num_segments (`int`): Number of segments name (`str`, *optional*, defaults to 'range_index_map'): Name for the operation. Currently not used Returns: (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments).
github-repos
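A NumPy rendition of the index construction above (reshape range(num_segments) to rank len(batch_shape) + 1, then tile over the batch); this only illustrates the shapes, not the TF IndexMap type.

import numpy as np

batch_shape, num_segments = [2, 3], 4
indices = np.arange(num_segments)                                   # [0, 1, 2, 3]
indices = indices.reshape([1] * len(batch_shape) + [num_segments])  # shape (1, 1, 4)
indices = np.tile(indices, batch_shape + [1])                       # shape (2, 3, 4)

print(indices.shape)   # (2, 3, 4)
print(indices[1, 2])   # [0 1 2 3] - every batch element holds range(num_segments)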
def aggregate_gradients_using_copy_with_device_selection(tower_grads, avail_devices, use_mean=True, check_inf_nan=False): agg_grads = [] has_nan_or_inf_list = [] for (i, single_grads) in enumerate(zip(*tower_grads)): with tf.device(avail_devices[(i % len(avail_devices))]): (grad_and_var, has_nan_or_inf) = aggregate_single_gradient(single_grads, use_mean, check_inf_nan) agg_grads.append(grad_and_var) has_nan_or_inf_list.append(has_nan_or_inf) return agg_grads
Aggregate gradients, controlling device for the aggregation. Args: tower_grads: List of lists of (gradient, variable) tuples. The outer list is over towers. The inner list is over individual gradients. avail_devices: List of device names across which the aggregation work is spread. use_mean: if True, mean is taken, else sum of gradients is taken. check_inf_nan: If true, check grads for nans and infs. Returns: The tuple ([(average_gradient, variable),], has_nan_or_inf) where the gradient has been averaged across all towers. The variable is chosen from the first tower. The has_nan_or_inf indicates whether the grads have nan or inf.
codesearchnet
def _build_update_ops(self, mean, variance, is_training): def build_update_ops(): 'Builds the exponential moving average update ops.' update_mean_op = moving_averages.assign_moving_average(variable=self._moving_mean, value=tf.reshape(mean, (self._num_channels,)), decay=self._decay_rate, zero_debias=False, name='update_moving_mean').op update_variance_op = moving_averages.assign_moving_average(variable=self._moving_variance, value=tf.reshape(variance, (self._num_channels,)), decay=self._decay_rate, zero_debias=False, name='update_moving_variance').op return (update_mean_op, update_variance_op) def build_no_ops(): return (tf.no_op(), tf.no_op()) is_training_const = utils.constant_value(is_training) if ((is_training_const is None) or is_training_const): (update_mean_op, update_variance_op) = utils.smart_cond(is_training, build_update_ops, build_no_ops) return (update_mean_op, update_variance_op) else: return None
Builds the moving average update ops when using moving variance. Args: mean: The mean value to update with. variance: The variance value to update with. is_training: Boolean Tensor to indicate if we're currently in training mode. Returns: Tuple of `(update_mean_op, update_variance_op)` when `is_training` is or could be `True`. Returns `None` when `is_training=False`.
codesearchnet
def check_python_import(package_or_module): logger = logging.getLogger(__name__) logger.debug("Checking python import '%s'...", package_or_module) loader = pkgutil.get_loader(package_or_module) found = loader is not None if found: logger.debug("Python %s '%s' found", "package" if loader.is_package(package_or_module) else "module", package_or_module) else: logger.debug("Python import '%s' not found", package_or_module) return found
Checks if a python package or module is importable. Arguments: package_or_module -- the package or module name to check Returns: True or False
juraj-google-style
def get(self): parser = reqparse.RequestParser() parser.add_argument('search', type=str, required=True) parser.add_argument('limit', type=int) args = parser.parse_args() if not args['search']: return make_error(400, 'text_search cannot be empty') if not args['limit']: del args['limit'] pool = current_app.config['bigchain_pool'] with pool() as bigchain: assets = bigchain.text_search(**args) try: return list(assets) except OperationError as e: return make_error( 400, '({}): {}'.format(type(e).__name__, e) )
API endpoint to perform a text search on the assets. Args: search (str): Text search string to query the text index limit (int, optional): Limit the number of returned documents. Return: A list of assets that match the query.
juraj-google-style
def with_env_recursive(cmd, **envvars): from plumbum.commands.base import BoundCommand, BoundEnvCommand if isinstance(cmd, BoundCommand): cmd.cmd = with_env_recursive(cmd.cmd, **envvars) elif isinstance(cmd, BoundEnvCommand): cmd.envvars.update(envvars) cmd.cmd = with_env_recursive(cmd.cmd, **envvars) return cmd
Recursively updates the environment of cmd and all its subcommands. Args: cmd - A plumbum command-like object **envvars - The environment variables to update Returns: The updated command.
codesearchnet
def convert_to_scl(spec, scl_options): scl_options['skip_functions'] = scl_options['skip_functions'].split(',') scl_options['meta_spec'] = None convertor = SclConvertor(options=scl_options) return str(convertor.convert(spec))
Convert spec into SCL-style spec file using `spec2scl`. Args: spec: (str) a spec file scl_options: (dict) SCL options provided Returns: A converted spec file
juraj-google-style
def set_representative_sequence(self, force_rerun=False): if len(self.sequences) == 0: log.error('{}: no sequences mapped'.format(self.id)) return self.representative_sequence kegg_mappings = self.filter_sequences(KEGGProp) if len(kegg_mappings) > 0: kegg_to_use = kegg_mappings[0] if len(kegg_mappings) > 1: log.warning('{}: multiple KEGG mappings found, using the first entry {}'.format(self.id, kegg_to_use.id)) uniprot_mappings = self.filter_sequences(UniProtProp) if self.representative_sequence and not force_rerun: log.debug('{}: representative sequence already set'.format(self.id)) elif len(kegg_mappings) > 0 and len(uniprot_mappings) == 0: self.representative_sequence = kegg_to_use log.debug('{}: representative sequence set from KEGG ID {}'.format(self.id, kegg_to_use.id)) elif len(kegg_mappings) == 0 and len(uniprot_mappings) > 0: u_ranker = [] for u in uniprot_mappings: u_ranker.append((u.id, u.ranking_score())) sorted_by_second = sorted(u_ranker, key=lambda tup: tup[1], reverse=True) best_u_id = sorted_by_second[0][0] best_u = uniprot_mappings.get_by_id(best_u_id) self.representative_sequence = best_u log.debug('{}: representative sequence set from UniProt ID {}'.format(self.id, best_u_id)) elif len(kegg_mappings) > 0 and len(uniprot_mappings) > 0: if kegg_to_use.num_pdbs > 0 and not uniprot_mappings.has_id(kegg_to_use.uniprot): self.representative_sequence = kegg_to_use log.debug('{}: representative sequence set from KEGG ID {}'.format(self.id, kegg_to_use.id)) else: u_ranker = [] for u in uniprot_mappings: u_ranker.append((u.id, u.ranking_score())) sorted_by_second = sorted(u_ranker, key=lambda tup: tup[1], reverse=True) best_u_id = sorted_by_second[0][0] best_u = uniprot_mappings.get_by_id(best_u_id) self.representative_sequence = best_u log.debug('{}: representative sequence set from UniProt ID {}'.format(self.id, best_u_id)) return self.representative_sequence
Automatically consolidate loaded sequences (manual, UniProt, or KEGG) and set a single representative sequence. Manually set representative sequences override all existing mappings. UniProt mappings override KEGG mappings except when KEGG mappings have PDBs associated with them and UniProt doesn't. Args: force_rerun (bool): Set to True to recheck stored sequences Returns: SeqProp: Which sequence was set as representative
juraj-google-style
def __init__(self, host, cert, reason): httplib.HTTPException.__init__(self) self.host = host self.cert = cert self.reason = reason
Constructor. Args: host: The hostname the connection was made to. cert: The SSL certificate (as a dictionary) the host returned. reason: The reason the certificate validation failed.
juraj-google-style
def destroy_sns_event(app_name, env, region): session = boto3.Session(profile_name=env, region_name=region) sns_client = session.client('sns') lambda_subscriptions = get_sns_subscriptions(app_name=app_name, env=env, region=region) for subscription_arn in lambda_subscriptions: sns_client.unsubscribe(SubscriptionArn=subscription_arn) LOG.debug("Lambda SNS event deleted") return True
Destroy all Lambda SNS subscriptions. Args: app_name (str): name of the lambda function env (str): Environment/Account for lambda function region (str): AWS region of the lambda function Returns: boolean: True if subscription destroyed successfully
juraj-google-style
def create_checksum_object_from_stream( f, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM ): checksum_str = calculate_checksum_on_stream(f, algorithm) checksum_pyxb = d1_common.types.dataoneTypes.checksum(checksum_str) checksum_pyxb.algorithm = algorithm return checksum_pyxb
Calculate the checksum of a stream. Args: f: file-like object Only requirement is a ``read()`` method that returns ``bytes``. algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. Returns: Populated Checksum PyXB object.
juraj-google-style
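A hedged sketch of the checksum-on-a-stream step using only hashlib; the real helper wraps the digest in a DataONE Checksum PyXB object, which is omitted here.

import hashlib
import io

def calculate_checksum_on_stream(f, algorithm='MD5', chunk_size=1024 * 1024):
    digest = hashlib.new(algorithm.replace('-', '').lower())   # 'SHA-1' -> 'sha1'
    for chunk in iter(lambda: f.read(chunk_size), b''):        # read until EOF
        digest.update(chunk)
    return digest.hexdigest()

print(calculate_checksum_on_stream(io.BytesIO(b'hello world')))
# 5eb63bbbe01eeed093cb22bb8f5acdc3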
def is_applicable(self, trackable: base.Trackable) -> bool:
Returns whether the adapter is applicable to trackable for resharding. Args: trackable: A Trackable object that is being restored. Returns: A Boolean indicating if the checkpoint value for this Trackable should be resharded.
github-repos
def maybe_zero_out_padding(inputs, kernel_size, nonpadding_mask): if ((kernel_size != 1) and (kernel_size != (1, 1)) and (nonpadding_mask is not None)): while (nonpadding_mask.get_shape().ndims < inputs.get_shape().ndims): nonpadding_mask = tf.expand_dims(nonpadding_mask, (- 1)) return (inputs * nonpadding_mask) return inputs
If necessary, zero out inputs to a conv for padding positions. Args: inputs: a Tensor with shape [batch, length, ...] kernel_size: an integer or pair of integers nonpadding_mask: a Tensor with shape [batch, length] Returns: Tensor of the same shape as inputs.
codesearchnet
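A NumPy illustration of the masking idea: expand the [batch, length] mask with a trailing singleton dimension until it broadcasts against the inputs, then multiply. This is a sketch of the arithmetic only, not the TF op.

import numpy as np

inputs = np.arange(12, dtype=float).reshape(2, 3, 2)       # [batch, length, channels]
nonpadding_mask = np.array([[1., 1., 0.],
                            [1., 0., 0.]])                  # [batch, length]
mask = nonpadding_mask[..., np.newaxis]                     # [batch, length, 1]
print(inputs * mask)   # padding positions are zeroed across every channel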
def __init__(self, opt, reduction=losses.Reduction.MEAN, name='CrossShardOptimizer', group_assignment=None): accepted_reductions = (losses.Reduction.SUM, losses.Reduction.MEAN) if reduction not in accepted_reductions: raise ValueError(f'Argument `reduction` should be one of {accepted_reductions}. Received: {reduction}') if not isinstance(opt, optimizer.Optimizer): raise TypeError(f'CrossShardOptimizer only works with tf.training.Optimizer and not Keras Optimizer. Received: {opt}. If you are using TPUStrategy, Keras Optimizer will sum gradients across replicas.If you want to average your gradients, rescale your loss with: `loss /= global_batch_size`') super(CrossShardOptimizer, self).__init__(False, name) self._opt = opt self._reduction = reduction self._group_assignment = group_assignment
Construct a new cross-shard optimizer. Args: opt: An existing `Optimizer` to encapsulate. reduction: The reduction to apply to the shard losses. name: Optional name prefix for the operations created when applying gradients. Defaults to "CrossShardOptimizer". group_assignment: Optional 2d int32 lists with shape [num_groups, num_replicas_per_group] which describes how to apply the optimizer to subgroups. Raises: ValueError: If reduction is not a valid cross-shard reduction.
github-repos
def redact_event(self, room_id, event_id, reason=None, txn_id=None, timestamp=None): if not txn_id: txn_id = self._make_txn_id() path = '/rooms/%s/redact/%s/%s' % ( room_id, event_id, txn_id ) content = {} if reason: content['reason'] = reason params = {} if timestamp: params["ts"] = timestamp return self._send("PUT", path, content, query_params=params)
Perform PUT /rooms/$room_id/redact/$event_id/$txn_id/ Args: room_id(str): The room ID to redact the message event in. event_id(str): The event id to redact. reason (str): Optional. The reason the message was redacted. txn_id(int): Optional. The transaction ID to use. timestamp(int): Optional. Set origin_server_ts (For application services only)
juraj-google-style
def get_frame(self, index): frame_num = self.frame_index[index] onset = (float(frame_num) / self.fps) if (index < (self.n_frames - 1)): next_frame_num = self.frame_index[(index + 1)] end = (float(next_frame_num) / self.fps) else: end = float(self.duration) duration = ((end - onset) if (end > onset) else 0.0) return VideoFrameStim(self, frame_num, data=self.clip.get_frame(onset), duration=duration)
Get video frame at the specified index. Args: index (int): Positional index of the desired frame.
codesearchnet
def dot_product(t1, t2, keep_dims=False, name=None, reduction_dim=None): with tf.name_scope(name, 'dot', [t1, t2]) as scope: t1 = tf.convert_to_tensor(t1, name='t1') t2 = tf.convert_to_tensor(t2, name='t2') mul = tf.multiply(t1, t2) if (not reduction_dim): reduction_dim = _last_index(mul, 1) return tf.reduce_sum(mul, reduction_dim, name=scope, keep_dims=keep_dims)
Computes the dot product of t1 and t2. Args: t1: A rank 2 tensor. t2: A tensor that is the same size as t1. keep_dims: If true, reduction does not change the rank of the input. name: Optional name for this op. reduction_dim: The dimension to reduce, by default choose the last one and if no shape is specified guess 1. Returns: The dot product.
codesearchnet
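The reduction the op performs, spelled out with NumPy for a rank-2 case (elementwise multiply, then sum over the last axis):

import numpy as np

t1 = np.array([[1., 2.], [3., 4.]])
t2 = np.array([[5., 6.], [7., 8.]])
print((t1 * t2).sum(axis=-1))   # [17. 53.] - row-wise dot products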
def from_string(contents): lines = [l.strip() for l in contents.split("\n")] link0_patt = re.compile(r"^(%.+)\s*=\s*(.+)") link0_dict = {} for i, l in enumerate(lines): if link0_patt.match(l): m = link0_patt.match(l) link0_dict[m.group(1).strip("=")] = m.group(2) route_patt = re.compile(r"^#[sSpPnN]*.*") route = "" route_index = None for i, l in enumerate(lines): if route_patt.match(l): route += " " + l route_index = i elif (l == "" or l.isspace()) and route_index: break functional, basis_set, route_paras, dieze_tag = read_route_line(route) ind = 2 title = [] while lines[route_index + ind].strip(): title.append(lines[route_index + ind].strip()) ind += 1 title = ' '.join(title) ind += 1 toks = re.split(r"[,\s]+", lines[route_index + ind]) charge = int(toks[0]) spin_mult = int(toks[1]) coord_lines = [] spaces = 0 input_paras = {} ind += 1 for i in range(route_index + ind, len(lines)): if lines[i].strip() == "": spaces += 1 if spaces >= 2: d = lines[i].split("=") if len(d) == 2: input_paras[d[0]] = d[1] else: coord_lines.append(lines[i].strip()) mol = GaussianInput._parse_coords(coord_lines) mol.set_charge_and_spin(charge, spin_mult) return GaussianInput(mol, charge=charge, spin_multiplicity=spin_mult, title=title, functional=functional, basis_set=basis_set, route_parameters=route_paras, input_parameters=input_paras, link0_parameters=link0_dict, dieze_tag=dieze_tag)
Creates GaussianInput from a string. Args: contents: String representing a Gaussian input file. Returns: GaussianInput object
juraj-google-style
def __init__(self, *args, **kwargs): super(UnionClusterResolver, self).__init__() self._rpc_layer = kwargs.pop('rpc_layer', None) self._task_type = kwargs.pop('task_type', None) self._task_id = kwargs.pop('task_id', None) if kwargs: raise ValueError('Unexpected kwargs provided {!r}'.format(kwargs)) if not args: raise ValueError('At least one ClusterResolver is required.') for cluster_resolver in args: if not isinstance(cluster_resolver, ClusterResolver): raise TypeError('All arguments must be a sub-class of `ClusterResolver.`') self._cluster_resolvers = args
Initializes a UnionClusterResolver with other ClusterResolvers. Args: *args: `ClusterResolver` objects to be unionized. **kwargs: rpc_layer - (Optional) Override value for the RPC layer used by TensorFlow. task_type - (Optional) Override value for the current task type. task_id - (Optional) Override value for the current task index. Raises: TypeError: If any argument is not a subclass of `ClusterResolvers`. ValueError: If there are no arguments passed.
github-repos
def ragged_rank(self): values_is_ragged = isinstance(self._values, RaggedTensor) return self._values.ragged_rank + 1 if values_is_ragged else 1
The number of times the RaggedTensor's flat_values is partitioned. Examples: >>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]]) >>> values.ragged_rank 1 >>> rt = tf.RaggedTensor.from_uniform_row_length(values, 2) >>> rt.ragged_rank 2 Returns: A Python `int` indicating the number of times the underlying `flat_values` Tensor has been partitioned to add a new dimension. I.e., `tf.rank(rt) = tf.rank(rt.flat_values) + rt.ragged_rank`.
github-repos
def __init__(self, name): super(MemberSectionDefinition, self).__init__() self.name = name self.members = []
Initializes a member section definition. Args: name (str): name.
juraj-google-style
def _GetRequestClass(self, method_descriptor): if (method_descriptor.containing_service != self.descriptor): raise RuntimeError('GetRequestClass() given method descriptor for wrong service type.') return method_descriptor.input_type._concrete_class
Returns the class of the request protocol message. Args: method_descriptor: Descriptor of the method for which to return the request protocol message class. Returns: A class that represents the input protocol message of the specified method.
codesearchnet
def remove_object_from_list(self, obj, list_element): list_element = self._handle_location(list_element) if isinstance(obj, JSSObject): results = [item for item in list_element.getchildren() if item.findtext("id") == obj.id] elif isinstance(obj, (int, basestring)): results = [item for item in list_element.getchildren() if item.findtext("id") == str(obj) or item.findtext("name") == obj] if len(results) == 1: list_element.remove(results[0]) elif len(results) > 1: raise ValueError("There is more than one matching object at that " "path!")
Remove an object from a list element. Args: obj: Accepts JSSObjects, id's, and names list_element: Accepts an Element or a string path to that element
juraj-google-style
def expression(value): if isinstance(value, Expression): return Expression(value._type, value._value) if hasattr(value, 'spl_json'): sj = value.spl_json() return Expression(sj['type'], sj['value']) return Expression('splexpr', value)
Create an SPL expression. Args: value: Expression as a string or another `Expression`. If value is an instance of `Expression` then a new instance is returned containing the same type and value. Returns: Expression: SPL expression from `value`.
juraj-google-style
def getColorHSV(name): try: x = getColorInfoList()[getColorList().index(name.upper())] except: return ((- 1), (- 1), (- 1)) r = (x[1] / 255.0) g = (x[2] / 255.0) b = (x[3] / 255.0) cmax = max(r, g, b) V = round((cmax * 100), 1) cmin = min(r, g, b) delta = (cmax - cmin) if (delta == 0): hue = 0 elif (cmax == r): hue = (60.0 * (((g - b) / delta) % 6)) elif (cmax == g): hue = (60.0 * (((b - r) / delta) + 2)) else: hue = (60.0 * (((r - g) / delta) + 4)) H = int(round(hue)) if (cmax == 0): sat = 0 else: sat = (delta / cmax) S = int(round((sat * 100))) return (H, S, V)
Retrieve the hue, saturation, value triple of a color name. Returns: a triple (degree, percent, percent). If not found (-1, -1, -1) is returned.
codesearchnet
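For comparison, the same RGB-to-HSV conversion is available in the standard library; a hedged check against colorsys for pure red (the function above additionally looks the RGB triple up by color name):

import colorsys

r, g, b = 255, 0, 0                                         # pure red
h, s, v = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)
print(round(h * 360), round(s * 100), round(v * 100, 1))    # 0 100 100.0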
def to_lasio(self, keys=None, basis=None): l = lasio.LASFile() l.well.DATE = str(datetime.datetime.today()) for (obj, dic) in LAS_FIELDS.items(): if (obj == 'data'): continue for (attr, (sect, item)) in dic.items(): value = getattr(getattr(self, obj), attr, None) try: getattr(l, sect)[item].value = value except: h = lasio.HeaderItem(item, '', value, '') getattr(l, sect)[item] = h l.header['Curves'] = [] if (basis is None): basis = self.survey_basis(keys=keys) try: l.add_curve('DEPT', basis) except: raise Exception('Please provide a depth basis.') setattr(l.well, 'STRT', basis[0]) setattr(l.well, 'STOP', basis[(- 1)]) setattr(l.well, 'STEP', (basis[1] - basis[0])) other = '' if (keys is None): keys = [k for (k, v) in self.data.items() if isinstance(v, Curve)] else: keys = utils.flatten_list(keys) for k in keys: d = self.data[k] if (getattr(d, 'null', None) is not None): d[np.isnan(d)] = d.null try: new_data = np.copy(d.to_basis_like(basis)) except: pass try: descr = getattr(d, 'description', '') l.add_curve(k.upper(), new_data, unit=d.units, descr=descr) except: try: other += ('{}\n'.format(k.upper()) + d.to_csv()) except: pass if other: l.other = other return l
Makes a lasio object from the current well. Args: basis (ndarray): Optional. The basis to export the curves in. If you don't specify one, it will survey all the curves with ``survey_basis()``. keys (list): List of strings: the keys of the data items to include, if not all of them. You can have nested lists, such as you might use for ``tracks`` in ``well.plot()``. Returns: lasio. The lasio object.
codesearchnet
def save(self, file_prefix: tensor_lib.Tensor, options: 'checkpoint_options.CheckpointOptions | None'=None) -> ops.Operation: options = options or checkpoint_options.CheckpointOptions() with ops.device('CPU'): sharded_suffix = array_ops.where(string_ops.regex_full_match(file_prefix, '^s3://.*'), constant_op.constant('.part'), constant_op.constant('_temp/part')) tmp_checkpoint_prefix = string_ops.string_join([file_prefix, sharded_suffix]) registered_paths = {saver_name: registered_saver_filename(file_prefix, saver_name) for saver_name in self._registered_savers} def save_fn() -> ops.Operation: saved_prefixes = [] for saver_name, (save_fn, _) in self._registered_savers.items(): maybe_saved_prefixes = save_fn(registered_paths[saver_name]) if maybe_saved_prefixes is not None: flattened_saved_prefixes = nest.flatten(maybe_saved_prefixes) if not all((tensor_util.is_tf_type(x) and x.dtype == dtypes.string for x in flattened_saved_prefixes)): raise ValueError(f'Registered saver must return a (maybe empty) list of string type tensors. Got {maybe_saved_prefixes}.') saved_prefixes.extend(flattened_saved_prefixes) shards_by_task = self._get_shards_by_task(options.experimental_sharding_callback) num_shards = sum([len(shards) for _, shards in shards_by_task]) metrics.AddNumCheckpointShardsWritten(num_shards=num_shards) num_shards_tensor = constant_op.constant(num_shards, name='num_shards') sharded_saves = [] shard_idx = 0 for task, shards in shards_by_task: for shard in shards: with ops.device(task): shard_prefix = sharded_filename(tmp_checkpoint_prefix, shard_idx, num_shards_tensor) shard_idx += 1 saved_prefixes.append(shard_prefix) sharded_saves.append(_single_shard_save(shard_prefix, shard, task, options)) with ops.control_dependencies(sharded_saves): tensor_device_spec = list(self._shardable_tensors_by_task.keys())[-1] merge_device_spec = options.experimental_io_device or saveable_object_util.set_cpu0(tensor_device_spec.to_string()) with ops.device(merge_device_spec): return gen_io_ops.merge_v2_checkpoints(saved_prefixes, file_prefix, delete_old_dirs=True) if context.executing_eagerly() and self._num_unique_tasks > 1: @def_function.function(jit_compile=False) def tf_function_save() -> None: save_fn() tf_function_save() else: return save_fn()
Save the saveable objects to a checkpoint with `file_prefix`. Args: file_prefix: A string or scalar string Tensor containing the prefix to save under. options: Optional `CheckpointOptions` object. Returns: An `Operation`, or None when executing eagerly.
github-repos
def get_image_features(self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, vision_feature_layer: Optional[Union[int, List[int]]]=None, vision_feature_select_strategy: Optional[str]=None): vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy image_num_patches = [image_size_to_num_patches(image_size=imsize, grid_pinpoints=self.config.image_grid_pinpoints, patch_size=self.config.vision_config.image_size) for imsize in image_sizes] if pixel_values.dim() == 5: _pixel_values_list = [pix_val[:num_patch] for pix_val, num_patch in zip(pixel_values, image_num_patches)] pixel_values = torch.cat(_pixel_values_list, dim=0) elif pixel_values.dim() != 4: raise ValueError(f'pixel_values of shape {pixel_values.shape}, expect to be of 4 or 5 dimensions') image_features = self.vision_tower(pixel_values, output_hidden_states=True) if isinstance(vision_feature_layer, int): selected_image_feature = image_features.hidden_states[vision_feature_layer] else: hs_pool = [image_features.hidden_states[layer_idx] for layer_idx in vision_feature_layer] selected_image_feature = torch.cat(hs_pool, dim=-1) if vision_feature_select_strategy == 'default': selected_image_feature = selected_image_feature[:, 1:] elif vision_feature_select_strategy == 'full': selected_image_feature = selected_image_feature image_features = self.multi_modal_projector(selected_image_feature) image_features = torch.split(image_features, image_num_patches, dim=0) image_features, feature_lens = self.pack_image_features(image_features, image_sizes, vision_feature_select_strategy=vision_feature_select_strategy, image_newline=self.image_newline) return image_features
Obtains image last hidden states from the vision tower and apply multimodal projection. Args: pixel_values (`torch.FloatTensor]` of shape `(batch_size, num_patches, channels, height, width)`) The tensors corresponding to the input images. image_sizes (`torch.Tensor` of shape `(num_images, 2)`) Actual image size of each images (H, W). vision_feature_layer (`Union[int, List[int]]`, *optional*): The index of the layer to select the vision feature. If multiple indices are provided, the vision feature of the corresponding indices will be concatenated to form the vision features. vision_feature_select_strategy (`str`, *optional*): The feature selection strategy used to select the vision feature from the vision backbone. Can be one of `"default"` or `"full"` Returns: image_features (List[`torch.Tensor`]): List of image feature tensor, each contains all the visual feature of all patches and are of shape `(num_patches, image_length, embed_dim)`).
github-repos
def decode(obj, content_type): try: decoder = _decoders_map[content_type] return decoder(obj) except KeyError: raise _errors.UnsupportedFormatError(content_type)
Decode an object encoded in one of the default content types to a numpy array. Args: obj (object): to be decoded. content_type (str): content type to be used. Returns: np.array: decoded object.
codesearchnet
def profile_setting_default_args(ij): profile_default_args = OrderedDict() profile_default_args['api_default_org'] = '$env.API_DEFAULT_ORG' profile_default_args['api_access_id'] = '$env.API_ACCESS_ID' profile_default_args['api_secret_key'] = '$envs.API_SECRET_KEY' profile_default_args['tc_api_path'] = '$env.TC_API_PATH' profile_default_args['tc_docker'] = False profile_default_args['tc_in_path'] = 'log' profile_default_args['tc_log_level'] = 'debug' profile_default_args['tc_log_path'] = 'log' profile_default_args['tc_log_to_api'] = False profile_default_args['tc_out_path'] = 'log' profile_default_args['tc_proxy_external'] = False profile_default_args['tc_proxy_host'] = '$env.TC_PROXY_HOST' profile_default_args['tc_proxy_port'] = '$env.TC_PROXY_PORT' profile_default_args['tc_proxy_password'] = '$envs.TC_PROXY_PASSWORD' profile_default_args['tc_proxy_tc'] = False profile_default_args['tc_proxy_username'] = '$env.TC_PROXY_USERNAME' profile_default_args['tc_temp_path'] = 'log' if (ij.get('runtimeLevel') == 'Playbook'): profile_default_args['tc_playbook_db_type'] = 'Redis' profile_default_args['tc_playbook_db_context'] = str(uuid4()) profile_default_args['tc_playbook_db_path'] = '$env.DB_PATH' profile_default_args['tc_playbook_db_port'] = '$env.DB_PORT' profile_default_args['tc_playbook_out_variables'] = '' return profile_default_args
Build the default args for this profile. Args: ij (dict): The install.json contents. Returns: dict: The default args for a Job or Playbook App.
codesearchnet
def url(self): base_url = 'https: archived_at = self._get_archived_at() if (archived_at and archived_at.startswith('<')): archived_at = archived_at[1:] if (archived_at and archived_at.endswith('>')): archived_at = archived_at[:(- 1)] if (archived_at and archived_at.startswith('http')): return archived_at elif archived_at: return (base_url + archived_at) else: return None
A URL to the email in HyperKitty. Returns: str or None: A relevant URL.
codesearchnet
def _find_executable_or_die(executable_name: str, executable_path: Optional[str]=None) -> str: if executable_path: return str(pathlib.Path(executable_path).resolve(strict=True)) resolved_path_to_exe = _find_executable(executable_name) if resolved_path_to_exe is None: raise RuntimeError(f'Could not find executable `{executable_name}`! Please change your $PATH or pass the path directly like`--{executable_name}_path=path/to/executable.') logging.info('Found path to %s at %s', executable_name, resolved_path_to_exe) return resolved_path_to_exe
Finds executable and resolves symlinks or raises RuntimeError. Resolving symlinks is sometimes necessary for finding system headers. Args: executable_name: The name of the executable that we want to find. executable_path: If not None, the path to the executable. Returns: The path to the executable we are looking for, after symlinks are resolved. Raises: RuntimeError: if path to the executable cannot be found.
github-repos
def __init__(self, file_pattern, interval=360.0, has_deduplication=True, start_timestamp=Timestamp.now(), stop_timestamp=MAX_TIMESTAMP, match_updated_files=False, apply_windowing=False, empty_match_treatment=EmptyMatchTreatment.ALLOW): self.file_pattern = file_pattern self.interval = interval self.has_deduplication = has_deduplication self.start_ts = start_timestamp self.stop_ts = stop_timestamp self.match_upd = match_updated_files self.apply_windowing = apply_windowing self.empty_match_treatment = empty_match_treatment _LOGGER.warning('Matching Continuously is stateful, and can scale poorly. Consider using Pub/Sub Notifications (https://cloud.google.com/storage/docs/pubsub-notifications) if possible')
Initializes a MatchContinuously transform. Args: file_pattern: The file path to read from. interval: Interval at which to check for files in seconds. has_deduplication: Whether files already read are discarded or not. start_timestamp: Timestamp for start file checking. stop_timestamp: Timestamp after which no more files will be checked. match_updated_files: (When has_deduplication is set to True) whether match file with timestamp changes. apply_windowing: Whether each element should be assigned to individual window. If false, all elements will reside in global window.
github-repos
def load_weights_from_hdf5_group(f, layers): if 'keras_version' in f.attrs: original_keras_version = f.attrs['keras_version'] if hasattr(original_keras_version, 'decode'): original_keras_version = original_keras_version.decode('utf8') else: original_keras_version = '1' if 'backend' in f.attrs: original_backend = f.attrs['backend'] if hasattr(original_backend, 'decode'): original_backend = original_backend.decode('utf8') else: original_backend = None filtered_layers = [] for layer in layers: weights = _legacy_weights(layer) if weights: filtered_layers.append(layer) layer_names = load_attributes_from_hdf5_group(f, 'layer_names') filtered_layer_names = [] for name in layer_names: g = f[name] weight_names = load_attributes_from_hdf5_group(g, 'weight_names') if weight_names: filtered_layer_names.append(name) layer_names = filtered_layer_names if len(layer_names) != len(filtered_layers): raise ValueError('You are trying to load a weight file containing ' + str(len(layer_names)) + ' layers into a model with ' + str(len(filtered_layers)) + ' layers.') weight_value_tuples = [] for k, name in enumerate(layer_names): g = f[name] weight_names = load_attributes_from_hdf5_group(g, 'weight_names') weight_values = [np.asarray(g[weight_name]) for weight_name in weight_names] layer = filtered_layers[k] symbolic_weights = _legacy_weights(layer) weight_values = preprocess_weights_for_loading(layer, weight_values, original_keras_version, original_backend) if len(weight_values) != len(symbolic_weights): raise ValueError('Layer #' + str(k) + ' (named "' + layer.name + '" in the current model) was found to correspond to layer ' + name + ' in the save file. However the new layer ' + layer.name + ' expects ' + str(len(symbolic_weights)) + ' weights, but the saved weights have ' + str(len(weight_values)) + ' elements.') weight_value_tuples += zip(symbolic_weights, weight_values) backend.batch_set_value(weight_value_tuples)
Implements topological (order-based) weight loading. Args: f: A pointer to a HDF5 group. layers: a list of target layers. Raises: ValueError: in case of mismatch between provided layers and weights file.
github-repos
def remove(self, uids: Iterable[int]) -> None: for uid in uids: self._recent.discard(uid) self._flags.pop(uid, None)
Remove any session flags for the given message. Args: uids: The message UID values.
codesearchnet
def set_value(self, text): if self.single_line: text = text.replace('\n', '') self.set_text(text)
Sets the text content. Args: text (str): The string content that have to be appended as standard child identified by the key 'text'
codesearchnet
class CsvPipelineDataFormat(PipelineDataFormat): def __init__(self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False): super().__init__(output_path, input_path, column, overwrite=overwrite) def __iter__(self): with open(self.input_path, 'r') as f: reader = csv.DictReader(f) for row in reader: if self.is_multi_columns: yield {k: row[c] for k, c in self.column} else: yield row[self.column[0]] def save(self, data: List[dict]): with open(self.output_path, 'w') as f: if len(data) > 0: writer = csv.DictWriter(f, list(data[0].keys())) writer.writeheader() writer.writerows(data)
Support for pipelines using CSV data format. Args: output_path (`str`): Where to save the outgoing data. input_path (`str`): Where to look for the input data. column (`str`): The column to read. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the `output_path`.
github-repos
def set_bias(self, bias): self.x_offset += (bias - self._bias) self._bias = bias self._build_cdict()
Adjusts the image bias. Bias determines where the color changes start. At low bias, low intensities (i.e., low pixel values) will have non-zero color differences, while at high bias only high pixel values will have non-zero differences Args: bias: float A number between 0 and 1. Note that upon initialization the colormap has a default bias of 0.5. Returns: void
codesearchnet
def get_numeric_feature_names(example): numeric_features = ('float_list', 'int64_list') features = get_example_features(example) return sorted([ feature_name for feature_name in features if features[feature_name].WhichOneof('kind') in numeric_features ])
Returns a list of feature names for float and int64 type features. Args: example: An example. Returns: A list of strings of the names of numeric features.
juraj-google-style
def get_atom_map(structure): syms = [site.specie.symbol for site in structure] unique_pot_atoms = [] [unique_pot_atoms.append(i) for i in syms if (not unique_pot_atoms.count(i))] atom_map = {} for (i, atom) in enumerate(unique_pot_atoms): atom_map[atom] = (i + 1) return atom_map
Returns a dict that maps each atomic symbol to a unique integer starting from 1. Args: structure (Structure) Returns: dict
codesearchnet
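A hedged usage sketch with a minimal stand-in for a pymatgen Structure (anything whose sites expose .specie.symbol works for illustrating the mapping):

from types import SimpleNamespace

sites = [SimpleNamespace(specie=SimpleNamespace(symbol=s))
         for s in ['Fe', 'O', 'O', 'Fe', 'Li']]

syms = [site.specie.symbol for site in sites]
unique = []
for s in syms:                      # keep first-seen order, drop duplicates
    if s not in unique:
        unique.append(s)
print({atom: i + 1 for i, atom in enumerate(unique)})   # {'Fe': 1, 'O': 2, 'Li': 3}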
def _convert_args(handler, args): args = list(args) params = inspect.signature(handler).parameters for (i, (arg, name)) in enumerate(zip(args, params)): default = params[name].default annotation = params[name].annotation if (annotation != inspect.Parameter.empty): if (isinstance(annotation, type) and (annotation != str)): args[i] = annotation(arg) elif (default != inspect.Parameter.empty): if ((default is not None) and (not isinstance(default, str))): args[i] = type(default)(arg) return args
Convert a list of command arguments to types specified by the handler. Args: handler: a command handler function. args: the list of string arguments to pass to handler. Returns: A new list containing `args` that have been converted to the expected type for `handler`. For each function parameter of `handler` that has either an explicit type annotation or a non-None default value, the corresponding element in `args` is converted to that type.
codesearchnet
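A hedged usage sketch (the converter above is repeated so the example runs standalone); note the naive conversion caveat shown in the last comment.

import inspect

def _convert_args(handler, args):
    args = list(args)
    params = inspect.signature(handler).parameters
    for i, (arg, name) in enumerate(zip(args, params)):
        default = params[name].default
        annotation = params[name].annotation
        if annotation != inspect.Parameter.empty:
            if isinstance(annotation, type) and annotation != str:
                args[i] = annotation(arg)          # convert via the annotation
        elif default != inspect.Parameter.empty:
            if default is not None and not isinstance(default, str):
                args[i] = type(default)(arg)       # convert via the default's type
    return args

def handler(count: int, verbose=False, name='x'):
    return count, verbose, name

print(_convert_args(handler, ['3', 'yes', 'abc']))   # [3, True, 'abc']
# Caveat: bool('False') is also True - any non-empty string converts to True.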
def cmPrecision(cm, average=True): cm = cm.type(torch.float64) precision = cm.diag() / (cm.sum(dim=0) + 1e-15) if average: return precision.mean() return precision
Calculates precision using :class:`~ignite.metrics.ConfusionMatrix` metric. Args: cm (ConfusionMatrix): instance of confusion matrix metric average (bool, optional): if True metric value is averaged over all classes Returns: MetricsLambda
juraj-google-style
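The same arithmetic in NumPy for a 2-class confusion matrix (rows = true class, columns = predicted class); a sketch of the formula rather than the ignite metric:

import numpy as np

cm = np.array([[5., 1.],
               [2., 4.]])
precision = np.diag(cm) / (cm.sum(axis=0) + 1e-15)   # true positives / predicted positives
print(precision)          # [0.71428571 0.8]
print(precision.mean())   # ~0.757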
def retrieve_all(self, subset=None): get_object = self.factory.get_object obj_class = self.obj_class full_objects = [get_object(obj_class, list_obj.id, subset) for list_obj in self] return JSSObjectList(self.factory, obj_class, full_objects)
Return a list of all JSSListData elements as full JSSObjects. This can take a long time given a large number of objects, and depending on the size of each object. Subsetting to only include the data you need can improve performance. Args: subset: For objects which support it, a list of sub-tags to request, or an "&" delimited string, (e.g. "general&purchasing"). Default to None.
codesearchnet
def get_start_time_metric(result: PipelineResult, namespace: str, name: str) -> int: distributions = result.metrics().query(MetricsFilter().with_namespace(namespace).with_name(name))['distributions'] min_list = list(map(lambda m: m.result.min, distributions)) return min(min_list) if len(min_list) > 0 else -1
get the start time out of all times recorded by the specified distribution metric Args: result: the PipelineResult which metrics are read from namespace: a string representing the namespace of wanted metric name: a string representing the name of the wanted metric Returns: the smallest time in the metric or -1 if it doesn't exist
github-repos
def run(self, row, **kwargs): self.source = row kwargs['output'] = self.__graph__() super(CSVRowProcessor, self).run(**kwargs) return kwargs['output']
Method takes a row and, depending on whether it is a dict or a list, runs RML rules. Args: ----- row(Dict, List): Row from CSV Reader
codesearchnet
def _construct_w(self, inputs): weight_shape = (self._kernel_shape + (1, 1)) if ('w' not in self._initializers): self._initializers['w'] = create_weight_initializer(weight_shape[:2], dtype=inputs.dtype) w = tf.get_variable('w', shape=weight_shape, dtype=inputs.dtype, initializer=self._initializers['w'], partitioner=self._partitioners.get('w', None), regularizer=self._regularizers.get('w', None)) return w
Construct the convolution weight matrix. Figures out the shape of the weight matrix, initialize it, and return it. Args: inputs: A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. Returns: w: A weight matrix of the same type as `inputs` and of shape [kernel_shape, 1, 1].
codesearchnet
def load_snippet(self, name, package):
    if hasattr(self, name):
        raise SnippetError(
            self,
            ('Attribute "%s" already exists, please use a different name.' % name))
    self.services.snippets.add_snippet_client(name, package)
Starts the snippet apk with the given package name and connects. Examples: .. code-block:: python ad.load_snippet( name='maps', package='com.google.maps.snippets') ad.maps.activateZoom('3') Args: name: string, the attribute name to which to attach the snippet client. E.g. `name='maps'` attaches the snippet client to `ad.maps`. package: string, the package name of the snippet apk to connect to. Raises: SnippetError: Illegal load operations are attempted.
codesearchnet
def _make_pred_succ_maps(self, node):
    pred_map = {e[2]['wire']: e[0] for e in
                self._multi_graph.in_edges(nbunch=node, data=True)}
    succ_map = {e[2]['wire']: e[1] for e in
                self._multi_graph.out_edges(nbunch=node, data=True)}
    return (pred_map, succ_map)
Return predecessor and successor dictionaries. Args: node (DAGNode): reference to multi_graph node Returns: tuple(dict): tuple(predecessor_map, successor_map) These map from wire (Register, int) to predecessor (successor) nodes of n.
codesearchnet
def write_file(self, filepath, filename=None, directory=None):
    arcname = None
    if filename or directory:
        directory = directory.rstrip("/") + "/" if directory else ""
        filename = filename or os.path.basename(filepath)
        arcname = "{}{}".format(directory, filename)
    self._copy_to_zipfile(filepath, arcname=arcname)
    return arcname or filepath
write_file: Write a local file into the zip. Args: filepath: (str) path to the local file filename: (str) name to store the file under in the zip (optional; defaults to the basename of filepath) directory: (str) directory in the zipfile to write the file to (optional) Returns: path to the file in the zip Note: filepath must be a relative path
juraj-google-style
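A sketch of how the arcname logic plays out; `zipper` stands in for an instance of whatever class defines `write_file`, and the paths are hypothetical.

zipper.write_file("downloads/report.pdf", directory="docs/")
# stored in the zip as "docs/report.pdf" (the basename is kept)

zipper.write_file("downloads/report.pdf", filename="summary.pdf")
# stored in the zip as "summary.pdf"

zipper.write_file("downloads/report.pdf")
# no filename/directory given: stored under "downloads/report.pdf"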
def load_config(self, config):
    for k, v in config.items():
        if hasattr(self, k):
            raise DeviceError(
                self,
                ('Attribute %s already exists with value %s, cannot set '
                 'again.') % (k, getattr(self, k)))
        setattr(self, k, v)
Add attributes to the AndroidDevice object based on config. Args: config: A dictionary representing the configs. Raises: Error: The config is trying to overwrite an existing attribute.
juraj-google-style
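A short usage sketch; `ad` is assumed to be an AndroidDevice instance and the config keys are invented.

ad.load_config({'label': 'bedroom_phone', 'build_type': 'userdebug'})
ad.label  # -> 'bedroom_phone'
# Calling load_config again with 'label' would raise DeviceError,
# since that attribute now exists on the device object.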
def Log(self, format_str, *args):
    log_entry = rdf_flow_objects.FlowLogEntry(
        client_id=self.rdf_flow.client_id,
        flow_id=self.rdf_flow.flow_id,
        hunt_id=self.rdf_flow.parent_hunt_id,
        message=format_str % args)
    data_store.REL_DB.WriteFlowLogEntries([log_entry])
    if self.rdf_flow.parent_hunt_id:
        db_compat.ProcessHuntFlowLog(self.rdf_flow, format_str % args)
Logs the message using the flow's standard logging. Args: format_str: Format string *args: arguments to the format string
juraj-google-style
def threw(self, error_type=None):
    if not error_type:
        return True if len(self.exceptions) > 0 else False
    else:
        return uch.obj_in_list(self.exceptions, error_type)
Determine whether an exception was thrown. Args: error_type: if None, check whether any exception was thrown; otherwise, check whether the specified exception type was thrown. Returns: Boolean
juraj-google-style
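An illustrative check, assuming a hypothetical spy object whose `exceptions` list already recorded a ValueError from an earlier call.

spy.threw()            # True  - at least one exception was recorded
spy.threw(ValueError)  # True  - a ValueError is among the recorded exceptions
spy.threw(KeyError)    # False - no KeyError was recorded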
def get_creation_date_tags(url, domain, as_dicts=False):
    creation_date_tags = [mementoweb_api_tags(url), get_whois_tags(domain)]
    creation_date_tags = sorted(sum(creation_date_tags, []), key=(lambda x: x.date))
    if (not as_dicts):
        return creation_date_tags
    return [item._as_dict() for item in creation_date_tags]
Put together all data sources in this module and return its output. Args: url (str): URL of the web. With relative paths and so on. domain (str): Just the domain of the web. as_dicts (bool, default False): Convert output to dictionaries compatible with :class:`.SourceString`? Returns: list: Sorted list of :class:`TimeResource` objects or dicts.
codesearchnet
def remove_collisions(self, min_dist=0.5):
    vfcoords = [v.frac_coords for v in self.vnodes]
    sfcoords = self.structure.frac_coords
    dist_matrix = self.structure.lattice.get_all_distances(vfcoords, sfcoords)
    all_dist = np.min(dist_matrix, axis=1)
    new_vnodes = []
    for i, v in enumerate(self.vnodes):
        if all_dist[i] > min_dist:
            new_vnodes.append(v)
    self.vnodes = new_vnodes
Remove vnodes that are too close to existing atoms in the structure Args: min_dist(float): The minimum distance that a vertex needs to be from existing atoms.
juraj-google-style
def _cast_to_frameset(cls, other):
    if isinstance(other, FrameSet):
        return other
    try:
        return FrameSet(other)
    except Exception:
        return NotImplemented
Private method to simplify comparison operations. Args: other (:class:`FrameSet` or set or frozenset or iterable): item to be compared Returns: :class:`FrameSet` Raises: :class:`NotImplemented`: if a comparison is impossible
codesearchnet
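A sketch of how a comparison operator might lean on this helper; the `items` attribute is assumed here purely for illustration.

def __eq__(self, other):
    other = self._cast_to_frameset(other)
    if other is NotImplemented:
        return NotImplemented
    return self.items == other.items  # assumed internal representation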
def load(fh, model):
    graphs = penman.load(fh, cls=XMRSCodec)
    xs = [model.from_triples(g.triples()) for g in graphs]
    return xs
Deserialize PENMAN graphs from a file (handle or filename) Args: fh: filename or file object model: Xmrs subclass instantiated from decoded triples Returns: a list of objects (of class *model*)
juraj-google-style
def xresnet18(pretrained=False, **kwargs):
    model = XResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['xresnet18']))
    return model
Constructs a XResNet-18 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
codesearchnet
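A typical call, with the pretrained flag controlling whether weights are fetched through `model_zoo`:

model = xresnet18(pretrained=False)   # randomly initialized weights
# model = xresnet18(pretrained=True)  # would download ImageNet weights from model_urls['xresnet18']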
def __init__(self, dims):
    if not isinstance(dims, tuple):
        raise TypeError('The dimensions passed to DummyMultiDimensionalLSTM should be a tuple of ints.')
    self._dims = dims
    self._output_size = tensor_shape.TensorShape(self._dims)
    self._state_size = (tensor_shape.TensorShape(self._dims),
                        tensor_shape.TensorShape(self._dims))
Initialize the Multi-dimensional LSTM cell. Args: dims: tuple that contains the dimensions of the output of the cell, without including 'Time' or 'Batch' dimensions.
github-repos
def get_device_details(self, device):
    if not isinstance(device, PhysicalDevice):
        raise ValueError('device must be a tf.config.PhysicalDevice, but got: %s' % (device,))
    if self._physical_device_to_index is None or device not in self._physical_device_to_index:
        raise ValueError('The PhysicalDevice must be one obtained from calling `tf.config.list_physical_devices`, but got: %s' % (device,))
    index = self._physical_device_to_index[device]
    details = pywrap_tfe.TF_GetDeviceDetails(index)
    if 'compute_capability' in details:
        try:
            major, minor = details['compute_capability'].split('.')
            details['compute_capability'] = (int(major), int(minor))
        except ValueError as exc:
            raise RuntimeError('Device returned compute capability an in invalid format: %s' % details['compute_capability']) from exc
    return details
Returns details about a physical devices. Args: device: A `tf.config.PhysicalDevice` returned by `tf.config.list_physical_devices` or `tf.config.get_visible_devices`. Returns: A dict with string keys.
github-repos
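In user code this is normally reached through the public wrapper; a small sketch (a GPU may of course be absent on a given machine):

import tensorflow as tf

gpus = tf.config.list_physical_devices('GPU')
if gpus:
    details = tf.config.experimental.get_device_details(gpus[0])
    print(details.get('device_name'), details.get('compute_capability'))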
def _define_step(self, done, score, summary):
    if (done.shape.ndims == 0):
        done = done[None]
    if (score.shape.ndims == 0):
        score = score[None]
    score_mean = streaming_mean.StreamingMean((), tf.float32)
    with tf.control_dependencies([done, score, summary]):
        done_score = tf.gather(score, tf.where(done)[:, 0])
        submit_score = tf.cond(
            tf.reduce_any(done), (lambda: score_mean.submit(done_score)), tf.no_op)
    with tf.control_dependencies([submit_score]):
        mean_score = tf.cond(self._report, score_mean.clear, float)
        steps_made = tf.shape(score)[0]
        next_step = self._step.assign_add(steps_made)
    with tf.control_dependencies([mean_score, next_step]):
        return (tf.identity(summary), mean_score, next_step, steps_made)
Combine operations of a phase. Keeps track of the mean score and when to report it. Args: done: Tensor indicating whether current score can be used. score: Tensor holding the current, possibly intermediate, score. summary: Tensor holding summary string to write if not an empty string. Returns: Tuple of summary tensor, mean score, and new global step. The mean score is zero for non-reporting steps.
codesearchnet
def UninstallDriver(bundle_name):
    km = objc.KextManager()
    cf_bundle_name = km.PyStringToCFString(bundle_name)
    status = km.iokit.KextManagerUnloadKextWithIdentifier(cf_bundle_name)
    km.dll.CFRelease(cf_bundle_name)
    return status
Calls into the IOKit to unload a kext by its name. Args: bundle_name: The bundle identifier of the kernel extension as defined in Info.plist field CFBundleIdentifier. Returns: The error code from the library call. objc.OS_SUCCESS if successful.
juraj-google-style
def sg_summary_param(tensor, prefix=None, name=None):
    prefix = '' if prefix is None else prefix + '/'
    name = prefix + _pretty_name(tensor) if name is None else prefix + name
    _scalar(name + '/abs', tf.reduce_mean(tf.abs(tensor)))
    _histogram(name + '/abs-h', tf.abs(tensor))
Register `tensor` to summary report as `parameters` Args: tensor: A `Tensor` to log as parameters prefix: A `string`. A prefix to display in the tensor board web UI. name: A `string`. A name to display in the tensor board web UI. Returns: None
juraj-google-style
def setScales(self, scales=None, term_num=None):
    if scales == None:
        for term_i in range(self.n_terms):
            n_scales = self.vd.getTerm(term_i).getNumberScales()
            self.vd.getTerm(term_i).setScales(SP.array(SP.randn(n_scales)))
    elif term_num == None:
        assert scales.shape[0] == self.vd.getNumberScales(), 'incompatible shape'
        index = 0
        for term_i in range(self.n_terms):
            index1 = index + self.vd.getTerm(term_i).getNumberScales()
            self.vd.getTerm(term_i).setScales(scales[index:index1])
            index = index1
    else:
        assert scales.shape[0] == self.vd.getTerm(term_num).getNumberScales(), 'incompatible shape'
        self.vd.getTerm(term_num).setScales(scales)
Set the scales; if none are given they are initialized randomly (based on the empirical trait variance). Args: scales: if None, scales are set randomly; otherwise they are assigned to term_num (or to all terms if term_num is None) term_num: index of the term whose scales are set
juraj-google-style
def embedding_lookup(self, x, means):
    x_means_hot = self.nearest_neighbor(x, means)
    x_means_hot_flat = tf.reshape(
        x_means_hot, [-1, self.hparams.num_blocks, self.hparams.block_v_size])
    x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means)
    x_means = tf.transpose(x_means, [1, 0, 2])
    q_loss = tf.reduce_mean(
        tf.squared_difference(tf.stop_gradient(x), x_means))
    e_loss = tf.reduce_mean(
        tf.squared_difference(x, tf.stop_gradient(x_means)))
    return x_means_hot, x_means, q_loss, e_loss
Compute nearest neighbors and loss for training the embeddings. Args: x: Batch of encoder continuous latent states sliced/projected into shape [-1, num_blocks, block_dim]. means: Embedding means. Returns: The nearest neighbor in one hot form, the nearest neighbor itself, the commitment loss, embedding training loss.
juraj-google-style
def get_dataframe(self, md5, compress='lz4'):
    sample = self.data_store.get_sample(md5)
    if not sample:
        raise WorkBench.DataNotFound("Could not find %s in the data store", md5)
    if not compress:
        return sample['raw_bytes']
    else:
        compress_df = lz4.dumps(sample['raw_bytes'])
        print 'Info: DataFrame compression %.0f%%' % (len(compress_df)*100.0/float(len(sample['raw_bytes'])))
        return compress_df
Return a dataframe from the DataStore. This is just a convenience method that uses get_sample internally. Args: md5: the md5 of the dataframe compress: compression to use: (defaults to 'lz4' but can be set to None) Returns: A msgpack'd Pandas DataFrame Raises: Workbench.DataNotFound if the dataframe is not found.
juraj-google-style
def peek_native(make):
    def peek(service, container, _stack=None):
        return make(service.peekNative(container))
    return peek
Deserializer factory for types whose state can be natively serialized. Arguments: make (callable): type constructor. Returns: callable: deserializer (`peek` routine)
codesearchnet
def _get_params(self, validator_parameter, name_prefix):
    params_validator = self.request.get(validator_parameter)
    user_params = {}
    for key in self.request.arguments():
        if key.startswith(name_prefix):
            values = self.request.get_all(key)
            adjusted_key = key[len(name_prefix):]
            if (len(values) == 1):
                user_params[adjusted_key] = values[0]
            else:
                user_params[adjusted_key] = values
    if params_validator:
        resolved_validator = util.for_name(params_validator)
        resolved_validator(user_params)
    return user_params
Retrieves additional user-supplied params for the job and validates them. Args: validator_parameter: name of the request parameter which supplies validator for this parameter set. name_prefix: common prefix for all parameter names in the request. Raises: Any exception raised by the 'params_validator' request parameter if the params fail to validate. Returns: The user parameters.
codesearchnet
def _GetXY(fd):
    try:
        rc = struct.unpack(b'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, 'junk'))
        return (rc[1], rc[0]) if rc else None
    except:
        return None
Returns the terminal (x,y) size for fd. Args: fd: The terminal file descriptor. Returns: The terminal (x,y) size for fd or None on error.
github-repos
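A quick check at an interactive terminal; returns None when the fd is not a TTY (the helper's own `struct`, `fcntl` and `termios` imports are assumed to be in place).

import sys

size = _GetXY(sys.stdout.fileno())
# e.g. (80, 24) on a standard terminal, or None when output is redirected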
def parallel_concat(processor_list: Sequence[Processor]) -> Processor:
    if not processor_list:
        raise ValueError('processor_list is empty')
    return _ParallelProcessor(processor_list)
Create a sequence of processors to be run in parallel. The output is the concatenation of all processors, i.e.: parallel_concat([p1, p2])(stream) -> [p1(stream), p2(stream)] Args: processor_list: list of processors. Returns: A processor consisting of the parallel run of all the processors in the list. The execution is sequential from the first processor to the last, and the result of each processor is concatenated into the output.
github-repos
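A conceptual sketch; `UpperCase` and `Reverse` are invented Processor implementations standing in for whatever the host library provides.

combined = parallel_concat([UpperCase(), Reverse()])
# combined(stream) yields everything UpperCase produces for `stream`,
# followed by everything Reverse produces for the same stream.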
def _update_step(self, sequence):
    (observ, action, old_policy_params, reward, advantage) = sequence['sequence']
    length = sequence['length']
    old_policy = self._policy_type(**old_policy_params)
    (value_loss, value_summary) = self._value_loss(observ, reward, length)
    network = self._network(observ, length)
    (policy_loss, policy_summary) = self._policy_loss(
        old_policy, network.policy, action, advantage, length)
    network_loss = network.get('loss', 0.0)
    loss = ((policy_loss + value_loss) + tf.reduce_mean(network_loss))
    (gradients, variables) = zip(*self._optimizer.compute_gradients(loss))
    optimize = self._optimizer.apply_gradients(zip(gradients, variables))
    summary = tf.summary.merge([
        value_summary, policy_summary,
        tf.summary.histogram('network_loss', network_loss),
        tf.summary.scalar('avg_network_loss', tf.reduce_mean(network_loss)),
        tf.summary.scalar('gradient_norm', tf.global_norm(gradients)),
        utility.gradient_summaries(zip(gradients, variables))])
    with tf.control_dependencies([optimize]):
        return [tf.identity(x) for x in (value_loss, policy_loss, summary)]
Compute the current combined loss and perform a gradient update step. The sequences must be a dict containing the keys `length` and `sequence`, where the latter is a tuple containing observations, actions, parameters of the behavioral policy, rewards, and advantages. Args: sequence: Sequences of episodes or chunks of episodes. Returns: Tuple of value loss, policy loss, and summary tensor.
codesearchnet
def approximate_gradient(f, variables, delta=0.1):
    def var_gradient(var):
        def mapper_func(i):
            stencil = _five_point_stencil(f, var, i, delta)
            inner_sum = tf.nest.map_structure(tf.math.reduce_sum, tf.nest.flatten(stencil))
            outer_sum = tf.math.reduce_sum(tf.stack(inner_sum))
            entry_derivative = tf.reduce_sum(outer_sum)
            return entry_derivative
        derivatives = tf.map_fn(
            mapper_func, tf.range(tf.size(var)), fn_output_signature=tf.float32)
        return tf.reshape(derivatives, tf.shape(var))
    return tf.nest.map_structure(var_gradient, variables)
Approximates the gradient of f using five point stencil. Suppose the input function returns a possibly nested structure `r` under gradient tape `t`. Then this function returns an approximation to `t.gradient(r, variables, unconnected_gradients=tf.UnconnectedGradients.ZERO)` Args: f: Callable taking no arguments and returning a possibly nested structure whose atomic elements are `tf.Tensor`. variables: Possibly nested structure of `tf.Variable` in which to differentiate `f`. delta: Size of the fundamental perturbation in the stencil. Returns: The approximate gradient. Has the same structure as the return from a corresponding call to `tf.GradientTape().gradient`.
github-repos
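A small self-check against autodiff, using the function defined above; it assumes the module's `_five_point_stencil` helper is importable alongside it, and the tolerance depends on `delta`.

import tensorflow as tf

v = tf.Variable([1.0, 2.0])

def f():
    return tf.reduce_sum(v ** 2)

approx = approximate_gradient(f, v, delta=1e-2)

with tf.GradientTape() as t:
    r = f()
exact = t.gradient(r, v)   # [2.0, 4.0]; `approx` should be close to this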
def decode_field(self, field, value):
    for decoder in _GetFieldCodecs(field, 'decoder'):
        result = decoder(field, value)
        value = result.value
        if result.complete:
            return value
    if isinstance(field, messages.MessageField):
        field_value = self.decode_message(field.message_type, json.dumps(value))
    elif isinstance(field, messages.EnumField):
        value = (GetCustomJsonEnumMapping(field.type, json_name=value) or value)
        try:
            field_value = super(_ProtoJsonApiTools, self).decode_field(field, value)
        except messages.DecodeError:
            if (not isinstance(value, six.string_types)):
                raise
            field_value = None
    else:
        field_value = super(_ProtoJsonApiTools, self).decode_field(field, value)
    return field_value
Decode the given JSON value. Args: field: a messages.Field for the field we're decoding. value: a python value we'd like to decode. Returns: A value suitable for assignment to field.
codesearchnet
def sigmoid(x):
    if any_symbolic_tensors((x,)):
        return Sigmoid().symbolic_call(x)
    return backend.nn.sigmoid(x)
Sigmoid activation function. It is defined as `f(x) = 1 / (1 + exp(-x))`. Args: x: Input tensor. Returns: A tensor with the same shape as `x`. Example: >>> x = keras.ops.convert_to_tensor([-6.0, 1.0, 0.0, 1.0, 6.0]) >>> keras.ops.sigmoid(x) array([0.00247262, 0.7310586, 0.5, 0.7310586, 0.9975274], dtype=float32)
github-repos
def __init__(self, entry_list, weights=None):
    if weights is None:
        self.weights = [1.0] * len(entry_list)
    else:
        self.weights = weights
    self.entry_list = entry_list
Initializes a MultiEntry. Args: entry_list ([PourbaixEntry]): List of component PourbaixEntries weights ([float]): Weights associated with each entry. Default is None
juraj-google-style
def create_upload_url(success_path, max_bytes_per_blob=None, max_bytes_total=None, **options):
    fut = create_upload_url_async(success_path,
                                  max_bytes_per_blob=max_bytes_per_blob,
                                  max_bytes_total=max_bytes_total,
                                  **options)
    return fut.get_result()
Create upload URL for POST form. Args: success_path: Path within application to call when POST is successful and upload is complete. max_bytes_per_blob: The maximum size in bytes that any one blob in the upload can be or None for no maximum size. max_bytes_total: The maximum size in bytes that the aggregate sizes of all of the blobs in the upload can be or None for no maximum size. **options: Options for create_rpc(). Returns: The upload URL. Raises: TypeError: If max_bytes_per_blob or max_bytes_total are not integral types. ValueError: If max_bytes_per_blob or max_bytes_total are not positive values.
codesearchnet
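A typical App Engine handler sketch; the success path and size limit below are placeholders.

upload_url = create_upload_url('/upload_complete',
                               max_bytes_per_blob=10 * 1024 * 1024)
# Use upload_url as the action of a multipart/form-data <form>; Blobstore
# redirects to /upload_complete once the POST finishes.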