Dataset columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes).
def text(self, value): self._text = value self.timestamps.edited = datetime.datetime.utcnow() self.touch(True)
Set the text value. Args: value (str): Text value.
codesearchnet
def update_parser(self, parser): self._parser = parser ini_str = argparse_to_ini(parser) configp = configparser.ConfigParser(allow_no_value=True) configp.read_dict(self._config) configp.read_string(ini_str) self._config.update( {s: dict(configp.items(s)) for s in configp.sections()} )
Update the config dictionary with the arguments declared in an argparse parser. New variables will be created, and existing ones overridden. Args: parser (argparse.ArgumentParser): parser to read variables from
juraj-google-style
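The merge step in the record above relies on a project-specific `argparse_to_ini` helper. Below is a minimal sketch of the same configparser-based merge, with that helper replaced by a hand-written INI string; the section and option names are illustrative, not taken from the record.

```python
import argparse
import configparser

config = {"main": {"verbose": "0"}}

parser = argparse.ArgumentParser()
parser.add_argument("--workers", default=4)

# Stand-in for argparse_to_ini: express the parser default as INI text.
ini_str = "[main]\nworkers = {}\n".format(parser.get_default("workers"))

cp = configparser.ConfigParser(allow_no_value=True)
cp.read_dict(config)      # existing values first ...
cp.read_string(ini_str)   # ... then the parser-derived values override/extend them
config.update({s: dict(cp.items(s)) for s in cp.sections()})
print(config)             # {'main': {'verbose': '0', 'workers': '4'}}
```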
def get_samples_live(self, sensor_id, last=None): url = 'https: headers = self.__gen_headers() headers['Content-Type'] = 'application/json' params = {'sensorId': sensor_id} if last: params['last'] = last url = self.__append_url_params(url, params) r = requests.get(url, headers=headers) return r.json()
Get recent samples, one sample per second for up to the last 2 minutes. Args: sensor_id (string): hexadecimal id of the sensor to query, e.g. ``0x0013A20040B65FAD`` last (string): starting range, as ISO8601 timestamp Returns: list: dictionary objects containing sample data
codesearchnet
def import_global(name, modules=None, exceptions=DummyException, locals_=None, globals_=None, level=(- 1)): frame = None try: if ((locals_ is None) or (globals_ is None)): import inspect frame = inspect.stack()[1][0] if (locals_ is None): locals_ = frame.f_locals if (globals_ is None): globals_ = frame.f_globals try: name = name.split('.') if (not name[0]): name = name[1:] level = 1 module = __import__(name=(name[0] or '.'), globals=globals_, locals=locals_, fromlist=name[1:], level=max(level, 0)) try: for attr in name[1:]: module = getattr(module, attr) except AttributeError: raise ImportError(('No module named ' + '.'.join(name))) if (not modules): modules = getattr(module, '__all__', dir(module)) else: modules = set(modules).intersection(dir(module)) for k in set(dir(module)).intersection(modules): if (k and (k[0] != '_')): globals_[k] = getattr(module, k) except exceptions as e: return e finally: del name, modules, exceptions, locals_, globals_, frame
Import the requested items into the global scope. WARNING! This method _will_ overwrite your global scope: if you have a variable named "path" and you call import_global('sys'), it will be overwritten with sys.path. Args: name (str): the name of the module to import, e.g. sys modules (list): the names to import, use None for everything exceptions (Exception): the exception(s) to catch, e.g. ImportError `locals_`: the `locals()` dict (in case you need a different scope) `globals_`: the `globals()` dict (in case you need a different scope) level (int): the level to import from, this can be used for relative imports
codesearchnet
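A hypothetical usage sketch, assuming `import_global` from the record above is in scope in the calling module. It writes the selected attributes straight into that module's globals(), so anything already bound to those names is silently overwritten, exactly as the docstring warns.

```python
# Pull two attributes of os.path into this module's global namespace.
import_global('os.path', modules=['join', 'sep'])

print(join('tmp', 'file.txt'))   # os.path.join, now available as a bare name
print(repr(sep))                 # os.path.sep
```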
def get_content(url, headers={}, decoded=True): logging.debug(('get_content: %s' % url)) req = request.Request(url, headers=headers) if cookies: cookies.add_cookie_header(req) req.headers.update(req.unredirected_hdrs) response = urlopen_with_retry(req) data = response.read() content_encoding = response.getheader('Content-Encoding') if (content_encoding == 'gzip'): data = ungzip(data) elif (content_encoding == 'deflate'): data = undeflate(data) if decoded: charset = match1(response.getheader('Content-Type', ''), 'charset=([\\w-]+)') if (charset is not None): data = data.decode(charset, 'ignore') else: data = data.decode('utf-8', 'ignore') return data
Gets the content of a URL by sending an HTTP GET request. Args: url: A URL. headers: Request headers used by the client. decoded: Whether to decode the response body using UTF-8 or the charset specified in the Content-Type header. Returns: The content as a string.
codesearchnet
def get_remote_info(url_id): try: data = _send_request(url_id) except Exception as e: sys.stderr.write("Seeder GET error: ") sys.stderr.write(str(e.message)) return None return _convert_to_wakat_format(data)
Download data and convert it to the dict used by the frontend. Args: url_id (str): ID used as identification in Seeder. Returns: dict: Dict with data for the frontend, or None in case of error.
juraj-google-style
def write_compounds(self, stream, compounds, properties=None): self._write_entries(stream, compounds, self.convert_compound_entry, properties)
Write iterable of compounds as YAML object to stream. Args: stream: File-like object. compounds: Iterable of compound entries. properties: Set of compound properties to output (or None to output all).
codesearchnet
def remove_species(self, species): new_sites = [] species = [get_el_sp(sp) for sp in species] for site in self._sites: new_sp_occu = {sp: amt for (sp, amt) in site.species.items() if (sp not in species)} if (len(new_sp_occu) > 0): new_sites.append(Site(new_sp_occu, site.coords, properties=site.properties)) self._sites = new_sites
Remove all occurrences of a species from a molecule. Args: species: Species to remove.
codesearchnet
def make_new(self, rev): return self.vcs.make_rev_options(rev, extra_args=self.extra_args)
Make a copy of the current instance, but with a new rev. Args: rev: the name of the revision for the new object.
juraj-google-style
def _assign_stablehlo_quantization_config_or_populate_default(self, args): if self.experimental_stablehlo_quantizer_config is not None and Optimize.DEFAULT not in self.optimizations: args['quantization_config'] = self.experimental_stablehlo_quantizer_config elif Optimize.DEFAULT in self.optimizations and self.representative_dataset: if len(self._saved_model_exported_names) != 1: raise ValueError('StableHLO quantizer is only supported when converting from a SavedModel with one signature key.') signature_key = self._saved_model_exported_names[0] tfrecord_file_path = tempfile.mkstemp(suffix='.tfrecord', prefix=signature_key)[1] rd.TfRecordRepresentativeDatasetSaver({signature_key: tfrecord_file_path}).save({signature_key: self.representative_dataset()}) quantization_config = qc.QuantizationConfig(static_range_ptq_preset=qc.StaticRangePtqPreset(representative_datasets=[qc.RepresentativeDatasetConfig(tf_record=qc.TfRecordFile(path=tfrecord_file_path))], enable_per_channel_quantized_weight=True, enable_full_int_quantization=True), pipeline_config=qc.PipelineConfig(unpack_quantized_types=False)) args['quantization_config'] = quantization_config else: raise ValueError('StableHLO quantizer only supports static-range and weight-only PTQ.')
Assigns `QuantizationConfig` to `args` or populates the default. Args: args: Dictionary of argument names and associated values.
github-repos
def from_control_flow_context_def(context_def, import_scope=None): if context_def.HasField('cond_ctxt'): return CondContext.from_proto(context_def.cond_ctxt, import_scope=import_scope) if context_def.HasField('while_ctxt'): return WhileContext.from_proto(context_def.while_ctxt, import_scope=import_scope) raise NotImplementedError('Unknown ControlFlowContextDef field: %s' % context_def.WhichOneof('ctxt'))
Deserializes `context_def` into the appropriate ControlFlowContext. Args: context_def: ControlFlowContextDef proto import_scope: Optional `string`. Name scope to add. Returns: A ControlFlowContext subclass
github-repos
def CreateKey(self, private_key=None): if private_key is None: private_key = bytes(Random.get_random_bytes(32)) key = KeyPair(priv_key=private_key) self._keys[key.PublicKeyHash.ToBytes()] = key return key
Create a KeyPair Args: private_key (iterable_of_ints): (optional) 32 byte private key Returns: KeyPair: a KeyPair instance
juraj-google-style
def read(self): if self._cache: img = self._cache.get(self._position) if (img is not None): ret = True else: if (self._position != self._get_real_position()): self._set_real_position(self._position) (ret, img) = self._vcap.read() if ret: self._cache.put(self._position, img) else: (ret, img) = self._vcap.read() if ret: self._position += 1 return img
Read the next frame. If the next frame has been decoded before and is in the cache, return it directly; otherwise decode, cache and return it. Returns: ndarray or None: Return the frame if successful, otherwise None.
codesearchnet
def send(self, message, socket_): if not socket_: raise TensorForceError("No socket given in call to `send`!") elif not isinstance(message, dict): raise TensorForceError("Message to be sent must be a dict!") message = msgpack.packb(message) len_ = len(message) socket_.send(bytes("{:08d}".format(len_), encoding="ascii") + message)
Sends a message (dict) to the socket. The message consists of an 8-byte length header followed by a msgpack-numpy encoded dict. Args: message: The message dict (e.g. {"cmd": "reset"}) socket_: The python socket object to use.
juraj-google-style
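The framing used above is just an 8-byte zero-padded ASCII length followed by the encoded payload. Here is a dependency-free sketch of the same wire format, with msgpack swapped for `json` so it runs anywhere; the helper names are made up, and the single `recv(8)` for the header is a simplification that is fine for a local socketpair.

```python
import json
import socket

def send_framed(sock, message):
    """8-byte zero-padded ASCII length header followed by the encoded payload."""
    payload = json.dumps(message).encode("utf-8")
    sock.sendall("{:08d}".format(len(payload)).encode("ascii") + payload)

def recv_framed(sock):
    length = int(sock.recv(8).decode("ascii"))  # toy short-read handling: header in one recv
    data = b""
    while len(data) < length:
        data += sock.recv(length - len(data))
    return json.loads(data.decode("utf-8"))

a, b = socket.socketpair()
send_framed(a, {"cmd": "reset"})
print(recv_framed(b))   # {'cmd': 'reset'}
```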
def Add(self, file_desc_proto): proto_name = file_desc_proto.name if proto_name not in self._file_desc_protos_by_file: self._file_desc_protos_by_file[proto_name] = file_desc_proto elif self._file_desc_protos_by_file[proto_name] != file_desc_proto: raise DescriptorDatabaseConflictingDefinitionError( '%s already added, but with different descriptor.' % proto_name) package = file_desc_proto.package for message in file_desc_proto.message_type: self._file_desc_protos_by_symbol.update( (name, file_desc_proto) for name in _ExtractSymbols(message, package)) for enum in file_desc_proto.enum_type: self._file_desc_protos_by_symbol[ '.'.join((package, enum.name))] = file_desc_proto for extension in file_desc_proto.extension: self._file_desc_protos_by_symbol[ '.'.join((package, extension.name))] = file_desc_proto for service in file_desc_proto.service: self._file_desc_protos_by_symbol[ '.'.join((package, service.name))] = file_desc_proto
Adds the FileDescriptorProto and its types to this database. Args: file_desc_proto: The FileDescriptorProto to add. Raises: DescriptorDatabaseConflictingDefinitionError: if an attempt is made to add a proto with the same name but a different definition than an existing proto in the database.
juraj-google-style
def _generate_pickle_name(gt): grammar_textfile_name = os.path.basename(gt) head, tail = os.path.splitext(grammar_textfile_name) if tail == '.txt': tail = '' cache_dir = user_cache_dir(appname='YAPF', appauthor='Google', version=yapf_version) return cache_dir + os.sep + head + tail + '-py' + '.'.join(map(str, sys.version_info)) + '.pickle'
Get the filepath to write a pickle file to given the path of a grammar textfile. The returned filepath should be in a user-specific cache directory. Args: gt (str): path to grammar text file Returns: str: path to pickle file
github-repos
def to_json(self): return {'name': self.name, 'segments': [segment.to_json() for segment in self.segments], 'meta': self.meta}
Converts the track to a JSON serializable format. Returns: Map with the name, segments and metadata of the track.
codesearchnet
def traverse_pagination(response, endpoint, content_filter_query, query_params): results = response.get('results', []) page = 1 while response.get('next'): page += 1 response = endpoint().post(content_filter_query, **dict(query_params, page=page)) results += response.get('results', []) return results
Traverse a paginated API response, extracting and concatenating the "results" returned by the API. Arguments: response (dict): API response object. endpoint (Slumber.Resource): API endpoint object. content_filter_query (dict): query parameters used to filter catalog results. query_params (dict): query parameters used to paginate results. Returns: list: all the results returned by the API.
juraj-google-style
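A self-contained sketch of the same accumulate-while-`next` loop against a fake paginated endpoint; the endpoint shape, page size and totals are invented for illustration.

```python
def fake_endpoint(page, page_size=2, total=5):
    """Stand-in for a paginated API: returns a dict shaped like the record expects."""
    items = list(range(total))[(page - 1) * page_size: page * page_size]
    has_more = page * page_size < total
    return {"results": items, "next": "more" if has_more else None}

def traverse(first_response, fetch_page):
    results = list(first_response.get("results", []))
    page = 1
    response = first_response
    while response.get("next"):          # keep fetching while the API reports another page
        page += 1
        response = fetch_page(page)
        results += response.get("results", [])
    return results

print(traverse(fake_endpoint(1), fake_endpoint))   # [0, 1, 2, 3, 4]
```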
def _create_non_scalar_select(main_expr: _evaluation.ExpressionNode, other_expr: _evaluation.ExpressionNode, main_result: _sql_data_types.StandardSqlExpression, other_result: _sql_data_types.StandardSqlExpression, collection_check_func_name: str, sql_data_type: _sql_data_types.StandardSqlDataType, sql_alias: str): if isinstance(other_expr, _evaluation.LiteralNode): expression = _build_main_expr(main_expr) sql_expr = f'ARRAY_CONTAINS({expression}, {other_expr})' return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(sql_expr, _sql_data_type=sql_data_type, _sql_alias=sql_alias), from_part=None, sql_dialect=_sql_data_types.SqlDialect.SPARK) nested_query = f'ARRAY({other_result})' if isinstance(main_expr, _evaluation.LiteralNode) else f'ARRAY_AGG({main_result.sql_alias}) FROM ({main_result})' sql_expr = f'ARRAY_EXCEPT((SELECT ARRAY({main_result.sql_alias})), (SELECT {nested_query}))' return _sql_data_types.Select(select_part=_sql_data_types.FunctionCall(name=collection_check_func_name, params=[_sql_data_types.RawExpression(sql_expr, _sql_data_type=_sql_data_types.Int64), 'x -> x IS NOT NULL'], _sql_data_type=sql_data_type, _sql_alias=sql_alias), from_part=f'(SELECT {main_result.as_operand()})', sql_dialect=_sql_data_types.SqlDialect.SPARK)
Construct a Spark SQL select statement for non-scalar values. Args: main_expr: The primary (either left or right) expression being evaluated. other_expr: The secondary (opposite of main) expression. main_result: The result of evaluating the main expression. other_result: The result of evaluating the other expression. collection_check_func_name: The function name for collection checking ('EXISTS' or 'NOT EXISTS'). sql_data_type: The SQL data type for the result. sql_alias: The SQL alias for the result. Returns: A compiled Spark SQL select statement.
github-repos
def install_bootstrapped_files(nb_path=None, server_config=True, DEBUG=False): install_path = None print('Starting hide_code.js install...') current_dir = path.abspath(path.dirname(__file__)) config_dirs = j_path.jupyter_config_path() notebook_module_path = Utils.get_notebook_module_dir() for dir in config_dirs: custom_dir = path.join(dir, "custom") if path.isdir(custom_dir): install_path = custom_dir break if install_path == None: print("No config directories contain \"custom\" folder. Trying Jupyter notebook module path...") install_path = path.join(notebook_module_path, "static", "custom") if nb_path != None: install_path = nb_path print("Using argument supplied path: " + install_path) if DEBUG: print(install_path) if path.isdir(install_path): shutil.copyfile(path.join(current_dir, "hide_code.js"), path.join(install_path, "hide_code.js")) print('Copying hide_code.js to ' + install_path) print("Attempting to configure custom.js to auto-load hide_code.js...") try: with open(path.join(current_dir, "auto-load.txt")) as auto: auto_load_txt = auto.read(); auto_loaded = False with open(path.join(install_path, "custom.js"), 'r') as customJS: if auto_load_txt in customJS.read(): auto_loaded = True print("Custom.js already configured to auto-load hide_code.js.") if not auto_loaded: with open(path.join(install_path, "custom.js"), 'a') as customJS: customJS.write(auto_load_txt) print("Configured custom.js to auto-load hide_code.js.") except: print("Custom.js not in custom directory.") else: print('Unable to install into ' + install_path) print('Directory doesn\'t exist.') print('Make sure Jupyter is installed.') if server_config: print("Attempting to configure auto-loading for hide_code export handlers.") try: server_cm = ConfigManager(config_dir=j_path.jupyter_config_dir()) cfg = server_cm.get('jupyter_notebook_config') server_extensions = (cfg.setdefault('NotebookApp', {}) .setdefault('server_extensions', []) ) extension = 'hide_code.hide_code' if extension not in server_extensions: cfg['NotebookApp']['server_extensions'] += [extension] server_cm.update('jupyter_notebook_config', cfg) print('Configured jupyter to auto-load hide_code export handlers.') else: print("Jupyter already configured to auto-load export handlers.") except: print('Unable to install server extension.')
Installs javascript and exporting server extensions in Jupyter notebook. Args: nb_path (string): Path to notebook module. server_config (boolean): Install exporting server extensions. DEBUG (boolean): Verbose mode.
juraj-google-style
def accept_prompt(self, text=None, response=None, wait=None): with self.driver.accept_modal("prompt", text=text, response=response, wait=wait): yield
Execute the wrapped code, accepting a prompt, optionally responding to the prompt. Args: text (str | RegexObject, optional): Text to match against the text in the modal. response (str, optional): Response to provide to the prompt. wait (int | float, optional): Maximum time to wait for the modal to appear after executing the wrapped code. Raises: ModalNotFound: If a modal dialog hasn't been found.
juraj-google-style
def format_underline(s, char="=", indents=0): n = len(s) ind = " " * indents return ["{}{}".format(ind, s), "{}{}".format(ind, char*n)]
Draws an underline below a string. Args: s: string char: character used to draw the underline indents: number of leading indenting spaces Returns: list >>> print("\\n".join(format_underline("Life of João da Silva", "^", 2))) Life of João da Silva ^^^^^^^^^^^^^^^^^^^^^
juraj-google-style
def valid_file(value): if not value: raise argparse.ArgumentTypeError("'' is not a valid file path") elif not os.path.exists(value): raise argparse.ArgumentTypeError( "%s is not a valid file path" % value) elif os.path.isdir(value): raise argparse.ArgumentTypeError( "%s is a directory, not a regular file" % value) return value
Check if given file exists and is a regular file. Args: value (str): path to the file. Raises: argparse.ArgumentTypeError: if not valid. Returns: str: original value argument.
juraj-google-style
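To use a validator like this, pass it as the `type=` callable of an argparse argument; argparse turns the raised `ArgumentTypeError` into a clean usage error. A small runnable sketch with a condensed copy of the validator (the argument name is made up):

```python
import argparse
import os

def valid_file(value):   # condensed copy of the validator in the record above
    if not value or not os.path.exists(value):
        raise argparse.ArgumentTypeError("%s is not a valid file path" % value)
    if os.path.isdir(value):
        raise argparse.ArgumentTypeError("%s is a directory, not a regular file" % value)
    return value

parser = argparse.ArgumentParser()
parser.add_argument("config", type=valid_file)
args = parser.parse_args([__file__])   # this script itself is guaranteed to exist
print(args.config)
```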
def validate_variable_name(self, name): if not name: raise SerializerError("Variable name is empty") if name[0] not in PROPERTY_ALLOWED_START: msg = "Variable name '{}' must start with a letter" raise SerializerError(msg.format(name)) for item in name: if item not in PROPERTY_ALLOWED_CHARS: msg = ("Invalid variable name '{}': it must only contain " "letters, numbers and the '_' character") raise SerializerError(msg.format(name)) return True
Validate a variable name. Arguments: name (string): Variable name to validate. Returns: bool: ``True`` if the variable name is valid.
juraj-google-style
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang. An MBART-50 sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `[src_lang_code] X [eos]` - `labels`: (for decoder) `[tgt_lang_code] X [eos]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
github-repos
def rapidfire(self, max_nlaunch=(- 1), max_loops=1, sleep_time=5): (num_launched, do_exit, launched) = (0, False, []) for count in range(max_loops): if do_exit: break if (count > 0): time.sleep(sleep_time) tasks = self.fetch_tasks_to_run() if any(((task in launched) for task in tasks)): logger.critical(('numtasks %d already in launched list:\n%s' % (len(tasks), launched))) tasks = [t for t in tasks if (t not in launched)] if (not tasks): continue for task in tasks: fired = task.start() if fired: launched.append(task) num_launched += 1 if (num_launched >= max_nlaunch > 0): logger.info('num_launched >= max_nlaunch, going back to sleep') do_exit = True break self.flow.pickle_dump() return num_launched
Keeps submitting `Tasks` until we are out of jobs or no job is ready to run. Args: max_nlaunch: Maximum number of launches. default: no limit. max_loops: Maximum number of loops sleep_time: seconds to sleep between rapidfire loop iterations Returns: The number of tasks launched.
codesearchnet
def positions(self, account: str = '') -> List[Position]: if account: return list(self.wrapper.positions[account].values()) else: return [v for d in self.wrapper.positions.values() for v in d.values()]
List of positions for the given account, or of all accounts if account is left blank. Args: account: If specified, filter for this account name.
juraj-google-style
def delete_template(self, template_id): url = self.TEMPLATE_DELETE_URL request = self._get_request() response = request.post((url + template_id), get_json=False) return response
Deletes the specified template Args: template_id (str): The id of the template to delete Returns: A status code
codesearchnet
def Update(self, other, callback): self.conditions.update(other.conditions) self._Register(other.conditions, callback)
Adds existing triggers to this set, optionally rebuilding the registry. Used to aggregate trigger methods from Probes to Methods to Checks. Args: other: Another Triggers object. callback: Registers all the updated triggers to the specified function.
juraj-google-style
def iplot_state_hinton(rho, figsize=None): html_template = Template() javascript_template = Template() rho = _validate_input_state(rho) if figsize is None: options = {} else: options = {'width': figsize[0], 'height': figsize[1]} div_number = str(time.time()) div_number = re.sub('[.]', '', div_number) real = [] imag = [] for xvalue in rho: row_real = [] col_imag = [] for value_real in xvalue.real: row_real.append(float(value_real)) real.append(row_real) for value_imag in xvalue.imag: col_imag.append(float(value_imag)) imag.append(col_imag) html = html_template.substitute({ 'divNumber': div_number }) javascript = javascript_template.substitute({ 'divNumber': div_number, 'executions': [{'data': real}, {'data': imag}], 'options': options }) display(HTML(html + javascript))
Create a hinton representation. Graphical representation of the input array using a 2D city style graph (hinton). Args: rho (array): Density matrix figsize (tuple): Figure size in pixels.
juraj-google-style
def __init__(self, obj): if isinstance(obj, Stream) and obj.stream_dict.get("/Subtype") != "/Image": raise TypeError("can't construct PdfImage from non-image") self.obj = obj
Construct a PDF image from an Image XObject inside a PDF ``pim = PdfImage(page.Resources.XObject['/ImageNN'])`` Args: obj (pikepdf.Object): an Image XObject
juraj-google-style
def rotateInZMat(theta_deg): ct = np.cos(np.radians(theta_deg)) st = np.sin(np.radians(theta_deg)) rMat = np.array([[ct, (- st), 0], [st, ct, 0], [0, 0, 1]]) return rMat
Rotate a vector theta degrees around the z-axis (equivalent to yawing left). Rotates the vector in the sense that the x-axis is rotated towards the y-axis. If looking along the z-axis (which is not the way you usually look at it), the vector rotates clockwise. If sitting on the vector [1,0,0], the rotation is towards the left. Input: theta_deg (float) Angle through which vectors should be rotated, in degrees Returns: A 3x3 rotation matrix. To rotate a vector, pre-multiply by this matrix. To rotate the coord sys underneath the vector, post-multiply.
codesearchnet
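A quick numerical check of the convention described above: rotating the x unit vector by 90 degrees about z should land on the y unit vector. The matrix is rebuilt inline so the snippet runs on its own.

```python
import numpy as np

def rotate_in_z(theta_deg):
    ct, st = np.cos(np.radians(theta_deg)), np.sin(np.radians(theta_deg))
    return np.array([[ct, -st, 0.0], [st, ct, 0.0], [0.0, 0.0, 1.0]])

v = np.array([1.0, 0.0, 0.0])
print(np.round(rotate_in_z(90) @ v, 6))   # ≈ [0. 1. 0.]
```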
def format(self, data: Iterable[_FormatArg]) -> bytes: fix_arg = self._fix_format_arg return (self.how % tuple((fix_arg(item) for item in data)))
String interpolation into the format string. Args: data: The data interpolated into the format string. Examples: :: BytesFormat(b'Hello, %b!') % b'World' BytesFormat(b'%b, %b!') % (b'Hello', b'World')
codesearchnet
def resolve(self, context, provider): try: self._value.resolve(context, provider) except FailedLookup as e: raise FailedVariableLookup(self.name, e.lookup, e.error)
Recursively resolve any lookups within the Variable. Args: context (:class:`stacker.context.Context`): Current context for building the stack provider (:class:`stacker.provider.base.BaseProvider`): subclass of the base provider
codesearchnet
def __init__(self, request, file, *args, **kwargs): self.ranged_file = RangedFileReader(file) super(RangedFileResponse, self).__init__(self.ranged_file, *args, **kwargs) if 'HTTP_RANGE' in request.META: self.add_range_headers(request.META['HTTP_RANGE'])
RangedFileResponse constructor also requires a request, which checks whether range headers should be added to the response. Args: request(WGSIRequest): The Django request object. file (File): A file-like object.
juraj-google-style
def save_chkpt_vars(dic, path): logger.info("Variables to save to {}:".format(path)) keys = sorted(list(dic.keys())) logger.info(pprint.pformat(keys)) assert not path.endswith('.npy') if path.endswith('.npz'): np.savez_compressed(path, **dic) else: with tf.Graph().as_default(), \ tf.Session() as sess: for k, v in six.iteritems(dic): k = get_op_tensor_name(k)[0] _ = tf.Variable(name=k, initial_value=v) sess.run(tf.global_variables_initializer()) saver = tf.train.Saver() saver.save(sess, path, write_meta_graph=False)
Save variables in dic to path. Args: dic: {name: value} path: save as npz if the name ends with '.npz', otherwise save as a checkpoint.
juraj-google-style
def make_rsa_keypair(bits): private_key = rsa.generate_private_key( public_exponent=65537, key_size=bits, backend=default_backend(), ) private_pem = private_key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption(), ) public_pem = private_key.public_key().public_bytes( encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo, ) return private_pem, public_pem
Generate an RSA keypair. Args: bits (int): number of bits to use for the key. Returns: (private_key, public_key) - both as PEM encoded strings
juraj-google-style
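A usage sketch with the `cryptography` imports the record's function relies on. The `backend` argument is dropped here since recent versions of the library make it optional, and the round-trip load at the end is only there to show that the returned PEM bytes are directly usable.

```python
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

def make_rsa_keypair(bits):   # condensed copy of the record's helper, minus the backend arg
    key = rsa.generate_private_key(public_exponent=65537, key_size=bits)
    private_pem = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    )
    public_pem = key.public_key().public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo,
    )
    return private_pem, public_pem

priv, pub = make_rsa_keypair(2048)
loaded = serialization.load_pem_private_key(priv, password=None)
print(loaded.key_size, pub.splitlines()[0])   # 2048 b'-----BEGIN PUBLIC KEY-----'
```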
def get_current_track_info(self): response = self.avTransport.GetPositionInfo([('InstanceID', 0), ('Channel', 'Master')]) track = {'title': '', 'artist': '', 'album': '', 'album_art': '', 'position': ''} track['playlist_position'] = response['Track'] track['duration'] = response['TrackDuration'] track['uri'] = response['TrackURI'] track['position'] = response['RelTime'] metadata = response['TrackMetaData'] track['metadata'] = metadata if ((metadata != '') and (track['duration'] == '0:00:00')): metadata = XML.fromstring(really_utf8(metadata)) trackinfo = (metadata.findtext('. index = trackinfo.find(' - ') if (index > (- 1)): track['artist'] = trackinfo[:index] track['title'] = trackinfo[(index + 3):] else: track['title'] = metadata.findtext('. if (not track['title']): track['title'] = trackinfo elif (metadata not in ('', 'NOT_IMPLEMENTED', None)): metadata = XML.fromstring(really_utf8(metadata)) md_title = metadata.findtext('. md_artist = metadata.findtext('. md_album = metadata.findtext('. track['title'] = '' if md_title: track['title'] = md_title track['artist'] = '' if md_artist: track['artist'] = md_artist track['album'] = '' if md_album: track['album'] = md_album album_art_url = metadata.findtext('. if (album_art_url is not None): track['album_art'] = self.music_library.build_album_art_full_uri(album_art_url) return track
Get information about the currently playing track. Returns: dict: A dictionary containing information about the currently playing track: playlist_position, duration, title, artist, album, position and an album_art link. If we're unable to return data for a field, we'll return an empty string. This can happen for all kinds of reasons so be sure to check values. For example, a track may not have complete metadata and be missing an album name. In this case track['album'] will be an empty string. .. note:: Calling this method on a slave in a group will not return the track the group is playing, but the last track this speaker was playing.
codesearchnet
def transform(self, df): for (name, function) in self.outputs: df[name] = function(df)
Transforms a DataFrame in place. Computes all outputs of the DataFrame. Args: df (pandas.DataFrame): DataFrame to transform.
codesearchnet
def preprocess_mel(self, audio: np.ndarray, beatstep: np.ndarray): if audio is not None and len(audio.shape) != 1: raise ValueError(f'Expected `audio` to be a single channel audio input of shape `(n, )` but found shape {audio.shape}.') if beatstep[0] > 0.0: beatstep = beatstep - beatstep[0] num_steps = self.num_bars * 4 num_target_steps = len(beatstep) extrapolated_beatstep = self.interpolate_beat_times(beat_times=beatstep, steps_per_beat=1, n_extend=(self.num_bars + 1) * 4 + 1) sample_indices = [] max_feature_length = 0 for i in range(0, num_target_steps, num_steps): start_idx = i end_idx = min(i + num_steps, num_target_steps) start_sample = int(extrapolated_beatstep[start_idx] * self.sampling_rate) end_sample = int(extrapolated_beatstep[end_idx] * self.sampling_rate) sample_indices.append((start_sample, end_sample)) max_feature_length = max(max_feature_length, end_sample - start_sample) padded_batch = [] for start_sample, end_sample in sample_indices: feature = audio[start_sample:end_sample] padded_feature = np.pad(feature, ((0, max_feature_length - feature.shape[0]),), 'constant', constant_values=0) padded_batch.append(padded_feature) padded_batch = np.asarray(padded_batch) return (padded_batch, extrapolated_beatstep)
Preprocessing for log-mel-spectrogram Args: audio (`numpy.ndarray` of shape `(audio_length, )` ): Raw audio waveform to be processed. beatstep (`numpy.ndarray`): Interpolated values of the raw audio. If beatstep[0] is greater than 0.0, then it will be shifted by the value at beatstep[0].
github-repos
def __init__(self, email, password): self.email = email self.password = password self.token = None self.refresh_token = None self.last_api_call = None self.state = [] self.authenticated = self._authenticate()
Create the EcoNet API interface object. Args: email (str): EcoNet account email address. password (str): EcoNet account password.
juraj-google-style
def determinize(self): epsilon_closure = {} for state in self.states: sid = state.stateid epsilon_closure[sid] = self._epsilon_closure(state) trans_table = {} for state in self.states: trans_table[state.stateid] = defaultdict(set) for arc in state: char = self.isyms.find(arc.ilabel) trans_table[state.stateid][char].add(arc.nextstate) is_final = lambda nfa_states, dfa_state: True \ if sum([ int(nfa_states[x].final) for x in dfa_state ]) >= 1 \ else False state_idx = 1 nfa_states = copy.deepcopy(self.states) self.states = [] self.add_state() new_initial = epsilon_closure[nfa_states[0].stateid] self.states[0].final = is_final(nfa_states, new_initial) dfa_state_idx_map = { frozenset(new_initial) : 0 } stack = [new_initial] while True: if not stack: break src_dfa_state = stack.pop() src_dfa_state_idx = dfa_state_idx_map[frozenset(src_dfa_state)] for char in self.alphabet: target_dfa_state = set([]) for nfa_state in src_dfa_state: next_states = \ set([y for x in trans_table[nfa_state][char] \ for y in epsilon_closure[x] ]) target_dfa_state.update(next_states) if frozenset(target_dfa_state) not in dfa_state_idx_map: self.add_state() dfa_state_idx_map[frozenset(target_dfa_state)] = state_idx self.states[state_idx].final = is_final(nfa_states, target_dfa_state) state_idx += 1 stack.append(target_dfa_state) dst_state_idx = dfa_state_idx_map[frozenset(target_dfa_state)] self.add_arc(src_dfa_state_idx, dst_state_idx, char) return self
Transforms a non-deterministic finite automaton (NFA) into a deterministic one. Args: None Returns: DFA: The resulting DFA Creating an equivalent DFA is done using the standard algorithm. A nice description can be found in the book: Harry R. Lewis and Christos H. Papadimitriou. 1998. Elements of the Theory of Computation.
juraj-google-style
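The subset construction used above is easier to see on a plain dict-based NFA. The sketch below skips the epsilon-closure step the record performs (the toy NFA has no epsilon transitions) and represents each DFA state as a frozenset of NFA states.

```python
from collections import deque

def nfa_to_dfa(transitions, start, accepting, alphabet):
    """Subset construction: each DFA state is a frozenset of NFA states."""
    start_set = frozenset([start])
    dfa_trans, dfa_accepting = {}, set()
    queue, seen = deque([start_set]), {start_set}
    while queue:
        state_set = queue.popleft()
        if state_set & accepting:
            dfa_accepting.add(state_set)
        for symbol in alphabet:
            target = frozenset(
                t for s in state_set for t in transitions.get((s, symbol), ())
            )
            dfa_trans[(state_set, symbol)] = target
            if target not in seen:
                seen.add(target)
                queue.append(target)
    return dfa_trans, start_set, dfa_accepting

# Toy NFA over {a, b} accepting strings that end in "ab".
nfa = {(0, 'a'): {0, 1}, (0, 'b'): {0}, (1, 'b'): {2}}
trans, start, accept = nfa_to_dfa(nfa, start=0, accepting={2}, alphabet='ab')
print(len({s for s, _ in trans}), "DFA states reachable")   # 3 DFA states reachable
```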
def _CheckByteStreamSize(self, byte_stream, byte_offset, data_type_size): try: byte_stream_size = len(byte_stream) except Exception as exception: raise errors.MappingError(exception) if byte_stream_size - byte_offset < data_type_size: raise errors.ByteStreamTooSmallError( 'Byte stream too small requested: {0:d} available: {1:d}'.format( data_type_size, byte_stream_size))
Checks if the byte stream is large enough for the data type. Args: byte_stream (bytes): byte stream. byte_offset (int): offset into the byte stream where to start. data_type_size (int): data type size. Raises: ByteStreamTooSmallError: if the byte stream is too small. MappingError: if the size of the byte stream cannot be determined.
juraj-google-style
def resolves_for(self, node): self.actual_title = normalize_text(node.title) return bool(self.search_regexp.search(self.actual_title))
Resolves this query relative to the given node. Args: node (node.Document): The node to be evaluated. Returns: bool: Whether the given node matches this query.
juraj-google-style
def update_data(func): default = dict([ (param.name, param.default) for param in inspect.signature(func).parameters.values() if param.default != getattr(inspect, '_empty') ]) @wraps(func) def wrapper(*args, **kwargs): default.update(kwargs) kwargs.update(default) cur_mod = sys.modules[func.__module__] logger = logs.get_logger(name_or_func=f'{cur_mod.__name__}.{func.__name__}', types='stream') root_path = cur_mod.DATA_PATH date_type = kwargs.pop('date_type', 'date') save_static = kwargs.pop('save_static', True) save_dynamic = kwargs.pop('save_dynamic', True) symbol = kwargs.get('symbol') file_kw = dict(func=func, symbol=symbol, root=root_path, date_type=date_type) d_file = cache_file(has_date=True, **file_kw) s_file = cache_file(has_date=False, **file_kw) cached = kwargs.pop('cached', False) if cached and save_static and files.exists(s_file): logger.info(f'Reading data from {s_file} ...') return pd.read_parquet(s_file) data = func(*args, **kwargs) if save_static: files.create_folder(s_file, is_file=True) save_data(data=data, file_fmt=s_file, append=False) logger.info(f'Saved data file to {s_file} ...') if save_dynamic: drop_dups = kwargs.pop('drop_dups', None) files.create_folder(d_file, is_file=True) save_data(data=data, file_fmt=d_file, append=True, drop_dups=drop_dups) logger.info(f'Saved data file to {d_file} ...') return data return wrapper
Decorator to save data more easily. Use parquet as data format Args: func: function to load data from data source Returns: wrapped function
juraj-google-style
def add(self, spec): for limit in spec.limit_to: if limit not in self.limit_to: self.limit_to.append(limit)
Add the limitations of the given spec to this spec's. Args: spec (PackageSpec): another spec.
juraj-google-style
def deep_variable_product(variables, limit: int=DEEP_VARIABLE_LIMIT): return _deep_values_list_product([v.bindings for v in variables], set(), ComplexityLimit(limit))
Take the deep Cartesian product of a list of Variables. For example: x1.children = {v2, v3} v1 = {x1, x2} v2 = {x3} v3 = {x4, x5} v4 = {x6} then deep_variable_product([v1, v4]) will return: [[x1, x3, x4, x6], [x1, x3, x5, x6], [x2, x6]] . Args: variables: A sequence of Variables. limit: How many results we allow before aborting. Returns: A list of lists of Values, where each sublist has one Value from each of the corresponding Variables and the Variables of their Values' children. Raises: TooComplexError: If we expanded too many values.
github-repos
def build_vep_string(vep_info, vep_columns): logger = getLogger(__name__) logger.debug("Building vep string from {0}".format(vep_info)) logger.debug("Found vep headers {0}".format(vep_columns)) vep_strings = [] for vep_annotation in vep_info: try: vep_info_list = [ vep_annotation[vep_key] for vep_key in vep_columns ] except KeyError: raise SyntaxError("Vep entry does not correspond to vep headers") vep_strings.append('|'.join(vep_info_list)) return ','.join(vep_strings)
Build a VEP-formatted string. Take a list of VEP annotations and build a new VEP string. Args: vep_info (list): A list with vep annotation dictionaries vep_columns (list): A list with the vep column names found in the header of the vcf Returns: string: A string with the proper vep annotations
juraj-google-style
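The output format is the usual VEP/CSQ convention: fields within one annotation are joined by '|', and annotations are joined by ','. A tiny illustration with made-up field names and values:

```python
vep_columns = ["Allele", "Consequence", "SYMBOL"]
vep_info = [
    {"Allele": "A", "Consequence": "missense_variant", "SYMBOL": "BRCA1"},
    {"Allele": "A", "Consequence": "intron_variant", "SYMBOL": "BRCA1"},
]

# Same shape as the record's output: '|' within an annotation, ',' between annotations.
vep_string = ",".join("|".join(entry[col] for col in vep_columns) for entry in vep_info)
print(vep_string)   # A|missense_variant|BRCA1,A|intron_variant|BRCA1
```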
def delete_file_v2(path): _pywrap_file_io.DeleteFile(compat.path_to_bytes(path))
Deletes the path located at 'path'. Args: path: string, a path Raises: errors.OpError: Propagates any errors reported by the FileSystem API. E.g., `NotFoundError` if the path does not exist.
github-repos
def random_string(length=8, charset=None): if length < 1: raise ValueError('Length must be > 0') if not charset: charset = string.letters + string.digits return ''.join(random.choice(charset) for unused in xrange(length))
Generates a string with random characters. If no charset is specified, only letters and digits are used. Args: length (int) length of the returned string charset (string) list of characters to choose from Returns: (str) with random characters from charset Raises: -
juraj-google-style
def requirements(requirements_file): return [str(pkg.req) for pkg in parse_requirements(requirements_file, session=pip_download.PipSession()) if (pkg.req is not None)]
Return packages mentioned in the given file. Args: requirements_file (str): path to the requirements file to be parsed. Returns: (list): 3rd-party package dependencies contained in the file.
codesearchnet
def update_conversation(self, conversation): new_state = conversation.self_conversation_state old_state = self._conversation.self_conversation_state self._conversation = conversation if (not new_state.delivery_medium_option): new_state.delivery_medium_option.extend(old_state.delivery_medium_option) old_timestamp = old_state.self_read_state.latest_read_timestamp new_timestamp = new_state.self_read_state.latest_read_timestamp if (new_timestamp == 0): new_state.self_read_state.latest_read_timestamp = old_timestamp for new_entry in conversation.read_state: tstamp = parsers.from_timestamp(new_entry.latest_read_timestamp) if (tstamp == 0): continue uid = parsers.from_participantid(new_entry.participant_id) if ((uid not in self._watermarks) or (self._watermarks[uid] < tstamp)): self._watermarks[uid] = tstamp
Update the internal state of the conversation. This method is used by :class:`.ConversationList` to maintain this instance. Args: conversation: ``Conversation`` message.
codesearchnet
def get_metadata(self, key) -> str: return self.metadata[key] if key in self.metadata else None
Get the value of a metadata. Returns None if metadata does not exist. Args: key (str): name of the metadata Returns: str: the value of the metadata (or None)
juraj-google-style
def _wait_for_any_event(events, timeout_s): def any_event_set(): return any(event.is_set() for event in events) result = timeouts.loop_until_timeout_or_true( timeout_s, any_event_set, sleep_s=_WAIT_FOR_ANY_EVENT_POLL_S) return result or any_event_set()
Wait for any in a list of threading.Event's to be set. Args: events: List of threading.Event's. timeout_s: Max duration in seconds to wait before returning. Returns: True if at least one event was set before the timeout expired, else False.
juraj-google-style
def points_are_in_a_straight_line(points, tolerance=1e-07): a = points[0] b = points[1] for c in points[2:]: if (area_of_a_triangle_in_cartesian_space(a, b, c) > tolerance): return False return True
Check whether a set of points fall on a straight line. Calculates the areas of triangles formed by triplets of the points. Returns False if any of these areas are larger than the tolerance. Args: points (list(np.array)): list of Cartesian coordinates for each point. tolerance (optional:float): the maximum triangle size for these points to be considered colinear. Default is 1e-7. Returns: (bool): True if all points fall on a straight line (within the allowed tolerance).
codesearchnet
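The colinearity test amounts to "every triangle formed with the first two points has (near-)zero area". A self-contained version using the cross product for the triangle area, checked on one straight and one bent set of points:

```python
import numpy as np

def triangle_area(a, b, c):
    # Half the magnitude of the cross product of two edge vectors.
    return 0.5 * np.linalg.norm(np.cross(b - a, c - a))

def colinear(points, tolerance=1e-7):
    a, b = points[0], points[1]
    return all(triangle_area(a, b, c) <= tolerance for c in points[2:])

pts_line = [np.array([0.0, 0.0, 0.0]), np.array([1.0, 1.0, 0.0]), np.array([2.0, 2.0, 0.0])]
pts_bent = pts_line[:2] + [np.array([2.0, 2.5, 0.0])]
print(colinear(pts_line), colinear(pts_bent))   # True False
```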
def _parse_order_by(model, order_by): out = [] for key in order_by: key = key.strip() if key.startswith('+'): out.append(getattr(model, key[1:])) elif key.startswith('-'): out.append(getattr(model, key[1:]).desc()) else: out.append(getattr(model, key)) return out
This function figures out the list of orderings for the given model and argument. Args: model (nautilus.BaseModel): The model to compute ordering against order_by (list of str): the list of fields to order by. If the field starts with a `+` then the order is ascending, if `-` descending; if no character precedes the field, the ordering is assumed to be ascending. Returns: (list): the model orderings to apply to the query
codesearchnet
def iterable_source(iterable, target): it = iter(iterable) for item in it: try: target.send(item) except StopIteration: return prepend(item, it) return empty_iter()
Convert an iterable into a stream of events. Args: iterable: A series of items which will be sent to the target one by one. target: The target coroutine or sink. Returns: An iterator over any remaining items.
juraj-google-style
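The pattern here is a push-based pipeline: the source drives a primed generator ("coroutine") via `.send()` and stops when the target raises `StopIteration`. Below is a minimal sketch of just that interaction; unlike the record, it does not prepend the in-flight item back onto the remaining stream.

```python
def collecting_sink(limit):
    """Toy target coroutine: accepts `limit` items pushed in via .send()."""
    for _ in range(limit):
        item = (yield)
        print("sink got:", item)

sink = collecting_sink(2)
next(sink)                      # prime the generator up to its first yield
for item in "abcd":
    try:
        sink.send(item)
    except StopIteration:       # the sink returned right after its last accepted item
        print("sink closed after:", item)
        break
```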
def onCall(self, n): cond_oncall = (n + 1) return _SinonStubCondition(copy=self._copy, oncall=cond_oncall, cond_args=self._cond_args, cond_kwargs=self._cond_kwargs)
Adds a condition for when the stub is called. When the condition is met, a special return value can be returned. Adds the specified call number into the condition list. For example, when the stub function is called the second time, it will return "#": stub.onCall(1).returns("#") Without returns/throws at the end of the chain of functions, nothing will happen. For example, in this case, although 2 is in the condition list, nothing will happen: stub.onCall(2) Args: n: integer, the call # for which we want a special return value. The first call has an index of 0. Return: a SinonStub object (able to be chained)
codesearchnet
def track_event(self, name, properties=None, measurements=None): data = channel.contracts.EventData() data.name = name or NULL_CONSTANT_STRING if properties: data.properties = properties if measurements: data.measurements = measurements self.track(data, self._context)
Send information about a single event that has occurred in the context of the application. Args: name (str). the name of the event.\n properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)
juraj-google-style
def load_attributes_from_hdf5_group(group, name): if name in group.attrs: data = [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs[name]] else: data = [] chunk_id = 0 while f'{name}{chunk_id}' in group.attrs: data.extend([n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs[f'{name}{chunk_id}']]) chunk_id += 1 return data
Loads attributes of the specified name from the HDF5 group. This method deals with an inherent problem of HDF5 file which is not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Args: group: A pointer to a HDF5 group. name: A name of the attributes to load. Returns: data: Attributes data.
github-repos
def fully_qualify_alias_labels(label, aliases): for (alias, full_name) in aliases.items(): if (label == alias): return full_name elif label.startswith((alias + '.')): return (full_name + label[len(alias):]) return label
Replace any aliases in label with the fully qualified name. Args: label -- A label : str representing a name (e.g. myos.system) aliases -- A dict of {alias: real_name} (e.g. {'myos': 'os'}) >>> fully_qualify_alias_labels('myos.mycall', {'myos':'os'}) 'os.mycall'
codesearchnet
def _ParseRecord(self, parser_mediator, file_object, record_offset): record_strings_data_offset = file_object.tell() record_strings_data_size = record_offset - record_strings_data_offset record_strings_data = self._ReadData( file_object, record_strings_data_offset, record_strings_data_size) record_map = self._GetDataTypeMap('asl_record') try: record, record_data_size = self._ReadStructureFromFileObject( file_object, record_offset, record_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile(( 'Unable to parse record at offset: 0x{0:08x} with error: ' '{1!s}').format(record_offset, exception)) hostname = self._ParseRecordString( record_strings_data, record_strings_data_offset, record.hostname_string_offset) sender = self._ParseRecordString( record_strings_data, record_strings_data_offset, record.sender_string_offset) facility = self._ParseRecordString( record_strings_data, record_strings_data_offset, record.facility_string_offset) message = self._ParseRecordString( record_strings_data, record_strings_data_offset, record.message_string_offset) file_offset = record_offset + record_data_size additional_data_size = record.data_size + 6 - record_data_size if additional_data_size % 8 != 0: raise errors.ParseError( 'Invalid record additional data size: {0:d}.'.format( additional_data_size)) additional_data = self._ReadData( file_object, file_offset, additional_data_size) extra_fields = {} for additional_data_offset in range(0, additional_data_size - 8, 16): record_extra_field = self._ParseRecordExtraField( additional_data[additional_data_offset:], file_offset) file_offset += 16 name = self._ParseRecordString( record_strings_data, record_strings_data_offset, record_extra_field.name_string_offset) value = self._ParseRecordString( record_strings_data, record_strings_data_offset, record_extra_field.value_string_offset) if name is not None: extra_fields[name] = value event_data = ASLEventData() event_data.computer_name = hostname event_data.extra_information = ', '.join([ '{0:s}: {1:s}'.format(name, value) for name, value in sorted(extra_fields.items())]) event_data.facility = facility event_data.group_id = record.group_identifier event_data.level = record.alert_level event_data.message_id = record.message_identifier event_data.message = message event_data.pid = record.process_identifier event_data.read_gid = record.real_group_identifier event_data.read_uid = record.real_user_identifier event_data.record_position = record_offset event_data.sender = sender event_data.user_sid = '{0:d}'.format(record.user_identifier) microseconds, _ = divmod(record.written_time_nanoseconds, 1000) timestamp = (record.written_time * 1000000) + microseconds date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) return record.next_record_offset
Parses a record and produces events. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (file): file-like object. record_offset (int): offset of the record relative to the start of the file. Returns: int: next record offset. Raises: ParseError: if the record cannot be parsed.
juraj-google-style
def get_location_from_HDX_code(code, locations=None, configuration=None): if (locations is None): locations = Locations.validlocations(configuration) for locdict in locations: if (code.upper() == locdict['name'].upper()): return locdict['title'] return None
Get location from HDX location code Args: code (str): code for which to get location name locations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX. configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Optional[str]: location name
codesearchnet
def assert_same_float_dtype(tensors=None, dtype=None): if tensors: dtype = _assert_same_base_type(tensors, dtype) if not dtype: dtype = dtypes.float32 elif not dtype.is_floating: raise ValueError('Expected floating point type, got %s.' % dtype) return dtype
Validate and return float type based on `tensors` and `dtype`. For ops such as matrix multiplication, inputs and weights must be of the same float type. This function validates that all `tensors` are the same type, validates that type is `dtype` (if supplied), and returns the type. Type must be a floating point type. If neither `tensors` nor `dtype` is supplied, the function will return `dtypes.float32`. Args: tensors: Tensors of input values. Can include `None` elements, which will be ignored. dtype: Expected type. Returns: Validated type. Raises: ValueError: if neither `tensors` nor `dtype` is supplied, or result is not float, or the common type of the inputs is not a floating point type.
github-repos
def parse_datetime(__string: str) -> datetime.datetime: if (not __string): datetime_ = datetime.datetime.now(datetime.timezone.utc) else: datetime_ = ciso8601.parse_datetime(__string) if (datetime_.tzinfo is None): datetime_ = datetime_.replace(tzinfo=datetime.timezone.utc) return datetime_
Parse ISO-8601 datetime string. Args: __string: Datetime string to parse Returns: Parsed datetime object
codesearchnet
def open_usb_handle(self, port_num): serial = self.get_usb_serial(port_num) return local_usb.LibUsbHandle.open(serial_number=serial)
Open a USB port. Args: port_num: port number on the Cambrionix unit Returns: USB handle
juraj-google-style
def decode(self, fp: TextIO) -> BioCCollection: tree = etree.parse(fp) collection = self.__parse_collection(tree.getroot()) collection.encoding = tree.docinfo.encoding collection.standalone = tree.docinfo.standalone collection.version = tree.docinfo.xml_version return collection
Deserialize ``fp`` to a BioC collection object. Args: fp: a ``.read()``-supporting file-like object containing a BioC collection Returns: an object of BioCollection
codesearchnet
def as_int(self) -> int: if len(self._messages) != 1: raise ValueError('FHIRPath did not evaluate to a single integer.') return proto_utils.get_value_at_field(self._messages[0], 'value')
Returns the result as an integer. Raises: ValueError if the `EvaluationResult` is not a single integer.
github-repos
def kick_user(self, user_id, reason=""): try: self.client.api.kick_user(self.room_id, user_id) return True except MatrixRequestError: return False
Kick a user from this room. Args: user_id (str): The matrix user id of a user. reason (str): A reason for kicking the user. Returns: boolean: Whether user was kicked.
juraj-google-style
def setPulseInputRatio(self, line_in, new_cnst, password='00000000'): result = False self.setContext('setPulseInputRatio') try: if (not self.requestA()): self.writeCmdMsg('Bad read CRC on setting') elif (not self.serialCmdPwdAuth(password)): self.writeCmdMsg('Password failure') else: req_const = binascii.hexlify(str(new_cnst).zfill(4)) line_const = binascii.hexlify(str((line_in - 1))) req_str = (((('01573102303041' + line_const) + '28') + req_const) + '2903') req_str += self.calc_crc16(req_str[2:].decode('hex')) self.m_serial_port.write(req_str.decode('hex')) if (self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06'): self.writeCmdMsg('Success: 06 returned.') result = True self.serialPostEnd() except: ekm_log(traceback.format_exc(sys.exc_info())) self.setContext('') return result
Serial call to set the pulse input ratio on a line. Args: line_in (int): Member of :class:`~ekmmeters.Pulse` new_cnst (int): New pulse input ratio password (str): Optional password Returns: bool: True on success.
codesearchnet
def message_factory(msg_type, msg_types=MESSAGE_TYPES, *args, **kwargs): try: return msg_types[msg_type.lower()](*args, **kwargs) except (UnknownProfileError, InvalidMessageInputError) as e: err_exit('Unable to send message: ', e) except KeyError: raise UnsupportedMessageTypeError(msg_type, msg_types)
Factory function to return the specified message instance. Args: :msg_type: (str) the type of message to send, i.e. 'Email' :msg_types: (str, list, or set) the supported message types :kwargs: (dict) keywords arguments that are required for the various message types. See docstrings for each type. i.e. help(messages.Email), help(messages.Twilio), etc.
codesearchnet
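The factory above is a straight dictionary dispatch on the lower-cased type name. A runnable sketch with stand-in message classes; the class names echo the docstring's examples, and the real package's error handling is simplified to a ValueError.

```python
class Email:
    def __init__(self, to, subject):
        self.to, self.subject = to, subject

class Twilio:
    def __init__(self, to, body):
        self.to, self.body = to, body

MESSAGE_TYPES = {"email": Email, "twilio": Twilio}

def message_factory(msg_type, msg_types=MESSAGE_TYPES, *args, **kwargs):
    try:
        return msg_types[msg_type.lower()](*args, **kwargs)   # dispatch on lower-cased name
    except KeyError:
        raise ValueError("Unsupported message type: %s" % msg_type)

msg = message_factory("Email", to="a@example.com", subject="hi")
print(type(msg).__name__, msg.to)   # Email a@example.com
```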
def get_file_handle(file_path): LOG.debug("Check if file end is correct") if not os.path.exists(file_path): raise IOError("No such file:{0}".format(file_path)) if not os.path.splitext(file_path)[-1] in VALID_ENDINGS: raise IOError("Not a valid vcf file name: {}".format(file_path)) vcf_obj = VCF(file_path) return vcf_obj
Return cyvcf2 VCF object Args: file_path(str) Returns: vcf_obj(cyvcf2.VCF)
juraj-google-style
def readinto(self, b): self._checkClosed() if self._position >= self._downloader.size: return 0 start = self._position end = min(self._position + len(b), self._downloader.size) data = self._downloader.get_range(start, end) self._position += len(data) b[:len(data)] = data return len(data)
Read up to len(b) bytes into b. Returns number of bytes read (0 for EOF). Args: b: (bytearray/memoryview) Buffer to read into.
github-repos
def lift_to_graph(tensors, graph, sources=None, disallowed_placeholders=None, add_sources=False, handle_captures=False, base_graph=None, op_map=None): variable_init_tensors = [] init_tensors = [] for tensor in tensors: if isinstance(tensor, resource_variable_ops.ResourceVariable): variable_init_tensors.append(tensor) else: init_tensors.append(tensor) base_graph = base_graph or init_tensors[0].graph op_map = op_map or object_identity.ObjectIdentityDictionary() sources = object_identity.ObjectIdentitySet(sources or []) visited_ops = set((x.op for x in sources)) op_outputs = collections.defaultdict(set) for init_tensor in init_tensors: sources.update(op_selector.map_subgraph(init_tensor=init_tensor, sources=sources, disallowed_placeholders=disallowed_placeholders, visited_ops=visited_ops, op_outputs=op_outputs, add_sources=add_sources)) ops_to_copy = [] marked_ops = set([]) ops_to_visit = [_as_operation(t) for t in init_tensors if not op_outputs[_as_operation(t)]] unvisited_ops = set(ops_to_visit) while unvisited_ops: while ops_to_visit: op = ops_to_visit.pop() if op in marked_ops: continue marked_ops.add(op) ops_to_copy.append(op) for inp in op_selector.graph_inputs(op): if inp.type == 'TPUReplicateMetadata': continue unvisited_ops.add(inp) if all((x in marked_ops for x in op_outputs[inp])) and inp not in sources: ops_to_visit.append(inp) unvisited_ops.difference_update(marked_ops) if unvisited_ops: ops_to_visit.append(next(iter(unvisited_ops))) ops_to_copy.sort(key=lambda op: len(op_selector.graph_inputs(op)) == 0) captures = [] inverse_captures = object_identity.ObjectIdentityDictionary() internal_captures = [] if isinstance(base_graph, func_graph.FuncGraph) and isinstance(graph, func_graph.FuncGraph): captures = base_graph.captures for external_capture, internal_capture in captures: inverse_captures[internal_capture] = external_capture internal_captures = base_graph.internal_captures with graph.as_default(): for i in variable_init_tensors: op_map[i] = i source_ops = set() for s in internal_captures: if s in sources: sources.remove(s) source_ops.add(s.op) _copy_source(s=s, graph=graph, op_map=op_map, handle_captures=handle_captures, inverse_captures=inverse_captures, base_graph=base_graph) for s in sources: source_ops.add(s.op) _copy_source(s=s, graph=graph, op_map=op_map, handle_captures=handle_captures, inverse_captures=inverse_captures, base_graph=base_graph) input_mutations = [] control_mutations = [] for op in reversed(ops_to_copy): if op in source_ops or op in op_map: continue new_input_mutations, new_control_mutations = _copy_non_source(op=op, graph=graph, op_map=op_map, base_graph=base_graph) input_mutations.extend(new_input_mutations) control_mutations.extend(new_control_mutations) with graph._mutation_lock(): for mutation in input_mutations: mutation.copied_op._update_input(mutation.input_index, op_map[mutation.old_graph_tensor]) for mutation in control_mutations: if mutation.old_graph_op.type == 'TPUReplicateMetadata': continue mutation.copied_op._add_control_input(op_map[mutation.old_graph_op]) return op_map
Copies the tensor and all its inputs recursively to the outer graph. Args: tensors: The Tensors to lift. graph: The graph to lift to. sources: Optional sequence of nodes to start from. If omitted the whole subgraph which feeds into `init_tensor` is lifted. disallowed_placeholders: An optional set of ops which may not appear in the lifted graph. Defaults to all placeholders. add_sources: A boolean indicating whether placeholders which are not in sources should be allowed. handle_captures: A boolean indicating whether to re-capture s in the new graph or simply create a vanilla placeholder. base_graph: The graph from which to lift ops. This will be inferred if not specified. op_map: A map contains all the existing nodes that have been lifted to the destination graph, so they won't be lifted and copied again. Returns: A mapping from ops in the current default graph to ops in `graph`. Raises: UnliftableError: If a placeholder blocks lifting.
github-repos
def run_inference(self, batch: Sequence[dict[str, torch.Tensor]], model: torch.nn.Module, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]: inference_args = {} if not inference_args else inference_args model_id = self._state_dict_path if not self._torch_script_model_path else self._torch_script_model_path return self._inference_fn(batch, model, self._device, inference_args, model_id)
Runs inferences on a batch of Keyed Tensors and returns an Iterable of Tensor Predictions. For the same key across all examples, this will stack all Tensors values in a vectorized format to optimize the inference call. Args: batch: A sequence of keyed Tensors. These Tensors should be batchable, as this method will call `torch.stack()` and pass in batched Tensors with dimensions (batch_size, n_features, etc.) into the model's forward() function. model: A PyTorch model. inference_args: Non-batchable arguments required as inputs to the model's forward() function. Unlike Tensors in `batch`, these parameters will not be dynamically batched Returns: An Iterable of type PredictionResult.
github-repos
def error_handler(self, handler): if (not self.opened()): handler = (handler or util.noop) self._error_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler) self._dll.JLINKARM_SetErrorOutHandler(self._error_handler)
Setter for the error handler function. If the DLL is open, this function is a no-op, so it should be called prior to calling ``open()``. Args: self (JLink): the ``JLink`` instance handler (function): function to call on error messages Returns: ``None``
codesearchnet
def _get_inputs_tensor_info_from_meta_graph_def(meta_graph_def, signature_def_key): if signature_def_key not in meta_graph_def.signature_def: raise ValueError(f'Could not find signature "{signature_def_key}". Please choose from: {', '.join(meta_graph_def.signature_def.keys())}') return meta_graph_def.signature_def[signature_def_key].inputs
Gets TensorInfo for all inputs of the SignatureDef. Returns a dictionary that maps each input key to its TensorInfo for the given signature_def_key in the meta_graph_def Args: meta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to look up SignatureDef key. signature_def_key: A SignatureDef key string. Returns: A dictionary that maps input tensor keys to TensorInfos. Raises: ValueError if `signature_def_key` is not found in the MetaGraphDef.
github-repos
def _bulk_cache_lookup(self, api_name, keys): if self._cache: responses = self._cache.bulk_lookup(api_name, keys) missing_keys = [key for key in keys if (key not in responses.keys())] return (responses, missing_keys) return ({}, keys)
Performs a bulk cache lookup and returns a tuple with the results found and the keys missing in the cache. If the cache is not configured, it will return an empty dictionary of found results and the initial list of keys. Args: api_name: a string name of the API. keys: an enumerable of string keys. Returns: A tuple: (responses found, missing keys).
codesearchnet
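A self-contained sketch of the hits-plus-misses split, with a toy cache object exposing the `bulk_lookup(api_name, keys)` interface the record assumes; the stored values and API name are invented for illustration.

```python
class DictCache:
    """Toy cache with the bulk_lookup(api_name, keys) shape the record assumes."""
    def __init__(self, store):
        self._store = store

    def bulk_lookup(self, api_name, keys):
        return {k: self._store[(api_name, k)] for k in keys if (api_name, k) in self._store}

def bulk_cache_lookup(cache, api_name, keys):
    if cache:
        responses = cache.bulk_lookup(api_name, keys)
        missing = [k for k in keys if k not in responses]
        return responses, missing
    return {}, list(keys)

cache = DictCache({("hashes", "abc"): {"verdict": "clean"}})
print(bulk_cache_lookup(cache, "hashes", ["abc", "def"]))
# ({'abc': {'verdict': 'clean'}}, ['def'])
```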