Dataset columns: code (string, lengths 20 to 4.93k characters), docstring (string, lengths 33 to 1.27k characters), source (string, 3 classes).
def from_db(cls, bigchain, tx_dict_list): return_list = True if isinstance(tx_dict_list, dict): tx_dict_list = [tx_dict_list] return_list = False tx_map = {} tx_ids = [] for tx in tx_dict_list: tx.update({'metadata': None}) tx_map[tx['id']] = tx tx_ids.append(tx['id']) assets = list(bigchain.get_assets(tx_ids)) for asset in assets: if (asset is not None): tx = tx_map[asset['id']] del asset['id'] tx['asset'] = asset tx_ids = list(tx_map.keys()) metadata_list = list(bigchain.get_metadata(tx_ids)) for metadata in metadata_list: tx = tx_map[metadata['id']] tx.update({'metadata': metadata.get('metadata')}) if return_list: tx_list = [] for (tx_id, tx) in tx_map.items(): tx_list.append(cls.from_dict(tx)) return tx_list else: tx = list(tx_map.values())[0] return cls.from_dict(tx)
Helper method that reconstructs a transaction dict that was returned from the database. It checks what asset_id to retrieve, retrieves the asset from the asset table and reconstructs the transaction. Args: bigchain (:class:`~bigchaindb.tendermint.BigchainDB`): An instance of BigchainDB used to perform database queries. tx_dict_list (:list:`dict` or :obj:`dict`): The transaction dict or list of transaction dict as returned from the database. Returns: :class:`~Transaction`
codesearchnet
def normalize_date(tmy_date, year): month = tmy_date.month day = (tmy_date.day - 1) hour = tmy_date.hour if ((month == 1) and (day == 0) and (hour == 0)): year = (year + 1) return (datetime.datetime(year, month, 1) + datetime.timedelta(days=day, hours=hour, minutes=0))
Change TMY3 date to an arbitrary year. Args: tmy_date (datetime): date to mangle. year (int): desired year. Returns: (datetime): the same date mapped into the desired year.
codesearchnet
async def send_message(self, name, level, message): if (name not in self.services): raise ArgumentError('Unknown service name', short_name=name) msg = self.services[name]['state'].post_message(level, message) (await self._notify_update(name, 'new_message', msg.to_dict()))
Post a message for a service. Args: name (string): The short name of the service to query level (int): The level of the message (info, warning, error) message (string): The message contents
codesearchnet
def collect_publications(self): pubs = list(self.sub_publications) for sub_tree in self.sub_trees: pubs.extend(sub_tree.collect_publications()) return pubs
Recursively collect list of all publications referenced in this tree and all sub-trees. Returns: list: List of UUID strings.
codesearchnet
def __init__(self, dtype, shape, accumulator_ref): self._dtype = dtype if shape is not None: self._shape = tensor_shape.TensorShape(shape) else: self._shape = tensor_shape.unknown_shape() self._accumulator_ref = accumulator_ref if context.executing_eagerly(): self._name = context.context().scope_name else: self._name = self._accumulator_ref.op.name.split('/')[-1]
Creates a new ConditionalAccumulator. Args: dtype: Datatype of the accumulated gradients. shape: Shape of the accumulated gradients. accumulator_ref: A handle to the conditional accumulator, created by subclasses
github-repos
def DefaultParseValue(value): try: return _LiteralEval(value) except (SyntaxError, ValueError): return value
The default argument parsing function used by Fire CLIs. If the value is made of only Python literals and containers, then the value is parsed as its Python value. Otherwise, provided the value contains no quote, escape, or parenthetical characters, the value is treated as a string. Args: value: A string from the command line to be parsed for use in a Fire CLI. Returns: The parsed value, of the type determined most appropriate.
github-repos
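A brief usage sketch for `DefaultParseValue` above (hypothetical calls; assumes the function and its `_LiteralEval` helper are importable from the surrounding Fire module):
    print(DefaultParseValue('[1, 2, 3]'))   # [1, 2, 3]  - parsed as a Python literal
    print(DefaultParseValue('{"a": 1}'))    # {'a': 1}
    print(DefaultParseValue('hello'))       # 'hello'    - bare words fall back to strings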
def get(self): return self._master._get_helper(self._master._sorted_items, self._q)
Returns the calculated quantiles based on the master tracker's buffer. Returns: A list of calculated quantiles.
github-repos
def easeInOutCirc(n): _checkRange(n) n = n * 2 if n < 1: return -0.5 * (math.sqrt(1 - n**2) - 1) else: n = n - 2 return 0.5 * (math.sqrt(1 - n**2) + 1)
A circular tween function that accelerates, reaches the midpoint, and then decelerates. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
juraj-google-style
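A few values of `easeInOutCirc` above, worked out by hand from the formula (assumes `math` is imported and `_checkRange` accepts inputs in 0.0-1.0):
    easeInOutCirc(0.0)    # 0.0
    easeInOutCirc(0.25)   # ~0.067  (slow start)
    easeInOutCirc(0.5)    # 0.5     (midpoint)
    easeInOutCirc(0.75)   # ~0.933  (slow finish)
    easeInOutCirc(1.0)    # 1.0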
def copy_to_file(self, name, fp_dest, callback=None): assert compat.is_native(name) def _write_to_file(data): fp_dest.write(data) if callback: callback(data) self.ftp.retrbinary( "RETR {}".format(name), _write_to_file, FtpTarget.DEFAULT_BLOCKSIZE )
Write cur_dir/name to file-like `fp_dest`. Args: name (str): file name, located in self.curdir fp_dest (file-like): must support write() method callback (function, optional): Called like `func(buf)` for every written chunk
juraj-google-style
def _tokens_to_subtoken(self, tokens): ret = [] for token in tokens: ret.extend( self._escaped_token_to_subtoken_strings(_escape_token(token, self._alphabet))) return ret
Converts a list of tokens to a list of subtokens. Args: tokens: a list of strings. Returns: a list of subtoken strings.
juraj-google-style
def select_by_value(self, value): self._selected_key = None self._selected_item = None for k in self.children: item = self.children[k] if (item.get_text() == value): item.attributes['selected'] = 'selected' self._selected_key = k self._selected_item = item elif ('selected' in item.attributes): del item.attributes['selected']
Selects a DropDownItem by means of the contained text. Args: value (str): Textual content of the DropDownItem that has to be selected.
codesearchnet
def _unwrap_el(self, value): if isinstance(value, dict) and 'ELEMENT' in value: element_id = value.get('ELEMENT') return WebElement(element_id, self) elif isinstance(value, list) and not isinstance(value, str): return [self._unwrap_el(item) for item in value] else: return value
Convert {'ELEMENT': 1234} to WebElement Object Args: value(str|list|dict): The value field in the json response. Returns: The unwrapped value.
juraj-google-style
def extend(*args): if not args: return {} first = args[0] rest = args[1:] out = type(first)(first) for each in rest: out.update(each) return out
shallow dictionary merge Args: *args: dicts to merge; the first determines the result type and later dicts override earlier keys on collision. Returns: new instance of the same type as the first argument, with all arguments merged.
juraj-google-style
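A short usage sketch for `extend` above; later dictionaries win on key collisions and the result takes the type of the first argument:
    extend({'a': 1, 'b': 2}, {'b': 3}, {'c': 4})   # {'a': 1, 'b': 3, 'c': 4}
    extend()                                        # {}
    from collections import OrderedDict
    extend(OrderedDict(a=1), {'b': 2})              # OrderedDict([('a', 1), ('b', 2)])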
def save_to_file(json_data, filename): if filename[-5:] != '.json': print('filename: %s' % filename) filename += '.json' with open(PATH_TO_DIR + '/' + filename, 'w') as f: json.dump(json_data, f, sort_keys=True, indent=4) print(' Successfully wrote configs to file `%s`.\n' % filename)
Saves all detected configuration(s) into a JSON file. Args: json_data: Dict of all configurations found. filename: String that is the name of the output JSON file.
github-repos
def weights_concatenated(labels): eos_mask = tf.to_int32(tf.equal(labels, 1)) sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True) in_target = tf.equal(tf.mod(sentence_num, 2), 1) sentence_num_plus_one = (sentence_num + 1) shifted = tf.pad(sentence_num_plus_one, [[0, 0], [2, 0], [0, 0], [0, 0]])[:, :-2, :, :] nonboilerplate = tf.equal(sentence_num_plus_one, shifted) ret = to_float(tf.logical_and(nonboilerplate, in_target)) return ret
Assign weight 1.0 to the "target" part of the concatenated labels. The labels look like: source English I love you . ID1 target French Je t'aime . ID1 source English the cat ID1 target French le chat ID1 source English ... We want to assign weight 1.0 to all words in the target text (including the ID1 end symbol), but not to the source text or the boilerplate. In the above example, the target words that get positive weight are: Je t'aime . ID1 le chat ID1 Args: labels: a Tensor Returns: a Tensor
codesearchnet
def get_dict(self): self.is_valid() return self._get_dict()
Returns the internal-API dictionary representing the :class:`DisplayDataItem`. Returns: Dict[str, Any]: A dictionary. The internal-API dictionary representing the :class:`DisplayDataItem`. Raises: ValueError: if the item is not valid.
github-repos
def insert_chain(cur, chain, encoded_data=None): if (encoded_data is None): encoded_data = {} if ('nodes' not in encoded_data): encoded_data['nodes'] = json.dumps(sorted(chain), separators=(',', ':')) if ('chain_length' not in encoded_data): encoded_data['chain_length'] = len(chain) insert = 'INSERT OR IGNORE INTO chain(chain_length, nodes) VALUES (:chain_length, :nodes);' cur.execute(insert, encoded_data)
Insert a chain into the cache. Args: cur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function is meant to be run within a :obj:`with` statement. chain (iterable): A collection of nodes. Chains in embedding act as one node. encoded_data (dict, optional): If a dictionary is provided, it will be populated with the serialized data. This is useful for preventing encoding the same information many times. Notes: This function assumes that the nodes in chain are index-labeled.
codesearchnet
def CopyMicrosecondsToFractionOfSecond(cls, microseconds): if microseconds < 0 or microseconds >= definitions.MICROSECONDS_PER_SECOND: raise ValueError( 'Number of microseconds value: {0:d} out of bounds.'.format( microseconds)) return decimal.Decimal(microseconds) / definitions.MICROSECONDS_PER_SECOND
Copies the number of microseconds to a fraction of second value. Args: microseconds (int): number of microseconds. Returns: decimal.Decimal: fraction of second, which must be a value between 0.0 and 1.0. Raises: ValueError: if the number of microseconds is out of bounds.
juraj-google-style
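Worked values for `CopyMicrosecondsToFractionOfSecond` above (assuming `definitions.MICROSECONDS_PER_SECOND` is 1000000):
    CopyMicrosecondsToFractionOfSecond(500000)    # Decimal('0.5')
    CopyMicrosecondsToFractionOfSecond(1)         # Decimal('0.000001')
    CopyMicrosecondsToFractionOfSecond(1000000)   # raises ValueError (out of bounds)
    CopyMicrosecondsToFractionOfSecond(-1)        # raises ValueError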
def fit(self, X, y): self._word_vocab.add_documents(X) self._label_vocab.add_documents(y) if self._use_char: for doc in X: self._char_vocab.add_documents(doc) self._word_vocab.build() self._char_vocab.build() self._label_vocab.build() return self
Learn vocabulary from training set. Args: X : iterable. An iterable which yields either str, unicode or file objects. y : iterable. The corresponding label sequences. Returns: self : IndexTransformer.
codesearchnet
def setRelay(self, seconds, relay, status, password="00000000"): result = False self.setContext("setRelay") try: self.clearCmdMsg() if len(password) != 8: self.writeCmdMsg("Invalid password length.") self.setContext("") return result if seconds < 0 or seconds > 9999: self.writeCmdMsg("Relay duration must be between 0 and 9999.") self.setContext("") return result if not self.requestA(): self.writeCmdMsg("Bad read CRC on setting") else: if not self.serialCmdPwdAuth(password): self.writeCmdMsg("Password failure") else: req_str = "" req_str = ("01573102303038" + binascii.hexlify(str(relay)).zfill(2) + "28" + binascii.hexlify(str(status)).zfill(2) + binascii.hexlify(str(seconds).zfill(4)) + "2903") req_str += self.calc_crc16(req_str[2:].decode("hex")) self.m_serial_port.write(req_str.decode("hex")) if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06": self.writeCmdMsg("Success: 06 returned.") result = True self.serialPostEnd() except: ekm_log(traceback.format_exc(sys.exc_info())) self.setContext("") return result
Serial call to set relay. Args: seconds (int): Seconds to hold, zero is hold forever. See :class:`~ekmmeters.RelayInterval`. relay (int): Selected relay, see :class:`~ekmmeters.Relay`. status (int): Status to set, see :class:`~ekmmeters.RelayState` password (str): Optional password Returns: bool: True on completion and ACK.
juraj-google-style
def typing(self, room: Room, timeout: int=5000): path = f'/rooms/{quote(room.room_id)}/typing/{quote(self.user_id)}' return self.api._send('PUT', path, {'typing': True, 'timeout': timeout})
Send typing event directly to api Args: room: room to send typing event to timeout: timeout for the event, in ms
codesearchnet
def list_workers(config, *, filter_by_queues=None): celery_app = create_app(config) worker_stats = celery_app.control.inspect().stats() queue_stats = celery_app.control.inspect().active_queues() if worker_stats is None: return [] workers = [] for name, w_stat in worker_stats.items(): queues = [QueueStats.from_celery(q_stat) for q_stat in queue_stats[name]] add_worker = filter_by_queues is None if not add_worker: for queue in queues: if queue.name in filter_by_queues: add_worker = True break if add_worker: workers.append(WorkerStats.from_celery(name, w_stat, queues)) return workers
Return a list of all available workers. Args: config (Config): Reference to the configuration object from which the settings are retrieved. filter_by_queues (list): Restrict the returned workers to workers that listen to at least one of the queue names in this list. Returns: list: A list of WorkerStats objects.
juraj-google-style
def _RemoveForwardedIps(self, forwarded_ips, interface): for address in forwarded_ips: self.ip_forwarding_utils.RemoveForwardedIp(address, interface)
Remove the forwarded IP addresses from the network interface. Args: forwarded_ips: list, the forwarded IP address strings to delete. interface: string, the output device to use.
codesearchnet
def add_to_screen(self, screen_width, screen): for (lineno, fields) in enumerate(self.line_fields): for (left, field) in self.compute_positions(screen_width, fields): logger.debug('Adding field %s to screen %s at x=%d->%d, y=%d', field, screen.ref, left, ((left + field.width) - 1), (1 + lineno)) self.widgets[field] = field.add_to_screen(screen, left, (1 + lineno)) self.register_hooks(field)
Add the pattern to a screen. Also fills self.widgets. Args: screen_width (int): the width of the screen screen (lcdprod.Screen): the screen to fill.
codesearchnet
def create(cls, **kwargs): try: return cls.add(cls.new(**kwargs)) except: cls.session.rollback() raise
Initializes a new instance, adds it to the db and commits the transaction. Args: **kwargs: The keyword arguments for the init constructor. Examples: >>> user = User.create(name="Vicky", email="vicky@h.com") >>> user.id 35
codesearchnet
def __add__(self, other): assert isinstance(other, LocationDescriptor), "You can only add LocationDescriptor together." assert self._separation_char == other._separation_char, \ "You can only add LocationDescriptor together if they share the same separator character." new_location_string_list = self.get_locations_list() + other.get_locations_list() return LocationDescriptor(new_location_string_list)
Create a **new** :class:`LocationDescriptor` object that is the sum of this one and another. Args: self: This :class:`LocationDescriptor` object. other: Another :class:`LocationDescriptor` object. Returns: Sum of both :class:`LocationDescriptor` objects.
juraj-google-style
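A hypothetical sketch of the addition above, assuming the `LocationDescriptor` constructor accepts a list of location strings (as the method itself does when building its result):
    left = LocationDescriptor(['europe', 'france'])
    right = LocationDescriptor(['paris'])
    combined = left + right   # new LocationDescriptor over ['europe', 'france', 'paris']
    left + 'paris'            # AssertionError: both operands must be LocationDescriptor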
def _GetFirefoxConfig(self, file_object, display_name): to_read = min(file_object.get_size(), self._INITIAL_CACHE_FILE_SIZE) while (file_object.get_offset() < to_read): offset = file_object.get_offset() try: (cache_entry, _) = self._ReadCacheEntry(file_object, display_name, self._MINIMUM_BLOCK_SIZE) record_size = ((self._CACHE_ENTRY_HEADER_SIZE + cache_entry.request_size) + cache_entry.information_size) if (record_size >= 4096): block_size = 4096 elif (record_size >= 1024): block_size = 1024 else: block_size = 256 return self.FIREFOX_CACHE_CONFIG(block_size, offset) except IOError: logger.debug('[{0:s}] {1:s}:{2:d}: Invalid record.'.format(self.NAME, display_name, offset)) raise errors.UnableToParseFile('Could not find a valid cache record. Not a Firefox cache file.')
Determine cache file block size. Args: file_object (dfvfs.FileIO): a file-like object. display_name (str): display name. Returns: firefox_cache_config: namedtuple containing the block size and first record offset. Raises: UnableToParseFile: if no valid cache record could be found.
codesearchnet
def cooccurrences(self, domains): api_name = 'opendns-cooccurrences' fmt_url_path = u'recommendations/name/{0}.json' return self._multi_get(api_name, fmt_url_path, domains)
Get the domains related to input domains. Args: domains: an enumerable of string domain names Returns: An enumerable of string domain names
juraj-google-style
def circuit_to_quirk_url(circuit: circuits.Circuit, prefer_unknown_gate_to_failure: bool=False, escape_url=True) -> str: circuit = circuit.copy() linearize_circuit_qubits(circuit) cols = [] for moment in circuit: can_merges = [] for op in moment.operations: for (col, can_merge) in _to_quirk_cols(op, prefer_unknown_gate_to_failure): if can_merge: can_merges.append(col) else: cols.append(col) if can_merges: merged_col = ([1] * max((len(e) for e in can_merges))) for col in can_merges: for i in range(len(col)): if (col[i] != 1): merged_col[i] = col[i] cols.append(merged_col) circuit_json = json.JSONEncoder(ensure_ascii=False, separators=(',', ':'), sort_keys=True).encode({'cols': cols}) if escape_url: suffix = urllib.parse.quote(circuit_json) else: suffix = circuit_json return 'http://algassert.com/quirk#circuit={}'.format(suffix)
Returns a Quirk URL for the given circuit. Args: circuit: The circuit to open in Quirk. prefer_unknown_gate_to_failure: If not set, gates that fail to convert will cause this function to raise an error. If set, a URL containing bad gates will be generated. (Quirk will open the URL, and replace the bad gates with parse errors, but still get the rest of the circuit.) escape_url: If set, the generated URL will have special characters such as quotes escaped using %. This makes it possible to paste the URL into forums and the command line and etc and have it properly parse. If not set, the generated URL will be more compact and human readable (and can still be pasted directly into a browser's address bar). Returns: A URL string that opens the given circuit in Quirk.
codesearchnet
def _prepare_tables(self): values = torch.tensor([[[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]], [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]]]) row_index = IndexMap(indices=torch.tensor([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 0, 0], [1, 1, 1], [2, 2, 2]]]), num_segments=3, batch_dims=1) col_index = IndexMap(indices=torch.tensor([[[0, 0, 1], [0, 0, 1], [0, 0, 1]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]]), num_segments=3, batch_dims=1) return (values, row_index, col_index)
Prepares two tables, both with three distinct rows. The first table has two columns:
1.0, 2.0 | 3.0
2.0, 0.0 | 1.0
1.0, 3.0 | 4.0
The second table has three columns:
1.0 | 2.0 | 3.0
2.0 | 0.0 | 1.0
1.0 | 3.0 | 4.0
Returns: SegmentedTensors with the tables.
github-repos
def str(name, default=None, allow_none=False, fallback=None): value = read(name, default, allow_none, fallback=fallback) if ((value is None) and allow_none): return None else: return builtins.str(value).strip()
Get a string based environment value or the default. Args: name: The environment variable name default: The default value to use if no environment variable is found allow_none: If the return value can be `None` (i.e. optional)
codesearchnet
def is_deterministic(self): return False
Whether this coder is guaranteed to encode values deterministically. A deterministic coder is required for key coders in GroupByKey operations to produce consistent results. For example, note that the default coder, the PickleCoder, is not deterministic: the ordering of pickled entries in maps may vary across executions since there is no defined order, and such a coder is not in general suitable for usage as a key coder in GroupByKey operations, since each instance of the same key may be encoded differently. Returns: Whether coder is deterministic.
github-repos
def _cauchy_equation(wavelength, coefficients): n = 0.0 for (i, c) in enumerate(coefficients): exponent = (2 * i) n += (c / (wavelength ** exponent)) return n
Helpful function to evaluate Cauchy equations. Args: wavelength (float, list, None): The wavelength(s) the Cauchy equation will be evaluated at. coefficients (list): A list of the coefficients of the Cauchy equation. Returns: float, list: The refractive index at the target wavelength(s).
codesearchnet
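The helper above evaluates the Cauchy form n(lambda) = c0 + c1/lambda**2 + c2/lambda**4 + ...; a worked example with made-up coefficients (units must match whatever the coefficients were fitted in):
    _cauchy_equation(0.5, [1.5046, 0.00420])
    # = 1.5046 + 0.00420 / 0.5**2
    # = 1.5046 + 0.0168 = 1.5214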
def _calculate_scores(self, query, key): q_reshaped = array_ops.expand_dims(query, axis=-2) k_reshaped = array_ops.expand_dims(key, axis=-3) if self.use_scale: scale = self.scale else: scale = 1.0 return math_ops.reduce_sum(scale * math_ops.tanh(q_reshaped + k_reshaped), axis=-1)
Calculates attention scores as a nonlinear sum of query and key. Args: query: Query tensor of shape `[batch_size, Tq, dim]`. key: Key tensor of shape `[batch_size, Tv, dim]`. Returns: Tensor of shape `[batch_size, Tq, Tv]`.
github-repos
def request_via_socket(sock, search_target): msgparts = dict(HOST=MCAST_IP_PORT, MAN='"ssdp:discover"', MX='3', ST=search_target) msg = encode_request('M-SEARCH * HTTP/1.1', **msgparts) sock.sendto(msg, (MCAST_IP, MCAST_PORT))
Send an SSDP search request via the provided socket. Args: sock: A socket suitable for use to send a broadcast message - preferably one created by :py:func:`make_socket`. search_target (string): A :term:`resource type` target to search for.
codesearchnet
def add_gemini_query(self, name, query): logger.info('Adding query {0} with text {1}'.format(name, query)) new_query = GeminiQuery(name=name, query=query) self.session.add(new_query) self.save() return new_query
Add a user defined gemini query Args: name (str) query (str)
codesearchnet
def codeblocks(start=None, end=None, full=True): if full: for function in functions(start, end): fc = FlowChart(f=function.func_t) for block in fc: (yield block) else: (start, end) = fix_addresses(start, end) for code_block in FlowChart(bounds=(start, end)): (yield code_block)
Get all `CodeBlock`s in a given range. Args: start - start address of the range. If `None` uses IDB start. end - end address of the range. If `None` uses IDB end. full - `True` is required to change node info (e.g. color). `False` causes faster iteration.
codesearchnet
def load(self, key: str) -> _ModelLoadStats: if key in self._tag_map: self._tag_map.move_to_end(key) return _ModelLoadStats(self._tag_map[key], None, None) else: self._tag_map[key] = uuid.uuid4().hex tag = self._tag_map[key] mh = self._mh_map[key] if self._max_models is not None and self._max_models < len(self._tag_map): tag_to_remove = self._tag_map.popitem(last=False)[1] shared_handle, model_to_remove = self._proxy_map[tag_to_remove] shared_handle.release(model_to_remove) del self._proxy_map[tag_to_remove] memory_before = _get_current_process_memory_in_bytes() start_time = _to_milliseconds(time.time_ns()) shared_handle = multi_process_shared.MultiProcessShared(mh.load_model, tag=tag) model_reference = shared_handle.acquire() self._proxy_map[tag] = (shared_handle, model_reference) memory_after = _get_current_process_memory_in_bytes() end_time = _to_milliseconds(time.time_ns()) return _ModelLoadStats(tag, end_time - start_time, memory_after - memory_before)
Loads the appropriate model for the given key into memory. Args: key: the key associated with the model we'd like to load. Returns: _ModelLoadStats with tag, byte size, and latency to load the model. If the model was already loaded, byte size/latency will be None.
github-repos
def chain(processor_list: Sequence[Processor | PartProcessor]) -> Processor: if not processor_list: raise ValueError('processor_list is empty') chain_processor = processor_list[0] for p in processor_list[1:]: chain_processor = chain_processor + p if isinstance(chain_processor, PartProcessor): chain_processor = chain_processor.to_processor() return chain_processor
Chain a sequence of processors. Args: processor_list: list of part processors or generic processors. Returns: A processor consisting of the chain of all the processors in the list. The execution is sequential from the first processor to the last but parts are processed concurrently overall.
github-repos
def _int_to_pos(self, flat_position): return ((flat_position % self.env.action_space.screen_shape[0]), (flat_position % self.env.action_space.screen_shape[1]))
Returns x, y from flat_position integer. Args: flat_position: flattened position integer Returns: x, y
codesearchnet
def search_rule_by_id(self, ruleID) -> Rule: for r in self.rules: if r.id == ruleID: return r return None
searches a rule by given id Args: ruleID(str): the rule to search for Returns the rule object or None if it couldn't find a rule
juraj-google-style
def __init__(self, text_encoder_config=None, data="clean100", **kwargs): if data not in _DATA_OPTIONS: raise ValueError("data must be one of %s" % _DATA_OPTIONS) name = kwargs.get("name") if name is None: encoder_name = ( text_encoder_config.name if text_encoder_config else "plain_text") data_name = data name = "%s_%s" % (data_name, encoder_name) kwargs["name"] = name description = kwargs.get("description") if description is None: if text_encoder_config: encoder_description = "Transcriptions use the %s" % ( text_encoder_config.encoder_cls.__name__) else: encoder_description = "Transcriptions are in plain text." if data == "all": data_description = "Uses all data." else: data_description = ("Uses only clean data,%s including train-clean-360." % ("" if data == "clean360" else " not")) description = "%s %s" % (data_description, encoder_description) kwargs["description"] = description super(LibrispeechConfig, self).__init__(**kwargs) self.text_encoder_config = text_encoder_config self.data = data
Constructs a LibrispeechConfig. Args: text_encoder_config: `tfds.features.text.TextEncoderConfig`, configuration for the `tfds.features.text.TextEncoder` used for the text feature. data: `str`, one of `(clean100, clean360, all)`. `clean100` uses only the clean data without `train-clean-360`. `clean360` uses clean data with `train-clean-360`. `all` uses all the data. **kwargs: keyword arguments forwarded to super.
juraj-google-style
def check_for_missing_options(config): for (section_name, section) in config: for (option_name, option) in section: if (option.required and (option.value is None)): raise exc.MissingRequiredOption('Option {0} in namespace {1} is required.'.format(option_name, section_name)) return config
Iter over a config and raise if a required option is still not set. Args: config (confpy.core.config.Configuration): The configuration object to validate. Raises: MissingRequiredOption: If any required options are not set in the configuration object. Required options with default values are considered set and will not cause this function to raise.
codesearchnet
def _alephResultToDict(dom): result = {} for i in dom.childs: if not i.isOpeningTag(): continue keyword = i.getTagName().strip() value = _tryConvertToInt(i.getContent().strip()) if keyword in result: if isinstance(result[keyword], list): result[keyword].append(value) else: result[keyword] = [result[keyword], value] else: result[keyword] = value return result
Convert part of non-nested XML to :py:class:`dict`. Args: dom (HTMLElement tree): pre-parsed XML (see dhtmlparser). Returns: dict: with python data
juraj-google-style
def lookup_instances(fragment, verbose=True, filter_by_key=True): def vprint(*args): if verbose: print(*args) region = get_region() client = get_ec2_client() ec2 = get_ec2_resource() response = client.describe_instances() assert is_good_response(response) instance_list = [] for instance in ec2.instances.all(): if instance.state['Name'] != 'running': continue name = get_name(instance) if (fragment in name or fragment in str(instance.public_ip_address) or fragment in str(instance.id) or fragment in str(instance.private_ip_address)): instance_list.append((util.toseconds(instance.launch_time), instance)) sorted_instance_list = reversed(sorted(instance_list, key=itemgetter(0))) filtered_instance_list = [] vprint("Using region ", region) for (ts, instance) in sorted_instance_list: if filter_by_key and instance.key_name != get_keypair_name(): vprint(f"Got key {instance.key_name}, expected {get_keypair_name()}") continue filtered_instance_list.append(instance) return filtered_instance_list
Returns ec2.Instance object whose name contains fragment, in reverse order of launching (ie, most recent instance first). Optionally filters by key, only including instances launched with key_name matching current username. args: verbose: print information about all matching instances found filter_by_key if True, ignore instances that are not launched with current user's default key
juraj-google-style
def __setattr__(self, name, value): if name in self.__by_name or name.startswith('_Message__'): object.__setattr__(self, name, value) else: raise AttributeError("May not assign arbitrary value %s " "to message %s" % (name, type(self).__name__))
Change set behavior for messages. Messages may only be assigned values that are fields. Does not try to validate field when set. Args: name: Name of field to assign to. value: Value to assign to field. Raises: AttributeError when trying to assign value that is not a field.
juraj-google-style
def __init__(self, port=CONTROLLER_PORT, easgd_alpha=0.5, start_halving_at=6, end_at=10, sync_freq=10, halving_freq=1, valid_freq=1500, learning_rate=0.1, log_path=None): Controller.__init__(self, port) self.epoch_start_halving = start_halving_at self.end_at = end_at self.sync_freq = sync_freq self.start_time = None self.rand = np.random.RandomState(3) self.epoch = 0 self._current_iter = 0 self._iters_from_last_valid = 0 self._evaluating = False self._valid_freq = valid_freq self._halving_freq = halving_freq self._done = False self._lr = learning_rate self._easgd_alpha = easgd_alpha self._training_names = [] self._evaluation_names = [] self._best_valid_cost = sys.float_info.max self._lock = Lock() self.num_train_batches = 0 self.batch_pool = [] self._train_costs = [] self._epoch_start_time = None self.prepared_worker_pool = set() self.log_file = open(log_path, "w") if log_path else None if log_path: logging.info("write logs into {}".format(log_path)) logging.info("multi-gpu server is listening port {}".format(port))
Initialize the controller. Args: port (int): port for the controller to listen on easgd_alpha (float)
juraj-google-style
def get_metadata(self, key: str, per_trial: bool=True) -> Optional[Any]:
Gets metadata for current trial or current sampling. Args: key: A string as key to metadata. per_trial: If True, the key is retrieved per current trial. Otherwise, it is retrieved per current sampling. Returns: A value that can be deserialized by `pg.from_json_str`.
github-repos
def _check_wiremap_validity(self, wire_map, keymap, valmap): for k, v in wire_map.items(): kname = "%s[%d]" % (k[0].name, k[1]) vname = "%s[%d]" % (v[0].name, v[1]) if k not in keymap: raise DAGCircuitError("invalid wire mapping key %s" % kname) if v not in valmap: raise DAGCircuitError("invalid wire mapping value %s" % vname) if type(k) is not type(v): raise DAGCircuitError("inconsistent wire_map at (%s,%s)" % (kname, vname))
Check that the wiremap is consistent. Check that the wiremap refers to valid wires and that those wires have consistent types. Args: wire_map (dict): map from (register,idx) in keymap to (register,idx) in valmap keymap (dict): a map whose keys are wire_map keys valmap (dict): a map whose keys are wire_map values Raises: DAGCircuitError: if wire_map not valid
juraj-google-style
def word_fts(self, word): return list(map(self.fts, self.segs(word)))
Return featural analysis of `word` Args: word (unicode): one or more IPA segments Returns: list: list of lists (value, feature) tuples where each inner list corresponds to a segment in `word`
codesearchnet
def batch_shape_tensor(self, name='batch_shape_tensor'): with self._name_scope(name): return self._batch_shape_tensor()
Shape of batch dimensions of this operator, determined at runtime. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding `[B1,...,Bb]`. Args: name: A name for this `Op`. Returns: `int32` `Tensor`
github-repos
def podcast_episodes(self, *, device_id=None): if (device_id is None): device_id = self.device_id podcast_episode_list = [] for chunk in self.podcast_episodes_iter(device_id=device_id, page_size=49995): podcast_episode_list.extend(chunk) return podcast_episode_list
Get a listing of podcast episodes for all subscribed podcasts. Parameters: device_id (str, Optional): A mobile device ID. Default: Use ``device_id`` of the :class:`MobileClient` instance. Returns: list: Podcast episode dicts.
codesearchnet
def __init__(self, query): self._timeseries_list = list(query.iter(headers_only=True)) self._metric_type = query.metric_type
Initializes the QueryMetadata given the query object. Args: query: A Query object.
juraj-google-style
class DbrxAttentionConfig(PretrainedConfig): base_config_key = 'attn_config' def __init__(self, attn_pdrop: float=0.0, clip_qkv: Optional[float]=None, kv_n_heads: int=1, rope_theta: float=10000.0, **kwargs: Any): super().__init__(**kwargs) self.attn_pdrop = attn_pdrop self.clip_qkv = clip_qkv self.kv_n_heads = kv_n_heads self.rope_theta = rope_theta for k in ['model_type', 'attn_implementation', 'transformers_version', '_commit_hash', 'torch_dtype']: if k in kwargs: kwargs.pop(k) if len(kwargs) != 0: raise ValueError(f'Found unknown kwargs={kwargs!r}')
Configuration class for Dbrx Attention. [`DbrxAttention`] class. It is used to instantiate attention layers according to the specified arguments, defining the layers architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: attn_pdrop (`float`, *optional*, defaults to 0.0): The dropout probability for the attention layers. clip_qkv (`float`, *optional*): If set, clip the queries, keys, and values in the attention layer to this value. kv_n_heads (`int`, *optional*, defaults to 1): For grouped_query_attention only, allow user to specify number of kv heads. rope_theta (`float`, *optional*, defaults to 10000.0): The base frequency for rope.
github-repos
def uniform_row_length(self): return self._uniform_row_length
Returns the length of each row in this partition, if rows are uniform. If all rows in this `RowPartition` have the same length, then this returns that length as a scalar integer `Tensor`. Otherwise, it returns `None`. Returns: scalar Tensor with `type=self.dtype`, or `None`.
github-repos
def _test_dir(self, test_name): test_dir = os.path.join(self.get_temp_dir(), test_name) if os.path.isdir(test_dir): for f in glob.glob('%s/*' % test_dir): os.remove(f) else: os.makedirs(test_dir) return test_dir
Create an empty dir to use for tests. Args: test_name: Name of the test. Returns: Absolute path to the test directory.
github-repos
def try_claim(self, position): raise NotImplementedError
Atomically determines if a record at a split point is within the range. This method should be called **if and only if** the record is at a split point. This method may modify the internal state of the ``RangeTracker`` by updating the last-consumed position to ``position``. ** Thread safety ** Methods of the class ``RangeTracker`` including this method may get invoked by different threads, hence must be made thread-safe, e.g. by using a single lock object. Args: position: starting position of a record being read by a source. Returns: ``True``, if the given position falls within the current range, returns ``False`` otherwise.
github-repos
def up_to(self, term: str) -> str: end = self.input.find(term, self.offset) if (end < 0): raise EndOfInput(self) res = self.input[self.offset:end] self.offset = (end + 1) return res
Parse and return segment terminated by the first occurrence of a string. Args: term: Terminating string. Raises: EndOfInput: If `term` does not occur in the rest of the input text.
codesearchnet
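A small usage sketch for `up_to` above (hypothetical parser instance; note the terminator itself is consumed but not returned):
    # Assuming a parser p whose input is "key: value" and whose offset starts at 0:
    p.up_to(':')    # returns 'key'; p.offset now points just past the ':'
    p.up_to('!')    # raises EndOfInput, since '!' never occurs in the remaining text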
def advance_for_next_slice(self, recovery_slice=False): self.slice_start_time = None self.slice_request_id = None self.slice_retries = 0 self.acquired_once = False if recovery_slice: self.slice_id += 2 else: self.slice_id += 1
Advance self for next slice. Args: recovery_slice: True if this slice is running recovery logic. See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery for more info.
codesearchnet
def log_every_n(level, msg, n, *args): count = _GetNextLogCountPerToken(_GetFileAndLine()) log_if(level, msg, not (count % n), *args)
Log 'msg % args' at level 'level' once per 'n' times. Logs the 1st call, (N+1)st call, (2N+1)st call, etc. Not threadsafe. Args: level: The level at which to log. msg: The message to be logged. n: The number of times this should be called before it is logged. *args: The args to be substituted into the msg.
juraj-google-style
def GetShadowMap(self, since=None): return ShadowUpdateGetter(self.conf).GetUpdates(source=self, search_base=self.conf['base'], search_filter=self.conf['filter'], search_scope=self.conf['scope'], since=since)
Return the shadow map from this source. Args: since: Get data only changed since this timestamp (inclusive) or None for all data. Returns: instance of ShadowMap
github-repos
def save_component(self, component_name, save_path): component = self.get_component(component_name=component_name) self._validate_savable(component=component, component_name=component_name) return component.save(sess=self.session, save_path=save_path)
Saves a component of this model to the designated location. Args: component_name: The component to save. save_path: The location to save to. Returns: Checkpoint path where the component was saved.
codesearchnet
def model(x): hidden_act = dense_layer(hidden_weights, x) logits_act = dense_layer(output_weights, hidden_act, tf.identity) y = tf.nn.softmax(logits_act) return y
Feed forward function of the model. Args: x: a (?, 28*28) tensor consisting of the feature inputs for a batch of examples. Returns: A (?, 10) tensor containing the class scores for each example.
github-repos
def swo_read(self, offset, num_bytes, remove=False): buf_size = ctypes.c_uint32(num_bytes) buf = (ctypes.c_uint8 * num_bytes)(0) self._dll.JLINKARM_SWO_Read(buf, offset, ctypes.byref(buf_size)) buf_size = buf_size.value if remove: self.swo_flush(buf_size) return list(buf)[:buf_size]
Reads data from the SWO buffer. The data read is not automatically removed from the SWO buffer after reading unless ``remove`` is ``True``. Otherwise the callee must explicitly remove the data by calling ``.swo_flush()``. Args: self (JLink): the ``JLink`` instance offset (int): offset of first byte to be retrieved num_bytes (int): number of bytes to read remove (bool): if data should be removed from buffer after read Returns: A list of bytes read from the SWO buffer.
codesearchnet
def sholl_frequency(nrn, neurite_type=NeuriteType.all, step_size=10): nrns = neuron_population(nrn) neurite_filter = is_type(neurite_type) min_soma_edge = float('Inf') max_radii = 0 neurites_list = [] for neuron in nrns: neurites_list.extend(((neurites, neuron.soma.center) for neurites in neuron.neurites if neurite_filter(neurites))) min_soma_edge = min(min_soma_edge, neuron.soma.radius) max_radii = max(max_radii, np.max(np.abs(bounding_box(neuron)))) radii = np.arange(min_soma_edge, (max_radii + step_size), step_size) ret = np.zeros_like(radii) for (neurites, center) in neurites_list: ret += sholl_crossings(neurites, center, radii) return ret
perform Sholl frequency calculations on a population of neurites Args: nrn(morph): nrn or population neurite_type(NeuriteType): which neurites to operate on step_size(float): step size between Sholl radii Note: Given a neuron, the soma center is used for the concentric circles, which range from the soma radii, and the maximum radial distance in steps of `step_size`. When a population is given, the concentric circles range from the smallest soma radius to the largest radial neurite distance. Finally, each segment of the neuron is tested, so a neurite that bends back on itself, and crosses the same Sholl radius will get counted as having crossed multiple times.
codesearchnet
def center_crop(self, image: 'torch.Tensor', size: SizeDict, **kwargs) -> 'torch.Tensor': if size.height is None or size.width is None: raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}") image_height, image_width = image.shape[-2:] crop_height, crop_width = (size.height, size.width) if crop_width > image_width or crop_height > image_height: padding_ltrb = [(crop_width - image_width) // 2 if crop_width > image_width else 0, (crop_height - image_height) // 2 if crop_height > image_height else 0, (crop_width - image_width + 1) // 2 if crop_width > image_width else 0, (crop_height - image_height + 1) // 2 if crop_height > image_height else 0] image = F.pad(image, padding_ltrb, fill=0) image_height, image_width = image.shape[-2:] if crop_width == image_width and crop_height == image_height: return image crop_top = int((image_height - crop_height) / 2.0) crop_left = int((image_width - crop_width) / 2.0) return F.crop(image, crop_top, crop_left, crop_height, crop_width)
Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. Args: image (`"torch.Tensor"`): Image to center crop. size (`Dict[str, int]`): Size of the output image. Returns: `torch.Tensor`: The center cropped image.
github-repos
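A worked size example for `center_crop` above: cropping a 480x640 (height x width) tensor to 224x224 needs no padding, and the crop offsets come out as:
    # crop_top  = int((480 - 224) / 2.0) = 128
    # crop_left = int((640 - 224) / 2.0) = 208
    # result: image[..., 128:352, 208:432], shape (..., 224, 224)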
def forward(self, hidden_features): hidden_features = hidden_features.transpose(-1, -2) if self.head_aggregation == 'use_last': hidden_features = hidden_features[..., -1] elif self.head_aggregation == 'max_pool': hidden_features = hidden_features.max(dim=-1).values elif self.head_aggregation == 'avg_pool': hidden_features = hidden_features.mean(dim=-1) if self.flatten: hidden_features = self.flatten(hidden_features) hidden_features = self.dropout(hidden_features) hidden_features = self.projection(hidden_features) if self.distribution_output is None and self.output_range is not None: hidden_features = torch.sigmoid(hidden_features) * (self.output_range[1] - self.output_range[0]) + self.output_range[0] return hidden_features
Args: hidden_features (`torch.Tensor` of shape `(batch_size x num_patch x d_model)` in `flatten` mode or `(batch_size x n_vars x num_patch x d_model)` in `common_channel`/`mix_channel` mode.): Input hidden features. Returns: `torch.Tensor` of shape `(batch_size x num_targets)`.
github-repos
def process_rule(edges: Edges, ast: Function, rule: Mapping[(str, Any)], spec: BELSpec): ast_type = ast.__class__.__name__ trigger_functions = rule.get('trigger_function', []) trigger_types = rule.get('trigger_type', []) rule_subject = rule.get('subject') rule_relation = rule.get('relation') rule_object = rule.get('object') log.debug(f'Running {rule_relation} Type: {ast_type}') if isinstance(ast, Function): function_name = ast.name args = ast.args parent_function = ast.parent_function if (function_name in trigger_functions): if (rule_subject == 'trigger_value'): subject = ast if (rule_object == 'args'): for arg in args: log.debug(f'1: {subject} {arg}') edge_ast = BELAst(subject, rule_relation, arg, spec) edges.append(edge_ast) elif ((rule_object == 'parent_function') and parent_function): log.debug(f'2: {subject} {parent_function}') edge_ast = BELAst(subject, rule_relation, parent_function, spec) edges.append(edge_ast) elif (ast_type in trigger_types): if (rule_subject == 'trigger_value'): subject = ast if (rule_object == 'args'): for arg in args: log.debug(f'3: {subject} {arg}') edge_ast = BELAst(subject, rule_relation, arg, spec) edges.append(edge_ast) elif ((rule_object == 'parent_function') and parent_function): log.debug(f'4: {subject} {parent_function}') edge_ast = BELAst(subject, rule_relation, parent_function, spec) edges.append(edge_ast) if isinstance(ast, NSArg): term = '{}:{}'.format(ast.namespace, ast.value) parent_function = ast.parent_function if (ast_type in trigger_types): if (rule_subject == 'trigger_value'): subject = term if (rule_object == 'args'): for arg in args: log.debug(f'5: {subject} {arg}') edge_ast = BELAst(subject, rule_relation, arg, spec) edges.append(edge_ast) elif ((rule_object == 'parent_function') and parent_function): log.debug(f'6: {subject} {parent_function}') edge_ast = BELAst(subject, rule_relation, parent_function, spec) edges.append(edge_ast) if hasattr(ast, 'args'): for arg in ast.args: process_rule(edges, arg, rule, spec)
Process computed edge rule Recursively processes BELAst versus a single computed edge rule Args: edges (List[Tuple[Union[Function, str], str, Function]]): BEL Edge ASTs ast (Function): BEL Function AST rule (Mapping[str, Any]: computed edge rule
codesearchnet
def load_state(self, in_path): with open(in_path, 'r') as infile: state = json.load(infile) self.restore_state(state)
Load the current state of this emulated object from a file. The file should have been produced by a previous call to save_state. Args: in_path (str): The path to the saved state dump that you wish to load.
codesearchnet
def apply_op(input_layer, operation, *op_args, **op_kwargs): return input_layer.with_tensor( operation(input_layer.tensor, *op_args, **op_kwargs))
Applies the given operation to the input layer without adding any summaries. Args: input_layer: The input layer for this op. operation: An operation that takes a tensor and the supplied args. *op_args: Extra arguments for operation. **op_kwargs: Keyword arguments for the operation. Returns: A new layer with operation applied.
juraj-google-style
def convert_to_rgb(image: ImageInput) -> ImageInput: if not isinstance(image, PIL.Image.Image): return image if image.mode == 'RGB': return image image_rgba = image.convert('RGBA') background = Image.new('RGBA', image_rgba.size, (255, 255, 255)) alpha_composite = Image.alpha_composite(background, image_rgba) alpha_composite = alpha_composite.convert('RGB') return alpha_composite
Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image as is. Args: image (Image): The image to convert.
github-repos
def add(self, method_mask, path, func): is_err = (len(signature(func).parameters) == 3) is_subchain = isinstance(func, MiddlewareChain) tup = MiddlewareNode(func=func, mask=method_mask, path=path, is_errorhandler=is_err, is_subchain=is_subchain) self.mw_list.append(tup)
Add a function to the middleware chain. This function is returned when iterating over the chain with matching method and path. Args: method_mask (growler.http.HTTPMethod): A bitwise mask intended to match specific request methods. path (str or regex): An object with which to compare request urls func (callable): The function to be yielded from the generator upon a request matching the method_mask and path
codesearchnet
def get_float(self, min_float=_MIN_FLOAT, max_float=_MAX_FLOAT): return self.fdp.ConsumeFloatInRange(min_float, max_float)
Consume a float with given constraints. Args: min_float: Minimum allowed float. max_float: Maximum allowed float. Returns: Consumed float based on input bytes and constraints.
github-repos
def _unary_op(cls, x: 'TensorFluent', op: Callable[[tf.Tensor], tf.Tensor], dtype: tf.DType) -> 'TensorFluent': x = x.cast(dtype) t = op(x.tensor) scope = x.scope.as_list() batch = x.batch return TensorFluent(t, scope, batch=batch)
Returns a TensorFluent for the unary `op` applied to fluent `x`. Args: x: The input fluent. op: The unary operation. dtype: The output's data type. Returns: A TensorFluent wrapping the unary operator's output.
juraj-google-style
def bleu_score(predictions, labels, **unused_kwargs): outputs = tf.to_int32(tf.argmax(predictions, axis=(- 1))) outputs = tf.squeeze(outputs, axis=[(- 1), (- 2)]) labels = tf.squeeze(labels, axis=[(- 1), (- 2)]) bleu = tf.py_func(compute_bleu, (labels, outputs), tf.float32) return (bleu, tf.constant(1.0))
BLEU score computation between labels and predictions. An approximate BLEU scoring method since we do not glue word pieces or decode the ids and tokenize the output. By default, we use ngram order of 4 and use brevity penalty. Also, this does not have beam search. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: bleu: int, approx bleu score
codesearchnet
def _ParseFileData(self, knowledge_base, file_object): text_file_object = dfvfs_text_file.TextFile(file_object, encoding='utf-8') product_values = {} for line in text_file_object.readlines(): line = line.strip() if line.startswith('#'): continue key, value = line.split('=') key = key.strip().upper() value = value.strip().strip('"') product_values[key] = value if not knowledge_base.GetValue('operating_system_product'): system_product = product_values.get('DISTRIB_DESCRIPTION', None) if system_product: knowledge_base.SetValue('operating_system_product', system_product)
Parses file content (data) for system product preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. file_object (dfvfs.FileIO): file-like object that contains the artifact value data. Raises: errors.PreProcessFail: if the preprocessing fails.
juraj-google-style
def end_at(self, document_fields): return self._cursor_helper(document_fields, before=False, start=False)
End query results at a particular document value. The result set will **include** the document specified by ``document_fields``. If the current query already has specified an end cursor -- either via this method or :meth:`~.firestore_v1beta1.query.Query.end_before` -- this will overwrite it. When the query is sent to the server, the ``document_fields`` will be used in the order given by fields set by :meth:`~.firestore_v1beta1.query.Query.order_by`. Args: document_fields (Union[~.firestore_v1beta1.\ document.DocumentSnapshot, dict, list, tuple]): a document snapshot or a dictionary/list/tuple of fields representing a query results cursor. A cursor is a collection of values that represent a position in a query result set. Returns: ~.firestore_v1beta1.query.Query: A query with cursor. Acts as a copy of the current query, modified with the newly added "end at" cursor.
codesearchnet
def has_option(self, section, option): if section not in self.sections(): return False else: option = self.optionxform(option) return option in self[section]
Checks for the existence of a given option in a given section. Args: section (str): name of section option (str): name of option Returns: bool: whether the option exists in the given section
juraj-google-style
def publish(cls, message, client_filter=None): with cls._lock: for client in cls.subscribers: if (not client_filter) or client_filter(client): client.send(message)
Publish messages to subscribers. Args: message: The message to publish. client_filter: A filter function to call passing in each client. Only clients for whom the function returns True will have the message sent to them.
juraj-google-style
def time_series(timefile, colnames): if (not timefile.is_file()): return None data = pd.read_csv(timefile, delim_whitespace=True, dtype=str, header=None, skiprows=1, index_col=0, engine='c', memory_map=True, error_bad_lines=False, warn_bad_lines=False) data = data.apply(pd.to_numeric, raw=True, errors='coerce') rows_to_del = [] irow = (len(data) - 1) while (irow > 0): iprev = (irow - 1) while ((iprev >= 0) and (data.index[irow] <= data.index[iprev])): rows_to_del.append(iprev) iprev -= 1 irow = iprev if rows_to_del: rows_to_keep = (set(range(len(data))) - set(rows_to_del)) data = data.take(list(rows_to_keep), convert=False) ncols = data.shape[1] _tidy_names(colnames, ncols) data.columns = colnames return data
Read temporal series text file. If :data:`colnames` is too long, it will be truncated. If it is too short, additional numeric column names from 0 to N-1 will be attributed to the N extra columns present in :data:`timefile`. Args: timefile (:class:`pathlib.Path`): path of the time.dat file. colnames (list of names): names of the variables expected in :data:`timefile` (may be modified). Returns: :class:`pandas.DataFrame`: Time series, with the variables in columns and the time steps in rows.
codesearchnet
async def verify_worker_impls(chain): valid_worker_impls = get_valid_worker_impls() for obj in chain.get_all_links_in_chain(): worker_impl = obj.worker_impl log.info("Verifying {} {} as a {} task...".format(obj.name, obj.task_id, worker_impl)) await valid_worker_impls[worker_impl](chain, obj)
Verify the task type (e.g. decision, build) of each link in the chain. Args: chain (ChainOfTrust): the chain we're operating on Raises: CoTError: on failure
juraj-google-style
def get_group_by_id(self, group_id: str) -> typing.Optional['Group']: VALID_POSITIVE_INT.validate(group_id, 'get_group_by_id', exc=ValueError) for group in self.groups: if group.group_id == group_id: return group return None
Gets a group by id Args: group_id: group id Returns: Group
juraj-google-style
def reset(self, indices=None): if indices is None: indices = np.arange(len(self._envs)) if self._blocking: observs = [self._envs[index].reset() for index in indices] else: observs = [self._envs[index].reset(blocking=False) for index in indices] observs = [observ() for observ in observs] observ = np.stack(observs) return observ
Reset the environment and convert the resulting observation. Args: indices: The batch indices of environments to reset; defaults to all. Returns: Batch of observations.
juraj-google-style
def _try_load_par_source(source_file_path): prefix_path = source_file_path while True: prefix_path, basename = os.path.split(prefix_path) if not basename: break suffix_path = os.path.normpath(os.path.relpath(source_file_path, start=prefix_path)) if prefix_path.endswith('.par') and os.path.isfile(prefix_path): with zipfile.ZipFile(prefix_path) as z: norm_names = [os.path.normpath(name) for name in z.namelist()] if suffix_path in norm_names: with z.open(z.namelist()[norm_names.index(suffix_path)]) as zf: source_text = zf.read().decode('utf-8') return source_text.split('\n')
Try loading the source code inside a .par file. A .par file is a zip-compressed, self-contained Python executable. It contains the content of individual Python source files that can be read only through extracting from the zip file. Args: source_file_path: The full path to the file inside the .par file. This path should include the path to the .par file itself, followed by the intra-par path, e.g., "/tmp/my_executable.par/org-tensorflow/tensorflow/python/foo/bar.py". Returns: If successful, lines of the source file as a `list` of `str`s. Else, `None`.
github-repos
def sg_summary_activation(tensor, prefix=None, name=None): prefix = '' if prefix is None else prefix + '/' name = prefix + _pretty_name(tensor) if name is None else prefix + name _scalar(name + '/ratio', tf.reduce_mean(tf.cast(tf.greater(tensor, 0), tf.sg_floatx))) _histogram(name + '/ratio-h', tensor)
Register `tensor` to summary report as `activation` Args: tensor: A `Tensor` to log as activation prefix: A `string`. A prefix to display in the tensor board web UI. name: A `string`. A name to display in the tensor board web UI. Returns: None
juraj-google-style
def query_foursquare(point, max_distance, client_id, client_secret): if (not client_id): return [] if (not client_secret): return [] if from_cache(FS_CACHE, point, max_distance): return from_cache(FS_CACHE, point, max_distance) url = (FOURSQUARE_URL % (client_id, client_secret, point.lat, point.lon, max_distance)) req = requests.get(url) if (req.status_code != 200): return [] response = req.json() result = [] venues = response['response']['venues'] for venue in venues: name = venue['name'] distance = venue['location']['distance'] categories = [c['shortName'] for c in venue['categories']] result.append({'label': name, 'distance': distance, 'types': categories, 'suggestion_type': 'FOURSQUARE'}) foursquare_insert_cache(point, result) return result
Queries Squarespace API for a location Args: point (:obj:`Point`): Point location to query max_distance (float): Search radius, in meters client_id (str): Valid Foursquare client id client_secret (str): Valid Foursquare client secret Returns: :obj:`list` of :obj:`dict`: List of locations with the following format: { 'label': 'Coffee house', 'distance': 19, 'types': 'Commerce', 'suggestion_type': 'FOURSQUARE' }
codesearchnet
def _load_from_file(path): config = [] try: with open(path, 'r') as config_file: config = yaml.load(config_file)['normalizations'] except EnvironmentError as e: raise ConfigError('Problem while loading file: %s' % e.args[1] if len(e.args) > 1 else e) except (TypeError, KeyError) as e: raise ConfigError('Config file has an unexpected structure: %s' % e) except yaml.YAMLError: raise ConfigError('Invalid YAML file syntax') return config
Load a config file from the given path. Load all normalizations from the config file received as argument. It expects to find a YAML file with a list of normalizations and arguments under the key 'normalizations'. Args: path: Path to YAML file.
juraj-google-style
async def pipe_to_log(pipe, filehandles=(), level=logging.INFO): while True: line = await pipe.readline() if line: line = to_unicode(line) log.log(level, line.rstrip()) for filehandle in filehandles: print(line, file=filehandle, end="") else: break
Log from a subprocess PIPE. Args: pipe (filehandle): subprocess process STDOUT or STDERR filehandles (list of filehandles, optional): the filehandle(s) to write to. If empty, don't write to a separate file. Defaults to (). level (int, optional): the level to log to. Defaults to ``logging.INFO``.
juraj-google-style
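A minimal usage sketch with an asyncio subprocess; `to_unicode` and the module-level `log` are assumed to be available alongside `pipe_to_log`, and the command is just an example.

import asyncio

async def run_and_log(cmd):
    # Stream the subprocess's stdout through pipe_to_log as it is produced.
    proc = await asyncio.create_subprocess_exec(
        *cmd, stdout=asyncio.subprocess.PIPE)
    with open('task.log', 'w') as log_file:
        await pipe_to_log(proc.stdout, filehandles=[log_file])
    await proc.wait()

asyncio.run(run_and_log(['echo', 'hello']))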
def compute_number_edges(function):
    n = 0
    for node in function.nodes:
        n += len(node.sons)
    return n
Compute the number of edges of the CFG

Args:
    function (core.declarations.function.Function)
Returns:
    int
juraj-google-style
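Only `.nodes` and each node's `.sons` are touched, so a duck-typed stand-in is enough to see what gets counted; the real argument would be a Slither function object.

from types import SimpleNamespace

# Hypothetical three-node CFG: entry -> {a, b}, a -> b.
b = SimpleNamespace(sons=[])
a = SimpleNamespace(sons=[b])
entry = SimpleNamespace(sons=[a, b])
fake_function = SimpleNamespace(nodes=[entry, a, b])

print(compute_number_edges(fake_function))  # 3 outgoing edges in total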
def loads(s, model):
    graphs = penman.loads(s, cls=XMRSCodec)
    xs = [model.from_triples(g.triples()) for g in graphs]
    return xs
Deserialize PENMAN graphs from a string

Args:
    s (str): serialized PENMAN graphs
    model: Xmrs subclass instantiated from decoded triples
Returns:
    a list of objects (of class *model*)
juraj-google-style
def _HandleMetadataUpdate(
        self, metadata_key='', recursive=True, wait=True, timeout=None,
        retry=True):
    exception = None
    while True:
        try:
            return self._GetMetadataUpdate(
                metadata_key=metadata_key, recursive=recursive, wait=wait,
                timeout=timeout)
        except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:
            if not isinstance(e, type(exception)):
                exception = e
                self.logger.error('GET request error retrieving metadata. %s.', e)
            if retry:
                continue
            else:
                break
Wait for a successful metadata response.

Args:
    metadata_key: string, the metadata key to watch for changes.
    recursive: bool, True if we should recursively watch for metadata changes.
    wait: bool, True if we should wait for a metadata change.
    timeout: int, timeout in seconds for returning metadata output.
    retry: bool, True if we should retry on failure.

Returns:
    json, the deserialized contents of the metadata server.
juraj-google-style
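For context, `_GetMetadataUpdate` ultimately boils down to an HTTP GET against the instance metadata server. A standalone sketch of such a request, only meaningful on a GCE VM, using the conventional endpoint and header rather than anything taken from this module:

import urllib.request

def get_metadata_value(key):
    # Conventional GCE metadata endpoint; requires running on a GCE instance.
    url = 'http://metadata.google.internal/computeMetadata/v1/' + key
    request = urllib.request.Request(url, headers={'Metadata-Flavor': 'Google'})
    with urllib.request.urlopen(request, timeout=5) as response:
        return response.read().decode('utf-8')

# e.g. get_metadata_value('instance/hostname')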
def pretty_print_config_to_json(self, configs):
    descriptor = self.get_directory_list_doc(configs)
    return json.dumps(descriptor, sort_keys=True, indent=2,
                      separators=(',', ': '))
JSON string description of a protorpc.remote.Service in a discovery doc.

Args:
    configs: Either a single dict or a list of dicts containing the service
        configurations to list.

Returns:
    string, The directory list document as a JSON string.
codesearchnet
def resource_path(relative_path=None, expect=None):
    if (expect not in (None, 'file', 'folder')):
        raise ArgumentError("Invalid expect parameter, must be None, 'file' or 'folder'",
                            expect=expect)
    this_dir = os.path.dirname(__file__)
    _resource_path = os.path.join(this_dir, '..', 'config')
    if (relative_path is not None):
        path = os.path.normpath(relative_path)
        _resource_path = os.path.join(_resource_path, path)
    if ((expect == 'file') and (not os.path.isfile(_resource_path))):
        raise DataError(("Expected resource %s to be a file and it wasn't" % _resource_path))
    elif ((expect == 'folder') and (not os.path.isdir(_resource_path))):
        raise DataError(("Expected resource %s to be a folder and it wasn't" % _resource_path))
    return os.path.abspath(_resource_path)
Return the absolute path to a resource in iotile-build.

This method finds the path to the `config` folder inside iotile-build,
appends `relative_path` to it and then checks to make sure the desired
file or directory exists. You can specify expect=(None, 'file', or 'folder')
for what you expect to find at the given path.

Args:
    relative_path (str): The relative_path from the config folder to the
        resource in question. This path can be specified using / characters
        on all operating systems since it will be normalized before usage.
        If None is passed, the base config folder will be returned.
    expect (str): What the path should resolve to, which is checked before
        returning, raising a DataError if the check fails. You can pass None
        for no checking, file for checking `os.path.isfile`, or folder for
        checking `os.path.isdir`. Default: None

Returns:
    str: The normalized absolute path to the resource.
codesearchnet
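A short usage sketch; the helper resolves paths relative to iotile-build's bundled config folder, so the file name below is purely hypothetical.

# Base config folder shipped with iotile-build:
config_dir = resource_path(expect='folder')

# A hypothetical file inside it; DataError is raised if it does not exist:
template = resource_path('templates/example.tpl', expect='file')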
def forceSetSlaac(self, slaacAddress):
    # Python 2 style print statements and except syntax retained from the
    # original test-harness code.
    print '%s call forceSetSlaac' % self.port
    print slaacAddress
    try:
        cmd = 'ipaddr add %s' % str(slaacAddress)
        print cmd
        return self.__sendCommand(cmd)[0] == 'Done'
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger("forceSetSlaac() Error: " + str(e))
force to set a slaac IPv6 address to Thread interface

Args:
    slaacAddress: a slaac IPv6 address to be set

Returns:
    True: successful to set slaac address to Thread interface
    False: fail to set slaac address to Thread interface
juraj-google-style
def _matrix_conv(self, m1, m2):
    n = m1[0, 0].shape.as_list()[0]
    if n != m2[0, 0].shape.as_list()[0]:
        raise ValueError(f'The entries in matrices m1 and m2 must have the same dimensions. Received m1[0, 0].shape={m1[0, 0].shape} and m2[0, 0].shape={m2[0, 0].shape}.')
    k = int(np.sqrt(len(m1)))
    l = int(np.sqrt(len(m2)))
    result = {}
    size = k + l - 1
    for i in range(size):
        for j in range(size):
            result[i, j] = array_ops.zeros([n, n], self.dtype)
            for index1 in range(min(k, i + 1)):
                for index2 in range(min(k, j + 1)):
                    if i - index1 < l and j - index2 < l:
                        result[i, j] += math_ops.matmul(m1[index1, index2],
                                                        m2[i - index1, j - index2])
    return result
Matrix convolution.

Args:
    m1: A k x k dictionary, each element is a n x n matrix.
    m2: A l x l dictionary, each element is a n x n matrix.

Returns:
    (k + l - 1) * (k + l - 1) dictionary each element is a n x n matrix.

Raises:
    ValueError: if the entries of m1 and m2 are of different dimensions.
github-repos
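The index arithmetic is easier to see outside TensorFlow; the NumPy sketch below mirrors it and shows how a k x k and an l x l grid of n x n blocks combine into a (k + l - 1) x (k + l - 1) grid.

import numpy as np

def matrix_conv_np(m1, m2):
    # Same loop structure as above, with NumPy arrays instead of TF ops.
    n = m1[0, 0].shape[0]
    k = int(np.sqrt(len(m1)))
    l = int(np.sqrt(len(m2)))
    size = k + l - 1
    result = {}
    for i in range(size):
        for j in range(size):
            acc = np.zeros((n, n))
            for a in range(min(k, i + 1)):
                for b in range(min(k, j + 1)):
                    if i - a < l and j - b < l:
                        acc += m1[a, b] @ m2[i - a, j - b]
            result[i, j] = acc
    return result

# Two 2x2 grids of 3x3 identity blocks convolve into a 3x3 grid of 3x3 blocks.
m1 = {(i, j): np.eye(3) for i in range(2) for j in range(2)}
m2 = {(i, j): np.eye(3) for i in range(2) for j in range(2)}
out = matrix_conv_np(m1, m2)
print(int(np.sqrt(len(out))))  # 3, i.e. k + l - 1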
def _EnforceShapeInvariant(merge_var, next_var):
    if isinstance(merge_var, tensor_lib.Tensor):
        m_shape = merge_var.get_shape()
        n_shape = next_var.get_shape()
        if not _ShapeLessThanOrEqual(n_shape, m_shape):
            enter = merge_var.op.inputs[0].op
            assert util.IsLoopEnter(enter)
            input_t = enter.inputs[0]
            raise ValueError(
                "Input tensor '%s' enters the loop with shape %s, but has shape %s after one iteration. To allow the shape to vary across iterations, use the `shape_invariants` argument of tf.while_loop to specify a less-specific shape." % (input_t.name, input_t.shape, n_shape))
    else:
        raise TypeError(f"'merge_var' must be a Tensor. Received: {type(merge_var)}.")
Check if the shapes of the loops variables are invariants.

Args:
    merge_var: The tensor representing the initial values of the loop
        variables.
    next_var: The tensor representing the values of the loop variables
        after one loop iteration.

Raises:
    ValueError: If any tensor in `merge_var` has a more specific shape than
        its corresponding tensor in `next_var`.
github-repos
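The error message above points callers at `shape_invariants`; a small sketch of how a loop variable that grows each iteration is declared with a less-specific shape (illustrative only, not taken from this module):

import tensorflow as tf

x = tf.constant([[1.0]])
# x gains a row each iteration, so its static shape cannot stay [1, 1]:
result = tf.while_loop(
    cond=lambda x: tf.shape(x)[0] < 4,
    body=lambda x: [tf.concat([x, x[-1:]], axis=0)],
    loop_vars=[x],
    shape_invariants=[tf.TensorShape([None, 1])],
)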
def process(self, element):
    input_ids = self._tokenizer(
        element, return_tensors='pt', padding='max_length',
        max_length=512).input_ids
    return input_ids
Process the raw text input to a format suitable for
T5ForConditionalGeneration model inference

Args:
    element: A string of text

Returns:
    A tokenized example that can be read by the T5ForConditionalGeneration
github-repos
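A standalone sketch of the same tokenization step, assuming the `transformers` and `torch` packages and the public `t5-small` checkpoint (downloaded on first use):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('t5-small')
input_ids = tokenizer(
    'translate English to German: The house is wonderful.',
    return_tensors='pt', padding='max_length', max_length=512).input_ids
print(input_ids.shape)  # torch.Size([1, 512])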
def __init__(self, token):
    # The full MesoWest API base URL was truncated in the source text;
    # only the scheme fragment survives here.
    self.base_url = 'http:'
    self.token = token
    self.geo_criteria = ['stid', 'state', 'country', 'county', 'radius', 'bbox',
                         'cwa', 'nwsfirezone', 'gacc', 'subgacc']
r""" Instantiates an instance of MesoPy. Arguments: ---------- token: string, mandatory Your API token that authenticates you for requests against MesoWest.mes Returns: -------- None. Raises: ------- None.
juraj-google-style
@contextmanager  # from contextlib; assumed here so the documented `with` usage works
def all_logging_disabled(highest_level=logging.CRITICAL):
    previous_level = logging.root.manager.disable
    logging.disable(highest_level)
    try:
        yield
    finally:
        logging.disable(previous_level)
Disable all logging temporarily.

A context manager that will prevent any logging messages triggered during
the body from being processed.

Args:
    highest_level: the maximum logging level that is being blocked
codesearchnet
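A short usage sketch (assuming the `contextlib.contextmanager` wrapping noted above):

import logging

logging.basicConfig(level=logging.DEBUG)

with all_logging_disabled():
    logging.error('this message is suppressed')

logging.error('normal logging resumes here')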
def __init__(self, tid=None, stdout=None, stderr=None):
    self._tid = tid
    super().__init__()
    self.parent = None
    self._update_lock = threading.Lock()
    self._outputs = []
    self._stdout = stdout
    self._stderr = stderr
Initialize the AppFuture.

Args:
    KWargs:
        - tid (Int) : Task id should be any unique identifier. Now Int.
        - stdout (str) : Stdout file of the app. Default: None
        - stderr (str) : Stderr file of the app. Default: None
juraj-google-style