Dataset columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (3 classes: github-repos, juraj-google-style, codesearchnet).
def getitem_slot(self, node: cfg.CFGNode, index_var: cfg.Variable) -> tuple[cfg.CFGNode, cfg.Variable]:
    results = []
    unresolved = False
    node, ret = self.call_pytd(node, '__getitem__', index_var)
    if self.is_concrete:
        for val in index_var.bindings:
            try:
                index = self.ctx.convert.value_to_constant(val.data, int)
            except abstract_utils.ConversionError:
                unresolved = True
            else:
                self_len = len(self.pyval)
                if -self_len <= index < self_len:
                    results.append(self.pyval[index])
                else:
                    unresolved = True
    if unresolved or not self.is_concrete:
        results.append(ret)
    return (node, self.ctx.join_variables(node, results))
Implements __getitem__ for List. Arguments: node: The current CFG node. index_var: The Variable containing the index value, the i in lst[i]. Returns: Tuple of (node, return_variable). node may be the same as the argument. return_variable is a Variable with bindings of the possible return values.
github-repos
def from_millis(cls, timeout_ms):
    if hasattr(timeout_ms, 'has_expired'):
        return timeout_ms
    if timeout_ms is None:
        return cls(None)
    return cls(timeout_ms / 1000.0)
Create a new PolledTimeout if needed. If timeout_ms is already a PolledTimeout, just return it, otherwise create a new PolledTimeout with the given timeout in milliseconds. Args: timeout_ms: PolledTimeout object, or number of milliseconds to use for creating a new one. Returns: A PolledTimeout object that will expire in timeout_ms milliseconds, which may be timeout_ms itself, or a newly allocated PolledTimeout.
juraj-google-style
def __deepcopy__(self, memo): with distribute_lib.enter_or_assert_strategy(self._distribute_strategy): v = copy.deepcopy(self._v, memo) copied_variable = type(self)(strategy=self._distribute_strategy, v=v, aggregation=self._aggregation) memo[id(self)] = copied_variable return copied_variable
Perform a deepcopy of the `AggregatingVariable`. Unlike the deepcopy of a regular tf.Variable, this keeps the original strategy and devices of the `AggregatingVariable`. To avoid confusion with the behavior of deepcopy on a regular `Variable` (which does copy into new devices), we only allow a deepcopy of a `AggregatingVariable` within its originating strategy scope. Args: memo: The memoization object for `deepcopy`. Returns: A deep copy of the current `AggregatingVariable`. Raises: RuntimeError: If trying to deepcopy into a different strategy.
github-repos
def make_mapper(features):
    if not features:
        features = Feature(input=[], transformer=NullTransformer())
    if not iterable(features):
        features = (features,)
    return DataFrameMapper(
        [t.as_input_transformer_tuple() for t in features], input_df=True)
Make a DataFrameMapper from a feature or list of features Args: features (Union[Feature, List[Feature]]): feature or list of features Returns: DataFrameMapper: mapper made from features
codesearchnet
def compute_classification_results(self, adv_batches, dataset_batches, dataset_meta, defense_work=None): class_batch_to_work = {} if defense_work: for v in itervalues(defense_work.work): class_batch_to_work[v['output_classification_batch_id']] = v accuracy_matrix = ResultMatrix() error_matrix = ResultMatrix() hit_target_class_matrix = ResultMatrix() processed_images_count = {} total_count = len(self.data) processed_count = 0 logging.info('Processing %d files with classification results', len(self.data)) for (k, v) in iteritems(self.data): if ((processed_count % 100) == 0): logging.info('Processed %d out of %d classification results', processed_count, total_count) processed_count += 1 defense_id = v['submission_id'] adv_batch = adv_batches.data[v['adversarial_batch_id']] attack_id = adv_batch['submission_id'] work_item = class_batch_to_work.get(k) required_work_stats = ['stat_correct', 'stat_error', 'stat_target_class', 'stat_num_images'] if (work_item and work_item['error']): continue if (work_item and all(((work_item.get(i) is not None) for i in required_work_stats))): count_correctly_classified = work_item['stat_correct'] count_errors = work_item['stat_error'] count_hit_target_class = work_item['stat_target_class'] num_images = work_item['stat_num_images'] else: logging.warning('Recomputing accuracy for classification batch %s', k) (count_correctly_classified, count_errors, count_hit_target_class, num_images) = analyze_one_classification_result(self._storage_client, v['result_path'], adv_batch, dataset_batches, dataset_meta) accuracy_matrix[(defense_id, attack_id)] += count_correctly_classified error_matrix[(defense_id, attack_id)] += count_errors hit_target_class_matrix[(defense_id, attack_id)] += count_hit_target_class processed_images_count[defense_id] = (processed_images_count.get(defense_id, 0) + num_images) return (accuracy_matrix, error_matrix, hit_target_class_matrix, processed_images_count)
Computes classification results. Args: adv_batches: instance of AdversarialBatches dataset_batches: instance of DatasetBatches dataset_meta: instance of DatasetMetadata defense_work: instance of DefenseWorkPieces Returns: accuracy_matrix, error_matrix, hit_target_class_matrix, processed_images_count
codesearchnet
def index_bgen(fn, legacy=False): logger.info("Indexing {} (BGEN) using 'bgenix'{}".format(fn, (' (legacy mode)' if legacy else ''))) command = ['bgenix', '-g', fn, '-index'] if legacy: command.append('-with-rowid') try: logger.info("Executing '{}'".format(' '.join(command))) subprocess.Popen(command).communicate() except FileNotFoundError: logger.error("Cannot find 'bgenix', impossible to index {}".format(fn)) sys.exit(1) logger.info('Index generated')
Indexes a BGEN file. Args: fn (str): The name of the BGEN file.
codesearchnet
def tensor_dimension_to_mesh_axis(self, tensor_dimension, mesh_shape): val = [i for i, mesh_dimension in enumerate(mesh_shape) if (tensor_dimension.name, mesh_dimension.name) in self._pairs] if len(val) > 1: raise ValueError( "Tensor dimension maps to multiple mesh dimensions" " tensor_dimension=%s mesh_shape=%s layout=%s" % (tensor_dimension, mesh_shape, self._pairs)) return val[0] if val else None
Mesh axis associated with tensor dimension (or None). Args: tensor_dimension: Dimension. mesh_shape: Shape. Returns: Integer or None. Raises: ValueError: If one Tensor dimension maps to two mesh dimensions.
juraj-google-style
def _term(self, term):
    term = str(term)
    if term:
        self.__query["q"] += term
    return self
Add a term to the query. Arguments: term (str): The term to add. Returns: SearchHelper: Self
juraj-google-style
def sigmoid(x): return nn.sigmoid(x)
Element-wise sigmoid. Args: x: A tensor or variable. Returns: A tensor.
github-repos
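For illustration only, a minimal NumPy sketch of the element-wise operation the backend call above delegates to (the sample values are made up):

import numpy as np

def sigmoid_np(x):
    # Element-wise logistic function: 1 / (1 + exp(-x)).
    return 1.0 / (1.0 + np.exp(-x))

print(sigmoid_np(np.array([-2.0, 0.0, 2.0])))  # approx. [0.119, 0.5, 0.881]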
def launch_minecraft(port, installdir="MalmoPlatform", replaceable=False): launch_script = './launchClient.sh' if os.name == 'nt': launch_script = 'launchClient.bat' cwd = os.getcwd() os.chdir(installdir) os.chdir("Minecraft") try: cmd = [launch_script, '-port', str(port), '-env'] if replaceable: cmd.append('-replaceable') subprocess.check_call(cmd) finally: os.chdir(cwd)
Launch Minecraft listening for malmoenv connections. Args: port: the TCP port to listen on. installdir: the install dir name. Defaults to MalmoPlatform. Must be same as given (or defaulted) in download call if used. replaceable: whether or not to automatically restart Minecraft (default is false).
juraj-google-style
def get_parent(self, path):
    self.__validate_storage_path(path, projects_allowed=False)
    path_steps = [step for step in path.split('/') if step]
    del path_steps[-1]
    parent_path = '/{0}'.format('/'.join(path_steps))
    return self.api_client.get_entity_by_query(path=parent_path)
Get the parent entity of the entity pointed by the given path. Args: path (str): The path of the entity whose parent is needed Returns: A JSON object of the parent entity if found. Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
codesearchnet
def _WriteCacheFile(self, cache_filename, scopes):
    creds = {'scopes': sorted(list(scopes)),
             'svc_acct_name': self.__service_account_name}
    creds_str = json.dumps(creds)
    cache_file = _MultiProcessCacheFile(cache_filename)
    try:
        cache_file.LockedWrite(creds_str)
    except KeyboardInterrupt:
        raise
    except:
        pass
Writes the credential metadata to the cache file. This does not save the credentials themselves (CredentialStore class optionally handles that after this class is initialized). Args: cache_filename: Cache filename to check. scopes: Scopes for the desired credentials.
codesearchnet
def write_value(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    try:
        ostream.write(pack('!Q', self.value))
    except Exception:
        self.logger.error('Error writing boolean value to buffer')
        raise
Write the value of the Boolean object to the output stream. Args: ostream (Stream): A buffer to contain the encoded bytes of the value of a Boolean object. Usually a BytearrayStream object. Required. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0.
codesearchnet
def do_test(create_module_fn, exported_names=None, show_debug_info=False): if exported_names is None: exported_names = [] logging.set_stderrthreshold('error') tf.enable_v2_behavior() def app_main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') if FLAGS.save_model_path: save_model_path = FLAGS.save_model_path else: save_model_path = tempfile.mkdtemp(suffix='.saved_model') save_options = tf.saved_model.SaveOptions(save_debug_info=show_debug_info) tf.saved_model.save(create_module_fn(), save_model_path, options=save_options) logging.info('Saved model to: %s', save_model_path) mlir = pywrap_mlir.experimental_convert_saved_model_to_mlir(save_model_path, ','.join(exported_names), show_debug_info) mlir = pywrap_mlir.experimental_run_pass_pipeline(mlir, 'canonicalize', show_debug_info) print(mlir) filename = '%s/result.mlirbc' % save_model_path pywrap_mlir.experimental_write_bytecode(filename, mlir) if not file_io.file_exists(filename): raise app.UsageError('Failed to create bytecode output.') app.run(app_main)
Runs test. 1. Performs absl and tf "main"-like initialization that must run before almost anything else. 2. Converts `tf.Module` to SavedModel 3. Converts SavedModel to MLIR 4. Prints the textual MLIR to stdout (it is expected that the caller will have FileCheck checks in its file to check this output). This is only for use by the MLIR SavedModel importer tests. Args: create_module_fn: A callable taking no arguments, which returns the `tf.Module` to be converted and printed. exported_names: A set of exported names for the MLIR converter (default is "export all"). show_debug_info: If true, shows debug locations in the resulting MLIR.
github-repos
def block_diag(*blocks: np.ndarray) -> np.ndarray:
    for b in blocks:
        if b.shape[0] != b.shape[1]:
            raise ValueError('Blocks must be square.')
    if not blocks:
        return np.zeros((0, 0), dtype=np.complex128)
    n = sum(b.shape[0] for b in blocks)
    dtype = functools.reduce(_merge_dtypes, (b.dtype for b in blocks))
    result = np.zeros(shape=(n, n), dtype=dtype)
    i = 0
    for b in blocks:
        j = i + b.shape[0]
        result[i:j, i:j] = b
        i = j
    return result
Concatenates blocks into a block diagonal matrix. Args: *blocks: Square matrices to place along the diagonal of the result. Returns: A block diagonal matrix with the given blocks along its diagonal. Raises: ValueError: A block isn't square.
codesearchnet
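As a quick sanity check of the intended layout, the same block-diagonal arrangement can be reproduced with SciPy's equivalent helper (a sketch, not part of the original module):

import numpy as np
from scipy.linalg import block_diag as scipy_block_diag

a = np.array([[1, 2], [3, 4]])
b = np.array([[5]])
print(scipy_block_diag(a, b))
# [[1 2 0]
#  [3 4 0]
#  [0 0 5]]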
def _WriteAttributeContainer(self, attribute_container): if (attribute_container.CONTAINER_TYPE == self._CONTAINER_TYPE_EVENT): (timestamp, serialized_data) = self._serialized_event_heap.PopEvent() else: serialized_data = self._SerializeAttributeContainer(attribute_container) if (self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB): compressed_data = zlib.compress(serialized_data) serialized_data = sqlite3.Binary(compressed_data) else: compressed_data = '' if self._storage_profiler: self._storage_profiler.Sample('write', attribute_container.CONTAINER_TYPE, len(serialized_data), len(compressed_data)) if (attribute_container.CONTAINER_TYPE == self._CONTAINER_TYPE_EVENT): query = 'INSERT INTO event (_timestamp, _data) VALUES (?, ?)' self._cursor.execute(query, (timestamp, serialized_data)) else: query = 'INSERT INTO {0:s} (_data) VALUES (?)'.format(attribute_container.CONTAINER_TYPE) self._cursor.execute(query, (serialized_data,)) identifier = identifiers.SQLTableIdentifier(attribute_container.CONTAINER_TYPE, self._cursor.lastrowid) attribute_container.SetIdentifier(identifier)
Writes an attribute container. The table for the container type must exist. Args: attribute_container (AttributeContainer): attribute container.
codesearchnet
def get_available_versions(self, project_name): available_versions = self.pypi_client.package_releases(project_name) if not available_versions: available_versions = self.pypi_client.package_releases( project_name.capitalize() ) return dict( (self._parse_version(version), version) for version in available_versions )
Query PyPI to see if package has any available versions. Args: project_name (str): The name the project on PyPI. Returns: dict: Where keys are tuples of parsed versions and values are the versions returned by PyPI.
juraj-google-style
def contains(self, key):
    path = self.object_path(key)
    return os.path.exists(path) and os.path.isfile(path)
Returns whether the object named by `key` exists. Optimized to only check whether the file object exists. Args: key: Key naming the object to check. Returns: boolean indicating whether the object exists
juraj-google-style
def scalar_input_map(func, input_):
    if util_iter.isiterable(input_):
        return list(map(func, input_))
    else:
        return func(input_)
Map like function Args: func: function to apply input_ : either an iterable or scalar value Returns: If ``input_`` is iterable this function behaves like map otherwise applies func to ``input_``
codesearchnet
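A self-contained sketch of the same map-or-apply behaviour, with a hypothetical stand-in for `util_iter.isiterable` (illustration only):

def _isiterable(x):
    # Hypothetical stand-in for util_iter.isiterable.
    return hasattr(x, '__iter__') and not isinstance(x, (str, bytes))

def scalar_input_map_demo(func, input_):
    return list(map(func, input_)) if _isiterable(input_) else func(input_)

print(scalar_input_map_demo(abs, -3))        # 3
print(scalar_input_map_demo(abs, [-1, -2]))  # [1, 2]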
def load_types_for_deserialization(cls, *types_to_deserialize: Type[Any]) -> ContextManager[Dict[str, Type[Any]]]: return cls._TYPE_REGISTRY.load_types_for_deserialization(*types_to_deserialize)
Context manager for loading unregistered types for deserialization. Example:: class A(pg.Object): auto_register = False x: int class B(A): y: str with pg.JSONConvertible.load_types_for_deserialization(A, B): pg.from_json_str(A(1).to_json_str()) pg.from_json_str(B(1, 'hi').to_json_str()) Args: *types_to_deserialize: A list of types to be loaded for deserialization. Returns: A context manager within which the objects of the requested types could be deserialized.
github-repos
def dataframe(start_row=0, max_rows=None, use_cache=True):
    output = QueryOutput()
    output._output_type = 'dataframe'
    output._dataframe_start_row = start_row
    output._dataframe_max_rows = max_rows
    output._use_cache = use_cache
    return output
Construct a query output object where the result is a dataframe Args: start_row: the row of the table at which to start the export (default 0). max_rows: an upper limit on the number of rows to export (default None). use_cache: whether to use cached results or not (default True).
juraj-google-style
def docx_table_from_xml_node(table_node: ElementTree.Element, level: int, config: TextProcessingConfig) -> str: table = CustomDocxTable() for row_node in table_node: if row_node.tag != DOCX_TABLE_ROW: continue table.new_row() for cell_node in row_node: if cell_node.tag != DOCX_TABLE_CELL: continue table.new_cell() for para_node in cell_node: text = docx_text_from_xml_node(para_node, level, config) if text: table.add_paragraph(text) return docx_process_table(table, config)
Converts an XML node representing a DOCX table into a textual representation. Args: table_node: XML node level: current level in XML hierarchy (used for recursion; start level is 0) config: :class:`TextProcessingConfig` control object Returns: string representation
juraj-google-style
def mix_over_posterior_draws(means, variances): with tf.compat.v1.name_scope( 'mix_over_posterior_draws', values=[means, variances]): num_posterior_draws = dist_util.prefer_static_value( tf.shape(input=means))[0] component_observations = tfd.Independent( distribution=tfd.Normal( loc=dist_util.move_dimension(means, 0, -2), scale=tf.sqrt(dist_util.move_dimension(variances, 0, -2))), reinterpreted_batch_ndims=1) return tfd.MixtureSameFamily( mixture_distribution=tfd.Categorical( logits=tf.zeros([num_posterior_draws], dtype=component_observations.dtype)), components_distribution=component_observations)
Construct a predictive normal distribution that mixes over posterior draws. Args: means: float `Tensor` of shape `[num_posterior_draws, ..., num_timesteps]`. variances: float `Tensor` of shape `[num_posterior_draws, ..., num_timesteps]`. Returns: mixture_dist: `tfd.MixtureSameFamily(tfd.Independent(tfd.Normal))` instance representing a uniform mixture over the posterior samples, with `batch_shape = ...` and `event_shape = [num_timesteps]`.
juraj-google-style
class AssertEqual(beam.PTransform): def __init__(self, elements: Iterable[Any]): self._elements = elements def expand(self, pcoll): return assert_that(pcoll | beam.Map(lambda row: beam.Row(**row._asdict())), equal_to(dicts_to_rows(self._elements)))
Asserts that the input contains exactly the elements provided. This is primarily used for testing; it will cause the entire pipeline to fail if the input to this transform is not exactly the set of `elements` given in the config parameter. As with Create, YAML/JSON-style mappings are interpreted as Beam rows, e.g.:: type: AssertEqual input: SomeTransform config: elements: - {a: 0, b: "foo"} - {a: 1, b: "bar"} would ensure that `SomeTransform` produced exactly two elements with values `(a=0, b="foo")` and `(a=1, b="bar")` respectively. Args: elements: The set of elements that should belong to the PCollection. YAML/JSON-style mappings will be interpreted as Beam rows.
github-repos
def write(self, __text: str) -> None: if __text == os.linesep: self.handle.write(__text) else: frame = inspect.currentframe() if frame is None: filename = 'unknown' lineno = 0 else: outer = frame.f_back filename = outer.f_code.co_filename.split(os.sep)[-1] lineno = outer.f_lineno self.handle.write('[{:>15s}:{:03d}] {}'.format(filename[-15:], lineno, __text))
Write text to the debug stream. Args: __text: Text to write
juraj-google-style
def _replace_image(image_url, image_tag, ebook_folder, image_name=None): try: assert isinstance(image_tag, bs4.element.Tag) except AssertionError: raise TypeError(('image_tag cannot be of type ' + str(type(image_tag)))) if (image_name is None): image_name = str(uuid.uuid4()) try: image_full_path = os.path.join(ebook_folder, 'images') assert os.path.exists(image_full_path) image_extension = save_image(image_url, image_full_path, image_name) image_tag['src'] = (((('images' + '/') + image_name) + '.') + image_extension) except ImageErrorException: image_tag.decompose() except AssertionError: raise ValueError(("%s doesn't exist or doesn't contain a subdirectory images" % ebook_folder)) except TypeError: image_tag.decompose()
Replaces the src of an image to link to the local copy in the images folder of the ebook. Tightly coupled with bs4 package. Args: image_url (str): The url of the image. image_tag (bs4.element.Tag): The bs4 tag containing the image. ebook_folder (str): The directory where the ebook files are being saved. This must contain a subdirectory called "images". image_name (Optional[str]): The short name to save the image as. Should not contain a directory or an extension.
codesearchnet
def _get_resource_list(self, rsrc_dict):
    if 'collections' in rsrc_dict:
        return rsrc_dict['collections']
    if 'experiments' in rsrc_dict:
        return rsrc_dict['experiments']
    if 'channels' in rsrc_dict:
        return rsrc_dict['channels']
    if 'coords' in rsrc_dict:
        return rsrc_dict['coords']
    raise RuntimeError('Invalid list response received from Boss. No known resource type returned.')
Extracts list of resources from the HTTP response. Args: rsrc_dict (dict): HTTP response encoded in a dictionary. Returns: (list[string]): List of a type of resource (collections, experiments, etc). Raises: (RuntimeError): If rsrc_dict does not contain any known resources.
juraj-google-style
def load_pip_addons(_globals):
    for package_name in known_pip_addons:
        _, username = package_username(package_name)
        try:
            load_addon(username, package_name.replace('-', '_'), _globals)
        except ImportError:
            pass
Load all known fabsetup addons which are installed as pypi pip-packages. Args: _globals(dict): the globals() namespace of the fabric script. Return: None
codesearchnet
def egress(self, envelope, http_headers, operation, binding_options): if self._logger.isEnabledFor(logging.INFO): service_name = operation.binding.wsdl.services.keys()[0] self._logger.info(_REQUEST_LOG_LINE, service_name, operation.name, binding_options['address']) if self._logger.isEnabledFor(logging.DEBUG): http_headers_safe = http_headers.copy() if (self._AUTHORIZATION_HEADER in http_headers_safe): http_headers_safe[self._AUTHORIZATION_HEADER] = self._REDACTED request_string = etree.tostring(envelope, pretty_print=True) safe_request = self._DEVELOPER_TOKEN_SUB.sub(self._REDACTED, request_string.decode('utf-8')) self._logger.debug(_REQUEST_XML_LOG_LINE, http_headers_safe, safe_request) return (envelope, http_headers)
Overrides the egress function for request logging. Args: envelope: An Element with the SOAP request data. http_headers: A dict of the current http headers. operation: The SoapOperation instance. binding_options: An options dict for the SOAP binding. Returns: A tuple of the envelope and headers.
codesearchnet
def send(email, subject=None, from_email=None, to_email=None, cc=None, bcc=None, reply_to=None, smtp=None): if is_string(email): email = EmailContent(email) from_email = sanitize_email_address((from_email or email.headers.get('from'))) to_email = sanitize_email_address((to_email or email.headers.get('to'))) cc = sanitize_email_address((cc or email.headers.get('cc'))) bcc = sanitize_email_address((bcc or email.headers.get('bcc'))) reply_to = sanitize_email_address((reply_to or email.headers.get('reply-to'))) message_args = {'html': email.html, 'text': email.text, 'subject': (subject or email.headers.get('subject', '')), 'mail_from': from_email, 'mail_to': to_email} if cc: message_args['cc'] = cc if bcc: message_args['bcc'] = bcc if reply_to: message_args['headers'] = {'reply-to': reply_to} message = emails.Message(**message_args) for (filename, data) in email.inline_images: message.attach(filename=filename, content_disposition='inline', data=data) message.send(smtp=smtp)
Send markdown email Args: email (str/obj): A markdown string or EmailContent object subject (str): subject line from_email (str): sender email address to_email (str/list): recipient email addresses cc (str/list): CC email addresses (string or a list) bcc (str/list): BCC email addresses (string or a list) reply_to (str): Reply-to email address smtp (dict): SMTP configuration (dict) Schema of smtp dict: host (str): SMTP server host. Default: localhost port (int): SMTP server port. Default: 25 tls (bool): Use TLS. Default: False ssl (bool): Use SSL. Default: False user (bool): SMTP login user. Default empty password (bool): SMTP login password. Default empty
codesearchnet
def read(self, vals): i = 0 {%- for field in fields %} {%- if field.is_list %} count = int(vals[i]) i += 1 for _ in range(count): obj = {{field.object_name}}() obj.read(vals[i:i + obj.field_count]) self.add_{{field.field_name}}(obj) i += obj.field_count {%- else %} if len(vals[i]) == 0: self.{{field.field_name}} = None else: self.{{field.field_name}} = vals[i] i += 1 {%- endif %} {%- endfor %}
Read values Args: vals (list): list of strings representing values
juraj-google-style
def save(self, filename=None, directory=None):
    if filename is not None:
        self.filename = filename
    if directory is not None:
        self.directory = directory
    filepath = self.filepath
    tools.mkdirs(filepath)
    data = text_type(self.source)
    with io.open(filepath, 'w', encoding=self.encoding) as fd:
        fd.write(data)
        if not data.endswith(u'\n'):
            fd.write(u'\n')
    return filepath
Save the DOT source to file. Ensure the file ends with a newline. Args: filename: Filename for saving the source (defaults to ``name`` + ``'.gv'``) directory: (Sub)directory for source saving and rendering. Returns: The (possibly relative) path of the saved source file.
codesearchnet
def Add(self, service, method, request, global_params=None): method_config = service.GetMethodConfig(method) upload_config = service.GetUploadConfig(method) http_request = service.PrepareHttpRequest(method_config, request, global_params=global_params, upload_config=upload_config) api_request = self.ApiCall(http_request, self.retryable_codes, service, method_config) self.api_requests.append(api_request)
Add a request to the batch. Args: service: A class inheriting base_api.BaseApiService. method: A string indicated desired method from the service. See the example in the class docstring. request: An input message appropriate for the specified service.method. global_params: Optional additional parameters to pass into method.PrepareHttpRequest. Returns: None
codesearchnet
def _read_parquet_columns(path, columns, num_splits, kwargs):
    import pyarrow.parquet as pq
    df = pq.read_pandas(path, columns=columns, **kwargs).to_pandas()
    return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
Use a Ray task to read columns from Parquet into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path: The path of the Parquet file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index.
codesearchnet
def break_bond(self, ind1, ind2, tol=0.2): sites = self._sites clusters = [[sites[ind1]], [sites[ind2]]] sites = [site for (i, site) in enumerate(sites) if (i not in (ind1, ind2))] def belongs_to_cluster(site, cluster): for test_site in cluster: if CovalentBond.is_bonded(site, test_site, tol=tol): return True return False while (len(sites) > 0): unmatched = [] for site in sites: for cluster in clusters: if belongs_to_cluster(site, cluster): cluster.append(site) break else: unmatched.append(site) if (len(unmatched) == len(sites)): raise ValueError('Not all sites are matched!') sites = unmatched return (self.__class__.from_sites(cluster) for cluster in clusters)
Returns two molecules based on breaking the bond between atoms at index ind1 and ind2. Args: ind1 (int): Index of first site. ind2 (int): Index of second site. tol (float): Relative tolerance to test. Basically, the code checks if the distance between the sites is less than (1 + tol) * typical bond distances. Defaults to 0.2, i.e., 20% longer. Returns: Two Molecule objects representing the two clusters formed from breaking the bond.
codesearchnet
def _on_report(self, sequence, topic, message): try: conn_key = self._find_connection(topic) conn_id = self.conns.get_connection_id(conn_key) except ArgumentError: self._logger.warn("Dropping report message that does not correspond with a known connection, topic=%s", topic) return try: rep_msg = messages.ReportNotification.verify(message) serialized_report = {} serialized_report['report_format'] = rep_msg['report_format'] serialized_report['encoded_report'] = rep_msg['report'] serialized_report['received_time'] = datetime.datetime.strptime(rep_msg['received_time'].encode().decode(), "%Y%m%dT%H:%M:%S.%fZ") report = self.report_parser.deserialize_report(serialized_report) self._trigger_callback('on_report', conn_id, report) except Exception: self._logger.exception("Error processing report conn_id=%d", conn_id)
Process a report received from a device. Args: sequence (int): The sequence number of the packet received topic (string): The topic this message was received on message (dict): The message itself
juraj-google-style
def random_channel_shift(x, intensity_range, channel_axis=0):
    intensity = np.random.uniform(-intensity_range, intensity_range)
    return apply_channel_shift(x, intensity, channel_axis=channel_axis)
Performs a random channel shift. DEPRECATED. Args: x: Input tensor. Must be 3D. intensity_range: Transformation intensity. channel_axis: Index of axis for channels in the input tensor. Returns: Numpy image tensor.
github-repos
def _apply_conv(self, inputs, w): if self._data_format == DATA_FORMAT_NWC: h_dim = 1 two_dim_conv_data_format = DATA_FORMAT_NHWC else: h_dim = 2 two_dim_conv_data_format = DATA_FORMAT_NCHW inputs = tf.expand_dims(inputs, axis=h_dim) two_dim_conv_stride = self.stride[:h_dim] + (1,) + self.stride[h_dim:] two_dim_conv_rate = (1,) + self._rate w_dw, w_pw = w outputs = tf.nn.separable_conv2d(inputs, w_dw, w_pw, strides=two_dim_conv_stride, rate=two_dim_conv_rate, padding=self._conv_op_padding, data_format=two_dim_conv_data_format) outputs = tf.squeeze(outputs, [h_dim]) return outputs
Apply a `separable_conv2d` operation on `inputs` using `w`. Args: inputs: A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. w: A tuple of weight matrices of the same type as `inputs`, the first being the depthwise weight matrix, and the second being the pointwise weight matrix. Returns: outputs: The result of the convolution operation on `inputs`.
juraj-google-style
def load_dot_env_file(dot_env_path): if not os.path.isfile(dot_env_path): return {} logger.log_info("Loading environment variables from {}".format(dot_env_path)) env_variables_mapping = {} with io.open(dot_env_path, 'r', encoding='utf-8') as fp: for line in fp: if "=" in line: variable, value = line.split("=", 1) elif ":" in line: variable, value = line.split(":", 1) else: raise exceptions.FileFormatError(".env format error") env_variables_mapping[variable.strip()] = value.strip() utils.set_os_environ(env_variables_mapping) return env_variables_mapping
load .env file. Args: dot_env_path (str): .env file path Returns: dict: environment variables mapping { "UserName": "debugtalk", "Password": "123456", "PROJECT_KEY": "ABCDEFGH" } Raises: exceptions.FileFormatError: If .env file format is invalid.
juraj-google-style
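A minimal sketch of the parsing rule described above, without the logging and `os.environ` side effects of the real function (the sample variable names are illustrative):

def parse_dot_env_lines(lines):
    mapping = {}
    for line in lines:
        if '=' in line:
            variable, value = line.split('=', 1)
        elif ':' in line:
            variable, value = line.split(':', 1)
        else:
            raise ValueError('.env format error')
        mapping[variable.strip()] = value.strip()
    return mapping

print(parse_dot_env_lines(['UserName=debugtalk', 'Password: 123456']))
# {'UserName': 'debugtalk', 'Password': '123456'}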
def price(self, market: pmd.ProcessedMarketData, name: Optional[str]=None) -> types.FloatTensor: name = name or self._name + '_price' with tf.name_scope(name): discount_curve = cashflow_streams.get_discount_curve(self._discount_curve_type, market, self._discount_curve_mask) currencies = [cur.currency.value for cur in self._discount_curve_type] vol_surface = equity_utils.get_vol_surface(currencies, self._equity, market, self._equity_mask) spots = tf.stack(market.spot(currencies, self._equity), axis=0) discount_factors = discount_curve.discount_factor(self._expiry_date.expand_dims(axis=-1)) daycount_convention = discount_curve.daycount_convention day_count_fn = market_data_utils.get_daycount_fn(daycount_convention) if spots.shape.rank > 0: spots = tf.gather(spots, self._equity_mask) if self._model == 'BS-LSM': vols = vol_surface.volatility(expiry_dates=self._expiry_date.expand_dims(axis=-1), strike=tf.expand_dims(self._strike, axis=-1)) prices = utils.bs_lsm_price(spots=spots, expiry_times=day_count_fn(start_date=market.date, end_date=self._expiry_date, dtype=self._dtype), strikes=self._strike, volatility=tf.squeeze(vols, axis=-1), discount_factors=tf.squeeze(discount_factors), is_call_option=self._is_call_option, num_samples=self._num_samples, num_exercise_times=self._num_exercise_times, num_calibration_samples=self._num_calibration_samples, seed=self._seed) return self._short_position * self._contract_amount * prices else: raise ValueError('Only BS-LSM model is supported. Supplied {}'.format(self._model))
Returns the present value of the American options. Args: market: An instance of `ProcessedMarketData`. name: Python str. The name to give to the ops created by this function. Default value: `None` which maps to 'price'. Returns: A `Tensor` of shape `batch_shape` containing the modeled price of each American option contract based on the input market data.
github-repos
def write(self, face, data, viewport=None, *, alignment=1) -> None:
    if type(data) is Buffer:
        data = data.mglo
    self.mglo.write(face, data, viewport, alignment)
Update the content of the texture. Args: face (int): The face to update. data (bytes): The pixel data. viewport (tuple): The viewport. Keyword Args: alignment (int): The byte alignment of the pixels.
codesearchnet
def _parse_dbpath(dbpath):
    if isinstance(dbpath, list):
        dbpath = '|'.join(dbpath)
    if not dbpath.endswith('$'):
        dbpath = '(%s)$' % dbpath
    return dbpath
Converts the dbpath to a regexp pattern. Transforms dbpath from a string or an array of strings to a regexp pattern which will be used to match database names. Args: dbpath: a string or an array containing the databases to be matched from a cluster. Returns: A regexp pattern that will match any of the desired databases on a cluster.
codesearchnet
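Assuming `_parse_dbpath` as defined above, a short usage sketch showing the pattern it builds and how it matches database names:

import re

pattern = _parse_dbpath(['reports', 'analytics'])
print(pattern)                                    # (reports|analytics)$
print(bool(re.search(pattern, 'analytics')))      # True
print(bool(re.search(pattern, 'analytics_old')))  # False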
def window_unpartition(windows, window_size, pad_height_width, height_width):
    padded_height, padded_width = pad_height_width
    height, width = height_width
    batch_size = windows.shape[0] // (padded_height * padded_width // window_size // window_size)
    hidden_state = windows.view(batch_size, padded_height // window_size, padded_width // window_size, window_size, window_size, -1)
    hidden_state = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous()
    hidden_state = hidden_state.view(batch_size, padded_height, padded_width, -1)
    hidden_state = hidden_state[:, :height, :width, :].contiguous()
    return hidden_state
Window unpartition into original sequences and removing padding. Args: windows (`torch.Tensor`): Input tokens with [batch_size * num_windows, window_size, window_size, num_channels]. window_size (`int`): Window size. pad_height_width (`Tuple[int]`): Padded height and width (padded_height, padded_width). height_width (`Tuple[int]`): Original height and width before padding. Returns: hidden_state: unpartitioned sequences with [batch_size, height, width, num_channels].
github-repos
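A shape walk-through, assuming the reconstructed function above: four 2x2 windows over a 4x4 padded grid are stitched back together and cropped to the original 3x3 feature map.

import torch

windows = torch.arange(4 * 2 * 2 * 8, dtype=torch.float32).reshape(4, 2, 2, 8)
out = window_unpartition(windows, window_size=2, pad_height_width=(4, 4), height_width=(3, 3))
print(out.shape)  # torch.Size([1, 3, 3, 8])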
def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
    config_dict = {}
    config_dict['text_config'] = text_config
    config_dict['vision_config'] = vision_config
    return cls.from_dict(config_dict, **kwargs)
Instantiate a [`Owlv2Config`] (or a derived class) from owlv2 text model configuration and owlv2 vision model configuration. Returns: [`Owlv2Config`]: An instance of a configuration object
github-repos
def retry(self, retry_message=''): if not self.async: raise UnexpectedPipelineError( 'May only call retry() method for asynchronous pipelines.') if self.try_cancel(): self._context.transition_retry(self._pipeline_key, retry_message) return True else: return False
Forces a currently running asynchronous pipeline to retry. Note this may not be called by synchronous or generator pipelines. Those must instead raise the 'Retry' exception during execution. Args: retry_message: Optional message explaining why the retry happened. Returns: True if the Pipeline should be retried, False if it cannot be cancelled mid-flight for some reason.
juraj-google-style
def gumbel_softmax(x, z_size, mode, softmax_k=0, temperature_warmup_steps=150000, summary=True, name=None):
    with tf.variable_scope(name, default_name='gumbel_softmax'):
        m = tf.layers.dense(x, 2 ** z_size, name='mask')
        if softmax_k > 0:
            m, kl = top_k_softmax(m, softmax_k)
            return m, m, 1.0 - tf.reduce_mean(kl)
        logsm = tf.nn.log_softmax(m)
        # Gumbel-softmax sample with a warmed-up temperature schedule.
        gumbel_samples = gumbel_sample(common_layers.shape_list(m))
        steps = temperature_warmup_steps
        gumbel_samples *= common_layers.inverse_exp_decay(steps // 5) * 0.5
        temperature = 1.2 - common_layers.inverse_lin_decay(steps)
        # 10% of the time keep a reasonably high temperature to keep learning.
        temperature = tf.cond(tf.less(tf.random_uniform([]), 0.9),
                              lambda: temperature,
                              lambda: tf.random_uniform([], minval=0.5, maxval=1.0))
        s = tf.nn.softmax((logsm + gumbel_samples) / temperature)
        m = tf.nn.softmax(m)
        kl = -tf.reduce_max(logsm, axis=-1)
        if summary:
            tf.summary.histogram('max-log', tf.reshape(kl, [-1]))
        # Calculate the argmax and construct one-hot vectors.
        maxvec = tf.reshape(tf.argmax(m, axis=-1), [-1])
        maxvhot = tf.stop_gradient(tf.one_hot(maxvec, 2 ** z_size))
        # Add losses that prevent too few codes from being used.
        distrib = tf.reshape(logsm, [-1, 2 ** z_size]) * maxvhot
        d_mean = tf.reduce_mean(distrib, axis=[0], keep_dims=True)
        d_variance = tf.reduce_mean(tf.squared_difference(distrib, d_mean), axis=[0])
        d_dev = -tf.reduce_mean(d_variance)
        ret = s
        if mode != tf.estimator.ModeKeys.TRAIN:
            ret = tf.reshape(maxvhot, common_layers.shape_list(s))
        return m, ret, d_dev * 5.0 + tf.reduce_mean(kl) * 0.002
Gumbel softmax discretization bottleneck. Args: x: Input to the discretization bottleneck. z_size: Number of bits, where discrete codes range from 1 to 2**z_size. mode: tf.estimator.ModeKeys. softmax_k: If > 0 then do top-k softmax. temperature_warmup_steps: Number of steps it takes to decay temperature to 0. summary: Whether to write summaries. name: Name for the bottleneck scope. Returns: Embedding function, discrete code, and loss.
codesearchnet
def temporal_latent_to_dist(name, x, hparams, output_channels=None): _, _, width, _, res_channels = common_layers.shape_list(x) if output_channels is None: output_channels = res_channels dilation_rates = get_dilation_rates(hparams, width) with tf.variable_scope(name, reuse=tf.AUTO_REUSE): h = x for i in range(hparams.latent_encoder_depth): if hparams.latent_apply_dilations: h2 = dilated_conv_stack("dil_latent_3d_res_%d" % i, h, mid_channels=hparams.latent_encoder_width, output_channels=res_channels, dilation_rates=dilation_rates, activation=hparams.latent_activation, dropout=hparams.latent_dropout) else: h2 = conv_stack("latent_3d_res_%d" % i, h, mid_channels=hparams.latent_encoder_width, output_channels=res_channels, activation=hparams.latent_activation, dropout=hparams.latent_dropout) h += h2 h = h[:, -1, :, :, :] h = conv("res_final", h, apply_actnorm=False, conv_init="zeros", output_channels=2*output_channels, filter_size=[1, 1]) mean, log_scale = h[:, :, :, 0::2], h[:, :, :, 1::2] return tfp.distributions.Normal(mean, tf.exp(log_scale))
Network that maps a time-indexed list of 3-D latents to a gaussian. Args: name: variable scope. x: List of 4-D Tensors indexed by time, (NHWC) hparams: tf.contrib.training.Hparams. output_channels: int, Number of channels of the output gaussian mean. Returns: dist: tfp.distributions.Normal
juraj-google-style
def _save(filename, tensor_names, tensors, tensor_slices=None, name='save'): if tensor_slices is None: return gen_io_ops.save(filename, tensor_names, tensors, name=name) else: return gen_io_ops.save_slices(filename, tensor_names, tensor_slices, tensors, name=name)
Save a list of tensors to a file with given names. Example usage without slice info: Save("/foo/bar", ["w", "b"], [w, b]) Example usage with slices: Save("/foo/bar", ["w", "w"], [slice0, slice1], tensor_slices=["4 10 0,2:-", "4 10 2,2:-"]) Args: filename: the file name of the sstable. tensor_names: a list of strings. tensors: the list of tensors to be saved. tensor_slices: Optional list of strings to specify the shape and slices of a larger virtual tensor that each tensor is a part of. If not specified each tensor is saved as a full slice. name: string. Optional name for the op. Requires: The length of tensors should match the size of tensor_names and of tensor_slices. Returns: An Operation that saves the tensors.
github-repos
def HashFilePath(self, path, byte_count):
    with open(path, "rb") as fd:
        self.HashFile(fd, byte_count)
Updates underlying hashers with file on a given path. Args: path: A path to the file that is going to be fed to the hashers. byte_count: A maximum numbers of bytes that are going to be processed.
juraj-google-style
def one_of(self, chset: str) -> str:
    res = self.peek()
    if res in chset:
        self.offset += 1
        return res
    raise UnexpectedInput(self, "one of " + chset)
Parse one character from the specified set. Args: chset: string of characters to try as alternatives. Returns: The character that was actually matched. Raises: UnexpectedInput: If the next character is not in `chset`.
juraj-google-style
def pnlSingle( self, account: str = '', modelCode: str = '', conId: int = 0) -> List[PnLSingle]: return [v for v in self.wrapper.pnlSingles.values() if (not account or v.account == account) and (not modelCode or v.modelCode == modelCode) and (not conId or v.conId == conId)]
List of subscribed :class:`.PnLSingle` objects (profit and loss for single positions). The :class:`.PnLSingle` objects are kept live updated. Args: account: If specified, filter for this account name. modelCode: If specified, filter for this account model. conId: If specified, filter for this contract ID.
juraj-google-style
def _create_key_func(extractor, none_is_largest):
    if none_is_largest:
        def key_func_none_is_largest(session_group):
            value = extractor(session_group)
            return (value is None, value)
        return key_func_none_is_largest

    def key_func_none_is_smallest(session_group):
        value = extractor(session_group)
        return (value is not None, value)
    return key_func_none_is_smallest
Returns a key_func to be used in list.sort(). Returns a key_func to be used in list.sort() that sorts session groups by the value extracted by extractor. 'None' extracted values will either be considered largest or smallest as specified by the "none_is_largest" boolean parameter. Args: extractor: An extractor function that extract the key from the session group. none_is_largest: bool. If true treats 'None's as largest; otherwise smallest.
juraj-google-style
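A small sketch of how the generated key function behaves in `sorted()`, assuming the extractor simply reads a value out of a dict:

groups = [{'metric': 3}, {'metric': None}, {'metric': 1}]
key_func = _create_key_func(lambda g: g['metric'], none_is_largest=True)
print(sorted(groups, key=key_func))
# [{'metric': 1}, {'metric': 3}, {'metric': None}]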
def time(func, *args, **kwargs):
    start_time = time_module.time()
    func(*args, **kwargs)
    end_time = time_module.time()
    return end_time - start_time
Call the supplied function with the supplied arguments, and return the total execution time as a float in seconds. The precision of the returned value depends on the precision of `time.time()` on your platform. Arguments: func: the function to run. *args: positional arguments to pass into the function. **kwargs: keyword arguments to pass into the function. Returns: Execution time of the function as a float in seconds.
codesearchnet
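Usage sketch, assuming the `time` helper above (and its `time_module` import) is in scope:

elapsed = time(sorted, list(range(100000)))
print('sorted 100k ints in %.4f seconds' % elapsed)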
def words(self, index = None): if index is None: return self.select(Word,None,True,default_ignore_structure) else: if index < 0: index = self.count(Word,None,True,default_ignore_structure) + index for i, e in enumerate(self.select(Word,None,True,default_ignore_structure)): if i == index: return e raise IndexError
Returns a generator of Word elements found (recursively) under this element. Arguments: * ``index``: If set to an integer, will retrieve and return the n'th element (starting at 0) instead of returning the list of all
juraj-google-style
def print_tools(self, pattern=None, buf=sys.stdout): seen = set() rows = [] context = self.context if context: data = context.get_tools() conflicts = set(context.get_conflicting_tools().keys()) for (_, (variant, tools)) in sorted(data.items()): pkg_str = variant.qualified_package_name for tool in tools: if (pattern and (not fnmatch(tool, pattern))): continue if (tool in conflicts): label = '(in conflict)' color = critical else: label = '' color = None rows.append([tool, '-', pkg_str, 'active context', label, color]) seen.add(tool) for suite in self.suites: for (tool, d) in suite.get_tools().iteritems(): if (tool in seen): continue if (pattern and (not fnmatch(tool, pattern))): continue label = [] color = None path = which(tool) if path: path_ = os.path.join(suite.tools_path, tool) if (path != path_): label.append(("(hidden by unknown tool '%s')" % path)) color = warning variant = d['variant'] if isinstance(variant, set): pkg_str = ', '.join(variant) label.append('(in conflict)') color = critical else: pkg_str = variant.qualified_package_name orig_tool = d['tool_name'] if (orig_tool == tool): orig_tool = '-' label = ' '.join(label) source = ("context '%s' in suite '%s'" % (d['context_name'], suite.load_path)) rows.append([tool, orig_tool, pkg_str, source, label, color]) seen.add(tool) _pr = Printer(buf) if (not rows): _pr('No matching tools.') return False headers = [['TOOL', 'ALIASING', 'PACKAGE', 'SOURCE', '', None], ['----', '--------', '-------', '------', '', None]] rows = (headers + sorted(rows, key=(lambda x: x[0].lower()))) print_colored_columns(_pr, rows) return True
Print a list of visible tools. Args: pattern (str): Only list tools that match this glob pattern.
codesearchnet
def _getAuthenticated(self, browser, url): try: if (len(self.creds) > 0): c = random.choice(self.creds)[0] browser.setNewPassword(url, c.user, c.password) return True else: raise NoCredentialsException(str(self)) except AttributeError as e: raise BadImplementationError(str(e))
Getting authenticated. This method may be overwritten. TODO: update to version 2 of the wrappers. Args: ----- browser: The browser in which the user will be authenticated. url: The URL to get authenticated in. Return: ------- True or False. Raises: ------ NoCredentialsException: If no valid credentials have been found. BadImplementationError: If an expected attribute is missing.
codesearchnet
def get_tree_starting_at(module: str, edges: List[Tuple[str, str]]) -> List[Union[str, List[str]]]: vertices_seen = [module] new_edges = [edge for edge in edges if edge[0] == module and edge[1] != module and ('__init__.py' not in edge[1])] tree = [module] while len(new_edges) > 0: tree.append(new_edges) final_vertices = list({edge[1] for edge in new_edges}) vertices_seen.extend(final_vertices) new_edges = [edge for edge in edges if edge[0] in final_vertices and edge[1] not in vertices_seen and ('__init__.py' not in edge[1])] return tree
Returns the tree starting at a given module following all edges. Args: module (`str`): The module that will be the root of the subtree we want. edges (`List[Tuple[str, str]]`): The list of all edges of the tree. Returns: `List[Union[str, List[str]]]`: The tree to print in the following format: [module, [list of edges starting at module], [list of edges starting at the preceding level], ...]
github-repos
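A small example of the output format, assuming the function above and a made-up dependency graph:

edges = [('a.py', 'b.py'), ('b.py', 'c.py')]
print(get_tree_starting_at('a.py', edges))
# ['a.py', [('a.py', 'b.py')], [('b.py', 'c.py')]]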
def get_destination(self, filepath, targetdir=None): dst = self.change_extension(filepath, 'css') if targetdir: dst = os.path.join(targetdir, dst) return dst
Return destination path from given source file path. Destination is always a file with extension ``.css``. Args: filepath (str): A file path. The path is always relative to the sources directory; if not relative, ``targetdir`` won't be joined. targetdir (str): If given, will be joined at the beginning of the destination path. Returns: str: Destination filepath.
codesearchnet
def preprocess_bel_stmt(stmt: str) -> str:
    stmt = stmt.strip()
    stmt = re.sub(r",+", ",", stmt)
    stmt = re.sub(r",", ", ", stmt)
    stmt = re.sub(r" +", " ", stmt)
    return stmt
Clean up basic formatting of BEL statement Args: stmt: BEL statement as single string Returns: cleaned BEL statement
juraj-google-style
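Assuming `preprocess_bel_stmt` and `re` are in scope, a quick example of the whitespace and comma normalization (the BEL terms are made up):

raw = '  p(HGNC:AKT1),,p(HGNC:EGF)  increases  act(p(HGNC:AKT1)) '
print(preprocess_bel_stmt(raw))
# p(HGNC:AKT1), p(HGNC:EGF) increases act(p(HGNC:AKT1))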
def get_sub_category(alt_len, ref_len, category, svtype=None):
    subcategory = ''
    if category in ('snv', 'indel', 'cancer'):
        if ref_len == alt_len:
            subcategory = 'snv'
        else:
            subcategory = 'indel'
    elif category == 'sv':
        subcategory = svtype
    return subcategory
Get the subcategory for a VCF variant The sub categories are: 'snv', 'indel', 'del', 'ins', 'dup', 'bnd', 'inv' Args: alt_len(int) ref_len(int) category(str) svtype(str) Returns: subcategory(str)
juraj-google-style
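Usage sketch, assuming `get_sub_category` as defined above:

print(get_sub_category(alt_len=1, ref_len=1, category='snv'))               # snv
print(get_sub_category(alt_len=3, ref_len=1, category='indel'))             # indel
print(get_sub_category(alt_len=1, ref_len=1, category='sv', svtype='del'))  # del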
def from_flag(cls, flagname, flag_values, other_flag_values=None): first_module = flag_values.find_module_defining_flag(flagname, default='<unknown>') if (other_flag_values is None): second_module = _helpers.get_calling_module() else: second_module = other_flag_values.find_module_defining_flag(flagname, default='<unknown>') flag_summary = flag_values[flagname].help msg = ("The flag '%s' is defined twice. First from %s, Second from %s. Description from first occurrence: %s" % (flagname, first_module, second_module, flag_summary)) return cls(msg)
Creates a DuplicateFlagError by providing flag name and values. Args: flagname: str, the name of the flag being redefined. flag_values: FlagValues, the FlagValues instance containing the first definition of flagname. other_flag_values: FlagValues, if it is not None, it should be the FlagValues object where the second definition of flagname occurs. If it is None, we assume that we're being called when attempting to create the flag a second time, and we use the module calling this one as the source of the second definition. Returns: An instance of DuplicateFlagError.
codesearchnet
def log_uuid(self, uuid):
    if uuid not in self.uuids and uuid in uuids:
        self.uuids[uuid] = uuids[uuid].describe()
Logs the object with the specified `uuid` to `self.uuids` if possible. Args: uuid (str): string value of :meth:`uuid.uuid4` value for the object.
codesearchnet
def list_partitions(self, table, retry=DEFAULT_RETRY): table = _table_arg_to_table_ref(table, default_project=self.project) meta_table = self.get_table(TableReference(self.dataset(table.dataset_id, project=table.project), ('%s$__PARTITIONS_SUMMARY__' % table.table_id))) subset = [col for col in meta_table.schema if (col.name == 'partition_id')] return [row[0] for row in self.list_rows(meta_table, selected_fields=subset, retry=retry)]
List the partitions in a table. Arguments: table (Union[ \ :class:`~google.cloud.bigquery.table.Table`, \ :class:`~google.cloud.bigquery.table.TableReference`, \ str, \ ]): The table or reference from which to get partition info retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: List[str]: A list of the partition ids present in the partitioned table
codesearchnet
def __getitem__(self, pkg_id):
    if pkg_id in self.__reg_software:
        return self.__reg_software[pkg_id]
    else:
        raise KeyError(pkg_id)
Returns information on a package. Args: pkg_id (str): Package Id of the software/component Returns: dict or list: List if ``version_only`` is ``True`` otherwise dict
juraj-google-style
def prepare_for_model(self, ids: List[int], pair_ids: Optional[List[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy, None]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding: padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs) pair = bool(pair_ids is not None) len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 if return_token_type_ids and (not add_special_tokens): raise ValueError('Asking to return token_type_ids while setting add_special_tokens to False results in an undefined behavior. Please set add_special_tokens to True or set return_token_type_ids to None.') if return_overflowing_tokens and truncation_strategy == TruncationStrategy.LONGEST_FIRST and (pair_ids is not None): raise ValueError('Not possible to return overflowing tokens for pair of sequences with the `longest_first`. Please select another truncation strategy than `longest_first`, for instance `only_second` or `only_first`.') if return_token_type_ids is None: return_token_type_ids = 'token_type_ids' in self.model_input_names if return_attention_mask is None: return_attention_mask = 'attention_mask' in self.model_input_names encoded_inputs = {} total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) overflowing_tokens = [] if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and (total_len > max_length): ids, pair_ids, overflowing_tokens = self.truncate_sequences(ids, pair_ids=pair_ids, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride) if return_overflowing_tokens: encoded_inputs['overflowing_tokens'] = overflowing_tokens encoded_inputs['num_truncated_tokens'] = total_len - max_length if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) encoded_inputs['input_ids'] = sequence if return_token_type_ids: encoded_inputs['token_type_ids'] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs['special_tokens_mask'] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs['special_tokens_mask'] = [0] * len(sequence) self._eventual_warn_about_too_long_sequence(encoded_inputs['input_ids'], max_length, verbose) if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad(encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask) if return_length: encoded_inputs['length'] = len(encoded_inputs['input_ids']) batch_outputs = BatchEncoding(encoded_inputs, 
tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis) return batch_outputs
Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Please Note, for *pair_ids* different than `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a combination of arguments will raise an error. Args: ids (`List[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. pair_ids (`List[int]`, *optional*): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods.
github-repos
def to_json_string(self) -> str:
    dictionary = self.to_dict()
    for key, value in dictionary.items():
        if isinstance(value, np.ndarray):
            dictionary[key] = value.tolist()
    _processor_class = dictionary.pop('_processor_class', None)
    if _processor_class is not None:
        dictionary['processor_class'] = _processor_class
    return json.dumps(dictionary, indent=2, sort_keys=True) + '\n'
Serializes this instance to a JSON string. Returns: `str`: String containing all the attributes that make up this feature_extractor instance in JSON format.
github-repos
def binary_cross_entropy_loss_with_logits(x, target, name=None): with tf.name_scope(name, 'binary_cross_entropy_with_logits', [x, target]) as scope: x.get_shape().assert_is_compatible_with(target.get_shape()) neg_softplus = -tf.nn.softplus(-x) return -tf.add(tf.multiply(target, neg_softplus), tf.multiply(1 - target, -x + neg_softplus), name=scope)
Calculates the binary cross entropy between sigmoid(x) and target. Expects unscaled logits. Do not pass in results of sigmoid operation. Args: x: the calculated pre-sigmoid values target: the desired values. name: the name for this op, defaults to binary_cross_entropy_with_logits Returns: -(target * -softplus(-x) + (1-target) * (-x - softplus(-x))) Raises: ValueError: If shapes are incompatible.
juraj-google-style
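A NumPy check of the identity quoted in the docstring against the usual cross-entropy form, using a numerically stable softplus (the scalar values are arbitrary):

import numpy as np

def softplus(v):
    # log(1 + exp(v)), computed stably.
    return max(v, 0.0) + np.log1p(np.exp(-abs(v)))

x, target = 0.3, 1.0
loss = -(target * -softplus(-x) + (1 - target) * (-x - softplus(-x)))
p = 1.0 / (1.0 + np.exp(-x))
print(np.isclose(loss, -(target * np.log(p) + (1 - target) * np.log(1 - p))))  # True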
def forward(self, layer_input): bsz, length, emb_size = layer_input.size() layer_input = layer_input.reshape(-1, emb_size) _, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input) expert_inputs = layer_input[batch_index] hidden_states = self.input_linear(expert_inputs, expert_size) chunked_hidden_states = hidden_states.chunk(2, dim=-1) hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1] expert_outputs = self.output_linear(hidden_states, expert_size) expert_outputs = expert_outputs * batch_gates[:, None] zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device) layer_output = zeros.index_add(0, batch_index, expert_outputs) layer_output = layer_output.view(bsz, length, self.input_size) return (layer_output, router_logits)
Forward pass of the mixture of experts layer. Args: layer_input (Tensor): Input tensor. Returns: Tensor: Output tensor. Tensor: Router logits.
github-repos
def get_model_schema_and_features(model_dir):
    schema_file = os.path.join(model_dir, 'assets.extra', 'schema.json')
    schema = json.loads(file_io.read_file_to_string(schema_file))
    features_file = os.path.join(model_dir, 'assets.extra', 'features.json')
    features_config = json.loads(file_io.read_file_to_string(features_file))
    return schema, features_config
Get a local model's schema and features config. Args: model_dir: local or GCS path of a model. Returns: A tuple of schema (list) and features config (dict).
juraj-google-style
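A short usage sketch for the helper above. The GCS path is hypothetical, and only the return types stated in the docstring (a schema list and a features dict) are assumed.

```python
# Works for local paths as well, since file_io handles both.
schema, features_config = get_model_schema_and_features('gs://my-bucket/census_model')

print(schema)            # list describing the model's input columns
print(features_config)   # dict mapping features to their transform configs
```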
def claim(self, file_readers):
    (prefix_to_reader, unclaimed_readers) = self._find_strelka_files(file_readers)
    prefix_by_patients = self._split_prefix_by_patient(prefix_to_reader)
    self._validate_vcf_readers(prefix_by_patients)
    vcf_readers = self._create_vcf_readers(prefix_to_reader)
    return (unclaimed_readers, vcf_readers)
Recognizes and claims Strelka VCFs from the set of all input VCFs. Each defined caller has a chance to evaluate and claim all the incoming files as something that it can process. Args: file_readers: the collection of currently unclaimed files Returns: A tuple of unclaimed readers and StrelkaVcfReaders.
codesearchnet
def __init__(self, listener, dispatcher):
    logger.info('Creating %s', ClearlyServer.__name__)
    self.listener = listener
    self.dispatcher = dispatcher
Constructs a server instance. Args: listener (EventListener): the object that listens and keeps celery events dispatcher (StreamingDispatcher): the mechanism to dispatch data to clients
juraj-google-style
def automatic_linemode(divisions, ibz):
    kpoints = list()
    labels = list()
    for path in ibz.kpath["path"]:
        kpoints.append(ibz.kpath["kpoints"][path[0]])
        labels.append(path[0])
        for i in range(1, len(path) - 1):
            kpoints.append(ibz.kpath["kpoints"][path[i]])
            labels.append(path[i])
            kpoints.append(ibz.kpath["kpoints"][path[i]])
            labels.append(path[i])
        kpoints.append(ibz.kpath["kpoints"][path[-1]])
        labels.append(path[-1])

    return Kpoints("Line_mode KPOINTS file",
                   style=Kpoints.supported_modes.Line_mode,
                   coord_type="Reciprocal",
                   kpts=kpoints,
                   labels=labels,
                   num_kpts=int(divisions))
Convenient static constructor for a KPOINTS object in line_mode, with the number of k-points along each high-symmetry line set by `divisions`.

Args:
    divisions: Parameter determining the number of k-points along each high-symmetry line.
    ibz: HighSymmKpath object (pymatgen.symmetry.bandstructure)

Returns:
    Kpoints object
juraj-google-style
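A hedged usage sketch against the public pymatgen API, assuming the constructor above is `Kpoints.automatic_linemode`. The POSCAR file name is hypothetical and the import paths reflect recent pymatgen releases.

```python
from pymatgen.core import Structure
from pymatgen.io.vasp.inputs import Kpoints
from pymatgen.symmetry.bandstructure import HighSymmKpath

structure = Structure.from_file("POSCAR")          # hypothetical local structure file
kpath = HighSymmKpath(structure)                   # builds the high-symmetry path
kpts = Kpoints.automatic_linemode(divisions=20, ibz=kpath)
kpts.write_file("KPOINTS")                         # line-mode KPOINTS for a band-structure run
```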
def _CopyFromDateTimeValues(self, date_time_values):
    year = date_time_values.get('year', 0)
    month = date_time_values.get('month', 0)
    day_of_month = date_time_values.get('day_of_month', 0)
    hours = date_time_values.get('hours', 0)
    minutes = date_time_values.get('minutes', 0)
    seconds = date_time_values.get('seconds', 0)

    self._normalized_timestamp = None
    self._number_of_seconds = self._GetNumberOfSecondsFromElements(
        year, month, day_of_month, hours, minutes, seconds)
    self._time_elements_tuple = (
        year, month, day_of_month, hours, minutes, seconds)

    self.is_local_time = False
Copies time elements from date and time values. Args: date_time_values (dict[str, int]): date and time values, such as year, month, day of month, hours, minutes, seconds, microseconds.
juraj-google-style
def fit(self, x, augment=False, rounds=1, seed=None):
    x = np.asarray(x, dtype=self.dtype)
    if x.ndim != 4:
        raise ValueError('Input to `.fit()` should have rank 4. Got array with shape: ' + str(x.shape))
    if x.shape[self.channel_axis] not in {1, 3, 4}:
        warnings.warn('Expected input to be images (as Numpy array) following the data format convention "' + self.data_format + '" (channels on axis ' + str(self.channel_axis) + '), i.e. expected either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. However, it was passed an array with shape ' + str(x.shape) + ' (' + str(x.shape[self.channel_axis]) + ' channels).')
    if seed is not None:
        np.random.seed(seed)

    x = np.copy(x)
    if self.rescale:
        x *= self.rescale
    if augment:
        ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=self.dtype)
        for r in range(rounds):
            for i in range(x.shape[0]):
                ax[i + r * x.shape[0]] = self.random_transform(x[i])
        x = ax

    if self.featurewise_center:
        self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
        broadcast_shape = [1, 1, 1]
        broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
        self.mean = np.reshape(self.mean, broadcast_shape)
        x -= self.mean

    if self.featurewise_std_normalization:
        self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
        broadcast_shape = [1, 1, 1]
        broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
        self.std = np.reshape(self.std, broadcast_shape)
        x /= self.std + 1e-06

    if self.zca_whitening:
        n = len(x)
        flat_x = np.reshape(x, (n, -1))
        u, s, _ = np.linalg.svd(flat_x.T, full_matrices=False)
        s_inv = np.sqrt(n) / (s + self.zca_epsilon)
        self.zca_whitening_matrix = (u * s_inv).dot(u.T)
Fits the data generator to some sample data.

This computes the internal data stats related to the data-dependent transformations, based on an array of sample data. Only required if `featurewise_center` or `featurewise_std_normalization` or `zca_whitening` are set to `True`. When `rescale` is set to a value, rescaling is applied to sample data before computing the internal data stats.

Args:
    x: Sample data. Should have rank 4. In case of grayscale data, the channels axis should have value 1, in case of RGB data, it should have value 3, and in case of RGBA data, it should have value 4.
    augment: Boolean (default: False). Whether to fit on randomly augmented samples.
    rounds: Int (default: 1). If using data augmentation (`augment=True`), this is how many augmentation passes over the data to use.
    seed: Int (default: None). Random seed.
github-repos
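A hedged usage sketch with the public Keras `ImageDataGenerator` API; the random arrays below stand in for a real image dataset.

```python
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Fake dataset: 100 RGB images of 32x32 pixels, rank-4 as required by fit().
x_train = np.random.rand(100, 32, 32, 3).astype('float32')
y_train = np.random.randint(0, 10, size=(100,))

datagen = ImageDataGenerator(featurewise_center=True,
                             featurewise_std_normalization=True)
datagen.fit(x_train, augment=False, seed=42)

# The fitted mean/std are then applied when batches are generated.
batch_x, batch_y = next(datagen.flow(x_train, y_train, batch_size=16))
```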
def add_one(self, url: str, url_properties: Optional[URLProperties]=None, url_data: Optional[URLData]=None): self.add_many([AddURLInfo(url, url_properties, url_data)])
Add a single URL to the table. Args: url: The URL to be added url_properties: Additional values to be saved url_data: Additional data to be saved
juraj-google-style
def norm(self, coords: Vector3Like, frac_coords: bool = True) -> float: return np.sqrt(self.dot(coords, coords, frac_coords=frac_coords))
Compute the norm of vector(s). Args: coords: Array-like object with the coordinates. frac_coords: Boolean stating whether the vector corresponds to fractional or cartesian coordinates. Returns: one-dimensional `numpy` array.
juraj-google-style
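A short usage sketch assuming the method above is pymatgen's `Lattice.norm`; the cubic lattice parameter is arbitrary.

```python
from pymatgen.core import Lattice

lattice = Lattice.cubic(4.2)
# Length of the [1/2, 1/2, 1/2] fractional vector, in Angstroms (~3.64).
print(lattice.norm([0.5, 0.5, 0.5]))
# Same vector expressed directly in cartesian coordinates.
print(lattice.norm([2.1, 2.1, 2.1], frac_coords=False))
```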
def _safe_close(self, sess: session.Session):
    try:
        sess.close()
    except Exception:
        pass
Closes a session without raising an exception. Just like sess.close() but ignores exceptions. Args: sess: A `Session`.
github-repos
def show_inputs(self, varnames=None, nids=None, wslice=None, stream=sys.stdout):
    if varnames is not None:
        varnames = [s.strip() for s in list_strings(varnames)]

        dlist = collections.defaultdict(list)
        for task in self.select_tasks(nids=nids, wslice=wslice):
            dstruct = task.input.structure.as_dict(fmt="abivars")

            for vname in varnames:
                value = task.input.get(vname, None)
                if value is None:
                    value = dstruct.get(vname, None)
                if value is not None:
                    dlist[vname].append((task, value))

        for vname in varnames:
            tv_list = dlist[vname]
            if not tv_list:
                stream.write("[%s]: Found 0 tasks with this variable\n" % vname)
            else:
                stream.write("[%s]: Found %s tasks with this variable\n" % (vname, len(tv_list)))
                for i, (task, value) in enumerate(tv_list):
                    stream.write(" %s --> %s\n" % (str(value), task))
            stream.write("\n")

    else:
        lines = []
        for task in self.select_tasks(nids=nids, wslice=wslice):
            s = task.make_input(with_header=True)

            if task.deps:
                s += "\n\nDependencies:\n" + "\n".join(str(dep) for dep in task.deps)
            else:
                s += "\n\nDependencies: None"

            lines.append(2*"\n" + 80 * "=" + "\n" + s + 2*"\n")

        stream.writelines(lines)
Print the input of the tasks to the given stream. Args: varnames: List of Abinit variables. If not None, only the variable in varnames are selected and printed. nids: List of node identifiers. By defaults all nodes are shown wslice: Slice object used to select works. stream: File-like object, Default: sys.stdout
juraj-google-style
def eval(self, session=None): raise NotImplementedError
In a session, computes and returns the value of this variable. This is not a graph construction method, it does not add ops to the graph. This convenience method requires a session where the graph containing this variable has been launched. If no session is passed, the default session is used. See `tf.compat.v1.Session` for more information on launching a graph and on sessions. ```python v = tf.Variable([1, 2]) init = tf.compat.v1.global_variables_initializer() with tf.compat.v1.Session() as sess: sess.run(init) # Usage passing the session explicitly. print(v.eval(sess)) # Usage with the default session. The 'with' block # above makes 'sess' the default session. print(v.eval()) ``` Args: session: The session to use to evaluate this variable. If none, the default session is used. Returns: A numpy `ndarray` with a copy of the value of this variable.
github-repos
def _ParseMRUListKey(self, parser_mediator, registry_key, codepage='cp1252'):
    try:
        mrulist = self._ParseMRUListValue(registry_key)
    except (ValueError, errors.ParseError) as exception:
        parser_mediator.ProduceExtractionWarning(
            'unable to parse MRUList value with error: {0!s}'.format(exception))
        return

    if not mrulist:
        return

    values_dict = {}
    found_terminator = False
    for entry_index, entry_letter in enumerate(mrulist):
        if entry_letter == 0:
            break

        if found_terminator:
            parser_mediator.ProduceExtractionWarning((
                'found additional MRUList entries after terminator in key: '
                '{0:s}.').format(registry_key.path))
            found_terminator = False

        entry_letter = chr(entry_letter)

        value_string = self._ParseMRUListEntryValue(
            parser_mediator, registry_key, entry_index, entry_letter,
            codepage=codepage)

        value_text = 'Index: {0:d} [MRU Value {1:s}]'.format(
            entry_index + 1, entry_letter)

        values_dict[value_text] = value_string

    event_data = windows_events.WindowsRegistryEventData()
    event_data.key_path = registry_key.path
    event_data.offset = registry_key.offset
    event_data.regvalue = values_dict
    event_data.source_append = self._SOURCE_APPEND

    event = time_events.DateTimeValuesEvent(
        registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Extract event objects from a MRUList Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. codepage (Optional[str]): extended ASCII string codepage.
juraj-google-style
def get_sanger_unevaluated(store, institute_id, user_id):
    sanger_ordered_by_case = store.sanger_ordered(institute_id, user_id)
    unevaluated = []

    for item in sanger_ordered_by_case:
        case_id = item['_id']
        case_obj = store.case(case_id=case_id)
        if not case_obj:
            continue
        case_display_name = case_obj.get('display_name')

        varid_list = item['vars']

        unevaluated_by_case = {}
        unevaluated_by_case[case_display_name] = []

        for var_id in varid_list:
            variant_obj = store.variant(document_id=var_id, case_id=case_id)
            if variant_obj is None or variant_obj.get('sanger_ordered') is None or variant_obj.get('sanger_ordered') is False:
                continue
            validation = variant_obj.get('validation', 'not_evaluated')
            if validation in ['True positive', 'False positive']:
                continue
            unevaluated_by_case[case_display_name].append(variant_obj['_id'])

        if len(unevaluated_by_case[case_display_name]) > 0:
            unevaluated.append(unevaluated_by_case)

    return unevaluated
Get all variants for an institute having Sanger validations ordered but still not evaluated

Args:
    store(scout.adapter.MongoAdapter)
    institute_id(str)

Returns:
    unevaluated: a list that looks like this:
        [
            {'case1': [varID_1, varID_2, .., varID_n]},
            {'case2': [varID_1, varID_2, .., varID_n]}
        ],
        where the keys are case_ids and the values are lists of variants with Sanger ordered but not yet validated
juraj-google-style
def __init__(self, min_bundle_size=0,
             desired_bundle_size=DEFAULT_DESIRED_BUNDLE_SIZE,
             columns=None,
             with_filename=False,
             label='ReadAllFiles'):
    super().__init__()
    source_from_file = partial(
        _ParquetSource,
        min_bundle_size=min_bundle_size,
        columns=columns)
    self._read_all_files = filebasedsource.ReadAllFiles(
        True,
        CompressionTypes.UNCOMPRESSED,
        desired_bundle_size,
        min_bundle_size,
        source_from_file,
        with_filename)
    self.label = label
Initializes ``ReadAllFromParquet``.

Args:
    min_bundle_size: the minimum size in bytes, to be considered when splitting the input into bundles.
    desired_bundle_size: the desired size in bytes, to be considered when splitting the input into bundles.
    columns: list of columns that will be read from files. A column name may be a prefix of a nested field, e.g. 'a' will select 'a.b', 'a.c', and 'a.d.e'
    with_filename: If True, returns a Key Value with the key being the file name and the value being the actual data. If False, it only returns the data.
github-repos
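A hedged pipeline sketch using the public `apache_beam.io.parquetio.ReadAllFromParquet` transform; the GCS glob and the column names are hypothetical.

```python
import apache_beam as beam
from apache_beam.io.parquetio import ReadAllFromParquet

with beam.Pipeline() as p:
    _ = (
        p
        | 'FilePatterns' >> beam.Create(['gs://my-bucket/data/*.parquet'])
        | 'ReadParquet' >> ReadAllFromParquet(columns=['user_id', 'score'])
        | 'Show' >> beam.Map(print))   # each element is a dict per Parquet row
```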
def _infer_state_dtype(explicit_dtype, state):
    if explicit_dtype is not None:
        return explicit_dtype
    elif nest.is_nested(state):
        inferred_dtypes = [element.dtype for element in nest.flatten(state)]
        if not inferred_dtypes:
            raise ValueError(f'Unable to infer dtype from argument state={state}.')
        all_same = all((x == inferred_dtypes[0] for x in inferred_dtypes))
        if not all_same:
            raise ValueError(f'Argument state={state} has tensors of different inferred dtypes. Unable to infer a single representative dtype. Dtypes received: {inferred_dtypes}')
        return inferred_dtypes[0]
    else:
        return state.dtype
Infer the dtype of an RNN state. Args: explicit_dtype: explicitly declared dtype or None. state: RNN's hidden state. Must be a Tensor or a nested iterable containing Tensors. Returns: dtype: inferred dtype of hidden state. Raises: ValueError: if `state` has heterogeneous dtypes or is empty.
github-repos
def root(self) -> bytes:
    retry_interval = ADB_ROOT_RETRY_ATTEMPT_INTERVAL_SEC
    for attempt in range(ADB_ROOT_RETRY_ATTEMPTS):
        try:
            return self._exec_adb_cmd('root', args=None, shell=False, timeout=None, stderr=None)
        except AdbError as e:
            if attempt + 1 < ADB_ROOT_RETRY_ATTEMPTS:
                logging.debug('Retry the command "%s" since Error "%s" occurred.' %
                              (utils.cli_cmd_to_string(e.cmd), e.stderr.decode('utf-8').strip()))
                time.sleep(retry_interval)
                retry_interval *= 2
            else:
                raise e
Enables ADB root mode on the device. This method will retry to execute the command `adb root` when an AdbError occurs, since sometimes the error `adb: unable to connect for root: closed` is raised when executing `adb root` immediately after the device is booted to OS. Returns: A string that is the stdout of root command. Raises: AdbError: If the command exit code is not 0.
github-repos
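A hedged usage sketch assuming the method above is Mobly's `AdbProxy.root`, reached through an `AndroidDevice` controller; the serial number is hypothetical.

```python
from mobly.controllers import android_device

ad = android_device.AndroidDevice('ABC1234XYZ')   # hypothetical serial
out = ad.adb.root()   # retried internally if adb is not ready right after boot
print(out.decode('utf-8').strip())
```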
def links(res: requests.models.Response, search: str=None, pattern: str=None) -> list:
    hrefs = [link.to_text() for link in find_all_links(res.text)]
    if search:
        hrefs = [href for href in hrefs if (search in href)]
    if pattern:
        hrefs = [href for href in hrefs if re.findall(pattern, href)]
    return list(set(hrefs))
Get the links of the page. Args: res (requests.models.Response): The response of the page. search (str, optional): Defaults to None. Search the links you want. pattern (str, optional): Defaults to None. Search the links use a regex pattern. Returns: list: All the links of the page.
codesearchnet
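A short usage sketch of the helper above; the URL and the filter values are hypothetical.

```python
import requests

res = requests.get('https://example.com/docs')
print(links(res))                        # every unique link on the page
print(links(res, search='download'))     # only links containing "download"
print(links(res, pattern=r'\.pdf$'))     # only links ending in .pdf
```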
def draw(self, filename, color=True):
    verify_dependencies(['pgv'])
    if (not hasattr(self, '_results')):
        raise RuntimeError('Graph cannot be drawn before it is executed. Try calling run() first.')
    g = pgv.AGraph(directed=True)
    g.node_attr['colorscheme'] = 'set312'
    for elem in self._results:
        if (not hasattr(elem, 'history')):
            continue
        log = elem.history
        while log:
            source_from = (log.parent[6] if log.parent else '')
            s_node = hash((source_from, log[2]))
            s_color = stim_list.index(log[2])
            s_color = ((s_color % 12) + 1)
            t_node = hash((log[6], log[7]))
            t_style = ('filled,' if color else '')
            t_style += ('dotted' if log.implicit else '')
            if log[6].endswith('Extractor'):
                t_color = '#0082c8'  # placeholder hex value; the original color literal was truncated at the '#'
            elif log[6].endswith('Filter'):
                t_color = '#e6194b'  # placeholder hex value; the original color literal was truncated at the '#'
            else:
                t_color = '#3cb44b'  # placeholder hex value; the original color literal was truncated at the '#'
            r_node = hash((log[6], log[5]))
            r_color = stim_list.index(log[5])
            r_color = ((r_color % 12) + 1)
            if color:
                g.add_node(s_node, label=log[2], shape='ellipse', style='filled', fillcolor=s_color)
                g.add_node(t_node, label=log[6], shape='box', style=t_style, fillcolor=t_color)
                g.add_node(r_node, label=log[5], shape='ellipse', style='filled', fillcolor=r_color)
            else:
                g.add_node(s_node, label=log[2], shape='ellipse')
                g.add_node(t_node, label=log[6], shape='box', style=t_style)
                g.add_node(r_node, label=log[5], shape='ellipse')
            g.add_edge(s_node, t_node, style=t_style)
            g.add_edge(t_node, r_node, style=t_style)
            log = log.parent
    g.draw(filename, prog='dot')
Render a plot of the graph via pygraphviz. Args: filename (str): Path to save the generated image to. color (bool): If True, will color graph nodes based on their type, otherwise will draw a black-and-white graph.
codesearchnet
def _get_or_load_domain(self, domain):
    if isinstance(domain, six.string_types):
        if (domain in self.domains):
            return self.domains[domain]
        elif exists(domain):
            with open(domain, 'r') as fobj:
                domain = json.load(fobj)
        else:
            raise ValueError("No domain could be found/loaded from input '{}'; value must be either the name of an existing Domain, or a valid path to a configuration file.".format(domain))

    name = domain['name']
    if (name in self.domains):
        msg = "Domain with name '{}' already exists; returning existing Domain configuration.".format(name)
        warnings.warn(msg)
        return self.domains[name]

    entities = domain.get('entities', [])
    domain = Domain(domain)
    for e in entities:
        self.add_entity(domain=domain, **e)
    self.domains[name] = domain
    return self.domains[name]
Return a domain if one already exists, or create a new one if not. Args: domain (str, dict): Can be one of: - The name of the Domain to return (fails if none exists) - A path to the Domain configuration file - A dictionary containing configuration information
codesearchnet
def inter_data_operation(self, axis, func, other):
    if axis:
        partitions = self.row_partitions
        other_partitions = other.row_partitions
    else:
        partitions = self.column_partitions
        other_partitions = other.column_partitions
    func = self.preprocess_func(func)
    result = np.array(
        [
            partitions[i].apply(
                func,
                num_splits=self._compute_num_partitions(),
                other_axis_partition=other_partitions[i],
            )
            for i in range(len(partitions))
        ]
    )
    return self.__constructor__(result) if axis else self.__constructor__(result.T)
Apply a function that requires two BaseFrameManager objects. Args: axis: The axis to apply the function over (0 - rows, 1 - columns) func: The function to apply other: The other BaseFrameManager object to apply func to. Returns: A new BaseFrameManager object, the type of object that called this.
juraj-google-style
def chain_break_frequency(samples, embedding):
    counts = {v: 0 for v in embedding}
    total = 0
    for sample in samples:
        for (v, chain) in iteritems(embedding):
            vals = [sample[u] for u in chain]
            if (not _all_equal(vals)):
                counts[v] += 1
        total += 1
    return {v: (counts[v] / total) for v in embedding}
Determines the frequency of chain breaks in the given samples.

Args:
    samples (iterable): An iterable of samples where each sample is a dict of the form {v: val, ...} where v is a variable in the target graph and val is the associated value as determined by a binary quadratic model sampler.
    embedding (dict): The mapping from the source graph to the target graph. Should be of the form {v: {s, ...}, ...} where v is a variable in the source model and s is a variable in the target model.

Returns:
    dict: The frequency of chain breaks in the form {v: f, ...} where v is a variable in the source graph and f is the fraction of chains that were broken, as a float.
codesearchnet
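A self-contained example of the function above with hand-written samples; the variable names and chains are made up, and the printed result assumes Python 3 true division.

```python
# Two logical variables, each embedded as a two-qubit chain.
samples = [
    {0: 1, 1: 1, 2: -1, 3: -1},   # both chains intact
    {0: 1, 1: -1, 2: -1, 3: -1},  # chain for variable 'a' is broken
]
embedding = {'a': {0, 1}, 'b': {2, 3}}

print(chain_break_frequency(samples, embedding))
# -> {'a': 0.5, 'b': 0.0}
```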
def removeRouter(self, xRouterId):
    print '%s call removeRouter' % self.port
    print xRouterId
    routerId = ''
    routerId = self.__convertRlocToRouterId(xRouterId)
    print routerId

    if routerId == None:
        print 'no matched xRouterId'
        return False

    try:
        cmd = 'releaserouterid %s' % routerId
        return self.__sendCommand(cmd)[0] != 'Fail'
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger('removeRouter() Error: ' + str(e))
kick router with a given router id from the Thread Network Args: xRouterId: a given router id in hex format Returns: True: successful to remove the router from the Thread Network False: fail to remove the router from the Thread Network
juraj-google-style
def _CreateAnalysisPlugins(self, options):
    if not self._analysis_plugins:
        return {}

    analysis_plugins = analysis_manager.AnalysisPluginManager.GetPluginObjects(
        self._analysis_plugins)

    for analysis_plugin in analysis_plugins.values():
        helpers_manager.ArgumentHelperManager.ParseOptions(options, analysis_plugin)

    return analysis_plugins
Creates the analysis plugins. Args: options (argparse.Namespace): command line arguments. Returns: dict[str, AnalysisPlugin]: analysis plugins and their names.
codesearchnet
def documentation(self, level='first'):
    docs = (t.docstring for t in (list(self.conjunction.terms) + [self])
            if (t.docstring is not None))
    if (level.lower() == 'first'):
        doc = next(docs, None)
    elif (level.lower() == 'top'):
        doc = list(docs)
    return doc
Return the documentation of the type. By default, this is the first docstring on a top-level term. By setting *level* to `"top"`, the list of all docstrings on top-level terms is returned, including the type's `docstring` value, if not `None`, as the last item. The docstring for the type itself is available via :attr:`TypeDefinition.docstring`. Args: level (str): `"first"` or `"top"` Returns: a single docstring or a list of docstrings
codesearchnet
def create(self, domain, type_name, search_command, body): return self._request(domain, type_name, search_command, 'POST', body)
Create entry in ThreatConnect Data Store Args: domain (string): One of 'local', 'organization', or 'system'. type_name (string): This is a free form index type name. The ThreatConnect API will use this resource verbatim. search_command (string): Search command to pass to ES. body (str): JSON serialized data.
codesearchnet
def _convert_ddb_list_to_list(conversion_list):
    ret_list = []
    for v in conversion_list:
        for v1 in v:
            ret_list.append(v[v1])

    return ret_list
Given a dynamodb list, it will return a python list without the dynamodb datatypes Args: conversion_list (dict): a dynamodb list which includes the datatypes Returns: list: Returns a sanitized list without the dynamodb datatypes
juraj-google-style
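A self-contained example of the helper above. Note that DynamoDB number attributes stay strings, because only the type wrappers are stripped, not the values.

```python
# Each element is a DynamoDB-typed attribute dict; only the raw values are kept.
ddb_list = [{'S': 'alpha'}, {'S': 'beta'}, {'N': '42'}]
print(_convert_ddb_list_to_list(ddb_list))
# -> ['alpha', 'beta', '42']
```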
def transform_absolute_coords(self, width, height):
    if (self.type != EventType.POINTER_MOTION_ABSOLUTE):
        raise AttributeError(_wrong_meth.format(self.type))
    abs_x = self._libinput.libinput_event_pointer_get_absolute_x_transformed(
        self._handle, width)
    abs_y = self._libinput.libinput_event_pointer_get_absolute_y_transformed(
        self._handle, height)
    return (abs_x, abs_y)
Return the current absolute coordinates of the pointer event, transformed to screen coordinates. For pointer events that are not of type :attr:`~libinput.constant.EventType.POINTER_MOTION_ABSOLUTE`, this method raises :exc:`AttributeError`. Args: width (int): The current output screen width. height (int): The current output screen height. Returns: (float, float): The current absolute (x, y) coordinates transformed to a screen coordinates. Raises: AttributeError
codesearchnet
def getJsonFromApi(view, request):
    jsonText = view(request)
    jsonText = json.loads(jsonText.content.decode('utf-8'))
    return jsonText
Return json from querying Web Api Args: view: django view function. request: http request object got from django. Returns: json format dictionary
codesearchnet
def force_checkpoint_conversion(value=True):
    global _FORCE_CHECKPOINT_CONVERSION
    _FORCE_CHECKPOINT_CONVERSION = value
Forces checkpoint to use the new implementation.

The new checkpoint implementation is changing the saved metadata slightly, and therefore may break forward compatibility in newly saved checkpoints. This means:
    - Previous versions of TensorFlow may not be able to load new checkpoints.
    - Backwards compatibility is unchanged: Old checkpoints can still be loaded.

TensorFlow guarantees 3 weeks of forward compatibility, so this flag will be removed in the future weeks, after which checkpoint conversion will happen by default.

**What happens when this flag is enabled?**

The checkpoint will be saved with different metadata, meaning that previous versions of TensorFlow (<=2.10) will not be able to load this checkpoint.

Args:
    value: Boolean value, whether or not to force checkpoint conversion to the new implementation.
github-repos
def nack(self, items):
    self.modify_ack_deadline(
        [requests.ModAckRequest(ack_id=item.ack_id, seconds=0) for item in items]
    )
    self.drop([requests.DropRequest(*item) for item in items])
Explicitly deny receipt of messages. Args: items(Sequence[NackRequest]): The items to deny.
juraj-google-style
def get_help_usage(command):
    if not command:
        doc = get_primary_command_usage()
    elif command in ('-a', '--all'):
        subcommands = [k for k in settings.subcommands if k is not None]
        available_commands = subcommands + ['help']
        command_doc = '\nAvailable commands:\n{}\n'.format(
            '\n'.join(' {}'.format(c) for c in sorted(available_commands)))
        doc = get_primary_command_usage(command_doc)
    elif command.startswith('-'):
        raise ValueError("Unrecognized option '{}'.".format(command))
    elif command in settings.subcommands:
        subcommand = settings.subcommands[command]
        doc = format_usage(subcommand.__doc__)
    docopt.docopt(doc, argv=('--help',))
Print out a help message and exit the program. Args: command: If a command value is supplied then print the help message for the command module if available. If the command is '-a' or '--all', then print the standard help message but with a full list of available commands. Raises: ValueError: Raised if the help message is requested for an invalid command or an unrecognized option is passed to help.
juraj-google-style
def _choose_random_edge(self, edges: Set[EDGE]) -> Optional[EDGE]:
    if edges:
        index = self._rand.randint(len(edges))
        for e in edges:
            if not index:
                return e
            index -= 1
    return None
Picks random edge from the set of edges. Args: edges: Set of edges to pick from. Returns: Random edge from the supplied set, or None for empty set.
juraj-google-style
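A self-contained sketch of the same pick-the-k-th-element technique as a free function, assuming `self._rand` in the method above is a `numpy.random.RandomState`; the names here are illustrative.

```python
from typing import Optional, Set, Tuple
import numpy as np

EDGE = Tuple[int, int]

def choose_random_edge(rand: np.random.RandomState, edges: Set[EDGE]) -> Optional[EDGE]:
    if not edges:
        return None
    index = rand.randint(len(edges))  # uniform in [0, len(edges))
    for e in edges:
        if not index:
            return e
        index -= 1
    return None

rand = np.random.RandomState(0)
print(choose_random_edge(rand, {(0, 1), (1, 2), (2, 3)}))
```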