Columns:
code: string (lengths 20 to 4.93k)
docstring: string (lengths 33 to 1.27k)
source: string (3 classes)
def from_der(der):
    d = get_bytes(der)

    if len(d) < 8:
        raise ValueError("DER signature string is too short.")
    if len(d) > 72:
        raise ValueError("DER signature string is too long.")
    if d[0] != 0x30:
        raise ValueError("DER signature does not start with 0x30.")
    if d[1] != len(d[2:]):
        raise ValueError("DER signature length incorrect.")

    total_length = d[1]

    if d[2] != 0x02:
        raise ValueError("DER signature no 1st int marker.")
    if d[3] <= 0 or d[3] > (total_length - 7):
        raise ValueError("DER signature incorrect R length.")

    rlen = d[3]
    s_magic_index = 4 + rlen
    rb = d[4:s_magic_index]

    if rb[0] & 0x80 != 0:
        raise ValueError("DER signature R is negative.")
    if len(rb) > 1 and rb[0] == 0 and rb[1] & 0x80 != 0x80:
        raise ValueError("DER signature R is excessively padded.")

    r = int.from_bytes(rb, 'big')

    if d[s_magic_index] != 0x02:
        raise ValueError("DER signature no 2nd int marker.")

    slen_index = s_magic_index + 1
    slen = d[slen_index]

    if slen <= 0 or slen > len(d) - (slen_index + 1):
        raise ValueError("DER signature incorrect S length.")

    sb = d[slen_index + 1:]

    if sb[0] & 0x80 != 0:
        raise ValueError("DER signature S is negative.")
    if len(sb) > 1 and sb[0] == 0 and sb[1] & 0x80 != 0x80:
        raise ValueError("DER signature S is excessively padded.")

    s = int.from_bytes(sb, 'big')

    if r < 1 or r >= bitcoin_curve.n:
        raise ValueError("DER signature R is not between 1 and N - 1.")
    if s < 1 or s >= bitcoin_curve.n:
        raise ValueError("DER signature S is not between 1 and N - 1.")

    return Signature(r, s)
Decodes a Signature that was DER-encoded. Args: der (bytes or str): The DER encoding to be decoded. Returns: Signature: The deserialized signature.
juraj-google-style
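A minimal sketch of the DER layout that `from_der` parses, built by hand for illustration. The `to_der_sketch` helper below is hypothetical and not part of the library above:

def to_der_sketch(r: int, s: int) -> bytes:
    """Illustrative DER encoding of an ECDSA (r, s) pair: 0x30 <len> 0x02 <rlen> R 0x02 <slen> S."""
    def encode_int(x: int) -> bytes:
        b = x.to_bytes((x.bit_length() + 7) // 8 or 1, 'big')
        if b[0] & 0x80:  # prepend 0x00 so the integer is not read as negative
            b = b'\x00' + b
        return bytes([0x02, len(b)]) + b

    body = encode_int(r) + encode_int(s)
    return bytes([0x30, len(body)]) + body

# Round-trip check against the parser's structural rules:
der = to_der_sketch(0x1234, 0x5678)
assert der[0] == 0x30 and der[1] == len(der[2:])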
def WriteEventBody(self, event):
    inode = getattr(event, 'inode', None)
    if inode is None:
        event.inode = 0

    json_dict = self._JSON_SERIALIZER.WriteSerializedDict(event)
    json_string = json.dumps(json_dict, sort_keys=True)

    if self._event_counter != 0:
        self._output_writer.Write(', ')

    line = '"event_{0:d}": {1:s}\n'.format(self._event_counter, json_string)
    self._output_writer.Write(line)

    self._event_counter += 1
Writes the body of an event object to the output. Args: event (EventObject): event.
juraj-google-style
def _ReadFileEntries(self, file_object):
    self._file_entries = {}

    file_offset = 0
    while file_offset < self._file_size or self._file_size == 0:
        file_entry = self._ReadFileEntry(file_object, file_offset)
        file_offset += file_entry.size

        if file_entry.path == 'TRAILER!!!':
            break

        if file_entry.path in self._file_entries:
            continue

        self._file_entries[file_entry.path] = file_entry
Reads the file entries from the cpio archive. Args: file_object (FileIO): file-like object.
juraj-google-style
def _GenerateSection(self, problem_type):
    if problem_type == transitfeed.TYPE_WARNING:
        dataset_problems = self._dataset_warnings
        heading = 'Warnings'
    else:
        dataset_problems = self._dataset_errors
        heading = 'Errors'

    if not dataset_problems:
        return ''

    prefix = '<h2 class="issueHeader">%s:</h2>' % heading
    dataset_sections = []
    for dataset_merger, problems in dataset_problems.items():
        dataset_sections.append('<h3>%s</h3><ol>%s</ol>' % (
            dataset_merger.FILE_NAME, '\n'.join(problems)))
    body = '\n'.join(dataset_sections)
    return prefix + body
Generate a listing of the given type of problems. Args: problem_type: The type of problem. This is one of the problem type constants from transitfeed. Returns: The generated HTML as a string.
codesearchnet
def get_service_name(self, service_id: str) -> str:
    if not self._manager:
        raise RuntimeError('Only the Swarm manager node can retrieve all'
                           ' the services details.')
    service = self._client.services.get(service_id)
    return service.name
Get the name of the docker service. Only manager nodes can retrieve service details. Args: service_id (str): ID of the docker service. Returns: str: name of the docker service
juraj-google-style
def terminate(self, nowait=False):
    logger.debug("Acquiring lock for service termination")
    with self.lock:
        logger.debug("Terminating service")
        if not self.listener:
            logger.warning("Service already stopped.")
            return
        self.listener.stop(nowait)
        try:
            if not nowait:
                self._post_log_batch()
        except Exception:
            if self.error_handler:
                self.error_handler(sys.exc_info())
            else:
                raise
        finally:
            self.queue = None
            self.listener = None
Finalize and stop service Args: nowait: set to True to terminate immediately and skip processing messages still in the queue
juraj-google-style
def get_axis(self, undefined=np.zeros(3)):
    tolerance = 1e-17
    self._normalise()
    norm = np.linalg.norm(self.vector)
    if norm < tolerance:
        return undefined
    return self.vector / norm
Get the axis or vector about which the quaternion rotation occurs For a null rotation (a purely real quaternion), the rotation angle will always be `0`, but the rotation axis is undefined. It is by default assumed to be `[0, 0, 0]`. Params: undefined: [optional] specify the axis vector that should define a null rotation. This is geometrically meaningless, and could be any of an infinite set of vectors, but can be specified if the default (`[0, 0, 0]`) causes undesired behaviour. Returns: A Numpy unit 3-vector describing the Quaternion object's axis of rotation. Note: This feature only makes sense when referring to a unit quaternion. Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.
codesearchnet
def is_method_call(func, types=(), methods=()):
    return (isinstance(func, astroid.BoundMethod)
            and isinstance(func.bound, astroid.Instance)
            and (func.bound.name in types if types else True)
            and (func.name in methods if methods else True))
Determines if a BoundMethod node represents a method call. Args: func (astroid.BoundMethod): The BoundMethod AST node to check. types (Optional[String]): Optional sequence of caller type names to restrict the check. methods (Optional[String]): Optional sequence of method names to restrict the check. Returns: bool: True if the node represents a method call for the given type and method names, False otherwise.
codesearchnet
def get(self, file_path, ref, **kwargs):
    file_path = file_path.replace('/', '%2F')
    return GetMixin.get(self, file_path, ref=ref, **kwargs)
Retrieve a single file. Args: file_path (str): Path of the file to retrieve ref (str): Name of the branch, tag or commit **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabGetError: If the file could not be retrieved Returns: object: The generated RESTObject
codesearchnet
def compute_effective_axis_dimension(dimension: int, fixed_dimension: int,
                                     num_token_to_add: int = 0) -> int:
    if dimension <= 0:
        dimension = fixed_dimension
    dimension -= num_token_to_add
    return dimension
Compute the effective axis dimension, substituting a dynamic (<= 0) dimension with a fixed one. Args: dimension: The requested axis dimension; values <= 0 denote a dynamic axis. fixed_dimension: The fallback dimension used when `dimension` is dynamic. num_token_to_add: Number of tokens to subtract from the effective dimension. Returns: The effective axis dimension.
github-repos
def GetMACBRepresentationFromDescriptions(self, timestamp_descriptions):
    macb_representation = []

    if ('mtime' in timestamp_descriptions or
            definitions.TIME_DESCRIPTION_MODIFICATION in timestamp_descriptions):
        macb_representation.append('M')
    else:
        macb_representation.append('.')

    if ('atime' in timestamp_descriptions or
            definitions.TIME_DESCRIPTION_LAST_ACCESS in timestamp_descriptions):
        macb_representation.append('A')
    else:
        macb_representation.append('.')

    if ('ctime' in timestamp_descriptions or
            definitions.TIME_DESCRIPTION_CHANGE in timestamp_descriptions):
        macb_representation.append('C')
    else:
        macb_representation.append('.')

    if ('crtime' in timestamp_descriptions or
            definitions.TIME_DESCRIPTION_CREATION in timestamp_descriptions):
        macb_representation.append('B')
    else:
        macb_representation.append('.')

    return ''.join(macb_representation)
Determines the MACB representation from the timestamp descriptions. MACB representation is a shorthand for representing one or more of modification, access, change, birth timestamp descriptions as the letters "MACB" or a "." if the corresponding timestamp is not set. Note that this is an output format shorthand and does not guarantee that the timestamps represent the same occurrence. Args: timestamp_descriptions (list[str]): timestamp descriptions, which are defined in definitions.TIME_DESCRIPTIONS. Returns: str: MACB representation.
codesearchnet
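A standalone sketch of the MACB shorthand described above, using plain string keys in place of the plaso `definitions` constants (an assumption made for illustration):

def macb_sketch(descriptions):
    """Map timestamp descriptions to the 4-character MACB shorthand."""
    flags = [('M', 'mtime'), ('A', 'atime'), ('C', 'ctime'), ('B', 'crtime')]
    return ''.join(letter if key in descriptions else '.' for letter, key in flags)

assert macb_sketch(['mtime', 'crtime']) == 'M..B'
assert macb_sketch([]) == '....'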
def default_storable(python_type, exposes=None, version=None,
                     storable_type=None, peek=default_peek):
    if not exposes:
        for extension in expose_extensions:
            try:
                exposes = extension(python_type)
            except (SystemExit, KeyboardInterrupt):
                raise
            except:
                pass
            else:
                if exposes:
                    break
    if not exposes:
        raise AttributeError('`exposes` required for type: {!r}'.format(python_type))
    return Storable(python_type, key=storable_type,
                    handlers=StorableHandler(version=version, exposes=exposes,
                                             poke=poke(exposes),
                                             peek=peek(python_type, exposes)))
Default mechanics for building the storable instance for a type. Arguments: python_type (type): type. exposes (iterable): attributes exposed by the type. version (tuple): version number. storable_type (str): universal string identifier for the type. peek (callable): peeking routine. Returns: Storable: storable instance.
codesearchnet
def docs(recreate, gen_index, run_doctests):
    build_dir = conf.get_path('build_dir', '.build')
    docs_dir = conf.get_path('docs.path', 'docs')
    refdoc_paths = conf.get('docs.reference', [])

    docs_html_dir = conf.get_path('docs.out', os.path.join(docs_dir, 'html'))
    docs_tests_dir = conf.get_path('docs.tests_out', os.path.join(docs_dir, 'doctest'))
    docs_build_dir = os.path.join(build_dir, 'docs')

    if recreate:
        for path in (docs_html_dir, docs_build_dir):
            if os.path.exists(path):
                log.info('<91>Deleting <94>{}'.format(path))
                shutil.rmtree(path)

    if refdoc_paths:
        gen_ref_docs(gen_index)
    else:
        log.err('Not generating any reference documentation - '
                'No docs.reference specified in config')

    with conf.within_proj_dir(docs_dir):
        log.info('Building docs')
        shell.run('sphinx-build -b html -d {build} {docs} {out}'.format(
            build=docs_build_dir, docs=docs_dir, out=docs_html_dir))

        if run_doctests:
            log.info('Running doctests')
            shell.run('sphinx-build -b doctest -d {build} {docs} {out}'.format(
                build=docs_build_dir, docs=docs_dir, out=docs_tests_dir))

        log.info('You can view the docs by browsing to <34>file:
Build the documentation for the project. Args: recreate (bool): If set to **True**, the build and output directories will be cleared prior to generating the docs. gen_index (bool): If set to **True**, it will generate top-level index file for the reference documentation. run_doctests (bool): Set to **True** if you want to run doctests after the documentation is generated. pretend (bool): If set to **True**, do not actually execute any shell commands, just print the command that would be executed.
codesearchnet
def get_output_slot(element_name):
    _, output_slot = parse_node_or_tensor_name(element_name)
    return output_slot if output_slot is not None else 0
Get the output slot number from the name of a graph element. If element_name is a node name without output slot at the end, 0 will be assumed. Args: element_name: (`str`) name of the graph element in question. Returns: (`int`) output slot number.
github-repos
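A self-contained sketch of the `node_name:output_slot` convention this helper relies on; `parse_name_sketch` is a stand-in for the real `parse_node_or_tensor_name`:

def parse_name_sketch(element_name):
    """Split 'node:slot' into (node, slot); slot is None when absent."""
    if ':' in element_name:
        node, slot = element_name.rsplit(':', 1)
        return node, int(slot)
    return element_name, None

assert parse_name_sketch('hidden/MatMul:1') == ('hidden/MatMul', 1)
assert parse_name_sketch('hidden/MatMul') == ('hidden/MatMul', None)  # slot defaults to 0 downstream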
def compute_mu(L_aug, Y, k, p):
    n, d = L_aug.shape
    assert Y.shape[0] == n

    mu = np.zeros((d, k))
    for y in range(1, k + 1):
        L_y = L_aug[Y == y]
        mu[:, y - 1] = L_y.sum(axis=0) / L_y.shape[0]
    return mu
Given label matrix L_aug and labels Y, compute the true mu params. Args: L_aug: (np.array {0,1}) [n, d] The augmented (indicator) label matrix Y: (np.array int) [n] The true labels in {1,...,k} k: (int) Cardinality p: (np.array float) [k] The class balance
juraj-google-style
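A runnable toy check of the `compute_mu` logic (numpy only; expected values worked out by hand in the comments):

import numpy as np

L_aug = np.array([[1, 0],
                  [1, 1],
                  [0, 1],
                  [0, 1]])
Y = np.array([1, 1, 2, 2])

# Class 1 rows: [[1,0],[1,1]] -> column means [1.0, 0.5]
# Class 2 rows: [[0,1],[0,1]] -> column means [0.0, 1.0]
mu = np.zeros((2, 2))
for y in (1, 2):
    L_y = L_aug[Y == y]
    mu[:, y - 1] = L_y.sum(axis=0) / L_y.shape[0]

assert np.allclose(mu, [[1.0, 0.0], [0.5, 1.0]])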
def Query(self, query, parameters=None):
    if parameters:
        self._cursor.execute(query, parameters)
    else:
        self._cursor.execute(query)

    return self._cursor.fetchall()
Queries the database file. Args: query (str): SQL query. parameters (Optional[dict|tuple]): query parameters. Returns: list[sqlite3.Row]: rows resulting from the query.
codesearchnet
def filter_data(self, field, filter_value, filter_operator, field_converter=None):
    data = []
    if self._indexes.get(field) is not None:
        data = self._index_filter(self._indexes.get(field), filter_value,
                                  filter_operator, field_converter)
    return set(data)
Filter the data using the provided field, value, and operator. Args: field (string): The field to filter on. filter_value (string | list): The value to match. filter_operator (string): The operator for comparison. field_converter (method): A method used to convert the field before comparison. Returns: (set): Set of matching data objects
codesearchnet
def refresh_access_token(self, refresh_token):
    request = self._get_request()
    response = request.post(self.OAUTH_TOKEN_URL, {
        "grant_type": "refresh_token",
        "refresh_token": refresh_token
    })
    self.auth = HSAccessTokenAuth.from_response(response)
    return self.auth.access_token
Refreshes the current access token. Gets a new access token, updates client auth and returns it. Args: refresh_token (str): Refresh token to use Returns: The new access token
juraj-google-style
def delete_all(self, filter=None, timeout=-1):
    return self._client.delete_all(filter=filter, timeout=timeout)
Delete an SNMPv3 user matching the user name specified in the filter. The user will be deleted only if it has no associated destinations. Args: filter: A general filter/query string to narrow the list of items returned. The default is no filter - all resources are returned. timeout: Timeout in seconds; waits for task completion by default. Returns: bool: Indicates if the resource was successfully deleted.
juraj-google-style
def encode(cls, command):
    args = []
    for arg in command.args:
        if not isinstance(arg, str):
            arg = str(arg)

        if (',' in arg or arg.startswith(' ') or arg.endswith(' ')
                or arg.startswith('hex:')):
            arg = 'hex:{}'.format(hexlify(arg.encode('utf-8')).decode('utf-8'))

        args.append(arg)

    argstr = ''
    if len(args) > 0:
        argstr = ' {' + ','.join(args) + '}'

    return command.name + argstr
Encode a command as an unambiguous string. Args: command (Command): The command to encode. Returns: str: The encoded command
codesearchnet
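A toy run of the hex-escaping rule above; standard-library only, with the expected output computed by hand:

from binascii import hexlify

# 'a,b' contains a comma, so the encoder would hex-escape it;
# plain args like 'fast' pass through unchanged.
arg = 'a,b'
escaped = 'hex:{}'.format(hexlify(arg.encode('utf-8')).decode('utf-8'))
assert escaped == 'hex:612c62'  # 0x61=',a' 0x2c=',' 0x62='b'
# A command named 'set_mode' with args ['fast', 'a,b'] would encode as:
#   "set_mode {fast,hex:612c62}"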
def repeat(self, count=None, name=None) -> 'DatasetV2':
    from tensorflow.python.data.ops import repeat_op
    return repeat_op._repeat(self, count, name)
Repeats this dataset so each original value is seen `count` times.

>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> dataset = dataset.repeat(3)
>>> [a.item() for a in dataset.as_numpy_iterator()]
[1, 2, 3, 1, 2, 3, 1, 2, 3]

Note: If the input dataset depends on global state (e.g. a random number
generator) or its output is non-deterministic (e.g. because of upstream
`shuffle`), then different repetitions may produce different elements.

Args:
    count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
        number of times the dataset should be repeated. The default behavior
        (if `count` is `None` or `-1`) is for the dataset to be repeated
        indefinitely.
    name: (Optional.) A name for the tf.data operation.

Returns:
    A new `Dataset` with the transformation applied as described above.
github-repos
def _wrap_section(source, width):
    if _get_section('usage', source):
        return _wrap_usage_section(source, width)

    if _is_definition_section(source):
        return _wrap_definition_section(source, width)

    lines = inspect.cleandoc(source).splitlines()
    paragraphs = (textwrap.wrap(line, width, replace_whitespace=False)
                  for line in lines)
    return '\n'.join(line for paragraph in paragraphs for line in paragraph)
Wrap the given section string to the current terminal size. Intelligently wraps the section string to the given width. When wrapping section lines, it auto-adjusts the spacing between terms and definitions. It also adjusts commands to fit the correct length for the arguments. Args: source: The section string to wrap. width: The width to wrap the section to. Returns: The wrapped section string.
juraj-google-style
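The fallback branch wraps each cleaned line independently; a quick runnable illustration with the standard library:

import inspect
import textwrap

source = """\
    A long line that will be wrapped to a narrow width for display.
    Short line."""

lines = inspect.cleandoc(source).splitlines()
wrapped = '\n'.join(
    out_line
    for line in lines
    for out_line in textwrap.wrap(line, 24, replace_whitespace=False))
print(wrapped)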
def sign_hash(private_key, hash, hash_algo):
    hash_algo = _hash_algorithms[hash_algo]
    return get_privatekey(private_key).sign(
        hash, padding.PKCS1v15(), utils.Prehashed(hash_algo))
Sign the given hash with the given private key. Args: private_key (str): PEM encoded private key hash (byte str): hash to sign hash_algo (str): name of hash algorithm used Returns: byte string representing the signature
codesearchnet
def aggregate_scores(weights: typing.List[str]) -> typing.Dict[str, typing.Dict[str, float]]:
    decision_trees: typing.Dict[str, typing.Dict[str, float]] = dict()
    for row in weights:
        row = row.strip()
        if not row:
            continue
        feature = row.split('\t')[0]
        feature_group, feature_content = feature.split(':', 1)
        score = float(row.split('\t')[1])
        decision_trees.setdefault(feature_group, {})
        decision_trees[feature_group].setdefault(feature_content, 0)
        decision_trees[feature_group][feature_content] += score
    return decision_trees
Exports the model by aggregating the weight scores. Args: weights (List[str]): The lines of the exported weight score file. Returns: model (Dict[str, Dict[str, float]]): The exported model.
github-repos
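A runnable toy run of the aggregation, with made-up weight rows in the `group:content<TAB>score` shape the parser expects:

rows = [
    'BW1:ab\t0.25',
    'BW1:ab\t0.50',
    'BW2:cd\t-0.10',
    '',  # blank lines are skipped
]
model = {}
for row in rows:
    row = row.strip()
    if not row:
        continue
    feature, score = row.split('\t')
    group, content = feature.split(':', 1)
    model.setdefault(group, {}).setdefault(content, 0)
    model[group][content] += float(score)

assert model == {'BW1': {'ab': 0.75}, 'BW2': {'cd': -0.1}}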
def init_app(self, app):
    self._key = app.config.get(CONF_KEY) or getenv(CONF_KEY)
    if not self._key:
        return

    self._endpoint_uri = app.config.get(CONF_ENDPOINT_URI)
    sender = AsynchronousSender(self._endpoint_uri)
    queue = AsynchronousQueue(sender)
    self._channel = TelemetryChannel(None, queue)

    self._init_request_logging(app)
    self._init_trace_logging(app)
    self._init_exception_logging(app)
Initializes the extension for the provided Flask application. Args: app (flask.Flask): the Flask application for which to initialize the extension.
juraj-google-style
def submit_snl(self, snl):
    try:
        snl = snl if isinstance(snl, list) else [snl]
        jsondata = [s.as_dict() for s in snl]
        payload = {"snl": json.dumps(jsondata, cls=MontyEncoder)}
        response = self.session.post("{}/snl/submit".format(self.preamble),
                                     data=payload)
        if response.status_code in [200, 400]:
            resp = json.loads(response.text, cls=MontyDecoder)
            if resp["valid_response"]:
                if resp.get("warning"):
                    warnings.warn(resp["warning"])
                return resp['inserted_ids']
            else:
                raise MPRestError(resp["error"])
        raise MPRestError("REST error with status code {} and error {}"
                          .format(response.status_code, response.text))
    except Exception as ex:
        raise MPRestError(str(ex))
Submits a list of StructureNL to the Materials Project site. .. note:: As of now, this MP REST feature is open only to a select group of users. Opening up submissions to all users is being planned for the future. Args: snl (StructureNL/[StructureNL]): A single StructureNL, or a list of StructureNL objects Returns: A list of inserted submission ids. Raises: MPRestError
juraj-google-style
def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:
    known_args, pipeline_args = parse_known_args(argv)
    pipeline_options = PipelineOptions(pipeline_args)
    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session

    class OnnxNoBatchModelHandler(OnnxModelHandlerNumpy):
        def batch_elements_kwargs(self):
            return {'max_batch_size': 1}

    model_handler = OnnxNoBatchModelHandler(model_uri=known_args.model_uri)

    pipeline = test_pipeline
    if not test_pipeline:
        pipeline = beam.Pipeline(options=pipeline_options)

    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')

    text = pipeline | 'ReadSentences' >> beam.io.ReadFromText(known_args.input)
    text_and_tokenized_text_tuple = (
        text
        | 'FilterEmptyLines' >> beam.ParDo(filter_empty_lines)
        | 'TokenizeSentence' >> beam.Map(lambda x: tokenize_sentence(x, tokenizer)))
    output = (
        text_and_tokenized_text_tuple
        | 'PyTorchRunInference' >> RunInference(KeyedModelHandler(model_handler))
        | 'ProcessOutput' >> beam.ParDo(PostProcessor()))
    _ = output | 'WriteOutput' >> beam.io.WriteToText(
        known_args.output, shard_name_template='', append_trailing_newlines=True)

    result = pipeline.run()
    result.wait_until_finish()
    return result
Args: argv: Command line arguments defined for this example. save_main_session: Used for internal testing. test_pipeline: Used for internal testing. Returns: The PipelineResult of the completed run.
github-repos
def vectorize(self, token_list):
    vector_list = [self.__collection.tf_idf(token, self.__collection)
                   for token in token_list]
    return vector_list
Vectorize a token list with TF-IDF scores. Args: token_list: The list of tokens. Returns: [vector of token, vector of token, vector of token, ...]
juraj-google-style
def xmon_op_from_proto_dict(proto_dict: Dict) -> ops.Operation:
    def raise_missing_fields(gate_name: str):
        raise ValueError('{} missing required fields: {}'.format(gate_name, proto_dict))

    param = _parameterized_value_from_proto_dict
    qubit = devices.GridQubit.from_proto_dict

    if 'exp_w' in proto_dict:
        exp_w = proto_dict['exp_w']
        if ('half_turns' not in exp_w or 'axis_half_turns' not in exp_w
                or 'target' not in exp_w):
            raise_missing_fields('ExpW')
        return ops.PhasedXPowGate(
            exponent=param(exp_w['half_turns']),
            phase_exponent=param(exp_w['axis_half_turns']),
        ).on(qubit(exp_w['target']))
    elif 'exp_z' in proto_dict:
        exp_z = proto_dict['exp_z']
        if 'half_turns' not in exp_z or 'target' not in exp_z:
            raise_missing_fields('ExpZ')
        return ops.Z(qubit(exp_z['target'])) ** param(exp_z['half_turns'])
    elif 'exp_11' in proto_dict:
        exp_11 = proto_dict['exp_11']
        if ('half_turns' not in exp_11 or 'target1' not in exp_11
                or 'target2' not in exp_11):
            raise_missing_fields('Exp11')
        return (ops.CZ(qubit(exp_11['target1']), qubit(exp_11['target2']))
                ** param(exp_11['half_turns']))
    elif 'measurement' in proto_dict:
        meas = proto_dict['measurement']
        invert_mask = cast(Tuple[Any, ...], ())
        if 'invert_mask' in meas:
            invert_mask = tuple(json.loads(x) for x in meas['invert_mask'])
        if 'key' not in meas or 'targets' not in meas:
            raise_missing_fields('Measurement')
        return ops.MeasurementGate(
            num_qubits=len(meas['targets']),
            key=meas['key'],
            invert_mask=invert_mask,
        ).on(*[qubit(q) for q in meas['targets']])
    else:
        raise ValueError('invalid operation: {}'.format(proto_dict))
Convert the proto dictionary to the corresponding operation. See protos in api/google/v1 for specification of the protos. Args: proto_dict: Dictionary representing the proto. Keys are always strings, but values may be types correspond to a raw proto type or another dictionary (for messages). Returns: The operation. Raises: ValueError if the dictionary does not contain required values corresponding to the proto.
codesearchnet
def get_block_hash(self, height, id=None, endpoint=None):
    return self._call_endpoint(GET_BLOCK_HASH, params=[height], id=id, endpoint=endpoint)
Get hash of a block by its height Args: height: (int) height of the block to lookup id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
juraj-google-style
def output_file(self, filename, title='Bokeh Plot', mode='cdn', root_dir=None):
    self._file = {
        'filename': filename,
        'resources': Resources(mode=mode, root_dir=root_dir),
        'title': title,
    }

    if os.path.isfile(filename):
        log.info("Session output file '%s' already exists, will be overwritten." % filename)
Configure output to a standalone HTML file.

Calling ``output_file`` does not clear the effects of any other calls to
``output_notebook``, etc. It adds an additional output destination
(publishing to HTML files). Any other active output modes continue to be
active.

Args:
    filename (str) : a filename for saving the HTML document
    title (str, optional) : a title for the HTML document
    mode (str, optional) : how to include BokehJS (default: ``'cdn'``)
        One of: ``'inline'``, ``'cdn'``, ``'relative(-dev)'`` or
        ``'absolute(-dev)'``. See :class:`~bokeh.resources.Resources`
        for more details.
    root_dir (str, optional) : root dir to use for absolute resources
        (default: None). This value is ignored for other resource types,
        e.g. ``INLINE`` or ``CDN``.

.. warning::
    The specified output file will be overwritten on every save, e.g.,
    every time ``show()`` or ``save()`` is called.
codesearchnet
def _io_write_test_preprocessor(test_spec: dict, expected: List[str],
                                env: TestEnvironment):
    if (pipeline := test_spec.get('pipeline', None)):
        for transform in pipeline.get('transforms', []):
            if transform.get('type', '').startswith('WriteTo'):
                transform['type'] = 'LogForTesting'
                transform['config'] = {
                    k: v
                    for k, v in transform.get('config', {}).items()
                    if k.startswith('__') or k == 'error_handling'
                }
    return test_spec
Preprocessor for tests that involve writing to IO. This preprocessor replaces any WriteTo transform with a LogForTesting transform. This allows the test to verify the data being written without actually writing to an external system. Args: test_spec: The dictionary representation of the YAML pipeline specification. expected: A list of strings representing the expected output of the pipeline. env: The TestEnvironment object providing utilities for creating temporary files. Returns: The modified test_spec dictionary with WriteTo transforms replaced.
github-repos
def get_all_resource_ids_in_datastore(configuration=None):
    resource = Resource(configuration=configuration)
    success, result = resource._read_from_hdx(
        'datastore', '_table_metadata', 'resource_id',
        Resource.actions()['datastore_search'], limit=10000)
    resource_ids = list()
    if not success:
        logger.debug(result)
    else:
        for record in result['records']:
            resource_ids.append(record['name'])
    return resource_ids
Get list of resources that have a datastore returning their ids. Args: configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: List[str]: List of resource ids that are in the datastore
codesearchnet
def _from_base_type(self, value):
    if not value:
        return None
    try:
        credentials = client.Credentials.new_from_json(value)
    except ValueError:
        credentials = None
    return credentials
Converts our stored JSON string back to the desired type. Args: value: A value from the datastore to be converted to the desired type. Returns: A deserialized Credentials (or subclass) object, else None if the value can't be parsed.
codesearchnet
def clinsig_query(self, query, mongo_query):
    LOG.debug('clinsig is a query parameter')
    trusted_revision_level = ['mult', 'single', 'exp', 'guideline']
    rank = []
    str_rank = []
    clnsig_query = {}

    for item in query['clinsig']:
        rank.append(int(item))
        rank.append(CLINSIG_MAP[int(item)])
        str_rank.append(CLINSIG_MAP[int(item)])

    if query.get('clinsig_confident_always_returned') == True:
        LOG.debug('add CLINSIG filter with trusted_revision_level')
        clnsig_query = {
            'clnsig': {
                '$elemMatch': {
                    '$or': [
                        {'$and': [
                            {'value': {'$in': rank}},
                            {'revstat': {'$in': trusted_revision_level}},
                        ]},
                        {'$and': [
                            {'value': re.compile('|'.join(str_rank))},
                            {'revstat': re.compile('|'.join(trusted_revision_level))},
                        ]},
                    ]
                }
            }
        }
    else:
        LOG.debug('add CLINSIG filter for rank: %s' % ', '.join(str(query['clinsig'])))
        clnsig_query = {
            'clnsig': {
                '$elemMatch': {
                    '$or': [
                        {'value': {'$in': rank}},
                        {'value': re.compile('|'.join(str_rank))},
                    ]
                }
            }
        }

    return clnsig_query
Add clinsig filter values to the mongo query object Args: query(dict): a dictionary of query filters specified by the users mongo_query(dict): the query that is going to be submitted to the database Returns: clinsig_query(dict): a dictionary with clinsig key-values
codesearchnet
def ValidateToken(token, targets):
    def GetSubjectForError():
        if len(targets) == 1:
            return list(targets)[0]
        return None

    if not token:
        raise access_control.UnauthorizedAccess(
            'Must give an authorization token for %s' % targets,
            subject=GetSubjectForError())

    token.CheckExpiry()

    if not token.username:
        raise access_control.UnauthorizedAccess(
            'Must specify a username for access to %s.' % targets,
            subject=GetSubjectForError())

    return True
Does basic token validation. Args: token: User's credentials as access_control.ACLToken. targets: List of targets that were meant to be accessed by the token. This is used for logging purposes only. Returns: True if token is valid. Raises: access_control.UnauthorizedAccess: if token is not valid. ValueError: if targets list is empty.
codesearchnet
def collection(self, *collection_path):
    if len(collection_path) == 1:
        path = collection_path[0].split(_helpers.DOCUMENT_PATH_DELIMITER)
    else:
        path = collection_path

    return CollectionReference(*path, client=self)
Get a reference to a collection.

For a top-level collection:

.. code-block:: python

    >>> client.collection('top')

For a sub-collection:

.. code-block:: python

    >>> client.collection('mydocs/doc/subcol')
    >>> # is the same as
    >>> client.collection('mydocs', 'doc', 'subcol')

Sub-collections can be nested deeper in a similar fashion.

Args:
    collection_path (Tuple[str, ...]): Can either be

        * A single ``/``-delimited path to a collection
        * A tuple of collection path segments

Returns:
    ~.firestore_v1beta1.collection.CollectionReference: A reference
    to a collection in the Firestore database.
codesearchnet
def __build_cmd_maps(cls):
    cmd_map_all = {}
    cmd_map_visible = {}
    cmd_map_internal = {}
    for name in dir(cls):
        obj = getattr(cls, name)
        if iscommand(obj):
            for cmd in getcommands(obj):
                if cmd in cmd_map_all.keys():
                    raise PyShellError(
                        "The command '{}' already has cmd method '{}', "
                        "cannot register a second method '{}'.".format(
                            cmd, cmd_map_all[cmd], obj.__name__))
                cmd_map_all[cmd] = obj.__name__
                if isvisiblecommand(obj):
                    cmd_map_visible[cmd] = obj.__name__
                if isinternalcommand(obj):
                    cmd_map_internal[cmd] = obj.__name__
    return cmd_map_all, cmd_map_visible, cmd_map_internal
Build the mapping from command names to method names. One command name maps to at most one method. Multiple command names can map to the same method. Only used by __init__() to initialize self._cmd_map. MUST NOT be used elsewhere. Returns: A tuple (cmd_map, hidden_cmd_map, internal_cmd_map).
codesearchnet
def get_seed(op_seed):
    eager = context.executing_eagerly()
    if eager:
        global_seed = context.global_seed()
    else:
        global_seed = ops.get_default_graph().seed

    if global_seed is not None:
        if op_seed is None:
            if hasattr(ops.get_default_graph(), '_seed_used'):
                ops.get_default_graph()._seed_used = True
            if eager:
                op_seed = context.internal_operation_seed()
            else:
                op_seed = _graph_to_seed_dict.setdefault(ops.get_default_graph(), 0)
                _graph_to_seed_dict[ops.get_default_graph()] += 1
        seeds = (_truncate_seed(global_seed), _truncate_seed(op_seed))
    elif op_seed is not None:
        seeds = (DEFAULT_GRAPH_SEED, _truncate_seed(op_seed))
    else:
        seeds = (None, None)

    if seeds == (None, None) and config.is_op_determinism_enabled():
        raise RuntimeError(
            'Random ops require a seed to be set when determinism is enabled. '
            'Please set a seed before running the op, e.g. by calling '
            'tf.random.set_seed(1).')
    if seeds == (0, 0):
        return (0, _MAXINT32)
    return seeds
Returns the local seeds an operation should use given an op-specific seed. Given operation-specific seed, `op_seed`, this helper function returns two seeds derived from graph-level and op-level seeds. Many random operations internally use the two seeds to allow user to change the seed globally for a graph, or for only specific operations. For details on how the graph-level seed interacts with op seeds, see `tf.compat.v1.random.set_random_seed`. Args: op_seed: integer. Returns: A tuple of two integers that should be used for the local seed of this operation.
github-repos
def take_while(self, predicate):
    if self.closed():
        raise ValueError('Attempt to call take_while() on a closed Queryable.')

    if not is_callable(predicate):
        raise TypeError('take_while() parameter predicate={0} is not callable'.format(repr(predicate)))

    return self._create(self._generate_take_while_result(predicate))
Returns elements from the start while the predicate is True. Note: This method uses deferred execution. Args: predicate: A function returning True or False with which elements will be tested. Returns: A Queryable over the elements from the beginning of the source sequence for which predicate is True. Raises: ValueError: If the Queryable is closed() TypeError: If the predicate is not callable.
codesearchnet
def render(self, link_url, image_url, **kwargs):
    path = '%s/render' % self.path
    data = {'link_url': link_url, 'image_url': image_url}
    return self.gitlab.http_get(path, data, **kwargs)
Preview link_url and image_url after interpolation. Args: link_url (str): URL of the badge link image_url (str): URL of the badge image **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabRenderError: If the rendering failed Returns: dict: The rendering properties
codesearchnet
def put_archive(self, path, data):
    return self.client.api.put_archive(self.id, path, data)
Insert a file or folder in this container using a tar archive as source. Args: path (str): Path inside the container where the file(s) will be extracted. Must exist. data (bytes): tar data to be extracted Returns: (bool): True if the call succeeds. Raises: :py:class:`~docker.errors.APIError` If an error occurs.
codesearchnet
def __fetch_route53_zone_records(self, zone_id): route53 = self.session.client('route53') done = False nextName = nextType = None records = {} try: while not done: if nextName and nextType: response = route53.list_resource_record_sets( HostedZoneId=zone_id, StartRecordName=nextName, StartRecordType=nextType ) else: response = route53.list_resource_record_sets(HostedZoneId=zone_id) if response['IsTruncated']: nextName = response['NextRecordName'] nextType = response['NextRecordType'] else: done = True if 'ResourceRecordSets' in response: for record in response['ResourceRecordSets']: record_id = self._get_resource_hash(zone_id, record) if 'AliasTarget' in record: value = record['AliasTarget']['DNSName'] records[record_id] = { 'id': record_id, 'name': record['Name'].rstrip('.'), 'type': 'ALIAS', 'ttl': 0, 'value': [value] } else: value = [y['Value'] for y in record['ResourceRecords']] records[record_id] = { 'id': record_id, 'name': record['Name'].rstrip('.'), 'type': record['Type'], 'ttl': record['TTL'], 'value': value } return list(records.values()) finally: del route53
Return all resource records for a specific Route53 zone Args: zone_id (`str`): Name / ID of the hosted zone Returns: `list`: Resource records for the zone
juraj-google-style
def __sweeten(self, dumper: 'Dumper', class_: Type, node: Node) -> None: for base_class in class_.__bases__: if base_class in dumper.yaml_representers: logger.debug('Sweetening for class {}'.format( self.class_.__name__)) self.__sweeten(dumper, base_class, node) if hasattr(class_, 'yatiml_sweeten'): class_.yatiml_sweeten(node)
Applies the user's yatiml_sweeten() function(s), if any. Sweetening is done for the base classes first, then for the derived classes, down the hierarchy to the class we're constructing. Args: dumper: The dumper that is dumping this object. class_: The type of the object to be dumped. node: The node representing the object to be dumped.
juraj-google-style
def get_node_list(self) -> list:
    nodes = []
    if not self._manager:
        raise RuntimeError('Only the Swarm manager node can retrieve all the nodes.')

    node_list = self._client.nodes.list()
    for n_list in node_list:
        nodes.append(n_list.id)
    return nodes
Get a list of nodes. Only the manager nodes can retrieve all the nodes Returns: list, all the ids of the nodes in swarm
codesearchnet
def _create_single_feature_method(feature): fx_name = feature.name.lower() if "detection" in fx_name: fx_doc = "Perform {0}.".format(fx_name.replace("_", " ")) else: fx_doc = "Return {desc} information.".format(desc=fx_name.replace("_", " ")) fx_doc += feature_value = {"type": feature} def inner(self, image, max_results=None, retry=None, timeout=None, **kwargs): copied_features = feature_value.copy() if max_results is not None: copied_features["max_results"] = max_results request = dict(image=image, features=[copied_features], **kwargs) response = self.annotate_image(request, retry=retry, timeout=timeout) return response inner.__name__ = fx_name inner.__doc__ = fx_doc return inner
Return a function that will detect a single feature. Args: feature (enum): A specific feature defined as a member of :class:`~enums.Feature.Type`. Returns: function: A helper function to detect just that feature.
juraj-google-style
def log_warning(self, msg):
    if self.__logger:
        self.__logger.warning(msg)
    if self.__raise_exception_on_warning:
        raise RuntimeError(msg)
Log a warning if ``logger`` exists. Args: msg: Warning to log. Warning: Can raise a ``RuntimeError`` if this was asked in the constructor.
juraj-google-style
def save_aggregate_reports_to_kafka(self, aggregate_reports, aggregate_topic):
    if (type(aggregate_reports) == dict
            or type(aggregate_reports) == OrderedDict):
        aggregate_reports = [aggregate_reports]

    if len(aggregate_reports) < 1:
        return

    for report in aggregate_reports:
        report['date_range'] = self.generate_daterange(report)
        report = self.strip_metadata(report)

        for slice in report['records']:
            slice['date_range'] = report['date_range']
            slice['org_name'] = report['org_name']
            slice['org_email'] = report['org_email']
            slice['policy_published'] = report['policy_published']
            slice['report_id'] = report['report_id']
            logger.debug('Sending slice.')
            try:
                logger.debug('Saving aggregate report to Kafka')
                self.producer.send(aggregate_topic, slice)
            except UnknownTopicOrPartitionError:
                raise KafkaError('Kafka error: Unknown topic or partition on broker')
            except Exception as e:
                raise KafkaError('Kafka error: {0}'.format(e.__str__()))
            try:
                self.producer.flush()
            except Exception as e:
                raise KafkaError('Kafka error: {0}'.format(e.__str__()))
Saves aggregate DMARC reports to Kafka Args: aggregate_reports (list): A list of aggregate report dictionaries to save to Kafka aggregate_topic (str): The name of the Kafka topic
codesearchnet
def weCanCheckTheseDomains(email):
    # Known platforms that do not provide meaningful verification results.
    notWorking = ['@aol.com', '@bk.ru', '@breakthru.com', '@gmx.', '@hotmail.co',
                  '@inbox.com', '@latinmail.com', '@libero.it', '@mail.ru',
                  '@mail2tor.com', '@outlook.com', '@rambler.ru',
                  '@rocketmail.com', '@starmedia.com', '@ukr.net', '@yahoo.',
                  '@ymail.']

    for n in notWorking:
        if n in email:
            print("\t[*] Verification of '{}' aborted. Details:\n\t\t{}".format(
                general.warning(email),
                'This domain CANNOT be verified using mailfy.'))
            return False

    emailDomains = EMAIL_DOMAINS
    safe = False
    for e in EMAIL_DOMAINS:
        if e in email:
            safe = True
    if not safe:
        print("\t[*] Verification of '{}' aborted. Details:\n\t\t{}".format(
            general.warning(email),
            'This domain CANNOT be verified using mailfy.'))
        return False

    return True
Method that verifies if a domain can be safely verified. Args: ----- email: the email whose domain will be verified. Returns: -------- bool: it represents whether the domain can be verified.
codesearchnet
def new_cells(self, name=None, formula=None):
    return self._impl.new_cells(name, formula).interface
Create a cells in the space. Args: name: If omitted, the cells is named automatically ``CellsN``, where ``N`` is an available number. formula: The function to define the formula of the cells. Returns: The new cells.
codesearchnet
def get_attr_info(binary_view):
    global _ATTR_BASIC

    attr_type, attr_len, non_resident = _ATTR_BASIC.unpack(binary_view[:9])
    return AttrTypes(attr_type), attr_len, bool(non_resident)
Gets basic information from a binary stream to allow correct processing of the attribute header. This function allows the interpretation of the attribute type, attribute length and if the attribute is non resident. Args: binary_view (memoryview of bytearray) - A binary stream with the information of the attribute Returns: A tuple with the attribute type, the attribute length, in bytes, and whether the attribute is non-resident.
codesearchnet
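The header implied above is 9 bytes: a little-endian u32 type, a u32 length, and a one-byte non-resident flag. A standalone sketch; the `<2IB` struct format is an assumption consistent with the 9-byte slice, not taken from the library:

import struct

_ATTR_BASIC = struct.Struct('<2IB')  # type (u32), length (u32), non-resident flag (u8); size is 9 bytes

header = struct.pack('<2IB', 0x10, 96, 0)  # e.g. $STANDARD_INFORMATION, 96 bytes, resident
attr_type, attr_len, non_resident = _ATTR_BASIC.unpack(header)
assert (attr_type, attr_len, bool(non_resident)) == (0x10, 96, False)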
def get_namespaces(start=None, end=None):
    q = Namespace.query()
    if start is not None:
        q = q.filter(Namespace.key >= Namespace.key_for_namespace(start))
    if end is not None:
        q = q.filter(Namespace.key < Namespace.key_for_namespace(end))
    return [x.namespace_name for x in q]
Return all namespaces in the specified range. Args: start: only return namespaces >= start if start is not None. end: only return namespaces < end if end is not None. Returns: A list of namespace names between the (optional) start and end values.
juraj-google-style
def create_submission(self, user_id, institute_id): submission_obj = { 'status' : 'open', 'created_at' : datetime.now(), 'user_id' : user_id, 'institute_id' : institute_id } LOG.info("Creating a new clinvar submission for user '%s' and institute %s", user_id, institute_id) result = self.clinvar_submission_collection.insert_one(submission_obj) return result.inserted_id
Create an open clinvar submission for a user and an institute Args: user_id(str): a user ID institute_id(str): an institute ID returns: submission(obj): an open clinvar submission object
juraj-google-style
def expand_source_files(filenames, cwd=None):
    out = []
    for f in expand_paths(filenames, cwd):
        if os.path.isdir(f):
            out += collect_files(f, '.py')
        elif f.endswith('.py'):
            out.append(f)
    return sorted(set(out))
Expand a list of filenames passed in as sources. This is a helper function for handling command line arguments that specify a list of source files and directories. Any directories in filenames will be scanned recursively for .py files. Any files that do not end with ".py" will be dropped. Args: filenames: A list of filenames to process. cwd: An optional working directory to expand relative paths Returns: A list of sorted full paths to .py files
codesearchnet
def format_page(text):
    width = max(map(len, text.splitlines()))
    page = "+-" + "-" * width + "-+\n"
    for line in text.splitlines():
        page += "| " + line.ljust(width) + " |\n"
    page += "+-" + "-" * width + "-+\n"
    return page
Format the text for output adding ASCII frame around the text. Args: text (str): Text that needs to be formatted. Returns: str: Formatted string.
juraj-google-style
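A quick check of the frame `format_page` produces (assuming the function above is in scope):

page = format_page("hi\nthere!")
assert page == ("+--------+\n"
                "| hi     |\n"
                "| there! |\n"
                "+--------+\n")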
def assert_title(self, title, **kwargs):
    query = TitleQuery(title, **kwargs)

    @self.synchronize(wait=query.wait)
    def assert_title():
        if not query.resolves_for(self):
            raise ExpectationNotMet(query.failure_message)
        return True

    return assert_title()
Asserts that the page has the given title. Args: title (str | RegexObject): The string or regex that the title should match. **kwargs: Arbitrary keyword arguments for :class:`TitleQuery`. Returns: True Raises: ExpectationNotMet: If the assertion hasn't succeeded during the wait time.
codesearchnet
def _GetResolverHelper(cls, type_indicator):
    if not cls._resolver_helpers_manager:
        from dfvfs.resolver_helpers import manager
        cls._resolver_helpers_manager = manager.ResolverHelperManager

    return cls._resolver_helpers_manager.GetHelper(type_indicator)
Retrieves the path specification resolver helper for the specified type. Args: type_indicator (str): type indicator. Returns: ResolverHelper: a resolver helper.
juraj-google-style
def add_case(self, case, update=False): existing_case = self.case(case) if existing_case and not update: raise CaseError("Case {} already exists".format(case['case_id'])) if existing_case: self.db.case.find_one_and_replace( {'case_id': case['case_id']}, case, ) else: self.db.case.insert_one(case) return case
Add a case to the case collection If the case exists and update is False raise error. Args: db (MongoClient): A connection to the mongodb case (dict): A case dictionary update(bool): If existing case should be updated Returns: mongo_case_id(ObjectId)
juraj-google-style
def register_repeating_metric(self, metric_name, frequency, getter):
    l = task.LoopingCall(self._publish_repeating_metric, metric_name, getter)
    repeating_metric_handle = RepeatingMetricHandle(l, frequency)
    self._repeating_metric_handles.append(repeating_metric_handle)
    if self.running:
        repeating_metric_handle.start()
    return repeating_metric_handle
Record hits to a metric at a specified interval. Args: metric_name: The name of the metric to record with Carbon. frequency: The frequency with which to poll the getter and record the value with Carbon. getter: A function which takes no arguments and returns the value to record with Carbon. Returns: RepeatingMetricHandle instance. Call .stop() on it to stop recording the metric.
juraj-google-style
def count_true_positive(truth, recommend):
    tp = 0
    for r in recommend:
        if r in truth:
            tp += 1
    return tp
Count number of true positives from given sets of samples. Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: int: Number of true positives.
codesearchnet
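A runnable toy check (assuming `count_true_positive` above is in scope):

import numpy as np

truth = np.array([1, 3, 5])
recommend = np.array([5, 2, 1, 9])
assert count_true_positive(truth, recommend) == 2  # 5 and 1 are hits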
def __init__(self,
             caller: Caller[RequestT, ResponseT],
             timeout: Optional[float] = DEFAULT_TIMEOUT_SECS,
             should_backoff: Optional[ShouldBackOff] = None,
             repeater: Repeater = ExponentialBackOffRepeater(),
             cache: Optional[Cache] = None,
             throttler: PreCallThrottler = DefaultThrottler()):
    self._caller = caller
    self._timeout = timeout
    self._should_backoff = should_backoff
    if repeater:
        self._repeater = repeater
    else:
        self._repeater = NoOpsRepeater()
    self._cache = cache
    self._throttler = throttler
    self._batching_kwargs = self._caller.batch_elements_kwargs()
Instantiates a RequestResponseIO transform. Args: caller: an implementation of `Caller` object that makes call to the API. timeout (float): timeout value in seconds to wait for response from API. should_backoff: (Optional) provides methods for backoff. repeater: provides method to repeat failed requests to API due to service errors. Defaults to :class:`apache_beam.io.requestresponse.ExponentialBackOffRepeater` to repeat requests with exponential backoff. cache: (Optional) a `~apache_beam.io.requestresponse.Cache` object to use the appropriate cache. throttler: provides methods to pre-throttle a request. Defaults to :class:`apache_beam.io.requestresponse.DefaultThrottler` for client-side adaptive throttling using :class:`apache_beam.io.components.adaptive_throttler.AdaptiveThrottler`
github-repos
def _call_with_flat_signature(self, args, kwargs):
    if len(args) > self._num_positional_args:
        raise TypeError(
            f"{self._flat_signature_summary()} takes "
            f"{self._num_positional_args} positional arguments, got {len(args)}.")
    args = list(args)
    kwargs = dict(kwargs)
    kwargs = {
        function_type_lib.sanitize_arg_name(k): v for k, v in kwargs.items()
    }
    for keyword in self._arg_keywords[len(args):]:
        try:
            args.append(
                kwargs.pop(
                    function_type_lib.sanitize_arg_name(compat.as_str(keyword))))
        except KeyError:
            specified_keywords = (
                list(self._arg_keywords[:len(args)]) + list(kwargs.keys()))
            missing_required_args = sorted(
                set(self._arg_keywords) - set(specified_keywords))
            raise TypeError(
                f"{self._flat_signature_summary()} missing required arguments: "
                f"{', '.join(missing_required_args)}.")
    if kwargs:
        positional_arg_keywords = set(self._arg_keywords[:len(args)])
        for unused_key in kwargs:
            if unused_key in positional_arg_keywords:
                raise TypeError(
                    f"{self._flat_signature_summary()} got two values for "
                    f"'{unused_key}'.")
        raise TypeError(
            f"{self._flat_signature_summary()} got unexpected keyword "
            f"arguments: {', '.join(sorted(kwargs))}.")
    for i, arg in enumerate(args):
        if not isinstance(
                arg, (tensor_lib.Tensor, resource_variable_ops.BaseResourceVariable)):
            raise TypeError(
                f"{self._flat_signature_summary()}: expected argument "
                f"#{i}(zero-based) to be a Tensor; "
                f"got {type(arg).__name__} ({arg}).")
    return self._call_flat(args, self.captured_inputs)
Executes the wrapped function with the flat signature. Args: args: Positional arguments to the concrete function. kwargs: Keyword arguments to the concrete function. Returns: The result of applying the function on the Tensors/Variables contained in `args` and `kwargs`. Raises: TypeError: if `args` and `kwargs` do not match the flat signature of this `ConcreteFunction`.
github-repos
def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides, padding): x1 = np.random.rand(*tensor_in_sizes).astype(np.float32) x2 = np.random.rand(*filter_in_sizes).astype(np.float32) def _setup_val(data_format, use_gpu): with test_util.device(use_gpu): t1 = constant_op.constant(x1, shape=tensor_in_sizes) t2 = constant_op.constant(x2, shape=filter_in_sizes) strides = [1] + conv_strides + [1] if data_format == 'NCHW': t1 = test_util.NHWCToNCHW(t1) strides = test_util.NHWCToNCHW(strides) conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding, data_format=data_format) if data_format == 'NCHW': conv = test_util.NCHWToNHWC(conv) return conv tensors = [] for data_format, use_gpu in get_test_configs(): tensors.append(_setup_val(data_format, use_gpu)) values = self.evaluate(tensors) for i in range(1, len(values)): self.assertAllClose(values[0], values[i], rtol=0.001, atol=0.001)
Verifies that CPU and GPU produce the same values. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols, input_depth, output_depth]. conv_strides: [row_stride, col_stride] for the convolution; padding: Padding type.
github-repos
def _ScanVolumeSystemRootNode(self, scan_context, scan_node, auto_recurse=True):
    if scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW:
        path_spec = self.ScanForFileSystem(scan_node.path_spec.parent)
        if path_spec:
            scan_context.AddScanNode(path_spec, scan_node.parent_node)

    file_entry = resolver.Resolver.OpenFileEntry(
        scan_node.path_spec, resolver_context=self._resolver_context)
    for sub_file_entry in file_entry.sub_file_entries:
        sub_scan_node = scan_context.AddScanNode(sub_file_entry.path_spec, scan_node)

        if scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW:
            continue

        if auto_recurse or not scan_context.updated:
            self._ScanNode(scan_context, sub_scan_node, auto_recurse=auto_recurse)
Scans a volume system root node for supported formats. Args: scan_context (SourceScannerContext): source scanner context. scan_node (SourceScanNode): source scan node. auto_recurse (Optional[bool]): True if the scan should automatically recurse as far as possible. Raises: ValueError: if the scan context or scan node is invalid.
codesearchnet
def concatenate(xs, axis=0):
    if any_symbolic_tensors(xs):
        return Concatenate(axis=axis).symbolic_call(xs)
    return backend.numpy.concatenate(xs, axis=axis)
Join a sequence of tensors along an existing axis. Args: xs: The sequence of tensors to concatenate. axis: The axis along which the tensors will be joined. Defaults to `0`. Returns: The concatenated tensor.
github-repos
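The semantics match `numpy.concatenate`; a quick standard illustration of the `axis` argument:

import numpy as np

a = np.array([[1, 2]])
b = np.array([[3, 4]])
assert np.concatenate([a, b], axis=0).tolist() == [[1, 2], [3, 4]]
assert np.concatenate([a, b], axis=1).tolist() == [[1, 2, 3, 4]]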
def impersonate(self, user, enterprise):
    if not user or not enterprise:
        raise ValueError('You must set a user name and an enterprise name to begin impersonification')

    self._is_impersonating = True
    self._impersonation = "%s@%s" % (user, enterprise)
Impersonate a user in a enterprise Args: user: the name of the user to impersonate enterprise: the name of the enterprise where to use impersonation
juraj-google-style
def run(self, inputs=None, warmup_iterations: int=10, benchmark_iterations: int=100, enable_gpu: bool=True) -> TestResult:
Runs the model with provided or randomly generated input tensors. Args: inputs: Mapping from names to input ndarrays in TF1, or a sequence of tensors in TF2. If `None`, randomly generated inputs will be used instead. warmup_iterations: Number of inferences to warm up the runtime. benchmark_iterations: Number of inferences to measure the latency. enable_gpu: Whether it is allowed to use GPU or not. Returns: `TestResult` summarizing latency and numerics information.
github-repos
def _HandleMetadataUpdate(self, metadata_key='', recursive=True, wait=True,
                          timeout=None, retry=True):
    exception = None
    while True:
        try:
            return self._GetMetadataUpdate(
                metadata_key=metadata_key, recursive=recursive, wait=wait,
                timeout=timeout)
        except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:
            if not isinstance(e, type(exception)):
                exception = e
                self.logger.error('GET request error retrieving metadata. %s.', e)
            if retry:
                continue
            else:
                break
Wait for a successful metadata response. Args: metadata_key: string, the metadata key to watch for changes. recursive: bool, True if we should recursively watch for metadata changes. wait: bool, True if we should wait for a metadata change. timeout: int, timeout in seconds for returning metadata output. retry: bool, True if we should retry on failure. Returns: json, the deserialized contents of the metadata server.
codesearchnet
def is_dir(self, follow_symlinks=True):
    try:
        return (self._system.isdir(
            path=self._path,
            client_kwargs=self._client_kwargs,
            virtual_dir=False) or bool(S_ISDIR(self.stat().st_mode)))
    except ObjectPermissionError:
        return True
Return True if this entry is a directory or a symbolic link pointing to a directory; return False if the entry is or points to any other kind of file, or if it doesn’t exist anymore. The result is cached on the os.DirEntry object. Args: follow_symlinks (bool): Follow symlinks. Not supported on cloud storage objects. Returns: bool: True if directory exists.
juraj-google-style
def find_elements_by_class(self, class_, update=False) -> Elements:
    return self.find_elements(by=By.CLASS, value=class_, update=update)
Finds multiple elements by class. Args: class_: The class of the elements to be found. update: If the interface has changed, this option should be True. Returns: A list with elements if any was found. An empty list if not. Raises: NoSuchElementException - If the element wasn't found. Usage: elements = driver.find_elements_by_class('foo')
juraj-google-style
def prob(self, value, name='prob'):
    return self._call_prob(value, name)
Probability density/mass function. Args: value: `float` or `double` `Tensor`. name: Python `str` prepended to names of ops created by this function. Returns: prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type `self.dtype`.
github-repos
def SetServerInformation(self, server, port):
    self._host = server
    self._port = port
Sets the server information. Args: server (str): hostname or IP address of the database server. port (int): port number of the database server.
codesearchnet
def has_unitary(val: Any) -> bool:
    from cirq.protocols.decompose import decompose_once, decompose_once_with_qubits
    from cirq import Gate, Operation, LineQubit

    getter = getattr(val, '_has_unitary_', None)
    result = NotImplemented if getter is None else getter()
    if result is not NotImplemented:
        return result

    unitary_getter = getattr(val, '_unitary_', None)
    if unitary_getter is not None and unitary_getter() is not NotImplemented:
        return True

    if isinstance(val, Gate):
        decomposed_val = decompose_once_with_qubits(
            val, LineQubit.range(val.num_qubits()), default=None)
        if decomposed_val is not None:
            return all(has_unitary(v) for v in decomposed_val)
    elif isinstance(val, Operation):
        decomposed_val = decompose_once(val, default=None)
        if decomposed_val is not None:
            return all(has_unitary(v) for v in decomposed_val)

    return unitary(val, None) is not None
Returns whether the value has a unitary matrix representation. Returns: If `val` has a _has_unitary_ method and its result is not NotImplemented, that result is returned. Otherwise, if `val` is a cirq.Gate or cirq.Operation, a decomposition is attempted, and True is returned if has_unitary is True for all operations in the decomposition. Otherwise, True is returned if the value's _unitary_ method returns a non-default value. Returns False if none of these checks succeed.
codesearchnet
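A hedged usage example; `cirq.has_unitary` is the public entry point for this protocol (requires `cirq`):

import cirq

assert cirq.has_unitary(cirq.X)                                # Pauli-X has a unitary
assert not cirq.has_unitary(cirq.measure(cirq.LineQubit(0)))   # measurement does not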
def __ge__(self, other): if self.index_type is not None: expr = grizzly_impl.get_field(self.expr, 1) else: expr = self.expr return SeriesWeld( grizzly_impl.compare( expr, other, ">=", self.weld_type ), WeldBit(), self.df, self.column_name )
Elementwise greater-than-or-equal comparison. Args: other: The scalar or series to compare against. Returns: SeriesWeld: A boolean series holding the result of the comparison.
juraj-google-style
def __init__(self, window_fn, main_receivers, tagged_receivers,
             per_element_output_counter, output_batch_converter,
             process_yields_batches, process_batch_yields_elements):
    self.window_fn = window_fn
    self.main_receivers = main_receivers
    self.tagged_receivers = tagged_receivers
    if (per_element_output_counter is not None
            and per_element_output_counter.is_cythonized):
        self.per_element_output_counter = per_element_output_counter
    else:
        self.per_element_output_counter = None
    self.output_batch_converter = output_batch_converter
    self._process_yields_batches = process_yields_batches
    self._process_batch_yields_elements = process_batch_yields_elements
Initializes ``_OutputHandler``. Args: window_fn: a windowing function (WindowFn). main_receivers: the main Receiver object. tagged_receivers: a dict of tag name to Receiver objects. per_element_output_counter: per_element_output_counter of one work_item. Could be None if the experimental flag is turned off.
github-repos
def normpath(self, path):
    path = self.normcase(path)
    drive, path = self.splitdrive(path)
    sep = self._path_separator(path)
    is_absolute_path = path.startswith(sep)
    path_components = path.split(sep)
    collapsed_path_components = []
    dot = self._matching_string(path, '.')
    dotdot = self._matching_string(path, '..')
    for component in path_components:
        if not component or component == dot:
            continue
        if component == dotdot:
            if (collapsed_path_components and
                    collapsed_path_components[-1] != dotdot):
                collapsed_path_components.pop()
                continue
            elif is_absolute_path:
                continue
        collapsed_path_components.append(component)
    collapsed_path = sep.join(collapsed_path_components)
    if is_absolute_path:
        collapsed_path = sep + collapsed_path
    return (drive + collapsed_path) or dot
Mimic os.path.normpath using the specified path_separator. Mimics os.path.normpath using the path_separator that was specified for this FakeFilesystem. Normalizes the path, but unlike the method absnormpath, does not make it absolute. Eliminates dot components (. and ..) and combines repeated path separators (//). Initial .. components are left in place for relative paths. If the result is an empty path, '.' is returned instead. This also replaces the alternative path separator with the path separator. That is, it behaves like the real os.path.normpath on Windows if initialized with '\\' as path separator and '/' as alternative separator. Args: path: (str) The path to normalize. Returns: (str) A copy of path with empty components and dot components removed.
codesearchnet
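A short sketch of the normalization rules, assuming pyfakefs's FakeFilesystem (import path and constructor argument are assumptions):

from pyfakefs.fake_filesystem import FakeFilesystem

fs = FakeFilesystem(path_separator='/')
assert fs.normpath('a/./b//c/../d') == 'a/b/d'
assert fs.normpath('../x') == '../x'   # leading '..' kept for relative paths
assert fs.normpath('') == '.'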
def load_from_file(filepath, format_=FileFormat.py, update_data_callback=None, disable_memcache=False): filepath = os.path.realpath(filepath) cache_filepath = file_cache.get(filepath) if cache_filepath: return _load_file(filepath=cache_filepath, format_=format_, update_data_callback=update_data_callback, original_filepath=filepath) elif disable_memcache: return _load_file(filepath=filepath, format_=format_, update_data_callback=update_data_callback) else: return _load_from_file(filepath=filepath, format_=format_, update_data_callback=update_data_callback)
Load data from a file. Note: Any functions from a .py file will be converted to `SourceCode` objects. Args: filepath (str): File to load. format_ (`FileFormat`): Format of file contents. update_data_callback (callable): Used to change data before it is returned or cached. disable_memcache (bool): If True, don't r/w to memcache. Returns: dict.
juraj-google-style
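A hypothetical call, assuming a rez-style package.py on disk; the path and the 'name' key are illustrative only:

data = load_from_file('/path/to/package.py', format_=FileFormat.py)
print(data['name'])   # any functions in the .py arrive as SourceCode objects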
def package_in_memory(cls, workflow_name, workflow_files): s = StringIO() p = cls(s, workflow_name, meta_data=[]) p.add_bpmn_files_by_glob(workflow_files) p.create_package() return s.getvalue()
Generates a workflow package from workflow diagrams. Args: workflow_name: Name of the workflow. workflow_files: Glob pattern matching the BPMN diagram files. Returns: The in-memory package contents as a string.
codesearchnet
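A hypothetical usage of the classmethod, assuming a SpiffWorkflow-style Packager and BPMN files matching the glob:

package = Packager.package_in_memory('OrderProcess', 'diagrams/*.bpmn')
with open('order_process.zip', 'w') as f:   # getvalue() returns a str here
    f.write(package)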
def find_call(self, path, method): if not path.endswith('/'): path += '/' path = path.split('/')[1:] return self._recursive_route_match(self._routes, path, method, [])
Find callable for the specified URL path and HTTP method. Args: path (:obj:`str`): URL path to match method (:obj:`str`): HTTP method Note: A trailing '/' is always assumed in the path.
juraj-google-style
def get_path_from_query_string(req):
    path = req.args.get('path')
    if path is None:
        raise exceptions.UserError('Path not found in query string')
    return path
Gets path from query string Args: req (flask.request): Request object from Flask Returns: path (str): Value of "path" parameter from query string Raises: exceptions.UserError: If "path" is not found in query string
codesearchnet
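Usage sketch inside a Flask view, for a request such as /download?path=/data/file.csv (route and view names are illustrative):

from flask import Flask, request

app = Flask(__name__)

@app.route('/download')
def download():
    # raises exceptions.UserError when 'path' is absent
    return get_path_from_query_string(request)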
def _shapes(tensor_list_list, shapes, enqueue_many): if shapes is None: len0 = len(tensor_list_list[0]) for tl in tensor_list_list: for i in range(len0): if tl[i].shape.ndims is None: raise ValueError("Cannot infer Tensor's rank: %s" % tl[i]) shapes = [_merge_shapes([tl[i].shape.as_list() for tl in tensor_list_list], enqueue_many) for i in range(len0)] return shapes
Calculate and merge the shapes of incoming tensors. Args: tensor_list_list: List of tensor lists. shapes: List of shape tuples corresponding to tensors within the lists. enqueue_many: Boolean describing whether shapes will be enqueued as batches or individual entries. Returns: A list of shapes aggregating shape inference info from `tensor_list_list`, or returning `shapes` if it is not `None`. Raises: ValueError: If any of the inferred shapes in `tensor_list_list` lack a well defined rank.
github-repos
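An illustrative sketch of the assumed merge semantics for two parallel tensor lists:

import tensorflow as tf

tl_a = [tf.zeros([4, 3]), tf.zeros([4])]
tl_b = [tf.zeros([4, 3]), tf.zeros([4])]
# _shapes([tl_a, tl_b], None, enqueue_many=True) would strip the leading
# batch dimension and return [[3], []]; with enqueue_many=False it would
# return the full shapes [[4, 3], [4]] (assumed behavior of _merge_shapes).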
def _get_access_from_refresh(self) -> Tuple[str, float]: headers = self._get_authorization_headers() data = { 'grant_type': 'refresh_token', 'refresh_token': self.refresh_token } r = self.session.post(self.TOKEN_URL, headers=headers, data=data) response_data = r.json() return (response_data['access_token'], response_data['expires_in'])
Uses the stored refresh token to get a new access token. This method assumes that the refresh token exists. Args: None Returns: Tuple of the new access token and its lifetime in seconds (relative to now).
juraj-google-style
def __init__(self, message, exc=None): super(WorkerError, self).__init__() self.msg = message self.exc = exc
Initializes WorkerError. Args: message: error message exc: optional underlying exception.
juraj-google-style
def _emit_with_loc(self, op_str, node=None): loc = '' if node: loc = self._create_mlir_loc(anno.getanno(node, anno.Basic.ORIGIN, default=None)) self.emit(op_str + ' ' + loc)
Emit the mlir operation with the location associated with the node. Args: op_str: The mlir operation string to be emitted. node: The node of the AST tree, the mlir operation translated from.
github-repos
def balance(self, as_of=None, raw=False, leg_query=None, **kwargs): balances = [account.simple_balance(as_of=as_of, raw=raw, leg_query=leg_query, **kwargs) for account in self.get_descendants(include_self=True)] return sum(balances, Balance())
Get the balance for this account, including child accounts Args: as_of (Date): Only include transactions on or before this date raw (bool): If true the returned balance should not have its sign adjusted for display purposes. leg_query (Q): Django Q object used to filter the transaction legs kwargs (dict): Will be used to filter the transaction legs Returns: Balance See Also: :meth:`simple_balance()`
codesearchnet
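Hypothetical usage, assuming a django-hordak-style Account tree with a 'Bank' account:

bank = Account.objects.get(name='Bank')
bank.balance()                     # Balance over this account and its children
bank.balance(as_of='2024-01-31')   # only transactions on or before this date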
def snyder_ac(self, structure):
    nsites = structure.num_sites
    volume = structure.volume
    natoms = structure.composition.num_atoms
    num_density = 1e30 * nsites / volume
    tot_mass = sum(e.atomic_mass for e in structure.species)
    avg_mass = 1.6605e-27 * tot_mass / natoms
    avg_velocity = (self.long_v(structure) + 2. * self.trans_v(structure)) / 3.
    return (0.38483 * avg_mass * avg_velocity ** 3.
            / (300. * num_density ** (-2. / 3.) * nsites ** (1. / 3.)))
Calculates Snyder's acoustic sound velocity (in SI units) Args: structure: pymatgen structure object Returns: Snyder's acoustic sound velocity (in SI units)
juraj-google-style
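Hypothetical call, assuming the method lives on an elastic-tensor-like object already in scope and a POSCAR file on disk:

from pymatgen.core import Structure

structure = Structure.from_file('POSCAR')
kappa_ac = elastic_tensor.snyder_ac(structure)   # result in SI units as stated above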
def error_messages(self, driver_id=None): if (driver_id is not None): assert isinstance(driver_id, ray.DriverID) return self._error_messages(driver_id) error_table_keys = self.redis_client.keys((ray.gcs_utils.TablePrefix_ERROR_INFO_string + '*')) driver_ids = [key[len(ray.gcs_utils.TablePrefix_ERROR_INFO_string):] for key in error_table_keys] return {binary_to_hex(driver_id): self._error_messages(ray.DriverID(driver_id)) for driver_id in driver_ids}
Get the error messages for all drivers or a specific driver. Args: driver_id: The specific driver to get the errors for. If this is None, then this method retrieves the errors for all drivers. Returns: A dictionary mapping driver ID to a list of the error messages for that driver.
codesearchnet
def ms_to_times(ms): ms = int(round(ms)) h, ms = divmod(ms, 3600000) m, ms = divmod(ms, 60000) s, ms = divmod(ms, 1000) return Times(h, m, s, ms)
Convert milliseconds to normalized tuple (h, m, s, ms). Arguments: ms: Number of milliseconds (may be int, float or other numeric class). Should be non-negative. Returns: Named tuple (h, m, s, ms) of ints. Invariants: ``ms in range(1000) and s in range(60) and m in range(60)``
juraj-google-style
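Two worked examples, exact under the arithmetic above:

ms_to_times(3661001)   # -> Times(h=1, m=1, s=1, ms=1)
ms_to_times(59999.6)   # rounds to 60000 -> Times(h=0, m=1, s=0, ms=0)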
def json(self, ondemand=False): self._request_entity = 'indicator' self._request_uri = '{}/{}'.format(self._api_uri, 'json') self._stream = True if ondemand: self._request.add_payload('runNow', True)
Update request URI to return JSON data. For onDemand bulk generation to work it must first be enabled in the ThreatConnect platform under System settings. Args: ondemand (boolean): Enable on demand bulk generation.
codesearchnet
def add(self, obj): if not isinstance(obj, dict): raise TypeError("Add object should be a dict object") obj = self.validation(obj) obj["id"] = self.maxId + 1 obj = self._cast_model(obj) self.model.db.append(obj) if not self._batch.enable.is_set(): self.model.save_db() return obj
Add an object Args: Object: Object to be added Returns: Object: The object with its assigned id Raises: TypeError: If the object to add is not a dict MultipleInvalid: If the input object is invalid
juraj-google-style
def infer_from_frame_stack(self, ob_stack): logits, vf = self.sess.run([self.logits_t, self.value_function_t], feed_dict={self.obs_t: ob_stack}) return logits, vf
Infer policy from stack of observations. Args: ob_stack: array of shape (1, frame_stack_size, height, width, channels) Returns: logits and vf.
juraj-google-style
def get_batch_strategy_instance(strategy, splitter):
    if strategy == 'SingleRecord':
        return SingleRecordStrategy(splitter)
    elif strategy == 'MultiRecord':
        return MultiRecordStrategy(splitter)
    else:
        # The original message had a %s placeholder but never interpolated
        # `strategy`; apply the formatting.
        raise ValueError('Invalid Batch Strategy: %s - Valid Strategies: '
                         '"SingleRecord", "MultiRecord"' % strategy)
Return an Instance of :class:`sagemaker.local.data.BatchStrategy` according to `strategy` Args: strategy (str): Either 'SingleRecord' or 'MultiRecord' splitter (:class:`sagemaker.local.data.Splitter`): splitter to get the data from. Returns: :class:`sagemaker.local.data.BatchStrategy`: an instance of a BatchStrategy
juraj-google-style
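Usage sketch; the splitter helper name is an assumption about the same module:

splitter = get_splitter_instance('Line')   # helper name assumed
strategy = get_batch_strategy_instance('MultiRecord', splitter)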
def get_meshes_fld(step, var): fld = step.fields[var] if step.geom.twod_xz: xmesh, ymesh = step.geom.x_mesh[:, 0, :], step.geom.z_mesh[:, 0, :] fld = fld[:, 0, :, 0] elif step.geom.cartesian and step.geom.twod_yz: xmesh, ymesh = step.geom.y_mesh[0, :, :], step.geom.z_mesh[0, :, :] fld = fld[0, :, :, 0] else: xmesh, ymesh = step.geom.x_mesh[0, :, :], step.geom.y_mesh[0, :, :] fld = fld[0, :, :, 0] return xmesh, ymesh, fld
Return scalar field along with coordinates meshes. Only works properly in 2D geometry. Args: step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData instance. var (str): scalar field name. Returns: tuple of :class:`numpy.array`: xmesh, ymesh, fld 2D arrays containing respectively the x position, y position, and the value of the requested field.
juraj-google-style
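Hypothetical stagpy usage on a 2D run (path and field name illustrative):

from stagpy.stagyydata import StagyyData

sdat = StagyyData('path/to/run')
xmesh, ymesh, fld = get_meshes_fld(sdat.snaps[-1], 'T')   # temperature field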
def __del__(self): if self._initialized: if self.connected(): if self.swo_enabled(): self.swo_stop() if self.opened(): self.close()
Destructor for the ``JLink`` instance. Closes the J-Link connection if one exists. Args: self (JLink): the ``JLink`` instance Returns: ``None``
juraj-google-style
def __init__(self, datastore_client, storage_client, round_name): self._datastore_client = datastore_client self._storage_client = storage_client self._round_name = round_name self._attacks = None self._targeted_attacks = None self._defenses = None
Initializes CompetitionSubmissions. Args: datastore_client: instance of CompetitionDatastoreClient storage_client: instance of CompetitionStorageClient round_name: name of the round
juraj-google-style
def InternalSendApdu(self, apdu_to_send):
    response = None
    if not self.use_legacy_format:
        response = apdu.ResponseApdu(self.transport.SendMsgBytes(
            apdu_to_send.ToByteArray()))
        if response.sw1 == 0x67 and response.sw2 == 0x00:
            # SW 0x6700 (wrong length): the device only speaks the legacy
            # U2F framing, so remember that and retry.
            self.use_legacy_format = True
            return self.InternalSendApdu(apdu_to_send)
    else:
        response = apdu.ResponseApdu(self.transport.SendMsgBytes(
            apdu_to_send.ToLegacyU2FByteArray()))
    return response
Send an APDU to the device. Sends an APDU to the device, possibly falling back to the legacy encoding format that is not ISO7816-4 compatible. Args: apdu_to_send: The CommandApdu object to send Returns: The ResponseApdu object constructed out of the device's reply.
codesearchnet
def generate_skip_gram_data_set(self, token_list):
    n_gram_tuple_zip = self.generate_tuple_zip(token_list, 3)
    skip_gram_list = []
    for pre, point, post in n_gram_tuple_zip:
        # Pair the centre token with each of its neighbours.
        skip_gram_list.append((point, pre))
        skip_gram_list.append((point, post))
    # `zip(skip_gram_list)` would wrap each pair in a 1-tuple; return an
    # iterator over the (point, context) pairs instead.
    return iter(skip_gram_list)
Generate the Skip-gram pairs. Args: token_list: The list of tokens. Returns: Iterator of (centre token, context token) pairs.
juraj-google-style
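A worked trace of the pairing for a four-token input:

# token_list = ['the', 'quick', 'brown', 'fox'] gives 3-gram windows
# ('the', 'quick', 'brown') and ('quick', 'brown', 'fox'), hence the pairs
# ('quick', 'the'), ('quick', 'brown'), ('brown', 'quick'), ('brown', 'fox').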
def __find_incongruities(self, op, index): if len(self) == 1: return hits = [] intervals = [] if self.order == 'depth': one, two = 'base', 'top' else: one, two = 'top', 'base' for i, iv in enumerate(self[:-1]): next_iv = self[i+1] if op(getattr(iv, one), getattr(next_iv, two)): hits.append(i) top = getattr(iv, one) base = getattr(next_iv, two) iv_gap = Interval(top, base) intervals.append(iv_gap) if index and hits: return hits elif intervals: return Striplog(intervals) else: return
Private method. Finds gaps and overlaps in a striplog. Called by find_gaps() and find_overlaps(). Args: op (operator): ``operator.gt`` or ``operator.lt`` index (bool): If ``True``, returns indices of intervals with gaps after them. Returns: Striplog: A striplog of all the gaps; a sort of anti-striplog. If ``index`` is ``True``, a list of indices is returned instead. Returns ``None`` if nothing is found.
juraj-google-style
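Hypothetical calls through the public wrappers that delegate to this method (`strip` is an existing Striplog):

gaps = strip.find_gaps()                        # Striplog of gaps, or None
overlap_idx = strip.find_overlaps(index=True)   # indices instead of intervals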
def tv_list(self, **kwargs): path = self._get_path('tv_list') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the list of TV genres. Args: language: (optional) ISO 639-1 code. Returns: A dict representation of the JSON returned from the API.
codesearchnet
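Hypothetical usage assuming a tmdbsimple-style Genres wrapper:

genres = Genres()
response = genres.tv_list(language='en')
print(response['genres'][0]['name'])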
def __new__(cls, *args, **kwargs) -> Any: dynamic_evaluate_fn = get_dynamic_evaluate_fn() if dynamic_evaluate_fn is None: return super().__new__(cls) else: hyper_value = object.__new__(cls) cls.__init__(hyper_value, *args, **kwargs) return dynamic_evaluate_fn(hyper_value)
Overrides __new__ for supporting dynamic evaluation mode. Args: *args: Positional arguments passed to init the custom hyper. **kwargs: Keyword arguments passed to init the custom hyper. Returns: A dynamic evaluated value according to current `dynamic_evaluate` context.
github-repos