Columns: code (string, 20–4.93k chars), docstring (string, 33–1.27k chars), source (string, 3 classes).
def _ProcessFileEntry(self, mediator, file_entry):
    display_name = mediator.GetDisplayName()
    logger.debug(
        '[ProcessFileEntry] processing file entry: {0:s}'.format(display_name))
    reference_count = mediator.resolver_context.GetFileObjectReferenceCount(
        file_entry.path_spec)
    try:
        if self._IsMetadataFile(file_entry):
            self._ProcessMetadataFile(mediator, file_entry)
        else:
            file_entry_processed = False
            for data_stream in file_entry.data_streams:
                if self._abort:
                    break
                if self._CanSkipDataStream(file_entry, data_stream):
                    logger.debug((
                        '[ProcessFileEntry] Skipping datastream {0:s} for {1:s}: '
                        '{2:s}').format(
                            data_stream.name, file_entry.type_indicator,
                            display_name))
                    continue
                self._ProcessFileEntryDataStream(mediator, file_entry, data_stream)
                file_entry_processed = True
            if not file_entry_processed:
                # Process the file entry at least once, even without data streams.
                self._ProcessFileEntryDataStream(mediator, file_entry, None)
    finally:
        new_reference_count = (
            mediator.resolver_context.GetFileObjectReferenceCount(
                file_entry.path_spec))
        if reference_count != new_reference_count:
            if mediator.resolver_context.ForceRemoveFileObject(
                    file_entry.path_spec):
                logger.warning(
                    'File-object not explicitly closed for file: {0:s}'.format(
                        display_name))
    logger.debug(
        '[ProcessFileEntry] done processing file entry: {0:s}'.format(
            display_name))
Processes a file entry. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. file_entry (dfvfs.FileEntry): file entry.
juraj-google-style
def wait_for_batches(self, batch_ids, timeout=None):
    self._batch_tracker.watch_statuses(self, batch_ids)
    timeout = timeout or DEFAULT_TIMEOUT
    start_time = time()
    with self._wait_condition:
        while True:
            if self._statuses is not None:
                return _format_batch_statuses(
                    self._statuses, batch_ids, self._batch_tracker)
            if time() - start_time > timeout:
                statuses = self._batch_tracker.get_statuses(batch_ids)
                return _format_batch_statuses(
                    statuses, batch_ids, self._batch_tracker)
            self._wait_condition.wait(timeout - (time() - start_time))
Blocks until a list of batch ids is committed to the blockchain or a timeout is exceeded. Returns the statuses of those batches. Args: batch_ids (list of str): The ids of the batches to wait for timeout (int): Maximum time in seconds to wait for Returns: list of BatchStatus: BatchStatuses to send back to the client
juraj-google-style
def ParseCmd(self, cmd_input, attributes=None, templates=None): self.raw = cmd_input if not templates: row_idx = self.index.GetRowMatch(attributes) if row_idx: templates = self.index.index[row_idx]["Template"] else: raise CliTableError( 'No template found for attributes: "%s"' % attributes ) template_files = self._TemplateNamesToFiles(templates) try: self.Reset() self._keys = set() self.table = self._ParseCmdItem(self.raw, template_file=template_files[0]) for tmplt in template_files[1:]: self.extend( self._ParseCmdItem(self.raw, template_file=tmplt), set(self._keys) ) finally: for f in template_files: f.close()
Creates a TextTable table of values from cmd_input string. Parses command output with template/s. If more than one template is found subsequent tables are merged if keys match (dropped otherwise). Args: cmd_input: String, Device/command response. attributes: Dict, attribute that further refine matching template. templates: String list of templates to parse with. If None, uses index Raises: CliTableError: A template was not found for the given command.
juraj-google-style
def is_global(self):
    return (
        not (self.network_address in IPv4Network('100.64.0.0/10') and
             self.broadcast_address in IPv4Network('100.64.0.0/10')) and
        not self.is_private)
Test if this address is allocated for public networks. Returns: A boolean, True if the address is not reserved per iana-ipv4-special-registry.
codesearchnet
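A self-contained sketch of the same check using only the standard-library ipaddress module; the free function and its test values are illustrative, not the original implementation.

import ipaddress

_SHARED_ADDRESS_SPACE = ipaddress.IPv4Network('100.64.0.0/10')  # RFC 6598 carrier-grade NAT range

def is_global(network):
    # A network is "global" if it is not private and does not fall
    # entirely inside the shared address space 100.64.0.0/10.
    in_shared = (network.network_address in _SHARED_ADDRESS_SPACE and
                 network.broadcast_address in _SHARED_ADDRESS_SPACE)
    return not in_shared and not network.is_private

print(is_global(ipaddress.IPv4Network('8.8.8.0/24')))      # True
print(is_global(ipaddress.IPv4Network('100.64.1.0/24')))   # False
print(is_global(ipaddress.IPv4Network('192.168.0.0/16')))  # False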
def _query(self, query_type, query_str, verbose=False):
    # Return cached results when available.
    cached = self.query_cache.get(query_str)
    if cached:
        if verbose:
            print('Returning Cached VT Query Results')
        return cached
    # NOTE: the endpoint URLs are truncated to 'https:' in the source text.
    if query_type == 'file':
        response = requests.get(
            'https:',
            params={'apikey': self.apikey, 'resource': query_str, 'allinfo': 1})
    else:
        response = requests.post(
            'https:',
            params={'apikey': self.apikey, 'resource': query_str, 'allinfo': 1})
    try:
        vt_output = response.json()
    except ValueError:
        error_msg = 'VirusTotal no valid response, throttling and trying again...'
        if self.throttle:
            if verbose:
                print(error_msg)
            time.sleep(30)
            return self._query(query_type, query_str)
        return {'vt_error': error_msg}
    if not vt_output or vt_output['response_code'] == 0:
        output = {'query': query_str, 'not_found': True}
        self.query_cache.set(query_str, output)
        return output
    output = {field: vt_output[field] for field in vt_output.keys()
              if field not in self.exclude}
    output['query'] = query_str
    scan_results = collections.Counter()
    for scan in vt_output['scans'].values():
        if 'result' in scan:
            if scan['result']:
                scan_results[scan['result']] += 1
    output['scan_results'] = scan_results.most_common(5)
    self.query_cache.set(query_str, output)
    return output
Internal query method for the VirusTotal Service Args: query_type(str): The type of query (either 'file' or 'url') query_str (str): The file hash or domain/url to be queried
juraj-google-style
def zeros(shape, dtype=None, **kwargs): data = np.zeros(shape, dtype) return dc.array(data, **kwargs)
Create an array of given shape and type, filled with zeros. Args: shape (sequence of ints): 2D shape of the array. dtype (data-type, optional): Desired data-type for the array. kwargs (optional): Other arguments of the array (*coords, attrs, and name). Returns: array (decode.array): Decode array filled with zeros.
juraj-google-style
def GetCredential(self, path_spec, identifier): credentials = self._credentials_per_path_spec.get(path_spec.comparable, {}) return credentials.get(identifier, None)
Retrieves a specific credential from the key chain. Args: path_spec (PathSpec): path specification. identifier (str): credential identifier. Returns: object: credential or None if the credential for the path specification is not set.
codesearchnet
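A minimal, self-contained sketch of the key-chain pattern used above; the SetCredential counterpart and the FakePathSpec stand-in are hypothetical additions for illustration.

import collections

FakePathSpec = collections.namedtuple('FakePathSpec', ['comparable'])  # illustrative stand-in

class KeyChain(object):
    """Maps path-specification comparables to credential dictionaries."""

    def __init__(self):
        self._credentials_per_path_spec = {}

    def SetCredential(self, path_spec, identifier, data):
        # Hypothetical setter: store credentials keyed by the path spec's
        # comparable string, then by credential identifier (e.g. 'password').
        credentials = self._credentials_per_path_spec.setdefault(
            path_spec.comparable, {})
        credentials[identifier] = data

    def GetCredential(self, path_spec, identifier):
        credentials = self._credentials_per_path_spec.get(
            path_spec.comparable, {})
        return credentials.get(identifier, None)

key_chain = KeyChain()
spec = FakePathSpec(comparable='TYPE: BDE, location: /image.raw')
key_chain.SetCredential(spec, 'password', 'hunter2')
print(key_chain.GetCredential(spec, 'password'))      # 'hunter2'
print(key_chain.GetCredential(spec, 'recovery_key'))  # None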
def init_cache(self, batch_size, max_length): input_ids = jnp.ones((batch_size, max_length), dtype='i4') attention_mask = jnp.ones_like(input_ids, dtype='i4') position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init(jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True) return unfreeze(init_variables['cache'])
Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache.
github-repos
def update(self, domain, type_name, search_command, body): return self._request(domain, type_name, search_command, 'PUT', body)
Update entry in ThreatConnect Data Store Args: domain (string): One of 'local', 'organization', or 'system'. type_name (string): This is a free form index type name. The ThreatConnect API will use this resource verbatim. search_command (string): Search command to pass to ES. body (str): JSON body
juraj-google-style
def key_validation_check(tweet_keys_list, superset_keys, minset_keys): tweet_keys = set(tweet_keys_list) minset_overlap = (tweet_keys & minset_keys) if (minset_overlap != minset_keys): raise UnexpectedFormatError('keys ({}) missing from Tweet (Public API data is not supported)'.format((minset_keys - tweet_keys))) unexpected_keys = (tweet_keys - superset_keys) if (len(unexpected_keys) > 0): raise UnexpectedFormatError('Unexpected keys ({}) are in this Tweet'.format(unexpected_keys)) return 0
Validates the keys present in a Tweet. Args: tweet_keys_list (list): the keys present in a tweet superset_keys (set): the set of all possible keys for a tweet minset_keys (set): the set of minimal keys expected in a tweet. Returns: 0 if no errors Raises: UnexpectedFormatError on any mismatch of keys.
codesearchnet
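A usage sketch for the validator above; it assumes key_validation_check is in scope, and the key sets and the UnexpectedFormatError class defined here are illustrative stand-ins, not the library's real ones.

class UnexpectedFormatError(Exception):
    pass

# Illustrative key sets.
MINSET_KEYS = {'id', 'created_at', 'user'}
SUPERSET_KEYS = {'id', 'created_at', 'user', 'text', 'entities', 'lang'}

tweet = {'id': 1, 'created_at': 'now', 'user': {}, 'text': 'hi'}
print(key_validation_check(list(tweet.keys()), SUPERSET_KEYS, MINSET_KEYS))  # 0

try:
    key_validation_check(['id', 'text'], SUPERSET_KEYS, MINSET_KEYS)
except UnexpectedFormatError as err:
    print(err)  # reports the minimal keys that are missing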
def add(self, coro, *args, **kw): if asyncio.iscoroutinefunction(coro): coro = coro(*args, **kw) if (not asyncio.iscoroutine(coro)): raise TypeError('paco: coro must be a coroutine object') index = max(len(self.pool), 0) task = Task(index, coro) self.pool.append(task) return coro
Adds a new coroutine function with optional variadic argumetns. Arguments: coro (coroutine function): coroutine to execute. *args (mixed): optional variadic arguments Raises: TypeError: if the coro object is not a valid coroutine Returns: future: coroutine wrapped future
codesearchnet
def charge_balance(model): compound_charge = {} for compound in model.compounds: if compound.charge is not None: compound_charge[compound.id] = compound.charge for reaction in model.reactions: charge = reaction_charge(reaction.equation, compound_charge) yield reaction, charge
Calculate the overall charge for all reactions in the model. Yield (reaction, charge) pairs. Args: model: :class:`psamm.datasource.native.NativeModel`.
juraj-google-style
def clip_range(nodes1, nodes2):
    coeff_a, coeff_b, coeff_c, d_min, d_max = compute_fat_line(nodes1)
    _, num_nodes2 = nodes2.shape
    polynomial = np.empty((2, num_nodes2), order='F')
    denominator = float(num_nodes2 - 1)
    for index in six.moves.xrange(num_nodes2):
        polynomial[0, index] = index / denominator
        polynomial[1, index] = (
            coeff_a * nodes2[0, index] + coeff_b * nodes2[1, index] + coeff_c)
    start_bottom = np.asfortranarray([0.0, d_min])
    end_bottom = np.asfortranarray([1.0, d_min])
    start_top = np.asfortranarray([0.0, d_max])
    end_top = np.asfortranarray([1.0, d_max])
    s_min = DEFAULT_S_MIN
    s_max = DEFAULT_S_MAX
    for start_index in six.moves.xrange(num_nodes2 - 1):
        for end_index in six.moves.xrange(start_index + 1, num_nodes2):
            s_min, s_max = _update_parameters(
                s_min, s_max, start_bottom, end_bottom,
                polynomial[:, start_index], polynomial[:, end_index])
            s_min, s_max = _update_parameters(
                s_min, s_max, start_top, end_top,
                polynomial[:, start_index], polynomial[:, end_index])
    return _check_parameter_range(s_min, s_max)
r"""Reduce the parameter range where two curves can intersect. Does so by using the "fat line" for ``nodes1`` and computing the distance polynomial against ``nodes2``. .. note:: This assumes, but does not check that the curves being considered will only have one intersection in the parameter ranges :math:`s \in \left[0, 1\right]`, :math:`t \in \left[0, 1\right]`. This assumption is based on the fact that B |eacute| zier clipping is meant to be used to find tangent intersections for already subdivided (i.e. sufficiently zoomed in) curve segments. Args: nodes1 (numpy.ndarray): ``2 x N1`` array of nodes in a curve which will define the clipping region. nodes2 (numpy.ndarray): ``2 x N2`` array of nodes in a curve which will be clipped. Returns: Tuple[float, float]: The pair of * The start parameter of the clipped range. * The end parameter of the clipped range.
codesearchnet
def from_file(cls, filename, *, strict=True): config = cls() config.load_from_file(filename, strict=strict) return config
Create a new Config object from a configuration file. Args: filename (str): The location and name of the configuration file. strict (bool): If true raises a ConfigLoadError when the configuration cannot be found. Returns: An instance of the Config class. Raises: ConfigLoadError: If the configuration cannot be found.
codesearchnet
def MakeSimpleProtoClass(fields, full_name=None, pool=None): factory = message_factory.MessageFactory(pool=pool) if full_name is not None: try: proto_cls = _GetMessageFromFactory(factory, full_name) return proto_cls except KeyError: pass field_items = fields.items() if not isinstance(fields, OrderedDict): field_items = sorted(field_items) fields_hash = hashlib.sha1() for f_name, f_type in field_items: fields_hash.update(f_name.encode('utf-8')) fields_hash.update(str(f_type).encode('utf-8')) proto_file_name = fields_hash.hexdigest() + '.proto' if full_name is None: full_name = ('net.proto2.python.public.proto_builder.AnonymousProto_' + fields_hash.hexdigest()) try: proto_cls = _GetMessageFromFactory(factory, full_name) return proto_cls except KeyError: pass factory.pool.Add( _MakeFileDescriptorProto(proto_file_name, full_name, field_items)) return _GetMessageFromFactory(factory, full_name)
Create a Protobuf class whose fields are basic types. Note: this doesn't validate field names! Args: fields: dict of {name: field_type} mappings for each field in the proto. If this is an OrderedDict the order will be maintained, otherwise the fields will be sorted by name. full_name: optional str, the fully-qualified name of the proto type. pool: optional DescriptorPool instance. Returns: a class, the new protobuf class with a FileDescriptor.
juraj-google-style
def _get_example_from_properties(self, spec): local_spec = deepcopy(spec) additional_property = False if ('additionalProperties' in local_spec): additional_property = True if ('properties' not in local_spec): local_spec['properties'] = {} local_spec['properties'].update({'any_prop1': local_spec['additionalProperties'], 'any_prop2': local_spec['additionalProperties']}) del local_spec['additionalProperties'] required = local_spec.get('required', []) required += ['any_prop1', 'any_prop2'] local_spec['required'] = required example = {} properties = local_spec.get('properties') if (properties is not None): required = local_spec.get('required', properties.keys()) for (inner_name, inner_spec) in properties.items(): if (inner_name not in required): continue partial = self.get_example_from_prop_spec(inner_spec) if isinstance(partial, list): partial = partial[0] example[inner_name] = partial return (example, additional_property)
Get an example from the properties of an object defined inline. Args: spec: property specification you want an example of. Returns: An example for the given spec, and a boolean indicating whether the spec contained additionalProperties.
codesearchnet
def _get_suffix(path):
    suffix = os.path.basename(path).split('.')[-1]
    if '/' in suffix:
        raise UserWarning("Filename can't contain '/' in suffix (%s)!" % path)
    return suffix
Return suffix from `path`. ``/home/xex/somefile.txt`` --> ``txt``. Args: path (str): Full file path. Returns: str: Suffix. Raises: UserWarning: When ``/`` is detected in suffix.
codesearchnet
def write_string_to_file(filename, file_content): with FileIO(filename, mode='w') as f: f.write(file_content)
Writes a string to a given file. Args: filename: string, path to a file file_content: string, contents that need to be written to the file Raises: errors.OpError: If there are errors during the operation.
github-repos
def baredoc(obj): doc = getdoc(obj) if not doc: return '' doc = doc.splitlines()[0] return doc.rstrip(' .').lstrip()
Return the first line of the docstring of an object. Trailing periods and spaces as well as leading spaces are removed from the output. Args: obj: any Python object. Returns: str: the first line of the docstring of obj.
juraj-google-style
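A usage sketch for the entry above; it assumes the baredoc function and its getdoc import (inspect.getdoc) are in scope.

def example():
    """Returns the answer.

    Longer description that baredoc ignores.
    """
    return 42

def undocumented():
    pass

print(baredoc(example))       # 'Returns the answer'
print(baredoc(undocumented))  # ''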
def add_field(self, fieldname, fieldspec=whoosh_module_fields.TEXT): self._whoosh.add_field(fieldname, fieldspec) return self._whoosh.schema
Add a field in the index of the model. Args: fieldname (Text): This parameters register a new field in specified model. fieldspec (Name, optional): This option adds various options as were described before. Returns: TYPE: The new schema after deleted is returned.
codesearchnet
def create_projection(self, fov: float = 75.0, near: float = 1.0, far: float = 100.0, aspect_ratio: float = None): return matrix44.create_perspective_projection_matrix( fov, aspect_ratio or self.window.aspect_ratio, near, far, dtype='f4', )
Create a projection matrix with the following parameters. When ``aspect_ratio`` is not provided the configured aspect ratio for the window will be used. Args: fov (float): Field of view near (float): Camera near value far (float): Camera far value Keyword Args: aspect_ratio (float): Aspect ratio of the viewport Returns: The projection matrix as a float32 :py:class:`numpy.array`
juraj-google-style
def _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2): cand1_f = tf.to_float(cand1) cand2_f = tf.to_float(cand2) step_size = (cand2_f - cand1_f) fpart = ((x - cand1_f) / step_size) ret = tf.where(tf.greater(fpart, noise), cand2, cand1) return ret
Round-off x to cand1 or to cand2 in an unbiased way. Cand1 and cand2 are the same shape as x. For every element of x, the corresponding elements of cand1 and cand2 should be the two closest bfloat16 values to x. Order does not matter. cand1 and cand2 must differ from each other. Args: x: A float32 Tensor. noise: A Tensor broadcastable to the shape of x containing random uniform values in [0.0, 1.0]. cand1: A bfloat16 Tensor the same shape as x. cand2: A bfloat16 Tensor the same shape as x. Returns: A bfloat16 Tensor.
codesearchnet
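A NumPy sketch of the same unbiased (stochastic) rounding idea, written without the TF1 API used above; float16 stands in for bfloat16 here and the function name is illustrative.

import numpy as np

def randomized_roundoff(x, noise, cand1, cand2):
    # Round each element of x to cand1 or cand2 so that the expected
    # value of the result equals x (unbiased stochastic rounding).
    cand1_f = cand1.astype(np.float32)
    cand2_f = cand2.astype(np.float32)
    fpart = (x - cand1_f) / (cand2_f - cand1_f)
    return np.where(fpart > noise, cand2, cand1)

x = np.array([0.3], dtype=np.float32)
cand1 = np.array([0.0], dtype=np.float16)
cand2 = np.array([1.0], dtype=np.float16)
samples = [randomized_roundoff(x, np.random.uniform(size=x.shape).astype(np.float32),
                               cand1, cand2)
           for _ in range(10000)]
print(np.mean(samples))  # close to 0.3 on average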
def protorpc_to_endpoints_error(self, status, body): try: rpc_error = self.__PROTOJSON.decode_message(remote.RpcStatus, body) except (ValueError, messages.ValidationError): rpc_error = remote.RpcStatus() if (rpc_error.state == remote.RpcStatus.State.APPLICATION_ERROR): error_class = _ERROR_NAME_MAP.get(rpc_error.error_name) if error_class: (status, body) = self.__write_error(error_class.http_status, rpc_error.error_message) return (status, body)
Convert a ProtoRPC error to the format expected by Google Endpoints. If the body does not contain an ProtoRPC message in state APPLICATION_ERROR the status and body will be returned unchanged. Args: status: HTTP status of the response from the backend body: JSON-encoded error in format expected by Endpoints frontend. Returns: Tuple of (http status, body)
codesearchnet
def optimize(objective_function, domain, stopping_condition, parameters=None, position_update=functions.std_position, velocity_update=functions.std_velocity, parameter_update=functions.std_parameter_update, measurements=(), measurer=dictionary_based_metrics): params = __init_parameters__(parameters) rng = np.random.RandomState(params['seed']) initial_swarm = [functions.initialize_particle(rng, domain, objective_function) for i in range(params['swarm_size'])] state = types.PSOState(rng, params, iterations=0, swarm=initial_swarm) topology_function = state.params['topology'] update_fitness = functions.update_fitness update_particle = functions.update_particle results, measure = measurer(measurements) while not stopping_condition(state): n_bests = topology_function(state) state = state._replace(swarm=[update_particle(position_update, velocity_update, state, n_bests, ip) for ip in enumerate(state.swarm)]) state = state._replace(swarm=[update_fitness(objective_function, particle) for particle in state.swarm], iterations=state.iterations + 1) state = parameter_update(state, objective_function) results = measure(results, state) return functions.solution(state.swarm), results
Perform particle swarm optimization of the given fitness function. Args: objective_function: the cost function to optimize. stopping_condition: function specifying the stopping condition. parameters: dictionary: parameter dictionary for the PSO. Returns: cipy.algorithms.pso.Particle: The global best particle.
juraj-google-style
def get_metric_by_name(self, metric_name, **kwargs): return self._get_object_by_name(self._METRIC_ENDPOINT_SUFFIX, metric_name, **kwargs)
get a metric by name Args: metric_name (string): name of metric Returns: dictionary of response
codesearchnet
def load(self, response): self._response = response if self.next_location(raw=True): self._num_redirects += 1
Load the response and increment the counter. Args: response (:class:`.http.request.Response`): The response from a previous request.
codesearchnet
def load_delivery_report(adapter: MongoAdapter, report_path: str, case_id: str, update: bool=False): case_obj = adapter.case(case_id=case_id) if (case_obj is None): raise DataNotFoundError('no case found') if (not case_obj.get('delivery_report')): _put_report_in_case_root(case_obj, report_path) elif update: _put_report_in_case_root(case_obj, report_path) else: raise IntegrityError('Existing delivery report found, use update = True to overwrite') logger.info('Saving report for case {} in database'.format(case_obj['_id'])) return adapter.replace_case(case_obj)
Load a delivery report into a case in the database If the report already exists the function will exit. If the user want to load a report that is already in the database 'update' has to be 'True' Args: adapter (MongoAdapter): Connection to the database report_path (string): Path to delivery report case_id (string): Optional case identifier update (bool): If an existing report should be replaced Returns: updated_case(dict)
codesearchnet
def save_forensic_reports_to_kafka(self, forensic_reports, forensic_topic): if type(forensic_reports) == dict: forensic_reports = [forensic_reports] if len(forensic_reports) < 1: return try: logger.debug("Saving forensic reports to Kafka") self.producer.send(forensic_topic, forensic_reports) except UnknownTopicOrPartitionError: raise KafkaError( "Kafka error: Unknown topic or partition on broker") except Exception as e: raise KafkaError( "Kafka error: {0}".format(e.__str__())) try: self.producer.flush() except Exception as e: raise KafkaError( "Kafka error: {0}".format(e.__str__()))
Saves forensic DMARC reports to Kafka, sends individual records (slices) since Kafka requires messages to be <= 1MB by default. Args: forensic_reports (list): A list of forensic report dicts to save to Kafka forensic_topic (str): The name of the Kafka topic
juraj-google-style
def WriteStatEntries(stat_entries, client_id, mutation_pool, token=None): for stat_response in stat_entries: if stat_response.pathspec.last.stream_name: stat_response.st_mode &= (~ stat_type_mask) stat_response.st_mode |= stat.S_IFREG if data_store.AFF4Enabled(): for stat_entry in stat_entries: CreateAFF4Object(stat_entry, client_id_urn=rdf_client.ClientURN(client_id), mutation_pool=mutation_pool, token=token) if data_store.RelationalDBEnabled(): path_infos = [rdf_objects.PathInfo.FromStatEntry(s) for s in stat_entries] data_store.REL_DB.WritePathInfos(client_id, _FilterOutPathInfoDuplicates(path_infos))
Persists information about stat entries. Args: stat_entries: A list of `StatEntry` instances. client_id: An id of a client the stat entries come from. mutation_pool: A mutation pool used for writing into the AFF4 data store. token: A token used for writing into the AFF4 data store.
codesearchnet
def get_session(db_url): engine = create_engine(db_url, poolclass=NullPool, echo=False) Session = sessionmaker(bind=engine) Base.metadata.create_all(engine) return Session()
Gets SQLAlchemy session given url. Your tables must inherit from Base in hdx.utilities.database. Args: db_url (str): SQLAlchemy url Returns: sqlalchemy.orm.session.Session: SQLAlchemy session
juraj-google-style
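A usage sketch, assuming get_session and the hdx Base-derived table classes are importable; the URLs follow the standard SQLAlchemy format and the PostgreSQL credentials are placeholders.

# In-memory SQLite for tests:
session = get_session('sqlite:///:memory:')

# A PostgreSQL URL would look like:
# session = get_session('postgresql://user:password@localhost:5432/mydb')

session.close()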
def get_ip_prefixes_from_config(config, services, ip_version): ip_prefixes = set() for service in services: ip_prefix = ipaddress.ip_network(config.get(service, 'ip_prefix')) if (ip_prefix.version == ip_version): ip_prefixes.add(ip_prefix.with_prefixlen) return ip_prefixes
Build a set of IP prefixes found in service configuration files. Arguments: config (obj): A configparser object which holds our configuration. services (list): A list of section names which are the names of the service checks. ip_version (int): IP protocol version Returns: A set of IP prefixes.
codesearchnet
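A self-contained usage sketch; it assumes the function above and its ipaddress import are in scope, and the section names and ip_prefix values are made up for illustration.

import configparser
import io

raw = """
[foo.example.com]
ip_prefix = 10.1.0.0/24

[bar.example.com]
ip_prefix = 2001:db8::/64
"""

config = configparser.ConfigParser()
config.read_file(io.StringIO(raw))

services = ['foo.example.com', 'bar.example.com']
print(get_ip_prefixes_from_config(config, services, 4))  # {'10.1.0.0/24'}
print(get_ip_prefixes_from_config(config, services, 6))  # {'2001:db8::/64'}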
def as_graph(self, depth=0): if depth in self._graph_cache: return self._graph_cache[depth] self._graph_cache[depth] = graph = Graph(self, depth=depth) return graph
Create a graph with self as node, cache it, return it. Args: depth (int): depth of the graph. Returns: Graph: an instance of Graph.
juraj-google-style
def _launch_cli(self): self._register_this_run_info(self._run_cli) response = self._run_cli.run_ui(init_command=self._init_command, title=self._title, title_color=self._title_color) return response
Launch the interactive command-line interface. Returns: The OnRunStartResponse specified by the user using the "run" command.
github-repos
def _wait_after(provider, job_ids, poll_interval, stop_on_failure): job_ids_to_check = {j for j in job_ids if (j != dsub_util.NO_JOB)} error_messages = [] while (job_ids_to_check and ((not error_messages) or (not stop_on_failure))): print(('Waiting for: %s.' % ', '.join(job_ids_to_check))) jobs_left = _wait_for_any_job(provider, job_ids_to_check, poll_interval) jobs_completed = job_ids_to_check.difference(jobs_left) tasks_completed = provider.lookup_job_tasks({'*'}, job_ids=jobs_completed) dominant_job_tasks = _dominant_task_for_jobs(tasks_completed) if (len(dominant_job_tasks) != len(jobs_completed)): jobs_found = dsub_util.tasks_to_job_ids(dominant_job_tasks) jobs_not_found = jobs_completed.difference(jobs_found) for j in jobs_not_found: error = ('%s: not found' % j) print_error((' %s' % error)) error_messages += [error] for t in dominant_job_tasks: job_id = t.get_field('job-id') status = t.get_field('task-status') print((' %s: %s' % (str(job_id), str(status)))) if (status in ['FAILURE', 'CANCELED']): error_messages += [provider.get_tasks_completion_messages([t])] job_ids_to_check = jobs_left return error_messages
Print status info as we wait for those jobs. Blocks until either all of the listed jobs succeed, or one of them fails. Args: provider: job service provider job_ids: a set of job IDs (string) to wait for poll_interval: integer seconds to wait between iterations stop_on_failure: whether to stop waiting if one of the tasks fails. Returns: Empty list if there was no error, a list of error messages from the failed tasks otherwise.
codesearchnet
def last_updated(self, path): return self._gcsIO().last_updated(path)
Get UNIX Epoch time in seconds on the FileSystem. Args: path: string path of file. Returns: float UNIX Epoch time Raises: ``BeamIOError``: if path doesn't exist.
github-repos
def _HasDuplicateRegistryKeyPaths( self, filename, artifact_definition, source): result = False intersection = self._artifact_registry_key_paths.intersection( set(source.keys)) if intersection: duplicate_key_paths = '\n'.join(intersection) logging.warning(( 'Artifact definition: {0:s} in file: {1:s} has duplicate ' 'Registry key paths:\n{2:s}').format( artifact_definition.name, filename, duplicate_key_paths)) result = True self._artifact_registry_key_paths.update(source.keys) return result
Checks if Registry key paths are not already defined by other artifacts. Note that at the moment this function will only find exact duplicate Registry key paths. Args: filename (str): name of the artifacts definition file. artifact_definition (ArtifactDefinition): artifact definition. source (SourceType): source definition. Returns: bool: True if the Registry key paths defined by the source type are used in other artifacts.
juraj-google-style
def scored_to_phenotype(self,phenotypes): def _apply_score(scored_calls,phenotypes): present = sorted(list(set(phenotypes)&set(scored_calls.keys()))) total = sum([scored_calls[x] for x in present]) if total > 1: raise ValueError("You cant extract phenotypes from scores if they are not mutually exclusive") if total == 0: return np.nan for label in present: if scored_calls[label] == 1: return label raise ValueError("Should have hit an exit criteria already") output = self.copy() output['phenotype_label'] = output.apply(lambda x: _apply_score(x['scored_calls'],phenotypes),1) output['phenotype_calls'] = output.apply(lambda x: dict([(y,1 if x['phenotype_label']==y else 0) for y in phenotypes]) ,1) return output
Convert binary phenotypes to mutually exclusive phenotypes. If none of the phenotypes are set, phenotype_label becomes nan. If more than one of the phenotypes is set, a ValueError is raised. Args: phenotypes (list): a list of scored_names to convert to phenotypes Returns: CellDataFrame
juraj-google-style
def export(self, name=None): with tf.name_scope(name or '%s_lookup_table_export' % self._name): keys, values = gen_simple_hash_table_op.examples_simple_hash_table_export(self.resource_handle, key_dtype=self._key_dtype, value_dtype=self._value_dtype) return (keys, values)
Export all `key` and `value` pairs. Args: name: A name for the operation (optional). Returns: A tuple of two tensors, the first with the `keys` and the second with the `values`.
github-repos
def transform_content(tags, content_transformer): if type(tags) not in [tuple, list]: tags = [tags] for tag in tags: new_child = dhtmlparser.HTMLElement(content_transformer(tag)) if hasattr(tag, "parent"): new_child.parent = tag tag.childs = [new_child]
Transform content in all `tags` using result of `content_transformer(tag)` call. Args: tags (obj/list): HTMLElement instance, or list of HTMLElement instances. content_transformer (function): Function which is called as ``content_transformer(tag)``.
juraj-google-style
def _parse_hextet(self, hextet_str):
    if not self._HEX_DIGITS.issuperset(hextet_str):
        raise ValueError
    if len(hextet_str) > 4:
        raise ValueError
    hextet_int = int(hextet_str, 16)
    if hextet_int > 0xFFFF:
        raise ValueError
    return hextet_int
Convert an IPv6 hextet string into an integer. Args: hextet_str: A string, the number to parse. Returns: The hextet as an integer. Raises: ValueError: if the input isn't strictly a hex number from [0..FFFF].
juraj-google-style
def AsDict(self, dt=True): data = {} if self.sharekey: data['sharekey'] = self.sharekey if self.name: data['name'] = self.name if self.user: data['user'] = self.user.AsDict() if self.title: data['title'] = self.title if self.description: data['description'] = self.description if self.posted_at: if dt: data['posted_at'] = self.posted_at else: data['posted_at'] = self.posted_at_iso if self.permalink: data['permalink'] = self.permalink if self.width: data['width'] = self.width if self.height: data['height'] = self.height if self.image_url: data['image_url'] = self.image_url if self.source_url: data['source_url'] = self.source_url data['views'] = self.views data['likes'] = self.likes data['saves'] = self.saves data['comments'] = self.comments data['nsfw'] = self.nsfw data['saved'] = self.saved data['liked'] = self.liked return data
A dict representation of this Shake instance. The return value uses the same key names as the JSON representation. Args: dt (bool): If True, return dates as python datetime objects. If False, return dates as ISO strings. Return: A dict representing this Shake instance
juraj-google-style
def from_json_str(cls, json_str: str, primitive_cls: Type[message.Message], context: Context) -> 'PrimitiveWrapper':
Serializes json_str into an instance of primitive_cls and wraps. Args: json_str: The string-representation of the raw json_value to serialize into primitive_cls and wrap. primitive_cls: The FHIR primitive class to serialize into and wrap. context: Related primitive information to use for printing/parsing a wrapped primitive. Returns: An instance of PrimitiveWrapper.
github-repos
def does_attribute_meet_condition(self, attribute, conditions): if conditions is None or len(conditions) == 0: return True for attribute_name, attribute_value in conditions.items(): value = getattr(attribute, attribute_name, False) if value != attribute_value and bool(value) != attribute_value: return False return True
Check if the attribute meet all the given conditions Args: attribute: the attribute information conditions: a dictionary of condition to match Returns: True if the attribute match all conditions. False otherwise
juraj-google-style
def _repr_to_list(value: torch.Tensor): torch.set_printoptions(sci_mode=True, linewidth=120) with StringIO() as buf, redirect_stdout(buf): print(value) raw = buf.getvalue() return _sanitize_repr_for_diff(raw).splitlines()
Converts a tensor into a sanitized multi-line string representation. Args: value (`torch.Tensor`): The tensor to represent. Returns: `List[str]`: List of string lines representing the tensor.
github-repos
def open_if_needed(self, mode=None): was_open = self.is_open() if not was_open: self.open(mode=mode) try: yield self finally: if not was_open: self.close()
Convenience context-manager for the use with ``with``. Opens the container if not already done. Only closes the container if it was opened within this context. Args: mode (str): Either 'r' for read-only, 'w' for truncate and write or 'a' for append. (default: 'a'). If ``None``, uses ``self.mode``.
juraj-google-style
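A minimal sketch of the same open-if-needed pattern on a toy container class, using contextlib.contextmanager; the Container class here is illustrative, not the original container.

from contextlib import contextmanager

class Container(object):
    def __init__(self, mode='a'):
        self.mode = mode
        self._open = False

    def is_open(self):
        return self._open

    def open(self, mode=None):
        self.mode = mode or self.mode
        self._open = True

    def close(self):
        self._open = False

    @contextmanager
    def open_if_needed(self, mode=None):
        was_open = self.is_open()
        if not was_open:
            self.open(mode=mode)
        try:
            yield self
        finally:
            # Only close if this context was the one that opened the container.
            if not was_open:
                self.close()

c = Container()
with c.open_if_needed(mode='r') as handle:
    print(handle.is_open())  # True
print(c.is_open())           # False: closed because the context opened it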
def get_field(proto: message.Message, fields: FieldTypes) -> tuple[Any, Optional[descriptor.FieldDescriptor]]: field_proto = proto field_desc = None for field_proto, field_desc, _, _ in _walk_fields(proto, fields): pass return (field_proto, field_desc)
Returns the field and field descriptor from the proto. Args: proto: Parent proto of any message type. fields: List of string/int/map key fields, e.g. ["nodes", "attr", "value"] can represent `proto.nodes.attr["value"]`. Returns: Tuple of ( Field in the proto or `None` if none are found, Field descriptor )
github-repos
def get(cls, blob_key, **ctx_options): fut = cls.get_async(blob_key, **ctx_options) return fut.get_result()
Retrieve a BlobInfo by key. Args: blob_key: A blob key. This may be a str, unicode or BlobKey instance. **ctx_options: Context options for Model().get_by_id(). Returns: A BlobInfo entity associated with the provided key, If there was no such entity, returns None.
juraj-google-style
def mark_experimental(fn):
    @wraps(fn)
    def wrapper(*args, **kw):
        from peltak.core import shell
        if shell.is_tty:
            warnings.warn(
                'This command has experimental status. The interface is not '
                'yet stable and might change without notice within a patch '
                'version update. Use at your own risk.')
        return fn(*args, **kw)
    return wrapper
Mark function as experimental. Args: fn (FunctionType): The command function to decorate.
codesearchnet
def _create_record_internal(self, rtype, name, content, identifier=None): name = (self._relative_name(name) if (name is not None) else name) LOGGER.debug('Creating record with name %s', name) if self._is_duplicate_record(rtype, name, content): return True data = self._get_post_data_to_create_dns_entry(rtype, name, content, identifier) LOGGER.debug('Create DNS data: %s', data) create_response = self.session.post(self.URLS['dns_create_entry'].format(self.domain_id), data=data) self._invalidate_records_cache() self._log('Create DNS entry', create_response) was_success = (len(self._list_records(rtype, name, content)) > 0) if was_success: msg = 'Successfully added record %s' else: msg = 'Failed to add record %s' LOGGER.info(msg, name) return was_success
Create a new DNS entry in the domain zone if it does not already exist. Args: rtype (str): The DNS type (e.g. A, TXT, MX, etc) of the new entry. name (str): The name of the new DNS entry, e.g the domain for which a MX entry shall be valid. content (str): The content of the new DNS entry, e.g. the mail server hostname for a MX entry. [identifier] (str): The easyname id of a DNS entry. Use to overwrite an existing entry. Returns: bool: True if the record was created successfully, False otherwise.
codesearchnet
def create_output(self, key, value, variable_type=None): results = None if key is not None: key = key.strip() key_type = '{}-{}'.format(key, variable_type) if self.out_variables_type.get(key_type) is not None: v = self.out_variables_type.get(key_type) self.tcex.log.info( u'Variable {} was requested by downstream app.'.format(v.get('variable')) ) if value is not None: results = self.create(v.get('variable'), value) else: self.tcex.log.info( u'Variable {} has a none value and will not be written.'.format(key) ) elif self.out_variables.get(key) is not None and variable_type is None: v = self.out_variables.get(key) self.tcex.log.info( u'Variable {} was requested by downstream app.'.format(v.get('variable')) ) if value is not None: results = self.create(v.get('variable'), value) else: self.tcex.log.info( u'Variable {} has a none value and will not be written.'.format( v.get('variable') ) ) else: var_value = key if variable_type is not None: var_value = key_type self.tcex.log.info( u'Variable {} was NOT requested by downstream app.'.format(var_value) ) return results
Wrapper for Create method of CRUD operation for working with KeyValue DB. This method will automatically check to see if provided variable was requested by a downstream app and if so create the data in the KeyValue DB. Args: key (string): The variable to write to the DB. value (any): The data to write to the DB. variable_type (string): The variable type being written. Returns: (string): Result string of DB write.
juraj-google-style
def port_tag_details(cls, tags): for tag in tags: match = port_tag_re.match(tag) if match: source_sink, port, extra = match.groups() return source_sink == "source", cls(port), extra
Search tags for port info, returning it Args: tags: A list of tags to check Returns: None or (is_source, port, connected_value|disconnected_value) where port is one of the Enum entries of Port
juraj-google-style
def topological_sort(data): def check_self_dependencies(input_data): for k, v in input_data.items(): if k in v: raise ValueError('Self-dependency, {} depends on itself.'.format(k)) def prepare_input_data(input_data): return {k: set(v) for k, v in input_data.items()} def find_items_without_dependencies(input_data): return list(reduce(set.union, input_data.values()) - set(input_data.keys())) def add_empty_dependencies(data): items_without_dependencies = find_items_without_dependencies(data) data.update({item: set() for item in items_without_dependencies}) def get_sorted(input_data): data = input_data while True: ordered = set(item for item, dep in data.items() if len(dep) == 0) if not ordered: break yield ordered data = {item: (dep - ordered) for item, dep in data.items() if item not in ordered} if len(data) != 0: raise ValueError('Cyclic dependencies exist ' 'among these items: {}'.format(', '.join(repr(x) for x in data.items()))) check_self_dependencies(data) if not len(data): return [] data_copy = prepare_input_data(data) add_empty_dependencies(data_copy) result = [] for d in get_sorted(data_copy): try: d = sorted(d) except TypeError: d = list(d) result.extend(d) return result
Topologically sort the given dictionary structure. Args: data (dict): dictionary where each value is the collection of dependencies for its key. For example: ``{'a': (), 'b': ('a',)}``, where ``a`` depends on nothing and ``b`` depends on ``a``. Returns: list: the items in dependency (construction) order Raises: ValueError: on self-dependencies or cyclic dependencies.
juraj-google-style
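A usage sketch for the function above; it assumes topological_sort and its reduce import (functools.reduce in Python 3) are in scope.

dependencies = {
    'b': ('a',),      # b depends on a
    'c': ('a', 'b'),  # c depends on a and b
    'd': (),          # d depends on nothing
}
# 'a' appears only as a dependency, so it is added automatically.
print(topological_sort(dependencies))  # ['a', 'd', 'b', 'c']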
async def get_tournaments(self, subdomain: str = None, force_update: bool = False) -> list: if self.tournaments is None: force_update = True self._subdomains_searched.append('' if subdomain is None else subdomain) elif subdomain is None and '' not in self._subdomains_searched: force_update = True self._subdomains_searched.append('') elif subdomain is not None and subdomain not in self._subdomains_searched: force_update = True self._subdomains_searched.append(subdomain) if force_update: params = { 'include_participants': 1 if AUTO_GET_PARTICIPANTS else 0, 'include_matches': 1 if AUTO_GET_MATCHES else 0 } if subdomain is not None: params['subdomain'] = subdomain res = await self.connection('GET', 'tournaments', **params) if len(res) == 0: self.tournaments = [] else: for t_data in res: self._refresh_tournament_from_json(t_data) return self.tournaments
Gets all of the user's tournaments. |methcoro| Args: subdomain: *optional* subdomain must be given explicitly to get tournaments in a subdomain force_update: *optional* set to True to force the data update from Challonge Returns: list[Tournament]: list of all the user's tournaments Raises: APIException
juraj-google-style
def set_hostname(self, value=None, default=False, disable=False): cmd = self.command_builder('hostname', value=value, default=default, disable=disable) return self.configure(cmd)
Configures the global system hostname setting EosVersion: 4.13.7M Args: value (str): The hostname value default (bool): Controls use of the default keyword disable (bool): Controls the use of the no keyword Returns: bool: True if the commands are completed successfully
juraj-google-style
def _insert(self, item, feed_item): return self._api().insert(profileId=self.profile_id, body=item).execute()
Inserts a new item into CM. Args: item: The CM object to insert. feed_item: The feed item from the Bulkdozer feed representing the item to insert. Returns: The CM object representing the item inserted.
github-repos
def pull(self, project, run=None, entity=None): (project, run) = self.parse_slug(project, run=run) urls = self.download_urls(project, run, entity) responses = [] for fileName in urls: (_, response) = self.download_write_file(urls[fileName]) if response: responses.append(response) return responses
Download files from W&B. Args: project (str): The project to download run (str, optional): The run to download entity (str, optional): The entity to scope this project to. Defaults to wandb models Returns: A list of requests library response objects
codesearchnet
def _GetPathSegmentSeparator(self, path):
    if path.startswith('\\') or path[1:].startswith(':\\'):
        return '\\'
    if path.startswith('/'):
        return '/'
    if '/' in path and '\\' in path:
        # Both separators are present; let the more frequent one win.
        forward_count = len(path.split('/'))
        backward_count = len(path.split('\\'))
        if forward_count > backward_count:
            return '/'
        return '\\'
    if '/' in path:
        return '/'
    return '\\'
Given a path give back the path separator as a best guess. Args: path (str): path. Returns: str: path segment separator.
juraj-google-style
def forward(self, hidden_states: List[torch.Tensor], patch_height, patch_width) -> List[torch.Tensor]: if not isinstance(hidden_states, (tuple, list)): raise TypeError('hidden_states should be a tuple or list of tensors') if len(hidden_states) != len(self.config.neck_hidden_sizes): raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.') if self.reassemble_stage is not None: hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width) features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)] output = self.fusion_stage(features) return (output, features[-1])
Args: hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`): List of hidden states from the backbone.
github-repos
def __init__(self, profile_id: str, profile_location: Optional[str]=None, log_results: bool=False, file_copy_fn: Optional[Callable[[str, str], None]]=None, time_prefix: str='%Y-%m-%d_%H_%M_%S-', enable_cpu_profiling: bool=False, enable_memory_profiling: bool=False): self.profile_id = str(profile_id) self.profile_location = profile_location self.log_results = log_results self.file_copy_fn = file_copy_fn or self.default_file_copy_fn self.time_prefix = time_prefix self.enable_cpu_profiling = enable_cpu_profiling self.enable_memory_profiling = enable_memory_profiling
Creates a Profile object. Args: profile_id: Unique id of the profiling session. profile_location: The file location where the profiling results will be stored. log_results: Log the result to console if true. file_copy_fn: Lambda function for copying files. time_prefix: Format of the timestamp prefix in profiling result files. enable_cpu_profiling: CPU profiler will be enabled during the profiling session. enable_memory_profiling: Memory profiler will be enabled during the profiling session, the profiler only records the newly allocated objects in this session.
github-repos
def materialize(self, ref, table_name=None, index_columns=None, logger=None): from ambry.library import Library assert isinstance(self._library, Library) logger.debug('Materializing warehouse partition.\n partition: {}'.format(ref)) partition = self._library.partition(ref) connection = self._backend._get_connection() return self._backend.install(connection, partition, table_name=table_name, index_columns=index_columns, materialize=True, logger=logger)
Creates materialized table for given partition reference. Args: ref (str): id, vid, name or vname of the partition. Returns: str: name of the partition table in the database.
codesearchnet
def thermal_conductivity(self, temperature, volume): gamma = self.gruneisen_parameter(temperature, volume) theta_d = self.debye_temperature(volume) theta_a = theta_d * self.natoms**(-1./3.) prefactor = (0.849 * 3 * 4**(1./3.)) / (20. * np.pi**3) prefactor = prefactor * (self.kb/self.hbar)**3 * self.avg_mass kappa = prefactor / (gamma**2 - 0.514 * gamma + 0.228) kappa = kappa * theta_a**2 * volume**(1./3.) * 1e-10 return kappa
Eq(17) in 10.1103/PhysRevB.90.174107 Args: temperature (float): temperature in K volume (float): in Ang^3 Returns: float: thermal conductivity in W/K/m
juraj-google-style
def visit_invoke_reference(self, identifier: InvokeReferenceNode) -> Any: return self.visit_invoke_expression(identifier)
Allows visitors to implement custom Reference logic. By default, calls `visit_invoke_expression`. Subclasses may override this method to introduce custom logic for handling references. This function is called when the 'reference' identifier is invoked against a FHIR Reference resource. The visit_invoke_expression function is called for all other invocations. Args: identifier: The identifier on the right hand side of an invocation. Returns: The result of the reference invocation.
github-repos
def verify_id_token(id_token, audience, http=None, cert_uri=ID_TOKEN_VERIFICATION_CERTS): _require_crypto_or_die() if (http is None): http = transport.get_cached_http() (resp, content) = transport.request(http, cert_uri) if (resp.status == http_client.OK): certs = json.loads(_helpers._from_bytes(content)) return crypt.verify_signed_jwt_with_certs(id_token, certs, audience) else: raise VerifyJwtTokenError('Status code: {0}'.format(resp.status))
Verifies a signed JWT id_token. This function requires PyOpenSSL and because of that it does not work on App Engine. Args: id_token: string, A Signed JWT. audience: string, The audience 'aud' that the token should be for. http: httplib2.Http, instance to use to make the HTTP request. Callers should supply an instance that has caching enabled. cert_uri: string, URI of the certificates in JSON format to verify the JWT against. Returns: The deserialized JSON in the JWT. Raises: oauth2client.crypt.AppIdentityError: if the JWT fails to verify. CryptoUnavailableError: if no crypto library is available.
codesearchnet
def _get_single_variable(self, name, shape=None, dtype=dtypes.float32, initializer=None, regularizer=None, partition_info=None, reuse=None, trainable=None, collections=None, caching_device=None, validate_shape=True, use_resource=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): initializing_from_value = False if initializer is not None and (not callable(initializer)): initializing_from_value = True if shape is not None and initializing_from_value: raise ValueError('If initializer is a constant, do not specify shape.') dtype = dtypes.as_dtype(dtype) if shape is not None: shape = tensor_shape.as_shape(shape) if name in self._vars: if reuse is False: var = self._vars[name] err_msg = 'Variable %s already exists, disallowed. Did you mean to set reuse=True or reuse=tf.AUTO_REUSE in VarScope?' % name if isinstance(var, resource_variable_ops.ResourceVariable): raise ValueError(err_msg) tb = var.op.traceback[::-1] tb = [x for x in tb if 'tensorflow/python' not in x[0]][:5] raise ValueError('%s Originally defined at:\n\n%s' % (err_msg, ''.join(traceback.format_list(tb)))) found_var = self._vars[name] if shape is not None and (not shape.is_compatible_with(found_var.get_shape())): raise ValueError('Trying to share variable %s, but specified shape %s and found shape %s.' % (name, shape, found_var.get_shape())) if not dtype.is_compatible_with(found_var.dtype): dtype_str = dtype.name found_type_str = found_var.dtype.name raise ValueError('Trying to share variable %s, but specified dtype %s and found dtype %s.' % (name, dtype_str, found_type_str)) return found_var if reuse is True: raise ValueError('Variable %s does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=tf.AUTO_REUSE in VarScope?' % name) if initializer is None: if shape is None: raise ValueError(f'Variable {name} did not get an initializer, so its `shape` argument must be specified.') initializer, initializing_from_value = self._get_default_initializer(name=name, shape=shape, dtype=dtype) with ops.init_scope(): if initializing_from_value: init_val = initializer variable_dtype = None else: if tf_inspect.isclass(initializer): initializer = initializer() if shape is not None and shape.is_fully_defined(): if 'partition_info' in tf_inspect.getargspec(initializer).args: init_val = functools.partial(initializer, shape.as_list(), dtype=dtype, partition_info=partition_info) else: init_val = functools.partial(initializer, shape.as_list(), dtype=dtype) variable_dtype = dtype.base_dtype elif _needs_no_arguments(initializer): init_val = initializer variable_dtype = None else: raise ValueError("The initializer passed is not valid. 
It should be a callable with no arguments and the shape should not be provided or an instance of `tf.keras.initializers.*' and `shape` should be fully defined.") if use_resource is None: use_resource = resource_variables_toggle.resource_variables_enabled() v = _variable_v1(initial_value=init_val, name=name, trainable=trainable, collections=collections, caching_device=caching_device, dtype=variable_dtype, validate_shape=validate_shape, constraint=constraint, use_resource=use_resource, synchronization=synchronization, aggregation=aggregation, shape=shape) if context.executing_eagerly() and self._store_eager_variables: if collections: ops.add_to_collections(collections, v) else: ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v) if trainable: ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v) if not context.executing_eagerly() or self._store_eager_variables: self._vars[name] = v logging.vlog(1, 'Created variable %s with shape %s and init %s', v.name, format(shape), initializer) if regularizer: def make_regularizer_op(): with ops.colocate_with(v): with ops.name_scope(name + '/Regularizer/'): return regularizer(v) if regularizer(v) is not None: lazy_eval_tensor = _LazyEvalTensor(make_regularizer_op) ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, lazy_eval_tensor) return v
Get or create a single Variable (e.g. a shard or entire variable). See the documentation of get_variable above (ignore partitioning components) for details. Args: name: see get_variable. shape: see get_variable. dtype: see get_variable. initializer: see get_variable. regularizer: see get_variable. partition_info: _PartitionInfo object. reuse: see get_variable. trainable: see get_variable. collections: see get_variable. caching_device: see get_variable. validate_shape: see get_variable. use_resource: see get_variable. constraint: see get_variable. synchronization: see get_variable. aggregation: see get_variable. Returns: A Variable. See documentation of get_variable above. Raises: ValueError: See documentation of get_variable above.
github-repos
def generate_lars_path(weighted_data, weighted_labels): x_vector = weighted_data (alphas, _, coefs) = lars_path(x_vector, weighted_labels, method='lasso', verbose=False) return (alphas, coefs)
Generates the lars path for weighted data. Args: weighted_data: data that has been weighted by kernel weighted_labels: labels, weighted by kernel Returns: (alphas, coefs), both are arrays corresponding to the regularization parameter and coefficients, respectively
codesearchnet
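A usage sketch; lars_path here is scikit-learn's sklearn.linear_model.lars_path, and the data, labels, and kernel-weighting scheme are synthetic illustrations rather than the library's actual pipeline.

import numpy as np
from sklearn.linear_model import lars_path

rng = np.random.RandomState(0)
X = rng.normal(size=(100, 5))
y = X[:, 0] * 2.0 - X[:, 3] + rng.normal(scale=0.1, size=100)

# Example kernel weights, e.g. from an exponential kernel over distances.
weights = np.exp(-np.linalg.norm(X, axis=1) ** 2)
weighted_data = X * np.sqrt(weights)[:, np.newaxis]
weighted_labels = y * np.sqrt(weights)

alphas, _, coefs = lars_path(weighted_data, weighted_labels,
                             method='lasso', verbose=False)
print(alphas.shape, coefs.shape)  # coefs has one column per regularization value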
def snr(*args, **kwargs): squeeze = False max_length = 0 for arg in args: try: length = len(arg) if (length > max_length): max_length = length except TypeError: pass if (max_length == 0): squeeze = True kwargs['length'] = max_length snr_main = SNR(**kwargs) if squeeze: snr_out = snr_main(*args) return {key: np.squeeze(snr_out[key]) for key in snr_out} return snr_main(*args)
Compute the SNR of binaries. snr is a function that takes binary parameters and sensitivity curves as inputs, and returns snr for chosen phases. Warning: All binary parameters must be either scalar, len-1 arrays, or arrays of the same length. All of these can be used at once. However, you cannot input multiple arrays of different lengths. Arguments: *args: Arguments for :meth:`gwsnrcalc.utils.pyphenomd.PhenomDWaveforms.__call__` **kwargs: Keyword arguments related to parallel generation (see :class:`gwsnrcalc.utils.parallel`), waveforms (see :class:`gwsnrcalc.utils.pyphenomd`), or sensitivity information (see :class:`gwsnrcalc.utils.sensitivity`). Returns: (dict or list of dict): Signal-to-Noise Ratio dictionary for requested phases.
codesearchnet
def _get_showcase_dataset_dict(self, dataset): if isinstance(dataset, hdx.data.dataset.Dataset) or isinstance(dataset, dict): if 'id' not in dataset: dataset = hdx.data.dataset.Dataset.read_from_hdx(dataset['name']) dataset = dataset['id'] elif not isinstance(dataset, str): raise hdx.data.hdxobject.HDXError('Type %s cannot be added as a dataset!' % type(dataset).__name__) if is_valid_uuid(dataset) is False: raise hdx.data.hdxobject.HDXError('%s is not a valid dataset id!' % dataset) return {'showcase_id': self.data['id'], 'package_id': dataset}
Get showcase dataset dict. Args: dataset (Union[Dataset,Dict,str]): Either a dataset id or dataset metadata from a Dataset object or dictionary Returns: Dict: showcase dataset dict
juraj-google-style
def _CheckIsDevice(self, file_entry): if definitions.FILE_ENTRY_TYPE_DEVICE not in self._file_entry_types: return False return file_entry.IsDevice()
Checks the is_device find specification. Args: file_entry (FileEntry): file entry. Returns: bool: True if the file entry matches the find specification, False if not.
juraj-google-style
def clean_args(self, args, keys): for k in keys: if getattr(args, k) is None: delattr(args, k)
Clean None values out of the arg namespace. This lets us check for a config file arg based on whether the None default was overwritten. Args: args: an argparse.Namespace. keys: Keys to clean if None
github-repos
def transform(self, col): out = pd.DataFrame(index=col.index) out[self.col_name] = col.fillna(self.default_value) out[self.new_name] = (pd.notnull(col) * 1).astype(int) return out
Prepare the transformer to convert data and return the processed table. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.DataFrame
juraj-google-style
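A self-contained pandas sketch of the same null-indicator transform; the class and the indicator column naming are simplified stand-ins, not the original transformer.

import numpy as np
import pandas as pd

class NullTransformer(object):
    def __init__(self, col_name, default_value=0):
        self.col_name = col_name
        self.new_name = '?' + col_name   # indicator column name (illustrative)
        self.default_value = default_value

    def transform(self, col):
        out = pd.DataFrame(index=col.index)
        out[self.col_name] = col.fillna(self.default_value)
        # 1 where the original value was present, 0 where it was null.
        out[self.new_name] = (pd.notnull(col) * 1).astype(int)
        return out

col = pd.Series([1.5, np.nan, 3.0], name='amount')
print(NullTransformer('amount').transform(col))
#    amount  ?amount
# 0     1.5        1
# 1     0.0        0
# 2     3.0        1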
def bdp(tickers, flds, **kwargs): logger = logs.get_logger(bdp, level=kwargs.pop('log', logs.LOG_LEVEL)) con, _ = create_connection() ovrds = assist.proc_ovrds(**kwargs) logger.info( f'loading reference data from Bloomberg:\n' f'{assist.info_qry(tickers=tickers, flds=flds)}' ) data = con.ref(tickers=tickers, flds=flds, ovrds=ovrds) if not kwargs.get('cache', False): return [data] qry_data = [] for r, snap in data.iterrows(): subset = [r] data_file = storage.ref_file( ticker=snap.ticker, fld=snap.field, ext='pkl', **kwargs ) if data_file: if not files.exists(data_file): qry_data.append(data.iloc[subset]) files.create_folder(data_file, is_file=True) data.iloc[subset].to_pickle(data_file) return qry_data
Bloomberg reference data Args: tickers: tickers flds: fields to query **kwargs: bbg overrides Returns: pd.DataFrame Examples: >>> bdp('IQ US Equity', 'Crncy', raw=True) ticker field value 0 IQ US Equity Crncy USD >>> bdp('IQ US Equity', 'Crncy').reset_index() ticker crncy 0 IQ US Equity USD
juraj-google-style
def load_subclasses(klass, modules=None): if modules: if isinstance(modules, six.string_types): modules = [modules] loader = Loader() loader.load(*modules) return klass.__subclasses__()
Recursively load all subclasses of a class. Args: klass: Class whose subclasses we want to load. modules: List of additional modules or module names that should be recursively imported in order to find all the subclasses of the desired class. Default: None FIXME: This function is kept only for backward compatibility reasons; it should not be used. A deprecation warning should be raised and it should be replaced by the ``Loader`` class.
codesearchnet
def relative_humidity(self, value=999): if value is not None: try: value = int(value) except ValueError: raise ValueError('value {} need to be of type int ' 'for field `relative_humidity`'.format(value)) if value < 0: raise ValueError('value need to be greater or equal 0 ' 'for field `relative_humidity`') if value > 110: raise ValueError('value need to be smaller 110 ' 'for field `relative_humidity`') self._relative_humidity = value
Corresponds to IDD Field `relative_humidity` Args: value (int): value for IDD Field `relative_humidity` value >= 0 value <= 110 Missing value: 999 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def to_tensor_list(element_spec, element):
    return _to_tensor_list_helper(
        lambda state, spec, component: state + spec._to_tensor_list(component),
        element_spec, element)
Returns a tensor list representation of the element.

Args:
    element_spec: A nested structure of `tf.TypeSpec` objects representing the element type specification.
    element: The element to convert to tensor list representation.

Returns:
    A tensor list representation of `element`.

Raises:
    ValueError: If `element_spec` and `element` do not have the same number of elements or if the two structures are not nested in the same way.
    TypeError: If `element_spec` and `element` differ in the type of sequence in any of their substructures.
github-repos
def write_tarball(voevents, filepath):
    tuple_gen = ((v.ivorn, v.xml) for v in voevents)
    return write_tarball_from_ivorn_xml_tuples(tuple_gen, filepath)
Iterate over voevent models / dbrows and write to bz'd tarball.

Args:
    voevents (iterable): An iterable (e.g. list) of e.g. Voevent db-rows, with access to the 'ivorn' and 'xml' attributes.
    filepath (string): Path to the new tarball to create. Typically of form '/path/to/foo.tar.bz2'

Returns:
    packet_count (int): Number of packets written to tarball
juraj-google-style
def AcceptableMimeType(accept_patterns, mime_type):
    if '/' not in mime_type:
        raise exceptions.InvalidUserInputError(
            'Invalid MIME type: "%s"' % mime_type)
    unsupported_patterns = [p for p in accept_patterns if ';' in p]
    if unsupported_patterns:
        raise exceptions.GeneratedClientError(
            'MIME patterns with parameter unsupported: "%s"' % ', '.join(
                unsupported_patterns))

    def MimeTypeMatches(pattern, mime_type):
        'Return True iff mime_type is acceptable for pattern.'
        if pattern == '*':
            pattern = '*/*'
        return all(accept in ('*', provided)
                   for accept, provided
                   in zip(pattern.split('/'), mime_type.split('/')))

    return any(MimeTypeMatches(pattern, mime_type)
               for pattern in accept_patterns)
Return True iff mime_type is acceptable for one of accept_patterns.

Note that this function assumes that all patterns in accept_patterns will be simple types of the form "type/subtype", where one or both of these can be "*". We do not support parameters (i.e. "; q=") in patterns.

Args:
    accept_patterns: list of acceptable MIME types.
    mime_type: the mime type we would like to match.

Returns:
    Whether or not mime_type matches (at least) one of these patterns.
codesearchnet
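A few illustrative calls against the AcceptableMimeType entry above; the pattern lists are made up for the example and no exception path is exercised:

AcceptableMimeType(['image/*', 'application/json'], 'image/png')   # True
AcceptableMimeType(['*/*'], 'text/csv')                            # True ('*/*' matches anything)
AcceptableMimeType(['text/plain'], 'text/csv')                     # False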
def get_item_concept_mapping(self, lang):
    concepts = self.filter(active=True, lang=lang)
    return group_keys_by_value_lists(
        Concept.objects.get_concept_item_mapping(concepts, lang))
Get mapping of item ids to concepts containing these items

Args:
    lang (str): language of concepts

Returns:
    dict: item (int) -> set of concepts (int)
juraj-google-style
def get_lonlatalts(self):
    band = self.filehandle

    (xpoints, ypoints), (gcp_lons, gcp_lats, gcp_alts), (gcps, crs) = self.get_gcps()

    longitudes = interpolate_xarray(xpoints, ypoints, gcp_lons, band.shape)
    latitudes = interpolate_xarray(xpoints, ypoints, gcp_lats, band.shape)
    altitudes = interpolate_xarray(xpoints, ypoints, gcp_alts, band.shape)

    longitudes.attrs['gcps'] = gcps
    longitudes.attrs['crs'] = crs
    latitudes.attrs['gcps'] = gcps
    latitudes.attrs['crs'] = crs
    altitudes.attrs['gcps'] = gcps
    altitudes.attrs['crs'] = crs

    return longitudes, latitudes, altitudes
Obtain GCPs and construct latitude and longitude arrays.

Args:
    band (gdal band): Measurement band which comes with GCPs
    array_shape (tuple): The size of the data array

Returns:
    coordinates (tuple): A tuple with longitude and latitude arrays
codesearchnet
def get(self):
    chunk_size = self._smallest_buffer()
    all_full = self._all_full()

    if all_full:
        right_context = 0
        num_frames = chunk_size - self.current_left_context
    else:
        right_context = self.right_context
        num_frames = self.min_frames

    chunk_size_needed = num_frames + self.current_left_context + right_context

    if chunk_size >= chunk_size_needed:
        data = []
        keep_frames = self.left_context + self.right_context
        keep_from = max(0, chunk_size - keep_frames)

        for index in range(self.num_buffers):
            data.append(self.buffers[index][:chunk_size])
            self.buffers[index] = self.buffers[index][keep_from:]

        if self.num_buffers == 1:
            data = data[0]

        chunk = Chunk(data, self.current_frame, all_full,
                      self.current_left_context, right_context)

        self.current_left_context = min(self.left_context, chunk_size)
        self.current_frame = max(self.current_frame + chunk_size - keep_frames, 0)

        return chunk
Get a new chunk if available.

Returns:
    Chunk or list: If enough frames are available a chunk is returned, otherwise None. If ``self.num_buffers > 1`` a list instead of a single chunk is returned.
codesearchnet
def MapFile(self, key_path_prefix, registry_file):
    self._registry_files[key_path_prefix.upper()] = registry_file
    registry_file.SetKeyPathPrefix(key_path_prefix)
Maps the Windows Registry file to a specific key path prefix.

Args:
    key_path_prefix (str): key path prefix.
    registry_file (WinRegistryFile): Windows Registry file.
juraj-google-style
def milliseconds(value: Union[int, float]) -> Duration:
    return float(value / 1000)
Converts input value from milliseconds to a `Duration` in seconds.

Example:
    ```python
    >>> duration = tp.duration.milliseconds(250)
    >>> duration
    0.25

    >>> # Usage in a window operation
    >>> a = tp.event_set(
    ...     timestamps=[0.5, 1.0, 1.2],
    ...     features={"f1": [1, 5, -5]}
    ... )
    >>> a.moving_sum(window_length=duration)
    indexes: ...
        timestamps: [0.5 1.  1.2]
        'f1': [1 5 0]
    ...
    ```

Args:
    value: Number of milliseconds.

Returns:
    Equivalent number of seconds.
github-repos
def assert_visible(self, selector, testid=None, **kwargs):
    self.info_log('Assert visible selector(%s) testid(%s)' % (selector, testid))

    highlight = kwargs.get(
        'highlight',
        BROME_CONFIG['highlight']['highlight_on_assertion_success']
    )
    self.debug_log('effective highlight: %s' % highlight)

    wait_until_visible = kwargs.get(
        'wait_until_visible',
        BROME_CONFIG['proxy_driver']['wait_until_visible_before_assert_visible']
    )
    self.debug_log('effective wait_until_visible: %s' % wait_until_visible)

    if wait_until_visible:
        self.wait_until_visible(selector, raise_exception=False)

    element = self.find(
        selector,
        raise_exception=False,
        wait_until_visible=False,
        wait_until_present=False
    )
    if element and element.is_displayed(raise_exception=False):
        if highlight:
            element.highlight(
                style=BROME_CONFIG['highlight']['style_on_assertion_success']
            )
        if testid is not None:
            self.create_test_result(testid, True)
        return True
    else:
        if testid is not None:
            self.create_test_result(testid, False)
        return False
Assert that the element is visible in the dom

Args:
    selector (str): the selector used to find the element
    testid (str): the test_id or a str

Kwargs:
    wait_until_visible (bool)
    highlight (bool)

Returns:
    bool: True if the assertion succeeds; False otherwise.
codesearchnet
def price(self, valuation_date, market, model=None, pricing_context=None, name=None):
    del model, pricing_context
    name = name or self._name + '_price'
    with tf.name_scope(name):
        discount_curve = market.discount_curve
        discount_factors = discount_curve.get_discount_factor(self._payment_dates)
        future_cashflows = tf.cast(self._payment_dates >= valuation_date, dtype=self._dtype)
        cashflow_pvs = self._notional * (
            future_cashflows * self._daycount_fractions * self._coupon_rate * discount_factors)
        return tf.math.reduce_sum(
            tf.reshape(cashflow_pvs, (self._batch_size, self._num_cashflows)), axis=1)
Returns the present value of the stream on the valuation date.

Args:
    valuation_date: A scalar `DateTensor` specifying the date on which valuation is being desired.
    market: A namedtuple of type `InterestRateMarket` which contains the necessary information for pricing the cashflow stream.
    model: Reserved for future use.
    pricing_context: Additional context relevant for pricing.
    name: Python str. The name to give to the ops created by this function. Default value: `None` which maps to 'price'.

Returns:
    A Rank 1 `Tensor` of real type containing the modeled price of each stream based on the input market data.
github-repos
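The pricing logic in the entry above reduces to the standard fixed-coupon present-value sum: notional times accrual fraction times coupon times discount factor, summed over the cashflows still alive on the valuation date. A plain-numpy sketch with made-up market data:

import numpy as np

notional, coupon = 1e6, 0.02
daycount_fractions = np.array([0.5, 0.5, 0.5, 0.5])
discount_factors = np.array([0.99, 0.98, 0.97, 0.96])
still_alive = np.array([1.0, 1.0, 1.0, 1.0])  # payment date >= valuation date

pv = np.sum(notional * still_alive * daycount_fractions * coupon * discount_factors)
print(pv)  # 39000.0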
def find_equivalent_sites(self, site):
    for sites in self.equivalent_sites:
        if site in sites:
            return sites
    raise ValueError("Site not in structure")
Finds all symmetrically equivalent sites for a particular site

Args:
    site (PeriodicSite): A site in the structure

Returns:
    ([PeriodicSite]): List of all symmetrically equivalent sites.
juraj-google-style
def watch(self, enable=True, gpsd_protocol=PROTOCOL, devicepath=None):
    command = '?WATCH={{"enable":true,"{0}":true}}'.format(gpsd_protocol)
    if gpsd_protocol == 'rare':
        command = command.replace('"rare":true', '"raw":1')
    if gpsd_protocol == 'raw':
        command = command.replace('"raw":true', '"raw",2')
    if not enable:
        command = command.replace('true', 'false')
    if devicepath:
        command = command.replace('}', ',"device":"') + devicepath + '"}'
    return self.send(command)
watch gpsd in various gpsd_protocols or devices.

Arguments:
    enable: (bool) stream data to socket
    gpsd_protocol: (str) 'json' | 'nmea' | 'rare' | 'raw' | 'scaled' | 'split24' | 'pps'
    devicepath: (str) device path - '/dev/ttyUSBn' for some number n or '/dev/whatever_works'

Returns:
    command: (str) e.g., '?WATCH={"enable":true,"json":true};'
juraj-google-style
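A standalone sketch of the string building done by the watch entry above (the gpsd session object and self.send are not shown; the protocol and device path are example values):

gpsd_protocol, devicepath = 'nmea', '/dev/ttyUSB0'
command = '?WATCH={{"enable":true,"{0}":true}}'.format(gpsd_protocol)
command = command.replace('}', ',"device":"') + devicepath + '"}'
print(command)  # ?WATCH={"enable":true,"nmea":true,"device":"/dev/ttyUSB0"}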
def _start_profiler(self, logdir):
    if self._profiler_started:
        return
    try:
        profiler.start(logdir=logdir)
        self._profiler_started = True
    except errors.AlreadyExistsError as e:
        logging.error('Failed to start profiler: %s', e.message)
Starts the profiler if currently inactive.

Args:
    logdir: Directory where profiler results will be saved.
github-repos
def get_internal_urls(self):
    internal_urls = self.get_subfields('856', 'u', i1='4', i2='0')
    internal_urls.extend(self.get_subfields('998', 'a'))
    internal_urls.extend(self.get_subfields('URL', 'u'))

    return map(lambda x: x.replace('&amp;', '&'), internal_urls)
URLs, which may point to edeposit, aleph, kramerius and so on.

Fields ``856u40``, ``998a`` and ``URLu``.

Returns:
    list: List of internal URLs.
codesearchnet
def kill_raylet(self, check_alive=True):
    self._kill_process_type(
        ray_constants.PROCESS_TYPE_RAYLET, check_alive=check_alive)
Kill the raylet.

Args:
    check_alive (bool): Raise an exception if the process was already dead.
codesearchnet
def _extract_inner_match(self, candidate, offset):
    for possible_inner_match in _INNER_MATCHES:
        group_match = possible_inner_match.search(candidate)
        is_first_match = True
        while group_match and self._max_tries > 0:
            if is_first_match:
                group = self._trim_after_first_match(
                    _UNWANTED_END_CHAR_PATTERN, candidate[:group_match.start()])
                match = self._parse_and_verify(group, offset)
                if match is not None:
                    return match
                self._max_tries -= 1
                is_first_match = False
            group = self._trim_after_first_match(
                _UNWANTED_END_CHAR_PATTERN, group_match.group(1))
            match = self._parse_and_verify(group, offset + group_match.start(1))
            if match is not None:
                return match
            self._max_tries -= 1
            group_match = possible_inner_match.search(candidate, group_match.start() + 1)
    return None
Attempts to extract a match from candidate if the whole candidate does not qualify as a match.

Arguments:
    candidate -- The candidate text that might contain a phone number
    offset -- The current offset of candidate within text

Returns the match found, None if none can be found
juraj-google-style
def _ImportAll(self, module):
    aliases = []
    getattrs = set()
    ast = self._module_map[module]
    type_param_names = set()
    if module == 'http.client':
        exports = None
    else:
        exports = [x for x in ast.constants if x.name.endswith('.__all__')]
    if exports:
        exports = exports[0].value
    for member in sum((ast.constants, ast.type_params, ast.classes,
                       ast.functions, ast.aliases), ()):
        _, _, member_name = member.name.rpartition('.')
        if exports and member_name not in exports:
            continue
        new_name = self._ModulePrefix() + member_name
        if isinstance(member, pytd.Function) and member_name == '__getattr__':
            getattrs.add(member.Replace(name=new_name))
        else:
            if isinstance(member, pytd.TypeParameter):
                type_param_names.add(new_name)
            elif new_name in type_param_names:
                continue
            if member_name.startswith('_'):
                continue
            t = pytd.ToType(member, allow_constants=True, allow_functions=True)
            aliases.append(pytd.Alias(new_name, t))
    return (aliases, getattrs)
Get the new members that would result from a star import of the module.

Args:
    module: The module name.

Returns:
    A tuple of:
    - a list of new aliases,
    - a set of new __getattr__ functions.
github-repos
def visit_and_get_function_nodes(self, definition, first_node):
    len_before_visiting_func = len(self.nodes)
    previous_node = self.nodes[-1]
    entry_node = self.append_node(EntryOrExitNode('Function Entry ' + definition.name))
    if not first_node:
        first_node = entry_node
    self.connect_if_allowed(previous_node, entry_node)

    function_body_connect_statements = self.stmt_star_handler(definition.node.body)
    entry_node.connect(function_body_connect_statements.first_statement)

    exit_node = self.append_node(EntryOrExitNode('Exit ' + definition.name))
    exit_node.connect_predecessors(function_body_connect_statements.last_statements)

    the_new_nodes = self.nodes[len_before_visiting_func:]
    return_connection_handler(the_new_nodes, exit_node)

    return (the_new_nodes, first_node)
Visits the nodes of a user defined function.

Args:
    definition (LocalModuleDefinition): Definition of the function being added.
    first_node (EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function.

Returns:
    the_new_nodes (list[Node]): The nodes added while visiting the function.
    first_node (EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function.
codesearchnet
def save_driver_logs(driver, prefix):
    browser_name = os.environ.get('SELENIUM_BROWSER', 'firefox')
    log_dir = os.environ.get('SELENIUM_DRIVER_LOG_DIR')
    if not log_dir:
        LOGGER.warning('The SELENIUM_DRIVER_LOG_DIR environment variable was not set; not saving logs')
        return
    elif not os.path.exists(log_dir):
        os.makedirs(log_dir)

    if browser_name == 'firefox':
        log_path = os.path.join(os.getcwd(), 'geckodriver.log')
        if os.path.exists(log_path):
            dest_path = os.path.join(log_dir, '{}_geckodriver.log'.format(prefix))
            copyfile(log_path, dest_path)
        return

    log_types = driver.log_types
    for log_type in log_types:
        try:
            log = driver.get_log(log_type)
            file_name = os.path.join(
                log_dir, '{}_{}.log'.format(prefix, log_type)
            )
            with open(file_name, 'w') as output_file:
                for line in log:
                    output_file.write("{}{}".format(dumps(line), '\n'))
        except:
            msg = (
                u"Could not save browser log of type '{log_type}'. "
                u"It may be that the browser does not support it."
            ).format(log_type=log_type)
            LOGGER.warning(msg, exc_info=True)
Save the selenium driver logs.

The location of the driver log files can be configured by the environment variable `SELENIUM_DRIVER_LOG_DIR`. If not set, a warning is logged and no logs are saved.

Args:
    driver (selenium.webdriver): The Selenium-controlled browser.
    prefix (str): A prefix which will be used in the output file names for the logs.

Returns:
    None
juraj-google-style
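A hedged usage sketch for the save_driver_logs entry above. The output directory, page URL, and test name are made up, and the snippet assumes a local geckodriver installation so that Selenium can start Firefox:

import os
from selenium import webdriver

os.environ['SELENIUM_DRIVER_LOG_DIR'] = '/tmp/driver_logs'  # hypothetical output directory
driver = webdriver.Firefox()
try:
    driver.get('https://example.com')
finally:
    save_driver_logs(driver, prefix='example_test')
    driver.quit()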
def _scalar(tf_fn, x, promote_to_float=False):
    x = np_array_ops.asarray(x)
    if promote_to_float and not np.issubdtype(x.dtype.as_numpy_dtype, np.inexact):
        x = x.astype(np_utils.result_type(float))
    return tf_fn(x)
Computes the tf_fn(x) for each element in `x`.

Args:
    tf_fn: function that takes a single Tensor argument.
    x: array_like. Could be an ndarray, a Tensor or any object that can be converted to a Tensor using `ops.convert_to_tensor`.
    promote_to_float: whether to cast the argument to a float dtype if it is not already.

Returns:
    An ndarray with the same shape as `x`. The default output dtype is determined by `np_utils.result_type(float)`, unless x is an ndarray with a floating point type, in which case the output type is same as x.dtype.
github-repos
def gates_to_idx(gates, qregs):
    sizes = [qr.size for qr in qregs.values()]
    reg_idx = np.cumsum([0] + sizes)
    regint = {}
    for ind, qreg in enumerate(qregs.values()):
        regint[qreg] = ind
    out = np.zeros(2 * len(gates), dtype=np.int32)
    for idx, gate in enumerate(gates):
        out[2 * idx] = reg_idx[regint[gate[0][0]]] + gate[0][1]
        out[2 * idx + 1] = reg_idx[regint[gate[1][0]]] + gate[1][1]
    return out
Converts gate tuples into a nested list of integers.

Args:
    gates (list): List of (QuantumRegister, int) pairs representing gates.
    qregs (dict): List of (QuantumRegister, int) tuples.

Returns:
    list: Nested list of integers for gates.
codesearchnet
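A sketch of the flattening performed by the gates_to_idx entry above. It assumes Qiskit's QuantumRegister; the register layout, dict keys, and gate list are example values. The cumulative register sizes [0, 2, 5] turn each (register, local index) pair into a flat qubit index:

from qiskit import QuantumRegister

qr_a, qr_b = QuantumRegister(2, 'a'), QuantumRegister(3, 'b')
qregs = {0: qr_a, 1: qr_b}  # illustrative register ordering
gates = [[(qr_a, 1), (qr_b, 0)], [(qr_b, 2), (qr_a, 0)]]
print(gates_to_idx(gates, qregs))  # [1 2 4 0]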
def to_sql(cls, qc, **kwargs):
    empty_df = qc.head(1).to_pandas().head(0)
    empty_df.to_sql(**kwargs)
    kwargs["if_exists"] = "append"
    columns = qc.columns

    def func(df, **kwargs):
        df.columns = columns
        df.to_sql(**kwargs)
        return pandas.DataFrame()

    map_func = qc._prepare_method(func, **kwargs)
    result = qc._map_across_full_axis(1, map_func)
    result.to_pandas()
Write records stored in a DataFrame to a SQL database.

Args:
    qc: the query compiler of the DF that we want to run to_sql on
    kwargs: parameters for pandas.to_sql(**kwargs)
juraj-google-style
def from_keras_log(csv_path, output_dir_path, **kwargs):
    data = pd.read_csv(csv_path, sep=None, engine='python')
    _from_keras_log_format(data, output_dir_path=output_dir_path, **kwargs)
Plot accuracy and loss from a Keras CSV log.

Args:
    csv_path: The path to the CSV log with the actual data.
    output_dir_path: The path to the directory where the resulting plots should end up.
juraj-google-style
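A minimal usage sketch for the from_keras_log entry above; both paths are hypothetical, and the CSV is assumed to be the file written by keras.callbacks.CSVLogger during training:

from_keras_log('training_log.csv', 'plots/')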
def series_with_slh(self, other):
    new_S = self.S * other.S
    new_L = self.S * other.L + self.L

    def ImAdjoint(m):
        return (m.H - m) * (I / 2)

    delta = ImAdjoint(self.L.adjoint() * self.S * other.L)

    if isinstance(delta, Matrix):
        new_H = self.H + other.H + delta[0, 0]
    else:
        assert delta == 0
        new_H = self.H + other.H

    return SLH(new_S, new_L, new_H)
Series product with another :class:`SLH` object

Args:
    other (SLH): An upstream SLH circuit.

Returns:
    SLH: The combined system.
juraj-google-style
def _sequence_search(self, start: GridQubit,
                     current: List[GridQubit]) -> List[GridQubit]:
    used = set(current)
    seq = []
    n = start
    while n is not None:
        seq.append(n)
        used.add(n)
        n = self._choose_next_qubit(n, used)
    return seq
Search for the continuous linear sequence from the given qubit.

This method is called twice for the same starting qubit, so that sequences that begin and end on this qubit are searched for.

Args:
    start: The first qubit, where the search should be triggered from.
    current: Previously found linear sequence, whose qubits are forbidden to use during the search.

Returns:
    Continuous linear sequence that begins with the starting qubit and does not contain any qubits from the current list.
codesearchnet
def set_heat_pump_mode(self, device_label, mode):
    response = None
    try:
        response = requests.put(
            urls.set_heatpump_state(self._giid, device_label),
            headers={
                'Accept': 'application/json',
                'Content-Type': 'application/json',
                'Cookie': 'vid={}'.format(self._vid)},
            data=json.dumps({'mode': mode}))
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
Set heatpump mode

Args:
    mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
juraj-google-style
def __call__(self, fn):
    def completion(app, *args, **kwargs):
        app.exit_message = self.msg
        return fn(app, *args, **kwargs)
    return completion
Implement __call__ function for decorator.

Args:
    fn (function): The decorated function.

Returns:
    function: The custom decorator function.
juraj-google-style
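A sketch of the decorator from the entry above in use. ExitMessage is a hypothetical name for the class that owns the __call__ (its __init__ is assumed to store the message on self.msg), and App is a stub for the application object the decorated function receives:

class App:
    exit_message = None

@ExitMessage('All done.')
def run(app):
    return 0

app = App()
run(app)
print(app.exit_message)  # All done.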