Dataset columns:
    code        string, lengths 20 - 4.93k characters
    docstring   string, lengths 33 - 1.27k characters
    source      string, 3 classes (codesearchnet, juraj-google-style, github-repos)
def _get_compositor_prereqs(self, parent, prereq_names, skip=False, **dfilter): prereq_ids = [] unknowns = set() for prereq in prereq_names: (n, u) = self._find_dependencies(prereq, **dfilter) if u: unknowns.update(u) if skip: u_str = ', '.join([str(x) for x in u]) LOG.debug('Skipping optional %s: Unknown dataset %s', str(prereq), u_str) else: prereq_ids.append(n) self.add_child(parent, n) return (prereq_ids, unknowns)
Determine prerequisite Nodes for a composite. Args: parent (Node): Compositor node to add these prerequisites under prereq_names (sequence): Strings (names), floats (wavelengths), or DatasetIDs to analyze. skip (bool, optional): If True, prerequisites are considered optional if they can't be found and a debug message is logged. If False (default), the missing prerequisites are not logged and are expected to be handled by the caller.
codesearchnet
def substructure(mol, query, largest_only=True, ignore_hydrogen=True): def subset_filter(cnt1, cnt2): diff = cnt2 diff.subtract(cnt1) if any(v < 0 for v in diff.values()): return True if not (len(mol) and len(query)): return False m = molutil.clone(mol) q = molutil.clone(query) if largest_only: m = molutil.largest_graph(m) q = molutil.largest_graph(q) if ignore_hydrogen: m = molutil.make_Hs_implicit(m) q = molutil.make_Hs_implicit(q) if filter_(m, q, f=subset_filter): gm = GraphMatcher(q.graph, m.graph, node_match=atom_match) return gm.subgraph_is_isomorphic() return False
Return True if mol is a substructure of the query. Args: mol: Compound query: Compound largest_only: compare only the largest connected graph of each molecule
juraj-google-style
def ones_like(x, dtype=None): if any_symbolic_tensors((x,)): return OnesLike(dtype=dtype).symbolic_call(x) return backend.numpy.ones_like(x, dtype=dtype)
Return a tensor of ones with the same shape and type of `x`. Args: x: Input tensor. dtype: Overrides the data type of the result. Returns: A tensor of ones with the same shape and type as `x`.
github-repos
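As a rough illustration of the documented semantics (shape preserved, dtype optionally overridden), a plain NumPy equivalent behaves as sketched below; the symbolic OnesLike path in the sample above is not exercised here.

import numpy as np

x = np.array([[1.5, 2.0], [3.0, 4.0]], dtype=np.float32)
print(np.ones_like(x))                  # [[1. 1.] [1. 1.]], dtype float32 preserved
print(np.ones_like(x, dtype=np.int32))  # same shape, dtype overridden to int32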
def sort(self, by=None, reverse=False): if (by is None): by = self.kdims elif (not isinstance(by, list)): by = [by] sorted_columns = self.interface.sort(self, by, reverse) return self.clone(sorted_columns)
Sorts the data by the values along the supplied dimensions. Args: by: Dimension(s) to sort by reverse (bool, optional): Reverse sort order Returns: Sorted Dataset
codesearchnet
def get_power_state(self, id_or_uri): uri = self._client.build_uri(id_or_uri) + "/powerState" return self._client.get(uri)
Gets the power state (on, off or unknown) of the specified power delivery device that supports power control. The device must be an HP Intelligent Outlet. Args: id_or_uri: Can be either the power device id or the uri Returns: str: The power state
juraj-google-style
def union(self, streamSet): if(not isinstance(streamSet,set)) : raise TypeError("The union operator parameter must be a set object") if(len(streamSet) == 0): return self op = self.topology.graph.addOperator("$Union$") op.addInputPort(outputPort=self.oport) for stream in streamSet: op.addInputPort(outputPort=stream.oport) oport = op.addOutputPort(schema=self.oport.schema) return Stream(self.topology, oport)
Creates a stream that is a union of this stream and other streams. Args: streamSet: a set of Stream objects to merge with this stream Returns: Stream: the union of this stream and the streams in streamSet
juraj-google-style
def _viscounts2radiance(counts, slope, offset): rad = ((counts * slope) + offset) return rad.clip(min=0)
Convert VIS counts to radiance References: [VIS] Args: counts: Raw detector counts slope: Slope [W m-2 um-1 sr-1] offset: Offset [W m-2 um-1 sr-1] Returns: Radiance [W m-2 um-1 sr-1]
codesearchnet
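A minimal worked example of the conversion above, using arbitrary slope and offset values (not real calibration coefficients) purely to show the linear scaling and the clipping of negative radiances:

import numpy as np

counts = np.array([0.0, 10.0, 100.0])  # raw detector counts
slope, offset = 0.5, -2.0              # illustrative values only
rad = counts * slope + offset          # [-2.,  3., 48.]
print(rad.clip(min=0))                 # [ 0.  3. 48.] -- negative radiances clipped to zero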
def remove_item(name, system_wide=False): desktop_env = system.get_name() if (desktop_env == 'windows'): import winreg if system_wide: startup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'), 'Microsoft\\Windows\\Start Menu\\Programs\\Startup') else: startup_dir = os.path.join(directories.get_config_dir()[0], 'Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup') for startup_file in os.path.listdir(start_dir): if ((startup_file == name) or (startup_file.split('.')[0] == name)): os.remove(os.path.join(startup_dir, startup_file)) elif (desktop_env == 'mac'): sp.Popen(['launchctl', 'remove', name]) elif (desktop_env == 'unknown'): if system_wide: login_file = '/etc/profile' else: login_file = os.path.expanduser('~/.profile') with open(login_file) as f: login_file_contents = f.read() final_login_file_contents = '' for line in login_file_contents.split('\n'): if (line.split(' ')[0] != name): final_login_file_contents += line with open(login_file, 'w') as f: f.write(final_login_file_contents) else: try: desktop_file_name = (name + '.desktop') startup_file = os.path.join(directories.get_config_dir('autostart', system_wide=system_wide)[0], desktop_file_name) if (not os.path.isfile(startup_file)): for possible_startup_file in os.listdir(directories.get_config_dir('autostart', system_wide=system_wide)[0]): possible_startup_file_parsed = desktopfile.parse(possible_startup_file) if (possible_startup_file_parsed['Name'] == name): startup_file = possible_startup_file os.remove(startup_file) except IndexError: pass
Removes a program from startup. Args: name (str): The name of the program (as known to the system) to remove. See :func:`list_items`. system_wide (bool): Remove it from system-wide startup. Note: ``system_wide`` requires superuser/admin privileges.
codesearchnet
def _process_req_body(self, body): try: return json.loads(body) except ValueError: return urlparse.parse_qs(body, keep_blank_values=True)
Process the body of the HTTP request. If the body is valid JSON, return the JSON as a dict. Else, convert the key=value format to a dict and return that. Args: body: The body of the HTTP request.
codesearchnet
def get_value_at_field(msg: message.Message, field: Union[descriptor.FieldDescriptor, str]) -> Any: if isinstance(field, str): field = _field_descriptor_for_name(msg, field) return getattr(msg, field.name)
Returns the value at the field described by field. Args: msg: The message whose fields to examine. field: The FieldDescriptor or name of the field to retrieve. Returns: The value of msg at field.
github-repos
def isconst(cls, val):
    # The tail of this expression was truncated in the extracted source; the named-color
    # check (assumed here to be enums.NamedColor) is reconstructed from the docstring.
    return isinstance(val, string_types) and (
        (len(val) == 7 and val[0] == '#') or val in enums.NamedColor
    )
Whether the value is a string color literal. Checks for a well-formed hexadecimal color value or a named color. Args: val (str) : the value to check Returns: True, if the value is a string color literal
codesearchnet
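A standalone sketch of the documented check; the real named-color set is not shown in the sample, so a small stand-in is used:

NAMED_COLORS = {'red', 'green', 'blue'}   # stand-in for the real named-color enum

def isconst_sketch(val):
    return isinstance(val, str) and (
        (len(val) == 7 and val[0] == '#') or val in NAMED_COLORS
    )

print(isconst_sketch('#00ff00'))    # True  (well-formed hex literal)
print(isconst_sketch('red'))        # True  (named color)
print(isconst_sketch((0, 255, 0)))  # False (not a string)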
def add_namespace_uri(self, ns_uri, prefix=None, schema_location=None): assert ns_uri if (ns_uri in self.__ns_uri_map): ni = self.__lookup_uri(ns_uri) new_ni = copy.deepcopy(ni) if prefix: self.__check_prefix_conflict(ni, prefix) new_ni.prefixes.add(prefix) self.__merge_schema_locations(new_ni, schema_location) for p in new_ni.prefixes: self.__prefix_map[p] = new_ni self.__ns_uri_map[new_ni.uri] = new_ni else: if prefix: self.__check_prefix_conflict(ns_uri, prefix) ni = _NamespaceInfo(ns_uri, prefix, schema_location) self.__add_namespaceinfo(ni)
Adds a new namespace to this set, optionally with a prefix and schema location URI. If the namespace already exists, the given prefix and schema location are merged with the existing entry: * If non-None, ``prefix`` is added to the set. The preferred prefix is not modified. * If a schema location is not already associated with the namespace, it is set to ``schema_location`` (if given). If the namespace doesn't already exist in this set (so a new one is being created) and a prefix is given, that prefix becomes preferred. If not given, a preference as a default namespace is used. Args: ns_uri (str): The URI of the new namespace prefix (str): The desired prefix for the new namespace (optional) schema_location (str): The desired schema location for the new namespace (optional). Raises: DuplicatePrefixError: If a prefix is given which already maps to a different namespace ConflictingSchemaLocationError: If a schema location is given and the namespace already exists in this set with a different schema location.
codesearchnet
def __init__(self, was_reversed=False, was_copy=False): self._was_reversed = was_reversed self._was_copy = was_copy self._encoders = None self._hparams = None self._feature_info = None self._task_id = -1
Create a Problem. Args: was_reversed: bool, whether to reverse inputs and targets. was_copy: bool, whether to copy inputs to targets. Can be composed with was_reversed so that if both are true, the targets become the inputs, which are then copied to targets so that the task is targets->targets.
juraj-google-style
def convert_fields_for_spec(fields, field_values): _convert_fields(fields, field_values, context=_ConversionContext.SPEC)
Type-checks and converts field values for a TypeSpec (in place). This is similar to `convert_fields`, except that we expect a `TypeSpec` for tensor-like types. In particular, if the `value_type` of a field is `tf.Tensor` or a `CompositeTensor` subclass, then the corresponding value in `fields` is expected to contain a `TypeSpec` (rather than a value described by that `TypeSpec`). Args: fields: A list of `ExtensionTypeField` objects. field_values: A `dict` mapping field names to values. Must contain an entry for each field. I.e., `set(field_values.keys())` must be equal to `set([f.name for f in fields])`. Raises: ValueError: If the keys of `field_values` do not match the names of the fields in `fields`. TypeError: If any value in `field_values` does not have the type indicated by the corresponding `ExtensionTypeField` object.
github-repos
def read_hdf(cls, path_or_buf, **kwargs):
    if cls.read_hdf_remote_task is None:
        return super(RayIO, cls).read_hdf(path_or_buf, **kwargs)
    format = cls._validate_hdf_format(path_or_buf=path_or_buf)
    if format is None:
        ErrorMessage.default_to_pandas('File format seems to be `fixed`. For better distribution consider saving the file in `table` format. df.to_hdf(format=`table`).')
        return cls.from_pandas(pandas.read_hdf(path_or_buf=path_or_buf, **kwargs))
    columns = kwargs.get('columns', None)
    if not columns:
        empty_pd_df = pandas.read_hdf(path_or_buf, start=0, stop=0)
        columns = empty_pd_df.columns
    num_partitions = cls.frame_mgr_cls._compute_num_partitions()
    num_splits = min(len(columns), num_partitions)
    # The chunk-size expression was truncated in the extracted source; a ceiling
    # division over num_splits is assumed here.
    column_splits = (len(columns) + num_splits - 1) // num_splits
    col_partitions = [columns[i:(i + column_splits)] for i in range(0, len(columns), column_splits)]
    blk_partitions = np.array([cls.read_hdf_remote_task._remote(args=(path_or_buf, cols, num_splits, kwargs), num_return_vals=(num_splits + 1)) for cols in col_partitions]).T
    remote_partitions = np.array([[cls.frame_partition_cls(obj) for obj in row] for row in blk_partitions[:(-1)]])
    index_len = ray.get(blk_partitions[(-1)][0])
    index = pandas.RangeIndex(index_len)
    new_query_compiler = cls.query_compiler_cls(cls.frame_mgr_cls(remote_partitions), index, columns)
    return new_query_compiler
Load a h5 file from the file path or buffer, returning a DataFrame. Args: path_or_buf: string, buffer or path object Path to the file to open, or an open :class:`pandas.HDFStore` object. kwargs: Pass into pandas.read_hdf function. Returns: DataFrame constructed from the h5 file.
codesearchnet
def to_b58check(self, testnet=False): b = (self.testnet_bytes if testnet else bytes(self)) return base58.b58encode_check(b)
Generates a Base58Check encoding of this key. Args: testnet (bool): True if the key is to be used with testnet, False otherwise. Returns: str: A Base58Check encoded string representing the key.
codesearchnet
def adversary_assets(self, main_type, sub_type, unique_id, params=None): params = params or {} url = '/v2/{}/{}/{}/adversaryAssets'.format(main_type, sub_type, unique_id) for aa in self._iterate(url, params, 'adversaryAsset'): yield aa
Args: main_type: sub_type: unique_id: params: Return:
juraj-google-style
def set_long_features(self, features, columns_to_set=[], partition=2): features_long = self.set_features(partition=(2 * partition)) unwanted_features = [f for f in features.columns if (f not in columns_to_set)] features_long = features_long.drop(unwanted_features, axis=1) features_long.columns = ['long_{0}'.format(f) for f in features_long.columns] skip = partition return pd.concat([features[skip:].reset_index(drop=True), features_long], axis=1)
Sets features of double the duration. Example: setting 14-day RSIs to longer adds a feature column of 28-day RSIs. Args: features: Pandas DataFrame instance with columns as numpy.float32 features. columns_to_set: List of strings of feature names to make longer. partition: Int of how many dates to take into consideration when evaluating technical analysis indicators. Returns: Pandas DataFrame instance with columns as numpy.float32 features.
codesearchnet
def get_listed_projects():
    index_path = (Path().resolve() / 'docs') / 'index.md'
    with open(index_path, 'r') as index_file:
        lines = index_file.readlines()
    listed_projects = set()
    project_section = False
    for (_, l) in enumerate(lines):
        idx = l.find(PROJECT_KEY)
        if idx >= 0:
            project_section = True
        if project_section:
            start = l.find('](')
            if start > 0:
                closing_parenthesis = sorted([m.start() for m in re.finditer('\\)', l) if (m.start() > start)])[0]
                project = l[(start + 2):closing_parenthesis]
                listed_projects.add(project)
            # The startswith() argument was truncated in the extracted source; a new
            # markdown heading ('#') is assumed to end the projects section.
            if (len(listed_projects) > 0) and l.startswith('#'):
                return listed_projects
    return listed_projects
Find the projects listed in the Home Documentation's index.md file. Returns: set(str): projects' names, with the '/' at their beginnings
codesearchnet
def exists(self) -> 'Builder': return self._to_builder(_evaluation.ExistsFunction(self.node.context, self.node, []))
The FHIRPath exists() function. Returns: An expression that returns True if the parent expression evaluates to one or more values.
github-repos
def zero_or_more(e, delimiter=None): if delimiter is None: delimiter = lambda s, grm, pos: (s, Ignore, (pos, pos)) def match_zero_or_more(s, grm=None, pos=0): start = pos try: s, obj, span = e(s, grm, pos) pos = span[1] data = [] if obj is Ignore else [obj] except PegreError: return PegreResult(s, [], (pos, pos)) try: while True: s, obj, span = delimiter(s, grm, pos) pos = span[1] if obj is not Ignore: data.append(obj) s, obj, span = e(s, grm, pos) pos = span[1] if obj is not Ignore: data.append(obj) except PegreError: pass return PegreResult(s, data, (start, pos)) return match_zero_or_more
Create a PEG function to match zero or more expressions. Args: e: the expression to match delimiter: an optional expression to match between the primary *e* matches.
juraj-google-style
def observe_timestamp(self, timestamp: timestamp.Timestamp) -> None: raise NotImplementedError(type(self))
Update tracking watermark with latest output timestamp. Args: timestamp: the `timestamp.Timestamp` of current output element. This is called with the timestamp of every element output from the DoFn.
github-repos
def _GenApiConfigCallback(args, api_func=GenApiConfig): service_configs = api_func(args.service, hostname=args.hostname, application_path=args.application) for (api_name_version, config) in service_configs.iteritems(): _WriteFile(args.output, (api_name_version + '.api'), config)
Generate an api file. Args: args: An argparse.Namespace object to extract parameters from. api_func: A function that generates and returns an API configuration for a list of services.
codesearchnet
def compare_config(self, target, init=True, indent_level=0): if init: fwd = self.full_path_fwd bwd = self.full_path_bwd else: fwd = self.rel_path_fwd bwd = self.rel_path_bwd indent = ((4 * indent_level) * ' ') if ((indent_level == 0) and (self.vdom is not None)): if (self.vdom == 'global'): pre = 'conf global\n' else: pre = ('conf vdom\n edit %s\n' % self.vdom) post = 'end' else: pre = '' post = '' pre_block = ('%s%s' % (indent, fwd)) post_block = ('%s%s' % (indent, bwd)) my_params = self.parameters.keys() ot_params = target.parameters.keys() text = '' for param in my_params: if (param not in ot_params): text += (' %sunset %s\n' % (indent, param)) elif (str(self.get_param(param)).replace('"', '') != str(target.get_param(param)).replace('"', '')): text += (' %sset %s %s\n' % (indent, param, target.get_param(param))) for param in ot_params: if (param not in my_params): text += (' %sset %s %s\n' % (indent, param, target.get_param(param))) my_blocks = self.sub_blocks.keys() ot_blocks = target.sub_blocks.keys() for block_name in my_blocks: if (block_name not in ot_blocks): text += (' %sdelete %s\n' % (indent, block_name)) else: text += self[block_name].compare_config(target[block_name], False, (indent_level + 1)) for block_name in ot_blocks: if (block_name not in my_blocks): text += target[block_name].to_text(True, (indent_level + 1), True) if (text == ''): return '' else: return ('%s%s%s%s%s' % (pre, pre_block, text, post_block, post))
This method will return all the necessary commands to get from the config we are in to the target config. Args: * **target** (:class:`~pyFG.forticonfig.FortiConfig`) - Target config. * **init** (bool) - This tells to the method if this is the first call to the method or if we are inside\ the recursion. You can ignore this parameter. * **indent_level** (int) - This tells the method how deep you are in the recursion. You can ignore it. Returns: A string containing all the necessary commands to reach the target config.
codesearchnet
def get_pourbaix_entries(self, chemsys): from pymatgen.analysis.pourbaix_diagram import PourbaixEntry, IonEntry from pymatgen.analysis.phase_diagram import PhaseDiagram from pymatgen.core.ion import Ion from pymatgen.entries.compatibility import MaterialsProjectAqueousCompatibility pbx_entries = [] url = ('/pourbaix_diagram/reference_data/' + '-'.join(chemsys)) ion_data = self._make_request(url) ion_ref_comps = [Composition(d['Reference Solid']) for d in ion_data] ion_ref_elts = list(itertools.chain.from_iterable((i.elements for i in ion_ref_comps))) ion_ref_entries = self.get_entries_in_chemsys(list(set(([str(e) for e in ion_ref_elts] + ['O', 'H']))), property_data=['e_above_hull'], compatible_only=False) compat = MaterialsProjectAqueousCompatibility('Advanced') ion_ref_entries = compat.process_entries(ion_ref_entries) ion_ref_pd = PhaseDiagram(ion_ref_entries) for (n, i_d) in enumerate(ion_data): ion_entry = IonEntry(Ion.from_formula(i_d['Name']), i_d['Energy']) refs = [e for e in ion_ref_entries if (e.composition.reduced_formula == i_d['Reference Solid'])] if (not refs): raise ValueError('Reference solid not contained in entry list') stable_ref = sorted(refs, key=(lambda x: x.data['e_above_hull']))[0] rf = stable_ref.composition.get_reduced_composition_and_factor()[1] solid_diff = (ion_ref_pd.get_form_energy(stable_ref) - (i_d['Reference solid energy'] * rf)) elt = i_d['Major_Elements'][0] correction_factor = (ion_entry.ion.composition[elt] / stable_ref.composition[elt]) ion_entry.energy += (solid_diff * correction_factor) pbx_entries.append(PourbaixEntry(ion_entry, 'ion-{}'.format(n))) extra_elts = ((set(ion_ref_elts) - {Element(s) for s in chemsys}) - {Element('H'), Element('O')}) for entry in ion_ref_entries: entry_elts = set(entry.composition.elements) if (not ((entry_elts <= {Element('H'), Element('O')}) or extra_elts.intersection(entry_elts))): form_e = ion_ref_pd.get_form_energy(entry) new_entry = deepcopy(entry) new_entry.uncorrected_energy = form_e new_entry.correction = 0.0 pbx_entry = PourbaixEntry(new_entry) pbx_entries.append(pbx_entry) return pbx_entries
A helper function to get all entries necessary to generate a pourbaix diagram from the rest interface. Args: chemsys ([str]): A list of elements comprising the chemical system, e.g. ['Li', 'Fe']
codesearchnet
def _register_array_types(self, objects): types = [o for o in objects if isinstance(o, VhdlType) and o.type_of == 'array_type'] for t in types: self.array_types.add(t.name) subtypes = {o.name:o.base_type for o in objects if isinstance(o, VhdlSubtype)} for k,v in subtypes.iteritems(): while v in subtypes: v = subtypes[v] if v in self.array_types: self.array_types.add(k)
Add array type definitions to internal registry Args: objects (list of VhdlType or VhdlSubtype): Array types to track
juraj-google-style
def get_job(self, id): return self._get_element_by_id(self.jobs, 'jobs', Job, str(id))
Retrieves a job matching the given `id` Args: id (str): Job `id` to match. Returns: Job: Job matching the given `id` Raises: ValueError: No resource matches given `id` or multiple resources matching given `id`
juraj-google-style
def update_compounds(self, variants): LOG.debug('Updating compound objects') for var_id in variants: variant_obj = variants[var_id] if (not variant_obj.get('compounds')): continue updated_compounds = self.update_variant_compounds(variant_obj, variants) variant_obj['compounds'] = updated_compounds LOG.debug('Compounds updated') return variants
Update the compounds for a set of variants. Args: variants(dict): A dictionary with _ids as keys and variant objs as values
codesearchnet
def get_metadata(changeset):
    # The URL literal was truncated in the extracted source; the OSM changeset
    # endpoint is reconstructed here.
    url = 'https://www.openstreetmap.org/api/0.6/changeset/{}'.format(changeset)
    return ET.fromstring(requests.get(url).content).getchildren()[0]
Get the metadata of a changeset using the OSM API and return it as an XML ElementTree. Args: changeset: the id of the changeset.
juraj-google-style
def call(self, input_ids: Optional[tf.Tensor]=None, bbox: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, token_type_ids: Optional[tf.Tensor]=None, inputs_embeds: Optional[tf.Tensor]=None, training: bool=False) -> tf.Tensor: assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if position_ids is None: position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) if position_ids is None: position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) if bbox is None: bbox = bbox = tf.fill(input_shape + [4], value=0) try: left_position_embeddings = tf.gather(self.x_position_embeddings, bbox[:, :, 0]) upper_position_embeddings = tf.gather(self.y_position_embeddings, bbox[:, :, 1]) right_position_embeddings = tf.gather(self.x_position_embeddings, bbox[:, :, 2]) lower_position_embeddings = tf.gather(self.y_position_embeddings, bbox[:, :, 3]) except IndexError as e: raise IndexError('The `bbox`coordinate values should be within 0-1000 range.') from e h_position_embeddings = tf.gather(self.h_position_embeddings, bbox[:, :, 3] - bbox[:, :, 1]) w_position_embeddings = tf.gather(self.w_position_embeddings, bbox[:, :, 2] - bbox[:, :, 0]) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds + left_position_embeddings + upper_position_embeddings + right_position_embeddings + lower_position_embeddings + h_position_embeddings + w_position_embeddings final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings
Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor.
github-repos
def _SetSELinuxContext(path): restorecon = '/sbin/restorecon' if (os.path.isfile(restorecon) and os.access(restorecon, os.X_OK)): subprocess.call([restorecon, path])
Set the appropriate SELinux context, if SELinux tools are installed. Calls /sbin/restorecon on the provided path to set the SELinux context as specified by policy. This call does not operate recursively. Only some OS configurations use SELinux. It is therefore acceptable for restorecon to be missing, in which case we do nothing. Args: path: string, the path on which to fix the SELinux context.
codesearchnet
def GetWarnings(self): if self._HasAttributeContainers(self._CONTAINER_TYPE_EXTRACTION_ERROR): return self._GetExtractionErrorsAsWarnings() return self._GetAttributeContainers(self._CONTAINER_TYPE_EXTRACTION_WARNING)
Retrieves the warnings. Returns: generator(ExtractionWarning): warning generator.
codesearchnet
def _compute_offsets(self, token_ids, time_precision=0.02, segment_size=1500): offsets = [] if 'torch' in str(type(token_ids)) and (hasattr(token_ids, 'cpu') and callable(token_ids.cpu)): token_ids = token_ids.cpu() token_ids = np.array(token_ids) if token_ids.shape[0] > 1 and len(token_ids.shape) > 1: raise ValueError('Can only process a single input at a time') timestamp_begin = self.all_special_ids[-1] + 1 timestamp_tokens = token_ids >= timestamp_begin consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1 if consecutive.shape[0] == 0 and timestamp_tokens.sum() <= 1: return [] elif np.where(timestamp_tokens)[0][-1] + 1 not in consecutive: consecutive = np.append(consecutive, np.where(timestamp_tokens)[0][-1] + 1) last_slice = np.where(timestamp_tokens)[0][0] cur_max_timestamp = 0 prev_segments_len = 0 for current_slice in consecutive: sliced_tokens = token_ids[last_slice:current_slice] if len(sliced_tokens) > 1: start_timestamp_position = sliced_tokens[0].item() - timestamp_begin end_timestamp_position = sliced_tokens[-1].item() - timestamp_begin if start_timestamp_position < cur_max_timestamp: is_single_ending = last_slice >= 2 and (not (token_ids[last_slice - 2] >= timestamp_begin and token_ids[last_slice - 1] >= timestamp_begin)) if is_single_ending: prev_segments_len += segment_size else: prev_segments_len += cur_max_timestamp cur_max_timestamp = end_timestamp_position sliced_tokens = self._preprocess_token_ids(sliced_tokens) text = self._decode(sliced_tokens) text = self._filter_timestamp_ids(text) offsets.append({'text': text, 'timestamp': (start_timestamp_position * time_precision + prev_segments_len * time_precision, end_timestamp_position * time_precision + prev_segments_len * time_precision)}) last_slice = current_slice return offsets
Compute offsets for a given tokenized input Args: token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. time_precision (`float`, *optional*, defaults to 0.02): The time ratio to convert from token to time. segment_size (`int`, *optional*, defaults to 1500): The number of features in the input mel spectrogram.
github-repos
def validate_checksum(filename, md5sum): filename = match_filename(filename) md5_hash = file_md5(filename=filename) if (md5_hash != md5sum): raise ValueError('md5 checksums are inconsistent: {}'.format(filename))
Compares the md5 checksum of a file with an expected value. If the calculated and expected checksum values are not equal, ValueError is raised. If the filename `foo` is not found, will try to read a gzipped file named `foo.gz`. In this case, the checksum is calculated for the unzipped file. Args: filename (str): Path for the file to be checksummed. md5sum (str): The expected hex checksum. Returns: None
codesearchnet
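The file_md5 helper referenced above is not shown in the sample; a plausible sketch follows, along with a usage note (the checksum value below is only an example):

import hashlib

def file_md5_sketch(filename):
    # stream the file in chunks and return the hex digest
    h = hashlib.md5()
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            h.update(chunk)
    return h.hexdigest()

# validate_checksum('data.csv', 'd41d8cd98f00b204e9800998ecf8427e') raises ValueError
# if the computed digest differs from the expected one; otherwise it returns None.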
def insert(self, parts, leaf_value, update=False): tree = self if (not parts): return tree cur = tree last = (len(parts) - 1) for (i, part) in enumerate(parts): if (part not in cur): cur[part] = (TreeMap() if (i != last) else leaf_value) elif (i == last): if update: cur[part].update(leaf_value) else: cur[part] = leaf_value cur = cur[part] return self
Add a list of nodes into the tree. The list will be converted into a TreeMap (chain) and then merged with the current TreeMap. For example, this method would insert `['a','b','c']` as `{'a':{'b':{'c':{}}}}`. Arguments: parts: List of nodes representing a chain. leaf_value: Value to insert into the leaf of the chain. update: Whether or not to update the leaf with the given value or to replace the value. Returns: self
codesearchnet
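A plain-dict sketch of the documented insertion behaviour (the TreeMap class itself is not shown in the sample):

tree = {}
cur = tree
parts, leaf_value = ['a', 'b', 'c'], {'hits': 1}
for i, part in enumerate(parts):
    # walk down, creating intermediate dicts and placing the leaf at the end
    cur = cur.setdefault(part, {} if i < len(parts) - 1 else leaf_value)
print(tree)  # {'a': {'b': {'c': {'hits': 1}}}}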
def plots_html_page(query_module): template = jenv.get_template("analysis.html") context = dict(extended=config.EXTENDED) cl = client.get_client() session = cl.create_session() seaborn.set_style('whitegrid') decade_df = query_module.decade_query() pix_size = pixels_to_inches((600, 400)) ax = seaborn.lmplot(x='decade', y='area', data=decade_df, size=pix_size[1], aspect=pix_size[0] / pix_size[1], scatter_kws={"s": 30, "alpha": 0.3}) ax.set(xlabel='Decade', ylabel='Area, m^2') context['area_by_decade_svg'] = fig_to_svg(plt.gcf()) plt.close('all') if config.EXTENDED: gender_df = query_module.gender_query() pix_size = pixels_to_inches((600, 400)) g = seaborn.FacetGrid(gender_df, hue="gender", margin_titles=True, size=pix_size[1], aspect=pix_size[0] / pix_size[1]) bins = np.linspace(0, 5, 30) g.map(plt.hist, "area", bins=bins, lw=0, alpha=0.5, normed=True) g.axes[0, 0].set_xlabel('Area, m^2') g.axes[0, 0].set_ylabel('Percentage of paintings') context['area_by_gender_svg'] = fig_to_svg(plt.gcf()) plt.close('all') out_file = path.join(out_dir, "analysis.html") html_content = template.render(**context) with open(out_file, 'w') as f: f.write(html_content) plt.close('all') session.close()
Generate analysis output as html page Args: query_module (module): module to use for querying data for the desired model/pipeline variant, e.g. leonardo.standard.queries
juraj-google-style
def is_treshold_reached(self, scraped_request): for route in self.__routing_options.routes: if re.compile(route).match(scraped_request.url): count_key = str(route) + scraped_request.method if count_key in self.__routing_count.keys(): return self.__routing_count[count_key] >= self.__routing_options.minimum_threshold return False
Check if similar requests to the given request have already been crawled X times, where X is the minimum threshold amount from the options. Args: scraped_request (:class:`nyawc.http.Request`): The request that possibly reached the minimum threshold. Returns: bool: True if the threshold was reached, False otherwise.
juraj-google-style
def __floordiv__(self, other: Self | Processor) -> PartProcessor | Processor: if isinstance(other, _ParallelPartProcessor): return _ParallelPartProcessor([self] + other._processor_list) elif isinstance(other, PartProcessor): return _ParallelPartProcessor([self, other]) else: raise ValueError(f'Parallel operator not valid between a PartProcessor and {type(other)}.')
Make `other` be computed in parallel to this processor. Args: other: a processor to compute in parallel to `self`. Returns: The parallel computation of this process with `other`.
github-repos
def __init__(self, underlying_result, pipeline_instrument): super().__init__(underlying_result.state) self._underlying_result = underlying_result self._pipeline_instrument = pipeline_instrument
Constructor of PipelineResult. Args: underlying_result: (PipelineResult) the result returned by the underlying runner running the pipeline. pipeline_instrument: (PipelineInstrument) pipeline instrument describing the pipeline being executed with interactivity applied and related metadata including where the interactivity-backing cache lies.
github-repos
def _parse_lambda(lam): mod = inspect.getmodule(lam) f = inspect.getsourcefile(lam) def_line = lam.__code__.co_firstlineno lines = linecache.getlines(f, mod.__dict__) source = ''.join(lines) all_nodes = parse(source, preamble_len=0, single_node=False) search_nodes = [] for node in all_nodes: if getattr(node, 'lineno', def_line) <= def_line: search_nodes.append(node) else: break lambda_nodes = [] for node in search_nodes: lambda_nodes.extend((n for n in gast.walk(node) if isinstance(n, gast.Lambda))) candidates = [] for ln in lambda_nodes: minl, maxl = (MAX_SIZE, 0) for n in gast.walk(ln): minl = min(minl, getattr(n, 'lineno', minl)) lineno = getattr(n, 'lineno', maxl) end_lineno = getattr(n, 'end_lineno', None) if end_lineno is not None: lineno = end_lineno maxl = max(maxl, lineno) if minl <= def_line <= maxl: candidates.append((ln, minl, maxl)) if len(candidates) == 1: (node, minl, maxl), = candidates return _without_context(node, lines, minl, maxl) elif not candidates: lambda_codes = '\n'.join([unparse(l) for l in lambda_nodes]) raise errors.UnsupportedLanguageElementError(f'could not parse the source code of {lam}: no matching AST found among candidates:\n{lambda_codes}') matches = [v for v in candidates if _node_matches_argspec(v[0], lam)] if len(matches) == 1: (node, minl, maxl), = matches return _without_context(node, lines, minl, maxl) matches = '\n'.join(('Match {}:\n{}\n'.format(i, unparse(node, include_encoding_marker=False)) for i, (node, _, _) in enumerate(matches))) raise errors.UnsupportedLanguageElementError(f'could not parse the source code of {lam}: found multiple definitions with identical signatures at the location. This error may be avoided by defining each lambda on a single line and with unique argument names. The matching definitions were:\n{matches}')
Returns the AST and source code of given lambda function. Args: lam: types.LambdaType, Python function/method/class Returns: gast.AST, Text: the parsed AST node; the source code that was parsed to generate the AST (including any prefixes that this function may have added).
github-repos
def isdisjoint(self, other): other = self._cast_to_frameset(other) if other is NotImplemented: return NotImplemented return self.items.isdisjoint(other.items)
Check if the contents of this :class:`FrameSet` have no common intersection with the contents of `other`. Args: other (:class:`FrameSet`): Returns: bool: :class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`
juraj-google-style
def load(self, profile_args): for (key, value) in profile_args.items(): self.add(key, value)
Load provided CLI args. Args: profile_args (dict): Dictionary of args in key/value format.
codesearchnet
def res_name(self, ns, types_ns, name): raise NotImplementedError('subclasses must implement')
Resolves the type/value of an external (e.g. closure, global) variable. Args: ns: namespace types_ns: types namespace name: symbol name Returns: Tuple (type, static_value). The first element is the type to use for inference. The second is the static value to use. Return None to treat it as unknown.
github-repos
def reminders_complete(self, *, reminder: str, **kwargs) -> SlackResponse: self._validate_xoxp_token() kwargs.update({'reminder': reminder}) return self.api_call('reminders.complete', json=kwargs)
Marks a reminder as complete. Args: reminder (str): The ID of the reminder to be marked as complete. e.g. 'Rm12345678'
codesearchnet
def calculate_elem_per_kb(max_chunk_kb, matrix_dtype): if (matrix_dtype == numpy.float32): return ((max_chunk_kb * 8) / 32) elif (matrix_dtype == numpy.float64): return ((max_chunk_kb * 8) / 64) else: msg = 'Invalid matrix_dtype: {}; only numpy.float32 and numpy.float64 are currently supported'.format(matrix_dtype) logger.error(msg) raise Exception(('write_gctx.calculate_elem_per_kb ' + msg))
Calculates the number of elem per kb depending on the max chunk size set. Input: - max_chunk_kb (int, default=1024): The maximum number of KB a given chunk will occupy - matrix_dtype (numpy dtype, default=numpy.float32): Storage data type for data matrix. Currently needs to be np.float32 or np.float64 (TODO: figure out a better way to get bits from a numpy dtype). Returns: elem_per_kb (int), the number of elements per kb for matrix dtype specified.
codesearchnet
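Assuming the function above is importable, the arithmetic for the documented default chunk size works out as follows (Python 3 true division gives floats):

import numpy

print(calculate_elem_per_kb(1024, numpy.float32))  # (1024 * 8) / 32 = 256.0
print(calculate_elem_per_kb(1024, numpy.float64))  # (1024 * 8) / 64 = 128.0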
def is_transcript_available(video_id, language_code=None): filter_attrs = {'video__edx_video_id': video_id} if language_code: filter_attrs['language_code'] = language_code transcript_set = VideoTranscript.objects.filter(**filter_attrs) return transcript_set.exists()
Returns whether the transcripts are available for a video. Arguments: video_id: it can be an edx_video_id or an external_id extracted from external sources in a video component. language_code: it will be the language code of the requested transcript.
juraj-google-style
def _get_feed(self): if self.feed_name in self._feed_name_tab_map: for tab_name in self._feed_name_tab_map[self.feed_name]: for sheet in self.spreadsheet['sheets']: if sheet['properties']['title'] == tab_name: self.tab_name = tab_name return sheets_read(self.config, self.auth, self.trix_id, tab_name, self.trix_range) return [[]]
Fetches the feed based on initialization parameters. Returns: List of lists that represents the rows and columns of the feed. If the feed isn't found returns a list with an empty list.
github-repos
def make_target(url, extra_opts=None): parts = compat.urlparse(url, allow_fragments=False) scheme = parts.scheme.lower() if scheme in ["ftp", "ftps"]: creds = parts.username, parts.password tls = scheme == "ftps" from ftpsync import ftp_target target = ftp_target.FtpTarget( parts.path, parts.hostname, parts.port, username=creds[0], password=creds[1], tls=tls, timeout=None, extra_opts=extra_opts, ) else: target = FsTarget(url, extra_opts) return target
Factory that creates `_Target` objects from URLs. FTP targets must begin with the scheme ``ftp://`` or ``ftps://`` for TLS. Note: TLS is only supported on Python 2.7/3.2+. Args: url (str): extra_opts (dict, optional): Passed to Target constructor. Default: None. Returns: :class:`_Target`
juraj-google-style
def default(fields=None, count=5): projection = Sampling._create_projection(fields) return lambda sql: 'SELECT %s FROM (%s) LIMIT %d' % (projection, sql, count)
Provides a simple default sampling strategy which limits the result set by a count. Args: fields: an optional list of field names to retrieve. count: optional number of rows to limit the sampled results to. Returns: A sampling function that can be applied to get a random sampling.
juraj-google-style
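A hedged usage sketch: the _create_projection helper is not shown in the sample, so this assumes that fields=None maps to a '*' projection.

sampler = default(count=5)
print(sampler('SELECT * FROM people'))
# SELECT * FROM (SELECT * FROM people) LIMIT 5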
def resolve(self, pid, vendorSpecific=None): response = self.resolveResponse(pid, vendorSpecific) return self._read_dataone_type_response( response, 'ObjectLocationList', response_is_303_redirect=True )
See Also: resolveResponse() Args: pid: vendorSpecific: Returns:
juraj-google-style
def __init__(self, context=None): self._context = context or google.datalab.Context.default() self._client = _utils.make_client(self._context) self._group_dict = None
Initializes the Groups for a Stackdriver project. Args: context: An optional Context object to use instead of the global default.
juraj-google-style
def map_indices_in_shard(num_sparse_cores: int, offset_in_shard: int, shard_rotation: int, row_indices: tensor.Tensor) -> tuple[tensor.Tensor, tensor.Tensor]: shard_index = (row_indices % num_sparse_cores + shard_rotation) % num_sparse_cores position_in_shard = offset_in_shard + row_indices return (shard_index, position_in_shard)
Maps a row of a given table to its sparse core shard and position. Given a row index of a logical table and its layout in sparse core, returns the index of the shard where the row is placed and its relative position within that sparse core shard. Args: num_sparse_cores: The number of sparse cores; this determines the number of shards present. offset_in_shard: Offset within a shard where the queried table starts. shard_rotation: The rotation of this table's shards. row_indices: row indices of the embedding table being looked up. Returns: A tuple representing shard_index and position of the row in that shard.
github-repos
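The shard arithmetic above can be traced with plain NumPy; all values here are arbitrary illustrations, not a real table layout.

import numpy as np

row_indices = np.array([0, 1, 2, 3, 4, 5])
num_sparse_cores, offset_in_shard, shard_rotation = 4, 10, 1

shard_index = (row_indices % num_sparse_cores + shard_rotation) % num_sparse_cores
position_in_shard = offset_in_shard + row_indices
print(shard_index)        # [1 2 3 0 1 2] -- rows spread round-robin, rotated by 1
print(position_in_shard)  # [10 11 12 13 14 15]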
def enter_diff_mode(self, context_model=None): assert not self.diff_mode self.diff_mode = True if context_model is None: self.diff_from_source = True self.diff_context_model = self.context_model.copy() else: self.diff_from_source = False self.diff_context_model = context_model self.clear() self.setColumnCount(5) self.refresh()
Enter diff mode. Args: context_model (`ContextModel`): Context to diff against. If None, a copy of the current context is used.
juraj-google-style
def convert_to_tensor_or_composite(value, dtype=None, name=None) -> Union[EagerTensor, SymbolicTensor, composite_tensor.CompositeTensor]: return internal_convert_to_tensor_or_composite(value=value, dtype=dtype, name=name, as_ref=False)
Converts the given object to a `Tensor` or `CompositeTensor`. If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it is converted to a `Tensor` using `convert_to_tensor()`. Args: value: A `CompositeTensor` or an object that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` or `CompositeTensor`. name: (Optional.) A name to use if a new `Tensor` is created. Returns: A `Tensor` or `CompositeTensor`, based on `value`. Raises: ValueError: If `dtype` does not match the element type of `value`.
github-repos
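Assuming the function above is importable in your environment, a brief usage sketch with a dense value and a composite (ragged) value:

import tensorflow as tf

t = convert_to_tensor_or_composite([1, 2, 3])   # plain list -> tf.Tensor via convert_to_tensor()
rt = tf.ragged.constant([[1, 2], [3]])
same = convert_to_tensor_or_composite(rt)       # CompositeTensor is returned unmodified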
def Close(self): if (self.locked and (self.CheckLease() == 0)): raise LockError('Can not update lease that has already expired.') self._WriteAttributes() if self.locked: self.transaction.Release() if self.parent: self.parent.Close() self.mode = ''
Close and destroy the object. This is similar to Flush, but does not maintain object validity. Hence the object should not be interacted with after Close(). Raises: LockError: The lease for this object has expired.
codesearchnet
def deserialize_feature_columns(configs, custom_objects=None): columns_by_name = {} return [deserialize_feature_column(c, custom_objects, columns_by_name) for c in configs]
Deserializes a list of FeatureColumns configs. Returns a list of FeatureColumns given a list of config dicts acquired by `serialize_feature_columns`. Args: configs: A list of Dicts with the serialization of feature columns acquired by `serialize_feature_columns`. custom_objects: A Dict from custom_object name to the associated keras serializable objects (FeatureColumns, classes or functions). Returns: FeatureColumn objects corresponding to the input configs. Raises: ValueError if called with input that is not a list of FeatureColumns.
github-repos
def get_params(self, deep=True): params = {'weights':self.coef_, 'bias':self.intercept_} if deep: for key, value in self.B.items(): params['b_'+str(key)] = value return params
Get parameters for the estimator. Args: deep (boolean, optional): If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns: params: mapping of string to any. Parameter names mapped to their values.
juraj-google-style
def TransferFrom(self, wallet, from_addr, to_addr, amount): invoke_args = [self.ScriptHash.ToString(), 'transferFrom', [PromptUtils.parse_param(from_addr, wallet), PromptUtils.parse_param(to_addr, wallet), PromptUtils.parse_param(amount)]] (tx, fee, results, num_ops, engine_success) = TestInvokeContract(wallet, invoke_args, None, True) return (tx, fee, results)
Transfer a specified amount of a token from the wallet specified in the `from_addr` to the `to_addr` if the originator `wallet` has been approved to do so. Args: wallet (neo.Wallets.Wallet): a wallet instance. from_addr (str): public address of the account to transfer the given amount from. to_addr (str): public address of the account to transfer the given amount to. amount (int): quantity to send. Returns: tuple: InvocationTransaction: the transaction. int: the transaction fee. list: the neo VM evaluation stack results.
codesearchnet
def run(self, circuit): name = circuit.name dag = circuit_to_dag(circuit) del circuit for passset in self.working_list: for pass_ in passset: dag = self._do_pass(pass_, dag, passset.options) circuit = dag_to_circuit(dag) circuit.name = name return circuit
Run all the passes on a QuantumCircuit Args: circuit (QuantumCircuit): circuit to transform via all the registered passes Returns: QuantumCircuit: Transformed circuit.
juraj-google-style
def should_invoke_op_callbacks(): ctx = context.context() return ctx.op_callbacks and (not ctx.invoking_op_callbacks)
Determine if op callbacks are present and should be invoked. Returns: A thread-local result (boolean) indicating whether any op callback(s) exist and should be invoked.
github-repos
def _output_types(self) -> list[int]: num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op) output_types = [int(pywrap_tf_session.TF_OperationOutputType(self._tf_output(i))) for i in range(num_outputs)] return output_types
List this operation's output types. Returns: List of the types of the Tensors computed by this operation. Each element in the list is an integer whose value is one of the TF_DataType enums defined in pywrap_tf_session.h The length of this list indicates the number of output endpoints of the operation.
github-repos
def set_cache_policy(self, func): if func is None: func = self.default_cache_policy elif isinstance(func, bool): func = lambda unused_key, flag=func: flag self._cache_policy = func
Set the context cache policy function. Args: func: A function that accepts a Key instance as argument and returns a bool indicating if it should be cached. May be None.
juraj-google-style
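Possible usage, assuming an NDB-style context ctx and keys exposing kind() (hypothetical names, for illustration only):

ctx.set_cache_policy(lambda key: key.kind() == 'Account')  # cache only Account entities
ctx.set_cache_policy(False)  # a bool is wrapped into a constant policy for every key
ctx.set_cache_policy(None)   # fall back to ctx.default_cache_policy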
def Encode(self): assert self.value_dict_or_array is not None logging.log(1, 'Encoding ' + self.name) resolved = MessageValue._ResolveVars(self.value_dict_or_array) logging.debug('Resolved: ' + str(resolved)) return self.msg.encoding.SerializeToString(resolved, self.msg)
Encode this message instance into actual data stream. The supported encoding methods are: json, protobuf, and user-defined encodings. Returns: A string encoded.
github-repos
def get_organisations(self, **query_params): organisations = self.get_organisations_json(self.base_uri, query_params=query_params) organisations_list = [] for organisation_json in organisations: organisations_list.append(self.create_organisation(organisation_json)) return organisations_list
Get all organisations this member is attached to. Return a list of Organisation objects. Returns: list(Organisation): Return all organisations this member is attached to
codesearchnet
def observe(self, terminal, reward, index=0): self.current_terminal = terminal self.current_reward = reward if self.batched_observe: self.observe_terminal[index].append(self.current_terminal) self.observe_reward[index].append(self.current_reward) if self.current_terminal or len(self.observe_terminal[index]) >= self.batching_capacity: self.episode = self.model.observe( terminal=self.observe_terminal[index], reward=self.observe_reward[index], index=index ) self.observe_terminal[index] = list() self.observe_reward[index] = list() else: self.episode = self.model.observe( terminal=self.current_terminal, reward=self.current_reward )
Observe experience from the environment to learn from. Optionally pre-processes rewards. Child classes should call super to get the processed reward, e.g. terminal, reward = super()... Args: terminal (bool): boolean indicating if the episode terminated after the observation. reward (float): scalar reward that resulted from executing the action.
juraj-google-style
def parts(path): _path = normpath(path) components = _path.strip("/") _parts = ["/" if _path.startswith("/") else "./"] if components: _parts += components.split("/") return _parts
Split a path in to its component parts. Arguments: path (str): Path to split in to parts. Returns: list: List of components Example: >>> parts('/foo/bar/baz') ['/', 'foo', 'bar', 'baz']
juraj-google-style
def preprocess_async(train_dataset, output_dir, eval_dataset=None, checkpoint=None, cloud=None): with warnings.catch_warnings(): warnings.simplefilter('ignore') if (cloud is None): return _local.Local.preprocess(train_dataset, output_dir, eval_dataset, checkpoint) if (not isinstance(cloud, dict)): cloud = {} return _cloud.Cloud.preprocess(train_dataset, output_dir, eval_dataset, checkpoint, cloud)
Preprocess data. Produce output that can be used by training efficiently. Args: train_dataset: training data source to preprocess. Can be CsvDataset or BigQueryDataSet. If eval_dataset is None, the pipeline will randomly split train_dataset into train/eval set with 7:3 ratio. output_dir: The output directory to use. Preprocessing will create a sub directory under it for each run, and also update "latest" file which points to the latest preprocessed directory. Users are responsible for cleanup. Can be local or GCS path. eval_dataset: evaluation data source to preprocess. Can be CsvDataset or BigQueryDataSet. If specified, it will be used for evaluation during training, and train_dataset will be completely used for training. checkpoint: the Inception checkpoint to use. If None, a default checkpoint is used. cloud: a DataFlow pipeline option dictionary such as {'num_workers': 3}. If anything but not None, it will run in cloud. Otherwise, it runs locally. Returns: A google.datalab.utils.Job object that can be used to query state from or wait.
codesearchnet
def draw(self): for age, level in enumerate(self.tree.get_branches()): if age in self.ages: thickness = self._get_thickness(age) color = self._get_color(age) for branch in level: self._draw_branch(branch, color, thickness, age)
Draws the tree. Only the branch levels whose age appears in self.ages are drawn.
juraj-google-style
def binfiles_set(self, isnap): possible_files = set(self.filename(fstem, isnap, force_legacy=True) for fstem in phyvars.FIELD_FILES) return possible_files & self.files
Set of existing binary files at a given snap. Args: isnap (int): snapshot index. Returns: set of pathlib.Path: the set of output files available for this snapshot number.
juraj-google-style
def register_token( self, registry_address_hex: typing.AddressHex, token_address_hex: typing.AddressHex, retry_timeout: typing.NetworkTimeout = DEFAULT_RETRY_TIMEOUT, ) -> TokenNetwork: registry_address = decode_hex(registry_address_hex) token_address = decode_hex(token_address_hex) registry = self._raiden.chain.token_network_registry(registry_address) contracts_version = self._raiden.contract_manager.contracts_version if contracts_version == DEVELOPMENT_CONTRACT_VERSION: token_network_address = registry.add_token_with_limits( token_address=token_address, channel_participant_deposit_limit=UINT256_MAX, token_network_deposit_limit=UINT256_MAX, ) else: token_network_address = registry.add_token_without_limits( token_address=token_address, ) waiting.wait_for_payment_network( self._raiden, registry.address, token_address, retry_timeout, ) return self._raiden.chain.token_network(token_network_address)
Register a token with the raiden token manager. Args: registry_address: registry address token_address_hex (string): a hex encoded token address. Returns: The token network proxy.
juraj-google-style
def find_site_python(module_name, paths=None): from rez.packages_ import iter_packages import subprocess import ast import os py_cmd = 'import {x}; print {x}.__path__'.format(x=module_name) p = popen(['python', '-c', py_cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = p.communicate() if p.returncode: raise InvalidPackageError(("Failed to find installed python module '%s':\n%s" % (module_name, err))) module_paths = ast.literal_eval(out.strip()) def issubdir(path, parent_path): return path.startswith((parent_path + os.sep)) for package in iter_packages('python', paths=paths): if (not hasattr(package, '_site_paths')): continue contained = True for module_path in module_paths: if (not any((issubdir(module_path, x) for x in package._site_paths))): contained = False if contained: return package raise InvalidPackageError(("Failed to find python installation containing the module '%s'. Has python been installed as a rez package?" % module_name))
Find the rez native python package that contains the given module. This function is used by python 'native' rez installers to find the native rez python package that represents the python installation that this module is installed into. Note: This function is dependent on the behavior found in the python '_native' package found in the 'rez-recipes' repository. Specifically, it expects to find a python package with a '_site_paths' list attribute listing the site directories associated with the python installation. Args: module_name (str): Target python module. paths (list of str, optional): paths to search for packages, defaults to `config.packages_path`. Returns: `Package`: Native python package containing the named module.
codesearchnet
def lines_from_string(string, as_interned=False): if as_interned: return [sys.intern(line) for line in string.splitlines()] return string.splitlines()
Create a list of file lines from a given string. Args: string (str): File string as_interned (bool): List of "interned" strings (default False) Returns: strings (list): File line list
codesearchnet
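A quick usage sketch of the splitting behaviour, assuming the function above is in scope:

print(lines_from_string('alpha\nbeta\ngamma'))
# ['alpha', 'beta', 'gamma']
print(lines_from_string('alpha\nbeta', as_interned=True))
# same values, but each line is passed through sys.intern()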
def create_temp(node, namer): if isinstance(node, gast.Name): name = node.id elif isinstance(node, (gast.Attribute, gast.Subscript)): name = node.value.id else: raise TypeError temp_node = gast.Name(id=namer.temp(name), annotation=None, ctx=None) anno.setanno(temp_node, 'temp_var', node) return temp_node
Create a temporary variable. Args: node: Create a temporary variable to store this variable in. namer: A naming object that guarantees the names are unique. Returns: node: See `create_grad`. Returns a temporary variable, which is always a simple variable annotated with `temp_var`.
codesearchnet
def _controller_name(self, objtype): if objtype.endswith('y'): return objtype[:-1] + 'ies' if objtype[-1] in 'sx' or objtype[-2:] in ['sh', 'ch']: return objtype + 'es' if objtype.endswith('an'): return objtype[:-2] + 'en' return objtype + 's'
Determines the controller name for the object's type Args: objtype (str): The object type Returns: A string with the controller name
juraj-google-style
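A standalone restatement of the pluralization rules above, traced with hypothetical object types:

def controller_name_sketch(objtype):
    if objtype.endswith('y'):
        return objtype[:-1] + 'ies'
    if objtype[-1] in 'sx' or objtype[-2:] in ['sh', 'ch']:
        return objtype + 'es'
    if objtype.endswith('an'):
        return objtype[:-2] + 'en'
    return objtype + 's'

for t in ('policy', 'box', 'branch', 'woman', 'host'):
    print(t, '->', controller_name_sketch(t))
# policy -> policies, box -> boxes, branch -> branches, woman -> women, host -> hosts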
def pprint_cell(self, row, col): ndims = self.ndims if (col >= self.cols): raise Exception(('Maximum column index is %d' % (self.cols - 1))) elif (row >= self.rows): raise Exception(('Maximum row index is %d' % (self.rows - 1))) elif (row == 0): if (col >= ndims): if self.vdims: return self.vdims[(col - ndims)].pprint_label else: return '' return self.kdims[col].pprint_label else: dim = self.get_dimension(col) return dim.pprint_value(self.iloc[((row - 1), col)])
Formatted contents of table cell. Args: row (int): Integer index of table row col (int): Integer index of table column Returns: Formatted table cell contents
codesearchnet
def generate_nb_state_data(means, weights, R): cells = weights.shape[1] x_true = np.dot(means, weights) R_ = np.tile(R, (cells, 1)).T P_true = (x_true / (R_ + x_true)) sample = np.random.negative_binomial(np.tile(R, (cells, 1)).T, P_true) return sample.astype(float)
Generates data according to the Negative Binomial Convex Mixture Model. Args: means (array): Cell types- genes x clusters weights (array): Cell cluster assignments- clusters x cells R (array): dispersion parameter - 1 x genes Returns: data matrix - genes x cells
codesearchnet
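A self-contained sketch that draws synthetic parameters and checks the output shape; all distributions and sizes here are arbitrary, and the function above is assumed to be importable.

import numpy as np

genes, clusters, cells = 5, 2, 4
means = np.random.gamma(2.0, 10.0, size=(genes, clusters))
weights = np.random.dirichlet(np.ones(clusters), size=cells).T  # clusters x cells
R = np.random.uniform(1.0, 5.0, size=genes)

data = generate_nb_state_data(means, weights, R)
print(data.shape)  # (5, 4) -- genes x cells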
def analyze(self) -> Sequence[_HasReturnT]:
Calls every signature of this function with appropriate fake arguments. Returns: A sequence of objects with information about the result of calling the function with each of its signatures, with get_return_value() methods that retrieve the return values.
github-repos
def validate_and_copy_one_submission(self, submission_path):
    if os.path.exists(self.download_dir):
        shutil.rmtree(self.download_dir)
    os.makedirs(self.download_dir)
    if os.path.exists(self.validate_dir):
        shutil.rmtree(self.validate_dir)
    os.makedirs(self.validate_dir)
    # The logging format string was truncated in the extracted source; a simple
    # '#' separator banner is assumed here.
    logging.info('\n' + ('#' * 80) + '\n')
    local_path = self.copy_submission_locally(submission_path)
    metadata = self.base_validator.validate_submission(local_path)
    if not metadata:
        logging.error('Submission "%s" is INVALID', submission_path)
        self.stats.add_failure()
        return
    submission_type = metadata['type']
    container_name = metadata['container_gpu']
    logging.info('Submission "%s" is VALID', submission_path)
    self.list_of_containers.add(container_name)
    self.stats.add_success(submission_type)
    if self.do_copy:
        submission_id = '{0:04}'.format(self.cur_submission_idx)
        self.cur_submission_idx += 1
        self.copy_submission_to_destination(submission_path, TYPE_TO_DIR[submission_type], submission_id)
        self.id_to_path_mapping[submission_id] = submission_path
Validates one submission and copies it to target directory. Args: submission_path: path in Google Cloud Storage of the submission file
juraj-google-style
def get_user(self, username): response = self._get((self.rest_url + '/user'), params={'username': username, 'expand': 'attributes'}) if (not response.ok): return None return response.json()
Retrieve information about a user Returns: dict: User information None: If no user or failure occurred
codesearchnet
def create_route53_zone(client, zone_name): if (not zone_name.endswith('.')): zone_name += '.' zone_id = get_or_create_hosted_zone(client, zone_name) old_soa = get_soa_record(client, zone_id, zone_name) if (old_soa.text.min_ttl == '300'): return zone_id new_soa = copy.deepcopy(old_soa) logger.debug('Updating negative caching value on zone %s to 300.', zone_name) new_soa.text.min_ttl = '300' client.change_resource_record_sets(HostedZoneId=zone_id, ChangeBatch={'Comment': 'Update SOA min_ttl to 300.', 'Changes': [{'Action': 'UPSERT', 'ResourceRecordSet': {'Name': zone_name, 'Type': 'SOA', 'TTL': old_soa.ttl, 'ResourceRecords': [{'Value': str(new_soa.text)}]}}]}) return zone_id
Creates the given zone_name if it doesn't already exist.

Also sets the SOA negative caching TTL to something short (300 seconds).

Args:
    client (:class:`botocore.client.Route53`): The connection used to
        interact with Route53's API.
    zone_name (string): The name of the DNS hosted zone to create.

Returns:
    string: The zone id returned from AWS for the existing, or newly
        created zone.
codesearchnet
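A hedged usage sketch: it assumes boto3 is installed with AWS credentials configured, and that create_route53_zone and its helpers (get_or_create_hosted_zone, get_soa_record) are importable from the surrounding module. The zone name is illustrative.

import boto3

client = boto3.client('route53')
# The trailing dot is appended by the function if missing.
zone_id = create_route53_zone(client, 'example.com')
print('Hosted zone:', zone_id)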
def configuration_check(config):
    log_level = config.get('daemon', 'loglevel')
    num_level = getattr(logging, log_level.upper(), None)
    pidfile = config.get('daemon', 'pidfile')
    if not os.path.isdir(os.path.dirname(pidfile)):
        raise ValueError("{d} doesn't exist".format(d=os.path.dirname(pidfile)))
    if not isinstance(num_level, int):
        raise ValueError('Invalid log level: {}'.format(log_level))
    for _file in ('log_file', 'stderr_file'):
        if config.has_option('daemon', _file):
            try:
                touch(config.get('daemon', _file))
            except OSError as exc:
                raise ValueError(exc)
    for option, getter in DAEMON_OPTIONS_TYPE.items():
        try:
            getattr(config, getter)('daemon', option)
        except configparser.NoOptionError as error:
            if option not in DAEMON_OPTIONAL_OPTIONS:
                raise ValueError(error)
        except configparser.Error as error:
            raise ValueError(error)
        except ValueError as exc:
            msg = ("invalid data for '{opt}' option in daemon section: "
                   "{err}".format(opt=option, err=exc))
            raise ValueError(msg)
    service_configuration_check(config)
Perform a sanity check on configuration.

First it performs a sanity check against settings for daemon
and then against settings for each service check.

Arguments:
    config (obj): A configparser object which holds our configuration.

Returns:
    None if all checks pass, otherwise raises a ValueError exception.
codesearchnet
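A small sketch of driving configuration_check with an in-memory config; the section and option names come from the function above, while DAEMON_OPTIONS_TYPE, DAEMON_OPTIONAL_OPTIONS, touch, and service_configuration_check are assumed to be defined alongside it. The pidfile path is hypothetical.

import configparser

config = configparser.ConfigParser()
config.read_dict({
    'daemon': {
        'loglevel': 'info',
        'pidfile': '/var/run/healthchecker/healthchecker.pid',  # hypothetical path
    }
})

try:
    configuration_check(config)
except ValueError as exc:
    print('bad configuration:', exc)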
def _GetSanitizedEventValues(self, event): data_type = getattr(event, 'data_type', 'UNKNOWN') event_formatter = self._output_mediator.GetEventFormatter(event) if not event_formatter: raise errors.NoFormatterFound( 'Unable to find event formatter for: {0:s}.'.format(data_type)) message, _ = self._output_mediator.GetFormattedMessages(event) if message is None: raise errors.NoFormatterFound( 'Unable to find event formatter for: {0:s}.'.format(data_type)) source_short, source = self._output_mediator.GetFormattedSources(event) if source is None or source_short is None: raise errors.NoFormatterFound( 'Unable to find event formatter for: {0:s}.'.format(data_type)) datetime_string = self._FormatDateTime(event) format_variables = self._output_mediator.GetFormatStringAttributeNames( event) if format_variables is None: raise errors.NoFormatterFound( 'Unable to find event formatter for: {0:s}.'.format(data_type)) extra_attributes = [] for attribute_name, attribute_value in sorted(event.GetAttributes()): if (attribute_name in definitions.RESERVED_VARIABLE_NAMES or attribute_name in format_variables): continue extra_attributes.append( '{0:s}: {1!s} '.format(attribute_name, attribute_value)) extra_attributes = ' '.join(extra_attributes) inode = event.inode if inode is None and hasattr(event, 'pathspec'): inode = getattr(event.pathspec, 'inode', '-') if inode is None: inode = '-' tags = None if getattr(event, 'tag', None): tags = getattr(event.tag, 'tags', None) taglist = '' if isinstance(tags, (list, tuple)): taglist = ','.join(tags) offset = event.offset if offset is None: offset = 0 row = { 'timezone': '{0!s}'.format(self._output_mediator.timezone), 'MACB': self._output_mediator.GetMACBRepresentation(event), 'source': source_short, 'sourcetype': source, 'type': event.timestamp_desc or '-', 'user': getattr(event, 'username', '-'), 'host': getattr(event, 'hostname', '-'), 'description': message, 'filename': getattr(event, 'filename', '-'), 'inode': inode, 'notes': getattr(event, 'notes', '-'), 'format': getattr(event, 'parser', '-'), 'extra': extra_attributes, 'datetime': datetime_string, 'reportnotes': '', 'inreport': '', 'tag': taglist, 'offset': offset, 'vss_store_number': self._GetVSSNumber(event), 'URL': getattr(event, 'url', '-'), 'record_number': getattr(event, 'record_number', 0), 'event_identifier': getattr(event, 'event_identifier', '-'), 'event_type': getattr(event, 'event_type', '-'), 'source_name': getattr(event, 'source_name', '-'), 'user_sid': getattr(event, 'user_sid', '-'), 'computer_name': getattr(event, 'computer_name', '-'), 'evidence': self._evidence} return row
Sanitizes the event for use in 4n6time. Args: event (EventObject): event. Returns: dict[str, object]: dictionary containing the sanitized event values. Raises: NoFormatterFound: If no event formatter can be found to match the data type in the event object.
juraj-google-style
def _load_config_include(self, include_directory): include_directory = os.path.join(self.app_path, include_directory) if not os.path.isdir(include_directory): msg = 'Provided include directory does not exist ({}).'.format(include_directory) sys.exit(msg) profiles = [] for filename in sorted(os.listdir(include_directory)): if filename.endswith('.json'): self.log.info('Loading config: {}'.format(filename)) print('Include File: {}{}{}'.format(c.Style.BRIGHT, c.Fore.MAGENTA, filename)) config_file = os.path.join(include_directory, filename) with open(config_file) as data_file: try: profiles.extend(json.load(data_file)) except ValueError as e: print('Invalid JSON file: {}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, e)) sys.exit(1) return profiles
Load included configuration files. Args: include_directory (str): The name of the config include directory. Returns: list: A list of all profiles for the current App.
juraj-google-style
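The include-loading pattern in _load_config_include can be reproduced standalone; this sketch drops the tcex-specific logging and colour output and simply merges every *.json file in a directory into one list. The function name and error handling here are illustrative, not the library's API.

import json
import os
import sys

def load_json_profiles(include_directory):
    # Merge all JSON arrays found in include_directory into a single list.
    if not os.path.isdir(include_directory):
        sys.exit('Provided include directory does not exist ({}).'.format(include_directory))
    profiles = []
    for filename in sorted(os.listdir(include_directory)):
        if filename.endswith('.json'):
            with open(os.path.join(include_directory, filename)) as data_file:
                try:
                    profiles.extend(json.load(data_file))
                except ValueError as e:
                    sys.exit('Invalid JSON file {}: {}'.format(filename, e))
    return profiles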
def stop(self, timeout=5): for worker in self._threads: self._queue.put(_SHUTDOWNREQUEST) current = threading.currentThread() if timeout is not None and timeout >= 0: endtime = time.time() + timeout while self._threads: worker = self._threads.pop() if worker is not current and worker.isAlive(): try: if timeout is None or timeout < 0: worker.join() else: remaining_time = endtime - time.time() if remaining_time > 0: worker.join(remaining_time) if worker.isAlive(): c = worker.conn if c and not c.rfile.closed: try: c.socket.shutdown(socket.SHUT_RD) except TypeError: c.socket.shutdown() worker.join() except ( AssertionError, KeyboardInterrupt, ): pass
Terminate all worker threads. Args: timeout (int): time to wait for threads to stop gracefully
juraj-google-style
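The shutdown strategy above (one sentinel per worker, then a bounded join) can be illustrated with a tiny self-contained pool; _SHUTDOWNREQUEST and the socket clean-up are specific to the server class, so this sketch only mirrors the sentinel/deadline part.

import queue
import threading
import time

SHUTDOWN = object()  # sentinel, stands in for _SHUTDOWNREQUEST

def worker(q):
    while True:
        item = q.get()
        if item is SHUTDOWN:
            return
        time.sleep(0.01)  # pretend to handle a request

q = queue.Queue()
threads = [threading.Thread(target=worker, args=(q,)) for _ in range(4)]
for t in threads:
    t.start()

# Graceful stop: one sentinel per worker, then join with a shared deadline.
for _ in threads:
    q.put(SHUTDOWN)
endtime = time.time() + 5
for t in threads:
    t.join(max(0, endtime - time.time()))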
def withdraw(self, amount, currency, payment_method_id):
    params = {'amount': amount,
              'currency': currency,
              'payment_method_id': payment_method_id}
    return self._send_message('post', '/withdrawals/payment-method',
                              data=json.dumps(params))
Withdraw funds to a payment method. See AuthenticatedClient.get_payment_methods() to receive information regarding payment methods. Args: amount (Decimal): The amount to withdraw. currency (str): Currency type (eg. 'BTC') payment_method_id (str): ID of the payment method. Returns: dict: Withdraw details. Example:: { "id":"593533d2-ff31-46e0-b22e-ca754147a96a", "amount": "10.00", "currency": "USD", "payout_at": "2016-08-20T00:31:09Z" }
codesearchnet
def DeserializeUnsigned(self, reader):
    self.Version = reader.ReadUInt32()
    self.PrevHash = reader.ReadUInt256()
    self.MerkleRoot = reader.ReadUInt256()
    self.Timestamp = reader.ReadUInt32()
    self.Index = reader.ReadUInt32()
    self.ConsensusData = reader.ReadUInt64()
    self.NextConsensus = reader.ReadUInt160()
Deserialize unsigned data only.

Args:
    reader (neo.IO.BinaryReader): reader to read the serialized
        unsigned fields from.
codesearchnet
def partition_or_replicate_on_host(tensor, dims): if dims is None: return itertools.repeat(tensor) dims = np.array(dims) output = [tensor] shape_list = np.array(tensor.shape.as_list()) quotients, remainders = np.divmod(shape_list, dims) for axis, (quotient, remainder, dim, original_size) in enumerate(zip(quotients, remainders, dims, shape_list)): if dim <= 1: continue if remainder > 0: ceil_ratio = quotient + 1 num_full_slots, left_over = np.divmod(original_size, ceil_ratio) num_or_size_splits = [ceil_ratio] * num_full_slots + [left_over] if len(num_or_size_splits) < dim: num_or_size_splits += [0] * (dim - len(num_or_size_splits)) new_output = [] for x in output: new_output.append(array_ops.split(x, num_or_size_splits=num_or_size_splits, axis=axis)) output = new_output else: output = [array_ops.split(x, int(dim), axis=axis) for x in output] output = nest.flatten(output) return output
Partitions or replicates the input tensor. The ops inside this function are placed on the host side. Args: tensor: The input tensor which will be partitioned or replicated. dims: A list of integer describes how to partition the input tensor. Returns: An iterator of `Tensor`s or a list of partitioned tensors.
github-repos
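The uneven-split arithmetic is the subtle part of partition_or_replicate_on_host; the NumPy-only sketch below reproduces just that calculation (how an axis of size original_size is carved into dim pieces), without any TensorFlow ops. The helper name is made up for illustration.

import numpy as np

def split_sizes(original_size, dim):
    # Mirrors the remainder handling above: ceil-sized slots, a leftover, zero padding.
    quotient, remainder = divmod(original_size, dim)
    if remainder == 0:
        return [quotient] * dim
    ceil_ratio = quotient + 1
    num_full_slots, left_over = divmod(original_size, ceil_ratio)
    sizes = [ceil_ratio] * num_full_slots + [left_over]
    return sizes + [0] * (dim - len(sizes))

print(split_sizes(10, 4))  # [3, 3, 3, 1]
print(split_sizes(8, 4))   # [2, 2, 2, 2]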
def to_hour(num) -> str:
    to_str = str(int(num))
    return pd.Timestamp(f'{to_str[:-2]}:{to_str[-2:]}').strftime('%H:%M')
Convert YAML input to hours

Args:
    num: number in YAML file, e.g., 900, 1700, etc.

Returns:
    str

Examples:
    >>> to_hour(900)
    '09:00'
    >>> to_hour(1700)
    '17:00'
codesearchnet
def add(self, text, checked=False, sort=None):
    if self.parent is None:
        raise exception.InvalidException('Item has no parent')
    node = self.parent.add(text, checked, sort)
    self.indent(node)
    return node
Add a new sub item to the list. This item must already be attached to a list. Args: text (str): The text. checked (bool): Whether this item is checked. sort (int): Item id for sorting.
codesearchnet
def heightmap_count_cells(hm: np.ndarray, mi: float, ma: float) -> int: return int(lib.TCOD_heightmap_count_cells(_heightmap_cdata(hm), mi, ma))
Return the number of map cells which value is between ``mi`` and ``ma``. Args: hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions. mi (float): The lower bound. ma (float): The upper bound. Returns: int: The count of values which fall between ``mi`` and ``ma``. .. deprecated:: 8.1 Can be replaced by an equivalent NumPy function such as: ``numpy.count_nonzero((mi <= hm) & (hm < ma))``
codesearchnet
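Since the docstring above already points at a NumPy replacement, here is a short sketch of that equivalent; no libtcod is required, and the heightmap values and bounds are arbitrary.

import numpy as np

hm = np.random.random((16, 16)).astype(np.float32)
mi, ma = 0.25, 0.75
count = int(np.count_nonzero((mi <= hm) & (hm < ma)))
print(count)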
def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=''): key_flags = self._GetKeyFlagsForModule(module) if key_flags: self.__RenderModuleFlags(module, key_flags, output_lines, prefix)
Generates a help string for the key flags of a given module. Args: module: A module object or a module name (a string). output_lines: A list of strings. The generated help message lines will be appended to this list. prefix: A string that is prepended to each generated help line.
juraj-google-style
def format_sec_to_dhm(sec):
    rem_int, s_int = divmod(int(sec), 60)
    rem_int, m_int = divmod(rem_int, 60)
    d_int, h_int = divmod(rem_int, 24)
    return '{}d{:02d}h{:02d}m'.format(d_int, h_int, m_int)
Format seconds to days, hours, minutes.

Args:
    sec: float or int
        Number of seconds in a period of time

Returns:
    Period of time represented as a string on the form ``0d00h00m``.
juraj-google-style
def copy(self, src, dst, other_system=None): with _handle_azure_exception(): self.client.copy_file( copy_source=(other_system or self)._format_src_url(src, self), **self.get_client_kwargs(dst))
Copy object of the same storage. Args: src (str): Path or URL. dst (str): Path or URL. other_system (pycosio.storage.azure._AzureBaseSystem subclass): The source storage system.
juraj-google-style
def __matches(s1, s2, ngrams_fn, n=3):
    ngrams1, ngrams2 = set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n))
    return ngrams1.intersection(ngrams2)
Returns the n-grams that match between two sequences

See also: SequenceMatcher.get_matching_blocks

Args:
    s1: a string
    s2: another string
    n: an int for the n in n-gram

Returns:
    set: the n-grams common to both sequences
codesearchnet
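A self-contained sketch of the same idea, with a hypothetical character n-gram helper standing in for ngrams_fn:

def char_ngrams(s, n=3):
    # Hypothetical helper: overlapping character n-grams of s.
    return [s[i:i + n] for i in range(len(s) - n + 1)]

def matches(s1, s2, ngrams_fn=char_ngrams, n=3):
    ngrams1, ngrams2 = set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n))
    return ngrams1.intersection(ngrams2)

print(matches('overflowing', 'flow charts'))  # {'flo', 'low'}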
def pandas_dataframe(self, start, stop, ncol, **kwargs):
    try:
        int(start)
        int(stop)
    except TypeError:
        print('start and stop must be ints')
    try:
        ncol = int(ncol)
        return pd.read_csv(six.StringIO('\n'.join(self[start:stop])),
                           delim_whitespace=True, names=range(ncol), **kwargs)
    except TypeError:
        try:
            ncol = list(ncol)
            return pd.read_csv(six.StringIO('\n'.join(self[start:stop])),
                               delim_whitespace=True, names=ncol, **kwargs)
        except TypeError:
            print('Cannot pandas_dataframe if ncol is {}, must be '
                  'int or list'.format(type(ncol)))
Returns the result of whitespace-delimited pandas.read_csv on
a subset of the file.

Args:
    start (int): line number where structured data starts
    stop (int): line number where structured data stops
    ncol (int or list): the number of columns in the structured
        data or a list of that length with column names

Returns:
    pd.DataFrame: structured data
codesearchnet
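The core trick above is feeding a slice of text lines to pandas.read_csv through an in-memory buffer; a minimal standalone sketch, using io.StringIO instead of six and a made-up three-column block of numbers:

import io
import pandas as pd

lines = ['1.0 2.0 3.0', '4.0 5.0 6.0', '7.0 8.0 9.0']
df = pd.read_csv(io.StringIO('\n'.join(lines)),
                 delim_whitespace=True, names=['x', 'y', 'z'])
print(df)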
def update_snmp_configuration(self, configuration, timeout=-1):
    data = configuration.copy()
    if 'type' not in data:
        data['type'] = 'snmp-configuration'
    uri = '{}{}'.format(self.data['uri'], self.SNMP_CONFIGURATION_PATH)
    return self._helper.update(data, uri=uri, timeout=timeout)
Updates the SNMP configuration of a logical interconnect. Changes to the SNMP configuration are asynchronously
applied to all managed interconnects.

Args:
    configuration: snmp configuration.
    timeout: Timeout in seconds. Waits for task completion by default. The timeout does not abort the
        operation in OneView; it just stops waiting for its completion.

Returns:
    dict: The Logical Interconnect.
codesearchnet
def GetTopLevel(self, file_object):
    try:
        top_level_object = biplist.readPlist(file_object)
    except (biplist.InvalidPlistException,
            biplist.NotBinaryPlistException) as exception:
        raise errors.UnableToParseFile(
            'Unable to parse plist with error: {0!s}'.format(exception))
    return top_level_object
Returns the deserialized content of a plist as a dictionary object. Args: file_object (dfvfs.FileIO): a file-like object to parse. Returns: dict[str, object]: contents of the plist. Raises: UnableToParseFile: when the file cannot be parsed.
codesearchnet
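For a standalone illustration outside plaso, the standard-library plistlib offers the same kind of top-level read; this is an analogous sketch, not the parser's actual code path (which relies on biplist), and the file name is hypothetical.

import plistlib

def read_plist_top_level(file_object):
    try:
        return plistlib.load(file_object)
    except plistlib.InvalidFileException as exception:
        raise ValueError('Unable to parse plist with error: {0!s}'.format(exception))

# with open('Info.plist', 'rb') as file_object:   # hypothetical file
#     top_level = read_plist_top_level(file_object)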
def parse_machine_listing(text: str, convert: bool=True,
                          strict: bool=True) -> List[dict]:
    listing = []
    for line in text.splitlines(False):
        facts = line.split(';')
        row = {}
        filename = None
        for fact in facts:
            name, sep, value = fact.partition('=')
            if sep:
                name = name.strip().lower()
                value = value.strip().lower()
                if convert:
                    try:
                        value = convert_machine_list_value(name, value)
                    except ValueError:
                        if strict:
                            raise
                row[name] = value
            elif name[0:1] == ' ':
                filename = name[1:]
            else:
                name = name.strip().lower()
                row[name] = ''
        if filename:
            row['name'] = filename
            listing.append(row)
        elif strict:
            raise ValueError('Missing filename.')
    return listing
Parse machine listing. Args: text: The listing. convert: Convert sizes and dates. strict: Method of handling errors. ``True`` will raise ``ValueError``. ``False`` will ignore rows with errors. Returns: list: A list of dict of the facts defined in RFC 3659. The key names must be lowercase. The filename uses the key ``name``.
codesearchnet
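A usage sketch for parse_machine_listing with one made-up MLSD line; convert=False is used so the sketch does not depend on the convert_machine_list_value helper.

sample = 'type=file;size=1024;modify=20240101120000; readme.txt'
rows = parse_machine_listing(sample, convert=False, strict=True)
print(rows)
# [{'type': 'file', 'size': '1024', 'modify': '20240101120000', 'name': 'readme.txt'}]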
def ReadLine(self, file_object): line, _, self.lines = self.lines.partition('\n') if not line: self.ReadLines(file_object) line, _, self.lines = self.lines.partition('\n') return line
Reads a line. Args: file_object (dfvfs.FileIO): file-like object. Returns: str: line read from the lines buffer.
juraj-google-style
def _init_params(self, amplitude, length_scale, validate_args): dtype = util.maybe_get_common_dtype( [amplitude, length_scale]) if amplitude is not None: amplitude = tf.convert_to_tensor( value=amplitude, name='amplitude', dtype=dtype) self._amplitude = _validate_arg_if_not_none( amplitude, tf.compat.v1.assert_positive, validate_args) if length_scale is not None: length_scale = tf.convert_to_tensor( value=length_scale, name='length_scale', dtype=dtype) self._length_scale = _validate_arg_if_not_none( length_scale, tf.compat.v1.assert_positive, validate_args) return dtype
Shared init logic for `amplitude` and `length_scale` params. Args: amplitude: `Tensor` (or convertible) or `None` to convert, validate. length_scale: `Tensor` (or convertible) or `None` to convert, validate. validate_args: If `True`, parameters are checked for validity despite possibly degrading runtime performance Returns: dtype: The common `DType` of the parameters.
juraj-google-style