Columns: code (string, lengths 20 to 4.93k) · docstring (string, lengths 33 to 1.27k) · source (string, 3 classes)
def get_parameter(self, name): i = self.get_parameter_names(include_frozen=True).index(name) return self.get_parameter_vector(include_frozen=True)[i]
Get a parameter value by name. Args: name: The name of the parameter.
juraj-google-style
def from_dict(cls, dict_repr: Dict[Union['DecisionPoint', str], Union[None, 'DNA', float, int, str]], dna_spec: DNASpec, use_ints_as_literals: bool=False) -> 'DNA': def _get_decision(spec: DNASpec): decision = dict_repr.get(spec.id, None) if decision is None: decision = dict_repr.get(spec, None) if decision is None and spec.name: decision = dict_repr.get(spec.name, None) if isinstance(decision, list): dict_repr[spec.name] = decision[1:] decision = decision[0] if decision else None return decision def _choice_index(subchoice, value: Union[int, float, str]) -> int: if isinstance(value, int) and (not use_ints_as_literals): index = value if index < 0 or index >= len(subchoice.candidates): identifier = subchoice.name or subchoice.id raise ValueError(f"Candidate index out of range at choice '{identifier}'. Index={index}, Number of candidates={len(subchoice.candidates)}.") else: index = subchoice.candidate_index(value) return index def _make_dna(spec: DNASpec) -> DNA: if spec.is_space: children = [] for elem in spec.elements: child = _make_dna(elem) if child is not None: children.append(child) return DNA(None, children) elif spec.is_categorical: children = [] for choice_idx in range(spec.num_choices): subchoice = spec.subchoice(choice_idx) value = _get_decision(subchoice) if value is None and subchoice.is_subchoice: parent_decisions = _get_decision(spec) if parent_decisions is not None: assert len(parent_decisions) == spec.num_choices, (parent_decisions, spec) value = parent_decisions[choice_idx] if value is None: identifier = subchoice.name or subchoice.id raise ValueError(f"Value for '{identifier}' is not found in the dictionary {dict_repr!r}.") if isinstance(value, DNA): children.append(value) else: choice_index = _choice_index(subchoice, value) subspace_dna = _make_dna(subchoice.candidates[choice_index]) children.append(DNA(choice_index, [subspace_dna] if subspace_dna else [])) return DNA(None, children) elif spec.is_numerical or spec.is_custom_decision_point: value = _get_decision(spec) if value is None: raise ValueError(f"Value for '{spec.name or spec.id}' is not found in the dictionary {dict_repr!r}.") if isinstance(value, DNA): value = value.value if spec.is_numerical: if value < spec.min_value: raise ValueError(f"The decision for '{spec.name or spec.id}' should be no less than {spec.min_value}. Encountered {value}.") if value > spec.max_value: raise ValueError(f"The decision for '{spec.name or spec.id}' should be no greater than {spec.max_value}. Encountered {value}.") elif not isinstance(value, str): raise ValueError(f"The decision for '{spec.name or spec.id}' should be a string. Encountered {value}.") return DNA(value, None) else: raise NotImplementedError('Should never happen.') dna = _make_dna(dna_spec) return dna.use_spec(dna_spec)
Create a DNA from its dictionary representation. Args: dict_repr: The dictionary representation of the DNA. The keys should be either strings as the decision point ID or DNASpec objects. The values should be either numeric or literal values for the decisions. For inactive decisions, their ID/spec should either be absent from the dictionary, or use None as their values. dna_spec: The DNASpec that applies to the DNA. use_ints_as_literals: If True, when an integer is encountered for a dictionary value, treat it as the literal value. Otherwise, always treat it as a candidate index. Returns: A DNA object.
github-repos
def py_hash(key, num_buckets): (b, j) = ((- 1), 0) if (num_buckets < 1): raise ValueError('num_buckets must be a positive number') while (j < num_buckets): b = int(j) key = (((key * long(2862933555777941757)) + 1) & 18446744073709551615) j = (float((b + 1)) * (float((1 << 31)) / float(((key >> 33) + 1)))) return int(b)
Generate a number in the range [0, num_buckets). Args: key (int): The key to hash. num_buckets (int): Number of buckets to use. Returns: The bucket number `key` computes to. Raises: ValueError: If `num_buckets` is not a positive number.
codesearchnet
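The function above is a jump-consistent-hash style bucket assignment, but it relies on Python 2's long. A minimal Python 3 sketch of the same idea (the renamed helper and the bucket counts are purely illustrative):

# Python 3 sketch of the same jump-consistent-hash logic as py_hash above.
def py_hash3(key: int, num_buckets: int) -> int:
    if num_buckets < 1:
        raise ValueError('num_buckets must be a positive number')
    b, j = -1, 0
    while j < num_buckets:
        b = int(j)
        key = (key * 2862933555777941757 + 1) & 0xFFFFFFFFFFFFFFFF
        j = float(b + 1) * (float(1 << 31) / float((key >> 33) + 1))
    return int(b)

# Keys map to stable buckets; most keys keep their bucket when num_buckets grows.
print(py_hash3(12345, 8))   # some bucket in [0, 8)
print(py_hash3(12345, 9))   # usually the same bucket, occasionally 8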
def isdir(self, path, follow_symlinks=True): return self._is_of_type(path, S_IFDIR, follow_symlinks)
Determine if path identifies a directory. Args: path: Path to filesystem object. Returns: `True` if path points to a directory (following symlinks). Raises: TypeError: if path is None.
juraj-google-style
def pack_range(key, packing, grad_vars, rng): to_pack = grad_vars[rng[0]:(rng[1] + 1)] members = [] variables = [] restore_shapes = [] with tf.name_scope('pack'): for (g, v) in to_pack: variables.append(v) restore_shapes.append(g.shape) with tf.device(g.device): members.append(tf.reshape(g, [(- 1)])) packing[key] = GradPackTuple(indices=range(rng[0], (rng[1] + 1)), vars=variables, shapes=restore_shapes) with tf.device(members[0].device): return tf.concat(members, 0)
Form the concatenation of a specified range of gradient tensors. Args: key: Value under which to store meta-data in packing that will be used later to restore the grad_var list structure. packing: Dict holding data describing packed ranges of small tensors. grad_vars: List of (grad, var) pairs for one tower. rng: A pair of integers giving the first, last indices of a consecutive range of tensors to be packed. Returns: A tensor that is the concatenation of all the specified small tensors.
codesearchnet
def Info(component): try: from IPython.core import oinspect try: inspector = oinspect.Inspector(theme_name='neutral') except TypeError: inspector = oinspect.Inspector() info = inspector.info(component) if info['docstring'] == '<no docstring>': info['docstring'] = None except ImportError: info = _InfoBackup(component) try: unused_code, lineindex = inspect.findsource(component) info['line'] = lineindex + 1 except (TypeError, OSError): info['line'] = None if 'docstring' in info: info['docstring_info'] = docstrings.parse(info['docstring']) return info
Returns a dict with information about the given component. The dict will have at least some of the following fields. type_name: The type of `component`. string_form: A string representation of `component`. file: The file in which `component` is defined. line: The line number at which `component` is defined. docstring: The docstring of `component`. init_docstring: The init docstring of `component`. class_docstring: The class docstring of `component`. call_docstring: The call docstring of `component`. length: The length of `component`. Args: component: The component to analyze. Returns: A dict with information about the component.
github-repos
def rename(self, renaming: Dict[(str, str)]) -> 'Substitution': return Substitution(((renaming.get(name, name), value) for (name, value) in self.items()))
Return a copy of the substitution with renamed variables. Example: Rename the variable *x* to *y*: >>> subst = Substitution({'x': a}) >>> subst.rename({'x': 'y'}) {'y': Symbol('a')} Args: renaming: A dictionary mapping old variable names to new ones. Returns: A copy of the substitution where variable names have been replaced according to the given renaming dictionary. Names that are not contained in the dictionary are left unchanged.
codesearchnet
def check_hours(tickers, tz_exch, tz_loc=DEFAULT_TZ) -> pd.DataFrame: cols = ['Trading_Day_Start_Time_EOD', 'Trading_Day_End_Time_EOD'] con, _ = create_connection() hours = con.ref(tickers=tickers, flds=cols) cur_dt = pd.Timestamp('today').strftime('%Y-%m-%d ') hours.loc[:, 'local'] = hours.value.astype(str).str[:-3] hours.loc[:, 'exch'] = pd.DatetimeIndex( cur_dt + hours.value.astype(str) ).tz_localize(tz_loc).tz_convert(tz_exch).strftime('%H:%M') hours = pd.concat([ hours.set_index(['ticker', 'field']).exch.unstack().loc[:, cols], hours.set_index(['ticker', 'field']).local.unstack().loc[:, cols], ], axis=1) hours.columns = ['Exch_Start', 'Exch_End', 'Local_Start', 'Local_End'] return hours
Check exchange hours vs local hours Args: tickers: list of tickers tz_exch: exchange timezone tz_loc: local timezone Returns: Local and exchange hours
juraj-google-style
def insert_into_obj(self, data): if not data: data = '' size = len(data) n1 = size%256 n2 = size/256 self.send('^DI'+chr(n1)+chr(n2)+data)
Insert text into selected object. Args: data: The data you want to insert. Returns: None Raises: None
juraj-google-style
def _build_split_filenames(self, split_info_list): filenames = [] for split_info in split_info_list: filenames.extend(naming.filepaths_for_dataset_split( dataset_name=self.name, split=split_info.name, num_shards=split_info.num_shards, data_dir=self._data_dir, filetype_suffix=self._file_format_adapter.filetype_suffix, )) return filenames
Construct the split filenames associated with the split info. The filenames correspond to the pre-processed dataset files present in the root directory of the dataset. Args: split_info_list: (list[SplitInfo]) List of splits from which to generate the filenames Returns: filenames: (list[str]) The list of filename paths corresponding to the split info objects
juraj-google-style
def serve(self, model_dir, environment): logger.info("serving") self.container_root = self._create_tmp_folder() logger.info('creating hosting dir in {}'.format(self.container_root)) volumes = self._prepare_serving_volumes(model_dir) if sagemaker.estimator.DIR_PARAM_NAME.upper() in environment: script_dir = environment[sagemaker.estimator.DIR_PARAM_NAME.upper()] parsed_uri = urlparse(script_dir) if parsed_uri.scheme == 'file': volumes.append(_Volume(parsed_uri.path, '/opt/ml/code')) environment = environment.copy() environment[sagemaker.estimator.DIR_PARAM_NAME.upper()] = '/opt/ml/code' if _ecr_login_if_needed(self.sagemaker_session.boto_session, self.image): _pull_image(self.image) self._generate_compose_file('serve', additional_env_vars=environment, additional_volumes=volumes) compose_command = self._compose() self.container = _HostingContainer(compose_command) self.container.start()
Host a local endpoint using docker-compose. Args: model_dir (str): path or s3:// location of the model data to host. environment (dict): environment variables to be passed to the hosting container.
juraj-google-style
def concatenate(cls, list_of_stats): all_stats = np.stack([stats.values for stats in list_of_stats]) all_counts = all_stats[:, 4] all_counts_relative = (all_counts / np.sum(all_counts)) min_value = float(np.min(all_stats[:, 2])) max_value = float(np.max(all_stats[:, 3])) mean_value = float(np.sum((all_counts_relative * all_stats[:, 0]))) var_value = float(np.sum((all_counts_relative * (all_stats[:, 1] + np.power((all_stats[:, 0] - mean_value), 2))))) num_value = int(np.sum(all_counts)) return cls(mean_value, var_value, min_value, max_value, num_value)
Take a list of stats from different sets of data points and merge them to get stats over all data points. Args: list_of_stats (iterable): A list containing stats for different sets of data points. Returns: DataStats: Stats calculated over all sets of data points.
codesearchnet
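The merge above is the standard pooled mean/variance computation. A small self-contained numpy check of the same math, using the [mean, var, min, max, count] column layout taken from the code (the DataStats class itself is not needed):

import numpy as np

# Two disjoint sets of data points and their per-set stats [mean, var, min, max, count].
a, b = np.array([1.0, 2.0, 3.0]), np.array([10.0, 11.0])
stats = np.array([[a.mean(), a.var(), a.min(), a.max(), a.size],
                  [b.mean(), b.var(), b.min(), b.max(), b.size]])

counts = stats[:, 4]
w = counts / counts.sum()
mean = np.sum(w * stats[:, 0])
var = np.sum(w * (stats[:, 1] + (stats[:, 0] - mean) ** 2))

# The pooled values match the stats of the concatenated data.
combined = np.concatenate([a, b])
assert np.isclose(mean, combined.mean()) and np.isclose(var, combined.var())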
def get_offset(self, envelope): if isinstance(envelope, collections.Sequence): envelope = Envelope(envelope) if not (self.envelope.contains(envelope) or self.envelope.intersects(envelope)): raise ValueError('Envelope does not intersect with this extent') coords = self.affine.transform((envelope.ul, envelope.lr)) nxy = [(min(dest, size) - origin) or 1 for size, origin, dest in zip(self.size, *coords)] return coords[0] + tuple(nxy)
Returns a 4-tuple pixel window (x_offset, y_offset, x_size, y_size). Arguments: envelope -- coordinate extent tuple or Envelope
juraj-google-style
def write_version(name=None, path=None): if name in (None, '__main__'): path = path or os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "version.json") contents = { 'version': __version__, 'version_string': __version_string__, } with open(path, 'w') as filehandle: filehandle.write(json.dumps(contents, sort_keys=True, indent=4))
Write the version info to ../version.json, for setup.py. Args: name (Optional[str]): this is for the ``write_version(name=__name__)`` below. That's one way to both follow the ``if __name__ == '__main__':`` convention but also allow for full coverage without ignoring parts of the file. path (Optional[str]): the path to write the version json to. Defaults to ../version.json
juraj-google-style
def manual_get_pfam_annotations(seq, outpath, searchtype='phmmer', force_rerun=False): if op.exists(outpath): with open(outpath, 'r') as f: json_results = json.loads(json.load(f)) else: fseq = '>Seq\n' + seq if searchtype == 'phmmer': parameters = {'seqdb': 'pdb', 'seq': fseq} if searchtype == 'hmmscan': parameters = {'hmmdb': 'pfam', 'seq': fseq} enc_params = urllib.urlencode(parameters).encode('utf-8') request = urllib2.Request('http: url = (urllib2.urlopen(request).geturl() + '?output=json') request = str(url) request_read = urlopen(request).read().decode("utf-8") with open(outpath, 'w') as f: json.dump(request_read, f) json_results = json.loads(request_read) return json_results['results']['hits']
Retrieve and download PFAM results from the HMMER search tool. Args: seq: Amino acid sequence string to search with. outpath: Path to the JSON file where results are saved and reloaded from. searchtype: HMMER search type, 'phmmer' or 'hmmscan'. force_rerun: Flag intended to force a new search even if outpath exists. Returns: list: The hits from the search results. Todo: * Document and test!
juraj-google-style
def ipv4_lstrip_zeros(address): obj = address.strip().split('.') for (x, y) in enumerate(obj): obj[x] = y.split('/')[0].lstrip('0') if (obj[x] in ['', None]): obj[x] = '0' return '.'.join(obj)
The function to strip leading zeros in each octet of an IPv4 address. Args: address (:obj:`str`): An IPv4 address. Returns: str: The modified IPv4 address.
codesearchnet
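A quick usage sketch, assuming the function above is in scope:

# Leading zeros are stripped per octet; an all-zero octet collapses to '0'.
print(ipv4_lstrip_zeros('074.036.017.005'))   # '74.36.17.5'
print(ipv4_lstrip_zeros('010.001.000.200'))   # '10.1.0.200'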
def __init__(self, enterprise_configuration): super(DegreedAPIClient, self).__init__(enterprise_configuration) self.global_degreed_config = apps.get_model('degreed', 'DegreedGlobalConfiguration').current() self.session = None self.expires_at = None
Instantiate a new client. Args: enterprise_configuration (DegreedEnterpriseCustomerConfiguration): An enterprise customer's configuration model for connecting with Degreed
juraj-google-style
def step(self, actions): (observations, raw_rewards, dones, infos) = self._step(actions) raw_rewards = raw_rewards.astype(np.float32) processed_rewards = self.process_rewards(raw_rewards) processed_observations = self.process_observations(observations) self.trajectories.step(processed_observations, raw_rewards, processed_rewards, dones, actions) return (processed_observations, processed_rewards, dones, infos)
Takes a step in all environments. Subclasses should override _step to do the actual stepping if something other than the default implementation is desired. Args: actions: Batch of actions. Returns: (preprocessed_observations, processed_rewards, dones, infos).
codesearchnet
def generate_visualizations(methods, data, true_labels, base_dir='visualizations', figsize=(18, 10), **scatter_options): plt.figure(figsize=figsize) for method in methods: preproc = method[0] if isinstance(preproc, Preprocess): (preprocessed, ll) = preproc.run(data) output_names = preproc.output_names else: p1 = data output_names = [''] for p in preproc: (p1, ll) = p.run(p1) p1 = p1[0] output_names[0] = (output_names[0] + p.output_names[0]) preprocessed = [p1] for (r, name) in zip(preprocessed, output_names): print(name) if (r.shape[0] == 2): r_dim_red = r elif (sparse.issparse(r) and (r.shape[0] > 100)): name = ('tsvd_' + name) tsvd = TruncatedSVD(50) r_dim_red = tsvd.fit_transform(r.T) try: tsne = TSNE(2) r_dim_red = tsne.fit_transform(r_dim_red).T name = ('tsne_' + name) except: tsvd2 = TruncatedSVD(2) r_dim_red = tsvd2.fit_transform(r_dim_red).T else: name = ('tsne_' + name) tsne = TSNE(2) r_dim_red = tsne.fit_transform(r.T).T if isinstance(method[1], list): for clustering_method in method[1]: try: cluster_labels = clustering_method.run(r) except: print('clustering failed') continue output_path = (base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name)) visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options) else: clustering_method = method[1] try: cluster_labels = clustering_method.run(r) except: print('clustering failed') continue output_path = (base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name)) visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options) output_path = (base_dir + '/{0}_true_labels.png'.format(name)) visualize_dim_red(r_dim_red, true_labels, output_path, **scatter_options)
Generates visualization scatters for all the methods. Args: methods: follows same format as run_experiments. List of tuples. data: genes x cells true_labels: array of integers base_dir: base directory to save all the plots figsize: tuple of ints representing size of figure scatter_options: options for plt.scatter
codesearchnet
def tokenize(self, vector_list): vector_arr = np.array(vector_list) if vector_arr.ndim == 1: key_arr = vector_arr.argmax() else: key_arr = vector_arr.argmax(axis=-1) return self.__token_arr[key_arr]
Tokenize vectors. Args: vector_list: The list of vectors, one vector per token. Returns: The token(s) corresponding to the argmax of each vector.
juraj-google-style
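The lookup is just an argmax over one-hot (or probability) vectors into a token array. A self-contained numpy illustration with a hypothetical token table (self.__token_arr is not shown in the snippet above):

import numpy as np

token_arr = np.array(['<pad>', 'hello', 'world'])   # hypothetical stand-in for self.__token_arr
vectors = np.array([[0.1, 0.8, 0.1],
                    [0.0, 0.2, 0.8]])
keys = vectors.argmax(axis=-1)   # -> [1, 2]
print(token_arr[keys])           # -> ['hello' 'world']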
def _send(self, method, path, data, filename): if (filename is None): return self._send_json(method, path, data) else: return self._send_file(method, path, data, filename)
Send data to a remote server, either with a POST or a PUT request. Args: `method`: The method (POST or PUT) to use. `path`: The path to the resource. `data`: The data to send. `filename`: The filename of the file to send (if any). Returns: The content of the response. Raises: An exception depending on the HTTP status code of the response.
codesearchnet
def _check_load_existing_object(self, object_type, id_field_name, operation='update'): self._check_existing_object(object_type, id_field_name) if not self._load_from_hdx(object_type, self.data[id_field_name]): raise HDXError('No existing %s to %s!' % (object_type, operation))
Check metadata exists and contains HDX object identifier, and if so load HDX object Args: object_type (str): Description of HDX object type (for messages) id_field_name (str): Name of field containing HDX object identifier operation (str): Operation to report if error. Defaults to update. Returns: None
juraj-google-style
def toggle_scan(self, enable, filter_duplicates=False): command = struct.pack(">BB", enable, filter_duplicates) self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, command)
Enables or disables BLE scanning Args: enable: boolean value to enable (True) or disable (False) scanner filter_duplicates: boolean value to enable/disable filter, that omits duplicated packets
juraj-google-style
def assert_no_new_python_objects(self, threshold=None): self._python_memory_checker.assert_no_new_objects(threshold=threshold)
Raises an exception if there are new Python objects created. It computes the number of new Python objects per type using the first and the last snapshots. Args: threshold: A dictionary of [Type name string], [count] pair. It won't raise an exception if the new Python objects are under this threshold.
github-repos
def OpenSourcePath(self, source_path): source_path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_OS, location=source_path) self.AddScanNode(source_path_spec, None)
Opens the source path. Args: source_path (str): source path.
codesearchnet
def peek_with_kwargs(init, args=[]): def peek(store, container, _stack=None): return init(*[store.peek(attr, container, _stack=_stack) for attr in args], **dict([(attr, store.peek(attr, container, _stack=_stack)) for attr in container if (attr not in args)])) return peek
Make datatypes by passing keyword arguments to the constructor. This is a factory function; it returns the actual `peek` routine. Arguments: init (callable): type constructor. args (iterable): arguments NOT to be passed as keywords; order does matter. Returns: callable: deserializer (`peek` routine). All the peeked attributes that are not referenced in `args` are passed to `init` as keyword arguments.
codesearchnet
def validate_txn_obj(obj_name, obj, key, validation_fun): backend = bigchaindb.config['database']['backend'] if (backend == 'localmongodb'): data = obj.get(key, {}) if isinstance(data, dict): validate_all_keys_in_obj(obj_name, data, validation_fun) elif isinstance(data, list): validate_all_items_in_list(obj_name, data, validation_fun)
Validate value of `key` in `obj` using `validation_fun`. Args: obj_name (str): name for `obj` being validated. obj (dict): dictionary object. key (str): key to be validated in `obj`. validation_fun (function): function used to validate the value of `key`. Returns: None: indicates validation successful Raises: ValidationError: `validation_fun` will raise exception on failure
codesearchnet
def __init__(self, oxm_class=OxmClass.OFPXMC_OPENFLOW_BASIC, oxm_field=None, oxm_hasmask=False, oxm_value=None): super().__init__() self.oxm_class = oxm_class self.oxm_field_and_mask = None self.oxm_length = None self.oxm_value = oxm_value self.oxm_field = oxm_field self.oxm_hasmask = oxm_hasmask
Create an OXM TLV struct with the optional parameters below. Args: oxm_class (OxmClass): Match class: member class or reserved class oxm_field (OxmMatchFields, OxmOfbMatchField): Match field within the class oxm_hasmask (bool): Set if OXM include a bitmask in payload oxm_value (bytes): OXM Payload
juraj-google-style
def solution(swarm): best = swarm[0] cmp = comparator(best.best_fitness) for particle in swarm: if cmp(particle.best_fitness, best.best_fitness): best = particle return best
Determines the global best particle in the swarm. Args: swarm: iterable: an iterable that yields all particles in the swarm. Returns: cipy.algorithms.pso.Particle: The best particle in the swarm when comparing the best_fitness values of the particles.
codesearchnet
def listdir(dir_name, get_dirs=None, get_files=None, hide_ignored=False): if ((get_dirs is None) and (get_files is None)): get_dirs = True get_files = True source_dir = os.path.join(settings.BASE_DIR, 'app', dir_name) dirs = [] for dir_or_file_name in os.listdir(source_dir): path = os.path.join(source_dir, dir_or_file_name) if (hide_ignored and dir_or_file_name.startswith('_')): continue is_dir = os.path.isdir(path) if ((get_dirs and is_dir) or (get_files and (not is_dir))): dirs.append(dir_or_file_name) return dirs
Return list of all dirs and files inside given dir. Also can filter contents to return only dirs or files. Args: - dir_name: Which directory we need to scan (relative) - get_dirs: Return dirs list - get_files: Return files list - hide_ignored: Exclude files and dirs with initial underscore
codesearchnet
def myGrades(year, candidateNumber, badFormat, length): weights1 = [1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5] weights2 = [1, 1, 1, 1, 1, 1, 0.5, 0.5] if (year == 1): myFinalResult = (sum([(int(badFormat[candidateNumber][(2 * (i + 1))]) * weights1[i]) for i in range((length - 1))]) / 6) elif ((year == 2) or (year == 3)): myFinalResult = (sum([(int(badFormat[candidateNumber][(2 * (i + 1))]) * weights2[i]) for i in range((length - 1))]) / 7) elif (year == 4): myFinalResult = (sum([int(badFormat[candidateNumber][(2 * (i + 1))]) for i in range((length - 1))]) / 8) return myFinalResult
Returns the final result of a candidate in a given year. Arguments: year {int} -- the year the candidate is in candidateNumber {str} -- the candidate number of the candidate badFormat {dict} -- candidateNumber : [results for candidate] length {int} -- length of each row in badFormat divided by 2 Returns: int -- a weighted average for a specific candidate number and year
codesearchnet
def underlying_variable(t): t = underlying_variable_ref(t) assert (t is not None) if (not hasattr(tf.get_default_graph(), 'var_index')): tf.get_default_graph().var_index = {} var_index = tf.get_default_graph().var_index for v in tf.global_variables()[len(var_index):]: var_index[v.name] = v return var_index[t.name]
Find the underlying tf.Variable object. Args: t: a Tensor Returns: tf.Variable.
codesearchnet
def move_to(self, x=0, y=0): self._driver.move_to(self, x, y)
Deprecated: use element.touch('drag', { toX, toY, duration(s) }) instead. Move the mouse by an offset of the specified element. Support: Android Args: x(float): X offset to move to, relative to the top-left corner of the element. y(float): Y offset to move to, relative to the top-left corner of the element. Returns: WebElement object.
juraj-google-style
def Patch(self, request, global_params=None): config = self.GetMethodConfig('Patch') return self._RunMethod(config, request, global_params=global_params)
Patch specific fields in the specified model. Args: request: (BigqueryModelsPatchRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Model) The response message.
github-repos
def bbox_flip(bbox, d, rows, cols): if (d == 0): bbox = bbox_vflip(bbox, rows, cols) elif (d == 1): bbox = bbox_hflip(bbox, rows, cols) elif (d == (- 1)): bbox = bbox_hflip(bbox, rows, cols) bbox = bbox_vflip(bbox, rows, cols) else: raise ValueError('Invalid d value {}. Valid values are -1, 0 and 1'.format(d)) return bbox
Flip a bounding box either vertically, horizontally or both depending on the value of `d`. Raises: ValueError: if value of `d` is not -1, 0 or 1.
codesearchnet
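bbox_vflip and bbox_hflip are not shown above. A self-contained toy sketch of the same dispatch idea, assuming boxes are normalized (x_min, y_min, x_max, y_max) tuples — the helpers here are simplified stand-ins, not the library's own implementations:

# Toy flip helpers for normalized boxes (assumed format, for illustration only).
def toy_vflip(bbox):
    x_min, y_min, x_max, y_max = bbox
    return (x_min, 1.0 - y_max, x_max, 1.0 - y_min)

def toy_hflip(bbox):
    x_min, y_min, x_max, y_max = bbox
    return (1.0 - x_max, y_min, 1.0 - x_min, y_max)

box = (0.1, 0.2, 0.4, 0.5)
print(toy_vflip(box))              # d == 0:  (0.1, 0.5, 0.4, 0.8)
print(toy_hflip(box))              # d == 1:  (0.6, 0.2, 0.9, 0.5)
print(toy_hflip(toy_vflip(box)))   # d == -1: (0.6, 0.5, 0.9, 0.8)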
def AddWarning(self, warning): self._RaiseIfNotWritable() self._storage_file.AddWarning(warning) self.number_of_warnings += 1
Adds a warning. Args: warning (ExtractionWarning): an extraction warning. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
juraj-google-style
def _FindNodeWithStandaloneLineParent(node): if pytree_utils.NodeName(node.parent) in _STANDALONE_LINE_NODES: return node else: return _FindNodeWithStandaloneLineParent(node.parent)
Find a node whose parent is a 'standalone line' node. See the comment above _STANDALONE_LINE_NODES for more details. Arguments: node: node to start from Returns: Suitable node that's either the node itself or one of its ancestors.
github-repos
def _zsh_comp_command(self, zcf, cmd, grouping, add_help=True): if add_help: if grouping: print("+ '(help)'", end=BLK, file=zcf) print("'--help[show help message]'", end=BLK, file=zcf) print("'-h[show help message]'", end=BLK, file=zcf) no_comp = ('store_true', 'store_false') cmd_dict = (self._opt_cmds[cmd] if cmd else self._opt_bare) for (opt, sct) in cmd_dict.items(): meta = self._conf[sct].def_[opt] if (meta.cmd_kwargs.get('action') == 'append'): (grpfmt, optfmt) = ("+ '{}'", "'*{}[{}]{}'") if (meta.comprule is None): meta.comprule = '' else: (grpfmt, optfmt) = ("+ '({})'", "'{}[{}]{}'") if ((meta.cmd_kwargs.get('action') in no_comp) or (meta.cmd_kwargs.get('nargs') == 0)): meta.comprule = None if (meta.comprule is None): compstr = '' elif (meta.comprule == ''): optfmt = optfmt.split('[') optfmt = ((optfmt[0] + '=[') + optfmt[1]) compstr = ': :( )' else: optfmt = optfmt.split('[') optfmt = ((optfmt[0] + '=[') + optfmt[1]) compstr = ': :{}'.format(meta.comprule) if grouping: print(grpfmt.format(opt), end=BLK, file=zcf) for name in _names(self._conf[sct], opt): print(optfmt.format(name, meta.help.replace("'", '\'"\'"\''), compstr), end=BLK, file=zcf)
Write zsh _arguments compdef for a given command. Args: zcf (file): zsh compdef file. cmd (str): command name, set to None or '' for bare command. grouping (bool): group options (zsh>=5.4). add_help (bool): add an help option.
codesearchnet
def __init__(self, *args, **kwargs): self.command = list(args) self.directory = kwargs['directory'] if 'directory' in kwargs else None self.env_vars = kwargs['env_vars'] if 'env_vars' in kwargs else None
Define a subcommand. Args: *args (str): Sequence of program arguments needed to run the command. directory (Optional[str]): Directory the command is run in. env_vars (Optional[dict]): Environment variable to feed to the subcommand.
juraj-google-style
def forward_transfer_pair(payer_transfer: LockedTransferSignedState, available_routes: List['RouteState'], channelidentifiers_to_channels: Dict, pseudo_random_generator: random.Random, block_number: BlockNumber) -> Tuple[(Optional[MediationPairState], List[Event])]: transfer_pair = None mediated_events: List[Event] = list() lock_timeout = BlockTimeout((payer_transfer.lock.expiration - block_number)) payee_channel = next_channel_from_routes(available_routes=available_routes, channelidentifiers_to_channels=channelidentifiers_to_channels, transfer_amount=payer_transfer.lock.amount, lock_timeout=lock_timeout) if payee_channel: assert (payee_channel.settle_timeout >= lock_timeout) assert (payee_channel.token_address == payer_transfer.token) message_identifier = message_identifier_from_prng(pseudo_random_generator) lock = payer_transfer.lock lockedtransfer_event = channel.send_lockedtransfer(channel_state=payee_channel, initiator=payer_transfer.initiator, target=payer_transfer.target, amount=get_lock_amount_after_fees(lock, payee_channel), message_identifier=message_identifier, payment_identifier=payer_transfer.payment_identifier, expiration=lock.expiration, secrethash=lock.secrethash) assert lockedtransfer_event transfer_pair = MediationPairState(payer_transfer, payee_channel.partner_state.address, lockedtransfer_event.transfer) mediated_events = [lockedtransfer_event] return (transfer_pair, mediated_events)
Given a payer transfer tries a new route to proceed with the mediation. Args: payer_transfer: The transfer received from the payer_channel. available_routes: Current available routes that may be used, it's assumed that the routes list is ordered from best to worst. channelidentifiers_to_channels: All the channels available for this transfer. pseudo_random_generator: Number generator to generate a message id. block_number: The current block number.
codesearchnet
def whois_domains(self, domains): api_name = 'opendns-whois-domain' fmt_url_path = u'whois/{0}' return self._multi_get(api_name, fmt_url_path, domains)
Calls WHOIS domain end point Args: domains: An enumerable of domains Returns: A dict of {domain: domain_result}
juraj-google-style
def _single_quote_handler_factory(on_single_quote, on_other): @coroutine def single_quote_handler(c, ctx, is_field_name=False): assert (c == _SINGLE_QUOTE) (c, self) = (yield) if ((c == _SINGLE_QUOTE) and (not _is_escaped(c))): (yield on_single_quote(c, ctx, is_field_name)) else: ctx.set_unicode(quoted_text=True) (yield on_other(c, ctx, is_field_name)) return single_quote_handler
Generates handlers used for classifying tokens that begin with one or more single quotes. Args: on_single_quote (callable): Called when another single quote is found. Accepts the current character's ordinal, the current context, and True if the token is a field name; returns a Transition. on_other (callable): Called when any character other than a single quote is found. Accepts the current character's ordinal, the current context, and True if the token is a field name; returns a Transition.
codesearchnet
def execute_dynamo_definition(self, definition_path, show_ui=False, shutdown=True, automation=False, path_exec=True): self._add_entry(templates.DYNAMO_COMMAND.format(dynamo_def_path=definition_path, dyn_show_ui=show_ui, dyn_automation=automation, dyn_path_exec=path_exec, dyn_shutdown=shutdown))
Execute a dynamo definition. Args: definition_path (str): full path to dynamo definition file show_ui (bool): show dynamo UI at execution shutdown (bool): shutdown model after execution automation (bool): activate dynamo automation path_exec (bool): activate dynamo path execute Examples: >>> jm = JournalMaker() >>> jm.execute_dynamo_definition( ... definition_path='C:/testdef.dyn', ... show_ui=True, ... shutdown=True ... )
codesearchnet
def __init__(self, cluster_resolver=None): if cluster_resolver is None: cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver() super(ParameterServerStrategyV1, self).__init__(ParameterServerStrategyExtended(self, cluster_resolver=cluster_resolver)) distribute_lib.distribution_strategy_gauge.get_cell('V1').set('ParameterServerStrategy')
Initializes this strategy with an optional `cluster_resolver`. Args: cluster_resolver: Optional `tf.distribute.cluster_resolver.ClusterResolver` object. Defaults to a `tf.distribute.cluster_resolver.TFConfigClusterResolver`.
github-repos
def _create_dom(data): if not isinstance(data, dhtmlparser.HTMLElement): data = dhtmlparser.parseString( utils.handle_encodnig(data) ) dhtmlparser.makeDoubleLinked(data) return data
Creates a double-linked DOM from `data`. Args: data (str/HTMLElement): Either string or HTML element. Returns: obj: HTMLElement containing double-linked DOM.
juraj-google-style
def _text_io_wrapper(stream, mode, encoding, errors, newline): if (('t' in mode) and (not hasattr(stream, 'encoding'))): text_stream = TextIOWrapper(stream, encoding=encoding, errors=errors, newline=newline) (yield text_stream) text_stream.flush() else: (yield stream)
Wrap a binary stream in a text stream. Args: stream (file-like object): binary stream. mode (str): Open mode. encoding (str): Stream encoding. errors (str): Decoding error handling. newline (str): Universal newlines
codesearchnet
def forward(self, prompt_masks: torch.FloatTensor, pred_masks: torch.FloatTensor, labels: torch.FloatTensor, bool_masked_pos: torch.BoolTensor): ground_truth = torch.cat((prompt_masks, labels), dim=2) mask = bool_masked_pos[:, :, None].repeat(1, 1, self.patch_size ** 2 * 3) mask = unpatchify(mask, ground_truth.shape[2] // self.patch_size, ground_truth.shape[3] // self.patch_size) loss = F.smooth_l1_loss(pred_masks, ground_truth, reduction='none', beta=self.beta) loss = (loss * mask).sum() / mask.sum() return loss
Computes the L1 loss between the predicted masks and the ground truth masks. Args: prompt_masks (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values from mask prompt. pred_masks (`torch.FloatTensor` of shape `(batch_size, num_channels, 2*height, width)`): Predicted masks. labels (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Ground truth mask for input images. bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Returns: `torch.FloatTensor`: The mean L1 loss between the predicted masks and the ground truth masks.
github-repos
def set_suite_run_display_name(self, suite_run_display_name): self._suite_run_display_name = suite_run_display_name
Interface for sub-classes to set a customized display name. This name provides run-specific context intended for display. Default to suite class name. Set this in sub-classes to include run-specific context. Args: suite_run_display_name: str, the display name to set.
github-repos
def EnableNetworkInterfaces(self, interfaces): if not interfaces or set(interfaces) == self.interfaces: return self.logger.info('Ethernet interfaces: %s.', interfaces) self.interfaces = set(interfaces) if self.dhcp_command: try: subprocess.check_call([self.dhcp_command]) except subprocess.CalledProcessError: self.logger.warning('Could not enable Ethernet interfaces.') return self.distro_utils.EnableNetworkInterfaces( interfaces, self.logger, dhclient_script=self.dhclient_script)
Enable the list of network interfaces. Args: interfaces: list of string, the output device names to enable.
juraj-google-style
def raster_to_asc(raster_f, asc_f): raster_r = RasterUtilClass.read_raster(raster_f) RasterUtilClass.write_asc_file(asc_f, raster_r.data, raster_r.nCols, raster_r.nRows, raster_r.geotrans, raster_r.noDataValue)
Convert raster format to ASCII raster. Args: raster_f: raster file. asc_f: output ASCII file.
juraj-google-style
def get_full_filename_by_suffixes(dir_src, suffixes): file_names = FileClass.get_filename_by_suffixes(dir_src, suffixes) if (file_names is None): return None return list((((dir_src + os.sep) + name) for name in file_names))
Get full file names with the given suffixes in the given directory. Args: dir_src: directory path suffixes: wanted suffixes Returns: a list of full file names with the given suffixes
codesearchnet
async def confirmbalance(self, *args, **kwargs): if kwargs.get('message'): kwargs = json.loads(kwargs.get('message', '{}')) txid = kwargs.get('txid') coinid = kwargs.get('coinid') buyer_address = kwargs.get('buyer_address') cid = kwargs.get('cid') address = kwargs.get('buyer_address') try: coinid = coinid.replace('TEST', '') except: pass if (not all([coinid, cid, buyer_address, txid])): return {'error': 400, 'reason': 'Confirm balance. Missed required fields'} if (not (coinid in settings.bridges.keys())): return (await self.error_400(('Confirm balance. Invalid coinid: %s' % coinid))) self.account.blockchain.setendpoint(settings.bridges[coinid]) offer = (await self.account.blockchain.getoffer(cid=cid, buyer_address=buyer_address)) amount = int(offer['price']) coinid = 'PUT' history_database = self.client[settings.HISTORY] history_collection = history_database[coinid] history = (await history_collection.find_one({'txid': txid})) try: account = (await self.account.getaccountdata(public_key=history['public_key'])) except: return (await self.error_404('Confirm balance. Not found current deal.')) database = self.client[self.collection] balance_collection = database[coinid] balance = (await balance_collection.find_one({'uid': account['id']})) submitted = (int(balance['amount_frozen']) - int(amount)) if (submitted < 0): return (await self.error_400('Not enough frozen amount.')) decremented = (await balance_collection.find_one_and_update({'uid': account['id']}, {'$set': {'amount_frozen': str(submitted)}})) difference = (int(balance['amount_active']) + int(amount)) updated = (await balance_collection.find_one_and_update({'uid': account['id']}, {'$set': {'amount_active': str(difference)}})) if (not updated): return {'error': 404, 'reason': 'Confirm balance. Not found current transaction id'} (await history_collection.find_one_and_update({'txid': txid}, {'$unset': {'txid': 1}})) if (int(account['level']) == 2): (await self.account.updatelevel(**{'id': account['id'], 'level': 3})) return {i: updated[i] for i in updated if ((i != '_id') and (i != 'txid'))}
Confirm balance after trading Accepts: - message (signed dictionary): - "txid" - str - "coinid" - str - "amount" - int Returns: - "address" - str - "coinid" - str - "amount" - int - "uid" - int - "unconfirmed" - int (0 by default) - "deposit" - int (0 by default) Verified: True
codesearchnet
def splitpath(self, path): path = self.normcase(path) sep = self._path_separator(path) path_components = path.split(sep) if not path_components: return ('', '') starts_with_drive = self._starts_with_drive_letter(path) basename = path_components.pop() colon = self._matching_string(path, ':') if not path_components: if starts_with_drive: components = basename.split(colon) return (components[0] + colon, components[1]) return ('', basename) for component in path_components: if component: while not path_components[-1]: path_components.pop() if starts_with_drive: if not path_components: components = basename.split(colon) return (components[0] + colon, components[1]) if (len(path_components) == 1 and path_components[0].endswith(colon)): return (path_components[0] + sep, basename) return (sep.join(path_components), basename) return (sep, basename)
Mimic os.path.split using the specified path_separator. Mimics os.path.split using the path_separator that was specified for this FakeFilesystem. Args: path: (str) The path to split. Returns: (tuple) A 2-tuple (pathname, basename) for which pathname does not end with a slash, and basename does not contain a slash.
juraj-google-style
def FromJsonString(self, value): if len(value) < 1 or value[-1] != 's': raise ParseError( 'Duration must end with letter "s": {0}.'.format(value)) try: pos = value.find('.') if pos == -1: self.seconds = int(value[:-1]) self.nanos = 0 else: self.seconds = int(value[:pos]) if value[0] == '-': self.nanos = int(round(float('-0{0}'.format(value[pos: -1])) *1e9)) else: self.nanos = int(round(float('0{0}'.format(value[pos: -1])) *1e9)) except ValueError: raise ParseError( 'Couldn\'t parse duration: {0}.'.format(value))
Converts a string to Duration. Args: value: A string to be converted. The string must end with 's'. Any fractional digits (or none) are accepted as long as they fit into precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s". Raises: ParseError: On parsing problems.
juraj-google-style
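A short usage sketch with the protobuf well-known Duration type, which exposes this method:

from google.protobuf import duration_pb2

d = duration_pb2.Duration()
d.FromJsonString('1.000340012s')
print(d.seconds, d.nanos)   # 1 340012
d.FromJsonString('-3.100s')
print(d.seconds, d.nanos)   # -3 -100000000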
def create_mask(x): unique = np.unique(x) num_unique_elems = len(unique) keys = range(num_unique_elems) d = dict(zip(unique, keys)) mask_map = dict(zip(keys, unique)) return ([d[el] for el in x], mask_map, num_unique_elems)
Given a list of objects, creates an integer mask for the unique values in the list. Args: x: 1-d numpy array. Returns: A tuple of three objects: * A list of integers that is the mask for `x`, * A dictionary map between entries of `x` and the list * The number of unique elements.
github-repos
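A quick usage sketch, assuming create_mask above is in scope:

import numpy as np

mask, mask_map, n = create_mask(np.array(['b', 'a', 'b', 'c']))
print(mask)       # [1, 0, 1, 2] -- np.unique sorts, so a -> 0, b -> 1, c -> 2
print(mask_map)   # maps the integer keys back to the original entries
print(n)          # 3 unique elements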
def get_full_alias(self, query): if query in self.alias_table.sections(): return query return next((section for section in self.alias_table.sections() if section.split()[0] == query), '')
Get the full alias given a search query. Args: query: The query this function performs searching on. Returns: The full alias (with the placeholders, if any).
juraj-google-style
def inspect_edge(G: AnalysisGraph, source: str, target: str): return create_statement_inspection_table(G[source][target]['InfluenceStatements'])
'Drill down' into an edge in the analysis graph and inspect its provenance. This function prints the provenance. Args: G: The analysis graph. source: The source node of the edge. target: The target node of the edge.
codesearchnet
def parse(self, ping_message): try: if typepy.is_not_null_string(ping_message.stdout): ping_message = ping_message.stdout except AttributeError: pass logger.debug('parsing ping result: {}'.format(ping_message)) self.__parser = NullPingParser() if typepy.is_null_string(ping_message): logger.debug('ping_message is empty') self.__stats = PingStats() return self.__stats ping_lines = _to_unicode(ping_message).splitlines() parser_class_list = (LinuxPingParser, WindowsPingParser, MacOsPingParser, AlpineLinuxPingParser) for parser_class in parser_class_list: self.__parser = parser_class() try: self.__stats = self.__parser.parse(ping_lines) return self.__stats except ParseError as e: if (e.reason != ParseErrorReason.HEADER_NOT_FOUND): raise e except pp.ParseException: pass self.__parser = NullPingParser() return self.__stats
Parse ping command output. Args: ping_message (str or :py:class:`~pingparsing.PingResult`): ``ping`` command output. Returns: :py:class:`~pingparsing.PingStats`: Parsed result.
codesearchnet
def restore_from_checkpoint(self, checkpoint_path): import tensorflow as tf all_vars = tf.contrib.slim.get_variables_to_restore( exclude=['InceptionV3/AuxLogits', 'InceptionV3/Logits', 'global_step']) saver = tf.train.Saver(all_vars) saver.restore(self.tf_session, checkpoint_path)
To restore inception model variables from the checkpoint file. Some variables might be missing in the checkpoint file, so it only loads the ones that are available, assuming the rest would be initialized later. Args: checkpoint_path: Path to the checkpoint file for the Inception graph.
juraj-google-style
def convert_bytes(value): n = np.rint(len(str(value))/4).astype(int) return value/(1024**n), sizes[n]
Reduces bytes to more convenient units (e.g. KiB, GiB, TiB, etc.). Args: value (int): Value in bytes Returns: tup (tuple): Tuple of value, unit (e.g. (10, 'MiB'))
juraj-google-style
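`sizes` is a module-level lookup not shown above. A hedged, self-contained sketch with an assumed unit table, illustrating how the digit-count heuristic picks the unit:

import numpy as np

sizes = ('B', 'KiB', 'MiB', 'GiB', 'TiB')   # assumed unit table, not shown in the source

def convert_bytes(value):
    # Unit index is estimated from the number of decimal digits in the value.
    n = np.rint(len(str(value)) / 4).astype(int)
    return value / (1024 ** n), sizes[n]

print(convert_bytes(2048))       # 4 digits -> n = 1 -> (2.0, 'KiB')
print(convert_bytes(10485760))   # 8 digits -> n = 2 -> (10.0, 'MiB')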
def to_dict(pipe: BeamEventSet, schema: Schema, timestamp_key: str='timestamp', format: DictEventSetFormatChoices=DictEventSetFormat.GROUPED_BY_INDEX) -> beam.PCollection[Dict[str, Any]]: grouped_by_features = add_feature_idx_and_flatten(pipe) | 'Group by index ' >> beam.GroupByKey() if format == DictEventSetFormat.GROUPED_BY_INDEX: return grouped_by_features | 'Convert to dict' >> beam.Map(_convert_to_dict_event_set_key_value, schema, timestamp_key) elif format == DictEventSetFormat.SINGLE_EVENTS: return grouped_by_features | 'Convert to dict' >> beam.FlatMap(_convert_to_dict_event_key_value, schema, timestamp_key) else: raise ValueError(f'Unknown format {format}')
Converts a Beam EventSet to PCollection of key->value. This method is compatible with the output of `from_csv_raw` and the Official Beam IO connectors. This method is the inverse of `to_event_set`. Args: pipe: PCollection of Beam EventSet. schema: Schema of the data. timestamp_key: Key containing the timestamps. format: Format of the events inside the output dictionary. See [DictEventSetFormat][temporian.io.format.DictEventSetFormat] for more. Returns: Beam pipe of key values.
github-repos
def moveaxis(x, source, destination): if any_symbolic_tensors((x,)): return Moveaxis(source, destination).symbolic_call(x) return backend.numpy.moveaxis(x, source=source, destination=destination)
Move axes of a tensor to new positions. Other axes remain in their original order. Args: x: Tensor whose axes should be reordered. source: Original positions of the axes to move. These must be unique. destination: Destination positions for each of the original axes. These must also be unique. Returns: Tensor with moved axes.
github-repos
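The semantics follow numpy's moveaxis; a short shape-level sketch (np.moveaxis is used here purely to illustrate the convention the op follows):

import numpy as np

x = np.zeros((3, 4, 5))
print(np.moveaxis(x, 0, -1).shape)             # (4, 5, 3)
print(np.moveaxis(x, [0, 1], [-1, -2]).shape)  # (5, 4, 3)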
def __init__(self, *columns, **kwargs): if not all([isinstance(c, Column) for c in columns]): raise TypeError('All elements of Row must be Column instances') self.type = 'row' self.columns = columns
Init method. Args: *columns (): the instances of Column. **kwargs (): not used.
juraj-google-style
def predict_proba(self, text): assert isinstance(text, str) words = self.tokenizer(text) X = self.preprocessor.transform([words]) y = self.model.predict(X) y = y[0] return y
Probability estimates. The returned estimates for all classes are ordered by the label of classes. Args: text : string, the input text. Returns: y : array-like, shape = [num_words, num_classes] Returns the probability of the word for each class in the model.
juraj-google-style
def get_acgt_geno_marker(self, marker): geno, snp_position = self.get_geno_marker(marker, return_index=True) return self._allele_encoding[snp_position][geno]
Gets the genotypes for a given marker (ACGT format). Args: marker (str): The name of the marker. Returns: numpy.ndarray: The genotypes of the marker (ACGT format).
juraj-google-style
def _GetFileSystemTypeFromFileEntry(self, file_entry): if file_entry.type_indicator != dfvfs_definitions.TYPE_INDICATOR_TSK: return file_entry.type_indicator file_system = file_entry.GetFileSystem() fs_info = file_system.GetFsInfo() if fs_info.info: type_string = '{0!s}'.format(fs_info.info.ftype) if type_string.startswith('TSK_FS_TYPE_'): type_string = type_string[12:] if type_string.endswith('_DETECT'): type_string = type_string[:-7] return type_string
Retrieves the file system type indicator of a file entry. Args: file_entry (dfvfs.FileEntry): a file entry. Returns: str: file system type.
juraj-google-style
def is_copy_constructor(constructor): assert isinstance(constructor, calldef_members.constructor_t) args = constructor.arguments parent = constructor.parent if (len(args) != 1): return False arg = args[0] if (not isinstance(arg.decl_type, cpptypes.compound_t)): return False if (not type_traits.is_reference(arg.decl_type)): return False if (not type_traits.is_const(arg.decl_type.base)): return False un_aliased = type_traits.remove_alias(arg.decl_type.base) if (not isinstance(un_aliased.base, cpptypes.declarated_t)): return False return (id(un_aliased.base.declaration) == id(parent))
Check if the declaration is a copy constructor. Args: constructor (declarations.constructor_t): the constructor to be checked. Returns: bool: True if this is a copy constructor, False instead.
codesearchnet
def textx_isinstance(obj, obj_cls): if isinstance(obj, obj_cls): return True if hasattr(obj_cls, "_tx_fqn") and hasattr(obj, "_tx_fqn"): if obj_cls._tx_fqn == obj._tx_fqn: return True if hasattr(obj_cls, "_tx_inh_by"): for cls in obj_cls._tx_inh_by: if (textx_isinstance(obj, cls)): return True return False
This function determines, if a textx object is an instance of a textx class. Args: obj: the object to be analyzed obj_cls: the class to be checked Returns: True if obj is an instance of obj_cls.
juraj-google-style
def get_source_var_declaration(self, var): return next((x.source_mapping for x in self.variables if x.name == var))
Return the source mapping where the variable is declared Args: var (str): variable name Returns: (dict): sourceMapping
juraj-google-style
def vae(x, z_size, name=None): with tf.variable_scope(name, default_name='vae'): mu = tf.layers.dense(x, z_size, name='mu') log_sigma = tf.layers.dense(x, z_size, name='log_sigma') shape = common_layers.shape_list(x) epsilon = tf.random_normal([shape[0], shape[1], 1, z_size]) z = (mu + (tf.exp((log_sigma / 2)) * epsilon)) kl = (0.5 * tf.reduce_mean(((tf.expm1(log_sigma) + tf.square(mu)) - log_sigma), axis=(- 1))) free_bits = (z_size // 4) kl_loss = tf.reduce_mean(tf.maximum((kl - free_bits), 0.0)) return (z, kl_loss, mu, log_sigma)
Simple variational autoencoder without discretization. Args: x: Input to the discretization bottleneck. z_size: Number of bits, where discrete codes range from 1 to 2**z_size. name: Name for the bottleneck scope. Returns: The latent tensor, KL loss, mu and log_sigma.
codesearchnet
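The reparameterization and the per-dimension KL term above can be mirrored in plain numpy. A minimal sketch of the same math with made-up mu/log_sigma values (not a TF graph):

import numpy as np

rng = np.random.default_rng(0)
mu = np.zeros(4)
log_sigma = np.log(np.full(4, 0.25))   # sigma^2 = 0.25 per dimension

epsilon = rng.standard_normal(4)
z = mu + np.exp(log_sigma / 2) * epsilon   # reparameterized sample with std 0.5

# Same KL expression as the graph above: 0.5 * mean(expm1(log_sigma) + mu^2 - log_sigma)
kl = 0.5 * np.mean(np.expm1(log_sigma) + mu ** 2 - log_sigma)
print(z.shape, float(kl))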
def result_wrapper(result_fn): def decorated(metric_obj, *args): has_strategy = distribute_lib.has_strategy() replica_context = distribute_lib.get_replica_context() if not has_strategy or replica_context is None or (not distribute_lib.get_strategy().extended._use_merge_call()): with distribute_lib.variable_sync_on_read_context(): raw_result = result_fn(*args) if isinstance(raw_result, (tensor.Tensor, variables_module.Variable, float, int)): result_t = array_ops.identity(raw_result) elif isinstance(raw_result, dict): result_t = {key: array_ops.identity(value) for key, value in raw_result.items()} else: try: result_t = array_ops.identity(raw_result) except (ValueError, TypeError): raise RuntimeError('The output of `metric.result()` can only be a single Tensor/Variable, or a dict of Tensors/Variables. For metric %s, got result %s.' % (metric_obj.name, raw_result)) else: def merge_fn_wrapper(distribution, merge_fn, *args): result = distribution.experimental_local_results(merge_fn)[0](*args) return array_ops.identity(result) result_t = replica_context.merge_call(merge_fn_wrapper, args=(result_fn,) + args) metric_obj._call_result = result_t return result_t return tf_decorator.make_decorator(result_fn, decorated)
Decorator to wrap metric `result()` function in `merge_call()`. Result computation is an idempotent operation that simply calculates the metric value using the state variables. If metric state variables are distributed across replicas/devices and `result()` is requested from the context of one device - This function wraps `result()` in a distribution strategy `merge_call()`. With this, the metric state variables will be aggregated across devices. Args: result_fn: function that computes the metric result. Returns: Decorated function that wraps `result_fn()` in distribution strategy `merge_call()`.
github-repos
class AveragePooling2D(keras_layers.AveragePooling2D, base.Layer): def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): if strides is None: raise ValueError('Argument `strides` must not be None.') super(AveragePooling2D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)
Average pooling layer for 2D inputs (e.g. images). Args: pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer.
github-repos
def visit_ImportFrom(self, node): if not node.module: self.generic_visit(node) return from_import = node.module for import_alias in node.names: full_module_name = '%s.%s' % (from_import, import_alias.name) full_import = (full_module_name, import_alias.asname) detection = self._api_analysis_spec.imports_to_detect.get(full_import, None) if detection: self.add_result(detection) self.add_log(detection.log_level, node.lineno, node.col_offset, detection.log_message) self.generic_visit(node)
Handle visiting an import-from node in the AST. Args: node: Current Node
github-repos
def delete_group(self, name): self.service.delete_group( name, self.url_prefix, self.auth, self.session, self.session_send_opts)
Delete given group. Args: name (string): Name of group. Raises: requests.HTTPError on failure.
juraj-google-style
def _request(self, method, url, **kwargs): resp = self._session.request(method, '{}/{}'.format(self._base_url, url), headers=self._headers, **kwargs) try: resp.raise_for_status() except HTTPError as e: logging.error(resp.content) raise RestClientError(e) return resp
Make HTTP request and return response object Args: method (str): GET, POST, PUT, DELETE url (str): path appended to the base_url to create request **kwargs: passed directly to a requests.request object
juraj-google-style
def tensor_layout(self, arg): if isinstance(arg, Tensor): arg = arg.shape return self.layout_rules.tensor_layout(arg, self.shape)
Compute TensorLayout for a Tensor or a Shape. Args: arg: Tensor or Shape. Returns: TensorLayout.
codesearchnet
def _mapping(self): return self.__search_client.get('/unstable/index/{}/mapping'.format(mdf_toolbox.translate_index(self.index)))['mappings']
Fetch the entire mapping for the specified index. Returns: dict: The full mapping for the index.
codesearchnet
def combine(**kwargs): if not kwargs: return [OrderedDict()] sort_by_key = lambda k: k[0] kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key)) first = list(kwargs.items())[0] rest = dict(list(kwargs.items())[1:]) rest_combined = combine(**rest) key = first[0] values = first[1] if not isinstance(values, list): values = [values] return [OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key)) for v in values for combined in rest_combined]
Generate combinations based on its keyword arguments. Two sets of returned combinations can be concatenated using +. Their product can be computed using `times()`. Args: **kwargs: keyword arguments of form `option=[possibilities, ...]` or `option=the_only_possibility`. Returns: a list of dictionaries for each combination. Keys in the dictionaries are the keyword argument names. Each key has one value - one of the corresponding keyword argument values.
github-repos
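A quick usage sketch, assuming combine above is in scope (the option names are arbitrary examples):

print(combine(mode=['graph', 'eager'], num_gpus=[0, 1]))
# [OrderedDict([('mode', 'graph'), ('num_gpus', 0)]),
#  OrderedDict([('mode', 'graph'), ('num_gpus', 1)]),
#  OrderedDict([('mode', 'eager'), ('num_gpus', 0)]),
#  OrderedDict([('mode', 'eager'), ('num_gpus', 1)])]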
def convert_wav(org_wav_fn: Path, tgt_wav_fn: Path) -> None: if not org_wav_fn.exists(): raise FileNotFoundError args = [config.FFMPEG_PATH, "-i", str(org_wav_fn), "-ac", "1", "-ar", "16000", str(tgt_wav_fn)] subprocess.run(args)
Converts the wav into a 16bit mono 16000Hz wav. Args: org_wav_fn: A `Path` to the original wave file tgt_wav_fn: The `Path` to output the processed wave file
juraj-google-style
def _module_info_from_proto_safe(module_info_def, import_scope=None): try: return _module_info_from_proto(module_info_def, import_scope) except Exception as e: logging.warning( "Error encountered when deserializing sonnet ModuleInfo:\n%s", str(e)) return None
Deserializes the `module_info_def` proto without raising exceptions. Args: module_info_def: An instance of `module_pb2.SonnetModule`. import_scope: Optional `string`. Name scope to use. Returns: An instance of `ModuleInfo`.
juraj-google-style
def _NonEagerInputs(op: ops.Operation, xs_set): return [t for t in _Inputs(op, xs_set) if not isinstance(t, ops.EagerTensor)]
Returns the inputs of op, crossing closure boundaries where necessary. Does not return any captured EagerTensors, i.e., the number of tensors returned may be less than the actual number of inputs. Args: op: Operation xs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t. Returns: A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op is in a FuncGraph and has captured inputs.
github-repos
def CopyFromDateTimeString(self, time_string):
    date_time_values = self._CopyDateTimeFromString(time_string)

    year = date_time_values.get('year', 0)
    month = date_time_values.get('month', 0)
    day_of_month = date_time_values.get('day_of_month', 0)
    hours = date_time_values.get('hours', 0)
    minutes = date_time_values.get('minutes', 0)
    seconds = date_time_values.get('seconds', 0)
    microseconds = date_time_values.get('microseconds', 0)

    timestamp = self._GetNumberOfSecondsFromElements(
        year, month, day_of_month, hours, minutes, seconds)
    timestamp *= definitions.MILLISECONDS_PER_SECOND

    if microseconds:
        milliseconds, _ = divmod(
            microseconds, definitions.MILLISECONDS_PER_SECOND)
        timestamp += milliseconds

    self._timestamp = timestamp
    self.is_local_time = False

Copies a POSIX timestamp from a date and time string.

Args:
    time_string (str): date and time value formatted as:
        YYYY-MM-DD hh:mm:ss.######[+-]##:##

        Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds fraction and time zone offset are optional. The default time zone is UTC.
juraj-google-style
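The conversion above boils down to POSIX seconds scaled to milliseconds plus the millisecond part of the seconds fraction. A standalone sketch of the same arithmetic using only the standard library; the literal 1000 stands in for the definitions-module constants.

from datetime import datetime, timezone

dt = datetime(2021, 3, 14, 15, 9, 26, 535000, tzinfo=timezone.utc)

seconds = int(dt.timestamp())          # POSIX seconds
timestamp = seconds * 1000             # scale to milliseconds
timestamp += dt.microsecond // 1000    # add the millisecond fraction
print(timestamp)  # 1615734566535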
def _ExtractResponseSummaryFields(document):
    headers = document.childAtPath('Envelope/Header/ResponseHeader')
    body = document.childAtPath('Envelope/Body')
    summary_fields = {}

    if headers is not None:
        summary_fields['requestId'] = headers.getChild('requestId').text
        summary_fields['responseTime'] = headers.getChild('responseTime').text

        service_name = headers.getChild('serviceName')
        if service_name is not None:
            summary_fields['serviceName'] = service_name.text

        method_name = headers.getChild('methodName')
        if method_name is not None:
            summary_fields['methodName'] = method_name.text

        operations = headers.getChild('operations')
        if operations is not None:
            summary_fields['operations'] = operations.text

    if body is not None:
        fault = body.getChild('Fault')
        if fault is not None:
            summary_fields['isFault'] = True
            summary_fields['faultMessage'] = fault.getChild(
                'faultstring').text[:16000]
        else:
            summary_fields['isFault'] = False

    return summary_fields

Extract logging fields from the response's suds.sax.document.Document.

Args:
    document: A suds.sax.document.Document instance containing the parsed API response for a given API request.

Returns:
    A dict mapping logging field names to their corresponding value.
juraj-google-style
def secondary_structure_summary(dssp_df):
    chains = dssp_df.chain.unique()
    infodict = {}

    for chain in chains:
        expoinfo = defaultdict(int)
        chain_df = dssp_df[dssp_df.chain == chain]
        counts = chain_df.ss.value_counts()
        total = float(len(chain_df))

        for ss, count in iteritems(counts):
            if ss == '-':
                expoinfo['percent_C-dssp'] = count / total
            if ss == 'H':
                expoinfo['percent_H-dssp'] = count / total
            if ss == 'B':
                expoinfo['percent_B-dssp'] = count / total
            if ss == 'E':
                expoinfo['percent_E-dssp'] = count / total
            if ss == 'G':
                expoinfo['percent_G-dssp'] = count / total
            if ss == 'I':
                expoinfo['percent_I-dssp'] = count / total
            if ss == 'T':
                expoinfo['percent_T-dssp'] = count / total
            if ss == 'S':
                expoinfo['percent_S-dssp'] = count / total

        for per in ['percent_C-dssp', 'percent_H-dssp', 'percent_B-dssp', 'percent_E-dssp',
                    'percent_G-dssp', 'percent_I-dssp', 'percent_T-dssp', 'percent_S-dssp']:
            if per not in expoinfo:
                expoinfo[per] = 0.0

        infodict[chain] = dict(expoinfo)

    return infodict

Summarize the secondary structure content of the DSSP dataframe for each chain.

Args:
    dssp_df: Pandas DataFrame of parsed DSSP results

Returns:
    dict: Chain to secondary structure summary dictionary
codesearchnet
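A small usage sketch with a toy DataFrame; the chain and ss columns are the only ones the function reads, and pandas plus the function's own imports (defaultdict, iteritems) are assumed to be available.

import pandas as pd

toy = pd.DataFrame({
    'chain': ['A', 'A', 'A', 'A'],
    'ss':    ['H', 'H', 'E', '-'],
})
summary = secondary_structure_summary(toy)
print(summary['A']['percent_H-dssp'])  # 0.5
print(summary['A']['percent_E-dssp'])  # 0.25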
def _check_one_size(self):
    block_one = (self.end[0] == self.start[0] + 1) or (self.end[1] == self.start[1] + 1)
    if block_one:
        self.flag_change(self.flags, 'error', self.start, self.worksheet,
                         message=self.FLAGS['1-size'])
    return block_one

Checks for single height or single width blocks and flags the occurrence.

Returns:
    True if the block is size 1.
codesearchnet
def load_profiles_from_file(self, fqfn):
    if self.args.verbose:
        print('Loading profiles from File: {}{}{}'.format(c.Style.BRIGHT, c.Fore.MAGENTA, fqfn))

    with open(fqfn, 'r+') as fh:
        data = json.load(fh)
        for profile in data:
            self.profile_update(profile)
            if self.args.action == 'validate':
                self.validate(profile)
        fh.seek(0)
        fh.write(json.dumps(data, indent=2, sort_keys=True))
        fh.truncate()

    for d in data:
        if d.get('profile_name') in self.profiles:
            self.handle_error(
                'Found a duplicate profile name ({}).'.format(d.get('profile_name'))
            )
        self.profiles.setdefault(
            d.get('profile_name'),
            {'data': d, 'ij_filename': d.get('install_json'), 'fqfn': fqfn},
        )

Load profiles from file.

Args:
    fqfn (str): Fully qualified file name.
juraj-google-style
def execute_query(self, verb, verb_arguments):
    request = self._build_request(verb, verb_arguments)
    return self._execute(request)

Executes query (ex. get) via a dedicated http object.

Args:
    verb (str): Method to execute on the component (ex. get, list).
    verb_arguments (dict): key-value pairs to be passed to _BuildRequest.

Returns:
    dict: Service Response.
juraj-google-style
def has_event_handler(self, handler, event_name=None):
    if event_name is not None:
        if event_name not in self._event_handlers:
            return False
        events = [event_name]
    else:
        events = self._event_handlers
    for e in events:
        for h, _, _ in self._event_handlers[e]:
            if h == handler:
                return True
    return False

Check if the specified event has the specified handler.

Args:
    handler (callable): the callable event handler.
    event_name: The event the handler attached to. Set this to ``None`` to search all events.
codesearchnet
def has_object_error(self):
    if self._has_object_error is None:
        self._has_object_error = next((True for o in self.objects() if o.has_error()), False)
    return self._has_object_error

Returns true if any requested object had a business logic error, otherwise returns false

Returns:
    boolean
codesearchnet
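The cached check above hinges on next() over a generator expression: it returns the first truthy hit and falls back to the second argument when nothing matches. A standalone sketch of that idiom with made-up objects.

class Obj:
    def __init__(self, error):
        self._error = error

    def has_error(self):
        return self._error

objects = [Obj(False), Obj(True), Obj(False)]

# Stops at the first object whose has_error() is truthy; False otherwise.
found = next((True for o in objects if o.has_error()), False)
print(found)  # True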
def __init__(self, channel):
    self.CheckConfig = channel.unary_unary(
        '/pulumirpc.ResourceProvider/CheckConfig',
        request_serializer=provider__pb2.CheckRequest.SerializeToString,
        response_deserializer=provider__pb2.CheckResponse.FromString,
    )
    self.DiffConfig = channel.unary_unary(
        '/pulumirpc.ResourceProvider/DiffConfig',
        request_serializer=provider__pb2.DiffRequest.SerializeToString,
        response_deserializer=provider__pb2.DiffResponse.FromString,
    )
    self.Configure = channel.unary_unary(
        '/pulumirpc.ResourceProvider/Configure',
        request_serializer=provider__pb2.ConfigureRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
    )
    self.Invoke = channel.unary_unary(
        '/pulumirpc.ResourceProvider/Invoke',
        request_serializer=provider__pb2.InvokeRequest.SerializeToString,
        response_deserializer=provider__pb2.InvokeResponse.FromString,
    )
    self.Check = channel.unary_unary(
        '/pulumirpc.ResourceProvider/Check',
        request_serializer=provider__pb2.CheckRequest.SerializeToString,
        response_deserializer=provider__pb2.CheckResponse.FromString,
    )
    self.Diff = channel.unary_unary(
        '/pulumirpc.ResourceProvider/Diff',
        request_serializer=provider__pb2.DiffRequest.SerializeToString,
        response_deserializer=provider__pb2.DiffResponse.FromString,
    )
    self.Create = channel.unary_unary(
        '/pulumirpc.ResourceProvider/Create',
        request_serializer=provider__pb2.CreateRequest.SerializeToString,
        response_deserializer=provider__pb2.CreateResponse.FromString,
    )
    self.Read = channel.unary_unary(
        '/pulumirpc.ResourceProvider/Read',
        request_serializer=provider__pb2.ReadRequest.SerializeToString,
        response_deserializer=provider__pb2.ReadResponse.FromString,
    )
    self.Update = channel.unary_unary(
        '/pulumirpc.ResourceProvider/Update',
        request_serializer=provider__pb2.UpdateRequest.SerializeToString,
        response_deserializer=provider__pb2.UpdateResponse.FromString,
    )
    self.Delete = channel.unary_unary(
        '/pulumirpc.ResourceProvider/Delete',
        request_serializer=provider__pb2.DeleteRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
    )
    self.Cancel = channel.unary_unary(
        '/pulumirpc.ResourceProvider/Cancel',
        request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
    )
    self.GetPluginInfo = channel.unary_unary(
        '/pulumirpc.ResourceProvider/GetPluginInfo',
        request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        response_deserializer=plugin__pb2.PluginInfo.FromString,
    )

Constructor.

Args:
    channel: A grpc.Channel.
juraj-google-style
def __cmp__(self, other):
    prec_self = OPERATOR_MAP[self.value][1]
    prec_other = OPERATOR_MAP[other.value][1]
    if prec_self < prec_other:
        return -1
    if prec_self > prec_other:
        return 1
    return 0

Compare using operator precedence.

Args:
    other (Operator): The ``Operator`` we are comparing precedence against.

Returns:
    integer: ``1`` if greater than ``other``, ``-1`` if less than ``other``, and ``0`` if of equal precedence to ``other``.
juraj-google-style
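__cmp__ is the Python 2 comparison hook; the method only consults a precedence table keyed by the operator's value. A self-contained sketch of the same comparison, with a hypothetical OPERATOR_MAP in the (symbol, precedence) shape the method assumes.

# Hypothetical table: value -> (symbol, precedence); higher binds tighter.
OPERATOR_MAP = {'or': ('|', 1), 'and': ('&', 2), 'not': ('!', 3)}

def compare_precedence(a, b):
    # Same contract as __cmp__: -1, 0 or 1.
    prec_a, prec_b = OPERATOR_MAP[a][1], OPERATOR_MAP[b][1]
    return (prec_a > prec_b) - (prec_a < prec_b)

print(compare_precedence('and', 'or'))   # 1, 'and' binds tighter
print(compare_precedence('or', 'not'))   # -1
print(compare_precedence('and', 'and'))  # 0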
def master(self, task_type=None, task_id=None, rpc_layer=None):
    if self._tpu != 'local':
        cluster_spec = self.cluster_spec()
        if task_type is not None and task_id is not None:
            master = cluster_spec.task_address(task_type, task_id)
        elif self.task_type is not None and self.task_id is not None:
            master = cluster_spec.task_address(self.task_type, self.task_id)
        else:
            job_tasks = cluster_spec.job_tasks(self.task_type)
            if not job_tasks:
                raise ValueError('No TPUs with the specified names exist.')
            master = job_tasks[0]
        return cluster_resolver_lib.format_master_url(master, 'grpc')
    else:
        return ''

Get the Master string to be used for the session.

In the normal case, this returns the grpc path (grpc://1.2.3.4:8470) of the first instance in the ClusterSpec returned by the cluster_spec function. If a non-TPU name is used when constructing a TPUClusterResolver, that will be returned instead (e.g. if the tpus argument's value when constructing this TPUClusterResolver was 'grpc://10.240.1.2:8470', 'grpc://10.240.1.2:8470' will be returned).

Args:
    task_type: (Optional, string) The type of the TensorFlow task of the master.
    task_id: (Optional, integer) The index of the TensorFlow task of the master.
    rpc_layer: (Optional, string) The RPC protocol TensorFlow should use to communicate with TPUs.

Returns:
    string, the connection string to use when creating a session.

Raises:
    ValueError: If none of the TPUs specified exists.
github-repos
def subscriber(address, topics, callback, message_type):
    return Subscriber(address, topics, callback, message_type)

Creates a subscriber binding to the given address and subscribing to the given topics.

The callback is invoked for every message received.

Args:
    - address: the address to bind the PUB socket to.
    - topics: the topics to subscribe
    - callback: the callback to invoke for every message. Must accept 2 variables - topic and message
    - message_type: the type of message to receive
codesearchnet
def confirm_cw_log(self, account, region, vpcname):
    try:
        cw = self.session.client('logs', region)

        token = None
        log_groups = []
        while True:
            result = cw.describe_log_groups() if not token else cw.describe_log_groups(nextToken=token)
            token = result.get('nextToken')
            log_groups.extend([x['logGroupName'] for x in result.get('logGroups', [])])

            if not token:
                break

        if vpcname not in log_groups:
            cw.create_log_group(logGroupName=vpcname)
            cw_vpc = VPC.get(vpcname)
            cw_vpc.set_property('vpc_flow_logs_log_group', vpcname)

            self.log.info('Created log group {}/{}/{}'.format(account.account_name, region, vpcname))
            auditlog(
                event='vpc_flow_logs.create_cw_log_group',
                actor=self.ns,
                data={
                    'account': account.account_name,
                    'region': region,
                    'log_group_name': vpcname,
                    'vpc': vpcname
                }
            )
        return True
    except Exception:
        self.log.exception('Failed creating log group for {}/{}/{}.'.format(
            account, region, vpcname
        ))

Create a new CloudWatch log group based on the VPC Name if none exists. Returns `True` if successful.

Args:
    account (:obj:`Account`): Account to create the log group in
    region (`str`): Region to create the log group in
    vpcname (`str`): Name of the VPC the log group is for

Returns:
    `bool`
juraj-google-style
def hash_stream(fileobj, hasher=None, blocksize=65536):
    hasher = hasher or hashlib.sha1()
    buf = fileobj.read(blocksize)
    while buf:
        hasher.update(buf)
        buf = fileobj.read(blocksize)
    return hasher

Read from fileobj stream, return hash of its contents.

Args:
    fileobj: File-like object with read()
    hasher: Hash object such as hashlib.sha1(). Defaults to sha1.
    blocksize: Read from fileobj this many bytes at a time.
codesearchnet
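A minimal usage sketch with an in-memory stream; only the standard library and the function above are assumed.

import hashlib
import io

stream = io.BytesIO(b"hello world")
digest = hash_stream(stream, hasher=hashlib.sha256())
print(digest.hexdigest())  # sha256 of b"hello world"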
def range(self, dim, data_range=True, dimension_range=True):
    dim = self.get_dimension(dim)
    if dim is None or (not data_range and not dimension_range):
        return (None, None)
    elif all(util.isfinite(v) for v in dim.range) and dimension_range:
        return dim.range
    elif dim in self.dimensions() and data_range and bool(self):
        lower, upper = self.interface.range(self, dim)
    else:
        lower, upper = (np.NaN, np.NaN)
    if not dimension_range:
        return lower, upper
    return util.dimension_range(lower, upper, dim.range, dim.soft_range)

Return the lower and upper bounds of values along dimension.

Args:
    dimension: The dimension to compute the range on.
    data_range (bool): Compute range from data values
    dimension_range (bool): Whether to include Dimension range and soft_range in range calculation

Returns:
    Tuple containing the lower and upper bound
juraj-google-style
def generate_pseudo(strain_states, order=3):
    s = sp.Symbol('s')
    nstates = len(strain_states)
    ni = np.array(strain_states) * s
    mis, absent_syms = [], []
    for degree in range(2, order + 1):
        cvec, carr = get_symbol_list(degree)
        sarr = np.zeros((nstates, 6), dtype=object)
        for n, strain_v in enumerate(ni):
            # Contract the symbolic coefficient tensor with this strain state.
            exps = carr.copy()
            for i in range(degree - 1):
                exps = np.dot(exps, strain_v)
            exps /= np.math.factorial(degree - 1)
            sarr[n] = [sp.diff(exp, s, degree - 1) for exp in exps]
        svec = sarr.ravel()
        present_syms = set.union(*[exp.atoms(sp.Symbol) for exp in svec])
        absent_syms += [set(cvec) - present_syms]
        m = np.zeros((6 * nstates, len(cvec)))
        for n, c in enumerate(cvec):
            m[:, n] = v_diff(svec, c)
        mis.append(np.linalg.pinv(m))
    return mis, absent_syms

Generates the pseudoinverse for a given set of strains.

Args:
    strain_states (6xN array like): a list of voigt-notation "strain-states", i. e. perturbed indices of the strain as a function of the smallest strain e. g. (0, 1, 0, 0, 1, 0)
    order (int): order of pseudoinverse to calculate

Returns:
    mis: pseudo inverses for each order tensor, these can be multiplied by the central difference derivative of the stress with respect to the strain state
    absent_syms: symbols of the tensor absent from the PI expression
codesearchnet
def start(self, channel):
    if self._started:
        raise InternalError("The method start() was called twice on VirtualIOTileDevice.")
    self._push_channel = channel
    self.start_workers()

Start running this virtual device including any necessary worker threads.

Args:
    channel (IOTilePushChannel): the channel with a stream and trace routine for streaming and tracing data through a VirtualInterface
juraj-google-style
def CreateTaskStorage(self, task):
    if task.identifier in self._task_storage_writers:
        raise IOError('Storage writer for task: {0:s} already exists.'.format(
            task.identifier))

    storage_writer = FakeStorageWriter(
        self._session, storage_type=definitions.STORAGE_TYPE_TASK, task=task)
    self._task_storage_writers[task.identifier] = storage_writer
    return storage_writer

Creates a task storage.

Args:
    task (Task): task.

Returns:
    FakeStorageWriter: storage writer.

Raises:
    IOError: if the task storage already exists.
    OSError: if the task storage already exists.
juraj-google-style
def delete(self, messageId):
    check_type(messageId, basestring, may_be_none=False)
    self._session.delete(API_ENDPOINT + '/' + messageId)

Delete a message.

Args:
    messageId(basestring): The ID of the message to be deleted.

Raises:
    TypeError: If the parameter types are incorrect.
    ApiError: If the Webex Teams cloud returns an error.
codesearchnet