Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes)
def build_images(prefix, images, tag=None, commit_range=None, push=False, chart_version=None):
    value_modifications = {}
    for name, options in images.items():
        image_path = options.get('contextPath', os.path.join('images', name))
        image_tag = tag
        paths = list(options.get('paths', [])) + [image_path, 'chartpress.yaml']
        last_commit = last_modified_commit(*paths)
        if tag is None:
            if chart_version:
                image_tag = '{}-{}'.format(chart_version, last_commit)
            else:
                image_tag = last_commit
        image_name = prefix + name
        image_spec = '{}:{}'.format(image_name, image_tag)
        value_modifications[options['valuesPath']] = {
            'repository': image_name,
            'tag': SingleQuotedScalarString(image_tag),
        }
        template_namespace = {'LAST_COMMIT': last_commit, 'TAG': image_tag}
        if tag or image_needs_building(image_spec):
            build_args = render_build_args(options, template_namespace)
            build_image(image_path, image_spec, build_args, options.get('dockerfilePath'))
        else:
            print(f'Skipping build for {image_spec}, it already exists')
        if push:
            if tag or image_needs_pushing(image_spec):
                check_call(['docker', 'push', image_spec])
            else:
                print(f'Skipping push for {image_spec}, already on registry')
    return value_modifications
Build a collection of docker images Args: prefix (str): the prefix to add to images images (dict): dict of image-specs from chartpress.yml tag (str): Specific tag to use instead of the last modified commit. If unspecified the tag for each image will be the hash of the last commit to modify the image's files. commit_range (str): The range of commits to consider, e.g. for building in CI. If an image hasn't changed in the given range, it will not be rebuilt. push (bool): Whether to push the resulting images (default: False). chart_version (str): The chart version, included as a prefix on image tags if `tag` is not specified.
codesearchnet
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
    if token_ids_1 is None:
        return (len(token_ids_0) + 2) * [0]
    return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
Create the token type IDs corresponding to the sequences passed. [What are token type IDs?](../glossary#token-type-ids)

Should be overridden in a subclass if the model has a special way of building those.

Args:
    token_ids_0 (`List[int]`): The first tokenized sequence.
    token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.

Returns:
    `List[int]`: The token type ids.
github-repos
def from_orbit(cls, orbit, name=None, norad_id=None, cospar_id=None): name = "0 %s\n" % name if name is not None else "" norad_id = norad_id if norad_id is not None else "99999" if cospar_id is not None: y, _, i = cospar_id.partition('-') cospar_id = y[2:] + i else: cospar_id = "" orbit = orbit.copy(form='TLE', frame='TEME') date = orbit.date.datetime i, Ω, e, ω, M, n = orbit line1 = "1 {norad_id}U {cospar_id:<8} {date:%y}{day:012.8f} {ndot:>10} {ndotdot:>8} {bstar:>8} 0 999".format( norad_id=norad_id, cospar_id=cospar_id, date=date, day=int("{:%j}".format(date)) + date.hour / 24. + date.minute / 1440 + date.second / 86400 + date.microsecond / 86400000000., ndot="{: 0.8f}".format(orbit.complements['ndot'] / 2).replace("0.", "."), ndotdot=_unfloat(orbit.complements['ndotdot'] / 6), bstar=_unfloat(orbit.complements['bstar']), ) line2 = "2 {norad_id} {i:8.4f} {Ω:8.4f} {e} {ω:8.4f} {M:8.4f} {n:11.8f}99999".format( norad_id=norad_id, i=np.degrees(i), Ω=np.degrees(Ω), e="{:.7f}".format(e)[2:], ω=np.degrees(ω), M=np.degrees(M), n=n * 86400 / (2 * np.pi) ) line1 += str(cls._checksum(line1)) line2 += str(cls._checksum(line2)) return cls("%s%s\n%s" % (name, line1, line2))
Convert an orbit to its TLE representation.

Args:
    orbit (Orbit)
    norad_id (str or int):
    cospar_id (str):

Returns:
    str: TLE representation
juraj-google-style
def sequential_experts_gemm(token_states, expert_weights, tokens_per_expert):
    num_tokens = token_states.shape[0]
    out_features = expert_weights.shape[-1]
    output = torch.zeros(num_tokens, out_features, dtype=token_states.dtype, device=token_states.device)
    cumsum_num_tokens = torch.cumsum(tokens_per_expert, dim=0)
    zero_tensor = torch.zeros(1, dtype=torch.long, device=cumsum_num_tokens.device)
    cumsum_num_tokens = torch.cat((zero_tensor, cumsum_num_tokens))
    for expert_num in range(expert_weights.shape[0]):
        start = cumsum_num_tokens[expert_num]
        end = cumsum_num_tokens[expert_num + 1]
        tokens = token_states[start:end]
        out = torch.matmul(tokens, expert_weights[expert_num])
        output[start:end] = out
    return output
Compute the matrix multiplication (GEMM) for each expert sequentially. This approach is computationally inefficient, especially when dealing with a large number of experts. Args: token_states (torch.Tensor): Input tensor of shape (num_tokens, in_features). expert_weights (torch.Tensor): Weight tensor of shape (num_experts, in_features, out_features). tokens_per_expert (torch.Tensor): Number of tokens assigned to each expert. Returns: torch.Tensor: Output tensor of shape (num_tokens, out_features).
github-repos
def get_asset(self, asset_hash, id=None, endpoint=None):
    return self._call_endpoint(GET_ASSET_STATE, params=[asset_hash], id=id, endpoint=endpoint)
Get an asset by its hash Args: asset_hash: (str) asset to lookup, example would be 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
juraj-google-style
def en010(self, value=None):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `en010`'.format(value))
    self._en010 = value
Corresponds to IDD Field `en010` mean coincident dry-bulb temperature to Enthalpy corresponding to 1.0% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `en010` Unit: kJ/kg if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def set_management_icmp(enabled=True, deploy=False): if (enabled is True): value = 'no' elif (enabled is False): value = 'yes' else: raise CommandExecutionError('Invalid option provided for service enabled option.') ret = {} query = {'type': 'config', 'action': 'set', 'xpath': "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/service", 'element': '<disable-icmp>{0}</disable-icmp>'.format(value)} ret.update(__proxy__['panos.call'](query)) if (deploy is True): ret.update(commit()) return ret
Enables or disables the ICMP management service on the device. CLI Example: Args: enabled (bool): If true the service will be enabled. If false the service will be disabled. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_management_icmp salt '*' panos.set_management_icmp enabled=False deploy=True
codesearchnet
def from_string(cls, key, password='notasecret'): key = _helpers._from_bytes(key) marker_id, key_bytes = pem.readPemBlocksFromFile( six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER) if marker_id == 0: pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes, format='DER') elif marker_id == 1: key_info, remaining = decoder.decode( key_bytes, asn1Spec=_PKCS8_SPEC) if remaining != b'': raise ValueError('Unused bytes', remaining) pkey_info = key_info.getComponentByName('privateKey') pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(), format='DER') else: raise ValueError('No key could be detected.') return cls(pkey)
Construct an RsaSigner instance from a string. Args: key: string, private key in PEM format. password: string, password for private key file. Unused for PEM files. Returns: RsaSigner instance. Raises: ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in PEM format.
juraj-google-style
def _get_scripts(self, host_metadata):
    deploy_scripts = host_metadata.get('deploy-scripts', [])
    if deploy_scripts:
        return deploy_scripts
    ovirt_scripts = host_metadata.get('ovirt-scripts', [])
    if ovirt_scripts:
        warnings.warn('Deprecated entry "ovirt-scripts" will not be supported in the future, replace with "deploy-scripts"')
    return ovirt_scripts
Temporary method to retrieve the host scripts TODO: remove once the "ovirt-scripts" option gets deprecated Args: host_metadata(dict): host metadata to retrieve the scripts for Returns: list: deploy scripts for the host, empty if none found
codesearchnet
def _ModifyInterface(self, interface_config, config_key, config_value, replace=False):
    config_entry = '%s=%s' % (config_key, config_value)
    if not open(interface_config).read().count(config_key):
        with open(interface_config, 'a') as config:
            config.write('%s\n' % config_entry)
    elif replace:
        for line in fileinput.input(interface_config, inplace=True):
            print(re.sub('%s=.*' % config_key, config_entry, line.rstrip()))
Write a value to a config file if not already present. Args: interface_config: string, the path to a config file. config_key: string, the configuration key to set. config_value: string, the value to set for the configuration key. replace: bool, replace the configuration option if already present.
codesearchnet
def readHolidayDates(self): self.setContext('readHolidayDates') try: req_str = '0152310230304230282903' self.request(False) req_crc = self.calc_crc16(req_str[2:].decode('hex')) req_str += req_crc self.m_serial_port.write(req_str.decode('hex')) raw_ret = self.m_serial_port.getResponse(self.getContext()) self.serialPostEnd() unpacked_read = self.unpackStruct(raw_ret, self.m_hldy) self.convertData(unpacked_read, self.m_hldy, self.m_kwh_precision) return_crc = self.calc_crc16(raw_ret[1:(- 2)]) if (str(return_crc) == str(self.m_hldy['crc16'][MeterData.StringValue])): ekm_log('Holidays and Schedules CRC success') self.setContext('') return True except: ekm_log(traceback.format_exc(sys.exc_info())) self.setContext('') return False
Serial call to read holiday dates into meter object buffer. Returns: bool: True on completion.
codesearchnet
def _stratonovich_integral(dim, dt, sqrt_dt, dw, stratonovich_draws, order): p = order - 1 sqrt_rho_p = tf.sqrt(tf.constant(1 / 12 - sum([1 / r ** 2 for r in range(1, order + 1)]) / 2 / _PI ** 2, dtype=dw.dtype)) mu = stratonovich_draws[0] zeta = tf.transpose(stratonovich_draws[1], [2, 0, 1]) eta = tf.transpose(stratonovich_draws[2], [2, 0, 1]) xi = dw / sqrt_dt r_i = tf.stack([tf.ones(zeta[0, ...].shape + [dim], dtype=zeta.dtype) / r for r in range(1, order + 1)], 0) value = dt * (_outer_prod(dw, dw) / 2 + sqrt_rho_p * (_outer_prod(mu[..., p], xi) - _outer_prod(xi, mu[..., p]))) value += dt * tf.reduce_sum(tf.multiply(_outer_prod(zeta, _SQRT_2 * xi + eta) - _outer_prod(_SQRT_2 * xi + eta, zeta), r_i), 0) / (2 * _PI) return value
Approximate Stratonovich integrals J(i, j). Args: dim: An integer. The dimension of the state. dt: A double. The time step. sqrt_dt: A double. The square root of dt. dw: A double. The Wiener increment. stratonovich_draws: A list of tensors corresponding to the independent N(0,1) random variables used in the approximation. order: An integer. The stratonovich_order. Returns: A Tensor of shape [dw.shape[0], dim, dim] corresponding to the Stratonovich integral for each pairwise component of the Wiener process. In other words, J(i,j) corresponds to an integral over W_i and W_j.
github-repos
def get_filters(component):
    def inner(c, filters=None):
        filters = filters or set()
        if not ENABLED:
            return filters
        if not plugins.is_datasource(c):
            return filters
        if c in FILTERS:
            filters |= FILTERS[c]
        for d in dr.get_dependents(c):
            filters |= inner(d, filters)
        return filters
    if component not in _CACHE:
        _CACHE[component] = inner(component)
    return _CACHE[component]
Get the set of filters for the given datasource. Filters added to a ``RegistryPoint`` will be applied to all datasources that implement it. Filters added to a datasource implementation apply only to that implementation. For example, a filter added to ``Specs.ps_auxww`` will apply to ``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``, ``SosSpecs.ps_auxww``, etc. But a filter added to ``DefaultSpecs.ps_auxww`` will only apply to ``DefaultSpecs.ps_auxww``. See the modules in ``insights.specs`` for those classes. Args: component (a datasource): The target datasource Returns: set: The set of filters defined for the datasource
codesearchnet
def fill_tree(self, tree, input_dict): def add_element(item, key, value): child_name = QtGui.QStandardItem(key) child_name.setDragEnabled(False) child_name.setSelectable(False) child_name.setEditable(False) if isinstance(value, dict): for ket_child, value_child in value.items(): add_element(child_name, ket_child, value_child) child_value = QtGui.QStandardItem('') else: child_value = QtGui.QStandardItem(str(value)) child_value.setData(value) child_value.setDragEnabled(False) child_value.setSelectable(False) child_value.setEditable(False) item.appendRow([child_name, child_value]) for index, (loaded_item, loaded_item_settings) in enumerate(input_dict.items()): item = QtGui.QStandardItem(loaded_item) for key, value in loaded_item_settings['settings'].items(): add_element(item, key, value) value = QtGui.QStandardItem('') tree.model().appendRow([item, value]) if tree == self.tree_loaded: item.setEditable(False) tree.setFirstColumnSpanned(index, self.tree_infile.rootIndex(), True)
Fills a tree with nested parameters.

Args:
    tree: QtGui.QTreeView
    input_dict: dictionary or Parameter object to fill the tree from
juraj-google-style
def _requests_post(self, url, json=None, data=None, username='', password='', xapikey='', headers=None, timeout=30): if (headers is None): headers = {} auth = None if (username and password): auth = requests.auth.HTTPBasicAuth(username, password) elif xapikey: headers['x-api-key'] = xapikey headers['User-Agent'] = self.user_agent request = requests.post(url, auth=auth, data=data, json=json, headers=headers, timeout=timeout) message = json return (request.text, message, request.status_code, request.headers)
This function will POST to the url endpoint using requests, returning an AdyenResult object on a 200 HTTP response. Either json or data has to be provided. If username and password are provided, basic auth will be used.

Args:
    url (str): url to send the POST
    json (dict, optional): Dict of the JSON to POST
    data (dict, optional): Dict, presumed flat structure of key/value of request to place
    username (str, optional): Username for basic auth. Must be provided together with password.
    password (str, optional): Password for basic auth. Must be provided together with username.
    xapikey (str, optional): API key sent via the x-api-key header instead of basic auth.
    headers (dict, optional): Key/Value pairs of headers to include
    timeout (int, optional): Default 30. Timeout for the request.

Returns:
    str: Raw response received
    str: Raw request placed
    int: HTTP status code, eg 200, 404, 401
    dict: Key/Value pairs of the headers received.
codesearchnet
def outgoing_args(self, nodeid):
    _vars = self._vars
    _hcons = self._hcons
    args = self.args(nodeid)
    for arg, val in list(args.items()):
        if arg == IVARG_ROLE or val not in _vars:
            del args[arg]
        else:
            refs = _vars[val]['refs']
            if not (val in _hcons or IVARG_ROLE in refs or 'LBL' in refs):
                del args[arg]
    return args
Return the arguments going from *nodeid* to other predications. Valid arguments include regular variable arguments and scopal (label-selecting or HCONS) arguments. MOD/EQ links, intrinsic arguments, and constant arguments are not included. Args: nodeid: the nodeid of the EP that is the arguments' source Returns: dict: `{role: tgt}`
juraj-google-style
def TransformerEncoder(vocab_size, num_classes=10, feature_depth=512, feedforward_depth=2048, num_layers=6, num_heads=8, dropout=0.1, max_len=2048, mode='train'): input_embedding = layers.Serial(layers.Embedding(feature_depth, vocab_size), layers.Dropout(rate=dropout, mode=mode), layers.PositionalEncoding(max_len=max_len)) return layers.Serial(layers.Branch(), layers.Parallel(input_embedding, layers.PaddingMask()), layers.Serial(*[EncoderLayer(feature_depth, feedforward_depth, num_heads, dropout, mode) for _ in range(num_layers)]), layers.FirstBranch(), layers.LayerNorm(), layers.Mean(axis=1), layers.Dense(num_classes), layers.LogSoftmax())
Transformer encoder. Args: vocab_size: int: vocab size num_classes: how many classes on output feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_layers: int: number of encoder/decoder layers num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) max_len: int: maximum symbol length for positional encoding mode: str: 'train' or 'eval' Returns: the Transformer encoder layer.
codesearchnet
def dms_maker(self, force_rerun=False): log.debug('{}: running surface representation maker...'.format(self.id)) if not self.receptorpdb_path: return ValueError('Please run protein_only_and_noH') dms = op.join(self.dock_dir, '{}_receptor.dms'.format(self.id)) if ssbio.utils.force_rerun(flag=force_rerun, outfile=dms): cmd = 'dms {} -n -w 1.4 -o {}'.format(self.receptorpdb_path, dms) os.system(cmd) self.dms_path = dms if ssbio.utils.is_non_zero_file(dms): self.dms_path = dms log.debug('{}: successful dms execution'.format(self.dms_path)) else: log.critical('{}: dms_maker failed to run on receptor file'.format(self.receptorpdb_path))
Create surface representation (dms file) of receptor Args: force_rerun (bool): If method should be rerun even if output file exists
juraj-google-style
def stage_tc_create_security_label(self, label, resource):
    sl_resource = resource.security_labels(label)
    sl_resource.http_method = 'POST'
    sl_response = sl_resource.request()
    if sl_response.get('status') != 'Success':
        self.log.warning(
            '[tcex] Failed adding security label "{}" ({}).'.format(
                label, sl_response.get('response').text
            )
        )
Add a security label to a resource. Args: label (str): The security label (must exit in ThreatConnect). resource (obj): An instance of tcex resource class.
juraj-google-style
def __init__(self, n=3, cap_front=True, cap_end=True):
    if n < 2:
        raise ValueError('n must be 2 or more')
    super(Alkane, self).__init__()
    if not cap_front:
        n += 1
    if not cap_end:
        n += 1
    chain = mb.recipes.Polymer(CH2(), n=n-2, port_labels=('up', 'down'))
    self.add(chain, 'chain')
    if cap_front:
        self.add(CH3(), "methyl_front")
        mb.force_overlap(move_this=self['chain'],
                         from_positions=self['chain']['up'],
                         to_positions=self['methyl_front']['up'])
    else:
        self.add(chain['up'], 'up', containment=False)
    if cap_end:
        self.add(CH3(), 'methyl_end')
        mb.force_overlap(self['methyl_end'], self['methyl_end']['up'], self['chain']['down'])
    else:
        self.add(chain['down'], 'down', containment=False)
Initialize an Alkane Compound. Args: n: Number of carbon atoms. cap_front: Add methyl group to beginning of chain ('down' port). cap_end: Add methyl group to end of chain ('up' port).
juraj-google-style
def ping(self, timeout=12):
    self.conn("POST", "{0}/users/ME/endpoints/{1}/active".format(self.conn.msgsHost, self.id),
              auth=SkypeConnection.Auth.RegToken, json={"timeout": timeout})
Send a keep-alive request for the endpoint. Args: timeout (int): maximum amount of time for the endpoint to stay active
juraj-google-style
def config():
    out = shell.run('git config --list', capture=True, never_pretend=True).stdout.strip()
    result = {}
    for line in out.splitlines():
        name, value = line.split('=', 1)
        result[name.strip()] = value.strip()
    return result
Return the current git configuration. Returns: dict[str, Any]: The current git config taken from ``git config --list``.
codesearchnet
def l2_regression_loss(y, target, name=None):
    with tf.name_scope(name, 'l2_regression', [y, target]) as scope:
        y = tf.convert_to_tensor(y, name='y')
        target = tf.convert_to_tensor(target, name='target')
        return tf.sqrt(l2_regression_sq_loss(y, target, name=scope))
Calculates the square root of the SSE between y and target. Args: y: the calculated values. target: the desired values. name: the name for this op, defaults to l2_regression Returns: A tensorflow op.
juraj-google-style
def dict_from_file(filename, key_type=str):
    mapping = {}
    with open(filename, 'r') as f:
        for line in f:
            items = line.rstrip('\n').split()
            assert len(items) >= 2
            key = key_type(items[0])
            val = items[1:] if len(items) > 2 else items[1]
            mapping[key] = val
    return mapping
Load a text file and parse the content as a dict.

Each line of the text file will be two or more columns split by whitespaces or tabs. The first column will be parsed as dict keys, and the following columns will be parsed as dict values.

Args:
    filename (str): Filename.
    key_type (type): Type of the dict's keys. str is used by default and type conversion will be performed if specified.

Returns:
    dict: The parsed contents.
codesearchnet
def fn_with_custom_grad(grad_fn, use_global_vars=False):
    def dec(fn):
        @functools.wraps(fn)
        def wrapped(*args):
            return _fn_with_custom_grad(fn, args, grad_fn, use_global_vars=use_global_vars)
        return wrapped
    return dec
Decorator to create a subgraph with a custom gradient function. The subgraph created by the decorated function is NOT put in a Defun and so does not suffer from the limitations of the Defun (all subgraph ops on the same device, no summaries). Args: grad_fn: function with signature (inputs, variables, outputs, output_grads) -> (grad_inputs, grad_vars), all of which are lists of Tensors. use_global_vars: if True, variables will be the global variables created. If False, will be the trainable variables. Returns: Decorator for function such that the gradient is defined by grad_fn.
codesearchnet
def set_tensor_final(self, tensor_name):
    tensor = self._name_to_tensor(tensor_name)
    self._final_tensors.add(tensor)
Denotes a tensor as a final output of the computation. Args: tensor_name: a string, name of a tensor in the graph.
codesearchnet
def resolve_variables(self, provided_variables):
    self.resolved_variables = {}
    defined_variables = self.defined_variables()
    variable_dict = dict((var.name, var) for var in provided_variables)
    for var_name, var_def in defined_variables.items():
        value = resolve_variable(
            var_name,
            var_def,
            variable_dict.get(var_name),
            self.name
        )
        self.resolved_variables[var_name] = value
Resolve the values of the blueprint variables. This will resolve the values of the `VARIABLES` with values from the env file, the config, and any lookups resolved. Args: provided_variables (list of :class:`stacker.variables.Variable`): list of provided variables
juraj-google-style
def with_inverse(points, noise):
    # Integer division so that break_point is a valid list index under Python 3.
    n_points = len(points) // 2
    break_point = n_points
    points_part = copy.deepcopy(points)
    points_part = list(reversed(points_part))
    part = kalman_filter(points_part, noise)
    total = kalman_filter(points, noise)
    result = list(reversed(part))[:break_point] + total[break_point:]
    result[break_point] = point_mean(part[break_point], total[break_point])
    return result
Smooths a set of points.

It smooths them twice, once in the given order and once in the reverse order. The first half of the result is taken from the reverse pass and the second half from the forward pass.

Args:
    points (:obj:`list` of :obj:`Point`)
    noise (float): Expected noise, the higher it is the more the path will be smoothed.

Returns:
    :obj:`list` of :obj:`Point`
juraj-google-style
def _SwitchRefOrTensor(data, pred, name='Switch'):
    data = ops.convert_to_tensor_or_composite(data, name='data')
    with ops.colocate_with(data, ignore_existing=True):
        if isinstance(data, tensor_lib.Tensor):
            if data.dtype._is_ref_dtype:
                return ref_switch(data, pred, name=name)
        return switch(data, pred, name=name)
Forwards `data` to an output determined by `pred`. If `pred` is false, the `data` input is forwarded to the first output. Otherwise, the data goes to the second output. This op handles `Tensor`s and `IndexedSlices`. Args: data: The tensor to be forwarded to the appropriate output. pred: A scalar that specifies which output port will receive data. name: A name for this operation (optional). Returns: `(output_false, output_true)`: If `pred` is true, data will be forwarded to `output_true`, otherwise it goes to `output_false`. Raises: TypeError: if data is not a Tensor or IndexedSlices
github-repos
def parse_individual(sample): ind_info = {} if 'sample_id' not in sample: raise PedigreeError("One sample is missing 'sample_id'") sample_id = sample['sample_id'] if 'sex' not in sample: raise PedigreeError("Sample %s is missing 'sex'" % sample_id) sex = sample['sex'] if sex not in REV_SEX_MAP: log.warning("'sex' is only allowed to have values from {}" .format(', '.join(list(REV_SEX_MAP.keys())))) raise PedigreeError("Individual %s has wrong formated sex" % sample_id) if 'phenotype' not in sample: raise PedigreeError("Sample %s is missing 'phenotype'" % sample_id) phenotype = sample['phenotype'] if phenotype not in REV_PHENOTYPE_MAP: log.warning("'phenotype' is only allowed to have values from {}" .format(', '.join(list(REV_PHENOTYPE_MAP.keys())))) raise PedigreeError("Individual %s has wrong formated phenotype" % sample_id) ind_info['individual_id'] = sample_id ind_info['display_name'] = sample.get('sample_name', sample['sample_id']) ind_info['sex'] = sex ind_info['phenotype'] = phenotype ind_info['father'] = sample.get('father') ind_info['mother'] = sample.get('mother') ind_info['confirmed_parent'] = sample.get('confirmed_parent') ind_info['confirmed_sex'] = sample.get('confirmed_sex') ind_info['predicted_ancestry'] = sample.get('predicted_ancestry') bam_file = sample.get('bam_path') if bam_file: ind_info['bam_file'] = bam_file mt_bam = sample.get('mt_bam') if mt_bam: ind_info['mt_bam'] = mt_bam analysis_type = sample.get('analysis_type') if analysis_type: ind_info['analysis_type'] = analysis_type ind_info['capture_kits'] = ([sample.get('capture_kit')] if 'capture_kit' in sample else []) vcf2cytosure = sample.get('vcf2cytosure') if vcf2cytosure: ind_info['vcf2cytosure'] = vcf2cytosure tumor_type = sample.get('tumor_type') if tumor_type: ind_info['tumor_type'] = tumor_type tumor_mutational_burden = sample.get('tmb') if tumor_mutational_burden: ind_info['tmb'] = tumor_mutational_burden msi = sample.get('msi') if msi: ind_info['msi'] = msi tumor_purity = sample.get('tumor_purity') if tumor_purity: ind_info['tumor_purity'] = tumor_purity return ind_info
Parse individual information Args: sample (dict) Returns: { 'individual_id': str, 'father': str, 'mother': str, 'display_name': str, 'sex': str, 'phenotype': str, 'bam_file': str, 'vcf2cytosure': str, 'analysis_type': str, 'capture_kits': list(str), }
juraj-google-style
def emit_counter(self, category: str, name: str, pid: int, timestamp: int, counter: str, value: int) -> None:
    event = self._create_event('C', category, name, pid, 0, timestamp)
    event['args'] = {counter: value}
    self._events.append(event)
Emits a record for a single counter. Args: category: The event category as a string. name: The event name as a string. pid: Identifier of the process generating this event as an integer. timestamp: The timestamp of this event as a long integer. counter: Name of the counter as a string. value: Value of the counter as an integer.
github-repos
def _extract_dir(self, dir_not_exists, output):
    if not dir_not_exists:
        lst = output.dir_cache
        return {i["relpath"]: i["md5"] for i in lst}
    return {}
Extract the content of dvc tree file.

Args:
    self(object) - Repo class instance
    dir_not_exists(bool) - flag for directory existence
    output(object) - OutputLOCAL class instance

Returns:
    dict - dictionary with keys - paths to files in .dvc/cache, values - checksums for those files
juraj-google-style
def write_object_proto_for_resource_variable(resource_variable, proto, options, enforce_naming=True): proto.variable.SetInParent() if enforce_naming and (not resource_variable.name.endswith(':0')): raise ValueError(f"Cowardly refusing to save variable {resource_variable.name} because of unexpected suffix in the name (expected ':0')which won't be restored.") proto.variable.name = tensor_module.get_op_name(resource_variable.name) proto.variable.trainable = resource_variable.trainable proto.variable.dtype = resource_variable.dtype.as_datatype_enum proto.variable.synchronization = resource_variable.synchronization.value proto.variable.aggregation = resource_variable.aggregation.value proto.variable.shape.CopyFrom(resource_variable.shape.as_proto()) if options.experimental_variable_policy._save_variable_devices(): if hasattr(resource_variable, 'device'): proto.variable.device = resource_variable.device
Writes additional information of the variable into the SavedObject proto. This allows users to define a `hook` to provide extra information of the variable to the SavedObject. For example, DistributedVariable class would fill in components in the distributed context. Args: resource_variable: A `ResourceVariable` or `DistributedValue` that has the information to be saved into the proto. proto: `SavedObject` proto to update. options: A `SaveOption` instance that configures save behavior. enforce_naming: A bool determining whether to check that names end in the expected string ':0'
github-repos
def check_config_options(_class, required_options, optional_options, options):
    for opt in required_options:
        if opt not in options:
            msg = 'Required option missing: {0}'
            raise ConfigurationError(msg.format(opt))
    for opt in options:
        if opt not in (required_options + optional_options):
            msg = 'Unknown config option to `{0}`: {1}'
            _logger.warn(msg.format(_class, opt))
Helper method to check options. Arguments: _class -- the original class that takes received the options. required_options -- the options that are required. If they are not present, a ConfigurationError is raised. Given as a tuple. optional_options -- the options that are optional. Given options that are not present in `optional_options` nor in `required_options` will be logged as unrecognized. Given as a tuple. options -- a dictionary of given options. Raises: ConfigurationError -- if any required option is missing.
codesearchnet
def add_logged_in_session(self, response=None): if (not response): response = self.get('go/api/pipelines.xml') self._set_session_cookie(response) if (not self._session_id): raise AuthenticationFailed('No session id extracted from request.') response = self.get('go/pipelines') match = re.search('name="authenticity_token".+?value="([^"]+)', response.read().decode('utf-8')) if match: self._authenticity_token = match.group(1) else: raise AuthenticationFailed('Authenticity token not found on page')
Make the request appear to be coming from a browser This is to interact with older parts of Go that doesn't have a proper API call to be made. What will be done: 1. If no response passed in a call to `go/api/pipelines.xml` is made to get a valid session 2. `JSESSIONID` will be populated from this request 3. A request to `go/pipelines` will be so the `authenticity_token` (CSRF) can be extracted. It will then silently be injected into `post_args` on any POST calls that doesn't start with `go/api` from this point. Args: response: a :class:`Response` object from a previously successful API call. So we won't have to query `go/api/pipelines.xml` unnecessarily. Raises: HTTPError: when the HTTP request fails. AuthenticationFailed: when failing to get the `session_id` or the `authenticity_token`.
codesearchnet
def show_fields(self, block=None):
    mapping = self._mapping()
    if block is None:
        return mapping
    elif block == "top":
        blocks = set()
        for key in mapping.keys():
            blocks.add(key.split(".")[0])
        block_map = {}
        for b in blocks:
            block_map[b] = "object"
    else:
        block_map = {}
        for key, value in mapping.items():
            if key.startswith(block):
                block_map[key] = value
    return block_map
Retrieve and return the mapping for the given metadata block. Arguments: block (str): The top-level field to fetch the mapping for (for example, ``"mdf"``), or the special values ``None`` for everything or ``"top"`` for just the top-level fields. **Default:** ``None``. index (str): The Search index to map. **Default:** The current index. Returns: dict: ``field:datatype`` pairs.
juraj-google-style
def restart(self, container, timeout=10):
    params = {'t': timeout}
    url = self._url('/containers/{0}/restart', container)
    conn_timeout = self.timeout
    if conn_timeout is not None:
        conn_timeout += timeout
    res = self._post(url, params=params, timeout=conn_timeout)
    self._raise_for_status(res)
Restart a container. Similar to the ``docker restart`` command. Args: container (str or dict): The container to restart. If a dict, the ``Id`` key is used. timeout (int): Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def check_prerequisites(prerequisites, checker, msg_tmpl='Prerequisites "{}" are required in method "{}" but not found, please install them first.'): def wrap(func): @functools.wraps(func) def wrapped_func(*args, **kwargs): requirements = ([prerequisites] if isinstance(prerequisites, str) else prerequisites) missing = [] for item in requirements: if (not checker(item)): missing.append(item) if missing: print(msg_tmpl.format(', '.join(missing), func.__name__)) raise RuntimeError('Prerequisites not meet.') else: return func(*args, **kwargs) return wrapped_func return wrap
A decorator factory to check if prerequisites are satisfied.

Args:
    prerequisites (str or list[str]): Prerequisites to be checked.
    checker (callable): The checker method that returns True if a prerequisite is met, False otherwise.
    msg_tmpl (str): The message template with two variables.

Returns:
    decorator: A specific decorator.
codesearchnet
def lasio_get(l, section, item, attrib='value', default=None, remap=None, funcs=None):
    remap = remap or {}
    item_to_fetch = remap.get(item, item)
    if item_to_fetch is None:
        return None
    try:
        obj = getattr(l, section)
        result = getattr(obj, item_to_fetch)[attrib]
    except:
        return default
    if funcs is not None:
        f = funcs.get(item, null)
        result = f(result)
    return result
Grabs, renames and transforms stuff from a lasio object. Args: l (lasio): a lasio instance. section (str): The LAS section to grab from, eg ``well`` item (str): The item in the LAS section to grab from, eg ``name`` attrib (str): The attribute of the item to grab, eg ``value`` default (str): What to return instead. remap (dict): Optional. A dict of 'old': 'new' LAS field names. funcs (dict): Optional. A dict of 'las field': function() for implementing a transform before loading. Can be a lambda. Returns: The transformed item.
codesearchnet
def get_ast_dict(belstr, component_type: str = ""):
    errors = []
    parsed = {}
    bels = list(belstr)
    char_locs, errors = parse_chars(bels, errors)
    parsed, errors = parse_functions(belstr, char_locs, parsed, errors)
    parsed, errors = parse_args(bels, char_locs, parsed, errors)
    parsed, errors = arg_types(parsed, errors)
    parsed, errors = parse_relations(belstr, char_locs, parsed, errors)
    parsed, errors = parse_nested(bels, char_locs, parsed, errors)
    errors = parsed_top_level_errors(parsed, errors)
    ast, errors = parsed_to_ast(parsed, errors, component_type=component_type)
    return ast, errors
Convert BEL string to AST dictionary Args: belstr: BEL string component_type: Empty string or 'subject' or 'object' to indicate that we are parsing the subject or object field input
juraj-google-style
def send(self, **req_kwargs): i = 0 while True: response = self._send(**req_kwargs).json() if ('error' not in response): break error = response['error'] if (error['code'] != 401): raise exception.APIException(error['code'], error) if (i >= self.RETRY_CNT): raise exception.APIException(error['code'], error) logger.info('Refreshing access token') self._auth.refresh() i += 1 return response
Send an authenticated request to a Google API. Automatically retries if the access token has expired. Args: **req_kwargs: Arbitrary keyword arguments to pass to Requests. Return: dict: The parsed JSON response. Raises: APIException: If the server returns an error. LoginException: If :py:meth:`login` has not been called.
codesearchnet
def img_to_array(img, data_format=None, dtype=None): data_format = backend.standardize_data_format(data_format) if dtype is None: dtype = backend.floatx() x = np.asarray(img, dtype=dtype) if len(x.shape) == 3: if data_format == 'channels_first': x = x.transpose(2, 0, 1) elif len(x.shape) == 2: if data_format == 'channels_first': x = x.reshape((1, x.shape[0], x.shape[1])) else: x = x.reshape((x.shape[0], x.shape[1], 1)) else: raise ValueError(f'Unsupported image shape: {x.shape}') return x
Converts a PIL Image instance to a NumPy array. Example: ```python from PIL import Image img_data = np.random.random(size=(100, 100, 3)) img = keras.utils.array_to_img(img_data) array = keras.utils.image.img_to_array(img) ``` Args: img: Input PIL Image instance. data_format: Image data format, can be either `"channels_first"` or `"channels_last"`. Defaults to `None`, in which case the global setting `keras.backend.image_data_format()` is used (unless you changed it, it defaults to `"channels_last"`). dtype: Dtype to use. `None` means the global setting `keras.backend.floatx()` is used (unless you changed it, it defaults to `"float32"`). Returns: A 3D NumPy array.
github-repos
def _RemoveAuthorizedKeys(self, user):
    pw_entry = self._GetUser(user)
    if not pw_entry:
        return
    home_dir = pw_entry.pw_dir
    authorized_keys_file = os.path.join(home_dir, '.ssh', 'authorized_keys')
    if os.path.exists(authorized_keys_file):
        try:
            os.remove(authorized_keys_file)
        except OSError as e:
            message = 'Could not remove authorized keys for user %s. %s.'
            self.logger.warning(message, user, str(e))
Remove a Linux user account's authorized keys file to prevent login. Args: user: string, the Linux user account to remove access.
codesearchnet
def get_platform():
    global PLATFORM
    cmd = 'uname'
    out, err = run_shell_cmd(cmd)
    platform_detected = out.strip().lower()
    if platform_detected != 'linux':
        if err and FLAGS.debug:
            print('Error in detecting platform:\n %s' % str(err))
        print('Error: Detected unsupported operating system.\nStopping...')
        sys.exit(1)
    else:
        PLATFORM = platform_detected
    return PLATFORM
Retrieves platform information. Currently the script only support linux. If other platoforms such as Windows or MacOS is detected, it throws an error and terminates. Returns: String that is platform type. e.g. 'linux'
github-repos
def __init__(self, lexer=None, **kwargs):
    if lexer is not None:
        if isinstance(lexer, JbossLexer):
            self.lexer = lexer.lexer
        else:
            self.lexer = lexer
    else:
        self.lexer = JbossLexer().lexer
    kwargs.setdefault('debug', False)
    kwargs.setdefault('write_tables', False)
    self.parser = ply.yacc.yacc(module=self, **kwargs)
Constructs the JsonParser based on the grammar contained herein. Successful construction builds the ply.yacc instance and sets self.parser. Args: lexer: A ply.lex or JsonLexer instance that will produce JSON_TOKENS.
juraj-google-style
def parents(self, as_resources=False):
    parents = [o for s, p, o in self.rdf.graph.triples((None, self.rdf.prefixes.fedora.hasParent, None))]
    if as_resources:
        logger.debug('retrieving parent as resource')
        parents = [self.repo.get_resource(parent) for parent in parents]
    return parents
method to return hierarchical parents of this resource Args: as_resources (bool): if True, opens each as appropriate resource type instead of return URI only Returns: (list): list of resources
juraj-google-style
def find_connected_atoms(struct, tolerance=0.45, ldict=JmolNN().el_radius): n_atoms = len(struct.species) fc = np.array(struct.frac_coords) species = list(map(str, struct.species)) for (i, item) in enumerate(species): if (not (item in ldict.keys())): species[i] = str(Specie.from_string(item).element) latmat = struct.lattice.matrix connected_list = [] for i in range(n_atoms): for j in range((i + 1), n_atoms): max_bond_length = ((ldict[species[i]] + ldict[species[j]]) + tolerance) add_ij = False for move_cell in itertools.product([0, 1, (- 1)], [0, 1, (- 1)], [0, 1, (- 1)]): if (not add_ij): frac_diff = ((fc[j] + move_cell) - fc[i]) distance_ij = np.dot(latmat.T, frac_diff) if (np.linalg.norm(distance_ij) < max_bond_length): add_ij = True if add_ij: connected_list.append([i, j]) return np.array(connected_list)
Finds the list of bonded atoms. Args: struct (Structure): Input structure tolerance: length in angstroms used in finding bonded atoms. Two atoms are considered bonded if (radius of atom 1) + (radius of atom 2) + (tolerance) < (distance between atoms 1 and 2). Default value = 0.45, the value used by JMol and Cheon et al. ldict: dictionary of bond lengths used in finding bonded atoms. Values from JMol are used as default standardize: works with conventional standard structures if True. It is recommended to keep this as True. Returns: connected_list: A numpy array of shape (number of bonded pairs, 2); each row of is of the form [atomi, atomj]. atomi and atomj are the indices of the atoms in the input structure. If any image of atomj is bonded to atomi with periodic boundary conditions, [atomi, atomj] is included in the list. If atomi is bonded to multiple images of atomj, it is only counted once.
codesearchnet
def update_config(self, config, timeout=-1):
    return self._client.update(config, uri=self.URI + '/config', timeout=timeout)
Updates the remote server configuration and the automatic backup schedule for backup. Args: config (dict): Object to update. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stop waiting for its completion. Returns: dict: Backup details.
codesearchnet
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) bos_token_id = [1] if self.add_bos_token else [] eos_token_id = [1] if self.add_eos_token else [] if token_ids_1 is None: return bos_token_id + [0] * len(token_ids_0) + eos_token_id return bos_token_id + [0] * len(token_ids_0) + eos_token_id + bos_token_id + [0] * len(token_ids_1) + eos_token_id
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
github-repos
def fetch(self, transfer_id, data={}, **kwargs):
    return super(Transfer, self).fetch(transfer_id, data, **kwargs)
Fetch Transfer for given Id Args: transfer_id : Id for which transfer object has to be retrieved Returns: Transfer dict for given transfer Id
juraj-google-style
async def verify_task_types(chain):
    valid_task_types = get_valid_task_types()
    task_count = {}
    for obj in chain.get_all_links_in_chain():
        task_type = obj.task_type
        log.info("Verifying {} {} as a {} task...".format(obj.name, obj.task_id, task_type))
        task_count.setdefault(task_type, 0)
        task_count[task_type] += 1
        await valid_task_types[task_type](chain, obj)
    return task_count
Verify the task type (e.g. decision, build) of each link in the chain. Args: chain (ChainOfTrust): the chain we're operating on Returns: dict: mapping task type to the number of links.
juraj-google-style
def __init__(self, name: str, ctx: 'context.Context'): super().__init__(name, ctx) self._cls = None self.members = datatypes.MonitorDict() self._instance_type_parameters: 'datatypes.AliasingMonitorDict[str, cfg.Variable]' = datatypes.AliasingMonitorDict() self._maybe_missing_members: bool | None = None self._type_key: 'frozenset[_base.BaseValue | _typing.LateAnnotation | tuple[str, frozenset]] | None' = None self._fullhash = None self._cached_changestamps = self._get_changestamps()
Initialize a SimpleValue. Args: name: Name of this value. For debugging and error reporting. ctx: The abstract context.
github-repos
def looks_like_url(url):
    if not isinstance(url, basestring):
        return False
    if not isinstance(url, basestring) or len(url) >= 1024 or not cre_url.match(url):
        return False
    return True
Simplified check to see if the text appears to be a URL. Similar to `urlparse` but much more basic. Returns: True if the url str appears to be valid. False otherwise. >>> url = looks_like_url("totalgood.org") >>> bool(url) True
codesearchnet
def _check_module_is_image_embedding(module_spec): issues = [] input_info_dict = module_spec.get_input_info_dict() if (list(input_info_dict.keys()) != ["images"] or input_info_dict["images"].dtype != tf.float32): issues.append("Module 'default' signature must require a single input, " "which must have type float32 and name 'images'.") else: try: image_util.get_expected_image_size(module_spec) except ValueError as e: issues.append("Module does not support hub.get_expected_image_size(); " "original error was:\n" + str(e)) output_info_dict = module_spec.get_output_info_dict() if "default" not in output_info_dict: issues.append("Module 'default' signature must have a 'default' output.") else: output_type = output_info_dict["default"].dtype output_shape = output_info_dict["default"].get_shape() if not (output_type == tf.float32 and output_shape.ndims == 2 and output_shape.dims[1].value): issues.append("Module 'default' signature must have a 'default' output " "of tf.Tensor(shape=(_,K), dtype=float32).") if issues: raise ValueError("Module is not usable as image embedding: %r" % issues)
Raises ValueError if `module_spec` is not usable as image embedding.

Args:
    module_spec: A `_ModuleSpec` to test.

Raises:
    ValueError: if `module_spec` default signature is not compatible with mapping an "images" input to a Tensor(float32, shape=(_,K)).
juraj-google-style
def _CheckIsFile(self, file_entry):
    if definitions.FILE_ENTRY_TYPE_FILE not in self._file_entry_types:
        return False
    return file_entry.IsFile()
Checks the is_file find specification. Args: file_entry (FileEntry): file entry. Returns: bool: True if the file entry matches the find specification, False if not.
juraj-google-style
def grep(regex, output):
    lines = output.decode('utf-8').strip().splitlines()
    results = []
    for line in lines:
        if re.search(regex, line):
            results.append(line.strip())
    return results
Similar to linux's `grep`, this returns the line in an output stream that matches a given regex pattern. It does not rely on the `grep` binary and is not sensitive to line endings, so it can be used cross-platform. Args: regex: string, a regex that matches the expected pattern. output: byte string, the raw output of the adb cmd. Returns: A list of strings, all of which are output lines that matches the regex pattern.
codesearchnet
def record_corrected_value(self, value, expected_interval, count=1):
    while True:
        if not self.record_value(value, count):
            return False
        if value <= expected_interval or expected_interval <= 0:
            return True
        value -= expected_interval
Record a new value into the histogram and correct for coordinated omission if needed Args: value: the value to record (must be in the valid range) expected_interval: the expected interval between 2 value samples count: incremental count (defaults to 1)
juraj-google-style
def parse_response(service, response, search_type): _LOG.debug('Parse response "%s" from service "%s" of type "%s"', response, service, search_type) items = [] if ('searchResult' in response): response = response['searchResult'] elif ('getMetadataResult' in response): response = response['getMetadataResult'] else: raise ValueError('"response" should contain either the key "searchResult" or "getMetadataResult"') search_metadata = {'number_returned': response['count'], 'total_matches': None, 'search_type': search_type, 'update_id': None} for result_type in ('mediaCollection', 'mediaMetadata'): result_type_proper = (result_type[0].upper() + result_type[1:]) raw_items = response.get(result_type, []) if isinstance(raw_items, OrderedDict): raw_items = [raw_items] for raw_item in raw_items: class_key = (result_type_proper + raw_item['itemType'].title()) cls = get_class(class_key) items.append(cls.from_music_service(service, raw_item)) return SearchResult(items, **search_metadata)
Parse the response to a music service query and return a SearchResult Args: service (MusicService): The music service that produced the response response (OrderedDict): The response from the soap client call search_type (str): A string that indicates the search type that the response is from Returns: SearchResult: A SearchResult object
codesearchnet
def _process(compressor, input_filename, output_filename):
    compressor(input_filename, output_filename)
    result_size = os.path.getsize(output_filename)
    return _CompressorResult(result_size, output_filename, compressor.__name__)
Helper function to compress an image. Returns: _CompressorResult named tuple, with the resulting size, the name of the output file and the name of the compressor.
codesearchnet
def percent_point(self, U):
    self.check_fit()
    return norm.ppf(U, loc=self.mean, scale=self.std)
Given a cumulated distribution value, returns a value in original space. Arguments: U: `np.ndarray` of shape (n, 1) and values in [0,1] Returns: `np.ndarray`: Estimated values in original space.
juraj-google-style
def open_writer(self, init_result, uid):
    raise NotImplementedError
Opens a writer for writing a bundle of elements to the sink. Args: init_result: the result of initialize_write() invocation. uid: a unique identifier generated by the system. Returns: an ``iobase.Writer`` that can be used to write a bundle of records to the current sink.
github-repos
def load_model_from_hdf5(filepath, custom_objects=None, compile=True): if h5py is None: raise ImportError('`load_model()` using h5 format requires h5py. Could not import h5py.') if not custom_objects: custom_objects = {} gco = object_registration.GLOBAL_CUSTOM_OBJECTS tlco = global_state.get_global_attribute('custom_objects_scope_dict', {}) custom_objects = {**custom_objects, **gco, **tlco} opened_new_file = not isinstance(filepath, h5py.File) if opened_new_file: f = h5py.File(filepath, mode='r') else: f = filepath model = None try: model_config = f.attrs.get('model_config') if model_config is None: raise ValueError(f'No model config found in the file at {filepath}.') if hasattr(model_config, 'decode'): model_config = model_config.decode('utf-8') model_config = json_utils.decode(model_config) with saving_options.keras_option_scope(use_legacy_config=True): model = saving_utils.model_from_config(model_config, custom_objects=custom_objects) load_weights_from_hdf5_group(f['model_weights'], model) if compile: training_config = f.attrs.get('training_config') if hasattr(training_config, 'decode'): training_config = training_config.decode('utf-8') if training_config is None: logging.warning('No training configuration found in the save file, so the model was *not* compiled. Compile it manually.') return model training_config = json_utils.decode(training_config) model.compile(**saving_utils.compile_args_from_training_config(training_config, custom_objects)) saving_utils.try_build_compiled_arguments(model) if 'optimizer_weights' in f: try: if isinstance(model.optimizer, optimizers.Optimizer): model.optimizer.build(model._trainable_variables) else: model.optimizer._create_all_weights(model._trainable_variables) except (NotImplementedError, AttributeError): logging.warning('Error when creating the weights of optimizer {}, making it impossible to restore the saved optimizer state. As a result, your model is starting with a freshly initialized optimizer.') optimizer_weight_values = load_optimizer_weights_from_hdf5_group(f) try: model.optimizer.set_weights(optimizer_weight_values) except ValueError: logging.warning('Error in loading the saved optimizer state. As a result, your model is starting with a freshly initialized optimizer.') finally: if opened_new_file: f.close() return model
Loads a model saved via `save_model_to_hdf5`. Args: filepath: One of the following: - String, path to the saved model - `h5py.File` object from which to load the model custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. compile: Boolean, whether to compile the model after loading. Returns: A Keras model instance. If an optimizer was found as part of the saved model, the model is already compiled. Otherwise, the model is uncompiled and a warning will be displayed. When `compile` is set to `False`, the compilation is omitted without any warning. Raises: ImportError: if h5py is not available. ValueError: In case of an invalid savefile.
github-repos
def AddCalledComponent(self, component, target, args, filename, lineno, capacity, action=CALLED_CALLABLE):
    element = FireTraceElement(component=component, action=action, target=target, args=args,
                               filename=filename, lineno=lineno, capacity=capacity)
    self.elements.append(element)
Adds an element to the trace indicating that a component was called. Also applies to instantiating a class. Args: component: The result of calling the callable. target: The name of the callable. args: The args consumed in order to call this callable. filename: The file in which the callable is defined, or None if N/A. lineno: The line number on which the callable is defined, or None if N/A. capacity: (bool) Whether the callable could have accepted additional args. action: The value to include as the action in the FireTraceElement.
github-repos
def get_appliance(self, id_or_uri, fields=''):
    uri = self.URI + '/image-streamer-appliances/' + extract_id_from_uri(id_or_uri)
    if fields:
        uri += '?fields=' + fields
    return self._client.get(uri)
Gets the particular Image Streamer resource based on its ID or URI. Args: id_or_uri: Can be either the Os Deployment Server ID or the URI fields: Specifies which fields should be returned in the result. Returns: dict: Image Streamer resource.
codesearchnet
def copy_fhir_type_with_root_element_definition(self, root_element_definition: message.Message) -> 'FhirPathDataType':
    return dataclasses.replace(self, root_element_definition=root_element_definition)
Copies the type and sets the root_element_definition. Args: root_element_definition: Element definition to set for the type. Returns: A copy of the original type with the root_element_definition set.
github-repos
async def find_user(cls, config: Config, user: str) -> Tuple[str, str]:
    with open(config.users_file, 'r') as users_file:
        for line in users_file:
            this_user, user_dir, password = line.split(':', 2)
            if user == this_user:
                return password.rstrip('\r\n'), user_dir or user
    raise InvalidAuth()
If the given user ID exists, return its expected password and mailbox path. Override this method to implement custom login logic. Args: config: The maildir config object. user: The expected user ID. Raises: InvalidAuth: The user ID was not valid.
juraj-google-style
def calculate_bv_sum(site, nn_list, scale_factor=1.0): el1 = Element(site.specie.symbol) bvsum = 0 for (nn, dist) in nn_list: el2 = Element(nn.specie.symbol) if (((el1 in ELECTRONEG) or (el2 in ELECTRONEG)) and (el1 != el2)): r1 = BV_PARAMS[el1]['r'] r2 = BV_PARAMS[el2]['r'] c1 = BV_PARAMS[el1]['c'] c2 = BV_PARAMS[el2]['c'] R = ((r1 + r2) - (((r1 * r2) * ((sqrt(c1) - sqrt(c2)) ** 2)) / ((c1 * r1) + (c2 * r2)))) vij = exp(((R - (dist * scale_factor)) / 0.31)) bvsum += (vij * (1 if (el1.X < el2.X) else (- 1))) return bvsum
Calculates the BV sum of a site. Args: site: The site nn_list: List of nearest neighbors in the format [(nn_site, dist), ...]. scale_factor: A scale factor to be applied. This is useful for scaling distance, esp in the case of calculation-relaxed structures which may tend to under (GGA) or over bind (LDA).
codesearchnet
def _usage_id_from_node(self, node, parent_id, id_generator=None): if (id_generator is not None): warnings.warn('Passing an id_generator directly is deprecated in favor of constructing the Runtime with the id_generator', DeprecationWarning, stacklevel=3) id_generator = (id_generator or self.id_generator) block_type = node.tag node.attrib.pop('xblock-family', None) def_id = id_generator.create_definition(block_type) usage_id = id_generator.create_usage(def_id) keys = ScopeIds(None, block_type, def_id, usage_id) block_class = self.mixologist.mix(self.load_block_type(block_type)) aside_children = [] for child in node.iterchildren(): xblock_family = child.attrib.pop('xblock-family', None) if xblock_family: xblock_family = self._family_id_to_superclass(xblock_family) if issubclass(xblock_family, XBlockAside): aside_children.append(child) for child in aside_children: self._aside_from_xml(child, def_id, usage_id, id_generator) node.remove(child) block = block_class.parse_xml(node, self, keys, id_generator) block.parent = parent_id block.save() return usage_id
Create a new usage id from an XML dom node. Args: node (lxml.etree.Element): The DOM node to interpret. parent_id: The usage ID of the parent block id_generator (IdGenerator): The :class:`.IdGenerator` to use for creating ids
codesearchnet
def start_services(self, service_alises):
    for name in service_alises:
        if name not in self._service_objects:
            raise Error(
                self._device,
                'No service is registered under the name "%s", cannot start.' % name)
        service = self._service_objects[name]
        if not service.is_alive:
            service.start()
Starts the specified services. Services will be started in the order specified by the input list. No-op for services that are already running. Args: service_alises: list of strings, the aliases of services to start.
github-repos
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    super(CancelResponsePayload, self).read(input_stream, kmip_version=kmip_version)
    local_stream = utils.BytearrayStream(input_stream.read(self.length))

    if self.is_tag_next(enums.Tags.ASYNCHRONOUS_CORRELATION_VALUE, local_stream):
        self._asynchronous_correlation_value = primitives.ByteString(
            tag=enums.Tags.ASYNCHRONOUS_CORRELATION_VALUE)
        self._asynchronous_correlation_value.read(local_stream, kmip_version=kmip_version)

    if self.is_tag_next(enums.Tags.CANCELLATION_RESULT, local_stream):
        self._cancellation_result = primitives.Enumeration(
            enums.CancellationResult, tag=enums.Tags.CANCELLATION_RESULT)
        self._cancellation_result.read(local_stream, kmip_version=kmip_version)

    self.is_oversized(local_stream)
Read the data encoding the Cancel response payload and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the data attribute is missing from the encoded payload.
codesearchnet
def get_case(family_lines, family_type='ped', vcf_path=None):
    family = None
    LOG.info("Parsing family information")

    family_parser = FamilyParser(family_lines, family_type)
    families = list(family_parser.families.keys())
    LOG.info("Found families {0}".format(', '.join(families)))
    if len(families) > 1:
        raise CaseError("Only one family per load can be used")

    family = family_parser.families[families[0]]
    return family
Return a ped_parser family (case) from a family file.

Create a family object with the case data. If no family file is given,
create one from the VCF.

Args:
    family_lines (iterator): The family lines.
    family_type (str): The format of the family lines.
    vcf_path (str): Path to the VCF.

Returns:
    family (Family): A ped_parser family object.
juraj-google-style
def _FormatSizeInUnitsOf1024(self, size):
    magnitude_1024 = 0
    used_memory_1024 = float(size)
    while used_memory_1024 >= 1024:
        used_memory_1024 /= 1024
        magnitude_1024 += 1

    if 0 < magnitude_1024 <= 7:
        return '{0:.1f} {1:s}'.format(used_memory_1024, self._UNITS_1024[magnitude_1024])

    return '{0:d} B'.format(size)
Represents a number of bytes in units of 1024. Args: size (int): size in bytes. Returns: str: human readable string of the size.
codesearchnet
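A standalone sketch of the same units-of-1024 loop; the unit-name list is an assumption, since the class attribute _UNITS_1024 is not shown in the snippet above.

_UNITS_1024 = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB']  # assumed ordering

def format_size_1024(size):
    magnitude = 0
    value = float(size)
    while value >= 1024:
        value /= 1024
        magnitude += 1
    if 0 < magnitude <= 7:
        return '{0:.1f} {1:s}'.format(value, _UNITS_1024[magnitude])
    return '{0:d} B'.format(size)

print(format_size_1024(1536))           # 1.5 KiB
print(format_size_1024(3 * 1024 ** 3))  # 3.0 GiB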
def resolve_peer_creds(self):
    if not IS_UID_GID_RESOLVABLE:
        raise NotImplementedError(
            'UID/GID lookup is unavailable under current platform. '
            'It can only be done under UNIX-like OS '
            'but not under the Google App Engine')
    elif not self.peercreds_resolve_enabled:
        raise RuntimeError('UID/GID lookup is disabled within this server')

    user = pwd.getpwuid(self.peer_uid).pw_name
    group = grp.getgrgid(self.peer_gid).gr_name
    return user, group
Return the username and group tuple of the peercreds if available.

Raises:
    NotImplementedError: if the OS does not support UID/GID lookup.
    RuntimeError: if UID/GID lookup is unsupported or disabled for this server.
codesearchnet
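For reference, the pwd/grp lookups used above can be tried against the current process on a UNIX-like system; this only illustrates the stdlib calls, not the socket peer-credential plumbing.

import grp
import os
import pwd

uid, gid = os.getuid(), os.getgid()
print(pwd.getpwuid(uid).pw_name)   # user name for this UID
print(grp.getgrgid(gid).gr_name)   # group name for this GID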
def from_string(cls, cl_function, dependencies=(), nmr_constraints=None):
    return_type, function_name, parameter_list, body = split_cl_function(cl_function)
    return SimpleConstraintFunction(return_type, function_name, parameter_list, body,
                                    dependencies=dependencies, nmr_constraints=nmr_constraints)
Parse the given CL function into a SimpleConstraintFunction object.

Args:
    cl_function (str): the function we wish to turn into an object
    dependencies (list or tuple of CLLibrary): The list of CL libraries this
        function depends on
    nmr_constraints (int): the number of constraints defined by this function

Returns:
    SimpleConstraintFunction: the parsed constraint function
juraj-google-style
def log_softmax(x, axis=-1): return ops.log_softmax(x, axis=axis)
Log-Softmax activation function. Each input vector is handled independently. The `axis` argument sets which axis of the input the function is applied along. Args: x: Input tensor. axis: Integer, axis along which the softmax is applied.
github-repos
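A NumPy sketch of what log-softmax computes, log_softmax(x) = x - logsumexp(x) along the chosen axis; this mirrors the mathematical definition rather than the backend op used above.

import numpy as np

def log_softmax_np(x, axis=-1):
    shifted = x - np.max(x, axis=axis, keepdims=True)  # subtract max for numerical stability
    return shifted - np.log(np.sum(np.exp(shifted), axis=axis, keepdims=True))

x = np.array([[1.0, 2.0, 3.0]])
print(log_softmax_np(x))                       # ~[[-2.408 -1.408 -0.408]]
print(np.exp(log_softmax_np(x)).sum(axis=-1))  # ~[1.], exp(log_softmax) is a softmax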
def get_next_as_optional(self): raise NotImplementedError('Iterator.get_next_as_optional()')
Returns the next element wrapped in `tf.experimental.Optional`.

If the iterator has reached the end of the sequence, the returned
`tf.experimental.Optional` will have no value.

>>> dataset = tf.data.Dataset.from_tensors(42)
>>> iterator = iter(dataset)
>>> optional = iterator.get_next_as_optional()
>>> print(optional.has_value())
tf.Tensor(True, shape=(), dtype=bool)
>>> print(optional.get_value())
tf.Tensor(42, shape=(), dtype=int32)
>>> optional = iterator.get_next_as_optional()
>>> print(optional.has_value())
tf.Tensor(False, shape=(), dtype=bool)

Returns:
    A `tf.experimental.Optional` object representing the next element.
github-repos
def with_subject(self, subject):
    return self.__class__(
        self._signer,
        service_account_email=self._service_account_email,
        scopes=self._scopes,
        token_uri=self._token_uri,
        subject=subject,
        project_id=self._project_id,
        additional_claims=self._additional_claims.copy())
Create a copy of these credentials with the specified subject. Args: subject (str): The subject claim. Returns: google.auth.service_account.Credentials: A new credentials instance.
codesearchnet
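A hedged usage sketch for domain-wide delegation with these credentials; the key file path, scope, and delegated subject are placeholders.

from google.oauth2 import service_account

creds = service_account.Credentials.from_service_account_file(
    '/path/to/service-account.json',                             # placeholder path
    scopes=['https://www.googleapis.com/auth/gmail.readonly'])   # placeholder scope
delegated = creds.with_subject('user@example.com')               # act on behalf of this user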
def set(self, refresh_token):
    logger.info('Saving refresh_token to %s', repr(self._filename))
    try:
        with open(self._filename, 'w') as f:
            f.write(refresh_token)
    except IOError as e:
        logger.warning('Failed to save refresh_token: %s', e)
Cache a refresh token, ignoring any failure. Args: refresh_token (str): Refresh token to cache.
codesearchnet
def _GetTaskStorageFilePath(self, task):
    filename = '{0:s}.plaso'.format(task.identifier)
    return os.path.join(self._task_storage_path, filename)
Retrieves the path of a task storage file in the temporary directory. Args: task (Task): task. Returns: str: path of a task storage file in the temporary directory.
codesearchnet
def get_acmg(acmg_terms):
    prediction = 'uncertain_significance'
    pvs = False
    ps_terms = []
    pm_terms = []
    pp_terms = []
    ba = False
    bs_terms = []
    bp_terms = []

    for term in acmg_terms:
        if term.startswith('PVS'):
            pvs = True
        elif term.startswith('PS'):
            ps_terms.append(term)
        elif term.startswith('PM'):
            pm_terms.append(term)
        elif term.startswith('PP'):
            pp_terms.append(term)
        elif term.startswith('BA'):
            ba = True
        elif term.startswith('BS'):
            bs_terms.append(term)
        elif term.startswith('BP'):
            bp_terms.append(term)

    pathogenic = is_pathogenic(pvs, ps_terms, pm_terms, pp_terms)
    likely_pathogenic = is_likely_pathogenic(pvs, ps_terms, pm_terms, pp_terms)
    benign = is_benign(ba, bs_terms)
    likely_benign = is_likely_benign(bs_terms, bp_terms)

    if pathogenic or likely_pathogenic:
        if benign or likely_benign:
            prediction = 'uncertain_significance'
        elif pathogenic:
            prediction = 'pathogenic'
        else:
            prediction = 'likely_pathogenic'
    else:
        if benign:
            prediction = 'benign'
        if likely_benign:
            prediction = 'likely_benign'

    return prediction
Use the algorithm described in the ACMG paper to get an ACMG classification.

Args:
    acmg_terms(set(str)): A collection of prediction terms.

Returns:
    prediction(str): One of 'uncertain_significance', 'benign',
        'likely_benign', 'likely_pathogenic' or 'pathogenic'.
juraj-google-style
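A hedged usage sketch: the criteria set below is illustrative, and the expected classification assumes the helper predicates (is_pathogenic and friends) implement the published ACMG combining rules, which are not shown here.

terms = {'PVS1', 'PS1', 'PM2'}
print(get_acmg(terms))   # expected: 'pathogenic' (one very strong plus one strong criterion)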
def _set_value(self, slot_record):
    if slot_record.status == _SlotRecord.FILLED:
        self.filled = True
        self._filler_pipeline_key = _SlotRecord.filler.get_value_for_datastore(slot_record)
        self._fill_datetime = slot_record.fill_time
        self._value = slot_record.value
Sets the value of this slot based on its corresponding _SlotRecord. Does nothing if the slot has not yet been filled. Args: slot_record: The _SlotRecord containing this Slot's value.
juraj-google-style
def ProcessFile(filename, vlevel, extra_check_functions=None):
    _SetVerboseLevel(vlevel)
    _BackupFilters()

    if not ProcessConfigOverrides(filename):
        _RestoreFilters()
        return

    lf_lines = []
    crlf_lines = []
    try:
        if filename == '-':
            lines = codecs.StreamReaderWriter(sys.stdin,
                                              codecs.getreader('utf8'),
                                              codecs.getwriter('utf8'),
                                              'replace').read().split('\n')
        else:
            lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')

        for linenum in range(len(lines) - 1):
            if lines[linenum].endswith('\r'):
                lines[linenum] = lines[linenum].rstrip('\r')
                crlf_lines.append(linenum + 1)
            else:
                lf_lines.append(linenum + 1)

    except IOError:
        _cpplint_state.PrintError(
            "Skipping input '%s': Can't open for reading\n" % filename)
        _RestoreFilters()
        return

    file_extension = filename[filename.rfind('.') + 1:]

    if filename != '-' and file_extension not in GetAllExtensions():
        bazel_gen_files = set([
            "external/local_config_cc/libtool",
            "external/local_config_cc/make_hashed_objlist.py",
            "external/local_config_cc/wrapped_ar",
            "external/local_config_cc/wrapped_clang",
            "external/local_config_cc/xcrunwrapper.sh",
        ])
        if not filename in bazel_gen_files:
            _cpplint_state.PrintError('Ignoring %s; not a valid file name '
                                      '(%s)\n' % (filename, ', '.join(GetAllExtensions())))
    else:
        ProcessFileData(filename, file_extension, lines, Error,
                        extra_check_functions)

        if lf_lines and crlf_lines:
            for linenum in crlf_lines:
                Error(filename, linenum, 'whitespace/newline', 1,
                      'Unexpected \\r (^M) found; better to use only \\n')

    _RestoreFilters()
Does google-lint on a single file.

Args:
    filename: The name of the file to parse.
    vlevel: The level of errors to report. Every error of confidence
        >= verbose_level will be reported. 0 is a good default.
    extra_check_functions: An array of additional check functions that will
        be run on each source line. Each function takes 4 arguments:
        filename, clean_lines, line, error
juraj-google-style
def InTemplateArgumentList(self, clean_lines, linenum, pos):
    while linenum < clean_lines.NumLines():
        line = clean_lines.elided[linenum]
        match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
        if not match:
            linenum += 1
            pos = 0
            continue
        token = match.group(1)
        pos += len(match.group(0))

        if token in ('{', '}', ';'):
            return False
        if token in ('>', '=', '[', ']', '.'):
            return True

        if token != '<':
            pos += 1
            if pos >= len(line):
                linenum += 1
                pos = 0
            continue

        (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
        if end_pos < 0:
            return False
        linenum = end_line
        pos = end_pos

    return False
Check if current position is inside template argument list. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: position just after the suspected template argument. Returns: True if (linenum, pos) is inside template arguments.
juraj-google-style
def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs):
    if 'quantization_config' in config_dict:
        config_dict = dict(
            sparsity_config=config_dict.get('sparsity_config'),
            **config_dict['quantization_config'])
    return super().from_dict(config_dict, return_unused_kwargs=return_unused_kwargs, **kwargs)
Instantiates a [`CompressedTensorsConfig`] from a Python dictionary of parameters. Optionally unwraps any args from the nested quantization_config Args: config_dict (`Dict[str, Any]`): Dictionary that will be used to instantiate the configuration object. return_unused_kwargs (`bool`,*optional*, defaults to `False`): Whether or not to return a list of unused keyword arguments. Used for `from_pretrained` method in `PreTrainedModel`. kwargs (`Dict[str, Any]`): Additional parameters from which to initialize the configuration object. Returns: [`QuantizationConfigMixin`]: The configuration object instantiated from those parameters.
github-repos
def update_splits_if_different(self, split_dict):
    assert isinstance(split_dict, splits_lib.SplitDict)
    if self._splits and splits_lib.check_splits_equals(self._splits, split_dict):
        return
    self._set_splits(split_dict)
Overwrite the splits if they are different from the current ones. * If splits aren't already defined or different (ex: different number of shards), then the new split dict is used. This will trigger stats computation during download_and_prepare. * If splits are already defined in DatasetInfo and similar (same names and shards): keep the restored split which contains the statistics (restored from GCS or file) Args: split_dict: `tfds.core.SplitDict`, the new split
codesearchnet
def compile_dependencies(self, sourcepath, include_self=False):
    items = self.inspector.parents(sourcepath)
    if include_self:
        items.add(sourcepath)
    return filter(None, [self.compile_source(item) for item in items])
Apply compile on all dependencies.

Args:
    sourcepath (string): Sass source path to compile to its destination
        using project settings.

Keyword Arguments:
    include_self (bool): If ``True`` the given sourcepath is added to the
        items to compile, else only its dependencies are compiled.
juraj-google-style
def init_properties(env='dev', app='unnecessary', **_):
    aws_env = boto3.session.Session(profile_name=env)
    s3client = aws_env.resource('s3')

    generated = get_details(app=app, env=env)
    archaius = generated.archaius()
    archaius_file = '{path}/application.properties'.format(path=archaius['path'])

    try:
        s3client.Object(archaius['bucket'], archaius_file).get()
        LOG.info('Found: %(bucket)s/%(file)s',
                 {'bucket': archaius['bucket'], 'file': archaius_file})
        return True
    except boto3.exceptions.botocore.client.ClientError:
        s3client.Object(archaius['bucket'], archaius_file).put()
        LOG.info('Created: %(bucket)s/%(file)s',
                 {'bucket': archaius['bucket'], 'file': archaius_file})
        return False
Make sure _application.properties_ file exists in S3. For Applications with Archaius support, there needs to be a file where the cloud environment variable points to. Args: env (str): Deployment environment/account, i.e. dev, stage, prod. app (str): GitLab Project name. Returns: True when application.properties was found. False when application.properties needed to be created.
juraj-google-style
def parseArgs(args):
    if not isinstance(args, (list, tuple)):
        raise ValueError('args is not a list or tuple')

    dRet = {}
    for s in args:
        oRes = re.match(u'^--([^=]+)(?:=(.+))?$', s)
        if oRes:
            mGroup2 = oRes.group(2)
            dRet[oRes.group(1)] = (not mGroup2 and True or mGroup2)
        else:
            try:
                dRet['?'].append(s)
            except KeyError:
                dRet['?'] = [s]

    return dRet
Parse Arguments

Used to parse the arguments passed to the script

Args:
    args (list): A list of strings representing arguments to a script

Returns:
    dict: Returns a dictionary with args as keys and the values sent with
        them or True for valueless arguments

Raises:
    ValueError: If args is not a list or tuple
juraj-google-style
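A usage sketch for parseArgs; flags with values keep their value, bare flags become True, and anything that is not a --flag is collected under the '?' key.

parsed = parseArgs(['--verbose', '--out=report.txt', 'input.csv'])
# -> {'verbose': True, 'out': 'report.txt', '?': ['input.csv']}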
def redirect_stdout(new_stdout):
    old_stdout, sys.stdout = sys.stdout, new_stdout
    try:
        yield None
    finally:
        sys.stdout = old_stdout
Redirect the stdout Args: new_stdout (io.StringIO): New stdout to use instead
juraj-google-style
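A usage sketch assuming the generator above is wrapped with contextlib.contextmanager (the yield-based body suggests it, but the decorator is not shown); output printed inside the block lands in the buffer.

import io

buffer = io.StringIO()
with redirect_stdout(buffer):
    print('captured')
assert buffer.getvalue() == 'captured\n'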
def iter_variants_by_names(self, names):
    if not self.is_parallel:
        yield from super().iter_variants_by_names(names)
    else:
        for info, dosage in self._bgen.iter_variants_by_names(names):
            yield Genotypes(
                Variant(info.name,
                        CHROM_STR_ENCODE.get(info.chrom, info.chrom),
                        info.pos, [info.a1, info.a2]),
                dosage,
                reference=info.a1,
                coded=info.a2,
                multiallelic=True,
            )
Iterates over the genotypes for variants using a list of names. Args: names (list): The list of names for variant extraction.
juraj-google-style
def __init__(self, output_mediator):
    super(OutputModule, self).__init__()
    self._output_mediator = output_mediator
Initializes an output module. Args: output_mediator (OutputMediator): mediates interactions between output modules and other components, such as storage and dfvfs. Raises: ValueError: when there are unused keyword arguments.
juraj-google-style
def parse_function_params(params):
    function_meta = {'args': [], 'kwargs': {}}

    params_str = params.strip()
    if params_str == '':
        return function_meta

    args_list = params_str.split(',')
    for arg in args_list:
        arg = arg.strip()
        if '=' in arg:
            key, value = arg.split('=')
            function_meta['kwargs'][key.strip()] = parse_string_value(value.strip())
        else:
            function_meta['args'].append(parse_string_value(arg))

    return function_meta
parse function params to args and kwargs.

Args:
    params (str): function param in string

Returns:
    dict: function meta dict

        {
            "args": [],
            "kwargs": {}
        }

Examples:
    >>> parse_function_params("")
    {'args': [], 'kwargs': {}}

    >>> parse_function_params("5")
    {'args': [5], 'kwargs': {}}

    >>> parse_function_params("1, 2")
    {'args': [1, 2], 'kwargs': {}}

    >>> parse_function_params("a=1, b=2")
    {'args': [], 'kwargs': {'a': 1, 'b': 2}}

    >>> parse_function_params("1, 2, a=3, b=4")
    {'args': [1, 2], 'kwargs': {'a': 3, 'b': 4}}
codesearchnet
def relative_probability_from_lookup_table(self, jump_lookup_table):
    l1 = self.initial_site.label
    l2 = self.final_site.label
    c1 = self.initial_site.nn_occupation()
    c2 = self.final_site.nn_occupation()
    return jump_lookup_table.jump_probability[l1][l2][c1][c2]
Relative probability of accepting this jump from a lookup-table. Args: jump_lookup_table (LookupTable): the lookup table to be used for this jump. Returns: (Float): relative probability of accepting this jump.
juraj-google-style
def _add_remove_user_template(self, url, template_id, account_id=None, email_address=None):
    if not email_address and not account_id:
        raise HSException('No email address or account_id specified')

    data = {}
    if account_id is not None:
        data = {'account_id': account_id}
    else:
        data = {'email_address': email_address}

    request = self._get_request()
    response = request.post(url + template_id, data)
    return response
Add or Remove user from a Template We use this function for two tasks because they have the same API call Args: template_id (str): The id of the template account_id (str): ID of the account to add/remove access to/from email_address (str): The email_address of the account to add/remove access to/from Raises: HSException: If no email address or account_id specified Returns: A Template object
codesearchnet
def _module_info_from_proto(module_info_def, import_scope=None):
    graph = tf.get_default_graph()

    def prepend_name_scope(name_scope):
        return ops.prepend_name_scope(name_scope, import_scope)

    def process_leafs(name):
        return _path_to_graph_element(prepend_name_scope(name), graph)

    connected_subgraphs = []
    module_info = ModuleInfo(
        module_name=module_info_def.module_name,
        scope_name=prepend_name_scope(module_info_def.scope_name),
        class_name=module_info_def.class_name,
        connected_subgraphs=connected_subgraphs)
    for connected_subgraph_def in module_info_def.connected_subgraphs:
        connected_subgraph = ConnectedSubGraph(
            module=module_info,
            name_scope=prepend_name_scope(connected_subgraph_def.name_scope),
            inputs=_nested_from_proto(connected_subgraph_def.inputs, process_leafs),
            outputs=_nested_from_proto(connected_subgraph_def.outputs, process_leafs))
        connected_subgraphs.append(connected_subgraph)
    return module_info
Deserializes `module_info_def` proto.

Args:
    module_info_def: An instance of `module_pb2.SonnetModule`.
    import_scope: Optional `string`. Name scope to use.

Returns:
    An instance of `ModuleInfo`.

Raises:
    base_errors.ModuleInfoError: If the protobuf is of the wrong type or if
        some of its fields are missing.
codesearchnet
def debug(self, status=None, nids=None):
    nrows, ncols = get_terminal_size()

    sched_excfile = os.path.join(self.workdir, '_exceptions')
    if os.path.exists(sched_excfile):
        with open(sched_excfile, 'r') as fh:
            cprint('Found exceptions raised by the scheduler', 'red')
            cprint(fh.read(), color='red')
            return

    if status is not None:
        tasks = list(self.iflat_tasks(status=status, nids=nids))
    else:
        errors = list(self.iflat_tasks(status=self.S_ERROR, nids=nids))
        qcriticals = list(self.iflat_tasks(status=self.S_QCRITICAL, nids=nids))
        abicriticals = list(self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids))
        tasks = errors + qcriticals + abicriticals

    ntasks = 0
    for task in tasks:
        print(make_banner(str(task), width=ncols, mark='='))
        ntasks += 1

        for efname in ['qerr_file', 'stderr_file']:
            err_file = getattr(task, efname)
            if err_file.exists:
                s = err_file.read()
                if not s:
                    continue
                print(make_banner(str(err_file), width=ncols, mark='='))
                cprint(s, color='red')

        try:
            report = task.get_event_report()
            if report and report.num_errors:
                print(make_banner(os.path.basename(report.filename), width=ncols, mark='='))
                s = '\n'.join(str(e) for e in report.errors)
            else:
                s = None
        except Exception as exc:
            s = str(exc)

        count = 0
        if s is not None:
            cprint(s, color='red')
            count += 1

        if not count:
            log_files = task.tmpdir.list_filepaths(wildcard='*LOG_*')
            if not log_files:
                cprint('No *LOG_* file in tmpdir. This usually happens if you are running with many CPUs',
                       color='magenta')

            for log_file in log_files:
                try:
                    report = EventsParser().parse(log_file)
                    if report.errors:
                        print(report)
                        count += 1
                        break
                except Exception as exc:
                    cprint(str(exc), color='red')
                    count += 1
                    break

        if not count:
            cprint('Houston, we could not find any error message that can explain the problem',
                   color='magenta')

    print('Number of tasks analyzed: %d' % ntasks)
This method is usually used when the flow did not complete successfully.
It analyzes the files produced by the tasks to facilitate debugging.
Info is printed to stdout.

Args:
    status: If not None, only the tasks with this status are selected.
    nids: optional list of node identifiers used to filter the tasks.
codesearchnet
def serialize(self, accumulator): pass
Serialize an accumulator for a remote call. This function serializes an accumulator to be sent to a remote process. Args: accumulator: The accumulator to serialize. Returns: A byte string representing the passed accumulator.
github-repos
def copy(self, src, dst, other_system=None):
    copy_source = self.get_client_kwargs(src)
    copy_destination = self.get_client_kwargs(dst)
    with _handle_client_error():
        self.client.copy_object(CopySource=copy_source, **copy_destination)
Copy object of the same storage. Args: src (str): Path or URL. dst (str): Path or URL. other_system (pycosio._core.io_system.SystemBase subclass): Unused.
juraj-google-style
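For reference, the underlying boto3 call has this shape; the bucket and key names below are placeholders.

import boto3

s3 = boto3.client('s3')
s3.copy_object(
    CopySource={'Bucket': 'source-bucket', 'Key': 'data/file.bin'},  # placeholder source
    Bucket='dest-bucket',                                            # placeholder destination
    Key='data/file.bin')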
def regrep(filename, patterns, reverse=False, terminate_on_match=False, postprocess=str):
    compiled = {k: re.compile(v) for k, v in patterns.items()}
    matches = collections.defaultdict(list)
    gen = reverse_readfile(filename) if reverse else zopen(filename, 'rt')
    for i, l in enumerate(gen):
        for k, p in compiled.items():
            m = p.search(l)
            if m:
                matches[k].append([[postprocess(g) for g in m.groups()],
                                   -i if reverse else i])
        if terminate_on_match and all([len(matches.get(k, [])) for k in compiled.keys()]):
            break
    try:
        gen.close()
    except:
        pass
    return matches
A powerful regular expression version of grep.

Args:
    filename (str): Filename to grep.
    patterns (dict): A dict of patterns, e.g.,
        {"energy": "energy\(sigma->0\)\s+=\s+([\d\-\.]+)"}.
    reverse (bool): Read files in reverse. Defaults to false. Useful for
        large files, especially when used with terminate_on_match.
    terminate_on_match (bool): Whether to terminate when there is at least
        one match in each key in pattern.
    postprocess (callable): A post processing function to convert all
        matches. Defaults to str, i.e., no change.

Returns:
    A dict of the following form:
        {key1: [[[matches...], lineno], [[matches...], lineno], ...],
         key2: ...}
    For reverse reads, the lineno is given as a -ve number. Please note
    that 0-based indexing is used.
codesearchnet
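A usage sketch built from the pattern already shown in the docstring; the file name is a placeholder.

matches = regrep(
    'OUTCAR',                                              # placeholder file
    {'energy': r'energy\(sigma->0\)\s+=\s+([\d\-\.]+)'},
    reverse=True,
    terminate_on_match=True,
    postprocess=float)
# matches['energy'][0] -> [[<energy as float>], <line index; negative for reverse reads>]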
def __init__(self, pidfile, logger, port=64042, host='localhost'):
    super(RemoteControllerDeamon, self).__init__(pidfile, logger)
    self.__port = port
    self.__host = host

    for name in dir(self):
        method = getattr(self, name)
        if hasattr(method, 'registered_for_rpc'):
            self.register_method(method, method.registered_for_rpc.__name__)
Create a daemon which is controllable via JSON-RPC, using the registration
decorator to expose methods.

Args:
    pidfile (str): path to create pid file
    logger (logging.Logger): logger for the daemon
    port (int): TCP port used for remote control
    host (str): host address used for remote control
juraj-google-style