code: string (lengths 20 – 4.93k)
docstring: string (lengths 33 – 1.27k)
source: string (3 classes)
def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs): return self.tokenizer.batch_decode(generated_outputs, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
Post-process the output of the model to decode the text. Args: generated_outputs (`torch.Tensor` or `np.ndarray`): The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)` or `(sequence_length,)`. skip_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method. **kwargs: Additional arguments to be passed to the tokenizer's `batch_decode` method. Returns: `List[str]`: The decoded text.
github-repos
def save_image(imager, grid_data, grid_norm, output_file):
    imager.finalise_plane(grid_data, grid_norm)
    grid_data = numpy.real(grid_data)
    # Trim the guard border so only the central image-sized region is written
    # out (the "// 2" split of the border is reconstructed from context).
    border = (imager.plane_size - imager.image_size) // 2
    if border > 0:
        end = border + imager.image_size
        grid_data = grid_data[border:end, border:end]
    hdr = fits.header.Header()
    fits.writeto(output_file, grid_data, hdr, clobber=True)
Makes an image from gridded visibilities and saves it to a FITS file. Args: imager (oskar.Imager): Handle to configured imager. grid_data (numpy.ndarray): Final visibility grid. grid_norm (float): Grid normalisation to apply. output_file (str): Name of output FITS file to write.
codesearchnet
def DeserializeTX(buffer): mstream = MemoryStream(buffer) reader = BinaryReader(mstream) tx = Transaction.DeserializeFrom(reader) return tx
Deserialize the stream into a Transaction object. Args: buffer (BytesIO): stream to deserialize the Transaction from. Returns: neo.Core.TX.Transaction: the deserialized Transaction object.
juraj-google-style
def _get_ops_from_nodedefs(node_defs): ops = set() for node_def in node_defs: op_and_kernel = get_ops_from_nodedef(node_def) if op_and_kernel: ops.add(op_and_kernel) return ops
Gets the ops and kernels needed from the list of NodeDef. If a NodeDef's op is not in the allowlist of ops without kernel and there is no kernel found for this NodeDef, then skip that NodeDef and proceed to the next one. Args: node_defs: list of NodeDef's to get op/kernel information. Returns: A set of (op_name, kernel_name) tuples.
github-repos
def load_dict(self, db_key: str, hierarchical: bool=False) -> dict: if (not hierarchical): db_values = self._db.hgetall(db_key) for (_key, _value) in db_values.items(): if isinstance(_value, str): db_values[_key] = ast.literal_eval(_value) my_dict = db_values else: my_dict = self._load_dict_hierarchical(db_key) return my_dict
Load the dictionary at the specified key. Hierarchically stored dictionaries use a ':' separator to expand the dictionary into a set of Redis hashes. Args: db_key (str): Key at which the dictionary is stored in the db. hierarchical (bool): If True, expect the dictionary to have been stored hierarchically. If False, expect the dictionary to have been stored flat. Returns: dict, the dictionary stored at key
codesearchnet
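A minimal, Redis-free sketch of the hierarchical storage idea described above: nested dictionaries are expanded into ':'-separated key paths and rebuilt on load. The `flatten`/`unflatten` helper names are illustrative, not part of the library.

```python
def flatten(d, prefix=""):
    """Expand a nested dict into {'a:b': value} style key paths."""
    flat = {}
    for key, value in d.items():
        path = f"{prefix}:{key}" if prefix else key
        if isinstance(value, dict):
            flat.update(flatten(value, path))
        else:
            flat[path] = value
    return flat

def unflatten(flat):
    """Rebuild the nested dict from ':'-separated key paths."""
    nested = {}
    for path, value in flat.items():
        node = nested
        *parents, leaf = path.split(":")
        for part in parents:
            node = node.setdefault(part, {})
        node[leaf] = value
    return nested

config = {"pb": {"workflow": "vis", "num_channels": 4}}
assert unflatten(flatten(config)) == config
```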
def prepare_to_run_task(context, claim_task): current_task_info = {} context.claim_task = claim_task current_task_info['taskId'] = get_task_id(claim_task) current_task_info['runId'] = get_run_id(claim_task) log.info('Going to run taskId {taskId} runId {runId}!'.format(**current_task_info)) context.write_json(os.path.join(context.config['work_dir'], 'current_task_info.json'), current_task_info, 'Writing current task info to {path}...') return current_task_info
Given a `claim_task` json dict, prepare the `context` and `work_dir`. Set `context.claim_task`, and write a `work_dir/current_task_info.json` Args: context (scriptworker.context.Context): the scriptworker context. claim_task (dict): the claim_task dict. Returns: dict: the contents of `current_task_info.json`
codesearchnet
def sendCommand(self, command): command_data = [ord(x) for x in buffer(command)] self.hid.write(command_data) response_data = ''.join(chr(x) for x in self.hid.read(64)) response = command.RESPONSE.from_buffer_copy(response_data) if response.status != 0: raise CommandException(response.status) return response
Sends a Command object to the MCP2210 and returns its response. Arguments: A commands.Command instance Returns: A commands.Response instance, or raises a CommandException on error.
juraj-google-style
def unbatch(self, spec): raise NotImplementedError(f'{type(self).__name__}.unbatch')
Returns the TypeSpec for a single unbatched element in `spec`. Args: spec: The `TypeSpec` for a batch of values. Returns: A `TypeSpec` for an individual value.
github-repos
def edit_distance_2(self, word): word = word.lower() return [e2 for e1 in self.edit_distance_1(word) for e2 in self.edit_distance_1(e1)]
Compute all strings that are two edits away from `word`, using only the letters in the corpus. Args: word (str): The word for which to calculate the edit distance. Returns: set: The set of strings that are edit distance two from the provided word.
codesearchnet
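A self-contained sketch of how the two-edit candidates above are typically generated, assuming a Norvig-style `edit_distance_1` that produces all single-edit variants (the actual class restricts letters to those seen in its corpus; lowercase ASCII is used here for illustration).

```python
import string

def edit_distance_1(word):
    """All strings one delete, transpose, replace or insert away from word."""
    letters = string.ascii_lowercase
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = [left + right[1:] for left, right in splits if right]
    transposes = [left + right[1] + right[0] + right[2:]
                  for left, right in splits if len(right) > 1]
    replaces = [left + c + right[1:] for left, right in splits if right for c in letters]
    inserts = [left + c + right for left, right in splits for c in letters]
    return set(deletes + transposes + replaces + inserts)

def edit_distance_2(word):
    """All strings two edits away, built by applying edit_distance_1 twice."""
    return {e2 for e1 in edit_distance_1(word.lower()) for e2 in edit_distance_1(e1)}

print(len(edit_distance_2("cat")))  # tens of thousands of candidates for a short word
```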
def gene_panels(self, panel_id=None, institute_id=None, version=None): query = {} if panel_id: query['panel_name'] = panel_id if version: query['version'] = version if institute_id: query['institute'] = institute_id return self.panel_collection.find(query)
Return all gene panels If panel_id return all versions of panels by that panel name Args: panel_id(str) Returns: cursor(pymongo.cursor)
juraj-google-style
def profile_stats(adapter, threshold=0.9): profiles = [] samples = [] distance_dict = {key: 0 for key in HAMMING_RANGES.keys()} for case in adapter.cases(): for individual in case['individuals']: if individual.get('profile'): sample_id = f"{case['case_id']}.{individual['ind_id']}" ind_profile = individual['profile'] distance_array = np.array([], dtype=np.float) for (sample, profile) in zip(samples, profiles): distance = compare_profiles(ind_profile, profile) distance_array = np.append(distance_array, distance) if (distance >= threshold): LOG.warning(f'{sample_id} is {distance} similar to {sample}') for (key, range) in HAMMING_RANGES.items(): distance_dict[key] += np.sum(((distance_array >= range[0]) & (distance_array < range[1]))) profiles.append(ind_profile) samples.append(sample_id) return distance_dict
Compares the pairwise hamming distances for all the sample profiles in the database. Returns a table of the number of distances within given ranges. Args: adapter (MongoAdapter): Adapter to mongodb threshold (float): If any distance is found above this threshold a warning will be given, stating the two matching samples. Returns: distance_dict (dict): dictionary with ranges as keys, and the number of distances that are within these ranges as values.
codesearchnet
def resize_bilinear_nd(t, target_shape): shape = t.get_shape().as_list() target_shape = list(target_shape) assert (len(shape) == len(target_shape)) d = 0 while (d < len(shape)): if (shape[d] == target_shape[d]): d += 1 continue new_shape = shape[:] new_shape[d:(d + 2)] = target_shape[d:(d + 2)] shape_ = collapse_shape(shape, d, (d + 2)) new_shape_ = collapse_shape(new_shape, d, (d + 2)) t_ = tf.reshape(t, shape_) t_ = tf.image.resize_bilinear(t_, new_shape_[1:3]) t = tf.reshape(t_, new_shape) shape = new_shape d += 2 return t
Bilinear resizes a tensor t to have shape target_shape. This function bilinearly resizes a n-dimensional tensor by iteratively applying tf.image.resize_bilinear (which can only resize 2 dimensions). For bilinear interpolation, the order in which it is applied does not matter. Args: t: tensor to be resized target_shape: the desired shape of the new tensor. Returns: The resized tensor
codesearchnet
def preprocess_GIF(self, image, **kwargs): if 'transparency' in image.info: save_kwargs = {'transparency': image.info['transparency']} else: save_kwargs = {} return (image, save_kwargs)
Receive a PIL Image instance of a GIF and return a 2-tuple. Args: image: Original PIL Image instance. Returns: * [0]: Original Image instance (passed to `image`) * [1]: Dict with a transparency key (preserving the GIF transparency layer), or an empty dict if the image has none.
juraj-google-style
def set_config(config): bigchaindb.config = copy.deepcopy(bigchaindb._config) update(bigchaindb.config, update_types(config, bigchaindb.config)) bigchaindb.config['CONFIGURED'] = True
Set bigchaindb.config equal to the default config dict, then update that with whatever is in the provided config dict, and then set bigchaindb.config['CONFIGURED'] = True Args: config (dict): the config dict to read for changes to the default config Note: Any previous changes made to ``bigchaindb.config`` will be lost.
juraj-google-style
def pick_unused_port(pid=None, portserver_address=None): try: port = _free_ports.pop() except KeyError: pass else: _owned_ports.add(port) return port if portserver_address: port = get_port_from_port_server(portserver_address, pid=pid) if port: return port if ('PORTSERVER_ADDRESS' in os.environ): port = get_port_from_port_server(os.environ['PORTSERVER_ADDRESS'], pid=pid) if port: return port return _pick_unused_port_without_server()
A pure python implementation of PickUnusedPort. Args: pid: PID to tell the portserver to associate the reservation with. If None, the current process's PID is used. portserver_address: The address (path) of a unix domain socket with which to connect to a portserver, a leading '@' character indicates an address in the "abstract namespace". OR On systems without socket.AF_UNIX, this is an AF_INET address. If None, or no port is returned by the portserver at the provided address, the environment will be checked for a PORTSERVER_ADDRESS variable. If that is not set, no port server will be used. Returns: A port number that is unused on both TCP and UDP. Raises: NoFreePortFoundError: No free port could be found.
codesearchnet
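A minimal sketch of the `_pick_unused_port_without_server` fallback mentioned above, assuming the usual trick of binding a TCP socket to port 0 and letting the OS choose; the real helper also reserves the same port for UDP and guards against races, which is omitted here.

```python
import socket

def pick_unused_port_without_server():
    """Ask the OS for an ephemeral TCP port by binding to port 0."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(("localhost", 0))
        return sock.getsockname()[1]

print(pick_unused_port_without_server())
```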
def compress(content, method='gzip'): if (method == True): method = 'gzip' method = (method or '').lower() if (method == ''): return content elif (method == 'gzip'): return gzip_compress(content) raise NotImplementedError((str(method) + ' is not currently supported. Supported Options: None, gzip'))
Compresses file content. Required: content (bytes): The information to be compressed method (str, default: 'gzip'): None or gzip Raises: NotImplementedError if an unsupported codec is specified. compression.DecodeError if the encoder has an issue Return: compressed content
codesearchnet
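A stripped-down, runnable version of the same dispatch using the standard-library `gzip` module; the `gzip_compress` helper referenced above is assumed to behave like `gzip.compress`.

```python
import gzip

def compress(content, method="gzip"):
    """Compress bytes; method may be None/'' (no-op) or 'gzip'."""
    method = (method or "").lower()
    if method == "":
        return content
    if method == "gzip":
        return gzip.compress(content)
    raise NotImplementedError(f"{method} is not supported. Supported options: None, gzip")

payload = b"hello " * 1000
packed = compress(payload)
assert gzip.decompress(packed) == payload
print(len(payload), "->", len(packed))
```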
def decode_message(self, message_type, encoded_message): encoded_message = six.ensure_str(encoded_message) if not encoded_message.strip(): return message_type() dictionary = json.loads(encoded_message) message = self.__decode_dictionary(message_type, dictionary) message.check_initialized() return message
Merge JSON structure to Message instance. Args: message_type: Message to decode data to. encoded_message: JSON encoded version of message. Returns: Decoded instance of message_type. Raises: ValueError: If encoded_message is not valid JSON. messages.ValidationError if merged message is not initialized.
juraj-google-style
def __init__(self, database_config: VectorDatabaseWriteConfig): if not isinstance(database_config, VectorDatabaseWriteConfig): raise TypeError(f'database_config must be VectorDatabaseWriteConfig, got {type(database_config)}') self.database_config = database_config
Initialize transform with database config. Args: database_config: Configuration for target vector database.
github-repos
class DonutFastImageProcessorKwargs(DefaultFastImageProcessorKwargs): do_thumbnail: Optional[bool] do_align_long_axis: Optional[bool] do_pad: Optional[bool]
Args: do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`): Whether to resize the image using thumbnail method. do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`): Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image. If `random_padding` is set to `True`, each image is padded with a random amount of padding on each side, up to the largest image size in the batch. Otherwise, all images are padded to the largest image size in the batch.
github-repos
def process(self, element): (text, uid), prediction = element cluster = prediction.inference.item() if cluster == -1: body = f'Tweet-Id is {uid} and text is {text}' self.yag_smtp_client.send(to=cfg.EMAIL_ADDRESS, subject='Anomaly Detected', contents=body)
Takes a tuple of (text, id) and a prediction, and if the prediction is -1, it sends an email to the specified address Args: element: The element that is being processed.
github-repos
def get_storage(self, contract_hash, storage_key, id=None, endpoint=None): result = self._call_endpoint(GET_STORAGE, params=[contract_hash, binascii.hexlify(storage_key.encode('utf-8')).decode('utf-8')], id=id, endpoint=endpoint) try: return bytearray(binascii.unhexlify(result.encode('utf-8'))) except Exception as e: raise NEORPCException("could not decode result %s " % e)
Returns a storage item of a specified contract Args: contract_hash: (str) hash of the contract to lookup, for example 'd7678dd97c000be3f33e9362e673101bac4ca654' storage_key: (str) storage key to lookup, for example 'totalSupply' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: bytearray: bytearray value of the storage item
juraj-google-style
def _setup(self): if isinstance(self.module, torch.nn.RNNBase): self.module.flatten_parameters = noop for name_w in self.weights: w = getattr(self.module, name_w) del self.module._parameters[name_w] self.module.register_parameter((name_w + '_raw'), nn.Parameter(w.data))
For each string defined in `self.weights`, the corresponding attribute in the wrapped module is referenced, then deleted, and subsequently re-registered as a new parameter under a slightly modified name (a `_raw` suffix). Args: None Returns: None
codesearchnet
def max_csi(self): csi = (self.contingency_tables['TP'] / ((self.contingency_tables['TP'] + self.contingency_tables['FN']) + self.contingency_tables['FP'])) return csi.max()
Calculate the maximum Critical Success Index across all probability thresholds Returns: The maximum CSI as a float
codesearchnet
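The Critical Success Index is TP / (TP + FN + FP). A small NumPy sketch with made-up contingency counts shows the per-threshold CSI and its maximum, mirroring what `max_csi` computes over its contingency tables.

```python
import numpy as np

# Hypothetical contingency-table counts at three probability thresholds.
tp = np.array([40, 35, 20])
fn = np.array([10, 15, 30])
fp = np.array([25, 10, 5])

csi = tp / (tp + fn + fp)      # Critical Success Index per threshold
print(csi.round(3))            # [0.533 0.583 0.364]
print("max CSI:", csi.max())
```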
def create_labels(ptransform=None, namespace=None, name=None, pcollection=None): labels = {} if ptransform: labels[PTRANSFORM_LABEL] = ptransform if namespace: labels[NAMESPACE_LABEL] = namespace if name: labels[NAME_LABEL] = name if pcollection: labels[PCOLLECTION_LABEL] = pcollection return labels
Create the label dictionary based on the provided values. Args: ptransform: The ptransform id used as a label. namespace: The namespace used as a label. name: The metric name used as a label. pcollection: The pcollection id used as a label.
github-repos
def traverse_preorder(self, leaves=True, internal=True): for node in self.root.traverse_preorder(leaves=leaves, internal=internal): yield node
Perform a preorder traversal of the ``Node`` objects in this ``Tree`` Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
juraj-google-style
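A self-contained sketch of the preorder traversal being delegated to: visit a node (subject to the `leaves`/`internal` filters), then recurse into its children. The `Node` class here is a stand-in, not the library's.

```python
class Node:
    def __init__(self, label, children=None):
        self.label = label
        self.children = children or []

    def traverse_preorder(self, leaves=True, internal=True):
        """Yield this node, then recurse into each child (preorder)."""
        is_leaf = not self.children
        if (is_leaf and leaves) or (not is_leaf and internal):
            yield self
        for child in self.children:
            yield from child.traverse_preorder(leaves=leaves, internal=internal)

root = Node("r", [Node("a", [Node("x"), Node("y")]), Node("b")])
print([n.label for n in root.traverse_preorder()])                # ['r', 'a', 'x', 'y', 'b']
print([n.label for n in root.traverse_preorder(internal=False)])  # ['x', 'y', 'b']
```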
def update_config_data(msg, cfg):
    for attr in msg:
        # Compare strings with !=; "is not" checks identity, not equality.
        if attr in cfg.data[msg.profile] and attr != "auth":
            cfg.data[msg.profile][attr] = getattr(msg, attr)
Updates the profile's config entry with values set in each attr by the user. This will overwrite existing values. Args: :msg: (Message class) an instance of a message class. :cfg: (jsonconfig.Config) config instance.
juraj-google-style
def assert_is_fully_defined(self): if not self.is_fully_defined(): raise ValueError('Shape %s is not fully defined' % self)
Raises an exception if `self` is not fully defined in every dimension. Raises: ValueError: If `self` does not have a known value for every dimension.
github-repos
def _validate_pos_args_syntax(alias_name, alias_command): pos_args_from_alias = get_placeholders(alias_name) pos_args_from_command = [x.split('|')[0].split('.')[0].strip() for x in get_placeholders(alias_command)] if (set(pos_args_from_alias) != set(pos_args_from_command)): arg_diff = (set(pos_args_from_alias) ^ set(pos_args_from_command)) raise CLIError(INCONSISTENT_ARG_ERROR.format(('' if (len(arg_diff) == 1) else 's'), arg_diff, ('is' if (len(arg_diff) == 1) else 'are')))
Check if the positional argument syntax is valid in alias name and alias command. Args: alias_name: The name of the alias to validate. alias_command: The command to validate.
codesearchnet
def add_toolkit(topology, location): import streamsx.topology.topology assert isinstance(topology, streamsx.topology.topology.Topology) tkinfo = dict() tkinfo['root'] = os.path.abspath(location) topology.graph._spl_toolkits.append(tkinfo)
Add an SPL toolkit to a topology. Args: topology(Topology): Topology to include toolkit in. location(str): Location of the toolkit directory.
codesearchnet
def compute_mask(self, inputs, mask=None): if not self._supports_masking: if any((m is not None for m in nest.flatten(mask))): raise TypeError('Layer ' + self.name + ' does not support masking, but was passed an input_mask: ' + str(mask)) return None return mask
Computes an output mask tensor. Args: inputs: Tensor or list of tensors. mask: Tensor or list of tensors. Returns: None or a tensor (or list of tensors, one per output tensor of the layer).
github-repos
def wait_for_postgres(database, host, port, username, password): connecting_string = 'Checking for PostgreSQL...' if port is not None: port = int(port) while True: try: logger.info(connecting_string) connection = psycopg2.connect( database=database, host=host, port=port, user=username, password=password, connect_timeout=3 ) connection.close() logger.info('PostgreSQL is running!') break except psycopg2.OperationalError: time.sleep(1)
Waits for PostgreSQL database to be up Args: database (Optional[str]): Database name host (Optional[str]): Host where database is located port (Union[int, str, None]): Database port username (Optional[str]): Username to log into database password (Optional[str]): Password to log into database Returns: None
juraj-google-style
def __init__(self, name, aliases=None, description=None, urls=None): super(ConstantDefinition, self).__init__( name, aliases=aliases, description=description, urls=urls) self.value = None
Initializes a constant data type definition. Args: name (str): name. aliases (Optional[list[str]]): aliases. description (Optional[str]): description. urls (Optional[list[str]]): URLs.
juraj-google-style
def override_parent_subgraph(self, parent_subgraph, invisible_edges=None): with transaction.atomic(): if (invisible_edges is None): invisible_edges = set() children = list(parent_subgraph.keys()) all_old_relations = dict(proso.list.group_by(list(ItemRelation.objects.filter(child_id__in=children)), by=(lambda relation: relation.child_id))) to_delete = set() for (child_id, parents) in parent_subgraph.items(): old_relations = {relation.parent_id: relation for relation in all_old_relations.get(child_id, [])} for parent_id in parents: if (parent_id not in old_relations): ItemRelation.objects.create(parent_id=parent_id, child_id=child_id, visible=((child_id, parent_id) not in invisible_edges)) elif (old_relations[parent_id].visible != ((child_id, parent_id) not in invisible_edges)): old_relations[parent_id].visible = ((child_id, parent_id) not in invisible_edges) old_relations[parent_id].save() to_delete |= {old_relations[parent_id].pk for parent_id in (set(old_relations.keys()) - set(parents))} ItemRelation.objects.filter(pk__in=to_delete).delete()
Get all items with outgoing edges from the given subgraph, drop all their parent relations, and then add parents according to the given subgraph. Args: parent_subgraph (dict): item id -> list of parents (item ids) invisible_edges (list|set): set of (from, to) tuples specifying invisible edges
codesearchnet
def restore_site_properties(self, site_property="ff_map", filename=None): if not self.control_params["filetype"] == "pdb": raise ValueError() filename = filename or self.control_params["output"] bma = BabelMolAdaptor.from_file(filename, "pdb") pbm = pb.Molecule(bma._obmol) assert len(pbm.residues) == sum([x["number"] for x in self.param_list]) packed_mol = self.convert_obatoms_to_molecule( pbm.residues[0].atoms, residue_name=pbm.residues[0].name, site_property=site_property) for resid in pbm.residues[1:]: mol = self.convert_obatoms_to_molecule( resid.atoms, residue_name=resid.name, site_property=site_property) for site in mol: packed_mol.append(site.species, site.coords, properties=site.properties) return packed_mol
Restore the site properties for the final packed molecule. Args: site_property (str): filename (str): path to the final packed molecule. Returns: Molecule
juraj-google-style
def getctime(self, path): try: file_obj = self.filesystem.resolve(path) except IOError: self.filesystem.raise_os_error(errno.ENOENT) return file_obj.st_ctime
Returns the creation time of the fake file. Args: path: the path to fake file. Returns: (int, float) the creation time of the fake file in number of seconds since the epoch. Raises: OSError: if the file does not exist.
juraj-google-style
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_bias: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, use_cache: Optional[bool]=None): outputs = self.layernorm_before_attention(hidden_states) outputs = self.self_attention(outputs, outputs, attention_mask, position_bias, output_attentions, past_key_values, use_cache) outputs, attn_weights, current_key_value = outputs if self.dropout is not None: outputs = self.dropout(outputs) hidden_states = hidden_states + outputs return (hidden_states, attn_weights, current_key_value)
Args: hidden_states (`torch.Tensor` of shape `(batch, len_seq, dim_model)`): Input of transformer block(self-attention block). It can be the raw embedding of a batch of sequences. attention_mask (`torch.Tensor` of shape `(batch, len_seq, len_seq)`): Avoid invalid areas to participate in the calculation of self-attention. position_bias (`torch.Tensor` of shape `(batch, len_seq, len_seq)`): Provide positional information to self-attention block. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. past_key_values (`Tuple(torch.FloatTensor)`, *optional*): Cached past key and value projection states. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`).
github-repos
def create_route53_zone(client, zone_name): if not zone_name.endswith("."): zone_name += "." zone_id = get_or_create_hosted_zone(client, zone_name) old_soa = get_soa_record(client, zone_id, zone_name) if old_soa.text.min_ttl == "300": return zone_id new_soa = copy.deepcopy(old_soa) logger.debug("Updating negative caching value on zone %s to 300.", zone_name) new_soa.text.min_ttl = "300" client.change_resource_record_sets( HostedZoneId=zone_id, ChangeBatch={ "Comment": "Update SOA min_ttl to 300.", "Changes": [ { "Action": "UPSERT", "ResourceRecordSet": { "Name": zone_name, "Type": "SOA", "TTL": old_soa.ttl, "ResourceRecords": [ { "Value": str(new_soa.text) } ] } }, ] } ) return zone_id
Creates the given zone_name if it doesn't already exist. Also sets the SOA negative caching TTL to something short (300 seconds). Args: client (:class:`botocore.client.Route53`): The connection used to interact with Route53's API. zone_name (string): The name of the DNS hosted zone to create. Returns: string: The zone id returned from AWS for the existing, or newly created zone.
juraj-google-style
def test_sample_paths_1d(self, use_batch, watch_params, supply_normal_draws, random_type): dtype = tf.float64 mu = 0.2 a = 0.4 b = 0.33 def drift_fn(t, x): drift = mu * tf.sqrt(t) * tf.ones_like(x, dtype=t.dtype) return drift def vol_fn(t, x): del x if not use_batch: return (a * t + b) * tf.ones([1, 1], dtype=t.dtype) else: return (a * t + b) * tf.ones([2, 1, 1, 1], dtype=t.dtype) times = np.array([0.0, 0.1, 0.21, 0.32, 0.43, 0.55]) num_samples = 10000 if supply_normal_draws: normal_draws = tf.random.stateless_normal(shape=[2, 5000, 55, 1], seed=[1, 42], dtype=dtype) normal_draws = tf.concat([normal_draws, -normal_draws], axis=1) else: normal_draws = None if use_batch: x0 = np.array([[[0.1]], [[0.1]]]) else: x0 = np.array([0.1]) paths = self.evaluate(euler_sampling.sample(dim=1, drift_fn=drift_fn, volatility_fn=vol_fn, times=times, num_samples=num_samples, initial_state=x0, random_type=random_type, normal_draws=normal_draws, watch_params=watch_params, time_step=0.01, seed=[1, 42], dtype=dtype)) paths_no_zero = self.evaluate(euler_sampling.sample(dim=1, drift_fn=drift_fn, volatility_fn=vol_fn, times=times[1:], num_samples=num_samples, initial_state=x0, random_type=random_type, normal_draws=normal_draws, time_step=0.01, seed=[1, 42], dtype=dtype)) with self.subTest('CorrectShape'): if not use_batch: self.assertAllClose(paths.shape, (num_samples, 6, 1), atol=0) else: self.assertAllClose(paths.shape, (2, num_samples, 6, 1), atol=0) if not use_batch: means = np.mean(paths, axis=0).reshape(-1) else: means = np.mean(paths, axis=1).reshape([2, 1, 6]) expected_means = x0 + 2.0 / 3.0 * mu * np.power(times, 1.5) with self.subTest('ExpectedResult'): self.assertAllClose(means, expected_means, rtol=0.01, atol=0.01) if not use_batch: with self.subTest('IncludeInitialState'): self.assertAllClose(paths[:, 1:, :], paths_no_zero)
Tests path properties for a 1-dimensional Ito process. We construct the following Ito process. ```` dX = mu * sqrt(t) * dt + (a * t + b) dW ```` For this process the expected value at time t is x_0 + 2/3 * mu * t^1.5. Args: use_batch: Test parameter to specify if we are testing the batch of Euler sampling. watch_params: Triggers custom for loop. supply_normal_draws: Supply normal draws. random_type: `RandomType` of the sampled normal draws.
github-repos
def sync(self, since=None, timeout_ms=30000, filter=None, full_state=None, set_presence=None): request = {'timeout': int(timeout_ms)} if since: request['since'] = since if filter: request['filter'] = filter if full_state: request['full_state'] = json.dumps(full_state) if set_presence: request['set_presence'] = set_presence return self._send('GET', '/sync', query_params=request, api_path=MATRIX_V2_API_PATH)
Perform a sync request. Args: since (str): Optional. A token which specifies where to continue a sync from. timeout_ms (int): Optional. The time in milliseconds to wait. filter (int|str): Either a Filter ID or a JSON string. full_state (bool): Return the full state for every room the user has joined. Defaults to false. set_presence (str): Should the client be marked as "online" or "offline".
codesearchnet
def has_abiext(self, ext, single_file=True): if (ext != 'abo'): ext = (ext if ext.startswith('_') else ('_' + ext)) files = [] for f in self.list_filepaths(): if ((ext == '_DDB') and f.endswith('.nc')): continue if ((ext == '_MDF') and (not f.endswith('.nc'))): continue if ((ext == '_DDK') and f.endswith('.nc')): continue if (f.endswith(ext) or f.endswith((ext + '.nc'))): files.append(f) if (not files): files = [f for f in self.list_filepaths() if fnmatch(f, ('*%s*' % ext))] if (not files): return '' if ((len(files) > 1) and single_file): raise ValueError((('Found multiple files with the same extensions:\n %s\n' % files) + 'Please avoid using multiple datasets!')) return (files[0] if single_file else files)
Returns the absolute path of the ABINIT file with extension ext. Supports both Fortran files and netcdf files. In the latter case, we check whether a file with extension ext + ".nc" is present in the directory. Returns an empty string if the file is not present. Raises: `ValueError` if multiple files with the given ext are found. This implies that this method is not compatible with multiple datasets.
codesearchnet
def truediv(self, other, axis="columns", level=None, fill_value=None): return self._binary_op( "truediv", other, axis=axis, level=level, fill_value=fill_value )
Divides this DataFrame by another DataFrame/Series/scalar, element-wise. Args: other: The object to use to apply the divide against this. axis: The axis to divide over. level: The Multilevel index level to apply divide over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Divide applied.
juraj-google-style
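The wrapper above mirrors the pandas `DataFrame.truediv` signature; a short pandas example shows how `fill_value` substitutes for entries that are missing on one side before dividing.

```python
import pandas as pd

a = pd.DataFrame({"x": [1.0, 2.0], "y": [3.0, None]})
b = pd.DataFrame({"x": [2.0, 4.0], "y": [None, 2.0]})

# fill_value replaces a missing entry on either side before dividing;
# positions missing on both sides stay NaN.
print(a.truediv(b, fill_value=1.0))
#      x    y
# 0  0.5  3.0
# 1  0.5  0.5
```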
def deepcopy(original_obj): if isinstance(original_obj, list): return list((deepcopy(item) for item in original_obj)) elif isinstance(original_obj, dict): return dict(((key, deepcopy(val)) for (key, val) in original_obj.items())) else: return original_obj
Creates a deep copy of an object with no cross-referenced lists or dicts, useful when loading from yaml, as anchors generate those cross-referenced dicts and lists. Args: original_obj (object): Object to deep copy Returns: object: deep copy of the object
codesearchnet
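A short demonstration of why this matters for YAML anchors: aliased mappings share the same underlying object, and the recursive copy breaks that sharing. The function is reproduced here so the snippet runs on its own.

```python
def deepcopy(obj):
    """Recursive copy that breaks anchor/alias sharing (as in the row above)."""
    if isinstance(obj, list):
        return [deepcopy(item) for item in obj]
    if isinstance(obj, dict):
        return {key: deepcopy(val) for key, val in obj.items()}
    return obj

defaults = {"nics": ["eth0"]}
hosts = {"host1": defaults, "host2": defaults}   # shared object, like a YAML alias

copied = deepcopy(hosts)
copied["host1"]["nics"].append("eth1")

print(hosts["host1"]["nics"])    # ['eth0']          original untouched
print(copied["host2"]["nics"])   # ['eth0']          the copy no longer shares lists
print(copied["host1"]["nics"])   # ['eth0', 'eth1']
```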
def value_to_pytd_def(self, node, v, name): if isinstance(v, abstract.Module): return pytd.Alias(name, pytd.Module(name, module_name=v.full_name)) elif isinstance(v, abstract.BoundFunction): d = self.value_to_pytd_def(node, v.underlying, name) assert isinstance(d, pytd.Function) sigs = tuple((sig.Replace(params=sig.params[1:]) for sig in d.signatures)) return d.Replace(signatures=sigs) elif isinstance(v, attr_overlay.AttrsBase): ret = pytd.NamedType('typing.Callable') md = metadata.to_pytd(v.to_metadata()) return pytd.Annotated(ret, ("'pytype_metadata'", md)) elif isinstance(v, abstract.PyTDFunction) and (not isinstance(v, typing_overlay.TypeVar)): return pytd.Function(name=name, signatures=tuple((sig.pytd_sig for sig in v.signatures)), kind=v.kind, flags=pytd.MethodFlag.abstract_flag(v.is_abstract)) elif isinstance(v, abstract.InterpreterFunction): return self._function_to_def(node, v, name) elif isinstance(v, abstract.SimpleFunction): return self._simple_func_to_def(node, v, name) elif isinstance(v, (abstract.ParameterizedClass, abstract.Union)): return pytd.Alias(name, v.to_pytd_type_of_instance(node)) elif isinstance(v, abstract.PyTDClass) and v.module: return v.to_pytd_type(node) elif isinstance(v, typed_dict.TypedDictClass): return self._typed_dict_to_def(node, v, name) elif isinstance(v, abstract.PyTDClass): assert name != v.name return pytd.Alias(name, pytd.NamedType(v.name)) elif isinstance(v, abstract.InterpreterClass): if (v.official_name is None or name == v.official_name or v.official_name.endswith(f'.{name}')) and (not v.module): return self._class_to_def(node, v, name) else: type_name = v.full_name if v.module else v.official_name return pytd.Constant(name, pytd.GenericType(pytd.NamedType('builtins.type'), (pytd.NamedType(type_name),))) elif isinstance(v, abstract.TYPE_VARIABLE_TYPES): return self._type_variable_to_def(node, v, name) elif isinstance(v, abstract.Unsolvable): return pytd.Constant(name, v.to_pytd_type(node)) else: raise NotImplementedError(v.__class__.__name__)
Get a PyTD definition for this object. Args: node: The node. v: The object. name: The object name. Returns: A PyTD definition.
github-repos
def final_bearing(self, format='numeric'): bearings = [] for segment in self: if (len(segment) < 2): bearings.append([]) else: bearings.append(segment.final_bearing(format)) return bearings
Calculate final bearing between locations in segments. Args: format (str): Format of the bearing string to return Returns: list of list of float: Groups of bearings between points in segments
codesearchnet
def difference(self, *others): result = self.__copy__() _elements = result._elements _total = result._total for other in map(self._as_multiset, others): for (element, multiplicity) in other.items(): if (element in _elements): old_multiplicity = _elements[element] new_multiplicity = (old_multiplicity - multiplicity) if (new_multiplicity > 0): _elements[element] = new_multiplicity _total -= multiplicity else: del _elements[element] _total -= old_multiplicity result._total = _total return result
r"""Return a new multiset with all elements from the others removed. >>> ms = Multiset('aab') >>> sorted(ms.difference('bc')) ['a', 'a'] You can also use the ``-`` operator for the same effect. However, the operator version will only accept a set as other operator, not any iterable, to avoid errors. >>> ms = Multiset('aabbbc') >>> sorted(ms - Multiset('abd')) ['a', 'b', 'b', 'c'] For a variant of the operation which modifies the multiset in place see :meth:`difference_update`. Args: others: The other sets to remove from the multiset. Can also be any :class:`~typing.Iterable`\[~T] or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T]. Returns: The resulting difference multiset.
codesearchnet
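The same difference semantics can be sketched with the standard library's `collections.Counter`, whose subtraction also drops elements that reach zero or below; this is only an illustration of the behaviour, not the Multiset implementation.

```python
from collections import Counter

def multiset_difference(base, *others):
    """Remove element counts of each other iterable from base, never below zero."""
    result = Counter(base)
    for other in others:
        result -= Counter(other)   # Counter subtraction drops non-positive counts
    return result

print(sorted(multiset_difference("aab", "bc").elements()))      # ['a', 'a']
print(sorted(multiset_difference("aabbbc", "abd").elements()))  # ['a', 'b', 'b', 'c']
```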
def power(self, n): if n > 0: return super().power(n) return PTM(SuperOp(self).power(n))
The matrix power of the channel. Args: n (int): compute the matrix power of the superoperator matrix. Returns: PTM: the matrix power of the SuperOp converted to a PTM channel. Raises: QiskitError: if the input and output dimensions of the QuantumChannel are not equal, or the power is not an integer.
juraj-google-style
def ddel_tasks(provider, user_ids=None, job_ids=None, task_ids=None, labels=None, create_time_min=None, create_time_max=None): (deleted_tasks, error_messages) = provider.delete_jobs(user_ids, job_ids, task_ids, labels, create_time_min, create_time_max) for msg in error_messages: print(msg) return deleted_tasks
Kill jobs or job tasks. This function separates ddel logic from flag parsing and user output. Users of ddel who intend to access the data programmatically should use this. Args: provider: an instantiated dsub provider. user_ids: a set of user ids who "own" the job(s) to delete. job_ids: a set of job ids to delete. task_ids: a set of task ids to delete. labels: a set of LabelParam, each must match the job(s) to be cancelled. create_time_min: a timezone-aware datetime value for the earliest create time of a task, inclusive. create_time_max: a timezone-aware datetime value for the most recent create time of a task, inclusive. Returns: list of job ids which were deleted.
codesearchnet
def call(self, image_tokens: tf.Tensor, group_tokens: tf.Tensor, training: bool=False): group_tokens = self.norm_tokens(group_tokens) image_tokens = self.norm_x(image_tokens) projected_group_tokens = self.project_group_token(group_tokens) projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens) new_image_tokens, attention = self.assign(projected_group_tokens, image_tokens) new_image_tokens += projected_group_tokens new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens)) return (new_image_tokens, attention)
Args: image_tokens (`tf.Tensor`): image tokens, of shape [batch_size, input_length, channels] group_tokens (`tf.Tensor`): group tokens, [batch_size, num_group_tokens, channels]
github-repos
def after_create_session(self, session, coord): pass
Called when new TensorFlow session is created. This is called to signal the hooks that a new session has been created. This has two essential differences with the situation in which `begin` is called: * When this is called, the graph is finalized and ops can no longer be added to the graph. * This method will also be called as a result of recovering a wrapped session, not only at the beginning of the overall session. Args: session: A TensorFlow Session that has been created. coord: A Coordinator object which keeps track of all threads.
github-repos
def _md5_file(fn, block_size=1048576):
    h = hashlib.md5()
    # Open in binary mode: md5 must be fed bytes, not decoded text.
    with open(fn, 'rb') as fp:
        d = fp.read(block_size)
        while d:
            h.update(d)
            d = fp.read(block_size)
    return h.hexdigest()
Builds the MD5 of a file block by block Args: fn: File path block_size: Size of the blocks to consider (default 1048576) Returns: File MD5
juraj-google-style
def tag(name, message, author=None): cmd = 'git -c "user.name={author.name}" -c "user.email={author.email}" tag -a "{name}" -m "{message}"'.format(author=(author or latest_commit().author), name=name, message=message.replace('"', '\\"').replace('`', '\\`')) shell.run(cmd)
Tag the current commit. Args: name (str): The tag name. message (str): The tag message. Same as ``-m`` parameter in ``git tag``. author (Author): The commit author. Will default to the author of the commit. pretend (bool): If set to **True** it will print the full ``git tag`` command instead of actually executing it.
codesearchnet
def _get_validator(name, schema=None, check_schema=True, validator_class=None, **validator_kwargs): if (schema is None): try: schema = _SCHEMAS[name] except KeyError: raise SchemaValidationError('Valid schema name or schema must be provided.') if (name not in _VALIDATORS): if (validator_class is None): validator_class = jsonschema.validators.validator_for(schema) _VALIDATORS[name] = validator_class(schema, **validator_kwargs) validator = _VALIDATORS[name] if check_schema: validator.check_schema(schema) return validator
Generate validator for JSON schema. Args: name (str): Name for validator. Will be validator key in `_VALIDATORS` dict. schema (dict): JSON schema `dict`. If not provided searches for schema in `_SCHEMAS`. check_schema (bool): Verify schema is valid. validator_class (jsonschema.IValidator): jsonschema IValidator instance. Default behavior is to determine this from the schema `$schema` field. **validator_kwargs (dict): Additional keyword arguments for validator. Return: jsonschema.IValidator: Validator for JSON schema. Raises: SchemaValidationError: Raised if validation fails.
codesearchnet
def deprecate_entity(self, ilx_id: str, note=None) -> None: (term_id, term_version) = [(d['id'], d['version']) for d in self.ilxSearches([ilx_id], crawl=True, _print=False).values()][0] annotations = [{'tid': term_id, 'annotation_tid': '306375', 'value': 'True', 'term_version': term_version, 'annotation_term_version': '1'}] if note: editor_note = {'tid': term_id, 'annotation_tid': '306378', 'value': note, 'term_version': term_version, 'annotation_term_version': '1'} annotations.append(editor_note) self.addAnnotations(annotations, crawl=True, _print=False) print(annotations)
Tags a term in InterLex to warn that this term is no longer used. There isn't a proper way to delete a term, so we have to mark it so that this can be extrapolated in mysql/ttl loads. Args: ilx_id: id of the term to be deprecated (e.g. 'ilx_0101431'). note: optional editor note explaining why the term was deprecated. Example: deprecate_entity('ilx_0101431')
codesearchnet
def _create_session(self, username, password): session = requests.Session() session.verify = False try: response = session.get(self.host_url) except requests.exceptions.ConnectionError: return False soup = BeautifulSoup(response.text, 'html.parser') csrf_token = soup.find('input', dict(name='csrf_token'))['value'] login_data = dict(username=username, password=password) session.headers.update({'x-csrftoken': csrf_token, 'referer': self.host_url}) _ = session.post('{0:s}/login/'.format(self.host_url), data=login_data) return session
Create HTTP session. Args: username (str): Timesketch username password (str): Timesketch password Returns: requests.Session: Session object.
codesearchnet
def _check_condition(self, name, condition): if condition is not None and condition[0].name not in self.cregs: raise DAGCircuitError("invalid creg in condition for %s" % name)
Verify that the condition is valid. Args: name (string): used for error reporting condition (tuple or None): a condition tuple (ClassicalRegister,int) Raises: DAGCircuitError: if conditioning on an invalid register
juraj-google-style
def _build_mac_signature_key_information(self, value): if value is None: return None if not isinstance(value, dict): raise TypeError( "MAC/signature key information must be a dictionary." ) cryptographic_parameters = value.get('cryptographic_parameters') if cryptographic_parameters: cryptographic_parameters = self._build_cryptographic_parameters( cryptographic_parameters ) mac_signature_key_information = cobjects.MACSignatureKeyInformation( unique_identifier=value.get('unique_identifier'), cryptographic_parameters=cryptographic_parameters ) return mac_signature_key_information
Build a MACSignatureKeyInformation struct from a dictionary. Args: value (dict): A dictionary containing the key/value pairs for a MACSignatureKeyInformation struct. Returns: MACSignatureKeyInformation: a MACSignatureKeyInformation struct Raises: TypeError: if the input argument is invalid
juraj-google-style
def new_type(self, name: str | pytd_node.Node, parameters: list[pytd.Type] | None=None) -> pytd.Type: base_type = self.resolve_type(name) if not isinstance(base_type, pytd.NamedType): type_params = self.type_params + [pytd.TypeParameter('typing.AnyStr')] base_type = base_type.Visit(_InsertTypeParameters(type_params)) try: resolved_type = visitors.MaybeSubstituteParameters(base_type, parameters) except ValueError as e: raise _ParseError(str(e)) from e if resolved_type: return resolved_type if parameters is not None: if len(parameters) > 1 and isinstance(base_type, pytd.NamedType) and (base_type.name == 'typing.Optional'): raise _ParseError(f'Too many options to {base_type.name}') return self._parameterized_type(base_type, parameters) else: if isinstance(base_type, pytd.NamedType) and base_type.name in _TYPING_SETS: raise _ParseError(f'Missing options to {base_type.name}') return base_type
Return the AST for a type. Args: name: The name of the type. parameters: Sequence of type parameters. Returns: A pytd type node. Raises: ParseError: if the wrong number of parameters is supplied for the base_type - e.g., 2 parameters to Optional or no parameters to Union.
github-repos
def _add_thousand_g(self, variant_obj, info_dict): thousand_g = info_dict.get('1000GAF') if thousand_g: logger.debug("Updating thousand_g to: {0}".format( thousand_g)) variant_obj.thousand_g = float(thousand_g) variant_obj.add_frequency('1000GAF', variant_obj.get('thousand_g'))
Add the thousand genomes frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary
juraj-google-style
def find(pattern, path=os.path.curdir, recursive=False): root = realpath(path) Finder = lambda item: regex.is_regex(pattern) \ and pattern.match(item) or (pattern == item) if recursive: for base, dirs, files in os.walk(root, topdown=True): for segment in itertools.chain(filter(Finder, files), filter(Finder, dirs)): yield FS(os.path.join(base, segment)) else: for segment in filter(Finder, os.listdir(root)): yield(os.path.join(root, segment))
Find absolute file/folder paths with the given ``re`` pattern. Args: * pattern: search pattern, support both string (exact match) and `re` pattern. * path: root path to start searching, default is current working directory. * recursive: whether to recursively find the matched items from `path`, False by default Returns: Generator of the matched items of Files/Folders.
juraj-google-style
def write_data(num_lines, no_data=False, directory=None, prefix=tempfile.template, eol=EOL.LF): all_data = [] with tempfile.NamedTemporaryFile(delete=False, dir=directory, prefix=prefix) as f: sep_values = [b'\n', b'\r\n'] for i in range(num_lines): data = b'' if no_data else b'line' + str(i).encode() all_data.append(data) if eol == EOL.LF: sep = sep_values[0] elif eol == EOL.CRLF: sep = sep_values[1] elif eol == EOL.MIXED: sep = sep_values[i % len(sep_values)] elif eol == EOL.LF_WITH_NOTHING_AT_LAST_LINE: sep = b'' if i == num_lines - 1 else sep_values[0] else: raise ValueError('Received unknown value %s for eol.' % eol) f.write(data + sep) return (f.name, all_data)
Writes test data to a temporary file. Args: num_lines (int): The number of lines to write. no_data (bool): If :data:`True`, empty lines will be written, otherwise each line will contain a concatenation of b'line' and the line number. directory (str): The name of the directory to create the temporary file in. prefix (str): The prefix to use for the temporary file. eol (int): The line ending to use when writing. :class:`~apache_beam.io.filebasedsource_test.EOL` exposes attributes that can be used here to define the eol. Returns: Tuple[str, List[bytes]]: A tuple of the filename and a list of the written data.
github-repos
def repr_names(self, callself_repr: 'Callable[[cfg.Variable], str] | None'=None) -> Sequence[str]: callself_repr = callself_repr or (lambda v: v.name) if self._callself and self._callself.bindings: callself_names = [callself_repr(v) for v in self._callself.data] else: callself_names = ['<class>'] underlying = self.underlying.name if underlying.count('.') > 0: underlying = underlying.split('.', 1)[-1] return [callself + '.' + underlying for callself in callself_names]
Names to use in the bound function's string representation. This function can return multiple names because there may be multiple bindings in callself. Args: callself_repr: Optionally, a repr function for callself. Returns: A non-empty iterable of string names.
github-repos
def GetFilename(self): if (not self._file_entry): return None data_stream = getattr(self._file_entry.path_spec, 'data_stream', None) if data_stream: return '{0:s}:{1:s}'.format(self._file_entry.name, data_stream) return self._file_entry.name
Retrieves the name of the active file entry. Returns: str: name of the active file entry or None.
codesearchnet
def get_shape(self) -> tensor_shape.TensorShape: return self._dense_shape_default
Get the `TensorShape` representing the shape of the dense tensor. Returns: A `TensorShape` object.
github-repos
def labelset_heads(self, label): _eps = self._eps _vars = self._vars _hcons = self._hcons nodeids = {nodeid: _eps[nodeid][3].get(IVARG_ROLE, None) for nodeid in _vars[label]['refs']['LBL']} if len(nodeids) <= 1: return list(nodeids) scope_sets = {} for nid in nodeids: scope_sets[nid] = _ivs_in_scope(nid, _eps, _vars, _hcons) out = {} for n in nodeids: out[n] = 0 for role, val in _eps[n][3].items(): if role == IVARG_ROLE or role == CONSTARG_ROLE: continue elif any(val in s for n2, s in scope_sets.items() if n2 != n): out[n] += 1 candidates = [n for n, out_deg in out.items() if out_deg == 0] rank = {} for n in candidates: iv = nodeids[n] pred = _eps[n][1] if iv in _vars and self.nodeid(iv, quantifier=True) is not None: rank[n] = 0 elif pred.is_quantifier(): rank[n] = 0 elif pred.type == Pred.ABSTRACT: rank[n] = 2 else: rank[n] = 1 return sorted(candidates, key=lambda n: rank[n])
Return the heads of the labelset selected by *label*. Args: label: the label from which to find head nodes/EPs. Returns: An iterable of nodeids.
juraj-google-style
class ReadAllFromBigQuery(PTransform): COUNTER = 0 def __init__(self, gcs_location: Union[str, ValueProvider]=None, validate: bool=False, kms_key: str=None, temp_dataset: Union[str, DatasetReference]=None, bigquery_job_labels: Dict[str, str]=None, query_priority: str=BigQueryQueryPriority.BATCH): if gcs_location: if not isinstance(gcs_location, (str, ValueProvider)): raise TypeError('%s: gcs_location must be of type string or ValueProvider; got %r instead' % (self.__class__.__name__, type(gcs_location))) self.gcs_location = gcs_location self.validate = validate self.kms_key = kms_key self.bigquery_job_labels = bigquery_job_labels self.temp_dataset = temp_dataset self.query_priority = query_priority def expand(self, pcoll): job_name = pcoll.pipeline.options.view_as(GoogleCloudOptions).job_name project = pcoll.pipeline.options.view_as(GoogleCloudOptions).project unique_id = str(uuid.uuid4())[0:10] try: step_name = self.label except AttributeError: step_name = 'ReadAllFromBigQuery_%d' % ReadAllFromBigQuery.COUNTER ReadAllFromBigQuery.COUNTER += 1 sources_to_read, cleanup_locations = pcoll | beam.ParDo(_BigQueryReadSplit(options=pcoll.pipeline.options, gcs_location=self.gcs_location, validate=self.validate, bigquery_job_labels=self.bigquery_job_labels, job_name=job_name, step_name=step_name, unique_id=unique_id, kms_key=self.kms_key, project=project, temp_dataset=self.temp_dataset, query_priority=self.query_priority)).with_outputs('location_to_cleanup', main='files_to_read') return sources_to_read | SDFBoundedSourceReader(data_to_display=self.display_data()) | _PassThroughThenCleanup(beam.pvalue.AsIter(cleanup_locations))
Read data from BigQuery. PTransform:ReadFromBigQueryRequest->Rows This PTransform uses a BigQuery export job to take a snapshot of the table on GCS, and then reads from each produced file. Data is exported into a new subdirectory for each export using UUIDs generated in `ReadFromBigQueryRequest` objects. It is recommended not to use this PTransform for streaming jobs on GlobalWindow, since it will not be able to cleanup snapshots. Args: gcs_location (str): The name of the Google Cloud Storage bucket where the extracted table should be written as a string. If :data:`None`, then the temp_location parameter is used. validate (bool): If :data:`True`, various checks will be done when source gets initialized (e.g., is table present?). Set this to :data:`False` if the BigQuery export method is slow due to checking file existence. kms_key (str): Experimental. Optional Cloud KMS key name for use when creating new temporary tables.
github-repos
def has_checked_field(self, locator, **kwargs): kwargs['checked'] = True return self.has_selector('field', locator, **kwargs)
Checks if the page or current node has a radio button or checkbox with the given label, value, or id, that is currently checked. Args: locator (str): The label, name, or id of a checked field. **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`. Returns: bool: Whether it exists.
codesearchnet
def pick(self, connections): if (len(connections) == 1): return connections[0] def key(conn): return (datetime.min if (conn.backoff_time is None) else conn.backoff_time) return min(*connections, key=key)
Picks a connection with the earliest backoff time. As a result, the first connection is picked for as long as it has no backoff time. Otherwise, the connections are tried in a round robin fashion. Args: connections (:obj:list): List of :class:`~bigchaindb_driver.connection.Connection` instances.
codesearchnet
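A self-contained sketch of the selection rule: connections that have never backed off sort as `datetime.min` and therefore win. The `Connection` stand-in below only carries the `backoff_time` attribute the picker needs.

```python
from datetime import datetime, timedelta

class Connection:
    def __init__(self, name, backoff_time=None):
        self.name = name
        self.backoff_time = backoff_time

def pick(connections):
    """Pick the connection with the earliest backoff time (None sorts first)."""
    if len(connections) == 1:
        return connections[0]
    return min(connections,
               key=lambda c: datetime.min if c.backoff_time is None else c.backoff_time)

now = datetime.now()
conns = [Connection("a", now + timedelta(seconds=30)), Connection("b"), Connection("c", now)]
print(pick(conns).name)   # 'b' -- never backed off, so it sorts first
```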
def _prepare_np_fun_name_and_fun(np_fun_name, np_fun): if np_fun_name is not None: assert isinstance(np_fun_name, str) if np_fun is not None: assert not isinstance(np_fun, str) if np_fun is None: assert np_fun_name is not None try: np_fun = getattr(np, str(np_fun_name)) except AttributeError: np_fun = None if np_fun_name is None: assert np_fun is not None np_fun_name = np_fun.__name__ return (np_fun_name, np_fun)
Mutually propagates information between `np_fun_name` and `np_fun`. If one is None and the other is not, we'll try to make the former not None in a best effort. Args: np_fun_name: name for the np_fun symbol. At least one of np_fun or np_fun_name should be set. np_fun: the numpy function whose docstring will be used. Returns: Processed `np_fun_name` and `np_fun`.
github-repos
def get_variables(self, include_nontrainable=False): if include_nontrainable: return [self.all_variables[key] for key in sorted(self.all_variables)] else: return [self.variables[key] for key in sorted(self.variables)]
Returns the TensorFlow variables used by the baseline. Args: include_nontrainable: If True, also include non-trainable variables. Returns: List of variables
codesearchnet
def _add_partition(self, connection, partition): logger.debug('Creating foreign table for partition.\n partition: {}'.format(partition.name)) with connection.cursor() as cursor: postgres_med.add_partition(cursor, partition.datafile, partition.vid)
Creates FDW for the partition. Args: connection: partition (orm.Partition):
codesearchnet
def _Parse(self, template): if not template: raise TextFSMTemplateError('Null template.') self._ParseFSMVariables(template) while self._ParseFSMState(template): pass self._ValidateFSM()
Parses template file for FSM structure. Args: template: Valid template file. Raises: TextFSMTemplateError: If template file syntax is invalid.
juraj-google-style
def train(cluster_info, cluster_meta, feed_timeout=600, qname='input'): def _train(iter): mgr = _get_manager(cluster_info, util.get_ip_address(), util.read_executor_id()) try: queue = mgr.get_queue(qname) equeue = mgr.get_queue('error') except (AttributeError, KeyError): msg = "Queue '{}' not found on this node, check for exceptions on other nodes.".format(qname) raise Exception(msg) state = str(mgr.get('state')) logging.info('mgr.state={0}'.format(state)) terminating = (state == "'terminating'") if terminating: logging.info('mgr is terminating, skipping partition') count = sum((1 for item in iter)) logging.info('Skipped {0} items from partition'.format(count)) else: logging.info('Feeding partition {0} into {1} queue {2}'.format(iter, qname, queue)) count = 0 for item in iter: count += 1 queue.put(item, block=True) joinThr = Thread(target=queue.join) joinThr.start() timeout = feed_timeout while joinThr.isAlive(): if (not equeue.empty()): e_str = equeue.get() equeue.task_done() raise Exception(('exception in worker:\n' + e_str)) time.sleep(1) timeout -= 1 if (timeout <= 0): raise Exception('Timeout while feeding partition') logging.info('Processed {0} items in partition'.format(count)) if (not terminating): state = str(mgr.get('state')) terminating = (state == "'terminating'") if terminating: try: logging.info('TFSparkNode: requesting stop') client = reservation.Client(cluster_meta['server_addr']) client.request_stop() client.close() except Exception as e: logging.debug('Error while requesting stop: {0}'.format(e)) return [terminating] return _train
Feeds Spark partitions into the shared multiprocessing.Queue. Args: :cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc) :cluster_meta: dictionary of cluster metadata (e.g. cluster_id, reservation.Server address, etc) :feed_timeout: number of seconds after which data feeding times out (600 sec default) :qname: *INTERNAL_USE* Returns: A dataRDD.mapPartitions() function
codesearchnet
def check_graph_consistency(tensor=None, method='add_loss', force_raise=False): if force_raise or (ops.executing_eagerly_outside_functions() and hasattr(tensor, 'graph') and tensor.graph.is_control_flow_graph): if method == 'activity_regularizer': bad_example = "\n class TestModel(tf.keras.Model):\n\n def __init__(self):\n super(TestModel, self).__init__(name='test_model')\n self.dense = tf.keras.layers.Dense(2, activity_regularizer='l2')\n\n def call(self, x, training=None):\n if training:\n return self.dense(x)\n else:\n return self.dense(x)\n " correct_example = "\n class TestModel(tf.keras.Model):\n\n def __init__(self):\n super(TestModel, self).__init__(name='test_model')\n self.dense = tf.keras.layers.Dense(2, activity_regularizer='l2')\n\n def call(self, x, training=None):\n return self.dense(x)\n " raise RuntimeError('You are using a layer with `activity_regularizer` in a control flow branch, e.g.:\n{bad_example}\nThis is currently not supported. Please move your call to the layer with `activity_regularizer` out of the control flow branch, e.g.:\n{correct_example}\nYou can also resolve this by marking your outer model/layer dynamic (eager-only) by passing `dynamic=True` to the layer constructor. Any kind of control flow is supported with dynamic layers. Note that using `dynamic=True` requires you to implement static shape inference in the `compute_output_shape(input_shape)` method.'.format(bad_example=bad_example, correct_example=correct_example)) if method == 'add_metric': bad_example = "\n def call(self, inputs, training=None):\n if training:\n metric = compute_metric(inputs)\n self.add_metric(metric, name='my_metric', aggregation='mean')\n return inputs\n " correct_example = "\n def call(self, inputs, training=None):\n if training:\n metric = compute_metric(inputs)\n else:\n metric = 0.\n self.add_metric(metric, name='my_metric', aggregation='mean')\n return inputs\n " elif method == 'add_loss': bad_example = '\n def call(self, inputs, training=None):\n if training:\n loss = compute_loss(inputs)\n self.add_loss(loss)\n return inputs\n ' correct_example = '\n def call(self, inputs, training=None):\n if training:\n loss = compute_loss(inputs)\n else:\n loss = 0.\n self.add_loss(loss)\n return inputs\n ' else: bad_example = '\n def call(self, inputs, training=None):\n if training:\n self.add_update(self.w.assign_add(1))\n return inputs\n ' correct_example = '\n def call(self, inputs, training=None):\n if training:\n increment = 1\n else:\n increment = 0\n self.add_update(self.w.assign_add(increment))\n return inputs\n ' raise RuntimeError('You are using the method `{method}` in a control flow branch in your layer, e.g.:\n{bad_example}\nThis is not currently supported. Please move your call to {method} out of the control flow branch, e.g.:\n{correct_example}\nYou can also resolve this by marking your layer as dynamic (eager-only) by passing `dynamic=True` to the layer constructor. Any kind of control flow is supported with dynamic layers. Note that using `dynamic=True` requires you to implement static shape inference in the `compute_output_shape(input_shape)` method.'.format(method=method, bad_example=bad_example, correct_example=correct_example))
Checks that tensors passed to `add_*` method match the Keras graph. When one of the `add_*` method is called inside a V2 conditional branch, the underlying tensor gets created in a FuncGraph managed by control_flow_v2. We need to raise clear error messages in such cases. Args: tensor: Tensor to check, or `False` if it is known that an error should be raised. method: Caller method, one of {'add_metric', 'add_loss', 'add_update'}. force_raise: If an error should be raised regardless of `tensor`. Raises: RuntimeError: In case of an out-of-graph tensor.
github-repos
def _SoftmaxGrad(op: ops.Operation, grad_softmax):
    softmax = op.outputs[0]
    sum_channels = math_ops.reduce_sum(grad_softmax * softmax, -1, keepdims=True)
    return (grad_softmax - sum_channels) * softmax
The derivative of the softmax nonlinearity. We assume that probs is of shape [batch_size * dim] The formula for dsoftmax / dx = (diag(softmax) - softmax * softmax'). This matrix is diagonal minus a rank one matrix, so it is easy to implement as follows: grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax Args: op: the Softmax op. grad_softmax: the tensor representing the gradient w.r.t. the softmax output. Returns: gradient w.r.t the input to the softmax
github-repos
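A standalone NumPy check of the formula in the docstring above (not part of the TensorFlow sample); the helper names and test values are invented for illustration, and the analytic backward pass is compared against a central-difference estimate of the same vector-Jacobian product.

import numpy as np

def softmax(x):
    e = np.exp(x - x.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

def softmax_grad(grad_softmax, softmax_out):
    # grad_x = (grad_softmax - sum(grad_softmax * softmax)) * softmax
    sum_channels = np.sum(grad_softmax * softmax_out, axis=-1, keepdims=True)
    return (grad_softmax - sum_channels) * softmax_out

x = np.array([[0.2, -1.0, 3.0]])
g = np.array([[1.0, 0.5, -0.3]])   # upstream gradient w.r.t. the softmax output
analytic = softmax_grad(g, softmax(x))

eps = 1e-6                         # central-difference estimate of g^T J
numeric = np.zeros_like(x)
for i in range(x.shape[-1]):
    dx = np.zeros_like(x)
    dx[0, i] = eps
    numeric[0, i] = np.sum(g * (softmax(x + dx) - softmax(x - dx))) / (2 * eps)

assert np.allclose(analytic, numeric, atol=1e-5)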
def process_file(self, path):
    if self._config.verbose:
        self._logger.info('Processing file "%s"', path)

    output_path = '%s%s' % (path, BATCH_EXTENSION)
    with open(output_path, 'w') as file:
        for line in lines_generator(path):
            file.write('%s\n' % self._cucco.normalize(
                line.encode().decode('utf-8')))

    self._logger.debug('Created file "%s"', output_path)
Process a file applying normalizations. Get a file as input and generate a new file with the result of applying normalizations to every single line in the original file. The extension for the new file will be the one defined in BATCH_EXTENSION. Args: path: Path to the file.
juraj-google-style
def ParseMessage(self, parser_mediator, key, date_time, tokens):
    if key != 'task_run':
        raise ValueError('Unknown grammar key: {0:s}'.format(key))

    event_data = CronTaskRunEventData()
    event_data.body = tokens.get('body', None)
    event_data.command = tokens.get('command', None)
    event_data.hostname = tokens.get('hostname', None)
    event_data.offset = 0
    event_data.pid = tokens.get('pid', None)
    event_data.reporter = tokens.get('reporter', None)
    event_data.severity = tokens.get('severity', None)
    event_data.username = tokens.get('username', None)

    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a syslog body that matched one of defined grammars. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. key (str): name of the matching grammar. date_time (dfdatetime.DateTimeValues): date and time values. tokens (dict[str, str]): tokens derived from a syslog message based on the defined grammar. Raises: ValueError: If an unknown key is provided.
codesearchnet
def set_slats_level(self, slatsLevel=0.0, shutterLevel=None):
    if shutterLevel is None:
        shutterLevel = self.shutterLevel
    data = {
        "channelIndex": 1,
        "deviceId": self.id,
        "slatsLevel": slatsLevel,
        "shutterLevel": shutterLevel,
    }
    return self._restCall("device/control/setSlatsLevel", json.dumps(data))
Sets the slats and shutter level.

Args:
    slatsLevel(float): the new level of the slats. 0.0 = open, 1.0 = closed
    shutterLevel(float): the new level of the shutter. 0.0 = open, 1.0 = closed, None = use the current value

Returns:
    the result of the _restCall
juraj-google-style
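A hypothetical usage sketch for the sample above; `blind` stands in for an already-connected device object exposing this method, and the level values are illustrative only.

# 0.0 = fully open, 1.0 = fully closed for both slats and shutter.
result = blind.set_slats_level(slatsLevel=0.5)                      # tilt slats halfway, keep current shutter level
result = blind.set_slats_level(slatsLevel=1.0, shutterLevel=0.25)   # close slats, lower shutter a quarter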
def AddService(self, new_service):
    for service in self._services:
        if new_service == service:
            service.sources.append(new_service.sources[0])
            return

    self._services.append(new_service)
Add a new service to the list of ones we know about. Args: new_service (WindowsService): the service to add.
juraj-google-style
def ParseGenericRow(self, parser_mediator, query, row, **unused_kwargs):
    query_hash = hash(query)
    event_data = WindowsTimelineGenericEventData()

    payload_json_bytes = bytes(self._GetRowValue(query_hash, row, 'Payload'))
    payload_json_string = payload_json_bytes.decode('utf-8')
    appid_entries_string = self._GetRowValue(query_hash, row, 'AppId')

    payload = json.loads(payload_json_string)
    appid_entries = json.loads(appid_entries_string)

    package_id_locations = [
        'packageId', 'x_exe_path', 'windows_win32', 'windows_universal',
        'alternateId']
    for location in package_id_locations:
        for entry in appid_entries:
            if entry['platform'] == location and entry['application'] != '':
                event_data.package_identifier = entry['application']
                break

        if event_data.package_identifier is None:
            break

    if 'description' in payload:
        event_data.description = payload['description']
    else:
        event_data.description = ''

    if 'appDisplayName' in payload and payload['appDisplayName'] != '':
        event_data.application_display_name = payload['appDisplayName']
    elif 'displayText' in payload and payload['displayText'] != '':
        event_data.application_display_name = payload['displayText']

    timestamp = self._GetRowValue(query_hash, row, 'StartTime')
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_START)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a generic Windows Timeline row.

Args:
    parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row.
codesearchnet
def plot_predictions_histogram(Y_ph, Y, title=None):
    labels = list(set(Y).union(set(Y_ph)))
    edges = [(x - 0.5) for x in range(min(labels), (max(labels) + 2))]
    plt.hist([Y_ph, Y], bins=edges, label=['Predicted', 'Gold'])
    ax = plt.gca()
    ax.set_xticks(labels)
    plt.xlabel('Label')
    plt.ylabel('# Predictions')
    plt.legend(loc='upper right')
    if isinstance(title, str):
        plt.title(title)
    plt.show()
Plot a histogram comparing int predictions vs true labels by class Args: Y_ph: An [n] or [n, 1] np.ndarray of predicted int labels Y: An [n] or [n, 1] np.ndarray of gold labels
codesearchnet
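A minimal usage sketch for the sample above, assuming `plot_predictions_histogram` and its module-level `matplotlib.pyplot` import (`plt`) are in scope; the label arrays are made-up examples.

import numpy as np

# Hypothetical gold labels and predictions over three classes (1..3).
Y = np.array([1, 1, 2, 3, 3, 3])
Y_ph = np.array([1, 2, 2, 3, 3, 1])
plot_predictions_histogram(Y_ph, Y, title='Predicted vs. gold label counts')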
def download_file(self, remote_filename, local_filename=None):
    status = 'Failed'
    if local_filename is None:
        local_filename = remote_filename

    if not self.args.force and os.access(local_filename, os.F_OK):
        if not self._confirm_overwrite(local_filename):
            self._print_results(local_filename, 'Skipped')
            return

    url = '{}{}'.format(self.base_url, remote_filename)
    r = requests.get(url, allow_redirects=True)
    if r.ok:
        open(local_filename, 'wb').write(r.content)
        status = 'Success'
    else:
        self.handle_error('Error requesting: {}'.format(url), False)

    self._print_results(local_filename, status)
Download file from github.

Args:
    remote_filename (str): The name of the file as defined in the git repository.
    local_filename (str, optional): Defaults to None. The name of the file as it should be written to the local filesystem.
juraj-google-style
def _get_valid_formats():
    if NO_SOX:
        return []

    so = subprocess.check_output(['sox', '-h'])
    if type(so) is not str:
        so = str(so, encoding='UTF-8')
    so = so.split('\n')
    idx = [i for i in range(len(so)) if 'AUDIO FILE FORMATS:' in so[i]][0]
    formats = so[idx].split(' ')[3:]

    return formats
Calls SoX help for a list of audio formats available with the current install of SoX.

Returns:
--------
formats : list
    List of audio file extensions that SoX can process.
codesearchnet
def _clean_query_string(q):
    q = q.replace("()", "").strip()
    if q.endswith("("):
        q = q[:-1].strip()
    if q[-3:] == "AND" or q[-3:] == "NOT":
        q = q[:-3]
    elif q[-2:] == "OR":
        q = q[:-2]
    while q.count("(") > q.count(")"):
        q += ")"
    while q.count(")") > q.count("("):
        q = "(" + q
    return q.strip()
Clean up a query string for searching. Removes unmatched parentheses and joining operators. Arguments: q (str): Query string to be cleaned Returns: str: The clean query string.
juraj-google-style
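Worked examples of the clean-up rules above, assuming `_clean_query_string` is importable; the expected outputs in the comments follow directly from the code.

print(_clean_query_string("title:foo AND (author:bar"))   # -> 'title:foo AND (author:bar)'
print(_clean_query_string("(title:foo"))                  # -> '(title:foo)'
print(_clean_query_string("title:foo AND ()"))            # -> 'title:foo'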
def __cloudflare_list_zones(self, *, account, **kwargs):
    done = False
    zones = []
    page = 1

    while not done:
        kwargs['page'] = page
        response = self.__cloudflare_request(account=account, path='/zones', args=kwargs)
        info = response['result_info']

        if 'total_pages' not in info or page == info['total_pages']:
            done = True
        else:
            page += 1

        zones += response['result']

    return zones
Helper function to list all zones registered in the CloudFlare system. Returns a `list` of the zones Args: account (:obj:`CloudFlareAccount`): A CloudFlare Account object **kwargs (`dict`): Extra arguments to pass to the API endpoint Returns: `list` of `dict`
codesearchnet
def _call(callable_obj, arg_names, namespace):
    arguments = {arg_name: getattr(namespace, arg_name) for arg_name in arg_names}
    return callable_obj(**arguments)
Actually calls the callable with the namespace parsed from the command line. Args: callable_obj: a callable object arg_names: name of the function arguments namespace: the namespace object parsed from the command line
juraj-google-style
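A self-contained sketch of how `_call` dispatches parsed command-line arguments, assuming the function above is in scope; the `greet` function and flag names are invented for illustration.

import argparse

def greet(name, shout):
    msg = 'hello, {}'.format(name)
    return msg.upper() if shout else msg

parser = argparse.ArgumentParser()
parser.add_argument('--name')
parser.add_argument('--shout', action='store_true')
ns = parser.parse_args(['--name', 'ada', '--shout'])

# Pull only the arguments the callable declares out of the parsed namespace.
print(_call(greet, ['name', 'shout'], ns))   # -> 'HELLO, ADA'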
def ParseFileObject(self, parser_mediator, file_object):
    if not self.LINE_STRUCTURES:
        raise errors.UnableToParseFile('Missing line structures.')

    encoding = self._ENCODING or parser_mediator.codepage
    text_reader = EncodedTextReader(encoding, buffer_size=self.BUFFER_SIZE)

    text_reader.Reset()

    try:
        text_reader.ReadLines(file_object)
    except UnicodeDecodeError as exception:
        raise errors.UnableToParseFile(
            'Not a text file, with error: {0!s}'.format(exception))

    if not self.VerifyStructure(parser_mediator, text_reader.lines):
        raise errors.UnableToParseFile('Wrong file structure.')

    for key, structure in self.LINE_STRUCTURES:
        structure.parseWithTabs()

    consecutive_line_failures = 0

    while text_reader.lines:
        if parser_mediator.abort:
            break

        tokens = None
        start = 0
        end = 0
        key = None
        index = None

        for index, (key, structure) in enumerate(self._line_structures):
            try:
                structure_generator = structure.scanString(
                    text_reader.lines, maxMatches=1)
                parsed_structure = next(structure_generator, None)
            except pyparsing.ParseException:
                parsed_structure = None

            if not parsed_structure:
                continue

            tokens, start, end = parsed_structure
            if start == 0:
                break

        if tokens and start == 0:
            if index is not None and index != 0:
                key_structure = self._line_structures.pop(index)
                self._line_structures.insert(0, key_structure)

            try:
                self.ParseRecord(parser_mediator, key, tokens)
                consecutive_line_failures = 0
            except (errors.ParseError, errors.TimestampError) as exception:
                parser_mediator.ProduceExtractionWarning(
                    'unable to parse record: {0:s} with error: {1!s}'.format(
                        key, exception))

            text_reader.SkipAhead(file_object, end)

        else:
            odd_line = text_reader.ReadLine(file_object)
            if odd_line:
                if len(odd_line) > 80:
                    odd_line = '{0:s}...'.format(odd_line[:77])

                parser_mediator.ProduceExtractionWarning(
                    'unable to parse log line: {0:s}'.format(repr(odd_line)))

                consecutive_line_failures += 1
                if (consecutive_line_failures >
                        self.MAXIMUM_CONSECUTIVE_LINE_FAILURES):
                    raise errors.UnableToParseFile(
                        'more than {0:d} consecutive failures to parse lines.'.format(
                            self.MAXIMUM_CONSECUTIVE_LINE_FAILURES))

        try:
            text_reader.ReadLines(file_object)
        except UnicodeDecodeError as exception:
            parser_mediator.ProduceExtractionWarning(
                'unable to read lines with error: {0!s}'.format(exception))
Parses a text file-like object using a pyparsing definition. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
juraj-google-style
def validate(message, ssldir=None, **config):
    for field in ['signature', 'certificate']:
        if field not in message:
            _log.warn('No %s field found.', field)
            return False
        if not isinstance(message[field], six.text_type):
            _log.error('msg[%r] is not a unicode string' % field)
            try:
                message[field] = message[field].decode('utf-8')
            except UnicodeError as e:
                _log.error("Unable to decode the message '%s' field: %s", field, str(e))
                return False

    signature = base64.b64decode(message['signature'])
    certificate = base64.b64decode(message['certificate'])
    message = fedmsg.crypto.strip_credentials(message)

    # Default locations of the Fedora Infrastructure CA certificate and CRL.
    ca_location = config.get('ca_cert_location', 'https://fedoraproject.org/fedmsg/ca.crt')
    crl_location = config.get('crl_location', 'https://fedoraproject.org/fedmsg/crl.pem')
    try:
        ca_certificate, crl = utils.load_certificates(ca_location, crl_location)
        _validate_signing_cert(ca_certificate, certificate, crl)
    except (IOError, RequestException, X509StoreContextError) as e:
        try:
            ca_certificate, crl = utils.load_certificates(
                ca_location, crl_location, invalidate_cache=True)
            _validate_signing_cert(ca_certificate, certificate, crl)
        except (IOError, RequestException, X509StoreContextError) as e:
            _log.error(str(e))
            return False

    try:
        crypto_certificate = x509.load_pem_x509_certificate(certificate, default_backend())
        crypto_certificate.public_key().verify(
            signature, fedmsg.encoding.dumps(message).encode('utf-8'),
            asymmetric.padding.PKCS1v15(), hashes.SHA1())
    except InvalidSignature as e:
        _log.error('message [{m}] has an invalid signature: {e}'.format(m=message, e=str(e)))
        return False

    common_name = crypto_certificate.subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME)
    common_name = common_name[0]
    routing_policy = config.get('routing_policy', {})
    nitpicky = config.get('routing_nitpicky', False)
    return utils.validate_policy(
        message.get('topic'), common_name.value, routing_policy, nitpicky=nitpicky)
Validate the signature on the given message.

Four things must be true for the signature to be valid:

  1) The X.509 cert must be signed by our CA
  2) The cert must not be in our CRL.
  3) We must be able to verify the signature using the RSA public key contained in the X.509 cert.
  4) The topic of the message and the CN on the cert must appear in the :ref:`conf-routing-policy` dict.

Args:
    message (dict): A signed message in need of validation. A signed message contains the 'signature' and 'certificate' keys.
    ssldir (str): The path to the directory containing PEM-encoded X.509 key pairs.

Returns:
    bool: True if the message passes validation, False otherwise.
codesearchnet
def Cleanse(obj, encoding='utf-8'):
    if isinstance(obj, int):
        return obj
    elif isinstance(obj, float):
        if obj == _INFINITY:
            return 'Infinity'
        elif obj == _NEGATIVE_INFINITY:
            return '-Infinity'
        elif math.isnan(obj):
            return 'NaN'
        else:
            return obj
    elif isinstance(obj, bytes):
        return tf.compat.as_text(obj, encoding)
    elif isinstance(obj, (list, tuple)):
        return [Cleanse(i, encoding) for i in obj]
    elif isinstance(obj, set):
        return [Cleanse(i, encoding) for i in sorted(obj)]
    elif isinstance(obj, dict):
        return {Cleanse(k, encoding): Cleanse(v, encoding) for k, v in obj.items()}
    else:
        return obj
Makes Python object appropriate for JSON serialization. - Replaces instances of Infinity/-Infinity/NaN with strings. - Turns byte strings into unicode strings. - Turns sets into sorted lists. - Turns tuples into lists. Args: obj: Python data structure. encoding: Charset used to decode byte strings. Returns: Unicode JSON data structure.
juraj-google-style
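A usage sketch for the sample above; it assumes the sample's module-level names (`_INFINITY`, `_NEGATIVE_INFINITY`, `math`, `tf`) are available, since the bytes branch calls `tf.compat.as_text`, and the input dictionary is a made-up example.

sample = {
    'loss': float('nan'),
    'scores': (1.0, float('inf'), -float('inf')),
    'tags': {'b', 'a'},
    'name': b'run-1',
}
print(Cleanse(sample))
# -> {'loss': 'NaN', 'scores': [1.0, 'Infinity', '-Infinity'],
#     'tags': ['a', 'b'], 'name': 'run-1'}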
def _one_body_mapping(a_i, a_j, threshold=0.000001):
    pauli_list = []
    for alpha in range(2):
        for beta in range(2):
            pauli_prod = Pauli.sgn_prod(a_i[alpha], a_j[beta])
            coeff = 1.0 / 4 * pauli_prod[1] * np.power(-1j, alpha) * np.power(1j, beta)
            pauli_term = [coeff, pauli_prod[0]]
            if np.absolute(pauli_term[0]) > threshold:
                pauli_list.append(pauli_term)
    return Operator(paulis=pauli_list)
Subroutine for one body mapping.

Args:
    a_i (Pauli): pauli at index i
    a_j (Pauli): pauli at index j
    threshold (float): threshold to remove a pauli

Returns:
    Operator: Operator for those paulis
juraj-google-style
def _Upgrade0To1(self, data):
    subgraph = {}
    for key_to_promote in ['tensors', 'operators', 'inputs', 'outputs']:
        subgraph[key_to_promote] = data[key_to_promote]
        del data[key_to_promote]
    data['subgraphs'] = [subgraph]
Upgrade data from Version 0 to Version 1. Changes: Added subgraphs (which contains a subset of formally global entries). Args: data: Dictionary representing the TensorFlow lite data to be upgraded. This will be modified in-place to be an upgraded version.
github-repos
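A free-function re-statement of the upgrade above, for illustration only (the original is a method); the sample schema dictionary is invented.

def upgrade_0_to_1(data):
    # Same transformation as _Upgrade0To1: promote the four global keys into a single subgraph.
    subgraph = {}
    for key in ['tensors', 'operators', 'inputs', 'outputs']:
        subgraph[key] = data.pop(key)
    data['subgraphs'] = [subgraph]

data = {'tensors': [{'name': 'x'}], 'operators': [], 'inputs': [0], 'outputs': [0]}
upgrade_0_to_1(data)
print(data)   # -> {'subgraphs': [{'tensors': [{'name': 'x'}], 'operators': [], 'inputs': [0], 'outputs': [0]}]}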
def check_web_config(config_fname):
    print('Looking for config file at {0} ...'.format(config_fname))
    config = RawConfigParser()
    try:
        config.readfp(open(config_fname))
        return config
    except IOError:
        print("ERROR: Seems like the config file does not exist. Please call 'opensubmit-web configcreate' first, or specify a location with the '-c' option.")
        return None
Try to load the Django settings. If this does not work, then the settings file does not exist.

Returns:
    Loaded configuration, or None.
codesearchnet
def resolve_revision(self, dest, url, rev_options):
    rev = rev_options.arg_rev
    sha, is_branch = self.get_revision_sha(dest, rev)

    if sha is not None:
        rev_options = rev_options.make_new(sha)
        rev_options.branch_name = rev if is_branch else None
        return rev_options

    if not looks_like_hash(rev):
        logger.warning(
            "Did not find branch or tag '%s', assuming revision or ref.", rev)

    if not rev.startswith('refs/'):
        return rev_options

    self.run_command(['fetch', '-q', url] + rev_options.to_args(), cwd=dest)
    sha = self.get_revision(dest, rev='FETCH_HEAD')
    rev_options = rev_options.make_new(sha)

    return rev_options
Resolve a revision to a new RevOptions object with the SHA1 of the branch, tag, or ref if found. Args: rev_options: a RevOptions object.
codesearchnet
def Patch(self, request, global_params=None):
    config = self.GetMethodConfig('Patch')
    return self._RunMethod(config, request, global_params=global_params)
Updates a `BuildTrigger` by its project ID and trigger ID. This API is experimental. Args: request: (CloudbuildProjectsLocationsTriggersPatchRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (BuildTrigger) The response message.
github-repos
def save_libsvm(X, y, path): dump_svmlight_file(X, y, path, zero_based=False)
Save data as a LibSVM file.

Args:
    X (numpy or scipy sparse matrix): Data matrix
    y (numpy array): Target vector.
    path (str): Path to the LibSVM file to save data to.
codesearchnet
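A usage sketch of the underlying scikit-learn call that `save_libsvm` wraps; the matrix, labels, and output filename are made-up examples.

import numpy as np
from sklearn.datasets import dump_svmlight_file

X = np.array([[1.0, 0.0, 3.5],
              [0.0, 2.0, 0.0]])
y = np.array([1, 0])
dump_svmlight_file(X, y, 'train.libsvm', zero_based=False)
# train.libsvm now contains one "label index:value ..." line per row,
# with 1-based feature indices and zero entries omitted.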
def _FormatSizeInUnitsOf1024(self, size):
    magnitude_1024 = 0
    used_memory_1024 = float(size)
    while used_memory_1024 >= 1024:
        used_memory_1024 /= 1024
        magnitude_1024 += 1

    if 0 < magnitude_1024 <= 7:
        return '{0:.1f} {1:s}'.format(
            used_memory_1024, self._UNITS_1024[magnitude_1024])

    return '{0:d} B'.format(size)
Represents a number of bytes in units of 1024. Args: size (int): size in bytes. Returns: str: human readable string of the size.
juraj-google-style
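A standalone sketch of the same conversion, for illustration; the `_UNITS_1024` list below is an assumption, since the class attribute is not shown in the sample above.

_UNITS_1024 = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB']

def format_size_1024(size):
    # Free-function version of _FormatSizeInUnitsOf1024 above.
    magnitude = 0
    value = float(size)
    while value >= 1024:
        value /= 1024
        magnitude += 1
    if 0 < magnitude <= 7:
        return '{0:.1f} {1:s}'.format(value, _UNITS_1024[magnitude])
    return '{0:d} B'.format(size)

print(format_size_1024(512))          # -> '512 B'
print(format_size_1024(1536))         # -> '1.5 KiB'
print(format_size_1024(3 * 1024**3))  # -> '3.0 GiB'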
def _add_to_schema(self, field_name, schema):
    super(ForeignKeyField, self)._add_to_schema(field_name, schema)

    if self.get_field_value('convert_fks', default=True):
        self.attribute = field_name.replace('_id', '')
Set the ``attribute`` attr to the field in question so this always gets deserialized into the field name without ``_id``.

Args:
    field_name (str): The name of the field (the attribute name being set in the schema).
    schema (marshmallow.Schema): The actual parent schema this field belongs to.
codesearchnet
def hum44(msg):
    d = hex2bin(data(msg))

    if d[49] == '0':
        return None

    hm = bin2int(d[50:56]) * 100.0 / 64
    return round(hm, 1)
humidity Args: msg (String): 28 bytes hexadecimal message string Returns: float: percentage of humidity, [0 - 100] %
juraj-google-style
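Working the scaling in the sample above by hand (the raw values below are hypothetical, not decoded from a real message): the 6-bit field at zero-based bit positions 50-55 is multiplied by 100/64 and rounded to one decimal.

raw = 32
humidity = round(raw * 100.0 / 64, 1)
print(humidity)   # -> 50.0, i.e. 50.0 % relative humidity

# A raw value of 63 (all six bits set) gives the ceiling of the encoding:
print(round(63 * 100.0 / 64, 1))   # -> 98.4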
def _CheckPythonModuleVersion(
        self, module_name, module_object, version_property, minimum_version,
        maximum_version):
    module_version = None
    if not version_property.endswith('()'):
        module_version = getattr(module_object, version_property, None)
    else:
        version_method = getattr(module_object, version_property[:-2], None)
        if version_method:
            module_version = version_method()

    if not module_version:
        status_message = 'unable to determine version information for: {0:s}'.format(module_name)
        return False, status_message

    module_version = '{0!s}'.format(module_version)
    module_version = self._VERSION_NUMBERS_REGEX.findall(module_version)[0]
    if module_version[-1] == '.':
        module_version = module_version[:-1]

    try:
        module_version_map = list(map(int, self._VERSION_SPLIT_REGEX.split(module_version)))
    except ValueError:
        status_message = 'unable to parse module version: {0:s} {1:s}'.format(module_name, module_version)
        return False, status_message

    if minimum_version:
        try:
            minimum_version_map = list(map(int, self._VERSION_SPLIT_REGEX.split(minimum_version)))
        except ValueError:
            status_message = 'unable to parse minimum version: {0:s} {1:s}'.format(module_name, minimum_version)
            return False, status_message

        if module_version_map < minimum_version_map:
            status_message = '{0:s} version: {1!s} is too old, {2!s} or later required'.format(
                module_name, module_version, minimum_version)
            return False, status_message

    if maximum_version:
        try:
            maximum_version_map = list(map(int, self._VERSION_SPLIT_REGEX.split(maximum_version)))
        except ValueError:
            status_message = 'unable to parse maximum version: {0:s} {1:s}'.format(module_name, maximum_version)
            return False, status_message

        if module_version_map > maximum_version_map:
            status_message = '{0:s} version: {1!s} is too recent, {2!s} or earlier required'.format(
                module_name, module_version, maximum_version)
            return False, status_message

    status_message = '{0:s} version: {1!s}'.format(module_name, module_version)
    return True, status_message
Checks the version of a Python module.

Args:
    module_object (module): Python module.
    module_name (str): name of the Python module.
    version_property (str): version attribute or function.
    minimum_version (str): minimum version.
    maximum_version (str): maximum version.

Returns:
    tuple: consisting of:

        bool: True if the Python module is available and conforms to the minimum required version, False otherwise.
        str: status message.
codesearchnet
def load_disease_term(self, disease_obj):
    LOG.debug('Loading disease term %s into database', disease_obj['_id'])
    try:
        self.disease_term_collection.insert_one(disease_obj)
    except DuplicateKeyError as err:
        raise IntegrityError('Disease term {0} already exists in database'.format(disease_obj['_id']))

    LOG.debug('Disease term saved')
Load a disease term into the database Args: disease_obj(dict)
codesearchnet
def get_tag(self, name, params=None): return self.tag(name, action='GET', params=params)
Gets a tag from an Indicator/Group/Victim/Security Label.

Args:
    name: The name of the tag.
    params: Optional query parameters for the request.
juraj-google-style