Columns: code — string, lengths 20 to 4.93k · docstring — string, lengths 33 to 1.27k · source — string, 3 classes
def download_items(cache_fn, start=None):
    with SqliteDict(cache_fn) as db:
        last_id = db.get('last_id', 0) if not start else start
        _download_items(db, last_id)
        db.commit()
Open `cache_fn` as a database and download all not-yet downloaded items. Args: cache_fn (str): Path to the sqlite database. If it does not exist, it will be created. start (int, default None): If set, start from this sysno.
codesearchnet
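A minimal usage sketch for `download_items` above, assuming the `sqlitedict` package provides `SqliteDict` and a `_download_items(db, last_id)` helper exists as referenced in the row; the cache path and sysno are hypothetical.

```python
# Resume downloading into a local cache, starting from sysno 1000 (hypothetical values).
download_items("items_cache.sqlite", start=1000)
```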
def _DropCommonSuffixes(filename):
    for suffix in itertools.chain(
            ('%s.%s' % (test_suffix.lstrip('_'), ext)
             for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())),
            ('%s.%s' % (suffix, ext)
             for suffix, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))):
        if (filename.endswith(suffix) and len(filename) > len(suffix) and
                filename[-len(suffix) - 1] in ('-', '_')):
            return filename[:-len(suffix) - 1]
    return os.path.splitext(filename)[0]
Drops common suffixes like _test.cc or -inl.h from filename. For example: >>> _DropCommonSuffixes('foo/foo-inl.h') 'foo/foo' >>> _DropCommonSuffixes('foo/bar/foo.cc') 'foo/bar/foo' >>> _DropCommonSuffixes('foo/foo_internal.h') 'foo/foo' >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 'foo/foo_unusualinternal' Args: filename: The input filename. Returns: The filename with the common suffix removed.
juraj-google-style
def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs): decoder_module = module._get_decoder_module() decoder_outputs = decoder_module(decoder_input_ids, decoder_attention_mask, **kwargs) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: sequence_output = sequence_output * self.config.d_model ** (-0.5) if self.config.tie_word_embeddings: shared_embedding = module.shared.variables['params']['embedding'] lm_logits = module.lm_head.apply({'params': {'kernel': shared_embedding.T}}, sequence_output) else: lm_logits = module.lm_head(sequence_output) return (lm_logits, decoder_outputs) outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward) if past_key_values is None: lm_logits, decoder_outputs = outputs else: (lm_logits, decoder_outputs), past = outputs if return_dict: outputs = FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions) else: outputs = (lm_logits,) + decoder_outputs[1:] if past_key_values is not None and return_dict: outputs['past_key_values'] = unfreeze(past['cache']) return outputs elif past_key_values is not None and (not return_dict): outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:] return outputs
Returns:

Example:

```python
>>> from transformers import AutoTokenizer, FlaxT5ForConditionalGeneration
>>> import jax.numpy as jnp

>>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
>>> model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-small")

>>> text = "summarize: My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, return_tensors="np")
>>> encoder_outputs = model.encode(**inputs)

>>> decoder_start_token_id = model.config.decoder_start_token_id
>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id

>>> outputs = model.decode(decoder_input_ids, encoder_outputs)
>>> logits = outputs.logits
```
github-repos
def parse(self) -> Statement:
    self.opt_separator()
    start = self.offset
    res = self.statement()
    if res.keyword not in ["module", "submodule"]:
        self.offset = start
        raise UnexpectedInput(self, "'module' or 'submodule'")
    if self.name is not None and res.argument != self.name:
        raise ModuleNameMismatch(res.argument, self.name)
    if self.rev:
        revst = res.find1("revision")
        if revst is None or revst.argument != self.rev:
            raise ModuleRevisionMismatch(revst.argument, self.rev)
    try:
        self.opt_separator()
    except EndOfInput:
        return res
    raise UnexpectedInput(self, "end of input")
Parse a complete YANG module or submodule. Args: mtext: YANG module text. Raises: EndOfInput: If past the end of input. ModuleNameMismatch: If parsed module name doesn't match `self.name`. ModuleRevisionMismatch: If parsed revision date doesn't match `self.rev`. UnexpectedInput: If top-level statement isn't ``(sub)module``.
juraj-google-style
def _write(self, file_prefix, session=None, options=None):
    start_time = time.time()
    output = self._saver.save(file_prefix=file_prefix, session=session, options=options)
    end_time = time.time()
    metrics.AddCheckpointWriteDuration(
        api_label=_CHECKPOINT_V1,
        microseconds=_get_duration_microseconds(start_time, end_time))
    global _END_TIME_OF_LAST_WRITE
    with _END_TIME_OF_LAST_WRITE_LOCK:
        metrics.AddTrainingTimeSaved(
            api_label=_CHECKPOINT_V1,
            microseconds=_get_duration_microseconds(_END_TIME_OF_LAST_WRITE, end_time))
        if checkpoint_context.in_preemption_save_context():
            _preemption_checkpoint_saved_time_usecs.get_cell().increase_by(
                _get_duration_microseconds(_END_TIME_OF_LAST_WRITE, end_time))
        _END_TIME_OF_LAST_WRITE = end_time
    if tensor_util.is_tf_type(output):
        if context.executing_eagerly():
            output = compat.as_str(output.numpy())
    else:
        output = compat.as_str(output)
    if options is not None and options.experimental_write_callbacks is not None:
        _execute_callbacks(options.experimental_write_callbacks, output)
    metrics.RecordCheckpointSize(
        api_label=_CHECKPOINT_V1, filesize=_get_checkpoint_size(output))
    return output
Writes a training checkpoint. The checkpoint includes variables created by this object and any trackable objects it depends on at the time `Checkpoint.write()` is called. `write` does not number checkpoints, increment `save_counter`, or update the metadata used by `tf.train.latest_checkpoint`. It is primarily intended for use by higher level checkpoint management utilities. `save` provides a very basic implementation of these features. Args: file_prefix: A prefix to use for the checkpoint filenames (/path/to/directory/and_a_prefix). session: The session to evaluate variables in. Ignored when executing eagerly. If not provided when graph building, the default session is used. options: Optional `tf.train.CheckpointOptions` object. Returns: The full path to the checkpoint (i.e. `file_prefix`).
github-repos
def parse_uniprot_txt_file(infile):
    uniprot_metadata_dict = {}
    metadata = old_parse_uniprot_txt_file(infile)
    metadata_keys = list(metadata.keys())
    if metadata_keys:
        metadata_key = metadata_keys[0]
    else:
        return uniprot_metadata_dict
    uniprot_metadata_dict['seq_len'] = len(str(metadata[metadata_key]['sequence']))
    uniprot_metadata_dict['reviewed'] = metadata[metadata_key]['is_reviewed']
    uniprot_metadata_dict['seq_version'] = metadata[metadata_key]['sequence_version']
    uniprot_metadata_dict['entry_version'] = metadata[metadata_key]['entry_version']
    if 'gene' in metadata[metadata_key]:
        uniprot_metadata_dict['gene_name'] = metadata[metadata_key]['gene']
    if 'description' in metadata[metadata_key]:
        uniprot_metadata_dict['description'] = metadata[metadata_key]['description']
    if 'refseq' in metadata[metadata_key]:
        uniprot_metadata_dict['refseq'] = metadata[metadata_key]['refseq']
    if 'kegg' in metadata[metadata_key]:
        uniprot_metadata_dict['kegg'] = metadata[metadata_key]['kegg']
    if 'ec' in metadata[metadata_key]:
        uniprot_metadata_dict['ec_number'] = metadata[metadata_key]['ec']
    if 'pfam' in metadata[metadata_key]:
        uniprot_metadata_dict['pfam'] = metadata[metadata_key]['pfam']
    if 'pdbs' in metadata[metadata_key]:
        uniprot_metadata_dict['pdbs'] = list(set(metadata[metadata_key]['pdbs']))
    return uniprot_metadata_dict
Parse a raw UniProt metadata file and return a dictionary. Args: infile: Path to metadata file Returns: dict: Metadata dictionary
codesearchnet
def create(self, secret_type, value=None):
    if secret_type is ObjectType.CERTIFICATE:
        return self._create_certificate(value)
    elif secret_type is ObjectType.SYMMETRIC_KEY:
        return self._create_symmetric_key(value)
    elif secret_type is ObjectType.PUBLIC_KEY:
        return self._create_public_key(value)
    elif secret_type is ObjectType.PRIVATE_KEY:
        return self._create_private_key(value)
    elif secret_type is ObjectType.SPLIT_KEY:
        return self._create_split_key(value)
    elif secret_type is ObjectType.TEMPLATE:
        return self._create_template(value)
    elif secret_type is ObjectType.SECRET_DATA:
        return self._create_secret_data(value)
    elif secret_type is ObjectType.OPAQUE_DATA:
        return self._create_opaque_data(value)
    else:
        raise TypeError('Unrecognized secret type: {0}'.format(secret_type))
Create a secret object of the specified type with the given value. Args: secret_type (ObjectType): An ObjectType enumeration specifying the type of secret to create. value (dict): A dictionary containing secret data. Optional, defaults to None. Returns: secret: The newly constructed secret object. Raises: TypeError: If the provided secret type is unrecognized. Example: >>> factory.create(ObjectType.SYMMETRIC_KEY) SymmetricKey(...)
codesearchnet
def write_hashes(self, arr):
    length = len(arr)
    self.write_var_int(length)
    for item in arr:
        ba = bytearray(binascii.unhexlify(item))
        ba.reverse()
        self.write_bytes(ba)
Write an array of hashes to the stream. Args: arr (list): a list of 32 byte hashes.
juraj-google-style
def ssim_value(self, target):
    if (not isinstance(target, SSIMImage) or
            not np.array_equal(self.gaussian_kernel_1d, target.gaussian_kernel_1d)):
        target = SSIMImage(target, self.gaussian_kernel_1d, self.img.size)
    img_mat_12 = self.img.img_gray * target.img_gray
    img_mat_sigma_12 = convolve_gaussian_2d(img_mat_12, self.gaussian_kernel_1d)
    img_mat_mu_12 = self.img.img_gray_mu * target.img_gray_mu
    img_mat_sigma_12 = img_mat_sigma_12 - img_mat_mu_12
    num_ssim = ((2 * img_mat_mu_12 + self.c_1) *
                (2 * img_mat_sigma_12 + self.c_2))
    den_ssim = (
        (self.img.img_gray_mu_squared + target.img_gray_mu_squared + self.c_1) *
        (self.img.img_gray_sigma_squared + target.img_gray_sigma_squared + self.c_2))
    ssim_map = num_ssim / den_ssim
    index = np.average(ssim_map)
    return index
Compute the SSIM value from the reference image to the target image. Args: target (str or PIL.Image): Input image to compare the reference image to. This may be a PIL Image object or, to save time, an SSIMImage object (e.g. the img member of another SSIM object). Returns: Computed SSIM float value.
juraj-google-style
def replace_flat_tensors_for_gradients(xs, flat_grads):
    xs_structure = [_get_tensors_for_gradient(x) for x in xs]
    grads = nest.pack_sequence_as(xs_structure, flat_grads)
    return [_replace_tensors_for_gradient(x, grad) for x, grad in zip(xs, grads)]
Replaces Tensors that should be differentiated in `xs` with `flat_grads`. Args: xs: A list of `Tensor`s or `CompositeTensor`s. flat_grads: A list of `Tensor`. Returns: A list of `Tensor` or `CompositeTensor`.
github-repos
def groupby(iterable, key=0, filter=None):
    if isinstance(key, (basestring, int)):
        key = itemgetter(key)
    elif isinstance(key, (tuple, list)):
        key = itemgetter(*key)
    for label, grp in igroupby(iterable, key):
        yield label, list(grp)
Wrapper around itertools.groupby that yields a list for each group rather than a generator. Accepts an integer, string, tuple/list, or callable as the key; non-callables are automatically converted with itemgetter(key). Arguments: iterable: iterable key: string, int or callable that tells how to group Returns: an iterable where each item is the key and a *list* of that group (itertools.groupby returns a generator for each group), e.g. groupby(iterable, 0)
juraj-google-style
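A short usage sketch for the `groupby` wrapper above, assuming `igroupby` is `itertools.groupby` and that the input is already sorted by the grouping key (required for contiguous groups).

```python
rows = [("a", 1), ("a", 3), ("b", 2)]
for label, group in groupby(rows, key=0):
    print(label, group)
# a [('a', 1), ('a', 3)]
# b [('b', 2)]
```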
def vq_loss(x, targets, codebook_size, beta=0.25, decay=0.999, epsilon=1e-05, soft_em=False, num_samples=10, temperature=None, do_update=True): x_shape = common_layers.shape_list(x) target_shape = common_layers.shape_list(targets) hidden_size = x_shape[(- 1)] (means, _, _) = get_vq_codebook(codebook_size, hidden_size) x = tf.reshape(x, [(- 1), hidden_size]) targets = tf.reshape(targets, [(- 1)]) one_hot_targets = tf.one_hot(targets, codebook_size) target_means = tf.matmul(one_hot_targets, means) (discrete_x, code_loss, distances) = vq_body(x, codebook_size, beta=beta, decay=decay, epsilon=epsilon, soft_em=soft_em, num_samples=num_samples, temperature=temperature, do_update=do_update) logits = (- distances) targets_loss = tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=targets) targets_loss = tf.reduce_mean(targets_loss) x_means = tf.matmul(discrete_x, means) x_means = (x + tf.stop_gradient((x_means - x))) discrete_x = tf.reshape(discrete_x, (x_shape[:(- 1)] + [codebook_size])) target_means = tf.reshape(target_means, (target_shape + [hidden_size])) return (discrete_x, x_means, target_means, code_loss, targets_loss)
Compute the loss of large vocab tensors using a VQAE codebook. Args: x: Tensor of inputs to be quantized to nearest code targets: Tensor of target indices to target codes codebook_size: Size of quantization codebook beta: scalar float for moving averages decay: scalar float for moving averages epsilon: scalar float for moving averages soft_em: boolean, whether to apply a soft sampling procedure num_samples: if soft_em, number of samples to take temperature: temperature if we want to sample nearest neighbors or None do_update: whether to update the means; True by default, can be a Tensor Returns: discrete_x: one-hot Tensor indicating which codebook element is closest to x x_means: Tensor, on the forward pass: closest codebook element to x, on the backwards pass: soft convex-combination of codebook elements by proximity to x target_means: the codebook elements corresponding to the targets code_loss: loss driving x closer to its nearest codebook element targets_loss: cross-entropy loss driving x closer to code corresponding to target
codesearchnet
def from_url(url, format=None):
    string = urllib2.urlopen(url).read()
    if PY3 is True:
        string = string.decode('utf-8')
    if format:
        format = format.lower().replace(' ', '_')
        func = parse.__getattr__('from_%s' % format)
    else:
        func = parse.from_unknown_text
    crs = func(string)
    return crs
Returns the crs object from a string interpreted as a specified format, located at a given url site. Arguments: - *url*: The url where the crs string is to be read from. - *format* (optional): Which format to parse the crs string as. One of "ogc wkt", "esri wkt", or "proj4". If None, tries to autodetect the format for you (default). Returns: - CRS object.
codesearchnet
def _read_content_or_path(content_or_path):
    if '\n' in content_or_path.strip():
        return content_or_path
    if not os.path.exists(content_or_path):
        raise IOError("File '%s' doesn't exists!" % content_or_path)
    with open(content_or_path) as f:
        return f.read()
If `content_or_path` contains ``\\n``, return it. Otherwise assume it is a path and read the file at that path. Args: content_or_path (str): Content or path to the file. Returns: str: Content. Raises: IOError: when the file is not found.
codesearchnet
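A usage sketch for `_read_content_or_path` above; the file path is hypothetical.

```python
# A string containing a newline is returned unchanged.
print(_read_content_or_path("line one\nline two"))
# Anything else is treated as a path and read from disk.
xml = _read_content_or_path("templates/record.xml")
```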
def get_params(brightness, contrast, saturation, hue):
    transforms = []
    if brightness is not None:
        brightness_factor = random.uniform(brightness[0], brightness[1])
        transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
    if contrast is not None:
        contrast_factor = random.uniform(contrast[0], contrast[1])
        transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
    if saturation is not None:
        saturation_factor = random.uniform(saturation[0], saturation[1])
        transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
    if hue is not None:
        hue_factor = random.uniform(hue[0], hue[1])
        transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
    random.shuffle(transforms)
    transform = Compose(transforms)
    return transform
Get a randomized transform to be applied on image. Arguments are same as that of __init__. Returns: Transform which randomly adjusts brightness, contrast and saturation in a random order.
codesearchnet
def destroy_walker(self, walker):
    if walker.buffered:
        self._queue_walkers.remove(walker)
    else:
        self._virtual_walkers.remove(walker)
Destroy a previously created stream walker. Args: walker (StreamWalker): The walker to remove from internal updating lists.
codesearchnet
def need_rejoin(self):
    if not self._subscription.partitions_auto_assigned():
        return False
    if self._auto_assign_all_partitions():
        return False
    if (self._assignment_snapshot is not None
            and self._assignment_snapshot != self._metadata_snapshot):
        return True
    if (self._joined_subscription is not None
            and self._joined_subscription != self._subscription.subscription):
        return True
    return super(ConsumerCoordinator, self).need_rejoin()
Check whether the group should be rejoined Returns: bool: True if consumer should rejoin group, False otherwise
codesearchnet
def __init__(self, parameters, cost_fn_val):
    self.parameters = parameters
    self.cost_fn_val = cost_fn_val
    self.fitness_score = self.__calc_fitness_score(cost_fn_val)
Member object Args: parameters (dictionary): dictionary of parameter names and values cost_fn_val (float): value returned by cost function using params
juraj-google-style
def read_into(self, buffer, size=-1, *, offset=0, write_offset=0) -> None:
    return self.mglo.read_into(buffer, size, offset, write_offset)
Read the content into a buffer. Args: buffer (bytearray): The buffer that will receive the content. size (int): The size. Value ``-1`` means all. Keyword Args: offset (int): The read offset. write_offset (int): The write offset.
juraj-google-style
def populate_defaults(base_type, removed_method=False, removed_args=None): def wrap(func): if removed_method: return func base_argspec = getfullargspec(unwrap(getattr(base_type, func.__name__))) if not base_argspec.defaults and (not base_argspec.kwonlydefaults): return func arg_to_default = {} if base_argspec.defaults: arg_to_default.update(zip(base_argspec.args[-len(base_argspec.defaults):], base_argspec.defaults)) if base_argspec.kwonlydefaults: arg_to_default.update(base_argspec.kwonlydefaults) unwrapped_func = unwrap(func) func_argspec = getfullargspec(unwrapped_func) num_non_defaults = len(func_argspec.args) - len(func_argspec.defaults or ()) defaults_to_populate = set(func_argspec.args[:num_non_defaults]).intersection(arg_to_default.keys()) if removed_args: defaults_to_populate -= set(removed_args) if 'copy' in arg_to_default and arg_to_default['copy'] is None: arg_to_default['copy'] = True @functools.wraps(func) def wrapper(**kwargs): for name in defaults_to_populate: if name not in kwargs: kwargs[name] = arg_to_default[name] return func(**kwargs) return wrapper return wrap
Populate default values for keyword arguments in decorated function. When applied to a function, this decorator creates a new function with default values for all keyword arguments, based on the default values for the identically-named method on `base_type`. For internal use only. No backwards compatibility guarantees. Args: base_type: The pandas type of the method that this is trying to replicate. removed_method: Whether this method has been removed in the running Pandas version. removed_args: If not empty, which arguments have been dropped in the running Pandas version.
github-repos
def upsert_sweep(self, config): mutation = gql('\n mutation UpsertSweep(\n $config: String,\n $description: String,\n $entityName: String!,\n $projectName: String!\n ) {\n upsertSweep(input: {\n config: $config,\n description: $description,\n entityName: $entityName,\n projectName: $projectName\n }) {\n sweep {\n name\n }\n }\n }\n ') def no_retry_400_or_404(e): if (not isinstance(e, requests.HTTPError)): return True if ((e.response.status_code != 400) and (e.response.status_code != 404)): return True body = json.loads(e.response.content) raise UsageError(body['errors'][0]['message']) response = self.gql(mutation, variable_values={'config': yaml.dump(config), 'description': config.get('description'), 'entityName': self.settings('entity'), 'projectName': self.settings('project')}, check_retry_fn=no_retry_400_or_404) return response['upsertSweep']['sweep']['name']
Upsert a sweep object. Args: config (str): sweep config (will be converted to yaml)
codesearchnet
def list_devices(device_type=None):
    device_type = device_type.lower() if device_type else None
    jax_devices = jax.devices(backend=device_type)
    return [f'{device.platform}:{device.id}' for device in jax_devices]
Return all the available devices based on the device type. Note that this should return the global devices in a distributed setting. Args: device_type: string of `"cpu"`, `"gpu"` or `"tpu"`. Defaults to `"gpu"` or `"tpu"` if available when device_type is not provided. Otherwise will return the `"cpu"` devices. Return: List of devices that are available for distribute computation.
github-repos
def pad_nested_sequences(sequences, dtype='int32'):
    max_sent_len = 0
    max_word_len = 0
    for sent in sequences:
        max_sent_len = max(len(sent), max_sent_len)
        for word in sent:
            max_word_len = max(len(word), max_word_len)
    x = np.zeros((len(sequences), max_sent_len, max_word_len)).astype(dtype)
    for i, sent in enumerate(sequences):
        for j, word in enumerate(sent):
            x[i, j, :len(word)] = word
    return x
Pads nested sequences to the same length. This function transforms a list of list sequences into a 3D Numpy array of shape `(num_samples, max_sent_len, max_word_len)`. Args: sequences: List of lists of lists. dtype: Type of the output sequences. # Returns x: Numpy array.
codesearchnet
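A small worked example for `pad_nested_sequences` above (only `numpy` is needed).

```python
sequences = [[[1, 2], [3]], [[4, 5, 6]]]
x = pad_nested_sequences(sequences)
# x.shape == (2, 2, 3); shorter words and sentences are zero-padded:
# [[[1 2 0]
#   [3 0 0]]
#  [[4 5 6]
#   [0 0 0]]]
```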
def eigenvalues(df):
    corr = np.corrcoef(df, rowvar=0)
    eigvals = np.linalg.eigvals(corr)
    return pd.Series(eigvals, df.columns, name='Eigenvalue')
Returns a pandas Series with eigenvalues of the correlation matrix. Args: df: pandas DataFrame with columns to run diagnostics on
juraj-google-style
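A quick check of `eigenvalues` above on a toy DataFrame.

```python
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [2, 4, 6, 8], "z": [1, -1, -1, 1]})
print(eigenvalues(df))
# Roughly 2, 0 and 1: x and y are perfectly correlated, z is uncorrelated with both.
```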
def __validate(self, value, validate_element):
    if not self.repeated:
        return validate_element(value)
    else:
        if isinstance(value, (list, tuple)):
            result = []
            for element in value:
                if element is None:
                    try:
                        name = self.name
                    except AttributeError:
                        raise ValidationError(
                            'Repeated values for %s '
                            'may not be None' % self.__class__.__name__)
                    else:
                        raise ValidationError(
                            'Repeated values for field %s '
                            'may not be None' % name)
                result.append(validate_element(element))
            return result
        elif value is not None:
            try:
                name = self.name
            except AttributeError:
                raise ValidationError('%s is repeated. Found: %s' % (
                    self.__class__.__name__, value))
            else:
                raise ValidationError(
                    'Field %s is repeated. Found: %s' % (name, value))
        return value
Internal validation function. Validate an internal value using a function to validate individual elements. Args: value: Value to validate. validate_element: Function to use to validate individual elements. Raises: ValidationError if value is not expected type.
juraj-google-style
def next_layer(self, original_rp, broadcast_rp):
    gather_index = _next_layer_gather_index(self, original_rp, broadcast_rp)
    return _LayerBroadcaster.from_gather_index(gather_index)
Create the next layer gather_index whether or not a broadcast happens.

   *---------self------->*
   |                     |
original_rp        broadcast_rp
   |                     |
  \|/                   \|/
   *--next_broadcaster-->*

Args:
  original_rp: the original row partition.
  broadcast_rp: the target row partition.

Returns:
  the gather_index for next_broadcaster.
github-repos
def revoke(self, revocation_reason, uid=None, revocation_message=None,
           compromise_occurrence_date=None):
    if not isinstance(revocation_reason, enums.RevocationReasonCode):
        raise TypeError('revocation_reason must be a RevocationReasonCode enumeration')
    if uid is not None:
        if not isinstance(uid, six.string_types):
            raise TypeError('uid must be a string')
    if revocation_message is not None:
        if not isinstance(revocation_message, six.string_types):
            raise TypeError('revocation_message must be a string')
    if compromise_occurrence_date is not None:
        if not isinstance(compromise_occurrence_date, six.integer_types):
            raise TypeError('compromise_occurrence_date must be an integer')
        compromise_occurrence_date = primitives.DateTime(
            compromise_occurrence_date, enums.Tags.COMPROMISE_OCCURRENCE_DATE)
    result = self.proxy.revoke(revocation_reason, uid, revocation_message,
                               compromise_occurrence_date)
    status = result.result_status.value
    if status == enums.ResultStatus.SUCCESS:
        return
    else:
        reason = result.result_reason.value
        message = result.result_message.value
        raise exceptions.KmipOperationFailure(status, reason, message)
Revoke a managed object stored by a KMIP appliance. Args: revocation_reason (RevocationReasonCode): An enumeration indicating the revocation reason. uid (string): The unique ID of the managed object to revoke. Optional, defaults to None. revocation_message (string): A message regarding the revocation. Optional, defaults to None. compromise_occurrence_date (int): An integer, the number of seconds since the epoch, which will be converted to the Datetime when the managed object was first believed to be compromised. Optional, defaults to None. Returns: None Raises: ClientConnectionNotOpen: if the client connection is unusable KmipOperationFailure: if the operation result is a failure TypeError: if the input argument is invalid
codesearchnet
def _consume_single_get(response_iterator):
    all_responses = list(response_iterator)
    if len(all_responses) != 1:
        raise ValueError(
            'Unexpected response from `BatchGetDocumentsResponse`',
            all_responses,
            'Expected only one result')
    return all_responses[0]
Consume a gRPC stream that should contain a single response. The stream will correspond to a ``BatchGetDocuments`` request made for a single document. Args: response_iterator (~google.cloud.exceptions.GrpcRendezvous): A streaming iterator returned from a ``BatchGetDocuments`` request. Returns: ~google.cloud.proto.firestore.v1beta1.\ firestore_pb2.BatchGetDocumentsResponse: The single "get" response in the batch. Raises: ValueError: If anything other than exactly one response is returned.
codesearchnet
def ndim(x):
    if any_symbolic_tensors((x,)):
        return Ndim().symbolic_call(x)
    return backend.numpy.ndim(x)
Return the number of dimensions of a tensor. Args: x: Input tensor. Returns: The number of dimensions in `x`.
github-repos
def slice(filename, number_tiles=None, col=None, row=None, save=True):
    im = Image.open(filename)
    im_w, im_h = im.size
    columns = 0
    rows = 0
    if number_tiles is not None:
        validate_image(im, number_tiles)
        columns, rows = calc_columns_rows(number_tiles)
        extras = (columns * rows) - number_tiles
    else:
        validate_image_col_row(im, col, row)
        columns = col
        rows = row
        # The original computed (columns * rows) - number_tiles here, which fails
        # when number_tiles is None; 'extras' is unused below, so default it to 0.
        extras = 0
    tile_w, tile_h = int(floor(im_w / columns)), int(floor(im_h / rows))
    tiles = []
    number = 1
    for pos_y in range(0, im_h - rows, tile_h):
        for pos_x in range(0, im_w - columns, tile_w):
            area = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h)
            image = im.crop(area)
            position = (int(floor(pos_x / tile_w)) + 1, int(floor(pos_y / tile_h)) + 1)
            coords = (pos_x, pos_y)
            tile = Tile(image, number, position, coords)
            tiles.append(tile)
            number += 1
    if save:
        save_tiles(tiles, prefix=get_basename(filename),
                   directory=os.path.dirname(filename))
    return tuple(tiles)
Split an image into a specified number of tiles. Args: filename (str): The filename of the image to split. number_tiles (int): The number of tiles required. Kwargs: save (bool): Whether or not to save tiles to disk. Returns: Tuple of :class:`Tile` instances.
codesearchnet
def layout(mtf_graph, mesh_shape, mtf_outputs=()):
    mesh_shape = mtf.convert_to_shape(mesh_shape)
    estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape, mtf_outputs)
    optimizer = layout_optimizer.LayoutOptimizer(estimator)
    return mtf.convert_to_layout_rules(optimizer.solve())
Compute layout rules based on a computational graph and mesh shape. Args: mtf_graph: a mtf.Graph. mesh_shape: an mtf.Shape, str, or listlike of mtf.Dimension. mtf_outputs: an optional iterable of mtf.Tensor, representing the outputs of the computation. Returns: a mtf.LayoutRules
codesearchnet
def index_path_for(window):
    if output_path:
        return '%s/INDEX-%s' % (output_path, window.max_timestamp())
    else:
        return None
Returns: path to the index file containing all shard names or None if no output_path is set
github-repos
def load_config_file(appdirs=DEFAULT_APPDIRS, file_name=DEFAULT_CONFIG_FILENAME,
                     fallback_config_instance=None):
    if not fallback_config_instance:
        fallback_config_instance = backend_config_to_configparser(
            get_default_backend_config(appdirs))
    config = SafeConfigParser()
    path = get_config_path(appdirs, file_name)
    if not config.read(path):
        config = write_config_file(
            fallback_config_instance, appdirs=appdirs, file_name=file_name)
    return config
Retrieve config information from file at default location. If no config file is found, a new one will be created either with ``fallback_config_instance`` as content or, if none is provided, with the result of ``get_default_backend_config``. Args: appdirs (HamsterAppDirs, optional): ``HamsterAppDirs`` instance storing app/user specific path information. file_name (text_type, optional): Name of the config file. Defaults to ``DEFAULT_CONFIG_FILENAME``. fallback_config_instance (ConfigParser): Backend config that is to be used to populate the config file that is created if no pre-existing one can be found. Returns: SafeConfigParser: Config loaded from file, either from the pre-existing config file or the one created with fallback values.
codesearchnet
def remove_regex(urls, regex):
    if not regex:
        return urls
    if not isinstance(urls, (list, set, tuple)):
        urls = [urls]
    try:
        non_matching_urls = [url for url in urls if not re.search(regex, url)]
    except TypeError:
        return []
    return non_matching_urls
Parse a list for non-matches to a regex. Args: urls: iterable of urls regex: string regex to be parsed for Returns: list of strings not matching regex
juraj-google-style
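A usage sketch for `remove_regex` above; the URLs are hypothetical.

```python
urls = ["http://example.com/a.html", "http://example.com/report.pdf"]
print(remove_regex(urls, r"\.pdf$"))
# ['http://example.com/a.html']
```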
def put_rpc(self, address, rpc_id, arg_payload, response):
    self._rpc_queue.put_nowait((address, rpc_id, arg_payload, response))
Place an RPC onto the RPC queue. The RPC will be dispatched asynchronously by the background dispatch task. This method must be called from the event loop. This method does not block. Args: address (int): The address of the tile with the RPC rpc_id (int): The id of the rpc you want to call arg_payload (bytes): The RPC payload response (GenericResponse): The object to use to signal the result.
juraj-google-style
def _maybe_resolve_alias(alias, name_to_class, name_to_constant): if not isinstance(alias.type, pytd.NamedType): return alias if alias.type.name in _TYPING_SETS: return None if '.' not in alias.type.name: return alias parts = alias.type.name.split('.') if parts[0] not in name_to_class and parts[0] not in name_to_constant: return alias prev_value = None value = name_to_class.get(parts[0]) or name_to_constant[parts[0]] for part in parts[1:]: prev_value = value if isinstance(value, pytd.Constant): if not isinstance(value.type, pytd.NamedType) or value.type.name not in name_to_class: return alias value = name_to_class[value.type.name] if not isinstance(value, pytd.Class): return alias if part in value: value = value.Lookup(part) else: for base in value.bases: if base.name not in name_to_class: return alias if part in name_to_class[base.name]: value = name_to_class[base.name].Lookup(part) break else: return alias if isinstance(value, pytd.Class): return pytd.Constant(alias.name, pytdgen.pytd_type(pytd.NamedType(alias.type.name))) elif isinstance(value, pytd.Function): return pytd.AliasMethod(value.Replace(name=alias.name), from_constant=isinstance(prev_value, pytd.Constant)) else: return value.Replace(name=alias.name)
Resolve the alias if possible. Args: alias: A pytd.Alias name_to_class: A class map used for resolution. name_to_constant: A constant map used for resolution. Returns: None, if the alias pointed to an un-aliasable type. The resolved value, if the alias was resolved. The alias, if it was not resolved.
github-repos
def exists(self, workflow_id):
    try:
        db = self._client[self.database]
        col = db[WORKFLOW_DATA_COLLECTION_NAME]
        return col.find_one({"_id": ObjectId(workflow_id)}) is not None
    except ConnectionFailure:
        raise DataStoreNotConnected()
Checks whether a document with the specified workflow id already exists. Args: workflow_id (str): The workflow id that should be checked. Raises: DataStoreNotConnected: If the data store is not connected to the server. Returns: bool: ``True`` if a document with the specified workflow id exists.
juraj-google-style
def print_gate(gate: Gate, ndigits: int = 2, file: TextIO = None) -> None:
    N = gate.qubit_nb
    gate_tensor = gate.vec.asarray()
    lines = []
    for index, amplitude in np.ndenumerate(gate_tensor):
        ket = "".join([str(n) for n in index[0:N]])
        bra = "".join([str(index[n]) for n in range(N, 2 * N)])
        if round(abs(amplitude) ** 2, ndigits) > 0.0:
            lines.append('{} -> {} : {}'.format(bra, ket, amplitude))
    lines.sort(key=lambda x: int(x[0:N]))
    print('\n'.join(lines), file=file)
Pretty print a gate tensor Args: gate: ndigits: file: Stream to which to write. Defaults to stdout
juraj-google-style
def has_member(self, device_object):
    if device_object.tag == 'computer':
        container_search = 'computers/computer'
    elif device_object.tag == 'mobile_device':
        container_search = 'mobile_devices/mobile_device'
    else:
        raise ValueError
    # '!= 0' replaces the original 'is not 0' identity check, which is unreliable for ints.
    return len([device for device in self.findall(container_search)
                if device.findtext('id') == device_object.id]) != 0
Return bool whether group has a device as a member. Args: device_object (Computer or MobileDevice). Membership is determined by ID, as names can be shared amongst devices.
codesearchnet
def use_wrapped_call(layer, call_fn, default_training_value=None, return_method=False): expects_training_arg = layer_uses_training_bool(layer) if hasattr(call_fn, 'original_layer_call'): original_call = call_fn.original_layer_call call_fn = call_fn.__call__ else: original_call = call_fn fn, arg_spec = maybe_add_training_arg(original_call, call_fn, expects_training_arg, default_training_value) def return_outputs_and_add_losses(*args, **kwargs): if return_method: args = args[1:] outputs, losses = fn(*args, **kwargs) layer.add_loss(losses, inputs=True) if context.executing_eagerly(): for i in layer._flatten_layers(): if i is not layer: i._eager_losses = [base_layer_utils.REVIVED_LOSS_PLACEHOLDER] return outputs decorated = tf_decorator.make_decorator(target=call_fn, decorator_func=return_outputs_and_add_losses, decorator_argspec=arg_spec) if return_method: return types.MethodType(decorated, layer) else: return decorated
Creates fn that adds the losses returned by call_fn & returns the outputs. Args: layer: A Keras layer object call_fn: tf.function that takes layer inputs (and possibly a training arg), and returns a tuple of (outputs, list of losses). default_training_value: Default value of the training kwarg. If `None`, the default is `K.learning_phase()`. return_method: Whether to return a method bound to the layer. Returns: function that calls call_fn and returns the outputs. Losses returned by call_fn are added to the layer losses.
github-repos
def convert_to_date_tensor(date_inputs): if isinstance(date_inputs, DateTensor): return date_inputs if hasattr(date_inputs, 'year'): return from_datetimes(date_inputs) if isinstance(date_inputs, np.ndarray): date_inputs = date_inputs.astype('datetime64[D]') return from_np_datetimes(date_inputs) if tf.is_tensor(date_inputs): return from_ordinals(date_inputs) if isinstance(date_inputs, collections.abc.Sequence): if not date_inputs: return from_ordinals([]) test_element = date_inputs[0] if hasattr(test_element, 'year'): return from_datetimes(date_inputs) if isinstance(test_element, collections.abc.Sequence): return from_tuples(date_inputs) if len(date_inputs) == 3: return from_year_month_day(date_inputs[0], date_inputs[1], date_inputs[2]) try: as_ordinals = tf.convert_to_tensor(date_inputs, dtype=tf.int32) return from_ordinals(as_ordinals) except ValueError as e: raise ValueError('Failed to convert inputs to DateTensor. Unrecognized format. Error: ' + e)
Converts supplied data to a `DateTensor` if possible. Args: date_inputs: One of the supported types that can be converted to a DateTensor. The following input formats are supported. 1. Sequence of `datetime.datetime`, `datetime.date`, or any other structure with data attributes called 'year', 'month' and 'day'. 2. A numpy array of `datetime64` type. 3. Sequence of (year, month, day) Tuples. Months are 1-based (with January as 1) and constants.Months enum may be used instead of ints. Days are also 1-based. 4. A tuple of three int32 `Tensor`s containing year, month and date as positive integers in that order. 5. A single int32 `Tensor` containing ordinals (i.e. number of days since 31 Dec 0 with 1 being 1 Jan 1.) Returns: A `DateTensor` object representing the supplied dates. Raises: ValueError: If conversion fails for any reason.
github-repos
def predict(self, x, add_intercept=False):
    if x.min() < self.start:
        raise Warning("x.min() < self.start")
    if x.max() > self.end:
        raise Warning("x.max() > self.end")
    return get_X_spline(x=x, knots=self.knots, n_bases=self.n_bases,
                        spline_order=self.spline_order, add_intercept=add_intercept)
For some x, predict the bn(x) for each base Arguments: x: np.array; Vector of dimension 1 add_intercept: bool; should we add the intercept to the final array Returns: np.array, of shape (len(x), n_bases + (add_intercept))
juraj-google-style
def get_seqprop_within(self, chain_id, resnum, angstroms, only_protein=True,
                       use_ca=False, custom_coord=None, return_resnums=False):
    polypep, resnums = self.get_polypeptide_within(
        chain_id=chain_id, resnum=resnum, angstroms=angstroms, use_ca=use_ca,
        only_protein=only_protein, custom_coord=custom_coord, return_resnums=True)
    chain_subseq = self.chains.get_by_id(chain_id).get_subsequence(resnums)
    if return_resnums:
        return chain_subseq, resnums
    else:
        return chain_subseq
Get a SeqProp object of the amino acids within X angstroms of the specified chain + residue number. Args: resnum (int): Residue number of the structure chain_id (str): Chain ID of the residue number angstroms (float): Radius of the search sphere only_protein (bool): If only protein atoms (no HETATMS) should be included in the returned sequence use_ca (bool): If the alpha-carbon atom should be used for searching, default is False (last atom of residue used) Returns: SeqProp: Sequence that represents the amino acids in the vicinity of your residue number.
codesearchnet
def resolve_for(self, node, exact=None):
    from capybara.driver.node import Node
    from capybara.node.element import Element
    from capybara.node.simple import Simple

    @node.synchronize
    def resolve():
        if self.selector.format == "css":
            children = node._find_css(self.css())
        else:
            children = node._find_xpath(self.xpath(exact))

        def wrap(child):
            if isinstance(child, Node):
                return Element(node.session, child, node, self)
            else:
                return Simple(child)

        children = [wrap(child) for child in children]
        return Result(children, self)

    return resolve()
Resolves this query relative to the given node. Args: node (node.Base): The node relative to which this query should be resolved. exact (bool, optional): Whether to exactly match text. Returns: list[Element]: A list of elements matched by this query.
juraj-google-style
def _string_from_ip_int(self, ip_int):
    octets = []
    for _ in xrange(4):
        octets.insert(0, str(ip_int & 0xFF))
        ip_int >>= 8
    return '.'.join(octets)
Turns a 32-bit integer into dotted decimal notation. Args: ip_int: An integer, the IP address. Returns: The IP address as a string in dotted decimal notation.
juraj-google-style
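The conversion performed by `_string_from_ip_int` above, shown standalone with `range` in place of the Python 2 `xrange` (the `self` argument is unused).

```python
ip_int = 0xC0A80001
octets = []
for _ in range(4):
    octets.insert(0, str(ip_int & 0xFF))
    ip_int >>= 8
print(".".join(octets))  # 192.168.0.1
```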
def get_event_q(self, event_name):
    self.lock.acquire()
    if event_name not in self.event_dict or self.event_dict[event_name] is None:
        self.event_dict[event_name] = queue.Queue()
    self.lock.release()
    event_queue = self.event_dict[event_name]
    return event_queue
Obtain the queue storing events of the specified name. If no event of this name has been polled, wait for one to. Returns: A queue storing all the events of the specified name. None if timed out. Raises: queue.Empty: Raised if the queue does not exist and timeout has passed.
codesearchnet
def from_json(cls, data):
    optional_keys = {'wind_direction': 0, 'rain': False, 'snow_on_ground': False}
    assert 'wind_speed' in data, 'Required key "wind_speed" is missing!'
    for key, val in optional_keys.items():
        if key not in data:
            data[key] = val
    return cls(data['wind_speed'], data['wind_direction'], data['rain'],
               data['snow_on_ground'])
Create a Wind Condition from a dictionary. Args: data = { "wind_speed": float, "wind_direction": float, "rain": bool, "snow_on_ground": bool}
codesearchnet
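A usage sketch for `from_json` above, assuming the enclosing class is called `WindCondition` (the actual class name is not shown in this row).

```python
data = {"wind_speed": 5.0, "wind_direction": 270}
wc = WindCondition.from_json(data)  # rain and snow_on_ground default to False
```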
def group_by(what, by):
    return proso.dict.group_keys_by_values({x: by(x) for x in what})
Take a list, apply the given function to each of its values, then group the values by the function results.

.. testsetup::

    from proso.list import group_by

.. doctest::

    >>> group_by([i for i in range(10)], by=lambda x: x % 2 == 0)
    {False: [1, 3, 5, 7, 9], True: [0, 2, 4, 6, 8]}

Args:
    what: a list which will be transformed
    by: a function which will be applied on values of the given list

Returns:
    dict: values grouped by the function results
codesearchnet
def simple_two_objective_reward(example):
    num = int(example * 10) % 9 + 1
    return [num, 10 - num]
Reward for the trivial search space. The reward (i.e. fitness) is a 2-element list. The goal of the search, therefore, is to find the pareto frontier in simple_two_objective_pareto function. Args: example: a materialized value. Returns: A 2-element list.
github-repos
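A worked example of the reward computation above.

```python
print(simple_two_objective_reward(0.57))
# int(0.57 * 10) % 9 + 1 == 6, so the result is [6, 4]; the two objectives always sum to 10.
```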
def __init__(self, batch_size=20, seq_len=10, min_pitch=24, max_pitch=108):
    self.__batch_size = batch_size
    self.__seq_len = seq_len
    self.__dim = max_pitch - min_pitch
Init. Args: batch_size: Batch size. seq_len: The length of sequences. The length corresponds to the number of `time` steps split by `time_fraction`. min_pitch: The minimum note number. max_pitch: The maximum note number.
juraj-google-style
def isfile(self, path, follow_symlinks=True):
    return self._is_of_type(path, S_IFREG, follow_symlinks)
Determine if path identifies a regular file. Args: path: Path to filesystem object. Returns: `True` if path points to a regular file (following symlinks). Raises: TypeError: if path is None.
codesearchnet
def do_decode(cls, obj, obj_type):
    if inspect.isclass(obj_type) and issubclass(obj_type, ConjureBeanType):
        return cls.decode_conjure_bean_type(obj, obj_type)
    elif inspect.isclass(obj_type) and issubclass(obj_type, ConjureUnionType):
        return cls.decode_conjure_union_type(obj, obj_type)
    elif inspect.isclass(obj_type) and issubclass(obj_type, ConjureEnumType):
        return cls.decode_conjure_enum_type(obj, obj_type)
    elif isinstance(obj_type, DictType):
        return cls.decode_dict(obj, obj_type.key_type, obj_type.value_type)
    elif isinstance(obj_type, ListType):
        return cls.decode_list(obj, obj_type.item_type)
    elif isinstance(obj_type, OptionalType):
        return cls.decode_optional(obj, obj_type.item_type)
    return cls.decode_primitive(obj, obj_type)
Decodes json into the specified type Args: obj: the json object to decode element_type: a class object which is the type we're decoding into.
codesearchnet
def prune_candidates(candidates):
    pruned = []
    for first, second in candidates:
        if first.__class__ is Linearization:
            nodes1 = first.curve.nodes
        else:
            nodes1 = first.nodes
        if second.__class__ is Linearization:
            nodes2 = second.curve.nodes
        else:
            nodes2 = second.nodes
        if convex_hull_collide(nodes1, nodes2):
            pruned.append((first, second))
    return pruned
Reduce number of candidate intersection pairs. .. note:: This is a helper for :func:`_all_intersections`. Uses more strict bounding box intersection predicate by forming the actual convex hull of each candidate curve segment and then checking if those convex hulls collide. Args: candidates (List): An iterable of pairs of curves (or linearized curves). Returns: List: A pruned list of curve pairs.
juraj-google-style
def _tflearn_features(train_config, args): feature_columns = [] target_name = train_config['target_column'] key_name = train_config['key_column'] for name in train_config['numerical_columns']: if ((name != target_name) and (name != key_name)): feature_columns.append(tf.contrib.layers.real_valued_column(name, dimension=1)) for name in train_config['categorical_columns']: if ((name != target_name) and (name != key_name)): transform_config = train_config['transforms'].get(name, {}) transform_name = transform_config.get('transform', None) if is_dnn_model(args.model_type): if (transform_name == 'embedding'): sparse = tf.contrib.layers.sparse_column_with_integerized_feature(name, bucket_size=train_config['vocab_stats'][name]['n_classes']) learn_feature = tf.contrib.layers.embedding_column(sparse, dimension=transform_config['embedding_dim']) elif ((transform_name == 'one_hot') or (transform_name is None)): sparse = tf.contrib.layers.sparse_column_with_integerized_feature(name, bucket_size=train_config['vocab_stats'][name]['n_classes']) learn_feature = tf.contrib.layers.one_hot_column(sparse) else: raise ValueError(("Unknown transform name. Only 'embedding' and 'one_hot' transforms are supported. Got %s" % transform_name)) elif is_linear_model(args.model_type): if ((transform_name == 'one_hot') or (transform_name is None)): learn_feature = tf.contrib.layers.sparse_column_with_integerized_feature(name, bucket_size=train_config['vocab_stats'][name]['n_classes']) elif (transform_name == 'embedding'): learn_feature = tf.contrib.layers.sparse_column_with_hash_bucket(name, hash_bucket_size=transform_config['embedding_dim']) else: raise ValueError(("Unknown transform name. Only 'embedding' and 'one_hot' transforms are supported. Got %s" % transform_name)) feature_columns.append(learn_feature) return feature_columns
Builds the tf.learn feature list. All numerical features are just given real_valued_column because all the preprocessing transformations are done in preprocess_input. Categorical features are processed here depending on whether the vocab map (from string to int) was applied in preprocess_input. Args: train_config: our train config object args: command line args. Returns: List of TF Learn feature columns. Raises: ValueError: if wrong transforms are used for the model type.
codesearchnet
def for_all_test_methods(decorator, *args, **kwargs):
    def all_test_methods_impl(cls):
        for name in dir(cls):
            value = getattr(cls, name)
            if callable(value) and name.startswith('test') and (name != 'test_session'):
                setattr(cls, name, decorator(*args, **kwargs)(value))
        return cls
    return all_test_methods_impl
Generate class-level decorator from given method-level decorator. It is expected for the given decorator to take some arguments and return a method that is then called on the test method to produce a decorated method. Args: decorator: The decorator to apply. *args: Positional arguments **kwargs: Keyword arguments Returns: Function that will decorate a given classes test methods with the decorator.
github-repos
def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.') decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng params = {'params': params or self.params} if past_key_values: params['cache'] = past_key_values mutable = ['cache'] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_hidden_states, **kwargs): projection_module = module._get_projection_module() decoder_module = module._get_decoder_module() if projection_module is not None: encoder_hidden_states = projection_module(encoder_hidden_states) return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_hidden_states=encoder_hidden_states, **kwargs) outputs = self.module.apply(params, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward) if past_key_values is not None and return_dict: outputs, past = outputs outputs['past_key_values'] = unfreeze(past['cache']) return outputs elif past_key_values is not None and (not return_dict): outputs, past = outputs outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:] return outputs
Returns: Example: ```python >>> from transformers import FlaxSpeechEncoderDecoderModel >>> import jax.numpy as jnp >>> # initialize a wav2vec2-2-bart from pretrained wav2vec2 and bart models. Note that the cross-attention layers will be randomly initialized >>> model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( ... "facebook/wav2vec2-large-lv60", "facebook/bart-large" ... ) >>> inputs = jnp.ones((2, 5000), dtype=jnp.float32) >>> encoder_outputs = model.encode(inputs) >>> decoder_start_token_id = model.config.decoder.bos_token_id >>> decoder_input_ids = jnp.ones((inputs.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> logits = outputs.logits ```
github-repos
def render_asset_html(self, path, tag_template):
    url = os.path.join(settings.STATIC_URL, path)
    return tag_template.format(url=url)
Render HTML tag for a given path. Arguments: path (string): Relative path from static directory. tag_template (string): Template string for HTML tag. Returns: string: HTML tag with url from given path.
juraj-google-style
def create_sprite_image(examples): def generate_image_from_thubnails(thumbnails, thumbnail_dims): num_thumbnails = tf.shape(thumbnails)[0].eval() images_per_row = int(math.ceil(math.sqrt(num_thumbnails))) thumb_height = thumbnail_dims[0] thumb_width = thumbnail_dims[1] master_height = images_per_row * thumb_height master_width = images_per_row * thumb_width num_channels = 3 master = np.zeros([master_height, master_width, num_channels]) for idx, image in enumerate(thumbnails.eval()): left_idx = idx % images_per_row top_idx = int(math.floor(idx / images_per_row)) left_start = left_idx * thumb_width left_end = left_start + thumb_width top_start = top_idx * thumb_height top_end = top_start + thumb_height master[top_start:top_end, left_start:left_end, :] = image return tf.image.encode_png(master) image_feature_name = 'image/encoded' sprite_thumbnail_dim_px = 32 with tf.compat.v1.Session(): keys_to_features = { image_feature_name: tf.FixedLenFeature((), tf.string, default_value=''), } parsed = tf.parse_example(examples, keys_to_features) images = tf.zeros([1, 1, 1, 1], tf.float32) i = tf.constant(0) thumbnail_dims = (sprite_thumbnail_dim_px, sprite_thumbnail_dim_px) num_examples = tf.constant(len(examples)) encoded_images = parsed[image_feature_name] def loop_body(i, encoded_images, images): encoded_image = encoded_images[i] image = tf.image.decode_jpeg(encoded_image, channels=3) resized_image = tf.image.resize(image, thumbnail_dims) expanded_image = tf.expand_dims(resized_image, 0) images = tf.cond( tf.equal(i, 0), lambda: expanded_image, lambda: tf.concat([images, expanded_image], 0)) return i + 1, encoded_images, images loop_out = tf.while_loop( lambda i, encoded_images, images: tf.less(i, num_examples), loop_body, [i, encoded_images, images], shape_invariants=[ i.get_shape(), encoded_images.get_shape(), tf.TensorShape(None) ]) sprite = generate_image_from_thubnails(loop_out[2], thumbnail_dims) return sprite.eval()
Returns an encoded sprite image for use in Facets Dive. Args: examples: A list of serialized example protos to get images for. Returns: An encoded PNG.
juraj-google-style
def delete(self, rid, raise_on_error=True):
    return self.ds.delete(rid, raise_on_error)
Delete a record from the data store. Args: rid (str): The record identifier. raise_on_error (bool): If True and not r.ok this method will raise a RunTimeError. Returns: object : Python request response.
juraj-google-style
def wrap_lines(self, text, indent_level, indent_size=4):
    indent = (' ' * indent_size) * indent_level
    lines = text.split('\n')
    wrapped_lines = []
    for line in lines:
        if line == '':
            wrapped_lines.append(line)
        else:
            wrapped_lines.append(indent + line)
    return '\n'.join(wrapped_lines)
Indent a multiline string Args: text (string): The string to indent indent_level (int): The number of indent_size spaces to prepend to each line indent_size (int): The number of spaces to prepend for each indent level Returns: string: The indented block of text
codesearchnet
def concatenate(tup, axis=0):
    from distob import engine
    if len(tup) == 0:
        raise ValueError('need at least one array to concatenate')
    first = tup[0]
    others = tup[1:]
    if (hasattr(first, 'concatenate') and
            hasattr(type(first), '__array_interface__')):
        return first.concatenate(others, axis)
    arrays = []
    for ar in tup:
        if isinstance(ar, DistArray):
            if axis == ar._distaxis:
                arrays.extend(ar._subarrays)
            else:
                arrays.append(gather(ar))
        elif isinstance(ar, RemoteArray):
            arrays.append(ar)
        elif isinstance(ar, Remote):
            arrays.append(_remote_to_array(ar))
        elif hasattr(type(ar), '__array_interface__'):
            arrays.append(ar)
        else:
            arrays.append(np.array(ar))
    if all(isinstance(ar, np.ndarray) for ar in arrays):
        return np.concatenate(arrays, axis)
    total_length = 0
    commonshape = list(arrays[0].shape)
    commonshape[axis] = None
    for ar in arrays:
        total_length += ar.shape[axis]
        shp = list(ar.shape)
        shp[axis] = None
        if shp != commonshape:
            raise ValueError('incompatible shapes for concatenation')
    # NOTE: this line was truncated in the source; reconstructed here as an even
    # split of the total length across the available engines (assumption).
    blocksize = ((total_length - 1) // engine.nengines) + 1
    rarrays = []
    for ar in arrays:
        if isinstance(ar, DistArray):
            rarrays.extend(ar._subarrays)
        elif isinstance(ar, RemoteArray):
            rarrays.append(ar)
        else:
            da = _scatter_ndarray(ar, axis, blocksize)
            for ra in da._subarrays:
                rarrays.append(ra)
            del da
    del arrays
    eid = rarrays[0]._id.engine
    if all(ra._id.engine == eid for ra in rarrays):
        if eid == engine.eid:
            return concatenate([gather(r) for r in rarrays], axis)
        else:
            return call(concatenate, rarrays, axis)
    else:
        return DistArray(rarrays, axis)
Join a sequence of arrays together. Will aim to join `ndarray`, `RemoteArray`, and `DistArray` without moving their data, if they happen to be on different engines. Args: tup (sequence of array_like): Arrays to be concatenated. They must have the same shape, except in the dimension corresponding to `axis`. axis (int, optional): The axis along which the arrays will be joined. Returns: res: `ndarray`, if inputs were all local `RemoteArray`, if inputs were all on the same remote engine `DistArray`, if inputs were already scattered on different engines
codesearchnet
def vmstats(): spi = SYSTEM_PERFORMANCE_INFORMATION() retlen = ctypes.c_ulong() ctypes.windll.ntdll.NtQuerySystemInformation(2, ctypes.byref(spi), ctypes.sizeof(spi), ctypes.byref(retlen)) ret = {} for field in spi._fields_: ret.update({field[0]: getattr(spi, field[0])}) return ret
Return information about the virtual memory on the machine Returns: dict: A dictionary of virtual memory stats CLI Example: .. code-block:: bash salt '*' status.vmstats
codesearchnet
def logical_interconnect_groups(self): if (not self.__logical_interconnect_groups): self.__logical_interconnect_groups = LogicalInterconnectGroups(self.__connection) return self.__logical_interconnect_groups
Gets the LogicalInterconnectGroups API client. Returns: LogicalInterconnectGroups:
codesearchnet
def info(self, show_defaults=False): pprinter = PrettyPrinter(show_options=True, show_defaults=show_defaults) print(pprinter.pprint(self._obj))
Prints a repr of the object including any applied options. Args: show_defaults: Whether to include default options
codesearchnet
def __init__(self, elements=None): super(TermList, self).__init__() self._contents = set() try: for t in elements or []: super(TermList, self).append(t) self._contents.add(t.id) except AttributeError: raise TypeError('TermList can only contain Terms.')
Create a new `TermList`. Arguments: elements (collections.Iterable, optional): an Iterable that yields `Term` objects. Raises: TypeError: when the given ``elements`` are not instances of `Term`.
juraj-google-style
def set_card_simple(self, title, content): self.response.card.type = 'Simple' self.response.card.title = title self.response.card.content = content
Set response card as simple type. title and content cannot exceed 8,000 characters. Args: title: str. Title of Simple or Standard type card. content: str. Content of Simple type card.
codesearchnet
def WriteFileHash(self, path, hash_value): string = '{0:s}\t{1:s}'.format(hash_value, path) encoded_string = self._EncodeString(string) print(encoded_string)
Writes the file path and hash to stdout. Args: path (str): path of the file. hash_value (str): message digest hash calculated over the file data.
codesearchnet
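The hash value passed in is just a hex digest string; a minimal sketch of producing one with hashlib and pairing it with the same tab-separated line format. The algorithm (SHA-256) and the file path are assumptions for illustration; the original tool computes its digests elsewhere.

import hashlib

def file_sha256(path, chunk_size=65536):
    digest = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()

# print('{0:s}\t{1:s}'.format(file_sha256('/tmp/example.bin'), '/tmp/example.bin'))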
def start(self, use_atexit=True):
    assert not self._process
    _logger.debug('Starting process %s', self._proc_args)
    process_future = asyncio.create_subprocess_exec(
        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        *self._proc_args
    )
    self._process = yield from process_future
    self._stderr_reader = asyncio.ensure_future(self._read_stderr())
    self._stdout_reader = asyncio.ensure_future(self._read_stdout())
    if use_atexit:
        atexit.register(self.close)
Start the executable. Args: use_atexit (bool): If True, the process will automatically be terminated at exit.
juraj-google-style
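On current Python the same start-up pattern is usually written with async/await; a minimal standalone sketch of launching a piped subprocess and reaping it. The echo binary is an assumption (POSIX systems only).

import asyncio

async def run_once(*proc_args):
    process = await asyncio.create_subprocess_exec(
        *proc_args,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE)
    stdout, _ = await process.communicate()  # read all output, then wait for exit
    return process.returncode, stdout

print(asyncio.run(run_once('echo', 'hello')))  # (0, b'hello\n')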
def _checkBeginIndicesAnnotations(self, out, a): begin_line_num = 0 while not out.lines[begin_line_num].startswith('array'): begin_line_num += 1 element_index = 0 for line_num in range(begin_line_num, len(out.lines)): line = out.lines[line_num] if '...' in line: raise ValueError('Unexpected found ellipses in line representing array') matches = re.finditer(self._ELEMENT_REGEX, line) for line_item_index, _ in enumerate(matches): subscripts = list(np.unravel_index(element_index, a.shape)) if line_item_index == 0: self.assertEqual({tensor_format.BEGIN_INDICES_KEY: subscripts}, out.annotations[line_num]) element_index += 1 self.assertEqual(element_index, np.size(a))
Check the beginning-index annotations of an ndarray representation. Args: out: An instance of RichTextLines representing a numpy.ndarray. a: The numpy.ndarray being represented. Raises: ValueError: if any ellipses ("...") are found in the lines representing the array.
github-repos
def add_datasets(self, datasets, datasets_to_check=None): if (datasets_to_check is None): datasets_to_check = self.get_datasets() alldatasetsadded = True for dataset in datasets: if (not self.add_dataset(dataset, datasets_to_check=datasets_to_check)): alldatasetsadded = False return alldatasetsadded
Add multiple datasets Args: datasets (List[Union[Dataset,Dict,str]]): A list of either dataset ids or dataset metadata from Dataset objects or dictionaries datasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase. Returns: bool: True if all datasets added or False if any already present
codesearchnet
def __init__(self, maximum_iterations=None, parallel_iterations=10, back_prop=True, swap_memory=False, name='while_context', grad_state=None, context_def=None, import_scope=None): if context_def: self._init_from_proto(context_def, import_scope=import_scope) else: ControlFlowContext.__init__(self) self._init_from_args(maximum_iterations, parallel_iterations, back_prop, swap_memory, name) self._grad_state = grad_state
"Creates a `WhileContext`. Args: maximum_iterations: Optional upper bound on number of loop iterations. parallel_iterations: The number of iterations allowed to run in parallel. back_prop: Whether backprop is enabled for this while loop. swap_memory: Whether GPU-CPU memory swap is enabled for this loop. name: Optional name prefix for the returned tensors. grad_state: The gradient loop state. context_def: Optional `WhileContextDef` protocol buffer to initialize the `Whilecontext` python object from. import_scope: Optional `string`. Name scope to add. Only used when initialing from protocol buffer.
github-repos
def GetPresetsByOperatingSystem(self, operating_system): preset_definitions = [] for preset_definition in self._definitions.values(): for preset_operating_system in preset_definition.operating_systems: if preset_operating_system.IsEquivalent(operating_system): preset_definitions.append(preset_definition) return preset_definitions
Retrieves preset definitions for a specific operating system. Args: operating_system (OperatingSystemArtifact): an operating system artifact attribute container. Returns: list[PresetDefinition]: preset definitions that correspond with the operating system.
codesearchnet
def zbar_function(fname, restype, *args): prototype = CFUNCTYPE(restype, *args) return prototype((fname, load_libzbar()))
Returns a foreign function exported by `zbar`. Args: fname (:obj:`str`): Name of the exported function as string. restype (:obj:): Return type - one of the `ctypes` primitive C data types. *args: Arguments - a sequence of `ctypes` primitive C data types. Returns: ctypes.CFUNCTYPE: A wrapper around the function.
codesearchnet
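The (name, dll) binding pattern used above is plain ctypes and can be tried against any shared library; a sketch of the same prototype mechanism wrapping abs from the C runtime. This assumes a POSIX system where find_library('c') resolves; zbar itself is not required.

import ctypes
from ctypes import CFUNCTYPE, c_int
from ctypes.util import find_library

# Load the C runtime instead of libzbar, purely to demonstrate the pattern.
libc = ctypes.CDLL(find_library('c'))
prototype = CFUNCTYPE(c_int, c_int)  # restype first, then argument types
c_abs = prototype(('abs', libc))     # bind the exported symbol by name
print(c_abs(-7))  # 7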
def __init__(self, file_path_prefix, coder, file_name_suffix='', num_shards=0, shard_name_template=None, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO, *, max_records_per_shard=None, max_bytes_per_shard=None, skip_if_empty=False): if not isinstance(file_path_prefix, (str, ValueProvider)): raise TypeError('file_path_prefix must be a string or ValueProvider;got %r instead' % file_path_prefix) if not isinstance(file_name_suffix, (str, ValueProvider)): raise TypeError('file_name_suffix must be a string or ValueProvider;got %r instead' % file_name_suffix) if not CompressionTypes.is_valid_compression_type(compression_type): raise TypeError('compression_type must be CompressionType object but was %s' % type(compression_type)) if shard_name_template is None: shard_name_template = DEFAULT_SHARD_NAME_TEMPLATE elif shard_name_template == '': num_shards = 1 if isinstance(file_path_prefix, str): file_path_prefix = StaticValueProvider(str, file_path_prefix) if isinstance(file_name_suffix, str): file_name_suffix = StaticValueProvider(str, file_name_suffix) self.file_path_prefix = file_path_prefix self.file_name_suffix = file_name_suffix self.num_shards = num_shards self.coder = coder self.shard_name_format = self._template_to_format(shard_name_template) self.shard_name_glob_format = self._template_to_glob_format(shard_name_template) self.compression_type = compression_type self.mime_type = mime_type self.max_records_per_shard = max_records_per_shard self.max_bytes_per_shard = max_bytes_per_shard self.skip_if_empty = skip_if_empty
Raises: TypeError: if file path parameters are not a :class:`str` or :class:`~apache_beam.options.value_provider.ValueProvider`, or if **compression_type** is not member of :class:`~apache_beam.io.filesystem.CompressionTypes`. ValueError: if **shard_name_template** is not of expected format.
github-repos
def search(nasbench, search_model, algo, repeat_id, max_train_hours=5000000.0): nasbench.reset_budget_counters() times, best_valids, best_tests = ([0.0], [0.0], [0.0]) valid_models = 0 time_spent = 0 start_time = time.time() last_report_time = start_time for model, feedback in pg.sample(search_model, algo, name=str(repeat_id)): spec = model() if nasbench.is_valid(spec): results = nasbench.query(spec) valid_models += 1 feedback(results['validation_accuracy']) if results['validation_accuracy'] > best_valids[-1]: best_valids.append(results['validation_accuracy']) best_tests.append(results['test_accuracy']) else: best_valids.append(best_valids[-1]) best_tests.append(best_tests[-1]) time_spent, _ = nasbench.get_budget_counters() times.append(time_spent) if time_spent > max_train_hours: feedback.end_loop() break else: feedback.skip() if feedback.id % 100 == 0: now = time.time() print(f'Tried {feedback.id} models, valid {valid_models}, time_spent {time_spent}, elapse since last report: {now - last_report_time} seconds.') last_report_time = now print(f'Total time elapse: {time.time() - start_time} seconds.') return (times, best_valids, best_tests)
Define the search procedure. Args: nasbench: NASBench object. search_model: which is a `model` object annotated with `oneof`. algo: algorithm for search. repeat_id: identifier of current repeat. max_train_hours: max time budget to train the models, which is the sum of training time queried from NAS-Bench. Returns: A tuple of (total time spent at step i for all steps, best validation accuracy at step i for all steps, best test accuracy at step i for all steps)
github-repos
def ExtractEvents(self, parser_mediator, registry_key, **kwargs): event_data = windows_events.WindowsRegistryEventData() event_data.key_path = registry_key.path event_data.offset = registry_key.offset event_data.urls = self.URLS values_dict = {} for registry_value in registry_key.GetValues(): value_name = registry_value.name or '(default)' if value_name == 'BootExecute': if registry_value.DataIsString(): value_string = registry_value.GetDataAsObject() elif registry_value.DataIsMultiString(): value_string = ''.join(registry_value.GetDataAsObject()) elif registry_value.DataIsBinaryData(): value_string = registry_value.GetDataAsObject() else: value_string = '' error_string = ( 'Key: {0:s}, value: {1:s}: unsupported value data type: ' '{2:s}.').format( registry_key.path, value_name, registry_value.data_type_string) parser_mediator.ProduceExtractionWarning(error_string) event_data.regvalue = {'BootExecute': value_string} event = time_events.DateTimeValuesEvent( registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) else: values_dict[value_name] = registry_value.GetDataAsObject() event_data.regvalue = values_dict event = time_events.DateTimeValuesEvent( registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
juraj-google-style
def GetOutputClass(cls, name): if (not isinstance(name, py2to3.STRING_TYPES)): raise ValueError('Name attribute is not a string.') name = name.lower() if (name not in cls._output_classes): raise KeyError('Name: [{0:s}] not registered as an output module.'.format(name)) return cls._output_classes[name]
Retrieves the output class for a specific name. Args: name (str): name of the output module. Returns: type: output module class. Raises: KeyError: if there is no output class found with the supplied name. ValueError: if name is not a string.
codesearchnet
def poly_to_power_basis(bezier_coeffs): (num_coeffs,) = bezier_coeffs.shape if (num_coeffs == 1): return bezier_coeffs elif (num_coeffs == 2): (coeff0, coeff1) = bezier_coeffs return np.asfortranarray([coeff0, (coeff1 - coeff0)]) elif (num_coeffs == 3): (coeff0, coeff1, coeff2) = bezier_coeffs return np.asfortranarray([coeff0, (2.0 * (coeff1 - coeff0)), ((coeff2 - (2.0 * coeff1)) + coeff0)]) elif (num_coeffs == 4): (coeff0, coeff1, coeff2, coeff3) = bezier_coeffs return np.asfortranarray([coeff0, (3.0 * (coeff1 - coeff0)), (3.0 * ((coeff2 - (2.0 * coeff1)) + coeff0)), (((coeff3 - (3.0 * coeff2)) + (3.0 * coeff1)) - coeff0)]) else: raise _helpers.UnsupportedDegree((num_coeffs - 1), supported=(0, 1, 2, 3))
Convert a B |eacute| zier curve to polynomial in power basis. .. note:: This assumes, but does not verify, that the "B |eacute| zier degree" matches the true degree of the curve. Callers can guarantee this by calling :func:`.full_reduce`. Args: bezier_coeffs (numpy.ndarray): A 1D array of coefficients in the Bernstein basis. Returns: numpy.ndarray: 1D array of coefficients in monomial basis. Raises: .UnsupportedDegree: If the degree of the curve is not among 0, 1, 2 or 3.
codesearchnet
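A worked check of the quadratic case: with Bernstein coefficients [c0, c1, c2] the power-basis coefficients are [c0, 2(c1 - c0), c2 - 2*c1 + c0], and both forms evaluate identically on [0, 1]. The coefficient values below are made up for illustration.

import numpy as np

bernstein = np.array([1.0, 3.0, 2.0])  # c0, c1, c2
power = np.array([bernstein[0],
                  2.0 * (bernstein[1] - bernstein[0]),
                  bernstein[2] - 2.0 * bernstein[1] + bernstein[0]])

s = np.linspace(0.0, 1.0, 5)
bernstein_vals = (bernstein[0] * (1 - s) ** 2
                  + bernstein[1] * 2 * s * (1 - s)
                  + bernstein[2] * s ** 2)
power_vals = power[0] + power[1] * s + power[2] * s ** 2
print(np.allclose(bernstein_vals, power_vals))  # True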
def SCM(root_dir, repo=None): if (Git.is_repo(root_dir) or Git.is_submodule(root_dir)): return Git(root_dir, repo=repo) return NoSCM(root_dir, repo=repo)
Returns SCM instance that corresponds to a repo at the specified path. Args: root_dir (str): path to a root directory of the repo. repo (dvc.repo.Repo): dvc repo instance that root_dir belongs to. Returns: dvc.scm.base.Base: SCM instance.
codesearchnet
def _clone_functional_model(model, input_tensors=None, layer_fn=_clone_layer): if not isinstance(model, Model): raise ValueError('Expected `model` argument to be a `Model` instance, got ', model) if isinstance(model, Sequential): raise ValueError('Expected `model` argument to be a functional `Model` instance, got a `Sequential` instance instead:', model) if not model._is_graph_network: raise ValueError('Expected `model` argument to be a functional `Model` instance, but got a subclass model instead.') new_input_layers = {} if input_tensors is not None: input_tensors = nest.flatten(input_tensors) for i, input_tensor in enumerate(input_tensors): original_input_layer = model._input_layers[i] if not backend.is_keras_tensor(input_tensor): name = original_input_layer.name input_tensor = Input(tensor=input_tensor, name='input_wrapper_for_' + name) newly_created_input_layer = input_tensor._keras_history.layer new_input_layers[original_input_layer] = newly_created_input_layer else: new_input_layers[original_input_layer] = original_input_layer if not callable(layer_fn): raise ValueError('Expected `layer_fn` argument to be a callable.') model_configs, created_layers = _clone_layers_and_model_config(model, new_input_layers, layer_fn) input_tensors, output_tensors, created_layers = functional.reconstruct_from_config(model_configs, created_layers=created_layers) metrics_names = model.metrics_names model = Model(input_tensors, output_tensors, name=model.name) ancillary_layers = [layer for layer in created_layers.values() if layer not in model.layers] if ancillary_layers: new_nodes = nest.flatten([layer.inbound_nodes[1:] if functional._should_skip_first_node(layer) else layer.inbound_nodes for layer in created_layers.values()]) _insert_ancillary_layers(model, ancillary_layers, metrics_names, new_nodes) return model
Clone a functional `Model` instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. Input layers are always cloned. Args: model: Instance of `Model`. input_tensors: optional list of input tensors to build the model upon. If not provided, placeholders will be created. layer_fn: callable to be applied on non-input layers in the model. By default it clones the layer. Another example is to preserve the layer to share the weights. This is required when we create a per-replica copy of the model with distribution strategy; we want the weights to be shared but still feed inputs separately so we create new input layers. Returns: An instance of `Model` reproducing the behavior of the original model, on top of new inputs tensors, using newly instantiated weights. Raises: ValueError: in case of invalid `model` argument value or `layer_fn` argument value.
github-repos
def wait_idle(self, timeout=1.0): async def _awaiter(): background_work = {x.join() for x in self._work_queues} for event in self._events: if (not event.is_set()): background_work.add(event.wait()) (_done, pending) = (await asyncio.wait(background_work, timeout=timeout)) if (len(pending) > 0): raise TimeoutExpiredError('Timeout waiting for event loop to become idle', pending=pending) if self._on_emulation_thread(): return asyncio.wait_for(_awaiter(), timeout=timeout) self.run_task_external(_awaiter()) return None
Wait until the rpc queue is empty. This method may be called either from within the event loop or from outside of it. If it is called outside of the event loop it will block the calling thread until the rpc queue is temporarily empty. If it is called from within the event loop it will return an awaitable object that can be used to wait for the same condition. The awaitable object will already have a timeout if the timeout parameter is passed. Args: timeout (float): The maximum number of seconds to wait.
codesearchnet
def reactions_to_files(model, dest, writer, split_subsystem): def safe_file_name(origin_name): safe_name = re.sub('\\W+', '_', origin_name, flags=re.UNICODE) safe_name = re.sub('_+', '_', safe_name.lower(), flags=re.UNICODE) safe_name = safe_name.strip('_') return safe_name common_reactions = [] reaction_files = [] if (not split_subsystem): common_reactions = sorted(model.reactions, key=(lambda r: r.id)) if (len(common_reactions) > 0): reaction_file = 'reactions.yaml' with open(os.path.join(dest, reaction_file), 'w') as f: writer.write_reactions(f, common_reactions) reaction_files.append(reaction_file) else: subsystems = {} for reaction in sorted(model.reactions, key=(lambda r: r.id)): if ('subsystem' in reaction.properties): subsystem_file = safe_file_name(reaction.properties['subsystem']) subsystems.setdefault(subsystem_file, []).append(reaction) else: common_reactions.append(reaction) subsystem_folder = 'reactions' sub_existance = False for (subsystem_file, reactions) in iteritems(subsystems): if (len(reactions) < _MAX_REACTION_COUNT): for reaction in reactions: common_reactions.append(reaction) elif (len(reactions) > 0): mkdir_p(os.path.join(dest, subsystem_folder)) subsystem_file = os.path.join(subsystem_folder, '{}.yaml'.format(subsystem_file)) with open(os.path.join(dest, subsystem_file), 'w') as f: writer.write_reactions(f, reactions) reaction_files.append(subsystem_file) sub_existance = True reaction_files.sort() if sub_existance: reaction_file = os.path.join(subsystem_folder, 'other_reactions.yaml') else: reaction_file = 'reactions.yaml' if (len(common_reactions) > 0): with open(os.path.join(dest, reaction_file), 'w') as f: writer.write_reactions(f, common_reactions) reaction_files.append(reaction_file) return reaction_files
Turn the reaction subsystems into their own files. If a subsystem has a number of reactions over the threshold, it gets its own YAML file. All other reactions, those that don't have a subsystem or are in a subsystem that falls below the threshold, get added to a common reaction file. Args: model: :class:`psamm_import.model.MetabolicModel`. dest: output path for model files. writer: :class:`psamm.datasource.native.ModelWriter`. split_subsystem: Divide reactions into multiple files by subsystem.
codesearchnet
def expand_valid_values(valid_values): if ('${GROUP_TYPES}' in valid_values): valid_values.remove('${GROUP_TYPES}') valid_values.extend(['Adversary', 'Campaign', 'Document', 'Email', 'Event', 'Incident', 'Intrusion Set', 'Signature', 'Task', 'Threat']) elif ('${OWNERS}' in valid_values): valid_values.remove('${OWNERS}') valid_values.append('') elif ('${USERS}' in valid_values): valid_values.remove('${USERS}') valid_values.append('') return valid_values
Expand supported playbook variables to their full list. Args: valid_values (list): The list of valid values for Choice or MultiChoice inputs. Returns: List: An expanded list of valid values for Choice or MultiChoice inputs.
codesearchnet
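A usage sketch, assuming the expand_valid_values function above is in scope; note that the input list is mutated in place and also returned.

valid_values = ['${GROUP_TYPES}', 'Host']
expanded = expand_valid_values(valid_values)
print('Adversary' in expanded)       # True -- the magic token was expanded
print('${GROUP_TYPES}' in expanded)  # False
print(expanded is valid_values)      # True -- same list object, modified in place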
def AddContract(self, contract): if (not (contract.PublicKeyHash.ToBytes() in self._keys.keys())): raise Exception('Invalid operation - public key mismatch') self._contracts[contract.ScriptHash.ToBytes()] = contract if (contract.ScriptHash in self._watch_only): self._watch_only.remove(contract.ScriptHash)
Add a contract to the wallet. Args: contract (Contract): a contract of type neo.SmartContract.Contract. Raises: Exception: Invalid operation - public key mismatch.
codesearchnet
def global_norm(t_list, name=None): if not isinstance(t_list, collections_abc.Sequence) or isinstance(t_list, str): raise TypeError(f'`t_list` should be a sequence of tensors. Received {type(t_list)}.') t_list = list(t_list) with ops.name_scope(name, 'global_norm', t_list) as name: values = [ops.convert_to_tensor(t.values if isinstance(t, indexed_slices.IndexedSlices) else t, name='t_%d' % i) if t is not None else t for i, t in enumerate(t_list)] half_squared_norms = [] for v in values: if v is not None: with ops.colocate_with(v): half_squared_norms.append(gen_nn_ops.l2_loss(v)) half_squared_norm = math_ops.reduce_sum(array_ops_stack.stack(half_squared_norms)) norm = math_ops.sqrt(half_squared_norm * constant_op.constant(2.0, dtype=half_squared_norm.dtype), name='global_norm') return norm
Computes the global norm of multiple tensors. Given a tuple or list of tensors `t_list`, this operation returns the global norm of the elements in all tensors in `t_list`. The global norm is computed as: `global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))` Any entries in `t_list` that are of type None are ignored. Args: t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None. name: A name for the operation (optional). Returns: A 0-D (scalar) `Tensor` of type `float`. Raises: TypeError: If `t_list` is not a sequence.
github-repos
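The formula global_norm = sqrt(sum(l2norm(t)**2)) is easy to cross-check outside TensorFlow; a NumPy sketch with two small arrays standing in for tensors:

import numpy as np

t_list = [np.array([3.0, 4.0]), np.array([[1.0, 2.0], [2.0, 4.0]])]
global_norm = np.sqrt(sum(np.sum(np.square(t)) for t in t_list))
print(global_norm)  # sqrt(25.0 + 25.0) ~= 7.0711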
def stage_in(self, file, executor): if (file.scheme == 'ftp'): working_dir = self.dfk.executors[executor].working_dir stage_in_app = self._ftp_stage_in_app(executor=executor) app_fut = stage_in_app(working_dir, outputs=[file]) return app_fut._outputs[0] elif ((file.scheme == 'http') or (file.scheme == 'https')): working_dir = self.dfk.executors[executor].working_dir stage_in_app = self._http_stage_in_app(executor=executor) app_fut = stage_in_app(working_dir, outputs=[file]) return app_fut._outputs[0] elif (file.scheme == 'globus'): globus_ep = self._get_globus_endpoint(executor) stage_in_app = self._globus_stage_in_app() app_fut = stage_in_app(globus_ep, outputs=[file]) return app_fut._outputs[0] else: raise Exception('Staging in with unknown file scheme {} is not supported'.format(file.scheme))
Transport the file from the input source to the executor. This function returns a DataFuture. Args: - self - file (File) : file to stage in - executor (str) : an executor the file is going to be staged in to. If the executor argument is not specified for a file with 'globus' scheme, the file will be staged in to the first executor with the "globus" key in a config.
codesearchnet
def get_added_vocab(self) -> dict[str, int]: return {k.content: v for v, k in sorted(self.added_tokens_decoder.items(), key=lambda item: item[0])}
Returns the added tokens in the vocabulary as a dictionary of token to index. Returns: `Dict[str, int]`: The added tokens.
github-repos
def DEFINE_alias(name, original_name, flag_values=FLAGS, module_name=None): if original_name not in flag_values: raise UnrecognizedFlagError(original_name) flag = flag_values[original_name] class _Parser(ArgumentParser): def parse(self, argument): flag.parse(argument) return flag.value class _FlagAlias(Flag): @property def value(self): return flag.value @value.setter def value(self, value): flag.value = value help_msg = 'Alias for --%s.' % flag.name DEFINE_flag(_FlagAlias(_Parser(), flag.serializer, name, flag.default, help_msg, boolean=flag.boolean), flag_values, module_name)
Defines an alias flag for an existing one. Args: name: A string, name of the alias flag. original_name: A string, name of the original flag. flag_values: FlagValues object with which the flag will be registered. module_name: A string, the name of the module that defines this flag. Raises: gflags.FlagError: UnrecognizedFlagError: if the referenced flag doesn't exist. DuplicateFlagError: if the alias name has been used by some existing flag.
juraj-google-style
def save(self, savefile): with open(str(savefile), 'wb') as f: self.write_to_fp(f) log.debug("Saved to %s", savefile)
Do the TTS API request and write result to file. Args: savefile (string): The path and file name to save the ``mp3`` to. Raises: :class:`gTTSError`: When there's an error with the API request.
juraj-google-style
def min_rank(series, ascending=True): ranks = series.rank(method='min', ascending=ascending) return ranks
Equivalent to `series.rank(method='min', ascending=ascending)`. Args: series: column to rank. Kwargs: ascending (bool): whether to rank in ascending order (default is `True`).
juraj-google-style
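A quick check of how method='min' handles ties, assuming the min_rank helper above is importable in the current scope:

import pandas as pd

s = pd.Series([10, 30, 30, 20])
print(min_rank(s).tolist())                   # [1.0, 3.0, 3.0, 2.0] -- tied values share the lowest rank
print(min_rank(s, ascending=False).tolist())  # [4.0, 1.0, 1.0, 3.0]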
def compute_eos_token_mask(self, input_ids: torch.LongTensor, eos_token_id: int) -> torch.LongTensor: self._check_input_ids_shape(input_ids) noneos_masks = [] all_eos_equated = input_ids == eos_token_id for eos_equated in all_eos_equated: nonzero_idx = torch.nonzero(eos_equated) noneos_mask = torch.ones_like(eos_equated) if nonzero_idx.shape[0] != 0: noneos_mask[nonzero_idx[0][0]:] = 0 noneos_masks.append(noneos_mask) return torch.stack(noneos_masks, dim=0)
Computes repetitions mask. 1 stands for ngrams that don't contain EOS tokens and vice versa. Args: input_ids (`torch.LongTensor`): Input token ids (batch_size, input_len). eos_token_id (`int`): EOS token ID. Returns: EOS token mask (batch_size, input_len).
github-repos
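The masking rule (ones up to the first EOS, zeros from the first EOS onward) in a standalone PyTorch sketch; the token ids and EOS id below are made up for illustration.

import torch

def eos_mask(input_ids, eos_token_id):
    masks = []
    for row_equals_eos in input_ids == eos_token_id:
        nonzero = torch.nonzero(row_equals_eos)
        mask = torch.ones_like(row_equals_eos, dtype=torch.long)
        if nonzero.shape[0] != 0:
            mask[nonzero[0][0]:] = 0  # zero out everything from the first EOS onward
        masks.append(mask)
    return torch.stack(masks, dim=0)

ids = torch.tensor([[5, 7, 2, 9], [5, 7, 8, 9]])
print(eos_mask(ids, eos_token_id=2))
# tensor([[1, 1, 0, 0],
#         [1, 1, 1, 1]])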
def ucast_ip_mask(ip_addr_and_mask, return_tuple=True):
    regex_ucast_ip_and_mask = __re.compile("^((22[0-3])|(2[0-1][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))/((3[0-2])|([1-2]?[0-9]))$")
    if return_tuple:
        while not regex_ucast_ip_and_mask.match(ip_addr_and_mask):
            print("Not a good unicast IP and CIDR mask combo.")
            print("Please try again.")
            ip_addr_and_mask = input("Please enter a unicast IP address and mask in the following format x.x.x.x/x: ")
        ip_cidr_split = ip_addr_and_mask.split("/")
        ip_addr = ip_cidr_split[0]
        cidr = ip_cidr_split[1]
        return ip_addr, cidr
    elif not return_tuple:
        if not regex_ucast_ip_and_mask.match(ip_addr_and_mask):
            return False
        else:
            return True
Function to check if an address is unicast and that the CIDR mask is good Args: ip_addr_and_mask: Unicast IP address and mask in the following format 192.168.1.1/24 return_tuple: Set to True it returns an IP and mask in a tuple, set to False returns True or False Returns: see return_tuple for return options
juraj-google-style
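Validation-only usage, assuming the ucast_ip_mask function above is in scope (return_tuple=False skips the interactive re-prompt loop):

print(ucast_ip_mask('192.168.1.1/24', return_tuple=False))  # True
print(ucast_ip_mask('224.0.0.1/24', return_tuple=False))    # False -- first octet is in the multicast range
print(ucast_ip_mask('10.0.0.1/33', return_tuple=False))     # False -- CIDR prefix out of range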
def _get_annotations(self, text, language=''): body = {'document': {'type': 'PLAIN_TEXT', 'content': text}, 'features': {'extract_syntax': True}, 'encodingType': 'UTF32'} if language: body['document']['language'] = language request = self.service.documents().annotateText(body=body) response = request.execute() tokens = response.get('tokens', []) language = response.get('language') return {'tokens': tokens, 'language': language}
Returns the list of annotations retrieved from the given text. Args: text (str): Input text. language (:obj:`str`, optional): Language code. Returns: Results in a dictionary. :code:`tokens` contains the list of annotations and :code:`language` contains the inferred language from the input.
codesearchnet
def connect_direct(self, connection_string, no_rpc=False, force=False): if ((not force) and self.connected): raise HardwareError(("Cannot connect when we are already connected to '%s'" % self.connection_string)) self._loop.run_coroutine(self.adapter.connect(0, connection_string)) try: if no_rpc: self._logger.info('Not opening RPC interface on device %s', self.connection_string) else: self._loop.run_coroutine(self.adapter.open_interface(0, 'rpc')) except HardwareError as exc: self._logger.exception('Error opening RPC interface on device %s', connection_string) self._loop.run_coroutine(self.adapter.disconnect(0)) raise exc except Exception as exc: self._logger.exception('Error opening RPC interface on device %s', connection_string) self._loop.run_coroutine(self.adapter.disconnect(0)) raise HardwareError(('Could not open RPC interface on device due to an exception: %s' % str(exc))) from exc self.connected = True self.connection_string = connection_string self.connection_interrupted = False
Directly connect to a device using its stream specific connection string. Normally, all connections to a device include opening the RPC interface to send RPCs. However, there are certain, very specific, circumstances when you would not want to or be able to open the RPC interface (such as when you are using the debug interface on a bare MCU that has not been programmed yet). In those cases you can pass no_rpc=True to not attempt to open the RPC interface. If you do not open the RPC interface at connection time, there is no public interface to open it later, so you must disconnect and reconnect to the device in order to open the interface. Args: connection_string (str): The connection string that identifies the desired device. no_rpc (bool): Do not open the RPC interface on the device (default=False). force (bool): Whether to force another connection even if we think we are currently connected. This is for internal use and not designed to be set externally.
codesearchnet
def _parse_exe_version_string(version_str): matcher = re.search('Python (\\d+\\.\\d+)\\.\\d+', version_str) if matcher: return utils.version_from_string(matcher.group(1)) else: return None
Parse the version string of a Python executable. Arguments: version_str: Version string as emitted by running `PYTHON_EXE -V` Returns: Version as (major, minor) tuple, or None if it could not be determined.
github-repos
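A standalone sketch of the same parsing, returning a plain (major, minor) tuple since utils.version_from_string is internal to the original project:

import re

def parse_exe_version(version_str):
    matcher = re.search(r'Python (\d+\.\d+)\.\d+', version_str)
    if matcher:
        major, minor = matcher.group(1).split('.')
        return int(major), int(minor)
    return None

print(parse_exe_version('Python 3.11.4'))  # (3, 11)
print(parse_exe_version('Python 3.11'))    # None -- the pattern requires a micro version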
def load(self, profile_args): for key, value in profile_args.items(): self.add(key, value)
Load provided CLI Args. Args: profile_args (dict): Dictionary of args in key/value format.
juraj-google-style
def highway_core_with_recurrent_dropout(hidden_size, num_layers, keep_prob=0.5, **kwargs): core = HighwayCore(hidden_size, num_layers, **kwargs) return (RecurrentDropoutWrapper(core, keep_prob), core)
Highway core with recurrent dropout. Args: hidden_size: (int) Hidden size dimensionality. num_layers: (int) Number of highway layers. keep_prob: the probability to keep an entry when applying dropout. **kwargs: Extra keyword arguments to pass to the highway core. Returns: A tuple (train_core, test_core) where train_core is a highway core with recurrent dropout enabled to be used for training and test_core is the same highway core without recurrent dropout.
codesearchnet
def __init__(self, name, annotation): self._name = name self._annotation = annotation
Initializer. Args: name: the name of the bound arg annotation: an Annotation
juraj-google-style
def _has_extras(ctx): if not ctx.index.entries: return False return ctx.data_offset > 8 and ctx.data_offset > (ctx.signatures.offset_end + 8)
Determine if a MAR file has an additional section block or not. It does this by looking at where file data starts in the file. If this starts immediately after the signature data, then no additional sections are present. Args: ctx (context): construct parsing context Returns: True if the MAR file has an additional section block False otherwise
juraj-google-style
def extract(self, text: str) -> List[Extraction]: doc = self._parser(text) extractions = list() for sent in doc.sents: this_extraction = Extraction(value=sent.text, extractor_name=self.name, start_token=sent[0], end_token=sent[-1], start_char=sent.text[0], end_char=sent.text[-1]) extractions.append(this_extraction) return extractions
Splits text by sentences. Args: text (str): Input text to be extracted. Returns: List[Extraction]: the list of extractions or the empty list if there are no matches.
juraj-google-style