code | docstring | source
---|---|---|
def _make_signature_checker(api_signature, signature):
if not (isinstance(signature, dict) and all((isinstance(k, (str, int)) for k in signature))):
raise TypeError('signatures must be dictionaries mapping parameter names to type annotations.')
checkers = []
param_names = list(api_signature.parameters)
for param_name, param_type in signature.items():
if isinstance(param_name, int) and param_name < len(api_signature.parameters):
param_name = list(api_signature.parameters.values())[param_name].name
param = api_signature.parameters.get(param_name, None)
if param is None:
raise ValueError(f'signature includes annotation for unknown parameter {param_name!r}.')
if param.kind not in (tf_inspect.Parameter.POSITIONAL_ONLY, tf_inspect.Parameter.POSITIONAL_OR_KEYWORD):
raise ValueError(f"Dispatch currently only supports type annotations for positional parameters; can't handle annotation for {param.kind!r} parameter {param_name}.")
checker = make_type_checker(param_type)
index = param_names.index(param_name)
checkers.append((index, checker))
return _api_dispatcher.PySignatureChecker(checkers)
|
Builds a PySignatureChecker for the given type signature.
Args:
api_signature: The `inspect.Signature` of the API whose signature is
being checked.
signature: Dictionary mapping parameter names to type annotations.
Returns:
A `PySignatureChecker`.
|
github-repos
|
def generate_surface_vectors(self, film_millers, substrate_millers):
vector_sets = []
for f in film_millers:
film_slab = SlabGenerator(self.film, f, 20, 15,
primitive=False).get_slab()
film_vectors = reduce_vectors(film_slab.lattice.matrix[0],
film_slab.lattice.matrix[1])
for s in substrate_millers:
substrate_slab = SlabGenerator(self.substrate, s, 20, 15,
primitive=False).get_slab()
substrate_vectors = reduce_vectors(
substrate_slab.lattice.matrix[0],
substrate_slab.lattice.matrix[1])
vector_sets.append((film_vectors, substrate_vectors, f, s))
return vector_sets
|
Generates the film/substrate slab combinations for a set of given
miller indices
Args:
film_millers(array): all miller indices to generate slabs for
film
substrate_millers(array): all miller indices to generate slabs
for substrate
|
juraj-google-style
|
def convert_to_dataframe(ds: xr.Dataset) -> pd.DataFrame:
if len(ds.coords):
df = ds.to_dataframe().reset_index()
else:
ds = ds.compute().to_dict(data='list')
df = pd.DataFrame({k: [v['data']] for k, v in ds['data_vars'].items()})
return df
|
Convert xarray Dataset to pandas DataFrame.
Args:
ds (xr.Dataset): xarray Dataset to be converted.
Returns:
pd.DataFrame: Pandas DataFrame containing the data from the xarray Dataset.
|
github-repos
|
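A hedged usage sketch of `convert_to_dataframe`, assuming the function above is in scope and a recent `xarray` whose `Dataset.to_dict(data='list')` is supported; the variable names are illustrative only.

```python
import xarray as xr

# A Dataset with a coordinate goes through the to_dataframe() branch.
ds = xr.Dataset({"temp": ("x", [20.5, 21.0])}, coords={"x": [0, 1]})
print(convert_to_dataframe(ds).columns.tolist())  # ['x', 'temp']

# A Dataset with no coordinates is collapsed to one row of list-valued cells.
ds_no_coords = xr.Dataset({"temp": ("x", [20.5, 21.0])})
print(convert_to_dataframe(ds_no_coords)["temp"].iloc[0])  # [20.5, 21.0]
```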
def RegisterHelper(cls, resolver_helper):
if (resolver_helper.type_indicator in cls._resolver_helpers):
raise KeyError('Resolver helper object already set for type indicator: {0!s}.'.format(resolver_helper.type_indicator))
cls._resolver_helpers[resolver_helper.type_indicator] = resolver_helper
|
Registers a path specification resolver helper.
Args:
resolver_helper (ResolverHelper): resolver helper.
Raises:
KeyError: if resolver helper object is already set for the corresponding
type indicator.
|
codesearchnet
|
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is None:
return [1] + [0] * len(token_ids_0) + [1]
return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]
|
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` methods.
Args:
token_ids_0 (`List[int]`):
List of ids.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Set to True if the token list is already formatted with special tokens for the model
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
|
github-repos
|
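A minimal standalone sketch of the mask layout this method returns, mirroring the list arithmetic above with plain Python lists (no tokenizer required); which special tokens the `1`s stand for depends on the concrete tokenizer.

```python
def special_tokens_mask(ids_a, ids_b=None):
    # 1 marks a special token, 0 marks a regular sequence token.
    if ids_b is None:
        return [1] + [0] * len(ids_a) + [1]
    return [1] + [0] * len(ids_a) + [1, 1] + [0] * len(ids_b) + [1]

print(special_tokens_mask([11, 12, 13]))            # [1, 0, 0, 0, 1]
print(special_tokens_mask([11, 12], [21, 22, 23]))  # [1, 0, 0, 1, 1, 0, 0, 0, 1]
```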
def convert_structure_to_signature(structure, arg_names=None, signature_context=None):
def encode_arg(arg, path):
if isinstance(arg, tensor_lib.Tensor):
user_specified_name = None
try:
user_specified_name = compat.as_str(arg.op.get_attr('_user_specified_name'))
except (ValueError, AttributeError):
pass
if path and user_specified_name and (user_specified_name != path[0]):
name = user_specified_name
else:
name = tensor_lib.sanitize_spec_name('_'.join((str(p) for p in path)))
return tensor_lib.TensorSpec(arg.shape, arg.dtype, name)
if isinstance(arg, resource_variable_ops.ResourceVariable):
return trace_type.from_value(arg, signature_context)
if isinstance(arg, composite_tensor.CompositeTensor):
return arg._type_spec
if isinstance(arg, (int, float, bool, str, type(None), dtypes.DType, tensor_lib.TensorSpec, type_spec.TypeSpec)):
return arg
return UnknownArgument()
flattened = nest.flatten_with_tuple_paths(structure)
if arg_names:
if len(arg_names) != len(structure):
raise ValueError("Passed in arg_names don't match actual signature (%s)." % arg_names)
flattened = [((arg_names[path[0]],) + path[1:], arg) for path, arg in flattened]
mapped = [encode_arg(arg, path) for path, arg in flattened]
return nest.pack_sequence_as(structure, mapped)
|
Convert a potentially nested structure to a signature.
Args:
structure: Structure to convert, where top level collection is a list or a
tuple.
arg_names: Optional list of arguments that has equal number of elements as
`structure` and is used for naming corresponding TensorSpecs.
signature_context: TraceType InternalTracingContext to generate alias_ids
for mutable objects, like ResourceVariables.
Returns:
Identical structure that has TensorSpec objects instead of Tensors and
UnknownArgument instead of any unsupported types.
|
github-repos
|
def _extract_token_timestamps(self, generate_outputs, alignment_heads, time_precision=0.02, num_frames=None, num_input_ids=None):
cross_attentions = []
for i in range(self.config.decoder_layers):
cross_attentions.append(torch.cat([x[i] for x in generate_outputs.cross_attentions], dim=2))
weights = torch.stack([cross_attentions[l][:, h] for l, h in alignment_heads])
weights = weights.permute([1, 0, 2, 3])
weight_length = None
if 'beam_indices' in generate_outputs:
weight_length = (generate_outputs.beam_indices != -1).sum(-1).max()
beam_indices = generate_outputs.beam_indices[:, :weight_length]
if num_input_ids is not None and num_input_ids > 1:
weight_length += num_input_ids - 1
beam_indices_first_step_unrolled = torch.ones(beam_indices.shape[0], num_input_ids - 1, device=beam_indices.device, dtype=torch.long) * beam_indices[:, 0:1]
unrolled_beam_indices = torch.cat([beam_indices_first_step_unrolled, beam_indices], dim=-1)
else:
unrolled_beam_indices = beam_indices
unrolled_beam_indices = unrolled_beam_indices.masked_fill(unrolled_beam_indices == -1, 0)
weights = torch.stack([torch.index_select(weights[:, :, i, :], dim=0, index=unrolled_beam_indices[:, i]) for i in range(unrolled_beam_indices.shape[1])], dim=2)
input_length = weight_length or cross_attentions[0].shape[2]
batch_size = generate_outputs.sequences.shape[0]
timestamps = torch.zeros((batch_size, input_length + 1), dtype=torch.float32, device=generate_outputs.sequences.device)
if num_frames is not None:
if isinstance(num_frames, int):
weights = weights[..., :num_frames]
elif isinstance(num_frames, (list, tuple, np.ndarray)) and len(np.unique(num_frames)) == 1:
weights = weights[..., :num_frames[0]]
elif isinstance(num_frames, torch.Tensor) and len(torch.unique(num_frames)) == 1:
weights = weights[..., :num_frames[0]]
else:
repeat_time = batch_size if isinstance(num_frames, int) else batch_size // len(num_frames)
num_frames = num_frames.cpu() if isinstance(num_frames, torch.Tensor) else num_frames
num_frames = np.repeat(num_frames, repeat_time)
if num_frames is None or isinstance(num_frames, int):
std = torch.std(weights, dim=-2, keepdim=True, unbiased=False)
mean = torch.mean(weights, dim=-2, keepdim=True)
weights = (weights - mean) / std
weights = _median_filter(weights, self.config.median_filter_width)
weights = weights.mean(dim=1)
for batch_idx in range(batch_size):
if num_frames is not None and isinstance(num_frames, (tuple, list, np.ndarray, torch.Tensor)):
matrix = weights[batch_idx, ..., :num_frames[batch_idx]]
std = torch.std(matrix, dim=-2, keepdim=True, unbiased=False)
mean = torch.mean(matrix, dim=-2, keepdim=True)
matrix = (matrix - mean) / std
matrix = _median_filter(matrix, self.config.median_filter_width)
matrix = matrix.mean(dim=0)
else:
matrix = weights[batch_idx]
text_indices, time_indices = _dynamic_time_warping(-matrix.cpu().double().numpy())
jumps = np.pad(np.diff(text_indices), (1, 0), constant_values=1).astype(bool)
jump_times = time_indices[jumps] * time_precision
timestamps[batch_idx, 1:] = torch.tensor(jump_times)
return timestamps
|
Calculates token-level timestamps using the encoder-decoder cross-attentions and dynamic time-warping (DTW) to
map each output token to a position in the input audio. If `num_frames` is specified, the encoder-decoder
cross-attentions will be cropped before applying DTW.
Returns:
tensor containing the timestamps in seconds for each predicted token
|
github-repos
|
def run(self, input_dir, output_file_path):
logging.info('Running defense %s', self.submission_id)
tmp_run_dir = self.temp_copy_extracted_submission()
output_dir = os.path.dirname(output_file_path)
output_filename = os.path.basename(output_file_path)
cmd = ['--network=none',
'-m=24g',
'--cpus=3.75',
'-v', '{0}:/input_images:ro'.format(input_dir),
'-v', '{0}:/output_data'.format(output_dir),
'-v', '{0}:/code'.format(tmp_run_dir),
'-w', '/code',
self.container_name,
'./' + self.entry_point,
'/input_images',
'/output_data/' + output_filename]
elapsed_time_sec = self.run_with_time_limit(cmd)
sudo_remove_dirtree(tmp_run_dir)
return elapsed_time_sec
|
Runs defense inside Docker.
Args:
input_dir: directory with input (adversarial images).
output_file_path: path of the output file.
Returns:
how long it took to run the submission, in seconds.
|
juraj-google-style
|
def validate(cls, job_config):
if job_config.output_writer_cls != cls:
raise errors.BadWriterParamsError(
"Expect output writer class %r, got %r." %
(cls, job_config.output_writer_cls))
|
Validates relevant parameters.
This method can validate fields which it deems relevant.
Args:
job_config: an instance of map_job.JobConfig.
Raises:
errors.BadWriterParamsError: required parameters are missing or invalid.
|
juraj-google-style
|
def load_info(cat):
res = _load_yaml_(f'{PKG_PATH}/markets/{cat}.yml')
root = os.environ.get('BBG_ROOT', '').replace('\\', '/')
if (not root):
return res
for (cat, ovrd) in _load_yaml_(f'{root}/markets/{cat}.yml').items():
if isinstance(ovrd, dict):
if (cat in res):
res[cat].update(ovrd)
else:
res[cat] = ovrd
if (isinstance(ovrd, list) and isinstance(res[cat], list)):
res[cat] += ovrd
return res
|
Load parameters for assets
Args:
cat: category
Returns:
dict
Examples:
>>> import pandas as pd
>>>
>>> assets = load_info(cat='assets')
>>> all(cat in assets for cat in ['Equity', 'Index', 'Curncy', 'Corp'])
True
>>> os.environ['BBG_PATH'] = ''
>>> exch = load_info(cat='exch')
>>> pd.Series(exch['EquityUS']).allday
[400, 2000]
>>> test_root = f'{PKG_PATH}/tests'
>>> os.environ['BBG_PATH'] = test_root
>>> ovrd_exch = load_info(cat='exch')
>>> # Somehow os.environ is not set properly in doctest environment
>>> ovrd_exch.update(_load_yaml_(f'{test_root}/markets/exch.yml'))
>>> pd.Series(ovrd_exch['EquityUS']).allday
[300, 2100]
|
codesearchnet
|
def _bind_length_scalar_handlers(tids, scalar_factory, lns=_NON_ZERO_LENGTH_LNS):
handler = partial(_length_scalar_handler, scalar_factory)
return _bind_length_handlers(tids, handler, lns)
|
Binds a set of scalar handlers for an inclusive range of low-nibble values.
Args:
tids (Sequence[int]): The Type IDs to bind to.
scalar_factory (Callable): The factory for the scalar parsing function.
This function can itself return a function representing a thunk to defer the
scalar parsing or a direct value.
lns (Sequence[int]): The low-nibble lengths to bind to.
|
codesearchnet
|
def auth_criteria(self):
auth = {}
for attr in dir(self):
if (attr != 'auth_criteria'):
attribute = getattr(self, attr)
if (isinstance(attribute, Callable) and hasattr(attribute, '_service_auth')):
auth[getattr(self, attr)._service_auth] = attribute
return auth
|
This attribute provides the mapping of services to their auth requirement
Returns:
(dict) : the mapping from services to their auth requirements.
|
codesearchnet
|
def _run_graph_for_calibration(float_model_dir: str, signature_keys: Sequence[str], tags: Collection[str], representative_dataset: rd.RepresentativeDatasetOrMapping, force_graph_mode_calibration: bool) -> None:
try:
_validate_representative_dataset(representative_dataset, signature_keys)
except Exception as ex:
raise ValueError('Invalid representative dataset.') from ex
representative_dataset_map = representative_dataset
if not isinstance(representative_dataset, Mapping):
representative_dataset_map = {signature_keys[0]: representative_dataset}
try:
if context.executing_eagerly() and (not force_graph_mode_calibration):
logging.info('Calibration step is executed in eager mode.')
_run_graph_for_calibration_eager_mode(float_model_dir, tags, representative_dataset_map)
else:
logging.info('Calibration step is executed in graph mode.')
_run_graph_for_calibration_graph_mode(float_model_dir, tags, representative_dataset_map)
except Exception as ex:
raise ValueError('Failed to run graph for post-training quantization calibration.') from ex
logging.info('Calibration step complete.')
|
Runs the graph for calibration using representative datasets.
Args:
float_model_dir: Path to the model to calibrate.
signature_keys: Sequence of keys identifying SignatureDef containing inputs
and outputs.
tags: Collection of tags identifying the MetaGraphDef within the SavedModel
to analyze.
representative_dataset: An iterator that returns a dictionary of {input_key:
input_value} or a mapping from signature keys to such iterators. When
`signature_keys` contains more than one signature key,
`representative_dataset` should be a mapping that maps each signature key
to the corresponding representative dataset.
force_graph_mode_calibration: If set to true, it forces calibration in graph
mode instead of eager mode when the context is in eager mode.
Raises:
ValueError iff:
* The representative dataset format is invalid.
* It fails to run the functions using the representative datasets.
|
github-repos
|
def _validate_cluster_spec(cluster_spec, task_type, task_id):
allowed_task_types = ('chief', 'worker', 'evaluator', 'ps', None)
cluster_spec = normalize_cluster_spec(cluster_spec)
if any((job not in allowed_task_types for job in cluster_spec.jobs)):
raise ValueError('Disallowed task type found in cluster spec. Allowed types are {} and the cluster spec is {}.'.format(allowed_task_types, cluster_spec))
if task_type not in allowed_task_types:
raise ValueError('Unrecognized task_type: {}, valid task types are: {}'.format(task_type, allowed_task_types))
if task_type and task_type not in cluster_spec.jobs and (task_type != 'evaluator'):
raise ValueError('`task_type` %r not found in cluster_spec.' % task_type)
if task_count(cluster_spec, 'chief') > 1:
raise ValueError("There must be at most one 'chief' job.")
if task_count(cluster_spec, 'evaluator') > 1:
raise ValueError("There must be at most one 'evaluator' job.")
if task_type in cluster_spec.jobs and task_id >= task_count(cluster_spec, task_type):
raise ValueError('The `task_id` %d exceeds the maximum id of %s.' % (task_id, task_type))
|
Validates `cluster_spec`.
It checks:
1) task type is one of "chief", "worker", "ps", "evaluator", or not provided
(None).
2) whether there is such a task type as `task_type` in the `cluster_spec`. The
only exception is `evaluator`. In other words, it is still a valid
configuration when `task_type` is `evaluator` but it doesn't appear in
`cluster_spec`.
3) whether there is at most one "chief" job.
4) whether there is at most one "evaluator" job.
5) whether the `task_id` is smaller than the number of tasks for that
particular `task_type`.
Args:
cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object to be validated.
task_type: string indicating the type of the task.
task_id: the id of the `task_type` in this cluster.
Raises:
ValueError: if `cluster_spec` fails any check.
|
github-repos
|
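A hedged sketch of the dict layout the validator accepts, assuming `_validate_cluster_spec` and its helpers are importable; the host addresses are hypothetical.

```python
cluster_spec = {
    'chief': ['host0:2222'],
    'worker': ['host1:2222', 'host2:2222'],
    'ps': ['host3:2222'],
}

# Passes: 'worker' appears in the spec and task_id 1 < 2 worker tasks.
_validate_cluster_spec(cluster_spec, task_type='worker', task_id=1)

# Also passes: an 'evaluator' task does not need to appear in the spec.
_validate_cluster_spec(cluster_spec, task_type='evaluator', task_id=0)

# Raises ValueError: task_id 2 exceeds the number of 'worker' tasks.
# _validate_cluster_spec(cluster_spec, task_type='worker', task_id=2)
```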
def get_environment_details(zone, environment):
default_context = google.datalab.Context.default()
url = (Api._ENDPOINT + (Api._ENVIRONMENTS_PATH_FORMAT % (default_context.project_id, zone, environment)))
return google.datalab.utils.Http.request(url, credentials=default_context.credentials)
|
Issues a request to Composer to get the environment details.
Args:
zone: GCP zone of the composer environment
environment: name of the Composer environment
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
|
codesearchnet
|
def __matches(s1, s2, ngrams_fn, n=3):
ngrams1, ngrams2 = set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n))
return ngrams1.intersection(ngrams2)
|
Returns the n-grams that match between two sequences
See also: SequenceMatcher.get_matching_blocks
Args:
s1: a string
s2: another string
n: an int for the n in n-gram
Returns:
set: the n-grams shared by both sequences.
|
juraj-google-style
|
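A self-contained sketch of the matching idea, with a hypothetical character n-gram helper standing in for the caller-supplied `ngrams_fn`.

```python
def char_ngrams(s, n=3):
    # Hypothetical stand-in for the caller-supplied ngrams_fn.
    return [s[i:i + n] for i in range(len(s) - n + 1)]

def matches(s1, s2, ngrams_fn=char_ngrams, n=3):
    ngrams1, ngrams2 = set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n))
    return ngrams1.intersection(ngrams2)

print(matches("overflow", "flowchart"))  # {'flo', 'low'}
```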
def roll_to_business_day(self, date_tensor, roll_convention):
if roll_convention == constants.BusinessDayConvention.NONE:
return date_tensor
rolled_ordinals_table = self._compute_rolled_dates_table(roll_convention)
ordinals_with_offset = date_tensor.ordinal() - self._ordinal_offset + 1
rolled_ordinals = self._gather(rolled_ordinals_table, ordinals_with_offset)
with tf.control_dependencies(self._assert_ordinals_in_bounds(rolled_ordinals)):
return dt.from_ordinals(rolled_ordinals, validate=False)
|
Rolls the given dates to business dates according to given convention.
Args:
date_tensor: DateTensor of dates to roll from.
roll_convention: BusinessDayConvention. Determines how to roll a date that
falls on a holiday.
Returns:
The resulting DateTensor.
|
github-repos
|
def __init__(self, flow_obj, parent_runner=None, runner_args=None,
token=None):
self.token = token or flow_obj.token
self.parent_runner = parent_runner
if parent_runner is not None:
self.queue_manager = parent_runner.queue_manager
else:
self.queue_manager = queue_manager.QueueManager(token=self.token)
self.queue_manager.FreezeTimestamp()
self.queued_replies = []
self.outbound_lock = threading.Lock()
self.flow_obj = flow_obj
if runner_args is not None:
self.runner_args = runner_args
self.session_id = self.GetNewSessionID()
self.flow_obj.urn = self.session_id
self.context = self.InitializeContext(runner_args)
self.flow_obj.context = self.context
self.context.session_id = self.session_id
else:
self.context = self.flow_obj.context
self.runner_args = self.flow_obj.runner_args
self.flow_obj.urn = self.session_id = self.context.session_id
self.sent_replies = []
|
Constructor for the Flow Runner.
Args:
flow_obj: The flow object this runner will run states for.
parent_runner: The parent runner of this runner.
runner_args: A FlowRunnerArgs() instance containing initial values. If not
specified, we use the runner_args from the flow_obj.
token: An instance of access_control.ACLToken security token.
|
juraj-google-style
|
def _PrintCheckDependencyStatus(
self, dependency, result, status_message, verbose_output=True):
if not result or dependency.is_optional:
if dependency.is_optional:
status_indicator = '[OPTIONAL]'
else:
status_indicator = '[FAILURE]'
print('{0:s}\t{1:s}'.format(status_indicator, status_message))
elif verbose_output:
print('[OK]\t\t{0:s}'.format(status_message))
|
Prints the check dependency status.
Args:
dependency (DependencyDefinition): dependency definition.
result (bool): True if the Python module is available and conforms to
the minimum required version, False otherwise.
status_message (str): status message.
verbose_output (Optional[bool]): True if output should be verbose.
|
juraj-google-style
|
def switch_to_line_in(self, source=None):
if source:
uid = source.uid
else:
uid = self.uid
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', 'x-rincon-stream:{0}'.format(uid)),
('CurrentURIMetaData', '')
])
|
Switch the speaker's input to line-in.
Args:
source (SoCo): The speaker whose line-in should be played.
Default is line-in from the speaker itself.
|
juraj-google-style
|
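A hedged usage sketch with the `soco` library; the speaker IP addresses are hypothetical.

```python
import soco

living_room = soco.SoCo('192.168.1.68')  # hypothetical address
kitchen = soco.SoCo('192.168.1.69')      # hypothetical address

# Play the living room speaker's own line-in input.
living_room.switch_to_line_in()

# Play the living room's line-in through the kitchen speaker.
kitchen.switch_to_line_in(source=living_room)
```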
def set(self, key, samples, sampling_rate):
if (not np.issubdtype(samples.dtype, np.floating)):
raise ValueError('Samples are required as np.float32!')
if (len(samples.shape) > 1):
raise ValueError('Only single channel supported!')
self.raise_error_if_not_open()
if (key in self._file):
del self._file[key]
samples = (samples * MAX_INT16_VALUE).astype(np.int16)
dset = self._file.create_dataset(key, data=samples)
dset.attrs[SAMPLING_RATE_ATTR] = sampling_rate
|
Set the samples and sampling-rate for the given key.
Existing data will be overwritten.
The samples have to have ``np.float32`` datatype and values in
the range of -1.0 and 1.0.
Args:
key (str): A key to store the data for.
samples (numpy.ndarray): 1-D array of audio samples (np.float32).
sampling_rate (int): The sampling-rate of the audio samples.
Note:
The container has to be opened in advance.
|
codesearchnet
|
def setValues(self, values):
if isinstance(values, dict):
indices, values = list(zip(*values.items()))
indices = Utils.toTupleArray(indices)
if any(isinstance(value, basestring) for value in values):
values = list(map(str, values))
self._impl.setValuesTaStr(indices, values, len(values))
elif all(isinstance(value, Real) for value in values):
values = list(map(float, values))
self._impl.setValuesTaDbl(indices, values, len(values))
else:
raise TypeError
elif isinstance(values, (list, tuple)):
if any(isinstance(value, basestring) for value in values):
values = list(map(str, values))
self._impl.setValuesStr(values, len(values))
elif all(isinstance(value, Real) for value in values):
values = list(map(float, values))
self._impl.setValuesDbl(values, len(values))
else:
raise TypeError
else:
if np is not None and isinstance(values, np.ndarray):
self.setValues(DataFrame.fromNumpy(values).toList())
return
Entity.setValues(self, values)
|
Assign the values (string or float) to the parameter instances with the
specified indices, equivalent to the AMPL code:
.. code-block:: ampl
let {i in indices} par[i] := values[i];
Args:
values: list, dictionary or :class:`~amplpy.DataFrame` with the
indices and the values to be set.
Raises:
TypeError: If called on a scalar parameter.
|
juraj-google-style
|
def Print(x, data, message, **kwargs):
return PrintOperation(x, data, message, **kwargs).outputs[0]
|
Call tf.Print.
Args:
x: a Tensor.
data: a list of Tensor
message: a string
**kwargs: keyword arguments to tf.Print
Returns:
a Tensor which is identical in value to x
|
juraj-google-style
|
def imrotate(img, angle, center=None, scale=1.0, border_value=0, auto_bound=False):
if ((center is not None) and auto_bound):
raise ValueError('`auto_bound` conflicts with `center`')
(h, w) = img.shape[:2]
if (center is None):
center = (((w - 1) * 0.5), ((h - 1) * 0.5))
assert isinstance(center, tuple)
matrix = cv2.getRotationMatrix2D(center, (- angle), scale)
if auto_bound:
cos = np.abs(matrix[(0, 0)])
sin = np.abs(matrix[(0, 1)])
new_w = ((h * sin) + (w * cos))
new_h = ((h * cos) + (w * sin))
matrix[(0, 2)] += ((new_w - w) * 0.5)
matrix[(1, 2)] += ((new_h - h) * 0.5)
w = int(np.round(new_w))
h = int(np.round(new_h))
rotated = cv2.warpAffine(img, matrix, (w, h), borderValue=border_value)
return rotated
|
Rotate an image.
Args:
img (ndarray): Image to be rotated.
angle (float): Rotation angle in degrees, positive values mean
clockwise rotation.
center (tuple): Center of the rotation in the source image, by default
it is the center of the image.
scale (float): Isotropic scale factor.
border_value (int): Border value.
auto_bound (bool): Whether to adjust the image size to cover the whole
rotated image.
Returns:
ndarray: The rotated image.
|
codesearchnet
|
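A hedged usage sketch, assuming `imrotate` above is importable and OpenCV/NumPy are installed; it illustrates how `auto_bound` changes the output canvas.

```python
import numpy as np

img = np.zeros((100, 200, 3), dtype=np.uint8)  # height 100, width 200

rotated = imrotate(img, angle=30)
print(rotated.shape)  # (100, 200, 3): same canvas, corners clipped

rotated_full = imrotate(img, angle=90, auto_bound=True)
print(rotated_full.shape)  # (200, 100, 3): canvas grown to fit the rotation
```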
def memcache_get(self, key, for_cas=False, namespace=None, use_cache=False,
deadline=None):
if not isinstance(key, basestring):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(for_cas, bool):
raise TypeError('for_cas must be a bool; received %r' % for_cas)
if namespace is None:
namespace = namespace_manager.get_namespace()
options = (for_cas, namespace, deadline)
batcher = self.memcache_get_batcher
if use_cache:
return batcher.add_once(key, options)
else:
return batcher.add(key, options)
|
An auto-batching wrapper for memcache.get() or .get_multi().
Args:
key: Key to get. This must be a string; no prefix is applied.
for_cas: If True, request and store CAS ids on the Context.
namespace: Optional namespace.
deadline: Optional deadline for the RPC.
Returns:
A Future (!) whose return value is the value retrieved from
memcache, or None.
|
juraj-google-style
|
def from_func_graph(name: Union[str, bytes], graph: func_graph_module.FuncGraph, attrs: Dict[str, attr_value_pb2.AttrValue], function_type: Optional[function_type_lib.FunctionType]=None, overwrite: bool=False) -> AtomicFunction:
if attrs and attributes_lib.IMPLEMENTS in attrs:
has_resource_vars = any((inp.dtype == dtypes.resource for inp in graph.inputs))
captured_inputs = graph.external_captures + graph.deferred_external_captures
assert not any((has_resource_vars, captured_inputs)), 'Function {name} has "{attr}={value}" attribute and thus can not depend on any tensors outside of its signature or modify variables. \n\nNote: variables are always captured and cause function re-tracing for every variable called.\n inputs: {inputs}\n captures: {captured}\n\nTo pass a variable to such function use use variable.read_value().'.format(name=graph.name, attr=attributes_lib.IMPLEMENTS, value=attrs[attributes_lib.IMPLEMENTS], inputs=graph.inputs, captured=captured_inputs)
input_ops = set((arg.op for arg in graph.inputs))
operations = [op for op in graph.get_operations() if op not in input_ops]
graph_output_names = graph._output_names
if graph_output_names is not None and all((ops.tensor_id(t) in graph_output_names for t in graph.outputs)):
output_names = [compat.as_bytes(graph_output_names[ops.tensor_id(t)]) for t in graph.outputs]
if len(set(output_names)) != len(output_names):
output_names = []
else:
output_names = []
with graph._c_graph.get() as c_graph:
fn = pywrap_tf_session.TF_GraphToFunction_wrapper(c_graph, compat.as_str(name), False, [o._c_op for o in operations], [t._as_tf_output() for t in graph.inputs], [t._as_tf_output() for t in graph.outputs], output_names, [o._c_op for o in graph.control_outputs], [], None, compat.as_str(''))
attrs = attributes_lib.parse_func_attrs(attrs or {})
for attr_name, attr_value in attrs.items():
serialized = attr_value.SerializeToString()
pywrap_tf_session.TF_FunctionSetAttrValueProto(fn, compat.as_str(attr_name), serialized)
name = compat.as_bytes(name)
bound_context = context.context()
if overwrite and bound_context.has_function(name):
bound_context.remove_function(name)
bound_context.add_c_function(fn)
pywrap_tf_session.TF_DeleteFunction(fn)
call_options = CallOptions(collective_manager_ids_used=getattr(graph, 'collective_manager_ids_used', []), control_captures=graph.function_captures.control, is_stateful=any((op._is_stateful for op in operations)))
if not function_type:
function_type = function_type_utils.derive_from_graph(graph)
return AtomicFunction(name, bound_context, function_type, list(graph._functions.values()), call_options, cached_graph=graph)
|
Initializes an AtomicFunction from FuncGraph.
Args:
name: str, the name for the created function.
graph: Graph, the graph containing the operations in the function
attrs: dict mapping names of attributes to their AttrValue values
function_type: known FunctionType to use, otherwise one is derived.
overwrite: overwrites function definition in the current context if needed
Returns:
An AtomicFunction instance.
|
github-repos
|
def _create_produce_requests(self, collated):
requests = {}
for (node_id, batches) in six.iteritems(collated):
requests[node_id] = self._produce_request(node_id, self.config['acks'], self.config['request_timeout_ms'], batches)
return requests
|
Transfer the record batches into a list of produce requests on a
per-node basis.
Arguments:
collated: {node_id: [RecordBatch]}
Returns:
dict: {node_id: ProduceRequest} (version depends on api_version)
|
codesearchnet
|
def restore_site_properties(self, site_property='ff_map', filename=None):
if (not (self.control_params['filetype'] == 'pdb')):
raise ValueError()
filename = (filename or self.control_params['output'])
bma = BabelMolAdaptor.from_file(filename, 'pdb')
pbm = pb.Molecule(bma._obmol)
assert (len(pbm.residues) == sum([x['number'] for x in self.param_list]))
packed_mol = self.convert_obatoms_to_molecule(pbm.residues[0].atoms, residue_name=pbm.residues[0].name, site_property=site_property)
for resid in pbm.residues[1:]:
mol = self.convert_obatoms_to_molecule(resid.atoms, residue_name=resid.name, site_property=site_property)
for site in mol:
packed_mol.append(site.species, site.coords, properties=site.properties)
return packed_mol
|
Restore the site properties for the final packed molecule.
Args:
site_property (str):
filename (str): path to the final packed molecule.
Returns:
Molecule
|
codesearchnet
|
def framebuffer(self, color_attachments=(), depth_attachment=None) -> 'Framebuffer':
if type(color_attachments) is Texture or type(color_attachments) is Renderbuffer:
color_attachments = (color_attachments,)
ca_mglo = tuple(x.mglo for x in color_attachments)
da_mglo = None if depth_attachment is None else depth_attachment.mglo
res = Framebuffer.__new__(Framebuffer)
res.mglo, res._size, res._samples, res._glo = self.mglo.framebuffer(ca_mglo, da_mglo)
res._color_attachments = tuple(color_attachments)
res._depth_attachment = depth_attachment
res.ctx = self
res.extra = None
return res
|
A :py:class:`Framebuffer` is a collection of buffers that can be used as the destination for rendering.
The buffers for Framebuffer objects reference images from either Textures or Renderbuffers.
Args:
color_attachments (list): A list of :py:class:`Texture` or :py:class:`Renderbuffer` objects.
depth_attachment (Renderbuffer or Texture): The depth attachment.
Returns:
:py:class:`Framebuffer` object
|
juraj-google-style
|
def extract_paths(self, paths, ignore_nopath):
try:
super().extract_paths(paths=paths, ignore_nopath=ignore_nopath)
except ExtractPathError as err:
LOGGER.debug('%s: failed extracting files: %s', self.vm.name(), err.message)
if self._has_guestfs:
self.extract_paths_dead(paths, ignore_nopath)
else:
raise
|
Extract the given paths from the domain
Attempt to extract all files defined in ``paths`` with the method
defined in :func:`~lago.plugins.vm.VMProviderPlugin.extract_paths`,
if it fails, and `guestfs` is available it will try extracting the
files with guestfs.
Args:
paths(list of tuples): files to extract in
`[(src1, dst1), (src2, dst2)...]` format.
ignore_nopath(boolean): if True will ignore none existing paths.
Returns:
None
Raises:
:exc:`~lago.plugins.vm.ExtractPathNoPathError`: if a none existing
path was found on the VM, and `ignore_nopath` is False.
:exc:`~lago.plugins.vm.ExtractPathError`: on all other failures.
|
codesearchnet
|
def get(self, key):
lock.acquire()
try:
if key not in self:
return None
current_time = time.time()
if self[key].expire > current_time:
return self[key].value
deletes = []
for k, val in self.items():
if val.expire <= current_time:
deletes.append(k)
for k in deletes:
del self[k]
return None
finally:
lock.release()
|
Get an object from the cache
Arguments:
key (str): Cache key
Returns:
Cached object, or None if the key is missing or expired.
|
juraj-google-style
|
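The method above relies on a module-level `lock` and entries carrying `value`/`expire` attributes, which aren't shown; below is a minimal self-contained sketch of the same expire-on-read pattern, with names chosen for illustration.

```python
import threading
import time
from collections import namedtuple

CacheEntry = namedtuple('CacheEntry', ['value', 'expire'])
lock = threading.Lock()

class TTLCache(dict):
    """Dict-based cache whose get() mirrors the expire-on-read logic above."""

    def set(self, key, value, ttl):
        with lock:
            self[key] = CacheEntry(value, time.time() + ttl)

    def get(self, key):
        with lock:
            if key not in self:
                return None
            now = time.time()
            if self[key].expire > now:
                return self[key].value
            # Evict every expired entry, then report a miss.
            for k in [k for k, entry in self.items() if entry.expire <= now]:
                del self[k]
            return None

cache = TTLCache()
cache.set('token', 'abc123', ttl=0.1)
print(cache.get('token'))  # 'abc123'
time.sleep(0.2)
print(cache.get('token'))  # None (expired and evicted)
```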
def is_valid_isbn(isbn):
length = len(isbn)
if length == 10:
return is_isbn10_valid(isbn)
elif length == 13:
return is_isbn13_valid(isbn)
return False
|
Validate given `isbn`. Wrapper for :func:`is_isbn10_valid`/
:func:`is_isbn13_valid`.
Args:
isbn (str/list): ISBN number as string or list of digits.
Note:
The function doesn't require the ISBN version to be specified; it accepts
both ISBN-10 and ISBN-13 inputs.
Returns:
bool: ``True`` if ISBN is valid.
|
juraj-google-style
|
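The helpers `is_isbn10_valid` and `is_isbn13_valid` aren't shown; here is a standalone sketch of the standard checksum rules they presumably implement, assuming a plain digit string (no hyphens).

```python
def is_isbn10_valid(isbn):
    # ISBN-10: weighted sum with weights 10..1 must be divisible by 11; 'X' = 10.
    if len(isbn) != 10:
        return False
    digits = [10 if ch in 'xX' else int(ch) for ch in isbn]
    return sum(d * w for d, w in zip(digits, range(10, 0, -1))) % 11 == 0

def is_isbn13_valid(isbn):
    # ISBN-13: alternating weights 1 and 3; the sum must be divisible by 10.
    if len(isbn) != 13:
        return False
    digits = [int(ch) for ch in isbn]
    return sum(d * (1 if i % 2 == 0 else 3) for i, d in enumerate(digits)) % 10 == 0

print(is_isbn10_valid('080442957X'))     # True
print(is_isbn13_valid('9788086056319'))  # True
```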
def __init__(self, submission_id, submissions, storage_bucket):
self.submission_id = submission_id
self.storage_bucket = storage_bucket
self.type = None
self.submission = None
if submission_id in submissions.attacks:
self.type = TYPE_NONTARGETED
self.submission = submissions.attacks[submission_id]
elif submission_id in submissions.targeted_attacks:
self.type = TYPE_TARGETED
self.submission = submissions.targeted_attacks[submission_id]
elif submission_id in submissions.defenses:
self.type = TYPE_DEFENSE
self.submission = submissions.defenses[submission_id]
else:
raise WorkerError(
'Submission with ID "{0}" not found'.format(submission_id))
self.submission_dir = None
self.extracted_submission_dir = None
|
Initializes ExecutableSubmission.
Args:
submission_id: ID of the submissions
submissions: instance of CompetitionSubmissions with all submissions
storage_bucket: storage bucket where all submissions are stored
Raises:
WorkerError: if submission was not found
|
juraj-google-style
|
def prune_unused_nodes(meta_graph, signature_def):
graph = tf_v1.Graph()
with graph.as_default():
tf_v1.train.import_meta_graph(meta_graph, input_map={}, import_scope='')
used_node_names = set()
for (_, tensor_def) in signature_def.outputs.items():
output_tensor = graph.get_tensor_by_name(tensor_def.name)
mark_backward(output_tensor, used_node_names)
node_filter_in_list = []
for node in meta_graph.graph_def.node:
if ((node.name in used_node_names) or (node.op == 'VarHandleOp')):
node_filter_in_list.append(node)
del meta_graph.graph_def.node[:]
meta_graph.graph_def.node.extend(node_filter_in_list)
del graph
|
Function to prune unused ops given a signature def.
This function does a graph traversal through from all outputs as
defined in the signature_def to collect all used nodes. Then, any
nodes which are unused can be discarded. This is useful for graph which are
executing eagerly or on TPUs.
Args:
meta_graph: The input/output MetaGraphDef for which we wish to prune.
signature_def: A SignatureDef which specifies the outputs from which we wish
to start graph traversal.
|
codesearchnet
|
def albedo(self, value=999.0):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `albedo`'.format(value))
self._albedo = value
|
Corresponds to IDD Field `albedo`
Args:
value (float): value for IDD Field `albedo`
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def write_auth(msg_type, profile_name, auth, cfg):
key_fmt = ((profile_name + '_') + msg_type)
pwd = []
for (k, v) in CONFIG[msg_type]['auth'].items():
pwd.append(auth[k])
if (len(pwd) > 1):
cfg.pwd[key_fmt] = ' :: '.join(pwd)
else:
cfg.pwd[key_fmt] = pwd[0]
|
Write the settings into the auth portion of the cfg.
Args:
:msg_type: (str) message type to create config entry.
:profile_name: (str) name of the profile entry
:auth: (dict) auth parameters
:cfg: (jsonconfig.Config) config instance.
|
codesearchnet
|
def set_global_step(self, new_global_step, name=None):
return gen_data_flow_ops.accumulator_set_global_step(self._accumulator_ref, math_ops.cast(ops.convert_to_tensor(new_global_step), _dtypes.int64), name=name)
|
Sets the global time step of the accumulator.
The operation logs a warning if we attempt to set to a time step that is
lower than the accumulator's own time step.
Args:
new_global_step: Value of new time step. Can be a variable or a constant
name: Optional name for the operation.
Returns:
Operation that sets the accumulator's time step.
|
github-repos
|
def _trigger(self):
self._completed.set()
for callback in self._callbacks:
callback(self)
|
Trigger all callbacks registered to this Future.
This method is called internally by the batch once the batch
completes.
|
juraj-google-style
|
def get_corner(self, time):
if (self.start_time <= time <= self.end_time):
diff = (time - self.start_time)
return (self.i[diff][(0, 0)], self.j[diff][(0, 0)])
else:
return ((- 1), (- 1))
|
Gets the corner array indices of the STObject at a given time that corresponds
to the upper left corner of the bounding box for the STObject.
Args:
time: time at which the corner is being extracted.
Returns:
corner index.
|
codesearchnet
|
def decorate(self, name_or_func):
if os.environ.get("SC2_NO_STOPWATCH"):
return name_or_func if callable(name_or_func) else lambda func: func
def decorator(name, func):
@functools.wraps(func)
def _stopwatch(*args, **kwargs):
with self(name):
return func(*args, **kwargs)
return _stopwatch
if callable(name_or_func):
return decorator(name_or_func.__name__, name_or_func)
else:
return lambda func: decorator(name_or_func, func)
|
Decorate a function/method to check its timings.
To use the function's name:
@sw.decorate
def func():
pass
To name it explicitly:
@sw.decorate("name")
def random_func_name():
pass
Args:
name_or_func: the name or the function to decorate.
Returns:
If a name is passed, returns this as a decorator, otherwise returns the
decorated function.
|
juraj-google-style
|
def _PrintTSKPartitionIdentifiersOverview(
self, volume_system, volume_identifiers):
header = 'The following partitions were found:\n'
self._output_writer.Write(header)
column_names = ['Identifier', 'Offset (in bytes)', 'Size (in bytes)']
table_view = views.CLITabularTableView(column_names=column_names)
for volume_identifier in sorted(volume_identifiers):
volume = volume_system.GetVolumeByIdentifier(volume_identifier)
if not volume:
raise errors.SourceScannerError(
'Partition missing for identifier: {0:s}.'.format(
volume_identifier))
volume_extent = volume.extents[0]
volume_offset = '{0:d} (0x{0:08x})'.format(volume_extent.offset)
volume_size = self._FormatHumanReadableSize(volume_extent.size)
table_view.AddRow([volume.identifier, volume_offset, volume_size])
self._output_writer.Write('\n')
table_view.Write(self._output_writer)
self._output_writer.Write('\n')
|
Prints an overview of TSK partition identifiers.
Args:
volume_system (dfvfs.TSKVolumeSystem): volume system.
volume_identifiers (list[str]): allowed volume identifiers.
Raises:
SourceScannerError: if a volume cannot be resolved from the volume
identifier.
|
juraj-google-style
|
def crop(img, i, j, h, w):
if (not _is_pil_image(img)):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
return img.crop((j, i, (j + w), (i + h)))
|
Crop the given PIL Image.
Args:
img (PIL Image): Image to be cropped.
i (int): i in (i,j) i.e coordinates of the upper left corner.
j (int): j in (i,j) i.e coordinates of the upper left corner.
h (int): Height of the cropped image.
w (int): Width of the cropped image.
Returns:
PIL Image: Cropped image.
|
codesearchnet
|
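A hedged usage sketch, assuming the `crop` helper above is importable; note that `(i, j)` is the (row, column) of the upper-left corner while PIL reports sizes as (width, height).

```python
from PIL import Image

img = Image.new('RGB', (200, 100))         # width 200, height 100
patch = crop(img, i=10, j=30, h=50, w=80)  # upper-left corner at row 10, column 30
print(patch.size)                          # (80, 50)
```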
def is_fit_to_structure(self, structure, tol=0.01):
return ((self - self.fit_to_structure(structure)) < tol).all()
|
Tests whether a tensor is invariant with respect to the
symmetry operations of a particular structure by testing
whether the residual of the symmetric portion is below a
tolerance
Args:
structure (Structure): structure to be fit to
tol (float): tolerance for symmetry testing
|
codesearchnet
|
def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):
return convert_graphdef(input_data, input_tensors, output_tensors, *args, **kwargs)
|
Convert a TensorFlow GraphDef to TFLite.
This function is deprecated. Please use `tf.lite.TFLiteConverter` API instead.
Conversion can be customized by providing arguments that are forwarded to
`build_model_flags` and `build_conversion_flags` (see documentation for
details).
Args:
input_data: Input data (i.e. often `sess.graph_def`).
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
*args: See `build_model_flags` and `build_conversion_flags`.
**kwargs: See `build_model_flags` and `build_conversion_flags`.
Returns:
The converted TensorFlow Lite model in a bytes array.
Raises:
Defined in `convert`.
|
github-repos
|
def proc_val(key, val):
float_keys = ('etot_conv_thr','forc_conv_thr','conv_thr','Hubbard_U','Hubbard_J0','degauss',
'starting_magnetization',)
int_keys = ('nstep','iprint','nberrycyc','gdir','nppstr','ibrav','nat','ntyp','nbnd','nr1',
'nr2','nr3','nr1s','nr2s','nr3s','nspin','nqx1','nqx2','nqx3','lda_plus_u_kind',
'edir','report','esm_nfit','space_group','origin_choice','electron_maxstep',
'mixing_ndim','mixing_fixed_ns','ortho_para','diago_cg_maxiter','diago_david_ndim',
'nraise','bfgs_ndim','if_pos','nks','nk1','nk2','nk3','sk1','sk2','sk3','nconstr')
bool_keys = ('wf_collect','tstress','tprnfor','lkpoint_dir','tefield','dipfield','lelfield',
'lorbm','lberry','lfcpopt','monopole','nosym','nosym_evc','noinv','no_t_rev',
'force_symmorphic','use_all_frac','one_atom_occupations','starting_spin_angle',
'noncolin','x_gamma_extrapolation','lda_plus_u','lspinorb','london',
'ts_vdw_isolated','xdm','uniqueb','rhombohedral','realxz','block',
'scf_must_converge','adaptive_thr','diago_full_acc','tqr','remove_rigid_rot',
'refold_pos')
def smart_int_or_float(numstr):
if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
return float(numstr)
else:
return int(numstr)
try:
if key in bool_keys:
if val.lower() == ".true.":
return True
elif val.lower() == ".false.":
return False
else:
raise ValueError(key + " should be a boolean type!")
if key in float_keys:
return float(re.search(r"^-?\d*\.?\d*d?-?\d*", val.lower()).group(0).replace("d", "e"))
if key in int_keys:
return int(re.match(r"^-?[0-9]+", val).group(0))
except ValueError:
pass
try:
val = val.replace("d","e")
return smart_int_or_float(val)
except ValueError:
pass
if "true" in val.lower():
return True
if "false" in val.lower():
return False
m = re.match(r"^[\"|'](.+)[\"|']$", val)
if m:
return m.group(1)
|
Static helper method to convert PWINPUT parameters to proper type, e.g.,
integers, floats, etc.
Args:
key: PWINPUT parameter key
val: Actual value of PWINPUT parameter.
|
juraj-google-style
|
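A hedged usage sketch of the conversions above, assuming `proc_val` is in scope (the docstring describes it as a static helper); the values are illustrative PWscf inputs.

```python
print(proc_val('nstep', '50'))           # 50     (int key)
print(proc_val('conv_thr', '1.0d-6'))    # 1e-06  (float key, Fortran 'd' exponent)
print(proc_val('lda_plus_u', '.TRUE.'))  # True   (bool key)
print(proc_val('calculation', "'scf'"))  # 'scf'  (quotes stripped)
```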
def add_sources_argument(cls, group, allow_filters=True, prefix=None, add_root_paths=False):
prefix = (prefix or cls.argument_prefix)
group.add_argument(('--%s-sources' % prefix), action='store', nargs='+', dest=('%s_sources' % prefix.replace('-', '_')), help=('%s source files to parse' % prefix))
if allow_filters:
group.add_argument(('--%s-source-filters' % prefix), action='store', nargs='+', dest=('%s_source_filters' % prefix.replace('-', '_')), help=('%s source files to ignore' % prefix))
if add_root_paths:
group.add_argument(('--%s-source-roots' % prefix), action='store', nargs='+', dest=('%s_source_roots' % prefix.replace('-', '_')), help=('%s source root directories allowing files to be referenced relatively to those' % prefix))
|
Subclasses may call this to add sources and source_filters arguments.
Args:
group: argparse.ArgumentGroup, the extension argument group
allow_filters: bool, Whether the extension wishes to expose a
source_filters argument.
prefix: str, arguments have to be namespaced.
add_root_paths: bool, Whether to also expose a source-roots argument.
codesearchnet
|
def _save_and_log_checkpoint(self, actor):
actor_id = self._worker.actor_id
checkpoint_info = self._worker.actor_checkpoint_info[actor_id]
checkpoint_info.num_tasks_since_last_checkpoint += 1
now = int(1000 * time.time())
checkpoint_context = ray.actor.CheckpointContext(
actor_id, checkpoint_info.num_tasks_since_last_checkpoint,
now - checkpoint_info.last_checkpoint_timestamp)
if actor.should_checkpoint(checkpoint_context):
try:
now = int(1000 * time.time())
checkpoint_id = (self._worker.raylet_client.
prepare_actor_checkpoint(actor_id))
checkpoint_info.checkpoint_ids.append(checkpoint_id)
actor.save_checkpoint(actor_id, checkpoint_id)
if (len(checkpoint_info.checkpoint_ids) >
ray._config.num_actor_checkpoints_to_keep()):
actor.checkpoint_expired(
actor_id,
checkpoint_info.checkpoint_ids.pop(0),
)
checkpoint_info.num_tasks_since_last_checkpoint = 0
checkpoint_info.last_checkpoint_timestamp = now
except Exception:
traceback_str = ray.utils.format_error_message(
traceback.format_exc())
ray.utils.push_error_to_driver(
self._worker,
ray_constants.CHECKPOINT_PUSH_ERROR,
traceback_str,
driver_id=self._worker.task_driver_id)
|
Save an actor checkpoint if necessary and log any errors.
Args:
actor: The actor to checkpoint.
Returns:
The result of the actor's user-defined `save_checkpoint` method.
|
juraj-google-style
|
def assign(self, value, use_locking=False, name=None, read_value=True):
assign = state_ops.assign(self._variable, value, use_locking=use_locking, name=name)
if read_value:
return assign
return assign.op
|
Assigns a new value to the variable.
This is essentially a shortcut for `assign(self, value)`.
Args:
value: A `Tensor`. The new value for this variable.
use_locking: If `True`, use locking during the assignment.
name: The name of the operation to be created
read_value: if True, will return something which evaluates to the new
value of the variable; if False will return the assign op.
Returns:
A `Tensor` that will hold the new value of this variable after
the assignment has completed.
|
github-repos
|
def convert_persistent_value(self, shift, instruction):
command_dict = {'name': 'pv', 't0': (shift + instruction.start_time), 'ch': instruction.channels[0].name, 'val': instruction.command.value}
return self._qobj_model(**command_dict)
|
Return converted `PersistentValueInstruction`.
Args:
shift(int): Offset time.
instruction (PersistentValueInstruction): persistent value instruction.
Returns:
dict: Dictionary of required parameters.
|
codesearchnet
|
def create_chunker(self, chunk_size):
rolling_hash = _rabinkarprh.RabinKarpHash(self.window_size, self._seed)
rolling_hash.set_threshold(1.0 / chunk_size)
return RabinKarpCDC._Chunker(rolling_hash)
|
Create a chunker performing content-defined chunking (CDC) using Rabin Karp's rolling hash scheme with a
specific, expected chunk size.
Args:
chunk_size (int): (Expected) target chunk size.
Returns:
BaseChunker: A chunker object.
|
juraj-google-style
|
def open_model(self, model_path, audit=False):
if audit:
self._add_entry(templates.FILE_OPEN_AUDIT
.format(model_path=model_path))
else:
self._add_entry(templates.FILE_OPEN
.format(model_path=model_path))
|
Append a open non-workshared model entry to the journal.
This instructs Revit to open a non-workshared model.
Args:
model_path (str): full path to non-workshared model
audit (bool): if True audits the model when opening
|
juraj-google-style
|
def _field_to_json(field, row_value):
if (row_value is None):
return None
if (field.mode == 'REPEATED'):
return _repeated_field_to_json(field, row_value)
if (field.field_type == 'RECORD'):
return _record_field_to_json(field.fields, row_value)
return _scalar_field_to_json(field, row_value)
|
Convert a field into JSON-serializable values.
Args:
field ( \
:class:`~google.cloud.bigquery.schema.SchemaField`, \
):
The SchemaField to use for type conversion and field name.
row_value (Union[ \
Sequence[list], \
any, \
]):
Row data to be inserted. If the SchemaField's mode is
REPEATED, assume this is a list. If not, the type
is inferred from the SchemaField's field_type.
Returns:
any:
A JSON-serializable object.
|
codesearchnet
|
def _evaluateTFLiteModelUsingSignatureDef(self, tflite_model, signature_key, inputs):
interpreter = Interpreter(model_content=tflite_model)
signature_runner = interpreter.get_signature_runner(signature_key)
return signature_runner(**inputs)
|
Evaluates the model on the `inputs`.
Args:
tflite_model: TensorFlow Lite model.
signature_key: Signature key.
inputs: Map from input tensor names in the SignatureDef to tensor value.
Returns:
Dictionary of outputs.
Key is the output name in the SignatureDef 'signature_key'
Value is the output value
|
github-repos
|
def generate_typegraph(program: cfg.Program, var_table: dict[int, str], loader: jinja2.BaseLoader) -> str:
encoder = typegraph_serializer.TypegraphEncoder()
enc_prog = encoder.default(program)
return _generate_visualization(template_file=_TYPEGRAPH_TEMPLATE_NAME, loader=loader, program=json.dumps(enc_prog), query_table=enc_prog['queries'], var_table=var_table)
|
Generate the visualization webpage.
Args:
program: cfg.Program. The instance of the program to visualize.
var_table: dict[int, str]. A mapping of cfg.Variable IDs to names.
loader: A jinja2 loader
Returns:
str. The rendered visualization page.
|
github-repos
|
def get_by(self, field, value):
if (not field):
logger.exception(RESOURCE_CLIENT_INVALID_FIELD)
raise ValueError(RESOURCE_CLIENT_INVALID_FIELD)
filter = '"{0}=\'{1}\'"'.format(field, value)
results = self.get_all(filter=filter)
if ('.' not in field):
results = [item for item in results if (str(item.get(field, '')).lower() == value.lower())]
return results
|
Get the resource by passing a field and its value.
Note:
This function uses get_all passing a filter. The search is case-insensitive.
Args:
field: Field name to filter.
value: Value to filter.
Returns:
dict
|
codesearchnet
|
def Serialize(self, writer):
super(AccountState, self).Serialize(writer)
writer.WriteUInt160(self.ScriptHash)
writer.WriteBool(self.IsFrozen)
writer.WriteVarInt(len(self.Votes))
for vote in self.Votes:
writer.WriteBytes(vote)
blen = len(self.Balances)
writer.WriteVarInt(blen)
for key, fixed8 in self.Balances.items():
writer.WriteUInt256(key)
writer.WriteFixed8(fixed8)
|
Serialize full object.
Args:
writer (neo.IO.BinaryWriter):
|
juraj-google-style
|
def default(self, value):
if isinstance(value, messages.Enum):
return str(value)
if (six.PY3 and isinstance(value, bytes)):
return value.decode('utf8')
if isinstance(value, messages.Message):
result = {}
for field in value.all_fields():
item = value.get_assigned_value(field.name)
if (item not in (None, [], ())):
result[field.name] = self.__protojson_protocol.encode_field(field, item)
for unknown_key in value.all_unrecognized_fields():
(unrecognized_field, _) = value.get_unrecognized_field_info(unknown_key)
result[unknown_key] = unrecognized_field
return result
return super(MessageJSONEncoder, self).default(value)
|
Return dictionary instance from a message object.
Args:
value: Value to get dictionary for. If not encodable, will
call superclasses default method.
|
codesearchnet
|
def FindNode(self, component_path):
node = self.state.component_tree
for component in component_path:
node = node[component]
return node
|
Find the node in the component_tree from component_path.
Args:
component_path: A list of components which reference a node in the
component tree. This allows us to resume processing in the tree.
Returns:
A node in the component_tree.
|
codesearchnet
|
def GetTermSize(self):
return self._term_size
|
Returns the terminal (x, y) dimensions in characters.
Returns:
(x, y): A tuple of the terminal x and y dimensions.
|
github-repos
|
def __init__(self, **kwargs):
if kwargs:
raise ValueError('Unused keyword arguments: {0:s}.'.format(
', '.join(kwargs)))
super(Decrypter, self).__init__()
|
Initializes a decrypter.
Args:
kwargs (dict): keyword arguments depending on the decrypter.
Raises:
ValueError: when there are unused keyword arguments.
|
juraj-google-style
|
def __init__(self, xid=None, experimenter=None, exp_type=None, data=b''):
super().__init__(xid)
self.experimenter = experimenter
self.exp_type = exp_type
self.data = data
|
Create a ExperimenterHeader with the optional parameters below.
Args:
xid (int): xid to be used on the message header.
experimenter (int): Vendor ID:
MSB 0: low-order bytes are IEEE OUI.
MSB != 0: defined by ONF.
exp_type (int): Experimenter defined.
data (bytes): Experimenter-defined arbitrary additional data.
|
juraj-google-style
|
def to_geojson(self, filename, proj, metadata=None):
if (metadata is None):
metadata = {}
json_obj = {'type': 'FeatureCollection', 'features': [], 'properties': {}}
json_obj['properties']['times'] = self.times.tolist()
json_obj['properties']['dx'] = self.dx
json_obj['properties']['step'] = self.step
json_obj['properties']['u'] = self.u.tolist()
json_obj['properties']['v'] = self.v.tolist()
for (k, v) in metadata.items():
json_obj['properties'][k] = v
for (t, time) in enumerate(self.times):
feature = {'type': 'Feature', 'geometry': {'type': 'Polygon'}, 'properties': {}}
boundary_coords = self.boundary_polygon(time)
lonlat = np.vstack(proj(boundary_coords[0], boundary_coords[1], inverse=True))
lonlat_list = lonlat.T.tolist()
if (len(lonlat_list) > 0):
lonlat_list.append(lonlat_list[0])
feature['geometry']['coordinates'] = [lonlat_list]
for attr in ['timesteps', 'masks', 'x', 'y', 'i', 'j']:
feature['properties'][attr] = getattr(self, attr)[t].tolist()
feature['properties']['attributes'] = {}
for (attr_name, steps) in self.attributes.items():
feature['properties']['attributes'][attr_name] = steps[t].tolist()
json_obj['features'].append(feature)
file_obj = open(filename, 'w')
json.dump(json_obj, file_obj, indent=1, sort_keys=True)
file_obj.close()
return
|
Output the data in the STObject to a geoJSON file.
Args:
filename: Name of the file
proj: PyProj object for converting the x and y coordinates back to latitude and longitude values.
metadata: Metadata describing the object to be included in the top-level properties.
|
codesearchnet
|
def convert_datetime_type(obj):
if (pd and (obj is pd.NaT)):
return np.nan
if (pd and isinstance(obj, pd.Period)):
return (obj.to_timestamp().value / (10 ** 6.0))
if (pd and isinstance(obj, _pd_timestamp)):
return (obj.value / (10 ** 6.0))
elif (pd and isinstance(obj, pd.Timedelta)):
return (obj.value / (10 ** 6.0))
elif isinstance(obj, dt.datetime):
diff = (obj.replace(tzinfo=None) - DT_EPOCH)
return (diff.total_seconds() * 1000.0)
elif isinstance(obj, dt.date):
return ((dt.datetime(*obj.timetuple()[:6]) - DT_EPOCH).total_seconds() * 1000)
elif isinstance(obj, np.datetime64):
epoch_delta = (obj - NP_EPOCH)
return (epoch_delta / NP_MS_DELTA)
elif isinstance(obj, dt.time):
return (((((obj.hour * 3600) + (obj.minute * 60)) + obj.second) * 1000) + (obj.microsecond / 1000.0))
|
Convert any recognized date, time, or datetime value to floating point
milliseconds since epoch.
Args:
obj (object) : the object to convert
Returns:
float : milliseconds
|
codesearchnet
|
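A hedged usage sketch, assuming `convert_datetime_type` and its module constants (`DT_EPOCH` as the Unix epoch, etc.) are importable.

```python
import datetime as dt

print(convert_datetime_type(dt.datetime(2000, 1, 1)))  # 946684800000.0 (ms since epoch)
print(convert_datetime_type(dt.time(0, 0, 1, 500)))    # 1000.5 (ms since midnight)
```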
def delete_vmss_vms(access_token, subscription_id, resource_group, vmss_name, vm_ids):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
'/delete?api-version=', COMP_API])
body = '{"instanceIds" : ' + vm_ids + '}'
return do_post(endpoint, body, access_token)
|
Delete a VM in a VM Scale Set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the virtual machine scale set.
vm_ids (str): String representation of a JSON list of VM IDs. E.g. '[1,2]'.
Returns:
HTTP response.
|
juraj-google-style
|
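A hedged usage sketch; every value below is hypothetical, and the returned object is whatever `do_post` yields (presumably a requests-style HTTP response, per the docstring).

```python
# Hypothetical identifiers, for illustration only.
response = delete_vmss_vms(
    access_token='eyJ0eXAi...',
    subscription_id='11111111-2222-3333-4444-555555555555',
    resource_group='my-resource-group',
    vmss_name='my-vmss',
    vm_ids='[1, 2]')
print(response.status_code)
```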
def _CopyDateTimeFromStringISO8601(self, time_string):
if (not time_string):
raise ValueError('Invalid time string.')
time_string_length = len(time_string)
(year, month, day_of_month) = self._CopyDateFromString(time_string)
if (time_string_length <= 10):
return {'year': year, 'month': month, 'day_of_month': day_of_month}
if (time_string[10] != 'T'):
raise ValueError('Invalid time string - missing date and time separator.')
(hours, minutes, seconds, microseconds, time_zone_offset) = self._CopyTimeFromStringISO8601(time_string[11:])
if time_zone_offset:
(year, month, day_of_month, hours, minutes) = self._AdjustForTimeZoneOffset(year, month, day_of_month, hours, minutes, time_zone_offset)
date_time_values = {'year': year, 'month': month, 'day_of_month': day_of_month, 'hours': hours, 'minutes': minutes, 'seconds': seconds}
if (microseconds is not None):
date_time_values['microseconds'] = microseconds
return date_time_values
|
Copies a date and time from an ISO 8601 date and time string.
Args:
time_string (str): date and time value formatted as:
YYYY-MM-DDThh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds
fraction and time zone offset are optional.
Returns:
dict[str, int]: date and time values, such as year, month, day of month,
hours, minutes, seconds and microseconds.
Raises:
ValueError: if the time string is invalid or not supported.
codesearchnet
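For comparison, a standalone sketch of the same split at the 'T' separator using only the standard library; datetime.fromisoformat (Python 3.7+) accepts the same offset and fraction forms, while the method above returns a plain dict of components instead.

import datetime as dt

time_string = '2019-03-04T13:45:30.123456+02:00'
date_part, time_part = time_string[:10], time_string[11:]   # split at the 'T' separator
parsed = dt.datetime.fromisoformat(time_string)
print(date_part, time_part)
print(parsed.year, parsed.month, parsed.day, parsed.hour, parsed.minute, parsed.second)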
|
def set_record(self, name, record_id, record):
if (name not in self._cache):
self._cache[name] = {}
self._cache[name][record_id] = record
|
Save a record into the cache.
Args:
name (string): The name to save the model under.
record_id (int): The record id.
record (:class:`cinder_data.model.CinderModel`): The model
|
codesearchnet
|
def sg_prod(tensor, opt):
return tf.reduce_prod(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
|
r"""Computes the product of elements across axis of a tensor.
See `tf.reduce_prod()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
|
juraj-google-style
|
def AddArg(self, argument):
self.args.append(argument)
if (len(self.args) > self.number_of_args):
raise errors.ParseError('Too many arguments for this expression.')
elif (len(self.args) == self.number_of_args):
return True
return False
|
Adds a new argument to this expression.
Args:
argument (str): argument to add.
Returns:
True if the argument is the last argument, False otherwise.
Raises:
ParseError: If there are too many arguments.
|
codesearchnet
|
def __checkDecisionParameters(self, result, **values):
error = []
if not result:
error.append('Function parameter (result array) should contain one or more header string!')
if not values:
error.append('Function parameter (values variables) should contain one or more variable')
for header in result:
if not header in self.header:
error.append('String (' + header + ') in result is not in header!')
for header in values:
if not header in self.header:
error.append('Variable (' + header + ') in values is not in header!')
elif not values[header].split():
error.append('Variable (' + header + ') in values is empty string')
if error:
return error
|
Checks the decision parameters and collects error messages if it finds something wrong.
Args:
result (array of str): See public decision methods
**values (array of str): See public decision methods
Returns:
Array of error messages when a problem is found: the result array is empty,
the values dict is empty, a result or value key is missing from the header,
or a value is an empty string.
|
juraj-google-style
|
def send_msg(self, address, args=[]):
if not address.startswith('/'):
address = '/{}'.format(address)
msg = osc_message_builder.OscMessageBuilder(address=address)
for arg in args:
msg.add_arg(arg)
self.conn.send(msg.build())
return
|
Send multiple args into a single message to a given address.
Args:
address (str): OSC Address.
args (list): Arguments to be parsed in VVVV.
|
juraj-google-style
|
def __init__(self, channel):
self.Exchange = channel.unary_unary(
'/communicator_objects.UnityToExternal/Exchange',
request_serializer=mlagents_dot_envs_dot_communicator__objects_dot_unity__message__pb2.UnityMessage.SerializeToString,
response_deserializer=mlagents_dot_envs_dot_communicator__objects_dot_unity__message__pb2.UnityMessage.FromString,
)
|
Constructor.
Args:
channel: A grpc.Channel.
|
juraj-google-style
|
def create_pipeline_field(self, pipeline_key, name, field_type, **kwargs):
uri = '/'.join([self.api_uri,
self.pipelines_suffix,
pipeline_key,
self.fields_suffix
])
code, data = self._create_field(uri, name, field_type, **kwargs)
return code, data
|
Creates a pipeline field with the provided attributes.
Args:
pipeline_key: key of the pipeline to add the field to
name: required name string
field_type: required type string [TEXT_INPUT, DATE or PERSON]
kwargs: optional additional field attributes
Returns:
(status code, field dict)
|
juraj-google-style
|
def write(self, data):
block_remaining = _BLOCK_SIZE - self.__position % _BLOCK_SIZE
if block_remaining < _HEADER_LENGTH:
self.__writer.write('\x00' * block_remaining)
self.__position += block_remaining
block_remaining = _BLOCK_SIZE
if block_remaining < len(data) + _HEADER_LENGTH:
first_chunk = data[:block_remaining - _HEADER_LENGTH]
self.__write_record(_RECORD_TYPE_FIRST, first_chunk)
data = data[len(first_chunk):]
while True:
block_remaining = _BLOCK_SIZE - self.__position % _BLOCK_SIZE
if block_remaining >= len(data) + _HEADER_LENGTH:
self.__write_record(_RECORD_TYPE_LAST, data)
break
else:
chunk = data[:block_remaining - _HEADER_LENGTH]
self.__write_record(_RECORD_TYPE_MIDDLE, chunk)
data = data[len(chunk):]
else:
self.__write_record(_RECORD_TYPE_FULL, data)
|
Write single record.
Args:
data: record data to write as string, byte array or byte sequence.
|
juraj-google-style
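A rough sketch of the chunking rule above, reduced to planning only: the block size and header length are assumptions for illustration, and the function just reports how a record of a given size would be split into FULL, FIRST, MIDDLE and LAST chunks.

_BLOCK_SIZE = 32 * 1024      # assumed value for illustration
_HEADER_LENGTH = 7           # assumed value for illustration

def plan_chunks(position, size):
    chunks = []
    remaining = _BLOCK_SIZE - position % _BLOCK_SIZE
    if remaining < _HEADER_LENGTH:            # too little room even for a header: pad out the block
        position += remaining
        remaining = _BLOCK_SIZE
    if remaining >= size + _HEADER_LENGTH:    # record fits in the current block
        return [('FULL', size)]
    chunks.append(('FIRST', remaining - _HEADER_LENGTH))
    size -= remaining - _HEADER_LENGTH
    while size + _HEADER_LENGTH > _BLOCK_SIZE:    # fill whole blocks with MIDDLE chunks
        chunks.append(('MIDDLE', _BLOCK_SIZE - _HEADER_LENGTH))
        size -= _BLOCK_SIZE - _HEADER_LENGTH
    chunks.append(('LAST', size))
    return chunks

print(plan_chunks(position=0, size=100))      # [('FULL', 100)]
print(plan_chunks(position=0, size=40000))    # [('FIRST', 32761), ('LAST', 7239)]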
|
def inv_attractor(dx, alpha: float=300, gamma: int=2):
return dx.div(1 + alpha * dx.pow(gamma))
|
Inverse attractor: dc = dx / (1 + alpha*dx^gamma), where dx = a - c, a = attractor point, c = bin center, dc = shift in bin center
This is the default one according to the accompanying paper.
Args:
dx (`torch.Tensor`):
The difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center.
alpha (`float`, *optional*, defaults to 300):
Proportional Attractor strength. Determines the absolute strength. Lower alpha = greater attraction.
gamma (`int`, *optional*, defaults to 2):
Exponential Attractor strength. Determines the "region of influence" and indirectly number of bin centers affected.
Lower gamma = farther reach.
Returns:
torch.Tensor: Delta shifts - dc; New bin centers = Old bin centers + dc
|
github-repos
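A quick numeric check of the formula, assuming PyTorch is installed and the function above is in scope; small differences pass through almost unchanged while larger ones are damped.

import torch

dx = torch.tensor([0.0, 0.01, 0.1])
dc = inv_attractor(dx)      # dx / (1 + 300 * dx**2) with the default alpha and gamma
print(dc)                   # roughly tensor([0.0000, 0.0097, 0.0250])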
|
def convert_bboxes_from_albumentations(bboxes, target_format, rows, cols, check_validity=False):
return [convert_bbox_from_albumentations(bbox, target_format, rows, cols, check_validity) for bbox in bboxes]
|
Convert a list of bounding boxes from the format used by albumentations to a format, specified
in `target_format`.
Args:
bboxes (list): List of bounding box with coordinates in the format used by albumentations
target_format (str): required format of the output bounding box. Should be 'coco' or 'pascal_voc'.
rows (int): image height
cols (int): image width
check_validity (bool): check if all boxes are valid boxes
|
juraj-google-style
|
def filter_by_analysis_period(self, analysis_period):
_filtered_data = self.filter_by_months(analysis_period.months_int)
_filtered_data.header._analysis_period = analysis_period
return _filtered_data
|
Filter the Data Collection based on an analysis period.
Args:
analysis_period: A Ladybug analysis period
Return:
A new Data Collection with filtered data
|
juraj-google-style
|
def is_datafile_valid(datafile):
try:
datafile_json = json.loads(datafile)
except:
return False
try:
jsonschema.Draft4Validator(constants.JSON_SCHEMA).validate(datafile_json)
except:
return False
return True
|
Given a datafile determine if it is valid or not.
Args:
datafile: JSON string representing the project.
Returns:
Boolean depending upon whether datafile is valid or not.
|
juraj-google-style
|
def Open(self, file_object):
file_object.seek(0, os.SEEK_SET)
signature_data = file_object.read(6)
self.file_format = None
if (len(signature_data) > 2):
if (signature_data[:2] == self._CPIO_SIGNATURE_BINARY_BIG_ENDIAN):
self.file_format = 'bin-big-endian'
elif (signature_data[:2] == self._CPIO_SIGNATURE_BINARY_LITTLE_ENDIAN):
self.file_format = 'bin-little-endian'
elif (signature_data == self._CPIO_SIGNATURE_PORTABLE_ASCII):
self.file_format = 'odc'
elif (signature_data == self._CPIO_SIGNATURE_NEW_ASCII):
self.file_format = 'newc'
elif (signature_data == self._CPIO_SIGNATURE_NEW_ASCII_WITH_CHECKSUM):
self.file_format = 'crc'
if (self.file_format is None):
raise IOError('Unsupported CPIO format.')
self._file_object = file_object
self._file_size = file_object.get_size()
self._ReadFileEntries(self._file_object)
|
Opens the CPIO archive file.
Args:
file_object (FileIO): a file-like object.
Raises:
IOError: if the file format signature is not supported.
OSError: if the file format signature is not supported.
|
codesearchnet
|
async def _call_rpc(self, header):
length, _, cmd, feature, address = struct.unpack("<BBBBB", bytes(header))
rpc_id = (feature << 8) | cmd
payload = self.rpc_payload[:length]
self._logger.debug("Calling RPC %d:%04X with %s", address, rpc_id, binascii.hexlify(payload))
exception = None
response = None
try:
response = await self.send_rpc(self.CLIENT_ID, str(self.device.iotile_id), address, rpc_id, bytes(payload), timeout=30.0)
except VALID_RPC_EXCEPTIONS as err:
exception = err
except Exception as err:
self._logger.exception("Error calling RPC %d:%04X", address, rpc_id)
exception = err
status, response = pack_rpc_response(response, exception)
resp_header = struct.pack("<BBBB", status, 0, 0, len(response))
await self._send_notification(self.ReceiveHeaderHandle, resp_header)
if len(response) > 0:
await self._send_notification(self.ReceivePayloadHandle, response)
|
Call an RPC given a header and possibly a previously sent payload
Args:
header (bytearray): The RPC header we should call
|
juraj-google-style
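A small worked example of how the 16-bit RPC id above is assembled from the cmd and feature bytes of the packed header (the values here are made up):

import struct

header = struct.pack('<BBBBB', 4, 0, 0x34, 0x12, 9)   # length, reserved, cmd, feature, address
length, _, cmd, feature, address = struct.unpack('<BBBBB', header)
rpc_id = (feature << 8) | cmd
print(hex(rpc_id), address)                           # 0x1234 9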
|
def del_method(self, m):
if isinstance(m, types.FunctionType) and not iscoroutinefunction(m):
wrkey = ('function', id(m))
else:
f, obj = get_method_vars(m)
wrkey = (f, id(obj))
if wrkey in self:
del self[wrkey]
|
Remove an instance method or function if it exists
Args:
m: The instance method or function to remove
|
juraj-google-style
|
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(GetResponsePayload, self).read(input_stream, kmip_version=kmip_version)
local_stream = utils.BytearrayStream(input_stream.read(self.length))
if self.is_tag_next(enums.Tags.OBJECT_TYPE, local_stream):
self._object_type = primitives.Enumeration(enum=enums.ObjectType, tag=enums.Tags.OBJECT_TYPE)
self._object_type.read(local_stream, kmip_version=kmip_version)
else:
raise ValueError('Parsed payload encoding is missing the object type field.')
if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):
self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)
self._unique_identifier.read(local_stream, kmip_version=kmip_version)
else:
raise ValueError('Parsed payload encoding is missing the unique identifier field.')
self.secret = self.secret_factory.create(self.object_type)
if self.is_tag_next(self._secret.tag, local_stream):
self._secret.read(local_stream, kmip_version=kmip_version)
else:
raise ValueError('Parsed payload encoding is missing the secret field.')
self.is_oversized(local_stream)
|
Read the data encoding the Get response payload and decode it
into its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the object type, unique identifier, or
secret attributes are missing from the encoded payload.
|
codesearchnet
|
def CacheFileObject(self, path_spec, file_object):
self._file_object_cache.CacheObject(path_spec.comparable, file_object)
|
Caches a file-like object based on a path specification.
Args:
path_spec (PathSpec): path specification.
file_object (FileIO): file-like object.
|
juraj-google-style
|
def _collect_tokens(self, node: dict) -> list:
tokens = [self._termination_char] if self._termination_char in node else []
for token, subtrie_head in node.items():
if token != self._termination_char:
subtokens = self._collect_tokens(subtrie_head)
tokens.extend([token + subtoken for subtoken in subtokens])
return tokens
|
Generates all tokens in the Trie starting from a given node.
Args:
node (dict): The node in the Trie from which tokens need to be generated.
Returns:
list: List of tokens generated from the given node.
|
github-repos
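A standalone sketch of the same depth-first collection over a nested-dict trie; using the empty string as the termination character is an assumption made for this illustration, not necessarily the class's actual convention.

TERM = ''   # stand-in termination character

def collect_tokens(node):
    tokens = [TERM] if TERM in node else []
    for ch, child in node.items():
        if ch != TERM:
            tokens.extend(ch + rest for rest in collect_tokens(child))
    return tokens

trie = {'c': {'a': {'t': {TERM: 1}, 'r': {TERM: 1, 't': {TERM: 1}}}}}
print(collect_tokens(trie))   # ['cat', 'car', 'cart']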
|
def _get_query_results(self, job_id, retry, project=None, timeout_ms=None, location=None):
extra_params = {'maxResults': 0}
if (project is None):
project = self.project
if (timeout_ms is not None):
extra_params['timeoutMs'] = timeout_ms
if (location is None):
location = self.location
if (location is not None):
extra_params['location'] = location
path = '/projects/{}/queries/{}'.format(project, job_id)
resource = self._call_api(retry, method='GET', path=path, query_params=extra_params)
return _QueryResults.from_api_repr(resource)
|
Get the query results object for a query job.
Arguments:
job_id (str): Name of the query job.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
project (str):
(Optional) project ID for the query job (defaults to the
project of the client).
timeout_ms (int):
(Optional) number of milliseconds the API call should
wait for the query to complete before the request times out.
location (str): Location of the query job.
Returns:
google.cloud.bigquery.query._QueryResults:
A new ``_QueryResults`` instance.
|
codesearchnet
|
def prune_intermediate_layers(node):
if not node.get('children'):
return
layer_blocks = [(i, child) for i, child in enumerate(node['children']) if is_layer_block(child)]
if len(layer_blocks) > 2:
to_remove = [i for i, _ in layer_blocks[1:-1]]
node['children'] = [child for i, child in enumerate(node['children']) if i not in to_remove]
for child in node['children']:
prune_intermediate_layers(child)
|
Recursively removes intermediate layers from the tree to improve readability.
Keeps at least the first and last layers if many consecutive layers are present.
Args:
node (`dict`): The root or subnode to prune recursively.
|
github-repos
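A hypothetical walk-through, assuming the function above is pasted into the same module: is_layer_block is a stand-in predicate here (the real helper lives alongside the function), and children whose name starts with 'layer_' count as layer blocks.

def is_layer_block(node):
    # Stand-in for the real predicate used by the function above.
    return node.get('name', '').startswith('layer_')

tree = {'name': 'root', 'children': [
    {'name': f'layer_{i}', 'children': []} for i in range(5)
]}
prune_intermediate_layers(tree)
print([c['name'] for c in tree['children']])   # ['layer_0', 'layer_4']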
|
def __init__(self, all_reduce_alg='nccl', num_packs=1):
self._all_reduce_alg = all_reduce_alg
self._num_packs = num_packs
self._simple_cross_replica_ops = ReductionToOneDevice()
super(AllReduceCrossDeviceOps, self).__init__()
|
Initializes the object.
Args:
all_reduce_alg: the all-reduce algorithm to use, currently only "nccl" or
"hierarchical_copy" are supported.
num_packs: a non-negative integer. The number of packs to split values
into. If zero, no packing will be done.
|
github-repos
|
def read_named_csv(name, data_path=DATA_PATH, nrows=None, verbose=True):
if os.path.isfile(name):
try:
return read_json(name)
except (IOError, UnicodeDecodeError, json.JSONDecodeError):
pass
try:
return read_csv(name, nrows=nrows)
except (IOError, pd.errors.ParserError):
pass
try:
return read_txt(name, nrows=nrows)
except (IOError, UnicodeDecodeError):
pass
data_path = expand_filepath(data_path)
if os.path.isfile(os.path.join(data_path, name)):
return read_csv(os.path.join(data_path, name), nrows=nrows)
if name in DATASET_NAME2FILENAME:
name = DATASET_NAME2FILENAME[name]
if name.lower().endswith('.txt') or name.lower().endswith('.txt.gz'):
return read_text(os.path.join(data_path, name), nrows=nrows)
else:
return read_csv(os.path.join(data_path, name), nrows=nrows)
try:
return read_csv(os.path.join(data_path, name + '.csv.gz'), nrows=nrows)
except IOError:
pass
try:
return read_csv(os.path.join(data_path, name + '.csv'), nrows=nrows)
except IOError:
pass
try:
return read_json(os.path.join(data_path, name + '.json'))
except IOError:
pass
try:
return read_txt(os.path.join(data_path, name + '.txt'), verbose=verbose)
except IOError:
pass
try:
return KeyedVectors.load_word2vec_format(os.path.join(BIGDATA_PATH, name + '.bin.gz'), binary=True)
except IOError:
pass
except ValueError:
pass
try:
return read_txt(os.path.join(BIGDATA_PATH, name + '.txt'), verbose=verbose)
except IOError:
pass
|
Convert a dataset in a local file (usually a CSV) into a Pandas DataFrame
TODO: should be called read_named_dataset
Args:
`name` is assumed not to have an extension (like ".csv"); alternative extensions are tried automatically.
|
juraj-google-style
|
def get_new_address(self, id=None, endpoint=None):
return self._call_endpoint(GET_NEW_ADDRESS, id=id, endpoint=endpoint)
|
Create new address
Args:
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call
|
juraj-google-style
|
def ListPlugins(logdir):
plugins_dir = os.path.join(logdir, _PLUGINS_DIR)
try:
entries = tf.io.gfile.listdir(plugins_dir)
except tf.errors.NotFoundError:
return []
return [x.rstrip('/') for x in entries if (x.endswith('/') or _IsDirectory(plugins_dir, x))]
|
List all the plugins that have registered assets in logdir.
If the plugins_dir does not exist, it returns an empty list. This maintains
compatibility with old directories that have no plugins written.
Args:
logdir: A directory that was created by a TensorFlow events writer.
Returns:
a list of plugin names, as strings
|
codesearchnet
|
def resource_import(filename: str, *, module: Optional[epath.PathLike]=None) -> str:
path = epath.resource_path(module) if module else _static_path()
path = path.joinpath(filename)
content = path.read_text()
if path.suffix == '.css':
return f'<style>{content}</style>'
elif path.suffix == '.js':
return f'<script>{content}</script>'
else:
raise ValueError(f'Unsupported resource extension: {path.suffix!r} (expected .css or .js)')
|
Returns the `HTML` associated with the resource.
Args:
filename: Path to the `.css`, `.js` resource
module: Python module name that the filename is relative to.
|
github-repos
|
def bytes_to_long(bytesdata: bytes) -> int:
assert len(bytesdata) == 8
return sum((b << (k * 8) for k, b in enumerate(bytesdata)))
|
Converts an 8-byte sequence to a long integer.
Args:
bytesdata: 8 consecutive bytes, as a ``bytes`` object, in
little-endian format (least significant byte [LSB] first)
Returns:
integer
|
juraj-google-style
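A tiny self-check against int.from_bytes, assuming the function above is in scope:

data = bytes([1, 0, 0, 0, 0, 0, 0, 0])
assert bytes_to_long(data) == int.from_bytes(data, 'little') == 1
assert bytes_to_long(b'\x00' * 7 + b'\x01') == 2 ** 56
print('ok')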
|
def open_required(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
'Wrapper function to check that the given ``JLink`` has been\n opened.\n\n Args:\n self (JLink): the ``JLink`` instance\n args: list of arguments to pass to the wrapped function\n kwargs: key-word arguments dict to pass to the wrapped function\n\n Returns:\n The return value of the wrapped function.\n\n Raises:\n JLinkException: if the J-Link DLL is not open or the J-Link is\n disconnected.\n '
if (not self.opened()):
raise errors.JLinkException('J-Link DLL is not open.')
elif (not self.connected()):
raise errors.JLinkException('J-Link connection has been lost.')
return func(self, *args, **kwargs)
return wrapper
|
Decorator to specify that the J-Link DLL must be opened, and a
J-Link connection must be established.
Args:
func (function): function being decorated
Returns:
The wrapper function.
|
codesearchnet
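The shape of this guarded-call pattern, shown with a toy connection class instead of JLink; a sketch reduced to a single precondition, not the library's API.

import functools

def open_required(func):
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if not self.opened():
            raise RuntimeError('connection is not open')
        return func(self, *args, **kwargs)
    return wrapper

class Connection:
    def __init__(self):
        self._open = False

    def opened(self):
        return self._open

    def open(self):
        self._open = True

    @open_required
    def read(self):
        return b'data'

conn = Connection()
conn.open()
print(conn.read())   # b'data'; calling read() before open() raises RuntimeError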
|
def _config_session():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = '0'
return tf.Session(config=config)
|
Configure session for particular device
Returns:
tensorflow.Session
|
codesearchnet
|
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
if token_ids_1 is None:
return token_ids_0 + [self.sep_token_id]
sep = [self.sep_token_id]
return token_ids_0 + sep + token_ids_1 + sep
|
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
github-repos
|
def char_decode(self, sequences):
decode_strs = [seq.replace(' ', '') for seq in self.char_tokenizer.batch_decode(sequences)]
return decode_strs
|
Convert a list of lists of char token ids into a list of strings by calling char tokenizer.
Args:
sequences (`torch.Tensor`):
List of tokenized input ids.
Returns:
`List[str]`: The list of char decoded sentences.
|
github-repos
|
def detect_content_type(self, path=None, payload=None, objectInput=None):
if objectInput:
message = 'Detection content type with file object is not stable.'
log.exception(message)
raise TikaAppError(message)
f = file_path(path, payload, objectInput)
switches = ['-d', f]
result = self._command_template(switches).lower()
return (result, path, f)
|
Return the content type of passed file or payload.
Args:
path (string): Path of file to analyze
payload (string): Payload base64 to analyze
objectInput (object): file object/standard input to analyze
Returns:
content type of file (string)
|
codesearchnet
|
def update(self, data):
for (key, value) in data.items():
setattr(self, key, value)
|
Update the current memory record with the given data dict.
Args:
data (dict): Data dictionary to update the record attributes with.
|
codesearchnet
|
def __init__(self, content=None, min=0, max=HUGE, name=None):
assert 0 <= min <= max <= HUGE, (min, max)
if content is not None:
content = tuple(map(tuple, content))
assert len(content), repr(content)
for alt in content:
assert len(alt), repr(alt)
self.content = content
self.min = min
self.max = max
self.name = name
|
Initializer.
Args:
content: optional sequence of subsequences of patterns; if absent,
matches one node; if present, each subsequence is an alternative [*]
min: optional minimum number of times to match, default 0
max: optional maximum number of times to match, default HUGE
name: optional name assigned to this match
[*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
equivalent to (a b c | d e | f g h); if content is None,
this is equivalent to '.' in regular expression terms.
The min and max parameters work as follows:
min=0, max=maxint: .*
min=1, max=maxint: .+
min=0, max=1: .?
min=1, max=1: .
If content is not None, replace the dot with the parenthesized
list of alternatives, e.g. (a b c | d e | f g h)*
|
github-repos
|
def __init__(self, config=None, start=True):
config = config or DispatcherConfig()
if config.fault_tolerant_mode and (not config.work_dir):
raise ValueError('Cannot enable fault tolerant mode without configuring a work dir. Make sure to set `work_dir` in the `config` object passed to `DispatcherServer`.')
self._config = config
if isinstance(config, service_config_pb2.DispatcherConfig):
config_proto = config
else:
config_proto = service_config_pb2.DispatcherConfig(port=config.port, protocol=config.protocol, work_dir=config.work_dir, fault_tolerant_mode=config.fault_tolerant_mode, worker_addresses=config.worker_addresses, job_gc_check_interval_ms=config.job_gc_check_interval_ms, job_gc_timeout_ms=config.job_gc_timeout_ms, worker_timeout_ms=config.worker_timeout_ms, worker_max_concurrent_snapshots=config.worker_max_concurrent_snapshots)
self._server = _pywrap_server_lib.TF_DATA_NewDispatchServer(config_proto.SerializeToString())
if start:
self._server.start()
|
Creates a new dispatch server.
Args:
config: (Optional.) A `tf.data.experimental.service.DispatcherConfig`
configuration. If `None`, the dispatcher will use default configuration
values.
start: (Optional.) Boolean, indicating whether to start the server after
creating it. Defaults to True.
|
github-repos
|
def _add_bound_method(self, bound_method, identify_observed):
inst = bound_method.__self__
method_name = bound_method.__name__
key = self.make_key(bound_method)
if key not in self.observers:
self.observers[key] = ObserverBoundMethod(
inst, method_name, identify_observed, (key, self.observers))
return True
else:
return False
|
Add a bound method as an observer.
Args:
bound_method: The bound method to add as an observer.
identify_observed: See the docstring for add_observer.
Returns:
True if the bound method is added, otherwise False.
|
juraj-google-style
|