code (string, 20–4.93k chars) | docstring (string, 33–1.27k chars) | source (3 classes) |
---|---|---|
def read_hdf(cls, path_or_buf, **kwargs):
if cls.read_hdf_remote_task is None:
return super(RayIO, cls).read_hdf(path_or_buf, **kwargs)
format = cls._validate_hdf_format(path_or_buf=path_or_buf)
if format is None:
ErrorMessage.default_to_pandas(
"File format seems to be `fixed`. For better distribution consider saving the file in `table` format. "
"df.to_hdf(format=`table`)."
)
return cls.from_pandas(pandas.read_hdf(path_or_buf=path_or_buf, **kwargs))
columns = kwargs.get("columns", None)
if not columns:
empty_pd_df = pandas.read_hdf(path_or_buf, start=0, stop=0)
columns = empty_pd_df.columns
num_partitions = cls.frame_mgr_cls._compute_num_partitions()
num_splits = min(len(columns), num_partitions)
column_splits = (
len(columns) // num_partitions
if len(columns) % num_partitions == 0
else len(columns) // num_partitions + 1
)
col_partitions = [
columns[i : i + column_splits]
for i in range(0, len(columns), column_splits)
]
blk_partitions = np.array(
[
cls.read_hdf_remote_task._remote(
args=(path_or_buf, cols, num_splits, kwargs),
num_return_vals=num_splits + 1,
)
for cols in col_partitions
]
).T
remote_partitions = np.array(
[
[cls.frame_partition_cls(obj) for obj in row]
for row in blk_partitions[:-1]
]
)
index_len = ray.get(blk_partitions[-1][0])
index = pandas.RangeIndex(index_len)
new_query_compiler = cls.query_compiler_cls(
cls.frame_mgr_cls(remote_partitions), index, columns
)
return new_query_compiler
|
Load an h5 file from the file path or buffer, returning a DataFrame.
Args:
path_or_buf: string, buffer or path object
Path to the file to open, or an open :class:`pandas.HDFStore` object.
kwargs: Pass into pandas.read_hdf function.
Returns:
DataFrame constructed from the h5 file.
|
juraj-google-style
|
def single_lf_summary(Y_p, Y=None):
L = sparse.csr_matrix(arraylike_to_numpy(Y_p).reshape(-1, 1))
return lf_summary(L, Y)
|
Calculates coverage, overlap, conflicts, and accuracy for a single LF
Args:
Y_p: a np.array or torch.Tensor of predicted labels
Y: a np.array or torch.Tensor of true labels (if known)
|
juraj-google-style
|
def make_one_shot_iterator(dataset: DatasetV1) -> Union[iterator_ops.Iterator, iterator_ops.OwnedIterator]:
try:
return dataset._make_one_shot_iterator()
except AttributeError:
return DatasetV1Adapter(dataset)._make_one_shot_iterator()
|
Creates an iterator for elements of `dataset`.
Note: The returned iterator will be initialized automatically.
A "one-shot" iterator does not support re-initialization.
Args:
dataset: A `tf.data.Dataset`.
Returns:
A `tf.data.Iterator` for elements of `dataset`.
@compatibility(TF2)
This is a legacy API for consuming dataset elements and should only be used
during transition from TF 1 to TF 2. Note that using this API should be
a transient state of your code base as there are in general no guarantees
about the interoperability of TF 1 and TF 2 code.
In TF 2 datasets are Python iterables which means you can consume their
elements using `for elem in dataset: ...` or by explicitly creating iterator
via `iterator = iter(dataset)` and fetching its elements via
`values = next(iterator)`.
@end_compatibility
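Example (illustrative sketch; assumes this helper is importable alongside TensorFlow during a
TF1-to-TF2 transition):
>>> dataset = tf.compat.v1.data.Dataset.range(3)
>>> iterator = make_one_shot_iterator(dataset)
>>> value = iterator.get_next()  # yields the first element of `dataset`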
|
github-repos
|
def variant(self, case_id, variant_id):
case_obj = self.case(case_id=case_id)
vcf_file_path = case_obj.variant_source
self.head = get_header(vcf_file_path)
self.vep_header = self.head.vep_columns
self.snpeff_header = self.head.snpeff_columns
handle = VCF(vcf_file_path)
for index, variant in enumerate(handle):
index += 1
line_id = get_variant_id(variant_line=str(variant)).lstrip('chrCHR')
if line_id == variant_id:
return self._format_variants(
variant=variant,
index=index,
case_obj=case_obj,
add_all_info=True
)
return None
|
Return a specific variant.
Args:
case_id (str): Path to vcf file
variant_id (str): A variant id
Returns:
variant (Variant): The variant object for the given id
|
juraj-google-style
|
def _import_object(self, path, look_for_cls_method):
last_nth = 2 if look_for_cls_method else 1
path = path.split('.')
module_path = '.'.join(path[:-last_nth])
class_name = path[-last_nth]
module = importlib.import_module(module_path)
if look_for_cls_method and path[-last_nth:][0] == path[-last_nth]:
class_method = path[-last_nth:][1]
else:
class_method = None
return getattr(module, class_name), class_name, class_method
|
Imports the module that contains the referenced method.
Args:
path: python path of class/function
look_for_cls_method (bool): If True, treat the last part of path as class method.
Returns:
Tuple. (class object, class name, method to be called)
|
juraj-google-style
|
def hour(self, value=None):
if (value is not None):
try:
value = int(value)
except ValueError:
raise ValueError('value {} needs to be of type int for field `hour`'.format(value))
if (value < 1):
raise ValueError('value needs to be greater than or equal to 1 for field `hour`')
if (value > 24):
raise ValueError('value needs to be less than or equal to 24 for field `hour`')
self._hour = value
|
Corresponds to IDD Field `hour`
Args:
value (int): value for IDD Field `hour`
value >= 1
value <= 24
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def create(self, msgtype, *args, **kwargs):
if (msgtype not in self._messages):
raise ProtocolError(('Unknown message type %r for protocol version %s' % (msgtype, self._version)))
return self._messages[msgtype].create(*args, **kwargs)
|
Create a new Message instance for the given type.
Args:
msgtype (str) :
|
codesearchnet
|
def _GetExpectedFractionalAvgPoolResult(self, input_tensor, row_seq, col_seq, overlapping):
input_shape = input_tensor.shape
output_shape = (input_shape[0], len(row_seq) - 1, len(col_seq) - 1, input_shape[3])
output_tensor = np.zeros(shape=output_shape, dtype=input_tensor.dtype)
for batch in range(input_shape[0]):
for channel in range(input_shape[3]):
two_dim_slice = input_tensor[batch, :, :, channel]
tmp = self._AvgPoolAlongRows(two_dim_slice, row_seq, overlapping)
output_tensor[batch, :, :, channel] = self._AvgPoolAlongCols(tmp, col_seq, overlapping)
return output_tensor
|
Get expected fractional average pooling result.
row_seq and col_seq together define the fractional pooling region.
Args:
input_tensor: Original input tensor, assuming it is a 4-D tensor, with
dimension as [batch, height/row, width/column, channels/depth].
row_seq: Cumulative pooling sequence along row.
col_seq: Cumulative pooling sequence along column.
overlapping: Use overlapping when doing pooling.
Returns:
A 4-D tensor that is the result of average pooling on input_tensor based
on pooling region defined by row_seq and col_seq, conditioned on whether
or not overlapping is used.
|
github-repos
|
def has_deprecation_decorator(symbol):
decorators, symbol = tf_decorator.unwrap(symbol)
if contains_deprecation_decorator(decorators):
return True
if tf_inspect.isfunction(symbol):
return False
if not tf_inspect.isclass(symbol):
return False
if not hasattr(symbol, '__init__'):
return False
init_decorators, _ = tf_decorator.unwrap(symbol.__init__)
return contains_deprecation_decorator(init_decorators)
|
Checks if given object has a deprecation decorator.
We check if deprecation decorator is in decorators as well as
whether symbol is a class whose __init__ method has a deprecation
decorator.
Args:
symbol: Python object.
Returns:
True if symbol has deprecation decorator.
|
github-repos
|
def __call__(self, hidden_states, cls_index=None, deterministic: bool=True):
output = hidden_states[:, 0]
output = self.first_dropout(output, deterministic=deterministic)
output = self.summary(output)
output = self.activation(output)
output = self.last_dropout(output, deterministic=deterministic)
return output
|
Compute a single vector summary of a sequence hidden states.
Args:
hidden_states (`jnp.ndarray` of shape `[batch_size, seq_len, hidden_size]`):
The hidden states of the last layer.
cls_index (`jnp.ndarray` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*):
Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token.
Returns:
`jnp.ndarray`: The summary of the sequence hidden states.
|
github-repos
|
def freeze_graph(sess, input_tensors, output_tensors):
graph_def = _convert_to_constants.disable_lower_using_switch_merge(sess.graph_def)
config = get_grappler_config(['function'])
graph_def = run_graph_optimizations(graph_def, input_tensors, output_tensors, config, graph=sess.graph)
hinted_outputs_nodes = find_all_hinted_output_nodes(sess)
if hinted_outputs_nodes:
return _convert_op_hints_if_present(sess, graph_def, output_tensors, hinted_outputs_nodes)
if not is_frozen_graph(sess):
output_node_names = [tensor.name.split(':')[0] for tensor in output_tensors]
return _convert_to_constants.convert_variables_to_constants(sess, graph_def, output_node_names)
else:
return sess.graph_def
|
Returns a frozen GraphDef.
Runs a Grappler pass and freezes a graph with Variables in it. Otherwise the
existing GraphDef is returned. The Grappler pass is only run on models that
are frozen in order to inline the functions in the graph.
If OpHints is present, it will try to convert the OpHint graph.
Args:
sess: TensorFlow Session.
input_tensors: List of input tensors.
output_tensors: List of output tensors (only .name is used from this).
Returns:
Frozen GraphDef.
|
github-repos
|
def serial_wire_viewer(jlink_serial, device):
buf = StringIO.StringIO()
jlink = pylink.JLink(log=buf.write, detailed_log=buf.write)
jlink.open(serial_no=jlink_serial)
jlink.set_tif(pylink.enums.JLinkInterfaces.SWD)
jlink.connect(device, verbose=True)
jlink.coresight_configure()
jlink.set_reset_strategy(pylink.enums.JLinkResetStrategyCortexM3.RESETPIN)
jlink.reset()
jlink.halt()
cpu_speed = jlink.cpu_speed()
swo_speed = jlink.swo_supported_speeds(cpu_speed, 10)[0]
jlink.swo_start(swo_speed)
jlink.swo_flush()
sys.stdout.write('Serial Wire Viewer\n')
sys.stdout.write('Press Ctrl-C to Exit\n')
sys.stdout.write('Reading data from port 0:\n\n')
jlink.reset(ms=10, halt=False)
try:
while True:
num_bytes = jlink.swo_num_bytes()
if num_bytes == 0:
time.sleep(1)
continue
data = jlink.swo_read_stimulus(0, num_bytes)
sys.stdout.write(''.join(map(chr, data)))
sys.stdout.flush()
except KeyboardInterrupt:
pass
sys.stdout.write('\n')
jlink.swo_stop()
return 0
|
Implements a Serial Wire Viewer (SWV).
A Serial Wire Viewer (SWV) allows us to implement real-time logging of output
from a connected device over Serial Wire Output (SWO).
Args:
jlink_serial (str): the J-Link serial number
device (str): the target CPU
Returns:
Always returns ``0``.
Raises:
JLinkException: on error
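Example (illustrative; the serial number and device name below are placeholders):
>>> serial_wire_viewer('123456789', 'STM32F407IE')
Serial Wire Viewer
Press Ctrl-C to Exit
Reading data from port 0: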
|
juraj-google-style
|
def _WriteRow(self, output_writer, values, in_bold=False):
row_strings = []
for (value_index, value_string) in enumerate(values):
padding_size = (self._column_sizes[value_index] - len(value_string))
padding_string = (' ' * padding_size)
row_strings.extend([value_string, padding_string])
row_strings.pop()
row_strings = ''.join(row_strings)
if (in_bold and (not win32console)):
row_strings = '\x1b[1m{0:s}\x1b[0m'.format(row_strings)
output_writer.Write('{0:s}\n'.format(row_strings))
|
Writes a row of values aligned with the width to the output writer.
Args:
output_writer (CLIOutputWriter): output writer.
values (list[object]): values.
in_bold (Optional[bool]): True if the row should be written in bold.
|
codesearchnet
|
def add_component(self, component, temporary=False):
tile = IOTile(component)
value = os.path.normpath(os.path.abspath(component))
if (temporary is True):
self._component_overlays[tile.name] = value
else:
self.kvstore.set(tile.name, value)
|
Register a component with ComponentRegistry.
Component must be a buildable object with a module_settings.json file
that describes its name and the domain that it is part of. By
default, this component is saved in the permanent registry associated
with this environment and will remain registered for future CoreTools
invocations.
If you only want this component to be temporarily registered during
this program's session, you can pass temporary=True and the component
will be stored in RAM only, not persisted to the underlying key-value
store.
Args:
component (str): The path to a component that should be registered.
temporary (bool): Optional flag to only temporarily register the
component for the duration of this program invocation.
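Example (sketch; assumes ``reg`` is an already constructed ComponentRegistry and the path
points at a folder containing a module_settings.json file):
>>> reg.add_component('./my_component')                   # persisted in the key-value store
>>> reg.add_component('./my_component', temporary=True)   # registered in RAM only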
|
codesearchnet
|
def write(self, name, **data):
data["name"] = name
if not ("timestamp" in data):
data["timestamp"] = datetime.utcnow()
try:
self.producer.send(topic=self.topic, value=data)
self.producer.flush()
except (KafkaTimeoutError, NoBrokersAvailable) as exc:
logger.warning('writing metric %r failure %r', data, exc)
|
Write the metric to kafka
Args:
name (str): The name of the metric to write
data (dict): Additional data to store with the metric
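Example (sketch; assumes ``metrics`` is an instance already configured with a Kafka
producer and topic):
>>> metrics.write('requests.count', value=1, host='web-1')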
|
juraj-google-style
|
def _ParseVSSProcessingOptions(self, options):
vss_only = False
vss_stores = None
self._process_vss = not getattr(options, 'no_vss', False)
if self._process_vss:
vss_only = getattr(options, 'vss_only', False)
vss_stores = getattr(options, 'vss_stores', None)
if vss_stores:
try:
self._ParseVolumeIdentifiersString(vss_stores, prefix='vss')
except ValueError:
raise errors.BadConfigOption('Unsupported VSS stores')
self._vss_only = vss_only
self._vss_stores = vss_stores
|
Parses the VSS processing options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
|
juraj-google-style
|
def softplus_and_shift(x, shift=1e-5, name=None):
with tf.compat.v1.name_scope(name, 'softplus_and_shift', [x, shift]):
x = tf.convert_to_tensor(value=x, name='x')
y = tf.nn.softplus(x)
if shift is not None:
y += shift
return y
|
Converts (batch of) scalars to (batch of) positive valued scalars.
Args:
x: (Batch of) `float`-like `Tensor` representing scalars which will be
transformed into positive elements.
shift: `Tensor` added to `softplus` transformation of elements.
Default value: `1e-5`.
name: A `name_scope` name for operations created by this function.
Default value: `None` (i.e., "softplus_and_shift").
Returns:
scale: (Batch of) scalars with `x.dtype` and `x.shape`.
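Example (illustrative; printed values are approximate and assume eager execution):
>>> softplus_and_shift(tf.constant([-2., 0., 2.]))  # ~[0.12694, 0.69316, 2.12694]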
|
juraj-google-style
|
def from_dict(cls, fields, mapping):
iterable = ([None] * len(fields))
for (key, value) in mapping.items():
try:
index = fields.index(key)
except KeyError:
raise ItsdbError(('Invalid field name(s): ' + key))
iterable[index] = value
return cls(fields, iterable)
|
Create a Record from a dictionary of field mappings.
The *fields* object is used to determine the column indices
of fields in the mapping.
Args:
fields: the Relation schema for the table of this record
mapping: a dictionary or other mapping from field names to
column values
Returns:
a :class:`Record` object
|
codesearchnet
|
def get_tool_filepath(self, tool_alias):
tools_dict = self.get_tools()
if (tool_alias in tools_dict):
if (self.tools_path is None):
return None
else:
return os.path.join(self.tools_path, tool_alias)
else:
return None
|
Given a visible tool alias, return the full path to the executable.
Args:
tool_alias (str): Tool alias to search for.
Returns:
(str): Filepath of executable, or None if the tool is not in the
suite. May also return None because this suite has not been saved
to disk, so a filepath hasn't yet been established.
|
codesearchnet
|
def loss_masks(self, masks_queries_logits: torch.Tensor, mask_labels: List[torch.Tensor], indices: Tuple[np.array], num_masks: int) -> Dict[str, torch.Tensor]:
src_idx = self._get_predictions_permutation_indices(indices)
tgt_idx = self._get_targets_permutation_indices(indices)
pred_masks = masks_queries_logits[src_idx]
target_masks, _ = self._pad_images_to_max_in_batch(mask_labels)
target_masks = target_masks[tgt_idx]
pred_masks = pred_masks[:, None]
target_masks = target_masks[:, None]
with torch.no_grad():
point_coordinates = self.sample_points_using_uncertainty(pred_masks, lambda logits: self.calculate_uncertainty(logits), self.num_points, self.oversample_ratio, self.importance_sample_ratio)
point_labels = sample_point(target_masks, point_coordinates, align_corners=False).squeeze(1)
point_logits = sample_point(pred_masks, point_coordinates, align_corners=False).squeeze(1)
losses = {'loss_mask': sigmoid_cross_entropy_loss(point_logits, point_labels, num_masks), 'loss_dice': dice_loss(point_logits, point_labels, num_masks)}
del pred_masks
del target_masks
return losses
|
Compute the losses related to the masks using sigmoid_cross_entropy_loss and dice loss.
Args:
masks_queries_logits (`torch.Tensor`):
A tensor of shape `(batch_size, num_queries, height, width)`.
mask_labels (`torch.Tensor`):
List of mask labels of shape `(labels, height, width)`.
indices (`Tuple[np.array])`:
The indices computed by the Hungarian matcher.
num_masks (`int)`:
The number of masks, used for normalization.
Returns:
losses (`Dict[str, Tensor]`): A dict of `torch.Tensor` containing two keys:
- **loss_mask** -- The loss computed using sigmoid cross entropy loss on the predicted and ground truth masks.
- **loss_dice** -- The loss computed using dice loss on the predicted and ground truth masks.
|
github-repos
|
def write(self, record):
super(TFRecordWriter, self).write(record)
|
Write a string record to the file.
Args:
record: str
|
github-repos
|
def __init__(self, output_mediator):
super(XLSXOutputModule, self).__init__(output_mediator)
self._column_widths = {}
self._current_row = 0
self._dynamic_fields_helper = dynamic.DynamicFieldsHelper(output_mediator)
self._fields = self._DEFAULT_FIELDS
self._filename = None
self._sheet = None
self._timestamp_format = self._DEFAULT_TIMESTAMP_FORMAT
self._workbook = None
|
Initializes an Excel Spreadsheet (XLSX) output module.
Args:
output_mediator (OutputMediator): output mediator.
|
juraj-google-style
|
def post_state(self, name, state):
self.post_command(OPERATIONS.CMD_UPDATE_STATE,
{'name': name, 'new_status': state})
|
Asynchronously try to update the state for a service.
If the update fails, nothing is reported because we don't wait for a
response from the server. This function will return immediately and
not block.
Args:
name (string): The name of the service
state (int): The new state of the service
|
juraj-google-style
|
def upsert_project(self, project, id=None, description=None, entity=None):
mutation = gql()
response = self.gql(mutation, variable_values={
'name': self.format_project(project), 'entity': entity or self.settings('entity'),
'description': description, 'repo': self.git.remote_url, 'id': id})
return response['upsertModel']['model']
|
Create a new project
Args:
project (str): The project to create
description (str, optional): A description of this project
entity (str, optional): The entity to scope this project to.
|
juraj-google-style
|
def createRoles(self, configFiles, dateTimeFormat=None):
if dateTimeFormat is None:
dateTimeFormat = '%Y-%m-%d %H:%M'
scriptStartTime = datetime.datetime.now()
try:
print ("********************Create Roles********************")
print ("Script started at %s" % scriptStartTime.strftime(dateTimeFormat))
if self.securityhandler.valid == False:
print ("Login required")
else:
orgTools = orgtools.orgtools(securityinfo=self)
if orgTools is None:
print ("Error creating orgtools")
else:
for configFile in configFiles:
config = common.init_config_json(config_file=configFile)
if config is not None:
startTime = datetime.datetime.now()
print ("Processing config %s, starting at: %s" % (configFile,startTime.strftime(dateTimeFormat)))
roleInfos = config['Roles']
for roleInfo in roleInfos:
createRoleResults = orgTools.createRole(roleInfo['Name'],roleInfo['Description'],roleInfo['Privileges'])
else:
print ("Config %s not found" % configFile)
except(TypeError,ValueError,AttributeError) as e:
print (e)
except (common.ArcRestHelperError) as e:
print ("error in function: %s" % e[0]['function'])
print ("error on line: %s" % e[0]['line'])
print ("error in file name: %s" % e[0]['filename'])
print ("with error message: %s" % e[0]['synerror'])
if 'arcpyError' in e[0]:
print ("with arcpy message: %s" % e[0]['arcpyError'])
except Exception as e:
if (reportToolsInstalled):
if isinstance(e,(ReportTools.ReportToolsError,DataPrep.DataPrepError)):
print ("error in function: %s" % e[0]['function'])
print ("error on line: %s" % e[0]['line'])
print ("error in file name: %s" % e[0]['filename'])
print ("with error message: %s" % e[0]['synerror'])
if 'arcpyError' in e[0]:
print ("with arcpy message: %s" % e[0]['arcpyError'])
else:
line, filename, synerror = trace()
print ("error on line: %s" % line)
print ("error in file name: %s" % filename)
print ("with error message: %s" % synerror)
else:
line, filename, synerror = trace()
print ("error on line: %s" % line)
print ("error in file name: %s" % filename)
print ("with error message: %s" % synerror)
finally:
print ("Script complete, time to complete: %s" % str(datetime.datetime.now() - scriptStartTime))
print ("
print ("")
groupInfo = None
groupFile = None
iconPath = None
startTime = None
thumbnail = None
result = None
config = None
sciptPath = None
orgTools = None
del groupInfo
del groupFile
del iconPath
del startTime
del thumbnail
del result
del config
del sciptPath
del orgTools
gc.collect()
|
Parses a JSON configuration file to create roles.
Args:
configFiles (list): A list of JSON files on disk containing
configuration data for creating roles.
dateTimeFormat (str): A valid date formatting directive, as understood
by :py:meth:`datetime.datetime.strftime`. Defaults to ``None``, i.e.,
``'%Y-%m-%d %H:%M'``.
|
juraj-google-style
|
def _get_timestamp_ms(when):
if when is None:
return None
ms_since_epoch = float(time.mktime(when.utctimetuple()) * 1000.0)
ms_since_epoch += when.microsecond / 1000.0
return int(ms_since_epoch)
|
Converts a datetime.datetime to integer milliseconds since the epoch.
Requires special handling to preserve microseconds.
Args:
when: A datetime.datetime instance.
Returns:
Integer time since the epoch in milliseconds. If the supplied 'when' is
None, the return value will be None.
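Example (sketch; ``time.mktime`` interprets the naive timetuple in local time, so the exact
integer depends on the machine's timezone):
>>> when = datetime.datetime(2020, 1, 1, 0, 0, 0, 250000)
>>> _get_timestamp_ms(when)   # epoch milliseconds; the 250000 microseconds contribute 250 ms
>>> _get_timestamp_ms(None)   # returns None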
|
juraj-google-style
|
def register_for_auto_class(cls, auto_class='AutoImageProcessor'):
if not isinstance(auto_class, str):
auto_class = auto_class.__name__
import transformers.models.auto as auto_module
if not hasattr(auto_module, auto_class):
raise ValueError(f'{auto_class} is not a valid auto class.')
cls._auto_class = auto_class
|
Register this class with a given auto class. This should only be used for custom image processors as the ones
in the library are already mapped with `AutoImageProcessor`.
Args:
auto_class (`str` or `type`, *optional*, defaults to `"AutoImageProcessor"`):
The auto class to register this new image processor with.
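Example (sketch; ``CustomImageProcessor`` stands in for a user-defined image processor
subclass):
>>> CustomImageProcessor.register_for_auto_class("AutoImageProcessor")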
|
github-repos
|
def GenerateBand(self, band, meta_only=False, cast=False):
if (not meta_only):
fname = band.get('file_name')
data = self.ReadTif(('%s/%s' % (os.path.dirname(self.filename), fname)))
def FixBitmap(d):
p = d.get('bitmap_description')
if p:
lis = p.get('bit')
bm = dict()
for i in lis:
key = i['num']
value = i['text']
bm[key] = value
del d['bitmap_description']
d['bitmap_description'] = bm
return d
band = SetProperties(Band, FixBitmap(self.CleanDict(band)))
if (not meta_only):
if cast:
data = data.astype(np.float32)
data[(data == band.fill_value)] = (- 9999)
if (band.valid_range is not None):
data[(data < band.valid_range.min)] = (- 9999)
data[(data > band.valid_range.max)] = (- 9999)
data[(data == (- 9999))] = np.nan
else:
data = np.ma.masked_where((data == band.fill_value), data)
if (band.valid_range is not None):
data = np.ma.masked_where((data < band.valid_range.min), data)
data = np.ma.masked_where((data > band.valid_range.max), data)
if self.yflip:
data = np.flip(data, 0)
band.data = data
if (not meta_only):
band.validate()
return band
|
Generate a Band object given band metadata
Args:
band (dict): dictionary containing metadata for a given band
Returns:
Band: the loaded Band object
|
codesearchnet
|
def dump(self):
walkers = {}
walkers.update({str(walker.selector): walker.dump() for walker in self._queue_walkers})
walkers.update({str(walker.selector): walker.dump() for walker in self._virtual_walkers})
return {u'engine': self._engine.dump(), u'rollover_storage': self._rollover_storage, u'rollover_streaming': self._rollover_streaming, u'last_values': {str(stream): reading.asdict() for (stream, reading) in self._last_values.items()}, u'walkers': walkers}
|
Dump the state of this SensorLog.
The purpose of this method is to be able to restore the same state
later. However there are links in the SensorLog for stream walkers.
So the dump process saves the state of each stream walker and upon
restore, it looks through the current set of stream walkers and
restores each one that existed when dump() was called to its state.
Returns:
dict: The serialized state of this SensorLog.
|
codesearchnet
|
def has_thread(prefix, running_threads):
for thread in running_threads:
if thread.startswith(prefix):
return True
return False
|
Returns whether any 'running_threads' is prefixed with 'prefix'.
Args:
prefix: The prefix of the expected thread name.
running_threads: A collection of the running thread names.
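Example (illustrative):
>>> has_thread('Thread-', ['Thread-1', 'MainThread'])
True
>>> has_thread('worker', ['MainThread'])
False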
|
github-repos
|
def check_accessible(value_provider_list):
assert isinstance(value_provider_list, list)
def _check_accessible(fnc):
@wraps(fnc)
def _f(self, *args, **kwargs):
for obj in [getattr(self, vp) for vp in value_provider_list]:
if not obj.is_accessible():
raise error.RuntimeValueProviderError('%s not accessible' % obj)
return fnc(self, *args, **kwargs)
return _f
return _check_accessible
|
A decorator that checks accessibility of a list of ValueProvider objects.
Args:
value_provider_list: list of ValueProvider objects
Raises:
``RuntimeValueProviderError``: if any of the provided objects are not
accessible.
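Example (sketch; assumes ``some_option`` holds a Beam ``ValueProvider`` built from pipeline
options)::
class _Worker(object):
    def __init__(self, some_option):
        self.some_option = some_option
    @check_accessible(['some_option'])
    def run(self):
        # Raises RuntimeValueProviderError unless some_option.is_accessible() is True.
        return self.some_option.get()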
|
github-repos
|
def remove(path, force=False):
path = os.path.expanduser(path)
if not os.path.isabs(path):
raise SaltInvocationError('File path must be absolute: {0}'.format(path))
if not os.path.exists(path) and not is_link(path):
raise CommandExecutionError('Path not found: {0}'.format(path))
if force:
file_attributes = win32api.GetFileAttributes(path)
win32api.SetFileAttributes(path, win32con.FILE_ATTRIBUTE_NORMAL)
try:
if os.path.isfile(path):
os.remove(path)
elif is_link(path):
os.rmdir(path)
else:
for name in os.listdir(path):
item = '{0}\\{1}'.format(path, name)
remove(item, force)
os.rmdir(path)
except (OSError, IOError) as exc:
if force:
win32api.SetFileAttributes(path, file_attributes)
raise CommandExecutionError(
'Could not remove \'{0}\': {1}'.format(path, exc)
)
return True
|
Remove the named file or directory
Args:
path (str): The path to the file or directory to remove.
force (bool): Remove even if marked Read-Only. Default is False
Returns:
bool: True if successful, False if unsuccessful
CLI Example:
.. code-block:: bash
salt '*' file.remove C:\\Temp
|
juraj-google-style
|
def fetch(self, url):
opener = self._urllib.build_opener()
opener.addheaders = self._requestHeaders.items()
response = opener.open(url)
headers = response.info()
raw = response.read()
raw = raw.decode('utf8')
if not 'Content-Type' in headers:
raise OEmbedError('Missing mime-type in response')
if headers['Content-Type'].find('application/xml') != -1 or \
headers['Content-Type'].find('text/xml') != -1:
response = OEmbedResponse.newFromXML(raw)
elif headers['Content-Type'].find('application/json') != -1 or \
headers['Content-Type'].find('text/javascript') != -1 or \
headers['Content-Type'].find('text/json') != -1:
response = OEmbedResponse.newFromJSON(raw)
else:
raise OEmbedError('Invalid mime-type in response - %s' % headers['Content-Type'])
return response
|
Fetch url and create a response object according to the mime-type.
Args:
url: The url to fetch data from
Returns:
OEmbedResponse object according to data fetched
|
juraj-google-style
|
class XCLIPEncoder(nn.Module):
def __init__(self, config: XCLIPConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([XCLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions)
else:
layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
|
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`XCLIPEncoderLayer`].
Args:
config: XCLIPConfig
|
github-repos
|
def blackman(x):
if any_symbolic_tensors((x,)):
return Blackman().symbolic_call(x)
return backend.numpy.blackman(x)
|
Blackman window function.
The Blackman window is a taper formed by using a weighted cosine.
Args:
x: Scalar or 1D Tensor. Window length.
Returns:
A 1D tensor containing the Blackman window values.
Example:
>>> x = keras.ops.convert_to_tensor(5)
>>> keras.ops.blackman(x)
array([-1.3877788e-17, 3.4000000e-01, 1.0000000e+00, 3.4000000e-01,
-1.3877788e-17], dtype=float32)
|
github-repos
|
def get_settings(self):
uri = '{}/settings'.format(self.data['uri'])
return self._helper.do_get(uri)
|
Gets the interconnect settings for a logical interconnect group.
Returns:
dict: Interconnect Settings.
|
codesearchnet
|
def recursive_copy(source, destination):
if os.path.isdir(source):
copy_tree(source, destination)
|
A wrapper around distutils.dir_util.copy_tree but won't throw any exception when the source
directory does not exist.
Args:
source (str): source path
destination (str): destination path
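Example (illustrative)::
recursive_copy('/tmp/source_dir', '/tmp/destination_dir')   # copies the tree if the source exists
recursive_copy('/tmp/missing_dir', '/tmp/destination_dir')  # silently does nothing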
|
codesearchnet
|
def __call__(self, name, value):
super(FloatTypeChecker, self).__call__(name, value)
if isinstance(self.minimum, float):
if value < self.minimum:
raise ValueError("%s must be greater or equal %s" % (name, self.minimum))
if isinstance(self.maximum, float):
if value > self.maximum:
raise ValueError("%s must be less or equal %s" % (name, self.maximum))
|
Call method.
Args:
name (str): the value's name.
value (float): the value to check.
Raises:
ValueError: if value is not type float.
ValueError: if value is less than minimum.
ValueError: if value is more than maximum.
|
juraj-google-style
|
def pyrdf(value, class_type=None, datatype=None, **kwargs):
if isinstance(value, BaseRdfDataType):
return value
if isinstance(value, dict):
value = value.copy()
class_type = value.pop('type')
try:
datatype = value.pop('datatype')
except KeyError:
datatype = __TYPE_MATCH__[class_type]
kwargs = value
value = kwargs.pop('value')
if not class_type:
class_type = 'literal'
if not datatype:
datatype = type(value)
try:
return __DT_LOOKUP__[class_type][datatype](value, **kwargs)
except KeyError:
rtn_val = BaseRdfDataType(value)
rtn_val.datatype = Uri(datatype)
return rtn_val
|
Converts an input to one of the rdfdatatypes classes
Args:
value: any rdfdatatype, json dict or value
class_type: "literal", "uri" or "blanknode"
datatype: "xsd:string", "xsd:int" , etc
kwargs:
lang: language tag
|
juraj-google-style
|
def filter(self, **filters):
for (flt, val) in self._flt.items():
self._flt[flt] = filters.pop(flt, val)
if filters:
raise error.UnknownFiltersError(filters.keys())
return self
|
Update filters with provided arguments.
Note that filters are only resolved when the view is iterated, and
hence they do not compose. Each call to filter merely updates the
relevant filters. For example, with this code::
view = sdat.steps[500:].filter(rprof=True, fields=['T'])
view.filter(fields=[])
the produced ``view``, when iterated, will generate the steps after the
500-th that have radial profiles. The ``fields`` filter set in the
first line is emptied in the second line.
Args:
snap (bool): the step must be a snapshot to pass.
rprof (bool): the step must have rprof data to pass.
fields (list): list of fields that must be present to pass.
func (function): arbitrary function taking a
:class:`~stagpy._step.Step` as argument and returning a True
value if the step should pass the filter.
Returns:
self.
|
codesearchnet
|
def get_twitter_id(self, cache=True):
if not (cache and ('twitter' in self.cache)):
response = self.get_attribute('twitter')
self.cache['twitter'] = response['artist'].get('twitter')
return self.cache['twitter']
|
Get the twitter id for this artist if it exists
Args:
Kwargs:
Returns:
A twitter ID string
Example:
>>> a = artist.Artist('big boi')
>>> a.get_twitter_id()
u'BigBoi'
>>>
|
juraj-google-style
|
class DonutSwinPatchMerging(nn.Module):
def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def maybe_pad(self, input_feature, height, width):
should_pad = height % 2 == 1 or width % 2 == 1
if should_pad:
pad_values = (0, 0, 0, width % 2, 0, height % 2)
input_feature = nn.functional.pad(input_feature, pad_values)
return input_feature
def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor:
height, width = input_dimensions
batch_size, dim, num_channels = input_feature.shape
input_feature = input_feature.view(batch_size, height, width, num_channels)
input_feature = self.maybe_pad(input_feature, height, width)
input_feature_0 = input_feature[:, 0::2, 0::2, :]
input_feature_1 = input_feature[:, 1::2, 0::2, :]
input_feature_2 = input_feature[:, 0::2, 1::2, :]
input_feature_3 = input_feature[:, 1::2, 1::2, :]
input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
input_feature = input_feature.view(batch_size, -1, 4 * num_channels)
input_feature = self.norm(input_feature)
input_feature = self.reduction(input_feature)
return input_feature
|
Patch Merging Layer.
Args:
input_resolution (`Tuple[int]`):
Resolution of input feature.
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
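Example (shape-only sketch; assumes the class is importable from the surrounding module):
>>> merge = DonutSwinPatchMerging(input_resolution=(8, 8), dim=96)
>>> features = torch.randn(2, 8 * 8, 96)
>>> merge(features, (8, 8)).shape
torch.Size([2, 16, 192])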
|
github-repos
|
def create_batch(cls, size, **kwargs):
return [cls.create(**kwargs) for _ in range(size)]
|
Create a batch of instances of the given class, with overridden attrs.
Args:
size (int): the number of instances to create
Returns:
object list: the created instances
|
juraj-google-style
|
def unstage_signature(vcs, signature):
evidence_path = _get_staged_history_path(vcs)
staged = get_staged_signatures(vcs)
if (signature not in staged):
raise NotStagedError
staged.remove(signature)
string = '\n'.join(staged)
with open(evidence_path, 'w') as f:
f.write(string)
|
Remove `signature` from the list of staged signatures
Args:
vcs (easyci.vcs.base.Vcs)
signature (basestring)
Raises:
NotStagedError
|
codesearchnet
|
def _try_put(self, item):
try:
self._event_queue.put(item)
except QueueClosedError:
self._internal_close()
if self._worker.failure_exc_info:
_, exception, _ = self._worker.failure_exc_info
raise exception from None
|
Attempts to enqueue an item to the event queue.
If the queue is closed, this will close the EventFileWriter and reraise the
exception that caused the queue closure, if one exists.
Args:
item: the item to enqueue
|
github-repos
|
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3):
if (kmip_version < enums.KMIPVersion.KMIP_1_3):
raise exceptions.VersionNotSupported('KMIP {} does not support the RNGParameters object.'.format(kmip_version.value))
super(RNGParameters, self).read(input_buffer, kmip_version=kmip_version)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.RNG_ALGORITHM, local_buffer):
rng_algorithm = primitives.Enumeration(enums.RNGAlgorithm, tag=enums.Tags.RNG_ALGORITHM)
rng_algorithm.read(local_buffer, kmip_version=kmip_version)
self._rng_algorithm = rng_algorithm
else:
raise exceptions.InvalidKmipEncoding('The RNGParameters encoding is missing the RNG algorithm.')
if self.is_tag_next(enums.Tags.CRYPTOGRAPHIC_ALGORITHM, local_buffer):
cryptographic_algorithm = primitives.Enumeration(enums.CryptographicAlgorithm, tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM)
cryptographic_algorithm.read(local_buffer, kmip_version=kmip_version)
self._cryptographic_algorithm = cryptographic_algorithm
if self.is_tag_next(enums.Tags.CRYPTOGRAPHIC_LENGTH, local_buffer):
cryptographic_length = primitives.Integer(tag=enums.Tags.CRYPTOGRAPHIC_LENGTH)
cryptographic_length.read(local_buffer, kmip_version=kmip_version)
self._cryptographic_length = cryptographic_length
if self.is_tag_next(enums.Tags.HASHING_ALGORITHM, local_buffer):
hashing_algorithm = primitives.Enumeration(enums.HashingAlgorithm, tag=enums.Tags.HASHING_ALGORITHM)
hashing_algorithm.read(local_buffer, kmip_version=kmip_version)
self._hashing_algorithm = hashing_algorithm
if self.is_tag_next(enums.Tags.DRBG_ALGORITHM, local_buffer):
drbg_algorithm = primitives.Enumeration(enums.DRBGAlgorithm, tag=enums.Tags.DRBG_ALGORITHM)
drbg_algorithm.read(local_buffer, kmip_version=kmip_version)
self._drbg_algorithm = drbg_algorithm
if self.is_tag_next(enums.Tags.RECOMMENDED_CURVE, local_buffer):
recommended_curve = primitives.Enumeration(enums.RecommendedCurve, tag=enums.Tags.RECOMMENDED_CURVE)
recommended_curve.read(local_buffer, kmip_version=kmip_version)
self._recommended_curve = recommended_curve
if self.is_tag_next(enums.Tags.FIPS186_VARIATION, local_buffer):
fips186_variation = primitives.Enumeration(enums.FIPS186Variation, tag=enums.Tags.FIPS186_VARIATION)
fips186_variation.read(local_buffer, kmip_version=kmip_version)
self._fips186_variation = fips186_variation
if self.is_tag_next(enums.Tags.PREDICTION_RESISTANCE, local_buffer):
prediction_resistance = primitives.Boolean(tag=enums.Tags.PREDICTION_RESISTANCE)
prediction_resistance.read(local_buffer, kmip_version=kmip_version)
self._prediction_resistance = prediction_resistance
self.is_oversized(local_buffer)
|
Read the data encoding the RNGParameters structure and decode it
into its constituent parts.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.3.
Raises:
InvalidKmipEncoding: Raised if the RNG algorithm is missing from
the encoding.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the RNGParameters structure.
|
codesearchnet
|
def GetPreviousNonBlankLine(clean_lines, linenum):
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline):
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
|
Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
|
juraj-google-style
|
def bind(self, isnap, istep):
self._isteps[isnap] = istep
self.sdat.steps[istep].isnap = isnap
|
Register the isnap / istep correspondence.
Users of :class:`StagyyData` should not use this method.
Args:
isnap (int): snapshot index.
istep (int): time step index.
|
juraj-google-style
|
def __init__(self, credentials=None):
if credentials is None:
credentials = _utils.get_credentials()
self._api = _api.Api(credentials)
|
Initialize the Projects object.
Args:
credentials: the credentials for the account.
|
juraj-google-style
|
def all_days(boo):
earliest = datetime.strptime(('2015-11-12').replace('-', ' '), '%Y %m %d')
latest = datetime.strptime(datetime.today().date().isoformat().replace('-', ' '), '%Y %m %d')
num_days = (latest - earliest).days + 1
all_days = [latest - timedelta(days=x) for x in range(num_days)]
all_days.reverse()
output = []
if boo:
for d in all_days:
output.append(int(str(d).replace('-', '')[:8]))
else:
for d in all_days:
output.append(str(d)[:10])
return output
|
Return a list of all dates from 11/12/2015 to the present.
Args:
boo: if true, list contains Numbers (20151230); if false, list contains Strings ("2015-12-30")
Returns:
list of either Numbers or Strings
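Example (illustrative; the list always starts at the fixed date 11/12/2015):
>>> all_days(True)[:2]
[20151112, 20151113]
>>> all_days(False)[0]
'2015-11-12'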
|
juraj-google-style
|
def ParseFileObject(self, parser_mediator, file_object):
file_header_map = self._GetDataTypeMap('binarycookies_file_header')
try:
(file_header, file_header_data_size) = self._ReadStructureFromFileObject(file_object, 0, file_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile('Unable to read file header with error: {0!s}.'.format(exception))
if (file_header.signature != self._SIGNATURE):
raise errors.UnableToParseFile('Unsupported file signature.')
file_offset = file_header_data_size
page_sizes_data_size = (file_header.number_of_pages * 4)
page_sizes_data = file_object.read(page_sizes_data_size)
context = dtfabric_data_maps.DataTypeMapContext(values={'binarycookies_file_header': file_header})
page_sizes_map = self._GetDataTypeMap('binarycookies_page_sizes')
try:
page_sizes_array = self._ReadStructureFromByteStream(page_sizes_data, file_offset, page_sizes_map, context=context)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError('Unable to map page sizes data at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))
file_offset += page_sizes_data_size
for (page_number, page_size) in enumerate(page_sizes_array):
if parser_mediator.abort:
break
page_data = file_object.read(page_size)
if (len(page_data) != page_size):
parser_mediator.ProduceExtractionWarning('unable to read page: {0:d}'.format(page_number))
break
self._ParsePage(parser_mediator, file_offset, page_data)
file_offset += page_size
|
Parses a Safari binary cookie file-like object.
Args:
parser_mediator (ParserMediator): parser mediator.
file_object (dfvfs.FileIO): file-like object to be parsed.
Raises:
UnableToParseFile: when the file cannot be parsed, this will signal
the event extractor to apply other parsers.
|
codesearchnet
|
def wait_for_file(self, fn: str, max_wait_sec: int = 3600 * 24 * 365,
check_interval: float = 0.02) -> bool:
print("Waiting for file", fn)
start_time = time.time()
while True:
if time.time() - start_time > max_wait_sec:
util.log(f"Timeout exceeded ({max_wait_sec} sec) for {fn}")
return False
if not self.exists(fn):
time.sleep(check_interval)
continue
else:
break
return True
|
Waits for file maximum of max_wait_sec. Returns True if file was detected within specified max_wait_sec
Args:
fn: filename on task machine
max_wait_sec: how long to wait in seconds
check_interval: how often to check in seconds
Returns:
False if waiting was cut short by the max_wait_sec limit, True otherwise
|
juraj-google-style
|
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
|
Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not
make use of token type ids, therefore a list of zeros is returned
Args:
token_ids_0 (`List[int]`):
List of ids.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs
Returns:
`List[int]`: List of zeros.
|
github-repos
|
def _set_operation(a, b, set_operation, validate_indices=True):
if isinstance(a, sparse_tensor.SparseTensor):
if isinstance(b, sparse_tensor.SparseTensor):
indices, values, shape = gen_set_ops.sparse_to_sparse_set_operation(a.indices, a.values, a.dense_shape, b.indices, b.values, b.dense_shape, set_operation, validate_indices)
else:
raise ValueError('Sparse,Dense is not supported, but Dense,Sparse is. Please flip the order of your inputs.')
elif isinstance(b, sparse_tensor.SparseTensor):
indices, values, shape = gen_set_ops.dense_to_sparse_set_operation(a, b.indices, b.values, b.dense_shape, set_operation, validate_indices)
else:
indices, values, shape = gen_set_ops.dense_to_dense_set_operation(a, b, set_operation, validate_indices)
return sparse_tensor.SparseTensor(indices, values, shape)
|
Compute set operation of elements in last dimension of `a` and `b`.
All but the last dimension of `a` and `b` must match.
Args:
a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
must be sorted in row-major order.
b: `Tensor` or `SparseTensor` of the same type as `a`. Must be
`SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be sorted
in row-major order.
set_operation: String indicating set operation. See
SetOperationOp::SetOperationFromContext for valid values.
validate_indices: Whether to validate the order and range of sparse indices
in `a` and `b`.
Returns:
A `SparseTensor` with the same rank as `a` and `b`, and all but the last
dimension the same. Elements along the last dimension contain the results
of the set operation.
Raises:
TypeError: If inputs are invalid types.
ValueError: If `a` is sparse and `b` is dense.
|
github-repos
|
def from_tensor_4x4(t: torch.Tensor) -> Rigid:
if t.shape[-2:] != (4, 4):
raise ValueError('Incorrectly shaped input tensor')
rots = Rotation(rot_mats=t[..., :3, :3], quats=None)
trans = t[..., :3, 3]
return Rigid(rots, trans)
|
Constructs a transformation from a homogeneous transformation tensor.
Args:
t: [*, 4, 4] homogeneous transformation tensor
Returns:
T object with shape [*]
|
github-repos
|
def GetValueRepresentation(cls, value, version=sorted(_SERVICE_MAP.keys())[(- 1)]):
if (isinstance(value, str) or isinstance(value, unicode)):
return {'value': value, 'xsi_type': 'TextValue'}
elif isinstance(value, bool):
return {'value': value, 'xsi_type': 'BooleanValue'}
elif isinstance(value, numbers.Number):
return {'value': value, 'xsi_type': 'NumberValue'}
elif isinstance(value, datetime.datetime):
if (value.tzinfo is None):
raise googleads.errors.GoogleAdsValueError(('Datetime %s is not timezone aware.' % value))
return {'xsi_type': 'DateTimeValue', 'value': {'date': {'year': value.year, 'month': value.month, 'day': value.day}, 'hour': value.hour, 'minute': value.minute, 'second': value.second, ('timeZoneId' if (version >= 'v201811') else 'timeZoneID'): value.tzinfo.zone}}
elif isinstance(value, datetime.date):
return {'xsi_type': 'DateValue', 'value': {'year': value.year, 'month': value.month, 'day': value.day}}
elif isinstance(value, list):
if (value and (not all((isinstance(x, type(value[0])) for x in value)))):
raise googleads.errors.GoogleAdsValueError('Cannot pass more than one type in a set.')
return {'xsi_type': 'SetValue', 'values': [cls.GetValueRepresentation(v, version) for v in value]}
else:
raise googleads.errors.GoogleAdsValueError(("Can't represent unknown type: %s." % type(value)))
|
Converts a single python value to its PQL representation.
Args:
value: A python value.
version: A string identifying the Ad Manager version the value object
is compatible with. This defaults to what is currently the latest
version. This will be updated in future releases to point to what is
then the latest version.
Returns:
The value formatted for PQL statements which are compatible with a
particular API version.
|
codesearchnet
|
def bessel_k1(x, name=None):
with ops.name_scope(name, 'bessel_k1', [x]):
return gen_special_math_ops.bessel_k1(x)
|
Computes the Bessel k1 function of `x` element-wise.
Modified Bessel function of order 1.
It is preferable to use the numerically stabler function `k1e(x)` instead.
>>> tf.math.special.bessel_k1([0.5, 1., 2., 4.]).numpy()
array([1.65644112, 0.60190723, 0.13986588, 0.0124835 ], dtype=float32)
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
@compatibility(scipy)
Equivalent to scipy.special.k1
@end_compatibility
|
github-repos
|
def composite_multiscale_entropy(time_series, sample_length, scale, tolerance=None):
cmse = np.zeros(scale)
for i in range(scale):
for j in range(i):
tmp = util_granulate_time_series(time_series[j:], (i + 1))
cmse[i] += (sample_entropy(tmp, sample_length, tolerance) / (i + 1))
return cmse
|
Calculate the Composite Multiscale Entropy of the given time series.
Args:
time_series: Time series for analysis
sample_length: Number of sequential points of the time series
scale: Scale factor
tolerance: Tolerance (default = 0.1...0.2 * std(time_series))
Returns:
Vector containing Composite Multiscale Entropy
Reference:
[1] Wu, Shuen-De, et al. "Time series analysis using
composite multiscale entropy." Entropy 15.3 (2013): 1069-1084.
|
codesearchnet
|
def get_userid_from_botid(self, botid):
botinfo = self.slack_client.api_call('bots.info', bot=botid)
if (botinfo['ok'] is True):
return botinfo['bot'].get('user_id')
else:
return botid
|
Perform a lookup of bots.info to resolve a botid to a userid
Args:
botid (string): Slack botid to lookup.
Returns:
string: userid value
|
codesearchnet
|
def unshare(self, group_id, **kwargs):
path = ('/projects/%s/share/%s' % (self.get_id(), group_id))
self.manager.gitlab.http_delete(path, **kwargs)
|
Delete a shared project link within a group.
Args:
group_id (int): ID of the group.
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server failed to perform the request
|
codesearchnet
|
def lines_from_stream(f, as_interned=False):
if as_interned:
return [sys.intern(line) for line in f.read().splitlines()]
return f.read().splitlines()
|
Create a list of file lines from a given file stream.
Args:
f (io.TextIOWrapper): File stream
as_interned (bool): List of "interned" strings (default False)
Returns:
strings (list): File line list
|
juraj-google-style
|
def as_tmpfile(self, tmpdir=None):
import tempfile, shutil
tmpdir = (tempfile.mkdtemp() if (tmpdir is None) else tmpdir)
new_path = os.path.join(tmpdir, self.basename)
shutil.copy(self.filepath, new_path)
(root, ext) = os.path.splitext(self.filepath)
djrepo = (root + '.djrepo')
if os.path.exists(djrepo):
shutil.copy(djrepo, os.path.join(tmpdir, os.path.basename(djrepo)))
new = self.__class__.from_file(new_path)
if self.has_dojo_report:
new.dojo_report = self.dojo_report.deepcopy()
return new
|
Copy the pseudopotential to a temporary file and return a new pseudopotential object.
Useful for unit tests in which we have to change the content of the file.
Args:
tmpdir: If None, a new temporary directory is created and files are copied here
else tmpdir is used.
|
codesearchnet
|
def seek(self, offset, whence=os.SEEK_SET):
self._check_open()
self._buffer.reset()
self._buffer_future = None
if (whence == os.SEEK_SET):
self._offset = offset
elif (whence == os.SEEK_CUR):
self._offset += offset
elif (whence == os.SEEK_END):
self._offset = (self._file_size + offset)
else:
raise ValueError(('Whence mode %s is invalid.' % str(whence)))
self._offset = min(self._offset, self._file_size)
self._offset = max(self._offset, 0)
if self._remaining():
self._request_next_buffer()
|
Set the file's current offset.
Note if the new offset is out of bound, it is adjusted to either 0 or EOF.
Args:
offset: seek offset as number.
whence: seek mode. Supported modes are os.SEEK_SET (absolute seek),
os.SEEK_CUR (seek relative to the current position), and os.SEEK_END
(seek relative to the end, offset should be negative).
Raises:
IOError: When this buffer is closed.
ValueError: When whence is invalid.
|
codesearchnet
|
def unpack(self, buff, offset=0):
super().unpack(buff, self._pyof_class, offset)
|
Unpack the elements of the list.
This unpack method considers that all elements have the same size.
To use this class with a pyof_class that accepts elements with
different sizes, you must reimplement the unpack method.
Args:
buff (bytes): The binary data to be unpacked.
offset (int): If we need to shift the beginning of the data.
|
juraj-google-style
|
def _wrap_usage_section(source, width):
if (not any(((len(line) > width) for line in source.splitlines()))):
return source
section_header = source[:(source.index(':') + 1)].strip()
lines = [section_header]
for (commands, args) in parse_commands(source):
command = ' {} '.format(' '.join(commands))
max_len = (width - len(command))
sep = ('\n' + (' ' * len(command)))
wrapped_args = sep.join(textwrap.wrap(' '.join(args), max_len))
full_command = (command + wrapped_args)
lines += full_command.splitlines()
return '\n'.join(lines)
|
Wrap the given usage section string to the current terminal size.
Note:
Command arguments are wrapped to the column that the arguments began
on the first line of the command.
Args:
source: The section string to wrap.
Returns:
The wrapped section string.
|
codesearchnet
|
def render_secrets(
config_path,
secret_path,
):
with open(secret_path, 'r') as s_fh:
secret_ini = anyconfig.load(s_fh, ac_parser='ini')
with open(config_path, 'r') as c_fh:
raw_cfg = c_fh.read()
rendered_cfg = anytemplate.renders(raw_cfg, secret_ini, at_engine='jinja2')
p_config = ProsperConfig(config_path)
local_config = configparser.ConfigParser()
local_config.optionxform = str
local_config.read_string(rendered_cfg)
p_config.local_config = local_config
return p_config
|
combine a jinja template with a secret .ini file
Args:
config_path (str): path to .cfg file with jinja templating
secret_path (str): path to .ini-like secrets file
Returns:
ProsperConfig: rendered configuration object
|
juraj-google-style
|
def region(self, bounds):
if not isinstance(bounds, Bounds):
raise TypeError("region param bounds must be isinstance of Bounds")
_d = copy.copy(self)
_d._bounds = bounds
return _d
|
Set region of the screen area
Args:
bounds: Bounds object
Returns:
A new AndroidDevice object
Raises:
TypeError
|
juraj-google-style
|
def IsLink(self):
if (self._stat_object is None):
self._stat_object = self._GetStat()
if (self._stat_object is not None):
self.entry_type = self._stat_object.type
return (self.entry_type == definitions.FILE_ENTRY_TYPE_LINK)
|
Determines if the file entry is a link.
Returns:
bool: True if the file entry is a link.
|
codesearchnet
|
def CmdAuthenticate(self, challenge_param, app_param, key_handle, check_only=False):
self.logger.debug('CmdAuthenticate')
if ((len(challenge_param) != 32) or (len(app_param) != 32)):
raise errors.InvalidRequestError()
control = (7 if check_only else 3)
body = bytearray((((challenge_param + app_param) + bytearray([len(key_handle)])) + key_handle))
response = self.InternalSendApdu(apdu.CommandApdu(0, apdu.CMD_AUTH, control, 0, body))
response.CheckSuccessOrRaise()
return response.body
|
Attempt to obtain an authentication signature.
Ask the security key to sign a challenge for a particular key handle
in order to authenticate the user.
Args:
challenge_param: SHA-256 hash of client_data object as a bytes
object.
app_param: SHA-256 hash of the app id as a bytes object.
key_handle: The key handle to use to issue the signature as a bytes
object.
check_only: If true, only check if key_handle is valid.
Returns:
A binary structure containing the key handle, attestation, and a
signature over that by the attestation key. The precise format
is dictated by the FIDO U2F specs.
Raises:
TUPRequiredError: If check_only is False, a Test of User Precense
is required to proceed. If check_only is True, this means
the key_handle is valid.
InvalidKeyHandleError: The key_handle is not valid for this device.
ApduError: Something else went wrong on the device.
|
codesearchnet
|
def set_db_row(db, start, size, _bytearray):
client.db_write(db, start, size, _bytearray)
|
Replace a piece of data in a DB block with new data.
Args:
db (int): The db to use
start(int): The start within the db
size(int): The size of the data in bytes
_bytearray (enumerable): The data to put in the db
|
codesearchnet
|
def strip_html_tags(text, allowed_tags=None):
if (text is None):
return
if (allowed_tags is None):
allowed_tags = ALLOWED_TAGS
return bleach.clean(text, tags=allowed_tags, attributes=['id', 'class', 'style', 'href', 'title'], strip=True)
|
Strip all tags from a string except those tags provided in `allowed_tags` parameter.
Args:
text (str): string to strip html tags from
allowed_tags (list): allowed list of html tags
Returns: a string without html tags
|
codesearchnet
|
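A quick usage sketch, assuming strip_html_tags is importable from the module above (bleach performs the actual sanitising); the markup is illustrative:
# The div and its onclick handler are stripped; <p> and <b> survive along
# with their whitelisted attributes.
dirty = '<div onclick="evil()"><p class="intro">Hello <b>world</b></p></div>'
print(strip_html_tags(dirty, allowed_tags=['p', 'b']))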
def file_config(filename=None):
logger.debug('On entry into file_config(), filename = {}'.format(filename))
if (filename is None):
filename = CONFIG_DEFAULT_PATH
logger.debug('file_config() will try to open `{}`'.format(filename))
with open(filename) as f:
try:
config = json.load(f)
except ValueError as err:
raise exceptions.ConfigurationError('Failed to parse the JSON configuration from `{}`, {}'.format(filename, err))
logger.info('Configuration loaded from `{}`'.format(filename))
return config
|
Returns the config values found in a configuration file.
Args:
filename (str): the JSON file with the configuration values.
If ``None``, CONFIG_DEFAULT_PATH will be used.
Returns:
dict: The config values in the specified config file (or the
file at CONFIG_DEFAULT_PATH, if filename == None)
|
codesearchnet
|
def create(self, friendly_name=None, description=None):
if (not self.exists()):
try:
response = self._api.datasets_insert(self._name_parts, friendly_name=friendly_name, description=description)
except Exception as e:
raise e
if ('selfLink' not in response):
raise Exception(('Could not create dataset %s' % self._full_name))
return self
|
Creates the Dataset with the specified friendly name and description.
Args:
friendly_name: (optional) the friendly name for the dataset if it is being created.
description: (optional) a description for the dataset if it is being created.
Returns:
The Dataset.
Raises:
Exception if the Dataset could not be created.
|
codesearchnet
|
def _checkFunctioncode(functioncode, listOfAllowedValues=[]):
FUNCTIONCODE_MIN = 1
FUNCTIONCODE_MAX = 127
_checkInt(functioncode, FUNCTIONCODE_MIN, FUNCTIONCODE_MAX, description='functioncode')
if listOfAllowedValues is None:
return
if not isinstance(listOfAllowedValues, list):
raise TypeError('The listOfAllowedValues should be a list. Given: {0!r}'.format(listOfAllowedValues))
for value in listOfAllowedValues:
_checkInt(value, FUNCTIONCODE_MIN, FUNCTIONCODE_MAX, description='functioncode inside listOfAllowedValues')
if functioncode not in listOfAllowedValues:
raise ValueError('Wrong function code: {0}, allowed values are {1!r}'.format(functioncode, listOfAllowedValues))
|
Check that the given functioncode is in the listOfAllowedValues.
Also verifies that 1 <= function code <= 127.
Args:
* functioncode (int): The function code
* listOfAllowedValues (list of int): Allowed values. Use *None* to bypass this part of the checking.
Raises:
TypeError, ValueError
|
juraj-google-style
|
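A short sketch of how the validator behaves, assuming _checkFunctioncode and its _checkInt helper are importable from the module above:
_checkFunctioncode(3, [3, 4])     # passes: 3 is in the allowed list
_checkFunctioncode(16, None)      # passes: only the 1..127 range is checked
try:
    _checkFunctioncode(5, [3, 4])
except ValueError as err:
    print(err)                    # reports the wrong code and the allowed values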
async def request(self, method, url, params=None, headers=None, data=None, json=None, token_refresh_attempts=2, **kwargs):
if all([data, json]):
msg = '"data" and "json" request parameters can not be used at the same time'
logging.warn(msg)
raise exceptions.GCPHTTPError(msg)
req_headers = (headers or {})
req_headers.update(_utils.DEFAULT_REQUEST_HEADERS)
req_kwargs = {'params': params, 'headers': req_headers}
if data:
req_kwargs['data'] = data
if json:
req_kwargs['json'] = json
if token_refresh_attempts:
if (not (await self.valid_token_set())):
(await self._auth_client.refresh_token())
token_refresh_attempts -= 1
req_headers.update({'Authorization': f'Bearer {self._auth_client.token}'})
request_id = kwargs.get('request_id', uuid.uuid4())
logging.debug(_utils.REQ_LOG_FMT.format(request_id=request_id, method=method.upper(), url=url, kwargs=req_kwargs))
try:
async with self._session.request(method, url, **req_kwargs) as resp:
log_kw = {'request_id': request_id, 'method': method.upper(), 'url': resp.url, 'status': resp.status, 'reason': resp.reason}
logging.debug(_utils.RESP_LOG_FMT.format(**log_kw))
if (resp.status in REFRESH_STATUS_CODES):
logging.warning(f'[{request_id}] HTTP Status Code {resp.status} returned requesting {resp.url}: {resp.reason}')
if token_refresh_attempts:
logging.info(f'[{request_id}] Attempting request to {resp.url} again.')
return (await self.request(method, url, token_refresh_attempts=token_refresh_attempts, request_id=request_id, **req_kwargs))
logging.warning(f'[{request_id}] Max attempts refreshing auth token exhausted while requesting {resp.url}')
resp.raise_for_status()
return (await resp.text())
except aiohttp.ClientResponseError as e:
msg = f'[{request_id}] HTTP error response from {resp.url}: {e}'
logging.error(msg, exc_info=e)
raise exceptions.GCPHTTPResponseError(msg, resp.status)
except exceptions.GCPHTTPResponseError as e:
raise e
except Exception as e:
msg = f'[{request_id}] Request call failed: {e}'
logging.error(msg, exc_info=e)
raise exceptions.GCPHTTPError(msg)
|
Make an asynchronous HTTP request.
Args:
method (str): HTTP method to use for the request.
url (str): URL to be requested.
params (dict): (optional) Query parameters for the request.
Defaults to ``None``.
headers (dict): (optional) HTTP headers to send with the
request. Headers pass through to the request will
include :attr:`DEFAULT_REQUEST_HEADERS`.
data (obj): (optional) A dictionary, bytes, or file-like
object to send in the body of the request.
json (obj): (optional) Any json compatible python
object.
NOTE: the data and json parameters cannot be used at the same time.
token_refresh_attempts (int): (optional) Number of attempts a token
refresh should be performed.
Returns:
(str) HTTP response body.
Raises:
:exc:`.GCPHTTPError`: if any exception occurred,
specifically a :exc:`.GCPHTTPResponseError`, if the
exception is associated with a response status code.
|
codesearchnet
|
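A hedged usage sketch, assuming `client` is an instance of the session class that defines this request() coroutine (auth client and aiohttp session already configured); the URL and project name are illustrative:
import asyncio

async def list_instances(client):
    # Any GCP REST endpoint works here; this one is only an example.
    url = ('https://compute.googleapis.com/compute/v1/projects/'
           'my-project/zones/us-central1-a/instances')
    return await client.request('get', url, params={'maxResults': 10})

# body = asyncio.run(list_instances(client))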
def auto_convert_cell_no_flags(cell, units=None, parens_as_neg=True):
units = units if units != None else {}
return auto_convert_cell(flagable=Flagable(), cell=cell, position=None, worksheet=0,
flags={}, units=units, parens_as_neg=parens_as_neg)
|
Performs a first-step conversion of the cell to check
its type, or tries to convert it if a valid conversion exists.
This version of the conversion neither flags changes nor stores
cell units.
Args:
units: The dictionary holder for cell units.
parens_as_neg: Converts numerics surrounded by parens to
negative values
|
juraj-google-style
|
def _BuildFindSpecsFromFileSourcePath(self, source_path, path_separator, environment_variables, user_accounts):
find_specs = []
for path_glob in path_helper.PathHelper.ExpandRecursiveGlobs(source_path, path_separator):
logger.debug('building find spec from path glob: {0:s}'.format(path_glob))
for path in path_helper.PathHelper.ExpandUsersVariablePath(path_glob, path_separator, user_accounts):
logger.debug('building find spec from path: {0:s}'.format(path))
if ('%' in path):
path = path_helper.PathHelper.ExpandWindowsPath(path, environment_variables)
logger.debug('building find spec from expanded path: {0:s}'.format(path))
if (not path.startswith(path_separator)):
logger.warning('The path filter must be defined as an absolute path: "{0:s}"'.format(path))
continue
path_segments = path.split(path_separator)
path_segments.pop(0)
if (not path_segments[(- 1)]):
logger.warning('Empty last path segment in path filter: "{0:s}"'.format(path))
path_segments.pop((- 1))
try:
find_spec = file_system_searcher.FindSpec(location_glob=path_segments, case_sensitive=False)
except ValueError as exception:
logger.error('Unable to build find specification for path: "{0:s}" with error: {1!s}'.format(path, exception))
continue
find_specs.append(find_spec)
return find_specs
|
Builds find specifications from a file source type.
Args:
source_path (str): file system path defined by the source.
path_separator (str): file system path segment separator.
environment_variables (list[str]): environment variable attributes used to
dynamically populate environment variables in key.
user_accounts (list[str]): identified user accounts stored in the
knowledge base.
Returns:
list[dfvfs.FindSpec]: find specifications for the file source type.
|
codesearchnet
|
def create_sonos_playlist_from_queue(self, title):
response = self.avTransport.SaveQueue([('InstanceID', 0), ('Title', title), ('ObjectID', '')])
item_id = response['AssignedObjectID']
obj_id = item_id.split(':', 2)[1]
uri = 'file:
res = [DidlResource(uri=uri, protocol_info='x-rincon-playlist:*:*:*')]
return DidlPlaylistContainer(resources=res, title=title, parent_id='SQ:', item_id=item_id)
|
Create a new Sonos playlist from the current queue.
Args:
title: Name of the playlist
:rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
|
codesearchnet
|
def add_metric(self, labels, buckets, sum_value, timestamp=None):
for b in buckets:
bucket, value = b[:2]
exemplar = None
if len(b) == 3:
exemplar = b[2]
self.samples.append(Sample(
self.name + '_bucket',
dict(list(zip(self._labelnames, labels)) + [('le', bucket)]),
value,
timestamp,
exemplar,
))
self.samples.extend([
Sample(self.name + '_count', dict(zip(self._labelnames, labels)), buckets[-1][1], timestamp),
Sample(self.name + '_sum', dict(zip(self._labelnames, labels)), sum_value, timestamp),
])
|
Add a metric to the metric family.
Args:
labels: A list of label values
buckets: A list of lists.
Each inner list can be a pair of bucket name and value,
or a triple of bucket name, value, and exemplar.
The buckets must be sorted, and +Inf present.
sum_value: The sum value of the metric.
|
juraj-google-style
|
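A usage sketch assuming this method lives on prometheus_client's HistogramMetricFamily (where this signature appears); the metric name and bucket values are illustrative:
from prometheus_client.core import HistogramMetricFamily

# Buckets are cumulative (upper bound, count) pairs ending with '+Inf'.
h = HistogramMetricFamily('request_latency_seconds', 'Request latency.', labels=['handler'])
h.add_metric(labels=['/home'],
             buckets=[('0.1', 24.0), ('0.5', 51.0), ('+Inf', 60.0)],
             sum_value=18.7)
for sample in h.samples:
    print(sample.name, sample.labels, sample.value)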
def _SimpleEncoder(wire_type, encode_value, compute_value_size):
def SpecificEncoder(field_number, is_repeated, is_packed):
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
size = 0
for element in value:
size += compute_value_size(element)
local_EncodeVarint(write, size)
for element in value:
encode_value(write, element)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
encode_value(write, element)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return encode_value(write, value)
return EncodeField
return SpecificEncoder
|
Return a constructor for an encoder for fields of a particular type.
Args:
wire_type: The field's wire type, for encoding tags.
encode_value: A function which encodes an individual value, e.g.
_EncodeVarint().
compute_value_size: A function which computes the size of an individual
value, e.g. _VarintSize().
|
juraj-google-style
|
def get_vcf_header(source):
head = HeaderParser()
for line in source:
line = line.rstrip()
        if line.startswith('#'):
            if line.startswith('##'):
logger.debug("Found metadata line {0}".format(line))
head.parse_meta_data(line)
else:
logger.debug("Found header line {0}".format(line))
head.parse_header_line(line)
else:
break
return head
|
Get the header lines of a vcf file
Args:
source(iterable): A vcf file
Returns:
head (HeaderParser): A headerparser object
|
juraj-google-style
|
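A hypothetical usage sketch; example.vcf is an illustrative path, and get_vcf_header plus its HeaderParser dependency are assumed importable:
# Parse only the ##metadata and #CHROM header lines of a local VCF file.
with open('example.vcf', 'r') as vcf_handle:
    head = get_vcf_header(vcf_handle)
# head now holds the parsed header information for downstream use.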
async def get_random_popular_person(self, limit=500):
index = random.randrange(limit)
data = (await self._get_popular_people_page())
if (data is None):
return
if (index >= len(data['results'])):
(page, index) = self._calculate_page_index(index, data)
data = (await self._get_popular_people_page(page))
if (data is None):
return
json_data = data['results'][index]
details = (await self._get_person_json(json_data['id']))
details.update(**json_data)
return Person.from_json(details, self.config['data'].get('images'))
|
Randomly select a popular person.
Notes:
Requires at least two API calls. May require three API calls
if the randomly-selected index isn't within the first page of
required data.
Arguments:
limit (:py:class:`int`, optional): How many of the most
popular people to make random choice from (defaults to top
``500``).
Returns:
:py:class:`~.Person`: A randomly-selected popular person.
|
codesearchnet
|
def __init__(self, field_instance, sequence):
if not field_instance.repeated:
raise FieldDefinitionError(
'FieldList may only accept repeated fields')
self.__field = field_instance
self.__field.validate(sequence)
list.__init__(self, sequence)
|
Constructor.
Args:
field_instance: Instance of field that validates the list.
sequence: List or tuple to construct list from.
|
juraj-google-style
|
def parse_brome_config_from_browser_config(browser_config):
config = {}
brome_keys = [key for key in browser_config if key.find(':') != -1]
for brome_key in brome_keys:
section, option = brome_key.split(':')
value = browser_config[brome_key]
if section not in config:
config[section] = {}
config[section][option] = value
return config
|
Parse the browser config and look for brome-specific config
Args:
browser_config (dict)
|
juraj-google-style
|
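An illustration with a made-up browser config, assuming the function above is importable; keys containing ':' are interpreted as section:option pairs:
browser_config = {
    'browserName': 'chrome',      # ignored: no ':' in the key
    'proxy:enabled': True,
    'proxy:host': '127.0.0.1',
}
print(parse_brome_config_from_browser_config(browser_config))
# -> {'proxy': {'enabled': True, 'host': '127.0.0.1'}}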
def forward(ctx, forward_fn, *args, **kwargs):
ctx.forward_fn = forward_fn
ctx.save_for_backward(*args)
try:
output, ctx.grad_fn = forward_fn(*args, **kwargs)
except:
output = forward_fn(*args, **kwargs)
ctx.grad_fn = lambda *args, **kwargs: torch.full((), float('nan'))
return output
|
Forward pass computation specification.
Args:
ctx: Context object.
forward_fn: Function to compute forward pass.
*args: Arguments for the forward pass.
**kwargs: Keyword arguments for the forward pass.
|
github-repos
|
def Lease(self, request, global_params=None):
config = self.GetMethodConfig('Lease')
return self._RunMethod(config, request, global_params=global_params)
|
Leases a dataflow WorkItem to run.
Args:
request: (DataflowProjectsJobsWorkItemsLeaseRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(LeaseWorkItemResponse) The response message.
|
github-repos
|
def get_md5sum(fname, chunk_size=1024):
def iter_chunks(f):
while True:
chunk = f.read(chunk_size)
if (not chunk):
break
(yield chunk)
sig = hashlib.md5()
with open(fname, 'rb') as f:
for chunk in iter_chunks(f):
sig.update(chunk)
return sig.hexdigest()
|
Returns the MD5 checksum of a file.
Args:
fname (str): Filename
chunk_size (Optional[int]): Size (in Bytes) of the chunks that should be
read in at once. Increasing chunk size reduces the number of reads
required, but increases the memory usage. Defaults to 1024.
Returns:
The MD5 checksum of the file, which is a string.
|
codesearchnet
|
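A sanity-check sketch for the helper above, assuming it is importable; the file name and payload are illustrative:
import hashlib

# Write a throwaway file, then compare the chunked digest with
# hashlib's one-shot digest.
payload = b'hello world' * 1000
with open('sample.bin', 'wb') as f:
    f.write(payload)
assert get_md5sum('sample.bin', chunk_size=4096) == hashlib.md5(payload).hexdigest()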
def nack(self, channel_id=None, **kwargs):
path = '/event-service/v1/channels/{}/nack'.format(channel_id)
r = self._httpclient.request(method='POST', url=self.url, path=path, **kwargs)
return r
|
Send a negative read-acknowledgement to the service.
Causes the channel's read point to move to its previous position
prior to the last poll.
Args:
channel_id (str): The channel ID.
**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.
Returns:
requests.Response: Requests Response() object.
Examples:
Refer to ``event_nack.py`` example.
|
codesearchnet
|
def deserialize(name, custom_objects=None):
return serialization_lib.deserialize_keras_object(name, module_objects=ALL_OBJECTS_DICT, custom_objects=custom_objects)
|
Deserializes a serialized loss class/function instance.
Args:
name: Loss configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras `Loss` instance or a loss function.
|
github-repos
|
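A minimal sketch, assuming this is the helper exported publicly as keras.losses.deserialize; the loss name and sample values are illustrative:
import keras

# A registered name (or a full serialized config) can be deserialized back
# into a callable loss.
loss_fn = keras.losses.deserialize('mean_squared_error')
print(loss_fn([[0.0, 1.0]], [[0.1, 0.9]]))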
def get_albums_for_artist(self, artist, full_album_art_uri=False):
subcategories = [artist]
result = self.get_album_artists(
full_album_art_uri=full_album_art_uri,
subcategories=subcategories,
complete_result=True)
reduced = [item for item in result if item.__class__ == DidlMusicAlbum]
result[:] = reduced
result._metadata.update({
'item_list': reduced,
'search_type': 'albums_for_artist',
'number_returned': len(reduced),
'total_matches': len(reduced)
})
return result
|
Get an artist's albums.
Args:
artist (str): an artist's name.
full_album_art_uri: whether the album art URI should be
absolute (i.e. including the IP address). Default `False`.
Returns:
A `SearchResult` instance.
|
juraj-google-style
|
def regroup_if_changed(group, op_list, name=None):
has_deltas = isinstance(op_list, sequence_with_deltas.SequenceWithDeltas)
if ((group is None) or (len(group.control_inputs) != len(op_list)) or (has_deltas and op_list.has_changed())):
if has_deltas:
op_list.mark()
if op_list:
return tf.group(*op_list, name=name)
else:
return tf.no_op(name=name)
else:
return group
|
Creates a new group for op_list if it has changed.
Args:
group: The current group. It is returned if op_list is unchanged.
op_list: The list of operations to check.
name: The name to use if a new group is created.
Returns:
Either group or a new group (or if op_list is empty then no_op).
|
codesearchnet
|
def accept_alert(self, text=None, wait=None):
wait = wait or capybara.default_max_wait_time
with self.driver.accept_modal("alert", text=text, wait=wait):
yield
|
Execute the wrapped code, accepting an alert.
Args:
text (str | RegexObject, optional): Text to match against the text in the modal.
wait (int | float, optional): Maximum time to wait for the modal to appear after
executing the wrapped code.
Raises:
ModalNotFound: If a modal dialog hasn't been found.
|
juraj-google-style
|
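A hedged usage sketch, assuming `page` is a capybara-py session whose driver supports modal handling; the alert text and button name are illustrative:
with page.accept_alert('Are you sure?'):
    page.click_button('Delete')   # triggers the alert, which is then accepted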
def build_authorization_endpoint(self, request, disable_sso=None):
self.load_config()
redirect_to = request.GET.get(REDIRECT_FIELD_NAME, None)
if not redirect_to:
redirect_to = django_settings.LOGIN_REDIRECT_URL
redirect_to = base64.urlsafe_b64encode(redirect_to.encode()).decode()
query = QueryDict(mutable=True)
query.update({
"response_type": "code",
"client_id": settings.CLIENT_ID,
"resource": settings.RELYING_PARTY_ID,
"redirect_uri": self.redirect_uri(request),
"state": redirect_to,
})
if self._mode == "openid_connect":
query["scope"] = "openid"
if (disable_sso is None and settings.DISABLE_SSO) or disable_sso is True:
query["prompt"] = "login"
return "{0}?{1}".format(self.authorization_endpoint, query.urlencode())
|
This function returns the ADFS authorization URL.
Args:
request(django.http.request.HttpRequest): A django Request object
disable_sso(bool): Whether to disable single sign-on and force the ADFS server to show a login prompt.
Returns:
str: The ADFS authorization URL, including the generated query string
|
juraj-google-style
|
def remove_server(self, name):
cmd = self.command_builder('no ntp server', value=name)
return self.configure(cmd)
|
Remove an NTP server entry from the node config
Args:
name (string): The IP address or FQDN of the NTP server.
Returns:
True if the operation succeeds, otherwise False.
|
codesearchnet
|
def add_time_dimension(padded_inputs, seq_lens):
padded_batch_size = tf.shape(padded_inputs)[0]
    max_seq_len = (padded_batch_size // tf.shape(seq_lens)[0])
    new_batch_size = (padded_batch_size // max_seq_len)
new_shape = ([new_batch_size, max_seq_len] + padded_inputs.get_shape().as_list()[1:])
return tf.reshape(padded_inputs, new_shape)
|
Adds a time dimension to padded inputs.
Arguments:
padded_inputs (Tensor): a padded batch of sequences. That is,
for seq_lens=[1, 2, 2], then inputs=[A, *, B, B, C, C], where
A, B, C are sequence elements and * denotes padding.
seq_lens (Tensor): the sequence lengths within the input batch,
suitable for passing to tf.nn.dynamic_rnn().
Returns:
Reshaped tensor of shape [NUM_SEQUENCES, MAX_SEQ_LEN, ...].
|
codesearchnet
|
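A worked sketch matching the docstring example, assuming the helper above is importable; the feature size of 4 is illustrative:
import tensorflow as tf

# 6 padded rows with seq_lens=[1, 2, 2] reshape to 3 sequences of
# max_seq_len 2, keeping the trailing feature dimension.
padded_inputs = tf.reshape(tf.range(24, dtype=tf.float32), [6, 4])
seq_lens = tf.constant([1, 2, 2])
print(add_time_dimension(padded_inputs, seq_lens).shape)  # (3, 2, 4)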
def dict_of_lists_add(dictionary, key, value):
list_objs = dictionary.get(key, list())
list_objs.append(value)
dictionary[key] = list_objs
|
Add value to a list in a dictionary by key
Args:
dictionary (DictUpperBound): Dictionary to which to add values
key (Any): Key within dictionary
value (Any): Value to add to list in dictionary
Returns:
None
|
codesearchnet
|
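A straightforward usage sketch for the helper above (assumed importable); the keys and values are illustrative:
groups = {}
dict_of_lists_add(groups, 'even', 2)
dict_of_lists_add(groups, 'even', 4)
dict_of_lists_add(groups, 'odd', 3)
print(groups)   # {'even': [2, 4], 'odd': [3]}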
def get_ut_layer(x,
hparams,
ffn_unit,
attention_unit,
pad_remover=None):
if hparams.recurrence_type == "basic":
ut_initializer = (x, x, x)
ut_function = functools.partial(
universal_transformer_basic,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit)
elif hparams.recurrence_type == "highway":
ut_initializer = (x, x, x)
ut_function = functools.partial(
universal_transformer_highway,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit,
pad_remover=pad_remover)
elif hparams.recurrence_type == "skip":
ut_initializer = (x, x, x)
ut_function = functools.partial(
universal_transformer_skip,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit,
pad_remover=pad_remover)
elif hparams.recurrence_type == "dwa":
memory_size = hparams.num_rec_steps + 1
memory_empty = tf.zeros([memory_size] + common_layers.shape_list(x))
memory = fill_memory_slot(memory_empty, x, 0)
ut_initializer = (x, x, memory)
ut_function = functools.partial(
universal_transformer_depthwise_attention,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit)
elif hparams.recurrence_type == "gru":
ut_initializer = (x, x, x)
ut_function = functools.partial(
universal_transformer_with_gru_as_transition_function,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit,
pad_remover=pad_remover)
elif hparams.recurrence_type == "lstm":
memory = tf.zeros(common_layers.shape_list(x))
ut_initializer = (x, x, memory)
ut_function = functools.partial(
universal_transformer_with_lstm_as_transition_function,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit,
pad_remover=pad_remover)
else:
raise ValueError("Unknown recurrence type: %s" % hparams.recurrence_type)
return ut_function, ut_initializer
|
Provides the function that is used in universal transformer steps.
Args:
x: input
hparams: model hyper-parameters
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
ut_function and the ut_initializer
Raises:
ValueError: Unknown recurrence type
|
juraj-google-style
|
class TFConvNextStage(keras.layers.Layer):
def __init__(self, config: ConvNextConfig, in_channels: int, out_channels: int, kernel_size: int=2, stride: int=2, depth: int=2, drop_path_rates: Optional[List[float]]=None, **kwargs):
super().__init__(**kwargs)
if in_channels != out_channels or stride > 1:
self.downsampling_layer = [keras.layers.LayerNormalization(epsilon=1e-06, name='downsampling_layer.0'), keras.layers.Conv2D(filters=out_channels, kernel_size=kernel_size, strides=stride, kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), name='downsampling_layer.1')]
else:
self.downsampling_layer = [tf.identity]
drop_path_rates = drop_path_rates or [0.0] * depth
self.layers = [TFConvNextLayer(config, dim=out_channels, drop_path=drop_path_rates[j], name=f'layers.{j}') for j in range(depth)]
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = stride
def call(self, hidden_states):
for layer in self.downsampling_layer:
hidden_states = layer(hidden_states)
for layer in self.layers:
hidden_states = layer(hidden_states)
return hidden_states
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, 'layers', None) is not None:
for layer in self.layers:
with tf.name_scope(layer.name):
layer.build(None)
if self.in_channels != self.out_channels or self.stride > 1:
with tf.name_scope(self.downsampling_layer[0].name):
self.downsampling_layer[0].build([None, None, None, self.in_channels])
with tf.name_scope(self.downsampling_layer[1].name):
self.downsampling_layer[1].build([None, None, None, self.in_channels])
|
ConvNext stage, consisting of an optional downsampling layer + multiple residual blocks.
Args:
config (`ConvNextConfig`):
Model configuration class.
in_channels (`int`):
Number of input channels.
out_channels (`int`):
Number of output channels.
depth (`int`):
Number of residual blocks.
drop_path_rates(`List[float]`):
Stochastic depth rates for each layer.
|
github-repos
|
def _send_data(self, data, start_offset, file_len):
headers = {}
end_offset = start_offset + len(data) - 1
if data:
headers['content-range'] = ('bytes %d-%d/%s' %
(start_offset, end_offset, file_len))
else:
headers['content-range'] = ('bytes */%s' % file_len)
status, response_headers, content = self._api.put_object(
self._path_with_token, payload=data, headers=headers)
if file_len == '*':
expected = 308
else:
expected = 200
errors.check_status(status, [expected], self._path, headers,
response_headers, content,
{'upload_path': self._path_with_token})
|
Send the block to the storage service.
This is a utility method that does not modify self.
Args:
data: data to send in str.
start_offset: start offset of the data in relation to the file.
file_len: an int if this is the last data to append to the file.
Otherwise '*'.
|
juraj-google-style
|
def _stream_data(self, chunk=None):
self._stream_sm_running = True
if chunk is None:
chunk = self._next_streaming_chunk(20)
if chunk is None or len(chunk) == 0:
self._stream_sm_running = False
return
try:
self._send_notification(StreamingChar.value_handle, chunk)
self._defer(self._stream_data)
except bable_interface.BaBLEException as err:
if err.packet.status == 'Rejected':
time.sleep(0.05)
self._defer(self._stream_data, [chunk])
else:
self._audit('ErrorStreamingReport')
self._logger.exception("Error while streaming data")
|
Stream reports to the ble client in 20 byte chunks
Args:
chunk (bytearray): A chunk that should be sent instead of requesting a
new chunk from the pending reports.
|
juraj-google-style
|