code | docstring | source
---|---|---
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
|
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Embedded representation of the inputs. Should be float, not int tokens.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
github-repos
|
def whatIfOrder(self, contract: Contract, order: Order) -> OrderState:
return self._run(self.whatIfOrderAsync(contract, order))
|
Retrieve commission and margin impact without actually
placing the order. The given order will not be modified in any way.
This method is blocking.
Args:
contract: Contract to test.
order: Order to test.
|
codesearchnet
|
def from_dict(cls, d, ignore=()):
filtered = {}
for (k, v) in d.items():
if (k == 'typeid'):
assert (v == cls.typeid), ('Dict has typeid %s but %s has typeid %s' % (v, cls, cls.typeid))
elif (k not in ignore):
filtered[k] = v
try:
inst = cls(**filtered)
except TypeError as e:
raise TypeError(('%s raised error: %s' % (cls.typeid, str(e))))
return inst
|
Create an instance from a serialized version of cls
Args:
d(dict): Endpoints of cls to set
ignore(tuple): Keys to ignore
Returns:
Instance of this class
|
codesearchnet
|
def GetNumberOfRows(self):
if (not self._database_object):
raise IOError('Not opened.')
if (self._number_of_rows is None):
self._number_of_rows = self._database_object.GetNumberOfRows(self._table_name)
return self._number_of_rows
|
Retrieves the number of rows of the table.
Returns:
int: number of rows.
Raises:
IOError: if the file-like object has not been opened.
OSError: if the file-like object has not been opened.
|
codesearchnet
|
def squeeze_batch_dims(inp, op, inner_rank, name=None):
with ops.name_scope(name, 'squeeze_batch_dims', [inp]):
inp = ops.convert_to_tensor(inp, name='input')
shape = inp.shape
inner_shape = shape[-inner_rank:]
if not inner_shape.is_fully_defined():
inner_shape = array_ops.shape(inp)[-inner_rank:]
batch_shape = shape[:-inner_rank]
if not batch_shape.is_fully_defined():
batch_shape = array_ops.shape(inp)[:-inner_rank]
if isinstance(inner_shape, tensor_shape.TensorShape):
inp_reshaped = array_ops.reshape(inp, [-1] + inner_shape.as_list())
else:
inp_reshaped = array_ops.reshape(inp, array_ops.concat(([-1], inner_shape), axis=-1))
out_reshaped = op(inp_reshaped)
out_inner_shape = out_reshaped.shape[-inner_rank:]
if not out_inner_shape.is_fully_defined():
out_inner_shape = array_ops.shape(out_reshaped)[-inner_rank:]
out = array_ops.reshape(out_reshaped, array_ops.concat((batch_shape, out_inner_shape), axis=-1))
out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:])
return out
|
Returns `unsqueeze_batch(op(squeeze_batch(inp)))`.
Where `squeeze_batch` reshapes `inp` to shape
`[prod(inp.shape[:-inner_rank])] + inp.shape[-inner_rank:]`
and `unsqueeze_batch` does the reverse reshape but on the output.
Args:
inp: A tensor with dims `batch_shape + inner_shape` where `inner_shape`
is length `inner_rank`.
op: A callable that takes a single input tensor and returns a single
output tensor.
inner_rank: A python integer.
name: A string.
Returns:
`unsqueeze_batch(op(squeeze_batch(inp)))`.
|
github-repos
|
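The reshape round-trip above is easier to picture outside TensorFlow. Below is a minimal NumPy sketch of the same idea — collapse all batch dimensions, apply an op that only understands `inner_rank` trailing dimensions, then restore the batch shape. The `normalize_rows` op and the array shapes are invented for illustration.

import numpy as np

def squeeze_batch_dims_np(inp, op, inner_rank):
    # Collapse every leading (batch) dimension into one, keeping the inner shape intact.
    batch_shape = inp.shape[:-inner_rank]
    inner_shape = inp.shape[-inner_rank:]
    flat = inp.reshape((-1,) + inner_shape)
    out_flat = op(flat)
    # Put the original batch dims back in front of whatever inner shape op produced.
    return out_flat.reshape(batch_shape + out_flat.shape[-inner_rank:])

x = np.arange(24, dtype=np.float64).reshape(2, 3, 4)         # batch_shape=(2, 3), inner_rank=1
normalize_rows = lambda m: m / m.sum(axis=-1, keepdims=True)
y = squeeze_batch_dims_np(x, normalize_rows, inner_rank=1)
print(y.shape)                                               # (2, 3, 4)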
def _SetCredentials(self, **kwds):
args = {'api_key': self._API_KEY, 'client': self, 'client_id': self._CLIENT_ID, 'client_secret': self._CLIENT_SECRET, 'package_name': self._PACKAGE, 'scopes': self._SCOPES, 'user_agent': self._USER_AGENT}
args.update(kwds)
from apitools.base.py import credentials_lib
self._credentials = credentials_lib.GetCredentials(**args)
|
Fetch credentials, and set them for this client.
Note that we can't simply return credentials, since creating them
may involve side-effecting self.
Args:
**kwds: Additional keyword arguments are passed on to GetCredentials.
Returns:
None. Sets self._credentials.
|
codesearchnet
|
def setColumn(self, header, values):
if any(isinstance(value, basestring) for value in values):
values = list(map(str, values))
self._impl.setColumnStr(header, values, len(values))
elif all(isinstance(value, Real) for value in values):
values = list(map(float, values))
self._impl.setColumnDbl(header, values, len(values))
else:
print(values)
raise NotImplementedError
|
Set the values of a column.
Args:
header: The header of the column to be set.
values: The values to set.
|
juraj-google-style
|
def list_local_devices(session_config=None):
def _convert(pb_str):
m = device_attributes_pb2.DeviceAttributes()
m.ParseFromString(pb_str)
return m
serialized_config = None
if session_config is not None:
serialized_config = session_config.SerializeToString()
return [_convert(s) for s in _pywrap_device_lib.list_devices(serialized_config)]
|
List the devices available in the local process.
Args:
session_config: a session config proto or None to use the default config.
Returns:
A list of `DeviceAttribute` protocol buffers.
|
github-repos
|
def edges(self, tail_head_iter):
edge = self._edge_plain
quote = self._quote_edge
lines = ((edge % (quote(t), quote(h))) for (t, h) in tail_head_iter)
self.body.extend(lines)
|
Create a bunch of edges.
Args:
tail_head_iter: Iterable of ``(tail_name, head_name)`` pairs.
|
codesearchnet
|
def write_json(self, fh, pretty=True):
sjson = json.JSONEncoder().encode(self.json())
if pretty:
json.dump(json.loads(sjson), fh, sort_keys=True, indent=4)
else:
json.dump(json.loads(sjson), fh)
return
|
Write composite object to file handle in JSON format.
Args:
fh (file): File handle to write to.
pretty (bool): Sort keys and indent in output.
|
juraj-google-style
|
def encode_dict(values_dict):
return {key: encode_value(value) for (key, value) in six.iteritems(values_dict)}
|
Encode a dictionary into protobuf ``Value``-s.
Args:
values_dict (dict): The dictionary to encode as protobuf fields.
Returns:
Dict[str, ~google.cloud.firestore_v1beta1.types.Value]: A
dictionary of string keys and ``Value`` protobufs as dictionary
values.
|
codesearchnet
|
def metadata(self, path):
if not self.exists(path):
raise BeamIOError('Path does not exist: %s' % path)
return FileMetadata(path, os.path.getsize(path), os.path.getmtime(path))
|
Fetch metadata fields of a file on the FileSystem.
Args:
path: string path of a file.
Returns:
:class:`~apache_beam.io.filesystem.FileMetadata`.
Raises:
``BeamIOError``: if path isn't a file or doesn't exist.
|
github-repos
|
def delete_media_service_rg(access_token, subscription_id, rgname, msname):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', rgname, '/providers/microsoft.media/mediaservices/', msname, '?api-version=', MEDIA_API])
return do_delete(endpoint, access_token)
|
Delete a media service.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
msname (str): Media service name.
Returns:
HTTP response.
|
codesearchnet
|
def _skip_tensor(self, op_id, out_tensor, report_handler):
non_numeric_tensor_types = set([dtypes.variant, dtypes.resource, dtypes.string])
if out_tensor.dtype in non_numeric_tensor_types:
report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_NON_NUMERIC_TENSOR))
return True
if [consumer for consumer in out_tensor.consumers() if TensorTracer.while_loop_op(consumer)]:
report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_FEEDS_WHILELOOP_OP))
return True
if self._is_user_included_op(out_tensor.op):
report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_USER_INCLUDED))
if tensor_tracer_flags.TT_CHECK_FILTER.value:
logging.info('USER_INCLUDED tensor %s', out_tensor.name)
return False
if self._is_user_excluded_op(out_tensor.op):
report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_USER_EXCLUDED))
if tensor_tracer_flags.TT_CHECK_FILTER.value:
logging.info('USER_EXCLUDED tensor %s', out_tensor.name)
return True
if not out_tensor.get_shape().is_fully_defined():
if self._parameters.trace_mode in (tensor_tracer_flags.TRACE_MODE_NAN_INF, tensor_tracer_flags.TRACE_MODE_NORM, tensor_tracer_flags.TRACE_MODE_HISTORY, tensor_tracer_flags.TRACE_MODE_MAX_ABS, tensor_tracer_flags.TRACE_MODE_SUMMARY):
report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_TENSOR_GET_TRACED))
return False
else:
report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_DYNAMIC_SHAPE))
return True
rank = len(out_tensor.shape)
if rank < 1:
if self._parameters.trace_scalar_ops:
if TensorTracer.unsafe_scalar_trace(out_tensor.op):
report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_UNSAFE_SCALAR))
return True
else:
report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_SCALAR_GET_TRACED))
return False
else:
report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_SKIP_SCALAR))
return True
else:
report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_TENSOR_GET_TRACED))
return False
|
Returns True if we should not trace out_tensor.
Args:
op_id: Topological index of the op producing tensor.
out_tensor: tf.Tensor
report_handler: An instance of tensor_tracer_report.TTReportHandle.
Returns:
True if the tensor should not be traced, false otherwise.
|
github-repos
|
def quad_2d(width, height, xpos=0.0, ypos=0.0) -> VAO:
pos = numpy.array([
xpos - width / 2.0, ypos + height / 2.0, 0.0,
xpos - width / 2.0, ypos - height / 2.0, 0.0,
xpos + width / 2.0, ypos - height / 2.0, 0.0,
xpos - width / 2.0, ypos + height / 2.0, 0.0,
xpos + width / 2.0, ypos - height / 2.0, 0.0,
xpos + width / 2.0, ypos + height / 2.0, 0.0,
], dtype=numpy.float32)
normals = numpy.array([
0.0, 0.0, 1.0,
0.0, 0.0, 1.0,
0.0, 0.0, 1.0,
0.0, 0.0, 1.0,
0.0, 0.0, 1.0,
0.0, 0.0, 1.0,
], dtype=numpy.float32)
uvs = numpy.array([
0.0, 1.0,
0.0, 0.0,
1.0, 0.0,
0.0, 1.0,
1.0, 0.0,
1.0, 1.0,
], dtype=numpy.float32)
vao = VAO("geometry:quad", mode=moderngl.TRIANGLES)
vao.buffer(pos, '3f', ["in_position"])
vao.buffer(normals, '3f', ["in_normal"])
vao.buffer(uvs, '2f', ["in_uv"])
return vao
|
Creates a 2D quad VAO using 2 triangles with normals and texture coordinates.
Args:
width (float): Width of the quad
height (float): Height of the quad
Keyword Args:
xpos (float): Center position x
ypos (float): Center position y
Returns:
A :py:class:`demosys.opengl.vao.VAO` instance.
|
juraj-google-style
|
def get_backdoor(self, name, version=''):
params = {}
params['or'] = 1
params['c-name'] = name
params['c-aliases__in'] = name
r = requests.get('{0}/backdoors/'.format(self.url), params=params, verify=self.verify, proxies=self.proxies)
if (r.status_code == 200):
result_data = json.loads(r.text)
if ('meta' not in result_data):
return None
if ('total_count' not in result_data['meta']):
return None
if (result_data['meta']['total_count'] <= 0):
return None
if ('objects' not in result_data):
return None
for backdoor in result_data['objects']:
if ('version' in backdoor):
if (backdoor['version'] == version):
return backdoor
else:
log.error('Non-200 status code: {}'.format(r.status_code))
return None
|
Searches for the backdoor based on name and version.
Args:
name: The name of the backdoor. This can be an alias.
version: The version.
Returns:
Returns a JSON object containing one or more backdoor results or
None if not found.
|
codesearchnet
|
def save(self, target, format=None, encoding=None, **options):
if encoding is None:
encoding = config.DEFAULT_ENCODING
if format is None:
_, format = helpers.detect_scheme_and_format(target)
writer_class = self.__custom_writers.get(format)
if writer_class is None:
if format not in config.WRITERS:
message = 'Format "%s" is not supported' % format
raise exceptions.FormatError(message)
writer_class = helpers.import_attribute(config.WRITERS[format])
writer_options = helpers.extract_options(options, writer_class.options)
if options:
message = 'Not supported options "%s" for format "%s"'
message = message % (', '.join(options), format)
raise exceptions.TabulatorException(message)
writer = writer_class(**writer_options)
writer.write(self.iter(), target, headers=self.headers, encoding=encoding)
|
Save stream to the local filesystem.
Args:
target (str): Path where to save the stream.
format (str, optional): The format the stream will be saved as. If
None, detects from the ``target`` path. Defaults to None.
encoding (str, optional): Saved file encoding. Defaults to
``config.DEFAULT_ENCODING``.
**options: Extra options passed to the writer.
|
juraj-google-style
|
def _GetInode(self, inode_value):
if isinstance(inode_value, py2to3.INTEGER_TYPES):
return inode_value
if isinstance(inode_value, float):
return int(inode_value)
if (not isinstance(inode_value, py2to3.STRING_TYPES)):
return (- 1)
if (b'-' in inode_value):
(inode_value, _, _) = inode_value.partition(b'-')
try:
return int(inode_value, 10)
except ValueError:
return (- 1)
|
Retrieves the inode from the inode value.
Args:
inode_value (int|str): inode, such as 1 or '27-128-1'.
Returns:
int: inode or -1 if the inode value cannot be converted to an integer.
|
codesearchnet
|
def convert_gguf_tokenizer(architecture, tokenizer_dict) -> Tokenizer:
tokenizer_class_name = architecture
converter = GGUF_TO_FAST_CONVERTERS[tokenizer_class_name](tokenizer_dict)
fast_tokenizer = converter.converted()
return (fast_tokenizer, converter.additional_kwargs)
|
Convert a GGUF tokenizer definition into a fast tokenizer instance.
Args:
architecture (`str`): The model architecture derived from the gguf file.
tokenizer_dict (`dict`): The tokenizer parameters extracted from the gguf file, used to build the backend
tokenizer for [`~tokenization_utils_base.PreTrainedTokenizerFast`].
Return:
A tuple of an instance of [`~tokenizers.Tokenizer`] to be used as the backend tokenizer of a
[`~tokenization_utils_base.PreTrainedTokenizerFast`], and any additional kwargs produced by the converter.
|
github-repos
|
def rm(path):
if (path and os.path.exists(path)):
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
|
Equivalent to rm -rf.
Make sure ``path`` doesn't exist after this call. If it's a dir,
shutil.rmtree(); if it's a file, os.remove(); if it doesn't exist,
ignore.
Args:
path (str): the path to nuke.
|
codesearchnet
|
def PushSection(self, name, pre_formatters):
if (name == '@'):
value = self.stack[(- 1)].context
else:
value = self.stack[(- 1)].context.get(name)
for (i, (f, args, formatter_type)) in enumerate(pre_formatters):
if (formatter_type == ENHANCED_FUNC):
value = f(value, self, args)
elif (formatter_type == SIMPLE_FUNC):
value = f(value)
else:
assert False, ('Invalid formatter type %r' % formatter_type)
self.stack.append(_Frame(value))
return value
|
Given a section name, push its value onto the top of the stack.
Args:
name: The section name to look up; '@' refers to the current context.
pre_formatters: Iterable of (formatter, args, formatter_type) tuples applied to the value before pushing.
Returns:
The new section value, or None if there is no such section.
|
codesearchnet
|
def dd2dms(dd):
m, s = divmod(dd * 3600, 60)
d, m = divmod(m, 60)
return int(d), int(m), s
|
Decimal degrees to DMS.
Args:
dd (float): Decimal degrees.
Return:
tuple. Degrees, minutes, and seconds.
|
juraj-google-style
|
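A quick usage check of `dd2dms` (the four-line function is restated so the snippet runs on its own; the sample values are just worked arithmetic, not taken from the original source):

def dd2dms(dd):
    m, s = divmod(dd * 3600, 60)
    d, m = divmod(m, 60)
    return int(d), int(m), s

print(dd2dms(30.2625))   # (30, 15, 45.0) -> 30 deg 15 min 45 s
print(dd2dms(45.50625))  # (45, 30, 22.5)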
def _open_repo(args, path_key='<path>'):
path = (pathlib.Path(args[path_key]) if args[path_key] else None)
try:
repo = open_repository(path)
except ValueError as exc:
raise ExitError(ExitCode.DATA_ERR, str(exc))
return repo
|
Open and return the repository containing the specified file.
The file is specified by looking up `path_key` in `args`. This value or
`None` is passed to `open_repository`.
Returns: A `Repository` instance.
Raises:
ExitError: If there is a problem opening the repo.
|
codesearchnet
|
def __init__(self, property_type=TableFeaturePropType.OFPTFPT_MATCH,
oxm_ids=None):
super().__init__(property_type)
self.oxm_ids = ListOfOxmHeader() if oxm_ids is None else oxm_ids
self.update_length()
|
Create an OxmProperty with the optional parameters below.
Args:
property_type(|TableFeaturePropType_v0x04|):
Property Type value of this instance.
oxm_ids(|ListOfOxmHeader_v0x04|):
List of OxmHeader instances.
|
juraj-google-style
|
async def close_interface(self, client_id, conn_string, interface):
conn_id = self._client_connection(client_id, conn_string)
(await self.adapter.close_interface(conn_id, interface))
self._hook_close_interface(conn_string, interface, client_id)
|
Close a device interface on behalf of a client.
See :meth:`AbstractDeviceAdapter.close_interface`.
Args:
client_id (str): The client we are working for.
conn_string (str): A connection string that will be
passed to the underlying device adapter.
interface (str): The name of the interface to close.
Raises:
DeviceServerError: There is an issue with your client_id such
as not being connected to the device.
DeviceAdapterError: The adapter had an issue closing the interface.
|
codesearchnet
|
def index_last_dim_with_indices(x, indices):
assert len(x.shape) == len(indices.shape) + 1
x_shape = shape_list(x)
vocab_size = x_shape[-1]
flat_x = tf.reshape(x, [list_product(x_shape[:-1]), vocab_size])
flat_indices = tf.reshape(indices, [list_product(x_shape[:-1])])
idx = tf.stack(
[
tf.range(tf.to_int64(shape_list(flat_indices)[0])),
tf.to_int64(flat_indices)
],
axis=1)
flat_x_idx = tf.gather_nd(flat_x, idx)
x_idx = tf.reshape(flat_x_idx, x_shape[:-1])
return x_idx
|
Use indices to index into the last axis of x.
This can be useful for recovering the actual probabilities of a sample from a
probability distribution.
Args:
x: Tensor, n-d.
indices: Tensor, (n-1)-d, where the dimension sizes match the first (n-1)
dimensions of x. The values of indices will be used to index into the last
axis of x.
Returns:
Tensor, (n-1)-d.
|
juraj-google-style
|
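For readers more at home in NumPy, the gather above is the same operation as `take_along_axis` on the last axis. This is a sketch with invented shapes, not the TensorFlow code path:

import numpy as np

x = np.random.rand(2, 3, 5)              # e.g. (batch, time, vocab) probabilities
indices = np.array([[0, 4, 2],
                    [1, 1, 3]])          # (batch, time) sampled ids

# Pick x[b, t, indices[b, t]] for every (b, t), mirroring the gather_nd above.
picked = np.take_along_axis(x, indices[..., None], axis=-1).squeeze(-1)
print(picked.shape)                      # (2, 3)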
def _ord_to_namespace(n, _max_length=None):
if (_max_length is None):
_max_length = MAX_NAMESPACE_LENGTH
length = _LEX_DISTANCE[(_max_length - 1)]
if (n == 0):
return ''
n -= 1
return (NAMESPACE_CHARACTERS[(n // length)] + _ord_to_namespace((n % length), (_max_length - 1)))
|
Convert a namespace ordinal to a namespace string.
Converts an int, representing the sequence number of a namespace ordered
lexicographically, into a namespace string.
>>> _ord_to_namespace(0)
''
>>> _ord_to_namespace(1)
'-'
>>> _ord_to_namespace(2)
'--'
>>> _ord_to_namespace(3)
'---'
Args:
n: A number representing the lexicographical ordering of a namespace.
_max_length: The maximum namespace length.
Returns:
A string representing the nth namespace in lexicographical order.
|
codesearchnet
|
def rouge_l_fscore(predictions, labels, **unused_kwargs):
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
outputs = tf.squeeze(outputs, axis=[-1, -2])
labels = tf.squeeze(labels, axis=[-1, -2])
rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
tf.float32)
return rouge_l_f_score, tf.constant(1.0)
|
ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
|
juraj-google-style
|
def __init__(self, field, **kwargs):
self.attrs = kwargs
self.attrs.update(field.field.widget.attrs)
self.field = field
self.widget = field.field.widget
self.values = {"class": [], "label": "", "help": "", "errors": ""}
|
Initializer for Field class.
Args:
field (BoundField): Form field
**kwargs (dict): Field attributes
|
juraj-google-style
|
def provide(self, cls):
support.verify_class_type(cls, 'cls')
if not self._is_injectable_fn(cls):
provide_loc = locations.get_back_frame_loc()
raise errors.NonExplicitlyBoundClassError(provide_loc, cls)
try:
return self._obj_provider.provide_class(
cls, self._injection_context_factory.new(cls.__init__),
direct_init_pargs=[], direct_init_kwargs={})
except errors.Error as e:
if self._use_short_stack_traces:
raise e
else:
raise
|
Provides an instance of the given class.
Args:
cls: a class (not an instance)
Returns:
an instance of cls
Raises:
Error: an instance of cls is not providable
|
juraj-google-style
|
def _load(cls, prefix, user_agent_config_yaml, user_agent_lookup=None):
if (not user_agent_config_yaml):
user_agent_config_yaml = cls.default_user_agent_config_yaml
logger.info(('No user agent or user agent config file given. Using default user agent config file: %s.' % user_agent_config_yaml))
if (not isfile(user_agent_config_yaml)):
raise UserAgentError("User_agent should be supplied in a YAML config file. It can be your project's name for example.")
logger.info(('Loading user agent config from: %s' % user_agent_config_yaml))
user_agent_config_dict = load_yaml(user_agent_config_yaml)
if user_agent_lookup:
user_agent_config_dict = user_agent_config_dict.get(user_agent_lookup)
if (not user_agent_config_dict):
raise UserAgentError(('No user agent information read from: %s' % user_agent_config_yaml))
ua = user_agent_config_dict.get('user_agent')
return cls._construct(user_agent_config_dict, prefix, ua)
|
Load user agent YAML file
Args:
prefix (str): Text to put at start of user agent
user_agent_config_yaml (str): Path to user agent YAML file
user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied.
Returns:
str: user agent
|
codesearchnet
|
def format_argspec_plus(fn, grouped=True):
spec = ((callable(fn) and inspect.getargspec(fn)) or fn)
args = inspect.formatargspec(*spec)
if spec[0]:
self_arg = spec[0][0]
elif spec[1]:
self_arg = ('%s[0]' % spec[1])
else:
self_arg = None
apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2])
defaulted_vals = (((spec[3] is not None) and spec[0][(0 - len(spec[3])):]) or ())
apply_kw = inspect.formatargspec(spec[0], spec[1], spec[2], defaulted_vals, formatvalue=(lambda x: ('=' + x)))
if grouped:
return dict(args=args, self_arg=self_arg, apply_pos=apply_pos, apply_kw=apply_kw)
else:
return dict(args=args[1:(- 1)], self_arg=self_arg, apply_pos=apply_pos[1:(- 1)], apply_kw=apply_kw[1:(- 1)])
|
Returns a dictionary of formatted, introspected function arguments.
An enhanced variant of inspect.formatargspec to support code generation.
fn
An inspectable callable or tuple of inspect getargspec() results.
grouped
Defaults to True; include (parens, around, argument) lists
Returns:
args
Full inspect.formatargspec for fn
self_arg
The name of the first positional argument, varargs[0], or None
if the function defines no positional arguments.
apply_pos
args, re-written in calling rather than receiving syntax. Arguments are
passed positionally.
apply_kw
Like apply_pos, except keyword-ish args are passed as keywords.
Example::
>>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)
{'args': '(self, a, b, c=3, **d)',
'self_arg': 'self',
'apply_kw': '(self, a, b, c=c, **d)',
'apply_pos': '(self, a, b, c, **d)'}
|
codesearchnet
|
def list_channels(self, collection_name, experiment_name):
dont_care = 'image'
chan = ChannelResource(name='', collection_name=collection_name, experiment_name=experiment_name, type=dont_care)
return self._list_resource(chan)
|
List all channels belonging to the named experiment that is part
of the named collection.
Args:
collection_name (string): Name of the parent collection.
experiment_name (string): Name of the parent experiment.
Returns:
(list)
Raises:
requests.HTTPError on failure.
|
codesearchnet
|
def get_plot(self, ylim=None, units="thz"):
u = freq_units(units)
plt = pretty_plot(12, 8)
band_linewidth = 1
data = self.bs_plot_data()
for d in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][d],
[data['frequency'][d][i][j] * u.factor
for j in range(len(data['distances'][d]))], 'b-',
linewidth=band_linewidth)
self._maketicks(plt)
plt.axhline(0, linewidth=1, color='k')
plt.xlabel(r'$\mathrm{Wave\ Vector}$', fontsize=30)
ylabel = r'$\mathrm{{Frequencies\ ({})}}$'.format(u.label)
plt.ylabel(ylabel, fontsize=30)
x_max = data['distances'][-1][-1]
plt.xlim(0, x_max)
if ylim is not None:
plt.ylim(ylim)
plt.tight_layout()
return plt
|
Get a matplotlib object for the bandstructure plot.
Args:
ylim: Specify the y-axis (frequency) limits; by default None let
the code choose.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
|
juraj-google-style
|
def _get_best(values: List[float], losses: List[float], max_loss_div: float=0.9, min_val_div: float=10.0) -> float:
assert (len(values) == len(losses)), 'lengths of values and losses should be equal'
min_ind = np.argmin(losses)
for i in range((min_ind - 1), 0, (- 1)):
if (((losses[i] * max_loss_div) > losses[min_ind]) or ((values[i] * min_val_div) < values[min_ind])):
return values[(i + 1)]
return (values[min_ind] / min_val_div)
|
Find the best value according to given losses
Args:
values: list of considered values
losses: list of obtained loss values corresponding to `values`
max_loss_div: maximal divergence of loss to be considered significant
min_val_div: minimum divergence of loss to be considered significant
Returns:
best value divided by `min_val_div`
|
codesearchnet
|
def correlation_matrix(df):
columns = df.columns.tolist()
corr = pd.DataFrame(
np.corrcoef(df, rowvar=0), columns=columns, index=columns)
return corr
|
Returns a pandas DataFrame with the pair-wise correlations of the columns.
Args:
df: pandas DataFrame with columns to run diagnostics on
|
juraj-google-style
|
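A small usage sketch (the three-line helper is restated so the block runs standalone; column names and data are invented):

import numpy as np
import pandas as pd

def correlation_matrix(df):
    columns = df.columns.tolist()
    return pd.DataFrame(np.corrcoef(df, rowvar=0), columns=columns, index=columns)

df = pd.DataFrame({'a': [1.0, 2.0, 3.0, 4.0],
                   'b': [2.0, 4.1, 5.9, 8.0],    # nearly perfectly correlated with 'a'
                   'c': [4.0, 3.0, 2.0, 1.0]})   # perfectly anti-correlated with 'a'
print(correlation_matrix(df).round(3))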
def file_crc32(filePath):
crc = 0
with open(filePath, 'rb') as f:
for block in _file_iter(f, _BLOCK_SIZE):
crc = binascii.crc32(block, crc) & 0xFFFFFFFF
return crc
|
Compute the CRC32 checksum of a file.
Args:
filePath: Path to the file whose checksum should be computed.
Returns:
The CRC32 checksum of the file contents.
|
juraj-google-style
|
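`_file_iter` and `_BLOCK_SIZE` are private helpers from the same module and are not shown above. A self-contained sketch of the same incremental CRC32 idea, with an assumed 1 MiB block size, might look like this:

import binascii

def file_crc32_standalone(file_path, block_size=1 << 20):
    # Fold each block into the running CRC and mask to an unsigned 32-bit value.
    crc = 0
    with open(file_path, 'rb') as f:
        for block in iter(lambda: f.read(block_size), b''):
            crc = binascii.crc32(block, crc) & 0xFFFFFFFF
    return crc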
def remove(self, cluster_id):
cluster = self._storage.pop(cluster_id)
cluster.cleanup()
|
Remove a cluster and clean up its data.
Args:
cluster_id: cluster identifier
|
juraj-google-style
|
def FromTimeString(cls, time_string, dayfirst=False, gmt_as_timezone=True, timezone=pytz.UTC):
if ((not gmt_as_timezone) and time_string.endswith(' GMT')):
time_string = '{0:s}UTC'.format(time_string[:(- 3)])
try:
datetime_object = dateutil.parser.parse(time_string, dayfirst=dayfirst)
except (TypeError, ValueError) as exception:
raise errors.TimestampError('Unable to convert time string: {0:s} in to a datetime object with error: {1!s}'.format(time_string, exception))
if datetime_object.tzinfo:
datetime_object = datetime_object.astimezone(pytz.UTC)
else:
datetime_object = timezone.localize(datetime_object)
posix_time = int(calendar.timegm(datetime_object.utctimetuple()))
timestamp = (posix_time * definitions.MICROSECONDS_PER_SECOND)
return (timestamp + datetime_object.microsecond)
|
Converts a string containing a date and time value into a timestamp.
Args:
time_string: String that contains a date and time value.
dayfirst: An optional boolean argument. If set to true then the
parser will change the precedence in which it parses timestamps
from MM-DD-YYYY to DD-MM-YYYY (and YYYY-MM-DD will be
YYYY-DD-MM, etc).
gmt_as_timezone: Sometimes the dateutil parser will interpret GMT and UTC
the same way, that is not make a distinction. By default
this is set to true, that is GMT can be interpreted
differently than UTC. If that is not the expected result
this attribute can be set to false.
timezone: Optional timezone object (instance of pytz.timezone) that
the data and time value in the string represents. This value
is used when the timezone cannot be determined from the string.
Returns:
The timestamp which is an integer containing the number of micro seconds
since January 1, 1970, 00:00:00 UTC or 0 on error.
Raises:
TimestampError: if the time string could not be parsed.
|
codesearchnet
|
def wait_ssh(roles, retries=100, interval=30):
utils_playbook = os.path.join(ANSIBLE_DIR, 'utils.yml')
options = {'enos_action': 'ping'}
for i in range(0, retries):
try:
run_ansible([utils_playbook], roles=roles, extra_vars=options, on_error_continue=False)
break
except EnosUnreachableHostsError as e:
logger.info(('Hosts unreachable: %s ' % e.hosts))
logger.info(('Retrying... %s/%s' % ((i + 1), retries)))
time.sleep(interval)
else:
raise EnosSSHNotReady('Maximum retries reached')
|
Wait for all the machines to be ssh-reachable
Let ansible initiates a communication and retries if needed.
Args:
inventory (string): path to the inventory file to test
retries (int): Number of time we'll be retrying an SSH connection
interval (int): Interval to wait in seconds between two retries
|
codesearchnet
|
def set_quickchart_resource(self, resource):
if isinstance(resource, int) and not isinstance(resource, bool):
resource = self.get_resources()[resource]
if isinstance(resource, hdx.data.resource.Resource) or isinstance(resource, dict):
res = resource.get('id')
if res is None:
resource = resource['name']
else:
resource = res
elif not isinstance(resource, str):
raise hdx.data.hdxobject.HDXError('Resource id cannot be found in type %s!' % type(resource).__name__)
if is_valid_uuid(resource) is True:
search = 'id'
else:
search = 'name'
changed = False
for dataset_resource in self.resources:
if dataset_resource[search] == resource:
dataset_resource.enable_dataset_preview()
self.preview_resource()
changed = True
else:
dataset_resource.disable_dataset_preview()
return changed
|
Set the resource that will be used for displaying QuickCharts in dataset preview
Args:
resource (Union[hdx.data.resource.Resource,Dict,str,int]): Either resource id or name, resource metadata from a Resource object or a dictionary or position
Returns:
bool: Returns True if resource for QuickCharts in dataset preview set or False if not
|
juraj-google-style
|
def refactor_string(self, data, name):
features = _detect_future_features(data)
if ('print_function' in features):
self.driver.grammar = pygram.python_grammar_no_print_statement
try:
tree = self.driver.parse_string(data)
except Exception as err:
self.log_error("Can't parse %s: %s: %s", name, err.__class__.__name__, err)
return
finally:
self.driver.grammar = self.grammar
tree.future_features = features
self.log_debug('Refactoring %s', name)
self.refactor_tree(tree, name)
return tree
|
Refactor a given input string.
Args:
data: a string holding the code to be refactored.
name: a human-readable name for use in error/log messages.
Returns:
An AST corresponding to the refactored input stream; None if
there were errors during the parse.
|
codesearchnet
|
def sort_variant_file(infile):
command = [
'sort',
]
command.append('-n')
command.append('-k1')
command.append('-k3')
command = command + [infile, '-o', infile]
logger.info("Start sorting variants...")
logger.info("Sort command: {0}".format(' '.join(command)))
sort_start = datetime.now()
try:
call(command)
except OSError as e:
logger.warning("unix command sort does not seem to exist on your system...")
logger.warning("genmod needs unix sort to provide a sorted output.")
logger.warning("Output VCF will not be sorted since genmod can not find"\
"unix sort")
raise e
logger.info("Sorting done. Time to sort: {0}".format(datetime.now()-sort_start))
return
|
Sort a modified variant file.
Sorting is based on the first column and the POS.
Uses unix sort to sort the variants and overwrites the infile.
Args:
infile : A string that is the path to the variant file to sort
Returns:
None. The infile is overwritten with the sorted variants.
|
juraj-google-style
|
def selfSignCert(self, cert, pkey):
cert.set_issuer(cert.get_subject())
cert.sign(pkey, self.signing_digest)
|
Self-sign a certificate.
Args:
cert (OpenSSL.crypto.X509): The certificate to sign.
pkey (OpenSSL.crypto.PKey): The PKey with which to sign the certificate.
Examples:
Sign a given certificate with a given private key:
cdir.selfSignCert(mycert, myotherprivatekey)
Returns:
None
|
codesearchnet
|
def GetCharacterDisplayWidth(char):
if not isinstance(char, str):
return 1
char = unicodedata.normalize('NFC', char)
if unicodedata.combining(char) != 0:
return 0
elif unicodedata.category(char) == 'Cf':
return 0
elif unicodedata.east_asian_width(char) in 'FW':
return 2
else:
return 1
|
Returns the monospaced terminal display width of char.
Assumptions:
- monospaced display
- ambiguous or unknown chars default to width 1
- ASCII control char width is 1 => don't use this for control chars
Args:
char: The character to determine the display width of.
Returns:
The monospaced terminal display width of char: either 0, 1, or 2.
|
github-repos
|
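Summing per-character widths gives the display width of a whole string. The sketch below applies the same rules as the function above to a string; the sample strings are arbitrary:

import unicodedata

def display_width(text):
    # Width of a string = sum of per-character cell widths, using the rules above.
    width = 0
    for char in unicodedata.normalize('NFC', text):
        if unicodedata.combining(char) != 0 or unicodedata.category(char) == 'Cf':
            continue                      # combining marks and format chars take no cell
        width += 2 if unicodedata.east_asian_width(char) in 'FW' else 1
    return width

print(display_width('abc'))       # 3
print(display_width('日本語'))     # 6 (each fullwidth character occupies two cells)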
def min(x, axis=None, keepdims=False):
return math_ops.reduce_min(x, axis, keepdims)
|
Minimum value in a tensor.
Args:
x: A tensor or variable.
axis: An integer, the axis to find minimum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with minimum values of `x`.
|
github-repos
|
def get_size(fileobj):
old_pos = fileobj.tell()
try:
fileobj.seek(0, 2)
return fileobj.tell()
finally:
fileobj.seek(old_pos, 0)
|
Returns the size of the file.
The position when passed in will be preserved if no error occurs.
Args:
fileobj (fileobj)
Returns:
int: The size of the file
Raises:
IOError
|
juraj-google-style
|
def branches():
out = shell.run('git branch', capture=True, never_pretend=True).stdout.strip()
return [x.strip('* \t\n') for x in out.splitlines()]
|
Return a list of branches in the current repo.
Returns:
list[str]: A list of branches in the current repo.
|
codesearchnet
|
def create_datastore_for_topline(self, delete_first=0, path=None):
data = load_yaml(script_dir_plus_file(join('..', 'hdx_datasource_topline.yml'), Resource))
self.create_datastore_from_dict_schema(data, delete_first, path=path)
|
For tabular data, create a resource in the HDX datastore which enables data preview in HDX using the built-in
YAML definition for a topline. If path is not supplied, the file is first downloaded from HDX.
Args:
delete_first (int): Delete datastore before creation. 0 = No, 1 = Yes, 2 = If no primary key. Defaults to 0.
path (Optional[str]): Local path to file that was uploaded. Defaults to None.
Returns:
None
|
codesearchnet
|
def _ParseCString(self, page_data, string_offset):
cstring_map = self._GetDataTypeMap('cstring')
try:
value_string = self._ReadStructureFromByteStream(
page_data[string_offset:], string_offset, cstring_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to map string data at offset: 0x{0:08x} with error: '
'{1!s}').format(string_offset, exception))
return value_string.rstrip('\x00')
|
Parses a C string from the page data.
Args:
page_data (bytes): page data.
string_offset (int): offset of the string relative to the start
of the page.
Returns:
str: string.
Raises:
ParseError: when the string cannot be parsed.
|
juraj-google-style
|
def fetch(self, customer_id, token_id, data={}, **kwargs):
url = '{}/{}/tokens/{}'.format(self.base_url, customer_id, token_id)
return self.get_url(url, data, **kwargs)
|
Fetch Token for given Id and given customer Id
Args:
customer_id : Customer Id for which tokens have to be fetched
token_id : Id for which Token object has to be fetched
Returns:
Token dict for given token Id
|
codesearchnet
|
def remove_alias(type_):
if isinstance(type_, cpptypes.type_t):
type_ref = type_
elif isinstance(type_, typedef.typedef_t):
type_ref = type_.decl_type
else:
return type_
if type_ref.cache.remove_alias:
return type_ref.cache.remove_alias
no_alias = __remove_alias(type_ref.clone())
type_ref.cache.remove_alias = no_alias
return no_alias
|
Returns `type_t` without typedef
Args:
type_ (type_t | declaration_t): type or declaration
Returns:
type_t: the type associated to the inputted declaration
|
juraj-google-style
|
def body(self, features):
features['targets'] = features['inputs']
is_training = (self.hparams.mode == tf.estimator.ModeKeys.TRAIN)
inputs = tf.to_float(features['targets_raw'])
z = tf.random_uniform([self.hparams.batch_size, self.hparams.bottleneck_bits], minval=(- 1), maxval=1, name='z')
out_shape = common_layers.shape_list(inputs)[1:4]
g = self.generator(z, is_training, out_shape)
losses = self.losses(inputs, g)
summary_g_image = tf.reshape(g[(0, :)], ([1] + common_layers.shape_list(inputs)[1:]))
tf.summary.image('generated', summary_g_image, max_outputs=1)
if is_training:
return (tf.zeros_like(inputs), losses)
return (tf.reshape(g, tf.shape(inputs)), losses)
|
Body of the model.
Args:
features: a dictionary with the tensors.
Returns:
A pair (predictions, losses) where predictions is the generated image
and losses is a dictionary of losses (that get added for the final loss).
|
codesearchnet
|
def dump(self, filename, encoding="utf8"):
with open(filename, mode='w', encoding=encoding) as text_file:
text_file.write(self.single_string())
|
Dumps the ascii art in the file.
Args:
filename (str): File to dump the ascii art.
encoding (str): Optional. Default "utf-8".
|
juraj-google-style
|
def recipe_salesforce_to_bigquery(config, domain, client, secret, username, password, query, auth_read, dataset, table, schema):
salesforce(config, {'auth': auth_read, 'domain': domain, 'client': client, 'secret': secret, 'username': username, 'password': password, 'query': query, 'out': {'bigquery': {'dataset': dataset, 'table': table, 'schema': schema}}})
|
Move query results into a BigQuery table.
Args:
domain (string) - Retrieve from a Salesforce Domain.
client (string) - Retrieve from a Salesforce App.
secret (string) - Retrieve from a Salesforce App.
username (email) - Your Salesforce user email.
password (password) - Your Salesforce login password.
query (string) - The query to run in Salesforce.
auth_read (authentication) - Credentials used for reading data.
dataset (string) - Existing BigQuery dataset.
table (string) - Table to create from this report.
schema (json) - Schema provided in JSON list format or empty list.
|
github-repos
|
def get_messages(self, name):
return self._loop.run_coroutine(self._client.get_messages(name))
|
Get stored messages for a service.
Args:
name (string): The name of the service to get messages from.
Returns:
list(ServiceMessage): A list of the messages stored for this service
|
juraj-google-style
|
def convert(self, vroot, entry_variables):
self.graph_info = GraphInfo(vroot)
self.entry_variables = entry_variables
with nn.parameter_scope(self.name):
for t, func in enumerate(self.graph_info.funcs):
if func.name in self.activation_functions:
activation_func = func
o = self._fixed_point_activation_conversion(
activation_func)
continue
o = self._identity_conversion(func)
self.end_variable = o
return self.end_variable
|
All functions are replaced with the same `new` function.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
|
juraj-google-style
|
def get_box_folder_location():
box_prefs_path = 'Library/Application Support/Box/Box Sync/sync_root_folder.txt'
box_home = None
box_prefs = os.path.join(os.environ['HOME'], box_prefs_path)
try:
with open(box_prefs, 'r') as sync_path:
data = sync_path.read()
box_home = data
except IOError:
error('Unable to find your Box prefs =(')
return box_home
|
Try to locate the Box folder.
Returns:
(str) Full path to the current Box folder
|
codesearchnet
|
def GetEventData(self, data_type):
event_data = events.EventData(data_type=data_type)
for (property_name, property_value) in iter(self._properties.items()):
if isinstance(property_value, py2to3.BYTES_TYPE):
property_value = repr(property_value)
setattr(event_data, property_name, property_value)
return event_data
|
Retrieves the properties as event data.
Args:
data_type (str): event data type.
Returns:
EventData: event data.
|
codesearchnet
|
def read_user_data(self, user_data_path):
raw_user_data = read_value_from_path(user_data_path)
variables = self.get_variables()
return parse_user_data(variables, raw_user_data, self.name)
|
Reads and parses a user_data file.
Args:
user_data_path (str):
path to the userdata file
Returns:
str: the parsed user data file
|
codesearchnet
|
def flatten(schedule: ScheduleComponent, name: str=None) -> Schedule:
if (name is None):
name = schedule.name
return Schedule(*schedule.instructions, name=name)
|
Create a flattened schedule.
Args:
schedule: Schedule to flatten
name: Name of the new schedule. Defaults to the name of `schedule`
|
codesearchnet
|
def sample(self, sample_shape=(), seed=None, name='sample'):
return self._call_sample_n(sample_shape, seed, name)
|
Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
|
github-repos
|
def parse_log(file_path):
if (not os.path.isfile(file_path)):
return elements.error('Output Log', ('Could not open file: ' + file_path.split(os.sep)[(- 1)]))
headers = ['Converged Iterations', 'Avg. Iterations to Converge', 'Processor Count', 'Dycore Type']
with open(file_path, 'r') as f:
dycore_types = {'0': 'Glide', '1': 'Glam', '2': 'Glissade', '3': 'Albany_felix', '4': 'BISICLES'}
curr_step = 0
proc_count = 0
iter_number = 0
converged_iters = []
iters_to_converge = []
for line in f:
split = line.split()
if ('CISM dycore type' in line):
if (line.split()[(- 1)] == '='):
dycore_type = dycore_types[next(f).strip()]
else:
dycore_type = dycore_types[line.split()[(- 1)]]
elif ('total procs' in line):
proc_count += int(line.split()[(- 1)])
elif ('Nonlinear Solver Step' in line):
curr_step = int(line.split()[4])
elif ('Compute ice velocities, time = ' in line):
converged_iters.append(curr_step)
curr_step = float(line.split()[(- 1)])
elif ('"SOLVE_STATUS_CONVERGED"' in line):
split = line.split()
iters_to_converge.append(int(split[(split.index('"SOLVE_STATUS_CONVERGED"') + 2)]))
elif ('Compute dH/dt' in line):
iters_to_converge.append(int(iter_number))
elif ((len(split) > 0) and split[0].isdigit()):
iter_number = split[0]
if (iters_to_converge == []):
iters_to_converge.append(int(iter_number))
data = {'Dycore Type': dycore_type, 'Processor Count': proc_count, 'Converged Iterations': len(converged_iters), 'Avg. Iterations to Converge': np.mean(iters_to_converge)}
return elements.table('Output Log', headers, data)
|
Parse a CISM output log and extract some information.
Args:
file_path: absolute path to the log file
Return:
A table element (from the elements module) summarizing the data parsed
from the log: dycore type, processor count, and iteration statistics
|
codesearchnet
|
def create_handlers_map(prefix='.*'):
return [
(prefix + '/output', _BarrierHandler),
(prefix + '/run', _PipelineHandler),
(prefix + '/finalized', _PipelineHandler),
(prefix + '/cleanup', _CleanupHandler),
(prefix + '/abort', _PipelineHandler),
(prefix + '/fanout', _FanoutHandler),
(prefix + '/fanout_abort', _FanoutAbortHandler),
(prefix + '/callback', _CallbackHandler),
(prefix + '/rpc/tree', status_ui._TreeStatusHandler),
(prefix + '/rpc/class_paths', status_ui._ClassPathListHandler),
(prefix + '/rpc/list', status_ui._RootListHandler),
(prefix + '(/.+)', status_ui._StatusUiHandler),
]
|
Create new handlers map.
Args:
prefix: url prefix to use.
Returns:
list of (regexp, handler) pairs for WSGIApplication constructor.
|
juraj-google-style
|
def with_doc(fn_with_doc_to_copy):
def decorator(wrapper_init):
@wrapt.decorator
def wrapping_fn(unused_wrapped, instance, args, kwargs):
wrapper_init(instance, *args, **kwargs)
return wrapping_fn(fn_with_doc_to_copy)
return decorator
|
Returns a decorator to copy documentation from the given function.
Docstring is copied, including *args and **kwargs documentation.
Args:
fn_with_doc_to_copy: Function whose docstring, including *args and
**kwargs documentation, is to be copied.
Returns:
Decorated version of `wrapper_init` with documentation copied from
`fn_with_doc_to_copy`.
|
codesearchnet
|
def update_from_string(self, update_str: str):
d = dict((x.split('=') for x in update_str.split(',')))
for k, v in d.items():
if not hasattr(self, k):
raise ValueError(f"key {k} isn't in the original config dict")
old_v = getattr(self, k)
if isinstance(old_v, bool):
if v.lower() in ['true', '1', 'y', 'yes']:
v = True
elif v.lower() in ['false', '0', 'n', 'no']:
v = False
else:
raise ValueError(f"can't derive true or false from {v} (key {k})")
elif isinstance(old_v, int):
v = int(v)
elif isinstance(old_v, float):
v = float(v)
elif not isinstance(old_v, str):
raise TypeError(f'You can only update int, float, bool or string values in the config, got {v} for key {k}')
setattr(self, k, v)
|
Updates attributes of this class with attributes from `update_str`.
The expected format is ints, floats and strings as is, and for booleans use `true` or `false`. For example:
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
The keys to change have to already exist in the config object.
Args:
update_str (`str`): String with attributes that should be updated for this class.
|
github-repos
|
def createEditor(self, parent, option, index):
combo = QtGui.QComboBox(parent)
combo.addItems(SupportedDtypes.names())
combo.currentIndexChanged.connect(self.currentIndexChanged)
return combo
|
Creates an Editor Widget for the given index.
Enables the user to manipulate the displayed data in place. An editor
is created, which performs the change.
The widget used will be a `QComboBox` with all available datatypes in the
`pandas` project.
Args:
parent (QtCore.QWidget): Defines the parent for the created editor.
option (QtGui.QStyleOptionViewItem): contains all the information
that QStyle functions need to draw the items.
index (QtCore.QModelIndex): The item/index which shall be edited.
Returns:
QtGui.QWidget: the widget used to edit the item specified by index
for editing.
|
codesearchnet
|
def _example_short_number_for_cost(region_code, cost):
metadata = PhoneMetadata.short_metadata_for_region(region_code)
if metadata is None:
return U_EMPTY_STRING
desc = None
if cost == ShortNumberCost.TOLL_FREE:
desc = metadata.toll_free
elif cost == ShortNumberCost.STANDARD_RATE:
desc = metadata.standard_rate
elif cost == ShortNumberCost.PREMIUM_RATE:
desc = metadata.premium_rate
else:
pass
if desc is not None and desc.example_number is not None:
return desc.example_number
return U_EMPTY_STRING
|
Gets a valid short number for the specified cost category.
Arguments:
region_code -- the region for which an example short number is needed.
cost -- the cost category of number that is needed.
Returns a valid short number for the specified region and cost
category. Returns an empty string when the metadata does not contain such
information, or the cost is UNKNOWN_COST.
|
juraj-google-style
|
def tas53(msg):
d = hex2bin(data(msg))
if d[33] == '0':
return None
tas = bin2int(d[34:46]) * 0.5
return round(tas, 1)
|
Aircraft true airspeed, BDS 5,3 message
Args:
msg (String): 28 bytes hexadecimal message
Returns:
float: true airspeed in knots
|
juraj-google-style
|
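The `data`, `hex2bin`, and `bin2int` helpers above come from the surrounding library and are not shown. A self-contained sketch of the same BDS 5,3 true-airspeed field decode — status bit at index 33 of the 56-bit MB field, a 12-bit value at indices 34-46, 0.5 kt resolution — might look like this; the example field is fabricated:

def tas_from_mb_bits(mb_bits):
    # mb_bits is the 56-bit MB field as a '0'/'1' string, indexed as in tas53 above.
    if mb_bits[33] == '0':          # status bit cleared: no true airspeed available
        return None
    return round(int(mb_bits[34:46], 2) * 0.5, 1)

bits = ['0'] * 56
bits[33] = '1'                      # set the status bit
bits[34:46] = format(900, '012b')   # encode 900 * 0.5 kt
print(tas_from_mb_bits(''.join(bits)))   # 450.0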
class ConditionalDetrEncoder(ConditionalDetrPreTrainedModel):
def __init__(self, config: ConditionalDetrConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
self.layers = nn.ModuleList([ConditionalDetrEncoderLayer(config) for _ in range(config.encoder_layers)])
self.post_init()
def forward(self, inputs_embeds=None, attention_mask=None, object_queries=None, output_attentions=None, output_hidden_states=None, return_dict=None):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = inputs_embeds
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
if attention_mask is not None:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(hidden_states, attention_mask, object_queries=object_queries, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
|
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`ConditionalDetrEncoderLayer`].
The encoder updates the flattened feature map through multiple self-attention layers.
Small tweak for ConditionalDETR:
- object_queries are added to the forward pass.
Args:
config: ConditionalDetrConfig
|
github-repos
|
def split(x, axis=0):
from .function_bases import split as split_base
return split_base(x, axis, x.shape[axis])
|
Split arrays at the specified axis.
It returns a number corresponding the size of the given
axis (i.e ``x.shape[axis]``) of :obj:`~nnabla.Variable` s.
Args:
x(~nnabla.Variable): N-D array
axis(int): Axis
Returns: A :obj:`tuple` of :obj:`~nnabla.Variable` s
See Also:
:func:`nnabla.function_bases.split`.
|
codesearchnet
|
def _GetUncompressedStreamSize(self):
self._file_object.seek(0, os.SEEK_SET)
self._decompressor = self._GetDecompressor()
self._uncompressed_data = b''
compressed_data_offset = 0
compressed_data_size = self._file_object.get_size()
uncompressed_stream_size = 0
while (compressed_data_offset < compressed_data_size):
read_count = self._ReadCompressedData(self._COMPRESSED_DATA_BUFFER_SIZE)
if (read_count == 0):
break
compressed_data_offset += read_count
uncompressed_stream_size += self._uncompressed_data_size
return uncompressed_stream_size
|
Retrieves the uncompressed stream size.
Returns:
int: uncompressed stream size.
|
codesearchnet
|
def calculate_expiration(self, token):
if (not token):
return None
now = datetime.utcnow()
time_to_live = self.config['expiration']
if ('exp' not in token):
return (now + timedelta(seconds=time_to_live))
elif self.config['refresh']:
exp = datetime.utcfromtimestamp(token['exp'])
if ((exp - now) < timedelta(seconds=(0.5 * time_to_live))):
return (now + timedelta(seconds=time_to_live))
return None
|
Calculate token expiration
return expiration if the token need to set expiration or refresh,
otherwise return None.
Args:
token (dict): a decoded token
|
codesearchnet
|
def get_month_list(to_date, from_date):
num_months = get_months_apart(to_date, from_date)
month_offset = from_date.month
month_list = []
for month in range((month_offset - 1), (month_offset + num_months)):
year = (from_date.year + (month // 12))
real_month = ((month % 12) + 1)
month_list.append((year, real_month))
return month_list
|
Generate a list containing year+month between two dates.
Returns:
[(2013, 11), (2013, 12), (2014, 1)]
|
codesearchnet
|
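`get_months_apart` is not shown above. A self-contained sketch producing the same kind of (year, month) list with only the standard library (the dates are chosen to reproduce the docstring example):

from datetime import date

def month_list(from_date, to_date):
    # Walk month indices (year * 12 + month) inclusively between the two dates.
    start = from_date.year * 12 + (from_date.month - 1)
    end = to_date.year * 12 + (to_date.month - 1)
    return [(m // 12, m % 12 + 1) for m in range(start, end + 1)]

print(month_list(date(2013, 11, 5), date(2014, 1, 20)))
# [(2013, 11), (2013, 12), (2014, 1)]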
def __init__(self, num_evals, steps_per_run=1):
self._num_evals = num_evals
self._evals_completed = None
self._steps_per_run_initial_value = steps_per_run
|
Constructs the run hook.
Args:
num_evals: The number of evaluations to run for. if set to None, will
iterate the dataset until all inputs are exhausted.
steps_per_run: Number of steps executed per run call.
|
github-repos
|
def generate(self, output_path=None, in_memory=False):
result = (dict() if in_memory else 0)
logger.info('Generating Statik build...')
try:
if ((output_path is None) and (not in_memory)):
raise InternalError('If project is not to be generated in-memory, an output path must be specified')
self.error_context.update(filename=self.config_file_path)
self.config = (self.config or StatikConfig(self.config_file_path))
if (self.config.encoding is not None):
logger.debug('Using encoding: %s', self.config.encoding)
else:
logger.debug('Using encoding: %s', self.config.encoding)
self.error_context.clear()
self.models = self.load_models()
self.template_engine = StatikTemplateEngine(self)
if (self.config.external_database is not None):
self.config.external_database.write_files(output_path, self.models)
self.views = self.load_views()
if (not self.views):
raise NoViewsError()
self.db = self.load_db_data(self.models)
self.project_context = self.load_project_context()
in_memory_result = self.process_views()
if in_memory:
result = in_memory_result
else:
file_count = self.dump_in_memory_result(in_memory_result, output_path)
logger.info('Wrote %d output file(s) to folder: %s', file_count, output_path)
self.copy_assets(output_path)
result = file_count
logger.info('Success!')
except StatikError as exc:
logger.debug(traceback.format_exc())
logger.error(exc.render())
raise exc
except Exception as exc:
logger.debug(traceback.format_exc())
_exc = StatikError(message=('Failed to build project. Run Statik in verbose mode (-v) to see ' + 'additional traceback information about this error.'), orig_exc=exc, context=self.error_context)
logger.error(_exc.render())
raise _exc
finally:
try:
if (self.db is not None):
self.db.shutdown()
except Exception as e:
logger.exception('Unable to clean up properly: %s', e)
return result
|
Executes the Statik project generator.
Args:
output_path: The path to which to write output files.
in_memory: Whether or not to generate the results in memory. If True, this will
generate the output result as a dictionary. If False, this will write the output
to files in the output_path.
Returns:
If in_memory is True, this returns a dictionary containing the actual generated static
content. If in_memory is False, this returns an integer indicating the number of files
generated in the output path.
|
codesearchnet
|
def period_max_neighborhood_probability(self, threshold, radius, sigmas=None):
if (sigmas is None):
sigmas = [0]
weights = disk(radius)
neighborhood_prob = np.zeros(self.data.shape[2:], dtype=np.float32)
thresh_data = np.zeros(self.data.shape[2:], dtype=np.uint8)
for m in range(self.data.shape[0]):
thresh_data[(self.data[m].max(axis=0) >= threshold)] = 1
maximized = fftconvolve(thresh_data, weights, mode='same')
maximized[(maximized > 1)] = 1
neighborhood_prob += fftconvolve(maximized, weights, mode='same')
neighborhood_prob[(neighborhood_prob < 1)] = 0
neighborhood_prob /= (self.data.shape[0] * float(weights.sum()))
consensus_probs = []
for sigma in sigmas:
if (sigma > 0):
filtered_prob = gaussian_filter(neighborhood_prob, sigma=sigma)
else:
filtered_prob = neighborhood_prob
ec = EnsembleConsensus(filtered_prob, 'neighbor_prob_{0:02d}-hour_r_{1:d}_s_{2:d}'.format(self.data.shape[1], radius, sigma), self.ensemble_name, self.run_date, (self.variable + '_{0:0.2f}'.format(float(threshold))), self.start_date, self.end_date, '')
consensus_probs.append(ec)
return consensus_probs
|
Calculates the neighborhood probability of exceeding a threshold at any time over the period loaded.
Args:
threshold (float): splitting threshold for probability calculations
radius (int): distance from point in number of grid points to include in neighborhood calculation.
sigmas (array of ints): Radii for Gaussian filter used to smooth neighborhood probabilities.
Returns:
list of EnsembleConsensus objects
|
codesearchnet
|
def calculate(self, token_list_x, token_list_y):
x, y = self.unique(token_list_x, token_list_y)
try:
result = 2 * len(x & y) / float(sum(map(len, (x, y))))
except ZeroDivisionError:
result = 0.0
return result
|
Calculate similarity with the Dice coefficient.
Concrete method.
Args:
token_list_x: [token, token, token, ...]
token_list_y: [token, token, token, ...]
Returns:
Similarity.
|
juraj-google-style
|
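A minimal sketch of the same Dice computation outside the class, assuming the `unique` helper simply builds a set per token list (the standalone function below is hypothetical):
```python
def dice_similarity(token_list_x, token_list_y):
    # Unique tokens per side, mirroring the unique() helper used above.
    x, y = set(token_list_x), set(token_list_y)
    try:
        return 2 * len(x & y) / float(len(x) + len(y))
    except ZeroDivisionError:
        return 0.0

print(dice_similarity('a b c'.split(), 'b c d'.split()))  # 0.666...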
def __init__(self, *args, **kwargs):
super(IssueTransaction, self).__init__(*args, **kwargs)
self.Type = TransactionType.IssueTransaction
|
Create an instance.
Args:
*args:
**kwargs:
|
juraj-google-style
|
def get_tag(filepath: PurePath) -> Optional[Tag]:
with open(filepath, encoding='utf-8') as parsed_file:
lines = parsed_file.readlines()
line_start: Optional[int] = None
line_finish: Optional[int] = None
tag_prefix: Optional[str] = ''
for idx, line in enumerate(lines):
if line_start is None and line.endswith(Config.BEAM_PLAYGROUND_TITLE):
line_start = idx
prefix_len = len(line) - len(Config.BEAM_PLAYGROUND_TITLE)
tag_prefix = line[:prefix_len]
elif line_start and (not line.startswith(tag_prefix)):
line_finish = idx
break
if not line_start or not line_finish:
return None
embedded_yaml_content = ''.join((line[len(tag_prefix):] for line in lines[line_start:line_finish]))
yml = yaml.load(embedded_yaml_content, Loader=yaml.SafeLoader)
try:
return Tag(filepath=str(filepath), line_start=line_start, line_finish=line_finish, **yml[Config.BEAM_PLAYGROUND])
except pydantic.ValidationError as err:
if len(err.errors()) == 1 and err.errors()[0]['msg'] == 'multifile is True but no files defined':
logging.warning('incomplete multifile example ignored %s', filepath)
return None
raise
|
Parse the file at filepath and find the Beam Playground tag
Args:
filepath: path of the file
Returns:
If the file contains a tag, returns a Tag object
If the file doesn't contain a tag, returns None
|
github-repos
|
def single_gate_matrix(gate, params=None):
(theta, phi, lam) = map(float, single_gate_params(gate, params))
return np.array([[np.cos(theta / 2),
-np.exp(1j * lam) * np.sin(theta / 2)],
[np.exp(1j * phi) * np.sin(theta / 2),
np.exp(1j * phi + 1j * lam) * np.cos(theta / 2)]])
|
Get the matrix for a single qubit.
Args:
gate(str): the single qubit gate name
params(list): the operation parameters op['params']
Returns:
array: A numpy array representing the matrix
|
juraj-google-style
|
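As a quick check of the closed form above, the sketch below re-implements the matrix with explicit (theta, phi, lam) arguments (bypassing the `single_gate_params` lookup, which is not shown) and verifies that (pi, 0, pi) reproduces the Pauli-X gate:
```python
import numpy as np

def u3_matrix(theta, phi, lam):
    # Same 2x2 closed form as single_gate_matrix once the parameters are resolved.
    return np.array([[np.cos(theta / 2), -np.exp(1j * lam) * np.sin(theta / 2)],
                     [np.exp(1j * phi) * np.sin(theta / 2),
                      np.exp(1j * (phi + lam)) * np.cos(theta / 2)]])

x_gate = u3_matrix(np.pi, 0, np.pi)
print(np.round(x_gate.real, 6))  # [[0. 1.]
                                 #  [1. 0.]]
```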
def on_deleted(self, event):
if not self._event_error:
self.logger.info(u"Change detected from deletion of: %s",
event.src_path)
self.compile_dependencies(event.src_path, include_self=False)
|
Called when a file or directory is deleted.
Todo:
May be buggy with the inspector and Sass compiler since the file does
not exist anymore.
Args:
event: Watchdog event, ``watchdog.events.DirDeletedEvent`` or
``watchdog.events.FileDeletedEvent``.
|
juraj-google-style
|
def mark_point(img, x, y):
overlay = img.copy()
output = img.copy()
alpha = 0.5
radius = max(5, min(img.shape[:2]) // 15)  # at least 5 px, otherwise a fraction of the smaller image side
center = int(x), int(y)
color = (0, 0, 255)
cv2.circle(overlay, center, radius, color, -1)
cv2.addWeighted(overlay, alpha, output, 1-alpha, 0, output)
return output
|
Mark a point
Args:
- img(numpy): the source image
- x, y(int): position
|
juraj-google-style
|
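A short usage sketch for the overlay blending used above, assuming a blank BGR test image and a fixed radius; `cv2.circle` and `cv2.addWeighted` are the same OpenCV calls the function relies on:
```python
import numpy as np
import cv2

img = np.zeros((200, 300, 3), dtype=np.uint8)   # dummy BGR image
overlay, output = img.copy(), img.copy()
alpha, radius, center, color = 0.5, 10, (150, 100), (0, 0, 255)

cv2.circle(overlay, center, radius, color, -1)                  # filled red dot on the overlay
cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)   # blend at 50% opacity
cv2.imwrite('marked.png', output)
```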
def decrypt(self, message):
message = json.loads(message)
unencrypted_msg = []
for line in message:
enc_line = binascii.a2b_base64(line)
unencrypted_line = rsa.decrypt(enc_line, self.private_key)
unencrypted_msg.append(unencrypted_line)
unencrypted_msg = ''.join(unencrypted_msg)
return unencrypted_msg
|
Decrypts a string using our own private key object.
Args:
message (string): The string of the message to decrypt.
Returns:
The unencrypted string.
|
codesearchnet
|
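A round-trip sketch of the message format that decrypt() expects, assuming the `rsa` package and a throwaway key pair; the chunking and base64/JSON layering mirror the loop above, except that byte strings are joined here, whereas the original joins with '' in Python 2 style:
```python
import binascii
import json
import rsa

pub_key, priv_key = rsa.newkeys(512)

# Sender side: encrypt each chunk, base64-encode it, ship the chunks as a JSON list.
chunks = [b'hello ', b'world']
message = json.dumps([binascii.b2a_base64(rsa.encrypt(c, pub_key)).decode('ascii') for c in chunks])

# Receiver side: mirror of the decrypt() loop above.
recovered = b''.join(rsa.decrypt(binascii.a2b_base64(line), priv_key) for line in json.loads(message))
print(recovered)  # b'hello world'
```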
def moses_pipeline(self, text: str) -> List[str]:
text = self.moses_punct_norm(text)
text = self.moses_tokenize(text)
text = tokenize_numbers(text)
return text
|
Does basic tokenization using [`sacremoses.MosesPunctNormalizer`] and [`sacremoses.MosesTokenizer`] with
*aggressive_dash_splits=True* (see [`sacremoses.tokenize.MosesTokenizer.tokenize`]). Additionally, large
comma-separated numbers and floating point values are split. E.g. "23,000 people are 1.80m tall" -> "23 @,@ 000
people are 1 @.@ 80m tall"
Args:
text: Text to be tokenized
Returns:
A list of tokenized strings
Example:
```python
>>> tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl/transfo-xl-wt103")
>>> tokenizer.moses_pipeline("23,000 people are 1.80 m tall")
['23', '@,@', '000', 'people', 'are', '1', '@.@', '80', 'm', 'tall']
```
|
github-repos
|
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
if token_ids_1 is None:
return [1] + [0] * len(token_ids_0) + [1]
return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
|
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of ids.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Set to True if the token list is already formatted with special tokens for the model
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
|
github-repos
|
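A tiny sketch of the mask shapes the method produces, assuming the usual [CLS] ... [SEP] (... [SEP]) layout and ignoring the already_has_special_tokens branch:
```python
def special_tokens_mask(ids_0, ids_1=None):
    # [CLS] seq0 [SEP]             -> 1, 0...0, 1
    # [CLS] seq0 [SEP] seq1 [SEP]  -> 1, 0...0, 1, 0...0, 1
    if ids_1 is None:
        return [1] + [0] * len(ids_0) + [1]
    return [1] + [0] * len(ids_0) + [1] + [0] * len(ids_1) + [1]

print(special_tokens_mask([11, 12, 13]))            # [1, 0, 0, 0, 1]
print(special_tokens_mask([11, 12], [21, 22, 23]))  # [1, 0, 0, 1, 0, 0, 0, 1]
```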
def action_range_type(self) -> Sequence[str]:
fluents = self.domain.action_fluents
ordering = self.domain.action_fluent_ordering
return self._fluent_range_type(fluents, ordering)
|
The range type of each action fluent in canonical order.
Returns:
Sequence[str]: A tuple of range types representing
the range of each fluent.
|
codesearchnet
|
def enable(self, information, id_or_uri, timeout=-1):
uri = self._client.build_uri(id_or_uri)
return self._client.update(information, uri, timeout=timeout)
|
Enables or disables a range.
Args:
information (dict): Information to update.
id_or_uri: ID or URI of range.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Updated resource.
|
juraj-google-style
|
def read_from_hdx(identifier, configuration=None):
organization = Organization(configuration=configuration)
result = organization._load_from_hdx('organization', identifier)
if result:
return organization
return None
|
Reads the organization given by identifier from HDX and returns Organization object
Args:
identifier (str): Identifier of organization
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[Organization]: Organization object if successful read, None if not
|
codesearchnet
|
def get_default_configfile_path():
base = homebase.user_config_dir(app_author=CONF_AUTHOR, app_name=CONF_APP, roaming=False, use_virtualenv=False, create=False)
path = os.path.join(base, CONF_FILENAME)
return path
|
Return the default configuration-file path.
Typically returns a user-local configuration file; e.g:
``~/.config/dwave/dwave.conf``.
Returns:
str:
Configuration file path.
Examples:
This example displays the default configuration file on an Ubuntu Unix system
running IPython 2.7.
>>> import dwave.cloud as dc
>>> # Display paths
>>> dc.config.get_configfile_paths(only_existing=False) # doctest: +SKIP
['/etc/xdg/xdg-ubuntu/dwave/dwave.conf',
'/usr/share/upstart/xdg/dwave/dwave.conf',
'/etc/xdg/dwave/dwave.conf',
'/home/mary/.config/dwave/dwave.conf',
'./dwave.conf']
>>> # Find default configuration path
>>> dc.config.get_default_configfile_path() # doctest: +SKIP
'/home/mary/.config/dwave/dwave.conf'
|
codesearchnet
|
def get_config_value(self, overrides, skip_environment=False):
(label, override, key) = self._search_overrides(overrides, skip_environment)
if ((override is None) and (self.default is None) and self.required):
raise YapconfItemNotFound('Could not find config value for {0}'.format(self.fq_name), self)
if (override is None):
self.logger.debug('Config value not found for {0}, falling back to default.'.format(self.name))
value = self.default
else:
value = override[key]
if (value is None):
return value
converted_value = self.convert_config_value(value, label)
self._validate_value(converted_value)
return converted_value
|
Get the configuration value from all overrides.
Iterates over all overrides given to see if a value can be pulled
out from them. It will convert each of these values to ensure they
are the correct type.
Args:
overrides: A list of tuples where each tuple is a label and a
dictionary representing a configuration.
skip_environment: Skip looking through the environment.
Returns:
The converted configuration value.
Raises:
YapconfItemNotFound: If an item is required but could not be found
in the configuration.
YapconfItemError: If a possible value was found but the type
cannot be determined.
YapconfValueError: If a possible value is found but during
conversion, an exception was raised.
|
codesearchnet
|
def _parse_property(cls, name, value):
prop = cls._props.get(name)
return_value = value
if (not prop):
logger.debug(('"%s" with value "%s" is not a valid property for "%s".' % (name, value, cls)))
return_value = None
elif isinstance(prop, properties.Instance):
return_value = prop.instance_class.from_api(**value)
elif isinstance(prop, properties.List):
return_value = cls._parse_property_list(prop, value)
elif isinstance(prop, properties.Color):
return_value = cls._parse_property_color(value)
return return_value
|
Parse a property received from the API into an internal object.
Args:
name (str): Name of the property on the object.
value (mixed): The unparsed API value.
Raises:
HelpScoutValidationException: In the event that the property name
is not found.
Returns:
mixed: A value compatible with the internal models.
|
codesearchnet
|
def _parse_batch_lastlog(last_log):
regexp = re.compile('(-?[0-9]\\d*):\\W+(.*)')
wrong_commands = list()
for line in last_log:
result = regexp.match(line)
if (result is not None):
status_code = result.group(1)
command = result.group(2)
if (int(status_code) < 0):
wrong_commands.append((status_code, command))
return wrong_commands
|
This static method helps read the result of the commit, command by command.
Args:
last_log(list): A list containing, line by line, the result of committing the changes.
Returns:
A list of tuples that went wrong. The tuple will contain (*status_code*, *command*)
|
codesearchnet
|
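A self-contained sketch of the same status-line parsing, with made-up log lines to show which entries end up in the returned list:
```python
import re

def parse_batch_lastlog(last_log):
    regexp = re.compile(r'(-?[0-9]\d*):\W+(.*)')
    wrong_commands = []
    for line in last_log:
        result = regexp.match(line)
        if result is not None and int(result.group(1)) < 0:
            wrong_commands.append((result.group(1), result.group(2)))
    return wrong_commands

log = ['0:  set vlan 10 name ok', '-23:  set vlan 4095 name reserved']
print(parse_batch_lastlog(log))  # [('-23', 'set vlan 4095 name reserved')]
```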
def create_prefetch(self, addresses):
with self._lock:
for add in addresses:
self._state[add] = _ContextFuture(address=add,
wait_for_tree=True)
|
Create futures needed before starting the process of reading the
address's value from the merkle tree.
Args:
addresses (list of str): addresses in the txn's inputs that
aren't in any base context (or any in the chain).
|
juraj-google-style
|
def get_auth_token(self, user_payload):
now = datetime.utcnow()
payload = {
'user': user_payload
}
if 'iat' in self.verify_claims:
payload['iat'] = now
if 'nbf' in self.verify_claims:
payload['nbf'] = now + self.leeway
if 'exp' in self.verify_claims:
payload['exp'] = now + self.expiration_delta
if self.audience is not None:
payload['aud'] = self.audience
if self.issuer is not None:
payload['iss'] = self.issuer
return jwt.encode(
payload,
self.secret_key,
algorithm=self.algorithm,
json_encoder=ExtendedJSONEncoder).decode('utf-8')
|
Create a JWT authentication token from ``user_payload``
Args:
user_payload(dict, required): A `dict` containing the information
required to create the authentication token
|
juraj-google-style
|
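A stripped-down sketch of the same token construction using PyJWT directly, with a hypothetical secret and claim values (PyJWT 2.x returns a str, so no .decode() is needed):
```python
from datetime import datetime, timedelta, timezone
import jwt  # PyJWT

secret_key = 'change-me'  # hypothetical signing key
now = datetime.now(timezone.utc)
payload = {
    'user': {'id': 42, 'name': 'alice'},
    'iat': now,
    'nbf': now,
    'exp': now + timedelta(minutes=30),
}
token = jwt.encode(payload, secret_key, algorithm='HS256')
print(jwt.decode(token, secret_key, algorithms=['HS256'])['user'])  # {'id': 42, 'name': 'alice'}
```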
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
local_stream = BytearrayStream()
if self._cryptographic_parameters:
self._cryptographic_parameters.write(
local_stream,
kmip_version=kmip_version
)
if self._initialization_vector:
self._initialization_vector.write(
local_stream,
kmip_version=kmip_version
)
if self._derivation_data:
self._derivation_data.write(
local_stream,
kmip_version=kmip_version
)
if self._salt:
self._salt.write(
local_stream,
kmip_version=kmip_version
)
if self._iteration_count:
self._iteration_count.write(
local_stream,
kmip_version=kmip_version
)
self.length = local_stream.length()
super(DerivationParameters, self).write(
output_stream,
kmip_version=kmip_version
)
output_stream.write(local_stream.buffer)
|
Write the data encoding the DerivationParameters struct to a stream.
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
|
juraj-google-style
|
def get_bond_lengths(self, indices):
coords = ['x', 'y', 'z']
if isinstance(indices, pd.DataFrame):
i_pos = self.loc[(indices.index, coords)].values
b_pos = self.loc[(indices.loc[:, 'b'], coords)].values
else:
indices = np.array(indices)
if (len(indices.shape) == 1):
indices = indices[(None, :)]
i_pos = self.loc[(indices[:, 0], coords)].values
b_pos = self.loc[(indices[:, 1], coords)].values
return np.linalg.norm((i_pos - b_pos), axis=1)
|
Return the distances between given atoms.
Calculates the distance between the atoms with
indices ``i`` and ``b``.
The indices can be given in three ways:
* As simple list ``[i, b]``
* As list of lists: ``[[i1, b1], [i2, b2]...]``
* As :class:`pd.DataFrame` where ``i`` is taken from the index and
``b`` from the respective column ``'b'``.
Args:
indices (list): Atom index pairs, given in one of the forms above.
Returns:
:class:`numpy.ndarray`: Vector of distances between the given atom pairs.
|
codesearchnet
|
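The distance computation itself reduces to the numpy call below; the positions array stands in for the x/y/z columns of the frame, and the index pairs use the [[i1, b1], [i2, b2], ...] form:
```python
import numpy as np

positions = np.array([[0.0, 0.0, 0.0],   # atom 0
                      [0.0, 0.0, 1.1],   # atom 1
                      [1.0, 0.0, 0.0],   # atom 2
                      [0.0, 3.0, 4.0]])  # atom 3
indices = np.array([[0, 1], [0, 2], [0, 3]])

i_pos = positions[indices[:, 0]]
b_pos = positions[indices[:, 1]]
print(np.linalg.norm(i_pos - b_pos, axis=1))  # [1.1 1.  5. ]
```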
def matrix_worker(data):
matrix = data['matrix']
Logger.get_logger((__name__ + '.worker')).info("Processing pipeline for matrix entry '%s'", matrix['name'])
env = matrix['env'].copy()
env.update({'PIPELINE_MATRIX': matrix['name']})
pipeline = Pipeline(model=data['model'], env=env, options=data['options'])
pipeline.hooks = data['hooks']
return pipeline.process(data['pipeline'])
|
Run pipelines in parallel.
Args:
data(dict): parameters for the pipeline (model, options, ...).
Returns:
dict: with two fields: success True/False and captured output (list of str).
|
codesearchnet
|
def default_value(self):
if (callable(self.default) and self.call_default):
return self.default()
return self.default
|
Property to return the default value.
If the default value is callable and call_default is True, return
the result of default(). Else return default.
Returns:
object: the default value.
|
codesearchnet
|
class IncStdevTracker(WindowedTracker, StdevTracker):
def __init__(self, window_mode, **kwargs):
super().__init__(window_mode, **kwargs)
self._mean = 0
self._m2 = 0
def push(self, x):
if not math.isnan(x):
self._n += 1
delta1 = x - self._mean
else:
delta1 = 0
if self._window_mode == WindowMode.SLIDING:
if len(self._queue) >= self._window_size and (not math.isnan((old_x := self.pop()))):
self._n -= 1
delta2 = self._mean - old_x
else:
delta2 = 0
super().push(x)
else:
delta2 = 0
if self._n > 0:
self._mean += (delta1 + delta2) / self._n
if delta1 != 0:
self._m2 += delta1 * (x - self._mean)
if delta2 != 0:
self._m2 += delta2 * (old_x - self._mean)
else:
self._mean = 0
self._m2 = 0
def get(self):
if self._n < 2:
return float('nan')
dof = self._n - 1
return math.sqrt(self._m2 / dof)
|
Abstract base class for incremental standard deviation trackers.
This class implements an online algorithm for calculating standard deviation,
updating the standard deviation incrementally as new data points arrive.
Args:
window_mode: A `WindowMode` enum specifying whether the window is `LANDMARK`
or `SLIDING`.
**kwargs: Keyword arguments passed to the parent class constructor.
|
github-repos
|
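The landmark-mode update above is Welford's algorithm; a minimal standalone version (no windowing, no NaN handling) looks like this:
```python
import math

def incremental_stdev(values):
    n, mean, m2 = 0, 0.0, 0.0
    for x in values:
        n += 1
        delta = x - mean
        mean += delta / n           # running mean
        m2 += delta * (x - mean)    # running sum of squared deviations
    return float('nan') if n < 2 else math.sqrt(m2 / (n - 1))

print(incremental_stdev([2, 4, 4, 4, 5, 5, 7, 9]))  # ~2.138 (sample standard deviation)
```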