code (string, 20 to 4.93k chars) | docstring (string, 33 to 1.27k chars) | source (3 classes) |
---|---|---|
def __init__(self, model_file, input_arrays=None, input_shapes=None, output_arrays=None, custom_objects=None):
super(TFLiteKerasModelConverter, self).__init__(experimental_debug_info_func=None)
if context.executing_eagerly():
if input_arrays or output_arrays:
raise ValueError('`input_arrays` and `output_arrays` are unsupported with Eager mode. If your model requires any of these parameters, please use disable_eager_execution().')
keras_model = keras_deps.get_load_model_function()(model_file, custom_objects)
function = _trace_model_call(keras_model)
concrete_func = function.get_concrete_function()
frozen_func = _convert_to_constants.convert_variables_to_constants_v2(concrete_func, lower_control_flow=False)
_set_tensor_shapes(frozen_func.inputs, input_shapes)
self._keras_model = keras_model
self._graph_def = frozen_func.graph.as_graph_def()
self._input_tensors = frozen_func.inputs
self._output_tensors = frozen_func.outputs
self._debug_info_func = _build_debug_info_func(frozen_func.graph)
return
keras_deps.get_clear_session_function()()
keras_model = keras_deps.get_load_model_function()(model_file, custom_objects)
sess = keras_deps.get_get_session_function()()
if input_arrays:
input_tensors = _get_tensors_from_tensor_names(sess.graph, input_arrays)
else:
input_tensors = keras_model.inputs
if output_arrays:
output_tensors = _get_tensors_from_tensor_names(sess.graph, output_arrays)
else:
output_tensors = keras_model.outputs
_set_tensor_shapes(input_tensors, input_shapes)
graph_def = _freeze_graph(sess, input_tensors, output_tensors)
self._keras_model = keras_model
self._graph_def = graph_def
self._input_tensors = input_tensors
self._output_tensors = output_tensors
self._debug_info_func = _build_debug_info_func(sess.graph)
|
Constructor for TFLiteConverter.
Args:
model_file: Full filepath of HDF5 file containing the tf.keras model.
input_arrays: List of input tensors to freeze graph with. Uses input
arrays from SignatureDef when none are provided. (default None)
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" :
None}). (default None)
output_arrays: List of output tensors to freeze graph with. Uses output
arrays from SignatureDef when none are provided. (default None)
custom_objects: Dict mapping names (strings) to custom classes or
functions to be considered during model deserialization. (default None)
Raises:
ValueError: Invalid arguments.
|
github-repos
|
def set_storage(self, storage):
if isinstance(storage, BaseStorage):
self.storage = storage
elif isinstance(storage, dict):
if 'backend' not in storage and 'root_dir' in storage:
storage['backend'] = 'FileSystem'
try:
backend_cls = getattr(storage_package, storage['backend'])
except AttributeError:
try:
backend_cls = import_module(storage['backend'])
except ImportError:
self.logger.error('cannot find backend module %s',
storage['backend'])
sys.exit()
kwargs = storage.copy()
del kwargs['backend']
self.storage = backend_cls(**kwargs)
else:
raise TypeError('"storage" must be a storage object or dict')
|
Set storage backend for downloader
For full list of storage backend supported, please see :mod:`storage`.
Args:
storage (dict or BaseStorage): storage backend configuration or instance
|
juraj-google-style
|
def _projected_entity_to_message(ent, message_type):
msg = message_type()
analyzed = _analyze_indexed_fields(ent._projection)
for name, sublist in analyzed.iteritems():
prop = ent._properties[name]
val = prop._get_value(ent)
assert isinstance(prop, model.StructuredProperty) == bool(sublist)
if sublist:
field = message_type.field_by_name(name)
assert isinstance(field, messages.MessageField)
assert prop._repeated == field.repeated
if prop._repeated:
assert isinstance(val, list)
val = [_projected_entity_to_message(v, field.type) for v in val]
else:
assert isinstance(val, prop._modelclass)
val = _projected_entity_to_message(val, field.type)
setattr(msg, name, val)
return msg
|
Recursive helper for _from_base_type() to convert an entity to a message.
Args:
ent: A Model instance.
message_type: A Message subclass.
Returns:
An instance of message_type.
|
juraj-google-style
|
def call(self, hidden_states: tf.Tensor, attention_mask: Optional[tf.Tensor]=None, image_hidden_states: Optional[tf.Tensor]=None, image_attention_mask: Optional[tf.Tensor]=None, cross_attention_gate: Optional[tf.Tensor]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, past_key_value: Optional[Tuple[tf.Tensor]]=None) -> Tuple[tf.Tensor, Optional[Tuple[tf.Tensor, tf.Tensor]]]:
if image_hidden_states is None:
raise ValueError('`image_hidden_states` is required for Idefics cross attention module which are visual features to be conditioned on.')
if cross_attention_gate is None:
raise ValueError('`cross_attention_gate` is required for Idefics cross attention module to zero-out the cross-attention hidden_states attending to no images.')
if past_key_value is not None:
raise NotImplementedError('Past key value states are not implemented for Idefics cross attention module.')
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, self_attn_weights, present_key_value = self.cross_attn(hidden_states=hidden_states, key_value_states=image_hidden_states, attention_mask=image_attention_mask, output_attentions=output_attentions)
hidden_states = tf.nn.dropout(hidden_states, rate=self.config)
mask = tf.cast(cross_attention_gate == 0, dtype=hidden_states.dtype)
mask = tf.expand_dims(mask, -1)
hidden_states = tf.where(tf.broadcast_to(mask, tf.shape(hidden_states)) == 1, tf.zeros_like(hidden_states), hidden_states)
hidden_states = residual + self.act_cross_attn(self.alpha_cross_attn) * hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = tf.nn.dropout(hidden_states, rate=self.config)
hidden_states = residual + self.act_dense(self.alpha_dense) * hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (present_key_value,)
return outputs
|
Args:
hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`tf.Tensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(tf.Tensor)`, *optional*): cached past key and value projection states
image_hidden_states (`tf.Tensor`): visual features the layer cross-attends to
image_attention_mask (`tf.Tensor`, *optional*): attention mask for the image tokens
cross_attention_gate (`tf.Tensor`): gate used to zero out the cross-attention output for tokens attending to no images
|
github-repos
|
def assertNotAllClose(self, a, b, rtol=1e-06, atol=1e-06, msg=None):
try:
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
except AssertionError:
return
msg = msg or ''
raise AssertionError('The two values are close at all elements. %s' % msg)
|
Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested structure
of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested structure
of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
|
github-repos
|
def iter(self, keyed=False, extended=False):
if self.closed:
message = 'Stream is closed. Please call "stream.open()" first.'
raise exceptions.TabulatorException(message)
iterator = chain(self.__sample_extended_rows, self.__parser.extended_rows)
iterator = self.__apply_processors(iterator)
for (row_number, headers, row) in iterator:
if (row_number > self.__row_number):
self.__row_number = row_number
if extended:
(yield (row_number, headers, row))
elif keyed:
(yield dict(zip(headers, row)))
else:
(yield row)
|
Iterate over the rows.
Each row is returned in a format that depends on the arguments `keyed`
and `extended`. By default, each row is returned as a list of its
values.
Args:
keyed (bool, optional): When True, each returned row will be a
`dict` mapping the header name to its value in the current row.
For example, `[{'name': 'J Smith', 'value': '10'}]`. Ignored if
``extended`` is True. Defaults to False.
extended (bool, optional): When True, returns each row as a tuple
with row number (starts at 1), list of headers, and list of row
values. For example, `(1, ['name', 'value'], ['J Smith', '10'])`.
Defaults to False.
Returns:
Iterator[Union[List[Any], Dict[str, Any], Tuple[int, List[str], List[Any]]]]:
The row itself. The format depends on the values of `keyed` and
`extended` arguments.
Raises:
exceptions.TabulatorException: If the stream is closed.
|
codesearchnet
|
def has_error(self):
return next((True for cr in self.component_results if cr.has_error()), False)
|
Returns whether there was a business logic error when fetching data
for any components for this property.
Returns:
boolean
|
codesearchnet
|
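The one-liner above leans on `next()` with a generator and a default: it yields True for the first component result that reports an error and falls back to False when none do. A minimal, self-contained sketch of the same idiom, using made-up (name, error) tuples instead of the real component result objects:

# Hypothetical component results: (name, error), where error is None when the fetch succeeded.
component_results = [('pricing', None), ('inventory', 'timeout')]

# True as soon as one component reports an error, False if none do.
has_error = next((True for name, error in component_results if error is not None), False)
print(has_error)  # True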
def match(self, patterns, limits=None):
if limits is None:
limits = [None] * len(patterns)
else:
err_msg = 'Patterns and limits should be equal in length'
assert len(patterns) == len(limits), err_msg
def _match(pattern, limit):
if pattern.endswith('/') or pattern.endswith('\\'):
pattern += '*'
prefix_or_dir = re.match('^[^[*?]*', pattern).group(0)
file_metadatas = []
if prefix_or_dir == pattern:
if self.exists(pattern):
file_metadatas = [self.metadata(pattern)]
else:
if self.has_dirs():
prefix_dirname = self._url_dirname(prefix_or_dir)
if not prefix_dirname == prefix_or_dir:
logger.debug('Changed prefix_or_dir %r -> %r', prefix_or_dir, prefix_dirname)
prefix_or_dir = prefix_dirname
logger.debug('Listing files in %r', prefix_or_dir)
file_metadatas = self._list(prefix_or_dir)
metadata_list = []
for file_metadata in self.match_files(file_metadatas, pattern):
if limit is not None and len(metadata_list) >= limit:
break
metadata_list.append(file_metadata)
return MatchResult(pattern, metadata_list)
exceptions = {}
result = []
for pattern, limit in zip(patterns, limits):
try:
result.append(_match(pattern, limit))
except Exception as e:
exceptions[pattern] = e
if exceptions:
raise BeamIOError('Match operation failed', exceptions)
return result
|
Find all matching paths to the patterns provided.
See Also:
:meth:`translate_pattern`
Patterns ending with '/' or '\' will be appended with '*'.
Args:
patterns: list of string for the file path pattern to match against
limits: list of maximum number of responses that need to be fetched
Returns: list of ``MatchResult`` objects.
Raises:
``BeamIOError``: if any of the pattern match operations fail
|
github-repos
|
def AddDateTimeRange(self, time_value, start_time_string=None, end_time_string=None):
if (not isinstance(time_value, py2to3.STRING_TYPES)):
raise ValueError('Filter type must be a string.')
if ((start_time_string is None) and (end_time_string is None)):
raise ValueError('Filter must have either a start or an end date time value.')
time_value_lower = time_value.lower()
if (time_value_lower not in self._SUPPORTED_TIME_VALUES):
raise ValueError('Unsupported time value: {0:s}.'.format(time_value))
start_date_time = None
if start_time_string:
start_date_time = time_elements.TimeElementsInMicroseconds()
start_date_time.CopyFromDateTimeString(start_time_string)
end_date_time = None
if end_time_string:
end_date_time = time_elements.TimeElementsInMicroseconds()
end_date_time.CopyFromDateTimeString(end_time_string)
if ((None not in (start_date_time, end_date_time)) and (start_date_time > end_date_time)):
raise ValueError('Invalid date time value start must be earlier than end.')
self._date_time_ranges.append(self._DATE_TIME_RANGE_TUPLE(time_value_lower, start_date_time, end_date_time))
|
Adds a date time filter range.
The time strings are formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds fraction
and timezone offset are optional. The default timezone is UTC.
Args:
time_value (str): time value, such as atime, ctime, crtime, dtime, bkup
and mtime.
start_time_string (str): start date and time value string.
end_time_string (str): end date and time value string.
Raises:
ValueError: If the filter is badly formed.
|
codesearchnet
|
def Unregister(self, name):
precondition.AssertType(name, Text)
try:
del self._constructors[name]
except KeyError:
raise ValueError("Constructor with name '%s' is not registered" % name)
|
Unregisters a constructor.
Args:
name: A name of the constructor to unregister.
Raises:
ValueError: If constructor with specified name has never been registered.
|
juraj-google-style
|
def _construct_linebreak_token(self, d: Dict) -> List[Dict]:
result = []
num_break = int(d["length"][0]) if d["length"] else 1
if num_break:
s = ''
for i in range(num_break):
s += '\n'
this_token = {attrs.LOWER: s}
result.append(this_token)
s += ' '
this_token = {attrs.LOWER: s}
result.append(this_token)
result = self._add_common_constrain(result, d)
return result
|
Construct a linebreak token
Args:
d: Dict
Returns: List[Dict]
|
juraj-google-style
|
def frombase(path1, path2):
if (not isparent(path1, path2)):
raise ValueError('path1 must be a prefix of path2')
return path2[len(path1):]
|
Get the final path of ``path2`` that isn't in ``path1``.
Arguments:
path1 (str): A PyFilesytem path.
path2 (str): A PyFilesytem path.
Returns:
str: the final part of ``path2``.
Example:
>>> frombase('foo/bar/', 'foo/bar/baz/egg')
'baz/egg'
|
codesearchnet
|
def singleOrPair(obj):
if len(list(obj.__class__.__mro__)) <= 2:
return 'Neither'
else:
if ancestorJr(obj) is Pair:
return 'Pair'
elif ancestor(obj) is Single:
return 'Single'
else:
return 'Neither'
|
Check whether an object is single, pair, or neither.
Of course, all pairs are single, so what the function really detects is whether an object is only single or also a pair.
Args:
obj (object): Literally anything.
Returns:
str: 'Single', or 'Pair', or 'Neither'
|
juraj-google-style
|
def coarse_grain(G, ncg):
if (ncg <= 1):
return G
G = numpy.asarray(G)
(nbin, remainder) = divmod(G.shape[(- 1)], ncg)
if (remainder != 0):
nbin += 1
return numpy.transpose([(numpy.sum(G[(..., i:(i + ncg))], axis=(- 1)) / G[(..., i:(i + ncg))].shape[(- 1)]) for i in numpy.arange(0, (ncg * nbin), ncg)])
|
Coarse-grain last index of array ``G``.
Bin the last index of array ``G`` in bins of width ``ncg``, and
replace each bin by its average. Return the binned results.
Args:
G: Array to be coarse-grained.
ncg: Bin width for coarse-graining.
|
codesearchnet
|
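For intuition about the binning, the same averaging can be reproduced with plain numpy (this is a sketch of the logic rather than a call into the package above); note that a trailing partial bin is averaged over its actual length:

import numpy as np

G = np.arange(10, dtype=float)   # last axis has length 10
ncg = 4                          # bin width: two full bins plus one partial bin

# Average each bin of width ncg along the last axis, keeping the partial bin.
bins = [G[..., i:i + ncg].mean(axis=-1) for i in range(0, G.shape[-1], ncg)]
print(np.transpose(bins))        # [1.5 5.5 8.5]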
def match(self, request):
if (self._times <= 0):
raise PookExpiredMock('Mock expired')
for test in self.filters:
if (not test(request, self)):
return (False, [])
for mapper in self.mappers:
request = mapper(request, self)
if (not request):
raise ValueError('map function must return a request object')
(matches, errors) = self.matchers.match(request)
if (not matches):
return (False, errors)
self._calls.append(request)
self._matches += 1
if (not self._persist):
self._times -= 1
if self._error:
raise self._error
for callback in self.callbacks:
callback(request, self)
return (True, [])
|
Matches an outgoing HTTP request against the current mock matchers.
This method acts like a delegator to `pook.MatcherEngine`.
Arguments:
request (pook.Request): request instance to match.
Raises:
Exception: if the mock has an exception defined.
Returns:
tuple(bool, list[Exception]): ``True`` if the mock matches
the outgoing HTTP request, otherwise ``False``. Also returns
an optional list of error exceptions.
|
codesearchnet
|
def get_content_field(self, name):
fields = self._content.findall(name)
if not fields:
return None
elif len(fields) == 1:
return etree_to_dict(fields[0])[name]
else:
return [etree_to_dict(field)[name] for field in fields]
|
Get the contents of a specific subtag from Clusterpoint Storage's response's content tag.
Args:
name -- A name string of the content's subtag to be returned.
Returns:
A dict representing the contents of the specified field or a list of dicts
if there are multiple fields with that tag name. Returns None if no field found.
|
juraj-google-style
|
def cs20(msg):
chars = '#ABCDEFGHIJKLMNOPQRSTUVWXYZ#####_###############0123456789######'
d = hex2bin(data(msg))
cs = ''
cs += chars[bin2int(d[8:14])]
cs += chars[bin2int(d[14:20])]
cs += chars[bin2int(d[20:26])]
cs += chars[bin2int(d[26:32])]
cs += chars[bin2int(d[32:38])]
cs += chars[bin2int(d[38:44])]
cs += chars[bin2int(d[44:50])]
cs += chars[bin2int(d[50:56])]
return cs
|
Aircraft callsign
Args:
msg (String): 28-character hexadecimal message string (BDS20)
Returns:
string: callsign, max. 8 chars
|
juraj-google-style
|
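For context, the callsign is packed as eight 6-bit indices into the ICAO character set, following the 8-bit BDS code. A self-contained sketch of that decoding; `decode_callsign`, the simplified bit handling and the example message are illustrative stand-ins for the library's `data`/`hex2bin`/`bin2int` helpers, not the library API:

# ICAO 6-bit character set; '#' marks unused code points.
CHARS = '#ABCDEFGHIJKLMNOPQRSTUVWXYZ#####_###############0123456789######'

def decode_callsign(me_bits):
    # me_bits: 56-bit binary string of the message element field.
    out = []
    for i in range(8, 56, 6):    # eight 6-bit characters after the 8-bit BDS code
        out.append(CHARS[int(me_bits[i:i + 6], 2)])
    return ''.join(out).replace('#', '').strip('_')

# 'KLM1023_' encoded as 6-bit indices behind the BDS 2,0 code (made-up message element).
example = '00100000' + ''.join(format(CHARS.index(c), '06b') for c in 'KLM1023_')
print(decode_callsign(example))  # KLM1023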
def _DictToListOfStrings(self, data_dict):
ret_list = []
for (key, value) in iter(data_dict.items()):
if (key in ('body', 'datetime', 'type', 'room', 'rooms', 'id')):
continue
ret_list.append('{0:s} = {1!s}'.format(key, value))
return ret_list
|
Converts a dictionary into a list of strings.
Args:
data_dict (dict[str, object]): dictionary to convert.
Returns:
list[str]: list of strings.
|
codesearchnet
|
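A quick, throwaway illustration of the formatting: every key/value pair outside the skipped keys becomes a 'key = value' string (`dict_to_list_of_strings` and the sample dict are hypothetical, mirroring the method above):

SKIP_KEYS = ('body', 'datetime', 'type', 'room', 'rooms', 'id')

def dict_to_list_of_strings(data_dict):
    # Keep only keys outside SKIP_KEYS, formatted as 'key = value'.
    return ['{0:s} = {1!s}'.format(key, value)
            for key, value in data_dict.items()
            if key not in SKIP_KEYS]

print(dict_to_list_of_strings({'id': 7, 'sender': 'alice', 'count': 3}))
# ['sender = alice', 'count = 3']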
def average_datetimes(dt_list):
if (sys.version_info < (3, 3)):
import time
def timestamp_func(dt):
return time.mktime(dt.timetuple())
else:
timestamp_func = datetime.timestamp
total = [timestamp_func(dt) for dt in dt_list]
return datetime.fromtimestamp((sum(total) / len(total)))
|
Average a series of datetime objects.
.. note::
This function assumes all datetime objects are naive and in the same
time zone (UTC).
Args:
dt_list (iterable): Datetime objects to average
Returns: Average datetime as a datetime object
|
codesearchnet
|
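On Python 3 the whole function reduces to the `datetime.timestamp` branch; a minimal sketch of that path with two made-up naive datetimes (assumes no UTC-offset change falls between them):

from datetime import datetime

dt_list = [datetime(2024, 1, 1), datetime(2024, 1, 3)]

# Average the POSIX timestamps, then convert back to a naive local datetime.
avg = datetime.fromtimestamp(sum(dt.timestamp() for dt in dt_list) / len(dt_list))
print(avg)  # 2024-01-02 00:00:00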
def MapByteStream(
self, byte_stream, byte_offset=0, context=None, **unused_kwargs):
data_type_size = self._data_type_definition.GetByteSize()
self._CheckByteStreamSize(byte_stream, byte_offset, data_type_size)
try:
struct_tuple = self._operation.ReadFrom(byte_stream[byte_offset:])
mapped_value = self.MapValue(*struct_tuple)
except Exception as exception:
error_string = (
'Unable to read: {0:s} from byte stream at offset: {1:d} '
'with error: {2!s}').format(
self._data_type_definition.name, byte_offset, exception)
raise errors.MappingError(error_string)
if context:
context.byte_size = data_type_size
return mapped_value
|
Maps the data type on a byte stream.
Args:
byte_stream (bytes): byte stream.
byte_offset (Optional[int]): offset into the byte stream where to start.
context (Optional[DataTypeMapContext]): data type map context.
Returns:
object: mapped value.
Raises:
MappingError: if the data type definition cannot be mapped on
the byte stream.
|
juraj-google-style
|
def constant_time_string_compare(a, b):
try:
return hmac.compare_digest(a, b)
except AttributeError:
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
|
Helper for comparing strings in constant time, independent
of the Python version being used.
Args:
a (str): A string to compare
b (str): A string to compare
|
juraj-google-style
|
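On any interpreter that ships `hmac.compare_digest` the fast path is taken; the manual XOR loop only exists for older Pythons. A short usage sketch with made-up tokens:

import hmac

# Constant-time comparison avoids leaking how many leading characters match.
print(hmac.compare_digest('secret-token', 'secret-token'))  # True
print(hmac.compare_digest('secret-token', 'secret-tokem'))  # False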
def show_history(self, status=None, nids=None, full_history=False, metadata=False):
(nrows, ncols) = get_terminal_size()
works_done = []
for task in self.iflat_tasks(status=status, nids=nids):
work = task.work
if (work not in works_done):
works_done.append(work)
if (work.history or full_history):
cprint(make_banner(str(work), width=ncols, mark='='), **work.status.color_opts)
print(work.history.to_string(metadata=metadata))
if (task.history or full_history):
cprint(make_banner(str(task), width=ncols, mark='='), **task.status.color_opts)
print(task.history.to_string(metadata=metadata))
if (self.history or full_history):
cprint(make_banner(str(self), width=ncols, mark='='), **self.status.color_opts)
print(self.history.to_string(metadata=metadata))
|
Print the history of the flow to stdout.
Args:
status: if not None, only the tasks with this status are selected
full_history: Print full info set, including nodes with an empty history.
nids: optional list of node identifiers used to filter the tasks.
metadata: print history metadata (experimental)
|
codesearchnet
|
def _extract_direct(self, *, stream):
def normal_dct_rgb():
DEFAULT_CT_RGB = 1
ct = self.filter_decodeparms[0][1].get('/ColorTransform', DEFAULT_CT_RGB)
return self.mode == 'RGB' and ct == DEFAULT_CT_RGB
def normal_dct_cmyk():
DEFAULT_CT_CMYK = 0
ct = self.filter_decodeparms[0][1].get('/ColorTransform', DEFAULT_CT_CMYK)
return self.mode == 'CMYK' and ct == DEFAULT_CT_CMYK
if self.filters == ['/CCITTFaxDecode']:
data = self.obj.read_raw_bytes()
stream.write(self._generate_ccitt_header(data))
stream.write(data)
return '.tif'
elif self.filters == ['/DCTDecode'] and (
self.mode == 'L' or normal_dct_rgb() or normal_dct_cmyk()
):
buffer = self.obj.get_raw_stream_buffer()
stream.write(buffer)
return '.jpg'
raise UnsupportedImageTypeError()
|
Attempt to extract the image directly to a usable image file
If there is no way to extract the image without decompressing or
transcoding then raise an exception. The type and format of image
generated will vary.
Args:
stream: Writable stream to write data to
|
juraj-google-style
|
def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int:
if not hasattr(self, 'warnings_issued'):
self.warnings_issued = {}
if self.main_input_name in input_dict:
return input_dict[self.main_input_name].numel()
elif 'estimate_tokens' not in self.warnings_issued:
logger.warning('Could not estimate the number of tokens of the input, floating-point operations will not be computed')
self.warnings_issued['estimate_tokens'] = True
return 0
|
Helper function to estimate the total number of tokens from the model inputs.
Args:
inputs (`dict`): The model inputs.
Returns:
`int`: The total number of tokens.
|
github-repos
|
def is_truthy(value, default=False):
if value is None:
return False
if isinstance(value, bool):
return value
if isinstance(value, int):
return value > 0
trues = ('1', 'true', 'y', 'yes', 'ok')
falses = ('', '0', 'false', 'n', 'none', 'no')
if value.lower().strip() in falses:
return False
elif value.lower().strip() in trues:
return True
else:
if default:
return default
else:
raise ValueError('Invalid argument given to truthy: {0}'.format(value))
|
Evaluate a value for truthiness
>>> is_truthy('Yes')
True
>>> is_truthy('False')
False
>>> is_truthy(1)
True
Args:
value (Any): Value to evaluate
default (bool): Optional default value, if the input does not match the true or false values
Returns:
True if a truthy value is passed, else False
|
juraj-google-style
|
def Verify(self, mempool):
if not super(ClaimTransaction, self).Verify(mempool):
return False
otherclaimTxs = [tx for tx in mempool if isinstance(tx, ClaimTransaction) and tx is not self]
for other in otherclaimTxs:
if any(other_claim in self.Claims for other_claim in other.Claims):
return False
txResult = None
for tx in self.GetTransactionResults():
if tx.AssetId == Blockchain.SystemCoin().Hash:
txResult = tx
break
if txResult is None or txResult.Amount > Fixed8(0):
return False
try:
return Blockchain.CalculateBonusIgnoreClaimed(self.Claims, False) == -txResult.Amount
except Exception as e:
logger.error('Could not calculate bonus: %s ' % e)
return False
|
Verify the transaction.
Args:
mempool: the current memory pool of pending transactions, checked for conflicting claims.
Returns:
bool: True if verified. False otherwise.
|
juraj-google-style
|
def __recv(self, size=4096):
data = self.socket.recv(size)
if (not data):
raise NNTPError('Failed to read from socket')
self.__buffer.write(data)
|
Reads data from the socket.
Raises:
NNTPError: When connection times out or read from socket fails.
|
codesearchnet
|
def categorize(values, categories, default=None):
uniq_cats = list(unique_iterator(values))
cats = []
for c in values:
if isinstance(categories, list):
cat_ind = uniq_cats.index(c)
if cat_ind < len(categories):
cat = categories[cat_ind]
else:
cat = default
else:
cat = categories.get(c, default)
cats.append(cat)
return np.asarray(cats)
|
Maps discrete values to supplied categories.
Replaces discrete values in input array with a fixed set of
categories defined either as a list or dictionary.
Args:
values: Array of values to be categorized
categories: List or dict of categories to map inputs to
default: Default value to assign if value not in categories
Returns:
Array of categorized values
|
juraj-google-style
|
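A small, self-contained illustration of the two mapping modes (plain Python/numpy mirroring the logic above rather than importing the library; the sample values and categories are made up):

import numpy as np

values = ['b', 'a', 'b', 'c']

# Dict form: direct lookup, with a default for unseen values.
mapping = {'a': 1, 'b': 2}
print(np.asarray([mapping.get(v, 0) for v in values]))  # [2 1 2 0]

# List form: categories are assigned in order of first appearance ('b' first, then 'a', then 'c').
uniq = list(dict.fromkeys(values))                      # ['b', 'a', 'c']
categories = [10, 20]
print(np.asarray([categories[uniq.index(v)] if uniq.index(v) < len(categories) else -1
                  for v in values]))                    # [10 20 10 -1]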
def content(self, request, id):
gist = self.send(request, id).json()
def convert(data):
return base64.b64decode(data).decode('utf-8')
content = {}
for name, data in gist['files'].items():
content[name] = convert(data['content'])
return content
|
Returns the content of the gist
Arguments:
request: an initial request object
id: the gist identifier
Returns:
A dict containing the contents of each file in the gist
|
juraj-google-style
|
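The per-file step is just a base64 decode of each entry under `files`; a sketch of that step with a fabricated gist-shaped payload (the structure and sample data are assumptions for illustration, not the GitHub API response verbatim):

import base64

gist = {'files': {'hello.txt': {'content': base64.b64encode(b'hello world').decode('ascii')}}}

content = {name: base64.b64decode(data['content']).decode('utf-8')
           for name, data in gist['files'].items()}
print(content)  # {'hello.txt': 'hello world'}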
def from_structure(cls, structure, ff_elements=None, atom_style="charge"):
s = structure.get_sorted_structure()
box, symmop = lattice_2_lmpbox(s.lattice)
coords = symmop.operate_multi(s.cart_coords)
site_properties = s.site_properties
if "velocities" in site_properties:
velos = np.array(s.site_properties["velocities"])
rot = SymmOp.from_rotation_and_translation(symmop.rotation_matrix)
rot_velos = rot.operate_multi(velos)
site_properties.update({"velocities": rot_velos})
boxed_s = Structure(box.to_lattice(), s.species, coords,
site_properties=site_properties,
coords_are_cartesian=True)
symbols = list(s.symbol_set)
if ff_elements:
symbols.extend(ff_elements)
elements = sorted(Element(el) for el in set(symbols))
mass_info = [tuple([i.symbol] * 2) for i in elements]
ff = ForceField(mass_info)
topo = Topology(boxed_s)
return cls.from_ff_and_topologies(box=box, ff=ff, topologies=[topo],
atom_style=atom_style)
|
Simple constructor building LammpsData from a structure without
force field parameters and topologies.
Args:
structure (Structure): Input structure.
ff_elements ([str]): List of strings of elements that must
be present due to force field settings but not
necessarily in the structure. Default to None.
atom_style (str): Choose between "atomic" (neutral) and
"charge" (charged). Default to "charge".
|
juraj-google-style
|
def _apply_credentials(auto_refresh=True, credentials=None,
headers=None):
token = credentials.get_credentials().access_token
if auto_refresh is True:
if token is None:
token = credentials.refresh(
access_token=None, timeout=10)
elif credentials.jwt_is_expired():
token = credentials.refresh(timeout=10)
headers.update(
{'Authorization': "Bearer {}".format(token)}
)
|
Update Authorization header.
Update request headers with latest `access_token`. Perform token
`refresh` if token is ``None``.
Args:
auto_refresh (bool): Perform token refresh if access_token is ``None`` or expired. Defaults to ``True``.
credentials (class): Read-only credentials.
headers (class): Requests `CaseInsensitiveDict`.
|
juraj-google-style
|
def strace_data_access_event(self, operation, address, data, data_mask=None, access_width=4, address_range=0):
cmd = enums.JLinkStraceCommand.TRACE_EVENT_SET
event_info = structs.JLinkStraceEventInfo()
event_info.Type = enums.JLinkStraceEvent.DATA_ACCESS
event_info.Op = operation
event_info.AccessSize = int(access_width)
event_info.Addr = int(address)
event_info.Data = int(data)
event_info.DataMask = int((data_mask or 0))
event_info.AddrRangeSize = int(address_range)
handle = self._dll.JLINK_STRACE_Control(cmd, ctypes.byref(event_info))
if (handle < 0):
raise errors.JLinkException(handle)
return handle
|
Sets an event to trigger trace logic when data access is made.
Data access corresponds to either a read or write.
Args:
self (JLink): the ``JLink`` instance.
operation (int): one of the operations in ``JLinkStraceOperation``.
address (int): the address of the load/store data.
data (int): the data to be compared the event data to.
data_mask (int): optional bitmask specifying bits to ignore in
comparison.
access_width (int): optional access width for the data.
address_range (int): optional range of address to trigger event on.
Returns:
An integer specifying the trace event handle. This handle should be
retained in order to clear the event at a later time.
Raises:
JLinkException: on error.
|
codesearchnet
|
def sg_any(tensor, opt):
return tf.reduce_any(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
|
r"""Computes the "logical or" of elements across axis of a tensor.
See `tf.reduce_any()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
|
juraj-google-style
|
def _multi_worker_session(kwargs):
strategy = None
for _, v in kwargs.items():
if isinstance(v, distribute_lib.StrategyBase):
if strategy is not None:
logging.warning('The test uses multiple strategies. Skipping entering a session that is configured for the strategy.')
return ops.NullContextmanager()
strategy = v
if context.executing_eagerly() or not isinstance(strategy, collective_all_reduce_strategy.CollectiveAllReduceStrategy):
return ops.NullContextmanager()
sess_config = copy.deepcopy(context.context().config)
sess_config = strategy.update_config_proto(sess_config)
target = strategy.cluster_resolver.master()
return session.Session(config=sess_config, target=target).as_default()
|
Returns a context manager that enters a session that is configured for the MultiWorkerMirroredStrategy.
Args:
kwargs: a dict. Keyword arguments passed to the test.
Returns:
A context manager. If MultiWorkerMirroredStrategy is the one and only one
strategy in kwargs and it's in graph mode, it's the session that is
configured for that strategy. Otherwise, it's a no-op context manager.
|
github-repos
|
def _parse_mtu(self, config):
match = re.search('mtu (\\d+)', config)
return dict(mtu=int(match.group(1)))
|
Parses the config block and returns the configured IP MTU value
The provided configuration block is scanned and the configured value
for the IP MTU is returned as a dict object. The IP MTU value is
expected to always be present in the provided config block
Args:
config (str): The interface configuration block to parse
Return:
dict: A dict object intended to be merged into the resource dict
|
codesearchnet
|
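The parse is a single regex over the interface block; a quick sketch against a fabricated configuration snippet:

import re

config = 'interface Ethernet1\n   mtu 9214\n   no shutdown\n'

match = re.search(r'mtu (\d+)', config)
print(dict(mtu=int(match.group(1))))  # {'mtu': 9214}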
def print_schema_results(results, level=0):
for error in results.errors:
print_level(logger.error, _RED + "[X] %s", level, error)
|
Print JSON Schema validation errors to stdout.
Args:
results: An instance of ObjectValidationResults.
level: The level at which to print the results.
|
juraj-google-style
|
def get_id_transcripts(self, hgnc_id, build='37'):
transcripts = self.transcripts(build=build, hgnc_id=hgnc_id)
identifier_transcripts = set()
longest = None
nr = []
xm = []
for tx in transcripts:
enst_id = tx['transcript_id']
if not longest:
longest = enst_id
refseq_id = tx.get('refseq_id')
if not refseq_id:
continue
if 'NM' in refseq_id:
identifier_transcripts.add(enst_id)
elif 'NR' in refseq_id:
nr.append(enst_id)
elif 'XM' in refseq_id:
xm.append(enst_id)
if identifier_transcripts:
return identifier_transcripts
if nr:
return set([nr[0]])
if xm:
return set([xm[0]])
return set([longest])
|
Return a set with identifier transcript(s)
Choose all RefSeq transcripts with NM identifiers; if none were found choose ONE with NR,
if no NR choose ONE with XM. If there are no RefSeq transcript identifiers choose the
longest ensembl transcript.
Args:
hgnc_id(int)
build(str)
Returns:
identifier_transcripts(set)
|
juraj-google-style
|
def _is_univariate_marginal(self, index_points):
num_index_points = tf.compat.dimension_value(index_points.shape[(- (self.kernel.feature_ndims + 1))])
if (num_index_points is None):
warnings.warn('Unable to detect statically whether the number of index_points is 1. As a result, defaulting to treating the marginal GP at `index_points` as a multivariate Gaussian. This makes some methods, like `cdf` unavailable.')
return (num_index_points == 1)
|
True if the given index_points would yield a univariate marginal.
Args:
index_points: the set of index set locations at which to compute the
marginal Gaussian distribution. If this set is of size 1, the marginal is
univariate.
Returns:
is_univariate: Boolean indicating whether the marginal is univariate or
multivariate. In the case of dynamic shape in the number of index points,
defaults to "multivariate" since that's the best we can do.
|
codesearchnet
|
def get_interface(self):
raise NotImplementedError('Base class should not be called directly!')
|
Returns the interface used to configure the sniffer,
e.g. 'wlan0'.
Returns:
The interface (string) used to configure the sniffer. Corresponds to
the 'Interface' key of the sniffer configuration.
|
github-repos
|
def update_parser(self, parser):
self._parser = parser
ini_str = argparse_to_ini(parser)
configp = configparser.ConfigParser(allow_no_value=True)
configp.read_dict(self._config)
configp.read_string(ini_str)
self._config.update({s: dict(configp.items(s)) for s in configp.sections()})
|
Update config dictionary with declared arguments in an argparse.parser
New variables will be created, and existing ones overridden.
Args:
parser (argparse.ArgumentParser): parser to read variables from
|
codesearchnet
|
def __init__(self, app, env, region, prop_path):
self.app_name = app
self.env = env
self.region = region
self.properties = get_properties(prop_path)
generated = get_details(app=self.app_name)
self.group = generated.data['project']
try:
self.pipeline = self.properties['pipeline']['lambda']
except KeyError:
raise RequiredKeyNotFound("Lambda key in pipeline.json is required.")
self.runtime = self.pipeline['runtime']
self.description = self.pipeline['app_description']
self.handler = self.pipeline['handler']
self.vpc_enabled = self.pipeline['vpc_enabled']
self.settings = get_properties(prop_path, env=self.env, region=self.region)
app = self.settings['app']
self.lambda_environment = app['lambda_environment']
self.memory = app['lambda_memory']
self.role = app.get('lambda_role') or generated.iam()['lambda_role']
self.timeout = app['lambda_timeout']
self.concurrency_limit = app.get('lambda_concurrency_limit')
self.role_arn = get_role_arn(self.role, self.env, self.region)
self.session = boto3.Session(profile_name=self.env, region_name=self.region)
self.lambda_client = self.session.client('lambda')
|
Lambda function object.
Args:
app (str): Application name
env (str): Environment/Account
region (str): AWS Region
prop_path (str): Path of environment property file
|
juraj-google-style
|
def _parse_deploy(self, deploy_values: dict, service_config: dict):
mode = {}
for d_value in deploy_values:
if ('restart_policy' in d_value):
restart_spec = docker.types.RestartPolicy(**deploy_values[d_value])
service_config['restart_policy'] = restart_spec
if ('placement' in d_value):
for (constraints_key, constraints_value) in deploy_values[d_value].items():
service_config[constraints_key] = constraints_value
if ('mode' in d_value):
mode[d_value] = deploy_values[d_value]
if ('replicas' in d_value):
mode[d_value] = deploy_values[d_value]
if ('resources' in d_value):
resource_spec = self._parse_resources(deploy_values, d_value)
service_config['resources'] = resource_spec
mode_spec = docker.types.ServiceMode(**mode)
service_config['mode'] = mode_spec
|
Parse deploy key.
Args:
deploy_values (dict): deploy configuration values
service_config (dict): Service configuration
|
codesearchnet
|
def drop(self, index=None, columns=None):
if self._is_transposed:
return self.transpose().drop(index=columns, columns=index).transpose()
if (index is None):
new_data = self.data
new_index = self.index
else:
def delitem(df, internal_indices=[]):
return df.drop(index=df.index[internal_indices])
numeric_indices = list(self.index.get_indexer_for(index))
new_data = self.data.apply_func_to_select_indices(1, delitem, numeric_indices, keep_remaining=True)
new_index = self.index[(~ self.index.isin(index))]
if (columns is None):
new_columns = self.columns
new_dtypes = self.dtypes
else:
def delitem(df, internal_indices=[]):
return df.drop(columns=df.columns[internal_indices])
numeric_indices = list(self.columns.get_indexer_for(columns))
new_data = new_data.apply_func_to_select_indices(0, delitem, numeric_indices, keep_remaining=True)
new_columns = self.columns[(~ self.columns.isin(columns))]
new_dtypes = self.dtypes.drop(columns)
return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
|
Remove row data for target index and columns.
Args:
index: Target index to drop.
columns: Target columns to drop.
Returns:
A new QueryCompiler.
|
codesearchnet
|
def zeros(shape, dtype=None, **kwargs):
data = np.zeros(shape, dtype)
return dc.array(data, **kwargs)
|
Create an array of given shape and type, filled with zeros.
Args:
shape (sequence of ints): 2D shape of the array.
dtype (data-type, optional): Desired data-type for the array.
kwargs (optional): Other arguments of the array (*coords, attrs, and name).
Returns:
array (decode.array): Decode array filled with zeros.
|
codesearchnet
|
def __init__(self, rate=None, burst_size=None, prec_level=None):
super().__init__(MeterBandType.OFPMBT_DSCP_REMARK, rate, burst_size)
self.prec_level = prec_level
|
Create a MeterBandDscpRemark with the optional parameters below.
Args:
rate (int): Rate for remarking packets.
burst_size (int): Size of bursts.
prec_level (int): Number of precedence levels to subtract.
|
juraj-google-style
|
def _create_config_proto(self) -> tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration:
config_proto = tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration()
learning_rate_index = {r: i for i, r in enumerate(self._dynamic_learning_rates)}
for table in self._table_config:
table._set_table_descriptor(config_proto.table_descriptor.add(), self._strategy.extended.num_hosts, learning_rate_index)
table_to_id = {table: i for i, table in enumerate(self._table_config)}
for feature, output_shape in zip(nest.flatten(self._feature_config), self._output_shapes):
feature_descriptor = config_proto.feature_descriptor.add()
if feature.name:
feature_descriptor.name = feature.name
feature_descriptor.table_id = table_to_id[feature.table]
feature_descriptor.input_shape.extend(output_shape.as_list())
config_proto.mode = tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration.TRAINING
num_replica = self._strategy.num_replicas_in_sync
num_cores_per_replica = self._num_cores_per_replica or 1
config_proto.num_hosts = self._strategy.extended.num_hosts
config_proto.num_tensor_cores = num_replica * num_cores_per_replica
config_proto.sharding_strategy = tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration.DIV_DEFAULT
config_proto.pipeline_execution_with_tensor_core = self._pipeline_execution_with_tensor_core
if self._num_cores_per_replica:
config_proto.spmd_sharding.enabled = True
config_proto.spmd_sharding.num_cores_per_replica = self._num_cores_per_replica
return config_proto
|
Creates the TPUEmbeddingConfiguration proto.
This proto is used to initialize the TPU embedding engine.
Returns:
A TPUEmbeddingConfiguration proto.
|
github-repos
|
def load_dict_values(self, db_key: str, dict_keys: List[str], hierarchical: bool=False) -> List:
result = []
if (not hierarchical):
_values = self._db.hmget(db_key, *dict_keys)
result = [ast.literal_eval(_value) for _value in _values]
else:
db_keys = self._db.keys(pattern=(db_key + '*'))
for _db_key in db_keys:
for name in _db_key.split(':')[1:]:
if (name in dict_keys):
_values = self._load_values(_db_key)
result.append(_values)
_values = self._db.hmget(_db_key, *dict_keys)
for (i, value) in enumerate(_values):
try:
_values[i] = ast.literal_eval(value)
except SyntaxError:
pass
except ValueError:
pass
result += [value for value in _values if (value is not None)]
return result
|
Load values from a dictionary with the specified dict_keys.
Args:
db_key (str): Key where the dictionary is stored
dict_keys (List[str]): Keys within the dictionary to load.
hierarchical (bool): If True, expect the dictionary to have been
stored hierarchically. If False, expect the dictionary to have
been stored flat.
Returns:
list: The values stored at dict_keys in the dictionary stored at
db_key.
|
codesearchnet
|
def _range_along_dimension(range_dim, shape):
rank = len(shape)
if (range_dim >= rank):
raise ValueError('Cannot calculate range along non-existent index.')
indices = tf.range(start=0, limit=shape[range_dim])
indices = tf.reshape(indices, shape=[(1 if (i != range_dim) else shape[range_dim]) for i in range(rank)])
return tf.tile(indices, [(shape[i] if (i != range_dim) else 1) for i in range(rank)])
|
Construct a Tensor whose values are the index along a dimension.
Construct a Tensor that counts the distance along a single dimension. This is
useful, for example, when constructing an identity matrix,
>>> x = _range_along_dimension(0, [2, 2]).eval()
>>> x
array([[0, 0],
[1, 1]], dtype=int32)
>>> y = _range_along_dimension(1, [2, 2]).eval()
>>> y
array([[0, 1],
[0, 1]], dtype=int32)
>>> tf.cast(tf.equal(x, y), dtype=tf.int32).eval()
array([[1, 0],
[0, 1]], dtype=int32)
Args:
range_dim: int. Dimension to count indices on.
shape: 1D Tensor of ints. Shape of Tensor to construct.
Returns:
A Tensor whose values are the same as the range along dimension range_dim.
Raises:
ValueError: If range_dim isn't a valid dimension.
|
codesearchnet
|
def write_to_hdf5(self, filename_out, *args, **kwargs):
t0 = time.time()
self.__update_header()
if self.container.isheavy():
self.__write_to_hdf5_heavy(filename_out)
else:
self.__write_to_hdf5_light(filename_out)
t1 = time.time()
logger.info(('Conversion time: %2.2fsec' % (t1 - t0)))
|
Write data to HDF5 file.
It checks the file size and then decides how to write the file.
Args:
filename_out (str): Name of output file
|
codesearchnet
|
def create_tree(profile, tree):
resource = '/trees'
payload = {'tree': tree}
data = api.post_request(profile, resource, payload)
return prepare(data)
|
Create a new tree.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
tree
A list of blob objects (each with a path, mode, type, and
content or sha) to put in the tree.
Returns:
A dict with data about the tree.
|
codesearchnet
|
def EnableNetworkInterfaces(
self, interfaces, logger, dhclient_script=None):
if os.path.exists(self.network_path):
self._DisableNetworkManager(interfaces, logger)
helpers.CallDhclient(interfaces, logger)
|
Enable the list of network interfaces.
Args:
interfaces: list of string, the output device names to enable.
logger: logger object, used to write to SysLog and serial port.
dhclient_script: string, the path to a dhclient script used by dhclient.
|
juraj-google-style
|
def prefetch_users(persistent_course_grades):
users = User.objects.filter(id__in=[grade.user_id for grade in persistent_course_grades])
return {user.id: user for user in users}
|
Prefetch Users from the list of user_ids present in the persistent_course_grades.
Arguments:
persistent_course_grades (list): A list of PersistentCourseGrade.
Returns:
(dict): A dictionary containing user_id to user mapping.
|
codesearchnet
|
def unique_array(arr):
if not len(arr):
return np.asarray(arr)
elif pd:
if isinstance(arr, np.ndarray) and arr.dtype.kind not in 'MO':
return pd.unique(arr)
values = []
for v in arr:
if (isinstance(v, datetime_types) and
not isinstance(v, cftime_types)):
v = pd.Timestamp(v).to_datetime64()
values.append(v)
return pd.unique(values)
else:
arr = np.asarray(arr)
_, uniq_inds = np.unique(arr, return_index=True)
return arr[np.sort(uniq_inds)]
|
Returns an array of unique values in the input order.
Args:
arr (np.ndarray or list): The array to compute unique values on
Returns:
A new array of unique values
|
juraj-google-style
|
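For intuition, the non-pandas branch boils down to `np.unique(..., return_index=True)` followed by re-sorting the first-occurrence indices; a minimal numpy-only sketch:

import numpy as np

arr = np.array([3, 1, 3, 2, 1])

# np.unique sorts its output, so recover the input order from the first-occurrence indices.
_, first_idx = np.unique(arr, return_index=True)
print(arr[np.sort(first_idx)])  # [3 1 2]

# With pandas available, pd.unique(arr) already preserves input order for plain dtypes.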
def _transform_binary_composition_to_expression(expression, node, context):
if (expression.operator not in constants.SUPPORTED_OPERATORS):
raise NotImplementedError(u'Filter operation "{}" is not supported by the SQL backend.'.format(expression.operator))
sql_operator = constants.SUPPORTED_OPERATORS[expression.operator]
left = _expression_to_sql(expression.left, node, context)
right = _expression_to_sql(expression.right, node, context)
if (sql_operator.cardinality == constants.CARDINALITY_UNARY):
(left, right) = _get_column_and_bindparam(left, right, sql_operator)
clause = getattr(left, sql_operator.name)(right)
return clause
elif (sql_operator.cardinality == constants.CARDINALITY_BINARY):
clause = getattr(sql_expressions, sql_operator.name)(left, right)
return clause
elif (sql_operator.cardinality == constants.CARDINALITY_LIST_VALUED):
(left, right) = _get_column_and_bindparam(left, right, sql_operator)
right.expanding = True
clause = getattr(left, sql_operator.name)(right)
return clause
raise AssertionError(u'Unreachable, operator cardinality {} for compiler expression {} is unknown'.format(sql_operator.cardinality, expression))
|
Transform a BinaryComposition compiler expression into a SQLAlchemy expression.
Recursively calls _expression_to_sql to convert its left and right sub-expressions.
Args:
expression: expression, BinaryComposition compiler expression.
node: SqlNode, the SqlNode the expression applies to.
context: CompilationContext, global compilation state and metadata.
Returns:
Expression, SQLAlchemy expression.
|
codesearchnet
|
def split_raster(rs, split_shp, field_name, temp_dir):
UtilClass.rmmkdir(temp_dir)
ds = ogr_Open(split_shp)
lyr = ds.GetLayer(0)
lyr.ResetReading()
ft = lyr.GetNextFeature()
while ft:
cur_field_name = ft.GetFieldAsString(field_name)
for r in rs:
cur_file_name = r.split(os.sep)[-1]
outraster = temp_dir + os.sep + \
cur_file_name.replace('.tif', '_%s.tif' %
cur_field_name.replace(' ', '_'))
subprocess.call(['gdalwarp', r, outraster, '-cutline', split_shp,
'-crop_to_cutline', '-cwhere',
"'%s'='%s'" % (field_name, cur_field_name), '-dstnodata',
'-9999'])
ft = lyr.GetNextFeature()
ds = None
|
Split raster by given shapefile and field name.
Args:
rs: origin raster file.
split_shp: boundary (ESRI Shapefile) used to split raster.
field_name: field name identifying the split value.
temp_dir: directory to store the split rasters.
|
juraj-google-style
|
def _CreateRouteShapesFolder(self, schedule, parent, route, style_id=None, visible=True):
shape_id_to_trips = {}
for trip in route.trips:
if trip.shape_id:
shape_id_to_trips.setdefault(trip.shape_id, []).append(trip)
if (not shape_id_to_trips):
return None
shape_id_to_trips_items = shape_id_to_trips.items()
shape_id_to_trips_items.sort((lambda a, b: cmp(len(b[1]), len(a[1]))))
folder = self._CreateFolder(parent, 'Shapes', visible)
for (shape_id, trips) in shape_id_to_trips_items:
trip_ids = [trip.trip_id for trip in trips]
name = ('%s (trips: %d)' % (shape_id, len(trips)))
description = ('Trips using this shape (%d in total): %s' % (len(trips), ', '.join(trip_ids)))
placemark = self._CreatePlacemark(folder, name, style_id, visible, description)
self._CreateLineStringForShape(placemark, schedule.GetShape(shape_id))
return folder
|
Create a KML Folder for the shapes of a route.
The folder contains a placemark for each shape referenced by a trip in the
route. If there are no such shapes, no folder is created and None is
returned.
Args:
schedule: The transitfeed.Schedule instance.
parent: The parent ElementTree.Element instance.
route: The transitfeed.Route instance.
style_id: The id of a style to use if not None.
visible: Whether the placemark is initially visible or not.
Returns:
The Folder ElementTree.Element instance or None.
|
codesearchnet
|
def sendfrom(self, user_id, dest_address, amount, minconf=1):
amount = Decimal(amount).quantize(self.quantum, rounding=ROUND_HALF_EVEN)
txhash = self.rpc.call("sendfrom",
user_id, dest_address, float(str(amount)), minconf
)
self.logger.debug("Send %s %s from %s to %s" % (str(amount), self.coin,
str(user_id), dest_address))
self.logger.debug("Transaction hash: %s" % txhash)
return txhash
|
Send coins from user's account.
Args:
user_id (str): this user's unique identifier
dest_address (str): address which is to receive coins
amount (str or Decimal): amount to send (eight decimal points)
minconf (int): ensure the account has a valid balance using this
many confirmations (default=1)
Returns:
str: transaction ID
|
juraj-google-style
|
def garbage_collection(time_limit=YEAR/12.0):
expired_request_infos = [
ri for ri in DATABASE.values()
if ri.creation_ts + time_limit <= time.time()
]
for ri in expired_request_infos:
del DATABASE[ri.url]
|
Collect and remove all :class:`.RequestInfo` objects older than
`time_limit` (in seconds).
Args:
time_limit (float, default YEAR / 12): Collect objects older than
this limit.
|
juraj-google-style
|
def padding_to_length(padding):
non_padding = (1.0 - padding)
return tf.to_int32(tf.reduce_sum(non_padding, axis=(- 1)))
|
Calculate the length of mask based on padding.
Args:
padding: a Tensor with shape [..., length].
Returns:
a Tensor with shape [...].
|
codesearchnet
|
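The computation is simply "count the non-padded positions along the last axis"; the same thing in plain numpy, for intuition (the TF version does this with `tf.reduce_sum` and `tf.to_int32`):

import numpy as np

# 1.0 marks padded positions, 0.0 marks real tokens.
padding = np.array([[0., 0., 0., 1., 1.],
                    [0., 1., 1., 1., 1.]])

lengths = (1.0 - padding).sum(axis=-1).astype(np.int32)
print(lengths)  # [3 1]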
def _init_request_logging(self, app):
enabled = not app.config.get(CONF_DISABLE_REQUEST_LOGGING, False)
if not enabled:
return
self._requests_middleware = WSGIApplication(
self._key, app.wsgi_app, telemetry_channel=self._channel)
app.wsgi_app = self._requests_middleware
|
Sets up request logging unless ``APPINSIGHTS_DISABLE_REQUEST_LOGGING``
is set in the Flask config.
Args:
app (flask.Flask). the Flask application for which to initialize the extension.
|
juraj-google-style
|
def residual_block_layer(inputs, hparams):
kernel = (hparams.res_kernel_size, hparams.res_kernel_size)
x = inputs
for i in range(hparams.num_res_layers):
with tf.variable_scope(('res_conv_%d' % i)):
y = common_layers.conv_block(common_layers.layer_norm(x, hparams.hidden_size, name='lnorm'), hparams.hidden_size, [((1, 1), kernel)], strides=(1, 1), padding='SAME', name='residual_conv')
y = common_layers.conv_block(y, hparams.hidden_size, [((1, 1), (1, 1))], strides=(1, 1), padding='SAME', name='residual_dense')
x = common_layers.layer_postprocess(x, y, hparams)
return x
|
Residual block over inputs.
Runs a residual block consisting of
conv: kernel_size x kernel_size
conv: 1x1
dropout, add and normalize according to hparams.layer_postprocess_sequence.
Args:
inputs: Tensor of shape [batch, height, width, hparams.hidden_size].
hparams: HParams.
Returns:
Tensor of shape [batch, height, width, hparams.hidden_size].
|
codesearchnet
|
def NamedSelector(name, fields, description=None, type_attributes=DEFAULT_TYPE_ATTRIBUTES):
check.str_param(name, 'name')
check_user_facing_fields_dict(fields, 'NamedSelector named "{}"'.format(name))
class _NamedSelector(_ConfigSelector):
def __init__(self):
super(_NamedSelector, self).__init__(
key=name,
name=name,
fields=fields,
description=description,
type_attributes=type_attributes,
)
return _NamedSelector
|
A :py:class:`Selector` with a name, allowing it to be referenced by that name.
Args:
name (str):
fields (Dict[str, Field])
|
juraj-google-style
|
def call_each(seq):
try:
reduce(lambda _, y: y(), seq, None)
except TypeError as e:
if text_type(e) != "reduce() of empty sequence with no initial value":
raise
|
Calls each element of the sequence to invoke its side effect.
Args:
seq:
Returns: None
|
juraj-google-style
|
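With an initial value supplied, `reduce` invokes every callable in order purely for its side effect; a tiny self-contained demonstration of that idiom (the lambdas and the `calls` list are made up for illustration):

from functools import reduce

calls = []
seq = [lambda i=i: calls.append(i) for i in range(3)]

# Each element is called left to right; the accumulated return values are discarded.
reduce(lambda _, y: y(), seq, None)
print(calls)  # [0, 1, 2]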
def predict_features(self, df_features, df_target, nh=20, idx=0, dropout=0.0, activation_function=th.nn.ReLU, lr=0.01, l1=0.1, batch_size=(- 1), train_epochs=1000, test_epochs=1000, device=None, verbose=None, nb_runs=3):
(device, verbose) = SETTINGS.get_default(('device', device), ('verbose', verbose))
x = th.FloatTensor(scale(df_features.values)).to(device)
y = th.FloatTensor(scale(df_target.values)).to(device)
out = []
for i in range(nb_runs):
model = FSGNN_model([(x.size()[1] + 1), nh, 1], dropout=dropout, activation_function=activation_function).to(device)
out.append(model.train(x, y, lr=lr, l1=l1, batch_size=batch_size, train_epochs=train_epochs, test_epochs=test_epochs, device=device, verbose=verbose))
return list(np.mean(np.array(out), axis=0))
|
For one variable, predict its neighbours.
Args:
df_features (pandas.DataFrame):
df_target (pandas.Series):
nh (int): number of hidden units
idx (int): (optional) for printing purposes
dropout (float): probability of dropout (between 0 and 1)
activation_function (torch.nn.Module): activation function of the NN
lr (float): learning rate of Adam
l1 (float): L1 penalization coefficient
batch_size (int): batch size, defaults to full-batch
train_epochs (int): number of train epochs
test_epochs (int): number of test epochs
device (str): cuda or cpu device (defaults to ``cdt.SETTINGS.default_device``)
verbose (bool): verbosity (defaults to ``cdt.SETTINGS.verbose``)
nb_runs (int): number of bootstrap runs
Returns:
list: scores of each feature relatively to the target
|
codesearchnet
|
def update_vlan(self, name, vid, vni):
cmd = ('vxlan vlan %s vni %s' % (vid, vni))
return self.configure_interface(name, cmd)
|
Adds a new vlan to vni mapping for the interface
EosVersion:
4.13.7M
Args:
name (str): The interface name to configure
vid (str, int): The vlan id to map to the vni
vni (str, int): The vni value to use
Returns:
True if the command completes successfully
|
codesearchnet
|
def delete_vnet(access_token, subscription_id, resource_group, name):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Network/virtualNetworks/', name,
'?api-version=', NETWORK_API])
return do_delete(endpoint, access_token)
|
Delete a virtual network.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
name (str): Name of the VNet.
Returns:
HTTP response. VNet JSON body.
|
juraj-google-style
|
def __init__(self, target='', graph=None, config=None):
_python_session_create_counter.get_cell().increase_by(1)
if graph is None:
self._graph = ops.get_default_graph()
else:
if not isinstance(graph, ops.Graph):
raise TypeError(f'Argument `graph` must be a tf.Graph, but got "{type(graph).__name__}"')
self._graph = graph
self._closed = False
if target is not None:
try:
self._target = compat.as_bytes(target)
except TypeError:
if isinstance(target, config_pb2.ConfigProto):
raise TypeError(f'Argument `target` must be a string, but got "{type(target).__name__}". Did you do "Session(config)" instead of "Session(config=config)"?')
raise TypeError(f'Argument `target` must be a string, but got "{type(target).__name__}"')
else:
self._target = None
self._delete_lock = threading.Lock()
self._dead_handles = []
if config is None:
config = context.context().config
if not isinstance(config, config_pb2.ConfigProto):
raise TypeError(f'Argument `config` must be a tf.ConfigProto, but got "{type(config).__name__}"')
if mixed_precision_global_state.is_mixed_precision_graph_rewrite_enabled() and config.graph_options.rewrite_options.auto_mixed_precision != rewriter_config_pb2.RewriterConfig.OFF:
new_config = config_pb2.ConfigProto()
new_config.CopyFrom(config)
new_config.graph_options.rewrite_options.auto_mixed_precision = rewriter_config_pb2.RewriterConfig.ON
config = new_config
elif config.graph_options.rewrite_options.auto_mixed_precision != rewriter_config_pb2.RewriterConfig.ON:
mixed_precision_global_state.set_non_mixed_precision_session_created(True)
self._config = config
self._add_shapes = config.graph_options.infer_shapes
self._session = None
opts = tf_session.TF_NewSessionOptions(target=self._target, config=config)
try:
with self._graph._c_graph.get() as c_graph:
self._session = tf_session.TF_NewSessionRef(c_graph, opts)
finally:
tf_session.TF_DeleteSessionOptions(opts)
|
Constructs a new TensorFlow session.
Args:
target: (Optional) The TensorFlow execution engine to connect to.
graph: (Optional) The graph to be used. If this argument is None, the
default graph will be used.
config: (Optional) ConfigProto proto used to configure the session. If no
config is specified, the global default will be used. The global default
can be configured via the tf.config APIs.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
creating the TensorFlow session.
TypeError: If one of the arguments has the wrong type.
|
github-repos
|
def from_pymatgen_molecule(cls, molecule):
new = cls(atoms=[el.value for el in molecule.species],
coords=molecule.cart_coords)
return new._to_numeric()
|
Create an instance of this class from a pymatgen molecule.
Args:
molecule (:class:`pymatgen.core.structure.Molecule`): The molecule to convert.
Returns:
Cartesian: A new instance built from the molecule's species and cartesian coordinates.
|
juraj-google-style
|
def StatEntryFromPath(path, pathspec, ext_attrs=True):
try:
stat = filesystem.Stat.FromPath(path)
except (IOError, OSError) as error:
logging.error("Failed to obtain stat for '%s': %s", pathspec, error)
return rdf_client_fs.StatEntry(pathspec=pathspec)
return StatEntryFromStat(stat, pathspec, ext_attrs=ext_attrs)
|
Builds a stat entry object from a given path.
Args:
path: A path (string value) to stat.
pathspec: A `PathSpec` corresponding to the `path`.
ext_attrs: Whether to include extended file attributes in the result.
Returns:
`StatEntry` object.
|
codesearchnet
|
def queuify_logger(logger, queue_handler, queue_listener):
if isinstance(logger, str):
logger = logging.getLogger(logger)
handlers = [handler for handler in logger.handlers
if handler not in queue_listener.handlers]
if handlers:
queue_listener.handlers = \
tuple(list(queue_listener.handlers) + handlers)
del logger.handlers[:]
logger.addHandler(queue_handler)
|
Replace logger's handlers with a queue handler while adding existing
handlers to a queue listener.
This is useful when you want to use a default logging config but then
optionally add a logger's handlers to a queue during runtime.
Args:
logger (mixed): Logger instance or string name of logger to queue-ify
handlers.
queue_handler (QueueHandler): Instance of a ``QueueHandler``.
queue_listener (QueueListener): Instance of a ``QueueListener``.
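Example:
A self-contained sketch using only standard-library logging and queue types:
import logging
import logging.handlers
import queue
log_queue = queue.Queue()
queue_handler = logging.handlers.QueueHandler(log_queue)
queue_listener = logging.handlers.QueueListener(log_queue, logging.StreamHandler())
queuify_logger('myapp', queue_handler, queue_listener)
queue_listener.start()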
|
juraj-google-style
|
def Oem(self, command, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
if not isinstance(command, bytes):
command = command.encode('utf8')
return self._SimpleCommand(
b'oem %s' % command, timeout_ms=timeout_ms, info_cb=info_cb)
|
Executes an OEM command on the device.
Args:
command: Command to execute, such as 'poweroff' or 'bootconfig read'.
timeout_ms: Optional timeout in milliseconds to wait for a response.
info_cb: See Download. Messages vary based on command.
Returns:
The final response from the device.
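Example:
Illustrative only; `device` stands in for an already-initialized fastboot
protocol object exposing this method, and the OEM command is device-specific:
response = device.Oem('device-info', timeout_ms=5000)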
|
juraj-google-style
|
def Sample(self, tasks_status):
sample_time = time.time()
sample = '{0:f}\t{1:d}\t{2:d}\t{3:d}\t{4:d}\t{5:d}\n'.format(sample_time, tasks_status.number_of_queued_tasks, tasks_status.number_of_tasks_processing, tasks_status.number_of_tasks_pending_merge, tasks_status.number_of_abandoned_tasks, tasks_status.total_number_of_tasks)
self._WritesString(sample)
|
Takes a sample of the status of queued tasks for profiling.
Args:
tasks_status (TasksStatus): status information about tasks.
|
codesearchnet
|
def raster_reclassify(srcfile, v_dict, dstfile, gdaltype=GDT_Float32):
src_r = RasterUtilClass.read_raster(srcfile)
src_data = src_r.data
dst_data = numpy.copy(src_data)
if gdaltype == GDT_Float32 and src_r.dataType != GDT_Float32:
gdaltype = src_r.dataType
no_data = src_r.noDataValue
new_no_data = DEFAULT_NODATA
if gdaltype in [GDT_Unknown, GDT_Byte, GDT_UInt16, GDT_UInt32]:
new_no_data = 0
if not MathClass.floatequal(new_no_data, src_r.noDataValue):
if src_r.noDataValue not in v_dict:
v_dict[src_r.noDataValue] = new_no_data
no_data = new_no_data
for (k, v) in iteritems(v_dict):
dst_data[src_data == k] = v
RasterUtilClass.write_gtiff_file(dstfile, src_r.nRows, src_r.nCols, dst_data,
src_r.geotrans, src_r.srs, no_data, gdaltype)
|
Reclassify raster by given classifier dict.
Args:
srcfile: source raster file.
v_dict: classifier dict.
dstfile: destination file path.
gdaltype (:obj:`pygeoc.raster.GDALDataType`): GDT_Float32 as default.
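Example:
A sketch of collapsing land-use codes into broader classes; the file paths
and the classifier mapping are placeholders:
reclass_map = {1: 10, 2: 10, 3: 20}
raster_reclassify('landuse.tif', reclass_map, 'landuse_reclass.tif')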
|
juraj-google-style
|
def _get_commands(dist):
py_files = (f for f in setuptools.findall()
if os.path.splitext(f)[1].lower() == '.py')
pkg_files = (f for f in py_files if _get_package_name(f) in dist.packages)
commands = {}
for file_name in pkg_files:
with open(file_name) as py_file:
module = typing.cast(ast.Module, ast.parse(py_file.read()))
module_name = _get_module_name(file_name)
_append_commands(commands, module_name, _get_module_commands(module))
_append_commands(commands, module_name, _get_class_commands(module))
_append_commands(commands, module_name, _get_function_commands(module))
return commands
|
Find all commands belonging to the given distribution.
Args:
dist: The Distribution to search for docopt-compatible docstrings that
can be used to generate command entry points.
Returns:
A dictionary containing a mapping of primary commands to sets of
subcommands.
|
juraj-google-style
|
def _GetAnalysisPlugins(self, analysis_plugins_string):
if (not analysis_plugins_string):
return []
analysis_plugins_list = [name.strip() for name in analysis_plugins_string.split(',')]
analysis_plugins = self._analysis_manager.GetPluginObjects(analysis_plugins_list)
return analysis_plugins.values()
|
Retrieves analysis plugins.
Args:
analysis_plugins_string (str): comma separated names of analysis plugins
to enable.
Returns:
list[AnalysisPlugin]: analysis plugins.
|
codesearchnet
|
def getAll(self, event_name):
raw_events = self.callEventGetAllRpc(self._id, event_name)
return [callback_event.from_dict(msg) for msg in raw_events]
|
Gets all existing events in the server with the specified identifier.
This is a non-blocking call.
Args:
event_name: str, the name of the event to get.
Returns:
A list of CallbackEvent, each representing an event from the Server side.
|
github-repos
|
def initialize_repository(path, spor_dir='.spor'):
path = pathlib.Path(path)
spor_path = (path / spor_dir)
if spor_path.exists():
raise ValueError('spor directory already exists: {}'.format(spor_path))
spor_path.mkdir()
return Repository(path, spor_dir)
|
Initialize a spor repository in `path` if one doesn't already exist.
Args:
path: Path to any file or directory within the repository.
spor_dir: The name of the directory containing spor data.
Returns: A `Repository` instance.
Raises:
ValueError: A repository already exists at `path`.
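Example:
A minimal sketch; the path is a placeholder for any file or directory
inside the intended repository:
repo = initialize_repository('/path/to/project')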
|
codesearchnet
|
def ns(self, value):
if value == self._defaults['ns'] and 'ns' in self._values:
del self._values['ns']
else:
self._values['ns'] = value
|
The ns property.
Args:
value (string): The property value.
|
juraj-google-style
|
def __init__(self, feed_merger):
self.feed_merger = feed_merger
self._num_merged = 0
self._num_not_merged_a = 0
self._num_not_merged_b = 0
|
Initialise.
Args:
feed_merger: The FeedMerger.
|
juraj-google-style
|
def AddLabel(self, label):
if (not isinstance(label, py2to3.STRING_TYPES)):
raise TypeError('label is not a string type. Is {0:s}'.format(type(label)))
if (not self._VALID_LABEL_REGEX.match(label)):
raise ValueError('Unsupported label: "{0:s}". A label must only consist of alphanumeric characters or underscores.'.format(label))
if (label not in self.labels):
self.labels.append(label)
|
Adds a label to the event tag.
Args:
label (str): label.
Raises:
TypeError: if the label provided is not a string.
ValueError: if a label is malformed.
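Example:
Illustrative use on an event tag instance; labels may only contain
alphanumeric characters and underscores:
event_tag.AddLabel('suspicious_activity')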
|
codesearchnet
|
def add_trial(self, trial):
trial.set_verbose(self._verbose)
self._trials.append(trial)
with warn_if_slow("scheduler.on_trial_add"):
self._scheduler_alg.on_trial_add(self, trial)
self.trial_executor.try_checkpoint_metadata(trial)
|
Adds a new trial to this TrialRunner.
Trials may be added at any time.
Args:
trial (Trial): Trial to queue.
|
juraj-google-style
|
def lookup_subclass(cls, d):
try:
typeid = d['typeid']
except KeyError:
raise FieldError(('typeid not present in keys %s' % list(d)))
subclass = cls._subcls_lookup.get(typeid, None)
if (not subclass):
raise FieldError(("'%s' not a valid typeid" % typeid))
else:
return subclass
|
Look up a class based on a serialized dictionary containing a typeid
Args:
d (dict): Dictionary with key "typeid"
Returns:
Serializable subclass
|
codesearchnet
|
def process_resource(self, req, resp, resource, uri_kwargs=None):
if 'user' in req.context:
return
identifier = self.identify(req, resp, resource, uri_kwargs)
user = self.try_storage(identifier, req, resp, resource, uri_kwargs)
if user is not None:
req.context['user'] = user
elif self.challenge is not None:
req.context.setdefault(
'challenges', list()
).append(self.challenge)
|
Process resource after routing to it.
This is basic falcon middleware handler.
Args:
req (falcon.Request): request object
resp (falcon.Response): response object
resource (object): resource object matched by falcon router
uri_kwargs (dict): additional keyword argument from uri template.
For ``falcon<1.0.0`` this is always ``None``
|
juraj-google-style
|
def __call__(self, index, s):
if self.colorize:
self._color_wrap(index, s)
else:
print(s)
|
Print the output, colorized or not, depending on the environment.
Args:
index (int): The instance number.
s (str): The string to print.
|
juraj-google-style
|
def importGurobiSolution(self, grbmodel):
self.eval(''.join(
'let {} := {};'.format(var.VarName, var.X)
for var in grbmodel.getVars()
if '$' not in var.VarName
))
|
Import the solution from a gurobipy.Model object.
Args:
grbmodel: A :class:`gurobipy.Model` object with the model solved.
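Example:
A hedged sketch; `ampl` holds the AMPL model and `grbmodel` is a
gurobipy.Model with matching variable names that has already been solved:
grbmodel.optimize()
ampl.importGurobiSolution(grbmodel)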
|
juraj-google-style
|
def DecoderLayer(feature_depth, feedforward_depth, num_heads, dropout, mode):
    return layers.Serial(
        layers.Residual(
            layers.LayerNorm(),
            layers.Branch(),
            layers.Parallel(layers.Identity(), layers.CausalMask(axis=-2)),
            layers.MultiHeadedAttention(feature_depth, num_heads=num_heads, dropout=dropout, mode=mode),
            layers.Dropout(rate=dropout, mode=mode)),
        ResidualFeedForward(feature_depth, feedforward_depth, dropout, mode=mode))
|
Transformer decoder layer.
Args:
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
mode: str: 'train' or 'eval'
Returns:
the layer.
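Example:
A construction sketch with typical Transformer sizes; the values are
illustrative only:
decoder = DecoderLayer(feature_depth=512, feedforward_depth=2048,
                       num_heads=8, dropout=0.1, mode='train')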
|
codesearchnet
|
def swo_stop(self):
res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.STOP, 0)
if res < 0:
raise errors.JLinkException(res)
return None
|
Stops collecting SWO data.
Args:
self (JLink): the ``JLink`` instance
Returns:
``None``
Raises:
JLinkException: on error
|
juraj-google-style
|
def check_streamers(self, blacklist=None):
ready = []
selected = set()
for (i, streamer) in enumerate(self.streamers):
if ((blacklist is not None) and (i in blacklist)):
continue
if (i in selected):
continue
marked = False
if (i in self._manually_triggered_streamers):
marked = True
self._manually_triggered_streamers.remove(i)
if streamer.triggered(marked):
self._logger.debug('Streamer %d triggered, manual=%s', i, marked)
ready.append(streamer)
selected.add(i)
for (j, streamer2) in enumerate(self.streamers[i:]):
if ((streamer2.with_other == i) and (j not in selected) and streamer2.triggered(True)):
self._logger.debug('Streamer %d triggered due to with-other on %d', j, i)
ready.append(streamer2)
selected.add(j)
return ready
|
Check if any streamers are ready to produce a report.
You can limit what streamers are checked by passing a set-like
object into blacklist.
This method is the primary way to see when you should poll a given
streamer for its next report.
Note, this function is not idempotent. If a streamer is marked as
manual and it is triggered from a node rule inside the sensor_graph,
that trigger will only last as long as the next call to
check_streamers() so you need to explicitly build a report on all
ready streamers before calling check_streamers again.
Args:
blacklist (set): Optional set of streamer indices that should
not be checked right now.
Returns:
list of DataStreamer: A list of the ready streamers.
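Example:
A hedged polling sketch; `sensor_graph` is an object exposing this method
and `build_report` is a hypothetical call standing in for however ready
streamers are drained in your code:
for streamer in sensor_graph.check_streamers(blacklist={2}):
    report = streamer.build_report()  # hypothetical: drain the ready streamer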
|
codesearchnet
|
def FindChecks(cls, artifact=None, os_name=None, cpe=None, labels=None, restrict_checks=None):
check_ids = set()
conditions = list(cls.Conditions(artifact, os_name, cpe, labels))
for (chk_id, chk) in iteritems(cls.checks):
if (restrict_checks and (chk_id not in restrict_checks)):
continue
for condition in conditions:
if chk.triggers.Match(*condition):
check_ids.add(chk_id)
break
return check_ids
|
Takes targeting info, identifies relevant checks.
FindChecks will return results when a host has the conditions necessary for
a check to occur. Conditions with partial results are not returned. For
example, FindChecks will not return a check that targets
os_name=["Linux"], labels=["foo"] if a host only has the os_name=["Linux"]
attribute.
Args:
artifact: 0+ artifact names.
os_name: 0+ OS names.
cpe: 0+ CPE identifiers.
labels: 0+ GRR labels.
restrict_checks: A list of check ids to restrict check processing to.
Returns:
the check_ids that apply.
|
codesearchnet
|
def to_pytd_def(self, val: abstract.BaseValue) -> pytd.Node:
if isinstance(val, abstract.SimpleClass):
return self._class_to_pytd_def(val)
elif isinstance(val, abstract.BaseFunction):
return self._function_to_pytd_def(val)
else:
raise NotImplementedError(f'to_pytd_def() not implemented for {val.__class__.__name__}: {val}')
|
Returns the pytd definition of the abstract value.
For example, if the abstract value is:
InterpreterClass(name='C', members={'x': PythonConstant(0)})
then to_pytd_def() produces:
pytd.Class(name='C',
constants=(pytd.Constant(name='x', type=pytd.NamedType(int)),))
Args:
val: The abstract value.
|
github-repos
|
def _GetKeysDefaultEmpty(self, top_level, keys, depth=1):
keys = set(keys)
match = {}
if depth == 1:
for key in keys:
value = top_level.get(key, None)
if value is not None:
match[key] = value
else:
for _, parsed_key, parsed_value in plist_interface.RecurseKey(
top_level, depth=depth):
if parsed_key in keys:
match[parsed_key] = parsed_value
if set(match.keys()) == keys:
return match
return match
|
Retrieves plist keys, defaulting to empty values.
Args:
top_level (plistlib._InternalDict): top level plist object.
keys (set[str]): names of keys that should be returned.
depth (int): depth within the plist, where 1 is top level.
Returns:
dict[str, str]: values of the requested keys.
|
juraj-google-style
|
def getValue(self, scalarExpression):
return lock_and_call((lambda : Utils.castVariant(self._impl.getValue(scalarExpression))), self._lock)
|
Get a scalar value from the underlying AMPL interpreter, as a double or
a string.
Args:
scalarExpression: An AMPL expression which evaluates to a scalar
value.
Returns:
The value of the expression.
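Example:
Illustrative only; `ampl` stands in for the AMPL API object that defines
this method, and the expression is a trivial one that needs no model data:
total = ampl.getValue('sum {i in 1..3} i')  # evaluates to 6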
|
codesearchnet
|
def genUserCert(self, name, signas=None, outp=None, csr=None):
(pkey, cert) = self._genBasePkeyCert(name, pkey=csr)
cert.add_extensions([crypto.X509Extension(b'nsCertType', False, b'client'), crypto.X509Extension(b'keyUsage', False, b'digitalSignature'), crypto.X509Extension(b'extendedKeyUsage', False, b'clientAuth'), crypto.X509Extension(b'basicConstraints', False, b'CA:FALSE')])
if (signas is not None):
self.signCertAs(cert, signas)
else:
self.selfSignCert(cert, pkey)
crtpath = self._saveCertTo(cert, 'users', ('%s.crt' % name))
if (outp is not None):
outp.printf(('cert saved: %s' % (crtpath,)))
if (not pkey._only_public):
keypath = self._savePkeyTo(pkey, 'users', ('%s.key' % name))
if (outp is not None):
outp.printf(('key saved: %s' % (keypath,)))
return (pkey, cert)
|
Generates a user keypair.
Args:
name (str): The name of the user keypair.
signas (str): The CA keypair to sign the new user keypair with.
outp (synapse.lib.output.Output): The output buffer.
csr (OpenSSL.crypto.PKey): The CSR public key when generating the keypair from a CSR.
Examples:
Generate a user cert for the user "myuser":
myuserkey, myusercert = cdir.genUserCert('myuser')
Returns:
((OpenSSL.crypto.PKey, OpenSSL.crypto.X509)): Tuple containing the key and certificate objects.
|
codesearchnet
|
def getValue(self, scalarExpression):
return lock_and_call(
lambda: Utils.castVariant(self._impl.getValue(scalarExpression)),
self._lock
)
|
Get a scalar value from the underlying AMPL interpreter, as a double or
a string.
Args:
scalarExpression: An AMPL expression which evaluates to a scalar
value.
Returns:
The value of the expression.
|
juraj-google-style
|
def _Open(self, path_spec=None, mode='rb'):
if not path_spec:
raise ValueError('Missing path specification.')
file_system = resolver.Resolver.OpenFileSystem(
path_spec, resolver_context=self._resolver_context)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
if not file_entry:
file_system.Close()
raise IOError('Unable to retrieve file entry.')
if not file_entry.IsFile():
file_system.Close()
raise IOError('Not a regular file.')
self._file_system = file_system
self._zip_file = self._file_system.GetZipFile()
self._zip_info = file_entry.GetZipInfo()
self._current_offset = 0
self._uncompressed_stream_size = self._zip_info.file_size
|
Opens the file-like object defined by path specification.
Args:
path_spec (Optional[PathSpec]): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
juraj-google-style
|
def convert_rgb(self, image):
self._ensure_format_supported(image)
if not isinstance(image, PIL.Image.Image):
return image
return image.convert('RGB')
|
Converts `PIL.Image.Image` to RGB format.
Args:
image (`PIL.Image.Image`):
The image to convert.
|
github-repos
|
def _format_variant(self, case_id, gemini_variant, individual_objs,
index=0, add_all_info=False):
chrom = gemini_variant['chrom']
if chrom.startswith('chr') or chrom.startswith('CHR'):
chrom = chrom[3:]
variant_dict = {
'CHROM':chrom,
'POS':str(gemini_variant['start']),
'ID':gemini_variant['rs_ids'],
'REF':gemini_variant['ref'],
'ALT':gemini_variant['alt'],
'QUAL':gemini_variant['qual'],
'FILTER':gemini_variant['filter']
}
variant = Variant(**variant_dict)
variant.update_variant_id(gemini_variant['variant_id'])
logger.debug("Creating a variant object of variant {0}".format(
variant.variant_id))
variant['index'] = index
self._add_most_severe_consequence(variant, gemini_variant)
self._add_impact_severity(variant, gemini_variant)
variant.start = int(gemini_variant['start'])
variant.stop = int(gemini_variant['end'])
if self.variant_type == 'sv':
variant.sv_type = gemini_variant['sub_type']
variant.stop = int(gemini_variant['end'])
self._add_sv_coordinates(variant)
else:
self._add_transcripts(variant, gemini_variant)
self._add_thousand_g(variant, gemini_variant)
self._add_exac(variant, gemini_variant)
self._add_gmaf(variant, gemini_variant)
if gemini_variant['cadd_scaled']:
variant.cadd_score = gemini_variant['cadd_scaled']
polyphen = gemini_variant['polyphen_pred']
if polyphen:
variant.add_severity('Polyphen', polyphen)
sift = gemini_variant['sift_pred']
if sift:
variant.add_severity('SIFT', sift)
self._add_hgnc_symbols(variant)
if self.variant_type == 'snv':
self._add_genes(variant)
self._add_consequences(variant)
if add_all_info:
self._add_genotypes(variant, gemini_variant, case_id, individual_objs)
if self.variant_type == 'sv':
self._add_genes(variant)
return variant
|
Make a puzzle variant from a gemini variant
Args:
case_id (str): related case id
gemini_variant (GeminiQueryRow): The gemini variant
individual_objs (list(dict)): A list of Individuals
index(int): The index of the variant
Returns:
variant (dict): A Variant object
|
juraj-google-style
|
def agg_wt_avg(mat, min_wt=0.01, corr_metric='spearman'):
assert (mat.shape[1] > 0), 'mat is empty! mat: {}'.format(mat)
if (mat.shape[1] == 1):
out_sig = mat
upper_tri_df = None
raw_weights = None
weights = None
else:
assert (corr_metric in ['spearman', 'pearson'])
corr_mat = mat.corr(method=corr_metric)
upper_tri_df = get_upper_triangle(corr_mat)
(raw_weights, weights) = calculate_weights(corr_mat, min_wt)
weighted_values = (mat * weights)
out_sig = weighted_values.sum(axis=1)
return (out_sig, upper_tri_df, raw_weights, weights)
|
Aggregate a set of replicate profiles into a single signature using
a weighted average.
Args:
mat (pandas df): a matrix of replicate profiles, where the columns are
samples and the rows are features; columns correspond to the
replicates of a single perturbagen
min_wt (float): Minimum raw weight when calculating weighted average
corr_metric (string): Spearman or Pearson; the correlation method
Returns:
out_sig (pandas series): weighted average values
upper_tri_df (pandas df): the correlations between each profile that went into the signature
raw weights (pandas series): weights before normalization
weights (pandas series): weights after normalization
|
codesearchnet
|
def run_config(self, project, run=None, entity=None):
query = gql()
response = self.gql(query, variable_values={
'name': project, 'run': run, 'entity': entity
})
if response['model'] is None:
raise ValueError("Run {}/{}/{} not found".format(entity, project, run))
run = response['model']['bucket']
commit = run['commit']
patch = run['patch']
config = json.loads(run['config'] or '{}')
if len(run['files']['edges']) > 0:
url = run['files']['edges'][0]['node']['url']
res = requests.get(url)
res.raise_for_status()
metadata = res.json()
else:
metadata = {}
return (commit, config, patch, metadata)
|
Get the relevant configs for a run
Args:
project (str): The project to download, (can include bucket)
run (str, optional): The run to download
entity (str, optional): The entity to scope this project to.
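Example:
A hedged sketch; `api` is assumed to be an instance of the class that
defines this method, and the project/run/entity values are placeholders.
As the body above shows, the call returns a (commit, config, patch, metadata) tuple:
commit, config, patch, metadata = api.run_config('my-project', run='abc123', entity='my-team')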
|
juraj-google-style
|
def read_proto(filename: str, proto_cls: Type[_T]) -> _T:
filepath = _fhir_filepath_from(filename)
proto = proto_cls()
raw_proto = ''
with open(filepath, 'r', encoding='utf-8') as f:
raw_proto = f.read()
text_format.Parse(raw_proto, proto)
return proto
|
Reads protobuf information from filename relative to the fhir/ root dir.
Data is serialized into an instance of `proto_cls`.
Args:
filename: The file to read from.
proto_cls: The type of protobuf message to look for and return.
Returns:
The protobuf message in the file.
|
github-repos
|