code (string, lengths 20–4.93k) | docstring (string, lengths 33–1.27k) | source (string, 3 classes)
---|---|---|
def convert(self, point):
x, y = point
(x1, y1) = x - self.x_offset, y - self.y_offset
logger.debug("converted {} {} ==> {} {}".format(x, y, x1, y1))
return x1, y1
|
Convert a point from one coordinate system to another.
Args:
point: tuple(int x, int y)
The point in the original coordinate system.
Returns:
converted_point: tuple(int x, int y)
The point in the new coordinate system.
Example: convert coordinate from original image into a pixel location
within a cutout image.
@rtype: tuple(float, float)
|
juraj-google-style
|
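A minimal usage sketch of the offset subtraction above. `CutoutConverter` and its constructor are hypothetical stand-ins for whatever object carries `x_offset`/`y_offset`; only the `convert` body is taken from the record.
```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

class CutoutConverter:
    """Hypothetical holder for a cutout's offsets."""
    def __init__(self, x_offset, y_offset):
        self.x_offset = x_offset
        self.y_offset = y_offset

    def convert(self, point):
        x, y = point
        x1, y1 = x - self.x_offset, y - self.y_offset
        logger.debug("converted {} {} ==> {} {}".format(x, y, x1, y1))
        return x1, y1

# A pixel at (120, 80) in the original image maps to (20, 30) in a cutout
# whose top-left corner sits at (100, 50) of the original.
assert CutoutConverter(100, 50).convert((120, 80)) == (20, 30)
```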
def _event_size(event_shape, name=None):
with tf.compat.v1.name_scope(name, 'event_size', [event_shape]):
event_shape = tf.convert_to_tensor(
value=event_shape, dtype=tf.int32, name='event_shape')
event_shape_const = tf.get_static_value(event_shape)
if event_shape_const is not None:
return np.prod(event_shape_const)
else:
return tf.reduce_prod(input_tensor=event_shape)
|
Computes the number of elements in a tensor with shape `event_shape`.
Args:
event_shape: A tensor shape.
name: The name to use for the tensor op to compute the number of elements
(if such an op needs to be created).
Returns:
event_size: The number of elements in `event_shape`. Returns a numpy int
when the number of elements can be computed immediately. Otherwise, returns
a scalar tensor.
|
juraj-google-style
|
def from_row_lengths(cls, row_lengths, validate=True, dtype=None, dtype_hint=None):
if not isinstance(validate, bool):
raise TypeError('validate must have type bool')
with ops.name_scope(None, 'RowPartitionFromRowLengths', [row_lengths]):
row_lengths = cls._convert_row_partition(row_lengths, 'row_lengths', dtype_hint=dtype_hint, dtype=dtype)
row_lengths.shape.assert_has_rank(1)
if validate:
msg = 'Arguments to from_row_lengths do not form a valid RowPartition'
checks = [check_ops.assert_rank(row_lengths, 1, message=msg), check_ops.assert_non_negative(row_lengths, message=msg)]
row_lengths = control_flow_ops.with_dependencies(checks, row_lengths)
row_limits = math_ops.cumsum(row_lengths)
row_splits = array_ops.concat([[0], row_limits], axis=0)
return cls(row_splits=row_splits, row_lengths=row_lengths, internal=_row_partition_factory_key)
|
Creates a `RowPartition` with rows partitioned by `row_lengths`.
This `RowPartition` divides a sequence `values` into rows by indicating
the length of each row:
```python
partitioned_rows = [[values.pop(0) for _ in range(length)]
for length in row_lengths]
```
Args:
row_lengths: A 1-D integer tensor with shape `[nrows]`. Must be
nonnegative.
validate: If true, then use assertions to check that the arguments form a
valid `RowPartition`.
dtype: Optional dtype for the RowPartition. If missing, the type
is inferred from the type of `row_lengths`, dtype_hint, or tf.int64.
dtype_hint: Optional dtype for the RowPartition, used when dtype
is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so dtype_hint can be used as a soft preference.
If the conversion to `dtype_hint` is not possible, this argument has no
effect.
Returns:
A `RowPartition`.
|
github-repos
|
def freeze_graph(session, outputs):
return convert_to_constants.convert_variables_to_constants(session, session.graph.as_graph_def(), [x.op.name for x in outputs])
|
Freeze the current graph.
Args:
session: TensorFlow session containing the graph
outputs: List of output tensors
Returns:
The frozen graph_def.
|
github-repos
|
def resolves_for(self, session):
if self.url:
self.actual_path = session.current_url
else:
result = urlparse(session.current_url)
if self.only_path:
self.actual_path = result.path
else:
request_uri = result.path
if result.query:
request_uri += '?{0}'.format(result.query)
self.actual_path = request_uri
if isregex(self.expected_path):
return self.expected_path.search(self.actual_path)
else:
return (normalize_url(self.actual_path) == normalize_url(self.expected_path))
|
Returns whether this query resolves for the given session.
Args:
session (Session): The session for which this query should be executed.
Returns:
bool: Whether this query resolves.
|
codesearchnet
|
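A standalone rehearsal of the path comparison performed by `resolves_for`, using only `urllib.parse`; the URL and expected paths are made up for illustration.
```python
from urllib.parse import urlparse

current_url = "https://example.com/articles?page=2"   # hypothetical session.current_url
result = urlparse(current_url)

# Rebuild the request URI the way resolves_for does when only_path is False:
# the path plus the query string, if one is present.
request_uri = result.path
if result.query:
    request_uri += "?{0}".format(result.query)

assert request_uri == "/articles?page=2"   # compared against expected_path
assert result.path == "/articles"          # what only_path=True would compare instead
```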
def replace(self, **kwargs):
clone = copy(self)
clone.transforms = list(clone.transforms)
for (key, value) in kwargs.items():
if (not hasattr(clone, key)):
raise TypeError(u'replace() got an unexpected keyword argument {!r}'.format(key))
setattr(clone, key, value)
return clone
|
Return a copy of this `Query`, but with attributes specified
as keyword arguments replaced by the keyword values.
Keyword Args:
Attributes/values to replace in the copy.
Returns:
A copy of the query that has its attributes updated with the specified values.
Raises:
TypeError: The `Query` does not have the specified attribute.
|
codesearchnet
|
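The copy-and-override pattern from `replace`, restated on a hypothetical `Query` with two attributes so the non-mutating behaviour is visible.
```python
from copy import copy

class Query:
    """Minimal stand-in; the real Query carries many more attributes."""
    def __init__(self, browser, timeout):
        self.browser = browser
        self.timeout = timeout
        self.transforms = []

    def replace(self, **kwargs):
        clone = copy(self)
        clone.transforms = list(clone.transforms)   # decouple the shared list
        for key, value in kwargs.items():
            if not hasattr(clone, key):
                raise TypeError(
                    'replace() got an unexpected keyword argument {!r}'.format(key))
            setattr(clone, key, value)
        return clone

q = Query(browser='firefox', timeout=5)
q2 = q.replace(timeout=30)
assert (q.timeout, q2.timeout) == (5, 30)   # the original query is untouched
```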
def GetFileObject(self, data_stream_name=''):
if data_stream_name:
return None
return resolver.Resolver.OpenFileObject(
self.path_spec, resolver_context=self._resolver_context)
|
Retrieves the file-like object.
Args:
data_stream_name (Optional[str]): name of the data stream, where an empty
string represents the default data stream.
Returns:
FileIO: a file-like object or None if not available.
|
juraj-google-style
|
def get_user_info(self):
resp = self.requester.get(urljoin(self.base_url, '/api/mobile/v0.5/my_user_info'))
resp.raise_for_status()
return Info(resp.json())
|
Returns a UserInfo object for the logged in user.
Returns:
UserInfo: object representing the logged-in user's information
|
codesearchnet
|
def add_config(self, slot, config_id, config_type, value):
if (slot not in self.config_database):
self.config_database[slot] = {}
self.config_database[slot][config_id] = (config_type, value)
|
Add a config variable assignment to this sensor graph.
Args:
slot (SlotIdentifier): The slot identifier that this config
variable is assigned to.
config_id (int): The 16-bit id of this config variable.
config_type (str): The type of the config variable, currently
supported are fixed width integer types, strings and binary
blobs.
value (str|int|bytes): The value to assign to the config variable.
|
codesearchnet
|
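A compact sketch of the nested config database that `add_config` populates; the slot here is a plain string where the real code uses a `SlotIdentifier`.
```python
class SensorGraphConfigs:
    """Minimal illustration of the {slot: {config_id: (type, value)}} layout."""
    def __init__(self):
        self.config_database = {}

    def add_config(self, slot, config_id, config_type, value):
        self.config_database.setdefault(slot, {})[config_id] = (config_type, value)

graph = SensorGraphConfigs()
graph.add_config('slot 1', 0x8000, 'uint16_t', 42)
assert graph.config_database['slot 1'][0x8000] == ('uint16_t', 42)
```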
class _EmbeddingHandler(ModelHandler):
def __init__(self, embeddings_manager: EmbeddingsManager):
self.embedding_config = embeddings_manager
self._underlying = self.embedding_config.get_model_handler()
self.columns = self.embedding_config.get_columns_to_apply()
def load_model(self):
model = self._underlying.load_model()
return model
def _validate_column_data(self, batch):
pass
def run_inference(self, batch: Sequence[dict[str, list[str]]], model: ModelT, inference_args: Optional[dict[str, Any]]=None) -> list[dict[str, Union[list[float], list[str]]]]:
embedding_input = self.embedding_config.type_adapter.input_fn(batch)
self._validate_column_data(batch=embedding_input)
prediction = self._underlying.run_inference(embedding_input, model, inference_args)
if isinstance(prediction, np.ndarray):
prediction_seq = prediction.tolist()
elif isinstance(prediction, Iterable) and (not isinstance(prediction, (str, bytes))):
prediction_seq = list(prediction)
else:
prediction_seq = [prediction]
return self.embedding_config.type_adapter.output_fn(batch, prediction_seq)
def get_metrics_namespace(self) -> str:
return self._underlying.get_metrics_namespace() or 'BeamML_EmbeddingHandler'
def batch_elements_kwargs(self) -> Mapping[str, Any]:
batch_sizes_map = {}
if self.embedding_config.max_batch_size:
batch_sizes_map['max_batch_size'] = self.embedding_config.max_batch_size
if self.embedding_config.min_batch_size:
batch_sizes_map['min_batch_size'] = self.embedding_config.min_batch_size
return self._underlying.batch_elements_kwargs() or batch_sizes_map
def __repr__(self):
return self._underlying.__repr__()
def validate_inference_args(self, _):
pass
|
A ModelHandler intended to work on list[dict[str, Any]] inputs.
The inputs to the model handler are expected to be a list of dicts.
For example, if the original model is used with RunInference to take a
PCollection[E] to a PCollection[P], this ModelHandler would take a
PCollection[dict[str, E]] to a PCollection[dict[str, P]].
_EmbeddingHandler will accept an EmbeddingsManager instance, which
contains the details of the model to be loaded and the inference_fn to be
used. The purpose of _EmbeddingHandler is to generate embeddings for
general inputs using the EmbeddingsManager instance.
This is an internal class and offers no backwards compatibility guarantees.
Args:
embeddings_manager: An EmbeddingsManager instance.
|
github-repos
|
def get_global_namespace(decls):
found = [
decl for decl in scopedef.make_flatten(decls) if decl.name == '::' and
isinstance(decl, namespace_t)]
if len(found) == 1:
return found[0]
raise RuntimeError("Unable to find global namespace.")
|
Get the global namespace (::) from a declaration tree.
Args:
decls (list[declaration_t]): a list of declarations
Returns:
namespace_t: the global namespace_t object (::)
|
juraj-google-style
|
def parse_options(cls, options):
d = {}
for filename_check, dictionary in cls.filename_checks.items():
filename_data = getattr(options, filename_check)
if len(filename_data) != 0:
parsed_params = {}
for single_line in filename_data:
a = [s.strip() for s in single_line.split('=')]
if a[0] in ['filter_regex', 'filename_regex']:
parsed_params[a[0]] = a[1]
d[filename_check] = parsed_params
cls.filename_checks.update(d)
cls.filename_checks = {x: y for x, y in cls.filename_checks.items() if len(y) > 0}
|
Required by flake8.
Parse the options; called after add_options.
Args:
options (dict): options to be parsed
|
juraj-google-style
|
def to_value_list(original_strings, corenlp_values=None):
assert isinstance(original_strings, (list, tuple, set))
if (corenlp_values is not None):
assert isinstance(corenlp_values, (list, tuple, set))
assert (len(original_strings) == len(corenlp_values))
return list(set((to_value(x, y) for (x, y) in zip(original_strings, corenlp_values))))
else:
return list(set((to_value(x) for x in original_strings)))
|
Convert a list of strings to a list of Values
Args:
original_strings (list[basestring])
corenlp_values (list[basestring or None])
Returns:
list[Value]
|
codesearchnet
|
def list_files_by_mtime(dirpath):
files = [f for f in os.listdir(dirpath) if is_real_file(dirpath, f)]
return sorted(files, key=lambda f: get_mtime(dirpath, f))
|
Return a list of files in the directory, sorted in increasing "mtime".
Return a list of files in the given directory, sorted from older to newer file
according to their modification times. Only return actual files, skipping
directories, symbolic links, pipes, etc.
Args:
dirpath: directory pathname
Returns:
A list of file names relative to the given directory path.
|
github-repos
|
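The same oldest-first listing written against the standard library only; `os.path.isfile`/`os.path.getmtime` stand in for the helpers `is_real_file` and `get_mtime`, so symlinks to regular files are not filtered out here.
```python
import os

def list_files_by_mtime(dirpath):
    """Regular files in dirpath, sorted from oldest to newest modification time."""
    names = [f for f in os.listdir(dirpath)
             if os.path.isfile(os.path.join(dirpath, f))]
    return sorted(names, key=lambda f: os.path.getmtime(os.path.join(dirpath, f)))

# e.g. list_files_by_mtime('.') -> ['oldest.log', 'newer.log', 'newest.log']
```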
def __init__(self, value: Any, compute_derived: bool=False, where: Optional[Callable[[base.HyperPrimitive], bool]]=None):
super().__init__()
self._value = value
self._root_path = utils.KeyPath()
self._compute_derived = compute_derived
self._where = where
self._parse_generators()
|
Constructor.
Args:
value: Value (maybe) annotated with generators to use as template.
compute_derived: Whether to compute derived value at this level.
We only want to compute derived value at root level since reference path
may go out of scope of a non-root ObjectTemplate.
where: Function to filter hyper primitives. If None, all hyper primitives
from `value` will be included in the encoding/decoding process.
Otherwise only the hyper primitives on which 'where' returns True will
be included. `where` can be useful to partition a search space into
separate optimization processes.
Please see 'ObjectTemplate' docstr for details.
|
github-repos
|
def autorotate(image, orientation=None):
orientation_value = orientation if orientation else \
image._getexif().get(EXIF_KEYS.get('Orientation'))
if orientation_value is None:
raise ImDirectException("No orientation available in Exif "
"tag or given explicitly.")
if orientation_value in (1, 2):
i = image
elif orientation_value in (3, 4):
i = image.transpose(Image.ROTATE_180)
elif orientation_value in (5, 6):
i = image.transpose(Image.ROTATE_270)
elif orientation_value in (7, 8):
i = image.transpose(Image.ROTATE_90)
else:
i = image
if orientation_value in (2, 4, 5, 7):
i = i.transpose(Image.FLIP_LEFT_RIGHT)
return i
|
Rotate and return an image according to its Exif information.
ROTATION_NEEDED = {
1: 0,
2: 0 (Mirrored),
3: 180,
4: 180 (Mirrored),
5: -90 (Mirrored),
6: -90,
7: 90 (Mirrored),
8: 90,
}
Args:
image (PIL.Image.Image): PIL image to rotate
orientation (int): Optional orientation value in [1, 8]
Returns:
A :py:class:`~PIL.Image.Image` image.
|
juraj-google-style
|
def _tensor_product(self, other, reverse=False):
if not isinstance(other, Chi):
other = Chi(other)
if reverse:
input_dims = self.input_dims() + other.input_dims()
output_dims = self.output_dims() + other.output_dims()
data = np.kron(other.data, self._data)
else:
input_dims = other.input_dims() + self.input_dims()
output_dims = other.output_dims() + self.output_dims()
data = np.kron(self._data, other.data)
return Chi(data, input_dims, output_dims)
|
Return the tensor product channel.
Args:
other (QuantumChannel): a quantum channel.
reverse (bool): If False return self ⊗ other, if True return
other ⊗ self. [Default: False]
Returns:
Chi: the tensor product channel as a Chi object.
Raises:
QiskitError: if other is not a QuantumChannel subclass.
|
juraj-google-style
|
def _ConvertInputMapValues(name, input_map):
if not all((isinstance(v, tensor.Tensor) for v in input_map.values())):
if name == '':
raise ValueError('tf.import_graph_def() requires a non-empty `name` if `input_map` contains non-Tensor values. Try calling tf.convert_to_tensor() on `input_map` values before calling tf.import_graph_def().')
with ops.name_scope('_inputs'):
input_map = {k: ops.convert_to_tensor(v) for k, v in input_map.items()}
return input_map
|
Ensures all input map values are tensors.
This should be called from inside the import name scope.
Args:
name: the `name` argument passed to import_graph_def
input_map: the `input_map` argument passed to import_graph_def.
Returns:
A possibly-updated version of `input_map`.
Raises:
ValueError: if input map values cannot be converted due to empty name scope.
|
github-repos
|
def acquire_multi(self, n=1):
browsers = []
with self._lock:
if (len(self._in_use) >= self.size):
raise NoBrowsersAvailable
while ((len(self._in_use) < self.size) and (len(browsers) < n)):
browser = self._fresh_browser()
browsers.append(browser)
self._in_use.add(browser)
return browsers
|
Returns a list of up to `n` browsers.
Raises:
NoBrowsersAvailable if none available
|
codesearchnet
|
def execute_work_items(work_items, config):
return celery.group((worker_task.s(work_item, config) for work_item in work_items))
|
Execute a suite of tests for a given set of work items.
Args:
work_items: An iterable of `work_db.WorkItem`s.
config: The configuration to use for the test execution.
Returns: An iterable of WorkItems.
|
codesearchnet
|
def is_user_profile_valid(user_profile):
if not user_profile:
return False
if not type(user_profile) is dict:
return False
if UserProfile.USER_ID_KEY not in user_profile:
return False
if UserProfile.EXPERIMENT_BUCKET_MAP_KEY not in user_profile:
return False
experiment_bucket_map = user_profile.get(UserProfile.EXPERIMENT_BUCKET_MAP_KEY)
if not type(experiment_bucket_map) is dict:
return False
for decision in experiment_bucket_map.values():
if type(decision) is not dict or UserProfile.VARIATION_ID_KEY not in decision:
return False
return True
|
Determine if provided user profile is valid or not.
Args:
user_profile: User's profile which needs to be validated.
Returns:
Boolean depending upon whether profile is valid or not.
|
juraj-google-style
|
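A compact, runnable restatement of the checks above. The three key names are assumptions standing in for the constants on the library's `UserProfile` class.
```python
USER_ID_KEY = 'user_id'                       # assumed constant values
EXPERIMENT_BUCKET_MAP_KEY = 'experiment_bucket_map'
VARIATION_ID_KEY = 'variation_id'

def is_user_profile_valid(user_profile):
    """Same checks as above: required keys present, every decision has a variation id."""
    if not isinstance(user_profile, dict) or not user_profile:
        return False
    if USER_ID_KEY not in user_profile or EXPERIMENT_BUCKET_MAP_KEY not in user_profile:
        return False
    bucket_map = user_profile[EXPERIMENT_BUCKET_MAP_KEY]
    if not isinstance(bucket_map, dict):
        return False
    return all(isinstance(d, dict) and VARIATION_ID_KEY in d for d in bucket_map.values())

assert is_user_profile_valid(
    {'user_id': 'u1', 'experiment_bucket_map': {'exp': {'variation_id': 'v1'}}})
assert not is_user_profile_valid({'user_id': 'u1'})                       # bucket map missing
assert not is_user_profile_valid(
    {'user_id': 'u1', 'experiment_bucket_map': {'exp': {}}})              # variation id missing
```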
def __init__(self, max_size=-1, client_timeout=-1, autoclose=False,
**client_kwargs):
self.max_size = max_size
self.client_timeout = client_timeout
self.client_kwargs = client_kwargs
self.__ioloop = client_kwargs.get('ioloop',
tornado.ioloop.IOLoop.instance())
self.autoclose = autoclose
self.__pool = deque()
if self.max_size != -1:
self.__sem = tornado.locks.Semaphore(self.max_size)
else:
self.__sem = None
self.__autoclose_periodic = None
if self.autoclose and self.client_timeout > 0:
every = int(self.client_timeout) * 100
if int(tornado.version[0]) >= 5:
cb = tornado.ioloop.PeriodicCallback(self._autoclose,
every)
else:
cb = tornado.ioloop.PeriodicCallback(self._autoclose,
every, self.__ioloop)
self.__autoclose_periodic = cb
self.__autoclose_periodic.start()
|
Constructor.
Args:
max_size (int): max size of the pool (-1 means "no limit").
client_timeout (int): timeout in seconds of a connection released
to the pool (-1 means "no timeout").
autoclose (boolean): automatically disconnect released connections
with lifetime > client_timeout (test made every
client_timeout/10 seconds).
client_kwargs (dict): Client constructor arguments.
|
juraj-google-style
|
def ccy_pair(local, base='USD') -> CurrencyPair:
ccy_param = param.load_info(cat='ccy')
if (f'{local}{base}' in ccy_param):
info = ccy_param[f'{local}{base}']
elif (f'{base}{local}' in ccy_param):
info = ccy_param[f'{base}{local}']
info['factor'] = (1.0 / info.get('factor', 1.0))
info['power'] = (- info.get('power', 1))
elif (base.lower() == local.lower()):
info = dict(ticker='')
info['factor'] = 1.0
if (base[(- 1)].lower() == base[(- 1)]):
info['factor'] /= 100.0
if (local[(- 1)].lower() == local[(- 1)]):
info['factor'] *= 100.0
else:
logger = logs.get_logger(ccy_pair)
logger.error(f'incorrect currency - local {local} / base {base}')
return CurrencyPair(ticker='', factor=1.0, power=1)
if ('factor' not in info):
info['factor'] = 1.0
if ('power' not in info):
info['power'] = 1
return CurrencyPair(**info)
|
Currency pair info
Args:
local: local currency
base: base currency
Returns:
CurrencyPair
Examples:
>>> ccy_pair(local='HKD', base='USD')
CurrencyPair(ticker='HKD Curncy', factor=1.0, power=1)
>>> ccy_pair(local='GBp')
CurrencyPair(ticker='GBP Curncy', factor=100, power=-1)
>>> ccy_pair(local='USD', base='GBp')
CurrencyPair(ticker='GBP Curncy', factor=0.01, power=1)
>>> ccy_pair(local='XYZ', base='USD')
CurrencyPair(ticker='', factor=1.0, power=1)
>>> ccy_pair(local='GBP', base='GBp')
CurrencyPair(ticker='', factor=0.01, power=1)
>>> ccy_pair(local='GBp', base='GBP')
CurrencyPair(ticker='', factor=100.0, power=1)
|
codesearchnet
|
def recipe_dcm(config, auth_read, account, body, delete):
dcm(config, {'auth': auth_read, 'report': {'account': account, 'body': body}, 'delete': delete})
|
Create a CM report from a JSON definition.
Args:
auth_read (authentication) - Credentials used for reading data.
account (string) - NA
body (json) - NA
delete (boolean) - NA
|
github-repos
|
def __init__(self, loss_tensor, fail_on_nan_loss=True):
self._loss_tensor = loss_tensor
self._fail_on_nan_loss = fail_on_nan_loss
|
Initializes a `NanTensorHook`.
Args:
loss_tensor: `Tensor`, the loss tensor.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
|
github-repos
|
def _global_report_benchmark(name, iters=None, cpu_time=None, wall_time=None, throughput=None, extras=None, metrics=None):
logging.info('Benchmark [%s] iters: %d, wall_time: %g, cpu_time: %g,throughput: %g, extras: %s, metrics: %s', name, iters if iters is not None else -1, wall_time if wall_time is not None else -1, cpu_time if cpu_time is not None else -1, throughput if throughput is not None else -1, str(extras) if extras else 'None', str(metrics) if metrics else 'None')
entries = test_log_pb2.BenchmarkEntries()
entry = entries.entry.add()
entry.name = name
if iters is not None:
entry.iters = iters
if cpu_time is not None:
entry.cpu_time = cpu_time
if wall_time is not None:
entry.wall_time = wall_time
if throughput is not None:
entry.throughput = throughput
if extras is not None:
if not isinstance(extras, dict):
raise TypeError('extras must be a dict')
for k, v in extras.items():
if isinstance(v, numbers.Number):
entry.extras[k].double_value = v
else:
entry.extras[k].string_value = str(v)
if metrics is not None:
if not isinstance(metrics, list):
raise TypeError('metrics must be a list')
for metric in metrics:
if 'name' not in metric:
raise TypeError("metric must have a 'name' field")
if 'value' not in metric:
raise TypeError("metric must have a 'value' field")
metric_entry = entry.metrics.add()
metric_entry.name = metric['name']
metric_entry.value = metric['value']
if 'min_value' in metric:
metric_entry.min_value.value = metric['min_value']
if 'max_value' in metric:
metric_entry.max_value.value = metric['max_value']
test_env = os.environ.get(TEST_REPORTER_TEST_ENV, None)
if test_env is None:
print(str(entries))
return
serialized_entry = entries.SerializeToString()
mangled_name = name.replace('/', '__')
output_path = '%s%s' % (test_env, mangled_name)
if gfile.Exists(output_path):
raise IOError('File already exists: %s' % output_path)
with gfile.GFile(output_path, 'wb') as out:
out.write(serialized_entry)
|
Method for recording a benchmark directly.
Args:
name: The BenchmarkEntry name.
iters: (optional) How many iterations were run
cpu_time: (optional) Total cpu time in seconds
wall_time: (optional) Total wall time in seconds
throughput: (optional) Throughput (in MB/s)
extras: (optional) Dict mapping string keys to additional benchmark info.
metrics: (optional) A list of dict representing metrics generated by the
benchmark. Each dict should contain keys 'name' and 'value'. A dict
can optionally contain keys 'min_value' and 'max_value'.
Raises:
TypeError: if extras is not a dict.
IOError: if the benchmark output file already exists.
|
github-repos
|
def output(self, filename):
if not filename.endswith('.dot'):
filename += '.dot'
if filename == ".dot":
filename = "all_contracts.dot"
with open(filename, 'w', encoding='utf8') as f:
self.info(f'Call Graph: {filename}')
f.write('\n'.join(['strict digraph {'] + [self._process_functions(self.slither.functions)] + ['}']))
for derived_contract in self.slither.contracts_derived:
with open(f'{derived_contract.name}.dot', 'w', encoding='utf8') as f:
self.info(f'Call Graph: {derived_contract.name}.dot')
f.write('\n'.join(['strict digraph {'] + [self._process_functions(derived_contract.functions)] + ['}']))
|
Output the graph in filename
Args:
filename(string)
|
juraj-google-style
|
def _inquire(self, **kwargs):
if (rname_rfc6680 is None):
raise NotImplementedError('Your GSSAPI implementation does not support RFC 6680 (the GSSAPI naming extensions)')
if (not kwargs):
default_val = True
else:
default_val = False
attrs = kwargs.get('attrs', default_val)
mech_name = kwargs.get('mech_name', default_val)
return rname_rfc6680.inquire_name(self, mech_name=mech_name, attrs=attrs)
|
Inspect this name for information.
This method inspects the name for information.
If no keyword arguments are passed, all available information
is returned. Otherwise, only the keyword arguments that
are passed and set to `True` are returned.
Args:
mech_name (bool): get whether this is a mechanism name,
and, if so, the associated mechanism
attrs (bool): get the attributes names for this name
Returns:
InquireNameResult: the results of the inquiry, with unused
fields set to None
Raises:
GSSError
|
codesearchnet
|
def get_memory_region(x, query_block_shape, memory_flange, q_indices):
x_query_padded = pad_to_multiple_2d(x, query_block_shape)
x_center = gather_blocks_2d(x_query_padded, q_indices)
paddings = [[0, 0], [0, 0], [memory_flange[0], 0],
[memory_flange[1], memory_flange[1]], [0, 0]]
x_memory_padded = tf.pad(x_query_padded, paddings)
left_x = None
top_x = None
if memory_flange[1] > 0:
left_x_region = x_memory_padded[:, :, memory_flange[
0]:, :-(query_block_shape[1] + memory_flange[1]), :]
left_memory_shape = (query_block_shape[0], memory_flange[1])
left_indices = gather_indices_2d(left_x_region, left_memory_shape,
query_block_shape)
left_x = gather_blocks_2d(left_x_region, left_indices)
if memory_flange[0] > 0:
top_x_region = x_memory_padded[:, :, :-query_block_shape[0], :, :]
top_memory_shape = (memory_flange[0],
query_block_shape[1] + 2 * memory_flange[1])
top_indices = gather_indices_2d(top_x_region, top_memory_shape,
query_block_shape)
top_x = gather_blocks_2d(top_x_region, top_indices)
x_flange = None
if top_x is not None and left_x is not None:
x_flange = tf.concat([top_x, left_x], axis=3)
else:
x_flange = top_x if top_x is not None else left_x
return x_flange, x_center
|
Get the memory regions that surround a 2d query.
The memory regions will be the left and top right.
Args:
x: A tensor with shape [batch, heads, height, width, depth]
query_block_shape: a 2-d tuple of integers
memory_flange: a 2-d tuple of integers
q_indices: a tensor of indices for each of the center blocks.
[num_blocks, block_length]
Returns:
x_flange: A tensor of shape [batch, heads, #blocks, block_length, depth]
x_center: A tensor of the gathered center query blocks.
|
juraj-google-style
|
def make_group_index(self, groupby_cols, bool_arr):
factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
if len(factor_list) == 0:
tmp_rootdir = self.create_tmp_rootdir()
carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
carray_values = ['Total']
elif len(factor_list) == 1:
carray_factor = factor_list[0]
carray_values = values_list[0]
else:
if self.group_cache_valid(col_list=groupby_cols):
col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
col_factor_rootdir = col_rootdir + '.factor'
carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
col_values_rootdir = col_rootdir + '.values'
carray_values = bcolz.carray(rootdir=col_values_rootdir)
else:
carray_factor, carray_values = \
self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
nr_groups = len(carray_values)
skip_key = None
if bool_arr is not None:
tmp_rootdir = self.create_tmp_rootdir()
carray_factor = bcolz.eval(
'(factor + 1) * bool - 1',
user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
tmp_rootdir = self.create_tmp_rootdir()
labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
carray_factor, values = ctable_ext.factorize(carray_factor, labels)
filter_check = \
[key for key, value in values.items() if value == -1]
if filter_check:
skip_key = filter_check[0]
nr_groups = len(values)
if skip_key is None:
skip_key = nr_groups
return carray_factor, nr_groups, skip_key
|
Create unique groups for groupby loop
Args:
groupby_cols: the column(s) to group by.
bool_arr: optional boolean array used to filter rows before grouping (may be None).
Returns:
carray: (carray_factor)
int: (nr_groups) the number of resulting groups
int: (skip_key)
|
juraj-google-style
|
def learn(self, grad_arr):
deconvolution_layer_list = self.__deconvolution_layer_list[::-1]
for i in range(len(deconvolution_layer_list)):
try:
grad_arr = deconvolution_layer_list[i].back_propagate(grad_arr)
except:
self.__logger.debug("Error raised in Convolution layer " + str(i + 1))
raise
self.__optimize_deconvolution_layer(self.__learning_rate, 1)
layerable_cnn_list = self.__convolutional_auto_encoder.layerable_cnn_list[::-1]
for i in range(len(layerable_cnn_list)):
try:
grad_arr = layerable_cnn_list[i].back_propagate(grad_arr)
except:
self.__logger.debug(
"Delta computation raised an error in CNN layer " + str(len(layerable_cnn_list) - i)
)
raise
self.__convolutional_auto_encoder.optimize(self.__learning_rate, 1)
return grad_arr
|
Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
Returns:
`np.ndarray` of delta or gradients.
|
juraj-google-style
|
def decode_datetime(encoded_datetime):
time_zone_match = _TIME_ZONE_RE.search(encoded_datetime)
if time_zone_match:
time_string = encoded_datetime[:time_zone_match.start(1)].upper()
else:
time_string = encoded_datetime.upper()
if '.' in time_string:
format_string = '%Y-%m-%dT%H:%M:%S.%f'
else:
format_string = '%Y-%m-%dT%H:%M:%S'
decoded_datetime = datetime.datetime.strptime(time_string, format_string)
if not time_zone_match:
return decoded_datetime
if time_zone_match.group('z'):
offset_minutes = 0
else:
sign = time_zone_match.group('sign')
hours, minutes = [int(value) for value in
time_zone_match.group('hours', 'minutes')]
offset_minutes = hours * 60 + minutes
if sign == '-':
offset_minutes *= -1
return datetime.datetime(decoded_datetime.year,
decoded_datetime.month,
decoded_datetime.day,
decoded_datetime.hour,
decoded_datetime.minute,
decoded_datetime.second,
decoded_datetime.microsecond,
TimeZoneOffset(offset_minutes))
|
Decode a DateTimeField parameter from a string to a python datetime.
Args:
encoded_datetime: A string in RFC 3339 format.
Returns:
A datetime object with the date and time specified in encoded_datetime.
Raises:
ValueError: If the string is not in a recognized format.
|
juraj-google-style
|
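A stripped-down sketch of the timezone-offset handling, using the stdlib `datetime.timezone` where the original attaches its own `TimeZoneOffset`; the input string is made up.
```python
import datetime

encoded = '2012-09-30T15:31:50-08:00'

# Split off the numeric offset, mirroring what the regex match provides above.
time_part, sign, offset = encoded[:-6], encoded[-6], encoded[-5:]
hours, minutes = (int(v) for v in offset.split(':'))
offset_minutes = hours * 60 + minutes
if sign == '-':
    offset_minutes *= -1

decoded = datetime.datetime.strptime(time_part, '%Y-%m-%dT%H:%M:%S').replace(
    tzinfo=datetime.timezone(datetime.timedelta(minutes=offset_minutes)))
assert decoded.utcoffset() == datetime.timedelta(hours=-8)
```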
def _ReadSelectedVolumes(self, volume_system, prefix='v'):
volume_identifiers_string = self._input_reader.Read()
volume_identifiers_string = volume_identifiers_string.strip()
if not volume_identifiers_string:
return []
selected_volumes = self._ParseVolumeIdentifiersString(
volume_identifiers_string, prefix=prefix)
if selected_volumes == ['all']:
return [
'{0:s}{1:d}'.format(prefix, volume_index)
for volume_index in range(1, volume_system.number_of_volumes + 1)]
return selected_volumes
|
Reads the selected volumes provided by the user.
Args:
volume_system (APFSVolumeSystem): volume system.
prefix (Optional[str]): volume identifier prefix.
Returns:
list[str]: selected volume identifiers including prefix.
Raises:
KeyboardInterrupt: if the user requested to abort.
ValueError: if the volume identifiers string could not be parsed.
|
juraj-google-style
|
def read_xyz(cls, buf, start_index=0, get_bonds=True, nrows=None, engine=None):
frame = pd.read_table(buf, skiprows=2, comment='#', nrows=nrows, delim_whitespace=True, names=['atom', 'x', 'y', 'z'], engine=engine)
remove_digits = partial(re.sub, '[0-9]+', '')
frame['atom'] = frame['atom'].apply(remove_digits)
molecule = cls(frame)
molecule.index = range(start_index, (start_index + len(molecule)))
if get_bonds:
molecule.get_bonds(use_lookup=False, set_lookup=True)
return molecule
|
Read a file of coordinate information.
Reads xyz-files.
Args:
buf (str): File path or file-like object containing the xyz data.
start_index (int):
get_bonds (bool):
nrows (int): Number of rows of file to read.
Note that the first two rows are implicitly excluded.
engine (str): Wrapper for argument of :func:`pandas.read_csv`.
Returns:
Cartesian:
|
codesearchnet
|
def create_version(self, version_label):
version_response = self.repo.api.http_request('POST', ('%s/fcr:versions' % self.uri), data=None, headers={'Slug': version_label})
if (version_response.status_code == 201):
logger.debug(('version created: %s' % version_response.headers['Location']))
self._affix_version(version_response.headers['Location'], version_label)
|
method to create a new version of the resource as it currently stands
- Note: this will create a version based on the current live instance of the resource,
not the local version, which might require self.update() to update.
Args:
version_label (str): label to be used for version
Returns:
(ResourceVersion): instance of ResourceVersion, also appended to self.versions
|
codesearchnet
|
def _create_extractors(col_params):
result = []
for col_param in col_params:
result.append(_create_extractor(col_param))
return result
|
Creates extractors to extract properties corresponding to 'col_params'.
Args:
col_params: List of ListSessionGroupsRequest.ColParam protobufs.
Returns:
A list of extractor functions. The ith element in the
returned list extracts the column corresponding to the ith element of
_request.col_params
|
juraj-google-style
|
def with_target_audience(self, target_audience):
return self.__class__(
self._signer,
service_account_email=self._service_account_email,
token_uri=self._token_uri,
target_audience=target_audience,
additional_claims=self._additional_claims.copy())
|
Create a copy of these credentials with the specified target
audience.
Args:
target_audience (str): The intended audience for these credentials,
used when requesting the ID Token.
Returns:
google.auth.service_account.IDTokenCredentials: A new credentials
instance.
|
juraj-google-style
|
def members(name, members_list, **kwargs):
members_list = [salt.utils.win_functions.get_sam_name(m) for m in members_list.split(",")]
if not isinstance(members_list, list):
log.debug('member_list is not a list')
return False
try:
obj_group = _get_group_object(name)
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in obj_group.members()]
existing_members.sort()
members_list.sort()
if existing_members == members_list:
log.info('%s membership is correct', name)
return True
success = True
for member in members_list:
if member not in existing_members:
try:
obj_group.Add('WinNT://' + member.replace('\\', '/'))
log.info('User added: %s', member)
except pywintypes.com_error as exc:
msg = 'Failed to add {0} to {1}. {2}'.format(
member, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
success = False
for member in existing_members:
if member not in members_list:
try:
obj_group.Remove('WinNT://' + member.replace('\\', '/'))
log.info('User removed: %s', member)
except pywintypes.com_error as exc:
msg = 'Failed to remove {0} from {1}. {2}'.format(
member, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
success = False
return success
|
Ensure a group contains only the members in the list
Args:
name (str):
The name of the group to modify
members_list (str):
A single user or a comma separated list of users. The group will
contain only the users specified in this list.
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.members foo 'user1,user2,user3'
|
juraj-google-style
|
def send_email_message(self, recipient, subject, html_message, text_message, sender_email, sender_name):
if (not current_app.testing):
from flask_sendmail import Message
message = Message(subject, recipients=[recipient], html=html_message, body=text_message)
self.mail.send(message)
|
Send email message via Flask-Sendmail.
Args:
recipient: Email address or tuple of (Name, Email-address).
subject: Subject line.
html_message: The message body in HTML.
text_message: The message body in plain text.
sender_email: Sender's email address.
sender_name: Sender's display name.
|
codesearchnet
|
def parsetime(text):
mins, maxs = text.split('-', 1)
minv = s_time.parse(mins)
maxv = s_time.parse(maxs, base=minv)
return minv, maxv
|
Parse an interval time string and return a (min,max) tuple.
Args:
text (str): A time interval string
Returns:
((int, int)): A (min, max) tuple of epoch millisecond timestamps.
|
juraj-google-style
|
def reply(self, reply_comment):
payload = '{ "Comment": "' + reply_comment + '"}'
endpoint = 'https:
self._make_api_call('post', endpoint, data=payload)
|
Reply to the Message.
Notes:
HTML can be inserted in the string and will be interpreted properly by Outlook.
Args:
reply_comment: String message to send with email.
|
juraj-google-style
|
def bottleneck_block(cnn, depth, depth_bottleneck, stride, pre_activation):
if pre_activation:
bottleneck_block_v2(cnn, depth, depth_bottleneck, stride)
else:
bottleneck_block_v1(cnn, depth, depth_bottleneck, stride)
|
Bottleneck block with identity short-cut.
Args:
cnn: the network to append bottleneck blocks.
depth: the number of output filters for this bottleneck block.
depth_bottleneck: the number of bottleneck filters for this block.
stride: Stride used in the first layer of the bottleneck block.
pre_activation: use pre_activation structure used in v2 or not.
|
juraj-google-style
|
def solveAsync(self, callback):
def async_call():
self._lock.acquire()
try:
self._impl.solve()
except Exception:
self._lock.release()
raise
else:
self._lock.release()
callback.run()
Thread(target=async_call).start()
|
Solve the current model asynchronously.
Args:
callback: Callback to be executed when the solver is done.
|
juraj-google-style
|
def dropout(inputs, keep_prob=0.5, is_training=True, scope=None):
if (is_training and (keep_prob > 0)):
with tf.name_scope(scope, 'Dropout', [inputs]):
return tf.nn.dropout(inputs, keep_prob)
else:
return inputs
|
Returns a dropout layer applied to the input.
Args:
inputs: the tensor to pass to the Dropout layer.
keep_prob: the probability of keeping each input unit.
is_training: whether or not the model is in training mode. If so, dropout is
applied and values scaled. Otherwise, inputs is returned.
scope: Optional scope for name_scope.
Returns:
a tensor representing the output of the operation.
|
codesearchnet
|
def _parse_dataset(file_path, tmp_dir, train):
input_path = file_path
file_name = 'train' if train else 'dev'
gen_output_path = os.path.join(tmp_dir, file_name + '.txt')
example_output_path = os.path.join(tmp_dir, _EXAMPLES_FILE)
print('input path: ' + input_path)
print('gen_output_path: ' + gen_output_path)
print('example_output_path: ' + example_output_path)
input_file = tf.gfile.Open(input_path, mode='r')
examples = []
for counter, line in enumerate(input_file):
if counter == 0:
continue
line_split = line.split('\t')
parse1 = line_split[_PARSE1_INDEX]
parse2 = line_split[_PARSE2_INDEX]
consensus_label = line_split[_LABEL_INDEX]
tokens1 = _get_tokens_and_tags(parse1)
tokens2 = _get_tokens_and_tags(parse2)
tokens1_str = ' '.join(tokens1)
tokens2_str = ' '.join(tokens2)
if consensus_label != '-':
examples.append([tokens1_str, tokens2_str, consensus_label])
input_file.close()
with tf.gfile.GFile(gen_output_path, 'w') as f:
for tokens1_str, tokens2_str, consensus_label in examples:
f.write('%s\t%s\t%s\n' % (tokens1_str, tokens2_str, consensus_label))
if train:
with tf.gfile.GFile(example_output_path, 'w') as f:
for tokens1_str, tokens2_str, consensus_label in examples:
f.write('%s %s\n' % (tokens1_str, tokens2_str))
|
Convert the dataset in to a simpler format.
This function creates two files. One for being processed to produce a vocab
and another to generate the data.
Args:
file_path: string, path to the file to parse.
tmp_dir: string, path to the directory to output the files.
train: bool, indicating if we are parsing the training set.
|
juraj-google-style
|
def load_profile_variants(adapter, variant_file):
vcf_info = check_vcf(variant_file)
nr_variants = vcf_info['nr_variants']
variant_type = vcf_info['variant_type']
if variant_type != 'snv':
LOG.critical('Variants used for profiling must be SNVs only')
raise VcfError
vcf = get_vcf(variant_file)
profile_variants = [build_profile_variant(variant) for variant in vcf]
adapter.add_profile_variants(profile_variants)
|
Loads variants used for profiling
Args:
adapter (loqusdb.plugins.Adapter): initialized plugin
variant_file(str): Path to variant file
|
juraj-google-style
|
def by_issn(issn):
old_url = aleph.ALEPH_URL
aleph.ALEPH_URL = NTK_ALEPH_URL
records = aleph.getISSNsXML(issn, base='STK02')
aleph.ALEPH_URL = old_url
for record in records:
marc = MARCXMLRecord(record)
additional_info = {'222': marc.get('222', None), 'PER': marc.get('PER', None), '776': marc.get('776', None), '008': marc.get('008', None), 'alt_end_date': ''}
additional_info = {key: val for (key, val) in additional_info.iteritems() if val}
alt_end_date = None
alt_creation_date = None
if additional_info['008']:
alt_creation_date = additional_info['008'][7:11]
alt_end_date = additional_info['008'][11:15]
if (alt_end_date in ['9999', '****']):
alt_creation_date += '-'
alt_end_date = None
additional_info['alt_end_date'] = alt_end_date
author = Author.parse_author(marc)
model = Model(url=_first_or_none(marc.get('856u')), conspect=_first_or_none(marc.get('072a')), annotation_tags=_first_or_none(marc.get('520a')), periodicity=_first_or_none(marc.get('310a')), title_tags=_first_or_none(marc.get('222a')), subtitle_tags=_first_or_none(marc.get('245b')), place_tags=remove_hairs((_first_or_none(marc.get('260a')) or '')), author_tags=(author._asdict() if author else None), publisher_tags=remove_hairs(((_first_or_none(marc.get('260b')) or _first_or_none(marc.get('264b')) or ''),), ', '), creation_dates=_first_or_none(marc.get('260c', [alt_creation_date])), lang_tags=_first_or_none(marc.get('040b')), keyword_tags=marc.get('650a07'), source_info=_first_or_none(marc.get('500a')), original_xml=record, additional_info=additional_info)
(yield _add_source(model))
|
Query aleph for records with given `issn`. The lookup is directed to the
NTK's Aleph.
Args:
issn (str): ISSN of the periodical.
Returns:
obj: :class:`Model` instances for each record.
|
codesearchnet
|
def _init_profile_batch(self, profile_batch):
profile_batch_error_message = f'profile_batch must be a non-negative integer or 2-tuple of positive integers. A pair of positive integers signifies a range of batches to profile. Found: {profile_batch}'
if isinstance(profile_batch, str):
profile_batch = str(profile_batch).split(',')
profile_batch = tree.map_structure(int, profile_batch)
if isinstance(profile_batch, int):
self._start_batch = profile_batch
self._stop_batch = profile_batch
elif isinstance(profile_batch, (tuple, list)) and len(profile_batch) == 2:
self._start_batch, self._stop_batch = profile_batch
else:
raise ValueError(profile_batch_error_message)
if self._start_batch < 0 or self._stop_batch < self._start_batch:
raise ValueError(profile_batch_error_message)
self._profiler_started = False
self._batch_trace_context = None
if self._start_batch > 0:
self._start_profiler(logdir='')
self._stop_profiler(save=False)
self._is_tracing = False
self._should_trace = not (self._start_batch == 0 and self._stop_batch == 0)
|
Validate profile_batch value and set the range of batches to profile.
Sets values of _start_batch and _stop_batch attributes,
specifying the start and stop batch to profile.
Setting `profile_batch=0` disables profiling.
Args:
profile_batch: The range of batches to profile. Should be a
non-negative integer or a comma separated string of pair of positive
integers. A pair of positive integers signify a range of batches to
profile.
Raises:
ValueError: If profile_batch is not an integer or a comma separated
pair of positive integers.
|
github-repos
|
def get_volume():
if (system.get_name() == 'windows'):
pass
elif (system.get_name() == 'mac'):
volume = system.get_cmd_out(['osascript', '-e', 'set ovol to output volume of (get volume settings); return the quoted form of ovol'])
return (int(volume) * 10)
else:
volume = system.get_cmd_out("amixer get Master |grep % |awk '{print $5}'|sed -e 's/\\[//' -e 's/\\]//'")
return int(volume.replace('%', ''))
|
Get the volume.
Get the current volume.
Returns:
int: The current volume (percentage, between 0 and 100).
|
codesearchnet
|
def codemirror_field_css_bundle(field):
manifesto = CodemirrorAssetTagRender()
manifesto.register_from_fields(field)
try:
bundle_name = manifesto.css_bundle_names()[0]
except IndexError:
msg = "Given field with configuration name '{}' does not have a CSS bundle name"
raise CodeMirrorFieldBundleError(msg.format(field.config_name))
return bundle_name
|
Filter to get CodeMirror CSS bundle name needed for a single field.
Example:
::
{% load djangocodemirror_tags %}
{{ form.myfield|codemirror_field_css_bundle }}
Arguments:
field (djangocodemirror.fields.CodeMirrorField): A form field.
Raises:
CodeMirrorFieldBundleError: Raised if Codemirror configuration from
field does not have a bundle name.
Returns:
string: Bundle name to load with webassets.
|
codesearchnet
|
def constant_value(pred):
if isinstance(pred, int):
if pred == 1:
pred = True
elif pred == 0:
pred = False
if isinstance(pred, variables.Variable):
return None
return smart_module.smart_constant_value(pred)
|
Return the bool value for `pred`, or None if `pred` had a dynamic value.
Args:
pred: A scalar, either a Python bool or a TensorFlow boolean variable
or tensor, or the Python integer 1 or 0.
Returns:
True or False if `pred` has a constant boolean value, None otherwise.
Raises:
TypeError: If `pred` is not a Variable, Tensor or bool, or Python
integer 1 or 0.
|
github-repos
|
def _path_components(self, path):
if ((not path) or (path == self._path_separator(path))):
return []
(drive, path) = self.splitdrive(path)
path_components = path.split(self._path_separator(path))
assert (drive or path_components)
if (not path_components[0]):
if ((len(path_components) > 1) and (not path_components[1])):
path_components = []
else:
path_components = path_components[1:]
if drive:
path_components.insert(0, drive)
return path_components
|
Breaks the path into a list of component names.
Does not include the root directory as a component, as all paths
are considered relative to the root directory for the FakeFilesystem.
Callers should basically follow this pattern:
.. code:: python
file_path = self.absnormpath(file_path)
path_components = self._path_components(file_path)
current_dir = self.root
for component in path_components:
if component not in current_dir.contents:
raise IOError
_do_stuff_with_component(current_dir, component)
current_dir = current_dir.get_entry(component)
Args:
path: Path to tokenize.
Returns:
The list of names split from path.
|
codesearchnet
|
def thread_safe_client(client, lock=None):
if (lock is None):
lock = threading.Lock()
return _ThreadSafeProxy(client, lock)
|
Create a thread-safe proxy which locks every method call
for the given client.
Args:
client: the client object to be guarded.
lock: the lock object that will be used to lock client's methods.
If None, a new lock will be used.
Returns:
A thread-safe proxy for the given client.
|
codesearchnet
|
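A minimal sketch of what a `_ThreadSafeProxy` can look like: every callable attribute is wrapped so the call runs under the shared lock. The real proxy in the library may differ in detail.
```python
import threading

class _ThreadSafeProxy:
    """Wrap an object so every method call is serialized by one lock."""
    def __init__(self, obj, lock):
        self._obj = obj
        self._lock = lock

    def __getattr__(self, name):
        attr = getattr(self._obj, name)
        if not callable(attr):
            return attr
        def locked(*args, **kwargs):
            with self._lock:
                return attr(*args, **kwargs)
        return locked

items = []
safe_items = _ThreadSafeProxy(items, threading.Lock())
threads = [threading.Thread(target=safe_items.append, args=(i,)) for i in range(8)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert sorted(items) == list(range(8))
```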
def deserialize_ndarray_npy(d):
with io.BytesIO() as f:
f.write(json.loads(d['npy']).encode('latin-1'))
f.seek(0)
return np.load(f)
|
Deserializes a JSONified :obj:`numpy.ndarray` that was created using numpy's
:obj:`save` function.
Args:
d (:obj:`dict`): A dictionary representation of an :obj:`ndarray` object, created
using :obj:`numpy.save`.
Returns:
An :obj:`ndarray` object.
|
juraj-google-style
|
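A round-trip sketch pairing the deserializer above with a hypothetical counterpart serializer that inverts each step (`np.save` into a buffer, latin-1 decode, JSON-encode the string).
```python
import io
import json
import numpy as np

def serialize_ndarray_npy(o):
    """Hypothetical inverse of deserialize_ndarray_npy."""
    with io.BytesIO() as f:
        np.save(f, o)
        f.seek(0)
        return {'npy': json.dumps(f.read().decode('latin-1'))}

def deserialize_ndarray_npy(d):
    with io.BytesIO() as f:
        f.write(json.loads(d['npy']).encode('latin-1'))
        f.seek(0)
        return np.load(f)

a = np.arange(6, dtype=np.float64).reshape(2, 3)
assert np.array_equal(deserialize_ndarray_npy(serialize_ndarray_npy(a)), a)
```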
def open_stream(self, destination, timeout_ms=None):
timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
stream_transport = self._make_stream_transport()
self.transport.write_message(adb_message.AdbMessage(command='OPEN', arg0=stream_transport.local_id, arg1=0, data=(destination + '\x00')), timeout)
if (not stream_transport.ensure_opened(timeout)):
return None
return AdbStream(destination, stream_transport)
|
Opens a new stream to a destination service on the device.
Not the same as the posix 'open' or any other Open methods, this
corresponds to the OPEN message described in the ADB protocol
documentation mentioned above. It creates a stream (uniquely identified
by remote/local ids) that connects to a particular service endpoint.
Args:
destination: The service:command string, see ADB documentation.
timeout_ms: Timeout in milliseconds for the Open to succeed (or as a
PolledTimeout object).
Raises:
AdbProtocolError: Wrong local_id sent to us, or we didn't get a ready
response.
Returns:
An AdbStream object that can be used to read/write data to the specified
service endpoint, or None if the requested service couldn't be opened.
|
codesearchnet
|
def get_subject_without_validation(jwt_bu64):
try:
jwt_dict = get_jwt_dict(jwt_bu64)
except JwtException as e:
return log_jwt_bu64_info(logging.error, str(e), jwt_bu64)
try:
return jwt_dict['sub']
except LookupError:
log_jwt_dict_info(logging.error, 'Missing "sub" key', jwt_dict)
|
Extract subject from the JWT without validating the JWT.
- The extracted subject cannot be trusted for authn or authz.
Args:
jwt_bu64: bytes
JWT, encoded using a URL-safe flavor of Base64.
Returns:
str: The subject contained in the JWT.
|
codesearchnet
|
def _get_validation_labels(val_path):
labels_path = tfds.core.get_tfds_path(_VALIDATION_LABELS_FNAME)
with tf.io.gfile.GFile(labels_path) as labels_f:
labels = labels_f.read().strip().split('\n')
with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj:
tar = tarfile.open(mode='r:', fileobj=tar_f_obj)
images = sorted(tar.getnames())
return dict(zip(images, labels))
|
Returns labels for validation.
Args:
val_path: path to TAR file containing validation images. It is used to
retrieve the name of pictures and associate them to labels.
Returns:
dict, mapping from image name (str) to label (str).
|
juraj-google-style
|
def diff_charsToLines(self, diffs, lineArray):
for i in range(len(diffs)):
text = []
for char in diffs[i][1]:
text.append(lineArray[ord(char)])
diffs[i] = (diffs[i][0], ''.join(text))
|
Rehydrate the text in a diff from a string of line hashes to real lines
of text.
Args:
diffs: Array of diff tuples.
lineArray: Array of unique strings.
|
codesearchnet
|
def forward(self, x):
head_outputs = [None] * self.t
if isinstance(self.input_layer, list):
input_outputs = [mod(x) for mod, x in zip(self.input_layer, x)]
x = torch.stack(input_outputs, dim=1)
for t in self.task_map[0]:
head = self.heads[t]
head_outputs[t] = head(input_outputs[t])
else:
x = self.input_layer(x)
for t in self.task_map[0]:
head = self.heads[t]
head_outputs[t] = head(x)
for i, layer in enumerate(self.middle_layers, start=1):
x = layer(x)
for t in self.task_map[i]:
head = self.heads[t]
if self.config["pass_predictions"] and bool(self.task_graph.parents[t]):
task_input = [x]
for p in self.task_graph.parents[t]:
task_input.append(head_outputs[p])
task_input = torch.stack(task_input, dim=1)
else:
task_input = x
head_outputs[t] = head(task_input)
return head_outputs
|
Returns a list of outputs for tasks 0,...t-1
Args:
x: a [batch_size, ...] batch from X
|
juraj-google-style
|
def send_message(
self, request: str, response_expected: bool, **kwargs: Any
) -> Response:
payload = str(request) + self.delimiter
self.socket.send(payload.encode(self.encoding))
response = bytes()
decoded = None
while True:
response += self.socket.recv(1024)
decoded = response.decode(self.encoding)
if len(decoded) < self.delimiter_length:
continue
elif decoded[-self.delimiter_length :] == self.delimiter:
break
assert decoded is not None
return Response(decoded[: -self.delimiter_length])
|
Transport the message to the server and return the response.
Args:
request: The JSON-RPC request string.
response_expected: Whether the request expects a response.
Returns:
A Response object.
|
juraj-google-style
|
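The delimiter-framing loop from `send_message`, rehearsed on an in-memory list of byte chunks instead of a live socket; delimiter and payload are made up.
```python
delimiter = '\r\n'
delimiter_length = len(delimiter)

# Chunks as they might arrive from socket.recv(); keep reading until the
# decoded buffer ends with the delimiter, then strip it off.
chunks = [b'{"jsonrpc": "2.0", ', b'"result": 5, "id": 1}', b'\r\n']
response = bytes()
decoded = None
for chunk in chunks:
    response += chunk
    decoded = response.decode('utf-8')
    if len(decoded) >= delimiter_length and decoded[-delimiter_length:] == delimiter:
        break

assert decoded[:-delimiter_length] == '{"jsonrpc": "2.0", "result": 5, "id": 1}'
```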
def userhome(username=None):
if username is None:
if 'HOME' in os.environ:
userhome_dpath = os.environ['HOME']
else:
if sys.platform.startswith('win32'):
if 'USERPROFILE' in os.environ:
userhome_dpath = os.environ['USERPROFILE']
elif 'HOMEPATH' in os.environ:
drive = os.environ.get('HOMEDRIVE', '')
userhome_dpath = join(drive, os.environ['HOMEPATH'])
else:
raise OSError("Cannot determine the user's home directory")
else:
import pwd
userhome_dpath = pwd.getpwuid(os.getuid()).pw_dir
else:
if sys.platform.startswith('win32'):
c_users = dirname(userhome())
userhome_dpath = join(c_users, username)
if not exists(userhome_dpath):
raise KeyError('Unknown user: {}'.format(username))
else:
import pwd
try:
pwent = pwd.getpwnam(username)
except KeyError:
raise KeyError('Unknown user: {}'.format(username))
userhome_dpath = pwent.pw_dir
return userhome_dpath
|
Returns the user's home directory.
If `username` is None, this is the directory for the current user.
Args:
username (str): name of a user on the system
Returns:
PathLike: userhome_dpath: path to the home directory
Example:
>>> import getpass
>>> username = getpass.getuser()
>>> assert userhome() == expanduser('~')
>>> assert userhome(username) == expanduser('~')
|
juraj-google-style
|
def quantization_mode(self):
return self._quantization_mode
|
The quantization mode of this policy.
Returns:
The quantization mode of this policy, as a string. If this policy is
not quantized, it will return `None`.
|
github-repos
|
def index_update(x, idx, y):
return _index_update_helper(tf_np.ndarray._with_index_update, x, idx, y)
|
Pure equivalent of `x[idx] = y`.
Returns the value of x that would result from the NumPy-style indexed
assignment `x[idx] = y`. Because it's a pure function, `x` itself won't be
changed.
Args:
x: an array with the values to be updated.
idx: a Numpy-style index, consisting of `None`, integers, slice objects,
ellipses, ndarrays with integer dtypes, or a tuple of the above.
y: the array of updates. `y` must be broadcastable to the shape of the array
that would be returned by `x[idx]`.
Returns:
The updated version of `x`.
|
github-repos
|
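A plain NumPy sketch of the same pure-update semantics, for readers without the `tf_np` shim: copy first, then assign, so the input array is never mutated.
```python
import numpy as np

def np_index_update(x, idx, y):
    """Pure analogue of x[idx] = y that returns a new array."""
    out = np.array(x, copy=True)
    out[idx] = y
    return out

x = np.zeros((2, 3))
updated = np_index_update(x, (0, slice(None)), [1, 2, 3])
assert np.array_equal(updated[0], [1, 2, 3])
assert np.array_equal(x, np.zeros((2, 3)))   # original left untouched
```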
def _cmd_quote(cmd):
pattern = re.compile('^(\\"|\').*|.*(\\"|\')$')
while pattern.match(cmd) is not None:
cmd = cmd.strip('"').strip('\'')
cmd = '"{0}"'.format(cmd)
return cmd
|
Helper function to properly format the path to the binary for the service
Must be wrapped in double quotes to account for paths that have spaces. For
example:
``"C:\Program Files\Path\to\bin.exe"``
Args:
cmd (str): Full path to the binary
Returns:
str: Properly quoted path to the binary
|
juraj-google-style
|
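A restatement of the strip-then-requote logic with two example paths, so the idempotent behaviour on already-quoted input is visible.
```python
import re

def cmd_quote(cmd):
    """Strip any surrounding quotes, then wrap the path in double quotes."""
    pattern = re.compile('^(\\"|\').*|.*(\\"|\')$')
    while pattern.match(cmd) is not None:
        cmd = cmd.strip('"').strip('\'')
    return '"{0}"'.format(cmd)

path = r'C:\Program Files\Path\to\bin.exe'
assert cmd_quote(path) == '"' + path + '"'
assert cmd_quote('"' + path + '"') == '"' + path + '"'   # already-quoted input is normalized
```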
def parse_pair_args(labels, argclass):
label_data = set()
for arg in labels:
(name, value) = split_pair(arg, '=', nullable_idx=1)
label_data.add(argclass(name, value))
return label_data
|
Parse flags of key=value pairs and return a set of argclass.
For pair variables, we need to:
* split the input into name=value pairs (value optional)
* Create the EnvParam object
Args:
labels: list of 'key' or 'key=value' strings.
argclass: Container class for args, must instantiate with argclass(k, v).
Returns:
set of argclass objects.
|
codesearchnet
|
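A self-contained run of the pair parsing with a namedtuple as `argclass`. The `split_pair` helper here is an assumption about the real helper's behaviour: `'k=v'` becomes `(k, v)` and a bare `'k'` becomes `(k, None)`.
```python
import collections

def split_pair(arg, sep, nullable_idx=1):
    """Assumed behaviour: split on the first sep; a missing value side becomes None."""
    parts = arg.split(sep, 1)
    if len(parts) == 1:
        return (parts[0], None) if nullable_idx == 1 else (None, parts[0])
    return parts[0], parts[1]

EnvParam = collections.namedtuple('EnvParam', ['name', 'value'])

def parse_pair_args(labels, argclass):
    label_data = set()
    for arg in labels:
        name, value = split_pair(arg, '=', nullable_idx=1)
        label_data.add(argclass(name, value))
    return label_data

assert parse_pair_args(['env1=prod', 'debug'], EnvParam) == {
    EnvParam('env1', 'prod'), EnvParam('debug', None)}
```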
def GetCommand(self, include_separators=True):
args = []
if self.name:
args.append(self.name)
for element in self.elements:
if element.HasError():
continue
if element.args:
args.extend(element.args)
if element.HasSeparator() and include_separators:
args.append(self.separator)
if self.NeedsSeparator() and include_separators:
args.append(self.separator)
return ' '.join((self._Quote(arg) for arg in args))
|
Returns the command representing the trace up to this point.
Args:
include_separators: Whether or not to include separators in the command.
Returns:
A string representing a Fire CLI command that would produce this trace.
|
github-repos
|
def update_endpoint(self, endpoint_name, endpoint_config_name):
if not _deployment_entity_exists(lambda: self.sagemaker_client.describe_endpoint(EndpointName=endpoint_name)):
raise ValueError('Endpoint with name "{}" does not exist; please use an existing endpoint name'
.format(endpoint_name))
self.sagemaker_client.update_endpoint(EndpointName=endpoint_name,
EndpointConfigName=endpoint_config_name)
return endpoint_name
|
Update an Amazon SageMaker ``Endpoint`` according to the endpoint configuration specified in the request
Raise an error if endpoint with endpoint_name does not exist.
Args:
endpoint_name (str): Name of the Amazon SageMaker ``Endpoint`` to update.
endpoint_config_name (str): Name of the Amazon SageMaker endpoint configuration to deploy.
Returns:
str: Name of the Amazon SageMaker ``Endpoint`` being updated.
|
juraj-google-style
|
def _extract_relative_dates(self, text: str) -> List[Extraction]:
if not text or not self._etk:
return list()
base = self._settings[RELATIVE_BASE] if self._settings[RELATIVE_BASE] else datetime.datetime.now()
if not self._settings[RETURN_AS_TIMEZONE_AWARE]:
base = base.replace(tzinfo=None)
elif not base.tzinfo:
base = base.astimezone(self._default_tz)
res = SpacyRuleExtractor(self._etk.default_nlp, spacy_rules, 'relative_date_extractor').extract(text)
ans = list()
for relative_date in res:
if relative_date.rule_id == 'direction_number_unit':
direction, measure, unit = relative_date.value.split()
measure = num_to_digit[measure.lower()]
elif relative_date.rule_id == 'number_unit_direction':
measure, unit, direction = relative_date.value.split()
measure = num_to_digit[measure.lower()]
elif relative_date.rule_id == 'direction_digit_unit':
direction, measure, unit = relative_date.value.split()
elif relative_date.rule_id == 'digit_unit_direction':
measure, unit, direction = relative_date.value.split()
elif relative_date.rule_id == 'direction_unit':
direction, unit = relative_date.value.split()
measure = '1'
elif relative_date.rule_id == 'the_day':
unit = 'days'
key_ = relative_date.value.split()[-1].lower()
if key_ == 'today':
direction = 'ago'
measure = '0'
else:
direction = 'ago' if key_ == 'yesterday' else 'later'
measure = '1' if len(relative_date.value.split()) == 1 else '2'
else:
continue
unit = unit if unit[-1] == 's' else unit+'s'
direction = directions[direction.lower()] if direction.lower() in directions else '+'
delta_args = {unit: int(direction+measure)}
relative_delta = relativedelta(**delta_args)
date = self._post_process_date(base+relative_delta)
if date:
extraction_date = self._wrap_extraction(date,
relative_date.value,
relative_date.provenance['start_char'],
relative_date.provenance['end_char'])
if extraction_date:
ans.append(extraction_date)
return ans
|
Extract relative dates using spaCy rules
Args:
text: str - the text to extract the relative date strings from
Returns: List of Extraction(s)
|
juraj-google-style
|
def flatten_dict_items(dictionary):
return _pywrap_nest.FlattenDictItems(dictionary)
|
Returns a dictionary with flattened keys and values.
This function flattens the keys and values of a dictionary, which can be
arbitrarily nested structures, and returns the flattened version of such
structures:
```python
example_dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", "d"))}
result = {4: "a", 5: "b", 6: "c", 8: "d"}
flatten_dict_items(example_dictionary) == result
```
The input dictionary must satisfy two properties:
1. Its keys and values should have the same exact nested structure.
2. The set of all flattened keys of the dictionary must not contain repeated
keys.
Args:
dictionary: the dictionary to zip
Returns:
The zipped dictionary.
Raises:
TypeError: If the input is not a dictionary.
ValueError: If any key and value do not have the same structure layout, or
if keys are not unique.
|
github-repos
|
def convert_tanh(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting tanh ...')
if names == 'short':
tf_name = 'TANH' + random_string(4)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
tanh = keras.layers.Activation('tanh', name=tf_name)
layers[scope_name] = tanh(layers[inputs[0]])
|
Convert tanh layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
|
juraj-google-style
|
def get_package(self, name) -> 'EffectPackage':
name, cls_name = parse_package_string(name)
try:
return self.package_map[name]
except KeyError:
raise EffectError("No package '{}' registered".format(name))
|
Get a package by python path. Can also contain path to an effect.
Args:
name (str): Path to effect package or effect
Returns:
The requested EffectPackage
Raises:
EffectError when no package is found
|
juraj-google-style
|
def _events_from_file(filepath):
records = list(tf.compat.v1.python_io.tf_record_iterator(filepath))
result = []
for r in records:
event = tf.compat.v1.Event()
event.ParseFromString(r)
result.append(event)
return result
|
Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.compat.v1.Event protos in the event file.
|
github-repos
|
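A minimal round-trip check for the helper above: write a single hand-built ``Event`` proto to a TFRecord file, then read it back with ``_events_from_file``. The file name and field values are illustrative only.
```python
import os
import tempfile

import tensorflow as tf

# Write a tiny event file by hand.
logdir = tempfile.mkdtemp()
path = os.path.join(logdir, "events.out.tfevents.example")

event = tf.compat.v1.Event(wall_time=123.0, step=1)
event.summary.value.add(tag="loss", simple_value=0.5)
with tf.io.TFRecordWriter(path) as writer:
    writer.write(event.SerializeToString())

# Read it back with the helper defined above.
for e in _events_from_file(path):
    print(e.step, e.summary.value[0].tag, e.summary.value[0].simple_value)  # 1 loss 0.5
```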
def _parse_peer_link(self, config):
match = re.search(r'peer-link (\S+)', config)
value = match.group(1) if match else None
return dict(peer_link=value)
|
Scans the config block and parses the peer-link value
Args:
config (str): The config block to scan
Returns:
dict: A dict object that is intended to be merged into the
resource dict
|
juraj-google-style
|
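A quick, self-contained check of the regex behaviour described above; the config text is invented for illustration and the class wrapper is dropped.
```python
import re

def parse_peer_link(config):
    # Same pattern as the method above, without the class wrapper.
    match = re.search(r'peer-link (\S+)', config)
    value = match.group(1) if match else None
    return dict(peer_link=value)

sample_config = """
mlag configuration
   domain-id mlag1
   peer-link Port-Channel10
"""
print(parse_peer_link(sample_config))   # {'peer_link': 'Port-Channel10'}
print(parse_peer_link("no mlag here"))  # {'peer_link': None}
```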
def get_path(self, url):
cache_path = self._url_to_path(url)
if os.path.exists(cache_path):
return cache_path
return None
|
Returns the path of a cached resource.
Args:
url: The url of the resource
Returns:
The path to the cached resource or None if not in the cache
|
juraj-google-style
|
def evaluate(conditions, leaf_evaluator):
if isinstance(conditions, list):
if (conditions[0] in list(EVALUATORS_BY_OPERATOR_TYPE.keys())):
return EVALUATORS_BY_OPERATOR_TYPE[conditions[0]](conditions[1:], leaf_evaluator)
else:
return EVALUATORS_BY_OPERATOR_TYPE[ConditionOperatorTypes.OR](conditions, leaf_evaluator)
leaf_condition = conditions
return leaf_evaluator(leaf_condition)
|
Top level method to evaluate conditions.
Args:
conditions: Nested array of and/or conditions, or a single leaf condition value of any type.
Example: ['and', '0', ['or', '1', '2']]
leaf_evaluator: Function which will be called to evaluate leaf condition values.
Returns:
Boolean: Result of evaluating the conditions using the operator rules and the leaf evaluator.
None: if conditions couldn't be evaluated.
|
codesearchnet
|
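A minimal sketch of how the operator table and the leaf evaluator interact, mirroring the docstring example. The ``and``/``or`` evaluators here are simplified stand-ins for the module's real ``EVALUATORS_BY_OPERATOR_TYPE`` entries, which handle ``not`` and ``None`` results more carefully.
```python
# Simplified stand-ins for the module's operator evaluators.
def and_evaluator(conditions, leaf_evaluator):
    results = [evaluate(c, leaf_evaluator) for c in conditions]
    return None if None in results else all(results)

def or_evaluator(conditions, leaf_evaluator):
    results = [evaluate(c, leaf_evaluator) for c in conditions]
    return None if None in results else any(results)

EVALUATORS_BY_OPERATOR_TYPE = {'and': and_evaluator, 'or': or_evaluator}

def evaluate(conditions, leaf_evaluator):
    if isinstance(conditions, list):
        if conditions[0] in EVALUATORS_BY_OPERATOR_TYPE:
            return EVALUATORS_BY_OPERATOR_TYPE[conditions[0]](conditions[1:], leaf_evaluator)
        # A bare list defaults to OR semantics, as in the function above.
        return EVALUATORS_BY_OPERATOR_TYPE['or'](conditions, leaf_evaluator)
    return leaf_evaluator(conditions)

# Leaf values are condition ids; this evaluator looks them up in a truth table.
truth = {'0': True, '1': False, '2': True}
print(evaluate(['and', '0', ['or', '1', '2']], truth.get))  # True
```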
def forward(self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, _offload_inference: bool=False, _z_reference_list: Optional[Sequence[torch.Tensor]]=None) -> torch.Tensor:
z = [z]
q = self.linear_q(s)
kv = self.linear_kv(s)
q = q.view(q.shape[:-1] + (self.num_heads, -1))
kv = kv.view(kv.shape[:-1] + (self.num_heads, -1))
k, v = torch.split(kv, self.hidden_dim, dim=-1)
q_pts = self.linear_q_points(s)
q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1)
q_pts = torch.stack(q_pts, dim=-1)
q_pts = r[..., None].apply(q_pts)
q_pts = q_pts.view(q_pts.shape[:-2] + (self.num_heads, self.num_qk_points, 3))
kv_pts = self.linear_kv_points(s)
kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1)
kv_pts = torch.stack(kv_pts, dim=-1)
kv_pts = r[..., None].apply(kv_pts)
kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.num_heads, -1, 3))
k_pts, v_pts = torch.split(kv_pts, [self.num_qk_points, self.num_v_points], dim=-2)
b = self.linear_b(z[0])
if _offload_inference:
assert sys.getrefcount(z[0]) == 2
z[0] = z[0].cpu()
if is_fp16_enabled():
with torch.cuda.amp.autocast(enabled=False):
a = torch.matmul(permute_final_dims(q.float(), (1, 0, 2)), permute_final_dims(k.float(), (1, 2, 0)))
else:
a = torch.matmul(permute_final_dims(q, (1, 0, 2)), permute_final_dims(k, (1, 2, 0)))
a *= math.sqrt(1.0 / (3 * self.hidden_dim))
a += math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1))
pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5)
pt_att = pt_att ** 2
pt_att = sum(torch.unbind(pt_att, dim=-1))
head_weights = self.softplus(self.head_weights).view(*(1,) * len(pt_att.shape[:-2]) + (-1, 1))
head_weights = head_weights * math.sqrt(1.0 / (3 * (self.num_qk_points * 9.0 / 2)))
pt_att = pt_att * head_weights
pt_att = torch.sum(pt_att, dim=-1) * -0.5
square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2)
square_mask = self.config.inf * (square_mask - 1)
pt_att = permute_final_dims(pt_att, (2, 0, 1))
a = a + pt_att
a = a + square_mask.unsqueeze(-3)
a = self.softmax(a)
o = torch.matmul(a, v.transpose(-2, -3).to(dtype=a.dtype)).transpose(-2, -3)
o = flatten_final_dims(o, 2)
o_pt = torch.sum(a[..., None, :, :, None] * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :], dim=-2)
o_pt = permute_final_dims(o_pt, (2, 0, 3, 1))
o_pt = r[..., None, None].invert_apply(o_pt)
o_pt_norm = flatten_final_dims(torch.sqrt(torch.sum(o_pt ** 2, dim=-1) + self.config.epsilon), 2)
o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3)
if _offload_inference:
z[0] = z[0].to(o_pt.device)
o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype))
o_pair = flatten_final_dims(o_pair, 2)
s = self.linear_out(torch.cat((o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1).to(dtype=z[0].dtype))
return s
|
Args:
s:
[*, N_res, C_s] single representation
z:
[*, N_res, N_res, C_z] pair representation
r:
[*, N_res] transformation object
mask:
[*, N_res] mask
Returns:
[*, N_res, C_s] single representation update
|
github-repos
|
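The forward pass above calls ``permute_final_dims`` and ``flatten_final_dims``, which are defined elsewhere in that repository. A minimal sketch of what those helpers conventionally do, with behaviour inferred from how they are used above:
```python
import torch

def permute_final_dims(tensor: torch.Tensor, inds):
    # Reorder only the last len(inds) dimensions, leaving batch dims in place.
    zero_index = -1 * len(inds)
    first_inds = list(range(len(tensor.shape[:zero_index])))
    return tensor.permute(first_inds + [zero_index + i for i in inds])

def flatten_final_dims(tensor: torch.Tensor, num_dims: int):
    # Collapse the last num_dims dimensions into one.
    return tensor.reshape(tensor.shape[:-num_dims] + (-1,))

x = torch.randn(2, 4, 8, 16)
print(permute_final_dims(x, (1, 2, 0)).shape)  # torch.Size([2, 8, 16, 4])
print(flatten_final_dims(x, 2).shape)          # torch.Size([2, 4, 128])
```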
def Open(self, file_object):
self._file_object = file_object
self._regf_file.open_file_object(self._file_object)
return True
|
Opens the Windows Registry file using a file-like object.
Args:
file_object (file): file-like object.
Returns:
bool: True if successful or False if not.
|
juraj-google-style
|
def _retrieve_offsets(self, timestamps, timeout_ms=float('inf')):
if (not timestamps):
return {}
start_time = time.time()
remaining_ms = timeout_ms
while (remaining_ms > 0):
future = self._send_offset_requests(timestamps)
self._client.poll(future=future, timeout_ms=remaining_ms)
if future.succeeded():
return future.value
if (not future.retriable()):
raise future.exception
elapsed_ms = ((time.time() - start_time) * 1000)
remaining_ms = (timeout_ms - elapsed_ms)
if (remaining_ms < 0):
break
if future.exception.invalid_metadata:
refresh_future = self._client.cluster.request_update()
self._client.poll(future=refresh_future, timeout_ms=remaining_ms)
else:
time.sleep((self.config['retry_backoff_ms'] / 1000.0))
elapsed_ms = ((time.time() - start_time) * 1000)
remaining_ms = (timeout_ms - elapsed_ms)
raise Errors.KafkaTimeoutError(('Failed to get offsets by timestamps in %s ms' % (timeout_ms,)))
|
Fetch offset for each partition passed in ``timestamps`` map.
Blocks until offsets are obtained, a non-retriable exception is raised
or ``timeout_ms`` passed.
Arguments:
timestamps: {TopicPartition: int} dict with timestamps to fetch
offsets by. -1 for the latest available, -2 for the earliest
available. Otherwise timestamp is treated as epoch milliseconds.
Returns:
{TopicPartition: (int, int)}: Mapping of partition to
retrieved offset and timestamp. If offset does not exist for
the provided timestamp, that partition will be missing from
this mapping.
|
codesearchnet
|
def internal_convert_n_to_tensor_or_composite(values, dtype=None, name=None, as_ref=False) -> list[Union[EagerTensor, SymbolicTensor, composite_tensor.CompositeTensor, type(None)]]:
if not isinstance(values, collections_abc.Sequence):
raise TypeError('values must be a sequence.')
ret = []
for i, value in enumerate(values):
if value is None:
ret.append(value)
else:
n = None if name is None else '%s_%d' % (name, i)
ret.append(internal_convert_to_tensor_or_composite(value, dtype=dtype, name=n, as_ref=as_ref))
return ret
|
Converts `values` to a list of `Tensor` or `CompositeTensor` objects.
Any `CompositeTensor` objects in `values` are returned unmodified.
Args:
values: A list of `None`, `CompositeTensor`, or objects that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`s or
`CompositeTensor`s.
name: (Optional.) A name prefix to used when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A list of `Tensor`, `CompositeTensor`, and/or `None` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
|
github-repos
|
def get_new_requests(self):
content_type = self.__queue_item.response.headers.get('content-type')
scrapers = self.__get_all_scrapers()
new_requests = []
for scraper in scrapers:
instance = scraper(self.__options, self.__queue_item)
if self.__content_type_matches(content_type, instance.content_types):
new_requests.extend(instance.get_requests())
return new_requests
|
Retrieve all the new requests that were found in this request.
Returns:
list(:class:`nyawc.http.Request`): A list of request objects.
|
codesearchnet
|
def get_examples(self, compact=False):
examples = copy.deepcopy(self._examples)
if (not compact):
return examples
def make_compact(d):
if (not isinstance(d, dict)):
return
for key in d:
if isinstance(d[key], dict):
inner_d = d[key]
if ((len(inner_d) == 1) and ('.tag' in inner_d)):
d[key] = inner_d['.tag']
else:
make_compact(inner_d)
if isinstance(d[key], list):
for item in d[key]:
make_compact(item)
for example in examples.values():
if (isinstance(example.value, dict) and (len(example.value) == 1) and ('.tag' in example.value)):
example.value = example.value['.tag']
else:
make_compact(example.value)
return examples
|
Returns an OrderedDict mapping labels to Example objects.
Args:
compact (bool): If True, union members of void type are converted
to their compact representation: no ".tag" key or containing
dict, just the tag as a string.
|
codesearchnet
|
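A small illustration of the compaction rule described in the docstring, applied to a hand-built nested dict rather than real Example objects:
```python
def make_compact(d):
    # Same idea as the inner helper above: collapse {'.tag': 'x'} to 'x'.
    if not isinstance(d, dict):
        return
    for key in d:
        if isinstance(d[key], dict):
            inner = d[key]
            if len(inner) == 1 and '.tag' in inner:
                d[key] = inner['.tag']
            else:
                make_compact(inner)
        if isinstance(d[key], list):
            for item in d[key]:
                make_compact(item)

value = {'resolution': {'.tag': 'auto'},
         'owner': {'name': 'a', 'kind': {'.tag': 'user'}}}
make_compact(value)
print(value)  # {'resolution': 'auto', 'owner': {'name': 'a', 'kind': 'user'}}
```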
def get_chain(self, name, table="filter"):
return [r for r in self.rules if r["table"] == table and r["chain"] == name]
|
Get the list of rules for a particular chain. Chain order is kept intact.
Args:
name (str): chain name
table (str): table name, defaults to ``filter``
Returns:
list: rules
|
juraj-google-style
|
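A hedged sketch of how the method filters a parsed rule list; the rule dictionaries are invented to match the keys the method reads (``table`` and ``chain``), and the class wrapper is dropped.
```python
rules = [
    {"table": "filter", "chain": "INPUT",  "rule": "-p tcp --dport 22 -j ACCEPT"},
    {"table": "filter", "chain": "OUTPUT", "rule": "-j ACCEPT"},
    {"table": "nat",    "chain": "INPUT",  "rule": "-j MASQUERADE"},
]

def get_chain(rules, name, table="filter"):
    # Order of the original rule list is preserved.
    return [r for r in rules if r["table"] == table and r["chain"] == name]

print(get_chain(rules, "INPUT"))
# [{'table': 'filter', 'chain': 'INPUT', 'rule': '-p tcp --dport 22 -j ACCEPT'}]
```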
def __init__(self, channel):
self.ListNotificationChannelDescriptors = channel.unary_unary(
"/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors",
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelDescriptorsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelDescriptorsResponse.FromString,
)
self.GetNotificationChannelDescriptor = channel.unary_unary(
"/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor",
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelDescriptorRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannelDescriptor.FromString,
)
self.ListNotificationChannels = channel.unary_unary(
"/google.monitoring.v3.NotificationChannelService/ListNotificationChannels",
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelsResponse.FromString,
)
self.GetNotificationChannel = channel.unary_unary(
"/google.monitoring.v3.NotificationChannelService/GetNotificationChannel",
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.FromString,
)
self.CreateNotificationChannel = channel.unary_unary(
"/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel",
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.CreateNotificationChannelRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.FromString,
)
self.UpdateNotificationChannel = channel.unary_unary(
"/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel",
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.UpdateNotificationChannelRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.FromString,
)
self.DeleteNotificationChannel = channel.unary_unary(
"/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel",
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.DeleteNotificationChannelRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.SendNotificationChannelVerificationCode = channel.unary_unary(
"/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode",
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.SendNotificationChannelVerificationCodeRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GetNotificationChannelVerificationCode = channel.unary_unary(
"/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode",
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelVerificationCodeRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelVerificationCodeResponse.FromString,
)
self.VerifyNotificationChannel = channel.unary_unary(
"/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel",
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.VerifyNotificationChannelRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.FromString,
)
|
Constructor.
Args:
channel: A grpc.Channel.
|
juraj-google-style
|
def write_to_file(self, filename='material_index.dat', plot=True):
path = os.path.dirname(sys.modules[__name__].__file__) + '/'
dir_plot = 'material_index/'
if not os.path.exists(dir_plot):
os.makedirs(dir_plot)
for axis, name in zip(self.axes, self.axes_str):
root, ext = os.path.splitext(filename)
fn = dir_plot + root + '_'+ name + ext
with open(fn, 'w') as fs:
for n_row in np.abs(axis.n[::-1]):
n_str = ','.join([str(v) for v in n_row])
fs.write(n_str+'\n')
if plot:
filename_image_prefix, _ = os.path.splitext(fn)
filename_image = filename_image_prefix + '.png'
args = {
'title': 'Refractive Index Profile: %s' % name,
'x_pts': self.xx.x_pts,
'y_pts': self.xx.y_pts,
'x_min': self.xx.x_min,
'x_max': self.xx.x_max,
'y_min': self.xx.y_min,
'y_max': self.xx.y_max,
'filename_data': fn,
'filename_image': filename_image
}
if MPL:
heatmap = np.loadtxt(args['filename_data'], delimiter=',')
plt.clf()
plt.title(args['title'])
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.imshow(np.flipud(heatmap),
extent=(args['x_min'], args['x_max'], args['y_min'], args['y_max']),
aspect="auto")
plt.colorbar()
plt.savefig(filename_image)
else:
gp.gnuplot(path+'structure.gpi', args, silent=False)
|
Write the refractive index profile to file.
Args:
filename (str): The nominal filename the refractive
index data should be saved to.
plot (bool): `True` if plots should be generated,
otherwise `False`. Default is `True`.
|
juraj-google-style
|
def _database_string(self):
if (self._database_string_internal is None):
db_str = firestore_client.FirestoreClient.database_root_path(self.project, self._database)
self._database_string_internal = db_str
return self._database_string_internal
|
The database string corresponding to this client's project.
This value is lazy-loaded and cached.
Will be of the form
``projects/{project_id}/databases/{database_id}``
but ``database_id == '(default)'`` for the time being.
Returns:
str: The fully-qualified database string for the current
project. (The default database is also in this string.)
|
codesearchnet
|
def smear(self, sigma):
diff = [self.x[i + 1] - self.x[i] for i in range(len(self.x) - 1)]
avg_x_per_step = np.sum(diff) / len(diff)
if len(self.ydim) == 1:
self.y = gaussian_filter1d(self.y, sigma / avg_x_per_step)
else:
self.y = np.array([
gaussian_filter1d(self.y[:, k], sigma / avg_x_per_step)
for k in range(self.ydim[1])]).T
|
Apply Gaussian smearing to spectrum y value.
Args:
sigma: Std dev for Gaussian smear function
|
juraj-google-style
|
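The key detail above is that ``sigma`` is given in x-axis units and converted to grid points before the Gaussian filter is applied. A standalone sketch of that conversion, using an invented delta-spike spectrum:
```python
import numpy as np
from scipy.ndimage import gaussian_filter1d

x = np.linspace(0.0, 10.0, 501)   # 0.02 x-units per step
y = np.zeros_like(x)
y[250] = 1.0                      # a delta spike at x = 5

sigma = 0.5                       # desired smearing width, in x units
avg_x_per_step = np.mean(np.diff(x))
y_smeared = gaussian_filter1d(y, sigma / avg_x_per_step)

# The spike is now a Gaussian of width ~0.5 in x units, centred at x = 5.
print(round(x[np.argmax(y_smeared)], 2), y_smeared.max() < 1.0)  # 5.0 True
```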
def parse(cls, buf: memoryview, params: Params) \
-> Tuple[Parseable, memoryview]:
for data_type in params.expected:
try:
return data_type.parse(buf, params)
except NotParseable:
pass
raise UnexpectedType(buf)
|
Parses the given buffer by attempting to parse the list of
:attr:`~Params.expected` types until one of them succeeds,
then returns the parsed object.
Args:
buf: The bytes containing the data to be parsed.
params: The parameters used by some parseable types.
|
juraj-google-style
|
def _get_segments(self, start, request_size):
if not request_size:
return []
end = start + request_size
futures = []
while request_size > self._max_request_size:
futures.append(self._get_segment(start, self._max_request_size))
request_size -= self._max_request_size
start += self._max_request_size
if start < end:
futures.append(self._get_segment(start, end - start))
return [fut.get_result() for fut in futures]
|
Get segments of the file from Google Storage as a list.
A large request is broken into segments to avoid hitting the urlfetch
response size limit. Each segment is returned from a separate urlfetch.
Args:
start: start offset to request. Inclusive. Has to be within the
range of the file.
request_size: number of bytes to request.
Returns:
A list of file segments in order
|
juraj-google-style
|
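A pure-Python sketch of the segmenting arithmetic above, with the futures machinery stripped out; ``max_request_size`` is an illustrative value.
```python
def split_into_segments(start, request_size, max_request_size):
    # Mirrors the loop above: full-size chunks first, then the remainder.
    segments = []
    end = start + request_size
    while request_size > max_request_size:
        segments.append((start, max_request_size))
        request_size -= max_request_size
        start += max_request_size
    if start < end:
        segments.append((start, end - start))
    return segments

print(split_into_segments(0, 250, 100))   # [(0, 100), (100, 100), (200, 50)]
print(split_into_segments(10, 80, 100))   # [(10, 80)]
```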
@contextlib.contextmanager  # decorator assumed from the docstring below; the bare ``yield`` only works in a ``with`` block when wrapped
def lock(vcs, lock_object, wait=True):
if wait:
timeout = (- 1)
else:
timeout = 0
lock_path = _get_lock_path(vcs, lock_object)
lock = filelock.FileLock(lock_path)
with lock.acquire(timeout=timeout):
(yield)
|
A context manager that grabs the lock and releases it when done.
This blocks until the lock can be acquired.
Args:
vcs (easyci.vcs.base.Vcs)
lock_object (Lock)
wait (boolean) - whether to wait for the lock or error out
Raises:
Timeout
|
codesearchnet
|
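A self-contained sketch of the same pattern against an explicit lock-file path, since ``vcs`` and ``lock_object`` come from the surrounding easyci API and are not available here; the lock-file name is invented.
```python
import contextlib
import os
import tempfile

import filelock

@contextlib.contextmanager
def lock_path(path, wait=True):
    # Same shape as the function above, keyed on an explicit path.
    timeout = -1 if wait else 0
    file_lock = filelock.FileLock(path)
    with file_lock.acquire(timeout=timeout):
        yield

lock_file = os.path.join(tempfile.gettempdir(), "easyci-example.lock")
with lock_path(lock_file):
    print("lock held; run the protected work here")
# With wait=False, filelock.Timeout is raised immediately if another
# process already holds the lock.
```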
def get_shape(value: Union[types.FloatTensor, types.IntTensor]) -> types.IntTensor:
result = value.shape
return tf.shape(value) if None in result.as_list() else result
|
Returns the `shape` of a given `Tensor`.
Args:
value: A `Tensor` of integers or real values.
Returns:
`Tensor` of integers with rank 1.
|
github-repos
|
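A short check of the static-vs-dynamic behaviour: with a fully known shape the function returns the static shape, while an unknown dimension falls back to `tf.shape`. The function is restated standalone here for illustration; the `tf.function` signature is an assumption used to produce an unknown dimension.
```python
import tensorflow as tf

def get_shape(value):
    result = value.shape
    return tf.shape(value) if None in result.as_list() else result

x = tf.zeros([2, 3])
print(get_shape(x))              # (2, 3) -- static TensorShape

@tf.function(input_signature=[tf.TensorSpec(shape=[None, 3])])
def f(t):
    return get_shape(t)          # unknown leading dim -> falls back to tf.shape

print(f(tf.zeros([5, 3])))       # tf.Tensor([5 3], shape=(2,), dtype=int32)
```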
def is_registered(self, prefix):
return self._resolve_prefix(prefix) is not None
|
Test if a command prefix or its alias has a registered handler.
Args:
prefix: A prefix or its alias, as a str.
Returns:
True iff a handler is registered for prefix.
|
github-repos
|
def parse(self, text, key=None):
try:
data = json.loads(text)
except ValueError as e:
raise ValueError(('%s: Value: [%s]' % (e, text)))
if (data and key):
if (key not in data):
raise ValueError(('Invalid response (key %s not found): %s' % (key, data)))
data = data[key]
return data
|
Parses a response.
Args:
text (str): Text to parse
Kwargs:
key (str): Key to look for, if any
Returns:
Parsed value
Raises:
ValueError
|
codesearchnet
|
def plot_heldout_prediction(input_vals, probs, fname, n=10, title=''):
fig = figure.Figure(figsize=(9, (3 * n)))
canvas = backend_agg.FigureCanvasAgg(fig)
for i in range(n):
ax = fig.add_subplot(n, 3, ((3 * i) + 1))
ax.imshow(input_vals[i, :].reshape(IMAGE_SHAPE[:-1]), interpolation='None')
ax = fig.add_subplot(n, 3, ((3 * i) + 2))
for prob_sample in probs:
sns.barplot(np.arange(10), prob_sample[i, :], alpha=0.1, ax=ax)
ax.set_ylim([0, 1])
ax.set_title('posterior samples')
ax = fig.add_subplot(n, 3, ((3 * i) + 3))
sns.barplot(np.arange(10), np.mean(probs[:, i, :], axis=0), ax=ax)
ax.set_ylim([0, 1])
ax.set_title('predictive probs')
fig.suptitle(title)
fig.tight_layout()
canvas.print_figure(fname, format='png')
print('saved {}'.format(fname))
|
Save a PNG plot visualizing posterior uncertainty on heldout data.
Args:
input_vals: A `float`-like Numpy `array` of shape
`[num_heldout] + IMAGE_SHAPE`, containing heldout input images.
probs: A `float`-like Numpy array of shape `[num_monte_carlo,
num_heldout, num_classes]` containing Monte Carlo samples of
class probabilities for each heldout sample.
fname: Python `str` filename to save the plot to.
n: Python `int` number of datapoints to visualize.
title: Python `str` title for the plot.
|
codesearchnet
|
def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
|
Instantiate a [`BlipConfig`] (or a derived class) from blip text model configuration and blip vision model
configuration.
Returns:
[`BlipConfig`]: An instance of a configuration object
|
github-repos
|
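A usage sketch with the Hugging Face Transformers classes named above; default text and vision configs are used purely for illustration.
```python
from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

text_config = BlipTextConfig()      # default text tower settings
vision_config = BlipVisionConfig()  # default vision tower settings

config = BlipConfig.from_text_vision_configs(text_config, vision_config)
print(config.text_config.hidden_size, config.vision_config.hidden_size)
```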
def printMe(self, selfKey, selfValue):
text = '<key>{keyName}</key>\n'.format(keyName=selfKey)
if len(selfValue) == 0:
return ''
else:
valueText = ''
for element in selfValue:
if singleOrPair(element) == 'Single':
valueText += element.printMe(element.tag, element.value)
elif singleOrPair(element) == 'Pair':
valueText += element.printMe(element.key, element.value)
text += valueText
return text
|
Parse the single and its value and return the parsed str.
Args:
selfKey (str): The key. Normally just ``self.key``
selfValue (list): a list of value elements(single, subclasses, str, int). Normally just ``self.value``
Returns:
str: A parsed text
|
juraj-google-style
|
def SetCampaignTargetingCriteria(client, campaign):
campaign_criterion_service = client.GetService('CampaignCriterionService')
criteria = [
{
'xsi_type': 'Location',
'id': 21137
},
{
'xsi_type': 'Location',
'id': 2484
},
{
'xsi_type': 'Language',
'id': 1000
},
{
'xsi_type': 'Language',
'id': 1003
}
]
operations = [{
'operator': 'ADD',
'operand': {
'campaignId': campaign['id'],
'criterion': criterion
}
} for criterion in criteria]
response = campaign_criterion_service.mutate(operations)
if response and 'value' in response:
for criterion in response['value']:
print ('Campaign criteria of type "%s" and id "%s" was added.'
% (criterion['criterion']['type'],
criterion['criterion']['id']))
|
Sets targeting criteria for the given campaign.
Args:
client: An AdWordsClient instance.
campaign: A suds object representing the campaign to which we wish to attach
targeting criteria.
|
juraj-google-style
|
def upgrade_name(self, user_):
if user_.name_type > self.name_type:
self.full_name = user_.full_name
self.first_name = user_.first_name
self.name_type = user_.name_type
logger.debug('Added %s name to User "%s": %s',
self.name_type.name.lower(), self.full_name, self)
|
Upgrade name type of this user.
Google Voice participants often first appear with no name at all, and
then get upgraded unpredictably to numbers ("+12125551212") or names.
Args:
user_ (~hangups.user.User): User to upgrade with.
|
juraj-google-style
|
def has_node_with_value(self, value):
for node in self.node_list:
if node.value == value:
return True
else:
return False
|
Whether any node in ``self.node_list`` has the value ``value``.
Args:
value (Any): The value to find in ``self.node_list``
Returns: bool
Example:
>>> from blur.markov.node import Node
>>> node_1 = Node('One')
>>> graph = Graph([node_1])
>>> graph.has_node_with_value('One')
True
>>> graph.has_node_with_value('Foo')
False
|
juraj-google-style
|
def get_distribution_dict(metric_type, submit_timestamp, dist, metric_id):
return DistributionMetric(dist, submit_timestamp, metric_id, metric_type).as_dict()
|
Function creates :class:`DistributionMetric`
Args:
metric_type(str): type of value from distribution metric which will
be saved (ex. max, min, mean, sum)
submit_timestamp: timestamp when metric is saved
dist(object): distribution object from pipeline result
metric_id(uuid): id of the current test run
Returns:
dictionary prepared for saving according to schema
|
github-repos
|
def profile_view(request, user_id=None):
if request.user.is_eighthoffice and "full" not in request.GET and user_id is not None:
return redirect("eighth_profile", user_id=user_id)
if user_id is not None:
try:
profile_user = User.objects.get(id=user_id)
if profile_user is None:
raise Http404
except User.DoesNotExist:
raise Http404
else:
profile_user = request.user
num_blocks = 6
eighth_schedule = []
start_block = EighthBlock.objects.get_first_upcoming_block()
blocks = []
if start_block:
blocks = [start_block] + list(start_block.next_blocks(num_blocks - 1))
for block in blocks:
sch = {"block": block}
try:
sch["signup"] = EighthSignup.objects.get(scheduled_activity__block=block, user=profile_user)
except EighthSignup.DoesNotExist:
sch["signup"] = None
except MultipleObjectsReturned:
client.captureException()
sch["signup"] = None
eighth_schedule.append(sch)
if profile_user.is_eighth_sponsor:
sponsor = EighthSponsor.objects.get(user=profile_user)
start_date = get_start_date(request)
eighth_sponsor_schedule = (EighthScheduledActivity.objects.for_sponsor(sponsor).filter(block__date__gte=start_date).order_by(
"block__date", "block__block_letter"))
eighth_sponsor_schedule = eighth_sponsor_schedule[:10]
else:
eighth_sponsor_schedule = None
admin_or_teacher = (request.user.is_eighth_admin or request.user.is_teacher)
can_view_eighth = (profile_user.can_view_eighth or request.user == profile_user)
eighth_restricted_msg = (not can_view_eighth and admin_or_teacher)
if not can_view_eighth and not request.user.is_eighth_admin and not request.user.is_teacher:
eighth_schedule = []
has_been_nominated = profile_user.username in [
u.nominee.username for u in request.user.nomination_votes.filter(position__position_name=settings.NOMINATION_POSITION)
]
context = {
"profile_user": profile_user,
"eighth_schedule": eighth_schedule,
"can_view_eighth": can_view_eighth,
"eighth_restricted_msg": eighth_restricted_msg,
"eighth_sponsor_schedule": eighth_sponsor_schedule,
"nominations_active": settings.NOMINATIONS_ACTIVE,
"nomination_position": settings.NOMINATION_POSITION,
"has_been_nominated": has_been_nominated
}
return render(request, "users/profile.html", context)
|
Displays a view of a user's profile.
Args:
user_id
The ID of the user whose profile is being viewed. If not
specified, show the user's own profile.
|
juraj-google-style
|