code | docstring | source
---|---|---|
def create_model(self, model_server_workers=None, role=None, vpc_config_override=VPC_CONFIG_DEFAULT):
role = (role or self.role)
return ChainerModel(self.model_data, role, self.entry_point, source_dir=self._model_source_dir(), enable_cloudwatch_metrics=self.enable_cloudwatch_metrics, name=self._current_job_name, container_log_level=self.container_log_level, code_location=self.code_location, py_version=self.py_version, framework_version=self.framework_version, model_server_workers=model_server_workers, image=self.image_name, sagemaker_session=self.sagemaker_session, vpc_config=self.get_vpc_config(vpc_config_override), dependencies=self.dependencies)
|
Create a SageMaker ``ChainerModel`` object that can be deployed to an ``Endpoint``.
Args:
role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during
transform jobs. If not specified, the role from the Estimator will be used.
model_server_workers (int): Optional. The number of worker processes used by the inference server.
If None, server will use one worker per vCPU.
vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the model.
Default: use subnets and security groups from this Estimator.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
Returns:
sagemaker.chainer.model.ChainerModel: A SageMaker ``ChainerModel`` object.
See :func:`~sagemaker.chainer.model.ChainerModel` for full details.
|
codesearchnet
|
def make_decorator(target, decorator_func, decorator_name=None, decorator_doc='', decorator_argspec=None):
if decorator_name is None:
decorator_name = inspect.currentframe().f_back.f_code.co_name
decorator = TFDecorator(decorator_name, target, decorator_doc, decorator_argspec)
setattr(decorator_func, '_tf_decorator', decorator)
if hasattr(target, '__name__'):
decorator_func.__name__ = target.__name__
if hasattr(target, '__qualname__'):
decorator_func.__qualname__ = target.__qualname__
if hasattr(target, '__module__'):
decorator_func.__module__ = target.__module__
if hasattr(target, '__dict__'):
for name in target.__dict__:
if name not in decorator_func.__dict__:
decorator_func.__dict__[name] = target.__dict__[name]
if hasattr(target, '__doc__'):
decorator_func.__doc__ = decorator.__doc__
decorator_func.__wrapped__ = target
decorator_func.__original_wrapped__ = target
if decorator_argspec:
decorator_func.__signature__ = fullargspec_to_signature(decorator_argspec)
elif callable(target):
try:
signature = inspect.signature(target)
except (TypeError, ValueError):
pass
else:
bound_instance = _get_bound_instance(target)
if bound_instance and 'self' in signature.parameters:
signature = inspect.Signature(list(signature.parameters.values())[1:])
decorator_func.__self__ = bound_instance
decorator_func.__signature__ = signature
return decorator_func
|
Make a decorator from a wrapper and a target.
Args:
target: The final callable to be wrapped.
decorator_func: The wrapper function.
decorator_name: The name of the decorator. If `None`, the name of the
function calling make_decorator.
decorator_doc: Documentation specific to this application of
`decorator_func` to `target`.
decorator_argspec: Override the signature using FullArgSpec.
Returns:
The `decorator_func` argument with new metadata attached.
|
github-repos
|
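A hedged usage sketch of `make_decorator` above: wrapping a target function while copying its metadata onto the wrapper. The `log_calls` decorator and `add` function are illustrative only, and the snippet assumes `make_decorator` (with its `TFDecorator` machinery) is importable from its module.

```python
def log_calls(target):
    def wrapper(*args, **kwargs):
        print('calling', target.__name__)
        return target(*args, **kwargs)
    # copy name, docstring, dict and signature from target onto wrapper
    return make_decorator(target, wrapper, decorator_name='log_calls')

@log_calls
def add(a, b):
    return a + b

print(add(1, 2))      # prints "calling add", then 3
print(add.__name__)   # 'add' -- metadata copied from the wrapped target
```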
def adjust_contrast(img, contrast_factor):
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Contrast(img)
img = enhancer.enhance(contrast_factor)
return img
|
Adjust contrast of an Image.
Args:
img (PIL Image): PIL Image to be adjusted.
contrast_factor (float): How much to adjust the contrast. Can be any
non-negative number. 0 gives a solid gray image, 1 gives the
original image while 2 increases the contrast by a factor of 2.
Returns:
PIL Image: Contrast adjusted image.
|
juraj-google-style
|
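A minimal usage sketch for `adjust_contrast` above; Pillow is assumed to be installed, the file name is a placeholder, and the function's helpers (`_is_pil_image`, `ImageEnhance`) come from its own module.

```python
from PIL import Image

img = Image.open('photo.jpg')            # placeholder path
higher = adjust_contrast(img, 2.0)       # doubles the contrast
gray = adjust_contrast(img, 0.0)         # solid gray image
original = adjust_contrast(img, 1.0)     # unchanged copy
```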
def debye_temperature(self, structure):
v0 = ((structure.volume * 1e-30) / structure.num_sites)
(vl, vt) = (self.long_v(structure), self.trans_v(structure))
vm = ((3 ** (1.0 / 3.0)) * (((1 / (vl ** 3)) + (2 / (vt ** 3))) ** ((- 1.0) / 3.0)))
td = (((1.05457e-34 / 1.38065e-23) * vm) * (((6 * (np.pi ** 2)) / v0) ** (1.0 / 3.0)))
return td
|
Estimates the Debye temperature from longitudinal and
transverse sound velocities.
Args:
structure: pymatgen structure object
Returns: debye temperature (in SI units)
|
codesearchnet
|
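For reference, the expression implemented by the code above (with V0 the volume per atom in cubic meters and vl, vt the longitudinal and transverse sound velocities) is:

```latex
\Theta_D = \frac{\hbar}{k_B}\, v_m \left(\frac{6\pi^2}{V_0}\right)^{1/3},
\qquad
v_m = \left[\frac{1}{3}\left(\frac{1}{v_l^{3}} + \frac{2}{v_t^{3}}\right)\right]^{-1/3}
```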
def call_rpc(self, rpc_id, payload=bytes()):
if super(ServiceDelegateTile, self).has_rpc(rpc_id):
return super(ServiceDelegateTile, self).call_rpc(rpc_id, payload)
async def _awaitable_wrapper():
resp = await self._client.send_rpc(self._service, rpc_id, payload, timeout=120.0)
result = resp['result']
if result == 'success':
return resp['response']
elif result == 'service_not_found':
raise TileNotFoundError("Could not find service by name", name=self._service)
elif result == 'rpc_not_found':
raise RPCNotFoundError("Could not find RPC on service", name=self._service, rpc_id=rpc_id)
elif result == 'invalid_arguments':
raise RPCInvalidArgumentsError("Invalid arguments to RPC", name=self._service, rpc_id=rpc_id)
elif result == 'invalid_response':
raise RPCInvalidReturnValueError("Invalid response from RPC", name=self._service, rpc_id=rpc_id)
elif result == 'execution_exception':
raise InternalError("Exception raised during processing RPC", name=self._service, rpc_id=rpc_id)
else:
raise InternalError("Unknown response received from delegated RPC", name=self._service, rpc_id=rpc_id, result=result)
return _awaitable_wrapper()
|
Call an RPC by its ID.
Args:
rpc_id (int): The number of the RPC
payload (bytes): A byte string of payload parameters up to 20 bytes
Returns:
str: The response payload from the RPC
|
juraj-google-style
|
def add_output(self, output):
if (not isinstance(output, Output)):
raise TypeError('`output` must be an Output instance')
self.outputs.append(output)
|
Adds an output to a Transaction's list of outputs.
Args:
output (:class:`~bigchaindb.common.transaction.
Output`): An Output to be added to the
Transaction.
|
codesearchnet
|
def __init__(self, tcex):
self._tcex = tcex
self._data = {}
self._type = 'Owner'
self._api_type = 'owners'
self._api_entity = 'owner'
self._utils = TcExUtils()
self._tc_requests = TiTcRequest(self._tcex)
|
Initialize Class Properties.
Args:
tcex:
|
juraj-google-style
|
def _xray_clean_up_entries_for_driver(self, driver_id):
xray_task_table_prefix = (
ray.gcs_utils.TablePrefix_RAYLET_TASK_string.encode("ascii"))
xray_object_table_prefix = (
ray.gcs_utils.TablePrefix_OBJECT_string.encode("ascii"))
task_table_objects = self.state.task_table()
driver_id_hex = binary_to_hex(driver_id)
driver_task_id_bins = set()
for task_id_hex, task_info in task_table_objects.items():
task_table_object = task_info["TaskSpec"]
task_driver_id_hex = task_table_object["DriverID"]
if driver_id_hex != task_driver_id_hex:
continue
driver_task_id_bins.add(hex_to_binary(task_id_hex))
object_table_objects = self.state.object_table()
driver_object_id_bins = set()
for object_id, _ in object_table_objects.items():
task_id_bin = ray._raylet.compute_task_id(object_id).binary()
if task_id_bin in driver_task_id_bins:
driver_object_id_bins.add(object_id.binary())
def to_shard_index(id_bin):
return binary_to_object_id(id_bin).redis_shard_hash() % len(
self.state.redis_clients)
sharded_keys = [[] for _ in range(len(self.state.redis_clients))]
for task_id_bin in driver_task_id_bins:
sharded_keys[to_shard_index(task_id_bin)].append(
xray_task_table_prefix + task_id_bin)
for object_id_bin in driver_object_id_bins:
sharded_keys[to_shard_index(object_id_bin)].append(
xray_object_table_prefix + object_id_bin)
for shard_index in range(len(sharded_keys)):
keys = sharded_keys[shard_index]
if len(keys) == 0:
continue
redis = self.state.redis_clients[shard_index]
num_deleted = redis.delete(*keys)
logger.info("Monitor: "
"Removed {} dead redis entries of the "
"driver from redis shard {}.".format(
num_deleted, shard_index))
if num_deleted != len(keys):
logger.warning("Monitor: "
"Failed to remove {} relevant redis "
"entries from redis shard {}.".format(
len(keys) - num_deleted, shard_index))
|
Remove this driver's object/task entries from redis.
Removes control-state entries of all tasks and task return
objects belonging to the driver.
Args:
driver_id: The driver id.
|
juraj-google-style
|
def __init__(self, communication=collective_util.CommunicationImplementation.AUTO, cluster_resolver=None):
communication_options = collective_util.Options(implementation=communication)
super(_CollectiveAllReduceStrategyExperimental, self).__init__(cluster_resolver, communication_options)
|
Creates the strategy.
Args:
communication: optional
`tf.distribute.experimental.CommunicationImplementation`. This is a hint
on the preferred collective communication implementation. Possible
values include `AUTO`, `RING`, and `NCCL`.
cluster_resolver: optional
`tf.distribute.cluster_resolver.ClusterResolver`. If `None`,
`tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.
|
github-repos
|
def write_data(num_lines, no_data=False, directory=None, prefix=tempfile.template, eol=EOL.LF, custom_delimiter=None, line_value=b'line'):
all_data = []
with tempfile.NamedTemporaryFile(delete=False, dir=directory, prefix=prefix) as f:
sep_values = [b'\n', b'\r\n']
for i in range(num_lines):
data = b'' if no_data else line_value + str(i).encode()
all_data.append(data)
if eol == EOL.LF:
sep = sep_values[0]
elif eol == EOL.CRLF:
sep = sep_values[1]
elif eol == EOL.MIXED:
sep = sep_values[i % len(sep_values)]
elif eol == EOL.LF_WITH_NOTHING_AT_LAST_LINE:
sep = b'' if i == num_lines - 1 else sep_values[0]
elif eol == EOL.CUSTOM_DELIMITER:
if custom_delimiter is None or len(custom_delimiter) == 0:
raise ValueError('delimiter can not be null or empty')
else:
sep = custom_delimiter
else:
raise ValueError('Received unknown value %s for eol.' % eol)
f.write(data + sep)
return (f.name, [line.decode('utf-8') for line in all_data])
|
Writes test data to a temporary file.
Args:
num_lines (int): The number of lines to write.
no_data (bool): If :data:`True`, empty lines will be written, otherwise
each line will contain a concatenation of b'line' and the line number.
directory (str): The name of the directory to create the temporary file in.
prefix (str): The prefix to use for the temporary file.
eol (int): The line ending to use when writing.
:class:`~apache_beam.io.textio_test.EOL` exposes attributes that can be
used here to define the eol.
custom_delimiter (bytes): The custom delimiter.
line_value (bytes): Default value for test data, default b'line'
Returns:
Tuple[str, List[str]]: A tuple of the filename and a list of the
utf-8 decoded written data.
|
github-repos
|
def write_data(msg_type, profile_name, data, cfg):
if (profile_name not in cfg.data):
cfg.data[profile_name] = {}
cfg.data[profile_name][msg_type] = data
|
Write the settings into the data portion of the cfg.
Args:
:msg_type: (str) message type to create config entry.
:profile_name: (str) name of the profile entry
:data: (dict) dict values for the 'settings'
:cfg: (jsonconfig.Config) config instance.
|
codesearchnet
|
def __init__(self, observ_shape, action_shape, min_duration, max_duration):
self._observ_shape = observ_shape
self._action_shape = action_shape
self._min_duration = min_duration
self._max_duration = max_duration
self._random = np.random.RandomState(0)
self.steps = []
self.durations = []
|
Generate random agent input and keep track of statistics.
Args:
observ_shape: Shape for the random observations.
action_shape: Shape for the action space.
min_duration: Minimum number of steps per episode.
max_duration: Maximum number of steps per episode.
Attributes:
steps: List of actual simulated lengths for all episodes.
durations: List of decided lengths for all episodes.
|
juraj-google-style
|
def set_xlim(self, xlims, dx, xscale, reverse=False):
self._set_axis_limits('x', xlims, dx, xscale, reverse)
return
|
Set x limits for plot.
This will set the limits for the x axis
for the specific plot.
Args:
xlims (len-2 list of floats): The limits for the axis.
dx (float): Amount to increment by between the limits.
xscale (str): Scale of the axis. Either `log` or `lin`.
reverse (bool, optional): If True, reverse the axis tick marks. Default is False.
|
juraj-google-style
|
def get(self, path, params=None, headers=None):
response = requests.get(
self._url_for(path),
params=params,
headers=self._headers(headers)
)
self._handle_errors(response)
return response
|
Perform a GET request, optionally providing query-string params.
Args:
path (str): A path that gets appended to ``base_url``.
params (dict, optional): Dictionary of param names to values.
Example:
api_client.get('/users', params={'active': True})
Returns:
A requests ``Response`` object.
|
juraj-google-style
|
def __init__(self,
log_dir=DEFAULT_RESULTS_DIR,
reload_interval=30,
standalone=True,
log_level="INFO"):
self.logger = self.init_logger(log_level)
self.standalone = standalone
self.collector = Collector(
reload_interval=reload_interval,
logdir=log_dir,
logger=self.logger)
|
Initialize the collector service.
Args:
log_dir (str): Directory of the logs about trials' information.
reload_interval (int): Sleep time period after each polling round.
standalone (boolean): If True, the service will not stop on its own.
log_level (str): Level of logging.
|
juraj-google-style
|
def _filter_exception(self, ex):
if isinstance(ex, tuple):
ex2 = ex[1]
else:
ex2 = ex
if isinstance(ex2, self._clean_stop_exception_types):
ex = None
return ex
|
Check if the exception indicated in 'ex' should be ignored.
This method examines `ex` to check if it is an exception that should be
reported to the users. If yes, it returns `ex` as is, otherwise it returns
None.
The code returns None for exception types listed in
`_clean_stop_exception_types`.
Args:
ex: None, an `Exception`, or a Python `exc_info` tuple as returned by
`sys.exc_info()`.
Returns:
ex or None.
|
github-repos
|
def moments_of_masked_time_series(time_series_tensor, broadcast_mask):
num_unmasked_entries = tf.cast(
tf.reduce_sum(input_tensor=tf.cast(~broadcast_mask, tf.int32), axis=-1),
time_series_tensor.dtype)
mean = (tf.reduce_sum(input_tensor=tf.where(
broadcast_mask,
tf.zeros_like(time_series_tensor),
time_series_tensor), axis=-1) / num_unmasked_entries)
variance = (tf.reduce_sum(input_tensor=tf.where(
broadcast_mask,
tf.zeros_like(time_series_tensor),
(time_series_tensor - mean[..., tf.newaxis]) ** 2), axis=-1)
/ num_unmasked_entries)
return mean, variance
|
Compute mean and variance, accounting for a mask.
Args:
time_series_tensor: float `Tensor` time series of shape
`concat([batch_shape, [num_timesteps]])`.
broadcast_mask: bool `Tensor` of the same shape as `time_series`.
Returns:
mean: float `Tensor` of shape `batch_shape`.
variance: float `Tensor` of shape `batch_shape`.
|
juraj-google-style
|
def _transpile_circuit(circuit_config_tuple):
circuit, transpile_config = circuit_config_tuple
if transpile_config.pass_manager:
pass_manager = transpile_config.pass_manager
elif transpile_config.coupling_map:
pass_manager = default_pass_manager(transpile_config.basis_gates,
transpile_config.coupling_map,
transpile_config.initial_layout,
transpile_config.seed_transpiler)
else:
pass_manager = default_pass_manager_simulator(transpile_config.basis_gates)
return pass_manager.run(circuit)
|
Select a PassManager and run a single circuit through it.
Args:
circuit_config_tuple (tuple):
circuit (QuantumCircuit): circuit to transpile
transpile_config (TranspileConfig): configuration dictating how to transpile
Returns:
QuantumCircuit: transpiled circuit
|
juraj-google-style
|
def instantiate_resolver(self, name, args):
if (name not in self._known_resolvers):
raise ArgumentError('Attempting to instantiate unknown dependency resolver', name=name)
return self._known_resolvers[name](args)
|
Directly instantiate a dependency resolver by name with the given arguments
Args:
name (string): The name of the class that we want to instantiate
args (dict): The arguments to pass to the resolver factory
Returns:
DependencyResolver
|
codesearchnet
|
def ParseFileObject(self, parser_mediator, file_object):
self._last_charset_attribute = 'ascii'
self._ParseHeader(parser_mediator, file_object)
data_dict = {}
time_dict = {}
try:
for name, value in self._ParseAttributesGroup(file_object):
name = self._ATTRIBUTE_NAME_TRANSLATION.get(name, name)
if name in self._DATE_TIME_VALUE_NAMES:
time_dict.setdefault(name, []).append(value)
else:
data_dict.setdefault(name, []).append(value)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse attributes with error: {0!s}'.format(exception))
return
event_data = CupsIppEventData()
event_data.application = self._GetStringValue(data_dict, 'application')
event_data.computer_name = self._GetStringValue(data_dict, 'computer_name')
event_data.copies = data_dict.get('copies', [0])[0]
event_data.data_dict = data_dict
event_data.doc_type = self._GetStringValue(data_dict, 'doc_type')
event_data.job_id = self._GetStringValue(data_dict, 'job_id')
event_data.job_name = self._GetStringValue(data_dict, 'job_name')
event_data.user = self._GetStringValue(data_dict, 'user')
event_data.owner = self._GetStringValue(data_dict, 'owner')
event_data.printer_id = self._GetStringValue(data_dict, 'printer_id')
event_data.uri = self._GetStringValue(data_dict, 'uri')
for name, usage in iter(self._DATE_TIME_VALUES.items()):
for date_time in time_dict.get(name, []):
event = time_events.DateTimeValuesEvent(date_time, usage)
parser_mediator.ProduceEventWithEventData(event, event_data)
for name, usage in iter(self._POSIX_TIME_VALUES.items()):
for time_value in time_dict.get(name, []):
date_time = dfdatetime_posix_time.PosixTime(timestamp=time_value)
event = time_events.DateTimeValuesEvent(date_time, usage)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a CUPS IPP file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
juraj-google-style
|
def write_to_hdf5(self, filename_out, *args, **kwargs):
print("[Filterbank] Warning: Non-standard function to write in HDF5 (.h5) format. Please use Waterfall.")
if not HAS_HDF5:
raise RuntimeError("h5py package required for HDF5 output.")
with h5py.File(filename_out, 'w') as h5:
dset = h5.create_dataset(b'data',
data=self.data,
compression='lzf')
dset_mask = h5.create_dataset(b'mask',
shape=self.data.shape,
compression='lzf',
dtype='uint8')
dset.dims[0].label = b"frequency"
dset.dims[1].label = b"feed_id"
dset.dims[2].label = b"time"
dset_mask.dims[0].label = b"frequency"
dset_mask.dims[1].label = b"feed_id"
dset_mask.dims[2].label = b"time"
for key, value in self.header.items():
dset.attrs[key] = value
|
Write data to HDF5 file.
Args:
filename_out (str): Name of output file
|
juraj-google-style
|
def shifted_centroid_distance(item_a, time_a, item_b, time_b, max_value):
ax, ay = item_a.center_of_mass(time_a)
bx, by = item_b.center_of_mass(time_b)
if time_a < time_b:
bx = bx - item_b.u
by = by - item_b.v
else:
ax = ax - item_a.u
ay = ay - item_a.v
return np.minimum(np.sqrt((ax - bx) ** 2 + (ay - by) ** 2), max_value) / float(max_value)
|
Centroid distance with motion corrections.
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
|
juraj-google-style
|
def insert_paulis(self, indices=None, paulis=None, pauli_labels=None):
if (pauli_labels is not None):
if (paulis is not None):
raise QiskitError('Please only provide either `paulis` or `pauli_labels`')
if isinstance(pauli_labels, str):
pauli_labels = list(pauli_labels)
paulis = Pauli.from_label(pauli_labels[::(- 1)])
if (indices is None):
self._z = np.concatenate((self._z, paulis.z))
self._x = np.concatenate((self._x, paulis.x))
else:
if (not isinstance(indices, list)):
indices = [indices]
self._z = np.insert(self._z, indices, paulis.z)
self._x = np.insert(self._x, indices, paulis.x)
return self
|
Insert or append pauli to the targeted indices.
If indices is None, it means append at the end.
Args:
indices (list[int]): the qubit indices to be inserted
paulis (Pauli): the to-be-inserted or appended pauli
pauli_labels (list[str]): the to-be-inserted or appended pauli label
Note:
the indices refer to the location of the original paulis,
e.g. if indices = [0, 2], pauli_labels = ['Z', 'I'] and original pauli = 'ZYXI'
the pauli will be updated to ZY'I'XI'Z'
'Z' and 'I' are inserted before the qubit at 0 and 2.
Returns:
Pauli: self
Raises:
QiskitError: provide both `paulis` and `pauli_labels` at the same time
|
codesearchnet
|
def __init__(self, name, func):
self._func = func
if name:
self._var_scope = None
self._name = name
else:
self._var_scope = tf.get_variable_scope()
self._name = None
self._reuse = None
self._stacktrace = traceback.format_stack()[:-3]
|
Creates a template for the given function.
Args:
name: The variable_scope to use, if None the current scope is captured.
func: The function to apply each time.
|
juraj-google-style
|
def wait_for_capture(self, timeout=None):
raise NotImplementedError('Base class should not be called directly!')
|
This function waits for a capture to terminate and guarantees that
the capture is saved to the capture file configured during the
start_capture() method. Depending on the type of the sniffer the file
may previously contain partial results (e.g. for a local sniffer) or
may not exist until the stop_capture() method is executed (e.g. for a
remote sniffer).
Depending on the type/subtype and configuration of the sniffer the
capture may terminate on its own without requiring a call to this
function. In such a case it is still necessary to call either this
function or the stop_capture() function to make sure that the capture
file is moved to the correct location.
Args:
timeout: An integer specifying the number of seconds to wait for
the capture to terminate on its own. On expiration of the
timeout the sniffer is stopped explicitly using the
stop_capture() function.
Raises:
NoPermissionError: No permission when trying to stop a capture and
save the capture file.
|
github-repos
|
def execute(self, sensor_graph, scope_stack):
parent = scope_stack[-1]
alloc = parent.allocator
trigger_stream, trigger_cond = parent.trigger_chain()
rpc_const = alloc.allocate_stream(DataStream.ConstantType, attach=True)
rpc_val = (self.slot_id.address << 16) | self.rpc_id
stream = self.stream
if stream is None:
stream = alloc.allocate_stream(DataStream.UnbufferedType)
sensor_graph.add_node(u"({} {} && {} always) => {} using call_rpc".format(trigger_stream, trigger_cond, rpc_const, stream))
sensor_graph.add_constant(rpc_const, rpc_val)
|
Execute this statement on the sensor_graph given the current scope tree.
This adds a single node to the sensor graph with the call_rpc function
as its processing function.
Args:
sensor_graph (SensorGraph): The sensor graph that we are building or
modifying
scope_stack (list(Scope)): A stack of nested scopes that may influence
how this statement allocates clocks or other stream resources.
|
juraj-google-style
|
def recursive_chmod(path, mode=0755):
passwd_reader.set_permissions(path, mode=mode)
if os.path.isfile(path):
return
for root, dirs, files in os.walk(path):
for fn in files + dirs:
passwd_reader.set_permissions(os.path.join(root, fn), mode=mode)
|
Recursively change ``mode`` for given ``path``. Same as ``chmod -R mode``.
Args:
path (str): Path of the directory/file.
mode (octal int, default 0755): New mode of the file.
Warning:
Don't forget to add ``0`` at the beginning of the numbers of `mode`, or
`Unspeakable hOrRoRs` will be awaken from their unholy sleep outside of
the reality and they WILL eat your soul (and your files).
|
juraj-google-style
|
def densifying_unary(func):
@functools.wraps(func)
def sparse_wrapper(x, *args, **kwargs):
if isinstance(x, jax_sparse.JAXSparse):
x = x.todense()
return func(x, *args, **kwargs)
return sparse_wrapper
|
Decorator to add support for `JAXSparse` tensors (including `BCOO`) to a
non-zero-preserving element-wise unary operator.
There are requirements on the operator for this decorator to work correctly:
- The operator must be element-wise
- The operator must be unary (one input tensor and one output tensor)
- The operator must return a tensor of the same shape.
Additional arguments to the function (besides the input tensor) are
supported. The returned result is a dense tensor.
Args:
func: The unary operator to wrap.
Returns:
Wrapped function that supports `JAXSparse` tensors.
|
github-repos
|
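A hedged usage sketch of the decorator above applied to `jax.numpy.exp`; the decorator body is repeated here so the snippet is self-contained.

```python
import functools

import jax.numpy as jnp
from jax.experimental import sparse as jax_sparse

def densifying_unary(func):
    @functools.wraps(func)
    def sparse_wrapper(x, *args, **kwargs):
        if isinstance(x, jax_sparse.JAXSparse):
            x = x.todense()  # materialize the sparse input before the dense op
        return func(x, *args, **kwargs)
    return sparse_wrapper

exp = densifying_unary(jnp.exp)
x = jax_sparse.BCOO.fromdense(jnp.array([[0.0, 1.0], [2.0, 0.0]]))
print(exp(x))  # dense result; zeros map to exp(0) == 1, hence "non-zero-preserving" matters
```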
def _compile_function_expression(self,
expr: Expression,
scope: Dict[str, TensorFluent],
batch_size: Optional[int] = None,
noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
etype = expr.etype
args = expr.args
if len(args) == 1:
etype2func = {
'abs': TensorFluent.abs,
'exp': TensorFluent.exp,
'log': TensorFluent.log,
'sqrt': TensorFluent.sqrt,
'cos': TensorFluent.cos,
'sin': TensorFluent.sin,
'tan': TensorFluent.tan,
'acos': TensorFluent.acos,
'arccos': TensorFluent.acos,
'asin': TensorFluent.asin,
'arcsin': TensorFluent.asin,
'atan': TensorFluent.atan,
'arctan': TensorFluent.atan,
'round': TensorFluent.round,
'ceil': TensorFluent.ceil,
'floor': TensorFluent.floor
}
if etype[1] not in etype2func:
raise ValueError('Invalid unary function expression:\n{}'.format(expr))
op = etype2func[etype[1]]
x = self._compile_expression(args[0], scope, batch_size, noise)
fluent = op(x)
else:
etype2func = {
'pow': TensorFluent.pow,
'max': TensorFluent.max,
'min': TensorFluent.min
}
if etype[1] not in etype2func:
raise ValueError('Invalid binary function expression:\n{}'.format(expr))
op = etype2func[etype[1]]
x = self._compile_expression(args[0], scope, batch_size, noise)
y = self._compile_expression(args[1], scope, batch_size, noise)
fluent = op(x, y)
return fluent
|
Compile a function expression `expr` into a TensorFluent
in the given `scope` with optional batch size.
Args:
expr (:obj:`rddl2tf.expr.Expression`): A RDDL function expression.
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
batch_size (Optional[size]): The batch size.
Returns:
:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
|
juraj-google-style
|
def _compare_versions(v1, v2):
if v1 == 'inf' and v2 == 'inf':
raise RuntimeError('Cannot compare `inf` to `inf`.')
rtn_dict = {'smaller': None, 'larger': None}
v1_list = v1.split('.')
v2_list = v2.split('.')
if v1_list[0] == 'inf':
v1_list[0] = str(int(v2_list[0]) + 1)
if v2_list[0] == 'inf':
v2_list[0] = str(int(v1_list[0]) + 1)
v_long = v1_list if len(v1_list) >= len(v2_list) else v2_list
v_short = v1_list if len(v1_list) < len(v2_list) else v2_list
larger, smaller = (None, None)
for i, ver in enumerate(v_short, start=0):
if int(ver) > int(v_long[i]):
larger = _list_to_string(v_short, '.')
smaller = _list_to_string(v_long, '.')
elif int(ver) < int(v_long[i]):
larger = _list_to_string(v_long, '.')
smaller = _list_to_string(v_short, '.')
elif i == len(v_short) - 1:
if v_long[i + 1:] == ['0'] * (len(v_long) - 1 - i):
larger = 'equal'
smaller = 'equal'
else:
larger = _list_to_string(v_long, '.')
smaller = _list_to_string(v_short, '.')
else:
pass
if larger:
break
rtn_dict['smaller'] = smaller
rtn_dict['larger'] = larger
return rtn_dict
|
Compare two versions and return information on which is smaller vs. larger.
Args:
v1: String that is a version to be compared against `v2`.
v2: String that is a version to be compared against `v1`.
Returns:
Dict that stores larger version with key `larger` and smaller version with
key `smaller`.
e.g. {`larger`: `1.5.0`, `smaller`: `1.2.0`}
Raises:
RuntimeError: If asked to compare `inf` to `inf`.
|
github-repos
|
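A usage sketch for `_compare_versions` above, under the assumption that the `_list_to_string` helper it references simply joins version components with the given separator (hypothetical reconstruction):

```python
def _list_to_string(parts, sep):
    # assumed helper: ['1', '5', '0'] -> '1.5.0'
    return sep.join(parts)

print(_compare_versions('1.2.0', '1.5.0'))  # {'smaller': '1.2.0', 'larger': '1.5.0'}
print(_compare_versions('1.5', '1.5.0'))    # {'smaller': 'equal', 'larger': 'equal'}
```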
def set_extana_led(self, r, g, b, check_state=True):
(r, g, b) = map(int, [r, g, b])
if ((min([r, g, b]) < LED_MIN) or (max([r, g, b]) > LED_MAX)):
logger.warn('RGB channel values must be {}-{}'.format(LED_MIN, LED_MAX))
return False
if (check_state and ((r, g, b) == self.led_state)):
return True
(ir, ig, ib) = map((lambda x: int((x * (INT_LED_MAX / LED_MAX)))), [r, g, b])
val = struct.pack('<HHH', ir, ig, ib)
extana_led = self.get_characteristic_handle_from_uuid(UUID_EXTANA_LED)
if (extana_led is None):
logger.warn('Failed to find handle for ExtAna LED')
return None
if (not self.dongle._write_attribute(self.conn_handle, extana_led, val)):
return False
self.led_state = (r, g, b)
return True
|
Update the colour of the RGB LED on the SK8-ExtAna board.
Args:
r (int): red channel, 0-255
g (int): green channel, 0-255
b (int): blue channel, 0-255
check_state (bool): if True (default) and the locally cached LED state matches
the given (r, g, b) triplet, pysk8 will NOT send any LED update command to
the SK8. If you want to force the command to be sent even if the local state
matches the new colour, set this to False.
Returns:
True on success, False if an error occurred.
|
codesearchnet
|
def __init__(self, lookup_map, fallback=None):
super().__init__()
if fallback is not None:
lookup_map['*'] = fallback
self._lookup_map = lookup_map
|
Create this visitor.
You're expected to then pass this instance to node.Visit().
Args:
lookup_map: A map from names to symbol tables (i.e., objects that have a
"Lookup" function).
fallback: A symbol table to be tried if lookup otherwise fails.
|
github-repos
|
def is_location(v) -> (bool, str):
def convert2float(value):
try:
float_num = float(value)
return float_num
except ValueError:
return False
if not isinstance(v, str):
return False, v
split_lst = v.split(":")
if len(split_lst) != 5:
return False, v
if convert2float(split_lst[3]):
latitude = abs(convert2float(split_lst[3]))
if latitude > 90:
return False, v
if convert2float(split_lst[4]):
longitude = abs(convert2float(split_lst[4]))
if longitude > 180:
return False, v
return True, v
|
Boolean function for checking if v is a location format
Args:
v:
Returns: bool
|
juraj-google-style
|
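A usage sketch for the check above; the exact field layout is an assumption inferred from the index checks (five colon-separated fields with latitude and longitude in the last two positions):

```python
print(is_location('us:ca:los_angeles:34.05:-118.24'))  # (True, 'us:ca:los_angeles:34.05:-118.24')
print(is_location('34.05:-118.24'))                    # (False, '34.05:-118.24') -- wrong field count
print(is_location('a:b:c:95.0:10.0'))                  # (False, 'a:b:c:95.0:10.0') -- out of range
```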
def _ParseDateTimeValue(self, byte_stream, file_offset):
datetime_value_map = self._GetDataTypeMap('cups_ipp_datetime_value')
try:
value = self._ReadStructureFromByteStream(byte_stream, file_offset, datetime_value_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError('Unable to parse datetime value with error: {0!s}'.format(exception))
direction_from_utc = chr(value.direction_from_utc)
rfc2579_date_time_tuple = (value.year, value.month, value.day_of_month, value.hours, value.minutes, value.seconds, value.deciseconds, direction_from_utc, value.hours_from_utc, value.minutes_from_utc)
return dfdatetime_rfc2579_date_time.RFC2579DateTime(rfc2579_date_time_tuple=rfc2579_date_time_tuple)
|
Parses a CUPS IPP RFC2579 date-time value from a byte stream.
Args:
byte_stream (bytes): byte stream.
file_offset (int): offset of the attribute data relative to the start of
the file-like object.
Returns:
dfdatetime.RFC2579DateTime: RFC2579 date-time stored in the value.
Raises:
ParseError: when the RFC2579 date-time value cannot be parsed.
|
codesearchnet
|
def return_estimator(self):
estimator = self.base_learner_origin.return_estimator()
estimator = estimator.set_params(**self.hyperparameters)
return estimator
|
Returns base learner using its origin and the given hyperparameters
Returns:
est (estimator): Estimator object
|
codesearchnet
|
def get_sailthru_client(site_code):
config = get_sailthru_configuration(site_code)
if not config.get('SAILTHRU_ENABLE'):
msg = 'Sailthru is not enabled for site {}'.format(site_code)
log.debug(msg)
raise SailthruNotEnabled(msg)
key = config.get('SAILTHRU_KEY')
secret = config.get('SAILTHRU_SECRET')
if not (key and secret):
msg = 'Both key and secret are required for site {}'.format(site_code)
log.error(msg)
raise ConfigurationError(msg)
return SailthruClient(key, secret)
|
Returns a Sailthru client for the specified site.
Args:
site_code (str): Site for which the client should be configured.
Returns:
SailthruClient
Raises:
SailthruNotEnabled: If Sailthru is not enabled for the specified site.
ConfigurationError: If either the Sailthru API key or secret are not set for the site.
|
juraj-google-style
|
async def send_command(self, command, args, validator, timeout=10.0):
if (self._con is None):
raise ExternalError('No websock connection established')
cmd_uuid = str(uuid.uuid4())
msg = dict(type='command', operation=command, uuid=cmd_uuid, payload=args)
packed = pack(msg)
response_future = self._manager.wait_for(type='response', uuid=cmd_uuid, timeout=timeout)
(await self._con.send(packed))
response = (await response_future)
if (response.get('success') is False):
self._raise_error(command, response)
if (validator is None):
return response.get('payload')
return validator.verify(response.get('payload'))
|
Send a command and synchronously wait for a single response.
Args:
command (string): The command name
args (dict): Optional arguments.
validator (Verifier): A SchemaVerifier to verify the response
payload.
timeout (float): The maximum time to wait for a response.
Defaults to 10 seconds.
Returns:
dict: The response payload
Raises:
ExternalError: If the server is not connected or the command
fails.
asyncio.TimeoutError: If the command times out.
ValidationError: If the response payload does not match the
given validator.
|
codesearchnet
|
def create_transfer_learning_tuner(parent, additional_parents=None, estimator=None, sagemaker_session=None):
parent_tuner = HyperparameterTuner.attach(tuning_job_name=parent, sagemaker_session=sagemaker_session)
return parent_tuner.transfer_learning_tuner(additional_parents=additional_parents, estimator=estimator)
|
Creates a new ``HyperParameterTuner`` by copying the request fields from the provided parent to the new instance
of ``HyperparameterTuner`` followed by addition of warm start configuration with the type as "TransferLearning"
and ``parents`` as the union of provided list of ``additional_parents`` and the ``parent``.
Args:
parent (str): Primary parent tuning job's name from which the Tuner and Estimator configuration has to be copied
additional_parents (set{str}): Set of additional parent tuning job's names along with the primary parent tuning
job name to be used in warm starting the identical dataset and algorithm tuner.
estimator (sagemaker.estimator.EstimatorBase): An estimator object that has been initialized with
the desired configuration. There does not need to be a training job associated with this instance.
sagemaker_session (sagemaker.session.Session): Session object which manages interactions with
Amazon SageMaker APIs and any other AWS services needed. If not specified, one is created
using the default AWS configuration chain.
Returns:
sagemaker.tuner.HyperparameterTuner: New instance of warm started HyperparameterTuner
|
codesearchnet
|
def _ExtractFileEntry(self, path_spec, destination_path, output_writer, skip_duplicates=True):
file_entry = path_spec_resolver.Resolver.OpenFileEntry(path_spec)
if (not file_entry):
logger.warning('Unable to open file entry for path spec: {0:s}'.format(path_spec.comparable))
return
if (not self._filter_collection.Matches(file_entry)):
return
file_entry_processed = False
for data_stream in file_entry.data_streams:
if self._abort:
break
self._ExtractDataStream(file_entry, data_stream.name, destination_path, output_writer, skip_duplicates=skip_duplicates)
file_entry_processed = True
if (not file_entry_processed):
self._ExtractDataStream(file_entry, '', destination_path, output_writer, skip_duplicates=skip_duplicates)
|
Extracts a file entry.
Args:
path_spec (dfvfs.PathSpec): path specification of the source file.
destination_path (str): path where the extracted files should be stored.
output_writer (CLIOutputWriter): output writer.
skip_duplicates (Optional[bool]): True if files with duplicate content
should be skipped.
|
codesearchnet
|
def traverse_pagination(response, endpoint, content_filter_query, query_params):
results = response.get('results', [])
page = 1
while response.get('next'):
page += 1
response = endpoint().post(content_filter_query, **dict(query_params, page=page))
results += response.get('results', [])
return results
|
Traverse a paginated API response and extracts and concatenates "results" returned by API.
Arguments:
response (dict): API response object.
endpoint (Slumber.Resource): API endpoint object.
content_filter_query (dict): query parameters used to filter catalog results.
query_params (dict): query parameters used to paginate results.
Returns:
list: all the results returned by the API.
|
codesearchnet
|
def _tf_assert_stmt(expression1, expression2):
expression2_tensors = expression2()
if not isinstance(expression2_tensors, list):
expression2_tensors = [expression2_tensors]
return control_flow_assert.Assert(expression1, expression2_tensors)
|
Overload of assert_stmt that stages a TF Assert.
This implementation deviates from Python semantics as follows:
(1) the assertion is verified regardless of the state of __debug__
(2) on assertion failure, the graph execution will fail with
tensorflow.errors.ValueError, rather than AssertionError.
Args:
expression1: tensorflow.Tensor, must evaluate to a tf.bool scalar
expression2: Callable[[], Union[tensorflow.Tensor, List[tensorflow.Tensor]]]
Returns:
tensorflow.Operation
|
github-repos
|
def _do_policy_eval(tf_sess, to_eval, policies, active_episodes):
eval_results = {}
if tf_sess:
builder = TFRunBuilder(tf_sess, 'policy_eval')
pending_fetches = {}
else:
builder = None
if log_once('compute_actions_input'):
logger.info('Inputs to compute_actions():\n\n{}\n'.format(summarize(to_eval)))
for (policy_id, eval_data) in to_eval.items():
rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data])
policy = _get_or_raise(policies, policy_id)
if (builder and (policy.compute_actions.__code__ is TFPolicyGraph.compute_actions.__code__)):
pending_fetches[policy_id] = policy._build_compute_actions(builder, [t.obs for t in eval_data], rnn_in_cols, prev_action_batch=[t.prev_action for t in eval_data], prev_reward_batch=[t.prev_reward for t in eval_data])
else:
eval_results[policy_id] = policy.compute_actions([t.obs for t in eval_data], rnn_in_cols, prev_action_batch=[t.prev_action for t in eval_data], prev_reward_batch=[t.prev_reward for t in eval_data], info_batch=[t.info for t in eval_data], episodes=[active_episodes[t.env_id] for t in eval_data])
if builder:
for (k, v) in pending_fetches.items():
eval_results[k] = builder.get(v)
if log_once('compute_actions_result'):
logger.info('Outputs of compute_actions():\n\n{}\n'.format(summarize(eval_results)))
return eval_results
|
Call compute actions on observation batches to get next actions.
Returns:
eval_results: dict of policy to compute_action() outputs.
|
codesearchnet
|
def start_upsert(ini_data):
stack_driver = CloudStackUtility(ini_data)
poll_stack = (not ini_data.get('no_poll', False))
if stack_driver.upsert():
logging.info('stack create/update was started successfully.')
if poll_stack:
stack_tool = None
try:
profile = ini_data.get('environment', {}).get('profile')
if profile:
boto3_session = boto3.session.Session(profile_name=profile)
else:
boto3_session = boto3.session.Session()
region = ini_data['environment']['region']
stack_name = ini_data['environment']['stack_name']
cf_client = stack_driver.get_cloud_formation_client()
if (not cf_client):
cf_client = boto3_session.client('cloudformation', region_name=region)
stack_tool = StackTool(stack_name, region, cf_client)
except Exception as wtf:
logging.warning('there was a problem creating stack tool: {}'.format(wtf))
if stack_driver.poll_stack():
try:
logging.info('stack create/update was finished successfully.')
stack_tool.print_stack_info()
except Exception as wtf:
logging.warning('there was a problem printing stack info: {}'.format(wtf))
sys.exit(0)
else:
try:
logging.error('stack create/update did not go well.')
stack_tool.print_stack_events()
except Exception as wtf:
logging.warning('there was a problem printing stack events: {}'.format(wtf))
sys.exit(1)
else:
logging.error('start of stack create/update did not go well.')
sys.exit(1)
|
Helper function to facilitate upsert.
Args:
ini_data - the dictionary of info to run upsert
Exit:
0 - good
1 - bad
|
codesearchnet
|
def clean_code(content: str) -> str:
splits = content.split('"""')
content = ''.join(splits[::2])
splits = content.split("'''")
content = ''.join(splits[::2])
lines_to_keep = []
for line in content.split('\n'):
line = re.sub('#.*$', '', line)  # strip trailing comments
if len(line) != 0 and (not line.isspace()):
lines_to_keep.append(line)
return '\n'.join(lines_to_keep)
|
Remove docstrings, empty lines or comments from some code (used to detect if a diff is real or only concerns
comments or docstrings).
Args:
content (`str`): The code to clean
Returns:
`str`: The cleaned code.
|
github-repos
|
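A small usage sketch for `clean_code` above (assumes `re` is imported in the same module as the function):

```python
sample = '''
def f(x):
    """Docstring that should disappear."""
    # a comment that should disappear
    return x + 1
'''
print(clean_code(sample))
# def f(x):
#     return x + 1
```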
def map_fn(fn, elems, name=None, dtype=None):
return map_fn_lib.map_fn(fn, elems, name=name, dtype=dtype)
|
Map the function fn over the elements elems and return the outputs.
Args:
fn: Callable that will be called upon each element in elems
elems: tensor
name: A string name for the map node in the graph
dtype: Output data type.
Returns:
Tensor with dtype `dtype`.
|
github-repos
|
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001):
if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]:
if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]:
return _broadcast_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=epsilon)
return _fused_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=epsilon)
elif sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
return _regular_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
return _broadcast_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=epsilon)
|
Computes mean and std for batch then apply batch_normalization on batch.
Args:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
|
github-repos
|
def _partitioner(shape, dtype):
if not isinstance(shape, tensor_shape.TensorShape):
raise ValueError(f'shape is not a TensorShape: {shape}')
if not shape.is_fully_defined():
raise ValueError(f'shape is not fully defined: {shape}')
dtype = dtypes.as_dtype(dtype)
if dtype.base_dtype == dtypes.string:
element_size = bytes_per_string_element
else:
element_size = dtype.size
partitions = [1] * shape.ndims
bytes_per_slice = 1.0 * (shape.num_elements() / shape.dims[axis].value) * element_size
slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice))
axis_shards = int(math.ceil(1.0 * shape.dims[axis].value / slices_per_shard))
if max_shards:
axis_shards = min(max_shards, axis_shards)
partitions[axis] = axis_shards
return partitions
|
Partitioner that partitions shards to have max_shard_bytes total size.
Args:
shape: A `TensorShape`.
dtype: A `DType`.
Returns:
A tuple representing how much to slice each axis in shape.
Raises:
ValueError: If shape is not a fully defined `TensorShape` or dtype is not
a `DType`.
|
github-repos
|
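A worked example of the sharding arithmetic above, with illustrative closure values (`axis`, `max_shard_bytes`, `max_shards`, and `bytes_per_string_element` come from the enclosing partitioner factory):

```python
# Assume axis = 0, max_shard_bytes = 2 ** 20 (1 MiB), max_shards = None,
# and a float32 variable of shape (1_000_000, 200):
#   element_size     = 4 bytes
#   bytes_per_slice  = (1_000_000 * 200 / 1_000_000) * 4 = 800 bytes per slice along axis 0
#   slices_per_shard = max(1, floor(1_048_576 / 800)) = 1310
#   axis_shards      = ceil(1_000_000 / 1310) = 764
#   partitions       = [764, 1]
```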
def while_loop_op(op):
return control_flow_util.IsLoopSwitch(op) or control_flow_util.IsLoopMerge(op) or control_flow_util.IsLoopEnter(op) or control_flow_util.IsLoopExit(op) or TensorTracer.loop_cond_op(op) or (op.type in ('RefNextIteration', 'NextIteration'))
|
Returns true if op is one of the special ops of in a while loop.
Args:
op: A tf.Operation.
Returns:
True if the given op is one of [Switch, Merge, Enter, Exit,
NextIteration, LoopCond], which are all building blocks for TF while
loops.
|
github-repos
|
def get_subgraph_for_concept_pair(
self, source: str, target: str, cutoff: Optional[int] = None
):
paths = nx.all_simple_paths(self, source, target, cutoff=cutoff)
return AnalysisGraph(self.subgraph(set(chain.from_iterable(paths))))
|
Get subgraph comprised of simple paths between the source and the
target.
Args:
source
target
cutoff
|
juraj-google-style
|
def get(self, addresses):
with self._lock:
results = []
for add in addresses:
self.validate_read(add)
results.append(self._get(add))
return results
|
Returns the value in this context, or None, for each address in
addresses. Useful for gets on the context manager.
Args:
addresses (list of str): The addresses to return values for, if
within this context.
Returns:
results (list of bytes): The values in state for these addresses.
|
codesearchnet
|
def fit_transform_table(self, table, table_meta, transformer_dict=None, transformer_list=None, missing=None):
if (missing is None):
missing = self.missing
else:
self.missing = missing
warnings.warn(DEPRECATION_MESSAGE.format('fit_transform_table'), DeprecationWarning)
result = pd.DataFrame()
table_name = table_meta['name']
for field in table_meta['fields']:
col_name = field['name']
if transformer_list:
for transformer_name in transformer_list:
if (field['type'] == self.get_class(transformer_name).type):
transformed = self._fit_transform_column(table, field, transformer_name, table_name)
result = pd.concat([result, transformed], axis=1)
elif ((table_name, col_name) in transformer_dict):
transformer_name = TRANSFORMERS[transformer_dict[(table_name, col_name)]]
transformed = self._fit_transform_column(table, field, transformer_name, table_name)
result = pd.concat([result, transformed], axis=1)
return result
|
Create, apply and store the specified transformers for `table`.
Args:
table(pandas.DataFrame): Contents of the table to be transformed.
table_meta(dict): Metadata for the given table.
transformer_dict(dict): Mapping `tuple(str, str)` -> `str` where the tuple in the
keys represent the (table_name, column_name) and the value
the name of the assigned transformer.
transformer_list(list): List of transformers to use. Overrides the transformers in
the meta_file.
missing(bool): Whether or not to use NullTransformer to handle missing values.
Returns:
pandas.DataFrame: Transformed table.
|
codesearchnet
|
def check_filepath(self, path, filename):
settings_path = os.path.join(path, filename)
if not os.path.exists(settings_path) or \
not os.path.isfile(settings_path):
msg = "Unable to find settings file: {}"
raise SettingsBackendError(msg.format(settings_path))
return settings_path
|
Check and return the final filepath to settings
Args:
path (str): Directory path where to search for settings file.
filename (str): Filename to use to search for settings file.
Raises:
boussole.exceptions.SettingsBackendError: If determined filepath
does not exist or is a directory.
Returns:
string: Settings file path, joining given path and filename.
|
juraj-google-style
|
def getParameter(self, name):
return lock_and_call((lambda : Parameter(self._impl.getParameter(name))), self._lock)
|
Get the parameter with the corresponding name.
Args:
name: Name of the parameter to be found.
Raises:
TypeError: if the specified parameter does not exist.
|
codesearchnet
|
def predict_image(img, model_func):
orig_shape = img.shape[:2]
resizer = CustomResize(cfg.PREPROC.TEST_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE)
resized_img = resizer.augment(img)
scale = np.sqrt(resized_img.shape[0] * 1.0 / img.shape[0] * resized_img.shape[1] / img.shape[1])
boxes, probs, labels, *masks = model_func(resized_img)
boxes = boxes / scale
boxes = clip_boxes(boxes, orig_shape)
if masks:
full_masks = [_paste_mask(box, mask, orig_shape)
for box, mask in zip(boxes, masks[0])]
masks = full_masks
else:
masks = [None] * len(boxes)
results = [DetectionResult(*args) for args in zip(boxes, probs, labels.tolist(), masks)]
return results
|
Run detection on one image, using the TF callable.
This function should handle the preprocessing internally.
Args:
img: an image
model_func: a callable from the TF model.
It takes image and returns (boxes, probs, labels, [masks])
Returns:
[DetectionResult]
|
juraj-google-style
|
def std(x, axis=None, keepdims=False):
if any_symbolic_tensors((x,)):
return Std(axis=axis, keepdims=keepdims).symbolic_call(x)
return backend.numpy.std(x, axis=axis, keepdims=keepdims)
|
Compute the standard deviation along the specified axis.
Args:
x: Input tensor.
axis: Axis along which to compute standard deviation.
Default is to compute the standard deviation of the
flattened tensor.
keepdims: If this is set to `True`, the axes which are reduced are left
in the result as dimensions with size one.
Returns:
Output tensor containing the standard deviation values.
|
github-repos
|
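A hedged usage sketch, assuming this is the Keras 3 op exposed as `keras.ops.std`:

```python
import numpy as np
import keras

x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
print(keras.ops.std(x))                         # std of the flattened tensor
print(keras.ops.std(x, axis=1, keepdims=True))  # per-row std, shape (2, 1)
```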
def affine_transform(self, image: np.array, center: Tuple[float], scale: Tuple[float], rotation: float, size: Dict[str, int], data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.array:
data_format = input_data_format if data_format is None else data_format
size = (size['width'], size['height'])
transformation = get_warp_matrix(rotation, center * 2.0, np.array(size) - 1.0, scale * 200.0)
image = image if input_data_format == ChannelDimension.LAST else to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format)
image = scipy_warp_affine(src=image, M=transformation, size=(size[1], size[0]))
image = to_channel_dimension_format(image, data_format, ChannelDimension.LAST)
return image
|
Apply an affine transformation to an image.
Args:
image (`np.array`):
Image to transform.
center (`Tuple[float]`):
Center of the bounding box (x, y).
scale (`Tuple[float]`):
Scale of the bounding box with respect to height/width.
rotation (`float`):
Rotation angle in degrees.
size (`Dict[str, int]`):
Size of the destination image.
data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format of the output image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image.
|
github-repos
|
def received(self, messages):
if messages:
if self._queue:
self._queue.put_nowait(messages)
if self._callback:
self._callback(messages)
|
Called when new messages arrive.
Args:
messages (tuple): Messages
|
codesearchnet
|
def __init__(self, channel, pin):
self._channel = None
self._pin = None
self._open(channel, pin)
|
Instantiate a PWM object and open the sysfs PWM corresponding to the
specified channel and pin.
Args:
channel (int): Linux channel number.
pin (int): Linux pin number.
Returns:
PWM: PWM object.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if `channel` or `pin` types are invalid.
ValueError: if PWM channel does not exist.
|
juraj-google-style
|
def make_innermost_setter(setter):
@functools.wraps(setter)
def _new_setter(kernel_results, *args, **kwargs):
results_stack = []
while hasattr(kernel_results, 'inner_results'):
results_stack.append(kernel_results)
kernel_results = kernel_results.inner_results
new_kernel_results = setter(kernel_results, *args, **kwargs)
for outer_results in reversed(results_stack):
new_kernel_results = outer_results._replace(
inner_results=new_kernel_results)
return new_kernel_results
return _new_setter
|
Wraps a setter so it applies to the inner-most results in `kernel_results`.
The wrapped setter unwraps `kernel_results` and applies `setter` to the first
results without an `inner_results` attribute.
Args:
setter: A callable that takes the kernel results as well as some `*args` and
`**kwargs` and returns a modified copy of those kernel results.
Returns:
new_setter: A wrapped `setter`.
|
juraj-google-style
|
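An illustrative sketch using plain namedtuples in place of real TFP kernel results (which are namedtuple-like, hence the `_replace` calls above); the names here are hypothetical and `make_innermost_setter` is assumed importable.

```python
import collections

Inner = collections.namedtuple('Inner', ['step_size'])
Outer = collections.namedtuple('Outer', ['inner_results'])

def set_step_size(results, step_size):
    return results._replace(step_size=step_size)

set_innermost_step_size = make_innermost_setter(set_step_size)
nested = Outer(inner_results=Inner(step_size=0.1))
print(set_innermost_step_size(nested, 0.5))
# Outer(inner_results=Inner(step_size=0.5))
```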
def _CreateShapePointFolder(self, shapes_folder, shape):
folder_name = (shape.shape_id + ' Shape Points')
folder = self._CreateFolder(shapes_folder, folder_name, visible=False)
for (index, (lat, lon, dist)) in enumerate(shape.points):
placemark = self._CreatePlacemark(folder, str((index + 1)))
point = ET.SubElement(placemark, 'Point')
coordinates = ET.SubElement(point, 'coordinates')
coordinates.text = ('%.6f,%.6f' % (lon, lat))
return folder
|
Create a KML Folder containing all the shape points in a shape.
The folder contains placemarks for each shapepoint.
Args:
shapes_folder: A KML Shape Folder ElementTree.Element instance
shape: The shape to plot.
Returns:
The Folder ElementTree.Element instance or None.
|
codesearchnet
|
def plot_brillouin_zone(bz_lattice, lines=None, labels=None, kpoints=None, fold=False, coords_are_cartesian=False, ax=None, **kwargs):
(fig, ax) = plot_lattice_vectors(bz_lattice, ax=ax)
plot_wigner_seitz(bz_lattice, ax=ax)
if (lines is not None):
for line in lines:
plot_path(line, bz_lattice, coords_are_cartesian=coords_are_cartesian, ax=ax)
if (labels is not None):
plot_labels(labels, bz_lattice, coords_are_cartesian=coords_are_cartesian, ax=ax)
plot_points(labels.values(), bz_lattice, coords_are_cartesian=coords_are_cartesian, fold=False, ax=ax)
if (kpoints is not None):
plot_points(kpoints, bz_lattice, coords_are_cartesian=coords_are_cartesian, ax=ax, fold=fold)
ax.set_xlim3d((- 1), 1)
ax.set_ylim3d((- 1), 1)
ax.set_zlim3d((- 1), 1)
ax.set_aspect('equal')
ax.axis('off')
return fig
|
Plots a 3D representation of the Brillouin zone of the structure.
Can add to the plot paths, labels and kpoints
Args:
bz_lattice: Lattice object of the Brillouin zone
lines: list of lists of coordinates. Each list represent a different path
labels: dict containing the label as a key and the coordinates as value.
kpoints: list of coordinates
fold: whether the points should be folded inside the first Brillouin Zone.
Defaults to False. Requires lattice if True.
coords_are_cartesian: Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
kwargs: provided by add_fig_kwargs decorator
Returns:
matplotlib figure
|
codesearchnet
|
def is_user_in_group(self, user, group):
search_url = ('%s/%s/%s/%s/%s' % (self.url, 'group', group, 'user', user))
response = self.jss.get(search_url)
length = len(response)
result = False
if (length == 1):
pass
elif (length == 2):
if (response.findtext('ldap_user/username') == user):
if (response.findtext('ldap_user/is_member') == 'Yes'):
result = True
elif (len(response) >= 2):
raise JSSGetError('Unexpected response.')
return result
|
Test for whether a user is in a group.
There is also the ability in the API to test for whether
multiple users are members of an LDAP group, but you should just
call is_user_in_group over an enumerated list of users.
Args:
user: String username.
group: String group name.
Returns bool.
|
codesearchnet
|
def as_bytes(bytes_or_text, encoding='utf-8'):
encoding = codecs.lookup(encoding).name
if isinstance(bytes_or_text, bytearray):
return bytes(bytes_or_text)
elif isinstance(bytes_or_text, str):
return bytes_or_text.encode(encoding)
elif isinstance(bytes_or_text, bytes):
return bytes_or_text
else:
raise TypeError('Expected binary or unicode string, got %r' % (bytes_or_text,))
|
Converts `bytearray`, `bytes`, or unicode python input types to `bytes`.
Uses utf-8 encoding for text by default.
Args:
bytes_or_text: A `bytearray`, `bytes`, `str`, or `unicode` object.
encoding: A string indicating the charset for encoding unicode.
Returns:
A `bytes` object.
Raises:
TypeError: If `bytes_or_text` is not a binary or unicode string.
|
github-repos
|
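A small usage sketch of the conversion rules above (the function itself relies on the `codecs` module being imported in its module):

```python
assert as_bytes('héllo') == 'héllo'.encode('utf-8')
assert as_bytes(b'raw') == b'raw'
assert as_bytes(bytearray(b'buf')) == b'buf'
assert as_bytes('caf\u00e9', encoding='latin-1') == b'caf\xe9'
```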
def resize_image(image, tuple_wh, preserve_aspect=True):
if preserve_aspect:
img_cpy = image.copy()
img_cpy.thumbnail(tuple_wh)
return img_cpy
else:
return image.resize(tuple_wh)
|
Resizes an instance of a PIL Image.
In order to prevent unintended side effects,
this function always returns a copy of the image,
as the resize function from PIL returns a copy
but the thumbnail function does not.
Args:
image: An instance of a PIL Image.
tuple_wh: A tuple containing the (width, height) for resizing.
preserve_aspect: A boolean that determines whether or not the
resizing should preserve the image's aspect ratio.
Returns: A resized copy of the provided PIL image.
|
codesearchnet
|
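A usage sketch for `resize_image` above (Pillow assumed installed; the file name and dimensions are placeholders):

```python
from PIL import Image

img = Image.open('photo.jpg')                                      # e.g. 1600x900
fitted = resize_image(img, (400, 400))                             # aspect kept: roughly 400x225
stretched = resize_image(img, (400, 400), preserve_aspect=False)   # exactly 400x400
```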
def _get_nn_shell_info(self, structure, all_nn_info, site_idx, shell, _previous_steps=frozenset(), _cur_image=(0, 0, 0)):
if (shell <= 0):
raise ValueError('Shell must be positive')
_previous_steps = _previous_steps.union({(site_idx, _cur_image)})
possible_steps = list(all_nn_info[site_idx])
for (i, step) in enumerate(possible_steps):
step = dict(step)
step['image'] = tuple(np.add(step['image'], _cur_image).tolist())
possible_steps[i] = step
allowed_steps = [x for x in possible_steps if ((x['site_index'], x['image']) not in _previous_steps)]
if (shell == 1):
return allowed_steps
else:
terminal_neighbors = [self._get_nn_shell_info(structure, all_nn_info, x['site_index'], (shell - 1), _previous_steps, x['image']) for x in allowed_steps]
all_sites = dict()
for (first_site, term_sites) in zip(allowed_steps, terminal_neighbors):
for term_site in term_sites:
key = (term_site['site_index'], tuple(term_site['image']))
term_site['weight'] *= first_site['weight']
value = all_sites.get(key)
if (value is not None):
value['weight'] += term_site['weight']
else:
value = term_site
all_sites[key] = value
return list(all_sites.values())
|
Private method for computing the neighbor shell information
Args:
structure (Structure) - Structure being assessed
all_nn_info ([[dict]]) - Results from `get_all_nn_info`
site_idx (int) - index of site for which to determine neighbor
information.
shell (int) - Which neighbor shell to retrieve (1 == 1st NN shell)
_previous_steps ({(site_idx, image)}) - Internal use only: Set of
sites that have already been traversed.
_cur_image (tuple) - Internal use only: Image coordinates of the current atom
Returns:
list of dictionaries. Each entry in the list is information about
a certain neighbor in the structure, in the same format as
`get_nn_info`. Does not update the site positions
|
codesearchnet
|
def constant(name, shape, value=0, dtype=tf.sg_floatx, summary=True, regularizer=None, trainable=True):
shape = (shape if isinstance(shape, (tuple, list)) else [shape])
x = tf.get_variable(name, shape, dtype=dtype, initializer=tf.constant_initializer(value), regularizer=regularizer, trainable=trainable)
if summary:
tf.sg_summary_param(x)
return x
|
r"""Creates a tensor variable of which initial values are `value` and shape is `shape`.
Args:
name: The name of new variable.
shape: A tuple/list of integers or an integer.
If shape is an integer, it is converted to a list.
value: A Python scalar. All elements of the initialized variable
will be set to this value. Default is 0.
dtype: The data type. Only floating point types are supported. Default is float32.
summary: If True, add this constant to tensor board summary.
regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable
will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization
trainable: If True, add this constant to trainable collection. Default is True.
Returns:
A `Variable`.
|
codesearchnet
|
def get_package_install_path(self, path):
from rez.package_repository import package_repository_manager
pkg_repo = package_repository_manager.get_repository(path)
return pkg_repo.get_package_payload_path(
package_name=self.package.name,
package_version=self.package.version
)
|
Return the installation path for a package (where its payload goes).
Args:
path (str): Package repository path.
|
juraj-google-style
|
def memory_read32(self, addr, num_words, zone=None):
return self.memory_read(addr, num_words, zone=zone, nbits=32)
|
Reads memory from the target system in units of 32-bits.
Args:
self (JLink): the ``JLink`` instance
addr (int): start address to read from
num_words (int): number of words to read
zone (str): memory zone to read from
Returns:
List of words read from the target system.
Raises:
JLinkException: if memory could not be read
|
juraj-google-style
|
def send_update(url_id, dataset):
data = _convert_to_seeder_format(dataset)
if (not data):
return
try:
_send_request(url_id, json=data, req_type=requests.patch)
except Exception as e:
sys.stderr.write('Seeder PATCH error: ')
        sys.stderr.write(str(e))
return None
|
Send request to Seeder's API with data changed by user.
Args:
url_id (str): ID used as identification in Seeder.
dataset (dict): WA-KAT dataset sent from frontend.
|
codesearchnet
|
def add_tags(self, ID3=None):
if (ID3 is None):
ID3 = self.ID3
if (self.tags is None):
self.ID3 = ID3
self.tags = ID3()
else:
raise error('an ID3 tag already exists')
|
Add an empty ID3 tag to the file.
Args:
ID3 (ID3): An ID3 subclass to use or `None` to use the one
that used when loading.
A custom tag reader may be used instead of the default
`ID3` object, e.g. an `mutagen.easyid3.EasyID3` reader.
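A hedged usage sketch via mutagen's MP3 wrapper, which exposes this `add_tags`; the file path is hypothetical:
from mutagen.mp3 import MP3
from mutagen.easyid3 import EasyID3

audio = MP3('song.mp3')          # hypothetical path to an untagged MP3
if audio.tags is None:
    audio.add_tags()             # attach an empty ID3 tag with the default reader
    # or, with a custom tag reader: audio.add_tags(ID3=EasyID3)
audio.save()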
|
codesearchnet
|
def self_adjoint_eigvals(tensor, name=None):
e, _ = gen_linalg_ops.self_adjoint_eig_v2(tensor, compute_v=False, name=name)
return e
|
Computes the eigenvalues of one or more self-adjoint matrices.
Note: If your program backpropagates through this function, you should replace
it with a call to tf.linalg.eigh (possibly ignoring the second output) to
avoid computing the eigen decomposition twice. This is because the
eigenvectors are used to compute the gradient w.r.t. the eigenvalues. See
_SelfAdjointEigV2Grad in linalg_grad.py.
Args:
tensor: `Tensor` of shape `[..., N, N]`.
name: string, optional name of the operation.
Returns:
e: Eigenvalues. Shape is `[..., N]`. The vector `e[..., :]` contains the `N`
eigenvalues of `tensor[..., :, :]`.
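A quick numeric check; as the note above suggests, the public `tf.linalg.eigvalsh` entry point is the usual way to compute only the eigenvalues:
import tensorflow as tf

x = tf.constant([[2.0, 1.0],
                 [1.0, 2.0]])     # symmetric (self-adjoint) matrix
e = tf.linalg.eigvalsh(x)         # eigenvalues in ascending order
print(e.numpy())                  # approximately [1. 3.]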
|
github-repos
|
def _benchmarkFetch(self, name, target, size, iters):
times = []
with ops.Graph().as_default():
v = variables.Variable(random_ops.random_normal([size]))
with session.Session(target) as sess:
sess.run(v.initializer)
sess.run(v)
for _ in range(iters):
start_time = time.time()
sess.run(v)
end_time = time.time()
times.append(end_time - start_time)
print('%s %d %f' % (name, size, np.median(times)))
self.report_benchmark(iters=1, wall_time=np.median(times), name=name)
|
Runs a microbenchmark to measure the cost of fetching a tensor.
Reports the median cost of fetching a tensor of `size` * `sizeof(float)`
bytes.
Args:
name: A human-readable name for logging the output.
target: The session target to use for the benchmark.
size: The number of floating-point numbers to be fetched.
iters: The number of iterations to perform.
|
github-repos
|
def update_configuration(self, did, wid, eid, payload):
req_headers = {'Accept': 'application/vnd.onshape.v1+json', 'Content-Type': 'application/json'}
res = self._api.request('post', (((((('/api/partstudios/d/' + did) + '/w/') + wid) + '/e/') + eid) + '/configuration'), body=payload, headers=req_headers)
return res
|
Update the configuration specified in the payload
Args:
- did (str): Document ID
- wid (str): Workspace ID
- eid (str): Element ID
- payload (json): the request body
Returns:
- res: the API response to the configuration update request
|
codesearchnet
|
def patch(make_pool=_default_make_pool):
setattr(httplib2, '_HttpOriginal', httplib2.Http)
httplib2.Http = Http
Http._make_pool = make_pool
|
Monkey-patches httplib2.Http to be httplib2shim.Http.
This effectively makes all clients of httplib2 use urllib3. It's preferable
to specify httplib2shim.Http explicitly where you can, but this can be
useful in situations where you do not control the construction of the http
object.
Args:
make_pool: A function that returns a urllib3.Pool-like object. This
allows you to specify special arguments to your connection pool if
needed. By default, this will create a urllib3.PoolManager with
SSL verification enabled using the certifi certificates.
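A minimal sketch of the intended call pattern; after patching, any code that constructs `httplib2.Http` objects transparently goes through urllib3 (network access is required to actually run the request):
import httplib2
import httplib2shim

httplib2shim.patch()                 # replace httplib2.Http with the urllib3-backed shim

http = httplib2.Http()               # now an httplib2shim.Http instance
response, content = http.request('https://www.example.org/')
print(response.status)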
|
codesearchnet
|
def __init__(self, namespace: str, prefix: str=''):
if prefix:
prefix = f'{prefix}_'
self._inference_counter = beam.metrics.Metrics.counter(namespace, prefix + 'num_inferences')
self.failed_batches_counter = beam.metrics.Metrics.counter(namespace, prefix + 'failed_batches_counter')
self._inference_request_batch_size = beam.metrics.Metrics.distribution(namespace, prefix + 'inference_request_batch_size')
self._inference_request_batch_byte_size = beam.metrics.Metrics.distribution(namespace, prefix + 'inference_request_batch_byte_size')
self._inference_batch_latency_micro_secs = beam.metrics.Metrics.distribution(namespace, prefix + 'inference_batch_latency_micro_secs')
self._model_byte_size = beam.metrics.Metrics.distribution(namespace, prefix + 'model_byte_size')
self._load_model_latency_milli_secs = beam.metrics.Metrics.distribution(namespace, prefix + 'load_model_latency_milli_secs')
self._load_model_latency_milli_secs_cache = None
self._model_byte_size_cache = None
|
Args:
namespace: Namespace for the metrics.
prefix: Unique identifier for metrics, used when models
are updated using side input.
|
github-repos
|
def csv(self, ondemand=False):
self._request_uri = '{}/{}'.format(self._api_uri, 'csv')
self._stream = True
if ondemand:
self._request.add_payload('runNow', True)
|
Update request URI to return CSV data.
For onDemand bulk generation to work it must first be enabled in the
ThreatConnect platform under System settings.
Args:
ondemand (boolean): Enable on demand bulk generation.
|
juraj-google-style
|
def _build_ring_scatter(pred_by_s_d, rank_by_s_d, chunks_by_dev):
num_devices = len(chunks_by_dev)
num_chunks = len(chunks_by_dev[0])
if 0 != num_chunks % num_devices:
raise ValueError('Expect number of chunks per device to be divisible by num_devices')
num_subchunks = int(num_chunks / num_devices)
num_ticks = num_devices - 1
for tick in range(0, num_ticks):
passed_values = [None for _ in range(0, num_chunks)]
for d in range(0, num_devices):
with ops.colocate_with(chunks_by_dev[d][0]):
for s in range(0, num_subchunks):
rank = rank_by_s_d[s][d]
seg_index = (rank + num_devices - (1 + tick)) % num_devices
pred_dev = pred_by_s_d[s][d]
chunk_index = seg_index * num_subchunks + s
passed_values[chunk_index] = array_ops.identity(chunks_by_dev[pred_dev][chunk_index])
for d in range(0, num_devices):
for s in range(0, num_subchunks):
rank = rank_by_s_d[s][d]
seg_index = (rank + num_devices - (1 + tick)) % num_devices
chunk_index = seg_index * num_subchunks + s
chunks_by_dev[d][chunk_index] = passed_values[chunk_index]
output = []
for x in chunks_by_dev:
with ops.colocate_with(x[0]):
output.append(array_ops.concat(x, 0))
return output
|
Construct subgraph for second (scatter) pass of ring all-reduce.
Args:
pred_by_s_d: as produced by _ring_permutations
rank_by_s_d: as produced by _ring_permutations
chunks_by_dev: list of list of `tf.Tensor` indexed by ints
(device, chunk)
Raises:
ValueError: chunks_by_dev is not well-formed
Returns:
list of `tf.Tensor` which are the fully reduced tensors, one
at each device corresponding to the outer dimension of chunks_by_dev.
|
github-repos
|
def dump_in_memory_result(self, result, output_path):
file_count = 0
logger.debug("Dumping in-memory processing results to output folder: %s", output_path)
for k, v in iteritems(result):
cur_output_path = os.path.join(output_path, k)
if isinstance(v, dict):
file_count += self.dump_in_memory_result(v, cur_output_path)
else:
if not os.path.isdir(output_path):
os.makedirs(output_path)
filename = os.path.join(output_path, k)
logger.debug("Writing output file: %s", filename)
with open(filename, 'wt', encoding=self.config.encoding) as f:
f.write(v)
file_count += 1
return file_count
|
Recursively dumps the result of our processing into files within the
given output path.
Args:
result: The in-memory result of our processing.
output_path: Full path to the folder into which to dump the files.
Returns:
The number of files generated (integer).
|
juraj-google-style
|
def from_string(input_str) -> 'MissionTime':
match = RE_INPUT_STRING.match(input_str)
if not match:
raise ValueError(f'badly formatted date/time: {input_str}')
return MissionTime(
datetime.datetime(
int(match.group('year')),
int(match.group('month')),
int(match.group('day')),
int(match.group('hour')),
int(match.group('minute')),
int(match.group('second')),
)
)
|
Creates a MissionTime instance from a string
Format: YYYYMMDDHHMMSS
Args:
input_str: string to parse
Returns: MissionTime instance
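A small usage sketch, assuming `MissionTime` above (and its `RE_INPUT_STRING` pattern) is importable; the timestamp follows the YYYYMMDDHHMMSS format described above:
mission_time = MissionTime.from_string('20180512143000')   # 2018-05-12 14:30:00
print(mission_time)
# A badly formatted string raises ValueError, e.g.:
# MissionTime.from_string('2018-05-12')  ->  ValueError: badly formatted date/time: 2018-05-12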
|
juraj-google-style
|
class MaxLengthCriteria(StoppingCriteria):
def __init__(self, max_length: int, max_position_embeddings: Optional[int]=None):
self.max_length = max_length
self.max_position_embeddings = max_position_embeddings
@add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
cur_len = input_ids.shape[1]
is_done = cur_len >= self.max_length
if self.max_position_embeddings is not None and (not is_done) and (cur_len >= self.max_position_embeddings):
logger.warning_once(f"This is a friendly reminder - the current text generation call will exceed the model's predefined maximum length ({self.max_position_embeddings}). Depending on the model, you may observe exceptions, performance degradation, or nothing at all.")
return torch.full((input_ids.shape[0],), is_done, device=input_ids.device, dtype=torch.bool)
|
This class can be used to stop generation whenever the number of generated tokens exceeds `max_length`. Keep
in mind that for decoder-only transformers, this count includes the initial prompt tokens.
Args:
max_length (`int`):
The maximum length that the output sequence can have in number of tokens.
max_position_embeddings (`int`, *optional*):
The maximum model length, as defined by the model's `config.max_position_embeddings` attribute.
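A hedged usage sketch; the class is importable from `transformers` and is normally wrapped in a `StoppingCriteriaList`, but calling it directly shows the per-batch boolean it returns:
import torch
from transformers import MaxLengthCriteria

criterion = MaxLengthCriteria(max_length=10)
input_ids = torch.ones((2, 10), dtype=torch.long)   # batch of 2, already 10 tokens long
scores = torch.rand(2, 100)                         # dummy next-token scores
print(criterion(input_ids, scores))                 # tensor([True, True]) -- max_length reached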
|
github-repos
|
def get_course_track_selection_url(course_run, query_parameters):
try:
course_root = reverse('course_modes_choose', kwargs={'course_id': course_run['key']})
except KeyError:
LOGGER.exception('KeyError while parsing course run data.\nCourse Run: \n[%s]', course_run)
raise
url = '{}{}'.format(settings.LMS_ROOT_URL, course_root)
course_run_url = update_query_parameters(url, query_parameters)
return course_run_url
|
Return track selection url for the given course.
Arguments:
course_run (dict): A dictionary containing course run metadata.
query_parameters (dict): A dictionary containing query parameters to be added to course selection url.
Raises:
(KeyError): Raised when course run dict does not have 'key' key.
Returns:
(str): Course track selection url.
|
codesearchnet
|
def create_degrees(input_dim, hidden_dims, input_order='left-to-right', hidden_order='left-to-right'):
if (isinstance(input_order, str) and (input_order not in ('random', 'left-to-right', 'right-to-left'))):
raise ValueError('Input order is not valid.')
if (hidden_order not in ('random', 'left-to-right')):
raise ValueError('Hidden order is not valid.')
degrees = []
if isinstance(input_order, str):
input_degrees = np.arange(1, (input_dim + 1))
if (input_order == 'right-to-left'):
input_degrees = np.flip(input_degrees, 0)
elif (input_order == 'random'):
np.random.shuffle(input_degrees)
else:
input_order = np.array(input_order)
if np.all((np.sort(input_order) != np.arange(1, (input_dim + 1)))):
raise ValueError('invalid input order')
input_degrees = input_order
degrees.append(input_degrees)
for units in hidden_dims:
if (hidden_order == 'random'):
min_prev_degree = min(np.min(degrees[(- 1)]), (input_dim - 1))
hidden_degrees = np.random.randint(low=min_prev_degree, high=input_dim, size=units)
elif (hidden_order == 'left-to-right'):
hidden_degrees = ((np.arange(units) % max(1, (input_dim - 1))) + min(1, (input_dim - 1)))
degrees.append(hidden_degrees)
return degrees
|
Returns a list of degree vectors, one for each input and hidden layer.
A unit with degree d can only receive input from units with degree < d. Output
units always have the same degree as their associated input unit.
Args:
input_dim: Number of inputs.
hidden_dims: list with the number of hidden units per layer. It does not
include the output layer. Each hidden layer must have at least
`input_dim - 1` units (otherwise autoregressivity is not possible).
input_order: Order of degrees to the input units: 'random', 'left-to-right',
'right-to-left', or an array of an explicit order. For example,
'left-to-right' builds an autoregressive model
p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
hidden_order: Order of degrees to the hidden units: 'random',
'left-to-right'. If 'left-to-right', hidden units are allocated equally
(up to a remainder term) to each degree.
|
codesearchnet
|
def __init__(self, config_block):
if config_block:
self._config = config_block
else:
logging.error('config block was garbage')
raise SystemError
|
Cloud stack utility init method.
Args:
config_block - a dictionary created by the CLI driver. See that
script for the things that are required and
optional.
Returns:
not a damn thing
Raises:
SystemError - if everything isn't just right
|
juraj-google-style
|
def get_service_details(self, service_id: str) -> dict:
if (not self._manager):
raise RuntimeError('Only the Swarm manager node can retrieve all the services details.')
service = self._client.services.get(service_id)
return service.attrs
|
Get details of a service.
Only manager nodes can retrieve service details.
Args:
service_id (str): ID of the service
Returns:
dict, details of the service
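For reference, a hedged sketch of the underlying Docker SDK calls (requires access to a running Swarm manager; the service ID is hypothetical):
import docker

client = docker.from_env()
service = client.services.get('my_service_id')   # hypothetical service ID
print(service.attrs['Spec']['Name'])              # the returned dict mirrors `docker service inspect`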
|
codesearchnet
|
def __call__(self, name, value):
if not isinstance(value, self.base_type):
raise ValueError("%s must be %s, not %s" % (name, self.base_type, value.__class__))
|
Call method.
Args:
name (str): the value's name.
value (object): the value to check.
Raises:
ValueError: if value is not type base_type.
|
juraj-google-style
|
def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor:
out = F.dropout(x, p=prob, training=training)
out = residual + out
return out
|
Dropout add function
Args:
x (`torch.tensor`):
input tensor
residual (`torch.tensor`):
residual tensor
prob (`float`):
dropout probability
training (`bool`):
training mode
|
github-repos
|
def lease(queue_name, owner, count=1, timeout_seconds=60):
now = datetime.datetime.utcnow()
query = WorkQueue.query.filter_by(queue_name=queue_name, status=WorkQueue.LIVE).filter((WorkQueue.eta <= now)).order_by(WorkQueue.eta).with_lockmode('update').limit(count)
task_list = query.all()
if (not task_list):
return None
next_eta = (now + datetime.timedelta(seconds=timeout_seconds))
for task in task_list:
task.eta = next_eta
task.lease_attempts += 1
task.last_owner = owner
task.last_lease = now
task.heartbeat = None
task.heartbeat_number = 0
db.session.add(task)
return [_task_to_dict(task) for task in task_list]
|
Leases a work item from a queue, usually the oldest task available.
Args:
queue_name: Name of the queue to lease work from.
owner: Who or what is leasing the task.
count: Lease up to this many tasks. Return value will never have more
than this many items present.
timeout_seconds: Number of seconds to lock the task for before
allowing another owner to lease it.
Returns:
List of dictionaries representing the tasks that were leased, or
None if no tasks are available to be leased.
|
codesearchnet
|
def get_by_name(self, name):
scopes = self._client.get_all()
result = [x for x in scopes if (x['name'] == name)]
return (result[0] if result else None)
|
Gets a Scope by name.
Args:
name: Name of the Scope
Returns:
dict: Scope.
|
codesearchnet
|
def _run_static_range_qat(src_saved_model_path: str, dst_saved_model_path: str, quant_opts: _QuantizationOptions, signature_def_map: _SignatureDefMap) -> None:
logging.info('Running static-range quantization for QAT model.')
pywrap_quantize_model.quantize_qat_model(src_saved_model_path, dst_saved_model_path, quantization_options_serialized=quant_opts.SerializeToString(), signature_keys=list(quant_opts.signature_keys), signature_def_map_serialized=_serialize_signature_def_map(signature_def_map), py_function_library=py_function_lib.PyFunctionLibrary())
|
Runs static-range quantization for a Quantization-Aware Trained model.
Runs the quantization for a model trained using QAT.
Args:
src_saved_model_path: Path to the source SavedModel directory.
dst_saved_model_path: Path to the destination SavedModel directory.
quant_opts: Quantization options.
signature_def_map: Signature def key -> SignatureDef mapping.
|
github-repos
|
def replace_batch_norm(model):
for name, module in model.named_children():
if isinstance(module, nn.BatchNorm2d):
new_module = RTDetrV2FrozenBatchNorm2d(module.num_features)
if not module.weight.device == torch.device('meta'):
new_module.weight.data.copy_(module.weight)
new_module.bias.data.copy_(module.bias)
new_module.running_mean.data.copy_(module.running_mean)
new_module.running_var.data.copy_(module.running_var)
model._modules[name] = new_module
if len(list(module.children())) > 0:
replace_batch_norm(module)
|
Recursively replace all `torch.nn.BatchNorm2d` with `RTDetrV2FrozenBatchNorm2d`.
Args:
model (torch.nn.Module):
input model
|
github-repos
|
def segment_ids_to_row_splits(segment_ids, num_segments=None, out_type=None, name=None):
from tensorflow.python.ops import bincount_ops
if out_type is None:
if isinstance(segment_ids, tensor.Tensor):
out_type = segment_ids.dtype
elif isinstance(num_segments, tensor.Tensor):
out_type = num_segments.dtype
else:
out_type = dtypes.int64
else:
out_type = dtypes.as_dtype(out_type)
with ops.name_scope(name, 'SegmentIdsToRaggedSplits', [segment_ids]) as name:
segment_ids = ragged_util.convert_to_int_tensor(segment_ids, 'segment_ids', dtype=dtypes.int32)
segment_ids.shape.assert_has_rank(1)
if num_segments is not None:
num_segments = ragged_util.convert_to_int_tensor(num_segments, 'num_segments', dtype=dtypes.int32)
num_segments.shape.assert_has_rank(0)
row_lengths = bincount_ops.bincount(segment_ids, minlength=num_segments, maxlength=num_segments, dtype=out_type)
splits = array_ops.concat([[0], math_ops.cumsum(row_lengths)], axis=0)
if num_segments is not None:
const_num_segments = tensor_util.constant_value(num_segments)
if const_num_segments is not None:
splits.set_shape(tensor_shape.TensorShape([const_num_segments + 1]))
return splits
|
Generates the RaggedTensor `row_splits` corresponding to a segmentation.
Returns an integer vector `splits`, where `splits[0] = 0` and
`splits[i] = splits[i-1] + count(segment_ids==i)`. Example:
>>> print(tf.ragged.segment_ids_to_row_splits([0, 0, 0, 2, 2, 3, 4, 4, 4]))
tf.Tensor([0 3 3 5 6 9], shape=(6,), dtype=int64)
Args:
segment_ids: A 1-D integer Tensor.
num_segments: A scalar integer indicating the number of segments. Defaults
to `max(segment_ids) + 1` (or zero if `segment_ids` is empty).
out_type: The dtype for the return value. Defaults to `segment_ids.dtype`,
or `tf.int64` if `segment_ids` does not have a dtype.
name: A name prefix for the returned tensor (optional).
Returns:
A sorted 1-D integer Tensor, with `shape=[num_segments + 1]`.
|
github-repos
|
def get_book_links(links):
book_links = []
for link in links:
data = DOWNER.download(link + "1")
dom = dhtmlparser.parseString(data)
book_links.extend(_parse_book_links(dom))
max_page = _get_max_page(dom)
if max_page == 1:
continue
for i in range(max_page - 1):
data = DOWNER.download(link + str(i + 2))
book_links.extend(
_parse_book_links(
dhtmlparser.parseString(data)
)
)
return book_links
|
Go through `links` to categories and return a list of links to all
publications in all given categories.
Args:
links (list): List of strings (absolute links to categories).
Returns:
list: List of strings / absolute links to book details.
|
juraj-google-style
|
def download_kegg_gene_metadata(gene_id, outdir=None, force_rerun=False):
if (not outdir):
outdir = ''
outfile = op.join(outdir, '{}.kegg'.format(custom_slugify(gene_id)))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
raw_text = bs_kegg.get('{}'.format(gene_id))
if (raw_text == 404):
return
with io.open(outfile, mode='wt', encoding='utf-8') as f:
f.write(raw_text)
log.debug('{}: downloaded KEGG metadata file'.format(outfile))
else:
log.debug('{}: KEGG metadata file already exists'.format(outfile))
return outfile
|
Download the KEGG flatfile for a KEGG ID and return the path.
Args:
gene_id: KEGG gene ID (with organism code), i.e. "eco:1244"
outdir: optional output directory of metadata
force_rerun: if True, re-download the metadata file even if it already exists
Returns:
Path to metadata file
|
codesearchnet
|
def Serialize(self, writer):
super(StorageItem, self).Serialize(writer)
writer.WriteVarBytes(self.Value)
|
Serialize full object.
Args:
writer (neo.IO.BinaryWriter):
|
juraj-google-style
|
def update_state(self, y_true, y_pred, sample_weight=None):
return metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, top_k=self.top_k, class_id=self.class_id, sample_weight=sample_weight)
|
Accumulates true positive and false negative statistics.
Args:
y_true: The ground truth values, with the same dimensions as `y_pred`.
Will be cast to `bool`.
y_pred: The predicted values. Each element must be in the range `[0, 1]`.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
|
github-repos
|
def __init__(self, apps):
try:
apps = list(apps.items())
except AttributeError:
pass
def by_path_len(app):
return len(app[0])
apps.sort(key=by_path_len, reverse=True)
self.apps = [(p.rstrip('/'), a) for p, a in apps]
|
Initialize path info WSGI app dispatcher.
Args:
apps (dict[str,object]|list[tuple[str,object]]): URI prefix
and WSGI app pairs
|
juraj-google-style
|
def _FormatIPToken(self, token_data):
data = ''.join(['{0:02x}'.format(byte) for byte in token_data.data])
return {'IPv4_Header': data}
|
Formats an IPv4 packet header token as a dictionary of values.
Args:
token_data (bsm_token_data_ip): AUT_IP token data.
Returns:
dict[str, str]: token values.
|
juraj-google-style
|
def compare_jsone_task_definition(parent_link, rebuilt_definitions):
diffs = []
for compare_definition in rebuilt_definitions['tasks']:
if 'taskId' in compare_definition:
del(compare_definition['taskId'])
compare_definition = remove_empty_keys(compare_definition)
runtime_definition = remove_empty_keys(parent_link.task)
diff = list(dictdiffer.diff(compare_definition, runtime_definition))
if diff:
diffs.append(pprint.pformat(diff))
continue
log.info("{}: Good.".format(parent_link.name))
break
else:
error_msg = "{} {}: the runtime task doesn't match any rebuilt definition!\n{}".format(
parent_link.name, parent_link.task_id, pprint.pformat(diffs)
)
log.critical(error_msg)
raise CoTError(error_msg)
|
Compare the json-e rebuilt task definition vs the runtime definition.
Args:
parent_link (LinkOfTrust): the parent link to test.
rebuilt_definitions (dict): the rebuilt task definitions.
Raises:
CoTError: on failure.
|
juraj-google-style
|
def _GetSectionNames(self, pefile_object):
section_names = []
for section in pefile_object.sections:
section_name = getattr(section, 'Name', b'')
try:
section_name = '{0:s}'.format(section_name.decode('unicode_escape'))
except UnicodeDecodeError:
section_name = '{0:s}'.format(repr(section_name))
section_names.append(section_name)
return section_names
|
Retrieves all PE section names.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
list[str]: names of the sections.
|
juraj-google-style
|
def assert_equal(first, second, msg=None, extras=None):
my_msg = None
try:
_pyunit_proxy.assertEqual(first, second)
except AssertionError as e:
my_msg = str(e)
if msg:
my_msg = ('%s %s' % (my_msg, msg))
if (my_msg is not None):
raise signals.TestFailure(my_msg, extras=extras)
|
Assert the equality of objects, otherwise fail the test.
Error message is "first != second" by default. Additional explanation can
be supplied in the message.
Args:
first: The first object to compare.
second: The second object to compare.
msg: A string that adds additional info about the failure.
extras: An optional field for extra information to be included in
test result.
|
codesearchnet
|