code | docstring | source
---|---|---|
def _create_variable(self, next_creator, **kwargs):
if kwargs.pop('per_worker_variable', False):
logging.info('Creating per worker variable')
return self._create_per_worker_variable(next_creator, **kwargs)
var_creator = self._create_var_creator(next_creator, **kwargs)
if 'colocate_with' in kwargs:
colocate_with = kwargs['colocate_with']
with ops.device(None):
with ops.colocate_with(colocate_with):
var = var_creator(**kwargs)
logging.debug('Creating variable (name:%s, shape:%r) that colocates with %s', var.name, var.shape, kwargs['colocate_with'].name)
return var
if self._variable_partitioner is None:
return self._create_variable_round_robin(var_creator, **kwargs)
name = kwargs.get('name', None)
dtype = kwargs.get('dtype', None)
shape = kwargs.get('shape', None)
initial_value = kwargs.get('initial_value', None)
if initial_value is None:
v = next_creator(**kwargs)
if not isinstance(v, resource_variable_ops.UninitializedVariable):
raise ValueError('It looks like you are using `ParameterServerStrategy` with a `variable_partitioner`, and trying to create a variable without specifying `initial_value`. This is not allowed. Please specify the `initial_value`.')
elif shape is None or dtype is None:
raise ValueError('It looks like you are trying to load a `SavedModel` using `tf.saved_model.load` within a `ParameterServerStrategy` scope, but the `SavedModel` is missing shape or dtype information.')
else:
def initializer(shape, dtype, **kwargs):
if 'partition_shape' in kwargs:
shape = kwargs['partition_shape']
return array_ops.zeros(shape, dtype)
initial_value = functools.partial(initializer, shape=shape, dtype=dtype)
init_from_fn = callable(initial_value)
if init_from_fn and (shape is None or dtype is None):
init_from_fn = False
initial_value = initial_value()
if not init_from_fn:
initial_value = ops.convert_to_tensor(initial_value, dtype=dtype)
dtype = initial_value.dtype
shape = initial_value.shape
else:
shape = tensor_shape.as_shape(shape)
if shape.rank == 0:
return self._create_variable_round_robin(var_creator, **kwargs)
num_partitions = self._variable_partitioner(shape=shape, dtype=dtype)
if not num_partitions or num_partitions[0] == 0 or any((v != 1 for v in num_partitions[1:])):
raise ValueError('variable_partitioner must return a list/tuple whose elements are 1 besides the first element (non-zero), got: %r' % num_partitions)
if num_partitions[0] == 1:
return self._create_variable_round_robin(var_creator, **kwargs)
num_partitions = min(num_partitions[0], shape[0])
base = shape[0] // num_partitions
extra = shape[0] % num_partitions
offsets = []
for i in range(num_partitions):
if i == 0:
offsets.append(0)
else:
prev_shard_size = base + (1 if i - 1 < extra else 0)
offsets.append(offsets[i - 1] + prev_shard_size)
offsets.append(shape[0])
def init_shard_fn(shard_index):
if not init_from_fn:
logging.log_if(logging.WARN, _INEFFICIENT_INIT_WARNING % name, shard_index == 0 and shape.num_elements() > _LARGE_VARIABLE_NUM_ELEMENTS)
return initial_value[offsets[shard_index]:offsets[shard_index + 1]]
partition_shape = (offsets[shard_index + 1] - offsets[shard_index],) + shape[1:]
partition_offset = (offsets[shard_index],) + (0,) * len(shape[1:])
arg_spec = tf_inspect.getfullargspec(initial_value)
if 'shard_info' not in arg_spec.args and 'shard_info' not in arg_spec.kwonlyargs:
try:
value = initial_value(partition_shape=partition_shape, partition_offset=partition_offset)
except (TypeError, ValueError):
value = initial_value()
if value.shape == partition_shape:
return value
else:
logging.log_if(logging.WARN, _INEFFICIENT_INIT_WARNING % name, shard_index == 0 and shape.num_elements() > _LARGE_VARIABLE_NUM_ELEMENTS)
return value[offsets[shard_index]:offsets[shard_index + 1]]
else:
return initial_value(shard_info=trackable.ShardInfo(shape=tensor_shape.as_shape(partition_shape), offset=partition_offset))
var_list = []
for i in range(num_partitions):
kwargs['shape'] = (offsets[i + 1] - offsets[i],) + shape[1:]
kwargs['initial_value'] = functools.partial(init_shard_fn, i)
if name is not None:
kwargs['name'] = '{}/part_{}'.format(name, i)
var_list.append(self._create_variable_round_robin(var_creator, **kwargs))
result = sharded_variable.ShardedVariable(var_list)
return result
|
Implements StrategyExtendedV2._create_variable.
Creates a `Variable` or a `ShardedVariable`. A `ShardedVariable` will be
created if satisfying all the following criteria:
1. `self._variable_partitioner` results in more than one partition on the
first axis.
2. variable's rank is greater than 0.
3. variable is not colocated with another variable.
Otherwise a `Variable` will be created.
Args:
next_creator: See `variable_scope.variable_creator_scope`; the next
creator in the chain.
**kwargs: Passed through to the next creator.
Returns:
A `Variable` or `ShardedVariable`.
|
github-repos
|
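A worked example of the shard-offset computation in `_create_variable` above (a minimal sketch with illustrative values): splitting a first axis of length 10 into 3 partitions.

shape0, num_partitions = 10, 3
base, extra = shape0 // num_partitions, shape0 % num_partitions
offsets = [0]
for i in range(1, num_partitions):
    # each of the first `extra` shards gets one extra row
    prev_shard_size = base + (1 if i - 1 < extra else 0)
    offsets.append(offsets[i - 1] + prev_shard_size)
offsets.append(shape0)
print(offsets)  # [0, 4, 7, 10] -> shard shapes (4, ...), (3, ...), (3, ...)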
def _StartMonitoringProcess(self, process):
if (process is None):
raise ValueError('Missing process.')
pid = process.pid
if (pid in self._process_information_per_pid):
raise KeyError('Already monitoring process (PID: {0:d}).'.format(pid))
if (pid in self._rpc_clients_per_pid):
raise KeyError('RPC client (PID: {0:d}) already exists'.format(pid))
rpc_client = plaso_xmlrpc.XMLProcessStatusRPCClient()
rpc_port = process.rpc_port.value
time_waited_for_process = 0.0
while (not rpc_port):
time.sleep(0.1)
rpc_port = process.rpc_port.value
time_waited_for_process += 0.1
if (time_waited_for_process >= self._RPC_SERVER_TIMEOUT):
raise IOError('RPC client unable to determine server (PID: {0:d}) port.'.format(pid))
hostname = 'localhost'
if (not rpc_client.Open(hostname, rpc_port)):
raise IOError('RPC client unable to connect to server (PID: {0:d}) http://{1:s}:{2:d}'.format(pid, hostname, rpc_port))
self._rpc_clients_per_pid[pid] = rpc_client
self._process_information_per_pid[pid] = process_info.ProcessInfo(pid)
|
Starts monitoring a process.
Args:
process (MultiProcessBaseProcess): process.
Raises:
IOError: if the RPC client cannot connect to the server.
KeyError: if the process is not registered with the engine or
if the process is already being monitored.
OSError: if the RPC client cannot connect to the server.
ValueError: if the process is missing.
|
codesearchnet
|
def extract_anomalies(y_true, smoothed_errors, window_size, batch_size, error_buffer):
if len(y_true) <= batch_size * window_size:
raise ValueError("Window size (%s) larger than y_true (len=%s)."
% (batch_size, len(y_true)))
num_windows = int((len(y_true) - (batch_size * window_size)) / batch_size)
anomalies_indices = []
for i in range(num_windows + 1):
prev_index = i * batch_size
curr_index = (window_size * batch_size) + (i * batch_size)
if i == num_windows:
curr_index = len(y_true)
window_smoothed_errors = smoothed_errors[prev_index:curr_index]
window_y_true = y_true[prev_index:curr_index]
epsilon, sd_threshold = compute_threshold(window_smoothed_errors, error_buffer)
window_anom_indices = get_anomalies(
window_smoothed_errors,
window_y_true,
sd_threshold,
i,
anomalies_indices,
error_buffer
)
mu = np.mean(window_smoothed_errors)
smoothed_errors_inv = [mu + (mu - e) for e in window_smoothed_errors]
epsilon_inv, sd_inv = compute_threshold(smoothed_errors_inv, error_buffer)
inv_anom_indices = get_anomalies(
smoothed_errors_inv,
window_y_true,
sd_inv,
i,
anomalies_indices,
len(y_true)
)
anomalies_indices = list(set(anomalies_indices + inv_anom_indices))
anomalies_indices.extend([i_a + i * batch_size for i_a in window_anom_indices])
anomalies_indices = sorted(list(set(anomalies_indices)))
anomalies_groups = [list(group) for group in mit.consecutive_groups(anomalies_indices)]
anomaly_sequences = [(g[0], g[-1]) for g in anomalies_groups if not g[0] == g[-1]]
anomalies_scores = []
for e_seq in anomaly_sequences:
denominator = np.mean(smoothed_errors) + np.std(smoothed_errors)
score = max([
abs(smoothed_errors[x] - epsilon) / denominator
for x in range(e_seq[0], e_seq[1])
])
anomalies_scores.append(score)
return anomaly_sequences, anomalies_scores
|
Extracts anomalies from the errors.
Args:
y_true (ndarray): array of true (observed) values.
smoothed_errors (ndarray): array of smoothed prediction errors, same length as `y_true`.
window_size (int): number of batches per evaluation window.
batch_size (int): number of values per batch.
error_buffer (int): buffer size used when computing thresholds and grouping anomalies.
Returns:
tuple: `(anomaly_sequences, anomalies_scores)`, where `anomaly_sequences` is a list of
`(start, end)` index pairs and `anomalies_scores` their corresponding scores.
|
juraj-google-style
|
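A small illustration of the grouping step at the end of `extract_anomalies` above, assuming `more_itertools` (imported as `mit` in the function) is installed; the index values are made up.

import more_itertools as mit

anomalies_indices = [3, 4, 5, 9, 15, 16]
groups = [list(g) for g in mit.consecutive_groups(anomalies_indices)]
# groups == [[3, 4, 5], [9], [15, 16]]
anomaly_sequences = [(g[0], g[-1]) for g in groups if g[0] != g[-1]]
# anomaly_sequences == [(3, 5), (15, 16)]  -- single-point runs are dropped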
def call(self, command, *args):
return self.rpc.call(str(command), *args)
|
Passes an arbitrary command to the coin daemon.
Args:
command (str): command to be sent to the coin daemon
|
juraj-google-style
|
def random_int_generator(maxrange):
try:
return random.randint(0, maxrange)
except:
(line, filename, synerror) = trace()
raise ArcRestHelperError({'function': 'random_int_generator', 'line': line, 'filename': filename, 'synerror': synerror})
finally:
pass
|
Generates a random integer from 0 to `maxrange`, inclusive.
Args:
maxrange (int): The upper range of integers to randomly choose.
Returns:
int: The randomly generated integer from :py:func:`random.randint`.
Examples:
>>> arcresthelper.common.random_int_generator(15)
9
|
codesearchnet
|
def plot_zt_dop(self, temps='all', output='average', relaxation_time=1e-14):
import matplotlib.pyplot as plt
if (output == 'average'):
zt = self._bz.get_zt(relaxation_time=relaxation_time, output='average')
elif (output == 'eigs'):
zt = self._bz.get_zt(relaxation_time=relaxation_time, output='eigs')
tlist = (sorted(zt['n'].keys()) if (temps == 'all') else temps)
plt.figure(figsize=(22, 14))
for (i, dt) in enumerate(['n', 'p']):
plt.subplot((121 + i))
for temp in tlist:
if (output == 'eigs'):
for xyz in range(3):
plt.semilogx(self._bz.doping[dt], list(zip(*zt[dt][temp]))[xyz], marker='s', label=(((str(xyz) + ' ') + str(temp)) + ' K'))
elif (output == 'average'):
plt.semilogx(self._bz.doping[dt], zt[dt][temp], marker='s', label=(str(temp) + ' K'))
plt.title((dt + '-type'), fontsize=20)
if (i == 0):
plt.ylabel('zT', fontsize=30.0)
plt.xlabel('Doping concentration ($cm^{-3}$)', fontsize=30.0)
p = ('lower right' if (i == 0) else '')
plt.legend(loc=p, fontsize=15)
plt.grid()
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.tight_layout()
return plt
|
Plot the figure of merit zT as a function of doping level for different
temperatures.
Args:
temps: the default 'all' plots all the temperatures in the analyzer.
Specify a list of temperatures if you want to plot only some.
output: with 'average' you get an average of the three directions
with 'eigs' you get all the three directions.
relaxation_time: specify a constant relaxation time value
Returns:
a matplotlib object
|
codesearchnet
|
def handle_unsubscribe(self, request, path):
ret = []
if path:
name = path[0]
child = self.children[name]
ret += child.handle_unsubscribe(request, path[1:])
if not child.children and not child.update_requests \
and not child.delta_requests:
del self.children[name]
else:
if request in self.update_requests:
self.update_requests.remove(request)
else:
self.delta_requests.remove(request)
ret.append(request.return_response())
return ret
|
Remove from the notifier list and send a return
Args:
request (Subscribe): The original subscribe request
path (list): The relative path from ourself
Returns:
list: [(callback, Response)] that need to be called
|
juraj-google-style
|
def query_dict_to_string(query):
query_params = []
for key, value in query.items():
query_params.append(key + "=" + value)
return "&".join(query_params)
|
Convert an OrderedDict to a query string.
Args:
query (obj): The key value object with query params.
Returns:
str: The query string.
Note:
This method does the same as urllib.parse.urlencode except
that it doesn't actually encode the values.
|
juraj-google-style
|
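A minimal usage sketch for `query_dict_to_string` above (expected output shown in the comment):

from collections import OrderedDict

query = OrderedDict([("page", "2"), ("sort", "name")])
print(query_dict_to_string(query))  # page=2&sort=name  (values are not URL-encoded)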
def from_lasio_curve(cls, curve, depth=None, basis=None, start=None, stop=None, step=0.1524, run=(- 1), null=(- 999.25), service_company=None, date=None):
data = curve.data
unit = curve.unit
if (depth is not None):
d = np.diff(depth)
if (not np.allclose((d - np.mean(d)), np.zeros_like(d))):
m = 'Irregular sampling in depth is not supported. '
m += 'Interpolating to regular basis.'
warnings.warn(m)
step = np.nanmedian(d)
(start, stop) = (depth[0], (depth[(- 1)] + 1e-05))
basis = np.arange(start, stop, step)
data = np.interp(basis, depth, data)
else:
step = np.nanmedian(d)
start = depth[0]
if (start is None):
if (basis is not None):
start = basis[0]
step = (basis[1] - basis[0])
else:
raise CurveError('You must provide a basis or a start depth.')
if (step == 0):
if (stop is None):
raise CurveError('You must provide a step or a stop depth.')
else:
step = ((stop - start) / (curve.data.shape[0] - 1))
params = {}
params['mnemonic'] = curve.mnemonic
params['description'] = curve.descr
params['start'] = start
params['step'] = step
params['units'] = unit
params['run'] = run
params['null'] = null
params['service_company'] = service_company
params['date'] = date
params['code'] = curve.API_code
return cls(data, params=params)
|
Makes a curve object from a lasio curve object and either a depth
basis or start and step information.
Args:
curve (ndarray)
depth (ndarray)
basis (ndarray)
start (float)
stop (float)
step (float): default: 0.1524
run (int): default: -1
null (float): default: -999.25
service_company (str): Optional.
date (str): Optional.
Returns:
Curve. An instance of the class.
|
codesearchnet
|
def value_to_pytd_type(self, node, v, seen, view):
if isinstance(v, (abstract.Empty, typing_overlay.Never)):
return pytd.NothingType()
elif isinstance(v, abstract.TYPE_VARIABLE_INSTANCES):
return self._type_variable_to_pytd_type(node, v, seen, view)
elif isinstance(v, (typing_overlay.TypeVar, typing_overlay.ParamSpec)):
return pytd.NamedType('builtins.type')
elif isinstance(v, dataclass_overlay.FieldInstance):
if not v.default:
return pytd.AnythingType()
return pytd_utils.JoinTypes((self.value_to_pytd_type(node, d, seen, view) for d in v.default.data))
elif isinstance(v, attr_overlay.AttribInstance):
ret = self.value_to_pytd_type(node, v.typ, seen, view)
md = metadata.to_pytd(v.to_metadata())
return pytd.Annotated(ret, ("'pytype_metadata'", md))
elif isinstance(v, special_builtins.PropertyInstance):
return pytd.NamedType('builtins.property')
elif isinstance(v, typed_dict.TypedDict):
return pytd.NamedType(v.props.name)
elif isinstance(v, abstract.FUNCTION_TYPES):
try:
signatures = function.get_signatures(v)
except NotImplementedError:
return pytd.NamedType('typing.Callable')
if len(signatures) == 1:
val = self.signature_to_callable(signatures[0])
if not isinstance(v, abstract.PYTD_FUNCTION_TYPES) or not val.formal:
return self.value_instance_to_pytd_type(node, val, None, seen, view)
return pytd.NamedType('typing.Callable')
elif isinstance(v, (abstract.ClassMethod, abstract.StaticMethod)):
return self.value_to_pytd_type(node, v.method, seen, view)
elif isinstance(v, (special_builtins.IsInstance, special_builtins.ClassMethodCallable)):
return pytd.NamedType('typing.Callable')
elif isinstance(v, abstract.Class):
param = self.value_instance_to_pytd_type(node, v, None, seen, view)
return pytd.GenericType(base_type=pytd.NamedType('builtins.type'), parameters=(param,))
elif isinstance(v, abstract.Module):
return pytd.Alias(v.name, pytd.Module(v.name, module_name=v.full_name))
elif self._output_mode >= Converter.OutputMode.LITERAL and isinstance(v, abstract.ConcreteValue) and isinstance(v.pyval, (int, str, bytes)):
return pytd.Literal(repr(v.pyval))
elif isinstance(v, abstract.SimpleValue):
ret = self.value_instance_to_pytd_type(node, v.cls, v, seen=seen, view=view)
ret.Visit(visitors.FillInLocalPointers({'builtins': self.ctx.loader.builtins}))
return ret
elif isinstance(v, abstract.Union):
return pytd_utils.JoinTypes((self.value_to_pytd_type(node, o, seen, view) for o in v.options))
elif isinstance(v, special_builtins.SuperInstance):
return pytd.NamedType('builtins.super')
elif isinstance(v, abstract.TypeParameter):
if self._detailed:
return pytd.NamedType('typing.TypeVar')
else:
return pytd.AnythingType()
elif isinstance(v, abstract.ParamSpec):
if self._detailed:
return pytd.NamedType('typing.ParamSpec')
else:
return pytd.AnythingType()
elif isinstance(v, abstract.Unsolvable):
return pytd.AnythingType()
elif isinstance(v, abstract.Unknown):
return pytd.NamedType(v.class_name)
elif isinstance(v, abstract.BuildClass):
return pytd.NamedType('typing.Callable')
elif isinstance(v, abstract.FinalAnnotation):
param = self.value_to_pytd_type(node, v.annotation, seen, view)
return pytd.GenericType(base_type=pytd.NamedType('typing.Final'), parameters=(param,))
elif isinstance(v, abstract.SequenceLength):
return pytd.Annotated(base_type=pytd.NamedType('SequenceLength'), annotations=(str(v.length), str(v.splat)))
elif isinstance(v, abstract.Concatenate):
return pytd.NamedType('typing.Concatenate')
elif isinstance(v, function.ParamSpecMatch):
return pytd.AnythingType()
elif isinstance(v, abstract.ParamSpecArgs):
return pytd.AnythingType()
else:
raise NotImplementedError(v.__class__.__name__)
|
Get a PyTD type representing this object, as seen at a node.
Args:
node: The node from which we want to observe this object.
v: The object.
seen: The set of values seen before while computing the type.
view: A Variable -> binding map.
Returns:
A PyTD type.
|
github-repos
|
def _unverified_decode(token):
token = _helpers.to_bytes(token)
if token.count(b'.') != 2:
raise ValueError(
'Wrong number of segments in token: {0}'.format(token))
encoded_header, encoded_payload, signature = token.split(b'.')
signed_section = encoded_header + b'.' + encoded_payload
signature = _helpers.padded_urlsafe_b64decode(signature)
header = _decode_jwt_segment(encoded_header)
payload = _decode_jwt_segment(encoded_payload)
return header, payload, signed_section, signature
|
Decodes a token and does no verification.
Args:
token (Union[str, bytes]): The encoded JWT.
Returns:
Tuple[str, str, str, str]: header, payload, signed_section, and
signature.
Raises:
ValueError: if there are an incorrect amount of segments in the token.
|
juraj-google-style
|
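For illustration, a stdlib-only sketch of the same idea as `_unverified_decode` above; the real function relies on google-auth's internal `_helpers` and `_decode_jwt_segment`, which are not shown here, so the helper below is an assumption-labelled stand-in.

import base64
import json

def unverified_decode_sketch(token: bytes):
    """Split a JWT into header, payload, signed section and signature without verifying it."""
    if token.count(b'.') != 2:
        raise ValueError('Wrong number of segments in token: {0}'.format(token))
    encoded_header, encoded_payload, signature = token.split(b'.')
    signed_section = encoded_header + b'.' + encoded_payload

    def b64decode(segment: bytes) -> bytes:
        # JWTs use unpadded URL-safe base64, so restore the padding first.
        return base64.urlsafe_b64decode(segment + b'=' * (-len(segment) % 4))

    header = json.loads(b64decode(encoded_header))
    payload = json.loads(b64decode(encoded_payload))
    return header, payload, signed_section, b64decode(signature)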
def find_container_traits(cls_or_string):
if utils.is_str(cls_or_string):
if (not templates.is_instantiation(cls_or_string)):
return None
name = templates.name(cls_or_string)
if name.startswith('std::'):
name = name[len('std::'):]
if name.startswith('std::tr1::'):
name = name[len('std::tr1::'):]
for cls_traits in all_container_traits:
if (cls_traits.name() == name):
return cls_traits
else:
if isinstance(cls_or_string, class_declaration.class_types):
if (cls_or_string.cache.container_traits is not None):
return cls_or_string.cache.container_traits
for cls_traits in all_container_traits:
if cls_traits.is_my_case(cls_or_string):
if isinstance(cls_or_string, class_declaration.class_types):
cls_or_string.cache.container_traits = cls_traits
return cls_traits
|
Find the container traits type of a declaration.
Args:
cls_or_string (str | declarations.declaration_t): a string or a class declaration
Returns:
declarations.container_traits: a container traits
|
codesearchnet
|
def parse_vasprun( self ):
self.vasprun_filename = match_filename( 'vasprun.xml' )
if not self.vasprun_filename:
raise FileNotFoundError( 'Could not find vasprun.xml or vasprun.xml.gz file' )
try:
self.vasprun = Vasprun( self.vasprun_filename, parse_potcar_file=False )
except ET.ParseError:
self.vasprun = None
except:
raise
|
Read in `vasprun.xml` as a pymatgen Vasprun object.
Args:
None
Returns:
None
Note:
If the vasprun.xml is not well formed, this method will catch the ParseError
and set self.vasprun = None.
|
juraj-google-style
|
def normalize_digits_only(number, keep_non_digits=False):
number = unicod(number)
number_length = len(number)
normalized_digits = U_EMPTY_STRING
for ii in range(number_length):
d = unicode_digit(number[ii], -1)
if d != -1:
normalized_digits += unicod(d)
elif keep_non_digits:
normalized_digits += number[ii]
return normalized_digits
|
Normalizes a string of characters representing a phone number.
This converts wide-ascii and arabic-indic numerals to European numerals,
and strips punctuation and alpha characters (optional).
Arguments:
number -- a string representing a phone number
keep_non_digits -- whether to keep non-digits
Returns the normalized string version of the phone number.
|
juraj-google-style
|
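A usage sketch for `normalize_digits_only` above, assuming it is the helper from the `phonenumbers` Python port (expected outputs shown in comments):

normalize_digits_only("+1 (650) 253-0000")                        # '16502530000'
normalize_digits_only("+1 (650) 253-0000", keep_non_digits=True)  # '+1 (650) 253-0000'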
def sparse_intersection_indices_and_values(x1, x2):
ones1 = tf.sparse.map_values(ones_like_int8, x1)
ones2 = tf.sparse.map_values(ones_like_int8, x2)
intersection_extra_dim = tf.sets.intersection(tf.sparse.expand_dims(ones1, axis=-1), tf.sparse.expand_dims(ones2, axis=-1))
def empty_intersection():
return (tf.zeros((0, x1.shape.rank), dtype=tf.int64), tf.zeros((0,), dtype=x1.values.dtype), tf.zeros((0,), dtype=x2.values.dtype))
def non_empty_intersection():
intersection = tf.sparse.reshape(intersection_extra_dim, x1.dense_shape)
zeros1 = tf.sparse.map_values(zeros_like_int8, x1)
zeros2 = tf.sparse.map_values(zeros_like_int8, x2)
mask1 = tf.sparse.add(zeros1, intersection)
mask2 = tf.sparse.add(zeros2, intersection)
return (intersection.indices, tf.sparse.retain(x1, tf.cast(mask1.values, tf.bool)).values, tf.sparse.retain(x2, tf.cast(mask2.values, tf.bool)).values)
return tf.cond(tf.equal(tf.size(intersection_extra_dim), 0), empty_intersection, non_empty_intersection)
|
Compute the indices for the intersection of two `tf.SparseTensor`s and
modify the values for these indices.
Args:
x1: the first `tf.SparseTensor`.
x2: the second `tf.SparseTensor`.
Returns: A tuple containing:
- the indices for the intersection
- `x1` values for the intersection indices (some values were removed)
- `x2` values for the intersection indices (some values were removed)
|
github-repos
|
def parse(self, filename):
with io.open(filename, 'r', encoding='utf-8') as _:
lines = _.readlines()
all_source_files = set()
source_map = {}
lineno = 0
root = None
index = None
cur_level = (- 1)
parent_queue = []
for line in lines:
try:
(level, line) = dedent(line)
if line.startswith('#'):
lineno += 1
continue
elif line.startswith('\\#'):
line = line[1:]
except IndentError as exc:
error('bad-indent', 'Invalid indentation', filename=filename, lineno=lineno, column=exc.column)
if (not line):
lineno += 1
continue
source_file = dequote(line)
if (not source_file):
lineno += 1
continue
if (source_file in all_source_files):
error('sitemap-duplicate', 'Filename listed twice', filename=filename, lineno=lineno, column=((level * 8) + 1))
all_source_files.add(source_file)
source_map[source_file] = (lineno, ((level * 8) + 1))
page = OrderedDict()
if ((root is not None) and (level == 0)):
error('sitemap-error', 'Sitemaps only support one root', filename=filename, lineno=lineno, column=0)
if (root is None):
root = page
index = source_file
else:
lvl_diff = (cur_level - level)
while (lvl_diff >= 0):
parent_queue.pop()
lvl_diff -= 1
parent_queue[(- 1)][source_file] = page
parent_queue.append(page)
cur_level = level
lineno += 1
return Sitemap(root, filename, index, source_map)
|
Parse a sitemap file.
Args:
filename: str, the path to the sitemap file.
Returns:
Sitemap: the generated sitemap.
|
codesearchnet
|
def port_create_gre(br, port, id, remote):
if not 0 <= id < 2**32:
return False
elif not __salt__['dig.check_ip'](remote):
return False
elif not bridge_exists(br):
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} ' \
'options:key={3}'.format(br, port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
|
Generic Routing Encapsulation - creates GRE tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 32-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
|
juraj-google-style
|
def from_proto(context_def, import_scope=None):
ret = WhileContext(context_def=context_def, import_scope=import_scope)
ret.Enter()
for nested_def in context_def.nested_contexts:
from_control_flow_context_def(nested_def, import_scope=import_scope)
ret.Exit()
return ret
|
Returns a `WhileContext` object created from `context_def`.
Args:
context_def: A `WhileContextDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
Returns:
A `WhileContext` Python object.
|
github-repos
|
def _get_parameter_conversion_entry(parameter_config):
entry = _PARAM_CONVERSION_MAP.get(parameter_config.get('type'))
if ((entry is None) and ('enum' in parameter_config)):
entry = _PARAM_CONVERSION_MAP['enum']
return entry
|
Get information needed to convert the given parameter to its API type.
Args:
parameter_config: The dictionary containing information specific to the
parameter in question. This is retrieved from request.parameters in the
method config.
Returns:
The entry from _PARAM_CONVERSION_MAP with functions/information needed to
validate and convert the given parameter from a string to the type expected
by the API.
|
codesearchnet
|
def _execute_primitives(self, commands):
for p in commands:
if (self._scanchain and self._scanchain._debug):
print(' Executing', p)
p.execute(self)
|
Run a list of executable primitives on this controller, and distribute the returned data to the associated TDOPromises.
Args:
commands: A list of Executable Primitives to be run in order.
|
codesearchnet
|
def _write_log(self, version_key, meta_data, index_fields):
meta_data = meta_data or {}
meta_data.update({
'version_key': version_key,
'timestamp': time.time(),
})
obj = log_bucket.new(data=meta_data)
obj.add_index('version_key_bin', version_key)
obj.add_index('timestamp_int', int(meta_data['timestamp']))
for field, index_type in index_fields:
obj.add_index('%s_%s' % (field, index_type), meta_data.get(field, ""))
obj.store()
|
Creates a log entry for the current object.
Args:
version_key(str): Version_bucket key from _write_version().
meta_data (dict): JSON serializable meta data for logging of save operation.
{'lorem': 'ipsum', 'dolar': 5}
index_fields (list): Tuple list for secondary indexing keys in riak (with 'bin' or 'int').
[('lorem','bin'),('dolar','int')]
Returns:
|
juraj-google-style
|
def add_document(self, key, url, **kwargs):
document = self._check_metadata_for_file(key=key, url=url, **kwargs)
for dict_key in (
'description',
'fulltext',
'hidden',
'material',
'original_url',
'url',
'filename',
):
if kwargs.get(dict_key):
document[dict_key] = kwargs[dict_key]
if key_already_there(document, self.record.get('documents', ())):
raise ValueError(
'There\'s already a document with the key %s.'
% document['key']
)
self._append_to('documents', document)
|
Adds document to record
Args:
key (string): document key
url (string): document url
Keyword Args:
description (string): simple description
fulltext (bool): mark if this is a full text
hidden (bool): is document should be hidden
material (string):
original_url (string): original url
filename (string): current url
Returns: None
|
juraj-google-style
|
def module_help(self, module):
helplist = []
self._render_our_module_key_flags(module, helplist)
return '\n'.join(helplist)
|
Describes the key flags of a module.
Args:
module: module|str, the module to describe the key flags for.
Returns:
str, describing the key flags of a module.
|
codesearchnet
|
def _get_named_attributes(self):
for (cls, instance) in zip(self.get_class_attributes(), self._get_instance_attributes()):
(attr_name, cls_value) = cls
instance_value = instance[1]
(yield (attr_name, instance_value, cls_value))
|
Return a generator of each attribute's name, instance value and class value.
Unlike :meth:`_get_attributes`, the attribute name is included to produce a
better debugging message, so users can find errors more easily.
Returns:
generator: Tuple with attribute's name, instance and class values.
|
codesearchnet
|
def check_initialized(self):
for (name, field) in self.__by_name.items():
value = getattr(self, name)
if (value is None):
if field.required:
raise ValidationError(('Message %s is missing required field %s' % (type(self).__name__, name)))
else:
try:
if (isinstance(field, MessageField) and issubclass(field.message_type, Message)):
if field.repeated:
for item in value:
item_message_value = field.value_to_message(item)
item_message_value.check_initialized()
else:
message_value = field.value_to_message(value)
message_value.check_initialized()
except ValidationError as err:
if (not hasattr(err, 'message_name')):
err.message_name = type(self).__name__
raise
|
Check class for initialization status.
Check that all required fields are initialized
Raises:
ValidationError: If message is not initialized.
|
codesearchnet
|
def get_cudnn_version():
key = 'cudnn_ver'
cmds = cmds_all[PLATFORM.lower()][key]
out, err = run_shell_cmd(cmds[0])
if err and FLAGS.debug:
print('Error in finding `cudnn.h`:\n %s' % str(err))
if len(out.split(b' ')) > 1:
cmd = cmds[0] + ' | ' + cmds[1]
out_re, err_re = run_shell_cmd(cmd)
if err_re and FLAGS.debug:
print('Error in detecting cuDNN version:\n %s' % str(err_re))
return out_re.strip(b'\n')
else:
return
|
Retrieves the version of cuDNN library detected.
Returns:
String that is the version of cuDNN library detected.
e.g. '7.5.0'
|
github-repos
|
def GetSources(self, event):
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
file_system_type = getattr(event, 'file_system_type', 'UNKNOWN')
timestamp_desc = getattr(event, 'timestamp_desc', 'Time')
source_long = '{0:s} {1:s}'.format(file_system_type, timestamp_desc)
return self.SOURCE_SHORT, source_long
|
Determines the short and long source for an event object.
Args:
event (EventObject): event.
Returns:
tuple(str, str): short and long source string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
|
juraj-google-style
|
def _is_op_stateful(op):
if op.type == 'GlobalIterId':
return False
if op.type == 'UpdateFdoWithGlobalMinibatchStatistics':
return False
if op.type == 'CollectiveGatherV2' and op.get_attr('is_stateless'):
return False
if op.type == 'CollectiveAllToAllV2' and op.get_attr('is_stateless'):
return False
return op._is_stateful
|
Check whether an op is stateful.
This helper function handles two special cases to make the stateful analysis
consistent with the mlir side effect analysis.
1. GlobalIterIdOp should be stateless.
2. CollectiveGatherV2 with attribute is_stateless to be True should be
stateless.
Args:
op: Operation
Returns:
Boolean indicating whether the operation is stateful or not.
|
github-repos
|
def _CreateWindowsPathResolver(
self, file_system, mount_point, environment_variables):
if environment_variables is None:
environment_variables = []
path_resolver = windows_path_resolver.WindowsPathResolver(
file_system, mount_point)
for environment_variable in environment_variables:
name = environment_variable.name.lower()
if name not in ('systemroot', 'userprofile'):
continue
path_resolver.SetEnvironmentVariable(
environment_variable.name, environment_variable.value)
return path_resolver
|
Creates a Windows path resolver and sets the environment variables.
Args:
file_system (dfvfs.FileSystem): file system.
mount_point (dfvfs.PathSpec): mount point path specification.
environment_variables (list[EnvironmentVariableArtifact]): environment
variables.
Returns:
dfvfs.WindowsPathResolver: Windows path resolver.
|
juraj-google-style
|
def _info_from_string(info_string):
try:
json_value = json.loads(info_string)
except ValueError:
raise ValueError(('invalid JSON: %r' % (info_string,)))
if (not isinstance(json_value, dict)):
raise ValueError(('not a JSON object: %r' % (json_value,)))
if (json_value.get('version') != version.VERSION):
raise ValueError(('incompatible version: %r' % (json_value,)))
expected_keys = frozenset(_TENSORBOARD_INFO_FIELDS)
actual_keys = frozenset(json_value)
if (expected_keys != actual_keys):
raise ValueError(('bad keys on TensorBoardInfo (missing: %s; extraneous: %s)' % ((expected_keys - actual_keys), (actual_keys - expected_keys))))
for key in _TENSORBOARD_INFO_FIELDS:
field_type = _TENSORBOARD_INFO_FIELDS[key]
if (not isinstance(json_value[key], field_type.serialized_type)):
raise ValueError(('expected %r of type %s, but found: %r' % (key, field_type.serialized_type, json_value[key])))
json_value[key] = field_type.deserialize(json_value[key])
return TensorBoardInfo(**json_value)
|
Parse a `TensorBoardInfo` object from its string representation.
Args:
info_string: A string representation of a `TensorBoardInfo`, as
produced by a previous call to `_info_to_string`.
Returns:
A `TensorBoardInfo` value.
Raises:
ValueError: If the provided string is not valid JSON, or if it does
not represent a JSON object with a "version" field whose value is
`tensorboard.version.VERSION`, or if it has the wrong set of
fields, or if at least one field is of invalid type.
|
codesearchnet
|
class IntGELU(nn.Module):
def __init__(self, quant_mode=True, force_dequant='none'):
super().__init__()
self.quant_mode = quant_mode
if force_dequant in ['nonlinear', 'gelu']:
logger.info('Force dequantize gelu')
self.quant_mode = False
if not self.quant_mode:
self.activation_fn = nn.GELU()
self.k = 1.4142
self.const = 14
self.coeff = [-0.2888, -1.769, 1]
self.coeff[2] /= self.coeff[0]
def int_erf(self, x_int, scaling_factor):
b_int = torch.floor(self.coeff[1] / scaling_factor)
c_int = torch.floor(self.coeff[2] / scaling_factor ** 2)
sign = torch.sign(x_int)
abs_int = torch.min(torch.abs(x_int), -b_int)
y_int = sign * ((abs_int + b_int) ** 2 + c_int)
scaling_factor = scaling_factor ** 2 * self.coeff[0]
y_int = floor_ste.apply(y_int / 2 ** self.const)
scaling_factor = scaling_factor * 2 ** self.const
return (y_int, scaling_factor)
def forward(self, x, scaling_factor=None):
if not self.quant_mode:
return (self.activation_fn(x), None)
x_int = x / scaling_factor
sigmoid_int, sigmoid_scaling_factor = self.int_erf(x_int, scaling_factor / self.k)
shift_int = 1.0
x_int = x_int * (sigmoid_int + shift_int)
scaling_factor = scaling_factor * sigmoid_scaling_factor / 2
return (x_int * scaling_factor, scaling_factor)
|
Quantized version of `torch.nn.GELU`. Adds quantization-specific arguments on top of `torch.nn.GELU`.
Args:
quant_mode (`bool`, *optional*, defaults to `True`):
Whether or not the layer is quantized.
force_dequant (`str`, *optional*, defaults to `"none"`):
Force dequantize the layer if either "gelu" or "nonlinear" is given.
|
github-repos
|
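A minimal usage sketch for `IntGELU` above; the import path is an assumption (the class appears to ship with the I-BERT code in `transformers`), so adjust it if the module lives elsewhere.

import torch
from transformers.models.ibert.quant_modules import IntGELU  # assumed location

gelu = IntGELU(quant_mode=False)      # falls back to a plain nn.GELU
out, scale = gelu(torch.randn(2, 4))  # scale is None in floating-point mode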
def UpdateNumberOfEvents(self, number_of_consumed_events, number_of_produced_events):
consumed_events_delta = 0
if (number_of_consumed_events is not None):
if (number_of_consumed_events < self.number_of_consumed_events):
raise ValueError('Number of consumed events smaller than previous update.')
consumed_events_delta = (number_of_consumed_events - self.number_of_consumed_events)
self.number_of_consumed_events = number_of_consumed_events
self.number_of_consumed_events_delta = consumed_events_delta
produced_events_delta = 0
if (number_of_produced_events is not None):
if (number_of_produced_events < self.number_of_produced_events):
raise ValueError('Number of produced events smaller than previous update.')
produced_events_delta = (number_of_produced_events - self.number_of_produced_events)
self.number_of_produced_events = number_of_produced_events
self.number_of_produced_events_delta = produced_events_delta
return ((consumed_events_delta > 0) or (produced_events_delta > 0))
|
Updates the number of events.
Args:
number_of_consumed_events (int): total number of events consumed by
the process.
number_of_produced_events (int): total number of events produced by
the process.
Returns:
bool: True if either number of events has increased.
Raises:
ValueError: if the consumed or produced number of events is smaller
than the value of the previous update.
|
codesearchnet
|
def download_file_maybe_extract(url, directory, filename=None, extension=None, check_files=[]):
if (filename is None):
filename = _get_filename_from_url(url)
filepath = os.path.join(directory, filename)
check_files = [os.path.join(directory, f) for f in check_files]
if ((len(check_files) > 0) and _check_download(*check_files)):
return filepath
if (not os.path.isdir(directory)):
os.makedirs(directory)
logger.info('Downloading {}'.format(filename))
if ('drive.google.com' in url):
_download_file_from_drive(filepath, url)
else:
with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
urllib.request.urlretrieve(url, filename=filepath, reporthook=_reporthook(t))
_maybe_extract(compressed_filename=filepath, directory=directory, extension=extension)
if (not _check_download(*check_files)):
raise ValueError('[DOWNLOAD FAILED] `*check_files` not found')
return filepath
|
Download the file at ``url`` to ``directory``. Extract to ``directory`` if tar or zip.
Args:
url (str): Url of file.
directory (str): Directory to download to.
filename (str, optional): Name of the file to download; Otherwise, a filename is extracted
from the url.
extension (str, optional): Extension of the file; Otherwise, attempts to extract extension
from the filename.
check_files (list of str): Check if these files exist, ensuring the download succeeded.
If these files exist before the download, the download is skipped.
Returns:
(str): Filename of download file.
Raises:
ValueError: Error if one of the ``check_files`` are not found following the download.
|
codesearchnet
|
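The download call above passes `_reporthook(t)` to `urlretrieve`; that helper is not shown, so here is a hedged sketch of what such a tqdm-based report hook typically looks like (the name `make_reporthook` is illustrative):

from tqdm import tqdm

def make_reporthook(t: tqdm):
    """Return a urlretrieve-compatible hook that advances a tqdm progress bar."""
    last_blocks = [0]
    def hook(blocks_transferred, block_size, total_size):
        if total_size > 0:
            t.total = total_size
        t.update((blocks_transferred - last_blocks[0]) * block_size)
        last_blocks[0] = blocks_transferred
    return hook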
def wait(self, container, timeout=None, condition=None):
url = self._url('/containers/{0}/wait', container)
params = {}
if (condition is not None):
if utils.version_lt(self._version, '1.30'):
raise errors.InvalidVersion('wait condition is not supported for API version < 1.30')
params['condition'] = condition
res = self._post(url, timeout=timeout, params=params)
return self._result(res, True)
|
Block until a container stops, then return its exit code. Similar to
the ``docker wait`` command.
Args:
container (str or dict): The container to wait on. If a dict, the
``Id`` key is used.
timeout (int): Request timeout
condition (str): Wait until a container state reaches the given
condition, either ``not-running`` (default), ``next-exit``,
or ``removed``
Returns:
(dict): The API's response as a Python dictionary, including
the container's exit code under the ``StatusCode`` attribute.
Raises:
:py:class:`requests.exceptions.ReadTimeout`
If the timeout is exceeded.
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
def GetPresetByName(self, name):
name = name.lower()
return self._definitions.get(name, None)
|
Retrieves a specific preset definition by name.
Args:
name (str): name of the preset.
Returns:
ParserPreset: a parser preset or None if not available.
|
juraj-google-style
|
def send_status(status: 'EFBStatus'):
global middlewares, master
if status is None:
return
s: 'Optional[EFBStatus]' = status
for i in middlewares:
s = i.process_status(cast('EFBStatus', s))
if s is None:
return
status = cast('EFBStatus', s)
status.verify()
status.destination_channel.send_status(status)
|
Deliver a status to the destination channel.
Args:
status (EFBStatus): The status
|
juraj-google-style
|
def exists(self):
session = client.get_client().create_session()
ret = (self._base_query(session).count() > 0)
session.close()
return ret
|
Check if a target exists
This function is called by :mod:`luigi` to check if a task output exists. By default,
:mod:`luigi` considers a task as complete if all it targets (outputs) exist.
Returns:
bool: ``True`` if target exists, ``False`` otherwise
|
codesearchnet
|
def atol_for_validation(self) -> float:
return 0.0001
|
What absolute tolerance value to use during model conversion validation.
Returns:
Float absolute tolerance value.
|
github-repos
|
def program_to_text(program):
def label(node):
return '<%d>%s' % (node.id, node.name)
s = io.StringIO()
seen = set()
for node in cfg_utils.order_nodes(program.cfg_nodes):
seen.add(node)
s.write(f'{label(node)}\n')
incoming = ', '.join(label(n) for n in node.incoming)
outgoing = ', '.join(label(n) for n in node.outgoing)
s.write(f'  From: {incoming}\n')
s.write(f'  To: {outgoing}\n')
s.write('\n')
variables = {value.variable for value in node.bindings}
for var in sorted(variables, key=lambda v: v.id):
s.write(' %s\n' % _pretty_variable(var).replace('\n', '\n '))
s.write('\n')
return s.getvalue()
|
Generate a text (CFG nodes + assignments) version of a program.
For debugging only.
Args:
program: An instance of cfg.Program
Returns:
A string representing all of the data for this program.
|
github-repos
|
def group_id(self, resource_id):
if (self._name != 'group'):
self._request_uri = '{}/{}'.format(self._api_uri, resource_id)
|
Update the request URI to include the Group ID for specific group retrieval.
Args:
resource_id (string): The group id.
|
codesearchnet
|
def recreate_function(saved_function, concrete_functions):
function_spec = _deserialize_function_spec_as_nonmethod(saved_function.function_spec)
def restored_function_body(*args, **kwargs):
if not saved_function.concrete_functions:
raise ValueError('Found zero restored functions for caller function.')
inputs = (args, kwargs)
for allow_conversion in [False, True]:
for function_name in saved_function.concrete_functions:
function = concrete_functions[function_name]
if any([inp is None for inp in function.captured_inputs]):
raise ValueError('Looks like you are trying to run a loaded non-Keras model that was trained using tf.distribute.experimental.ParameterServerStrategy with variable partitioning, which is not currently supported. Try using Keras to define your model if possible.')
if _concrete_function_callable_with(function, inputs, allow_conversion):
return _call_concrete_function(function, inputs)
signature_descriptions = []
def _pretty_format_positional(positional):
return 'Positional arguments ({} total):\n * {}'.format(len(positional), '\n * '.join((pprint.pformat(a) for a in positional)))
for index, function_name in enumerate(saved_function.concrete_functions):
concrete_function = concrete_functions[function_name]
positional, keyword = concrete_function.structured_input_signature
signature_descriptions.append('Option {}:\n {}\n Keyword arguments: {}'.format(index + 1, _pretty_format_positional(positional), keyword))
raise ValueError(f'Could not find matching concrete function to call loaded from the SavedModel. Got:\n {_pretty_format_positional(args)}\n Keyword arguments: {kwargs}\n\n Expected these arguments to match one of the following {len(saved_function.concrete_functions)} option(s):\n\n{(chr(10) + chr(10)).join(signature_descriptions)}')
concrete_function_objects = []
for concrete_function_name in saved_function.concrete_functions:
concrete_function_objects.append(concrete_functions[concrete_function_name])
for cf in concrete_function_objects:
set_preinitialized_function_spec(cf, function_spec)
restored_function = RestoredFunction(restored_function_body, restored_function_body.__name__, function_spec, concrete_function_objects)
return tf_decorator.make_decorator(restored_function_body, restored_function, decorator_argspec=function_spec.fullargspec)
|
Creates a `Function` from a `SavedFunction`.
Args:
saved_function: `SavedFunction` proto.
concrete_functions: map from function name to `ConcreteFunction`. As a side
effect of this function, the `FunctionSpec` from `saved_function` is added
to each `ConcreteFunction` in this map.
Returns:
A `Function`.
|
github-repos
|
def uninstall(path, restart=False):
cmd = ['wusa.exe', '/uninstall', '/quiet']
kb = os.path.splitext(os.path.basename(path))[0]
if os.path.exists(path):
cmd.append(path)
else:
cmd.append('/kb:{0}'.format((kb[2:] if kb.lower().startswith('kb') else kb)))
if restart:
cmd.append('/forcerestart')
else:
cmd.append('/norestart')
ret_code = __salt__['cmd.retcode'](cmd, ignore_retcode=True)
errors = {(- 2145116156): '{0} does not support uninstall'.format(kb), 2359303: '{0} not installed'.format(kb), 87: 'Unknown error. Try specifying an .msu file'}
if (ret_code in errors):
raise CommandExecutionError(errors[ret_code])
elif ret_code:
raise CommandExecutionError('Unknown error: {0}'.format(ret_code))
return True
|
Uninstall a specific KB.
Args:
path (str):
The full path to the msu file to uninstall. This can also be just
the name of the KB to uninstall
restart (bool):
``True`` to force a restart if required by the installation. Adds
the ``/forcerestart`` switch to the ``wusa.exe`` command. ``False``
will add the ``/norestart`` switch instead. Default is ``False``
Returns:
bool: ``True`` if successful, otherwise ``False``
Raises:
CommandExecutionError: If an error is encountered
CLI Example:
.. code-block:: bash
salt '*' wusa.uninstall KB123456
# or
salt '*' wusa.uninstall C:/temp/KB123456.msu
|
codesearchnet
|
def __getitem__(cls, args):
type_, bound, keyfunc = cls._get_args(args)
keyfunc_name = cls._get_fullname(keyfunc)
identity = cls._identity
BaseClass, MetaClass = cls._get_bases(type_)
instantiate = cls._instantiate
@six.add_metaclass(MetaClass)
class _BoundedSubclass(BaseClass):
def __new__(cls, __value, *args, **kwargs):
instance = instantiate(
BaseClass, type_, __value, *args, **kwargs
)
cmp_val = keyfunc(instance)
if bound.start is not None or bound.stop is not None:
if bound.start is not None and cmp_val < bound.start:
if keyfunc is not identity:
raise ValueError(
"The value of {}({}) [{}] is below the minimum"
" allowed value of {}.".format(
keyfunc_name,
repr(__value),
repr(cmp_val),
bound.start,
)
)
raise ValueError(
"The value {} is below the minimum allowed value "
"of {}.".format(repr(__value), bound.start)
)
if bound.stop is not None and cmp_val > bound.stop:
if keyfunc is not identity:
raise ValueError(
"The value of {}({}) [{}] is above the maximum"
" allowed value of {}.".format(
keyfunc_name,
repr(__value),
repr(cmp_val),
bound.stop,
)
)
raise ValueError(
"The value {} is above the maximum allowed value "
"of {}.".format(repr(__value), bound.stop)
)
elif not cmp_val:
raise ValueError(
"{}({}) is False".format(keyfunc_name, repr(instance))
)
return instance
_BoundedSubclass.__type__ = type_
_BoundedSubclass.__class_repr__ = cls._get_class_repr(
type_, bound, keyfunc, keyfunc_name
)
return _BoundedSubclass
|
Create a new subclass of a type bounded by the arguments.
If a callable is passed as the third argument of the slice, it will be
used as the comparison function for the boundaries.
Args:
args: A tuple with two or three parameters: a type, a slice
representing the minimum and maximum lengths allowed for values
of that type and, optionally, a function to use on values
before comparing against the bounds.
|
juraj-google-style
|
def apply_range_set(self, hist: Hist) -> None:
axis = self.axis(hist)
assert (not isinstance(self.min_val, float))
assert (not isinstance(self.max_val, float))
min_val = self.min_val(axis)
max_val = self.max_val(axis)
axis.SetRange(min_val, max_val)
|
Apply the associated range set to the axis of a given hist.
Note:
The min and max values should be bins, not user ranges! For more, see the binning
explanation in ``apply_func_to_find_bin(...)``.
Args:
hist: Histogram to which the axis range restriction should be applied.
Returns:
None. The range is set on the axis.
|
codesearchnet
|
def RunStateMethod(self,
method_name,
request=None,
responses=None,
event=None,
direct_response=None):
client_id = None
try:
self.context.current_state = method_name
if request and responses:
client_id = request.client_id or self.runner_args.client_id
logging.debug("%s Running %s with %d responses from %s",
self.session_id, method_name, len(responses), client_id)
else:
logging.debug("%s Running state method %s", self.session_id,
method_name)
self.hunt_obj.HeartBeat()
try:
method = getattr(self.hunt_obj, method_name)
except AttributeError:
raise flow_runner.FlowRunnerError(
"Flow %s has no state method %s" %
(self.hunt_obj.__class__.__name__, method_name))
if direct_response:
method(direct_response)
elif method_name == "Start":
method()
else:
responses = flow_responses.Responses.FromLegacyResponses(
request=request, responses=responses)
if responses.status:
self.SaveResourceUsage(request.client_id, responses.status)
stats_collector_instance.Get().IncrementCounter("grr_worker_states_run")
method(responses)
except Exception as e:
stats_collector_instance.Get().IncrementCounter("grr_flow_errors")
stats_collector_instance.Get().IncrementCounter(
"flow_errors", fields=[self.hunt_obj.Name()])
logging.exception("Hunt %s raised %s.", self.session_id, e)
self.Error(traceback.format_exc(), client_id=client_id)
finally:
if event:
event.set()
|
Completes the request by calling the state method.
Args:
method_name: The name of the state method to call.
request: A RequestState protobuf.
responses: A list of GrrMessages responding to the request.
event: A threading.Event() instance to signal completion of this request.
direct_response: A flow.Responses() object can be provided to avoid
creation of one.
|
juraj-google-style
|
def add(self, resource, provider_uri_or_id, timeout=(- 1)):
uri = (self._provider_client.build_uri(provider_uri_or_id) + '/device-managers')
return self._client.create(resource=resource, uri=uri, timeout=timeout)
|
Adds a Device Manager under the specified provider.
Args:
resource (dict): Object to add.
provider_uri_or_id: ID or URI of provider.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView, just stop waiting for its completion.
Returns:
dict: Added SAN Manager.
|
codesearchnet
|
def get_manual_homology_models(self, input_dict, outdir=None, clean=True, force_rerun=False):
if outdir:
outdir_set = True
else:
outdir_set = False
counter = 0
for g in tqdm(self.genes):
if (g.id not in input_dict):
continue
if (not outdir_set):
outdir = g.protein.structure_dir
if (not outdir):
raise ValueError('Output directory must be specified')
for (hid, hdict) in input_dict[g.id].items():
if (('model_file' not in hdict) or ('file_type' not in hdict)):
raise KeyError('"model_file" and "file_type" must be keys in the manual input dictionary.')
new_homology = g.protein.load_pdb(pdb_id=hid, pdb_file=hdict['model_file'], file_type=hdict['file_type'], is_experimental=False)
if clean:
new_homology.load_structure_path(new_homology.clean_structure(outdir=outdir, force_rerun=force_rerun), hdict['file_type'])
else:
copy_to = op.join(outdir, op.basename(hdict['model_file']))
if ssbio.utils.force_rerun(force_rerun, copy_to):
log.debug('{}: copying model from original directory to GEM-PRO directory'.format(op.basename(hdict['model_file'])))
shutil.copy2(hdict['model_file'], outdir)
new_homology.load_structure_path(copy_to, hdict['file_type'])
else:
log.debug('{}: homology model already copied to directory'.format(copy_to))
new_homology.load_structure_path(copy_to, hdict['file_type'])
new_homology.update(hdict)
log.debug('{}: updated homology model information and copied model file.'.format(g.id))
counter += 1
log.info('Updated homology model information for {} genes.'.format(counter))
|
Copy homology models to the GEM-PRO project.
Requires an input of a dictionary formatted like so::
{
model_gene: {
homology_model_id1: {
'model_file': '/path/to/homology/model.pdb',
'file_type': 'pdb'
'additional_info': info_value
},
homology_model_id2: {
'model_file': '/path/to/homology/model.pdb'
'file_type': 'pdb'
}
}
}
Args:
input_dict (dict): Dictionary of dictionaries of gene names to homology model IDs and other information
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
clean (bool): If homology files should be cleaned and saved as a new PDB file
force_rerun (bool): If homology files should be copied again even if they exist in the GEM-PRO directory
|
codesearchnet
|
def remove_tag(self, tag):
return self._remove_hdxobject(self.data.get('tags'), tag, matchon='name')
|
Remove a tag
Args:
tag (str): Tag to remove
Returns:
bool: True if tag removed or False if not
|
juraj-google-style
|
def expectation(self, function):
return self._expectation(function)
|
Returns an estimate of the expectation value of the given function.
Args:
function: Mapping from a 2D tensor of bitstrings to a possibly nested
structure. The structure must have atomic elements all of which are
float tensors with the same batch size as the input bitstrings.
|
github-repos
|
def kmeans_pp(data, k, centers=None):
genes, cells = data.shape
if sparse.issparse(data) and not sparse.isspmatrix_csc(data):
data = sparse.csc_matrix(data)
num_known_centers = 0
if centers is None:
centers = np.zeros((genes, k))
else:
num_known_centers = centers.shape[1]
centers = np.concatenate((centers, np.zeros((genes, k-num_known_centers))), 1)
distances = np.zeros((cells, k))
distances[:] = np.inf
if num_known_centers == 0:
init = np.random.randint(0, cells)
if sparse.issparse(data):
centers[:,0] = data[:, init].toarray().flatten()
else:
centers[:,0] = data[:, init]
num_known_centers+=1
available_cells = list(range(cells))
for c in range(num_known_centers, k):
c2 = c-1
if sparse.issparse(data):
lls = poisson_ll(data, centers[:,c2:c2+1]).flatten()
distances[:,c2] = 1 + lls.max() - lls
distances[:,c2] /= distances[:,c2].max()
else:
for cell in range(cells):
distances[cell, c2] = poisson_dist(data[:,cell], centers[:,c2])
min_distances = np.min(distances, 1)
min_distances = min_distances**2
min_distances = min_distances[available_cells]
min_dist = np.random.choice(available_cells,
p=min_distances/min_distances.sum())
available_cells.pop(available_cells.index(min_dist))
if sparse.issparse(data):
centers[:,c] = data[:, min_dist].toarray().flatten()
else:
centers[:,c] = data[:, min_dist]
lls = poisson_ll(data, centers)
new_assignments = np.argmax(lls, 1)
centers[centers==0.0] = eps
return centers, new_assignments
|
Generates kmeans++ initial centers.
Args:
data (array): A 2d array- genes x cells
k (int): Number of clusters
centers (array, optional): if provided, these are one or more known cluster centers. 2d array of genes x number of centers (<=k).
Returns:
centers - a genes x k array of cluster means.
assignments - a cells x 1 array of cluster assignments
|
juraj-google-style
|
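The function above relies on Poisson log-likelihood helpers (`poisson_ll`, `poisson_dist`) that are not shown; as a hedged illustration of the same k-means++ seeding idea, here is a standalone sketch that swaps in squared Euclidean distance:

import numpy as np

def kmeans_pp_euclidean(points, k, seed=None):
    """points: (n_samples, n_features) array. Returns a (k, n_features) array of initial centers."""
    rng = np.random.default_rng(seed)
    n = points.shape[0]
    centers = [points[rng.integers(n)]]
    for _ in range(1, k):
        # Squared distance from every point to its closest already-chosen center.
        diffs = points[:, None, :] - np.asarray(centers)[None, :, :]
        d2 = np.min((diffs ** 2).sum(axis=-1), axis=1)
        # Sample the next center proportionally to that distance.
        centers.append(points[rng.choice(n, p=d2 / d2.sum())])
    return np.asarray(centers)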
def channels_replies(self, *, channel: str, thread_ts: str, **kwargs) -> SlackResponse:
kwargs.update({"channel": channel, "thread_ts": thread_ts})
return self.api_call("channels.replies", http_verb="GET", params=kwargs)
|
Retrieve a thread of messages posted to a channel
Args:
channel (str): The channel id. e.g. 'C1234567890'
thread_ts (str): The timestamp of an existing message with 0 or more replies.
e.g. '1234567890.123456'
|
juraj-google-style
|
def CopyAttributesFromSessionCompletion(self, session_completion):
if self.identifier != session_completion.identifier:
raise ValueError('Session identifier mismatch.')
self.aborted = session_completion.aborted
if session_completion.analysis_reports_counter:
self.analysis_reports_counter = (
session_completion.analysis_reports_counter)
self.completion_time = session_completion.timestamp
if session_completion.event_labels_counter:
self.event_labels_counter = session_completion.event_labels_counter
if session_completion.parsers_counter:
self.parsers_counter = session_completion.parsers_counter
|
Copies attributes from a session completion.
Args:
session_completion (SessionCompletion): session completion attribute
container.
Raises:
ValueError: if the identifier of the session completion does not match
that of the session.
|
juraj-google-style
|
def set_maximum(self, q_data, marked, center, bin_lower, foothills):
as_bin = []
as_glob = []
marked_so_far = []
will_be_considered_again = False
as_bin.append(center)
center_data = q_data[center]
while (len(as_bin) > 0):
p = as_bin.pop((- 1))
if (marked[p] != self.UNMARKED):
continue
marked[p] = q_data[center]
marked_so_far.append(p)
for (index, val) in np.ndenumerate(marked[(p[0] - 1):(p[0] + 2), (p[1] - 1):(p[1] + 2)]):
if (val == self.UNMARKED):
pixel = (((index[0] - 1) + p[0]), ((index[1] - 1) + p[1]))
p_data = q_data[pixel]
if ((not will_be_considered_again) and (p_data >= 0) and (p_data < center_data)):
will_be_considered_again = True
if ((p_data >= bin_lower) and (np.abs((center_data - p_data)) <= self.delta)):
as_bin.append(pixel)
elif (p_data >= 0):
as_glob.append(pixel)
if (bin_lower == 0):
will_be_considered_again = False
big_enough = (len(marked_so_far) >= self.max_size)
if big_enough:
foothills.append((center, as_glob))
elif will_be_considered_again:
for m in marked_so_far:
marked[m] = self.UNMARKED
del as_bin[:]
del as_glob[:]
del marked_so_far[:]
return (big_enough or (not will_be_considered_again))
|
Grow a region at a certain bin level and check if the region has reached the maximum size.
Args:
q_data: Quantized data array
marked: Array marking points that are objects
center: Coordinates of the center pixel of the region being grown
bin_lower: Intensity level of lower bin being evaluated
foothills: List of points that are associated with a center but fall outside the size or
intensity criteria
Returns:
True if the object is finished growing and False if the object should be grown again at the next
threshold level.
|
codesearchnet
|
def load_bmp(path):
surface = object.__new__(Surface)
surface._ptr = check_ptr_err(lib.SDL_LoadBMP_RW(lib.SDL_RWFromFile(path, 'rb'), 1))
return surface
|
Load a surface from a file.
Args:
path (str): Path to the BMP file to load.
Returns:
Surface: A surface containing the pixels loaded from the file.
Raises:
SDLError: If the file cannot be loaded.
|
codesearchnet
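Example (a short usage sketch; 'example.bmp' is a placeholder path):
surface = load_bmp('example.bmp')  # raises SDLError if the file cannot be loaded
# `surface` now wraps the pixel data read from the BMP file.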
|
def _to_values_def(self, export_scope=None):
values_def = control_flow_pb2.ValuesDef()
values_def.values.extend([ops.strip_name_scope(v, export_scope) for v in sorted(self._values)])
for k, v in self._external_values.items():
k = ops.strip_name_scope(k, export_scope)
values_def.external_values[k] = ops.strip_name_scope(v.name, export_scope)
return values_def
|
Converts the values to a `ValuesDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `ValuesDef` protocol buffer.
|
github-repos
|
def pickle_load(cls, filepath):
if os.path.isdir(filepath):
for (dirpath, dirnames, filenames) in os.walk(filepath):
fnames = [f for f in filenames if (f == cls.PICKLE_FNAME)]
if fnames:
if (len(fnames) == 1):
filepath = os.path.join(dirpath, fnames[0])
break
else:
err_msg = ('Found multiple databases:\n %s' % str(fnames))
raise RuntimeError(err_msg)
else:
err_msg = ('Cannot find %s inside directory %s' % (cls.PICKLE_FNAME, filepath))
raise ValueError(err_msg)
with open(filepath, 'rb') as fh:
new = pickle.load(fh)
from .flows import Flow
(flow_workdirs, new.flows) = (new.flows, [])
for flow in map(Flow.pickle_load, flow_workdirs):
new.add_flow(flow)
return new
|
Loads the object from a pickle file.
Args:
filepath: Filename or directory name. If filepath is a directory, we
scan the directory tree starting from filepath and we
read the first pickle database. Raise RuntimeError if multiple
databases are found.
|
codesearchnet
|
def _check_self_to_empty(self, stateid):
x_term = stateid.rfind('@')
y_term = stateid.rfind('A')
if y_term > x_term:
x_term = y_term
ids = stateid[x_term + 1:].split(',')
if len(ids) < 2:
return 0
if ids[0] == ids[1]:
return 1
return 0
|
Because of the optimization, the rule for empty states is missing,
so the check takes place at runtime.
Args:
stateid (str): The state identifier
Returns:
bool: A true or false response
|
juraj-google-style
|
def GetArchiveTypeIndicators(cls, path_spec, resolver_context=None):
if (cls._archive_remainder_list is None or
cls._archive_store is None):
specification_store, remainder_list = cls._GetSpecificationStore(
definitions.FORMAT_CATEGORY_ARCHIVE)
cls._archive_remainder_list = remainder_list
cls._archive_store = specification_store
if cls._archive_scanner is None:
cls._archive_scanner = cls._GetSignatureScanner(cls._archive_store)
return cls._GetTypeIndicators(
cls._archive_scanner, cls._archive_store,
cls._archive_remainder_list, path_spec,
resolver_context=resolver_context)
|
Determines if a file contains supported archive types.
Args:
path_spec (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built-in context which is not multi process safe.
Returns:
list[str]: supported format type indicators.
|
juraj-google-style
|
def check_url(url):
request = urllib2.Request(url)
try:
response = urlopen(request)
return (True, response.code)
except urllib2.HTTPError as e:
return (False, e.code)
|
Check if the resource at URL is fetchable (by trying to fetch it and checking for a 200 status).
Args:
url (str): Url to check.
Returns:
Returns a tuple of {True/False, response code}
|
codesearchnet
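Example (hedged usage sketch; the URL is a placeholder):
ok, status = check_url('http://example.com/resource')
if not ok:
    print('Resource not fetchable, HTTP status:', status)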
|
def retrieve_info_for_model(model_type, frameworks: Optional[List[str]]=None):
if model_type not in auto_module.MODEL_NAMES_MAPPING:
raise ValueError(f'{model_type} is not a valid model type.')
model_name = auto_module.MODEL_NAMES_MAPPING[model_type]
config_class = auto_module.configuration_auto.CONFIG_MAPPING_NAMES[model_type]
if model_type in auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES:
tokenizer_classes = auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES[model_type]
tokenizer_class = tokenizer_classes[0] if tokenizer_classes[0] is not None else tokenizer_classes[1]
else:
tokenizer_class = None
image_processor_classes = auto_module.image_processing_auto.IMAGE_PROCESSOR_MAPPING_NAMES.get(model_type, None)
if isinstance(image_processor_classes, tuple):
image_processor_class, image_processor_fast_class = image_processor_classes
else:
image_processor_class = image_processor_classes
image_processor_fast_class = None
feature_extractor_class = auto_module.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES.get(model_type, None)
processor_class = auto_module.processing_auto.PROCESSOR_MAPPING_NAMES.get(model_type, None)
model_files = get_model_files(model_type, frameworks=frameworks)
model_camel_cased = config_class.replace('Config', '')
available_frameworks = []
for fname in model_files['model_files']:
if 'modeling_tf' in str(fname):
available_frameworks.append('tf')
elif 'modeling_flax' in str(fname):
available_frameworks.append('flax')
elif 'modeling' in str(fname):
available_frameworks.append('pt')
if frameworks is None:
frameworks = get_default_frameworks()
frameworks = [f for f in frameworks if f in available_frameworks]
model_classes = retrieve_model_classes(model_type, frameworks=frameworks)
model_upper_cased = model_camel_cased.upper()
model_patterns = ModelPatterns(model_name, checkpoint=find_base_model_checkpoint(model_type, model_files=model_files), model_type=model_type, model_camel_cased=model_camel_cased, model_lower_cased=model_files['module_name'], model_upper_cased=model_upper_cased, config_class=config_class, tokenizer_class=tokenizer_class, image_processor_class=image_processor_class, image_processor_fast_class=image_processor_fast_class, feature_extractor_class=feature_extractor_class, processor_class=processor_class)
return {'frameworks': frameworks, 'model_classes': model_classes, 'model_files': model_files, 'model_patterns': model_patterns}
|
Retrieves all the information from a given model_type.
Args:
model_type (`str`): A valid model type (like "bert" or "gpt2")
frameworks (`List[str]`, *optional*):
If passed, will only keep the info corresponding to the passed frameworks.
Returns:
`Dict`: A dictionary with the following keys:
- **frameworks** (`List[str]`): The list of frameworks that back this model type.
- **model_classes** (`Dict[str, List[str]]`): The model classes implemented for that model type.
- **model_files** (`Dict[str, Union[Path, List[Path]]]`): The files associated with that model type.
- **model_patterns** (`ModelPatterns`): The various patterns for the model.
|
github-repos
|
def percent_point(self, U):
self.check_fit()
if not 0 < U < 1:
raise ValueError('cdf value must be in [0,1]')
return scipy.optimize.brentq(self.cumulative_distribution, -1000.0, 1000.0, args=(U))
|
Given a cdf value, returns a value in original space.
Args:
U: `int` or `float` cdf value in [0,1]
Returns:
float: value in original space
|
juraj-google-style
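Example (a minimal sketch, assuming `dist` is an already-fitted univariate instance exposing the method above):
median = dist.percent_point(0.5)  # value in the original space whose CDF is 0.5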
|
def get_surveys(self):
payload = {
'Request': 'getSurveys',
'Format': 'JSON'
}
r = self._session.get(QUALTRICS_URL, params=payload)
output = r.json()
return output['Result']['Surveys']
|
Gets all surveys in account
Args:
None
Returns:
list: a list of all surveys
|
juraj-google-style
|
def eval_algorithm(closing, low, high):
if high - low == 0:
return 100 * (closing - low)
else:
return 100 * (closing - low) / (high - low)
|
Evaluates the SO algorithm
Args:
closing: Float of current closing price.
low: Float of lowest low closing price throughout some duration.
high: Float of highest high closing price throughout some duration.
Returns:
Float SO between 0 and 100.
|
juraj-google-style
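Worked example of the stochastic oscillator formula above (values are illustrative):
so = eval_algorithm(closing=105.0, low=100.0, high=110.0)
# 100 * (105 - 100) / (110 - 100) = 50.0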
|
def build_from_token_counts(self, token_counts, min_count, num_iterations=4):
self._init_alphabet_from_tokens(six.iterkeys(token_counts))
self._init_subtokens_from_list(list(self._alphabet))
if min_count < 1:
min_count = 1
for i in xrange(num_iterations):
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
escaped_token = _escape_token(token, self._alphabet)
subtokens = self._escaped_token_to_subtoken_strings(escaped_token)
start = 0
for subtoken in subtokens:
for end in xrange(start + 1, len(escaped_token) + 1):
new_subtoken = escaped_token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
len_to_subtoken_strings = []
for subtoken_string, count in six.iteritems(subtoken_counts):
lsub = len(subtoken_string)
if count >= min_count:
while len(len_to_subtoken_strings) <= lsub:
len_to_subtoken_strings.append(set())
len_to_subtoken_strings[lsub].add(subtoken_string)
new_subtoken_strings = []
for lsub in xrange(len(len_to_subtoken_strings) - 1, 0, -1):
subtoken_strings = len_to_subtoken_strings[lsub]
for subtoken_string in subtoken_strings:
count = subtoken_counts[subtoken_string]
if count >= min_count:
if subtoken_string not in self._alphabet:
new_subtoken_strings.append((count, subtoken_string))
for l in xrange(1, lsub):
subtoken_counts[subtoken_string[:l]] -= count
new_subtoken_strings.extend((subtoken_counts.get(a, 0), a) for a in self._alphabet)
new_subtoken_strings.sort(reverse=True)
self._init_subtokens_from_list([subtoken for _, subtoken in new_subtoken_strings])
|
Train a SubwordTextTokenizer based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer; how many iterations of refinement.
|
juraj-google-style
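Example (a hedged sketch, assuming `tokenizer` is a SubwordTextTokenizer instance; the token counts are made up):
token_counts = {u'hello': 5, u'world': 3, u'hell': 2}
tokenizer.build_from_token_counts(token_counts, min_count=2, num_iterations=4)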
|
def register(self, task_json=None, json_filename=None):
if ((not task_json) and (not json_filename)):
raise Exception("Both task json and filename can't be none.")
if (task_json and json_filename):
raise Exception("Both task json and filename can't be provided.")
if json_filename:
task_json = json.load(open(json_filename, 'r'))
r = self.gbdx_connection.post(self._base_url, json=task_json)
raise_for_status(r)
return r.text
|
Registers a new GBDX task.
Args:
task_json (dict): Dictionary representing task definition.
json_filename (str): A full path of a file with json representing the task definition.
Only one out of task_json and json_filename should be provided.
Returns:
Response (str).
|
codesearchnet
|
def FromDBInstance(db_token):
hash_ar = bytearray(binascii.unhexlify(db_token.ContractHash))
hash_ar.reverse()
hash = UInt160(data=hash_ar)
token = NEP5Token(script=None)
token.SetScriptHash(hash)
token.name = db_token.Name
token.symbol = db_token.Symbol
token.decimals = db_token.Decimals
return token
|
Get a NEP5Token instance from a database token.
Args:
db_token (neo.Implementations.Wallets.peewee.Models.NEP5Token):
Returns:
NEP5Token: the token instance created from the database token.
|
juraj-google-style
|
def post_process(self, outputs, target_sizes):
logger.warning_once('`post_process` is deprecated and will be removed in v5 of Transformers, please use `post_process_object_detection` instead, with `threshold=0.` for equivalent results.')
out_logits, out_bbox = (outputs.logits, outputs.pred_boxes)
if len(out_logits) != len(target_sizes):
raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
if target_sizes.shape[1] != 2:
raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch')
prob = out_logits.sigmoid()
topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
scores = topk_values
topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor')
labels = topk_indexes % out_logits.shape[2]
boxes = center_to_corners_format(out_bbox)
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]
return results
|
Converts the raw output of [`YolosForObjectDetection`] into final bounding boxes in (top_left_x,
top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
Args:
outputs ([`YolosObjectDetectionOutput`]):
Raw outputs of the model.
target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the
original image size (before any data augmentation). For visualization, this should be the image size
after data augmentation, but before padding.
Returns:
`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model.
|
github-repos
|
def dump_table_as_insert_sql(engine: Engine, table_name: str, fileobj: TextIO, wheredict: Dict[str, Any] = None, include_ddl: bool = False, multirow: bool = False) -> None:
log.info('dump_data_as_insert_sql: table_name={}', table_name)
writelines_nl(fileobj, [SEP1, sql_comment('Data for table: {}'.format(table_name)), SEP2, sql_comment('Filters: {}'.format(wheredict))])
dialect = engine.dialect
if (not dialect.supports_multivalues_insert):
multirow = False
if multirow:
log.warning('dump_data_as_insert_sql: multirow parameter substitution not working yet')
multirow = False
meta = MetaData(bind=engine)
log.debug('... retrieving schema')
table = Table(table_name, meta, autoload=True)
if include_ddl:
log.debug('... producing DDL')
dump_ddl(table.metadata, dialect_name=engine.dialect.name, fileobj=fileobj)
log.debug('... fetching records')
query = select(table.columns)
if wheredict:
for (k, v) in wheredict.items():
col = table.columns.get(k)
query = query.where((col == v))
cursor = engine.execute(query)
if multirow:
row_dict_list = []
for r in cursor:
row_dict_list.append(dict(r))
if row_dict_list:
statement = table.insert().values(row_dict_list)
insert_str = get_literal_query(statement, bind=engine)
writeline_nl(fileobj, insert_str)
else:
writeline_nl(fileobj, sql_comment('No data!'))
else:
found_one = False
for r in cursor:
found_one = True
row_dict = dict(r)
statement = table.insert(values=row_dict)
insert_str = get_literal_query(statement, bind=engine)
writeline_nl(fileobj, insert_str)
if (not found_one):
writeline_nl(fileobj, sql_comment('No data!'))
writeline_nl(fileobj, SEP2)
log.debug('... done')
|
Reads a table from the database, and writes SQL to replicate the table's
data to the output ``fileobj``.
Args:
engine: SQLAlchemy :class:`Engine`
table_name: name of the table
fileobj: file-like object to write to
wheredict: optional dictionary of ``{column_name: value}`` to use as
``WHERE`` filters
include_ddl: if ``True``, include the DDL to create the table as well
multirow: write multi-row ``INSERT`` statements
|
codesearchnet
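Example (a minimal sketch under assumed names: the SQLite file 'example.db' and the table 'patients' are placeholders):
from io import StringIO
from sqlalchemy import create_engine

engine = create_engine('sqlite:///example.db')
buf = StringIO()
dump_table_as_insert_sql(engine, 'patients', buf, wheredict={'deleted': 0}, include_ddl=True)
print(buf.getvalue())  # SQL comments plus INSERT statements (or a 'No data!' comment)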
|
def from_dict(cls, d):
labels_dict = d['labels_dict']
projections = {}
structure = None
if isinstance(list(d['bands'].values())[0], dict):
eigenvals = {Spin(int(k)): np.array(d['bands'][k]['data'])
for k in d['bands']}
else:
eigenvals = {Spin(int(k)): d['bands'][k] for k in d['bands']}
if 'structure' in d:
structure = Structure.from_dict(d['structure'])
if d.get('projections'):
projections = {Spin(int(spin)): np.array(v)
for spin, v in d["projections"].items()}
return BandStructure(
d['kpoints'], eigenvals,
Lattice(d['lattice_rec']['matrix']), d['efermi'],
labels_dict, structure=structure, projections=projections)
|
Create from dict.
Args:
d (dict): A dict with all data for a band structure object.
Returns:
A BandStructure object
|
juraj-google-style
|
def from_dict(cls, image_processor_dict: dict[str, Any], **kwargs):
image_processor_dict = image_processor_dict.copy()
return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)
if 'size' in kwargs and 'size' in image_processor_dict:
image_processor_dict['size'] = kwargs.pop('size')
if 'crop_size' in kwargs and 'crop_size' in image_processor_dict:
image_processor_dict['crop_size'] = kwargs.pop('crop_size')
image_processor = cls(**image_processor_dict)
to_remove = []
for key, value in kwargs.items():
if hasattr(image_processor, key):
setattr(image_processor, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info(f'Image processor {image_processor}')
if return_unused_kwargs:
return (image_processor, kwargs)
else:
return image_processor
|
Instantiates a type of [`~image_processing_utils.ImageProcessingMixin`] from a Python dictionary of parameters.
Args:
image_processor_dict (`Dict[str, Any]`):
Dictionary that will be used to instantiate the image processor object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the
[`~image_processing_utils.ImageProcessingMixin.to_dict`] method.
kwargs (`Dict[str, Any]`):
Additional parameters from which to initialize the image processor object.
Returns:
[`~image_processing_utils.ImageProcessingMixin`]: The image processor object instantiated from those
parameters.
|
github-repos
|
def document(self, name, file_name, owner=None, **kwargs):
return Document(self.tcex, name, file_name, owner=owner, **kwargs)
|
Create the Document TI object.
Args:
owner:
name:
file_name:
**kwargs:
Return:
|
juraj-google-style
|
def evaluate_forward(distribution, x_data, parameters=None, cache=None):
assert (len(x_data) == len(distribution)), ('distribution %s is not of length %d' % (distribution, len(x_data)))
assert hasattr(distribution, '_cdf'), 'distribution require the `_cdf` method to function.'
cache = (cache if (cache is not None) else {})
parameters = load_parameters(distribution, '_cdf', parameters=parameters, cache=cache)
cache[distribution] = x_data
out = numpy.zeros(x_data.shape)
out[:] = distribution._cdf(x_data, **parameters)
return out
|
Evaluate forward Rosenblatt transformation.
Args:
distribution (Dist):
Distribution to evaluate.
x_data (numpy.ndarray):
Locations for where evaluate forward transformation at.
parameters (:py:data:typing.Any):
Collection of parameters to override the default ones in the
distribution.
cache (:py:data:typing.Any):
A collection of previous calculations in case the same distribution
turns up on more than one occasion.
Returns:
The cumulative distribution values of ``distribution`` at location
``x_data`` using parameters ``parameters``.
|
codesearchnet
|
def read(self, length=-1):
if 0 <= length < len(self):
newpos = self.pos + length
data = self.buf[self.pos:newpos]
self.pos = newpos
self.__discard()
return data
data = self.buf[self.pos:]
self.clear()
return data
|
Reads from the FIFO.
Reads as much data as possible from the FIFO up to the specified
length. If the length argument is negative or omitted, all data
currently available in the FIFO will be read. If there is no data
available in the FIFO an empty string is returned.
Args:
length: The amount of data to read from the FIFO. Defaults to -1.
|
juraj-google-style
|
def make_sine_surface(dims=DEFAULT_DIMS, offset=0.5, scale=1.0):
gradients = (((np.array(make_gradients(dims)) - offset) * scale) * np.pi)
return np.sin(np.linalg.norm(gradients, axis=0))
|
Makes a surface from the 3D sine function.
Args:
dims (pair): the dimensions of the surface to create
offset (float): an offset applied to the function
scale (float): a scale applied to the sine frequency
Returns:
surface: A surface.
|
codesearchnet
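Example (brief usage sketch; the dimensions are illustrative):
surface = make_sine_surface(dims=(64, 64), offset=0.5, scale=1.0)
# `surface` holds the sine of the gradient norm at each grid point.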
|
def cast(self, dtype: tf.DType) -> 'TensorFluent':
if self.dtype == dtype:
return self
t = tf.cast(self.tensor, dtype)
scope = self.scope.as_list()
batch = self.batch
return TensorFluent(t, scope, batch=batch)
|
Returns a TensorFluent for the cast operation with given `dtype`.
Args:
dtype: The output's data type.
Returns:
A TensorFluent wrapping the cast operation.
|
juraj-google-style
|
def get_keys(data_list, leading_columns=LEADING_COLUMNS):
all_keys = set().union(*(list(d.keys()) for d in data_list))
leading_keys = []
for key in leading_columns:
if key not in all_keys:
continue
leading_keys.append(key)
all_keys.remove(key)
return leading_keys + sorted(all_keys)
|
Gets all possible keys from a list of dicts, sorting by leading_columns first
Args:
data_list: list of dicts to pull keys from
leading_columns: list of keys to put first in the result
Returns:
list of keys to be included as columns in excel worksheet
|
juraj-google-style
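Worked example of the key ordering (records are hypothetical):
rows = [{'id': 1, 'name': 'a'}, {'name': 'b', 'score': 2}]
get_keys(rows, leading_columns=['id', 'name'])
# -> ['id', 'name', 'score']  (leading columns first, then the remaining keys sorted)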
|
def _interpretPayload(functioncode, payload):
raise NotImplementedError()
output = ''
output += 'Modbus payload decoder\n'
output += 'Input payload (length {} characters): {!r} \n'.format(len(payload), payload)
output += 'Function code: {} (dec).\n'.format(functioncode)
if len(payload) == 4:
FourbyteMessageFirstHalfValue = _twoByteStringToNum(payload[0:2])
FourbyteMessageSecondHalfValue = _twoByteStringToNum(payload[2:4])
return output
|
r"""Generate a human readable description of a Modbus payload.
Args:
* functioncode (int): Function code
* payload (str): The payload that should be interpreted. It should be a byte string.
Returns:
A descriptive string.
For example, the payload ``'\x10\x01\x00\x01'`` for functioncode 3 should give something like::
TODO: Update
|
juraj-google-style
|
def _push(project):
repo = project.repo
remote_name = project.get('project', 'remote')
remote = repo.remote(remote_name)
result = _call_remote_push(remote)
failures = lfilter(complement(did_git_push_succeed), result)
if failures:
for push_info in failures:
logger.error('Failed to push ref {from_ref} to {to_ref}'.format(from_ref=push_info.local_ref.name, to_ref=push_info.remote_ref.name))
raise BalletError('Push failed')
|
Push default branch and project template branch to remote
With default config (i.e. remote and branch names), equivalent to::
$ git push origin master:master project-template:project-template
Raises:
ballet.exc.BalletError: Push failed in some way
|
codesearchnet
|
def __call__(self, shape, dtype=None, **kwargs):
_validate_kwargs(self.__class__.__name__, kwargs)
dtype = _get_dtype(dtype)
if not dtype.is_numpy_compatible or dtype == dtypes.string:
raise ValueError('Expected numeric or boolean dtype, got %s.' % dtype)
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return array_ops.ones(shape, dtype)
|
Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are
supported. If not specified, `tf.keras.backend.floatx()` is used,
which default to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments.
|
github-repos
|
async def rewind(self, query='1'):
if (not (self.state == 'ready')):
logger.debug("Trying to rewind from wrong state '{}'".format(self.state))
return
if (query == ''):
query = '1'
try:
num = int(query)
except TypeError:
self.statuslog.error('Rewind argument must be a number')
except ValueError:
self.statuslog.error('Rewind argument must be a number')
else:
if (len(self.prev_queue) == 0):
self.statuslog.error('No songs to rewind')
return
if (num < 0):
            self.statuslog.error('Rewind must be positive or 0')
return
elif (num > len(self.prev_queue)):
self.statuslog.warning('Rewinding to start')
else:
self.statuslog.info('Rewinding')
for i in range((num + 1)):
if (len(self.prev_queue) > 0):
self.queue.insert(0, self.prev_queue.pop())
try:
self.streamer.stop()
except Exception as e:
logger.exception(e)
|
The rewind command
Args:
query (str): The number of items to rewind
|
codesearchnet
|
def _publish_actor_class_to_key(self, key, actor_class_info):
self._worker.redis_client.hmset(key, actor_class_info)
self._worker.redis_client.rpush("Exports", key)
|
Push an actor class definition to Redis.
This is factored out as a separate function because it is also called
on cached actor class definitions when a worker connects for the first
time.
Args:
key: The key to store the actor class info at.
actor_class_info: Information about the actor class.
|
juraj-google-style
|
def push_file(self, local_source, remote_dir):
remote_dest = remote_dir + '/' + os.path.basename(local_source)
try:
self.makedirs(remote_dir, exist_ok=True)
except IOError as e:
logger.exception("Pushing {0} to {1} failed".format(local_source, remote_dir))
if e.errno == 2:
raise BadScriptPath(e, self.hostname)
elif e.errno == 13:
raise BadPermsScriptPath(e, self.hostname)
else:
logger.exception("File push failed due to SFTP client failure")
raise FileCopyException(e, self.hostname)
try:
self.sftp_client.put(local_source, remote_dest, confirm=True)
self.sftp_client.chmod(remote_dest, 0o777)
except Exception as e:
logger.exception("File push from local source {} to remote destination {} failed".format(
local_source, remote_dest))
raise FileCopyException(e, self.hostname)
return remote_dest
|
Transport a local file to a directory on a remote machine
Args:
- local_source (string): Path
- remote_dir (string): Remote path
Returns:
- str: Path to copied file on remote machine
Raises:
- BadScriptPath : if script path on the remote side is bad
- BadPermsScriptPath : You do not have perms to make the channel script dir
- FileCopyException : FileCopy failed.
|
juraj-google-style
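Example (a hedged sketch, assuming `channel` is an instance of the SSH channel class defining the method above; paths are placeholders):
remote_path = channel.push_file('/tmp/run.sh', '/home/user/jobs')
# On success remote_path is '/home/user/jobs/run.sh'; failures raise one of the documented exceptions.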
|
def matches(self, msg_seq: int, msg: MessageInterface) -> bool:
return all(crit.matches(msg_seq, msg) for crit in self.all_criteria)
|
The message matches if all the defined search key criteria match.
Args:
msg_seq: The message sequence ID.
msg: The message object.
|
juraj-google-style
|
def is_gpu(self):
return (self._device.get_info(cl.device_info.TYPE) == cl.device_type.GPU)
|
Check if the device associated with this environment is a GPU.
Returns:
boolean: True if the device is a GPU, False otherwise.
|
codesearchnet
|
def _empty_resource_attributes(self):
self.status_code = 404
self.headers = {}
self.exists = False
self.rdf = self._build_rdf()
if type(self) == NonRDFSource:
self.binary.empty()
|
Small method to empty resource attributes if the resource is removed or absent
Args:
None
Return:
None: empties selected resource attributes
|
juraj-google-style
|
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
context.update(self.extra_context)
context['crumbs'] = self.get_crumbs()
context['title'] = self.title
context['suit'] = ('suit' in settings.INSTALLED_APPS)
if ((context.get('dashboard_grid', None) is None) and self.grid):
context['dashboard_grid'] = self.grid
return self.render_to_response(context)
|
Django view get function.
Add items of extra_context, crumbs and grid to context.
Args:
request (): Django's request object.
*args (): request args.
**kwargs (): request kwargs.
Returns:
response: render to response with context.
|
codesearchnet
|
def normal_var(data, mean):
if not isinstance(data, np.ndarray):
data = np.array(data)
cumm = [0.0]
cumm.extend(np.cumsum(np.power(np.abs(data - mean), 2)))
def cost(s, t):
dist = float(t - s)
diff = cumm[t] - cumm[s]
return dist * np.log(diff/dist)
return cost
|
Creates a segment cost function for a time series with a
Normal distribution with changing variance
Args:
data (:obj:`list` of float): 1D time series data
mean (float): mean of the distribution
Returns:
function: Function with signature
(int, int) -> float
where the first arg is the starting index, and the second
is the last arg. Returns the cost of that segment
|
juraj-google-style
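Worked example of building and evaluating the segment cost function (data values are illustrative):
import numpy as np

data = [1.0, 1.2, 0.9, 5.1, 4.8, 5.3]
cost = normal_var(data, mean=np.mean(data))
cost(0, 3)  # cost of the segment covering the first three points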
|
def dump(self, destination, with_defaults=False):
if isinstance(destination, six.string_types):
with open(destination, 'w', encoding='utf-8') as f:
self._rw.dump_config_to_file(self._config, f, with_defaults=with_defaults)
else:
self._rw.dump_config_to_file(self._config, destination, with_defaults=with_defaults)
|
Write configuration values to the specified destination.
Args:
destination: a file path or a file-like object to write the configuration to.
with_defaults (bool): if ``True``, values of items with no custom values will be included in the output
if they have a default value set.
|
juraj-google-style
|
def parse(self, text):
tokens = self.lex(text)
parser = Parser(tokens)
return parser.parse()
|
Parse the given text.
Args:
text (str): the text to lex
Returns:
object: a node representing the current rule.
|
codesearchnet
|
def run_query(query: str) -> None:
try:
result = parse_query(query)
except Exception as e:
result = f'ERROR: {type(e).__name__}: {e.__str__()}.'
return result
return filter_records(convert_to_dataframe(result), query)
|
Run a query and display the result.
Args:
query (str): The query to be executed.
|
github-repos
|
def to_weld_type(weld_type, dim):
for i in xrange(dim):
weld_type = WeldVec(weld_type)
return weld_type
|
Wraps a Weld type in `WeldVec` the given number of times.
Args:
weld_type (WeldType): the base Weld type to wrap
dim (int): number of vector dimensions to wrap around the base type
Returns:
WeldType: the resulting nested `WeldVec` type
|
juraj-google-style
|
def get_firmware(self):
firmware_uri = self._helper.build_subresource_uri(self.data['uri'], subresource_path=self.FIRMWARE_PATH)
return self._helper.do_get(firmware_uri)
|
Gets the installed firmware for a logical interconnect.
Returns:
dict: LIFirmware.
|
codesearchnet
|
def disaggregate_wind(wind_daily, method='equal', a=None, b=None, t_shift=None):
assert (method in ('equal', 'cosine', 'random')), 'Invalid method'
wind_eq = melodist.distribute_equally(wind_daily)
if (method == 'equal'):
wind_disagg = wind_eq
elif (method == 'cosine'):
assert (None not in (a, b, t_shift))
wind_disagg = _cosine_function(np.array([wind_eq.values, wind_eq.index.hour]), a, b, t_shift)
elif (method == 'random'):
wind_disagg = (wind_eq * ((- np.log(np.random.rand(len(wind_eq)))) ** 0.3))
return wind_disagg
|
General function for wind speed disaggregation
Args:
wind_daily: daily values
method: keyword specifying the disaggregation method to be used
a: parameter a for the cosine function
b: parameter b for the cosine function
t_shift: parameter t_shift for the cosine function
Returns:
Disaggregated hourly values of windspeed.
|
codesearchnet
|
def _ParseFValue(self, registry_key):
registry_value = registry_key.GetValueByName('F')
if not registry_value:
raise errors.ParseError(
'missing value: "F" in Windows Registry key: {0:s}.'.format(
registry_key.name))
f_value_map = self._GetDataTypeMap('f_value')
try:
return self._ReadStructureFromByteStream(
registry_value.data, 0, f_value_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(exception)
|
Parses an F value.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Returns:
f_value: F value stored in the Windows Registry key.
Raises:
ParseError: if the Windows Registry key does not contain an F value or
F value cannot be parsed.
|
juraj-google-style
|
def SetLookupHash(self, lookup_hash):
if lookup_hash not in self.SUPPORTED_HASHES:
raise ValueError('Unsupported lookup hash: {0!s}'.format(lookup_hash))
self.lookup_hash = lookup_hash
|
Sets the hash to query.
Args:
lookup_hash (str): name of the hash attribute to look up.
Raises:
ValueError: if the lookup hash is not supported.
|
juraj-google-style
|
def _full_pred_succ_maps(self, pred_map, succ_map, input_circuit, wire_map):
full_pred_map = {}
full_succ_map = {}
for w in input_circuit.input_map:
if (w in wire_map):
full_pred_map[wire_map[w]] = pred_map[wire_map[w]]
full_succ_map[wire_map[w]] = succ_map[wire_map[w]]
else:
full_succ_map[w] = self.output_map[w]
full_pred_map[w] = self._multi_graph.predecessors(self.output_map[w])[0]
if (len(list(self._multi_graph.predecessors(self.output_map[w]))) != 1):
raise DAGCircuitError(('too many predecessors for %s[%d] output node' % (w[0], w[1])))
return (full_pred_map, full_succ_map)
|
Map all wires of the input circuit.
Map all wires of the input circuit to predecessor and
successor nodes in self, keyed on wires in self.
Args:
pred_map (dict): comes from _make_pred_succ_maps
succ_map (dict): comes from _make_pred_succ_maps
input_circuit (DAGCircuit): the input circuit
wire_map (dict): the map from wires of input_circuit to wires of self
Returns:
tuple: full_pred_map, full_succ_map (dict, dict)
Raises:
DAGCircuitError: if more than one predecessor for output nodes
|
codesearchnet
|
def __live_receivers(signal):
with __lock:
__purge()
receivers = [funcref() for funcref in __receivers[signal]]
return receivers
|
Return all signal handlers that are currently still alive for the
input `signal`.
Args:
signal: A signal name.
Returns:
A list of callable receivers for the input signal.
|
codesearchnet
|
def push(self, targets, jobs=None, remote=None, show_checksums=False):
return self.repo.cache.local.push(targets, jobs=jobs, remote=self._get_cloud(remote, 'push'), show_checksums=show_checksums)
|
Push data items in a cloud-agnostic way.
Args:
targets (list): list of targets to push to the cloud.
jobs (int): number of jobs that can be running simultaneously.
remote (dvc.remote.base.RemoteBase): optional remote to push to.
By default remote from core.remote config option is used.
show_checksums (bool): show checksums instead of file names in
information messages.
|
codesearchnet
|
def load(path, compile=True, options=None):
metadata = saved_metadata_pb2.SavedMetadata()
meta_graph_def = loader_impl.parse_saved_model(path).meta_graphs[0]
object_graph_def = meta_graph_def.object_graph_def
path_to_metadata_pb = os.path.join(path, constants.SAVED_METADATA_PATH)
if gfile.Exists(path_to_metadata_pb):
try:
with gfile.GFile(path_to_metadata_pb, 'rb') as f:
file_content = f.read()
metadata.ParseFromString(file_content)
except message.DecodeError as e:
raise IOError('Cannot parse keras metadata {}: {}.'.format(path_to_metadata_pb, str(e)))
else:
logging.warning('SavedModel saved prior to TF 2.5 detected when loading Keras model. Please ensure that you are saving the model with model.save() or tf.keras.models.save_model(), *NOT* tf.saved_model.save(). To confirm, there should be a file named "keras_metadata.pb" in the SavedModel directory.')
_read_legacy_metadata(object_graph_def, metadata)
if not metadata.nodes:
return tf_load.load(path, options=options)
keras_loader = KerasObjectLoader(metadata, object_graph_def)
keras_loader.load_layers(compile=compile)
nodes_to_load = {'root': None}
for node_id, loaded_node in keras_loader.loaded_nodes.items():
nodes_to_load[keras_loader.get_path(node_id)] = loaded_node
loaded = tf_load.load_partial(path, nodes_to_load, options=options)
keras_loader.finalize_objects()
keras_loader.del_tracking()
model = loaded['root']
if isinstance(model, training_lib.Model) and compile:
training_config = model._serialized_attributes['metadata'].get('training_config', None)
if training_config is not None:
model.compile(**saving_utils.compile_args_from_training_config(training_config), from_serialized=True)
saving_utils.try_build_compiled_arguments(model)
if isinstance(model.optimizer, optimizer_v2.OptimizerV2):
if model.optimizer.get_slot_names():
logging.warning('Your optimizer uses slots. Slots cannot be restored from saved_model, as a result, your model is starting with a new initialized optimizer.')
else:
logging.warning('No training configuration found in save file, so the model was *not* compiled. Compile it manually.')
if not context.executing_eagerly():
sess = backend.get_session()
sess.run(ops.get_collection(ops.GraphKeys.TABLE_INITIALIZERS))
return model
|
Loads Keras objects from a SavedModel.
Any Keras layer or model saved to the SavedModel will be loaded back
as Keras objects. Other objects are loaded as regular trackable objects (same
as `tf.saved_model.load`).
Currently, Keras saving/loading only retains the Keras object's weights,
losses, and call function.
The loaded model can be re-compiled, but the original optimizer, compiled loss
functions, and metrics are not retained. This is temporary, and `model.save`
will soon be able to serialize compiled models.
Args:
path: Path to SavedModel.
compile: If true, compile the model after loading it.
options: Optional `tf.saved_model.LoadOptions` object that specifies
options for loading from SavedModel.
Returns:
Object loaded from SavedModel.
|
github-repos
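Example (a minimal usage sketch; the SavedModel path is a placeholder):
model = load('/path/to/saved_model', compile=True)
# If a training config was serialized with the model it comes back compiled;
# otherwise compile it manually before training or evaluation.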
|
def remove_foothills(self, q_data, marked, bin_num, bin_lower, centers, foothills):
hills = []
for foot in foothills:
center = foot[0]
hills[:] = foot[1][:]
while (len(hills) > 0):
pt = hills.pop((- 1))
marked[pt] = self.GLOBBED
            for (s_index, val) in np.ndenumerate(marked[(pt[0] - 1):(pt[0] + 2), (pt[1] - 1):(pt[1] + 2)]):
index = (((s_index[0] - 1) + pt[0]), ((s_index[1] - 1) + pt[1]))
if (val == self.UNMARKED):
if ((q_data[index] >= 0) and (q_data[index] < bin_lower) and ((q_data[index] <= q_data[pt]) or self.is_closest(index, center, centers, bin_num))):
hills.append(index)
del foothills[:]
|
Mark points determined to be foothills as globbed, so that they are not included in
future searches. Also searches neighboring points to foothill points to determine
if they should also be considered foothills.
Args:
q_data: Quantized data
marked: Marked
bin_num: Current bin being searched
bin_lower: Next bin being searched
centers: dictionary of local maxima considered to be object centers
foothills: List of foothill points being removed.
|
codesearchnet
|