code | docstring | source
---|---|---|
def __init__(self, app, project):
self.project = project
self.app = app
self.sources = set()
self.smart_sources = []
self.index = None
self.source_roots = OrderedSet()
self._created_symbols = DefaultOrderedDict(OrderedSet)
self.__package_root = None
self.__toplevel_comments = OrderedSet()
self.formatter = self._make_formatter()
|
Constructor for `Extension`.
This should never get called directly.
Args:
project: The `project.Project` instance for which documentation
is being generated.
|
juraj-google-style
|
def sample(self, num_samples=1):
self.check_fit()
return np.random.normal(self.mean, self.std, num_samples)
|
Returns new data points sampled from the fitted model.
Arguments:
num_samples: `int`
Returns:
np.ndarray: Generated samples
|
codesearchnet
|
class Globally(PTransform):
def __init__(self, num_quantiles, key=None, reverse=False, weighted=False, input_batched=False):
self._num_quantiles = num_quantiles
self._key = key
self._reverse = reverse
self._weighted = weighted
self._input_batched = input_batched
def expand(self, pcoll):
return pcoll | CombineGlobally(ApproximateQuantilesCombineFn.create(num_quantiles=self._num_quantiles, key=self._key, reverse=self._reverse, weighted=self._weighted, input_batched=self._input_batched))
def display_data(self):
return ApproximateQuantiles._display_data(num_quantiles=self._num_quantiles, key=self._key, reverse=self._reverse, weighted=self._weighted, input_batched=self._input_batched)
|
PTransform that takes a PCollection and returns a list whose single value is
the approximate N-tiles of the input collection globally.
Args:
num_quantiles: number of elements in the resulting quantiles values list.
key: (optional) Key is a mapping of elements to a comparable key, similar
to the key argument of Python's sorting methods.
reverse: (optional) whether to order things smallest to largest, rather
than largest to smallest.
weighted: (optional) if set to True, the transform returns weighted
quantiles. The input PCollection is then expected to contain tuples of
input values with the corresponding weight.
input_batched: (optional) if set to True, the transform expects each
element of input PCollection to be a batch, which is a list of elements
for non-weighted case and a tuple of lists of elements and weights for
weighted. Provides a way to accumulate multiple elements at a time more
efficiently.
|
github-repos
|
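The Globally transform above is easiest to see in a tiny pipeline. A minimal sketch, assuming the standard Apache Beam Python API; the input values are illustrative.

```python
import apache_beam as beam
from apache_beam.transforms.stats import ApproximateQuantiles

with beam.Pipeline() as pipeline:
    _ = (
        pipeline
        | beam.Create([1, 9, 3, 7, 5, 2, 8])
        # emits a single 5-element list spanning the min and max of the input
        | ApproximateQuantiles.Globally(num_quantiles=5)
        | beam.Map(print)
    )
```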
def CheckGlobalStatic(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
line += clean_lines.elided[linenum + 1].strip()
match = Match(
r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
r'([a-zA-Z0-9_:]+)\b(.*)',
line)
if (match and
not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and
not Search(r'\boperator\W', line) and
not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))):
if Search(r'\bconst\b', line):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string '
'instead: "%schar%s %s[]".' %
(match.group(1), match.group(2) or '', match.group(3)))
else:
error(filename, linenum, 'runtime/string', 4,
'Static/global string variables are not permitted.')
if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or
Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
|
Check for unsafe global or static objects.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
|
juraj-google-style
|
def Uninstall(self, package_name, keep_data=False, timeout_ms=None):
cmd = ['pm uninstall']
if keep_data:
cmd.append('-k')
cmd.append(('"%s"' % package_name))
return self.Shell(' '.join(cmd), timeout_ms=timeout_ms)
|
Removes a package from the device.
Args:
package_name: Package name of target package.
keep_data: whether to keep the data and cache directories
timeout_ms: Expected timeout for the uninstall.
Returns:
The pm uninstall output.
|
codesearchnet
|
def get_catch_vars(catch):
catch_re = re.compile(r'catch\s+(\${?\S+}?),\s*(\${?\S+}?)')
res = catch_re.match(catch)
if res is None:
err = 'Catch must have format "catch $x, $y", got "{0}"'.format(catch)
raise exceptions.YamlSyntaxError(err)
return get_var_name(res.group(1)), get_var_name(res.group(2))
|
Returns 2-tuple with names of catch control vars, e.g. for "catch $was_exc, $exc"
it returns ('was_exc', 'exc').
Args:
catch: the whole catch line
Returns:
2-tuple with names of catch control variables
Raises:
exceptions.YamlSyntaxError if the catch line is malformed
|
juraj-google-style
|
def APFSUnlockVolume(fsapfs_volume, path_spec, key_chain):
is_locked = fsapfs_volume.is_locked()
if is_locked:
password = key_chain.GetCredential(path_spec, 'password')
if password:
fsapfs_volume.set_password(password)
recovery_password = key_chain.GetCredential(path_spec, 'recovery_password')
if recovery_password:
fsapfs_volume.set_recovery_password(recovery_password)
is_locked = (not fsapfs_volume.unlock())
return (not is_locked)
|
Unlocks an APFS volume using the path specification.
Args:
fsapfs_volume (pyapfs.volume): APFS volume.
path_spec (PathSpec): path specification.
key_chain (KeyChain): key chain.
Returns:
bool: True if the volume is unlocked, False otherwise.
|
codesearchnet
|
def GetKey(self, public_key_hash):
if (public_key_hash.ToBytes() in self._keys.keys()):
return self._keys[public_key_hash.ToBytes()]
return None
|
Get the KeyPair belonging to the public key hash.
Args:
public_key_hash (UInt160): a public key hash to get the KeyPair for.
Returns:
KeyPair: If successful, the KeyPair belonging to the public key hash, otherwise None
|
codesearchnet
|
def GetHashType(self, hash_str):
for (hash_type, hash_re) in self.hashes:
if hash_re.match(hash_str):
return hash_type
return 'EMPTY'
|
Identify the type of hash in a hash string.
Args:
hash_str: A string value that may be a hash.
Returns:
A string description of the type of hash.
|
codesearchnet
|
def build_uri(self, id_or_uri):
if not id_or_uri:
logger.exception(RESOURCE_CLIENT_INVALID_ID)
raise ValueError(RESOURCE_CLIENT_INVALID_ID)
if "/" in id_or_uri:
self.validate_resource_uri(id_or_uri)
return id_or_uri
else:
return self._base_uri + "/" + id_or_uri
|
Helps to build the URI from resource id and validate the URI.
Args:
id_or_uri: ID/URI of the resource.
Returns:
Returns a valid resource URI
|
juraj-google-style
|
def max_entropy_distribution(node_indices, number_of_nodes):
distribution = np.ones(repertoire_shape(node_indices, number_of_nodes))
return (distribution / distribution.size)
|
Return the maximum entropy distribution over a set of nodes.
This is different from the network's uniform distribution because nodes
outside ``node_indices`` are fixed and treated as if they have only 1
state.
Args:
node_indices (tuple[int]): The set of node indices over which to take
the distribution.
number_of_nodes (int): The total number of nodes in the network.
Returns:
np.ndarray: The maximum entropy distribution over the set of nodes.
|
codesearchnet
|
def get_all_clusters_sites():
result = {}
gk = get_api_client()
sites = gk.sites.list()
for site in sites:
clusters = site.clusters.list()
result.update({c.uid: site.uid for c in clusters})
return result
|
Get all the clusters of all the sites.
Returns:
dict corresponding to the mapping cluster uid to python-grid5000 site
|
codesearchnet
|
def isfile(self, path, follow_symlinks=True):
return self._is_of_type(path, S_IFREG, follow_symlinks)
|
Determine if path identifies a regular file.
Args:
path: Path to filesystem object.
Returns:
`True` if path points to a regular file (following symlinks).
Raises:
TypeError: if path is None.
|
juraj-google-style
|
def convert_tensor_tf_type_to_tflite_type(tf_type: dtypes.DType, usage: str='') -> _types_pb2.IODataType:
mapping = {dtypes.float16: _types_pb2.FLOAT16, dtypes.float32: _types_pb2.FLOAT, dtypes.float64: _types_pb2.FLOAT64, dtypes.int8: _types_pb2.INT8, dtypes.int16: _types_pb2.INT16, dtypes.uint16: _types_pb2.UINT16, dtypes.int32: _types_pb2.INT32, dtypes.int64: _types_pb2.INT64, dtypes.uint8: _types_pb2.UINT8, dtypes.uint32: _types_pb2.UINT32, dtypes.uint64: _types_pb2.UINT64, dtypes.string: _types_pb2.STRING, dtypes.bool: _types_pb2.BOOL, dtypes.complex64: _types_pb2.COMPLEX64, dtypes.complex128: _types_pb2.COMPLEX128}
tflite_type = mapping.get(tf_type)
if tflite_type is None:
raise ValueError('Unsupported TensorFlow type `{0}` provided for the {1}'.format(tf_type, usage))
return tflite_type
|
Convert tensor type from tf type to tflite type.
Args:
tf_type: TensorFlow type.
usage: Text describing the reason for invoking this function.
Raises:
ValueError: If `tf_type` is unsupported.
Returns:
tflite_type: TFLite type. Refer to compiler/mlir/lite/types.proto.
|
github-repos
|
def patch_retry(testcase, module):
from mock import Mock
from mock import patch
real_retry_with_exponential_backoff = retry.with_exponential_backoff
def patched_retry_with_exponential_backoff(**kwargs):
kwargs.update(logger=Mock(), clock=Mock())
return real_retry_with_exponential_backoff(**kwargs)
patch.object(retry, 'with_exponential_backoff', side_effect=patched_retry_with_exponential_backoff).start()
importlib.reload(module)
def remove_patches():
patch.stopall()
importlib.reload(module)
testcase.addCleanup(remove_patches)
|
A function to patch retry module to use mock clock and logger.
Clock and logger that defined in retry decorator will be replaced in test
in order to skip sleep phase when retry happens.
Args:
testcase: An instance of unittest.TestCase that calls this function to
patch retry module.
module: The module that uses retry and need to be replaced with mock
clock and logger in test.
|
github-repos
|
def __init__(self, dataset_file_map: Mapping[str, _RepresentativeDatasetFile]) -> None:
self.dataset_file_map = dataset_file_map
|
Initializes TFRecord representative dataset loader.
Args:
dataset_file_map: Signature key -> `RepresentativeDatasetFile` mapping.
Raises:
DecodeError: If the sample is not RepresentativeDataSample.
|
github-repos
|
def _parse_logline_timestamp(t):
date, time = t.split(' ')
month, day = date.split('-')
h, m, s = time.split(':')
s, ms = s.split('.')
return (month, day, h, m, s, ms)
|
Parses a logline timestamp into a tuple.
Args:
t: Timestamp in logline format.
Returns:
An iterable of date and time elements in the order of month, day, hour,
minute, second, microsecond.
|
github-repos
|
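A quick sanity check of the parser above; the timestamp string is a hypothetical logcat-style value.

```python
# splits '<month>-<day> <h>:<m>:<s>.<ms>' into its components
print(_parse_logline_timestamp('05-22 17:02:21.123'))
# -> ('05', '22', '17', '02', '21', '123')
```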
def _Build(self, storage_file):
self._index = {}
for event_tag in storage_file.GetEventTags():
self.SetEventTag(event_tag)
|
Builds the event tag index.
Args:
storage_file (BaseStorageFile): storage file.
|
juraj-google-style
|
def name_based_restore(mesh: layout_lib.Mesh, checkpoint_prefix: str, name_tensor_dict: Dict[str, Union[tensor_lib.Tensor, tf_variables.Variable]]):
if not context.executing_eagerly():
raise ValueError('name based restore must run eagerly.')
ordered_name_tensor_dict = name_tensor_dict
if not isinstance(name_tensor_dict, collections.OrderedDict):
ordered_name_tensor_dict = collections.OrderedDict(name_tensor_dict)
for name, tensor in ordered_name_tensor_dict.items():
try:
if api.fetch_layout(tensor).mesh.device_type().upper() != 'CPU':
raise ValueError('Restoring a non CPU Tensor is not supported currently. Offending tensor name : {tensor_name}'.format(tensor_name=name))
except errors_impl.OpError as op_error:
raise ValueError('Saving/Restoring tensor must be a DTensor') from op_error
checkpoint_prefix = api.pack([checkpoint_prefix] * mesh.num_local_devices(), layout_lib.Layout.replicated(mesh.host_mesh(), rank=0))
tensor_names = api.pack([list(ordered_name_tensor_dict.keys())] * mesh.num_local_devices(), layout_lib.Layout.replicated(mesh.host_mesh(), rank=1))
shape_and_slices = api.pack([[''] * len(ordered_name_tensor_dict)] * mesh.num_local_devices(), layout_lib.Layout.replicated(mesh.host_mesh(), rank=1))
input_shapes = [tensor.shape for tensor in ordered_name_tensor_dict.values()]
input_layouts = [api.fetch_layout(tensor).to_string() for tensor in ordered_name_tensor_dict.values()]
with ops.device(api.device_name()):
restored_cpu_tensors = gen_dtensor_ops.d_tensor_restore_v2(prefix=checkpoint_prefix, tensor_names=tensor_names, shape_and_slices=shape_and_slices, input_shapes=input_shapes, input_layouts=input_layouts, dtypes=[tensor.dtype for tensor in ordered_name_tensor_dict.values()])
return collections.OrderedDict(zip(ordered_name_tensor_dict.keys(), restored_cpu_tensors))
|
Restores from checkpoint_prefix to name based DTensors.
It is required to have already-initialized DTensor variables that have the same
shape/dtype as the tensors being restored.
Also, we currently only support a name based restore on a single mesh.
Args:
mesh: The single mesh that all Tensors would be restored to.
checkpoint_prefix : The prefix of checkpoint to be restored.
name_tensor_dict: An ordered dictionary of tensor_names to a DTensor. The
DTensor shape/dtype must match the tensors being saved/restored for now.
Returns:
A dictionary of name to its restored DTensor value.
|
github-repos
|
def upload(self, file_path, golden_image_info):
uri = "{0}?name={1}&description={2}".format(self.URI,
quote(golden_image_info.get('name', '')),
quote(golden_image_info.get('description', '')))
return self._client.upload(file_path, uri)
|
Adds a Golden Image resource from the file that is uploaded from a local drive. Only the .zip format file can
be used for the upload.
Args:
file_path (str): File name to upload.
golden_image_info (dict): Golden Image information.
Returns:
dict: Golden Image.
|
juraj-google-style
|
def list_runs(self, project, entity=None):
query = gql()
return self._flatten_edges(self.gql(query, variable_values={
'entity': entity or self.settings('entity'),
'model': project or self.settings('project')})['model']['buckets'])
|
Lists runs in W&B scoped by project.
Args:
project (str): The project to scope the runs to
entity (str, optional): The entity to scope this project to. Defaults to public models
Returns:
[{"id",name","description"}]
|
juraj-google-style
|
def user_to_uid(user):
if user is None:
user = salt.utils.user.get_user()
return salt.utils.win_dacl.get_sid_string(user)
|
Convert user name to a uid
Args:
user (str): The user to lookup
Returns:
str: The user id of the user
CLI Example:
.. code-block:: bash
salt '*' file.user_to_uid myusername
|
juraj-google-style
|
def _setweights(self):
for name_w in self.weights:
raw_w = getattr(self.module, (name_w + '_raw'))
w = torch.nn.functional.dropout(raw_w, p=self.dropout, training=self.training)
if hasattr(self.module, name_w):
delattr(self.module, name_w)
setattr(self.module, name_w, w)
|
Uses PyTorch's built-in dropout function to apply dropout to the parameters of
the wrapped module.
Args:
None
Returns:
None
|
codesearchnet
|
def call(self, hidden_states: tf.Tensor, attention_mask: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, past_key_value: Optional[Tuple[tf.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, training=False) -> Tuple[tf.Tensor, Optional[Tuple[tf.Tensor, tf.Tensor]]]:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache)
hidden_states = tf.nn.dropout(hidden_states, rate=self.dropout)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = tf.nn.dropout(hidden_states, rate=self.dropout)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (present_key_value,)
return outputs
|
Args:
hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`tf.Tensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(tf.Tensor)`, *optional*): cached past key and value projection states
|
github-repos
|
def library_line(self, file_name):
gulplib_set = lambda: 'GULP_LIB' in os.environ.keys()
readable = lambda f: os.path.isfile(f) and os.access(f, os.R_OK)
gin = ""
dirpath, fname = os.path.split(file_name)
if dirpath and readable(file_name):
gin = 'library ' + file_name
else:
fpath = os.path.join(os.getcwd(), file_name)
if readable(fpath):
gin = 'library ' + fpath
elif gulplib_set():
fpath = os.path.join(os.environ['GULP_LIB'], file_name)
if readable(fpath):
gin = 'library ' + file_name
if gin:
return gin + "\n"
else:
raise GulpError('GULP Library not found')
|
Specifies GULP library file to read species and potential parameters.
If using a library, don't specify species and potential parameters
in the input file, and vice versa. Make sure the elements of the
structure are in the library file.
Args:
file_name: Name of GULP library file
Returns:
GULP input string specifying library option
|
juraj-google-style
|
def _MakeExecutable(self, metadata_script):
mode = os.stat(metadata_script).st_mode
os.chmod(metadata_script, mode | stat.S_IEXEC)
|
Add executable permissions to a file.
Args:
metadata_script: string, the path to the executable file.
|
juraj-google-style
|
def setattr(self, name, val):
nodes = self._do_query(multiple=False)
try:
return self.poco.agent.hierarchy.setAttr(nodes, name, val)
except UnableToSetAttributeException as e:
raise InvalidOperationException('"{}" of "{}"'.format(str(e), self))
|
Change the attribute value of the UI element. Not all attributes can be cast to text. If changing
immutable attributes or attributes which do not exist, an InvalidOperationException is raised.
Args:
name: attribute name
val: new attribute value to cast
Raises:
InvalidOperationException: when it fails to set the attribute on UI element
|
juraj-google-style
|
def _create_initial_state(self, initial_ids, initial_cache):
cur_index = tf.constant(0)
alive_seq = _expand_to_beam_size(initial_ids, self.beam_size)
alive_seq = tf.expand_dims(alive_seq, axis=2)
initial_log_probs = tf.constant([([0.0] + ([(- float('inf'))] * (self.beam_size - 1)))])
alive_log_probs = tf.tile(initial_log_probs, [self.batch_size, 1])
alive_cache = nest.map_structure((lambda t: _expand_to_beam_size(t, self.beam_size)), initial_cache)
finished_seq = tf.zeros(tf.shape(alive_seq), tf.int32)
finished_scores = (tf.ones([self.batch_size, self.beam_size]) * (- INF))
finished_flags = tf.zeros([self.batch_size, self.beam_size], tf.bool)
state = {_StateKeys.CUR_INDEX: cur_index, _StateKeys.ALIVE_SEQ: alive_seq, _StateKeys.ALIVE_LOG_PROBS: alive_log_probs, _StateKeys.ALIVE_CACHE: alive_cache, _StateKeys.FINISHED_SEQ: finished_seq, _StateKeys.FINISHED_SCORES: finished_scores, _StateKeys.FINISHED_FLAGS: finished_flags}
state_shape_invariants = {_StateKeys.CUR_INDEX: tf.TensorShape([]), _StateKeys.ALIVE_SEQ: tf.TensorShape([None, self.beam_size, None]), _StateKeys.ALIVE_LOG_PROBS: tf.TensorShape([None, self.beam_size]), _StateKeys.ALIVE_CACHE: nest.map_structure(_get_shape_keep_last_dim, alive_cache), _StateKeys.FINISHED_SEQ: tf.TensorShape([None, self.beam_size, None]), _StateKeys.FINISHED_SCORES: tf.TensorShape([None, self.beam_size]), _StateKeys.FINISHED_FLAGS: tf.TensorShape([None, self.beam_size])}
return (state, state_shape_invariants)
|
Return initial state dictionary and its shape invariants.
Args:
initial_ids: initial ids to pass into the symbols_to_logits_fn.
int tensor with shape [batch_size, 1]
initial_cache: dictionary storing values to be passed into the
symbols_to_logits_fn.
Returns:
state and shape invariant dictionaries with keys from _StateKeys
|
codesearchnet
|
def __init__(self, wrapper):
self._wrapper = wrapper
self.make_request = self._wrapper.request_parking
|
Initialization of the API module.
Args:
wrapper (Wrapper): Object that performs the requests to endpoints.
|
juraj-google-style
|
def bool_env(varname: str, default: bool) -> bool:
val = os.getenv(varname, str(default))
val = val.lower()
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return True
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
return False
else:
raise ValueError('invalid truth value %r for environment %r' % (val, varname))
|
Read an environment variable and interpret it as a boolean.
True values are (case insensitive): 'y', 'yes', 't', 'true', 'on', and '1';
false values are 'n', 'no', 'f', 'false', 'off', and '0'.
Args:
varname: the name of the variable
default: the default boolean value
Raises: ValueError if the environment variable is anything else.
|
github-repos
|
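Usage of bool_env is straightforward; a short sketch with a hypothetical flag name.

```python
import os

os.environ['ENABLE_X64'] = 'True'            # hypothetical flag
assert bool_env('ENABLE_X64', default=False) is True
# unset variables fall back to the default
assert bool_env('SOME_UNSET_FLAG', default=True) is True
```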
def renew(self, requested_timeout=None):
if self._has_been_unsubscribed:
raise SoCoException(
'Cannot renew subscription once unsubscribed')
if not self.is_subscribed:
raise SoCoException(
'Cannot renew subscription before subscribing')
if self.time_left == 0:
raise SoCoException(
'Cannot renew subscription after expiry')
headers = {
'SID': self.sid
}
if requested_timeout is None:
requested_timeout = self.requested_timeout
if requested_timeout is not None:
headers["TIMEOUT"] = "Second-{}".format(requested_timeout)
response = requests.request(
'SUBSCRIBE',
self.service.base_url + self.service.event_subscription_url,
headers=headers)
response.raise_for_status()
timeout = response.headers['timeout']
if timeout.lower() == 'infinite':
self.timeout = None
else:
self.timeout = int(timeout.lstrip('Second-'))
self._timestamp = time.time()
self.is_subscribed = True
log.info(
"Renewed subscription to %s, sid: %s",
self.service.base_url + self.service.event_subscription_url,
self.sid)
|
Renew the event subscription.
You should not try to renew a subscription which has been
unsubscribed, or once it has expired.
Args:
requested_timeout (int, optional): The period for which a renewal
request should be made. If None (the default), use the timeout
requested on subscription.
|
juraj-google-style
|
def get(self, dist=None, term=None, family=None):
if (dist is not None):
if (dist not in self.dists):
raise ValueError(("'%s' is not a valid distribution name." % dist))
return self._get_prior(self.dists[dist])
elif (term is not None):
if (term not in self.terms):
raise ValueError(("'%s' is not a valid term type." % term))
return self._get_prior(self.terms[term])
elif (family is not None):
if (family not in self.families):
raise ValueError(("'%s' is not a valid family name." % family))
_f = self.families[family]
prior = self._get_prior(_f['dist'])
return Family(family, prior, _f['link'], _f['parent'])
|
Retrieve default prior for a named distribution, term type, or family.
Args:
dist (str): Name of desired distribution. Note that the name is
the key in the defaults dictionary, not the name of the
Distribution object used to construct the prior.
term (str): The type of term family to retrieve defaults for.
Must be one of 'intercept', 'fixed', or 'random'.
family (str): The name of the Family to retrieve. Must be a value
defined internally. In the default config, this is one of
'gaussian', 'bernoulli', 'poisson', or 't'.
|
codesearchnet
|
def read_structs(fstream):
struct = read_struct(fstream)
while struct is not None:
yield struct
struct = read_struct(fstream)
|
Read all structs from likwid's file stream.
Args:
fstream: Likwid's output file stream.
Returns:
A generator that can be used to iterate over all structs in the
fstream.
|
juraj-google-style
|
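Since read_structs is a generator, it is consumed lazily. A sketch assuming read_struct is defined in the same module; the file name is hypothetical.

```python
with open('likwid_output.txt') as fstream:   # hypothetical file
    for struct in read_structs(fstream):     # yields one struct at a time
        print(struct)
```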
def reversals(self, transfer_id, data={}, **kwargs):
url = "{}/{}/reversals".format(self.base_url, transfer_id)
return self.get_url(url, data, **kwargs)
|
Get all reversal transfers for the given transfer id
Args:
transfer_id :
Id for which the reversal transfer objects have to be fetched
Returns:
Transfer Dict
|
juraj-google-style
|
def match(self, request):
errors = []
def match(matcher):
try:
return matcher.match(request)
except Exception as err:
err = '{}: {}'.format(type(matcher).__name__, err)
errors.append(err)
return False
return (all([match(matcher) for matcher in self]), errors)
|
Match the given HTTP request instance against the registered
matcher functions in the current engine.
Arguments:
request (pook.Request): outgoing request to match.
Returns:
tuple(bool, list[Exception]): ``True`` if all matcher tests
passes, otherwise ``False``. Also returns an optional list
of error exceptions.
|
codesearchnet
|
def new_scope(self, new_scope={}):
(old_scopes, self.scopes) = (self.scopes, self.scopes.new_child(new_scope))
(yield)
self.scopes = old_scopes
|
Add a new innermost scope for the duration of the with block.
Args:
new_scope (dict-like): The scope to add.
|
codesearchnet
|
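A sketch of how such a scope manager is typically used, assuming the method is wrapped with contextlib.contextmanager (the bare yield suggests this) and that self.scopes is a collections.ChainMap; `interp` is a hypothetical instance.

```python
with interp.new_scope({'x': 1}):
    ...  # name lookups see 'x' in the innermost scope here
# the previous scope chain is restored when the block exits
```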
def concatenate(x, other):
return type(x)(tf.TensorShape(x).concatenate(other))
|
Returns the concatenation of the dimension in `x` and `other`.
*Note:* If either `x` or `other` is completely unknown, concatenation will
discard information about the other shape. In future, we might support
concatenation that preserves this information for use with slicing.
For more details, see `help(tf.TensorShape.concatenate)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
other: object representing a shape; convertible to `tf.TensorShape`.
Returns:
new_shape: an object like `x` whose elements are the concatenation of the
dimensions in `x` and `other`.
|
codesearchnet
|
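An illustrative call of the shape concatenation helper, assuming TensorFlow is available; the shapes are arbitrary.

```python
import tensorflow as tf

batch_shape = tf.TensorShape([None, 3])
event_shape = tf.TensorShape([2])
# the result has the dimensions of both inputs, in order
print(concatenate(batch_shape, event_shape))  # (None, 3, 2)
```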
def __init__(self, devices, group_size, options, collective_keys=None, canonicalize_devices=True):
if group_size % len(devices) > 0:
raise ValueError('group_size must be divisible by the number of devices.')
self._group_size = group_size
self._options = options
self._collective_keys = collective_keys or cross_device_utils.CollectiveKeys()
self._lock = threading.Lock()
if canonicalize_devices:
self._devices = tuple((device_util.canonicalize(d) for d in devices))
else:
self._devices = tuple((device_util.canonicalize_without_job_and_task(d) for d in devices))
group_key = self._collective_keys.get_group_key(self._devices)
self._launchers = []
self._limited_nccl = False
for device in self._devices:
launcher = cross_device_utils.CollectiveReplicaLauncher(group_key, group_size, self._collective_keys, device, options)
self._launchers.append(launcher)
if not launcher.can_order_nccl():
self._limited_nccl = True
super(CollectiveAllReduce, self).__init__()
self._canonicalize_devices = canonicalize_devices
|
Initializes the object.
Args:
devices: a list of device strings to run collectives on.
group_size: the global group size. For between-graph replicated training
it's the total number of devices across all workers.
options: a `tf.distribute.experimental.CommunicationOptions`.
collective_keys: an optional CollectiveKey object.
canonicalize_devices: Whether to canonicalize devices for workers or not.
|
github-repos
|
def __init__(self, resolver_context):
super(VShadowFile, self).__init__(resolver_context)
self._file_system = None
self._vshadow_store = None
|
Initializes a file-like object.
Args:
resolver_context (Context): resolver context.
|
juraj-google-style
|
def _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):
input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(input_tensor)
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
return input_tensor
with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value)):
if ignore_value is None:
if input_tensor.dtype == dtypes.string:
ignore_value = ''
elif input_tensor.dtype.is_integer:
ignore_value = -1
else:
ignore_value = input_tensor.dtype.as_numpy_dtype()
ignore_value = math_ops.cast(ignore_value, input_tensor.dtype, name='ignore_value')
indices = array_ops.where(math_ops.not_equal(input_tensor, ignore_value), name='indices')
return sparse_tensor_lib.SparseTensor(indices=indices, values=array_ops.gather_nd(input_tensor, indices, name='values'), dense_shape=array_ops.shape(input_tensor, out_type=dtypes.int64, name='dense_shape'))
|
Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.
If `input_tensor` is already a `SparseTensor`, just return it.
Args:
input_tensor: A string or integer `Tensor`.
ignore_value: Entries in `input_tensor` equal to this value will be absent
from the resulting `SparseTensor`. If `None`, default value of
`input_tensor`'s dtype will be used ('' for `str`, -1 for `int`).
Returns:
A `SparseTensor` with the same shape as `input_tensor`.
Raises:
ValueError: when `input_tensor`'s rank is `None`.
|
github-repos
|
def configure(self, options):
self.client.api.configure_plugin(self.name, options)
self.reload()
|
Update the plugin's settings.
Args:
options (dict): A key-value mapping of options.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
class TFConvNextV2Stage(keras.layers.Layer):
def __init__(self, config: ConvNextV2Config, in_channels: int, out_channels: int, kernel_size: int=2, stride: int=2, depth: int=2, drop_path_rates: Optional[List[float]]=None, **kwargs):
super().__init__(**kwargs)
if in_channels != out_channels or stride > 1:
self.downsampling_layer = [keras.layers.LayerNormalization(epsilon=1e-06, name='downsampling_layer.0'), keras.layers.Conv2D(filters=out_channels, kernel_size=kernel_size, strides=stride, kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), name='downsampling_layer.1')]
else:
self.downsampling_layer = [tf.identity]
drop_path_rates = drop_path_rates or [0.0] * depth
self.layers = [TFConvNextV2Layer(config, dim=out_channels, drop_path=drop_path_rates[j], name=f'layers.{j}') for j in range(depth)]
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = stride
def call(self, hidden_states):
for layer in self.downsampling_layer:
hidden_states = layer(hidden_states)
for layer in self.layers:
hidden_states = layer(hidden_states)
return hidden_states
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, 'layers', None) is not None:
for layer in self.layers:
with tf.name_scope(layer.name):
layer.build(None)
if self.in_channels != self.out_channels or self.stride > 1:
with tf.name_scope(self.downsampling_layer[0].name):
self.downsampling_layer[0].build([None, None, None, self.in_channels])
with tf.name_scope(self.downsampling_layer[1].name):
self.downsampling_layer[1].build([None, None, None, self.in_channels])
|
ConvNextV2 stage, consisting of an optional downsampling layer + multiple residual blocks.
Args:
config (`ConvNextV2Config`):
Model configuration class.
in_channels (`int`):
Number of input channels.
out_channels (`int`):
Number of output channels.
depth (`int`):
Number of residual blocks.
drop_path_rates(`List[float]`):
Stochastic depth rates for each layer.
|
github-repos
|
def should_record_backprop(tensors):
return pywrap_tfe.TFE_Py_TapeSetShouldRecordBackprop(tensors)
|
Returns true if any tape in the stack watches any of these tensors.
Only takes GradientTapes into account, not forward accumulators.
Args:
tensors: Tensors to check, typically inputs to an operation.
Returns:
Boolean, whether any tape watches any of `tensors`.
|
github-repos
|
def in_main_process():
return not _running_in_worker
|
Whether it's in the main test process.
This is normally used to prepare the test environment which should only happen
in the main process.
Returns:
A boolean.
|
github-repos
|
def render_template(cmd_derived_from_alias, pos_args_table):
try:
cmd_derived_from_alias = normalize_placeholders(cmd_derived_from_alias, inject_quotes=True)
template = jinja.Template(cmd_derived_from_alias)
rendered = shlex.split(template.render(pos_args_table))
if ('' in rendered):
check_runtime_errors(cmd_derived_from_alias, pos_args_table)
return rendered
except Exception as exception:
if isinstance(exception, CLIError):
raise
split_exception_message = str(exception).split()
error_index = split_exception_message[(- 1)]
if error_index.isdigit():
split_exception_message.insert((- 1), 'index')
error_msg = RENDER_TEMPLATE_ERROR.format(' '.join(split_exception_message), cmd_derived_from_alias)
error_msg += '\n{}^'.format((' ' * (((len(error_msg) - len(cmd_derived_from_alias)) + int(error_index)) - 1)))
else:
exception_str = str(exception).replace('"{{', '}}').replace('}}"', '}}')
error_msg = RENDER_TEMPLATE_ERROR.format(cmd_derived_from_alias, exception_str)
raise CLIError(error_msg)
|
Render cmd_derived_from_alias as a Jinja template with pos_args_table as the arguments.
Args:
cmd_derived_from_alias: The string to be injected with positional arguments.
pos_args_table: The dictionary of positional arguments used to render the template.
Returns:
A processed string with positional arguments injected.
|
codesearchnet
|
def generate_plaintext_random(plain_vocab, distribution, train_samples, length):
if (distribution is not None):
assert (len(distribution) == len(plain_vocab))
train_indices = np.random.choice(range(len(plain_vocab)), (train_samples, length), p=distribution)
return train_indices
|
Generates samples of text from the provided vocabulary.
Args:
plain_vocab: vocabulary.
distribution: probability distribution over the vocabulary (must match its length).
train_samples: number of samples to generate for training.
length: length of each sample.
Returns:
train_indices (np.array of Integers): random integers for training.
shape = [train_samples, length]
|
codesearchnet
|
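A small illustrative call with a uniform distribution over a hypothetical four-symbol vocabulary.

```python
import numpy as np

vocab = ['a', 'b', 'c', 'd']
samples = generate_plaintext_random(vocab, [0.25] * 4, train_samples=3, length=5)
print(samples.shape)  # (3, 5); entries are indices into vocab
```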
def mutant_charts_for_feature(example_protos, feature_name, serving_bundles, viz_params):
def chart_for_index(index_to_mutate):
(mutant_features, mutant_examples) = make_mutant_tuples(example_protos, original_feature, index_to_mutate, viz_params)
charts = []
for serving_bundle in serving_bundles:
inference_result_proto = run_inference(mutant_examples, serving_bundle)
charts.append(make_json_formatted_for_single_chart(mutant_features, inference_result_proto, index_to_mutate))
return charts
try:
original_feature = parse_original_feature_from_example(example_protos[0], feature_name)
except ValueError as e:
return {'chartType': 'categorical', 'data': []}
indices_to_mutate = (viz_params.feature_indices or range(original_feature.length))
chart_type = ('categorical' if (original_feature.feature_type == 'bytes_list') else 'numeric')
try:
return {'chartType': chart_type, 'data': [chart_for_index(index_to_mutate) for index_to_mutate in indices_to_mutate]}
except IndexError as e:
raise common_utils.InvalidUserInputError(e)
|
Returns JSON formatted for rendering all charts for a feature.
Args:
example_protos: The example protos to mutate.
feature_name: The string feature name to mutate.
serving_bundles: One `ServingBundle` object per model, that contains the
information to make the serving request.
viz_params: A `VizParams` object that contains the UI state of the request.
Raises:
InvalidUserInputError if `viz_params.feature_index_pattern` requests out of
range indices for `feature_name` within `example_proto`.
Returns:
A JSON-able dict for rendering a single mutant chart, parsed in
`tf-inference-dashboard.html`.
{
'chartType': 'numeric', # oneof('numeric', 'categorical')
'data': [A list of data] # parseable by vz-line-chart or vz-bar-chart
}
|
codesearchnet
|
def select_copula(cls, X):
frank = Bivariate(CopulaTypes.FRANK)
frank.fit(X)
if (frank.tau <= 0):
selected_theta = frank.theta
selected_copula = CopulaTypes.FRANK
return (selected_copula, selected_theta)
copula_candidates = [frank]
theta_candidates = [frank.theta]
try:
clayton = Bivariate(CopulaTypes.CLAYTON)
clayton.fit(X)
copula_candidates.append(clayton)
theta_candidates.append(clayton.theta)
except ValueError:
pass
try:
gumbel = Bivariate(CopulaTypes.GUMBEL)
gumbel.fit(X)
copula_candidates.append(gumbel)
theta_candidates.append(gumbel.theta)
except ValueError:
pass
(z_left, L, z_right, R) = cls.compute_empirical(X)
(left_dependence, right_dependence) = cls.get_dependencies(copula_candidates, z_left, z_right)
cost_L = [np.sum(((L - l) ** 2)) for l in left_dependence]
cost_R = [np.sum(((R - r) ** 2)) for r in right_dependence]
cost_LR = np.add(cost_L, cost_R)
selected_copula = np.argmax(cost_LR)
selected_theta = theta_candidates[selected_copula]
return (CopulaTypes(selected_copula), selected_theta)
|
Select best copula function based on likelihood.
Args:
X: 2-dimensional `np.ndarray`
Returns:
tuple: `tuple(CopulaType, float)` best fit and model param.
|
codesearchnet
|
def _get_subcommand(name):
_LOGGER.debug('Accessing subcommand "%s".', name)
if (name not in settings.subcommands):
raise ValueError('"{subcommand}" is not a {command} command. \'{command} help -a\' lists all available subcommands.'.format(command=settings.command, subcommand=name))
return settings.subcommands[name]
|
Return the function for the specified subcommand.
Args:
name: The name of a subcommand.
Returns:
The loadable object from the entry point represented by the subcommand.
|
codesearchnet
|
def word_to_vector_list(self, word, numeric=False, xsampa=False):
if xsampa:
word = self.xsampa.convert(word)
tensor = list(map(self.segment_to_vector, self.segs(word)))
if numeric:
return self.tensor_to_numeric(tensor)
else:
return tensor
|
Return a list of feature vectors, given a Unicode IPA word.
Args:
word (unicode): string in IPA
numeric (bool): if True, return features as numeric values instead
of strings
xsampa (bool): if True, treat `word` as X-SAMPA and convert it to IPA first
Returns:
list: a list of lists of '+'/'-'/'0' or 1/-1/0
|
codesearchnet
|
def le(self, other, axis="columns", level=None):
return self._binary_op("le", other, axis=axis, level=level)
|
Checks element-wise that this is less than or equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the le over.
level: The Multilevel index level to apply le over.
Returns:
A new DataFrame filled with Booleans.
|
juraj-google-style
|
def driver_for_path(path, drivers=None):
ext = (os.path.splitext(path)[1][1:] or path).lower()
drivers = drivers or ImageDriver.registry if ext else {}
for name, meta in drivers.items():
if ext == meta.get('DMD_EXTENSION', '').lower():
return ImageDriver(name)
return None
|
Returns the gdal.Driver for a path or None based on the file extension.
Arguments:
path -- file path as str with a GDAL supported file extension
drivers -- optional mapping of driver names to metadata to search; defaults to ImageDriver.registry
|
juraj-google-style
|
def set_timestamp(cls, filename: str, response: HTTPResponse):
last_modified = response.fields.get('Last-Modified')
if (not last_modified):
return
try:
last_modified = email.utils.parsedate(last_modified)
except ValueError:
_logger.exception('Failed to parse date.')
return
last_modified = time.mktime(last_modified)
os.utime(filename, (time.time(), last_modified))
|
Set the Last-Modified timestamp onto the given file.
Args:
filename: The path of the file
response: Response
|
codesearchnet
|
def es_json(self, role='rdf_class', remove_empty=True, **kwargs):
def test_idx_status(cls_inst, **kwargs):
if kwargs.get("force") == True:
return False
idx_time = cls_inst.get("kds_esIndexTime", [None])[0]
mod_time = cls_inst.get("dcterm_modified", [None])[0]
error_msg = cls_inst.get("kds_esIndexError", [None])[0]
if (not idx_time) or \
error_msg or \
(idx_time and mod_time and idx_time < mod_time):
return False
return True
rtn_obj = {}
if kwargs.get("depth"):
kwargs['depth'] += 1
else:
kwargs['depth'] = 1
if role == 'rdf_class':
if test_idx_status(self, **kwargs):
return None
for prop, value in self.items():
if prop in ['kds_esIndexTime', 'kds_esIndexError']:
continue
new_val = value.es_json()
rtn_method = get_attr(self[prop], 'kds_esObjectType', [])
if 'kdr_Array' in rtn_method:
rtn_obj[prop] = new_val
elif (remove_empty and new_val) or not remove_empty:
if len(new_val) == 1:
rtn_obj[prop] = new_val[0]
else:
rtn_obj[prop] = new_val
nested_props = None
else:
try:
nested_props = self.es_defs.get('kds_esNestedProps',
list(self.keys())).copy()
except AttributeError:
nested_props = list(self.keys())
for prop, value in self.items():
if prop in ['kds_esIndexTime', 'kds_esIndexError']:
continue
new_val = value.es_json(**kwargs)
rtn_method = get_attr(self[prop], 'kds_esObjectType', [])
if 'kdr_Array' in rtn_method:
rtn_obj[prop] = new_val
elif (remove_empty and new_val) or not remove_empty:
if len(new_val) == 1:
rtn_obj[prop] = new_val[0] \
if not isinstance(new_val, dict) \
else new_val
else:
rtn_obj[prop] = new_val
rtn_obj = get_es_label(rtn_obj, self)
rtn_obj = get_es_value(rtn_obj, self)
rtn_obj = get_es_ids(rtn_obj, self)
if nested_props:
nested_props += ['value', 'id', 'uri']
rtn_obj = {key: value
for key, value in rtn_obj.items()
if key in nested_props}
rml_maps = self.get_all_rml(role=role)
if rml_maps:
rtn_obj['rml_map'] = rml_maps
return rtn_obj
|
Returns a JSON object of the class for insertion into es
args:
role: the role states how the class data should be returned
depending upon whether it is used as a subject or an object.
options are kds_esNested or rdf_class
remove_empty: True removes empty items from es object
|
juraj-google-style
|
def login(self, login_type, **kwargs):
content = {
"type": login_type
}
for key in kwargs:
if kwargs[key]:
content[key] = kwargs[key]
return self._send("POST", "/login", content)
|
Perform /login.
Args:
login_type (str): The value for the 'type' key.
**kwargs: Additional key/values to add to the JSON submitted.
|
juraj-google-style
|
def CrowdsaleRegister(self, wallet, register_addresses, from_addr=None):
invoke_args = [self.ScriptHash.ToString(), 'crowdsale_register',
[PromptUtils.parse_param(p, wallet) for p in register_addresses]]
tx, fee, results, num_ops, engine_success = TestInvokeContract(wallet, invoke_args, None, True, from_addr)
return tx, fee, results
|
Register for a crowd sale.
Args:
wallet (neo.Wallets.Wallet): a wallet instance.
register_addresses (list): list of public addresses to register for the sale.
Returns:
tuple:
InvocationTransaction: the transaction.
int: the transaction fee.
list: the neo VM evaluation stack results.
|
juraj-google-style
|
def get_extended_surface_mesh(self, repeat=(5, 5, 1)):
surf_str = Structure.from_sites(self.surface_sites)
surf_str.make_supercell(repeat)
return surf_str
|
Gets an extended surface mesh to use for adsorption
site finding by constructing a supercell of surface sites
Args:
repeat (3-tuple): repeat for getting extended surface mesh
|
juraj-google-style
|
def _make_parser_func(sep):
def parser_func(filepath_or_buffer, sep=sep, delimiter=None, header='infer', names=None, index_col=None, usecols=None, squeeze=False, prefix=None, mangle_dupe_cols=True, dtype=None, engine=None, converters=None, true_values=None, false_values=None, skipinitialspace=False, skiprows=None, nrows=None, na_values=None, keep_default_na=True, na_filter=True, verbose=False, skip_blank_lines=True, parse_dates=False, infer_datetime_format=False, keep_date_col=False, date_parser=None, dayfirst=False, iterator=False, chunksize=None, compression='infer', thousands=None, decimal=b'.', lineterminator=None, quotechar='"', quoting=0, escapechar=None, comment=None, encoding=None, dialect=None, tupleize_cols=None, error_bad_lines=True, warn_bad_lines=True, skipfooter=0, doublequote=True, delim_whitespace=False, low_memory=True, memory_map=False, float_precision=None):
(_, _, _, kwargs) = inspect.getargvalues(inspect.currentframe())
if (not kwargs.get('sep', sep)):
kwargs['sep'] = '\t'
return _read(**kwargs)
return parser_func
|
Creates a parser function from the given sep.
Args:
sep: The separator default to use for the parser.
Returns:
A function object.
|
codesearchnet
|
def parse(self, text, layers=None):
params = {'text': text, 'key': self.key}
if (layers is not None):
if isinstance(layers, six.string_types):
params['layers'] = layers
elif isinstance(layers, collections.Iterable):
params['layers'] = ','.join(layers)
req = requests.get(self.NLU_URL, params=params)
return req.json()
|
Parses the passed text into JSON.
Args:
text: Text to parse.
layers (optional): Special fields. Only one string
or iterable object (e.g "Data", ("Data", "Fio")).
Only these fields will be returned.
Returns:
The parsed text into a json object.
|
codesearchnet
|
def get_review(review_struct):
review_fn = _resource_context('review.rst')
with open(review_fn) as f:
review = f.read()
with NamedTemporaryFile(suffix='.png') as qr_file:
url = pyqrcode.create(review_struct.internal_url)
url.png(qr_file.name, scale=5)
qr_file.flush()
qr_file.seek(0)
review = Template(review).substitute(content=review_struct.get_rst(), datum=time.strftime('%d.%m.%Y', time.localtime()), cas=time.strftime('%H:%M', time.localtime()), resources_path=RES_PATH, qr_path=qr_file.name)
return gen_pdf(review, open(_resource_context('review_style.json')).read())
|
Generate review from `review_struct`.
Args:
review_struct (obj): :class:`.GenerateReview` instance.
Returns:
obj: StringIO file instance containing PDF file.
|
codesearchnet
|
def produce(self, X):
signal = X
window_length = len(self.window)
anomalies = np.zeros(len(signal))
window_weight = sum(self.window)
for i in range(0, len(signal) - window_length - 1):
rfft = np.fft.rfft(signal[i:i + window_length] * self.window)
sig_freq = np.abs(rfft) / window_weight
anomalies[i] = 0
for m in range(0, int(window_length / 2) - 1):
if ((sig_freq[m] > self.mask_top[m]) or (sig_freq[m] < self.mask_bottom[m])):
anomalies[i] = 1
break
return anomalies
|
Detects anomalies in telemetry data based on its power spectral density
Args:
X: Telemetry data
Returns:
anomalies: Data vector consisting of the anomalies detected in the telemetry data
|
juraj-google-style
|
def align_long_axis(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
output_height, output_width = (size['height'], size['width'])
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
if input_data_format == ChannelDimension.LAST:
rot_axes = (0, 1)
elif input_data_format == ChannelDimension.FIRST:
rot_axes = (1, 2)
else:
raise ValueError(f'Unsupported data format: {input_data_format}')
if output_width < output_height and input_width > input_height or (output_width > output_height and input_width < input_height):
image = np.rot90(image, 3, axes=rot_axes)
if data_format is not None:
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
|
Align the long axis of the image to the longest axis of the specified size.
Args:
image (`np.ndarray`):
The image to be aligned.
size (`Dict[str, int]`):
The size `{"height": h, "width": w}` to align the long axis to.
data_format (`str` or `ChannelDimension`, *optional*):
The data format of the output image. If unset, the same format as the input image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
Returns:
`np.ndarray`: The aligned image.
|
github-repos
|
def get_class_weights(y, smooth_factor=0):
from collections import Counter
counter = Counter(y)
if smooth_factor > 0:
p = max(counter.values()) * smooth_factor
for k in counter.keys():
counter[k] += p
majority = max(counter.values())
return {cls: float(majority / count) for cls, count in counter.items()}
|
Returns the weights for each class based on the frequencies of the samples.
Args:
y: A list of true labels (the labels must be hashable).
smooth_factor: A factor that smooths extremely uneven weights.
Returns:
A dictionary with the weight for each class.
|
juraj-google-style
|
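A quick worked example of the class-weight computation; the labels are arbitrary.

```python
y = [0, 0, 0, 0, 1, 1, 2]
print(get_class_weights(y))
# -> {0: 1.0, 1: 2.0, 2: 4.0}  (rarer classes get larger weights)
```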
def send_peers(self, connection_id):
with self._lock:
peer_endpoints = list(self._peers.values())
if self._endpoint:
peer_endpoints.append(self._endpoint)
peers_response = GetPeersResponse(peer_endpoints=peer_endpoints)
try:
self._network.send(validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE, peers_response.SerializeToString(), connection_id, one_way=True)
except ValueError:
LOGGER.debug('Connection disconnected: %s', connection_id)
|
Sends a message containing our peers to the
connection identified by connection_id.
Args:
connection_id (str): A unique identifier which identifies a
connection on the network server socket.
|
codesearchnet
|
def __init__(self, url):
self._url = url
self._last_progress_msg_print_time = time.time()
self._total_bytes_downloaded = 0
self._max_prog_str = 0
|
Creates DownloadManager responsible for downloading a TF-Hub module.
Args:
url: URL pointing to the TF-Hub module to download and extract.
|
juraj-google-style
|
def hstack(xs):
if any_symbolic_tensors((xs,)):
return Hstack().symbolic_call(xs)
return backend.numpy.hstack(xs)
|
Stack tensors in sequence horizontally (column wise).
This is equivalent to concatenation along the first axis for 1-D tensors,
and along the second axis for all other tensors.
Args:
xs: Sequence of tensors.
Returns:
The tensor formed by stacking the given tensors.
|
github-repos
|
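A hedged usage sketch, assuming this is the keras.ops.hstack entry point (Keras 3 style); the arrays are arbitrary.

```python
import numpy as np
from keras import ops

a = np.array([[1], [2]])
b = np.array([[3], [4]])
print(ops.hstack([a, b]))
# [[1 3]
#  [2 4]]
```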
def ParseCallsRow(self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
call_type = self._GetRowValue(query_hash, row, 'type')
call_type = self.CALL_TYPE.get(call_type, 'UNKNOWN')
duration = self._GetRowValue(query_hash, row, 'duration')
timestamp = self._GetRowValue(query_hash, row, 'date')
event_data = AndroidCallEventData()
event_data.call_type = call_type
event_data.duration = self._GetRowValue(query_hash, row, 'duration')
event_data.name = self._GetRowValue(query_hash, row, 'name')
event_data.number = self._GetRowValue(query_hash, row, 'number')
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Call Started')
parser_mediator.ProduceEventWithEventData(event, event_data)
if duration:
if isinstance(duration, py2to3.STRING_TYPES):
try:
duration = int(duration, 10)
except ValueError:
duration = 0
timestamp += (duration * 1000)
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Call Ended')
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a Call record row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
|
codesearchnet
|
def expand_abbreviations(self, text):
if (not self.abbreviations):
raise LexiconError('No abbreviations in lexicon.')
def chunks(data, SIZE=25):
'\n Regex only supports 100 groups for munging callbacks. So we have to\n chunk the abbreviation dictionary.\n '
it = iter(data)
for i in range(0, len(data), SIZE):
(yield {k: data[k] for k in islice(it, SIZE)})
def cb(g):
'Regex callback'
return (self.abbreviations.get(g.group(0)) or g.group(0))
text = re.sub('w/', 'wi', text)
for subdict in chunks(self.abbreviations):
regex = (('(\\b' + '\\b)|(\\b'.join(subdict.keys())) + '\\b)')
text = re.sub(regex, cb, text)
return text
|
Parse a piece of text and replace any abbreviations with their full
word equivalents. Uses the lexicon.abbreviations dictionary to find
abbreviations.
Args:
text (str): The text to parse.
Returns:
str: The text with abbreviations replaced.
|
codesearchnet
|
def range(self, location, distance):
return (segment.range(location, distance) for segment in self)
|
Test whether locations are within a given range of ``location``.
Args:
location (Point): Location to test range against
distance (float): Distance to test location is within
Returns:
list of list of Point: Groups of points in range per segment
|
codesearchnet
|
def server_def(self):
return self._server_def
|
Returns the `tf.train.ServerDef` for this server.
Returns:
A `tf.train.ServerDef` protocol buffer that describes the configuration
of this server.
|
github-repos
|
def take_profit(self, accountID, **kwargs):
return self.create(accountID, order=TakeProfitOrderRequest(**kwargs))
|
Shortcut to create a Take Profit Order in an Account
Args:
accountID : The ID of the Account
kwargs : The arguments to create a TakeProfitOrderRequest
Returns:
v20.response.Response containing the results from submitting
the request
|
codesearchnet
|
def convert_shapes(input_shape, to_tuples=True):
def _is_shape_component(value):
return value is None or isinstance(value, (int, tensor_shape.Dimension))
def _is_atomic_shape(input_shape):
if _is_shape_component(input_shape):
return True
if isinstance(input_shape, tensor_shape.TensorShape):
return True
if isinstance(input_shape, (tuple, list)) and all((_is_shape_component(ele) for ele in input_shape)):
return True
return False
def _convert_shape(input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if to_tuples:
input_shape = tuple(input_shape.as_list())
return input_shape
return map_structure_with_atomic(_is_atomic_shape, _convert_shape, input_shape)
|
Converts nested shape representations to desired format.
Performs:
TensorShapes -> tuples if `to_tuples=True`.
tuples of int or None -> TensorShapes if `to_tuples=False`.
Valid objects to be converted are:
- TensorShapes
- tuples with elements of type int or None.
- ints
- None
Args:
input_shape: A nested structure of objects to be converted to TensorShapes.
to_tuples: If `True`, converts all TensorShape to tuples. Otherwise converts
all tuples representing shapes to TensorShapes.
Returns:
Nested structure of shapes in desired format.
Raises:
ValueError: when the input tensor shape can't be converted to tuples, eg
unknown tensor shape.
|
github-repos
|
def date2datestr(date, fmt='yyyymmdd'):
if '-' in fmt:
if not fmt.index('d') < fmt.index('m') < fmt.index('y'):
raise ValueError('Invalid format string. {}'.format(
VALID_DATE_FORMATS_TEXT))
d, m, y = fmt.split('-')
elif '/' in fmt:
if not fmt.index('m') < fmt.index('d') < fmt.index('y'):
raise ValueError('Invalid format string. {}'.format(
VALID_DATE_FORMATS_TEXT))
m, d, y = fmt.split('/')
elif any(c not in 'dmy' for c in fmt):
raise ValueError('Invalid character in format string. {}'.format(
VALID_DATE_FORMATS_TEXT))
else:
if not fmt.index('y') < fmt.index('m') < fmt.index('d'):
raise ValueError('Invalid format string. {}'.format(
VALID_DATE_FORMATS_TEXT))
y, m, d = fmt[:-4], fmt[-4:-2], fmt[-2:]
for string, char in ((d, 'd'), (m, 'm'), (y, 'y')):
if any(c != char for c in string):
raise ValueError('Invalid date format: {} is not {}'.\
format(char, string))
if len(y) == 4:
fmt = fmt.replace('yyyy', '%Y', 1)
elif len(y) == 2:
fmt = fmt.replace('yy', '%y', 1)
else:
raise ValueError('Invalid format string, year must have 2 or 4 digits')
if len(m) == 2:
fmt = fmt.replace('mm', '%m', 1)
elif len(m) == 1:
fmt = fmt.replace('m', 'X%m', 1)
else:
raise ValueError('Invalid format string, month must have 1 or 2 digits')
if len(d) == 2:
fmt = fmt.replace('dd', '%d', 1)
elif len(d) == 1:
fmt = fmt.replace('d', 'X%d', 1)
else:
raise ValueError('Invalid format string, day must have 1 or 2 digits')
return date.strftime(fmt).replace('X0','X').replace('X','')
|
Turns a datetime.date object into a string. The string must have one of the
formats from VALID_DATE_FORMATS_TEXT to make it compatible with
datestr2date.
Args:
date (datetime.date) the date to be translated
fmt (str) a format string.
Returns:
(str) that represents a date.
Raises:
ValueError if the format is not valid.
|
juraj-google-style
|
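Two illustrative conversions with the function above.

```python
import datetime

d = datetime.date(2024, 1, 31)
print(date2datestr(d))                 # '20240131' (default 'yyyymmdd')
print(date2datestr(d, 'dd-mm-yyyy'))   # '31-01-2024'
```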
def dump_begin(self, selector_id):
if self.dump_walker is not None:
self.storage.destroy_walker(self.dump_walker)
selector = DataStreamSelector.FromEncoded(selector_id)
self.dump_walker = self.storage.create_walker(selector, skip_all=False)
return Error.NO_ERROR, Error.NO_ERROR, self.dump_walker.count()
|
Start dumping a stream.
Args:
selector_id (int): The buffered stream we want to dump.
Returns:
(int, int, int): Error code, second error code, number of available readings
|
juraj-google-style
|
def remove_team_member(self, account_id=None, email_address=None):
return self._add_remove_team_member(self.TEAM_REMOVE_MEMBER_URL, email_address, account_id)
|
Remove a user from your Team
Args:
account_id (str): The id of the account of the user to remove from your team.
email_address (str): The email address of the account to remove from your team. The account id prevails if both account_id and email_address are provided.
Returns:
A Team object
|
juraj-google-style
|
def resolve_path(path, config_file):
if os.path.isabs(path):
return path
return os.path.relpath(path, os.path.dirname(config_file))
|
Resolve path relative to config file location.
Args:
path: Path to be resolved.
config_file: Path to config file, which `path` is specified
relative to.
Returns:
Path relative to the `config_file` location. If `path` is an
absolute path then it will be returned without change.
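A brief sketch of the two branches (the paths are illustrative only):
resolve_path('/var/data/file.txt', '/etc/app/config.yml')
# '/var/data/file.txt' -- absolute paths are returned unchanged
resolve_path('file.txt', '/etc/app/config.yml')
# os.path.relpath('file.txt', '/etc/app'), i.e. the path of ./file.txt
# expressed relative to the config file's directory (so it depends on the CWD)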
|
codesearchnet
|
def gaussian_noise(x, severity=1):
c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]
x = np.array(x) / 255.
x_clip = np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255
return around_and_astype(x_clip)
|
Gaussian noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added Gaussian noise.
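A usage sketch; it assumes the module-level helper `around_and_astype` used above is importable alongside this function, and the dtype check relies on the documented uint8 return:
import numpy as np

img = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
noisy = gaussian_noise(img, severity=3)   # noise scale c = 0.18
assert noisy.shape == img.shape and noisy.dtype == np.uint8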
|
juraj-google-style
|
def get_crystal_field_spin(self, coordination: str='oct', spin_config: str='high'):
if ((coordination not in ('oct', 'tet')) or (spin_config not in ('high', 'low'))):
raise ValueError('Invalid coordination or spin config.')
elec = self.full_electronic_structure
if ((len(elec) < 4) or (elec[(- 1)][1] != 's') or (elec[(- 2)][1] != 'd')):
raise AttributeError('Invalid element {} for crystal field calculation.'.format(self.symbol))
nelectrons = ((elec[(- 1)][2] + elec[(- 2)][2]) - self.oxi_state)
if ((nelectrons < 0) or (nelectrons > 10)):
raise AttributeError('Invalid oxidation state {} for element {}'.format(self.oxi_state, self.symbol))
if (spin_config == 'high'):
return (nelectrons if (nelectrons <= 5) else (10 - nelectrons))
elif (spin_config == 'low'):
if (coordination == 'oct'):
if (nelectrons <= 3):
return nelectrons
elif (nelectrons <= 6):
return (6 - nelectrons)
elif (nelectrons <= 8):
return (nelectrons - 6)
else:
return (10 - nelectrons)
elif (coordination == 'tet'):
if (nelectrons <= 2):
return nelectrons
elif (nelectrons <= 4):
return (4 - nelectrons)
elif (nelectrons <= 7):
return (nelectrons - 4)
else:
return (10 - nelectrons)
|
Calculate the crystal field spin based on coordination and spin
configuration. Only works for transition metal species.
Args:
coordination (str): Only oct and tet are supported at the moment.
spin_config (str): Supported keywords are "high" or "low".
Returns:
Crystal field spin in Bohr magneton.
Raises:
AttributeError if species is not a valid transition metal or has
an invalid oxidation state.
ValueError if invalid coordination or spin_config.
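A hedged usage sketch, assuming this method lives on pymatgen's oxidation-state-decorated species class (named `Species` in recent releases, `Specie` in older ones):
from pymatgen.core.periodic_table import Species

fe2 = Species('Fe', 2)                            # Fe2+ is a d6 ion
fe2.get_crystal_field_spin()                      # 4 unpaired electrons (high-spin, octahedral)
fe2.get_crystal_field_spin(spin_config='low')     # 0 (low-spin octahedral d6)
fe2.get_crystal_field_spin(coordination='tet')    # 4 (high-spin default, tetrahedral)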
|
codesearchnet
|
def __init__(self, options=None, **kwargs):
try:
env = MeCabEnv(**kwargs)
self.__ffi = _ffi_libmecab()
self.__mecab = self.__ffi.dlopen(env.libpath)
self.libpath = env.libpath
self.__bytes2str, self.__str2bytes = string_support(env.charset)
self.__split_pattern, self.__split_features = splitter_support(env.charset)
op = OptionParse(env.charset)
self.options = op.parse_mecab_options(options)
ostr = op.build_options_str(self.options)
self.model = self.__mecab.mecab_model_new2(ostr)
if self.model == self.__ffi.NULL:
logger.error(self._ERROR_NULLPTR.format('Model'))
raise MeCabError(self._ERROR_NULLPTR.format('Model'))
self.tagger = self.__mecab.mecab_model_new_tagger(self.model)
if self.tagger == self.__ffi.NULL:
logger.error(self._ERROR_NULLPTR.format('Tagger'))
raise MeCabError(self._ERROR_NULLPTR.format('Tagger'))
self.lattice = self.__mecab.mecab_model_new_lattice(self.model)
if self.lattice == self.__ffi.NULL:
logger.error(self._ERROR_NULLPTR.format('Lattice'))
raise MeCabError(self._ERROR_NULLPTR.format('Lattice'))
n = self.options.get('nbest', 1)
if n > 1:
req_type = self.MECAB_LATTICE_NBEST
else:
req_type = self.MECAB_LATTICE_ONE_BEST
self.__mecab.mecab_lattice_set_request_type(self.lattice, req_type)
if 'partial' in self.options:
self.__mecab.mecab_lattice_add_request_type(
self.lattice, self.MECAB_LATTICE_PARTIAL)
if 'marginal' in self.options:
self.__mecab.mecab_lattice_add_request_type(
self.lattice, self.MECAB_LATTICE_MARGINAL_PROB)
if 'all_morphs' in self.options:
self.__mecab.mecab_lattice_add_request_type(
self.lattice, self.MECAB_LATTICE_ALL_MORPHS)
if 'allocate_sentence' in self.options:
self.__mecab.mecab_lattice_add_request_type(
self.lattice, self.MECAB_LATTICE_ALLOCATE_SENTENCE)
self.dicts = []
dptr = self.__mecab.mecab_model_dictionary_info(self.model)
while dptr != self.__ffi.NULL:
fpath = self.__bytes2str(self.__ffi.string(dptr.filename))
fpath = os.path.abspath(fpath)
chset = self.__bytes2str(self.__ffi.string(dptr.charset))
self.dicts.append(DictionaryInfo(dptr, fpath, chset))
dptr = getattr(dptr, 'next')
self.__enc = self.dicts[0].charset
self.version = self.__bytes2str(
self.__ffi.string(self.__mecab.mecab_version()))
except EnvironmentError as err:
logger.error(self._ERROR_INIT.format(str(err)))
raise MeCabError(err)
except ValueError as verr:
logger.error(self._ERROR_INIT.format(str(verr)))
raise MeCabError(self._ERROR_INIT.format(str(verr)))
|
Initializes the MeCab instance with the given options.
Args:
options: Optional string or dictionary of the MeCab options to be
used.
Kwargs:
debug (bool): Flag for outputting debug messages to stderr.
Raises:
SystemExit: An unrecognized option was passed in.
MeCabError: An error occurred in locating the MeCab library;
or the FFI handle to MeCab could not be created.
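A hedged usage sketch, assuming this is the natto-py `MeCab` wrapper and that a MeCab installation and dictionary are discoverable by `MeCabEnv`:
nm = MeCab('-Owakati')                       # wakati-gaki (word-splitting) output format
print(nm.version)                            # MeCab library version, e.g. '0.996'
print(nm.parse('すもももももももものうち'))  # whitespace-separated surface forms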
|
juraj-google-style
|
def run_commands(commands, settings):
sprint = settings["sprint"]
quiet = settings["quiet"]
error = settings["error"]
enhanced_errors = True
the_shell = None
if settings["no_enhanced_errors"]:
enhanced_errors = False
if "shell" in settings:
the_shell = settings["shell"]
windows_p = sys.platform == "win32"
STDOUT = None
STDERR = None
if quiet:
STDOUT = PIPE
STDERR = PIPE
commands = commands.rstrip()
sprint("About to run commands '{}'".format(commands), level="verbose")
if not quiet:
sprint(commands)
if the_shell:
tmp = shlex.split(the_shell)
the_shell = tmp[0]
tmp = tmp[1:]
if enhanced_errors and not windows_p:
tmp.append("-e")
tmp.append(commands)
commands = tmp
else:
if enhanced_errors and not windows_p:
commands = ["-e", commands]
p = Popen(commands, shell=True, stdout=STDOUT, stderr=STDERR,
executable=the_shell)
out, err = p.communicate()
if p.returncode:
if quiet:
error(err.decode(locale.getpreferredencoding()))
error("Command failed to run")
sys.exit(1)
|
Runs the commands supplied as an argument.
It will exit the program if the commands return a non-zero code.
Args:
commands (str): the commands to run.
settings (dict): the settings dictionary (must provide the "sprint",
"quiet", and "error" entries used above).
|
juraj-google-style
|
def __init__(self, metric_name, metric_methods, label_length, *args):
self._metric_name = metric_name
self._metric_methods = metric_methods
self._label_length = label_length
if label_length >= len(self._metric_methods):
raise ValueError('Cannot create {} metric with label >= {}'.format(self._metric_name, len(self._metric_methods)))
self._metric = self._metric_methods[self._label_length].create(*args)
|
Creates a new metric.
Args:
metric_name: name of the metric class.
metric_methods: list of swig metric methods.
label_length: length of label args.
*args: the arguments to call create method.
|
github-repos
|
def _ParseTriggerStartTime(self, parser_mediator, trigger):
time_elements_tuple = (
trigger.start_date.year, trigger.start_date.month,
trigger.start_date.day_of_month, trigger.start_time.hours,
trigger.start_time.minutes, 0)
date_time = None
if time_elements_tuple != (0, 0, 0, 0, 0, 0):
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
date_time.is_local_time = True
date_time._precision = dfdatetime_definitions.PRECISION_1_MINUTE
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid trigger start time: {0!s}'.format(time_elements_tuple))
return date_time
|
Parses the start time from a trigger.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
trigger (job_trigger): a trigger.
Returns:
dfdatetime.DateTimeValues: trigger start date and time or None if not
available.
|
juraj-google-style
|
def put(self, credentials):
self.acquire_lock()
try:
self.locked_put(credentials)
finally:
self.release_lock()
|
Write a credential.
The Storage lock is acquired before the credential is written and released afterwards.
Args:
credentials: Credentials, the credentials to store.
|
juraj-google-style
|
def observe(self, success, failure):
if not isinstance(success, (int, float)):
    raise TypeError()
if not isinstance(failure, (int, float)):
    raise TypeError()
if success <= 0:
    raise ValueError()
if failure <= 0:
    raise ValueError()
self.__success += success
self.__failure += failure
|
Observation data.
Args:
success: The number of success.
failure: The number of failure.
|
juraj-google-style
|
def save_image(imager, grid_data, grid_norm, output_file):
imager.finalise_plane(grid_data, grid_norm)
grid_data = numpy.real(grid_data)
border = (imager.plane_size - imager.image_size)
if border > 0:
end = border + imager.image_size
grid_data = grid_data[border:end, border:end]
hdr = fits.header.Header()
fits.writeto(output_file, grid_data, hdr, clobber=True)
|
Makes an image from gridded visibilities and saves it to a FITS file.
Args:
imager (oskar.Imager): Handle to configured imager.
grid_data (numpy.ndarray): Final visibility grid.
grid_norm (float): Grid normalisation to apply.
output_file (str): Name of output FITS file to write.
|
juraj-google-style
|
def start(host, port, profiler_stats, dont_start_browser, debug_mode):
stats_handler = functools.partial(StatsHandler, profiler_stats)
if (not debug_mode):
sys.stderr = open(os.devnull, 'w')
print('Starting HTTP server...')
if (not dont_start_browser):
webbrowser.open('http://{}:{}/'.format(host, port))  # open the served stats page
try:
StatsServer((host, port), stats_handler).serve_forever()
except KeyboardInterrupt:
print('Stopping...')
sys.exit(0)
|
Starts HTTP server with specified parameters.
Args:
host: Server host name.
port: Server port.
profiler_stats: A dict with collected program stats.
dont_start_browser: Whether to open browser after profiling.
debug_mode: Whether to redirect stderr to /dev/null.
|
codesearchnet
|
def get_current_remat_mode():
remat_scope_stack = global_state.get_global_attribute('remat_scope_stack')
if not remat_scope_stack:
return None
active_scope = remat_scope_stack[-1]
return RematMode(active_scope.mode, active_scope.output_size_threshold, active_scope.layer_names)
|
Get the current rematerialization mode and associated settings.
Returns:
RematMode or None: The current rematerialization mode, or None if not
set.
|
github-repos
|
def _ExpandUsersVariablePathSegments(
cls, path_segments, path_separator, user_accounts):
if not path_segments:
return []
path_segments_lower = [
path_segment.lower() for path_segment in path_segments]
if path_segments_lower[0] in ('%%users.homedir%%', '%%users.userprofile%%'):
return cls._ExpandUsersHomeDirectoryPathSegments(
path_segments, path_separator, user_accounts)
path_expansions = cls._PATH_EXPANSIONS_PER_USERS_VARIABLE.get(
path_segments[0], None)
if path_expansions:
expanded_paths = []
for path_expansion in path_expansions:
expanded_path_segments = list(path_expansion)
expanded_path_segments.extend(path_segments[1:])
paths = cls._ExpandUsersVariablePathSegments(
expanded_path_segments, path_separator, user_accounts)
expanded_paths.extend(paths)
return expanded_paths
if cls._IsWindowsDrivePathSegment(path_segments[0]):
path_segments[0] = ''
path = path_separator.join(path_segments)
return [path]
|
Expands path segments with a users variable, e.g. %%users.homedir%%.
Args:
path_segments (list[str]): path segments.
path_separator (str): path segment separator.
user_accounts (list[UserAccountArtifact]): user accounts.
Returns:
list[str]: paths for which the users variables have been expanded.
|
juraj-google-style
|
def send(self, conn):
if (conn is None):
raise ValueError('Cannot send to connection None')
with (yield conn.write_lock.acquire()):
sent = 0
(yield conn.write_message(self.header_json, locked=False))
sent += len(self.header_json)
(yield conn.write_message(self.metadata_json, locked=False))
sent += len(self.metadata_json)
(yield conn.write_message(self.content_json, locked=False))
sent += len(self.content_json)
sent += (yield self.write_buffers(conn, locked=False))
raise gen.Return(sent)
|
Send the message on the given connection.
Args:
conn (WebSocketHandler) : a WebSocketHandler to send messages
Returns:
int : number of bytes sent
|
codesearchnet
|
def process_buffers_for_display(s, limit=40):
if isinstance(s, (list, tuple)):
return [process_buffers_for_display(elem, limit=limit) for elem in s]
else:
length = len(s)
if (length > limit):
return (binascii.b2a_qp(s[:limit]) + (b' (length-%d truncated at %d bytes)' % (length, limit)))
else:
return binascii.b2a_qp(s)
|
Process a buffer for human-readable display.
This function performs the following operation on each of the buffers in `s`.
1. Truncate input buffer if the length of the buffer is greater than
`limit`, to prevent large strings from overloading the frontend.
2. Apply `binascii.b2a_qp` on the truncated buffer to make the buffer
printable and convertible to JSON.
3. If truncation happened (in step 1), append a string at the end
describing the original length and the truncation.
Args:
s: The buffer to be processed, either a single buffer or a nested array of
them.
limit: Length limit for each buffer, beyond which truncation will occur.
Returns:
A single processed buffer or a nested array of processed buffers.
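Two quick examples of the truncation behaviour:
process_buffers_for_display(b'short bytes')
# b'short bytes' (under the 40-byte default limit; only quoted-printable encoding is applied)
process_buffers_for_display([b'a' * 100], limit=10)
# [b'aaaaaaaaaa (length-100 truncated at 10 bytes)']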
|
codesearchnet
|
def get_default(__func: Callable, __arg: str) -> str:
return signature(__func).parameters[__arg].default
|
Fetch default value for a function argument
Args:
__func: Function to inspect
__arg: Argument to extract default value for
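For example:
def greet(name, greeting='hello'):
    return '{}, {}'.format(greeting, name)

get_default(greet, 'greeting')   # 'hello'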
|
codesearchnet
|
def orient_undirected_graph(self, data, graph, **kwargs):
self.arguments['{CITEST}'] = self.dir_CI_test[self.CI_test]
self.arguments['{METHOD_INDEP}'] = self.dir_method_indep[self.method_indep]
self.arguments['{DIRECTED}'] = 'TRUE'
self.arguments['{ALPHA}'] = str(self.alpha)
self.arguments['{NJOBS}'] = str(self.nb_jobs)
self.arguments['{VERBOSE}'] = str(self.verbose).upper()
fe = DataFrame(nx.adj_matrix(graph, weight=None).todense())
fg = DataFrame(1 - fe.values)
results = self._run_pc(data, fixedEdges=fe, fixedGaps=fg, verbose=self.verbose)
return nx.relabel_nodes(nx.DiGraph(results),
{idx: i for idx, i in enumerate(data.columns)})
|
Run PC on an undirected graph.
Args:
data (pandas.DataFrame): DataFrame containing the data
graph (networkx.Graph): Skeleton of the graph to orient
Returns:
networkx.DiGraph: Solution given by PC on the given skeleton.
|
juraj-google-style
|
def symbolic_master_equation(self, rho=None):
(L, H) = (self.L, self.H)
if (rho is None):
rho = OperatorSymbol('rho', hs=self.space)
return -I * (H * rho - rho * H) + sum(
    Lk * rho * adjoint(Lk)
    - (adjoint(Lk) * Lk * rho + rho * adjoint(Lk) * Lk) / 2
    for Lk in L.matrix.ravel())
|
Compute the symbolic Liouvillian acting on a state rho
If no rho is given, an OperatorSymbol is created in its place.
This corresponds to the RHS of the master equation
in which an average is taken over the external noise degrees of
freedom.
Args:
rho (Operator): A symbolic density matrix operator
Returns:
Operator: The RHS of the master equation.
|
codesearchnet
|
def GetFileEntryByPathSpec(self, path_spec):
return bde_file_entry.BDEFileEntry(
self._resolver_context, self, path_spec, is_root=True, is_virtual=True)
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
BDEFileEntry: file entry or None.
|
juraj-google-style
|
def show_abierrors(self, nids=None, stream=sys.stdout):
lines = []
app = lines.append
for task in self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids):
header = (('=== ' + task.qout_file.path) + '===')
app(header)
report = task.get_event_report()
if (report is not None):
app(('num_errors: %s, num_warnings: %s, num_comments: %s' % (report.num_errors, report.num_warnings, report.num_comments)))
app('*** ERRORS ***')
app('\n'.join((str(e) for e in report.errors)))
app('*** BUGS ***')
app('\n'.join((str(b) for b in report.bugs)))
else:
app('get_event_report returned None!')
app((('=' * len(header)) + (2 * '\n')))
return stream.writelines(lines)
|
Write to the given stream the list of ABINIT errors for all tasks whose status is S_ABICRITICAL.
Args:
nids: optional list of node identifiers used to filter the tasks.
stream: File-like object. Default: sys.stdout
|
codesearchnet
|
def remove_api_key(self):
url = (self.record_url + '/remove_api_key')
res = requests.patch(url=url, headers=HEADERS, verify=False)
res.raise_for_status()
self.api_key = ''
|
Removes the user's existing API key, if present, and sets the current instance's 'api_key'
attribute to the empty string.
Returns:
`NoneType`: None.
|
codesearchnet
|
def dbname(self, value):
self._dbname = value
self._connectionXML.set('dbname', value)
|
Set the connection's database name property.
Args:
value: New name of the database. String.
Returns:
Nothing.
|
juraj-google-style
|
def add_layout(self, obj, place='center'):
valid_places = ['left', 'right', 'above', 'below', 'center']
if place not in valid_places:
raise ValueError(
"Invalid place '%s' specified. Valid place values are: %s" % (place, nice_join(valid_places))
)
getattr(self, place).append(obj)
|
Adds an object to the plot in a specified place.
Args:
obj (Renderer) : the object to add to the Plot
place (str, optional) : where to add the object (default: 'center')
Valid places are: 'left', 'right', 'above', 'below', 'center'.
Returns:
None
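A hedged usage sketch with Bokeh, assuming a standard `figure` and an annotation model such as `Title`:
from bokeh.models import Title
from bokeh.plotting import figure

p = figure()
p.add_layout(Title(text='Above the frame'), place='above')
p.add_layout(Title(text='Right of the frame'), place='right')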
|
juraj-google-style
|
def run_shell_command(state, host, command, get_pty=False, timeout=None, print_output=False, **command_kwargs):
command = make_command(command, **command_kwargs)
logger.debug('Running command on {0}: (pty={1}) {2}'.format(host.name, get_pty, command))
if print_output:
print('{0}>>> {1}'.format(host.print_prefix, command))
(_, stdout_buffer, stderr_buffer) = host.connection.exec_command(command, get_pty=get_pty)
channel = stdout_buffer.channel
stdout_reader = gevent.spawn(read_buffer, stdout_buffer, print_output=print_output, print_func=(lambda line: '{0}{1}'.format(host.print_prefix, line)))
stderr_reader = gevent.spawn(read_buffer, stderr_buffer, print_output=print_output, print_func=(lambda line: '{0}{1}'.format(host.print_prefix, click.style(line, 'red'))))
greenlets = gevent.wait((stdout_reader, stderr_reader), timeout=timeout)
if (len(greenlets) != 2):
stdout_reader.kill()
stderr_reader.kill()
raise timeout_error()
stdout = stdout_reader.get()
stderr = stderr_reader.get()
logger.debug('Waiting for exit status...')
exit_status = channel.recv_exit_status()
logger.debug('Command exit status: {0}'.format(exit_status))
return ((exit_status == 0), stdout, stderr)
|
Execute a command on the specified host.
Args:
state (``pyinfra.api.State`` obj): state object for this command
hostname (string): hostname of the target
command (string): actual command to execute
sudo (boolean): whether to wrap the command with sudo
sudo_user (string): user to sudo to
get_pty (boolean): whether to get a PTY before executing the command
env (dict): envrionment variables to set
timeout (int): timeout for this command to complete before erroring
Returns:
tuple: (exit_code, stdout, stderr)
stdout and stderr are both lists of strings from each buffer.
|
codesearchnet
|
def create_mapping(record, keys):
ordered = OrderedDict()
field_mappings = []
for key, value in record.items():
ordered[key] = value
field_mappings.append({
'columnNumber': len(ordered),
'fieldName': key,
'key': key in keys,
})
return {
'field_mappings': field_mappings,
'data': ordered,
'fields': list(ordered.values()),
}
|
Create a field mapping for use in API updates and creates.
Args:
record (BaseModel): Record that should be mapped.
keys (list[str]): Fields that should be mapped as keys.
Returns:
dict: Dictionary with keys:
* ``field_mappings``: Field mappings as required by API.
* ``data``: Ordered data dictionary for input record.
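For example, a plain OrderedDict stands in for the documented BaseModel record here, since only `.items()` is required:
from collections import OrderedDict

record = OrderedDict([('id', 1), ('name', 'Widget')])
create_mapping(record, keys=['id'])
# {'field_mappings': [
#      {'columnNumber': 1, 'fieldName': 'id', 'key': True},
#      {'columnNumber': 2, 'fieldName': 'name', 'key': False}],
#  'data': OrderedDict([('id', 1), ('name', 'Widget')]),
#  'fields': [1, 'Widget']}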
|
juraj-google-style
|