code (string, 20–4.93k chars) | docstring (string, 33–1.27k chars) | source (3 classes)
---|---|---|
def set_optimizer_experimental_options(self, options):
self._optimizer_experimental_options.update(options)
self._thread_local_data.function_call_options = None
|
Set experimental options for the optimizer.
Args:
options: Dictionary of options to modify
|
github-repos
|
def update(self, span: typing.Tuple[(int, int)], line_type: LineType) -> None:
(first_block_line, last_block_line) = span
for i in range(first_block_line, (last_block_line + 1)):
try:
self.__setitem__(i, line_type)
except ValueError as error:
raise ValidationError((i + self.fn_offset), 1, 'AAA99 {}'.format(error))
|
Updates line types for a block's span.
Args:
span: First and last relative line number of a Block.
line_type: The type of line to update to.
Raises:
ValidationError: A special error on collision. This prevents Flake8
from crashing because it is converted to a Flake8 error tuple,
but it indicates to the user that something went wrong with
processing the function.
|
codesearchnet
|
def torus(script, major_radius=3.0, minor_radius=1.0, inner_diameter=None, outer_diameter=None, major_segments=48, minor_segments=12, color=None):
if ((inner_diameter is not None) and (outer_diameter is not None)):
major_radius = ((inner_diameter + outer_diameter) / 4)
minor_radius = (major_radius - (inner_diameter / 2))
filter_xml = ''.join([' <filter name="Torus">\n', ' <Param name="hRadius" ', ('value="%s" ' % major_radius), 'description="Horizontal Radius" ', 'type="RichFloat" ', '/>\n', ' <Param name="vRadius" ', ('value="%s" ' % minor_radius), 'description="Vertical Radius" ', 'type="RichFloat" ', '/>\n', ' <Param name="hSubdiv" ', ('value="%d" ' % major_segments), 'description="Horizontal Subdivision" ', 'type="RichInt" ', '/>\n', ' <Param name="vSubdiv" ', ('value="%d" ' % minor_segments), 'description="Vertical Subdivision" ', 'type="RichInt" ', '/>\n', ' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Torus', change_layer=True)
if (color is not None):
vert_color.function(script, color=color)
return None
|
Create a torus mesh
Args:
major_radius (float, optional): radius from the origin to the
center of the cross sections
minor_radius (float, optional): radius of the torus cross
section
inner_diameter (float, optional): inner diameter of the torus. If
both inner_diameter and outer_diameter are provided then
these will override major_radius and minor_radius.
outer_diameter (float, optional): outer diameter of the torus. If
both inner_diameter and outer_diameter are provided then
these will override major_radius and minor_radius.
major_segments (int, optional): number of segments for the main
ring of the torus
minor_segments (int, optional): number of segments for the minor
ring of the torus
color (str, optional): color name to apply vertex colors to the
newly created mesh
Returns:
None
|
codesearchnet
|
def validate(tmpl, path):
pattern = (_generate_pattern_for_template(tmpl) + '$')
return (True if (re.match(pattern, path) is not None) else False)
|
Validate a path against the path template.
.. code-block:: python
>>> validate('users/*/messages/*', 'users/me/messages/123')
True
>>> validate('users/*/messages/*', 'users/me/drafts/123')
False
>>> validate('/v1/{name=shelves/*/books/*}', '/v1/shelves/1/books/3')
True
>>> validate('/v1/{name=shelves/*/books/*}', '/v1/shelves/1/tapes/3')
False
Args:
tmpl (str): The path template.
path (str): The expanded path.
Returns:
bool: True if the path matches.
|
codesearchnet
|
def format_image_annotations_as_coco(image_id: str, categories: list[int], areas: list[float], bboxes: list[tuple[float]]) -> dict:
annotations = []
for category, area, bbox in zip(categories, areas, bboxes):
formatted_annotation = {'image_id': image_id, 'category_id': category, 'iscrowd': 0, 'area': area, 'bbox': list(bbox)}
annotations.append(formatted_annotation)
return {'image_id': image_id, 'annotations': annotations}
|
Format one set of image annotations to the COCO format
Args:
image_id (str): image id. e.g. "0001"
categories (List[int]): list of categories/class labels corresponding to provided bounding boxes
areas (List[float]): list of corresponding areas to provided bounding boxes
bboxes (List[Tuple[float]]): list of bounding boxes provided in COCO format
([center_x, center_y, width, height] in absolute coordinates)
Returns:
dict: {
"image_id": image id,
"annotations": list of formatted annotations
}
|
github-repos
|
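A minimal usage sketch for format_image_annotations_as_coco defined above; the ids, areas and boxes are made-up values for illustration:
categories = [3, 17]
areas = [1200.0, 450.5]
bboxes = [(10.0, 20.0, 40.0, 30.0), (55.0, 60.0, 15.0, 30.0)]
result = format_image_annotations_as_coco("0001", categories, areas, bboxes)
# result == {"image_id": "0001", "annotations": [
#     {"image_id": "0001", "category_id": 3, "iscrowd": 0, "area": 1200.0, "bbox": [10.0, 20.0, 40.0, 30.0]},
#     {"image_id": "0001", "category_id": 17, "iscrowd": 0, "area": 450.5, "bbox": [55.0, 60.0, 15.0, 30.0]}]}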
def wait(animation='elipses', text='', speed=0.2):
def decorator(func):
func.animation = animation
func.speed = speed
func.text = text
@wraps(func)
def wrapper(*args, **kwargs):
animation = func.animation
text = func.text
if ((not isinstance(animation, (list, tuple))) and (not hasattr(animations, animation))):
text = (animation if (text == '') else text)
animation = 'elipses'
wait = Wait(animation=animation, text=text, speed=func.speed)
wait.start()
try:
ret = func(*args, **kwargs)
finally:
wait.stop()
sys.stdout.write('\n')
return ret
return wrapper
return decorator
|
Decorator for adding wait animation to long running
functions.
Args:
animation (str, tuple): String reference to animation or tuple
with custom animation.
text (str): Text to display alongside the animation.
speed (float): Duration in seconds of each animation cycle.
Examples:
>>> @animation.wait('bar')
... def long_running_function():
...     ... 5 seconds later ...
...     return
|
codesearchnet
|
def clear(self, name=None):
if name is None:
name = '%s_clear' % self._name
return gen_data_flow_ops.stage_clear(name=name, shared_name=self._name, dtypes=self._dtypes, capacity=self._capacity, memory_limit=self._memory_limit)
|
Clears the staging area.
Args:
name: A name for the operation (optional)
Returns:
The created op
|
github-repos
|
def asserts_truth(func):
if re.match('_[^_]', func.__name__):
raise AttributeError('@asserts_truth may not be applied to methods beginning with "_".')
def AssertThat(*args, **kwargs):
try:
return func(*args, **kwargs)
except TruthAssertionError as truth_assertion:
if hasattr(truth_assertion, 'with_traceback'):
truth_assertion.with_traceback(None)
raise truth_assertion
raise
return AssertThat
|
Decorator for every public method that might raise TruthAssertionError.
Args:
func: the function to be decorated.
Returns:
The decorated function. In Python 2, the function behaves identically.
Otherwise, if that function raises a TruthAssertionError, then that error
is re-raised with a modified, minimal traceback.
Raises:
AttributeError: if attempted to be applied to a method whose name begins
with a single '_'. This decorator's purpose is to reduce the traceback
depth of exceptions raised by nested calls in this library, so that the
failing assertion has only two frames: the original AssertThat() call,
and the "raise truth_assertion" in the decorated function.
Annotating inner method calls is contrary to that goal.
|
github-repos
|
def _import_module_by_name(self, module_name) -> _AST | None:
existing = self._modules.get_existing_ast(module_name)
if existing:
return existing
assert path_utils.sep not in module_name, (path_utils.sep, module_name)
log.debug('Trying to import %r', module_name)
mod = self._load_builtin('builtins', module_name)
if mod:
return mod
mod_ast = None
default = None
mod_info = self._module_loader.find_import(module_name)
if mod_info:
if mod_info.file_exists:
mod_ast = self.load_module(mod_info)
assert mod_ast is not None, mod_info.filename
else:
mod_ast = self._create_empty(mod_info)
if mod_info.is_default_pyi():
default = self._modules.get(module_name)
del self._modules[module_name]
elif module_name in _ALWAYS_PREFER_TYPESHED:
del self._modules[module_name]
else:
return mod_ast
mod = self._load_builtin('stdlib', module_name)
if mod:
return mod
mod = self._load_builtin('third_party', module_name)
if mod:
return mod
if mod_ast:
assert default
self._modules[module_name] = default
return mod_ast
return None
|
Load a name like 'sys' or 'foo.bar.baz'.
Args:
module_name: The name of the module. May contain dots.
Returns:
The parsed file, an instance of pytd.TypeDeclUnit, or None if the
module wasn't found.
|
github-repos
|
def CreateClass(cls, data_type_definition):
cls._ValidateDataTypeDefinition(data_type_definition)
class_definition = cls._CreateClassTemplate(data_type_definition)
namespace = {
'__builtins__' : {
'object': builtins.object,
'super': builtins.super},
'__name__': '{0:s}'.format(data_type_definition.name)}
if sys.version_info[0] >= 3:
namespace['__builtins__']['__build_class__'] = builtins.__build_class__
exec(class_definition, namespace)
return namespace[data_type_definition.name]
|
Creates a new structure values class.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Returns:
class: structure values class.
|
juraj-google-style
|
def parse(source):
if isinstance(source, str):
return parse_stream(six.StringIO(source))
else:
return parse_stream(source)
|
Parses source code returns an array of instructions suitable for
optimization and execution by a Machine.
Args:
source: A string or stream containing source code.
|
juraj-google-style
|
def load_genes(path):
with open(path, 'rt') as f:
lines = [ x.split('\t')[:2] for x in f if not x.startswith('hgnc') ]
transcripts = {}
for symbol, tx in lines:
if symbol not in transcripts:
transcripts[symbol] = []
transcripts[symbol].append(tx)
return transcripts
|
Load a file listing gene and transcript IDs.
Args:
path: path to file containing gene IDs and transcript IDs e.g.
gene_1 transcript_1.1 length_1 denovo_count
gene_2 transcript_2.1 length_3 denovo_count
Returns:
dict of transcripts e.g. {'CTC1': ["ENST00000315684", "ENST00000485511"]}
|
juraj-google-style
|
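A small usage sketch for load_genes, assuming a hypothetical tab-separated file genes.txt whose header line starts with "hgnc" (such lines are skipped by the function):
# genes.txt contents (tab-separated columns):
# hgnc_symbol  transcript_id    length  denovo_count
# CTC1         ENST00000315684  1000    2
# CTC1         ENST00000485511  800     1
transcripts = load_genes('genes.txt')
# transcripts == {'CTC1': ['ENST00000315684', 'ENST00000485511']}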
def delete(self, customer_id, token_id, data={}, **kwargs):
url = "{}/{}/tokens/{}".format(self.base_url, customer_id, token_id)
return self.delete_url(url, data, **kwargs)
|
Delete Given Token For a Customer
Args:
customer_id : Customer Id for which tokens have to be deleted
token_id : Id of the Token object to be deleted
Returns:
Dict for deleted token
|
juraj-google-style
|
def delete(self, resource_id):
endpoint = '{}/{}'.format(self.endpoint, resource_id)
response = self.api.execute('DELETE', endpoint)
if (not response.ok):
raise Error.parse(response.json())
return self._cls.parse(response.json())
|
Deletes an existing resource
Args:
resource_id - int - The resource ID to be deleted
|
codesearchnet
|
def dict_of_sets_add(dictionary, key, value):
set_objs = dictionary.get(key, set())
set_objs.add(value)
dictionary[key] = set_objs
|
Add value to a set in a dictionary by key
Args:
dictionary (DictUpperBound): Dictionary to which to add values
key (Any): Key within dictionary
value (Any): Value to add to set in dictionary
Returns:
None
|
codesearchnet
|
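A quick usage sketch for dict_of_sets_add:
d = {}
dict_of_sets_add(d, 'fruits', 'apple')
dict_of_sets_add(d, 'fruits', 'pear')
dict_of_sets_add(d, 'veg', 'leek')
# d == {'fruits': {'apple', 'pear'}, 'veg': {'leek'}}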
def validate_all_keys(obj_name, obj, validation_fun):
for key, value in obj.items():
validation_fun(obj_name, key)
if isinstance(value, dict):
validate_all_keys(obj_name, value, validation_fun)
|
Validate all (nested) keys in `obj` by using `validation_fun`.
Args:
obj_name (str): name for `obj` being validated.
obj (dict): dictionary object.
validation_fun (function): function used to validate the value
of `key`.
Returns:
None: indicates validation successful
Raises:
ValidationError: `validation_fun` will raise this error on failure
|
juraj-google-style
|
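A usage sketch for validate_all_keys with a hypothetical validation function; ValueError stands in here for whatever ValidationError class the caller normally raises:
def no_dots(obj_name, key):
    if '.' in key:
        raise ValueError('{}: key "{}" may not contain "."'.format(obj_name, key))

validate_all_keys('asset', {'data': {'serial_no': 42}}, no_dots)   # returns None
validate_all_keys('asset', {'data': {'serial.no': 42}}, no_dots)   # raises ValueError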
def path_status(path, filename='', status=None, verbosity=0):
status = status or {}
if not filename:
dir_path, filename = os.path.split(path)
else:
dir_path = path
full_path = os.path.join(dir_path, filename)
if verbosity > 1:
print(full_path)
status['name'] = filename
status['path'] = full_path
status['dir'] = dir_path
status['type'] = []
try:
status['size'] = os.path.getsize(full_path)
status['accessed'] = datetime.datetime.fromtimestamp(os.path.getatime(full_path))
status['modified'] = datetime.datetime.fromtimestamp(os.path.getmtime(full_path))
status['created'] = datetime.datetime.fromtimestamp(os.path.getctime(full_path))
status['mode'] = os.stat(full_path).st_mode
if os.path.ismount(full_path):
status['type'] += ['mount-point']
elif os.path.islink(full_path):
status['type'] += ['symlink']
if os.path.isfile(full_path):
status['type'] += ['file']
elif os.path.isdir(full_path):
status['type'] += ['dir']
if not status['type']:
if stat.S_ISSOCK(status['mode']):  # stdlib `stat` module, not os.stat
status['type'] += ['socket']
elif stat.S_ISCHR(status['mode']):
status['type'] += ['special']
elif stat.S_ISBLK(status['mode']):
status['type'] += ['block-device']
elif stat.S_ISFIFO(status['mode']):
status['type'] += ['pipe']
if not status['type']:
status['type'] += ['unknown']
elif status['type'] and status['type'][-1] == 'symlink':
status['type'] += ['broken']
except OSError:
status['type'] = ['nonexistent'] + status['type']
if verbosity > -1:
warnings.warn("Unable to stat path '{}'".format(full_path))
status['type'] = '->'.join(status['type'])
return status
|
Retrieve the access, modify, and create timestamps for a path along with its size
Arguments:
path (str): full path to the file or directory to be statused
filename (str): optional file name to be joined onto path
status (dict): optional existing status to be updated/overwritten with new status values
verbosity (int): print the full path when > 1; warn when the path cannot be statused unless negative
Returns:
dict: {'size': bytes (int), 'accessed': (datetime), 'modified': (datetime), 'created': (datetime)}
|
juraj-google-style
|
def _PrintTasksInformation(self, storage_reader):
table_view = views.ViewsFactory.GetTableView(self._views_format_type, title='Tasks')
for (task_start, _) in storage_reader.GetSessions():
start_time = timelib.Timestamp.CopyToIsoFormat(task_start.timestamp)
task_identifier = uuid.UUID(hex=task_start.identifier)
task_identifier = '{0!s}'.format(task_identifier)
table_view.AddRow([task_identifier, start_time])
table_view.Write(self._output_writer)
|
Prints information about the tasks.
Args:
storage_reader (StorageReader): storage reader.
|
codesearchnet
|
def forward(self, hidden_states: torch.FloatTensor, attention_masks: Optional[torch.BoolTensor]=None, position_embeddings: Optional[torch.FloatTensor]=None) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
if attention_masks.dim() == 3 and attention_masks.shape[0] == hidden_states.shape[0]:
attention_masks = attention_masks[:, None, :, :]
attention_masks = attention_masks.repeat(1, self.num_heads, 1, 1)
dtype = hidden_states.dtype
attention_masks = attention_masks.to(dtype=dtype)
attention_masks = (1.0 - attention_masks) * torch.finfo(dtype).min
queries = keys = self.with_pos_embed(hidden_states, position_embeddings)
attention_output, attention_weights = self.self_attn(queries=queries, keys=keys, values=hidden_states, attention_mask=attention_masks, output_attentions=True)
attention_output = nn.functional.dropout(attention_output, p=self.dropout, training=self.training)
hidden_states = hidden_states + attention_output
hidden_states = self.layer_norm_before(hidden_states)
residual = hidden_states
hidden_states = self.activation(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = hidden_states + residual
hidden_states = self.layer_norm_after(hidden_states)
return (hidden_states, attention_weights)
|
Text self-attention to enhance projection of text features generated by
the text encoder (AutoModel based on text_config) within GroundingDinoEncoderLayer
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_dim)`):
Text features generated by the text encoder.
attention_masks (`torch.BoolTensor`, *optional*):
Attention mask for text self-attention. False for real tokens and True for padding tokens.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings to be added to the hidden states.
Returns:
`tuple(torch.FloatTensor)` comprising two elements:
- **hidden_states** (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`) --
Output of the text self-attention layer.
- **attention_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, sequence_length,
sequence_length)`) --
Attention weights of the text self-attention layer.
|
github-repos
|
def _getH2singleTrait(self, K, verbose=None):
verbose = dlimix.getVerbose(verbose)
varg = sp.zeros(self.P)
varn = sp.zeros(self.P)
fixed = sp.zeros((1,self.P))
for p in range(self.P):
y = self.Y[:,p:p+1]
I = sp.isnan(y[:,0])
if I.sum()>0:
y = y[~I,:]
_K = K[~I,:][:,~I]
else:
_K = copy.copy(K)
lmm = dlimix.CLMM()
lmm.setK(_K)
lmm.setSNPs(sp.ones((y.shape[0],1)))
lmm.setPheno(y)
lmm.setCovs(sp.zeros((y.shape[0],1)))
lmm.setVarcompApprox0(-20, 20, 1000)
lmm.process()
delta = sp.exp(lmm.getLdelta0()[0,0])
Vtot = sp.exp(lmm.getLSigma()[0,0])
varg[p] = Vtot
varn[p] = delta*Vtot
fixed[:,p] = lmm.getBetaSNP()
if verbose: print(p)
sth = {}
sth['varg'] = varg
sth['varn'] = varn
sth['fixed'] = fixed
return sth
|
Internal function for parameter initialization
estimate variance components and fixed effect using a linear mixed model with an intercept and 2 random effects (one is noise)
Args:
K: covariance matrix of the non-noise random effect term
|
juraj-google-style
|
def insert_system(cur, system_name, encoded_data=None):
if (encoded_data is None):
encoded_data = {}
if ('system_name' not in encoded_data):
encoded_data['system_name'] = system_name
insert = 'INSERT OR IGNORE INTO system(system_name) VALUES (:system_name);'
cur.execute(insert, encoded_data)
|
Insert a system name into the cache.
Args:
cur (:class:`sqlite3.Cursor`):
An sqlite3 cursor. This function is meant to be run within a :obj:`with` statement.
system_name (str):
The unique name of a system
encoded_data (dict, optional):
If a dictionary is provided, it will be populated with the serialized data. This is
useful for preventing encoding the same information many times.
|
codesearchnet
|
def energy_prof(step):
diff, rad = diffs_prof(step)
adv, _ = advts_prof(step)
return (diff + np.append(adv, 0)), rad
|
Energy flux.
This computation takes sphericity into account if necessary.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the energy flux and the radial position
at which it is evaluated.
|
juraj-google-style
|
def trace_read(self, offset, num_items):
buf_size = ctypes.c_uint32(num_items)
buf = (structs.JLinkTraceData * num_items)()
res = self._dll.JLINKARM_TRACE_Read(buf, int(offset), ctypes.byref(buf_size))
if (res == 1):
raise errors.JLinkException('Failed to read from trace buffer.')
return list(buf)[:int(buf_size.value)]
|
Reads data from the trace buffer and returns it.
Args:
self (JLink): the ``JLink`` instance.
offset (int): the offset from which to start reading from the trace
buffer.
num_items (int): number of items to read from the trace buffer.
Returns:
A list of ``JLinkTraceData`` instances corresponding to the items
read from the trace buffer. Note that this list may have size less
than ``num_items`` in the event that there are not ``num_items``
items in the trace buffer.
Raises:
JLinkException: on error.
|
codesearchnet
|
def set_redirect(self, url, status=HttpStatusCodes.HTTP_303):
self.set_status(status)
self.set_content('')
self.set_header(HttpResponseHeaders.LOCATION, url)
|
Helper method to set a redirect response.
Args:
url (:obj:`str`): URL to redirect to
status (:obj:`str`, optional): Status code of the response
|
codesearchnet
|
def _check_keyword_parentheses(self, tokens, start):
if (self._inside_brackets(':') and (tokens[start][1] == 'for')):
self._pop_token()
if (tokens[(start + 1)][1] != '('):
return
found_and_or = False
depth = 0
keyword_token = str(tokens[start][1])
line_num = tokens[start][2][0]
for i in range(start, (len(tokens) - 1)):
token = tokens[i]
if (token[0] == tokenize.NL):
return
if (token[1] == '('):
depth += 1
elif (token[1] == ')'):
depth -= 1
if depth:
continue
if ((tokens[(i + 1)][1] in (':', ')', ']', '}', 'in')) or (tokens[(i + 1)][0] in (tokenize.NEWLINE, tokenize.ENDMARKER, tokenize.COMMENT))):
if (i == (start + 2)):
return
if (keyword_token == 'not'):
if (not found_and_or):
self.add_message('superfluous-parens', line=line_num, args=keyword_token)
elif (keyword_token in ('return', 'yield')):
self.add_message('superfluous-parens', line=line_num, args=keyword_token)
elif (keyword_token not in self._keywords_with_parens):
if (not found_and_or):
self.add_message('superfluous-parens', line=line_num, args=keyword_token)
return
elif (depth == 1):
if (token[1] == ','):
return
if (token[1] in ('and', 'or')):
found_and_or = True
elif (token[1] == 'yield'):
return
elif (token[1] == 'for'):
return
|
Check that there are not unnecessary parens after a keyword.
Parens are unnecessary if there is exactly one balanced outer pair on a
line, and it is followed by a colon, and contains no commas (i.e. is not a
tuple).
Args:
tokens: list of Tokens; the entire list of Tokens.
start: int; the position of the keyword in the token list.
|
codesearchnet
|
def user_list_membership(self, username, member_type='USER', recursive=True, max_return_count=999):
return self.client.service.getUserListMembership(username, member_type, recursive, max_return_count, self.proxy_id)
|
Get info for lists a user is a member of.
This is similar to :meth:`user_lists` but with a few differences:
#. It returns list info objects instead of list names.
#. It has an option to fully resolve a user's list hierarchy. That
is, if a user is a member of a nested list, this method can
retrieve both the nested list and the parent lists that contain
the nested list.
Args:
username (str): The MIT username of the user
member_type(str): The type of user, "USER" or "STRING"
recursive(bool): Whether to fully resolve the list hierarchy
max_return_count(int): limit the number of items returned
Returns:
list of dicts: info dicts, one per list.
|
codesearchnet
|
def handle_run_exception(self, pipeline_key, pipeline_func, e):
if isinstance(e, Retry):
retry_message = str(e)
logging.warning('User forced retry for pipeline ID "%s" of %r: %s', pipeline_key.name(), pipeline_func, retry_message)
self.transition_retry(pipeline_key, retry_message)
elif isinstance(e, Abort):
abort_message = str(e)
logging.warning('User forced abort for pipeline ID "%s" of %r: %s', pipeline_key.name(), pipeline_func, abort_message)
pipeline_func.abort(abort_message)
else:
retry_message = ('%s: %s' % (e.__class__.__name__, str(e)))
logging.exception('Generator %r for pipeline ID "%s" raised exception: %s', pipeline_func, pipeline_key.name(), retry_message)
self.transition_retry(pipeline_key, retry_message)
return pipeline_func.task_retry
|
Handles an exception raised by a Pipeline's user code.
Args:
pipeline_key: The pipeline that raised the error.
pipeline_func: The class path name of the Pipeline that was running.
e: The exception that was raised.
Returns:
True if the exception should be re-raised up through the calling stack
by the caller of this method.
|
codesearchnet
|
def add_group_coordinator(self, group, response):
log.debug("Updating coordinator for %s: %s", group, response)
error_type = Errors.for_code(response.error_code)
if error_type is not Errors.NoError:
log.error("GroupCoordinatorResponse error: %s", error_type)
self._groups[group] = -1
return False
node_id = response.coordinator_id
coordinator = BrokerMetadata(
response.coordinator_id,
response.host,
response.port,
None)
if node_id not in self._brokers:
self._brokers[node_id] = coordinator
else:
node = self._brokers[node_id]
if coordinator.host != node.host or coordinator.port != node.port:
log.error("GroupCoordinator metadata conflicts with existing"
" broker metadata. Coordinator: %s, Broker: %s",
coordinator, node)
self._groups[group] = node_id
return False
log.info("Group coordinator for %s is %s", group, coordinator)
self._groups[group] = node_id
return True
|
Update with metadata for a group coordinator
Arguments:
group (str): name of group from GroupCoordinatorRequest
response (GroupCoordinatorResponse): broker response
Returns:
bool: True if metadata is updated, False on error
|
juraj-google-style
|
def percent_point(self, U):
self.check_fit()
return norm.ppf(U, loc=self.mean, scale=self.std)
|
Given a cumulative distribution value, returns a value in original space.
Arguments:
U: `np.ndarray` of shape (n, 1) and values in [0,1]
Returns:
`np.ndarray`: Estimated values in original space.
|
codesearchnet
|
def update_refresh_state(self, id_or_uri, refresh_state_data):
uri = self._client.build_uri(id_or_uri) + "/refreshState"
return self._client.update(refresh_state_data, uri=uri)
|
Refreshes a given intelligent power delivery device.
Args:
id_or_uri:
Can be either the power device id or the uri
refresh_state_data:
Power device refresh request
Returns:
str: The power state
|
juraj-google-style
|
def get_effect_class(self, effect_name: str, package_name: str=None) -> Type['Effect']:
return self._project.get_effect_class(effect_name, package_name=package_name)
|
Get an effect class by the class name
Args:
effect_name (str): Name of the effect class
Keyword Args:
package_name (str): The package the effect belongs to. This is optional and only
needed when effect class names are not unique.
Returns:
:py:class:`Effect` class
|
codesearchnet
|
def _split_result_for_readers(axis, num_splits, df):
splits = split_result_of_axis_func_pandas(axis, num_splits, df)
if (not isinstance(splits, list)):
splits = [splits]
return splits
|
Splits the DataFrame read into smaller DataFrames and handles all edge cases.
Args:
axis: Which axis to split over.
num_splits: The number of splits to create.
df: The DataFrame after it has been read.
Returns:
A list of pandas DataFrames.
|
codesearchnet
|
def _build_predicate_for_coding_in_value_set(self, expanded_value_set: value_set_pb2.ValueSet, coding_column: Optional[_sql_data_types.Identifier]=None) -> _sql_data_types.StandardSqlExpression:
codes_per_system = {}
for concept in expanded_value_set.expansion.contains:
codes_per_system.setdefault(concept.system.value, []).append(concept.code.value)
codes_per_system = list(codes_per_system.items())
codes_per_system.sort(key=operator.itemgetter(0))
for _, codes in codes_per_system:
codes.sort()
if coding_column is None:
code_col = _sql_data_types.Identifier('code', _sql_data_types.String)
system_col = _sql_data_types.Identifier('system', _sql_data_types.String)
else:
code_col = coding_column.dot('code', _sql_data_types.String)
system_col = coding_column.dot('system', _sql_data_types.String)
code_system_predicates = []
for system, codes in codes_per_system:
system = _sql_data_types.RawExpression('"%s"' % system, _sql_data_types.String)
codes = [_sql_data_types.RawExpression('"%s"' % code, _sql_data_types.String) for code in codes]
code_system_predicates.append(system_col.eq_(system).and_(code_col.in_(codes)))
return functools.reduce(lambda acc, pred: acc.or_(pred), code_system_predicates)
|
Builds a predicate asserting the coding column is bound to the value_set.
Ensures that the codings contained in `coding_column` are codings found in
`expanded_value_set`.
Produces SQL like:
(`coding_column`.system = system1 AND `coding_column`.code IN (
code1, code2)) OR
(`coding_column`.system = system2 AND `coding_column`.code IN (
code3, code4))
Args:
expanded_value_set: The expanded value set containing the coding values to
assert membership against.
coding_column: The column containing the coding values. If given, columns
`coding_column`.system and `coding_column`.code will be referenced in
the predicate. If not given, columns 'system' and 'code' will be
referenced.
Returns:
The SQL for the value set binding predicate.
|
github-repos
|
def load_hgnc_bulk(self, gene_objs):
LOG.info('Loading gene bulk with length %s', len(gene_objs))
try:
result = self.hgnc_collection.insert_many(gene_objs)
except (DuplicateKeyError, BulkWriteError) as err:
raise IntegrityError(err)
return result
|
Load a bulk of hgnc gene objects
Raises IntegrityError if there are any write concerns
Args:
gene_objs(iterable(scout.models.hgnc_gene))
Returns:
result (pymongo.results.InsertManyResult)
|
codesearchnet
|
def set_sig_figs(n=4):
u.default_format = '.' + str(n) + 'g'
pd.options.display.float_format = ('{:,.' + str(n) + '}').format
|
Set the number of significant figures used to print Pint, Pandas, and
NumPy quantities.
Args:
n (int): Number of significant figures to display.
|
juraj-google-style
|
def firmware_version(self):
buf = (ctypes.c_char * self.MAX_BUF_SIZE)()
self._dll.JLINKARM_GetFirmwareString(buf, self.MAX_BUF_SIZE)
return ctypes.string_at(buf).decode()
|
Returns a firmware identification string of the connected J-Link.
It consists of the following:
- Product Name (e.g. J-Link)
- The string: compiled
- Compile date and time.
- Optional additional information.
Args:
self (JLink): the ``JLink`` instance
Returns:
Firmware identification string.
|
codesearchnet
|
def label(self, input_grid):
unset = 0
(high_labels, num_labels) = label((input_grid > self.high_thresh))
region_ranking = np.argsort(maximum(input_grid, high_labels, index=np.arange(1, (num_labels + 1))))[::(- 1)]
output_grid = np.zeros(input_grid.shape, dtype=int)
stack = []
for rank in region_ranking:
label_num = (rank + 1)
(label_i, label_j) = np.where((high_labels == label_num))
for i in range(label_i.size):
if (output_grid[(label_i[i], label_j[i])] == unset):
stack.append((label_i[i], label_j[i]))
while (len(stack) > 0):
index = stack.pop()
output_grid[index] = label_num
for i in range((index[0] - 1), (index[0] + 2)):
for j in range((index[1] - 1), (index[1] + 2)):
if ((0 <= i < output_grid.shape[0]) and (0 <= j < output_grid.shape[1])):
if ((input_grid[(i, j)] > self.low_thresh) and (output_grid[(i, j)] == unset)):
stack.append((i, j))
return output_grid
|
Label input grid with hysteresis method.
Args:
input_grid: 2D array of values.
Returns:
Labeled output grid.
|
codesearchnet
|
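To illustrate the hysteresis labelling on a tiny grid (a sketch, assuming an instance with low_thresh=2 and high_thresh=5; the exact label numbers follow scipy.ndimage.label's ordering of the seed regions):
import numpy as np
grid = np.array([[0, 3, 6, 3, 0],
                 [0, 0, 3, 0, 0],
                 [0, 0, 0, 0, 7]])
# Two cells exceed high_thresh (the 6 and the 7), so two regions are seeded.
# Growing the 6's region picks up the neighbouring 3s (> low_thresh), giving roughly:
# [[0, 1, 1, 1, 0],
#  [0, 0, 1, 0, 0],
#  [0, 0, 0, 0, 2]]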
def _apply(self, ctx: ExtensionContext) -> Any:
(_, external_path) = ctx.node
return ctx.mentor.load_yaml(self.locator(external_path, (cast(str, ctx.document) if Validator.is_file(document=ctx.document) else None)))
|
Loads a yaml fragment from an external file.
Args:
ctx: The processing context.
Returns:
The external resource as a python dictionary. The fragment has already been sent through
the processor as well.
|
codesearchnet
|
def unhide_tool(self, context_name, tool_name):
data = self._context(context_name)
hidden_tools = data['hidden_tools']
if (tool_name in hidden_tools):
hidden_tools.remove(tool_name)
self._flush_tools()
|
Unhide a tool so that it may be exposed in a suite.
Note that unhiding a tool doesn't guarantee it can be seen - a tool of
the same name from a different context may be overriding it.
Args:
context_name (str): Context containing the tool.
tool_name (str): Name of tool to unhide.
|
codesearchnet
|
def _send_offset_commit_request(self, offsets):
assert (self.config['api_version'] >= (0, 8, 1)), 'Unsupported Broker API'
assert all(map((lambda k: isinstance(k, TopicPartition)), offsets))
assert all(map((lambda v: isinstance(v, OffsetAndMetadata)), offsets.values()))
if (not offsets):
log.debug('No offsets to commit')
return Future().success(None)
node_id = self.coordinator()
if (node_id is None):
return Future().failure(Errors.GroupCoordinatorNotAvailableError)
offset_data = collections.defaultdict(dict)
for (tp, offset) in six.iteritems(offsets):
offset_data[tp.topic][tp.partition] = offset
if self._subscription.partitions_auto_assigned():
generation = self.generation()
else:
generation = Generation.NO_GENERATION
if ((self.config['api_version'] >= (0, 9)) and (generation is None)):
return Future().failure(Errors.CommitFailedError())
if (self.config['api_version'] >= (0, 9)):
request = OffsetCommitRequest[2](self.group_id, generation.generation_id, generation.member_id, OffsetCommitRequest[2].DEFAULT_RETENTION_TIME, [(topic, [(partition, offset.offset, offset.metadata) for (partition, offset) in six.iteritems(partitions)]) for (topic, partitions) in six.iteritems(offset_data)])
elif (self.config['api_version'] >= (0, 8, 2)):
request = OffsetCommitRequest[1](self.group_id, (- 1), '', [(topic, [(partition, offset.offset, (- 1), offset.metadata) for (partition, offset) in six.iteritems(partitions)]) for (topic, partitions) in six.iteritems(offset_data)])
elif (self.config['api_version'] >= (0, 8, 1)):
request = OffsetCommitRequest[0](self.group_id, [(topic, [(partition, offset.offset, offset.metadata) for (partition, offset) in six.iteritems(partitions)]) for (topic, partitions) in six.iteritems(offset_data)])
log.debug('Sending offset-commit request with %s for group %s to %s', offsets, self.group_id, node_id)
future = Future()
_f = self._client.send(node_id, request)
_f.add_callback(self._handle_offset_commit_response, offsets, future, time.time())
_f.add_errback(self._failed_request, node_id, request, future)
return future
|
Commit offsets for the specified list of topics and partitions.
This is a non-blocking call which returns a request future that can be
polled in the case of a synchronous commit or ignored in the
asynchronous case.
Arguments:
offsets (dict of {TopicPartition: OffsetAndMetadata}): what should
be committed
Returns:
Future: indicating whether the commit was successful or not
|
codesearchnet
|
def _AssignVar(self, matched, value):
_value = self._GetValue(value)
if _value is not None:
_value.AssignVar(matched.group(value))
|
Assigns variable into current record from a matched rule.
If a record entry is a list then append, otherwise values are replaced.
Args:
matched: (regexp.match) Named group for each matched value.
value: (str) The matched value.
|
juraj-google-style
|
def __init__(self, latitude, longitude, units='metric',
angle='degrees', timezone=0, time=None):
super(TimedPoint, self).__init__(latitude, longitude, units, angle,
timezone)
self.time = time
|
Initialise a new ``TimedPoint`` object.
Args:
latitude (float, tuple or list): Location's latitude
longitude (float, tuple or list): Location's longitude
angle (str): Type for specified angles
units (str): Units type to be used for distances
timezone (int): Offset from UTC in minutes
time (datetime.datetime): Time associated with the location
|
juraj-google-style
|
def _find_countour_yaml(start, checked, names=None):
extensions = []
if names:
for name in names:
if (not os.path.splitext(name)[1]):
extensions.append((name + '.yaml'))
extensions.append((name + '.yml'))
yaml_names = (((names or []) + CONTOUR_YAML_NAMES) + extensions)
directory = start
while (directory not in checked):
checked.add(directory)
for fs_yaml_name in yaml_names:
yaml_path = os.path.join(directory, fs_yaml_name)
if os.path.exists(yaml_path):
return yaml_path
directory = os.path.dirname(directory)
return
|
Traverse the directory tree identified by start
until a directory already in checked is encountered or the path
of countour.yaml is found.
Checked is present both to make the loop termination easy
to reason about and so the same directories do not get
rechecked
Args:
start: the path to start looking in and work upward from
checked: the set of already checked directories
Returns:
the path of the countour.yaml file or None if it is not found
|
codesearchnet
|
def _convert_id_to_token(self, token_id: int) -> list:
token_type_value = self.decoder.get(token_id, f'{self.unk_token}_TOKEN_TIME')
token_type_value = token_type_value.split('_')
token_type, value = ('_'.join(token_type_value[1:]), int(token_type_value[0]))
return [token_type, value]
|
Decodes the token ids generated by the transformer into notes.
Args:
token_id (`int`):
This denotes the ids generated by the transformers to be converted to Midi tokens.
Returns:
`List`: A list consisting of token_type (`str`) and value (`int`).
|
github-repos
|
def compare(array, other, op, ty_str):
weld_obj = WeldObject(encoder_, decoder_)
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
if (isinstance(other, str) or isinstance(other, WeldObject)):
other_var = weld_obj.update(other)
if isinstance(other, WeldObject):
other_var = other.obj_id
weld_obj.dependencies[other_var] = other
else:
other_var = ('%s(%s)' % (ty_str, str(other)))
weld_template = '\n map(\n %(array)s,\n |a: %(ty)s| a %(op)s %(other)s\n )\n '
weld_obj.weld_code = (weld_template % {'array': array_var, 'other': other_var, 'op': op, 'ty': ty_str})
return weld_obj
|
Performs passed-in comparison op between every element in the passed-in
array and other, and returns an array of booleans.
Args:
array (WeldObject / Numpy.ndarray): Input array
other (WeldObject / Numpy.ndarray): Second input array
op (str): Op string used for element-wise comparison (== >= <= !=)
ty_str (str): Weld type string of each element in the input array
Returns:
A WeldObject representing this computation
|
codesearchnet
|
def Insert(self, key, value, row_index):
if row_index < 0:
row_index += len(self)
if not 0 <= row_index < len(self):
raise IndexError('Index "%s" is out of bounds.' % row_index)
new_row = Row()
for idx in self.header:
if self.index(idx) == row_index:
new_row[key] = value
new_row[idx] = self[idx]
self._keys = new_row.header
self._values = new_row.values
del new_row
self._BuildIndex()
|
Inserts new values at a specified offset.
Args:
key: string for header value.
value: string for a data value.
row_index: Offset into row for data.
Raises:
IndexError: If the offset is out of bounds.
|
juraj-google-style
|
def get_resources(self):
json_resources = self.rest_client.make_request(self.resource_url)['resources']
return [RestResource(resource, self.rest_client) for resource in json_resources]
|
Retrieves a list of all known Streams high-level REST resources.
Returns:
:py:obj:`list` of :py:class:`~.rest_primitives.RestResource`: List of all Streams high-level REST resources.
|
codesearchnet
|
def update_configuration(self, configuration):
return self._client.update(configuration, uri=(self.URI + '/configuration'))
|
Updates the metrics configuration with the new values. Overwrites the existing configuration.
Args:
configuration (dict):
Dictionary with a list of objects which contain frequency, sample interval, and source type for each
resource-type.
Returns:
dict: The current configuration for which metrics are being relayed.
|
codesearchnet
|
def _add_consequences(self, variant_obj, raw_variant_line):
consequences = []
for consequence in SO_TERMS:
if (consequence in raw_variant_line):
consequences.append(consequence)
variant_obj.consequences = consequences
|
Add the consequences found for a variant
Args:
variant_obj (puzzle.models.Variant)
raw_variant_line (str): A raw vcf variant line
|
codesearchnet
|
def unknown_shape(rank=None, **kwargs) -> 'TensorShape':
if rank is None and 'ndims' in kwargs:
rank = kwargs.pop('ndims')
if kwargs:
raise TypeError('Unknown argument: %s' % kwargs)
if rank is None:
return TensorShape(None)
else:
return TensorShape([Dimension(None)] * rank)
|
Returns an unknown TensorShape, optionally with a known rank.
Args:
rank: (Optional) If specified, the number of dimensions in the shape.
**kwargs: For backwards compatibility.
Returns:
An unknown TensorShape.
Raises:
TypeError: In case of invalid arguments.
|
github-repos
|
def get_dimension_type(self, dim):
dim = self.get_dimension(dim)
if dim is None:
return None
elif dim.type is not None:
return dim.type
elif dim in self.vdims:
return np.float64
return self.interface.dimension_type(self, dim)
|
Get the type of the requested dimension.
Type is determined by Dimension.type attribute or common
type of the dimension values, otherwise None.
Args:
dim: Dimension to look up by name or by index
Returns:
Declared type of values along the dimension
|
juraj-google-style
|
def add_arc(self, src, dst, char):
if src not in self.automaton.states():
self.add_state()
arc = fst.Arc(self.isyms[char], self.osyms[char], fst.Weight.One(self.automaton.weight_type()), dst)
self.automaton.add_arc(src, arc)
|
Adds a new Arc
Args:
src (int): The source state identifier
dst (int): The destination state identifier
char (str): The character for the transition
Returns:
None
|
juraj-google-style
|
def run(self):
cmd = list(self.vasp_cmd)
if self.auto_gamma:
vi = VaspInput.from_directory('.')
kpts = vi['KPOINTS']
if ((kpts.style == Kpoints.supported_modes.Gamma) and (tuple(kpts.kpts[0]) == (1, 1, 1))):
if ((self.gamma_vasp_cmd is not None) and which(self.gamma_vasp_cmd[(- 1)])):
cmd = self.gamma_vasp_cmd
elif which((cmd[(- 1)] + '.gamma')):
cmd[(- 1)] += '.gamma'
logger.info('Running {}'.format(' '.join(cmd)))
with open(self.output_file, 'w') as f_std, open(self.stderr_file, 'w', buffering=1) as f_err:
p = subprocess.Popen(cmd, stdout=f_std, stderr=f_err)
return p
|
Perform the actual VASP run.
Returns:
(subprocess.Popen) Used for monitoring.
|
codesearchnet
|
def _from_base_type(self, value):
if not value:
return None
try:
credentials = client.Credentials.new_from_json(value)
except ValueError:
credentials = None
return credentials
|
Converts our stored JSON string back to the desired type.
Args:
value: A value from the datastore to be converted to the
desired type.
Returns:
A deserialized Credentials (or subclass) object, else None if
the value can't be parsed.
|
juraj-google-style
|
def from_row_and_group(row: int, group: int):
for sym in _pt_data.keys():
el = Element(sym)
if ((el.row == row) and (el.group == group)):
return el
raise ValueError('No element with this row and group!')
|
Returns an element from a row and group number.
Args:
row (int): Row number
group (int): Group number
.. note::
The 18 group number system is used, i.e., Noble gases are group 18.
|
codesearchnet
|
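A usage sketch, assuming this is the staticmethod exposed as pymatgen's Element.from_row_and_group (import path assumed):
from pymatgen.core.periodic_table import Element
Element.from_row_and_group(2, 14)   # Element C (carbon sits in row 2, group 14)
Element.from_row_and_group(4, 18)   # Element Kr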
def intersection(self, *others):
result = self.__copy__()
_elements = result._elements
_total = result._total
for other in map(self._as_mapping, others):
for (element, multiplicity) in list(_elements.items()):
new_multiplicity = other.get(element, 0)
if (new_multiplicity < multiplicity):
if (new_multiplicity > 0):
_elements[element] = new_multiplicity
_total -= (multiplicity - new_multiplicity)
else:
del _elements[element]
_total -= multiplicity
result._total = _total
return result
|
r"""Return a new multiset with elements common to the multiset and all others.
>>> ms = Multiset('aab')
>>> sorted(ms.intersection('abc'))
['a', 'b']
You can also use the ``&`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms & Multiset('aaac'))
['a', 'a']
For a variant of the operation which modifies the multiset in place see
:meth:`intersection_update`.
Args:
others: The other sets to intersect with the multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The multiset resulting from the intersection of the sets.
|
codesearchnet
|
def yesno(question, default=None):
if default is not None:
if isinstance(default, bool):
pass
else:
default_ = default.upper()
if default_ not in ('Y', 'YES', 'N', 'NO'):
raise RuntimeError("Invalid default value: '{}'".format(default))
default = default_ in ('Y', 'YES')
while True:
ans = input("{} ({}/{})? ".format(question, "Y" if default == True else "y",
"N" if default == False else "n")).upper()
if ans == "" and default is not None:
ret = default
break
elif ans in ("N", "NO"):
ret = False
break
elif ans in ("Y", "YES"):
ret = True
break
return ret
|
Asks a yes/no question
Args:
question: string **without** the question mark and without the options.
Example: 'Create links'
default: default option. Accepted values are 'Y', 'YES', 'N', 'NO', lowercase versions of
these values (this argument is case-insensitive), or a bool
Returns:
bool: True if user answered Yes, False otherwise
|
juraj-google-style
|
def assign(self, droplet_id):
return self.get_data(
"floating_ips/%s/actions/" % self.ip,
type=POST,
params={"type": "assign", "droplet_id": droplet_id}
)
|
Assign a FloatingIP to a Droplet.
Args:
droplet_id: int - droplet id
|
juraj-google-style
|
def get_attr_info(binary_view):
global _ATTR_BASIC
attr_type, attr_len, non_resident = _ATTR_BASIC.unpack(binary_view[:9])
return (AttrTypes(attr_type), attr_len, bool(non_resident))
|
Gets basic information from a binary stream to allow correct processing of
the attribute header.
This function interprets the attribute type, the attribute length
and whether the attribute is non-resident.
Args:
binary_view (memoryview of bytearray) - A binary stream with the
information of the attribute
Returns:
A tuple with the attribute type, the attribute length in bytes, and
whether the attribute is resident or not.
|
juraj-google-style
|
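A usage sketch for get_attr_info. The 9 unpacked bytes are the attribute type (4 bytes), total length (4 bytes) and the non-resident flag (1 byte); the struct layout below is an assumption about how _ATTR_BASIC is defined elsewhere in the module:
import struct
_ATTR_BASIC = struct.Struct('<2IB')  # assumed little-endian: type, length, non-resident flag
raw = bytearray(b'\x10\x00\x00\x00\x60\x00\x00\x00\x00' + b'\x00' * 87)
attr_type, attr_len, non_resident = get_attr_info(memoryview(raw))
# attr_type -> AttrTypes member for 0x10 ($STANDARD_INFORMATION), attr_len == 0x60, non_resident == False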
def state_fluent_variables(self) -> FluentParamsList:
fluents = self.domain.state_fluents
ordering = self.domain.state_fluent_ordering
return self._fluent_params(fluents, ordering)
|
Returns the instantiated state fluents in canonical order.
Returns:
Sequence[Tuple[str, List[str]]]: A tuple of pairs of fluent name
and a list of instantiated fluents represented as strings.
|
codesearchnet
|
def ndtr(x, name='ndtr'):
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name='x')
if x.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError('x.dtype=%s is not handled, see docstring for supported types.' % x.dtype)
return _ndtr(x)
|
Normal distribution function.
Returns the area under the Gaussian probability density function, integrated
from minus infinity to x:
```
                 1        / x
   ndtr(x) = ----------   |    exp(-0.5 t**2) dt
             sqrt(2 pi)   /-inf

           = 0.5 (1 + erf(x / sqrt(2)))
           = 0.5 erfc(-x / sqrt(2))
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="ndtr").
Returns:
ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x` is not floating-type.
|
github-repos
|
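As a quick numeric check of the identities in the docstring, the same value can be computed with math.erf (a sketch independent of TensorFlow):
import math
def ndtr_ref(x):
    return 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))

ndtr_ref(0.0)    # 0.5
ndtr_ref(1.96)   # ~0.975, the familiar two-sided 95% point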
def _rnn_scan(self, hidden_states: torch.Tensor, recurrent_gate: torch.Tensor, reset: torch.Tensor, recurrent_states: Union[torch.Tensor, None], acc_dtype: torch.dtype=torch.float32) -> Tuple[torch.Tensor, torch.Tensor]:
recurrent_gate = recurrent_gate * ~reset
if hidden_states.shape[1] == 1:
if recurrent_states is None:
return (hidden_states, hidden_states[:, 0].type(acc_dtype))
else:
contextualized_states = recurrent_gate.type(acc_dtype) * recurrent_states[:, None].to(recurrent_gate.device)
contextualized_states += hidden_states.type(acc_dtype)
return (contextualized_states.type(hidden_states.dtype), contextualized_states[:, -1])
else:
if recurrent_states is None:
recurrent_states = torch.zeros(hidden_states[:, 0].shape, dtype=acc_dtype, device=hidden_states.device)
contextualized_states = torch.zeros_like(hidden_states)
for t in range(hidden_states.shape[1]):
recurrent_states = recurrent_gate[:, t].type(acc_dtype) * recurrent_states.to(recurrent_gate.device)
recurrent_states = recurrent_states + hidden_states[:, t].type(acc_dtype)
contextualized_states[:, t] = recurrent_states.type(hidden_states.dtype)
return (contextualized_states, recurrent_states)
|
Runs the recurrence of a linear RNN.
Args:
hidden_states: The input sequence.
recurrent_gate: The diagonal of the recurrence matrix `A`.
reset: Indicator of document boundaries, e.g. when to reset the hidden state
of the RNN.
recurrent_states: The initial hidden state.
acc_dtype: The data type for the accumulation.
Returns:
The output of the linear recurrence.
|
github-repos
|
def _remove_hidden_parts(projected_surface):
surface = np.copy(projected_surface)
surface[~_make_occlusion_mask(projected_surface)] = np.nan
return surface
|
Removes parts of a projected surface that are not visible.
Args:
projected_surface (surface): the surface to use
Returns:
surface: A projected surface.
|
juraj-google-style
|
def master(self, task_type=None, task_id=None, rpc_layer=None):
session_master = _get_value_in_tfconfig(_SESSION_MASTER_KEY)
if session_master is not None:
return session_master
cluster_spec = self.cluster_spec()
if not cluster_spec.jobs or (len(cluster_spec.jobs) == 1 and len(cluster_spec.job_tasks(cluster_spec.jobs[0])) == 1):
return ''
task_type = task_type if task_type is not None else self.task_type
task_id = task_id if task_id is not None else self.task_id
rpc_layer = rpc_layer if rpc_layer is not None else self.rpc_layer
return format_master_url(cluster_spec.task_address(task_type, task_id), rpc_layer)
|
Returns the master address to use when creating a TensorFlow session.
Note: this is only useful for TensorFlow 1.x.
Args:
task_type: (String, optional) Overrides and sets the task_type of the
master.
task_id: (Integer, optional) Overrides and sets the task id of the
master.
rpc_layer: (String, optional) Overrides and sets the protocol over which
TensorFlow nodes communicate with each other.
Returns:
The address of the master.
Raises:
RuntimeError: If the task_type or task_id is not specified and the
`TF_CONFIG` environment variable does not contain a task section.
|
github-repos
|
def _nested_to_proto(nested_value, nested_proto, process_leafs, already_processed):
if (not isinstance(nested_proto, module_pb2.NestedData)):
raise base_errors.ModuleInfoError('Expected module_pb2.NestedData.')
if (id(nested_value) in already_processed):
nested_proto.value = ''
return
for (type_name, type_info) in six.iteritems(_TO_PROTO_SPECIAL_TYPES):
if type_info.check(nested_value):
nested_proto.special_type.name = type_name
type_info.to_proto(nested_value, nested_proto.special_type.object, process_leafs, already_processed)
return
if _is_iterable(nested_value):
already_processed.add(id(nested_value))
if isinstance(nested_value, dict):
nested_proto.dict.SetInParent()
for (key, child) in six.iteritems(nested_value):
str_key = str(key)
child_proto = nested_proto.dict.map[str_key]
_nested_to_proto(child, child_proto, process_leafs, already_processed)
elif isinstance(nested_value, tuple):
if _is_namedtuple(nested_value):
nested_proto.named_tuple.name = type(nested_value).__name__
for str_key in nested_value._fields:
child = getattr(nested_value, str_key)
child_proto = nested_proto.named_tuple.map[str_key]
_nested_to_proto(child, child_proto, process_leafs, already_processed)
else:
nested_proto.tuple.SetInParent()
for child in nested_value:
child_proto = nested_proto.tuple.list.add()
_nested_to_proto(child, child_proto, process_leafs, already_processed)
else:
nested_proto.list.SetInParent()
for child in nested_value:
child_proto = nested_proto.list.list.add()
_nested_to_proto(child, child_proto, process_leafs, already_processed)
else:
nested_proto.value = process_leafs(nested_value)
|
Serializes `nested_value` into `nested_proto`.
Args:
nested_value: A nested Python value.
nested_proto: A `module_pb2.NestedData` instance to be filled from the value
in `nested_value`.
process_leafs: A function to be applied to the leaf values of the nested
structure.
already_processed: Set of already processed objects (used to avoid
infinite recursion).
Raises:
ModuleInfoError: If `nested_proto` is not an instance of
`module_pb2.NestedData`.
|
codesearchnet
|
def write_tabular(obj, filepath):
_, fn, ext = splitext2(filepath)
if ext == '.h5':
_write_tabular_h5(obj, filepath)
elif ext == '.pkl':
_write_tabular_pickle(obj, filepath)
else:
raise NotImplementedError
|
Write tabular object in HDF5 or pickle format
Args:
obj (array or DataFrame): tabular object to write
filepath (path-like): path to write to; must end in '.h5' or '.pkl'
|
juraj-google-style
|
def exclude(self, **filters):
exclude = {('-%s' % key): value for (key, value) in filters.items()}
return self.filter(**exclude)
|
Applies query filters for excluding matching records from result set.
Args:
**filters: Query filters as keyword arguments.
Returns:
Self. Queryset object.
Examples:
>>> Person.objects.exclude(age=None)
>>> Person.objects.filter(name__startswith='jo').exclude(age__lte=16)
|
codesearchnet
|
def floating_point_ops(self, inputs: dict[str, Union[torch.Tensor, Any]]):
if hasattr(self.model, 'floating_point_ops'):
return self.model.floating_point_ops(inputs)
else:
return 0
|
For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point
operations for every backward + forward pass. If using another model, either implement such a method in the
model or subclass and override this method.
Args:
inputs (`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
`int`: The number of floating-point operations.
|
github-repos
|
def add(self, document_data, document_id=None):
if (document_id is None):
(parent_path, expected_prefix) = self._parent_info()
document_pb = document_pb2.Document()
created_document_pb = self._client._firestore_api.create_document(parent_path, collection_id=self.id, document_id=None, document=document_pb, mask=None, metadata=self._client._rpc_metadata)
new_document_id = _helpers.get_doc_id(created_document_pb, expected_prefix)
document_ref = self.document(new_document_id)
set_result = document_ref.set(document_data)
return (set_result.update_time, document_ref)
else:
document_ref = self.document(document_id)
write_result = document_ref.create(document_data)
return (write_result.update_time, document_ref)
|
Create a document in the Firestore database with the provided data.
Args:
document_data (dict): Property names and values to use for
creating the document.
document_id (Optional[str]): The document identifier within the
current collection. If not provided, an ID will be
automatically assigned by the server (the assigned ID will be
a random 20 character string composed of digits,
uppercase and lowercase letters).
Returns:
Tuple[google.protobuf.timestamp_pb2.Timestamp, \
~.firestore_v1beta1.document.DocumentReference]: Pair of
* The ``update_time`` when the document was created (or
overwritten).
* A document reference for the created document.
Raises:
~google.cloud.exceptions.Conflict: If ``document_id`` is provided
and the document already exists.
|
codesearchnet
|
def all(self, customer_id, data={}, **kwargs):
url = "{}/{}/tokens".format(self.base_url, customer_id)
return self.get_url(url, data, **kwargs)
|
Get all tokens for given customer Id
Args:
customer_id : Customer Id for which tokens have to be fetched
Returns:
Token dicts for given customer Id
|
juraj-google-style
|
def from_dir(cls, top, exts=None, exclude_dirs="_*"):
pseudos = []
if exts == "all_files":
for f in [os.path.join(top, fn) for fn in os.listdir(top)]:
if os.path.isfile(f):
try:
p = Pseudo.from_file(f)
if p:
pseudos.append(p)
else:
logger.info('Skipping file %s' % f)
except:
logger.info('Skipping file %s' % f)
if not pseudos:
logger.warning('No pseudopotentials parsed from folder %s' % top)
return None
logger.info('Creating PseudoTable with %i pseudopotentials' % len(pseudos))
else:
if exts is None: exts=("psp8",)
for p in find_exts(top, exts, exclude_dirs=exclude_dirs):
try:
pseudos.append(Pseudo.from_file(p))
except Exception as exc:
logger.critical("Error in %s:\n%s" % (p, exc))
return cls(pseudos).sort_by_z()
|
Find all pseudos in the directory tree starting from top.
Args:
top: Top of the directory tree
exts: List of file extensions. If exts == "all_files"
we try to open all files in top.
exclude_dirs: Wildcard used to exclude directories.
Returns: :class:`PseudoTable` sorted by atomic number Z.
|
juraj-google-style
|
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
|
Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
|
github-repos
|
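A usage sketch, assuming a RoBERTa-style tokenizer loaded from the Hugging Face hub (the checkpoint name is illustrative):
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("roberta-base")
ids_a = tokenizer.encode("Hello world", add_special_tokens=False)
ids_b = tokenizer.encode("How are you?", add_special_tokens=False)
token_type_ids = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)
# All zeros, length == len(ids_a) + len(ids_b) + 4 (for <s> ... </s></s> ... </s>)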
def visit_arithmetic(self, arithmetic: _evaluation.ArithmeticNode) -> _sql_data_types.Select:
lhs_result = self.visit(arithmetic.left)
rhs_result = self.visit(arithmetic.right)
sql_data_type = _sql_data_types.coerce(lhs_result.sql_data_type, rhs_result.sql_data_type)
lhs_subquery = lhs_result.as_operand()
rhs_subquery = rhs_result.as_operand()
if sql_data_type == _sql_data_types.String:
sql_value = f'CONCAT({lhs_subquery}, {rhs_subquery})'
elif arithmetic.op == _ast.Arithmetic.Op.MODULO:
sql_value = f'MOD({lhs_subquery}, {rhs_subquery})'
elif arithmetic.op == _ast.Arithmetic.Op.TRUNCATED_DIVISION:
sql_value = f'DIV({lhs_subquery}, {rhs_subquery})'
else:
sql_value = f'({lhs_subquery} {arithmetic.op} {rhs_subquery})'
return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(sql_value, _sql_data_type=sql_data_type, _sql_alias='arith_'), from_part=None, sql_dialect=_sql_data_types.SqlDialect.SPARK)
|
Translates a FHIRPath arithmetic expression to Spark SQL.
Each operand is expected to be a collection of a single element. Both
operands must be of the same type, or of compatible types according to the
rules of implicit conversion.
Args:
arithmetic: The `_Arithmetic` Expression node.
Returns:
A compiled Spark SQL expression.
|
github-repos
|
def prepare_kwargs(raw, string_parameter='name'):
kwargs = dict()
if isinstance(raw, dict):
kwargs.update(raw)
elif isinstance(raw, str):
kwargs[string_parameter] = raw
return kwargs
|
Utility method to convert raw string/dictionary input into a dictionary to pass
into a function. Always returns a dictionary.
Args:
raw: string or dictionary, string is assumed to be the name of the
activation function. Dictionary will be passed through unchanged.
Returns: kwargs dictionary for **kwargs
|
juraj-google-style
|
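A short usage sketch, assuming the prepare_kwargs function above is in scope; the activation names are only illustrative:

print(prepare_kwargs("relu"))                         # {'name': 'relu'}
print(prepare_kwargs({"name": "elu", "alpha": 0.5}))  # {'name': 'elu', 'alpha': 0.5}
print(prepare_kwargs(None))                           # {} -- unrecognised input yields an empty dict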
def teleport(self, location=None, rotation=None):
val = 0
if location is not None:
val += 1
np.copyto(self._teleport_buffer, location)
if rotation is not None:
np.copyto(self._rotation_buffer, rotation)
val += 2
self._teleport_bool_buffer[0] = val
|
Teleports the agent to a specific location, with a specific rotation.
Args:
location (np.ndarray, optional): An array with three elements specifying the target world coordinate in meters.
If None, keeps the current location. Defaults to None.
rotation (np.ndarray, optional): An array with three elements specifying the target rotation of the agent.
If None, keeps the current rotation. Defaults to None.
Returns:
None
|
juraj-google-style
|
def WriteEventBody(self, event):
if not hasattr(event, 'timestamp'):
return
row = self._GetSanitizedEventValues(event)
try:
self._cursor.execute(self._INSERT_QUERY, row)
except MySQLdb.Error as exception:
logger.warning(
'Unable to insert into database with error: {0!s}.'.format(
exception))
self._count += 1
if self._count % 10000 == 0:
self._connection.commit()
if self._set_status:
self._set_status('Inserting event: {0:d}'.format(self._count))
|
Writes the body of an event object to the output.
Args:
event (EventObject): event.
|
juraj-google-style
|
def from_args_list(cls, args_list: list[str]) -> 'CompileCommand':
cc_file = None
filtered_args = []
for arg in args_list:
if arg in _DISALLOWED_ARGS:
continue
if arg.endswith('.cc'):
cc_file = arg
filtered_args.append(arg)
return cls(cc_file, filtered_args)
|
Alternative constructor which uses the args_list from `bazel aquery`.
This collects arguments and the file being run on from the output of
`bazel aquery`. Also filters out arguments which break clang-tidy.
Arguments:
args_list: List of arguments generated by `bazel aquery`
Returns:
The corresponding CompileCommand.
|
github-repos
|
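The filtering step above can be illustrated with a small standalone sketch; _DISALLOWED_ARGS is assumed to be a module-level set of flags that break clang-tidy, and the argument list is made up:

_DISALLOWED_ARGS = {'-fno-canonical-system-headers'}   # assumed contents

args_list = ['clang', '-fno-canonical-system-headers', '-std=c++17', 'core/kernels/foo.cc']
filtered_args = [a for a in args_list if a not in _DISALLOWED_ARGS]
cc_file = next((a for a in filtered_args if a.endswith('.cc')), None)

print(cc_file)        # core/kernels/foo.cc
print(filtered_args)  # ['clang', '-std=c++17', 'core/kernels/foo.cc']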
def obtain_input_shape(input_shape, default_size, min_size, data_format, require_flatten, weights=None):
if weights != 'imagenet' and input_shape and (len(input_shape) == 3):
if data_format == 'channels_first':
correct_channel_axis = 1 if len(input_shape) == 4 else 0
if input_shape[correct_channel_axis] not in {1, 3}:
warnings.warn(f'This model usually expects 1 or 3 input channels. However, it was passed an input_shape with {input_shape[0]} input channels.', stacklevel=2)
default_shape = (input_shape[0], default_size, default_size)
else:
if input_shape[-1] not in {1, 3}:
warnings.warn(f'This model usually expects 1 or 3 input channels. However, it was passed an input_shape with {input_shape[-1]} input channels.', stacklevel=2)
default_shape = (default_size, default_size, input_shape[-1])
elif data_format == 'channels_first':
default_shape = (3, default_size, default_size)
else:
default_shape = (default_size, default_size, 3)
if weights == 'imagenet' and require_flatten:
if input_shape is not None:
if input_shape != default_shape:
raise ValueError(f'When setting `include_top=True` and loading `imagenet` weights, `input_shape` should be {default_shape}. Received: input_shape={input_shape}')
return default_shape
if input_shape:
if data_format == 'channels_first':
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError('`input_shape` must be a tuple of three integers.')
if input_shape[0] != 3 and weights == 'imagenet':
raise ValueError(f'The input must have 3 channels; Received `input_shape={input_shape}`')
if input_shape[1] is not None and input_shape[1] < min_size or (input_shape[2] is not None and input_shape[2] < min_size):
raise ValueError(f'Input size must be at least {min_size}x{min_size}; Received: input_shape={input_shape}')
elif input_shape is not None:
if len(input_shape) != 3:
raise ValueError('`input_shape` must be a tuple of three integers.')
if input_shape[-1] != 3 and weights == 'imagenet':
raise ValueError(f'The input must have 3 channels; Received `input_shape={input_shape}`')
if input_shape[0] is not None and input_shape[0] < min_size or (input_shape[1] is not None and input_shape[1] < min_size):
raise ValueError(f'Input size must be at least {min_size}x{min_size}; Received: input_shape={input_shape}')
elif require_flatten:
input_shape = default_shape
elif data_format == 'channels_first':
input_shape = (3, None, None)
else:
input_shape = (None, None, 3)
if require_flatten:
if None in input_shape:
raise ValueError(f'If `include_top` is True, you should specify a static `input_shape`. Received: input_shape={input_shape}')
return input_shape
|
Internal utility to compute/validate a model's input shape.
Args:
input_shape: Either None (will return the default network input shape),
or a user-provided shape to be validated.
default_size: Default input width/height for the model.
min_size: Minimum input width/height accepted by the model.
data_format: Image data format to use.
require_flatten: Whether the model is expected to
be linked to a classifier via a Flatten layer.
weights: One of `None` (random initialization)
or 'imagenet' (pre-training on ImageNet).
If weights='imagenet' input channels must be equal to 3.
Returns:
An integer shape tuple (may include None entries).
Raises:
ValueError: In case of invalid argument values.
|
github-repos
|
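A hedged usage sketch of the validation logic, assuming the obtain_input_shape function above is importable; the sizes are arbitrary examples:

shape = obtain_input_shape(input_shape=None, default_size=224, min_size=32,
                           data_format='channels_last', require_flatten=True,
                           weights='imagenet')
print(shape)  # (224, 224, 3) -- default ImageNet shape when nothing is provided

shape = obtain_input_shape(input_shape=(150, 150, 3), default_size=224, min_size=32,
                           data_format='channels_last', require_flatten=False)
print(shape)  # (150, 150, 3) -- a valid user-provided shape passes through unchanged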
def alternatives(self, Class=None, set=None):
for e in self.select(AlternativeLayers,None, True, ['Original','Suggestion']):
if Class is None:
yield e
elif len(e) >= 1:
for e2 in e:
try:
if isinstance(e2, Class):
try:
if set is None or e2.set == set:
yield e
break
except AttributeError:
continue
except AttributeError:
continue
|
Generator over alternatives, either all or only of a specific annotation type, and possibly restricted by set.
Arguments:
* ``Class`` - The Class you want to retrieve (e.g. PosAnnotation). Or set to None to select all alternatives regardless of what type they are.
* ``set`` - The set you want to retrieve (defaults to None, which selects regardless of set)
Returns:
Generator over Alternative elements
|
juraj-google-style
|
def get(self, path, params=None, headers=None):
response = requests.get(self._url_for(path), params=params, headers=self._headers(headers))
self._handle_errors(response)
return response
|
Perform a GET request, optionally providing query-string params.
Args:
path (str): A path that gets appended to ``base_url``.
params (dict, optional): Dictionary of param names to values.
Example:
api_client.get('/users', params={'active': True})
Returns:
A requests ``Response`` object.
|
codesearchnet
|
def difference_update(self, *others):
for other in map(self._as_multiset, others):
for (element, multiplicity) in other.items():
self.discard(element, multiplicity)
|
r"""Remove all elements contained the others from this multiset.
>>> ms = Multiset('aab')
>>> ms.difference_update('abc')
>>> sorted(ms)
['a']
You can also use the ``-=`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aabbbc')
>>> ms -= Multiset('abd')
>>> sorted(ms)
['a', 'b', 'b', 'c']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`difference`.
Args:
others: The other sets to remove from this multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
|
codesearchnet
|
def save_features(self, train_features, test_features, feature_names, feature_list_id):
self.save_feature_names(feature_names, feature_list_id)
self.save_feature_list(train_features, 'train', feature_list_id)
self.save_feature_list(test_features, 'test', feature_list_id)
|
Save features for the training and test sets to disk, along with their metadata.
Args:
train_features: A NumPy array of features for the training set.
test_features: A NumPy array of features for the test set.
feature_names: A list containing the names of the feature columns.
feature_list_id: The name for this feature list.
|
codesearchnet
|
def get_other_answers(pool, seeded_answers, get_student_item_dict, algo, options):
    num_responses = (len(options) if (('num_responses' not in algo) or (algo['num_responses'] == '#')) else int(algo['num_responses']))
if (algo['name'] == 'simple'):
return get_other_answers_simple(pool, seeded_answers, get_student_item_dict, num_responses)
elif (algo['name'] == 'random'):
return get_other_answers_random(pool, seeded_answers, get_student_item_dict, num_responses)
else:
raise UnknownChooseAnswerAlgorithm()
|
Select other student's answers from answer pool or seeded answers based on the selection algorithm
Args:
pool (dict): answer pool, format:
{
option1_index: {
student_id: { can store algorithm specific info here }
},
option2_index: {
student_id: { ... }
}
}
seeded_answers (list): seeded answers from instructor
[
{'answer': 0, 'rationale': 'rationale A'},
{'answer': 1, 'rationale': 'rationale B'},
]
get_student_item_dict (callable): get student item dict function to return student item dict
algo (str): selection algorithm
options (dict): answer options for the question
Returns:
dict: answers based on the selection algorithm
|
codesearchnet
|
def IsWalletTransaction(self, tx):
for key, contract in self._contracts.items():
for output in tx.outputs:
if output.ScriptHash.ToBytes() == contract.ScriptHash.ToBytes():
return True
for script in tx.scripts:
if script.VerificationScript:
if bytes(contract.Script) == script.VerificationScript:
return True
for watch_script_hash in self._watch_only:
for output in tx.outputs:
if output.ScriptHash == watch_script_hash:
return True
for script in tx.scripts:
if Crypto.ToScriptHash(script.VerificationScript, unhex=False) == watch_script_hash:
return True
return False
|
Verifies if a transaction belongs to the wallet.
Args:
tx (TransactionOutput): an instance of type neo.Core.TX.Transaction.TransactionOutput to verify.
Returns:
bool: True, if transaction belongs to wallet. False, if not.
|
juraj-google-style
|
def write_script(script, tempdir):
name = "script" + self.suffix
path = os.path.join(tempdir, name)
with open(path, "w") as f:
f.write("\n".join(script))
return path
|
Write script to a temporary directory
Arguments:
script (list): Commands to put into the file
tempdir (str): Directory in which the script file is written
Returns:
Absolute path to script
|
juraj-google-style
|
def __init__(self, functions, inference_args, input_tangents, tape_watching):
self._functions = functions
self._inference_args = inference_args
self._input_tangents = input_tangents
self._tape_watching = tape_watching
|
Collects information about the function call.
Args:
functions: An object which produces forward and backward functions, either
a _DelayedRewriteGradientFunctions or a _TapeGradientFunctions object.
inference_args: A flat list of Tensors, arguments to the inference
function.
input_tangents: A flat list of Tensors, jvps associated with
`inference_args`.
tape_watching: Boolean, with True indicating that recording is necessary.
|
github-repos
|
def call_later(self, time_seconds, callback, arguments):
scheduled_call = {'ts': (time.time() + time_seconds), 'callback': callback, 'args': arguments}
self.scheduled_calls.append(scheduled_call)
|
Schedules a function to be run x number of seconds from now.
The call_later method is primarily used to resend messages if we
haven't received a confirmation message from the receiving host.
We can wait x number of seconds for a response and then try
sending the message again.
Args:
time_seconds (float): The number of seconds from now we should call
the provided function.
callback (function): The method to execute when our time has been
reached. E.g. self.retransmit
arguments (dict): A dictionary of arguments to send to the callback.
Returns:
None
|
codesearchnet
|
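A minimal sketch of how a consumer loop might drain the scheduled_calls list built by the method above; the run_due_calls helper is hypothetical, and passing the stored arguments as keyword arguments is an assumption:

import time

def run_due_calls(scheduled_calls):
    # Invoke and remove every call whose timestamp has passed.
    now = time.time()
    for call in [c for c in scheduled_calls if c['ts'] <= now]:
        scheduled_calls.remove(call)
        call['callback'](**call['args'])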
def ApproximateDistanceBetweenPoints(pa, pb):
(alat, alon) = pa
(blat, blon) = pb
sa = transitfeed.Stop(lat=alat, lng=alon)
sb = transitfeed.Stop(lat=blat, lng=blon)
return transitfeed.ApproximateDistanceBetweenStops(sa, sb)
|
Finds the distance between two points on the Earth's surface.
This is an approximate distance based on assuming that the Earth is a sphere.
The points are specified by their latitude and longitude.
Args:
pa: the first (lat, lon) point tuple
pb: the second (lat, lon) point tuple
Returns:
The distance as a float in metres.
|
codesearchnet
|
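A self-contained haversine sketch of the spherical approximation described above; it does not use transitfeed, and the Earth radius constant is an assumption:

import math

def haversine_metres(pa, pb, radius_m=6371000.0):
    # Great-circle distance between two (lat, lon) tuples, in metres.
    (alat, alon), (blat, blon) = pa, pb
    phi1, phi2 = math.radians(alat), math.radians(blat)
    dphi = math.radians(blat - alat)
    dlmb = math.radians(blon - alon)
    a = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlmb / 2) ** 2
    return 2 * radius_m * math.asin(math.sqrt(a))

print(round(haversine_metres((52.5200, 13.4050), (48.8566, 2.3522))))  # roughly 878 km, in metres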
def cmdargs(mysqldump: str, username: str, password: str, database: str, verbose: bool, with_drop_create_database: bool, max_allowed_packet: str, hide_password: bool=False) -> List[str]:
ca = [mysqldump, '-u', username, '-p{}'.format(('*****' if hide_password else password)), '--max_allowed_packet={}'.format(max_allowed_packet), '--hex-blob']
if verbose:
ca.append('--verbose')
if with_drop_create_database:
ca.extend(['--add-drop-database', '--databases', database])
else:
ca.append(database)
return ca
|
Returns command arguments for a ``mysqldump`` call.
Args:
mysqldump: ``mysqldump`` executable filename
username: user name
password: password
database: database name
verbose: verbose output?
with_drop_create_database: produce commands to ``DROP`` the database
and recreate it?
max_allowed_packet: passed to ``mysqldump``
hide_password: obscure the password (will break the arguments but
provide a safe version to show the user)?
Returns:
list of command-line arguments
|
codesearchnet
|
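A usage sketch, assuming the cmdargs function above is in scope; the connection details are placeholders, not real credentials:

safe_args = cmdargs(mysqldump='mysqldump', username='backup_user', password='secret',
                    database='mydb', verbose=False, with_drop_create_database=True,
                    max_allowed_packet='1GB', hide_password=True)
print(' '.join(safe_args))
# mysqldump -u backup_user -p***** --max_allowed_packet=1GB --hex-blob --add-drop-database --databases mydb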
def write_credentials(self, credentials=None, profile=None,
cache_token=None):
d = {
'profile': profile,
'client_id': credentials.client_id,
'client_secret': credentials.client_secret,
'refresh_token': credentials.refresh_token
}
if cache_token:
d.update({'access_token': credentials.access_token})
with self.lock:
return self.db.upsert(
d, self.query.profile == profile
)
|
Write credentials.
Write credentials to credentials file. Performs ``upsert``.
Args:
cache_token (bool): If ``True``, stores ``access_token`` in token store. Defaults to ``True``.
credentials (class): Read-only credentials.
profile (str): Credentials profile. Defaults to ``'default'``.
Returns:
int: Affected document ID.
|
juraj-google-style
|
def download_from_url(url, destination_path, force=False, aspera=False, silent=False):
if (aspera and url.startswith('http')):
logger.warn('Aspera Connect allows only FTP servers - falling back to normal download')
aspera = False
try:
fn = Downloader(url, outdir=os.path.dirname(destination_path))
if aspera:
fn.download_aspera(user='anonftp', host='ftp-trace.ncbi.nlm.nih.gov', silent=silent)
else:
fn.download(silent=silent, force=force)
except URLError:
logger.error(('Cannot find file %s' % url))
|
Download file from remote server.
If the file is already downloaded and the ``force`` flag is on, the file
will be removed.
Args:
url (:obj:`str`): Path to the file on remote server (including file
name)
destination_path (:obj:`str`): Path to the file on local machine
(including file name)
force (:obj:`bool`): If file exist force to overwrite it. Defaults to
False.
aspera (:obj:`bool`): Download with Aspera Connect. Defaults to False.
silent (:obj:`bool`): Do not print any message. Defaults to False.
|
codesearchnet
|
def from_bigquery(sql):
if isinstance(sql, bq.Query):
sql = sql._expanded_sql()
parts = sql.split('.')
if len(parts) == 1 or len(parts) > 3 or any(' ' in x for x in parts):
sql = '(' + sql + ')'
else:
sql = '`' + sql + '`'
query = bq.Query(
'SELECT target, predicted, count(*) as count FROM %s group by target, predicted' % sql)
df = query.execute().result().to_dataframe()
labels = sorted(set(df['target']) | set(df['predicted']))
labels_count = len(labels)
df['target'] = [labels.index(x) for x in df['target']]
df['predicted'] = [labels.index(x) for x in df['predicted']]
cm = [[0] * labels_count for i in range(labels_count)]
for index, row in df.iterrows():
cm[row['target']][row['predicted']] = row['count']
return ConfusionMatrix(cm, labels)
|
Create a ConfusionMatrix from a BigQuery table or query.
Args:
sql: Can be one of:
A SQL query string.
A Bigquery table string.
A Query object defined with '%%bq query --name [query_name]'.
The query results or table must include "target", "predicted" columns.
Returns:
A ConfusionMatrix that can be plotted.
Raises:
ValueError if query results or table does not include 'target' or 'predicted' columns.
|
juraj-google-style
|
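The aggregation step above can be sketched without BigQuery by substituting plain rows for the query result; the labels and counts below are made up for illustration:

rows = [('cat', 'cat', 8), ('cat', 'dog', 2), ('dog', 'dog', 9), ('dog', 'cat', 1)]
labels = sorted({t for t, _, _ in rows} | {p for _, p, _ in rows})
cm = [[0] * len(labels) for _ in labels]
for target, predicted, count in rows:
    cm[labels.index(target)][labels.index(predicted)] = count

print(labels)  # ['cat', 'dog']
print(cm)      # [[8, 2], [1, 9]]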
def __init__(self, n_classes=256, act=torch.softmax):
super().__init__()
self.k = n_classes
self.act = act
self.register_buffer('k_idx', torch.arange(0, n_classes).view(1, -1, 1, 1), persistent=False)
self.register_buffer('k_minus_1', torch.tensor([self.k - 1]).view(1, -1, 1, 1), persistent=False)
|
Compute log binomial distribution for n_classes
Args:
n_classes (`int`, *optional*, defaults to 256):
Number of output classes.
act (`torch.nn.Module`, *optional*, defaults to `torch.softmax`):
Activation function to apply to the output.
|
github-repos
|
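A small sketch of the registered buffers above for n_classes=4; the (1, k, 1, 1) shapes are chosen so they broadcast over a (batch, k, height, width) tensor:

import torch

k = 4
k_idx = torch.arange(0, k).view(1, -1, 1, 1)
k_minus_1 = torch.tensor([k - 1]).view(1, -1, 1, 1)

print(k_idx.shape, k_idx.flatten().tolist())  # torch.Size([1, 4, 1, 1]) [0, 1, 2, 3]
print(k_minus_1.shape, k_minus_1.item())      # torch.Size([1, 1, 1, 1]) 3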
def log(self, metric):
message = self.LOGFMT.format(**metric)
if metric['context']:
message += ' context: {context}'.format(context=metric['context'])
self._logger.log(self.level, message)
|
Format and output metric.
Args:
metric (dict): Complete metric.
|
juraj-google-style
|
def from_scf_task(cls, scf_task, ddk_tolerance=None, ph_tolerance=None, manager=None):
new = cls(manager=manager)
new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance)
return new
|
Build tasks for the computation of Born effective charges from a ground-state task.
Args:
scf_task: ScfTask object.
ddk_tolerance: tolerance used in the DDK run if with_becs. None to use AbiPy default.
ph_tolerance: dict {"varname": value} with the tolerance used in the phonon run.
None to use AbiPy default.
manager: :class:`TaskManager` object.
|
juraj-google-style
|
def split_vert_on_nonmanifold_face(script, vert_displacement_ratio=0.0):
filter_xml = ''.join([
' <filter name="Split Vertexes Incident on Non Manifold Faces">\n',
' <Param name="VertDispRatio" ',
'value="{}" '.format(vert_displacement_ratio),
'description="Vertex Displacement Ratio" ',
'type="RichFloat" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
return None
|
Split non-manifold vertices until the mesh becomes two-manifold.
Args:
script: the FilterScript object or script filename to write
the filter to.
vert_displacement_ratio (float): When a vertex is split it is moved
along the average vector going from its position to the centroid
of the FF connected faces sharing it.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
|
juraj-google-style
|
def _get_params(mapper_spec, allowed_keys=None, allow_old=True):
if ('input_reader' not in mapper_spec.params):
message = "Input reader's parameters should be specified in input_reader subdictionary."
if ((not allow_old) or allowed_keys):
raise errors.BadReaderParamsError(message)
params = mapper_spec.params
params = dict(((str(n), v) for (n, v) in params.iteritems()))
else:
if (not isinstance(mapper_spec.params.get('input_reader'), dict)):
raise errors.BadReaderParamsError('Input reader parameters should be a dictionary')
params = mapper_spec.params.get('input_reader')
params = dict(((str(n), v) for (n, v) in params.iteritems()))
if allowed_keys:
params_diff = (set(params.keys()) - allowed_keys)
if params_diff:
raise errors.BadReaderParamsError(('Invalid input_reader parameters: %s' % ','.join(params_diff)))
return params
|
Obtain input reader parameters.
Utility function for input readers implementation. Fetches parameters
from mapreduce specification giving appropriate usage warnings.
Args:
mapper_spec: The MapperSpec for the job
allowed_keys: set of all allowed keys in parameters as strings. If it is not
None, then parameters are expected to be in a separate "input_reader"
subdictionary of mapper_spec parameters.
allow_old: Allow parameters to exist outside of the input_reader
subdictionary for compatibility.
Returns:
mapper parameters as dict
Raises:
BadReaderParamsError: if parameters are invalid/missing or not allowed.
|
codesearchnet
|
def assert_hermitian_spectrum(self, name='assert_hermitian_spectrum'):
eps = np.finfo(self.dtype.real_dtype.as_numpy_dtype).eps
with self._name_scope(name):
max_err = eps * self.domain_dimension_tensor()
imag_convolution_kernel = math_ops.imag(self.convolution_kernel())
return check_ops.assert_less(math_ops.abs(imag_convolution_kernel), max_err, message='Spectrum was not Hermitian')
|
Returns an `Op` that asserts this operator has Hermitian spectrum.
This operator corresponds to a real-valued matrix if and only if its
spectrum is Hermitian.
Args:
name: A name to give this `Op`.
Returns:
An `Op` that asserts this operator has Hermitian spectrum.
|
github-repos
|
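A NumPy sketch of the property the assertion checks: a circulant operator has a real-valued convolution kernel exactly when its spectrum is conjugate-symmetric (Hermitian). The kernel values below are arbitrary:

import numpy as np

spectrum = np.fft.fft(np.array([1.0, 2.0, 0.5, -1.0]))  # spectrum of a real kernel
kernel = np.fft.ifft(spectrum)
print(np.max(np.abs(kernel.imag)) < 1e-12)               # True: Hermitian spectrum

broken = spectrum.copy()
broken[1] += 1j                                           # break conjugate symmetry
print(np.max(np.abs(np.fft.ifft(broken).imag)) < 1e-12)   # False: kernel is no longer real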
def encode_configuration(self, did, eid, parameters):
parameters = [{"parameterId": k, "parameterValue": v} for (k,v) in parameters.items()]
payload = {
'parameters':parameters
}
req_headers = {
'Accept': 'application/vnd.onshape.v1+json',
'Content-Type': 'application/json'
}
res = self._api.request('post', '/api/elements/d/' + did + '/e/' + eid + '/configurationencodings', body=payload, headers=req_headers)
return json.loads(res.content.decode("utf-8"))["encodedId"]
|
Encode parameters as a URL-ready string
Args:
- did (str): Document ID
- eid (str): Element ID
- parameters (dict): key-value pairs of the parameters to be encoded
Returns:
- configuration (str): the url-ready configuration string.
|
juraj-google-style
|
def parse_frequency(variant, info_key):
raw_annotation = variant.INFO.get(info_key)
raw_annotation = None if raw_annotation == '.' else raw_annotation
frequency = float(raw_annotation) if raw_annotation else None
return frequency
|
Parse any frequency from the info dict
Args:
variant(cyvcf2.Variant)
info_key(str)
Returns:
frequency(float): or None if frequency does not exist
|
juraj-google-style
|
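A minimal usage sketch with a stand-in for the cyvcf2 variant; FakeVariant is hypothetical and assumes only that INFO supports .get():

class FakeVariant:
    def __init__(self, info):
        self.INFO = info

print(parse_frequency(FakeVariant({'AF': '0.012'}), 'AF'))  # 0.012
print(parse_frequency(FakeVariant({'AF': '.'}), 'AF'))      # None ('.' is treated as missing)
print(parse_frequency(FakeVariant({}), 'AF'))               # None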