code (string, 20-4.93k chars) | docstring (string, 33-1.27k chars) | source (3 classes)
---|---|---|
def task(self, task_uuid):
request = clearly_pb2.FindTaskRequest(task_uuid=task_uuid)
task = self._stub.find_task(request)
if task.uuid:
ClearlyClient._display_task(task, True, True, True)
else:
print(EMPTY)
|
Finds one specific task.
Args:
task_uuid (str): the task id
|
juraj-google-style
|
def __init__(self, context):
del context
self._debugger_data_server = None
self._server_thread = None
self._grpc_port = None
|
Constructs a debugger plugin for TensorBoard.
This plugin adds handlers for retrieving debugger-related data. The plugin
also starts a debugger data server once the log directory is passed to the
plugin via the call to get_plugin_apps.
Args:
context: A base_plugin.TBContext instance.
|
juraj-google-style
|
def listen(self, log, noprint=True):
try:
result = self.decode_event(log.topics, log.data)
except ValueError:
return
if (not noprint):
print(result)
return result
|
Return a dictionary representation of the Log instance.
Note:
This function won't work with anonymous events.
Args:
log (processblock.Log): The Log instance that needs to be parsed.
noprint (bool): Flag to turn off printing of the decoded log instance.
|
codesearchnet
|
def write_file(path, content, mode='w'):
from peltak.core import context
from peltak.core import log
if context.get('pretend', False):
log.info("Would overwrite <34>{path}<32> with:\n<90>{content}",
path=path,
content=content)
else:
with open(path, mode) as fp:
fp.write(content)
|
--pretend aware file writing.
You can always write files manually but you should always handle the
--pretend case.
Args:
path (str):
content (str):
mode (str):
|
juraj-google-style
|
def run_eagerly(self):
if self._run_eagerly is True and (not context.executing_eagerly()):
raise ValueError('You can only set `run_eagerly=True` if eager execution is enabled.')
if not self.dynamic:
if self._run_eagerly is None:
return def_function.functions_run_eagerly()
else:
return self._run_eagerly
else:
if not context.executing_eagerly():
raise ValueError('Your model contains layers that can only be successfully run in eager execution (layers constructed with `dynamic=True`). You must enable eager execution with `tf.enable_eager_execution()`.')
if self._run_eagerly is False:
raise ValueError('Your model contains layers that can only be successfully run in eager execution (layers constructed with `dynamic=True`). You cannot set `run_eagerly=False`.')
return context.executing_eagerly()
|
Settable attribute indicating whether the model should run eagerly.
Running eagerly means that your model will be run step by step,
like Python code. Your model might run slower, but it should become easier
for you to debug it by stepping into individual layer calls.
By default, we will attempt to compile your model to a static graph to
deliver the best execution performance.
Returns:
Boolean, whether the model should run eagerly.
|
github-repos
|
def update_state(self, y_true, y_pred, sample_weight=None):
return metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, top_k=self.top_k, class_id=self.class_id, sample_weight=sample_weight)
|
Accumulates true positive and false positive statistics.
Args:
y_true: The ground truth values, with the same dimensions as `y_pred`.
Will be cast to `bool`.
y_pred: The predicted values. Each element must be in the range `[0, 1]`.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
|
github-repos
|
def GetAccounts(self):
selector = {'fields': ['CustomerId', 'CanManageClients']}
accounts = self.client.GetService('ManagedCustomerService').get(selector)
return accounts['entries']
|
Return the client accounts associated with the user's manager account.
Returns:
list: List of ManagedCustomer data objects.
|
codesearchnet
|
def _get_bounding_box(self, box: 'torch.Tensor') -> Dict[str, int]:
if self.framework != 'pt':
raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.')
xmin, ymin, xmax, ymax = box.int().tolist()
bbox = {'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax}
return bbox
|
Turns list [xmin, ymin, xmax, ymax] into dict { "xmin": xmin, ... }
Args:
box (`torch.Tensor`): Tensor containing the coordinates in corners format.
Returns:
bbox (`Dict[str, int]`): Dict containing the coordinates in corners format.
|
github-repos
|
def select_qadapter(self, pconfs):
policy, max_ncpus = self.policy, self.max_cores
pconfs = pconfs.get_ordered_with_policy(policy, max_ncpus)
if policy.precedence == "qadapter":
for qadpos, qad in enumerate(self.qads):
possible_pconfs = [pc for pc in pconfs if qad.can_run_pconf(pc)]
if qad.allocation == "nodes":
for pconf in possible_pconfs:
if pconf.num_cores % qad.hw.cores_per_node == 0:
return self._use_qadpos_pconf(qadpos, pconf)
if possible_pconfs:
return self._use_qadpos_pconf(qadpos, possible_pconfs[0])
elif policy.precedence == "autoparal_conf":
for pconf in pconfs:
for qadpos, qad in enumerate(self.qads):
if qad.allocation == "nodes" and not pconf.num_cores % qad.hw.cores_per_node == 0:
continue
if qad.can_run_pconf(pconf):
return self._use_qadpos_pconf(qadpos, pconf)
else:
raise ValueError("Wrong value of policy.precedence = %s" % policy.precedence)
raise RuntimeError("Cannot find qadapter for this run!")
|
Given a list of parallel configurations, pconfs, this method selects an `optimal` configuration
according to some criterion as well as the :class:`QueueAdapter` to use.
Args:
pconfs: :class:`ParalHints` object with the list of parallel configurations
Returns:
:class:`ParallelConf` object with the `optimal` configuration.
|
juraj-google-style
|
def code_verifier(n_bytes=64):
verifier = base64.urlsafe_b64encode(os.urandom(n_bytes)).rstrip(b'=')
if len(verifier) < 43:
raise ValueError("Verifier too short. n_bytes must be > 30.")
elif len(verifier) > 128:
raise ValueError("Verifier too long. n_bytes must be < 97.")
else:
return verifier
|
Generates a 'code_verifier' as described in section 4.1 of RFC 7636.
This is a 'high-entropy cryptographic random string' that will be
impractical for an attacker to guess.
Args:
n_bytes: integer between 31 and 96, inclusive. default: 64
number of bytes of entropy to include in verifier.
Returns:
Bytestring, representing urlsafe base64-encoded random data.
|
juraj-google-style
|
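A minimal usage sketch of the verifier helper above; the S256 challenge derivation follows RFC 7636 section 4.2 and is an addition for illustration, not part of the snippet:
import base64
import hashlib

verifier = code_verifier(64)  # urlsafe base64 bytestring
# S256 code_challenge per RFC 7636 (added here for context, not defined in the snippet)
challenge = base64.urlsafe_b64encode(hashlib.sha256(verifier).digest()).rstrip(b'=')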
def _lower_non_existent_context_field_filters(match_traversals, visitor_fn):
new_match_traversals = []
for match_traversal in match_traversals:
new_match_traversal = []
for step in match_traversal:
if (step.where_block is not None):
new_filter = step.where_block.visit_and_update_expressions(visitor_fn)
if (new_filter.predicate == TrueLiteral):
new_filter = None
new_step = step._replace(where_block=new_filter)
else:
new_step = step
new_match_traversal.append(new_step)
new_match_traversals.append(new_match_traversal)
return new_match_traversals
|
Return new match traversals, lowering filters involving non-existent ContextFields.
Expressions involving non-existent ContextFields are evaluated to TrueLiteral.
BinaryCompositions, where one of the operands is lowered to a TrueLiteral,
are lowered appropriately based on the present operator (u'||' and u'&&' are affected).
TernaryConditionals, where the predicate is lowered to a TrueLiteral,
are replaced by their if_true predicate.
The `visitor_fn` implements these behaviors (see `_update_context_field_expression`).
Args:
match_traversals: list of match traversal entities to be lowered
visitor_fn: visit_and_update function for lowering expressions in given match traversal
Returns:
new list of match_traversals, with all filter expressions lowered
|
codesearchnet
|
def generate_output_list(self, source, key, val, line='2', hr=True, show_name=False, colorize=True):
output = generate_output(line=line, short=(HR_RDAP[source][key]['_short'] if hr else key), name=(HR_RDAP[source][key]['_name'] if (hr and show_name) else None), is_parent=(False if ((val is None) or (len(val) == 0)) else True), value=('None' if ((val is None) or (len(val) == 0)) else None), colorize=colorize)
if (val is not None):
for item in val:
output += generate_output(line=str((int(line) + 1)), value=item, colorize=colorize)
return output
|
The function for generating CLI output RDAP list results.
Args:
source (:obj:`str`): The parent key 'network' or 'objects'
(required).
key (:obj:`str`): The event key 'events' or 'events_actor'
(required).
val (:obj:`dict`): The event dictionary (required).
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '2'.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
|
codesearchnet
|
def async_decorator(func):
@functools.wraps(func)
def async_wrapper(*args, **kwargs):
if 'callback' not in kwargs or not kwargs['callback']:
return func(*args, **kwargs)
callback = kwargs.pop('callback')
if not callable(callback):
raise TypeError('Expected \'callback\' is not callable.')
def thread_func(*args, **kwargs):
exception, res = None, None
try:
res = func(*args, **kwargs)
except Exception as e:
exception = e
return callback(exception, res)
thread = threads.ThreadReturn(target=thread_func,
args=args,
kwargs=kwargs)
thread.daemon = True
thread.start()
return thread
return async_wrapper
|
Asynchronous function decorator. Interprets the function as being
asynchronous, so returns a function that will handle calling the
function asynchronously.
Args:
func (function): function to be called asynchronously
Returns:
The wrapped function.
Raises:
TypeError: if the supplied ``callback`` is not callable
|
juraj-google-style
|
def mean(x, axis=None, keepdims=False):
from .function_bases import mean as mean_base
if axis is None:
axis = range(x.ndim)
elif not hasattr(axis, '__iter__'):
axis = [axis]
return mean_base(x, axis, keepdims)
|
Reduction along axes with mean operation.
Args:
x (Variable): An input variable.
axis (None, int or tuple of ints): Axis or axes along which mean is
calculated. Passing the default value `None` will reduce all dimensions.
keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.
Returns:
~nnabla.Variable: N-D array.
|
juraj-google-style
|
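A short usage sketch, assuming the snippet above is nnabla's functions.mean; the import paths below are an assumption about that library:
import nnabla as nn
import nnabla.functions as F  # assumed import path for the mean shown above

x = nn.Variable((2, 3, 4))
y = F.mean(x, axis=1)         # reduces axis 1 -> shape (2, 4)
z = F.mean(x, keepdims=True)  # reduces all axes -> shape (1, 1, 1)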
def find_word_groups(self, text, category, proximity=2):
f = re.IGNORECASE
words = getattr(self, category)
regex = re.compile((('(\\b' + '\\b|\\b'.join(words)) + '\\b)'), flags=f)
candidates = regex.finditer(text)
(starts, ends) = ([], [])
groups = []
for item in candidates:
starts.append(item.span()[0])
ends.append(item.span()[1])
groups.append(item.group().lower())
new_starts = []
new_groups = []
skip = False
for (i, g) in enumerate(groups):
if skip:
skip = False
continue
if ((i < (len(groups) - 1)) and ((starts[(i + 1)] - ends[i]) <= proximity)):
if (g[(- 1)] == '-'):
sep = ''
else:
sep = ' '
new_groups.append(((g + sep) + groups[(i + 1)]))
new_starts.append(starts[i])
skip = True
else:
if (g not in new_groups):
new_groups.append(g)
new_starts.append(starts[i])
skip = False
return new_groups
|
Given a string and a category, finds and combines words into
groups based on their proximity.
Args:
text (str): Some text.
category (str): Name of the attribute holding the list of regex strings for this category.
proximity (int): Maximum character gap between matches to combine. Default: 2.
Returns:
list. The combined strings it found.
Example:
COLOURS = [r"red(?:dish)?", r"grey(?:ish)?", r"green(?:ish)?"]
s = 'GREYISH-GREEN limestone with RED or GREY sandstone.'
find_word_groups(s, COLOURS) --> ['greyish green', 'red', 'grey']
|
codesearchnet
|
def open(self, filepath):
with io.open(filepath, 'r', encoding='utf-8') as fp:
content = fp.read()
return content
|
Open settings backend to return its content
Args:
filepath (str): Path to the settings file; depends on the backend.
Returns:
string: File content.
|
codesearchnet
|
def with_params(self, params):
copy = params.copy()
copy.update(self._params)
return self.__copy_and_set('params', copy)
|
Adds parameters to the request params
Args:
params (dict): The parameters to add to the request params
Returns:
The request builder instance in order to chain calls
|
juraj-google-style
|
def docstring(documentation, prepend=False, join=''):
def decorator(func):
current = (func.__doc__ if func.__doc__ else '').strip()
doc = documentation.strip()
new = '\n'.join(([doc, join, current] if prepend else [current, join, doc]))
lines = len(new.strip().splitlines())
if (lines == 1):
func.__doc__ = new.strip()
else:
func.__doc__ = (new.strip() + '\n')
return func
return decorator
|
r"""Prepend or append a string to the current documentation of the function.
This decorator should be robust even if ``func.__doc__`` is None
(for example, if -OO was passed to the interpreter).
Usage::
@docstring('Appended this line')
def func():
"This docstring will have a line below."
pass
>>> print(func.__doc__)
This docstring will have a line below.
Appended this line
Args:
documentation (str): Documentation string that should be added,
appended or prepended to the current documentation string.
prepend (bool): Prepend the documentation string to the current
documentation if ``True`` else append. default=``False``
join (str): String used to separate docstrings. default='\n'
|
codesearchnet
|
def GetExecutionDetails(self, request, global_params=None):
config = self.GetMethodConfig('GetExecutionDetails')
return self._RunMethod(config, request, global_params=global_params)
|
Request detailed information about the execution status of the job. EXPERIMENTAL. This API is subject to change or removal without notice.
Args:
request: (DataflowProjectsLocationsJobsGetExecutionDetailsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(JobExecutionDetails) The response message.
|
github-repos
|
def main_loop(self, steps_per_epoch, starting_epoch, max_epoch):
with self.sess.as_default():
self.loop.config(steps_per_epoch, starting_epoch, max_epoch)
self.loop.update_global_step()
try:
self._callbacks.before_train()
self.loop.update_global_step()
for self.loop._epoch_num in range(
self.loop.starting_epoch, self.loop.max_epoch + 1):
logger.info("Start Epoch {} ...".format(self.loop.epoch_num))
self._callbacks.before_epoch()
start_time = time.time()
for self.loop._local_step in range(self.loop.steps_per_epoch):
if self.hooked_sess.should_stop():
return
self.run_step()
self._callbacks.trigger_step()
self._callbacks.after_epoch()
logger.info("Epoch {} (global_step {}) finished, time:{}.".format(
self.loop.epoch_num, self.loop.global_step, humanize_time_delta(time.time() - start_time)))
self._callbacks.trigger_epoch()
logger.info("Training has finished!")
except (StopTraining, tf.errors.OutOfRangeError) as e:
logger.info("Training was stopped by exception {}.".format(str(e)))
except KeyboardInterrupt:
logger.info("Detected Ctrl-C and exiting main loop.")
raise
finally:
self._callbacks.after_train()
self.hooked_sess.close()
|
Run the main training loop.
Args:
steps_per_epoch, starting_epoch, max_epoch (int):
|
juraj-google-style
|
def loads(cls, name):
if (not isinstance(name, six.string_types)):
raise TypeError(u'arguments to {classname} must be of type {string_types}'.format(classname=cls.__name__, string_types=repr(six.string_types)))
if ((not name) or name.isspace()):
raise ValueError('name must not be empty')
return cls(name)
|
Load a parsed name from a string.
Raises:
TypeError: when name isn't a type of `six.string_types`.
ValueError: when name is empty or None.
|
codesearchnet
|
def get_random_email(ltd='com'):
email = [RandomInputHelper.get_random_value(6, [string.ascii_lowercase]), '@', RandomInputHelper.get_random_value(6, [string.ascii_lowercase]), '.', ltd]
return ''.join(email)
|
Get a random email address with the given top-level domain.
Args:
ltd (str): The top-level domain (TLD) to use (e.g. com).
Returns:
str: The random email.
|
codesearchnet
|
def read_proto(file_name: str, proto_cls: Type[_T]) -> _T:
raw_text = ''
proto = proto_cls()
with open(file_name, 'r', encoding='utf-8') as f:
raw_text = f.read()
return text_format.Parse(raw_text, proto)
|
Reads a protobuf in prototxt format from file_name.
Data is parsed into an instance of `proto_cls`.
Args:
file_name: The file to read from.
proto_cls: The type of protobuf message to parse as.
Returns:
The protobuf message in the file.
|
github-repos
|
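A hedged usage sketch of read_proto; `my_config_pb2` and its `Config` message are hypothetical generated-protobuf names used only for illustration:
from my_config_pb2 import Config  # hypothetical generated proto module

config = read_proto('config.textproto', Config)  # illustrative path
print(config)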
def dict2str(self, d: Dict, joiner: str) -> str:
result = str()
for key in d:
result = result + str(key) + " : "
if isinstance(d[key], list):
result = result + self.list2str(d[key], joiner) + joiner
elif isinstance(d[key], dict):
result = result + self.dict2str(d[key], joiner) + joiner
elif d[key]:
result = result + str(d[key]) + joiner
return result
|
Convert dict to str as input for tokenizer
Args:
d (dict): dict for converting
joiner (str): join the elements using this string to separate them.
Returns: the value of the dict as a string
|
juraj-google-style
|
def pauli_group(number_of_qubits, case='weight'):
if (number_of_qubits < 5):
temp_set = []
if (case == 'weight'):
tmp = pauli_group(number_of_qubits, case='tensor')
return sorted(tmp, key=(lambda x: (- np.count_nonzero((np.array(x.to_label(), 'c') == b'I')))))
elif (case == 'tensor'):
for k in range((4 ** number_of_qubits)):
z = np.zeros(number_of_qubits, dtype=np.bool)
x = np.zeros(number_of_qubits, dtype=np.bool)
for j in range(number_of_qubits):
element = ((k // (4 ** j)) % 4)
if (element == 1):
x[j] = True
elif (element == 2):
z[j] = True
x[j] = True
elif (element == 3):
z[j] = True
temp_set.append(Pauli(z, x))
return temp_set
else:
raise QiskitError("Only support 'weight' or 'tensor' cases but you have {}.".format(case))
raise QiskitError('Only support number of qubits is less than 5')
|
Return the Pauli group with 4^n elements.
The phases have been removed.
case 'weight' is ordered by Pauli weights and
case 'tensor' is ordered by I,X,Y,Z counting lowest qubit fastest.
Args:
number_of_qubits (int): number of qubits
case (str): determines ordering of group elements ('weight' or 'tensor')
Returns:
list: list of Pauli objects
Raises:
QiskitError: case is not 'weight' or 'tensor'
QiskitError: number_of_qubits is larger than 4
|
codesearchnet
|
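A brief usage sketch of pauli_group, assuming the qiskit-era Pauli and QiskitError imports the snippet relies on are available:
paulis = pauli_group(2, case='tensor')
print(len(paulis))           # 4**2 = 16 group elements (phases removed)
print(paulis[0].to_label())  # the all-identity element, 'II'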
def unite(df, colname, *args, **kwargs):
to_unite = list([a for a in flatten(args)])
sep = kwargs.get('sep', '_')
remove = kwargs.get('remove', True)
na_action = kwargs.get('na_action', 'maintain')
if (na_action == 'maintain'):
df[colname] = df[to_unite].apply((lambda x: (np.nan if any(x.isnull()) else sep.join(x.map(str)))), axis=1)
elif (na_action == 'ignore'):
df[colname] = df[to_unite].apply((lambda x: sep.join(x[(~ x.isnull())].map(str))), axis=1)
elif (na_action == 'as_string'):
df[colname] = df[to_unite].astype(str).apply((lambda x: sep.join(x)), axis=1)
if remove:
df.drop(to_unite, axis=1, inplace=True)
return df
|
Does the inverse of `separate`, joining columns together by a specified
separator.
Any columns that are not strings will be converted to strings.
Args:
df (pandas.DataFrame): DataFrame passed in through the pipe.
colname (str): the name of the new joined column.
*args: list of columns to be joined, which can be strings, symbolic, or
integer positions.
Kwargs:
sep (str): the string separator to join the columns with.
remove (bool): Boolean indicating whether or not to remove the
original columns.
na_action (str): can be one of `'maintain'` (the default),
'`ignore'`, or `'as_string'`. The default will make the new column
row a `NaN` value if any of the original column cells at that
row contained `NaN`. '`ignore'` will treat any `NaN` value as an
empty string during joining. `'as_string'` will convert any `NaN`
value to the string `'nan'` prior to joining.
|
codesearchnet
|
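A small hedged example of the unite helper above, assuming `unite` and the `flatten` helper it calls are importable alongside pandas and numpy:
import numpy as np
import pandas as pd

df = pd.DataFrame({'a': ['x', 'y'], 'b': [1, np.nan]})
out = unite(df.copy(), 'ab', ['a', 'b'], sep='-')
# default na_action='maintain' yields ['x-1.0', NaN]; na_action='ignore' would give ['x-1.0', 'y']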
def html2text(__html: str, *, width: int=80, ascii_replacements: bool=False) -> str:
html2.BODY_WIDTH = width
html2.UNICODE_SNOB = ascii_replacements
return html2.html2text(__html).strip()
|
HTML to plain text renderer.
See also: :pypi:`html2text`
Args:
__html: Text to process
width: Paragraph width
ascii_replacements: Use pseudo-ASCII replacements for Unicode
Returns:
Rendered text
|
codesearchnet
|
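A hedged usage sketch; the exact output depends on the html2text package the snippet aliases as `html2`:
text = html2text('<h1>Title</h1><p>Hello <b>world</b></p>', width=40)
# roughly: '# Title\n\nHello **world**'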
def _look_adjacent(self, vectors, num_chunks_before, num_chunks_after):
if num_chunks_before == 0 and num_chunks_after == 0:
return vectors
slices = []
for i in range(-num_chunks_before, num_chunks_after + 1):
if i == 0:
slices.append(vectors)
else:
slices.append(torch.cat([vectors[:, :, i:, ...], vectors[:, :, :i, ...]], dim=2))
return torch.cat(slices, dim=3)
|
Used to implement attention between consecutive chunks.
Args:
vectors: array of shape [batch_size, num_attention_heads, n_chunks, chunk_len, ...]
num_chunks_before: chunks before current chunk to include in attention
num_chunks_after: chunks after current chunk to include in attention
Returns:
tensor of shape [num_chunks, N * chunk_length, ...], where N = (1 + num_chunks_before + num_chunks_after).
|
github-repos
|
def key_to_kind(cls, key):
if key.kind() == Kind.KIND_NAME:
return key.id()
else:
return key.parent().id()
|
Return the kind specified by a given __property__ key.
Args:
key: key whose kind name is requested.
Returns:
The kind specified by key.
|
juraj-google-style
|
def add_string_pairs_from_button_element(xib_file, results, button, special_ui_components_prefix):
button_entry_comment = extract_element_internationalized_comment(button)
if button_entry_comment is None:
return
for state in button.getElementsByTagName('state'):
state_name = state.attributes['key'].value
state_entry_comment = button_entry_comment + " - " + state_name + " state of button"
if not add_string_pairs_from_attributed_ui_element(results, state, state_entry_comment):
try:
button_entry_key = state.attributes['title'].value
except KeyError:
try:
button_entry_key = state.getElementsByTagName('string')[0].firstChild.nodeValue
except Exception:
continue
results.append((button_entry_key, state_entry_comment))
warn_if_element_not_of_class(button, 'Button', special_ui_components_prefix)
|
Adds string pairs from a button xib element.
Args:
xib_file (str): Path to the xib file.
results (list): The list to add the results to.
button(element): The button element from the xib, to extract the string pairs from.
special_ui_components_prefix(str): A custom prefix for internationalized components to allow (default allows only the JT prefix)
|
juraj-google-style
|
def minimum_required(version):
def _minimum_required(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if list(self.version) < list(version):
raise errors.JLinkException('Version %s required.' % version)
return func(self, *args, **kwargs)
return wrapper
return _minimum_required
|
Decorator to specify the minimum SDK version required.
Args:
version (str): valid version string
Returns:
A decorator function.
|
juraj-google-style
|
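A hedged sketch of applying this decorator to a method of an object that exposes a `version` string; the class below is illustrative, and the failure path raises the snippet's errors.JLinkException:
class FakeJLink(object):
    version = '6.00'  # illustrative; compared character-by-character with the required string

    @minimum_required('5.00')
    def halt(self):
        return 'halted'

FakeJLink().halt()  # ok, since list('6.00') is not < list('5.00')
# an instance with version '4.00' would raise errors.JLinkException('Version 5.00 required.')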
def parse_email(data, strip_attachment_payloads=False):
if type(data) == bytes:
if is_outlook_msg(data):
data = convert_outlook_msg(data)
data = data.decode("utf-8", errors="replace")
parsed_email = mailparser.parse_from_string(data)
headers = json.loads(parsed_email.headers_json).copy()
parsed_email = json.loads(parsed_email.mail_json).copy()
parsed_email["headers"] = headers
if "received" in parsed_email:
for received in parsed_email["received"]:
if "date_utc" in received:
if received["date_utc"] is None:
del received["date_utc"]
else:
received["date_utc"] = received["date_utc"].replace("T",
" ")
if "from" not in parsed_email:
if "From" in parsed_email["headers"]:
parsed_email["from"] = parsed_email["Headers"]["From"]
else:
parsed_email["from"] = None
if parsed_email["from"] is not None:
parsed_email["from"] = parse_email_address(parsed_email["from"][0])
if "date" in parsed_email:
parsed_email["date"] = parsed_email["date"].replace("T", " ")
else:
parsed_email["date"] = None
if "reply_to" in parsed_email:
parsed_email["reply_to"] = list(map(lambda x: parse_email_address(x),
parsed_email["reply_to"]))
else:
parsed_email["reply_to"] = []
if "to" in parsed_email:
parsed_email["to"] = list(map(lambda x: parse_email_address(x),
parsed_email["to"]))
else:
parsed_email["to"] = []
if "cc" in parsed_email:
parsed_email["cc"] = list(map(lambda x: parse_email_address(x),
parsed_email["cc"]))
else:
parsed_email["cc"] = []
if "bcc" in parsed_email:
parsed_email["bcc"] = list(map(lambda x: parse_email_address(x),
parsed_email["bcc"]))
else:
parsed_email["bcc"] = []
if "delivered_to" in parsed_email:
parsed_email["delivered_to"] = list(
map(lambda x: parse_email_address(x),
parsed_email["delivered_to"])
)
if "attachments" not in parsed_email:
parsed_email["attachments"] = []
else:
for attachment in parsed_email["attachments"]:
if "payload" in attachment:
payload = attachment["payload"]
try:
if "content_transfer_encoding" in attachment:
if attachment["content_transfer_encoding"] == "base64":
payload = decode_base64(payload)
else:
payload = str.encode(payload)
attachment["sha256"] = hashlib.sha256(payload).hexdigest()
except Exception as e:
logger.debug("Unable to decode attachment: {0}".format(
e.__str__()
))
if strip_attachment_payloads:
for attachment in parsed_email["attachments"]:
if "payload" in attachment:
del attachment["payload"]
if "subject" not in parsed_email:
parsed_email["subject"] = None
parsed_email["filename_safe_subject"] = get_filename_safe_string(
parsed_email["subject"])
if "body" not in parsed_email:
parsed_email["body"] = None
return parsed_email
|
A simplified email parser
Args:
data: The RFC 822 message string, or MSG binary
strip_attachment_payloads (bool): Remove attachment payloads
Returns (dict): Parsed email data
|
juraj-google-style
|
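A hedged usage sketch of parse_email; the .eml filename is illustrative:
with open('sample.eml', 'rb') as f:
    raw = f.read()
parsed = parse_email(raw, strip_attachment_payloads=True)
print(parsed['from'], parsed['subject'], len(parsed['attachments']))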
def _CheckIsSocket(self, file_entry):
if definitions.FILE_ENTRY_TYPE_SOCKET not in self._file_entry_types:
return False
return file_entry.IsSocket()
|
Checks the is_socket find specification.
Args:
file_entry (FileEntry): file entry.
Returns:
bool: True if the file entry matches the find specification, False if not.
|
juraj-google-style
|
def normalize_datetime_to_utc(dt):
return datetime.datetime(*dt.utctimetuple()[:6], microsecond=dt.microsecond, tzinfo=datetime.timezone.utc)
|
Adjust datetime to UTC.
Apply the timezone offset to the datetime and set the timezone to UTC.
This is a no-op if the datetime is already in UTC.
Args:
dt : datetime
- tz-aware: Used in the formatted string.
- tz-naive: Assumed to be in UTC.
Returns:
datetime
The returned datetime is always timezone aware and in UTC.
Notes:
This forces a new object to be returned, which fixes an issue with
serialization to XML in PyXB. PyXB uses a mixin together with
datetime to handle the XML xs:dateTime. That type keeps track of
timezone information included in the original XML doc, which conflicts if we
return it here as part of a datetime mixin.
See Also:
``cast_naive_datetime_to_tz()``
|
codesearchnet
|
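A short standard-library example of the conversion above:
import datetime

dt = datetime.datetime(2020, 1, 1, 12, 0, tzinfo=datetime.timezone(datetime.timedelta(hours=2)))
print(normalize_datetime_to_utc(dt))  # 2020-01-01 10:00:00+00:00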
def _generate_placements(self, width, height):
skyline = self._skyline
points = collections.deque()
left_index = right_index = 0
support_height = skyline[0].top
support_index = 0
placements = self._placement_points_generator(skyline, width)
for p in placements:
if p+width > skyline[right_index].right:
for right_index in range(right_index+1, len(skyline)):
if skyline[right_index].top >= support_height:
support_index = right_index
support_height = skyline[right_index].top
if p+width <= skyline[right_index].right:
break
if p >= skyline[left_index].right:
left_index +=1
if support_index < left_index:
support_index = left_index
support_height = skyline[left_index].top
for i in range(left_index, right_index+1):
if skyline[i].top >= support_height:
support_index = i
support_height = skyline[i].top
if support_height+height <= self.height:
points.append((Rectangle(p, support_height, width, height),\
left_index, right_index))
return points
|
Generate the candidate placements for a rectangle of the given size.
Arguments:
width (number): Rectangle width.
height (number): Rectangle height.
Returns:
deque of tuples (Rectangle, left_skyline, right_skyline):
Rectangle: Rectangle in a valid position.
left_skyline: Index of the skyline segment under the rectangle's left edge.
right_skyline: Index of the skyline segment under the rectangle's right edge.
|
juraj-google-style
|
def dataframe(self, force_refresh=False):
if force_refresh:
self.clear_cache()
if (self._dataframe is None):
self._dataframe = self._fetch_dataframe()
return self._dataframe
|
A pandas dataframe with lots of interesting results about this object.
Created by calling SageMaker List and Describe APIs and converting them into
a convenient tabular summary.
Args:
force_refresh (bool): Set to True to fetch the latest data from SageMaker API.
|
codesearchnet
|
def parse_string_descriptor(string_desc):
if not isinstance(string_desc, str):
string_desc = str(string_desc)
if not string_desc.endswith(';'):
string_desc += ';'
parsed = get_streamer_parser().parseString(string_desc)[0]
realtime = 'realtime' in parsed
broadcast = 'broadcast' in parsed
encrypted = 'security' in parsed and parsed['security'] == 'encrypted'
signed = 'security' in parsed and parsed['security'] == 'signed'
auto = 'manual' not in parsed
with_other = None
if 'with_other' in parsed:
with_other = parsed['with_other']
auto = False
dest = SlotIdentifier.FromString('controller')
if 'explicit_tile' in parsed:
dest = parsed['explicit_tile']
selector = parsed['selector']
if realtime and (encrypted or signed):
raise SensorGraphSemanticError("Realtime streamers cannot be either signed or encrypted")
if broadcast and (encrypted or signed):
raise SensorGraphSemanticError("Broadcast streamers cannot be either signed or encrypted")
report_type = 'broadcast' if broadcast else 'telegram'
dest = dest
selector = selector
if realtime or broadcast:
report_format = u'individual'
elif signed:
report_format = u'signedlist_userkey'
elif encrypted:
raise SensorGraphSemanticError("Encrypted streamers are not yet supported")
else:
report_format = u'hashedlist'
return DataStreamer(selector, dest, report_format, auto, report_type=report_type, with_other=with_other)
|
Parse a string descriptor of a streamer into a DataStreamer object.
Args:
string_desc (str): The string descriptor that we wish to parse.
Returns:
DataStreamer: A DataStreamer object representing the streamer.
|
juraj-google-style
|
def get_staged_signatures(vcs):
staged_path = _get_staged_history_path(vcs)
known_signatures = []
if os.path.exists(staged_path):
with open(staged_path, 'r') as f:
known_signatures = f.read().split()
return known_signatures
|
Get the list of staged signatures
Args:
vcs (easyci.vcs.base.Vcs)
Returns:
list(basestring) - list of signatures
|
codesearchnet
|
def __init__(self, configuration_file='dependencies.ini'):
super(DependencyHelper, self).__init__()
self._test_dependencies = {}
self.dependencies = {}
dependency_reader = DependencyDefinitionReader()
with open(configuration_file, 'r') as file_object:
for dependency in dependency_reader.Read(file_object):
self.dependencies[dependency.name] = dependency
dependency = DependencyDefinition('mock')
dependency.minimum_version = '0.7.1'
dependency.version_property = '__version__'
self._test_dependencies['mock'] = dependency
|
Initializes a dependency helper.
Args:
configuration_file (Optional[str]): path to the dependencies
configuration file.
|
juraj-google-style
|
def _Open(self, path_spec, mode='rb'):
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
encoding_method = getattr(path_spec, 'encoding_method', None)
if not encoding_method:
raise errors.PathSpecError(
'Unsupported path specification without encoding method.')
self._encoding_method = encoding_method
|
Opens the file system defined by path specification.
Args:
path_spec (PathSpec): a path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
juraj-google-style
|
def sparse_read(self, indices, name=None):
raise AttributeError
|
Gather slices from params axis `axis` according to indices.
This function supports a subset of tf.gather, see tf.gather for details on
usage.
Args:
indices: The index `Tensor`. Must be one of the following types: `int32`,
`int64`. Must be in range `[0, params.shape[axis])`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `params`.
|
github-repos
|
def pyc_load(fp):
magic_1 = U16(fp.read(2), target=MARSHAL_TARGET)
magic_2 = U16(fp.read(2), target=MARSHAL_TARGET)
internals = MAGIC_MAP.get(magic_1)
if internals is None:
raise ValueError('Invalid or unknown magic (%d).' % magic_1)
if magic_2 != 2573:
raise ValueError('Invalid secondary magic (%d).' % magic_2)
timestamp = datetime.datetime.fromtimestamp(U32(fp.read(4), target=MARSHAL_TARGET))
if internals['version'] >= 33:
file_size = U32(fp.read(4))
else:
file_size = None
code_object = marshal_load(fp, internals)
return PycFile(magic_1, internals, timestamp, file_size, code_object)
|
Load a .pyc file from a file-like object.
Arguments:
fp(file): The file-like object to read.
Returns:
PycFile: The parsed representation of the .pyc file.
|
juraj-google-style
|
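A hedged usage sketch of pyc_load; the .pyc filename is illustrative, and the helpers (U16, U32, marshal_load, MAGIC_MAP) come from the snippet's own module:
with open('module.cpython-37.pyc', 'rb') as fp:
    pyc = pyc_load(fp)
print(pyc)  # parsed PycFile with magic, timestamp and code object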
def apply_mutation(module_path, operator, occurrence):
module_ast = get_ast(module_path, python_version=operator.python_version)
original_code = module_ast.get_code()
visitor = MutationVisitor(occurrence, operator)
mutated_ast = visitor.walk(module_ast)
mutated_code = None
if visitor.mutation_applied:
mutated_code = mutated_ast.get_code()
with module_path.open(mode='wt', encoding='utf-8') as handle:
handle.write(mutated_code)
handle.flush()
return original_code, mutated_code
|
Apply a specific mutation to a file on disk.
Args:
module_path: The path to the module to mutate.
operator: The `operator` instance to use.
occurrence: The occurrence of the operator to apply.
Returns: A `(unmutated-code, mutated-code)` tuple. If no mutation was
performed, the `mutated-code` is `None`.
|
juraj-google-style
|
def _expand_ellipsis(key_list, num_remaining_dims):
if num_remaining_dims is None:
raise ValueError('Ellipsis not supported for unknown shape RaggedTensors')
num_indices = sum((1 for idx in key_list if idx is not array_ops.newaxis))
if num_indices > num_remaining_dims + 1:
raise IndexError('Too many indices for RaggedTensor')
elif num_indices == num_remaining_dims + 1:
return key_list[1:]
else:
return [slice(None, None, None)] + key_list
|
Expands the ellipsis at the start of `key_list`.
Assumes that the first element of `key_list` is Ellipsis. This will either
remove the Ellipsis (if it corresponds to zero indices) or prepend a new
`slice(None, None, None)` (if it corresponds to more than zero indices).
Args:
key_list: The arguments to `__getitem__()`.
num_remaining_dims: The number of dimensions remaining.
Returns:
A copy of `key_list` with the ellipsis expanded.
Raises:
ValueError: If ragged_rank.shape.ndims is None
IndexError: If there are too many elements in `key_list`.
|
github-repos
|
def duplicate(script, layer_num=None):
filter_xml = ' <filter name="Duplicate Current layer"/>\n'
if isinstance(script, mlx.FilterScript):
if ((layer_num is None) or (layer_num == script.current_layer())):
util.write_filter(script, filter_xml)
script.add_layer('{}_copy'.format(script.layer_stack[script.current_layer()]), True)
else:
change(script, layer_num)
util.write_filter(script, filter_xml)
script.add_layer('{}_copy'.format(script.layer_stack[layer_num]), True)
else:
util.write_filter(script, filter_xml)
return None
|
Duplicate a layer.
New layer label is '*_copy'.
Args:
script: the mlx.FilterScript object or script filename to write
the filter to.
layer_num (int): layer number to duplicate. Default is the
current layer. Not supported on the file base API.
Layer stack:
Creates a new layer
Changes current layer to the new layer
MeshLab versions:
2016.12
1.3.4BETA
|
codesearchnet
|
def combine(a1, a2):
if not isinstance(a1, list):
a1 = [a1]
if not isinstance(a2, list):
a2 = [a2]
return a1 + a2
|
Combine to argument into a single flat list
It is used when you are not sure whether arguments are lists but want to combine them into one flat list
Args:
a1: list or other thing
a2: list or other thing
Returns:
list: a flat list containing a1 and a2
|
juraj-google-style
|
def get_spectre_plot(self, sigma=0.05, step=0.01):
from pymatgen.util.plotting import pretty_plot
from matplotlib.mlab import normpdf
plt = pretty_plot(12, 8)
transitions = self.read_excitation_energies()
minval = (min([val[0] for val in transitions]) - (5.0 * sigma))
maxval = (max([val[0] for val in transitions]) + (5.0 * sigma))
npts = (int(((maxval - minval) / step)) + 1)
eneval = np.linspace(minval, maxval, npts)
lambdaval = [(((cst.h * cst.c) / (val * cst.e)) * 1000000000.0) for val in eneval]
spectre = np.zeros(npts)
for trans in transitions:
spectre += (trans[2] * normpdf(eneval, trans[0], sigma))
spectre /= spectre.max()
plt.plot(lambdaval, spectre, 'r-', label='spectre')
data = {'energies': eneval, 'lambda': lambdaval, 'xas': spectre}
plt.vlines([val[1] for val in transitions], 0.0, [val[2] for val in transitions], color='blue', label='transitions', linewidth=2)
plt.xlabel('$\\lambda$ (nm)')
plt.ylabel('Arbitrary unit')
plt.legend()
return (data, plt)
|
Get a matplotlib plot of the UV-visible xas. Transitions are plotted
as vertical lines and as a sum of normal functions of width sigma. The
broadening is applied in energy and the xas is plotted as a function
of the wavelength.
Args:
sigma: Full width at half maximum in eV for normal functions.
step: bin interval in eV
Returns:
A dict: {"energies": values, "lambda": values, "xas": values}
where values are lists of abscissa (energies, lamba) and
the sum of gaussian functions (xas).
A matplotlib plot.
|
codesearchnet
|
def _illegal_character(c, ctx, message=''):
container_type = ctx.container.ion_type is None and 'top-level' or ctx.container.ion_type.name
value_type = ctx.ion_type is None and 'unknown' or ctx.ion_type.name
if c is None:
header = 'Illegal token'
else:
c = 'EOF' if BufferQueue.is_eof(c) else _chr(c)
header = 'Illegal character %s' % (c,)
raise IonException('%s at position %d in %s value contained in %s. %s Pending value: %s'
% (header, ctx.queue.position, value_type, container_type, message, ctx.value))
|
Raises an IonException upon encountering the given illegal character in the given context.
Args:
c (int|None): Ordinal of the illegal character.
ctx (_HandlerContext): Context in which the illegal character was encountered.
message (Optional[str]): Additional information, as necessary.
|
juraj-google-style
|
def AddSubkey(self, registry_key):
name = registry_key.name.upper()
if name in self._subkeys:
raise KeyError(
'Subkey: {0:s} already exists.'.format(registry_key.name))
self._subkeys[name] = registry_key
key_path = key_paths.JoinKeyPath([self._key_path, registry_key.name])
registry_key._key_path = key_path
|
Adds a subkey.
Args:
registry_key (WinRegistryKey): Windows Registry subkey.
Raises:
KeyError: if the subkey already exists.
|
juraj-google-style
|
def read_tensor_tracer_event_file(event_file):
step_occurrence_count = collections.defaultdict(int)
step_occurrence_list = []
for trace_event in summary_iterator.summary_iterator(event_file):
if not trace_event.HasField('summary'):
continue
if len(trace_event.summary.value) != 1:
raise ValueError('Single step contains %d summary values, expected 1.' % len(trace_event.summary.value))
step = trace_event.step
step_occurrence_count[step] += 1
occurrence_idx = step_occurrence_count[step] - 1
occurrence_size = len(step_occurrence_list)
if occurrence_idx == occurrence_size:
new_occurrence = collections.defaultdict(dict)
step_occurrence_list.append(new_occurrence)
elif occurrence_idx > occurrence_size:
raise ValueError('Unexpected: occurrence_idx (%d) > occurrence_size (%d)' % (occurrence_idx, occurrence_size))
tensor_value = trace_event.summary.value[0]
tensor_name = tensor_value.tag
real_shape = [d.size for d in tensor_value.tensor.tensor_shape.dim]
tensor_content = np.frombuffer(tensor_value.tensor.tensor_content, dtypes.DType(tensor_value.tensor.dtype).as_numpy_dtype()).reshape(real_shape)
step_occurrence_list[occurrence_idx][step][tensor_name] = tensor_content
return step_occurrence_list
|
Reads the event file written by tensor tracer.
This can be used to read the full tensors written into binary event files
by TensorTracer with trace_mode=full_tensor_summary.
Example usage:
result_dict_list = tensor_tracer.read_tensor_tracer_event_file(
event_file_path)
for result_dict in result_dict_list:
for step, tensor_dict in result_dict.items():
for tensor_name, full_tensor_content in tensor_dict.items():
logging.info(tensor_name, full_tensor_content)
Args:
event_file: Path to the event file that contains only tensor tracer events.
Returns:
A list of event dictionaries, each of which with the form:
{step_number: {tensor_name: tensor_content}}. This is a list instead of
a single event dictionary because it is possible that an event file may
have multiple event traces, each of them covering the same step ranges.
Raises:
ValueError: If an unexpected trace is found.
|
github-repos
|
def Downsampled(cls, stats, interval=None):
interval = (interval or cls.DEFAULT_SAMPLING_INTERVAL)
result = cls(stats)
result.cpu_samples = cls._Downsample(kind=CpuSample, samples=stats.cpu_samples, interval=interval)
result.io_samples = cls._Downsample(kind=IOSample, samples=stats.io_samples, interval=interval)
return result
|
Constructs a copy of given stats but downsampled to given interval.
Args:
stats: A `ClientStats` instance.
interval: A downsampling interval.
Returns:
A downsampled `ClientStats` instance.
|
codesearchnet
|
def eig(x):
if any_symbolic_tensors((x,)):
return Eig().symbolic_call(x)
return _eig(x)
|
Computes the eigenvalues and eigenvectors of a square matrix.
Args:
x: Input tensor of shape `(..., M, M)`.
Returns:
A tuple of two tensors: a tensor of shape `(..., M)` containing
eigenvalues and a tensor of shape `(..., M, M)` containing eigenvectors.
|
github-repos
|
def update_handler(Model, name=None, **kwds):
async def action_handler(service, action_type, payload, props, notify=True, **kwds):
if (action_type == get_crud_action('update', (name or Model))):
try:
message_props = {}
if ('correlation_id' in props):
message_props['correlation_id'] = props['correlation_id']
pk_field = Model.primary_key()
if (not (pk_field.name in payload)):
raise ValueError('Must specify the pk of the model when updating')
model = Model.select().where((pk_field == payload[pk_field.name])).get()
payload.pop(pk_field.name, None)
for (key, value) in payload.items():
setattr(model, key, value)
model.save()
if notify:
(await service.event_broker.send(payload=ModelSerializer().serialize(model), action_type=change_action_status(action_type, success_status()), **message_props))
except Exception as err:
if notify:
(await service.event_broker.send(payload=str(err), action_type=change_action_status(action_type, error_status()), **message_props))
else:
raise err
return action_handler
|
This factory returns an action handler that updates an instance of
the specified model when an update action is received, assuming the
action follows nautilus conventions.
Args:
Model (nautilus.BaseModel): The model to update when the action is
received.
Returns:
function(type, payload): The action handler for this model
|
codesearchnet
|
def add(self, *l):
for a in flatten(l):
self._add([self.Inner(a)], self.l)
|
add inner to outer
Args:
*l: element that is passed into Inner init
|
codesearchnet
|
def __init__(self, name, min_val, max_val):
self.name = name
self.min_val = min_val
self.max_val = max_val
if type(min_val) != type(max_val):
raise ValueError('Supplied min_val is not the same type as\
supplied max_val: {}, {}'.format(
type(min_val),
type(max_val))
)
self.dtype = type(min_val + max_val)
if self.dtype not in SUPPORTED_DTYPES:
raise ValueError('Unsupported data type: use {}'
.format(SUPPORTED_DTYPES))
|
Parameter object
Args:
name (str): name of the parameter
min_val (int or float): minimum allowed value for the parameter
max_val (int or float): maximum allowed value for the parameter
|
juraj-google-style
|
def template(self):
instance = self.template_instance()
offset = (self._chunk.offset() + instance.template_offset())
node = TemplateNode(self._buf, offset, self._chunk, instance)
return node
|
parse the template referenced by this root node.
note, this template structure is not guaranteed to be located within the root node's boundaries.
Returns:
TemplateNode: the template.
|
codesearchnet
|
def get_glibc_version():
key = 'glibc_ver'
out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])
if err and FLAGS.debug:
print('Error in detecting GLIBC version:\n %s' % str(err))
return out.strip(b'\n')
|
Retrieves version of GLIBC detected.
Returns:
String that is the version of GLIBC.
e.g. '2.24'
|
github-repos
|
def report_uninitialized_resources(resource_list=None, name='report_uninitialized_resources'):
if resource_list is None:
resource_list = shared_resources() + local_resources()
with ops.name_scope(name):
local_device = os.environ.get('TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING', '/cpu:0')
with ops.device(local_device):
if not resource_list:
return array_ops.constant([], dtype=dtypes.string)
variables_mask = math_ops.logical_not(array_ops_stack.stack([r.is_initialized for r in resource_list]))
variable_names_tensor = array_ops.constant([s.handle.name for s in resource_list])
return array_ops.boolean_mask(variable_names_tensor, variables_mask)
|
Returns the names of all uninitialized resources in resource_list.
If the returned tensor is empty then all resources have been initialized.
Args:
resource_list: resources to check. If None, will use shared_resources() +
local_resources().
name: name for the resource-checking op.
Returns:
Tensor containing names of the handles of all resources which have not
yet been initialized.
|
github-repos
|
def shift(x, offset, dim, wrap, name=None):
return ShiftOperation(x, offset, dim, wrap, name=name).outputs[0]
|
Shift operation.
Shift x right by +offset in dimension dim.
Args:
x: a Tensor
offset: an integer. If negative, shift left instead of right.
dim: a Dimension of x
wrap: a boolean - whether to wrap (True) or pad with zeros (False).
name: an optional string
Returns:
a Tensor with the same shape and dtype as x
|
codesearchnet
|
def get_processes(sort_by_name=True):
if sort_by_name:
return sorted(
_list_processes(),
key=cmp_to_key(
lambda p1, p2: (cmp(p1.name, p2.name) or cmp(p1.pid, p2.pid))
),
)
else:
return sorted(
_list_processes(),
key=cmp_to_key(
lambda p1, p2: (cmp(p1.pid, p2.pid) or cmp(p1.name, p2.name))
),
)
|
Retrieve a list of processes sorted by name.
Args:
sort_by_name (bool): Sort the list by name or by process ID's.
Returns:
list of (int, str) or list of (int, str, str): List of process id,
process name and optional cmdline tuples.
|
juraj-google-style
|
def validate(cls, mapper_spec):
if mapper_spec.output_writer_class() != cls:
raise errors.BadWriterParamsError("Output writer class mismatch")
params = output_writers._get_params(mapper_spec)
if cls.BUCKET_NAME_PARAM not in params:
raise errors.BadWriterParamsError(
"%s is required for the _HashingGCSOutputWriter" %
cls.BUCKET_NAME_PARAM)
|
Validates mapper specification.
Args:
mapper_spec: an instance of model.MapperSpec to validate.
Raises:
BadWriterParamsError: when Output writer class mismatch.
|
juraj-google-style
|
def separate_words(text, acronyms=None):
(words, _case, _sep) = case_parse.parse_case(text, acronyms, preserve_case=True)
return ' '.join(words)
|
Return text in "separate words" style.
Args:
text: input string to convert case
detect_acronyms: should attempt to detect acronyms
acronyms: a list of acronyms to detect
>>> separate_words("HELLO_WORLD")
'HELLO WORLD'
>>> separate_words("helloHTMLWorld", True, ["HTML"])
'hello HTML World'
|
codesearchnet
|
def __init__(self, instrument, probe_name, name = None, info = None, buffer_length = 100):
assert isinstance(instrument, Instrument)
assert isinstance(probe_name, str)
assert probe_name in instrument._PROBES
if name is None:
name = probe_name
assert isinstance(name, str)
if info is None:
info = ''
assert isinstance(info, str)
self.name = name
self.info = info
self.instrument = instrument
self.probe_name = probe_name
self.buffer = deque(maxlen = buffer_length)
|
creates a probe...
Args:
name (optional): name of the probe; if not provided, probe_name is used
settings (optional): a Parameter object that contains all the information needed in the script
|
juraj-google-style
|
async def reset(self):
params = {'include_participants': (1 if AUTO_GET_PARTICIPANTS else 0), 'include_matches': (1 if AUTO_GET_MATCHES else 0)}
res = (await self.connection('POST', 'tournaments/{}/reset'.format(self._id), **params))
self._refresh_from_json(res)
|
reset the tournament on Challonge
|methcoro|
Note:
|from_api| Reset a tournament, clearing all of its scores and attachments. You can then add/remove/edit participants before starting the tournament again.
Raises:
APIException
|
codesearchnet
|
def ndim(x):
return x.shape.rank
|
Returns the number of axes in a tensor, as an integer.
Args:
x: Tensor or variable.
Returns:
Integer (scalar), number of axes.
Examples:
>>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = tf.keras.backend.variable(value=val)
>>> tf.keras.backend.ndim(input)
3
>>> tf.keras.backend.ndim(kvar)
2
|
github-repos
|
def _checkBool(inputvalue, description='inputvalue'):
_checkString(description, minlength=1, description='description string')
if not isinstance(inputvalue, bool):
raise TypeError('The {0} must be boolean. Given: {1!r}'.format(description, inputvalue))
|
Check that the given inputvalue is a boolean.
Args:
* inputvalue (boolean): The value to be checked.
* description (string): Used in error messages for the checked inputvalue.
Raises:
TypeError, ValueError
|
juraj-google-style
|
def query_gal(self, l, b, d=None, **kwargs):
if (not isinstance(l, units.Quantity)):
l = (l * units.deg)
if (not isinstance(b, units.Quantity)):
b = (b * units.deg)
if (d is None):
coords = coordinates.SkyCoord(l, b, frame='galactic')
else:
if (not isinstance(d, units.Quantity)):
d = (d * units.kpc)
coords = coordinates.SkyCoord(l, b, distance=d, frame='galactic')
return self.query(coords, **kwargs)
|
Query using Galactic coordinates.
Args:
l (:obj:`float`, scalar or array-like): Galactic longitude, in degrees,
or as an :obj:`astropy.unit.Quantity`.
b (:obj:`float`, scalar or array-like): Galactic latitude, in degrees,
or as an :obj:`astropy.unit.Quantity`.
d (Optional[:obj:`float`, scalar or array-like]): Distance from the Solar
System, in kpc, or as an :obj:`astropy.unit.Quantity`. Defaults to
``None``, meaning no distance is specified.
**kwargs: Any additional keyword arguments accepted by derived
classes.
Returns:
The results of the query, which must be implemented by derived
classes.
|
codesearchnet
|
def list_filters(self):
def _row_gen(attributes):
for attr in attributes.values():
(yield (attr.name, attr.type, attr.description))
return pd.DataFrame.from_records(_row_gen(self.filters), columns=['name', 'type', 'description'])
|
Lists available filters in a readable DataFrame format.
Returns:
pd.DataFrame: Frame listing available filters.
|
codesearchnet
|
def get_groups(self, **kwargs):
params = {
'cultureInfo': util.language_code(kwargs.get('lang'))
}
result = self.make_request('geo', 'get_groups', **params)
if not util.check_result(result):
return False, result.get('resultDescription', 'UNKNOWN ERROR')
values = util.response_list(result, 'resultValues')
return True, [emtype.GeoGroupItem(**a) for a in values]
|
Obtain geographic group types and details.
Args:
lang (str): Language code (*es* or *en*).
Returns:
Status boolean and parsed response (list[GeoGroupItem]), or message
string in case of error.
|
juraj-google-style
|
def apply(self, inputs, *args, **kwargs):
warnings.warn('`layer.apply` is deprecated and will be removed in a future version. Please use `layer.__call__` method instead.')
return self.__call__(inputs, *args, **kwargs)
|
Deprecated, do NOT use!
This is an alias of `self.__call__`.
Args:
inputs: Input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
|
github-repos
|
def datasets_get(self, dataset_name):
url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
return datalab.utils.Http.request(url, credentials=self._credentials)
|
Issues a request to retrieve information about a dataset.
Args:
dataset_name: the name of the dataset
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
|
juraj-google-style
|
def add(self, rule: 'functions.ReplacementRule') -> None:
self.matcher.add(rule.pattern, rule.replacement)
|
Add a new rule to the replacer.
Args:
rule:
The rule to add.
|
juraj-google-style
|
def dr(self, atom1, atom2):
return self.cell.dr(atom1.r, atom2.r)
|
Calculate the distance between two atoms.
Args:
atom1 (vasppy.Atom): Atom 1.
atom2 (vasppy.Atom): Atom 2.
Returns:
(float): The distance between Atom 1 and Atom 2.
|
codesearchnet
|
def _ws_on_open(self, ws: websocket.WebSocketApp):
payload = {'op': WebSocketEvent.IDENTIFY.value, 'd': {'token': self.token, 'properties': {'$os': sys.platform, '$browser': 'Pycord', '$device': 'Pycord', '$referrer': '', '$referring_domain': ''}, 'compress': True, 'large_threshold': 250}}
self.logger.debug('Sending identify payload')
ws.send(json.dumps(payload))
self.connected = True
|
Callback for sending the initial authentication data
This "payload" contains the required data to authenticate this websocket
client as a suitable bot connection to the Discord websocket.
Args:
ws: websocket connection
|
codesearchnet
|
def get_item(dictionary, tuple_key, default_value):
u, v = tuple_key
tuple1 = dictionary.get((u, v), None)
tuple2 = dictionary.get((v, u), None)
return tuple1 or tuple2 or default_value
|
Grab values from a dictionary using an unordered tuple as a key.
Dictionary should not contain None, 0, or False as dictionary values.
Args:
dictionary: Dictionary that uses two-element tuple as keys
tuple_key: Unordered tuple of two elements
default_value: Value that is returned when the tuple_key is not found in the dictionary
|
juraj-google-style
|
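A tiny example of the unordered-tuple lookup above:
weights = {('a', 'b'): 3, ('c', 'd'): 5}
get_item(weights, ('b', 'a'), 0)  # -> 3, found via the reversed key
get_item(weights, ('a', 'z'), 0)  # -> 0, falls back to the default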
def get_lattice_type(number):
f = lambda i, j: i <= number <= j
cs = {'triclinic': (1, 2), 'monoclinic': (3, 15),
'orthorhombic': (16, 74), 'tetragonal': (75, 142),
'trigonal': (143, 167), 'hexagonal': (168, 194),
'cubic': (195, 230)}
crystal_system = None
for k, v in cs.items():
if f(*v):
crystal_system = k
break
if number in [146, 148, 155, 160, 161, 166, 167]:
return "rhombohedral"
elif crystal_system == "trigonal":
return "hexagonal"
else:
return crystal_system
|
Return the lattice crystal system.
Hexagonal cells are differentiated into rhombohedral and hexagonal
lattices.
Args:
number (int): The international space group number.
Returns:
str: The lattice crystal system.
|
juraj-google-style
|
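A few worked calls for the space-group ranges above:
get_lattice_type(225)  # -> 'cubic' (195-230)
get_lattice_type(167)  # -> 'rhombohedral' (hexagonal cell, rhombohedral lattice)
get_lattice_type(150)  # -> 'hexagonal' (trigonal system reported as hexagonal lattice)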
def download_structure(pdb_id, file_type, outdir='', only_header=False, force_rerun=False):
pdb_id = pdb_id.lower()
file_type = file_type.lower()
file_types = ['pdb', 'pdb.gz', 'mmcif', 'cif', 'cif.gz', 'xml.gz', 'mmtf', 'mmtf.gz']
if file_type not in file_types:
raise ValueError('Invalid file type, must be either: pdb, pdb.gz, cif, cif.gz, xml.gz, mmtf, mmtf.gz')
if file_type == 'mmtf':
file_type = 'mmtf.gz'
if file_type.endswith('.gz'):
gzipped = True
else:
gzipped = False
if file_type == 'mmcif':
file_type = 'cif'
if only_header:
folder = 'header'
outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))
else:
folder = 'download'
outfile = op.join(outdir, '{}.{}'.format(pdb_id, file_type))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
if file_type == 'mmtf.gz' or file_type == 'mmtf':
mmtf_api = '1.0'
download_link = 'http:
else:
download_link = 'http:
urlretrieve(download_link, outfile)
if gzipped:
outfile = ssbio.utils.gunzip_file(infile=outfile,
outfile=outfile.strip('.gz'),
outdir=outdir,
delete_original=False,
force_rerun_flag=force_rerun)
log.debug('{}: saved structure file'.format(outfile))
else:
if file_type == 'mmtf.gz':
outfile = op.join(outdir, '{}.{}'.format(pdb_id, 'mmtf'))
log.debug('{}: structure file already saved'.format(outfile))
return outfile
|
Download a structure from the RCSB PDB by ID. Specify the file type desired.
Args:
pdb_id: PDB ID
file_type: pdb, pdb.gz, mmcif, cif, cif.gz, xml.gz, mmtf, mmtf.gz
outdir: Optional output directory
only_header: If only the header file should be downloaded
force_rerun: If the file should be downloaded again even if it exists
Returns:
str: Path to outfile
|
juraj-google-style
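A hypothetical call to `download_structure`; the PDB ID and output directory are illustrative, and since the download URLs are truncated in the snippet above this assumes a working copy of the function:
```python
# Fetch the plain-text PDB file for entry 1kf6 into /tmp/structures.
outfile = download_structure('1kf6', 'pdb', outdir='/tmp/structures')
print(outfile)  # /tmp/structures/1kf6.pdb
```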
|
def expand(self, tags, clique_scoring_func=None):
lattice = Lattice()
overlapping_spans = []
def end_token_index():
return max([t.get('end_token') for t in overlapping_spans])
for i in xrange(len(tags)):
tag = tags[i]
if len(overlapping_spans) > 0 and end_token_index() >= tag.get('start_token'):
overlapping_spans.append(tag)
elif len(overlapping_spans) > 1:
cliques = list(self._sub_expand(overlapping_spans))
if clique_scoring_func:
cliques = sorted(cliques, key=lambda e: -1 * clique_scoring_func(e))
lattice.append(cliques)
overlapping_spans = [tag]
else:
lattice.append(overlapping_spans)
overlapping_spans = [tag]
if len(overlapping_spans) > 1:
cliques = list(self._sub_expand(overlapping_spans))
if clique_scoring_func:
cliques = sorted(cliques, key=lambda e: -1 * clique_scoring_func(e))
lattice.append(cliques)
else:
lattice.append(overlapping_spans)
return lattice.traverse()
|
This is the main function to expand tags into cliques
Args:
tags (list): a list of tags to find the cliques.
clique_scoring_func (func): a function that returns a float
value for the clique
Returns:
list : a list of cliques
|
juraj-google-style
|
def auth_required(func):
    @wraps(func)
    async def wrapper(*args):
        if await get_auth(args[-1]) is None:
            raise web.HTTPForbidden()
        return await func(*args)
    return wrapper
|
Utility decorator that checks if a user has been authenticated for this
request.
Allows views to be decorated like:
@auth_required
def view_func(request):
pass
providing a simple means to ensure that whoever is calling the function has
the correct authentication details.
Args:
func: The view function (coroutine) being decorated.
Returns:
A function object that will raise web.HTTPForbidden() if the passed
request does not have the correct permissions to access the view.
|
codesearchnet
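A hedged sketch of how `auth_required` might be applied to an aiohttp handler; the `get_auth` helper and the route registration are assumed to exist elsewhere in the application:
```python
from aiohttp import web

@auth_required
async def profile(request):
    # Only reached when get_auth(request) returned an authenticated user.
    return web.json_response({'status': 'ok'})

# app.router.add_get('/profile', profile)  # hypothetical registration
```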
|
def update_connection_public_key(self, connection_id, public_key):
if connection_id in self._connections:
connection_info = self._connections[connection_id]
self._connections[connection_id] = \
ConnectionInfo(connection_info.connection_type,
connection_info.connection,
connection_info.uri,
connection_info.status,
public_key)
else:
LOGGER.debug("Could not update the public key %s for "
"connection_id %s. The connection does not "
"exist.",
public_key,
connection_id)
|
Adds the public_key to the connection definition.
Args:
connection_id (str): The identifier for the connection.
public_key (str): The public key used to enforce permissions on
connections.
|
juraj-google-style
|
def find(self, username):
filter = ['(uid={})'.format(username)]
results = self.client.search(filter)
if len(results) < 1:
raise ldap_tools.exceptions.NoUserFound(
'User ({}) not found'.format(username))
elif len(results) > 1:
raise ldap_tools.exceptions.TooManyResults(
'Multiple users found. Please narrow your search.')
else:
return results
|
Find user with given username.
Args:
username (str): Username of the user to search for.
Raises:
ldap_tools.exceptions.NoUserFound: No users returned by LDAP
ldap_tools.exceptions.TooManyResults:
Multiple users returned by LDAP
|
juraj-google-style
|
def read_eof(self, echo=None):
d = b''
while True:
try:
d += self.read(1, echo)
except EOFError:
return d
|
Read until the channel is closed.
Args:
echo(bool): Whether to write the read data to stdout.
Returns:
bytes: The read data.
|
codesearchnet
|
def get_pipeline_definition(pipeline_name, working_dir):
logger.debug('starting')
pipeline_path = get_pipeline_path(pipeline_name=pipeline_name, working_directory=working_dir)
logger.debug(f'Trying to open pipeline at path {pipeline_path}')
try:
with open(pipeline_path) as yaml_file:
pipeline_definition = pypyr.yaml.get_pipeline_yaml(yaml_file)
logger.debug(f'found {len(pipeline_definition)} stages in pipeline.')
except FileNotFoundError:
logger.error(f"The pipeline doesn't exist. Looking for a file here: {pipeline_name}.yaml in the /pipelines sub directory.")
raise
logger.debug('pipeline definition loaded')
logger.debug('done')
return pipeline_definition
|
Open and parse the pipeline definition yaml.
Parses pipeline yaml and returns dictionary representing the pipeline.
pipeline_name.yaml should be in the working_dir/pipelines/ directory.
Args:
pipeline_name: string. Name of pipeline. This will be the file-name of
the pipeline - i.e {pipeline_name}.yaml
working_dir: path. Start looking in
./working_dir/pipelines/pipeline_name.yaml
Returns:
dict describing the pipeline, parsed from the pipeline yaml.
Raises:
FileNotFoundError: pipeline_name.yaml not found in the various pipeline
dirs.
|
codesearchnet
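An illustrative call, assuming a project at /projects/myapp that contains a pipelines/build.yaml file:
```python
pipeline = get_pipeline_definition(pipeline_name='build', working_dir='/projects/myapp')
# The parsed yaml comes back as a dict-like structure of stages, e.g.:
print(list(pipeline))  # ['context_parser', 'steps', ...] depending on the yaml
```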
|
def call_projection_function(self, hist: Hist) -> Hist:
for axis in self.projection_axes:
logger.debug(f'Apply projection axes hist range: {axis.name}')
axis.apply_range_set(hist)
projected_hist = None
if (hasattr(hist, 'ProjectionND') and hasattr(hist, 'Projection')):
projected_hist = self._project_THn(hist=hist)
elif (hasattr(hist, 'ProjectionZ') and hasattr(hist, 'Project3D')):
projected_hist = self._project_TH3(hist=hist)
elif (hasattr(hist, 'ProjectionX') and hasattr(hist, 'ProjectionY')):
projected_hist = self._project_TH2(hist=hist)
else:
raise TypeError(type(hist), f'Could not recognize hist {hist} of type {type(hist)}')
self.cleanup_cuts(hist, cut_axes=self.projection_axes)
return projected_hist
|
Calls the actual projection function for the hist.
Args:
hist: Histogram from which the projections should be performed.
Returns:
The projected histogram.
|
codesearchnet
|
def __init__(self, session, object_factory):
check_type(session, RestSession, may_be_none=False)
super(RoomsAPI, self).__init__()
self._session = session
self._object_factory = object_factory
|
Initialize a new RoomsAPI object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the Webex Teams service.
Raises:
TypeError: If the parameter types are incorrect.
|
juraj-google-style
|
def FromString(cls, indata):
lines = [x.strip() for x in indata.split("\n") if not x.startswith('#')]  # assumption: '#' marks comment lines
if len(lines) < 3:
raise DataError("Invalid CommandFile string that did not contain 3 header lines", lines=lines)
fmt_line, version_line, ascii_line = lines[:3]
if not version_line.startswith("Format: "):
raise DataError("Invalid format version that did not start with 'Format: '", line=version_line)
version = version_line[8:]
if ascii_line != "Type: ASCII":
raise DataError("Unknown file type line (expected Type: ASCII)", line=ascii_line)
cmds = [cls.decode(x) for x in lines[3:]]
return CommandFile(fmt_line, version, cmds)
|
Load a CommandFile from a string.
The string should be produced from a previous call to
encode.
Args:
indata (str): The encoded input data.
Returns:
CommandFile: The decoded CommandFile object.
|
juraj-google-style
|
def fastcc_is_consistent(model, epsilon, solver):
for reaction in fastcc(model, epsilon, solver):
return False
return True
|
Quickly check whether the model is consistent.
Return true if the model is consistent. If it is only necessary to know
whether a model is consistent, this function is fast as it will return
the result as soon as it finds a single inconsistent reaction.
Args:
model: :class:`MetabolicModel` to solve.
epsilon: Flux threshold value.
solver: LP solver instance to use.
|
codesearchnet
|
def results_tc(self, key, value):
if os.access(self.default_args.tc_out_path, os.W_OK):
results_file = '{}/results.tc'.format(self.default_args.tc_out_path)
else:
results_file = 'results.tc'
new = True
open(results_file, 'a').close()
with open(results_file, 'r+') as fh:
results = ''
for line in fh.read().strip().split('\n'):
if not line:
continue
try:
k, v = line.split(' = ')
except ValueError:
k, v = line.split(' =')
if k == key:
v = value
new = False
if v is not None:
results += '{} = {}\n'.format(k, v)
if new and value is not None:
results += '{} = {}\n'.format(key, value)
fh.seek(0)
fh.write(results)
fh.truncate()
|
Write data to results_tc file in TcEX specified directory.
The TcEx platform supports persistent values between executions of the App. This
method stores the values for TC to read and put into the database.
Args:
key (string): The data key to be stored.
value (string): The data value to be stored.
|
juraj-google-style
|
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
    countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    if countryinfo is not None:
        # Assumption: the original line was truncated; an HXL-style key is assumed here.
        return countryinfo.get('#country+name+preferred')
    return None
|
Get country name from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
|
codesearchnet
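An illustrative call; the class name `Country` is assumed (the method is a classmethod), and the returned name depends on the underlying country dataset:
```python
name = Country.get_country_name_from_iso3('afg')
print(name)  # e.g. 'Afghanistan', or None if the code is unknown and no exception is given
```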
|
def victim_phone_assets(self, main_type, sub_type, unique_id, params=None):
params = params or {}
if not sub_type:
url = '/v2/{}/{}/victimAssets/phoneNumbers'.format(main_type, unique_id)
else:
url = '/v2/{}/{}/{}/victimAssets/phoneNumbers'.format(main_type, sub_type, unique_id)
for vpa in self._iterate(url, params, 'victimPhone'):
yield vpa
|
Yield victim phone number assets for a group or indicator.
Args:
main_type: The main API branch (group or indicator type).
sub_type: The sub type, if any.
unique_id: Unique id of the group or indicator.
params: Optional query parameters.
Returns:
Generator of victimPhone asset data.
|
juraj-google-style
|
def merkleroot(hashes):
    if not hashes:
        return sha3_256(b'').hexdigest()
    if len(hashes) == 1:
        return hexlify(hashes[0]).decode()
    if len(hashes) % 2 == 1:
        hashes.append(hashes[-1])
    parent_hashes = [sha3_256(hashes[i] + hashes[i + 1]).digest()
                     for i in range(0, len(hashes) - 1, 2)]
    return merkleroot(parent_hashes)
|
Computes the merkle root for a given list.
Args:
hashes (:obj:`list` of :obj:`bytes`): The leaves of the tree.
Returns:
str: Merkle root in hexadecimal form.
|
codesearchnet
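A small self-contained check of the recursion, assuming hashlib's sha3_256 and binascii's hexlify as used in the snippet (note the function mutates its argument when the leaf count is odd, so a copy is passed):
```python
from binascii import hexlify
from hashlib import sha3_256

leaves = [sha3_256(x).digest() for x in (b'a', b'b', b'c')]
root = merkleroot(list(leaves))  # odd count: the last leaf is duplicated internally
print(len(root))                 # 64 hex characters for a 256-bit root
```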
|
def make_multi_lagger(lags, groupby_kwargs=None):
laggers = [SingleLagger(l, groupby_kwargs=groupby_kwargs) for l in lags]
feature_union = FeatureUnion([(repr(lagger), lagger) for lagger in laggers])
return feature_union
|
Return a union of transformers that apply different lags
Args:
lags (Collection[int]): collection of lags to apply
groupby_kwargs (dict): keyword arguments to pd.DataFrame.groupby
|
codesearchnet
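A hypothetical sketch, assuming `SingleLagger` is a scikit-learn style transformer over a grouped, time-indexed DataFrame:
```python
union = make_multi_lagger(lags=[1, 7, 28], groupby_kwargs={'level': 'store_id'})
# X_lagged = union.fit_transform(X)  # one block of lagged features per requested lag
```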
|
def stack_inputs(self, stack_indices=None, tile_variants=False):
if stack_indices is None:
stack_indices = range(len(self._inputs))
length = self.pfor.loop_len_vector
for i in stack_indices:
inp = self._inputs[i]
is_variant = inp.t.dtype == dtypes.variant
if not inp.is_stacked:
self._inputs[i] = _stack(inp.t, length)
if tile_variants and is_variant:
self._inputs[i] = wrap(_tile_variant_with_length(self._inputs[i].t, length), True)
elif not tile_variants and is_variant:
self._inputs[i] = wrap(_untile_variant(self._inputs[i].t), True)
|
Stacks unstacked inputs at `stack_indices`.
Args:
stack_indices: indices of inputs at which stacking is done. If None,
stacking is done at all indices.
tile_variants: If True, affected indices which have a variant dtype will
be tiled after this operation to match the expected shape of a
vectorized tensor. Variants generally need to be un-tiled when they are
inputs to operations and tiled when returned.
|
github-repos
|
def _event_to_pb(event):
if isinstance(event, (TaskData, Task)):
key, klass = 'task', clearly_pb2.TaskMessage
elif isinstance(event, (WorkerData, Worker)):
key, klass = 'worker', clearly_pb2.WorkerMessage
else:
raise ValueError('unknown event')
keys = klass.DESCRIPTOR.fields_by_name.keys()
as_dict = getattr(event, '_asdict',
                  lambda: {f: getattr(event, f) for f in event._fields})
data = {k: v for k, v in as_dict().items() if k in keys}
return key, klass(**data)
|
Converts internal TaskData and WorkerData, as well as celery Task and Worker
objects, to protocol buffer messages.
Args:
event (Union[TaskData|Task|WorkerData|Worker]):
Returns:
ProtoBuf object
|
juraj-google-style
|
def _get_starting_population(initial_population, initial_position, population_size, population_stddev, seed):
if (initial_population is not None):
return [tf.convert_to_tensor(value=part) for part in initial_population]
seed_stream = distributions.SeedStream(seed, salt='get_starting_population')
population = []
for part in initial_position:
part = tf.convert_to_tensor(value=part)
part_event_shape = tf.shape(input=part)
population_part_shape = tf.concat([[(population_size - 1)], part_event_shape], axis=0)
population_part = tf.random.normal(population_part_shape, stddev=population_stddev, dtype=part.dtype.base_dtype, seed=seed_stream())
population_part += part
population_part = tf.concat([[part], population_part], axis=0)
population.append(population_part)
return population
|
Constructs the initial population.
If an initial population is not already provided, this function constructs
a population by adding random normal noise to the initial position.
Args:
initial_population: None or a list of `Tensor`s. The initial population.
initial_position: None or a list of `Tensor`s. The initial position.
If initial_population is None, this argument must not be None.
population_size: Scalar integer `Tensor`. The number of members in the
population. If the initial population is not None, this parameter is
ignored.
population_stddev: A positive scalar real `Tensor` of the same dtype
as `initial_position` or `initial_population` (whichever is not None).
This parameter is ignored if `initial_population`
is specified. Used to generate the population from the
`initial_position` by adding random normal noise with zero mean and
the specified standard deviation.
seed: Seed for random number generation.
Returns:
A list of `Tensor`s. The initial population.
|
codesearchnet
|
def plot_time_elapsed(filename, elapsed=False, unit='s', plot_kwargs=None):
import matplotlib.pyplot as plt
if plot_kwargs is None:
plot_kwargs = {}
data_column = 3 if elapsed else 1
data = np.genfromtxt(filename, dtype='i8,f4',
usecols=(0, data_column), names=['k', 'v'])
index = data['k']
values = data['v']
if unit == 's':
pass
elif unit == 'm':
values /= 60
elif unit == 'h':
values /= 3600
elif unit == 'd':
values /= 3600 * 24
else:
raise ValueError('The argument `unit` must be chosen from {s|m|h|d}.')
plt.plot(index, values, **plot_kwargs)
|
Plot series data from MonitorTimeElapsed output text file.
Args:
filename (str): Path to *.series.txt file produced by the :obj:`~nnabla.MonitorTimeElapsed` class.
elapsed (bool): If ``True``, it plots the total elapsed time.
unit (str):
Time unit chosen from ``'s'``, ``'m'``, ``'h'``, or ``'d'``.
plot_kwargs (dict, optional):
Keyword arguments passed to :func:`matplotlib.pyplot.plot`.
Note:
matplotlib package is required.
|
juraj-google-style
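Illustrative usage, assuming a MonitorTimeElapsed output file produced during training (the path is made up):
```python
import matplotlib.pyplot as plt

plot_time_elapsed('monitor/time.series.txt', elapsed=True, unit='m',
                  plot_kwargs={'label': 'total elapsed (min)'})
plt.legend()
plt.show()
```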
|
def to_parquet(evset: EventSet, path: str, **kwargs):
df = to_pandas(evset)
df.to_parquet(path, **kwargs)
|
Saves an [`EventSet`][temporian.EventSet] to a Parquet file.
Example:
```python
>>> output_path = str(tmp_dir / "output_data.parquet")
>>> evset = tp.event_set(timestamps=[1,], features={"f1": [0.1]})
>>> tp.to_parquet(evset, output_path)
```
Args:
evset: EventSet to save.
path: Path to the file.
|
github-repos
|
def _on_scan_request(self, sequence, topic, message):
if messages.ProbeCommand.matches(message):
self._logger.debug('Received probe message on topic %s, message=%s', topic, message)
self._loop.add_callback(self._publish_scan_response, message['client'])
else:
self._logger.warn('Invalid message received on topic %s, message=%s', topic, message)
|
Process a request for scanning information.
Args:
sequence (int): The sequence number of the packet received
topic (string): The topic this message was received on
message (dict): The message itself
|
codesearchnet
|
def sg_reuse(tensor, **opt):
opt = tf.sg_opt(opt)
assert hasattr(tensor, '_sugar'), 'cannot reuse this node.'
assert opt.input is not None, 'input is mandatory.'
nodes, prev = [tensor], tensor._sugar.prev
while prev is not None:
nodes = [prev] + nodes
prev = prev._sugar.prev if hasattr(prev, '_sugar') else None
out = opt.input
for node in nodes[1:]:
if node._sugar.is_layer:
fn = tf.sg_layer_func(node._sugar.func)
if node._sugar.arg.scope_name:
with tf.variable_scope(node._sugar.arg.scope_name):
out = fn(out, **(node._sugar.arg + tf.sg_opt(name=node._sugar.name, reuse=True)))
else:
out = fn(out, **(node._sugar.arg + tf.sg_opt(name=node._sugar.name, reuse=True)))
else:
out = node._sugar.func(out, node._sugar.arg)
return out
|
Reconstruct computational graph of `tensor` so all the parameters
can be reused and replace its input tensor with `opt.input`.
Args:
tensor: A `Tensor` (automatically given by chaining).
**opt:
input: A `Tensor` that will replace the original input tensor.
Returns:
Reconstructed tensor nodes.
|
juraj-google-style
|
def convert_to_ndarray(test_obj, a):
if tf.is_tensor(a):
a = test_obj.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
|
Converts the input `a` into an ndarray.
Args:
test_obj: An object which has the `evaluate` method. Used to evaluate `a` if
`a` is a Tensor.
a: Object to be converted to an ndarray.
Returns:
An ndarray containing the values of `a`.
|
github-repos
|