code | docstring | source
---|---|---|
def get_clusters_interfaces(clusters, extra_cond=(lambda nic: True)):
interfaces = {}
for cluster in clusters:
nics = get_cluster_interfaces(cluster, extra_cond=extra_cond)
interfaces.setdefault(cluster, nics)
return interfaces
|
Returns the available network interfaces for each cluster.
Args:
clusters (list): list of the clusters
extra_cond (lambda): extra predicate to filter the network cards retrieved
from the API. E.g. lambda nic: not nic['mounted'] will retrieve all the
usable network cards that are not mounted by default.
Returns:
dict of cluster with their associated nic names
Examples:
.. code-block:: python
# pseudo code
actual = get_clusters_interfaces(["paravance"])
expected = {"paravance": ["eth0", "eth1"]}
assertDictEquals(expected, actual)
|
codesearchnet
|
def visualize_instance_html(self, exp, label, div_name, exp_object_name,
text=True, opacity=True):
if not text:
return u''
text = (self.indexed_string.raw_string()
.encode('utf-8', 'xmlcharrefreplace').decode('utf-8'))
text = re.sub(r'[<>&]', '|', text)
exp = [(self.indexed_string.word(x[0]),
self.indexed_string.string_position(x[0]),
x[1]) for x in exp]
all_occurrences = list(itertools.chain.from_iterable(
[itertools.product([x[0]], x[1], [x[2]]) for x in exp]))
all_occurrences = [(x[0], int(x[1]), x[2]) for x in all_occurrences]
    # NOTE: the JS/HTML template string was elided in this extract; the call below
    # mirrors the upstream LIME template signature.
    ret = u'''
        %s.show_raw_text(%s, %d, %s, %s, %s);
        ''' % (exp_object_name, json.dumps(all_occurrences), label,
               json.dumps(text), div_name, json.dumps(opacity))
return ret
|
Adds text with highlighted words to visualization.
Args:
exp: list of tuples [(id, weight), (id,weight)]
label: label id (integer)
div_name: name of div object to be used for rendering(in js)
exp_object_name: name of js explanation object
text: if False, return empty
opacity: if True, fade colors according to weight
|
juraj-google-style
|
def remove_indirect_links(g, alg='aracne', **kwargs):
alg = {'aracne': aracne, 'nd': network_deconvolution, 'clr': clr}[alg]
mat = np.array(nx.adjacency_matrix(g).todense())
return nx.relabel_nodes(nx.DiGraph(alg(mat, **kwargs)), {idx: i for (idx, i) in enumerate(list(g.nodes()))})
|
Apply deconvolution to a networkx graph.
Args:
g (networkx.Graph): Graph to apply deconvolution to
alg (str): Algorithm to use ('aracne', 'clr', 'nd')
kwargs (dict): extra options for algorithms
Returns:
networkx.Graph: graph with indirect links removed.
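A minimal usage sketch for the function above. It assumes the call is made from the module that provides the `aracne`, `network_deconvolution`, and `clr` backends (the CDT utilities); the toy graph is illustrative only.
```python
import networkx as nx

# Hypothetical fully connected weighted graph over three variables, where the
# A-C link is expected to be an indirect effect of A-B and B-C.
g = nx.Graph()
g.add_weighted_edges_from([("A", "B", 0.9), ("B", "C", 0.8), ("A", "C", 0.4)])

# Prune likely indirect links using the ARACNE backend.
pruned = remove_indirect_links(g, alg="aracne")
print(pruned.edges())
```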
|
codesearchnet
|
def pack_image_features(self, image_features, image_sizes, vision_feature_select_strategy, image_newline=None):
new_image_features = []
feature_lens = []
for image_idx, image_feature in enumerate(image_features):
if image_feature.shape[0] > 1:
base_image_feature = image_feature[0]
image_feature = image_feature[1:]
height = width = self.config.vision_config.image_size
num_patch_height, num_patch_width = get_anyres_image_grid_shape(image_sizes[image_idx], self.config.image_grid_pinpoints, self.config.vision_config.image_size)
if np.prod(image_feature.shape) % (num_patch_height * num_patch_width * height * width) != 0 and vision_feature_select_strategy == 'default':
logger.warning_once('Image feature shape does not line up with the provided patch size. You may be using the `default` vision_feature_select_strategy with a visual encoder that does not have CLS.')
image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)
image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
image_feature = image_feature.flatten(1, 2).flatten(2, 3)
image_feature = unpad_image(image_feature, image_sizes[image_idx])
if image_newline is not None:
image_feature = torch.cat((image_feature, image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device, image_feature.dtype)), dim=-1)
image_feature = image_feature.flatten(1, 2).transpose(0, 1)
image_feature = torch.cat((base_image_feature, image_feature), dim=0)
else:
image_feature = image_feature[0]
if image_newline is not None:
image_feature = torch.cat((image_feature, image_newline[None].to(image_feature)), dim=0)
new_image_features.append(image_feature)
feature_lens.append(image_feature.size(0))
feature_lens = torch.tensor(feature_lens, dtype=torch.long, device=image_features[0].device)
return (new_image_features, feature_lens)
|
Reshape, unpad and then pack each image_feature into a single image_features tensor containing all visual vectors.
Args:
image_features (`List[torch.Tensor]` of length num_images, each of shape `(num_patches, image_length, embed_dim)`)
List of image feature tensor, each contains all the visual feature of all patches.
image_sizes (`torch.Tensor` of shape `(num_images, 2)`)
Actual image size of each image (H, W).
vision_feature_select_strategy (`str`)
The feature selection strategy used to select the vision feature from the vision backbone.
image_newline (`torch.Tensor` of shape `(embed_dim)`)
New line embedding vector.
Returns:
image_features (`torch.Tensor` of shape `(all_feat_len, embed_dim)`)
feature_lens (`List[int]`)
token length of each image in image_features
|
github-repos
|
def Patch(self, request, global_params=None):
config = self.GetMethodConfig('Patch')
return self._RunMethod(config, request, global_params=global_params)
|
Updates a `BuildTrigger` by its project ID and trigger ID. This API is experimental.
Args:
request: (CloudbuildProjectsTriggersPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BuildTrigger) The response message.
|
github-repos
|
def get_image_and_mask(self, label, positive_only=True, hide_rest=False, num_features=5, min_weight=0.0):
if (label not in self.local_exp):
raise KeyError('Label not in explanation')
segments = self.segments
image = self.image
exp = self.local_exp[label]
mask = np.zeros(segments.shape, segments.dtype)
if hide_rest:
temp = np.zeros(self.image.shape)
else:
temp = self.image.copy()
if positive_only:
fs = [x[0] for x in exp if ((x[1] > 0) and (x[1] > min_weight))][:num_features]
for f in fs:
temp[(segments == f)] = image[(segments == f)].copy()
mask[(segments == f)] = 1
return (temp, mask)
else:
for (f, w) in exp[:num_features]:
if (np.abs(w) < min_weight):
continue
c = (0 if (w < 0) else 1)
mask[(segments == f)] = (1 if (w < 0) else 2)
temp[(segments == f)] = image[(segments == f)].copy()
temp[((segments == f), c)] = np.max(image)
            # NOTE: a leftover no-op loop over the remaining color channels (its
            # channel-dimming body is commented out upstream) is omitted here.
return (temp, mask)
|
Returns the image and mask corresponding to the explanation for a given label.
Args:
label: label to explain
positive_only: if True, only take superpixels that contribute to
the prediction of the label. Otherwise, use the top
num_features superpixels, which can be positive or negative
towards the label
hide_rest: if True, make the non-explanation part of the return
image gray
num_features: number of superpixels to include in explanation
min_weight: minimum absolute weight a superpixel must have to be included
Returns:
(image, mask), where image is a 3d numpy array and mask is a 2d
numpy array that can be used with
skimage.segmentation.mark_boundaries
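A hedged end-to-end sketch of how this method is typically called, assuming the `lime` and `scikit-image` packages; the random image and dummy classifier are placeholders.
```python
import numpy as np
from lime import lime_image
from skimage.segmentation import mark_boundaries

# Dummy classifier: returns the same two-class probabilities for every image.
def classifier_fn(images):
    return np.tile([0.3, 0.7], (len(images), 1))

image = np.random.rand(64, 64, 3)
explainer = lime_image.LimeImageExplainer()
explanation = explainer.explain_instance(
    image, classifier_fn, top_labels=1, num_samples=50)

temp, mask = explanation.get_image_and_mask(
    explanation.top_labels[0], positive_only=True, num_features=5, hide_rest=False)
overlay = mark_boundaries(temp, mask)  # image with superpixel boundaries drawn
```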
|
codesearchnet
|
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is not None:
return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
return [0] * len(token_ids_0) + [1]
|
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
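To make the return value concrete, a small sketch that reproduces the two non-`already_has_special_tokens` branches above on hypothetical id lists:
```python
# Hypothetical sequences; one trailing special token per sequence, as in the code.
token_ids_0 = [101, 102, 103]
token_ids_1 = [201, 202]

single = [0] * len(token_ids_0) + [1]
pair = [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]

print(single)  # [0, 0, 0, 1]
print(pair)    # [0, 0, 0, 1, 0, 0, 1]
```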
|
github-repos
|
def sample_observed_state(self, s: pd.Series) -> Dict:
return {
n[0]: {
i.name: np.random.normal(s[n[0]] * i.mean, i.stdev)
for i in n[1]["indicators"].values()
}
for n in self.nodes(data=True)
}
|
Sample observed state vector. This is the implementation of the
emission function.
Args:
s: Latent state vector.
Returns:
Observed state vector.
|
juraj-google-style
|
def get_mailcap_entry(self, url):
for parser in mime_parsers.parsers:
if parser.pattern.match(url):
try:
modified_url, content_type = parser.get_mimetype(url)
except Exception as e:
_logger.warning('parser %s raised an exception', parser)
_logger.exception(e)
raise exceptions.MailcapEntryNotFound()
if not content_type:
_logger.info('Content type could not be determined')
raise exceptions.MailcapEntryNotFound()
elif content_type == 'text/html':
_logger.info('Content type text/html, deferring to browser')
raise exceptions.MailcapEntryNotFound()
command, entry = mailcap.findmatch(
self._mailcap_dict, content_type, filename=modified_url)
if not entry:
_logger.info('Could not find a valid mailcap entry')
raise exceptions.MailcapEntryNotFound()
return command, entry
raise exceptions.MailcapEntryNotFound()
|
Search through the mime handlers list and attempt to find the
appropriate command to open the provided url with.
Will raise a MailcapEntryNotFound exception if no valid command exists.
Params:
url (text): URL that will be checked
Returns:
command (text): The string of the command that should be executed
in a subprocess to open the resource.
entry (dict): The full mailcap entry for the corresponding command
|
juraj-google-style
|
def export(self, name=None):
with ops.name_scope(name, '%s_Export' % self.name, [self.resource_handle]):
exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(self.resource_handle, self._key_dtype, self._value_dtype)
exported_values.set_shape(exported_keys.get_shape().concatenate(self._value_shape))
return (exported_keys, exported_values)
|
Returns tensors of all keys and values in the table.
Args:
name: A name for the operation (optional).
Returns:
A pair of tensors with the first tensor containing all keys and the
second tensor containing all values in the table.
|
github-repos
|
def Logger(name, debug=False, facility=None):
logger = logging.getLogger(name)
logger.handlers = []
logger.addHandler(logging.NullHandler())
logger.propagate = False
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(name + ': %(levelname)s %(message)s')
if debug:
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
if facility:
syslog_handler = logging.handlers.SysLogHandler(
address=constants.SYSLOG_SOCKET, facility=facility)
syslog_handler.setLevel(logging.INFO)
syslog_handler.setFormatter(formatter)
logger.addHandler(syslog_handler)
return logger
|
Get a logging object with handlers for sending logs to SysLog.
Args:
name: string, the name of the logger which will be added to log entries.
debug: bool, True if debug output should write to the console.
facility: int, an encoding of the SysLog handler's facility and priority.
Returns:
logging object, an object for logging entries.
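A minimal usage sketch. Only console logging is exercised here; passing a facility additionally requires the module-level `constants.SYSLOG_SOCKET`.
```python
import logging.handlers

# Console-only logger: debug output is written to the console via StreamHandler.
logger = Logger(name='my-daemon', debug=True)
logger.debug('starting up')
logger.info('ready')

# With a facility such as logging.handlers.SysLogHandler.LOG_DAEMON, records at
# INFO and above would also be forwarded to syslog.
```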
|
juraj-google-style
|
def strip_hidden(key_tuples, visibilities):
result = []
for key_tuple in key_tuples:
if len(key_tuple) != len(visibilities):
raise ValueError(
"length of key tuple {} is not equal to length of visibilities {}".format(
key_tuple, visibilities
)
)
filtered_tuple = tuple(item for item, visible in zip(key_tuple, visibilities) if visible)
result.append(filtered_tuple)
return result
|
Filter each tuple according to visibility.
Args:
key_tuples: A sequence of tuples of equal length (i.e. rectangular)
visibilities: A sequence of booleans equal in length to the tuples contained in key_tuples.
Returns:
A sequence equal in length to key_tuples where the items are tuples with a length corresponding
to the number of items in visibilities which are True.
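A small worked example of the filtering described above (values are illustrative):
```python
key_tuples = [("2023", "Q1", "sales"), ("2023", "Q2", "costs")]
visibilities = (True, False, True)

# The middle item of each tuple is hidden.
print(strip_hidden(key_tuples, visibilities))
# [('2023', 'sales'), ('2023', 'costs')]
```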
|
juraj-google-style
|
def find_paths_referenced(self) -> Collection[str]:
_, paths = self._find_paths_referenced()
return set(paths)
|
Finds paths for any elements referenced in this expression.
For example, given the expression 'a.b.where(c > d.e).f' returns paths
{'a', 'a.b', 'a.b.c', 'a.b.d', 'a.b.d.e', 'a.b.f'}
Returns:
A collection of paths referenced in the expression.
|
github-repos
|
def debug_watch_keys(self, node_name, device_name=None):
try:
device_name = self._infer_device_name(device_name, node_name)
except ValueError:
return []
if node_name not in self._debug_watches[device_name]:
return []
watch_keys = []
for watched_slot in self._debug_watches[device_name][node_name]:
debug_ops = self._debug_watches[device_name][node_name][watched_slot]
for debug_op in debug_ops:
watch_keys.append(_get_tensor_watch_key(node_name, watched_slot, debug_op))
return watch_keys
|
Get all tensor watch keys of given node according to partition graphs.
Args:
node_name: (`str`) name of the node.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
(`list` of `str`) all debug tensor watch keys. Returns an empty list if
the node name does not correspond to any debug watch keys.
Raises:
`LookupError`: If debug watch information has not been loaded from
partition graphs yet.
|
github-repos
|
def _log_score(score):
logger.info(
"Score of ({}/{}) set for submission {}"
.format(score.points_earned, score.points_possible, score.submission.uuid)
)
|
Log the creation of a score.
Args:
score (Score): The score model.
Returns:
None
|
juraj-google-style
|
def instantiate(self, substitutions):
param_dict = self.substitute_params(substitutions)
pkg, ident = self.name.rsplit(".", 1)
pkg = "malcolm.modules.%s" % pkg
try:
ob = importlib.import_module(pkg)
except ImportError as e:
raise_with_traceback(
ImportError("\n%s:%d:\n%s" % (
self.filename, self.lineno, e)))
try:
ob = getattr(ob, ident)
except AttributeError:
raise_with_traceback(
ImportError("\n%s:%d:\nPackage %r has no ident %r" % (
self.filename, self.lineno, pkg, ident)))
try:
model = MethodModel.from_callable(ob, returns=False)
args = model.validate(param_dict)
ret = ob(**args)
except Exception as e:
sourcefile = inspect.getsourcefile(ob)
lineno = inspect.getsourcelines(ob)[1]
raise_with_traceback(
YamlError("\n%s:%d:\n%s:%d:\n%s" % (
self.filename, self.lineno, sourcefile, lineno, e)))
else:
return ret
|
Keep recursing down from base using dotted name, then call it with
self.params and args
Args:
substitutions (dict): Substitutions to make to self.param_dict
Returns:
The found object called with (*args, map_from_d)
E.g. if ob is malcolm.parts, and name is "ca.CADoublePart", then the
object will be malcolm.parts.ca.CADoublePart
|
juraj-google-style
|
def nonzero_monies(self):
return [copy.copy(m) for m in self._money_obs if (m.amount != 0)]
|
Get a list of the underlying ``Money`` instances that are not zero
Returns:
([Money]): A list of zero or more money instances. Currencies will be unique.
|
codesearchnet
|
def has_chosen(state, correct, msgs):
if not issubclass(type(correct), int):
raise InstructorError(
"Inside `has_chosen()`, the argument `correct` should be an integer."
)
student_process = state.student_process
if not isDefinedInProcess(MC_VAR_NAME, student_process):
raise InstructorError("Option not available in the student process")
else:
selected_option = getOptionFromProcess(student_process, MC_VAR_NAME)
if not issubclass(type(selected_option), int):
raise InstructorError("selected_option should be an integer")
if selected_option < 1 or correct < 1:
raise InstructorError(
"selected_option and correct should be greater than zero"
)
if selected_option > len(msgs) or correct > len(msgs):
raise InstructorError("there are not enough feedback messages defined")
feedback_msg = msgs[selected_option - 1]
state.reporter.success_msg = msgs[correct - 1]
state.do_test(EqualTest(selected_option, correct, feedback_msg))
|
Test multiple choice exercise.
Test for a MultipleChoiceExercise. The correct answer (as an integer) and feedback messages
are passed to this function.
Args:
correct (int): the index of the correct answer (should be an instruction). Starts at 1.
msgs (list(str)): a list containing all feedback messages belonging to each choice of the
student. The list should have the same length as the number of options.
|
juraj-google-style
|
def _initialize_splittable_and_unsplittable_dims(self, default_splittability, exception_dims_iterable=None):
default_dims = set()
exception_dims = set()
if exception_dims_iterable:
exception_dims.update(exception_dims_iterable)
for t in itertools.chain(self.inputs, self.outputs):
for dim_name in t.shape.dimension_names:
if (dim_name not in exception_dims):
default_dims.add(dim_name)
if (default_splittability == 'splittable'):
return (frozenset(default_dims), frozenset(exception_dims))
elif (default_splittability == 'unsplittable'):
return (frozenset(exception_dims), frozenset(default_dims))
else:
raise ValueError('default_splittability should be either "splittable" or "unsplittable" but was {}'.format(default_splittability))
|
Initializer for splittable_dims and unsplittable_dims.
Helper method to categorize all dimensions in the input/output tensors as
either splittable or unsplittable.
Args:
default_splittability: a string which is either "splittable" or
"unsplittable".
exception_dims_iterable: an optional iterable of names of dimensions
which are exceptions to the default splittability.
Returns:
splittable_dims and unsplittable_dims, two frozensets of names of
dimensions (strings)
Raises:
ValueError: default_splittability is not one of "splittable" or
"unsplittable".
|
codesearchnet
|
def _source_file_paths_outside_tensorflow_py_library(code_defs, id_to_string):
file_ids = set()
for code_def in code_defs:
for trace in code_def.traces:
file_ids.add(trace.file_id)
non_tf_files = (id_to_string[file_id] for file_id in file_ids)
non_tf_files = (f for f in non_tf_files if not source_utils.guess_is_tensorflow_py_library(f) and gfile.Exists(f))
return non_tf_files
|
Extract source file paths outside TensorFlow Python library.
Args:
code_defs: An iterable of `CodeDef` protos, i.e., an iterable of stack
traces.
id_to_string: A proto map from integer ids to strings.
Returns:
An iterable of source file paths outside the TensorFlow Python library.
|
github-repos
|
def ParseAccountInformation(
self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
display_name = self._GetRowValue(query_hash, row, 'given_displayname')
fullname = self._GetRowValue(query_hash, row, 'fullname')
username = '{0!s} <{1!s}>'.format(fullname, display_name)
event_data = SkypeAccountEventData()
event_data.country = self._GetRowValue(query_hash, row, 'country')
event_data.display_name = display_name
event_data.email = self._GetRowValue(query_hash, row, 'emails')
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.username = username
timestamp = self._GetRowValue(query_hash, row, 'profile_timestamp')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Profile Changed')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'authreq_timestamp')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, 'Authenticate Request')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'lastonline_timestamp')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Last Online')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'mood_timestamp')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Mood Event')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'sent_authrequest_time')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Auth Request Sent')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'lastused_timestamp')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Last Used')
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses account information.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row with account information.
|
juraj-google-style
|
def __delitem__(self, keyword):
status = False
if keyword:
if not self.case_sensitive:
keyword = keyword.lower()
current_dict = self.keyword_trie_dict
character_trie_list = []
for letter in keyword:
if letter in current_dict:
character_trie_list.append((letter, current_dict))
current_dict = current_dict[letter]
else:
current_dict = None
break
if current_dict and self._keyword in current_dict:
character_trie_list.append((self._keyword, current_dict))
character_trie_list.reverse()
for key_to_remove, dict_pointer in character_trie_list:
if len(dict_pointer.keys()) == 1:
dict_pointer.pop(key_to_remove)
else:
dict_pointer.pop(key_to_remove)
break
status = True
self._terms_in_trie -= 1
return status
|
To remove a keyword from the dictionary,
pass the keyword that you want to remove.
Args:
keyword : string
keyword that you want to remove if it's present
Examples:
>>> keyword_processor.add_keyword('Big Apple')
>>> del keyword_processor['Big Apple']
|
juraj-google-style
|
class Poisson(reduction_metrics.MeanMetricWrapper):
def __init__(self, name='poisson', dtype=None):
super().__init__(fn=poisson, name=name, dtype=dtype)
def get_config(self):
return {'name': self.name, 'dtype': self.dtype}
|
Computes the Poisson metric between `y_true` and `y_pred`.
Formula:
```python
metric = y_pred - y_true * log(y_pred)
```
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Examples:
>>> m = keras.metrics.Poisson()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result()
0.49999997
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result()
0.99999994
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[keras.metrics.Poisson()])
```
|
github-repos
|
def imshow(img, win_name='', wait_time=0):
cv2.imshow(win_name, imread(img))
cv2.waitKey(wait_time)
|
Show an image.
Args:
img (str or ndarray): The image to be displayed.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
|
codesearchnet
|
class CLIPEncoder(nn.Module):
def __init__(self, config: CLIPConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@can_return_tuple
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutput:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions)
else:
layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
|
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`CLIPEncoderLayer`].
Args:
config: CLIPConfig
|
github-repos
|
def _load_config(self, client_secrets_file, client_id, client_secret):
if (client_id and client_secret):
(self.client_id, self.client_secret) = (client_id, client_secret)
return
if client_secrets_file:
self._load_client_secrets(client_secrets_file)
return
if ('GOOGLE_OAUTH2_CLIENT_SECRETS_FILE' in self.app.config):
self._load_client_secrets(self.app.config['GOOGLE_OAUTH2_CLIENT_SECRETS_FILE'])
return
try:
(self.client_id, self.client_secret) = (self.app.config['GOOGLE_OAUTH2_CLIENT_ID'], self.app.config['GOOGLE_OAUTH2_CLIENT_SECRET'])
except KeyError:
raise ValueError('OAuth2 configuration could not be found. Either specify the client_secrets_file or client_id and client_secret or set the app configuration variables GOOGLE_OAUTH2_CLIENT_SECRETS_FILE or GOOGLE_OAUTH2_CLIENT_ID and GOOGLE_OAUTH2_CLIENT_SECRET.')
|
Loads oauth2 configuration in order of priority.
Priority:
1. Config passed to the constructor or init_app.
2. Config passed via the GOOGLE_OAUTH2_CLIENT_SECRETS_FILE app
config.
3. Config passed via the GOOGLE_OAUTH2_CLIENT_ID and
GOOGLE_OAUTH2_CLIENT_SECRET app config.
Raises:
ValueError if no config could be found.
|
codesearchnet
|
def ClaimNotificationsForCollection(cls, token=None, start_time=None, lease_time=200, collection=None):
class CollectionFilter(object):
def __init__(self, collection):
self.collection = collection
def FilterRecord(self, notification):
if (self.collection is None):
self.collection = notification.result_collection_urn
return (self.collection != notification.result_collection_urn)
f = CollectionFilter(collection)
results = []
with aff4.FACTORY.OpenWithLock(RESULT_NOTIFICATION_QUEUE, aff4_type=HuntResultQueue, lease_time=300, blocking=True, blocking_sleep_interval=15, blocking_lock_timeout=600, token=token) as queue:
for record in queue.ClaimRecords(record_filter=f.FilterRecord, start_time=start_time, timeout=lease_time, limit=100000):
results.append(record)
return (f.collection, results)
|
Return unclaimed hunt result notifications for collection.
Args:
token: The security token to perform database operations with.
start_time: If set, an RDFDateTime indicating at what point to start
claiming notifications. Only notifications with a timestamp after this
point will be claimed.
lease_time: How long to claim the notifications for.
collection: The urn of the collection to find notifications for. If unset,
the earliest (unclaimed) notification will determine the collection.
Returns:
A pair (collection, results) where collection is the collection
that notifications were retrieved for and results is a list of
Record objects which identify GrrMessage within the result
collection.
|
codesearchnet
|
def create_win_salt_restart_task():
cmd = 'cmd'
args = '/c ping -n 3 127.0.0.1 && net stop salt-minion && net start salt-minion'
return __salt__['task.create_task'](name='restart-salt-minion', user_name='System', force=True, action_type='Execute', cmd=cmd, arguments=args, trigger_type='Once', start_date='1975-01-01', start_time='01:00')
|
Create a task in Windows task scheduler to enable restarting the salt-minion
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' service.create_win_salt_restart_task()
|
codesearchnet
|
def init(dvc_dir):
config_file = os.path.join(dvc_dir, Config.CONFIG)
open(config_file, 'w+').close()
return Config(dvc_dir)
|
Initializes dvc config.
Args:
dvc_dir (str): path to .dvc directory.
Returns:
dvc.config.Config: config object.
|
codesearchnet
|
def build_nccl_all_reduce(input_tensors, red_op, un_op=None):
if red_op == math_ops.add:
output_tensors = nccl_ops.all_sum(input_tensors)
else:
raise ValueError('red_op not supported by NCCL all-reduce: ', red_op)
if un_op:
un_op_wrapped = []
for t in output_tensors:
with ops.colocate_with(t):
un_op_wrapped.append(un_op(t))
output_tensors = un_op_wrapped
return output_tensors
|
Build a subgraph that does one full all-reduce, using NCCL.
Args:
input_tensors: list of `tf.Tensor` of same-shape and type values to
be reduced.
red_op: binary elementwise reduction operator. Must be one of
{tf.add}
un_op: optional unary elementwise Op to apply to fully-reduce values.
Returns:
list of `tf.Tensor` of reduced values.
Raises:
ValueError: red_op not supported.
|
github-repos
|
class PatchTSMixerGatedAttention(nn.Module):
def __init__(self, in_size: int, out_size: int):
super().__init__()
self.attn_layer = nn.Linear(in_size, out_size)
self.attn_softmax = nn.Softmax(dim=-1)
def forward(self, inputs):
attn_weight = self.attn_softmax(self.attn_layer(inputs))
inputs = inputs * attn_weight
return inputs
|
Module that applies gated attention to input data.
Args:
in_size (`int`): The input size.
out_size (`int`): The output size.
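A quick shape check for the module above; the elementwise gating requires `out_size` to match the trailing input dimension (both 16 here). Values are illustrative.
```python
import torch

gate = PatchTSMixerGatedAttention(in_size=16, out_size=16)
x = torch.randn(2, 8, 16)  # (batch, patches, features)
y = gate(x)
print(y.shape)             # torch.Size([2, 8, 16])
```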
|
github-repos
|
def validate_format(self, **kwargs):
args = dict(
dict_type=self._dict,
allow_no_value=self._allow_no_value,
inline_comment_prefixes=self._inline_comment_prefixes,
strict=self._strict,
empty_lines_in_values=self._empty_lines_in_values
)
args.update(kwargs)
parser = ConfigParser(**args)
updated_cfg = str(self)
parser.read_string(updated_cfg)
|
Call ConfigParser to validate config
Args:
kwargs: are passed to :class:`configparser.ConfigParser`
|
juraj-google-style
|
def get_project_details(self, project_id):
if (not is_valid_uuid(project_id)):
raise StorageArgumentException('Invalid UUID for project_id: {0}'.format(project_id))
return self._authenticated_request.to_endpoint('project/{}/'.format(project_id)).return_body().get()
|
Get information on a given project
Args:
project_id (str): The UUID of the requested project.
Returns:
A dictionary describing the project::
{
u'collab_id': 2271,
u'created_by': u'303447',
u'created_on': u'2017-03-10T12:50:06.077891Z',
u'description': u'',
u'entity_type': u'project',
u'modified_by': u'303447',
u'modified_on': u'2017-03-10T12:50:06.077946Z',
u'name': u'2271',
u'uuid': u'3abd8742-d069-44cf-a66b-2370df74a682'
}
Raises:
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
|
codesearchnet
|
def _resource_apply_dense(self, grad, handle):
raise NotImplementedError()
|
Add ops to apply dense gradients to the variable `handle`.
Args:
grad: a `Tensor` representing the gradient.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
Returns:
An `Operation` which updates the value of the variable.
|
github-repos
|
def add_output(self, output):
if not isinstance(output, Output):
raise TypeError('`output` must be an Output instance or None')
self.outputs.append(output)
|
Adds an output to a Transaction's list of outputs.
Args:
output (:class:`~bigchaindb.common.transaction.
Output`): An Output to be added to the
Transaction.
|
juraj-google-style
|
def is_bit_mask(enumeration, potential_mask):
if (not isinstance(potential_mask, six.integer_types)):
return False
mask_enumerations = (CryptographicUsageMask, ProtectionStorageMask, StorageStatusMask)
if (enumeration not in mask_enumerations):
return False
mask = 0
for value in [e.value for e in enumeration]:
if ((value & potential_mask) == value):
mask |= value
if (mask != potential_mask):
return False
return True
|
A utility function that checks if the provided value is a composite bit
mask of enumeration values in the specified enumeration class.
Args:
enumeration (class): One of the mask enumeration classes found in this
file. These include:
* Cryptographic Usage Mask
* Protection Storage Mask
* Storage Status Mask
potential_mask (int): A potential bit mask composed of enumeration
values belonging to the enumeration class.
Returns:
True: if the potential mask is a valid bit mask of the mask enumeration
False: otherwise
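A hedged usage sketch, assuming the PyKMIP-style `CryptographicUsageMask` enumeration referenced above is importable as shown (the import path is an assumption):
```python
from kmip.core.enums import CryptographicUsageMask  # assumed import path

encrypt_decrypt = (CryptographicUsageMask.ENCRYPT.value
                   | CryptographicUsageMask.DECRYPT.value)

print(is_bit_mask(CryptographicUsageMask, encrypt_decrypt))  # True
print(is_bit_mask(CryptographicUsageMask, "not-an-int"))     # False
```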
|
codesearchnet
|
def filter(self, field_name, operand, value):
if (operand not in self._FILTER_OPERANDS):
raise ValueError('Operand must be one of {}'.format(', '.join(self._FILTER_OPERANDS)))
record_stub = record_factory(self._app)
field = record_stub.get_field(field_name)
self._raw['filters'].append({'fieldId': field.id, 'filterType': operand, 'value': field.get_report(value)})
|
Adds a filter to report
Notes:
All filters are currently AND'ed together
Args:
field_name (str): Target field name to filter on
operand (str): Operand used in comparison. See `swimlane.core.search` for options
value: Target value used in comparison
|
codesearchnet
|
def CreateDataTypeMap(self, definition_name):
data_type_definition = self._definitions_registry.GetDefinitionByName(definition_name)
if (not data_type_definition):
return None
return DataTypeMapFactory.CreateDataTypeMapByType(data_type_definition)
|
Creates a specific data type map by name.
Args:
definition_name (str): name of the data type definition.
Returns:
DataTypeMap: data type map or None if the data type definition
is not available.
|
codesearchnet
|
def lex_index(n, k, lst):
if len(lst) != k:
raise VisualizationError("list should have length k")
comb = list(map(lambda x: n - 1 - x, lst))
dualm = sum([n_choose_k(comb[k - 1 - i], i + 1) for i in range(k)])
return int(dualm)
|
Return the lex index of a combination.
Args:
n (int): the total number of options.
k (int): The number of elements.
lst (list): list
Returns:
int: returns int index for lex order
Raises:
VisualizationError: if length of list is not equal to k
|
juraj-google-style
|
def get_site_orbital_dos(self, site, orbital):
return Dos(self.efermi, self.energies, self.pdos[site][orbital])
|
Get the Dos for a particular orbital of a particular site.
Args:
site: Site in Structure associated with CompleteDos.
orbital: Orbital in the site.
Returns:
Dos containing densities for orbital of site.
|
juraj-google-style
|
def _init_profile_batch(self, profile_batch):
profile_batch_error_message = 'profile_batch must be a non-negative integer or 2-tuple of positive integers. A pair of positive integers signifies a range of batches to profile. Found: {}'.format(profile_batch)
if isinstance(profile_batch, str):
profile_batch = str(profile_batch).split(',')
profile_batch = nest.map_structure(int, profile_batch)
if isinstance(profile_batch, int):
self._start_batch = profile_batch
self._stop_batch = profile_batch
elif isinstance(profile_batch, (tuple, list)) and len(profile_batch) == 2:
self._start_batch, self._stop_batch = profile_batch
else:
raise ValueError(profile_batch_error_message)
if self._start_batch < 0 or self._stop_batch < self._start_batch:
raise ValueError(profile_batch_error_message)
self._profiler_started = False
if self._start_batch > 0:
self._start_profiler(logdir='')
self._stop_profiler(save=False)
self._is_tracing = False
self._should_trace = not (self._start_batch == 0 and self._stop_batch == 0)
|
Validate profile_batch value and set the range of batches to profile.
Sets values of _start_batch and _stop_batch attributes,
specifying the start and stop batch to profile.
Setting `profile_batch=0` disables profiling.
Args:
profile_batch: The range of batches to profile. Should be a non-negative
integer or a comma separated string of pair of positive integers. A pair
of positive integers signify a range of batches to profile.
Raises:
ValueError: If profile_batch is not an integer or a comma separated pair
of positive integers.
|
github-repos
|
def get_creator_by_name(name):
return {'docker(container)': Container.creator,
'shell': Bash.creator, 'docker(image)': Image.creator,
'python': Script.creator, 'packer': Packer.creator,
'ansible(simple)': Ansible.creator}[name]
|
Get creator function by name.
Args:
name (str): name of the creator function.
Returns:
function: creator function.
|
juraj-google-style
|
def email_address(self, address, owner=None, **kwargs):
return EmailAddress(self.tcex, address, owner=owner, **kwargs)
|
Create the Email Address TI object.
Args:
owner:
address:
**kwargs:
Return:
EmailAddress: the Email Address TI object.
|
juraj-google-style
|
def camel_to_snake(name):
s1 = re.sub('(.)([A-Z][a-z]+)', '\\1_\\2', name)
return re.sub('([a-z0-9])([A-Z])', '\\1_\\2', s1).lower()
|
Converts CamelCase to snake_case.
Args:
name (string): The name to convert from CamelCase to snake_case.
Returns:
string: Converted string.
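Two quick examples, traced against the regexes above:
```python
print(camel_to_snake("CamelCase"))     # camel_case
print(camel_to_snake("MyHTTPServer"))  # my_http_server
```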
|
codesearchnet
|
def __edit_distance_alt(self, words):
words = [x.lower() for x in words]
return [e2 for e1 in words for e2 in self.edit_distance_1(e1)]
|
Compute all strings that are one edit away from each of the given words using
only the letters in the corpus
Args:
words (list): The words for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided words
|
codesearchnet
|
def get_audio_features(self, input_features: torch.FloatTensor, feature_attention_mask: Optional[torch.LongTensor]=None, audio_feature_lengths: Optional[torch.LongTensor]=None):
if feature_attention_mask is not None:
audio_feature_lengths = torch.sum(feature_attention_mask, dim=1)
input_features = input_features.permute(0, 2, 1)[feature_attention_mask.bool()].permute(1, 0)
else:
audio_feature_lengths = None
audio_feat_lengths, audio_output_lengths = self.audio_tower._get_feat_extract_output_lengths(audio_feature_lengths if audio_feature_lengths is not None else feature_attention_mask.sum(-1))
feature_lens = audio_feature_lengths if audio_feature_lengths is not None else feature_attention_mask.sum(-1)
audio_outputs = self.audio_tower(input_features, feature_lens=feature_lens, aftercnn_lens=audio_feat_lengths)
audio_features = audio_outputs.last_hidden_state
if audio_features.shape[0] != sum(audio_output_lengths.tolist()):
raise ValueError('length of audio_features should match audio_output_lengths')
return audio_features
|
Encodes audios into continuous embeddings that can be forwarded to the language model.
Args:
input_features (`torch.FloatTensor`):
The tensors corresponding to the input audios.
feature_attention_mask (`torch.LongTensor`, *optional*):
Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`:
audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*):
The length of feature shape of each audio in LLM.
|
github-repos
|
def ReadTimestamp(filename):
if not os.path.exists(filename):
return None
try:
timestamp_file = open(filename, 'r')
timestamp_string = timestamp_file.read().strip()
except IOError as e:
logging.warning('error opening timestamp file: %s', e)
timestamp_string = None
else:
timestamp_file.close()
logging.debug('read timestamp %s from file %r', timestamp_string, filename)
if timestamp_string is not None:
try:
timestamp = time.strptime(timestamp_string + ' UTC', '%Y-%m-%dT%H:%M:%SZ %Z')
except ValueError as e:
logging.error('cannot parse timestamp file %r: %s', filename, e)
timestamp = None
else:
timestamp = None
logging.debug('Timestamp is: %r', timestamp)
now = time.gmtime()
logging.debug(' Now is: %r', now)
# Guard against a missing timestamp before comparing.
if timestamp is not None and timestamp > now:
logging.warning('timestamp %r (%r) from %r is in the future, now is %r', timestamp_string, time.mktime(timestamp), filename, time.mktime(now))
if time.mktime(timestamp) - time.mktime(now) >= 60 * 60:
logging.info('Resetting timestamp to now.')
timestamp = now
return timestamp
|
Return a timestamp from a file.
The timestamp file format is a single line, containing a string in the
ISO-8601 format YYYY-MM-DDThh:mm:ssZ (i.e. UTC time). We do not support
all ISO-8601 formats for reasons of convenience in the code.
Timestamps internal to nss_cache deliberately do not carry milliseconds.
Args:
filename: A String naming the file to read from.
Returns:
A time.struct_time, or None if the timestamp file doesn't
exist or has errors.
|
github-repos
|
def get_contact(self, response=None, nir=None, handle=None, retry_count=3, dt_format=None):
if (response or (nir == 'krnic')):
contact_response = response
else:
contact_response = self._net.get_http_raw(url=str(NIR_WHOIS[nir]['url']).format(handle), retry_count=retry_count, headers=NIR_WHOIS[nir]['request_headers'], request_type=NIR_WHOIS[nir]['request_type'])
return self.parse_fields(response=contact_response, fields_dict=NIR_WHOIS[nir]['contact_fields'], dt_format=dt_format, hourdelta=int(NIR_WHOIS[nir]['dt_hourdelta']), is_contact=True)
|
The function for retrieving and parsing NIR whois data based on
NIR_WHOIS contact_fields.
Args:
response (:obj:`str`): Optional response object, this bypasses the
lookup.
nir (:obj:`str`): The NIR to query ('jpnic' or 'krnic'). Required
if response is None.
handle (:obj:`str`): For NIRs that have separate contact queries
(JPNIC), this is the contact handle to use in the query.
Defaults to None.
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
dt_format (:obj:`str`): The format of datetime fields if known.
Defaults to None.
Returns:
dict: Mapping of the fields provided in contact_fields, to their
parsed results.
|
codesearchnet
|
def rmdir(path, dir_fd=None):
system = get_instance(path)
system.remove(system.ensure_dir_path(path))
|
Remove a directory.
Equivalent to "os.rmdir".
Args:
path (path-like object): Path or URL.
dir_fd: directory descriptors;
see the os.rmdir() description for how it is interpreted.
Not supported on cloud storage objects.
|
codesearchnet
|
def set_viewbox(self, x, y, w, h):
self.attributes['viewBox'] = ('%s %s %s %s' % (x, y, w, h))
self.attributes['preserveAspectRatio'] = 'none'
|
Sets the origin and size of the viewbox, describing a virtual view area.
Args:
x (int): x coordinate of the viewbox origin
y (int): y coordinate of the viewbox origin
w (int): width of the viewbox
h (int): height of the viewbox
|
codesearchnet
|
def to_grid_locator(latitude, longitude, precision='square'):
if (precision not in ('square', 'subsquare', 'extsquare')):
raise ValueError(('Unsupported precision value %r' % precision))
if (not ((- 90) <= latitude <= 90)):
raise ValueError(('Invalid latitude value %r' % latitude))
if (not ((- 180) <= longitude <= 180)):
raise ValueError(('Invalid longitude value %r' % longitude))
latitude += 90.0
longitude += 180.0
locator = []
field = int((longitude / LONGITUDE_FIELD))
locator.append(chr((field + 65)))
longitude -= (field * LONGITUDE_FIELD)
field = int((latitude / LATITUDE_FIELD))
locator.append(chr((field + 65)))
latitude -= (field * LATITUDE_FIELD)
square = int((longitude / LONGITUDE_SQUARE))
locator.append(str(square))
longitude -= (square * LONGITUDE_SQUARE)
square = int((latitude / LATITUDE_SQUARE))
locator.append(str(square))
latitude -= (square * LATITUDE_SQUARE)
if (precision in ('subsquare', 'extsquare')):
subsquare = int((longitude / LONGITUDE_SUBSQUARE))
locator.append(chr((subsquare + 97)))
longitude -= (subsquare * LONGITUDE_SUBSQUARE)
subsquare = int((latitude / LATITUDE_SUBSQUARE))
locator.append(chr((subsquare + 97)))
latitude -= (subsquare * LATITUDE_SUBSQUARE)
if (precision == 'extsquare'):
extsquare = int((longitude / LONGITUDE_EXTSQUARE))
locator.append(str(extsquare))
extsquare = int((latitude / LATITUDE_EXTSQUARE))
locator.append(str(extsquare))
return ''.join(locator)
|
Calculate Maidenhead locator from latitude and longitude.
Args:
latitude (float): Position's latitude
longitude (float): Position's longitude
precision (str): Precision with which generate locator string
Returns:
str: Maidenhead locator for latitude and longitude
Raises:
ValueError: Invalid precision identifier
ValueError: Invalid latitude or longitude value
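A quick sanity check using the origin (0°, 0°), whose Maidenhead field and square are well known:
```python
print(to_grid_locator(0.0, 0.0))                         # JJ00
print(to_grid_locator(0.0, 0.0, precision='subsquare'))  # JJ00aa
```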
|
codesearchnet
|
def update_user_attributes(self, user, claims):
required_fields = [field.name for field in user._meta.fields if field.blank is False]
for field, claim in settings.CLAIM_MAPPING.items():
if hasattr(user, field):
if claim in claims:
setattr(user, field, claims[claim])
logger.debug("Attribute '{}' for user '{}' was set to '{}'.".format(field, user, claims[claim]))
else:
if field in required_fields:
msg = "Claim not found in access token: '{}'. Check ADFS claims mapping."
raise ImproperlyConfigured(msg.format(claim))
else:
msg = "Claim '{}' for user field '{}' was not found in the access token for user '{}'. " \
"Field is not required and will be left empty".format(claim, field, user)
logger.warning(msg)
else:
msg = "User model has no field named '{}'. Check ADFS claims mapping."
raise ImproperlyConfigured(msg.format(field))
|
Updates user attributes based on the CLAIM_MAPPING setting.
Args:
user (django.contrib.auth.models.User): User model instance
claims (dict): claims from the access token
|
juraj-google-style
|
def is_extension_type(tensor):
return isinstance(tensor, composite_tensor.CompositeTensor)
|
Returns whether a tensor is of an ExtensionType.
github.com/tensorflow/community/pull/269
Currently it works by checking if `tensor` is a `CompositeTensor` instance,
but this will be changed to use an appropriate extensiontype protocol
check once ExtensionType is made public.
Args:
tensor: An object to test
Returns:
True if the tensor is an extension type object, false if not.
|
github-repos
|
def iaf_hparams(hidden_size=512, filter_size=4096):
hparams = common_hparams.basic_params1()
hparams.hidden_size = hidden_size
hparams.add_hparam("attention_key_channels", None)
hparams.add_hparam("attention_value_channels", None)
hparams.add_hparam("num_heads", 4)
hparams.add_hparam("attention_dropout", 0.1)
hparams.add_hparam("shared_rel", False)
hparams.add_hparam("block_width", 1)
hparams.add_hparam("block_length", 1)
hparams.add_hparam("q_filter_width", 1)
hparams.add_hparam("kv_filter_width", 1)
hparams.layer_preprocess_sequence = "n"
hparams.layer_prepostprocess_dropout = 0.1
hparams.norm_type = "layer"
hparams.norm_epsilon = 1e-06
hparams.layer_prepostprocess_dropout_broadcast_dims = ""
hparams.layer_postprocess_sequence = "da"
hparams.add_hparam("filter_size", filter_size)
hparams.add_hparam("ffn_layer", "conv_hidden_relu")
hparams.add_hparam("relu_dropout", 0.1)
return hparams
|
Create hyperparameters for inverse autoregressive flows.
Args:
hidden_size: Width of attention layers and neural network output layer.
filter_size: Hidden layer width for neural network.
Returns:
hparams: Hyperparameters with basic presets for inverse autoregressive flows.
|
juraj-google-style
|
def setup_logging(verbosity, formats=None):
if formats is None:
formats = {}
log_level = logging.INFO
log_format = formats.get("info", INFO_FORMAT)
if sys.stdout.isatty():
log_format = formats.get("color", COLOR_FORMAT)
if verbosity > 0:
log_level = logging.DEBUG
log_format = formats.get("debug", DEBUG_FORMAT)
if verbosity < 2:
logging.getLogger("botocore").setLevel(logging.CRITICAL)
hdlr = logging.StreamHandler()
hdlr.setFormatter(ColorFormatter(log_format, ISO_8601))
logging.root.addHandler(hdlr)
logging.root.setLevel(log_level)
|
Configure a proper logger based on verbosity and optional log formats.
Args:
verbosity (int): 0, 1, 2
formats (dict): Optional, looks for `info`, `color`, and `debug` keys
which may override the associated default log formats.
|
juraj-google-style
|
def metadata_extractor(self):
if (not hasattr(self, '_local_file')):
raise AttributeError('local_file attribute must be set before calling metadata_extractor')
if (not hasattr(self, '_metadata_extractor')):
if self.local_file.endswith('.whl'):
logger.info('Getting metadata from wheel using WheelMetadataExtractor.')
extractor_cls = metadata_extractors.WheelMetadataExtractor
else:
logger.info('Getting metadata from setup.py using SetupPyMetadataExtractor.')
extractor_cls = metadata_extractors.SetupPyMetadataExtractor
base_python_version = (self.base_python_version or self.template_base_py_ver)
self._metadata_extractor = extractor_cls(self.local_file, self.name, self.name_convertor, self.version, self.rpm_name, self.venv, base_python_version)
return self._metadata_extractor
|
Returns an instance of proper MetadataExtractor subclass.
Always returns the same instance.
Returns:
The proper MetadataExtractor subclass according to local file
suffix.
|
codesearchnet
|
def cmd2(command, shell=False, detatch=False, verbose=False, verbout=None):
import shlex
if isinstance(command, (list, tuple)):
raise ValueError('command tuple not supported yet')
args = shlex.split(command, posix=(not WIN32))
if (verbose is True):
verbose = 2
if (verbout is None):
verbout = (verbose >= 1)
if (verbose >= 2):
print('+=== START CMD2 ===')
print('Command:')
print(command)
if verbout:
print('----')
print('Stdout:')
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=shell, universal_newlines=True)
if detatch:
info = {'proc': proc}
else:
write_fn = sys.stdout.write
flush_fn = sys.stdout.flush
logged_out = []
for line in _run_process(proc):
line_ = (line if six.PY2 else line)
if (len(line_) > 0):
if verbout:
write_fn(line_)
flush_fn()
logged_out.append(line)
try:
from utool import util_str
out = ''.join(logged_out)
except UnicodeDecodeError:
from utool import util_str
logged_out = util_str.ensure_unicode_strlist(logged_out)
out = ''.join(logged_out)
(out_, err) = proc.communicate()
ret = proc.wait()
info = {'out': out, 'err': err, 'ret': ret}
if (verbose >= 2):
print('L___ END CMD2 ___')
return info
|
Run a command in a subprocess and capture its output (a cleaned-up variant of cmd).
Args:
command (str): string command
shell (bool): if True, process is run in shell
detatch (bool): if True, process is run in background
verbose (int): verbosity mode
verbout (bool): if True, `command` writes to stdout in realtime.
defaults to True iff verbose > 0
Returns:
dict: info - information about command status
|
codesearchnet
|
def GetVSSStoreIdentifiers(self, volume_system, volume_identifiers):
print_header = True
while True:
if print_header:
self._PrintVSSStoreIdentifiersOverview(
volume_system, volume_identifiers)
print_header = False
self._output_writer.Write('\n')
lines = self._textwrapper.wrap(self._USER_PROMPT_VSS)
self._output_writer.Write('\n'.join(lines))
self._output_writer.Write('\n\nVSS identifier(s): ')
try:
selected_volumes = self._ReadSelectedVolumes(
volume_system, prefix='vss')
if (not selected_volumes or
not set(selected_volumes).difference(volume_identifiers)):
break
except ValueError:
pass
self._output_writer.Write('\n')
lines = self._textwrapper.wrap(
'Unsupported VSS identifier(s), please try again or abort with '
'Ctrl^C.')
self._output_writer.Write('\n'.join(lines))
self._output_writer.Write('\n\n')
return selected_volumes
|
Retrieves VSS store identifiers.
This method can be used to prompt the user to provide VSS store identifiers.
Args:
volume_system (VShadowVolumeSystem): volume system.
volume_identifiers (list[str]): volume identifiers including prefix.
Returns:
list[str]: selected volume identifiers including prefix or None.
|
juraj-google-style
|
def gen_public_api(output_dir: str, output_package: str, root_init_template: str, api_version: int, compat_api_versions: Sequence[int], compat_init_templates: Sequence[str], use_lazy_loading: bool, file_prefixes_to_strip: Sequence[str], mapping_files: Sequence[str], packages_to_ignore: Sequence[str], module_prefix: str, root_file_name: str, output_files: Set[str]):
public_api = get_public_api(mapping_files, file_prefixes_to_strip, packages_to_ignore, output_package, module_prefix)
root_entrypoints_by_module = public_api.v2_entrypoints_by_module
root_generated_imports_by_module = public_api.v2_generated_imports_by_module
if api_version == 1:
root_entrypoints_by_module = public_api.v1_entrypoints_by_module
root_generated_imports_by_module = public_api.v1_generated_imports_by_module
for compat_version in compat_api_versions:
compat_package = f'{output_package}.compat'
compat_version_package = f'{compat_package}.v{compat_version}'
public_api.v2_generated_imports_by_module[compat_package].add(compat_version_package)
public_api.v1_generated_imports_by_module[compat_package].add(compat_version_package)
_gen_init_files(output_dir, output_package, api_version, root_entrypoints_by_module, root_generated_imports_by_module, public_api.docs_by_module, root_init_template, file_prefixes_to_strip, use_lazy_loading, module_prefix, output_files, root_file_name=root_file_name)
for compat_index, compat_version in enumerate(compat_api_versions):
compat_output_dir = os.path.join(output_dir, 'compat', f'v{compat_version}')
os.makedirs(compat_output_dir, exist_ok=True)
compat_version = int(compat_version)
compat_entrypoints_by_module = public_api.v2_entrypoints_by_module
compat_generated_imports_by_module = public_api.v2_generated_imports_by_module
if compat_version == 1:
compat_entrypoints_by_module = public_api.v1_entrypoints_by_module
compat_generated_imports_by_module = public_api.v1_generated_imports_by_module
_gen_init_files(compat_output_dir, output_package, compat_version, compat_entrypoints_by_module, compat_generated_imports_by_module, public_api.docs_by_module, compat_init_templates[compat_index] if compat_init_templates else '', file_prefixes_to_strip, use_lazy_loading, module_prefix, output_files, subpackage_rewrite=f'{output_package}.compat.v{compat_version}')
for nested_compat_index, nested_compat_version in enumerate(compat_api_versions):
nested_compat_version = int(nested_compat_version)
nested_compat_output_dir = os.path.join(compat_output_dir, 'compat', f'v{nested_compat_version}')
nested_compat_entrypoints_by_module = public_api.v2_entrypoints_by_module
nested_compat_generated_imports_by_module = public_api.v2_generated_imports_by_module
if nested_compat_version == 1:
nested_compat_entrypoints_by_module = public_api.v1_entrypoints_by_module
nested_compat_generated_imports_by_module = public_api.v1_generated_imports_by_module
os.makedirs(nested_compat_output_dir, exist_ok=True)
gen_nested_compat_files(nested_compat_output_dir, output_package, nested_compat_version, nested_compat_entrypoints_by_module, nested_compat_generated_imports_by_module, public_api.docs_by_module, compat_init_templates[nested_compat_index] if compat_init_templates else '', file_prefixes_to_strip, use_lazy_loading, compat_api_versions, module_prefix, output_files)
|
Generates the public API for tensorflow.
Args:
output_dir: The directory to output the files to.
output_package: The package to use for the imports.
root_init_template: The template for the root init file.
api_version: The version of the API to generate.
compat_api_versions: The versions of the compat APIs to generate.
compat_init_templates: The templates for the compat init files.
use_lazy_loading: Whether to use lazy loading or not.
file_prefixes_to_strip: The prefixes to strip from the file names of the
imports.
mapping_files: The mapping files created by the API Extractor.
packages_to_ignore: A list of python packages that should be ignored when
searching for tf_exports.
module_prefix: A prefix to add to the non-generated imports.
root_file_name: The file name that should be generated for the top level
API.
output_files: List of files expected to generate.
|
github-repos
|
def get_info(ads):
infos = []
for ad in ads:
device_info = ad.device_info
user_added_info = {k: str(v) for k, v in device_info['user_added_info'].items()}
device_info['user_added_info'] = user_added_info
infos.append(device_info)
return infos
|
Get information on a list of AndroidDevice objects.
Args:
ads: A list of AndroidDevice objects.
Returns:
A list of dict, each representing info for an AndroidDevice objects.
Everything in this dict should be yaml serializable.
|
github-repos
|
def spawn_program(self, name, arguments=[], timeout=30, exclusive=False):
logger.debug('Spawning program for interaction ...')
if exclusive:
kill_longrunning(self.config)
return RunningProgram(self, name, arguments, timeout)
|
Spawns a program in the working directory.
This method allows the interaction with the running program,
based on the returned RunningProgram object.
Args:
name (str): The name of the program to be executed.
arguments (tuple): Command-line arguments for the program.
timeout (int): The timeout for execution.
exclusive (bool): Prevent parallel validation runs on the
test machines, e.g. when doing performance
measurements for submitted code.
Returns:
RunningProgram: An object representing the running program.
|
codesearchnet
|
def IsWalletTransaction(self, tx):
for (key, contract) in self._contracts.items():
for output in tx.outputs:
if (output.ScriptHash.ToBytes() == contract.ScriptHash.ToBytes()):
return True
for script in tx.scripts:
if script.VerificationScript:
if (bytes(contract.Script) == script.VerificationScript):
return True
for watch_script_hash in self._watch_only:
for output in tx.outputs:
if (output.ScriptHash == watch_script_hash):
return True
for script in tx.scripts:
if (Crypto.ToScriptHash(script.VerificationScript, unhex=False) == watch_script_hash):
return True
return False
|
Verifies if a transaction belongs to the wallet.
Args:
tx (TransactionOutput): an instance of type neo.Core.TX.Transaction.TransactionOutput to verify.
Returns:
bool: True, if transaction belongs to wallet. False, if not.
|
codesearchnet
|
def testSaveAndLoadSingleVariable(self, shard_config):
strategy = self._create_strategy(shard_config[0])
with strategy.scope():
var = variables_lib.Variable([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
model_dir = self.get_temp_dir()
save.save(var, model_dir)
strategy2 = self._create_strategy(shard_config[1])
with strategy2.scope():
loaded = load.load(model_dir)
if shard_config[1] > 1:
loaded = array_ops.concat(loaded.variables, axis=0)
self.assertLen(loaded.numpy(), 6)
if shard_config[0] > 1:
var = array_ops.concat(var.variables, axis=0)
self.assertAllClose(var.numpy(), loaded.numpy())
|
Test saving and loading ShardedVariable with different numbers of shards.
Loading tf.Variables into multiple shards is not yet supported.
Args:
shard_config: The number of shards to use before and after loading. For
example, [2, 1] means to create and save the variable with 2 shards and
load it into 1 shard (i.e., a regular tf.Variable).
|
github-repos
|
def _buffer_incomplete_responses(raw_output, buf):
if raw_output:
if buf:
raw_output = b''.join([buf, raw_output])
buf = None
if (b'\n' not in raw_output):
buf = raw_output
raw_output = None
elif (not raw_output.endswith(b'\n')):
remainder_offset = (raw_output.rindex(b'\n') + 1)
buf = raw_output[remainder_offset:]
raw_output = raw_output[:remainder_offset]
return (raw_output, buf)
|
It is possible for some of gdb's output to be read before it completely finished its response.
In that case, a partial mi response was read, which cannot be parsed into structured data.
We want to ALWAYS parse complete mi records. To do this, we store a buffer of gdb's
output if the output did not end in a newline.
Args:
raw_output: Contents of the gdb mi output
buf (str): Buffered gdb response from the past. This is incomplete and needs to be prepended to
gdb's next output.
Returns:
(raw_output, buf)
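Example (a minimal sketch of the buffering behaviour, assuming the function above is in scope; the byte strings are illustrative):
.. code-block:: python

    # First read stops mid-line: the complete line is returned, the tail is buffered.
    raw, buf = _buffer_incomplete_responses(b'^done\n^runn', None)
    # raw == b'^done\n', buf == b'^runn'

    # The next read completes the record: the buffer is prepended and cleared.
    raw, buf = _buffer_incomplete_responses(b'ing\n', buf)
    # raw == b'^running\n', buf is None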
|
codesearchnet
|
def activate_vacation(self, endtime: datetime, temperature: float):
data = {
"endtime": endtime.strftime("%Y_%m_%d %H:%M"),
"temperature": temperature,
}
return self._restCall("home/heating/activateVacation", json.dumps(data))
|
activates the vacation mode until the given time
Args:
endtime(datetime): the time when the vacation mode should automatically be disabled
temperature(float): the set temperature during the vacation mode
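Example (a hedged sketch; `home` stands in for an already-initialised client object exposing this method, and the values are placeholders):
.. code-block:: python

    from datetime import datetime, timedelta

    # Keep vacation mode active for one week at a set temperature of 16.5 degrees.
    end = datetime.now() + timedelta(days=7)
    home.activate_vacation(endtime=end, temperature=16.5)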
|
juraj-google-style
|
def Key(self):
return getattr(self, self._KEY)
|
Return unique identifier for this MapEntry object.
Returns:
A str which contains the name of the attribute to be used as an index
value for a maps.MapEntry instance in a maps.Map.
|
github-repos
|
def WriteBlobsWithUnknownHashes(self, blobs_data):
blobs_ids = [rdf_objects.BlobID.FromBlobData(d) for d in blobs_data]
self.WriteBlobs(dict(zip(blobs_ids, blobs_data)))
return blobs_ids
|
Calculates hash ids and writes contents of given data blobs.
Args:
blobs_data: An iterable of bytes.
Returns:
A list of rdf_objects.BlobID objects with each blob id corresponding
to an element in the original blobs_data argument.
|
codesearchnet
|
def encode_structure(nested_structure):
return _map_structure(nested_structure, _get_encoders())
|
Encodes nested structures composed of encodable types into a proto.
Args:
nested_structure: Structure to encode.
Returns:
Encoded proto.
Raises:
NotEncodableError: For values for which there are no encoders.
|
github-repos
|
def has_nrows(
state,
incorrect_msg="Your query returned a table with {{n_stu}} row{{'s' if n_stu > 1 else ''}} while it should return a table with {{n_sol}} row{{'s' if n_sol > 1 else ''}}.",
):
has_result(state)
n_stu = len(next(iter(state.student_result.values())))
n_sol = len(next(iter(state.solution_result.values())))
if n_stu != n_sol:
_msg = state.build_message(
incorrect_msg, fmt_kwargs={"n_stu": n_stu, "n_sol": n_sol}
)
state.do_test(_msg)
return state
|
Test whether the student and solution query results have equal numbers of rows.
Args:
incorrect_msg: If specified, this overrides the automatically generated feedback message
in case the number of rows in the student and solution query don't match.
|
juraj-google-style
|
def delete(self, dash_id):
removed_info = dict(time_modified=r_db.zscore(config.DASH_ID_KEY, dash_id), meta=r_db.hget(config.DASH_META_KEY, dash_id), content=r_db.hget(config.DASH_CONTENT_KEY, dash_id))
r_db.zrem(config.DASH_ID_KEY, dash_id)
r_db.hdel(config.DASH_META_KEY, dash_id)
r_db.hdel(config.DASH_CONTENT_KEY, dash_id)
return {'removed_info': removed_info}
|
Delete a dash meta and content, return updated dash content.
Actually, it is just moved to a specified place in the database.
Args:
dash_id: dashboard id.
Returns:
Redirect to home page.
|
codesearchnet
|
def insert_arguments_into_sql_query(compilation_result, arguments):
if compilation_result.language != SQL_LANGUAGE:
raise AssertionError(u'Unexpected query output language: {}'.format(compilation_result))
base_query = compilation_result.query
return base_query.params(**arguments)
|
Insert the arguments into the compiled SQL query to form a complete query.
Args:
compilation_result: CompilationResult, compilation result from the GraphQL compiler.
arguments: Dict[str, Any], parameter name -> value, for every parameter the query expects.
Returns:
SQLAlchemy Selectable, an executable SQL query with parameters bound.
|
juraj-google-style
|
def no_company_with_insufficient_companies_house_data(value):
for (prefix, name) in company_types_with_insufficient_companies_house_data:
if value.upper().startswith(prefix):
raise ValidationError(MESSAGE_INSUFFICIENT_DATA, params={'name': name})
|
Confirms that the company number is not for a company that
Companies House does not hold information on.
Args:
value (string): The company number to check.
Raises:
django.forms.ValidationError
|
codesearchnet
|
def universal_transformer_layer(x, hparams, ffn_unit, attention_unit, pad_remover=None):
def add_vanilla_transformer_layer(x, num_layers, name):
'Passes the input through num_layers of vanilla transformer layers.\n\n Args:\n x: input\n num_layers: number of layers\n name: string, prefix of layer names\n\n Returns:\n output of vanilla_transformer_layer\n '
if hparams.add_position_timing_signal:
x = common_attention.add_timing_signal_1d(x)
for layer in range(num_layers):
with tf.variable_scope((name + ('layer_%d' % layer))):
x = ffn_unit(attention_unit(x))
return x
with tf.variable_scope(('universal_transformer_%s' % hparams.recurrence_type)):
if (hparams.mix_with_transformer and ('before_ut' in hparams.mix_with_transformer)):
x = add_vanilla_transformer_layer(x, hparams.num_mixedin_layers, 'before_ut_')
if (hparams.recurrence_type == 'act'):
(output, extra_output) = universal_transformer_act(x, hparams, ffn_unit, attention_unit)
else:
(ut_function, initializer) = get_ut_layer(x, hparams, ffn_unit, attention_unit, pad_remover)
(output, _, extra_output) = tf.foldl(ut_function, tf.range(hparams.num_rec_steps), initializer=initializer)
if ((hparams.recurrence_type == 'lstm') and hparams.get('use_memory_as_final_state', False)):
output = extra_output
if (hparams.mix_with_transformer and ('after_ut' in hparams.mix_with_transformer)):
output = add_vanilla_transformer_layer(output, hparams.num_mixedin_layers, 'after_ut_')
return (output, extra_output)
|
Core function applying the universal transformer layer.
Args:
x: input
hparams: model hyper-parameters
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
the output tensor, extra output (can be memory, ponder time, etc.)
Raises:
ValueError: Unknown recurrence type
|
codesearchnet
|
def process_request(self, request, credential=None):
self._client_identity = [None, None]
header = request.request_header
self._set_protocol_version(header.protocol_version)
max_response_size = None
if header.maximum_response_size:
max_response_size = header.maximum_response_size.value
now = int(time.time())
if header.time_stamp:
then = header.time_stamp.value
if ((now >= then) and ((now - then) < 60)):
self._logger.info('Received request at time: {0}'.format(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(then))))
elif (now < then):
self._logger.warning('Received request with future timestamp. Received timestamp: {0}, Current timestamp: {1}'.format(then, now))
raise exceptions.InvalidMessage('Future request rejected by server.')
else:
self._logger.warning('Received request with old timestamp. Possible replay attack. Received timestamp: {0}, Current timestamp: {1}'.format(then, now))
raise exceptions.InvalidMessage('Stale request rejected by server.')
else:
self._logger.info('Received request at time: {0}'.format(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(now))))
self.is_asynchronous = False
if (header.asynchronous_indicator is not None):
self.is_asynchronous = header.asynchronous_indicator.value
if self.is_asynchronous:
raise exceptions.InvalidMessage('Asynchronous operations are not supported.')
if header.authentication:
if header.authentication.credentials:
auth_credentials = header.authentication.credentials[0]
else:
auth_credentials = None
else:
auth_credentials = None
self._verify_credential(auth_credentials, credential)
batch_error_option = enums.BatchErrorContinuationOption.STOP
if (header.batch_error_cont_option is not None):
batch_error_option = header.batch_error_cont_option.value
if (batch_error_option == enums.BatchErrorContinuationOption.UNDO):
raise exceptions.InvalidMessage('Undo option for batch handling is not supported.')
batch_order_option = False
if header.batch_order_option:
batch_order_option = header.batch_order_option.value
response_batch = self._process_batch(request.batch_items, batch_error_option, batch_order_option)
response = self._build_response(header.protocol_version, response_batch)
return (response, max_response_size, header.protocol_version)
|
Process a KMIP request message.
This routine is the main driver of the KmipEngine. It breaks apart and
processes the request header, handles any message errors that may
result, and then passes the set of request batch items on for
processing. This routine is thread-safe, allowing multiple client
connections to use the same KmipEngine.
Args:
request (RequestMessage): The request message containing the batch
items to be processed.
credential (string): Identifying information about the client
obtained from the client certificate. Optional, defaults to
None.
Returns:
ResponseMessage: The response containing all of the results from
the request batch items.
|
codesearchnet
|
def latest_file(path_name, keyword='', ext='', **kwargs) -> str:
files = all_files(
path_name=path_name, keyword=keyword, ext=ext, full_path=True
)
if not files:
from xbbg.io import logs
logger = logs.get_logger(latest_file, level=kwargs.pop('log', 'warning'))
logger.debug(f'file is not found in folder: {path_name}')
return ''
modified_time = [os.path.getmtime(f) for f in files]
files = [f for (dt, f) in sorted(zip(modified_time, files))]
return files[-1]
|
Latest modified file in folder
Args:
path_name: full path name
keyword: keyword to search
ext: file extension
Returns:
str: latest file name
|
juraj-google-style
|
def compare(self, value, expectation, regex_expr=False):
return compare(value, expectation, regex_expr=regex_expr)
|
Compares two values with regular expression matching support.
Arguments:
value (mixed): value to compare.
expectation (mixed): value to match.
regex_expr (bool, optional): enables string based regex matching.
Returns:
bool
|
codesearchnet
|
def _create_inbound_stream(self, config=None):
if (config is None):
raise ValueError('No stream config to create stream from.')
name = self._get_stream_name(config)
stream_handlers = self._get_stream_handlers(config, name)
stream_input = config.get('input', None)
if (stream_input is None):
raise cfg.AitConfigMissing("inbound stream {}'s input".format(name))
if (type(stream_input[0]) is int):
return PortInputStream(name, stream_input, stream_handlers, zmq_args={'zmq_context': self.broker.context, 'zmq_proxy_xsub_url': self.broker.XSUB_URL, 'zmq_proxy_xpub_url': self.broker.XPUB_URL})
else:
return ZMQStream(name, stream_input, stream_handlers, zmq_args={'zmq_context': self.broker.context, 'zmq_proxy_xsub_url': self.broker.XSUB_URL, 'zmq_proxy_xpub_url': self.broker.XPUB_URL})
|
Creates an inbound stream from its config.
Params:
config: stream configuration as read by ait.config
Returns:
stream: a Stream
Raises:
ValueError: if any of the required config values are missing
|
codesearchnet
|
def failure_message(description, options):
message = "expected to find {}".format(description)
if options["count"] is not None:
message += " {count} {times}".format(
count=options["count"],
times=declension("time", "times", options["count"]))
elif options["between"] is not None:
between = options["between"]
if between:
first, last = between[0], between[-1]
else:
first, last = None, None
message += " between {first} and {last} times".format(
first=first,
last=last)
elif options["maximum"] is not None:
message += " at most {maximum} {times}".format(
maximum=options["maximum"],
times=declension("time", "times", options["maximum"]))
elif options["minimum"] is not None:
message += " at least {minimum} {times}".format(
minimum=options["minimum"],
times=declension("time", "times", options["minimum"]))
return message
|
Returns an expectation failure message for the given query description.
Args:
description (str): A description of the failed query.
options (Dict[str, Any]): The query options.
Returns:
str: A message describing the failure.
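Example (a hedged sketch; the exact plural forms come from the external `declension` helper, so the expected strings are inferred rather than verified):
.. code-block:: python

    opts = {'count': 3, 'between': None, 'maximum': None, 'minimum': None}
    failure_message("css '.login-button'", opts)
    # -> "expected to find css '.login-button' 3 times"

    opts = {'count': None, 'between': range(2, 5), 'maximum': None, 'minimum': None}
    failure_message("xpath '//a'", opts)
    # -> "expected to find xpath '//a' between 2 and 4 times"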
|
juraj-google-style
|
def to_dataframe(self, start_row=0, max_rows=None):
fetcher = self._get_row_fetcher(start_row=start_row,
max_rows=max_rows,
page_size=self._MAX_PAGE_SIZE)
count = 0
page_token = None
df_list = []
df = None
while True:
page_rows, page_token = fetcher(page_token, count)
if len(page_rows):
count += len(page_rows)
df_list.append(pandas.DataFrame.from_records(page_rows))
if not page_token:
break
if df_list:
df = pandas.concat(df_list, ignore_index=True, copy=False)
ordered_fields = [field.name for field in self.schema]
return df[ordered_fields] if df is not None else pandas.DataFrame()
|
Exports the table to a Pandas dataframe.
Args:
start_row: the row of the table at which to start the export (default 0)
max_rows: an upper limit on the number of rows to export (default None)
Returns:
A Pandas dataframe containing the table data.
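Example (a hedged usage sketch; `table` stands for an instance of the class defining this method, and the row limits are placeholders):
.. code-block:: python

    # Export the first 10,000 rows and inspect the result locally.
    df = table.to_dataframe(start_row=0, max_rows=10000)
    print(df.shape)
    print(df.head())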
|
juraj-google-style
|
def copy_remote_file(web_file, destination):
size = 0
dir_name = os.path.dirname(destination)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
with open(destination, 'wb') as file_:
chunk_size = 8 * 1024
for chunk in web_file.iter_content(chunk_size=chunk_size):
if chunk:
file_.write(chunk)
size += len(chunk)
return size
|
Check whether the destination path exists (creating it if needed), and copy the
online resource file to the local filesystem.
Args:
:web_file: reference to online file resource to take.
:destination: path to store the file.
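Example (a hedged sketch assuming the streaming object comes from `requests`, which provides the `iter_content` interface used above; the URL and destination are placeholders):
.. code-block:: python

    import requests

    # Stream the download so iter_content() yields chunks instead of the whole body.
    response = requests.get('https://example.com/files/archive.zip', stream=True)
    size = copy_remote_file(response, '/tmp/downloads/archive.zip')
    print('wrote', size, 'bytes')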
|
juraj-google-style
|
def __init__(self, channel):
self.CompleteQuery = channel.unary_unary(
"/google.cloud.talent.v4beta1.Completion/CompleteQuery",
request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_completion__service__pb2.CompleteQueryRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_completion__service__pb2.CompleteQueryResponse.FromString,
)
|
Constructor.
Args:
channel: A grpc.Channel.
|
juraj-google-style
|
def _FormatUsername(self, event):
username = self._output_mediator.GetUsername(event)
return self._FormatField(username)
|
Formats the username.
Args:
event (EventObject): event.
Returns:
str: formatted username field.
|
juraj-google-style
|
def __init__(self, channel):
self.GetRequiredPlugins = channel.unary_unary(
'/pulumirpc.LanguageRuntime/GetRequiredPlugins',
request_serializer=language__pb2.GetRequiredPluginsRequest.SerializeToString,
response_deserializer=language__pb2.GetRequiredPluginsResponse.FromString,
)
self.Run = channel.unary_unary(
'/pulumirpc.LanguageRuntime/Run',
request_serializer=language__pb2.RunRequest.SerializeToString,
response_deserializer=language__pb2.RunResponse.FromString,
)
self.GetPluginInfo = channel.unary_unary(
'/pulumirpc.LanguageRuntime/GetPluginInfo',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=plugin__pb2.PluginInfo.FromString,
)
|
Constructor.
Args:
channel: A grpc.Channel.
|
juraj-google-style
|
def NCHWToNHWC(input_tensor: Union[tensor_lib.Tensor, list[int]]) -> Union[tensor_lib.Tensor, list[int]]:
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, tensor_lib.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
|
Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
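Example (illustrating the shape-list branch; the tensor branch behaves analogously but needs a TensorFlow tensor):
.. code-block:: python

    # 4-D: batch, channels, height, width -> batch, height, width, channels
    assert NCHWToNHWC([8, 3, 224, 224]) == [8, 224, 224, 3]

    # 5-D: the channel axis still moves to the end
    assert NCHWToNHWC([8, 3, 16, 224, 224]) == [8, 16, 224, 224, 3]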
|
github-repos
|
def untar(file_path, extract_folder=None):
file_path = Path(file_path)
if extract_folder is None:
extract_folder = file_path.parent
extract_folder = Path(extract_folder)
tar = tarfile.open(file_path)
tar.extractall(extract_folder)
tar.close()
|
Simple tar archive extractor
Args:
file_path: path to the tar file to be extracted
extract_folder: folder to which the files will be extracted
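Example (a hedged sketch; the paths are placeholders):
.. code-block:: python

    # Extract next to the archive (extract_folder defaults to the parent directory) ...
    untar('data/corpus.tar.gz')

    # ... or into an explicit target folder.
    untar('data/corpus.tar.gz', extract_folder='data/extracted')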
|
juraj-google-style
|
def _get_native_delegate_pointer(self):
return self._delegate_ptr
|
Returns the native TfLiteDelegate pointer.
It is not safe to copy this pointer because it needs to be freed.
Returns:
TfLiteDelegate *
|
github-repos
|
def make_serializable(json):
new_dict = dict()
for key, value in iteritems(json):
if is_valid_json(value):
new_dict[key] = value
return new_dict
|
This function ensures that the dictionary is JSON serializable. If not,
keys with non-serializable values are removed from the return value.
Args:
json (dict): Dictionary to convert to serializable
Returns:
new_dict (dict): New dictionary with non JSON serializable values removed
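Example (a hedged sketch; `is_valid_json` is an external helper not shown here, so the notion of "serializable" is assumed to match `json.dumps`):
.. code-block:: python

    import datetime

    record = {
        'name': 'sensor-7',
        'reading': 21.5,
        'captured_at': datetime.datetime.now(),  # not JSON serializable
    }
    make_serializable(record)
    # expected: {'name': 'sensor-7', 'reading': 21.5}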
|
juraj-google-style
|
def resolve_type(self, name: str | pytd_node.Node) -> pytd.Type:
if isinstance(name, (pytd.GenericType, pytd.AnythingType)):
return name
if isinstance(name, pytd.NamedType):
name = name.name
assert isinstance(name, str), f'Expected str, got {name}'
if name == 'nothing':
return pytd.NothingType()
base_type = self.type_map.get(name)
if base_type is None:
module, dot, tail = name.partition('.')
full_name = self.module_path_map.get(module, module) + dot + tail
base_type = pytd.NamedType(full_name)
return base_type
|
Return the fully resolved name for an alias.
Args:
name: The name of the type or alias.
Returns:
A pytd.NamedType with the fully resolved and qualified name.
|
github-repos
|
def _handle_changed_fields(self, old_data):
for link in self.get_links(is_set=False):
fld_id = un_camel_id(link['field'])
if not old_data or old_data.get(fld_id) != self._data[fld_id]:
if self._data[fld_id]:
linked_mdl = getattr(self, link['field'])
self._add_back_link(linked_mdl, link)
|
Looks for changed relation fields between new and old data (before/after save).
Creates back_link references for updated fields.
Args:
old_data: Object's data before save.
|
juraj-google-style
|
def GetDisplayName(self, file_entry=None):
if (file_entry is None):
file_entry = self._file_entry
if (file_entry is None):
raise ValueError('Missing file entry')
path_spec = getattr(file_entry, 'path_spec', None)
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(path_spec, mount_path=self._mount_path)
if (not relative_path):
return file_entry.name
return self.GetDisplayNameForPathSpec(path_spec)
|
Retrieves the display name for a file entry.
Args:
file_entry (Optional[dfvfs.FileEntry]): file entry object, where None
will return the display name of self._file_entry.
Returns:
str: human readable string that describes the path to the file entry.
Raises:
ValueError: if the file entry is missing.
|
codesearchnet
|
def _visualize(self, x_label, y_labels, ticks, overlay, draw, annotate, width=6, height=4):
for label in y_labels:
if (not all((isinstance(x, numbers.Real) for x in self[label]))):
raise ValueError("The column '{0}' contains non-numerical values. A plot cannot be drawn for this column.".format(label))
n = len(y_labels)
colors = list(itertools.islice(itertools.cycle(self.chart_colors), n))
if (overlay and (n > 1)):
(_, axis) = plt.subplots(figsize=(width, height))
if (x_label is not None):
axis.set_xlabel(x_label)
for (label, color) in zip(y_labels, colors):
draw(axis, label, color)
if (ticks is not None):
annotate(axis, ticks)
axis.legend(y_labels, loc=2, bbox_to_anchor=(1.05, 1))
type(self).plots.append(axis)
else:
(fig, axes) = plt.subplots(n, 1, figsize=(width, (height * n)))
if (not isinstance(axes, collections.Iterable)):
axes = [axes]
for (axis, y_label, color) in zip(axes, y_labels, colors):
draw(axis, y_label, color)
axis.set_ylabel(y_label, fontsize=16)
if (x_label is not None):
axis.set_xlabel(x_label, fontsize=16)
if (ticks is not None):
annotate(axis, ticks)
type(self).plots.append(axis)
|
Generic visualization that overlays or separates the draw function.
Raises:
ValueError: The Table contains non-numerical values in columns
other than `column_for_categories`
|
codesearchnet
|
def expandEntitiesFromEmail(e):
email = {}
email['type'] = 'i3visio.email'
email['value'] = e
email['attributes'] = []
alias = {}
alias['type'] = 'i3visio.alias'
alias['value'] = e.split('@')[0]
alias['attributes'] = []
domain = {}
domain['type'] = 'i3visio.domain'
domain['value'] = e.split('@')[1]
domain['attributes'] = []
return [email, alias, domain]
|
Method that receives an email and creates linked entities
Args:
-----
e: Email to verify.
Returns:
--------
Three different values: email, alias and domain in a list.
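Example (a worked illustration of the returned structure):
.. code-block:: python

    expandEntitiesFromEmail('alice@example.com')
    # [
    #     {'type': 'i3visio.email',  'value': 'alice@example.com', 'attributes': []},
    #     {'type': 'i3visio.alias',  'value': 'alice',             'attributes': []},
    #     {'type': 'i3visio.domain', 'value': 'example.com',       'attributes': []},
    # ]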
|
codesearchnet
|
def ProduceExtractionWarning(self, message, path_spec=None):
if not self._storage_writer:
raise RuntimeError('Storage writer not set.')
if not path_spec and self._file_entry:
path_spec = self._file_entry.path_spec
parser_chain = self.GetParserChain()
warning = warnings.ExtractionWarning(
message=message, parser_chain=parser_chain, path_spec=path_spec)
self._storage_writer.AddWarning(warning)
self._number_of_warnings += 1
self.last_activity_timestamp = time.time()
|
Produces an extraction warning.
Args:
message (str): message of the warning.
path_spec (Optional[dfvfs.PathSpec]): path specification, where None
will use the path specification of current file entry set in
the mediator.
Raises:
RuntimeError: when storage writer is not set.
|
juraj-google-style
|
def from_config(cls, config):
return cls(**config)
|
Creates a quantizer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same quantizer from the config
dictionary.
This method is used by Keras `model_to_estimator`, saving and
loading models to HDF5 formats, Keras model cloning, some visualization
utilities, and exporting models to and from JSON.
Args:
config: A Python dictionary, typically the output of get_config.
Returns:
A quantizer instance.
|
github-repos
|
def onWith(self, evnt, func):
self.on(evnt, func)
try:
yield self
finally:
self.off(evnt, func)
|
A context manager which can be used to add a callback and remove it when
using a ``with`` statement.
Args:
evnt (str): An event name
func (function): A callback function to receive event tufo
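Example (a hedged sketch; `bus` stands for the event-emitting instance exposing this method, the event name is a placeholder, and the method is assumed to be wrapped as a context manager, e.g. via contextlib.contextmanager, as the docstring implies):
.. code-block:: python

    def on_node_add(evnt):
        # React to events only while the with-block is active.
        print('received:', evnt)

    with bus.onWith('node:add', on_node_add):
        do_work_that_fires_events()  # hypothetical work that emits 'node:add'
    # After the block exits, the callback has been removed again.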
|
juraj-google-style
|
def invert(self) -> Rigid:
rot_inv = self._rots.invert()
trn_inv = rot_inv.apply(self._trans)
return Rigid(rot_inv, -1 * trn_inv)
|
Inverts the transformation.
Returns:
The inverse transformation.
|
github-repos
|
def _add_strings_to_commastring(self, field, strings):
allstringsadded = True
for string in strings:
if not self._add_string_to_commastring(field, string):
allstringsadded = False
return allstringsadded
|
Add a list of strings to a comma separated list of strings
Args:
field (str): Field containing comma separated list
strings (List[str]): list of strings to add
Returns:
bool: True if all strings added or False if any already present.
|
juraj-google-style
|
def write_record(cls, file_handle, value):
encoded_length = struct.pack(b'<Q', len(value))
file_handle.write(b''.join([encoded_length, struct.pack(b'<I', cls._masked_crc32c(encoded_length)), value, struct.pack(b'<I', cls._masked_crc32c(value))]))
|
Encode a value as a TFRecord.
Args:
file_handle: The file to write to.
value: A bytes object representing content of the record.
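Example (a hedged sketch; the enclosing class is not named in the snippet, so `TFRecordIO` is a hypothetical stand-in for whatever class defines `write_record` and `_masked_crc32c`):
.. code-block:: python

    import io

    buffer = io.BytesIO()
    # Each record is length-prefixed and CRC-masked per the TFRecord framing.
    TFRecordIO.write_record(buffer, b'first record payload')
    TFRecordIO.write_record(buffer, b'second record payload')
    raw_bytes = buffer.getvalue()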
|
github-repos
|
def from_bytes(cls, bt):
log.debug("Parsing email from bytes")
if six.PY2:
raise MailParserEnvironmentError(
"Parsing from bytes is valid only for Python 3.x version")
message = email.message_from_bytes(bt)
return cls(message)
|
Init a new object from bytes.
Args:
bt (bytes-like object): raw email as bytes-like object
Returns:
Instance of MailParser
|
juraj-google-style
|
def add_router(self, path, router):
if (self.strict_router_check and (not isinstance(router, Router))):
raise TypeError(('Expected object of type Router, found %r' % type(router)))
log.info('{} Adding router {} on path {}', id(self), router, path)
self.middleware.add(path=path, func=router, method_mask=HTTPMethod.ALL)
|
Adds a router to the list of routers
Args:
path (str or regex): The path on which the router binds
router (growler.Router): The router which will respond to
requests
Raises:
TypeError: If `strict_router_check` attribute is True and
the router is not an instance of growler.Router.
|
codesearchnet
|