code (string, lengths 20–4.93k) | docstring (string, lengths 33–1.27k) | source (3 classes)
---|---|---|
def get(self, secret_id):
return self.prepare_model(self.client.api.inspect_secret(secret_id))
|
Get a secret.
Args:
secret_id (str): Secret ID.
Returns:
(:py:class:`Secret`): The secret.
Raises:
:py:class:`docker.errors.NotFound`
If the secret does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
|
juraj-google-style
|
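A minimal usage sketch for the secret lookup above, assuming a docker-py client created with docker.from_env() and a hypothetical secret ID:

```python
import docker

client = docker.from_env()
# Raises docker.errors.NotFound if the secret does not exist.
secret = client.secrets.get("my_secret_id")  # hypothetical ID
print(secret.id, secret.name)
```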
def __init__(self, username=None, password=None):
super(UsernamePasswordCredential, self).__init__(
tag=Tags.CREDENTIAL_VALUE
)
self._username = None
self._password = None
self.username = username
self.password = password
|
Construct a UsernamePasswordCredential struct.
Args:
username (string): The username identifying the credential.
Optional, defaults to None. Required for encoding and decoding.
password (string): The password associated with the username.
Optional, defaults to None.
|
juraj-google-style
|
def convert_batchnorm(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting batchnorm ...')
if names == 'short':
tf_name = 'BN' + random_string(6)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
bias_name = '{0}.bias'.format(w_name)
weights_name = '{0}.weight'.format(w_name)
mean_name = '{0}.running_mean'.format(w_name)
var_name = '{0}.running_var'.format(w_name)
if bias_name in weights:
beta = weights[bias_name].numpy()
if weights_name in weights:
gamma = weights[weights_name].numpy()
mean = weights[mean_name].numpy()
variance = weights[var_name].numpy()
eps = params['epsilon']
momentum = params['momentum']
if weights_name not in weights:
bn = keras.layers.BatchNormalization(
axis=1, momentum=momentum, epsilon=eps,
center=False, scale=False,
weights=[mean, variance],
name=tf_name
)
else:
bn = keras.layers.BatchNormalization(
axis=1, momentum=momentum, epsilon=eps,
weights=[gamma, beta, mean, variance],
name=tf_name
)
layers[scope_name] = bn(layers[inputs[0]])
|
Convert batch normalization layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: naming scheme for keras layers ('short' for short random names, 'keep' to keep the original weight names; any other value appends a random suffix)
|
juraj-google-style
|
def __init__(self, service_endpoint_uri):
self._service_endpoint_uri = service_endpoint_uri
self._queue = None
self._send_buffer_size = 100
self._timeout = 10
|
Initializes a new instance of the class.
Args:
service_endpoint_uri (str): the address of the service to send telemetry data to.
|
juraj-google-style
|
def RemoveScanNode(self, path_spec):
scan_node = self._scan_nodes.get(path_spec, None)
if not scan_node:
return None
if scan_node.sub_nodes:
raise RuntimeError('Scan node has sub nodes.')
parent_scan_node = scan_node.parent_node
if parent_scan_node:
parent_scan_node.sub_nodes.remove(scan_node)
if path_spec == self._root_path_spec:
self._root_path_spec = None
del self._scan_nodes[path_spec]
if path_spec.IsFileSystem():
del self._file_system_scan_nodes[path_spec]
return parent_scan_node
|
Removes a scan node of a certain path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
SourceScanNode: parent scan node or None if not available.
Raises:
RuntimeError: if the scan node has sub nodes.
|
juraj-google-style
|
def get_head_mask(self, head_mask: tf.Tensor | None, num_hidden_layers: int) -> tf.Tensor:
if head_mask is not None:
head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
else:
head_mask = [None] * num_hidden_layers
return head_mask
|
Prepare the head mask if needed.
Args:
head_mask (`tf.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*):
The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
num_hidden_layers (`int`):
The number of hidden layers in the model.
Returns:
`tf.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with
`[None]` for each layer.
|
github-repos
|
def get_decoder_self_attention_bias(length):
with tf.name_scope("decoder_self_attention_bias"):
valid_locs = tf.matrix_band_part(tf.ones([length, length]), -1, 0)
valid_locs = tf.reshape(valid_locs, [1, 1, length, length])
decoder_bias = _NEG_INF * (1.0 - valid_locs)
return decoder_bias
|
Calculate bias for decoder that maintains model's autoregressive property.
Creates a tensor that masks out locations that correspond to illegal
connections, so prediction at position i cannot draw information from future
positions.
Args:
length: int length of sequences in batch.
Returns:
float tensor of shape [1, 1, length, length]
|
juraj-google-style
|
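A rough NumPy sketch of the masking idea above: build a lower-triangular matrix of valid attention locations and turn the invalid (future) positions into a large negative bias. The value of _NEG_INF is assumed here.

```python
import numpy as np

_NEG_INF = -1e9  # assumed value of the module constant
length = 3
valid_locs = np.tril(np.ones((length, length)))   # position i may attend to j <= i
decoder_bias = _NEG_INF * (1.0 - valid_locs)      # future positions get a huge negative bias
print(decoder_bias.reshape(1, 1, length, length))
```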
def join(*paths):
absolute = False
relpaths = []
for p in paths:
if p:
if p[0] == "/":
del relpaths[:]
absolute = True
relpaths.append(p)
path = normpath("/".join(relpaths))
if absolute:
path = abspath(path)
return path
|
Join any number of paths together.
Arguments:
*paths (str): Paths to join, given as positional arguments.
Returns:
str: The joined path.
Example:
>>> join('foo', 'bar', 'baz')
'foo/bar/baz'
>>> join('foo/bar', '../baz')
'foo/baz'
>>> join('foo/bar', '/baz')
'/baz'
|
juraj-google-style
|
def encrpyt_file(self, filename):
if not os.path.exists(filename):
print "Invalid filename %s. Does not exist" % filename
return
if self.vault_password is None:
print "ENV Variable PYANSI_VAULT_PASSWORD not set"
return
if self.is_file_encrypted(filename):
return
cipher = 'AES256'
vaulteditor = VaultEditor(cipher, self.vault_password, filename)
vaulteditor.encrypt_file()
|
Encrypt a file with Ansible Vault.
Args:
filename: Path of the file to encrypt.
Returns:
None.
|
juraj-google-style
|
def triangle(duration: int, amp: complex, period: float=None, phase: float=0, name: str=None) -> SamplePulse:
if (period is None):
period = duration
return _sampled_triangle_pulse(duration, amp, period, phase=phase, name=name)
|
Generates triangle wave `SamplePulse`.
Applies `left` sampling strategy to generate discrete pulse from continuous function.
Args:
duration: Duration of pulse. Must be greater than zero.
amp: Pulse amplitude. Wave range is [-amp, amp].
period: Pulse period, units of dt. If `None` defaults to single cycle.
phase: Pulse phase.
name: Name of pulse.
|
codesearchnet
|
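A rough NumPy/SciPy sketch of the sampled triangle waveform described above, not the pulse library's API itself; the exact phase convention of _sampled_triangle_pulse may differ.

```python
import numpy as np
from scipy.signal import sawtooth

duration, amp, period, phase = 128, 0.5, 64, 0.0
t = np.arange(duration)  # "left" sampling at integer time steps
# width=0.5 turns the sawtooth into a symmetric triangle wave in [-amp, amp].
samples = amp * sawtooth(2 * np.pi * t / period + phase, width=0.5)
print(samples[:8])
```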
def ParseRow(header, row):
precondition.AssertDictType(row, Text, Text)
result = rdf_osquery.OsqueryRow()
for column in header.columns:
result.values.append(row[column.name])
return result
|
Parses a single row of osquery output.
Args:
header: A parsed header describing the row format.
row: A row in a "parsed JSON" representation.
Returns:
A parsed `rdf_osquery.OsqueryRow` instance.
|
codesearchnet
|
def called_with_tracing(self, function_name, omit_warning):
self._call_count += 1
self._calls_per_tracings.append(1)
while self._calls_per_tracings:
if self._call_count - self._calls_per_tracings[0] > FREQUENT_TRACING_WARNING_MAX_CALL_HISTORY:
self._call_count -= self._calls_per_tracings.pop(0)
else:
break
if omit_warning or self._total_warning_count >= FREQUENT_TRACING_WARNING_MAX_WARNING_PER_DETECTOR:
return
if len(self._calls_per_tracings) >= FREQUENT_TRACING_WARNING_THRESHOLD:
self._total_warning_count += 1
logging.warning('{} out of the last {} calls to {} triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https:
|
Updates the list of most recent calls' tracing information.
Warns the user when recent calls caused retracing too often.
Args:
function_name: the python function being traced.
omit_warning: If 'True', this call will not warn the user even if
retracing happens too often.
|
github-repos
|
def HandleAccounts(self, result):
self.logger.debug('Checking for changes to user accounts.')
configured_users = self.utils.GetConfiguredUsers()
enable_oslogin = self._GetEnableOsLoginValue(result)
enable_two_factor = self._GetEnableTwoFactorValue(result)
if enable_oslogin:
desired_users = {}
self.oslogin.UpdateOsLogin(True, two_factor_desired=enable_two_factor)
else:
desired_users = self._GetAccountsData(result)
self.oslogin.UpdateOsLogin(False)
remove_users = sorted((set(configured_users) - set(desired_users.keys())))
self._UpdateUsers(desired_users)
self._RemoveUsers(remove_users)
self.utils.SetConfiguredUsers(desired_users.keys())
|
Called when there are changes to the contents of the metadata server.
Args:
result: json, the deserialized contents of the metadata server.
|
codesearchnet
|
class GitVisionEncoder(nn.Module):
def __init__(self, config: GitVisionConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([GitVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions)
else:
layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
|
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`GitVisionEncoderLayer`].
Args:
config: GitVisionConfig
|
github-repos
|
def argparse_funckw(func, defaults={}, **kwargs):
import utool as ut
funckw_ = ut.get_funckw(func, recursive=True)
funckw_.update(defaults)
funckw = ut.argparse_dict(funckw_, **kwargs)
return funckw
|
Allows kwargs to be specified on the command line for test functions.
Args:
func (function):
Kwargs:
lbl, verbose, only_specified, force_keys, type_hint, alias_dict
Returns:
dict: funckw
CommandLine:
python -m utool.util_inspect argparse_funckw
SeeAlso:
exec_funckw
recursive_parse_kwargs
parse_kwarg_keys
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> func = get_instance_attrnames
>>> funckw = argparse_funckw(func)
>>> result = ('funckw = %s' % (ut.repr3(funckw),))
>>> print(result)
funckw = {
'default': True,
'with_methods': True,
'with_properties': True,
}
|
codesearchnet
|
def _process_thread(self, client):
file_list = self.files
if not file_list:
return
print('Filefinder to collect {0:d} items'.format(len(file_list)))
flow_action = flows_pb2.FileFinderAction(
action_type=flows_pb2.FileFinderAction.DOWNLOAD)
flow_args = flows_pb2.FileFinderArgs(
paths=file_list,
action=flow_action,)
flow_id = self._launch_flow(client, 'FileFinder', flow_args)
self._await_flow(client, flow_id)
collected_flow_data = self._download_files(client, flow_id)
if collected_flow_data:
print('{0!s}: Downloaded: {1:s}'.format(flow_id, collected_flow_data))
fqdn = client.data.os_info.fqdn.lower()
self.state.output.append((fqdn, collected_flow_data))
|
Process a single client.
Args:
client: GRR client object to act on.
|
juraj-google-style
|
async def start_server_in_loop(runner, hostname, port, agent):
(await runner.setup())
agent.web.server = aioweb.TCPSite(runner, hostname, port)
(await agent.web.server.start())
logger.info(f'Serving on http:
|
Listens to http requests and sends them to the webapp.
Args:
runner: AppRunner to process the http requests
hostname: host name to listen from.
port: port to listen from.
agent: agent that owns the web app.
|
codesearchnet
|
def split_raster(rs, split_shp, field_name, temp_dir):
UtilClass.rmmkdir(temp_dir)
ds = ogr_Open(split_shp)
lyr = ds.GetLayer(0)
lyr.ResetReading()
ft = lyr.GetNextFeature()
while ft:
cur_field_name = ft.GetFieldAsString(field_name)
for r in rs:
cur_file_name = r.split(os.sep)[(- 1)]
outraster = ((temp_dir + os.sep) + cur_file_name.replace('.tif', ('_%s.tif' % cur_field_name.replace(' ', '_'))))
subprocess.call(['gdalwarp', r, outraster, '-cutline', split_shp, '-crop_to_cutline', '-cwhere', ("'%s'='%s'" % (field_name, cur_field_name)), '-dstnodata', '-9999'])
ft = lyr.GetNextFeature()
ds = None
|
Split raster by given shapefile and field name.
Args:
rs: original raster file(s).
split_shp: boundary (ESRI Shapefile) used to split the raster.
field_name: field name identifying the split value.
temp_dir: directory to store the split rasters.
|
codesearchnet
|
def select_one(self, selector):
result = list(self.select(selector))
if (len(result) > 1):
raise ValueError(('Found more than one model matching %s: %r' % (selector, result)))
if (len(result) == 0):
return None
return result[0]
|
Query this document for objects that match the given selector.
Raises an error if more than one object is found. Returns
single matching object, or None if nothing is found
Args:
selector (JSON-like query dictionary) : you can query by type or by
name, e.g. ``{"type": HoverTool}``, ``{"name": "mycircle"}``
Returns:
Model or None
|
codesearchnet
|
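A small usage sketch, assuming Bokeh's Document API; on an empty document it simply returns None, and it raises ValueError when more than one model matches.

```python
from bokeh.io import curdoc
from bokeh.models import HoverTool

doc = curdoc()
# Single matching model, or None if nothing in the document matches.
hover = doc.select_one({"type": HoverTool})
print(hover)
```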
def getEvents(self):
events = []
for json in self.conn.endpoints['self'].getEvents():
events.append(SkypeEvent.fromRaw(self, json))
return events
|
Retrieve a list of events since the last poll. Multiple calls may be needed to retrieve all events.
If no events occur, the API will block for up to 30 seconds, after which an empty list is returned. As soon as
an event is received in this time, it is returned immediately.
Returns:
:class:`.SkypeEvent` list: a list of events, possibly empty
|
codesearchnet
|
def generate_ngram_data_set(self, token_list, n=2):
n_gram_tuple_zip = self.generate_tuple_zip(token_list, n)
n_gram_tuple_list = [n_gram_tuple for n_gram_tuple in n_gram_tuple_zip]
n_gram_data_set = self.generate_tuple_zip(n_gram_tuple_list, 2)
return n_gram_data_set
|
Generate N-gram training/target pairs.
Args:
token_list: The list of tokens.
n: N, the size of each N-gram.
Returns:
zip of Tuple(Training N-gram data, Target N-gram data)
|
juraj-google-style
|
def _ParseMRUListExEntryValue(
self, parser_mediator, registry_key, entry_index, entry_number,
codepage='cp1252', **kwargs):
value_string = ''
value = registry_key.GetValueByName('{0:d}'.format(entry_number))
if value is None:
parser_mediator.ProduceExtractionWarning(
'missing MRUListEx value: {0:d} in key: {1:s}.'.format(
entry_number, registry_key.path))
elif not value.DataIsBinaryData():
logger.debug((
'[{0:s}] Non-binary MRUListEx entry value: {1:d} in key: '
'{2:s}.').format(self.NAME, entry_number, registry_key.path))
elif value.data:
shell_items_parser = shell_items.ShellItemsParser(registry_key.path)
shell_items_parser.ParseByteStream(
parser_mediator, value.data, codepage=codepage)
value_string = 'Shell item path: {0:s}'.format(
shell_items_parser.CopyToPath())
return value_string
|
Parses the MRUListEx entry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUListEx value.
entry_index (int): MRUListEx entry index.
entry_number (int): entry number.
codepage (Optional[str]): extended ASCII string codepage.
Returns:
str: MRUList entry value.
|
juraj-google-style
|
def cleave_sequence(input_layer, unroll=None):
if unroll is None:
raise ValueError('You must set unroll either here or in the defaults.')
shape = input_layer.shape
if shape[0] is not None and shape[0] % unroll != 0:
raise ValueError('Must divide the split dimension evenly: %d mod %d != 0' %
(shape[0], unroll))
if unroll <= 0:
raise ValueError('Unroll must be > 0: %s' % unroll)
elif unroll == 1:
splits = [input_layer.tensor]
else:
splits = tf.split(
value=input_layer.tensor, num_or_size_splits=unroll, axis=0)
result = input_layer.with_sequence(splits)
defaults = result.defaults
if 'unroll' in defaults:
del defaults['unroll']
return result
|
Cleaves a tensor into a sequence; this is the inverse of squash.
Recurrent methods unroll across an array of Tensors with each one being a
timestep. This cleaves the first dim so that the result is an array of Tensors.
It is the inverse of squash_sequence.
Args:
input_layer: The input layer.
unroll: The number of time steps.
Returns:
A PrettyTensor containing an array of tensors.
Raises:
ValueError: If unroll is not specified and it has no default or it is <= 0.
|
juraj-google-style
|
def copy(self, src, dst, other_system=None):
container, obj = self.split_locator(src)
with _handle_client_exception():
self.client.copy_object(
container=container, obj=obj, destination=self.relpath(dst))
|
Copy object of the same storage.
Args:
src (str): Path or URL.
dst (str): Path or URL.
other_system (pycosio._core.io_system.SystemBase subclass): Unused.
|
juraj-google-style
|
def parse_string_descriptor(string_desc):
if (not isinstance(string_desc, str)):
string_desc = str(string_desc)
if (not string_desc.endswith(';')):
string_desc += ';'
parsed = get_streamer_parser().parseString(string_desc)[0]
realtime = ('realtime' in parsed)
broadcast = ('broadcast' in parsed)
encrypted = (('security' in parsed) and (parsed['security'] == 'encrypted'))
signed = (('security' in parsed) and (parsed['security'] == 'signed'))
auto = ('manual' not in parsed)
with_other = None
if ('with_other' in parsed):
with_other = parsed['with_other']
auto = False
dest = SlotIdentifier.FromString('controller')
if ('explicit_tile' in parsed):
dest = parsed['explicit_tile']
selector = parsed['selector']
if (realtime and (encrypted or signed)):
raise SensorGraphSemanticError('Realtime streamers cannot be either signed or encrypted')
if (broadcast and (encrypted or signed)):
raise SensorGraphSemanticError('Broadcast streamers cannot be either signed or encrypted')
report_type = ('broadcast' if broadcast else 'telegram')
dest = dest
selector = selector
if (realtime or broadcast):
report_format = u'individual'
elif signed:
report_format = u'signedlist_userkey'
elif encrypted:
raise SensorGraphSemanticError('Encrypted streamers are not yet supported')
else:
report_format = u'hashedlist'
return DataStreamer(selector, dest, report_format, auto, report_type=report_type, with_other=with_other)
|
Parse a string descriptor of a streamer into a DataStreamer object.
Args:
string_desc (str): The string descriptor that we wish to parse.
Returns:
DataStreamer: A DataStreamer object representing the streamer.
|
codesearchnet
|
def switch_to_frame(self, frame):
if isinstance(frame, Element):
self.driver.switch_to_frame(frame)
self._scopes.append('frame')
elif (frame == 'parent'):
if (self._scopes[(- 1)] != 'frame'):
raise ScopeError('`switch_to_frame("parent")` cannot be called from inside a descendant frame\'s `scope` context.')
self._scopes.pop()
self.driver.switch_to_frame('parent')
elif (frame == 'top'):
if ('frame' in self._scopes):
idx = self._scopes.index('frame')
if any([(scope not in ['frame', None]) for scope in self._scopes[idx:]]):
raise ScopeError('`switch_to_frame("top")` cannot be called from inside a descendant frame\'s `scope` context.')
self._scopes = self._scopes[:idx]
self.driver.switch_to_frame('top')
else:
raise ValueError('You must provide a frame element, "parent", or "top" when calling switch_to_frame')
|
Switch to the given frame.
If you use this method you are responsible for making sure you switch back to the parent
frame when done in the frame you switched to. :meth:`frame` is preferred over this method and
should be used when possible. May not be supported by all drivers.
Args:
frame (Element | str): The iframe/frame element to switch to.
|
codesearchnet
|
def sort(x, axis=-1):
if any_symbolic_tensors((x,)):
return Sort(axis=axis).symbolic_call(x)
return backend.numpy.sort(x, axis=axis)
|
Sorts the elements of `x` along a given axis in ascending order.
Args:
x: Input tensor.
axis: Axis along which to sort. If `None`, the tensor is flattened
before sorting. Defaults to `-1`; the last axis.
Returns:
Sorted tensor.
|
github-repos
|
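A short usage sketch, assuming Keras 3 with a NumPy-style backend:

```python
import numpy as np
from keras import ops

x = np.array([[3, 1, 2], [9, 7, 8]])
print(ops.sort(x, axis=-1))    # each row sorted: [[1 2 3] [7 8 9]]
print(ops.sort(x, axis=None))  # flattened before sorting: [1 2 3 7 8 9]
```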
def Serialize(self, writer):
super(ContractState, self).Serialize(writer)
self.Code.Serialize(writer)
writer.WriteUInt8(self.ContractProperties)
writer.WriteVarString(self.Name)
writer.WriteVarString(self.CodeVersion)
writer.WriteVarString(self.Author)
writer.WriteVarString(self.Email)
writer.WriteVarString(self.Description)
|
Serialize full object.
Args:
writer (neo.IO.BinaryWriter):
|
juraj-google-style
|
def read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True):
fname = os.path.join(data_dir, GOLD_STANDARD_BLOCKS_DIRNAME, (fileroot + GOLD_STANDARD_BLOCKS_EXT))
with io.open(fname, mode='r') as f:
data = f.read()
if split_blocks:
return filter(None, data[:(- 1)].split('\n'))
return filter(None, data)
|
Read the gold standard blocks file corresponding to identifier ``fileroot``
in the gold standard blocks directory below the root ``data_dir``.
Args:
data_dir (str)
fileroot (str)
split_blocks (bool): If True, split the file's content into blocks.
Returns:
str or List[str]
|
codesearchnet
|
def dot(poly1, poly2):
if not isinstance(poly1, Poly) and not isinstance(poly2, Poly):
return numpy.dot(poly1, poly2)
poly1 = Poly(poly1)
poly2 = Poly(poly2)
poly = poly1*poly2
if numpy.prod(poly1.shape) <= 1 or numpy.prod(poly2.shape) <= 1:
return poly
return chaospy.poly.sum(poly, 0)
|
Dot product of polynomial vectors.
Args:
poly1 (Poly) : left part of product.
poly2 (Poly) : right part of product.
Returns:
(Poly) : product of poly1 and poly2.
Examples:
>>> poly = cp.prange(3, 1)
>>> print(poly)
[1, q0, q0^2]
>>> print(cp.dot(poly, numpy.arange(3)))
2q0^2+q0
>>> print(cp.dot(poly, poly))
q0^4+q0^2+1
|
juraj-google-style
|
def dumps(ms, single=False, pretty_print=False, **kwargs):
if single:
ms = [ms]
return serialize(ms, pretty_print=pretty_print, **kwargs)
|
Serialize an Xmrs object to the Prolog representation
Args:
ms: an iterator of Xmrs objects to serialize (unless the
*single* option is `True`)
single: if `True`, treat *ms* as a single Xmrs object instead
of as an iterator
pretty_print: if `True`, add newlines and indentation
Returns:
the Prolog string representation of a corpus of Xmrs
|
codesearchnet
|
def _extend_with_testcase(test_dict, testcase_def_dict):
testcase_def_dict["config"].setdefault("variables", {})
testcase_def_variables = utils.ensure_mapping_format(testcase_def_dict["config"].get("variables", {}))
testcase_def_variables.update(test_dict.pop("variables", {}))
testcase_def_dict["config"]["variables"] = testcase_def_variables
test_base_url = test_dict.pop("base_url", "")
if not testcase_def_dict["config"].get("base_url"):
testcase_def_dict["config"]["base_url"] = test_base_url
test_name = test_dict.pop("name", None) \
or testcase_def_dict["config"].pop("name", None) \
or "testcase name undefined"
testcase_def_dict["config"].update(test_dict)
testcase_def_dict["config"]["name"] = test_name
test_dict.clear()
test_dict.update(testcase_def_dict)
|
Extend a test with its testcase definition.
The test block merges with and overrides the testcase config definition;
the extended result is written back into test_dict in place.
Args:
test_dict (dict): test block
testcase_def_dict (dict): testcase definition
|
juraj-google-style
|
def FromMany(cls, samples):
if not samples:
raise ValueError("Empty `samples` argument")
return IOSample(
timestamp=max(sample.timestamp for sample in samples),
read_bytes=max(sample.read_bytes for sample in samples),
write_bytes=max(sample.write_bytes for sample in samples))
|
Constructs a single sample that best represents a list of samples.
Args:
samples: An iterable collection of `IOSample` instances.
Returns:
An `IOSample` instance representing `samples`.
Raises:
ValueError: If `samples` is empty.
|
juraj-google-style
|
def manufacturer(self):
buf = ctypes.cast(self.sManu, ctypes.c_char_p).value
return (buf.decode() if buf else None)
|
Returns the name of the manufacturer of the device.
Args:
self (JLinkDeviceInfo): the ``JLinkDeviceInfo`` instance
Returns:
Manufacturer name.
|
codesearchnet
|
def format(self, number, **kwargs):
if check_type(number, 'list'):
return [self.format(val, **kwargs) for val in number]
number = self.parse(number)
options = dict(self.settings['number'], **kwargs)
precision = self._change_precision(options['precision'])
negative = (lambda num: ('-' if (num < 0) else ''))(number)
base = str(int(float(self.to_fixed((abs(number) or 0), precision))))
mod = (lambda num: ((len(num) % 3) if (len(num) > 3) else 0))(base)
num = (negative + (lambda num: (base[0:num] if num else ''))(mod))
num += re.sub(r'(\d{3})(?=\d)', r'\1' + options['thousand'], base[mod:])
num += (lambda val: ((options['decimal'] + self.to_fixed(abs(number), precision).split('.')[1]) if val else ''))(precision)
return num
|
Format a given number.
Format a number, with comma-separated thousands and
custom precision/decimal places
Localise by overriding the precision and thousand / decimal separators
2nd parameter `precision` can be an object matching `settings.number`
Args:
number (int, float, or list): The number(s) to format.
precision (int): Decimal places, passed via kwargs.
thousand (str): Thousands separator, passed via kwargs.
decimal (str): Decimal separator, passed via kwargs.
Returns:
str: The formatted number string.
|
codesearchnet
|
def _setBitOn(x, bitNum):
_checkInt(x, minvalue=0, description='input value')
_checkInt(bitNum, minvalue=0, description='bitnumber')
return (x | (1 << bitNum))
|
Set bit 'bitNum' to True.
Args:
* x (int): The value before.
* bitNum (int): The bit number that should be set to True.
Returns:
The value after setting the bit. This is an integer.
For example:
For x = 4 (dec) = 0100 (bin), setting bit number 0 results in 0101 (bin) = 5 (dec).
|
codesearchnet
|
def rekey(self, uid=None, offset=None, **kwargs):
if (uid is not None):
if (not isinstance(uid, six.string_types)):
raise TypeError('The unique identifier must be a string.')
if (offset is not None):
if (not isinstance(offset, six.integer_types)):
raise TypeError('The offset must be an integer.')
attributes = []
if kwargs.get('activation_date'):
attributes.append(self.attribute_factory.create_attribute(enums.AttributeType.ACTIVATION_DATE, kwargs.get('activation_date')))
if kwargs.get('process_start_date'):
attributes.append(self.attribute_factory.create_attribute(enums.AttributeType.PROCESS_START_DATE, kwargs.get('process_start_date')))
if kwargs.get('protect_stop_date'):
attributes.append(self.attribute_factory.create_attribute(enums.AttributeType.PROTECT_STOP_DATE, kwargs.get('protect_stop_date')))
if kwargs.get('deactivation_date'):
attributes.append(self.attribute_factory.create_attribute(enums.AttributeType.DEACTIVATION_DATE, kwargs.get('deactivation_date')))
template_attribute = cobjects.TemplateAttribute(attributes=attributes)
result = self.proxy.rekey(uuid=uid, offset=offset, template_attribute=template_attribute)
status = result.get('result_status')
if (status == enums.ResultStatus.SUCCESS):
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(status, result.get('result_reason'), result.get('result_message'))
|
Rekey an existing key.
Args:
uid (string): The unique ID of the symmetric key to rekey.
Optional, defaults to None.
offset (int): The time delta, in seconds, between the new key's
initialization date and activation date. Optional, defaults
to None.
**kwargs (various): A placeholder for object attributes that
should be set on the newly rekeyed key. Currently
supported attributes include:
activation_date (int)
process_start_date (int)
protect_stop_date (int)
deactivation_date (int)
Returns:
string: The unique ID of the newly rekeyed key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
|
codesearchnet
|
def _gal2idx(self, gal):
l = coordinates.Longitude(gal.l, wrap_angle=180.*units.deg)
j = (self._inv_pix_scale * (l.deg - self._l_bounds[0])).astype('i4')
k = (self._inv_pix_scale * (gal.b.deg - self._b_bounds[0])).astype('i4')
idx = (j < 0) | (j >= self._shape[0]) | (k < 0) | (k >= self._shape[1])
if np.any(idx):
j[idx] = -1
k[idx] = -1
return j, k, ~idx
|
Converts from Galactic coordinates to pixel indices.
Args:
gal (:obj:`astropy.coordinates.SkyCoord`): Galactic coordinates. Must
store an array of coordinates (i.e., not be scalar).
Returns:
``j, k, mask`` - Pixel indices of the coordinates, as well as a mask
of in-bounds coordinates. Outputs have the same shape as the input
coordinates.
|
juraj-google-style
|
def check_captcha(self, captcha_id, solution, author_name=None, author_url=None, author_mail=None, author_ip=None, author_id=None, author_open_id=None, honeypot=None):
check_captcha_endpoint = Template('${rest_root}/captcha/${captcha_id}')
url = check_captcha_endpoint.substitute(rest_root=self._rest_root, captcha_id=captcha_id)
data = {'solution': solution}
response = self.__post_request(url, data)
return (response['captcha']['solved'] == '1')
|
Checks a CAPTCHA that was solved by the end-user.
Keyword arguments:
captcha_id -- Unique identifier of the CAPTCHA solved.
solution -- Solution provided by the end-user for the CAPTCHA.
author_name -- The name of the content author.
author_url -- The homepage/website URL of the content author.
author_mail -- The e-mail address of the content author.
author_ip -- The IP address of the content author.
author_id -- The local user ID on the client site of the content author.
author_open_id -- List of Open IDs of the content author.
honeypot -- The value of a client-side honeypot form element, if non-empty.
Returns:
solved -- Boolean whether or not the CAPTCHA was solved correctly.
If the CAPTCHA is associated with an unsure contents, it is recommended to recheck the content.
|
codesearchnet
|
def set_voltage(self, volt, ramp=False):
if ramp:
self.mon.RampVoltage(self.mon.start_voltage, volt)
else:
self.mon.SetVoltage(volt)
|
Sets the output voltage of monsoon.
Args:
volt: Voltage to set the output to.
ramp: If true, the output voltage will be increased gradually to
prevent tripping Monsoon overvoltage.
|
codesearchnet
|
def flip_variable(self, v):
adj = self.adj
linear = self.linear
quadratic = self.quadratic
if (v not in adj):
return
if (self.vartype is Vartype.SPIN):
linear[v] *= (- 1.0)
for u in adj[v]:
adj[v][u] *= (- 1.0)
adj[u][v] *= (- 1.0)
if ((u, v) in quadratic):
quadratic[(u, v)] *= (- 1.0)
elif ((v, u) in quadratic):
quadratic[(v, u)] *= (- 1.0)
else:
raise RuntimeError('quadratic is missing an interaction')
elif (self.vartype is Vartype.BINARY):
self.offset += linear[v]
linear[v] *= (- 1)
for u in adj[v]:
bias = adj[v][u]
adj[v][u] *= (- 1.0)
adj[u][v] *= (- 1.0)
linear[u] += bias
if ((u, v) in quadratic):
quadratic[(u, v)] *= (- 1.0)
elif ((v, u) in quadratic):
quadratic[(v, u)] *= (- 1.0)
else:
raise RuntimeError('quadratic is missing an interaction')
else:
raise RuntimeError('Unexpected vartype')
try:
self._counterpart.flip_variable(v)
except AttributeError:
pass
|
Flip variable v in a binary quadratic model.
Args:
v (variable):
Variable in the binary quadratic model. If v is not in the binary
quadratic model, it is ignored.
Examples:
This example creates a binary quadratic model with two variables and inverts
the value of one.
>>> import dimod
...
>>> bqm = dimod.BinaryQuadraticModel({1: 1, 2: 2}, {(1, 2): 0.5}, 0.5, dimod.SPIN)
>>> bqm.flip_variable(1)
>>> bqm.linear[1], bqm.linear[2], bqm.quadratic[(1, 2)]
(-1.0, 2, -0.5)
|
codesearchnet
|
def datasets_get(self, dataset_name):
url = (Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name))
return datalab.utils.Http.request(url, credentials=self._credentials)
|
Issues a request to retrieve information about a dataset.
Args:
dataset_name: the name of the dataset
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
|
codesearchnet
|
class XGBoostModelHandlerPandas(XGBoostModelHandler[pandas.DataFrame, PredictionResult, Union[xgboost.Booster, xgboost.XGBModel]]):
def run_inference(self, batch: Sequence[pandas.DataFrame], model: Union[xgboost.Booster, xgboost.XGBModel], inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:
return self._inference_fn(batch, model, inference_args)
def get_num_bytes(self, batch: Sequence[pandas.DataFrame]) -> int:
return sum((df.memory_usage(deep=True).sum() for df in batch))
|
Implementation of the ModelHandler interface for XGBoost
using pandas dataframes as input.
Example Usage::
pcoll | RunInference(
XGBoostModelHandlerPandas(
model_class="XGBoost Model Class",
model_state="my_model_state.json")))
Args:
model_class: class of the XGBoost model that defines the model
structure.
model_state: path to a json file that contains the model's
configuration.
inference_fn: the inference function to use during RunInference.
default=default_xgboost_inference_fn
|
github-repos
|
def _add_open_file(self, file_obj):
if self._free_fd_heap:
open_fd = heapq.heappop(self._free_fd_heap)
self.open_files[open_fd] = [file_obj]
return open_fd
self.open_files.append([file_obj])
return len(self.open_files) - 1
|
Add file_obj to the list of open files on the filesystem.
Used internally to manage open files.
The position in the open_files array is the file descriptor number.
Args:
file_obj: File object to be added to open files list.
Returns:
File descriptor number for the file object.
|
juraj-google-style
|
def remove_node(self, node_id, force=False):
url = self._url('/nodes/{0}', node_id)
params = {
'force': force
}
res = self._delete(url, params=params)
self._raise_for_status(res)
return True
|
Remove a node from the swarm.
Args:
node_id (string): ID of the node to be removed.
force (bool): Force remove an active node. Default: `False`
Raises:
:py:class:`docker.errors.NotFound`
If the node referenced doesn't exist in the swarm.
:py:class:`docker.errors.APIError`
If the server returns an error.
Returns:
`True` if the request was successful.
|
juraj-google-style
|
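A minimal sketch using docker-py's low-level client, which exposes the same call; the node ID is hypothetical and force=True removes even an active node.

```python
import docker

client = docker.APIClient()
client.remove_node("node-id-placeholder", force=True)  # hypothetical node ID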
async def _async_start(self, auto_register=True):
if auto_register:
await self._async_register()
self.client = aioxmpp.PresenceManagedClient(self.jid,
aioxmpp.make_security_layer(self.password,
no_verify=not self.verify_security),
loop=self.loop,
logger=logging.getLogger(self.jid.localpart))
self.message_dispatcher = self.client.summon(SimpleMessageDispatcher)
self.presence = PresenceManager(self)
await self._async_connect()
self.message_dispatcher.register_callback(
aioxmpp.MessageType.CHAT,
None,
self._message_received,
)
await self.setup()
self._alive.set()
for behaviour in self.behaviours:
if not behaviour.is_running:
behaviour.start()
|
Starts the agent from a coroutine. This fires some actions:
* if auto_register: register the agent in the server
* runs the event loop
* connects the agent to the server
* runs the registered behaviours
Args:
auto_register (bool, optional): register the agent in the server (Default value = True)
|
juraj-google-style
|
def process_git_configs(git_short=''):
LOG.info('Processing application.json files from GitLab "%s".', git_short)
file_lookup = FileLookup(git_short=git_short)
app_configs = process_configs(file_lookup,
RUNWAY_BASE_PATH + '/application-master-{env}.json',
RUNWAY_BASE_PATH + '/pipeline.json')
commit_obj = file_lookup.project.commits.get('master')
config_commit = commit_obj.attributes['id']
LOG.info('Commit ID used: %s', config_commit)
app_configs['pipeline']['config_commit'] = config_commit
return app_configs
|
Retrieve _application.json_ files from GitLab.
Args:
git_short (str): Short Git representation of repository, e.g.
forrest/core.
Returns:
collections.defaultdict: Configurations stored for each environment
found.
|
juraj-google-style
|
def delete(self, membershipId):
check_type(membershipId, basestring)
self._session.delete(API_ENDPOINT + '/' + membershipId)
|
Delete a membership, by ID.
Args:
membershipId(basestring): The membership ID.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
|
juraj-google-style
|
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('--steps', dest='steps', type=_parse_steps, help='A JSON string that gives a list where each entry of the list is configuration information for a step. Configuration for each step consists of (1) A float "per_bundle_delay_sec" (in seconds). Defaults to 0.(2) A float "per_element_delay_msec" (in milli seconds). Defaults to 0.(3) An integer "output_records_per_input_record". Defaults to 1.(4) A float "output_filter_ratio" in the range [0, 1] . Defaults to 0.(5) A bool "splittable" that defaults to false.(6) An integer "initial_splitting_num_bundles". Defaults to 8.')
parser.add_argument('--input', dest='input', type=json.loads, help='A JSON string that describes the properties of the SyntheticSource used by the pipeline. Configuration is similar to Java SyntheticBoundedInput.Currently supports following properties. (1) An integer "numRecords". (2) An integer "keySize". (3) An integer "valueSize". (4) A tuple "bundleSizeDistribution" with following values. A string "type". Allowed values are "const" and "zipf". An float "param". Only used if "type"=="zipf". Must be larger than 1. (5) An integer "forceNumInitialBundles". (6) An integer "splitPointFrequencyRecords". (7) A tuple "delayDistribution" with following values. A string "type". Only allowed value is "const". An integer "const". (8) A string "algorithm". Allowed values are "builtin" for Python builtin random generator, and "lcg" for the linear congruential generator equivalent to Java (java.util.Random).')
parser.add_argument('--barrier', dest='barrier', default='shuffle', choices=['shuffle', 'side-input', 'expand-gbk', 'expand-second-output', 'merge-gbk', 'merge-side-input'], help='Whether to use shuffle as the barrier (as opposed to side inputs).')
parser.add_argument('--output', dest='output', default='', help='Destination to write output.')
return parser.parse_known_args(args)
|
Parses a given set of arguments.
Args:
args: set of arguments to be passed.
Returns:
a tuple where first item gives the set of arguments defined and parsed
within this method and second item gives the set of unknown arguments.
|
github-repos
|
def sort_objects_in_import(import_statement: str) -> str:
def _replace(match):
imports = match.groups()[0]
if ',' not in imports:
return f'[{imports}]'
keys = [part.strip().replace('"', '') for part in imports.split(',')]
if len(keys[-1]) == 0:
keys = keys[:-1]
return '[' + ', '.join([f'"{k}"' for k in sort_objects(keys)]) + ']'
lines = import_statement.split('\n')
if len(lines) > 3:
idx = 2 if lines[1].strip() == '[' else 1
keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
return '\n'.join(lines[:idx] + sorted_lines + lines[-idx:])
elif len(lines) == 3:
if _re_bracket_content.search(lines[1]) is not None:
lines[1] = _re_bracket_content.sub(_replace, lines[1])
else:
keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
if len(keys[-1]) == 0:
keys = keys[:-1]
lines[1] = get_indent(lines[1]) + ', '.join([f'"{k}"' for k in sort_objects(keys)])
return '\n'.join(lines)
else:
import_statement = _re_bracket_content.sub(_replace, import_statement)
return import_statement
|
Sorts the imports in a single import statement.
Args:
import_statement (`str`): The import statement in which to sort the imports.
Returns:
`str`: The same as the input, but with objects properly sorted.
|
github-repos
|
def cudnn_lstm(units, n_hidden, n_layers=1, trainable_initial_states=None, seq_lengths=None, initial_h=None, initial_c=None, name='cudnn_lstm', reuse=False):
with tf.variable_scope(name, reuse=reuse):
lstm = tf.contrib.cudnn_rnn.CudnnLSTM(num_layers=n_layers, num_units=n_hidden)
if trainable_initial_states:
init_h = tf.get_variable('init_h', [n_layers, 1, n_hidden])
init_h = tf.tile(init_h, (1, tf.shape(units)[0], 1))
init_c = tf.get_variable('init_c', [n_layers, 1, n_hidden])
init_c = tf.tile(init_c, (1, tf.shape(units)[0], 1))
else:
init_h = init_c = tf.zeros([n_layers, tf.shape(units)[0], n_hidden])
initial_h = (initial_h or init_h)
initial_c = (initial_c or init_c)
(h, (h_last, c_last)) = lstm(tf.transpose(units, (1, 0, 2)), (initial_h, initial_c))
h = tf.transpose(h, (1, 0, 2))
h_last = h_last[(- 1)]
c_last = c_last[(- 1)]
if (seq_lengths is not None):
indices = tf.stack([tf.range(tf.shape(h)[0]), (seq_lengths - 1)], axis=1)
h_last = tf.gather_nd(h, indices)
return (h, (h_last, c_last))
|
Fast CuDNN LSTM implementation
Args:
units: tf.Tensor with dimensions [B x T x F], where
B - batch size
T - number of tokens
F - features
n_hidden: dimensionality of hidden state
n_layers: number of layers
trainable_initial_states: whether to create a special trainable variable
to initialize the hidden states of the network or use just zeros
seq_lengths: tensor of sequence lengths with dimension [B]
initial_h: optional initial hidden state, masks trainable_initial_states
if provided
initial_c: optional initial cell state, masks trainable_initial_states
if provided
name: name of the variable scope to use
reuse:whether to reuse already initialized variable
Returns:
h - all hidden states along T dimension,
tf.Tensor with dimensionality [B x T x F]
h_last - last hidden state, tf.Tensor with dimensionality [B x H]
where H - number of hidden units
c_last - last cell state, tf.Tensor with dimensionality [B x H]
where H - number of hidden units
|
codesearchnet
|
def transition_retry(self, pipeline_key, retry_message):
def txn():
pipeline_record = db.get(pipeline_key)
if (pipeline_record is None):
logging.warning('Tried to retry pipeline ID "%s" but it does not exist.', pipeline_key.name())
raise db.Rollback()
if (pipeline_record.status not in (_PipelineRecord.WAITING, _PipelineRecord.RUN)):
logging.warning('Tried to retry pipeline ID "%s", found bad state: %s', pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
params = pipeline_record.params
offset_seconds = (params['backoff_seconds'] * (params['backoff_factor'] ** pipeline_record.current_attempt))
pipeline_record.next_retry_time = (self._gettime() + datetime.timedelta(seconds=offset_seconds))
pipeline_record.current_attempt += 1
pipeline_record.retry_message = retry_message
pipeline_record.status = _PipelineRecord.WAITING
if (pipeline_record.current_attempt >= pipeline_record.max_attempts):
root_pipeline_key = _PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record)
logging.warning('Giving up on pipeline ID "%s" after %d attempt(s); causing abort all the way to the root pipeline ID "%s"', pipeline_key.name(), pipeline_record.current_attempt, root_pipeline_key.name())
pipeline_record.abort_message = ('Aborting after %d attempts' % pipeline_record.current_attempt)
task = taskqueue.Task(url=self.fanout_abort_handler_path, params=dict(root_pipeline_key=root_pipeline_key))
task.add(queue_name=self.queue_name, transactional=True)
else:
task = taskqueue.Task(url=self.pipeline_handler_path, eta=pipeline_record.next_retry_time, params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.START, attempt=pipeline_record.current_attempt), headers={'X-Ae-Pipeline-Key': pipeline_key}, target=pipeline_record.params['target'])
task.add(queue_name=self.queue_name, transactional=True)
pipeline_record.put()
db.run_in_transaction(txn)
|
Marks the given pipeline as requiring another retry.
Does nothing if all attempts have been exceeded.
Args:
pipeline_key: db.Key of the _PipelineRecord that needs to be retried.
retry_message: User-supplied message indicating the reason for the retry.
|
codesearchnet
|
def get_doctest_files(diff_with_last_commit: bool=False) -> List[str]:
repo = Repo(PATH_TO_REPO)
test_files_to_run = []
if not diff_with_last_commit:
print(f'main is at {repo.refs.main.commit}')
print(f'Current head is at {repo.head.commit}')
branching_commits = repo.merge_base(repo.refs.main, repo.head)
for commit in branching_commits:
print(f'Branching commit: {commit}')
test_files_to_run = get_diff_for_doctesting(repo, repo.head.commit, branching_commits)
else:
print(f'main is at {repo.head.commit}')
parent_commits = repo.head.commit.parents
for commit in parent_commits:
print(f'Parent commit: {commit}')
test_files_to_run = get_diff_for_doctesting(repo, repo.head.commit, parent_commits)
all_test_files_to_run = get_all_doctest_files()
new_test_files = get_new_doctest_files(repo, repo.head.commit, repo.refs.main.commit)
test_files_to_run = list(set(test_files_to_run + new_test_files))
with open('utils/slow_documentation_tests.txt') as fp:
slow_documentation_tests = set(fp.read().strip().split('\n'))
test_files_to_run = [x for x in test_files_to_run if x in all_test_files_to_run and x not in slow_documentation_tests]
test_files_to_run = [f for f in test_files_to_run if (PATH_TO_REPO / f).exists()]
return sorted(test_files_to_run)
|
Return a list of python and Markdown files where doc examples have been modified between:
- the current head and the main branch if `diff_with_last_commit=False` (default)
- the current head and its parent commit otherwise.
Returns:
`List[str]`: The list of Python and Markdown files with a diff (files added or renamed are always returned, files
modified are returned if the diff in the file is only in doctest examples).
|
github-repos
|
def _use_datastore(self, key, options=None):
flag = ContextOptions.use_datastore(options)
if (flag is None):
flag = self._datastore_policy(key)
if (flag is None):
flag = ContextOptions.use_datastore(self._conn.config)
if (flag is None):
flag = True
return flag
|
Return whether to use the datastore for this key.
Args:
key: Key instance.
options: ContextOptions instance, or None.
Returns:
True if the datastore should be used, False otherwise.
|
codesearchnet
|
def _GetSerializedAttributeContainerByIndex(self, container_type, index):
container_list = self._GetSerializedAttributeContainerList(container_type)
return container_list.GetAttributeContainerByIndex(index)
|
Retrieves a specific serialized attribute container.
Args:
container_type (str): attribute container type.
index (int): attribute container index.
Returns:
bytes: serialized attribute container data or None if not available.
|
codesearchnet
|
def get_node_details(self, node_id: list) -> dict:
if (not self._manager):
raise RuntimeError('Only the Swarm manager node can retrieve node details.')
node = self._client.nodes.get(node_id)
return node.attrs
|
Get details of a node.
Only the manager nodes can retrieve details of a node
Args:
node_id (list): List of node ID
Returns:
dict, details of the node
|
codesearchnet
|
def from_json_file(cls, filename):
with open(filename, 'r') as fp:
return cls(json.load(fp))
|
Load a lexicon from a JSON file.
Args:
filename (str): The path to a JSON dump.
|
codesearchnet
|
def exceptions(error_is_fatal=True, error_messages=None):
def exception_decorator(func):
nonlocal error_messages
@functools.wraps(func)
def exc_wrapper(*args, **kwargs):
nonlocal error_messages
try:
result = func(*args, **kwargs)
except sa.exc.SQLAlchemyError as err:
result = None
details = None
err_type = err.__class__
if error_messages and err_type in error_messages:
details = error_messages[err_type]
if details:
LOG.error(details)
LOG.error("For developers: (%s) %s", err.__class__, str(err))
if error_is_fatal:
sys.exit("Abort, SQL operation failed.")
if not ui.ask(
"I can continue at your own risk, do you want that?"):
raise err
return result
return exc_wrapper
return exception_decorator
|
Handle SQLAlchemy exceptions in a sane way.
Args:
error_is_fatal: Should we exit the program on exception? If False, the
user is asked whether to continue; the exception is reraised if they
decline.
error_messages: A dictionary that maps an exception class to a
customized error message.
|
juraj-google-style
|
def run_coroutine(self, cor, *args, **kwargs):
if self.stopping:
raise LoopStoppingError("Could not launch coroutine because loop is shutting down: %s" % cor)
self.start()
cor = _instaniate_coroutine(cor, args, kwargs)
if self.inside_loop():
raise InternalError("BackgroundEventLoop.run_coroutine called from inside event loop, "
"would have deadlocked.")
future = self.launch_coroutine(cor)
return future.result()
|
Run a coroutine to completion and return its result.
This method may only be called outside of the event loop.
Attempting to call it from inside the event loop would deadlock
and will raise InternalError instead.
Args:
cor (coroutine): The coroutine that we wish to run in the
background and wait until it finishes.
Returns:
object: Whatever the coroutine cor returns.
|
juraj-google-style
|
def get_learning_rate(self, iter):
return self.init_lr * ((1.0 - iter * 1.0 / self.max_iter) ** self.power)
|
Get learning rate with polynomial decay based on current iteration.
Args:
iter (int): current iteration (starting with 0).
Returns:
float: Learning rate
|
juraj-google-style
|
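A standalone restatement of the decay formula above, with assumed example values for init_lr, max_iter, and power:

```python
init_lr, max_iter, power = 0.01, 100, 0.9

def poly_lr(it):
    # Same polynomial decay as get_learning_rate above.
    return init_lr * (1.0 - it * 1.0 / max_iter) ** power

print(poly_lr(0))   # 0.01 at the start
print(poly_lr(50))  # ~0.0054 halfway through
print(poly_lr(99))  # ~0.00016 near the end
```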
def compress(a, b):
from difflib import ndiff
left = a.splitlines(1) if isinstance(a, string_types) else a
right = b.splitlines(1) if isinstance(b, string_types) else b
ldiff = list(ndiff(left, right))
result = {}
latest = None
combo = None
icombo = 0
iorig = 0
for i, line in enumerate(ldiff):
cs = [l[0] for l in ldiff[i:min((i+4, len(ldiff)))]]
if cs[0] != ' ':
if latest is None:
latest = iorig
result[latest] = []
if combo is None:
if cs[0] == '-':
if (len(cs) >=3 and cs[1] == '+' and cs[2] == '?'):
combo = 3
elif (len(cs) >= 4 and cs[1] == '?' and cs[2] == '+'
and cs[3] == '?'):
combo = 4
else:
combo = 1
elif cs[0] == '+':
combo = 1
if icombo < combo:
result[latest].append(line)
icombo += 1
if icombo == combo:
if combo > 1:
latest = None
combo = None
icombo = 0
if cs[0] != '+':
iorig += 1
else:
latest = None
iorig += 1
return result
|
Performs the *compressed* diff of `a` and `b` such that the original
contents of the :func:`difflib.ndiff` call can be reconstructed using
:func:`~acorn.logging.diff.restore`.
Args:
a (str or list): *original* string or list of strings to diff.
b (str or list): *edited* string or list of strings to diff.
|
juraj-google-style
|
def find_all(self, product_type, short_name, include_hidden=False):
all_prods = []
if (product_type is None):
for prod_dict in self._product_map.values():
all_prods.extend([prod for prod in prod_dict.get(short_name, []) if (include_hidden or (not prod.hidden))])
return all_prods
all_prods = self._product_map.get(product_type, {})
return [prod for prod in all_prods.get(short_name, []) if (include_hidden or (not prod.hidden))]
|
Find all providers of a given product by its short name.
This function will return all providers of a given product. If you
want to ensure that a product's name is unique among all dependencies,
you should use find_unique.
Args:
product_type (str): The type of product that we are looking for, like
firmware_image, library etc.
short_name (str): The short name of the product that we wish to find,
usually its os.path.basename()
include_hidden (bool): Return products that are hidden and not selected
as visible in the depends section of this tile's module settings.
This defaults to False.
Returns:
list of ProductInfo: A list of all of the matching products. If no matching
products are found, an empty list is returned. If you want to raise
a BuildError in that case use find_unique.
|
codesearchnet
|
def which(self, cmd, parent_environ=None, fallback=False):
env = self.get_environ(parent_environ=parent_environ)
path = which(cmd, env=env)
if fallback and path is None:
path = which(cmd)
return path
|
Find a program in the resolved environment.
Args:
cmd: String name of the program to find.
parent_environ: Environment to interpret the context within,
defaults to os.environ if None.
fallback: If True, and the program is not found in the context,
the current environment will then be searched.
Returns:
Path to the program, or None if the program was not found.
|
juraj-google-style
|
def _CheckStatusAnalysisProcess(self, pid):
self._RaiseIfNotRegistered(pid)
if pid in self._completed_analysis_processes:
status_indicator = definitions.STATUS_INDICATOR_COMPLETED
process_status = {
'processing_status': status_indicator}
used_memory = 0
else:
process = self._processes_per_pid[pid]
process_status = self._QueryProcessStatus(process)
if process_status is None:
process_is_alive = False
else:
process_is_alive = True
process_information = self._process_information_per_pid[pid]
used_memory = process_information.GetUsedMemory() or 0
if self._worker_memory_limit and used_memory > self._worker_memory_limit:
logger.warning((
'Process: {0:s} (PID: {1:d}) killed because it exceeded the '
'memory limit: {2:d}.').format(
process.name, pid, self._worker_memory_limit))
self._KillProcess(pid)
if isinstance(process_status, dict):
self._rpc_errors_per_pid[pid] = 0
status_indicator = process_status.get('processing_status', None)
if status_indicator == definitions.STATUS_INDICATOR_COMPLETED:
self._completed_analysis_processes.add(pid)
else:
rpc_errors = self._rpc_errors_per_pid.get(pid, 0) + 1
self._rpc_errors_per_pid[pid] = rpc_errors
if rpc_errors > self._MAXIMUM_RPC_ERRORS:
process_is_alive = False
if process_is_alive:
rpc_port = process.rpc_port.value
logger.warning((
'Unable to retrieve process: {0:s} (PID: {1:d}) status via '
'RPC socket: http:
process.name, pid, rpc_port))
processing_status_string = 'RPC error'
status_indicator = definitions.STATUS_INDICATOR_RUNNING
else:
processing_status_string = 'killed'
status_indicator = definitions.STATUS_INDICATOR_KILLED
process_status = {
'processing_status': processing_status_string}
self._UpdateProcessingStatus(pid, process_status, used_memory)
if status_indicator in definitions.ERROR_STATUS_INDICATORS:
logger.error((
'Process {0:s} (PID: {1:d}) is not functioning correctly. '
'Status code: {2!s}.').format(
process.name, pid, status_indicator))
self._TerminateProcessByPid(pid)
|
Checks the status of an analysis process.
Args:
pid (int): process ID (PID) of a registered analysis process.
Raises:
KeyError: if the process is not registered with the engine.
|
juraj-google-style
|
def easeOutElastic(n, amplitude=1, period=0.3):
_checkRange(n)
if (amplitude < 1):
amplitude = 1
s = (period / 4)
else:
s = ((period / (2 * math.pi)) * math.asin((1 / amplitude)))
return (((amplitude * (2 ** ((- 10) * n))) * math.sin(((n - s) * ((2 * math.pi) / period)))) + 1)
|
An elastic tween function that overshoots the destination and then "rubber bands" into the destination.
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
Returns:
(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
|
codesearchnet
|
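Sampling the tween above at a few progress values shows the overshoot; the endpoints land at roughly 0.0 and 1.0. This reuses the easeOutElastic function defined above.

```python
for n in (0.0, 0.25, 0.5, 0.75, 1.0):
    print(n, round(easeOutElastic(n), 4))
```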
def gen_ordered_statistics(transaction_manager, record):
items = record.items
for combination_set in combinations(sorted(items), len(items) - 1):
items_base = frozenset(combination_set)
items_add = frozenset(items.difference(items_base))
confidence = (
record.support / transaction_manager.calc_support(items_base))
lift = confidence / transaction_manager.calc_support(items_add)
yield OrderedStatistic(
frozenset(items_base), frozenset(items_add), confidence, lift)
|
Returns a generator of ordered statistics as OrderedStatistic instances.
Arguments:
transaction_manager -- Transactions as a TransactionManager instance.
record -- A support record as a SupportRecord instance.
|
juraj-google-style
|
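Worked numbers for the confidence and lift computed above; the support values are assumed for illustration.

```python
support_both = 0.20  # support(items_base union items_add), i.e. record.support
support_base = 0.25  # support(items_base)
support_add = 0.40   # support(items_add)

confidence = support_both / support_base  # 0.8
lift = confidence / support_add           # 2.0: twice as likely as chance
print(confidence, lift)
```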
def content_type(self):
return (self.headers.get('ContentType') or self.headers.get('Content-Type') or _content_types.JSON)
|
The request's content-type.
Returns:
(str): The value, if any, of the header 'ContentType' (used by some AWS services) and 'Content-Type'.
Otherwise, returns 'application/json' as default.
|
codesearchnet
|
def from_string(cls, jss, xml_string):
root = ElementTree.fromstring(xml_string.encode('utf-8'))
return cls(jss, root)
|
Creates a new JSSObject from an UTF-8 XML string.
Args:
jss: A JSS object.
xml_string: String XML file data used to create object.
|
codesearchnet
|
def get_image_features(self, pixel_values: torch.FloatTensor):
image_tokens = self.get_image_tokens(pixel_values)
vision_embeddings = self.get_input_embeddings()(image_tokens)
return vision_embeddings
|
Tokenizes images into discrete tokens with VQGAN module and embeds
them with text embeddings layer
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)):
The tensors corresponding to the input images.
|
github-repos
|
def survival_function(self, value, name='survival_function'):
return self._call_survival_function(value, name)
|
Survival function.
Given random variable `X`, the survival function is defined:
```none
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
|
github-repos
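A hedged usage sketch: the base-class method above is exercised through a concrete distribution, here assumed to be tfp.distributions.Normal from TensorFlow Probability:
import tensorflow_probability as tfp

# P[X > 1.0] for a standard normal is 1 - cdf(1.0), roughly 0.1587.
dist = tfp.distributions.Normal(loc=0., scale=1.)
print(dist.survival_function(1.0))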
|
def get_ssm_parameter(parameter_name):
try:
response = boto3.client('ssm').get_parameters(Names=[parameter_name], WithDecryption=True)
return response.get('Parameters', None)[0].get('Value', '')
except Exception:
pass
return ''
|
Get the decrypted value of an SSM parameter
Args:
parameter_name - the name of the stored parameter of interest
Return:
Value if allowed and present else None
|
codesearchnet
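A minimal usage sketch (the parameter name is hypothetical; an empty string means the parameter was missing or access was denied):
# Fetch a decrypted secret and fall back gracefully when it is unavailable.
db_password = get_ssm_parameter('/myapp/prod/db_password')
if not db_password:
    print('parameter unavailable, using local defaults')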
|
def build_input_fns(data_dir, batch_size):
with open(download(data_dir, "vocab.pkl"), "rb") as f:
words_to_idx = pickle.load(f)
num_words = len(words_to_idx)
vocabulary = [None] * num_words
for word, idx in words_to_idx.items():
vocabulary[idx] = word
def train_input_fn():
dataset = newsgroups_dataset(
data_dir, "train", num_words, shuffle_and_repeat=True)
dataset = dataset.batch(batch_size).prefetch(32)
return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
def eval_input_fn():
dataset = newsgroups_dataset(
data_dir, "test", num_words, shuffle_and_repeat=False)
dataset = dataset.batch(batch_size)
return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
return train_input_fn, eval_input_fn, vocabulary
|
Builds iterators for train and evaluation data.
Each object is represented as a bag-of-words vector.
Arguments:
data_dir: Folder in which to store the data.
batch_size: Batch size for both train and evaluation.
Returns:
train_input_fn: A function that returns an iterator over the training data.
eval_input_fn: A function that returns an iterator over the evaluation data.
vocabulary: A mapping of word's integer index to the corresponding string.
|
juraj-google-style
|
def letter_score(letter):
score_map = {
1: ["a", "e", "i", "o", "u", "l", "n", "r", "s", "t"],
2: ["d", "g"],
3: ["b", "c", "m", "p"],
4: ["f", "h", "v", "w", "y"],
5: ["k"],
8: ["j", "x"],
10: ["q", "z"],
}
for score, letters in score_map.items():
if letter.lower() in letters:
return score
else:
raise TypeError("Invalid letter: %s" % letter)
|
Returns the Scrabble score of a letter.
Args:
letter: a single character string
Raises:
TypeError if a non-Scrabble character is supplied
|
juraj-google-style
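A hedged usage sketch that builds on the scorer above to score a whole word (word_score is a hypothetical helper, not part of the original module):
def word_score(word):
    # Sum the Scrabble value of every letter; non-letters raise TypeError.
    return sum(letter_score(letter) for letter in word)

print(word_score('cabbage'))  # 3 + 1 + 3 + 3 + 1 + 2 + 1 = 14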
|
def rename_state_fluent(name: str) -> str:
i = name.index('/')
functor = name[:i]
arity = name[i+1:]
return "{}'/{}".format(functor, arity)
|
Returns current state fluent canonical name.
Args:
name (str): The next state fluent name.
Returns:
str: The current state fluent name.
|
juraj-google-style
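A short usage sketch (the fluent names are hypothetical; the function simply inserts a prime before the arity separator):
print(rename_state_fluent('location/1'))     # "location'/1"
print(rename_state_fluent('stock-level/2'))  # "stock-level'/2"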
|
def blit(self, src_rect, dst_surf, dst_rect):
check_int_err(lib.SDL_UpperBlit(self._ptr, src_rect._ptr, dst_surf._ptr, dst_rect._ptr))
|
Performs a fast blit from the source surface to the destination surface.
This assumes that the source and destination rectangles are
the same size. If either src_rect or dst_rect are None, the entire
surface is copied. The final blit rectangles are saved
in src_rect and dst_rect after all clipping is performed.
Args:
src_rect (Rect): Source rect.
dst_surf (Surface): Destination surface.
dst_rect (Rect): Destination rect.
Raises:
SDLError: If the blit fails.
|
codesearchnet
|
def __init__(self, path, encoding="utf-8", chunk_size=io.DEFAULT_BUFFER_SIZE):
if encoding.lower() not in supported_encodings:
error_message = "{0} encoding was not supported/tested.".format(encoding)
error_message += "Supported encodings are '{0}'".format(",".join(supported_encodings))
raise NotImplementedError(error_message)
self.path = path
self.encoding = encoding.lower()
self.chunk_size = chunk_size
self.iterator = FileReadBackwardsIterator(io.open(self.path, mode="rb"), self.encoding, self.chunk_size)
|
Constructor for FileReadBackwards.
Args:
path: Path to the file to be read
encoding (str): Encoding
chunk_size (int): How many bytes to read at a time
|
juraj-google-style
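A hedged usage sketch: only the constructor is shown above, so this assumes the class exposes its FileReadBackwardsIterator for line-by-line iteration, as in the file_read_backwards package; 'server.log' is a hypothetical file:
# Read the newest lines first without loading the whole file into memory.
frb = FileReadBackwards('server.log', encoding='utf-8', chunk_size=4096)
for line in frb.iterator:
    print(line)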
|
def events_filter(
self,
topics: List[str] = None,
from_block: BlockSpecification = None,
to_block: BlockSpecification = None,
) -> StatelessFilter:
return self.client.new_filter(
self.address,
topics=topics,
from_block=from_block,
to_block=to_block,
)
|
Install a new filter for an array of topics emitted by the contract.
Args:
topics: A list of event ids to filter for. Can also be None,
in which case all events are queried.
from_block: The block number at which to start looking for events.
to_block: The block number at which to stop looking for events.
Return:
Filter: The filter instance.
|
juraj-google-style
|
def read_model_with_mutable_tensors(input_tflite_file):
return copy.deepcopy(read_model(input_tflite_file))
|
Reads a tflite model as a python object with mutable tensors.
Similar to read_model() with the addition that the returned object has
mutable tensors (read_model() returns an object with immutable tensors).
NOTE: This API only works for TFLite generated with
_experimental_use_buffer_offset=false
Args:
input_tflite_file: Full path name to the input tflite file
Raises:
RuntimeError: If input_tflite_file path is invalid.
IOError: If input_tflite_file cannot be opened.
Returns:
A mutable python object corresponding to the input tflite file.
|
github-repos
|
def encode_row(fields):
unicode_fields = [unicode(f) for f in fields]
escaped_fields = map(escape, unicode_fields)
return _field_delimiter.join(escaped_fields)
|
Encode a list of column values into a [incr tsdb()] profile line.
Encoding involves escaping special characters for each value, then
joining the values into a single string with the field delimiter
(`"@"` by default). It does not fill in default values (see
make_row()).
Args:
fields: a list of column values
Returns:
A [incr tsdb()]-encoded string
|
juraj-google-style
|
def is_old(self):
if (not self.processing_started_ts):
return True
if self.processing_ended_ts:
return ((self.processing_ended_ts + DB_CACHE_TIME) < time.time())
expected_end_ts = (self.creation_ts + DB_MAX_WAIT_TIME)
if (expected_end_ts < time.time()):
logger.error('Processing timed out and properties were not set!')
return (expected_end_ts < time.time())
|
Is the object cached for too long, so it should be redownloaded?
See :attr:`.DB_MAX_WAIT_TIME` and :attr:`.DB_CACHE_TIME` for details.
Returns:
bool: True if it is.
|
codesearchnet
|
def GetUserByEmail(self, email):
user = self.rpc_helper.GetAccountInfoByEmail(email)
return GitkitUser.FromApiResponse(user)
|
Gets user info by email.
Args:
email: string, the user email.
Returns:
GitkitUser, containing the user info.
|
codesearchnet
|
def get_doc_id(document_pb, expected_prefix):
(prefix, document_id) = document_pb.name.rsplit(DOCUMENT_PATH_DELIMITER, 1)
if (prefix != expected_prefix):
raise ValueError('Unexpected document name', document_pb.name, 'Expected to begin with', expected_prefix)
return document_id
|
Parse a document ID from a document protobuf.
Args:
document_pb (google.cloud.proto.firestore.v1beta1.\
document_pb2.Document): A protobuf for a document that
was created in a ``CreateDocument`` RPC.
expected_prefix (str): The expected collection prefix for the
fully-qualified document name.
Returns:
str: The document ID from the protobuf.
Raises:
ValueError: If the name does not begin with the prefix.
|
codesearchnet
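A hedged sketch: get_doc_id only reads the protobuf's name attribute, so a SimpleNamespace stand-in is enough to illustrate it (the path below is hypothetical and assumes DOCUMENT_PATH_DELIMITER is '/'):
from types import SimpleNamespace

document_pb = SimpleNamespace(
    name='projects/p/databases/d/documents/users/alice')
prefix = 'projects/p/databases/d/documents/users'
print(get_doc_id(document_pb, prefix))  # 'alice'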
|
def get_connection(db_type, db_pth, user=None, password=None, name=None):
if (db_type == 'sqlite'):
print(db_pth)
conn = sqlite3.connect(db_pth)
elif (db_type == 'mysql'):
import mysql.connector
conn = mysql.connector.connect(user=user, password=password, database=name)
elif (db_type == 'django_mysql'):
from django.db import connection as conn
else:
print('unsupported database type: {}, choices are "sqlite", "mysql" or "django_mysql"'.format(db_type))
return conn
|
Get a connection to a SQL database. Can be used for SQLite, MySQL or Django MySQL database
Example:
>>> from msp2db.db import get_connection
>>> conn = get_connection('sqlite', 'library.db')
If using "mysql" mysql.connector needs to be installed.
If using "django_mysql" Django needs to be installed.
Args:
db_type (str): Type of database can either be "sqlite", "mysql" or "django_mysql"
Returns:
sql connection object
|
codesearchnet
|
def _ParseRelationshipsXMLFile(self, xml_data):
xml_root = ElementTree.fromstring(xml_data)
property_files = []
for xml_element in xml_root.iter():
type_attribute = xml_element.get('Type')
if 'properties' in repr(type_attribute):
target_attribute = xml_element.get('Target')
property_files.append(target_attribute)
return property_files
|
Parses the relationships XML file (_rels/.rels).
Args:
xml_data (bytes): data of a _rels/.rels XML file.
Returns:
list[str]: property file paths. The path is relative to the root of
the ZIP file.
Raises:
zipfile.BadZipfile: if the relationship XML file cannot be read.
|
juraj-google-style
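A hedged sketch with a minimal stand-in _rels/.rels payload (parser is assumed to be an instance of the surrounding parser class; only relationships whose Type mentions 'properties' are collected):
xml_data = (
    b'<Relationships>'
    b'<Relationship Type="../core-properties" Target="docProps/core.xml"/>'
    b'<Relationship Type="../officeDocument" Target="word/document.xml"/>'
    b'</Relationships>'
)
print(parser._ParseRelationshipsXMLFile(xml_data))  # ['docProps/core.xml']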
|
def __init__(self, mutation_list):
self.mutation_list = [(i[0], int(i[1]), self._standard_resname(i[2])) for i in mutation_list]
self.chains_and_residues = [(i[0], int(i[1])) for i in mutation_list]
|
Initialize the parameters which indicate what mutations will occur
Args:
chain:
residue_number:
mutate_to:
|
juraj-google-style
|
def get_schedule_distribution(schedule, global_step=None):
(interpolation, steps, pmfs) = schedule
if (len(pmfs) == 1):
return pmfs[0]
if (global_step is None):
global_step = tf.train.get_or_create_global_step()
if (interpolation == 'step'):
interpolation_fn = step_interpolation
elif (interpolation == 'linear'):
interpolation_fn = linear_interpolation
else:
raise ValueError(('Invalid interpolation strategy: %s' % interpolation))
return tf.reshape(tf.py_func(func=(lambda x: interpolation_fn(x, np.array(steps), np.array(pmfs))), inp=[global_step], Tout=tf.float32), [len(pmfs[0])])
|
Computes the pmf of a schedule given the global_step.
Args:
schedule: A schedule tuple, see encode_schedule for details.
global_step: A scalar tensor, the step to query the schedule.
Returns:
A 1-D tensor of probs, the sampling distribution of the global_step.
|
codesearchnet
|
def _auditpol_cmd(cmd):
ret = salt.modules.cmdmod.run_all(cmd='auditpol {0}'.format(cmd),
python_shell=True)
if ret['retcode'] == 0:
return ret['stdout'].splitlines()
msg = 'Error executing auditpol command: {0}\n'.format(cmd)
msg += '\n'.join(ret['stdout'])
raise CommandExecutionError(msg)
|
Helper function for running the auditpol command
Args:
cmd (str): the auditpol command to run
Returns:
list: A list containing each line of the return (splitlines)
Raises:
CommandExecutionError: If the command encounters an error
|
juraj-google-style
|
def authorizer(self, schemes, resource, action, request_args):
if (not schemes):
return (u'', u'')
for scheme in schemes:
if ((scheme in self.schemes) and self.has_auth_params(scheme)):
cred = Context.format_auth_params(self.schemes[scheme][u'params'])
if hasattr(self, 'mfa_token'):
cred = '{}, mfa_token="{}"'.format(cred, self.mfa_token)
return (scheme, cred)
raise AuthenticationError(self, schemes)
|
Construct the Authorization header for a request.
Args:
schemes (list of str): Authentication schemes supported for the
requested action.
resource (str): Object upon which an action is being performed.
action (str): Action being performed.
request_args (list of str): Arguments passed to the action call.
Returns:
(str, str) A tuple of the auth scheme satisfied, and the credential
for the Authorization header or empty strings if none could be
satisfied.
|
codesearchnet
|
def __init__(self, file_name=None, path=None, date=None):
self._utils = TcExUtils()
self._occurrence_data = {}
if file_name is not None:
self._occurrence_data['fileName'] = file_name
if path is not None:
self._occurrence_data['path'] = path
if date is not None:
self._occurrence_data['date'] = self._utils.format_datetime(
date, date_format='%Y-%m-%dT%H:%M:%SZ'
)
|
Initialize Class Properties
Args:
file_name (str, optional): The file name for this occurrence.
path (str, optional): The file path for this occurrence.
date (str, optional): The datetime expression for this occurrence.
|
juraj-google-style
|
def locked_put(self, credentials):
self._create_file_if_needed()
_helpers.validate_file(self._filename)
f = open(self._filename, 'w')
f.write(credentials.to_json())
f.close()
|
Write Credentials to file.
Args:
credentials: Credentials, the credentials to store.
Raises:
IOError if the file is a symbolic link.
|
juraj-google-style
|
async def claim_work(context):
log.debug('Calling claimWork...')
payload = {'workerGroup': context.config['worker_group'], 'workerId': context.config['worker_id'], 'tasks': 1}
try:
return (await context.queue.claimWork(context.config['provisioner_id'], context.config['worker_type'], payload))
except (taskcluster.exceptions.TaskclusterFailure, aiohttp.ClientError) as exc:
log.warning('{} {}'.format(exc.__class__, exc))
|
Find and claim the next pending task in the queue, if any.
Args:
context (scriptworker.context.Context): the scriptworker context.
Returns:
dict: a dict containing a list of the task definitions of the tasks claimed.
|
codesearchnet
|
def write_to_path(self, path, suffix='', format='png', overwrite=False):
if (os.path.exists(path) and (overwrite is False)):
raise ValueError('Error: use ovewrite=True to overwrite images')
if (not os.path.exists(path)):
os.makedirs(path)
for (i, r) in self.iterrows():
spath = os.path.join(path, r['project_name'], r['sample_name'])
if (not os.path.exists(spath)):
os.makedirs(spath)
if (suffix == ''):
fname = os.path.join(spath, ((r['frame_name'] + '.') + format))
else:
fname = os.path.join(spath, ((((r['frame_name'] + '_') + suffix) + '.') + format))
imageio.imwrite(fname, r['image'], format=format)
|
Output the data the dataframe's 'image' column to a directory structured by project->sample and named by frame
Args:
path (str): Where to write the directory of images
suffix (str): for labeling the imaages you write
format (str): default 'png' format to write the file
overwrite (bool): default False. if true can overwrite files in the path
Modifies:
Creates path folder if necessary and writes images to path
|
codesearchnet
|
def clear_collection(self, name) -> None:
self._check_not_finalized()
with self._lock:
if name in self._collections:
del self._collections[name]
|
Clears all values in a collection.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
|
github-repos
|
def delete_subscription(self, subscription_id):
return self.client._delete(self.url + 'subscriptions/{}'.format(subscription_id), headers=self.get_headers())
|
Unsubscribe, delete the relationship of the customer with the plan.
Args:
subscription_id: Identification of the subscription.
Returns:
|
juraj-google-style
|
def from_function(cls, f, *args, **kwargs):
return cls.from_code(six.get_function_code(f), *args, **kwargs)
|
Create a new instance from a function. Gets the code object from
the function and passes it and any other specified parameters to
:meth:`from_code`.
Arguments:
f(function): The function to get the code object from.
Returns:
CodeObject: A new :class:`CodeObject` instance.
|
juraj-google-style
|
def from_seed(cls, seed, alg=None):
if alg is None:
alg = DEFAULT_ALGORITHM
alg = random_ops_util.convert_alg_to_int(alg)
state = create_rng_state(seed, alg)
return cls(state=state, alg=alg)
|
Creates a generator from a seed.
A seed is a 1024-bit unsigned integer represented either as a Python
integer or a vector of integers. Seeds shorter than 1024-bit will be
padded. The padding, the internal structure of a seed and the way a seed
is converted to a state are all opaque (unspecified). The only semantics
specification of seeds is that two different seeds are likely to produce
two independent generators (but no guarantee).
Args:
seed: the seed for the RNG.
alg: (optional) the RNG algorithm. If None, it will be auto-selected. See
`__init__` for its possible values.
Returns:
The new generator.
|
github-repos
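A usage sketch, assuming the surrounding class is tf.random.Generator: two generators created from the same seed produce identical streams.
import tensorflow as tf

g1 = tf.random.Generator.from_seed(1234)
g2 = tf.random.Generator.from_seed(1234)
# Both draws are identical because the generators share a seed.
print(g1.normal(shape=[2, 3]))
print(g2.normal(shape=[2, 3]))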
|
def _ReadFileHeader(self, file_object):
data_type_map = self._GetDataTypeMap('keychain_file_header')
(file_header, _) = self._ReadStructureFromFileObject(file_object, 0, data_type_map)
if (file_header.signature != self._FILE_SIGNATURE):
raise errors.ParseError('Unsupported file signature.')
if ((file_header.major_format_version != self._MAJOR_VERSION) or (file_header.minor_format_version != self._MINOR_VERSION)):
raise errors.ParseError('Unsupported format version: {0:s}.{1:s}'.format(file_header.major_format_version, file_header.minor_format_version))
return file_header
|
Reads the file header.
Args:
file_object (file): file-like object.
Returns:
keychain_file_header: file header.
Raises:
ParseError: if the file header cannot be read.
|
codesearchnet
|
def tokenize(self, text):
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
|
Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
|
juraj-google-style
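A hedged sketch that exercises the greedy longest-match-first algorithm above with a tiny stand-in vocabulary and whitespace helper (the real tokenizer is built from a full BERT vocabulary):
def whitespace_tokenize(text):
    # Stand-in for the helper assumed by tokenize(): split on whitespace.
    return text.strip().split()

class TinyWordpieceTokenizer:
    vocab = {'un', '##aff', '##able'}
    unk_token = '[UNK]'
    max_input_chars_per_word = 100
    tokenize = tokenize  # reuse the method defined above

print(TinyWordpieceTokenizer().tokenize('unaffable'))
# ['un', '##aff', '##able']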
|
def _required_constraint_name(table: str, field, key):
return '{table}_{field}_required_{postfix}'.format(
table=table,
field=field.column,
postfix=key
)
|
Gets the name for a CONSTRAINT that applies
to a single hstore key.
Arguments:
table:
The name of the table the field is
a part of.
field:
The hstore field to create a
UNIQUE INDEX for.
key:
The name of the hstore key
to create the name for.
Returns:
The name for the UNIQUE index.
|
juraj-google-style
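A hedged sketch: the helper only reads field.column, so a SimpleNamespace stand-in is enough to show the generated name (table, column and key are hypothetical):
from types import SimpleNamespace

field = SimpleNamespace(column='attributes')
print(_required_constraint_name('products', field, 'color'))
# 'products_attributes_required_color'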
|
def copy(self):
new_store = EagerVariableStore()
for key, var in self._store._vars.items():
try:
index = var.name.index(':')
except ValueError:
stripped_var_name = var.name
else:
stripped_var_name = var.name[:index]
new_var = resource_variable_ops.ResourceVariable(var.read_value(), name=stripped_var_name, trainable=var.trainable)
new_store._store._vars[key] = new_var
return new_store
|
Copy this variable store and all of its contents.
Variables contained in this store will be copied over to the new variable
store, meaning that they can be modified without affecting the variables in
this store.
Returns:
A new EagerVariableStore instance containing copied variables.
|
github-repos
|