code (string, 20 to 4.93k chars) | docstring (string, 33 to 1.27k chars) | source (3 classes)
---|---|---|
def allowed_methods(self):
return [method for (method, allowed) in (('GET', hasattr(self, 'on_get')), ('POST', hasattr(self, 'on_post')), ('PUT', hasattr(self, 'on_put')), ('PATCH', hasattr(self, 'on_patch')), ('DELETE', hasattr(self, 'on_delete')), ('HEAD', hasattr(self, 'on_head')), ('OPTIONS', hasattr(self, 'on_options'))) if allowed]
|
Return list of allowed HTTP methods on this resource.
This is only for the purpose of building the resource description.
Returns:
list: list of allowed HTTP method names (uppercase)
|
codesearchnet
|
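A minimal usage sketch for the `allowed_methods` entry above; the resource classes below are hypothetical and trimmed to two HTTP methods, but they mirror the Falcon-style `on_<method>` responder pattern the code inspects.

```python
class Resource:
    # Trimmed copy of the allowed_methods idea above, limited to GET/POST for brevity.
    def allowed_methods(self):
        return [method for (method, allowed) in (
            ('GET', hasattr(self, 'on_get')),
            ('POST', hasattr(self, 'on_post'))) if allowed]

class ItemResource(Resource):
    def on_get(self, req, resp):  # only a GET responder is defined
        pass

print(ItemResource().allowed_methods())  # ['GET']
```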
def ParseDataStream(self, parser_mediator, file_entry, data_stream_name):
file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
if (not file_object):
raise RuntimeError('Unable to retrieve file-like object from file entry.')
try:
parser_names = self._GetSignatureMatchParserNames(file_object)
parse_with_non_sigscan_parsers = True
if parser_names:
parse_result = self._ParseFileEntryWithParsers(parser_mediator, parser_names, file_entry, file_object=file_object)
if (parse_result in (self._PARSE_RESULT_FAILURE, self._PARSE_RESULT_SUCCESS)):
parse_with_non_sigscan_parsers = False
if parse_with_non_sigscan_parsers:
self._ParseFileEntryWithParsers(parser_mediator, self._non_sigscan_parser_names, file_entry, file_object=file_object)
finally:
file_object.close()
|
Parses a data stream of a file entry with the enabled parsers.
Args:
parser_mediator (ParserMediator): parser mediator.
file_entry (dfvfs.FileEntry): file entry.
data_stream_name (str): data stream name.
Raises:
RuntimeError: if the file-like object or the parser object is missing.
|
codesearchnet
|
def plot_spectra_overlapped(ss, title=None, setup=_default_setup):
plt.figure()
draw_spectra_overlapped(ss, title, setup)
plt.show()
|
Plots one or more spectra in the same plot.
Args:
ss: list of Spectrum objects
title=None: window title
setup: PlotSpectrumSetup object
|
juraj-google-style
|
def potcar_spec(filename):
p_spec = {}
with open(filename, 'r') as f:
potcars = re.split('(End of Dataset\n)', f.read())
potcar_md5sums = [md5sum(''.join(pair)) for pair in zip(potcars[::2], potcars[1:(- 1):2])]
for this_md5sum in potcar_md5sums:
for ps in potcar_sets:
for (p, p_md5sum) in potcar_md5sum_data[ps].items():
if (this_md5sum == p_md5sum):
p_spec[p] = ps
if (len(p_spec) != len(potcar_md5sums)):
raise ValueError('One or more POTCARs did not have matching md5 hashes')
return p_spec
|
Returns a dictionary specifying the pseudopotentials contained in a POTCAR file.
Args:
filename (str): The name of the POTCAR file to process.
Returns:
(dict): A dictionary of pseudopotential filename: dataset pairs, e.g.
{ 'Fe_pv': 'PBE_54', 'O': 'PBE_54' }
|
codesearchnet
|
def set_standby_timeout(timeout, power='ac', scheme=None):
return _set_powercfg_value(scheme=scheme, sub_group='SUB_SLEEP', setting_guid='STANDBYIDLE', power=power, value=timeout)
|
Set the standby timeout in minutes for the given power scheme
Args:
timeout (int):
The amount of time in minutes before the computer sleeps
power (str):
Set the value for AC or DC power. Default is ``ac``. Valid options
are:
- ``ac`` (AC Power)
- ``dc`` (Battery)
scheme (str):
The scheme to use, leave as ``None`` to use the current. Default is
``None``. This can be the GUID or the Alias for the Scheme. Known
Aliases are:
- ``SCHEME_BALANCED`` - Balanced
- ``SCHEME_MAX`` - Power saver
- ``SCHEME_MIN`` - High performance
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
# Sets the system standby timeout to 30 minutes on Battery
salt '*' powercfg.set_standby_timeout 30 power=dc
|
codesearchnet
|
def config_insync(self):
status = self.get('config/insync').get('configInSync', False)
if (status is None):
status = False
return status
|
Returns whether the config is in sync, i.e. whether the running
configuration is the same as that on disk.
Returns:
bool
|
codesearchnet
|
def get_time_evolution(self):
term = self.simplify()
coeff = term.coeff
if coeff.imag:
raise ValueError('Not a real coefficient.')
ops = term.ops
def append_to_circuit(circuit, t):
if (not ops):
return
for op in ops:
n = op.n
if (op.op == 'X'):
circuit.h[n]
elif (op.op == 'Y'):
circuit.rx((- half_pi))[n]
for i in range(1, len(ops)):
circuit.cx[(ops[(i - 1)].n, ops[i].n)]
circuit.rz((((- 2) * coeff) * t))[ops[(- 1)].n]
for i in range((len(ops) - 1), 0, (- 1)):
circuit.cx[(ops[(i - 1)].n, ops[i].n)]
for op in ops:
n = op.n
if (op.op == 'X'):
circuit.h[n]
elif (op.op == 'Y'):
circuit.rx(half_pi)[n]
return append_to_circuit
|
Get the function to append the time evolution of this term.
Returns:
function(circuit: Circuit, t: float):
Add gates for time evolution to `circuit` with time `t`
|
codesearchnet
|
def _str_to_ord(content, weights):
ordinal = 0
for i, c in enumerate(content):
ordinal += weights[i] * _ALPHABET.index(c) + 1
return ordinal
|
Converts a string to its lexicographical order.
Args:
content: the string to convert. Of type str.
weights: weights from _get_weights.
Returns:
an int or long that represents the order of this string. "" has order 0.
|
juraj-google-style
|
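A small worked example for the `_str_to_ord` entry above; `_ALPHABET` and `weights` here are illustrative stand-ins rather than the library's real `_get_weights` output.

```python
_ALPHABET = 'abc'   # hypothetical 3-letter alphabet
weights = [4, 1]    # per-position weights, most-significant position first

def _str_to_ord(content, weights):
    ordinal = 0
    for i, c in enumerate(content):
        ordinal += weights[i] * _ALPHABET.index(c) + 1
    return ordinal

print(_str_to_ord('', weights))    # 0 -> "" has order 0, as documented
print(_str_to_ord('a', weights))   # 1 -> 4*0 + 1
print(_str_to_ord('ba', weights))  # 6 -> (4*1 + 1) + (1*0 + 1)
```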
def _call(callable_obj, arg_names, namespace):
arguments = {arg_name: getattr(namespace, arg_name) for arg_name in arg_names}
return callable_obj(**arguments)
|
Actually calls the callable with the namespace parsed from the command
line.
Args:
callable_obj: a callable object
arg_names: name of the function arguments
namespace: the namespace object parsed from the command line
|
codesearchnet
|
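A self-contained usage sketch of `_call` with an argparse namespace; the `greet` function and its flags are invented for illustration.

```python
import argparse

def _call(callable_obj, arg_names, namespace):
    arguments = {arg_name: getattr(namespace, arg_name) for arg_name in arg_names}
    return callable_obj(**arguments)

def greet(name, times):
    return ' '.join(['hello ' + name] * times)

parser = argparse.ArgumentParser()
parser.add_argument('--name')
parser.add_argument('--times', type=int)
ns = parser.parse_args(['--name', 'world', '--times', '2'])
print(_call(greet, ['name', 'times'], ns))  # hello world hello world
```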
def upload_timeline(self, timeline_name, plaso_storage_path):
resource_url = '{0:s}/upload/'.format(self.api_base_url)
files = {'file': open(plaso_storage_path, 'rb')}
data = {'name': timeline_name}
response = self.session.post(resource_url, files=files, data=data)
try:
response_dict = response.json()
except ValueError:
raise RuntimeError('Could not decode JSON response from Timesketch (Status {0:d}):\n{1:s}'.format(response.status_code, response.content))
index_id = response_dict['objects'][0]['id']
return index_id
|
Create a timeline with the specified name from the given plaso file.
Args:
timeline_name (str): Name of timeline
plaso_storage_path (str): Local path of plaso file to be uploaded
Returns:
int: ID of uploaded timeline
Raises:
RuntimeError: When the JSON response from Timesketch cannot be decoded.
|
codesearchnet
|
def _HasDuplicateRegistryKeyPaths(self, filename, artifact_definition, source):
result = False
intersection = self._artifact_registry_key_paths.intersection(set(source.keys))
if intersection:
duplicate_key_paths = '\n'.join(intersection)
logging.warning('Artifact definition: {0:s} in file: {1:s} has duplicate Registry key paths:\n{2:s}'.format(artifact_definition.name, filename, duplicate_key_paths))
result = True
self._artifact_registry_key_paths.update(source.keys)
return result
|
Checks if Registry key paths are not already defined by other artifacts.
Note that at the moment this function will only find exact duplicate
Registry key paths.
Args:
filename (str): name of the artifacts definition file.
artifact_definition (ArtifactDefinition): artifact definition.
source (SourceType): source definition.
Returns:
bool: True if the Registry key paths defined by the source type
are used in other artifacts.
|
codesearchnet
|
def weighted_moments_v2(x, axes, frequency_weights, keepdims=False, name=None):
return weighted_moments(x=x, axes=axes, frequency_weights=frequency_weights, name=name, keep_dims=keepdims)
|
Returns the frequency-weighted mean and variance of `x`.
Args:
x: A tensor.
axes: 1-d tensor of int32 values; these are the axes along which
to compute mean and variance.
frequency_weights: A tensor of positive weights which can be
broadcast with x.
keepdims: Produce moments with the same dimensionality as the input.
name: Name used to scope the operation.
Returns:
Two tensors: `weighted_mean` and `weighted_variance`.
|
github-repos
|
def _acquire(self, uuid_path):
for index in range(self._min_third_octet, self._max_third_octet + 1):
lease = self.create_lease_object_from_idx(index)
if self._lease_valid(lease):
continue
self._take_lease(lease, uuid_path, safe=False)
return lease.to_ip_network()
raise LagoSubnetLeaseStoreFullException(self.get_allowed_range())
|
Lease a free network for the given uuid path
Args:
uuid_path (str): Path to the uuid file of a :class:`lago.Prefix`
Returns:
netaddr.IPNetwork: Which represents the selected subnet
Raises:
LagoSubnetLeaseException: If the store is full
|
juraj-google-style
|
def handle_discovery_request(self, path, request, start_response):
if (path == self._GET_REST_API):
return self._get_rest_doc(request, start_response)
elif (path == self._GET_RPC_API):
error_msg = 'RPC format documents are no longer supported with the Endpoints Framework for Python. Please use the REST format.'
_logger.error('%s', error_msg)
return util.send_wsgi_error_response(error_msg, start_response)
elif (path == self._LIST_API):
return self._list(request, start_response)
return False
|
Returns the result of a discovery service request.
This calls start_response and returns the response body.
Args:
path: A string containing the API path (the portion of the path
after /_ah/api/).
request: An ApiRequest, the transformed request sent to the Discovery API.
start_response: A function with semantics defined in PEP-333.
Returns:
The response body. Or returns False if the request wasn't handled by
DiscoveryService.
|
codesearchnet
|
def date_to_epoch(year, month, day):
return int(date_to_delorean(year, month, day).epoch)
|
Converts a date to epoch in UTC
Args:
year: int between 1 and 9999.
month: int between 1 and 12.
day: int between 1 and 31.
Returns:
Int epoch in UTC from date.
|
codesearchnet
|
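The `date_to_epoch` entry above relies on a delorean-based `date_to_delorean` helper; the sketch below shows a rough standard-library equivalent of the same UTC epoch conversion.

```python
import datetime

def date_to_epoch_stdlib(year, month, day):
    # Midnight UTC of the given date, expressed as a Unix epoch (seconds).
    dt = datetime.datetime(year, month, day, tzinfo=datetime.timezone.utc)
    return int(dt.timestamp())

print(date_to_epoch_stdlib(1970, 1, 2))  # 86400
```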
def _commit_change(alias_table, export_path=None, post_commit=True):
with open((export_path or GLOBAL_ALIAS_PATH), 'w+') as alias_config_file:
alias_table.write(alias_config_file)
if post_commit:
alias_config_file.seek(0)
alias_config_hash = hashlib.sha1(alias_config_file.read().encode('utf-8')).hexdigest()
AliasManager.write_alias_config_hash(alias_config_hash)
collided_alias = AliasManager.build_collision_table(alias_table.sections())
AliasManager.write_collided_alias(collided_alias)
build_tab_completion_table(alias_table)
|
Record changes to the alias table.
Also write new alias config hash and collided alias, if any.
Args:
alias_table: The alias table to commit.
export_path: The path to export the aliases to. Default: GLOBAL_ALIAS_PATH.
post_commit: True if we want to perform some extra actions after writing alias to file.
|
codesearchnet
|
def uni_to_beta(text):
u = _UNICODE_MAP
transform = []
for ch in text:
try:
conv = u[ch]
except KeyError:
conv = ch
transform.append(conv)
converted = ''.join(transform)
return converted
|
Convert unicode text to a betacode equivalent.
This method can handle tónos or oxeîa characters in the input.
Args:
text: The text to convert to betacode. This text does not have to all be
Greek polytonic text, and only Greek characters will be converted. Note
that in this case, you cannot convert to beta and then back to unicode.
Returns:
The betacode equivalent of the inputted text where applicable.
|
codesearchnet
|
def _html_tree_view_render(self, *, view: 'HtmlTreeView', name: Optional[str]=None, parent: Any=None, root_path: Optional[KeyPath]=None, **kwargs) -> Html:
return self._html_tree_view(view=view, name=name, parent=parent, root_path=root_path, **view.get_kwargs(kwargs, self._html_tree_view_config(), root_path or KeyPath())).add_style(*self._html_tree_view_css_styles())
|
The entrypoint of rendering the subtree represented by this extension.
Args:
view: The view to render the object.
name: The name of the object.
parent: The parent of the object.
root_path: The key path of the object relative to the root.
**kwargs: kwargs to pass to `view.render()` on this extension.
Returns:
The rendered HTML.
|
github-repos
|
def _open_rpc_interface(self, connection_id, callback):
try:
context = self.connections.get_context(connection_id)
except ArgumentError:
callback(connection_id, self.id, False, 'Could not find connection information')
return
self.connections.begin_operation(connection_id, 'open_interface', callback, self.get_config('default_timeout'))
try:
service = context['services'][TileBusService]
header_characteristic = service[ReceiveHeaderChar]
payload_characteristic = service[ReceivePayloadChar]
except KeyError:
self.connections.finish_operation(connection_id, False, "Can't find characteristics to open rpc interface")
return
self.bable.set_notification(enabled=True, connection_handle=context['connection_handle'], characteristic=header_characteristic, on_notification_set=[self._on_interface_opened, context, payload_characteristic], on_notification_received=self._on_notification_received, sync=False)
|
Enable RPC interface for this IOTile device
Args:
connection_id (int): The unique identifier for the connection
callback (callback): Callback to be called when this command finishes
callback(conn_id, adapter_id, success, failure_reason)
|
codesearchnet
|
def get_variation_for_rollout(self, rollout, user_id, attributes=None):
if rollout and len(rollout.experiments) > 0:
for idx in range(len(rollout.experiments) - 1):
experiment = self.config.get_experiment_from_key(rollout.experiments[idx].get('key'))
if not audience_helper.is_user_in_experiment(self.config, experiment, attributes, self.logger):
self.logger.debug('User "%s" does not meet conditions for targeting rule %s.' % (
user_id,
idx + 1
))
continue
self.logger.debug('User "%s" meets conditions for targeting rule %s.' % (user_id, idx + 1))
bucketing_id = self._get_bucketing_id(user_id, attributes)
variation = self.bucketer.bucket(experiment, user_id, bucketing_id)
if variation:
self.logger.debug('User "%s" is in variation %s of experiment %s.' % (
user_id,
variation.key,
experiment.key
))
return Decision(experiment, variation, enums.DecisionSources.ROLLOUT)
else:
self.logger.debug('User "%s" is not in the traffic group for the targeting else. '
'Checking "Everyone Else" rule now.' % user_id)
break
everyone_else_experiment = self.config.get_experiment_from_key(rollout.experiments[-1].get('key'))
if audience_helper.is_user_in_experiment(self.config,
self.config.get_experiment_from_key(rollout.experiments[-1].get('key')),
attributes,
self.logger):
bucketing_id = self._get_bucketing_id(user_id, attributes)
variation = self.bucketer.bucket(everyone_else_experiment, user_id, bucketing_id)
if variation:
self.logger.debug('User "%s" meets conditions for targeting rule "Everyone Else".' % user_id)
return Decision(everyone_else_experiment, variation, enums.DecisionSources.ROLLOUT)
return Decision(None, None, enums.DecisionSources.ROLLOUT)
|
Determine which experiment/variation the user is in for a given rollout.
Returns the variation of the first experiment the user qualifies for.
Args:
rollout: Rollout for which we are getting the variation.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Decision namedtuple consisting of experiment and variation for the user.
|
juraj-google-style
|
def to_grayscale(img):
gray = numpy.asarray(ImageOps.grayscale(img)).astype(numpy.float)
imbands = img.getbands()
alpha = None
if ('A' in imbands):
alpha = numpy.asarray(img.split()[(- 1)]).astype(numpy.float)
return (gray, alpha)
|
Convert PIL image to numpy grayscale array and numpy alpha array.
Args:
img (PIL.Image): PIL Image object.
Returns:
(gray, alpha): both numpy arrays.
|
codesearchnet
|
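A usage sketch for the `to_grayscale` entry above, assuming Pillow and numpy are installed; plain `float` replaces the deprecated `numpy.float` alias so the sketch runs on current numpy.

```python
import numpy
from PIL import Image, ImageOps

def to_grayscale(img):
    gray = numpy.asarray(ImageOps.grayscale(img)).astype(float)
    alpha = None
    if 'A' in img.getbands():
        alpha = numpy.asarray(img.split()[-1]).astype(float)
    return (gray, alpha)

img = Image.new('RGBA', (2, 2), (255, 0, 0, 128))
gray, alpha = to_grayscale(img)
print(gray.shape, alpha.shape)  # (2, 2) (2, 2)
```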
def GetTaskPendingMerge(self, current_task):
next_task = self._tasks_pending_merge.PeekTask()
if not next_task:
return None
if current_task and next_task.merge_priority > current_task.merge_priority:
return None
with self._lock:
next_task = self._tasks_pending_merge.PopTask()
self._tasks_merging[next_task.identifier] = next_task
return next_task
|
Retrieves the first task that is pending merge or has a higher priority.
This function will check if there is a task with a higher merge priority
than the current_task being merged. If so, that task with the higher
priority is returned.
Args:
current_task (Task): current task being merged or None if no such task.
Returns:
Task: the next task to merge or None if there is no task pending merge or
with a higher priority.
|
juraj-google-style
|
def get_box_threads(self, box_key):
uri = '/'.join([
self.api_uri,
self.boxes_suffix,
box_key,
self.threads_suffix
])
return self._req('get', uri)
|
Gets all threads in a specified box
Args:
box_key: box to look in
Returns:
list: a list of thread dicts
|
juraj-google-style
|
def set_guest_access(self, room_id, guest_access):
content = {
"guest_access": guest_access
}
return self.send_state_event(room_id, "m.room.guest_access", content)
|
Set the guest access policy of the room.
Args:
room_id(str): The room to set the rules for.
guest_access(str): Whether guests can join. One of: ["can_join",
"forbidden"]
|
juraj-google-style
|
def compute_recall(self, result_neighbors, ground_truth_neighbors):
self.assertLen(result_neighbors.shape, 2)
self.assertLen(ground_truth_neighbors.shape, 2)
self.assertEqual(result_neighbors.shape[0], ground_truth_neighbors.shape[0])
gt_sets = [set(np.asarray(x)) for x in ground_truth_neighbors]
def hits_per_q(q, nn_per_q):
return len(list((x for x in nn_per_q if x.item() in gt_sets[q])))
hits = sum((hits_per_q(q, nn_per_q) for q, nn_per_q in enumerate(result_neighbors)))
return hits / ground_truth_neighbors.size
|
Computes the recall of an approximate nearest neighbor search.
Args:
result_neighbors: int32 numpy array of the shape [num_queries,
neighbors_per_query] where the values are the indices of the dataset.
ground_truth_neighbors: int32 numpy array of with shape [num_queries,
ground_truth_neighbors_per_query] where the values are the indices of
the dataset.
Returns:
The recall.
|
github-repos
|
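A standalone version of the recall computation from the `compute_recall` entry above, stripped of the test-case assertions and run on tiny made-up neighbor sets.

```python
import numpy as np

def compute_recall(result_neighbors, ground_truth_neighbors):
    gt_sets = [set(np.asarray(x)) for x in ground_truth_neighbors]
    hits = sum(
        len([x for x in nn_per_q if x.item() in gt_sets[q]])
        for q, nn_per_q in enumerate(result_neighbors))
    return hits / ground_truth_neighbors.size

result = np.array([[0, 1], [2, 3]], dtype=np.int32)        # approximate neighbors per query
ground_truth = np.array([[0, 9], [3, 2]], dtype=np.int32)  # true neighbors per query
print(compute_recall(result, ground_truth))                # 0.75 -> 3 of the 4 true neighbors found
```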
def set_parent(self, parent):
if not isinstance(parent, Node):
raise TypeError("parent must be a Node")
self.parent = parent
|
Set the parent of this ``Node`` object. Use this carefully, otherwise you may damage the structure of this ``Tree`` object.
Args:
``Node``: The new parent of this ``Node``
|
juraj-google-style
|
def get_whois_tags(ip_address):
whois = IPWhois(ip_address).lookup_whois()
nets = whois.get('nets', None)
if (not nets):
return []
cities = [net['city'] for net in nets if net.get('city', None)]
address_list = []
for net in nets:
address = net.get('address', None)
if (not address):
continue
if (('description' in net) and net['description']):
address = address.replace(net['description'], '').strip()
if ('\n' in address):
address = ', '.join(address.splitlines())
address_list.append(address)
return [SourceString(val, source='Whois') for val in set((cities + address_list))]
|
Get list of tags with `address` for given `ip_address`.
Args:
ip_address (str): IP address to analyze.
Returns:
list: List of :class:`.SourceString` objects.
|
codesearchnet
|
def parse_header(line):
if ((not line) or (line == '\r\n')):
return None
if (line[0] in ' \t'):
return line[1:].rstrip()
(name, value) = line.split(':', 1)
return (name.strip(), value.strip())
|
Parse a header line.
Args:
line: A header line as a string.
Returns:
None if end of headers is found. A string giving the continuation line
if a continuation is found. A tuple of name, value when a header line is
found.
Raises:
ValueError: If the line cannot be parsed as a header.
|
codesearchnet
|
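A quick usage sketch for the `parse_header` entry above (assuming that definition is in scope), covering the three documented outcomes: a header tuple, a continuation string, and `None` at end of headers.

```python
print(parse_header('Content-Type: text/plain\r\n'))  # ('Content-Type', 'text/plain')
print(parse_header(' charset=utf-8\r\n'))            # 'charset=utf-8' (continuation line)
print(parse_header('\r\n'))                          # None (end of headers)
```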
def _try_to_clean_garbage(self, writer_spec, exclude_list=()):
tmpl = string.Template(self._TMPFILE_PREFIX)
prefix = tmpl.substitute(id=self.status.mapreduce_id, shard=self.status.shard)
bucket = self._get_tmp_gcs_bucket(writer_spec)
account_id = self._get_tmp_account_id(writer_spec)
for f in cloudstorage.listbucket(('/%s/%s' % (bucket, prefix)), _account_id=account_id):
if (f.filename not in exclude_list):
self._remove_tmpfile(f.filename, self.status.writer_spec)
|
Tries to remove any files created by this shard that aren't needed.
Args:
writer_spec: writer_spec for the MR.
exclude_list: A list of filenames (strings) that should not be
removed.
|
codesearchnet
|
def qubits_tab(backend):
props = backend.properties().to_dict()
header_html = "<div><font style='font-weight:bold'>{key}</font>: {value}</div>"
header_html = header_html.format(key='last_update_date', value=props['last_update_date'])
update_date_widget = widgets.HTML(value=header_html)
qubit_html = '<table>'
# NOTE: the trailing background colour is an assumed value; the original style string is truncated at this point.
qubit_html += '<style>\ntable {\n border-collapse: collapse;\n width: auto;\n}\n\nth, td {\n text-align: left;\n padding: 8px;\n}\n\ntr:nth-child(even) {background-color: #f2f2f2;}\n</style>'
qubit_html += '<tr><th></th><th>Frequency</th><th>T1</th><th>T2</th>'
qubit_html += '<th>U1 gate error</th><th>U2 gate error</th><th>U3 gate error</th>'
qubit_html += '<th>Readout error</th></tr>'
qubit_footer = '</table>'
for qub in range(len(props['qubits'])):
name = ('Q%s' % qub)
qubit_data = props['qubits'][qub]
gate_data = props['gates'][(3 * qub):((3 * qub) + 3)]
t1_info = qubit_data[0]
t2_info = qubit_data[1]
freq_info = qubit_data[2]
readout_info = qubit_data[3]
freq = ((str(round(freq_info['value'], 5)) + ' ') + freq_info['unit'])
T1 = ((str(round(t1_info['value'], 5)) + ' ') + t1_info['unit'])
T2 = ((str(round(t2_info['value'], 5)) + ' ') + t2_info['unit'])
U1 = str(round(gate_data[0]['parameters'][0]['value'], 5))
U2 = str(round(gate_data[1]['parameters'][0]['value'], 5))
U3 = str(round(gate_data[2]['parameters'][0]['value'], 5))
readout_error = round(readout_info['value'], 5)
qubit_html += "<tr><td><font style='font-weight:bold'>%s</font></td><td>%s</td>"
qubit_html += '<td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>'
qubit_html = (qubit_html % (name, freq, T1, T2, U1, U2, U3, readout_error))
qubit_html += qubit_footer
qubit_widget = widgets.HTML(value=qubit_html)
out = widgets.VBox([update_date_widget, qubit_widget])
return out
|
The qubits properties widget
Args:
backend (IBMQbackend): The backend.
Returns:
VBox: A VBox widget.
|
codesearchnet
|
def calculate_signatures(self):
if (not self.signing_algorithm):
return []
algo_id = {'sha1': 1, 'sha384': 2}[self.signing_algorithm]
hashers = [(algo_id, make_hasher(algo_id))]
for block in get_signature_data(self.fileobj, self.filesize):
[h.update(block) for (_, h) in hashers]
signatures = [(algo_id, sign_hash(self.signing_key, h.finalize(), h.algorithm.name)) for (algo_id, h) in hashers]
return signatures
|
Calculate the signatures for this MAR file.
Returns:
A list of signature tuples: [(algorithm_id, signature_data), ...]
|
codesearchnet
|
def get_revisions(page):
start_string = ' <revision>\n'
end_string = ' </revision>\n'
ret = []
current_pos = 0
while True:
start_pos = page.find(start_string, current_pos)
if (start_pos == (- 1)):
break
end_pos = page.find(end_string, start_pos)
assert (end_pos != (- 1))
ret.append(page[(start_pos + len(start_string)):end_pos])
current_pos = (end_pos + len(end_string))
return ret
|
Extract the revisions of a page.
Args:
page: a string
Returns:
a list of strings
|
codesearchnet
|
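A worked example for the `get_revisions` entry above (assuming that definition is in scope); the page snippet mimics the `<revision>` markers the function searches for, using the same leading-space literals.

```python
page = (' <revision>\nfirst edit\n </revision>\n'
        ' <revision>\nsecond edit\n </revision>\n')
print(get_revisions(page))  # ['first edit\n', 'second edit\n']
```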
def main(args=None):
if (args is None):
args = sys.argv[1:]
parser = create_parser()
args = parser.parse_args(args)
if (args.verbose >= 2):
level = logging.DEBUG
elif (args.verbose >= 1):
level = logging.INFO
else:
level = logging.WARNING
logging.basicConfig(level=level)
try:
args.command(args)
except pylink.JLinkException as e:
sys.stderr.write(('Error: %s%s' % (str(e), os.linesep)))
return 1
return 0
|
Main command-line interface entrypoint.
Runs the subcommand or arguments that were specified. If no ``args``
parameter is given, assumes the arguments are passed on the command line.
Args:
args (list): list of command-line arguments
Returns:
Zero on success, non-zero otherwise.
|
codesearchnet
|
def key_changes(self, from_token, to_token):
params = {"from": from_token, "to": to_token}
return self._send("GET", "/keys/changes", query_params=params)
|
Gets a list of users who have updated their device identity keys.
Args:
from_token (str): The desired start point of the list. Should be the
next_batch field from a response to an earlier call to /sync.
to_token (str): The desired end point of the list. Should be the next_batch
field from a recent call to /sync - typically the most recent such call.
|
juraj-google-style
|
def main(jlink_serial, device):
buf = StringIO.StringIO()
jlink = pylink.JLink(log=buf.write, detailed_log=buf.write)
jlink.open(serial_no=jlink_serial)
jlink.set_tif(pylink.enums.JLinkInterfaces.SWD)
jlink.connect(device, verbose=True)
big_endian = jlink.set_little_endian()
if big_endian:
jlink.set_big_endian()
print('Target Endian Mode: %s Endian' % ('Big' if big_endian else 'Little'))
|
Main function.
Args:
jlink_serial (str): the J-Link serial number
device (str): the target CPU
Returns:
``None``
Raises:
JLinkException: on error
|
juraj-google-style
|
class UnitNorm(Constraint):
def __init__(self, axis=0):
self.axis = axis
def __call__(self, w):
w = backend.convert_to_tensor(w)
return w / (backend.epsilon() + ops.sqrt(ops.sum(ops.square(w), axis=self.axis, keepdims=True)))
def get_config(self):
return {'axis': self.axis}
|
Constrains the weights incident to each hidden unit to have unit norm.
Args:
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Conv2D` layer with `data_format="channels_last"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
|
github-repos
|
def altitude(msg):
tc = common.typecode(msg)
if tc<9 or tc==19 or tc>22:
raise RuntimeError("%s: Not a airborn position message" % msg)
mb = common.hex2bin(msg)[32:]
if tc < 19:
q = mb[15]
if q:
n = common.bin2int(mb[8:15]+mb[16:20])
alt = n * 25 - 1000
else:
alt = None
else:
alt = common.bin2int(mb[8:20]) * 3.28084
return alt
|
Decode aircraft altitude
Args:
msg (string): 28-character hexadecimal message string
Returns:
int: altitude in feet
|
juraj-google-style
|
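A quick arithmetic illustration of the two branches in the `altitude` entry above, not a full message decode: with the Q bit set, the 11-bit barometric value N maps to N*25 - 1000 ft, while the GNSS branch converts metres to feet.

```python
N = 1608
print(N * 25 - 1000)          # 39200 ft for the barometric branch (tc 9-18, Q bit set)
print(round(1000 * 3.28084))  # 3281 ft for a 1000 m GNSS height (tc 20-22)
```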
def from_input(cls, input, workdir=None, manager=None):
return cls(input, workdir=workdir, manager=manager)
|
Create an instance of `AbinitTask` from an ABINIT input.
Args:
input: `AbinitInput` object.
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
|
codesearchnet
|
def split_vert_on_nonmanifold_face(script, vert_displacement_ratio=0.0):
filter_xml = ''.join([' <filter name="Split Vertexes Incident on Non Manifold Faces">\n', ' <Param name="VertDispRatio" ', 'value="{}" '.format(vert_displacement_ratio), 'description="Vertex Displacement Ratio" ', 'type="RichFloat" ', '/>\n', ' </filter>\n'])
util.write_filter(script, filter_xml)
return None
|
Split non-manifold vertices until it becomes two-manifold.
Args:
script: the FilterScript object or script filename to write
the filter to.
vert_displacement_ratio (float): When a vertex is split it is moved
along the average vector going from its position to the centroid
of the FF connected faces sharing it.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
|
codesearchnet
|
def nearest_neighbour_delta_E(self):
delta_nn = ((self.final_site.nn_occupation() - self.initial_site.nn_occupation()) - 1)
return (delta_nn * self.nearest_neighbour_energy)
|
Nearest-neighbour interaction contribution to the change in system energy if this jump were accepted.
Args:
None
Returns:
(Float): delta E (nearest-neighbour)
|
codesearchnet
|
def parse_results(self, data):
results = []
if len(data["Records"]) < 1:
return -1
codes = data["Records"][0]["Results"]
for code in codes.split(","):
results.append(str(code))
self.addr1 = data["Records"][0]["AddressLine1"]
self.addr2 = data["Records"][0]["AddressLine2"]
self.city = data["Records"][0]["City"]
self.name = data["Records"][0]["NameFull"]
self.phone = data["Records"][0]["PhoneNumber"]
self.province = data["Records"][0]["State"]
self.postal = data["Records"][0]["PostalCode"]
self.recordID = data["Records"][0]["RecordID"]
return results
|
parse_results
Parses the MelissaData response.
Args:
data (dict): Contains MelissaData response
Returns:
results: a list of result codes, or -1 for an invalid address; the corrected address fields are stored on the instance.
|
juraj-google-style
|
def peek(init, exposes, debug=False):
def _peek(store, container, _stack=None):
args = [ store.peek(objname, container, _stack=_stack) \
for objname in exposes ]
if debug:
print(args)
return init(*args)
return _peek
|
Default deserializer factory.
Arguments:
init (callable): type constructor.
exposes (iterable): attributes to be peeked and passed to `init`.
Returns:
callable: deserializer (`peek` routine).
|
juraj-google-style
|
def __init__(self, default_environment: Optional[environments.Environment]=None, bundle_repeat: int=0, use_state_iterables: bool=False, provision_info: Optional['ExtendedProvisionInfo']=None, progress_request_frequency: Optional[float]=None, is_drain: bool=False) -> None:
super().__init__()
self._default_environment = default_environment or environments.EmbeddedPythonEnvironment.default()
self._bundle_repeat = bundle_repeat
self._num_workers = 1
self._progress_frequency = progress_request_frequency
self._profiler_factory: Optional[Callable[..., Profile]] = None
self._use_state_iterables = use_state_iterables
self._is_drain = is_drain
self._provision_info = provision_info or ExtendedProvisionInfo(beam_provision_api_pb2.ProvisionInfo(retrieval_token='unused-retrieval-token'))
|
Creates a new Fn API Runner.
Args:
default_environment: the default environment to use for UserFns.
bundle_repeat: replay every bundle this many extra times, for profiling
and debugging
use_state_iterables: Intentionally split gbk iterables over state API
(for testing)
provision_info: provisioning info to make available to workers, or None
progress_request_frequency: The frequency (in seconds) that the runner
waits before requesting progress from the SDK.
is_drain: identifies whether to expand the sdf graph in drain mode.
|
github-repos
|
def scripthash_to_address(scripthash):
sb = (bytearray([ADDRESS_VERSION]) + scripthash)
c256 = bin_dbl_sha256(sb)[0:4]
outb = (sb + bytearray(c256))
return base58.b58encode(bytes(outb)).decode('utf-8')
|
Convert a script hash to a public address.
Args:
scripthash (bytes):
Returns:
str: base58 encoded string representing the wallet address.
|
codesearchnet
|
def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1):
q_ids = tf.range(query_size, dtype=tf.int32)
k_ids = tf.range(key_size, dtype=tf.int32)
rel_pos_ids = q_ids[:, None] - tf.tile(tf.expand_dims(k_ids, axis=0), [shape_list(q_ids)[0], 1])
if bucket_size > 0 and max_position > 0:
rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)
rel_pos_ids = rel_pos_ids[:query_size, :]
rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0)
return tf.cast(rel_pos_ids, tf.int64)
|
Build relative position according to the query and key
We assume the absolute position of query \(P_q\) is range from (0, query_size) and the absolute position of key
\(P_k\) is range from (0, key_size), The relative positions from query to key is \(R_{q \rightarrow k} = P_q -
P_k\)
Args:
query_size (int): the length of query
key_size (int): the length of key
bucket_size (int): the size of position bucket
max_position (int): the maximum allowed absolute position
Return:
`tf.Tensor`: A tensor with shape [1, query_size, key_size]
|
github-repos
|
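A usage sketch for the `build_relative_position` entry above, assuming TensorFlow and the `shape_list` helper referenced by that code are importable; entry [q, k] of the result is simply q - k.

```python
rel_pos = build_relative_position(query_size=3, key_size=4)
print(rel_pos.shape)          # (1, 3, 4)
print(rel_pos[0, 0].numpy())  # [ 0 -1 -2 -3] -> keys relative to query position 0
```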
def virt_env(self):
if (self._virt_env is None):
self._virt_env = self._create_virt_env()
return self._virt_env
|
Getter for this instance's virt env, creates it if needed
Returns:
lago.virt.VirtEnv: virt env instance used by this prefix
|
codesearchnet
|
def observe(self, value):
self._buffer.append(value)
if (len(self._buffer) == _BUFFER_SIZE):
self._flush()
|
Samples an observation's value.
Args:
value: A numeric value signifying the value to be sampled.
|
codesearchnet
|
def get_subnets(target='ec2', purpose='internal', env='', region=''):
account_az_dict = defaultdict(defaultdict)
subnet_id_dict = defaultdict(defaultdict)
subnet_url = '{0}/subnets/aws'.format(API_URL)
subnet_response = requests.get(subnet_url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
if (not subnet_response.ok):
raise SpinnakerTimeout(subnet_response.text)
subnet_list = subnet_response.json()
for subnet in subnet_list:
LOG.debug('Subnet: %(account)s\t%(region)s\t%(target)s\t%(vpcId)s\t%(availabilityZone)s', subnet)
if (subnet.get('target', '') == target):
availability_zone = subnet['availabilityZone']
account = subnet['account']
subnet_region = subnet['region']
subnet_id = subnet['id']
try:
if (availability_zone not in account_az_dict[account][subnet_region]):
account_az_dict[account][subnet_region].append(availability_zone)
except KeyError:
account_az_dict[account][subnet_region] = [availability_zone]
if (subnet['purpose'] == purpose):
try:
subnet_id_dict[account][subnet_region].append(subnet_id)
except KeyError:
subnet_id_dict[account][subnet_region] = [subnet_id]
LOG.debug('%s regions: %s', account, list(account_az_dict[account].keys()))
if all([env, region]):
try:
region_dict = {region: account_az_dict[env][region]}
region_dict['subnet_ids'] = {region: subnet_id_dict[env][region]}
LOG.debug('Region dict: %s', region_dict)
return region_dict
except KeyError:
raise SpinnakerSubnetError(env=env, region=region)
LOG.debug('AZ dict:\n%s', pformat(dict(account_az_dict)))
return account_az_dict
|
Get all availability zones for a given target.
Args:
target (str): Type of subnets to look up (ec2 or elb).
env (str): Environment to look up.
region (str): AWS Region to find Subnets for.
Returns:
az_dict: dictionary of availability zones, structured like
{ $region: [ $availabilityzones ] }
or
{ $account: $region: [ $availabilityzone] }
|
codesearchnet
|
def _preprocess(self, inputs: Sequence[torch.Tensor], freq: Sequence[int]) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
input_ts, input_padding, inp_freq = ([], [], [])
for i, ts in enumerate(inputs):
input_len = ts.shape[0]
padding = torch.zeros(input_len + self.horizon_len, dtype=ts.dtype, device=ts.device)
if input_len < self.context_len:
num_front_pad = self.context_len - input_len
ts = torch.cat([torch.zeros(num_front_pad, dtype=ts.dtype, device=ts.device), ts], dim=0)
padding = torch.cat([torch.ones(num_front_pad, dtype=ts.dtype, device=padding.device), padding], dim=0)
elif input_len > self.context_len:
ts = ts[-self.context_len:]
padding = padding[-(self.context_len + self.horizon_len):]
input_ts.append(ts)
input_padding.append(padding)
inp_freq.append(freq[i])
return (torch.stack(input_ts, dim=0), torch.stack(input_padding, dim=0), torch.tensor(inp_freq, dtype=torch.int32).reshape(-1, 1))
|
Formats and pads raw inputs to feed into the model.
This function both pads each time series to match the context length, and
pads the inputs to meet the SPMD shape requirement.
Args:
inputs: A list of 1d Tensors. Each Tensor is the context time series of
a single forecast task.
freq: list of frequencies
Returns:
A tuple of:
- the padded input time series to meet the model required context.
- the padding indicator.
- the number of padded examples for SPMD so that each core has the same
number (a multiple of `batch_size`) of examples.
|
github-repos
|
def dbmax10years(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `dbmax10years`'.format(value))
self._dbmax10years = value
|
Corresponds to IDD Field `dbmax10years`
10-year return period values for maximum extreme dry-bulb temperature
Args:
value (float): value for IDD Field `dbmax10years`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def _locate_point(nodes, point):
candidates = [(0.0, 1.0, nodes)]
for _ in six.moves.xrange((_MAX_LOCATE_SUBDIVISIONS + 1)):
next_candidates = []
for (start, end, candidate) in candidates:
if _helpers.contains_nd(candidate, point.ravel(order='F')):
midpoint = (0.5 * (start + end))
(left, right) = subdivide_nodes(candidate)
next_candidates.extend(((start, midpoint, left), (midpoint, end, right)))
candidates = next_candidates
if (not candidates):
return None
params = [(start, end) for (start, end, _) in candidates]
if (np.std(params) > _LOCATE_STD_CAP):
raise ValueError('Parameters not close enough to one another', params)
s_approx = np.mean(params)
s_approx = newton_refine(nodes, point, s_approx)
if (s_approx < 0.0):
return 0.0
elif (s_approx > 1.0):
return 1.0
else:
return s_approx
|
r"""Locate a point on a curve.
Does so by recursively subdividing the curve and rejecting
sub-curves with bounding boxes that don't contain the point.
After the sub-curves are sufficiently small, uses Newton's
method to zoom in on the parameter value.
.. note::
This assumes, but does not check, that ``point`` is ``D x 1``,
where ``D`` is the dimension that ``curve`` is in.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): The nodes defining a Bézier curve.
point (numpy.ndarray): The point to locate.
Returns:
Optional[float]: The parameter value (:math:`s`) corresponding
to ``point`` or :data:`None` if the point is not on the ``curve``.
Raises:
ValueError: If the standard deviation of the remaining start / end
parameters among the subdivided intervals exceeds a given
threshold (e.g. :math:`2^{-20}`).
|
codesearchnet
|
def flatten_rules(self, declarations):
rules = []
for (protocole, paths) in declarations:
if protocole:
continue
rules.extend([self.strip_quotes(v.strip()) for v in paths.split(',')])
return list(filter(self.filter_rules, rules))
|
Flatten returned import rules from regex.
Because import rules can contain multiple items in the same rule
(called a multiline import rule), the regex ``REGEX_IMPORT_RULE``
returns a list of unquoted items for each rule.
Args:
declarations (list): List of (protocol, paths) tuples captured by the import rule regex.
Returns:
list: Flattened list of unquoted import paths, filtered through ``filter_rules``.
|
codesearchnet
|
def piece_to_id(input, model_file=None, model_proto=None, name=None):
return _gen_sentencepiece_processor_op.sentencepiece_piece_to_id(input, model_file=model_file, model_proto=model_proto, name=name)
|
Converts piece into vocabulary id.
Args:
input: An arbitrary tensor of string.
model_file: The sentencepiece model file path.
model_proto: The sentencepiece model serialized proto.
Either `model_file` or `model_proto` must be set.
name: The name argument that is passed to the op function.
Returns:
A tensor of int32 with the same shape as input.
|
codesearchnet
|
def pages(self):
if (self._owner_id is None):
it = ProfileIterator.from_username(self._username, self.session)
self._owner_id = it.owner_id
return it
return ProfileIterator(self._owner_id, self.session, self.rhx)
|
Obtain an iterator over Instagram post pages.
Returns:
PageIterator: an iterator over the instagram post pages.
Raises:
ValueError: when the requested user does not exist.
RuntimeError: when the user is a private account
and there is no logged user (or the logged user
does not follow that account).
|
codesearchnet
|
def createThread(parent, worker, deleteWorkerLater=False):
thread = QtCore.QThread(parent)
thread.started.connect(worker.doWork)
worker.finished.connect(thread.quit)
if deleteWorkerLater:
thread.finished.connect(worker.deleteLater)
worker.moveToThread(thread)
worker.setParent(parent)
return thread
|
Create a new thread for given worker.
Args:
parent (QObject): parent of thread and worker.
worker (ProgressWorker): worker to use in thread.
deleteWorkerLater (bool, optional): delete the worker if thread finishes.
Returns:
QThread
|
codesearchnet
|
def _check_suffix(self, w_string, access_string, index):
prefix_as = self._membership_query(access_string)
full_as = self._membership_query(access_string + w_string[index:])
prefix_w = self._membership_query(w_string[:index])
full_w = self._membership_query(w_string)
length = len(commonprefix([prefix_as, full_as]))
as_suffix = full_as[length:]
length = len(commonprefix([prefix_w, full_w]))
w_suffix = full_w[length:]
if as_suffix != w_suffix:
logging.debug('Access string state incorrect')
return True
logging.debug('Access string state correct.')
return False
|
Checks if access string suffix matches with the examined string suffix
Args:
w_string (str): The examined string to be consumed
access_string (str): The access string for the state
index (int): The index value for selecting the prefix of w
Returns:
bool: A boolean valuei indicating if matching was successful
|
juraj-google-style
|
def serialize_to_string(self, name, datas):
value = datas.get('value', None)
if (value is None):
msg = "String reference '{}' lacks of required 'value' variable or is empty"
raise SerializerError(msg.format(name))
return value
|
Serialize given datas to a string.
Simply return the value from the required variable ``value``.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
string: Value.
|
codesearchnet
|
def __le__(self, other):
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value <= other.value
|
Returns True if `self` is known to be less than or equal to `other`.
Dimensions are compared as follows:
```python
(tf.compat.v1.Dimension(m) <= tf.compat.v1.Dimension(n)) == (m <= n)
(tf.compat.v1.Dimension(m) <= tf.compat.v1.Dimension(None)) == None
(tf.compat.v1.Dimension(None) <= tf.compat.v1.Dimension(n)) == None
(tf.compat.v1.Dimension(None) <= tf.compat.v1.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value <= other.value` if both are known, otherwise
None.
|
github-repos
|
class GitProcessor(ProcessorMixin):
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'AutoImageProcessor'
tokenizer_class = 'AutoTokenizer'
def __init__(self, image_processor, tokenizer):
super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor
def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]]=None, audio=None, videos=None, **kwargs: Unpack[GitProcessorKwargs]) -> BatchFeature:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.')
images, text = _validate_images_text_input_order(images, text)
output_kwargs = self._merge_kwargs(GitProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
data = {}
if text is not None:
text_features = self.tokenizer(text, **output_kwargs['text_kwargs'])
data.update(text_features)
if images is not None:
image_features = self.image_processor(images, **output_kwargs['images_kwargs'])
data.update(image_features)
return BatchFeature(data=data, tensor_type=output_kwargs['common_kwargs'].get('return_tensors'))
def batch_decode(self, *args, **kwargs):
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
return self.tokenizer.decode(*args, **kwargs)
@property
def model_input_names(self):
return ['input_ids', 'attention_mask', 'pixel_values']
|
Constructs a GIT processor which wraps a CLIP image processor and a BERT tokenizer into a single processor.
[`GitProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`BertTokenizerFast`]. See the
[`~GitProcessor.__call__`] and [`~GitProcessor.decode`] for more information.
Args:
image_processor ([`AutoImageProcessor`]):
The image processor is a required input.
tokenizer ([`AutoTokenizer`]):
The tokenizer is a required input.
|
github-repos
|
def get_logger(name):
logger = logging.getLogger(name)
logger.addHandler(logging.NullHandler())
return logger
|
Gets a logger
Arguments:
name - the name you wish to log as
Returns:
A logger!
|
juraj-google-style
|
def CacheObject(self, identifier, vfs_object):
if identifier in self._values:
raise KeyError('Object already cached for identifier: {0:s}'.format(
identifier))
if len(self._values) == self._maximum_number_of_cached_values:
raise errors.CacheFullError('Maximum number of cached values reached.')
self._values[identifier] = ObjectsCacheValue(vfs_object)
|
Caches a VFS object.
This method ignores the cache value reference count.
Args:
identifier (str): VFS object identifier.
vfs_object (object): VFS object to cache.
Raises:
CacheFullError: if the maximum number of cached values is reached.
KeyError: if the VFS object already is cached.
|
juraj-google-style
|
def get_or_create(session, model, **kwargs):
instance = session.query(model).filter_by(**kwargs).first()
if instance:
return (instance, False)
else:
instance = model(**kwargs)
if ('dataset' in kwargs):
instance.update_sequence_id(session, kwargs['dataset'])
session.add(instance)
session.commit()
return (instance, True)
|
Get or create sqlalchemy instance.
Args:
session (Sqlalchemy session):
model (sqlalchemy model):
kwargs (dict): kwargs to lookup or create instance.
Returns:
Tuple: first element is found or created instance, second is boolean - True if instance created,
False if instance found.
|
codesearchnet
|
def nested_row_lengths(self, name=None):
with ops.name_scope(name, 'RaggedNestedRowLengths', [self]):
rt_nested_row_lengths = []
rt = self
while isinstance(rt, RaggedTensor):
rt_nested_row_lengths.append(rt.row_lengths())
rt = rt.values
return tuple(rt_nested_row_lengths)
|
Returns a tuple containing the row_lengths for all ragged dimensions.
`rt.nested_row_lengths()` is a tuple containing the `row_lengths` tensors
for all ragged dimensions in `rt`, ordered from outermost to innermost.
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A `tuple` of 1-D integer `Tensors`. The length of the tuple is equal to
`self.ragged_rank`.
|
github-repos
|
def converted_self(self):
raise NotImplementedError
|
A copy of this Convertible to be modified during conversion.
Returns:
Implementations should return the copied instance, which in turn should
be contained in converted_enclosing_graph(). This instance is the one that
will be modified during conversion. Its main use will be in the
implementations of convert_variable_to_constant().
|
github-repos
|
def monitored_timer(cell):
def actual_decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
with MonitoredTimer(cell):
return func(*args, **kwargs)
return wrapper
return actual_decorator
|
A function decorator for adding MonitoredTimer support.
Args:
cell: the cell associated with the time metric that will be incremented.
Returns:
A decorator that measures the function runtime and increments the specified
counter cell.
|
github-repos
|
def convert(credentials):
credentials_class = type(credentials)
try:
return _CLASS_CONVERSION_MAP[credentials_class](credentials)
except KeyError as caught_exc:
new_exc = ValueError(_CONVERT_ERROR_TMPL.format(credentials_class))
six.raise_from(new_exc, caught_exc)
|
Convert oauth2client credentials to google-auth credentials.
This class converts:
- :class:`oauth2client.client.OAuth2Credentials` to
:class:`google.oauth2.credentials.Credentials`.
- :class:`oauth2client.client.GoogleCredentials` to
:class:`google.oauth2.credentials.Credentials`.
- :class:`oauth2client.service_account.ServiceAccountCredentials` to
:class:`google.oauth2.service_account.Credentials`.
- :class:`oauth2client.service_account._JWTAccessCredentials` to
:class:`google.oauth2.service_account.Credentials`.
- :class:`oauth2client.contrib.gce.AppAssertionCredentials` to
:class:`google.auth.compute_engine.Credentials`.
- :class:`oauth2client.contrib.appengine.AppAssertionCredentials` to
:class:`google.auth.app_engine.Credentials`.
Returns:
google.auth.credentials.Credentials: The converted credentials.
Raises:
ValueError: If the credentials could not be converted.
|
codesearchnet
|
def clone(self, to_namespace, to_name):
r = fapi.clone_workspace(self.namespace, self.name, to_namespace, to_name, self.api_url)
fapi._check_response_code(r, 201)
return Workspace(to_namespace, to_name, self.api_url)
|
Clone this workspace.
Args:
to_namespace (str): Target workspace namespace
to_name (str): Target workspace name
|
codesearchnet
|
def issue(self, invoice_id, **kwargs):
url = "{}/{}/issue".format(self.base_url, invoice_id)
return self.post_url(url, {}, **kwargs)
|
Issues an invoice in draft state
Args:
invoice_id : Id of the invoice to issue
Returns:
Its response is the invoice entity, similar to create/update API response. Its status now would be issued.
|
juraj-google-style
|
def _GetBetweenQEqualsAndAmpersand(self, url):
_, _, url = url.partition('?')
_, _, url = url.partition('q=')
if not url:
return ''
url, _, _ = url.partition('&')
return url
|
Retrieves the substring between the substrings 'q=' and '&'.
Args:
url (str): URL.
Returns:
str: search query, the value between 'q=' and '&' or None if no query
was found.
|
juraj-google-style
|
def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return backend.sparse_categorical_crossentropy(y_true, y_pred, from_logits=from_logits, axis=axis)
|
Computes the sparse categorical crossentropy loss.
Standalone usage:
>>> y_true = [1, 2]
>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
>>> loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> loss.numpy()
array([0.0513, 2.303], dtype=float32)
Args:
y_true: Ground truth values.
y_pred: The predicted values.
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
axis: Defaults to -1. The dimension along which the entropy is
computed.
Returns:
Sparse categorical crossentropy loss value.
|
github-repos
|
def as_dict_summary(self, print_subelectrodes=True):
chg_comp = self.fully_charged_entry.composition
dischg_comp = self.fully_discharged_entry.composition
ion = self.working_ion
d = {"average_voltage": self.get_average_voltage(),
"max_voltage": self.max_voltage,
"min_voltage": self.min_voltage,
"max_delta_volume": self.max_delta_volume,
"max_voltage_step": self.max_voltage_step,
"capacity_grav": self.get_capacity_grav(),
"capacity_vol": self.get_capacity_vol(),
"energy_grav": self.get_specific_energy(),
"energy_vol": self.get_energy_density(),
"working_ion": self._working_ion.symbol,
"nsteps": self.num_steps,
"framework": self._vpairs[0].framework.to_data_dict,
"formula_charge": chg_comp.reduced_formula,
"id_charge": self.fully_charged_entry.entry_id,
"formula_discharge": dischg_comp.reduced_formula,
"id_discharge": self.fully_discharged_entry.entry_id,
"fracA_charge": chg_comp.get_atomic_fraction(ion),
"fracA_discharge": dischg_comp.get_atomic_fraction(ion),
"max_instability": self.get_max_instability(),
"min_instability": self.get_min_instability(),
"material_ids" : [itr_ent.entry_id for itr_ent in self._entries],
"stable_material_ids" : [itr_ent.entry_id for itr_ent in self.get_stable_entries()],
"unstable_material_ids": [itr_ent.entry_id for itr_ent in self.get_unstable_entries()],
}
if all(['decomposition_energy' in itr_ent.data for itr_ent in self._entries]):
d.update({"stability_charge": self.fully_charged_entry.data['decomposition_energy'],
"stability_discharge": self.fully_discharged_entry.data['decomposition_energy'],
"stability_data":{itr_ent.entry_id: itr_ent.data['decomposition_energy'] for itr_ent in self._entries},
})
if all(['muO2' in itr_ent.data for itr_ent in self._entries]):
d.update({"muO2_data" : {itr_ent.entry_id: itr_ent.data['muO2'] for itr_ent in self._entries}})
if print_subelectrodes:
f_dict = lambda c: c.as_dict_summary(print_subelectrodes=False)
d["adj_pairs"] = list(map(f_dict,
self.get_sub_electrodes(adjacent_only=True)))
d["all_pairs"] = list(map(f_dict,
self.get_sub_electrodes(adjacent_only=False)))
return d
|
Generate a summary dict.
Args:
print_subelectrodes: Also print data on all the possible
subelectrodes.
Returns:
A summary of this electrode's properties in dict format.
|
juraj-google-style
|
def remove_results(vcs, signature):
results_directory = _get_results_directory(vcs, signature)
if (not os.path.exists(results_directory)):
raise ResultsNotFoundError
shutil.rmtree(results_directory)
|
Remove saved results for this signature.
Args:
vcs (easyci.vcs.base.Vcs)
signature (str)
Raises:
ResultsNotFoundError
|
codesearchnet
|
def get_creator_by_name(name):
return {'docker(container)': Container.creator, 'shell': Bash.creator, 'docker(image)': Image.creator, 'python': Script.creator, 'packer': Packer.creator, 'ansible(simple)': Ansible.creator}[name]
|
Get creator function by name.
Args:
name (str): name of the creator function.
Returns:
function: creator function.
|
codesearchnet
|
def get_links(self, **kw):
links = [a for a in dir(self) if (isinstance(getattr(self, a), Model) and (not a.startswith('_model')))]
return [{'field': l, 'mdl': getattr(self, l).__class__} for l in links]
|
Prepare links of the form by mimicking pyoko's get_links method's result
Args:
**kw:
Returns: list of link dicts
|
codesearchnet
|
def is_os(name, version_id=None):
result = False
os_release_infos = _fetch_os_release_infos()
if name == os_release_infos.get('name', None):
if version_id is None:
result = True
elif version_id == os_release_infos.get('version_id', None):
result = True
return result
|
Return True if OS name in /etc/lsb-release of host given by fabric param
`-H` is the same as given by argument, False else.
If arg version_id is not None only return True if it is the same as in
/etc/lsb-release, too.
Args:
name: 'Debian GNU/Linux', 'Ubuntu'
version_id(None or str): None,
'14.04', (Ubuntu)
'16.04', (Ubuntu)
'8', (Debian)
|
juraj-google-style
|
def __stripValue(self, value):
if isinstance(value, str):
if ( value[0] == '"' and value[-1] == '"' ) or ( value[0] == '[' and value[-1] == ']' ):
return value[1:-1]
return value
|
strip the special characters in the value
Args:
value: value string
Returns:
value string without special characters
|
juraj-google-style
|
def create_iam_resources(env='dev', app='', **_):
session = boto3.session.Session(profile_name=env)
client = session.client('iam')
app_properties = get_properties(env='pipeline')
generated = get_details(env=env, app=app)
generated_iam = generated.iam()
app_details = collections.namedtuple('AppDetails', generated_iam.keys())
details = app_details(**generated_iam)
LOG.debug('Application details: %s', details)
deployment_type = app_properties['type']
role_trust_template = get_template('infrastructure/iam/trust/{0}_role.json.j2'.format(deployment_type), formats=generated)
resource_action(client, action='create_role', log_format='Created Role: %(RoleName)s', RoleName=details.role, AssumeRolePolicyDocument=role_trust_template)
resource_action(client, action='create_instance_profile', log_format='Created Instance Profile: %(InstanceProfileName)s', InstanceProfileName=details.profile)
attach_profile_to_role(client, role_name=details.role, profile_name=details.profile)
iam_policy = construct_policy(app=app, group=details.group, env=env, pipeline_settings=app_properties)
if iam_policy:
resource_action(client, action='put_role_policy', log_format='Added IAM Policy: %(PolicyName)s', RoleName=details.role, PolicyName=details.policy, PolicyDocument=iam_policy)
resource_action(client, action='create_user', log_format='Created User: %(UserName)s', UserName=details.user)
resource_action(client, action='create_group', log_format='Created Group: %(GroupName)s', GroupName=details.group)
resource_action(client, action='add_user_to_group', log_format='Added User to Group: %(UserName)s -> %(GroupName)s', GroupName=details.group, UserName=details.user)
return True
|
Create the IAM Resources for the application.
Args:
env (str): Deployment environment/account, i.e. dev, stage, prod.
app (str): Spinnaker Application name.
Returns:
True upon successful completion.
|
codesearchnet
|
def drop(self, items):
self._manager.leaser.remove(items)
self._manager.maybe_resume_consumer()
|
Remove the given messages from lease management.
Args:
items(Sequence[DropRequest]): The items to drop.
|
juraj-google-style
|
def get_default_description(arg: inspect.Parameter) -> str:
if arg.annotation is inspect._empty:
arg_type = '<fill_type>'
elif hasattr(arg.annotation, '__name__'):
arg_type = arg.annotation.__name__
else:
arg_type = str(arg.annotation)
if arg.default is inspect._empty:
return f'`{arg_type}`'
elif arg.default is None:
return f'`{arg_type}`, {OPTIONAL_KEYWORD}'
else:
str_default = stringify_default(arg.default)
return f'`{arg_type}`, {OPTIONAL_KEYWORD}, defaults to {str_default}'
|
Builds a default description for a parameter that was not documented.
Args:
arg (`inspect.Parameter`): The argument in the signature to generate a description for.
Returns:
`str`: The description.
|
github-repos
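A hedged usage sketch; OPTIONAL_KEYWORD and stringify_default live in the surrounding module and are stubbed here only to make the example self-contained:
import inspect

OPTIONAL_KEYWORD = '*optional*'            # assumed module-level constant
stringify_default = lambda d: f'`{d!r}`'   # assumed helper

def example(batch_size: int, dropout: float = 0.1, name=None):
    pass

for param in inspect.signature(example).parameters.values():
    print(param.name, '->', get_default_description(param))
# batch_size -> `int`
# dropout -> `float`, *optional*, defaults to `0.1`
# name -> `<fill_type>`, *optional*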
|
def _verify_ops(graph_def: graph_pb2.GraphDef, namespace_whitelist):
if namespace_whitelist is None:
return
invalid_ops = []
invalid_namespaces = set()
all_operations = []
all_operations.extend(meta_graph.ops_used_by_graph_def(graph_def))
for op in all_operations:
if '>' in op:
namespace = op.split('>')[0]
if namespace not in namespace_whitelist:
invalid_ops.append(op)
invalid_namespaces.add(namespace)
if invalid_ops:
raise ValueError(f"Attempted to save ops from non-whitelisted namespaces to SavedModel: {invalid_ops}.\nPlease verify that these ops should be saved, since they must be available when loading the SavedModel. If loading from Python, you must import the library defining these ops. From C++, link the custom ops to the serving binary. Once you've confirmed this, add the following namespaces to the `namespace_whitelist` argument in tf.saved_model.SaveOptions: {invalid_namespaces}.")
|
Verifies that all namespaced ops in the graph are whitelisted.
Args:
graph_def: the GraphDef to validate.
namespace_whitelist: a list of namespaces to allow. If `None`, all will be
allowed. If an op does not have a namespace, it will be allowed.
Raises:
ValueError: If the graph contains ops that violate the whitelist.
|
github-repos
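A standalone sketch of the whitelist rule: an op name of the form 'Namespace>OpName' is rejected when its namespace is not whitelisted, while un-namespaced ops always pass:
def check_namespaces(op_names, namespace_whitelist):
    if namespace_whitelist is None:
        return []
    return [op for op in op_names
            if '>' in op and op.split('>')[0] not in namespace_whitelist]

bad = check_namespaces(['Identity', 'MyCompany>Beam'], namespace_whitelist=['Other'])
print(bad)  # ['MyCompany>Beam'] -- these would trigger the ValueError above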
|
def get(self, uid: int) -> Optional[CachedMessage]:
return self._cache.get(uid)
|
Return the given cached message.
Args:
uid: The message UID.
|
juraj-google-style
|
def parseConfig(cls, value):
if 'enabled' in value:
value['enabled'] = bool(value['enabled'])
if 'exclude_paths' in value:
value['exclude_paths'] = [n.strip() for n in ast.literal_eval(value['exclude_paths'])]
return value
|
Parse the config values
Args:
value (dict): Dictionary which contains the checker config
Returns:
dict: The checker config with parsed values
|
juraj-google-style
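A hypothetical config dict run through parseConfig (the checker class name is assumed for illustration):
raw = {
    'enabled': 1,
    'exclude_paths': "['build/', ' dist/ ', 'docs/_static']",
}
parsed = SomeChecker.parseConfig(raw)
# parsed == {'enabled': True, 'exclude_paths': ['build/', 'dist/', 'docs/_static']}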
|
def leave(self):
try:
self.client.api.leave_room(self.room_id)
del self.client.rooms[self.room_id]
return True
except MatrixRequestError:
return False
|
Leave the room.
Returns:
boolean: Leaving the room was successful.
|
codesearchnet
|
def _getFieldStats(self):
fieldStats = dict()
fieldNames = self._inputSource.getFieldNames()
for field in fieldNames:
curStats = dict()
curStats['min'] = self._inputSource.getFieldMin(field)
curStats['max'] = self._inputSource.getFieldMax(field)
fieldStats[field] = curStats
return fieldStats
|
Method which returns a dictionary of field statistics received from the
input source.
Returns:
fieldStats: dict of dicts where the first level is the field name and
the second level is the statistic, e.g. fieldStats['pounds']['min']
|
codesearchnet
|
def get_np_doc_form():
return _np_doc_form
|
Gets the form of the original numpy docstrings.
Returns:
See `set_np_doc_form` for the list of valid values.
|
github-repos
|
def list_apps(site):
ret = dict()
ps_cmd = list()
ps_cmd.append("Get-WebApplication -Site '{0}'".format(site))
ps_cmd.append(r"| Select-Object applicationPool, path, PhysicalPath, preloadEnabled,")
ps_cmd.append(r"@{ Name='name'; Expression={ $_.path.Split('/', 2)[-1] } },")
ps_cmd.append(r"@{ Name='protocols'; Expression={ @( $_.enabledProtocols.Split(',')")
ps_cmd.append(r"| Foreach-Object { $_.Trim() } ) } }")
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
except ValueError:
raise CommandExecutionError('Unable to parse return data as Json.')
for item in items:
protocols = list()
if isinstance(item['protocols'], dict):
if 'value' in item['protocols']:
protocols += item['protocols']['value']
else:
protocols.append(item['protocols'])
ret[item['name']] = {'apppool': item['applicationPool'],
'path': item['path'],
'preload': item['preloadEnabled'],
'protocols': protocols,
'sourcepath': item['PhysicalPath']}
if not ret:
log.warning('No apps found in output: %s', cmd_ret)
return ret
|
Get all configured IIS applications for the specified site.
Args:
site (str): The IIS site name.
Returns: A dictionary of the application names and properties.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_apps site
|
juraj-google-style
|
def get_total_mass(self):
try:
mass = self.loc[:, 'mass'].sum()
except KeyError:
mass_molecule = self.add_data('mass')
mass = mass_molecule.loc[:, 'mass'].sum()
return mass
|
Returns the total mass in g/mol.
Args:
None
Returns:
float: The total mass in g/mol.
|
juraj-google-style
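A minimal sketch of the underlying sum, assuming self behaves like a pandas DataFrame with a 'mass' column (as in chemcoord-style molecule objects):
import pandas as pd

atoms = pd.DataFrame({'atom': ['O', 'H', 'H'],
                      'mass': [15.999, 1.008, 1.008]})
total_mass = atoms.loc[:, 'mass'].sum()
print(round(total_mass, 3))  # 18.015 g/mol for water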
|
def parse(self, utterance, context=None, N=1):
start = time.time()
context_trie = None
if (context and isinstance(context, list)):
context.sort(key=(lambda x: x.get('confidence')))
context_trie = Trie()
for entity in context:
(entity_value, entity_type) = entity.get('data')[0]
context_trie.insert(entity_value.lower(), data=(entity_value, entity_type), weight=entity.get('confidence'))
tagged = self._tagger.tag(utterance.lower(), context_trie=context_trie)
self.emit('tagged_entities', {'utterance': utterance, 'tags': list(tagged), 'time': (time.time() - start)})
start = time.time()
bke = BronKerboschExpander(self._tokenizer)
def score_clique(clique):
score = 0.0
for tagged_entity in clique:
ec = tagged_entity.get('entities', [{'confidence': 0.0}])[0].get('confidence')
score += ((ec * len(tagged_entity.get('entities', [{'match': ''}])[0].get('match'))) / (len(utterance) + 1))
return score
parse_results = bke.expand(tagged, clique_scoring_func=score_clique)
count = 0
for result in parse_results:
count += 1
parse_confidence = 0.0
for tag in result:
sample_entity = tag['entities'][0]
entity_confidence = ((sample_entity.get('confidence', 0.0) * float(len(sample_entity.get('match')))) / len(utterance))
parse_confidence += entity_confidence
(yield {'utterance': utterance, 'tags': result, 'time': (time.time() - start), 'confidence': parse_confidence})
if (count >= N):
break
|
Used to find tags within utterance with a given confidence
Args:
utterance(str): conversational piece given by the user
context(list): a list of entities
N(int): number of results
Yields: an object with the following fields
utterance(str): the value passed in
tags(list) : a list of tags found in utterance
time(time) : duration since call of function
confidence(float) : a float indicating how confident the match to the
utterance is. This might be used to determine the most likely intent.
|
codesearchnet
|
def set_query(self, value):
if isinstance(value, basestring) or value is None:
self._content['query'] = value
elif hasattr(value, 'keys'):
self._content['query'] = query.terms_from_dict(value)
else:
raise TypeError("Query must be a string or dict. Got: {} instead!".format(type(value)))
|
Convert a dict form of the query into a query string if needed and store the query string.
Args:
value -- A query string or a dict with query xpaths as keys and text or
nested query dicts as values.
|
juraj-google-style
|
def locked_get(self):
filters = {self.key_name: self.key_value}
query = self.session.query(self.model_class).filter_by(**filters)
entity = query.first()
if entity:
credential = getattr(entity, self.property_name)
if (credential and hasattr(credential, 'set_store')):
credential.set_store(self)
return credential
else:
return None
|
Retrieve stored credential.
Returns:
A :class:`oauth2client.Credentials` instance or `None`.
|
codesearchnet
|
def CheckFlowCanBeStartedOnClient(flow_name):
flow_cls = flow.GRRFlow.GetPlugin(flow_name)
if flow_cls.category:
return True
else:
raise access_control.UnauthorizedAccess(
"Flow %s can't be started on a client by non-suid users." % flow_name)
|
Checks if flow can be started on a particular client.
Only flows with a category can be started. Having a category means that the
flow will be accessible from the UI.
Args:
flow_name: Name of the flow to check access for.
Returns:
True if flow is externally accessible.
Raises:
access_control.UnauthorizedAccess: if flow is not externally accessible.
|
juraj-google-style
|
def RegisterPlugin(cls, plugin_class):
name = getattr(
plugin_class, 'ARTIFACT_DEFINITION_NAME', plugin_class.__name__)
name = name.lower()
if name in cls._plugins:
raise KeyError(
'Artifact plugin class already set for name: {0:s}.'.format(name))
preprocess_plugin = plugin_class()
cls._plugins[name] = preprocess_plugin
if isinstance(
preprocess_plugin, interface.FileSystemArtifactPreprocessorPlugin):
cls._file_system_plugins[name] = preprocess_plugin
elif isinstance(
preprocess_plugin, interface.KnowledgeBasePreprocessorPlugin):
cls._knowledge_base_plugins[name] = preprocess_plugin
elif isinstance(
preprocess_plugin,
interface.WindowsRegistryKeyArtifactPreprocessorPlugin):
cls._windows_registry_plugins[name] = preprocess_plugin
|
Registers a preprocess plugin class.
Args:
plugin_class (type): preprocess plugin class.
Raises:
KeyError: if plugin class is already set for the corresponding name.
TypeError: if the source type of the plugin class is not supported.
|
juraj-google-style
|
def angle(self, deg=False):
if (self.dtype.str[1] != 'c'):
warnings.warn('angle() is intended for complex-valued timeseries', RuntimeWarning, 1)
da = distob.vectorize(np.angle)(self, deg)
return _dts_from_da(da, self.tspan, self.labels)
|
Return the angle of a complex Timeseries
Args:
deg (bool, optional):
Return angle in degrees if True, radians if False (default).
Returns:
angle (Timeseries):
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
|
codesearchnet
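The core of the computation is numpy's elementwise angle; a plain-numpy sketch without the Timeseries wrapper or distob vectorization:
import numpy as np

z = np.array([1 + 1j, -1 + 0j, 0 - 1j])
print(np.angle(z))            # ~[0.785, 3.142, -1.571] radians
print(np.angle(z, deg=True))  # ~[45., 180., -90.] degrees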
|
def print_periodic_table(filter_function: callable = None):
for row in range(1, 10):
rowstr = []
for group in range(1, 19):
try:
el = Element.from_row_and_group(row, group)
except ValueError:
el = None
if el and ((not filter_function) or filter_function(el)):
rowstr.append("{:3s}".format(el.symbol))
else:
rowstr.append(" ")
print(" ".join(rowstr))
|
A pretty ASCII printer for the periodic table, based on some
filter_function.
Args:
filter_function: A filtering function taking an Element as input
and returning a boolean. For example, setting
filter_function = lambda el: el.X > 2 will print a periodic
table containing only elements with electronegativity > 2.
|
juraj-google-style
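Example calls, assuming pymatgen's Element is importable as the function body requires; the electronegativity filter is the one suggested in the docstring:
print_periodic_table()                                     # full table
print_periodic_table(filter_function=lambda el: el.X > 2)  # only elements with X > 2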
|
def List(self, request, global_params=None):
config = self.GetMethodConfig('List')
return self._RunMethod(config, request, global_params=global_params)
|
Lists snapshots.
Args:
request: (DataflowProjectsLocationsJobsSnapshotsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListSnapshotsResponse) The response message.
|
github-repos
|
def __init__(self, data_parallelism, expert_parallelism, gates):
self._gates = gates
self._dp = data_parallelism
self._ep = expert_parallelism
assert len(gates) == self._dp.n
self._dispatchers = self._dp(SparseDispatcher, self._ep.n, gates)
|
Create a DistributedSparseDispatcher.
Args:
data_parallelism: a Parallelism object.
expert_parallelism: a Parallelism object.
gates: a list of datashard_parallelism.n `Tensor`s of shapes
`[batch_size[d], num_experts]`.
Returns:
a DistributedSparseDispatcher
|
juraj-google-style
|
def find_nearest(a, value, index=False):
i = np.abs(a - value).argmin()
if index:
return i
else:
return a[i]
|
Find the array value, or index of the array value, closest to some given
value.
Args:
a (ndarray)
value (float)
index (bool): whether to return the index instead of the array value.
Returns:
float. The array value (or index, as int) nearest the specified value.
|
juraj-google-style
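A quick usage check for find_nearest:
import numpy as np

a = np.array([0.0, 0.5, 1.0, 1.5])
print(find_nearest(a, 0.7))              # 0.5, the closest array value
print(find_nearest(a, 0.7, index=True))  # 1, its position in the array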
|
def vocab_token_counts(text_filepattern, max_lines):
ret = {}
for (i, line) in enumerate(_read_filepattern(text_filepattern, max_lines=max_lines)):
if (',' not in line):
tf.logging.warning("Malformed vocab line #%d '%s'", i, line)
continue
(token, count) = line.rsplit(',', 1)
ret[_native_to_unicode(token)] = int(count)
return ret
|
Read a vocab file and return a dictionary of token counts.
Reads a two-column CSV file of tokens and their frequency in a dataset. The
tokens are presumed to be generated by encode() or the equivalent.
Args:
text_filepattern: A pattern matching one or more files.
max_lines: An integer; maximum total lines to read.
Returns:
a dictionary mapping token to count.
|
codesearchnet
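The expected file format is two comma-separated columns, token then count, split on the last comma so tokens may themselves contain commas. A hedged sketch of reading one such file without the TF file-pattern helpers:
def read_vocab_counts(path, max_lines=None):
    ret = {}
    with open(path, encoding='utf-8') as f:
        for i, line in enumerate(f):
            if max_lines is not None and i >= max_lines:
                break
            line = line.rstrip('\n')
            if ',' not in line:
                continue  # malformed line; the original logs a warning and skips it
            token, count = line.rsplit(',', 1)
            ret[token] = int(count)
    return ret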
|
def _save_private_file(filename, json_contents):
temp_filename = tempfile.mktemp()
file_desc = os.open(temp_filename, (os.O_WRONLY | os.O_CREAT), 384)  # 384 == 0o600: owner read/write only
with os.fdopen(file_desc, 'w') as file_handle:
json.dump(json_contents, file_handle, sort_keys=True, indent=2, separators=(',', ': '))
shutil.move(temp_filename, filename)
|
Saves a file with read-write permissions on for the owner.
Args:
filename: String. Absolute path to file.
json_contents: JSON serializable object to be saved.
|
codesearchnet
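The pattern is: write to a temporary file created with owner-only permissions (384 decimal is 0o600), then move it into place so readers never observe a half-written file. A self-contained sketch using mkstemp instead of the racy mktemp:
import json
import os
import shutil
import tempfile

def save_private_json(filename, obj):
    # mkstemp creates the file with 0o600 (owner read/write only).
    fd, tmp_path = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as fh:
        json.dump(obj, fh, sort_keys=True, indent=2, separators=(',', ': '))
    shutil.move(tmp_path, filename)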
|
def summarize_tensors(tensor_dict, tag=None):
if tag is None:
tag = "tensors/"
for t_name in list(tensor_dict):
t = tensor_dict[t_name]
tf.summary.histogram(tag + t_name, t)
|
Summarize the tensors.
Args:
tensor_dict: a dictionary of tensors.
tag: name scope of the summary; defaults to tensors/.
|
juraj-google-style
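A hedged usage sketch, assuming a TF1-style graph context in which tf.summary.histogram(name, values) behaves as used above:
tensors = {
    'logits': tf.random.normal([32, 10]),
    'embeddings': tf.random.normal([32, 128]),
}
summarize_tensors(tensors)                  # histograms under tensors/logits, tensors/embeddings
summarize_tensors(tensors, tag='decoder/')  # custom name-scope prefix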
|