code (string, lengths 20-4.93k) | docstring (string, lengths 33-1.27k) | source (string, 3 classes) |
---|---|---|
def stop_loss(self, accountID, **kwargs):
return self.create(accountID, order=StopLossOrderRequest(**kwargs))
|
Shortcut to create a Stop Loss Order in an Account
Args:
accountID : The ID of the Account
kwargs : The arguments to create a StopLossOrderRequest
Returns:
v20.response.Response containing the results from submitting
the request
|
codesearchnet
|
def _await_flow(self, client, flow_id):
print('{0:s}: Waiting to finish'.format(flow_id))
while True:
try:
status = client.Flow(flow_id).Get().data
except grr_errors.UnknownError:
msg = 'Unable to stat flow {0:s} for host {1:s}'.format(flow_id, client.data.os_info.fqdn.lower())
self.state.add_error(msg)
raise DFTimewolfError(msg)
if (status.state == flows_pb2.FlowContext.ERROR):
message = status.context.backtrace
if ('ArtifactNotRegisteredError' in status.context.backtrace):
message = status.context.backtrace.split('\n')[-2]
raise DFTimewolfError('{0:s}: FAILED! Message from GRR:\n{1:s}'.format(flow_id, message))
if (status.state == flows_pb2.FlowContext.TERMINATED):
print('{0:s}: Complete'.format(flow_id))
break
time.sleep(self._CHECK_FLOW_INTERVAL_SEC)
|
Awaits flow completion.
Args:
client: GRR Client object in which to await the flow.
flow_id: string containing ID of flow to await.
Raises:
DFTimewolfError: if flow error encountered.
|
codesearchnet
|
def blit(
self,
dest: tcod.console.Console,
fill_fore: bool = True,
fill_back: bool = True,
) -> None:
if not dest:
dest = tcod.console.Console._from_cdata(ffi.NULL)
if dest.width != self.width or dest.height != self.height:
raise ValueError(
"ConsoleBuffer.blit: "
"Destination console has an incorrect size."
)
if fill_back:
bg = dest.bg.ravel()
bg[0::3] = self.back_r
bg[1::3] = self.back_g
bg[2::3] = self.back_b
if fill_fore:
fg = dest.fg.ravel()
fg[0::3] = self.fore_r
fg[1::3] = self.fore_g
fg[2::3] = self.fore_b
dest.ch.ravel()[:] = self.char
|
Use libtcod's "fill" functions to write the buffer to a console.
Args:
dest (Console): Console object to modify.
fill_fore (bool):
If True, fill the foreground color and characters.
fill_back (bool):
If True, fill the background color.
|
juraj-google-style
|
def normalize_placeholders(arg, inject_quotes=False):
number_placeholders = re.findall('{{\\s*\\d+\\s*}}', arg)
for number_placeholder in number_placeholders:
number = re.search('\\d+', number_placeholder).group()
arg = arg.replace(number_placeholder, '{{_' + number + '}}')
return (arg.replace('{{', '"{{').replace('}}', '}}"') if inject_quotes else arg)
|
Normalize placeholders' names so that the template can be ingested into Jinja template engine.
- Jinja does not accept numbers as placeholder names, so add a "_"
before the numbers to make them valid placeholder names.
- Surround placeholders expressions with "" so we can preserve spaces inside the positional arguments.
Args:
arg: The string to process.
inject_quotes: True if we want to surround placeholders with a pair of quotes.
Returns:
A processed string where placeholders are surrounded by "" and
numbered placeholders are prepended with "_".
|
codesearchnet
|
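A worked call makes the two transformations above concrete. This is a minimal sketch assuming `normalize_placeholders` from the row above is in scope; the template string is made up for illustration.

```python
# Hypothetical template with two positional placeholders.
template = "echo {{ 0 }} > {{ 1 }}"

# Numbered placeholders get a leading underscore so Jinja accepts them.
assert normalize_placeholders(template) == "echo {{_0}} > {{_1}}"

# With inject_quotes=True every placeholder is additionally wrapped in
# double quotes, preserving spaces inside positional arguments.
assert normalize_placeholders(template, inject_quotes=True) == 'echo "{{_0}}" > "{{_1}}"'
```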
def _get_course_content(course_id, course_url, sailthru_client, site_code, config):
cache_key = '{}:{}'.format(site_code, course_url)
response = cache.get(cache_key)
if (not response):
try:
sailthru_response = sailthru_client.api_get('content', {'id': course_url})
if (not sailthru_response.is_ok()):
response = {}
else:
response = sailthru_response.json
cache.set(cache_key, response, config.get('SAILTHRU_CACHE_TTL_SECONDS'))
except SailthruClientError:
response = {}
if (not response):
logger.error('Could not get course data from Sailthru on enroll/purchase event. Calling Ecommerce Course API to get course info for enrollment confirmation email')
response = _get_course_content_from_ecommerce(course_id, site_code=site_code)
if response:
cache.set(cache_key, response, config.get('SAILTHRU_CACHE_TTL_SECONDS'))
return response
|
Get course information using the Sailthru content api or from cache.
If there is an error, just return with an empty response.
Arguments:
course_id (str): course key of the course
course_url (str): LMS url for course info page.
sailthru_client (object): SailthruClient
site_code (str): site code
config (dict): config options
Returns:
course information from Sailthru
|
codesearchnet
|
def find_or_build(cls, **kwargs):
keys = (kwargs.pop('keys') if ('keys' in kwargs) else [])
return (cls.first(**subdict(kwargs, keys)) or cls.build(**kwargs))
|
Checks if an instance already exists in db with these kwargs else
returns a new, saved instance of the service's model class.
Args:
**kwargs: instance parameters
|
codesearchnet
|
def fit(self, X=None, y=None, **kwargs):
context = {'X': X, 'y': y}
context.update(kwargs)
last_block_name = list(self.blocks.keys())[-1]
for (block_name, block) in self.blocks.items():
LOGGER.debug('Fitting block %s', block_name)
try:
fit_args = self._get_block_args(block_name, block.fit_args, context)
block.fit(**fit_args)
except Exception:
LOGGER.exception('Exception caught fitting MLBlock %s', block_name)
raise
if (block_name != last_block_name):
LOGGER.debug('Producing block %s', block_name)
try:
produce_args = self._get_block_args(block_name, block.produce_args, context)
outputs = block.produce(**produce_args)
output_dict = self._get_outputs(block_name, outputs, block.produce_output)
context.update(output_dict)
except Exception:
LOGGER.exception('Exception caught producing MLBlock %s', block_name)
raise
|
Fit the blocks of this pipeline.
Sequentially call the `fit` and the `produce` methods of each block,
capturing the outputs of each `produce` method before calling the `fit`
method of the next one.
During the whole process a context dictionary is built, where both the
passed arguments and the captured outputs of the `produce` methods
are stored, and from which the arguments for the next `fit` and
`produce` calls will be taken.
Args:
X: Fit Data, which the pipeline will learn from.
y: Fit Data labels, which the pipeline will use to learn how to
behave.
**kwargs: Any additional keyword arguments will be directly added
to the context dictionary and available for the blocks.
|
codesearchnet
|
def combine_first_two_dimensions(x):
ret = tf.reshape(x, tf.concat([[-1], common_layers.shape_list(x)[2:]], 0))
old_shape = x.get_shape().dims
(a, b) = old_shape[:2]
new_shape = ([((a * b) if (a and b) else None)] + old_shape[2:])
ret.set_shape(new_shape)
return ret
|
Reshape x so that the first two dimensions become one.
Args:
x: a Tensor with shape [a, b, ...]
Returns:
a Tensor with shape [ab, ...]
|
codesearchnet
|
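For clarity, here is a minimal sketch of the same reshape written against plain TensorFlow 2.x, without the tensor2tensor `common_layers` helper; the shapes are made up for illustration.

```python
import tensorflow as tf

x = tf.zeros([2, 3, 4])  # shape [a, b, ...] with a=2, b=3

# Equivalent of combine_first_two_dimensions(x): merge the first two axes.
y = tf.reshape(x, [-1] + x.shape.as_list()[2:])
print(y.shape)  # (6, 4)
```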
def __init__(self, resolver_context, file_data):
super(FakeFile, self).__init__(resolver_context)
self._current_offset = 0
self._file_data = file_data
self._size = 0
|
Initializes a file-like object.
Args:
resolver_context (Context): resolver context.
file_data (bytes): fake file data.
|
juraj-google-style
|
def flush(self, hard=False):
if (not self.servers):
return
if hard:
self.client.flush_all()
self.reset_stats()
else:
from uuid import uuid4
tag = uuid4().hex
if self.debug:
tag = ('flushed' + tag)
self.current = tag
|
Drop existing entries from the cache.
Args:
hard (bool): If True, all current entries are flushed from the
server(s), which affects all users. If False, only the local
process is affected.
|
codesearchnet
|
def sas_logical_interconnect_groups(self):
if (not self.__sas_logical_interconnect_groups):
self.__sas_logical_interconnect_groups = SasLogicalInterconnectGroups(self.__connection)
return self.__sas_logical_interconnect_groups
|
Gets the SasLogicalInterconnectGroups API client.
Returns:
SasLogicalInterconnectGroups:
|
codesearchnet
|
def get_decomp_and_e_above_hull(self, entry, allow_negative=False):
if (entry in self.stable_entries):
return ({entry: 1}, 0)
comp = entry.composition
(facet, simplex) = self._get_facet_and_simplex(comp)
decomp_amts = simplex.bary_coords(self.pd_coords(comp))
decomp = {self.qhull_entries[f]: amt for (f, amt) in zip(facet, decomp_amts) if (abs(amt) > PhaseDiagram.numerical_tol)}
energies = [self.qhull_entries[i].energy_per_atom for i in facet]
ehull = (entry.energy_per_atom - np.dot(decomp_amts, energies))
if (allow_negative or (ehull >= -PhaseDiagram.numerical_tol)):
return (decomp, ehull)
raise ValueError('No valid decomp found!')
|
Provides the decomposition and energy above convex hull for an entry.
Due to caching, can be much faster if entries with the same composition
are processed together.
Args:
entry: A PDEntry like object
allow_negative: Whether to allow negative e_above_hulls. Used to
calculate equilibrium reaction energies. Defaults to False.
Returns:
(decomp, energy above convex hull) Stable entries should have
energy above hull of 0. The decomposition is provided as a dict of
{Entry: amount}.
|
codesearchnet
|
def attention_lm_prepare_decoder(targets, hparams):
if hparams.prepend_mode == "prepend_inputs_full_attention":
decoder_self_attention_bias = (
common_attention.attention_bias_prepend_inputs_full_attention(
common_attention.embedding_to_padding(targets)))
else:
decoder_self_attention_bias = (
common_attention.attention_bias_lower_triangle(
common_layers.shape_list(targets)[1]))
decoder_input = common_layers.shift_right_3d(targets)
if hparams.pos == "timing":
decoder_input = common_attention.add_timing_signal_1d(decoder_input)
return (decoder_input, decoder_self_attention_bias)
|
Prepare one shard of the model for the decoder.
Args:
targets: a Tensor.
hparams: run hyperparameters
Returns:
decoder_input: a Tensor, bottom of decoder stack
decoder_self_attention_bias: a Tensor, containing large negative values
to implement masked attention and possibly biases for diagonal alignments
|
juraj-google-style
|
def is_native_xmon_op(op: ops.Operation) -> bool:
return (isinstance(op, ops.GateOperation) and
is_native_xmon_gate(op.gate))
|
Check if the gate corresponding to an operation is a native xmon gate.
Args:
op: Input operation.
Returns:
True if the operation is native to the xmon, false otherwise.
|
juraj-google-style
|
def get_saved_issue_data(self, issue, namespace='open'):
if isinstance(issue, int):
issue_number = str(issue)
elif isinstance(issue, basestring):
issue_number = issue
else:
issue_number = issue.number
issue_data_key = self._issue_data_key(namespace)
issue_data = self.data.get(issue_data_key,
{})
_data = issue_data.get(str(issue_number), {})
issue_data[str(issue_number)] = _data
return _data
|
Returns issue data from local data.
Args:
issue:
`int`. Github issue number.
namespace:
`str`. Namespace for storing this issue.
|
juraj-google-style
|
def handle_command(command):
try:
cmds = command.split(None, 1)
cmd = cmds[0]
if cmd == 'new':
add_task(get_arg(cmds))
elif cmd == 'done':
mark_done(int(get_arg(cmds)))
elif cmd == 'list':
for task in format_tasks(list_tasks()):
print task
elif cmd == 'delete':
delete_task(int(get_arg(cmds)))
else:
print_usage()
except Exception, e:
print e
print_usage()
|
Accepts a string command and performs an action.
Args:
command: the command to run as a string.
|
juraj-google-style
|
def list(self, **kwargs):
resp = self.client.api.volumes(**kwargs)
if not resp.get('Volumes'):
return []
return [self.prepare_model(obj) for obj in resp['Volumes']]
|
List volumes. Similar to the ``docker volume ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(list of :py:class:`Volume`): The volumes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
juraj-google-style
|
def dp020(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `dp020`'.format(value))
self._dp020 = value
|
Corresponds to IDD Field `dp020`
Dew-point temperature corresponding to 2.0% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `dp020`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def _execute(self, command, data=None, unpack=True):
if not data:
data = {}
data.setdefault('element_id', self.element_id)
return self._driver._execute(command, data, unpack)
|
Private method to execute command with data.
Args:
command(Command): The defined command.
data(dict): The uri variable and body.
Returns:
The unwrapped value field in the json response.
|
juraj-google-style
|
def __init__(self, tensor_callable, dtype, device):
super().__init__(tensor_callable, None, None, dtype, device)
|
Initializes a `Callable` object.
Args:
tensor_callable: A callable that takes no arguments and returns a Tensor.
dtype: Dtype of the tensor returned by the callable.
device: Device of the tensor returned by the callable.
|
github-repos
|
def get_dropout(x, rate=0.0, init=True):
if init or rate == 0:
return x
return tf.layers.dropout(x, rate=rate, training=True)
|
Dropout x with dropout_rate = rate.
Apply zero dropout during init or prediction time.
Args:
x: 4-D Tensor, shape=(NHWC).
rate: Dropout rate.
init: Initialization.
Returns:
x: activations after dropout.
|
juraj-google-style
|
def _reset_non_empty(self, indices):
observ = tf.py_func(
self._batch_env.reset, [indices], self.observ_dtype, name="reset")
observ.set_shape(indices.get_shape().concatenate(self.observ_shape))
with tf.control_dependencies([
tf.scatter_update(self._observ, indices, observ)]):
return tf.identity(observ)
|
Reset the batch of environments.
Args:
indices: The batch indices of the environments to reset; defaults to all.
Returns:
Batch tensor of the new observations.
|
juraj-google-style
|
def ConcatWith(x, tensor, dim):
if type(tensor) != list:
tensor = [tensor]
return tf.concat([x] + tensor, dim)
|
A wrapper around ``tf.concat`` to cooperate with :class:`LinearWrap`.
Args:
x (tf.Tensor): input
tensor (list[tf.Tensor]): a tensor or list of tensors to concatenate with x.
x will be at the beginning
dim (int): the dimension along which to concatenate
Returns:
tf.Tensor: ``tf.concat([x] + tensor, dim)``
|
juraj-google-style
|
def _copy_stream_position(position):
if isinstance(position, types.StreamPosition):
output = types.StreamPosition()
output.CopyFrom(position)
return output
return types.StreamPosition(**position)
|
Copy a StreamPosition.
Args:
position (Union[ \
dict, \
~google.cloud.bigquery_storage_v1beta1.types.StreamPosition \
]):
StreamPosition (or dictionary in StreamPosition format) to copy.
Returns:
~google.cloud.bigquery_storage_v1beta1.types.StreamPosition:
A copy of the input StreamPosition.
|
juraj-google-style
|
def Get(self, key):
if key not in self._hash:
raise KeyError(key)
node = self._hash[key]
self._age.Unlink(node)
self._age.AppendNode(node)
return node.data
|
Fetch the object from cache.
Objects may be flushed from cache at any time. Callers must always
handle the possibility of KeyError raised here.
Args:
key: The key used to access the object.
Returns:
Cached object.
Raises:
KeyError: If the object is not present in the cache.
|
juraj-google-style
|
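The `Get` above implements the usual LRU "touch on access" idiom with a hand-rolled age list (`self._age`). As a rough standalone analogue (not the class's actual internals), the same idea with `collections.OrderedDict`:

```python
from collections import OrderedDict

cache = OrderedDict([('a', 1), ('b', 2)])

def get(key):
    if key not in cache:
        raise KeyError(key)          # caller must handle flushed entries
    cache.move_to_end(key)           # mark as most recently used
    return cache[key]

assert get('a') == 1
assert list(cache) == ['b', 'a']     # 'a' is now the freshest entry
```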
def add_model_tags(self, tags: Union[List[str], str]) -> None:
if isinstance(tags, str):
tags = [tags]
if self.model_tags is None:
self.model_tags = []
for tag in tags:
if tag not in self.model_tags:
self.model_tags.append(tag)
|
Add custom tags into the model that gets pushed to the Hugging Face Hub. Will
not overwrite existing tags in the model.
Args:
tags (`Union[List[str], str]`):
The desired tags to inject in the model
Examples:
```python
from transformers import AutoModel
model = AutoModel.from_pretrained("google-bert/bert-base-cased")
model.add_model_tags(["custom", "custom-bert"])
# Push the model to your namespace with the name "my-custom-bert".
model.push_to_hub("my-custom-bert")
```
|
github-repos
|
def verify_binary(flag_name, process_args=None):
if (process_args is None):
process_args = []
path = getattr(FLAGS, flag_name)
if (not path):
logging.error(('Flag %r not set' % flag_name))
sys.exit(1)
with open(os.devnull, 'w') as dev_null:
try:
subprocess.check_call(([path] + process_args), stdout=dev_null, stderr=subprocess.STDOUT)
except:
logging.exception('--%s binary at path %r does not work', flag_name, path)
sys.exit(1)
|
Exits the program if the binary from the given flag doesn't run.
Args:
flag_name: Name of the flag that should be the path to the binary.
process_args: Args to pass to the binary that make it do nothing but
verify that it's working correctly (something like "--version" is a
good choice). Optional. Defaults to no args.
Raises:
SystemExit with error if the process did not work.
|
codesearchnet
|
def consult_robots_txt(self, request: HTTPRequest) -> bool:
if not self._robots_txt_checker:
return True
result = yield from self._robots_txt_checker.can_fetch(request)
return result
|
Consult by fetching robots.txt as needed.
Args:
request: The request to be made
to get the file.
Returns:
True if the request can be fetched according to robots.txt.
Coroutine.
|
juraj-google-style
|
def cache_penalty_model(penalty_model, database=None):
if not _is_index_labelled(penalty_model.graph):
mapping, __ = _graph_canonicalization(penalty_model.graph)
penalty_model = penalty_model.relabel_variables(mapping, inplace=False)
if database is None:
conn = cache_connect()
else:
conn = cache_connect(database)
with conn as cur:
insert_penalty_model(cur, penalty_model)
conn.close()
|
Caching function for penaltymodel_cache.
Args:
penalty_model (:class:`penaltymodel.PenaltyModel`): Penalty model to
be cached.
database (str, optional): The path to the desired sqlite database
file. If None, will use the default.
|
juraj-google-style
|
def original_args(self):
return self._original_args
|
A `SessionRunArgs` object holding the original arguments of `run()`.
If user called `MonitoredSession.run(fetches=a, feed_dict=b)`, then this
field is equal to SessionRunArgs(a, b).
Returns:
A `SessionRunArgs` object
|
github-repos
|
def Webhook(self, request, global_params=None):
config = self.GetMethodConfig('Webhook')
return self._RunMethod(config, request, global_params=global_params)
|
ReceiveWebhook is called when the API receives a GitHub webhook.
Args:
request: (CloudbuildWebhookRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
|
github-repos
|
def _command_template(self, switches, objectInput=None):
command = ["java", "-jar", self.file_jar, "-eUTF-8"]
if self.memory_allocation:
command.append("-Xmx{}".format(self.memory_allocation))
command.extend(switches)
if not objectInput:
objectInput = subprocess.PIPE
log.debug("Subprocess command: {}".format(", ".join(command)))
if six.PY2:
with open(os.devnull, "w") as devnull:
out = subprocess.Popen(
command,
stdin=objectInput,
stdout=subprocess.PIPE,
stderr=devnull)
elif six.PY3:
out = subprocess.Popen(
command,
stdin=objectInput,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
stdoutdata, _ = out.communicate()
return stdoutdata.decode("utf-8").strip()
|
Template for Tika app commands
Args:
switches (list): list of switches to Tika app Jar
objectInput (object): file object/standard input to analyze
Return:
Standard output data (unicode Python 2, str Python 3)
|
juraj-google-style
|
def get_categorical_features_to_sampling(examples, top_k):
observed_features = collections.defaultdict(list)
for example in examples:
for feature_name in get_categorical_feature_names(example):
original_feature = parse_original_feature_from_example(example, feature_name)
observed_features[feature_name].extend(original_feature.original_value)
result = {}
for (feature_name, feature_values) in sorted(iteritems(observed_features)):
samples = [word for (word, count) in collections.Counter(feature_values).most_common(top_k) if (count > 1)]
if samples:
result[feature_name] = {'samples': samples}
return result
|
Returns categorical features and a sampling of their most-common values.
The results of this slow function are used by the visualization repeatedly,
so the results are cached.
Args:
examples: Examples to read to get feature samples.
top_k: Max number of samples to return per feature.
Returns:
A dict of feature_name -> {'samples': ['Married-civ-spouse',
'Never-married', 'Divorced']}.
There is one key for each categorical feature.
Currently, the inner dict just has one key, but this structure leaves room
for further expansion, and mirrors the structure used by
`get_numeric_features_to_observed_range`.
|
codesearchnet
|
def _indent(lines, prefix=' '):
indented = []
for line in lines.split('\n'):
indented.append((prefix + line))
return '\n'.join(indented)
|
Indent some text.
Note that this is present as ``textwrap.indent``, but not in Python 2.
Args:
lines (str): The newline delimited string to be indented.
prefix (Optional[str]): The prefix to indent each line with. Default
to two spaces.
Returns:
str: The newly indented content.
|
codesearchnet
|
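A quick usage sketch, assuming `_indent` from the row above is in scope; the prefixes are passed explicitly.

```python
text = "first line\nsecond line"

assert _indent(text, prefix="  ") == "  first line\n  second line"
assert _indent(text, prefix="> ") == "> first line\n> second line"
```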
def on_created(self, event):
self._logger.debug('Detected create event on watched path: %s', event.src_path)
self._process_event(event)
|
Function called every time a new file is created.
Args:
event: Event to process.
|
codesearchnet
|
def generate_csr(self, csr_data, bay_number=None):
uri = "{}/https/certificaterequest".format(self.data['uri'])
if bay_number:
uri += "?bayNumber=%d" % (bay_number)
headers = {'Content-Type': 'application/json'}
return self._helper.do_post(uri, csr_data, -1, headers)
|
Creates a Certificate Signing Request (CSR) for an enclosure.
Args:
csr_data: Dictionary with csr details.
bay_number: OA from which the CSR should be generated.
Returns:
Enclosure.
|
juraj-google-style
|
def valid_identifiers(self):
funcs = (list(utils.find_all(self.contexts[-1])) + list(self.builtins))
return funcs
|
Get a list of all valid identifiers for the current context.
Returns:
list(str): A list of all of the valid identifiers for this context
|
codesearchnet
|
def set_display_name(self, display_name):
self.displayname = display_name
return self.api.set_display_name(self.user_id, display_name)
|
Set this user's display name.
Args:
display_name (str): Display Name
|
codesearchnet
|
def get_by_hostname(self, hostname):
resources = self._client.get_all()
resources_filtered = [x for x in resources if x['hostname'] == hostname]
if resources_filtered:
return resources_filtered[0]
else:
return None
|
Retrieve a storage system by its hostname.
Works only in API500 onwards.
Args:
hostname: Storage system hostname.
Returns:
dict
|
juraj-google-style
|
def fillup_layer(names):
longest = max([len(name) for name in names])
inputs_wires = []
for name in names:
inputs_wires.append(InputWire(name.rjust(longest)))
return inputs_wires
|
Creates a layer with InputWire elements.
Args:
names (list): List of names for the wires.
Returns:
list: The new layer
|
juraj-google-style
|
def get_samples_live_last(self, sensor_id):
url = 'https:
headers = self.__gen_headers()
headers['Content-Type'] = 'application/json'
params = {'sensorId': sensor_id}
url = self.__append_url_params(url, params)
r = requests.get(url, headers=headers)
return r.json()
|
Get the last sample recorded by the sensor.
Args:
sensor_id (string): hexadecimal id of the sensor to query, e.g.
``0x0013A20040B65FAD``
Returns:
list: dictionary objects containing sample data
|
codesearchnet
|
def list_sku_versions(access_token, subscription_id, location, publisher, offer, sku):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/', 'locations/', location, '/publishers/', publisher, '/artifacttypes/vmimage/offers/', offer, '/skus/', sku, '/versions?api-version=', COMP_API])
return do_get(endpoint, access_token)
|
List available versions for a given publisher's sku.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
location (str): Azure data center location. E.g. westus.
publisher (str): VM image publisher. E.g. MicrosoftWindowsServer.
offer (str): VM image offer. E.g. WindowsServer.
sku (str): VM image sku. E.g. 2016-Datacenter.
Returns:
HTTP response with JSON list of versions.
|
codesearchnet
|
def reset_subscription_since(self, account_id, datetime_str):
data = {
'account_id': account_id,
'datetime': datetime_str,
}
return self._perform_post_request(self.reset_subscription_since_endpoint, data, self.token_header)
|
Handler for `--reset-subscription-since` command.
Args:
account_id(int): id of the account to reset.
datetime_str(str): string representing the datetime used in the
next poll to retrieve data since.
Returns:
(str) json encoded response.
NOTES:
We don't perform validation here; we delegate that responsibility to
the backend.
|
juraj-google-style
|
def disconnect(self):
result = False
logger.debug('SK8.disconnect({})'.format(self.conn_handle))
if (self.conn_handle >= 0):
logger.debug('Calling dongle disconnect')
result = self.dongle._disconnect(self.conn_handle)
self.conn_handle = -1
self.packets = 0
return result
|
Disconnect the dongle from this SK8.
Simply closes the active BLE connection to the device represented by the current instance.
Returns:
bool. True if connection was closed, False if not (e.g. if already closed).
|
codesearchnet
|
def filter_devices(ads, func):
results = []
for ad in ads:
if func(ad):
results.append(ad)
return results
|
Finds the AndroidDevice instances from a list that match certain
conditions.
Args:
ads: A list of AndroidDevice instances.
func: A function that takes an AndroidDevice object and returns True
if the device satisfies the filter condition.
Returns:
A list of AndroidDevice instances that satisfy the filter condition.
|
github-repos
|
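A minimal usage sketch, assuming `filter_devices` from the row above is in scope; `FakeDevice` is a made-up stand-in for Mobly's AndroidDevice with just the attributes the predicate needs.

```python
from collections import namedtuple

FakeDevice = namedtuple('FakeDevice', ['serial', 'is_emulator'])
ads = [FakeDevice('A1', False), FakeDevice('E1', True), FakeDevice('A2', False)]

# Keep only physical devices.
physical = filter_devices(ads, lambda ad: not ad.is_emulator)
assert [d.serial for d in physical] == ['A1', 'A2']
```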
def _QueryHash(self, digest):
if (not self._url):
self._url = '{0:s}:
request_data = {self.lookup_hash: digest}
try:
json_response = self.MakeRequestAndDecodeJSON(self._url, 'POST', data=request_data)
except errors.ConnectionError as exception:
json_response = None
logger.error('Unable to query Viper with error: {0!s}.'.format(exception))
return json_response
|
Queries the Viper Server for a specific hash.
Args:
digest (str): hash to look up.
Returns:
dict[str, object]: JSON response or None on error.
|
codesearchnet
|
def is_unused(input, model_file=None, model_proto=None, name=None):
return _gen_sentencepiece_processor_op.sentencepiece_get_piece_type(
input, model_file=model_file, model_proto=model_proto, name=name,
piece_type=2)
|
Returns true if input id is unused piece.
Args:
input: An arbitrary tensor of int32.
model_file: The sentencepiece model file path.
model_proto: The sentencepiece model serialized proto.
Either `model_file` or `model_proto` must be set.
name: The name argument that is passed to the op function.
Returns:
A tensor of bool with the same shape as input.
|
juraj-google-style
|
def _validate_isvalid_uncertainty(self, isvalid_uncertainty, field, value):
self._validate_isvalid_quantity(True, field, value)
if ((len(value) > 1) and (value[1]['uncertainty-type'] != 'relative')):
if (value[1].get('uncertainty') is not None):
self._validate_isvalid_quantity(True, field, [value[1]['uncertainty']])
if (value[1].get('upper-uncertainty') is not None):
self._validate_isvalid_quantity(True, field, [value[1]['upper-uncertainty']])
if (value[1].get('lower-uncertainty') is not None):
self._validate_isvalid_quantity(True, field, [value[1]['lower-uncertainty']])
|
Checks for valid given value and appropriate units with uncertainty.
Args:
isvalid_uncertainty (`bool`): flag from schema indicating uncertainty to be checked
field (`str`): property associated with the quantity in question.
value (`list`): list with the string of the value of the quantity and a dictionary of
the uncertainty
The rule's arguments are validated against this schema:
{'isvalid_uncertainty': {'type': 'bool'}, 'field': {'type': 'str'},
'value': {'type': 'list'}}
|
codesearchnet
|
def map(self, internalize: Callable[([TExternalQubit], TInternalQubit)], externalize: Callable[([TInternalQubit], TExternalQubit)]) -> 'QubitOrder':
def func(qubits):
unwrapped_qubits = [internalize(q) for q in qubits]
unwrapped_result = self.order_for(unwrapped_qubits)
return tuple((externalize(q) for q in unwrapped_result))
return QubitOrder(func)
|
Transforms the Basis so that it applies to wrapped qubits.
Args:
externalize: Converts an internal qubit understood by the underlying
basis into an external qubit understood by the caller.
internalize: Converts an external qubit understood by the caller
into an internal qubit understood by the underlying basis.
Returns:
A basis that transforms qubits understood by the caller into qubits
understood by an underlying basis, uses that to order the qubits,
then wraps the ordered qubits back up for the caller.
|
codesearchnet
|
def call(self, input_features: TFModelInputType | None=None, decoder_input_ids: np.ndarray | tf.Tensor | None=None, decoder_attention_mask: np.ndarray | tf.Tensor | None=None, decoder_position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, decoder_head_mask: np.ndarray | tf.Tensor | None=None, cross_attn_head_mask: np.ndarray | tf.Tensor | None=None, encoder_outputs: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, decoder_inputs_embeds: Optional[Tuple[Union[np.ndarray, tf.Tensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]:
outputs = self.model(input_features=input_features, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
return outputs
|
Returns:
Example:
```python
>>> import tensorflow as tf
>>> from transformers import TFWhisperModel, AutoFeatureExtractor
>>> from datasets import load_dataset
>>> model = TFWhisperModel.from_pretrained("openai/whisper-base")
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-base")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="tf")
>>> input_features = inputs.input_features
>>> decoder_input_ids = tf.convert_to_tensor([[1, 1]]) * model.config.decoder_start_token_id
>>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state
>>> list(last_hidden_state.shape)
[1, 2, 512]
```
|
github-repos
|
def write_block(self, block_, body):
self.write('for ; πF.State() >= 0; πF.PopCheckpoint() {')
with self.indent_block():
self.write('switch πF.State() {')
self.write('case 0:')
for checkpoint in block_.checkpoints:
self.write_tmpl('case $state: goto Label$state', state=checkpoint)
self.write('default: panic("unexpected function state")')
self.write('}')
with self.indent_block(-1):
self.write(body)
self.write('}')
|
Outputs the boilerplate necessary for code blocks like functions.
Args:
block_: The Block object representing the code block.
body: String containing Go code making up the body of the code block.
|
juraj-google-style
|
def ParseFileObject(self, parser_mediator, file_object):
filename = parser_mediator.GetFilename()
if not filename.startswith('$I'):
raise errors.UnableToParseFile('Filename must start with $I.')
file_header_map = self._GetDataTypeMap('recycle_bin_metadata_file_header')
try:
file_header, _ = self._ReadStructureFromFileObject(
file_object, 0, file_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile((
'Unable to parse Windows Recycle.Bin metadata file header with '
'error: {0!s}').format(exception))
if file_header.format_version not in self._SUPPORTED_FORMAT_VERSIONS:
raise errors.UnableToParseFile(
'Unsupported format version: {0:d}.'.format(
file_header.format_version))
if file_header.deletion_time == 0:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
else:
date_time = dfdatetime_filetime.Filetime(
timestamp=file_header.deletion_time)
event_data = WinRecycleBinEventData()
try:
event_data.original_filename = self._ParseOriginalFilename(
file_object, file_header.format_version)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse original filename with error: {0!s}.'.format(
exception))
event_data.file_size = file_header.original_file_size
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_DELETED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a Windows Recycle.Bin metadata ($I) file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
juraj-google-style
|
def flush(self, force: bool=False) -> bool | Any:
return self._messages.flush(force=force)
|
Flushes the underlying log message queue.
Args:
* force: If True, force queue to flush
Returns:
* True, if flushed with no errors
* False, if not flushed
* Error value from logger, if flushed with errors
|
github-repos
|
def get_symbol(self, symbol):
self._ensure_symbols_loaded()
if (type(symbol) is int):
return self._symbols_by_index[symbol]
else:
return self._symbols_by_name[symbol]
|
Get a specific symbol by index or name.
Args:
symbol(int or str): The index or name of the symbol to return.
Returns:
ELF.Symbol: The symbol.
Raises:
KeyError: The requested symbol does not exist.
|
codesearchnet
|
def ParseFileObject(self, parser_mediator, file_object):
try:
file_header = self._ReadFileHeader(file_object)
except (ValueError, errors.ParseError):
raise errors.UnableToParseFile('Unable to parse file header.')
tables = self._ReadTablesArray(file_object, file_header.tables_array_offset)
table = tables.get(self._RECORD_TYPE_APPLICATION_PASSWORD, None)
if table:
for record in table.records:
self._ParseApplicationPasswordRecord(parser_mediator, record)
table = tables.get(self._RECORD_TYPE_INTERNET_PASSWORD, None)
if table:
for record in table.records:
self._ParseInternetPasswordRecord(parser_mediator, record)
|
Parses a MacOS keychain file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
juraj-google-style
|
def get_request_fields(self):
if hasattr(self, '_request_fields'):
return self._request_fields
include_fields = self.get_request_feature(self.INCLUDE)
exclude_fields = self.get_request_feature(self.EXCLUDE)
request_fields = {}
for (fields, include) in ((include_fields, True), (exclude_fields, False)):
if (fields is None):
continue
for field in fields:
field_segments = field.split('.')
num_segments = len(field_segments)
current_fields = request_fields
for (i, segment) in enumerate(field_segments):
last = (i == (num_segments - 1))
if segment:
if last:
current_fields[segment] = include
else:
if (segment not in current_fields):
current_fields[segment] = {}
current_fields = current_fields[segment]
elif (not last):
raise exceptions.ParseError(('"%s" is not a valid field.' % field))
self._request_fields = request_fields
return request_fields
|
Parses the INCLUDE and EXCLUDE features.
Extracts the dynamic field features from the request parameters
into a field map that can be passed to a serializer.
Returns:
A nested dict mapping serializer keys to
True (include) or False (exclude).
|
codesearchnet
|
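The nested-field parsing above is easiest to see on concrete input. This standalone sketch repeats just the dotted-path expansion for hypothetical include/exclude lists; it is not the serializer machinery itself.

```python
include_fields, exclude_fields = ['user.name', 'user.email'], ['metadata']

request_fields = {}
for fields, include in ((include_fields, True), (exclude_fields, False)):
    for field in fields:
        segments = field.split('.')
        current = request_fields
        for i, segment in enumerate(segments):
            if i == len(segments) - 1:
                current[segment] = include                 # leaf: include/exclude flag
            else:
                current = current.setdefault(segment, {})  # descend into nested map

assert request_fields == {'user': {'name': True, 'email': True}, 'metadata': False}
```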
def get_resource_from_handle(self, resource_handle):
repo_type = resource_handle.get("repository_type")
location = resource_handle.get("location")
if not (repo_type and location):
raise ValueError("PackageRepositoryManager requires "
"resource_handle objects to have a "
"repository_type and location defined")
path = "%s@%s" % (repo_type, location)
repo = self.get_repository(path)
resource = repo.get_resource_from_handle(resource_handle)
return resource
|
Get a resource.
Args:
resource_handle (`ResourceHandle`): Handle of the resource.
Returns:
`PackageRepositoryResource` instance.
|
juraj-google-style
|
def branch_lengths(self, terminal=True, internal=True):
if not isinstance(terminal, bool):
raise TypeError("terminal must be a bool")
if not isinstance(internal, bool):
raise TypeError("internal must be a bool")
for node in self.traverse_preorder():
if (internal and not node.is_leaf()) or (terminal and node.is_leaf()):
if node.edge_length is None:
yield 0
else:
yield node.edge_length
|
Generator over the lengths of the selected branches of this ``Tree``. Edges with length ``None`` will be output as 0-length
Args:
``terminal`` (``bool``): ``True`` to include terminal branches, otherwise ``False``
``internal`` (``bool``): ``True`` to include internal branches, otherwise ``False``
|
juraj-google-style
|
def from_string(cls, string):
lines = string.split('\n')
timestep = int(lines[1])
natoms = int(lines[3])
box_arr = np.loadtxt(StringIO('\n'.join(lines[5:8])))
bounds = box_arr[:, :2]
tilt = None
if ('xy xz yz' in lines[4]):
tilt = box_arr[:, 2]
x = (0, tilt[0], tilt[1], (tilt[0] + tilt[1]))
y = (0, tilt[2])
bounds -= np.array([[min(x), max(x)], [min(y), max(y)], [0, 0]])
box = LammpsBox(bounds, tilt)
data_head = lines[8].replace('ITEM: ATOMS', '').split()
data = pd.read_csv(StringIO('\n'.join(lines[9:])), names=data_head, delim_whitespace=True)
return cls(timestep, natoms, box, data)
|
Constructor from string parsing.
Args:
string (str): Input string.
|
codesearchnet
|
def get_default_query_from_module(module):
if isinstance(module, types.ModuleType):
return module.__dict__.get(_SQL_MODULE_LAST, None)
return None
|
Given a %%sql module return the default (last) query for the module.
Args:
module: the %%sql module.
Returns:
The default query associated with this module.
|
juraj-google-style
|
def PrintExtractionStatusHeader(self, processing_status):
self._output_writer.Write('Source path\t\t: {0:s}\n'.format(self._source_path))
self._output_writer.Write('Source type\t\t: {0:s}\n'.format(self._source_type))
if self._artifact_filters:
artifacts_string = ', '.join(self._artifact_filters)
self._output_writer.Write('Artifact filters\t: {0:s}\n'.format(artifacts_string))
if self._filter_file:
self._output_writer.Write('Filter file\t\t: {0:s}\n'.format(self._filter_file))
self._PrintProcessingTime(processing_status)
self._PrintTasksStatus(processing_status)
self._output_writer.Write('\n')
|
Prints the extraction status header.
Args:
processing_status (ProcessingStatus): processing status.
|
codesearchnet
|
def UpdateUser(self, user, ssh_keys):
if (not bool(USER_REGEX.match(user))):
self.logger.warning('Invalid user account name %s.', user)
return False
if (not self._GetUser(user)):
if (not (self._AddUser(user) and self._UpdateUserGroups(user, self.groups))):
return False
if (not self._UpdateSudoer(user, sudoer=True)):
return False
pw_entry = self._GetUser(user)
if (pw_entry and (os.path.basename(pw_entry.pw_shell) == 'nologin')):
message = 'Not updating user %s. User set `nologin` as login shell.'
self.logger.debug(message, user)
return True
try:
self._UpdateAuthorizedKeys(user, ssh_keys)
except (IOError, OSError) as e:
message = 'Could not update the authorized keys file for user %s. %s.'
self.logger.warning(message, user, str(e))
return False
else:
return True
|
Update a Linux user with authorized SSH keys.
Args:
user: string, the name of the Linux user account.
ssh_keys: list, the SSH key strings associated with the user.
Returns:
bool, True if the user account updated successfully.
|
codesearchnet
|
def apply(self, func, workers=1, job_size=10000):
if workers == 1:
for lines in self.iter_chunks(job_size):
yield func(lines)
else:
with ProcessPoolExecutor(max_workers=workers) as executor:
for result in executor.map(func, self.iter_chunks(job_size)):
yield result
|
Apply `func` to lines of text in parallel or sequentially.
Args:
func : a function that takes a list of lines.
|
juraj-google-style
|
def CleanUpTest(cls, func):
def new_method(self, *args, **kwargs):
mox_obj = getattr(self, 'mox', None)
cleanup_mox = False
if mox_obj and isinstance(mox_obj, Mox):
cleanup_mox = True
try:
func(self, *args, **kwargs)
finally:
if cleanup_mox:
mox_obj.UnsetStubs()
if cleanup_mox:
mox_obj.VerifyAll()
new_method.__name__ = func.__name__
new_method.__doc__ = func.__doc__
new_method.__module__ = func.__module__
return new_method
|
Adds Mox cleanup code to any MoxTestBase method.
Always unsets stubs after a test. Will verify all mocks for tests that
otherwise pass.
Args:
cls: MoxTestBase or subclass; the class whose test method we are altering.
func: method; the method of the MoxTestBase test class we wish to alter.
Returns:
The modified method.
|
juraj-google-style
|
def goto_step(self, inst: InstanceNode) -> InstanceNode:
return inst.look_up(**self.parse_keys(inst.schema_node))
|
Return member instance of `inst` addressed by the receiver.
Args:
inst: Current instance.
|
juraj-google-style
|
def get_appliance(self, appliance_id):
url = "https:
headers = self.__gen_headers()
headers["Content-Type"] = "application/json"
r = requests.get(url, headers=headers)
return r.json()
|
Get the information for a specified appliance
Args:
appliance_id (string): identifying string of the appliance
Returns:
list: dictionary object containing information about the specified appliance
|
juraj-google-style
|
def Write2000256List(self, arr):
for item in arr:
ba = bytearray(binascii.unhexlify(item))
ba.reverse()
self.WriteBytes(ba)
|
Write an array of 64 byte items to the stream.
Args:
arr (list): a list of 2000 items of 64 bytes in size.
|
juraj-google-style
|
def __init__(self, sdat):
self.sdat = sdat
self._last = UNDETERMINED
self._data = {None: _step.EmptyStep()}
|
Initialization of instances:
Args:
sdat (:class:`StagyyData`): the StagyyData instance owning the
:class:`_Steps` instance.
Attributes:
sdat (:class:`StagyyData`): the StagyyData instance owning the
:class:`_Steps` instance.
|
juraj-google-style
|
def do_block(args):
rest_client = RestClient(args.url, args.user)
if args.subcommand == 'list':
block_generator = rest_client.list_blocks()
blocks = []
left = args.count
for block in block_generator:
blocks.append(block)
left -= 1
if left <= 0:
break
keys = ('num', 'block_id', 'batches', 'txns', 'signer')
headers = tuple(k.upper() if k != 'batches' else 'BATS' for k in keys)
def parse_block_row(block):
batches = block.get('batches', [])
txns = [t for b in batches for t in b['transactions']]
return (
block['header'].get('block_num', 0),
block['header_signature'],
len(batches),
len(txns),
block['header']['signer_public_key'])
if args.format == 'default':
fmt.print_terminal_table(headers, blocks, parse_block_row)
elif args.format == 'csv':
fmt.print_csv(headers, blocks, parse_block_row)
elif args.format == 'json' or args.format == 'yaml':
data = [{k: d for k, d in zip(keys, parse_block_row(b))}
for b in blocks]
if args.format == 'yaml':
fmt.print_yaml(data)
elif args.format == 'json':
fmt.print_json(data)
else:
raise AssertionError('Missing handler: {}'.format(args.format))
else:
raise AssertionError('Missing handler: {}'.format(args.format))
if args.subcommand == 'show':
output = rest_client.get_block(args.block_id)
if args.key:
if args.key in output:
output = output[args.key]
elif args.key in output['header']:
output = output['header'][args.key]
else:
raise CliException(
'key "{}" not found in block or header'.format(args.key))
if args.format == 'yaml':
fmt.print_yaml(output)
elif args.format == 'json':
fmt.print_json(output)
else:
raise AssertionError('Missing handler: {}'.format(args.format))
|
Runs the block list or block show command, printing output to the
console
Args:
args: The parsed arguments sent to the command at runtime
|
juraj-google-style
|
def scan(initial_state, scan_func):
def _apply_fn(dataset):
return dataset.scan(initial_state=initial_state, scan_func=scan_func)
return _apply_fn
|
A transformation that scans a function across an input dataset.
This transformation is a stateful relative of `tf.data.Dataset.map`.
In addition to mapping `scan_func` across the elements of the input dataset,
`scan()` accumulates one or more state tensors, whose initial values are
`initial_state`.
Args:
initial_state: A nested structure of tensors, representing the initial state
of the accumulator.
scan_func: A function that maps `(old_state, input_element)` to
`(new_state, output_element)`. It must take two arguments and return a
pair of nested structures of tensors. The `new_state` must match the
structure of `initial_state`.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
|
github-repos
|
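As a usage sketch, a running sum over a small dataset; this assumes a TF 2.x environment where `tf.data.Dataset.scan` exists and the `scan` transformation from the row above is in scope.

```python
import tensorflow as tf

dataset = tf.data.Dataset.range(5).apply(
    scan(initial_state=tf.constant(0, dtype=tf.int64),
         scan_func=lambda state, x: (state + x, state + x)))

print(list(dataset.as_numpy_iterator()))  # [0, 1, 3, 6, 10]
```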
def save(hdf5_filename, array):
hdf5_filename = os.path.expanduser(hdf5_filename)
try:
h = h5py.File(hdf5_filename, 'w')
h.create_dataset('CUTOUT', data=array)
h.close()
except Exception as e:
raise ValueError('Could not save HDF5 file {0}.'.format(hdf5_filename))
return hdf5_filename
|
Export a numpy array to a HDF5 file.
Arguments:
hdf5_filename (str): A filename to which to save the HDF5 data
array (numpy.ndarray): The numpy array to save to HDF5
Returns:
String. The expanded filename that now holds the HDF5 data
|
codesearchnet
|
def recv_result_from_workers(self):
info = MPI.Status()
result = self.comm.recv(source=MPI.ANY_SOURCE, tag=RESULT_TAG, status=info)
logger.debug('Received result from workers: {}'.format(result))
return result
|
Receives a result from the MPI worker pool and sends it out via 0mq
Returns:
--------
result: task result from the workers
|
codesearchnet
|
def _get_dequantized_hist_mids_after_quantize(self, quant_min: float, quant_max: float) -> np.ndarray:
maxbound = 2 ** self._num_bits - 1
minbound = 0
scale = (quant_max - quant_min) / maxbound
zero_point = -quant_min / scale
if abs(zero_point) > 9000000000.0:
zero_point = 9000000000.0
if abs(scale) < 1e-09:
scale = 1e-09
zero_point = round(zero_point)
quantized_hist_mids = np.clip(np.round(self._hist_mids / scale) + zero_point, minbound, maxbound)
dequantized_hist_mids = scale * (quantized_hist_mids - zero_point)
return dequantized_hist_mids
|
Quantizes and dequantizes hist_mids using quant_min and quant_max.
Quantization converts the range of numbers from [quant_min, quant_max] to
[0, 2^num_bits - 1]. Values less than quant_min are converted to 0, and
values greater than quant_max are converted to 2^num_bits - 1.
The histogram represents the distribution of the data, and our goal is to
find the quant_min and quant_max that best describe this distribution. To do
this, we quantize hist_mids using quant_min and quant_max and dequantize
them again. Then the difference between hist_mids and dequantized hist_mids
equates to quantization error when using quant_min and quant_max.
Args:
quant_min: The minimum real value that can be represented by a quantized
value.
quant_max: The maximum real value that can be represented by a quantized
value.
Returns:
dequantized hist_mids after quantizing by quant_min and quant_max
|
github-repos
|
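The quantize/dequantize round trip above is affine quantization applied to the histogram mid-points. A standalone numeric sketch with made-up `num_bits`, `hist_mids` and candidate bounds shows where the error comes from:

```python
import numpy as np

num_bits = 8
hist_mids = np.linspace(-1.0, 4.0, 11)     # hypothetical histogram mid-points
quant_min, quant_max = -1.0, 3.0           # candidate clipping range

maxbound = 2 ** num_bits - 1
scale = (quant_max - quant_min) / maxbound
zero_point = round(-quant_min / scale)

quantized = np.clip(np.round(hist_mids / scale) + zero_point, 0, maxbound)
dequantized = scale * (quantized - zero_point)

# Mids inside [quant_min, quant_max] survive almost unchanged; mids above
# quant_max are clipped, and the gap shows up as quantization error.
print(np.abs(hist_mids - dequantized))
```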
def _protobuf_value_to_string(value):
value_in_json = json_format.MessageToJson(value)
if value.HasField("string_value"):
return value_in_json[1:-1]
return value_in_json
|
Returns a string representation of given google.protobuf.Value message.
Args:
value: google.protobuf.Value message. Assumed to be of type 'number',
'string' or 'bool'.
|
juraj-google-style
|
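A short usage sketch, assuming `_protobuf_value_to_string` from the row above is in scope and the protobuf runtime is installed:

```python
from google.protobuf import struct_pb2

assert _protobuf_value_to_string(struct_pb2.Value(string_value='hello')) == 'hello'
assert _protobuf_value_to_string(struct_pb2.Value(number_value=3.5)) == '3.5'
```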
def fetch(self, webfonts):
sorted_keys = sorted(webfonts.keys())
for webfont_name in sorted_keys:
self.get(webfont_name, webfonts[webfont_name])
|
Store every defined webfonts.
Webfont are stored with sort on their name.
Args:
webfonts (dict): Dictionary of webfont settings from
``settings.ICOMOON_WEBFONTS``.
|
codesearchnet
|
def py_to_weld_type(self, obj):
if isinstance(obj, np.ndarray):
dtype = str(obj.dtype)
if dtype == 'int16':
base = WeldInt16()
elif dtype == 'int32':
base = WeldInt()
elif dtype == 'int64':
base = WeldLong()
elif dtype == 'float32':
base = WeldFloat()
elif dtype == 'float64':
base = WeldDouble()
elif dtype == 'bool':
base = WeldBit()
else:
base = WeldVec(WeldChar())
for i in xrange(obj.ndim):
base = WeldVec(base)
elif isinstance(obj, str):
base = WeldVec(WeldChar())
else:
raise Exception("Invalid object type: unable to infer NVL type")
return base
|
Infers the Weld type corresponding to a Python object.
Args:
obj (numpy.ndarray or str): The object whose Weld type to infer.
Returns:
The inferred Weld type (e.g. WeldVec, WeldInt).
Raises:
Exception: If the object's type cannot be mapped to a Weld type.
|
juraj-google-style
|
def _filter_and_bucket_subtokens(subtoken_counts, min_count):
subtoken_buckets = []
for subtoken, count in six.iteritems(subtoken_counts):
if count < min_count:
continue
while len(subtoken_buckets) <= len(subtoken):
subtoken_buckets.append(set())
subtoken_buckets[len(subtoken)].add(subtoken)
return subtoken_buckets
|
Return a bucketed list of subtokens that are filtered by count.
Args:
subtoken_counts: defaultdict mapping subtokens to their counts
min_count: int count used to filter subtokens
Returns:
List of subtoken sets, where subtokens in set i have the same length=i.
|
juraj-google-style
|
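A small worked example, assuming `_filter_and_bucket_subtokens` from the row above is in scope (it only needs `six`); the counts are made up.

```python
counts = {'a': 5, 'ab': 3, 'abc': 1, 'xy': 2}

buckets = _filter_and_bucket_subtokens(counts, min_count=2)

# Index i holds the surviving subtokens of length i; 'abc' is dropped (count < 2).
assert buckets[1] == {'a'}
assert buckets[2] == {'ab', 'xy'}
```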
def _bisect(self, begin, end, listener):
step = ((end.date - begin.date) / 2)
while (abs(step) >= self._eps_bisect):
date = (begin.date + step)
if (self.SPEAKER_MODE == 'global'):
orb = self.propagate(date)
else:
orb = begin.propagate(date)
if ((listener(begin) * listener(orb)) > 0):
begin = orb
else:
end = orb
step = ((end.date - begin.date) / 2)
else:
end.event = listener.info(end)
return end
|
This method searches for the zero-crossing of the watched parameter
Args:
begin (Orbit):
end (Orbit)
listener (Listener)
Returns:
Orbit: the orbit at which the zero-crossing occurs, with its ``event`` attribute set.
|
codesearchnet
|
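Stripped of the orbit/listener machinery, the loop above is a plain sign-change bisection that narrows the interval and returns its upper endpoint. A generic sketch of that core idea:

```python
def bisect_zero(f, lo, hi, eps=1e-9):
    """Find a zero-crossing of f in [lo, hi], assuming f(lo) and f(hi) differ in sign."""
    while abs(hi - lo) / 2 >= eps:
        mid = lo + (hi - lo) / 2
        if f(lo) * f(mid) > 0:   # same sign: the crossing is in the upper half
            lo = mid
        else:                    # sign change: the crossing is in the lower half
            hi = mid
    return hi

root = bisect_zero(lambda x: x ** 2 - 2, 0.0, 2.0)
assert abs(root - 2 ** 0.5) < 1e-6
```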
def _ParseStorageMediaOptions(self, options):
self._ParseStorageMediaImageOptions(options)
self._ParseVSSProcessingOptions(options)
self._ParseCredentialOptions(options)
self._ParseSourcePathOption(options)
|
Parses the storage media options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
|
codesearchnet
|
def __init__(self, dataset_vid=None, vid=None, score=None):
assert vid is not None, 'vid can not be None.'
assert dataset_vid is not None, 'dataset_vid can not be None.'
assert score is not None, 'score can not be None.'
self.dataset_vid = dataset_vid
self.vid = vid
self.score = score
|
Initializes partition search result fields.
Args:
dataset_vid (str): vid of the partition's dataset.
vid (str): partition vid.
score (int): score of the search result.
|
juraj-google-style
|
def from_pure(cls, z):
return cls(cls._key, {z: 1.0}, {z: 1.0}, pyxray.element_symbol(z))
|
Creates a pure composition.
Args:
z (int): atomic number
|
juraj-google-style
|
def _client_receive(self):
try:
response = self._client.readline()
if self.verbose_logging:
self.log.debug('Snippet received: %s', response)
elif _MAX_RPC_RESP_LOGGING_LENGTH >= len(response):
self.log.debug('Snippet received: %s', response)
else:
self.log.debug('Snippet received: %s... %d chars are truncated', response[:_MAX_RPC_RESP_LOGGING_LENGTH], len(response) - _MAX_RPC_RESP_LOGGING_LENGTH)
return response
except socket.error as e:
raise Error(self._ad, 'Encountered socket error reading RPC response "%s"' % e)
|
Receives the server's response of an Rpc message.
Returns:
Raw byte string of the response.
Raises:
Error: a socket error occurred during the read.
|
github-repos
|
def _sort_records_map(records):
ctx = context.get()
l = len(records)
key_records = ([None] * l)
logging.debug('Parsing')
for i in range(l):
proto = kv_pb.KeyValue()
proto.ParseFromString(records[i])
key_records[i] = (proto.key(), records[i])
logging.debug('Sorting')
key_records.sort(cmp=_compare_keys)
logging.debug('Writing')
mapper_spec = ctx.mapreduce_spec.mapper
params = input_readers._get_params(mapper_spec)
bucket_name = params.get('bucket_name')
filename = (ctx.mapreduce_spec.name + '/' + ctx.mapreduce_id + '/output-' + ctx.shard_id + '-' + str(int(time.time())))
full_filename = ('/%s/%s' % (bucket_name, filename))
filehandle = cloudstorage.open(full_filename, mode='w')
with output_writers.GCSRecordsPool(filehandle, ctx=ctx) as pool:
for key_record in key_records:
pool.append(key_record[1])
logging.debug('Finalizing')
filehandle.close()
entity = _OutputFile(key_name=full_filename, parent=_OutputFile.get_root_key(ctx.mapreduce_id))
entity.put()
|
Map function sorting records.
Converts records to KeyValue protos, sorts them by key and writes them
into a new GCS file. Creates an _OutputFile entity to record the
resulting file name.
Args:
records: list of records which are serialized KeyValue protos.
|
codesearchnet
|
def handle_erroneous_response(self, response: requests.Response) -> NoReturn:
logger.debug('handling erroneous response: %s', response)
try:
err = BugZooException.from_dict(response.json())
except Exception:
err = UnexpectedResponse(response)
raise err
|
Attempts to decode an erroneous response into an exception, and to
subsequently throw that exception.
Raises:
BugZooException: the exception described by the error response.
UnexpectedResponse: if the response cannot be decoded to an
exception.
|
codesearchnet
|
def _Open(self, path_spec, mode='rb'):
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(path_spec)
bde_volume = pybde.volume()
file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
try:
bde.BDEVolumeOpen(
bde_volume, path_spec, file_object, resolver.Resolver.key_chain)
except:
file_object.close()
raise
self._bde_volume = bde_volume
self._file_object = file_object
|
Opens the file system defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb'
read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
juraj-google-style
|
def split(self, path):
path = path.strip()
if not path.startswith(BlobStorageFileSystem.AZURE_FILE_SYSTEM_PREFIX):
raise ValueError('Path %r must be Azure Blob Storage path.' % path)
prefix_len = len(BlobStorageFileSystem.AZURE_FILE_SYSTEM_PREFIX)
last_sep = path[prefix_len:].rfind('/')
if last_sep >= 0:
last_sep += prefix_len
if last_sep > 0:
return (path[:last_sep], path[last_sep + 1:])
elif last_sep < 0:
return (path, '')
else:
raise ValueError('Invalid path: %s' % path)
|
Splits the given path into two parts.
Splits the path into a pair (head, tail) such that tail contains the last
component of the path and head contains everything up to that.
For file-systems other than the local file-system, head should include the
prefix.
Args:
path: path as a string
Returns:
a pair of path components as strings.
|
github-repos
|
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (present_key_value,)
return outputs
|
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
|
github-repos
|
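A rough illustration of calling the layer, assuming `layer` is an already-constructed instance of this decoder layer (an nn.Module) with hidden size 512; shapes are illustrative only.
import torch
hidden_states = torch.randn(1, 10, 512)
outputs = layer(hidden_states, output_attentions=True, use_cache=False)
hidden_out = outputs[0]    # (1, 10, 512), same shape as the input
attn_weights = outputs[1]  # self-attention weights, included because output_attentions=True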
def select_with_condition(self, condition, key=None):
condition = Condition.as_condition(condition)
new_confs = []
for conf in self:
obj = conf if key is None else AttrDict(conf[key])
add_it = condition(obj=obj)
if add_it: new_confs.append(conf)
self._confs = new_confs
|
Remove all the configurations that do not satisfy the given condition.
Args:
        condition: dict or :class:`Condition` object with operators expressed in a MongoDB-like syntax
key: Selects the sub-dictionary on which condition is applied, e.g. key="vars"
if we have to filter the configurations depending on the values in vars
|
juraj-google-style
|
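A hedged sketch of the MongoDB-like condition syntax the docstring mentions. `confs` stands for an instance of the collection class defining select_with_condition, and the operator names ($and, $le, $eq, $ge) are assumptions based on that syntax.
condition = {"$and": [
    {"mem_per_cpu": {"$le": 2000}},
    {"omp_threads": {"$eq": 1}},
]}
confs.select_with_condition(condition)
# Evaluate the condition against conf["vars"] instead of the configuration itself.
confs.select_with_condition({"ecut": {"$ge": 10}}, key="vars")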
def _apply_func_to_list_of_partitions(self, func, partitions, **kwargs):
preprocessed_func = self.preprocess_func(func)
return [obj.apply(preprocessed_func, **kwargs) for obj in partitions]
|
Applies a function to a list of remote partitions.
Note: The main use for this is to preprocess the func.
Args:
func: The func to apply
partitions: The list of partitions
Returns:
A list of BaseFramePartition objects.
|
juraj-google-style
|
def make_nested_list_of_images(images: Union[list[ImageInput], ImageInput]) -> ImageInput:
if isinstance(images, (list, tuple)) and all((isinstance(images_i, (list, tuple)) for images_i in images)) and all((is_valid_list_of_images(images_i) for images_i in images)):
return images
if isinstance(images, (list, tuple)) and is_valid_list_of_images(images):
if is_pil_image(images[0]) or images[0].ndim == 3:
return [images]
if images[0].ndim == 4:
return [list(image) for image in images]
if is_valid_image(images):
if is_pil_image(images) or images.ndim == 3:
return [[images]]
if images.ndim == 4:
return [list(images)]
raise ValueError('Invalid input type. Must be a single image, a list of images, or a list of batches of images.')
|
Ensure that the output is a nested list of images.
Args:
images (`Union[List[ImageInput], ImageInput]`):
The input image.
Returns:
    list: A list of lists of images, or a list of 4-D arrays of images.
|
github-repos
|
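A sketch of the normalization rules above, assuming NumPy array inputs and the helper predicates (is_valid_image and friends) from the same module; shapes are illustrative.
import numpy as np
single = np.zeros((3, 224, 224))      # a single 3-D image
batch = np.zeros((2, 3, 224, 224))    # a 4-D batch of two images
make_nested_list_of_images(single)    # -> [[single]]     one batch containing one image
make_nested_list_of_images([single])  # -> [[single]]     same result for a list input
make_nested_list_of_images(batch)     # -> [list(batch)]  one batch containing two images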
def json_to_pybel(data, infer_bonds=False):
obmol = ob.OBMol()
obmol.BeginModify()
for atom in data['atoms']:
obatom = obmol.NewAtom()
obatom.SetAtomicNum(table.GetAtomicNum(str(atom['element'])))
obatom.SetVector(*atom['location'])
if 'label' in atom:
pd = ob.OBPairData()
pd.SetAttribute('_atom_site_label')
pd.SetValue(atom['label'])
obatom.CloneData(pd)
if 'bonds' not in data or not data['bonds']:
if infer_bonds:
obmol.ConnectTheDots()
obmol.PerceiveBondOrders()
else:
for bond in data['bonds']:
if 'atoms' not in bond:
continue
obmol.AddBond(bond['atoms'][0] + 1, bond['atoms'][1] + 1,
bond['order'])
if 'unitcell' in data:
uc = ob.OBUnitCell()
uc.SetData(*(ob.vector3(*v) for v in data['unitcell']))
uc.SetSpaceGroup('P1')
obmol.CloneData(uc)
obmol.EndModify()
mol = pybel.Molecule(obmol)
if 'charge' in data['atoms'][0]:
mol.OBMol.SetPartialChargesPerceived()
for atom, pyatom in zip(data['atoms'], mol.atoms):
pyatom.OBAtom.SetPartialCharge(atom['charge'])
return mol
|
Converts a Python data structure to a pybel.Molecule.
If no bond data is given and infer_bonds is True, bonds are inferred.
Args:
data: The loaded json data of a molecule, as a Python object
infer_bonds (Optional): If no bonds specified in input, infer them
Returns:
An instance of `pybel.Molecule`
|
juraj-google-style
|
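A minimal usage sketch, assuming the Open Babel Python bindings (ob/pybel) used above are installed; the molecule data is made up.
water = {
    "atoms": [
        {"element": "O", "location": [0.000, 0.000, 0.000]},
        {"element": "H", "location": [0.757, 0.586, 0.000]},
        {"element": "H", "location": [-0.757, 0.586, 0.000]},
    ]
}
mol = json_to_pybel(water, infer_bonds=True)  # no "bonds" key, so bonds are perceived
print(mol.formula)  # expected: "H2O"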
def cast_if_floating_dtype_and_mismatch(targets, outputs):
if tensor_util.is_tf_type(targets):
return cast_single_tensor(targets, dtype=outputs[0].dtype)
new_targets = []
for target, out in zip(targets, outputs):
if isinstance(target, np.ndarray):
target = tensor_conversion.convert_to_tensor_v2_with_dispatch(target)
if target.dtype != out.dtype:
new_targets.append(cast_single_tensor(target, dtype=out.dtype))
else:
new_targets.append(target)
return new_targets
|
Returns target data tensors using correct datatype.
Checks that each target/output pair has the same datatype; if not, casts the
target to the output's datatype.
Args:
targets: tensor or list of targets.
outputs: tensor or list of outputs.
Returns:
Targets in appropriate datatype.
|
github-repos
|
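This helper is internal to Keras' training utilities, so the sketch below only illustrates the intended behaviour; it assumes TensorFlow is importable and that cast_single_tensor casts floating tensors to the requested dtype.
import numpy as np
import tensorflow as tf
targets = [np.array([1.0, 0.0], dtype=np.float64)]
outputs = [tf.constant([0.2, 0.8], dtype=tf.float32)]
new_targets = cast_if_floating_dtype_and_mismatch(targets, outputs)
# new_targets[0].dtype == tf.float32, matching the corresponding output tensor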
def transform(self, X):
sklearn.base.check_is_fitted(self)
X = _validate_data(self, X, reset=False)
return self.model_.predict(X)
|
Transform the data.
Args:
X: array-like, shape=(n_samples, n_features)
The input samples.
Returns:
X_transformed: array-like, shape=(n_samples, n_features)
The transformed data.
|
github-repos
|
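A usage sketch, assuming `estimator` is a fitted instance of the scikit-learn-compatible wrapper this transform() belongs to (the wrapped model lives in self.model_).
import numpy as np
X = np.random.rand(8, 4).astype("float32")
X_transformed = estimator.transform(X)  # delegates to self.model_.predict(X)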
def key_for_entity_group(cls, key):
return model.Key(cls.KIND_NAME, cls.ID, parent=key.root())
|
Return the key for the entity group containing key.
Args:
key: a key for an entity group whose __entity_group__ key you want.
Returns:
The __entity_group__ key for the entity group containing key.
|
juraj-google-style
|
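A hedged sketch, assuming App Engine ndb-style keys (model.Key) and a hypothetical subclass EntityGroupRoot that defines the KIND_NAME and ID class attributes used above.
key = model.Key("Parent", 1, "Child", 2)
group_key = EntityGroupRoot.key_for_entity_group(key)
# group_key == model.Key("Parent", 1, EntityGroupRoot.KIND_NAME, EntityGroupRoot.ID)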
def apply_on_inputs(self, named_inputs: Dict[str, EventSetNode]) -> Dict[str, EventSetNode]:
g = deepcopy(self)
assert g.named_inputs is not None
assert g.named_outputs is not None
for name, new_node in named_inputs.items():
if name not in g.named_inputs:
raise ValueError(f"Input node {name} is not in the graph's inputs. Inputs: {g.named_inputs}")
old_node = g.named_inputs[name]
for operator in g.operators:
for name, inp in operator.inputs.items():
if inp is old_node:
operator.inputs[name] = new_node
return g.named_outputs
|
Applies the operators in this graph to new inputs.
Note that the objects in the modified graph are left in an inconsistent
state; this is acceptable because the graph is not used directly afterwards.
When it is run or saved, the graph is re-inferred.
Args:
named_inputs: The new inputs to the graph.
Returns:
The graph's named outputs.
|
github-repos
|
def _convert_to_seeder_format(dataset):
data = {}
seed = {}
_add_if_set(data, "name", dataset.get("title"))
_add_if_set(data, "issn", dataset.get("issn"))
_add_if_set(data, "annotation", dataset.get("annotation"))
rules = dataset.get("rules", {})
if rules:
_add_if_set(data, "frequency", rules.get("frequency"))
_add_if_set(seed, "budget", rules.get("budget"))
_add_if_set(seed, "calendars", rules.get("calendars"))
_add_if_set(seed, "global_reject", rules.get("global_reject"))
_add_if_set(seed, "gentle_fetch", rules.get("gentle_fetch"))
_add_if_set(seed, "javascript", rules.get("javascript"))
_add_if_set(seed, "local_traps", rules.get("local_traps"))
_add_if_set(seed, "youtube", rules.get("youtube"))
_add_if_set(seed, "url", dataset.get("url"))
if seed:
data["seed"] = seed
return data
|
The WA-KAT dataset has a different structure from Seeder's. This converter
translates data from the WA-KAT format to the Seeder format.
Args:
dataset (dict): WA-KAT dataset sent from frontend.
Returns:
dict: Dict with converted data.
|
juraj-google-style
|
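A sketch of the conversion on made-up WA-KAT data; it assumes _add_if_set() only stores truthy values, as its name suggests.
wa_kat_dataset = {
    "title": "Example periodical",
    "issn": "1234-5678",
    "url": "http://example.com",
    "rules": {"frequency": 12, "budget": 10000},
}
print(_convert_to_seeder_format(wa_kat_dataset))
# {'name': 'Example periodical', 'issn': '1234-5678', 'frequency': 12,
#  'seed': {'budget': 10000, 'url': 'http://example.com'}}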
def delete(self, invoice_id, **kwargs):
url = '{}/{}'.format(self.base_url, invoice_id)
return self.delete_url(url, {}, **kwargs)
|
Delete an invoice
You can delete an invoice which is in the draft state.
Args:
invoice_id : Id for delete the invoice
Returns:
    The response is always an empty array: []
|
codesearchnet
|
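A usage sketch, assuming `client.invoice` is an initialised API resource exposing the delete() method above; the invoice id is a placeholder.
result = client.invoice.delete("inv_placeholder00000")
# result == []  (the API returns an empty array, per the docstring)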
def authenticate_identify(self, api_token, override=True):
if (self.context.has_auth_params('Gem-Identify') and not override):
raise OverrideError('Gem-Identify')
if (not api_token or
not self.context.authorize('Gem-Identify', api_token=api_token)):
raise AuthUsageError(self.context, 'Gem-Identify')
return True
|
Set credentials for Identify authentication.
Args:
api_token (str): Token issued to your Application through the Gem
Developer Console.
override (boolean): Replace existing Application credentials.
|
juraj-google-style
|
def format_checksum(checksum_pyxb):
return '{}/{}'.format(checksum_pyxb.algorithm.upper().replace('-', ''), checksum_pyxb.value().lower())
|
Create string representation of a PyXB Checksum object.
Args:
    checksum_pyxb: PyXB Checksum object.
Returns:
str : Combined hexadecimal value and algorithm name.
|
codesearchnet
|
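A behavioural sketch derived from the code above: `checksum_pyxb` stands for any PyXB Checksum instance whose algorithm is "SHA-1" and whose value() returns the hex digest in the comment.
# With algorithm "SHA-1" and value "0A4D55A8D778E5022FAB701977C5D840BBC486D0":
print(format_checksum(checksum_pyxb))
# -> "SHA1/0a4d55a8d778e5022fab701977c5d840bbc486d0"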
def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
size = get_size_dict(size, default_to_square=True)
if 'height' not in size or 'width' not in size:
raise ValueError('size dictionary must contain height and width keys')
return resize(image, (size['height'], size['width']), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
|
Resize an image to a certain size.
Args:
image (`np.ndarray`):
Image to resize.
size (`Dict[str, int]`):
The size to resize the image to. Must contain height and width keys.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
The resampling filter to use when resizing the input.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
|
github-repos
|
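A usage sketch, assuming `image_processor` is an instance of the image processor this resize() belongs to and that the input is a channels-first NumPy array.
import numpy as np
image = np.random.rand(3, 480, 640)  # channels-first
resized = image_processor.resize(image, size={"height": 224, "width": 224})
# resized.shape -> (3, 224, 224); the channel dimension format is inferred from the input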