code (string, 20 to 4.93k chars) | docstring (string, 33 to 1.27k chars) | source (3 classes) |
---|---|---|
def assert_is_compatible_with(self, other):
if not self.is_compatible_with(other):
raise ValueError('Shapes %s and %s are incompatible' % (self, other))
|
Raises exception if `self` and `other` do not represent the same shape.
This method can be used to assert that there exists a shape that both
`self` and `other` represent.
Args:
other: Another TensorShape.
Raises:
ValueError: If `self` and `other` do not represent the same shape.
|
github-repos
|
def eye(size, dtype=None, name=None):
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
return variable(linalg_ops.eye(size, dtype=tf_dtype), dtype, name)
|
Instantiates an identity matrix and returns it.
Args:
size: Integer, number of rows/columns.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, an identity matrix.
Example:
>>> kvar = tf.keras.backend.eye(3)
>>> tf.keras.backend.eval(kvar)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]], dtype=float32)
|
github-repos
|
def compare_python_to_reference_murmur3_32(data: Any, seed: int=0) -> None:
assert mmh3, 'Need mmh3 module'
c_data = to_str(data)
c_signed = mmh3.hash(c_data, seed=seed)
py_data = to_bytes(c_data)
py_unsigned = murmur3_x86_32(py_data, seed=seed)
py_signed = twos_comp_to_signed(py_unsigned, n_bits=32)
preamble = 'Hashing {data} with MurmurHash3/32-bit/seed={seed}'.format(data=repr(data), seed=seed)
if (c_signed == py_signed):
print((preamble + ' -> {result}: OK'.format(result=c_signed)))
else:
raise AssertionError((preamble + '; mmh3 says {c_data} -> {c_signed}, Python version says {py_data} -> {py_unsigned} = {py_signed}'.format(c_data=repr(c_data), c_signed=c_signed, py_data=repr(py_data), py_unsigned=py_unsigned, py_signed=py_signed)))
|
Checks the pure Python implementation of 32-bit murmur3 against the
``mmh3`` C-based module.
Args:
data: data to hash
seed: seed
Raises:
AssertionError: if the two calculations don't match
|
codesearchnet
|
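A hypothetical smoke test for `compare_python_to_reference_murmur3_32` above (assumes the `mmh3` package is installed and that `to_str`, `to_bytes`, `murmur3_x86_32`, and `twos_comp_to_signed` are importable from the same module; the sample values are made up):
```python
# Hypothetical usage sketch; each call prints "... OK" or raises AssertionError on mismatch.
for sample in ['', 'hello', 'hello, world', 1776]:
    compare_python_to_reference_murmur3_32(sample, seed=0)
```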
def context(self, name):
data = self._context(name)
context = data.get('context')
if context:
return context
assert self.load_path
context_path = os.path.join(self.load_path, 'contexts', ('%s.rxt' % name))
context = ResolvedContext.load(context_path)
data['context'] = context
data['loaded'] = True
return context
|
Get a context.
Args:
name (str): Name the context is stored under.
Returns:
`ResolvedContext` object.
|
codesearchnet
|
def __mul__(self, other):
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value * other.value)
|
Returns the product of `self` and `other`.
Dimensions are multiplied as follows:
```python
tf.compat.v1.Dimension(m) * tf.compat.v1.Dimension(n) ==
tf.compat.v1.Dimension(m * n)
tf.compat.v1.Dimension(m) * tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) * tf.compat.v1.Dimension(n) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) * tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the product of `self` and `other`.
|
github-repos
|
def valid(self, value, level=[]):
self.validation_failures = []
if value is None and self._optional:
return True
for i in range(len(self._nodes)):
if self._nodes[i].valid(value):
return True
self.validation_failures.append(('.'.join(level), 'no valid option'))
return False
|
Valid
Checks if a value is valid based on the instance's values
Arguments:
value {mixed} -- The value to validate
Returns:
bool
|
juraj-google-style
|
def complement(self, alphabet):
states = sorted(self.states, key=attrgetter('initial'), reverse=True)
for state in states:
if state.final:
state.final = False
else:
state.final = True
|
Computes the complement of the DFA in place by flipping its final states.
Args:
alphabet (list): The input alphabet
Returns:
None
|
juraj-google-style
|
def close(self, channel_identifier: ChannelID, partner: Address, balance_hash: BalanceHash, nonce: Nonce, additional_hash: AdditionalHash, signature: Signature, given_block_identifier: BlockSpecification):
log_details = {'token_network': pex(self.address), 'node': pex(self.node_address), 'partner': pex(partner), 'nonce': nonce, 'balance_hash': encode_hex(balance_hash), 'additional_hash': encode_hex(additional_hash), 'signature': encode_hex(signature)}
log.debug('closeChannel called', **log_details)
checking_block = self.client.get_checking_block()
try:
self._close_preconditions(channel_identifier, partner=partner, block_identifier=given_block_identifier)
except NoStateForBlockIdentifier:
pass
error_prefix = 'closeChannel call will fail'
with self.channel_operations_lock[partner]:
gas_limit = self.proxy.estimate_gas(checking_block, 'closeChannel', channel_identifier=channel_identifier, partner=partner, balance_hash=balance_hash, nonce=nonce, additional_hash=additional_hash, signature=signature)
if gas_limit:
error_prefix = 'closeChannel call failed'
transaction_hash = self.proxy.transact('closeChannel', safe_gas_limit(gas_limit, GAS_REQUIRED_FOR_CLOSE_CHANNEL), channel_identifier=channel_identifier, partner=partner, balance_hash=balance_hash, nonce=nonce, additional_hash=additional_hash, signature=signature)
self.client.poll(transaction_hash)
receipt_or_none = check_transaction_threw(self.client, transaction_hash)
transaction_executed = (gas_limit is not None)
if ((not transaction_executed) or receipt_or_none):
if transaction_executed:
block = receipt_or_none['blockNumber']
else:
block = checking_block
self.proxy.jsonrpc_client.check_for_insufficient_eth(transaction_name='closeChannel', transaction_executed=transaction_executed, required_gas=GAS_REQUIRED_FOR_CLOSE_CHANNEL, block_identifier=block)
(error_type, msg) = self._check_channel_state_for_close(participant1=self.node_address, participant2=partner, block_identifier=block, channel_identifier=channel_identifier)
if (not error_type):
error_type = RaidenUnrecoverableError
error_msg = f'{error_prefix}. {msg}'
if (error_type == RaidenRecoverableError):
log.warning(error_msg, **log_details)
else:
log.critical(error_msg, **log_details)
raise error_type(error_msg)
log.info('closeChannel successful', **log_details)
|
Close the channel using the provided balance proof.
Note:
This method must *not* be called without updating the application
state, otherwise the node may accept new transfers which cannot be
used, because the closer is not allowed to update the balance proof
submitted on chain after closing
Raises:
RaidenRecoverableError: If the channel is already closed.
RaidenUnrecoverableError: If the channel does not exist or is settled.
|
codesearchnet
|
def _handle_error_response(response_body):
try:
error_data = json.loads(response_body)
error_details = '{}: {}'.format(
error_data['error'],
error_data.get('error_description'))
except (KeyError, ValueError):
error_details = response_body
raise exceptions.RefreshError(
error_details, response_body)
|
Translates an error response into an exception.
Args:
response_body (str): The decoded response data.
Raises:
google.auth.exceptions.RefreshError
|
juraj-google-style
|
async def read_reply(self):
code = 500
messages = []
go_on = True
while go_on:
try:
line = (await self.readline())
except ValueError as e:
code = 500
go_on = False
else:
try:
code = int(line[:3])
except ValueError as e:
raise ConnectionResetError('Connection lost.') from e
else:
go_on = (line[3:4] == b'-')
message = line[4:].strip(b' \t\r\n').decode('ascii')
messages.append(message)
full_message = '\n'.join(messages)
return (code, full_message)
|
Reads a reply from the server.
Raises:
ConnectionResetError: If the connection with the server is lost
(we can't read any response anymore). Or if the server
replies without a proper return code.
Returns:
(int, str): A (code, full_message) 2-tuple consisting of:
- server response code ;
- server response string corresponding to response code
(multiline responses are returned in a single string).
|
codesearchnet
|
def _generate_assignments(splittable_dimensions, mesh_dimension_to_size):
assignments = []
for assignment_size in six.moves.xrange(
1 + min(len(splittable_dimensions), len(mesh_dimension_to_size))):
for s_dims_chosen in itertools.combinations(splittable_dimensions,
assignment_size):
for m_dims_chosen in itertools.permutations(mesh_dimension_to_size,
assignment_size):
assignments.append(dict(zip(s_dims_chosen, m_dims_chosen)))
return assignments
|
Generates all ways to map splittable dimensions to mesh dimensions.
Args:
splittable_dimensions: a frozenset of the names of splittable dimensions.
mesh_dimension_to_size: a dictionary from mesh dimension name to size.
Returns:
A list of the valid assignments. Each assignment is a dict keyed by every
splittable dimension, whose value is either a mesh dimension or None.
|
juraj-google-style
|
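A minimal standalone sketch of the enumeration performed by `_generate_assignments` above, using plain `itertools` so it runs without the surrounding module (the dimension names and sizes are made up):
```python
# Standalone sketch mirroring the assignment enumeration logic.
import itertools

splittable_dimensions = frozenset(["batch", "heads"])
mesh_dimension_to_size = {"rows": 2, "cols": 4}

assignments = []
for assignment_size in range(1 + min(len(splittable_dimensions), len(mesh_dimension_to_size))):
    for s_dims in itertools.combinations(sorted(splittable_dimensions), assignment_size):
        for m_dims in itertools.permutations(mesh_dimension_to_size, assignment_size):
            assignments.append(dict(zip(s_dims, m_dims)))

print(assignments)  # includes {}, the four single-dimension maps, and both two-dimension pairings
```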
def check_call(state, callstr, argstr=None, expand_msg=None):
state.assert_is(['function_defs', 'lambda_functions'], 'check_call', ['check_function_def', 'check_lambda_function'])
if (expand_msg is None):
expand_msg = 'To verify it, we reran {{argstr}}. '
(stu_part, _argstr) = build_call(callstr, state.student_parts['node'])
(sol_part, _) = build_call(callstr, state.solution_parts['node'])
append_message = {'msg': expand_msg, 'kwargs': {'argstr': (argstr or _argstr)}}
child = part_to_child(stu_part, sol_part, append_message, state)
return child
|
When checking a function definition or lambda function,
prepare has_equal_x for checking the call of a user-defined function.
Args:
callstr (str): call string that specifies how the function should be called, e.g. `f(1, a = 2)`.
``check_call()`` will replace ``f`` with the function/lambda you're targeting.
argstr (str): If specified, this overrides the way the function call is referred to in the expand message.
expand_msg (str): If specified, this overrides any messages that are prepended by previous SCT chains.
state (State): state object that is chained from.
:Example:
Student and solution code::
def my_power(x):
print("calculating sqrt...")
return(x * x)
SCT::
Ex().check_function_def('my_power').multi(
check_call("f(3)").has_equal_value()
check_call("f(3)").has_equal_output()
)
|
codesearchnet
|
def get_by(self, field, value):
firmwares = self.get_all()
matches = []
for item in firmwares:
if item.get(field) == value:
matches.append(item)
return matches
|
Gets the list of firmware baseline resources managed by the appliance. Optional parameters can be used to
filter the list of resources returned.
The search is case-insensitive.
Args:
field: Field name to filter.
value: Value to filter.
Returns:
list: List of firmware baseline resources.
|
juraj-google-style
|
def fit(self, mol1, mol2):
return self.get_rmsd(mol1, mol2) < self._tolerance
|
Fit two molecules.
Args:
mol1: First molecule. OpenBabel OBMol or pymatgen Molecule object
mol2: Second molecule. OpenBabel OBMol or pymatgen Molecule object
Returns:
A boolean value indicating whether the two molecules are the same.
|
juraj-google-style
|
def write_profile(name, repo, token):
make_sure_folder_exists(CONFIG_FOLDER)
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
profile = {'repo': repo, 'token': token}
config[name] = profile
with open(CONFIG_FILE, 'w') as configfile:
config.write(configfile)
return profile
|
Save a profile to the CONFIG_FILE.
After you use this method to save a profile, you can load it anytime
later with the ``read_profile()`` function defined above.
Args:
name
The name of the profile to save.
repo
The Github repo you want to connect to. For instance,
this repo is ``jtpaasch/simplygithub``.
token
A personal access token to connect to the repo. It is
a hash that looks something like ``ff20ae42dc...``
Returns:
A dictionary with the profile's ``repo`` and ``token`` values.
|
codesearchnet
|
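A hypothetical call to `write_profile` above, showing the INI-style section that ends up in CONFIG_FILE (the profile name is made up; repo and token mirror the docstring's examples):
```python
# Hypothetical usage sketch.
profile = write_profile('ci-bot', repo='jtpaasch/simplygithub', token='ff20ae42dc...')
# CONFIG_FILE now contains a section like:
# [ci-bot]
# repo = jtpaasch/simplygithub
# token = ff20ae42dc...
```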
def OpenFile(client_path, max_timestamp=None):
path_info = data_store.REL_DB.ReadLatestPathInfosWithHashBlobReferences([client_path], max_timestamp=max_timestamp)[client_path]
if (path_info is None):
raise FileHasNoContentError(client_path)
hash_id = rdf_objects.SHA256HashID.FromBytes(path_info.hash_entry.sha256.AsBytes())
blob_references = data_store.REL_DB.ReadHashBlobReferences([hash_id])[hash_id]
if (blob_references is None):
raise MissingBlobReferencesError(('File hash was expected to have corresponding blob references, but they were not found: %r' % hash_id))
return BlobStream(client_path, blob_references, hash_id)
|
Opens latest content of a given file for reading.
Args:
client_path: A db.ClientPath object describing path to a file.
max_timestamp: If specified, will open the last collected version with a
timestamp equal or lower than max_timestamp. If not specified, will simply
open the latest version.
Returns:
A file like object with random access support.
Raises:
FileHasNoContentError: if the file was never collected.
MissingBlobReferencesError: if one of the blobs was not found.
|
codesearchnet
|
def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = text_outputs[1] if return_dict is not None else text_outputs.pooler_output
text_features = self.text_projection(pooled_output)
text_features = F.normalize(text_features, dim=-1)
return text_features
|
Returns:
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
applying the projection layer to the pooled output of [`ClapTextModel`].
Examples:
```python
>>> from transformers import AutoTokenizer, ClapModel
>>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
>>> tokenizer = AutoTokenizer.from_pretrained("laion/clap-htsat-unfused")
>>> inputs = tokenizer(["the sound of a cat", "the sound of a dog"], padding=True, return_tensors="pt")
>>> text_features = model.get_text_features(**inputs)
```
|
github-repos
|
def read(self, size=None):
if not self._is_open:
raise IOError('Not opened.')
return self._vslvm_logical_volume.read(size)
|
Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
|
juraj-google-style
|
def FindProxies():
proxies = []
for i in range(0, 100):
try:
sid = winreg.EnumKey(winreg.HKEY_USERS, i)
except OSError:
break
try:
subkey = (sid + '\\Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings')
internet_settings = winreg.OpenKey(winreg.HKEY_USERS, subkey)
proxy_enable = winreg.QueryValueEx(internet_settings, 'ProxyEnable')[0]
if proxy_enable:
proxy_server = str(winreg.QueryValueEx(internet_settings, 'ProxyServer')[0])
if ('=' in proxy_server):
for p in proxy_server.split(';'):
(protocol, address) = p.split('=', 1)
if (not re.match('^([^/:]+)://', address)):
address = ('%s://%s' % (protocol, address))
proxies.append(address)
elif (proxy_server[:5] == 'http:'):
proxies.append(proxy_server)
else:
proxies.append(('http://%s' % proxy_server))
internet_settings.Close()
except (OSError, ValueError, TypeError):
continue
logging.debug('Found proxy servers: %s', proxies)
return proxies
|
Tries to find proxies by interrogating all the user's settings.
This function is a modified urllib.getproxies_registry() from the
standard library. We just store the proxy value in the environment
for urllib to find it.
TODO(user): Iterate through all the possible values if one proxy
fails, in case more than one proxy is specified in different users
profiles.
Returns:
A list of proxies.
|
codesearchnet
|
def GetTARInfoByPathSpec(self, path_spec):
location = getattr(path_spec, 'location', None)
if (location is None):
raise errors.PathSpecError('Path specification missing location.')
if (not location.startswith(self.LOCATION_ROOT)):
raise errors.PathSpecError('Invalid location in path specification.')
if (len(location) == 1):
return None
try:
return self._tar_file.getmember(location[1:])
except KeyError:
pass
|
Retrieves the TAR info for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
tarfile.TARInfo: TAR info or None if it does not exist.
Raises:
PathSpecError: if the path specification is incorrect.
|
codesearchnet
|
def import_args_from_dict(value, args, config):
if isinstance(value, six.string_types):
for match in TOKEN_REGEX.finditer(str(value)):
token = match.group(1)
if (token in args):
actual_param = args[token]
if isinstance(actual_param, six.string_types):
value = value.replace(('@' + token), args[token])
else:
value = actual_param
elif isinstance(value, list):
return [import_args_from_dict(item, args, config) for item in value]
elif isinstance(value, dict):
return {key: import_args_from_dict(val, args, config) for (key, val) in value.items()}
elif isinstance(value, tuple):
return tuple((import_args_from_dict(val, args, config) for val in value))
return value
|
Replaces some arguments by those specified by a key-value dictionary.
This function will be recursively called on a dictionary looking for any
value containing a "$" variable. If found, the value will be replaced
by the attribute in "args" of the same name.
It is used to load arguments from the CLI and any extra configuration
parameters passed in recipes.
Args:
value: The value of a {key: value} dictionary. This is passed recursively
and may change in nature: string, list, or dict. The top-level variable
should be the dictionary that is supposed to be recursively traversed.
args: A {key: value} dictionary used to do replacements.
config: A dftimewolf.Config class containing configuration information
Returns:
The first caller of the function will receive a dictionary in which strings
starting with "@" are replaced by the parameters in args.
|
codesearchnet
|
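A hypothetical example of the "@token" substitution performed by `import_args_from_dict` above (the recipe snippet and args are made up, and it assumes TOKEN_REGEX matches "@word" tokens as the code implies; `config` is unused for plain replacements):
```python
# Hypothetical substitution sketch.
recipe_value = {'paths': ['@input_dir/logs', '@input_dir/registry'], 'verbose': '@debug'}
args = {'input_dir': '/cases/2019-01', 'debug': True}
result = import_args_from_dict(recipe_value, args, config=None)
# result == {'paths': ['/cases/2019-01/logs', '/cases/2019-01/registry'], 'verbose': True}
```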
def _create_grad_indexed_slices_init(grad_output_slices, forward_input):
assert isinstance(grad_output_slices, indexed_slices.IndexedSlices)
assert isinstance(forward_input, tensor.Tensor)
values_out = grad_output_slices.values
indices_out = grad_output_slices.indices
if values_out.shape.is_fully_defined():
values_shape = tensor_shape.TensorShape([0] + values_out.shape.as_list()[1:])
values = array_ops.zeros(values_shape, dtype=values_out.dtype, name='values_init')
else:
if forward_input.dtype == dtypes.resource:
forward_shape = gen_resource_variable_ops.variable_shape(forward_input)
else:
forward_shape = array_ops.shape(forward_input)
values_shape = array_ops.concat([[0], forward_shape[1:]], 0)
values = array_ops.zeros(values_shape, dtype=values_out.dtype, name='values_init')
indices = constant_op.constant([], indices_out.dtype, name='indices_init')
if forward_input.dtype == dtypes.resource:
shape = gen_resource_variable_ops.variable_shape(forward_input, name='shape_init')
else:
shape = array_ops.shape(forward_input, name='shape_init')
return indexed_slices.IndexedSlices(values=values, indices=indices, dense_shape=shape)
|
Creates an IndexedSlices to pass as input to the while grad function.
Args:
grad_output_slices: IndexedSlices. The corresponding while grad function
output.
forward_input: Tensor. The corresponding input to the forward while op.
Returns:
Zeros IndexedSlices, created in current Graph.
|
github-repos
|
def translations(self, **kwargs):
path = self._get_id_path('translations')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Get the translations for a specific movie id.
Args:
append_to_response: (optional) Comma separated, any movie method.
Returns:
A dict representation of the JSON returned from the API.
|
juraj-google-style
|
def _numpy_section(line_info):
next_line_is_hyphens = _line_is_hyphens(line_info.next.stripped)
if next_line_is_hyphens:
possible_title = line_info.remaining
return _section_from_possible_title(possible_title)
else:
return None
|
Checks whether the current line is the start of a new numpy-style section.
Numpy style sections are followed by a full line of hyphens, for example:
Section Name
------------
Section body goes here.
Args:
line_info: Information about the current line.
Returns:
A Section type if one matches, or None if no section type matches.
|
github-repos
|
def get_metrics_result(self):
return_metrics = {}
for metric in self.metrics:
result = metric.result()
if isinstance(result, dict):
return_metrics.update(result)
else:
return_metrics[metric.name] = result
return python_utils.pythonify_logs(return_metrics)
|
Returns the model's metrics values as a dict.
If any of the metric result is a dict (containing multiple metrics),
each of them gets added to the top level returned dict of this method.
Returns:
A `dict` containing values of the metrics listed in `self.metrics`.
Example: `{'loss': 0.2, 'accuracy': 0.7}`.
|
github-repos
|
def contrast(x, severity=1):
c = [0.4, .3, .2, .1, .05][severity - 1]
x = np.array(x) / 255.
means = np.mean(x, axis=(0, 1), keepdims=True)
x_clip = np.clip((x - means) * c + means, 0, 1) * 255
return around_and_astype(x_clip)
|
Change contrast of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed contrast.
|
juraj-google-style
|
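A small self-contained sketch of the contrast reduction above on a toy image (numpy only; the severity factors are copied from the function, the input array is random):
```python
# Standalone sketch: shrink pixel deviation from the per-channel mean, then clip and rescale.
import numpy as np

severity = 3
c = [0.4, .3, .2, .1, .05][severity - 1]           # 0.2 for severity 3
x = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8) / 255.
means = np.mean(x, axis=(0, 1), keepdims=True)
x_clip = np.clip((x - means) * c + means, 0, 1) * 255
out = np.round(x_clip).astype(np.uint8)             # stands in for around_and_astype()
```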
def get(self, report_id):
return Report(
self._app,
self._swimlane.request('get', "reports/{0}".format(report_id)).json()
)
|
Retrieve report by ID
Args:
report_id (str): Full report ID
Returns:
Report: Corresponding Report instance
|
juraj-google-style
|
def state(self):
if not self.has_job:
return PipelineState.DONE
self._update_job()
return self._get_job_state()
|
Return the current state of the remote job.
Returns:
A PipelineState object.
|
github-repos
|
def _ParseValueData(self, knowledge_base, value_data):
if not isinstance(value_data, py2to3.UNICODE_TYPE):
raise errors.PreProcessFail(
'Unsupported Windows Registry value type: {0:s} for '
'artifact: {1:s}.'.format(
type(value_data), self.ARTIFACT_DEFINITION_NAME))
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name=self._NAME, value=value_data)
try:
logger.debug('setting environment variable: {0:s} to: "{1:s}"'.format(
self._NAME, value_data))
knowledge_base.AddEnvironmentVariable(environment_variable)
except KeyError:
pass
|
Parses Windows Registry value data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
value_data (object): Windows Registry value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
|
juraj-google-style
|
def write_file(self, filename, file_format='xyz'):
mol = pb.Molecule(self._obmol)
return mol.write(file_format, filename, overwrite=True)
|
Uses OpenBabel to output all supported formats.
Args:
filename: Filename of file to output
file_format: String specifying any OpenBabel supported formats.
|
codesearchnet
|
def stitch_map(tiles, width, height, bbox, dpi):
size = (int(width * dpi_to_dpmm(dpi)), int(height * dpi_to_dpmm(dpi)))
background = Image.new('RGBA', size, (255, 255, 255))
for layer in tiles:
layer_img = Image.new("RGBA", size)
for (x, y), tile_path in layer.items():
tile = Image.open(tile_path)
layer_img.paste(tile, ((x - bbox.min.x) * TILE_SIZE, (y - bbox.min.y) * TILE_SIZE))
background = Image.alpha_composite(background, layer_img)
add_scales_bar(background, bbox)
return background.convert("RGB")
|
Merge tiles together into one image.
Args:
tiles (list of dict of file): tiles for each layer
width (float): page width in mm
height (float): page height in mm
bbox: bounding box of the tiled area (tile coordinates), used to offset tiles and draw the scale bar
dpi (int): resolution in dots per inch
Returns:
PIL.Image: merged map.
|
juraj-google-style
|
def _InitializeURL(self, upload_url, current_content_length):
if current_content_length != 0:
return upload_url
headers = {
'Content-Type': 'application/xml',
'Content-Length': 0,
'x-goog-resumable': 'start'
}
req = urllib2.Request(upload_url, data={}, headers=headers)
resp = self._url_opener.open(req)
return resp.headers['location']
|
Ensures that the URL used to upload operations is properly initialized.
Args:
upload_url: a string url.
current_content_length: an integer identifying the current content length
of data uploaded to the Batch Job.
Returns:
An initialized string URL, or the provided string URL if the URL has
already been initialized.
|
juraj-google-style
|
def set_colour(self, r, g, b):
if (not (0 <= r <= 255)):
raise ValueError('The value for red needs to be between 0 and 255.')
if (not (0 <= g <= 255)):
raise ValueError('The value for green needs to be between 0 and 255.')
if (not (0 <= b <= 255)):
raise ValueError('The value for blue needs to be between 0 and 255.')
hexvalue = BulbDevice._rgb_to_hexvalue(r, g, b)
payload = self.generate_payload(SET, {self.DPS_INDEX_MODE: self.DPS_MODE_COLOUR, self.DPS_INDEX_COLOUR: hexvalue})
data = self._send_receive(payload)
return data
|
Set colour of an rgb bulb.
Args:
r(int): Value for the colour red as int from 0-255.
g(int): Value for the colour green as int from 0-255.
b(int): Value for the colour blue as int from 0-255.
|
codesearchnet
|
async def upload_image(self, image_file, filename=None, *, return_uploaded_image=False):
image_filename = (filename or os.path.basename(image_file.name))
image_data = image_file.read()
res = (await self._base_request(IMAGE_UPLOAD_URL, 'application/x-www-form-urlencoded;charset=UTF-8', 'json', json.dumps({'protocolVersion': '0.8', 'createSessionRequest': {'fields': [{'external': {'name': 'file', 'filename': image_filename, 'put': {}, 'size': len(image_data)}}]}})))
try:
upload_url = self._get_upload_session_status(res)['externalFieldTransfers'][0]['putInfo']['url']
except KeyError:
raise exceptions.NetworkError('image upload failed: can not acquire an upload url')
res = (await self._base_request(upload_url, 'application/octet-stream', 'json', image_data))
try:
raw_info = self._get_upload_session_status(res)['additionalInfo']['uploader_service.GoogleRupioAdditionalInfo']['completionInfo']['customerSpecificInfo']
image_id = raw_info['photoid']
url = raw_info['url']
except KeyError:
raise exceptions.NetworkError('image upload failed: can not fetch upload info')
result = UploadedImage(image_id=image_id, url=url)
return (result if return_uploaded_image else result.image_id)
|
Upload an image that can be later attached to a chat message.
Args:
image_file: A file-like object containing an image.
filename (str): (optional) Custom name for the uploaded file.
return_uploaded_image (bool): (optional) If True, return
:class:`.UploadedImage` instead of image ID. Defaults to False.
Raises:
hangups.NetworkError: If the upload request failed.
Returns:
:class:`.UploadedImage` instance, or ID of the uploaded image.
|
codesearchnet
|
def compute_advantages(rollout, last_r, gamma=0.9, lambda_=1.0, use_gae=True):
traj = {}
trajsize = len(rollout[SampleBatch.ACTIONS])
for key in rollout:
traj[key] = np.stack(rollout[key])
if use_gae:
assert (SampleBatch.VF_PREDS in rollout), 'Values not found!'
vpred_t = np.concatenate([rollout[SampleBatch.VF_PREDS], np.array([last_r])])
delta_t = ((traj[SampleBatch.REWARDS] + (gamma * vpred_t[1:])) - vpred_t[:(- 1)])
traj[Postprocessing.ADVANTAGES] = discount(delta_t, (gamma * lambda_))
traj[Postprocessing.VALUE_TARGETS] = (traj[Postprocessing.ADVANTAGES] + traj[SampleBatch.VF_PREDS]).copy().astype(np.float32)
else:
rewards_plus_v = np.concatenate([rollout[SampleBatch.REWARDS], np.array([last_r])])
traj[Postprocessing.ADVANTAGES] = discount(rewards_plus_v, gamma)[:(- 1)]
traj[Postprocessing.VALUE_TARGETS] = np.zeros_like(traj[Postprocessing.ADVANTAGES])
traj[Postprocessing.ADVANTAGES] = traj[Postprocessing.ADVANTAGES].copy().astype(np.float32)
assert all(((val.shape[0] == trajsize) for val in traj.values())), 'Rollout stacked incorrectly!'
return SampleBatch(traj)
|
Given a rollout, compute its value targets and the advantage.
Args:
rollout (SampleBatch): SampleBatch of a single trajectory
last_r (float): Value estimation for last observation
gamma (float): Discount factor.
lambda_ (float): Parameter for GAE
use_gae (bool): Whether to use Generalized Advantage Estimation
Returns:
SampleBatch (SampleBatch): Object with experience from rollout and
processed rewards.
|
codesearchnet
|
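A tiny numpy sketch of the `use_gae` branch of `compute_advantages` above, with the reverse discounted cumulative sum (the `discount()` helper) written out explicitly; the rewards and value predictions are made up:
```python
# Toy GAE computation mirroring the use_gae branch.
import numpy as np

gamma, lambda_ = 0.9, 1.0
rewards = np.array([1.0, 0.0, 2.0])
vf_preds = np.array([0.5, 0.4, 0.9])
last_r = 0.0

vpred_t = np.concatenate([vf_preds, [last_r]])
delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1]

advantages = np.zeros_like(delta_t)
running = 0.0
for t in reversed(range(len(delta_t))):              # discount(delta_t, gamma * lambda_)
    running = delta_t[t] + gamma * lambda_ * running
    advantages[t] = running

value_targets = (advantages + vf_preds).astype(np.float32)
```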
def plot_spectra_pages_pdf(ss, pdf_filename='pages.pdf', setup=_default_setup):
logger = a99.get_python_logger()
xmin, xmax, ymin_, ymax, xspan, yspan = calc_max_min(ss)
ymin = ymin_ if setup.ymin is None else setup.ymin
num_pages = len(ss)
a99.format_BLB()
pdf = matplotlib.backends.backend_pdf.PdfPages(pdf_filename)
for i, s in enumerate(ss):
title = s.title
fig = plt.figure()
plt.plot(s.x, s.y, c=_FAV_COLOR)
if setup.flag_xlabel and setup.fmt_xlabel:
_set_plot(plt.xlabel, setup.fmt_xlabel, s)
if setup.flag_ylabel and setup.fmt_ylabel:
_set_plot(plt.ylabel, setup.fmt_ylabel, s)
_set_plot(plt.title, setup.fmt_title, s)
plt.xlim([xmin-xspan*_T, xmax+xspan*_T])
plt.ylim([ymin-yspan*_T, ymax+yspan*_T])
plt.tight_layout()
plt.subplots_adjust(top=0.94)
logger.info("Printing page {0:d}/{1:d} ('{2!s}')".format(i+1, num_pages, title))
pdf.savefig(fig)
plt.close()
pdf.close()
logger.info("File {0!s} successfully created.".format(pdf_filename))
|
Plots spectra into a PDF file, one spectrum per page.
Splits into several pieces of width
Args:
ss: list of Spectrum objects
pdf_filename: name of output file
setup: plot setup options (defaults to _default_setup)
|
juraj-google-style
|
def address(self, ip, owner=None, **kwargs):
return Address(self.tcex, ip, owner=owner, **kwargs)
|
Create the Address TI object.
Args:
owner: The owner for the Address indicator.
ip: The IP address value for the indicator.
**kwargs: Additional keyword arguments passed through to Address.
Return:
|
juraj-google-style
|
def __set_proxy(self, config):
if "proxy" in config and config["proxy"]:
proxy = config["proxy"]
splitted = proxy.split(':')
if len(splitted) != 2:
raise ValueError(ONEVIEW_CLIENT_INVALID_PROXY)
proxy_host = splitted[0]
proxy_port = int(splitted[1])
self.__connection.set_proxy(proxy_host, proxy_port)
|
Set proxy if needed
Args:
config: Config dict
|
juraj-google-style
|
def get_static_batch_size(layer):
batch_input_shape, _ = get_input_shape_and_dtype(layer)
if batch_input_shape is not None:
return tensor_shape.Dimension(batch_input_shape[0]).value
return None
|
Gets the static batch size of a Layer.
Args:
layer: a `Layer` instance.
Returns:
The static batch size of a Layer.
|
github-repos
|
def FindUnspentCoinsByAsset(self, asset_id, from_addr=None, use_standard=False, watch_only_val=0):
coins = self.FindUnspentCoins(from_addr=from_addr, use_standard=use_standard, watch_only_val=watch_only_val)
return [coin for coin in coins if (coin.Output.AssetId == asset_id)]
|
Finds unspent coin objects in the wallet limited to those of a certain asset type.
Args:
asset_id (UInt256): a bytearray (len 32) representing an asset on the blockchain.
from_addr (UInt160): a bytearray (len 20) representing an address.
use_standard (bool): whether or not to only include standard contracts (i.e. not a smart contract address).
watch_only_val (int): a flag ( 0 or 64 ) indicating whether or not to find coins that are in 'watch only' addresses.
Returns:
list: a list of ``neo.Wallet.Coin`` in the wallet that are not spent
|
codesearchnet
|
def run(self, samples=1000, chains=1, **kwargs):
self.fit = self.stan_model.sampling(data=self.X, iter=samples,
chains=chains, **kwargs)
return self._convert_to_results()
|
Run the Stan sampler.
Args:
samples (int): Number of samples to obtain (in each chain).
chains (int): Number of chains to use.
kwargs (dict): Optional keyword arguments passed onto the PyStan
StanModel.sampling() call.
Returns: A PyMC3ModelResults instance.
|
juraj-google-style
|
def pad_image(self, image: np.ndarray, pad_size: Optional[Dict[str, int]]=None, constant_values: Union[float, Iterable[float]]=0, pad_mode: PaddingMode=PaddingMode.CONSTANT, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs):
height, width = get_image_size(image, channel_dim=input_data_format)
max_height = pad_size.get('height', height)
max_width = pad_size.get('width', width)
pad_right, pad_bottom = (max_width - width, max_height - height)
if pad_right < 0 or pad_bottom < 0:
raise ValueError('The padding size must be greater than image size')
padding = ((0, pad_bottom), (0, pad_right))
padded_image = pad(image, padding, mode=pad_mode, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format)
return padded_image
|
Pad an image with zeros to the given size.
Args:
image (`np.ndarray`):
Image to pad.
pad_size (`Dict[str, int]`):
Size of the output image with pad.
constant_values (`Union[float, Iterable[float]]`):
The fill value to use when padding the image.
pad_mode (`PaddingMode`):
The pad mode, default to PaddingMode.CONSTANT.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
|
github-repos
|
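A minimal numpy-only sketch of the bottom/right constant padding that `pad_image` above performs, for a channels-last image (the sizes are made up):
```python
# Standalone sketch: pad a (H, W, C) image on the bottom and right with a constant value.
import numpy as np

image = np.ones((3, 5, 3), dtype=np.uint8)
pad_size = {'height': 8, 'width': 8}
pad_bottom = pad_size['height'] - image.shape[0]
pad_right = pad_size['width'] - image.shape[1]
padded = np.pad(image, ((0, pad_bottom), (0, pad_right), (0, 0)),
                mode='constant', constant_values=0)
assert padded.shape == (8, 8, 3)
```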
def _linop_inverse(self) -> 'LinearOperatorBlockLowerTriangular':
if len(self.operators) == 1:
return LinearOperatorBlockLowerTriangular([[self.operators[0][0].inverse()]], is_non_singular=self.is_non_singular, is_self_adjoint=self.is_self_adjoint, is_positive_definite=self.is_positive_definite, is_square=True)
blockwise_dim = len(self.operators)
upper_left_inverse = LinearOperatorBlockLowerTriangular(self.operators[:-1]).inverse()
bottom_row = self.operators[-1]
bottom_right_inverse = bottom_row[-1].inverse()
inverse_bottom_row = []
for i in range(blockwise_dim - 1):
blocks = []
for j in range(i, blockwise_dim - 1):
result = bottom_row[j].matmul(upper_left_inverse.operators[j][i])
if not any((isinstance(result, op_type) for op_type in linear_operator_addition.SUPPORTED_OPERATORS)):
result = linear_operator_full_matrix.LinearOperatorFullMatrix(result.to_dense())
blocks.append(result)
summed_blocks = linear_operator_addition.add_operators(blocks)
assert len(summed_blocks) == 1
block = summed_blocks[0]
block = bottom_right_inverse.matmul(block)
block = linear_operator_identity.LinearOperatorScaledIdentity(num_rows=bottom_right_inverse.domain_dimension_tensor(), multiplier=math_ops.cast(-1, dtype=block.dtype)).matmul(block)
inverse_bottom_row.append(block)
inverse_bottom_row.append(bottom_right_inverse)
return LinearOperatorBlockLowerTriangular(upper_left_inverse.operators + [inverse_bottom_row], is_non_singular=self.is_non_singular, is_self_adjoint=self.is_self_adjoint, is_positive_definite=self.is_positive_definite, is_square=True)
|
Inverse of LinearOperatorBlockLowerTriangular.
We recursively apply the identity:
```none
|A 0|'  =  |    A'  0|
|B C|      |-C'BA' C'|
```
where `A` is n-by-n, `B` is m-by-n,
`C` is m-by-m, and `'` denotes inverse.
This identity can be verified through multiplication:
```none
|A 0||    A'  0|
|B C||-C'BA' C'|
= |       AA'  0|
  |BA'-CC'BA' CC'|
= |I 0|
  |0 I|
```
Returns:
A 'LinearOperatorBlockLowerTriangular'.
|
github-repos
|
def run(inputs, program, outputs):
root = tempfile.mkdtemp()
try:
cwd = os.getcwd()
for fake, real in inputs:
parent = os.path.join(root, os.path.dirname(fake))
if not os.path.exists(parent):
os.makedirs(parent)
if hasattr(os, 'symlink') and not os.name == 'nt':
os.symlink(os.path.join(cwd, real), os.path.join(root, fake))
else:
shutil.copyfile(os.path.join(cwd, real), os.path.join(root, fake))
if subprocess.call(program + [root]) != 0:
return 1
for fake, real in outputs:
shutil.copyfile(os.path.join(root, fake), real)
return 0
finally:
try:
shutil.rmtree(root)
except EnvironmentError:
pass
|
Creates temp symlink tree, runs program, and copies back outputs.
Args:
inputs: List of fake paths to real paths, which are used for symlink tree.
program: List containing real path of program and its arguments. The
execroot directory will be appended as the last argument.
outputs: List of fake outputted paths to copy back to real paths.
Returns:
0 if succeeded or nonzero if failed.
|
juraj-google-style
|
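A hypothetical invocation of `run` above (the paths and program are made up); the program sees a temporary root directory whose layout matches the fake paths, and the named output is copied back on success:
```python
# Hypothetical usage sketch.
status = run(
    inputs=[('src/index.md', '/abs/path/to/index.md')],
    program=['/usr/bin/site-builder', '--out', 'build'],
    outputs=[('build/index.html', '/abs/path/to/index.html')],
)
```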
def play_from_queue(self, index, start=True):
if not self.speaker_info:
self.get_speaker_info()
uri = 'x-rincon-queue:{0}#0'.format(self.uid)
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', uri),
('CurrentURIMetaData', '')
])
self.avTransport.Seek([
('InstanceID', 0),
('Unit', 'TRACK_NR'),
('Target', index + 1)
])
if start:
self.play()
|
Play a track from the queue by index.
The index number is required as an argument, where the first index
is 0.
Args:
index (int): 0-based index of the track to play
start (bool): If the item that has been set should start playing
|
juraj-google-style
|
def add(self, information, timeout=-1):
return self._client.create(information, timeout=timeout)
|
Adds a data center resource based upon the attributes specified.
Args:
information: Data center information
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Added data center.
|
juraj-google-style
|
def _make_gh_link_node(app, rawtext, role, kind, api_type, id, options=None):
url = "%s/%s/%s" % (_BOKEH_GH, api_type, id)
options = options or {}
set_classes(options)
node = nodes.reference(
rawtext, kind + utils.unescape(id), refuri=url, **options)
return node
|
Return a link to a Bokeh Github resource.
Args:
app (Sphinx app) : current app
rawtext (str) : text being replaced with link node.
role (str) : role name
kind (str) : resource type (issue, pull, etc.)
api_type (str) : type for api link
id : (str) : id of the resource to link to
options (dict) : options dictionary passed to role function
|
juraj-google-style
|
def __gt__(self, other):
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value > other.value
|
Returns True if `self` is known to be greater than `other`.
Dimensions are compared as follows:
```python
(tf.compat.v1.Dimension(m) > tf.compat.v1.Dimension(n)) == (m > n)
(tf.compat.v1.Dimension(m) > tf.compat.v1.Dimension(None)) == None
(tf.compat.v1.Dimension(None) > tf.compat.v1.Dimension(n)) == None
(tf.compat.v1.Dimension(None) > tf.compat.v1.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value > other.value` if both are known, otherwise
None.
|
github-repos
|
def _Execute(self, http):
message = mime_multipart.MIMEMultipart('mixed')
setattr(message, '_write_headers', (lambda self: None))
for key in self.__request_response_handlers:
msg = mime_nonmultipart.MIMENonMultipart('application', 'http')
msg['Content-Transfer-Encoding'] = 'binary'
msg['Content-ID'] = self._ConvertIdToHeader(key)
body = self._SerializeRequest(self.__request_response_handlers[key].request)
msg.set_payload(body)
message.attach(msg)
request = http_wrapper.Request(self.__batch_url, 'POST')
request.body = message.as_string()
request.headers['content-type'] = ('multipart/mixed; boundary="%s"' % message.get_boundary())
response = http_wrapper.MakeRequest(http, request)
if (response.status_code >= 300):
raise exceptions.HttpError.FromResponse(response)
header = ('content-type: %s\r\n\r\n' % response.info['content-type'])
content = response.content
if (isinstance(content, bytes) and self.__response_encoding):
content = response.content.decode(self.__response_encoding)
parser = email_parser.Parser()
mime_response = parser.parsestr((header + content))
if (not mime_response.is_multipart()):
raise exceptions.BatchError('Response not in multipart/mixed format.')
for part in mime_response.get_payload():
request_id = self._ConvertHeaderToId(part['Content-ID'])
response = self._DeserializeResponse(part.get_payload())
self.__request_response_handlers[request_id] = self.__request_response_handlers[request_id]._replace(response=response)
|
Serialize batch request, send to server, process response.
Args:
http: A httplib2.Http object to be used to make the request with.
Raises:
httplib2.HttpLib2Error if a transport error has occurred.
apiclient.errors.BatchError if the response is the wrong format.
|
codesearchnet
|
def register_magics(store_name='_ampl_cells', ampl_object=None):
from IPython.core.magic import (
Magics, magics_class, cell_magic, line_magic
)
@magics_class
class StoreAMPL(Magics):
def __init__(self, shell=None, **kwargs):
Magics.__init__(self, shell=shell, **kwargs)
self._store = []
shell.user_ns[store_name] = self._store
@cell_magic
def ampl(self, line, cell):
self._store.append(cell)
@cell_magic
def ampl_eval(self, line, cell):
ampl_object.eval(cell)
@line_magic
def get_ampl(self, line):
return self._store
get_ipython().register_magics(StoreAMPL)
|
Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``.
Args:
store_name: Name of the store where ``%%ampl cells`` will be stored.
ampl_object: Object used to evaluate ``%%ampl_eval`` cells.
|
juraj-google-style
|
def get_plugins(package_name, paths=None):
pkg = get_latest_package(package_name, paths=paths, error=True)
if (not pkg.has_plugins):
return []
it = iter_package_families(paths)
package_names = set((x.name for x in it))
bar = ProgressBar('Searching', len(package_names))
plugin_pkgs = []
for package_name_ in package_names:
bar.next()
if (package_name_ == package_name):
continue
plugin_pkg = get_latest_package(package_name_, paths=paths)
if (not plugin_pkg.plugin_for):
continue
for plugin_for in plugin_pkg.plugin_for:
if (plugin_for == pkg.name):
plugin_pkgs.append(package_name_)
bar.finish()
return plugin_pkgs
|
Find packages that are plugins of the given package.
Args:
package_name (str): Name of the package.
paths (list of str): Paths to search for packages, defaults to
`config.packages_path`.
Returns:
list of str: The packages that are plugins of the given package.
|
codesearchnet
|
def create_new_board(self, query_params=None):
board_json = self.fetch_json(uri_path='/boards', http_method='POST', query_params=(query_params or {}))
return self.create_board(board_json)
|
Create a new board. name is required in query_params. Returns a Board
object.
Returns:
Board: Returns the created board
|
codesearchnet
|
def run_eval(interpreter, input_image):
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_image = np.reshape(input_image, input_details[0]['shape'])
interpreter.set_tensor(input_details[0]['index'], input_image)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
output = np.squeeze(output_data)
return output
|
Performs evaluation for input image over specified model.
Args:
interpreter: TFLite interpreter initialized with model to execute.
input_image: Image input to the model.
Returns:
output: output tensor of model being executed.
|
github-repos
|
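A hypothetical way to drive `run_eval` above (the model path is made up; assumes a TensorFlow install with the TFLite interpreter, and that the caller creates and allocates the interpreter):
```python
# Hypothetical usage sketch with a dummy input matching the model's input shape.
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path='model.tflite')
interpreter.allocate_tensors()
shape = interpreter.get_input_details()[0]['shape']
dummy_image = np.zeros(shape, dtype=np.float32)
prediction = run_eval(interpreter, dummy_image)
```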
def get(self, name):
return self.prepare_model(self.client.api.inspect_plugin(name))
|
Gets a plugin.
Args:
name (str): The name of the plugin.
Returns:
(:py:class:`Plugin`): The plugin.
Raises:
:py:class:`docker.errors.NotFound` If the plugin does not
exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
|
juraj-google-style
|
def segment(self, source, language=None):
if language and not language in self.supported_languages:
raise ValueError(
'Language {} is not supported by NLAPI segmenter'.format(language))
chunks, language = self._get_source_chunks(source, language=language)
if self.use_entity:
entities = self._get_entities(source, language=language)
chunks = self._group_chunks_by_entities(chunks, entities)
chunks.resolve_dependencies()
return chunks
|
Returns a chunk list from the given sentence.
Args:
source (str): Source string to segment.
language (:obj:`str`, optional): A language code.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
Raises:
ValueError: If :obj:`language` is given and it is not included in
:obj:`supported_languages`.
|
juraj-google-style
|
def __init__(self, working_directory, emulator_zip, java=None):
self._working_directory = working_directory
self._emulators = {}
zipped_file = zipfile.ZipFile(emulator_zip)
if not os.path.isdir(self._working_directory):
os.mkdir(self._working_directory)
zipped_file.extractall(self._working_directory)
self._emulator_dir = os.path.join(self._working_directory,
'cloud-datastore-emulator')
self._emulator_cmd = os.path.join(self._emulator_dir,
'cloud_datastore_emulator')
os.chmod(self._emulator_cmd, 0700)
if java:
os.environ['JAVA'] = java
|
Constructs a factory for building datastore emulator instances.
Args:
working_directory: path to a directory where temporary files will be
stored
emulator_zip: path to the emulator zip file
java: path to a java executable
|
juraj-google-style
|
def update(self, **kwargs):
if ('image' not in kwargs):
spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']
kwargs['image'] = spec['Image']
if (kwargs.get('force_update') is True):
task_template = self.attrs['Spec']['TaskTemplate']
current_value = int(task_template.get('ForceUpdate', 0))
kwargs['force_update'] = (current_value + 1)
create_kwargs = _get_create_service_kwargs('update', kwargs)
return self.client.api.update_service(self.id, self.version, **create_kwargs)
|
Update a service's configuration. Similar to the ``docker service
update`` command.
Takes the same parameters as :py:meth:`~ServiceCollection.create`.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
def moma2(self, objective, wt_obj):
obj_expr = 0
for reaction in self._adjustment_reactions():
v_wt = self._v_wt[reaction]
v = self._v[reaction]
obj_expr += ((v_wt - v) ** 2)
self._prob.set_objective(obj_expr)
with self.constraints((self._v_wt[objective] >= wt_obj)):
self._solve(lp.ObjectiveSense.Minimize)
|
Find the smallest redistribution vector using Euclidean distance.
Minimizing the redistribution of fluxes using a quadratic objective
function. The distance is minimized by minimizing the sum of
(wild type - knockout)^2.
Creates the constraint that we select the optimal flux vector that
is closest to the wildtype. This might still return an arbitrary flux
vector that maximizes the objective function.
Args:
objective: Objective reaction for the model.
wt_obj: The flux value for your wild type objective reactions.
Can either use an experimental value or one determined by FBA
by using :meth:`.get_fba_obj_flux(objective)`.
|
codesearchnet
|
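The quadratic MOMA objective described in the docstring of `moma2`, restated as a formula (the notation is mine, not taken from the source; the sum runs over the adjustment reactions and f_wt is the `wt_obj` value passed in):
```latex
\min_{v} \; \sum_{j \in \text{adjustment reactions}} \left(v_j^{\mathrm{wt}} - v_j\right)^2
\quad \text{subject to} \quad v^{\mathrm{wt}}_{\mathrm{objective}} \ge f_{\mathrm{wt}}
```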
def validate_layout_display(self, table, display_condition):
display = False
if display_condition is None:
display = True
else:
display_query = 'select count(*) from {} where {}'.format(table, display_condition)
try:
cur = self.db_conn.cursor()
cur.execute(display_query.replace('"', ''))
rows = cur.fetchall()
if rows[0][0] > 0:
display = True
except sqlite3.Error as e:
print('"{}" query returned an error: ({}).'.format(display_query, e))
sys.exit(1)
return display
|
Check to see if the display condition passes.
Args:
table (str): The name of the DB table which hold the App data.
display_condition (str): The "where" clause of the DB SQL statement.
Returns:
bool: True if the row count is greater than 0.
|
juraj-google-style
|
def get_attribute(json, attr):
res = [json[entry][attr] for (entry, _) in enumerate(json)]
logger.debug('{0}s (from JSON):\n{1}'.format(attr, res))
return res
|
Gets the values of an attribute from JSON
Args:
json: JSON data as a list of dict dates, where the keys are
the raw market statistics.
attr: String of attribute in JSON file to collect.
Returns:
List of values of specified attribute from JSON
|
codesearchnet
|
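A hypothetical illustration of `get_attribute` above with made-up market data (the JSON is already parsed into a list of dicts, as the function expects; assumes the module's `logger` is configured):
```python
# Hypothetical data; get_attribute collects one attribute across all entries.
json_data = [
    {'date': '2019-01-02', 'close': 101.2, 'volume': 3000},
    {'date': '2019-01-03', 'close': 99.8, 'volume': 2800},
]
closes = get_attribute(json_data, 'close')
# closes == [101.2, 99.8]
```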
def values(self):
return self._values
|
The non-zero values in the represented dense tensor.
Returns:
A 1-D Tensor of any data type.
|
github-repos
|
def encode_field(self, field, value):
for encoder in _GetFieldCodecs(field, 'encoder'):
result = encoder(field, value)
value = result.value
if result.complete:
return value
if isinstance(field, messages.EnumField):
if field.repeated:
remapped_value = [GetCustomJsonEnumMapping(
field.type, python_name=e.name) or e.name for e in value]
else:
remapped_value = GetCustomJsonEnumMapping(
field.type, python_name=value.name)
if remapped_value:
return remapped_value
if (isinstance(field, messages.MessageField) and
not isinstance(field, message_types.DateTimeField)):
value = json.loads(self.encode_message(value))
return super(_ProtoJsonApiTools, self).encode_field(field, value)
|
Encode the given value as JSON.
Args:
field: a messages.Field for the field we're encoding.
value: a value for field.
Returns:
A python value suitable for json.dumps.
|
juraj-google-style
|
def init_from_wave_file(wavpath):
try:
(samplerate, data) = SW.read(wavpath)
nframes = data.shape[0]
except:
try:
w = wave.open(wavpath)
samplerate = w.getframerate()
nframes = w.getnframes()
except:
raise Exception(('Cannot decode wavefile ' + wavpath))
return SVEnv(samplerate, nframes, wavpath)
|
Init a sonic visualiser environment structure based on the analysis
of the main audio file. The audio file has to be encoded as WAV.
Args:
wavpath(str): the full path to the wavfile
|
codesearchnet
|
def GetUsernameByIdentifier(
self, user_identifier, session_identifier=CURRENT_SESSION):
user_accounts = self._user_accounts.get(session_identifier, {})
user_account = user_accounts.get(user_identifier, None)
if not user_account:
return ''
return user_account.username or ''
|
Retrieves the username based on an user identifier.
Args:
user_identifier (str): user identifier, either a UID or SID.
session_identifier (Optional[str])): session identifier, where
CURRENT_SESSION represents the active session.
Returns:
str: username.
|
juraj-google-style
|
def _has_attr(self, node, obj, attr):
if isinstance(obj, abstract.AMBIGUOUS_OR_EMPTY):
return (node, None)
if not isinstance(attr, abstract.PythonConstant) or not isinstance(attr.pyval, str):
return (node, None)
node, ret = self.ctx.attribute_handler.get_attribute(node, obj, attr.pyval)
return (node, ret is not None)
|
Check if the object has attribute attr.
Args:
node: The given node.
obj: A BaseValue, generally the left hand side of a hasattr() call.
attr: A BaseValue, generally the right hand side of a hasattr() call.
Returns:
(node, result) where result = True if the object has attribute attr, False
if it does not, and None if it is ambiguous.
|
github-repos
|
def __init__(self, *args, **kwargs):
args = deepcopy(args)
kwargs = deepcopy(kwargs)
super(Metadata, self).__init__(*args, **kwargs)
self._ensure_id()
self._ensure_version()
self._validate()
self._normalize_dates()
self._validate_interval()
|
prepare compliant, normalized metadata from inputs
Args:
kwargs: key-value pairs for metadata fields.
Raises:
InvalidDatalakeMetadata if required fields are missing and cannot
be inferred.
|
juraj-google-style
|
def get_ytvideos(query, ilogger):
queue = []
search_result = ytdiscoveryapi.search().list(
q=query,
part="id,snippet",
maxResults=1,
type="video,playlist"
).execute()
if not search_result["items"]:
return []
title = search_result["items"][0]["snippet"]["title"]
ilogger.info("Queueing {}".format(title))
if search_result["items"][0]["id"]["kind"] == "youtube
videoid = search_result["items"][0]["id"]["videoId"]
queue.append(["https:
elif search_result["items"][0]["id"]["kind"] == "youtube
queue = get_queue_from_playlist(search_result["items"][0]["id"]["playlistId"])
return queue
|
Gets either a list of videos from a playlist or a single video, using the
first result of a YouTube search
Args:
query (str): The YouTube search query
ilogger (logging.logger): The logger to log API calls to
Returns:
queue (list): The items obtained from the YouTube search
|
juraj-google-style
|
def setMeterPassword(self, new_pwd, pwd="00000000"):
result = False
self.setContext("setMeterPassword")
try:
if len(new_pwd) != 8 or len(pwd) != 8:
self.writeCmdMsg("Passwords must be exactly eight characters.")
self.setContext("")
return result
if not self.request(False):
self.writeCmdMsg("Pre command read failed: check serial line.")
else:
if not self.serialCmdPwdAuth(pwd):
self.writeCmdMsg("Password failure")
else:
req_pwd = binascii.hexlify(new_pwd.zfill(8))
req_str = "015731023030323028" + req_pwd + "2903"
req_str += self.calc_crc16(req_str[2:].decode("hex"))
self.m_serial_port.write(req_str.decode("hex"))
if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
self.writeCmdMsg("Success(setMeterPassword): 06 returned.")
result = True
self.serialPostEnd()
except:
ekm_log(traceback.format_exc(sys.exc_info()))
self.setContext("")
return result
|
Serial Call to set meter password. USE WITH CAUTION.
Args:
new_pwd (str): 8 digit numeric password to set
pwd (str): Old 8 digit numeric password.
Returns:
bool: True on completion with ACK.
|
juraj-google-style
|
def FetchMostRecentGraphSeries(label, report_type, token=None):
if _ShouldUseLegacyDatastore():
return _FetchMostRecentGraphSeriesFromTheLegacyDB(label, report_type, token=token)
return data_store.REL_DB.ReadMostRecentClientGraphSeries(label, report_type)
|
Fetches the latest graph series for a client label from the DB.
Args:
label: Client label to fetch data for.
report_type: rdf_stats.ClientGraphSeries.ReportType to fetch data for.
token: ACL token to use for reading from the legacy (non-relational)
datastore.
Raises:
AFF4AttributeTypeError: If, when reading to the legacy DB, an unexpected
report-data type is encountered.
Returns:
The graph series for the given label and report type that was last
written to the DB, or None if no series for that label and report-type
exist.
|
codesearchnet
|
def __setitem__(self, complete_selector, value):
if not SELECTOR_RE.match(complete_selector):
raise ValueError("Invalid selector '{}'.".format(complete_selector))
selector_components = complete_selector.split('.')
node = self._selector_tree
for component in selector_components[::-1]:
node = node.setdefault(component, {})
node[_TERMINAL_KEY] = complete_selector
self._selector_map[complete_selector] = value
|
Associates a value with `complete_selector`.
This function also performs some additional bookkeeping to facilitate
partial matching of selectors.
Args:
complete_selector: The (complete) selector to associate a value with.
value: The value to associate.
Raises:
ValueError: If `complete_selector` isn't a string consisting of valid
Python identifiers separated by periods.
|
juraj-google-style
|
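A small standalone sketch of the reversed-component tree that `__setitem__` above builds, which is what makes partial (suffix) selector matching cheap; the selectors are made up and the terminal key is a stand-in for the module's own sentinel:
```python
# Standalone sketch of the selector tree construction.
_TERMINAL_KEY = '\0'          # stand-in; the real module defines its own sentinel
selector_tree = {}

for complete_selector in ['mnist.train.batch_size', 'cifar.train.batch_size']:
    node = selector_tree
    for component in complete_selector.split('.')[::-1]:
        node = node.setdefault(component, {})
    node[_TERMINAL_KEY] = complete_selector

# Looking up the partial selector 'train.batch_size' walks batch_size -> train and finds
# two terminal entries underneath, so the partial match is ambiguous.
```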
def intersect(self, other):
if (not isinstance(other, self.__class__)):
m = 'You can only intersect striplogs with each other.'
raise StriplogError(m)
result = []
for iv in self:
for jv in other:
try:
result.append(iv.intersect(jv))
except IntervalError:
pass
return Striplog(result)
|
Makes a striplog of all intersections.
Args:
other (Striplog): The striplog instance to intersect with.
Returns:
Striplog. The result of the intersection.
|
codesearchnet
|
def list_tasks(target=None):
from os import getcwd, chdir
from glob import glob
original = getcwd()
if target is None:
target = _dbdir()
chdir(target)
result = {}
for filename in glob("*.*.json"):
project, task = filename.split('.')[0:2]
if project not in result:
result[project] = []
result[project].append(task)
chdir(original)
return result
|
Returns a list of all the projects and tasks available in the `acorn`
database directory.
Args:
target (str): directory to list the projects for. Defaults to the configured
database directory.
Returns:
dict: keys are project names; values are lists of tasks associated with the
project.
|
juraj-google-style
|
def __init__(self, item_class, expected_class):
super().__init__()
self.item_class = item_class
self.expected_class = expected_class
|
Take the parameters to inform the user about the error.
Args:
item_class (:obj:`type`): The class of the item that was being
inserted in the list when the exception was raised.
expected_class (:obj:`type`): The expected type that didn't match
against the item to be inserted.
|
juraj-google-style
|
def _is_failed(self):
if self._status_code in _InstrumentationStatusCodeCategories.FAIL:
return True
elif self._known_keys[_InstrumentationKnownStatusKeys.STACK] and self._status_code != _InstrumentationStatusCodes.ASSUMPTION_FAILURE:
return True
elif self._known_keys[_InstrumentationKnownStatusKeys.ERROR]:
return True
elif self._known_keys[_InstrumentationKnownResultKeys.SHORTMSG]:
return True
elif self._known_keys[_InstrumentationKnownResultKeys.LONGMSG]:
return True
else:
return False
|
Determines if the test corresponding to the instrumentation block
failed.
This method cannot be used to tell whether a test method passed and
should not be used for such a purpose.
Returns:
A boolean indicating if the test method failed.
|
github-repos
|
def select_by_key(self, key):
for item in self.children.values():
if 'selected' in item.attributes:
del item.attributes['selected']
self.children[key].attributes['selected'] = 'selected'
self._selected_key = key
self._selected_item = self.children[key]
|
Selects an item by its unique string identifier.
Args:
key (str): Unique string identifier of the DropDownItem that has to be selected.
|
juraj-google-style
|
def to_csv(self, sep=',', path=None):
stats = self._stats()
data = []
first_row = ['Stat', self.name]
data.append(sep.join(first_row))
for stat in stats:
k, n, f = stat
if k is None:
row = [''] * len(data[0])
data.append(sep.join(row))
continue
elif k == 'rf' and not type(self.rf) == float:
continue
row = [n]
raw = getattr(self, k)
if f is None:
row.append(raw)
elif f == 'p':
row.append(fmtp(raw))
elif f == 'n':
row.append(fmtn(raw))
elif f == 'dt':
row.append(raw.strftime('%Y-%m-%d'))
else:
raise NotImplementedError('unsupported format %s' % f)
data.append(sep.join(row))
res = '\n'.join(data)
if path is not None:
with open(path, 'w') as fl:
fl.write(res)
else:
return res
|
Returns a CSV string with appropriate formatting.
If path is not None, the string will be saved to file
at path.
Args:
* sep (char): Separator
* path (str): If None, CSV string returned. Else file written
to specified path.
|
juraj-google-style
|
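A hedged usage sketch, assuming `stats` is an instance of the class this method belongs to (a performance-stats object exposing `_stats()`):
csv_text = stats.to_csv()                  # default comma separator, returns the CSV string
stats.to_csv(sep=';', path='report.csv')   # writes the file instead and returns None
|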
def _ReadEventDataIntoEvent(self, event):
if (self._storage_type != definitions.STORAGE_TYPE_SESSION):
return
event_data_identifier = event.GetEventDataIdentifier()
if event_data_identifier:
lookup_key = event_data_identifier.CopyToString()
event_data = self._event_data[lookup_key]
for (attribute_name, attribute_value) in event_data.GetAttributes():
setattr(event, attribute_name, attribute_value)
|
Reads the data into the event.
This function is intended to offer backwards compatible event behavior.
Args:
event (EventObject): event.
|
codesearchnet
|
def setModelData(self, editor, model, index):
if index.isValid():
value = editor.text()
model.setData(index, value, QtCore.Qt.EditRole)
|
Gets data from the editor widget and stores it in the specified model at the item index.
Args:
editor (QtGui.QLineEdit): editor widget.
model (QAbstractItemModel): parent model.
index (QModelIndex): model data index.
|
juraj-google-style
|
def get_params(img, scale, ratio):
area = (img.size[0] * img.size[1])
for attempt in range(10):
target_area = (random.uniform(*scale) * area)
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt((target_area * aspect_ratio))))
h = int(round(math.sqrt((target_area / aspect_ratio))))
if ((w <= img.size[0]) and (h <= img.size[1])):
i = random.randint(0, (img.size[1] - h))
j = random.randint(0, (img.size[0] - w))
return (i, j, h, w)
in_ratio = (img.size[0] / img.size[1])
if (in_ratio < min(ratio)):
w = img.size[0]
h = (w / min(ratio))
elif (in_ratio > max(ratio)):
h = img.size[1]
w = (h * max(ratio))
else:
w = img.size[0]
h = img.size[1]
i = ((img.size[1] - h) 
j = ((img.size[0] - w) 
return (i, j, h, w)
|
Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
|
codesearchnet
|
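A hedged usage sketch with Pillow, mirroring how a RandomResizedCrop-style transform would consume the returned parameters; it assumes `random`, `math`, and PIL are importable as the snippet expects, and the 640x480 image is just an illustration:
from PIL import Image

img = Image.new('RGB', (640, 480))
i, j, h, w = get_params(img, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.))
# i, j are the top/left of the crop; PIL's crop box is (left, upper, right, lower)
patch = img.crop((j, i, j + w, i + h))
|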
def memory_read8(self, addr, num_bytes, zone=None):
return self.memory_read(addr, num_bytes, zone=zone, nbits=8)
|
Reads memory from the target system in units of bytes.
Args:
self (JLink): the ``JLink`` instance
addr (int): start address to read from
num_bytes (int): number of bytes to read
zone (str): memory zone to read from
Returns:
List of bytes read from the target system.
Raises:
JLinkException: if memory could not be read.
|
juraj-google-style
|
def _get_group_object(name):
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
return nt.GetObject('', 'WinNT:
|
A helper function to get a specified group object
Args:
name (str): The name of the object
Returns:
object: The specified group object
|
juraj-google-style
|
def save_local_scope(
self,
line_number,
saved_function_call_index
):
saved_variables = list()
saved_variables_so_far = set()
first_node = None
for assignment in [node for node in self.nodes
if (type(node) == AssignmentNode or
type(node) == AssignmentCallNode or
type(node) == BBorBInode)]:
if assignment.left_hand_side in saved_variables_so_far:
continue
saved_variables_so_far.add(assignment.left_hand_side)
save_name = 'save_{}_{}'.format(saved_function_call_index, assignment.left_hand_side)
previous_node = self.nodes[-1]
saved_scope_node = RestoreNode(
save_name + ' = ' + assignment.left_hand_side,
save_name,
[assignment.left_hand_side],
line_number=line_number,
path=self.filenames[-1]
)
if not first_node:
first_node = saved_scope_node
self.nodes.append(saved_scope_node)
saved_variables.append(SavedVariable(LHS=save_name,
RHS=assignment.left_hand_side))
self.connect_if_allowed(previous_node, saved_scope_node)
return (saved_variables, first_node)
|
Save the local scope before entering a function call by saving all the LHS's of assignments so far.
Args:
line_number(int): Of the def of the function call about to be entered into.
saved_function_call_index(int): Unique number for each call.
Returns:
saved_variables(list[SavedVariable])
first_node(EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function.
|
juraj-google-style
|
def binomial_coefficient(n, k):
if not isinstance(k, int) or not isinstance(n, int):
raise TypeError("Expecting positive integers")
if k > n:
raise ValueError("k must be lower or equal than n")
if k < 0 or n < 0:
raise ValueError("Expecting positive integers")
return factorial(n) 
|
Calculate the binomial coefficient indexed by n and k.
Args:
n (int): positive integer
k (int): positive integer
Returns:
The binomial coefficient indexed by n and k
Raises:
TypeError: If either n or k is not an integer
ValueError: If either n or k is negative, or if k is strictly greater than n
|
juraj-google-style
|
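A couple of sanity checks for the snippet above; `factorial` is assumed to be imported from `math` at module level in the original source:
from math import factorial  # assumed module-level import for the snippet

assert binomial_coefficient(5, 2) == 10
assert binomial_coefficient(6, 0) == 1
assert binomial_coefficient(4, 4) == 1
|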
def _start_services_on_ads(ads):
for ad in ads:
start_logcat = not getattr(ad, KEY_SKIP_LOGCAT, DEFAULT_VALUE_SKIP_LOGCAT)
try:
if start_logcat:
ad.services.logcat.start()
except Exception:
is_required = getattr(ad, KEY_DEVICE_REQUIRED, DEFAULT_VALUE_DEVICE_REQUIRED)
if is_required:
ad.log.exception('Failed to start some services, abort!')
destroy(ads)
raise
else:
ad.log.exception('Skipping this optional device because some services failed to start.')
|
Starts long running services on multiple AndroidDevice objects.
If any one AndroidDevice object fails to start services, cleans up all
AndroidDevice objects and their services.
Args:
ads: A list of AndroidDevice objects whose services to start.
|
github-repos
|
def add_gene(self, gene):
logger.debug('Adding gene {0} to variant {1}'.format(gene, self['variant_id']))
self['genes'].append(gene)
|
Add the information of a gene
This adds a gene dict to variant['genes']
Args:
gene (dict): A gene dictionary
|
codesearchnet
|
def _AssertProtoEquals(self, a, b, msg=None, relative_tolerance=None):
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg, relative_tolerance=relative_tolerance)
|
Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then use assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
relative_tolerance: float. The allowable difference between the two values
being compared is determined by multiplying the relative tolerance by
the maximum of the two values. If this is not provided, then all floats
are compared using string comparison.
|
github-repos
|
def __le__(self, other):
other = self._cast_to_frameset(other)
if other is NotImplemented:
return NotImplemented
return self.items <= other.items
|
Check if `self` <= `other` via a comparison of the contents.
If `other` is not a :class:`FrameSet`, but is a set, frozenset, or
is iterable, it will be cast to a :class:`FrameSet`.
Args:
other (:class:`FrameSet`): Also accepts an object that can be cast to a :class:`FrameSet`
Returns:
bool:
:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`
|
juraj-google-style
|
def info(msg: str, *args, **kwargs) -> None:
_DEFAULT_LOGGER.info(msg, *args, **kwargs)
|
Logs info message.
Args:
msg: Message with possible format string.
*args: Values for variables in the format string.
**kwargs: Keyword arguments for the logger.
|
github-repos
|
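A one-line usage sketch showing the printf-style formatting the docstring describes:
info('Processed %d records in %.1f s', 42, 1.2)
|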
def join(input_files, output_file):
final_features = []
for file in input_files:
with open(file) as f:
feat_collection = geojson.load(f)
final_features += feat_collection['features']
feat_collection['features'] = final_features
with open(output_file, 'w') as f:
geojson.dump(feat_collection, f)
|
Join geojsons into one. The spatial reference system of the output file is the same
as the one of the last file in the list.
Args:
input_files (list): List of file name strings.
output_file (str): Output file name.
|
juraj-google-style
|
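A hedged usage sketch with the `geojson` package; the two input files are created here only for illustration:
import geojson

fc = geojson.FeatureCollection([geojson.Feature(geometry=geojson.Point((0.0, 0.0)))])
for name in ('tile_1.geojson', 'tile_2.geojson'):
    with open(name, 'w') as f:
        geojson.dump(fc, f)

join(['tile_1.geojson', 'tile_2.geojson'], 'merged.geojson')  # merged file holds both features
|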
def freeze_parameter(self, name):
i = self.get_parameter_names(include_frozen=True).index(name)
self.unfrozen_mask[i] = False
|
Freeze a parameter by name
Args:
name: The name of the parameter
|
codesearchnet
|
def create_tc_entity(self, key, value):
data = None
if ((key is not None) and (value is not None)):
data = self.db.create(key.strip(), json.dumps(value))
else:
self.tcex.log.warning(u'The key or value field was None.')
return data
|
Create method of CRUD operation for TC entity data.
Args:
key (string): The variable to write to the DB.
value (any): The data to write to the DB.
Returns:
(string): Result of DB write.
|
codesearchnet
|
def unpack_grad_tuple(gv, gpt):
elt_widths = [x.num_elements() for x in gpt.shapes]
with tf.device(gv[0][0].device):
with tf.name_scope("unpack"):
splits = tf.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((tf.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
|
Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
|
juraj-google-style
|
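A hedged illustration; the real GradPackTuple is defined elsewhere, so a stand-in namedtuple with only the two fields this function reads is used here:
import collections
import tensorflow as tf

GradPackTuple = collections.namedtuple('GradPackTuple', ['vars', 'shapes'])  # stand-in

v1 = tf.Variable(tf.zeros([2, 3]))
v2 = tf.Variable(tf.zeros([4]))
packed_grad = tf.ones([v1.shape.num_elements() + v2.shape.num_elements()])
gpt = GradPackTuple(vars=[v1, v2], shapes=[v1.shape, v2.shape])
pairs = unpack_grad_tuple((packed_grad, None), gpt)
# pairs[0][0] has shape [2, 3] and pairs[1][0] has shape [4]
|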
def render_latex(latex: str) -> PIL.Image:
tmpfilename = 'circ'
with tempfile.TemporaryDirectory() as tmpdirname:
tmppath = os.path.join(tmpdirname, tmpfilename)
with open(tmppath + '.tex', 'w') as latex_file:
latex_file.write(latex)
subprocess.run(["pdflatex",
"-halt-on-error",
"-output-directory={}".format(tmpdirname),
"{}".format(tmpfilename+'.tex')],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
check=True)
subprocess.run(['pdftocairo',
'-singlefile',
'-png',
'-q',
tmppath + '.pdf',
tmppath])
img = PIL.Image.open(tmppath + '.png')
return img
|
Convert a single page LaTeX document into an image.
To display the returned image, `img.show()`
Required external dependencies: `pdflatex` (with `qcircuit` package),
and `poppler` (for `pdftocairo`).
Args:
latex: A LaTeX document as a string.
Returns:
A PIL Image
Raises:
OSError: If an external dependency is not installed.
|
juraj-google-style
|
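A hedged usage sketch; it only works if `pdflatex` and `pdftocairo` are installed, and the function itself relies on `PIL`, `tempfile`, `os`, and `subprocess` being imported:
doc = r'\documentclass{standalone} \begin{document} $x^2 + y^2 = z^2$ \end{document}'
img = render_latex(doc)
img.show()
|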
def view_structure(self, only_chains=None, opacity=1.0, recolor=False, gui=False):
if ssbio.utils.is_ipynb():
import nglview as nv
else:
raise EnvironmentError('Unable to display structure - not running in a Jupyter notebook environment')
if not self.structure_file:
raise ValueError("Structure file not loaded")
only_chains = ssbio.utils.force_list(only_chains)
to_show_chains = '( '
for c in only_chains:
to_show_chains += ':{} or'.format(c)
to_show_chains = to_show_chains.strip(' or ')
to_show_chains += ' )'
if self.file_type == 'mmtf' or self.file_type == 'mmtf.gz':
view = nv.NGLWidget()
view.add_component(self.structure_path)
else:
view = nv.show_structure_file(self.structure_path, gui=gui)
if recolor:
view.clear_representations()
if only_chains:
view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity)
else:
view.add_cartoon(selection='protein', color='silver', opacity=opacity)
elif only_chains:
view.clear_representations()
view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity)
return view
|
Use NGLviewer to display a structure in a Jupyter notebook
Args:
only_chains (str, list): Chain ID or IDs to display
opacity (float): Opacity of the structure
recolor (bool): If structure should be cleaned and recolored to silver
gui (bool): If the NGLview GUI should show up
Returns:
NGLviewer object
|
juraj-google-style
|
def process_status_queue(self):
self.log.debug('Start processing status queue')
while True:
messages = self.status_queue.receive_messages(MaxNumberOfMessages=10)
if (not messages):
break
for message in messages:
data = json.loads(message.body)
job = SchedulerJob.get(data['id'])
try:
if (job and job.update_status(data['status'])):
db.session.commit()
except SchedulerError as ex:
if (hasattr(ex, 'message') and (ex.message == 'Attempting to update already completed job')):
pass
message.delete()
open_batches = db.SchedulerBatch.find((SchedulerBatch.status < SchedulerStatus.COMPLETED))
for batch in open_batches:
open_jobs = list(filter((lambda x: (x.status < SchedulerStatus.COMPLETED)), batch.jobs))
if (not open_jobs):
open_batches.remove(batch)
batch.update_status(SchedulerStatus.COMPLETED)
self.log.debug('Closed completed batch {}'.format(batch.batch_id))
else:
started_jobs = list(filter((lambda x: (x.status > SchedulerStatus.PENDING)), open_jobs))
if ((batch.status == SchedulerStatus.PENDING) and (len(started_jobs) > 0)):
batch.update_status(SchedulerStatus.STARTED)
self.log.debug('Started batch manually {}'.format(batch.batch_id))
for batch in open_batches:
if (batch.started < (datetime.now() - timedelta(hours=2))):
self.log.warning('Closing a stale scheduler batch: {}'.format(batch.batch_id))
for job in batch.jobs:
if (job.status < SchedulerStatus.COMPLETED):
job.update_status(SchedulerStatus.ABORTED)
batch.update_status(SchedulerStatus.ABORTED)
db.session.commit()
|
Process all messages in the `status_queue` and check for any batches that needs to change status
Returns:
`None`
|
codesearchnet
|
def __init__(self, fileobj):
self.fileobj = fileobj
self.mardata = mar.parse_stream(self.fileobj)
|
Initialize a new MarReader object.
Note:
Files should always be opened in binary mode.
Args:
fileobj (file object): A file-like object open in read mode where
the MAR data will be read from. This object must also be
seekable (i.e. support .seek() and .tell()).
|
juraj-google-style
|
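A hedged usage sketch; the filename is hypothetical and the file must be opened in binary mode, as the note above stresses:
with open('update.mar', 'rb') as f:
    reader = MarReader(f)
    print(reader.mardata)  # parsed MAR structure
|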
def __init__(self, a_file=None):
self._macros = dict()
if a_file:
self.ParseInput(a_file)
|
Initializes the collection.
Args:
a_file: The file like stream to parse.
Raises:
PDDMError if there are any issues.
|
juraj-google-style
|
def update_compliance_all(self, information, timeout=(- 1)):
uri = (self.URI + '/compliance')
result = self._helper.update(information, uri, timeout=timeout)
return result
|
Returns SAS Logical Interconnects to a consistent state. The current SAS Logical Interconnect state is
compared to the associated SAS Logical Interconnect group.
Args:
information: Can be either the resource ID or URI.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: SAS Logical Interconnect.
|
codesearchnet
|
def add_word(self, word):
word = word.lower()
if not (word.isascii() and word.isalpha()):
raise ValueError("Invalid character in word '{}'".format(word))
word = word.encode(encoding="ascii")
result = cgaddag.gdg_add_word(self.gdg, word)
if result == 1:
raise ValueError("Invalid character in word '{}'".format(word))
elif result == 2:
raise MemoryError("Out of memory, GADDAG is in an undefined state")
|
Add a word to the GADDAG.
Args:
word: A word to be added to the GADDAG.
|
juraj-google-style
|
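A hedged usage sketch, assuming `g` is an instance of the GADDAG wrapper this method belongs to; non-ASCII or non-alphabetic input raises ValueError:
g.add_word('Python')       # stored lower-cased as 'python'
try:
    g.add_word('naïve')     # non-ASCII character
except ValueError as err:
    print(err)
|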
def sys_save_screenshot(name: Optional[str]=None) -> None:
lib.TCOD_sys_save_screenshot((_bytes(name) if (name is not None) else ffi.NULL))
|
Save a screenshot to a file.
By default this will automatically save screenshots in the working
directory.
The automatic names are formatted as screenshotNNN.png. For example:
screenshot000.png, screenshot001.png, etc. Whichever is available first.
Args:
name (Optional[str]): File path to save the screenshot.
|
codesearchnet
|