code | docstring | source |
---|---|---|
def fetch(self, refund_id, data={}, **kwargs):
return super(Refund, self).fetch(refund_id, data, **kwargs)
|
Refund object for given payment Id
Args:
refund_id : Refund Id for which refund has to be retrieved
Returns:
Refund dict for given refund Id
|
juraj-google-style
|
def __init__(
self,
cls,
diff,
):
msg = "\n".join([
"",
"ctor: {}".format(cls),
"extras: {}".format(diff)
])
Exception.__init__(self, msg)
self.type = str(
type(self),
)
self.cls = str(cls)
self.diff = str(diff)
self.type = self.__class__.__name__
|
Note that type_assert can't be used because it would
create a circular dependency.
Args:
cls: type, The type that was attempted to unmarshal into
diff: dict, The extra arguments that were passed to @cls
|
juraj-google-style
|
def imread(img_or_path, flag='color'):
if isinstance(img_or_path, np.ndarray):
return img_or_path
elif is_str(img_or_path):
flag = (imread_flags[flag] if is_str(flag) else flag)
check_file_exist(img_or_path, 'img file does not exist: {}'.format(img_or_path))
return cv2.imread(img_or_path, flag)
else:
raise TypeError('"img" must be a numpy array or a filename')
|
Read an image.
Args:
img_or_path (ndarray or str): Either a numpy array or image path.
If it is a numpy array (loaded image), then it will be returned
as is.
flag (str): Flags specifying the color type of a loaded image,
candidates are `color`, `grayscale` and `unchanged`.
Returns:
ndarray: Loaded image array.
|
codesearchnet
|
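A brief usage sketch (file name illustrative; the flag strings follow the `imread_flags` mapping referenced in the docstring):
img = imread('demo.jpg')                    # color (BGR) ndarray by default
gray = imread('demo.jpg', flag='grayscale')
same = imread(img)                          # ndarrays are returned unchanged
assert same is img
|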
def get(self, language: str=None, default: str=None) -> str:
language = language or settings.LANGUAGE_CODE
value = super().get(language, default)
return value if value is not None else default
|
Gets the underlying value in the specified or
primary language.
Arguments:
language:
The language to get the value in.
Returns:
The value in the current language, or
the primary language in case no language
was specified.
|
juraj-google-style
|
def to_dataframe(self):
data = []
for (target_index, target_row) in enumerate(self._cm):
for (predicted_index, count) in enumerate(target_row):
data.append((self._labels[target_index], self._labels[predicted_index], count))
return pd.DataFrame(data, columns=['target', 'predicted', 'count'])
|
Convert the confusion matrix to a dataframe.
Returns:
A DataFrame with "target", "predicted", "count" columns.
|
codesearchnet
|
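A worked sketch of the long-form output, assuming a hypothetical confusion matrix `cm` with labels ['cat', 'dog'] and counts [[3, 1], [0, 5]]; the printed frame looks roughly like:
df = cm.to_dataframe()
print(df)
#   target predicted  count
# 0    cat       cat      3
# 1    cat       dog      1
# 2    dog       cat      0
# 3    dog       dog      5
|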
def smart_device_selection(preferred_device_type=None):
cl_environments = CLEnvironmentFactory.all_devices(cl_device_type=preferred_device_type)
platform_names = [env.platform.name for env in cl_environments]
has_amd_pro_platform = any((('AMD Accelerated Parallel Processing' in name) for name in platform_names))
if has_amd_pro_platform:
return list(filter((lambda env: ('Clover' not in env.platform.name)), cl_environments))
if ((preferred_device_type is not None) and (not len(cl_environments))):
return CLEnvironmentFactory.all_devices()
return cl_environments
|
Get a list of device environments that is suitable for use in MOT.
Basically this gets the total list of devices using all_devices() and applies a filter on it.
This filter does the following:
1) if the 'AMD Accelerated Parallel Processing' is available remove all environments using the 'Clover'
platform.
More things may be implemented in the future.
Args:
preferred_device_type (str): the preferred device type, one of 'CPU', 'GPU' or 'APU'.
If no devices of this type can be found, we will use any other device available.
Returns:
list of CLEnvironment: List with the CL device environments.
|
codesearchnet
|
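A minimal usage sketch (the attribute access on the returned environments is assumed from the snippet above):
envs = smart_device_selection(preferred_device_type='GPU')
for env in envs:
    print(env.platform.name)   # inspect which platforms survived the filter
|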
def _decrypt_asymmetric(self, decryption_algorithm, decryption_key, cipher_text, padding_method, hashing_algorithm=None):
if (decryption_algorithm == enums.CryptographicAlgorithm.RSA):
if (padding_method == enums.PaddingMethod.OAEP):
hash_algorithm = self._encryption_hash_algorithms.get(hashing_algorithm)
if (hash_algorithm is None):
raise exceptions.InvalidField("The hashing algorithm '{0}' is not supported for asymmetric decryption.".format(hashing_algorithm))
padding_method = asymmetric_padding.OAEP(mgf=asymmetric_padding.MGF1(algorithm=hash_algorithm()), algorithm=hash_algorithm(), label=None)
elif (padding_method == enums.PaddingMethod.PKCS1v15):
padding_method = asymmetric_padding.PKCS1v15()
else:
raise exceptions.InvalidField("The padding method '{0}' is not supported for asymmetric decryption.".format(padding_method))
backend = default_backend()
try:
private_key = backend.load_der_private_key(decryption_key, None)
except Exception:
try:
private_key = backend.load_pem_private_key(decryption_key, None)
except Exception:
raise exceptions.CryptographicFailure('The private key bytes could not be loaded.')
plain_text = private_key.decrypt(cipher_text, padding_method)
return plain_text
else:
raise exceptions.InvalidField("The cryptographic algorithm '{0}' is not supported for asymmetric decryption.".format(decryption_algorithm))
|
Decrypt data using asymmetric decryption.
Args:
decryption_algorithm (CryptographicAlgorithm): An enumeration
specifying the asymmetric decryption algorithm to use for
decryption. Required.
decryption_key (bytes): The bytes of the private key to use for
decryption. Required.
cipher_text (bytes): The bytes to be decrypted. Required.
padding_method (PaddingMethod): An enumeration specifying the
padding method to use with the asymmetric decryption
algorithm. Required.
hashing_algorithm (HashingAlgorithm): An enumeration specifying
the hashing algorithm to use with the decryption padding
method. Required, if the padding method is OAEP. Optional
otherwise, defaults to None.
Returns:
bytes: The decrypted plain text.
Raises:
InvalidField: Raised when the algorithm is unsupported or the
length is incompatible with the algorithm.
CryptographicFailure: Raised when the private key bytes cannot
    be loaded.
|
codesearchnet
|
def save_page(self, path=None):
path = _prepare_path(path, 'html')
with open(path, 'wb') as f:
f.write(encode_string(self.body))
return path
|
Save a snapshot of the page.
If invoked without arguments, it will save a file to :data:`capybara.save_path` and the
file will be given a randomly generated filename. If invoked with a relative path, the path
will be relative to :data:`capybara.save_path`.
Args:
path (str, optional): The path to where it should be saved.
Returns:
str: The path to which the file was saved.
|
codesearchnet
|
def __init__(self, conf, conn=None):
super(HttpFilesSource, self).__init__(conf)
self._SetDefaults(conf)
if not conn:
conn = pycurl.Curl()
conn.setopt(pycurl.NOPROGRESS, 1)
conn.setopt(pycurl.NOSIGNAL, 1)
conn.setopt(pycurl.TIMEOUT, 60)
conn.setopt(pycurl.USERAGENT, 'nsscache')
if self.conf['http_proxy']:
conn.setopt(pycurl.PROXY, self.conf['http_proxy'])
self.conn = conn
|
Initialise the HTTP Data Source.
Args:
conf: config.Config instance
conn: pycurl Curl object
|
github-repos
|
def _send_request(url_id, data=None, json=None, req_type=None):
url = (settings.SEEDER_INFO_URL % url_id)
if (not req_type):
req_type = requests.get
resp = req_type(url, data=data, json=json, timeout=settings.SEEDER_TIMEOUT, headers={'User-Agent': settings.USER_AGENT, 'Authorization': settings.SEEDER_TOKEN})
resp.raise_for_status()
data = resp.json()
return data
|
Send request to Seeder's API.
Args:
url_id (str): ID used as identification in Seeder.
data (obj, default None): Optional parameter for data.
json (obj, default None): Optional parameter for JSON body.
req_type (fn, default None): Request method used to send/download the
data. If none, `requests.get` is used.
Returns:
dict: Data from Seeder.
|
codesearchnet
|
def update_vm(access_token, subscription_id, resource_group, vm_name, body):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachines/', vm_name, '?api-version=', COMP_API])
return do_put(endpoint, body, access_token)
|
Update a virtual machine with a new JSON body. E.g. do a GET, change something, call this.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vm_name (str): Name of the virtual machine.
body (dict): JSON body of the VM.
Returns:
HTTP response.
|
codesearchnet
|
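A hedged sketch of the GET-modify-PUT workflow the docstring describes; `get_vm` is assumed to be a sibling helper in the same module and the resource names are placeholders:
vm = get_vm(access_token, subscription_id, 'my-rg', 'my-vm')   # assumed helper
vm['tags'] = {'environment': 'staging'}                        # change something
response = update_vm(access_token, subscription_id, 'my-rg', 'my-vm', vm)
print(response.status_code)
|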
def CheckTaskReadyForMerge(self, task):
if self._storage_type != definitions.STORAGE_TYPE_SESSION:
raise IOError('Unsupported storage type.')
if not self._processed_task_storage_path:
raise IOError('Missing processed task storage path.')
processed_storage_file_path = self._GetProcessedStorageFilePath(task)
try:
stat_info = os.stat(processed_storage_file_path)
except (IOError, OSError):
return False
task.storage_file_size = stat_info.st_size
return True
|
Checks if a task is ready for merging with this session storage.
If the task is ready to be merged, this method also sets the task's
storage file size.
Args:
task (Task): task.
Returns:
bool: True if the task is ready to be merged.
Raises:
IOError: if the storage type is not supported or
    if the temporary path for the task storage does not exist.
OSError: if the storage type is not supported or
    if the temporary path for the task storage does not exist.
|
juraj-google-style
|
def update_connection_endpoint(self, connection_id, endpoint):
if connection_id in self._connections:
connection_info = self._connections[connection_id]
self._connections[connection_id] = \
ConnectionInfo(connection_info.connection_type,
connection_info.connection,
endpoint,
connection_info.status,
connection_info.public_key)
else:
LOGGER.debug("Could not update the endpoint %s for "
"connection_id %s. The connection does not "
"exist.",
endpoint,
connection_id)
|
Adds the endpoint to the connection definition. When the
connection is created by the send/receive thread, we do not
yet have the endpoint of the remote node. That is not known
until we process the incoming ConnectRequest.
Args:
connection_id (str): The identifier for the connection.
endpoint (str): A zmq-style uri which identifies a publicly
reachable endpoint.
|
juraj-google-style
|
def usergroups_disable(self, *, usergroup: str, **kwargs) -> SlackResponse:
self._validate_xoxp_token()
kwargs.update({'usergroup': usergroup})
return self.api_call('usergroups.disable', json=kwargs)
|
Disable an existing User Group
Args:
usergroup (str): The encoded ID of the User Group to disable.
e.g. 'S0604QSJC'
|
codesearchnet
|
def msgBox(self, promptType, _timeout=(- 1), **options):
if (promptType == 'confirm'):
return self._sendConfirmPrompt(_timeout, options)
else:
raise ValueError('Unknown prompt type: {}'.format(promptType))
|
Send a user prompt request to the GUI
Arguments:
promptType (string):
The prompt type to send to the GUI. Currently
the only type supported is 'confirm'.
_timeout (int):
The optional amount of time for which the prompt
should be displayed to the user before a timeout occurs.
Defaults to -1 which indicates there is no timeout limit.
options (dict):
The keyword arguments that should be passed to the requested
prompt type. Check prompt specific sections below for information on what
arguments are expected to be present.
Raises:
ValueError:
If the prompt type received is an unexpected value
**Confirm Prompt**
Display a message to the user and prompt them for a confirm/deny
response to the message.
Arguments:
msg (string):
The message to display to the user
Returns:
True if the user picks 'Confirm', False if the user picks 'Deny'
Raises:
KeyError:
If the options passed to the prompt handler doesn't contain a
`msg` attribute.
APITimeoutError:
If the timeout value is reached without receiving a response.
|
codesearchnet
|
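A small usage sketch for the confirm prompt; `gui` is an assumed instance of the class that defines msgBox:
confirmed = gui.msgBox('confirm', _timeout=30, msg='Overwrite the existing file?')
if confirmed:
    print('User picked Confirm')   # False means the user picked Deny
|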
def Deserialize(self, reader):
self.Script = reader.ReadVarBytes()
self.ParameterList = reader.ReadVarBytes()
self.ReturnType = reader.ReadByte()
|
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
|
juraj-google-style
|
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
try:
tokenizer = BertTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
except:
from .tokenization_bert_fast import BertTokenizerFast
tokenizer = BertTokenizerFast.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
return cls.from_tokenizer(tokenizer, **kwargs)
|
Instantiate a `TFBertTokenizer` from a pre-trained tokenizer.
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
The name or path to the pre-trained tokenizer.
Examples:
```python
from transformers import TFBertTokenizer
tf_tokenizer = TFBertTokenizer.from_pretrained("google-bert/bert-base-uncased")
```
|
github-repos
|
def _resolve_attribute_match(self, match):
if (match.group(1) == 'cluster'):
return str(self.cluster_id)
return self.get(match.group(1), match.group(0))
|
Replaces a reference to an attribute with the value of the attribute.
Args:
match (re.match object): A match object containing a match to a reference to an attribute.
|
codesearchnet
|
def closest_eere(latitude, longitude):
with open(env.SRC_PATH + '/eere_meta.csv') as eere_meta:
stations = csv.DictReader(eere_meta)
d = 9999
station_code = ''
station_name = ''
for station in stations:
new_dist = great_circle((latitude, longitude),
(float(station['latitude']),
float(station['longitude']))).miles
if new_dist <= d:
d = new_dist
station_code = station['station_code']
station_name = station['weather_station']
        if station_code:
            return station_code, station_name
        raise KeyError('station not found')
|
Find closest station from the new(er) list.
Warning: There may be some errors with smaller non US stations.
Args:
latitude (float)
longitude (float)
Returns:
tuple (station_code (str), station_name (str))
|
juraj-google-style
|
def set_pipeline_definition(self):
if (not self.pipeline_id):
self.get_pipeline_id()
json_def = self.datapipeline_data['json_definition']
try:
pipelineobjects = translator.definition_to_api_objects(json_def)
parameterobjects = translator.definition_to_api_parameters(json_def)
parametervalues = translator.definition_to_parameter_values(json_def)
except translator.PipelineDefinitionError as error:
LOG.warning(error)
raise DataPipelineDefinitionError
response = self.client.put_pipeline_definition(pipelineId=self.pipeline_id, pipelineObjects=pipelineobjects, parameterObjects=parameterobjects, parameterValues=parametervalues)
LOG.debug(response)
LOG.info('Successfully applied pipeline definition')
return response
|
Translates the json definition and puts it on created pipeline
Returns:
dict: the response of the Boto3 command
|
codesearchnet
|
def login_with_password_no_sync(self, username, password):
warn('login_with_password_no_sync is deprecated. Use login with sync=False.', DeprecationWarning)
return self.login(username, password, sync=False)
|
Deprecated. Use ``login`` with ``sync=False``.
Login to the homeserver.
Args:
username (str): Account username
password (str): Account password
Returns:
str: Access token
Raises:
MatrixRequestError
|
codesearchnet
|
def get_attribute(self, obj, attr):
if (attr == '*'):
return obj
if isinstance(obj, Mapping):
return obj.get(attr, None)
return getattr(obj, attr, None)
|
Get attribute of given object instance.
This method exists because the 'attribute' can also be the object's
key if the object is a dict or any other kind of mapping.
Note: it will return None if the attribute key does not exist.
Args:
obj (object): internal object to retrieve data from
Returns:
internal object's key value or attribute
|
codesearchnet
|
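A usage sketch illustrating the three lookup paths; `serializer` is an assumed instance of the class that defines get_attribute:
class Point:
    x = 1

serializer.get_attribute({'x': 1}, 'x')   # mapping key lookup -> 1
serializer.get_attribute(Point(), 'x')    # attribute lookup -> 1
serializer.get_attribute(Point(), '*')    # '*' returns the object itself
serializer.get_attribute(Point(), 'y')    # missing -> None
|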
async def remove(self, index=""):
if not self.state == 'ready':
logger.debug("Trying to remove from wrong state '{}'".format(self.state))
return
if index == "":
self.statuslog.error("Must provide index to remove")
return
elif index == "all":
self.queue = []
self.update_queue()
self.statuslog.info("Removed all songs")
return
indexes = index.split("-")
self.logger.debug("Removing {}".format(indexes))
try:
if len(indexes) == 0:
self.statuslog.error("Remove must specify an index or range")
return
elif len(indexes) == 1:
num_lower = int(indexes[0]) - 1
num_upper = num_lower + 1
elif len(indexes) == 2:
num_lower = int(indexes[0]) - 1
num_upper = int(indexes[1])
else:
self.statuslog.error("Cannot have more than 2 indexes for remove range")
return
except TypeError:
self.statuslog.error("Remove index must be a number")
return
except ValueError:
self.statuslog.error("Remove index must be a number")
return
if num_lower < 0 or num_lower >= len(self.queue) or num_upper > len(self.queue):
if len(self.queue) == 0:
self.statuslog.warning("No songs in queue")
elif len(self.queue) == 1:
self.statuslog.error("Remove index must be 1 (only 1 song in queue)")
else:
self.statuslog.error("Remove index must be between 1 and {}".format(len(self.queue)))
return
if num_upper <= num_lower:
self.statuslog.error("Second index in range must be greater than first")
return
lower_songname = self.queue[num_lower][1]
for num in range(0, num_upper - num_lower):
self.logger.debug("Removed {}".format(self.queue[num_lower][1]))
self.queue.pop(num_lower)
if len(indexes) == 1:
self.statuslog.info("Removed {}".format(lower_songname))
else:
self.statuslog.info("Removed songs {}-{}".format(num_lower + 1, num_upper))
self.update_queue()
|
The remove command
Args:
index (str): The index to remove, can be either a number, or a range in the form '##-##'
|
juraj-google-style
|
def display_hierarchy(root_ad_unit, all_ad_units):
parent_id_to_children = collections.defaultdict(list)
for ad_unit in all_ad_units:
if ('parentId' in ad_unit):
parent_id_to_children[ad_unit['parentId']].append(ad_unit)
parent_id_to_children = dict(parent_id_to_children)
display_hierarchy_helper(root_ad_unit, parent_id_to_children, 0)
|
Display the ad units as a tree.
Args:
root_ad_unit: The root ad unit to begin from.
all_ad_units: A list containing all ad units.
|
codesearchnet
|
def from_paths(cls, path, bs=64, tfms=(None, None), trn_name='train', val_name='valid', test_name=None, test_with_labels=False, num_workers=8):
assert (not ((tfms[0] is None) or (tfms[1] is None))), 'please provide transformations for your train and validation sets'
(trn, val) = [folder_source(path, o) for o in (trn_name, val_name)]
if test_name:
test = (folder_source(path, test_name) if test_with_labels else read_dir(path, test_name))
else:
test = None
datasets = cls.get_ds(FilesIndexArrayDataset, trn, val, tfms, path=path, test=test)
return cls(path, datasets, bs, num_workers, classes=trn[2])
|
Read in images and their labels given as sub-folder names
Arguments:
path: a root path of the data (used for storing trained models, precomputed values, etc)
bs: batch size
tfms: transformations (for data augmentations). e.g. output of `tfms_from_model`
trn_name: a name of the folder that contains training images.
val_name: a name of the folder that contains validation images.
test_name: a name of the folder that contains test images.
num_workers: number of workers
Returns:
ImageClassifierData
|
codesearchnet
|
def encode_schedule(schedule):
(interpolation, steps, pmfs) = schedule
return ((interpolation + ' ') + ' '.join((((('@' + str(s)) + ' ') + ' '.join(map(str, p))) for (s, p) in zip(steps, pmfs))))
|
Encodes a schedule tuple into a string.
Args:
schedule: A tuple containing (interpolation, steps, pmfs), where
interpolation is a string specifying the interpolation strategy, steps
is an int array_like of shape [N] specifying the global steps, and pmfs is
an array_like of shape [N, M] where pmf[i] is the sampling distribution
at global step steps[i]. N is the number of schedule requirements to
interpolate and M is the size of the probability space.
Returns:
The string encoding of the schedule tuple.
|
codesearchnet
|
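A worked example of the encoding (values illustrative):
schedule = ('linear', [0, 100], [[0.5, 0.5], [1.0, 0.0]])
encode_schedule(schedule)
# -> 'linear @0 0.5 0.5 @100 1.0 0.0'
|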
def as_json_range(self, name):
return {'Name': name, 'Values': [json.dumps(v) for v in self.values]}
|
Represent the parameter range as a dictionary suitable for a request to
create an Amazon SageMaker hyperparameter tuning job using one of the deep learning frameworks.
The deep learning framework images require that hyperparameters be serialized as JSON.
Args:
name (str): The name of the hyperparameter.
Returns:
dict[str, list[str]]: A dictionary that contains the name and values of the hyperparameter,
where the values are serialized as JSON.
|
codesearchnet
|
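A hedged sketch assuming the method lives on a categorical parameter class whose `values` attribute holds the candidate values (class name assumed):
param = CategoricalParameter(['relu', 'tanh'])    # assumed parameter class
param.as_json_range('activation')
# -> {'Name': 'activation', 'Values': ['"relu"', '"tanh"']}   (values JSON-encoded)
|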
def _Open(self, path_spec, mode='rb'):
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
try:
vshadow_volume = pyvshadow.volume()
vshadow_volume.open_file_object(file_object)
except:
file_object.close()
raise
self._file_object = file_object
self._vshadow_volume = vshadow_volume
|
Opens the file system object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
juraj-google-style
|
def less(x, y):
return math_ops.less(x, y)
|
Element-wise truth value of (x < y).
Args:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
|
github-repos
|
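A quick usage sketch via the public Keras backend alias (import path assumed to expose this function):
import tensorflow as tf

x = tf.constant([1, 2, 3])
y = tf.constant([2, 2, 2])
tf.keras.backend.less(x, y)   # -> [True, False, False]
|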
def apply(self, func, num_splits=None, other_axis_partition=None, maintain_partitioning=True, **kwargs):
import dask
if (num_splits is None):
num_splits = len(self.list_of_blocks)
if (other_axis_partition is not None):
return [DaskFramePartition(dask.delayed(obj)) for obj in deploy_func_between_two_axis_partitions(self.axis, func, num_splits, len(self.list_of_blocks), kwargs, *dask.compute(*tuple((self.list_of_blocks + other_axis_partition.list_of_blocks))))]
args = [self.axis, func, num_splits, kwargs, maintain_partitioning]
args.extend(dask.compute(*self.list_of_blocks))
return [DaskFramePartition(dask.delayed(obj)) for obj in deploy_axis_func(*args)]
|
Applies func to the object.
See notes in Parent class about this method.
Args:
func: The function to apply.
num_splits: The number of times to split the result object.
other_axis_partition: Another `DaskFrameAxisPartition` object to apply to
func with this one.
Returns:
A list of `DaskFramePartition` objects.
|
codesearchnet
|
def make_adapt_function(self):
if self._adapt_function is not None:
return self._adapt_function
def adapt_step(iterator):
data = next(iterator)
self._adapt_maybe_build(data)
self.update_state(data)
if self._steps_per_execution.numpy().item() == 1:
adapt_fn = adapt_step
else:
def adapt_fn(iterator):
for _ in math_ops.range(self._steps_per_execution):
adapt_step(iterator)
if not self._run_eagerly:
adapt_fn = def_function.function(adapt_fn)
self._adapt_function = adapt_fn
return self._adapt_function
|
Creates a function to execute one step of `adapt`.
This method can be overridden to support custom adapt logic.
This method is called by `PreprocessingLayer.adapt`.
Typically, this method directly controls `tf.function` settings,
and delegates the actual state update logic to
`PreprocessingLayer.update_state`.
This function is cached the first time `PreprocessingLayer.adapt`
is called. The cache is cleared whenever `PreprocessingLayer.compile`
is called.
Returns:
Function. The function created by this method should accept a
`tf.data.Iterator`, retrieve a batch, and update the state of the
layer.
|
github-repos
|
def energy_upperbound(self, spins):
subtheta = self.theta.copy()
subtheta.fix_variables(spins)
trees = self._trees
if (not trees):
assert ((not subtheta.linear) and (not subtheta.quadratic))
return subtheta.offset
energy = Plus(self.message_upperbound(trees, {}, subtheta), subtheta.offset)
return energy
|
A formula for an upper bound on the energy of Theta with spins fixed.
Args:
spins (dict): Spin values for a subset of the variables in Theta.
Returns:
Formula that upper bounds the energy with spins fixed.
|
codesearchnet
|
def sync_proxy(self, mri, block):
subscribe = Subscribe(path=[mri], delta=True)
done_queue = Queue()
def handle_response(response):
if not isinstance(response, Delta):
self.log.debug("Proxy got response %r", response)
done_queue.put(None)
else:
cothread.Callback(
self._handle_response, response, block, done_queue)
subscribe.set_callback(handle_response)
IOLoopHelper.call(self._send_request, subscribe)
done_queue.get(timeout=DEFAULT_TIMEOUT)
|
Abstract method telling the ClientComms to sync this proxy Block
with its remote counterpart. Should wait until it is connected
Args:
mri (str): The mri for the remote block
block (BlockModel): The local proxy Block to keep in sync
|
juraj-google-style
|
def restore(self, fade=False):
try:
if self.is_coordinator:
self._restore_coordinator()
finally:
self._restore_volume(fade)
if self.is_coordinator:
if self.transport_state == 'PLAYING':
self.device.play()
elif self.transport_state == 'STOPPED':
self.device.stop()
|
Restore the state of a device to that which was previously saved.
For coordinator devices restore everything. For slave devices
only restore volume etc., not transport info (transport info
comes from the slave's coordinator).
Args:
fade (bool): Whether volume should be faded up on restore.
|
juraj-google-style
|
def comments_1(self, value=None):
if (value is not None):
try:
value = str(value)
except ValueError:
raise ValueError('value {} need to be of type str for field `comments_1`'.format(value))
if (',' in value):
raise ValueError('value should not contain a comma for field `comments_1`')
self._comments_1 = value
|
Corresponds to IDD Field `comments_1`
Args:
value (str): value for IDD Field `comments_1`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def get_aggregate(self):
return dict([(aggregate.find('query').text, [ET.tostring(data).lstrip('<data xmlns:cps="www.clusterpoint.com" xmlns:cpse="www.clusterpoint.com">').strip().rstrip('</data>') for data in aggregate.findall('data')]) for aggregate in self._content.findall('aggregate')])
|
Get aggregate data.
Returns:
A dict in with queries as keys and results as values.
|
codesearchnet
|
def Add(self, rdf_value, timestamp=None, suffix=None, mutation_pool=None):
return self.StaticAdd(self.collection_id, rdf_value, timestamp=timestamp, suffix=suffix, mutation_pool=mutation_pool)
|
Adds an rdf value to the collection.
Adds an rdf value to the collection. Does not require that the collection
be locked.
Args:
rdf_value: The rdf value to add to the collection.
timestamp: The timestamp (in microseconds) to store the rdf value at.
Defaults to the current time.
suffix: A 'fractional timestamp' suffix to reduce the chance of
collisions. Defaults to a random number.
mutation_pool: A MutationPool object to write to.
Returns:
The pair (timestamp, suffix) which identifies the value within the
collection.
Raises:
ValueError: rdf_value has unexpected type.
|
codesearchnet
|
def tf_initialize(self, x_init, base_value, target_value, estimated_improvement):
self.base_value = base_value
if estimated_improvement is None:
estimated_improvement = tf.abs(x=base_value)
first_step = super(LineSearch, self).tf_initialize(x_init)
improvement = tf.divide(
x=(target_value - self.base_value),
y=tf.maximum(x=estimated_improvement, y=util.epsilon)
)
last_improvement = improvement - 1.0
if self.mode == 'linear':
deltas = [-t * self.parameter for t in x_init]
self.estimated_incr = -estimated_improvement * self.parameter
elif self.mode == 'exponential':
deltas = [-t * self.parameter for t in x_init]
return first_step + (deltas, improvement, last_improvement, estimated_improvement)
|
Initialization step preparing the arguments for the first iteration of the loop body.
Args:
x_init: Initial solution guess $x_0$.
base_value: Value $f(x')$ at $x = x'$.
target_value: Value $f(x_0)$ at $x = x_0$.
estimated_improvement: Estimated value at $x = x_0$, $f(x')$ if None.
Returns:
Initial arguments for tf_step.
|
juraj-google-style
|
def _extract_id(self) -> str:
match = re.match(self._VALID_URL, self.url)
if match:
return match.group('video_id')
else:
raise VideoIdNotMatchedError
|
Get video_id needed to obtain the real_url of the video.
Raises:
VideoIdNotMatchedError: If video_id is not matched with regular expression.
|
codesearchnet
|
def __add__(self, other):
if not all(np.equal(self.energies, other.energies)):
raise ValueError("Energies of both COHP are not compatible.")
populations = {spin: self.populations[spin] + other.populations[spin]
for spin in self.cohp}
if self.icohp is not None and other.icohp is not None:
int_pop = {spin: self.icohp[spin] + other.icohp[spin]
for spin in self.icohp}
else:
if self.icohp is not None or other.icohp is not None:
warnings.warn("One of the COHP objects does not contain "
"ICOHPs. Setting ICOHP to None.")
int_pop = None
return Cohp(self.efermi, self.energies, populations, icohp=int_pop)
|
Adds two COHP together. Checks that energy scales are the same.
Otherwise, it raises a ValueError. It also adds ICOHP if present.
If ICOHP is only present in one object, it displays a warning and
will not add ICOHP.
Args:
other: Another COHP object.
Returns:
Sum of the two COHPs as a COHP object.
|
juraj-google-style
|
def create_clusters(provider, context, **kwargs):
conn = get_session(provider.region).client('ecs')
try:
clusters = kwargs['clusters']
except KeyError:
logger.error('setup_clusters hook missing "clusters" argument')
return False
if isinstance(clusters, basestring):
clusters = [clusters]
cluster_info = {}
for cluster in clusters:
logger.debug('Creating ECS cluster: %s', cluster)
r = conn.create_cluster(clusterName=cluster)
cluster_info[r['cluster']['clusterName']] = r
return {'clusters': cluster_info}
|
Creates ECS clusters.
Expects a "clusters" argument, which should contain a list of cluster
names to create.
Args:
provider (:class:`stacker.providers.base.BaseProvider`): provider
instance
context (:class:`stacker.context.Context`): context instance
Returns: boolean for whether or not the hook succeeded.
|
codesearchnet
|
def __to_plain_containers(self, container: Union[(CommentedSeq, CommentedMap)]) -> Union[(OrderedDict, list)]:
if isinstance(container, CommentedMap):
new_container = OrderedDict()
for (key, value_obj) in container.items():
if (isinstance(value_obj, CommentedMap) or isinstance(value_obj, CommentedSeq)):
new_container[key] = self.__to_plain_containers(value_obj)
else:
new_container[key] = value_obj
elif isinstance(container, CommentedSeq):
new_container = list()
for value_obj in container:
if (isinstance(value_obj, CommentedMap) or isinstance(value_obj, CommentedSeq)):
new_container.append(self.__to_plain_containers(value_obj))
else:
new_container.append(value_obj)
return new_container
|
Converts any sequence or mapping to list or OrderedDict
Stops at anything that isn't a sequence or a mapping.
One day, we'll extract the comments and formatting and store
them out-of-band.
Args:
    container: The container of constructed subobjects to convert
|
codesearchnet
|
def designPrimers(seq_args, global_args=None, misprime_lib=None, mishyb_lib=None, debug=False):
if global_args:
primerdesign.setGlobals(global_args, misprime_lib, mishyb_lib)
primerdesign.setSeqArgs(seq_args)
return primerdesign.runDesign(debug)
|
Run the Primer3 design process.
If the global args have been previously set (either by a previous
`designPrimers` call or by a `setGlobals` call), `designPrimers` may be
called with `seq_args` alone (as a means of optimization).
Args:
seq_args (dict) : Primer3 sequence/design args as per
Primer3 docs
global_args (dict, optional) : Primer3 global args as per Primer3 docs
misprime_lib (dict, optional) : `Sequence name: sequence` dictionary
for mispriming checks.
mishyb_lib (dict, optional) : `Sequence name: sequence` dictionary
for mishybridization checks.
Returns:
A dictionary of Primer3 results (should be identical to the expected
BoulderIO output from primer3_main)
|
codesearchnet
|
def from_tensor(cls, tensor):
if isinstance(tensor, core.Value):
return EagerWeakTensor(tensor)
if isinstance(tensor, core.Symbol):
return GraphWeakTensor(tensor)
raise errors.InvalidArgumentError(None, None, f'WeakTensor can only be constructed from tf.Tensor or tf.WeakTensor, but {type(tensor)} was given.')
|
Converts a 'tf.Tensor' into a 'WeakTensor'.
This should be the standard way of creating a WeakTensor instead
of directly calling the WeakTensor constructor.
Args:
tensor: The `tf.Tensor` that should be converted into a 'WeakTensor'.
Returns:
A `EagerWeakTensor` or 'GraphWeakTensor' that holds the `tensor`.
|
github-repos
|
def to_hashable_table_ref(table_ref_elem_kv: Tuple[Union[str, TableReference], V]) -> Tuple[str, V]:
table_ref = table_ref_elem_kv[0]
hashable_table_ref = get_hashable_destination(table_ref)
return (hashable_table_ref, table_ref_elem_kv[1])
|
Turns the key of the input tuple to its string representation. The key
should be either a string or a TableReference.
Args:
table_ref_elem_kv: A tuple of table reference and element.
Returns:
A tuple of string representation of input table and input element.
|
github-repos
|
def push(self, x):
self._quantile_tracker.push(x)
|
Pushes a new value and updates the internal quantile tracker.
Args:
x: The new value to be pushed.
|
github-repos
|
def _lookup_model(cls, kind, default_model=None):
modelclass = cls._kind_map.get(kind, default_model)
if (modelclass is None):
raise KindError(("No model class found for kind '%s'. Did you forget to import it?" % kind))
return modelclass
|
Get the model class for the kind.
Args:
kind: A string representing the name of the kind to lookup.
default_model: The model class to use if the kind can't be found.
Returns:
The model class for the requested kind.
Raises:
KindError: The kind was not found and no default_model was provided.
|
codesearchnet
|
def _poll_once(self, timeout_ms, max_records):
self._coordinator.poll()
if not self._subscription.has_all_fetch_positions():
self._update_fetch_positions(self._subscription.missing_fetch_positions())
records, partial = self._fetcher.fetched_records(max_records)
if records:
if not partial:
self._fetcher.send_fetches()
return records
self._fetcher.send_fetches()
timeout_ms = min(timeout_ms, self._coordinator.time_to_next_poll() * 1000)
self._client.poll(timeout_ms=timeout_ms)
if self._coordinator.need_rejoin():
return {}
records, _ = self._fetcher.fetched_records(max_records)
return records
|
Do one round of polling. In addition to checking for new data, this does
any needed heart-beating, auto-commits, and offset updates.
Arguments:
timeout_ms (int): The maximum time in milliseconds to block.
Returns:
dict: Map of topic to list of records (may be empty).
|
juraj-google-style
|
def settings(package, reload_=False):
global packages
if ((package not in packages) or reload_):
from os import path
result = CaseConfigParser()
if (package != 'acorn'):
confpath = _package_path(package)
_read_single(result, confpath)
_read_single(result, _package_path('acorn'))
packages[package] = result
return packages[package]
|
Returns the config settings for the specified package.
Args:
package (str): name of the python package to get settings for.
|
codesearchnet
|
def refresh(self, request):
try:
self._retrieve_info(request)
self.token, self.expiry = _metadata.get_service_account_token(
request,
service_account=self._service_account_email)
except exceptions.TransportError as caught_exc:
new_exc = exceptions.RefreshError(caught_exc)
six.raise_from(new_exc, caught_exc)
|
Refresh the access token and scopes.
Args:
request (google.auth.transport.Request): The object used to make
HTTP requests.
Raises:
google.auth.exceptions.RefreshError: If the Compute Engine metadata
service can't be reached or if the instance has no
credentials.
|
juraj-google-style
|
def unpack(self, buff=None, offset=0):
instruction_type = UBInt16(enum_ref=InstructionType)
instruction_type.unpack(buff, offset)
self.__class__ = InstructionType(instruction_type.value).find_class()
length = UBInt16()
length.unpack(buff, offset=offset+2)
super().unpack(buff[:offset+length.value], offset)
|
Unpack *buff* into this object.
This method will convert a binary data into a readable value according
to the attribute format.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails.
|
juraj-google-style
|
def _MakeRequestNoRetry(http, http_request, redirections=5, check_response_func=CheckResponse):
connection_type = None
if getattr(http, 'connections', None):
url_scheme = parse.urlsplit(http_request.url).scheme
if (url_scheme and (url_scheme in http.connections)):
connection_type = http.connections[url_scheme]
new_debuglevel = (4 if (httplib2.debuglevel == 4) else 0)
with _Httplib2Debuglevel(http_request, new_debuglevel, http=http):
(info, content) = http.request(str(http_request.url), method=str(http_request.http_method), body=http_request.body, headers=http_request.headers, redirections=redirections, connection_type=connection_type)
if (info is None):
raise exceptions.RequestError()
response = Response(info, content, http_request.url)
check_response_func(response)
return response
|
Send http_request via the given http.
This wrapper exists to handle translation between the plain httplib2
request/response types and the Request and Response types above.
Args:
http: An httplib2.Http instance, or a http multiplexer that delegates to
an underlying http, for example, HTTPMultiplexer.
http_request: A Request to send.
redirections: (int, default 5) Number of redirects to follow.
check_response_func: Function to validate the HTTP response.
Arguments are (Response, response content, url).
Returns:
A Response object.
Raises:
RequestError if no response could be parsed.
|
codesearchnet
|
def controlled_by(self, *control_qubits: Qid) -> 'Operation':
from cirq.ops import ControlledOperation
    if control_qubits is None or len(control_qubits) == 0:
raise ValueError(
"Can't get controlled operation without control qubit. Op: {}"
.format(repr(self)))
else:
return ControlledOperation(control_qubits, self)
|
Returns a controlled version of this operation.
Args:
control_qubits: Qubits to control the operation by. Required.
|
juraj-google-style
|
def convert_reshape(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting reshape ...')
if names == 'short':
tf_name = 'RESH' + random_string(4)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
if len(inputs) > 1:
if layers[inputs[1]][0] == -1:
print('Cannot deduct batch size! It will be omitted, but result may be wrong.')
reshape = keras.layers.Reshape(layers[inputs[1] + '_np'], name=tf_name)
layers[scope_name] = reshape(layers[inputs[0]])
else:
if inputs[0] in layers:
reshape = keras.layers.Reshape(params['shape'][1:], name=tf_name)
layers[scope_name] = reshape(layers[inputs[0]])
else:
print('Skip weight matrix transpose, but result may be wrong.')
|
Convert reshape layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
|
juraj-google-style
|
def _generate_assignments(splittable_dimensions, mesh_dimension_to_size):
assignments = []
for assignment_size in six.moves.xrange((1 + min(len(splittable_dimensions), len(mesh_dimension_to_size)))):
for s_dims_chosen in itertools.combinations(splittable_dimensions, assignment_size):
for m_dims_chosen in itertools.permutations(mesh_dimension_to_size, assignment_size):
assignments.append(dict(zip(s_dims_chosen, m_dims_chosen)))
return assignments
|
Generates all ways to map splittable dimensions to mesh dimensions.
Args:
splittable_dimensions: a frozenset of the names of splittable dimensions.
mesh_dimension_to_size: a dictionary from mesh dimension name to size.
Returns:
A list of the valid assignments. Each assignment is a dict keyed by every
splittable dimension, whose value is either a mesh dimension or None.
|
codesearchnet
|
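A worked example (set/dict iteration order may vary, so the singleton assignments can appear in either order):
splittable = frozenset({'batch', 'hidden'})
mesh_sizes = {'mesh_x': 4}
_generate_assignments(splittable, mesh_sizes)
# -> [{}, {'batch': 'mesh_x'}, {'hidden': 'mesh_x'}]
|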
def _print_args(arguments, argument_type='Argument', indent=0):
indent_str = ' ' * indent
def _maybe_add_quotes(value):
is_quotes = "'" * isinstance(value, str)
return is_quotes + str(value) + is_quotes
def in_print(s, end='\n'):
print(indent_str + s, end=end)
for index, element in enumerate(arguments, 1):
if indent == 4:
      in_print('%s #%d' % (argument_type, index))
if isinstance(element, str):
in_print(' %s' % element)
elif isinstance(element, tensor_spec.TensorSpec):
print((indent + 1) * ' ' + '%s: %s' % (element.name, repr(element)))
elif isinstance(element, collections_abc.Iterable) and (not isinstance(element, dict)):
in_print(' DType: %s' % type(element).__name__)
in_print(' Value: [', end='')
for value in element:
print('%s' % _maybe_add_quotes(value), end=', ')
print('\x08\x08]')
elif isinstance(element, dict):
in_print(' DType: %s' % type(element).__name__)
in_print(' Value: {', end='')
for key, value in element.items():
print("'%s': %s" % (str(key), _maybe_add_quotes(value)), end=', ')
print('\x08\x08}')
else:
in_print(' DType: %s' % type(element).__name__)
in_print(' Value: %s' % str(element))
|
Formats and prints the argument of the concrete functions defined in the model.
Args:
arguments: Arguments to format print.
argument_type: Type of arguments.
indent: How far (in increments of 2 spaces) to indent each line of
output.
|
github-repos
|
def ParseCodeToTree(code):
if not code.endswith(os.linesep):
code += os.linesep
try:
parser_driver = driver.Driver(_PYTHON_GRAMMAR, convert=pytree.convert)
tree = parser_driver.parse_string(code, debug=False)
except parse.ParseError:
ast.parse(code)
raise
return _WrapEndMarker(tree)
|
Parse the given code to a lib2to3 pytree.
Arguments:
code: a string with the code to parse.
Raises:
SyntaxError if the code is invalid syntax.
parse.ParseError if some other parsing failure.
Returns:
The root node of the parsed tree.
|
github-repos
|
def _build_all_reduce_ring(core_locations: List[_CoreLocation], rotate: bool=False) -> List[int]:
permutation = list(range(len(core_locations)))
if not permutation:
return permutation
logging.vlog(2, 'Core locations in: %s', core_locations)
first_column = min([l.x for l in core_locations])
first_row = min([l.y for l in core_locations])
same_z = len(set([l.z for l in core_locations])) == 1
logging.vlog(2, 'first_column: %d', first_column)
logging.vlog(2, 'first_row: %d', first_row)
logging.vlog(2, 'same_z: %s', same_z)
def _cmp_2d(ia: int, ib: int) -> int:
if not rotate:
a = core_locations[ia]
b = core_locations[ib]
a_first = a.x == first_column and a.y != first_row
b_first = b.x == first_column and b.y != first_row
if a_first != b_first:
return -1 if b_first else 1
if a.y != b.y:
return b.y - a.y if a_first else a.y - b.y
if a.x != b.x:
return a.x - b.x if a.y % 2 == 0 else b.x - a.x
return a.core - b.core
else:
a = core_locations[ia]
b = core_locations[ib]
a_first = a.y == first_row and a.x != first_column
b_first = b.y == first_row and b.x != first_column
if a_first != b_first:
return -1 if b_first else 1
if a.x != b.x:
return b.x - a.x if a_first else a.x - b.x
if a.y != b.y:
return a.y - b.y if a.x % 2 == 0 else b.y - a.y
return a.core - b.core
def _cmp_3d(ia: int, ib: int) -> int:
a = core_locations[ia]
b = core_locations[ib]
a_corner = a.x == first_column and a.y == first_row
b_corner = b.x == first_column and b.y == first_row
if a_corner and b_corner:
return b.z - a.z if a.z != b.z else a.core - b.core
if a_corner != b_corner:
return -1 if b_corner else 1
if a.z == b.z:
return _cmp_2d(ia, ib) if a.z % 2 == 0 else -_cmp_2d(ia, ib)
return a.z - b.z
if same_z:
permutation.sort(key=functools.cmp_to_key(_cmp_2d))
else:
permutation.sort(key=functools.cmp_to_key(_cmp_3d))
logging.vlog(2, 'Permutation out: %s', permutation)
return permutation
|
Reorders a list of TPU cores to optimize for AllReduce performance.
This is ported from the C++ tensorflow::BuildAllReduceRing function,
mixed with some logic from TF TPU's device_assignment._ring_3d.
Args:
core_locations: A list of core locations expressed as [x, y, z, core].
rotate: If true, scan the cores in a column-major order. False by default.
Returns:
A permutation of the input list such that neighbors in the sequence are
nearby in the TPU topology.
|
github-repos
|
def from_dict(cls, cls_dict, fallback_xsi_type=None):
if not cls_dict:
return None
if isinstance(cls_dict, six.string_types):
if not getattr(cls, "_convert_strings", False):
return cls_dict
try:
typekey = cls.dictkey(cls_dict)
except TypeError:
typekey = fallback_xsi_type
klass = cls.entity_class(typekey)
return klass.from_dict(cls_dict)
|
Parse the dictionary and return an Entity instance.
This will attempt to extract type information from the input
dictionary and pass it to entity_class to resolve the correct class
for the type.
Args:
cls_dict: A dictionary representation of an Entity object.
fallback_xsi_type: An xsi_type to use for string input, which
doesn't have properties
Returns:
An Entity instance.
|
juraj-google-style
|
def generate_link(flag, np_fun_name):
if flag == 'dev':
template = 'https:
elif flag == 'stable':
template = 'https:
elif re.match('\\d+(\\.\\d+(\\.\\d+)?)?$', flag):
template = f'https:
else:
return None
return template % np_fun_name
|
Generates link from numpy function name.
Args:
flag: the flag to control link form. See `set_np_doc_form`.
np_fun_name: the numpy function name.
Returns:
A string.
|
github-repos
|
def SetIamPolicy(self, request, global_params=None):
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(config, request, global_params=global_params)
|
Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
Args:
request: (BigqueryTablesSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
|
github-repos
|
def prepare_http_request(self, method_type, params, **kwargs):
prepared_request = self.session.prepare_request(
requests.Request(method=method_type, **params)
)
return prepared_request
|
Prepares the HTTP REQUEST and returns it.
Args:
method_type: The HTTP method type
params: Additional parameters for the HTTP request.
kwargs: Any extra keyword arguments passed into a client method.
Returns:
prepared_request: An HTTP request object.
|
juraj-google-style
|
def find_usbserial(vendor, product):
if (platform.system() == 'Linux'):
(vendor, product) = [('%04x' % x).strip() for x in (vendor, product)]
return linux_find_usbserial(vendor, product)
elif (platform.system() == 'Darwin'):
return osx_find_usbserial(vendor, product)
else:
raise NotImplementedError(('Cannot find serial ports on %s' % platform.system()))
|
Find the tty device for a given usbserial device's identifiers.
Args:
vendor: (int) something like 0x0000
product: (int) something like 0x0000
Returns:
String, like /dev/ttyACM0 or /dev/tty.usb...
|
codesearchnet
|
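A usage sketch; the vendor/product IDs below are illustrative (they correspond to a common Arduino board) and the printed path depends on the host OS:
port = find_usbserial(0x2341, 0x0043)   # example vendor/product IDs
print(port)                             # e.g. '/dev/ttyACM0' or '/dev/tty.usbmodem...'
|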
def ParseOptions(cls, options, configuration_object):
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
artifacts_path = getattr(options, 'artifact_definitions_path', None)
data_location = getattr(configuration_object, '_data_location', None)
if ((not artifacts_path or not os.path.exists(artifacts_path)) and
data_location):
artifacts_path = os.path.dirname(data_location)
artifacts_path = os.path.join(artifacts_path, 'artifacts')
if not os.path.exists(artifacts_path) and 'VIRTUAL_ENV' in os.environ:
artifacts_path = os.path.join(
os.environ['VIRTUAL_ENV'], 'share', 'artifacts')
if not os.path.exists(artifacts_path):
artifacts_path = os.path.join(sys.prefix, 'share', 'artifacts')
if not os.path.exists(artifacts_path):
artifacts_path = os.path.join(sys.prefix, 'local', 'share', 'artifacts')
if sys.prefix != '/usr':
if not os.path.exists(artifacts_path):
artifacts_path = os.path.join('/usr', 'share', 'artifacts')
if not os.path.exists(artifacts_path):
artifacts_path = os.path.join('/usr', 'local', 'share', 'artifacts')
if not os.path.exists(artifacts_path):
artifacts_path = None
if not artifacts_path or not os.path.exists(artifacts_path):
raise errors.BadConfigOption(
'Unable to determine path to artifact definitions.')
custom_artifacts_path = getattr(
options, 'custom_artifact_definitions_path', None)
if custom_artifacts_path and not os.path.isfile(custom_artifacts_path):
raise errors.BadConfigOption(
'No such artifacts filter file: {0:s}.'.format(custom_artifacts_path))
if custom_artifacts_path:
logger.info(
'Custom artifact filter file: {0:s}'.format(custom_artifacts_path))
registry = artifacts_registry.ArtifactDefinitionsRegistry()
reader = artifacts_reader.YamlArtifactsReader()
logger.info(
'Determined artifact definitions path: {0:s}'.format(artifacts_path))
try:
registry.ReadFromDirectory(reader, artifacts_path)
except (KeyError, artifacts_errors.FormatError) as exception:
raise errors.BadConfigOption((
'Unable to read artifact definitions from: {0:s} with error: '
'{1!s}').format(artifacts_path, exception))
for name in preprocessors_manager.PreprocessPluginsManager.GetNames():
if not registry.GetDefinitionByName(name):
raise errors.BadConfigOption(
'Missing required artifact definition: {0:s}'.format(name))
if custom_artifacts_path:
try:
registry.ReadFromFile(reader, custom_artifacts_path)
except (KeyError, artifacts_errors.FormatError) as exception:
raise errors.BadConfigOption((
'Unable to read artifact definitions from: {0:s} with error: '
'{1!s}').format(custom_artifacts_path, exception))
setattr(configuration_object, '_artifact_definitions_path', artifacts_path)
setattr(
configuration_object, '_custom_artifacts_path', custom_artifacts_path)
|
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
BadConfigOption: if the required artifact definitions are not defined.
|
juraj-google-style
|
def download(self, file: Optional[IO]=None, rewind: bool=True,
duration_timeout: Optional[float]=None) -> Response:
if self._session_state != SessionState.file_request_sent:
raise RuntimeError('File request not sent')
if rewind and file and hasattr(file, 'seek'):
original_offset = file.tell()
else:
original_offset = None
if not hasattr(file, 'drain'):
self._response.body = file
if not isinstance(file, Body):
self._response.body = Body(file)
read_future = self._commander.read_stream(file, self._data_stream)
try:
reply = yield from \
asyncio.wait_for(read_future, timeout=duration_timeout)
except asyncio.TimeoutError as error:
raise DurationTimeout(
'Did not finish reading after {} seconds.'
.format(duration_timeout)
) from error
self._response.reply = reply
if original_offset is not None:
file.seek(original_offset)
self.event_dispatcher.notify(self.Event.end_transfer, self._response)
self._session_state = SessionState.response_received
return self._response
|
Read the response content into file.
Args:
file: A file object or asyncio stream.
rewind: Seek the given file back to its original offset after
reading is finished.
duration_timeout: Maximum time in seconds of which the
entire file must be read.
Returns:
A Response populated with the final data connection reply.
Be sure to call :meth:`start` first.
Coroutine.
|
juraj-google-style
|
def run_inference(self, batch: Sequence[ExampleT], model: ModelT, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionT]:
raise NotImplementedError(type(self))
|
Runs inferences on a batch of examples.
Args:
batch: A sequence of examples or features.
model: The model used to make inferences.
inference_args: Extra arguments for models whose inference call requires
extra parameters.
Returns:
An Iterable of Predictions.
|
github-repos
|
def apply(self, predictions: Iterable[AnomalyPrediction]) -> AnomalyPrediction:
raise NotImplementedError
|
Applies the aggregation function to an iterable of predictions, either on
their outlier scores or labels.
Args:
predictions: An Iterable of `AnomalyPrediction` objects to aggregate.
Returns:
An `AnomalyPrediction` object containing the aggregated result.
|
github-repos
|
def set_redirect(self, url, status=HttpStatusCodes.HTTP_303):
self.set_status(status)
self.set_content('')
self.set_header(HttpResponseHeaders.LOCATION, url)
|
Helper method to set a redirect response.
Args:
url (:obj:`str`): URL to redirect to
status (:obj:`str`, optional): Status code of the response
|
juraj-google-style
|
def _PackArgumentsHelper(self, elem, data, set_type_attrs):
if self._packer:
data = self._packer.Pack(data, self._version)
if isinstance(data, dict):
type_override = data.get('xsi_type')
if type_override:
elem_type = self._DiscoverElementTypeFromLocalname(type_override)
else:
elem_type = elem.type
data_formatted = data.iteritems()
packed_result = self._CreateComplexTypeFromData(elem_type, (type_override is not None), data_formatted, set_type_attrs)
elif isinstance(data, zeep.xsd.CompoundValue):
elem_type = data._xsd_type
data_formatted = zip(dir(data), [data[k] for k in dir(data)])
packed_result = self._CreateComplexTypeFromData(elem_type, False, data_formatted, set_type_attrs)
elif isinstance(data, (list, tuple)):
packed_result = [self._PackArgumentsHelper(elem, item, set_type_attrs) for item in data]
else:
if ((elem.type.name == 'base64Binary') and self._IsBase64(data)):
_logger.warn('Passing data to base64 field %s that may already be encoded. Do not pre-encode base64 fields with zeep.', elem.name)
packed_result = data
return packed_result
|
Recursive helper for PackArguments.
Args:
elem: The element type we are creating.
data: The data to instantiate it with.
set_type_attrs: A boolean indicating whether or not attributes that end
in .Type should be set. This is only necessary for batch job service.
Returns:
An instance of type 'elem'.
|
codesearchnet
|
def _piecewise_learning_rate(step, boundaries, values):
values = [1.0] + values
boundaries = [float(x) for x in boundaries]
return tf.train.piecewise_constant(
step, boundaries, values, name="piecewise_lr")
|
Scale learning rate according to the given schedule.
Multipliers are not cumulative.
Args:
step: global step
boundaries: List of steps to transition on.
values: Multiplier to apply at each boundary transition.
Returns:
Scaled value for the learning rate.
|
juraj-google-style
|
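An illustrative sketch of the schedule produced (boundary and multiplier values assumed, `global_step` is an assumed step tensor); note the leading 1.0 multiplier that the function prepends:
# boundaries=[1000, 2000], values=[0.1, 0.01] yields the multipliers
#   step <  1000        -> 1.0
#   1000 <= step < 2000 -> 0.1
#   step >= 2000        -> 0.01
lr = _piecewise_learning_rate(global_step, boundaries=[1000, 2000], values=[0.1, 0.01])
|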
def generate(self, information, timeout=-1):
return self._client.create(information, timeout=timeout)
|
Generates a self signed certificate or an internal CA signed certificate for RabbitMQ clients.
Args:
information (dict): Information to generate the certificate for RabbitMQ clients.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: RabbitMQ certificate generated
|
juraj-google-style
|
def _validate_exp(claims, leeway=0):
if ('exp' not in claims):
return
try:
exp = int(claims['exp'])
except ValueError:
raise JWTClaimsError('Expiration Time claim (exp) must be an integer.')
now = timegm(datetime.utcnow().utctimetuple())
if (exp < (now - leeway)):
raise ExpiredSignatureError('Signature has expired.')
|
Validates that the 'exp' claim is valid.
The "exp" (expiration time) claim identifies the expiration time on
or after which the JWT MUST NOT be accepted for processing. The
processing of the "exp" claim requires that the current date/time
MUST be before the expiration date/time listed in the "exp" claim.
Implementers MAY provide for some small leeway, usually no more than
a few minutes, to account for clock skew. Its value MUST be a number
containing a NumericDate value. Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
leeway (int): The number of seconds of skew that is allowed.
|
codesearchnet
|
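A small worked sketch of the leeway behaviour (claim values constructed on the fly):
from calendar import timegm
from datetime import datetime, timedelta

expired = {'exp': timegm((datetime.utcnow() - timedelta(seconds=30)).utctimetuple())}
_validate_exp(expired)             # raises ExpiredSignatureError
_validate_exp(expired, leeway=60)  # accepted: 30s past expiry is within the 60s leeway
|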
def list_tags(self, image_name):
tags_url = (self.registry_url + '/v2/{}/tags/list')
r = self.get(tags_url.format(image_name), auth=self.auth)
data = r.json()
if ('tags' in data):
return reversed(sorted(data['tags']))
return []
|
List all tags for the given image stored in the registry.
Args:
image_name (str):
The name of the image to query. The image must be present on the
registry for this call to return any values.
Returns:
list[str]: List of tags for that image.
|
codesearchnet
|
def open_tunnel(self, serial_no, port=19020):
return self.open(ip_addr='tunnel:' + str(serial_no) + ':' + str(port))
|
Connects to the J-Link emulator (over SEGGER tunnel).
Args:
self (JLink): the ``JLink`` instance
serial_no (int): serial number of the J-Link
port (int): optional port number (defaults to 19020).
Returns:
``None``
|
juraj-google-style
|
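This appears to be the pylink package's JLink.open_tunnel; a usage sketch under that assumption:
import pylink

jlink = pylink.JLink()
# Connect to the J-Link with serial number 123456789 through the SEGGER tunnel
# service on the default port 19020.
jlink.open_tunnel(123456789)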
def find_element_by_id(self, id_, update=False) -> Elements:
return self.find_element(by=By.ID, value=id_, update=update)
|
Finds an element by id.
Args:
id_: The id of the element to be found.
update: If the interface has changed, this option should be True.
Returns:
The element if it was found.
Raises:
NoSuchElementException - If the element wasn't found.
Usage:
element = driver.find_element_by_id('foo')
|
juraj-google-style
|
def update_mim_version(self, new_genes, new_panel, old_version):
LOG.info('Updating versions for new genes')
version = new_panel['version']
for gene in new_panel['genes']:
gene_symbol = gene['hgnc_id']
if gene_symbol in new_genes:
gene['database_entry_version'] = version
continue
gene['database_entry_version'] = old_version
return
|
Set the correct version for each gene
Loop over the genes in the new panel
Args:
new_genes(set(str)): Set with the new gene symbols
new_panel(dict)
old_version: Version assigned to genes that were already in the panel
|
juraj-google-style
|
def chain_to_quadratic(chain, target_adjacency, chain_strength):
quadratic = {}
seen = set()
try:
next_level = {next(iter(chain))}
except StopIteration:
raise ValueError('chain must have at least one variable')
while next_level:
this_level = next_level
next_level = set()
for v in this_level:
if (v not in seen):
seen.add(v)
for u in target_adjacency[v]:
if (u not in chain):
continue
next_level.add(u)
if ((u != v) and ((u, v) not in quadratic)):
quadratic[(v, u)] = (- chain_strength)
if (len(chain) != len(seen)):
raise ValueError('{} is not a connected chain'.format(chain))
return quadratic
|
Determine the quadratic biases that induce the given chain.
Args:
chain (iterable):
The variables that make up a chain.
target_adjacency (dict/:class:`networkx.Graph`):
Should be a dict of the form {s: Ns, ...} where s is a variable
in the target graph and Ns is the set of neighbours of s.
chain_strength (float):
The magnitude of the quadratic bias that should be used to create chains.
Returns:
dict[edge, float]: The quadratic biases that induce the given chain.
Raises:
ValueError: If the variables in chain do not form a connected subgraph of target.
Examples:
>>> chain = {1, 2}
>>> target_adjacency = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}}
>>> dimod.embedding.chain_to_quadratic(chain, target_adjacency, 1)
{(1, 2): -1}
|
codesearchnet
|
def _CallMethod(self, srvc, method_descriptor, rpc_controller, request, callback):
if (method_descriptor.containing_service != self.descriptor):
raise RuntimeError('CallMethod() given method descriptor for wrong service type.')
method = getattr(srvc, method_descriptor.name)
return method(rpc_controller, request, callback)
|
Calls the method described by a given method descriptor.
Args:
srvc: Instance of the service for which this method is called.
method_descriptor: Descriptor that represent the method to call.
rpc_controller: RPC controller to use for this method's execution.
request: Request protocol message.
callback: A callback to invoke after the method has completed.
|
codesearchnet
|
def length_squared(x, keep_dims=False, name=None, reduction_dim=None):
with tf.name_scope(name, 'length_squared', [x]) as scope:
x = tf.convert_to_tensor(x, name='x')
if reduction_dim is None:
reduction_dim = _last_index(x, 1)
return tf.reduce_sum(
tf.square(x),
reduction_dim,
keep_dims=keep_dims,
name=scope)
|
Computes the squared length of x.
Args:
x: A tensor.
keep_dims: If true, reduction does not change the rank of the input.
name: Optional name for this op.
reduction_dim: The dimension to reduce, by default choose the last one
and if no shape is specified guess 1.
Returns:
The squared length of x.
|
juraj-google-style
|
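A short sketch of the reduction behaviour, assuming the same TF1-style API as the code above:
import tensorflow as tf

x = tf.constant([[3.0, 4.0], [1.0, 2.0]])
# Reduces over the last dimension by default: [3^2 + 4^2, 1^2 + 2^2] -> [25.0, 5.0]
lengths = length_squared(x)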
def combine_columns(columns):
columns_zipped = itertools.zip_longest(*columns)
return ''.join(x for zipped in columns_zipped for x in zipped if x)
|
Combine ``columns`` into a single string.
Example:
>>> combine_columns(['eape', 'xml'])
'example'
Args:
columns (iterable): ordered columns to combine
Returns:
String of combined columns
|
juraj-google-style
|
def _gather_saveables_for_checkpoint(self):
def _saveable_factory(name=self._common_name):
return _SyncOnReadSaveable(self, name)
return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
|
Overrides Trackable method.
This allows both name-based and object-based save and restore of
`SyncOnReadVariable`s.
Returns:
A dictionary mapping attribute names to `SaveableObject` factories.
|
github-repos
|
def GetBlockHash(self, height):
if self._current_block_height < height:
return
if len(self._header_index) <= height:
return
return self._header_index[height]
|
Get the block hash by its block height
Args:
height(int): height of the block to retrieve hash from.
Returns:
bytes: a non-raw block hash (e.g. b'6dd83ed8a3fc02e322f91f30431bf3662a8c8e8ebe976c3565f0d21c70620991', but not b'\x6d\xd8...etc').
|
juraj-google-style
|
def _AddParentDirectories(self, path):
path_segments = self.file_system.SplitPath(path)
for segment_index in range(len(path_segments)):
parent_path = self.file_system.JoinPath(path_segments[:segment_index])
file_entry = self.file_system.GetFileEntryByPath(parent_path)
if (file_entry and (not file_entry.IsDirectory())):
raise ValueError('Non-directory parent file entry: {0:s} already exists.'.format(parent_path))
for segment_index in range(len(path_segments)):
parent_path = self.file_system.JoinPath(path_segments[:segment_index])
if (not self.file_system.FileEntryExistsByPath(parent_path)):
self.file_system.AddFileEntry(parent_path, file_entry_type=definitions.FILE_ENTRY_TYPE_DIRECTORY)
|
Adds the parent directories of a path to the fake file system.
Args:
path (str): path of the file within the fake file system.
Raises:
ValueError: if a parent directory is already set and is not a directory.
|
codesearchnet
|
def parse_arguments(argv):
parser = argparse.ArgumentParser(description='write-to-pubsub')
parser.add_argument('-m', '--mode', help='Mode to run pipeline in.', choices=['local', 'cloud'], default='local')
parser.add_argument('-p', '--project', help='GCP project to run pipeline on.', default=cfg.PROJECT_ID)
args, _ = parser.parse_known_args(args=argv)
return args
|
Parses the arguments passed on the command line and returns them as a namespace object.
Args:
argv: The arguments passed to the command line.
Returns:
The parsed arguments as an argparse.Namespace.
|
github-repos
|
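A usage sketch; unknown flags are tolerated because the function relies on parse_known_args:
import sys

args = parse_arguments(sys.argv[1:])
print(args.mode, args.project)  # e.g. 'local' and the configured project id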
def _read_hdf_columns(path_or_buf, columns, num_splits, kwargs):
df = pandas.read_hdf(path_or_buf, columns=columns, **kwargs)
return (_split_result_for_readers(0, num_splits, df) + [len(df.index)])
|
Use a Ray task to read columns from HDF5 into a Pandas DataFrame.
Note: Ray functions are not detected by codecov (thus pragma: no cover)
Args:
path_or_buf: The path of the HDF5 file.
columns: The list of column names to read.
num_splits: The number of partitions to split the column into.
Returns:
A list containing the split Pandas DataFrames and the Index as the last
element. If no `index_col` is set, then we just return the length.
This is used to determine the total length of the DataFrame to build a
default Index.
|
codesearchnet
|
def edge(self, tail_name, head_name, label=None, _attributes=None, **attrs):
tail_name = self._quote_edge(tail_name)
head_name = self._quote_edge(head_name)
attr_list = self._attr_list(label, attrs, _attributes)
line = (self._edge % (tail_name, head_name, attr_list))
self.body.append(line)
|
Create an edge between two nodes.
Args:
tail_name: Start node identifier.
head_name: End node identifier.
label: Caption to be displayed near the edge.
attrs: Any additional edge attributes (must be strings).
|
codesearchnet
|
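This matches the graphviz package's edge method; a usage sketch under that assumption:
from graphviz import Digraph

dot = Digraph()
# Appends an edge statement with the given attributes to the DOT body.
dot.edge('parser', 'renderer', label='AST', color='blue')
print(dot.source)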
def request_stop(self, ex=None):
self._coord.request_stop(ex=ex)
|
Request that the coordinator stop the threads.
See `Coordinator.request_stop()`.
Args:
ex: Optional `Exception`, or Python `exc_info` tuple as returned by
`sys.exc_info()`. If this is the first call to `request_stop()` the
corresponding exception is recorded and re-raised from `join()`.
|
github-repos
|
def _get_bonds(self, mol):
num_atoms = len(mol)
if self.ignore_ionic_bond:
covalent_atoms = [i for i in range(num_atoms) if mol.species[i].symbol not in self.ionic_element_list]
else:
covalent_atoms = list(range(num_atoms))
all_pairs = list(itertools.combinations(covalent_atoms, 2))
pair_dists = [mol.get_distance(*p) for p in all_pairs]
elements = mol.composition.as_dict().keys()
unavailable_elements = list(set(elements) -
set(self.covalent_radius.keys()))
if len(unavailable_elements) > 0:
raise ValueError("The covalent radius for element {} is not "
"available".format(unavailable_elements))
bond_13 = self.get_13_bonds(self.priority_bonds)
max_length = [(self.covalent_radius[mol.sites[p[0]].specie.symbol] +
self.covalent_radius[mol.sites[p[1]].specie.symbol]) *
(1 + (self.priority_cap if p in self.priority_bonds
else (self.bond_length_cap if p not in bond_13
else self.bond_13_cap))) *
(0.1 if (self.ignore_halogen_self_bond and p not in self.priority_bonds and
mol.sites[p[0]].specie.symbol in self.halogen_list and
mol.sites[p[1]].specie.symbol in self.halogen_list)
else 1.0)
for p in all_pairs]
bonds = [bond
for bond, dist, cap in zip(all_pairs, pair_dists, max_length)
if dist <= cap]
return bonds
|
Find all the bonds in a molecule.
Args:
mol: the molecule. pymatgen Molecule object
Returns:
List of tuples. Each tuple corresponds to a bond, represented by the
id of the two end atoms.
|
juraj-google-style
|
def classify_coupling(coupling):
(lower, upper) = coupling
if ((lower is None) and (upper is None)):
return CouplingClass.Uncoupled
elif ((lower is None) or (upper is None)):
return CouplingClass.DirectionalReverse
elif ((lower == 0.0) and (upper == 0.0)):
return CouplingClass.Inconsistent
elif ((lower <= 0.0) and (upper >= 0.0)):
return CouplingClass.DirectionalForward
elif (abs((lower - upper)) < 1e-06):
return CouplingClass.Full
else:
return CouplingClass.Partial
|
Return a constant indicating the type of coupling.
Depending on the type of coupling, one of the constants from
:class:`.CouplingClass` is returned.
Args:
coupling: Tuple of minimum and maximum flux ratio
|
codesearchnet
|
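A quick sketch of how the branches above map tuples to classes; CouplingClass comes from the surrounding module:
classify_coupling((None, None))   # CouplingClass.Uncoupled
classify_coupling((2.0, None))    # CouplingClass.DirectionalReverse
classify_coupling((0.0, 0.0))     # CouplingClass.Inconsistent
classify_coupling((-1.5, 3.0))    # CouplingClass.DirectionalForward
classify_coupling((2.0, 2.0))     # CouplingClass.Full
classify_coupling((1.0, 4.0))     # CouplingClass.Partial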
def __init__(self, option):
self.option = option
super().__init__('invalid option name: {}'.format(option))
|
Initialization of instances:
Args:
option (str): invalid option name.
Attributes:
option (str): invalid option name.
|
juraj-google-style
|
def extend(self, step):
self.timesteps.extend(step.timesteps)
self.masks.extend(step.masks)
self.x.extend(step.x)
self.y.extend(step.y)
self.i.extend(step.i)
self.j.extend(step.j)
self.end_time = step.end_time
self.times = np.arange(self.start_time, self.end_time + self.step, self.step)
self.u = np.concatenate((self.u, step.u))
self.v = np.concatenate((self.v, step.v))
for attr in self.attributes.keys():
if attr in step.attributes.keys():
self.attributes[attr].extend(step.attributes[attr])
|
Adds the data from another STObject to this object.
Args:
step: another STObject being added after the current one in time.
|
juraj-google-style
|
def getFilepaths(self, filename):
return (os.path.join(os.environ['HOME'], filename),
os.path.join(self.mackup.mackup_folder, filename))
|
Get home and mackup filepaths for given file
Args:
filename (str)
Returns:
home_filepath, mackup_filepath (str, str)
|
juraj-google-style
|
def align_segmentation(beat_times, song):
try:
segment_times, segment_labels = msaf.io.read_references(song)
except Exception:
return None, None, None
segment_times = np.asarray(segment_times)
segment_intervals = msaf.utils.times_to_intervals(segment_times)
beat_intervals = np.asarray(list(zip(beat_times[:-1], beat_times[1:])))
beat_segment_ids = librosa.util.match_intervals(beat_intervals,
segment_intervals)
segment_beats = []
segment_times_out = []
segment_labels_out = []
for i in range(segment_times.shape[0]):
hits = np.argwhere(beat_segment_ids == i)
if len(hits) > 0 and i < len(segment_intervals) and \
i < len(segment_labels):
segment_beats.extend(hits[0])
segment_times_out.append(segment_intervals[i, :])
segment_labels_out.append(segment_labels[i])
segment_beats = list(segment_beats)
segment_times_out = segment_times
return segment_beats, segment_times_out, segment_labels_out
|
Load a ground-truth segmentation, and align times to the nearest
detected beats.
Arguments:
beat_times -- array
song -- path to the audio file
Returns:
segment_beats -- array
beat-aligned segment boundaries
segment_times -- array
true segment times
segment_labels -- array
list of segment labels
|
juraj-google-style
|
def get_capacity_vol(self, min_voltage=None, max_voltage=None, use_overall_normalization=True):
pairs_in_range = self._select_in_voltage_range(min_voltage, max_voltage)
normalization_vol = (self.normalization_volume if (use_overall_normalization or (len(pairs_in_range) == 0)) else pairs_in_range[(- 1)].vol_discharge)
return (((sum([pair.mAh for pair in pairs_in_range]) / normalization_vol) * 1e+24) / N_A)
|
Get the volumetric capacity of the electrode.
Args:
min_voltage (float): The minimum allowable voltage for a given
step.
max_voltage (float): The maximum allowable voltage allowable for a
given step.
use_overall_normalization (bool): If False, normalize by the
discharged state of only the voltage pairs matching the voltage
criteria. If True, use the default normalization of the full
electrode path.
Returns:
Volumetric capacity in mAh/cc across the insertion path (a subset
of the path can be chosen by the optional arguments)
|
codesearchnet
|
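A hedged usage sketch; electrode stands for an already-built insertion-electrode object exposing this method, whose construction is not shown in this row:
# Volumetric capacity restricted to the 2.0-4.2 V window, normalized over the full path.
capacity = electrode.get_capacity_vol(min_voltage=2.0, max_voltage=4.2)
print('{:.1f} mAh/cc'.format(capacity))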
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(CreateKeyPairResponsePayload, self).read(input_buffer, kmip_version=kmip_version)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.PRIVATE_KEY_UNIQUE_IDENTIFIER, local_buffer):
self._private_key_unique_identifier = primitives.TextString(tag=enums.Tags.PRIVATE_KEY_UNIQUE_IDENTIFIER)
self._private_key_unique_identifier.read(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidKmipEncoding('The CreateKeyPair response payload encoding is missing the private key unique identifier.')
if self.is_tag_next(enums.Tags.PUBLIC_KEY_UNIQUE_IDENTIFIER, local_buffer):
self._public_key_unique_identifier = primitives.TextString(tag=enums.Tags.PUBLIC_KEY_UNIQUE_IDENTIFIER)
self._public_key_unique_identifier.read(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidKmipEncoding('The CreateKeyPair response payload encoding is missing the public key unique identifier.')
if (kmip_version < enums.KMIPVersion.KMIP_2_0):
if self.is_tag_next(enums.Tags.PRIVATE_KEY_TEMPLATE_ATTRIBUTE, local_buffer):
self._private_key_template_attribute = objects.TemplateAttribute(tag=enums.Tags.PRIVATE_KEY_TEMPLATE_ATTRIBUTE)
self._private_key_template_attribute.read(local_buffer, kmip_version=kmip_version)
if self.is_tag_next(enums.Tags.PUBLIC_KEY_TEMPLATE_ATTRIBUTE, local_buffer):
self._public_key_template_attribute = objects.TemplateAttribute(tag=enums.Tags.PUBLIC_KEY_TEMPLATE_ATTRIBUTE)
self._public_key_template_attribute.read(local_buffer, kmip_version=kmip_version)
self.is_oversized(local_buffer)
|
Read the data encoding the CreateKeyPair response payload and decode it
into its constituent parts.
Args:
input_buffer (stream): A data buffer containing encoded object
data, supporting a read method.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidKmipEncoding: Raised if the private key unique identifier or
the public key unique identifier is missing from the encoded
payload.
|
codesearchnet
|
def PopEventSource(self):
try:
(_, _, event_source) = heapq.heappop(self._heap)
except IndexError:
return None
return event_source
|
Pops an event source from the heap.
Returns:
EventSource: an event source, or None if no event source is available.
|
codesearchnet
|
def _hexencode(bytestring, insert_spaces=False):
_checkString(bytestring, description='byte string')
separator = ('' if (not insert_spaces) else ' ')
byte_representions = []
for c in bytestring:
byte_representions.append('{0:02X}'.format(ord(c)))
return separator.join(byte_representions).strip()
|
Convert a byte string to a hex encoded string.
For example 'J' will return '4A', and ``'\\x04'`` will return '04'.
Args:
bytestring (str): Can be for example ``'A\\x01B\\x45'``.
insert_spaces (bool): Insert space characters between pair of characters to increase readability.
Returns:
A string of twice the length, with characters in the range '0' to '9' and 'A' to 'F'.
The string will be longer if spaces are inserted.
Raises:
TypeError, ValueError
|
codesearchnet
|
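A few worked inputs for the helper above:
_hexencode('J')                               # '4A'
_hexencode('A\x01B\x45')                      # '41014245'
_hexencode('A\x01B\x45', insert_spaces=True)  # '41 01 42 45'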
def GetTopLevel(self, file_object):
try:
top_level_object = biplist.readPlist(file_object)
except (biplist.InvalidPlistException,
biplist.NotBinaryPlistException) as exception:
raise errors.UnableToParseFile(
'Unable to parse plist with error: {0!s}'.format(exception))
return top_level_object
|
Returns the deserialized content of a plist as a dictionary object.
Args:
file_object (dfvfs.FileIO): a file-like object to parse.
Returns:
dict[str, object]: contents of the plist.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
juraj-google-style
|
def retrieve_taf(station_icao) -> typing.Tuple[(typing.Union[(str, None)], typing.Union[(str, None)])]:
url = _BASE_TAF_URL.format(station=station_icao)
with requests.get(url) as resp:
if (not resp.ok):
return (f'unable to obtain TAF for station {station_icao}\nGo to "{url}" manually', None)
return (None, resp.content.decode().split('\n')[1])
|
Retrieves a TAF string from an online database
Args:
station_icao: ICAO of the station
Returns:
tuple of error, metar_str
|
codesearchnet
|
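A usage sketch following the (error, taf) return convention documented above:
error, taf = retrieve_taf('EBBR')
if error:
    print(error)
else:
    print(taf)  # second line of the station's TAF report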
def get_path_to_datafile(path):
if runfiles:
r = runfiles.Create()
new_fpath = r.Rlocation(_os.path.abspath(_os.path.join('tensorflow', path)))
if new_fpath is not None and _os.path.exists(new_fpath):
return new_fpath
old_filepath = _os.path.join(_os.path.dirname(_inspect.getfile(_sys._getframe(1))), path)
return old_filepath
|
Get the path to the specified file in the data dependencies.
The path is relative to tensorflow/
Args:
path: a string resource path relative to tensorflow/
Returns:
The path to the specified file present in the data attribute of py_test
or py_binary.
Raises:
IOError: If the path is not found, or the resource can't be opened.
|
github-repos
|
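A hedged usage sketch; the resource path below is hypothetical and only illustrates the tensorflow/-relative convention:
# Resolve a data dependency declared for a py_test/py_binary target.
path = get_path_to_datafile('python/ops/testdata/sample.txt')  # hypothetical path
with open(path) as f:
    contents = f.read()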