| code | docstring | source |
|---|---|---|
def schedule(self, api_call, bundle_id, bundle_desc, bundling_request, kwargs=None):
kwargs = (kwargs or dict())
bundle = self._bundle_for(api_call, bundle_id, bundle_desc, bundling_request, kwargs)
elts = getattr(bundling_request, bundle_desc.bundled_field)
event = bundle.extend(elts)
count_threshold = self._options.element_count_threshold
if ((count_threshold > 0) and (bundle.element_count >= count_threshold)):
self._run_now(bundle.bundle_id)
size_threshold = self._options.request_byte_threshold
if ((size_threshold > 0) and (bundle.request_bytesize >= size_threshold)):
self._run_now(bundle.bundle_id)
return event
|
Schedules bundle_desc of bundling_request as part of bundle_id.
The returned value is an :class:`Event` that
* has a ``result`` attribute that will eventually be set to the result
of the api call
* will be used to wait for the response
* holds the canceller function for canceling this part of the bundle
Args:
api_call (callable[[object], object]): the scheduled API call.
bundle_id (str): identifies the bundle on which the API call should be
made.
bundle_desc (gax.BundleDescriptor): describes the structure of the
bundled call.
bundling_request (object): the request instance to use in the API
call.
kwargs (dict): optional, the keyword arguments passed to the API call.
Returns:
Event: the scheduled event.
|
codesearchnet
|
def get_storage(self, id_or_uri):
uri = (self.URI + '/{}/storage'.format(extract_id_from_uri(id_or_uri)))
return self._client.get(uri)
|
Get storage details of an OS Volume.
Args:
id_or_uri: ID or URI of the OS Volume.
Returns:
dict: Storage details
|
codesearchnet
|
def experimental_tpu_test_loop(model, dataset, verbose=0, steps=None, callbacks=None):
mode = ModeKeys.TEST
current_strategy = model._distribution_strategy
iterator = dist_utils.get_iterator(dataset, current_strategy)
scope = dist_utils.distributed_scope(strategy=current_strategy, learning_phase=0)
scope.__enter__()
out_labels = model.metrics_names
def _test_step_fn(inputs):
if isinstance(inputs, (tuple, list)) and len(inputs) == 2:
inputs, targets = inputs
else:
targets = None
distribute_lib.get_replica_context().merge_call(_build_model, args=(model, mode, inputs, targets))
_, outputs, updates, _ = _per_replica_execution_function(dist_utils.get_distributed_model(model, mode), mode)
with ops.control_dependencies([updates]):
return [array_ops.identity(out) for out in outputs]
test_input_data = iterator.get_next()
per_replica_outputs = current_strategy.run(_test_step_fn, args=(test_input_data,))
output_tensors = {}
for label, output in zip(out_labels, per_replica_outputs):
if label == 'loss':
reduce_op = ds_reduce_util.ReduceOp.SUM
else:
reduce_op = ds_reduce_util.ReduceOp.MEAN
output_tensors[label] = current_strategy.reduce(reduce_op, output, axis=None)
test_op = control_flow_ops.group(list(output_tensors.values()))
if verbose >= 1:
progbar = Progbar(target=steps)
if model._compile_distribution:
dist_utils._copy_weights_to_distributed_model(model, mode)
dist_utils._reset_metrics(model)
callbacks = cbks.configure_callbacks(callbacks, model, do_validation=False, epochs=1, steps_per_epoch=steps, verbose=verbose, count_mode='steps', mode=ModeKeys.TEST)
callbacks._call_begin_hook(mode)
outs = [0.0] * len(model.metrics_names)
if steps is not None:
target_steps = steps
else:
raise ValueError('Number of steps could not be inferred from the data, please pass the steps argument.')
current_step = 0
while current_step < target_steps:
batch_logs = {'batch': current_step, 'size': 1}
callbacks._call_batch_hook(mode, 'begin', current_step, batch_logs)
try:
_, batch_outs = backend.batch_get_value([test_op, output_tensors])
except errors.OutOfRangeError:
warning_msg = 'Make sure that your dataset can generate at least `steps` batches (in this case, {} batches).'.format(steps)
logging.warning('Your dataset iterator ran out of data; interrupting evaluation. ' + warning_msg)
target_steps = current_step
break
for i, label in enumerate(model.metrics_names):
if i == 0:
outs[i] += batch_outs[label]
else:
outs[i] = batch_outs[label]
batch_logs = cbks.make_logs(model, batch_logs, outs, mode)
callbacks._call_batch_hook(mode, 'end', current_step, batch_logs)
if verbose == 1:
progbar.update(current_step + 1)
current_step += 1
if verbose >= 1:
progbar.update(target_steps)
callbacks._call_end_hook(mode)
scope.__exit__(None, None, None)
    if len(outs) > 0:
outs[0] /= target_steps
if len(outs) == 1:
return outs[0]
return outs
|
Test loop for evaluating with TPU tf.distribute.Strategy.
Args:
model: Keras Model instance.
dataset: Dataset for input data.
verbose: Integer, Verbosity mode 0 or 1.
steps: Total number of steps (batches of samples)
before declaring predictions finished.
Ignored with the default value of `None`.
callbacks: List of callbacks to be called during evaluation.
Returns:
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the outputs.
|
github-repos
|
def __init__(self, batch_size=8, data_dir=None):
self._train_data, self._train_labels = None, None
self._test_data, self._test_labels = None, None
self._batch_size = batch_size
self.img_size = IMAGE_SIZE
self.num_channels = NUM_CHANNELS
self.num_classes = NUM_CLASSES
self.train_len = NUM_TRAIN_SAMPLES
self.test_len = NUM_TEST_SAMPLES
self.data_dir = data_dir or "./test_data"
self.cifar10_dir = os.path.join(self.data_dir, 'cifar-10-batches-py')
self.cifar10_tarball = os.path.join(self.data_dir, CIFAR10_URL.split('/')[-1])
self.maybe_download_and_extract()
|
CIFAR-10 dataset and TF model constructor.
Args:
batch_size: dataset batch size.
data_dir: directory that holds (or will receive) the downloaded CIFAR-10 archive. Defaults to "./test_data".
|
juraj-google-style
|
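A minimal usage sketch for the constructor above; the class name Cifar10Dataset is hypothetical (the snippet does not show it) and the data directory is only an example.
dataset = Cifar10Dataset(batch_size=32, data_dir="/tmp/cifar10")  # hypothetical class name
print(dataset.img_size, dataset.num_classes)   # constants exposed on the instance
print(dataset.cifar10_dir)                     # /tmp/cifar10/cifar-10-batches-py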
def _update_bird_conf_file(self, operation):
conf_updated = False
prefixes = []
ip_version = operation.ip_version
config_file = self.bird_configuration[ip_version]['config_file']
variable_name = self.bird_configuration[ip_version]['variable_name']
changes_counter =\
self.bird_configuration[ip_version]['changes_counter']
dummy_ip_prefix =\
self.bird_configuration[ip_version]['dummy_ip_prefix']
try:
prefixes = get_ip_prefixes_from_bird(config_file)
except OSError as error:
self.log.error("failed to open Bird configuration %s, this is a "
"FATAL error, thus exiting main program", error)
sys.exit(1)
if not prefixes:
self.log.error("found empty bird configuration %s, this is a FATAL"
" error, thus exiting main program", config_file)
sys.exit(1)
if dummy_ip_prefix not in prefixes:
self.log.warning("dummy IP prefix %s wasn't found in bird "
"configuration, adding it. This shouldn't have "
"happened!", dummy_ip_prefix)
prefixes.insert(0, dummy_ip_prefix)
conf_updated = True
ip_prefixes_without_check = set(prefixes).difference(
self.ip_prefixes[ip_version])
if ip_prefixes_without_check:
self.log.warning("found %s IP prefixes in Bird configuration but "
"we aren't configured to run health checks on "
"them. Either someone modified the configuration "
"manually or something went horrible wrong. We "
"remove them from Bird configuration",
','.join(ip_prefixes_without_check))
prefixes[:] = (ip for ip in prefixes
if ip not in ip_prefixes_without_check)
conf_updated = True
if operation.update(prefixes):
conf_updated = True
if not conf_updated:
self.log.info('no updates for bird configuration')
return conf_updated
if self.bird_configuration[ip_version]['keep_changes']:
archive_bird_conf(config_file, changes_counter)
tempname = write_temp_bird_conf(
dummy_ip_prefix,
config_file,
variable_name,
prefixes
)
try:
os.rename(tempname, config_file)
except OSError as error:
self.log.critical("failed to create Bird configuration %s, this "
"is a FATAL error, thus exiting main program",
error)
sys.exit(1)
else:
self.log.info("Bird configuration for IPv%s is updated",
ip_version)
if len(prefixes) == 1:
self.log.warning("Bird configuration doesn't have IP prefixes for "
"any of the services we monitor! It means local "
"node doesn't receive any traffic")
return conf_updated
|
Update BIRD configuration.
It adds to or removes IP prefix from BIRD configuration. It also
updates generation time stamp in the configuration file.
Main program will exit if the configuration file can't be read or written.
Arguments:
operation (obj): Either an AddOperation or DeleteOperation object
Returns:
True if BIRD configuration was updated otherwise False.
|
juraj-google-style
|
def Reinit(self, pid, auto_symfile_loading=True):
self.ShutDownGdb()
self.__init__(pid, auto_symfile_loading, architecture=self.arch)
|
Reinitializes the object with a new pid.
Since all modes might need access to this object at any time, this object
needs to be long-lived. To make this clear in the API, this shorthand is
supplied.
Args:
pid: the pid of the target process
auto_symfile_loading: whether the symbol file should automatically be
loaded by gdb.
|
codesearchnet
|
def _update_run_calls_state(self, run_call_count, fetches, feed_dict, is_callable_runner=False):
self._run_call_count = run_call_count
self._feed_dict = feed_dict
self._run_description = cli_shared.get_run_short_description(run_call_count, fetches, feed_dict, is_callable_runner=is_callable_runner)
self._run_through_times -= 1
self._run_info = cli_shared.get_run_start_intro(run_call_count, fetches, feed_dict, self._tensor_filters, is_callable_runner=is_callable_runner)
|
Update the internal state with regard to run() call history.
Args:
run_call_count: (int) Number of run() calls that have occurred.
fetches: a node/tensor or a list of node/tensor that are the fetches of
the run() call. This is the same as the fetches argument to the run()
call.
feed_dict: None or a dict. This is the feed_dict argument to the run()
call.
is_callable_runner: (bool) whether a runner returned by
Session.make_callable is being run.
|
github-repos
|
def construct_concept_to_indicator_mapping(n: int=1) -> Dict[(str, List[str])]:
df = pd.read_sql_table('concept_to_indicator_mapping', con=engine)
gb = df.groupby('Concept')
_dict = {k: [get_variable_and_source(x) for x in take(n, v['Indicator'].values)] for (k, v) in gb}
return _dict
|
Create a dictionary mapping high-level concepts to low-level indicators
Args:
n: Number of indicators to return
Returns:
Dictionary that maps concept names to lists of indicator names.
|
codesearchnet
|
def add_cidr_rules(self, rules):
session = boto3.session.Session(profile_name=self.env, region_name=self.region)
client = session.client('ec2')
group_id = get_security_group_id(self.app_name, self.env, self.region)
for rule in rules:
data = {'DryRun': False, 'GroupId': group_id, 'IpPermissions': [{'IpProtocol': rule['protocol'], 'FromPort': rule['start_port'], 'ToPort': rule['end_port'], 'IpRanges': [{'CidrIp': rule['app']}]}]}
self.log.debug('Security Group rule: %s', data)
try:
client.authorize_security_group_ingress(**data)
except botocore.exceptions.ClientError as error:
if ('InvalidPermission.Duplicate' in str(error)):
self.log.debug('Duplicate rule exist, that is OK.')
else:
msg = 'Unable to add cidr rules to {}'.format(rule.get('app'))
self.log.error(msg)
raise SpinnakerSecurityGroupError(msg)
return True
|
Add cidr rules to security group via boto.
Args:
rules (list): Allowed Security Group ports and protocols.
Returns:
True: Upon successful completion.
Raises:
SpinnakerSecurityGroupError: boto3 call failed to add CIDR block to
Security Group.
|
codesearchnet
|
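A sketch of the rules structure that add_cidr_rules above iterates over, inferred from the keys the method reads (protocol, start_port, end_port, app); the instance name, CIDR blocks and ports are illustrative only.
rules = [
    {'protocol': 'tcp', 'start_port': 443, 'end_port': 443, 'app': '10.0.0.0/16'},
    {'protocol': 'tcp', 'start_port': 8080, 'end_port': 8081, 'app': '192.168.1.0/24'},
]
security_group.add_cidr_rules(rules)  # returns True; duplicate rules are logged and skipped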
def delete_user(self, user):
self.service.delete_user(
user, self.url_prefix, self.auth, self.session, self.session_send_opts)
|
Delete the given user.
Args:
user (string): User name.
Raises:
requests.HTTPError on failure.
|
juraj-google-style
|
def _get_app_path(url):
app_path = urlparse(url).path.rstrip("/")
if not app_path.startswith("/"):
app_path = "/" + app_path
return app_path
|
Extract the app path from a Bokeh server URL
Args:
url (str) :
Returns:
str
|
juraj-google-style
|
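Two worked examples for _get_app_path above; the helper only needs urlparse from the standard library.
from urllib.parse import urlparse  # dependency of the helper

assert _get_app_path("http://localhost:5006/myapp/") == "/myapp"
assert _get_app_path("http://localhost:5006") == "/"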
def _RunInTransaction(self, function, readonly=False):
start_query = 'START TRANSACTION;'
if readonly:
start_query = 'START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY;'
for retry_count in range(_MAX_RETRY_COUNT):
with contextlib.closing(self.pool.get()) as connection:
try:
with contextlib.closing(connection.cursor()) as cursor:
cursor.execute(start_query)
ret = function(connection)
if (not readonly):
connection.commit()
return ret
except MySQLdb.OperationalError as e:
connection.rollback()
if ((retry_count >= _MAX_RETRY_COUNT) or (not _IsRetryable(e))):
raise
time.sleep((random.uniform(1.0, 2.0) * math.pow(1.5, retry_count)))
    raise Exception('Loop ended early - last exception swallowed.')
|
Runs function within a transaction.
Allocates a connection, begins a transaction on it and passes the connection
to function.
If function finishes without raising, the transaction is committed.
If function raises, the transaction will be rolled back, if a retryable
database error is raised, the operation may be repeated.
Args:
function: A function to be run, must accept a single MySQLdb.connection
parameter.
readonly: Indicates that only a readonly (snapshot) transaction is
required.
Returns:
The value returned by the last call to function.
Raises: Any exception raised by function.
|
codesearchnet
|
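A sketch of the callback shape _RunInTransaction above expects: a function taking a single open MySQLdb connection. The db instance, table and query are hypothetical.
import contextlib

def _write_row(connection):
    # the callback receives an open MySQLdb connection; cursors are its own responsibility
    with contextlib.closing(connection.cursor()) as cursor:
        cursor.execute("INSERT INTO events (name) VALUES (%s)", ["started"])
        return cursor.lastrowid

new_id = db._RunInTransaction(_write_row)  # committed on success, retried on retryable errors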
def get_column(self, column_name, column_type, index, verbose=True):
return LazyOpResult(
grizzly_impl.get_column(
self.expr,
self.weld_type,
index
),
column_type,
1
)
|
Return one column of the underlying expression as a lazy result.
Args:
column_name (TYPE): Name of the column (not used for the lookup itself).
column_type (TYPE): Weld type of the column values.
index (TYPE): Positional index of the column to extract.
Returns:
LazyOpResult: Lazy wrapper around the extracted column.
|
juraj-google-style
|
def sys_wait_for_event(mask: int, k: Optional[Key], m: Optional[Mouse], flush: bool) -> int:
return int(lib.TCOD_sys_wait_for_event(mask, (k.key_p if k else ffi.NULL), (m.mouse_p if m else ffi.NULL), flush))
|
Wait for an event then return.
If flush is True then the buffer will be cleared before waiting. Otherwise
each available event will be returned in the order they're received.
Args:
mask (int): :any:`Event types` to wait for.
k (Optional[Key]): A tcod.Key instance which might be updated with
an event. Can be None.
m (Optional[Mouse]): A tcod.Mouse instance which might be updated
with an event. Can be None.
flush (bool): Clear the event buffer before waiting.
.. deprecated:: 9.3
Use the :any:`tcod.event.wait` function to wait for events.
|
codesearchnet
|
def __init__(self, location, optional=False):
super(Backtrack, self).__init__(location, optional=optional)
self.location = location
self.optional = optional
self.validate()
|
Create a new Backtrack block, returning to the given location in the query.
Args:
location: Location object, specifying where to backtrack to
optional: optional bool, specifying whether the steps between the current location
and the location to which Backtrack is returning were optional or not
Returns:
new Backtrack object
|
juraj-google-style
|
def type_based_dispatch_signatures_for(cls):
def contains_cls(x):
if isinstance(x, dict):
return any((contains_cls(v) for v in x.values()))
elif x is cls:
return True
elif type_annotations.is_generic_list(x) or type_annotations.is_generic_union(x):
type_args = type_annotations.get_generic_type_args(x)
return any((contains_cls(arg) for arg in type_args))
else:
return False
result = {}
for api, api_signatures in _TYPE_BASED_DISPATCH_SIGNATURES.items():
for _, signatures in api_signatures.items():
filtered = list(filter(contains_cls, signatures))
if filtered:
result.setdefault(api, []).extend(filtered)
return result
|
Returns dispatch signatures that have been registered for a given class.
This function is intended for documentation-generation purposes.
Args:
cls: The class to search for. Type signatures are searched recursively, so
e.g., if `cls=RaggedTensor`, then information will be returned for all
dispatch targets that have `RaggedTensor` anywhere in their type
annotations (including nested in `typing.Union` or `typing.List`.)
Returns:
A `dict` mapping `api` -> `signatures`, where `api` is a TensorFlow API
function; and `signatures` is a list of dispatch signatures for `api`
that include `cls`. (Each signature is a dict mapping argument names to
type annotations; see `dispatch_for_api` for more info.)
|
github-repos
|
def _make_model(self, data, key=None):
if (data['deleted'] and (not self.adapter.want_deleted)):
raise ObjectDoesNotExist('Deleted object returned')
model = self._model_class(self._current_context, _pass_perm_checks=self._pass_perm_checks)
model.setattr('key', (ub_to_str(key) if key else ub_to_str(data.get('key'))))
model = model.set_data(data, from_db=True)
model._initial_data = model.clean_value()
return model
|
Creates a model instance with the given data.
Args:
data: Model data returned from DB.
key: Object key
Returns:
pyoko.Model object.
|
codesearchnet
|
def get_variation(self, experiment, user_id, attributes, ignore_user_profile=False):
if (not experiment_helper.is_experiment_running(experiment)):
self.logger.info(('Experiment "%s" is not running.' % experiment.key))
return None
variation = self.config.get_forced_variation(experiment.key, user_id)
if variation:
return variation
variation = self.get_forced_variation(experiment, user_id)
if variation:
return variation
user_profile = UserProfile(user_id)
if ((not ignore_user_profile) and self.user_profile_service):
try:
retrieved_profile = self.user_profile_service.lookup(user_id)
except:
self.logger.exception(('Unable to retrieve user profile for user "%s" as lookup failed.' % user_id))
retrieved_profile = None
if validator.is_user_profile_valid(retrieved_profile):
user_profile = UserProfile(**retrieved_profile)
variation = self.get_stored_variation(experiment, user_profile)
if variation:
return variation
else:
self.logger.warning('User profile has invalid format.')
if (not audience_helper.is_user_in_experiment(self.config, experiment, attributes, self.logger)):
self.logger.info(('User "%s" does not meet conditions to be in experiment "%s".' % (user_id, experiment.key)))
return None
bucketing_id = self._get_bucketing_id(user_id, attributes)
variation = self.bucketer.bucket(experiment, user_id, bucketing_id)
if variation:
if ((not ignore_user_profile) and self.user_profile_service):
try:
user_profile.save_variation_for_experiment(experiment.id, variation.id)
self.user_profile_service.save(user_profile.__dict__)
except:
self.logger.exception(('Unable to save user profile for user "%s".' % user_id))
return variation
return None
|
Top-level function to help determine variation user should be put in.
First, check if experiment is running.
Second, check if user is forced in a variation.
Third, check if there is a stored decision for the user and return the corresponding variation.
Fourth, figure out if user is in the experiment by evaluating audience conditions if any.
Fifth, bucket the user and return the variation.
Args:
experiment: Experiment for which user variation needs to be determined.
user_id: ID for user.
attributes: Dict representing user attributes.
ignore_user_profile: True to ignore the user profile lookup. Defaults to False.
Returns:
Variation user should see. None if user is not in experiment or experiment is not running.
|
codesearchnet
|
def __init__(self, rnn_class=LSTM, hidden_dims=[50, 50], bidirectional=True, dropout_rate=0.5, **rnn_kwargs):
super(StackedRNN, self).__init__(dropout_rate)
self.rnn_class = rnn_class
self.hidden_dims = hidden_dims
self.bidirectional = bidirectional
self.rnn_kwargs = rnn_kwargs
|
Creates a stacked RNN.
Args:
rnn_class: The type of RNN to use. (Default Value = LSTM)
hidden_dims: The number of hidden units for each stacked RNN layer. (Default Value: [50, 50])
dropout_rate: The dropout rate applied to the stack. (Default Value = 0.5)
bidirectional: Whether to use bidirectional encoding. (Default Value = True)
**rnn_kwargs: Additional args for building the RNN.
|
juraj-google-style
|
def topics(self, exclude_internal_topics=True):
topics = set(self._partitions.keys())
if exclude_internal_topics:
return (topics - self.internal_topics)
else:
return topics
|
Get set of known topics.
Arguments:
exclude_internal_topics (bool): Whether records from internal topics
(such as offsets) should be exposed to the consumer. If set to
True the only way to receive records from an internal topic is
subscribing to it. Default True
Returns:
set: {topic (str), ...}
|
codesearchnet
|
def process_extra_vars(extra_vars_list, force_json=True):
extra_vars = {}
extra_vars_yaml = ""
for extra_vars_opt in extra_vars_list:
if extra_vars_opt.startswith("@"):
with open(extra_vars_opt[1:], 'r') as f:
extra_vars_opt = f.read()
opt_dict = string_to_dict(extra_vars_opt, allow_kv=False)
else:
opt_dict = string_to_dict(extra_vars_opt, allow_kv=True)
if any(line.startswith("
extra_vars_yaml += extra_vars_opt + "\n"
elif extra_vars_opt != "":
extra_vars_yaml += yaml.dump(
opt_dict, default_flow_style=False) + "\n"
extra_vars.update(opt_dict)
if not force_json:
try:
try_dict = yaml.load(extra_vars_yaml, Loader=yaml.SafeLoader)
assert type(try_dict) is dict
debug.log('Using unprocessed YAML', header='decision', nl=2)
return extra_vars_yaml.rstrip()
except Exception:
debug.log('Failed YAML parsing, defaulting to JSON',
header='decison', nl=2)
if extra_vars == {}:
return ""
return json.dumps(extra_vars, ensure_ascii=False)
|
Returns a string that is valid JSON or YAML and contains all the
variables in every extra_vars_opt inside of extra_vars_list.
Args:
extra_vars_list (list): extra_vars strings to combine; entries prefixed with "@" are read from the named file.
force_json (bool): if True, always output json.
|
juraj-google-style
|
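A usage sketch for process_extra_vars above; group_vars.yml is a hypothetical file and the printed output is indicative only. Entries prefixed with "@" are read from disk, YAML strings and key=value pairs are merged, and the result is JSON unless force_json=False and the YAML round-trip succeeds.
combined = process_extra_vars(['version: 1.2', 'region=us-east-1'])
print(combined)  # '{"version": 1.2, "region": "us-east-1"}'

raw_yaml = process_extra_vars(['@group_vars.yml'], force_json=False)  # may return the raw YAML text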
def Query(self):
if self.name is not None:
return
sb = ScriptBuilder()
sb.EmitAppCallWithOperation(self.ScriptHash, 'name')
sb.EmitAppCallWithOperation(self.ScriptHash, 'symbol')
sb.EmitAppCallWithOperation(self.ScriptHash, 'decimals')
engine = None
try:
engine = ApplicationEngine.Run(sb.ToArray(), exit_on_error=True, gas=Fixed8.FromDecimal(10.0), test_mode=False)
except Exception as e:
pass
if engine and len(engine.ResultStack.Items) == 3:
results = engine.ResultStack.Items
try:
self.name = results[0].GetString()
self.symbol = results[1].GetString()
self.decimals = results[2].GetBigInteger()
if len(self.name) > 1 and self.name != 'Stack Item' \
and len(self.symbol) > 1 and self.symbol != 'Stack Item' \
and self.decimals < 10:
return True
except Exception as e:
pass
return False
|
Query the smart contract for its token information (name, symbol, decimals).
Returns:
None: if the NEP5Token instance `Name` is already set.
True: if all information was retrieved.
False: if information retrieval failed.
|
juraj-google-style
|
def send_to_prv_exchange(self, user_id, message=None):
exchange = 'prv_%s' % user_id.lower()
msg = json.dumps(message, cls=ZEngineJSONEncoder)
log.debug("Sending following users \"%s\" exchange:\n%s " % (exchange, msg))
self.get_channel().publish(exchange=exchange, routing_key='', body=msg)
|
Send messages through logged in users private exchange.
Args:
user_id string: User key
message dict: Message object
|
juraj-google-style
|
def __eq__(self, other):
return type(self) is type(other) and \
self.p == other.p and \
self.m == other.m and \
np.array_equal(self.reg, other.reg)
|
Check equivalence between two HyperLogLogs
Args:
other (datasketch.HyperLogLog):
Returns:
bool: True if both have the same internal state.
|
juraj-google-style
|
def transform_normalize_unicode(source, form, name=None):
with ops.name_scope(name, 'TransformNormalizeUnicode', [source]):
source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)
if isinstance(source, tf.SparseTensor):
result = tf.SparseTensor(indices=source.indices, values=ops_module.transform_normalize_unicode(source.values, form), dense_shape=source.dense_shape)
else:
result = ops_module.transform_normalize_unicode(source, form)
return result
|
Normalize unicode strings tensor.
Args:
source: `Tensor` or `SparseTensor` of any shape, strings to normalize.
form: Scalar value, name of normalization algorithm.
One of `"NFD"`, `"NFC"`, `"NFKD"`, `"NFKC"`.
name: A name for the operation (optional).
Returns:
`Tensor` or `SparseTensor` of same shape and size as input.
|
codesearchnet
|
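A minimal call sketch for transform_normalize_unicode above, assuming TensorFlow and the module providing the op are importable; 'NFC' is one of the documented forms.
import tensorflow as tf

source = tf.constant([u'Mun\u0303oz', u'cafe\u0301'])    # strings with decomposed accents
normalized = transform_normalize_unicode(source, 'NFC')  # composed output, same shape as input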
def set_setpoint(self, setpointvalue):
_checkSetpointValue( setpointvalue, self.setpoint_max )
self.write_register( 4097, setpointvalue, 1)
|
Set the setpoint.
Args:
setpointvalue (float): Setpoint [most often in degrees]
|
juraj-google-style
|
def _check_interpret_cell(self, cell, prior_cell, row_index, column_index):
changed = False
if ((not is_empty_cell(cell)) and (not is_text_cell(cell))):
self.flag_change(self.flags, 'interpreted', (row_index, column_index), self.worksheet, self.FLAGS['converted-to-string'])
cell = str(cell)
changed = True
elif is_empty_cell(cell):
self.flag_change(self.flags, 'interpreted', (row_index, column_index), self.worksheet, self.FLAGS['copied-title'])
cell = prior_cell
changed = True
return (cell, changed)
|
Helper function which checks cell type and performs cell translation to strings where
necessary.
Returns:
A tuple of the form '(cell, changed)' where 'changed' indicates if 'cell' differs from
input.
|
codesearchnet
|
def typical_or_extreme_period_type(self, value=None):
if value is not None:
try:
value = str(value)
except ValueError:
raise ValueError(
'value {} need to be of type str '
'for field `typical_or_extreme_period_type`'.format(value))
if ',' in value:
raise ValueError('value should not contain a comma '
'for field `typical_or_extreme_period_type`')
self._typical_or_extreme_period_type = value
|
Corresponds to IDD Field `typical_or_extreme_period_type`
Args:
value (str): value for IDD Field `typical_or_extreme_period_type`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def query_with_attributes(type_to_query, client):
session = client.create_session()
query = session.query(Attribute.name, Attribute.value, Entity.id).join(Entity).filter((Entity.type == type_to_query))
df = client.df_query(query)
session.close()
df = df.dropna(how='any')
df = df.set_index(['id', 'name']).unstack().reset_index()
df.columns = (['id'] + list(df.columns.get_level_values(1)[1:]))
return df
|
Query all entities of a specific type, with their attributes
Args:
type_to_query (str): type of entity to query
client: DB client to perform query with
Returns:
pandas.DataFrame: table of entities, with attributes as columns
|
codesearchnet
|
def rank_dated_files(pattern, dir, descending=True):
files = glob.glob(op.join(dir, pattern))
return sorted(files, reverse=descending)
|
Search a directory for files that match a pattern. Return an ordered list of these files by filename.
Args:
pattern: The glob pattern to search for.
dir: Path to directory where the files will be searched for.
descending: Default True, will sort alphabetically by descending order.
Returns:
list: Rank-ordered list by filename.
|
juraj-google-style
|
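A worked example for rank_dated_files above; the directory and file names are hypothetical and only illustrate that ordering is a reverse alphabetical sort of the matched paths.
# given /var/logs containing run-2019-01-02.log and run-2019-01-03.log
rank_dated_files('run-*.log', '/var/logs')
# ['/var/logs/run-2019-01-03.log', '/var/logs/run-2019-01-02.log']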
def update_node(self, node_id, version, node_spec=None):
url = self._url('/nodes/{0}/update?version={1}', node_id, str(version))
res = self._post_json(url, data=node_spec)
self._raise_for_status(res)
return True
|
Update the node's configuration
Args:
node_id (string): ID of the node to be updated.
version (int): The version number of the node object being
updated. This is required to avoid conflicting writes.
node_spec (dict): Configuration settings to update. Any values
not provided will be removed. Default: ``None``
Returns:
`True` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> node_spec = {'Availability': 'active',
'Name': 'node-name',
'Role': 'manager',
'Labels': {'foo': 'bar'}
}
>>> client.update_node(node_id='24ifsmvkjbyhk', version=8,
node_spec=node_spec)
|
codesearchnet
|
def fetch_git_package(self, config):
from git import Repo
ref = self.determine_git_ref(config)
dir_name = self.sanitize_git_path(uri=config['uri'], ref=ref)
cached_dir_path = os.path.join(self.package_cache_dir, dir_name)
if (not os.path.isdir(cached_dir_path)):
logger.debug('Remote repo %s does not appear to have been previously downloaded - starting clone to %s', config['uri'], cached_dir_path)
tmp_dir = tempfile.mkdtemp(prefix='stacker')
try:
tmp_repo_path = os.path.join(tmp_dir, dir_name)
with Repo.clone_from(config['uri'], tmp_repo_path) as repo:
repo.head.reference = ref
repo.head.reset(index=True, working_tree=True)
shutil.move(tmp_repo_path, self.package_cache_dir)
finally:
shutil.rmtree(tmp_dir)
else:
logger.debug('Remote repo %s appears to have been previously cloned to %s -- bypassing download', config['uri'], cached_dir_path)
self.update_paths_and_config(config=config, pkg_dir_name=dir_name)
|
Make a remote git repository available for local use.
Args:
config (dict): git config dictionary
|
codesearchnet
|
def GetEventData(self, data_type):
event_data = events.EventData(data_type=data_type)
for property_name, property_value in iter(self._properties.items()):
if isinstance(property_value, py2to3.BYTES_TYPE):
property_value = repr(property_value)
setattr(event_data, property_name, property_value)
return event_data
|
Retrieves the properties as event data.
Args:
data_type (str): event data type.
Returns:
EventData: event data.
|
juraj-google-style
|
def resolve(node, source_info, graphs, include_annotations=True):
node = TreeAnnotator(source_info, graphs, include_annotations).visit(node)
return node
|
Resolves the live symbols at the exit of control flow statements.
Args:
node: ast.AST
source_info: transformer.SourceInfo
graphs: Dict[ast.FunctionDef, cfg.Graph]
include_annotations: Bool, whether type annotations should be included in
the analysis.
Returns:
ast.AST
|
github-repos
|
def encode_mezzanine_asset(access_token, processor_id, asset_id, output_assetname, json_profile):
path = '/Jobs'
endpoint = ''.join([ams_rest_endpoint, path])
assets_path = ''.join(["/Assets", "('", asset_id, "')"])
assets_path_encoded = urllib.parse.quote(assets_path, safe='')
endpoint_assets = ''.join([ams_rest_endpoint, assets_path_encoded])
body = '{ \
"Name":"' + output_assetname + '", \
"InputMediaAssets":[{ \
"__metadata":{ \
"uri":"' + endpoint_assets + '" \
} \
}], \
"Tasks":[{ \
"Configuration":\'' + json_profile + '\', \
"MediaProcessorId":"' + processor_id + '", \
"TaskBody":"<?xml version=\\"1.0\\" encoding=\\"utf-16\\"?><taskBody><inputAsset>JobInputAsset(0)</inputAsset><outputAsset assetCreationOptions=\\"0\\" assetName=\\"' + output_assetname + '\\">JobOutputAsset(0)</outputAsset></taskBody>" \
}] \
}'
return do_ams_post(endpoint, path, body, access_token)
|
Get Media Service Encode Mezanine Asset.
Args:
access_token (str): A valid Azure authentication token.
processor_id (str): A Media Service Processor ID.
asset_id (str): A Media Service Asset ID.
output_assetname (str): A Media Service Asset Name.
json_profile (str): A Media Service JSON Profile.
Returns:
HTTP response. JSON body.
|
juraj-google-style
|
def is_cpu_target_available(target):
return _test_util.IsCPUTargetAvailable(target)
|
Indicates whether TensorFlow was built with support for a given CPU target.
Args:
target: The name of the CPU target whose support to check for.
Returns:
A boolean indicating whether TensorFlow was built with support for the
given CPU target.
This method should only be used in tests written with `tf.test.TestCase`. A
typical usage is to skip tests that should only run with a given target.
>>> class MyTest(tf.test.TestCase):
...
... def test_add_on_aarch64(self):
... if not tf.test.is_cpu_target_available('aarch64'):
... self.skipTest("test is only applicable on AArch64")
... @tf.function(jit_compile=True)
... def add(x, y):
... return tf.math.add(x, y)
...
... self.assertEqual(add(tf.ones(()), tf.ones(())), 2.0)
|
github-repos
|
def from_file(cls, fp, format_=None, fps=None, **kwargs):
if format_ is None:
text = fp.read()
fragment = text[:10000]
format_ = autodetect_format(fragment)
fp = io.StringIO(text)
impl = get_format_class(format_)
subs = cls()
subs.format = format_
subs.fps = fps
impl.from_file(subs, fp, format_, fps=fps, **kwargs)
return subs
|
Read subtitle file from file object.
See :meth:`SSAFile.load()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.load()`
or :meth:`SSAFile.from_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
Returns:
SSAFile
|
juraj-google-style
|
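A usage sketch for from_file above, assuming the class is the SSAFile named in its docstring and that the file is opened in text mode; the path is an example.
with open("movie.srt", encoding="utf-8") as fp:  # text mode, as the docstring requires
    subs = SSAFile.from_file(fp)                 # format autodetected from the first ~10000 characters
print(subs.format)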
def reverse_ad(node, wrt, preserve_result, check_dims):
if (not isinstance(node, gast.FunctionDef)):
raise TypeError
cfg.forward(node, cfg.Active(wrt))
ad = ReverseAD(wrt, preserve_result, check_dims)
(pri, adj) = ad.visit(node)
mod = gast.Module(body=[pri, adj])
mod = annotate.find_stacks(mod)
return (mod, ad.required, ad.stack)
|
Perform reverse-mode AD on an AST.
This function analyses the AST to determine which variables are active and
proceeds by taking the naive derivative. Before returning the primal and
adjoint it annotates push and pop statements as such.
Args:
node: A `FunctionDef` AST node.
wrt: A tuple of argument indices with respect to which we take the
derivative.
preserve_result: A boolean indicating whether the generated
derivative function should also return the original return value.
check_dims: A boolean indicating whether the seed derivatives should have
their dimensions checked to match their primal counterpart.
Returns:
mod: A `Module` node containing the naive primal and adjoint of the
function which can be fed to the `split` and `joint` functions.
required: A list of tuples of functions and argument indices. These
functions were called by the function but did not have an adjoint.
|
codesearchnet
|
def get(self, key, default) -> Union[(Uniform, UniformBlock, Subroutine, Attribute, Varying)]:
return self._members.get(key, default)
|
Returns a Uniform, UniformBlock, Subroutine, Attribute or Varying.
Args:
default: This is the value to be returned in case key does not exist.
Returns:
:py:class:`Uniform`, :py:class:`UniformBlock`, :py:class:`Subroutine`,
:py:class:`Attribute` or :py:class:`Varying`
|
codesearchnet
|
def trans_v(self, structure):
nsites = structure.num_sites
volume = structure.volume
natoms = structure.composition.num_atoms
weight = float(structure.composition.weight)
mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
if self.g_vrh < 0:
raise ValueError("k_vrh or g_vrh is negative, "
"sound velocity is undefined")
return (1e9 * self.g_vrh / mass_density) ** 0.5
|
Calculates transverse sound velocity (in SI units) using the
Voigt-Reuss-Hill average bulk modulus
Args:
structure: pymatgen structure object
Returns: transverse sound velocity (in SI units)
|
juraj-google-style
|
def assert_key_has_value(self, key, caller):
assert key, 'key parameter must be specified.'
self.assert_key_exists(key, caller)
if (self[key] is None):
raise KeyInContextHasNoValueError(f"context['{key}'] must have a value for {caller}.")
|
Assert that context contains key which also has a value.
Args:
key: validate this key exists in context AND has a value that isn't
None.
caller: string. calling function name - this used to construct
error messages
Raises:
KeyNotInContextError: Key doesn't exist
KeyInContextHasNoValueError: context[key] is None
AssertionError: if key is None
|
codesearchnet
|
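A behavioural sketch for assert_key_has_value above, assuming context is an instance of the dict-like class the method belongs to (hypothetical name).
context['region'] = 'eu-west-1'
context.assert_key_has_value('region', 'deploy_step')  # passes silently

context['bucket'] = None
context.assert_key_has_value('bucket', 'deploy_step')  # raises KeyInContextHasNoValueError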
def remove_object(self, file_path):
file_path = self.absnormpath(self._original_path(file_path))
if self._is_root_path(file_path):
self.raise_os_error(errno.EBUSY, file_path)
try:
dirname, basename = self.splitpath(file_path)
target_directory = self.resolve(dirname)
target_directory.remove_entry(basename)
except KeyError:
self.raise_io_error(errno.ENOENT, file_path)
except AttributeError:
self.raise_io_error(errno.ENOTDIR, file_path)
|
Remove an existing file or directory.
Args:
file_path: The path to the file relative to self.
Raises:
IOError: if file_path does not correspond to an existing file, or
if part of the path refers to something other than a directory.
OSError: if the directory is in use (eg, if it is '/').
|
juraj-google-style
|
def WriteScanContext(self, scan_context, scan_step=None):
if scan_step is not None:
print('Scan step: {0:d}'.format(scan_step))
print('Source type\t\t: {0:s}'.format(scan_context.source_type))
print('')
scan_node = scan_context.GetRootScanNode()
self.WriteScanNode(scan_context, scan_node)
print('')
|
Writes the source scanner context to stdout.
Args:
scan_context (SourceScannerContext): the source scanner context.
scan_step (Optional[int]): the scan step, where None represents no step.
|
juraj-google-style
|
def id_pools_ipv4_ranges(self):
if (not self.__id_pools_ipv4_ranges):
self.__id_pools_ipv4_ranges = IdPoolsIpv4Ranges(self.__connection)
return self.__id_pools_ipv4_ranges
|
Gets the IdPoolsIpv4Ranges API client.
Returns:
IdPoolsIpv4Ranges:
|
codesearchnet
|
def matches_hostname(cls, certificate: cryptography.x509.Certificate, hostname: str) -> None:
certificate_names = {'subject': (tuple([('commonName', name) for name in cls.get_common_names(certificate.subject)]),), 'subjectAltName': tuple([('DNS', name) for name in cls.get_dns_subject_alternative_names(certificate)])}
ssl.match_hostname(certificate_names, hostname)
|
Verify that the certificate was issued for the given hostname.
Raises:
CertificateError: If the certificate was not issued for the supplied hostname.
|
codesearchnet
|
def set_direct(self, address_value_dict):
with self._lock:
for (address, value) in address_value_dict.items():
self._validate_write(address)
if (address in self._state):
self._state[address].set_result(result=value)
else:
fut = _ContextFuture(address=address)
self._state[address] = fut
fut.set_result(result=value)
|
Called in the context manager's set method to either overwrite the
value for an address, or create a new future and immediately set a
value in the future.
Args:
address_value_dict (dict of str:bytes): The unique full addresses
with bytes to set at that address.
Raises:
AuthorizationException
|
codesearchnet
|
def train(self, X_train, Y_train, X_test, Y_test):
while True:
print(1)
time.sleep(1)
if random.randint(0, 9) >= 5:
break
|
Train and validate the LR on a train and test dataset
Args:
X_train (np.array): Training data
Y_train (np.array): Training labels
X_test (np.array): Test data
Y_test (np.array): Test labels
|
juraj-google-style
|
def _reference_info(references):
document_paths = []
reference_map = {}
for reference in references:
doc_path = reference._document_path
document_paths.append(doc_path)
reference_map[doc_path] = reference
return (document_paths, reference_map)
|
Get information about document references.
Helper for :meth:`~.firestore_v1beta1.client.Client.get_all`.
Args:
references (List[.DocumentReference, ...]): Iterable of document
references.
Returns:
Tuple[List[str, ...], Dict[str, .DocumentReference]]: A two-tuple of
* fully-qualified documents paths for each reference in ``references``
* a mapping from the paths to the original reference. (If multiple
``references`` contains multiple references to the same document,
that key will be overwritten in the result.)
|
codesearchnet
|
def attach(self, observer):
if not observer in self._observers:
self._observers.append(observer)
return self
|
Attach an observer.
Args:
observer (func): A function to be called when new messages arrive
Returns:
:class:`Stream`. Current instance to allow chaining
|
juraj-google-style
|
def dew_point_temperature(self, value=99.9):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `dew_point_temperature`'.format(value))
if (value <= (- 70.0)):
raise ValueError('value need to be greater -70.0 for field `dew_point_temperature`')
if (value >= 70.0):
raise ValueError('value need to be smaller 70.0 for field `dew_point_temperature`')
self._dew_point_temperature = value
|
Corresponds to IDD Field `dew_point_temperature`
Args:
value (float): value for IDD Field `dew_point_temperature`
Unit: C
value > -70.0
value < 70.0
Missing value: 99.9
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def get_changeset(changeset):
    url = 'https://www.openstreetmap.org/api/0.6/changeset/{}/download'.format(
        changeset
    )
return ET.fromstring(requests.get(url).content)
|
Get the changeset using the OSM API and return the content as a XML
ElementTree.
Args:
changeset: the id of the changeset.
|
juraj-google-style
|
def __init__(self,
title: Text,
value: Optional[Any] = None,
disabled: Optional[Text] = None,
checked: bool = False,
shortcut_key: Optional[Text] = None) -> None:
self.disabled = disabled
self.value = value if value is not None else title
self.title = title
self.checked = checked
if shortcut_key is not None:
self.shortcut_key = str(shortcut_key)
else:
self.shortcut_key = None
|
Create a new choice.
Args:
title: Text shown in the selection list.
value: Value returned, when the choice is selected.
disabled: If set, the choice can not be selected by the user. The
provided text is used to explain, why the selection is
disabled.
checked: Preselect this choice when displaying the options.
shortcut_key: Key shortcut used to select this item.
|
juraj-google-style
|
def vgg_layer(inputs,
nout,
kernel_size=3,
activation=tf.nn.leaky_relu,
padding="SAME",
is_training=True,
has_batchnorm=False,
scope=None):
with tf.variable_scope(scope):
net = tfl.conv2d(inputs, nout, kernel_size=kernel_size, padding=padding,
activation=None, name="conv")
if has_batchnorm:
net = tfl.batch_normalization(net, training=is_training, name="bn")
net = activation(net)
return net
|
A layer of VGG network with batch norm.
Args:
inputs: image tensor
nout: number of output channels
kernel_size: size of the kernel
activation: activation function
padding: padding of the image
is_training: whether it is training mode or not
has_batchnorm: whether batchnorm is applied or not
scope: variable scope of the op
Returns:
net: output of layer
|
juraj-google-style
|
def _Aff4Read(aff4_obj, offset, length):
length = length or (_Aff4Size(aff4_obj) - offset)
aff4_obj.Seek(offset)
return aff4_obj.Read(length)
|
Reads contents of given AFF4 file.
Args:
aff4_obj: An AFF4 stream instance to retrieve contents for.
offset: An offset to start the reading from.
length: A number of bytes to read. Reads the whole file if 0.
Returns:
Contents of specified AFF4 stream.
Raises:
TypeError: If `aff4_obj` is not an instance of AFF4 stream.
|
juraj-google-style
|
def _force_edges_active_move(self, state: _STATE) -> _STATE:
for _ in range(self._rand.randint(1, 4)):
state = self._force_edge_active_move(state)
return state
|
Move function which repeats _force_edge_active_move a few times.
Args:
state: Search state, not mutated.
Returns:
New search state which consists of incremental changes of the
original state.
|
juraj-google-style
|
def parse_query(query_str):
def _generate_match_all_fields_query():
stripped_query_str = ' '.join(query_str.replace(':', ' ').split())
return {'multi_match': {'query': stripped_query_str, 'fields': ['_all'], 'zero_terms_query': 'all'}}
if not isinstance(query_str, six.text_type):
query_str = six.text_type(query_str.decode('utf-8'))
logger.info('Parsing: "' + query_str + '\".')
parser = StatefulParser()
rst_visitor = RestructuringVisitor()
es_visitor = ElasticSearchVisitor()
try:
unrecognized_text, parse_tree = parser.parse(query_str, Query)
if unrecognized_text:
msg = 'Parser returned unrecognized text: "' + unrecognized_text + \
'" for query: "' + query_str + '".'
if query_str == unrecognized_text and parse_tree is None:
logger.warn(msg)
return _generate_match_all_fields_query()
else:
msg += 'Continuing with recognized parse tree.'
logger.warn(msg)
except SyntaxError as e:
logger.warn('Parser syntax error (' + six.text_type(e) + ') with query: "' + query_str +
'". Continuing with a match_all with the given query.')
return _generate_match_all_fields_query()
try:
restructured_parse_tree = parse_tree.accept(rst_visitor)
logger.debug('Parse tree: \n' + emit_tree_format(restructured_parse_tree))
except Exception as e:
logger.exception(
RestructuringVisitor.__name__ + " crashed" + (": " + six.text_type(e) + ".") if six.text_type(e) else '.'
)
return _generate_match_all_fields_query()
try:
es_query = restructured_parse_tree.accept(es_visitor)
except Exception as e:
logger.exception(
ElasticSearchVisitor.__name__ + " crashed" + (": " + six.text_type(e) + ".") if six.text_type(e) else '.'
)
return _generate_match_all_fields_query()
if not es_query:
return _generate_match_all_fields_query()
return es_query
|
Drives the whole logic, by parsing, restructuring and finally, generating an ElasticSearch query.
Args:
query_str (six.text_types): the given query to be translated to an ElasticSearch query
Returns:
six.text_types: Return an ElasticSearch query.
Notes:
In case there's an error, an ElasticSearch `multi_match` query is generated with its `query` value, being the
query_str argument.
|
juraj-google-style
|
def default(self):
cmd = self.command_builder('ntp source', default=True)
return self.configure(cmd)
|
Default the NTP source entry from the node.
Returns:
True if the operation succeeds, otherwise False.
|
codesearchnet
|
def charges(self, num, charge_id=None, **kwargs):
baseuri = self._BASE_URI + "company/{}/charges".format(num)
if charge_id is not None:
baseuri += "/{}".format(charge_id)
res = self.session.get(baseuri, params=kwargs)
else:
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res
|
Search for charges against a company by company number.
Args:
num (str): Company number to search on.
charge_id (Optional[str]): ID of a specific charge filing to retrieve.
kwargs (dict): additional keywords passed into
requests.session.get params keyword.
|
juraj-google-style
|
def download_archive_artifact_bundle(self, id_or_uri, file_path):
uri = self.BACKUP_ARCHIVE_PATH + '/' + extract_id_from_uri(id_or_uri)
return self._client.download(uri, file_path)
|
Downloads an archive for the Artifact Bundle.
Args:
id_or_uri: ID or URI of the Artifact Bundle.
file_path(str): Destination file path.
Returns:
bool: Successfully downloaded.
|
juraj-google-style
|
def stop_gradient(variables):
if isinstance(variables, (list, tuple)):
return map(array_ops.stop_gradient, variables)
return array_ops.stop_gradient(variables)
|
Returns `variables` but with zero gradient w.r.t. every other variable.
Args:
variables: Tensor or list of tensors to consider constant with respect
to any other variable.
Returns:
A single tensor or a list of tensors (depending on the passed argument)
that has no gradient with respect to any other variable.
|
github-repos
|
def get(self, key):
key = self._service_key(key)
return self._service_ops['get'](key)
|
Return the object in `service` named by `key` or None.
Args:
key: Key naming the object to retrieve.
Returns:
object or None
|
codesearchnet
|
def Print(self, x, data, message, **kwargs):
tf.logging.info('PlacementMeshImpl::Print')
new_slices = x.tensor_list[:]
with tf.device(self._devices[0]):
new_slices[0] = tf.Print(new_slices[0], [t for d in data for t in d.tensor_list], message, **kwargs)
return self.LaidOutTensor(new_slices)
|
call tf.Print.
Args:
x: a LaidOutTensor
data: a list of LaidOutTensor
message: a string
**kwargs: keyword arguments to tf.print
Returns:
a LaidOutTensor
|
codesearchnet
|
def obs_space_info(obs_space):
if isinstance(obs_space, gym.spaces.Dict):
assert isinstance(obs_space.spaces, OrderedDict)
subspaces = obs_space.spaces
else:
subspaces = {None: obs_space}
keys = []
shapes = {}
dtypes = {}
for (key, box) in subspaces.items():
keys.append(key)
shapes[key] = box.shape
dtypes[key] = box.dtype
return (keys, shapes, dtypes)
|
Get dict-structured information about a gym.Space.
Returns:
A tuple (keys, shapes, dtypes):
keys: a list of dict keys.
shapes: a dict mapping keys to shapes.
dtypes: a dict mapping keys to dtypes.
|
codesearchnet
|
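A worked example for obs_space_info above using the gym spaces it is written against; the space shapes are illustrative.
from collections import OrderedDict
import numpy as np
import gym

dict_space = gym.spaces.Dict(OrderedDict([
    ('image', gym.spaces.Box(low=0, high=255, shape=(84, 84, 3), dtype=np.uint8)),
    ('reward', gym.spaces.Box(low=-1.0, high=1.0, shape=(1,), dtype=np.float32)),
]))
keys, shapes, dtypes = obs_space_info(dict_space)
# keys == ['image', 'reward'], shapes['image'] == (84, 84, 3)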
def is_layer_block(node):
match = LAYER_SUFFIX_RE.match(node.get('module_path', ''))
if not match or not node.get('children'):
return False
number = match.group(2)
return any((f'.{number}.' in child.get('module_path', '') for child in node['children']))
|
Checks whether a node represents a layer block with submodules.
Args:
node (`dict`): A node from the call tree.
Returns:
`bool`: Whether the node is a layer block.
|
github-repos
|
def traverse_postorder(self, leaves=True, internal=True):
for node in self.root.traverse_postorder(leaves=leaves, internal=internal):
yield node
|
Perform a postorder traversal of the ``Node`` objects in this ``Tree``
Args:
``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``
``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
|
juraj-google-style
|
def update_offset(self, new_offset):
self.offset = new_offset
self.data_points = self._data_points[self.offset:]
self.timestamps = self._timestamps[self.offset:]
|
Updates how many data points to skip in calculations.
Always use this function to update offset instead of directly setting
self.offset.
Args:
new_offset: The new offset.
|
codesearchnet
|
def join(*paths):
absolute = False
relpaths = []
for p in paths:
if p:
if (p[0] == '/'):
del relpaths[:]
absolute = True
relpaths.append(p)
path = normpath('/'.join(relpaths))
if absolute:
path = abspath(path)
return path
|
Join any number of paths together.
Arguments:
*paths (str): Paths to join, given as positional arguments.
Returns:
str: The joined path.
Example:
>>> join('foo', 'bar', 'baz')
'foo/bar/baz'
>>> join('foo/bar', '../baz')
'foo/baz'
>>> join('foo/bar', '/baz')
'/baz'
|
codesearchnet
|
def _PushParameterListState(self, newline):
current = self.next_token
previous = current.previous_token
if _IsFunctionDefinition(previous):
first_param_column = previous.total_length + self.stack[-2].indent
self.param_list_stack.append(object_state.ParameterListState(previous, newline, first_param_column))
|
Push a new parameter list state for a function definition.
Args:
newline: Whether the current token is to be added on a newline.
|
github-repos
|
def save_image(tensor, filename, nrow=8, padding=2, pad_value=0):
from PIL import Image
grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value)
im = Image.fromarray(pre_pillow_float_img_process(grid))
im.save(filename)
|
Save a given Tensor into an image file.
Args:
tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
saves the tensor as a grid of images by calling ``make_grid``.
**kwargs: Other arguments are documented in ``make_grid``.
|
juraj-google-style
|
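A usage sketch for save_image above, assuming a float image batch in NCHW layout and that the make_grid and pre_pillow_float_img_process helpers it calls are importable from the same module; the tensor is random data.
import torch

batch = torch.rand(16, 3, 64, 64)                    # 16 RGB images with values in [0, 1]
save_image(batch, "preview.png", nrow=4, padding=2)  # written as a 4x4 grid via Pillow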
def process_remote_sources(raw_config, environment=None):
config = yaml.safe_load(raw_config)
if (config and config.get('package_sources')):
processor = SourceProcessor(sources=config['package_sources'], stacker_cache_dir=config.get('stacker_cache_dir'))
processor.get_package_sources()
if processor.configs_to_merge:
for i in processor.configs_to_merge:
logger.debug('Merging in remote config "%s"', i)
remote_config = yaml.safe_load(open(i))
config = merge_map(remote_config, config)
if (not environment):
environment = {}
return render(str(config), environment)
return raw_config
|
Stage remote package sources and merge in remote configs.
Args:
raw_config (str): the raw stacker configuration string.
environment (dict, optional): any environment values that should be
passed to the config
Returns:
str: the raw stacker configuration string
|
codesearchnet
|
def commits(self, **kwargs):
path = ('%s/%s/commits' % (self.manager.path, self.get_id()))
data_list = self.manager.gitlab.http_list(path, as_list=False, **kwargs)
manager = ProjectCommitManager(self.manager.gitlab, parent=self.manager._parent)
return RESTObjectList(manager, ProjectCommit, data_list)
|
List the merge request commits.
Args:
all (bool): If True, return all the items, without pagination
per_page (int): Number of items to retrieve per request
page (int): ID of the page to return (starts with page 1)
as_list (bool): If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the list could not be retrieved
Returns:
RESTObjectList: The list of commits
|
codesearchnet
|
def savefits(cube, fitsname, **kwargs):
dropdeg = kwargs.pop('dropdeg', False)
ndim = len(cube.dims)
FITSINFO = get_data('decode', 'data/fitsinfo.yaml')
hdrdata = yaml.load(FITSINFO, dc.utils.OrderedLoader)
if ndim == 2:
header = fits.Header(hdrdata['dcube_2d'])
data = cube.values.T
elif ndim == 3:
if dropdeg:
header = fits.Header(hdrdata['dcube_2d'])
data = cube.values[:, :, 0].T
else:
header = fits.Header(hdrdata['dcube_3d'])
kidfq = cube.kidfq.values
freqrange = ~np.isnan(kidfq)
orderedfq = np.argsort(kidfq[freqrange])
newcube = cube[:, :, orderedfq]
data = newcube.values.T
else:
raise TypeError(ndim)
if cube.coordsys == 'AZEL':
header.update({'CTYPE1': 'dAZ', 'CTYPE2': 'dEL'})
elif cube.coordsys == 'RADEC':
header.update({'OBSRA': float(cube.xref), 'OBSDEC': float(cube.yref)})
else:
pass
header.update({'CRVAL1': float(cube.x[0]),
'CDELT1': float(cube.x[1] - cube.x[0]),
'CRVAL2': float(cube.y[0]),
'CDELT2': float(cube.y[1] - cube.y[0]),
'DATE': datetime.now(timezone('UTC')).isoformat()})
if (ndim == 3) and (not dropdeg):
header.update({'CRVAL3': float(newcube.kidfq[0]),
'CDELT3': float(newcube.kidfq[1] - newcube.kidfq[0])})
fitsname = str(Path(fitsname).expanduser())
fits.writeto(fitsname, data, header, **kwargs)
logger.info('{} has been created.'.format(fitsname))
|
Save a cube to a 3D-cube FITS file.
Args:
cube (xarray.DataArray): Cube to be saved.
fitsname (str): Name of output FITS file.
kwargs (optional): Other arguments common with astropy.io.fits.writeto().
|
juraj-google-style
|
def scheme(name, bins, bin_method='quantiles'):
return {'name': name, 'bins': bins, 'bin_method': (bin_method if isinstance(bins, int) else '')}
|
Return a custom scheme based on CARTOColors.
Args:
name (str): Name of a CARTOColor.
bins (int or iterable): If an `int`, the number of bins for classifying
data. CARTOColors have 7 bins max for quantitative data, and 11 max
for qualitative data. If `bins` is a `list`, it is the upper range
for classifying data. E.g., `bins` can be of the form ``(10, 20, 30,
40, 50)``.
bin_method (str, optional): One of methods in :obj:`BinMethod`.
Defaults to ``quantiles``. If `bins` is an iterable, those values
are used as the bin breaks and this argument is ignored.
.. Warning::
Input types are particularly sensitive in this function, and little
feedback is given for errors. ``name`` and ``bin_method`` arguments
are case-sensitive.
|
codesearchnet
|
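Two worked examples for scheme above; note from the code that passing an iterable for bins blanks out bin_method.
scheme('Sunset', 5)
# {'name': 'Sunset', 'bins': 5, 'bin_method': 'quantiles'}

scheme('Sunset', (10, 20, 30, 40, 50), bin_method='equal')
# {'name': 'Sunset', 'bins': (10, 20, 30, 40, 50), 'bin_method': ''}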
def dumps(graphs, triples=False, cls=PENMANCodec, **kwargs):
codec = cls(**kwargs)
strings = [codec.encode(g, triples=triples) for g in graphs]
return '\n\n'.join(strings)
|
Serialize each graph in *graphs* to the PENMAN format.
Args:
graphs: an iterable of Graph objects
triples: if True, write graphs as triples instead of as PENMAN
Returns:
the string of serialized graphs
|
juraj-google-style
|
def get(object_ids):
if isinstance(object_ids, (tuple, np.ndarray)):
return ray.get(list(object_ids))
elif isinstance(object_ids, dict):
keys_to_get = [k for (k, v) in object_ids.items() if isinstance(v, ray.ObjectID)]
ids_to_get = [v for (k, v) in object_ids.items() if isinstance(v, ray.ObjectID)]
values = ray.get(ids_to_get)
result = object_ids.copy()
for (key, value) in zip(keys_to_get, values):
result[key] = value
return result
else:
return ray.get(object_ids)
|
Get a single or a collection of remote objects from the object store.
This method is identical to `ray.get` except it adds support for tuples,
ndarrays and dictionaries.
Args:
object_ids: Object ID of the object to get, a list, tuple, ndarray of
object IDs to get or a dict of {key: object ID}.
Returns:
A Python object, a list of Python objects or a dict of {key: object}.
|
codesearchnet
|
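A usage sketch for the get wrapper above, assuming a running Ray session so that ray.put yields object IDs; non-ObjectID values in a dict are passed through unchanged.
import ray
ray.init()

mixed = {'a': ray.put(1), 'b': ray.put(2), 'c': 3}  # 'c' is already a plain value
get(mixed)                                          # {'a': 1, 'b': 2, 'c': 3}
get((ray.put('x'), ray.put('y')))                   # ['x', 'y'] - tuples come back as a list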
def change_extension(self, filepath, new_extension):
filename, ext = os.path.splitext(filepath)
return '.'.join([filename, new_extension])
|
Change final filename extension.
Args:
filepath (str): A file path (relative or absolute).
new_extension (str): New extension name (without leading dot) to
apply.
Returns:
str: Filepath with new extension.
|
juraj-google-style
|
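Two worked examples for change_extension above; the new extension is given without a leading dot and only the final extension is replaced.
change_extension('docs/report.txt', 'md')  # 'docs/report.md'
change_extension('archive.tar.gz', 'zip')  # 'archive.tar.zip'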
def create_branch(profile, name, branch_off):
branch_off_sha = get_branch_sha(profile, branch_off)
ref = "heads/" + name
data = refs.create_ref(profile, ref, branch_off_sha)
return data
|
Create a branch.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
name
The name of the new branch.
branch_off
The name of a branch to create the new branch off of.
Returns:
A dict with data about the new branch.
|
juraj-google-style
|
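A hedged sketch of calling `create_branch`; how the `profile` object is built is assumed here (the docstring only says it comes from ``simplygithub.authentication.profile``), and the branch names are placeholders.
# 'profile' is assumed to carry the repo and token, as described in the docstring.
new_branch = create_branch(profile, 'feature/docs', branch_off='master')
print(new_branch)   # dict describing the newly created ref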
def include(filename, hosts=False, when=True):
if (not pyinfra.is_cli):
raise PyinfraError('local.include is only available in CLI mode.')
if (not when):
return
if (hosts is not False):
hosts = ensure_host_list(hosts, inventory=pseudo_state.inventory)
if (pseudo_host not in hosts):
return
if pseudo_state.deploy_dir:
filename = path.join(pseudo_state.deploy_dir, filename)
frameinfo = get_caller_frameinfo()
logger.debug('Including local file: {0}'.format(filename))
try:
from pyinfra_cli.config import extract_file_config
from pyinfra_cli.util import exec_file
config_data = extract_file_config(filename)
kwargs = {key.lower(): value for (key, value) in six.iteritems(config_data) if (key in ['SUDO', 'SUDO_USER', 'SU_USER', 'PRESERVE_SUDO_ENV', 'IGNORE_ERRORS'])}
with pseudo_state.deploy(filename, kwargs, None, frameinfo.lineno, in_deploy=False):
exec_file(filename)
except IOError as e:
raise PyinfraError('Could not include local file: {0}\n{1}'.format(filename, e))
|
Executes a local python file within the ``pyinfra.pseudo_state.deploy_dir``
directory.
Args:
hosts (string, list): group name or list of hosts to limit this include to
when (bool): indicate whether to trigger operations in this include
|
codesearchnet
|
def _CreateShapesFolder(self, schedule, doc):
if (not schedule.GetShapeList()):
return None
shapes_folder = self._CreateFolder(doc, 'Shapes')
shapes = list(schedule.GetShapeList())
shapes.sort(key=(lambda x: x.shape_id))
for shape in shapes:
placemark = self._CreatePlacemark(shapes_folder, shape.shape_id)
self._CreateLineStringForShape(placemark, shape)
if self.shape_points:
self._CreateShapePointFolder(shapes_folder, shape)
return shapes_folder
|
Create a KML Folder containing all the shapes in a schedule.
The folder contains a placemark for each shape. If there are no shapes in
the schedule then the folder is not created and None is returned.
Args:
schedule: The transitfeed.Schedule instance.
doc: The KML Document ElementTree.Element instance.
Returns:
The Folder ElementTree.Element instance or None.
|
codesearchnet
|
def do_import(self, keys, values, name=None):
with tf.name_scope(name or '%s_lookup_table_import' % self._name):
op = gen_simple_hash_table_op.examples_simple_hash_table_import(self.resource_handle, keys, values)
return op
|
Import all `key` and `value` pairs.
(Note that "import" is a python reserved word, so it cannot be the name of
a method.)
Args:
keys: Tensor of all keys.
values: Tensor of all values.
name: A name for the operation (optional).
Returns:
A tuple of two tensors, the first with the `keys` and the second with
the `values`.
|
github-repos
|
def _find_countour_yaml(start, checked, names=None):
extensions = []
if names:
for name in names:
if not os.path.splitext(name)[1]:
extensions.append(name + ".yaml")
extensions.append(name + ".yml")
yaml_names = (names or []) + CONTOUR_YAML_NAMES + extensions
directory = start
while directory not in checked:
checked.add(directory)
for fs_yaml_name in yaml_names:
yaml_path = os.path.join(directory, fs_yaml_name)
if os.path.exists(yaml_path):
return yaml_path
directory = os.path.dirname(directory)
return
|
Traverse the directory tree identified by start
until a directory already in checked is encountered or the path
of countour.yaml is found.
Checked is present both to make the loop termination easy
to reason about and so the same directories do not get
rechecked.
Args:
start: the path to start looking in and work upward from
checked: the set of already checked directories
Returns:
the path of the countour.yaml file or None if it is not found
|
juraj-google-style
|
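A small sketch of the upward search, assuming the module-level ``CONTOUR_YAML_NAMES`` constant is defined alongside the function; ``'myproject'`` is a placeholder name.
import os

# Look for countour.yaml/.yml (or myproject.yaml/.yml) from the cwd upward.
found = _find_countour_yaml(os.getcwd(), checked=set(), names=['myproject'])
print(found or 'no countour.yaml found')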
def _find_address_range(addresses):
first = last = addresses[0]
last_index = 0
for ip in addresses[1:]:
if (ip._ip == (last._ip + 1)):
last = ip
last_index += 1
else:
break
return (first, last, last_index)
|
Find a sequence of addresses.
Args:
addresses: a list of IPv4 or IPv6 addresses.
Returns:
A tuple containing the first and last IP addresses in the sequence,
and the index of the last IP address in the sequence.
|
codesearchnet
|
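An illustration using the standard-library ``ipaddress`` module; the helper relies on the private ``_ip`` integer attribute that ``ipaddress`` address objects expose.
import ipaddress

addrs = [ipaddress.ip_address(a) for a in ('10.0.0.1', '10.0.0.2', '10.0.0.3', '10.0.0.9')]
first, last, last_index = _find_address_range(addrs)
print(first, last, last_index)   # 10.0.0.1 10.0.0.3 2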
def _init_from_proto(self, variable_def, import_scope=None):
assert isinstance(variable_def, variable_pb2.VariableDef)
g = ops.get_default_graph()
self._variable = g.as_graph_element(ops.prepend_name_scope(variable_def.variable_name, import_scope=import_scope))
self._name = self._variable.name
self._initializer_op = g.as_graph_element(ops.prepend_name_scope(variable_def.initializer_name, import_scope=import_scope))
if hasattr(variable_def, 'initial_value_name') and variable_def.initial_value_name:
self._initial_value = g.as_graph_element(ops.prepend_name_scope(variable_def.initial_value_name, import_scope=import_scope))
else:
self._initial_value = None
synchronization, aggregation, trainable = variables.validate_synchronization_aggregation_trainable(variable_def.synchronization, variable_def.aggregation, variable_def.trainable, variable_def.variable_name)
self._synchronization = synchronization
self._aggregation = aggregation
self._trainable = trainable
self._snapshot = g.as_graph_element(ops.prepend_name_scope(variable_def.snapshot_name, import_scope=import_scope))
if variable_def.HasField('save_slice_info_def'):
self._save_slice_info = variables.Variable.SaveSliceInfo(save_slice_info_def=variable_def.save_slice_info_def, import_scope=import_scope)
else:
self._save_slice_info = None
self._caching_device = None
self._constraint = None
|
Recreates the Variable object from a `VariableDef` protocol buffer.
Args:
variable_def: `VariableDef` protocol buffer, describing a variable whose
nodes already exists in the graph.
import_scope: Optional `string`. Name scope to add.
|
github-repos
|
def _create_uninitialized_mirrored_tpu_replicated_variables(**kwargs):
dtype = kwargs.get('dtype', None)
shape = kwargs.get('shape', None)
initial_value = kwargs.get('initial_value', None)
if initial_value is None:
return _create_mirrored_tpu_replicated_variables(**kwargs)
with maybe_init_scope():
if initial_value is not None:
if callable(initial_value):
initial_value = initial_value()
initial_value = ops.convert_to_tensor(initial_value, dtype=dtype)
kwargs['initial_value'] = initial_value
if dtype is None:
kwargs['dtype'] = kwargs['initial_value'].dtype
if shape is None:
kwargs['shape'] = kwargs['initial_value'].shape
mirrored_replicated_var_list = []
for replica_id in range(num_replicas):
replicated_var_list = []
for logic_core_id in range(num_cores_per_replica):
with ops.device(self._tpu_devices[replica_id][logic_core_id]):
v = uninitialized_variable_creator(**kwargs)
replicated_var_list.append(v)
replica_name = '{}/r:{}'.format(kwargs['name'], replica_id)
tpu_replicated_var = tpu_replicated_variable.TPUReplicatedVariable(variables=replicated_var_list, name=replica_name)
mirrored_replicated_var_list.append(tpu_replicated_var)
return mirrored_replicated_var_list
|
Returns a list of `TPUReplicatedVariable`s.
The list consists of `num_replicas` `TPUReplicatedVariable`s and can be
used to initialize a `TPUMirroredVariable`. Each `TPUReplicatedVariable`
contains a list of `tf.Variable`s which are replicated to
`num_cores_per_replica` logical cores to enable XLA SPMD compilation.
Args:
**kwargs: the keyword arguments for creating a variable
|
github-repos
|
def generate_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes):
batch_size = enc_output.shape[0]
proposals = []
current_position = 0
for level, (height, width) in enumerate(spatial_shapes):
mask_flatten_ = padding_mask[:, current_position:current_position + height * width]
mask_flatten_ = mask_flatten_.view(batch_size, height, width, 1)
valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
grid_y, grid_x = meshgrid(torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device), torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device), indexing='ij')
grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2)
grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale
width_height = torch.ones_like(grid) * 0.05 * 2.0 ** level
proposal = torch.cat((grid, width_height), -1).view(batch_size, -1, 4)
proposals.append(proposal)
current_position += height * width
output_proposals = torch.cat(proposals, 1)
output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
output_proposals = torch.log(output_proposals / (1 - output_proposals))
output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float('inf'))
output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))
object_query = enc_output
object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0))
object_query = object_query.masked_fill(~output_proposals_valid, float(0))
object_query = self.enc_output_norm(self.enc_output(object_query))
return (object_query, output_proposals)
|
Generate the encoder output proposals from encoded enc_output.
Args:
enc_output (`torch.Tensor[batch_size, sequence_length, hidden_size]`): Output of the encoder.
padding_mask (`torch.Tensor[batch_size, sequence_length]`): Padding mask for `enc_output`.
spatial_shapes (`torch.Tensor[num_feature_levels, 2]`): Spatial shapes of the feature maps.
Returns:
`tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction.
- object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used to
directly predict a bounding box. (without the need of a decoder)
- output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse
sigmoid.
|
github-repos
|
def handle_server_error(error: Exception) -> ResponseReturnValue:
return (DQMResponse(name=error.__class__.__name__, description=str(error), code=500), 500)
|
DQM Server Error Response.
Args:
* error: Server error
Returns:
* DQMResponse for the error with a 500 status code
|
github-repos
|
def with_device(
self,
new_device: devices.Device,
qubit_mapping: Callable[[ops.Qid], ops.Qid] = lambda e: e,
) -> 'Circuit':
return Circuit(
moments=[ops.Moment(operation.transform_qubits(qubit_mapping)
for operation in moment.operations)
for moment in self._moments],
device=new_device
)
|
Maps the current circuit onto a new device, and validates.
Args:
new_device: The new device that the circuit should be on.
qubit_mapping: How to translate qubits from the old device into
qubits on the new device.
Returns:
The translated circuit.
|
juraj-google-style
|
def ParseNSSwitchConf(nsswitch_filename):
with open(nsswitch_filename, 'r') as nsswitch_file:
nsswitch = {}
map_re = re.compile('^([a-z]+): *(.*)$')
for line in nsswitch_file:
match = map_re.match(line)
if match:
sources = match.group(2).split()
nsswitch[match.group(1)] = sources
return nsswitch
|
Parse /etc/nsswitch.conf and return the sources for each map.
Args:
nsswitch_filename: Full path to an nsswitch.conf to parse. See manpage
nsswitch.conf(5) for full details on the format expected.
Returns:
a dictionary keyed by map names and containing a list of sources
for each map.
|
github-repos
|
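A self-contained sketch that writes a small nsswitch.conf to a temporary file and parses it.
import tempfile

sample = 'passwd: files ldap\ngroup: files ldap\nhosts: files dns\n'
with tempfile.NamedTemporaryFile('w', suffix='.conf', delete=False) as tmp:
    tmp.write(sample)
print(ParseNSSwitchConf(tmp.name))
# {'passwd': ['files', 'ldap'], 'group': ['files', 'ldap'], 'hosts': ['files', 'dns']}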
def format_formula(formula):
formatted_formula = ''
number_format = ''
for (i, s) in enumerate(formula):
if s.isdigit():
if (not number_format):
number_format = '_{'
number_format += s
if (i == (len(formula) - 1)):
number_format += '}'
formatted_formula += number_format
else:
if number_format:
number_format += '}'
formatted_formula += number_format
number_format = ''
formatted_formula += s
return ('$%s$' % formatted_formula)
|
Converts str of chemical formula into
latex format for labelling purposes
Args:
formula (str): Chemical formula
|
codesearchnet
|
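Two example conversions with the function above; each digit run becomes a LaTeX subscript.
print(format_formula('LiFePO4'))   # $LiFePO_{4}$
print(format_formula('Fe2O3'))     # $Fe_{2}O_{3}$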
def restore_app_connection(self, port=None):
self.host_port = (port or utils.get_available_host_port())
self._adb.forward([('tcp:%d' % self.host_port), ('tcp:%d' % self.device_port)])
try:
self.connect()
except:
self.log.exception('Failed to re-connect to app.')
raise jsonrpc_client_base.AppRestoreConnectionError(self._ad, ('Failed to restore app connection for %s at host port %s, device port %s' % (self.package, self.host_port, self.device_port)))
self._proc = None
self._restore_event_client()
|
Restores the app after device got reconnected.
Instead of creating a new instance of the client:
- Uses the given port (or find a new available host_port if none is
given).
- Tries to connect to remote server with selected port.
Args:
port: If given, this is the host port from which to connect to remote
device port. If not provided, find a new available port as host
port.
Raises:
AppRestoreConnectionError: When the app was not able to be started.
|
codesearchnet
|
def __call__(self, image):
frame_height = image.shape[0]
frame_width = image.shape[1]
faces = self.find_faces(image, self.draw_box)
for x, y, w, h in faces:
hat = self.hat.copy()
hat_width = int(w * self.w_offset)
hat_height = int(hat_width * hat.shape[0] / hat.shape[1])
hat = cv2.resize(hat, (hat_width, hat_height))
hat_left = 0
hat_top = 0
hat_bottom = hat_height
hat_right = hat_width
y0 = y - hat_height + self.y_offset
if y0 < 0:
hat_top = abs(y0)
y0 = 0
y1 = y0 + hat_height - hat_top
if y1 > frame_height:
hat_bottom = hat_height - (y1 - frame_height)
y1 = frame_height
x0 = x + self.x_offset
if x0 < 0:
hat_left = abs(x0)
x0 = 0
x1 = x0 + hat_width - hat_left
if x1 > frame_width:
hat_right = hat_width - (x1 - frame_width)
x1 = frame_width
for c in range(0, 3):
hat_slice = hat[hat_top:hat_bottom, hat_left:hat_right, c] * \
(hat[hat_top:hat_bottom, hat_left:hat_right, 3] / 255.0)
bg_slice = image[y0:y1, x0:x1, c] * \
(1.0 - hat[hat_top:hat_bottom, hat_left:hat_right, 3]
/ 255.0)
image[y0:y1, x0:x1, c] = hat_slice + bg_slice
return image
|
Draws a hat on top of detected faces inside the image.
Args:
image: The image.
Returns:
The image with a hat.
|
juraj-google-style
|
def sonos_uri_from_id(self, item_id):
item_id = quote_url(item_id.encode('utf-8'))
account = self.account
# append the service id and the account serial number as query parameters
result = 'soco://{0}?sid={1}&sn={2}'.format(item_id, self.service_id, account.serial_number)
return result
|
Get a uri which can be sent for playing.
Args:
item_id (str): The unique id of a playable item for this music
service, such as that returned in the metadata from
`get_metadata`, eg ``spotify:track:2qs5ZcLByNTctJKbhAZ9JE``
Returns:
str: A URI of the form: ``soco://spotify%3Atrack
%3A2qs5ZcLByNTctJKbhAZ9JE?sid=2311&sn=1`` which encodes the
``item_id``, and relevant data from the account for the music
service. This URI can be sent to a Sonos device for playing,
and the device itself will retrieve all the necessary metadata
such as title, album etc.
|
codesearchnet
|
def checkStatus(self):
self.checkAccount()
data = {'userid': self.user_id,
'useridx': self.useridx
}
r = self.session.post(nurls['checkStatus'], data = data)
p = re.compile(r'\<message\>(?P<message>.+)\</message\>')
message = p.search(r.text).group('message')
if message == 'success':
return True
else:
return False
|
Check status
Args:
Returns:
True: Success
False: Failed
|
juraj-google-style
|
def GetAttributeContainerByIndex(self, index):
if (index < 0):
raise IndexError('Unsupported negative index value: {0:d}.'.format(index))
if (index < len(self._list)):
return self._list[index]
return None
|
Retrieves a specific serialized attribute container from the list.
Args:
index (int): attribute container index.
Returns:
bytes: serialized attribute container data or None if not available.
Raises:
IndexError: if the index is less than zero.
|
codesearchnet
|
def _FindAugmentingEdge(self, queue):
for y in (v for v in self.right - self.t if self.slack[v] == 0):
if y not in self.matches:
return (True, self.slackx[y], y)
self.t.add(y)
if self.matches[y] not in self.s:
queue.append(self.matches[y])
self._AddToTree(self.matches[y], self.slackx[y])
return (False, None, None)
|
Find a final edge for an augmenting path after updating labels.
At least one new edge should have been added to the equality subgraph, so
we check if any new edges will create an augmenting path.
Args:
queue: Queue for performing BFS traversal.
Returns:
found: True if path was found.
x: Left vertex of final path edge.
y: Right vertex of final path edge.
|
github-repos
|
def __init__(self, observations, sources, provisional_name):
self.mpc_observations = {}
self.observations = observations
self.sys_header = None
self.sources = [astrom.Source(reading_list, provisional_name) for reading_list in sources]
|
Constructs a new astronomy data set object.
Args:
observations: list(Observations)
The observations that are part of the data set.
|
juraj-google-style
|
def add_arguments(self, parser):
parser.add_argument('-p', '--product', action='store_true', help='print the production information')
parser.add_argument('-j', '--jtag', action='store_true', help='print the JTAG pin status')
return self.add_common_arguments(parser, False)
|
Adds the information commands to the parser.
Args:
self (InfoCommand): the ``InfoCommand`` instance
parser (argparse.ArgumentParser): the parser to add the arguments to
Returns:
``None``
|
codesearchnet
|
def get_length(alt_len, ref_len, category, pos, end, svtype=None, svlen=None):
length = -1
if category in ('snv', 'indel', 'cancer'):
if ref_len == alt_len:
length = alt_len
else:
length = abs(ref_len - alt_len)
elif category == 'sv':
if svtype == 'bnd':
length = int(10e10)
else:
if svlen:
length = abs(int(svlen))
elif end:
if end != pos:
length = end - pos
return length
|
Return the length of a variant
Args:
alt_len(int)
ref_len(int)
category(str)
svtype(str)
svlen(int)
|
juraj-google-style
|
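Illustrative calls covering the SNV/indel and SV branches of the function above; coordinates and lengths are made up.
print(get_length(alt_len=1, ref_len=1, category='snv', pos=100, end=100))      # 1
print(get_length(alt_len=4, ref_len=1, category='indel', pos=100, end=100))    # 3
print(get_length(1, 1, 'sv', 100, 5100, svtype='del', svlen=-5000))            # 5000
print(get_length(1, 1, 'sv', 100, 100, svtype='bnd'))                          # 100000000000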
def op_functions_and_classes(ops_module):
for op_class_name in dir(ops_module):
op_class = getattr(ops_module, op_class_name)
if isinstance(op_class, type) and Operation in op_class.__mro__:
op_function_name = to_snake_case(op_class_name)
op_function_name = {'batch_norm': 'batch_normalization', 'rms_norm': 'rms_normalization', 'search_sorted': 'searchsorted'}.get(op_function_name, op_function_name)
op_function = getattr(ops_module, op_function_name, None)
if op_function is not None:
yield (op_function, op_class)
|
Enumerate pairs of op function and op classes in a module.
Will return for instance `(expand_dims, ExpandDims)`, `(sum, Sum)`, ...
Args:
ops_module: the module to explore.
Returns:
iterable returning tuples with function and class pairs.
|
github-repos
|
def get_params(self, deep=True):
params = {'weights': self.coef_, 'bias': self.intercept_}
if deep:
for (key, value) in self.B.items():
params[('b_' + str(key))] = value
return params
|
Get parameters for the estimator.
Args:
deep (boolean, optional): If True, will return the parameters for this estimator and contained subobjects that are estimators.
Returns:
params: mapping of string to any. Parameter names mapped to their values.
|
codesearchnet
|