
code | docstring | source
---|---|---|
def CreateRetryTask(self):
with self._lock:
abandoned_task = self._GetTaskPendingRetry()
if (not abandoned_task):
return None
retry_task = abandoned_task.CreateRetryTask()
logger.debug('Retrying task {0:s} as {1:s}.'.format(abandoned_task.identifier, retry_task.identifier))
self._tasks_queued[retry_task.identifier] = retry_task
self._total_number_of_tasks += 1
self.SampleTaskStatus(retry_task, 'created_retry')
return retry_task | Creates a task to retry a previously abandoned task.
Returns:
Task: a task that was abandoned but should be retried or None if there are
no abandoned tasks that should be retried. | codesearchnet |
def hr_dp010(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} needs to be of type float for field `hr_dp010`'.format(value))
self._hr_dp010 = value | Corresponds to IDD Field `hr_dp010`
humidity ratio corresponding to the dew-point temperature at the
1.0% annual cumulative frequency of occurrence,
calculated at the standard atmospheric pressure at the elevation of the station
Args:
value (float): value for IDD Field `hr_dp010`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | codesearchnet |
def upload_entities_tsv(namespace, workspace, entities_tsv):
if isinstance(entities_tsv, string_types):
with open(entities_tsv, 'r') as tsv:
entity_data = tsv.read()
elif isinstance(entities_tsv, io.StringIO):
entity_data = entities_tsv.getvalue()
else:
raise ValueError('Unsupported input type.')
return upload_entities(namespace, workspace, entity_data) | Upload entities from a tsv loadfile.
File-based wrapper for api.upload_entities().
A loadfile is a tab-separated text file with a header row
describing entity type and attribute names, followed by
rows of entities and their attribute values.
Ex:
entity:participant_id age alive
participant_23 25 Y
participant_27 35 N
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
entities_tsv (file): FireCloud loadfile, see format above | codesearchnet |
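As a quick illustration of the two accepted input types, the sketch below uses hypothetical project and workspace names and assumes the wrapper above is importable together with its `io`/`six` dependencies; it exercises the `io.StringIO` branch, while passing a filesystem path string instead would take the `open()`-based branch.

import io

loadfile = io.StringIO(
    "entity:participant_id\tage\talive\n"
    "participant_23\t25\tY\n"
    "participant_27\t35\tN\n"
)
# Placeholder namespace/workspace names, purely for illustration.
response = upload_entities_tsv("my-project", "my-workspace", loadfile)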
def _MaybeDeleteOldCheckpoints(self, meta_graph_suffix='meta'):
if self._checkpoints_to_be_deleted:
p = self._checkpoints_to_be_deleted.pop(0)
should_keep = p[1] > self._next_checkpoint_time
if should_keep:
self._next_checkpoint_time += self.saver_def.keep_checkpoint_every_n_hours * 3600
return
try:
checkpoint_management.remove_checkpoint(self._CheckpointFilename(p), self.saver_def.version, meta_graph_suffix)
except Exception as e:
logging.warning('Ignoring: %s', str(e)) | Deletes old checkpoints if necessary.
`self._checkpoints_to_be_deleted` is going to contain checkpoints that are
over `max_to_keep`. They are going to be deleted. If
`keep_checkpoint_every_n_hours` was specified, keep an additional checkpoint
every `N` hours. For example, if `N` is 0.5, an additional checkpoint is
kept for every 0.5 hours of training; if `N` is 10, an additional
checkpoint is kept for every 10 hours of training.
Args:
meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'. | github-repos |
def set_margin(self, top=40, bottom=30, left=50, right=10, buffer_size=8):
self.set_integer("top", top)
self.set_integer("bottom", bottom)
self.set_integer("left", left)
self.set_integer("right", right)
self.set_integer("buffer", buffer_size) | Set margin of the chart.
Args:
top (int): size of top margin in pixels.
bottom (int): size of bottom margin in pixels.
left (int): size of left margin in pixels.
right (int): size of right margin in pixels.
buffer_size (int): buffer size in pixels between the chart and margins. | juraj-google-style |
def setRightsHolder(self, pid, userId, serialVersion, vendorSpecific=None):
response = self.setRightsHolderResponse(
pid, userId, serialVersion, vendorSpecific
)
return self._read_boolean_response(response) | See Also: setRightsHolderResponse()
Args:
pid:
userId:
serialVersion:
vendorSpecific:
Returns: | juraj-google-style |
def get_subport_statistics(self, id_or_uri, port_name, subport_number):
uri = self._client.build_uri(id_or_uri) + "/statistics/{0}/subport/{1}".format(port_name, subport_number)
return self._client.get(uri) | Gets the subport statistics on an interconnect.
Args:
id_or_uri: Can be either the interconnect id or the interconnect uri.
port_name (str): A specific port name of an interconnect.
subport_number (int): The subport.
Returns:
dict: The statistics for the interconnect that matches id, port_name, and subport_number. | juraj-google-style |
def Decrypt(self, encrypted_data):
decrypted_data = self._rc4_cipher.decrypt(encrypted_data)
return decrypted_data, b'' | Decrypts the encrypted data.
Args:
encrypted_data (bytes): encrypted data.
Returns:
tuple[bytes,bytes]: decrypted data and remaining encrypted data. | juraj-google-style |
def _fuse_awq_mlp(model, current_module_name, fuse_module_names, module, target_cls):
if len(fuse_module_names) == 0:
return
if hasattr(module, fuse_module_names[0]):
gate_proj = getattr(module, fuse_module_names[0])
up_proj = getattr(module, fuse_module_names[1])
down_proj = getattr(module, fuse_module_names[2])
previous_device = gate_proj.qweight.device
config = model.config.get_text_config(decoder=True)
hidden_act = config.hidden_act
activation_fn = ACT2FN[hidden_act]
new_module = target_cls(gate_proj, down_proj, up_proj, activation_fn)
parent_name, child_name = current_module_name.rsplit('.', 1)
parent = model.get_submodule(parent_name)
setattr(parent, child_name, new_module.to(previous_device))
del gate_proj, up_proj, down_proj | Fuse the MLP layers into a target class using autoawq
Args:
model (`~PreTrainedModel`):
The input pretrained model
current_module_name (`str`):
The current submodule name
fuse_module_names (`List[str]`):
The list of module names to fuse. For the MLP layers it has to be an array
of length 3 that consists of the 3 MLP layers in the order (gate (dense layer post-attention) / up / down layers)
module (`nn.Module`):
The pytorch parent module that has layernorm modules to fuse
target_cls (`~autoawq.QuantFusedMLP`):
The `QuantFusedMLP` class as it only supports that class
for now. | github-repos |
def parse_test_files(filepattern):
for path in glob.glob(filepattern):
with open(path) as fin:
suite_name = os.path.splitext(os.path.basename(path))[0].title().replace('-', '') + 'Test'
print(path, suite_name)
methods = dict(create_test_methods(yaml.load(fin, Loader=yaml_transform.SafeLineLoader)))
globals()[suite_name] = type(suite_name, (unittest.TestCase,), methods) | Parses YAML test files and dynamically creates test cases.
This function iterates through all files matching the given glob pattern.
For each YAML file found, it:
1. Reads the file content.
2. Determines a test suite name based on the file name.
3. Calls `create_test_methods` to generate test methods from the
YAML specification.
4. Dynamically creates a new TestCase class (inheriting from
`unittest.TestCase`) and populates it with the generated test methods.
5. Adds this newly created TestCase class to the global scope, making it
discoverable by the unittest framework.
Args:
filepattern (str): A glob pattern specifying the YAML test files to parse.
For example, 'path/to/tests/*.yaml'. | github-repos |
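A small, self-contained sketch of the dynamic-class pattern that `parse_test_files` relies on: test methods built as plain functions, attached to a `unittest.TestCase` created with `type()`, and published via `globals()` so discovery can find it. All names here are illustrative, not from the original module.

import unittest

def _make_test(expected):
    def test(self):
        self.assertEqual(expected, expected)
    return test

# Build the methods dict, create the TestCase subclass at runtime, and
# publish it so the unittest discovery machinery can pick it up.
methods = {"test_roundtrip": _make_test(42)}
GeneratedTest = type("GeneratedTest", (unittest.TestCase,), methods)
globals()["GeneratedTest"] = GeneratedTest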
def __init__(self, assign_defaults=(), method_name=None):
super(self.__class__, self).__init__(assign_defaults=assign_defaults,
method_name=method_name) | Assigns arguments to the decorator.
Args:
assign_defaults: A sequence of strings for the default values that should
be provided. Defaults are shared across methods.
method_name: If provided, use this as the method_name instead of the
wrapped function's name. | juraj-google-style |
def grow(script, iterations=1):
filter_xml = ' <filter name="Dilate Selection"/>\n'
for _ in range(iterations):
util.write_filter(script, filter_xml)
return None | Grow (dilate, expand) the current set of selected faces
Args:
script: the FilterScript object or script filename to write
the filter to.
iterations (int): the number of times to grow the selection.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA | juraj-google-style |
def __init__(self, channel):
self.ListKnowledgeBases = channel.unary_unary(
'/google.cloud.dialogflow.v2beta1.KnowledgeBases/ListKnowledgeBases',
request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.ListKnowledgeBasesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.ListKnowledgeBasesResponse.FromString,
)
self.GetKnowledgeBase = channel.unary_unary(
'/google.cloud.dialogflow.v2beta1.KnowledgeBases/GetKnowledgeBase',
request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.GetKnowledgeBaseRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.KnowledgeBase.FromString,
)
self.CreateKnowledgeBase = channel.unary_unary(
'/google.cloud.dialogflow.v2beta1.KnowledgeBases/CreateKnowledgeBase',
request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.CreateKnowledgeBaseRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.KnowledgeBase.FromString,
)
self.DeleteKnowledgeBase = channel.unary_unary(
'/google.cloud.dialogflow.v2beta1.KnowledgeBases/DeleteKnowledgeBase',
request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.DeleteKnowledgeBaseRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
) | Constructor.
Args:
channel: A grpc.Channel. | juraj-google-style |
def SetLines(self, lines):
(self._cli_lines, self._cli_cols) = TerminalSize()
if lines:
self._cli_lines = int(lines) | Set number of screen lines.
Args:
lines: An int, number of lines. If None, use terminal dimensions.
Raises:
ValueError, TypeError: Not a valid integer representation. | codesearchnet |
def delete_value(hive, key, vname=None, use_32bit_registry=False):
return __utils__['reg.delete_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry) | r'''
Delete a registry value entry or the default value for a key.
Args:
hive (str):
The name of the hive. Can be one of the following
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
key (str):
The key (looks like a path) to the value name.
vname (str):
The value name. These are the individual name/data pairs under the
key. If not passed, the key (Default) value will be deleted.
use_32bit_registry (bool):
Deletes the 32bit portion of the registry on 64bit installations. On
32bit machines this is ignored.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' reg.delete_value HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version' | codesearchnet |
def set_topic(self, topic):
if not topic:
topic = ''
result = self._connection.put("room/%s" % self.id, {"room": {"topic": topic}})
if result["success"]:
self._load()
return result["success"] | Set the room topic.
Args:
topic (str): Topic
Returns:
bool. Success | juraj-google-style |
def lu_factor(x):
if any_symbolic_tensors((x,)):
return LuFactor().symbolic_call(x)
return _lu_factor(x) | Computes the lower-upper decomposition of a square matrix.
Args:
x: A tensor of shape `(..., M, M)`.
Returns:
A tuple of two tensors: a tensor of shape `(..., M, M)` containing the
lower and upper triangular matrices and a tensor of shape `(..., M)`
containing the pivots. | github-repos |
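A quick usage sketch, assuming `lu_factor` above is importable in a backend-enabled Keras environment:

import numpy as np

x = np.array([[4.0, 3.0],
              [6.0, 3.0]])
lu, pivots = lu_factor(x)
# `lu` packs the unit lower-triangular L and the upper-triangular U into a
# single (2, 2) matrix; `pivots` has shape (2,) and records the row swaps.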
def remove(self, dic):
for kw in dic:
removePair = Pair(kw, dic[kw])
self._remove([removePair]) | Remove the pair by passing an identical dict.
Args:
dic (dict): key and value | codesearchnet |
def CreateServiceProto(job):
service = rdf_client.OSXServiceInformation(
label=job.get("Label"),
program=job.get("Program"),
sessiontype=job.get("LimitLoadToSessionType"),
lastexitstatus=int(job["LastExitStatus"]),
timeout=int(job["TimeOut"]),
ondemand=bool(job["OnDemand"]))
for arg in job.get("ProgramArguments", "", stringify=False):
service.args.Append(str(arg))
mach_dict = job.get("MachServices", {}, stringify=False)
for key, value in iteritems(mach_dict):
service.machservice.Append("%s:%s" % (key, value))
job_mach_dict = job.get("PerJobMachServices", {}, stringify=False)
for key, value in iteritems(job_mach_dict):
service.perjobmachservice.Append("%s:%s" % (key, value))
if "PID" in job:
service.pid = job["PID"].value
return service | Create the Service protobuf.
Args:
job: Launchd job dict from the servicemanagement framework.
Returns:
sysinfo_pb2.OSXServiceInformation proto | juraj-google-style |
def read_file(path):
gen = textfile.read_separated_lines_generator(path, max_columns=6,
ignore_lines_starting_with=[';;'])
utterances = collections.defaultdict(list)
for record in gen:
values = record[1:len(record)]
for i in range(len(values)):
if i == 1 or i == 2 or i == 4:
values[i] = float(values[i])
utterances[record[0]].append(values)
return utterances | Reads a ctm file.
Args:
path (str): Path to the file
Returns:
(dict): Dictionary with entries.
Example::
>>> read_file('/path/to/file.txt')
{
'wave-ab': [
['1', 0.00, 0.07, 'HI', 1],
['1', 0.09, 0.08, 'AH', 1]
],
'wave-xy': [
['1', 0.00, 0.07, 'HI', 1],
['1', 0.09, 0.08, 'AH', 1]
]
} | juraj-google-style |
def __init__(self, table_name, dataset, schema, project):
beam.PTransform.__init__(self)
self.table_name = table_name
self.dataset = dataset
self.schema = schema
self.project = project | Initializes the transform.
Args:
table_name: Name of the BigQuery table to use.
dataset: Name of the dataset to use.
schema: Dictionary in the format {'column_name': 'bigquery_type'}
project: Name of the Cloud project containing BigQuery table. | github-repos |
def fit(self, *args):
data = list(zip(*args))
self.save()
if self._fit_batch_size is None:
raise ConfigError("in order to use fit() method"
" set `fit_batch_size` parameter")
bs = int(self._fit_batch_size)
data_len = len(data)
num_batches = self._fit_max_batches or ((data_len - 1) // bs + 1)  # ceil(data_len / bs)
avg_loss = 0.
best_loss = float('inf')
lrs, losses = [], []
_lr_find_schedule = DecayScheduler(start_val=self._fit_learning_rate[0],
end_val=self._fit_learning_rate[1],
dec_type="exponential",
num_it=num_batches)
self._lr = _lr_find_schedule.start_val
self._mom = 0.
self._update_graph_variables(learning_rate=self._lr, momentum=self._mom)
best_lr = _lr_find_schedule.start_val
for i in range(num_batches):
batch_start = (i * bs) % data_len
batch_end = batch_start + bs
report = self.train_on_batch(*zip(*data[batch_start:batch_end]))
if not isinstance(report, dict):
report = {'loss': report}
avg_loss = self._fit_beta*avg_loss + (1 - self._fit_beta)*report['loss']
smoothed_loss = avg_loss / (1 - self._fit_beta**(i + 1))
lrs.append(self._lr)
losses.append(smoothed_loss)
log.info(f"Batch {i}/{num_batches}: smooth_loss = {smoothed_loss}"
f", lr = {self._lr}, best_lr = {best_lr}")
if math.isnan(smoothed_loss) or (smoothed_loss > 4 * best_loss):
break
if (smoothed_loss < best_loss) and (i >= self._fit_min_batches):
best_loss = smoothed_loss
best_lr = self._lr
self._lr = _lr_find_schedule.next_val()
self._update_graph_variables(learning_rate=self._lr)
if i >= num_batches:
break
end_val = self._get_best(lrs, losses)
start_val = end_val
if self._lr_schedule.dec_type in (DecayType.ONECYCLE, DecayType.TRAPEZOID):
start_val = end_val / self._fit_learning_rate_div
elif self._lr_schedule.dec_type in (DecayType.POLYNOMIAL, DecayType.EXPONENTIAL,
DecayType.LINEAR, DecayType.COSINE):
start_val = end_val
end_val = end_val / self._fit_learning_rate_div
self._lr_schedule = DecayScheduler(start_val=start_val,
end_val=end_val,
num_it=self._lr_schedule.nb,
dec_type=self._lr_schedule.dec_type,
extra=self._lr_schedule.extra)
log.info(f"Found best learning rate value = {best_lr}"
f", setting new learning rate schedule with {self._lr_schedule}.")
self.load()
self._lr = self._lr_schedule.start_val
self._mom = self._mom_schedule.start_val
self._update_graph_variables(learning_rate=self._lr, momentum=self._mom)
return {'smoothed_loss': losses, 'learning_rate': lrs} | Find the best learning rate schedule, and set obtained values of learning rate
and momentum for further model training. The best learning rate will be divided
by `fit_learning_rate_div` before the model is trained further.
Args:
*args: arguments
Returns: | juraj-google-style |
def distance_to_angle(distance, units='metric'):
if (units in ('km', 'metric')):
pass
elif (units in ('sm', 'imperial', 'US customary')):
distance *= STATUTE_MILE
elif (units in ('nm', 'nautical')):
distance *= NAUTICAL_MILE
else:
raise ValueError(('Unknown units type %r' % units))
return math.degrees((distance / BODY_RADIUS)) | Convert a distance in to an angle along a great circle.
Args:
distance (float): Distance to convert to degrees
units (str): Unit type to be used for distances
Returns:
float: Angle in degrees
Raises:
ValueError: Unknown value for ``units`` | codesearchnet |
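A worked example, assuming `BODY_RADIUS` is the Earth's mean radius of roughly 6371 km; about 111.2 km of great-circle distance then corresponds to one degree.

angle = distance_to_angle(111.2, units="metric")
# math.degrees(111.2 / 6371) is roughly 1.0, so `angle` is about 1 degree.

# Imperial input is converted to kilometres via STATUTE_MILE first:
angle_sm = distance_to_angle(69.1, units="imperial")  # also about 1 degree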
def __call__(self, inputs, state, scope=None):
return self._call_wrapped_cell(inputs, state, cell_call_fn=self.cell.__call__, scope=scope) | Runs the RNN cell step computation.
We assume that the wrapped RNNCell is being built within its `__call__`
method. We directly use the wrapped cell's `__call__` in the overridden
wrapper `__call__` method.
This makes it possible to use the wrapped cell and the non-wrapped cell equivalently
when using `__call__`.
Args:
inputs: A tensor with wrapped cell's input.
state: A tensor or tuple of tensors with wrapped cell's state.
scope: VariableScope for the subgraph created in the wrapped cells'
`__call__`.
Returns:
A pair containing:
- Output: A tensor with cell's output.
- New state: A tensor or tuple of tensors with new wrapped cell's state. | github-repos |
def get_yield_stress(self, n):
comp = root(self.get_stability_criteria, (- 1), args=n)
tens = root(self.get_stability_criteria, 1, args=n)
return (comp.x, tens.x) | Gets the yield stress for a given direction
Args:
n (3x1 array-like): direction for which to find the
yield stress | codesearchnet |
def from_json(cls, name, spec):
if "run" not in spec:
raise TuneError("No trainable specified!")
if "env" in spec:
spec["config"] = spec.get("config", {})
spec["config"]["env"] = spec["env"]
del spec["env"]
spec = copy.deepcopy(spec)
run_value = spec.pop("run")
try:
exp = cls(name, run_value, **spec)
except TypeError:
raise TuneError("Improper argument from JSON: {}.".format(spec))
return exp | Generates an Experiment object from JSON.
Args:
name (str): Name of Experiment.
spec (dict): JSON configuration of experiment. | juraj-google-style |
def ContainsAddressStr(self, address):
for key, contract in self._contracts.items():
if contract.Address == address:
return True
return False | Determine if the wallet contains the address.
Args:
address (str): a string representing the public key.
Returns:
bool: True, if the address is present in the wallet. False otherwise. | juraj-google-style |
def min(self):
if self.is_quantized or self.base_dtype in (bool, string, complex64, complex128):
raise TypeError(f"Cannot find minimum value of {self} with {'quantized type' if self.is_quantized else 'type'} {self.base_dtype}.")
try:
return ml_dtypes.finfo(self.as_numpy_dtype).min
except:
try:
return ml_dtypes.iinfo(self.as_numpy_dtype).min
except:
raise TypeError(f'Cannot find minimum value of {self}.') | Returns the minimum representable value in this data type.
Raises:
TypeError: if this is a non-numeric, unordered, or quantized type. | github-repos |
def case_to_clinVars(self, case_id):
query = dict(case_id=case_id, csv_type='variant')
clinvar_objs = list(self.clinvar_collection.find(query))
submitted_vars = {}
for clinvar in clinvar_objs:
submitted_vars[clinvar.get('local_id')] = clinvar
return submitted_vars | Get all variants included in clinvar submissions for a case
Args:
case_id(str): a case _id
Returns:
submission_variants(dict): keys are variant ids and values are variant submission objects | juraj-google-style |
def build(self):
return copy.deepcopy(self._options) | Build a profiling option.
Returns:
A dict of profiling options. | github-repos |
def user_ban(channel, user):
username = user.name
if isinstance(user, discord.Member):
if user.nick is not None:
username = user.nick
gui = ui_embed.UI(
channel,
"Banned {}".format(username),
"{} has been banned from this server".format(username),
modulename=modulename
)
return gui | Creates an embed UI containing a user ban message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
user (discord.User): The user to ban
Returns:
ui (ui_embed.UI): The embed UI object | juraj-google-style |
def get_all_distributions_by_type(dist, metric_id):
submit_timestamp = time.time()
dist_types = ['count', 'max', 'min', 'sum', 'mean']
distribution_dicts = []
for dist_type in dist_types:
try:
distribution_dicts.append(get_distribution_dict(dist_type, submit_timestamp, dist, metric_id))
except ValueError:
continue
return distribution_dicts | Creates a new list of objects with the type of each distribution
metric value.
Args:
dist(object): DistributionMetric object to be parsed
metric_id(uuid): id of the current test run
Returns:
list of :class:`DistributionMetric` objects | github-repos |
def get_specification(version: str) -> Mapping[(str, Any)]:
spec_dir = config['bel']['lang']['specifications']
spec_dict = {}
bel_versions = get_bel_versions()
if (version not in bel_versions):
log.error('Cannot get unknown version BEL specification')
return {'error': 'unknown version of BEL'}
version_underscored = version.replace('.', '_')
json_fn = f'{spec_dir}/bel_v{version_underscored}.json'
with open(json_fn, 'r') as f:
spec_dict = json.load(f)
return spec_dict | Get BEL Specification
The json file this depends on is generated by belspec_yaml2json as
part of the update_specifications function
Args:
version: e.g. 2.0.0 where the filename | codesearchnet |
def to_diff_dict(self) -> Dict[str, Any]:
config_dict = self.to_dict()
default_config_dict = BitsAndBytesConfig().to_dict()
serializable_config_dict = {}
for key, value in config_dict.items():
if value != default_config_dict[key]:
serializable_config_dict[key] = value
return serializable_config_dict | Removes all attributes from config which correspond to the default config attributes for better readability and
serializes to a Python dictionary.
Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, | github-repos |
def combine(specs):
new_specs = {}
for spec in specs:
if (new_specs.get(spec, None) is None):
new_specs[spec] = spec
else:
new_specs[spec].add(spec)
return list(new_specs.values()) | Combine package specifications' limitations.
Args:
specs (list of PackageSpec): the package specifications.
Returns:
list of PackageSpec: the new, merged list of PackageSpec. | codesearchnet |
def dbclass(self, value):
if not is_valid_dbclass(value):
raise AttributeError("'{}' is not a valid database type".format(value))
self._class = value
self._connectionXML.set('class', value) | Set the connection's dbclass property.
Args:
value: New dbclass value. String.
Returns:
Nothing. | juraj-google-style |
def is_periodically_contiguous(self):
edges = self.sites_at_edges()
is_contiguous = [False, False, False]
along_x = any([(s2 in s1.p_neighbours) for s1 in edges[0] for s2 in edges[1]])
along_y = any([(s2 in s1.p_neighbours) for s1 in edges[2] for s2 in edges[3]])
along_z = any([(s2 in s1.p_neighbours) for s1 in edges[4] for s2 in edges[5]])
return (along_x, along_y, along_z) | Logical check of whether a cluster connects with itself across the
simulation's periodic boundary conditions.
Args:
none
Returns:
( Bool, Bool, Bool ): Contiguity along the x, y, and z coordinate axes | codesearchnet |
def get_element(source, path, separator=r'[/.]'):
return _get_element_by_names(source, re.split(separator, path)) | Given a dict and a '/'- or '.'-separated path, digs into the dict to retrieve
the specified element.
Args:
source (dict): set of nested objects in which the data will be searched
path (string): '/' or '.' string with attribute names | juraj-google-style |
def get_cot_artifacts(context):
artifacts = {}
filepaths = filepaths_in_dir(context.config['artifact_dir'])
hash_alg = context.config['chain_of_trust_hash_algorithm']
for filepath in sorted(filepaths):
path = os.path.join(context.config['artifact_dir'], filepath)
sha = get_hash(path, hash_alg=hash_alg)
artifacts[filepath] = {hash_alg: sha}
return artifacts | Generate the artifact relative paths and shas for the chain of trust.
Args:
context (scriptworker.context.Context): the scriptworker context.
Returns:
dict: a dictionary of {"path/to/artifact": {"hash_alg": "..."}, ...} | codesearchnet |
def filter_(predicate, *structures, **kwargs):
flatten = kwargs.pop('flatten', False)
assert (not kwargs), 'filter() got unexpected keyword arguments.'
def impl(predicate, *structures):
if (len(structures) == 0):
return structures
if all((isinstance(s, (tuple, list)) for s in structures)):
if (len(set((len(x) for x in structures))) > 1):
raise ValueError('Cannot merge tuples or lists of different length.')
if (len(structures) > 1):
filtered = (impl(predicate, *x) for x in _builtin_zip(*structures))
else:
filtered = (impl(predicate, x) for x in structures[0])
if hasattr(structures[0], '_fields'):
filtered = ((x if (x != ()) else None) for x in filtered)
return type(structures[0])(*filtered)
else:
filtered = (x for x in filtered if ((not isinstance(x, (tuple, list, dict))) or x))
return type(structures[0])(filtered)
if all((isinstance(s, dict) for s in structures)):
if (len(set((frozenset(x.keys()) for x in structures))) > 1):
raise ValueError('Cannot merge dicts with different keys.')
if (len(structures) > 1):
filtered = {k: impl(predicate, *(s[k] for s in structures)) for k in structures[0]}
else:
filtered = {k: impl(predicate, v) for (k, v) in structures[0].items()}
filtered = {k: v for (k, v) in filtered.items() if ((not isinstance(v, (tuple, list, dict))) or v)}
return type(structures[0])(filtered)
if (len(structures) > 1):
return (structures if predicate(*structures) else ())
else:
return (structures[0] if predicate(structures[0]) else ())
result = impl(predicate, *structures)
if flatten:
result = flatten_(result)
return result | Select elements of a nested structure based on a predicate function.
If multiple structures are provided as input, their structure must match and
the function will be applied to corresponding groups of elements. The nested
structure can consist of any combination of lists, tuples, and dicts.
Args:
predicate: The function to determine whether an element should be kept.
Receives one argument for every structure that is provided.
*structures: One or more nested structures.
flatten: Whether to flatten the resulting structure into a tuple. Keys of
dictionaries will be discarded.
Returns:
Nested structure. | codesearchnet |
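A small usage sketch of `filter_` on a single nested structure (assuming the helper above is importable); note how empty sub-containers are pruned, as the docstring describes.

structure = {"a": [1, 2, 3, 4], "b": (5, 6), "c": {"d": 7}}
result = filter_(lambda x: x % 2 == 0, structure)
# result == {"a": [2, 4], "b": (6,)}; key "c" disappears because its only
# value fails the predicate, leaving an empty dict that gets dropped.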
def init(self, force_deploy=False):
machines = self.provider_conf.machines
networks = self.provider_conf.networks
_networks = []
for network in networks:
ipnet = IPNetwork(network.cidr)
_networks.append({'netpool': list(ipnet)[10:(- 10)], 'cidr': network.cidr, 'roles': network.roles, 'gateway': ipnet.ip})
vagrant_machines = []
vagrant_roles = {}
j = 0
for machine in machines:
for _ in range(machine.number):
vagrant_machine = {'name': ('enos-%s' % j), 'cpu': machine.flavour_desc['core'], 'mem': machine.flavour_desc['mem'], 'ips': [n['netpool'].pop() for n in _networks]}
vagrant_machines.append(vagrant_machine)
for role in machine.roles:
vagrant_roles.setdefault(role, []).append(vagrant_machine)
j = (j + 1)
logger.debug(vagrant_roles)
loader = FileSystemLoader(searchpath=TEMPLATE_DIR)
env = Environment(loader=loader, autoescape=True)
template = env.get_template('Vagrantfile.j2')
vagrantfile = template.render(machines=vagrant_machines, provider_conf=self.provider_conf)
vagrantfile_path = os.path.join(os.getcwd(), 'Vagrantfile')
with open(vagrantfile_path, 'w') as f:
f.write(vagrantfile)
v_env = dict(os.environ)
v_env['VAGRANT_DEFAULT_PROVIDER'] = self.provider_conf.backend
v = vagrant.Vagrant(root=os.getcwd(), quiet_stdout=False, quiet_stderr=False, env=v_env)
if force_deploy:
v.destroy()
v.up()
v.provision()
roles = {}
for (role, machines) in vagrant_roles.items():
for machine in machines:
keyfile = v.keyfile(vm_name=machine['name'])
port = v.port(vm_name=machine['name'])
address = v.hostname(vm_name=machine['name'])
roles.setdefault(role, []).append(Host(address, alias=machine['name'], user=self.provider_conf.user, port=port, keyfile=keyfile))
networks = [{'cidr': str(n['cidr']), 'start': str(n['netpool'][0]), 'end': str(n['netpool'][(- 1)]), 'dns': '8.8.8.8', 'gateway': str(n['gateway']), 'roles': n['roles']} for n in _networks]
logger.debug(roles)
logger.debug(networks)
return (roles, networks) | Reserve and deploys the vagrant boxes.
Args:
force_deploy (bool): True iff new machines should be started | codesearchnet |
def __init__(self, root, case_sensitive=True):
root = os.path.normpath(root)
if not root:
errstr = 'root path must not be empty (\'.\' for current directory)'
raise ValueError(errstr)
ensure_directory_exists(root)
self.root_path = root
self.case_sensitive = bool(case_sensitive) | Initialize the datastore with given root directory `root`.
Args:
root: A path at which to mount this filesystem datastore. | juraj-google-style |
def matches(self, regex: str) -> 'Builder':
param_nodes = self._function_args_to_nodes(self.node, [regex])
return self._to_builder(_evaluation.MatchesFunction(self.node.context, self.node, param_nodes)) | The FHIRPath matches() function.
Args:
regex: a regular expression to match against the parent element.
Returns:
An expression that evaluates to True if the parent matches the given
regular expression. | github-repos |
def fetch(self, customer_id, token_id, data={}, **kwargs):
url = "{}/{}/tokens/{}".format(self.base_url, customer_id, token_id)
return self.get_url(url, data, **kwargs) | Fetch Token for given Id and given customer Id
Args:
customer_id : Customer Id for which tokens have to be fetched
token_id : Id for which Token object has to be fetched
Returns:
Token dict for given token Id | juraj-google-style |
def realpath(path):
if (path == '~'):
return userdir
if (path == '/'):
return sysroot
if path.startswith('/'):
return os.path.abspath(path)
if path.startswith('~/'):
return os.path.expanduser(path)
if path.startswith('./'):
return os.path.abspath(os.path.join(os.path.curdir, path[2:]))
return os.path.abspath(path) | Create the real absolute path for the given path.
Adds support for the user home directory ('~') and the filesystem root ('/').
Args:
* path: pathname to use for realpath.
Returns:
Platform independent real absolute path. | codesearchnet |
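A few illustrative calls; the results depend on the current user and working directory, so the comments are indicative only.

realpath("~")           # the user's home directory (userdir)
realpath("~/projects")  # e.g. "/home/alice/projects"
realpath("./docs")      # absolute path under the current directory
realpath("notes.txt")   # resolved against the current directory
realpath("/etc/hosts")  # already absolute; returned normalised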
def sample_dynamic_prior(self, samples, batch_size, length, fixed=False):
if fixed:
sample_batch_size = 1
else:
sample_batch_size = batch_size
(sample, state) = self.dynamic_prior.zero_state([samples, sample_batch_size])
locs = []
scale_diags = []
sample_list = []
for _ in range(length):
(dist, state) = self.dynamic_prior(sample, state)
sample = dist.sample()
locs.append(dist.parameters['loc'])
scale_diags.append(dist.parameters['scale_diag'])
sample_list.append(sample)
sample = tf.stack(sample_list, axis=2)
loc = tf.stack(locs, axis=2)
scale_diag = tf.stack(scale_diags, axis=2)
if fixed:
sample = (sample + tf.zeros([batch_size, 1, 1]))
return (sample, tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag)) | Sample the dynamic latent prior.
Args:
samples: Number of samples to draw from the latent distribution.
batch_size: Number of sequences to sample.
length: Number of timesteps to sample for each sequence.
fixed: Boolean for whether or not to share the same random
sample across all sequences.
Returns:
A tuple of a sample tensor of shape [samples, batch_size, length
latent_size], and a MultivariateNormalDiag distribution from which
the tensor was sampled, with event shape [latent_size], and batch
shape [samples, 1, length] if fixed or [samples, batch_size,
length] otherwise. | codesearchnet |
def colorize(text, messageType=None):
formattedText = str(text)
if ('ERROR' in messageType):
formattedText = (colorama.Fore.RED + formattedText)
elif ('WARNING' in messageType):
formattedText = (colorama.Fore.YELLOW + formattedText)
elif ('SUCCESS' in messageType):
formattedText = (colorama.Fore.GREEN + formattedText)
elif ('INFO' in messageType):
formattedText = (colorama.Fore.BLUE + formattedText)
if ('BOLD' in messageType):
formattedText = (colorama.Style.BRIGHT + formattedText)
return (formattedText + colorama.Style.RESET_ALL) | Function that colorizes a message.
Args:
-----
text: The string to be colorized.
messageType: Possible options include "ERROR", "WARNING", "SUCCESS",
"INFO" or "BOLD".
Returns:
--------
string: Colorized if the option is correct, including a tag at the end
to reset the formatting. | codesearchnet |
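A brief usage sketch; `colorama.init()` is the usual setup call so the ANSI codes also render on Windows.

import colorama

colorama.init()
print(colorize("Job finished", "SUCCESS"))           # green
print(colorize("Disk almost full", "WARNING BOLD"))  # bold yellow
print(colorize("Connection refused", "ERROR"))       # red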
def _change_precision(self, val, base=0):
if not isinstance(val, int):
raise TypeError('The first argument must be an integer.')
val = round(abs(val))
val = (lambda num: base if is_num(num) else num)(val)
return val | Check and normalise the value of precision (must be positive integer).
Args:
val (INT): must be positive integer
base (INT): Description
Returns:
VAL (INT): Description | juraj-google-style |
def validate_args(func: Method, *args: Any, **kwargs: Any) -> Method:
signature(func).bind(*args, **kwargs)
return func | Check if the request's arguments match a function's signature.
Raises TypeError exception if arguments cannot be passed to a function.
Args:
func: The function to check.
args: Positional arguments.
kwargs: Keyword arguments.
Raises:
TypeError: If the arguments cannot be passed to the function. | codesearchnet |
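A minimal sketch of how the signature binding behaves, assuming `validate_args` above is importable:

def add(a, b=1):
    return a + b

validate_args(add, 2)        # binds a=2, b defaults to 1; returns add
validate_args(add, 2, b=3)   # keyword arguments bind as well

try:
    validate_args(add, 2, 3, 4)  # too many positional arguments
except TypeError as exc:
    print(exc)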
def mark_checked(tensors):
def _mark_checked(tensor):
tensor._keras_history_checked = True
nest.map_structure(_mark_checked, tensors) | Marks that these Tensors should not be tracked.
This prevents Layers from attempting to create TensorFlowOpLayers
for these Tensors.
Args:
tensors: An arbitrary structure of Tensors. | github-repos |
def filter_tests(output_file: str, filters: List[str]):
if not os.path.isfile(output_file):
print('No test file found.')
return
with open(output_file, 'r', encoding='utf-8') as f:
test_files = f.read().split(' ')
if len(test_files) == 0 or test_files == ['']:
print('No tests to filter.')
return
if test_files == ['tests']:
test_files = [os.path.join('tests', f) for f in os.listdir('tests') if f not in ['__init__.py'] + filters]
else:
test_files = [f for f in test_files if f.split(os.path.sep)[1] not in filters]
with open(output_file, 'w', encoding='utf-8') as f:
f.write(' '.join(test_files)) | Reads the content of the output file and filters out all the tests in a list of given folders.
Args:
output_file (`str` or `os.PathLike`): The path to the output file of the tests fetcher.
filters (`List[str]`): A list of folders to filter. | github-repos |
def parse_structure(self, store_in_memory=False):
if not self.structure_file:
log.error('{}: no structure file, unable to parse'.format(self.id))
return None
else:
structure = StructureIO(self.structure_path, self.file_type)
structure_chains = [x.id for x in structure.first_model.child_list]
self.add_chain_ids(structure_chains)
self.get_structure_seqs(structure.first_model)
if not self.mapped_chains:
self.add_mapped_chain_ids(structure_chains)
if store_in_memory:
self.parsed = True
self.structure = structure
return structure | Read the 3D coordinates of a structure file and return it as a Biopython Structure object.
Also create ChainProp objects in the chains attribute for each chain in the first model.
Args:
store_in_memory (bool): If the Biopython Structure object should be stored in the attribute ``structure``.
Returns:
Structure: Biopython Structure object | juraj-google-style |
def forward(self, hidden_states: torch.Tensor):
hidden_states = self.w_in(hidden_states)
if self.dropout is not None:
hidden_states = self.dropout(hidden_states)
hidden_states = self.w_out(hidden_states)
return hidden_states | Args:
hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`) | github-repos |
def system(self) -> 'EFBChat':
self.chat_name = 'System'
self.chat_alias = None
self.chat_uid = EFBChat.SYSTEM_ID
self.chat_type = ChatType.System
return self | Set the chat as a system chat.
Only set for channel-level and group-level system chats.
Returns:
EFBChat: This object. | codesearchnet |
def add_authorization_policy(access_token, ck_id, oid):
path = '/ContentKeys'
body = (('{"AuthorizationPolicyId":"' + oid) + '"}')
return helper_add(access_token, ck_id, path, body) | Add Media Service Authorization Policy.
Args:
access_token (str): A valid Azure authentication token.
ck_id (str): A Media Service Asset Content Key ID.
oid (str): A Media Service OID.
Returns:
HTTP response. JSON body. | codesearchnet |
def attention_bias_local(length, max_backward, max_forward):
band = common_layers.ones_matrix_band_part(
length,
length,
max_backward,
max_forward,
out_shape=[1, 1, length, length])
return -1e9 * (1.0 - band) | Create a bias tensor to be added to attention logits.
A position may attend to positions at most max_distance from it,
forward and backwards.
This does not actually save any computation.
Args:
length: int
max_backward: int, maximum distance backward to attend. Negative values
indicate unlimited.
max_forward: int, maximum distance forward to attend. Negative values
indicate unlimited.
Returns:
a `Tensor` with shape [1, 1, length, length]. | juraj-google-style |
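A usage sketch (this helper comes from the TF1-era tensor2tensor codebase, so the call assumes that environment): one step backward and zero steps forward yields a strictly causal window of width two.

bias = attention_bias_local(length=4, max_backward=1, max_forward=0)
# bias has shape [1, 1, 4, 4]; positions inside the band hold 0.0 and
# positions outside it hold -1e9, which vanishes after the softmax.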
def loss_l2(self, l2=0):
if isinstance(l2, (int, float)):
D = l2 * torch.eye(self.d)
else:
D = torch.diag(torch.from_numpy(l2))
return torch.norm(D @ (self.mu - self.mu_init)) ** 2 | L2 loss centered around mu_init, scaled optionally per-source.
In other words, diagonal Tikhonov regularization,
||D(\mu-\mu_{init})||_2^2
where D is diagonal.
Args:
- l2: A float or np.array representing the per-source regularization
strengths to use | juraj-google-style |
def _SimpleEncoder(wire_type, encode_value, compute_value_size):
def SpecificEncoder(field_number, is_repeated, is_packed):
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
size = 0
for element in value:
size += compute_value_size(element)
local_EncodeVarint(write, size)
for element in value:
encode_value(write, element)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
encode_value(write, element)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return encode_value(write, value)
return EncodeField
return SpecificEncoder | Return a constructor for an encoder for fields of a particular type.
Args:
wire_type: The field's wire type, for encoding tags.
encode_value: A function which encodes an individual value, e.g.
_EncodeVarint().
compute_value_size: A function which computes the size of an individual
value, e.g. _VarintSize(). | codesearchnet |
def event_date(self, event_date):
if (not self.can_update()):
self._tcex.handle_error(910, [self.type])
event_date = self._utils.format_datetime(event_date, date_format='%Y-%m-%dT%H:%M:%SZ')
self._data['eventDate'] = event_date
request = {'eventDate': event_date}
return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request) | Updates the event_date.
Args:
event_date: Converted to %Y-%m-%dT%H:%M:%SZ date format.
Returns: | codesearchnet |
def _flip(image, flip_index, scope_name):
with ops.name_scope(None, scope_name, [image]):
image = ops.convert_to_tensor(image, name='image')
image = _AssertAtLeast3DImage(image)
shape = image.get_shape()
def f_rank3():
return fix_image_flip_shape(image, array_ops.reverse(image, [flip_index]))
def f_rank4():
return array_ops.reverse(image, [flip_index + 1])
if shape.ndims is None:
rank = array_ops.rank(image)
return tf_cond.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)
elif shape.ndims == 3:
return f_rank3()
elif shape.ndims == 4:
return f_rank4()
else:
raise ValueError("'image' (shape %s)must have either 3 or 4 dimensions." % shape) | Flip an image either horizontally or vertically.
Outputs the contents of `image` flipped along the dimension `flip_index`.
See also `reverse()`.
Args:
image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor
of shape `[height, width, channels]`.
flip_index: 0 For vertical, 1 for horizontal.
scope_name: string, scope name.
Returns:
A `Tensor` of the same type and shape as `image`.
Raises:
ValueError: if the shape of `image` not supported. | github-repos |
def _GetMemberDataTypeMaps(self, data_type_definition, data_type_map_cache):
if not data_type_definition:
raise errors.FormatError('Missing data type definition')
members = getattr(data_type_definition, 'members', None)
if not members:
raise errors.FormatError('Invalid data type definition missing members')
data_type_maps = []
members_data_size = 0
for member_definition in members:
if isinstance(member_definition, data_types.MemberDataTypeDefinition):
member_definition = member_definition.member_data_type_definition
if (data_type_definition.byte_order != definitions.BYTE_ORDER_NATIVE and
member_definition.byte_order == definitions.BYTE_ORDER_NATIVE):
member_definition = copy.copy(member_definition)
member_definition.name = '_{0:s}_{1:s}'.format(
data_type_definition.name, member_definition.name)
member_definition.byte_order = data_type_definition.byte_order
if member_definition.name not in data_type_map_cache:
data_type_map = DataTypeMapFactory.CreateDataTypeMapByType(
member_definition)
data_type_map_cache[member_definition.name] = data_type_map
data_type_map = data_type_map_cache[member_definition.name]
if members_data_size is not None:
if not isinstance(member_definition, data_types.PaddingDefinition):
byte_size = member_definition.GetByteSize()
else:
_, byte_size = divmod(
members_data_size, member_definition.alignment_size)
if byte_size > 0:
byte_size = member_definition.alignment_size - byte_size
data_type_map.byte_size = byte_size
if byte_size is None:
members_data_size = None
else:
members_data_size += byte_size
data_type_maps.append(data_type_map)
return data_type_maps | Retrieves the member data type maps.
Args:
data_type_definition (DataTypeDefinition): data type definition.
data_type_map_cache (dict[str, DataTypeMap]): cached data type maps.
Returns:
list[DataTypeMap]: member data type maps.
Raises:
FormatError: if the data type maps cannot be determined from the data
type definition. | juraj-google-style |
def check_error_output(tst, out, command_prefix, args):
tst.assertGreater(len(out.lines), 2)
tst.assertStartsWith(out.lines[0], 'Error occurred during handling of command: %s %s' % (command_prefix, ' '.join(args))) | Check RichTextLines output from invalid/erroneous commands.
Args:
tst: A test_util.TensorFlowTestCase instance.
out: The RichTextLines object to be checked.
command_prefix: The command prefix of the command that caused the error.
args: The arguments (excluding prefix) of the command that caused the error. | github-repos |
def GetRealValue(self, value):
assert value.op.type not in ['Variable', 'VariableV2']
real_value = self._history_map.get(value.name)
if real_value is None:
cur_value = value
cur_grad_state = self
while True:
enter_op = util.GetLoopConstantEnter(cur_value)
if enter_op:
cur_value = enter_op.inputs[0]
cur_grad_state = cur_grad_state.outer_grad_state
if cur_grad_state is None:
real_value = self._grad_context.AddValue(cur_value)
break
elif constant_op.is_constant(cur_value):
real_value = constant_op.constant(tensor_util.constant_value(cur_value), dtype=cur_value.dtype)
break
else:
self._grad_context.Exit()
history_value = cur_grad_state.AddForwardAccumulator(cur_value)
self._grad_context.Enter()
break
if real_value is None:
real_value = cur_grad_state.AddBackpropAccumulatedValue(history_value, cur_value)
if cur_grad_state != self:
real_value = self._grad_context.AddValue(real_value)
self._history_map[value.name] = real_value
return real_value | Get the real value of `value`.
If backprop "uses" a value produced by forward inference, an accumulator
is added in the forward loop to accumulate its values. We use the
accumulated value. This method must be called in the grad loop context.
`value` must be in forward and needed for backprop.
Args:
value: A tensor to be captured.
Returns:
The same tensor obtained from the saved history. | github-repos |
def GenerateTableHtml(items, keys_to_print, display_index=True):
html = ''
html += '<table><tr>\n'
html += '<tr>\n'
if display_index:
html += '<th>index</th>'
for h, mapper in keys_to_print:
html += '<th>%s</th>' % h
html += '</tr>\n'
for idx, tensor in enumerate(items):
html += '<tr>\n'
if display_index:
html += '<td>%d</td>' % idx
for h, mapper in keys_to_print:
val = tensor[h] if h in tensor else None
val = val if mapper is None else mapper(val)
html += '<td>%s</td>\n' % val
html += '</tr>\n'
html += '</table>\n'
return html | Given a list of object values and keys to print, make an HTML table.
Args:
items: Items to print an array of dicts.
keys_to_print: (key, display_fn). `key` is a key in the object. i.e.
items[0][key] should exist. display_fn is the mapping function on display.
i.e. the displayed html cell will have the string returned by
`mapping_fn(items[0][key])`.
display_index: add a column which is the index of each row in `items`.
Returns:
An html table. | github-repos |
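A small usage sketch with hypothetical tensor records, showing one pass-through column and one mapped column:

items = [
    {"name": "input", "shape": [1, 224, 224, 3]},
    {"name": "logits", "shape": [1, 1000]},
]
keys_to_print = [
    ("name", None),                                    # printed as-is
    ("shape", lambda s: "x".join(str(d) for d in s)),  # e.g. "1x224x224x3"
]
html = GenerateTableHtml(items, keys_to_print)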
def get_dim_label(js_dict, dim, input='dataset'):
if (input == 'dataset'):
input = js_dict['dimension'][dim]
label_col = 'label'
elif (input == 'dimension'):
label_col = js_dict['label']
input = js_dict
else:
raise ValueError
try:
dim_label = input['category']['label']
except KeyError:
dim_index = get_dim_index(js_dict, dim)
dim_label = pd.concat([dim_index['id'], dim_index['id']], axis=1)
dim_label.columns = ['id', 'label']
else:
dim_label = pd.DataFrame(list(zip(dim_label.keys(), dim_label.values())), index=dim_label.keys(), columns=['id', label_col])
try:
dim_index = input['category']['index']
except KeyError:
dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])), index=[0], columns=['id', 'index'])
else:
if (type(dim_index) is list):
dim_index = pd.DataFrame(list(zip(dim_index, range(0, len(dim_index)))), index=dim_index, columns=['id', 'index'])
else:
dim_index = pd.DataFrame(list(zip(dim_index.keys(), dim_index.values())), index=dim_index.keys(), columns=['id', 'index'])
dim_label = pd.merge(dim_label, dim_index, on='id').sort_index(by='index')
return dim_label | Get label from a given dimension.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
dim (string): dimension name obtained from JSON file.
Returns:
dim_label(pandas.DataFrame): DataFrame with label-based dimension data. | codesearchnet |
def iterate_sequences(consumer_fn, output_template, sequences, length, chunk_length=None, batch_size=None, num_epochs=1, padding_value=0):
if (not length.shape[0].value):
raise ValueError('Batch size of length tensor must be set.')
num_sequences = length.shape[0].value
sequences = dict(sequence=sequences, length=length)
dataset = tf.data.Dataset.from_tensor_slices(sequences)
dataset = dataset.repeat(num_epochs)
if chunk_length:
dataset = dataset.map(remove_padding).flat_map((lambda x: tf.data.Dataset.from_tensor_slices(chunk_sequence(x, chunk_length, padding_value))))
num_chunks = tf.reduce_sum((((length - 1) // chunk_length) + 1))  # ceil(length / chunk_length) per sequence
else:
num_chunks = num_sequences
if batch_size:
dataset = dataset.shuffle((num_sequences // 2))  # shuffle buffer proportional to the number of sequences
dataset = dataset.batch((batch_size or num_sequences))
dataset = dataset.prefetch(num_epochs)
iterator = dataset.make_initializable_iterator()
with tf.control_dependencies([iterator.initializer]):
num_batches = ((num_epochs * num_chunks) // (batch_size or num_sequences))
return tf.scan((lambda _1, index: consumer_fn(iterator.get_next())), tf.range(num_batches), output_template, parallel_iterations=1) | Iterate over batches of chunks of sequences for multiple epochs.
The batch dimension of the length tensor must be set because it is used to
infer buffer sizes.
Args:
consumer_fn: Function creating the operation to process the data.
output_template: Nested tensors of same shape and dtype as outputs.
sequences: Nested collection of tensors with batch and time dimension.
length: Tensor containing the length for each sequence.
chunk_length: Split sequences into chunks of this size; optional.
batch_size: Split epochs into batches of this size; optional.
num_epochs: How many times to repeat over the data.
padding_value: Value used for padding the last chunk after the sequence.
Raises:
ValueError: Unknown batch size of the length tensor.
Returns:
Concatenated nested tensors returned by the consumer. | codesearchnet |
def update_ref(profile, ref, sha):
resource = ('/refs/' + ref)
payload = {'sha': sha}
data = api.patch_request(profile, resource, payload)
return prepare(data) | Point a ref to a new SHA.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
ref
The ref to update, e.g., ``heads/my-feature-branch``.
sha
The SHA of the commit to point the ref to.
Returns
A dict with data about the ref. | codesearchnet |
def get_json(self):
try:
usernotes = self.subreddit.wiki[self.page_name].content_md
notes = json.loads(usernotes)
except NotFound:
self._init_notes()
else:
if (notes['ver'] != self.schema):
raise RuntimeError('Usernotes schema is v{0}, puni requires v{1}'.format(notes['ver'], self.schema))
self.cached_json = self._expand_json(notes)
return self.cached_json | Get the JSON stored on the usernotes wiki page.
Returns a dict representation of the usernotes (with the notes BLOB
decoded).
Raises:
RuntimeError if the usernotes version is incompatible with this
version of puni. | codesearchnet |
def set_members(self, name, members, mode=None):
commands = list()
grpid = re.search('(\\d+)', name).group()
current_members = self.get_members(name)
lacp_mode = self.get_lacp_mode(name)
if (mode and (mode != lacp_mode)):
lacp_mode = mode
self.set_lacp_mode(grpid, lacp_mode)
for member in set(current_members).difference(members):
commands.append(('interface %s' % member))
commands.append(('no channel-group %s' % grpid))
for member in set(members).difference(current_members):
commands.append(('interface %s' % member))
commands.append(('channel-group %s mode %s' % (grpid, lacp_mode)))
return (self.configure(commands) if commands else True) | Configures the array of member interfaces for the Port-Channel
Args:
name(str): The Port-Channel interface name to configure the member
interfaces
members(list): The list of Ethernet interfaces that should be
member interfaces
mode(str): The LACP mode to configure the member interfaces to.
Valid values are 'on, 'passive', 'active'. When there are
existing channel-group members and their lacp mode differs
from this attribute, all of those members will be removed and
then re-added using the specified lacp mode. If this attribute
is omitted, the existing lacp mode will be used for new
member additions.
Returns:
True if the operation succeeds otherwise False | codesearchnet |
def get_firmware(self):
firmware_uri = '{}/firmware'.format(self.data['uri'])
return self._helper.do_get(firmware_uri) | Gets baseline firmware information for a SAS Logical Interconnect.
Returns:
dict: SAS Logical Interconnect Firmware. | codesearchnet |
def create(configs):
if not configs:
raise Error(ANDROID_DEVICE_EMPTY_CONFIG_MSG)
elif configs == ANDROID_DEVICE_PICK_ALL_TOKEN:
ads = get_all_instances()
elif not isinstance(configs, list):
raise Error(ANDROID_DEVICE_NOT_LIST_CONFIG_MSG)
elif isinstance(configs[0], dict):
ads = get_instances_with_configs(configs)
elif isinstance(configs[0], basestring):
ads = get_instances(configs)
else:
raise Error('No valid config found in: %s' % configs)
valid_ad_identifiers = list_adb_devices() + list_adb_devices_by_usb_id()
for ad in ads:
if ad.serial not in valid_ad_identifiers:
raise DeviceError(ad, 'Android device is specified in config but'
' is not attached.')
_start_services_on_ads(ads)
return ads | Creates AndroidDevice controller objects.
Args:
configs: A list of dicts, each representing a configuration for an
Android device.
Returns:
A list of AndroidDevice objects. | juraj-google-style |
def one_hot_class_label_loss(top_out,
targets,
model_hparams,
vocab_size,
weights_fn):
del model_hparams, vocab_size
loss_scale = tf.losses.softmax_cross_entropy(
onehot_labels=targets, logits=top_out)
weights = weights_fn(targets)
loss_denom = tf.reduce_sum(weights)
return loss_scale, loss_denom | Apply softmax cross-entropy between outputs and targets.
Args:
top_out: logits Tensor with shape [batch, ?, ?, num_classes]
targets: one-hot encoding Tensor with shape [batch, ?, ?, num_classes]
model_hparams: HParams, model hyperparmeters.
vocab_size: int, vocabulary size.
weights_fn:
Returns:
loss_scale (cross-entropy), loss_denom | juraj-google-style |
def is_orthogonal(
matrix: np.ndarray,
*,
rtol: float = 1e-5,
atol: float = 1e-8) -> bool:
return (matrix.shape[0] == matrix.shape[1] and
np.all(np.imag(matrix) == 0) and
np.allclose(matrix.dot(matrix.T), np.eye(matrix.shape[0]),
rtol=rtol,
atol=atol)) | Determines if a matrix is approximately orthogonal.
A matrix is orthogonal if it's square and real and its transpose is its
inverse.
Args:
matrix: The matrix to check.
rtol: The per-matrix-entry relative tolerance on equality.
atol: The per-matrix-entry absolute tolerance on equality.
Returns:
Whether the matrix is orthogonal within the given tolerance. | juraj-google-style |
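A quick check with two 2x2 matrices; the rotation passes and the shear does not.

import numpy as np

rotation = np.array([[0.0, -1.0],
                     [1.0,  0.0]])  # 90-degree rotation
shear = np.array([[1.0, 1.0],
                  [0.0, 1.0]])

assert is_orthogonal(rotation)
assert not is_orthogonal(shear)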
def __init__(self, input_spec):
def maybe_parse_byte_size(s):
return parse_byte_size(s) if isinstance(s, str) else int(s)
self._num_records = input_spec['numRecords']
self._key_size = maybe_parse_byte_size(input_spec.get('keySizeBytes', 1))
self._hot_key_fraction = input_spec.get('hotKeyFraction', 0)
self._num_hot_keys = input_spec.get('numHotKeys', 0)
self._value_size = maybe_parse_byte_size(input_spec.get('valueSizeBytes', 1))
self._total_size = self.element_size * self._num_records
self._initial_splitting = input_spec['bundleSizeDistribution']['type'] if 'bundleSizeDistribution' in input_spec else 'const'
if self._initial_splitting != 'const' and self._initial_splitting != 'zipf':
raise ValueError('Only const and zipf distributions are supported for determining sizes of bundles produced by initial splitting. Received: %s', self._initial_splitting)
self._initial_splitting_num_bundles = input_spec['forceNumInitialBundles'] if 'forceNumInitialBundles' in input_spec else 0
if self._initial_splitting == 'zipf':
self._initial_splitting_distribution_parameter = input_spec['bundleSizeDistribution']['param']
if self._initial_splitting_distribution_parameter < 1:
raise ValueError('Parameter for a Zipf distribution must be larger than 1. Received %r.', self._initial_splitting_distribution_parameter)
else:
self._initial_splitting_distribution_parameter = 0
self._dynamic_splitting = 'none' if 'splitPointFrequencyRecords' in input_spec and input_spec['splitPointFrequencyRecords'] == 0 else 'perfect'
if 'delayDistribution' in input_spec:
if input_spec['delayDistribution']['type'] != 'const':
raise ValueError("SyntheticSource currently only supports delay distributions of type 'const'. Received %s.", input_spec['delayDistribution']['type'])
self._sleep_per_input_record_sec = float(input_spec['delayDistribution']['const']) / 1000
if self._sleep_per_input_record_sec and self._sleep_per_input_record_sec < 0.001:
raise ValueError('Sleep time per input record must be at least 1e-3. Received: %r', self._sleep_per_input_record_sec)
else:
self._sleep_per_input_record_sec = 0
self.gen_algo = input_spec.get('algorithm', None)
if self.gen_algo not in (None, 'builtin', 'lcg'):
raise ValueError('Unknown algorithm for input_spec: %s. Supported algorithms are "builtin" and "lcg".', self.gen_algo) | Initiates a synthetic source.
Args:
input_spec: Input specification of the source. See corresponding option in
function 'parse_args()' below for more details.
Raises:
ValueError: if input parameters are invalid. | github-repos |
def _consolidate_numeric_values(row_index_to_values, min_consolidation_fraction, debug_info):
type_counts = collections.Counter()
for numeric_values in row_index_to_values.values():
type_counts.update(_get_all_types(numeric_values))
if not type_counts:
return {}
max_count = max(type_counts.values())
if max_count < len(row_index_to_values) * min_consolidation_fraction:
return {}
valid_types = set()
for value_type, count in type_counts.items():
if count == max_count:
valid_types.add(value_type)
if len(valid_types) > 1:
assert DATE_TYPE in valid_types
max_type = DATE_TYPE
else:
max_type = next(iter(valid_types))
new_row_index_to_value = {}
for index, values in row_index_to_values.items():
for value in values:
if _get_value_type(value) == max_type:
new_row_index_to_value[index] = value
break
return new_row_index_to_value | Finds the most common numeric values in a column and returns them
Args:
row_index_to_values:
For each row index all the values in that cell.
min_consolidation_fraction:
Fraction of cells that need to have consolidated value.
debug_info:
Additional information only used for logging
Returns:
For each row index the first value that matches the most common value. Rows that don't have a matching value
are dropped. Empty list if values can't be consolidated. | github-repos |
def setup(self, paths=None):
if (not paths):
self.state.add_error('No `paths` argument provided in recipe, bailing', critical=True)
else:
self._paths = [path.strip() for path in paths.strip().split(',')] | Sets up the _paths attribute.
Args:
paths: Comma-separated list of strings representing the paths to collect. | codesearchnet |
def scan(self, proxy_scanner, expected_num=20, val_thr_num=4, queue_timeout=3, val_timeout=5, out_file='proxies.json'):
try:
proxy_scanner.scan()
self.logger.info('starting {} threads to validating proxies...'.format(val_thr_num))
val_threads = []
for i in range(val_thr_num):
t = threading.Thread(name='val-{:0>2d}'.format((i + 1)), target=self.validate, kwargs=dict(proxy_scanner=proxy_scanner, expected_num=expected_num, queue_timeout=queue_timeout, val_timeout=val_timeout))
t.daemon = True
val_threads.append(t)
t.start()
for t in val_threads:
t.join()
self.logger.info('Proxy scanning done!')
except:
raise
finally:
if (out_file is not None):
self.save(out_file) | Scan and validate proxies
Firstly, call the `scan` method of `proxy_scanner`, then using multiple
threads to validate them.
Args:
proxy_scanner: A ProxyScanner object.
expected_num: Max number of valid proxies to be scanned.
val_thr_num: Number of threads used for validating proxies.
queue_timeout: Timeout for getting a proxy from the queue.
val_timeout: An integer passed to `is_valid` as argument `timeout`.
out_file: A string or None. If not None, the proxies will be saved
into `out_file`. | codesearchnet |
def Snapshot(self, request, global_params=None):
config = self.GetMethodConfig('Snapshot')
return self._RunMethod(config, request, global_params=global_params) | Snapshot the state of a streaming job.
Args:
request: (DataflowProjectsLocationsJobsSnapshotRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Snapshot) The response message. | github-repos |
def use_value_spec(self, value_spec: Optional[pg_typing.Dict], allow_partial: bool=False) -> 'Dict':
if value_spec is None:
self._value_spec = None
self._accessor_writable = True
return self
if not isinstance(value_spec, pg_typing.Dict):
raise ValueError(self._error_message(f'Value spec for list must be a `pg.typing.Dict` object. Encountered: {value_spec!r}'))
if self._value_spec and self._value_spec != value_spec:
raise RuntimeError(self._error_message(f'Dict is already bound with a different value spec: {self._value_spec}. New value spec: {value_spec}.'))
self._allow_partial = allow_partial
if flags.is_type_check_enabled():
value_spec.apply(self, allow_partial=base.accepts_partial(self), child_transform=base.symbolic_transform_fn(self._allow_partial), root_path=self.sym_path)
else:
self._value_spec = value_spec
return self | Applies a ``pg.typing.Dict`` as the value spec for current dict.
Args:
value_spec: A Dict ValueSpec to apply to this Dict.
If current Dict is schema-less (whose immediate members are not
validated against schema), and `value_spec` is not None, the value spec
will be applied to the Dict.
Or else if current Dict is already symbolic (whose immediate members
are under the constraint of a Dict value spec), and `value_spec` is
None, current Dict will become schema-less. However, the schema
constraints for non-immediate members will remain.
allow_partial: Whether allow partial dict based on the schema. This flag
will override allow_partial flag in __init__ for spec-less Dict.
Returns:
Self.
Raises:
ValueError: validation failed due to value error.
RuntimeError: Dict is already bound with another spec.
TypeError: type errors during validation.
KeyError: key errors during validation. | github-repos |
def get_address_coords(self, address):
url = "https:
r = requests.get(url)
r.raise_for_status()
results = r.json()['results']
lat = results[0]['geometry']['location']['lat']
lng = results[0]['geometry']['location']['lng']
return lat, lng | Use the google geocoder to get latitude and longitude for an address string
Args:
address: any address string
Returns:
A tuple of (lat,lng) | juraj-google-style |
def __init__(self, channel):
self.ListIntents = channel.unary_unary(
'/google.cloud.dialogflow.v2.Intents/ListIntents',
request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.ListIntentsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.ListIntentsResponse.FromString,
)
self.GetIntent = channel.unary_unary(
'/google.cloud.dialogflow.v2.Intents/GetIntent',
request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.GetIntentRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.Intent.FromString,
)
self.CreateIntent = channel.unary_unary(
'/google.cloud.dialogflow.v2.Intents/CreateIntent',
request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.CreateIntentRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.Intent.FromString,
)
self.UpdateIntent = channel.unary_unary(
'/google.cloud.dialogflow.v2.Intents/UpdateIntent',
request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.UpdateIntentRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.Intent.FromString,
)
self.DeleteIntent = channel.unary_unary(
'/google.cloud.dialogflow.v2.Intents/DeleteIntent',
request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.DeleteIntentRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.BatchUpdateIntents = channel.unary_unary(
'/google.cloud.dialogflow.v2.Intents/BatchUpdateIntents',
request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.BatchUpdateIntentsRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.BatchDeleteIntents = channel.unary_unary(
'/google.cloud.dialogflow.v2.Intents/BatchDeleteIntents',
request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.BatchDeleteIntentsRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
) | Constructor.
Args:
channel: A grpc.Channel. | juraj-google-style |
def _ReadEntry(self, line):
line = line.split()
map_entry = automount.AutomountMapEntry()
try:
map_entry.key = line[0]
if len(line) > 2:
map_entry.options = line[1]
map_entry.location = line[2]
else:
map_entry.location = line[1]
except IndexError:
return None
return map_entry | Return an AutomountMapEntry from a record in the target cache.
Args:
line: A string from a file cache.
Returns:
An AutomountMapEntry if the line is successfully parsed, None otherwise. | github-repos |
def reduce(cls, requirements: Iterable['FetchRequirement']) -> 'FetchRequirement':
return reduce((lambda x, y: (x | y)), requirements, cls.NONE) | Reduce a set of fetch requirements into a single requirement.
Args:
requirements: The set of fetch requirements. | codesearchnet |
def publishApp(self, app_info, map_info=None, fsInfo=None):
if self.securityhandler is None:
print ("Security handler required")
return
appDet = None
try:
app_results = []
if isinstance(app_info, list):
for appDet in app_info:
app_results.append(self._publishAppLogic(appDet=appDet,map_info=map_info,fsInfo=fsInfo))
else:
app_results.append(self._publishAppLogic(appDet=app_info,map_info=map_info,fsInfo=fsInfo))
return app_results
except (common.ArcRestHelperError) as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "publishApp",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
appDet = None
del appDet
gc.collect() | Publishes apps to AGOL/Portal
Args:
app_info (list): A list of JSON configuration apps to publish.
map_info (list): Defaults to ``None``.
fsInfo (list): Defaults to ``None``.
Returns:
dict: A dictionary of results objects. | juraj-google-style |
def users_getPresence(self, *, user: str, **kwargs) -> SlackResponse:
kwargs.update({'user': user})
return self.api_call('users.getPresence', http_verb='GET', params=kwargs) | Gets user presence information.
Args:
user (str): User to get presence info on. Defaults to the authed user.
e.g. 'W1234567890' | codesearchnet |
def _make_unique_slug(slug: str, language: str, is_unique: Callable[[str], bool]) -> str:
index = 1
unique_slug = slug
while not is_unique(unique_slug, language):
unique_slug = '%s-%d' % (slug, index)
index += 1
return unique_slug | Guarentees that the specified slug is unique by appending
a number until it is unique.
Arguments:
slug:
The slug to make unique.
is_unique:
Function that can be called to verify
whether the generate slug is unique.
Returns:
A guarenteed unique slug. | juraj-google-style |
def getdoc(object):
return _inspect.getdoc(object) | TFDecorator-aware replacement for inspect.getdoc.
Args:
object: An object, possibly decorated.
Returns:
The docstring associated with the object.
The outermost-decorated object is intended to have the most complete
documentation, so the decorated parameter is not unwrapped. | github-repos |
def create_position_ids_from_input_ids(self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int]=0):
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx | Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
symbols are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
x: torch.Tensor x:
Returns: torch.Tensor | github-repos |
def features(self):
buf = (ctypes.c_char * self.MAX_BUF_SIZE)()
self._dll.JLINKARM_GetFeatureString(buf)
result = ctypes.string_at(buf).decode().strip()
if len(result) == 0:
return list()
return result.split(', ') | Returns a list of the J-Link embedded features.
Args:
self (JLink): the ``JLink`` instance
Returns:
A list of strings, each a feature. Example:
``[ 'RDI', 'FlashBP', 'FlashDL', 'JFlash', 'GDB' ]`` | juraj-google-style |
def __init__(self, value=b''):
super(DigestValue, self).__init__(value, Tags.DIGEST_VALUE) | Construct a DigestValue object.
Args:
value (bytes): The bytes of the hash. Optional, defaults to
the empty byte string. | juraj-google-style |
def get_weights(self):
with self.distribute_strategy.scope():
return super(Model, self).get_weights() | Retrieves the weights of the model.
Returns:
A flat list of Numpy arrays. | github-repos |
class OrderedEnqueuer(PyDatasetEnqueuer):
def __init__(self, py_dataset, workers=1, use_multiprocessing=False, max_queue_size=10, shuffle=False):
super().__init__(py_dataset, workers, use_multiprocessing, max_queue_size)
self.shuffle = shuffle
if self.py_dataset.num_batches is None:
self.indices = itertools.count()
def _get_executor_init(self, workers):
def pool_fn(seqs):
pool = get_pool_class(True)(workers, initializer=init_pool_generator, initargs=(seqs, None, get_worker_id_queue()))
_DATA_POOLS.add(pool)
return pool
return pool_fn
def _run(self):
try:
if self.py_dataset.num_batches is not None:
indices = range(self.py_dataset.num_batches)
if self.shuffle:
indices = list(indices)
random.shuffle(indices)
self.indices = iter(indices)
self._send_py_dataset()
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
while self.is_running():
try:
i = next(self.indices)
self.future_queue.put(executor.apply_async(get_index, (self.uid, i)), block=True)
except StopIteration:
break
except Exception as e:
self.future_queue.put(e)
def get(self):
while self.is_running():
try:
inputs = self.ready_queue.get(block=False)
yield inputs
continue
except queue.Empty:
pass
try:
value = self.future_queue.get(block=True, timeout=5)
self.future_queue.task_done()
if isinstance(value, Exception):
raise value
inputs = value.get()
if inputs is not None:
yield inputs
except queue.Empty:
pass
except Exception as e:
self.stop(drain_queue_and_join=True)
raise e
raise ValueError('Iterator called after `on_epoch_end` or before `on_epoch_begin`.') | Builds a Enqueuer from a PyDataset.
Args:
py_dataset: A `keras.utils.PyDataset` object.
use_multiprocessing: use multiprocessing if True, otherwise threading
shuffle: whether to shuffle the data at the beginning of each epoch | github-repos |
def flatten(l):
for el in l:
if (isinstance(el, Iterable) and (not isinstance(el, (str, bytes))) and (not isinstance(el, dict))):
(yield from flatten(el))
else:
(yield el) | Flatten a multi-deminision list and return a iterable
Note that dict and str will not be expanded, instead, they will be kept as a single element.
Args:
l (list): The list needs to be flattened
Returns:
A iterable of flattened list. To have a list instead use ``list(flatten(l))`` | codesearchnet |
def _build_recursive_hd_scatter(input_tensors, devices):
num_devices = len(devices)
num_hops = int(math.log(num_devices, 2))
assert num_devices == 2 ** num_hops, 'num_devices must be a power of 2'
chunks = input_tensors
for h in reversed(range(0, num_hops)):
span = 2 ** h
group_size = span * 2
new_chunks = [[] for _ in devices]
for d in range(0, num_devices):
if d % group_size >= group_size / 2:
continue
left_idx = d
right_idx = d + span
left_dev = devices[left_idx]
right_dev = devices[right_idx]
with ops.device(left_dev):
new_chunks[left_idx] = array_ops.concat([chunks[left_idx], chunks[right_idx]], 0)
with ops.device(right_dev):
new_chunks[right_idx] = array_ops.concat([chunks[left_idx], chunks[right_idx]], 0)
chunks = new_chunks
return chunks | Construct the scatter phase of recursive halving-doubling all-reduce.
Args:
input_tensors: list of `tf.Tensor` that are fully-reduced shards.
devices: a list of strings naming the devices on which the reconstituted
full tensors should be placed.
Returns:
list of `tf.Tensor` which are the fully reduced tensors. | github-repos |
async def executor(func, *args, **kwargs):
def syncfunc():
return func(*args, **kwargs)
loop = asyncio.get_running_loop()
return await loop.run_in_executor(None, syncfunc) | Execute a function in an executor thread.
Args:
todo ((func,args,kwargs)): A todo tuple. | juraj-google-style |
def merge_with(self, other):
other = as_dimension(other)
self.assert_is_convertible_with(other)
if (self._value is None):
return Dimension(other.value)
else:
return Dimension(self._value) | Returns a Dimension that combines the information in `self` and `other`.
Dimensions are combined as follows:
```python
tf.Dimension(n) .merge_with(tf.Dimension(n)) == tf.Dimension(n)
tf.Dimension(n) .merge_with(tf.Dimension(None)) == tf.Dimension(n)
tf.Dimension(None).merge_with(tf.Dimension(n)) == tf.Dimension(n)
tf.Dimension(None).merge_with(tf.Dimension(None)) == tf.Dimension(None)
tf.Dimension(n) .merge_with(tf.Dimension(m)) # raises ValueError for n != m
```
Args:
other: Another Dimension.
Returns:
A Dimension containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not convertible (see
is_convertible_with). | codesearchnet |
def find_vulnerabilities(cfg_list, blackbox_mapping_file, sources_and_sinks_file, interactive=False, nosec_lines=defaultdict(set)):
vulnerabilities = list()
definitions = parse(sources_and_sinks_file)
with open(blackbox_mapping_file) as infile:
blackbox_mapping = json.load(infile)
for cfg in cfg_list:
find_vulnerabilities_in_cfg(cfg, definitions, Lattice(cfg.nodes), blackbox_mapping, vulnerabilities, interactive, nosec_lines)
if interactive:
with open(blackbox_mapping_file, 'w') as outfile:
json.dump(blackbox_mapping, outfile, indent=4)
return vulnerabilities | Find vulnerabilities in a list of CFGs from a trigger_word_file.
Args:
cfg_list(list[CFG]): the list of CFGs to scan.
blackbox_mapping_file(str)
sources_and_sinks_file(str)
interactive(bool): determines if we ask the user about blackbox functions not in the mapping file.
Returns:
A list of vulnerabilities. | codesearchnet |
def stop_requested(self):
return self._stop_requested | Returns whether a stop is requested or not.
If true, `MonitoredSession` stops iterations.
Returns:
A `bool` | github-repos |
def draw(self, current_time, frame_time):
self.set_default_viewport()
self.timeline.draw(current_time, frame_time, self.fbo) | Draws a frame. Internally it calls the
configured timeline's draw method.
Args:
current_time (float): The current time (preferrably always from the configured timer class)
frame_time (float): The duration of the previous frame in seconds | juraj-google-style |
def _ParseDateTimeValue(self, byte_stream, file_offset):
datetime_value_map = self._GetDataTypeMap('cups_ipp_datetime_value')
try:
value = self._ReadStructureFromByteStream(
byte_stream, file_offset, datetime_value_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse datetime value with error: {0!s}'.format(exception))
direction_from_utc = chr(value.direction_from_utc)
rfc2579_date_time_tuple = (
value.year, value.month, value.day_of_month,
value.hours, value.minutes, value.seconds, value.deciseconds,
direction_from_utc, value.hours_from_utc, value.minutes_from_utc)
return dfdatetime_rfc2579_date_time.RFC2579DateTime(
rfc2579_date_time_tuple=rfc2579_date_time_tuple) | Parses a CUPS IPP RFC2579 date-time value from a byte stream.
Args:
byte_stream (bytes): byte stream.
file_offset (int): offset of the attribute data relative to the start of
the file-like object.
Returns:
dfdatetime.RFC2579DateTime: RFC2579 date-time stored in the value.
Raises:
ParseError: when the RFC2579 date-time value cannot be parsed. | juraj-google-style |
Overview
This dataset contains Python code-docstring pairs in which the docstrings follow the Google style. A Google-style docstring is structured as follows:
<Description of the code>
Args:
    <var1> (<data-type>): <description of var1>
    <var2> (<data-type>): <description of var2>
Returns:
    <var3> (<data-type>): <description of var3>
Raises:
    <var4> (<data-type>): <description of var4>
The exact format varies widely (some docstrings add further sections such as Examples or Notes), but generally speaking each docstring should contain an Args/Parameters section and a Returns section.
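As a concrete illustration, a function documented in this style might look like the following. The function below is made up for this card and is not taken from the dataset:

```python
def scale_values(values, factor):
    """Multiplies every value in a list by a constant factor.

    Args:
        values (list[float]): The numbers to scale.
        factor (float): The multiplier applied to each element.

    Returns:
        list[float]: A new list containing the scaled values.

    Raises:
        TypeError: If `factor` is not a number.
    """
    if not isinstance(factor, (int, float)):
        raise TypeError("factor must be a number")
    return [value * factor for value in values]
```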
Source
The dataset was gathered from 3 different sources:
CodeSearchNet
From its Python split of roughly 250k samples, about 23k were extracted, a retention rate of under 10%: most CodeSearchNet samples carry informal docstrings that consist only of a description, with no structured sections.
Repositories Under Google's GitHub Organization Page
You can find the page here. The repositories are given by the following list:
repos = [
"https://github.com/google/python-fire",
"https://github.com/google/yapf",
"https://github.com/google/pytype",
"https://github.com/google/tf-quant-finance",
"https://github.com/google/budoux",
"https://github.com/google/mobly",
"https://github.com/google/temporian",
"https://github.com/google/pyglove",
"https://github.com/google/subpar",
"https://github.com/google/weather-tools",
"https://github.com/google/ci_edit",
"https://github.com/google/etils",
"https://github.com/google/pcbdl",
"https://github.com/google/starthinker",
"https://github.com/google/pytruth",
"https://github.com/google/nsscache",
"https://github.com/google/megalista",
"https://github.com/google/fhir-py",
"https://github.com/google/chatbase-python",
"https://github.com/tensorflow/tensorflow",
"https://github.com/google/project-OCEAN",
"https://github.com/google/qhbm-library",
"https://github.com/google/data-quality-monitor",
"https://github.com/google/genai-processors",
"https://github.com/google/python-proto-converter",
"https://github.com/google/sprockets",
"https://github.com/keras-team/keras",
"https://github.com/scikit-learn/scikit-learn",
"https://github.com/apache/beam",
"https://github.com/huggingface/transformers"
]
A total of ~11k samples were gathered from this source.
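The card does not describe the harvesting tooling itself; the sketch below shows one plausible way to pull (code, docstring) pairs out of a Python source file with the standard ast module, and is not the pipeline actually used. The Args/Returns check is a rough stand-in for the real filtering described under Preprocessing Steps.

```python
import ast

def extract_pairs(source: str):
    # Yield (code, docstring) pairs for every documented function in a module.
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            doc = ast.get_docstring(node)
            # Keep only docstrings that look Google-style: an Args and a Returns section.
            if doc and "Args:" in doc and "Returns:" in doc:
                # Drop the docstring statement so the code field holds code only.
                if isinstance(node.body[0], ast.Expr):
                    node.body = node.body[1:] or [ast.Pass()]
                yield ast.unparse(node), doc
```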
Juraj's Python Google-style Docstrings Dataset
I found this dataset, which was made by user Juraj-juraj; you can find the dataset here. A total of ~25k samples were gathered from this source after further preprocessing.
Preprocessing Steps
The following cleaning, normalization, and preprocessing steps were performed (a sketch of two of these filters follows the list):
- Removed duplicates based on both code and docstring
- Removed samples with empty code or docstrings
- Removed samples with extremely short entries (<20 chars)
- Removed samples with extremely long entries (>5000 chars)
- Removed comments and docstrings from the code
- Removed samples whose docstring isn't in English (using langdetect)
- Removed samples whose docstring contained special characters such as HTML tags or URLs
- Removed samples whose docstring, under the CodeT5+ tokenizer, had < 12 or > 256 tokens
- Normalized all docstring entries by removing any indentation
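A minimal sketch of the language and token-length filters is shown below. The card names the CodeT5+ tokenizer but not the exact checkpoint, so Salesforce/codet5p-220m is an assumption here, as is the exact way special tokens are counted:

```python
from langdetect import detect, LangDetectException
from transformers import AutoTokenizer

# Checkpoint name is an assumption; the card only says "CodeT5+ tokenizer".
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codet5p-220m")

def keep_sample(code: str, docstring: str) -> bool:
    # Character-length bounds from the list above, applied to both fields.
    for text in (code, docstring):
        if not 20 <= len(text) <= 5000:
            return False
    # Keep English docstrings only.
    try:
        if detect(docstring) != "en":
            return False
    except LangDetectException:
        return False
    # Token bounds from the list above: drop docstrings with < 12 or > 256 tokens.
    n_tokens = len(tokenizer(docstring)["input_ids"])
    return 12 <= n_tokens <= 256
```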
Data Structure
The data structure of the dataset is as follows:
<code> : <The code, stripped of docstrings and comments>,
<docstring> : <The corresponding docstring of the code>,
<source> : <The source the code came from>
The source is any of the following:
CodeSearchNet - from the CodeSearchNet dataset
github-repos - from the repositories under Google's Organization GitHub page
juraj-google-style - from Juraj's Python Google-style docstring dataset
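A minimal example of loading the dataset with the Hugging Face datasets library; the split name "train" is an assumption, so check the dataset viewer for the actual split names:

```python
from datasets import load_dataset

# Split name is an assumption; adjust if the dataset exposes a different split.
ds = load_dataset("Mir-2002/python-google-style-docstrings", split="train")

row = ds[0]
print(row["source"])      # one of the three source labels described above
print(row["docstring"])   # the Google-style docstring paired with row["code"]

# Example: keep only rows harvested from the Google GitHub organization repositories.
github_rows = ds.filter(lambda r: r["source"] == "github-repos")
```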