code | docstring | source
---|---|---|
def extract_value_from_output(canary, split_offset, kal_out):
retval = ""
while retval == "":
for line in kal_out.splitlines():
if canary in line:
retval = str(line.split()[split_offset])
if retval == "":
retval = None
return retval
|
Return value parsed from output.
Args:
canary(str): This string must exist in the target line.
split_offset(int): Split offset for target value in string.
kal_out(str): Output from kal.
|
juraj-google-style
|
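As an illustration of how the `canary` and `split_offset` arguments interact, here is a minimal sketch with made-up kal-style output (the sample text and value are hypothetical):

```python
# Hypothetical kal-style output; the line containing the canary is split on
# whitespace and the token at split_offset is returned as a string.
sample_output = (
    "Found 1 device(s):\n"
    "  0: Generic RTL2832U (e4000 tuner)\n"
    "average absolute error: -21.432 kHz\n"
)

print(extract_value_from_output('average absolute error', 3, sample_output))
# -> '-21.432'
```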
def parse_isoformat(timestamp):
if len(timestamp) == 20:
zone = TzOffset('+00:00')
timestamp = timestamp[:-1]
elif len(timestamp) == 24:
zone = TzOffset('%s:%s' % (timestamp[-5:-2], timestamp[-2:]))
timestamp = timestamp[:-5]
elif len(timestamp) == 25:
zone = TzOffset(timestamp[-6:])
timestamp = timestamp[:-6]
timestamp = Timestamp.strptime(timestamp, '%Y-%m-%dT%H:%M:%S')
timestamp = timestamp.replace(tzinfo=zone)
return timestamp
|
Parse an ISO 8601 formatted time stamp.
Args:
timestamp (str): Timestamp to parse
Returns:
Timestamp: Parsed timestamp
|
juraj-google-style
|
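The same length-based branching can be sketched with only the standard library; this is an illustration, not the library code, since the original depends on its own `Timestamp` and `TzOffset` types. Note that the original has no else branch, so an unexpected length would surface as a `NameError`; the sketch raises `ValueError` instead:

```python
from datetime import datetime, timedelta, timezone

def _offset(sign, hours, minutes):
    delta = timedelta(hours=int(hours), minutes=int(minutes))
    return timezone(-delta if sign == '-' else delta)

def parse_isoformat_sketch(timestamp):
    if len(timestamp) == 20:      # 2021-03-01T12:00:00Z
        zone, timestamp = timezone.utc, timestamp[:-1]
    elif len(timestamp) == 24:    # 2021-03-01T12:00:00+0100
        zone = _offset(timestamp[-5], timestamp[-4:-2], timestamp[-2:])
        timestamp = timestamp[:-5]
    elif len(timestamp) == 25:    # 2021-03-01T12:00:00+01:00
        zone = _offset(timestamp[-6], timestamp[-5:-3], timestamp[-2:])
        timestamp = timestamp[:-6]
    else:
        raise ValueError('unsupported timestamp length: %d' % len(timestamp))
    return datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S').replace(tzinfo=zone)

print(parse_isoformat_sketch('2021-03-01T12:00:00+01:00').isoformat())
# -> 2021-03-01T12:00:00+01:00
```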
def decodes(self, s: str) -> BioCCollection:
tree = etree.parse(io.BytesIO(bytes(s, encoding='UTF-8')))
collection = self.__parse_collection(tree.getroot())
collection.encoding = tree.docinfo.encoding
collection.standalone = tree.docinfo.standalone
collection.version = tree.docinfo.xml_version
return collection
|
Deserialize ``s`` to a BioC collection object.
Args:
s: a "str" instance containing a BioC collection
Returns:
an object of BioCCollection
|
juraj-google-style
|
def from_text_file(file_path):
results = []
with io.open(file_path, 'r', encoding='utf-8') as f:
data_strs = f.read().split(MonsoonData.delimiter)
for data_str in data_strs:
results.append(MonsoonData.from_string(data_str))
return results
|
Load MonsoonData objects from a text file generated by
MonsoonData.save_to_text_file.
Args:
file_path: The full path of the file to load from, including the file
name.
Returns:
A list of MonsoonData objects.
|
codesearchnet
|
def convert_error(exc_src, exc_dest):
def wrap(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except exc_dest:
raise
except exc_src as err:
reraise(exc_dest, err, sys.exc_info()[2])
return wrapper
return wrap
|
A decorator for reraising exceptions with a different type.
Mostly useful for IOError.
Args:
exc_src (type): The source exception type
exc_dest (type): The target exception type.
|
juraj-google-style
|
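A usage sketch of the decorator; the wrapped function and the `reraise` stand-in below are hypothetical, and the decorator itself additionally expects `functools.wraps` and `sys` to be imported in its own module:

```python
import sys
from functools import wraps

def reraise(exc_type, value, tb):
    # Minimal stand-in for the helper the decorator calls: wrap the original
    # exception in the target type while keeping the original traceback.
    raise exc_type(value).with_traceback(tb)

@convert_error(KeyError, ValueError)
def lookup(table, key):
    return table[key]

print(lookup({'a': 1}, 'a'))          # 1
try:
    lookup({'a': 1}, 'missing')
except ValueError as err:
    print('KeyError re-raised as ValueError:', err)
```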
def publish(self, event_type: str, event_data: dict = None):
import inspect
import os.path
_stack = inspect.stack()
_origin = os.path.basename(_stack[3][1]) + '::' + \
_stack[3][3]+'::L{}'.format(_stack[3][2])
publish(event_type=event_type,
event_data=event_data,
object_type=self._type,
object_id=self._id,
object_key=self._key,
origin=_origin)
|
Publish an event associated with the scheduling object.
Note:
Ideally publish should not be used directly but by other methods
which perform actions on the object.
Args:
event_type (str): Type of event.
event_data (dict, optional): Event data.
|
juraj-google-style
|
def __init__(self, idx, name="select_input"):
super(SelectInput, self).__init__(name=name)
self._check_type(idx)
self._idx = idx
|
Module constructor.
Args:
idx: Indexes of the tensors to select. If `idx` is an integer, then
a `Tensor` is returned. If `idx` is a (nested) list/tuple, then a
(nested) tuple of `Tensor` is returned.
name: Name of the module.
Raises:
TypeError: If `idx` is not an list, tuple or integer.
|
juraj-google-style
|
def calc_padding(fmt, align):
remain = struct.calcsize(fmt) % align
if remain == 0:
return ""
return 'x' * (align - remain)
|
Calculate how many padding bytes needed for ``fmt`` to be aligned to
``align``.
Args:
fmt (str): :mod:`struct` format.
align (int): alignment (2, 4, 8, etc.)
Returns:
str: padding format (e.g., various number of 'x').
>>> calc_padding('b', 2)
'x'
>>> calc_padding('b', 3)
'xx'
|
juraj-google-style
|
def tracking_metadata(self):
return json_utils.Encoder().encode(self.python_properties)
|
String stored in metadata field in the SavedModel proto.
Returns:
A serialized JSON storing information necessary for recreating this layer.
|
github-repos
|
class Llama4VisionEncoder(nn.Module):
def __init__(self, config: Llama4VisionConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([Llama4VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
self.config = config
def forward(self, hidden_states: torch.Tensor, freqs_ci: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for encoder_layer in self.layers:
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, freqs_ci, attention_mask, output_attentions)
else:
layer_outputs = encoder_layer(hidden_state=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, freqs_ci=freqs_ci)
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = layer_outputs[0]
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
|
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`Llama4VisionEncoderLayer`].
Args:
config: Llama4VisionConfig
|
github-repos
|
def set_forced_variation(self, experiment_key, user_id, variation_key):
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('set_forced_variation'))
return False
if not validator.is_non_empty_string(experiment_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))
return False
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return False
return self.config.set_forced_variation(experiment_key, user_id, variation_key)
|
Force a user into a variation for a given experiment.
Args:
experiment_key: A string key identifying the experiment.
user_id: The user ID.
variation_key: A string variation key that specifies the variation into which the user
will be forced. If None, the existing experiment-to-variation mapping is cleared.
Returns:
A boolean value that indicates if the set completed successfully.
|
juraj-google-style
|
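A short usage sketch, assuming `optimizely_client` is an already-constructed Optimizely client and the experiment, user, and variation keys are placeholders:

```python
# Pin a user to a specific variation...
optimizely_client.set_forced_variation('checkout_flow_test', 'user_123', 'variation_b')

# ...and later clear the mapping by passing None as the variation key.
optimizely_client.set_forced_variation('checkout_flow_test', 'user_123', None)
```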
def AddAnalysisReport(self, analysis_report):
self._RaiseIfNotWritable()
self._storage_file.AddAnalysisReport(analysis_report)
report_identifier = analysis_report.plugin_name
self._session.analysis_reports_counter['total'] += 1
self._session.analysis_reports_counter[report_identifier] += 1
self.number_of_analysis_reports += 1
|
Adds an analysis report.
Args:
analysis_report (AnalysisReport): analysis report.
Raises:
IOError: when the storage writer is closed.
OSError: when the storage writer is closed.
|
juraj-google-style
|
def parse_package_string(path):
parts = path.split('.')
if parts[(- 1)][0].isupper():
return ('.'.join(parts[:(- 1)]), parts[(- 1)])
return (path, '')
|
Parse the effect package string.
Can contain the package python path or path to effect class in an effect package.
Examples::
# Path to effect package
examples.cubes
# Path to effect class
examples.cubes.Cubes
Args:
path: python path to effect package. May also include effect class name.
Returns:
tuple: (package_path, effect_class)
|
codesearchnet
|
def get_likelihood(self, uni_matrix):
if self.parents is None:
left_u = uni_matrix[:, self.L]
right_u = uni_matrix[:, self.R]
else:
left_ing = list(self.D - self.parents[0].D)[0]
right_ing = list(self.D - self.parents[1].D)[0]
left_u = uni_matrix[self.L, left_ing]
right_u = uni_matrix[self.R, right_ing]
copula = Bivariate(self.name)
copula.theta = self.theta
X_left_right = np.array([[left_u, right_u]])
X_right_left = np.array([[right_u, left_u]])
value = np.sum(copula.probability_density(X_left_right))
left_given_right = copula.partial_derivative(X_left_right)
right_given_left = copula.partial_derivative(X_right_left)
return value, left_given_right, right_given_left
|
Compute likelihood given a U matrix.
Args:
uni_matrix(numpy.array): Matrix to compute the likelihood.
Return:
tuple(np.ndarray, np.ndarray, np.array): likelihood and conditional values.
|
juraj-google-style
|
def delete_idx_status(self, rdf_class):
sparql_template = '\n DELETE\n {{\n ?s kds:esIndexTime ?esTime .\n ?s kds:esIndexError ?esError .\n }}\n WHERE\n {{\n\n VALUES ?rdftypes {{\n\t\t{} }} .\n ?s a ?rdftypes .\n OPTIONAL {{\n ?s kds:esIndexTime ?esTime\n }}\n OPTIONAL {{\n ?s kds:esIndexError ?esError\n }}\n FILTER(bound(?esTime)||bound(?esError))\n }}\n '
rdf_types = ([rdf_class.uri] + [item.uri for item in rdf_class.subclasses])
sparql = sparql_template.format('\n\t\t'.join(rdf_types))
log.warn('Deleting index status for %s', rdf_class.uri)
return self.tstore_conn.update_query(sparql)
|
Removes all of the index status triples from the datastore
Args:
-----
rdf_class: The class of items to remove the status from
|
codesearchnet
|
def info(self, collector_id):
cid = self.collector_id
if collector_id:
cid = collector_id
url = '{0}/{1}'.format(self.url, cid)
request = requests.get(url, auth=self.auth)
return request.json()
|
Return a dict of collector information.
Args:
collector_id (int): id of collector (optional)
|
juraj-google-style
|
def get_structure_property_dict(self, structure, include_base_props=True,
ignore_errors=False):
s_props = ["trans_v", "long_v", "snyder_ac", "snyder_opt",
"snyder_total", "clarke_thermalcond", "cahill_thermalcond",
"debye_temperature"]
if ignore_errors and (self.k_vrh < 0 or self.g_vrh < 0):
sp_dict = {prop: None for prop in s_props}
else:
sp_dict = {prop: getattr(self, prop)(structure) for prop in s_props}
sp_dict["structure"] = structure
if include_base_props:
sp_dict.update(self.property_dict)
return sp_dict
|
returns a dictionary of properties derived from the elastic tensor
and an associated structure
Args:
structure (Structure): structure object for which to calculate
associated properties
include_base_props (bool): whether to include base properties,
like k_vrh, etc.
ignore_errors (bool): if set to true, will set problem properties
that depend on a physical tensor to None, defaults to False
|
juraj-google-style
|
def with_row(self, row):
self = self.copy()
self.append(row)
return self
|
Return a table with an additional row.
Args:
``row`` (sequence): A value for each column.
Raises:
``ValueError``: If the row length differs from the column count.
>>> tiles = Table(make_array('letter', 'count', 'points'))
>>> tiles.with_row(['c', 2, 3]).with_row(['d', 4, 2])
letter | count | points
c | 2 | 3
d | 4 | 2
|
juraj-google-style
|
def parse_args(test: typing.Optional[typing.List[str]]=None) -> argparse.Namespace:
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('train_data', help='File path for the encoded training data.')
parser.add_argument('base_model', help='File path for the base model file.')
parser.add_argument('-o', '--output', help=f'File path for the output weights. (default: {DEFAULT_OUTPUT_NAME})', type=str, default=DEFAULT_OUTPUT_NAME)
parser.add_argument('--val-data', help='File path for the encoded validation data.', type=str)
parser.add_argument('--iters', help=f'Number of iterations for training. (default: {DEFAULT_NUM_ITERS})', type=int, default=DEFAULT_NUM_ITERS)
parser.add_argument('--log-span', help=f'Iteration span to print metrics. (default: {DEFAULT_LOG_SPAN})', type=int, default=DEFAULT_LOG_SPAN)
parser.add_argument('--learning-rate', help=f'Learning rate. (default: {DEFAULT_LEARNING_RATE})', type=float, default=DEFAULT_LEARNING_RATE)
if test is None:
return parser.parse_args()
else:
return parser.parse_args(test)
|
Parses commandline arguments.
Args:
test (typing.Optional[typing.List[str]], optional): Commandline args for
testing. Defaults to None.
Returns:
Parsed arguments (argparse.Namespace).
|
github-repos
|
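The `test` parameter makes the function easy to exercise without touching `sys.argv`; a sketch with placeholder file paths:

```python
# Pass an explicit argv-style list instead of reading sys.argv.
args = parse_args(['encoded_train.txt', 'base_model.json',
                   '--iters', '500', '--learning-rate', '0.02'])
print(args.train_data, args.iters, args.learning_rate)
# -> encoded_train.txt 500 0.02
```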
def Matches(self, file_entry):
if not self._names or not file_entry.IsFile():
return False
return file_entry.name.lower() in self._names
|
Compares the file entry against the filter.
Args:
file_entry (dfvfs.FileEntry): file entry to compare.
Returns:
bool: True if the file entry matches the filter.
|
juraj-google-style
|
def keyword(self, **kwargs):
path = self._get_path('keyword')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Search for keywords by name.
Args:
query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
Returns:
A dict representation of the JSON returned from the API.
|
juraj-google-style
|
def get_aligned_collection(self, value=0, data_type=None, unit=None, mutable=None):
header = self._check_aligned_header(data_type, unit)
values = self._check_aligned_value(value)
if (mutable is None):
collection = self.__class__(header, values, self.datetimes)
else:
if (self._enumeration is None):
self._get_mutable_enumeration()
if (mutable is False):
col_obj = self._enumeration['immutable'][self._collection_type]
else:
col_obj = self._enumeration['mutable'][self._collection_type]
collection = col_obj(header, values, self.datetimes)
collection._validated_a_period = self._validated_a_period
return collection
|
Return a Collection aligned with this one composed of one repeated value.
Aligned Data Collections are of the same Data Collection class, have the same
number of values and have matching datetimes.
Args:
value: A value to be repeated in the aligned collection values or
A list of values that has the same length as this collection.
Default: 0.
data_type: The data type of the aligned collection. Default is to
use the data type of this collection.
unit: The unit of the aligned collection. Default is to
use the unit of this collection or the base unit of the
input data_type (if it exists).
mutable: An optional Boolean to set whether the returned aligned
collection is mutable (True) or immutable (False). The default is
None, which will simply set the aligned collection to have the
same mutability as the starting collection.
|
codesearchnet
|
def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None):
return internal_convert_n_to_tensor_or_indexed_slices(values=values, dtype=dtype, name=name, as_ref=False)
|
Converts `values` to a list of `Output` or `IndexedSlices` objects.
Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
unmodified.
Args:
values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
can be consumed by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
Returns:
A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
|
github-repos
|
def get_course_modes(self, course_id):
details = self.get_course_details(course_id)
modes = details.get('course_modes', [])
return self._sort_course_modes([mode for mode in modes if mode['slug'] not in EXCLUDED_COURSE_MODES])
|
Query the Enrollment API for the specific course modes that are available for the given course_id.
Arguments:
course_id (str): The string value of the course's unique identifier
Returns:
list: A list of course mode dictionaries.
|
juraj-google-style
|
def AppendSource(self, type_indicator, attributes):
if (not type_indicator):
raise errors.FormatError('Missing type indicator.')
try:
source_object = registry.ArtifactDefinitionsRegistry.CreateSourceType(type_indicator, attributes)
except (AttributeError, TypeError) as exception:
raise errors.FormatError('Unable to create source type: {0:s} for artifact definition: {1:s} with error: {2!s}'.format(type_indicator, self.name, exception))
self.sources.append(source_object)
return source_object
|
Appends a source.
If you want to implement your own source type you should create a subclass
in source_type.py and change the AppendSource method to handle the new
subclass. This function raises FormatError if an unsupported source type
indicator is encountered.
Args:
type_indicator (str): source type indicator.
attributes (dict[str, object]): source attributes.
Returns:
SourceType: a source type.
Raises:
FormatError: if the type indicator is not set or unsupported,
or if required attributes are missing.
|
codesearchnet
|
def get_reaction(self, reactants, products):
return self._make_request("/reaction",
payload={"reactants[]": reactants,
"products[]": products}, mp_decode=False)
|
Gets a reaction from the Materials Project.
Args:
reactants ([str]): List of formulas
products ([str]): List of formulas
Returns:
rxn
|
juraj-google-style
|
def _create_warm_start_tuner(self, additional_parents, warm_start_type, estimator=None):
all_parents = {self.latest_tuning_job.name}
if additional_parents:
all_parents = all_parents.union(additional_parents)
return HyperparameterTuner(estimator=(estimator if estimator else self.estimator), objective_metric_name=self.objective_metric_name, hyperparameter_ranges=self._hyperparameter_ranges, objective_type=self.objective_type, max_jobs=self.max_jobs, max_parallel_jobs=self.max_parallel_jobs, warm_start_config=WarmStartConfig(warm_start_type=warm_start_type, parents=all_parents))
|
Creates a new ``HyperparameterTuner`` with ``WarmStartConfig``, where type will be equal to
``warm_start_type`` and ``parents`` would be equal to the union of ``additional_parents`` and self.
Args:
additional_parents (set{str}): Additional parents along with self, to be used for warm starting.
warm_start_type (sagemaker.tuner.WarmStartTypes): Type of warm start job.
Returns:
sagemaker.tuner.HyperparameterTuner: Instance with the request fields copied from self along with the
warm start configuration
|
codesearchnet
|
def _GenerateSection(self, problem_type):
if problem_type == transitfeed.TYPE_WARNING:
dataset_problems = self._dataset_warnings
heading = 'Warnings'
else:
dataset_problems = self._dataset_errors
heading = 'Errors'
if not dataset_problems:
return ''
prefix = '<h2 class="issueHeader">%s:</h2>' % heading
dataset_sections = []
for dataset_merger, problems in dataset_problems.items():
dataset_sections.append('<h3>%s</h3><ol>%s</ol>' % (
dataset_merger.FILE_NAME, '\n'.join(problems)))
body = '\n'.join(dataset_sections)
return prefix + body
|
Generate a listing of the given type of problems.
Args:
problem_type: The type of problem. This is one of the problem type
constants from transitfeed.
Returns:
The generated HTML as a string.
|
juraj-google-style
|
def VisitParameter(self, p):
if not self.class_types:
return p
if not self.force and (not isinstance(p.type, pytd.AnythingType)):
return p
if p.name == 'self' and self.method_kind in (pytd.MethodKind.METHOD, pytd.MethodKind.PROPERTY):
return p.Replace(type=self.class_types[-1])
elif p.name == 'cls' and self.method_kind == pytd.MethodKind.CLASSMETHOD:
cls_type = pytd.GenericType(pytd.NamedType('builtins.type'), parameters=(self.class_types[-1],))
return p.Replace(type=cls_type)
else:
return p
|
Adjust all parameters called "self" to have their base class type.
But do this only if their original type is unoccupied ("Any").
Args:
p: pytd.Parameter instance.
Returns:
Adjusted pytd.Parameter instance.
|
github-repos
|
def _get_condition_json(self, index):
condition = self.condition_data[index]
condition_log = {'name': condition[0], 'value': condition[1], 'type': condition[2], 'match': condition[3]}
return json.dumps(condition_log)
|
Method to generate json for logging audience condition.
Args:
index: Index of the condition.
Returns:
String: Audience condition JSON.
|
codesearchnet
|
def JsonDumpAndFlush(data, fp):
json.dump(data, fp)
fp.flush()
|
Write the dictionary `data` to a JSON file `fp` (and flush).
Args:
data: a dictionary that is JSON serializable.
fp: File-like object
|
github-repos
|
def get_modifier_from_signature(self, modifier_signature):
return next((m for m in self.modifiers if m.full_name == modifier_signature), None)
|
Return a modifier from a signature
Args:
modifier_signature (str): signature of the modifier
Returns:
Modifier
|
juraj-google-style
|
def ion_or_solid_comp_object(formula):
m = re.search('\\[([^\\[\\]]+)\\]|\\(aq\\)', formula)
if m:
comp_obj = Ion.from_formula(formula)
elif re.search('\\(s\\)', formula):
comp_obj = Composition(formula[:(- 3)])
else:
comp_obj = Composition(formula)
return comp_obj
|
Returns either an ion object or composition object given
a formula.
Args:
formula: String formula. Eg. of ion: NaOH(aq), Na[+];
Eg. of solid: Fe2O3(s), Fe(s), Na2O
Returns:
Composition/Ion object
|
codesearchnet
|
def get_hgnc_id(gene_info, adapter):
hgnc_id = gene_info.get('hgnc_id')
hgnc_symbol = gene_info.get('hgnc_symbol')
true_id = None
if hgnc_id:
true_id = int(hgnc_id)
else:
gene_result = adapter.hgnc_genes(hgnc_symbol)
if (gene_result.count() == 0):
raise Exception('No gene could be found for {}'.format(hgnc_symbol))
for gene in gene_result:
if (hgnc_symbol.upper() == gene.hgnc_symbol.upper()):
true_id = gene.hgnc_id
if (not gene_info['hgnc_id']):
true_id = gene.hgnc_id
return true_id
|
Get the hgnc id for a gene
The priority order will be
1. if there is a hgnc id this one will be chosen
2. if the hgnc symbol matches a gene's proper hgnc symbol
3. if the symbol only matches aliases on several genes one will be
chosen at random
Args:
gene_info(dict)
adapter
Returns:
true_id(int)
|
codesearchnet
|
def get_min_max_value(self) -> tuple[float, float]:
return self._get_min_max_value_by_expanding_range(self._num_bins // 2)
|
Finds min and max starting from the center index.
The HistogramMseSymmetric method starts from the center bin and expands the
range to both sides. This works better when the data is well-centered.
Returns:
(min_value, max_value): Min and max calculated using the method starting
from center and expanding.
|
github-repos
|
def set_atten(self, value):
self.attenuation_device.set_atten(self.idx, value)
|
This function sets the attenuation of Attenuator.
Args:
value: This is a floating point value for nominal attenuation to be
set. Unit is dB.
|
github-repos
|
def __init__(self, empty=True):
super(ObjectTypeChecker, self).__init__(empty=empty)
|
Initialization method.
Args:
empty (bool):
|
juraj-google-style
|
def delete_document(project_id, knowledge_base_id, document_id):
import dialogflow_v2beta1 as dialogflow
client = dialogflow.DocumentsClient()
document_path = client.document_path(project_id, knowledge_base_id,
document_id)
response = client.delete_document(document_path)
print('operation running:\n {}'.format(response.operation))
print('Waiting for results...')
print('Done.\n {}'.format(response.result()))
|
Deletes a Document.
Args:
project_id: The GCP project linked with the agent.
knowledge_base_id: Id of the Knowledge base.
document_id: Id of the Document.
|
juraj-google-style
|
def install_json_params(self, ij=None):
if self._install_json_params is None or ij is not None:
self._install_json_params = {}
if ij is None:
ij = self.install_json
for p in ij.get('params') or []:
self._install_json_params.setdefault(p.get('name'), p)
return self._install_json_params
|
Return install.json params in a dict with name param as key.
Args:
ij (dict, optional): Defaults to None. The install.json contents.
Returns:
dict: A dictionary containing the install.json input params with name as key.
|
juraj-google-style
|
def __init__(
self,
hparams,
metrics,
user=None,
description=None,
time_created_secs=None,
):
self._hparams = list(hparams)
self._metrics = list(metrics)
self._user = user
self._description = description
if time_created_secs is None:
time_created_secs = time.time()
self._time_created_secs = time_created_secs
|
Create an experiment object.
Args:
hparams: A list of `HParam` values.
metrics: A list of `Metric` values.
user: An optional string denoting the user or group that owns this
experiment.
description: An optional Markdown string describing this
experiment.
time_created_secs: The time that this experiment was created, as
seconds since epoch. Defaults to the current time.
|
juraj-google-style
|
def autorotate(image, orientation=None):
orientation_value = (orientation if orientation else image._getexif().get(EXIF_KEYS.get('Orientation')))
if (orientation_value is None):
raise ImDirectException('No orientation available in Exif tag or given explicitly.')
if (orientation_value in (1, 2)):
i = image
elif (orientation_value in (3, 4)):
i = image.transpose(Image.ROTATE_180)
elif (orientation_value in (5, 6)):
i = image.transpose(Image.ROTATE_270)
elif (orientation_value in (7, 8)):
i = image.transpose(Image.ROTATE_90)
else:
i = image
if (orientation_value in (2, 4, 5, 7)):
i = i.transpose(Image.FLIP_LEFT_RIGHT)
return i
|
Rotate and return an image according to its Exif information.
ROTATION_NEEDED = {
1: 0,
2: 0 (Mirrored),
3: 180,
4: 180 (Mirrored),
5: -90 (Mirrored),
6: -90,
7: 90 (Mirrored),
8: 90,
}
Args:
image (PIL.Image.Image): PIL image to rotate
orientation (int): Optional orientation value in [1, 8]
Returns:
A :py:class:`~PIL.Image.Image` image.
|
codesearchnet
|
def message_index(index_url):
idx = csv.reader(urllib2.urlopen(index_url), delimiter=':')
messages = []
for line in idx:
messages.append(line)
return messages
|
Get the message index of components via urllib2.
Args:
index_url (string): URL of the message index.
Returns:
list: messages
|
juraj-google-style
|
def _generate_input_signature(self, layer):
if isinstance(layer.call, def_function.Function) and layer.call.input_signature is not None:
return layer.call.input_signature
elif isinstance(layer, training_lib.Model):
return saving_utils.model_input_signature(layer)
elif layer.input_spec is not None and layer._use_input_spec_as_call_signature:
def to_tensor_spec_or_none(x):
spec = input_spec.to_tensor_spec(x, layer._compute_dtype)
if spec.shape == tensor_shape.TensorShape(None):
return None
return spec
input_signature = [nest.map_structure(to_tensor_spec_or_none, layer.input_spec)]
return input_signature
else:
return None
|
Inspects layer object and returns the inferred input signature.
Args:
layer: Layer object.
Returns:
List of possibly nested TensorSpecs of the layer call function inputs.
The list does not contain the `training` argument.
|
github-repos
|
def _ParseValueData(self, knowledge_base, value_data):
if not isinstance(value_data, py2to3.UNICODE_TYPE):
raise errors.PreProcessFail(
'Unsupported Windows Registry value type: {0:s} for '
'artifact: {1:s}.'.format(
type(value_data), self.ARTIFACT_DEFINITION_NAME))
codepage = 'cp{0:s}'.format(value_data)
if not knowledge_base.codepage:
try:
knowledge_base.SetCodepage(codepage)
except ValueError:
pass
|
Parses Windows Registry value data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
value_data (object): Windows Registry value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
|
juraj-google-style
|
def set_config_files_(self, *config_files):
self._config_files = tuple((pathlib.Path(path) for path in config_files))
|
Set the list of config files.
Args:
config_files (pathlike): path of config files, given in the order
of reading.
|
codesearchnet
|
def main_process_first(self, local=True, desc='work'):
if is_torch_available() and self.world_size > 1:
main_process_desc = 'main local process' if local else 'main process'
if self.distributed_state is not None:
is_main_process = self.distributed_state.is_local_main_process if local else self.distributed_state.is_main_process
elif is_sagemaker_mp_enabled():
is_main_process = smp.rank() == 0
try:
if not is_main_process:
logger.debug(f'{self.process_index}: waiting for the {main_process_desc} to perform {desc}')
if is_torch_xla_available():
xm.rendezvous(desc)
else:
dist.barrier()
yield
finally:
if is_main_process:
logger.debug(f'{self.process_index}: {main_process_desc} completed {desc}, releasing all replicas')
if is_torch_xla_available():
xm.rendezvous(desc)
else:
dist.barrier()
else:
yield
|
A context manager for torch distributed environment where one needs to do something on the main process, while
blocking replicas, and when it's finished releasing the replicas.
One such use is for `datasets`'s `map` feature which to be efficient should be run once on the main process,
which upon completion saves a cached version of results and which then automatically gets loaded by the
replicas.
Args:
local (`bool`, *optional*, defaults to `True`):
if `True`, "first" means the process of rank 0 of each node; if `False`, it means the process of rank 0
of node rank 0. In a multi-node environment with a shared filesystem you most likely will want to use
`local=False` so that only the main process of the first node does the processing. If, however, the
filesystem is not shared, then the main process of each node will need to do the processing, which is
the default behavior.
desc (`str`, *optional*, defaults to `"work"`):
a work description to be used in debug logs
|
github-repos
|
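A typical usage sketch (as in the Hugging Face Trainer examples, where the method is exposed as a context manager on `TrainingArguments`); `training_args`, `raw_dataset`, and `tokenize` are assumed to exist:

```python
# Run the expensive .map() once on the main process while replicas wait,
# then let the replicas load the cached result.
with training_args.main_process_first(desc="dataset map pre-processing"):
    tokenized = raw_dataset.map(tokenize, batched=True)
```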
def peek(self, size=-1):
if not self._readable:
raise UnsupportedOperation('read')
with self._seek_lock:
self._raw.seek(self._seek)
return self._raw._peek(size)
|
Return bytes from the stream without advancing the position.
Args:
size (int): Number of bytes to read. -1 to read the full
stream.
Returns:
bytes: bytes read
|
juraj-google-style
|
def joinNetwork(self, eRoleId):
print '%s call joinNetwork' % self.port
print eRoleId
self.deviceRole = eRoleId
mode = 15
try:
if ModuleHelper.LeaderDutChannelFound:
self.channel = ModuleHelper.Default_Channel
if eRoleId == Thread_Device_Role.Leader:
print 'join as leader'
mode = 15
if self.AutoDUTEnable is False:
self.__setRouterDowngradeThreshold(33)
elif eRoleId == Thread_Device_Role.Router:
print 'join as router'
mode = 15
if self.AutoDUTEnable is False:
self.__setRouterDowngradeThreshold(33)
elif eRoleId == Thread_Device_Role.SED:
print 'join as sleepy end device'
mode = 4
self.setPollingRate(self.sedPollingRate)
elif eRoleId == Thread_Device_Role.EndDevice:
print 'join as end device'
mode = 13
elif eRoleId == Thread_Device_Role.REED:
print 'join as REED'
mode = 15
self.__setRouterUpgradeThreshold(0)
elif eRoleId == Thread_Device_Role.EndDevice_FED:
print 'join as FED'
mode = 15
self.__setRouterUpgradeThreshold(0)
elif eRoleId == Thread_Device_Role.EndDevice_MED:
print 'join as MED'
mode = 13
else:
pass
self.__setDeviceMode(mode)
self.__setKeySwitchGuardTime(0)
time.sleep(0.1)
self.__startOpenThreadWpan()
time.sleep(3)
return True
except Exception, e:
ModuleHelper.WriteIntoDebugLogger('joinNetwork() Error: ' + str(e))
|
make device ready to join the Thread Network with a given role
Args:
eRoleId: a given device role id
Returns:
True: ready to set Thread Network parameter for joining desired Network
|
juraj-google-style
|
def get_host(self):
if (hasattr(self, 'host') and self.host):
return Host(self.rest_client.make_request(self.host), self.rest_client)
|
Get resource this operator is currently executing in.
If the operator is running on an externally
managed resource ``None`` is returned.
Returns:
Host: Resource this operator is running on.
.. versionadded:: 1.9
|
codesearchnet
|
def clean(self, settings):
return {k: v for (k, v) in settings.items() if (k in DEFAULT_SETTINGS)}
|
Filter given settings to keep only key names available in
``DEFAULT_SETTINGS``.
Args:
settings (dict): Loaded settings.
Returns:
dict: Settings object filtered.
|
codesearchnet
|
def erase(self):
try:
if not self.halted():
self.halt()
except errors.JLinkException:
pass
res = self._dll.JLINK_EraseChip()
if res < 0:
raise errors.JLinkEraseException(res)
return res
|
Erases the flash contents of the device.
This erases the flash memory of the target device. If this method
fails, the device may be left in an inoperable state.
Args:
self (JLink): the ``JLink`` instance
Returns:
Number of bytes erased.
|
juraj-google-style
|
def __init__(self, default_alpha=1, default_beta=1):
if isinstance(default_alpha, int) is False:
if isinstance(default_alpha, float) is False:
raise TypeError()
if isinstance(default_beta, int) is False:
if isinstance(default_beta, float) is False:
raise TypeError()
if default_alpha <= 0:
raise ValueError()
if default_beta <= 0:
raise ValueError()
self.__success = 0
self.__failure = 0
self.__default_alpha = default_alpha
self.__default_beta = default_beta
|
Initialization
Args:
default_alpha: Alpha
default_beta: Beta
|
juraj-google-style
|
def get_position_encoding(
length, hidden_size, min_timescale=1.0, max_timescale=1.0e4):
position = tf.to_float(tf.range(length))
num_timescales = hidden_size // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
(tf.to_float(num_timescales) - 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
return signal
|
Return positional encoding.
Calculates the position encoding as a mix of sine and cosine functions with
geometrically increasing wavelengths.
Defined and formulized in Attention is All You Need, section 3.5.
Args:
length: Sequence length.
hidden_size: Size of the hidden dimension; also the last dimension of the returned tensor.
min_timescale: Minimum scale that will be applied at each position
max_timescale: Maximum scale that will be applied at each position
Returns:
Tensor with shape [length, hidden_size]
|
juraj-google-style
|
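For inspection outside TensorFlow, the same sinusoidal encoding can be reproduced with NumPy; this is an equivalent sketch, with the timescale count set to `hidden_size // 2` so the concatenated sine and cosine halves add up to `hidden_size`:

```python
import numpy as np

def position_encoding_np(length, hidden_size, min_timescale=1.0, max_timescale=1.0e4):
    position = np.arange(length, dtype=np.float32)
    num_timescales = hidden_size // 2
    log_increment = np.log(max_timescale / min_timescale) / (num_timescales - 1)
    inv_timescales = min_timescale * np.exp(
        np.arange(num_timescales, dtype=np.float32) * -log_increment)
    scaled_time = position[:, np.newaxis] * inv_timescales[np.newaxis, :]
    return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)

print(position_encoding_np(50, 16).shape)   # (50, 16)
```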
def DEFINE_multi(parser, serializer, name, default, help, flag_values=FLAGS, module_name=None, **args):
DEFINE_flag(MultiFlag(parser, serializer, name, default, help, **args), flag_values, module_name)
|
Registers a generic MultiFlag that parses its args with a given parser.
Auxiliary function. Normal users should NOT use it directly.
Developers who need to create their own 'Parser' classes for options
which can appear multiple times can call this module function to
register their flags.
Args:
parser: ArgumentParser that is used to parse the flag arguments.
serializer: ArgumentSerializer that serializes the flag value.
name: A string, the flag name.
default: The default value of the flag.
help: A help string.
flag_values: FlagValues object with which the flag will be registered.
module_name: A string, the name of the Python module declaring this flag.
If not provided, it will be computed using the stack trace of this call.
**args: Dictionary with extra keyword args that are passed to the
Flag __init__.
|
codesearchnet
|
def db_dp020(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `db_dp020`'.format(value))
self._db_dp020 = value
|
Corresponds to IDD Field `db_dp020`
Mean coincident dry-bulb temperature corresponding to the dew-point temperature
at 2.0% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `db_dp020`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def padding_to_length(padding):
non_padding = 1.0 - padding
return tf.to_int32(tf.reduce_sum(non_padding, axis=-1))
|
Calculate the length of mask based on padding.
Args:
padding: a Tensor with shape [..., length].
Returns:
a Tensor with shape [...].
|
juraj-google-style
|
def write_data(worksheet, data):
if (not data):
return
if isinstance(data, list):
rows = data
else:
rows = [data]
if isinstance(rows[0], dict):
keys = get_keys(rows)
worksheet.append([utilities.convert_snake_to_title_case(key) for key in keys])
for row in rows:
values = [get_value_from_row(row, key) for key in keys]
worksheet.append(values)
elif isinstance(rows[0], list):
for row in rows:
values = [utilities.normalize_cell_value(value) for value in row]
worksheet.append(values)
else:
for row in rows:
worksheet.append([utilities.normalize_cell_value(row)])
|
Writes data into worksheet.
Args:
worksheet: worksheet to write into
data: data to be written
|
codesearchnet
|
def get_sql_statement_with_environment(item, args=None):
if isinstance(item, basestring):
item = _sql_statement.SqlStatement(item)
elif not isinstance(item, _sql_statement.SqlStatement):
item = SqlModule.get_default_query_from_module(item)
if not item:
raise Exception('Expected a SQL statement or module but got %s' % str(item))
env = {}
if item.module:
env.update(item.module.__dict__)
parser = env.get(_utils._SQL_MODULE_ARGPARSE, None)
if parser:
args = SqlModule._get_sql_args(parser, args=args)
else:
args = None
if isinstance(args, dict):
env.update(args)
return item, env
|
Given a SQLStatement, string or module plus command line args or a dictionary,
return a SqlStatement and final dictionary for variable resolution.
Args:
item: a SqlStatement, %%sql module, or string containing a query.
args: a string of command line arguments or a dictionary of values.
Returns:
A SqlStatement for the query or module, plus a dictionary of variable values to use.
|
juraj-google-style
|
class EfficientNetFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
rescale_offset: bool
include_top: bool
|
Args:
rescale_offset (`bool`, *optional*, defaults to `self.rescale_offset`):
Whether to rescale the image between [-scale_range/2, scale_range/2] instead of [0, scale_range].
include_top (`bool`, *optional*, defaults to `self.include_top`):
Normalize the image again with the standard deviation only for image classification if set to True.
|
github-repos
|
def _merge_partition_lists(partition_lists):
dst = list(partition_lists[0])
for src in partition_lists[1:]:
if len(src) != len(dst):
raise ValueError('All ragged inputs must have the same ragged_rank.')
for i in range(len(dst)):
dst[i] = dst[i]._merge_precomputed_encodings(src[i])
return dst
|
Merges the given list of lists of RowPartitions.
Args:
partition_lists: A list of lists of RowPartition.
Returns:
A list of RowPartitions, where `result[i]` is formed by merging
`partition_lists[j][i]` for all `j`, using
`RowPartition._merge_precomputed_encodings`.
|
github-repos
|
def available_readers(as_dict=False):
readers = []
for reader_configs in configs_for_reader():
try:
reader_info = read_reader_config(reader_configs)
except (KeyError, IOError, yaml.YAMLError):
LOG.warning('Could not import reader config from: %s', reader_configs)
LOG.debug('Error loading YAML', exc_info=True)
continue
readers.append((reader_info if as_dict else reader_info['name']))
return readers
|
Available readers based on current configuration.
Args:
as_dict (bool): Optionally return reader information as a dictionary.
Default: False
Returns: List of available reader names. If `as_dict` is `True` then
a list of dictionaries including additional reader information
is returned.
|
codesearchnet
|
def _satisfied_at_timestamp(self, device_name, pending, timestamp, start_i=0):
if not pending:
return True
for datum in self._dump_tensor_data[device_name][start_i:]:
if datum.timestamp > timestamp:
break
if datum.timestamp == timestamp and (datum.node_name, datum.output_slot) in pending:
pending.remove((datum.node_name, datum.output_slot))
if not pending:
return True
return not pending
|
Determine whether pending inputs are satisfied at given timestamp.
Note: This method mutates the input argument "pending".
Args:
device_name: (str) device name.
pending: A list of 2-tuple (node_name, output_slot): the dependencies to
check.
timestamp: (int) the timestamp in question.
start_i: (int) the index in self._dump_tensor_data to start searching for
the timestamp.
Returns:
(bool) Whether all the dependencies in pending are satisfied at the
timestamp. If pending is empty to begin with, return True.
|
github-repos
|
def exec_inspect(self, exec_id):
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
res = self._get(self._url("/exec/{0}/json", exec_id))
return self._result(res, True)
|
Return low-level information about an exec command.
Args:
exec_id (str): ID of the exec instance
Returns:
(dict): Dictionary of values returned by the endpoint.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
juraj-google-style
|
def kms_encrypt(value, key, aws_config=None):
aws_config = aws_config or {}
aws = boto3.session.Session(**aws_config)
client = aws.client('kms')
enc_res = client.encrypt(KeyId=key,
Plaintext=value)
return n(b64encode(enc_res['CiphertextBlob']))
|
Encrypt a value with a KMS key.
Args:
value (str): value to encrypt
key (str): key id or alias
aws_config (optional[dict]): aws credentials
dict of arguments passed into boto3 session
example:
aws_creds = {'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key,
'region_name': 'us-east-1'}
Returns:
str: encrypted cipher text
|
juraj-google-style
|
class TimesFmOutputForPrediction(BaseModelOutput):
mean_predictions: Optional[torch.Tensor] = None
full_predictions: Optional[torch.Tensor] = None
loss: Optional[Union[torch.Tensor, float]] = None
|
Args:
mean_predictions (`torch.Tensor` of shape `(batch_size, sequence_length)`):
The mean predictions of the time series.
full_predictions (`torch.Tensor` of shape `(batch_size, sequence_length)`):
The full predictions of the time series including the mean and the quantiles.
loss (`torch.Tensor` of shape `(1,)`, *optional*, returned when `future_values` is provided):
The loss of the TimesFM model.
|
github-repos
|
def write_xls(data, file_name, worksheet_names=None):
workbook = xlwt.Workbook()
for (sheet_index, sheet_data) in enumerate(data):
if (worksheet_names and (sheet_index < len(worksheet_names)) and worksheet_names[sheet_index]):
name = worksheet_names[sheet_index]
else:
name = 'Worksheet {}'.format(sheet_index)
sheet = workbook.add_sheet(name)
for (row_index, row) in enumerate(sheet_data):
for (col_index, value) in enumerate(row):
sheet.write(row_index, col_index, value)
workbook.save(file_name)
|
Writes out to old excel format.
Args:
data: 2D list of tables/worksheets.
file_name: Name of the output file.
worksheet_names: A list of worksheet names (optional).
|
codesearchnet
|
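A usage sketch with made-up data and file name (requires the `xlwt` package):

```python
data = [
    [['name', 'points'], ['alice', 12], ['bob', 7]],   # first worksheet
    [['date', 'total'], ['2020-01-01', 19]],           # second worksheet
]
write_xls(data, 'scores.xls', worksheet_names=['Scores', 'Totals'])
```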
def on_binlog(event, stream):
rows, meta = _rows_event_to_dict(event, stream)
table_name = '%s.%s' % (meta['schema'], meta['table'])
if meta['action'] == 'insert':
sig = signals.rows_inserted
elif meta['action'] == 'update':
sig = signals.rows_updated
elif meta['action'] == 'delete':
sig = signals.rows_deleted
else:
raise RuntimeError('Invalid action "%s"' % meta['action'])
sig.send(table_name, rows=rows, meta=meta)
|
Process on a binlog event
1. Convert event instance into a dict
2. Send corresponding schema/table/signals
Args:
event (pymysqlreplication.row_event.RowsEvent): the event
|
juraj-google-style
|
def conv_stack(name, x, mid_channels, output_channels, dilations=None, activation='relu', dropout=0.0):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x = conv_block('conv_block', x, mid_channels=mid_channels, dilations=dilations, activation=activation, dropout=dropout)
x = conv('zeros', x, apply_actnorm=False, conv_init='zeros', output_channels=output_channels, dilations=dilations)
return x
|
3-layer convolutional stack.
Args:
name: variable scope.
x: 5-D Tensor.
mid_channels: Number of output channels of the first layer.
output_channels: Number of output channels.
dilations: Dilations to apply in the first 3x3 layer and the last 3x3 layer.
By default, apply no dilations.
activation: relu or gatu.
If relu, the second layer is relu(W*x)
If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x)
dropout: float, 0.0
Returns:
output: output of 3 layer conv network.
|
codesearchnet
|
def stop_gradient(cls, x: 'TensorFluent') -> 'TensorFluent':
scope = x.scope.as_list()
batch = x.batch
return TensorFluent(tf.stop_gradient(x.tensor), scope, batch)
|
Returns a copy of the input fluent with stop_gradient at tensor level.
Args:
x: The input fluent.
Returns:
A TensorFluent that stops backpropagation of gradient computations.
|
codesearchnet
|
def from_model_config(cls, model_config: PretrainedConfig) -> 'GenerationConfig':
config_dict = model_config.to_dict()
config_dict.pop('_from_model_config', None)
config_dict = {key: value for key, value in config_dict.items() if value is not None}
generation_config = cls.from_dict(config_dict, return_unused_kwargs=False, _from_model_config=True)
decoder_config = model_config.get_text_config(decoder=True)
if decoder_config is not model_config:
default_generation_config = GenerationConfig()
decoder_config_dict = decoder_config.to_dict()
for attr in generation_config.to_dict().keys():
is_unset = getattr(generation_config, attr) == getattr(default_generation_config, attr)
if attr in decoder_config_dict and is_unset:
setattr(generation_config, attr, decoder_config_dict[attr])
if generation_config.return_dict_in_generate is False:
if any((getattr(generation_config, extra_output_flag, False) for extra_output_flag in generation_config.extra_output_flags)):
generation_config.return_dict_in_generate = True
generation_config._original_object_hash = hash(generation_config)
return generation_config
|
Instantiates a [`GenerationConfig`] from a [`PretrainedConfig`]. This function is useful to convert legacy
[`PretrainedConfig`] objects, which may contain generation parameters, into a stand-alone [`GenerationConfig`].
Args:
model_config (`PretrainedConfig`):
The model config that will be used to instantiate the generation config.
Returns:
[`GenerationConfig`]: The configuration object instantiated from those parameters.
|
github-repos
|
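A usage sketch following the pattern this classmethod is designed for ("gpt2" is used only as a familiar checkpoint name):

```python
from transformers import AutoConfig, GenerationConfig

# Build a stand-alone generation config from a legacy model config that still
# carries generation parameters.
model_config = AutoConfig.from_pretrained("gpt2")
generation_config = GenerationConfig.from_model_config(model_config)
```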
def _refresh(self, http):
if (not self.store):
self._do_refresh_request(http)
else:
self.store.acquire_lock()
try:
new_cred = self.store.locked_get()
if (new_cred and (not new_cred.invalid) and (new_cred.access_token != self.access_token) and (not new_cred.access_token_expired)):
logger.info('Updated access_token read from Storage')
self._updateFromCredential(new_cred)
else:
self._do_refresh_request(http)
finally:
self.store.release_lock()
|
Refreshes the access_token.
This method first checks by reading the Storage object if available.
If a refresh is still needed, it holds the Storage lock until the
refresh is completed.
Args:
http: an object to be used to make HTTP requests.
Raises:
HttpAccessTokenRefreshError: When the refresh fails.
|
codesearchnet
|
def __clone_function(f, name=None):
if not isinstance(f, types.FunctionType):
raise SimTypeError('Given parameter is not a function.')
if name is None:
name = f.__name__
newglobals = f.__globals__.copy()
globals_used = [x for x in f.__globals__ if x in f.__code__.co_names]
for x in globals_used:
gv = f.__globals__[x]
if isinstance(gv, types.FunctionType):
newglobals[x] = __clone_function(gv)
elif isinstance(gv, types.ModuleType):
newglobals[x] = gv
else:
newglobals[x] = copy.deepcopy(gv)
newfunc = types.FunctionType(
f.__code__, newglobals, name, f.__defaults__, f.__closure__)
return newfunc
|
Make a new version of a function that has its own independent copy
of any globals that it uses directly, and has its own name.
All other attributes are assigned from the original function.
Args:
f: the function to clone
name (str): the name for the new function (if None, keep the same name)
Returns:
A copy of the function f, having its own copy of any globals used
Raises:
SimValueError
|
juraj-google-style
|
def supply(self, issuer):
def _retrieve_jwks():
jwks_uri = self._key_uri_supplier.supply(issuer)
if not jwks_uri:
raise UnauthenticatedException(u"Cannot find the `jwks_uri` for issuer "
u"%s: either the issuer is unknown or "
u"the OpenID discovery failed" % issuer)
try:
response = requests.get(jwks_uri)
json_response = response.json()
except Exception as exception:
message = u"Cannot retrieve valid verification keys from the `jwks_uri`"
raise UnauthenticatedException(message, exception)
if u"keys" in json_response:
jwks_keys = jwk.KEYS()
jwks_keys.load_jwks(response.text)
return jwks_keys._keys
else:
return _extract_x509_certificates(json_response)
return self._jwks_cache.get_or_create(issuer, _retrieve_jwks)
|
Supplies the `Json Web Key Set` for the given issuer.
Args:
issuer: the issuer.
Returns:
The successfully retrieved Json Web Key Set. None is returned if the
issuer is unknown or the retrieval process fails.
Raises:
UnauthenticatedException: When this method cannot supply JWKS for the
given issuer (e.g. unknown issuer, HTTP request error).
|
juraj-google-style
|
def _get_attribute(self, offset):
attr_type = self.get_uint_le(offset)
length = self.get_uint_le((offset + 4))
data = self.get_chunk(offset, length)
return MftAttr.factory(attr_type, data)
|
Determines attribute type at the offset and returns \
initialized attribute object.
Returns:
MftAttr: One of the attribute objects \
(eg. :class:`~.mft_attribute.MftAttrFilename`).
None: If attribute type does not match any one of the supported \
attribute types.
|
codesearchnet
|
def _pyval_update_fields(pyval, fields, depth):
if not isinstance(pyval, (dict, list, tuple)):
raise ValueError('Expected dict or nested list/tuple of dict')
for key, target in fields.items():
for _ in range(1, depth):
target = target[-1]
target.append(pyval[key] if isinstance(pyval, dict) else [])
if isinstance(pyval, (list, tuple)):
for child in pyval:
_pyval_update_fields(child, fields, depth + 1)
|
Append the field values from `pyval` to `fields`.
Args:
pyval: A python `dict`, or nested list/tuple of `dict`, whose value(s)
should be appended to `fields`.
fields: A dictionary mapping string keys to field values. Field values
extracted from `pyval` are appended to this dictionary's values.
depth: The depth at which `pyval` should be appended to the field values.
|
github-repos
|
def forward(ctx, x, k, percentile_mode, scale):
zero_point = torch.tensor(0.0, device=scale.device)
n = 2 ** (k - 1) - 1
new_quant_x = linear_quantize(x, scale, zero_point, inplace=False)
new_quant_x = torch.clamp(new_quant_x, -n, n - 1)
ctx.scale = scale
return new_quant_x
|
Args:
x (`torch.Tensor`):
Floating point tensor to be quantized.
k (`int`):
Quantization bitwidth.
percentile_mode (`bool`):
Whether or not to use percentile calibration.
scale (`torch.Tensor`):
Pre-calculated scaling factor for *x*. Note that the current implementation of SymmetricQuantFunction
requires pre-calculated scaling factor.
Returns:
`torch.Tensor`: Symmetric-quantized value of *input*.
|
github-repos
|
def list_classes(mod_name):
mod = sys.modules[mod_name]
return [cls.__name__ for cls in mod.__dict__.values() if is_mod_class(mod, cls)]
|
Lists all classes declared in a module.
Args:
mod_name: the module name
Returns:
A list of classes declared in that module.
|
codesearchnet
|
def data(self, value):
if value == self._defaults['data'] and 'data' in self._values:
del self._values['data']
else:
self._values['data'] = value
|
The data property.
Args:
value (object). the property value.
|
juraj-google-style
|
def charge_balance(model):
compound_charge = {}
for compound in model.compounds:
if (compound.charge is not None):
compound_charge[compound.id] = compound.charge
for reaction in model.reactions:
charge = reaction_charge(reaction.equation, compound_charge)
(yield (reaction, charge))
|
Calculate the overall charge for all reactions in the model.
Yield (reaction, charge) pairs.
Args:
model: :class:`psamm.datasource.native.NativeModel`.
|
codesearchnet
|
def _parse_single_sequence_example_raw(serialized, context, feature_list, debug_name, name=None):
with ops.name_scope(name, 'ParseSingleExample', [serialized, debug_name]):
serialized = ops.convert_to_tensor(serialized, name='serialized')
serialized = _assert_scalar(serialized, 'serialized')
return _parse_sequence_example_raw(serialized, debug_name, context, feature_list, name)[:2]
|
Parses a single `SequenceExample` proto.
Args:
serialized: A scalar (0-D Tensor) of type string, a single binary serialized
`SequenceExample` proto.
context: A `ParseOpParams` containing the parameters for the parse op for
the context features.
feature_list: A `ParseOpParams` containing the parameters for the parse op
for the feature_list features.
debug_name: A scalar (0-D Tensor) of strings (optional), the name of the
serialized proto.
name: A name for this operation (optional).
Returns:
A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s.
The first dict contains the context key/values.
The second dict contains the feature_list key/values.
Raises:
TypeError: if feature_list.dense_defaults is not either None or a dict.
|
github-repos
|
def settings_view_for_block(block_wrapper, settings_view_factory):
state_root_hash = (block_wrapper.state_root_hash if (block_wrapper is not None) else None)
return settings_view_factory.create_settings_view(state_root_hash)
|
Returns the settings view for an arbitrary block.
Args:
block_wrapper (BlockWrapper): The block for which a settings
view is to be returned
settings_view_factory (SettingsViewFactory): The settings
view factory used to create the SettingsView object
Returns:
SettingsView object associated with the block
|
codesearchnet
|
def get_servo_status(self):
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(STATUS_ERROR_RAM)
data.append(BYTE1)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(12)
return ord(rxdata[9])&0xFF
except:
raise HerkulexError("could not communicate with motors")
|
Get the error status of servo
This function gets the error status (if any) of the servo
Args:
none
Returns:
int: an integer corresponding to the servo status
* refer datasheet
|
juraj-google-style
|
def _load_client_secrets(filename):
client_type, client_info = clientsecrets.loadfile(filename)
if client_type != clientsecrets.TYPE_WEB:
raise ValueError(
'The flow specified in {} is not supported, only the WEB flow '
'type is supported.'.format(client_type))
return client_info['client_id'], client_info['client_secret']
|
Loads client secrets from the given filename.
Args:
filename: The name of the file containing the JSON secret key.
Returns:
A 2-tuple, the first item containing the client id, and the second
item containing a client secret.
|
juraj-google-style
|
def to_insert(table, d):
columns = []
args = []
for key, val in d.items():
columns.append('"{}"'.format(key))
args.append(val)
stmt = 'insert into {table} ({columns}) values ({params})'.format(
table=table,
columns=', '.join(columns),
params=', '.join(['?'] * len(columns)))
return (stmt, args)
|
Generate an insert statement using the given table and dictionary.
Args:
table (str): table name
d (dict): dictionary with column names as keys and values as values.
Returns:
tuple of statement and arguments
>>> to_insert('doc.foobar', {'name': 'Marvin'})
('insert into doc.foobar ("name") values (?)', ['Marvin'])
|
juraj-google-style
|
class TFConv1D(keras.layers.Layer):
def __init__(self, nf, nx, initializer_range=0.02, **kwargs):
super().__init__(**kwargs)
self.nf = nf
self.nx = nx
self.initializer_range = initializer_range
def build(self, input_shape):
if self.built:
return
self.built = True
self.weight = self.add_weight('weight', shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range))
self.bias = self.add_weight('bias', shape=[1, self.nf], initializer=tf.zeros_initializer())
def call(self, x):
bz, sl = shape_list(x)[:2]
x = tf.reshape(x, [-1, self.nx])
x = tf.matmul(x, self.weight) + self.bias
x = tf.reshape(x, [bz, sl, self.nf])
return x
|
1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
Basically works like a linear layer but the weights are transposed.
Args:
nf (`int`):
The number of output features.
nx (`int`):
The number of input features.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation to use to initialize the weights.
kwargs (`Dict[str, Any]`, *optional*):
Additional keyword arguments passed along to the `__init__` of `keras.layers.Layer`.
|
github-repos
|
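A small usage sketch (layer sizes are arbitrary; the class depends on the `shape_list` and `get_initializer` helpers from its own module):

```python
import tensorflow as tf

# Project a [batch, seq, nx] activation to [batch, seq, nf], GPT-style.
layer = TFConv1D(nf=768, nx=256)
x = tf.random.uniform((2, 10, 256))
y = layer(x)
print(y.shape)   # (2, 10, 768)
```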
def forward(self, x, name='forward'):
return self._call_forward(x, name)
|
Returns the forward `Bijector` evaluation, i.e., X = g(Y).
Args:
x: `Tensor`. The input to the "forward" evaluation.
name: The name to give this op.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `x.dtype` is not
`self.dtype`.
NotImplementedError: if `_forward` is not implemented.
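Example (illustrative; `my_bijector` stands for any concrete Bijector subclass instance and `x` for a Tensor of a compatible dtype):
>>> y = my_bijector.forward(x)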
|
github-repos
|
def __init__(self, client, dag_name):
self._client = client
self._dag_name = dag_name
|
Initialise the task signal convenience class.
Args:
client (Client): A reference to a signal client object.
dag_name (str): The name of the dag the task belongs to.
|
juraj-google-style
|
def _testCompareToExplicitDerivative(self, dtype):
delta = 0.001
np_dtype = dtype.as_numpy_dtype
try:
from scipy import differentiate
from scipy import special
alpha_val = np.logspace(-2, 3, dtype=np_dtype)
alpha = constant_op.constant(alpha_val)
sample = random_ops.random_gamma([], alpha, np_dtype(1.0), dtype=dtype, seed=12345)
actual = gradients_impl.gradients(sample, alpha)[0]
sample_val, actual_val = self.evaluate((sample, actual))
u = special.gammainc(alpha_val, sample_val)
expected_val = differentiate.derivative(special.gammaincinv, alpha_val, args=(u,), initial_step=delta * alpha_val, order=2, preserve_shape=True).df
self.assertAllClose(actual_val, expected_val, rtol=0.001, atol=0.001)
except ImportError as e:
tf_logging.warn('Cannot use special functions in a test: %s' % str(e))
|
Compare to the explicit reparameterization derivative.
Verifies that the computed derivative satisfies
dsample / dalpha = d igammainv(alpha, u) / dalpha,
where u = igamma(alpha, sample).
Args:
dtype: TensorFlow dtype to perform the computations in.
|
github-repos
|
def send_client_cmd(self, data, cmd=None, via_queue=None):
mq_channel = self._connect_mq()
if cmd:
data['cmd'] = cmd
if via_queue:
mq_channel.basic_publish(exchange='',
routing_key=via_queue,
body=json.dumps(data))
else:
mq_channel.basic_publish(exchange=self.prv_exchange,
routing_key='',
body=json.dumps(data))
|
Send an arbitrary cmd and data to the client.
If a queue name is passed via the "via_queue" parameter,
that queue will be used instead of the user's private exchange.
Args:
data (dict): payload to send to the client.
cmd (str): optional command name; added to the payload under the "cmd" key.
via_queue (str): optional queue name.
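Example (an illustrative sketch; the manager instance, command name and queue name are hypothetical):
>>> mgr.send_client_cmd({'count': 3}, cmd='update_notifications')
>>> mgr.send_client_cmd({'count': 3}, via_queue='some_worker_queue')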
|
juraj-google-style
|
def get_type_from_api_entity(self, api_entity):
merged = self.group_types_data.copy()
merged.update(self.indicator_types_data)
print(merged)
for (key, value) in merged.items():
if value.get('apiEntity') == api_entity:
return key
return None
|
Returns the object type as a string given an api entity.
Args:
api_entity: the apiEntity value to look up.
Returns:
The matching object type as a string, or None if no match is found.
|
juraj-google-style
|
def _map_free_gates(layout, gates, coupling_map):
blocked_qubits = set()
mapped_gates = []
remaining_gates = []
for gate in gates:
if not gate['partition']:
qubits = [n for n in gate['graph'].nodes() if n.type == 'op'][0].qargs
if not qubits:
continue
if blocked_qubits.intersection(qubits):
blocked_qubits.update(qubits)
remaining_gates.append(gate)
else:
mapped_gate = _transform_gate_for_layout(gate, layout)
mapped_gates.append(mapped_gate)
continue
qubits = gate['partition'][0]
if blocked_qubits.intersection(qubits):
blocked_qubits.update(qubits)
remaining_gates.append(gate)
elif len(qubits) == 1:
mapped_gate = _transform_gate_for_layout(gate, layout)
mapped_gates.append(mapped_gate)
elif coupling_map.distance(*[layout[q] for q in qubits]) == 1:
mapped_gate = _transform_gate_for_layout(gate, layout)
mapped_gates.append(mapped_gate)
else:
blocked_qubits.update(qubits)
remaining_gates.append(gate)
return mapped_gates, remaining_gates
|
Map all gates that can be executed with the current layout.
Args:
layout (Layout): Map from virtual qubit index to physical qubit index.
gates (list): Gates to be mapped.
coupling_map (CouplingMap): CouplingMap for target device topology.
Returns:
tuple:
mapped_gates (list): ops for gates that can be executed, mapped onto layout.
remaining_gates (list): gates that cannot be executed on the layout.
|
juraj-google-style
|
def get_template_parameters_file(template_full_path):
for suffix in EFConfig.PARAMETER_FILE_SUFFIXES:
parameters_file = template_full_path.replace("/templates", "/parameters") + suffix
if exists(parameters_file):
return parameters_file
else:
continue
return None
|
Checks for existence of a parameters file against the supported suffixes and returns the parameters file path if found.
Args:
template_full_path: full filepath for the template file
Returns:
path of the parameters file if it exists, otherwise None
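Example (illustrative; the template path is hypothetical and the returned suffix depends on EFConfig.PARAMETER_FILE_SUFFIXES):
>>> params_file = get_template_parameters_file('/repo/cloudformation/templates/my-service.json')   # matching parameters path, or None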
|
juraj-google-style
|
def write_uint32(self, value, little_endian=True):
if little_endian:
endian = "<"
else:
endian = ">"
return self.pack('%sI' % endian, value)
|
Pack the value as an unsigned integer and write 4 bytes to the stream.
Args:
value (int): the unsigned integer value to write.
little_endian (bool): specify the endianness. (Default) Little endian.
Returns:
int: the number of bytes written.
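Example (illustrative; `writer` is a hypothetical instance of this stream writer):
>>> writer.write_uint32(1)                       # writes b'\x01\x00\x00\x00'
4
>>> writer.write_uint32(1, little_endian=False)  # writes b'\x00\x00\x00\x01'
4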
|
juraj-google-style
|
def set_dataset_year_range(self, dataset_year, dataset_end_year=None):
if isinstance(dataset_year, int):
dataset_date = ('01/01/%d' % dataset_year)
elif isinstance(dataset_year, str):
dataset_date = ('01/01/%s' % dataset_year)
else:
raise hdx.data.hdxobject.HDXError(('dataset_year has type %s which is not supported!' % type(dataset_year).__name__))
if (dataset_end_year is None):
dataset_end_year = dataset_year
if isinstance(dataset_end_year, int):
dataset_end_date = ('31/12/%d' % dataset_end_year)
elif isinstance(dataset_end_year, str):
dataset_end_date = ('31/12/%s' % dataset_end_year)
else:
raise hdx.data.hdxobject.HDXError(('dataset_end_year has type %s which is not supported!' % type(dataset_end_year).__name__))
self.set_dataset_date(dataset_date, dataset_end_date)
|
Set the dataset date as a range from a single year or from start and end years.
Args:
dataset_year (Union[str, int]): Dataset year given as string or int
dataset_end_year (Optional[Union[str, int]]): Dataset end year given as string or int
Returns:
None
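Example (illustrative; `dataset` is an already-created Dataset object):
>>> dataset.set_dataset_year_range(2015, 2017)   # sets the date range 01/01/2015 - 31/12/2017
>>> dataset.set_dataset_year_range('2013')       # a single year becomes 01/01/2013 - 31/12/2013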
|
codesearchnet
|
def nic_b(msg):
tc = typecode(msg)
if ((tc < 9) or (tc > 18)):
raise RuntimeError(('%s: Not an airborne position message, expecting 8<TC<19' % msg))
msgbin = common.hex2bin(msg)
nic_b = int(msgbin[39])
return nic_b
|
Obtain NICb, navigation integrity category supplement-b
Args:
msg (string): 28-character hexadecimal message string
Returns:
int: NICb number (0 or 1)
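Example (illustrative; the message is a typecode-11 airborne position message):
>>> nic_b('8D40058B58C901375147EFD09357')
0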
|
codesearchnet
|
def _config_net_topology(self, conf):
conf = self._init_net_specs(conf)
mgmts = self._select_mgmt_networks(conf)
self._validate_netconfig(conf)
allocated_subnets, conf = self._allocate_subnets(conf)
try:
self._add_mgmt_to_domains(conf, mgmts)
self._register_preallocated_ips(conf)
self._allocate_ips_to_nics(conf)
self._set_mtu_to_nics(conf)
self._add_dns_records(conf, mgmts)
except:
self._subnet_store.release(allocated_subnets)
raise
return conf
|
Initialize and populate all the network related elements, like
reserving ips and populating network specs of the given configuration
spec.
Args:
conf (dict): Configuration spec to initialize
Returns:
dict: the initialized configuration spec
|
juraj-google-style
|
def getIndexGrid(self, name):
index_map = self.mapTableFile.indexMaps.filter_by(name=name).one()
gssha_pro_card = self.getCard('#projection_file')  # card name is an assumption; the original string literal was truncated in extraction
if (gssha_pro_card is None):
raise ValueError('#projection_file card not found in the project file')  # message reconstructed; original literal truncated
with tmp_chdir(self.project_directory):
return GDALGrid(index_map.filename, gssha_pro_card.value.strip('"').strip("'"))
|
Returns GDALGrid object of index map
Parameters:
name(str): Name of index map in 'cmt' file.
Returns:
GDALGrid
|
codesearchnet
|
def set_countriesdata(cls, countries):
cls._countriesdata = dict()
cls._countriesdata['countries'] = dict()
cls._countriesdata['iso2iso3'] = dict()
cls._countriesdata['m49iso3'] = dict()
cls._countriesdata['countrynames2iso3'] = dict()
cls._countriesdata['regioncodes2countries'] = dict()
cls._countriesdata['regioncodes2names'] = dict()
cls._countriesdata['regionnames2codes'] = dict()
cls._countriesdata['aliases'] = dict()
for country in countries:
iso3 = country.get('#country+code+v_iso3')  # ISO3 column key is an assumption; the original string literal was truncated in extraction
if not iso3:
continue
iso3 = iso3.upper()
cls._add_countriesdata(iso3, country)
cls._countriesdata['countries'][iso3] = country.dictionary
def sort_list(colname):
for idval in cls._countriesdata[colname]:
cls._countriesdata[colname][idval] = \
sorted(list(cls._countriesdata[colname][idval]))
sort_list('regioncodes2countries')
|
Set up countries data from data in the form provided by UNStats and the World Bank.
Args:
countries (list): Countries data as a list of country objects (each exposing get() and a dictionary attribute)
Returns:
None
|
juraj-google-style
|
def qc_data(self, tests, alias=None):
r = {m: c.quality(tests, alias) for (m, c) in self.data.items()}
s = self.qc_curve_group(tests, alias=alias)
for (m, results) in r.items():
if (m in s):
results.update(s[m])
return r
|
Run a series of tests against the data and return the corresponding
results.
Args:
tests (list): a list of functions.
alias (dict): optional dictionary of curve-mnemonic aliases.
Returns:
dict. The results, keyed by curve mnemonic. Stick to booleans (True = pass) or ints.
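Example (an illustrative sketch; assumes this object holds curve data and that the test functions come from a quality module such as welly's; the function names and curve mnemonic are assumptions):
>>> results = w.qc_data([quality.no_flat, quality.no_gaps])
>>> results['GR']   # per-mnemonic dict of test name -> result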
|
codesearchnet
|
def project(self, n):
n = get_uvec(n)
return self.einsum_sequence(([n] * self.rank))
|
Convenience method for projection of a tensor into a
vector. Returns the tensor dotted into a unit vector
along the input n.
Args:
n (3x1 array-like): direction to project onto
Returns (float):
scalar value corresponding to the projection of
the tensor into the vector
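Example (illustrative; `sigma` is a rank-2 instance of this tensor class and numpy is imported as np):
>>> import numpy as np
>>> sigma.project(np.array([1, 0, 0]))   # for rank 2 this is the scalar n_i T_ij n_j along x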
|
codesearchnet
|