code stringlengths 20-4.93k | docstring stringlengths 33-1.27k | source stringclasses 3 values
---|---|---|
def ingress(self, envelope, http_headers, operation):
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug(_RESPONSE_XML_LOG_LINE,
etree.tostring(envelope, pretty_print=True))
if self._logger.isEnabledFor(logging.WARN):
warn_data = {}
header = envelope.find(_HEADER_XPATH)
fault = envelope.find(_FAULT_XPATH)
if fault is not None:
warn_data['faultMessage'] = fault.find('faultstring').text
if header is not None:
header_data = {
re.sub(_REMOVE_NS_REGEXP, '', child.tag): child.text
for child in header[0]}
warn_data.update(header_data)
if 'serviceName' not in warn_data:
warn_data['serviceName'] = list(operation.binding.wsdl.services.keys())[0]
if 'methodName' not in warn_data:
warn_data['methodName'] = operation.name
self._logger.warn('Error summary: %s', warn_data)
return envelope, http_headers
|
Overrides the ingress function for response logging.
Args:
envelope: An Element with the SOAP response data.
http_headers: A dict of the current http headers.
operation: The SoapOperation instance.
Returns:
A tuple of the envelope and headers.
|
juraj-google-style
|
def add_sample_tag_value(self, tag_name, new_sample_values):
if tag_name in self.format_tags:
msg = "New format value [{}] already exists.".format(tag_name)
raise KeyError(msg)
if not self._samples_match(new_sample_values):
raise KeyError("Sample name values must match "
"existing sample names")
for sample in self.sample_tag_values.keys():
value = str(new_sample_values[sample])
self.sample_tag_values[sample][tag_name] = value
|
Appends a new format tag-value for all samples.
Args:
tag_name: string tag name; must not already exist
new_sample_values: dict of sample name to new tag value; the sample
names must match the existing sample names
Raises:
KeyError: if tag_name already exists, or if the sample names do not
match the existing sample names
|
juraj-google-style
|
def update_restore_inputs(self, checkpoint_key: str, shape_and_slice_spec: str) -> tuple[Sequence[str], Sequence[str]]:
keys = []
slices = []
logging.info('Updating restore v2 inputs for %s: %s', checkpoint_key, shape_and_slice_spec)
for i, layout in enumerate(self._to_shard_layout):
sub_checkpoint_key = checkpoint_key.replace(self._main_checkpoint_name, self._checkpoint_local_names[i])
logging.info('Will read sub key %s: %s', sub_checkpoint_key, layout.unsharded_shape)
keys.append(sub_checkpoint_key)
slices.append(_shard_info_str(layout.unsharded_shape, trackable_base.ShardInfo(offset=[0, 0], shape=layout.unsharded_shape)))
return (keys, slices)
|
Updates checkpoint key and slice spec according to the resharding plan.
Args:
checkpoint_key: The input checkpoint key to be read.
shape_and_slice_spec: The shape and slice spec of the checkpoint key to be
read.
Returns:
A tuple of (keys, slices) that should be passed to restore_v2 in order to
reshard according to the resharding plan. The restored tensors from the
restore_v2 op will usually be passed to the reshard method of this class to
get the final resharded value.
|
github-repos
|
def eulers_totient(n):
if not isinstance(n, int):
raise TypeError("Expecting a strictly positive integer")
if n <= 0:
raise ValueError("Expecting a strictly positive integer")
if n == 1:
return 1
result = 0
for i in range(1, n):
if gcd(i, n) == 1:
result += 1
return result
|
Calculate the value of Euler's totient for a given integer
Args:
n (int): strictly positive integer
Returns:
The value of Euler's totient for n
Raises:
TypeError: If n is not an integer
ValueError: If n is not strictly positive
|
juraj-google-style
|
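A quick sanity check for the totient helper above; it assumes the `eulers_totient` definition is in scope and that its `gcd` dependency behaves like the standard-library `math.gcd` (the original module presumably carries its own import).
from math import gcd  # the snippet above calls gcd(); math.gcd matches that usage
# With eulers_totient() defined as above:
assert eulers_totient(1) == 1
assert eulers_totient(9) == 6    # 1, 2, 4, 5, 7, 8 are coprime to 9
assert eulers_totient(10) == 4   # 1, 3, 7, 9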
def write_uint8(self, value, little_endian=True):
if little_endian:
endian = '<'
else:
endian = '>'
return self.pack(('%sB' % endian), value)
|
Pack the value as an unsigned byte and write 1 byte to the stream.
Args:
value (int): the unsigned byte value to pack and write.
little_endian (bool): specify the endianness. Defaults to little endian.
Returns:
int: the number of bytes written.
|
codesearchnet
|
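The method above presumably delegates to `struct`-style packing via `self.pack`; the standalone sketch below shows the same packing step without reproducing the writer class.
import struct
# '<' / '>' select the endianness prefix; 'B' is the unsigned-byte format code.
little = struct.pack('<B', 0x7F)
big = struct.pack('>B', 0x7F)
assert little == big == b'\x7f'   # a single byte has no byte order
assert len(little) == 1           # the method reports the number of bytes written (1)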
def check_copies(overwrite: bool=False, file: Optional[str]=None):
buffer = {}
if file is None:
all_files = glob.glob(os.path.join(TRANSFORMERS_PATH, '**/*.py'), recursive=True)
all_files = list(all_files) + list(all_test_files)
else:
all_files = [file]
diffs = []
for filename in all_files:
new_diffs = is_copy_consistent(filename, overwrite, buffer)
diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(diffs) > 0:
diff = '\n'.join(diffs)
raise Exception('Found the following copy inconsistencies:\n' + diff + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.')
|
Check every file is copy-consistent with the original. Also check the model list in the main README and other
READMEs are consistent.
Args:
overwrite (`bool`, *optional*, defaults to `False`):
Whether or not to overwrite the copies when they don't match.
file (`str`, *optional*):
The path to a specific file to check and/or fix.
|
github-repos
|
def _is_in_control_flow(self, op):
return control_flow_util.IsInCond(op)
|
Returns true if the given op is inside a tf.cond or a tf.while_loop.
Args:
op: A TensorFlow op to check for control flow membership.
Returns:
A boolean value whether the op is in control flow or not.
|
github-repos
|
def dice_loss(inputs: Tensor, labels: Tensor, num_masks: int) -> Tensor:
probs = inputs.sigmoid().flatten(1)
numerator = 2 * (probs * labels).sum(-1)
denominator = probs.sum(-1) + labels.sum(-1)
loss = 1 - (numerator + 1) / (denominator + 1)
loss = loss.sum() / num_masks
return loss
|
Compute the DICE loss, similar to generalized IOU for masks, as follows:
$$ \mathcal{L}_{\text{dice}}(x, y) = 1 - \frac{2 |x \cap y|}{|x \cup y| + 1} $$
In practice, since `labels` is a binary mask (only 0s and 1s), dice can be computed as follows:
$$ \mathcal{L}_{\text{dice}}(x, y) = 1 - \frac{2 \, x \cdot y}{x + y + 1} $$
Args:
inputs (`torch.Tensor`):
A tensor representing a mask.
labels (`torch.Tensor`):
A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs
(0 for the negative class and 1 for the positive class).
num_masks (`int`):
The number of masks present in the current batch, used for normalization.
Returns:
`torch.Tensor`: The computed loss.
|
github-repos
|
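A hedged usage sketch for the loss above; it assumes PyTorch and the `dice_loss` definition are in scope, and the mask shapes are made up for illustration.
import torch
inputs = torch.randn(2, 4, 4)                    # raw logits; sigmoid is applied inside dice_loss
labels = torch.randint(0, 2, (2, 4, 4)).float()  # binary ground-truth masks
# dice_loss flattens everything after the mask dimension, so pass labels in the
# same flattened (num_masks, H*W) layout.
loss = dice_loss(inputs, labels.flatten(1), num_masks=2)
print(loss.shape)  # torch.Size([]) -- a scalar averaged over num_masks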
def start_new_feature(**cc_kwargs):
project = Project.from_path(pathlib.Path.cwd().resolve())
contrib_dir = project.get('contrib', 'module_path')
with tempfile.TemporaryDirectory() as tempdir:
output_dir = tempdir
cc_kwargs['output_dir'] = output_dir
rendered_dir = render_feature_template(**cc_kwargs)
src = rendered_dir
dst = contrib_dir
synctree(src, dst, onexist=_fail_if_feature_exists)
logger.info('Start new feature successful.')
|
Start a new feature within a ballet project
Renders the feature template into a temporary directory, then copies the
feature files into the proper path within the contrib directory.
Args:
**cc_kwargs: options for the cookiecutter template
Raises:
ballet.exc.BalletError: the new feature has the same name as an
existing one
|
juraj-google-style
|
def get_data(self,
file_path=sys.stdin,
delimiter=',',
categories_delimiter=None):
if file_path == sys.stdin:
logger.info('Read data from standard input')
lines = [line.replace('\n', '') for line in file_path]
else:
logger.info('Read data from file ' + file_path)
with open(file_path) as file:
lines = list(file)
columns = lines[0].rstrip('\n').split(delimiter)[1:]
categories = None
if categories_delimiter:
columns, categories = zip(*[c.split(categories_delimiter, 1)
for c in columns])
size = len(columns)
data = [list(map(int, l.split(delimiter)[1:]))
for l in lines[1:size + 1]]
return DesignStructureMatrix(data, columns, categories)
|
Implement the get_data method from the Provider class.
Parse CSV to return an instance of DSM.
Args:
file_path (str/fd): path or file descriptor.
delimiter (str): character(s) used as delimiter for columns.
categories_delimiter (str):
character(s) used as delimiter for categories and keys
(first column).
Returns:
DSM: instance of DSM.
|
juraj-google-style
|
def expand_source_files(filenames, cwd=None):
out = []
for f in expand_globpaths(filenames.split(), cwd):
if path_utils.isdir(f):
out += recursive_glob(path_utils.join(f, '**', '*.py'))
elif f.endswith('.py'):
out.append(f)
elif is_file_script(f, cwd):
out.append(f)
return set(out)
|
Expand a space-separated string of filenames passed in as sources.
This is a helper function for handling command line arguments that specify a
list of source files and directories.
Any directories in filenames will be scanned recursively for .py files.
Any files that do not end with ".py" will be dropped.
Args:
filenames: A space-separated string of filenames to process.
cwd: An optional working directory to expand relative paths
Returns:
A set of full paths to .py files
|
github-repos
|
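The helper above leans on pytype-internal utilities (`expand_globpaths`, `path_utils`, `is_file_script`); the standard-library approximation below illustrates the same expansion idea, minus the script-file check.
import glob
import os

def expand_source_files_approx(filenames, cwd=None):
    out = set()
    for f in filenames.split():
        path = os.path.join(cwd, f) if cwd else f
        if os.path.isdir(path):
            # scan directories recursively for .py files
            out.update(glob.glob(os.path.join(path, '**', '*.py'), recursive=True))
        elif path.endswith('.py') and os.path.isfile(path):
            out.add(path)
    return out
# e.g. expand_source_files_approx('pkg setup.py') returns setup.py plus every
# .py file found recursively under the pkg/ directory.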
def _maybe_create_attribute(self, name, default_value):
if not hasattr(self, name):
self.__setattr__(name, default_value)
|
Create the attribute with the default value if it hasn't been created.
This is useful for fields that are used for tracking purposes, such as
_trainable_weights or _layers. Note that a user could create a layer subclass
and assign an internal field before invoking Layer.__init__(), so
__setattr__() needs to create the tracking fields and __init__() must not
override them.
Args:
name: String, the name of the attribute.
default_value: Object, the default value of the attribute.
|
github-repos
|
def survey_basis(self, keys=None, alias=None, step=None):
if keys is None:
keys = [k for k, v in self.data.items() if isinstance(v, Curve)]
else:
keys = utils.flatten_list(keys)
starts, stops, steps = [], [], []
for k in keys:
d = self.get_curve(k, alias=alias)
if keys and (d is None):
continue
try:
starts.append(d.basis[0])
stops.append(d.basis[-1])
steps.append(d.basis[1] - d.basis[0])
except Exception as e:
pass
if starts and stops and steps:
step = step or min(steps)
return np.arange(min(starts), max(stops)+1e-9, step)
else:
return None
|
Look at the basis of all the curves in ``well.data`` and return a
basis with the minimum start, maximum depth, and minimum step.
Args:
keys (list): List of strings: the keys of the data items to
survey, if not all of them.
alias (dict): a dictionary mapping mnemonics to lists of mnemonics.
step (float): a new step, if you want to change it.
Returns:
ndarray. The most complete common basis.
|
juraj-google-style
|
def to_molden(cartesian_list, buf=None, sort_index=True, overwrite=True, float_format='{:.6f}'.format):
if sort_index:
cartesian_list = [molecule.sort_index() for molecule in cartesian_list]
give_header = ('[MOLDEN FORMAT]\n' + '[N_GEO]\n' + str(len(cartesian_list)) + '\n' + '[GEOCONV]\n' + 'energy\n{energy}' + 'max-force\n{max_force}' + 'rms-force\n{rms_force}' + '[GEOMETRIES] (XYZ)\n').format
values = (len(cartesian_list) * '1\n')
energy = [str(m.metadata.get('energy', 1)) for m in cartesian_list]
energy = ('\n'.join(energy) + '\n')
header = give_header(energy=energy, max_force=values, rms_force=values)
coordinates = [x.to_xyz(sort_index=sort_index, float_format=float_format) for x in cartesian_list]
output = (header + '\n'.join(coordinates))
if (buf is not None):
if overwrite:
with open(buf, mode='w') as f:
f.write(output)
else:
with open(buf, mode='x') as f:
f.write(output)
else:
return output
|
Write a list of Cartesians into a molden file.
.. note:: Since it permanently writes a file, this function
is strictly speaking **not side-effect free**.
The list to be written is of course not changed.
Args:
cartesian_list (list): list of Cartesian instances to write.
buf (str): StringIO-like, optional buffer to write to
sort_index (bool): If sort_index is true, the Cartesian
is sorted by the index before writing.
overwrite (bool): May overwrite existing files.
float_format (one-parameter function): Formatter function
to apply to column’s elements if they are floats.
The result of this function must be a unicode string.
Returns:
formatted : string (or unicode, depending on data and options)
|
codesearchnet
|
def GetMessages(self, formatter_mediator, event):
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
event_values = event.CopyToDict()
priority_level = event_values.get('level', None)
if isinstance(priority_level, py2to3.INTEGER_TYPES):
event_values['level'] = '{0:s} ({1:d})'.format(
self._PRIORITY_LEVELS.get(priority_level, 'UNKNOWN'), priority_level)
read_uid = event_values.get('read_uid', None)
if read_uid == -1:
event_values['read_uid'] = 'ALL'
read_gid = event_values.get('read_gid', None)
if read_gid == -1:
event_values['read_gid'] = 'ALL'
return self._ConditionalFormatMessages(event_values)
|
Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
|
juraj-google-style
|
def markdown_to_html_with_extensions(text, options=0, extensions=None):
if extensions is None:
extensions = []
core_extensions_ensure_registered()
cmark_extensions = []
for extension_name in extensions:
extension = find_syntax_extension(extension_name)
if extension is None:
raise ValueError('Unknown extension {}'.format(extension_name))
cmark_extensions.append(extension)
parser = parser_new(options=options)
try:
for extension in cmark_extensions:
parser_attach_syntax_extension(parser, extension)
parser_feed(parser, text)
root = parser_finish(parser)
if _cmark.lib.cmark_node_get_type(root) == _cmark.lib.CMARK_NODE_NONE:
raise ValueError('Error parsing markdown!')
extensions_ll = parser_get_syntax_extensions(parser)
output = render_html(root, options=options, extensions=extensions_ll)
finally:
parser_free(parser)
return output
|
Render the given Markdown text to HTML, using extensions.
This is a high-level wrapper over the various functions needed to enable
extensions, attach them to a parser, and render html.
Args:
text (str): The Markdown text to render to HTML.
options (int): The cmark options.
extensions (Sequence[str]): The list of extension names to use.
Returns:
str: The rendered HTML.
|
juraj-google-style
|
def __init__(self, *args, **kwargs):
if "widget" not in kwargs:
kwargs["widget"] = PasswordConfirmationInput(
confirm_with=kwargs.pop('confirm_with', None))
super(PasswordConfirmationField, self).__init__(*args, **kwargs)
|
Init method.
Args:
*args (): Django's args for a form field.
**kwargs (): Django's kwargs for a form field. Should contain a
confirm_with keyword argument to point to the password field.
|
juraj-google-style
|
def add(self, term):
if isinstance(term, Conjunction):
for term_ in term.terms:
self.add(term_)
elif isinstance(term, Term):
self._terms.append(term)
else:
raise TypeError('Not a Term or Conjunction')
|
Add a term to the conjunction.
Args:
term (:class:`Term`, :class:`Conjunction`): term to add;
if a :class:`Conjunction`, all of its terms are added
to the current conjunction.
Raises:
:class:`TypeError`: when *term* is an invalid type
|
codesearchnet
|
def set_domain_workgroup(workgroup):
if six.PY2:
workgroup = _to_unicode(workgroup)
with salt.utils.winapi.Com():
conn = wmi.WMI()
comp = conn.Win32_ComputerSystem()[0]
res = comp.JoinDomainOrWorkgroup(Name=workgroup.upper())
return (True if (not res[0]) else False)
|
Set the domain or workgroup the computer belongs to.
.. versionadded:: 2019.2.0
Args:
workgroup (str): The name of the workgroup to join.
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_domain_workgroup LOCAL
|
codesearchnet
|
def get_simulated_data(nmr_problems):
nmr_observed_tanks = 10
nmr_tanks_ground_truth = normal(nmr_problems, 1, mean=250, std=30, ctype='uint')
observations = uniform(nmr_problems, nmr_observed_tanks, low=0, high=nmr_tanks_ground_truth, ctype='uint')
return observations, nmr_tanks_ground_truth
|
Simulate some data.
This returns the simulated tank observations and the corresponding ground truth maximum number of tanks.
Args:
nmr_problems (int): the number of problems
Returns:
tuple: (observations, nmr_tanks_ground_truth)
|
juraj-google-style
|
def l2_distance_sq(t1, t2, name=None):
with tf.name_scope(name, 'l2_distance_sq', [t1, t2]) as scope:
t1 = tf.convert_to_tensor(t1, name='t1')
t2 = tf.convert_to_tensor(t2, name='t2')
return length_squared(tf.subtract(t1, t2), name=scope)
|
Square of l2 distance between t1 and t2.
Args:
t1: A tensor.
t2: A tensor that is the same size as t1.
name: Optional name for this op.
Returns:
The squared l2 distance between t1 and t2.
|
codesearchnet
|
def RunStateMethod(self, method_name, request=None, responses=None):
if self.rdf_flow.pending_termination:
self.Error(error_message=self.rdf_flow.pending_termination.reason)
return
client_id = self.rdf_flow.client_id
deadline = self.rdf_flow.processing_deadline
if deadline and rdfvalue.RDFDatetime.Now() > deadline:
raise flow.FlowError("Processing time for flow %s on %s expired." %
(self.rdf_flow.flow_id, self.rdf_flow.client_id))
self.rdf_flow.current_state = method_name
if request and responses:
logging.debug("Running %s for flow %s on %s, %d responses.", method_name,
self.rdf_flow.flow_id, client_id, len(responses))
else:
logging.debug("Running %s for flow %s on %s", method_name,
self.rdf_flow.flow_id, client_id)
try:
try:
method = getattr(self, method_name)
except AttributeError:
raise ValueError("Flow %s has no state method %s" %
(self.__class__.__name__, method_name))
responses = flow_responses.Responses.FromResponses(
request=request, responses=responses)
if responses.status is not None:
self.SaveResourceUsage(responses.status)
stats_collector_instance.Get().IncrementCounter("grr_worker_states_run")
if method_name == "Start":
stats_collector_instance.Get().IncrementCounter(
"flow_starts", fields=[self.rdf_flow.flow_class_name])
method()
else:
method(responses)
if self.replies_to_process:
if self.rdf_flow.parent_hunt_id and not self.rdf_flow.parent_flow_id:
self._ProcessRepliesWithHuntOutputPlugins(self.replies_to_process)
else:
self._ProcessRepliesWithFlowOutputPlugins(self.replies_to_process)
self.replies_to_process = []
except Exception as e:
stats_collector_instance.Get().IncrementCounter(
"flow_errors", fields=[self.rdf_flow.flow_class_name])
logging.exception("Flow %s on %s raised %s.", self.rdf_flow.flow_id,
client_id, utils.SmartUnicode(e))
self.Error(
error_message=utils.SmartUnicode(e), backtrace=traceback.format_exc())
|
Completes the request by calling the state method.
Args:
method_name: The name of the state method to call.
request: A RequestState protobuf.
responses: A list of FlowMessages responding to the request.
|
juraj-google-style
|
def _pick_or_create_inserted_op_moment_index(
self, splitter_index: int, op: ops.Operation,
strategy: InsertStrategy) -> int:
if (strategy is InsertStrategy.NEW or
strategy is InsertStrategy.NEW_THEN_INLINE):
self._moments.insert(splitter_index, ops.Moment())
return splitter_index
if strategy is InsertStrategy.INLINE:
if (0 <= splitter_index - 1 < len(self._moments) and
self._can_add_op_at(splitter_index - 1, op)):
return splitter_index - 1
return self._pick_or_create_inserted_op_moment_index(
splitter_index, op, InsertStrategy.NEW)
if strategy is InsertStrategy.EARLIEST:
if self._can_add_op_at(splitter_index, op):
p = self._prev_moment_available(op, splitter_index)
return p or 0
return self._pick_or_create_inserted_op_moment_index(
splitter_index, op, InsertStrategy.INLINE)
raise ValueError('Unrecognized append strategy: {}'.format(strategy))
|
Determines and prepares where an insertion will occur.
Args:
splitter_index: The index to insert at.
op: The operation that will be inserted.
strategy: The insertion strategy.
Returns:
The index of the (possibly new) moment where the insertion should
occur.
Raises:
ValueError: Unrecognized append strategy.
|
juraj-google-style
|
def CleanVacuousVersions(clients=None, dry_run=True):
if (not clients):
index = client_index.CreateClientIndex()
clients = index.LookupClients(['.'])
clients.sort()
with data_store.DB.GetMutationPool() as pool:
logging.info('checking %d clients', len(clients))
for batch in collection.Batch(clients, 10000):
client_infos = data_store.DB.MultiResolvePrefix(batch, ['aff4:', 'aff4:'], data_store.DB.ALL_TIMESTAMPS)
for (client, type_list) in client_infos:
cleared = 0
kept = 0
updates = []
for (a, _, ts) in type_list:
if (ts != 0):
updates.append((ts, a))
updates = sorted(updates)
dirty = True
for (ts, a) in updates:
if (a == 'aff4:type'):
if dirty:
kept += 1
dirty = False
else:
cleared += 1
if (not dry_run):
pool.DeleteAttributes(client, ['aff4:type'], start=ts, end=ts)
if (pool.Size() > 1000):
pool.Flush()
else:
dirty = True
logging.info('%s: kept %d and cleared %d', client, kept, cleared)
|
A script to remove no-op client versions.
This script removes versions of a client when it is identical to the previous,
in the sense that no versioned attributes were changed since the previous
client version.
Args:
clients: A list of ClientURN, if empty cleans all clients.
dry_run: whether this is a dry run
|
codesearchnet
|
def _populate(cls, as_of=None, delete=False):
billing_cycle_helper = get_billing_cycle()
billing_cycles_exist = BillingCycle.objects.exists()
try:
current_billing_cycle = BillingCycle.objects.as_of(date=as_of)
except BillingCycle.DoesNotExist:
current_billing_cycle = None
if (not billing_cycles_exist):
delete = False
if (billing_cycles_exist and (not current_billing_cycle)):
raise CannotPopulateForDateOutsideExistingCycles()
omit_current = (current_billing_cycle and delete)
stop_date = (as_of + relativedelta(years=settings.SWIFTWIND_BILLING_CYCLE_YEARS))
date_ranges = billing_cycle_helper.generate_date_ranges(as_of, stop_date=stop_date, omit_current=omit_current)
date_ranges = list(date_ranges)
beginning_date = date_ranges[0][0]
with db_transaction.atomic():
if delete:
cls.objects.filter(start_date__gte=beginning_date).delete()
for (start_date, end_date) in date_ranges:
exists = BillingCycle.objects.filter(date_range=(start_date, end_date)).exists()
if exists:
if delete:
raise Exception('It should not be possible to get here as future billing cycles have just been deleted')
else:
pass
else:
BillingCycle.objects.create(date_range=(start_date, end_date))
|
Populate the table with billing cycles starting from `as_of`
Args:
as_of (date): The date at which to begin the populating
delete (bool): Should future billing cycles be deleted?
|
codesearchnet
|
def from_string(cls, data, sigfigs=8):
lines = data.split("\n")[:-1]
struc_lines = {"HEADER": [], "VERS": [], "SYMGRP": [],
"STRUC": [], "CLASS": [], "SITE": []}
for line in lines:
if line != "" and not line.isspace():
if not line[0].isspace():
cat = line.split()[0]
if cat in struc_lines:
struc_lines[cat].append(line)
else:
pass
for cat in struc_lines:
struc_lines[cat] = " ".join(struc_lines[cat]).replace("= ", "=")
structure_tokens = {"ALAT": None,
"PLAT": [],
"CLASS": [],
"SITE": []}
for cat in ["STRUC", "CLASS", "SITE"]:
fields = struc_lines[cat].split("=")
for f, field in enumerate(fields):
token = field.split()[-1]
if token == "ALAT":
alat = round(float(fields[f+1].split()[0]), sigfigs)
structure_tokens["ALAT"] = alat
elif token == "ATOM":
atom = fields[f+1].split()[0]
if not bool(re.match("E[0-9]*$", atom)):
if cat == "CLASS":
structure_tokens["CLASS"].append(atom)
else:
structure_tokens["SITE"].append({"ATOM": atom})
else:
pass
elif token in ["PLAT", "POS"]:
try:
arr = np.array([round(float(i), sigfigs)
for i in fields[f+1].split()])
except ValueError:
arr = np.array([round(float(i), sigfigs)
for i in fields[f+1].split()[:-1]])
if token == "PLAT":
structure_tokens["PLAT"] = arr.reshape([3, 3])
elif not bool(re.match("E[0-9]*$", atom)):
structure_tokens["SITE"][-1]["POS"] = arr
else:
pass
else:
pass
try:
spcgrp_index = struc_lines["SYMGRP"].index("SPCGRP")
spcgrp = struc_lines["SYMGRP"][spcgrp_index:spcgrp_index+12]
structure_tokens["SPCGRP"] = spcgrp.split("=")[1].split()[0]
except ValueError:
pass
for token in ["HEADER", "VERS"]:
try:
value = re.split(token + r"\s*", struc_lines[token])[1]
structure_tokens[token] = value.strip()
except IndexError:
pass
return LMTOCtrl.from_dict(structure_tokens)
|
Creates a CTRL file object from a string. This will mostly be
used to read an LMTOCtrl object from a CTRL file. Empty spheres
are ignored.
Args:
data: String representation of the CTRL file.
sigfigs (int): Number of significant figures for rounding floating point
values read from the file. Defaults to 8.
Returns:
An LMTOCtrl object.
|
juraj-google-style
|
def _call_post_with_user_override(self, sap_user_id, url, payload):
SAPSuccessFactorsEnterpriseCustomerConfiguration = apps.get_model(
'sap_success_factors',
'SAPSuccessFactorsEnterpriseCustomerConfiguration'
)
oauth_access_token, _ = SAPSuccessFactorsAPIClient.get_oauth_access_token(
self.enterprise_configuration.sapsf_base_url,
self.enterprise_configuration.key,
self.enterprise_configuration.secret,
self.enterprise_configuration.sapsf_company_id,
sap_user_id,
SAPSuccessFactorsEnterpriseCustomerConfiguration.USER_TYPE_USER
)
response = requests.post(
url,
data=payload,
headers={
'Authorization': 'Bearer {}'.format(oauth_access_token),
'content-type': 'application/json'
}
)
return response.status_code, response.text
|
Make a post request with an auth token acquired for a specific user to a SuccessFactors endpoint.
Args:
sap_user_id (str): The user to use to retrieve an auth token.
url (str): The url to post to.
payload (str): The json encoded payload to post.
Returns:
tuple: The response status code and the response body text.
|
juraj-google-style
|
def apply(self, flag_set: AbstractSet[Flag], operand: AbstractSet[Flag]) \
-> FrozenSet[Flag]:
if self == FlagOp.ADD:
return frozenset(flag_set | operand)
elif self == FlagOp.DELETE:
return frozenset(flag_set - operand)
else:
return frozenset(operand)
|
Apply the flag operation on the two sets, returning the result.
Args:
flag_set: The flag set being operated on.
operand: The flags to use as the operand.
|
juraj-google-style
|
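A standalone illustration of the ADD / DELETE / REPLACE semantics implemented above, using a throwaway `enum.Flag` in place of the library's own flag type.
from enum import Flag, auto

class Demo(Flag):
    SEEN = auto()
    DRAFT = auto()
    ANSWERED = auto()

flags = frozenset({Demo.SEEN})
operand = frozenset({Demo.DRAFT, Demo.ANSWERED})
assert frozenset(flags | operand) == {Demo.SEEN, Demo.DRAFT, Demo.ANSWERED}  # ADD
assert frozenset(flags - operand) == {Demo.SEEN}                             # DELETE
assert frozenset(operand) == {Demo.DRAFT, Demo.ANSWERED}                     # REPLACE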
def parse_time_indices(s):
if not s.startswith('['):
s = '[' + s + ']'
parsed = command_parser._parse_slices(s)
if len(parsed) != 1:
raise ValueError(
'Invalid number of slicing objects in time indices (%d)' % len(parsed))
else:
return parsed[0]
|
Parse a string as time indices.
Args:
s: A valid slicing string for time indices. E.g., '-1', '[:]', ':', '2:10'
Returns:
A slice object.
Raises:
ValueError: If `s` does not represent valid time indices.
|
juraj-google-style
|
def delete(table, keyset):
delete = Mutation.Delete(table=table, key_set=keyset._to_pb())
return _Mutator(mutation=Mutation(delete=delete), rows=0, cells=0, operation=WriteMutation._OPERATION_DELETE, kwargs={'table': table, 'keyset': keyset})
|
Delete one or more table rows.
Args:
table: Name of the table to be modified.
keyset: Keys/ranges identifying rows to delete.
|
github-repos
|
def build_global(self, global_node):
config_block_lines = self.__build_config_block(global_node.config_block)
return config.Global(config_block=config_block_lines)
|
Parse the `global` section and return a config.Global object.
Args:
global_node (TreeNode): `global` section treenode.
Returns:
config.Global: the parsed global configuration object.
|
codesearchnet
|
def _get_free_gpu(max_gpu_utilization=40, min_free_memory=0.5, num_gpu=1):
def get_gpu_info():
gpu_info = subprocess.check_output(['nvidia-smi', '--format=csv,noheader,nounits', '--query-gpu=index,memory.total,memory.free,memory.used,utilization.gpu']).decode()
gpu_info = gpu_info.split('\n')
gpu_info_array = []
for line in gpu_info:
if (len(line) > 0):
(gpu_id, total_memory, free_memory, used_memory, gpu_util) = line.split(',')
gpu_memory_util = (float(used_memory) / float(total_memory))
gpu_info_array.append((float(gpu_util), gpu_memory_util, gpu_id))
return gpu_info_array
num_times_to_average = 5
current_array = []
for ind in range(num_times_to_average):
current_array.append(get_gpu_info())
time.sleep(1)
num_gpus = len(current_array[0])
avg_array = [(0, 0, str(x)) for x in range(num_gpus)]
for ind in range(num_times_to_average):
for gpu_ind in range(num_gpus):
avg_array[gpu_ind] = ((avg_array[gpu_ind][0] + current_array[ind][gpu_ind][0]), (avg_array[gpu_ind][1] + current_array[ind][gpu_ind][1]), avg_array[gpu_ind][2])
for gpu_ind in range(num_gpus):
avg_array[gpu_ind] = ((float(avg_array[gpu_ind][0]) / num_times_to_average), (float(avg_array[gpu_ind][1]) / num_times_to_average), avg_array[gpu_ind][2])
avg_array.sort()
gpus_found = 0
gpus_to_use = ''
free_memory = 1.0
for current_gpu in avg_array:
if ((current_gpu[0] < max_gpu_utilization) and ((1 - current_gpu[1]) > min_free_memory)):
if (gpus_found == 0):
gpus_to_use = current_gpu[2]
free_memory = (1 - current_gpu[1])
else:
gpus_to_use = ((gpus_to_use + ',') + current_gpu[2])
free_memory = min(free_memory, (1 - current_gpu[1]))
gpus_found = (gpus_found + 1)
if (gpus_found == num_gpu):
break
return (gpus_to_use, free_memory)
|
Get available GPUs according to utilization thresholds.
Args:
:max_gpu_utilization: percent utilization threshold to consider a GPU "free"
:min_free_memory: percent free memory to consider a GPU "free"
:num_gpu: number of requested GPUs
Returns:
A tuple of (available_gpus, minimum_free_memory), where available_gpus is a comma-delimited string of GPU ids, and minimum_free_memory
is the lowest amount of free memory available on the available_gpus.
|
codesearchnet
|
def parent(self) -> 'KeyPath':
if self.is_root:
raise KeyError('Parent of a root KeyPath does not exist.')
return KeyPath(self._keys[:-1])
|
The ``KeyPath`` object for current node's parent.
Example::
path = pg.KeyPath.parse('a.b.c')
assert path.parent == 'a.b'
Returns:
A ``KeyPath`` object for the parent of current node.
Raises:
KeyError: If current path is the root.
|
github-repos
|
def _CopyDateFromString(self, date_string):
date_string_length = len(date_string)
if (date_string_length < 10):
raise ValueError('Date string too short.')
if ((date_string[4] != '-') or (date_string[7] != '-')):
raise ValueError('Invalid date string.')
try:
year = int(date_string[0:4], 10)
except ValueError:
raise ValueError('Unable to parse year.')
try:
month = int(date_string[5:7], 10)
except ValueError:
raise ValueError('Unable to parse month.')
try:
day_of_month = int(date_string[8:10], 10)
except ValueError:
raise ValueError('Unable to parse day of month.')
days_per_month = self._GetDaysPerMonth(year, month)
if ((day_of_month < 1) or (day_of_month > days_per_month)):
raise ValueError('Day of month value out of bounds.')
return (year, month, day_of_month)
|
Copies a date from a string.
Args:
date_string (str): date value formatted as: YYYY-MM-DD
Returns:
tuple[int, int, int]: year, month, day of month.
Raises:
ValueError: if the date string is invalid or not supported.
|
codesearchnet
|
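The method above depends on the class's `_GetDaysPerMonth` helper; the standalone sketch below reproduces the same validation steps with the standard library's `calendar.monthrange` standing in for it.
import calendar

def copy_date_from_string(date_string):
    # minimal re-implementation of the parsing steps shown above
    if len(date_string) < 10 or date_string[4] != '-' or date_string[7] != '-':
        raise ValueError('Invalid date string.')
    year = int(date_string[0:4], 10)
    month = int(date_string[5:7], 10)
    day_of_month = int(date_string[8:10], 10)
    if not 1 <= day_of_month <= calendar.monthrange(year, month)[1]:
        raise ValueError('Day of month value out of bounds.')
    return year, month, day_of_month

assert copy_date_from_string('2024-02-29') == (2024, 2, 29)  # 2024 is a leap year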
def deserialize_block(value):
block = Block()
block.ParseFromString(value)
return BlockWrapper(block=block)
|
Deserialize a byte string into a BlockWrapper
Args:
value (bytes): the byte string to deserialize
Returns:
BlockWrapper: a block wrapper instance
|
codesearchnet
|
def OR(self):
clone = copy.deepcopy(self)
clone.adapter._QUERY_GLUE = ' OR '
return clone
|
Switches default query joiner from " AND " to " OR "
Returns:
Self. Queryset object.
|
codesearchnet
|
def build(self, spec, reset=True):
if reset:
self.reset()
with self.model:
self.mu = 0.
for t in spec.terms.values():
data = t.data
label = t.name
dist_name = t.prior.name
dist_args = t.prior.args
n_cols = t.data.shape[1]
coef = self._build_dist(spec, label, dist_name,
shape=n_cols, **dist_args)
if t.random:
self.mu += coef[t.group_index][:, None] * t.predictor
else:
self.mu += pm.math.dot(data, coef)[:, None]
y = spec.y.data
y_prior = spec.family.prior
link_f = spec.family.link
if isinstance(link_f, string_types):
link_f = self.links[link_f]
else:
link_f = link_f
y_prior.args[spec.family.parent] = link_f(self.mu)
y_prior.args['observed'] = y
y_like = self._build_dist(spec, spec.y.name, y_prior.name,
**y_prior.args)
self.spec = spec
|
Compile the PyMC3 model from an abstract model specification.
Args:
spec (Model): A bambi Model instance containing the abstract
specification of the model to compile.
reset (bool): if True (default), resets the PyMC3BackEnd instance
before compiling.
|
juraj-google-style
|
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
vision_data = {}
if image_sizes is not None:
num_image_tokens = []
for height, width in image_sizes:
height, width = smart_resize(height, width, self.image_processor.spatial_factor, self.image_processor.min_pixels, self.image_processor.max_pixels)
height = height
width = width
image_seq_length = height * (width + 1)
num_image_tokens.append(image_seq_length)
num_image_patches = [1] * len(image_sizes)
vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})
return MultiModalData(**vision_data)
|
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`List[List[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
|
github-repos
|
def print_run_bidirectional_blast(reference, other_genome, dbtype, outdir):
if dbtype == 'nucl':
command = 'blastn'
elif dbtype == 'prot':
command = 'blastp'
else:
raise ValueError('dbtype must be "nucl" or "prot"')
r_folder, r_name, r_ext = utils.split_folder_and_path(reference)
g_folder, g_name, g_ext = utils.split_folder_and_path(other_genome)
r_vs_g_name = r_name + '_vs_' + g_name
r_vs_g = r_vs_g_name + '_blast.out'
if op.exists(op.join(outdir, r_vs_g)) and os.stat(op.join(outdir, r_vs_g)).st_size != 0:
log.debug('{} vs {} BLAST already run'.format(r_name, g_name))
else:
cmd = '{} -query {} -db {} -outfmt 6 -out {}'.format(command, reference, g_name, r_vs_g)
utils.write_torque_script(command=cmd, err=r_vs_g_name, out=r_vs_g_name, name=r_vs_g_name,
outfile=op.join(outdir, r_vs_g_name) + '.sh',
walltime='00:15:00', queue='regular')
g_vs_r_name = g_name + '_vs_' + r_name
g_vs_r = g_vs_r_name + '_blast.out'
if op.exists(op.join(outdir, g_vs_r)) and os.stat(op.join(outdir, g_vs_r)).st_size != 0:
log.debug('{} vs {} BLAST already run'.format(g_name, r_name))
else:
cmd = '{} -query {} -db {} -outfmt 6 -out {}'.format(command, other_genome, r_name, g_vs_r)
utils.write_torque_script(command=cmd, err=g_vs_r_name, out=g_vs_r_name, name=g_vs_r_name,
outfile=op.join(outdir, g_vs_r_name) + '.sh',
walltime='00:15:00', queue='regular')
|
Write torque submission files for running bidirectional blast on a server and print execution command.
Args:
reference (str): Path to "reference" genome, aka your "base strain"
other_genome (str): Path to other genome which will be BLASTed to the reference
dbtype (str): "nucl" or "prot" - what format your genome files are in
outdir (str): Path to folder where Torque scripts should be placed
|
juraj-google-style
|
def write_reactions(self, stream, reactions, properties=None):
self._write_entries(
stream, reactions, self.convert_reaction_entry, properties)
|
Write iterable of reactions as YAML object to stream.
Args:
stream: File-like object.
reactions: Iterable of reaction entries.
properties: Set of reaction properties to output (or None to output
all).
|
juraj-google-style
|
def Cleanse(obj, encoding='utf-8'):
if isinstance(obj, int):
return obj
elif isinstance(obj, float):
if (obj == _INFINITY):
return 'Infinity'
elif (obj == _NEGATIVE_INFINITY):
return '-Infinity'
elif math.isnan(obj):
return 'NaN'
else:
return obj
elif isinstance(obj, bytes):
return tf.compat.as_text(obj, encoding)
elif isinstance(obj, (list, tuple)):
return [Cleanse(i, encoding) for i in obj]
elif isinstance(obj, set):
return [Cleanse(i, encoding) for i in sorted(obj)]
elif isinstance(obj, dict):
return {Cleanse(k, encoding): Cleanse(v, encoding) for (k, v) in obj.items()}
else:
return obj
|
Makes Python object appropriate for JSON serialization.
- Replaces instances of Infinity/-Infinity/NaN with strings.
- Turns byte strings into unicode strings.
- Turns sets into sorted lists.
- Turns tuples into lists.
Args:
obj: Python data structure.
encoding: Charset used to decode byte strings.
Returns:
Unicode JSON data structure.
|
codesearchnet
|
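Expected behaviour of `Cleanse` on a small mixed structure, assuming TensorFlow is importable (it is used only to decode the byte string) and that `_INFINITY` / `_NEGATIVE_INFINITY` are the usual float infinities.
data = {
    b'name': 'run-1',
    'metrics': (1.0, float('inf'), float('nan')),
    'tags': {'b', 'a'},
}
print(Cleanse(data))
# {'name': 'run-1', 'metrics': [1.0, 'Infinity', 'NaN'], 'tags': ['a', 'b']}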
def _ConsumeSingleByteString(self):
text = self.token
if ((len(text) < 1) or (text[0] not in _QUOTES)):
raise self._ParseError(('Expected string but found: %r' % (text,)))
if ((len(text) < 2) or (text[(- 1)] != text[0])):
raise self._ParseError(('String missing ending quote: %r' % (text,)))
try:
result = text_encoding.CUnescape(text[1:(- 1)])
except ValueError as e:
raise self._ParseError(str(e))
self.NextToken()
return result
|
Consume one token of a string literal.
String literals (whether bytes or text) can come in multiple adjacent
tokens which are automatically concatenated, like in C or Python. This
method only consumes one token.
Returns:
The token parsed.
Raises:
ParseError: When the wrong format data is found.
|
codesearchnet
|
def fulfill_transaction(transaction, *, private_keys):
if not isinstance(private_keys, (list, tuple)):
private_keys = [private_keys]
if isinstance(private_keys, tuple):
private_keys = list(private_keys)
transaction_obj = Transaction.from_dict(transaction)
try:
signed_transaction = transaction_obj.sign(private_keys)
except KeypairMismatchException as exc:
raise MissingPrivateKeyError('A private key is missing!') from exc
return signed_transaction.to_dict()
|
Fulfills the given transaction.
Args:
transaction (dict): The transaction to be fulfilled.
private_keys (:obj:`str` | :obj:`list` | :obj:`tuple`): One or
more private keys to be used for fulfilling the
transaction.
Returns:
dict: The fulfilled transaction payload, ready to be sent to a
BigchainDB federation.
Raises:
:exc:`~.exceptions.MissingPrivateKeyError`: If a private
key is missing.
|
juraj-google-style
|
def _ExpandDirectories(filenames):
expanded = set()
for filename in filenames:
if (not os.path.isdir(filename)):
expanded.add(filename)
continue
for (root, _, files) in os.walk(filename):
for loopfile in files:
fullname = os.path.join(root, loopfile)
if fullname.startswith(('.' + os.path.sep)):
fullname = fullname[len(('.' + os.path.sep)):]
expanded.add(fullname)
filtered = []
for filename in expanded:
if (os.path.splitext(filename)[1][1:] in GetAllExtensions()):
filtered.append(filename)
return filtered
|
Searches a list of filenames and replaces directories in the list with
all files descending from those directories. Files with extensions not in
the valid extensions list are excluded.
Args:
filenames: A list of files or directories
Returns:
A list of all files that are members of filenames or descended from a
directory in filenames
|
codesearchnet
|
def raw_search(self, *args, **kwargs):
limit = 50
try:
limit = kwargs['limit']
except KeyError:
pass
self._mail.select("inbox")
try:
date = kwargs['date']
date_str = date.strftime("%d-%b-%Y")
_, email_ids = self._mail.search(None, '(SINCE "%s")' % date_str)
except KeyError:
_, email_ids = self._mail.search(None, 'ALL')
email_ids = email_ids[0].split()
matching_uids = []
for _ in range(1, min(limit, len(email_ids))):
email_id = email_ids.pop()
rfc_body = self._mail.fetch(email_id, "(RFC822)")[1][0][1]
match = True
for expr in args:
if re.search(expr, rfc_body) is None:
match = False
break
if match:
uid = re.search(
"UID\\D*(\\d+)\\D*", self._mail.fetch(email_id, 'UID')[1][0]).group(1)
matching_uids.append(uid)
return matching_uids
|
Find the set of emails matching each regular expression passed in against the (RFC822) content.
Args:
*args: list of regular expressions.
Kwargs:
limit (int) - Limit on how many of the most recent emails to search through.
date (datetime) - If specified, it will avoid checking messages older
than this date.
|
juraj-google-style
|
def __init__(self, value_type: typing.Optional[typing.Union[typing.Type[typing.Any], typing.Tuple[typing.Type[typing.Any], ...]]], default: typing.Any=MISSING_VALUE, transform: typing.Optional[typing.Callable[[typing.Any], typing.Any]]=None, is_noneable: bool=False, frozen: bool=False):
super().__init__()
self._value_type = value_type
self._is_noneable = is_noneable
self._frozen = False
self._default = MISSING_VALUE
self._transform = transform
self.set_default(default)
self._frozen = frozen
|
Constructor of ValueSpecBase.
This class provides common facilities for implementing ValueSpec,
including type check, default value assignment, noneable handling,
missing value handling, and so on. Subclasses only need to handle
value-specific logic in `apply`, `extend`, and `is_compatible`.
Args:
value_type: Type or tuples of type or None. When a not-none value_type is
present, type check will be performed.
default: (Optional) Default value. If not specified, the user is always
required to provide a value. Or it can be any value that can be accepted
by this spec, or None, which automatically adds the Noneable property to
the spec.
transform: (Optional) user-defined function to be called on the input
of `apply`. It could be used as a type converter or a custom
validator which may raise errors.
is_noneable: (Optional) If True, None is acceptable for this spec.
frozen: If True, values other than the default value are not acceptable.
|
github-repos
|
def Print(self, output_writer):
if self._extensions:
output_writer.Write('\textensions: {0:s}\n'.format(
', '.join(self._extensions)))
|
Prints a human readable version of the filter.
Args:
output_writer (CLIOutputWriter): output writer.
|
juraj-google-style
|
def compute_expand_dims_output_shape(input_shape, axis):
input_shape = list(input_shape)
if axis is None:
axis = len(input_shape)
axis = to_tuple_or_list(axis)
out_ndim = len(axis) + len(input_shape)
axis = [canonicalize_axis(a, out_ndim) for a in axis]
shape_iter = iter(input_shape)
new_shape = [1 if ax in axis else next(shape_iter) for ax in range(out_ndim)]
return tuple(new_shape)
|
Compute the output shape for the `expand_dims` operation.
Args:
input_shape: Input shape.
axis: int or sequence of ints for the axis to expand.
Returns:
Tuple of ints: The output shape after the `expand_dims` operation.
|
github-repos
|
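Expected behaviour of the shape helper above, assuming its `to_tuple_or_list` and `canonicalize_axis` dependencies follow NumPy's axis conventions.
assert compute_expand_dims_output_shape((3, 4), 0) == (1, 3, 4)
assert compute_expand_dims_output_shape((3, 4), -1) == (3, 4, 1)
assert compute_expand_dims_output_shape((3, 4), (0, 2)) == (1, 3, 1, 4)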
def take_node_screenshot(self, element, screenshot_path):
from PIL import Image
temp_path = os.path.join(tempdir, screenshot_path)
el_x = int(element.location['x'])
el_y = int(element.location['y'])
el_height = int(element.size['height'])
el_width = int(element.size['width'])
if el_height == 0 or el_width == 0:
self.debug_log("take_node_screenshot cannot be taken because element width or height equal zero")
return False
bounding_box = (
el_x,
el_y,
(el_x + el_width),
(el_y + el_height)
)
self._driver.save_screenshot(temp_path)
base_image = Image.open(temp_path)
cropped_image = base_image.crop(bounding_box)
base_image = base_image.resize(cropped_image.size)
base_image.paste(cropped_image, (0, 0))
base_image.save(screenshot_path)
|
Take a screenshot of a node
Args:
element (object): the proxy_element
screenshot_path (str): the path where the screenshot will be saved
|
juraj-google-style
|
def generated_tag_data(tags):
generated_tags = []
for key, value in tags.items():
generated_tags.append({
'Key': key,
'Value': value,
})
return generated_tags
|
Convert :obj:`dict` to S3 Tag list.
Args:
tags (dict): Dictonary of tag key and tag value passed.
Returns:
list: List of dictionaries.
|
juraj-google-style
|
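A quick usage sketch for the tag converter above; the tag names and values are made up.
tags = {'app': 'forrest', 'env': 'dev'}
print(generated_tag_data(tags))
# [{'Key': 'app', 'Value': 'forrest'}, {'Key': 'env', 'Value': 'dev'}]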
def __init__(self, port=None, queue_id=None):
super().__init__(action_type=ActionType.OFPAT_ENQUEUE, length=16)
self.port = port
self.queue_id = queue_id
|
Create an ActionEnqueue with the optional parameters below.
Args:
port (physical port or :attr:`.Port.OFPP_IN_PORT`): Queue's port.
queue_id (int): Where to enqueue the packets.
|
juraj-google-style
|
def get_data(name, train_batch_size, test_batch_size):
if (name not in ['mnist', 'cifar10']):
raise ValueError(("Expected dataset 'mnist' or 'cifar10', but got %s" % name))
dataset = getattr(tf.keras.datasets, name)
num_classes = 10
raw_data = dataset.load_data()
((images_train, labels_train), (images_test, labels_test)) = raw_data
images_train = (images_train.astype(np.float32) / 255.0)
images_test = (images_test.astype(np.float32) / 255.0)
labels_train = labels_train.astype(np.int32).squeeze()
labels_test = labels_test.astype(np.int32).squeeze()
if (images_train.ndim == 3):
images_train = np.expand_dims(images_train, (- 1))
images_test = np.expand_dims(images_test, (- 1))
train_data = tf.data.Dataset.from_tensor_slices((images_train, labels_train))
test_data = tf.data.Dataset.from_tensor_slices((images_test, labels_test))
train_iterator = train_data.shuffle(buffer_size=len(images_train)).batch(train_batch_size).repeat().make_one_shot_iterator()
test_iterator = test_data.batch(test_batch_size).make_initializable_iterator()
return dict(train_iterator=train_iterator, test_iterator=test_iterator, num_classes=num_classes)
|
Gets training and testing dataset iterators.
Args:
name: String. Name of dataset, either 'mnist' or 'cifar10'.
train_batch_size: Integer. Batch size for training.
test_batch_size: Integer. Batch size for testing.
Returns:
Dict containing:
train_iterator: A tf.data.Iterator, over training data.
test_iterator: A tf.data.Iterator, over test data.
num_classes: Integer. Number of class labels.
|
codesearchnet
|
def get_timestamp(self, url, xpath=None):
if (not path.exists(self.db_path)):
return None
if (self._query(url, xpath).count() > 0):
return self._query(url, xpath).one().queried_on
|
Get time stamp of cached query result.
If DB has not yet been initialized or url/xpath has not been queried yet, return None.
Args:
url (str): URL of the query whose timestamp should be looked up.
xpath (str): xpath to search (may be ``None``)
Returns:
datetime.datetime: cached response timestamp, None if not available
|
codesearchnet
|
def random_walk_uniform_fn(scale=1.0, name=None):
def _fn(state_parts, seed):
"""Adds a uniform perturbation to the input state.
Args:
state_parts: A list of `Tensor`s of any shape and real dtype representing
the state parts of the `current_state` of the Markov chain.
seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
applied. Default value: `None`.
Returns:
perturbed_state_parts: A Python `list` of the `Tensor`s. Has the same
shape and type as the `state_parts`.
Raises:
ValueError: if `scale` does not broadcast with `state_parts`.
"""
with tf.compat.v1.name_scope(name, 'random_walk_uniform_fn', values=[state_parts, scale, seed]):
scales = (scale if mcmc_util.is_list_like(scale) else [scale])
if (len(scales) == 1):
scales *= len(state_parts)
if (len(state_parts) != len(scales)):
raise ValueError('`scale` must broadcast with `state_parts`.')
seed_stream = distributions.SeedStream(seed, salt='RandomWalkUniformFn')
next_state_parts = [tf.random.uniform(minval=(state_part - scale_part), maxval=(state_part + scale_part), shape=tf.shape(input=state_part), dtype=state_part.dtype.base_dtype, seed=seed_stream()) for (scale_part, state_part) in zip(scales, state_parts)]
return next_state_parts
return _fn
|
Returns a callable that adds a random uniform perturbation to the input.
For more details on `random_walk_uniform_fn`, see
`random_walk_normal_fn`. `scale` might
be a `Tensor` or a list of `Tensor`s that should broadcast with state parts
of the `current_state`. The generated uniform perturbation is sampled as a
uniform point on the rectangle `[-scale, scale]`.
Args:
scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes`
controlling the upper and lower bound of the uniform proposal
distribution.
name: Python `str` name prefixed to Ops created by this function.
Default value: 'random_walk_uniform_fn'.
Returns:
random_walk_uniform_fn: A callable accepting a Python `list` of `Tensor`s
representing the state parts of the `current_state` and an `int`
representing the random seed used to generate the proposal. The callable
returns the same-type `list` of `Tensor`s as the input and represents the
proposal for the RWM algorithm.
|
codesearchnet
|
def export_gpx_file(self):
gpx = create_elem('gpx', GPX_ELEM_ATTRIB)
if (not self.metadata.bounds):
self.metadata.bounds = self[:]
gpx.append(self.metadata.togpx())
for place in self:
gpx.append(place.togpx())
return etree.ElementTree(gpx)
|
Generate GPX element tree from ``Waypoints`` object.
Returns:
etree.ElementTree: GPX element tree depicting ``Waypoints`` object
|
codesearchnet
|
def l2_regression_loss(y, target, name=None):
with tf.name_scope(name, 'l2_regression', [y, target]) as scope:
y = tf.convert_to_tensor(y, name='y')
target = tf.convert_to_tensor(target, name='target')
return tf.sqrt(l2_regression_sq_loss(y, target, name=scope))
|
Calculates the square root of the SSE between y and target.
Args:
y: the calculated values.
target: the desired values.
name: the name for this op, defaults to l2_regression
Returns:
A tensorflow op.
|
codesearchnet
|
def random_weights(n, bounds=(0., 1.), total=1.0):
low = bounds[0]
high = bounds[1]
if high < low:
raise ValueError('Higher bound must be greater or '
'equal to lower bound')
if n * high < total or n * low > total:
raise ValueError('solution not possible with given n and bounds')
w = [0] * n
tgt = -float(total)
for i in range(n):
rn = n - i - 1
rhigh = rn * high
rlow = rn * low
lowb = max(-rhigh - tgt, low)
highb = min(-rlow - tgt, high)
rw = random.uniform(lowb, highb)
w[i] = rw
tgt += rw
random.shuffle(w)
return w
|
Generate pseudo-random weights.
Returns a list of random weights that is of length
n, where each weight is in the range bounds, and
where the weights sum up to total.
Useful for creating random portfolios when benchmarking.
Args:
* n (int): number of random weights
* bounds ((low, high)): bounds for each weight
* total (float): total sum of the weights
|
juraj-google-style
|
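A usage sketch for the weight generator above, assuming the function and its `random` import are in scope.
import random
random.seed(0)                       # reproducible demo only
w = random_weights(10)               # defaults: bounds=(0., 1.), total=1.0
assert len(w) == 10 and all(0.0 <= x <= 1.0 for x in w)
assert abs(sum(w) - 1.0) < 1e-9
ls = random_weights(4, bounds=(-1.0, 1.0), total=1.0)   # long/short weights
assert abs(sum(ls) - 1.0) < 1e-9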
def send_message(self):
start = time.time()
message = None
if (not self.initialized):
message = self.construct_start_message()
self.initialized = True
else:
message = self.construct_end_message()
self.send_UDP_message(message)
end = time.time()
return (end - start)
|
Send message over UDP.
If tracking is disabled, the bytes_sent will always be set to -1
Returns:
(bytes_sent, time_taken)
|
codesearchnet
|
def debug_object(obj, log_level: int = logging.DEBUG) -> None:
msgs = ["For {o!r}:".format(o=obj)]
for attrname in dir(obj):
attribute = getattr(obj, attrname)
msgs.append("- {an!r}: {at!r}, of type {t!r}".format(
an=attrname, at=attribute, t=type(attribute)))
log.log(log_level, "{}", "\n".join(msgs))
|
Sends details about a Python object to the log, specifically its ``repr()``
representation, and all of its attributes with their name, value, and type.
Args:
obj: object to debug
log_level: log level to use; default is ``logging.DEBUG``
|
juraj-google-style
|
def accepts(self, tp, converter):
tp = ParameterizedProperty._validate_type_param(tp)
self.alternatives.append((tp, converter))
return self
|
Declare that other types may be converted to this property type.
Args:
tp (Property) :
A type that may be converted automatically to this property
type.
converter (callable) :
A function accepting ``value`` to perform conversion of the
value to this property type.
Returns:
self
|
juraj-google-style
|
def add_curves_from_lasio(self, l, remap=None, funcs=None):
params = {}
for field, (sect, code) in LAS_FIELDS['data'].items():
params[field] = utils.lasio_get(l,
sect,
code,
remap=remap,
funcs=funcs)
curves = {c.mnemonic: Curve.from_lasio_curve(c, **params)
for c in l.curves}
self.data.update(curves)
return None
|
Given a lasio LASFile object ``l``, add curves from it to the current well
instance.
Args:
l (lasio.LASFile): The parsed LAS object to read curves from.
remap (dict): Optional. A dict of 'old': 'new' LAS field names.
funcs (dict): Optional. A dict of 'las field': function() for
implementing a transform before loading. Can be a lambda.
Returns:
None. Works in place.
|
juraj-google-style
|
def distances(self, word, words):
point = self[word]
vectors = np.asarray([self[w] for w in words])
diff = vectors - point
distances = np.linalg.norm(diff, axis=1)
return distances
|
Calculate euclidean pairwise distances between `word` and `words`.
Args:
word (string): single word.
words (list): list of strings.
Returns:
numpy array of the distances.
Note:
L2 metric is used to calculate distances.
|
juraj-google-style
|
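The method above is a vectorised L2 computation; the standalone sketch below shows the same arithmetic with made-up 3-d vectors standing in for the word embeddings.
import numpy as np
point = np.array([1.0, 0.0, 0.0])
vectors = np.array([[1.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0],
                    [0.0, 0.0, 2.0]])
print(np.linalg.norm(vectors - point, axis=1))  # approx. [0., 1.414, 2.236]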
def get_cookie_header(queue_item):
header = []
path = URLHelper.get_path(queue_item.request.url)
for cookie in queue_item.request.cookies:
root_path = ((cookie.path == '') or (cookie.path == '/'))
if (path.startswith(cookie.path) or root_path):
header.append(((cookie.name + '=') + cookie.value))
return '&'.join(header)
|
Convert a requests cookie jar to a HTTP request cookie header value.
Args:
queue_item (:class:`nyawc.QueueItem`): The parent queue item of the new request.
Returns:
str: The HTTP cookie header value.
|
codesearchnet
|
def _render(self):
message = Message()
message.add(Heading(tr('Problem'), **ORANGE_LEVEL_4_STYLE))
message.add(Paragraph(tr(
'The following problem(s) were encountered whilst running the '
'analysis.')))
items = BulletedList()
for p in reversed(self.problems):
items.add(p)
message.add(items)
message.add(Heading(tr('Suggestion'), **GREEN_LEVEL_4_STYLE))
message.add(Paragraph(tr(
'You can try the following to resolve the issue:')))
if len(self.suggestions) < 1:
suggestions = self.standard_suggestions()
message.add(suggestions)
else:
items = BulletedList()
for s in reversed(self.suggestions):
if s is not None:
items.add(s)
message.add(items)
if len(self.details) > 0:
items = BulletedList()
message.add(Heading(
tr('Details'), **ORANGE_LEVEL_5_STYLE))
message.add(Paragraph(tr(
'These additional details were reported when the problem '
'occurred.')))
for d in self.details:
if d is not None:
items.add(d)
message.add(items)
message.add(Heading(tr(
'Diagnostics'), **TRACEBACK_STYLE))
message.add(self.tracebacks)
return message
|
Create a Message version of this ErrorMessage
Args:
none
Returns:
the Message instance of this ErrorMessage
Raises:
Errors are propagated
|
juraj-google-style
|
def set_inter_op_parallelism_threads(num_threads):
context.context().inter_op_parallelism_threads = num_threads
|
Set number of threads used for parallelism between independent operations.
Determines the number of threads used by independent non-blocking operations.
0 means the system picks an appropriate number.
Args:
num_threads: Number of parallel threads
|
github-repos
|
def create_timer(cb: Callable[[float], None], interval: float,
delay_policy: TimerDelayPolicy = TimerDelayPolicy.DEFAULT,
loop: Optional[asyncio.BaseEventLoop] = None) -> asyncio.Task:
if not loop:
loop = asyncio.get_event_loop()
async def _timer():
fired_tasks = []
try:
while True:
if delay_policy == TimerDelayPolicy.CANCEL:
for t in fired_tasks:
if not t.done():
t.cancel()
await t
fired_tasks.clear()
else:
fired_tasks[:] = [t for t in fired_tasks if not t.done()]
t = loop.create_task(cb(interval=interval))
fired_tasks.append(t)
await asyncio.sleep(interval)
except asyncio.CancelledError:
for t in fired_tasks:
t.cancel()
await asyncio.gather(*fired_tasks)
return loop.create_task(_timer())
|
Schedule a timer with the given callable and the interval in seconds.
The interval value is also passed to the callable.
If the callable takes longer than the timer interval, all accumulated
callable's tasks will be cancelled when the timer is cancelled.
Args:
cb: A coroutine function invoked on every tick; it is called with the
``interval`` keyword argument.
interval: The timer interval in seconds.
delay_policy: If ``CANCEL``, callback tasks that have not finished by
the next tick are cancelled; with ``DEFAULT`` they are left running.
loop: The event loop to schedule the timer on; defaults to the current
event loop.
Returns:
You can stop the timer by cancelling the returned task.
|
juraj-google-style
|
def dbmin_mean(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `dbmin_mean`'.format(value))
self._dbmin_mean = value
|
Corresponds to IDD Field `dbmin_mean`
Mean of extreme annual minimum dry-bulb temperature
Args:
value (float): value for IDD Field `dbmin_mean`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def gunzip_file(gz_path, new_path):
if tf.gfile.Exists(new_path):
tf.logging.info("File %s already exists, skipping unpacking" % new_path)
return
tf.logging.info("Unpacking %s to %s" % (gz_path, new_path))
mode = stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IROTH
os.chmod(os.path.dirname(new_path), mode)
with gzip.open(gz_path, "rb") as gz_file:
with tf.gfile.GFile(new_path, mode="wb") as new_file:
for line in gz_file:
new_file.write(line)
|
Unzips from gz_path into new_path.
Args:
gz_path: path to the zipped file.
new_path: path to where the file will be unzipped.
|
juraj-google-style
|
def script(experiment, projects):
benchbuild_c = local[local.path(sys.argv[0])]
slurm_script = local.cwd / experiment.name + "-" + str(
CFG['slurm']['script'])
srun = local["srun"]
srun_args = []
if not CFG["slurm"]["multithread"]:
srun_args.append("--hint=nomultithread")
if not CFG["slurm"]["turbo"]:
srun_args.append("--pstate-turbo=off")
srun = srun[srun_args]
srun = srun[benchbuild_c["run"]]
return __save__(slurm_script, srun, experiment, projects)
|
Prepare a slurm script that executes the experiment for a given project.
Args:
experiment: The experiment we want to execute
projects: All projects we generate an array job for.
|
juraj-google-style
|
def __init__(self, channel):
self.ReadRows = channel.unary_stream(
"/google.bigtable.v2.Bigtable/ReadRows",
request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.FromString,
)
self.SampleRowKeys = channel.unary_stream(
"/google.bigtable.v2.Bigtable/SampleRowKeys",
request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.FromString,
)
self.MutateRow = channel.unary_unary(
"/google.bigtable.v2.Bigtable/MutateRow",
request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.FromString,
)
self.MutateRows = channel.unary_stream(
"/google.bigtable.v2.Bigtable/MutateRows",
request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.FromString,
)
self.CheckAndMutateRow = channel.unary_unary(
"/google.bigtable.v2.Bigtable/CheckAndMutateRow",
request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString,
)
self.ReadModifyWriteRow = channel.unary_unary(
"/google.bigtable.v2.Bigtable/ReadModifyWriteRow",
request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString,
)
|
Constructor.
Args:
channel: A grpc.Channel.
|
juraj-google-style
|
def extend(*args):
if (not args):
return {}
first = args[0]
rest = args[1:]
out = type(first)(first)
for each in rest:
out.update(each)
return out
|
shallow dictionary merge
Args:
*args: dicts to merge; the first dict determines the result type and later dicts override earlier keys.
Returns:
new instance of the same type as the first dict, with all given dicts merged.
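Example (illustrative dicts; later arguments override earlier keys):
```python
defaults = {"host": "localhost", "port": 8080}
overrides = {"port": 9090, "debug": True}
merged = extend(defaults, overrides)
# merged == {"host": "localhost", "port": 9090, "debug": True}; defaults is unchanged
```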
|
codesearchnet
|
def from_config(cls, config):
config.pop('dtype', None)
return cls(**config)
|
Instantiates an initializer from a configuration dictionary.
Example:
```python
initializer = RandomUniform(-1, 1)
config = initializer.get_config()
initializer = RandomUniform.from_config(config)
```
Args:
config: A Python dictionary, the output of `get_config`.
Returns:
A `tf.keras.initializers.Initializer` instance.
|
github-repos
|
def opensearch(self, query, results=10, redirect=True):
self._check_query(query, 'Query must be specified')
query_params = {'action': 'opensearch', 'search': query, 'limit': (100 if (results > 100) else results), 'redirects': ('resolve' if redirect else 'return'), 'warningsaserror': True, 'namespace': ''}
results = self.wiki_request(query_params)
self._check_error_response(results, query)
res = list()
for (i, item) in enumerate(results[1]):
res.append((item, results[2][i], results[3][i]))
return res
|
Execute a MediaWiki opensearch request, similar to search box
suggestions and conforming to the OpenSearch specification
Args:
query (str): Title to search for
results (int): Maximum number of results to return (capped at 100)
redirect (bool): If **False** return the redirect itself, \
otherwise resolve redirects
Returns:
List: List of results that are stored in a tuple \
(Title, Summary, URL)
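Example (a usage sketch, assuming `wiki` is an instance of the MediaWiki client):
```python
# Returns at most three (title, summary, url) tuples for the query.
for title, summary, url in wiki.opensearch("python", results=3):
    print(title, url)
```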
|
codesearchnet
|
def has_same_sumformula(self, other):
same_atoms = True
for atom in set(self['atom']):
own_atom_number = len(self[(self['atom'] == atom)])
other_atom_number = len(other[(other['atom'] == atom)])
same_atoms = (own_atom_number == other_atom_number)
if (not same_atoms):
break
return same_atoms
|
Determines if ``other`` has the same sumformula
Args:
other (molecule):
Returns:
bool:
|
codesearchnet
|
def get_oauth_data(self, code, client_id, client_secret, state):
request = self._get_request()
response = request.post(self.OAUTH_TOKEN_URL, {
"state": state,
"code": code,
"grant_type": "authorization_code",
"client_id": client_id,
"client_secret": client_secret
})
return HSAccessTokenAuth.from_response(response)
|
Get Oauth data from HelloSign
Args:
code (str): Code returned by HelloSign for our callback url
client_id (str): Client id of the associated app
client_secret (str): Secret token of the associated app
Returns:
A HSAccessTokenAuth object
|
juraj-google-style
|
def random_int_generator(maxrange):
try:
return random.randint(0,maxrange)
except:
line, filename, synerror = trace()
raise ArcRestHelperError({
"function": "random_int_generator",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
finally:
pass
|
Generates a random integer from 0 to `maxrange`, inclusive.
Args:
maxrange (int): The upper range of integers to randomly choose.
Returns:
int: The randomly generated integer from :py:func:`random.randint`.
Examples:
>>> arcresthelper.common.random_int_generator(15)
9
|
juraj-google-style
|
def get_airport_metars_hist(self, iata):
url = (AIRPORT_BASE.format(iata) + '/weather')
return self._fr24.get_airport_metars_hist(url)
|
Retrieve the metar data for the past 72 hours. The data will not be parsed to a readable format.
Given the IATA code of an airport, this method returns the metar information for last 72 hours.
Args:
iata (str): The IATA code for an airport, e.g. HYD
Returns:
The metar data for the airport
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_metars_hist('HYD')
|
codesearchnet
|
def packVersion(major, minor=0, patch=0):
ret = patch & mask20
ret = ret | (minor & mask20) << 20
ret = ret | (major & mask20) << 20 * 2
return ret
|
Pack a set of major/minor/patch integers into a single integer for storage.
Args:
major (int): Major version level integer.
minor (int): Minor version level integer.
patch (int): Patch version level integer.
Returns:
int: System normalized integer value to represent a software version.
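Example (a worked sketch of the 20-bit packing; mask20 is assumed to be 2**20 - 1):
```python
ver = packVersion(2, 5, 1)
# patch occupies bits 0-19, minor bits 20-39, major bits 40-59
assert ver == (2 << 40) | (5 << 20) | 1
```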
|
juraj-google-style
|
def bin_hash160(string):
intermed = hashlib.sha256(string).digest()
return hashlib.new('ripemd160', intermed).hexdigest()
|
Get a hash of the provided message using the ripemd160 algorithm.
Args:
string (str): message to hash.
Returns:
str: hash as a 40-character hex string.
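Example (a minimal call sketch; hashlib expects bytes, so encode text first):
```python
digest = bin_hash160("hello world".encode("utf-8"))
len(digest)  # 40 hex characters (160 bits)
```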
|
juraj-google-style
|
def get_coords(variant):
coordinates = {
'chrom': None,
'end_chrom': None,
'sv_length': None,
'sv_type': None,
'pos': None,
'end': None,
}
chrom = variant.CHROM
if chrom.startswith(('chr', 'CHR', 'Chr')):
chrom = chrom[3:]
coordinates['chrom'] = chrom
end_chrom = chrom
pos = int(variant.POS)
alt = variant.ALT[0]
end_pos = variant.INFO.get('END')
if end_pos:
end = int(end_pos)
else:
end = int(variant.end)
coordinates['end'] = end
sv_type = variant.INFO.get('SVTYPE')
length = variant.INFO.get('SVLEN')
if length:
sv_len = abs(length)
else:
sv_len = end - pos
if sv_type == 'BND':
other_coordinates = alt.strip('ACGTN[]').split(':')
end_chrom = other_coordinates[0]
if end_chrom.startswith(('chr', 'CHR', 'Chr')):
end_chrom = end_chrom[3:]
end = int(other_coordinates[1])
sv_len = float('inf')
if (sv_len == 0 and alt != '<INS>'):
sv_len = len(alt)
if (pos == end) and (sv_len > 0):
end = pos + sv_len
position = Position(chrom, pos)
end_position = Position(end_chrom, end)
if is_greater(position, end_position):
end_chrom = position.chrom
end = position.pos
chrom = end_position.chrom
pos = end_position.pos
coordinates['end_chrom'] = end_chrom
coordinates['pos'] = pos
coordinates['end'] = end
coordinates['sv_length'] = sv_len
coordinates['sv_type'] = sv_type
return coordinates
|
Returns a dictionary with position information
Args:
variant(cyvcf2.Variant)
Returns:
coordinates(dict)
|
juraj-google-style
|
def get_instance(name, cls='system', storage=None, storage_parameters=None, unsecure=None, *args, **kwargs):
system_parameters = _system_parameters(unsecure=unsecure, storage_parameters=storage_parameters)
with _MOUNT_LOCK:
for root in MOUNTED:
if ((isinstance(root, Pattern) and root.match(name)) or ((not isinstance(root, Pattern)) and name.startswith(root))):
info = MOUNTED[root]
stored_parameters = (info.get('system_parameters') or dict())
if (not system_parameters):
same_parameters = True
system_parameters = stored_parameters
elif (system_parameters == stored_parameters):
same_parameters = True
else:
same_parameters = False
system_parameters.update({key: value for (key, value) in stored_parameters.items() if (key not in system_parameters)})
break
else:
mount_info = mount(storage=storage, name=name, **system_parameters)
info = mount_info[tuple(mount_info)[0]]
same_parameters = True
if (cls == 'system'):
if same_parameters:
return info['system_cached']
else:
return info['system'](roots=info['roots'], **system_parameters)
if same_parameters:
if ('storage_parameters' not in system_parameters):
system_parameters['storage_parameters'] = dict()
system_parameters['storage_parameters']['pycosio.system_cached'] = info['system_cached']
kwargs.update(system_parameters)
return info[cls](*args, name=name, **kwargs)
|
Get a cloud object storage instance.
Args:
name (str): File name, path or URL.
cls (str): Type of class to instantiate.
'raw', 'buffered' or 'system'.
storage (str): Storage name.
storage_parameters (dict): Storage configuration parameters.
Generally, client configuration and credentials.
unsecure (bool): If True, disables TLS/SSL to improve
transfer performance, but makes the connection insecure.
Defaults to False.
args, kwargs: Instance arguments
Returns:
pycosio._core.io_base.ObjectIOBase subclass: Instance
|
codesearchnet
|
def run_program(self, src, filename, maximum_depth):
self.filename = filename
self._maximum_depth = maximum_depth
src = preprocess.augment_annotations(src)
src_tree = directors.parse_src(src, self.ctx.python_version)
code = self.compile_src(src, filename=filename, store_blockgraph=True)
director = directors.Director(src_tree, self.ctx.errorlog, filename, self.ctx.options.disable)
self.ctx.errorlog.set_error_filter(director.filter_error)
self._director = director
self.ctx.options.set_feature_flags(director.features)
self._branch_tracker = pattern_matching.BranchTracker(director.matches, self.ctx)
code = process_blocks.merge_annotations(code, self._director.annotations, self._director.param_annotations)
visitor = vm_utils.FindIgnoredTypeComments(self._director.type_comments)
pyc.visit(code, visitor)
for line in visitor.ignored_lines():
self.ctx.errorlog.ignored_type_comment(self.filename, line, self._director.type_comments[line])
if self.ctx.options.debug_constant_folding:
before = _bytecode_to_string(code)
code = constant_folding.fold_constants(code)
after = _bytecode_to_string(code)
print('\n'.join(difflib.unified_diff(before.splitlines(), after.splitlines())))
else:
code = constant_folding.fold_constants(code)
process_blocks.adjust_returns(code, self._director.block_returns)
node, f_globals, f_locals, _ = self.run_bytecode(self.ctx.root_node, code)
logging.info('Done running bytecode, postprocessing globals')
for annot in itertools.chain.from_iterable(self.late_annotations.values()):
annot.resolve(node, f_globals, f_locals)
self.flatten_late_annotation(node, annot, f_globals)
self.late_annotations = None
assert not self.frames, 'Frames left over!'
log.info('Final node: <%d>%s', node.id, node.name)
return (node, f_globals.members)
|
Run the code and return the CFG nodes.
Args:
src: The program source code.
filename: The filename the source is from.
maximum_depth: Maximum depth to follow call chains.
Returns:
A tuple (CFGNode, set) containing the last CFGNode of the program as
well as all the top-level names defined by it.
|
github-repos
|
def asn(self, as_number, **kwargs):
indicator_obj = ASN(as_number, **kwargs)
return self._indicator(indicator_obj)
|
Add ASN data to Batch object.
Args:
as_number (str): The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of ASN.
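Example (a hypothetical sketch, assuming `batch` is an existing Batch instance; the values are illustrative only):
```python
indicator = batch.asn("ASN15169", rating="3.0", confidence="75")
```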
|
codesearchnet
|
def unpack(self, buff, item_class, offset=0):
begin = offset
limit_buff = len(buff)
while begin < limit_buff:
item = item_class()
item.unpack(buff, begin)
self.append(item)
begin += item.get_size()
|
Unpack the elements of the list.
Args:
buff (bytes): The binary data to be unpacked.
item_class (:obj:`type`): Class of the expected items on this list.
offset (int): If we need to shift the beginning of the data.
|
juraj-google-style
|
def add_paths_argument(cls, group, argname, dest=None, help_=None):
prefixed = '%s-%s' % (cls.argument_prefix, argname)
if dest is None:
dest = prefixed.replace('-', '_')
final_dest = dest[len(cls.argument_prefix) + 1:]
else:
final_dest = dest
dest = '%s_%s' % (cls.argument_prefix, dest)
group.add_argument('--%s' % prefixed, action='store', nargs='+',
dest=dest, help=help_)
cls.paths_arguments[dest] = final_dest
|
Subclasses may call this to expose a paths argument.
Args:
group: arparse.ArgumentGroup, the extension argument group
argname: str, the name of the argument, will be namespaced.
dest: str, similar to the `dest` argument of
`argparse.ArgumentParser.add_argument`, will be namespaced.
help_: str, similar to the `help` argument of
`argparse.ArgumentParser.add_argument`.
|
juraj-google-style
|
def inflate_nd_checker(identifier, definition):
if isinstance(definition, bool):
return Checker(name=identifier, passes=definition)
elif isinstance(definition, dict):
return Checker(definition.pop('name', identifier), **definition)
else:
raise ValueError('%s type is not supported for no-data checkers, '
'use bool or dict' % type(definition))
|
Inflate a no-data checker from a basic definition.
Args:
identifier (str): the no-data checker identifier / name.
definition (bool/dict): a boolean acting as "passes" or a full
dict definition with "passes" and "allow_failure".
Returns:
Checker: a checker instance.
Raises:
ValueError: when the definition type is not bool or dict.
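Example (both definition forms in a short sketch):
```python
# Boolean form: only sets whether the checker passes.
always_ok = inflate_nd_checker("has_data", True)
# Dict form: remaining keys are forwarded to Checker.
strict = inflate_nd_checker("has_data", {"passes": False, "allow_failure": True})
```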
|
juraj-google-style
|
def write_json(data, path, file_name):
if os.path.exists(path) and not os.path.isdir(path):
return
elif not os.path.exists(path):
mkdir_p(path)
with open(os.path.join(path, file_name), 'w') as f:
json_tricks.dump(data, f, indent=4, primitives=True, allow_nan=True)
|
Write out data to a json file.
Args:
data: A dictionary representation of the data to write out
path: The directory to output the file in
file_name: The name of the file to write out
|
juraj-google-style
|
def __init__(self, metadata,
registry):
self.metadata = metadata
self.fields = stats_utils.FieldDefinitionTuplesFromProtos(
metadata.fields_defs)
field_names = [name for name, _ in self.fields]
if metadata.metric_type == rdf_stats.MetricMetadata.MetricType.COUNTER:
self.metric = prometheus_client.Counter(
metadata.varname,
metadata.docstring,
labelnames=field_names,
registry=registry)
elif metadata.metric_type == rdf_stats.MetricMetadata.MetricType.EVENT:
bins = metadata.bins or [
0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 4, 5, 6, 7, 8,
9, 10, 15, 20, 50, 100
]
self.metric = prometheus_client.Histogram(
metadata.varname,
metadata.docstring,
labelnames=field_names,
buckets=bins,
registry=registry)
elif metadata.metric_type == rdf_stats.MetricMetadata.MetricType.GAUGE:
self.metric = prometheus_client.Gauge(
metadata.varname,
metadata.docstring,
labelnames=field_names,
registry=registry)
else:
raise ValueError("Unknown metric type: {!r}".format(metadata.metric_type))
|
Instantiates a new _Metric.
Args:
metadata: An rdf_stats.MetricMetadata instance describing this _Metric.
registry: A prometheus_client.Registry instance.
Raises:
ValueError: metadata contains an unknown metric_type.
|
juraj-google-style
|
def convert_to_numpy(cls, x):
return x
|
Convert a tensor to a NumPy array.
Only called after slicing using `__getitem__`.
Args:
x: the tensor to convert.
Returns: the converted tensor.
|
github-repos
|
def _add_write_pbs(self, write_pbs):
if self._read_only:
raise ValueError(_WRITE_READ_ONLY)
super(Transaction, self)._add_write_pbs(write_pbs)
|
Add `Write`` protobufs to this transaction.
Args:
write_pbs (List[google.cloud.proto.firestore.v1beta1.\
write_pb2.Write]): A list of write protobufs to be added.
Raises:
ValueError: If this transaction is read-only.
|
juraj-google-style
|
def get_mapping_function(function_name, functions_mapping):
if function_name in functions_mapping:
return functions_mapping[function_name]
elif function_name in ["parameterize", "P"]:
from httprunner import loader
return loader.load_csv_file
elif function_name in ["environ", "ENV"]:
return utils.get_os_environ
try:
from httprunner import loader
built_in_functions = loader.load_builtin_functions()
return built_in_functions[function_name]
except KeyError:
pass
try:
item_func = eval(function_name)
if callable(item_func):
return item_func
except (NameError, TypeError):
raise exceptions.FunctionNotFound("{} is not found.".format(function_name))
|
Get function from functions_mapping; if not found, check whether it is a builtin function.
Args:
function_name (str): function name
functions_mapping (dict): functions mapping
Returns:
mapping function object.
Raises:
exceptions.FunctionNotFound: function is neither defined in debugtalk.py nor builtin.
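Example (a short sketch of the lookup order, using a hypothetical mapping):
```python
funcs = {"double": lambda x: x * 2}
get_mapping_function("double", funcs)("ab")  # found in the mapping -> 'abab'
get_mapping_function("max", funcs)           # not mapped, resolved as a builtin
```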
|
juraj-google-style
|
def setup_keyword(dist, _, value):
if (value is not True):
return
dist.entry_points = _ensure_entry_points_is_dict(dist.entry_points)
for (command, subcommands) in six.iteritems(_get_commands(dist)):
entry_point = '{command} = rcli.dispatcher:main'.format(command=command)
entry_points = dist.entry_points.setdefault('console_scripts', [])
if (entry_point not in entry_points):
entry_points.append(entry_point)
dist.entry_points.setdefault('rcli', []).extend(subcommands)
|
Add autodetected commands as entry points.
Args:
dist: The distutils Distribution object for the project being
installed.
_: The keyword used in the setup function. Unused.
value: The value set to the keyword in the setup function. If the value
is not True, this function will do nothing.
|
codesearchnet
|
def plot_pie(self, key='wall_time', minfract=0.05, **kwargs):
timers = self.timers()
n = len(timers)
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
fig = plt.gcf()
gspec = GridSpec(n, 1)
for (idx, timer) in enumerate(timers):
ax = plt.subplot(gspec[(idx, 0)])
ax.set_title(str(timer))
timer.pie(ax=ax, key=key, minfract=minfract, show=False)
return fig
|
Plot pie charts of the different timers.
Args:
key: Keyword used to extract data from timers.
minfract: Don't show sections whose relative weight is less that minfract.
Returns:
`matplotlib` figure
|
codesearchnet
|
def _RemoveFromPool(self):
with self.pool.lock:
if (not self.pool.started):
return False
if (len(self.pool) <= self.pool.min_threads):
return False
self.pool._RemoveWorker(self.name)
return True
|
Remove ourselves from the pool.
Returns:
True if removal was possible, and False if it was not possible.
|
codesearchnet
|
def partial_derivative_mu(mu, sigma, low, high, data):
pd_mu = np.sum(data - mu) / sigma ** 2
pd_mu -= len(data) * ((norm.pdf(low, mu, sigma) - norm.pdf(high, mu, sigma))
/ (norm.cdf(high, mu, sigma) - norm.cdf(low, mu, sigma)))
return -pd_mu
|
The partial derivative with respect to the mean.
Args:
mu (float): the mean of the truncated normal
sigma (float): the std of the truncated normal
low (float): the lower truncation bound
high (float): the upper truncation bound
data (ndarray): the one-dimensional array of data points for which we want to calculate the likelihood
Returns:
float: the partial derivative evaluated at the given point
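Example (a small numeric sketch with made-up data):
```python
import numpy as np

data = np.array([1.2, 3.4, 2.2, 5.1])
# Gradient of the negative log-likelihood w.r.t. mu, truncation bounds [0, 10].
grad_mu = partial_derivative_mu(mu=2.0, sigma=1.5, low=0.0, high=10.0, data=data)
```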
|
juraj-google-style
|
def _distance_graph(cls, inputs, clusters, distance_metric):
assert isinstance(inputs, list)
if distance_metric == SQUARED_EUCLIDEAN_DISTANCE:
return cls._compute_euclidean_distance(inputs, clusters)
elif distance_metric == COSINE_DISTANCE:
return cls._compute_cosine_distance(inputs, clusters, inputs_normalized=True)
else:
assert False, str(distance_metric)
|
Computes distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
distance_metric: distance metric used for clustering
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
Currently only Euclidean distance and cosine distance are supported.
|
github-repos
|
def throw(self, exception_class, should_throw):
return self.__copy_and_set('throws', (self._throws + [(exception_class, should_throw)]))
|
Defines if an exception should be thrown after the request is sent
Args:
exception_class (class): The class of the exception to instantiate
should_throw (function): The predicate that should indicate if the exception
should be thrown. This function will be called with the response as a parameter
Returns:
The request builder instance in order to chain calls
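Example (a hypothetical chaining sketch, assuming `builder` is a request builder instance):
```python
builder = builder.throw(TimeoutError, lambda response: response.status_code == 408)
```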
|
codesearchnet
|
def discount_bond_price(self, short_rate: types.RealTensor, times: types.RealTensor, maturities: types.RealTensor, name: str=None) -> types.RealTensor:
name = name or self._name + '_discount_bond_prices'
with tf.name_scope(name):
short_rate = tf.convert_to_tensor(short_rate, self._dtype)
times = tf.convert_to_tensor(times, self._dtype)
maturities = tf.convert_to_tensor(maturities, self._dtype)
input_shape_times = times.shape.as_list()
times_flat = tf.reshape(times, shape=[-1])
mean_reversion = self._mean_reversion(times_flat)
volatility = self._volatility(times_flat)
y_t = self._compute_yt(times_flat, mean_reversion, volatility)
mean_reversion = tf.reshape(tf.transpose(mean_reversion), input_shape_times + [self._dim])
y_t = tf.reshape(tf.transpose(y_t), input_shape_times + [self._dim])
values = self._bond_reconstitution(times, maturities, mean_reversion, short_rate, y_t)
return values
|
Returns zero-coupon bond prices `P(t,T)` conditional on `r(t)`.
Args:
short_rate: A `Tensor` of real dtype and shape `batch_shape + [dim]`
specifying the short rate `r(t)`.
times: A `Tensor` of real dtype and shape `batch_shape`. The time `t`
at which discount bond prices are computed.
maturities: A `Tensor` of real dtype and shape `batch_shape`. The time
to maturity of the discount bonds.
name: Str. The name to give this op.
Default value: `discount_bond_prices`.
Returns:
A `Tensor` of real dtype and the same shape as `batch_shape + [dim]`
containing the price of zero-coupon bonds.
|
github-repos
|
def _add_variable_proxy_methods(var, proxy_tensor):
proxy_tensor.read_value = (lambda : tf.identity(proxy_tensor))
proxy_tensor.assign_sub = var.assign_sub
proxy_tensor.assign = var.assign
proxy_tensor.initialized_value = var.initialized_value
|
Proxy methods of underlying variable.
This enables our custom getters to still work with, e.g., batch norm.
Args:
var: Variable to proxy
proxy_tensor: Tensor that is identity of var
|
codesearchnet
|
def DeserializeFrom(reader):
ttype = reader.ReadByte()
tx = None
from neo.Core.TX.RegisterTransaction import RegisterTransaction
from neo.Core.TX.IssueTransaction import IssueTransaction
from neo.Core.TX.ClaimTransaction import ClaimTransaction
from neo.Core.TX.MinerTransaction import MinerTransaction
from neo.Core.TX.PublishTransaction import PublishTransaction
from neo.Core.TX.InvocationTransaction import InvocationTransaction
from neo.Core.TX.EnrollmentTransaction import EnrollmentTransaction
from neo.Core.TX.StateTransaction import StateTransaction
if ttype == int.from_bytes(TransactionType.RegisterTransaction, 'little'):
tx = RegisterTransaction()
elif ttype == int.from_bytes(TransactionType.MinerTransaction, 'little'):
tx = MinerTransaction()
elif ttype == int.from_bytes(TransactionType.IssueTransaction, 'little'):
tx = IssueTransaction()
elif ttype == int.from_bytes(TransactionType.ClaimTransaction, 'little'):
tx = ClaimTransaction()
elif ttype == int.from_bytes(TransactionType.PublishTransaction, 'little'):
tx = PublishTransaction()
elif ttype == int.from_bytes(TransactionType.InvocationTransaction, 'little'):
tx = InvocationTransaction()
elif ttype == int.from_bytes(TransactionType.EnrollmentTransaction, 'little'):
tx = EnrollmentTransaction()
elif ttype == int.from_bytes(TransactionType.StateTransaction, 'little'):
tx = StateTransaction()
else:
tx = Transaction()
tx.Type = ttype
tx.DeserializeUnsignedWithoutType(reader)
tx.scripts = []
byt = reader.ReadVarInt()
if byt > 0:
for i in range(0, byt):
witness = Witness()
witness.Deserialize(reader)
tx.scripts.append(witness)
tx.OnDeserialized()
return tx
|
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
Returns:
Transaction:
|
juraj-google-style
|