code (string, lengths 20 to 4.93k) | docstring (string, lengths 33 to 1.27k) | source (3 classes) |
---|---|---|
def fill_dataset_tree(self, tree, data_sets):
tree.model().removeRows(0, tree.model().rowCount())
for index, (time, script) in enumerate(data_sets.items()):
name = script.settings['tag']
type = script.name
item_time = QtGui.QStandardItem(str(time))
item_name = QtGui.QStandardItem(str(name))
item_type = QtGui.QStandardItem(str(type))
item_time.setSelectable(False)
item_time.setEditable(False)
item_type.setSelectable(False)
item_type.setEditable(False)
tree.model().appendRow([item_time, item_name, item_type]) | fills the tree with data sets, where data_sets is a dictionary of the form {time: script}
Args:
tree:
data_sets: a dictionary mapping times to script objects
Returns: | juraj-google-style |
def clamp(value, maximum=None):
value = max(value, 0)
if (maximum is not None):
return min(value, maximum)
else:
return value | Clamp numeric values to be non-negative, and optionally, less than a
given maximum.
Args:
value (float) :
A number to clamp.
maximum (float, optional) :
A max bound to clamp to. If None, there is no upper bound,
and values are only clamped to be non-negative. (default: None)
Returns:
float | codesearchnet |
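A quick usage sketch of the clamping behaviour documented above (standalone restatement with hypothetical values, not the library code itself):
def _clamp(value, maximum=None):
    value = max(value, 0)  # lower bound at zero
    return min(value, maximum) if maximum is not None else value
print(_clamp(-3))    # 0
print(_clamp(7, 5))  # 5
print(_clamp(2.5))   # 2.5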
def get_string(self, significant_figures=6):
ph = ('{:.%df}' % significant_figures)
lines = []
for (bound, d) in zip(self.bounds, 'xyz'):
fillers = (bound + ([d] * 2))
bound_format = ' '.join((([ph] * 2) + [' {}lo {}hi']))
lines.append(bound_format.format(*fillers))
if self.tilt:
tilt_format = ' '.join((([ph] * 3) + [' xy xz yz']))
lines.append(tilt_format.format(*self.tilt))
return '\n'.join(lines) | Returns the string representation of simulation box in LAMMPS
data file format.
Args:
significant_figures (int): No. of significant figures to
output for box settings. Default to 6.
Returns:
String representation | codesearchnet |
def iso_date(d) -> str:
if isinstance(d, datetime):
return d.isoformat()
elif isinstance(d, date):
return datetime.combine(d, datetime.min.time()).isoformat()
else:
try:
datetime.strptime(d, '%Y-%m-%dT%H:%M:%S')
return d
except ValueError:
try:
datetime.strptime(d, '%Y-%m-%d')
return d + "T00:00:00"
except ValueError:
pass
raise ISODateError("Can not convert value to ISO format for kg") | Return iso format of a date
Args:
d: a date, a datetime, or an ISO-format date/datetime string
Returns: str | juraj-google-style |
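For reference, a minimal sketch of the conversions iso_date performs, using only the standard library (the error branch for unparseable input is omitted here):
from datetime import date, datetime
print(datetime(2020, 1, 2, 3, 4, 5).isoformat())                             # 2020-01-02T03:04:05
print(datetime.combine(date(2020, 1, 2), datetime.min.time()).isoformat())   # 2020-01-02T00:00:00
print("2020-01-02" + "T00:00:00")                                            # plain date strings get a midnight time appended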
def get_config(self):
raise NotImplementedError(f'{self} does not implement get_config()') | Returns the config of the quantizer.
A quantizer config is a Python dictionary (serializable)
containing all configuration parameters of the quantizer.
The same quantizer can be reinstantiated later
(without any saved state) from this configuration.
This method is optional if you are just training and executing models,
exporting to and from SavedModels, or using weight checkpoints.
This method is required for Keras `model_to_estimator`, saving and
loading models to HDF5 formats, Keras model cloning, some visualization
utilities, and exporting models to and from JSON.
Returns:
Python dictionary. | github-repos |
def predict_on_batch(self, x):
raise NotImplementedError | Returns predictions for a single batch of samples.
Args:
x: Input data. It must be array-like.
Returns:
NumPy array(s) of predictions. | github-repos |
def _read_hip_para(self, length, *, version):
counter = 0
optkind = list()
options = dict()
while (counter < length):
kind = self._read_binary(2)
if (not kind):
break
code = int(kind, base=2)
cbit = (True if int(kind[15], base=2) else False)
clen = self._read_unpack(2)
plen = ((11 + clen) - ((clen + 3) % 8))
dscp = _HIP_PARA.get(code, 'Unassigned')
data = _HIP_PROC(dscp)(self, code, cbit, clen, desc=dscp, length=plen, version=version)
counter += plen
if (dscp in optkind):
if isinstance(options[dscp], tuple):
options[dscp] += (Info(data),)
else:
options[dscp] = (Info(options[dscp]), Info(data))
else:
optkind.append(dscp)
options[dscp] = data
if (counter != length):
raise ProtocolError(f'HIPv{version}: invalid format')
return (tuple(optkind), options) | Read HIP parameters.
Positional arguments:
* length -- int, length of parameters
Keyword arguments:
* version -- int, HIP version
Returns:
* dict -- extracted HIP parameters | codesearchnet |
def MatchBestComponentName(self, component):
fd = self.OpenAsContainer()
file_listing = set(fd.ListNames())
if component not in file_listing:
lower_component = component.lower()
for x in file_listing:
if lower_component == x.lower():
component = x
break
if fd.supported_pathtype != self.pathspec.pathtype:
new_pathspec = rdf_paths.PathSpec(
path=component, pathtype=fd.supported_pathtype)
else:
new_pathspec = self.pathspec.last.Copy()
new_pathspec.path = component
return new_pathspec | Returns the name of the component which best matches our base listing.
In order to do the best case insensitive matching we list the files in the
base handler and return the base match for this component.
Args:
component: A component name which should be present in this directory.
Returns:
the best component name. | juraj-google-style |
def _update_record(self, identifier, rtype=None, name=None, content=None):
if (identifier is not None):
identifier = int(identifier)
records = self._list_records_internal(identifier=identifier)
else:
records = self._list_records_internal(name=name, rtype=rtype)
LOGGER.debug('Records to update (%d): %s', len(records), records)
assert records, 'No record found to update'
success = True
for record in records:
name = (name if (name is not None) else record['name'])
rtype = (rtype if (rtype is not None) else record['type'])
content = (content if (content is not None) else record['content'])
success = (success and self._create_record_internal(rtype, name, content, record['id']))
return success | Update a DNS entry identified by identifier or name in the domain zone.
Any argument that is not given will leave the current value of the DNS entry unchanged.
Args:
identifier (str): The easyname id of the DNS entry to update.
[rtype] (str): The DNS rtype (e.g. A, TXT, MX, etc) of the new entry.
[name] (str): The name of the new DNS entry, e.g the domain for which
a MX entry shall be valid.
[content] (str): The content of the new DNS entry, e.g. the mail
server hostname for a MX entry.
Returns:
bool: True if the record was updated successfully, False otherwise.
Raises:
AssertionError: When a request returns unexpected or unknown data. | codesearchnet |
def unescape(cls, text: str) -> str:
chop = text.split("\\", 1)
try:
return (chop[0] if len(chop) == 1
else chop[0] + cls.unescape_map[chop[1][0]] +
cls.unescape(chop[1][1:]))
except KeyError:
raise InvalidArgument(text) from None | Replace escape sequence with corresponding characters.
Args:
text: Text to unescape. | juraj-google-style |
def _ImageDimensions(image, rank):
if image.get_shape().is_fully_defined():
return image.get_shape().as_list()
else:
static_shape = image.get_shape().with_rank(rank).as_list()
dynamic_shape = array_ops_stack.unstack(array_ops.shape(image), rank)
return [s if s is not None else d for s, d in zip(static_shape, dynamic_shape)] | Returns the dimensions of an image tensor.
Args:
image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`.
rank: The expected rank of the image
Returns:
A list of corresponding to the dimensions of the
input image. Dimensions that are statically known are python integers,
otherwise, they are integer scalar tensors. | github-repos |
def GetFileAndLine(component):
if inspect.isbuiltin(component):
return (None, None)
try:
filename = inspect.getsourcefile(component)
except TypeError:
return (None, None)
try:
unused_code, lineindex = inspect.findsource(component)
lineno = lineindex + 1
except (OSError, IndexError):
lineno = None
return (filename, lineno) | Returns the filename and line number of component.
Args:
component: A component to find the source information for, usually a class
or routine.
Returns:
filename: The name of the file where component is defined.
lineno: The line number where component is defined. | github-repos |
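A small runnable sketch of the same inspect-based lookup (run it as a script so the source file is available to inspect):
import inspect
def sample():
    return 42
filename = inspect.getsourcefile(sample)
_, lineindex = inspect.findsource(sample)
print(filename, lineindex + 1)  # path of this script and the line where sample() is defined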
def strip_prefix_from_items(prefix, items):
items_no_prefix = []
for item in items:
if item.startswith(prefix):
items_no_prefix.append(item[len(prefix):])
else:
items_no_prefix.append(item)
return items_no_prefix | Strips out the prefix from each of the items if it is present.
Args:
prefix: the string that you wish to strip from the beginning of each
of the items.
items: a list of strings that may or may not contain the prefix you want
to strip out.
Returns:
items_no_prefix: a copy of the list of items (same order) without the
prefix (if present). | codesearchnet |
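Usage sketch with toy values and a hypothetical bucket prefix:
items = ["gs://bucket/a.txt", "gs://bucket/b.txt", "local/c.txt"]
prefix = "gs://bucket/"
print([item[len(prefix):] if item.startswith(prefix) else item for item in items])
# ['a.txt', 'b.txt', 'local/c.txt']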
def encode(self, s):
if s.endswith(".mp3"):
out_filepath = s[:-4] + ".wav"
call([
"sox", "--guard", s, "-r", "16k", "-b", "16", "-c", "1", out_filepath
])
s = out_filepath
elif not s.endswith(".wav"):
out_filepath = s + ".wav"
if not os.path.exists(out_filepath):
call(["sox", "-r", "16k", "-b", "16", "-c", "1", s, out_filepath])
s = out_filepath
rate, data = wavfile.read(s)
assert rate == self._sample_rate
assert len(data.shape) == 1
if data.dtype not in [np.float32, np.float64]:
data = data.astype(np.float32) / np.iinfo(data.dtype).max
return data.tolist() | Transform a string with a filename into a list of float32.
Args:
s: path to the file with a waveform.
Returns:
samples: list of float32 samples | juraj-google-style |
def start(self, name: str, increment_count: bool=True) -> None:
if (not self._timing):
return
now = get_now_utc_pendulum()
if self._stack:
last = self._stack[(- 1)]
self._totaldurations[last] += (now - self._starttimes[last])
if (name not in self._starttimes):
self._totaldurations[name] = datetime.timedelta()
self._count[name] = 0
self._starttimes[name] = now
if increment_count:
self._count[name] += 1
self._stack.append(name) | Start a named timer.
Args:
name: name of the timer
increment_count: increment the start count for this timer | codesearchnet |
def __init__(self, rfile, maxlen, bufsize=8192):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
self.buffer = EMPTY
self.bufsize = bufsize
self.closed = False | Initialize ChunkedRFile instance.
Args:
rfile (file): file encoded with the 'chunked' transfer encoding
maxlen (int): maximum length of the file being read
bufsize (int): size of the buffer used to read the file | juraj-google-style |
def collect_members(module_to_name):
members = {}
for (module, module_name) in module_to_name.items():
all_names = getattr(module, '__all__', None)
for (name, member) in inspect.getmembers(module):
if ((inspect.isfunction(member) or inspect.isclass(member)) and (not _always_drop_symbol_re.match(name)) and ((all_names is None) or (name in all_names))):
fullname = ('%s.%s' % (module_name, name))
if (name in members):
(other_fullname, other_member) = members[name]
if (member is not other_member):
raise RuntimeError(('Short name collision between %s and %s' % (fullname, other_fullname)))
if (len(fullname) == len(other_fullname)):
raise RuntimeError(("Can't decide whether to use %s or %s for %s: both full names have length %d" % (fullname, other_fullname, name, len(fullname))))
if (len(fullname) > len(other_fullname)):
continue
members[name] = (fullname, member)
return members | Collect all symbols from a list of modules.
Args:
module_to_name: Dictionary mapping modules to short names.
Returns:
Dictionary mapping name to (fullname, member) pairs. | codesearchnet |
def delete_tag(self, tag_name, **kwargs):
resp = self._delete(self._u(self._TAG_ENDPOINT_SUFFIX, tag_name), **kwargs)
resp.raise_for_status()
return resp | delete a tag by name
Args:
tag_name (string): name of tag to delete | codesearchnet |
def _normalize_hparams(hparams):
result = {}
for (k, v) in six.iteritems(hparams):
if isinstance(k, HParam):
k = k.name
if k in result:
raise ValueError("multiple values specified for hparam %r" % (k,))
result[k] = v
return result | Normalize a dict keyed by `HParam`s and/or raw strings.
Args:
hparams: A `dict` whose keys are `HParam` objects and/or strings
representing hyperparameter names, and whose values are
hyperparameter values. No two keys may have the same name.
Returns:
A `dict` whose keys are hyperparameter names (as strings) and whose
values are the corresponding hyperparameter values.
Raises:
ValueError: If two entries in `hparams` share the same
hyperparameter name. | juraj-google-style |
def _GetDateTime(self, filetime):
if (filetime == 0):
return dfdatetime_semantic_time.SemanticTime('Not set')
return dfdatetime_filetime.Filetime(timestamp=filetime) | Retrieves the date and time from a FILETIME timestamp.
Args:
filetime (int): FILETIME timestamp.
Returns:
dfdatetime.DateTimeValues: date and time. | codesearchnet |
def list(self, pattern='*'):
if (self._descriptors is None):
self._descriptors = self._client.list_metric_descriptors(filter_string=self._filter_string, type_prefix=self._type_prefix)
return [metric for metric in self._descriptors if fnmatch.fnmatch(metric.type, pattern)] | Returns a list of metric descriptors that match the filters.
Args:
pattern: An optional pattern to further filter the descriptors. This can
include Unix shell-style wildcards. E.g. ``"compute*"``,
``"*cpu/load_??m"``.
Returns:
A list of MetricDescriptor objects that match the filters. | codesearchnet |
def byte_adaptor(fbuffer):
if six.PY3:
strings = fbuffer.read().decode('latin-1')
fbuffer = six.StringIO(strings)
return fbuffer
else:
return fbuffer | provides py3 compatibility by converting a byte-based
file stream to a string-based file stream
Arguments:
fbuffer: file like objects containing bytes
Returns:
string buffer | juraj-google-style |
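A minimal sketch of the Python 3 branch using only io (six is not needed on Python 3):
import io
fbuffer = io.BytesIO(b"SYMBOL,PRICE\nACC,1500\n")
strings = fbuffer.read().decode("latin-1")   # bytes -> str
text_buffer = io.StringIO(strings)           # string-based file stream
print(text_buffer.readline().strip())        # SYMBOL,PRICE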
def check(self, dsm, simplicity_factor=2, **kwargs):
economy_of_mechanism = False
message = ''
data = dsm.data
categories = dsm.categories
dsm_size = dsm.size[0]
if (not categories):
categories = (['appmodule'] * dsm_size)
dependency_number = 0
for i in range(0, dsm_size):
for j in range(0, dsm_size):
if ((categories[i] not in ('framework', 'corelib')) and (categories[j] not in ('framework', 'corelib')) and (data[i][j] > 0)):
dependency_number += 1
if (dependency_number < (dsm_size * simplicity_factor)):
economy_of_mechanism = True
else:
message = ' '.join([('Number of dependencies (%s)' % dependency_number), ('> number of rows (%s)' % dsm_size), ('* simplicity factor (%s) = %s' % (simplicity_factor, (dsm_size * simplicity_factor)))])
return (economy_of_mechanism, message) | Check economy of mechanism.
As first abstraction, number of dependencies between two modules
< 2 * the number of modules
(dependencies to the framework are NOT considered).
Args:
dsm (:class:`DesignStructureMatrix`): the DSM to check.
simplicity_factor (int): simplicity factor.
Returns:
bool: True if economic, else False | codesearchnet |
def open_shards(glob_pattern, mode='rt', encoding='utf-8'):
if 'b' in mode:
encoding = None
with tempfile.NamedTemporaryFile(delete=False) as out_file:
for shard in glob.glob(glob_pattern):
with open(shard, 'rb') as in_file:
out_file.write(in_file.read())
concatenated_file_name = out_file.name
return io.open(concatenated_file_name, mode, encoding=encoding) | Returns a composite file of all shards matching the given glob pattern.
Args:
glob_pattern (str): Pattern used to match files which should be opened.
mode (str): Specify the mode in which the file should be opened. For
available modes, check io.open() documentation.
encoding (str): Name of the encoding used to decode or encode the file.
This should only be used in text mode.
Returns:
A stream with the contents of the opened files. | github-repos |
def __cloudflare_list_zone_records(self, *, account, zoneID, **kwargs):
done = False
records = {}
page = 1
while not done:
kwargs['page'] = page
response = self.__cloudflare_request(
account=account,
path='/zones/{}/dns_records'.format(zoneID),
args=kwargs
)
info = response['result_info']
if 'total_pages' not in info or page >= info['total_pages']:
done = True
else:
page += 1
for record in response['result']:
if record['name'] in records:
records[record['name']]['value'] = sorted(records[record['name']]['value'] + [record['content']])
else:
records[record['name']] = {
'name': record['name'],
'value': sorted([record['content']]),
'type': record['type']
}
return list(records.values()) | Helper function to list all records on a CloudFlare DNS Zone. Returns a `list` of `dict`s containing the records and
their information.
Args:
account (:obj:`CloudFlareAccount`): A CloudFlare Account object
zoneID (`int`): Internal CloudFlare ID of the DNS zone
**kwargs (`dict`): Additional arguments to be consumed by the API endpoint
Returns:
:obj:`list` of `dict` | juraj-google-style |
def ParseSMS(self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
phone_number = self._GetRowValue(query_hash, row, 'dstnum_sms')
if phone_number:
phone_number = phone_number.replace(' ', '')
event_data = SkypeSMSEventData()
event_data.number = phone_number
event_data.query = query
event_data.text = self._GetRowValue(query_hash, row, 'msg_sms')
timestamp = self._GetRowValue(query_hash, row, 'time_sms')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'SMS from Skype')
parser_mediator.ProduceEventWithEventData(event, event_data) | Parses an SMS.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query. | juraj-google-style |
def _orthogonal_matrix(self, n):
a = random_ops.random_normal([n, n], dtype=self.dtype, seed=self.seed)
if self.seed:
self.seed += 1
q, r = gen_linalg_ops.qr(a)
d = array_ops.diag_part(r)
q *= math_ops.sign(d)
return q | Construct an n x n orthogonal matrix.
Args:
n: Dimension.
Returns:
A n x n orthogonal matrix. | github-repos |
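The same QR-based construction sketched in NumPy (illustrative only, not the TensorFlow code path; the sign correction makes the factorization unique):
import numpy as np
n = 4
a = np.random.normal(size=(n, n))
q, r = np.linalg.qr(a)
q *= np.sign(np.diag(r))                # fix the signs so Q is uniquely determined
print(np.allclose(q @ q.T, np.eye(n)))  # True: rows and columns are orthonormal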
def where(condition, x1=None, x2=None):
if x1 is None and x2 is not None or (x1 is not None and x2 is None):
raise ValueError('`x1` and `x2` either both should be `None` or both should have non-None value.')
if any_symbolic_tensors((condition, x1, x2)):
return Where().symbolic_call(condition, x1, x2)
return backend.numpy.where(condition, x1, x2) | Return elements chosen from `x1` or `x2` depending on `condition`.
Args:
condition: Where `True`, yield `x1`, otherwise yield `x2`.
x1: Values from which to choose when `condition` is `True`.
x2: Values from which to choose when `condition` is `False`.
Returns:
A tensor with elements from `x1` where `condition` is `True`, and
elements from `x2` where `condition` is `False`. | github-repos |
def cartesian(self,subsets=None,step_pixels=100,max_distance_pixels=150,*args,**kwargs):
n = Cartesian.read_cellframe(self,subsets=subsets,step_pixels=step_pixels,max_distance_pixels=max_distance_pixels,prune_neighbors=False,*args,**kwargs)
if 'measured_regions' in kwargs: n.measured_regions = kwargs['measured_regions']
else: n.measured_regions = self.get_measured_regions()
if 'measured_phenotypes' in kwargs: n.measured_phenotypes = kwargs['measured_phenotypes']
else: n.measured_phenotypes = self.phenotypes
n.microns_per_pixel = self.microns_per_pixel
return n | Return a class that can be used to create honeycomb plots
Args:
subsets (list): list of SubsetLogic objects
step_pixels (int): distance between hexagons
max_distance_pixels (int): the distance from each point by which to calculate the quantity of the phenotype for that area
Returns:
Cartesian: returns a class that holds the layout of the points to plot. | juraj-google-style |
def categorize(self, categories, default=None):
return dim(self, categorize, categories=categories, default=default) | Replaces discrete values with supplied categories
Replaces discrete values in input array into a fixed set of
categories defined either as a list or dictionary.
Args:
categories: List or dict of categories to map inputs to
default: Default value to assign if value not in categories | juraj-google-style |
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
local_stream = utils.BytearrayStream()
if len(self._credentials) == 0:
raise ValueError("Authentication struct missing credentials.")
for credential in self._credentials:
credential.write(local_stream, kmip_version=kmip_version)
self.length = local_stream.length()
super(Authentication, self).write(
output_stream,
kmip_version=kmip_version
)
output_stream.write(local_stream.buffer) | Write the data encoding the Authentication struct to a stream.
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0. | juraj-google-style |
def getTickTock(self, vals):
val0, val1 = vals
try:
_tick = self._getLiftValu(val0)
except ValueError as e:
raise s_exc.BadTypeValu(name=self.name, valu=val0,
mesg='Unable to process the value for val0 in getTickTock.')
sortval = False
if isinstance(val1, str):
if val1.startswith(('+-', '-+')):
sortval = True
delt = s_time.delta(val1[2:])
_tock = _tick + delt
_tick = _tick - delt
elif val1.startswith('-'):
sortval = True
_tock = self._getLiftValu(val1, relto=_tick)
else:
_tock = self._getLiftValu(val1, relto=_tick)
else:
_tock = self._getLiftValu(val1, relto=_tick)
if sortval and _tick >= _tock:
tick = min(_tick, _tock)
tock = max(_tick, _tock)
return tick, tock
return _tick, _tock | Get a tick, tock time pair.
Args:
vals (list): A pair of values to norm.
Returns:
(int, int): An ordered pair of integers. | juraj-google-style |
async def count(self, text, opts=None):
i = 0
async for _ in self.cell.eval(text, opts=opts, user=self.user):
i += 1
return i | Count the number of nodes which result from a storm query.
Args:
text (str): Storm query text.
opts (dict): Storm query options.
Returns:
(int): The number of nodes resulting from the query. | codesearchnet |
def rpow(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"rpow", other, axis=axis, level=level, fill_value=fill_value
) | Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied. | juraj-google-style |
def roc_auc_score(y_true: Union[List[List[float]], List[List[int]], np.ndarray],
y_pred: Union[List[List[float]], List[List[int]], np.ndarray]) -> float:
try:
return sklearn.metrics.roc_auc_score(np.squeeze(np.array(y_true)),
np.squeeze(np.array(y_pred)), average="macro")
except ValueError:
return 0. | Compute Area Under the Curve (AUC) from prediction scores.
Args:
y_true: true binary labels
y_pred: target scores, can either be probability estimates of the positive class
Returns:
Area Under the Curve (AUC) from prediction scores | juraj-google-style |
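Usage sketch with toy labels and scores (the standard scikit-learn example; macro averaging as above):
import numpy as np
import sklearn.metrics
y_true = np.array([0, 0, 1, 1])
y_pred = np.array([0.1, 0.4, 0.35, 0.8])
print(sklearn.metrics.roc_auc_score(y_true, y_pred, average="macro"))  # 0.75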
def get_backend_engine(self, name, **kwargs):
if name not in self._engines:
msg = "Given settings backend is unknowed: {}"
raise SettingsBackendError(msg.format(name))
return self._engines[name](**kwargs) | Get backend engine from given name.
Args:
name (string): Name of the backend engine to get.
Raises:
boussole.exceptions.SettingsBackendError: If given backend name
does not match any available engine.
Returns:
object: Instance of selected backend engine. | juraj-google-style |
def get_sample_window(self, type_tag, size=10):
size = size * 1024 * 1024
cursor = self.database[self.sample_collection].find({'type_tag': type_tag},
{'md5': 1,'length': 1}).sort('import_time',pymongo.DESCENDING)
total_size = 0
md5_list = []
for item in cursor:
if total_size > size:
return md5_list
md5_list.append(item['md5'])
total_size += item['length']
return md5_list | Get a window of samples not to exceed size (in MB).
Args:
type_tag: Type of sample ('exe','pcap','pdf','json','swf', or ...).
size: Size of samples in MBs.
Returns:
a list of md5s. | juraj-google-style |
def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False, rotate_manager_token=False):
url = self._url('/swarm/update')
response = self._post_json(url, data=swarm_spec, params={'rotateWorkerToken': rotate_worker_token, 'rotateManagerToken': rotate_manager_token, 'version': version})
self._raise_for_status(response)
return True | Update the Swarm's configuration
Args:
version (int): The version number of the swarm object being
updated. This is required to avoid conflicting writes.
swarm_spec (dict): Configuration settings to update. Use
:py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec` to
generate a valid configuration. Default: ``None``.
rotate_worker_token (bool): Rotate the worker join token. Default:
``False``.
rotate_manager_token (bool): Rotate the manager join token.
Default: ``False``.
Returns:
``True`` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | codesearchnet |
async def verify_docker_worker_task(chain, link):
if chain != link:
check_interactive_docker_worker(link)
verify_docker_image_sha(chain, link) | Docker-worker specific checks.
Args:
chain (ChainOfTrust): the chain we're operating on
link (ChainOfTrust or LinkOfTrust): the trust object for the signing task.
Raises:
CoTError: on failure. | juraj-google-style |
def all(self, data={}, **kwargs):
return super(Subscription, self).all(data, **kwargs) | Fetch all Subscription entities
Returns:
Dictionary of Subscription data | codesearchnet |
def reduce_by(self, package_request):
if self.pr:
reqstr = _short_req_str(package_request)
self.pr.passive('reducing %s wrt %s...', self, reqstr)
if self.solver.optimised:
if (package_request in self.been_reduced_by):
return (self, [])
if ((package_request.range is None) or (package_request.name not in self.fam_requires)):
return (self, [])
with self.solver.timed(self.solver.reduction_time):
return self._reduce_by(package_request) | Remove variants whose dependencies conflict with the given package
request.
Returns:
(VariantSlice, [Reduction]) tuple, where slice may be None if all
variants were reduced. | codesearchnet |
def emboss_pepstats_on_fasta(infile, outfile='', outdir='', outext='.pepstats', force_rerun=False):
outfile = ssbio.utils.outfile_maker(inname=infile, outname=outfile, outdir=outdir, outext=outext)
program = 'pepstats'
pepstats_args = '-sequence="{}" -outfile="{}"'.format(infile, outfile)
cmd_string = '{} {}'.format(program, pepstats_args)
ssbio.utils.command_runner(cmd_string, force_rerun_flag=force_rerun, outfile_checker=outfile, silent=True)
return outfile | Run EMBOSS pepstats on a FASTA file.
Args:
infile: Path to FASTA file
outfile: Name of output file without extension
outdir: Path to output directory
outext: Extension of results file, default is ".pepstats"
force_rerun: Flag to rerun pepstats
Returns:
str: Path to output file. | juraj-google-style |
def GetApprovalForObject(object_urn, token=None, username=""):
if token is None:
raise access_control.UnauthorizedAccess(
"No token given, cannot authenticate.")
if not username:
username = token.username
approvals_root_urn = aff4.ROOT_URN.Add("ACL").Add(
object_urn.Path()).Add(username)
children_urns = list(aff4.FACTORY.ListChildren(approvals_root_urn))
if not children_urns:
raise access_control.UnauthorizedAccess(
"No approval found for user %s" % utils.SmartStr(username),
subject=object_urn)
last_error = None
approvals = aff4.FACTORY.MultiOpen(
children_urns,
mode="r",
aff4_type=Approval,
age=aff4.ALL_TIMES,
token=token)
for approval in approvals:
try:
test_token = access_control.ACLToken(
username=username, reason=approval.Get(approval.Schema.REASON))
approval.CheckAccess(test_token)
return test_token
except access_control.UnauthorizedAccess as e:
last_error = e
if last_error:
raise access_control.UnauthorizedAccess(last_error, subject=object_urn)
else:
raise access_control.UnauthorizedAccess(
"Couldn't open any of %d approvals "
"for user %s" % (len(children_urns), utils.SmartStr(username)),
subject=object_urn) | Looks for approvals for an object and returns available valid tokens.
Args:
object_urn: Urn of the object we want access to.
token: The token to use to lookup the ACLs.
username: The user to get the approval for, if "" we get it from the
token.
Returns:
A token for access to the object on success, otherwise raises.
Raises:
UnauthorizedAccess: If there are no valid approvals available. | juraj-google-style |
def checksum1(data, stringlength):
value_buffer = 0
for count in range(0, stringlength):
value_buffer = value_buffer ^ data[count]
return value_buffer&0xFE | Calculate Checksum 1
Calculate the checksum 1 required for the herkulex data packet
Args:
data (list): the data of which checksum is to be calculated
stringlength (int): the length of the data
Returns:
int: The calculated checksum 1 | juraj-google-style |
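Worked example of the XOR-and-mask checksum above on a small hypothetical packet:
data = [0x01, 0x02, 0x03, 0x04]
value = 0
for byte in data:
    value ^= byte      # running XOR over the packet bytes
print(value & 0xFE)    # 4 (least significant bit masked off)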
def from_json(cls, json):
if json[cls.KEY_RANGE_PARAM] is None:
key_ranges = None
else:
key_ranges = []
for k in json[cls.KEY_RANGE_PARAM]:
if k:
key_ranges.append(key_range.KeyRange.from_json(k))
else:
key_ranges.append(None)
if json[cls.NAMESPACE_RANGE_PARAM] is None:
ns_range = None
else:
ns_range = namespace_range.NamespaceRange.from_json_object(
json[cls.NAMESPACE_RANGE_PARAM])
if json[cls.CURRENT_KEY_RANGE_PARAM] is None:
current_key_range = None
else:
current_key_range = key_range.KeyRange.from_json(
json[cls.CURRENT_KEY_RANGE_PARAM])
return cls(
json[cls.ENTITY_KIND_PARAM],
key_ranges,
ns_range,
json[cls.BATCH_SIZE_PARAM],
current_key_range,
filters=json.get(cls.FILTERS_PARAM)) | Create new DatastoreInputReader from the json, encoded by to_json.
Args:
json: json map representation of DatastoreInputReader.
Returns:
an instance of DatastoreInputReader with all data deserialized from json. | juraj-google-style |
def substitute(self, var_map):
if (self in var_map):
return var_map[self]
return self._substitute(var_map) | Substitute sub-expressions
Args:
var_map (dict): Dictionary with entries of the form
``{expr: substitution}`` | codesearchnet |
def initialize(self):
if ops.executing_eagerly_outside_functions():
self._iterator._eager_reset()
return []
else:
return [self._iterator.initializer] | Initialize underlying iterator.
In eager execution, this simply recreates the underlying iterator.
In graph execution, it returns the initializer ops for the underlying
iterator.
Returns:
A list of any initializer ops that should be run. | github-repos |
def get_catalog_results(self, content_filter_query, query_params=None, traverse_pagination=False):
query_params = query_params or {}
try:
endpoint = getattr(self.client, self.SEARCH_ALL_ENDPOINT)
response = endpoint().post(data=content_filter_query, **query_params)
if traverse_pagination:
response['results'] = self.traverse_pagination(response, endpoint, content_filter_query, query_params)
response['next'] = response['previous'] = None
except Exception as ex:
LOGGER.exception(
'Attempted to call course-discovery search/all/ endpoint with the following parameters: '
'content_filter_query: %s, query_params: %s, traverse_pagination: %s. '
'Failed to retrieve data from the catalog API. content -- [%s]',
content_filter_query,
query_params,
traverse_pagination,
getattr(ex, 'content', '')
)
raise ex
return response | Return results from the discovery service's search/all endpoint.
Arguments:
content_filter_query (dict): query parameters used to filter catalog results.
query_params (dict): query parameters used to paginate results.
traverse_pagination (bool): True to return all results, False to return the paginated response.
Defaults to False.
Returns:
dict: Paginated response or all the records. | juraj-google-style |
def samefile(path1, path2):
(path1, path1_is_storage) = format_and_is_storage(path1)
(path2, path2_is_storage) = format_and_is_storage(path2)
if ((not path1_is_storage) and (not path2_is_storage)):
return os_path_samefile(path1, path2)
if ((not path1_is_storage) or (not path2_is_storage)):
return False
with handle_os_exceptions():
system = get_instance(path1)
if (system is not get_instance(path2)):
return False
elif (system.relpath(path1) != system.relpath(path2)):
return False
return True | Return True if both pathname arguments refer to the same file or directory.
Equivalent to "os.path.samefile".
Args:
path1 (path-like object): Path or URL.
path2 (path-like object): Path or URL.
Returns:
bool: True if same file or directory. | codesearchnet |
def from_file(cls, source, distance_weights=None, merge_same_words=False, group_marker_opening='<<', group_marker_closing='>>'):
source_string = open(source, 'r').read()
return cls.from_string(source_string, distance_weights, merge_same_words, group_marker_opening=group_marker_opening, group_marker_closing=group_marker_closing) | Read a string from a file and derive a ``Graph`` from it.
This is a convenience function for opening a file and passing its
contents to ``Graph.from_string()`` (see that for more detail)
Args:
source (str): the file to read and derive the graph from
distance_weights (dict): dict of relative indices corresponding
with word weights. See ``Graph.from_string`` for more detail.
merge_same_words (bool): whether nodes which have the same value
should be merged or not.
group_marker_opening (str): The string used to mark the beginning
of word groups.
group_marker_closing (str): The string used to mark the end
of word groups.
Returns: Graph
Example:
>>> graph = Graph.from_file('cage.txt') # doctest: +SKIP
>>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP
'poetry i have nothing to say and i' | codesearchnet |
def schedule(self, function, args, kwargs):
closure = Closure(function, self.closure_queue._cancellation_mgr, args=args, kwargs=kwargs)
ret = closure.build_output_remote_value()
self.closure_queue.put(closure)
return ret | Schedules `function` to be dispatched to a worker for execution.
Args:
function: The function to be dispatched to a worker for execution
asynchronously.
args: Positional arguments for `fn`.
kwargs: Keyword arguments for `fn`.
Returns:
A `RemoteValue` object. | github-repos |
def get_organisation(self, id, name=None):
return self.create_organisation(dict(id=id, name=name)) | Get an organisation
Returns:
Organisation: The organisation with the given `id` | codesearchnet |
def _split_list_into_bundles(self, output_pcollection, elements, max_element_per_bundle, element_size_fn):
bundle = self._evaluation_context.create_bundle(output_pcollection)
bundle_size = 0
bundles = [bundle]
for element in elements:
if max_element_per_bundle and bundle_size >= max_element_per_bundle:
bundle = self._evaluation_context.create_bundle(output_pcollection)
bundle_size = 0
bundles.append(bundle)
bundle.output(element)
bundle_size += element_size_fn(element)
return bundles | Splits elements, an iterable, into multiple output bundles.
Args:
output_pcollection: PCollection that the elements belong to.
elements: elements to be chunked into bundles.
max_element_per_bundle: (approximately) the maximum element per bundle.
If it is None, only a single bundle will be produced.
element_size_fn: Function to return the size of a given element.
Returns:
List of output uncommitted bundles with at least one bundle. | github-repos |
def metadata(self, url):
_, path = self._parse_url(url)
status = self._hdfs_client.status(path, strict=False)
if status is None:
raise BeamIOError('File not found: %s' % url)
return FileMetadata(url, status[_FILE_STATUS_LENGTH], status[_FILE_STATUS_UPDATED] / 1000.0) | Fetch metadata fields of a file on the FileSystem.
Args:
url: string url of a file.
Returns:
:class:`~apache_beam.io.filesystem.FileMetadata`.
Raises:
``BeamIOError``: if url doesn't exist. | github-repos |
def exportUsufy(data, ext, fileH):
if (ext == 'csv'):
usufyToCsvExport(data, ((fileH + '.') + ext))
elif (ext == 'gml'):
usufyToGmlExport(data, ((fileH + '.') + ext))
elif (ext == 'json'):
usufyToJsonExport(data, ((fileH + '.') + ext))
elif (ext == 'ods'):
usufyToOdsExport(data, ((fileH + '.') + ext))
elif (ext == 'png'):
usufyToPngExport(data, ((fileH + '.') + ext))
elif (ext == 'txt'):
usufyToTextExport(data, ((fileH + '.') + ext))
elif (ext == 'xls'):
usufyToXlsExport(data, ((fileH + '.') + ext))
elif (ext == 'xlsx'):
usufyToXlsxExport(data, ((fileH + '.') + ext)) | Method that exports the different structures onto different formats.
Args:
-----
data: Data to export.
ext: One of the following: csv, gml, json, ods, png, txt, xls, xlsx.
fileH: Fileheader for the output files.
Returns:
--------
Performs the export as requested by parameter. | codesearchnet |
def CreateTask(self, session_identifier):
task = tasks.Task(session_identifier)
logger.debug('Created task: {0:s}.'.format(task.identifier))
with self._lock:
self._tasks_queued[task.identifier] = task
self._total_number_of_tasks += 1
self.SampleTaskStatus(task, 'created')
return task | Creates a task.
Args:
session_identifier (str): the identifier of the session the task is
part of.
Returns:
Task: task attribute container. | juraj-google-style |
def delete(self, key):
key = self._service_key(key)
self._service_ops['delete'](key) | Removes the object named by `key` in `service`.
Args:
key: Key naming the object to remove. | codesearchnet |
def build_all_reduce_device_prefixes(job_name, num_tasks):
if (job_name != 'localhost'):
return [('/job:%s/task:%d' % (job_name, d)) for d in range(0, num_tasks)]
else:
assert (num_tasks == 1)
return [('/job:%s' % job_name)] | Build list of device prefix names for all_reduce.
Args:
job_name: "worker", "ps" or "localhost".
num_tasks: number of jobs across which device names should be generated.
Returns:
A list of device name prefix strings. Each element spells out the full
host name without adding the device.
e.g. "/job:worker/task:0" | codesearchnet |
def get_package(name, version, paths=None):
if isinstance(version, basestring):
range_ = VersionRange("==%s" % version)
else:
range_ = VersionRange.from_version(version, "==")
it = iter_packages(name, range_, paths)
try:
return it.next()
except StopIteration:
return None | Get an exact version of a package.
Args:
name (str): Name of the package, eg 'maya'.
version (Version or str): Version of the package, eg '1.0.0'
paths (list of str, optional): paths to search for package, defaults
to `config.packages_path`.
Returns:
`Package` object, or None if the package was not found. | juraj-google-style |
def new_cells(self, name=None, formula=None):
return self._impl.new_cells(name, formula).interface | Create a cells in the space.
Args:
name: If omitted, the cells is named automatically ``CellsN``,
where ``N`` is an available number.
formula: The function to define the formula of the cells.
Returns:
The new cells. | juraj-google-style |
def Get(self, request, global_params=None):
config = self.GetMethodConfig('Get')
return self._RunMethod(config, request, global_params=global_params) | Gets the specified model resource by model ID.
Args:
request: (BigqueryModelsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Model) The response message. | github-repos |
def _full_reduce(self, axis, map_func, reduce_func=None):
if reduce_func is None:
reduce_func = map_func
mapped_parts = self.data.map_across_blocks(map_func)
full_frame = mapped_parts.map_across_full_axis(axis, reduce_func)
if axis == 0:
columns = self.columns
return self.__constructor__(
full_frame, index=["__reduced__"], columns=columns
)
else:
index = self.index
return self.__constructor__(
full_frame, index=index, columns=["__reduced__"]
) | Apply function that will reduce the data to a Pandas Series.
Args:
axis: 0 for columns and 1 for rows. Default is 0.
map_func: Callable function to map the dataframe.
reduce_func: Callable function to reduce the dataframe. If none,
then apply map_func twice.
Return:
A new QueryCompiler object containing the results from map_func and
reduce_func. | juraj-google-style |
def swd_read16(self, offset):
value = self._dll.JLINK_SWD_GetU16(offset)
return ctypes.c_uint16(value).value | Gets a unit of ``16`` bits from the input buffer.
Args:
self (JLink): the ``JLink`` instance
offset (int): the offset (in bits) from which to start reading
Returns:
The integer read from the input buffer. | juraj-google-style |
def slice(inputs, start_indices, shape):
if any_symbolic_tensors((inputs, start_indices)):
return Slice(shape=shape).symbolic_call(inputs, start_indices)
return backend.core.slice(inputs, start_indices, shape) | Return a slice of an input tensor.
At a high level, this operation is an explicit replacement for array slicing
e.g. `inputs[start_indices: start_indices + shape]`.
Unlike slicing via brackets, this operation will accept tensor start
indices on all backends, which is useful when indices dynamically computed
via other tensor operations.
```python
inputs = np.zeros((5, 5))
start_indices = np.array([3, 3])
shape = np.array([2, 2])
inputs = keras.ops.slice(inputs, start_indices, shape)
```
Args:
inputs: A tensor, the tensor to be updated.
start_indices: A list/tuple of shape `(inputs.ndim,)`, specifying
the starting indices for updating.
shape: The full shape of the returned slice.
Returns:
A tensor, has the same shape and dtype as `inputs`. | github-repos |
def get_usb_serial(self, port_num):
port = self.port_map[str(port_num)]
arg = ''.join(['DEVICE INFO,', self._addr, '.', port])
cmd = ['esuit64', '-t', arg]
info = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
serial = None
if ('SERIAL' in info):
serial_info = info.split('SERIAL:')[1]
serial = serial_info.split('\n')[0].strip()
use_info = info.split('BY')[1].split(' ')[1]
if (use_info == 'NO'):
cmd = ['esuit64', '-t', 'AUTO USE ALL']
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
time.sleep((50.0 / 1000.0))
else:
raise ValueError('No USB device detected')
return serial | Get the device serial number
Args:
port_num: port number on the Cambrionix unit
Return:
usb device serial number | codesearchnet |
def string_to_scopes(scopes):
if not scopes:
return []
elif isinstance(scopes, six.string_types):
return scopes.split(' ')
else:
return scopes | Converts stringifed scope value to a list.
If scopes is a list then it is simply passed through. If scopes is a
string then a list of each individual scope is returned.
Args:
scopes: a string or iterable of strings, the scopes.
Returns:
The scopes in a list. | juraj-google-style |
def make_single_array(ds, batch_size=(8 * 1024)):
if (isinstance(ds.output_types, tuple) or isinstance(ds.output_shapes, tuple)):
raise ValueError('Dataset must have a single type and shape')
nshapes = len(ds.output_shapes)
if (nshapes > 0):
raise ValueError('Dataset must be comprised of scalars (TensorShape=[])')
batches = []
with tf.Session() as sess:
ds = ds.batch(batch_size)
iterator = ds.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
with tqdm(desc='Elements', unit_scale=1) as pbar:
try:
while True:
batches.append(sess.run(get_next))
pbar.update(len(batches[(- 1)]))
except tf.errors.OutOfRangeError:
pass
if batches:
return np.concatenate(batches)
return np.array([], dtype=ds.output_types.as_numpy_dtype) | Create a single numpy array from a dataset.
The dataset must have only one dimension, that is,
the length of its `output_shapes` and `output_types`
is 1, and its output shape must be `[]`, that is,
every tensor in the dataset must be a scalar.
Args:
ds: a TF Dataset.
batch_size: how many elements to read per pass
Returns:
a single numpy array. | codesearchnet |
def na_if(series, *values):
series = pd.Series(series)
series[series.isin(values)] = np.nan
return series | If values in a series match a specified value, change them to `np.nan`.
Args:
series: Series or vector, often symbolic.
*values: Value(s) to convert to `np.nan` in the series. | codesearchnet |
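Usage sketch: any value listed in *values becomes NaN.
import numpy as np
import pandas as pd
s = pd.Series([1, 2, -99, 4])
s[s.isin([-99])] = np.nan
print(s.tolist())  # [1.0, 2.0, nan, 4.0]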
def from_api_repr(cls, resource):
if (
"datasetReference" not in resource
or "datasetId" not in resource["datasetReference"]
):
raise KeyError(
"Resource lacks required identity information:"
'["datasetReference"]["datasetId"]'
)
project_id = resource["datasetReference"]["projectId"]
dataset_id = resource["datasetReference"]["datasetId"]
dataset = cls(DatasetReference(project_id, dataset_id))
dataset._properties = copy.deepcopy(resource)
return dataset | Factory: construct a dataset given its API representation
Args:
resource (Dict[str: object]):
Dataset resource representation returned from the API
Returns:
google.cloud.bigquery.dataset.Dataset:
Dataset parsed from ``resource``. | juraj-google-style |
def custom_apply(self, path: utils.KeyPath, value_spec: pg_typing.ValueSpec, allow_partial: bool, child_transform: Optional[Callable[[utils.KeyPath, pg_typing.Field, Any], Any]]=None) -> Tuple[bool, 'Dict']:
proceed_with_standard_apply = True
if self._value_spec:
if value_spec and (not value_spec.is_compatible(self._value_spec)):
raise ValueError(utils.message_on_path(f'Dict (spec={self._value_spec!r}) cannot be assigned to an incompatible field (spec={value_spec!r}).', path))
if self._allow_partial == allow_partial:
proceed_with_standard_apply = False
else:
self._allow_partial = allow_partial
elif isinstance(value_spec, pg_typing.Dict):
self._value_spec = value_spec
return (proceed_with_standard_apply, self) | Implement pg.typing.CustomTyping interface.
Args:
path: KeyPath of current object.
value_spec: Origin value spec of the field.
allow_partial: Whether allow partial object to be created.
child_transform: Function to transform child node values in dict_obj into
their final values. Transform function is called on leaf nodes first,
then on their containers, recursively.
Returns:
A tuple (proceed_with_standard_apply, transformed value) | github-repos |
def dict_of_sets_add(dictionary, key, value):
set_objs = dictionary.get(key, set())
set_objs.add(value)
dictionary[key] = set_objs | Add value to a set in a dictionary by key
Args:
dictionary (DictUpperBound): Dictionary to which to add values
key (Any): Key within dictionary
value (Any): Value to add to set in dictionary
Returns:
None | juraj-google-style |
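Usage sketch: repeated adds for the same key accumulate into a single set (setdefault here is equivalent to the get-then-assign pattern above):
d = {}
for key, value in [("a", 1), ("a", 2), ("b", 3), ("a", 1)]:
    d.setdefault(key, set()).add(value)
print(d)  # {'a': {1, 2}, 'b': {3}}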
def _binary_assert_doc_v2(sym, opname, test_var):
def _decorator(func):
func.__doc__ = '\n Assert the condition `x {sym} y` holds element-wise.\n\n This Op checks that `x[i] {sym} y[i]` holds for every pair of (possibly\n broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is\n trivially satisfied.\n\n If `x` {sym} `y` does not hold, `message`, as well as the first `summarize`\n entries of `x` and `y` are printed, and `InvalidArgumentError` is raised.\n\n When using inside `tf.function`, this API takes effects during execution.\n It\'s recommended to use this API with `tf.control_dependencies` to\n ensure the correct execution order.\n\n In the following example, without `tf.control_dependencies`, errors may\n not be raised at all.\n Check `tf.control_dependencies` for more details.\n\n >>> def check_size(x):\n ... with tf.control_dependencies([\n ... tf.debugging.{opname}(tf.size(x), {test_var},\n ... message=\'Bad tensor size\')]):\n ... return x\n\n >>> check_size(tf.ones([2, 3], tf.float32))\n Traceback (most recent call last):\n ...\n InvalidArgumentError: ...\n\n Args:\n x: Numeric `Tensor`.\n y: Numeric `Tensor`, same dtype as and broadcastable to `x`.\n message: A string to prefix to the default message. (optional)\n summarize: Print this many entries of each tensor. (optional)\n name: A name for this operation (optional). Defaults to "{opname}".\n\n Returns:\n Op that raises `InvalidArgumentError` if `x {sym} y` is False. This can\n be used with `tf.control_dependencies` inside of `tf.function`s to\n block followup computation until the check has executed.\n @compatibility(eager)\n returns None\n @end_compatibility\n\n Raises:\n InvalidArgumentError: if the check can be performed immediately and\n `x == y` is False. The check can be performed immediately during eager\n execution or if `x` and `y` are statically known.\n '.format(sym=sym, opname=opname, test_var=test_var)
return func
return _decorator | Common docstring for v2 assert_* ops that compare two tensors element-wise.
Args:
sym: Binary operation symbol, i.e. "=="
opname: Name for the symbol, i.e. "assert_equal"
test_var: A number used in the docstring example
Returns:
Decorator that adds the appropriate docstring to the function for
symbol `sym`. | github-repos |
def inspect_edge(G: AnalysisGraph, source: str, target: str):
return create_statement_inspection_table(
G[source][target]["InfluenceStatements"]
) | 'Drill down' into an edge in the analysis graph and inspect its
provenance. This function prints the provenance.
Args:
G
source
target | juraj-google-style |
def __eq__(self, other):
return SeriesWeld(
grizzly_impl.compare(
self.expr,
other,
"==",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
) | Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description | juraj-google-style |
async def get_ticket(self, request):
session = (await get_session(request))
return session.get(self.cookie_name) | Called to return the ticket for a request.
Args:
request: aiohttp Request object.
Returns:
A ticket (string like) object, or None if no ticket is available
for the passed request. | codesearchnet |
def get_all_artifacts_per_task_id(chain, upstream_artifacts):
all_artifacts_per_task_id = {}
for link in chain.links:
if link.task_type in PARENT_TASK_TYPES:
add_enumerable_item_to_dict(
dict_=all_artifacts_per_task_id, key=link.task_id, item='public/task-graph.json'
)
if link.task_type in DECISION_TASK_TYPES:
add_enumerable_item_to_dict(
dict_=all_artifacts_per_task_id, key=link.task_id, item='public/actions.json'
)
add_enumerable_item_to_dict(
dict_=all_artifacts_per_task_id, key=link.task_id, item='public/parameters.yml'
)
if upstream_artifacts:
for upstream_dict in upstream_artifacts:
add_enumerable_item_to_dict(
dict_=all_artifacts_per_task_id, key=upstream_dict['taskId'], item=upstream_dict['paths']
)
for task_id, paths in all_artifacts_per_task_id.items():
all_artifacts_per_task_id[task_id] = sorted(set(paths))
return all_artifacts_per_task_id | Return every artifact to download, including the Chain Of Trust Artifacts.
Args:
chain (ChainOfTrust): the chain of trust object
upstream_artifacts: the list of upstream artifact definitions
Returns:
dict: sorted list of paths to downloaded artifacts ordered by taskId | juraj-google-style |
def from_api_repr(cls, resource):
entry = resource.copy()
role = entry.pop("role", None)
entity_type, entity_id = entry.popitem()
if len(entry) != 0:
raise ValueError("Entry has unexpected keys remaining.", entry)
return cls(role, entity_type, entity_id) | Factory: construct an access entry given its API representation
Args:
resource (Dict[str, object]):
Access entry resource representation returned from the API
Returns:
google.cloud.bigquery.dataset.AccessEntry:
Access entry parsed from ``resource``.
Raises:
ValueError:
If the resource has more keys than ``role`` and one additional
key. | juraj-google-style |
def sent_request(self, value):
if value == self._defaults['sentRequest'] and 'sentRequest' in self._values:
del self._values['sentRequest']
else:
self._values['sentRequest'] = value | The sent_request property.
Args:
value (string): the property value. | juraj-google-style |
def filter_def_file(def_file: str, filter_file: str, filtered_file: str) -> None:
with open(filter_file, 'r', encoding='utf-8') as filter_file_handle:
filter_json: Dict[str, Any] = json.load(filter_file_handle)
inclusion_patterns: List[str] = filter_json['global'] + ['EXPORTS', '*;*']
incl_patterns: List[Pattern[str]] = [re.compile(re.escape(p).replace('\\*', '.*')) for p in inclusion_patterns]
exclusion_patterns: List[str] = filter_json['local']
excl_patterns: List[Pattern[str]] = [re.compile(re.escape(p).replace('\\*', '.*')) for p in exclusion_patterns]
with open(def_file, 'r') as orig_file, open(filtered_file, 'w') as filt_file:
for l in orig_file:
if not matches_any(excl_patterns, l) or matches_any(incl_patterns, l):
filt_file.write(l) | Filters a windows .def file based on a filter .json.
Args:
def_file: The path to the input windows .def file.
filter_file: The path to the filter file (JSON format).
filtered_file: The path to the output filtered windows .def file. | github-repos |
def BuildAdGroupOperations(batch_job_helper, campaign_operations, number_of_adgroups=1):
adgroup_operations = [{'xsi_type': 'AdGroupOperation', 'operand': {'campaignId': campaign_operation['operand']['id'], 'id': batch_job_helper.GetId(), 'name': ('Batch Ad Group #%s' % uuid.uuid4())}, 'operator': 'ADD'} for campaign_operation in campaign_operations for _ in range(number_of_adgroups)]
return adgroup_operations | Builds the operations adding desired number of AdGroups to given Campaigns.
Note: When the AdGroups are created, they will have a different Id than those
generated here as a temporary Id. This is just used to identify them in the
BatchJobService.
Args:
batch_job_helper: a BatchJobHelper instance.
campaign_operations: a list containing the operations that will add
Campaigns.
number_of_adgroups: an int defining the number of AdGroups to be created per
Campaign.
Returns:
a list containing the operations that will add the desired number of
AdGroups to each of the provided Campaigns. | codesearchnet |
def __init__(self, job, runner, options=None):
self._job = job
self._runner = runner
self._options = options
self.metric_results = None | Initialize a new DataflowPipelineResult instance.
Args:
job: Job message from the Dataflow API. Could be :data:`None` if a job
request was not sent to Dataflow service (e.g. template jobs).
runner: DataflowRunner instance. | github-repos |
def create_autocast_variable(variable):
if not distributed_training_utils.is_distributed_variable(variable):
return AutoCastVariable(variable)
class AutoCastDistributedVariable(AutoCastVariable, variable.__class__):
def __repr__(self):
return '<AutoCastDistributedVariable dtype={v.dtype.name} dtype_to_cast_to={v._cast_dtype.name} inner_variable={v._variable}>'.format(v=self)
return AutoCastDistributedVariable(variable) | Creates an AutoCastVariable that wraps another variable.
This typically just returns `AutoCastVariable(variable)`. But, if the variable
is a DistributedVariable or one of its subclasses, we instead dynamically
create a class that subclasses from both AutoCastVariable and
variable.__class__. This is so the returned variable will still pass
`isinstance(variable, variable.__class__)`, which is required for
DistributedVariables and its subclasses to work properly.
Args:
variable: A floating-point resource variable to wrap.
Returns:
An AutoCastVariable that wraps the variable. | github-repos |
def compute_bleu(reference_corpus, translation_corpus, max_order=4, use_bp=True):
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = ([0] * max_order)
possible_matches_by_order = ([0] * max_order)
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict(((ngram, min(count, translation_ngram_counts[ngram])) for (ngram, count) in ref_ngram_counts.items()))
for ngram in overlap:
matches_by_order[(len(ngram) - 1)] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[(len(ngram) - 1)] += translation_ngram_counts[ngram]
precisions = ([0] * max_order)
smooth = 1.0
for i in xrange(0, max_order):
if (possible_matches_by_order[i] > 0):
precisions[i] = (float(matches_by_order[i]) / possible_matches_by_order[i])
if (matches_by_order[i] > 0):
precisions[i] = (float(matches_by_order[i]) / possible_matches_by_order[i])
else:
smooth *= 2
precisions[i] = (1.0 / (smooth * possible_matches_by_order[i]))
else:
precisions[i] = 0.0
if (max(precisions) > 0):
p_log_sum = sum((math.log(p) for p in precisions if p))
geo_mean = math.exp((p_log_sum / max_order))
if use_bp:
ratio = (translation_length / reference_length)
bp = (math.exp((1 - (1.0 / ratio))) if (ratio < 1.0) else 1.0)
bleu = (geo_mean * bp)
return np.float32(bleu) | Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score. | codesearchnet |
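Worked example of the brevity penalty term used above, for a translation shorter than its reference:
import math
reference_length, translation_length = 5, 4
ratio = translation_length / reference_length           # 0.8
bp = math.exp(1 - 1.0 / ratio) if ratio < 1.0 else 1.0
print(round(bp, 4))  # 0.7788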
def _ring_2d(m, n):
if (m == 1):
return [(0, i) for i in range(n)]
if (n == 1):
return [(i, 0) for i in range(m)]
if ((m % 2) != 0):
tf.logging.warning('Odd dimension')
return [((i % m), (i // m)) for i in range(m * n)]
ret = [(0, 0)]
for i in range((m // 2)):
for j in range(1, n):
ret.append(((2 * i), j))
for j in range((n - 1), 0, (- 1)):
ret.append((((2 * i) + 1), j))
for i in range((m - 1), 0, (- 1)):
ret.append((i, 0))
return ret | Ring-order of a mxn mesh.
Args:
m: an integer
n: an integer
Returns:
a list of mxn pairs | codesearchnet |
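For intuition, tracing a 2x3 mesh with the function above yields a closed ring around the grid (hypothetical call, shown with its expected output):
print(_ring_2d(2, 3))
# [(0, 0), (0, 1), (0, 2), (1, 2), (1, 1), (1, 0)]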
def to_json_string(self, use_diff: bool=True) -> str:
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + '\n' | Serializes this instance to a JSON string.
Args:
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
is serialized to JSON string.
Returns:
`str`: String containing all the attributes that make up this configuration instance in JSON format. | github-repos |
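A hedged example assuming the Hugging Face `transformers` package, where `BertConfig` is one concrete `PretrainedConfig` subclass exposing this method:
from transformers import BertConfig

config = BertConfig(hidden_size=1024)
print(config.to_json_string())                # only the fields that differ from the defaults
print(config.to_json_string(use_diff=False))  # the full configuration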
def get_aligned_output_features_output_indices(out_features: Optional[list[str]], out_indices: Optional[Union[list[int], tuple[int]]], stage_names: list[str]) -> tuple[list[str], list[int]]:
out_indices = list(out_indices) if out_indices is not None else None
verify_out_features_out_indices(out_features=out_features, out_indices=out_indices, stage_names=stage_names)
output_features, output_indices = _align_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=stage_names)
verify_out_features_out_indices(out_features=output_features, out_indices=output_indices, stage_names=stage_names)
return (output_features, output_indices) | Get the `out_features` and `out_indices` so that they are aligned.
The logic is as follows:
- `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the
`out_indices`.
- `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the
`out_features`.
- `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage.
- `out_indices` and `out_features` set: they are verified to be aligned.
Args:
out_features (`List[str]`): The names of the features for the backbone to output.
out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.
stage_names (`List[str]`): The names of the stages of the backbone. | github-repos |
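An illustrative call with made-up stage names, exercising the "`out_indices` set, `out_features` not set" branch described above:
stage_names = ['stem', 'stage1', 'stage2', 'stage3']
features, indices = get_aligned_output_features_output_indices(
    out_features=None, out_indices=[1, 3], stage_names=stage_names)
# features == ['stage1', 'stage3'], indices == [1, 3]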
def get_output_embeddings(self) -> nn.Module:
return None | Returns the model's output embeddings.
Returns:
`nn.Module`: A torch module mapping hidden states to vocabulary. | github-repos |
def bulk_insert(self, rows, return_model=False):
if (self.conflict_target or self.conflict_action):
compiler = self._build_insert_compiler(rows)
objs = compiler.execute_sql(return_id=True)
if return_model:
return [self.model(**dict(r, **k)) for (r, k) in zip(rows, objs)]
else:
return [dict(r, **k) for (r, k) in zip(rows, objs)]
return super().bulk_create([self.model(**fields) for fields in rows]) | Creates multiple new records in the database.
This allows specifying custom conflict behavior using .on_conflict().
If no special behavior was specified, this uses the normal Django create(..)
Arguments:
rows:
An array of dictionaries, where each dictionary
describes the fields to insert.
return_model (default: False):
If model instances should be returned rather than
just dicts.
Returns:
A list of either the dicts of the rows inserted, including the pk or
the models of the rows inserted with defaults for any fields not specified | codesearchnet |
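A minimal sketch with a hypothetical `MyModel`; without a preceding `.on_conflict(...)` call this takes the plain `bulk_create` fallback described above.
rows = [{'name': 'alpha'}, {'name': 'beta'}]
created = MyModel.objects.bulk_insert(rows, return_model=True)
# `created` is a list of MyModel instances, one per input dict.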
def get_header(message, name):
header = message.get(name)
log.debug('Getting header {!r}: {!r}'.format(name, header))
if header:
return decode_header_part(header)
return six.text_type() | Gets an email.message.Message and a header name and returns
the mail header decoded with the correct charset.
Args:
message (email.message.Message): email message object
name (string): header to get
Returns:
decoded header | codesearchnet |
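A self-contained sketch, assuming `get_header` above and its `decode_header_part` helper are importable; the subject line is RFC 2047 encoded UTF-8.
import email

raw = 'Subject: =?utf-8?b?Z3LDvMOfZQ==?=\n\nbody'
msg = email.message_from_string(raw)
print(get_header(msg, 'Subject'))  # expected: 'grüße'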
def flip_variable(self, v):
try:
idx = self.variables.index(v)
except ValueError:
raise ValueError('variable {} is not a variable in constraint {}'.format(v, self.name))
if (self.vartype is dimod.BINARY):
original_func = self.func
def func(*args):
new_args = list(args)
new_args[idx] = (1 - new_args[idx])
return original_func(*new_args)
self.func = func
self.configurations = frozenset((((config[:idx] + ((1 - config[idx]),)) + config[(idx + 1):]) for config in self.configurations))
else:
original_func = self.func
def func(*args):
new_args = list(args)
new_args[idx] = (- new_args[idx])
return original_func(*new_args)
self.func = func
self.configurations = frozenset((((config[:idx] + ((- config[idx]),)) + config[(idx + 1):]) for config in self.configurations))
self.name = '{} ({} flipped)'.format(self.name, v) | Flip a variable in the constraint.
Args:
v (variable):
Variable in the constraint to take the complementary value of its
construction value.
Examples:
This example creates a constraint that :math:`a = b` on binary variables
and flips variable a.
>>> import operator
>>> import dwavebinarycsp
>>> const = dwavebinarycsp.Constraint.from_func(operator.eq,
... ['a', 'b'], dwavebinarycsp.BINARY)
>>> const.check({'a': 0, 'b': 0})
True
>>> const.flip_variable('a')
>>> const.check({'a': 1, 'b': 0})
True
>>> const.check({'a': 0, 'b': 0})
False | codesearchnet |
def add(self, other_op):
self._op.logEntries.extend(other_op.logEntries)
self._merge_timestamps(other_op)
self._merge_metric_values(other_op) | Combines `other_op` with the operation held by this aggregator.
N.B. It merges the operations' log entries and metric values, but assumes
the operation is consistent. It is the caller's responsibility to ensure
consistency.
Args:
other_op (
class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`):
an operation merge into this one | codesearchnet |
def ParsePageVisitedRow(self, parser_mediator, query, row, cache=None, database=None, **unused_kwargs):
query_hash = hash(query)
from_visit = self._GetRowValue(query_hash, row, 'from_visit')
hidden = self._GetRowValue(query_hash, row, 'hidden')
rev_host = self._GetRowValue(query_hash, row, 'rev_host')
typed = self._GetRowValue(query_hash, row, 'typed')
extras = []
if from_visit:
extras.append('visited from: {0:s}'.format(self._GetUrl(from_visit, cache, database)))
if (hidden == '1'):
extras.append('(url hidden)')
if (typed == '1'):
extras.append('(directly typed)')
else:
extras.append('(URL not typed directly)')
event_data = FirefoxPlacesPageVisitedEventData()
event_data.host = self._ReverseHostname(rev_host)
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.title = self._GetRowValue(query_hash, row, 'title')
event_data.url = self._GetRowValue(query_hash, row, 'url')
event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count')
event_data.visit_type = self._GetRowValue(query_hash, row, 'visit_type')
if extras:
event_data.extra = extras
timestamp = self._GetRowValue(query_hash, row, 'visit_date')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data) | Parses a page visited row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
cache (Optional[SQLiteCache]): cache.
database (Optional[SQLiteDatabase]): database. | codesearchnet |
def AddEvent(self, event):
self._RaiseIfNotWritable()
event_data_identifier = event.GetEventDataIdentifier()
if event_data_identifier:
if not isinstance(event_data_identifier, identifiers.SQLTableIdentifier):
raise IOError('Unsupported event data identifier type: {0!s}'.format(
type(event_data_identifier)))
event.event_data_row_identifier = event_data_identifier.row_identifier
self._AddSerializedEvent(event) | Adds an event.
Args:
event (EventObject): event.
Raises:
IOError: when the storage file is closed or read-only or
if the event data identifier type is not supported.
OSError: when the storage file is closed or read-only or
if the event data identifier type is not supported. | juraj-google-style |
def getGrid(self, use_mask=True):
grid_card_name = 'WATERSHED_MASK'
if (not use_mask):
grid_card_name = 'ELEVATION'
return self.getGridByCard(grid_card_name) | Returns GDALGrid object of GSSHA model bounds
Parameters:
use_mask(bool): If True, uses the watershed mask. Otherwise, it uses the elevation grid.
Returns:
GDALGrid | codesearchnet |
def on_predict_batch_end(self, batch, logs=None):
if self._should_call_predict_batch_hooks:
self._call_batch_hook(ModeKeys.PREDICT, 'end', batch, logs=logs) | Calls the `on_predict_batch_end` methods of its callbacks.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch. | github-repos |
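For context, the hook this method dispatches to can be supplied through the public `tf.keras` callback API; a minimal sketch:
import tensorflow as tf

class BatchLogger(tf.keras.callbacks.Callback):
    def on_predict_batch_end(self, batch, logs=None):
        # Called once per predict batch with the aggregated logs so far.
        print(f'finished predict batch {batch}')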
def _gauss(mean: int, sigma: int) -> int:
return int(random.gauss(mean, sigma)) | Creates a variation from a base value
Args:
mean: base value
sigma: gaussian sigma
Returns: random value | codesearchnet |
def get_identifier(identifier, module_globals, module_name):
if isinstance(identifier, six.string_types):
fn = module_globals.get(identifier)
if (fn is None):
raise ValueError('Unknown {}: {}'.format(module_name, identifier))
return fn
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret identifier') | Helper utility to retrieve the callable function associated with a string identifier.
Args:
identifier: The identifier. Could be a string or function.
module_globals: The global objects of the module.
module_name: The module name
Returns:
The callable associated with the identifier. | codesearchnet |
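A self-contained sketch of both resolution paths (string name and callable), assuming the function above and its `six` dependency are in scope:
def relu(x):
    # Toy function used only for illustration.
    return max(0, x)

fn = get_identifier('relu', globals(), 'activation')
assert fn is relu
assert get_identifier(relu, globals(), 'activation') is relu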
def set_intrusion_alert_through_smoke_detectors(self, activate: bool = True):
data = {"intrusionAlertThroughSmokeDetectors": activate}
return self._restCall(
"home/security/setIntrusionAlertThroughSmokeDetectors", json.dumps(data)
) | Activate or deactivate whether smoke detectors should "ring" during an alarm.
Args:
activate(bool): True will let the smoke detectors "ring" during an alarm | juraj-google-style |
def add_arguments(self, parser, bootstrap=False):
for item in self._get_items(bootstrap=False):
    item.add_argument(parser, bootstrap) | Adds all items to the parser passed in.
Args:
parser (argparse.ArgumentParser): The parser to add all items to.
bootstrap (bool): Flag to indicate whether you only want to mark
bootstrapped items as required on the command-line. | codesearchnet |
def datacenters(self):
if (not self.__datacenters):
self.__datacenters = Datacenters(self.__connection)
return self.__datacenters | Gets the Datacenters API client.
Returns:
Datacenters: | codesearchnet |