code (stringlengths 20–4.93k) | docstring (stringlengths 33–1.27k) | source (stringclasses, 3 values) |
---|---|---|
def convert_drive(self, shift, instruction):
command_dict = {
'name': instruction.command.name,
't0': shift+instruction.start_time,
'ch': instruction.channels[0].name
}
return self._qobj_model(**command_dict)
|
Return converted `PulseInstruction`.
Args:
shift(int): Offset time.
instruction (PulseInstruction): drive instruction.
Returns:
dict: Dictionary of required parameters.
|
juraj-google-style
|
def ApplyParsersToResponses(parser_factory, responses, flow_obj):
knowledge_base = flow_obj.state.knowledge_base
parsed_responses = []
if parser_factory.HasSingleResponseParsers():
for response in responses:
for parser in parser_factory.SingleResponseParsers():
parsed_responses.extend(parser.ParseResponse(knowledge_base, response, flow_obj.args.path_type))
for parser in parser_factory.MultiResponseParsers():
parsed_responses.extend(parser.ParseResponses(knowledge_base, responses))
has_single_file_parsers = parser_factory.HasSingleFileParsers()
has_multi_file_parsers = parser_factory.HasMultiFileParsers()
if (has_single_file_parsers or has_multi_file_parsers):
precondition.AssertIterableType(responses, rdf_client_fs.StatEntry)
pathspecs = [response.pathspec for response in responses]
if data_store.RelationalDBEnabled():
filedescs = []
for pathspec in pathspecs:
client_path = db.ClientPath.FromPathSpec(flow_obj.client_id, pathspec)
filedescs.append(file_store.OpenFile(client_path))
else:
filedescs = MultiOpenAff4File(flow_obj, pathspecs)
if has_single_file_parsers:
for (response, filedesc) in zip(responses, filedescs):
for parser in parser_factory.SingleFileParsers():
parsed_responses.extend(parser.ParseFile(knowledge_base, response.pathspec, filedesc))
if has_multi_file_parsers:
for parser in parser_factory.MultiFileParsers():
parsed_responses.extend(parser.ParseFiles(knowledge_base, pathspecs, filedescs))
return (parsed_responses or responses)
|
Parse responses with applicable parsers.
Args:
parser_factory: A parser factory for specific artifact.
responses: A list of responses from the client.
flow_obj: An artifact collection flow.
Returns:
A list of (possibly parsed) responses.
|
codesearchnet
|
def get_route_lines_route(self, **kwargs):
select_date = ('%02d/%02d/%d' % (kwargs.get('day', 1), kwargs.get('month', 1), kwargs.get('year', 1970)))
params = {'SelectDate': select_date, 'Lines': util.ints_to_string(kwargs.get('lines', []))}
result = self.make_request('geo', 'get_route_lines_route', **params)
if (not util.check_result(result)):
return (False, result.get('resultDescription', 'UNKNOWN ERROR'))
values = util.response_list(result, 'resultValues')
return (True, [emtype.RouteLinesItem(**a) for a in values])
|
Obtain the itinerary for one or more lines on the given date.
Args:
day (int): Day of the month in format DD.
The number is automatically padded if it only has one digit.
month (int): Month number in format MM.
The number is automatically padded if it only has one digit.
year (int): Year number in format YYYY.
lines (list[int] | int): Lines to query, may be empty to get
all the lines.
Returns:
Status boolean and parsed response (list[RouteLinesItem]), or message
string in case of error.
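Illustrative usage (hedged; `emt` is a hypothetical, already-constructed API client exposing this method):
>>> ok, routes = emt.get_route_lines_route(day=1, month=6, year=2019, lines=[27])
>>> # ok is True on success; routes is then a list of RouteLinesItem objects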
|
codesearchnet
|
def generate(self):
result = self._gen(self.optimized, self.splitstring)
if self.splitstring and result is not None:
result = result[1:]
return result
|
Generates a new random string from the start symbol
Args:
None
Returns:
str: The generated string
|
juraj-google-style
|
def match(self, url):
try:
urlSchemes = self._urlSchemes.itervalues()
except AttributeError:
urlSchemes = self._urlSchemes.values()
for urlScheme in urlSchemes:
if urlScheme.match(url):
return True
return False
|
Try to find if url matches against any of the schemes within this
endpoint.
Args:
url: The url to match against each scheme
Returns:
True if a matching scheme was found for the url, False otherwise
|
codesearchnet
|
def parse_vep_header(vcf_obj):
vep_header = []
if 'CSQ' in vcf_obj:
csq_info = vcf_obj['CSQ']
format_info = parse_header_format(csq_info['Description'])
vep_header = [key.upper() for key in format_info.split('|')]
return vep_header
|
Return a list with the VEP header
The vep header is collected from CSQ in the vcf file
All keys are capitalized
Args:
vcf_obj(cyvcf2.VCF)
Returns:
vep_header(list)
|
juraj-google-style
|
def plot_grid(step):
rad = get_rprof(step, 'r')[0]
drad = get_rprof(step, 'dr')[0]
(_, unit) = step.sdat.scale(1, 'm')
if unit:
unit = ' ({})'.format(unit)
(fig, (ax1, ax2)) = plt.subplots(2, sharex=True)
ax1.plot(rad, '-ko')
ax1.set_ylabel(('$r$' + unit))
ax2.plot(drad, '-ko')
ax2.set_ylabel(('$dr$' + unit))
ax2.set_xlim([(- 0.5), (len(rad) - 0.5)])
ax2.set_xlabel('Cell number')
misc.saveplot(fig, 'grid', step.istep)
|
Plot cell position and thickness.
The figure is called grid_N.pdf where N is replaced by the step index.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
|
codesearchnet
|
def to_polars(evset: EventSet, tp_string_to_pl_string: bool=True, timestamp_to_datetime: bool=True, timestamps: bool=True) -> 'pl.DataFrame':
pl = import_pl()
timestamp_key = 'timestamp'
index_names = evset.schema.index_names()
feature_names = evset.schema.feature_names()
column_names = index_names + feature_names
if timestamps:
column_names += [timestamp_key]
data_dict = {column_name: [] for column_name in column_names}
for index, data in evset.data.items():
assert isinstance(index, tuple)
if timestamps:
timestamps_data = data.timestamps
if evset.schema.is_unix_timestamp and timestamp_to_datetime:
datetime_series = pl.from_epoch(pl.Series(timestamps_data), time_unit='s')
data_dict[timestamp_key].extend(datetime_series)
else:
data_dict[timestamp_key].extend(timestamps_data)
for feature_name, feature in zip(feature_names, data.features):
data_dict[feature_name].extend(feature)
num_timestamps = len(data.timestamps)
for index_name, index_item in zip(index_names, index):
data_dict[index_name].extend([index_item] * num_timestamps)
for col_name, col_data in data_dict.items():
data_dict[col_name] = pl.Series(col_data)
if tp_string_to_pl_string:
for feature in evset.schema.features:
if feature.dtype == DType.STRING:
data_dict[feature.name] = data_dict[feature.name].cast(pl.Utf8)
for index in evset.schema.indexes:
if index.dtype == DType.STRING:
data_dict[index.name] = data_dict[index.name].cast(pl.Utf8)
return pl.DataFrame(data_dict)
|
Converts an [`EventSet`][temporian.EventSet] to a Polars DataFrame.
Usage example:
```python
>>> from datetime import datetime
>>> evset = tp.event_set(
... timestamps=[datetime(2015, 1, 1), datetime(2015, 1, 2)],
... features={
... "feature_1": [0.5, 0.6],
... "my_index": ["red", "yellow"],
... },
... indexes=["my_index"],
... )
>>> df = tp.to_polars(evset)
```
Args:
evset: Input EventSet.
timestamp_to_datetime: If true, convert epoch timestamps to Polars Datetime objects.
timestamps: If true, include the timestamps as a column in the DataFrame.
tp_string_to_pl_string: If true, cast Temporian strings to Polars Utf8.
Returns:
A Polars DataFrame created from the EventSet.
|
github-repos
|
def adversary(self, name, **kwargs):
group_obj = Adversary(name, **kwargs)
return self._group(group_obj)
|
Add Adversary data to Batch object.
Args:
name (str): The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of Adversary.
|
juraj-google-style
|
def get_data_location(self, catalog_id):
try:
record = self.get(catalog_id)
except:
return None
if (('Landsat8' in record['type']) and ('LandsatAcquisition' in record['type'])):
bucket = record['properties']['bucketName']
prefix = record['properties']['bucketPrefix']
return ('s3://' + bucket + '/' + prefix)
if ('DigitalGlobeAcquisition' in record['type']):
o = Ordering()
res = o.location([catalog_id])
return res['acquisitions'][0]['location']
return None
|
Find and return the S3 data location given a catalog_id.
Args:
catalog_id: The catalog ID
Returns:
A string containing the s3 location of the data associated with a catalog ID. Returns
None if the catalog ID is not found, or if there is no data yet associated with it.
|
codesearchnet
|
def add_update_resources(self, resources, ignore_datasetid=False):
if not isinstance(resources, list):
raise HDXError('Resources should be a list!')
for resource in resources:
self.add_update_resource(resource, ignore_datasetid)
|
Add new or update existing resources with new metadata to the dataset
Args:
resources (List[Union[hdx.data.resource.Resource,Dict,str]]): A list of either resource ids or resources metadata from either Resource objects or dictionaries
ignore_datasetid (bool): Whether to ignore dataset id in the resource. Defaults to False.
Returns:
None
|
juraj-google-style
|
def one_or_more(e, delimiter=None):
if delimiter is None:
delimiter = lambda s, grm, pos: (s, Ignore, (pos, pos))
msg = 'Expected one or more of: {}'.format(repr(e))
def match_one_or_more(s, grm=None, pos=0):
start = pos
s, obj, span = e(s, grm, pos)
pos = span[1]
data = [] if obj is Ignore else [obj]
try:
while True:
s, obj, span = delimiter(s, grm, pos)
pos = span[1]
if obj is not Ignore:
data.append(obj)
s, obj, span = e(s, grm, pos)
pos = span[1]
if obj is not Ignore:
data.append(obj)
except PegreError:
pass
return PegreResult(s, data, (start, pos))
return match_one_or_more
|
Create a PEG function to match one or more expressions.
Args:
e: the expression to match
delimiter: an optional expression to match between the
primary *e* matches.
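Illustrative sketch (hedged; `digit` stands for a hypothetical expression function of the form `(s, grm, pos) -> (s, obj, span)` built with this library's primitives):
>>> digits = one_or_more(digit)
>>> # digits('123 rest') returns a PegreResult whose data collects each matched value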
|
juraj-google-style
|
def _evaluateTFLiteModel(self, tflite_model, input_data, input_shapes=None):
interpreter = Interpreter(model_content=tflite_model)
input_details = interpreter.get_input_details()
if input_shapes:
for idx, (shape_signature, final_shape) in enumerate(input_shapes):
self.assertTrue((input_details[idx]['shape_signature'] == shape_signature).all())
index = input_details[idx]['index']
interpreter.resize_tensor_input(index, final_shape, strict=True)
interpreter.allocate_tensors()
output_details = interpreter.get_output_details()
input_details = interpreter.get_input_details()
for input_tensor, tensor_data in zip(input_details, input_data):
interpreter.set_tensor(input_tensor['index'], tensor_data.numpy())
interpreter.invoke()
return [interpreter.get_tensor(details['index']) for details in output_details]
|
Evaluates the model on the `input_data`.
Args:
tflite_model: TensorFlow Lite model.
input_data: List of EagerTensor const ops containing the input data for
each input tensor.
input_shapes: List of tuples representing the `shape_signature` and the
new shape of each input tensor that has unknown dimensions.
Returns:
[np.ndarray]
|
github-repos
|
def add_imported_namespace(self, namespace, imported_alias=False, imported_data_type=False, imported_annotation=False, imported_annotation_type=False):
assert (self.name != namespace.name), 'Namespace cannot import itself.'
reason = self._imported_namespaces.setdefault(namespace, _ImportReason())
if imported_alias:
reason.alias = True
if imported_data_type:
reason.data_type = True
if imported_annotation:
reason.annotation = True
if imported_annotation_type:
reason.annotation_type = True
|
Keeps track of namespaces that this namespace imports.
Args:
namespace (Namespace): The imported namespace.
imported_alias (bool): Set if this namespace references an alias
in the imported namespace.
imported_data_type (bool): Set if this namespace references a
data type in the imported namespace.
imported_annotation (bool): Set if this namespace references an
annotation in the imported namespace.
imported_annotation_type (bool): Set if this namespace references an
annotation in the imported namespace, possibly indirectly (by
referencing an annotation elsewhere that has this type).
|
codesearchnet
|
def match_tracks(self, model_tracks, obs_tracks, unique_matches=True, closest_matches=False):
if unique_matches:
pairings = self.track_matcher.match_tracks(model_tracks, obs_tracks, closest_matches=closest_matches)
else:
pairings = self.track_matcher.neighbor_matches(model_tracks, obs_tracks)
return pairings
|
Match forecast and observed tracks.
Args:
model_tracks: list of forecast (model) tracks.
obs_tracks: list of observed tracks.
unique_matches: if True, produce one-to-one track pairings; otherwise all neighboring matches are returned.
closest_matches: if True, restrict unique matching to the closest pairs.
Returns:
Track pairings from the underlying track matcher.
|
codesearchnet
|
def run_shell_cmd(args):
proc = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return proc.communicate()
|
Executes shell commands and returns output.
Args:
args: String of shell commands to run.
Returns:
Tuple output (stdoutdata, stderrdata) from running the shell commands.
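Illustrative usage (note that stderr is redirected into stdout by the call above):
>>> out, err = run_shell_cmd('echo hello')
>>> # out == b'hello\n' on Python 3; err is None because stderr is merged into stdout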
|
github-repos
|
def merge(self, status: 'Status[Input, Output]') -> 'Status[Input, Output]':
if ((status is None) or (status.farthest is None)):
pass
elif (self.farthest is None):
self.farthest = status.farthest
self.expected = status.expected
elif (status.farthest.position < self.farthest.position):
pass
elif (status.farthest.position > self.farthest.position):
self.farthest = status.farthest
self.expected = status.expected
else:
self.expected = (status.expected + self.expected)
return self
|
Merge the failure message from another status into this one.
Whichever status represents parsing that has gone the farthest is
retained. If both statuses have gone the same distance, then the
expected values from both are retained.
Args:
status: The status to merge into this one.
Returns:
This ``Status`` which may have ``farthest`` and ``expected``
updated accordingly.
|
codesearchnet
|
def _get_mpr_table(self, connection, partition):
virtual_table = partition.vid
table = '{}_v'.format(virtual_table)
logger.debug('Looking for materialized table of the partition.\n partition: {}'.format(partition.name))
table_exists = self._relation_exists(connection, table)
if table_exists:
logger.debug('Materialized table of the partition found.\n partition: {}, table: {}'.format(partition.name, table))
return table
logger.debug('Looking for a virtual table of the partition.\n partition: {}'.format(partition.name))
virtual_exists = self._relation_exists(connection, virtual_table)
if virtual_exists:
logger.debug('Virtual table of the partition found.\n partition: {}, table: {}'.format(partition.name, virtual_table))
return virtual_table
raise MissingTableError('sqlite database does not have table for mpr of {} partition.'.format(partition.vid))
|
Returns name of the sqlite table that stores mpr data.
Args:
connection (apsw.Connection): connection to the sqlite database that stores mpr data.
partition (orm.Partition):
Returns:
str:
Raises:
MissingTableError: if partition table not found in the db.
|
codesearchnet
|
def MakeType(name, base_classes, namespace):
precondition.AssertType(name, str)
if PY2:
name = name.encode('ascii')
return type(name, base_classes, namespace)
|
A compatibility wrapper for the `type` built-in function.
In Python 2 `type` (used as a type constructor) requires the name argument to
be a `bytes` object whereas in Python 3 it is required to be a `unicode`
object. Since class name is human readable text rather than arbitrary stream
of bytes, the Python 3 behaviour is considered to be the sane one.
Once support for Python 2 is dropped all invocations of this call can be
replaced with the `type` built-in.
Args:
name: A name of the type to create.
base_classes: A tuple of base classes that the returned type is supposed to
derive from.
namespace: A dictionary of methods and fields that the returned type is
supposed to contain.
Returns:
A new type with specified parameters.
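A minimal illustrative usage:
>>> Foo = MakeType("Foo", (object,), {"bar": lambda self: 42})
>>> Foo().bar()
42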
|
codesearchnet
|
def SetParseFn(fn, *arguments):
def _Decorator(func):
parse_fns = GetParseFns(func)
if not arguments:
parse_fns['default'] = fn
else:
for argument in arguments:
parse_fns['named'][argument] = fn
_SetMetadata(func, FIRE_PARSE_FNS, parse_fns)
return func
return _Decorator
|
Sets the fn for Fire to use to parse args when calling the decorated fn.
Args:
fn: The function to be used for parsing arguments.
*arguments: The arguments for which to use the parse fn. If none are listed,
then this will set the default parse function.
Returns:
The decorated function, which now has metadata telling Fire how to perform.
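A minimal illustrative sketch (hypothetical function `repeat`; Fire will parse the `count` argument with `int`):
>>> @SetParseFn(int, 'count')
... def repeat(message, count):
...     return message * count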
|
github-repos
|
def __init__(self, org=None, library=None, branch=None, version_guid=None, **kwargs):
if 'offering' in kwargs:
raise ValueError("'offering' is not a valid field for a LibraryLocator.")
if 'course' in kwargs:
if library is not None:
raise ValueError("Cannot specify both 'library' and 'course'")
warnings.warn(
"For LibraryLocators, use 'library' instead of 'course'.",
DeprecationWarning,
stacklevel=2
)
library = kwargs.pop('course')
run = kwargs.pop('run', self.RUN)
if run != self.RUN:
raise ValueError("Invalid run. Should be '{}' or None.".format(self.RUN))
if version_guid:
version_guid = self.as_object_id(version_guid)
for name, value in [['org', org], ['library', library], ['branch', branch]]:
if not (value is None or self.ALLOWED_ID_RE.match(value)):
raise InvalidKeyError(self.__class__,
u"Special characters not allowed in field {}: '{}'".format(name, value))
if kwargs.get('deprecated', False):
raise InvalidKeyError(self.__class__, 'LibraryLocator cannot have deprecated=True')
super(LibraryLocator, self).__init__(
org=org,
library=library,
branch=branch,
version_guid=version_guid,
**kwargs
)
if self.version_guid is None and (self.org is None or self.library is None):
raise InvalidKeyError(self.__class__, "Either version_guid or org and library should be set")
|
Construct a LibraryLocator
Args:
version_guid (string or ObjectId): optional unique id for the version
org, library: the standard definition. Optional only if version_guid given.
branch (string): the optional branch such as 'draft', 'published', 'staged', 'beta'
|
juraj-google-style
|
def flatten(vari):
if isinstance(vari, Poly):
shape = int(numpy.prod(vari.shape))
return reshape(vari, (shape,))
return numpy.array(vari).flatten()
|
Flatten a shapeable quantity.
Args:
vari (chaospy.poly.base.Poly, numpy.ndarray):
Shapeable input quantity.
Returns:
(chaospy.poly.base.Poly, numpy.ndarray):
Same type as ``vari`` with `len(Q.shape)==1`.
Examples:
>>> P = chaospy.reshape(chaospy.prange(4), (2,2))
>>> print(P)
[[1, q0], [q0^2, q0^3]]
>>> print(chaospy.flatten(P))
[1, q0, q0^2, q0^3]
|
codesearchnet
|
def play_match(black_model, white_model, games, sgf_dir):
with utils.logged_timer('Loading weights'):
black_net = dual_net.DualNetwork(black_model)
white_net = dual_net.DualNetwork(white_model)
readouts = FLAGS.num_readouts
black = MCTSPlayer(black_net, two_player_mode=True)
white = MCTSPlayer(white_net, two_player_mode=True)
black_name = os.path.basename(black_net.save_file)
white_name = os.path.basename(white_net.save_file)
for i in range(games):
num_move = 0
for player in [black, white]:
player.initialize_game()
first_node = player.root.select_leaf()
(prob, val) = player.network.run(first_node.position)
first_node.incorporate_results(prob, val, first_node)
while True:
start = time.time()
active = (white if (num_move % 2) else black)
inactive = (black if (num_move % 2) else white)
current_readouts = active.root.N
while (active.root.N < (current_readouts + readouts)):
active.tree_search()
if (FLAGS.verbose >= 3):
print(active.root.position)
if active.should_resign():
active.set_result(((- 1) * active.root.position.to_play), was_resign=True)
inactive.set_result(active.root.position.to_play, was_resign=True)
if active.is_done():
fname = '{:d}-{:s}-vs-{:s}-{:d}.sgf'.format(int(time.time()), white_name, black_name, i)
active.set_result(active.root.position.result(), was_resign=False)
with gfile.GFile(os.path.join(sgf_dir, fname), 'w') as _file:
sgfstr = sgf_wrapper.make_sgf(active.position.recent, active.result_string, black_name=black_name, white_name=white_name)
_file.write(sgfstr)
print('Finished game', i, active.result_string)
break
move = active.pick_move()
active.play_move(move)
inactive.play_move(move)
dur = (time.time() - start)
num_move += 1
if ((FLAGS.verbose > 1) or ((FLAGS.verbose == 1) and ((num_move % 10) == 9))):
timeper = ((dur / readouts) * 100.0)
print(active.root.position)
print(('%d: %d readouts, %.3f s/100. (%.2f sec)' % (num_move, readouts, timeper, dur)))
|
Plays matches between two neural nets.
Args:
black_model: Path to the model for black player
white_model: Path to the model for white player
games: Number of games to play.
sgf_dir: Directory where SGF game records are written.
|
codesearchnet
|
def guess_leb_size(path):
f = open(path, 'rb')
f.seek(0, 2)
file_size = (f.tell() + 1)
f.seek(0)
block_size = None
for _ in range(0, file_size, FILE_CHUNK_SZ):
buf = f.read(FILE_CHUNK_SZ)
for m in re.finditer(UBIFS_NODE_MAGIC, buf):
start = m.start()
chdr = nodes.common_hdr(buf[start:(start + UBIFS_COMMON_HDR_SZ)])
if (chdr and (chdr.node_type == UBIFS_SB_NODE)):
sb_start = (start + UBIFS_COMMON_HDR_SZ)
sb_end = (sb_start + UBIFS_SB_NODE_SZ)
if (chdr.len != len(buf[sb_start:sb_end])):
f.seek(sb_start)
buf = f.read(UBIFS_SB_NODE_SZ)
else:
buf = buf[sb_start:sb_end]
sbn = nodes.sb_node(buf)
block_size = sbn.leb_size
f.close()
return block_size
f.close()
return block_size
|
Get LEB size from superblock.
Searches the file for a superblock and retrieves the LEB size.
Arguments:
path (str): Path to file.
Returns:
int: LEB size.
|
codesearchnet
|
def on_deleted(self, event):
if (not self._event_error):
self.logger.info(u'Change detected from deletion of: %s', event.src_path)
self.compile_dependencies(event.src_path, include_self=False)
|
Called when a file or directory is deleted.
Todo:
May be bugged with inspector and sass compiler since the file does not
exist anymore.
Args:
event: Watchdog event, ``watchdog.events.DirDeletedEvent`` or
``watchdog.events.FileDeletedEvent``.
|
codesearchnet
|
def _check_approval_wrapper(self, grr_object, grr_function, *args, **kwargs):
approval_sent = False
while True:
try:
return grr_function(*args, **kwargs)
except grr_errors.AccessForbiddenError as exception:
print('No valid approval found: {0!s}'.format(exception))
if approval_sent:
print('Approval not yet granted, waiting {0:d}s'.format(
self._CHECK_APPROVAL_INTERVAL_SEC))
time.sleep(self._CHECK_APPROVAL_INTERVAL_SEC)
continue
if not self.approvers:
message = ('GRR needs approval but no approvers specified '
'(hint: use --approvers)')
self.state.add_error(message, critical=True)
return None
grr_object.CreateApproval(
reason=self.reason, notified_users=self.approvers)
approval_sent = True
print('{0!s}: approval request sent to: {1!s} (reason: {2:s})'.format(
grr_object, self.approvers, self.reason))
|
Wraps a call to GRR functions checking for approval.
Args:
grr_object: the GRR object to create the eventual approval on.
grr_function: The GRR function requiring approval.
*args: Positional arguments that are to be passed to `grr_function`.
**kwargs: Keyword arguments that are to be passed to `grr_function`.
Returns:
The return value of the execution of grr_function(*args, **kwargs).
|
juraj-google-style
|
def load(self, languages=[]):
duckling_load = self.clojure.var('duckling.core', 'load!')
clojure_hashmap = self.clojure.var('clojure.core', 'hash-map')
clojure_list = self.clojure.var('clojure.core', 'list')
if languages:
iso_languages = [Language.convert_to_iso(lang) for lang in languages]
duckling_load.invoke(clojure_hashmap.invoke(self.clojure.read(':languages'), clojure_list.invoke(*iso_languages)))
else:
duckling_load.invoke()
self._is_loaded = True
|
Loads the Duckling corpus.
Languages can be specified, defaults to all.
Args:
languages: Optional parameter to specify languages,
e.g. [Duckling.ENGLISH, Duckling.FRENCH] or supported ISO 639-1 Codes (e.g. ["en", "fr"])
|
codesearchnet
|
def _delete_from_hdx(self, object_type, id_field_name):
if id_field_name not in self.data:
raise HDXError('No %s field (mandatory) in %s!' % (id_field_name, object_type))
self._save_to_hdx('delete', id_field_name)
|
Helper method to delete a resource from HDX
Args:
object_type (str): Description of HDX object type (for messages)
id_field_name (str): Name of field containing HDX object identifier
Returns:
None
|
juraj-google-style
|
def decorate(self, record):
color = 'gray'
if (record.levelno == logging.WARNING):
color = 'yellow'
if (record.levelno == logging.INFO):
color = 'green'
if (record.levelno == logging.DEBUG):
color = 'gray'
if (record.levelno >= logging.ERROR):
color = 'red'
notify = False
if (record.levelno >= logging.ERROR):
notify = True
payload = {'color': color, 'notify': notify, 'message_format': 'text'}
return payload
|
Build up HipChat specific values for log record
Args:
record (:obj:`logging.record`): log message object
Returns:
dict: params for POST request
|
codesearchnet
|
def get_compatible_generator_action(self, filename):
for action in self.__generator_actions:
if action.act_on_file(filename):
return action
return None
|
Return the **first** compatible :class:`GeneratorAction` for a given filename or ``None`` if none is found.
Args:
filename (str): The filename of the template to process.
|
juraj-google-style
|
def set_column_sizes(self, values):
self.style['grid-template-columns'] = ' '.join(map(lambda value: (str(value) if str(value).endswith('%') else str(value) + '%') , values))
|
Sets the size value for each column
Args:
values (iterable of int or str): values are treated as percentage.
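Illustrative usage (hedged; `grid` is a hypothetical container widget exposing this method):
>>> grid.set_column_sizes([20, 30, 50])
>>> grid.set_column_sizes(['25%', '75%'])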
|
juraj-google-style
|
def __init__(self, scopes, service_account_id=None, token_maker=None,
retry_params=None):
if isinstance(scopes, basestring):
scopes = [scopes]
self.scopes = scopes
self.service_account_id = service_account_id
self.make_token_async = token_maker or _config.TOKEN_MAKER
if not retry_params:
retry_params = api_utils._get_default_retry_params()
self.retry_params = retry_params
self.user_agent = {'User-Agent': retry_params._user_agent}
self.expiration_headroom = random.randint(60, 240)
|
Constructor.
Args:
scopes: A scope or a list of scopes.
service_account_id: Internal use only.
token_maker: An asynchronous function of the form
(scopes, service_account_id) -> (token, expires).
retry_params: An instance of api_utils.RetryParams. If None, the
default for current thread will be used.
|
juraj-google-style
|
def get_key(key, data_structure):
if key == '/':
return data_structure
path = key.split('/')
path[0] or path.pop(0)
current_value = data_structure
while path:
current_key = path.pop(0)
try:
current_key = int(current_key)
except ValueError:
pass
try:
current_value = current_value[current_key]
except (KeyError, IndexError):
LOGGER.debug('failed to extract path {}'.format(key))
return None
return current_value
|
Helper method for extracting values from a nested data structure.
Args:
key (str): The path to the values (a series of keys and indexes
separated by '/')
data_structure (dict or list): The data structure from which the
value will be extracted.
Returns:
str: The value associated with the key.
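Example (illustrative):
>>> get_key('/a/0/b', {'a': [{'b': 'spam'}]})
'spam'
>>> get_key('/missing', {'a': 1}) is None
True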
|
juraj-google-style
|
def from_join(cls, join: Join) -> 'ConditionalJoin':
return cls(
join.table_name,
join.parent_alias,
join.table_alias,
join.join_type,
join.join_field,
join.nullable
)
|
Creates a new :see:ConditionalJoin from the
specified :see:Join object.
Arguments:
join:
The :see:Join object to create the
:see:ConditionalJoin object from.
Returns:
A :see:ConditionalJoin object created from
the :see:Join object.
|
juraj-google-style
|
def set_all_pattern_variables(self, patternnumber, sp0, ti0, sp1, ti1, sp2, ti2, sp3, ti3, sp4, ti4, sp5, ti5, sp6, ti6, sp7, ti7, actual_step, additional_cycles, link_pattern):
_checkPatternNumber(patternnumber)
self.set_pattern_step_setpoint(patternnumber, 0, sp0)
self.set_pattern_step_setpoint(patternnumber, 1, sp1)
self.set_pattern_step_setpoint(patternnumber, 2, sp2)
self.set_pattern_step_setpoint(patternnumber, 3, sp3)
self.set_pattern_step_setpoint(patternnumber, 4, sp4)
self.set_pattern_step_setpoint(patternnumber, 5, sp5)
self.set_pattern_step_setpoint(patternnumber, 6, sp6)
self.set_pattern_step_setpoint(patternnumber, 7, sp7)
self.set_pattern_step_time(patternnumber, 0, ti0)
self.set_pattern_step_time(patternnumber, 1, ti1)
self.set_pattern_step_time(patternnumber, 2, ti2)
self.set_pattern_step_time(patternnumber, 3, ti3)
self.set_pattern_step_time(patternnumber, 4, ti4)
self.set_pattern_step_time(patternnumber, 5, ti5)
self.set_pattern_step_time(patternnumber, 6, ti6)
self.set_pattern_step_time(patternnumber, 7, ti7)
self.set_pattern_additional_cycles(patternnumber, additional_cycles)
self.set_pattern_link_topattern(patternnumber, link_pattern)
self.set_pattern_actual_step(patternnumber, actual_step)
|
Set all variables for a given pattern at one time.
Args:
* patternnumber (integer): 0-7
* sp[*n*] (float): setpoint value for step *n*
* ti[*n*] (integer??): step time for step *n*, 0-900
* actual_step (int): ?
* additional_cycles(int): ?
* link_pattern(int): ?
|
codesearchnet
|
def inner_text(node):
from lxml import etree
parts = [node.text]
for child in node.getchildren():
parts.append(etree.tostring(child, encoding='utf-8', method='text'))
parts.append(child.tail)
return ''.join(map(decode_bytes, filter(None, parts)))
|
Returns the inner text of a given XML node, excluding tags.
Args:
node: (lxml.etree.Element): The node whose inner text is desired.
Returns:
str: The inner text of the node.
|
codesearchnet
|
def fts_contrast2(self, fs, ft_name, inv):
inv_fts = [self.fts(x) for x in inv if (set(fs) <= self.fts(x))]
for a in inv_fts:
for b in inv_fts:
if (a != b):
diff = (a ^ b)
if (len(diff) == 2):
if all([(nm == ft_name) for (_, nm) in diff]):
return True
return False
|
Return `True` if there is a segment in `inv` that contrasts in feature
`ft_name`.
Args:
fs (list): feature specifications used to filter `inv`.
ft_name (str): name of the feature where contrast must be present.
inv (list): collection of segments represented as Unicode segments.
Returns:
bool: `True` if two segments in `inv` are identical in features except
for feature `ft_name`
|
codesearchnet
|
def on_test_begin(self, logs=None):
|
Called at the beginning of evaluation or validation.
Subclasses should override for any actions to run.
Args:
logs: Dict. Currently no data is passed to this argument for this
method but that may change in the future.
|
github-repos
|
def set_sig_figs(n=4):
u.default_format = (('.' + str(n)) + 'g')
pd.options.display.float_format = (('{:,.' + str(n)) + '}').format
|
Set the number of significant figures used to print Pint, Pandas, and
NumPy quantities.
Args:
n (int): Number of significant figures to display.
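Illustrative usage (the module-level `u` is assumed to be a Pint unit registry and `pd` pandas, as used above):
>>> set_sig_figs(3)
>>> # Pint quantities and pandas floats now print with 3 significant figures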
|
codesearchnet
|
def derive_annotations(self, annotations):
cls = type(self)
return cls(self[0], self[1], self[2], self[3], annotations, self[5])
|
Derives a new event from this one setting the ``annotations`` attribute.
Args:
annotations: (Sequence[Union[amazon.ion.symbols.SymbolToken, unicode]]):
The annotations associated with the derived event.
Returns:
IonEvent: The newly generated event.
|
codesearchnet
|
def _ProduceContent(self, mods, showprivate=False, showinh=False):
result = ''
nestedresult = ''
for mod in mods:
try:
all = mod[1].__all__
except AttributeError:
raise RuntimeError(('Module (%s) MUST have `__all__` defined.' % mod[1].__name__))
if ((not showprivate) and (mod[0][0:1] == '_')):
continue
if (mod[0][0:2] == '__'):
continue
result += self._ProduceSingleContent(mod, showprivate, showinh)
return result
|
An internal helper to create pages for several modules that do not have nested modules.
This will automatically generate the needed RST to document each module
and save the module to its own page appropriately.
Args:
mods (module): The modules to document that do not contain nested modules
showprivate (bool): A flag for whether or not to display private members
Returns:
str: The file names ready to be appended to a toctree
|
codesearchnet
|
def _freeze_keras_model(self, output_dir):
try:
self._keras_model.save(output_dir, save_format='tf')
except Exception:
return None
tag_set = set([_tag_constants.SERVING])
signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
graph_def, input_tensors, output_tensors, sess_graph = _freeze_saved_model(output_dir, None, None, None, tag_set, signature_key)
self.saved_model_dir = output_dir
self._saved_model_tags = tag_set
self._saved_model_exported_names = [signature_key]
self._parse_saved_model_args()
if self.saved_model_dir:
self._graph_def = graph_def
self._input_tensors = input_tensors
self._output_tensors = output_tensors
self._debug_info_func = _build_debug_info_func(sess_graph)
|
Save Keras model to Saved Model format.
Args:
output_dir: The output directory to save the SavedModel.
|
github-repos
|
def build(self, client, nobuild=False, usecache=True, pull=False):
if (not nobuild):
self.update_source_images(client, usecache=usecache, pull=pull)
width = utils.get_console_width()
cprint(('\n' + ('=' * width)), color='white', attrs=['bold'])
line = ('STARTING BUILD for "%s" (image definition "%s" from %s)\n' % (self.targetname, self.imagename, self.steps[(- 1)].sourcefile))
cprint(_centered(line, width), color='blue', attrs=['bold'])
for (istep, step) in enumerate(self.steps):
print(colored('* Step', 'blue'), colored(('%d/%d' % ((istep + 1), len(self.steps))), 'blue', attrs=['bold']), colored('for image', color='blue'), colored(self.imagename, color='blue', attrs=['bold']))
if (not nobuild):
if step.bust_cache:
stackkey = self._get_stack_key(istep)
if (stackkey in _rebuilt):
step.bust_cache = False
step.build(client, usecache=usecache)
print(colored('* Created intermediate image', 'green'), colored(step.buildname, 'green', attrs=['bold']), end='\n\n')
if step.bust_cache:
_rebuilt.add(stackkey)
finalimage = step.buildname
if (not nobuild):
self.finalizenames(client, finalimage)
line = ('FINISHED BUILDING "%s" (image definition "%s" from %s)' % (self.targetname, self.imagename, self.steps[(- 1)].sourcefile))
cprint(_centered(line, width), color='green', attrs=['bold'])
cprint(('=' * width), color='white', attrs=['bold'], end='\n\n')
|
Drives the build of the final image - get the list of steps and execute them.
Args:
client (docker.Client): docker client object that will build the image
nobuild (bool): just create dockerfiles, don't actually build the image
usecache (bool): use docker cache, or rebuild everything from scratch?
pull (bool): try to pull new versions of repository images?
|
codesearchnet
|
def stop(self, wait=True):
assert (not self._stopped), 'Already stopped'
self._stopped = True
self._tornado.stop(wait)
self._http.stop()
|
Stop the Bokeh Server.
This stops and removes all Bokeh Server ``IOLoop`` callbacks, as well
as stops the ``HTTPServer`` that this instance was configured with.
Args:
wait (bool):
Whether to wait for orderly cleanup (default: True)
Returns:
None
|
codesearchnet
|
def _CompositeMapByteStream(
self, byte_stream, byte_offset=0, context=None, **unused_kwargs):
elements_data_size = None
elements_terminator = None
number_of_elements = None
if self._HasElementsDataSize():
elements_data_size = self._EvaluateElementsDataSize(context)
element_byte_size = self._element_data_type_definition.GetByteSize()
if element_byte_size is not None:
number_of_elements, _ = divmod(elements_data_size, element_byte_size)
else:
elements_terminator = (
self._element_data_type_definition.elements_terminator)
elif self._HasElementsTerminator():
elements_terminator = self._data_type_definition.elements_terminator
elif self._HasNumberOfElements():
number_of_elements = self._EvaluateNumberOfElements(context)
if elements_terminator is None and number_of_elements is None:
raise errors.MappingError(
'Unable to determine element terminator or number of elements')
context_state = getattr(context, 'state', {})
elements_data_offset = context_state.get('elements_data_offset', 0)
element_index = context_state.get('element_index', 0)
element_value = None
mapped_values = context_state.get('mapped_values', [])
size_hints = context_state.get('size_hints', {})
subcontext = context_state.get('context', None)
if not subcontext:
subcontext = DataTypeMapContext()
try:
while byte_stream[byte_offset:]:
if (number_of_elements is not None and
element_index == number_of_elements):
break
if (elements_data_size is not None and
elements_data_offset >= elements_data_size):
break
element_value = self._element_data_type_map.MapByteStream(
byte_stream, byte_offset=byte_offset, context=subcontext)
byte_offset += subcontext.byte_size
elements_data_offset += subcontext.byte_size
element_index += 1
mapped_values.append(element_value)
if (elements_terminator is not None and
element_value == elements_terminator):
break
except errors.ByteStreamTooSmallError as exception:
context_state['context'] = subcontext
context_state['elements_data_offset'] = elements_data_offset
context_state['element_index'] = element_index
context_state['mapped_values'] = mapped_values
raise errors.ByteStreamTooSmallError(exception)
except Exception as exception:
raise errors.MappingError(exception)
if number_of_elements is not None and element_index != number_of_elements:
context_state['context'] = subcontext
context_state['elements_data_offset'] = elements_data_offset
context_state['element_index'] = element_index
context_state['mapped_values'] = mapped_values
error_string = (
'Unable to read: {0:s} from byte stream at offset: {1:d} '
'with error: missing element: {2:d}').format(
self._data_type_definition.name, byte_offset, element_index - 1)
raise errors.ByteStreamTooSmallError(error_string)
if (elements_terminator is not None and
element_value != elements_terminator and (
elements_data_size is None or
elements_data_offset < elements_data_size)):
byte_stream_size = len(byte_stream)
size_hints[self._data_type_definition.name] = DataTypeMapSizeHint(
byte_stream_size - byte_offset)
context_state['context'] = subcontext
context_state['elements_data_offset'] = elements_data_offset
context_state['element_index'] = element_index
context_state['mapped_values'] = mapped_values
context_state['size_hints'] = size_hints
error_string = (
'Unable to read: {0:s} from byte stream at offset: {1:d} '
'with error: unable to find elements terminator').format(
self._data_type_definition.name, byte_offset)
raise errors.ByteStreamTooSmallError(error_string)
if context:
context.byte_size = elements_data_offset
context.state = {}
return tuple(mapped_values)
|
Maps a sequence of composite data types on a byte stream.
Args:
byte_stream (bytes): byte stream.
byte_offset (Optional[int]): offset into the byte stream where to start.
context (Optional[DataTypeMapContext]): data type map context.
Returns:
tuple[object, ...]: mapped values.
Raises:
ByteStreamTooSmallError: if the byte stream is too small.
MappingError: if the data type definition cannot be mapped on
the byte stream.
|
juraj-google-style
|
def with_rank_at_least(x, rank):
return type(x)(tf.TensorShape(x).with_rank_at_least(rank))
|
Returns a shape based on `x` with at least the given `rank`.
For more details, see `help(tf.TensorShape.with_rank_at_least)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
rank: An `int` representing the minimum rank of `x` or else an assertion is
raised.
Returns:
shape: a shape having `type(x)` but guaranteed to have at least the given
rank (or else an assertion was raised).
Raises:
ValueError: If `x` does not represent a shape with at least the given
`rank`.
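Illustrative usage (hedged; `tf` is TensorFlow, assumed imported by the module):
>>> shape = with_rank_at_least(tf.TensorShape([2, 3]), 1)  # ok: rank 2 >= 1
>>> # with_rank_at_least(tf.TensorShape([2, 3]), 3) would raise ValueError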
|
codesearchnet
|
def _GetISO8601String(self, structure):
time_zone_offset = structure.time_zone_offset
try:
time_zone_offset_hours = int(time_zone_offset[1:3], 10)
time_zone_offset_minutes = int(time_zone_offset[3:5], 10)
except (IndexError, TypeError, ValueError) as exception:
raise ValueError(
'unable to parse time zone offset with error: {0!s}.'.format(
exception))
try:
iso8601 = (
'{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}.{6:03d}'
'{7:s}{8:02d}:{9:02d}').format(
structure.year, structure.month, structure.day,
structure.hours, structure.minutes, structure.seconds,
structure.microseconds, time_zone_offset[0],
time_zone_offset_hours, time_zone_offset_minutes)
except ValueError as exception:
raise ValueError(
'unable to format date time string with error: {0!s}.'.format(
exception))
return iso8601
|
Retrieves an ISO 8601 date time string from the structure.
The date and time values in Google Drive Sync log files are formatted as:
"2018-01-24 18:25:08,454 -0800".
Args:
structure (pyparsing.ParseResults): structure of tokens derived from a
line of a text file.
Returns:
str: ISO 8601 date time string.
Raises:
ValueError: if the structure cannot be converted into a date time string.
|
juraj-google-style
|
def __init__(self, mackup, files, dry_run, verbose):
assert isinstance(mackup, Mackup)
assert isinstance(files, set)
self.mackup = mackup
self.files = list(files)
self.dry_run = dry_run
self.verbose = verbose
|
Create an ApplicationProfile instance.
Args:
mackup (Mackup)
files (list)
|
juraj-google-style
|
def _add_impact_severity(self, variant_obj):
if variant_obj.most_severe_consequence:
variant_obj.impact_severity = IMPACT_SEVERITIES.get(
variant_obj.most_severe_consequence
)
|
Add the impact severity for the most severe consequence
Args:
variant_obj (puzzle.models.Variant)
|
juraj-google-style
|
def _do_revoke(self, http, token):
logger.info('Revoking token')
query_params = {'token': token}
token_revoke_uri = _helpers.update_query_params(
self.revoke_uri, query_params)
resp, content = transport.request(http, token_revoke_uri)
if resp.status == http_client.METHOD_NOT_ALLOWED:
body = urllib.parse.urlencode(query_params)
resp, content = transport.request(http, token_revoke_uri,
method='POST', body=body)
if resp.status == http_client.OK:
self.invalid = True
else:
error_msg = 'Invalid response {0}.'.format(resp.status)
try:
d = json.loads(_helpers._from_bytes(content))
if 'error' in d:
error_msg = d['error']
except (TypeError, ValueError):
pass
raise TokenRevokeError(error_msg)
if self.store:
self.store.delete()
|
Revokes this credential and deletes the stored copy (if it exists).
Args:
http: an object to be used to make HTTP requests.
token: A string used as the token to be revoked. Can be either an
access_token or refresh_token.
Raises:
TokenRevokeError: If the revoke request does not return with a
200 OK.
|
juraj-google-style
|
def __init__(self, address, ap):
super(ReadRequest, self).__init__(address=address, ap=ap)
|
Initializes the base class.
Args:
self (ReadRequest): the ``ReadRequest`` instance
address (int): the register index
ap (bool): ``True`` if this request is to an Access Port Access
Register, otherwise ``False`` for a Debug Port Access Register
Returns:
``None``
|
juraj-google-style
|
def CreateAdGroup(client, campaign_id):
ad_group_service = client.GetService('AdGroupService', 'v201809')
ad_group = {
'name': 'Dynamic remarketing ad group',
'campaignId': campaign_id,
'status': 'ENABLED'
}
operations = [{
'operator': 'ADD',
'operand': ad_group
}]
return ad_group_service.mutate(operations)['value'][0]
|
Creates an ad group for the dynamic remarketing campaign.
Args:
client: an AdWordsClient instance.
campaign_id: an int campaign ID.
Returns:
The ad group that was successfully created.
|
juraj-google-style
|
def init_app(self, app, client_id=None):
if (not self.client_id):
if client_id:
self.client_id = client_id
else:
self.client_id = app.name
|
Initialize the Micropub extension if it was not given app
in the constructor.
Args:
app (flask.Flask): the flask application to extend.
client_id (string, optional): the IndieAuth client id, will be
displayed when the user is asked to authorize this client. If not
provided, the app name will be used.
|
codesearchnet
|
def Append(self, value=None, **kwarg):
if self.rdf_type is not None:
if (isinstance(value, rdfvalue.RDFValue) and
value.__class__ != self.rdf_type):
raise ValueError("Can only accept %s" % self.rdf_type)
try:
value = self.rdf_type(value, **kwarg)
except (TypeError, ValueError):
raise ValueError("Unable to initialize %s from type %s" %
(self.__class__.__name__, type(value)))
self.content.Append(DataBlob().SetValue(value))
|
Add another member to the array.
Args:
value: The new data to append to the array.
**kwarg: Create a new element from these keywords.
Returns:
The value which was added. This can be modified further by the caller and
changes will be propagated here.
Raises:
ValueError: If the value to add is not allowed.
|
juraj-google-style
|
def load(self, data_dir):
K.set_learning_phase(0)
try:
latest_ckpt = max(glob.iglob(
os.path.join(data_dir, '*.h*5')), key=os.path.getctime)
latest_ckpt_name = os.path.basename(latest_ckpt)
latest_ckpt_time = str(
datetime.fromtimestamp(os.path.getmtime(latest_ckpt)))
except ValueError:
raise FileNotFoundError('No checkpoint (.hdf5 or .h5) files '
'available at {}'.format(data_dir))
try:
latest_json = max(glob.iglob(os.path.join(data_dir, '*.json')),
key=os.path.getctime)
with open(latest_json, 'r') as f:
model_json = json.loads(f.read())
self._model = model_from_json(model_json)
self._model.load_weights(latest_ckpt)
except ValueError:
try:
self._model = load_model(latest_ckpt)
except ValueError:
raise FileNotFoundError('The (.hdf5 or .h5) files available at'
'{} don\'t have the model'
' architecture.'
.format(latest_ckpt))
self._sess = K.get_session()
self._tf_predict_var = self._model.outputs[0]
self._tf_input_var = self._model.inputs[0]
self._model_name = type(self).__name__
self._latest_ckpt_name = latest_ckpt_name
self._latest_ckpt_time = latest_ckpt_time
|
Load graph and weight data.
Args:
data_dir (:obj:`str`): location of Keras checkpoint (`.hdf5`) files
and model (in `.json`) structure. The default behavior
is to take the latest of each, by OS timestamp.
|
juraj-google-style
|
def close(self):
if not self.closed:
self._uploader.finish()
super().close()
|
Complete the upload and close this stream.
This method has no effect if the stream is already closed.
Raises:
Any error encountered by the uploader.
|
github-repos
|
def _get_args(cls, args):
if isinstance(args, tuple):
raise TypeError(
"{}[...] takes exactly one argument.".format(cls.__name__)
)
return super(_StringMeta, cls)._get_args((_STR_TYPE, args))
|
Return the parameters necessary to check type boundaries.
Args:
args: A slice representing the minimum and maximum lengths allowed
for values of that string.
Returns:
A tuple with three parameters: a type, a slice, and the len
function.
|
juraj-google-style
|
def save_archive(archive):
_assert_obj_type(archive, obj_type=DBArchive)
_get_handler().store_object(archive)
return archive.to_comm(light_request=True)
|
Save `archive` into database and into proper indexes.
Args:
archive (obj): Instance of the :class:`.DBArchive`.
Returns:
obj: :class:`.DBArchive` without data.
Raises:
InvalidType: When the `archive` is not instance of :class:`.DBArchive`.
UnindexablePublication: When there is no index (property) which can be
used to index `archive` in database.
|
codesearchnet
|
def get_num_patches(self, image_height: int, image_width: int, patch_size: Optional[Dict[str, int]]=None) -> int:
patch_size = patch_size if patch_size is not None else self.patch_size
patch_height, patch_width = (patch_size['height'], patch_size['width'])
if image_height % patch_height != 0:
raise ValueError(f'image_height={image_height!r} must be divisible by {patch_height}')
if image_width % patch_width != 0:
raise ValueError(f'image_width={image_width!r} must be divisible by {patch_width}')
num_patches_per_dim_h = image_height // patch_height
num_patches_per_dim_w = image_width // patch_width
num_patches = num_patches_per_dim_h * num_patches_per_dim_w
return num_patches
|
Calculate number of patches required to encode an image.
Args:
image_height (`int`):
Height of the image.
image_width (`int`):
Width of the image.
patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
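Illustrative example (hedged; assumes a processor instance `proc` whose `patch_size` is `{"height": 30, "width": 30}`, hypothetical values):
>>> proc.get_num_patches(image_height=300, image_width=300)
100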
|
github-repos
|
def _CreateStyleFromConfigParser(config):
section = 'yapf' if config.has_section('yapf') else 'style'
if config.has_option('style', 'based_on_style'):
based_on = config.get('style', 'based_on_style').lower()
base_style = _STYLE_NAME_TO_FACTORY[based_on]()
elif config.has_option('yapf', 'based_on_style'):
based_on = config.get('yapf', 'based_on_style').lower()
base_style = _STYLE_NAME_TO_FACTORY[based_on]()
else:
base_style = _GLOBAL_STYLE_FACTORY()
for option, value in config.items(section):
if option.lower() == 'based_on_style':
continue
option = option.upper()
if option not in _STYLE_OPTION_VALUE_CONVERTER:
raise StyleConfigError('Unknown style option "{0}"'.format(option))
try:
base_style[option] = _STYLE_OPTION_VALUE_CONVERTER[option](value)
except ValueError:
raise StyleConfigError("'{}' is not a valid setting for {}.".format(value, option))
return base_style
|
Create a style dict from a configuration file.
Arguments:
config: a ConfigParser object.
Returns:
A style dict.
Raises:
StyleConfigError: if an unknown style option was encountered.
|
github-repos
|
def create_board(self, board_json):
return trolly.board.Board(trello_client=self, board_id=board_json['id'], name=board_json['name'], data=board_json)
|
Create Board object from a JSON object
Returns:
Board: The board from the given `board_json`.
|
codesearchnet
|
def _html_tree_view_content(self, *, view: 'HtmlTreeView', name: Optional[str]=None, parent: Any=None, root_path: Optional[KeyPath]=None, **kwargs) -> Html:
return view.content(self, name=name, parent=parent, root_path=root_path, **kwargs)
|
Returns the main content for the object.
Args:
view: The view to render the object.
name: The name of the object.
parent: The parent of the object.
root_path: The key path of the object relative to the root.
**kwargs: kwargs to pass to the view. See `_html_tree_view_config` for
the builtin arguments.
Returns:
The rendered HTML as the main content of the object.
|
github-repos
|
def ensure_valid_input(model, tokens, input_names):
print('Ensuring inputs are in correct order')
model_args_name = model.forward.__code__.co_varnames
model_args, ordered_input_names = ([], [])
for arg_name in model_args_name[1:]:
if arg_name in input_names:
ordered_input_names.append(arg_name)
model_args.append(tokens[arg_name])
else:
print(f'{arg_name} is not present in the generated input list.')
break
print(f'Generated inputs order: {ordered_input_names}')
return (ordered_input_names, tuple(model_args))
|
Ensure inputs are presented in the correct order, without any None values.
Args:
model: The model used to forward the input data
tokens: BatchEncoding holding the input data
input_names: The name of the inputs
Returns: Tuple of the ordered input names and the matching model arguments.
|
github-repos
|
def _get_kernel_arguments(self):
declarations = []
for (name, data) in self._kernel_data.items():
declarations.extend(data.get_kernel_parameters(('_' + name)))
return declarations
|
Get the list of kernel arguments for loading the kernel data elements into the kernel.
This will use the sorted keys for looping through the kernel input items.
Returns:
list of str: the list of parameter definitions
|
codesearchnet
|
def Open(self, file_object):
file_object.seek(0, os.SEEK_SET)
signature_data = file_object.read(6)
self.file_format = None
if len(signature_data) > 2:
if signature_data[:2] == self._CPIO_SIGNATURE_BINARY_BIG_ENDIAN:
self.file_format = 'bin-big-endian'
elif signature_data[:2] == self._CPIO_SIGNATURE_BINARY_LITTLE_ENDIAN:
self.file_format = 'bin-little-endian'
elif signature_data == self._CPIO_SIGNATURE_PORTABLE_ASCII:
self.file_format = 'odc'
elif signature_data == self._CPIO_SIGNATURE_NEW_ASCII:
self.file_format = 'newc'
elif signature_data == self._CPIO_SIGNATURE_NEW_ASCII_WITH_CHECKSUM:
self.file_format = 'crc'
if self.file_format is None:
raise IOError('Unsupported CPIO format.')
self._file_object = file_object
self._file_size = file_object.get_size()
self._ReadFileEntries(self._file_object)
|
Opens the CPIO archive file.
Args:
file_object (FileIO): a file-like object.
Raises:
IOError: if the file format signature is not supported.
OSError: if the file format signature is not supported.
|
juraj-google-style
|
def _GetUserTypeAndPassword(username, password=None, is_admin=False):
if is_admin:
user_type = api_user.ApiGrrUser.UserType.USER_TYPE_ADMIN
else:
user_type = api_user.ApiGrrUser.UserType.USER_TYPE_STANDARD
if (password is None):
password = getpass.getpass(prompt=("Please enter password for user '%s':" % username))
return (user_type, password)
|
Returns the user-type and password for a user.
Args:
username: Username for the user.
password: Password for the user. If None, or not provided, we will prompt
for one via the terminal.
is_admin: Indicates whether the user should have admin privileges.
|
codesearchnet
|
def _get_cuda_compute_capabilities_or_die() -> list[str]:
try:
nvidia_smi = _find_executable_or_die('nvidia-smi')
nvidia_smi_proc = subprocess.run([nvidia_smi, '--query-gpu=compute_cap', '--format=csv,noheader'], capture_output=True, check=True, text=True)
capabilities = sorted(set(nvidia_smi_proc.stdout.strip().split('\n')))
logging.info('Found CUDA compute capabilities: %s', capabilities)
return capabilities
except (RuntimeError, subprocess.CalledProcessError) as e:
logging.info('Could not find nvidia-smi, or nvidia-smi command failed. Please pass capabilities directly using --cuda_compute_capabilities.')
raise e
|
Finds compute capabilities via nvidia-smi or raises an exception.
Returns:
list of unique, sorted strings representing compute capabilities.
Raises:
RuntimeError: if path to nvidia-smi couldn't be found.
subprocess.CalledProcessError: if nvidia-smi process failed.
|
github-repos
|
def relative_probability_from_lookup_table(self, jump_lookup_table):
l1 = self.initial_site.label
l2 = self.final_site.label
c1 = self.initial_site.nn_occupation()
c2 = self.final_site.nn_occupation()
return jump_lookup_table.jump_probability[l1][l2][c1][c2]
|
Relative probability of accepting this jump from a lookup-table.
Args:
jump_lookup_table (LookupTable): the lookup table to be used for this jump.
Returns:
(Float): relative probability of accepting this jump.
|
codesearchnet
|
def copy_file(source, destination, unique=False, sort=False, case_sensitive=True, create_path=False):
_File.copy(source, destination, unique, sort, case_sensitive, create_path)
|
Python utility to copy a file
Args:
source: absolute/relative path of source file
destination: absolute/relative path of destination file.
Use same as source for replacing the content of existing file.
unique: Copy only unique lines from file
sort: Sort the content of file
case_sensitive: unique/sort operations to be performed case-sensitive string
create_path: Recursively create the path to destination directory in case not found
Returns: None
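Illustrative usage (hypothetical paths):
>>> copy_file('raw/input.txt', 'clean/input.txt', unique=True, sort=True, create_path=True)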
|
codesearchnet
|
def fashion_mnist_generator(tmp_dir, training, how_many, start_from=0):
_get_fashion_mnist(tmp_dir)
d = _FASHION_MNIST_LOCAL_FILE_PREFIX + (
_MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME)
l = _FASHION_MNIST_LOCAL_FILE_PREFIX + (
_MNIST_TRAIN_LABELS_FILENAME if training else _MNIST_TEST_LABELS_FILENAME)
return mnist_common_generator(tmp_dir, training, how_many, d, l, start_from)
|
Image generator for FashionMNIST.
Args:
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
Returns:
An instance of image_generator that produces FashionMNIST images.
|
juraj-google-style
|
def replace(self, **kw):
if "tzinfo" in kw:
if kw["tzinfo"] is None:
raise TypeError("Can not remove the timezone use asdatetime()")
else:
tzinfo = kw["tzinfo"]
del kw["tzinfo"]
else:
tzinfo = None
is_dst = None
if "is_dst" in kw:
is_dst = kw["is_dst"]
del kw["is_dst"]
else:
is_dst = self.is_dst
replaced = self.asdatetime().replace(**kw)
return type(self)(
replaced, tzinfo=tzinfo or self.tzinfo.zone, is_dst=is_dst)
|
Return datetime with new specified fields given as arguments.
For example, dt.replace(days=4) would return a new datetime_tz object with
exactly the same as dt but with the days attribute equal to 4.
Any attribute can be replaced, but tzinfo can not be set to None.
Args:
Any datetime_tz attribute.
Returns:
A datetime_tz object with the attributes replaced.
Raises:
TypeError: If the given replacement is invalid.
|
juraj-google-style
|
def _get_index(self, data: _instance_base.Instance | ConcreteValue) -> int | None:
if isinstance(data, ConcreteValue):
return self.ctx.convert.value_to_constant(data, (int, type(None)))
elif isinstance(data, _instance_base.Instance):
if data.cls != self.ctx.convert.int_type:
raise abstract_utils.ConversionError()
else:
return None
else:
raise abstract_utils.ConversionError()
|
Helper function for getslice_slot that extracts int or None from data.
If data is an Instance of int, None is returned.
Args:
data: The object to extract from. Usually a ConcreteValue or an Instance.
Returns:
The value (an int or None) of the index.
Raises:
abstract_utils.ConversionError: If the data could not be converted.
|
github-repos
|
def register(self, name):
if name not in settings.CODEMIRROR_SETTINGS:
msg = ("Given config name '{}' does not exists in "
"'settings.CODEMIRROR_SETTINGS'.")
raise UnknowConfigError(msg.format(name))
parameters = copy.deepcopy(self.default_internal_config)
parameters.update(copy.deepcopy(
settings.CODEMIRROR_SETTINGS[name]
))
if 'css_bundle_name' not in parameters:
css_template_name = settings.CODEMIRROR_BUNDLE_CSS_NAME
parameters['css_bundle_name'] = css_template_name.format(
settings_name=name
)
if 'js_bundle_name' not in parameters:
js_template_name = settings.CODEMIRROR_BUNDLE_JS_NAME
parameters['js_bundle_name'] = js_template_name.format(
settings_name=name
)
self.registry[name] = parameters
return parameters
|
Register configuration for an editor instance.
Arguments:
name (string): Config name from available ones in
``settings.CODEMIRROR_SETTINGS``.
Raises:
UnknowConfigError: If given config name does not exist in
``settings.CODEMIRROR_SETTINGS``.
Returns:
dict: Registered config dict.
|
juraj-google-style
|
def __init__(self, input_reader=None, output_writer=None):
super(PinfoTool, self).__init__(
input_reader=input_reader, output_writer=output_writer)
self._compare_storage_file_path = None
self._output_filename = None
self._output_format = None
self._process_memory_limit = None
self._storage_file_path = None
self._verbose = False
self.compare_storage_information = False
|
Initializes the CLI tool object.
Args:
input_reader (Optional[InputReader]): input reader, where None indicates
that the stdin input reader should be used.
output_writer (Optional[OutputWriter]): output writer, where None
indicates that the stdout output writer should be used.
|
juraj-google-style
|
def secondary_training_status_message(job_description, prev_description):
if ((job_description is None) or (job_description.get('SecondaryStatusTransitions') is None) or (len(job_description.get('SecondaryStatusTransitions')) == 0)):
return ''
prev_description_secondary_transitions = (prev_description.get('SecondaryStatusTransitions') if (prev_description is not None) else None)
prev_transitions_num = (len(prev_description['SecondaryStatusTransitions']) if (prev_description_secondary_transitions is not None) else 0)
current_transitions = job_description['SecondaryStatusTransitions']
if (len(current_transitions) == prev_transitions_num):
transitions_to_print = current_transitions[(- 1):]
else:
transitions_to_print = current_transitions[(prev_transitions_num - len(current_transitions)):]
status_strs = []
for transition in transitions_to_print:
message = transition['StatusMessage']
time_str = datetime.utcfromtimestamp(time.mktime(job_description['LastModifiedTime'].timetuple())).strftime('%Y-%m-%d %H:%M:%S')
status_strs.append('{} {} - {}'.format(time_str, transition['Status'], message))
return '\n'.join(status_strs)
|
Returns a string containing the last modified time and the secondary training job status message.
Args:
job_description: Returned response from DescribeTrainingJob call
prev_description: Previous job description from DescribeTrainingJob call
Returns:
str: Job status string to be printed.
|
codesearchnet
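A hedged illustration of the status formatter above, fed a hand-built dictionary in the shape of a SageMaker DescribeTrainingJob response; every field value here is invented.
import datetime

job_description = {
    'LastModifiedTime': datetime.datetime(2023, 1, 1, 12, 0, 0),
    'SecondaryStatusTransitions': [
        {'Status': 'Starting', 'StatusMessage': 'Preparing the instances for training'},
        {'Status': 'Training', 'StatusMessage': 'Downloading the training image'},
    ],
}
# With no previous description every transition is printed, one line per entry.
print(secondary_training_status_message(job_description, prev_description=None))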
|
def initialize_references_json(references_json, references, setter=None):
for obj in references_json:
obj_id = obj['id']
obj_attrs = obj['attributes']
instance = references[obj_id]
HasProps.__init__(instance)
instance.update_from_json(obj_attrs, models=references, setter=setter)
|
Given a JSON representation of the models in a graph, and new model
objects, set the properties on the models from the JSON
Args:
references_json (``JSON``)
JSON specifying attributes and values to initialize new model
objects with.
references (dict[str, Model])
A dictionary mapping model IDs to newly created (but not yet
initialized) Bokeh models.
**This is an "out" parameter**. The values it contains will be
modified in-place.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
|
codesearchnet
|
def reduce_per_replica(values, strategy, reduction='first'):
def _reduce(v):
if reduction == 'concat' and _collective_all_reduce_multi_worker(strategy):
return _multi_worker_concat(v, strategy)
if not _is_per_replica_instance(v):
return v
elif reduction == 'first':
return strategy.unwrap(v)[0]
elif reduction == 'concat':
if _is_tpu_multi_host(strategy):
return _tpu_multi_host_concat(v, strategy)
else:
return concat(strategy.unwrap(v))
else:
raise ValueError('`reduction` must be "first" or "concat".')
return nest.map_structure(_reduce, values)
|
Reduce PerReplica objects.
Args:
values: Structure of `PerReplica` objects or `Tensor`s. `Tensor`s are
returned as-is.
strategy: `tf.distribute.Strategy` object.
reduction: One of 'first', 'concat'.
Returns:
Structure of `Tensor`s.
|
github-repos
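A hedged sketch of reduce_per_replica (a Keras-internal helper) under a simple mirrored strategy; plain tensors pass through unchanged, and 'first' keeps only the first replica's value.
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
per_replica = strategy.run(lambda: tf.constant([1.0, 2.0]))
first_only = reduce_per_replica(per_replica, strategy, reduction='first')
# reduction='concat' would instead concatenate the per-replica tensors.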
|
def replace_vars(config, env):
if isinstance(config, dict):
for (k, v) in list(config.items()):
if (isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple)):
replace_vars(v, env)
elif isinstance(v, basestring):
config[k] = expand_var(v, env)
elif isinstance(config, list):
for (i, v) in enumerate(config):
if (isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple)):
replace_vars(v, env)
elif isinstance(v, basestring):
config[i] = expand_var(v, env)
elif isinstance(config, tuple):
for v in config:
if (isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple)):
replace_vars(v, env)
|
Replace variable references in config using the supplied env dictionary.
Args:
config: the config to parse. Can be a tuple, list or dict.
env: user supplied dictionary.
Raises:
Exception if any variable references are not found in env.
|
codesearchnet
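A small illustration of replace_vars; the ${NAME} placeholder syntax is an assumption about the expand_var helper it calls, so treat the exact substitution format as illustrative.
config = {
    'output_dir': '${HOME}/results',              # hypothetical placeholder syntax
    'steps': [{'log': '${HOME}/logs/run.txt'}],
}
env = {'HOME': '/home/alice'}
replace_vars(config, env)   # mutates config in place
# config['output_dir'] would then read '/home/alice/results' if expand_var uses ${...}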
|
def get_interpolated_gap(self, tol=0.001, abs_tol=False, spin=None):
tdos = (self.y if (len(self.ydim) == 1) else np.sum(self.y, axis=1))
if (not abs_tol):
tol = ((tol * tdos.sum()) / tdos.shape[0])
energies = self.x
below_fermi = [i for i in range(len(energies)) if ((energies[i] < self.efermi) and (tdos[i] > tol))]
above_fermi = [i for i in range(len(energies)) if ((energies[i] > self.efermi) and (tdos[i] > tol))]
vbm_start = max(below_fermi)
cbm_start = min(above_fermi)
if (vbm_start == cbm_start):
return (0.0, self.efermi, self.efermi)
else:
terminal_dens = tdos[vbm_start:(vbm_start + 2)][::(- 1)]
terminal_energies = energies[vbm_start:(vbm_start + 2)][::(- 1)]
start = get_linear_interpolated_value(terminal_dens, terminal_energies, tol)
terminal_dens = tdos[(cbm_start - 1):(cbm_start + 1)]
terminal_energies = energies[(cbm_start - 1):(cbm_start + 1)]
end = get_linear_interpolated_value(terminal_dens, terminal_energies, tol)
return ((end - start), end, start)
|
Expects a DOS object and finds the gap
Args:
tol: tolerance in occupations for determining the gap
abs_tol: Set to True for an absolute tolerance and False for a
relative one.
spin: Possible values are None - finds the gap in the summed
densities, Up - finds the gap in the up spin channel,
Down - finds the gap in the down spin channel.
Returns:
(gap, cbm, vbm):
Tuple of floats in eV corresponding to the gap, cbm and vbm.
|
codesearchnet
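A hedged usage sketch assuming a pymatgen-style DOS object; the vasprun.xml path is a placeholder for whatever calculation output supplies the densities.
from pymatgen.io.vasp import Vasprun   # assumed source of a DOS object

dos = Vasprun('vasprun.xml').complete_dos      # hypothetical input file
gap, cbm, vbm = dos.get_interpolated_gap(tol=0.001, abs_tol=False)
print(f'gap = {gap:.3f} eV (VBM {vbm:.3f} eV, CBM {cbm:.3f} eV)')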
|
def consume(self, callback, bindings=None, queues=None, exchanges=None):
self._bindings = (bindings or config.conf['bindings'])
self._queues = (queues or config.conf['queues'])
self._exchanges = (exchanges or config.conf['exchanges'])
if inspect.isclass(callback):
cb_obj = callback()
if (not callable(cb_obj)):
raise ValueError('Callback must be a class that implements __call__ or a function.')
self._consumer_callback = cb_obj
elif callable(callback):
self._consumer_callback = callback
else:
raise ValueError('Callback must be a class that implements __call__ or a function.')
self._running = True
self.connect()
self._connection.ioloop.start()
|
Consume messages from a message queue.
Simply define a callable to be used as the callback when messages are
delivered and specify the queue bindings. This call blocks. The callback
signature should accept a single positional argument which is an
instance of a :class:`Message` (or a sub-class of it).
Args:
callback (callable): The callable to pass the message to when one
arrives.
bindings (list of dict): A list of dictionaries describing bindings
for queues. Refer to the :ref:`conf-bindings` configuration
documentation for the format.
queues (dict): A dictionary of queues to ensure exist. Refer to the
:ref:`conf-queues` configuration documentation for the format.
exchanges (dict): A dictionary of exchanges to ensure exist. Refer
to the :ref:`conf-exchanges` configuration documentation for the
format.
Raises:
HaltConsumer: Raised when the consumer halts.
ValueError: If the callback isn't a callable object or a class with
__call__ defined.
|
codesearchnet
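A hedged sketch of the consume call above, in the style of the fedora-messaging API it appears to come from; the binding and queue dictionaries are illustrative only, and the call itself is left commented out because it blocks until the consumer halts.
def printer(message):
    # Invoked once per delivered message.
    print(message)

bindings = [{'exchange': 'amq.topic', 'queue': 'demo', 'routing_keys': ['
queues = {'demo': {'durable': False, 'auto_delete': True,
                   'exclusive': False, 'arguments': {}}}

# consumer.consume(printer, bindings=bindings, queues=queues)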
|
def put(self, entity):
actual_entity = _normalize_entity(entity)
if actual_entity is None:
return self.ndb_put(entity)
self.puts.append(actual_entity)
|
Registers entity to put to datastore.
Args:
entity: an entity or model instance to put.
|
juraj-google-style
|
def Runs(self):
with self._accumulators_mutex:
items = list(six.iteritems(self._accumulators))
return {run_name: accumulator.Tags() for (run_name, accumulator) in items}
|
Return all the run names in the `EventMultiplexer`.
Returns:
```
{runName: { scalarValues: [tagA, tagB, tagC],
graph: true, meta_graph: true}}
```
|
codesearchnet
|
def Decode(data, encoding=None):
encoding = encoding or GetConsoleAttr().GetEncoding()
return encoding_util.Decode(data, encoding=encoding)
|
Converts the given string, bytes, or object to a text string.
Args:
data: Any bytes, string, or object that has str() or unicode() methods.
encoding: A suggested encoding used to decode. If this encoding doesn't
work, other defaults are tried. Defaults to
GetConsoleAttr().GetEncoding().
Returns:
A text string representation of the data.
|
github-repos
|
def __init__(self, location, resource_pool):
super(MemoryPackageRepository, self).__init__(location, resource_pool)
self.data = {}
self.register_resource(MemoryPackageFamilyResource)
self.register_resource(MemoryPackageResource)
self.register_resource(MemoryVariantResource)
|
Create an in-memory package repository.
Args:
location (str): Path containing the package repository.
|
juraj-google-style
|
def log_histogram(self, name, value, step=None):
if isinstance(value, six.string_types):
raise TypeError('"value" should be a number, got {}'
.format(type(value)))
self._check_step(step)
tf_name = self._ensure_tf_name(name)
summary = self._histogram_summary(tf_name, value, step=step)
self._log_summary(tf_name, summary, value, step=step)
|
Log a histogram for given name on given step.
Args:
name (str): name of the variable (it will be converted to a valid
tensorflow summary name).
value (tuple or list): either list of numbers
to be summarized as a histogram, or a tuple of bin_edges and
bincounts that directly define a histogram.
step (int): non-negative integer used for visualization
|
juraj-google-style
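A hedged usage sketch for log_histogram; `logger` stands in for an instance of the (unshown) class this method belongs to, so the call is left as a comment.
import numpy as np

samples = np.random.randn(1000).tolist()   # a list of numbers to summarize
# logger.log_histogram('weights/layer1', samples, step=10)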
|
def log_combinations(n, counts, name='log_combinations'):
with ops.name_scope(name, values=[n, counts]):
n = ops.convert_to_tensor(n, name='n')
counts = ops.convert_to_tensor(counts, name='counts')
total_permutations = math_ops.lgamma(n + 1)
counts_factorial = math_ops.lgamma(counts + 1)
redundant_permutations = math_ops.reduce_sum(counts_factorial, axis=[-1])
return total_permutations - redundant_permutations
|
Multinomial coefficient.
Given `n` and `counts`, where `counts` has last dimension `k`, we compute
the multinomial coefficient as:
```n! / prod_i n_i!```
where `i` runs over all `k` classes.
Args:
n: Floating-point `Tensor` broadcastable with `counts`. This represents `n`
outcomes.
counts: Floating-point `Tensor` broadcastable with `n`. This represents
counts in `k` classes, where `k` is the last dimension of the tensor.
name: A name for this operation (optional).
Returns:
`Tensor` representing the multinomial coefficient between `n` and `counts`.
|
github-repos
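A worked check of log_combinations: splitting n = 4 outcomes into counts [2, 1, 1] gives the multinomial coefficient 4! / (2!·1!·1!) = 12, so the op should return log(12) ≈ 2.4849.
import tensorflow as tf

n = tf.constant(4.0)
counts = tf.constant([2.0, 1.0, 1.0])
result = log_combinations(n, counts)   # ≈ log(12) ≈ 2.4849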
|
def check_tx(self, raw_transaction):
self.abort_if_abci_chain_is_not_synced()
logger.debug('check_tx: %s', raw_transaction)
transaction = decode_transaction(raw_transaction)
if self.bigchaindb.is_valid_transaction(transaction):
logger.debug('check_tx: VALID')
return ResponseCheckTx(code=CodeTypeOk)
else:
logger.debug('check_tx: INVALID')
return ResponseCheckTx(code=CodeTypeError)
|
Validate the transaction before entry into
the mempool.
Args:
raw_transaction: a raw string (in bytes) transaction.
|
juraj-google-style
|
def read_probes(self, key):
assert key in list(self._PROBES.keys())
if key == 'output':
value = self._output
return value
|
Requests a value from the instrument and returns it.
Args:
key: name of the requested value
Returns: the value read from the instrument
|
juraj-google-style
|
def _group_similar(items: List[T],
comparer: Callable[[T, T], bool]) -> List[List[T]]:
groups = []
used = set()
for i in range(len(items)):
if i not in used:
group = [items[i]]
for j in range(i + 1, len(items)):
if j not in used and comparer(items[i], items[j]):
used.add(j)
group.append(items[j])
groups.append(group)
return groups
|
Combines similar items into groups.
Args:
items: The list of items to group.
comparer: Determines if two items are similar.
Returns:
A list of groups of items.
|
juraj-google-style
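A self-contained example of _group_similar using an arbitrary comparer that treats two integers as similar when they share the same parity.
same_parity = lambda a, b: (a % 2) == (b % 2)
groups = _group_similar([1, 2, 3, 4, 5], same_parity)
# groups == [[1, 3, 5], [2, 4]]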
|
def _MergeTaskStorage(self, storage_writer):
if self._processing_profiler:
self._processing_profiler.StartTiming('merge_check')
for task_identifier in storage_writer.GetProcessedTaskIdentifiers():
try:
task = self._task_manager.GetProcessedTaskByIdentifier(task_identifier)
self._task_manager.SampleTaskStatus(task, 'processed')
to_merge = self._task_manager.CheckTaskToMerge(task)
if not to_merge:
storage_writer.RemoveProcessedTaskStorage(task)
self._task_manager.RemoveTask(task)
self._task_manager.SampleTaskStatus(task, 'removed_processed')
else:
storage_writer.PrepareMergeTaskStorage(task)
self._task_manager.UpdateTaskAsPendingMerge(task)
except KeyError:
logger.error(
'Unable to retrieve task: {0:s} to prepare it to be merged.'.format(
task_identifier))
continue
if self._processing_profiler:
self._processing_profiler.StopTiming('merge_check')
task = None
if not self._storage_merge_reader_on_hold:
task = self._task_manager.GetTaskPendingMerge(self._merge_task)
if task or self._storage_merge_reader:
self._status = definitions.STATUS_INDICATOR_MERGING
if self._processing_profiler:
self._processing_profiler.StartTiming('merge')
if task:
if self._storage_merge_reader:
self._merge_task_on_hold = self._merge_task
self._storage_merge_reader_on_hold = self._storage_merge_reader
self._task_manager.SampleTaskStatus(
self._merge_task_on_hold, 'merge_on_hold')
self._merge_task = task
try:
self._storage_merge_reader = storage_writer.StartMergeTaskStorage(
task)
self._task_manager.SampleTaskStatus(task, 'merge_started')
except IOError as exception:
logger.error((
'Unable to merge results of task: {0:s} '
'with error: {1!s}').format(task.identifier, exception))
self._storage_merge_reader = None
if self._storage_merge_reader:
fully_merged = self._storage_merge_reader.MergeAttributeContainers(
maximum_number_of_containers=self._MAXIMUM_NUMBER_OF_CONTAINERS)
else:
fully_merged = True
if self._processing_profiler:
self._processing_profiler.StopTiming('merge')
if fully_merged:
try:
self._task_manager.CompleteTask(self._merge_task)
except KeyError as exception:
logger.error(
'Unable to complete task: {0:s} with error: {1!s}'.format(
self._merge_task.identifier, exception))
if not self._storage_merge_reader_on_hold:
self._merge_task = None
self._storage_merge_reader = None
else:
self._merge_task = self._merge_task_on_hold
self._storage_merge_reader = self._storage_merge_reader_on_hold
self._merge_task_on_hold = None
self._storage_merge_reader_on_hold = None
self._task_manager.SampleTaskStatus(
self._merge_task, 'merge_resumed')
self._status = definitions.STATUS_INDICATOR_RUNNING
self._number_of_produced_events = storage_writer.number_of_events
self._number_of_produced_sources = storage_writer.number_of_event_sources
self._number_of_produced_warnings = storage_writer.number_of_warnings
|
Merges a task storage with the session storage.
This function checks all task stores that are ready to merge and updates
the scheduled tasks. Note that to prevent this function holding up
the task scheduling loop only the first available task storage is merged.
Args:
storage_writer (StorageWriter): storage writer for a session storage used
to merge task storage.
|
juraj-google-style
|
def find_library_linux(cls):
dll = Library.JLINK_SDK_NAME
root = os.path.join('/', 'opt', 'SEGGER')
for (directory_name, subdirs, files) in os.walk(root):
fnames = []
x86_found = False
for f in files:
path = os.path.join(directory_name, f)
if (os.path.isfile(path) and f.startswith(dll)):
fnames.append(f)
if ('_x86' in path):
x86_found = True
for fname in fnames:
fpath = os.path.join(directory_name, fname)
if util.is_os_64bit():
if ('_x86' not in fname):
(yield fpath)
elif x86_found:
if ('_x86' in fname):
(yield fpath)
else:
(yield fpath)
|
Loads the SEGGER DLL from the root directory.
On Linux, the SEGGER tools are installed under the ``/opt/SEGGER``
directory with versioned directories having the suffix ``_VERSION``.
Args:
cls (Library): the ``Library`` class
Returns:
The paths to the J-Link library files in the order that they are
found.
|
codesearchnet
|
def _merge_choice_field(self, json_value: Any, choice_field: descriptor.FieldDescriptor, field_name: str, parent: message.Message) -> None:
choice_field_name = _get_choice_field_name(choice_field, field_name)
choice_field_map = _get_field_map(choice_field.message_type)
choice_value_field = choice_field_map.get(choice_field_name)
if choice_value_field is None:
raise ValueError(f'Cannot find {choice_field_name!r} on {choice_field.full_name}')
choice_message = proto_utils.set_in_parent_or_add(parent, choice_field)
self._merge_field(json_value, choice_value_field, choice_message)
|
Creates a Message based on the choice_field Descriptor and json_value.
The resulting message is merged into parent.
Args:
json_value: The JSON value to merge into a message of the type described
by choice_field.
choice_field: The field descriptor of the FHIR choice type on parent.
field_name: The nested field name of the choice type, e.g.: _valueBoolean.
parent: The parent Message to merge into.
|
github-repos
|
def get_metrics_collector(self, prefix: str=''):
metrics_namespace = self._metrics_namespace if self._metrics_namespace else self._model_handler.get_metrics_namespace()
if self._model_handler.override_metrics(metrics_namespace):
return None
return _MetricsCollector(metrics_namespace, prefix=prefix)
|
Args:
prefix: Unique identifier for metrics, used when models
are updated using side input.
|
github-repos
|
def error(channel, title, description):
gui = ui_embed.UI(
channel,
title,
description,
modulename=modulename
)
return gui
|
Creates an embed UI containing an error message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
title (str): The title of the embed
description (str): The description for the error
Returns:
ui (ui_embed.UI): The embed UI object
|
juraj-google-style
|
def __call__(self, input_1: EventSet, input_2: EventSet) -> Dict[str, EventSet]:
assert isinstance(self.operator, BaseBinaryOperator)
output_schema = self.output_schema('output')
if len(input_1.schema.features) != len(input_2.schema.features):
raise ValueError('Both EventSets must have the same number of features.')
num_features = len(input_1.schema.features)
dst_evset = EventSet(data={}, schema=output_schema)
assert len(input_1.data) == len(input_2.data)
for index_key, index_data in input_1.data.items():
input_1_features = index_data.features
input_2_features = input_2.data[index_key].features
dst_features = []
for feature_idx in range(num_features):
input_1_feature = input_1_features[feature_idx]
input_2_feature = input_2_features[feature_idx]
assert input_1_feature.dtype.type == input_2_feature.dtype.type
result = self._do_operation(input_1_feature, input_2_feature, input_1.schema.features[feature_idx].dtype)
dst_features.append(result)
dst_evset.set_index_value(index_key, IndexData(features=dst_features, timestamps=index_data.timestamps, schema=output_schema), normalize=False)
return {'output': dst_evset}
|
Applies the corresponding arithmetic operation between two EventSets.
Args:
input_1: First EventSet.
input_2: Second EventSet.
Returns:
Result of the operation.
Raises:
ValueError: If sampling of both EventSets is not equal.
|
github-repos
|
def _is_valid(self, value):
if hasattr(self._type, 'istypeof'):
return self._type.istypeof(value)
else:
return isinstance(value, self._type)
|
Return True if the input value is valid for insertion into the
inner list.
Args:
value: An object about to be inserted.
|
codesearchnet
|
def check_the_end_flag(self, state_key):
x, y = state_key
end_point_tuple = np.where(self.__map_arr == self.__end_point_label)
end_point_x_arr, end_point_y_arr = end_point_tuple
if x == end_point_x_arr[0] and y == end_point_y_arr[0]:
return True
else:
return False
|
Check the end flag.
If this return value is `True`, the learning ends.
Args:
state_key: The key of state in `self.t`.
Returns:
bool
|
juraj-google-style
|
def get_diff(value1, value2, name1, name2):
lines1 = [(line + '\n') for line in value1.splitlines()]
lines2 = [(line + '\n') for line in value2.splitlines()]
diff_lines = difflib.context_diff(lines1, lines2, fromfile=name1, tofile=name2)
return ''.join(diff_lines)
|
Get a diff between two strings.
Args:
value1 (str): First string to be compared.
value2 (str): Second string to be compared.
name1 (str): Name of the first string.
name2 (str): Name of the second string.
Returns:
str: The full diff.
|
codesearchnet
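A quick, self-contained demonstration of get_diff; the file labels are arbitrary names that appear in the context-diff header.
old = 'alpha\nbeta\ngamma\n'
new = 'alpha\nBETA\ngamma\n'
print(get_diff(old, new, name1='before.txt', name2='after.txt'))
# Prints a context diff labelled before.txt / after.txt.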
|
def plot(self, ax=None, return_fig=False, **kwargs):
if (ax is None):
fig = plt.figure(figsize=(2, 10))
ax = fig.add_subplot(111)
return_ax = False
else:
return_ax = True
hypertime = np.linspace(self.start, self.stop, (((10 * self.size) - 1) + 1))
hyperamp = np.interp(hypertime, self.basis, self)
ax.plot(hyperamp, hypertime, 'k')
ax.fill_betweenx(hypertime, hyperamp, 0, (hyperamp > 0.0), facecolor='k', lw=0)
ax.invert_yaxis()
ax.set_title(self.name)
if return_ax:
return ax
elif return_fig:
return fig
else:
return None
|
Plot a synthetic.
Args:
ax (ax): A matplotlib axis.
legend (Legend): For now, only here to match API for other plot
methods.
return_fig (bool): whether to return the matplotlib figure.
Default False.
Returns:
ax. If you passed in an ax, otherwise None.
|
codesearchnet
|
def open_repository(path, spor_dir='.spor'):
root = _find_root_dir(path, spor_dir)
return Repository(root, spor_dir)
|
Open an existing repository.
Args:
path: Path to any file or directory within the repository.
spor_dir: The name of the directory containing spor data.
Returns: A `Repository` instance.
Raises:
ValueError: No repository is found.
|
juraj-google-style
|