code | docstring | source
---|---|---|
def _dequeue_return_value(self, tensors):
if self._names:
return {n: tensors[i] for i, n in enumerate(self._names)}
elif len(tensors) == 1:
return tensors[0]
else:
return tensors | Return the value to return from a dequeue op.
If the queue has names, return a dictionary with the
names as keys. Otherwise return either a single tensor
or a list of tensors depending on the length of `tensors`.
Args:
tensors: List of tensors from the dequeue op.
Returns:
A single tensor, a list of tensors, or a dictionary
of tensors. | github-repos |
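A minimal standalone sketch of the dispatch rule above, using plain Python values as hypothetical stand-ins for dequeued tensors (no TensorFlow required):

```python
# Hypothetical stand-in: plain values instead of dequeued tensors.
def dequeue_return_value(tensors, names=None):
    if names:
        return {n: tensors[i] for i, n in enumerate(names)}
    elif len(tensors) == 1:
        return tensors[0]
    else:
        return tensors

print(dequeue_return_value([1.0, 2.0], names=["a", "b"]))  # {'a': 1.0, 'b': 2.0}
print(dequeue_return_value([1.0]))                         # 1.0
print(dequeue_return_value([1.0, 2.0]))                    # [1.0, 2.0]
```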
def CheckDependencies(self, verbose_output=True):
print('Checking availability and versions of dependencies.')
check_result = True
for module_name, dependency in sorted(self.dependencies.items()):
if module_name == 'sqlite3':
result, status_message = self._CheckSQLite3()
else:
result, status_message = self._CheckPythonModule(dependency)
if not result and module_name == 'lzma':
dependency.name = 'backports.lzma'
result, status_message = self._CheckPythonModule(dependency)
if not result and not dependency.is_optional:
check_result = False
self._PrintCheckDependencyStatus(
dependency, result, status_message, verbose_output=verbose_output)
if check_result and not verbose_output:
print('[OK]')
print('')
return check_result | Checks the availability of the dependencies.
Args:
verbose_output (Optional[bool]): True if output should be verbose.
Returns:
bool: True if the dependencies are available, False otherwise. | juraj-google-style |
def __init__(self, name_context, spec, counter_factory, state_sampler):
assert isinstance(name_context, common.NameContext)
self.name_context = name_context
self.spec = spec
self.counter_factory = counter_factory
self.execution_context = None
self.consumers = collections.defaultdict(list)
self.metrics_container = MetricsContainer(self.name_context.metrics_name())
self.state_sampler = state_sampler
self.scoped_start_state = self.state_sampler.scoped_state(self.name_context, 'start', metrics_container=self.metrics_container)
self.scoped_process_state = self.state_sampler.scoped_state(self.name_context, 'process', metrics_container=self.metrics_container)
self.scoped_finish_state = self.state_sampler.scoped_state(self.name_context, 'finish', metrics_container=self.metrics_container)
self.receivers = []
self.setup_done = False
self.step_name = None
self.data_sampler: Optional[DataSampler] = None | Initializes a worker operation instance.
Args:
name_context: A NameContext instance, with the name information for this
operation.
spec: A operation_specs.Worker* instance.
counter_factory: The CounterFactory to use for our counters.
state_sampler: The StateSampler for the current operation. | github-repos |
def __init__(self, residual_restriction=None, process_continuation=None, future_output_watermark=None):
self.residual_restriction = residual_restriction
self.process_continuation = process_continuation
self.future_output_watermark = future_output_watermark | Returned as a result of a `invoke_process_element()` invocation.
Args:
residual_restriction: a restriction for the unprocessed part of the
element.
process_continuation: a `ProcessContinuation` if one was returned as the
last element of the SDF `process()` invocation.
future_output_watermark: output watermark of the results that will be
produced when invoking the Splittable `DoFn`
for the current element with
`residual_restriction`. | github-repos |
def _dict_to_tensor(self, x, k):
return array_ops_stack.stack([x[i] for i in range(k)]) | Convert a dictionary to a tensor.
Args:
x: A dictionary of length k.
k: Dimension of x.
Returns:
A tensor with the same dimension. | github-repos |
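A hedged NumPy analogue of the same conversion, stacking the values of an integer-keyed dict in key order (`np.stack` standing in for `array_ops_stack.stack`):

```python
import numpy as np

x = {0: np.array([1.0, 2.0]), 1: np.array([3.0, 4.0])}
k = 2  # dimension of x
print(np.stack([x[i] for i in range(k)]))
# [[1. 2.]
#  [3. 4.]]
```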
def log_(
message: str,
logger: logging.Logger,
level: int = logging.INFO,
extra: Optional[Dict] = None,
trim: bool = False,
) -> None:
if extra is None:
extra = {}
if message:
message = message.replace("\n", "").replace("  ", " ").replace("{ ", "{")
if trim:
message = _trim_message(message)
logger.log(level, message, extra=extra) | Log a request or response
Args:
message: JSON-RPC request or response string.
logger:
level: Log level.
extra: More details to include in the log entry.
trim: Abbreviate log messages. | juraj-google-style |
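A small usage sketch of the normalisation step the helper applies before logging; only the standard library is used and the message content is illustrative:

```python
import logging

logging.basicConfig(level=logging.INFO)
rpc_logger = logging.getLogger("jsonrpc.requests")

message = '{ "jsonrpc": "2.0",\n  "method": "ping",\n  "id": 1 }'
# Same clean-up as in log_(): drop newlines, collapse double spaces, tidy braces.
message = message.replace("\n", "").replace("  ", " ").replace("{ ", "{")
rpc_logger.log(logging.INFO, message)
```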
def untar_to_directory(tarfile: str, directory: str, verbose: bool=False, gzipped: bool=False, skip_if_dir_exists: bool=True, run_func: Callable[[List[str]], Any]=None, chdir_via_python: bool=True) -> None:
if (skip_if_dir_exists and os.path.isdir(directory)):
log.info('Skipping extraction of {} as directory {} exists', tarfile, directory)
return
log.info('Extracting {} -> {}', tarfile, directory)
require_executable(TAR)
mkdir_p(directory)
args = [TAR, '-x']
if verbose:
args.append('-v')
if gzipped:
args.append('-z')
if (platform.system() != 'Darwin'):
args.append('--force-local')
args.extend(['-f', tarfile])
if chdir_via_python:
with pushd(directory):
run_func(args)
else:
args.extend(['-C', directory])
run_func(args) | Unpacks a TAR file into a specified directory.
Args:
tarfile: filename of the ``.tar`` file
directory: destination directory
verbose: be verbose?
gzipped: is the ``.tar`` also gzipped, e.g. a ``.tar.gz`` file?
skip_if_dir_exists: don't do anything if the destrination directory
exists?
run_func: function to use to call an external command
chdir_via_python: change directory via Python, not via ``tar``.
Consider using this via Windows, because Cygwin ``tar`` v1.29 falls
over when given a Windows path for its ``-C`` (or ``--directory``)
option. | codesearchnet |
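A hedged usage sketch: the caller is expected to supply `run_func`, which receives the assembled `tar` argument list. A minimal subprocess-based runner might look like this (the call itself is left commented because it touches the filesystem):

```python
import subprocess
from typing import List

def run(args: List[str]) -> None:
    # Run the external command, raising CalledProcessError on failure.
    subprocess.check_call(args)

# untar_to_directory("data.tar.gz", "/tmp/data", gzipped=True, run_func=run)
```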
def _get_new_node_defs(self):
node_def_bytes = self.node_file.read()
node_defs = []
cur_pos = 0
while cur_pos < len(node_def_bytes):
size_bytes = node_def_bytes[cur_pos:cur_pos + 8]
size, = struct.unpack('<Q', size_bytes)
cur_pos += 8
node_def = node_def_pb2.NodeDef()
node_def.ParseFromString(node_def_bytes[cur_pos:cur_pos + size])
ignored_ops = []
if context.run_eager_op_as_function_enabled():
ignored_ops.extend(['_Arg', '_Retval', 'NoOp'])
ignored_ops.extend(['_Recv', '_HostRecv'])
if node_def.op not in ignored_ops:
node_defs.append(node_def)
cur_pos += size
self.assertEqual(cur_pos, len(node_def_bytes))
return node_defs | Gets new NodeDefs written by the NodeFileWriter.
Returns:
A list of new NodeDefs in the file written by NodeDefWriter since the last
time this method was called. | github-repos |
def __init__(self, s3_conn, es_client):
self.s3_conn = s3_conn
self.es_client = es_client | Base class for Elasticsearch indexers
Subclasses implement the index setting definition and transformation of data,
The base class handles index management and bulk indexing with ES
Args:
s3_conn - a boto s3 connection
es_client - an Elasticsearch indices client | juraj-google-style |
def _wait_for_any_job(provider, job_ids, poll_interval):
if (not job_ids):
return
while True:
tasks = provider.lookup_job_tasks({'*'}, job_ids=job_ids)
running_jobs = set()
failed_jobs = set()
for t in tasks:
status = t.get_field('task-status')
job_id = t.get_field('job-id')
if (status in ['FAILURE', 'CANCELED']):
failed_jobs.add(job_id)
if (status == 'RUNNING'):
running_jobs.add(job_id)
remaining_jobs = running_jobs.difference(failed_jobs)
if (failed_jobs or (len(remaining_jobs) != len(job_ids))):
return remaining_jobs
SLEEP_FUNCTION(poll_interval) | Waits until any of the listed jobs is not running.
In particular, if any of the jobs sees one of its tasks fail,
we count the whole job as failing (but do not terminate the remaining
tasks ourselves).
Args:
provider: job service provider
job_ids: a list of job IDs (string) to wait for
poll_interval: integer seconds to wait between iterations
Returns:
A set of the jobIDs with still at least one running task. | codesearchnet |
def poll_stack(self):
logging.info('polling stack status, POLL_INTERVAL={}'.format(POLL_INTERVAL))
time.sleep(POLL_INTERVAL)
completed_states = ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'DELETE_COMPLETE']
stack_name = self._config.get('environment', {}).get('stack_name', None)
while True:
try:
response = self._cloudFormation.describe_stacks(StackName=stack_name)
stack = response['Stacks'][0]
current_status = stack['StackStatus']
logging.info('current status of {}: {}'.format(stack_name, current_status))
if (current_status.endswith('COMPLETE') or current_status.endswith('FAILED')):
if (current_status in completed_states):
return True
else:
return False
time.sleep(POLL_INTERVAL)
except ClientError as wtf:
if (str(wtf).find('does not exist') == (- 1)):
logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
else:
logging.info('{} is gone'.format(stack_name))
return True
except Exception as wtf:
logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False | Spin in a loop while the Cloud Formation process either fails or succeeds
Args:
None
Returns:
Good or bad; True or False | codesearchnet |
def ops_used_by_graph_def(graph_def):
name_to_function = {}
for fun in graph_def.library.function:
name_to_function[fun.signature.name] = fun
used_ops = set()
functions_to_process = []
def mark_op_as_used(op):
if op not in used_ops and op in name_to_function:
functions_to_process.append(name_to_function[op])
used_ops.add(op)
def process_node(node):
mark_op_as_used(node.op)
if node.op in ['PartitionedCall', 'StatefulPartitionedCall']:
mark_op_as_used(node.attr['f'].func.name)
for node in graph_def.node:
process_node(node)
while functions_to_process:
fun = functions_to_process.pop()
for node in fun.node_def:
process_node(node)
return [op for op in used_ops if op not in name_to_function] | Collect the list of ops used by a graph.
Does not validate that the ops are all registered.
Args:
graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.
Returns:
A list of strings, each naming an op used by the graph. | github-repos |
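A hedged usage sketch, assuming TensorFlow 2.x is installed: trace a tiny function to a `GraphDef` and inspect the ops it uses, which is the same data the helper above walks (exact op names depend on the TF version):

```python
import tensorflow as tf

@tf.function
def f(x):
    return tf.add(x, 1.0) * 2.0

graph_def = f.get_concrete_function(tf.TensorSpec([], tf.float32)).graph.as_graph_def()
print(sorted({node.op for node in graph_def.node}))
# e.g. ['AddV2', 'Const', 'Identity', 'Mul', 'Placeholder']
```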
def Lookup(self, name):
if not self._name2item:
self._InitCache()
return self._name2item[name] | Convenience function: Look up a given name in the global namespace.
Tries to find a constant, function or class by this name.
Args:
name: Name to look up.
Returns:
A Constant, Function or Class.
Raises:
KeyError: if this identifier doesn't exist. | github-repos |
def get_field_to_observations_map(generator, query_for_tag=''):
def increment(stat, event, tag=''):
assert (stat in TRACKED_FIELDS)
field_to_obs[stat].append(Observation(step=event.step, wall_time=event.wall_time, tag=tag)._asdict())
field_to_obs = dict([(t, []) for t in TRACKED_FIELDS])
for event in generator:
if (event.HasField('graph_def') and (not query_for_tag)):
increment('graph', event)
if (event.HasField('session_log') and (not query_for_tag)):
status = event.session_log.status
if (status == event_pb2.SessionLog.START):
increment('sessionlog:start', event)
elif (status == event_pb2.SessionLog.STOP):
increment('sessionlog:stop', event)
elif (status == event_pb2.SessionLog.CHECKPOINT):
increment('sessionlog:checkpoint', event)
elif event.HasField('summary'):
for value in event.summary.value:
if (query_for_tag and (value.tag != query_for_tag)):
continue
for (proto_name, display_name) in SUMMARY_TYPE_TO_FIELD.items():
if value.HasField(proto_name):
increment(display_name, event, value.tag)
return field_to_obs | Return a field to `Observations` dict for the event generator.
Args:
generator: A generator over event protos.
query_for_tag: A string that if specified, only create observations for
events with this tag name.
Returns:
A dict mapping keys in `TRACKED_FIELDS` to an `Observation` list. | codesearchnet |
def _count_objs(self, obj, path=None, **kwargs):
sub_val = None
if isinstance(obj, dict):
for key, value in obj.items():
if isinstance(value, (list, dict)):
kwargs = self._count_objs(value,
self.make_path(key, path),
**kwargs)
else:
if self.make_path(key, path) == self.sub_total:
sub_val = value
kwargs['current'] = self._increment_prop(key,
path,
**kwargs)
elif isinstance(obj, list):
for item in obj:
if isinstance(item, (list, dict)):
kwargs = self._count_objs(item, path, **kwargs)
else:
if path == self.sub_total:
pdb.set_trace()
sub_val = item
kwargs['current'] = self._increment_prop(path, **kwargs)
else:
kwargs['current'] = self._increment_prop(path, **kwargs)
if path == self.sub_total:
pdb.set_trace()
sub_val = item
if kwargs.get('sub_val') is None:
kwargs['sub_val'] = sub_val
return kwargs | cycles through the object and adds in count values
Args:
-----
obj: the object to parse
path: the current path
kwargs:
-------
current: a dictionary of counts for current call
sub_val: the value to use for subtotal aggregation | juraj-google-style |
def _evolve(self, state, qargs=None):
if qargs is not None:
return SuperOp(self)._evolve(state, qargs)
state = self._format_state(state)
if state.shape[0] != self._input_dim:
raise QiskitError(
"QuantumChannel input dimension is not equal to state dimension."
)
if state.ndim == 1 and self._data[1] is None and \
        self._data[0].shape[0] // self._output_dim == 1:
    return np.dot(self._data[0], state)
state = self._format_state(state, density_matrix=True)
stine_l, stine_r = self._data
if stine_r is None:
stine_r = stine_l
din, dout = self.dim
dtr = stine_l.shape[0]
shape = (dout, dtr, din)
return np.einsum('iAB,BC,jAC->ij', np.reshape(stine_l, shape), state,
np.reshape(np.conjugate(stine_r), shape)) | Evolve a quantum state by the QuantumChannel.
Args:
state (QuantumState): The input statevector or density matrix.
qargs (list): a list of QuantumState subsystem positions to apply
the operator on.
Returns:
QuantumState: the output quantum state.
Raises:
QiskitError: if the operator dimension does not match the
specified QuantumState subsystem dimensions. | juraj-google-style |
def _handle_azure_exception():
try:
(yield)
except _AzureHttpError as exception:
if (exception.status_code in _ERROR_CODES):
raise _ERROR_CODES[exception.status_code](str(exception))
raise | Handles Azure exception and convert to class IO exceptions
Raises:
OSError subclasses: IO error. | codesearchnet |
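The snippet above is a contextmanager-style error translator; here is a standalone sketch of the same pattern with illustrative (non-Azure) error classes and status-code mapping:

```python
from contextlib import contextmanager

class FakeHttpError(Exception):  # illustrative stand-in for _AzureHttpError
    def __init__(self, message, status_code):
        super().__init__(message)
        self.status_code = status_code

ERROR_CODES = {404: FileNotFoundError, 403: PermissionError}  # illustrative mapping

@contextmanager
def handle_http_exception():
    try:
        yield
    except FakeHttpError as exception:
        if exception.status_code in ERROR_CODES:
            raise ERROR_CODES[exception.status_code](str(exception))
        raise

try:
    with handle_http_exception():
        raise FakeHttpError("blob not found", 404)
except FileNotFoundError as err:
    print("translated to:", type(err).__name__, "-", err)
```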
def _run_using_default_session(operation, feed_dict, graph, session=None) -> None:
if session is None:
session = stack.get_default_session()
if session is None:
raise ValueError('Cannot execute operation using `run()`: No default session is registered. Use `with sess.as_default():` or pass an explicit session to `run(session=sess)`')
if session.graph is not graph:
raise ValueError("Cannot use the default session to execute operation: the operation's graph is different from the session's graph. Pass an explicit session to run(session=sess).")
elif session.graph is not graph:
raise ValueError("Cannot use the given session to execute operation: the operation's graph is different from the session's graph.")
session.run(operation, feed_dict) | Uses the default session to run "operation".
Args:
operation: The Operation to be run.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which "operation" is defined.
session: (Optional) A different session to use to run "operation".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph. | github-repos |
def FilterItems(self, filterFn, key=None):
with self._mutex:
if key:
if key in self._buckets:
return self._buckets[key].FilterItems(filterFn)
else:
return 0
else:
return sum(bucket.FilterItems(filterFn)
for bucket in self._buckets.values()) | Filter items within a Reservoir, using a filtering function.
Args:
filterFn: A function that returns True for the items to be kept.
key: An optional bucket key to filter. If not specified, will filter all
all buckets.
Returns:
The number of items removed. | juraj-google-style |
def Sample(self, tasks_status):
sample_time = time.time()
sample = '{0:f}\t{1:d}\t{2:d}\t{3:d}\t{4:d}\t{5:d}\n'.format(
sample_time, tasks_status.number_of_queued_tasks,
tasks_status.number_of_tasks_processing,
tasks_status.number_of_tasks_pending_merge,
tasks_status.number_of_abandoned_tasks,
tasks_status.total_number_of_tasks)
self._WritesString(sample) | Takes a sample of the status of queued tasks for profiling.
Args:
tasks_status (TasksStatus): status information about tasks. | juraj-google-style |
def make_qq_plot(kev, obs, mdl, unit, key_text):
import omega as om
kev = np.asarray(kev)
obs = np.asarray(obs)
mdl = np.asarray(mdl)
c_obs = np.cumsum(obs)
c_mdl = np.cumsum(mdl)
mx = max(c_obs[(- 1)], c_mdl[(- 1)])
p = om.RectPlot()
p.addXY([0, mx], [0, mx], '1:1')
p.addXY(c_mdl, c_obs, key_text)
locs = (np.array([0, 0.05, 0.08, 0.11, 0.17, 0.3, 0.4, 0.7, 1]) * (kev.size - 2))
c0 = (mx * 1.05)
c1 = (mx * 1.1)
for loc in locs:
i0 = int(np.floor(loc))
frac = (loc - i0)
kevval = (((1 - frac) * kev[i0]) + (frac * kev[(i0 + 1)]))
mdlval = (((1 - frac) * c_mdl[i0]) + (frac * c_mdl[(i0 + 1)]))
obsval = (((1 - frac) * c_obs[i0]) + (frac * c_obs[(i0 + 1)]))
p.addXY([mdlval, mdlval], [c0, c1], ('%.2f keV' % kevval), dsn=2)
p.addXY([c0, c1], [obsval, obsval], None, dsn=2)
p.setLabels(('Cumulative model ' + unit), ('Cumulative data ' + unit))
p.defaultKeyOverlay.vAlign = 0.3
return p | Make a quantile-quantile plot comparing events and a model.
*kev*
A 1D, sorted array of event energy bins measured in keV.
*obs*
A 1D array giving the number or rate of events in each bin.
*mdl*
A 1D array giving the modeled number or rate of events in each bin.
*unit*
Text describing the unit in which *obs* and *mdl* are measured; will
be shown on the plot axes.
*key_text*
Text describing the quantile-quantile comparison quantity; will be
shown on the plot legend.
Returns:
An :class:`omega.RectPlot` instance.
*TODO*: nothing about this is Sherpa-specific. Same goes for some of the
plotting routines in :mod:`pkwit.environments.casa.data`; might be
reasonable to add a submodule for generic X-ray-y plotting routines. | codesearchnet |
def parse_unique_urlencoded(content):
urlencoded_params = urllib.parse.parse_qs(content)
params = {}
for (key, value) in six.iteritems(urlencoded_params):
if (len(value) != 1):
msg = ('URL-encoded content contains a repeated value:%s -> %s' % (key, ', '.join(value)))
raise ValueError(msg)
params[key] = value[0]
return params | Parses unique key-value parameters from urlencoded content.
Args:
content: string, URL-encoded key-value pairs.
Returns:
dict, The key-value pairs from ``content``.
Raises:
ValueError: if one of the keys is repeated. | codesearchnet |
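A hedged sketch of the same parsing rule using only the standard library; the query string is illustrative:

```python
from urllib.parse import parse_qs

content = "code=abc123&state=xyz"
urlencoded_params = parse_qs(content)
params = {}
for key, value in urlencoded_params.items():
    if len(value) != 1:
        raise ValueError("URL-encoded content contains a repeated value: %s -> %s"
                         % (key, ", ".join(value)))
    params[key] = value[0]
print(params)  # {'code': 'abc123', 'state': 'xyz'}
# "a=1&a=2" would raise ValueError here, matching the repeated-key check above.
```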
def batch_workflow_status(self, batch_workflow_id):
self.logger.debug('Get status of batch workflow: ' + batch_workflow_id)
url = '%(base_url)s/batch_workflows/%(batch_id)s' % {
'base_url': self.base_url, 'batch_id': batch_workflow_id
}
r = self.gbdx_connection.get(url)
return r.json() | Checks GBDX batch workflow status.
Args:
batch_workflow_id (str): Batch workflow id.
Returns:
Batch Workflow status (str). | juraj-google-style |
def get_user_groups(self, user):
self.project_service.set_auth(self._token_project)
return self.project_service.get_user_groups(user) | Get user's group memberships.
Args:
user (string): User name.
Returns:
(list): User's groups.
Raises:
requests.HTTPError on failure. | juraj-google-style |
def __getattr__(self, name):
self._conn.send((self._ACCESS, name))
return self._receive() | Request an attribute from the environment.
Note that this involves communication with the external process, so it can
be slow.
Args:
name: Attribute to access.
Returns:
Value of the attribute. | juraj-google-style |
def get_mnemonics(self, mnemonics, uwis=None, alias=None):
uwis = (uwis or self.uwis)
wells = [w for w in self.__list if (w.uwi in uwis)]
all_wells = []
for w in wells:
this_well = [w.get_mnemonic(m, alias=alias) for m in mnemonics]
all_wells.append(this_well)
return all_wells | Looks at all the wells in turn and returns the highest thing
in the alias table.
Args:
mnemonics (list)
alias (dict)
Returns:
list. A list of lists. | codesearchnet |
def checkDeterminism(self, dataset_fn, expect_determinism, expected_elements):
if expect_determinism:
dataset = dataset_fn(100)
actual = self.getDatasetOutput(dataset)
self.assertAllEqual(expected_elements, actual)
return
for delay_ms in [10, 100, 1000, 20000, 100000]:
dataset = dataset_fn(delay_ms)
actual = self.getDatasetOutput(dataset)
self.assertCountEqual(expected_elements, actual)
for i in range(len(actual)):
if actual[i] != expected_elements[i]:
return
self.fail('Failed to observe nondeterministic ordering') | Tests whether a dataset produces its elements deterministically.
`dataset_fn` takes a delay_ms argument, which tells it how long to delay
production of the first dataset element. This gives us a way to trigger
out-of-order production of dataset elements.
Args:
dataset_fn: A function taking a delay_ms argument.
expect_determinism: Whether to expect deterministic ordering.
expected_elements: The elements expected to be produced by the dataset,
assuming the dataset produces elements in deterministic order. | github-repos |
def as_object(obj):
LOGGER.debug('as_object(%s)', obj)
if isinstance(obj, datetime.date):
return as_date(obj)
elif hasattr(obj, '__dict__'):
out = {k: obj.__dict__[k] for k in obj.__dict__ if (not k.startswith('_'))}
for (k, v) in ((p, getattr(obj, p)) for (p, _) in inspect.getmembers(obj.__class__, (lambda x: isinstance(x, property)))):
out[k] = v
return out | Return a JSON serializable type for ``o``.
Args:
obj (:py:class:`object`): the object to be serialized.
Raises:
:py:class:`AttributeError`:
when ``o`` is not a Python object.
Returns:
(dict): JSON serializable type for the given object. | codesearchnet |
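A standalone sketch of the same idea: serialise an arbitrary object through `json.dumps` by exposing its public `__dict__` entries plus `@property` values. The `Account` class and the date handling (plain `isoformat`, standing in for `as_date`) are illustrative assumptions:

```python
import datetime
import inspect
import json

class Account:  # illustrative class, not from the original library
    def __init__(self, name):
        self.name = name
        self._secret = "hidden"                 # private attrs are skipped
        self.opened = datetime.date(2020, 1, 2)

    @property
    def label(self):
        return self.name.upper()

def as_object(obj):
    # Dates first (standing in for as_date), then public attrs + properties.
    if isinstance(obj, datetime.date):
        return obj.isoformat()
    out = {k: v for k, v in obj.__dict__.items() if not k.startswith("_")}
    for name, _ in inspect.getmembers(type(obj), lambda x: isinstance(x, property)):
        out[name] = getattr(obj, name)
    return out

print(json.dumps(Account("alice"), default=as_object))
# {"name": "alice", "opened": "2020-01-02", "label": "ALICE"}
```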
def compareBulk(self, retina_name, body):
resourcePath = '/compare/bulk'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [metric.Metric(**r) for r in response.json()] | Bulk compare
Args:
retina_name, str: The retina name (required)
body, ExpressionOperation: Bulk comparison of elements 2 by 2 (required)
Returns: Array[Metric] | juraj-google-style |
def add_report(self, specification_name, report):
self._reports[specification_name] = report
self._total = self._total + report.testsRun
self._failures = self._failures + len(report.failures)
self._errors = self._errors + len(report.errors)
self._success = self._total - self._failures - self._errors | Adds a given report with the given specification_name as key
to the reports list and computes the number of successes, failures
and errors
Args:
specification_name: string representing the specification (with ".spec")
report: The | juraj-google-style |
def _extend_before(self, other):
other_num_lines = other.num_lines()
self._lines = other.lines + self._lines
new_font_attr_segs = {}
for line_index in self.font_attr_segs:
new_font_attr_segs[other_num_lines + line_index] = self.font_attr_segs[line_index]
new_font_attr_segs.update(other.font_attr_segs)
self._font_attr_segs = new_font_attr_segs
new_annotations = {}
for key in self._annotations:
if isinstance(key, int):
new_annotations[other_num_lines + key] = self.annotations[key]
else:
new_annotations[key] = other.annotations[key]
new_annotations.update(other.annotations)
self._annotations = new_annotations | Add another RichTextLines object to the front.
Args:
other: (RichTextLines) The other object to add to the front to this
object. | github-repos |
def join(self, timeout_s=None):
if (not self.thread):
return False
self.thread.join(timeout_s)
return self.running | Joins blocking until the interval ends or until timeout is reached.
Args:
timeout_s: The time in seconds to wait, defaults to forever.
Returns:
True if the interval is still running and we reached the timeout. | codesearchnet |
def add_to_dumper(dumper: Type, classes: List[Type]) -> None:
if not isinstance(classes, list):
classes = [classes]
for class_ in classes:
if issubclass(class_, enum.Enum):
dumper.add_representer(class_, EnumRepresenter(class_))
elif issubclass(class_, str) or issubclass(class_, UserString):
dumper.add_representer(class_, UserStringRepresenter(class_))
else:
dumper.add_representer(class_, Representer(class_)) | Register user-defined classes with the Dumper.
This enables the Dumper to write objects of your classes to a \
YAML file. Note that all the arguments are types, not instances!
Args:
dumper: Your dumper class(!), derived from yatiml.Dumper
classes: One or more classes to add. | juraj-google-style |
def _forward_backward_log(state_trans_log_probs, initial_state_log_probs, final_state_log_probs, observed_log_probs, sequence_length):
if state_trans_log_probs.shape.ndims == 2:
perm = [1, 0]
elif state_trans_log_probs.shape.ndims == 3:
perm = [0, 2, 1]
else:
raise ValueError(f'Rank of argument `state_trans_log_probs` must be known and equal to 2 or 3. Received state_trans_log_probs={state_trans_log_probs} of rank {state_trans_log_probs.shape.ndims}')
bwd_state_trans_log_probs = array_ops.transpose(state_trans_log_probs, perm)
batch_size = _get_dim(observed_log_probs, 1)
def _forward(state_log_prob, obs_log_prob):
state_log_prob = array_ops.expand_dims(state_log_prob, axis=1)
state_log_prob += state_trans_log_probs
state_log_prob = math_ops.reduce_logsumexp(state_log_prob, axis=-1)
state_log_prob += obs_log_prob
log_prob_sum = math_ops.reduce_logsumexp(state_log_prob, axis=-1, keepdims=True)
state_log_prob -= log_prob_sum
return state_log_prob
fwd = _scan(_forward, observed_log_probs, initial_state_log_probs, inclusive=True)
def _backward(accs, elems):
state_log_prob, cum_log_sum = accs
obs_log_prob, mask = elems
state_log_prob += obs_log_prob
state_log_prob = array_ops.expand_dims(state_log_prob, axis=1)
state_log_prob += bwd_state_trans_log_probs
state_log_prob = math_ops.reduce_logsumexp(state_log_prob, axis=-1)
log_prob_sum = math_ops.reduce_logsumexp(state_log_prob, axis=-1, keepdims=True)
state_log_prob -= log_prob_sum
cum_log_sum += array_ops.squeeze(log_prob_sum, axis=[-1]) * mask
batched_mask = array_ops.expand_dims(mask, axis=1)
out = state_log_prob * batched_mask
out += final_state_log_probs * (1.0 - batched_mask)
return (out, cum_log_sum)
zero_log_sum = array_ops.zeros([batch_size])
maxlen = _get_dim(observed_log_probs, 0)
mask = array_ops.sequence_mask(sequence_length, maxlen, dtypes.float32)
mask = array_ops.transpose(mask, perm=[1, 0])
bwd, cum_log_sum = _scan(_backward, (observed_log_probs, mask), (final_state_log_probs, zero_log_sum), reverse=True, inclusive=True)
fwd_bwd_log_probs = fwd[1:] + bwd[1:]
fwd_bwd_log_probs_sum = math_ops.reduce_logsumexp(fwd_bwd_log_probs, axis=2, keepdims=True)
fwd_bwd_log_probs -= fwd_bwd_log_probs_sum
fwd_bwd_log_probs += math_ops.log(array_ops.expand_dims(mask, axis=2))
log_likelihood = bwd[0, :, 0] + cum_log_sum[0]
return (fwd_bwd_log_probs, log_likelihood) | Forward-backward algorithm computed in log domain.
Args:
state_trans_log_probs: tensor of shape [states, states] or if different
transition matrix per batch [batch_size, states, states]
initial_state_log_probs: tensor of shape [batch_size, states]
final_state_log_probs: tensor of shape [batch_size, states]
observed_log_probs: tensor of shape [frames, batch_size, states]
sequence_length: tensor of shape [batch_size]
Returns:
forward backward log probabilities: tensor of shape [frames, batch, states]
log_likelihood: tensor of shape [batch_size]
Raises:
ValueError: If state_trans_log_probs has unknown or incorrect rank. | github-repos |
def get_blocks(self, block_structure=None):
if (block_structure is None):
block_structure = self.block_structure
try:
return self._get_blocks(block_structure)
except IncompatibleBlockStructures as e:
raise e | For a reducible circuit, get a sequence of subblocks that when
concatenated again yield the original circuit. The block structure
given has to be compatible with the circuits actual block structure,
i.e. it can only be more coarse-grained.
Args:
block_structure (tuple): The block structure according to which the
subblocks are generated (default = ``None``, corresponds to the
circuit's own block structure)
Returns:
A tuple of subblocks that the circuit consists of.
Raises:
.IncompatibleBlockStructures | codesearchnet |
def quantile_for_list_of_values(self, **kwargs):
if self._is_transposed:
kwargs['axis'] = (kwargs.get('axis', 0) ^ 1)
return self.transpose().quantile_for_list_of_values(**kwargs)
axis = kwargs.get('axis', 0)
q = kwargs.get('q')
numeric_only = kwargs.get('numeric_only', True)
assert isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list))
if numeric_only:
new_columns = self.numeric_columns()
else:
new_columns = [col for (col, dtype) in zip(self.columns, self.dtypes) if (is_numeric_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype))]
if axis:
nonnumeric = [col for (col, dtype) in zip(self.columns, self.dtypes) if (not is_numeric_dtype(dtype))]
query_compiler = self.drop(columns=nonnumeric)
new_columns = query_compiler.index
else:
query_compiler = self
def quantile_builder(df, **kwargs):
result = df.quantile(**kwargs)
return (result.T if (axis == 1) else result)
func = query_compiler._prepare_method(quantile_builder, **kwargs)
q_index = pandas.Float64Index(q)
new_data = query_compiler._map_across_full_axis(axis, func)
if (axis == 1):
q_index = new_columns
new_columns = pandas.Float64Index(q)
result = self.__constructor__(new_data, q_index, new_columns)
return (result.transpose() if (axis == 1) else result) | Returns Manager containing quantiles along an axis for numeric columns.
Returns:
DataManager containing quantiles of original DataManager along an axis. | codesearchnet |
def find_dependencies(self, dataset_keys, **dfilter):
unknown_datasets = set()
for key in dataset_keys.copy():
(n, unknowns) = self._find_dependencies(key, **dfilter)
dataset_keys.discard(key)
if (n is not None):
dataset_keys.add(n.name)
if unknowns:
unknown_datasets.update(unknowns)
continue
self.add_child(self, n)
return unknown_datasets | Create the dependency tree.
Args:
dataset_keys (iterable): Strings or DatasetIDs to find dependencies for
**dfilter (dict): Additional filter parameters. See
`satpy.readers.get_key` for more details.
Returns:
(Node, set): Root node of the dependency tree and a set of unknown datasets | codesearchnet |
def get(self) -> Union[(Event, None)]:
message = self._queue.get_message()
if (message and (message['type'] == 'message')):
event_id = DB.get_event(self._pub_key, self._processed_key)
event_data_str = DB.get_hash_value(self._data_key, event_id)
event_dict = ast.literal_eval(event_data_str)
event_dict['id'] = event_id
event_dict['subscriber'] = self._subscriber
return Event.from_config(event_dict)
return None | Get the latest event from the queue.
Call this method to query the queue for the latest event.
If no event has been published None is returned.
Returns:
Event or None | codesearchnet |
def GetTSKFileByPathSpec(self, path_spec):
inode = getattr(path_spec, 'inode', None)
location = getattr(path_spec, 'location', None)
if inode is not None:
tsk_file = self._tsk_file_system.open_meta(inode=inode)
elif location is not None:
tsk_file = self._tsk_file_system.open(location)
else:
raise errors.PathSpecError(
'Path specification missing inode and location.')
return tsk_file | Retrieves the SleuthKit file object for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pytsk3.File: TSK file.
Raises:
PathSpecError: if the path specification is missing inode and location. | juraj-google-style |
def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):
if (not outdir):
outdir = self.structure_dir
if (not outdir):
raise ValueError('Output directory must be specified')
if (not pdb_file_type):
pdb_file_type = self.pdb_file_type
if (self.num_structures_experimental == 0):
log.debug('{}: no structures available - nothing will be downloaded'.format(self.id))
return
downloaded_pdb_ids = []
for s in self.get_experimental_structures():
log.debug('{}: downloading structure file from the PDB...'.format(s.id))
s.download_structure_file(outdir=outdir, file_type=pdb_file_type, force_rerun=force_rerun, load_header_metadata=True)
downloaded_pdb_ids.append(s.id)
return downloaded_pdb_ids | Download ALL mapped experimental structures to the protein structures directory.
Args:
outdir (str): Path to output directory, if protein structures directory not set or other output directory is
desired
pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired
force_rerun (bool): If files should be re-downloaded if they already exist
Returns:
list: List of PDB IDs that were downloaded
Todo:
* Parse mmtf or PDB file for header information, rather than always getting the cif file for header info | codesearchnet |
def confirmdir(self, target_directory):
try:
directory = self.resolve(target_directory)
except IOError as exc:
self.raise_os_error(exc.errno, target_directory)
if not directory.st_mode & S_IFDIR:
if self.is_windows_fs and IS_PY2:
error_nr = errno.EINVAL
else:
error_nr = errno.ENOTDIR
self.raise_os_error(error_nr, target_directory, 267)
return directory | Test that the target is actually a directory, raising OSError
if not.
Args:
target_directory: Path to the target directory within the fake
filesystem.
Returns:
The FakeDirectory object corresponding to target_directory.
Raises:
OSError: if the target is not a directory. | juraj-google-style |
def args_to_dict(args):
arguments = dict()
for arg in args.split(','):
(key, value) = arg.split('=')
arguments[key] = value
return arguments | Convert command line arguments in a comma separated string to a dictionary
Args:
args (str): Command line arguments
Returns:
DictUpperBound[str,str]: Dictionary of arguments | codesearchnet |
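A hedged usage sketch mirroring the helper above (re-implemented locally so the snippet is self-contained):

```python
def args_to_dict(args):
    arguments = {}
    for arg in args.split(","):
        key, value = arg.split("=")
        arguments[key] = value
    return arguments

print(args_to_dict("name=test,rows=100,verbose=true"))
# {'name': 'test', 'rows': '100', 'verbose': 'true'}
```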
def _compile_expression(self,
expr: Expression,
scope: Dict[str, TensorFluent],
batch_size: Optional[int] = None,
noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
etype2compiler = {
'constant': self._compile_constant_expression,
'pvar': self._compile_pvariable_expression,
'randomvar': self._compile_random_variable_expression,
'arithmetic': self._compile_arithmetic_expression,
'boolean': self._compile_boolean_expression,
'relational': self._compile_relational_expression,
'func': self._compile_function_expression,
'control': self._compile_control_flow_expression,
'aggregation': self._compile_aggregation_expression
}
etype = expr.etype
if etype[0] not in etype2compiler:
raise ValueError('Expression type unknown: {}'.format(etype))
with self.graph.as_default():
compiler_fn = etype2compiler[etype[0]]
return compiler_fn(expr, scope, batch_size, noise) | Compile the expression `expr` into a TensorFluent
in the given `scope` with optional batch size.
Args:
expr (:obj:`rddl2tf.expr.Expression`): A RDDL expression.
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
batch_size (Optional[size]): The batch size.
Returns:
:obj:`rddl2tf.fluent.TensorFluent`: The compiled TensorFluent. | juraj-google-style |
def returns_collection(self) -> bool:
return self.cardinality == Cardinality.COLLECTION or self.cardinality == Cardinality.CHILD_OF_COLLECTION | Indicates if the data type will evaluate to a collection.
Returns:
True in the following circumstances
- The data type represents an element with cardinality greater than one.
- The data type represents an element with a cardinality less than or
equal to one, but that element is a child of a collection and will
evaluate to a collection. For example, the path Patient.name.use will
return a collection, despite 'use' being a scalar, because it is a
child of the collection, 'name.'
False if the data type represents a scalar element whose parents are all
also scalars. | github-repos |
def generate_entry_label(entry):
if isinstance(entry, MultiEntry):
return ' + '.join([latexify_ion(e.name) for e in entry.entry_list])
else:
return latexify_ion(latexify(entry.name)) | Generates a label for the pourbaix plotter
Args:
entry (PourbaixEntry or MultiEntry): entry to get a label for | codesearchnet |
def level_cond_prior(prior_dist, z, latent, hparams, state):
latent_dist_encoder = hparams.get('latent_dist_encoder', None)
latent_skip = hparams.get('latent_skip', False)
if (latent_dist_encoder == 'pointwise'):
last_latent = latent
merge_std = hparams.level_scale
latent_shape = common_layers.shape_list(latent)
z_shape = common_layers.shape_list(z)
if (latent_shape != z_shape):
raise ValueError(('Expected latent_shape to be %s, got %s' % (latent_shape, z_shape)))
latent_dist = scale_gaussian_prior('latent_prior', latent, logscale_factor=3.0)
cond_dist = merge_level_and_latent_dist(prior_dist, latent_dist, merge_std=merge_std)
elif (latent_dist_encoder == 'conv_net'):
output_channels = common_layers.shape_list(z)[(- 1)]
last_latent = latent[(- 1)]
latent_stack = tf.concat(([prior_dist.loc] + latent), axis=(- 1))
latent_stack = noise_op(latent_stack, hparams)
cond_dist = latent_to_dist('latent_stack', latent_stack, hparams=hparams, output_channels=output_channels)
elif (latent_dist_encoder == 'conv3d_net'):
last_latent = latent[(- 1)]
output_channels = common_layers.shape_list(last_latent)[(- 1)]
num_steps = len(latent)
cond_latents = tf.stack(latent, axis=1)
prev_latents = tf.tile(tf.expand_dims(prior_dist.loc, axis=1), [1, num_steps, 1, 1, 1])
cond_latents = tf.concat((cond_latents, prev_latents), axis=(- 1))
cond_latents = noise_op(cond_latents, hparams)
cond_dist = temporal_latent_to_dist('latent_stack', cond_latents, hparams, output_channels=output_channels)
elif (latent_dist_encoder == 'conv_lstm'):
last_latent = latent
output_channels = common_layers.shape_list(z)[(- 1)]
latent_stack = tf.concat((prior_dist.loc, latent), axis=(- 1))
latent_stack = noise_op(latent_stack, hparams)
(_, state) = common_video.conv_lstm_2d(latent_stack, state, hparams.latent_encoder_width, kernel_size=3, name='conv_lstm')
cond_dist = single_conv_dist('state_to_dist', state.h, output_channels=output_channels)
if latent_skip:
new_mean = (cond_dist.loc + last_latent)
cond_dist = tfp.distributions.Normal(new_mean, cond_dist.scale)
return (cond_dist.loc, cond_dist.scale, state) | Returns a conditional prior for each level.
Args:
prior_dist: Distribution conditioned on the previous levels.
z: Tensor, output of the previous levels.
latent: Tensor or a list of tensors to condition the latent_distribution.
hparams: next_frame_glow hparams.
state: Current LSTM state. Used only if hparams.latent_dist_encoder is
a lstm.
Raises:
ValueError: If hparams.latent_dist_encoder is "pointwise" and if the shape
of latent is different from z. | codesearchnet |
def credits(self, **kwargs):
path = self._get_series_id_season_number_episode_number_path('credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | Get the TV episode credits by combination of season and episode number.
Returns:
A dict representation of the JSON returned from the API. | codesearchnet |
def save_summaries(frames, keys, selected_summaries, batch_dir, batch_name):
if (not frames):
logger.info('Could not save summaries - no summaries to save!')
logger.info('You have no frames - aborting')
return None
if (not keys):
logger.info('Could not save summaries - no summaries to save!')
logger.info('You have no keys - aborting')
return None
selected_summaries_dict = create_selected_summaries_dict(selected_summaries)
summary_df = pd.concat(frames, keys=keys, axis=1)
for (key, value) in selected_summaries_dict.items():
_summary_file_name = os.path.join(batch_dir, ('summary_%s_%s.csv' % (key, batch_name)))
_summary_df = summary_df.iloc[:, summary_df.columns.get_level_values(1) == value]
_header = _summary_df.columns
_summary_df.to_csv(_summary_file_name, sep=';')
logger.info(('saved summary (%s) to:\n %s' % (key, _summary_file_name)))
logger.info('finished saving summaries')
return summary_df | Writes the summaries to csv-files
Args:
frames: list of ``cellpy`` summary DataFrames
keys: list of indexes (typically run-names) for the different runs
selected_summaries: list defining which summary data to save
batch_dir: directory to save to
batch_name: the batch name (will be used for making the file-name(s))
Returns: a pandas DataFrame with your selected summaries. | codesearchnet |
def DeleteRecords(cls, ids, token):
with data_store.DB.GetMutationPool() as mutation_pool:
mutation_pool.QueueDeleteRecords(ids) | Delete records identified by ids.
Args:
ids: A list of ids provided by ClaimRecords.
token: The database access token to delete with.
Raises:
LockError: If the queue is not locked. | codesearchnet |
def __str__(self):
text = super(Baken, self).__format__('dms')
if self._locator:
text = '%s (%s)' % (self._locator, text)
return text | Pretty printed location string.
Args:
mode (str): Coordinate formatting system to use
Returns:
str: Human readable string representation of ``Baken`` object | juraj-google-style |
def __init__(self, user_pipeline: beam.Pipeline, pcolls: Optional[Set[beam.pvalue.PCollection]]=None):
assert not pcolls or all((pcoll.pipeline is user_pipeline for pcoll in pcolls)), 'All %s need to belong to %s' % (pcolls, user_pipeline)
self._user_pipeline = user_pipeline
self._pcolls = pcolls
self._cache_manager = ie.current_env().get_cache_manager(self._user_pipeline, create_if_absent=True)
if background_caching_job.has_source_to_cache(self._user_pipeline):
self._cache_manager = ie.current_env().get_cache_manager(self._user_pipeline)
_, self._context = self._user_pipeline.to_runner_api(return_context=True)
self._context.component_id_map = copy.copy(self._user_pipeline.component_id_map)
self._cacheables = self.cacheables() | Initializes a pipeline for augmenting interactive flavor.
Args:
user_pipeline: a beam.Pipeline instance defined by the user.
pcolls: cacheable pcolls to be computed/retrieved. If the set is
empty, all intermediate pcolls assigned to variables are applicable. | github-repos |
def start(self, extra_args='', tag=''):
if self.started:
return
utils.create_dir(self.log_path)
if tag:
tag = tag + ','
out_file_name = 'IPerfServer,{},{}{}.log'.format(self.port, tag, len(self.log_files))
full_out_path = os.path.join(self.log_path, out_file_name)
cmd = '%s %s > %s' % (self.iperf_str, extra_args, full_out_path)
self.iperf_process = utils.start_standing_subprocess(cmd, shell=True)
self.log_files.append(full_out_path)
self.started = True | Starts iperf server on specified port.
Args:
extra_args: A string representing extra arguments to start iperf
server with.
tag: Appended to log file name to identify logs from different
iperf runs. | github-repos |
def _bits_in_condition(self, cond):
all_bits = []
if (cond is not None):
all_bits.extend([(cond[0], j) for j in range(self.cregs[cond[0].name].size)])
return all_bits | Return a list of bits in the given condition.
Args:
cond (tuple or None): optional condition (ClassicalRegister, int)
Returns:
list[(ClassicalRegister, idx)]: list of bits | codesearchnet |
def heightmap_count_cells(hm: np.ndarray, mi: float, ma: float) -> int:
return int(lib.TCOD_heightmap_count_cells(_heightmap_cdata(hm), mi, ma)) | Return the number of map cells which value is between ``mi`` and ``ma``.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
mi (float): The lower bound.
ma (float): The upper bound.
Returns:
int: The count of values which fall between ``mi`` and ``ma``.
.. deprecated:: 8.1
Can be replaced by an equivalent NumPy function such as:
``numpy.count_nonzero((mi <= hm) & (hm < ma))`` | juraj-google-style |
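A quick check of the pure-NumPy replacement suggested in the deprecation note above:

```python
import numpy as np

hm = np.array([[0.1, 0.5], [0.9, 1.2]], dtype=np.float32)
mi, ma = 0.4, 1.0
print(np.count_nonzero((mi <= hm) & (hm < ma)))  # 2  (0.5 and 0.9 fall in [0.4, 1.0))
```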
def value_loss_given_predictions(value_prediction, rewards, reward_mask, gamma=0.99):
(B, T) = rewards.shape
assert ((B, T) == reward_mask.shape)
assert ((B, (T + 1), 1) == value_prediction.shape)
value_prediction = np.squeeze(value_prediction, axis=2)
value_prediction = (value_prediction[:, :-1] * reward_mask)
r2g = rewards_to_go(rewards, reward_mask, gamma=gamma)
loss = ((value_prediction - r2g) ** 2)
return (np.sum(loss) / np.sum(reward_mask)) | Computes the value loss given the prediction of the value function.
Args:
value_prediction: np.ndarray of shape (B, T+1, 1)
rewards: np.ndarray of shape (B, T) of rewards.
reward_mask: np.ndarray of shape (B, T), the mask over rewards.
gamma: float, discount factor.
Returns:
The average L2 value loss, averaged over instances where reward_mask is 1. | codesearchnet |
def _checkResponseWriteData(payload, writedata):
_checkString(payload, minlength=4, description='payload')
_checkString(writedata, minlength=2, maxlength=2, description='writedata')
BYTERANGE_FOR_WRITEDATA = slice(2, 4)
receivedWritedata = payload[BYTERANGE_FOR_WRITEDATA]
if receivedWritedata != writedata:
raise ValueError('Wrong write data in the response: {0!r}, but commanded is {1!r}. The data payload is: {2!r}'.format( \
receivedWritedata, writedata, payload)) | Check that the write data as given in the response is correct.
The bytes 2 and 3 (zero based counting) in the payload holds the write data.
Args:
* payload (string): The payload
* writedata (string): The data to write, length should be 2 bytes.
Raises:
TypeError, ValueError | juraj-google-style |
def __gt__(self, other):
if other.__class__ is not self.__class__:
return NotImplemented
return not self <= other | Test if self is greater than an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is greater than other; else False.
Raises:
TypeError: Raised if the objects are not of the same class. | juraj-google-style |
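A standalone sketch of the comparison pattern above: `__gt__` defined in terms of `__le__`, returning `NotImplemented` for mismatched classes. The `Version` class is illustrative:

```python
class Version:  # illustrative class
    def __init__(self, n):
        self.n = n

    def __le__(self, other):
        if other.__class__ is not self.__class__:
            return NotImplemented
        return self.n <= other.n

    def __gt__(self, other):
        if other.__class__ is not self.__class__:
            return NotImplemented
        return not self <= other

print(Version(3) > Version(2))  # True
print(Version(1) > Version(2))  # False
```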
def menu(title, options, cancel_label="Cancel", flag_allow_empty=False, flag_cancel=True, ch='.'):
num_options, flag_ok = len(options), 0
option = None
min_allowed = 0 if flag_cancel else 1
while True:
print("")
for line in format_box(title, ch):
print(" "+line)
for i, s in enumerate(options):
print((" {0:d} - {1!s}".format(i+1, s)))
if flag_cancel: print((" 0 - << (*{0!s}*)".format(cancel_label)))
try:
s_option = input('? ')
except KeyboardInterrupt:
raise
except:
print("")
n_try = 0
while True:
if n_try >= 10:
print('You are messing up!')
break
if len(s_option) == 0 and flag_allow_empty:
flag_ok = True
break
try:
option = int(s_option)
if min_allowed <= option <= num_options:
flag_ok = True
break
except ValueError:
print("Invalid integer value!")
print(("Invalid option, range is [{0:d}, {1:d}]!".format(0 if flag_cancel else 1, num_options)))
n_try += 1
s_option = input("? ")
if flag_ok:
break
return option | Text menu.
Arguments:
title -- menu title, to appear at the top
options -- sequence of strings
cancel_label='Cancel' -- label to show at last "zero" option
flag_allow_empty=0 -- Whether to allow empty option
flag_cancel=True -- whether there is a "0 - Cancel" option
ch="." -- character to use to draw frame around title
Returns:
option -- an integer: None; 0-Back/Cancel/etc; 1, 2, ...
Adapted from irootlab menu.m | juraj-google-style |
def category(msg):
if ((common.typecode(msg) < 1) or (common.typecode(msg) > 4)):
raise RuntimeError(('%s: Not a identification message' % msg))
msgbin = common.hex2bin(msg)
return common.bin2int(msgbin[5:8]) | Aircraft category number
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
int: category number | codesearchnet |
def _iterdump(self, file_name, headers=None):
if headers is None:
headers = ["Discharge_Capacity", "Charge_Capacity"]
step_txt = self.headers_normal['step_index_txt']
point_txt = self.headers_normal['data_point_txt']
cycle_txt = self.headers_normal['cycle_index_txt']
self.logger.debug("iterating through file: %s" % file_name)
if not os.path.isfile(file_name):
print("Missing file_\n %s" % file_name)
filesize = os.path.getsize(file_name)
hfilesize = humanize_bytes(filesize)
txt = "Filesize: %i (%s)" % (filesize, hfilesize)
self.logger.info(txt)
table_name_global = TABLE_NAMES["global"]
table_name_stats = TABLE_NAMES["statistic"]
table_name_normal = TABLE_NAMES["normal"]
temp_dir = tempfile.gettempdir()
temp_filename = os.path.join(temp_dir, os.path.basename(file_name))
shutil.copy2(file_name, temp_dir)
constr = self.__get_res_connector(temp_filename)
if use_ado:
conn = dbloader.connect(constr)
else:
conn = dbloader.connect(constr, autocommit=True)
self.logger.debug("tmp file: %s" % temp_filename)
self.logger.debug("constr str: %s" % constr)
self.logger.debug("reading global data table")
sql = "select * from %s" % table_name_global
global_data_df = pd.read_sql_query(sql, conn)
self.logger.debug("sql statement: %s" % sql)
tests = global_data_df[self.headers_normal['test_id_txt']]
number_of_sets = len(tests)
self.logger.debug("number of datasets: %i" % number_of_sets)
self.logger.debug("only selecting first test")
test_no = 0
self.logger.debug("setting data for test number %i" % test_no)
loaded_from = file_name
start_datetime = global_data_df[self.headers_global['start_datetime_txt']][test_no]
test_ID = int(global_data_df[self.headers_normal['test_id_txt']][test_no])
test_name = global_data_df[self.headers_global['test_name_txt']][test_no]
self.logger.debug("reading raw-data")
columns = ["Data_Point", "Step_Index", "Cycle_Index"]
columns.extend(headers)
columns_txt = ", ".join(["%s"] * len(columns)) % tuple(columns)
sql_1 = "select %s " % columns_txt
sql_2 = "from %s " % table_name_normal
sql_3 = "where %s=%s " % (self.headers_normal['test_id_txt'], test_ID)
sql_5 = "order by %s" % self.headers_normal['data_point_txt']
import time
info_list = []
info_header = ["cycle", "row_count", "start_point", "end_point"]
info_header.extend(headers)
self.logger.info(" ".join(info_header))
self.logger.info("-------------------------------------------------")
for cycle_number in range(1, 2000):
t1 = time.time()
self.logger.debug("picking cycle %i" % cycle_number)
sql_4 = "AND %s=%i " % (cycle_txt, cycle_number)
sql = sql_1 + sql_2 + sql_3 + sql_4 + sql_5
self.logger.debug("sql statement: %s" % sql)
normal_df = pd.read_sql_query(sql, conn)
t2 = time.time()
dt = t2 - t1
self.logger.debug("time: %f" % dt)
if normal_df.empty:
self.logger.debug("reached the end")
break
row_count, _ = normal_df.shape
start_point = normal_df[point_txt].min()
end_point = normal_df[point_txt].max()
last = normal_df.iloc[-1, :]
step_list = [cycle_number, row_count, start_point, end_point]
step_list.extend([last[x] for x in headers])
info_list.append(step_list)
self._clean_up_loadres(None, conn, temp_filename)
info_dict = pd.DataFrame(info_list, columns=info_header)
return info_dict | Function for dumping values from a file.
Should only be used by developers.
Args:
file_name: name of the file
headers: list of headers to pick
default:
["Discharge_Capacity", "Charge_Capacity"]
Returns: pandas.DataFrame | juraj-google-style |
def installed_capabilities(image=None):
if (salt.utils.versions.version_cmp(__grains__['osversion'], '10') == (- 1)):
raise NotImplementedError('`installed_capabilities` is not available on this version of Windows: {0}'.format(__grains__['osversion']))
return _get_components('Capability Identity', 'Capabilities', 'Installed') | List the capabilities installed on the system
Args:
image (Optional[str]): The path to the root directory of an offline
Windows image. If `None` is passed, the running operating system is
targeted. Default is None.
Raises:
NotImplementedError: For all versions of Windows that are not Windows 10
and later. Server editions of Windows use ServerManager instead.
Returns:
list: A list of installed capabilities
CLI Example:
.. code-block:: bash
salt '*' dism.installed_capabilities | codesearchnet |
def get_existing_pipelines(self):
url = '{0}/applications/{1}/pipelineConfigs'.format(API_URL, self.app_name)
resp = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
assert resp.ok, 'Failed to lookup pipelines for {0}: {1}'.format(self.app_name, resp.text)
return resp.json() | Get existing pipeline configs for specific application.
Returns:
str: Pipeline config json | codesearchnet |
def get_lowest_values(self, count):
count = int(count)
assert (count <= len(self._values)), 'count must be <= to Data Collection len. {} > {}.'.format(count, len(self._values))
assert (count > 0), 'count must be greater than 0. Got {}.'.format(count)
lowest_values = sorted(self._values)[0:count]
lowest_values_index = sorted(list(xrange(len(self._values))), key=(lambda k: self._values[k]))[0:count]
return (lowest_values, lowest_values_index) | Get a list of the the x lowest values of the Data Collection and their indices.
This is useful for situations where one needs to know the times of
the year when the smallest values of a data collection occur.
Args:
count: Integer representing the number of lowest values to account for.
Returns:
lowest_values: The n lowest values in data list, ordered from
lowest to highest.
lowest_values_index: Indices of the n lowest values in data
list, ordered from lowest to highest. | codesearchnet |
def GetRawDevice(path):
path = CanonicalPathToLocalPath(path)
try:
path = win32file.GetLongPathName(path)
except pywintypes.error:
pass
try:
mount_point = win32file.GetVolumePathName(path)
except pywintypes.error as details:
logging.info("path not found. %s", details)
raise IOError("No mountpoint for path: %s" % path)
if not path.startswith(mount_point):
stripped_mp = mount_point.rstrip("\\")
if not path.startswith(stripped_mp):
raise IOError("path %s is not mounted under %s" % (path, mount_point))
corrected_path = LocalPathToCanonicalPath(path[len(mount_point):])
corrected_path = utils.NormalizePath(corrected_path)
volume = win32file.GetVolumeNameForVolumeMountPoint(mount_point).rstrip("\\")
volume = LocalPathToCanonicalPath(volume)
result = rdf_paths.PathSpec(
path=volume,
pathtype=rdf_paths.PathSpec.PathType.OS,
mount_point=mount_point.rstrip("\\"))
return result, corrected_path | Resolves the raw device that contains the path.
Args:
path: A path to examine.
Returns:
A pathspec to read the raw device as well as the modified path to read
within the raw device. This is usually the path without the mount point.
Raises:
IOError: if the path does not exist or some unexpected behaviour occurs. | juraj-google-style |
def convert_frame_change(self, shift, instruction):
command_dict = {
'name': 'fc',
't0': shift+instruction.start_time,
'ch': instruction.channels[0].name,
'phase': instruction.command.phase
}
return self._qobj_model(**command_dict) | Return converted `FrameChangeInstruction`.
Args:
shift(int): Offset time.
instruction (FrameChangeInstruction): frame change instruction.
Returns:
dict: Dictionary of required parameters. | juraj-google-style |
def potential_purviews(self, direction, mechanism, purviews=False):
if purviews is False:
purviews = self.network.potential_purviews(direction, mechanism)
purviews = [purview for purview in purviews
if set(purview).issubset(self.node_indices)]
return irreducible_purviews(self.cm, direction, mechanism, purviews) | Return all purviews that could belong to the |MIC|/|MIE|.
Filters out trivially-reducible purviews.
Args:
direction (Direction): |CAUSE| or |EFFECT|.
mechanism (tuple[int]): The mechanism of interest.
Keyword Args:
purviews (tuple[int]): Optional subset of purviews of interest. | juraj-google-style |
def _make_pr_entry(self, step, wall_time, data_array, thresholds):
true_positives = [int(v) for v in data_array[metadata.TRUE_POSITIVES_INDEX]]
false_positives = [int(v) for v in data_array[metadata.FALSE_POSITIVES_INDEX]]
tp_index = metadata.TRUE_POSITIVES_INDEX
fp_index = metadata.FALSE_POSITIVES_INDEX
positives = data_array[[tp_index, fp_index], :].astype(int).sum(axis=0)
end_index_inclusive = (len(positives) - 1)
while ((end_index_inclusive > 0) and (positives[end_index_inclusive] == 0)):
end_index_inclusive -= 1
end_index = (end_index_inclusive + 1)
return {'wall_time': wall_time, 'step': step, 'precision': data_array[metadata.PRECISION_INDEX, :end_index].tolist(), 'recall': data_array[metadata.RECALL_INDEX, :end_index].tolist(), 'true_positives': true_positives[:end_index], 'false_positives': false_positives[:end_index], 'true_negatives': [int(v) for v in data_array[metadata.TRUE_NEGATIVES_INDEX][:end_index]], 'false_negatives': [int(v) for v in data_array[metadata.FALSE_NEGATIVES_INDEX][:end_index]], 'thresholds': thresholds[:end_index]} | Creates an entry for PR curve data. Each entry corresponds to 1 step.
Args:
step: The step.
wall_time: The wall time.
data_array: A numpy array of PR curve data stored in the summary format.
thresholds: An array of floating point thresholds.
Returns:
A PR curve entry. | codesearchnet |
def get_metadata(self):
if (self._metadata is None):
self._metadata = self._source.get_metadata(self._handle)
return self._metadata | Returns the associated metadata info for this template version
Returns:
dict: Metadata for this version | codesearchnet |
def load_values(self, dictionary, as_defaults=False, flat=False):
if flat:
separator = self.settings.str_path_separator
flat_dictionary = dictionary
dictionary = collections.OrderedDict()
for k, v in flat_dictionary.items():
k_parts = k.split(separator)
c = dictionary
for i, kp in enumerate(k_parts):
if i >= len(k_parts) - 1:
c[kp] = v
else:
if kp not in c:
c[kp] = collections.OrderedDict()
c = c[kp]
for name, value in dictionary.items():
if name not in self:
if as_defaults:
if isinstance(value, dict):
self[name] = self.create_section()
self[name].load_values(value, as_defaults=as_defaults)
else:
self[name] = self.create_item(name, default=value)
else:
pass
continue
resolution = self._get_item_or_section(name, handle_not_found=False)
if is_config_item(resolution):
if as_defaults:
resolution.default = value
else:
resolution.value = value
else:
resolution.load_values(value, as_defaults=as_defaults) | Import config values from a dictionary.
When ``as_defaults`` is set to ``True``, the values
imported will be set as defaults. This can be used to
declare the sections and items of configuration.
Values of sections and items in ``dictionary`` can be
dictionaries as well as instances of :class:`.Item` and
:class:`.Config`.
Args:
dictionary:
as_defaults: if ``True``, the imported values will be set as defaults. | juraj-google-style |
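A minimal usage sketch for the flat-loading path above; `config` is a hypothetical instance of the containing section class, assumed here to use '.' as its str_path_separator:

# Hypothetical: 'config' is an instance of the section class above.
flat_values = {'db.host': 'localhost', 'db.port': 5432}
# flat=True expands the dotted keys into nested sections before loading;
# as_defaults=True declares the sections and items instead of overwriting values.
config.load_values(flat_values, as_defaults=True, flat=True)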
def filter_list(lst, takeout, case_sensitive=True):
takeout = force_list(takeout)
if (not case_sensitive):
lst = [x.lower() for x in lst]
takeout = [y.lower() for y in takeout]
return [x for x in lst if (x not in takeout)] | Return a modified list removing items specified.
Args:
lst: Original list of values
takeout: Object or objects to remove from lst
case_sensitive: if the search should be case sensitive
Returns:
list: Filtered list of values | codesearchnet |
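A short usage sketch for filter_list; it relies on the library's force_list helper to accept either a single value or a list for takeout. Note that with case_sensitive=False the returned values are lowercased as well:

names = ['Alice', 'Bob', 'carol']
filter_list(names, 'bob', case_sensitive=False)  # -> ['alice', 'carol']
filter_list(names, ['Bob'])                      # -> ['Alice', 'carol']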
def add(self, key, value, expire=0, noreply=None):
if (noreply is None):
noreply = self.default_noreply
return self._store_cmd(b'add', {key: value}, expire, noreply)[key] | The memcached "add" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, the return value is always True. Otherwise the
return value is True if the value was stored, and False if it was
not (because the key already existed). | codesearchnet |
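A hedged usage sketch; `client` stands in for an instance of this memcached client class, assumed to have been constructed elsewhere with default_noreply=False:

stored = client.add(b'greeting', b'hello', expire=60)  # True if the key was new
stored = client.add(b'greeting', b'hi')                # False: key already exists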
def max_pool(x, pool_size, strides, padding):
x = tf_np.asarray(x)
return tf_np.asarray(nn_ops.pool(input=x, window_shape=pool_size, pooling_type='MAX', strides=strides, padding=padding)) | Performs an N-D max pooling.
Args:
x: ndarray of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]`. Pooling happens over the spatial dimensions only.
pool_size: sequence of N ints.
strides: sequence of N ints.
padding: a string, the padding algorithm. Must be "SAME" or "VALID".
Returns:
An (N+2)-D array, of shape
[batch_size] + output_spatial_shape + [num_channels],
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (pool_size[i] - 1)) / strides[i]). | github-repos |
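A small usage sketch, assuming the tf_np (NumPy-on-TensorFlow) environment this helper is written against is available:

import numpy as np
x = np.arange(16, dtype=np.float32).reshape(1, 4, 4, 1)  # [batch, H, W, channels]
y = max_pool(x, pool_size=(2, 2), strides=(2, 2), padding='VALID')
# y has shape (1, 2, 2, 1); each output element is the max over a 2x2 window.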
def pprint(self, initials_only=False):
last_name = self.last
suffixes = ((', ' + self.suffix) if self.suffix else '')
if (initials_only and (last_name != u'')):
first_names = self.first_initials
else:
first_names = self.first
return u'{} {}{}'.format(first_names, last_name, suffixes).strip() | Pretty print the name.
Args:
initials_only (bool): ``True`` if we want the first names to be displayed with
only the initial followed by a dot. ``False`` otherwise.
Examples:
>>> ParsedName('Lieber, Stanley Martin').pprint()
u'Stanley Martin Lieber'
>>> ParsedName('Lieber, Stanley Martin').pprint(initials_only=True)
u'S. M. Lieber'
>>> ParsedName('Downey, Robert Jr.').pprint(initials_only=True)
u'R. Downey Jr.' | codesearchnet |
def valueWritePreprocessor(valueString, replaceParamsFile=None):
if (type(valueString) is bool):
log.warning('Only numerical variable types can be handled by the valueWritePreprocessor function.')
return valueString
variableString = valueString
if (replaceParamsFile is not None):
if (variableString == REPLACE_NO_VALUE):
variableString = '[NO_VARIABLE]'
else:
try:
number = int(valueString)
if (number < 0):
parameterID = (number * (- 1))
for targetParam in replaceParamsFile.targetParameters:
if (targetParam.id == parameterID):
variableString = targetParam.targetVariable
break
except:
pass
return variableString | Look up variable name in replace param file for the negative id given and return it.
Args:
valueString (str): String representing the value to be preprocessed.
replaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance of the replace param file. Required if
replacement variables are included in the project.
Returns:
str: Processed value as a string | codesearchnet |
def all_max(tensors):
return _apply_all_reduce('max', tensors) | Returns a list of tensors with the all-reduce max across `tensors`.
The computation is done with an all-reduce operation, so if only some of the
returned tensors are evaluated then the computation will hang.
Args:
tensors: The input tensors across which to reduce; must be assigned
to GPU devices.
Returns:
List of tensors, each with the maximum of the input tensors, where tensor i
has the same device as `tensors[i]`. | github-repos |
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is not None:
return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
return [1] + [0] * len(token_ids_0) + [1] | Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. | github-repos |
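A hedged usage sketch; `tokenizer` is assumed to be an instance of the tokenizer class this method belongs to, and the token ids are illustrative only:

mask = tokenizer.get_special_tokens_mask([5, 6, 7], token_ids_1=[8, 9])
# -> [1, 0, 0, 0, 1, 0, 0, 1]: special-token positions (CLS/SEP-style) are marked with 1.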
def add_file(profile, branch, file_path, file_contents, is_executable=False, commit_message=None):
branch_sha = get_branch_sha(profile, branch)
tree = get_files_in_branch(profile, branch_sha)
new_tree = add_file_to_tree(tree, file_path, file_contents, is_executable)
data = trees.create_tree(profile, new_tree)
sha = data.get('sha')
if (not commit_message):
commit_message = (('Added ' + file_path) + '.')
parents = [branch_sha]
commit_data = commits.create_commit(profile, commit_message, sha, parents)
commit_sha = commit_data.get('sha')
ref_data = refs.update_ref(profile, ('heads/' + branch), commit_sha)
return ref_data | Add a file to a branch.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
branch
The name of a branch.
file_path
The path of the new file in the tree.
file_contents
The (UTF-8 encoded) contents of the new file.
is_executable
If ``True``, the new file will get executable permissions (0755).
Otherwise, it will get 0644 permissions.
commit_message
A commit message to give to the commit.
Returns:
A dict with data about the branch's new ref (it includes the new SHA
the branch's HEAD points to, after committing the new file). | codesearchnet |
def _image_url(array, fmt='png', mode='data', quality=90, domain=None):
supported_modes = ('data',)
if mode not in supported_modes:
    message = "Unsupported mode '%s', should be one of '%s'."
    raise ValueError(message % (mode, supported_modes))
image_data = serialize_array(array, fmt=fmt, quality=quality)
base64_byte_string = base64.b64encode(image_data).decode('ascii')
return ((('data:image/' + fmt.upper()) + ';base64,') + base64_byte_string) | Create a data URL representing an image from a PIL.Image.
Args:
array: a numpy array to encode as an image
mode: presently only supports "data" for data URL
Returns:
URL representing image | codesearchnet |
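A usage sketch, assuming serialize_array can encode the given array in the requested format:

import numpy as np
rgb = np.random.rand(32, 32, 3)
url = _image_url(rgb, fmt='png')
# url starts with 'data:image/PNG;base64,' and can be embedded directly in an <img> tag.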
def _embedding_lookup_for_sparse_tensor(self, inp: sparse_tensor.SparseTensor, weight: Optional[sparse_tensor.SparseTensor], table: tf_variables.Variable, feature: tpu_embedding_v2_utils.FeatureConfig) -> tensor.Tensor:
def sparse_to_dense_computation(inp, weight):
if weight is None:
weight = sparse_tensor.SparseTensor(inp.indices, array_ops.ones_like(inp.values, dtype=dtypes.float32), dense_shape=inp.dense_shape)
inp = sparse_ops.sparse_tensor_to_dense(inp)
weight = sparse_ops.sparse_tensor_to_dense(weight)
return (inp, weight)
inp, weight = tpu_replication.outside_compilation(sparse_to_dense_computation, inp=inp, weight=weight)
embeddings = embedding_ops.embedding_lookup_v2(table, inp)
weight = array_ops.expand_dims(weight, -1)
embeddings *= weight
if not feature.output_shape and feature.max_sequence_length > 0:
embeddings = self._pad_or_truncate_with_sequence_length(embeddings, feature.max_sequence_length)
else:
embeddings = self._apply_combiner_to_embeddings(embeddings, weight, feature.table.combiner)
return embeddings | Embedding lookup for sparse tensor based on its feature config.
Args:
inp: a single SparseTensor input.
weight: None or SparseTensor which has the same shape of the input.
table: a table variable.
feature: a feature config.
Returns:
Embedding lookup result. | github-repos |
def reduce_sum(self, x):
return self.reduce(lambda y: math_ops.reduce_sum(y, axis=0), x) | Performs a sum reduction on `x` across pfor iterations.
Note that this currently may not work inside a control flow construct.
Args:
x: an unvectorized Tensor.
Returns:
A Tensor that has same rank as `x`. The value is the sum of the values
of `x` across the pfor iterations. | github-repos |
def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
if not isinstance(grid_pinpoints, list):
raise TypeError('grid_pinpoints should be a list of tuples or lists')
if not isinstance(image_size, (list, tuple)):
if not isinstance(image_size, (torch.Tensor, np.ndarray)):
raise TypeError(f'image_size invalid type: {type(image_size)} not valid, should be either list, tuple, np.ndarray or tensor')
image_size = image_size.tolist()
height, width = select_best_resolution(image_size, grid_pinpoints)
return (height 
Args:
image_size (`tuple`):
The size of the input image in the format (width, height).
grid_pinpoints (`List`):
A list containing possible resolutions. Each item in the list should be a tuple or list
of the form `(height, width)`.
patch_size (`int`):
The size of each image patch.
Returns:
tuple: The shape of the image patch grid in the format (width, height). | github-repos |
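A usage sketch; the chosen resolution depends on select_best_resolution, which is assumed here to pick (672, 672) for this input:

grid = get_anyres_image_grid_shape(
    image_size=(600, 800),
    grid_pinpoints=[(336, 672), (672, 336), (672, 672)],
    patch_size=336,
)
# -> (2, 2): the selected 672x672 resolution split into 336x336 patches.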
def get_sns_topic_arn(topic_name, account, region):
if ((topic_name.count(':') == 5) and topic_name.startswith('arn:aws:sns:')):
return topic_name
session = boto3.Session(profile_name=account, region_name=region)
sns_client = session.client('sns')
topics = sns_client.list_topics()['Topics']
matched_topic = None
for topic in topics:
topic_arn = topic['TopicArn']
if (topic_name == topic_arn.split(':')[(- 1)]):
matched_topic = topic_arn
break
else:
LOG.critical('No topic with name %s found.', topic_name)
raise SNSTopicNotFound('No topic with name {0} found'.format(topic_name))
return matched_topic | Get SNS topic ARN.
Args:
topic_name (str): Name of the topic to lookup.
account (str): Environment, e.g. dev
region (str): Region name, e.g. us-east-1
Returns:
str: ARN for requested topic name | codesearchnet |
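A usage sketch with hypothetical topic, profile, and region names:

arn = get_sns_topic_arn('my-alerts', account='dev', region='us-east-1')
# Passing a full ARN such as 'arn:aws:sns:us-east-1:123456789012:my-alerts'
# is returned unchanged without performing a lookup.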
def convert_to_torch_compatible(cls, x):
return x | Convert a tensor to something that the Torch backend can consume.
This can be a Torch tensor, NumPy array or any other type of tensor that
`keras.backend.torch.core.convert_to_tensor()` can consume.
Only called after slicing using `__getitem__`.
Used to densify sparse tensors and ragged tensors.
Args:
x: the tensor to convert.
Returns: the converted tensor. | github-repos |
def get_block_header(self, block_hash, id=None, endpoint=None):
return self._call_endpoint(GET_BLOCK_HEADER, params=[block_hash, 1], id=id, endpoint=endpoint) | Get the corresponding block header information for the specified block hash.
Args:
block_hash: (str) the block hash (e.g. 'a5508c9b6ed0fc09a531a62bc0b3efcb6b8a9250abaf72ab8e9591294c1f6957')
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call | juraj-google-style |
def rationalize(flt: float, denominators: Set[int]=None) -> Fraction:
if (denominators is None):
denominators = _DENOMINATORS
frac = Fraction.from_float(flt).limit_denominator()
if (frac.denominator not in denominators):
raise ValueError('Cannot rationalize')
return frac | Convert a floating point number to a Fraction with a small
denominator.
Args:
flt: A floating point number
denominators: Collection of standard denominators. Default is
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 32, 64, 128, 256, 512,
1024, 2048, 4096, 8192
Raises:
ValueError: If cannot rationalize float | codesearchnet |
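A short usage sketch for rationalize:

rationalize(0.75)                                  # -> Fraction(3, 4)
rationalize(1 / 3)                                 # -> Fraction(1, 3)
rationalize(0.123, denominators={10, 100, 1000})   # -> Fraction(123, 1000)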
def deploy_ray_axis_func(axis, func, num_splits, kwargs, *partitions):
table = concat_arrow_table_partitions(axis, partitions)
try:
result = func(table, **kwargs)
except Exception:
result = pyarrow.Table.from_pandas(func(table.to_pandas(), **kwargs))
return split_arrow_table_result(
axis, result, len(partitions), num_splits, table.schema.metadata
) | Deploy a function along a full axis in Ray.
Args:
axis: The axis to perform the function along.
func: The function to perform.
num_splits: The number of splits to return
(see `split_result_of_axis_func_pandas`)
kwargs: A dictionary of keyword arguments.
partitions: All partitions that make up the full axis (row or column)
Returns:
A list of Pandas DataFrames. | juraj-google-style |
def save_headers(cls, filename: str, response: HTTPResponse):
new_filename = (filename + '-new')
with open(new_filename, 'wb') as new_file:
new_file.write(response.header())
with wpull.util.reset_file_offset(response.body):
response.body.seek(0)
shutil.copyfileobj(response.body, new_file)
os.remove(filename)
os.rename(new_filename, filename) | Prepend the HTTP response header to the file.
Args:
filename: The path of the file
response: Response | codesearchnet |
def new_random_wallet(cls, user_entropy=None, network=BitcoinMainNet):
seed = str(urandom(64))
seed += str(int((time.time() * (10 ** 6))))
if user_entropy:
user_entropy = str(user_entropy)
seed += user_entropy
return cls.from_master_secret(seed, network=network) | Generate a new wallet using a randomly generated 512 bit seed.
Args:
user_entropy: Optional user-supplied entropy which is combined
with the random seed, to help counteract compromised
PRNGs.
You are encouraged to add an optional `user_entropy` string to protect
against a compromised CSPRNG. This will be combined with the output
from the CSPRNG. Note that if you do supply this value it only adds
additional entropy and will not be sufficient to recover the random
wallet. If you're even saving `user_entropy` at all, you're doing it
wrong. | codesearchnet |
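A hedged usage sketch; `Wallet` is a stand-in name for the class this classmethod is defined on:

wallet = Wallet.new_random_wallet(user_entropy='dice rolls: 4 2 6 6 1 3')
# The extra string only adds entropy; it is not sufficient to recover the wallet.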
def wait_for_batches(self, batch_ids, timeout=None):
self._batch_tracker.watch_statuses(self, batch_ids)
timeout = (timeout or DEFAULT_TIMEOUT)
start_time = time()
with self._wait_condition:
while True:
if (self._statuses is not None):
return _format_batch_statuses(self._statuses, batch_ids, self._batch_tracker)
if ((time() - start_time) > timeout):
statuses = self._batch_tracker.get_statuses(batch_ids)
return _format_batch_statuses(statuses, batch_ids, self._batch_tracker)
self._wait_condition.wait((timeout - (time() - start_time))) | Locks until a list of batch ids is committed to the block chain
or a timeout is exceeded. Returns the statuses of those batches.
Args:
batch_ids (list of str): The ids of the batches to wait for
timeout(int): Maximum time in seconds to wait for
Returns:
list of BatchStatus: BatchStatuses to send back to client | codesearchnet |
def get_unit(self, name):
return Unit(client=self, data=self._single_request('Units.Get', unitName=name)) | Retrieve a specific unit from the fleet cluster by name
Args:
name (str): If specified, only this unit name is returned
Returns:
Unit: The unit identified by ``name`` in the fleet cluster
Raises:
fleet.v1.errors.APIError: Fleet returned a response code >= 400 | juraj-google-style |
def resolve_prefix_path(cls, start_path=None):
if not start_path or start_path == 'auto':
start_path = os.path.curdir
cur_path = start_path
LOGGER.debug('Checking if %s is a prefix', os.path.abspath(cur_path))
if cls.is_prefix(cur_path):
return os.path.abspath(cur_path)
cur_path = join(start_path, '.lago')
while not cls.is_prefix(cur_path):
LOGGER.debug('%s is not a prefix', cur_path)
cur_path = os.path.normpath(
os.path.join(cur_path, '..', '..', '.lago')
)
LOGGER.debug('Checking %s for a prefix', cur_path)
if os.path.realpath(join(cur_path, '..')) == '/':
raise RuntimeError(
'Unable to find prefix for %s' %
os.path.abspath(start_path)
)
return os.path.abspath(cur_path) | Look for an existing prefix in the given path, in a path/.lago dir, or
in a .lago dir under any of its parent directories
Args:
start_path (str): path to start the search from, if None passed, it
will use the current dir
Returns:
str: path to the found prefix
Raises:
RuntimeError: if no prefix was found | juraj-google-style |
def initial_sql(self, value):
self._initial_sql = value
if (value is None):
try:
del self._connectionXML.attrib['one-time-sql']
except KeyError:
pass
else:
self._connectionXML.set('one-time-sql', value) | Set the connection's initial_sql property.
Args:
value: New initial_sql value. String.
Returns:
Nothing. | codesearchnet |
def ExistsWithType(self, urn, aff4_type=None, follow_symlinks=True, age=NEWEST_TIME, token=None):
if (not aff4_type):
raise ValueError("aff4_type can't be None")
try:
self.Open(urn, aff4_type=aff4_type, follow_symlinks=follow_symlinks, age=age, token=token)
return True
except InstantiationError:
return False | Checks if an object with a given URN and type exists in the datastore.
Args:
urn: The urn to check.
aff4_type: Expected object type.
follow_symlinks: If object opened is a symlink, follow it.
age: The age policy used to check this object. Should be either
NEWEST_TIME or a time range given as a tuple (start, end) in
microseconds since Jan 1st, 1970.
token: The Security Token to use for opening this item.
Raises:
ValueError: if aff4_type is not specified.
Returns:
True if there's an object with a matching type at a given URN, False
otherwise. | codesearchnet |
def GenerateCostReport(metagraph, per_node_report=False, verbose=False, cluster=None):
if cluster is None:
cluster = gcluster.Cluster(disable_detailed_stats=False)
return tf_wrap.GenerateCostReport(metagraph.SerializeToString(), per_node_report, verbose, cluster.tf_cluster) | Analyze the cost of each TensorFlow op and node in the provided metagraph.
Args:
metagraph: A TensorFlow MetaGraphDef.
per_node_report: by default the report contains stats aggregated on a per op
type basis, setting per_node_report to True adds results for each
individual node to the report.
verbose: Prints out the entire operation proto instead of a summary table.
cluster: Analyze the costs using the specified cluster, or the local machine
if no cluster was specified.
Returns:
A string of cost report. | github-repos |
def attach_template(self, _template, _key, **unbound_var_values):
if (_key in unbound_var_values):
raise ValueError(('%s specified twice.' % _key))
unbound_var_values[_key] = self
return _DeferredLayer(self.bookkeeper, _template.as_layer().construct, [], unbound_var_values, scope=self._scope, defaults=self._defaults, partial_context=self._partial_context) | Attaches the template to this layer; the unbound variable named by _key is supplied with this layer.
Note: names were chosen to avoid conflicts.
Args:
_template: The template to construct.
_key: The key that this layer should replace.
**unbound_var_values: The values for the unbound_vars.
Returns:
A new layer with operation applied.
Raises:
ValueError: If _key is specified twice or there is a problem computing the
template. | codesearchnet |
def fetch(self, plan_id, data={}, **kwargs):
return super(Plan, self).fetch(plan_id, data, **kwargs) | Fetch Plan for given Id
Args:
plan_id : Id for which Plan object has to be retrieved
Returns:
Plan dict for given plan Id
def get_expectations_config(self, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_configs_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False):
config = dict(self._expectations_config)
config = copy.deepcopy(config)
expectations = config['expectations']
discards = defaultdict(int)
if discard_failed_expectations:
new_expectations = []
for expectation in expectations:
if (('success_on_last_run' in expectation) and (expectation['success_on_last_run'] == False)):
discards['failed_expectations'] += 1
else:
new_expectations.append(expectation)
expectations = new_expectations
for expectation in expectations:
if ('success_on_last_run' in expectation):
del expectation['success_on_last_run']
if discard_result_format_kwargs:
if ('result_format' in expectation['kwargs']):
del expectation['kwargs']['result_format']
discards['result_format'] += 1
if discard_include_configs_kwargs:
if ('include_configs' in expectation['kwargs']):
del expectation['kwargs']['include_configs']
discards['include_configs'] += 1
if discard_catch_exceptions_kwargs:
if ('catch_exceptions' in expectation['kwargs']):
del expectation['kwargs']['catch_exceptions']
discards['catch_exceptions'] += 1
if (not suppress_warnings):
'\nWARNING: get_expectations_config discarded\n 12 failing expectations\n 44 result_format kwargs\n 0 include_config kwargs\n 1 catch_exceptions kwargs\nIf you wish to change this behavior, please set discard_failed_expectations, discard_result_format_kwargs, discard_include_configs_kwargs, and discard_catch_exceptions_kwargs appropirately.\n '
if any([discard_failed_expectations, discard_result_format_kwargs, discard_include_configs_kwargs, discard_catch_exceptions_kwargs]):
print('WARNING: get_expectations_config discarded')
if discard_failed_expectations:
print(('\t%d failing expectations' % discards['failed_expectations']))
if discard_result_format_kwargs:
print(('\t%d result_format kwargs' % discards['result_format']))
if discard_include_configs_kwargs:
print(('\t%d include_configs kwargs' % discards['include_configs']))
if discard_catch_exceptions_kwargs:
print(('\t%d catch_exceptions kwargs' % discards['catch_exceptions']))
print('If you wish to change this behavior, please set discard_failed_expectations, discard_result_format_kwargs, discard_include_configs_kwargs, and discard_catch_exceptions_kwargs appropirately.')
config['expectations'] = expectations
return config | Returns _expectation_config as a JSON object, and performs some cleaning along the way.
Args:
discard_failed_expectations (boolean): \
Only include expectations with success_on_last_run=True in the exported config. Defaults to `True`.
discard_result_format_kwargs (boolean): \
In returned expectation objects, suppress the `result_format` parameter. Defaults to `True`.
discard_include_configs_kwargs (boolean): \
In returned expectation objects, suppress the `include_configs` parameter. Defaults to `True`.
discard_catch_exceptions_kwargs (boolean): \
In returned expectation objects, suppress the `catch_exceptions` parameter. Defaults to `True`.
Returns:
An expectation config.
Note:
get_expectations_config does not affect the underlying config at all. The returned config is a copy of _expectations_config, not the original object. | codesearchnet |
def recent_all_projects(self, limit=30, offset=0):
method = 'GET'
url = '/recent-builds?circle-token={token}&limit={limit}&offset={offset}'.format(token=self.client.api_token, limit=limit, offset=offset)
json_data = self.client.request(method, url)
return json_data | Return information about recent builds across all projects.
Args:
limit (int): Number of builds to return, max=100, default=30.
offset (int): Builds returned from this point, default=0.
Returns:
A list of dictionaries. | codesearchnet |
def watch_printer(watch, value):
print('({: 8} s) {}: {}'.format(value.raw_time, watch, value.value)) | Print a watched value.
Args:
watch (DataStream): The stream that was watched
value (IOTileReading): The value to was seen | codesearchnet |
def __init__(self, policies=None, database_path=None):
self._logger = logging.getLogger('kmip.server.engine')
self._cryptography_engine = engine.CryptographyEngine()
    self.database_path = 'sqlite:
    if not database_path:
        self.database_path = 'sqlite:
self._data_store = sqlalchemy.create_engine(
self.database_path,
echo=False,
connect_args={'check_same_thread': False}
)
sqltypes.Base.metadata.create_all(self._data_store)
self._data_store_session_factory = sqlalchemy.orm.sessionmaker(
bind=self._data_store
)
self._lock = threading.RLock()
self._id_placeholder = None
self._protocol_versions = [
contents.ProtocolVersion(1, 4),
contents.ProtocolVersion(1, 3),
contents.ProtocolVersion(1, 2),
contents.ProtocolVersion(1, 1),
contents.ProtocolVersion(1, 0)
]
self.default_protocol_version = self._protocol_versions[2]
self._protocol_version = self._protocol_versions[2]
self._object_map = {
enums.ObjectType.CERTIFICATE: objects.X509Certificate,
enums.ObjectType.SYMMETRIC_KEY: objects.SymmetricKey,
enums.ObjectType.PUBLIC_KEY: objects.PublicKey,
enums.ObjectType.PRIVATE_KEY: objects.PrivateKey,
enums.ObjectType.SPLIT_KEY: None,
enums.ObjectType.TEMPLATE: None,
enums.ObjectType.SECRET_DATA: objects.SecretData,
enums.ObjectType.OPAQUE_DATA: objects.OpaqueObject
}
self._attribute_policy = policy.AttributePolicy(self._protocol_version)
self._operation_policies = policies
self._client_identity = [None, None] | Create a KmipEngine.
Args:
policies: The operation policies to enforce, typically loaded from
    the PyKMIP server operation policy JSON files.
Optional, defaults to None.
database_path (string): The path to the SQLite database file
used to store all server data. Optional, defaults to None.
If none, database path defaults to '/tmp/pykmip.database'. | juraj-google-style |