code | docstring | source
---|---|---|
def on_the_air(self, **kwargs):
path = self._get_path('on_the_air')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Get the list of TV shows that are currently on the air. This query
looks for any TV show that has an episode with an air date in the
next 7 days.
Args:
page: (optional) Minimum 1, maximum 1000.
language: (optional) ISO 639 code.
Returns:
A dict representation of the JSON returned from the API.
|
juraj-google-style
|
def _ResizeBicubicGrad(op: ops.Operation, grad):
allowed_types = [dtypes.float32, dtypes.float64]
grad0 = None
if op.inputs[0].dtype in allowed_types:
grad0 = gen_image_ops.resize_bicubic_grad(grad, op.inputs[0], align_corners=op.get_attr('align_corners'), half_pixel_centers=op.get_attr('half_pixel_centers'))
return [grad0, None]
|
The derivatives for bicubic resizing.
Args:
op: The ResizeBicubic op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input.
|
github-repos
|
def _get_accepted(self, graph):
accepted = []
for state in graph.states:
if state.final != TropicalWeight(float('inf')):
accepted.append(state)
return accepted
|
Find the accepted states.
Args:
graph (DFA): The DFA whose states to search.
Returns:
list: The list of accepted states.
|
juraj-google-style
|
def __reg_query_value(handle, value_name):
item_value, item_type = win32api.RegQueryValueEx(handle, value_name)
if six.PY2 and isinstance(item_value, six.string_types) and not isinstance(item_value, six.text_type):
try:
item_value = six.text_type(item_value, encoding='mbcs')
except UnicodeError:
pass
if item_type == win32con.REG_EXPAND_SZ:
item_value = win32api.ExpandEnvironmentStrings(item_value)
item_type = win32con.REG_SZ
return item_value, item_type
|
Calls RegQueryValueEx.
If PY2, ensures a unicode string and expands REG_EXPAND_SZ before returning.
Remember to catch not-found exceptions when calling.
Args:
handle (object): Open registry handle.
value_name (str): Name of the value you wish returned.
Returns:
tuple: (value, type)
|
juraj-google-style
|
def set_peer_address(self, value=None, default=False, disable=False):
return self._configure_mlag('peer-address', value, default, disable)
|
Configures the mlag peer-address value
Args:
value (str): The value to configure the peer-address
default (bool): Configures the peer-address using the
default keyword
disable (bool): Negates the peer-address using the no keyword
Returns:
bool: Returns True if the commands complete successfully
|
juraj-google-style
|
def isregex(value):
if (not value):
return False
return any((isregex_expr(value), isinstance(value, retype)))
|
Returns ``True`` if the input argument object is a native
regular expression object, otherwise ``False``.
Arguments:
value (mixed): input value to test.
Returns:
bool
|
codesearchnet
|
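A minimal standalone sketch of the check `isregex` performs, assuming only that `retype` is the compiled-pattern type; the `isregex_expr` branch for expression-style values is omitted here.

import re

retype = type(re.compile(''))  # the compiled-pattern type the helper compares against

def isregex_sketch(value):
    # Simplified: the original also accepts expression-style values via isregex_expr().
    if not value:
        return False
    return isinstance(value, retype)

print(isregex_sketch(re.compile(r'\d+')))  # True
print(isregex_sketch(r'\d+'))              # False in this sketch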
def populate(self, filename):
if os.path.isfile(filename):
fid_st = os.stat(filename)
self.name = os.path.abspath(filename)
self.full_name = filename
self.size = fid_st.st_size
self.last_modified = fid_st.st_mtime
self.last_accessed = fid_st.st_atime
self.last_info_changed = fid_st.st_ctime
self.location = os.path.dirname(filename)
|
Finds the file-stats and populates the class with stat values.
Args:
filename (str): name of the file.
|
juraj-google-style
|
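A short sketch of the `os.stat` calls `populate` relies on, run against any existing file; the dictionary keys mirror the attributes set above.

import os

path = __file__  # any existing file will do
fid_st = os.stat(path)
info = {
    'name': os.path.abspath(path),
    'size': fid_st.st_size,               # bytes
    'last_modified': fid_st.st_mtime,     # POSIX timestamps, as stored above
    'last_accessed': fid_st.st_atime,
    'last_info_changed': fid_st.st_ctime,
    'location': os.path.dirname(path),
}
print(info['size'], info['location'])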
def __init__(self, shape=None, dtype=dtypes.float32, indices_dtype=dtypes.int64, dense_shape_dtype=None, indices_shape=None):
self._shape = tensor_shape.as_shape(shape)
self._values_dtype = dtypes.as_dtype(dtype)
self._indices_dtype = dtypes.as_dtype(indices_dtype)
if dense_shape_dtype is None:
self._dense_shape_dtype = None
else:
self._dense_shape_dtype = dtypes.as_dtype(dense_shape_dtype)
self._indices_shape = tensor_shape.as_shape(indices_shape).with_rank(1)
|
Constructs a type specification for a `tf.IndexedSlices`.
Args:
shape: The dense shape of the `IndexedSlices`, or `None` to allow any
dense shape.
dtype: `tf.DType` of values in the `IndexedSlices`.
indices_dtype: `tf.DType` of the `indices` in the `IndexedSlices`. One
of `tf.int32` or `tf.int64`.
dense_shape_dtype: `tf.DType` of the `dense_shape` in the `IndexedSlices`.
One of `tf.int32`, `tf.int64`, or `None` (if the `IndexedSlices` has
no `dense_shape` tensor).
indices_shape: The shape of the `indices` component, which indicates
how many slices are in the `IndexedSlices`.
|
github-repos
|
def _GetClientLib(service_class_names, language, output_path, build_system, hostname=None, application_path=None):
client_libs = []
service_configs = GenApiConfig(service_class_names, hostname=hostname, config_string_generator=discovery_generator.DiscoveryGenerator(), application_path=application_path)
for (api_name_version, config) in service_configs.iteritems():
client_name = (api_name_version + '.zip')
client_libs.append(_GenClientLibFromContents(config, language, output_path, build_system, client_name))
return client_libs
|
Fetch client libraries from a cloud service.
Args:
service_class_names: A list of fully qualified ProtoRPC service names.
language: The client library language to generate. (java)
output_path: The directory to output the discovery docs to.
build_system: The target build system for the client library language.
hostname: A string hostname which will be used as the default version
hostname. If no hostname is specified in the @endpoints.api decorator,
this value is the fallback. Defaults to None.
application_path: A string containing the path to the AppEngine app.
Returns:
A list of paths to client libraries.
|
codesearchnet
|
def init_test_variables(self, variables_mapping=None):
variables_mapping = variables_mapping or {}
variables_mapping = utils.ensure_mapping_format(variables_mapping)
variables_mapping.update(self.session_variables_mapping)
parsed_variables_mapping = parser.parse_variables_mapping(variables_mapping)
self.test_variables_mapping = {}
self.test_variables_mapping.update(parsed_variables_mapping)
self.test_variables_mapping.update(self.session_variables_mapping)
|
init test variables, called when each test(api) starts.
variables_mapping will be evaluated first.
Args:
variables_mapping (dict)
{
"random": "${gen_random_string(5)}",
"authorization": "${gen_md5($TOKEN, $data, $random)}",
"data": '{"name": "user", "password": "123456"}',
"TOKEN": "debugtalk",
}
|
juraj-google-style
|
def create_analyzer_cli(dump):
analyzer = analyzer_cli.DebugAnalyzer(dump, _cli_config_from_temp_file())
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler('list_tensors', analyzer.list_tensors, analyzer.get_help('list_tensors'), prefix_aliases=['lt'])
registry.register_command_handler('node_info', analyzer.node_info, analyzer.get_help('node_info'), prefix_aliases=['ni'])
registry.register_command_handler('list_inputs', analyzer.list_inputs, analyzer.get_help('list_inputs'), prefix_aliases=['li'])
registry.register_command_handler('list_outputs', analyzer.list_outputs, analyzer.get_help('list_outputs'), prefix_aliases=['lo'])
registry.register_command_handler('print_tensor', analyzer.print_tensor, analyzer.get_help('print_tensor'), prefix_aliases=['pt'])
registry.register_command_handler('print_source', analyzer.print_source, analyzer.get_help('print_source'), prefix_aliases=['ps'])
registry.register_command_handler('list_source', analyzer.list_source, analyzer.get_help('list_source'), prefix_aliases=['ls'])
registry.register_command_handler('eval', analyzer.evaluate_expression, analyzer.get_help('eval'), prefix_aliases=['ev'])
return (analyzer, registry)
|
Create an analyzer CLI.
Args:
dump: A `DebugDumpDir` object to base the analyzer CLI on.
Returns:
1) A `DebugAnalyzer` object created based on `dump`.
2) A `CommandHandlerRegistry` that is based on the `DebugAnalyzer` object
and has the common tfdbg commands, e.g., lt, ni, li, lo, registered.
|
github-repos
|
def _fire_timers(self):
transform_fired_timers, _ = self._executor.evaluation_context.extract_all_timers()
for applied_ptransform, fired_timers in transform_fired_timers:
empty_bundle = self._executor.evaluation_context.create_empty_committed_bundle(applied_ptransform.inputs[0])
timer_completion_callback = _CompletionCallback(self._executor.evaluation_context, self._executor.all_updates, timer_firings=fired_timers)
self._executor.schedule_consumption(applied_ptransform, empty_bundle, fired_timers, timer_completion_callback)
return bool(transform_fired_timers)
|
Schedules triggered consumers if any timers fired.
Returns:
True if timers fired.
|
github-repos
|
def has_event(self, event, cameo_code):
if self.has_cameo_code(cameo_code):
entry = self.mapping.get(cameo_code)
if entry:
return entry[self.event_name[event]]
return False
|
Test whether there is an "event2" or "event3" entry for the given CAMEO code.
Args:
event: The event key to look up (e.g. "event2" or "event3").
cameo_code: The CAMEO code to check.
Returns:
True if a matching entry exists for the event, False otherwise.
|
juraj-google-style
|
def fresnel_sin(x, name=None):
with ops.name_scope(name, 'fresnel_sin', [x]):
return gen_special_math_ops.fresnel_sin(x)
|
Computes Fresnel's sine integral of `x` element-wise.
The Fresnel sine integral is defined as the integral of `sin(t^2)` from
`0` to `x`, with the domain of definition all real numbers.
>>> tf.math.special.fresnel_sin([-1., -0.1, 0.1, 1.]).numpy()
array([-0.43825912, -0.00052359, 0.00052359, 0.43825912], dtype=float32)
This implementation is based off of the Cephes math library.
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types:
`float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
@compatibility(scipy)
Equivalent to scipy.special.fresnel first output.
@end_compatibility
|
github-repos
|
def _validate_pos_args_syntax(alias_name, alias_command):
pos_args_from_alias = get_placeholders(alias_name)
pos_args_from_command = [x.split('|')[0].split('.')[0].strip() for x in get_placeholders(alias_command)]
if set(pos_args_from_alias) != set(pos_args_from_command):
arg_diff = set(pos_args_from_alias) ^ set(pos_args_from_command)
raise CLIError(INCONSISTENT_ARG_ERROR.format('' if len(arg_diff) == 1 else 's',
arg_diff,
'is' if len(arg_diff) == 1 else 'are'))
|
Check if the positional argument syntax is valid in alias name and alias command.
Args:
alias_name: The name of the alias to validate.
alias_command: The command to validate.
|
juraj-google-style
|
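A rough illustration of the consistency check in `_validate_pos_args_syntax`, with a hypothetical `get_placeholders` stand-in (the real helper and the `INCONSISTENT_ARG_ERROR` message live elsewhere in the alias extension).

import re

def get_placeholders(text):
    # Hypothetical stand-in: grab whatever sits between {{ and }}.
    return [m.strip() for m in re.findall(r'\{\{(.*?)\}\}', text)]

alias_name = 'grp {{ name }}'
alias_command = 'group show --name {{ name | upper }}'

pos_args_from_alias = get_placeholders(alias_name)
pos_args_from_command = [x.split('|')[0].split('.')[0].strip()
                         for x in get_placeholders(alias_command)]
arg_diff = set(pos_args_from_alias) ^ set(pos_args_from_command)
print(arg_diff or 'placeholders are consistent')  # empty set -> consistent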
def ProcessMessages(self, active_notifications, queue_manager, time_limit=0):
now = time.time()
processed = 0
for notification in active_notifications:
if (notification.session_id not in self.queued_flows):
if (time_limit and ((time.time() - now) > time_limit)):
break
processed += 1
self.queued_flows.Put(notification.session_id, 1)
self.thread_pool.AddTask(target=self._ProcessMessages, args=(notification, queue_manager.Copy()), name=self.__class__.__name__)
return processed
|
Processes all the flows in the messages.
Precondition: All tasks come from the same queue.
Note that the server actually completes the requests in the
flow when receiving the messages from the client. We do not really
look at the messages here at all any more - we just work from the
completed messages in the flow RDFValue.
Args:
active_notifications: The list of notifications.
queue_manager: QueueManager object used to manage notifications,
requests and responses.
time_limit: If set return as soon as possible after this many seconds.
Returns:
The number of processed flows.
|
codesearchnet
|
def build_this_graph(G, settings, dont_update_shas_of=None):
verbose = settings["verbose"]
quiet = settings["quiet"]
force = settings["force"]
recon = settings["recon"]
parallel = settings["parallel"]
error = settings["error"]
sprint = settings["sprint"]
if not dont_update_shas_of:
dont_update_shas_of = []
sprint("Checking that graph is directed acyclic", level="verbose")
if not nx.is_directed_acyclic_graph(G):
errmes = "Dependency resolution is impossible; "
errmes += "graph is not directed and acyclic"
errmes += "\nCheck the Sakefile\n"
error(errmes)
sys.exit(1)
sprint("Dependency resolution is possible", level="verbose")
in_mem_shas = take_shas_of_all_files(G, settings)
from_store = {}
if not os.path.isfile(".shastore"):
write_shas_to_shastore(in_mem_shas)
in_mem_shas = {}
in_mem_shas['files'] = {}
with io.open(".shastore", "r") as fh:
shas_on_disk = fh.read()
from_store = yaml.load(shas_on_disk)
check_shastore_version(from_store, settings)
if not from_store:
write_shas_to_shastore(in_mem_shas)
in_mem_shas = {}
in_mem_shas['files'] = {}
with io.open(".shastore", "r") as fh:
shas_on_disk = fh.read()
from_store = yaml.load(shas_on_disk)
if parallel:
for line in parallel_sort(G):
line = sorted(line)
out = "Checking if targets '{}' need to be run"
sprint(out.format(", ".join(line)), level="verbose")
to_build = []
for item in line:
if needs_to_run(G, item, in_mem_shas, from_store, settings):
to_build.append(item)
if to_build:
if recon:
if len(to_build) == 1:
out = "Would run target '{}'"
sprint(out.format(to_build[0]))
else:
out = "Would run targets '{}' in parallel"
sprint(out.format(", ".join(to_build)))
continue
parallel_run_these(G, to_build, in_mem_shas, from_store,
settings, dont_update_shas_of)
else:
targets = []
for line in parallel_sort(G):
for item in sorted(line):
targets.append(item)
for target in targets:
outstr = "Checking if target '{}' needs to be run"
sprint(outstr.format(target), level="verbose")
if needs_to_run(G, target, in_mem_shas, from_store, settings):
if recon:
sprint("Would run target: {}".format(target))
continue
run_the_target(G, target, settings)
node_dict = get_the_node_dict(G, target)
if "output" in node_dict:
for output in acts.get_all_outputs(node_dict):
if output not in dont_update_shas_of:
in_mem_shas['files'][output] = {"sha": get_sha(output,
settings)}
write_shas_to_shastore(in_mem_shas)
if "dependencies" in node_dict:
for dep in acts.get_all_dependencies(node_dict):
if dep not in dont_update_shas_of:
in_mem_shas['files'][dep] = {"sha": get_sha(dep,
settings)}
write_shas_to_shastore(in_mem_shas)
if recon:
return 0
in_mem_shas = take_shas_of_all_files(G, settings)
if in_mem_shas:
in_mem_shas = merge_from_store_and_in_mems(from_store, in_mem_shas,
dont_update_shas_of)
write_shas_to_shastore(in_mem_shas)
sprint("Done", color=True)
return 0
|
This is the master function that performs the building.
Args:
G: A graph (often a subgraph).
settings: The settings dictionary.
dont_update_shas_of: An optional list of files to not update the shas of
(needed when building specific targets).
Returns:
0 if successful.
Failure results in a fatal error, so it either returns 0 or does not return at all.
|
juraj-google-style
|
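The acyclicity gate at the top of `build_this_graph` is plain networkx; a small sketch, assuming networkx is installed:

import networkx as nx

G = nx.DiGraph()
G.add_edges_from([('compile', 'link'), ('link', 'package')])
print(nx.is_directed_acyclic_graph(G))   # True -> dependency resolution is possible

G.add_edge('package', 'compile')          # introduce a cycle
print(nx.is_directed_acyclic_graph(G))   # False -> build_this_graph would error out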
def CreateJob(self, cron_args=None, job_id=None, token=None, enabled=True):
if not job_id:
uid = random.UInt16()
job_id = "%s_%s" % (cron_args.flow_name, uid)
flow_runner_args = rdf_flow_runner.FlowRunnerArgs(
flow_name="CreateAndRunGenericHuntFlow")
flow_args = rdf_hunts.CreateGenericHuntFlowArgs()
flow_args.hunt_args.flow_args = cron_args.flow_args
flow_args.hunt_args.flow_runner_args.flow_name = cron_args.flow_name
flow_args.hunt_runner_args = cron_args.hunt_runner_args
flow_args.hunt_runner_args.hunt_name = "GenericHunt"
create_cron_args = rdf_cronjobs.CreateCronJobFlowArgs(
description=cron_args.description,
periodicity=cron_args.frequency,
flow_runner_args=flow_runner_args,
flow_args=flow_args,
allow_overruns=cron_args.allow_overruns,
lifetime=cron_args.lifetime)
cron_job_urn = self.CRON_JOBS_PATH.Add(job_id)
with aff4.FACTORY.Create(
cron_job_urn,
aff4_type=CronJob,
mode="rw",
token=token,
force_new_version=False) as cron_job:
existing_cron_args = cron_job.Get(cron_job.Schema.CRON_ARGS)
if existing_cron_args and existing_cron_args.start_time:
create_cron_args.start_time = existing_cron_args.start_time
if create_cron_args != existing_cron_args:
cron_job.Set(cron_job.Schema.CRON_ARGS(create_cron_args))
cron_job.Set(cron_job.Schema.DISABLED(not enabled))
return job_id
|
Creates a cron job that runs given flow with a given frequency.
Args:
cron_args: A protobuf of type rdf_cronjobs.CreateCronJobArgs.
job_id: Use this job_id instead of an autogenerated unique name (used for
system cron jobs - we want them to have well-defined persistent name).
token: Security token used for data store access.
enabled: If False, the job object will be created, but will be disabled.
Returns:
Name of the cron job created.
|
juraj-google-style
|
def heartbeat(self, status_info):
for field in ('role', 'ttl', 'load'):
if (not (field in status_info)):
raise Exception('status_info is missing required field %s' % repr(field))
val = status_info['ttl']
if ((not (isinstance(val, float) or isinstance(val, int))) or (val <= 0)):
raise Exception('ttl must be a number > 0')
updated_status_info = dict(status_info)
updated_status_info['last_heartbeat'] = r.now()
if (not ('first_heartbeat' in updated_status_info)):
updated_status_info['first_heartbeat'] = updated_status_info['last_heartbeat']
if (not ('host' in updated_status_info)):
updated_status_info['host'] = socket.gethostname()
if (not ('pid' in updated_status_info)):
updated_status_info['pid'] = os.getpid()
try:
result = self.rr.table(self.table).insert(updated_status_info, conflict='replace', return_changes=True).run()
return result['changes'][0]['new_val']
except:
self.logger.error('error updating service registry', exc_info=True)
return status_info
|
Update service status, indicating "up"-ness.
Args:
status_info (dict): a dictionary representing the status of the
service
`status_info` must have at least the fields 'role', 'load', and
'ttl'. Some additional fields are populated automatically by this
method. If the field 'id' is absent, it will be generated by rethinkdb.
See the ServiceRegistry class-level documentation for more information
about the various fields.
Returns:
On success, returns the modified status info dict. On failure
communicating with rethinkdb, returns `status_info` unmodified.
Raises:
Exception: if `status_info` is missing a required field, or a
`status_info['ttl']` is not a number greater than zero
|
codesearchnet
|
def calculate_heading(locator1, locator2):
(lat1, long1) = locator_to_latlong(locator1)
(lat2, long2) = locator_to_latlong(locator2)
r_lat1 = radians(lat1)
r_lon1 = radians(long1)
r_lat2 = radians(lat2)
r_lon2 = radians(long2)
d_lon = radians((long2 - long1))
b = atan2((sin(d_lon) * cos(r_lat2)), ((cos(r_lat1) * sin(r_lat2)) - ((sin(r_lat1) * cos(r_lat2)) * cos(d_lon))))
bd = degrees(b)
(br, bn) = divmod((bd + 360), 360)
return bn
|
calculates the heading from the first to the second locator
Args:
locator1 (string): Locator, either 4 or 6 characters
locator2 (string): Locator, either 4 or 6 characters
Returns:
float: Heading in deg
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the heading from locator1 to locator2
>>> from pyhamtools.locator import calculate_heading
>>> calculate_heading("JN48QM", "QF67bf")
74.3136
|
codesearchnet
|
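The bearing math inside `calculate_heading`, pulled out as a standalone sketch that takes latitude/longitude directly instead of Maidenhead locators; the coordinates below approximate JN48QM and QF67bf.

from math import atan2, cos, degrees, radians, sin

def initial_bearing(lat1, lon1, lat2, lon2):
    # Great-circle initial bearing, normalised to 0..360 degrees.
    r_lat1, r_lat2 = radians(lat1), radians(lat2)
    d_lon = radians(lon2 - lon1)
    b = atan2(sin(d_lon) * cos(r_lat2),
              cos(r_lat1) * sin(r_lat2) - sin(r_lat1) * cos(r_lat2) * cos(d_lon))
    return (degrees(b) + 360) % 360

print(round(initial_bearing(48.52, 9.38, -32.77, 152.12), 1))  # roughly 74.3, as in the example above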
def make_calls(self, num_calls=1):
self._cull()
while ((self._outstanding_calls + num_calls) > self._max_calls_per_second):
time.sleep(0)
self._cull()
self._call_times.append(self.CallRecord(time=time.time(), num_calls=num_calls))
self._outstanding_calls += num_calls
|
Adds appropriate sleep to avoid making too many calls.
Args:
num_calls: int the number of calls which will be made
|
codesearchnet
|
def get_decor(self, c, match_only=None):
if isinstance(c, Component):
if c:
if match_only:
c = Component({k: getattr(c, k, None) for k in match_only})
for decor in self.__list:
try:
if (c == decor.component):
return decor
except AttributeError:
continue
else:
for decor in self.__list:
try:
if (getattr(c, 'mnemonic').lower() == decor.curve.mnemonic):
return decor
except AttributeError:
continue
return Decor({'colour': '
|
Get the decor for a component.
Args:
c (component): The component to look up.
match_only (list of str): The component attributes to include in the
comparison. Default: All of them.
Returns:
Decor. The matching Decor from the Legend, or None if not found.
|
codesearchnet
|
def operate_magmom(self, magmom):
magmom = Magmom(magmom)
transformed_moment = ((self.apply_rotation_only(magmom.global_moment) * np.linalg.det(self.rotation_matrix)) * self.time_reversal)
return Magmom.from_global_moment_and_saxis(transformed_moment, magmom.saxis)
|
Apply time reversal operator on the magnetic moment. Note that
magnetic moments transform as axial vectors, not polar vectors.
See 'Symmetry and magnetic structures', Rodríguez-Carvajal and
Bourée for a good discussion. DOI: 10.1051/epjconf/20122200010
Args:
magmom: Magnetic moment as electronic_structure.core.Magmom
class or as list or np array-like
Returns:
Magnetic moment after operator applied as Magmom class
|
codesearchnet
|
def assert_false(expr, msg, extras=None):
if expr:
fail(msg, extras)
|
Assert an expression evaluates to false, otherwise fail the test.
Args:
expr: The expression that is evaluated.
msg: A string explaining the details in case of failure.
extras: An optional field for extra information to be included in
test result.
|
github-repos
|
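A self-contained sketch of the `assert_false` pattern, with a hypothetical `fail()` that raises a plain exception instead of the framework's real signal type:

class TestFailure(Exception):
    pass

def fail(msg, extras=None):
    # Hypothetical stand-in for the framework's fail(): raise with the details.
    raise TestFailure(msg if extras is None else '%s extras: %r' % (msg, extras))

def assert_false(expr, msg, extras=None):
    if expr:
        fail(msg, extras)

assert_false(1 + 1 == 3, 'arithmetic is broken')   # passes silently
try:
    assert_false([0], 'a non-empty list is truthy', extras={'value': [0]})
except TestFailure as e:
    print('failed as expected:', e)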
def main(target_device):
jlink = pylink.JLink()
print('connecting to JLink...')
jlink.open()
print(('connecting to %s...' % target_device))
jlink.set_tif(pylink.enums.JLinkInterfaces.SWD)
jlink.connect(target_device)
print('connected, starting RTT...')
jlink.rtt_start()
while True:
try:
num_up = jlink.rtt_get_num_up_buffers()
num_down = jlink.rtt_get_num_down_buffers()
print(('RTT started, %d up bufs, %d down bufs.' % (num_up, num_down)))
break
except pylink.errors.JLinkRTTException:
time.sleep(0.1)
try:
thread.start_new_thread(read_rtt, (jlink,))
thread.start_new_thread(write_rtt, (jlink,))
while jlink.connected():
time.sleep(1)
print('JLink disconnected, exiting...')
except KeyboardInterrupt:
print('ctrl-c detected, exiting...')
pass
|
Creates an interactive terminal to the target via RTT.
The main loop opens a connection to the JLink, and then connects
to the target device. RTT is started, the number of buffers is presented,
and then two worker threads are spawned: one for read, and one for write.
The main loops sleeps until the JLink is either disconnected or the
user hits ctrl-c.
Args:
target_device (string): The target CPU to connect to.
Returns:
Always returns ``0`` or a JLinkException.
Raises:
JLinkException on error.
|
codesearchnet
|
def __init__(self, op, specs, name):
self.op = op
self.specs = specs
self.name = name
|
Creates a `SaveableObject` object.
Args:
op: the "producer" object that this class wraps; it produces a list of
tensors to save. E.g., a "Variable" object saving its backing tensor.
specs: a list of SaveSpec, each element of which describes one tensor to
save under this object. All Tensors must be on the same device.
name: the name to save the object under.
|
github-repos
|
def receiveds_parsing(receiveds):
parsed = []
receiveds = [re.sub(JUNK_PATTERN, " ", i).strip() for i in receiveds]
n = len(receiveds)
log.debug("Nr. of receiveds. {}".format(n))
for idx, received in enumerate(receiveds):
log.debug("Parsing received {}/{}".format(idx + 1, n))
log.debug("Try to parse {!r}".format(received))
try:
values_by_clause = parse_received(received)
except MailParserReceivedParsingError:
parsed.append({'raw': received})
else:
parsed.append(values_by_clause)
log.debug("len(receiveds) %s, len(parsed) %s" % (
len(receiveds), len(parsed)))
if len(receiveds) != len(parsed):
log.error("len(receiveds): %s, len(parsed): %s, receiveds: %s, \
parsed: %s" % (len(receiveds), len(parsed), receiveds, parsed))
return receiveds_not_parsed(receiveds)
else:
return receiveds_format(parsed)
|
This function parses the receiveds headers.
Args:
receiveds (list): list of raw receiveds headers
Returns:
a list of parsed receiveds headers with first hop in first position
|
juraj-google-style
|
def insert(self, name, entry_type, filename):
if self.cursor is None:
raise RuntimeError(
'Open DB connection before attempting to call insert!')
db_entry = (name, entry_type, filename)
if self.verbose:
print('Inserting %s "%s" -> %s' % db_entry, file=sys.stderr)
self.cursor.execute(
, db_entry)
|
Insert an entry into the Zeal database.
Args:
name: A string representing the name of the entry.
entry_type: A string representing the entry type.
filename: A string representing the filename of the documentation
for the entry.
Raises:
RuntimeError: a database connection was not established before
calling insert()
|
juraj-google-style
|
def find_pruneable_heads_and_indices(heads: list[int], n_heads: int, head_size: int, already_pruned_heads: set[int]) -> tuple[set[int], torch.LongTensor]:
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads
for head in heads:
head = head - sum((1 if h < head else 0 for h in already_pruned_heads))
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: torch.LongTensor = torch.arange(len(mask))[mask].long()
return (heads, index)
|
Finds the heads and their indices taking `already_pruned_heads` into account.
Args:
heads (`List[int]`): List of the indices of heads to prune.
n_heads (`int`): The number of heads in the model.
head_size (`int`): The size of each head.
already_pruned_heads (`Set[int]`): A set of already pruned heads.
Returns:
`Tuple[Set[int], torch.LongTensor]`: A tuple with the indices of heads to prune taking `already_pruned_heads`
into account and the indices of rows/columns to keep in the layer weight.
|
github-repos
|
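A small sketch of the mask-and-index trick used in `find_pruneable_heads_and_indices`, assuming PyTorch is available; pruning heads 1 and 3 of a 4-head, 8-dim layer leaves 16 surviving positions.

import torch

n_heads, head_size = 4, 8
heads_to_prune = {1, 3}

mask = torch.ones(n_heads, head_size)
for head in heads_to_prune:
    mask[head] = 0                       # zero out the rows of pruned heads
mask = mask.view(-1).contiguous().eq(1)  # flatten to a keep/drop mask
index = torch.arange(len(mask))[mask].long()
print(index.shape)                       # torch.Size([16]) -> 2 kept heads * 8 dims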
def find_and_replace_channel_refs(self, text):
match = True
pattern = re.compile('<
while match:
match = pattern.search(text)
if match:
text = text.replace(match.group(0), '
return text
|
Finds occurrences of Slack channel references and attempts to
replace them with just channel names.
Args:
text (string): The message text
Returns:
string: The message text with channel references replaced.
|
juraj-google-style
|
def add_plot_boundary(ax, padding=0.125):
nodes = np.asfortranarray(
np.vstack([line.get_xydata() for line in ax.lines]).T
)
left, right, bottom, top = _helpers.bbox(nodes)
center_x = 0.5 * (right + left)
delta_x = right - left
center_y = 0.5 * (top + bottom)
delta_y = top - bottom
multiplier = (1.0 + padding) * 0.5
ax.set_xlim(
center_x - multiplier * delta_x, center_x + multiplier * delta_x
)
ax.set_ylim(
center_y - multiplier * delta_y, center_y + multiplier * delta_y
)
|
Add a buffer of empty space around a plot boundary.
.. note::
This only uses ``line`` data from the axis. It **could**
use ``patch`` data, but doesn't at this time.
Args:
ax (matplotlib.artist.Artist): A matplotlib axis.
padding (Optional[float]): Amount (as a fraction of width and height)
of padding to add around data. Defaults to ``0.125``.
|
juraj-google-style
|
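The padding arithmetic in `add_plot_boundary`, shown on raw points with NumPy only; the resulting limits widen each axis span from `delta` to `(1 + padding) * delta`.

import numpy as np

points = np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 0.5]])   # (x, y) pairs
left, right = points[:, 0].min(), points[:, 0].max()
bottom, top = points[:, 1].min(), points[:, 1].max()

padding = 0.125
multiplier = (1.0 + padding) * 0.5
center_x, delta_x = 0.5 * (right + left), right - left
center_y, delta_y = 0.5 * (top + bottom), top - bottom

xlim = (center_x - multiplier * delta_x, center_x + multiplier * delta_x)
ylim = (center_y - multiplier * delta_y, center_y + multiplier * delta_y)
print(xlim, ylim)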
def DownloadPqlResultToList(self, pql_query, values=None):
results = []
self._PageThroughPqlSet(pql_query, results.append, values)
return results
|
Downloads the results of a PQL query to a list.
Args:
pql_query: str a statement filter to apply (the query should not include
the limit or the offset)
[optional]
values: A dict of python objects or a list of raw SOAP values to bind
to the pql_query.
Returns:
a list of lists with the first being the header row and each subsequent
list being a row of results.
|
juraj-google-style
|
def _resolve_attribute(self, attribute):
value = self.attributes[attribute]
if (not value):
return None
resolved_value = re.sub('\\$\\((.*?)\\)', self._resolve_attribute_match, value)
return resolved_value
|
Recursively replaces references to other attributes with their value.
Args:
attribute (str): The name of the attribute to resolve.
Returns:
str: The resolved value of 'attribute'.
|
codesearchnet
|
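A standalone sketch of the `$(name)` resolution loop in `_resolve_attribute`, using a plain dict in place of `self.attributes`; each reference is substituted recursively.

import re

attributes = {
    'root': '/opt/app',
    'logs': '$(root)/logs',
    'today': '$(logs)/2024-01-01.log',
}

def resolve(attribute):
    value = attributes[attribute]
    if not value:
        return None
    return re.sub(r'\$\((.*?)\)', lambda m: resolve(m.group(1)), value)

print(resolve('today'))   # /opt/app/logs/2024-01-01.log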
def collection(self, **kwargs):
path = self._get_path('collection')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Search for collections by name.
Args:
query: CGI-escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
Returns:
A dict representation of the JSON returned from the API.
|
codesearchnet
|
def delete_meta_features(self, path):
if os.path.exists(self.meta_features_path(path)):
os.remove(self.meta_features_path(path))
|
Deletes the meta-features of a base learner if the file exists.
Args:
path (str): Absolute/local path of xcessiv folder
|
codesearchnet
|
def fetch_git_package(self, config):
from git import Repo
ref = self.determine_git_ref(config)
dir_name = self.sanitize_git_path(uri=config['uri'], ref=ref)
cached_dir_path = os.path.join(self.package_cache_dir, dir_name)
if not os.path.isdir(cached_dir_path):
logger.debug("Remote repo %s does not appear to have been "
"previously downloaded - starting clone to %s",
config['uri'],
cached_dir_path)
tmp_dir = tempfile.mkdtemp(prefix='stacker')
try:
tmp_repo_path = os.path.join(tmp_dir, dir_name)
with Repo.clone_from(config['uri'], tmp_repo_path) as repo:
repo.head.reference = ref
repo.head.reset(index=True, working_tree=True)
shutil.move(tmp_repo_path, self.package_cache_dir)
finally:
shutil.rmtree(tmp_dir)
else:
logger.debug("Remote repo %s appears to have been previously "
"cloned to %s -- bypassing download",
config['uri'],
cached_dir_path)
self.update_paths_and_config(config=config,
pkg_dir_name=dir_name)
|
Make a remote git repository available for local use.
Args:
config (dict): git config dictionary
|
juraj-google-style
|
def get_config(self, key_name):
if key_name in self.config:
return self.config.get(key_name)
return self.Configuration.default(key_name, inst=self)
|
Return configuration value
Args:
key_name (str): configuration key
Returns:
The value for the specified configuration key, or if not found
in the config the default value specified in the Configuration Handler
class specified inside this component
|
juraj-google-style
|
def timeparse(sval):
match = re.match(r'\s*' + TIMEFORMAT + r'\s*$', sval, re.I)
if not match or not match.group(0).strip():
return
mdict = match.groupdict()
return sum(
MULTIPLIERS[k] * cast(v) for (k, v) in mdict.items() if v is not None)
|
Parse a time expression, returning it as a number of seconds. If
possible, the return value will be an `int`; if this is not
possible, the return will be a `float`. Returns `None` if a time
expression cannot be parsed from the given string.
Arguments:
- `sval`: the string value to parse
>>> timeparse('1m24s')
84
>>> timeparse('1.2 minutes')
72
>>> timeparse('1.2 seconds')
1.2
|
juraj-google-style
|
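A cut-down sketch of the same approach as `timeparse`, with hypothetical `TIMEFORMAT`/`MULTIPLIERS`/`cast` stand-ins that only cover minutes and seconds (the real constants handle more units):

import re

TIMEFORMAT = (r'((?P<mins>\d+(\.\d+)?)\s*m(in(ute)?s?)?)?'
              r'\s*((?P<secs>\d+(\.\d+)?)\s*s(ec(ond)?s?)?)?')
MULTIPLIERS = {'mins': 60, 'secs': 1}

def cast(value):
    return int(value, 10) if value.isdigit() else float(value)

def timeparse_sketch(sval):
    match = re.match(r'\s*' + TIMEFORMAT + r'\s*$', sval, re.I)
    if not match or not match.group(0).strip():
        return None
    mdict = match.groupdict()
    return sum(MULTIPLIERS[k] * cast(v) for k, v in mdict.items() if v is not None)

print(timeparse_sketch('1m24s'))        # 84
print(timeparse_sketch('1.2 seconds'))  # 1.2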
def _ReadCacheEntry(self, file_object, display_name, block_size):
file_offset = file_object.get_offset()
cache_entry_header_map = self._GetDataTypeMap('firefox_cache1_entry_header')
try:
(cache_entry_header, header_data_size) = self._ReadStructureFromFileObject(file_object, file_offset, cache_entry_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError('Unable to parse Firefox cache entry header with error: {0!s}'.format(exception))
if (not self._ValidateCacheEntryHeader(cache_entry_header)):
file_offset = (block_size - header_data_size)
file_object.seek(file_offset, os.SEEK_CUR)
raise IOError('Not a valid Firefox cache record.')
body_data_size = (cache_entry_header.request_size + cache_entry_header.information_size)
cache_entry_body_data = self._ReadData(file_object, (file_offset + header_data_size), body_data_size)
url = cache_entry_body_data[:cache_entry_header.request_size].decode('ascii').rstrip('\x00')
(request_method, response_code) = self._ParseHTTPHeaders(cache_entry_body_data[cache_entry_header.request_size:], file_offset, display_name)
cache_entry_data_size = (header_data_size + body_data_size)
(_, remaining_data_size) = divmod(cache_entry_data_size, block_size)
if (remaining_data_size > 0):
file_object.seek((block_size - remaining_data_size), os.SEEK_CUR)
event_data = FirefoxCacheEventData()
event_data.data_size = cache_entry_header.cached_data_size
event_data.fetch_count = cache_entry_header.fetch_count
event_data.info_size = cache_entry_header.information_size
event_data.location = cache_entry_header.location
event_data.request_method = request_method
event_data.request_size = cache_entry_header.request_size
event_data.response_code = response_code
event_data.url = url
event_data.version = '{0:d}.{1:d}'.format(cache_entry_header.major_format_version, cache_entry_header.minor_format_version)
return (cache_entry_header, event_data)
|
Reads a cache entry.
Args:
file_object (dfvfs.FileIO): a file-like object.
display_name (str): display name.
block_size (int): block size.
Returns:
tuple: containing:
firefox_cache1_entry_header: cache record header structure.
FirefoxCacheEventData: event data.
Raises:
IOError: if the cache record header cannot be validated.
OSError: if the cache record header cannot be validated.
ParseError: if the cache record header cannot be parsed.
|
codesearchnet
|
def input_elements(self, instruction_id, expected_inputs, abort_callback=None):
raise NotImplementedError(type(self))
|
Returns an iterable of all Element.Data and Element.Timers bundles for
instruction_id.
This iterable terminates only once the full set of data has been received
for each of the expected transforms. It may block waiting for more data.
Args:
instruction_id: which instruction the results must belong to
expected_inputs: which transforms to wait on for completion
abort_callback: a callback invoked while blocking that returns whether
to abort before consuming all the data
|
github-repos
|
def __init__(self, template, capacity, max_length, scope):
self._capacity = capacity
self._max_length = max_length
with tf.variable_scope(scope) as var_scope:
self._scope = var_scope
self._length = tf.Variable(tf.zeros(capacity, tf.int32), False)
self._buffers = tools.nested.map(
lambda x: tf.Variable(tf.zeros(
[capacity, max_length] + x.shape.as_list(), x.dtype), False),
template)
|
Create a memory that stores episodes.
Each transition tuple consists of quantities specified by the template.
These quantities would typically be observations, actions, rewards, and
done indicators.
Args:
template: Nested tensors to derive shapes and dtypes of each transition.
capacity: Number of episodes, or rows, hold by the memory.
max_length: Allocated sequence length for the episodes.
scope: Variable scope to use for internal variables.
|
juraj-google-style
|
def is_valid(self):
if self.key is None:
raise ValueError('Invalid DisplayDataItem %s. Key must not be None.' % self)
if self.namespace is None:
raise ValueError('Invalid DisplayDataItem %s. Namespace must not be None' % self)
if self.value is None:
raise ValueError('Invalid DisplayDataItem %s. Value must not be None' % self)
if self.type is None:
raise ValueError('Invalid DisplayDataItem. Value {} is of an unsupported type.'.format(self.value))
|
Checks that all the necessary fields of the :class:`DisplayDataItem`
are filled in. It checks that neither key, namespace, value or type are
:data:`None`.
Raises:
ValueError: If the item does not have a key, namespace,
value or type.
|
github-repos
|
def _autodetect_num_gpus():
proc_gpus_path = '/proc/driver/nvidia/gpus'
if os.path.isdir(proc_gpus_path):
return len(os.listdir(proc_gpus_path))
return 0
|
Attempt to detect the number of GPUs on this machine.
TODO(rkn): This currently assumes Nvidia GPUs and Linux.
Returns:
The number of GPUs if any were detected, otherwise 0.
|
codesearchnet
|
def event(self, **kwargs):
if self.callback.noargs and self.streams == []:
self.param.warning(
'No streams declared. To update a DynamicMaps using '
'generators (or callables without arguments) use streams=[Next()]')
return
if self.streams == []:
self.param.warning('No streams on DynamicMap, calling event '
'will have no effect')
return
stream_params = set(util.stream_parameters(self.streams))
invalid = [k for k in kwargs.keys() if k not in stream_params]
if invalid:
msg = 'Key(s) {invalid} do not correspond to stream parameters'
raise KeyError(msg.format(invalid = ', '.join('%r' % i for i in invalid)))
streams = []
for stream in self.streams:
contents = stream.contents
applicable_kws = {k:v for k,v in kwargs.items()
if k in set(contents.keys())}
if not applicable_kws and contents:
continue
streams.append(stream)
rkwargs = util.rename_stream_kwargs(stream, applicable_kws, reverse=True)
stream.update(**rkwargs)
Stream.trigger(streams)
|
Updates attached streams and triggers events
Automatically find streams matching the supplied kwargs to
update and trigger events on them.
Args:
**kwargs: Events to update streams with
|
juraj-google-style
|
def get_input_embeddings(self) -> keras.layers.Layer:
main_layer = getattr(self, self.base_model_prefix, self)
if main_layer is not self:
return main_layer.get_input_embeddings()
else:
raise NotImplementedError
|
Returns the model's input embeddings layer.
Returns:
`tf.Variable`: The embeddings layer mapping vocabulary to hidden states.
|
github-repos
|
def SampleMemoryUsage(self, parser_name):
if self._memory_profiler:
used_memory = (self._process_information.GetUsedMemory() or 0)
self._memory_profiler.Sample(parser_name, used_memory)
|
Takes a sample of the memory usage for profiling.
Args:
parser_name (str): name of the parser.
|
codesearchnet
|
def hours(value: Union[int, float]) -> Duration:
return float(value * 60 * 60)
|
Converts input value from hours to a `Duration` in seconds.
Example:
```python
>>> timestamps = [tp.duration.hours(i) for i in [1, 2, 10]]
>>> timestamps
[3600.0, 7200.0, 36000.0]
>>> # Usage in a window operation
>>> a = tp.event_set(timestamps=timestamps, features={"f1": [1, 5, -5]})
>>> a.moving_sum(window_length=tp.duration.hours(2))
indexes: ...
timestamps: [ 3600. 7200. 36000.]
'f1': [ 1 6 -5]
...
```
Args:
value: Number of hours.
Returns:
Equivalent number of seconds.
|
github-repos
|
def not_equal(x, y):
return math_ops.not_equal(x, y)
|
Element-wise inequality between two tensors.
Args:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
|
github-repos
|
def get_errors(self):
return [{cr.component_name: cr.get_error()} for cr in self.component_results if cr.has_error()]
|
If there were any business errors fetching data for this property,
returns the error messages.
Returns:
list - a list of dicts mapping each component name to its error message;
empty if there were no errors.
|
codesearchnet
|
def polymorph_response(response, poly, bqm, penalty_strength=None, keep_penalty_variables=True, discard_unsatisfied=False):
record = response.record
penalty_vector = penalty_satisfaction(response, bqm)
original_variables = bqm.variables
if discard_unsatisfied:
samples_to_keep = list(map(bool, list(penalty_vector)))
penalty_vector = np.array(([True] * np.sum(samples_to_keep)))
else:
samples_to_keep = list(map(bool, ([1] * len(record.sample))))
samples = record.sample[samples_to_keep]
energy_vector = poly.energies((samples, response.variables))
if (not keep_penalty_variables):
original_variables = poly.variables
idxs = [response.variables.index[v] for v in original_variables]
samples = np.asarray(samples[:, idxs])
(num_samples, num_variables) = np.shape(samples)
datatypes = [('sample', np.dtype(np.int8), (num_variables,)), ('energy', energy_vector.dtype), ('penalty_satisfaction', penalty_vector.dtype)]
datatypes.extend(((name, record[name].dtype, record[name].shape[1:]) for name in record.dtype.names if (name not in {'sample', 'energy'})))
data = np.rec.array(np.empty(num_samples, dtype=datatypes))
data.sample = samples
data.energy = energy_vector
for name in record.dtype.names:
if (name not in {'sample', 'energy'}):
data[name] = record[name][samples_to_keep]
data['penalty_satisfaction'] = penalty_vector
response.info['reduction'] = bqm.info['reduction']
if (penalty_strength is not None):
response.info['penalty_strength'] = penalty_strength
return SampleSet(data, original_variables, response.info, response.vartype)
|
Transforms the sampleset for the higher order problem.
Given a response of a penalized HUBO, this function creates a new sampleset
object, taking into account penalty information and calculates the
energies of samples for the higher-order problem.
Args:
response (:obj:`.SampleSet`): response for a penalized hubo.
poly (:obj:`.BinaryPolynomial`):
A binary polynomial.
bqm (:obj:`dimod.BinaryQuadraticModel`): Binary quadratic model of the
reduced problem.
penalty_strength (float, optional): default is None, if provided,
will be added to the info field of the returned sampleSet object.
keep_penalty_variables (bool, optional): default is True. if False
will remove the variables used for penalty from the samples
discard_unsatisfied (bool, optional): default is False. If True
will discard samples that do not satisfy the penalty conditions.
Returns:
:obj:`.SampleSet`: A SampleSet object that has additional penalty
information. The energies of samples are calculated for the HUBO
ignoring the penalty variables.
|
codesearchnet
|
def extract_code(end_mark, current_str, str_array, line_num):
if end_mark not in current_str:
reached_end = False
line_num += 1
while reached_end is False:
next_line = str_array[line_num]
if end_mark in next_line:
reached_end = True
else:
line_num += 1
current_str += next_line
clean_str = current_str.split(end_mark)[0]
return {'current_str': clean_str, 'line_num': line_num}
|
Extract a multi-line string from a string array, up to a specified end marker.
Args:
end_mark (str): The end mark string to match for.
current_str (str): The first line of the string array.
str_array (list): An array of strings (lines).
line_num (int): The current offset into the array.
Returns:
dict: The extracted string up to the end marker ('current_str') and the
updated line offset ('line_num').
|
juraj-google-style
|
def ParseOptions(cls, options, configuration_object):
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
analysis_plugins = cls._ParseStringOption(options, 'analysis_plugins')
if analysis_plugins and analysis_plugins.lower() != 'list':
plugin_names = analysis_manager.AnalysisPluginManager.GetPluginNames()
analysis_plugins = [name.strip() for name in analysis_plugins.split(',')]
difference = set(analysis_plugins).difference(plugin_names)
if difference:
raise errors.BadConfigOption(
'Non-existent analysis plugins specified: {0:s}'.format(
' '.join(difference)))
setattr(configuration_object, '_analysis_plugins', analysis_plugins)
|
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
|
juraj-google-style
|
def _SetHashers(self, hasher_names_string):
if not hasher_names_string or hasher_names_string == 'none':
return
analyzer_object = analyzers_manager.AnalyzersManager.GetAnalyzerInstance(
'hashing')
analyzer_object.SetHasherNames(hasher_names_string)
self._analyzers.append(analyzer_object)
|
Sets the hasher names.
Args:
hasher_names_string (str): comma separated names of the hashers
to enable, where 'none' disables the hashing analyzer.
|
juraj-google-style
|
def get_v2_constants(module: Any) -> Sequence[str]:
constants_v2 = []
tensorflow_constants_attr = API_ATTRS[TENSORFLOW_API_NAME].constants
if hasattr(module, tensorflow_constants_attr):
constants_v2.extend(getattr(module, tensorflow_constants_attr))
return constants_v2
|
Get a list of TF 2.0 constants in this module.
Args:
module: TensorFlow module.
Returns:
List of all API constants under the given module.
|
github-repos
|
def __init__(self, name):
super(DependencyDefinition, self).__init__()
self.dpkg_name = None
self.is_optional = False
self.l2tbinaries_macos_name = None
self.l2tbinaries_name = None
self.maximum_version = None
self.minimum_version = None
self.name = name
self.pypi_name = None
self.python2_only = False
self.python3_only = False
self.rpm_name = None
self.version_property = None
|
Initializes a dependency configuration.
Args:
name (str): name of the dependency.
|
juraj-google-style
|
def range(self, dimension, data_range=True, dimension_range=True):
dimension = self.get_dimension(dimension)
if dimension is None or (not data_range and not dimension_range):
return (None, None)
elif all(util.isfinite(v) for v in dimension.range) and dimension_range:
return dimension.range
elif data_range:
if dimension in self.kdims+self.vdims:
dim_vals = self.dimension_values(dimension.name)
lower, upper = util.find_range(dim_vals)
else:
dname = dimension.name
match_fn = lambda x: dname in x.kdims + x.vdims
range_fn = lambda x: x.range(dname)
ranges = self.traverse(range_fn, [match_fn])
lower, upper = util.max_range(ranges)
else:
lower, upper = (np.NaN, np.NaN)
if not dimension_range:
return lower, upper
return util.dimension_range(lower, upper, dimension.range, dimension.soft_range)
|
Return the lower and upper bounds of values along dimension.
Args:
dimension: The dimension to compute the range on.
data_range (bool): Compute range from data values
dimension_range (bool): Include Dimension ranges
Whether to include Dimension range and soft_range
in range calculation
Returns:
Tuple containing the lower and upper bound
|
juraj-google-style
|
def set_attribute(self, obj, attr, value):
if isinstance(obj, MutableMapping):
obj[attr] = value
else:
setattr(obj, attr, value)
|
Set value of attribute in given object instance.
Reason for existence of this method is the fact that 'attribute' can
be also a object's key if it is a dict or any other kind of mapping.
Args:
obj (object): object instance to modify
attr (str): attribute (or key) to change
value: value to set
|
juraj-google-style
|
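A runnable sketch of the mapping-vs-object branch in `set_attribute`:

from collections.abc import MutableMapping

class Box:
    pass

def set_attribute(obj, attr, value):
    if isinstance(obj, MutableMapping):
        obj[attr] = value           # mappings are updated by key
    else:
        setattr(obj, attr, value)   # anything else by attribute

d, b = {}, Box()
set_attribute(d, 'status', 'ok')
set_attribute(b, 'status', 'ok')
print(d['status'], b.status)        # ok ok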
def _atoms(atoms_string):
atoms = {}
for split in atoms_string.split(','):
sites = split.split('.')
el = sites.pop(0)
sites = list(map(int, sites))
atoms[el] = (np.array(sites) - 1)
return atoms
|
Parse the atom string.
Args:
atoms_string (str): The atoms to plot, in the form ``"C.1.2.3,"``.
Returns:
dict: The atomic indices over which to sum the DOS. Formatted as::
{Element: [atom_indices]}.
Indices are zero indexed for each atomic species. If an element symbol
is included with an empty list, then all sites for that species are
considered.
|
codesearchnet
|
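A standalone version of the `_atoms` parsing idea (assuming NumPy), converting the 1-based site indices in the string to the zero-based indices the docstring describes:

import numpy as np

def parse_atoms(atoms_string):
    atoms = {}
    for split in atoms_string.split(','):
        sites = split.split('.')
        el = sites.pop(0)
        atoms[el] = np.array(list(map(int, sites))) - 1   # zero-based site indices
    return atoms

print(parse_atoms('C.1.2.3,O.2'))   # {'C': array([0, 1, 2]), 'O': array([1])}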
def which(cmd):
def is_exe(fp):
return os.path.isfile(fp) and os.access(fp, os.X_OK)
fpath, fname = os.path.split(cmd)
if fpath:
if is_exe(cmd):
return cmd
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, cmd)
if is_exe(exe_file):
return exe_file
return None
|
Returns the full path to an executable.
Args:
cmd (str): Executable command to search for.
Returns:
(str) Full path to command. None if it is not found.
Example::
full_path_to_python = which("python")
|
juraj-google-style
|
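For comparison, the standard library performs the same PATH lookup; the `is_exe` test below mirrors the helper inside `which`.

import os
import shutil

def is_exe(fp):
    # Same test used inside which(): a regular file with the execute bit set.
    return os.path.isfile(fp) and os.access(fp, os.X_OK)

found = shutil.which('python3') or shutil.which('python')
print(found, is_exe(found) if found else '(no interpreter on PATH)')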
def _associate_click_tags(self, feed_item, creative):
click_tags = []
for click_tag in feed_item.get('click_tags', []):
lp = self.landing_page_dao.get(click_tag, column_name=FieldMap.CLICK_TAG_LANDING_PAGE_ID)
ct = {'eventName': click_tag.get(FieldMap.CLICK_TAG_EVENT, None), 'name': click_tag.get(FieldMap.CLICK_TAG_NAME, None), 'clickThroughUrl': {}}
if click_tag.get(FieldMap.CLICK_TAG_LANDING_PAGE_ID):
ct['clickThroughUrl']['landingPageId'] = click_tag.get(FieldMap.CLICK_TAG_LANDING_PAGE_ID) if not lp else lp['id']
elif click_tag.get(FieldMap.CLICK_TAG_CUSTOM_CLICK_THROUGH_URL):
ct['clickThroughUrl']['customClickThroughUrl'] = click_tag.get(FieldMap.CLICK_TAG_CUSTOM_CLICK_THROUGH_URL)
click_tags.append(ct)
if click_tags:
creative['clickTags'] = click_tags
|
Associate click tags with the respective creative DCM object.
This method transforms all child feed mapped earlier into DCM formatted
associations within the creative object so it can be pushed to the API.
Args:
feed_item: Feed item representing the creative.
creative: DCM creative object being created or updated.
|
github-repos
|
def PrivateKeyFromWIF(wif):
if wif is None or len(wif) != 52:
raise ValueError('Please provide a wif with a length of 52 bytes (LEN: {0:d})'.format(len(wif)))
data = base58.b58decode(wif)
length = len(data)
if length != 38 or data[0] != 128 or data[33] != 1:
raise ValueError('Invalid format!')
checksum = Crypto.Hash256(data[0:34])[0:4]
if (checksum != data[34:]):
raise ValueError('Invalid WIF Checksum!')
return data[1:33]
|
Get the private key from a WIF key
Args:
wif (str): The wif key
Returns:
bytes: The private key
|
codesearchnet
|
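A round-trip sketch of the WIF layout the checks in `PrivateKeyFromWIF` assume (version byte 0x80, 32-byte key, compressed flag 0x01, 4-byte checksum), assuming the `base58` package and that `Crypto.Hash256` is a double SHA-256:

import hashlib
import os

import base58  # assumption: the `base58` PyPI package is installed

def sha256d(data):
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

key = os.urandom(32)
payload = b'\x80' + key + b'\x01'              # version byte, key, compressed flag
wif = base58.b58encode(payload + sha256d(payload)[:4]).decode()

data = base58.b58decode(wif)
assert sha256d(data[:34])[:4] == data[34:]     # checksum check, as in PrivateKeyFromWIF
assert data[1:33] == key                       # recovered private key
print(len(wif))                                # 52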
def temporal_latent_to_dist(name, x, hparams, output_channels=None):
(_, _, width, _, res_channels) = common_layers.shape_list(x)
if (output_channels is None):
output_channels = res_channels
dilation_rates = get_dilation_rates(hparams, width)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
h = x
for i in range(hparams.latent_encoder_depth):
if hparams.latent_apply_dilations:
h2 = dilated_conv_stack(('dil_latent_3d_res_%d' % i), h, mid_channels=hparams.latent_encoder_width, output_channels=res_channels, dilation_rates=dilation_rates, activation=hparams.latent_activation, dropout=hparams.latent_dropout)
else:
h2 = conv_stack(('latent_3d_res_%d' % i), h, mid_channels=hparams.latent_encoder_width, output_channels=res_channels, activation=hparams.latent_activation, dropout=hparams.latent_dropout)
h += h2
h = h[:, -1, :, :, :]
h = conv('res_final', h, apply_actnorm=False, conv_init='zeros', output_channels=(2 * output_channels), filter_size=[1, 1])
mean, log_scale = h[:, :, :, 0::2], h[:, :, :, 1::2]
return tfp.distributions.Normal(mean, tf.exp(log_scale))
|
Network that maps a time-indexed list of 3-D latents to a gaussian.
Args:
name: variable scope.
x: List of 4-D Tensors indexed by time, (NHWC)
hparams: tf.contrib.training.Hparams.
output_channels: int, Number of channels of the output gaussian mean.
Returns:
dist: tfp.distributions.Normal
|
codesearchnet
|
def _auth(f):
@wraps(f)
def method(self, *args, **kwargs):
if not self._auth_token or datetime.utcnow() >= self._last_auth + timedelta(minutes=10):
self.auth_refresh()
return f(self, *args, **kwargs)
return method
|
Makes sure the request has a valid authorization jwt before calling the wrapped function.
It does this by checking the timestamp of the last jwt and if > 10 minutes have elapsed,
it refreshes its existing jwt from the server.
Args:
f: Function to wrap
Returns:
Function, f
|
juraj-google-style
|
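A self-contained sketch of the `_auth` wrapping pattern, with a hypothetical client whose `auth_refresh` just stamps a token locally instead of hitting a server:

from datetime import datetime, timedelta
from functools import wraps

def _auth(f):
    @wraps(f)
    def method(self, *args, **kwargs):
        # Refresh when there is no token yet, or the last one is over 10 minutes old.
        if not self._auth_token or datetime.utcnow() >= self._last_auth + timedelta(minutes=10):
            self.auth_refresh()
        return f(self, *args, **kwargs)
    return method

class Client:
    def __init__(self):
        self._auth_token = None
        self._last_auth = datetime.utcnow()

    def auth_refresh(self):
        self._auth_token = 'fresh-jwt'       # hypothetical: a real client calls the server here
        self._last_auth = datetime.utcnow()

    @_auth
    def whoami(self):
        return self._auth_token

print(Client().whoami())   # fresh-jwt; auth_refresh() ran on first use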
def _infer_all_output_dims(self, inputs):
batch_size = tf.expand_dims(tf.shape(inputs)[0], 0)
out_channels = (self.output_channels,)
if (self._n == 1):
out_shape = ((1,) + self.output_shape)
else:
out_shape = self.output_shape
if self._data_format.startswith('NC'):
out_shape_tuple = (out_channels + out_shape)
elif (self._data_format.startswith('N') and self._data_format.endswith('C')):
out_shape_tuple = (out_shape + out_channels)
output_shape = tf.concat([batch_size, out_shape_tuple], 0)
return output_shape
|
Calculate the output shape for `inputs` after a deconvolution.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16` or `tf.float32`.
Returns:
output_shape: A tensor of shape (`batch_size`, `conv_output_shape`).
|
codesearchnet
|
def remove_op_callback(op_callback):
ctx = context.context()
ctx.remove_op_callback(op_callback)
if ctx.executing_eagerly() and (not ctx.op_callbacks):
execute.execute = execute.quick_execute
|
Remove an already-added op callback.
Args:
op_callback: The op callback to be removed.
Raises:
KeyError: If `op_callback` has not been registered using `add_op_callback()`
before.
|
github-repos
|
def add_step_timing_signal(x, step, hparams):
if hparams.recurrence_type == "act":
num_steps = hparams.act_max_steps
else:
num_steps = hparams.num_rec_steps
channels = common_layers.shape_list(x)[-1]
if hparams.step_timing_signal_type == "learned":
signal = common_attention.get_layer_timing_signal_learned_1d(
channels, step, num_steps)
elif hparams.step_timing_signal_type == "sinusoid":
signal = common_attention.get_layer_timing_signal_sinusoid_1d(
channels, step, num_steps)
if hparams.add_or_concat_timing_signal == "add":
x_with_timing = x + common_layers.cast_like(signal, x)
elif hparams.add_or_concat_timing_signal == "concat":
batch_size = common_layers.shape_list(x)[0]
length = common_layers.shape_list(x)[1]
signal_tiled = tf.tile(signal, [batch_size, length, 1])
x_with_timing = tf.concat((x, signal_tiled), axis=-1)
return x_with_timing
|
Add n-dimensional embedding as the step (vertical) timing signal.
Args:
x: a tensor with shape [batch, length, depth]
step: step
hparams: model hyper parameters
Returns:
a Tensor with the same shape as x.
|
juraj-google-style
|
def AppendFlagValues(self, flag_values):
for flag_name, flag in six.iteritems(flag_values.FlagDict()):
if flag_name == flag.name:
try:
self[flag_name] = flag
except exceptions.DuplicateFlagError:
raise exceptions.DuplicateFlagError.from_flag(
flag_name, self, other_flag_values=flag_values)
|
Appends flags registered in another FlagValues instance.
Args:
flag_values: registry to copy from
|
juraj-google-style
|
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
if session is None:
    session = stack.get_default_session()
    if session is None:
        raise ValueError('Cannot evaluate tensor using `eval()`: No default session is registered. Use `with sess.as_default()` or pass an explicit session to `eval(session=sess)`')
    if session.graph is not graph:
        raise ValueError("Cannot use the default session to evaluate tensor: the tensor's graph is different from the session's graph. Pass an explicit session to `eval(session=sess)`.")
elif session.graph is not graph:
    raise ValueError("Cannot use the given session to evaluate tensor: the tensor's graph is different from the session's graph.")
return session.run(tensors, feed_dict)
|
Uses the default session to evaluate one or more tensors.
Args:
tensors: A single Tensor, or a list of Tensor objects.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which the tensors are defined.
session: (Optional) A different session to use to evaluate "tensors".
Returns:
Either a single numpy ndarray if "tensors" is a single tensor; or a list
of numpy ndarrays that each correspond to the respective element in
"tensors".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
|
github-repos
|
def read(alias_name, allow_none=False):
warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)
return core.read('{0}_PORT'.format(alias_name), default=None, allow_none=allow_none)
|
Get the raw docker link value.
Get the raw environment variable for the docker link
Args:
alias_name: The environment variable name
default: The default value if the link isn't available
allow_none: If the return value can be `None` (i.e. optional)
|
codesearchnet
|
async def from_api_token(cls, token=None, api_cls=SlackBotApi):
api = (api_cls.from_env() if (token is None) else api_cls(api_token=token))
data = (await api.execute_method(cls.API_AUTH_ENDPOINT))
return cls(data['user_id'], data['user'], api)
|
Create a new instance from the API token.
Arguments:
token (:py:class:`str`, optional): The bot's API token
(defaults to ``None``, which means looking in the
environment).
api_cls (:py:class:`type`, optional): The class to create
as the ``api`` argument for API access (defaults to
:py:class:`aslack.slack_api.SlackBotApi`).
Returns:
:py:class:`SlackBot`: The new instance.
|
codesearchnet
|
def push(self, value):
stream = DataStream.FromEncoded(value.stream)
if stream.stream_type == DataStream.OutputType:
if len(self.streaming_data) == self.streaming_length:
raise StorageFullError('Streaming buffer full')
self.streaming_data.append(value)
else:
if len(self.storage_data) == self.storage_length:
raise StorageFullError('Storage buffer full')
self.storage_data.append(value)
|
Store a new value for the given stream.
Args:
value (IOTileReading): The value to store. The stream
parameter must have the correct value
|
juraj-google-style
|
def parse(cls, op):
for event in cls:
if (event.value == int(op)):
return event
return None
|
Gets the enum for the op code
Args:
op: value of the op code (will be casted to int)
Returns:
The enum that matches the op code
|
codesearchnet
|
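The same lookup as `parse`, written against a concrete `Enum` as a runnable sketch:

from enum import Enum

class OpCode(Enum):
    READ = 1
    WRITE = 2
    ERASE = 3

    @classmethod
    def parse(cls, op):
        for event in cls:
            if event.value == int(op):
                return event
        return None

print(OpCode.parse('2'))   # OpCode.WRITE
print(OpCode.parse(9))     # None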
def find_repo_by_name(name, repo_dir=None):
if repo_dir is None:
repo_dir = config.get('template_repos')
ret, out, _ = utils.run_command([
'find',
repo_dir,
'-name',
'*.json',
], )
repos = [
TemplateRepository.from_url(line.strip()) for line in out.split('\n')
if len(line.strip())
]
for repo in repos:
if repo.name == name:
return repo
raise RuntimeError('Could not find repo %s' % name)
|
Searches for the given repo name inside repo_dir (will use the config value
'template_repos' if no repo dir is passed), and will raise an exception if it
is not found.
Args:
name (str): Name of the repo to search
repo_dir (str): Directory where to search the repo
Return:
TemplateRepository: the repo with the given name
Raises:
RuntimeError: if not found
|
juraj-google-style
|
def kill_test_logger(logger):
for h in list(logger.handlers):
logger.removeHandler(h)
if isinstance(h, logging.FileHandler):
h.close()
|
Cleans up a test logger object by removing all of its handlers.
Args:
logger: The logging object to clean up.
|
juraj-google-style
|
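A small self-contained check of the cleanup behaviour described above, assuming `kill_test_logger` from the entry is in scope; the logger name and file path are arbitrary.

import logging
import os
import tempfile

log_path = os.path.join(tempfile.mkdtemp(), 'test.log')
logger = logging.getLogger('demo_cleanup')
logger.addHandler(logging.FileHandler(log_path))

kill_test_logger(logger)          # removes (and closes) the file handler
assert logger.handlers == []
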
def run_matrix(self, matrix_definition, document):
matrix = Matrix(matrix_definition, ('matrix(parallel)' in document))
process_data = MatrixProcessData()
process_data.options = self.options
process_data.pipeline = document['pipeline']
process_data.model = ({} if ('model' not in document) else document['model'])
process_data.hooks = Hooks(document)
return matrix.process(process_data)
|
Running pipeline via a matrix.
Args:
matrix_definition (dict): one concrete matrix item.
document (dict): spline document (complete) as loaded from yaml file.
|
codesearchnet
|
def eval(source, optimize=True, output=sys.stdout, input=sys.stdin, steps=(- 1)):
machine = execute(source, optimize=optimize, output=output, input=input, steps=steps)
ds = machine.stack
if (len(ds) == 0):
return None
elif (len(ds) == 1):
return ds[(- 1)]
else:
return ds
|
Compiles and runs program, returning the values on the stack.
To return the machine instead, see execute().
Args:
source: The program source to compile and run.
optimize: Whether to optimize the code after parsing it.
output: Stream which program can write output to.
input: Stream which program can read input from.
steps: An optional maximum number of instructions to execute on the
virtual machine. Set to -1 for no limit.
Returns:
None: If the stack is empty
obj: If the stack contains a single value
[obj, obj, ...]: If the stack contains many values
|
codesearchnet
|
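The return-value convention of `eval` above (None for an empty stack, a single object, or the whole stack) can be sketched on its own, without the surrounding virtual machine:

def unwrap_stack(stack):
    # Mirrors the tail of eval() above: empty -> None, one value -> that value,
    # otherwise the full stack.
    if len(stack) == 0:
        return None
    elif len(stack) == 1:
        return stack[-1]
    return stack

assert unwrap_stack([]) is None
assert unwrap_stack([42]) == 42
assert unwrap_stack([1, 2, 3]) == [1, 2, 3]
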
def message_upperbound(self, tree, spins, subtheta):
energy_sources = set()
for (v, subtree) in tree.items():
assert all(((u in spins) for u in self._ancestors[v]))
def energy_contributions():
(yield subtheta.linear[v])
for (u, bias) in subtheta.adj[v].items():
if (u in spins):
(yield Times(limitReal(spins[u]), bias))
energy = Plus(energy_contributions())
if subtree:
spins[v] = 1.0
plus = self.message_upperbound(subtree, spins, subtheta)
spins[v] = (- 1.0)
minus = self.message_upperbound(subtree, spins, subtheta)
del spins[v]
else:
plus = minus = limitReal(0.0)
m = FreshSymbol(REAL)
self.assertions.update({LE(m, Plus(energy, plus)), LE(m, Plus(Times(energy, limitReal((- 1.0))), minus))})
energy_sources.add(m)
return Plus(energy_sources)
|
Determine an upper bound on the energy of the elimination tree.
Args:
tree (dict): The current elimination tree
spins (dict): The current fixed spins
subtheta (dict): Theta with spins fixed.
Returns:
The formula for the energy of the tree.
|
codesearchnet
|
def get_random_url(ltd='com'):
    # NOTE: the URL literal was truncated in the source; everything after
    # 'https:' is an assumed reconstruction using a random host name.
    import uuid
    url = ['https://www.', uuid.uuid4().hex, '.', ltd]
    return ''.join(url)
|
Get a random url with the given ltd.
Args:
ltd (str): The ltd to use (e.g. com).
Returns:
str: The random url.
|
codesearchnet
|
def tomography_basis(basis, prep_fun=None, meas_fun=None):
ret = TomographyBasis(basis)
ret.prep_fun = prep_fun
ret.meas_fun = meas_fun
return ret
|
Generate a TomographyBasis object.
See TomographyBasis for further details.
Args:
prep_fun (callable) optional: the function which adds preparation
gates to a circuit.
meas_fun (callable) optional: the function which adds measurement
gates to a circuit.
Returns:
TomographyBasis: A tomography basis.
|
codesearchnet
|
def traverse_pagination(response, endpoint):
results = response.get('results', [])
next_page = response.get('next')
while next_page:
querystring = parse_qs(urlparse(next_page).query, keep_blank_values=True)
response = endpoint.get(**querystring)
results += response.get('results', [])
next_page = response.get('next')
return results
|
Traverse a paginated API response.
Extracts and concatenates "results" (list of dict) returned by DRF-powered
APIs.
Arguments:
response (Dict): Current response dict from service API
endpoint (slumber Resource object): slumber Resource object from edx-rest-api-client
Returns:
list of dict.
|
codesearchnet
|
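A hedged sketch of the pagination walk above, assuming `traverse_pagination` from the entry is in scope; the stub endpoint and page payloads below stand in for a real slumber Resource and are entirely made up.

class StubEndpoint:
    def __init__(self, pages_by_number):
        self._pages = pages_by_number

    def get(self, **query):
        # parse_qs yields list values, e.g. {'page': ['2']}
        page_number = int(query['page'][0])
        return self._pages[page_number]

pages = {
    2: {'results': [{'id': 2}], 'next': None},
}
first_page = {'results': [{'id': 1}],
              'next': 'https://api.example.com/items/?page=2'}

all_items = traverse_pagination(first_page, StubEndpoint(pages))
assert [item['id'] for item in all_items] == [1, 2]
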
def repeat(element, count):
if count < 0:
raise ValueError("repeat() count cannot be negative")
return query(itertools.repeat(element, count))
|
Generate a sequence with one repeated value.
Note: This method uses deferred execution.
Args:
element: The value to be repeated.
count: The number of times to repeat the value.
Raises:
ValueError: If the count is negative.
|
juraj-google-style
|
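Since `query()` belongs to the surrounding LINQ-style library, the underlying semantics of `repeat` above can be checked with plain itertools:

import itertools

assert list(itertools.repeat('x', 3)) == ['x', 'x', 'x']
assert list(itertools.repeat('x', 0)) == []
# repeat('x', -1) from the entry above raises ValueError before reaching itertools.
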
def get_output_details(self):
result = {}
for output_name, tensor_index in self._outputs:
result[output_name] = self._interpreter._get_tensor_details(tensor_index, self._subgraph_index)
return result
|
Gets output tensor details.
Returns:
A dictionary from output name to tensor details where each item is a
dictionary with details about an output tensor. The dictionary contains
the same fields as described for `get_input_details()`.
|
github-repos
|
def send_event(self, event_type, category=None, dimensions=None, properties=None, timestamp=None):
if (category and (category not in SUPPORTED_EVENT_CATEGORIES)):
        raise ValueError('Event category is not one of the supported types: {' + ', '.join(SUPPORTED_EVENT_CATEGORIES) + '}')
data = {'eventType': event_type, 'category': category, 'dimensions': (dimensions or {}), 'properties': (properties or {}), 'timestamp': (int(timestamp) if timestamp else None)}
_logger.debug('Sending event to SignalFx: %s', data)
self._add_extra_dimensions(data)
return self._send_event(event_data=data, url='{0}/{1}'.format(self._endpoint, self._INGEST_ENDPOINT_EVENT_SUFFIX), session=self._session)
|
Send an event to SignalFx.
Args:
event_type (string): the event type (name of the event time
series).
category (string): the category of the event.
dimensions (dict): a map of event dimensions.
properties (dict): a map of extra properties on that event.
timestamp (float): timestamp when the event occurred
|
codesearchnet
|
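A hedged, commented-out usage sketch for `send_event` above; the client construction, token, and dimension values are assumptions, not taken from the entry.

# import signalfx
#
# sfx = signalfx.SignalFx().ingest('MY_ORG_TOKEN')   # made-up token
# sfx.send_event(
#     event_type='deployment',
#     category='USER_DEFINED',                        # must be a supported category
#     dimensions={'service': 'checkout', 'environment': 'prod'},
#     properties={'version': '1.4.2'},
# )
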
def gate_nodes(self):
nodes = []
for node in self.op_nodes():
if isinstance(node.op, Gate):
nodes.append(node)
return nodes
|
Get the list of gate nodes in the dag.
Returns:
list: the list of node ids that represent gates.
|
codesearchnet
|
def terminate(self, uuid):
request_url = (self._client.base_api_url + self.terminate_url.format(id=uuid))
response = self._client.session.post(request_url)
self.validate_request_success(response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_202_ACCEPTED)
return self.response_data_to_model_instance(response.json())
|
Terminate the task instance with given UUID.
Args:
uuid (str): The UUID of the task instance to terminate.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
A task instance model instance representing the task
instance that was told to terminate.
|
codesearchnet
|
def dependency_of_fetches(fetches, op):
try:
from tensorflow.python.client.session import _FetchHandler as FetchHandler
handler = FetchHandler(op.graph, fetches, {})
targets = tuple((handler.fetches() + handler.targets()))
except ImportError:
if isinstance(fetches, list):
targets = tuple(fetches)
elif isinstance(fetches, dict):
raise ValueError("Don't know how to parse dictionary to fetch list! This is a bug of tensorpack.")
else:
targets = (fetches,)
return dependency_of_targets(targets, op)
|
Check that op is in the subgraph induced by the dependencies of fetches.
fetches may have more general structure.
Args:
fetches: An argument to `sess.run`. Nested structure will affect performance.
op (tf.Operation or tf.Tensor):
Returns:
bool: True if any of `fetches` depend on `op`.
|
codesearchnet
|
def ParseBookmarkRow(
self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
rev_host = self._GetRowValue(query_hash, row, 'rev_host')
bookmark_type = self._GetRowValue(query_hash, row, 'type')
event_data = FirefoxPlacesBookmarkEventData()
event_data.host = rev_host or 'N/A'
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.places_title = self._GetRowValue(query_hash, row, 'places_title')
event_data.query = query
event_data.title = self._GetRowValue(query_hash, row, 'bookmark_title')
event_data.type = self._BOOKMARK_TYPES.get(bookmark_type, 'N/A')
event_data.url = self._GetRowValue(query_hash, row, 'url')
event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count')
timestamp = self._GetRowValue(query_hash, row, 'dateAdded')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'lastModified')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a bookmark row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
|
juraj-google-style
|
def recipe_bigquery_view(config, auth_read, query, dataset, view, legacy):
bigquery(config, {'auth': auth_read, 'from': {'query': query, 'legacy': legacy}, 'to': {'dataset': dataset, 'view': view}})
|
Create a BigQuery view.
Args:
auth_read (authentication) - Credentials used for reading data.
query (text) - SQL with newlines and all.
dataset (string) - Existing BigQuery dataset.
view (string) - View to create from this query.
legacy (boolean) - Query type must match source tables.
|
github-repos
|
def unnest_primitive_type(beam_type: schema_pb2.FieldType):
avro_type = beam_type_to_avro_type(beam_type)
return avro_type['type'] if beam_type.WhichOneof('type_info') == 'atomic_type' else avro_type
|
Unnests beam types that map to Avro primitives or unions.
If mapping to an Avro primitive or a union, don't nest the field type;
for complex types, like arrays, we need to nest the type.
Example: { 'type': 'string' } -> 'string'
{ 'type': 'array', 'items': 'string' }
-> { 'type': 'array', 'items': 'string' }
Args:
beam_type: the beam type to map to avro.
Returns:
the converted avro type with the primitive or union type unnested.
|
github-repos
|
def list_vmss_sub(access_token, subscription_id):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Compute/virtualMachineScaleSets',
'?api-version=', COMP_API])
return do_get_next(endpoint, access_token)
|
List VM Scale Sets in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of VM scale sets.
|
juraj-google-style
|
def variational_dropout(units, keep_prob, fixed_mask_dims=(1,)):
units_shape = tf.shape(units)
noise_shape = [units_shape[n] for n in range(len(units.shape))]
for dim in fixed_mask_dims:
noise_shape[dim] = 1
return tf.nn.dropout(units, keep_prob, noise_shape)
|
Dropout with the same drop mask for all fixed_mask_dims
Args:
units: a tensor, usually with shapes [B x T x F], where
B - batch size
T - tokens dimension
F - feature dimension
keep_prob: keep probability
fixed_mask_dims: in these dimensions the mask will be the same
Returns:
dropped units tensor
|
juraj-google-style
|
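A NumPy illustration of the shared-mask idea above (the entry uses TF's `noise_shape`; this sketch only demonstrates the masking concept, with made-up tensor sizes):

import numpy as np

B, T, F = 2, 4, 3
keep_prob = 0.5

# Sample one mask per (batch, feature) and broadcast it over the token axis,
# mirroring noise_shape = [B, 1, F] with fixed_mask_dims=(1,).
mask = (np.random.rand(B, 1, F) < keep_prob) / keep_prob
units = np.ones((B, T, F))
dropped = units * mask

# Every timestep within a sequence shares the same dropout mask.
assert np.allclose(dropped[:, 0, :], dropped[:, 1, :])
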
def remove_segments(self, segments_to_remove):
v_ind = self.vertex_indices_in_segments(segments_to_remove)
self.segm = {name: faces for name, faces in self.segm.iteritems() if name not in segments_to_remove}
self.remove_vertices(v_ind)
|
Remove the faces and vertices for given segments, keeping all others.
Args:
segments_to_remove: a list of segments whose vertices will be removed
|
juraj-google-style
|
def AddScanNode(self, path_spec, parent_scan_node):
scan_node = self._scan_nodes.get(path_spec, None)
if scan_node:
raise KeyError('Scan node already exists.')
scan_node = SourceScanNode(path_spec)
if parent_scan_node:
if parent_scan_node.path_spec not in self._scan_nodes:
raise RuntimeError('Parent scan node not present.')
scan_node.parent_node = parent_scan_node
parent_scan_node.sub_nodes.append(scan_node)
if not self._root_path_spec:
self._root_path_spec = path_spec
self._scan_nodes[path_spec] = scan_node
if path_spec.IsFileSystem():
self._file_system_scan_nodes[path_spec] = scan_node
self.updated = True
return scan_node
|
Adds a scan node for a certain path specification.
Args:
path_spec (PathSpec): path specification.
parent_scan_node (SourceScanNode): parent scan node or None.
Returns:
SourceScanNode: scan node.
Raises:
KeyError: if the scan node already exists.
RuntimeError: if the parent scan node is not present.
|
juraj-google-style
|
def deleted(self, deleted_since, filters=None, params=None):
return self.tc_requests.deleted(
self.api_type,
self.api_sub_type,
deleted_since,
owner=self.owner,
filters=filters,
params=params,
)
|
Gets the indicators deleted since a given date.
Args:
deleted_since: Date since which the indicators have been deleted
filters: (optional) filters to apply to the request
params: (optional) additional query parameters
|
juraj-google-style
|
def slice_begin(self, tensor_shape, pnum):
tensor_layout = self.tensor_layout(tensor_shape)
coordinates = pnum_to_processor_coordinates(self.shape, pnum)
ret = []
for dim_size, mesh_axis in zip(
tensor_shape.to_integer_list, tensor_layout.tensor_axis_to_mesh_axis):
if mesh_axis is None:
ret.append(0)
else:
        ret.append(
            # slice size along this dimension times this processor's coordinate
            # on the mesh axis the dimension is split over
            dim_size // self.shape[mesh_axis].size * coordinates[mesh_axis])
    return ret
|
Begin position for the tensor slice for the given processor.
Args:
tensor_shape: Shape.
pnum: int <= self.size.
Returns:
list of integers with length tensor_shape.ndims.
|
juraj-google-style
|
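The begin-offset arithmetic above reduces to `(dim_size // mesh_axis_size) * coordinate` per split dimension; a standalone sketch with made-up shapes:

def slice_begin_sketch(dim_sizes, mesh_axis_sizes, coords, axis_map):
    begins = []
    for i, dim_size in enumerate(dim_sizes):
        mesh_axis = axis_map[i]        # None means the dimension is replicated
        if mesh_axis is None:
            begins.append(0)
        else:
            begins.append(dim_size // mesh_axis_sizes[mesh_axis] * coords[mesh_axis])
    return begins

# An [8, 6] tensor split over a 2x3 mesh; the processor at mesh coordinates
# (1, 2) owns the slice starting at [4, 4].
assert slice_begin_sketch([8, 6], [2, 3], [1, 2], {0: 0, 1: 1}) == [4, 4]
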
def dnd_setSnooze(self, *, num_minutes: int, **kwargs) -> SlackResponse:
self._validate_xoxp_token()
kwargs.update({"num_minutes": num_minutes})
return self.api_call("dnd.setSnooze", http_verb="GET", params=kwargs)
|
Turns on Do Not Disturb mode for the current user, or changes its duration.
Args:
num_minutes (int): The snooze duration. e.g. 60
|
juraj-google-style
|
def get_structure_property_dict(self, structure, include_base_props=True, ignore_errors=False):
s_props = ['trans_v', 'long_v', 'snyder_ac', 'snyder_opt', 'snyder_total', 'clarke_thermalcond', 'cahill_thermalcond', 'debye_temperature']
if (ignore_errors and ((self.k_vrh < 0) or (self.g_vrh < 0))):
sp_dict = {prop: None for prop in s_props}
else:
sp_dict = {prop: getattr(self, prop)(structure) for prop in s_props}
sp_dict['structure'] = structure
if include_base_props:
sp_dict.update(self.property_dict)
return sp_dict
|
returns a dictionary of properties derived from the elastic tensor
and an associated structure
Args:
structure (Structure): structure object for which to calculate
associated properties
include_base_props (bool): whether to include base properties,
like k_vrh, etc.
ignore_errors (bool): if set to true, will set problem properties
that depend on a physical tensor to None, defaults to False
|
codesearchnet
|
def random_uniform(mesh, shape, **kwargs):
shape = convert_to_shape(shape)
return RandomOperation(mesh, shape, tf.random.uniform, **kwargs).outputs[0]
|
Random uniform.
Args:
mesh: a Mesh
shape: a Shape
**kwargs: keyword args for tf.random.uniform, except seed
Returns:
a Tensor
|
codesearchnet
|
def __init__(self, fsntfs_attribute):
if not fsntfs_attribute:
raise errors.BackEndError('Missing pyfsntfs attribute.')
super(NTFSAttribute, self).__init__()
self._fsntfs_attribute = fsntfs_attribute
|
Initializes the attribute object.
Args:
fsntfs_attribute (pyfsntfs.attribute): NTFS attribute.
Raises:
BackEndError: if the pyfsntfs attribute is missing.
|
juraj-google-style
|
def recipe_bigquery_query(config, auth_write, query, dataset, table, legacy):
bigquery(config, {'auth': auth_write, 'from': {'query': query, 'legacy': legacy}, 'to': {'dataset': dataset, 'table': table}})
|
Save query results into a BigQuery table.
Args:
auth_write (authentication) - Credentials used for writing data.
query (text) - SQL with newlines and all.
dataset (string) - Existing BigQuery dataset.
table (string) - Table to create from this query.
legacy (boolean) - Query type must match source tables.
|
github-repos
|