code (string, 20–4.93k chars) | docstring (string, 33–1.27k chars) | source (3 classes) |
---|---|---|
def add_controller(self, controller, timeout=None):
assert (controller.mri not in self._controllers), ('Controller already exists for %s' % controller.mri)
self._controllers[controller.mri] = controller
controller.setup(self)
if self.state:
should_publish = self._start_controllers([controller], timeout)
if ((self.state == STARTED) and should_publish):
self._publish_controllers(timeout)
|
Add a controller to be hosted by this process
Args:
controller (Controller): The controller to host
timeout (float): Maximum amount of time to wait for each spawned
object. None means forever
|
codesearchnet
|
def __init__(self, hidden_size):
super(Compressor, self).__init__()
self.hidden_size = hidden_size
conv = functools.partial(
tf.keras.layers.Conv2D, padding="SAME", activation=tf.nn.leaky_relu)
self.conv1 = conv(256, 3, 2)
self.conv2 = conv(256, 3, 2)
self.conv3 = conv(256, 3, 2)
self.conv4 = conv(hidden_size, 8, padding="VALID")
|
Constructs a convolutional compressor.
This model takes as input `x_{1:T}` and outputs an intermediate
representation for use in downstream probabilistic encoders.
Args:
hidden_size: Dimensionality of the intermediate representations.
|
juraj-google-style
|
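A minimal sketch of the functools.partial pattern used above to share Conv2D keyword arguments across layers; conv2d here is a hypothetical stand-in so the snippet runs without TensorFlow:
import functools

# Hypothetical layer factory standing in for tf.keras.layers.Conv2D;
# functools.partial pre-binds the keyword arguments shared by every layer.
def conv2d(filters, kernel_size, strides=1, padding="VALID", activation=None):
    return {"filters": filters, "kernel_size": kernel_size,
            "strides": strides, "padding": padding, "activation": activation}

conv = functools.partial(conv2d, padding="SAME", activation="leaky_relu")
conv1 = conv(256, 3, 2)               # padding/activation inherited from the partial
conv4 = conv(64, 8, padding="VALID")  # per-call override, as in conv4 above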
def _get_global_step_read(graph=None):
graph = graph or ops.get_default_graph()
global_step_read_tensors = graph.get_collection(GLOBAL_STEP_READ_KEY)
if len(global_step_read_tensors) > 1:
raise RuntimeError('There are multiple items in collection {}. There should be only one.'.format(GLOBAL_STEP_READ_KEY))
if len(global_step_read_tensors) == 1:
return global_step_read_tensors[0]
return None
|
Gets global step read tensor in graph.
Args:
graph: The graph in which to create the global step read tensor. If missing,
use default graph.
Returns:
Global step read tensor.
Raises:
RuntimeError: if multiple items found in collection GLOBAL_STEP_READ_KEY.
|
github-repos
|
def forward(self, z: torch.Tensor, mask: Optional[torch.Tensor]=None, inplace_safe: bool=False, _add_with_inplace: bool=False, _inplace_chunk_size: Optional[int]=256) -> torch.Tensor:
if inplace_safe:
x = self._inference_forward(z, mask, inplace_chunk_size=_inplace_chunk_size, with_add=_add_with_inplace)
return x
if mask is None:
mask = z.new_ones(z.shape[:-1])
mask = mask.unsqueeze(-1)
z = self.layer_norm_in(z)
a = mask
a = a * self.sigmoid(self.linear_a_g(z))
a = a * self.linear_a_p(z)
b = mask
b = b * self.sigmoid(self.linear_b_g(z))
b = b * self.linear_b_p(z)
if is_fp16_enabled():
with torch.cuda.amp.autocast(enabled=False):
x = self._combine_projections(a.float(), b.float())
else:
x = self._combine_projections(a, b)
del a, b
x = self.layer_norm_out(x)
x = self.linear_z(x)
g = self.sigmoid(self.linear_g(z))
x = x * g
return x
|
Args:
z:
[*, N_res, N_res, C_z] input tensor
mask:
[*, N_res, N_res] input mask
Returns:
[*, N_res, N_res, C_z] output tensor
|
github-repos
|
def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]:
if not isinstance(hidden_states, (tuple, list)):
raise TypeError('hidden_states should be a tuple or list of tensors')
if len(hidden_states) != len(self.config.neck_hidden_sizes):
raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.')
hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)
features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]
output = self.fusion_stage(features)
return output
|
Args:
hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):
List of hidden states from the backbone.
|
github-repos
|
def _validate_config(config):
required_keys = [KEY_ADDRESS, KEY_MODEL, KEY_PORT, KEY_PATHS]
for key in required_keys:
if key not in config:
raise Error("Required key %s missing from config %s",
(key, config))
|
Verifies that a config dict for an attenuator device is valid.
Args:
config: A dict that is the configuration for an attenuator device.
Raises:
attenuator.Error: A config is not valid.
|
juraj-google-style
|
def _on_join_leader(self, response):
try:
group_assignment = self._perform_assignment(response.leader_id,
response.group_protocol,
response.members)
except Exception as e:
return Future().failure(e)
version = 0 if self.config['api_version'] < (0, 11, 0) else 1
request = SyncGroupRequest[version](
self.group_id,
self._generation.generation_id,
self._generation.member_id,
[(member_id,
assignment if isinstance(assignment, bytes) else assignment.encode())
for member_id, assignment in six.iteritems(group_assignment)])
log.debug("Sending leader SyncGroup for group %s to coordinator %s: %s",
self.group_id, self.coordinator_id, request)
return self._send_sync_group_request(request)
|
Perform leader synchronization and send back the assignment
for the group via SyncGroupRequest
Arguments:
response (JoinResponse): broker response to parse
Returns:
Future: resolves to member assignment encoded-bytes
|
juraj-google-style
|
def find_sanitiser_nodes(
sanitiser,
sanitisers_in_file
):
for sanitiser_tuple in sanitisers_in_file:
if sanitiser == sanitiser_tuple.trigger_word:
yield sanitiser_tuple.cfg_node
|
Find nodes containing a particular sanitiser.
Args:
sanitiser(string): sanitiser to look for.
sanitisers_in_file(list[Node]): list of CFG nodes with the sanitiser.
Returns:
Iterable of sanitiser nodes.
|
juraj-google-style
|
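A minimal usage sketch for find_sanitiser_nodes, assuming the function above is in scope and using a hypothetical namedtuple in place of the real CFG node tuples:
from collections import namedtuple

# Hypothetical stand-in for the sanitiser tuples the generator expects.
Sanitiser = namedtuple('Sanitiser', ['trigger_word', 'cfg_node'])

sanitisers_in_file = [Sanitiser('escape', 'node_1'),
                      Sanitiser('quote', 'node_2'),
                      Sanitiser('escape', 'node_3')]

# Yields only the cfg_node values whose trigger_word matches 'escape'.
print(list(find_sanitiser_nodes('escape', sanitisers_in_file)))  # ['node_1', 'node_3']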
def parse_display_name(chrom, pos, ref, alt, variant_type):
return '_'.join([chrom, pos, ref, alt, variant_type])
|
Parse the display name for a variant
This is used to display the variant in scout.
Args:
chrom(str)
pos(str)
ref(str)
alt(str)
variant_type(str): 'clinical' or 'research'
Returns:
display_name(str): The variant id in human readable format
|
juraj-google-style
|
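A quick usage sketch of parse_display_name with hypothetical values, assuming the function above is importable:
# The five fields are simply joined with '_'.
display_name = parse_display_name('1', '880086', 'T', 'C', 'clinical')
print(display_name)  # '1_880086_T_C_clinical'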
def dates(self):
return _gen_business_days(self._start_date, self._end_date, self._holiday_calendar, self._backward)
|
Returns the dates as computed from the schedule as a DateTensor.
Constructs the date schedule from the supplied data. For more details see
the initializer docstring.
Returns:
`DateTensor` of rank one more than `start_date` or `end_date`
(depending on `backwards`), representing schedules for each element
of the input.
|
github-repos
|
def perform_load_job(self, destination, job_id, source_uris=None, source_stream=None, schema=None, write_disposition=None, create_disposition=None, additional_load_parameters=None, source_format=None, job_labels=None, load_job_project_id=None):
project_id = destination.projectId if load_job_project_id is None else load_job_project_id
return self._insert_load_job(project_id, job_id, destination, source_uris=source_uris, source_stream=source_stream, schema=schema, create_disposition=create_disposition, write_disposition=write_disposition, additional_load_parameters=additional_load_parameters, source_format=source_format, job_labels=job_labels)
|
Starts a job to load data into BigQuery.
Returns:
bigquery.JobReference with the information about the job that was started.
|
github-repos
|
def create_run_group(prj):
from benchbuild.utils import schema as s
session = s.Session()
experiment = prj.experiment
group = s.RunGroup(id=prj.run_uuid, experiment=experiment.id)
session.add(group)
session.commit()
return (group, session)
|
Create a new 'run_group' in the database.
This creates a new transaction in the database and creates a new run_group
within this transaction. Afterwards we return both the transaction as well
as the run_group itself. The user is responsible for committing it when the
time comes.
Args:
prj - The project for which we open the run_group.
Returns:
A tuple (group, session) containing both the newly created run_group and
the transaction object.
|
juraj-google-style
|
def GenerateId(self, entity_id=None):
self._idnum += 1
if entity_id:
return ('%s_merged_%d' % (entity_id, self._idnum))
else:
return ('merged_%d' % self._idnum)
|
Generate a unique id based on the given id.
This is done by appending a counter which is then incremented. The
counter is initialised at the maximum number used as an ending for
any id in the old and new schedules.
Args:
entity_id: The base id string. This is allowed to be None.
Returns:
The generated id.
|
codesearchnet
|
def _from_proto_sparse_tensor(sparse_tensor_proto, process_leafs):
if not sparse_tensor_proto.HasField("named_tuple"):
raise base_errors.ModuleInfoError(
"Error while deserializing a SparseTensor: expected proto tuple.")
if sparse_tensor_proto.named_tuple.name != _SPARSE_TENSOR_NAME:
raise base_errors.ModuleInfoError(
"Error while deserializing a SparseTensor: The name of the tuple "
"should have been {} but was {}.".format(
_SPARSE_TENSOR_NAME, sparse_tensor_proto.named_tuple.name))
named_tuple_map = sparse_tensor_proto.named_tuple.map
return tf.SparseTensor(
indices=process_leafs(named_tuple_map["indices"].value),
values=process_leafs(named_tuple_map["values"].value),
dense_shape=process_leafs(named_tuple_map["dense_shape"].value))
|
Deserializes a `tf.SparseTensor` from `sparse_tensor_proto`.
Args:
sparse_tensor_proto: A proto representing a `tf.SparseTensor`.
process_leafs: A function to be applied to the leaf values of the nested
structure.
Returns:
An instance of `tf.SparseTensor`.
|
juraj-google-style
|
def dqdv_cycles(cycles, **kwargs):
ica_dfs = list()
cycle_group = cycles.groupby('cycle')
for (cycle_number, cycle) in cycle_group:
(v, dq) = dqdv_cycle(cycle, splitter=True, **kwargs)
_ica_df = pd.DataFrame({'voltage': v, 'dq': dq})
_ica_df['cycle'] = cycle_number
_ica_df = _ica_df[['cycle', 'voltage', 'dq']]
ica_dfs.append(_ica_df)
ica_df = pd.concat(ica_dfs)
return ica_df
|
Convenience function for creating dq/dv data from given capacity and
voltage cycles.
Returns a DataFrame with 'cycle', 'voltage' and 'dq' (incremental
capacity) columns.
Args:
cycles (pandas.DataFrame): the cycle data ('cycle', 'voltage',
'capacity', 'direction' (1 or -1)).
Returns:
pandas.DataFrame with columns 'cycle', 'voltage', 'dq'.
Example:
>>> cycles_df = my_data.get_cap(
>>> ... categorical_column=True,
>>> ... method = "forth-and-forth",
>>> ... label_cycle_number=True,
>>> ... )
>>> ica_df = ica.dqdv_cycles(cycles_df)
|
codesearchnet
|
def get_by(self, field, value):
if field == 'userName' or field == 'name':
return self._client.get(self.URI + '/' + value)
elif field == 'role':
value = value.replace(" ", "%20")
return self._client.get(self.URI + '/roles/users/' + value)['members']
else:
raise HPOneViewException('Only userName, name and role can be queried for this resource.')
|
Gets all Users that match the filter.
The search is case-insensitive.
Args:
field: Field name to filter. Accepted values: 'name', 'userName', 'role'
value: Value to filter.
Returns:
list: A list of Users.
|
juraj-google-style
|
def remove_item(self, **kwargs):
path = self._get_id_path('remove_item')
kwargs.update({'session_id': self.session_id})
payload = {'media_id': kwargs.pop('media_id', None)}
response = self._POST(path, kwargs, payload)
self._set_attrs_to_values(response)
return response
|
Delete movies from a list that the user created.
A valid session id is required.
Args:
media_id: A movie id.
Returns:
A dict representation of the JSON returned from the API.
|
codesearchnet
|
def get_ordered_params(url):
if (url not in URLHelper.__cache):
URLHelper.__cache[url] = urlparse(url)
params = URLHelper.query_string_to_dict(URLHelper.__cache[url].query)
return OrderedDict(sorted(params.items()))
|
Get the query parameters of the given URL in alphabetical order.
Args:
url (str): The URL to get the query parameters from.
Returns:
OrderedDict: The query parameters, sorted alphabetically by key.
|
codesearchnet
|
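A standalone sketch of the same idea using only the standard library (it skips the URLHelper caching shown above):
from collections import OrderedDict
from urllib.parse import urlparse, parse_qsl

def ordered_params(url):
    # parse_qsl returns (key, value) pairs; sorting them yields the
    # parameters in alphabetical key order, as in get_ordered_params.
    return OrderedDict(sorted(parse_qsl(urlparse(url).query)))

print(ordered_params('https://example.com/?b=2&a=1&c=3'))
# OrderedDict([('a', '1'), ('b', '2'), ('c', '3')])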
def __init__(self, assign_defaults=(), method_name=None, overwrite=False):
super(self.__class__, self).__init__(assign_defaults=assign_defaults,
method_name=method_name,
overwrite=overwrite)
|
Assigns arguments to the decorator.
Args:
assign_defaults: A sequence of strings for the default values that should
be provided. Defaults are shared across methods.
method_name: If provided, use this as the method_name instead of the
wrapped function's name.
overwrite: if true, overwrites definition if exists.
|
juraj-google-style
|
async def movehere(self, channel):
self.logger.debug("movehere command")
await self.embed.delete()
self.embed.channel = channel
await self.embed.send()
await self.add_reactions()
self.statuslog.info("Moved to front")
|
Moves the embed message to a new channel; can also be used to move the musicplayer to the front
Args:
channel (discord.Channel): The channel to move to
|
juraj-google-style
|
def CheckDependencies(verbose_output=True):
print('Checking availability and versions of dependencies.')
check_result = True
for module_name, version_tuple in sorted(PYTHON_DEPENDENCIES.items()):
if not _CheckPythonModule(
module_name, version_tuple[0], version_tuple[1],
is_required=version_tuple[3], maximum_version=version_tuple[2],
verbose_output=verbose_output):
check_result = False
if not _CheckSQLite3(verbose_output=verbose_output):
check_result = False
if check_result and not verbose_output:
print('[OK]')
print('')
return check_result
|
Checks the availability of the dependencies.
Args:
verbose_output (Optional[bool]): True if output should be verbose.
Returns:
bool: True if the dependencies are available, False otherwise.
|
juraj-google-style
|
def stream_realtime(self, stream, value):
if not self.stream_iface_open:
return
reading = IOTileReading(0, stream, value)
report = IndividualReadingReport.FromReadings(self.iotile_id, [reading])
self.stream(report)
|
Stream a realtime value as an IndividualReadingReport.
If the streaming interface of the VirtualInterface this
VirtualDevice is attached to is not opened, the realtime
reading may be dropped.
Args:
stream (int): The stream id to send
value (int): The stream value to send
|
juraj-google-style
|
def write(self, path=None, *args, **kwargs):
if path is None:
print(self.format(*args, **kwargs))
else:
with io.open(path, 'w', newline="") as f:
f.write(self.format(*args, **kwargs))
|
Perform formatting and write the formatted string to a file or stdout.
Optional arguments can be used to format the editor's contents. If no
file path is given, prints to standard output.
Args:
path (str): Full file path (default None, prints to stdout)
*args: Positional arguments to format the editor with
**kwargs: Keyword arguments to format the editor with
|
juraj-google-style
|
def wait_all_futures(self, futures, timeout=None, event_timeout=None):
if (timeout is None):
end = None
else:
end = (time.time() + timeout)
if (not isinstance(futures, list)):
if futures:
futures = [futures]
else:
futures = []
filtered_futures = []
for f in futures:
if f.done():
if (f.exception() is not None):
raise f.exception()
else:
filtered_futures.append(f)
while filtered_futures:
if (event_timeout is not None):
until = (time.time() + event_timeout)
if (end is not None):
until = min(until, end)
else:
until = end
self._service_futures(filtered_futures, until)
|
Services all futures until every future in 'futures' is done, then
returns. Calls relevant subscription callbacks as they come off the
queue and raises an exception on abort.
Args:
futures: a `Future` or list of all futures that the caller
wants to wait for
timeout: maximum total time in seconds to wait for responses, wait
forever if None
event_timeout: maximum time in seconds to wait between each response
event, wait forever if None
|
codesearchnet
|
async def _handle_set_typing_notification(self, set_typing_notification):
conv_id = set_typing_notification.conversation_id.id
res = parsers.parse_typing_status_message(set_typing_notification)
(await self.on_typing.fire(res))
try:
conv = (await self._get_or_fetch_conversation(conv_id))
except exceptions.NetworkError:
logger.warning('Failed to fetch conversation for typing notification: %s', conv_id)
else:
(await conv.on_typing.fire(res))
|
Receive SetTypingNotification and update the conversation.
Args:
set_typing_notification: hangouts_pb2.SetTypingNotification
instance
|
codesearchnet
|
def _wait_for_and_process_task(self, task):
function_descriptor = FunctionDescriptor.from_bytes_list(
task.function_descriptor_list())
driver_id = task.driver_id()
if not task.actor_creation_id().is_nil():
assert self.actor_id.is_nil()
self.actor_id = task.actor_creation_id()
self.actor_creation_task_id = task.task_id()
actor_class = self.function_actor_manager.load_actor_class(
driver_id, function_descriptor)
self.actors[self.actor_id] = actor_class.__new__(actor_class)
self.actor_checkpoint_info[self.actor_id] = ActorCheckpointInfo(
num_tasks_since_last_checkpoint=0,
last_checkpoint_timestamp=int(1000 * time.time()),
checkpoint_ids=[],
)
execution_info = self.function_actor_manager.get_execution_info(
driver_id, function_descriptor)
function_name = execution_info.function_name
extra_data = {"name": function_name, "task_id": task.task_id().hex()}
if task.actor_id().is_nil():
if task.actor_creation_id().is_nil():
title = "ray_worker:{}()".format(function_name)
next_title = "ray_worker"
else:
actor = self.actors[task.actor_creation_id()]
title = "ray_{}:{}()".format(actor.__class__.__name__,
function_name)
next_title = "ray_{}".format(actor.__class__.__name__)
else:
actor = self.actors[task.actor_id()]
title = "ray_{}:{}()".format(actor.__class__.__name__,
function_name)
next_title = "ray_{}".format(actor.__class__.__name__)
with profiling.profile("task", extra_data=extra_data):
with _changeproctitle(title, next_title):
self._process_task(task, execution_info)
self.task_context.current_task_id = TaskID.nil()
self.task_context.task_index = 0
self.task_context.put_index = 1
if self.actor_id.is_nil():
self.task_driver_id = DriverID.nil()
ray_signal.reset()
self.function_actor_manager.increase_task_counter(
driver_id, function_descriptor)
reached_max_executions = (self.function_actor_manager.get_task_counter(
driver_id, function_descriptor) == execution_info.max_calls)
if reached_max_executions:
self.raylet_client.disconnect()
sys.exit(0)
|
Wait for a task to be ready and process the task.
Args:
task: The task to execute.
|
juraj-google-style
|
def get_axis_grid(self, ind):
ng = self.dim
num_pts = ng[ind]
lengths = self.structure.lattice.abc
return [i / num_pts * lengths[ind] for i in range(num_pts)]
|
Returns the grid for a particular axis.
Args:
ind (int): Axis index.
|
juraj-google-style
|
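The arithmetic behind get_axis_grid as a standalone sketch, with a hypothetical 10.0 Å axis sampled at 4 grid points:
num_pts = 4
length = 10.0
# Fractional positions i/num_pts scaled by the axis length, as in the method above.
grid = [i / num_pts * length for i in range(num_pts)]
print(grid)  # [0.0, 2.5, 5.0, 7.5]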
def render(self, data):
renderers = {'text/csv': self._render_as_csv, 'text/html': self._render_as_html, None: self._render_as_html}
render = renderers[data.content_type]
return render(data)
|
Renders the reports based on data.content_type's value.
Arguments:
data (ReportViewRequestData): The report data. data.content_type
is used to determine how the reports are rendered.
Returns:
HTTPResponse: The rendered version of the report.
|
codesearchnet
|
def get_variable_dtype(master_dtype=tf.bfloat16, slice_dtype=tf.float32, activation_dtype=tf.float32):
return mtf.VariableDType(master_dtype=tf.as_dtype(master_dtype), slice_dtype=tf.as_dtype(slice_dtype), activation_dtype=tf.as_dtype(activation_dtype))
|
Datatypes to use for the run.
Args:
master_dtype: string, datatype for checkpoints
keep this the same between training and eval/inference
slice_dtype: string, datatype for variables in memory
must be tf.float32 for training
activation_dtype: string, datatype for activations
less memory usage if tf.bfloat16 but possible numerical issues
Returns:
an mtf.VariableDType
|
codesearchnet
|
def normal_var(data, mean):
if (not isinstance(data, np.ndarray)):
data = np.array(data)
cumm = [0.0]
cumm.extend(np.cumsum(np.power(np.abs((data - mean)), 2)))
def cost(s, t):
""" Cost function for normal distribution with variable variance
Args:
    start (int): start index
    end (int): end index
Returns:
    float: Cost, from start to end
"""
dist = float((t - s))
diff = (cumm[t] - cumm[s])
return (dist * np.log((diff / dist)))
return cost
|
Creates a segment cost function for a time series with a
Normal distribution with changing variance
Args:
data (:obj:`list` of float): 1D time series data
mean (float): mean of the distribution
Returns:
function: Function with signature
(int, int) -> float
where the first arg is the start index and the second is the
end index. Returns the cost of that segment.
|
codesearchnet
|
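A small usage sketch of the cost closure returned by normal_var, assuming the function above is importable and using toy data:
import numpy as np

data = [1.0, 1.1, 0.9, 5.0, 5.2, 4.8]
cost = normal_var(data, mean=np.mean(data))
# Cost of one whole segment vs. two segments split at index 3;
# a changepoint search would minimise the summed segment costs.
print(cost(0, len(data)))
print(cost(0, 3) + cost(3, len(data)))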
def last_timestamp(self, event_key=None):
if event_key is None:
timestamps = [self._trackers[key].last_timestamp
for key in self._trackers]
return max(timestamp for timestamp in timestamps if timestamp >= 0)
else:
return self._trackers[event_key].last_timestamp
|
Obtain the last timestamp.
Args:
event_key: the type key of the sought events (e.g., constants.NAN_KEY). If
None, includes all event type keys.
Returns:
Last (latest) timestamp of all the events of the given type (or all
event types if event_key is None).
|
juraj-google-style
|
def __init__(self, src_state_id, dst_state_id, guard_p, term=None):
self.src_state = src_state_id
self.dst_state = dst_state_id
self.guard = guard_p
self.term = term
|
Initialization function for Arc's guardgen structure
Args:
src_state_id (int): The source state identifier
dst_state_id (int): The destination state identifier
guard_p: The input character
term: The input term
Returns:
None
|
juraj-google-style
|
def detect_mbr(self, filename, offset, fs_id):
self.logger.debug('Detecting MBR partition type')
if (fs_id not in self.__mbr_plugins):
return None
else:
plugins = self.__mbr_plugins.get(fs_id)
for plugin in plugins:
if plugin.detect(filename, offset):
return plugin.get_volume_object()
return None
|
Used by rawdisk.session.Session to match mbr partitions against
filesystem plugins.
Args:
filename: device or file that it will read in order to detect
the filesystem
fs_id: filesystem id to match (ex. 0x07)
offset: offset for the filesystem that is being matched
Returns:
Volume object supplied by matched plugin.
If there is no match, None is returned
|
codesearchnet
|
def size(self, time):
if (self.start_time <= time <= self.end_time):
return self.masks[(time - self.start_time)].sum()
else:
return 0
|
Gets the size of the object at a given time.
Args:
time: Time value being queried.
Returns:
size of the object in pixels
|
codesearchnet
|
def _validate_netconfig(self, conf):
nets = conf.get('nets', {})
if (len(nets) == 0):
raise LagoInitException('No networks configured.')
no_mgmt_dns = [name for (name, net) in nets.iteritems() if ((net.get('management', None) is None) and (net.get('main_dns') or net.get('dns_domain_name')))]
if ((len(no_mgmt_dns) > 0) and (len(nets.keys()) > 1)):
raise LagoInitException('Networks: {0}, misconfigured, they are not marked as management, but have DNS attributes. DNS is supported only in management networks.'.format(','.join(no_mgmt_dns)))
for (dom_name, dom_spec) in conf['domains'].items():
mgmts = []
for nic in dom_spec['nics']:
net = self._get_net(conf, dom_name, nic)
if (net.get('management', False) is True):
mgmts.append(nic['net'])
if (len(mgmts) == 0):
raise LagoInitException('VM {0} has no management network, please connect it to one.'.format(dom_name))
if (len(mgmts) > 1):
raise LagoInitException('VM {0} has more than one management network: {1}. It should have exactly one.'.format(dom_name, ','.join(mgmts)))
|
Validate network configuration
Args:
conf(dict): spec
Returns:
None
Raises:
:exc:`~lago.utils.LagoInitException`: If a VM has more than
one management network configured, or a network which is not
management has DNS attributes, or a VM is configured with a
non-existent NIC, or a VM has no management network.
|
codesearchnet
|
def from_pseudoinverse(cls, strains, stresses):
warnings.warn("Pseudoinverse fitting of Strain/Stress lists may yield "
"questionable results from vasp data, use with caution.")
stresses = np.array([Stress(stress).voigt for stress in stresses])
with warnings.catch_warnings(record=True):
strains = np.array([Strain(strain).voigt for strain in strains])
voigt_fit = np.transpose(np.dot(np.linalg.pinv(strains), stresses))
return cls.from_voigt(voigt_fit)
|
Class method to fit an elastic tensor from stress/strain
data. Method uses Moore-Penrose pseudoinverse to invert
the s = C*e equation with elastic tensor, stress, and
strain in voigt notation
Args:
stresses (Nx3x3 array-like): list or array of stresses
strains (Nx3x3 array-like): list or array of strains
|
juraj-google-style
|
def crscode_to_string(codetype, code, format):
link = ('http:
result = urllib2.urlopen(link).read()
if (not isinstance(result, str)):
result = result.decode()
return result
|
Lookup crscode on spatialreference.org and return in specified format.
Arguments:
- *codetype*: "epsg", "esri", or "sr-org".
- *code*: The code.
- *format*: The crs format of the returned string. One of "ogcwkt", "esriwkt", or "proj4", but also several others...
Returns:
- Crs string in the specified format.
|
codesearchnet
|
def ParseOptions(cls, options, output_module):
if not isinstance(output_module, shared_4n6time.Shared4n6TimeOutputModule):
raise errors.BadConfigObject(
'Output module is not an instance of Shared4n6TimeOutputModule')
append = getattr(options, 'append', cls._DEFAULT_APPEND)
evidence = cls._ParseStringOption(
options, 'evidence', default_value=cls._DEFAULT_EVIDENCE)
fields = cls._ParseStringOption(
options, 'fields', default_value=cls._DEFAULT_FIELDS)
additional_fields = cls._ParseStringOption(
options, 'additional_fields')
if additional_fields:
fields = '{0:s},{1:s}'.format(fields, additional_fields)
output_module.SetAppendMode(append)
output_module.SetEvidence(evidence)
output_module.SetFields([
field_name.strip() for field_name in fields.split(',')])
|
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
|
juraj-google-style
|
def get_dopants_from_shannon_radii(bonded_structure, num_dopants=5, match_oxi_sign=False):
all_species = [Specie(el, oxi) for el in Element for oxi in el.common_oxidation_states]
cn_and_species = set(((bonded_structure.get_coordination_of_site(i), bonded_structure.structure[i].specie) for i in range(bonded_structure.structure.num_sites)))
cn_to_radii_map = {}
possible_dopants = []
for (cn, species) in cn_and_species:
cn_roman = _int_to_roman(cn)
try:
species_radius = species.get_shannon_radius(cn_roman)
except KeyError:
warnings.warn('Shannon radius not found for {} with coordination number {}.\nSkipping...'.format(species, cn))
continue
if (cn not in cn_to_radii_map):
cn_to_radii_map[cn] = _shannon_radii_from_cn(all_species, cn_roman, radius_to_compare=species_radius)
shannon_radii = cn_to_radii_map[cn]
possible_dopants += [{'radii_diff': p['radii_diff'], 'dopant_species': p['species'], 'original_species': species} for p in shannon_radii]
possible_dopants.sort(key=(lambda x: abs(x['radii_diff'])))
return _get_dopants(possible_dopants, num_dopants, match_oxi_sign)
|
Get dopant suggestions based on Shannon radii differences.
Args:
bonded_structure (StructureGraph): A pymatgen structure graph
decorated with oxidation states. For example, generated using the
CrystalNN.get_bonded_structure() method.
num_dopants (int): The number of suggestions to return for
n- and p-type dopants.
match_oxi_sign (bool): Whether to force the dopant and original species
to have the same sign of oxidation state. E.g. If the original site
is in a negative charge state, then only negative dopants will be
returned.
Returns:
(dict): Dopant suggestions, given as a dictionary with keys "n_type" and
"p_type". The suggestions for each doping type are given as a list of
dictionaries, each with the keys:
- "radii_diff": The difference between the Shannon radii of the species.
- "dopant_spcies": The dopant species.
- "original_species": The substituted species.
|
codesearchnet
|
def is_native_xmon_gate(gate: ops.Gate) -> bool:
return isinstance(gate, (ops.CZPowGate,
ops.MeasurementGate,
ops.PhasedXPowGate,
ops.XPowGate,
ops.YPowGate,
ops.ZPowGate))
|
Check if a gate is a native xmon gate.
Args:
gate: Input gate.
Returns:
True if the gate is native to the xmon, false otherwise.
|
juraj-google-style
|
def get_name(node):
if isinstance(node, gast.Name):
return node.id
elif isinstance(node, (gast.Subscript, gast.Attribute)):
return get_name(node.value)
else:
raise TypeError
|
Get the name of a variable.
Args:
node: A `Name`, `Subscript` or `Attribute` node.
Returns:
The name of the variable e.g. `'x'` for `x`, `x.i` and `x[i]`.
|
codesearchnet
|
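An illustrative sketch of the same recursion using the standard ast module, whose Name/Attribute/Subscript nodes mirror the gast nodes handled above:
import ast

def name_of(node):
    # Same recursion as get_name: unwrap Attribute/Subscript down to the Name.
    if isinstance(node, ast.Name):
        return node.id
    if isinstance(node, (ast.Subscript, ast.Attribute)):
        return name_of(node.value)
    raise TypeError(node)

for src in ('x', 'x.i', 'x[i]'):
    expr = ast.parse(src, mode='eval').body
    print(src, '->', name_of(expr))  # all print 'x'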
def get_vcenter(self, **kwargs):
config = ET.Element("config")
urn = "urn:brocade.com:mgmt:brocade-vswitch"
ET.SubElement(config, "vcenter", xmlns=urn)
output = self._callback(config, handler='get_config')
result = []
element = ET.fromstring(str(output))
for vcenter in element.iter('{%s}vcenter'%urn):
vc = {}
vc['name'] = vcenter.find('{%s}id' % urn).text
vc['url'] = (vcenter.find('{%s}credentials' % urn)).find('{%s}url' % urn).text
isactive = vcenter.find('{%s}activate' %urn)
if isactive is None:
vc['isactive'] = False
else:
vc['isactive'] = True
result.append(vc)
return result
|
Get vCenter hosts on the switch
Args:
callback (function): A function executed upon completion of the
method.
Returns:
A list of vCenters configured on the switch.
Raises:
None
|
juraj-google-style
|
def parse_args():
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument('--max_steps', type=int, default=10, help='Number of steps to run trainer.')
parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size used during training.')
parser.add_argument('--learning_rate', type=float, default=0.025, help='Initial learning rate.')
parser.add_argument('--data_dir', type=str, default='/tmp/mnist_data', help='Directory for storing data')
parser.add_argument('--fake_data', type='bool', nargs='?', const=True, default=False, help='Use fake MNIST data for unit testing')
parser.add_argument('--check_numerics', type='bool', nargs='?', const=True, default=False, help='Use tfdbg to track down bad values during training. Mutually exclusive with the --dump_dir flag.')
parser.add_argument('--dump_dir', type=str, default=None, help='Dump TensorFlow program debug data to the specified directory. The dumped data contains information regarding tf.function building, execution of ops and tf.functions, as well as their stack traces and associated source-code snapshots. Mutually exclusive with the --check_numerics flag.')
parser.add_argument('--dump_tensor_debug_mode', type=str, default='FULL_HEALTH', help='Mode for dumping tensor values. Options: NO_TENSOR, CURT_HEALTH, CONCISE_HEALTH, SHAPE, FULL_HEALTH. This is relevant only when --dump_dir is set.')
parser.add_argument('--dump_circular_buffer_size', type=int, default=-1, help='Size of the circular buffer used to dump execution events. A value <= 0 disables the circular-buffer behavior and causes all instrumented tensor values to be dumped. This is relevant only when --dump_dir is set.')
parser.add_argument('--use_random_config_path', type='bool', nargs='?', const=True, default=False, help='If set, set config file path to a random file in the temporary\n directory.')
return parser.parse_known_args()
|
Parses commandline arguments.
Returns:
A tuple (parsed, unparsed) of the parsed object and a group of unparsed
arguments that did not match the parser.
|
github-repos
|
def isworkday(self, date):
date = parsefun(date)
return self.weekdaymap[date.weekday()].isworkday
|
Check if a given date is a work date, ignoring holidays.
Args:
date (date, datetime or str): Date to be checked.
Returns:
bool: True if the date is a work date, False otherwise.
|
juraj-google-style
|
def check_import_stdlib(module):
if (
module in stdlib_list('2.7')
or module in stdlib_list('3.4')
or module in stdlib_list('3.5')
or module in stdlib_list('3.6')
or module in stdlib_list('3.7')
or module in ['app', 'args', 'playbook_app']
):
return True
return False
|
Check if module is in Python stdlib.
Args:
module (str): The name of the module to check.
Returns:
bool: Returns True if the module is in the stdlib or template.
|
juraj-google-style
|
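On Python 3.10+ a similar check can be sketched without the stdlib_list dependency, using sys.stdlib_module_names (the extra template names are kept as in the original):
import sys

def in_stdlib(module):
    # sys.stdlib_module_names is available from Python 3.10 onwards.
    return module in sys.stdlib_module_names or module in ('app', 'args', 'playbook_app')

print(in_stdlib('json'))      # True
print(in_stdlib('requests'))  # False (third-party)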
def download_kegg_gene_metadata(gene_id, outdir=None, force_rerun=False):
if not outdir:
outdir = ''
outfile = op.join(outdir, '{}.kegg'.format(custom_slugify(gene_id)))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
raw_text = bs_kegg.get("{}".format(gene_id))
if raw_text == 404:
return
with io.open(outfile, mode='wt', encoding='utf-8') as f:
f.write(raw_text)
log.debug('{}: downloaded KEGG metadata file'.format(outfile))
else:
log.debug('{}: KEGG metadata file already exists'.format(outfile))
return outfile
|
Download the KEGG flatfile for a KEGG ID and return the path.
Args:
gene_id: KEGG gene ID (with organism code), i.e. "eco:1244"
outdir: optional output directory of metadata
Returns:
Path to metadata file
|
juraj-google-style
|
def _SetCompleted(self):
with self._lock:
if self._completed:
return False
self._completed = True
return True
|
Atomically marks the breakpoint as completed.
Returns:
True if the breakpoint was not already marked completed, or False if
it was already completed.
|
codesearchnet
|
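The same atomic check-and-set flag pattern as a standalone sketch, using a hypothetical class and threading.Lock:
import threading

class Completable:
    def __init__(self):
        self._lock = threading.Lock()
        self._completed = False

    def set_completed(self):
        # Returns True only for the first caller; later calls see the flag already set.
        with self._lock:
            if self._completed:
                return False
            self._completed = True
            return True

c = Completable()
print(c.set_completed())  # True
print(c.set_completed())  # False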
def _run_model(iterator, args, tf_args):
single_node_env(tf_args)
logging.info("===== input_mapping: {}".format(args.input_mapping))
logging.info("===== output_mapping: {}".format(args.output_mapping))
input_tensor_names = [tensor for col, tensor in sorted(args.input_mapping.items())]
output_tensor_names = [tensor for tensor, col in sorted(args.output_mapping.items())]
if args.signature_def_key:
assert args.export_dir, "Inferencing with signature_def_key requires --export_dir argument"
logging.info("===== loading meta_graph_def for tag_set ({0}) from saved_model: {1}".format(args.tag_set, args.export_dir))
meta_graph_def = get_meta_graph_def(args.export_dir, args.tag_set)
signature = meta_graph_def.signature_def[args.signature_def_key]
logging.debug("signature: {}".format(signature))
inputs_tensor_info = signature.inputs
logging.debug("inputs_tensor_info: {0}".format(inputs_tensor_info))
outputs_tensor_info = signature.outputs
logging.debug("outputs_tensor_info: {0}".format(outputs_tensor_info))
result = []
global global_sess, global_args
if global_sess and global_args == args:
sess = global_sess
else:
tf.reset_default_graph()
sess = tf.Session(graph=tf.get_default_graph())
if args.export_dir:
assert args.tag_set, "Inferencing from a saved_model requires --tag_set"
logging.info("===== restoring from saved_model: {}".format(args.export_dir))
loader.load(sess, args.tag_set.split(','), args.export_dir)
elif args.model_dir:
ckpt = tf.train.latest_checkpoint(args.model_dir)
assert ckpt, "Invalid model checkpoint path: {}".format(args.model_dir)
logging.info("===== restoring from checkpoint: {}".format(ckpt + ".meta"))
saver = tf.train.import_meta_graph(ckpt + ".meta", clear_devices=True)
saver.restore(sess, ckpt)
else:
raise Exception("Inferencing requires either --model_dir or --export_dir argument")
global_sess = sess
global_args = args
if args.signature_def_key:
input_tensors = [inputs_tensor_info[t].name for t in input_tensor_names]
output_tensors = [outputs_tensor_info[output_tensor_names[0]].name]
else:
input_tensors = [t + ':0' for t in input_tensor_names]
output_tensors = [t + ':0' for t in output_tensor_names]
logging.info("input_tensors: {0}".format(input_tensors))
logging.info("output_tensors: {0}".format(output_tensors))
for tensors in yield_batch(iterator, args.batch_size, len(input_tensor_names)):
inputs_feed_dict = {}
for i in range(len(input_tensors)):
inputs_feed_dict[input_tensors[i]] = tensors[i]
outputs = sess.run(output_tensors, feed_dict=inputs_feed_dict)
lengths = [len(output) for output in outputs]
input_size = len(tensors[0])
assert all([length == input_size for length in lengths]), "Output array sizes {} must match input size: {}".format(lengths, input_size)
python_outputs = [output.tolist() for output in outputs]
result.extend(zip(*python_outputs))
return result
|
mapPartitions function to run single-node inferencing from a checkpoint/saved_model, using the model's input/output mappings.
Args:
:iterator: input RDD partition iterator.
:args: arguments for TFModel, in argparse format
:tf_args: arguments for TensorFlow inferencing code, in argparse or ARGV format.
Returns:
An iterator of result data.
|
juraj-google-style
|
def dropout(x, keep_prob, noise_shape=None, name=None):
noise_shape = convert_to_shape(noise_shape)
if (noise_shape is None):
noise_shape = x.shape
with tf.variable_scope(name, default_name='dropout'):
if (keep_prob == 1.0):
return x
noise = cast(less(random_uniform(x.mesh, noise_shape, dtype=x.dtype), keep_prob), x.dtype)
noise /= keep_prob
return (x * noise)
|
Dropout layer.
Args:
x: a Tensor
keep_prob: a float between 0.0 and 1.0
noise_shape: an optional Shape (a subset of x.shape)
name: an optional string
Returns:
a Tensor
|
codesearchnet
|
def compile_action_preconditions(self,
state: Sequence[tf.Tensor],
action: Sequence[tf.Tensor]) -> List[TensorFluent]:
scope = self.action_precondition_scope(state, action)
preconds = []
with self.graph.as_default():
with tf.name_scope('action_preconditions'):
for p in self.rddl.domain.preconds:
fluent = self._compile_expression(p, scope)
preconds.append(fluent)
return preconds
|
Compiles the action preconditions given current `state` and `action` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`.
|
juraj-google-style
|
def Sample(self, task, status):
sample_time = time.time()
sample = '{0:f}\t{1:s}\t{2:s}\n'.format(
sample_time, task.identifier, status)
self._WritesString(sample)
|
Takes a sample of the status of a task for profiling.
Args:
task (Task): a task.
status (str): status.
|
juraj-google-style
|
def get_metadata_as_dict(self, user_id=None, source=None):
if self.metadata is None or self.metadata == "":
return {}
metadata_dict = self.metadata if isinstance(self.metadata, dict) else json.loads(self.metadata)
metadata_keys = [m.lower() for m in metadata_dict]
if user_id is not None and 'user_id' not in metadata_keys:
metadata_dict['user_id'] = six.text_type(user_id)
if source is not None and 'source' not in metadata_keys:
metadata_dict['source'] = six.text_type(source)
return { k : six.text_type(v) for k, v in metadata_dict.items() }
|
Convert a metadata json string into a dictionary.
Args:
user_id (int): Optional: Insert user_id into the metadata if specified
source (string): Optional: Insert source (the name of the app typically) into the metadata if necessary.
Returns:
dict: The metadata as a Python dictionary
|
juraj-google-style
|
def raise_for_status(response):
for err_name in web_exceptions.__all__:
err = getattr(web_exceptions, err_name)
if (err.status_code == response.status):
payload = dict(headers=response.headers, reason=response.reason)
if issubclass(err, web_exceptions._HTTPMove):
raise err(response.headers['Location'], **payload)
raise err(**payload)
|
Raise an appropriate error for a given response.
Arguments:
response (:py:class:`aiohttp.ClientResponse`): The API response.
Raises:
:py:class:`aiohttp.web_exceptions.HTTPException`: The appropriate
error for the response's status.
|
codesearchnet
|
def _process_query(self, query, prepared=False):
if (prepared is True):
files = {'query': str(query)}
logger.debug('About to submit the following query {}'.format(query))
(res, status) = self.post(self.disambiguate_service, files=files, headers={'Accept': 'application/json'})
if (status == 200):
return (self.decode(res), status)
else:
logger.debug('Disambiguation failed.')
return (None, status)
text = query['text']
sentence_coordinates = [{'offsetStart': 0, 'offsetEnd': len(text)}]
total_nb_sentences = len(sentence_coordinates)
sentences_groups = []
if (len(text) > self.max_text_length):
(res, status_code) = self.segment(text)
if (status_code == 200):
sentence_coordinates = res['sentences']
total_nb_sentences = len(sentence_coordinates)
else:
logger.error('Error during the segmentation of the text.')
logger.debug('Text too long, split in {} sentences; building groups of {} sentences.'.format(total_nb_sentences, self.sentences_per_group))
sentences_groups = self._group_sentences(total_nb_sentences, self.sentences_per_group)
else:
query['sentence'] = 'true'
if (total_nb_sentences > 1):
query['sentences'] = sentence_coordinates
if (len(sentences_groups) > 0):
for group in sentences_groups:
query['processSentence'] = group
(res, status_code) = self._process_query(query, prepared=True)
if (status_code == 200):
if ('entities' in res):
query['entities'] = res[u'entities']
query['language'] = res[u'language']
else:
logger.error('Error when processing the query {}'.format(query))
return (None, status_code)
else:
(res, status_code) = self._process_query(query, prepared=True)
if (status_code == 200):
query['language'] = res[u'language']
if ('entities' in res):
query['entities'] = res[u'entities']
else:
logger.error('Error when processing the query {}'.format(query))
return (None, status_code)
return (query, status_code)
|
Process query recursively, if the text is too long,
it is split and processed bit by bit.
Args:
query (sdict): Text to be processed.
prepared (bool): True when the query is ready to be submitted via
POST request.
Returns:
str: Body ready to be submitted to the API.
|
codesearchnet
|
def Deserialize(self, reader):
self.Timestamp = reader.ReadUInt32()
self.Services = reader.ReadUInt64()
addr = bytearray(reader.ReadFixedString(16))
addr.reverse()
addr.strip(b'\x00')
nums = []
for i in range(0, 4):
nums.append(str(addr[i]))
nums.reverse()
adddd = '.'.join(nums)
self.Address = adddd
self.Port = reader.ReadUInt16(endian='>')
|
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
|
juraj-google-style
|
def center_crop(self, image: 'torch.Tensor', size: Dict[str, int], **kwargs) -> 'torch.Tensor':
output_size = size.shortest_edge
return F.center_crop(image, output_size=(output_size, output_size), **kwargs)
|
Center crop an image to a square of side `size.shortest_edge`. If the input size is smaller than the crop size along
any edge, the image is padded with 0's and then center cropped.
Args:
image (`torch.Tensor`):
Image to center crop.
size (`Dict[str, int]`):
Size of the output image in the form `{"height": h, "width": w}`.
|
github-repos
|
def _draw_breakpoint_icon(self, top, painter, icon_name):
rect = QRect(0, top, self.sizeHint().width(), self.sizeHint().height())
try:
icon = self.icons[icon_name]
except KeyError as e:
debug_print("Breakpoint icon doen't exist, {}".format(e))
else:
icon.paint(painter, rect)
|
Draw the given breakpoint pixmap.
Args:
top (int): top of the line to draw the breakpoint icon.
painter (QPainter)
icon_name (str): key of icon to draw (see: self.icons)
|
codesearchnet
|
def __init__(self, hosts=None):
if not hosts:
hosts = [{"host": "localhost", "port": 9200}]
try:
self.els_search = elasticsearch.Elasticsearch(hosts)
info = self.els_search.info()
version = info['version']
print '\t- ELS Indexer connected: %s %s %s %s' % (str(hosts), info['name'],
version['number'], version['lucene_version'])
except elasticsearch.exceptions.ConnectionError:
print '\t- ELS connection failed! Is your ELS server running?'
exit(1)
|
Initialization for the Elastic Search Indexer.
Args:
hosts: List of connection settings.
|
juraj-google-style
|
def _next_power_of_two(x):
return 1 if x == 0 else 2 ** (int(x) - 1).bit_length()
|
Calculates the smallest enclosing power of two for an input.
Args:
x: Positive float or integer number.
Returns:
Next largest power of two integer.
|
github-repos
|
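A few worked values of the bit_length trick, assuming _next_power_of_two above is importable (x=0 is special-cased to 1):
for x in (0, 1, 5, 16, 300):
    print(x, '->', _next_power_of_two(x))
# 0 -> 1, 1 -> 1, 5 -> 8, 16 -> 16, 300 -> 512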
def ice_register_write(self, register_index, value, delay=False):
self._dll.JLINKARM_WriteICEReg(register_index, int(value), int(delay))
return None
|
Writes a value to an ARM ICE register.
Args:
self (JLink): the ``JLink`` instance
register_index (int): the ICE register to write to
value (int): the value to write to the ICE register
delay (bool): boolean specifying if the write should be delayed
Returns:
``None``
|
juraj-google-style
|
def init_from_storage_write_to_datastore(self, batch_size=100, allowed_epsilon=None, skip_image_ids=None, max_num_images=None):
if (allowed_epsilon is None):
allowed_epsilon = copy.copy(DEFAULT_EPSILON)
self._dataset_batches = {}
images = self._read_image_list(skip_image_ids)
if max_num_images:
images = images[:max_num_images]
for (batch_idx, batch_start) in enumerate(range(0, len(images), batch_size)):
batch = images[batch_start:(batch_start + batch_size)]
batch_id = DATASET_BATCH_ID_PATTERN.format(batch_idx)
batch_epsilon = allowed_epsilon[(batch_idx % len(allowed_epsilon))]
self.add_batch(batch_id, {'epsilon': batch_epsilon})
for (image_id, image_path) in batch:
self.add_image(batch_id, image_id, {'dataset_image_id': os.path.basename(image_path)[:(- 4)], 'image_path': image_path})
self.write_to_datastore()
|
Initializes dataset batches from the list of images in the datastore.
Args:
batch_size: batch size
allowed_epsilon: list of allowed epsilon or None to use default
skip_image_ids: list of image ids to skip
max_num_images: maximum number of images to read
|
codesearchnet
|
def _read(cls, **kwargs):
pd_obj = pandas.read_csv(**kwargs)
if isinstance(pd_obj, pandas.DataFrame):
return cls.from_pandas(pd_obj)
if isinstance(pd_obj, pandas.io.parsers.TextFileReader):
pd_read = pd_obj.read
pd_obj.read = lambda *args, **kwargs: cls.from_pandas(
pd_read(*args, **kwargs)
)
return pd_obj
|
Read csv file from local disk.
Args:
filepath_or_buffer:
The filepath of the csv file.
We only support local files for now.
kwargs: Keyword arguments in pandas.read_csv
|
juraj-google-style
|
def delete(self, filething=None):
fileobj = filething.fileobj
self.tags.clear()
try:
try:
self.tags._inject(fileobj, lambda x: 0)
except error as e:
reraise(self._Error, e, sys.exc_info()[2])
except EOFError:
raise self._Error("no appropriate stream found")
except IOError as e:
reraise(self._Error, e, sys.exc_info()[2])
|
delete(filething=None)
Remove tags from a file.
If no filename is given, the one most recently loaded is used.
Args:
filething (filething)
Raises:
mutagen.MutagenError
|
juraj-google-style
|
def GetLogdirSubdirectories(path):
if (not tf.io.gfile.exists(path)):
return ()
if (not tf.io.gfile.isdir(path)):
raise ValueError(('GetLogdirSubdirectories: path exists and is not a directory, %s' % path))
if IsCloudPath(path):
logger.info('GetLogdirSubdirectories: Starting to list directories via glob-ing.')
traversal_method = ListRecursivelyViaGlobbing
else:
logger.info('GetLogdirSubdirectories: Starting to list directories via walking.')
traversal_method = ListRecursivelyViaWalking
return (subdir for (subdir, files) in traversal_method(path) if any((IsTensorFlowEventsFile(f) for f in files)))
|
Obtains all subdirectories with events files.
The order of the subdirectories returned is unspecified. The internal logic
that determines order varies by scenario.
Args:
path: The path to a directory under which to find subdirectories.
Returns:
A tuple of absolute paths of all subdirectories each with at least 1 events
file directly within the subdirectory.
Raises:
ValueError: If the path passed to the method exists and is not a directory.
|
codesearchnet
|
def __init__(self, function_approximator, map_size=(10, 10), memory_num=4, repeating_penalty=0.5):
self.__map_arr = self.__create_map(map_size)
self.__agent_pos = self.START_POS
self.__reward_list = []
self.__route_memory_list = []
self.__memory_num = memory_num
self.__repeating_penalty = repeating_penalty
super().__init__(function_approximator)
|
Init.
Args:
function_approximator: is-a `FunctionApproximator`.
map_size: Size of map.
memory_num: The number of step of agent's memory.
repeating_penalty: The value of penalty in the case that agent revisit.
|
juraj-google-style
|
def aoi(self, **kwargs):
g = self._parse_geoms(**kwargs)
if g is None:
return self
else:
return self[g]
|
Subsets the Image by the given bounds
Args:
bbox (list): optional. A bounding box array [minx, miny, maxx, maxy]
wkt (str): optional. A WKT geometry string
geojson (str): optional. A GeoJSON geometry dictionary
Returns:
image: an image instance of the same type
|
juraj-google-style
|
def run_and_report_benchmark(self, dataset, num_elements, name, iters=5, extras=None, warmup=True, apply_default_optimizations=False, session_config=None):
wall_time = self.run_benchmark(dataset=dataset, num_elements=num_elements, iters=iters, warmup=warmup, apply_default_optimizations=apply_default_optimizations, session_config=session_config)
if extras is None:
extras = {}
if context.executing_eagerly():
name = '{}.eager'.format(name)
extras['implementation'] = 'eager'
else:
name = '{}.graph'.format(name)
extras['implementation'] = 'graph'
extras['num_elements'] = num_elements
self.report_benchmark(wall_time=wall_time, iters=iters, name=name, extras=extras)
return wall_time
|
Benchmarks the dataset and reports the stats.
Runs the dataset `iters` times. In each iteration, the benchmark measures
the time it takes to go through `num_elements` elements of the dataset.
This is followed by logging/printing the benchmark stats.
Args:
dataset: Dataset to benchmark.
num_elements: Number of dataset elements to iterate through each benchmark
iteration.
name: Name of the benchmark.
iters: Number of times to repeat the timing.
extras: A dict which maps string keys to additional benchmark info.
warmup: If true, warms up the session caches by running an untimed run.
apply_default_optimizations: Determines whether default optimizations
should be applied.
session_config: A ConfigProto protocol buffer with configuration options
for the session. Applicable only for benchmarking in graph mode.
Returns:
A float, representing the per-element wall time of the dataset in seconds.
This is the median time (with respect to `iters`) it takes for the dataset
to go through `num_elements` elements, divided by `num_elements.`
|
github-repos
|
def pack(value):
if is_packed(value):
return value
spec = value._type_spec._tf_extension_type_with_packed(True)
try:
variant = composite_tensor_ops.composite_tensor_to_variants(value)
except nested_structure_coder.NotEncodableError as e:
raise ValueError('ExtensionTypes must have a __name__ field in order to be packed.') from e
return _create_object_from_type_and_dict(type(value), {'_tf_extension_type_cached_type_spec': spec, '_tf_extension_type_packed_variant': variant})
|
Returns a copy of `value` with fields packed in a single Variant.
Args:
value: An `ExtensionType` object.
Returns:
An `ExtensionType` object.
|
github-repos
|
def ExtractFilename(self, flagfile_str):
if flagfile_str.startswith('--flagfile='):
return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip())
elif flagfile_str.startswith('-flagfile='):
return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip())
else:
raise exceptions.Error(
'Hit illegal --flagfile type: %s' % flagfile_str)
|
Returns filename from a flagfile_str of form -[-]flagfile=filename.
The cases of --flagfile foo and -flagfile foo shouldn't be hitting
this function, as they are dealt with in the level above this
function.
Args:
flagfile_str: flagfile string.
Returns:
str filename from a flagfile_str of form -[-]flagfile=filename.
Raises:
Error: when illegal --flagfile provided.
|
juraj-google-style
|
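The same prefix handling as a standalone sketch: a free function instead of the method above, with ValueError standing in for exceptions.Error:
import os

def extract_filename(flagfile_str):
    # Accept both --flagfile= and -flagfile= prefixes; reject anything else.
    for prefix in ('--flagfile=', '-flagfile='):
        if flagfile_str.startswith(prefix):
            return os.path.expanduser(flagfile_str[len(prefix):].strip())
    raise ValueError('Hit illegal --flagfile type: %s' % flagfile_str)

print(extract_filename('--flagfile=flags.txt'))   # 'flags.txt'
print(extract_filename('-flagfile=~/flags.txt'))  # '~' expanded to the home directory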
def build_results(self, session, tensor_values):
full_values = []
assert len(self._final_fetches) == len(tensor_values)
i = 0
j = 0
for is_op in self._ops:
if is_op:
full_values.append(None)
else:
if self._fetches[i].ref() in self._feed_handles:
value = self._feed_handles[self._fetches[i].ref()].eval()
else:
value = self._feeds.get(self._fetches[i].ref())
if value is None:
value = tensor_values[j]
j += 1
dtype = self._fetch_handles.get(self._fetches[i].ref())
if dtype:
full_values.append(session_ops.TensorHandle(value, dtype, session))
else:
full_values.append(value)
i += 1
assert j == len(tensor_values)
return self._fetch_mapper.build_results(full_values)
|
Build results matching the original fetch shape.
`tensor_values` must be a list of the same length as
the one returned by `fetches()`, and holding the requested
fetch values.
This method builds a struct with the same shape as the original `fetches`
passed to the constructor, in which the fetches are replaced by their
fetched value.
Args:
session: The enclosing session. Used for tensor handles.
tensor_values: List of values matching the list returned by fetches().
Returns:
A structure of the same shape as the original `fetches` argument but
containing tensors or None (for fetched ops).
|
github-repos
|
def manual_get_pfam_annotations(seq, outpath, searchtype='phmmer', force_rerun=False):
if op.exists(outpath):
with open(outpath, 'r') as f:
json_results = json.loads(json.load(f))
else:
fseq = ('>Seq\n' + seq)
if (searchtype == 'phmmer'):
parameters = {'seqdb': 'pdb', 'seq': fseq}
if (searchtype == 'hmmscan'):
parameters = {'hmmdb': 'pfam', 'seq': fseq}
enc_params = urllib.urlencode(parameters).encode('utf-8')
request = urllib2.Request('http:
url = (urllib2.urlopen(request).geturl() + '?output=json')
request = str(url)
request_read = urlopen(request).read().decode('utf-8')
with open(outpath, 'w') as f:
json.dump(request_read, f)
json_results = json.loads(request_read)
return json_results['results']['hits']
|
Retrieve and download PFAM results from the HMMER search tool.
Args:
seq:
outpath:
searchtype:
force_rerun:
Returns:
Todo:
* Document and test!
|
codesearchnet
|
def dict_factory(self, cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
val = row[idx]
name = col[0]
if name == Field.Time_Stamp:
d[col[0]] = str(val)
continue
if name == "Raw_A" or name == "Raw_B":
continue
if name not in self.m_all_fields:
continue
if (str(val) != "None") and ((val > 0) or (val < 0)):
d[name] = str(val)
return d
|
Sqlite callback accepting the cursor and the original row as a tuple.
Simple return of JSON safe types.
Args:
cursor (sqlite cursor): Original cursor.
row (sqlite row tuple): Original row.
Returns:
dict: modified row.
|
juraj-google-style
|
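How such a row factory is typically wired into sqlite3, as a minimal sketch with a plain dict factory (the field filtering above is specific to the meter schema):
import sqlite3

def plain_dict_factory(cursor, row):
    # Map column names from cursor.description onto the row values.
    return {col[0]: row[idx] for idx, col in enumerate(cursor.description)}

conn = sqlite3.connect(':memory:')
conn.row_factory = plain_dict_factory
conn.execute('CREATE TABLE t (a INTEGER, b TEXT)')
conn.execute("INSERT INTO t VALUES (1, 'x')")
print(conn.execute('SELECT * FROM t').fetchone())  # {'a': 1, 'b': 'x'}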
def _ParseFileEntryWithParsers(self, parser_mediator, parser_names, file_entry, file_object=None):
parse_results = self._PARSE_RESULT_UNSUPPORTED
for parser_name in parser_names:
parser = self._parsers.get(parser_name, None)
if (not parser):
raise RuntimeError('Parser object missing for parser: {0:s}'.format(parser_name))
if parser.FILTERS:
if (not self._CheckParserCanProcessFileEntry(parser, file_entry)):
parse_results = self._PARSE_RESULT_SUCCESS
continue
display_name = parser_mediator.GetDisplayName(file_entry)
logger.debug('[ParseFileEntryWithParsers] parsing file: {0:s} with parser: {1:s}'.format(display_name, parser_name))
parse_result = self._ParseFileEntryWithParser(parser_mediator, parser, file_entry, file_object=file_object)
if (parse_result == self._PARSE_RESULT_FAILURE):
return self._PARSE_RESULT_FAILURE
if (parse_result == self._PARSE_RESULT_SUCCESS):
parse_results = self._PARSE_RESULT_SUCCESS
return parse_results
|
Parses a file entry with specific parsers.
Args:
parser_mediator (ParserMediator): parser mediator.
parser_names (list[str]): names of parsers.
file_entry (dfvfs.FileEntry): file entry.
file_object (Optional[file]): file-like object to parse.
If not set the parser will use the parser mediator to open
the file entry's default data stream as a file-like object.
Returns:
int: parse result which is _PARSE_RESULT_FAILURE if the file entry
could not be parsed, _PARSE_RESULT_SUCCESS if the file entry
successfully was parsed or _PARSE_RESULT_UNSUPPORTED when
UnableToParseFile was raised or no names of parser were provided.
Raises:
RuntimeError: if the parser object is missing.
|
codesearchnet
|
def _page_streamable(page_descriptor):
def inner(a_func, settings, request, **kwargs):
'Actual page-streaming based on the settings.'
page_iterator = gax.PageIterator(a_func, page_descriptor, settings.page_token, request, **kwargs)
if settings.flatten_pages:
return gax.ResourceIterator(page_iterator)
else:
return page_iterator
return inner
|
Creates a function that yields an iterable to perform page-streaming.
Args:
page_descriptor (:class:`PageDescriptor`): indicates the structure
of page streaming to be performed.
Returns:
Callable: A function that returns an iterator.
|
codesearchnet
|
def build(self):
if (not self.build_cmds):
LOGGER.debug('No build commands were found, skipping build step')
with LogTask('Building {} disk {}'.format(self.name, self.disk_path)):
for command in self.build_cmds:
with LogTask('Running command {}'.format(command.name)):
LOGGER.debug(command.cmd)
result = utils.run_command(command.cmd)
if result:
raise BuildException(result.err)
|
Run all the commands in self.build_cmds
Raises:
lago.build.BuildException: If a command returned a non-zero code
|
codesearchnet
|
def alternative_titles(self, **kwargs):
path = self._get_id_path('alternative_titles')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Get the alternative titles for a specific movie id.
Args:
country: (optional) ISO 3166-1 code.
append_to_response: (optional) Comma separated, any movie method.
Returns:
A dict representation of the JSON returned from the API.
|
codesearchnet
|
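A hedged usage sketch, assuming the method above belongs to a tmdbsimple-style Movies wrapper and that a valid TMDb API key is configured; the movie id and country code are arbitrary examples.

import tmdbsimple as tmdb  # assumption: the tmdbsimple package provides this wrapper

tmdb.API_KEY = 'YOUR_TMDB_API_KEY'  # placeholder
movie = tmdb.Movies(550)  # arbitrary example movie id
response = movie.alternative_titles(country='US')
print(response.get('titles', []))  # alternative titles for the given country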
def set_reprompt_ssml(self, ssml):
self.response.reprompt.outputSpeech.type = 'SSML'
self.response.reprompt.outputSpeech.ssml = ssml
|
Set response reprompt output speech as SSML type.
Args:
ssml: str. Response speech used when type is 'SSML', should be formatted
with Speech Synthesis Markup Language. Cannot exceed 8,000
characters.
|
codesearchnet
|
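The method only writes two fields on a nested response object, so this standalone sketch mimics that structure with SimpleNamespace stand-ins; the real library builds the response object for you, and the layout assumed here is illustrative.

from types import SimpleNamespace

# Stand-in for the library's response object (assumed layout).
response = SimpleNamespace(
    reprompt=SimpleNamespace(outputSpeech=SimpleNamespace(type=None, ssml=None)))

def set_reprompt_ssml(ssml):
    response.reprompt.outputSpeech.type = 'SSML'
    response.reprompt.outputSpeech.ssml = ssml

set_reprompt_ssml('<speak>Are you still there?</speak>')
print(response.reprompt.outputSpeech.type)  # SSML
print(response.reprompt.outputSpeech.ssml)  # <speak>Are you still there?</speak>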
async def verify_docker_image_task(chain, link):
errors = []
worker_type = get_worker_type(link.task)
if (worker_type not in chain.context.config['valid_docker_image_worker_types']):
errors.append('{} is not a valid docker-image workerType!'.format(worker_type))
raise_on_errors(errors)
|
Verify the docker image Link.
Args:
chain (ChainOfTrust): the chain we're operating on.
link (LinkOfTrust): the task link we're checking.
|
codesearchnet
|
def _validated_config_filename(self, name):
dir_name = self._make_config_dir()
filename = os.path.join(dir_name, name.split(".json")[0] + ".json")
return filename
|
Make the config dir and return the full file path, including the .json extension.
Args:
name (str): Filename without dir or extension
Returns:
str: Full path including extension
|
juraj-google-style
|
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, data_type, data_format='NHWC'):
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)], dtype=data_type).reshape(tensor_in_sizes)
x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)], dtype=data_type).reshape(filter_in_sizes)
with self.session() as sess:
if data_type == np.float32:
tolerance = 0.0001
else:
self.assertEqual(data_type, np.float64)
tolerance = 1e-08
t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=data_type)
t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=data_type)
native_t1 = t1
strides = [1, stride, stride, 1]
if data_format == 'NCHW':
native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
with self.test_scope():
conv_native = nn_ops.depthwise_conv2d_native(native_t1, t2, strides=strides, data_format=data_format, padding=padding)
if data_format == 'NCHW':
conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])
with ops.device('CPU'):
conv_interface = ReferenceDepthwiseConv2D(t1, t2, strides=[1, stride, stride, 1], padding=padding)
native_result = sess.run(conv_native, {t1: x1, t2: x2})
interface_result = sess.run(conv_interface, {t1: x1, t2: x2})
print('data_type:', data_type, 'max diff = ', np.amax(np.absolute(native_result - interface_result)))
self.assertAllClose(np.ravel(native_result), np.ravel(interface_result), rtol=tolerance)
|
Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
data_type: The data type to use.
data_format: The data_format of the input. "NHWC" or "NCHW".
|
github-repos
|
def convert_obatoms_to_molecule(self, atoms, residue_name=None, site_property='ff_map'):
restore_site_props = (True if (residue_name is not None) else False)
if (restore_site_props and (not hasattr(self, 'map_residue_to_mol'))):
self._set_residue_map()
coords = []
zs = []
for atm in atoms:
coords.append(list(atm.coords))
zs.append(atm.atomicnum)
mol = Molecule(zs, coords)
if restore_site_props:
props = []
ref = self.map_residue_to_mol[residue_name].copy()
assert (len(mol) == len(ref))
assert (ref.formula == mol.formula)
for (i, site) in enumerate(mol):
assert (site.specie.symbol == ref[i].specie.symbol)
props.append(getattr(ref[i], site_property))
mol.add_site_property(site_property, props)
return mol
|
Convert a list of OpenBabel atoms to a Molecule.
Args:
atoms ([OBAtom]): list of OBAtom objects
residue_name (str): the key in self.map_residue_to_mol. Used to
restore the site properties in the final packed molecule.
site_property (str): the site property to be restored.
Returns:
Molecule object
|
codesearchnet
|
def to_price_index(returns, start=100):
return ((returns.replace(to_replace=np.nan, value=0) + 1).cumprod() * start)
|
Returns a price index given a series of returns.
Args:
* returns: Expects a return series
* start (number): Starting level
Assumes arithmetic returns.
Formula is: cumprod (1+r)
|
codesearchnet
|
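A short worked example of the cumprod(1 + r) formula above, repeating the one-liner so the sketch runs standalone; the return values are made up.

import numpy as np
import pandas as pd

def to_price_index(returns, start=100):
    # Same formula as above: cumulative product of (1 + r), scaled by the starting level.
    return (returns.replace(to_replace=np.nan, value=0) + 1).cumprod() * start

r = pd.Series([0.01, -0.02, np.nan, 0.03])
print(to_price_index(r).round(4).tolist())
# [101.0, 98.98, 98.98, 101.9494]  (NaN is treated as a 0% return)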
def convert(self, calibration_inputs: Optional[Mapping[str, np.ndarray]]=None, num_runs=1) -> None:
|
Converts the model with TensorRT and calibrates if using INT8 precision mode.
Args:
calibration_inputs: Mapping from input names to ndarrays in TF1. Or a
sequence of tensors in TF2. Used as calibration data.
num_runs: Number of calibration runs.
|
github-repos
|
def aes_encrypt(base64_encryption_key, data):
if isinstance(data, text_type):
data = data.encode('UTF-8')
(aes_key_bytes, hmac_key_bytes) = _extract_keys(base64_encryption_key)
data = _pad(data)
iv_bytes = os.urandom(AES_BLOCK_SIZE)
cipher = AES.new(aes_key_bytes, mode=AES.MODE_CBC, IV=iv_bytes)
data = (iv_bytes + cipher.encrypt(data))
hmac_signature = hmac.new(hmac_key_bytes, data, hashlib.sha256).digest()
return as_base64((data + hmac_signature))
|
Encrypt data with AES-CBC and sign it with HMAC-SHA256
Arguments:
base64_encryption_key (str): a base64-encoded string containing an AES encryption key
and HMAC signing key as generated by generate_encryption_key()
data (str or bytes): the data to be encrypted; text is encoded as UTF-8 before encryption
Returns:
str: base64 encoding of the IV, ciphertext, and HMAC-SHA256 signature concatenated together
|
codesearchnet
|
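The helpers this function relies on (_extract_keys, _pad, as_base64) are not shown, so the sketch below re-creates the same IV + CBC ciphertext + HMAC-SHA256 layout directly with PyCryptodome; the key handling and padding here are illustrative assumptions, not the module's own helpers.

import base64
import hashlib
import hmac
import os

from Crypto.Cipher import AES  # assumption: PyCryptodome is installed

aes_key, hmac_key = os.urandom(32), os.urandom(32)  # normally derived from the base64 key string

def pkcs7_pad(data, block=AES.block_size):
    n = block - len(data) % block
    return data + bytes([n]) * n

iv = os.urandom(AES.block_size)
ciphertext = iv + AES.new(aes_key, AES.MODE_CBC, iv).encrypt(pkcs7_pad(b'top secret'))
signed = ciphertext + hmac.new(hmac_key, ciphertext, hashlib.sha256).digest()
token = base64.b64encode(signed)
print(len(signed), token[:16])  # IV (16) + padded ciphertext + 32-byte HMAC tag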
def _KillProcess(self, pid):
if sys.platform.startswith('win'):
process_terminate = 1
handle = ctypes.windll.kernel32.OpenProcess(process_terminate, False, pid)
ctypes.windll.kernel32.TerminateProcess(handle, (- 1))
ctypes.windll.kernel32.CloseHandle(handle)
else:
try:
os.kill(pid, signal.SIGKILL)
except OSError as exception:
logger.error('Unable to kill process {0:d} with error: {1!s}'.format(pid, exception))
|
Issues a SIGKILL or equivalent to the process.
Args:
pid (int): process identifier (PID).
|
codesearchnet
|
def expand_role(self, role):
if ('/' in role):
return role
else:
return self.boto_session.resource('iam').Role(role).arn
|
Expand an IAM role name into an ARN.
If the role is already in the form of an ARN, then the role is simply returned. Otherwise we retrieve the full
ARN and return it.
Args:
role (str): An AWS IAM role (either name or full ARN).
Returns:
str: The corresponding AWS IAM role ARN.
|
codesearchnet
|
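A hedged usage sketch, assuming this is the expand_role method on a sagemaker.Session object and that AWS credentials are configured; the role name and account id are placeholders.

import sagemaker  # assumption: the SageMaker Python SDK is installed

sess = sagemaker.Session()
# Already an ARN: returned unchanged because it contains a '/'.
print(sess.expand_role('arn:aws:iam::123456789012:role/MyRole'))
# Plain role name: looked up via IAM and expanded to the full ARN.
print(sess.expand_role('MyRole'))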
def delete(self, url, **kwargs):
check_type(url, basestring, may_be_none=False)
erc = kwargs.pop('erc', EXPECTED_RESPONSE_CODE['DELETE'])
self.request('DELETE', url, erc, **kwargs)
|
Sends a DELETE request.
Args:
url(basestring): The URL of the API endpoint.
**kwargs:
erc(int): The expected (success) response code for the request.
others: Passed on to the requests package.
Raises:
ApiError: If anything other than the expected response code is
returned by the Webex Teams API endpoint.
|
codesearchnet
|
def _signature_to_tf2xla_config(signature_def, variable_nodes_to_feed):
from tensorflow.compiler.tf2xla import tf2xla_pb2
config = tf2xla_pb2.Config()
tensor_id = tf2xla_pb2.TensorId
for name, input_ in signature_def.inputs.items():
name = name.replace('/', '_')
name = 'feed_{}'.format(name)
node_name, output_index = _parse_tensor_name(input_.name)
output_index = int(output_index)
config.feed.append(tf2xla_pb2.Feed(id=tensor_id(node_name=node_name, output_index=output_index), name=name, type=input_.dtype, shape=input_.tensor_shape))
for name, output_ in signature_def.outputs.items():
name = name.replace('/', '_')
name = 'fetch_{}'.format(name)
node_name, output_index = _parse_tensor_name(output_.name)
output_index = int(output_index)
config.fetch.append(tf2xla_pb2.Fetch(id=tensor_id(node_name=node_name, output_index=output_index), name=name, type=output_.dtype, shape=output_.tensor_shape))
for node, modified in variable_nodes_to_feed:
name = node.name.replace('/', '_')
name = 'param_{}'.format(name)
config.variable.append(tf2xla_pb2.Variable(node_name=node.name, name=name, type=node.attr['dtype'].type, shape=node.attr['shape'].shape, readonly=not modified))
return config
|
Convert `signature_def` to tf2xla config. Returns a `tf2xla.Config` proto.
Args:
signature_def: Instance of `SignatureDef`.
variable_nodes_to_feed: List of tuples of form `(node_def, modified)`
corresponding to VarHandleOp, and a boolean `modified` that describes
whether the variable was modified during execution.
Returns:
An instance of `tf2xla.Config` proto.
Raises:
RuntimeError: If TensorFlow was not compiled with XLA.
|
github-repos
|
def _to_json(self, strip, to_serialize=None):
if (to_serialize is None):
to_serialize = copy.copy(self.__dict__)
pkcs12_val = to_serialize.get(_PKCS12_KEY)
if (pkcs12_val is not None):
to_serialize[_PKCS12_KEY] = base64.b64encode(pkcs12_val)
return super(ServiceAccountCredentials, self)._to_json(strip, to_serialize=to_serialize)
|
Utility function that creates JSON repr. of a credentials object.
Over-ride is needed since PKCS#12 keys will not in general be JSON
serializable.
Args:
strip: array, An array of names of members to exclude from the
JSON.
to_serialize: dict, (Optional) The properties for this object
that will be serialized. This allows callers to
modify before serializing.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
|
codesearchnet
|
def get_pkg_names(pkgs):
result = set()
with open(join('mapping'), 'r') as f:
data = dict((x.strip().split(':') for x in f))
for pkg in pkgs:
result.add(data.get(pkg, pkg))
return sorted(result, key=(lambda s: s.lower()))
|
Get PyPI package names from a list of imports.
Args:
pkgs (List[str]): List of import names.
Returns:
List[str]: The corresponding PyPI package names.
|
codesearchnet
|
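A quick illustration, assuming this is pipreqs' get_pkg_names and that its bundled mapping file contains entries such as sklearn:scikit_learn; the exact translations depend on that file.

from pipreqs.pipreqs import get_pkg_names  # assumption: module path within the pipreqs package

print(get_pkg_names(['sklearn', 'requests', 'yaml']))
# e.g. ['PyYAML', 'requests', 'scikit_learn'] -- unmapped imports pass through unchanged,
# and the result is sorted case-insensitively.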
def run(self, *args, **kwargs):
self.log.debug('Starting EBSAuditor')
data = self.update_data()
notices = defaultdict(list)
for (account, issues) in data.items():
for issue in issues:
for recipient in account.contacts:
notices[NotificationContact(type=recipient['type'], value=recipient['value'])].append(issue)
self.notify(notices)
|
Main execution point for the auditor
Args:
*args:
**kwargs:
Returns:
`None`
|
codesearchnet
|
def delete_user_role(self, user, role):
self.project_service.set_auth(self._token_project)
self.project_service.delete_user_role(user, role)
|
Remove role from given user.
Args:
user (string): User name.
role (string): Role to remove.
Raises:
requests.HTTPError on failure.
|
juraj-google-style
|
def _parse_name(self, name):
if not isinstance(name, str):
raise TypeError(f"'name' must be a string, such as 'mixed_float16'. Received: name={name} (of type {type(name)})")
if name == 'mixed_float16':
return ('float16', 'float32')
elif name == 'mixed_bfloat16':
return ('bfloat16', 'float32')
try:
dtype = backend.standardize_dtype(name)
return (dtype, dtype)
except ValueError:
raise ValueError(f"Cannot convert '{name}' to a mixed precision DTypePolicy. Valid policies include 'mixed_float16', 'mixed_bfloat16', and the name of any float dtype such as 'float32'.")
|
Parses a `DTypePolicy` name into a compute and variable dtype.
Args:
name: The name of the policy.
Returns:
The `(compute_dtype, variable_dtype)` pair.
|
github-repos
|
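The parsing rules above surface through the public DTypePolicy API; a hedged sketch assuming the Keras 3 keras.mixed_precision.DTypePolicy wrapper.

import keras  # assumption: Keras 3 is installed

policy = keras.mixed_precision.DTypePolicy('mixed_float16')
print(policy.compute_dtype, policy.variable_dtype)  # float16 float32

policy = keras.mixed_precision.DTypePolicy('bfloat16')
print(policy.compute_dtype, policy.variable_dtype)  # bfloat16 bfloat16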
def num_accelerators(self, task_type=None, task_id=None, config_proto=None):
master = self.master(task_type, task_id)
devices = get_accelerator_devices(master, config_proto)
mapping = collections.defaultdict(int)
for device in devices:
if task_type is not None and task_id is not None:
job_path = '/job:%s' % task_type
task_path = '/task:%s' % task_id
if job_path not in device.name or task_path not in device.name:
continue
mapping[device.device_type] += 1
return mapping
|
Returns the number of accelerator cores per worker.
This returns the number of accelerator cores (such as GPUs and TPUs)
available per worker.
Optionally, we allow callers to specify the task_type and task_id
if they want to target a specific TensorFlow task to query
the number of accelerators. This is to support heterogeneous environments,
where the number of accelerator cores per host is different.
Args:
task_type: (Optional) The type of the TensorFlow task of the machine we
want to query.
task_id: (Optional) The index of the TensorFlow task of the machine we
want to query.
config_proto: (Optional) Configuration for starting a new session to
query how many accelerator cores it has.
Returns:
A map of accelerator types to number of cores.
|
github-repos
|
def Process(self, parser_mediator, root_item=None, **kwargs):
super(AutomaticDestinationsOLECFPlugin, self).Process(
parser_mediator, **kwargs)
if not root_item:
raise ValueError('Root item not set.')
for item in root_item.sub_items:
if item.name == 'DestList':
self.ParseDestList(parser_mediator, item)
elif self._RE_LNK_ITEM_NAME.match(item.name):
display_name = parser_mediator.GetDisplayName()
if display_name:
display_name = '{0:s}
else:
display_name = '
parser_mediator.AppendToParserChain(self._WINLNK_PARSER)
try:
item.seek(0, os.SEEK_SET)
self._WINLNK_PARSER.ParseFileLNKFile(
parser_mediator, item, display_name)
finally:
parser_mediator.PopFromParserChain()
|
Parses an OLECF file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
root_item (Optional[pyolecf.item]): root item of the OLECF file.
Raises:
ValueError: If the root_item is not set.
|
juraj-google-style
|
def __init__(self, owner, repo_name, token=''):
self._github_repository = GitHub(token=token).repository(owner, repo_name)
|
Create a handle to the given GitHub repository via the GitHub API.
Args:
owner (str): the owner's GitHub username
repo_name (str): the name of the repository
token (str): the GitHub API token
|
juraj-google-style
|
def loop(self, timer_interval_secs, target, args=None, kwargs=None):
looper = coordinator.LooperThread(self._coord, timer_interval_secs, target=target, args=args, kwargs=kwargs)
looper.start()
return looper
|
Start a LooperThread that calls a function periodically.
If `timer_interval_secs` is None the thread calls `target(*args, **kwargs)`
repeatedly. Otherwise it calls it every `timer_interval_secs`
seconds. The thread terminates when a stop is requested.
The started thread is added to the list of threads managed by the supervisor
so it does not need to be passed to the `stop()` method.
Args:
timer_interval_secs: Number. Time boundaries at which to call `target`.
target: A callable object.
args: Optional arguments to pass to `target` when calling it.
kwargs: Optional keyword arguments to pass to `target` when calling it.
Returns:
The started thread.
|
github-repos
|
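A hedged sketch of the periodic-call pattern, assuming this is the loop method of the TF1-style tf.compat.v1.train.Supervisor; the interval, counter variable, and reporting function are placeholders.

import tensorflow.compat.v1 as tf  # Supervisor is a TF1-style API
tf.disable_eager_execution()

counter = tf.Variable(0, name='counter')
increment = tf.assign_add(counter, 1)
sv = tf.train.Supervisor(logdir=None)  # no checkpoints or summaries for this sketch

def report(sess):
    print('counter is now', sess.run(counter))

with sv.managed_session() as sess:
    sv.loop(5, report, args=(sess,))  # calls report(sess) every 5 seconds
    for _ in range(100):
        sess.run(increment)
    sv.request_stop()  # stopping the coordinator also stops the looper thread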
def available_cpu_count() -> int:
try:
match = re.search('(?m)^Cpus_allowed:\\s*(.*)$', open('/proc/self/status').read())
if match:
res = bin(int(match.group(1).replace(',', ''), 16)).count('1')
if (res > 0):
return res
except IOError:
LOG.debug('Could not get the number of allowed CPUs')
try:
import psutil
return psutil.cpu_count()
except (ImportError, AttributeError):
LOG.debug('Could not get the number of allowed CPUs')
try:
res = int(os.sysconf('SC_NPROCESSORS_ONLN'))
if (res > 0):
return res
except (AttributeError, ValueError):
LOG.debug('Could not get the number of allowed CPUs')
try:
res = open('/proc/cpuinfo').read().count('processor\t:')
if (res > 0):
return res
except IOError:
LOG.debug('Could not get the number of allowed CPUs')
raise Exception('Can not determine number of CPUs on this system')
|
Get the number of available CPUs.
Number of available virtual or physical CPUs on this system, i.e.
user/real as output by time(1) when called with an optimally scaling
userspace-only program.
Returns:
Number of available CPUs.
|
codesearchnet
|
def get_optimizer_experimental_options():
return context.context().get_optimizer_experimental_options()
|
Get experimental optimizer options.
Refer to tf.config.optimizer.set_experimental_options for a list of current
options.
Note that optimizations are only applied in graph mode, (within tf.function).
In addition, as these are experimental options, the list is subject to change.
Returns:
Dictionary of configured experimental optimizer options
|
github-repos
|
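A small sketch of the corresponding setter/getter pair, assuming the tf.config.optimizer API in TF2; the chosen option names are standard grappler toggles.

import tensorflow as tf

tf.config.optimizer.set_experimental_options(
    {'layout_optimizer': True, 'constant_folding': True})
print(tf.config.optimizer.get_experimental_options())
# {'layout_optimizer': True, 'constant_folding': True}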
def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):
for entry in top_level:
datetime_value = entry.get('date', None)
package_identifiers = entry.get('packageIdentifiers', [])
if ((not datetime_value) or (not package_identifiers)):
continue
display_name = entry.get('displayName', '<UNKNOWN>')
display_version = entry.get('displayVersion', '<DISPLAY_VERSION>')
process_name = entry.get('processName', '<PROCESS_NAME>')
package_identifiers = ', '.join(package_identifiers)
event_data = plist_event.PlistTimeEventData()
event_data.desc = 'Installation of [{0:s} {1:s}] using [{2:s}]. Packages: {3:s}.'.format(display_name, display_version, process_name, package_identifiers)
event_data.key = ''
event_data.root = '/item'
event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Extracts relevant install history entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
top_level (dict[str, object]): plist top-level key.
|
codesearchnet
|