code (string, lengths 20–4.93k) | docstring (string, lengths 33–1.27k) | source (3 classes) |
---|---|---|
class DPTReassembleStage(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layers = nn.ModuleList()
if config.is_hybrid:
self._init_reassemble_dpt_hybrid(config)
else:
self._init_reassemble_dpt(config)
self.neck_ignore_stages = config.neck_ignore_stages
def _init_reassemble_dpt_hybrid(self, config):
for i, factor in zip(range(len(config.neck_hidden_sizes)), config.reassemble_factors):
if i <= 1:
self.layers.append(nn.Identity())
elif i > 1:
self.layers.append(DPTReassembleLayer(config, channels=config.neck_hidden_sizes[i], factor=factor))
if config.readout_type != 'project':
raise ValueError(f'Readout type {config.readout_type} is not supported for DPT-Hybrid.')
self.readout_projects = nn.ModuleList()
hidden_size = _get_backbone_hidden_size(config)
for i in range(len(config.neck_hidden_sizes)):
if i <= 1:
self.readout_projects.append(nn.Sequential(nn.Identity()))
elif i > 1:
self.readout_projects.append(nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act]))
def _init_reassemble_dpt(self, config):
for i, factor in zip(range(len(config.neck_hidden_sizes)), config.reassemble_factors):
self.layers.append(DPTReassembleLayer(config, channels=config.neck_hidden_sizes[i], factor=factor))
if config.readout_type == 'project':
self.readout_projects = nn.ModuleList()
hidden_size = _get_backbone_hidden_size(config)
for _ in range(len(config.neck_hidden_sizes)):
self.readout_projects.append(nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act]))
def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]:
out = []
for i, hidden_state in enumerate(hidden_states):
if i not in self.neck_ignore_stages:
cls_token, hidden_state = (hidden_state[:, 0], hidden_state[:, 1:])
batch_size, sequence_length, num_channels = hidden_state.shape
if patch_height is not None and patch_width is not None:
hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels)
else:
size = torch_int(sequence_length ** 0.5)
hidden_state = hidden_state.reshape(batch_size, size, size, num_channels)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
feature_shape = hidden_state.shape
if self.config.readout_type == 'project':
hidden_state = hidden_state.flatten(2).permute((0, 2, 1))
readout = cls_token.unsqueeze(1).expand_as(hidden_state)
hidden_state = self.readout_projects[i](torch.cat((hidden_state, readout), -1))
hidden_state = hidden_state.permute(0, 2, 1).reshape(feature_shape)
elif self.config.readout_type == 'add':
hidden_state = hidden_state.flatten(2) + cls_token.unsqueeze(-1)
hidden_state = hidden_state.reshape(feature_shape)
hidden_state = self.layers[i](hidden_state)
out.append(hidden_state)
return out | This class reassembles the hidden states of the backbone into image-like feature representations at various
resolutions.
This happens in 3 stages:
1. Map the N + 1 tokens to a set of N tokens, by taking into account the readout ([CLS]) token according to
`config.readout_type`.
2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`.
3. Resizing the spatial dimensions (height, width).
Args:
config ([`DPTConfig`]):
Model configuration class defining the model architecture. | github-repos |
def getctime(self, path):
try:
file_obj = self.filesystem.resolve(path)
except IOError:
self.filesystem.raise_os_error(errno.ENOENT)
return file_obj.st_ctime | Returns the creation time of the fake file.
Args:
path: the path to the fake file.
Returns:
(int, float) the creation time of the fake file in number of
seconds since the epoch.
Raises:
OSError: if the file does not exist. | codesearchnet |
def metadata(self, path):
try:
file_metadata = self._blobstorageIO()._status(path)
return FileMetadata(path, file_metadata['size'], file_metadata['last_updated'])
except Exception as e:
raise BeamIOError('Metadata operation failed', {path: e}) | Fetch metadata fields of a file on the FileSystem.
Args:
path: string path of a file.
Returns:
:class:`~apache_beam.io.filesystem.FileMetadata`.
Raises:
``BeamIOError``: if path isn't a file or doesn't exist. | github-repos |
def _build_command(self, python_executable, lib_dir_fq, proxy_enabled):
exe_command = [os.path.expanduser(python_executable), '-m', 'pip', 'install', '-r', self.requirements_file, '--ignore-installed', '--quiet', '--target', lib_dir_fq]
if self.args.no_cache_dir:
exe_command.append('--no-cache-dir')
if proxy_enabled:
trusted_hosts = ['pypi.org', 'pypi.python.org', 'files.pythonhosted.org']
for host in trusted_hosts:
exe_command.append('--trusted-host')
exe_command.append(host)
return exe_command | Build the pip command for installing dependencies.
Args:
python_executable (str): The fully qualified path of the Python executable.
lib_dir_fq (str): The fully qualified path of the lib directory.
proxy_enabled (bool): Whether to add the PyPI trusted hosts for installing through a proxy.
Returns:
list: The Python pip command with all required args. | codesearchnet |
def start(logdir, options=None):
global _profiler
with _profiler_lock:
if _profiler is not None:
raise errors.AlreadyExistsError(None, None, 'Another profiler is running.')
_profiler = _pywrap_profiler.ProfilerSession()
try:
opts = dict(options._asdict()) if options is not None else {}
_profiler.start(logdir, opts)
except errors.AlreadyExistsError:
logging.warning('Another profiler session is running which is probably created by profiler server. Please avoid using profiler server and profiler APIs at the same time.')
raise errors.AlreadyExistsError(None, None, 'Another profiler is running.')
except Exception:
_profiler = None
raise | Start profiling TensorFlow performance.
Args:
logdir: Profiling results log directory.
options: `ProfilerOptions` namedtuple to specify miscellaneous profiler
options. See example usage below.
Raises:
AlreadyExistsError: If a profiling session is already running.
Example usage:
```python
options = tf.profiler.experimental.ProfilerOptions(host_tracer_level = 3,
python_tracer_level = 1,
device_tracer_level = 1)
tf.profiler.experimental.start('logdir_path', options = options)
# Training code here
tf.profiler.experimental.stop()
```
To view the profiling results, launch TensorBoard and point it to `logdir`.
Open your browser and go to `localhost:6006/#profile` to view profiling
results. | github-repos |
def add_weatherdata(self, data):
if (not isinstance(data, WeatherData)):
raise ValueError('Weather data need to be of type WeatherData')
self._data['WEATHER DATA'].append(data) | Appends weather data.
Args:
data (WeatherData): weather data object | codesearchnet |
def launch_R_script(template, arguments, output_function=None, verbose=True, debug=False):
id = str(uuid.uuid4())
os.makedirs((('/tmp/cdt_R_script_' + id) + '/'))
try:
scriptpath = (('/tmp/cdt_R_script_' + id) + '/instance_{}'.format(os.path.basename(template)))
copy(template, scriptpath)
with fileinput.FileInput(scriptpath, inplace=True) as file:
for line in file:
mline = line
for elt in arguments:
mline = mline.replace(elt, arguments[elt])
print(mline, end='')
if (output_function is None):
output = subprocess.call('Rscript --vanilla {}'.format(scriptpath), shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
else:
if verbose:
process = subprocess.Popen('Rscript --vanilla {}'.format(scriptpath), shell=True)
else:
process = subprocess.Popen('Rscript --vanilla {}'.format(scriptpath), shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
process.wait()
output = output_function()
except Exception as e:
if (not debug):
rmtree((('/tmp/cdt_R_script_' + id) + '/'))
raise e
except KeyboardInterrupt:
if (not debug):
rmtree((('/tmp/cdt_R_script_' + id) + '/'))
raise KeyboardInterrupt
if (not debug):
rmtree((('/tmp/cdt_R_script_' + id) + '/'))
return output | Launch an R script, starting from a template and replacing text in file
before execution.
Args:
template (str): path to the template of the R script
arguments (dict): Arguments that modify the template's placeholders
with arguments
output_function (function): Function to execute **after** the execution
of the R script, and its output is returned by this function. Used
traditionally as a function to retrieve the results of the
execution.
verbose (bool): Sets the verbosity of the R subprocess.
debug (bool): If True, the generated scripts are not deleted.
Return:
Returns the output of the ``output_function`` if not `None`
else `True` or `False` depending on whether the execution was
successful. | codesearchnet |
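A hedged usage sketch for `launch_R_script`: the template path and the `{FOLDER}`/`{FILE}` placeholder names below are hypothetical, since real templates define their own placeholders; only the call pattern is illustrated.

```python
import pandas as pd

def read_result():
    # Runs after the R script finishes; its return value is what
    # launch_R_script() returns.
    return pd.read_csv('/tmp/r_output/result.csv')

# Placeholders in the template are replaced verbatim by the values in the
# `arguments` dict before the script is executed with Rscript.
output = launch_R_script('/path/to/template.R',
                         {'{FOLDER}': '/tmp/r_output/', '{FILE}': 'result.csv'},
                         output_function=read_result,
                         verbose=False)
```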
def poweroff_server(self, server=None, server_id=None):
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerPowerOff', json_scheme=json_scheme)
return True if json_obj['Success'] == 'True' else False | Power off a VM. It is possible to pass either the VM object or simply the ID
of the VM that we want to power off.
Args:
server: VM Object that represents the VM to power off,
server_id: Int or Str representing the ID of the VM to power off.
Returns:
bool: True if the power-off request succeeded, False otherwise. | juraj-google-style
def from_json(cls, jsonmsg):
import json
msg = json.loads(jsonmsg)
obj = cls(**msg)
obj.validate()
return obj | Create an object directly from a JSON string.
Applies general validation after creating the
object to check whether all required fields are
present.
Args:
jsonmsg (str): An object encoded as a JSON string
Returns:
An object of the generated type
Raises:
ValidationError: if `jsonmsg` does not match the schema
`cls` was generated from | juraj-google-style |
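A self-contained sketch of the pattern: `Heartbeat` below is a hypothetical stand-in for a schema-generated class that exposes the same `from_json` classmethod.

```python
import json

class Heartbeat:
    """Hypothetical stand-in for a schema-generated message type."""

    def __init__(self, device_id=None, status=None):
        self.device_id = device_id
        self.status = status

    def validate(self):
        # Generated classes check required fields against their schema here.
        if self.device_id is None or self.status is None:
            raise ValueError('missing required field')

    @classmethod
    def from_json(cls, jsonmsg):
        msg = json.loads(jsonmsg)
        obj = cls(**msg)
        obj.validate()
        return obj

hb = Heartbeat.from_json('{"device_id": 42, "status": "ok"}')
print(hb.status)  # -> ok
```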
def add_jpeg_decoding(module_spec):
input_height, input_width = hub.get_expected_image_size(module_spec)
input_depth = hub.get_num_image_channels(module_spec)
jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
decoded_image_as_float = tf.image.convert_image_dtype(decoded_image,
tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
resize_shape = tf.stack([input_height, input_width])
resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)
resized_image = tf.image.resize_bilinear(decoded_image_4d,
resize_shape_as_int)
return jpeg_data, resized_image | Adds operations that perform JPEG decoding and resizing to the graph..
Args:
module_spec: The hub.ModuleSpec for the image module being used.
Returns:
Tensors for the node to feed JPEG data into, and the output of the
preprocessing steps. | juraj-google-style |
def read_graph_execution_trace(self, graph_execution_trace_digest):
debug_event = self._reader.read_graph_execution_traces_event(graph_execution_trace_digest.locator)
return self._graph_execution_trace_from_debug_event_proto(debug_event, graph_execution_trace_digest.locator) | Read the detailed graph execution trace.
Args:
graph_execution_trace_digest: A `GraphExecutionTraceDigest` object.
Returns:
The corresponding `GraphExecutionTrace` object. | github-repos |
def _ParseFileData(self, knowledge_base, file_object):
plist_file = plist.PlistFile()
try:
plist_file.Read(file_object)
except IOError as exception:
raise errors.PreProcessFail(
'Unable to read: {0:s} with error: {1!s}'.format(
self.ARTIFACT_DEFINITION_NAME, exception))
if not plist_file.root_key:
raise errors.PreProcessFail((
'Unable to read: {0:s} with error: missing root key').format(
self.ARTIFACT_DEFINITION_NAME))
matches = []
self._FindKeys(plist_file.root_key, self._PLIST_KEYS, matches)
if not matches:
raise errors.PreProcessFail(
'Unable to read: {0:s} with error: no such keys: {1:s}.'.format(
self.ARTIFACT_DEFINITION_NAME, ', '.join(self._PLIST_KEYS)))
name = None
value = None
for name, value in matches:
if value:
break
if value is None:
raise errors.PreProcessFail((
'Unable to read: {0:s} with error: no values found for keys: '
'{1:s}.').format(
self.ARTIFACT_DEFINITION_NAME, ', '.join(self._PLIST_KEYS)))
self._ParsePlistKeyValue(knowledge_base, name, value) | Parses file content (data) for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_object (dfvfs.FileIO): file-like object that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails. | juraj-google-style |
def select_by_value(self, value):
self._selected_key = None
self._selected_item = None
for k in self.children:
item = self.children[k]
item.attributes['selected'] = False
if value == item.get_value():
self._selected_key = k
self._selected_item = item
self._selected_item.attributes['selected'] = True | Selects an item by the text content of the child.
Args:
value (str): Text content of the item that have to be selected. | juraj-google-style |
def format_page(self, page, link_resolver, output):
debug('Formatting page %s' % page.link.ref, 'formatting')
if output:
actual_output = os.path.join(output,
'html')
if not os.path.exists(actual_output):
os.makedirs(actual_output)
else:
actual_output = None
page.format(self.formatter, link_resolver, actual_output) | Called by `project.Project.format_page`, to leave full control
to extensions over the formatting of the pages they are
responsible of.
Args:
page: tree.Page, the page to format.
link_resolver: links.LinkResolver, object responsible
for resolving links potentially mentioned in `page`
output: str, path to the output directory. | juraj-google-style |
def _lm_numdiff_jacobian(eval_func, nmr_params, nmr_observations):
return SimpleCLFunction.from_string(r + str(nmr_params) + + str(nmr_observations) + , dependencies=[eval_func, SimpleCLFunction.from_string( + str(nmr_observations) + + eval_func.get_cl_function_name() + + eval_func.get_cl_function_name() + ), SimpleCLFunction.from_string( + str(nmr_observations) + + eval_func.get_cl_function_name() + + eval_func.get_cl_function_name() + ), SimpleCLFunction.from_string( + str(nmr_observations) + + eval_func.get_cl_function_name() + + eval_func.get_cl_function_name() + )]) | Get a numerical differentiated Jacobian function.
This computes the Jacobian of the observations (function vector) with respect to the parameters.
Args:
eval_func (mot.lib.cl_function.CLFunction): the evaluation function
nmr_params (int): the number of parameters
nmr_observations (int): the number of observations (the length of the function vector).
Returns:
mot.lib.cl_function.CLFunction: CL function for numerically estimating the Jacobian. | juraj-google-style |
def apply_product_config(config):
cot_product = config['cot_product']
for key in config:
if (isinstance(config[key], Mapping) and ('by-cot-product' in config[key])):
try:
config[key] = config[key]['by-cot-product'][cot_product]
except KeyError:
raise ConfigError('Product {} not specified for key {}'.format(cot_product, key))
return config | Apply config values that are keyed by `cot_product`.
This modifies the passed in configuration.
Args:
config dict: the config to apply cot_product keying to
Returns: dict | codesearchnet |
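A minimal illustration of the `by-cot-product` keying that `apply_product_config` resolves; the keys and product names here are invented for the example, and the function is assumed to be importable.

```python
config = {
    'cot_product': 'firefox',
    'log_level': 'info',  # plain values pass through untouched
    'worker_type': {'by-cot-product': {'firefox': 'gecko-t', 'thunderbird': 'comm-t'}},
}

config = apply_product_config(config)
print(config['worker_type'])  # -> 'gecko-t'; a missing product key raises ConfigError
```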
def _get_query_argument(args, cell, env):
sql_arg = args.get('query', None)
if sql_arg is None:
if not isinstance(cell, basestring):
raise Exception('Expected a --query argument or inline SQL')
return bigquery.Query(cell, env=env)
item = google.datalab.utils.commands.get_notebook_item(sql_arg)
if isinstance(item, bigquery.Query):
return item
else:
raise Exception('Expected a query object, got %s.' % type(item)) | Get a query argument to a cell magic.
The query is specified with args['query']. We look that up and if it is a BQ query
object, just return it. If it is a string, build a query object out of it and return
that
Args:
args: the dictionary of magic arguments.
cell: the cell contents which can be variable value overrides (if args has a 'query'
value) or inline SQL otherwise.
env: a dictionary that is used for looking up variable values.
Returns:
A Query object. | juraj-google-style |
def crps(self, model_type, model_name, condition_model_name, condition_threshold, query=None):
def gamma_cdf(x, a, loc, b):
if a == 0 or b == 0:
cdf = np.ones(x.shape)
else:
cdf = gamma.cdf(x, a, loc, b)
return cdf
crps_obj = DistributedCRPS(self.dist_thresholds)
if query is not None:
sub_forecasts = self.matched_forecasts[model_type][model_name].query(query)
sub_forecasts = sub_forecasts.reset_index(drop=True)
condition_forecasts = self.matched_forecasts["condition"][condition_model_name].query(query)
condition_forecasts = condition_forecasts.reset_index(drop=True)
else:
sub_forecasts = self.matched_forecasts[model_type][model_name]
condition_forecasts = self.matched_forecasts["condition"][condition_model_name]
if sub_forecasts.shape[0] > 0:
if model_type == "dist":
forecast_cdfs = np.zeros((sub_forecasts.shape[0], self.dist_thresholds.size))
for f in range(sub_forecasts.shape[0]):
condition_prob = condition_forecasts.loc[f, self.forecast_bins["condition"][0]]
if condition_prob >= condition_threshold:
f_params = [0, 0, 0]
else:
f_params = sub_forecasts[self.forecast_bins[model_type]].values[f]
forecast_cdfs[f] = gamma_cdf(self.dist_thresholds, f_params[0], f_params[1], f_params[2])
obs_cdfs = np.array([gamma_cdf(self.dist_thresholds, *params)
for params in sub_forecasts[self.type_cols[model_type]].values])
crps_obj.update(forecast_cdfs, obs_cdfs)
else:
crps_obj.update(sub_forecasts[self.forecast_bins[model_type].astype(str)].values,
sub_forecasts[self.type_cols[model_type]].values)
return crps_obj | Calculates the cumulative ranked probability score (CRPS) on the forecast data.
Args:
model_type: model type being evaluated.
model_name: machine learning model being evaluated.
condition_model_name: Name of the hail/no-hail model being evaluated
condition_threshold: Threshold for using hail size CDF
query: pandas query string to filter the forecasts based on the metadata
Returns:
a DistributedCRPS object | juraj-google-style |
def declare(self, name, description=None, **kwargs):
if not self._is_valid_key(name):
raise self.InvalidKeyError(
'Invalid key name, must begin with a lowercase letter', name)
if name in self._declarations:
raise self.KeyAlreadyDeclaredError(
'Configuration key already declared', name)
self._declarations[name] = self.Declaration(
name, description=description, **kwargs) | Declare a configuration key with the given name.
Args:
name: Configuration key to declare, must not have been already declared.
description: If provided, use this as the description for this key.
**kwargs: Other kwargs to pass to the Declaration, only default_value
is currently supported. | juraj-google-style |
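A short sketch of declaring a key, assuming `conf` is an instance of the configuration class this method belongs to; the key name and default value are invented.

```python
conf.declare('antenna_count',
             description='Number of antennas attached to the device under test.',
             default_value=2)

# A second declare() with the same name raises KeyAlreadyDeclaredError, and a
# name that does not start with a lowercase letter raises InvalidKeyError.
```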
def mtf_transformer_paper_lm(size):
n = (2 ** size)
hparams = mtf_transformer_base_lm()
hparams.batch_size = 256
hparams.d_model = 1024
hparams.d_ff = int((8192 * n))
hparams.d_kv = 256
hparams.num_heads = int((8 * n))
hparams.shared_embedding_and_softmax_weights = False
hparams.learning_rate_decay_steps = 13600
return hparams | Config for language-model experiments.
Train these on languagemodel_lm1b32k_packed for 136000 steps (10 epochs)
The size parameter is an integer that controls the number of heads and the
size of the feedforward hidden layers. Increasing size by 1
doubles each of these.
Results:
size params/10^9 log-ppl(per-token)
-1 0.14 3.209
0 0.22 3.119
1 0.37 3.037
2 0.67 2.969
3 1.28 2.912
4 2.48 2.874
5 4.90 2.871
(to get word-level log-ppl, multiply by 1.1078)
Args:
size: an integer
Returns:
a hparams object | codesearchnet |
def getall(self):
users = self.users_re.findall(self.config, re.M)
resources = dict()
for user in users:
resources.update(self._parse_username(user))
return resources | Returns all local users configuration as a resource dict
Returns:
dict: A dict of usernames with a nested resource dict object | codesearchnet |
def get_parameters(self, grad_only=True):
params = OrderedDict()
for v in self.get_modules():
if not isinstance(v, tuple):
continue
prefix, module = v
for k, v in module.__dict__.items():
if not isinstance(v, nn.Variable):
continue
pname = k
name = "{}/{}".format(prefix, pname)
if grad_only and v.need_grad == False:
continue
params[name] = v
return params | Get parameters.
Args:
grad_only (bool, optional): Return parameters with `need_grad` option as `True`.
If you set this option as `False`, All parameters are returned. Default is `True`.
Returns:
dict: The dictionary of parameter name (`str`) to Variable (:obj:`~nnabla.Variable`). | juraj-google-style |
def to_variant(dataset: DatasetV2):
return dataset._variant_tensor | Returns a variant representing the given dataset.
Args:
dataset: A `tf.data.Dataset`.
Returns:
A scalar `tf.variant` tensor representing the given dataset. | github-repos |
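The same conversion is exposed publicly as `tf.data.experimental.to_variant`, with `from_variant` as its inverse; a quick round trip:

```python
import tensorflow as tf

dataset = tf.data.Dataset.range(3)
variant = tf.data.experimental.to_variant(dataset)  # scalar tf.variant tensor
restored = tf.data.experimental.from_variant(variant, structure=dataset.element_spec)
print(list(restored.as_numpy_iterator()))  # [0, 1, 2]
```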
def new(self, val):
if len(self.things) >= self.max_things:
raise LimitationError('too many things')
self.things.add(val)
return val | Add a new value to me.
Args:
val (LispVal): The value to be added.
Returns:
LispVal: The added value.
Raises:
~parthial.errs.LimitationError: If I already contain the maximum
number of elements. | juraj-google-style |
def __init__(self, channel):
self.BatchWriteSpans = channel.unary_unary(
"/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans",
request_serializer=google_dot_devtools_dot_cloudtrace__v2_dot_proto_dot_tracing__pb2.BatchWriteSpansRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.CreateSpan = channel.unary_unary(
"/google.devtools.cloudtrace.v2.TraceService/CreateSpan",
request_serializer=google_dot_devtools_dot_cloudtrace__v2_dot_proto_dot_trace__pb2.Span.SerializeToString,
response_deserializer=google_dot_devtools_dot_cloudtrace__v2_dot_proto_dot_trace__pb2.Span.FromString,
) | Constructor.
Args:
channel: A grpc.Channel. | juraj-google-style |
def _match_elements(dom, matches):
out = {}
for key, content in matches.items():
pattern = content["data"].strip()
if "\n" in pattern:
pattern = pattern.split()
transformer = lambda x: x.strip().split()
else:
transformer = lambda x: x.strip()
matching_elements = _locate_element(
dom,
pattern,
transformer=transformer
)
not_found_msg = content.get("notfoundmsg", "").replace("$name", key)
if not not_found_msg.strip():
not_found_msg = "Can't locate variable '%s' with content '%s'!" % (
key,
pattern,
)
content["notfoundmsg"] = not_found_msg
tagname = content.get("tagname", "").strip().lower()
if tagname:
matching_elements = filter(
lambda x: x.getTagName().strip().lower() == tagname,
matching_elements
)
if not matching_elements:
raise UserWarning(not_found_msg)
if len(matching_elements) > 1:
raise UserWarning(
"Ambigious content '%s'!" % content
+ "Content was found in multiple elements!"
)
out[key] = matching_elements[0]
return out | Find location of elements matching patterns specified in `matches`.
Args:
dom (obj): HTMLElement DOM tree.
matches (dict): Structure: ``{"var": {"data": "match", ..}, ..}``.
Returns:
dict: Structure: ``{"var": {"data": HTMLElement_obj, ..}, ..}`` | juraj-google-style |
def _GetRecord(self, offset, record_size):
record_header = '<4sLQQL'
get4 = (lambda x: struct.unpack('<L', self.input_dat[x:(x + 4)])[0])
url_offset = struct.unpack('B', self.input_dat[(offset + 52):(offset + 53)])[0]
if (url_offset in [255, 254]):
return None
data_offset = get4((offset + 68))
data_size = get4((offset + 72))
start_pos = (offset + data_offset)
data = struct.unpack('{0}s'.format(data_size), self.input_dat[start_pos:(start_pos + data_size)])[0]
fmt = record_header
unknown_size = (url_offset - struct.calcsize(fmt))
fmt += '{0}s'.format(unknown_size)
fmt += '{0}s'.format((record_size - struct.calcsize(fmt)))
dat = struct.unpack(fmt, self.input_dat[offset:(offset + record_size)])
(header, blocks, mtime, ctime, ftime, _, url) = dat
url = url.split(b'\x00')[0].decode('utf-8')
if mtime:
mtime = ((mtime
if ctime:
ctime = ((ctime
return {'header': header, 'blocks': blocks, 'urloffset': url_offset, 'data_offset': data_offset, 'data_size': data_size, 'data': data, 'mtime': mtime, 'ctime': ctime, 'ftime': ftime, 'url': url} | Retrieve a single record from the file.
Args:
offset: offset from start of input_dat where header starts
record_size: length of the header according to file (untrusted)
Returns:
A dict containing a single browser history record. | codesearchnet |
def start(self, auto_register=True):
return self.container.start_agent(agent=self, auto_register=auto_register) | Tells the container to start this agent.
It returns a coroutine or a future depending on whether it is called from a coroutine or a synchronous method.
Args:
auto_register (bool): register the agent in the server (Default value = True) | juraj-google-style |
def _on_response_message(self, sequence, topic, message):
try:
conn_key = self._find_connection(topic)
context = self.conns.get_context(conn_key)
except ArgumentError:
self._logger.warn('Dropping message that does not correspond with a known connection, message=%s', message)
return
if (('client' in message) and (message['client'] != self.name)):
self._logger.debug('Dropping message that is for another client %s, we are %s', message['client'], self.name)
return
if messages.DisconnectionResponse.matches(message):
self.conns.finish_disconnection(conn_key, message['success'], message.get('failure_reason', None))
elif messages.OpenInterfaceResponse.matches(message):
self.conns.finish_operation(conn_key, message['success'], message.get('failure_reason', None))
elif messages.RPCResponse.matches(message):
rpc_message = messages.RPCResponse.verify(message)
self.conns.finish_operation(conn_key, rpc_message['success'], rpc_message.get('failure_reason', None), rpc_message.get('status', None), rpc_message.get('payload', None))
elif messages.ProgressNotification.matches(message):
progress_callback = context.get('progress_callback', None)
if (progress_callback is not None):
progress_callback(message['done_count'], message['total_count'])
elif messages.ScriptResponse.matches(message):
if ('progress_callback' in context):
del context['progress_callback']
self.conns.finish_operation(conn_key, message['success'], message.get('failure_reason', None))
elif messages.DisconnectionNotification.matches(message):
try:
conn_key = self._find_connection(topic)
conn_id = self.conns.get_connection_id(conn_key)
except ArgumentError:
self._logger.warn('Dropping disconnect notification that does not correspond with a known connection, topic=%s', topic)
return
self.conns.unexpected_disconnect(conn_key)
self._trigger_callback('on_disconnect', self.id, conn_id)
else:
self._logger.warn('Invalid response message received, message=%s', message) | Process a response message received
Args:
sequence (int): The sequence number of the packet received
topic (string): The topic this message was received on
message (dict): The message itself | codesearchnet |
def paint(self):
snippet = {'fill-opacity': VectorStyle.get_style_value(self.opacity), 'fill-color': VectorStyle.get_style_value(self.color), 'fill-outline-color': VectorStyle.get_style_value(self.outline_color)}
if self.translate:
snippet['fill-translate'] = self.translate
return snippet | Renders a javascript snippet suitable for use as a mapbox-gl fill paint entry
Returns:
A dict that can be converted to a mapbox-gl javascript paint snippet | codesearchnet |
def remove_context(self, name):
self._context(name)
del self.contexts[name]
self._flush_tools() | Remove a context from the suite.
Args:
name (str): Name of the context to remove. | juraj-google-style |
def map_arg(**maps):
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if six.PY2:
argmap = inspect.getcallargs(func, *args, **kwargs)
else:
sig = inspect.signature(func)
argmap = sig.bind_partial(*args, **kwargs).arguments
for k, map_func in six.iteritems(maps):
if k in argmap:
argmap[k] = map_func(argmap[k])
return func(**argmap)
return wrapper
return deco | Apply a mapping on certain argument before calling the original function.
Args:
maps (dict): {argument_name: map_func} | juraj-google-style |
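A minimal use of the decorator (assuming `map_arg` is importable); the decorated function is invented for illustration.

```python
import os

@map_arg(path=os.path.expanduser)
def describe(path, mode='r'):
    return '{}: {}'.format(mode, path)

# The decorator applies os.path.expanduser to the bound `path` argument
# before the wrapped function runs, so '~' is already expanded here.
print(describe('~/data.txt'))
```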
def _test_end(self, result, e):
if self.begin_time is not None:
self.end_time = utils.get_current_epoch_time()
self.result = result
if e:
self.termination_signal = ExceptionRecord(e) | Marks the end of the test logic.
Args:
result: One of the TEST_RESULT enums in TestResultEnums.
e: A test termination signal (usually an exception object). It can
be any exception instance or of any subclass of
mobly.signals.TestSignal. | github-repos |
async def attach_url(self, url: str, description: str = None) -> Attachment:
return await self._attach(url=url, description=description) | add an url as an attachment
|methcoro|
Args:
url: url you want to add
description: *optional* description for your attachment
Returns:
Attachment:
Raises:
ValueError: url must not be None
APIException | juraj-google-style |
def CloseExpression(clean_lines, linenum, pos):
line = clean_lines.elided[linenum]
if ((line[pos] not in '({[<') or Match('<[<=]', line[pos:])):
return (line, clean_lines.NumLines(), (- 1))
(end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
if (end_pos > (- 1)):
return (line, linenum, end_pos)
while (stack and (linenum < (clean_lines.NumLines() - 1))):
linenum += 1
line = clean_lines.elided[linenum]
(end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
if (end_pos > (- 1)):
return (line, linenum, end_pos)
return (line, clean_lines.NumLines(), (- 1)) | If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
TODO(unknown): cpplint spends a fair bit of time matching parentheses.
Ideally we would want to index all opening and closing parentheses once
and have CloseExpression be just a simple lookup, but due to preprocessor
tricks, this is not so easy.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum. | codesearchnet |
def __init__(self, dataset=None, worker=None, devices=None, components=None, element_spec=None, options=None, canonicalize_devices=None):
if worker is None or devices is None:
raise ValueError('Both `worker` and `devices` should be provided')
error_message = 'Either `dataset` or both `components` and `element_spec` need to be provided.'
self._options = options
self._canonicalize_devices = canonicalize_devices
if dataset is None:
if components is None or element_spec is None:
raise ValueError(error_message)
self._element_spec = element_spec
self._worker = worker
self._devices = devices
self._iterator = components[0]
else:
if components is not None or element_spec is not None:
raise ValueError(error_message)
super(_SingleWorkerOwnedDatasetIterator, self).__init__(dataset, worker, devices, self._options) | Create iterator for the `dataset` to fetch data to worker's `devices` .
`OwnedMultiDeviceIterator` is used to prefetch input to the devices on the
given worker. The lifetime of this iterator is tied to the encompassing
python object. Once we go out of scope of the python object or return from
a tf.function the underlying iterator resource is deleted.
Args:
dataset: A `tf.data.Dataset` instance.
worker: Worker on which ops should be created.
devices: Distribute data from `dataset` to these devices.
components: Tensor components to construct the
_SingleWorkerOwnedDatasetIterator from.
element_spec: A nested structure of `TypeSpec` objects that represents the
type specification of elements of the iterator.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
canonicalize_devices: Whether to canonicalize devices for workers fully or
partially. If False, it will partially canonicalize devices by removing
job and task. | github-repos |
def _guess_fmt_from_bytes(inp):
stripped = inp.strip()
fmt = None
ini_section_header_re = re.compile(b'^\[([\w-]+)\]')
if len(stripped) == 0:
fmt = 'yaml'
else:
if stripped.startswith(b'<'):
fmt = 'xml'
else:
for l in stripped.splitlines():
line = l.strip()
if not line.startswith(b'#'):  # '#' assumed as the comment prefix
break
if ini_section_header_re.match(line):
fmt = 'ini'
else:
fmt = 'yaml'
return fmt | Try to guess format of given bytestring.
Args:
inp: byte string to guess format of
Returns:
guessed format | juraj-google-style |
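A few illustrative calls, assuming the helper is importable; it only distinguishes xml, ini and yaml, defaulting to yaml.

```python
print(_guess_fmt_from_bytes(b'<config><key>1</key></config>'))  # 'xml'
print(_guess_fmt_from_bytes(b'[section]\nkey = 1'))              # 'ini'
print(_guess_fmt_from_bytes(b'key: 1\nother: 2'))                # 'yaml'
print(_guess_fmt_from_bytes(b''))                                # 'yaml' (empty default)
```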
def inspect_config(self, id):
url = self._url('/configs/{0}', id)
return self._result(self._get(url), True) | Retrieve config metadata
Args:
id (string): Full ID of the config to inspect
Returns (dict): A dictionary of metadata
Raises:
:py:class:`docker.errors.NotFound`
if no config with that ID exists | juraj-google-style |
def formula_html(self, reversed_=False):
if self.H_count == 1:
text = "H"
elif self.H_count > 1:
text = "H<sub>{}</sub>".format(self.H_count)
else:
text = ""
seq = [self.symbol, text, self.charge_sign_html()]
if reversed_:
seq = reversed(seq)
return "".join(seq) | Chemical formula HTML
Args:
reversed (bool): reversed text for leftmost atom groups | juraj-google-style |
def load_with_vocab(fin, vocab, dtype=np.float32):
arr = None
for line in fin:
try:
(token, v) = _parse_line(line, dtype)
except (ValueError, IndexError):
raise ParseError((b'Parsing error in line: ' + line))
if (token in vocab):
if (arr is None):
arr = np.empty((len(vocab), len(v)), dtype=dtype)
arr.fill(np.NaN)
elif (arr.shape[1] != len(v)):
raise ParseError((b'Vector size did not match in line: ' + line))
arr[(vocab[token], :)] = np.array(v, dtype=dtype).reshape(1, (- 1))
return arr | Load word embedding file with predefined vocabulary
Args:
fin (File): File object to read. File should be open for reading ascii.
vocab (dict): Mapping from words (``bytes``) to vector indices
(``int``).
dtype (numpy.dtype): Element data type to use for the array.
Returns:
numpy.ndarray: Word embedding representation vectors | codesearchnet |
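An illustrative call; the plain-text `token v1 v2 ...` line format is an assumption here, since `_parse_line` is not shown in this snippet.

```python
import io

# Two 3-dimensional vectors in a plain-text embedding file.
fin = io.BytesIO(b'the 0.1 0.2 0.3\ncat 0.4 0.5 0.6\n')
vocab = {b'the': 0, b'cat': 1}

arr = load_with_vocab(fin, vocab)
print(arr.shape)  # (2, 3); row order follows the indices given in `vocab`
```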
def add_path(self, path, path_filter=None):
for root, _, files in os.walk(path):
for filename in files:
full_path_and_filename = os.path.join(root, filename)
if path_filter is None or path_filter(full_path_and_filename):
relative_path_and_filename = full_path_and_filename.replace(path + '/', '')
with open(full_path_and_filename, 'rb') as handle:
self.files[relative_path_and_filename] = b64encode(handle.read()).decode('utf-8') | Add all files from the given path to the object.
Args:
path (str): valid, existing directory | juraj-google-style |
def assertProtoEqual(self, a: message.Message, b: message.Message, check_initialized: bool=True, normalize_numbers: bool=False, msg: Optional[str]=None) -> None:
pool = descriptor_pool.Default()
if isinstance(a, str):
a = text_format.Merge(a, b.__class__(), descriptor_pool=pool)
for pb in (a, b):
if check_initialized:
errors = pb.FindInitializationErrors()
if errors:
cast(absltest.TestCase, self).fail(f'Initialization errors: {errors}\n{pb}')
if normalize_numbers:
normalize_number_fields(pb)
cast(absltest.TestCase, self).assertMultiLineEqual(text_format.MessageToString(a, descriptor_pool=pool), text_format.MessageToString(b, descriptor_pool=pool), msg=msg) | Fails with a useful error if a and b aren't equal.
Comparison of repeated fields matches the semantics of
unittest.TestCase.assertEqual(), ie order and extra duplicates fields matter.
Args:
self: absltest.TestCase
a: proto2 PB instance, or text string representing one.
b: proto2 PB instance -- message.Message or subclass thereof.
check_initialized: boolean, whether to fail if either a or b isn't
initialized.
normalize_numbers: boolean, whether to normalize types and precision of
numbers before comparison.
msg: if specified, is used as the error message on failure. | github-repos |
def _build_network_on_replica(model, mode, inputs=None, targets=None):
from tensorflow.python.keras import models
from tensorflow.python.keras.engine import sequential
if isinstance(model, sequential.Sequential):
updated_model = models._clone_sequential_model(model, input_tensors=inputs, layer_fn=models.share_weights)
else:
updated_model = models._clone_functional_model(model, input_tensors=inputs, layer_fn=models.share_weights)
updated_model._callable_losses = model._callable_losses
def _upcast_low_precision_outputs(output):
if output.dtype == dtypes.bfloat16:
return math_ops.cast(output, dtypes.float32)
else:
return output
updated_model.outputs = [_upcast_low_precision_outputs(o) for o in updated_model.outputs]
if isinstance(targets, tuple):
targets = nest.flatten(targets)
if mode == ModeKeys.PREDICT and inputs is not None:
_custom_compile_for_predict(updated_model)
else:
updated_model.compile(model.optimizer, model.loss, metrics=metrics_module.clone_metrics(model._compile_metrics), loss_weights=model.loss_weights, sample_weight_mode=model.sample_weight_mode, weighted_metrics=metrics_module.clone_metrics(model._compile_weighted_metrics), target_tensors=targets)
return updated_model | Build an updated model on replicas.
We create a new Keras model while sharing the variables from the old graph.
Building a new sub-graph is required since the original keras model creates
placeholders for the input and the output that are not accessible till we
call iterator.get_next() inside the step_fn for `fit`/`evaluate`/`predict`.
The sharing of weights and layers between the old and the new model guarantee
that we're using Strategy variables and any updates on either model are
reflected correctly in callbacks and loop iterations.
We need to make sure we share the optimizers between the old and the new model
as well so that optimizer state is not lost if the user is running fit
multiple times.
Args:
model: Model to be replicated across Replicas
mode: Which of fit/eval/predict is building the distributed network
inputs: Input variables to be passed to the model
targets: Target tensor to be passed to model.compile
Returns:
A new model with shared layers with the old model. | github-repos |
def _sobol_generating_matrices(dim: types.IntTensor, log_num_results: types.IntTensor, num_digits: types.IntTensor, dtype=None) -> types.IntTensor:
global _INITIAL_DIRECTION_NUMBERS
global _PRIMITIVE_POLYNOMIAL_COEFFICIENTS
dtype = dtype or tf.int32
zero = tf.constant(0, dtype=dtype)
indices = tf.cast(tf.range(0, log_num_results), dtype)
dimensions = tf.range(0, dim)
directions = tf.convert_to_tensor(_INITIAL_DIRECTION_NUMBERS, dtype=dtype, name='direction_numbers')
padding = log_num_results - utils.get_shape(directions)[0]
padding = tf.math.maximum(zero, padding)
directions = tf.pad(directions, [[zero, padding], [zero, zero]])
directions = directions[:log_num_results]
directions = tf.gather(directions, dimensions, axis=1)
directions = tf.cast(tf.transpose(directions), dtype)
polynomial = tf.convert_to_tensor(_PRIMITIVE_POLYNOMIAL_COEFFICIENTS, dtype=dtype, name='polynomial_coefficients')
polynomial = tf.cast(tf.gather(polynomial, tf.expand_dims(dimensions, axis=1)), dtype)
degree = tf.cast(tf.math.floor(utils.log2(tf.cast(polynomial, dtype=tf.float32))), dtype=dtype)
initial_matrices = tf.bitwise.left_shift(directions, tf.cast(tf.expand_dims(num_digits - 1 - indices, axis=0), dtype))
def loop_predicate_fn(matrix_values, column):
del matrix_values
return column < log_num_results - 1
def loop_body_fn(matrices, column):
column_values = tf.gather(matrices, [column], axis=1)
should_be_updated = tf.logical_and(tf.less_equal(tf.math.maximum(degree, column + 1), indices), tf.less_equal(indices, column + degree))
updated_matrices = tf.bitwise.bitwise_xor(tf.where(tf.equal(indices, column + degree), tf.bitwise.right_shift(column_values, degree), matrices), utils.filter_tensor(column_values, polynomial, column + degree - indices))
returned_matrices = tf.where(should_be_updated, updated_matrices, matrices)
return (returned_matrices, column + 1)
matrices, _ = tf.while_loop(loop_predicate_fn, loop_body_fn, loop_vars=(initial_matrices, tf.constant(0, dtype)), maximum_iterations=tf.cast(log_num_results, tf.int32) - 1)
return matrices | Returns all Sobol generating matrices.
Args:
dim: Positive scalar `Tensor` with rank 0 representing the event size of
points which can be sampled from the resulting generating matrix.
log_num_results: Positive scalar `Tensor` with rank 0 representing the
base-2 logarithm of the maximum number of points which can be sampled from
the resulting generating matrix.
num_digits: Positive scalar `Tensor` with rank 0 representing the base-2
precision of points which can be sampled from the resulting generating
matrix.
dtype: Optional `dtype`. The `dtype` of the output `Tensor` (either a signed
or unsigned integer `dtype`).
Default value: `None` which maps to `int32`.
Returns:
A scalar `Tensor` with shape `(dim, ceil(log2(num_results)))`. | github-repos |
def FindFirst(cls, setting_matcher, device_matcher=None, **kwargs):
try:
return next(cls.FindDevices(setting_matcher, device_matcher=device_matcher, **kwargs))
except StopIteration:
raise usb_exceptions.DeviceNotFoundError('No device available, or it is in the wrong configuration.') | Find and return the first matching device.
Args:
setting_matcher: See cls.FindDevices.
device_matcher: See cls.FindDevices.
**kwargs: See cls.FindDevices.
Returns:
An instance of UsbHandle.
Raises:
DeviceNotFoundError: Raised if the device is not available. | codesearchnet |
def no_llvm(*args, uid=0, gid=0, **kwargs):
uchroot_cmd = no_args()
uchroot_cmd = uchroot_cmd[__default_opts__(uid, gid)]
return uchroot_cmd[args] | Return a customizable uchroot command.
The command will be executed inside a uchroot environment.
Args:
args: List of additional arguments for uchroot (typical: mounts)
Return:
chroot_cmd | juraj-google-style |
def process_dimensions(kdims, vdims):
dimensions = {}
for group, dims in [('kdims', kdims), ('vdims', vdims)]:
if dims is None:
continue
elif isinstance(dims, (tuple, basestring, Dimension, dict)):
dims = [dims]
elif not isinstance(dims, list):
raise ValueError("%s argument expects a Dimension or list of dimensions, "
"specified as tuples, strings, dictionaries or Dimension "
"instances, not a %s type. Ensure you passed the data as the "
"first argument." % (group, type(dims).__name__))
for dim in dims:
if not isinstance(dim, (tuple, basestring, Dimension, dict)):
raise ValueError('Dimensions must be defined as a tuple, '
'string, dictionary or Dimension instance, '
'found a %s type.' % type(dim).__name__)
dimensions[group] = [asdim(d) for d in dims]
return dimensions | Converts kdims and vdims to Dimension objects.
Args:
kdims: List or single key dimension(s) specified as strings,
tuples dicts or Dimension objects.
vdims: List or single value dimension(s) specified as strings,
tuples dicts or Dimension objects.
Returns:
Dictionary containing kdims and vdims converted to Dimension
objects:
{'kdims': [Dimension('x')], 'vdims': [Dimension('y')]} | juraj-google-style
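A quick sketch of the accepted input forms, assuming `process_dimensions` is importable from HoloViews:

```python
# Dimensions may be given as strings, (name, label) tuples, dicts or
# Dimension instances; everything is normalized to Dimension objects.
dims = process_dimensions(kdims=['time'], vdims=[('temp', 'Temperature')])
print(dims['kdims'])  # [Dimension('time')]
print(dims['vdims'])  # [Dimension('temp')] carrying the label 'Temperature'
```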
def GetArtifactParserDependencies(rdf_artifact):
deps = set()
processors = parser.Parser.GetClassesByArtifact(rdf_artifact.name)
for p in processors:
deps.update(p.knowledgebase_dependencies)
return deps | Return the set of knowledgebase path dependencies required by the parser.
Args:
rdf_artifact: RDF artifact object.
Returns:
A set of strings for the required kb objects e.g.
["users.appdata", "systemroot"] | juraj-google-style |
def recursively_convert_to_json_serializable(test_obj):
try:
if ((not isinstance(test_obj, list)) and np.isnan(test_obj)):
return None
except TypeError:
pass
except ValueError:
pass
if isinstance(test_obj, (string_types, integer_types, float, bool)):
return test_obj
elif isinstance(test_obj, dict):
new_dict = {}
for key in test_obj:
new_dict[str(key)] = recursively_convert_to_json_serializable(test_obj[key])
return new_dict
elif isinstance(test_obj, (list, tuple, set)):
new_list = []
for val in test_obj:
new_list.append(recursively_convert_to_json_serializable(val))
return new_list
elif isinstance(test_obj, (np.ndarray, pd.Index)):
return [recursively_convert_to_json_serializable(x) for x in test_obj.tolist()]
elif (test_obj is None):
return test_obj
elif isinstance(test_obj, (datetime.datetime, datetime.date)):
return str(test_obj)
elif np.issubdtype(type(test_obj), np.bool_):
return bool(test_obj)
elif (np.issubdtype(type(test_obj), np.integer) or np.issubdtype(type(test_obj), np.uint)):
return int(test_obj)
elif np.issubdtype(type(test_obj), np.floating):
return float(round(test_obj, sys.float_info.dig))
elif isinstance(test_obj, pd.DataFrame):
return recursively_convert_to_json_serializable(test_obj.to_dict(orient='records'))
elif isinstance(test_obj, decimal.Decimal):
return float(test_obj)
else:
raise TypeError(('%s is of type %s which cannot be serialized.' % (str(test_obj), type(test_obj).__name__))) | Helper function to convert a dict object to one that is serializable
Args:
test_obj: an object to attempt to convert a corresponding json-serializable object
Returns:
(dict) A converted test_object
Warning:
test_obj may also be converted in place. | codesearchnet |
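A short demonstration of the conversions, assuming the function is in scope:

```python
import datetime
import json
import numpy as np
import pandas as pd

obj = {
    'count': np.int64(3),                    # numpy integer -> int
    'values': np.array([1.0, 2.0, np.nan]),  # ndarray -> list, NaN -> None
    'when': datetime.date(2020, 1, 1),       # date -> str
    'frame': pd.DataFrame({'a': [1, 2]}),    # DataFrame -> list of records
}

clean = recursively_convert_to_json_serializable(obj)
print(json.dumps(clean))
```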
def create_endpoints_csv_file(self, timeout=(- 1)):
uri = '{}/endpoints/'.format(self.data['uri'])
return self._helper.do_post(uri, {}, timeout, None) | Creates an endpoints CSV file for a SAN.
Args:
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns:
dict: Endpoint CSV File Response. | codesearchnet |
def collections(self, page_size=None):
iterator = self._client._firestore_api.list_collection_ids(self._document_path, page_size=page_size, metadata=self._client._rpc_metadata)
iterator.document = self
iterator.item_to_value = _item_to_collection_ref
return iterator | List subcollections of the current document.
Args:
page_size (Optional[int]]): The maximum number of collections
in each page of results from this request. Non-positive values
are ignored. Defaults to a sensible value set by the API.
Returns:
Sequence[~.firestore_v1beta1.collection.CollectionReference]:
iterator of subcollections of the current document. If the
document does not exist at the time of `snapshot`, the
iterator will be empty | codesearchnet |
def set_package_releases(self, project_name, versions):
self.packages[project_name] = sorted(versions, reverse=True) | Store package information in ``self.packages``
Args:
project_name (str): This will be used as the key in the
dictionary.
versions (list): List of ``str`` representing the available
versions of a project. | juraj-google-style |
def Parse(self, rdf_data):
if self._filter:
return list(self._filter.Parse(rdf_data, self.expression))
return rdf_data | Process rdf data through the filter.
Filters sift data according to filter rules. Data that passes the filter
rule is kept, other data is dropped.
If no filter method is provided, the data is returned as a list.
Otherwise, items that meet filter conditions are returned in a list.
Args:
rdf_data: Host data that has already been processed by a Parser into RDF.
Returns:
A list containing data items that matched the filter rules. | juraj-google-style |
def start(self, request: Request) -> Response:
if self._session_state != SessionState.ready:
raise RuntimeError('Session not ready')
response = Response()
yield from self._prepare_fetch(request, response)
response.file_transfer_size = yield from self._fetch_size(request)
if request.restart_value:
try:
yield from self._commander.restart(request.restart_value)
response.restart_value = request.restart_value
except FTPServerError:
_logger.debug('Could not restart file.', exc_info=1)
yield from self._open_data_stream()
command = Command('RETR', request.file_path)
yield from self._begin_stream(command)
self._session_state = SessionState.file_request_sent
return response | Start a file or directory listing download.
Args:
request: Request.
Returns:
A Response populated with the initial data connection reply.
Once the response is received, call :meth:`download`.
Coroutine. | juraj-google-style |
def get_minimum_indentation(text):
lines = text.split('\n')
indentations = [get_indentation(line_)
for line_ in lines if len(line_.strip()) > 0]
if len(indentations) == 0:
return 0
return min(indentations) | Returns the number of preceding spaces
Args:
text (str): unicode text
Returns:
int: indentation
CommandLine:
python -m utool.util_str --exec-get_minimum_indentation --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_str import * # NOQA
>>> import utool as ut
>>> text = ' foo\n bar'
>>> result = get_minimum_indentation(text)
>>> print(result)
3 | juraj-google-style |
def deliver_tx(self, raw_transaction):
self.abort_if_abci_chain_is_not_synced()
logger.debug('deliver_tx: %s', raw_transaction)
transaction = self.bigchaindb.is_valid_transaction(
decode_transaction(raw_transaction), self.block_transactions)
if not transaction:
logger.debug('deliver_tx: INVALID')
return ResponseDeliverTx(code=CodeTypeError)
else:
logger.debug('storing tx')
self.block_txn_ids.append(transaction.id)
self.block_transactions.append(transaction)
return ResponseDeliverTx(code=CodeTypeOk) | Validate the transaction before mutating the state.
Args:
raw_transaction: a raw string (in bytes) transaction. | juraj-google-style
def __init__(self,
batch_size=1000,
threadpool_prefix="batch_processor",
threadpool_size=10):
super(BatchConverter, self).__init__()
self.batch_size = batch_size
self.threadpool_prefix = threadpool_prefix
self.threadpool_size = threadpool_size | BatchProcessor constructor.
Args:
batch_size: All the values will be processed in batches of this size.
threadpool_prefix: Prefix that will be used in thread pool's threads
names.
threadpool_size: Size of a thread pool that will be used. If
threadpool_size is 0, no threads will be used and all conversions will
be done in the current thread. | juraj-google-style |
def get_type_parameters(self, annot, seen=None):
seen = seen or set()
if annot in seen or not annot.formal:
return []
if isinstance(annot, mixin.NestedAnnotation):
seen = seen | {annot}
if isinstance(annot, abstract.TypeParameter):
return [annot]
elif isinstance(annot, abstract.TupleClass):
annots = []
for idx in range(annot.tuple_length):
annots.extend(self.get_type_parameters(annot.formal_type_parameters[idx], seen))
return annots
elif isinstance(annot, mixin.NestedAnnotation):
return sum((self.get_type_parameters(t, seen) for _, t in annot.get_inner_types()), [])
return [] | Returns all the TypeParameter instances that appear in the annotation.
Note that if you just need to know whether or not the annotation contains
type parameters, you can check its `.formal` attribute.
Args:
annot: An annotation.
seen: A seen set. | github-repos |
def get_account(self, address, id=None, endpoint=None):
return self._call_endpoint(GET_ACCOUNT_STATE, params=[address], id=id, endpoint=endpoint) | Look up an account on the blockchain. Sample output:
Args:
address: (str) address to lookup ( in format 'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK')
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call | juraj-google-style |
def parse_method_configs(interface_config):
retry_codes_map = {name: retry_codes for (name, retry_codes) in six.iteritems(interface_config.get('retry_codes', {}))}
retry_params_map = {name: retry_params for (name, retry_params) in six.iteritems(interface_config.get('retry_params', {}))}
method_configs = {}
for (method_name, method_params) in six.iteritems(interface_config.get('methods', {})):
retry_params_name = method_params.get('retry_params_name')
if (retry_params_name is not None):
retry_params = retry_params_map[retry_params_name]
retry_ = _retry_from_retry_config(retry_params, retry_codes_map[method_params['retry_codes_name']])
timeout_ = _timeout_from_retry_config(retry_params)
else:
retry_ = None
timeout_ = timeout.ConstantTimeout((method_params['timeout_millis'] / _MILLIS_PER_SECOND))
method_configs[method_name] = MethodConfig(retry=retry_, timeout=timeout_)
return method_configs | Creates default retry and timeout objects for each method in a gapic
interface config.
Args:
interface_config (Mapping): The interface config section of the full
gapic library config. For example, If the full configuration has
an interface named ``google.example.v1.ExampleService`` you would
pass in just that interface's configuration, for example
``gapic_config['interfaces']['google.example.v1.ExampleService']``.
Returns:
Mapping[str, MethodConfig]: A mapping of RPC method names to their
configuration. | codesearchnet |
def get_option(option_name, section_name="main", default=_sentinel, cfg_file=cfg_file):
defaults = get_defaults()
if default != _sentinel:
my_defaults = {option_name: default}
else:
my_defaults = defaults.get('section_name', {})
parser = get_parser(cfg_file)
return parser.get(section_name, option_name, vars=my_defaults) | Returns a specific option specified in a config file
Arguments:
option_name -- Name of the option (example host_name)
section_name -- Which section of the config (default: name)
examples:
>>> get_option("some option", default="default result")
'default result' | juraj-google-style |
def get_connection_string(params, hide_password=True):
connection_string = (params['driver'] + '://')
user = params.get('user', None)
password = params.get('password', None)
host = params.get('host', None)
port = params.get('port', None)
database = params.get('database', None)
if (database is None):
raise ValueError("Field 'database' of connection parameters cannot be None.")
if ((password is None) and (user is not None)):
password = Client._get_password(params)
if (password is None):
raise RuntimeError('Password not defined and not available in keyring.')
if (host is not None):
if (user is not None):
connection_string += user
if (len(password) > 0):
if hide_password:
connection_string += ':[password hidden]'
else:
connection_string += (':' + password)
connection_string += '@'
connection_string += host
if (port is not None):
connection_string += (':' + str(port))
connection_string += ('/' + database)
return connection_string | Get a database connection string
Args:
params (dict): database configuration, as defined in :mod:`ozelot.config`
hide_password (bool): if True, the password is hidden in the returned string
(use this for logging purposes).
Returns:
str: connection string | codesearchnet |
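An illustrative parameter dict, assuming the method is exposed as a static helper on `Client` and that the scheme separator is the usual `://`; the credentials are invented.

```python
params = {
    'driver': 'postgresql',
    'user': 'analyst',
    'password': 's3cret',     # given explicitly, so no keyring lookup is needed
    'host': 'db.example.com',
    'port': 5432,
    'database': 'warehouse',
}

print(Client.get_connection_string(params))
# postgresql://analyst:[password hidden]@db.example.com:5432/warehouse
print(Client.get_connection_string(params, hide_password=False))
# postgresql://analyst:s3cret@db.example.com:5432/warehouse
```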
def has_course_mode(self, course_run_id, mode):
course_modes = self.get_course_modes(course_run_id)
return any((course_mode for course_mode in course_modes if (course_mode['slug'] == mode))) | Query the Enrollment API to see whether a course run has a given course mode available.
Arguments:
course_run_id (str): The string value of the course run's unique identifier
Returns:
bool: Whether the course run has the given mode available for enrollment. | codesearchnet
def load_words(self, words):
self._dictionary.update([word.lower() for word in words])
self._update_dictionary() | Load a list of words from which to generate a word frequency list
Args:
words (list): The list of words to be loaded | juraj-google-style |
def firmware_drivers(self):
if (not self.__firmware_drivers):
self.__firmware_drivers = FirmwareDrivers(self.__connection)
return self.__firmware_drivers | Gets the FirmwareDrivers API client.
Returns:
FirmwareDrivers: | codesearchnet |
def contains_peroxide(structure, relative_cutoff=1.1):
ox_type = oxide_type(structure, relative_cutoff)
if ox_type == "peroxide":
return True
else:
return False | Determines if a structure contains peroxide anions.
Args:
structure (Structure): Input structure.
relative_cutoff: The peroxide bond distance is 1.49 Angstrom.
Relative_cutoff * 1.49 stipulates the maximum distance two O
atoms must be to each other to be considered a peroxide.
Returns:
Boolean indicating if structure contains a peroxide anion. | juraj-google-style |
def schedule(self, callback, *args, **kwargs):
self._executor.submit(callback, *args, **kwargs) | Schedule the callback to be called asynchronously in a thread pool.
Args:
callback (Callable): The function to call.
args: Positional arguments passed to the function.
kwargs: Key-word arguments passed to the function.
Returns:
None | juraj-google-style |
def from_http_response(response):
try:
payload = response.json()
except ValueError:
payload = {"error": {"message": response.text or "unknown error"}}
error_message = payload.get("error", {}).get("message", "unknown error")
errors = payload.get("error", {}).get("errors", ())
message = "{method} {url}: {error}".format(
method=response.request.method, url=response.request.url, error=error_message
)
exception = from_http_status(
response.status_code, message, errors=errors, response=response
)
return exception | Create a :class:`GoogleAPICallError` from a :class:`requests.Response`.
Args:
response (requests.Response): The HTTP response.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`, with the message and errors populated
from the response. | juraj-google-style |
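A hedged usage sketch; it assumes the requests library and an endpoint that returns an error status (the URL is illustrative):

import requests

response = requests.get("https://example.googleapis.com/v1/missing")   # hypothetical URL
if response.status_code >= 400:
    raise from_http_response(response)   # raises the matching GoogleAPICallError subclass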
def _IsText(self, bytes_in, encoding=None):
is_text = True
if isinstance(bytes_in, py2to3.UNICODE_TYPE):
return is_text
for value in bytes_in:
if py2to3.PY_2:
value = ord(value)
if (not (31 < value < 128)):
is_text = False
break
if is_text:
return is_text
try:
bytes_in.decode('utf-8')
return True
except UnicodeDecodeError:
pass
if encoding:
try:
bytes_in.decode(encoding)
return True
except LookupError:
logger.error('Unsupported encoding: {0:s}'.format(encoding))
except UnicodeDecodeError:
pass
return False | Examine the bytes in and determine if they are indicative of text.
Parsers need a quick and at least semi-reliable method of discovering whether
or not a particular byte stream is text or resembles text. This can be used in
text parsers, for instance, to determine whether a file is a text file.
The method assumes the byte sequence is either ASCII, UTF-8 or the supplied
character encoding. Otherwise it assumes the byte sequence is binary data
rather than text.
Args:
bytes_in (bytes|str): byte stream to examine.
encoding (Optional[str]): encoding to test, if not defined ASCII and
UTF-8 are tried.
Returns:
bool: True if the bytes stream contains text. | codesearchnet |
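A simplified, Python-3-only sketch of the same heuristic; the helper name is hypothetical and the py2to3 shim is dropped:

def looks_like_text(data, encoding=None):
    if isinstance(data, str):
        return True
    if all(31 < byte < 128 for byte in data):   # printable ASCII range
        return True
    for candidate in [c for c in ('utf-8', encoding) if c]:
        try:
            data.decode(candidate)
            return True
        except (UnicodeDecodeError, LookupError):
            pass
    return False

print(looks_like_text(b"hello world"))        # True
print(looks_like_text(b"\x00\xff\xfe\x01"))   # False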
def __getitem__(self, item):
depth = item.count('.') + 1
parts = item.split('.', 1)
for m in self.modules:
if parts[0] == m.name:
if depth == 1:
return m
for p in self.packages:
if parts[0] == p.name:
if depth == 1:
return p
item = p.get(parts[1])
if item:
return item
raise KeyError(item) | Return the corresponding Package or Module object.
Args:
item (str): name of the package/module, dot-separated.
Returns:
Package/Module: corresponding object. | juraj-google-style |
def compile_intermediate_cpfs(self,
scope: Dict[str, TensorFluent],
batch_size: Optional[int] = None,
noise: Optional[Noise] = None) -> List[CPFPair]:
interm_fluents = []
with self.graph.as_default():
with tf.name_scope('intermediate_cpfs'):
for cpf in self.rddl.domain.intermediate_cpfs:
cpf_noise = noise.get(cpf.name, None) if noise is not None else None
name_scope = utils.identifier(cpf.name)
with tf.name_scope(name_scope):
t = self._compile_expression(cpf.expr, scope, batch_size, cpf_noise)
interm_fluents.append((cpf.name, t))
scope[cpf.name] = t
return interm_fluents | Compiles the intermediate fluent CPFs given the current `state` and `action` scope.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.
batch_size (Optional[int]): The batch size.
Returns:
A list of intermediate fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`. | juraj-google-style |
def _convert_to_json(self, response):
try:
return response.json()
except ValueError:
logging.warning('Expected response in JSON format from {0} but the actual response text is: {1}'.format(response.request.url, response.text))
return None | Converts response to JSON.
If the response cannot be converted to JSON then `None` is returned.
Args:
response - An object of type `requests.models.Response`
Returns:
Response in JSON format if the response can be converted to JSON. `None` otherwise. | codesearchnet |
def truthyAttrs(cls):
def __bool__(self):
return bool(any(getattr(self, attr) for attr in self.attrs))
cls.__bool__ = cls.__nonzero__ = __bool__
return cls | Class decorator: override __bool__ to set truthiness based on any attr being present.
Args:
cls (class): class to decorate
Returns:
class: same, but modified, class | juraj-google-style |
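A hedged example; `attrs` is assumed to be a class-level tuple of attribute names, since that is what the injected __bool__ iterates over:

@truthyAttrs
class Report:
    attrs = ('errors', 'warnings')            # assumed convention: names of attrs to inspect

    def __init__(self, errors=None, warnings=None):
        self.errors = errors or []
        self.warnings = warnings or []

print(bool(Report()))                    # False - every listed attr is empty
print(bool(Report(errors=['boom'])))     # True - at least one attr is truthy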
def insert(cls, cur, table: str, values: dict):
keys = cls._COMMA.join(values.keys())
value_place_holder = (cls._PLACEHOLDER * len(values))
query = cls._insert_string.format(table, keys, value_place_holder[:(- 1)])
(yield from cur.execute(query, tuple(values.values())))
return (yield from cur.fetchone()) | Creates an insert statement with only chosen fields
Args:
table: a string indicating the name of the table
values: a dict of fields and values to be inserted
Returns:
A 'Record' object with table columns as properties | codesearchnet |
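The class constants used above are not shown; a plausible shape for them, and the query they would produce, might look like this (all three constants are assumptions):

_COMMA = ', '
_PLACEHOLDER = '%s,'
_insert_string = 'INSERT INTO {} ({}) VALUES ({}) RETURNING *;'   # assumed template

values = {'name': 'Ada', 'email': 'ada@example.com'}
keys = _COMMA.join(values.keys())
placeholders = (_PLACEHOLDER * len(values))[:-1]
print(_insert_string.format('users', keys, placeholders))
# INSERT INTO users (name, email) VALUES (%s,%s) RETURNING *;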
def add(self, promise, bitoffset, *, _offsetideal=None):
if _offsetideal is None:
_offsetideal = bitoffset
if isinstance(promise, TDOPromise):
newpromise = promise.makesubatoffset(
bitoffset, _offsetideal=_offsetideal)
self._promises.append(newpromise)
elif isinstance(promise, TDOPromiseCollection):
for p in promise._promises:
self.add(p, bitoffset, _offsetideal=_offsetideal) | Add a promise to the promise collection at an optional offset.
Args:
promise: A TDOPromise to add to this collection.
bitoffset: An integer offset for this new promise in the collection.
_offsetideal: An integer offset for this new promise in the collection if the associated primitive supports arbitrary TDO control. | juraj-google-style |
def TransformerLM(vocab_size,
feature_depth=512,
feedforward_depth=2048,
num_layers=6,
num_heads=8,
dropout=0.1,
max_len=2048,
mode='train'):
return layers.Serial(
layers.ShiftRight(),
layers.Embedding(feature_depth, vocab_size),
layers.Dropout(rate=dropout, mode=mode),
layers.PositionalEncoding(max_len=max_len),
layers.Serial(*[DecoderLayer(feature_depth, feedforward_depth, num_heads,
dropout, mode)
for _ in range(num_layers)]),
layers.LayerNorm(),
layers.Dense(vocab_size),
layers.LogSoftmax()
) | Transformer language model (only uses the decoder part of Transformer).
Args:
vocab_size: int: vocab size
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_layers: int: number of encoder/decoder layers
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
Returns:
the layer. | juraj-google-style |
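Constructing the model is a single call to the factory above; actually running it requires the surrounding framework's parameter initialization and input pipeline, which are not shown here:

model = TransformerLM(vocab_size=32000, num_layers=2, num_heads=4, mode='eval')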
def read_schema(path):
result = schema_pb2.Schema()
contents = file_io.read_file_to_string(path)
text_format.Parse(contents, result)
return result | Reads a schema from the provided location.
Args:
path: The location of the file holding a serialized Schema proto.
Returns:
An instance of Schema or None if the input argument is None | github-repos |
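A usage sketch; it assumes a Schema proto serialized in text format exists at the given (illustrative) path:

schema = read_schema('/tmp/schema.pbtxt')   # hypothetical path
for feature in schema.feature:              # Schema protos carry a repeated `feature` field
    print(feature.name, feature.type)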
def process_sequence(sequence,
rules=None,
skip_non_vietnamese=True):
result = ""
raw = result
result_parts = []
if rules is None:
rules = get_telex_definition()
accepted_chars = _accepted_chars(rules)
for key in sequence:
if key not in accepted_chars:
result_parts.append(result)
result_parts.append(key)
result = ""
raw = ""
else:
result, raw = process_key(
string=result,
key=key,
fallback_sequence=raw,
rules=rules,
skip_non_vietnamese=skip_non_vietnamese)
result_parts.append(result)
return ''.join(result_parts) | \
Convert a key sequence into a Vietnamese string with diacritical marks.
Args:
rules (optional): see docstring for process_key().
skip_non_vietnamese (optional): see docstring for process_key().
It even supports continuous key sequences connected by separators,
e.g. process_sequence('con meof.ddieen') should work. | juraj-google-style |
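Two usage sketches that rely only on the function above; the expected output assumes the default Telex rules:

print(process_sequence('vieejt nam'))        # expected: 'việt nam'
print(process_sequence('con meof.ddieen'))   # the separator resets the buffer, as noted above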
def image_load(filename: str) -> tcod.image.Image:
return tcod.image.Image._from_cdata(
ffi.gc(lib.TCOD_image_load(_bytes(filename)), lib.TCOD_image_delete)
) | Load an image file into an Image instance and return it.
Args:
filename (AnyStr): Path to a .bmp or .png image file. | juraj-google-style |
def from_raw(self, file_names=None, **kwargs):
if file_names:
self.file_names = file_names
if (not isinstance(file_names, (list, tuple))):
self.file_names = [file_names]
raw_file_loader = self.loader
set_number = 0
test = None
counter = 0
self.logger.debug('start iterating through file(s)')
for f in self.file_names:
self.logger.debug('loading raw file:')
self.logger.debug(f'{f}')
new_tests = raw_file_loader(f, **kwargs)
if new_tests:
if (test is not None):
self.logger.debug('continuing reading files...')
_test = self._append(test[set_number], new_tests[set_number])
if (not _test):
self.logger.warning(f'EMPTY TEST: {f}')
continue
test[set_number] = _test
self.logger.debug('added this test - started merging')
for j in range(len(new_tests[set_number].raw_data_files)):
raw_data_file = new_tests[set_number].raw_data_files[j]
file_size = new_tests[set_number].raw_data_files_length[j]
test[set_number].raw_data_files.append(raw_data_file)
test[set_number].raw_data_files_length.append(file_size)
counter += 1
if (counter > 10):
self.logger.debug('ERROR? Too many files to merge')
raise ValueError('Too many files to merge - could be a p2-p3 zip thing')
else:
self.logger.debug('getting data from first file')
if new_tests[set_number].no_data:
self.logger.debug('NO DATA')
else:
test = new_tests
else:
self.logger.debug('NOTHING LOADED')
self.logger.debug('finished loading the raw-files')
test_exists = False
if test:
if test[0].no_data:
            self.logger.debug('the first dataset (or only dataset) loaded from the raw data file is empty')
else:
test_exists = True
if test_exists:
if (not prms.Reader.sorted_data):
self.logger.debug('sorting data')
test[set_number] = self._sort_data(test[set_number])
self.datasets.append(test[set_number])
else:
self.logger.warning('No new datasets added!')
self.number_of_datasets = len(self.datasets)
self.status_datasets = self._validate_datasets()
self._invent_a_name()
return self | Load a raw data-file.
Args:
file_names (list of raw-file names): uses CellpyData.file_names if
None. If the list contains more than one file name, then the
runs will be merged together. | codesearchnet |
def _initialize_splittable_dimensions(self, mtf_graph):
all_mtf_dimension_names = set()
for mtf_operation in mtf_graph.operations:
for mtf_tensor in mtf_operation.outputs:
for mtf_dimension in mtf_tensor.shape.dims:
if not re.match(r"_anonymous_\d*", mtf_dimension.name):
all_mtf_dimension_names.add(mtf_dimension.name)
unsplittable_mtf_dimension_names = set()
for mtf_operation in mtf_graph.operations:
unsplittable_mtf_dimension_names.update(mtf_operation.unsplittable_dims)
return all_mtf_dimension_names - unsplittable_mtf_dimension_names | Initializer for self._splittable_mtf_dimension_names.
Args:
mtf_graph: an mtf.Graph.
Returns:
A set(string) of the names of Mesh TensorFlow dimensions that may be
assigned in a layout. | juraj-google-style |
def exhaustive_fragment_check(self, ontology: pd.DataFrame, iri_curie_fragment_predicate: str='iri', cross_reference_iris: bool=False, cross_reference_fragments: bool=False, diff: bool=True) -> Tuple[list]:
(inside, outside) = ([], [])
header = (['Index'] + list(ontology.columns))
for row in ontology.itertuples():
row = {header[i]: val for (i, val) in enumerate(row)}
entity_suffix = row[iri_curie_fragment_predicate]
if isinstance(entity_suffix, list):
            if (len(entity_suffix) != 1):
                exit('Need to have exactly 1 iri in the cell from the ontology.')
else:
entity_suffix = entity_suffix[0]
entity_fragment = self.extract_fragment(entity_suffix)
ilx_rows = self.fragment2rows.get(entity_fragment)
if (cross_reference_fragments and ilx_rows):
ilx_rows = [row for row in ilx_rows if (entity_fragment.lower() in row['iri'].lower())]
if (cross_reference_iris and ilx_rows):
ilx_rows = [row for row in ilx_rows if (entity_suffix.rsplit('/', 1)[(- 1)].lower() in row['iri'].lower())]
if ilx_rows:
inside.append({'external_ontology_row': row, 'ilx_rows': ilx_rows})
else:
outside.append(row)
if diff:
diff = self.__exhaustive_diff(inside)
return (inside, outside, diff)
    return (inside, outside) | All entities with conflicting fragments get a full diff to see if they belong
Args:
ontology: pandas DataFrame created from an ontology where the colnames are predicates
and if classes exist they are also included in the colnames.
iri_curie_fragment_predicate: usually in qname form and is the colname of the DataFrame for iri
Default is "iri" for graph2pandas module
diff: complete exhaustive diff between curie matches... will take FOREVER if there are a lot -> n^2
Returns:
inside: entities that are inside of InterLex
outside: entities NOT in InterLex
diff (optional): List[List[dict]]... complicated but useful diff between matches only | codesearchnet |
def detect_language(index_page):
dom = dhtmlparser.parseString(index_page)
clean_content = dhtmlparser.removeTags(dom)
lang = None
try:
lang = langdetect.detect(clean_content)
except UnicodeDecodeError:
lang = langdetect.detect(clean_content.decode("utf-8"))
return SourceString(
lang,
source="langdetect"
) | Detect `languages` using `langdetect` library.
Args:
index_page (str): HTML content of the page you wish to analyze.
Returns:
obj: One :class:`.SourceString` object. | juraj-google-style |
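A usage sketch; the HTML snippet is illustrative, and SourceString is assumed to behave like a string carrying a .source attribute:

html = "<html><body><p>Tento text je psán česky.</p></body></html>"
lang = detect_language(html)
print(lang, lang.source)   # e.g. 'cs' 'langdetect'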
def reverse_transform(self, tables, table_metas=None, missing=None):
if (missing is None):
missing = self.missing
else:
self.missing = missing
warnings.warn(DEPRECATION_MESSAGE.format('reverse_transform'), DeprecationWarning)
reverse = {}
for table_name in tables:
table = tables[table_name]
if (table_metas is None):
table_meta = self.table_dict[table_name][1]
else:
table_meta = table_metas[table_name]
reverse[table_name] = self.reverse_transform_table(table, table_meta)
return reverse | Transform data back to its original format.
Args:
tables(dict): mapping of table names to `tuple` where each tuple is on the form
(`pandas.DataFrame`, `dict`). The `DataFrame` contains the transformed
data and the `dict` the corresponding meta information.
If not specified, the tables will be retrieved using the meta_file.
table_metas(dict): Full metadata file for the dataset.
missing(bool): Whether or not to use NullTransformer to handle missing values.
Returns:
dict: Map from `str` (table_names) to `pandas.DataFrame` (transformed data). | codesearchnet |
def abi_to_fasta(input, output):
direcs = [input, ]
zip_files = list_files(input, ['zip'])
if zip_files:
direcs.extend(_process_zip_files(zip_files))
for d in direcs:
files = list_files(d, ['ab1', 'abi'])
seqs = [SeqIO.read(open(f, 'rb'), 'abi') for f in files]
fastas = ['>{}\n{}'.format(s.id, str(s.seq)) for s in seqs]
ofile = os.path.basename(os.path.normpath(d)) + '.fasta'
opath = os.path.join(output, ofile)
open(opath, 'w').write('\n'.join(fastas)) | Converts ABI or AB1 files to FASTA format.
Args:
input (str): Path to a file or directory containing abi/ab1 files or
zip archives of abi/ab1 files
output (str): Path to a directory for the output FASTA files | juraj-google-style |
def torque_off(self):
data = []
data.append(10)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(TORQUE_CONTROL_RAM)
data.append(1)
data.append(0)
send_data(data) | Set the torques of Herkulex to zero
In this mode, position control and velocity control
will not work; enable torque before using them. Also the
servo shaft is freely movable
Args:
none | codesearchnet |
def __init__(self, model_handler: ModelHandler[ExampleT, PredictionT, Any], clock, metrics_namespace, load_model_at_runtime: bool=False, model_tag: str='RunInference'):
self._model_handler = model_handler
self._shared_model_handle = shared.Shared()
self._clock = clock
self._model = None
self._metrics_namespace = metrics_namespace
self._load_model_at_runtime = load_model_at_runtime
self._side_input_path = None
self._model_tag = model_tag
self._cur_tag = model_tag | A DoFn implementation generic to frameworks.
Args:
model_handler: An implementation of ModelHandler.
clock: A clock implementing time_ns. *Used for unit testing.*
metrics_namespace: Namespace of the transform to collect metrics.
load_model_at_runtime: Bool to indicate if model loading should be
deferred to runtime - for example if we are depending on side
inputs to get the model path or we want to enforce a timeout on
model loading.
model_tag: Tag to use to disambiguate models in multi-model settings. | github-repos |
def _apply_transformation(inputs):
(ts, transformation, extend_collection, clear_redo) = inputs
new = ts.append_transformation(transformation, extend_collection, clear_redo=clear_redo)
o = [ts]
if new:
o.extend(new)
return o | Helper method for multiprocessing of apply_transformation. Must not be
in the class so that it can be pickled.
Args:
inputs: Tuple containing the transformed structure, the transformation
to be applied, a boolean indicating whether to extend the
collection, and a boolean indicating whether to clear the redo
Returns:
List of output structures (the modified initial structure, plus
any new structures created by a one-to-many transformation) | codesearchnet |
def _resize_output_size_rescale_to_max_len(height: int, width: int, min_len: Optional[int]=1, max_len: Optional[int]=None) -> Tuple[int, int]:
max_len = max(height, width) if max_len is None else max_len
aspect_ratio = width / height
if width >= height:
width = max_len
height = int(width / aspect_ratio)
if height % 2 != 0:
height += 1
elif height > width:
height = max_len
width = int(height * aspect_ratio)
if width % 2 != 0:
width += 1
height = max(height, min_len)
width = max(width, min_len)
return (height, width) | Get the output size of the image after resizing given a dictionary specifying the max and min sizes.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
min_len (`int`, *optional*, defaults to 1):
Minimum size of the output image.
max_len (`int`, *optional*, defaults to the maximum size of the image):
Maximum size of the output image.
Returns:
The output size of the image after resizing. | github-repos |
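Two worked examples that rely only on the function above; the odd-to-even rounding keeps both sides divisible by 2:

print(_resize_output_size_rescale_to_max_len(480, 640, max_len=320))   # (240, 320)
print(_resize_output_size_rescale_to_max_len(10, 1000, max_len=100))   # (2, 100)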
def validate(self, definition, version=None, strict=False):
if not HAS_KUBERNETES_VALIDATE:
raise KubernetesValidateMissing()
errors = list()
warnings = list()
try:
if version is None:
try:
version = self.version['kubernetes']['gitVersion']
except KeyError:
version = kubernetes_validate.latest_version()
kubernetes_validate.validate(definition, version, strict)
except kubernetes_validate.utils.ValidationError as e:
errors.append("resource definition validation error at %s: %s" % ('.'.join([str(item) for item in e.path]), e.message))
except VersionNotSupportedError as e:
errors.append("Kubernetes version %s is not supported by kubernetes-validate" % version)
except kubernetes_validate.utils.SchemaNotFoundError as e:
warnings.append("Could not find schema for object kind %s with API version %s in Kubernetes version %s (possibly Custom Resource?)" %
(e.kind, e.api_version, e.version))
return warnings, errors | validate checks a kubernetes resource definition
Args:
definition (dict): resource definition
version (str): version of kubernetes to validate against
strict (bool): whether unexpected additional properties should be considered errors
Returns:
warnings (list), errors (list): warnings are missing validations, errors are validation failures | juraj-google-style |
def get_max_res_without_distortion(image_size: Tuple[int, int], target_size: Tuple[int, int]) -> Tuple[int, int]:
original_height, original_width = image_size
target_height, target_width = target_size
scale_w = target_width / original_width
scale_h = target_height / original_height
if scale_w < scale_h:
new_width = target_width
new_height = min(math.floor(original_height * scale_w), target_height)
else:
new_height = target_height
new_width = min(math.floor(original_width * scale_h), target_width)
return (new_height, new_width) | Determines the maximum resolution to which an image can be resized to without distorting its
aspect ratio, based on the target resolution.
Args:
image_size (Tuple[int, int]): The original resolution of the image (height, width).
target_resolution (Tuple[int, int]): The desired resolution to fit the image into (height, width).
Returns:
Tuple[int, int]: The optimal dimensions (height, width) to which the image should be resized.
Example:
>>> get_max_res_without_distortion([200, 300], target_size = [450, 200])
(133, 200)
>>> get_max_res_without_distortion([800, 600], target_size = [450, 1300])
(450, 337) | github-repos |
def encode_texts(self, texts, unknown_token='<UNK>', verbose=1, **kwargs):
if (not self.has_vocab):
raise ValueError('You need to build the vocabulary using `build_vocab` before using `encode_texts`')
if (unknown_token and (unknown_token not in self.special_token)):
raise ValueError(((('Your special token (' + unknown_token) + ') to replace unknown words is not in the list of special token: ') + self.special_token))
progbar = Progbar(len(texts), verbose=verbose, interval=0.25)
encoded_texts = []
for token_data in self.token_generator(texts, **kwargs):
(indices, token) = (token_data[:(- 1)], token_data[(- 1)])
token_idx = self._token2idx.get(token)
if ((token_idx is None) and unknown_token):
token_idx = self.special_token.index(unknown_token)
if (token_idx is not None):
utils._append(encoded_texts, indices, token_idx)
progbar.update(indices[0])
progbar.update(len(texts))
return encoded_texts | Encodes the given texts using internal vocabulary with optionally applied encoding options. See
``apply_encoding_options`` to set various options.
Args:
texts: The list of text items to encode.
unknown_token: The token to replace words that are out of vocabulary. If None, those words are omitted.
verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)
**kwargs: The kwargs for `token_generator`.
Returns:
The encoded texts. | codesearchnet |
def _refine_candidate(self, width, height):
packer = newPacker(PackingMode.Offline, PackingBin.BFF,
pack_algo=self._pack_algo, sort_algo=SORT_LSIDE,
rotation=self._rotation)
packer.add_bin(width, height)
for r in self._rectangles:
packer.add_rect(*r)
packer.pack()
if len(packer[0]) != len(self._rectangles):
return None
new_height = max(packer[0], key=lambda x: x.top).top
return(width, new_height, packer) | Use bottom-left packing algorithm to find a lower height for the
container.
Arguments:
width
height
Returns:
tuple (width, height, PackingAlgorithm): | juraj-google-style |
def __init__(self, filters):
self.filters = filters
super().__init__(', '.join(repr(f) for f in filters)) | Initialization of instances:
Args:
filters (list): the invalid filter names.
Attributes:
filters (list): the invalid filter names. | juraj-google-style |
def InitializeDownload(self, http_request, http=None, client=None):
self.EnsureUninitialized()
if http is None and client is None:
raise exceptions.UserError('Must provide client or http.')
http = http or client.http
if client is not None:
http_request.url = client.FinalizeTransferUrl(http_request.url)
url = http_request.url
if self.auto_transfer:
end_byte = self.__ComputeEndByte(0)
self.__SetRangeHeader(http_request, 0, end_byte)
response = http_wrapper.MakeRequest(
self.bytes_http or http, http_request)
if response.status_code not in self._ACCEPTABLE_STATUSES:
raise exceptions.HttpError.FromResponse(response)
self.__initial_response = response
self.__SetTotal(response.info)
url = response.info.get('content-location', response.request_url)
if client is not None:
url = client.FinalizeTransferUrl(url)
self._Initialize(http, url)
if self.auto_transfer:
self.StreamInChunks() | Initialize this download by making a request.
Args:
http_request: The HttpRequest to use to initialize this download.
http: The httplib2.Http instance for this request.
client: If provided, let this client process the final URL before
sending any additional requests. If client is provided and
http is not, client.http will be used instead. | juraj-google-style |
def compute_index(self, axis, data_object, compute_diff=True):
def pandas_index_extraction(df, axis):
if (not axis):
return df.index
else:
try:
return df.columns
except AttributeError:
return pandas.Index([])
index_obj = (self.index if (not axis) else self.columns)
old_blocks = (self.data if compute_diff else None)
new_indices = data_object.get_indices(axis=axis, index_func=(lambda df: pandas_index_extraction(df, axis)), old_blocks=old_blocks)
return (index_obj[new_indices] if compute_diff else new_indices) | Computes the index after a number of rows have been removed.
Note: In order for this to be used properly, the indexes must not be
changed before you compute this.
Args:
axis: The axis to extract the index from.
data_object: The new data object to extract the index from.
compute_diff: True to use `self` to compute the index from self
rather than data_object. This is used when the dimension of the
index may have changed, but the deleted rows/columns are
unknown.
Returns:
A new pandas.Index object. | codesearchnet |
def field_mask(original, modified):
if ((original is None) and (modified is None)):
return field_mask_pb2.FieldMask()
if ((original is None) and (modified is not None)):
original = copy.deepcopy(modified)
original.Clear()
if ((modified is None) and (original is not None)):
modified = copy.deepcopy(original)
modified.Clear()
if (type(original) != type(modified)):
raise ValueError('expected that both original and modified should be of the same type, received "{!r}" and "{!r}".'.format(type(original), type(modified)))
return field_mask_pb2.FieldMask(paths=_field_mask_helper(original, modified)) | Create a field mask by comparing two messages.
Args:
original (~google.protobuf.message.Message): the original message.
If set to None, this field will be interpreted as an empty
message.
modified (~google.protobuf.message.Message): the modified message.
If set to None, this field will be interpreted as an empty
message.
Returns:
google.protobuf.field_mask_pb2.FieldMask: field mask that contains
the list of field names that have different values between the two
messages. If the messages are equivalent, then the field mask is empty.
Raises:
ValueError: If the ``original`` or ``modified`` are not the same type. | codesearchnet |
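A hedged usage sketch with a well-known proto type; any generated protobuf message should work the same way:

from google.protobuf import timestamp_pb2

original = timestamp_pb2.Timestamp(seconds=100)
modified = timestamp_pb2.Timestamp(seconds=200)
mask = field_mask(original, modified)
print(mask.paths)   # expected to contain 'seconds'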
def extract_signature(func, ignore_first=False):
sig_params = get_signature_params(func)
if ignore_first:
if len(sig_params) == 0:
raise Exception("Methods must take a 'self' argument, but the "
"method '{}' does not have one.".format(
func.__name__))
sig_params = sig_params[1:]
arg_names = []
arg_defaults = []
arg_is_positionals = []
keyword_names = set()
for arg_name, parameter in sig_params:
arg_names.append(arg_name)
arg_defaults.append(parameter.default)
arg_is_positionals.append(parameter.kind == parameter.VAR_POSITIONAL)
if parameter.kind == Parameter.POSITIONAL_OR_KEYWORD:
keyword_names.add(arg_name)
return FunctionSignature(arg_names, arg_defaults, arg_is_positionals,
keyword_names, func.__name__) | Extract the function signature from the function.
Args:
func: The function whose signature should be extracted.
ignore_first: True if the first argument should be ignored. This should
be used when func is a method of a class.
Returns:
A function signature object, which includes the names of the keyword
arguments as well as their default values. | juraj-google-style |
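A usage sketch that relies only on the function above; the printed values assume FunctionSignature exposes its constructor arguments as attributes:

def greet(name, greeting="hello", *extras):
    return f"{greeting}, {name}"

signature = extract_signature(greet)
print(signature.arg_names)       # ['name', 'greeting', 'extras']
print(signature.keyword_names)   # {'name', 'greeting'}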
def pretty_print_config_to_json(self, services, hostname=None):
descriptor = self.get_config_dict(services, hostname)
return json.dumps(descriptor, sort_keys=True, indent=2,
separators=(',', ': ')) | JSON string description of a protorpc.remote.Service in API format.
Args:
services: Either a single protorpc.remote.Service or a list of them
that implements an api/version.
hostname: string, Hostname of the API, to override the value set on the
current service. Defaults to None.
Returns:
string, The API descriptor document as a JSON string. | juraj-google-style |
def dp004(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `dp004`'.format(value))
self._dp004 = value | Corresponds to IDD Field `dp004`
Dew-point temperature corresponding to 0.4% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `dp004`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | juraj-google-style |