code (string, 20-4.93k chars) | docstring (string, 33-1.27k chars) | source (3 classes) |
---|---|---|
def _VerifyGroupConvFwd(self, tensor_in_sizes, filter_in_sizes, dilations, strides, padding, data_format, dtype):
tensor_in = self._CreateNumpyTensor(tensor_in_sizes)
filter_in = self._CreateNumpyTensor(filter_in_sizes)
num_groups = tensor_in_sizes[3] // filter_in_sizes[2]
assert num_groups > 1 and filter_in_sizes[2] * num_groups == tensor_in_sizes[3]
with test_util.device(True):
t1 = constant_op.constant(tensor_in, dtype=dtype)
t2 = constant_op.constant(filter_in, dtype=dtype)
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
if data_format == 'NCHW':
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
dilations = test_util.NHWCToNCHW(dilations)
t1_splits = array_ops.split(t1, num_groups, axis=1)
else:
t1_splits = array_ops.split(t1, num_groups, axis=3)
t2_splits = array_ops.split(t2, num_groups, axis=3)
def MakeConv2d(inputs, filters):
return nn_ops.conv2d(inputs, filters, strides, padding, dilations=dilations, data_format=data_format)
group_conv = MakeConv2d(t1, t2)
group_conv_loop = array_ops.concat([MakeConv2d(t1s, t2s) for t1s, t2s in zip(t1_splits, t2_splits)], axis=1 if data_format == 'NCHW' else 3)
results = self.evaluate([group_conv, group_conv_loop])
tol_to_use = 1e-05
self.assertAllClose(results[0], results[1], atol=tol_to_use, rtol=tol_to_use)
|
Verify the output of group convolution is equal to a for-loop implementation.
Args:
tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,
input_depth, output_depth].
dilations: Dilated rate: [col_dilation, row_dilation]
strides: Stride: [col_stride, row_stride]
padding: Padding type.
data_format: Format of the data tensors.
dtype: Data type for inputs and outputs.
|
github-repos
|
def create_runner(runner_name: str) -> 'PipelineRunner':
runner_name = _RUNNER_MAP.get(runner_name.lower(), _RUNNER_MAP.get(runner_name.lower() + 'runner', runner_name))
if '.' in runner_name:
module, runner = runner_name.rsplit('.', 1)
try:
return getattr(importlib.import_module(module), runner)()
except ImportError:
if 'dataflow' in runner_name.lower():
raise ImportError('Google Cloud Dataflow runner not available, please install apache_beam[gcp]')
elif 'interactive' in runner_name.lower():
raise ImportError('Interactive runner not available, please install apache_beam[interactive]')
else:
raise
else:
raise ValueError('Unexpected pipeline runner: %s. Valid values are %s or the fully qualified name of a PipelineRunner subclass.' % (runner_name, ', '.join(StandardOptions.KNOWN_RUNNER_NAMES)))
|
For internal use only; no backwards-compatibility guarantees.
Creates a runner instance from a runner class name.
Args:
runner_name: Name of the pipeline runner. Possible values are listed in
_RUNNER_MAP above.
Returns:
A runner object.
Raises:
ValueError: if an invalid runner name is used.
|
github-repos
|
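A minimal, hedged sketch of the dotted-path resolution that `create_runner` above relies on: split on the last dot, import the module, fetch the attribute. `load_class` and the `collections.OrderedDict` example are illustrative names, not part of Apache Beam.

import importlib

def load_class(dotted_path: str):
    # "package.module.ClassName" -> module path + attribute name.
    module_name, attr_name = dotted_path.rsplit('.', 1)
    module = importlib.import_module(module_name)
    return getattr(module, attr_name)

# Resolves a stdlib class the same way create_runner resolves a runner class.
cls = load_class('collections.OrderedDict')
assert cls.__name__ == 'OrderedDict'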
def _GetMetadataUpdate(self, metadata_key='', recursive=True, wait=True, timeout=None):
metadata_key = (os.path.join(metadata_key, '') if recursive else metadata_key)
metadata_url = os.path.join(METADATA_SERVER, metadata_key)
params = {'alt': 'json', 'last_etag': self.etag, 'recursive': recursive, 'timeout_sec': (timeout or self.timeout), 'wait_for_change': wait}
while True:
response = self._GetMetadataRequest(metadata_url, params=params, timeout=timeout)
etag_updated = self._UpdateEtag(response)
if (wait and (not etag_updated) and (not timeout)):
continue
else:
break
return json.loads(response.read().decode('utf-8'))
|
Request the contents of metadata server and deserialize the response.
Args:
metadata_key: string, the metadata key to watch for changes.
recursive: bool, True if we should recursively watch for metadata changes.
wait: bool, True if we should wait for a metadata change.
timeout: int, timeout in seconds for returning metadata output.
Returns:
json, the deserialized contents of the metadata server.
|
codesearchnet
|
def get_box_newsfeeds(self, box_key, detail_level = None):
uri = '/'.join([
self.api_uri,
self.boxes_suffix,
box_key,
self.newsfeed_suffix
])
return self._get_newsfeeds(uri, detail_level)
|
Function to get the newsfeed for a box
Args:
box_key key for the box
detail_level arguments for req str ['ALL', 'CONDENSED']
return list of feed dicts; parse at your convenience
|
juraj-google-style
|
def setup_mock_socket_file(self, mock_create_connection, resp=MOCK_RESP):
fake_file = self.MockSocketFile(resp)
fake_conn = mock.MagicMock()
fake_conn.makefile.return_value = fake_file
mock_create_connection.return_value = fake_conn
return fake_file
|
Sets up a fake socket file from the mock connection.
Args:
mock_create_connection: The mock method for creating a connection.
resp: (str) response to give. MOCK_RESP by default.
Returns:
The mock file that will be injected into the code.
|
github-repos
|
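A self-contained sketch of the mocking pattern used by `setup_mock_socket_file` above, built only on the standard library: patch `socket.create_connection`, hand back a `MagicMock` connection whose `makefile()` serves a canned response. The `read_one_line` helper and the response bytes are hypothetical.

import socket
from unittest import mock

MOCK_RESP = b'{"status": 0}\n'  # illustrative canned response

def read_one_line(address):
    # Code under test: open a connection and read one line from its file object.
    conn = socket.create_connection(address)
    return conn.makefile('rb').readline()

with mock.patch('socket.create_connection') as mock_create_connection:
    fake_conn = mock.MagicMock()
    fake_conn.makefile.return_value.readline.return_value = MOCK_RESP
    mock_create_connection.return_value = fake_conn
    assert read_one_line(('localhost', 1234)) == MOCK_RESP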
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(CreateRequestPayload, self).read(input_buffer, kmip_version=kmip_version)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.OBJECT_TYPE, local_buffer):
self._object_type = primitives.Enumeration(enums.ObjectType, tag=enums.Tags.OBJECT_TYPE)
self._object_type.read(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidKmipEncoding('The Create request payload encoding is missing the object type.')
if (kmip_version < enums.KMIPVersion.KMIP_2_0):
if self.is_tag_next(enums.Tags.TEMPLATE_ATTRIBUTE, local_buffer):
self._template_attribute = objects.TemplateAttribute()
self._template_attribute.read(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidKmipEncoding('The Create request payload encoding is missing the template attribute.')
elif self.is_tag_next(enums.Tags.ATTRIBUTES, local_buffer):
attributes = objects.Attributes()
attributes.read(local_buffer, kmip_version=kmip_version)
value = objects.convert_attributes_to_template_attribute(attributes)
self._template_attribute = value
else:
raise exceptions.InvalidKmipEncoding('The Create request payload encoding is missing the attributes structure.')
self.is_oversized(local_buffer)
|
Read the data encoding the Create request payload and decode it into
its constituent parts.
Args:
input_buffer (stream): A data buffer containing encoded object
data, supporting a read method.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidKmipEncoding: Raised if the object type or template
attribute is missing from the encoded payload.
|
codesearchnet
|
def ensure_files(self, filenames):
logger.debug("Testing {0} for the following files: {1}".format(
self.working_dir, filenames))
dircontent = os.listdir(self.working_dir)
for fname in filenames:
if fname not in dircontent:
return False
return True
|
Checks the student submission for specific files.
Args:
filenames (tuple): The list of file names to be checked for.
Returns:
bool: Indicator if all files are found in the student archive.
|
juraj-google-style
|
async def retry_request(*args, retry_exceptions=(asyncio.TimeoutError,
ScriptWorkerRetryException),
retry_async_kwargs=None, **kwargs):
retry_async_kwargs = retry_async_kwargs or {}
return await retry_async(request, retry_exceptions=retry_exceptions,
args=args, kwargs=kwargs, **retry_async_kwargs)
|
Retry the ``request`` function.
Args:
*args: the args to send to request() through retry_async().
retry_exceptions (list, optional): the exceptions to retry on.
Defaults to (asyncio.TimeoutError, ScriptWorkerRetryException).
retry_async_kwargs (dict, optional): the kwargs for retry_async.
If None, use {}. Defaults to None.
**kwargs: the kwargs to send to request() through retry_async().
Returns:
object: the value from request().
|
juraj-google-style
|
def wait(self, timeout=None):
flag = self._finished.wait(timeout=timeout)
if flag is False:
raise TimeoutExpiredError("Timeout waiting for response to event loop operation")
if self._exception is not None:
self._raise_exception()
return self._result
|
Wait for this operation to finish.
You can specify an optional timeout that defaults to no timeout if
None is passed. The result of the operation is returned from this
method. If the operation raised an exception, it is reraised from this
method.
Args:
timeout (float): The maximum number of seconds to wait before timing
out.
|
juraj-google-style
|
def _checkNumerical(inputvalue, minvalue=None, maxvalue=None, description='inputvalue'):
if (not isinstance(description, str)):
raise TypeError('The description should be a string. Given: {0!r}'.format(description))
if (not isinstance(inputvalue, (int, long, float))):
raise TypeError('The {0} must be numerical. Given: {1!r}'.format(description, inputvalue))
if (not isinstance(minvalue, (int, float, long, type(None)))):
raise TypeError('The minvalue must be numeric or None. Given: {0!r}'.format(minvalue))
if (not isinstance(maxvalue, (int, float, long, type(None)))):
raise TypeError('The maxvalue must be numeric or None. Given: {0!r}'.format(maxvalue))
if ((not (minvalue is None)) and (not (maxvalue is None))):
if (maxvalue < minvalue):
raise ValueError('The maxvalue must not be smaller than minvalue. Given: {0} and {1}, respectively.'.format(maxvalue, minvalue))
if (not (minvalue is None)):
if (inputvalue < minvalue):
raise ValueError('The {0} is too small: {1}, but minimum value is {2}.'.format(description, inputvalue, minvalue))
if (not (maxvalue is None)):
if (inputvalue > maxvalue):
raise ValueError('The {0} is too large: {1}, but maximum value is {2}.'.format(description, inputvalue, maxvalue))
|
Check that the given numerical value is valid.
Args:
* inputvalue (numerical): The value to be checked.
* minvalue (numerical): Minimum value. Use None to skip this part of the test.
* maxvalue (numerical): Maximum value. Use None to skip this part of the test.
* description (string): Used in error messages for the checked inputvalue
Raises:
TypeError, ValueError
Note: Can not use the function :func:`_checkString`, as it uses this function internally.
|
codesearchnet
|
def Default() -> 'Blockchain':
if (Blockchain._instance is None):
Blockchain._instance = Blockchain()
Blockchain.GenesisBlock().RebuildMerkleRoot()
return Blockchain._instance
|
Get the default registered blockchain instance.
Returns:
obj: Currently set to `neo.Implementations.Blockchains.LevelDB.LevelDBBlockchain`.
|
codesearchnet
|
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
min_pixels = images_kwargs.get('min_pixels', None) or self.size['shortest_edge']
max_pixels = images_kwargs.get('max_pixels', None) or self.size['longest_edge']
patch_size = images_kwargs.get('patch_size', None) or self.patch_size
merge_size = images_kwargs.get('merge_size', None) or self.merge_size
factor = patch_size * merge_size
resized_height, resized_width = smart_resize(height, width, factor, min_pixels=min_pixels, max_pixels=max_pixels)
grid_h, grid_w = (resized_height // patch_size, resized_width // patch_size)
return grid_h * grid_w
|
A utility that returns number of image patches for a given image size.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
images_kwargs (`dict`, *optional*)
Any kwargs to override defaults of the image processor.
Returns:
`int`: Number of image patches per image.
|
github-repos
|
def create_slot_with_initializer(primary, initializer, shape, dtype, name, colocate_with_primary=True, *, copy_xla_sharding=False):
validate_shape = shape.is_fully_defined()
if isinstance(primary, variables.Variable):
prefix = primary._shared_name
else:
prefix = primary.op.name
with variable_scope.variable_scope(None, prefix + '/' + name):
if colocate_with_primary:
distribution_strategy = distribute_lib.get_strategy()
with distribution_strategy.extended.colocate_vars_with(primary):
return _create_slot_var(primary, initializer, '', validate_shape, shape, dtype, copy_xla_sharding=copy_xla_sharding)
else:
return _create_slot_var(primary, initializer, '', validate_shape, shape, dtype, copy_xla_sharding=copy_xla_sharding)
|
Creates a slot initialized using an `Initializer`.
The type of the slot is determined by the given value.
Args:
primary: The primary `Variable` or `Tensor`.
initializer: An `Initializer`. The initial value of the slot.
shape: Shape of the initial value of the slot.
dtype: Type of the value of the slot.
name: Name to use for the slot variable.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
copy_xla_sharding: Boolean. If True also copies XLA sharding
from primary.
Returns:
A `Variable` object.
|
github-repos
|
def iter_geno_marker(self, markers, return_index=False):
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
if isinstance(markers, str):
markers = [markers]
if return_index:
for marker in markers:
geno, seek = self.get_geno_marker(marker, return_index=True)
yield marker, geno, seek
else:
for marker in markers:
yield marker, self.get_geno_marker(marker)
|
Iterates over genotypes for a list of markers.
Args:
markers (list): The list of markers to iterate onto.
return_index (bool): Whether to return the marker's index or not.
Returns:
tuple: The name of the marker as a string, and its genotypes as a
:py:class:`numpy.ndarray` (additive format).
|
juraj-google-style
|
def count_params(self):
if not self.built:
raise ValueError(f"You tried to call `count_params` on layer '{self.name}', but the layer isn't built. You can build it manually via: `layer.build(input_shape)`.")
return summary_utils.count_params(self.weights)
|
Count the total number of scalars composing the weights.
Returns:
An integer count.
|
github-repos
|
def GetFileEntryByPathSpec(self, path_spec):
return fvde_file_entry.FVDEFileEntry(
self._resolver_context, self, path_spec, is_root=True, is_virtual=True)
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
FVDEFileEntry: file entry or None.
|
juraj-google-style
|
def __init__(self, unicodeHexValue, block):
if unicodeHexValue < 0 or unicodeHexValue > 0x10FFFF:
raise ValueError("numeric value outside Unicode range")
self.unicodeHexValue = unicodeHexValue
self.unichr = py23char(self.unicodeHexValue)
self.name = unicodedata.name(self.unichr)
self.equivalents = {}
self._block = block
|
Set up a unicode character.
Arguments:
unicodeHexValue -- an integer that should correspond to a
Unicode code point.
block -- the CharacterBlock this character belongs to.
Raises:
ValueError -- if unicodeHexValue is not a valid code point.
|
juraj-google-style
|
def unit_pos_to_spot(unit_pos) -> ParkingSpot:
min_ = 50
res = None
for airport in parkings:
for spot in parkings[airport]:
spot_pos = parkings[airport][spot]
dist = math.hypot(unit_pos[0] - spot_pos[0], unit_pos[1] - spot_pos[1])
if dist < min_:
min_ = dist
res = ParkingSpot(airport=airport, spot=spot)
return res
|
Translates a unit position to a known parking spot
Args:
unit_pos: unit position as Vec2
Returns: ParkingSpot object
|
juraj-google-style
|
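A small standalone sketch of the nearest-neighbour search performed by `unit_pos_to_spot` above, keeping the 50-unit cut-off; the `parkings` contents and coordinates are invented for illustration.

import math

# Hypothetical spot coordinates, keyed by airport and then spot name.
parkings = {'Batumi': {'A1': (10.0, 20.0), 'A2': (80.0, 95.0)}}

def nearest_spot(unit_pos, max_dist=50):
    best, best_dist = None, max_dist
    for airport, spots in parkings.items():
        for spot, pos in spots.items():
            dist = math.hypot(unit_pos[0] - pos[0], unit_pos[1] - pos[1])
            if dist < best_dist:
                best, best_dist = (airport, spot), dist
    return best  # None when no spot lies within max_dist

assert nearest_spot((12.0, 18.0)) == ('Batumi', 'A1')
assert nearest_spot((500.0, 500.0)) is None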
def AddValue(self, registry_value):
name = registry_value.name.upper()
if name in self._values:
raise KeyError(
'Value: {0:s} already exists.'.format(registry_value.name))
self._values[name] = registry_value
|
Adds a value.
Args:
registry_value (WinRegistryValue): Windows Registry value.
Raises:
KeyError: if the value already exists.
|
juraj-google-style
|
def todo(self, **kwargs):
path = '%s/%s/todo' % (self.manager.path, self.get_id())
self.manager.gitlab.http_post(path, **kwargs)
|
Create a todo associated to the object.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTodoError: If the todo cannot be set
|
juraj-google-style
|
def list_folder(cls, session, mailbox, folder):
return cls(('/mailboxes/%d/folders/%s/conversations.json' % (mailbox.id, folder.id)), session=session)
|
Return conversations in a specific folder of a mailbox.
Args:
session (requests.sessions.Session): Authenticated session.
mailbox (helpscout.models.Mailbox): Mailbox that folder is in.
folder (helpscout.models.Folder): Folder to list.
Returns:
RequestPaginator(output_type=helpscout.models.Conversation):
Conversations iterator.
|
codesearchnet
|
def Create(self, request, global_params=None):
config = self.GetMethodConfig('Create')
return self._RunMethod(config, request, global_params=global_params)
|
Create an association between a GCP project and a GitHub Enterprise server.
Args:
request: (CloudbuildProjectsLocationsGithubEnterpriseConfigsCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
|
github-repos
|
def to_md_file(string, filename, out_path="."):
md_file = "%s.md" % filename
with open(os.path.join(out_path, md_file), "w") as f:
f.write(string)
print("wrote {}.".format(md_file))
|
Write a string to a Markdown (.md) file.
Args:
string (str): string with line breaks to write to file.
filename (str): filename without the .md
out_path (str): The output directory
|
juraj-google-style
|
def plot_iso(axis, step, var):
xmesh, ymesh, fld = get_meshes_fld(step, var)
if conf.field.shift:
fld = np.roll(fld, conf.field.shift, axis=0)
axis.contour(xmesh, ymesh, fld, linewidths=1)
|
Plot isocontours of scalar field.
Args:
axis (:class:`matplotlib.axes.Axes`): the axis handler of an
existing matplotlib figure where the isocontours should
be plotted.
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
var (str): the scalar field name.
|
juraj-google-style
|
def get_repository(self, path):
parts = path.split('@', 1)
if (len(parts) == 1):
parts = ('filesystem', parts[0])
(repo_type, location) = parts
if (repo_type == 'filesystem'):
location = os.path.abspath(location)
normalised_path = ('%s@%s' % (repo_type, location))
return self._get_repository(normalised_path)
|
Get a package repository.
Args:
path (str): Entry from the 'packages_path' config setting. This may
simply be a path (which is managed by the 'filesystem' package
repository plugin), or a string in the form "type@location",
where 'type' identifies the repository plugin type to use.
Returns:
`PackageRepository` instance.
|
codesearchnet
|
def _new_from_rft(self, base_template, rft_file):
self._add_entry(base_template)
self._add_entry(templates.NEW_FROM_RFT.format(rft_file_path=rft_file, rft_file_name=op.basename(rft_file)))
|
Append a new file from .rft entry to the journal.
This instructs Revit to create a new model based on
the provided .rft template.
Args:
base_template (str): new file journal template from rmj.templates
rft_file (str): full path to .rft template to be used
|
codesearchnet
|
def Group(items, key):
result = {}
for item in items:
result.setdefault(key(item), []).append(item)
return result
|
Groups items by given key function.
Args:
items: An iterable or an iterator of items.
key: A function which given each item will return the key.
Returns:
A dict with keys being each unique key and values being a list of items of
that key.
|
codesearchnet
|
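A usage sketch for the `Group` helper above; the definition is repeated verbatim so the example runs on its own, and grouping words by first letter is just an illustrative key function.

def Group(items, key):
    result = {}
    for item in items:
        result.setdefault(key(item), []).append(item)
    return result

words = ['apple', 'avocado', 'banana', 'blueberry', 'cherry']
by_letter = Group(words, key=lambda w: w[0])
assert by_letter == {'a': ['apple', 'avocado'],
                     'b': ['banana', 'blueberry'],
                     'c': ['cherry']}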
def append(self, future):
future.prev = self.tail
if self.tail is None:
assert self.head is None
self.head = future
else:
self.tail.next = future
self.tail = future
future.add_done_callback(self.remove)
|
Append an object to the linked list.
Args:
future (PlasmaObjectFuture): A PlasmaObjectFuture instance.
|
juraj-google-style
|
def set_api_url(self, api_url='https://{lang}.wikipedia.org/w/api.php', lang='en'):
old_api_url = self._api_url
old_lang = self._lang
self._lang = lang.lower()
self._api_url = api_url.format(lang=self._lang)
try:
self._get_site_info()
self.__supported_languages = None
except MediaWikiException:
self._api_url = old_api_url
self._lang = old_lang
raise MediaWikiAPIURLError(api_url)
self.clear_memoized()
|
Set the API URL and language
Args:
api_url (str): API URL to use
lang (str): Language of the API URL
Raises:
:py:func:`mediawiki.exceptions.MediaWikiAPIURLError`: if the \
url is not a valid MediaWiki site
|
codesearchnet
|
def GetBullets(self):
return self._bullets
|
Returns the bullet characters list.
Use the list elements in order for best appearance in nested bullet lists,
wrapping back to the first element for deep nesting. The list size depends
on the console implementation.
Returns:
A tuple of bullet characters.
|
github-repos
|
def __init__(self, dims, multiples, name="tile_by_dim"):
super(TileByDim, self).__init__(name=name)
self._dims = dims
self._multiples = multiples
if np.unique(dims).size != len(dims):
raise ValueError("dims must not have any repeated integers.")
if len(multiples) != len(dims):
raise ValueError(
"multiples must have the same length as dims: {}.".format(len(dims)))
|
Constructs the `TileByDim` module.
Args:
dims: The dimensions to tile along, as a list of unique integers.
multiples: The multiple of the tiling, as a list of integers. Must
be the same length as the `dims` list.
name: The name of the module.
Raises:
ValueError: If `dims` has non-unique integers, or if the size of
`multiples` is different from the size of `dims`.
|
juraj-google-style
|
def get_json(filename):
check_if_this_file_exist(filename)
filename = os.path.abspath(filename)
s = command_line(['exiftool', '-G', '-j', '-sort', filename])
if s:
s = s.decode('utf-8').rstrip('\r\n')
return json.loads(s)
else:
return s
|
Return a json value of the exif
Get a filename and return a JSON object
Arguments:
filename {string} -- your filename
Returns:
[JSON] -- Return a JSON object
|
juraj-google-style
|
def total_cost_function(self, item_a, item_b, time_a, time_b):
distances = np.zeros(len(self.weights))
for (c, component) in enumerate(self.cost_function_components):
distances[c] = component(item_a, time_a, item_b, time_b, self.max_values[c])
total_distance = np.sum((self.weights * distances))
return total_distance
|
Calculate total cost function between two items.
Args:
item_a: STObject
item_b: STObject
time_a: Timestep in item_a at which cost function is evaluated
time_b: Timestep in item_b at which cost function is evaluated
Returns:
The total weighted distance between item_a and item_b
|
codesearchnet
|
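A toy illustration of the weighted-sum pattern in `total_cost_function` above: each component cost is normalised by its max value, then combined with per-component weights. The component functions, weights and numbers are invented for demonstration.

import numpy as np

def centroid_distance(a, b, max_value):
    # Normalised horizontal separation, capped at 1.
    return min(abs(a['x'] - b['x']) / max_value, 1.0)

def area_difference(a, b, max_value):
    return min(abs(a['area'] - b['area']) / max_value, 1.0)

components = [centroid_distance, area_difference]
weights = np.array([2.0, 1.0])
max_values = [100.0, 50.0]

item_a = {'x': 10.0, 'area': 40.0}
item_b = {'x': 40.0, 'area': 20.0}

distances = np.array([comp(item_a, item_b, mv)
                      for comp, mv in zip(components, max_values)])
total = float(np.sum(weights * distances))
assert abs(total - (2.0 * 0.3 + 1.0 * 0.4)) < 1e-9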
def prepare_all_data(data_dir, block_pct_tokens_thresh=0.1):
gs_blocks_dir = os.path.join(data_dir, GOLD_STANDARD_BLOCKS_DIRNAME)
gs_blocks_filenames = get_filenames(gs_blocks_dir, full_path=False, match_regex=re.escape(GOLD_STANDARD_BLOCKS_EXT))
gs_blocks_fileroots = (re.search(('(.+)' + re.escape(GOLD_STANDARD_BLOCKS_EXT)), gs_blocks_filename).group(1) for gs_blocks_filename in gs_blocks_filenames)
return [prepare_data(data_dir, fileroot, block_pct_tokens_thresh) for fileroot in gs_blocks_fileroots]
|
Prepare data for all HTML + gold standard blocks examples in ``data_dir``.
Args:
data_dir (str)
block_pct_tokens_thresh (float): must be in [0.0, 1.0]
Returns:
List[Tuple[str, List[float, int, List[str]], List[float, int, List[str]]]]
See Also:
:func:`prepare_data`
|
codesearchnet
|
def _ReadTable(self, tables, file_object, table_offset):
table_header = self._ReadTableHeader(file_object, table_offset)
for record_offset in table_header.record_offsets:
if (record_offset == 0):
continue
record_offset += table_offset
if (table_header.record_type == self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INFO):
self._ReadRecordSchemaInformation(tables, file_object, record_offset)
elif (table_header.record_type == self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INDEXES):
self._ReadRecordSchemaIndexes(tables, file_object, record_offset)
elif (table_header.record_type == self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_ATTRIBUTES):
self._ReadRecordSchemaAttributes(tables, file_object, record_offset)
else:
self._ReadRecord(tables, file_object, record_offset, table_header.record_type)
|
Reads the table.
Args:
tables (dict[int, KeychainDatabaseTable]): tables per identifier.
file_object (file): file-like object.
table_offset (int): offset of the table relative to the start of
the file.
Raises:
ParseError: if the table cannot be read.
|
codesearchnet
|
def avl_join2(t1, t2):
if ((t1 is None) and (t2 is None)):
new_root = None
elif (t2 is None):
new_root = t1
elif (t1 is None):
new_root = t2
else:
(new_left, last_node) = avl_split_last(t1)
debug = 0
if debug:
EulerTourTree(root=new_left)._assert_nodes('new_left')
EulerTourTree(root=last_node)._assert_nodes('last_node')
EulerTourTree(root=t2)._assert_nodes('t2')
print('new_left')
EulerTourTree(root=new_left).print_tree()
print('last_node')
EulerTourTree(root=last_node).print_tree()
print('t2')
EulerTourTree(root=t2).print_tree()
new_root = avl_join(new_left, t2, last_node)
if debug:
print('new_root')
EulerTourTree(root=new_root).print_tree()
EulerTourTree(root=last_node)._assert_nodes('new_root')
return new_root
|
join two trees without any intermediate key
Returns:
Node: new_root
O(log(n) + log(m)) = O(r(t1) + r(t2))
For AVL-Trees the rank r(t1) = height(t1) - 1
|
codesearchnet
|
def _list_objects(self, client_kwargs, path, max_request_entries):
client_kwargs = client_kwargs.copy()
if max_request_entries:
client_kwargs['MaxKeys'] = max_request_entries
while True:
with _handle_client_error():
response = self.client.list_objects_v2(
Prefix=path, **client_kwargs)
try:
for obj in response['Contents']:
yield obj.pop('Key'), obj
except KeyError:
raise _ObjectNotFoundError('Not found: %s' % path)
try:
client_kwargs['ContinuationToken'] = response[
'NextContinuationToken']
except KeyError:
break
|
Lists objects.
args:
client_kwargs (dict): Client arguments.
path (str): Path relative to current locator.
max_request_entries (int): If specified, maximum entries returned
by request.
Returns:
generator of tuple: object name str, object header dict
|
juraj-google-style
|
def evaluate(code: str, *, global_vars: Optional[Dict[str, Any]]=None, permission: Optional[permissions.CodePermission]=None, returns_stdout: bool=False, outputs_intermediate: bool=False) -> Union[Any, Dict[str, Any]]:
permission = permission or permissions.get_permission()
ctx = dict(get_context())
if global_vars:
ctx.update(global_vars)
code_block = parsing.parse(code, permission)
global_vars, orig_global_vars = (ctx, ctx.copy())
if not code_block.body:
return {} if outputs_intermediate else None
stdout = io.StringIO()
with contextlib.redirect_stdout(stdout):
if hasattr(code_block.body[-1], 'value'):
last_expr = code_block.body.pop()
result_vars = [RESULT_KEY]
if isinstance(last_expr, ast.Assign):
for name_node in last_expr.targets:
if isinstance(name_node, ast.Name):
result_vars.append(name_node.id)
last_expr = ast.Expression(last_expr.value)
try:
exec(compile(code_block, '', mode='exec'), global_vars)
result = eval(compile(last_expr, '', mode='eval'), global_vars)
except BaseException as e:
raise errors.CodeError(code, e) from e
for result_var in result_vars:
global_vars[result_var] = result
else:
try:
exec(compile(code_block, '', mode='exec'), global_vars)
except BaseException as e:
raise errors.CodeError(code, e) from e
global_vars[RESULT_KEY] = list(global_vars.values())[-1]
if returns_stdout:
return stdout.getvalue()
if outputs_intermediate:
outputs = {}
for k, v in global_vars.items():
if k == '__builtins__':
continue
if k not in orig_global_vars or v is not orig_global_vars[k]:
outputs[k] = v
outputs[STDOUT_KEY] = stdout.getvalue()
return outputs
return global_vars[RESULT_KEY]
|
Executes Python code.
Features:
* Fine-grained execution policy for limiting what APIs could be executed.
This eliminates the need for sandboxing.
* It exposes both the final results and intermediate results (variables).
Args:
code: Python code to run.
global_vars: An optional dict as the globals that could be referenced by the
code.
permission: Permission for the Python code to run.
returns_stdout: If True, the stdout (a str) will be returned.
outputs_intermediate: Applicable when returns_stdout is False. If True,
intermediate output will be outputted as a dict, with the last line's
value accessible by key '__result__' and the std output accessible by
key '__stdout__'. Otherwise the value of the last line will be returned.
Returns:
The value of the last line of the code block. Or a dict of variable
names of all locals to their evaluated values as the output of the code to
run. The value for the last line can be accessed by key '__result__'. Or the
stdout as a str.
|
github-repos
|
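A minimal sketch of the core mechanism in `evaluate` above: parse the code with `ast`, `exec` everything except a trailing expression, `eval` that expression for the result, and capture stdout. It deliberately omits the permission checks, intermediate-variable tracking and error wrapping of the real function; `run_and_capture` is a hypothetical name.

import ast
import contextlib
import io

def run_and_capture(code: str):
    # Split off a trailing expression statement, exec the rest, eval the tail.
    module = ast.parse(code, mode='exec')
    last_expr = None
    if module.body and isinstance(module.body[-1], ast.Expr):
        last_expr = ast.Expression(module.body.pop().value)
    env = {}
    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):
        exec(compile(module, '<sketch>', 'exec'), env)
        result = (eval(compile(last_expr, '<sketch>', 'eval'), env)
                  if last_expr is not None else None)
    return result, buf.getvalue()

result, stdout = run_and_capture("print('hello')\nx = 2\nx + 3")
assert result == 5 and stdout == 'hello\n'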
def __init__(self, conf, conn=None):
super(LdapSource, self).__init__(conf)
self._dn_requested = False
self._SetDefaults(conf)
self._conf = conf
self.ldap_controls = makeSimplePagedResultsControl(self.PAGE_SIZE)
self._last_search_params = None
if conn is None:
rlo = ldap.ldapobject.ReconnectLDAPObject
self.conn = rlo(uri=conf['uri'], retry_max=conf['retry_max'], retry_delay=conf['retry_delay'])
if conf['tls_starttls'] == 1:
self.conn.start_tls_s()
if 'ldap_debug' in conf:
self.conn.set_option(ldap.OPT_DEBUG_LEVEL, conf['ldap_debug'])
else:
self.conn = conn
self.Bind(conf)
|
Initialise the LDAP Data Source.
Args:
conf: config.Config instance
conn: An instance of ldap.LDAPObject that'll be used as the connection.
|
github-repos
|
def start_app(self, bundle_id):
self._bundle_id = bundle_id
self._session = self._wda.session(bundle_id)
return self._session
|
Start an application
Args:
- bundle_id: (string) app bundle ID
Returns:
WDA session object
|
juraj-google-style
|
def list_vnets(access_token, subscription_id):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Network/',
'/virtualNetworks?api-version=', NETWORK_API])
return do_get(endpoint, access_token)
|
List the VNETs in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of VNets list with properties.
|
juraj-google-style
|
def segment(self, source, language=None):
if language and not language in self.supported_languages:
raise ValueError(
'Language {} is not supported by MeCab segmenter'.format(language))
chunks = ChunkList()
seek = 0
source_str = source.encode('utf-8') if six.PY2 else source
results = self.tagger.parse(source_str).split('\n')[:-2]
for row in results:
if six.PY2:
row = row.decode('utf-8')
token = row.split('\t')
word = token[0]
labels = token[3].split('-')
pos = labels[0]
label = labels[1] if len(labels) > 1 else None
if source[seek: seek + len(word)] != word:
assert source[seek] == ' '
assert source[seek + 1: seek + len(word) + 1] == word
chunks.append(Chunk.space())
seek += 1
dependency = None
if pos in _DEPENDENT_POS_FORWARD:
dependency = True
elif pos in _DEPENDENT_POS_BACKWARD:
dependency = False
elif label in _DEPENDENT_LABEL_FORWARD:
dependency = True
elif label in _DEPENDENT_LABEL_BACKWARD:
dependency = False
chunk = Chunk(word, pos=pos, label=label, dependency=dependency)
if chunk.is_punct():
chunk.dependency = chunk.is_open_punct()
chunks.append(chunk)
seek += len(word)
chunks.resolve_dependencies()
return chunks
|
Returns a chunk list from the given sentence.
Args:
source (str): Source string to segment.
language (:obj:`str`, optional): A language code.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
Raises:
ValueError: If :obj:`language` is given and it is not included in
:obj:`supported_languages`.
|
juraj-google-style
|
def loss_labels(self, class_queries_logits: Tensor, class_labels: List[Tensor], indices: Tuple[np.array]) -> Dict[str, Tensor]:
pred_logits = class_queries_logits
batch_size, num_queries, _ = pred_logits.shape
criterion = nn.CrossEntropyLoss(weight=self.empty_weight)
idx = self._get_predictions_permutation_indices(indices)
target_classes_o = torch.cat([target[j] for target, (_, j) in zip(class_labels, indices)])
target_classes = torch.full((batch_size, num_queries), fill_value=self.num_labels, dtype=torch.int64, device=pred_logits.device)
target_classes[idx] = target_classes_o
pred_logits_transposed = pred_logits.transpose(1, 2)
loss_ce = criterion(pred_logits_transposed, target_classes)
losses = {'loss_cross_entropy': loss_ce}
return losses
|
Compute the losses related to the labels using cross entropy.
Args:
class_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, num_labels`
class_labels (`List[torch.Tensor]`):
List of class labels of shape `(labels)`.
indices (`Tuple[np.array])`:
The indices computed by the Hungarian matcher.
Returns:
`Dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:
- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
|
github-repos
|
def guess_content_type_and_encoding(path):
for (ext, content_type) in _EXTENSION_TO_MIME_TYPE.items():
if path.endswith(ext):
return content_type
(content_type, encoding) = mimetypes.guess_type(path)
content_type = (content_type or 'application/binary')
return (content_type, encoding)
|
Guess the content type of a path, using ``mimetypes``.
Falls back to "application/binary" if no content type is found.
Args:
path (str): the path to guess the mimetype of
Returns:
tuple: the content type and encoding of the file
|
codesearchnet
|
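A small sketch of the lookup-table-then-`mimetypes` fallback used by `guess_content_type_and_encoding` above. The extension table here is illustrative, not scriptworker's actual `_EXTENSION_TO_MIME_TYPE`.

import mimetypes

# Illustrative overrides: extension -> (content type, encoding).
_EXTENSION_OVERRIDES = {
    '.log': ('text/plain', None),
    '.asc': ('text/plain', None),
}

def guess_type(path):
    for ext, content_type_and_encoding in _EXTENSION_OVERRIDES.items():
        if path.endswith(ext):
            return content_type_and_encoding
    content_type, encoding = mimetypes.guess_type(path)
    return (content_type or 'application/binary', encoding)

assert guess_type('public/logs/live_backing.log') == ('text/plain', None)
assert guess_type('notes.txt')[0] == 'text/plain'
assert guess_type('mystery.blob')[0] == 'application/binary'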
def execute(self, method, **kwargs):
payload = {
'id': 1,
'jsonrpc': '2.0',
'method': method,
'params': kwargs
}
credentials = base64.b64encode('{}:{}'.format(self._username, self._password).encode())
auth_header_prefix = 'Basic ' if self._auth_header == DEFAULT_AUTH_HEADER else ''
headers = {
self._auth_header: auth_header_prefix + credentials.decode(),
'Content-Type': 'application/json',
}
return self._do_request(headers, payload)
|
Call remote API procedure
Args:
method: Procedure name
kwargs: Procedure named arguments
Returns:
Procedure result
Raises:
urllib2.HTTPError: Any HTTP error (Python 2)
urllib.error.HTTPError: Any HTTP error (Python 3)
|
juraj-google-style
|
def __init__(self, text='', font_attr=None):
self.text = text
if font_attr:
self.font_attr_segs = [(0, len(text), font_attr)]
else:
self.font_attr_segs = []
|
Construct a RichLine with no rich attributes or a single attribute.
Args:
text: Raw text string
font_attr: If specified, a single font attribute to be applied to the
entire text. Extending this object via concatenation allows creation
of text with varying attributes.
|
github-repos
|
def __call__(self, observed_obj, *arg, **kw):
if self.identify_observed:
return self.func_wr()(observed_obj, *arg, **kw)
else:
return self.func_wr()(*arg, **kw)
|
Call the function I wrap.
Args:
*arg: The arguments passed to me by the observed object.
**kw: The keyword args passed to me by the observed object.
observed_obj: The observed object which called me.
Returns:
Whatever the function I wrap returns.
|
juraj-google-style
|
def get_all_disorder_predictions(self, iupred_path='/home/nathan/software/iupred/',
iupred_exec='iupred', disembl_cmd='/home/nathan/software/DisEMBL-1.4/DisEMBL.py',
representative_only=True):
if representative_only:
if not self.representative_sequence:
log.warning('{}: no representative sequence set, cannot get disorder properties'.format(self.id))
return
if not self.representative_sequence.seq:
log.warning('{}: representative sequence {} set, but no sequence stored. '
'Cannot get disorder properties.'.format(self.id, self.representative_sequence.id))
return
self.representative_sequence.store_disembl_disorder_predictions(disembl_cmd=disembl_cmd)
if not representative_only:
for s in self.sequences:
if not s.seq:
log.warning('{}: no sequence stored. '
'Cannot get disorder properties.'.format(s.id))
continue
else:
s.store_disembl_disorder_predictions(disembl_cmd=disembl_cmd)
|
Run DisEMBL to predict disorder properties of the protein sequences.
Results are stored in the protein's respective SeqProp objects at ``.annotations``
Args:
representative_only (bool): If analysis should only be run on the representative sequence
|
juraj-google-style
|
def __init__(self, dataset, devices, max_buffer_size=1, prefetch_buffer_size=1, source_device='/cpu:0'):
options = options_lib.Options()
options.experimental_distribute.num_devices = len(devices)
if prefetch_buffer_size == 0:
options.experimental_optimization.inject_prefetch = False
dataset = dataset.with_options(options)
self._dataset = dataset._apply_debug_options()
self._experimental_slack = dataset.options().experimental_slack
self._devices = devices
self._source_device = source_device
self._source_device_tensor = ops.convert_to_tensor(source_device)
self._max_buffer_size = max_buffer_size
self._prefetch_buffer_size = prefetch_buffer_size
if self._prefetch_buffer_size > self._max_buffer_size:
self._max_buffer_size = self._prefetch_buffer_size
with ops.device(self._source_device):
shared_name = ''
if context.executing_eagerly():
shared_name = context.anonymous_name()
self._multi_device_iterator_resource = gen_dataset_ops.multi_device_iterator(devices=self._devices, shared_name=shared_name, container='', **self._dataset._flat_structure)
if context.executing_eagerly():
self._resource_deleter = resource_variable_ops.EagerResourceDeleter(handle=self._multi_device_iterator_resource, handle_device=self._source_device)
self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(self._dataset._variant_tensor, self._multi_device_iterator_resource, max_buffer_size=self._max_buffer_size)
self._prototype_device_datasets = []
for i, device in enumerate(self._devices):
with ops.device(device):
ds = _PerDeviceGenerator(i, self._multi_device_iterator_resource, self._incarnation_id, self._source_device_tensor, self._dataset.element_spec, iterator_is_anonymous=False)
self._prototype_device_datasets.append(ds)
self._device_iterators = []
for i, device in enumerate(self._devices):
with ops.device(device):
ds = _create_device_dataset(self._prototype_device_datasets[i], self._incarnation_id, self._prefetch_buffer_size, self._experimental_slack)
if context.executing_eagerly():
self._device_iterators.append(dataset_ops.make_one_shot_iterator(ds))
else:
self._device_iterators.append(dataset_ops.make_initializable_iterator(ds))
if not context.executing_eagerly():
device_iterator_initializers = [iterator.initializer for iterator in self._device_iterators]
self._initializer = control_flow_ops.group(*device_iterator_initializers)
|
Constructs a MultiDeviceIterator.
Args:
dataset: The input dataset to be iterated over.
devices: The list of devices to fetch data to.
max_buffer_size: Maximum size of the host side per device buffer to keep.
prefetch_buffer_size: if > 0, then we setup a buffer on each device to
prefetch into.
source_device: The host device to place the `dataset` on. In order to
prevent deadlocks, if the prefetch_buffer_size is greater than the
max_buffer_size, we set the max_buffer_size to prefetch_buffer_size.
|
github-repos
|
def insert(parent: ScheduleComponent, time: int, child: ScheduleComponent,
name: str = None) -> Schedule:
return union(parent, (time, child), name=name)
|
Return a new schedule with the `child` schedule inserted into the `parent` at `start_time`.
Args:
parent: Schedule to be inserted into
time: Time to be inserted defined with respect to `parent`
child: Schedule to insert
name: Name of the new schedule. Defaults to name of parent
|
juraj-google-style
|
def setup_sdk_logging(logfile=None, loglevel=logging.INFO):
logging.root.setLevel(logging.DEBUG)
logging.root.addHandler(logging.NullHandler())
if logfile:
fh = logging.FileHandler(logfile)
fh.setLevel(loglevel)
fh.setFormatter(get_default_log_formatter())
logging.root.addHandler(fh)
|
Setup a NullHandler to the root logger. If ``logfile`` is passed,
additionally add a FileHandler in ``loglevel`` level.
Args:
logfile(str): A path to setup a log file.
loglevel(int): :mod:`logging` log level.
Returns:
None
|
codesearchnet
|
def from_pb(cls, pb):
obj = cls._from_pb(pb)
obj._pb = pb
return obj
|
Instantiate the object from a protocol buffer.
Args:
pb (protobuf)
Save a reference to the protocol buffer on the object.
|
juraj-google-style
|
def relaxng(filename=None):
E = ElementMaker(namespace="http:
grammar = E.grammar( E.start( E.element(
E.attribute(name='id',ns="http:
E.optional( E.attribute(name='version') ),
E.optional( E.attribute(name='generator') ),
E.element(
E.optional(E.attribute(name='type')),
E.optional(E.attribute(name='src')),
E.element( E.zeroOrMore( E.choice( *relaxng_declarations() ) ) ,name='annotations'),
E.zeroOrMore(
E.element(E.attribute(name='id'), E.text(), name='meta'),
),
E.zeroOrMore(
E.ref(name="foreign-data"),
),
E.zeroOrMore(
E.element(
E.attribute(name='id',ns="http:
E.optional(E.attribute(name='type')),
E.optional(E.attribute(name='src')),
E.zeroOrMore(
E.element(E.attribute(name='id'), E.text(), name='meta'),
),
E.zeroOrMore(
E.ref(name="foreign-data"),
),
name="submetadata"
)
),
name='metadata',
),
E.interleave(
E.zeroOrMore(
E.ref(name='text'),
),
E.zeroOrMore(
E.ref(name='speech'),
),
),
name='FoLiA',
ns = NSFOLIA
) ),
E.define( E.interleave(E.zeroOrMore(E.ref(name="any_element")),E.text()), name="any_content"),
E.define( E.element(E.anyName(), E.zeroOrMore(E.ref(name="any_attribute")), E.zeroOrMore(E.ref(name="any_content"))), name="any_element"),
E.define( E.attribute(E.anyName()), name="any_attribute"),
E.define( E.zeroOrMore(E.attribute(E.anyName(getattr(E,'except')(E.nsName(),E.nsName(ns=""),E.nsName(ns="http:
datatypeLibrary="http:
)
done = {}
for c in globals().values():
if 'relaxng' in dir(c):
if c.relaxng and c.XMLTAG and not c.XMLTAG in done:
done[c.XMLTAG] = True
definition = c.relaxng()
grammar.append( definition )
if c.XMLTAG == 'item':
definition_alias = c.relaxng()
definition_alias.set('name','listitem')
definition_alias[0].set('name','listitem')
grammar.append( definition_alias )
if filename:
if sys.version < '3':
f = io.open(filename,'w',encoding='utf-8')
else:
f = io.open(filename,'wb')
if LXE:
if sys.version < '3':
f.write( ElementTree.tostring(relaxng(),pretty_print=True).replace("</define>","</define>\n\n") )
else:
f.write( ElementTree.tostring(relaxng(),pretty_print=True).replace(b"</define>",b"</define>\n\n") )
else:
f.write( ElementTree.tostring(relaxng()).replace("</define>","</define>\n\n") )
f.close()
return grammar
|
Generates a RelaxNG Schema for FoLiA. Optionally saves it to file.
Args:
filename (str): Save the schema to the following filename
Returns:
lxml.ElementTree: The schema
|
juraj-google-style
|
def autopep8_diff(fpath):
import utool as ut
args = ('autopep8', fpath, '--diff')
res = ut.cmd(args, verbose=False)
out, err, ret = res
ut.print_difftext(out)
|
r"""
Args:
fpath (str): file path string
CommandLine:
python -m utool.util_dev --test-autopep8_diff --fpath ingest_data.py
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> fpath = ut.get_argval('--fpath', type_=str, default='ingest_data.py')
>>> result = autopep8_diff(fpath)
>>> print(result)
|
juraj-google-style
|
def flux_randomization(model, threshold, tfba, solver):
optimize = {}
for reaction_id in model.reactions:
if model.is_reversible(reaction_id):
optimize[reaction_id] = ((2 * random.random()) - 1.0)
else:
optimize[reaction_id] = random.random()
fba = _get_fba_problem(model, tfba, solver)
for (reaction_id, value) in iteritems(threshold):
fba.prob.add_linear_constraints((fba.get_flux_var(reaction_id) >= value))
fba.maximize(optimize)
for reaction_id in model.reactions:
(yield (reaction_id, fba.get_flux(reaction_id)))
|
Find a random flux solution on the boundary of the solution space.
The reactions in the threshold dictionary are constrained with the
associated lower bound.
Args:
model: MetabolicModel to solve.
threshold: dict of additional lower bounds on reaction fluxes.
tfba: If True enable thermodynamic constraints.
solver: LP solver instance to use.
Returns:
An iterator of reaction ID and reaction flux pairs.
|
codesearchnet
|
def delete(self, addon_id, data={}, **kwargs):
return super(Addon, self).delete(addon_id, data, **kwargs)
|
Delete addon for given id
Args:
addon_id : Id for which addon object has to be deleted
|
juraj-google-style
|
def as_report_request(self, rules, timer=datetime.utcnow):
if (not self.service_name):
raise ValueError(u'the service name must be set')
op = super(Info, self).as_operation(timer=timer)
if (op.operationId and op.operationName):
labels = {}
for known_label in rules.labels:
known_label.do_labels_update(self, labels)
labels[_KNOWN_LABELS.SCC_PLATFORM.label_name] = self.platform.friendly_string()
labels[_KNOWN_LABELS.SCC_SERVICE_AGENT.label_name] = SERVICE_AGENT
labels[_KNOWN_LABELS.SCC_USER_AGENT.label_name] = USER_AGENT
if labels:
op.labels = encoding.PyValueToMessage(sc_messages.Operation.LabelsValue, labels)
for known_metric in rules.metrics:
known_metric.do_operation_update(self, op)
now = timer()
op.logEntries = [self._as_log_entry(l, now) for l in rules.logs]
return sc_messages.ServicecontrolServicesReportRequest(serviceName=self.service_name, reportRequest=sc_messages.ReportRequest(operations=[op]))
|
Makes a `ServicecontrolServicesReportRequest` from this instance
Args:
rules (:class:`ReportingRules`): determines what labels, metrics and
logs to include in the report request.
timer: a function that determines the current time
Return:
a ``ServicecontrolServicesReportRequest`` generated from this instance
governed by the provided ``rules``
Raises:
ValueError: if the fields in this instance cannot be used to create
a valid ``ServicecontrolServicesReportRequest``
|
codesearchnet
|
def from_ops(*operations: ops.OP_TREE, strategy: InsertStrategy=InsertStrategy.EARLIEST, device: devices.Device=devices.UnconstrainedDevice) -> 'Circuit':
result = Circuit(device=device)
result.append(operations, strategy)
return result
|
Creates an empty circuit and appends the given operations.
Args:
operations: The operations to append to the new circuit.
strategy: How to append the operations.
device: Hardware that the circuit should be able to run on.
Returns:
The constructed circuit containing the operations.
|
codesearchnet
|
def _apply_with_random_selector(x, func, num_cases):
sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)
])[0]
|
Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
|
juraj-google-style
|
def diffs_prof(step):
(diff, rad) = diff_prof(step)
return (_scale_prof(step, diff, rad), rad)
|
Scaled diffusion.
This computation takes sphericity into account if necessary.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the diffusion and the radial position
at which it is evaluated.
|
codesearchnet
|
def correct_pad(kernel_size: Union[int, Tuple], adjust: bool=True):
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
correct = (kernel_size[0] // 2, kernel_size[1] // 2)
if adjust:
return (correct[1] - 1, correct[1], correct[0] - 1, correct[0])
else:
return (correct[1], correct[1], correct[0], correct[0])
|
Utility function to get the tuple padding value for the depthwise convolution.
Args:
kernel_size (`int` or `tuple`):
Kernel size of the convolution layers.
adjust (`bool`, *optional*, defaults to `True`):
Adjusts padding value to apply to right and bottom sides of the input.
|
github-repos
|
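A worked check of `correct_pad` above, with the truncated line restored as `correct = (kernel_size[0] // 2, kernel_size[1] // 2)` (an assumption based on the usual half-kernel padding trick) and the definition repeated so the snippet runs on its own. The returned tuple is typically consumed as (left, right, top, bottom) padding, e.g. by `nn.ZeroPad2d`; treat that interpretation as an assumption.

from typing import Tuple, Union

def correct_pad(kernel_size: Union[int, Tuple], adjust: bool = True):
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    correct = (kernel_size[0] // 2, kernel_size[1] // 2)  # half-kernel offsets
    if adjust:
        return (correct[1] - 1, correct[1], correct[0] - 1, correct[0])
    return (correct[1], correct[1], correct[0], correct[0])

assert correct_pad(3) == (0, 1, 0, 1)
assert correct_pad(3, adjust=False) == (1, 1, 1, 1)
assert correct_pad((5, 3)) == (0, 1, 1, 2)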
def set_evaluation_parameter(self, parameter_name, parameter_value):
if ('evaluation_parameters' not in self._expectations_config):
self._expectations_config['evaluation_parameters'] = {}
self._expectations_config['evaluation_parameters'].update({parameter_name: parameter_value})
|
Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate
parameterized expectations.
Args:
parameter_name (string): The name of the kwarg to be replaced at evaluation time
parameter_value (any): The value to be used
|
codesearchnet
|
def __init__(self, cluster_id, variant_id, case_id):
super(Identity, self).__init__(
cluster_id=cluster_id,
variant_id=variant_id,
case_id=case_id,
)
|
Construct an identity object
Args:
cluster_id(str): Ref to a cluster
variant_id (str): ID from variant
case_id (str): What case it belongs to
|
juraj-google-style
|
def format_level_2_memory(memory, header=None):
memory_list = []
for shot_memory in memory:
memory_list.append(format_counts_memory(shot_memory, header))
return memory_list
|
Format an experiment result memory object for measurement level 2.
Args:
memory (list): Memory from experiment with `meas_level==2` and `memory==True`.
header (dict): the experiment header dictionary containing
useful information for postprocessing.
Returns:
list[str]: List of bitstrings
|
codesearchnet
|
def formula_double_format(afloat, ignore_ones=True, tol=1e-8):
if ignore_ones and afloat == 1:
return ""
elif abs(afloat - int(afloat)) < tol:
return str(int(afloat))
else:
return str(round(afloat, 8))
|
This function is used to make pretty formulas by formatting the amounts.
Instead of Li1.0 Fe1.0 P1.0 O4.0, you get LiFePO4.
Args:
afloat (float): a float
ignore_ones (bool): if true, floats of 1 are ignored.
tol (float): Tolerance to round to nearest int. i.e. 2.0000000001 -> 2
Returns:
A string representation of the float for formulas.
|
juraj-google-style
|
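A few worked calls against `formula_double_format` above (definition repeated so the snippet runs standalone), showing how amounts of 1 vanish and near-integers collapse, which is what turns Li1.0 Fe1.0 P1.0 O4.0 into LiFePO4.

def formula_double_format(afloat, ignore_ones=True, tol=1e-8):
    if ignore_ones and afloat == 1:
        return ""
    elif abs(afloat - int(afloat)) < tol:
        return str(int(afloat))
    else:
        return str(round(afloat, 8))

assert formula_double_format(1.0) == ""            # amounts of one are dropped
assert formula_double_format(2.0000000001) == "2"  # within tol of an integer
assert formula_double_format(0.333333333333) == "0.33333333"

amounts = {"Li": 1.0, "Fe": 1.0, "P": 1.0, "O": 4.0}
assert "".join(el + formula_double_format(n) for el, n in amounts.items()) == "LiFePO4"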
def _write_object_proto(self, proto, options):
resource_variable_ops.write_object_proto_for_resource_variable(self, proto, options)
values_util.write_object_proto(self, proto, options)
|
Update a SavedObject proto for the caller.
If a DistributedVariable object supports this method, it will be called when
saving with a pre-built `SavedObject` proto representing the object, plus an
instance of `SaveOptions`. This method is then free to modify that proto
instance.
`DistributedVariable` with `AUTO` or `ON_WRITE` synchronization optionally
write out information about their components to the
`experimental_distributed_variable_components` field of a
`SavedVariable` (depending on the `SaveOptions` variable policy).
Args:
proto: A pre-built `SavedObject` proto for this object. It is assumed this
will be a `SavedVariable` instance.
options: A `SaveOptions` instance.
|
github-repos
|
def _parse_saved_model_args(self, always_enable_saved_model_import=False):
if not self.experimental_new_converter:
self.saved_model_dir = None
return
if self.saved_model_dir:
try:
saved_model_proto, _ = _parse_saved_model_with_debug_info(self.saved_model_dir)
except OSError:
self.saved_model_dir = None
return
if not always_enable_saved_model_import and (not self._contains_function_with_implements_attr(saved_model_proto)):
self.saved_model_dir = None
return
if not self._saved_model_exported_names:
self._saved_model_exported_names = []
self._saved_model_version = saved_model_proto.saved_model_schema_version
if self._saved_model_version == 0:
self.saved_model_dir = None
logging.warning('SavedModel schema version is zero.')
return
if self._saved_model_version not in [1, 2]:
raise ValueError('SavedModel file format({0}) is not supported'.format(self._saved_model_version))
|
Parses SavedModel arguments from the given Keras/RNN SavedModel.
Args:
always_enable_saved_model_import: Bool. When the value is true, it enables
MLIR saved model import path regardless of checking the conditions.
|
github-repos
|
def find_effect_class(self, path) -> Type[Effect]:
package_name, class_name = parse_package_string(path)
if package_name:
package = self.get_package(package_name)
return package.find_effect_class(class_name, raise_for_error=True)
for package in self.packages:
effect_cls = package.find_effect_class(class_name)
if effect_cls:
return effect_cls
raise EffectError("No effect class '{}' found in any packages".format(class_name))
|
Find an effect class by class name or full python path to class
Args:
path (str): effect class name or full python path to effect class
Returns:
Effect class
Raises:
EffectError if no class is found
|
juraj-google-style
|
def add_attribute_label(self, attribute_id, label):
if not self.can_update():
self._tcex.handle_error(910, [self.type])
return self.tc_requests.add_attribute_label(
self.api_type, self.api_sub_type, self.unique_id, attribute_id, label, owner=self.owner
)
|
Adds a security label to an attribute
Args:
attribute_id:
label:
Returns: A response json
|
juraj-google-style
|
def file_delete(filename, settings):
if len(settings) != 1:
raise ValueError("Settings must only contain one item with key "
"'mode'.")
for k, v in settings.items():
if k == "mode" and v == "actual":
try:
os.remove(filename)
except OSError:
pass
elif k == "mode" and v == "simulated":
print("Simulated removal of {}".format(filename))
|
Deletes a file. {'_file_delete': {'mode': "actual"}}
Args:
filename (str): Filename.
settings (dict): Must be {"mode": actual/simulated}. Simulated
mode only prints the action without performing it.
|
juraj-google-style
|
def get_student_certificates(self, username, course_ids=None):
if (course_ids is None):
enrollments_client = CourseEnrollments(self.requester, self.base_url)
enrollments = enrollments_client.get_student_enrollments()
course_ids = list(enrollments.get_enrolled_course_ids())
all_certificates = []
for course_id in course_ids:
try:
all_certificates.append(self.get_student_certificate(username, course_id))
except HTTPError as error:
if (error.response.status_code >= 500):
raise
return Certificates(all_certificates)
|
Returns an Certificates object with the user certificates
Args:
username (str): an edx user's username
course_ids (list): a list of edX course ids.
Returns:
Certificates: object representing the student certificates for a course
|
codesearchnet
|
def simplify(self, eps, max_dist_error, max_speed_error, topology_only=False):
for segment in self.segments:
segment.simplify(eps, max_dist_error, max_speed_error, topology_only)
return self
|
In-place simplification of segments
Args:
max_dist_error (float): Min distance error, in meters
max_speed_error (float): Min speed error, in km/h
topology_only: Boolean, optional. True to keep
the topology, neglecting velocity and time
accuracy (use common Douglas-Ramen-Peucker).
False (default) to simplify segments keeping
the velocity between points.
Returns:
This track
|
codesearchnet
|
def simplify(self, assignments):
raise NotImplementedError()
|
Simplify this term, given a list of possible values for each variable.
Args:
assignments: A list of possible values for each variable. A dictionary
mapping strings (variable name) to sets of strings (value names).
Returns:
A new BooleanTerm, potentially simplified.
|
github-repos
|
def create_in_hdx(self, allow_no_resources=False, update_resources=True, update_resources_by_name=True, remove_additional_resources=False, create_default_views=True, hxl_update=True):
self.check_required_fields(allow_no_resources=allow_no_resources)
loadedid = None
if ('id' in self.data):
if self._dataset_load_from_hdx(self.data['id']):
loadedid = self.data['id']
else:
logger.warning(('Failed to load dataset with id %s' % self.data['id']))
if (not loadedid):
if self._dataset_load_from_hdx(self.data['name']):
loadedid = self.data['name']
if loadedid:
logger.warning(('Dataset exists. Updating %s' % loadedid))
self._dataset_merge_hdx_update(update_resources=update_resources, update_resources_by_name=update_resources_by_name, remove_additional_resources=remove_additional_resources, create_default_views=create_default_views, hxl_update=hxl_update)
return
filestore_resources = list()
if self.resources:
ignore_fields = ['package_id']
for resource in self.resources:
resource.check_required_fields(ignore_fields=ignore_fields)
if resource.get_file_to_upload():
filestore_resources.append(resource)
resource['url'] = Dataset.temporary_url
self.data['resources'] = self._convert_hdxobjects(self.resources)
self._save_to_hdx('create', 'name')
self._add_filestore_resources(filestore_resources, False, hxl_update)
|
Check if dataset exists in HDX and if so, update it, otherwise create it
Args:
allow_no_resources (bool): Whether to allow no resources. Defaults to False.
update_resources (bool): Whether to update resources (if updating). Defaults to True.
update_resources_by_name (bool): Compare resource names rather than position in list. Defaults to True.
remove_additional_resources (bool): Remove additional resources found in dataset (if updating). Defaults to False.
create_default_views (bool): Whether to call package_create_default_resource_views (if updating). Defaults to True.
hxl_update (bool): Whether to call package_hxl_update. Defaults to True.
Returns:
None
|
codesearchnet
|
def get_matrix(self, x1=None, x2=None, include_diagonal=None, include_general=None):
if ((x1 is None) and (x2 is None)):
if ((self._t is None) or (not self.computed)):
raise RuntimeError("you must call 'compute' first")
K = self.kernel.get_value(self._t[:, None] - self._t[None, :])
if ((include_diagonal is None) or include_diagonal):
K[np.diag_indices_from(K)] += ((self._yerr ** 2) + self.kernel.jitter)
if (((include_general is None) or include_general) and len(self._A)):
K[np.diag_indices_from(K)] += self._A
K += np.tril(np.dot(self._U.T, self._V), (- 1))
K += np.triu(np.dot(self._V.T, self._U), 1)
return K
incl = False
x1 = np.ascontiguousarray(x1, dtype=float)
if (x2 is None):
x2 = x1
incl = ((include_diagonal is not None) and include_diagonal)
K = self.kernel.get_value(x1[:, None] - x2[None, :])
if incl:
K[np.diag_indices_from(K)] += self.kernel.jitter
return K
|
Get the covariance matrix at given independent coordinates
Args:
x1 (Optional[array[n1]]): The first set of independent coordinates.
If this is omitted, ``x1`` will be assumed to be equal to ``x``
from a previous call to :func:`GP.compute`.
x2 (Optional[array[n2]]): The second set of independent
coordinates. If this is omitted, ``x2`` will be assumed to be
``x1``.
include_diagonal (Optional[bool]): Should the white noise and
``yerr`` terms be included on the diagonal?
(default: ``False``)
|
codesearchnet
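A usage sketch assuming the celerite package, which this method comes from; the kernel choice and numbers are arbitrary, and compute() must be called before requesting the matrix without coordinates.
import numpy as np
import celerite
from celerite import terms

t = np.sort(np.random.uniform(0.0, 10.0, 50))    # independent coordinates
yerr = 0.1 * np.ones_like(t)                      # per-point uncertainties

kernel = terms.RealTerm(log_a=0.1, log_c=-0.5)    # arbitrary simple kernel
gp = celerite.GP(kernel)
gp.compute(t, yerr)                               # required before get_matrix() with no args

K = gp.get_matrix()                               # covariance at the computed coordinates
K_cross = gp.get_matrix(x1=np.linspace(0.0, 10.0, 5), x2=t)   # cross-covariance at new points
print(K.shape, K_cross.shape)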
|
def get_pipeline_field(self, pipeline_key, field_key = None):
uri = '/'.join([
self.api_uri,
self.pipelines_suffix,
pipeline_key,
self.fields_suffix
])
if field_key:
uri = '/'.join([uri, field_key])
return self._req('get', uri)
|
Gets one field (or all fields) in a pipeline
Args:
    pipeline_key: key for the pipeline
    field_key: key for the field (default: None, i.e. ALL fields)
Returns:
    status code and a field dict (or a list thereof)
|
juraj-google-style
|
def guess_listing_type(lines, threshold=100):
scores = {'unix': 0, 'msdos': 0, 'nlst': 0}
for line in lines:
if (not line):
continue
if re.search('---|r--|rw-|rwx', line):
scores['unix'] += 1
if (('<DIR>' in line) or re.search('^.{0,4}\\d\\d', line)):
scores['msdos'] += 1
words = line.split(' ', 1)
if (len(words) == 1):
scores['nlst'] += 1
if (max(scores.values()) > threshold):
break
top = max(scores.items(), key=(lambda item: item[1]))
if top[1]:
return top[0]
else:
return 'unknown'
|
Guess the style of a directory listing.
Args:
    lines: iterable of str, the lines of the directory listing.
    threshold (int): stop scanning once any style's score exceeds this value.
Returns:
    str: ``unix``, ``msdos``, ``nlst``, ``unknown``.
|
codesearchnet
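A quick usage sketch, assuming guess_listing_type above is in scope; the listing lines are fabricated examples of the two main formats.
unix_lines = [
    'drwxr-xr-x   2 ftp ftp      4096 Jan 01 12:00 pub',
    '-rw-r--r--   1 ftp ftp      1024 Jan 01 12:00 readme.txt',
]
msdos_lines = [
    '01-01-24  12:00PM       <DIR>          pub',
    '01-01-24  12:00PM                 1024 readme.txt',
]
print(guess_listing_type(unix_lines))    # expected: 'unix'
print(guess_listing_type(msdos_lines))   # expected: 'msdos'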
|
def __init__(self, cipher_suites=None):
super(BasicAuthenticationSuite, self).__init__(cipher_suites)
self._protocol = ssl.PROTOCOL_TLSv1
|
Create a BasicAuthenticationSuite object.
Args:
cipher_suites (list): A list of strings representing the names of
cipher suites to use. Overrides the default set of cipher
suites. Optional, defaults to None.
|
juraj-google-style
|
def _count_ops(self, graphdef: graph_pb2.GraphDef, op_names: Collection[str], attr_name: str='', attr_val: _AttrValType=None, get_op_name: bool=False) -> int:
op_count = 0
for op_name in op_names:
op_count += self._count_op_with_name_and_attribute(nodes=graphdef.node, op_name=op_name, attr_name=attr_name, attr_val=attr_val, get_op_name=get_op_name)
for func in graphdef.library.function:
op_count += self._count_op_with_name_and_attribute(nodes=func.node_def, op_name=op_name, attr_name=attr_name, attr_val=attr_val, get_op_name=get_op_name)
return op_count
|
Returns the number of given ops in a graph def.
Args:
graphdef: A GraphDef object.
op_names: Names of the operations to find within the graph.
attr_name: Name of the attribute of the ops to match.
attr_val: Value of the attr_name to check.
get_op_name: If set True, checks node.name rather than node.op.
Returns:
The number of occurrences of the given ops in the graph. An op is
counted only if it matches 'op_name' and, when 'attr_name' is
specified, carries the matching 'attr_val'.
|
github-repos
|
def select_starts_ends(start, end, p_mask, attention_mask, min_null_score=1000000, top_k=1, handle_impossible_answer=False, max_answer_len=15):
undesired_tokens = np.abs(np.array(p_mask) - 1)
if attention_mask is not None:
undesired_tokens = undesired_tokens & attention_mask
undesired_tokens_mask = undesired_tokens == 0.0
start = np.where(undesired_tokens_mask, -10000.0, start)
end = np.where(undesired_tokens_mask, -10000.0, end)
start = np.exp(start - start.max(axis=-1, keepdims=True))
start = start / start.sum()
end = np.exp(end - end.max(axis=-1, keepdims=True))
end = end / end.sum()
if handle_impossible_answer:
min_null_score = min(min_null_score, (start[0, 0] * end[0, 0]).item())
start[0, 0] = end[0, 0] = 0.0
starts, ends, scores = decode_spans(start, end, top_k, max_answer_len, undesired_tokens)
return (starts, ends, scores, min_null_score)
|
Takes the raw output of any `ModelForQuestionAnswering` and first normalizes its outputs and then uses
`decode_spans()` to generate probabilities for each span to be the actual answer.
Args:
start (`np.ndarray`): Individual start logits for each token.
end (`np.ndarray`): Individual end logits for each token.
p_mask (`np.ndarray`): A mask with 1 for values that cannot be in the answer
attention_mask (`np.ndarray`): The attention mask generated by the tokenizer
min_null_score(`float`): The minimum null (empty) answer score seen so far.
top_k (`int`): Indicates how many possible answer span(s) to extract from the model output.
handle_impossible_answer(`bool`): Whether to allow null (empty) answers
max_answer_len (`int`): Maximum size of the answer to extract from the model's output.
|
github-repos
|
def CreateKey(self, prikey=None):
account = super(UserWallet, self).CreateKey(private_key=prikey)
self.OnCreateAccount(account)
contract = WalletContract.CreateSignatureContract(account.PublicKey)
self.AddContract(contract)
return account
|
Create a KeyPair and store it encrypted in the database.
Args:
prikey (iterable_of_ints): (optional) 32 byte private key.
Returns:
KeyPair: a KeyPair instance.
|
codesearchnet
|
def integer_fractional_parts(number):
radix_point = number.index(".")
integer_part = number[:radix_point]
fractional_part = number[radix_point:]
return(integer_part, fractional_part)
|
Returns a tuple of the integer and fractional parts of a number.
Args:
number(iterable container): A number in the following form:
(..., ".", int, int, int, ...)
Returns:
(integer_part, fractional_part): tuple.
Example:
>>> integer_fractional_parts((1,2,3,".",4,5,6))
((1, 2, 3), ('.', 4, 5, 6))
|
juraj-google-style
|
def find_item(self, fq_name):
names = fq_name.split(self._separator)
current = self._yapconf_items
for name in names:
if isinstance(current, (YapconfDictItem, YapconfListItem)):
current = current.children
if name not in current:
return None
current = current[name]
return current
|
Find an item in the specification by fully qualified name.
Args:
fq_name (str): Fully-qualified name of the item.
Returns:
The item if it is in the specification. None otherwise
|
juraj-google-style
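A short usage sketch; `spec` is a stand-in for an already-built specification object exposing find_item as above, and the item name is hypothetical.
# 'spec' is assumed to be a loaded specification instance; 'database.host' is illustrative.
item = spec.find_item('database.host')
if item is None:
    print('no such item in the specification')
else:
    print('found item:', item)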
|
def load_sklearn_iris_test_data(data_type: Callable, split: bool=True, seed: int=999) -> list[Union[numpy.array, pandas.DataFrame]]:
dataset = load_iris()
_, x_test, _, _ = train_test_split(dataset['data'], dataset['target'], test_size=0.2, random_state=seed)
if split:
return [(index, data_type(sample.reshape(1, -1))) for index, sample in enumerate(x_test)]
return [(0, data_type(x_test))]
|
Loads test data from the sklearn Iris dataset in a given format,
either as a single batch or split into multiple batches.
Args:
data_type: Datatype of the iris test dataset.
split: Split the dataset in different batches or return single batch.
seed: Random state for splitting the train and test set.
|
github-repos
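A usage sketch assuming the helper above is in scope and scikit-learn, numpy, and pandas are installed; it exercises both the per-sample and single-batch modes.
import numpy
import pandas

per_sample = load_sklearn_iris_test_data(numpy.array, split=True)
print(len(per_sample), per_sample[0][0], per_sample[0][1].shape)   # many (index, 1x4 array) pairs

single_batch = load_sklearn_iris_test_data(pandas.DataFrame, split=False)
print(single_batch[0][0], single_batch[0][1].shape)                # one (0, n_test x 4 frame) pair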
|
def _get_eq_sets(self):
UNIT = np.eye(3)
(eq_sets, operations) = (defaultdict(set), defaultdict(dict))
symm_ops = [op.rotation_matrix for op in generate_full_symmops(self.symmops, self.tol)]
def get_clustered_indices():
indices = cluster_sites(self.centered_mol, self.tol, give_only_index=True)
out = list(indices[1].values())
if (indices[0] is not None):
out.append([indices[0]])
return out
for index in get_clustered_indices():
sites = self.centered_mol.cart_coords[index]
for (i, reference) in zip(index, sites):
for op in symm_ops:
rotated = np.dot(op, sites.T).T
matched_indices = find_in_coord_list(rotated, reference, self.tol)
matched_indices = {dict(enumerate(index))[i] for i in matched_indices}
eq_sets[i] |= matched_indices
if (i not in operations):
operations[i] = {j: (op.T if (j != i) else UNIT) for j in matched_indices}
else:
for j in matched_indices:
if (j not in operations[i]):
operations[i][j] = (op.T if (j != i) else UNIT)
for j in matched_indices:
if (j not in operations):
operations[j] = {i: (op if (j != i) else UNIT)}
elif (i not in operations[j]):
operations[j][i] = (op if (j != i) else UNIT)
return {'eq_sets': eq_sets, 'sym_ops': operations}
|
Calculates the dictionary for mapping equivalent atoms onto each other.
Args:
None
Returns:
dict: The returned dictionary has two possible keys:
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
that maps atom ``i`` onto ``j``.
|
codesearchnet
|
def get_current_user(with_domain=True):
try:
user_name = win32api.GetUserNameEx(win32api.NameSamCompatible)
if (user_name[(- 1)] == '$'):
test_user = win32api.GetUserName()
if (test_user == 'SYSTEM'):
user_name = 'SYSTEM'
elif (get_sid_from_name(test_user) == 'S-1-5-18'):
user_name = 'SYSTEM'
elif (not with_domain):
user_name = win32api.GetUserName()
except pywintypes.error as exc:
raise CommandExecutionError('Failed to get current user: {0}'.format(exc))
if (not user_name):
return False
return user_name
|
Gets the user executing the process
Args:
with_domain (bool):
``True`` will prepend the user name with the machine name or domain
separated by a backslash
Returns:
str: The user name
|
codesearchnet
|
def fetch_local_package(self, config):
self.update_paths_and_config(config=config, pkg_dir_name=config['source'], pkg_cache_dir=os.getcwd())
|
Make a local path available to current stacker config.
Args:
config (dict): 'local' path config dictionary
|
codesearchnet
|
def __setitem__(self, key, value):
if not fs.exists(value):
raise ValueError(value)
path = self.keypath(key)
fs.mkdir(self.path)
fs.mv(value, path)
|
Emplace file in cache.
Arguments:
key: Key.
value (str): Path of file to insert in cache.
Raises:
ValueError: If the file at ``value`` does not exist.
|
juraj-google-style
|
def run_ansible(playbooks, inventory_path=None, roles=None, extra_vars=None, tags=None, on_error_continue=False, basedir='.'):
(inventory, variable_manager, loader, options) = _load_defaults(inventory_path=inventory_path, roles=roles, extra_vars=extra_vars, tags=tags, basedir=basedir)
passwords = {}
for path in playbooks:
logger.info(('Running playbook %s with vars:\n%s' % (path, extra_vars)))
pbex = PlaybookExecutor(playbooks=[path], inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=passwords)
code = pbex.run()
stats = pbex._tqm._stats
hosts = stats.processed.keys()
result = [{h: stats.summarize(h)} for h in hosts]
results = {'code': code, 'result': result, 'playbook': path}
print(results)
failed_hosts = []
unreachable_hosts = []
for h in hosts:
t = stats.summarize(h)
if (t['failures'] > 0):
failed_hosts.append(h)
if (t['unreachable'] > 0):
unreachable_hosts.append(h)
if (len(failed_hosts) > 0):
logger.error(('Failed hosts: %s' % failed_hosts))
if (not on_error_continue):
raise EnosFailedHostsError(failed_hosts)
if (len(unreachable_hosts) > 0):
logger.error(('Unreachable hosts: %s' % unreachable_hosts))
if (not on_error_continue):
raise EnosUnreachableHostsError(unreachable_hosts)
|
Run Ansible.
Args:
playbooks (list): list of paths to the playbooks to run
inventory_path (str): path to the hosts file (inventory)
extra_vars (dict): extra vars to pass
tags (list): list of tags to run
on_error_continue(bool): Don't throw any exception in case a host is
unreachable or the playbooks run with errors
Raises:
:py:class:`enoslib.errors.EnosFailedHostsError`: if a task returns an
error on a host and ``on_error_continue==False``
:py:class:`enoslib.errors.EnosUnreachableHostsError`: if a host is
unreachable (through ssh) and ``on_error_continue==False``
|
codesearchnet
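A minimal invocation sketch; the playbook path, inventory, and variables are placeholders, and the exception classes are the ones named in the docstring above.
from enoslib.errors import EnosFailedHostsError, EnosUnreachableHostsError

try:
    run_ansible(
        ['playbooks/site.yml'],              # list of playbook paths (placeholder)
        inventory_path='hosts.ini',          # Ansible inventory file (placeholder)
        extra_vars={'app_version': '1.2.3'},
        tags=['deploy'],
        on_error_continue=False,             # raise on failed or unreachable hosts
    )
except (EnosFailedHostsError, EnosUnreachableHostsError) as err:
    print('Ansible run failed:', err)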
|
def add_answer(self, vote, rationale):
self.raw_answers.append({
VOTE_KEY: vote,
RATIONALE_KEY: rationale,
})
|
Add an answer
Args:
    vote (int): the option that the student voted for
    rationale (str): the reason why the student voted for the option
|
juraj-google-style
|
def _find_best_fit(self, pbin):
fit = ((pbin.fitness(r[0], r[1]), k) for (k, r) in self._sorted_rect.items())
fit = (f for f in fit if (f[0] is not None))
try:
(_, rect) = min(fit, key=self.first_item)
return rect
except ValueError:
return None
|
Return the best-fitting rectangle from the _sorted_rect packing list
Arguments:
pbin (PackingAlgorithm): Packing bin
Returns:
key of the rectangle with best fitness
|
codesearchnet
|
def add_sources_argument(cls, group, allow_filters=True, prefix=None, add_root_paths=False):
prefix = prefix or cls.argument_prefix
group.add_argument("--%s-sources" % prefix,
action="store", nargs="+",
dest="%s_sources" % prefix.replace('-', '_'),
help="%s source files to parse" % prefix)
if allow_filters:
group.add_argument("--%s-source-filters" % prefix,
action="store", nargs="+",
dest="%s_source_filters" % prefix.replace(
'-', '_'),
help="%s source files to ignore" % prefix)
if add_root_paths:
group.add_argument("--%s-source-roots" % prefix,
action="store", nargs="+",
dest="%s_source_roots" % prefix.replace(
'-', '_'),
help="%s source root directories allowing files "
"to be referenced relatively to those" % prefix)
|
Subclasses may call this to add sources and source_filters arguments.
Args:
group: argparse.ArgumentGroup, the extension argument group
allow_filters: bool, Whether the extension wishes to expose a
source_filters argument.
prefix: str, prefix used to namespace the arguments.
|
juraj-google-style
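A sketch of how an extension could wire these options into argparse; `CExtension` is a hypothetical class assumed to define add_sources_argument as above, and the 'c' prefix is illustrative.
import argparse

parser = argparse.ArgumentParser()
group = parser.add_argument_group('c extension')
# CExtension is hypothetical; it only needs to expose the classmethod shown above.
CExtension.add_sources_argument(group, allow_filters=True, prefix='c', add_root_paths=True)

args = parser.parse_args(['--c-sources', 'a.h', 'b.h', '--c-source-roots', 'include/'])
print(args.c_sources, args.c_source_roots)   # ['a.h', 'b.h'] ['include/']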
|
def Create(self, project_id, start_options=None, deadline=10):
return DatastoreEmulator(self._emulator_cmd, self._working_directory, project_id, deadline, start_options)
|
Creates an emulator instance.
This method will wait for up to 'deadline' seconds for the emulator to
start.
Args:
project_id: project ID
start_options: a list of additional command-line options to pass to the
emulator 'start' command
deadline: number of seconds to wait for the datastore to respond
Returns:
a DatastoreEmulator
Raises:
IOError: if the emulator could not be started within the deadline
|
codesearchnet
|
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_bias: torch.Tensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, use_cache: Optional[bool]=None):
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
current_key_values = () if use_cache else None
for i, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = layer(hidden_states, attention_mask, position_bias, output_attentions=output_attentions, past_key_values=past_key_values[i] if past_key_values else None, use_cache=use_cache)
hidden_states, attn_weights, current_key_value = layer_outputs
if output_attentions:
all_self_attns += (attn_weights,)
if current_key_value is not None:
current_key_values = current_key_values + (current_key_value,)
hidden_states = self.output_layernorm(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
return (hidden_states, current_key_values, all_hidden_states, all_self_attns)
|
Args:
hidden_states (`torch.Tensor`):
Input to the layer of shape `(batch, seq_len, dim_model)`
attention_mask (`torch.Tensor`):
Avoid invalid areas to participate in the calculation of shape `(batch, seq_len, seq_len)`
position_bias (`torch.Tensor`):
Provides position information to attention mechanism of shape `(num_heads, seq_len, seq_len)`
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers.
past_key_values (`Tuple[torch.Tensor, torch.Tensor])`, *optional*):
Cached past key and value projection states
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
|
github-repos
|
def GetSshkeyMap(self, since=None):
return SshkeyUpdateGetter(self.conf).GetUpdates(source=self, search_base=self.conf['base'], search_filter=self.conf['filter'], search_scope=self.conf['scope'], since=since)
|
Return the sshkey map from this source.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of maps.SshkeyMap
|
github-repos
|
def _build_parser_message(self, column: str, parser: str, error: str, value: Any) -> LogMessage:
return self._base_log.copy() | LogMessage(log_type=LogType.PARSER.value, column=column, parser=parser, error=error, value=value)
|
Adds parser error information to base log message.
Args:
* column: column where the rule is applied
* parser: parser function that failed and raises this message
* error: error description produced by the failing parser
* value: value that fails to parse
Returns:
* log: LogMessage dictionary
|
github-repos
|
def grep(regex, output):
lines = output.decode('utf-8').strip().splitlines()
results = []
for line in lines:
if re.search(regex, line):
results.append(line.strip())
return results
|
Similar to Linux's `grep`, this returns the lines in an output stream
that match a given regex pattern.
It does not rely on the `grep` binary and is not sensitive to line endings,
so it can be used cross-platform.
Args:
regex: string, a regex that matches the expected pattern.
output: byte string, the raw output of the adb cmd.
Returns:
A list of strings, all of which are output lines that matches the
regex pattern.
|
github-repos
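A small usage sketch, assuming grep above is in scope; the byte string mimics raw adb output with mixed line endings.
raw_output = b'package:com.example.app\r\npackage:com.android.shell\nother: line\n'
matches = grep(r'^package:com\.example', raw_output)
print(matches)   # ['package:com.example.app']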
|
def get_image_size_for_max_num_patches(image_height: int, image_width: int, patch_size: int, max_num_patches: int, eps: float=1e-05) -> Tuple[int, int]:
def get_scaled_image_size(scale: float, size: int, patch_size: int) -> int:
scaled_size = size * scale
scaled_size = math.ceil(scaled_size / patch_size) * patch_size
scaled_size = max(patch_size, scaled_size)
return int(scaled_size)
scale_min, scale_max = (eps / 10, 100.0)
while scale_max - scale_min >= eps:
scale = (scale_min + scale_max) / 2
target_height = get_scaled_image_size(scale, image_height, patch_size)
target_width = get_scaled_image_size(scale, image_width, patch_size)
num_patches = target_height / patch_size * (target_width / patch_size)
if num_patches <= max_num_patches:
scale_min = scale
else:
scale_max = scale
scale = scale_min
target_height = get_scaled_image_size(scale, image_height, patch_size)
target_width = get_scaled_image_size(scale, image_width, patch_size)
return (target_height, target_width)
|
Determine the image size based on the maximum number of patches, ensuring dimensions are divisible by the patch size and the image covers at least one patch.
Args:
image_height (`int`):
Original image height.
image_width (`int`):
Original image width.
patch_size (`int`):
Patch size for processing.
max_num_patches (`int`):
Maximum number of patches.
eps (`float`):
Small threshold for binary search.
Returns:
Tuple: (target_height, target_width)
|
github-repos
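A quick numerical sketch assuming the function above is in scope; the image size, patch size, and patch budget are arbitrary, and the asserts restate the documented guarantees.
patch_size, max_patches = 14, 256
h, w = get_image_size_for_max_num_patches(480, 640, patch_size, max_patches)

assert h % patch_size == 0 and w % patch_size == 0           # divisible by the patch size
assert (h // patch_size) * (w // patch_size) <= max_patches  # within the patch budget
print(h, w, (h // patch_size) * (w // patch_size))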
|
def lookup_imagenet_labels(indices):
global _CLASS_INDEX
if _CLASS_INDEX is None:
with open(os.path.join(os.path.dirname(__file__), '../../resources/imagenet_class_index.json')) as f:
_CLASS_INDEX = json.load(f)
indices = listify(indices)
return [_CLASS_INDEX[str(idx)][1] for idx in indices]
|
Utility function to return the image net label for the final `dense` layer output index.
Args:
indices: Could be a single value or an array of indices whose labels should be looked up.
Returns:
Image net label corresponding to the image category.
|
juraj-google-style
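A usage sketch assuming the helper above and its bundled imagenet_class_index.json resource are available; the indices are arbitrary and the expected labels depend on that resource file.
single = lookup_imagenet_labels(282)            # a single index is accepted (listify wraps it)
several = lookup_imagenet_labels([1, 90, 282])  # or a list of indices
print(single, several)                          # e.g. ['tiger_cat'] and three label strings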
|
def _check_status(cls, response_json):
status = response_json['status']
msg = response_json['msg']
if status == 400:
raise BadRequestException(msg)
elif status == 403:
raise PermissionDeniedException(msg)
elif status == 404:
raise FileNotFoundException(msg)
elif status == 451:
raise UnavailableForLegalReasonsException(msg)
elif status == 509:
raise BandwidthUsageExceeded(msg)
elif status >= 500:
raise ServerErrorException(msg)
|
Check the status of the incoming response and raise an exception if the status code indicates an error.
Args:
response_json (dict): parsed JSON body of the GET response.
Returns:
None
|
juraj-google-style
|