code (string, lengths 20–4.93k) | docstring (string, lengths 33–1.27k) | source (string, 3 distinct values)
---|---|---|
def bytes_to_long(bytesdata: bytes) -> int:
assert (len(bytesdata) == 8)
return sum(((b << (k * 8)) for (k, b) in enumerate(bytesdata)))
|
Converts an 8-byte sequence to a long integer.
Args:
bytesdata: 8 consecutive bytes, as a ``bytes`` object, in
little-endian format (least significant byte [LSB] first)
Returns:
integer
|
codesearchnet
|
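A minimal usage sketch of `bytes_to_long` (illustrative values only, assuming the function as defined above); it agrees with Python's built-in `int.from_bytes` for little-endian input:
data = (258).to_bytes(8, 'little')   # b'\x02\x01\x00\x00\x00\x00\x00\x00'
assert bytes_to_long(data) == 258
assert bytes_to_long(data) == int.from_bytes(data, 'little')   # equivalent built-in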
def PushEventSource(self, event_source):
if (event_source.file_entry_type == dfvfs_definitions.FILE_ENTRY_TYPE_DIRECTORY):
weight = 1
else:
weight = 100
heap_values = (weight, time.time(), event_source)
heapq.heappush(self._heap, heap_values)
|
Pushes an event source onto the heap.
Args:
event_source (EventSource): event source.
|
codesearchnet
|
def backend():
return 'tensorflow'
|
Publicly accessible method for determining the current backend.
Only exists for API compatibility with multi-backend Keras.
Returns:
The string "tensorflow".
|
github-repos
|
def postprocess_docs(self, docs, input_strings, prefix, n_docs, return_tensors=None):
def cat_input_and_doc(doc_title, doc_text, input_string, prefix):
if doc_title.startswith('"'):
doc_title = doc_title[1:]
if doc_title.endswith('"'):
doc_title = doc_title[:-1]
if prefix is None:
prefix = ''
out = (prefix + doc_title + self.config.title_sep + doc_text + self.config.doc_sep + input_string).replace('  ', ' ')
return out
rag_input_strings = [cat_input_and_doc(docs[i]['title'][j], docs[i]['text'][j], input_strings[i], prefix) for i in range(len(docs)) for j in range(n_docs)]
contextualized_inputs = self.generator_tokenizer.batch_encode_plus(rag_input_strings, max_length=self.config.max_combined_length, return_tensors=return_tensors, padding='max_length', truncation=True)
return (contextualized_inputs['input_ids'], contextualized_inputs['attention_mask'])
|
Postprocessing retrieved `docs` and combining them with `input_strings`.
Args:
docs (`dict`):
Retrieved documents.
input_strings (`str`):
Input strings decoded by `preprocess_query`.
prefix (`str`):
Prefix added at the beginning of each input, typically used with T5-based models.
Return:
`tuple(tensors)`: a tuple consisting of two elements: contextualized `input_ids` and a compatible
`attention_mask`.
|
github-repos
|
def list(self, path):
self.__validate_storage_path(path)
entity = self.api_client.get_entity_by_query(path=path)
if (entity['entity_type'] not in self.__BROWSABLE_TYPES):
raise StorageArgumentException('The entity type "{0}" cannot be listed'.format(entity['entity_type']))
entity_uuid = entity['uuid']
file_names = []
more_pages = True
page_number = 1
while more_pages:
response = self.api_client.list_folder_content(entity_uuid, page=page_number, ordering='name')
more_pages = (response['next'] is not None)
page_number += 1
for child in response['results']:
pattern = ('/{name}' if (child['entity_type'] == 'folder') else '{name}')
file_names.append(pattern.format(name=child['name']))
return file_names
|
List the entities found directly under the given path.
Args:
path (str): The path of the entity to be listed. Must start with a '/'.
Returns:
The list of entity names directly under the given path:
u'/12345/folder_1'
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
|
codesearchnet
|
def set_scf_initial_guess(self, guess='SAD'):
available_guesses = {'core', 'sad', 'gwh', 'read', 'fragmo'}
if (guess.lower() not in available_guesses):
raise ValueError((('The guess method ' + guess) + ' is not supported yet'))
self.params['rem']['scf_guess'] = guess.lower()
|
Set initial guess method to be used for SCF
Args:
guess: The initial guess method. (str)
|
codesearchnet
|
def sed(regexpr, repl, force=False, recursive=False, dpath_list=None, fpath_list=None, verbose=None, include_patterns=None, exclude_patterns=[]):
if (include_patterns is None):
include_patterns = ['*.py', '*.pyx', '*.pxi', '*.cxx', '*.cpp', '*.hxx', '*.hpp', '*.c', '*.h', '*.html', '*.tex']
if (dpath_list is None):
dpath_list = [os.getcwd()]
if (verbose is None):
verbose = ut.NOT_QUIET
if (fpath_list is None):
greater_exclude_dirs = get_standard_exclude_dnames()
exclude_dirs = []
fpath_generator = matching_fpaths(dpath_list, include_patterns, exclude_dirs, greater_exclude_dirs=greater_exclude_dirs, recursive=recursive, exclude_patterns=exclude_patterns)
else:
fpath_generator = fpath_list
if verbose:
print(('sed-ing %r' % (dpath_list,)))
print((' * regular expression : %r' % (regexpr,)))
print((' * replacement : %r' % (repl,)))
print((' * include_patterns : %r' % (include_patterns,)))
print((' * recursive: %r' % (recursive,)))
print((' * force: %r' % (force,)))
from utool import util_str
print((' * fpath_list: %s' % (util_str.repr3(fpath_list),)))
regexpr = extend_regex(regexpr)
num_changed = 0
num_files_checked = 0
fpaths_changed = []
for fpath in fpath_generator:
num_files_checked += 1
changed_lines = sedfile(fpath, regexpr, repl, force, verbose=verbose)
if (changed_lines is not None):
fpaths_changed.append(fpath)
num_changed += len(changed_lines)
import utool as ut
print(('num_files_checked = %r' % (num_files_checked,)))
print(('fpaths_changed = %s' % (ut.repr3(sorted(fpaths_changed)),)))
print(('total lines changed = %r' % (num_changed,)))
|
Python implementation of sed. NOT FINISHED.
Searches and replaces text in files.
Args:
regexpr (str): regx patterns to find
repl (str): text to replace
force (bool):
recursive (bool):
dpath_list (list): directories to search (defaults to cwd)
|
codesearchnet
|
def plot_main(pid, return_fig_ax=False):
global WORKING_DIRECTORY, SNR_CUT
if isinstance(pid, PlotInput):
pid = pid.return_dict()
WORKING_DIRECTORY = '.'
if ('WORKING_DIRECTORY' not in pid['general'].keys()):
pid['general']['WORKING_DIRECTORY'] = '.'
SNR_CUT = 5.0
if ('SNR_CUT' not in pid['general'].keys()):
pid['general']['SNR_CUT'] = SNR_CUT
if ('switch_backend' in pid['general'].keys()):
plt.switch_backend(pid['general']['switch_backend'])
running_process = MakePlotProcess(**{**pid, **pid['general'], **pid['plot_info'], **pid['figure']})
running_process.input_data()
running_process.setup_figure()
running_process.create_plots()
if ('save_figure' in pid['figure'].keys()):
if (pid['figure']['save_figure'] is True):
running_process.fig.savefig(((pid['general']['WORKING_DIRECTORY'] + '/') + pid['figure']['output_path']), **pid['figure']['savefig_kwargs'])
if ('show_figure' in pid['figure'].keys()):
if (pid['figure']['show_figure'] is True):
plt.show()
if (return_fig_ax is True):
return (running_process.fig, running_process.ax)
return
|
Main function for creating these plots.
Reads in plot info dict from json file or dictionary in script.
Args:
return_fig_ax (bool, optional): Return figure and axes objects.
Returns:
2-element tuple containing
- **fig** (*obj*): Figure object for customization outside of those in this program.
- **ax** (*obj*): Axes object for customization outside of those in this program.
|
codesearchnet
|
def add_node(self, node_id, name, labels):
node = self.graph_db.get_or_create_indexed_node('Node', 'node_id', node_id, {'node_id': node_id, 'name': name})
try:
node.add_labels(*labels)
except NotImplementedError:
pass
|
Add the node with name and labels.
Args:
node_id: Id for the node.
name: Name for the node.
labels: Label for the node.
Raises:
NotImplementedError: When adding labels is not supported.
|
juraj-google-style
|
def filter_by_pattern(self, pattern):
_filt_values, _filt_datetimes = self._filter_by_pattern(pattern)
collection = HourlyDiscontinuousCollection(
self.header.duplicate(), _filt_values, _filt_datetimes)
collection._validated_a_period = True
return collection
|
Filter the Data Collection based on a list of booleans.
Args:
pattern: A list of True/False values. Typically, this is a list
with a length matching the length of the Data Collections values
but it can also be a pattern to be repeated over the Data Collection.
Return:
A new Data Collection with filtered data
|
juraj-google-style
|
def ensure_exe(exe_name: str, *paths: str):
if not elib_run.find_executable(exe_name, *paths):
LOGGER.error('could not find "%s.exe" on this system', exe_name)
sys.exit(-1)
|
Makes sure that an executable can be found on the system path.
Will exit the program if the executable cannot be found
Args:
exe_name: name of the executable
paths: optional path(s) to be searched; if not specified, search the whole system
|
juraj-google-style
|
def intify(x):
if isinstance(x, int):
return x
try:
return int(x, 0)
except (TypeError, ValueError):
return None
|
Ensure ( or coerce ) a value into being an integer or None.
Args:
x (obj): An object to intify
Returns:
(int): The int value ( or None )
|
juraj-google-style
|
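A brief usage sketch for `intify` (illustrative values); `int(x, 0)` infers the base from the string prefix, and any parse failure yields None:
assert intify(42) == 42                  # ints pass through unchanged
assert intify('0x1f') == 31              # hex string
assert intify('0o17') == 15              # octal string
assert intify('not a number') is None    # ValueError -> None
assert intify(None) is None              # TypeError -> None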
def _split_op(self, identifier, hs_label=None, dagger=False, args=None):
if self._isinstance(identifier, 'SymbolicLabelBase'):
identifier = QnetAsciiDefaultPrinter()._print_SCALAR_TYPES(identifier.expr)
(name, total_subscript) = self._split_identifier(identifier)
total_superscript = ''
if (hs_label not in [None, '']):
if (self._settings['show_hs_label'] == 'subscript'):
if (len(total_subscript) == 0):
total_subscript = (('(' + hs_label) + ')')
else:
total_subscript += ((',(' + hs_label) + ')')
else:
total_superscript += (('(' + hs_label) + ')')
if dagger:
total_superscript += self._dagger_sym
args_str = ''
if ((args is not None) and (len(args) > 0)):
args_str = ((self._parenth_left + ','.join([self.doprint(arg) for arg in args])) + self._parenth_right)
return (name, total_subscript, total_superscript, args_str)
|
Return `name`, total `subscript`, total `superscript` and
`arguments` str. All of the returned strings are fully rendered.
Args:
identifier (str or SymbolicLabelBase): A (non-rendered/ascii)
identifier that may include a subscript. The output `name` will
be the `identifier` without any subscript
hs_label (str): The rendered label for the Hilbert space of the
operator, or None. Returned unchanged.
dagger (bool): Flag to indicate whether the operator is daggered.
If True, :attr:`dagger_sym` will be included in the
`superscript` (or `subscript`, depending on the settings)
args (list or None): List of arguments (expressions). Each element
will be rendered with :meth:`doprint`. The total list of args
will then be joined with commas, enclosed
with :attr:`_parenth_left` and :attr:`_parenth_right`, and
returned as the `arguments` string
|
codesearchnet
|
def create_attribute_model(self, initial_value=None):
attr = self.attribute_class(meta=self, value=initial_value)
return attr
|
Make an AttributeModel instance of the correct type for this Meta
Args:
initial_value: The initial value the Attribute should take
Returns:
AttributeModel: The created attribute model instance
|
juraj-google-style
|
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
result = cls + token_ids_0 + sep
if token_ids_1 is not None:
result += token_ids_1 + sep
return result
|
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A CANINE sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
github-repos
|
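A token-level sketch of the two formats produced above; the IDs below are hypothetical placeholders, not real CANINE vocabulary values:
# Assume cls_token_id == 1 and sep_token_id == 2 (hypothetical values).
# single sequence   [CLS] X [SEP]         : [11, 12]       -> [1, 11, 12, 2]
# pair of sequences [CLS] A [SEP] B [SEP] : [11, 12], [21] -> [1, 11, 12, 2, 21, 2]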
def _md5sum(file_path):
md5 = hashlib.md5()
with open(file_path, 'rb') as md5_file:
while True:
data = md5_file.read(((1024 * 1024) * 4))
if (not data):
break
md5.update(data)
return md5.digest()
|
Helper function that builds an md5sum from a file in chunks.
Args:
file_path: The path to the file you want an md5sum for.
Returns:
The md5 digest of the file, as bytes.
|
codesearchnet
|
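A small sketch (temporary file, illustrative content) showing that the chunked digest matches hashing the whole file in one call:
import hashlib
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'hello world' * 1000)
    path = tmp.name
with open(path, 'rb') as f:
    assert _md5sum(path) == hashlib.md5(f.read()).digest()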
def CalculateWaitForRetry(retry_attempt, max_wait=60):
wait_time = 2 ** retry_attempt
max_jitter = wait_time / 4.0
wait_time += random.uniform(-max_jitter, max_jitter)
return max(1, min(wait_time, max_wait))
|
Calculates amount of time to wait before a retry attempt.
Wait time grows exponentially with the number of attempts. A
random amount of jitter is added to spread out retry attempts from
different clients.
Args:
retry_attempt: Retry attempt counter.
max_wait: Upper bound for wait time [seconds].
Returns:
Number of seconds to wait before retrying request.
|
juraj-google-style
|
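A worked example of the backoff arithmetic: for retry_attempt=4 the base wait is 2**4 = 16 s and the jitter is drawn uniformly from +/-16/4 = +/-4 s, so the result lies in [12, 20] before the clamp to [1, max_wait]. A quick illustrative check:
for _ in range(100):
    assert 12 <= CalculateWaitForRetry(4) <= 20
assert CalculateWaitForRetry(10) == 60   # 1024 s base wait is capped at max_wait=60
assert CalculateWaitForRetry(0) >= 1     # never waits less than one second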
def matrix_rank(a, tol=None, validate_args=False, name=None):
with ops.name_scope(name or 'matrix_rank'):
a = ops.convert_to_tensor(a, dtype_hint=dtypes.float32, name='a')
assertions = _maybe_validate_matrix(a, validate_args)
if assertions:
with ops.control_dependencies(assertions):
a = array_ops.identity(a)
s = svd(a, compute_uv=False)
if tol is None:
if a.shape[-2:].is_fully_defined():
m = np.max(a.shape[-2:].as_list())
else:
m = math_ops.reduce_max(array_ops.shape(a)[-2:])
eps = np.finfo(a.dtype.as_numpy_dtype).eps
tol = eps * math_ops.cast(m, a.dtype) * math_ops.reduce_max(s, axis=-1, keepdims=True)
return math_ops.reduce_sum(math_ops.cast(s > tol, dtypes.int32), axis=-1)
|
Compute the matrix rank of one or more matrices.
Args:
a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be
pseudo-inverted.
tol: Threshold below which the singular value is counted as 'zero'.
Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`).
validate_args: When `True`, additional assertions might be embedded in the
graph.
Default value: `False` (i.e., no graph assertions are added).
name: Python `str` prefixed to ops created by this function.
Default value: 'matrix_rank'.
Returns:
matrix_rank: (Batch of) `int32` scalars representing the number of non-zero
singular values.
|
github-repos
|
def open_image(fn):
flags = ((cv2.IMREAD_UNCHANGED + cv2.IMREAD_ANYDEPTH) + cv2.IMREAD_ANYCOLOR)
if ((not os.path.exists(fn)) and (not str(fn).startswith('http'))):
raise OSError('No such file or directory: {}'.format(fn))
elif (os.path.isdir(fn) and (not str(fn).startswith('http'))):
raise OSError('Is a directory: {}'.format(fn))
elif isdicom(fn):
slice = pydicom.read_file(fn)
if slice.PhotometricInterpretation.startswith('MONOCHROME'):
im = np.stack(([slice.pixel_array] * 3), (- 1))
return (im / ((1 << slice.BitsStored) - 1))
else:
raise OSError('Unsupported DICOM image with PhotometricInterpretation=={}'.format(slice.PhotometricInterpretation))
else:
try:
if str(fn).startswith('http'):
req = urllib.urlopen(str(fn))
image = np.asarray(bytearray(req.read()), dtype='uint8')
im = (cv2.imdecode(image, flags).astype(np.float32) / 255)
else:
im = (cv2.imread(str(fn), flags).astype(np.float32) / 255)
if (im is None):
raise OSError(f'File not recognized by opencv: {fn}')
return cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
except Exception as e:
raise OSError('Error handling image at: {}'.format(fn)) from e
|
Opens an image using OpenCV given the file path.
Arguments:
fn: the file path of the image
Returns:
The image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0
|
codesearchnet
|
def get_token(wallet: 'Wallet', token_str: str) -> 'NEP5Token.NEP5Token':
if token_str.startswith('0x'):
token_str = token_str[2:]
token = None
for t in wallet.GetTokens().values():
if (token_str in [t.symbol, t.ScriptHash.ToString()]):
token = t
break
if (not isinstance(token, NEP5Token.NEP5Token)):
raise ValueError('The given token argument does not represent a known NEP5 token')
return token
|
Try to get a NEP-5 token based on the symbol or script_hash
Args:
wallet: wallet instance
token_str: symbol or script_hash (accepts script hash with or without 0x prefix)
Raises:
ValueError: if token is not found
Returns:
NEP5Token instance if found.
|
codesearchnet
|
def __getitem__(self, slice_: Tuple[Union[slice, np.ndarray, int], Union[slice, np.ndarray, int]]) -> loompy.LoomView:
if type(slice_) is not tuple or len(slice_) != 2:
raise ValueError("Views require slices along two dimensions")
rows = slice_[0]
cols = slice_[1]
ra = self.ds.ra[rows]
row_graphs = self.ds.row_graphs[rows]
ca = self.ds.ca[cols]
col_graphs = self.ds.col_graphs[cols]
layers = self.ds.layer[rows, cols]
return loompy.LoomView(layers, ra, ca, row_graphs, col_graphs, filename=self.ds.filename, file_attrs=self.ds.attrs)
|
Create a new view by slicing through the loom file or view
Args:
slice_ (2-tuple of slice, int or np.ndarray): How to slice the file or view
Returns:
A LoomView object, an in-memory representation of the sliced file
|
juraj-google-style
|
def loads(conditions_string):
decoder = ConditionDecoder(_audience_condition_deserializer)
json_decoder = json.JSONDecoder(object_hook=decoder.object_hook)
condition_structure = json_decoder.decode(conditions_string)
condition_list = decoder.condition_list
return (condition_structure, condition_list)
|
Deserializes the conditions property into its corresponding
components: the condition_structure and the condition_list.
Args:
conditions_string: String defining valid and/or conditions.
Returns:
A tuple of (condition_structure, condition_list).
condition_structure: nested list of operators and placeholders for operands.
condition_list: list of conditions whose index correspond to the values of the placeholders.
|
juraj-google-style
|
def resolve_context(self, verbosity=0, max_fails=(- 1), timestamp=None, callback=None, buf=None, package_load_callback=None):
package_filter = PackageFilterList.from_pod(self.package_filter)
context = ResolvedContext(self.request, package_paths=self.packages_path, package_filter=package_filter, verbosity=verbosity, max_fails=max_fails, timestamp=timestamp, buf=buf, callback=callback, package_load_callback=package_load_callback, caching=self.caching)
if context.success:
if (self._context and self._context.load_path):
context.set_load_path(self._context.load_path)
self._set_context(context)
self._modified = True
return context
|
Update the current context by performing a re-resolve.
The newly resolved context is only applied if it is a successful solve.
Returns:
`ResolvedContext` object, which may be a successful or failed solve.
|
codesearchnet
|
def update(self, span: typing.Tuple[int, int], line_type: LineType) -> None:
first_block_line, last_block_line = span
for i in range(first_block_line, last_block_line + 1):
try:
self.__setitem__(i, line_type)
except ValueError as error:
raise ValidationError(i + self.fn_offset, 1, 'AAA99 {}'.format(error))
|
Updates line types for a block's span.
Args:
span: First and last relative line number of a Block.
line_type: The type of line to update to.
Raises:
ValidationError: A special error on collision. This prevents Flake8
from crashing because it is converted to a Flake8 error tuple,
but it indicates to the user that something went wrong with
processing the function.
|
juraj-google-style
|
def save_config(config, logdir=None):
if logdir:
with config.unlocked:
config.logdir = logdir
message = 'Start a new run and write summaries and checkpoints to {}.'
tf.logging.info(message.format(config.logdir))
tf.gfile.MakeDirs(config.logdir)
config_path = os.path.join(config.logdir, 'config.yaml')
with tf.gfile.FastGFile(config_path, 'w') as file_:
yaml.dump(config, file_, default_flow_style=False)
else:
message = 'Start a new run without storing summaries and checkpoints since no logging directory was specified.'
tf.logging.info(message)
return config
|
Save a new configuration by name.
If a logging directory is specified, it will be created and the configuration
will be stored there. Otherwise, a log message will be printed.
Args:
config: Configuration object.
logdir: Location for writing summaries and checkpoints if specified.
Returns:
Configuration object.
|
codesearchnet
|
def create_binary(self, key, value):
data = None
if ((key is not None) and (value is not None)):
try:
data = self.db.create(key.strip(), json.dumps(base64.b64encode(bytes(value)).decode('utf-8')))
except TypeError:
data = self.db.create(key.strip(), json.dumps(base64.b64encode(bytes(value, 'utf-8')).decode('utf-8')))
else:
self.tcex.log.warning(u'The key or value field was None.')
return data
|
Create method of CRUD operation for binary data.
Args:
key (string): The variable to write to the DB.
value (any): The data to write to the DB.
Returns:
(string): Result of DB write.
|
codesearchnet
|
def get_class_locals(cls_name: str, allow_methods: bool, ordering, ctx):
out = collections.OrderedDict()
if cls_name not in ctx.vm.local_ops:
return out
for op in ctx.vm.local_ops[cls_name]:
local = ctx.vm.annotated_locals[cls_name][op.name]
if not is_relevant_class_local(local, op.name, allow_methods):
continue
if ordering is Ordering.FIRST_ANNOTATE:
if not op.is_annotate() or op.name in out:
continue
else:
assert ordering is Ordering.LAST_ASSIGN
if not op.is_assign():
continue
elif op.name in out:
out.move_to_end(op.name)
out[op.name] = local
return out
|
Gets a dictionary of the class's local variables.
Args:
cls_name: The name of an abstract.InterpreterClass.
allow_methods: A bool, whether to allow methods as variables.
ordering: A classgen.Ordering describing the order in which the variables
should appear.
ctx: The abstract context.
Returns:
A collections.OrderedDict of the locals.
|
github-repos
|
def _ImageDimensions(images, dynamic_shape=False):
if dynamic_shape:
return array_ops.unpack(array_ops.shape(images))
else:
return images.get_shape().as_list()
|
Returns the dimensions of an image tensor.
Args:
images: 4-D Tensor of shape [batch, height, width, channels]
dynamic_shape: Whether the input image has undetermined shape. If set to
`True`, shape information will be retrieved at run time. Default to
`False`.
Returns:
list of integers [batch, height, width, channels]
|
juraj-google-style
|
def __init__(self, definitions: fhir_package.FhirPackageManager, handler: primitive_handler.PrimitiveHandler, error_reporter: fhir_errors.ErrorReporter, options: Optional[SqlGenerationOptions]=None) -> None:
self._options = options or SqlGenerationOptions()
self._context = context.MockFhirPathContext(definitions.iter_structure_definitions())
self._primitive_handler = handler
self._bq_interpreter = _bigquery_interpreter.BigQuerySqlInterpreter(value_set_codes_table=self._options.value_set_codes_table, value_set_codes_definitions=self._options.value_set_codes_definitions or definitions)
self._error_reporter = error_reporter
self._options.skip_keys.update(_SKIP_KEYS)
self._ctx: List[expressions.Builder] = []
self._in_progress: Set[_PathStep] = set()
self._type_code_to_regex_map: Dict[str, _RegexInfo] = {}
self._regex_columns_generated = set()
self._requirement_column_names: Set[str] = set()
self._visited_element_definitions: Set[Tuple[str, str]] = set()
self._visited_slices: Set[Tuple[str, str]] = set()
|
Creates a new instance of `FhirProfileStandardSqlEncoder`.
Args:
definitions: The FHIR resource "graph" for traversal and encoding of
constraints.
handler: Computes primitives with respect to the specification.
error_reporter: A `fhir_errors.ErrorReporter` delegate for error-handling.
options: Defines a list of optional settings that can be used to customize
the behaviour of FhirProfileStandardSqlEncoder.
|
github-repos
|
def brake_on(self):
data = []
data.append(0x0A)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(TORQUE_CONTROL_RAM)
data.append(0x01)
data.append(0x40)
send_data(data)
|
Set the Brakes of Herkulex
In braked mode, position control and velocity control
will not work, enable torque before that
Args:
none
|
juraj-google-style
|
def _clone_sequential_model(model, input_tensors=None, layer_fn=_clone_layer):
if not isinstance(model, Sequential):
raise ValueError('Expected `model` argument to be a `Sequential` model instance, but got:', model)
if not callable(layer_fn):
raise ValueError('Expected `layer_fn` argument to be a callable.')
layers = []
layer_map = {}
for layer in model._flatten_layers(include_self=False, recursive=False):
if isinstance(layer, InputLayer) and input_tensors is not None:
continue
cloned_layer = _clone_layer(layer) if isinstance(layer, InputLayer) else layer_fn(layer)
layers.append(cloned_layer)
layer_map[layer] = cloned_layer
layers, ancillary_layers = _remove_ancillary_layers(model, layer_map, layers)
if input_tensors is None:
cloned_model = Sequential(layers=layers, name=model.name)
elif len(generic_utils.to_list(input_tensors)) != 1:
raise ValueError('To clone a `Sequential` model, we expect at most one tensor as part of `input_tensors`.')
else:
if isinstance(input_tensors, tuple):
input_tensors = list(input_tensors)
x = generic_utils.to_list(input_tensors)[0]
if backend.is_keras_tensor(x):
origin_layer = x._keras_history.layer
if isinstance(origin_layer, InputLayer):
cloned_model = Sequential(layers=[origin_layer] + layers, name=model.name)
else:
raise ValueError('Cannot clone a `Sequential` model on top of a tensor that comes from a Keras layer other than an `InputLayer`. Use the functional API instead.')
else:
input_tensor = Input(tensor=x, name='input_wrapper_for_' + str(x.name))
input_layer = input_tensor._keras_history.layer
cloned_model = Sequential(layers=[input_layer] + layers, name=model.name)
if not ancillary_layers:
return cloned_model
tensor_map = {}
for depth, cloned_nodes in cloned_model._nodes_by_depth.items():
nodes = model._nodes_by_depth[depth]
for cloned_node, node in zip(cloned_nodes, nodes):
if isinstance(cloned_node.output_tensors, list):
for j, output_tensor in enumerate(cloned_node.output_tensors):
tensor_map[node.output_tensors[j]] = output_tensor
else:
tensor_map[node.output_tensors] = cloned_node.output_tensors
new_nodes = _make_new_nodes({depth: nodes for depth, nodes in model._nodes_by_depth.items() if depth < 0}, layer_fn, layer_map, tensor_map)
_insert_ancillary_layers(cloned_model, ancillary_layers, model.metrics_names, new_nodes)
return cloned_model
|
Clone a `Sequential` model instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
Args:
model: Instance of `Sequential`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
layer_fn: callable to be applied on non-input layers in the model. By
default it clones the layer. Another example is to preserve the layer
to share the weights. This is required when we create a per-replica
copy of the model with distribution strategy; we want the weights to
be shared but still feed inputs separately so we create new input
layers.
Returns:
An instance of `Sequential` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
Raises:
ValueError: in case of invalid `model` argument value or `layer_fn`
argument value.
|
github-repos
|
def __init__(self, unkeyed: ModelHandler[ExampleT, PredictionT, ModelT]):
if len(unkeyed.get_preprocess_fns()) or len(unkeyed.get_postprocess_fns()):
raise Exception('Cannot make an unkeyed model handler with pre or postprocessing functions defined into a keyed model handler. All pre/postprocessing functions must be defined on the outer modelhandler.')
self._unkeyed = unkeyed
self._env_vars = getattr(unkeyed, '_env_vars', {})
|
A ModelHandler that takes examples that might have keys and returns
predictions that might have keys.
For example, if the original model is used with RunInference to take a
PCollection[E] to a PCollection[P], this ModelHandler would take either
PCollection[E] to a PCollection[P] or PCollection[tuple[K, E]] to a
PCollection[tuple[K, P]], depending on the whether the elements are
tuples. This pattern makes it possible to associate the outputs with the
inputs based on the key.
Note that you cannot use this ModelHandler if E is a tuple type.
In addition, either all examples should be keyed, or none of them.
Args:
unkeyed: An implementation of ModelHandler that does not require keys.
|
github-repos
|
def split_string(str_src, spliters=None, elim_empty=False):
if is_string(spliters):
spliters = [spliters]
if spliters is None or not spliters:
spliters = [' ', '\t']
dest_strs = list()
src_strs = [str_src]
while True:
old_dest_strs = src_strs[:]
for s in spliters:
for src_s in src_strs:
temp_strs = src_s.split(s)
for temp_s in temp_strs:
temp_s = temp_s.strip()
if temp_s == '' and elim_empty:
continue
if is_string(temp_s):
temp_s = str(temp_s)
dest_strs.append(temp_s)
src_strs = dest_strs[:]
dest_strs = list()
if old_dest_strs == src_strs:
dest_strs = src_strs[:]
break
return dest_strs
|
Split string by split character space(' ') and indent('\t') as default
Examples:
>>> StringClass.split_string('exec -ini test.ini', ' ')
['exec', '-ini', 'test.ini']
Args:
str_src: source string
spliters: e.g. [' ', '\t'], [], ' ', None
elim_empty: Eliminate empty (i.e., '') or not.
Returns:
split sub-strings as list
|
juraj-google-style
|
def to_dict(self, remove_nones=False):
content = {}
for key in self._translation:
if hasattr(self, key):
content[key] = getattr(self, key)
content['parent_id'] = self.parent_id
content['item_id'] = self.item_id
content['restricted'] = self.restricted
content['title'] = self.title
if self.resources != []:
content['resources'] = [resource.to_dict(remove_nones=remove_nones)
for resource in self.resources]
content['desc'] = self.desc
return content
|
Return the dict representation of the instance.
Args:
remove_nones (bool, optional): Optionally remove dictionary
elements when their value is `None`.
Returns:
dict: a dict representation of the `DidlObject`.
|
juraj-google-style
|
def __init__(self, optimizer, scope='global-optimizer', summary_labels=()):
super(GlobalOptimizer, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)
|
Creates a new global optimizer instance.
Args:
optimizer: The optimizer which is modified by this meta optimizer.
|
juraj-google-style
|
def on_core_metadata_event(self, event):
raise NotImplementedError('on_core_metadata_event() is not implemented in the base servicer class')
|
Callback for core metadata.
Args:
event: The Event proto that carries a JSON string in its
`log_message.message` field.
Returns:
`None` or an `EventReply` proto to be sent back to the client. If `None`,
an `EventReply` proto construct with the default no-arg constructor will
be sent back to the client.
|
github-repos
|
def check(self, uid=None, usage_limits_count=None, cryptographic_usage_mask=None, lease_time=None):
if (uid is not None):
if (not isinstance(uid, six.string_types)):
raise TypeError('The unique identifier must be a string.')
if (usage_limits_count is not None):
if (not isinstance(usage_limits_count, six.integer_types)):
raise TypeError('The usage limits count must be an integer.')
if (cryptographic_usage_mask is not None):
if ((not isinstance(cryptographic_usage_mask, list)) or (not all((isinstance(x, enums.CryptographicUsageMask) for x in cryptographic_usage_mask)))):
raise TypeError('The cryptographic usage mask must be a list of CryptographicUsageMask enumerations.')
if (lease_time is not None):
if (not isinstance(lease_time, six.integer_types)):
raise TypeError('The lease time must be an integer.')
result = self.proxy.check(uid, usage_limits_count, cryptographic_usage_mask, lease_time)
status = result.get('result_status')
if (status == enums.ResultStatus.SUCCESS):
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(status, result.get('result_reason'), result.get('result_message'))
|
Check the constraints for a managed object.
Args:
uid (string): The unique ID of the managed object to check.
Optional, defaults to None.
usage_limits_count (int): The number of items that can be secured
with the specified managed object. Optional, defaults to None.
cryptographic_usage_mask (list): A list of CryptographicUsageMask
enumerations specifying the operations possible with the
specified managed object. Optional, defaults to None.
lease_time (int): The number of seconds that can be leased for the
specified managed object. Optional, defaults to None.
|
codesearchnet
|
def group(self, group_type=None, owner=None, **kwargs):
group = None
if not group_type:
return Group(self.tcex, None, None, owner=owner, **kwargs)
name = kwargs.pop('name', None)
group_type = group_type.upper()
if group_type == 'ADVERSARY':
group = Adversary(self.tcex, name, owner=owner, **kwargs)
if group_type == 'CAMPAIGN':
group = Campaign(self.tcex, name, owner=owner, **kwargs)
if group_type == 'DOCUMENT':
group = Document(self.tcex, name, kwargs.pop('file_name', None), owner=owner, **kwargs)
if group_type == 'EVENT':
group = Event(self.tcex, name, owner=owner, **kwargs)
if group_type == 'EMAIL':
group = Email(
self.tcex,
name,
kwargs.pop('to', None),
kwargs.pop('from_addr', None),
kwargs.pop('subject', None),
kwargs.pop('body', None),
kwargs.pop('header', None),
owner=owner,
**kwargs
)
if group_type == 'INCIDENT':
group = Incident(self.tcex, name, owner=owner, **kwargs)
if group_type == 'INTRUSION SET':
group = IntrusionSet(self.tcex, name, owner=owner, **kwargs)
if group_type == 'REPORT':
group = Report(self.tcex, name, owner=owner, **kwargs)
if group_type == 'SIGNATURE':
group = Signature(
self.tcex,
name,
kwargs.pop('file_name', None),
kwargs.pop('file_type', None),
kwargs.pop('file_text', None),
owner=owner,
**kwargs
)
if group_type == 'THREAT':
group = Threat(self.tcex, name, owner=owner, **kwargs)
if group_type == 'TASK':
group = Task(
self.tcex,
name,
kwargs.pop('status', 'Not Started'),
kwargs.pop('due_date', None),
kwargs.pop('reminder_date', None),
kwargs.pop('escalation_date', None),
owner=owner,
**kwargs
)
return group
|
Create the Group TI object.
Args:
owner:
group_type:
**kwargs:
Return:
|
juraj-google-style
|
async def send_event(self, con, name, payload):
message = dict(type='event', name=name, payload=payload)
encoded = pack(message)
(await con.send(encoded))
|
Send an event to a client connection.
This method will push an event message to the client with the given
name and payload. You need to have access to the ``connection``
object for the client, which is only available once the client has
connected and passed to self.prepare_conn(connection).
Args:
con (websockets.Connection): The connection to use to send
the event.
name (str): The name of the event to send.
payload (object): The msgpack-serializable object to send
as the event's payload.
|
codesearchnet
|
def create_blocking_connection(host):
return pika.BlockingConnection(
amqpdaemon.getConParams(
settings.get_amqp_settings()[host.lower()]["vhost"]
)
)
|
Return properly created blocking connection.
Args:
host (str): Host as it is defined in :func:`.get_amqp_settings`.
Uses :func:`edeposit.amqp.amqpdaemon.getConParams`.
|
juraj-google-style
|
def get_schema_path(schema, resolved=False):
def _strip_first_path_elem(path):
"Pass doctests.\n\n Strip the first element of the given path, returning an empty string if\n there are no more elements. For example, 'something/other' will end up\n as 'other', but passing then 'other' will return ''\n "
stripped_path = path.split(os.path.sep, 1)[1:]
return ''.join(stripped_path)
def _schema_to_normalized_path(schema):
"Pass doctests.\n\n Extracts the path from the url, makes sure to get rid of any '..' in\n the path and adds the json extension if not there.\n "
path = os.path.normpath((os.path.sep + urlsplit(schema).path))
if path.startswith(os.path.sep):
path = path[1:]
if (not path.endswith('.json')):
path += '.json'
return path
path = _schema_to_normalized_path(schema)
while path:
if resolved:
schema_path = os.path.abspath(os.path.join(_resolved_schema_root_path, path))
else:
schema_path = os.path.abspath(os.path.join(_schema_root_path, path))
if os.path.exists(schema_path):
return os.path.abspath(schema_path)
path = _strip_first_path_elem(path)
raise SchemaNotFound(schema=schema)
|
Retrieve the installed path for the given schema.
Args:
schema(str): relative or absolute url of the schema to validate, for
example, 'records/authors.json' or 'jobs.json', or just the name of the
schema, like 'jobs'.
resolved(bool): if True, the returned path points to a fully resolved
schema, that is to the schema with all `$ref` replaced by their
targets.
Returns:
str: path to the given schema name.
Raises:
SchemaNotFound: if no schema could be found.
|
codesearchnet
|
def _add_new_ide_controller_helper(ide_controller_label, controller_key, bus_number):
if (controller_key is None):
controller_key = randint((- 200), 250)
ide_spec = vim.vm.device.VirtualDeviceSpec()
ide_spec.device = vim.vm.device.VirtualIDEController()
ide_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
ide_spec.device.key = controller_key
ide_spec.device.busNumber = bus_number
ide_spec.device.deviceInfo = vim.Description()
ide_spec.device.deviceInfo.label = ide_controller_label
ide_spec.device.deviceInfo.summary = ide_controller_label
return ide_spec
|
Helper function for adding new IDE controllers
.. versionadded:: 2016.3.0
Args:
ide_controller_label: label of the IDE controller
controller_key: if not None, the controller key to use; otherwise it is randomly generated
bus_number: bus number
Returns: created device spec for an IDE controller
|
codesearchnet
|
def noisy_operation(self, operation: 'cirq.Operation') -> 'cirq.OP_TREE':
if not hasattr(self.noisy_moments, '_not_overridden'):
return self.noisy_moments([ops.Moment([operation])],
operation.qubits)
if not hasattr(self.noisy_moment, '_not_overridden'):
return self.noisy_moment(ops.Moment([operation]), operation.qubits)
assert False, 'Should be unreachable.'
|
Adds noise to an individual operation.
Args:
operation: The operation to make noisy.
Returns:
An OP_TREE corresponding to the noisy operations implementing the
noisy version of the given operation.
|
juraj-google-style
|
def decode_jpeg(image_buffer, scope=None):
with tf.name_scope(values=[image_buffer], name=scope,
default_name='decode_jpeg'):
image = tf.image.decode_jpeg(image_buffer, channels=3)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
return image
|
Decode a JPEG string into one 3-D float image Tensor.
Args:
image_buffer: scalar string Tensor.
scope: Optional scope for name_scope.
Returns:
3-D float Tensor with values ranging from [0, 1).
|
juraj-google-style
|
def __init__(self, model, ncats, alpha_lambda=1.0, beta_lambda=2.0,
freeparams=['alpha_lambda', 'beta_lambda']):
new_max_beta = DiscreteGamma(self.PARAMLIMITS["alpha_lambda"][1],
self.PARAMLIMITS["beta_lambda"][0], ncats)[-1]
new_limits = model.PARAMLIMITS
new_limits["beta"] = (new_limits["beta"][0], new_max_beta)
model.PARAMLIMITS = new_limits
super(GammaDistributedBetaModel, self).__init__(model, "beta",
ncats, alpha_lambda=1.0, beta_lambda=2.0,
freeparams=['alpha_lambda', 'beta_lambda'])
assert all([scipy.allclose(new_max_beta, m.PARAMLIMITS["beta"][1])
for m in self._models]), ("{0}\n{1}".format(
new_max_beta, '\n'.join([str(m.PARAMLIMITS["beta"][1])
for m in self._models])))
|
Initialize a `GammaDistributedModel` object.
The `lambda_param` is set to "beta".
Args:
`model` `ncats`,`alpha_lambda`, `beta_lambda`, `freeparams`
Meaning described in main class doc string for
`GammaDistributedModel`.
|
juraj-google-style
|
def run(self, dag):
num_dag_qubits = sum([qreg.size for qreg in dag.qregs.values()])
if (num_dag_qubits > self.coupling_map.size()):
raise TranspilerError('Number of qubits greater than device.')
best_sub = self._best_subset(num_dag_qubits)
layout = Layout()
map_iter = 0
for qreg in dag.qregs.values():
for i in range(qreg.size):
layout[(qreg, i)] = int(best_sub[map_iter])
map_iter += 1
self.property_set['layout'] = layout
|
Pick a convenient layout depending on the best matching
qubit connectivity, and set the property `layout`.
Args:
dag (DAGCircuit): DAG to find layout for.
Raises:
TranspilerError: if dag wider than self.coupling_map
|
codesearchnet
|
def add_observer(self, callback):
if callback in self._observers:
raise ValueError('{} is already an observer of {}'
.format(callback, self))
self._observers.append(callback)
|
Add an observer to this event.
Args:
callback: A function or coroutine callback to call when the event
is fired.
Raises:
ValueError: If the callback has already been added.
|
juraj-google-style
|
def serialize(self):
return gen_boosted_trees_ops.boosted_trees_serialize_ensemble(self.resource_handle)
|
Serializes the ensemble into proto and returns the serialized proto.
Returns:
stamp_token: int64 scalar Tensor to denote the stamp of the resource.
serialized_proto: string scalar Tensor of the serialized proto.
|
github-repos
|
def _expand_to_beam_size(tensor, beam_size):
tensor = tf.expand_dims(tensor, axis=1)
tile_dims = [1] * tensor.shape.ndims
tile_dims[1] = beam_size
return tf.tile(tensor, tile_dims)
|
Tiles a given tensor by beam_size.
Args:
tensor: tensor to tile [batch_size, ...]
beam_size: How much to tile the tensor by.
Returns:
Tiled tensor [batch_size, beam_size, ...]
|
juraj-google-style
|
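A shape-level sketch of the tiling (assuming TensorFlow is importable; the sizes are arbitrary):
import tensorflow as tf

t = tf.zeros([4, 10])                        # [batch_size, ...]
tiled = _expand_to_beam_size(t, beam_size=3)
assert tiled.shape.as_list() == [4, 3, 10]   # beam axis inserted at position 1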
def _text_checker(job, interval, _interval_set=False, quiet=False, output=sys.stdout):
status = job.status()
msg = status.value
prev_msg = msg
msg_len = len(msg)
if not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=output)
while status.name not in ['DONE', 'CANCELLED', 'ERROR']:
time.sleep(interval)
status = job.status()
msg = status.value
if status.name == 'QUEUED':
msg += ' (%s)' % job.queue_position()
if not _interval_set:
interval = max(job.queue_position(), 2)
else:
if not _interval_set:
interval = 2
if len(msg) < msg_len:
msg += ' ' * (msg_len - len(msg))
elif len(msg) > msg_len:
msg_len = len(msg)
if msg != prev_msg and not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=output)
prev_msg = msg
if not quiet:
print('', file=output)
|
A text-based job status checker
Args:
job (BaseJob): The job to check.
interval (int): The interval at which to check.
_interval_set (bool): Was interval time set by user?
quiet (bool): If True, do not print status messages.
output (file): The file like object to write status messages to.
By default this is sys.stdout.
|
juraj-google-style
|
def format_error_message(exception_message, task_exception=False):
lines = exception_message.split('\n')
if task_exception:
lines = (lines[0:1] + lines[3:])
pass
return '\n'.join(lines)
|
Improve the formatting of an exception thrown by a remote function.
This method takes a traceback from an exception and makes it nicer by
removing a few uninformative lines and adding some space to indent the
remaining lines nicely.
Args:
exception_message (str): A message generated by traceback.format_exc().
Returns:
A string of the formatted exception message.
|
codesearchnet
|
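A quick illustrative check of the line filtering: with task_exception=True, the second and third lines of the traceback are dropped:
msg = 'Traceback (most recent call last):\nA\nB\nC\nD'
assert format_error_message(msg, task_exception=True) == \
    'Traceback (most recent call last):\nC\nD'
assert format_error_message(msg) == msg   # unchanged without task_exception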
def double_linked_dom(str_or_dom):
dom = str_or_dom
if not isinstance(str_or_dom, dhtmlparser.HTMLElement):
dom = dhtmlparser.parseString(str_or_dom)
dhtmlparser.makeDoubleLinked(dom)
return dom
|
Create double linked DOM from input.
In case of string, parse it, make it double-linked. In case of DOM, just
make it double-linked.
Args:
str_or_dom (str/HTMLelement): String or HTMLelement instance.
Returns:
obj: HTMLelement with parsed, double-linked content from `str_or_dom`.
|
juraj-google-style
|
def get_logger(name):
logger = logging.getLogger(name)
logger.addHandler(logging.NullHandler())
return logger
|
Gets a logger
Arguments:
name - the name you wish to log as
Returns:
A logger!
|
codesearchnet
|
def get_restore_path(self, status=None):
status = self.get_status() if status is None else status
return config.get_restore_path(status.name.lower())
|
get_restore_path: get path to restoration file
Args:
status (str): step to get restore file (optional)
Returns: string path to restoration file
|
juraj-google-style
|
def get_gated_grpc_tensors(self, matching_debug_op=None):
with self._grpc_gated_lock:
matching_debug_op = (matching_debug_op or 'DebugIdentity')
if (matching_debug_op not in self._grpc_gated_tensors):
node_name_to_op_type = dict(((node.name, node.op) for node in self._graph_def.node))
gated = []
for node in self._graph_def.node:
if (node.op == matching_debug_op):
for attr_key in node.attr:
if ((attr_key == 'gated_grpc') and node.attr[attr_key].b):
(node_name, output_slot, _, debug_op) = debug_graphs.parse_debug_node_name(node.name)
gated.append((node_name, node_name_to_op_type[node_name], output_slot, debug_op))
break
self._grpc_gated_tensors[matching_debug_op] = gated
return self._grpc_gated_tensors[matching_debug_op]
|
Extract all nodes with gated-gRPC debug ops attached.
Uses cached values if available.
This method is thread-safe.
Args:
matching_debug_op: Return tensors and nodes with only matching the
specified debug op name (optional). If `None`, will extract only
`DebugIdentity` debug ops.
Returns:
A list of (node_name, op_type, output_slot, debug_op) tuples.
|
codesearchnet
|
def _create_output_from_match(self, match_result):
if isinstance(match_result, dict):
return LinterOutput(self.name, **match_result)
return LinterOutput(self.name, *match_result)
|
Create Result instance from pattern match results.
Args:
match: Pattern match.
|
codesearchnet
|
def write_var_int(self, value, little_endian=True):
if not isinstance(value, int):
raise SDKException(ErrorCode.param_err('%s not int type.' % value))
if value < 0:
raise SDKException(ErrorCode.param_err('%d too small.' % value))
elif value < 0xfd:
return self.write_byte(value)
elif value <= 0xffff:
self.write_byte(0xfd)
return self.write_uint16(value, little_endian)
elif value <= 0xFFFFFFFF:
self.write_byte(0xfe)
return self.write_uint32(value, little_endian)
else:
self.write_byte(0xff)
return self.write_uint64(value, little_endian)
|
Write an integer value in a space saving way to the stream.
Args:
value (int):
little_endian (bool): specify the endianness. (Default) Little endian.
Raises:
SDKException: if `value` is not of type int.
SDKException: if `value` is < 0.
Returns:
int: the number of bytes written.
|
juraj-google-style
|
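A sketch of the resulting encoding sizes (the `writer` below stands for an already-constructed instance of the class defining `write_var_int`; the name is hypothetical):
# value < 0xfd        -> 1 byte  (the value itself)
# value <= 0xffff     -> 3 bytes (0xfd prefix + uint16)
# value <= 0xffffffff -> 5 bytes (0xfe prefix + uint32)
# larger values       -> 9 bytes (0xff prefix + uint64)
writer.write_var_int(0x10)        # writes b'\x10'
writer.write_var_int(0x1234)      # writes b'\xfd\x34\x12' (little-endian payload)
writer.write_var_int(0x12345678)  # writes b'\xfe\x78\x56\x34\x12'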
def _GetDictFromStringsTable(self, parser_mediator, table):
if (not table):
return {}
record_values = {}
for record in table.records:
if parser_mediator.abort:
break
if (record.get_number_of_values() != 2):
continue
identification = self._GetRecordValue(record, 0)
filename = self._GetRecordValue(record, 1)
if (not identification):
continue
record_values[identification] = filename
return record_values
|
Build a dictionary of the value in the strings table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
table (pyesedb.table): strings table.
Returns:
dict[str,object]: values per column name.
|
codesearchnet
|
def __init__(self, schema, max_files_per_bundle=_DEFAULT_MAX_WRITERS_PER_BUNDLE, max_file_size=_DEFAULT_MAX_FILE_SIZE, file_format=None):
self.schema = schema
self.max_files_per_bundle = max_files_per_bundle
self.max_file_size = max_file_size
self.file_format = file_format or bigquery_tools.FileFormat.JSON
|
Initialize a :class:`WriteRecordsToFile`.
Args:
max_files_per_bundle (int): The maximum number of files that can be kept
open during execution of this step in a worker. This is to avoid over-
whelming the worker memory.
max_file_size (int): The maximum size in bytes for a file to be used in
an export job.
|
github-repos
|
def set_agent(self, agent):
self.agent = agent
self.queue = asyncio.Queue(loop=self.agent.loop)
self.presence = agent.presence
self.web = agent.web
|
Links behaviour with its owner agent
Args:
agent (spade.agent.Agent): the agent who owns the behaviour
|
juraj-google-style
|
def __init__(self, initial_learning_rate, decay_steps, initial_variance=1.0, variance_decay=0.55, num_periods=0.5, alpha=0.0, beta=0.001, name=None):
super(NoisyLinearCosineDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.initial_variance = initial_variance
self.variance_decay = variance_decay
self.num_periods = num_periods
self.alpha = alpha
self.beta = beta
self.name = name
|
Applies noisy linear cosine decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python
number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
initial_variance: initial variance for the noise. See computation above.
variance_decay: decay for the noise's variance. See computation above.
num_periods: Number of periods in the cosine part of the decay.
See computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'NoisyLinearCosineDecay'.
|
github-repos
|
def _generate_unique_name(self, symbol_name: str) -> str:
normalized_symbol_name = self._NON_SYMBOL_NAME_CHARS_REGEX.sub('_', symbol_name)
normalized_name_conflict_count = self._num_symbols_with_normalized_name.get(normalized_symbol_name, 0)
self._num_symbols_with_normalized_name[normalized_symbol_name] = normalized_name_conflict_count + 1
optional_disambiguation_suffix = '' if normalized_name_conflict_count == 0 else f'_{normalized_name_conflict_count}'
return f'{normalized_symbol_name}{optional_disambiguation_suffix}'
|
Translates a symbol name to a unique FileCheck capture name.
Replaces all characters other than letters, numbers, and underscores with
underscores. If the resulting name has already been used, appends a counter
to disambiguate it. For example, this could result in the following sequence
of replacements:
1.) "foo.bar.baz" -> "foo_bar_baz"
2.) "foo.bar_baz" -> "foo_bar_baz_1"
3.) "foo_bar.baz" -> "foo_bar_baz_2"
4.) "foo_bar_baz" -> "foo_bar_baz_3"
Args:
symbol_name: The original symbol name.
Returns:
The generated FileCheck capture name.
|
github-repos
|
def load_and_print_resfile(filename, info_dict=None):
if (info_dict is None):
info_dict = dict()
info_dict['mass'] = 1.23
info_dict['nom_cap'] = 3600
info_dict['tot_mass'] = 2.33
d = CellpyData()
print('filename:', filename)
print('info_dict in:', end=' ')
print(info_dict)
d.from_raw(filename)
d.set_mass(info_dict['mass'])
d.make_step_table()
d.make_summary()
for test in d.datasets:
print('newtest')
print(test)
return info_dict
|
Load a raw data file and print information.
Args:
filename (str): name of the resfile.
info_dict (dict):
Returns:
info_dict (dict): the info dictionary with defaults filled in.
|
codesearchnet
|
def compress(element):
element_spec = structure.type_spec_from_value(element)
tensor_list = structure.to_tensor_list(element_spec, element)
return ged_ops.compress_element(tensor_list)
|
Compress a dataset element.
Args:
element: A nested structure of types supported by Tensorflow.
Returns:
A variant tensor representing the compressed element. This variant can be
passed to `uncompress` to get back the original element.
|
github-repos
|
def _RemoveAllFlagAppearances(self, name):
flag_dict = self.FlagDict()
if (name not in flag_dict):
raise exceptions.UnrecognizedFlagError(name)
flag = flag_dict[name]
names_to_remove = {name}
names_to_remove.add(flag.name)
if flag.short_name:
names_to_remove.add(flag.short_name)
for n in names_to_remove:
self.__delattr__(n)
|
Removes flag with name for all appearances.
A flag can be registered with its long name and an optional short name.
This method removes both of them. This is different than __delattr__.
Args:
name: Either flag's long name or short name.
Raises:
UnrecognizedFlagError: When flag name is not found.
|
codesearchnet
|
def test_enrichment(pcoll, enrichment_handler: str, handler_config: Dict[str, Any], timeout: Optional[float]=30):
if enrichment_handler == 'BigTable':
row_key = handler_config['row_key']
bt_data = INPUT_TABLES['BigTable', handler_config['instance_id'], handler_config['table_id']]
products = {str(data[row_key]): data for data in bt_data}
def _fn(row):
left = row._asdict()
right = products[str(left[row_key])]
left['product'] = left.get('product', None) or right
return beam.Row(**left)
elif enrichment_handler == 'BigQuery':
row_key = handler_config['fields']
dataset, table = handler_config['table_name'].split('.')[-2:]
bq_data = INPUT_TABLES['BigQuery', str(dataset), str(table)]
bq_data = {tuple((str(data[key]) for key in row_key)): data for data in bq_data}
def _fn(row):
left = row._asdict()
right = bq_data[tuple((str(left[k]) for k in row_key))]
row = {key: left.get(key, None) or right[key] for key in {*left.keys(), *right.keys()}}
return beam.Row(**row)
else:
raise ValueError(f'{enrichment_handler} is not a valid enrichment_handler.')
return pcoll | beam.Map(_fn)
|
Mocks the Enrichment transform for testing purposes.
This PTransform simulates the behavior of the Enrichment transform by
looking up data from predefined in-memory tables based on the provided
`enrichment_handler` and `handler_config`.
Note: The Github action that invokes these tests does not have gcp
dependencies installed which is a prerequisite to
apache_beam.transforms.enrichment.Enrichment as a top-level import.
Args:
pcoll: The input PCollection.
enrichment_handler: A string indicating the type of enrichment handler
to simulate (e.g., 'BigTable', 'BigQuery').
handler_config: A dictionary containing configuration details for the
simulated handler (e.g., table names, row keys, fields).
timeout: An optional timeout value (ignored in this mock).
Returns:
A PCollection containing the enriched data.
|
github-repos
|
def capture_image(self, device_label):
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
|
Capture smartcam image
Args:
device_label (str): device label of camera
|
juraj-google-style
|
def stream_sync(self, report, timeout=120.0):
done = AwaitableResponse()
self.stream(report, callback=done.set_result)
return done.wait(timeout)
|
Send a report and wait for it to finish.
This awaitable coroutine wraps VirtualIOTileDevice.stream() and turns
the callback into an awaitable object. The appropriate usage of this
method is by calling it inside the event loop as:
await device.stream_sync(data)
Args:
report (IOTileReport): The report that should be streamed.
timeout (float): The maximum number of seconds to wait before
timing out.
Returns:
awaitable: An awaitable object with the result.
The result will be True if the data was sent successfully
or False if the data could not be sent in its entirety.
When False is returned, there is no guarantee about how much of
the data was sent, if any, just that it was not known to be
successfully sent.
|
codesearchnet
|
def nr_cases(self, institute_id=None):
query = {}
if institute_id:
query['collaborators'] = institute_id
LOG.debug("Fetch all cases with query {0}".format(query))
nr_cases = self.case_collection.find(query).count()
return nr_cases
|
Return the number of cases
This function will change when we migrate to 3.7.1
Args:
collaborator(str): Institute id
Returns:
nr_cases(int)
|
juraj-google-style
|
def _Inputs(op: ops.Operation, xs_set):
if _IsFunction(op.graph):
inputs = []
for t in op.inputs:
if t not in xs_set:
t = _MaybeCaptured(t)
inputs.append(t)
return inputs
else:
return op.inputs
|
Returns the inputs of op, crossing closure boundaries where necessary.
Args:
op: Operation
xs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t.
Returns:
A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op
is in a FuncGraph and has captured inputs.
|
github-repos
|
def _index_to_ansi_values(self, index):
if self.__class__.__name__[0] == 'F':
if index < 8:
index += ANSI_FG_LO_BASE
else:
index += (ANSI_FG_HI_BASE - 8)
else:
if index < 8:
index += ANSI_BG_LO_BASE
else:
index += (ANSI_BG_HI_BASE - 8)
return [str(index)]
|
Converts a palette index to the corresponding ANSI color.
Arguments:
index - an int (from 0-15)
Returns:
index as str in a list for compatibility with values.
|
juraj-google-style
|
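A worked example of the offset arithmetic, assuming the standard ANSI SGR bases (ANSI_FG_LO_BASE=30, ANSI_FG_HI_BASE=90; backgrounds use 40 and 100): on a foreground palette class, index 3 maps to '33' and index 9 maps to '91'; on a background class the same indices map to '43' and '101'.
ANSI_FG_LO_BASE, ANSI_FG_HI_BASE = 30, 90          # illustrative constants
assert str(3 + ANSI_FG_LO_BASE) == '33'            # normal foreground entry (index < 8)
assert str(9 + (ANSI_FG_HI_BASE - 8)) == '91'      # bright foreground entry (index >= 8)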
def notify_batches_finished(self, statuses):
with self._wait_condition:
self._statuses = statuses
self._wait_condition.notify()
|
Called by the BatchTracker the _BatchWaiter is observing. Should not
be called by handlers.
Args:
statuses (dict of int): A dict with keys of batch ids, and values
of status enums
|
juraj-google-style
|
def add_sample(a_float, dist):
(dist_type, _) = _detect_bucket_option(dist)
if (dist_type == u'exponentialBuckets'):
_update_general_statistics(a_float, dist)
_update_exponential_bucket_count(a_float, dist)
elif (dist_type == u'linearBuckets'):
_update_general_statistics(a_float, dist)
_update_linear_bucket_count(a_float, dist)
elif (dist_type == u'explicitBuckets'):
_update_general_statistics(a_float, dist)
_update_explicit_bucket_count(a_float, dist)
else:
_logger.error(u'Could not determine bucket option type for %s', dist)
raise ValueError(u'Unknown bucket option type')
|
Adds `a_float` to `dist`, updating its existing buckets.
Args:
a_float (float): a new value
dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
the Distribution being updated
Raises:
ValueError: if `dist` does not have known bucket options defined
ValueError: if there are not enough bucket count fields in `dist`
|
codesearchnet
|
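The `_update_exponential_bucket_count` helper is not shown above. The sketch below illustrates, under assumed field names (`num_finite_buckets`, `growth_factor`, `scale`), how an exponential bucket index is typically computed for such a distribution; it is an illustration of the bucketing scheme, not the library's implementation.
```python
import math

def exponential_bucket_index(a_float, num_finite_buckets, growth_factor, scale):
    # Bucket 0 is the underflow bucket; bucket num_finite_buckets + 1 is overflow.
    # Finite bucket i covers [scale * growth_factor**(i-1), scale * growth_factor**i).
    if a_float < scale:
        return 0
    index = 1 + int(math.log(a_float / scale, growth_factor))
    return min(index, num_finite_buckets + 1)

counts = [0] * (5 + 2)                      # underflow + 5 finite buckets + overflow
counts[exponential_bucket_index(12.0, 5, 2.0, 1.0)] += 1
print(counts)                               # 12.0 lands in bucket 4, i.e. [8, 16)
```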
def metadata(self):
if (self._info is None):
try:
self._info = self._api.buckets_get(self._name)
except Exception as e:
raise e
return (BucketMetadata(self._info) if self._info else None)
|
Retrieves metadata about the bucket.
Returns:
A BucketMetadata instance with information about this bucket.
Raises:
Exception if there was an error requesting the bucket's metadata.
|
codesearchnet
|
def load_extra_data(cls, data):
try:
cls._extra_config.update(json.loads(data))
except ValueError as exception:
sys.stderr.write('Could not convert to JSON. {0!s}'.format(exception))
exit(-1)
|
Loads extra JSON configuration parameters from a data buffer.
The data buffer must represent a JSON object.
Args:
data: str, the buffer to load the JSON data from.
|
juraj-google-style
|
def potcar_eatom_list_from_outcar(filename='OUTCAR'):
with open(filename) as f:
outcar = f.read()
eatom_re = re.compile('energy of atom\\s+\\d+\\s+EATOM=\\s*([-\\d\\.]+)')
eatom = [float(e) for e in eatom_re.findall(outcar)]
return eatom
|
Returns a list of EATOM values for the pseudopotentials used.
Args:
filename (Str, optional): OUTCAR filename. Defaults to 'OUTCAR'.
Returns:
(List(Float)): A list of EATOM values, in the order they appear in the OUTCAR.
|
codesearchnet
|
def build_graph(device, n, m, k, transpose_a, transpose_b, dtype):
with ops.device('%s' % device):
if not transpose_a:
x = variable_v1.VariableV1(random_ops.random_uniform([n, m], dtype=dtype), use_resource=False)
else:
x = variable_v1.VariableV1(random_ops.random_uniform([m, n], dtype=dtype), use_resource=False)
if not transpose_b:
y = variable_v1.VariableV1(random_ops.random_uniform([m, k], dtype=dtype), use_resource=False)
else:
y = variable_v1.VariableV1(random_ops.random_uniform([k, m], dtype=dtype), use_resource=False)
z = math_ops.matmul(x, y, transpose_a=transpose_a, transpose_b=transpose_b)
return control_flow_ops.group(z)
|
Build a graph containing a sequence of matmul operations.
Args:
device: String, the device to run on.
n: tensor A's first dimension size.
m: tensor A's second dimension size.
k: tensor B's second dimension size.
transpose_a: boolean value to show if tensor A is transposed.
transpose_b: boolean value to show if tensor B is transposed.
dtype: numpy data type of the input tensor.
Returns:
A matmul operation to run()
|
github-repos
|
def __init__(self, val, unit, unit_type=None):
if unit_type is not None and str(unit) not in ALL_UNITS[unit_type]:
raise UnitError(
"{} is not a supported unit for {}".format(unit, unit_type))
self._unit = Unit(unit)
self._unit_type = unit_type
|
Initializes a float with unit.
Args:
val (float): Value
unit (Unit): A unit. E.g., "C".
unit_type (str): A type of unit. E.g., "charge"
|
juraj-google-style
|
def cancel(self):
try:
del self._protocol._consumers[self.queue]
except (KeyError, AttributeError):
pass
try:
del self._protocol.factory._consumers[self.queue]
except (KeyError, AttributeError):
pass
self._running = False
(yield self._read_loop)
try:
(yield self._channel.basic_cancel(consumer_tag=self._tag))
except pika.exceptions.AMQPChannelError:
pass
try:
(yield self._channel.close())
except pika.exceptions.AMQPChannelError:
pass
if (not self.result.called):
self.result.callback(self)
|
Cancel the consumer and clean up resources associated with it.
Consumers that are canceled are allowed to finish processing any
messages before halting.
Returns:
defer.Deferred: A deferred that fires when the consumer has finished
processing any message it was in the middle of and has been successfully
canceled.
|
codesearchnet
|
def _new_open_bin(self, width=None, height=None, rid=None):
factories_to_delete = set()
new_bin = None
for (key, binfac) in self._empty_bins.items():
if (not binfac.fits_inside(width, height)):
continue
new_bin = binfac.new_bin()
if (new_bin is None):
continue
self._open_bins.append(new_bin)
if binfac.is_empty():
factories_to_delete.add(key)
break
for f in factories_to_delete:
del self._empty_bins[f]
return new_bin
|
Extract the next empty bin and append it to open bins
Returns:
PackingAlgorithm: Initialized empty packing bin.
None: No bin big enough for the rectangle was found
|
codesearchnet
|
def __init__(self, cls):
super(EnumType, self).__init__()
self._cls = cls
|
Create a new EnumType. This new EnumType requires a class object in the
constructor. The class is used to construct new instances of the Enum
when the integer value is retrieved from the database.
Args:
cls(class): An Enum class used to create new instances from integer
values.
|
juraj-google-style
|
def with_params(self, params):
copy = params.copy()
copy.update(self._params)
return self.__copy_and_set('params', copy)
|
Adds parameters to the request params
Args:
params (dict): The parameters to add to the request params
Returns:
The request builder instance in order to chain calls
|
codesearchnet
|
def convert_dict_to_params(src_dict):
return '&'.join(['{}={}'.format(key, value) for (key, value) in src_dict.items()])
|
convert dict to params string
Args:
src_dict (dict): source mapping data structure
Returns:
str: string params data
Examples:
>>> src_dict = {
...     "a": 1,
...     "b": 2
... }
>>> convert_dict_to_params(src_dict)
'a=1&b=2'
|
codesearchnet
|
def __split_off_extra_attributes(self, mapping: CommentedMap, known_attrs: List[str]) -> CommentedMap:
attr_names = list(mapping.keys())
main_attrs = mapping.copy()
extra_attrs = OrderedDict(mapping.items())
for name in attr_names:
if ((name not in known_attrs) or (name == 'yatiml_extra')):
del main_attrs[name]
else:
del extra_attrs[name]
main_attrs['yatiml_extra'] = extra_attrs
return main_attrs
|
Separates the extra attributes in mapping into yatiml_extra.
This returns a mapping containing all key-value pairs from \
mapping whose key is in known_attrs, and an additional key \
yatiml_extra which maps to a dict containing the remaining \
key-value pairs.
Args:
mapping: The mapping to split
known_attrs: Attributes that should be kept in the main \
map, and not moved to yatiml_extra.
Returns:
A map with attributes reorganised as described above.
|
codesearchnet
|
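A plain-dict illustration of the split performed above; the real method operates on a ruamel.yaml `CommentedMap`, so this stand-alone version (with hypothetical inputs) only demonstrates the resulting structure.
```python
from collections import OrderedDict

def split_off_extra_attributes(mapping, known_attrs):
    # Keep known attributes in the main map, move everything else under 'yatiml_extra'.
    main_attrs = OrderedDict()
    extra_attrs = OrderedDict()
    for name, value in mapping.items():
        if name in known_attrs and name != 'yatiml_extra':
            main_attrs[name] = value
        else:
            extra_attrs[name] = value
    main_attrs['yatiml_extra'] = extra_attrs
    return main_attrs

print(split_off_extra_attributes(
    OrderedDict([('a', 1), ('b', 2), ('c', 3)]), known_attrs=['a']))
# OrderedDict([('a', 1), ('yatiml_extra', OrderedDict([('b', 2), ('c', 3)]))])
```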
def fetch(self, card_id, data={}, **kwargs):
return super(Card, self).fetch(card_id, data, **kwargs)
|
Fetch Card for given Id
Args:
card_id : Id for which card object has to be retrieved
Returns:
Card dict for given card Id
|
codesearchnet
|
def _ClassifyInclude(fileinfo, include, is_system):
is_cpp_h = (include in _CPP_HEADERS)
if (is_system and (os.path.splitext(include)[1] in ['.hpp', '.hxx', '.h++'])):
is_system = False
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
(target_dir, target_base) = os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName()))
(include_dir, include_base) = os.path.split(_DropCommonSuffixes(include))
target_dir_pub = os.path.normpath((target_dir + '/../public'))
target_dir_pub = target_dir_pub.replace('\\', '/')
if ((target_base == include_base) and ((include_dir == target_dir) or (include_dir == target_dir_pub))):
return _LIKELY_MY_HEADER
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and (target_first_component.group(0) == include_first_component.group(0))):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
|
Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
|
codesearchnet
|
def generate_output_entities(self, json_data=None, hr=True, show_name=False, colorize=True):
output = ''
short = (HR_RDAP['entities']['_short'] if hr else 'entities')
name = (HR_RDAP['entities']['_name'] if (hr and show_name) else None)
output += generate_output(line='0', short=short, name=name, is_parent=(False if ((json_data is None) or (json_data['entities'] is None)) else True), value=('None' if ((json_data is None) or (json_data['entities'] is None)) else None), colorize=colorize)
if (json_data is not None):
for ent in json_data['entities']:
output += generate_output(line='1', value=ent, colorize=colorize)
return output
|
The function for generating CLI output for RDAP entity results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
|
codesearchnet
|
def response_data_to_model_instance(self, response_data):
response_data["datetime_created"] = dateutil.parser.parse(
response_data["datetime_created"]
)
if response_data["datetime_finished"]:
response_data["datetime_finished"] = dateutil.parser.parse(
response_data["datetime_finished"]
)
return super(
BaseTaskInstanceManager, self
).response_data_to_model_instance(response_data)
|
Convert response data to a task instance model.
Args:
response_data (dict): The data from the request's response.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
A task instance model instance representing the task
instance from the response data.
|
juraj-google-style
|
def send_recv(self, message, timeout=10.0):
response_queue = self.send(message)
response = self.recv(response_queue, timeout)
return response
|
Send a message to a PandABox and wait for the response
Args:
message (str): The message to send
timeout (float): How long to wait before raising queue.Empty
Returns:
str: The response
|
juraj-google-style
|
def _check_positional_parameter_annotations(method_signature, base_signature, is_subtype):
for param_index in range(max(len(base_signature.param_names), len(method_signature.param_names))):
if param_index == 0:
continue
if param_index < len(base_signature.param_names):
base_param_name = base_signature.param_names[param_index]
elif base_signature.varargs_name:
base_param_name = base_signature.varargs_name
else:
break
try:
base_param_type = base_signature.annotations[base_param_name]
except KeyError:
continue
if base_param_name == base_signature.varargs_name:
base_param_type = _get_varargs_annotation_type(base_param_type)
if base_param_type is None:
continue
if param_index < method_signature.posonly_count:
method_param_name = method_signature.param_names[param_index]
elif param_index < len(method_signature.param_names):
if base_param_name == '_' or method_signature.param_names[param_index] == '_':
method_param_name = method_signature.param_names[param_index]
else:
method_param_name = base_param_name
elif method_signature.varargs_name:
method_param_name = method_signature.varargs_name
else:
break
try:
method_param_type = method_signature.annotations[method_param_name]
except KeyError:
continue
if method_param_name == method_signature.varargs_name:
method_param_type = _get_varargs_annotation_type(method_param_type)
if method_param_type is None:
continue
if not is_subtype(base_param_type, method_param_type):
return SignatureError(SignatureErrorType.POSITIONAL_PARAMETER_TYPE_MISMATCH, f"Type mismatch for parameter '{method_param_name}'.")
return None
|
Checks type annotations for positional parameters of the overriding method.
Args:
method_signature: signature of the overriding method.
base_signature: signature of the overridden method.
is_subtype: a binary function to compare types.
Returns:
SignatureError if a mismatch is detected. Otherwise returns None.
|
github-repos
|
def plot_ticks(ax, tick_fontsize=12, xticks=None, xticks_args=None, yticks=None, yticks_args=None, zticks=None, zticks_args=None):
if (xticks is not None):
ax.set_xticks(xticks)
xticks_args = dict_if_none(xticks_args)
ax.xaxis.set_tick_params(labelsize=tick_fontsize, **xticks_args)
if (yticks is not None):
ax.set_yticks(yticks)
yticks_args = dict_if_none(yticks_args)
ax.yaxis.set_tick_params(labelsize=tick_fontsize, **yticks_args)
if (zticks is not None):
ax.set_zticks(zticks)
zticks_args = dict_if_none(zticks_args)
ax.zaxis.set_tick_params(labelsize=tick_fontsize, **zticks_args)
|
Function that defines the tick options of a matplotlib plot.
Args:
ax: matplotlib axes
tick_fontsize (int): Defines the size of the ticks' font
xticks (list of ticks): Defines the values of x ticks in the figure
xticks_args (dict): Passed into matplotlib as xticks arguments
yticks (list of ticks): Defines the values of y ticks in the figure
yticks_args (dict): Passed into matplotlib as yticks arguments
zticks (list of ticks): Defines the values of z ticks in the figure
zticks_args (dict): Passed into matplotlib as zticks arguments
|
codesearchnet
|
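A brief usage sketch for `plot_ticks` above; the axes, tick values, and tick arguments are illustrative, and `zticks` only applies to a 3D axes (e.g. one created with `projection='3d'`).
```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])

# Assumes plot_ticks is importable from the module shown above.
plot_ticks(ax,
           tick_fontsize=10,
           xticks=[0, 1, 2],
           xticks_args={'labelrotation': 45},
           yticks=[0, 2, 4])
plt.show()
```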
class CSVLogger(Callback):
def __init__(self, filename, separator=',', append=False):
self.sep = separator
self.filename = path_to_string(filename)
self.append = append
self.writer = None
self.keys = None
self.append_header = True
super(CSVLogger, self).__init__()
def on_train_begin(self, logs=None):
if self.append:
if file_io.file_exists_v2(self.filename):
with gfile.GFile(self.filename, 'r') as f:
self.append_header = not bool(len(f.readline()))
mode = 'a'
else:
mode = 'w'
self.csv_file = gfile.GFile(self.filename, mode)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, str):
return k
elif isinstance(k, collections.abc.Iterable) and (not is_zero_dim_ndarray):
return '"[%s]"' % ', '.join(map(str, k))
else:
return k
if self.keys is None:
self.keys = sorted(logs.keys())
if self.model.stop_training:
logs = dict(((k, logs[k]) if k in logs else (k, 'NA') for k in self.keys))
if not self.writer:
class CustomDialect(csv.excel):
delimiter = self.sep
fieldnames = ['epoch'] + self.keys
self.writer = csv.DictWriter(self.csv_file, fieldnames=fieldnames, dialect=CustomDialect)
if self.append_header:
self.writer.writeheader()
row_dict = collections.OrderedDict({'epoch': epoch})
row_dict.update(((key, handle_value(logs[key])) for key in self.keys))
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
self.csv_file.close()
self.writer = None
|
Callback that streams epoch results to a CSV file.
Supports all values that can be represented as a string,
including 1D iterables such as `np.ndarray`.
Example:
```python
csv_logger = CSVLogger('training.log')
model.fit(X_train, Y_train, callbacks=[csv_logger])
```
Args:
filename: Filename of the CSV file, e.g. `'run/log.csv'`.
separator: String used to separate elements in the CSV file.
append: Boolean. True: append if file exists (useful for continuing
training). False: overwrite existing file.
|
github-repos
|
def batch_insert(self, insertions: Iterable[Tuple[(int, ops.OP_TREE)]]) -> None:
copy = self.copy()
shift = 0
insertions = sorted(insertions, key=(lambda e: e[0]))
groups = _group_until_different(insertions, key=(lambda e: e[0]), value=(lambda e: e[1]))
for (i, group) in groups:
insert_index = (i + shift)
next_index = copy.insert(insert_index, reversed(group), InsertStrategy.EARLIEST)
if (next_index > insert_index):
shift += (next_index - insert_index)
self._moments = copy._moments
|
Applies a batched insert operation to the circuit.
Transparently handles the fact that earlier insertions may shift
the index that later insertions should occur at. For example, if you
insert an operation at index 2 and at index 4, but the insert at index 2
causes a new moment to be created, then the insert at "4" will actually
occur at index 5 to account for the shift from the new moment.
All insertions are done with the strategy 'EARLIEST'.
When multiple inserts occur at the same index, the gates from the later
inserts end up before the gates from the earlier inserts (exactly as if
you'd called list.insert several times with the same index: the later
inserts shift the earliest inserts forward).
Args:
insertions: A sequence of (insert_index, operations) pairs
indicating operations to add into the circuit at specific
places.
|
codesearchnet
|
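A short usage sketch assuming the Cirq `Circuit.batch_insert` API described above; the qubits and gates are illustrative.
```python
import cirq

q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit([cirq.H(q0), cirq.CNOT(q0, q1)])

# Both insertions are expressed against the original indices; any moment
# created by the first insert shifts the second one automatically.
circuit.batch_insert([(0, cirq.X(q1)), (1, cirq.Z(q0))])
print(circuit)
```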
def stack(self, value):
if value == self._defaults['stack'] and 'stack' in self._values:
del self._values['stack']
else:
self._values['stack'] = value
|
The stack property.
Args:
value (string). the property value.
|
juraj-google-style
|
def add_snmp_community(self, **kwargs):
community = kwargs.pop('community')
callback = kwargs.pop('callback', self._callback)
config = ET.Element('config')
snmp_server = ET.SubElement(config, 'snmp-server', xmlns='urn:brocade.com:mgmt:brocade-snmp')
community_el = ET.SubElement(snmp_server, 'community')
community_name = ET.SubElement(community_el, 'community')
community_name.text = community
return callback(config)
|
Add SNMP Community to NOS device.
Args:
community (str): Community string to be added to device.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `community` is not defined.
|
codesearchnet
|
def scatter_mul(self, sparse_delta, use_locking=False, name=None):
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')
return self._lazy_read(gen_resource_variable_ops.resource_scatter_mul(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))
|
Multiply this variable by `tf.IndexedSlices`.
Args:
sparse_delta: `tf.IndexedSlices` to multiply this variable by.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
|
github-repos
|
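A minimal TF2 usage sketch of `scatter_mul` on an ordinary `tf.Variable`; the values are illustrative.
```python
import tensorflow as tf

v = tf.Variable([1.0, 2.0, 3.0, 4.0])
delta = tf.IndexedSlices(values=tf.constant([10.0, 0.5]),
                         indices=tf.constant([0, 2]))

v.scatter_mul(delta)        # element-wise multiply at the given indices
print(v.numpy())            # [10.   2.   1.5  4. ]
```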
def _FormatNotes(self, event):
inode = event.inode
if (inode is None):
inode = '-'
notes = getattr(event, 'notes', '')
if (not notes):
display_name = getattr(event, 'display_name', '')
notes = 'File: {0:s} inode: {1!s}'.format(display_name, inode)
return self._SanitizeField(notes)
|
Formats the notes.
Args:
event (EventObject): event.
Returns:
str: formatted notes field.
|
codesearchnet
|
def __set__(self, instance, value):
if value is None and self.default:
self._cache[instance] = self.default
else:
try:
cleaned_value = self.field_value(value)
except NodeTypeError as node_error:
raise SchemaNodeError('{}.{}: {}'.format(
instance.__class__.__name__, self.alias, node_error.args[0])
)
try:
self.is_valid(cleaned_value)
except SchemaNodeValidatorError as error:
raise SchemaNodeError(
'{}.{} Error for value `{}` : {}'.format(
instance.__class__.__name__,
self.alias,
value,
error.args[0]
)
)
self._cache[instance] = cleaned_value
|
Python descriptor protocol `__set__` magic method.
Args:
instance (object): The instance with descriptor attribute.
value (object): The value for instance attribute.
|
juraj-google-style
|
def parse(self, data):
data = '\n'.join(self.strip(data.split('\n')))
tag_re = re.compile('^:\\n?(?P<full_tag>(?P<tag>[0-9]{2}|NS)(?P<sub_tag>[A-Z])?):', re.MULTILINE)
matches = list(tag_re.finditer(data))
valid_matches = list(self.sanatize_tag_id_matches(matches))
for (i, match) in enumerate(valid_matches):
tag_id = self.normalize_tag_id(match.group('tag'))
tag = (self.tags.get(match.group('full_tag')) or self.tags[tag_id])
if valid_matches[(i + 1):]:
tag_data = data[match.end():valid_matches[(i + 1)].start()].strip()
else:
tag_data = data[match.end():].strip()
tag_dict = tag.parse(self, tag_data)
for processor in self.processors.get(('pre_%s' % tag.slug), []):
tag_dict = processor(self, tag, tag_dict)
result = tag(self, tag_dict)
for processor in self.processors.get(('post_%s' % tag.slug), []):
result = processor(self, tag, tag_dict, result)
if isinstance(tag, mt940.tags.Statement):
if (not self.transactions):
transaction = Transaction(self)
self.transactions.append(transaction)
if transaction.data.get('id'):
transaction = Transaction(self, result)
self.transactions.append(transaction)
else:
transaction.data.update(result)
elif (issubclass(tag.scope, Transaction) and self.transactions):
for (k, v) in _compat.iteritems(result):
if ((k in transaction.data) and hasattr(v, 'strip')):
transaction.data[k] += ('\n%s' % v.strip())
else:
transaction.data[k] = v
elif issubclass(tag.scope, Transactions):
self.data.update(result)
return self.transactions
|
Parses MT940 data; expects a string containing the data
Args:
data (str): The MT940 data
Returns: :py:class:`list` of :py:class:`Transaction`
|
codesearchnet
|
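A short usage sketch based on the public `mt-940` package, which exposes this parser through `mt940.parse`; the file path is hypothetical and the data keys assume the usual statement tags are present.
```python
import mt940

# mt940.parse accepts a file path, a file object, or a raw MT940 string.
transactions = mt940.parse('statement.sta')

print(transactions.data.get('account_identification'))
for transaction in transactions:
    print(transaction.data['amount'], transaction.data.get('transaction_details'))
```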
def register(model_type, config, exist_ok=False) -> None:
if issubclass(config, PretrainedConfig) and config.model_type != model_type:
raise ValueError(f'The config you are passing has a `model_type` attribute that is not consistent with the model type you passed (config has {config.model_type} and you passed {model_type}. Fix one of those so they match!')
CONFIG_MAPPING.register(model_type, config, exist_ok=exist_ok)
|
Register a new configuration for this class.
Args:
model_type (`str`): The model type like "bert" or "gpt".
config ([`PretrainedConfig`]): The config to register.
|
github-repos
|
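A usage sketch of the registration hook in Hugging Face Transformers; the model type and config class below are hypothetical.
```python
from transformers import AutoConfig, PretrainedConfig

class MyModelConfig(PretrainedConfig):
    model_type = "my-model"   # must match the string passed to register()

    def __init__(self, hidden_size=128, **kwargs):
        self.hidden_size = hidden_size
        super().__init__(**kwargs)

# After registration, AutoConfig can resolve the custom model type.
AutoConfig.register("my-model", MyModelConfig)
config = AutoConfig.for_model("my-model", hidden_size=256)
```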