code | docstring | source |
---|---|---|
def with_rank_at_least(self, rank):
if ((self.ndims is not None) and (self.ndims < rank)):
raise ValueError(('Shape %s must have rank at least %d' % (self, rank)))
else:
return self
|
Returns a shape based on `self` with at least the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at least the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at least the given
`rank`.
|
codesearchnet
|
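For context, a minimal usage sketch of the shape check above, assuming TensorFlow is installed; the output comments follow the logic of the snippet rather than any particular TF version's formatting.

```python
import tensorflow as tf

shape = tf.TensorShape([None, 3])   # rank 2, first dimension unknown
print(shape.with_rank_at_least(1))  # (None, 3) -- returned unchanged
print(shape.with_rank_at_least(2))  # (None, 3) -- rank 2 satisfies "at least 2"

try:
    shape.with_rank_at_least(3)     # rank 2 < 3, so this raises
except ValueError as e:
    print(e)                        # "Shape (None, 3) must have rank at least 3"
```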
def _make_estimator_serving_session(estimator, serving_input_fn, checkpoint_path):
with tf.Graph().as_default() as g:
mode = tf_v1.estimator.ModeKeys.PREDICT
tf_v1.train.create_global_step(g)
tf_v1.set_random_seed(estimator.config.tf_random_seed)
serving_input_receiver = serving_input_fn()
estimator_spec = estimator.model_fn(features=serving_input_receiver.features, labels=None, mode=mode, config=estimator.config)
session = tf_v1.Session(config=estimator._session_config)
with session.as_default():
saver_for_restore = (estimator_spec.scaffold.saver or tf_v1.train.Saver(sharded=True))
saver_for_restore.restore(session, checkpoint_path)
return session
|
Returns a session constructed using `estimator` and `serving_input_fn`.
The Estimator API does not provide an API to construct a graph and session,
making it necessary for this function to replicate how an estimator builds
a graph.
This code is based on `Estimator.export_savedmodel` (another function that
has to replicate how an estimator builds a graph).
Args:
estimator: tf.Estimator to use when constructing the session.
serving_input_fn: A function that takes no arguments and returns a
`ServingInputReceiver`. It is used to construct the session.
checkpoint_path: The checkpoint path to restore in the session. Must not
be None.
|
codesearchnet
|
def median(data):
ordered = sorted(data)
length = len(ordered)
if ((length % 2) == 0):
return ((ordered[(math.floor((length / 2)) - 1)] + ordered[math.floor((length / 2))]) / 2.0)
elif ((length % 2) != 0):
return ordered[math.floor((length / 2))]
|
Calculates the median of a list of integers or floating point numbers.
Args:
data: A list of integers or floating point numbers
Returns:
Sorts the list numerically and returns the middle number if the list has an odd number
of items. If the list contains an even number of items the mean of the two middle numbers
is returned.
|
codesearchnet
|
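A quick usage sketch of the `median` helper above, assuming the function and its `math` import are in scope.

```python
import math  # required by the median implementation above

print(median([7, 1, 3]))     # odd count: sorted -> [1, 3, 7], middle value -> 3
print(median([4, 1, 3, 2]))  # even count: sorted -> [1, 2, 3, 4], mean of 2 and 3 -> 2.5
```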
def reset_port_protection(self, id_or_uri, timeout=-1):
uri = self._client.build_uri(id_or_uri) + "/resetportprotection"
return self._client.update_with_zero_body(uri, timeout)
|
Triggers a reset of port protection.
Causes port protection to be reset on all the interconnects of the logical interconnect that matches ID.
Args:
id_or_uri: Can be either the interconnect id or the interconnect uri.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: The interconnect.
|
juraj-google-style
|
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
|
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A Big Bird sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
github-repos
|
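To make the `[CLS] X [SEP]` layout concrete, here is a small standalone sketch of the same concatenation logic; the token IDs 101/102 are illustrative placeholders, not values taken from any particular Big Bird checkpoint.

```python
from typing import List, Optional

class _TokenizerSketch:
    cls_token_id = 101  # hypothetical [CLS] id, for illustration only
    sep_token_id = 102  # hypothetical [SEP] id, for illustration only

    def build_inputs_with_special_tokens(self, token_ids_0: List[int],
                                         token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls, sep = [self.cls_token_id], [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

tok = _TokenizerSketch()
print(tok.build_inputs_with_special_tokens([5, 6]))       # [101, 5, 6, 102]
print(tok.build_inputs_with_special_tokens([5, 6], [7]))  # [101, 5, 6, 102, 7, 102]
```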
def ungzip(file_path, extract_path: Path = None):
CHUNK = 16 * 1024
file_path = Path(file_path)
extract_path = extract_path or file_path.with_suffix('')
with gzip.open(file_path, 'rb') as fin, extract_path.open('wb') as fout:
while True:
block = fin.read(CHUNK)
if not block:
break
fout.write(block)
|
Simple .gz archive extractor
Args:
file_path: path to the gzip file to be extracted
extract_path: path where the file will be extracted
|
juraj-google-style
|
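A round-trip usage sketch for `ungzip`, assuming the function above is in scope; the file names are arbitrary examples.

```python
import gzip
from pathlib import Path

# Write a small gzip archive, then extract it with the helper above.
src = Path("example.txt.gz")
with gzip.open(src, "wb") as f:
    f.write(b"hello world\n")

ungzip(src)                              # extract_path defaults to "example.txt"
print(Path("example.txt").read_bytes())  # b'hello world\n'
```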
def _illegal_character(c, ctx, message=''):
container_type = (((ctx.container.ion_type is None) and 'top-level') or ctx.container.ion_type.name)
value_type = (((ctx.ion_type is None) and 'unknown') or ctx.ion_type.name)
if (c is None):
header = 'Illegal token'
else:
c = ('EOF' if BufferQueue.is_eof(c) else _chr(c))
header = ('Illegal character %s' % (c,))
raise IonException(('%s at position %d in %s value contained in %s. %s Pending value: %s' % (header, ctx.queue.position, value_type, container_type, message, ctx.value)))
|
Raises an IonException upon encountering the given illegal character in the given context.
Args:
c (int|None): Ordinal of the illegal character.
ctx (_HandlerContext): Context in which the illegal character was encountered.
message (Optional[str]): Additional information, as necessary.
|
codesearchnet
|
def ctype_to_dtype(cl_type, mot_float_type='float'):
if is_vector_ctype(cl_type):
(raw_type, vector_length) = split_vector_ctype(cl_type)
if (raw_type == 'mot_float_type'):
if is_vector_ctype(mot_float_type):
(raw_type, _) = split_vector_ctype(mot_float_type)
else:
raw_type = mot_float_type
vector_type = (raw_type + str(vector_length))
return getattr(cl_array.vec, vector_type)
else:
if (cl_type == 'mot_float_type'):
cl_type = mot_float_type
data_types = [('char', np.int8), ('uchar', np.uint8), ('short', np.int16), ('ushort', np.uint16), ('int', np.int32), ('uint', np.uint32), ('long', np.int64), ('ulong', np.uint64), ('float', np.float32), ('double', np.float64)]
for (ctype, dtype) in data_types:
if (ctype == cl_type):
return dtype
|
Get the numpy dtype of the given cl_type string.
Args:
cl_type (str): the CL data type to match, for example 'float' or 'float4'.
mot_float_type (str): the C name of the ``mot_float_type``. The dtype will be looked up recursively.
Returns:
dtype: the numpy datatype
|
codesearchnet
|
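Scalar usage of `ctype_to_dtype` is straightforward; the calls below assume the helper and its `is_vector_ctype`/`split_vector_ctype` dependencies are in scope (vector types additionally require `pyopencl.array` as `cl_array`).

```python
import numpy as np

print(ctype_to_dtype('float') is np.float32)                     # True
print(ctype_to_dtype('uint') is np.uint32)                       # True
print(ctype_to_dtype('mot_float_type') is np.float32)            # True: defaults to 'float'
print(ctype_to_dtype('mot_float_type', 'double') is np.float64)  # True
```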
def decode_body(headers: MutableMapping, body: bytes) -> dict:
type_, encoding = parse_content_type(headers)
decoded_body = body.decode(encoding)
if type_ == "application/json":
payload = json.loads(decoded_body)
else:
if decoded_body == "ok":
payload = {"ok": True}
else:
payload = {"ok": False, "data": decoded_body}
return payload
|
Decode the response body
For 'application/json' content-type load the body as a dictionary
Args:
headers: Response headers
body: Response body
Returns:
decoded body
|
juraj-google-style
|
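A usage sketch of `decode_body`, assuming `parse_content_type` (not shown here) returns a `(type, encoding)` pair such as `('application/json', 'utf-8')` for the headers below.

```python
json_headers = {"content-type": "application/json; charset=utf-8"}
print(decode_body(json_headers, b'{"ok": true, "team": "T123"}'))
# -> {'ok': True, 'team': 'T123'}

text_headers = {"content-type": "text/plain; charset=utf-8"}
print(decode_body(text_headers, b"ok"))            # -> {'ok': True}
print(decode_body(text_headers, b"rate_limited"))  # -> {'ok': False, 'data': 'rate_limited'}
```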
def top_prior(name, z_shape, learn_prior='normal', temperature=1.0):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
h = tf.zeros(z_shape, dtype=tf.float32)
if (learn_prior == 'normal'):
prior_dist = tfp.distributions.Normal(h, tf.exp(h))
elif (learn_prior == 'single_conv'):
prior_dist = single_conv_dist('top_learn_prior', h)
else:
raise ValueError(('Expected learn_prior to be normal or single_conv got %s' % learn_prior))
return TemperedNormal(prior_dist.loc, prior_dist.scale, temperature)
|
Unconditional prior distribution.
Args:
name: variable scope
z_shape: Shape of the mean / scale of the prior distribution.
learn_prior: Possible options are "normal" and "single_conv".
If set to "single_conv", the gaussian is parametrized by a
single convolutional layer whose input are an array of zeros
and initialized such that the mean and std are zero and one.
If set to "normal", the prior is just a Gaussian with zero
mean and unit variance.
temperature: Temperature with which to sample from the Gaussian.
Returns:
A `TemperedNormal` distribution with the prior's mean and scale and the given temperature.
Raises:
ValueError: If learn_prior not in "normal" or "single_conv"
|
codesearchnet
|
def main(argv):
parser = _BuildParser()
args = parser.parse_args(argv[1:])
style_config = args.style
if args.style_help:
_PrintHelp(args)
return 0
if args.lines and len(args.files) > 1:
parser.error('cannot use -l/--lines with more than one file')
lines = _GetLines(args.lines) if args.lines is not None else None
if not args.files:
if args.in_place or args.diff:
parser.error('cannot use --in-place or --diff flags when reading from stdin')
original_source = []
while True:
if hasattr(sys.stdin, 'closed') and sys.stdin.closed:
break
try:
original_source.append(_raw_input())
except EOFError:
break
except KeyboardInterrupt:
return 1
if style_config is None and (not args.no_local_style):
style_config = file_resources.GetDefaultStyleForDir(os.getcwd())
source = [line.rstrip() for line in original_source]
source[0] = _removeBOM(source[0])
try:
reformatted_source, _ = yapf_api.FormatCode(str('\n'.join(source).replace('\r\n', '\n') + '\n'), filename='<stdin>', style_config=style_config, lines=lines)
except errors.YapfError:
raise
except Exception as e:
raise errors.YapfError(errors.FormatErrorMsg(e))
file_resources.WriteReformattedCode('<stdout>', reformatted_source)
return 0
exclude_patterns_from_ignore_file = file_resources.GetExcludePatternsForDir(os.getcwd())
files = file_resources.GetCommandLineFiles(args.files, args.recursive, (args.exclude or []) + exclude_patterns_from_ignore_file)
if not files:
raise errors.YapfError('input filenames did not match any python files')
changed = FormatFiles(files, lines, style_config=args.style, no_local_style=args.no_local_style, in_place=args.in_place, print_diff=args.diff, parallel=args.parallel, quiet=args.quiet, verbose=args.verbose, print_modified=args.print_modified)
return 1 if changed and (args.diff or args.quiet) else 0
|
Main program.
Arguments:
argv: command-line arguments, such as sys.argv (including the program name
in argv[0]).
Returns:
Zero on successful program termination, non-zero otherwise.
With --diff: zero if there were no changes, non-zero otherwise.
Raises:
YapfError: if none of the supplied files were Python files.
|
github-repos
|
def update_pos(self, pos_id, name, pos_type, location=None):
arguments = {'name': name,
'type': pos_type,
'location': location}
return self.do_req('PUT',
self.merchant_api_base_url + '/pos/' +
pos_id + '/', arguments)
|
Update POS resource. Returns the raw response object.
Arguments:
pos_id:
POS id as chosen on registration
name:
Human-readable name of the POS, used for displaying payment
request origin to end user
pos_type:
POS type
location:
Merchant location
|
juraj-google-style
|
def defaultannotator(self, annotationtype, set=None):
if (inspect.isclass(annotationtype) or isinstance(annotationtype, AbstractElement)):
annotationtype = annotationtype.ANNOTATIONTYPE
if (not set):
set = self.defaultset(annotationtype)
try:
return self.annotationdefaults[annotationtype][set]['annotator']
except KeyError:
raise NoDefaultError
|
Obtain the default annotator for the specified annotation type and set.
Arguments:
annotationtype: The type of annotation, this is conveyed by passing the corresponding annotation class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
set (str): the set, should formally be a URL pointing to the set definition
Returns:
the annotator (str)
Raises:
:class:`NoDefaultError` if the annotation type does not exist or if there is ambiguity (multiple sets for the same type)
|
codesearchnet
|
def from_args_and_dict(cls, args, processor_dict: dict[str, Any], **kwargs):
processor_dict = processor_dict.copy()
return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)
if 'processor_class' in processor_dict:
del processor_dict['processor_class']
if 'auto_map' in processor_dict:
del processor_dict['auto_map']
processor_dict.update(kwargs)
accepted_args_and_kwargs = cls.__init__.__code__.co_varnames[:cls.__init__.__code__.co_argcount][1:]
unused_kwargs, valid_kwargs = cls.validate_init_kwargs(processor_config=processor_dict, valid_kwargs=accepted_args_and_kwargs)
args_to_remove = [i for i, arg in enumerate(accepted_args_and_kwargs) if arg in processor_dict]
args = [arg for i, arg in enumerate(args) if i not in args_to_remove]
processor = cls(*args, **valid_kwargs)
logger.info(f'Processor {processor}')
if return_unused_kwargs:
return (processor, unused_kwargs)
else:
return processor
|
Instantiates a type of [`~processing_utils.ProcessingMixin`] from a Python dictionary of parameters.
Args:
processor_dict (`Dict[str, Any]`):
Dictionary that will be used to instantiate the processor object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the
[`~processing_utils.ProcessingMixin.to_dict`] method.
kwargs (`Dict[str, Any]`):
Additional parameters from which to initialize the processor object.
Returns:
[`~processing_utils.ProcessingMixin`]: The processor object instantiated from those
parameters.
|
github-repos
|
def lines(start=None, end=None, reverse=False, selection=False):
if selection:
(start, end) = get_selection()
else:
(start, end) = fix_addresses(start, end)
if (not reverse):
item = idaapi.get_item_head(start)
while (item < end):
(yield Line(item))
item += idaapi.get_item_size(item)
else:
item = idaapi.get_item_head((end - 1))
while (item >= start):
(yield Line(item))
item = idaapi.get_item_head((item - 1))
|
Iterate lines in range.
Args:
start: Starting address, start of IDB if `None`.
end: End address, end of IDB if `None`.
reverse: Set to true to iterate in reverse order.
selection: If set to True, replaces start and end with current selection.
Returns:
iterator of `Line` objects.
|
codesearchnet
|
def transfer(self, payment_id, data={}, **kwargs):
url = '{}/{}/transfers'.format(self.base_url, payment_id)
return self.post_url(url, data, **kwargs)
|
Create Transfer for given Payment Id
Args:
payment_id : Id for which payment object has to be transferred
Returns:
Payment dict after getting transferred
|
codesearchnet
|
def _gradient(self, diff, d, coords):
denom = np.copy(d)
denom[denom == 0] = 1e-5
with np.errstate(divide='ignore', invalid='ignore'):
K = -2 * diff / denom
K[np.isnan(K)] = 0
g = np.empty_like(coords)
for n in range(self.n):
for i in range(self.m):
g[i, n] = ((coords[i, n] - coords[:, n]) * K[i, :]).sum()
return g
|
Compute the gradient.
Args:
diff (`array-like`): [`m`, `m`] matrix. `D` - `d`
d (`array-like`): [`m`, `m`] matrix.
coords (`array-like`): [`m`, `n`] matrix.
Returns:
`np.array`: Gradient, shape [`m`, `n`].
|
juraj-google-style
|
def capture(self, data_buffer=None, log_time=False, debug_print=False, retry_reset=True):
start = time.time()
if (data_buffer is None):
data_buffer = np.ndarray((Lepton.ROWS, Lepton.COLS, 1), dtype=np.uint16)
elif ((data_buffer.ndim < 2) or (data_buffer.shape[0] < Lepton.ROWS) or (data_buffer.shape[1] < Lepton.COLS) or (data_buffer.itemsize < 2)):
raise Exception('Provided input array not large enough')
while True:
Lepton.capture_segment(self.__handle, self.__xmit_buf, self.__msg_size, self.__capture_buf[0])
if (retry_reset and ((self.__capture_buf[(20, 0)] & 65295) != 5120)):
if debug_print:
print('Garbage frame number reset waiting...')
time.sleep(0.185)
else:
break
self.__capture_buf.byteswap(True)
data_buffer[:, :] = self.__capture_buf[:, 2:]
end = time.time()
if debug_print:
print('---')
for i in range(Lepton.ROWS):
fid = self.__capture_buf[(i, 0, 0)]
crc = self.__capture_buf[(i, 1, 0)]
fnum = (fid & 4095)
print('0x{0:04x} 0x{1:04x} : Row {2:2} : crc={1}'.format(fid, crc, fnum))
print('---')
if log_time:
print('frame processed int {0}s, {1}hz'.format((end - start), (1.0 / (end - start))))
return (data_buffer, data_buffer.sum())
|
Capture a frame of data.
Captures 80x60 uint16 array of non-normalized (raw 12-bit) data. Returns that frame and a frame_id (which
is currently just the sum of all pixels). The Lepton will return multiple, identical frames at a rate of up
to ~27 Hz, with unique frames at only ~9 Hz, so the frame_id can help you avoid doing additional work
processing duplicate frames.
Args:
data_buffer (numpy.ndarray): Optional. If specified, should be ``(60,80,1)`` with `dtype`=``numpy.uint16``.
Returns:
tuple consisting of (data_buffer, frame_id)
|
codesearchnet
|
def are_equal(self, sp1, sp2):
set1 = set(sp1.elements)
set2 = set(sp2.elements)
return (set1.issubset(set2) or set2.issubset(set1))
|
True if there is some overlap in composition between the species
Args:
sp1: First species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
sp2: Second species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
Returns:
True if one species' element set is a subset of the other's, False otherwise.
|
codesearchnet
|
def open(self, mode='r', buffering=(- 1), encoding=None, errors=None, newline=None):
if self._closed:
self._raise_closed()
return FakeFileOpen(self.filesystem, use_io=True)(self._path(), mode, buffering, encoding, errors, newline)
|
Open the file pointed by this path and return a fake file object.
Raises:
IOError: if the target object is a directory, the path is invalid
or permission is denied.
|
codesearchnet
|
def assert_title(self, title, **kwargs):
query = TitleQuery(title, **kwargs)
@self.synchronize(wait=query.wait)
def assert_title():
if not query.resolves_for(self):
raise ExpectationNotMet(query.failure_message)
return True
return assert_title()
|
Asserts that the page has the given title.
Args:
title (str | RegexObject): The string or regex that the title should match.
**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.
Returns:
True
Raises:
ExpectationNotMet: If the assertion hasn't succeeded during the wait time.
|
juraj-google-style
|
def _min_max_filter(t: List, min_v: str, max_v: str) -> bool:
def tofloat(value):
try:
float(value)
return float(value)
except ValueError:
return False
for a_token in t:
if not tofloat(a_token.text):
return False
else:
if min_v and tofloat(min_v):
this_v = tofloat(a_token.text)
if this_v < tofloat(min_v):
return False
if max_v and tofloat(max_v):
this_v = tofloat(a_token.text)
if this_v > tofloat(max_v):
return False
return True
|
Min and max filter: checks that every token in the list is numeric and falls within the given bounds.
Args:
t: List, list of tokens
min_v: str, lower bound; any token with a numeric value below it fails the filter
max_v: str, upper bound; any token with a numeric value above it fails the filter
Returns: bool
|
juraj-google-style
|
def compress_multiple_pdfs(source_directory, output_directory, ghostscript_binary):
source_paths = _get_pdf_filenames_at(source_directory)
(yield len(source_paths))
for source_path in source_paths:
output = os.path.join(output_directory, os.path.basename(source_path))
compress_pdf(source_path, output, ghostscript_binary)
(yield output)
|
Compress all PDF files in the given source directory and place the output in the
given output directory. This is a generator function that first yields the amount
of files to be compressed, and then yields the output path of each file.
Args:
source_directory (str): Filepath to the source directory.
output_directory (str): Filepath to the output directory.
ghostscript_binary (str): Name of the Ghostscript binary.
Returns:
list(str): paths to outputs.
|
codesearchnet
|
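Because `compress_multiple_pdfs` is a generator that first yields the file count, a caller typically primes it once and then iterates; a hedged usage sketch with hypothetical paths and binary name:

```python
# Hypothetical paths and Ghostscript binary name, purely for illustration.
compressor = compress_multiple_pdfs("in_dir", "out_dir", "gs")

total = next(compressor)        # first yield: number of PDFs found
print(f"compressing {total} files")

for output_path in compressor:  # subsequent yields: one output path per file
    print("wrote", output_path)
```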
def mean_minimum_centroid_distance(item_a, item_b, max_value):
centroids_a = np.array([item_a.center_of_mass(t) for t in item_a.times])
centroids_b = np.array([item_b.center_of_mass(t) for t in item_b.times])
distance_matrix = (((centroids_a[:, 0:1] - centroids_b.T[0:1]) ** 2) + ((centroids_a[:, 1:] - centroids_b.T[1:]) ** 2))
mean_min_distances = np.sqrt((distance_matrix.min(axis=0).mean() + distance_matrix.min(axis=1).mean()))
return (np.minimum(mean_min_distances, max_value) / float(max_value))
|
RMS difference in the minimum distances from the centroids of one track to the centroids of another track
Args:
item_a: STObject from the first set in TrackMatcher
item_b: STObject from the second set in TrackMatcher
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
|
codesearchnet
|
def random_dna(self, random_generator: Union[types.ModuleType, random.Random, None]=None, attach_spec: bool=True, previous_dna: Optional['DNA']=None) -> 'DNA':
random_generator = random_generator or random
dna = self._random_dna(random_generator, previous_dna)
if attach_spec:
dna.use_spec(self)
return dna
|
Returns a random DNA based on current spec.
Args:
random_generator: An optional Random object. If None, the global random
module will be used.
attach_spec: If True, current spec will be attached to the returned DNA.
previous_dna: An optional DNA representing previous DNA. This field might
be useful for generating stateful random DNAs.
Returns:
A random DNA based on current spec.
|
github-repos
|
def _CheckStatusWorkerProcess(self, pid):
self._RaiseIfNotRegistered(pid)
process = self._processes_per_pid[pid]
process_status = self._QueryProcessStatus(process)
if process_status is None:
process_is_alive = False
else:
process_is_alive = True
process_information = self._process_information_per_pid[pid]
used_memory = process_information.GetUsedMemory() or 0
if self._worker_memory_limit and used_memory > self._worker_memory_limit:
logger.warning((
'Process: {0:s} (PID: {1:d}) killed because it exceeded the '
'memory limit: {2:d}.').format(
process.name, pid, self._worker_memory_limit))
self._KillProcess(pid)
if isinstance(process_status, dict):
self._rpc_errors_per_pid[pid] = 0
status_indicator = process_status.get('processing_status', None)
else:
rpc_errors = self._rpc_errors_per_pid.get(pid, 0) + 1
self._rpc_errors_per_pid[pid] = rpc_errors
if rpc_errors > self._MAXIMUM_RPC_ERRORS:
process_is_alive = False
if process_is_alive:
rpc_port = process.rpc_port.value
logger.warning((
'Unable to retrieve process: {0:s} (PID: {1:d}) status via '
'RPC socket: http://localhost:{2:d}').format(
process.name, pid, rpc_port))
processing_status_string = 'RPC error'
status_indicator = definitions.STATUS_INDICATOR_RUNNING
else:
processing_status_string = 'killed'
status_indicator = definitions.STATUS_INDICATOR_KILLED
process_status = {
'processing_status': processing_status_string}
self._UpdateProcessingStatus(pid, process_status, used_memory)
for worker_status in self._processing_status.workers_status:
if worker_status.pid == pid:
status_indicator = worker_status.status
break
if status_indicator in definitions.ERROR_STATUS_INDICATORS:
logger.error((
'Process {0:s} (PID: {1:d}) is not functioning correctly. '
'Status code: {2!s}.').format(process.name, pid, status_indicator))
self._TerminateProcessByPid(pid)
replacement_process = None
for replacement_process_attempt in range(
self._MAXIMUM_REPLACEMENT_RETRIES):
logger.info((
'Attempt: {0:d} to start replacement worker process for '
'{1:s}').format(replacement_process_attempt + 1, process.name))
replacement_process = self._StartWorkerProcess(
process.name, self._storage_writer)
if replacement_process:
break
time.sleep(self._REPLACEMENT_WORKER_RETRY_DELAY)
if not replacement_process:
logger.error(
'Unable to create replacement worker process for: {0:s}'.format(
process.name))
|
Checks the status of a worker process.
If a worker process is not responding the process is terminated and
a replacement process is started.
Args:
pid (int): process ID (PID) of a registered worker process.
Raises:
KeyError: if the process is not registered with the engine.
|
juraj-google-style
|
def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.return_dict
encoder_hidden_states = encoder_outputs[0]
if encoder_attention_mask is None:
batch_size, sequence_length = encoder_hidden_states.shape[:2]
encoder_attention_mask = jnp.ones((batch_size, sequence_length))
batch_size, sequence_length = decoder_input_ids.shape
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones((batch_size, sequence_length))
if decoder_position_ids is None:
if past_key_values is not None:
raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.')
decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
rngs = {}
if dropout_rng is not None:
rngs['dropout'] = dropout_rng
inputs = {'params': params or self.params}
if past_key_values:
inputs['cache'] = past_key_values
mutable = ['cache']
else:
mutable = False
def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
decoder_module = module._get_decoder_module()
return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)
outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)
if past_key_values is not None and return_dict:
outputs, past = outputs
outputs['past_key_values'] = unfreeze(past['cache'])
return outputs
elif past_key_values is not None and (not return_dict):
outputs, past = outputs
outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]
return outputs
|
Returns:
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration
>>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
>>> encoder_outputs = model.encode(**inputs)
>>> decoder_start_token_id = model.config.decoder_start_token_id
>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
>>> outputs = model.decode(decoder_input_ids, encoder_outputs)
>>> last_decoder_hidden_states = outputs.last_hidden_state
```
|
github-repos
|
def _get_tags(self):
tags = self.data.get('tags', None)
if (not tags):
return list()
return [x['name'] for x in tags]
|
Return the dataset's list of tags
Returns:
List[str]: list of tags or [] if there are none
|
codesearchnet
|
def obj_with_unit(obj, unit):
unit_type = _UNAME2UTYPE[unit]
if isinstance(obj, numbers.Number):
return FloatWithUnit(obj, unit=unit, unit_type=unit_type)
elif isinstance(obj, collections.Mapping):
return {k: obj_with_unit(v, unit) for k,v in obj.items()}
else:
return ArrayWithUnit(obj, unit=unit, unit_type=unit_type)
|
Returns a `FloatWithUnit` instance if obj is scalar, a dictionary of
objects with units if obj is a dict, else an `ArrayWithUnit` instance.
Args:
obj: A scalar number, dict, or array-like object to attach units to.
unit: Specific units (eV, Ha, m, ang, etc.).
|
juraj-google-style
|
def power(self, n):
if n > 0:
return super().power(n)
return Kraus(SuperOp(self).power(n))
|
The matrix power of the channel.
Args:
n (int): compute the matrix power of the superoperator matrix.
Returns:
Kraus: the matrix power of the SuperOp converted to a Kraus channel.
Raises:
QiskitError: if the input and output dimensions of the
QuantumChannel are not equal, or the power is not an integer.
|
juraj-google-style
|
def _compare(self, other: Any, comparison: Callable[[Any, Any], bool]) -> bool:
if isinstance(other, str):
return comparison(self.path, other)
if isinstance(other, KeyPath):
return comparison(tuple(map(KeyPath._KeyComparisonWrapper, self.keys)), tuple(map(KeyPath._KeyComparisonWrapper, other.keys)))
raise TypeError(f"Comparison is not supported between instances of 'KeyPath' and {type(other).__name__!r}.")
|
Compare to another KeyPath or a string.
Args:
other: A Keypath or a string.
comparison: A comparison operator.
Returns:
Whether or not the comparison holds true.
Raises:
TypeError: The other object is neither a Keypath nor a string.
|
github-repos
|
def WriteFileHash(self, path, hash_value):
string = '{0:s}\t{1:s}'.format(hash_value, path)
encoded_string = self._EncodeString(string)
print(encoded_string)
|
Writes the file path and hash to stdout.
Args:
path (str): path of the file.
hash_value (str): message digest hash calculated over the file data.
|
juraj-google-style
|
def get_current_user(with_domain=True):
try:
user_name = win32api.GetUserNameEx(win32api.NameSamCompatible)
if user_name[-1] == '$':
test_user = win32api.GetUserName()
if test_user == 'SYSTEM':
user_name = 'SYSTEM'
elif get_sid_from_name(test_user) == 'S-1-5-18':
user_name = 'SYSTEM'
elif not with_domain:
user_name = win32api.GetUserName()
except pywintypes.error as exc:
raise CommandExecutionError(
'Failed to get current user: {0}'.format(exc))
if not user_name:
return False
return user_name
|
Gets the user executing the process
Args:
with_domain (bool):
``True`` will prepend the user name with the machine name or domain
separated by a backslash
Returns:
str: The user name
|
juraj-google-style
|
def filter_collections_by_statement(data_collections, statement):
pattern = BaseCollection.pattern_from_collections_and_statement(data_collections, statement)
collections = [coll.filter_by_pattern(pattern) for coll in data_collections]
return collections
|
Generate a filtered data collections according to a conditional statement.
Args:
data_collections: A list of aligned Data Collections to be evaluated
against the statement.
statement: A conditional statement as a string (e.g. a>25 and a%5==0).
The variable should always be named as 'a' (without quotations).
Return:
collections: A list of Data Collections that have been filtered based
on the statement.
|
codesearchnet
|
def walknset_vars(self, task_class=None, *args, **kwargs):
def change_task(task):
if ((task_class is not None) and (task.__class__ is not task_class)):
return False
return True
if self.is_work:
for task in self:
if (not change_task(task)):
continue
task.set_vars(*args, **kwargs)
elif self.is_flow:
for task in self.iflat_tasks():
if (not change_task(task)):
continue
task.set_vars(*args, **kwargs)
else:
raise TypeError(("Don't know how to set variables for object class %s" % self.__class__.__name__))
|
Set the values of the ABINIT variables in the input files of the nodes
Args:
task_class: If not None, only the input files of the tasks belonging
to class `task_class` are modified.
Example:
flow.walknset_vars(ecut=10, kptopt=4)
|
codesearchnet
|
def format_rpc(data):
(address, rpc_id, args, resp, _status) = data
name = rpc_name(rpc_id)
if isinstance(args, (bytes, bytearray)):
arg_str = hexlify(args)
else:
arg_str = repr(args)
if isinstance(resp, (bytes, bytearray)):
resp_str = hexlify(resp)
else:
resp_str = repr(resp)
return ('%s called on address %d, payload=%s, response=%s' % (name, address, arg_str, resp_str))
|
Format an RPC call and response.
Args:
data (tuple): A tuple containing the address, rpc_id, argument and
response payloads and any error code.
Returns:
str: The formatted RPC string.
|
codesearchnet
|
def random_dna(dna_spec: DNASpec, random_generator: Union[None, types.ModuleType, random.Random]=None, attach_spec: bool=True, previous_dna: Optional[DNA]=None) -> DNA:
return dna_spec.random_dna(random_generator or random, attach_spec, previous_dna)
|
Generates a random DNA from a DNASpec.
Example::
spec = pg.geno.space([
pg.geno.oneof([
pg.geno.constant(),
pg.geno.constant(),
pg.geno.constant()
]),
pg.geno.floatv(0.1, 0.2)
])
print(pg.random_dna(spec))
# DNA([2, 0.1123])
Args:
dna_spec: a DNASpec object.
random_generator: a Python random generator.
attach_spec: If True, attach the DNASpec to generated DNA.
previous_dna: An optional DNA representing previous DNA. This field might
be useful for generating stateful random DNAs.
Returns:
A DNA object.
|
github-repos
|
def dummy_signatures(self):
if (not self.signing_algorithm):
return []
algo_id = {'sha1': 1, 'sha384': 2}[self.signing_algorithm]
signature = make_dummy_signature(algo_id)
return [(algo_id, signature)]
|
Create a dummy signature.
This is used when initially writing the MAR header and we don't know
what the final signature data will be.
Returns:
Fake signature data suitable for writing to the header with
.write_signatures()
|
codesearchnet
|
def get_image_data(self, ids=None, voxels=None, dense=True):
return self.image_table.get_image_data(ids, voxels=voxels, dense=dense)
|
A convenience wrapper for ImageTable.get_image_data().
Args:
ids (list, array): A list or 1D numpy array of study ids to
return. If None, returns data for all studies.
voxels (list, array): A list or 1D numpy array of voxel indices
(i.e., rows) to return. If None, returns data for all voxels.
|
juraj-google-style
|
def ReplaceAll(pattern, rep, s):
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(rep, s)
|
Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
Args:
pattern: regex pattern
rep: replacement text
s: search string
Returns:
string with replacements made (or original string if no replacements)
|
juraj-google-style
|
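A quick usage sketch of `ReplaceAll`, assuming the function and its module-level `_regexp_compile_cache`/`sre_compile` setup are in scope; repeated calls with the same pattern reuse the cached compiled regex.

```python
print(ReplaceAll(r'\s+', ' ', 'foo    bar\tbaz'))      # 'foo bar baz'
print(ReplaceAll(r'//.*$', '', 'x = 1;  // comment'))  # 'x = 1;  '
```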
def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = text_outputs[1]
text_features = self.text_projection(pooled_output)
return text_features
|
Returns:
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
applying the projection layer to the pooled output of [`GroupViTTextModel`].
Examples:
```python
>>> from transformers import CLIPTokenizer, GroupViTModel
>>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> text_features = model.get_text_features(**inputs)
```
|
github-repos
|
def crypto_withdraw(self, amount, currency, crypto_address):
params = {'amount': amount,
'currency': currency,
'crypto_address': crypto_address}
return self._send_message('post', '/withdrawals/crypto',
data=json.dumps(params))
|
Withdraw funds to a crypto address.
Args:
amount (Decimal): The amount to withdraw
currency (str): The type of currency (eg. 'BTC')
crypto_address (str): Crypto address to withdraw to.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
|
juraj-google-style
|
def __init__(self, fetches):
self._fetch_type = type(fetches)
if isinstance(fetches, collections.defaultdict):
self._type_ctor = functools.partial(collections.defaultdict, fetches.default_factory)
else:
self._type_ctor = self._fetch_type
self._keys = fetches.keys()
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches.values()]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
|
Creates a _DictFetchMapper.
Args:
fetches: Dict of fetches.
|
github-repos
|
def get_estimated_size_and_observables(self, value, nested=False):
return (self.estimate_size(value, nested), [])
|
Returns estimated size of value along with any nested observables.
The list of nested observables is returned as a list of 2-tuples of
(obj, coder_impl), where obj is an instance of observable.ObservableMixin,
and coder_impl is the CoderImpl that can be used to encode elements sent by
obj to its observers.
Arguments:
value: the value whose encoded size is to be estimated.
nested: whether the value is nested.
Returns:
The estimated encoded size of the given value and a list of observables
whose elements are 2-tuples of (obj, coder_impl) as described above.
|
github-repos
|
def _handle_skip_feature(self, test_dict):
skip_reason = None
if "skip" in test_dict:
skip_reason = test_dict["skip"]
elif "skipIf" in test_dict:
skip_if_condition = test_dict["skipIf"]
if self.session_context.eval_content(skip_if_condition):
skip_reason = "{} evaluate to True".format(skip_if_condition)
elif "skipUnless" in test_dict:
skip_unless_condition = test_dict["skipUnless"]
if not self.session_context.eval_content(skip_unless_condition):
skip_reason = "{} evaluate to False".format(skip_unless_condition)
if skip_reason:
raise SkipTest(skip_reason)
|
handle skip feature for test
- skip: skip current test unconditionally
- skipIf: skip current test if condition is true
- skipUnless: skip current test unless condition is true
Args:
test_dict (dict): test info
Raises:
SkipTest: skip test
|
juraj-google-style
|
def __add__(self, other: Self | Processor) -> PartProcessor | Processor:
if isinstance(other, _ChainPartProcessor):
return _ChainPartProcessor([self] + other._processor_list)
elif isinstance(other, _ChainProcessor):
return _ChainProcessor([self.to_processor().call] + other._processor_list)
elif isinstance(other, Processor):
return _ChainProcessor([self.to_processor().call, other])
else:
return _ChainPartProcessor([self, other])
|
Adds `other` to this processor.
Args:
other: a processor to add to `self`.
Returns:
The chain of this processor with `other`.
|
github-repos
|
def _pearson_correlation(self, imgs_to_decode):
x, y = imgs_to_decode.astype(float), self.feature_images.astype(float)
return self._xy_corr(x, y)
|
Decode images using Pearson's r.
Computes the correlation between each input image and each feature
image across voxels.
Args:
imgs_to_decode: An ndarray of images to decode, with voxels in rows
and images in columns.
Returns:
An n_features x n_images 2D array, with each cell representing the
pearson correlation between the i'th feature and the j'th image
across all voxels.
|
juraj-google-style
|
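Since `_xy_corr` is not shown, here is a standalone NumPy sketch of the same idea: column-wise Pearson correlations between a set of images and a set of feature images, with voxels in rows. This is an assumption about what the helper computes, not the package's actual implementation.

```python
import numpy as np

def columnwise_pearson(x: np.ndarray, y: np.ndarray) -> np.ndarray:
    """Correlate each column of y (features) with each column of x (images).

    x: (n_voxels, n_images), y: (n_voxels, n_features).
    Returns an (n_features, n_images) correlation matrix.
    """
    xz = (x - x.mean(axis=0)) / x.std(axis=0)  # z-score each column
    yz = (y - y.mean(axis=0)) / y.std(axis=0)
    return yz.T @ xz / x.shape[0]              # mean of products of z-scores

rng = np.random.default_rng(0)
imgs = rng.normal(size=(1000, 3))    # 3 images, 1000 voxels
feats = rng.normal(size=(1000, 5))   # 5 feature images
print(columnwise_pearson(imgs, feats).shape)  # (5, 3)
```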
def replace_in_file(filename: str, text_from: str, text_to: str) -> None:
log.info("Amending {}: {} -> {}",
filename, repr(text_from), repr(text_to))
with open(filename) as infile:
contents = infile.read()
contents = contents.replace(text_from, text_to)
with open(filename, 'w') as outfile:
outfile.write(contents)
|
Replaces text in a file.
Args:
filename: filename to process (modifying it in place)
text_from: original text to replace
text_to: replacement text
|
juraj-google-style
|
def __init__(self, config: interfaces.Config | None=None):
self._config = config or interfaces.Config()
|
Initializes the TopicVerbalizer.
Args:
config: The agent configuration.
|
github-repos
|
def _lease_owned(self, lease, current_uuid_path):
(prev_uuid_path, prev_uuid) = lease.metadata
with open(current_uuid_path) as f:
current_uuid = f.read()
return ((current_uuid_path == prev_uuid_path) and (prev_uuid == current_uuid))
|
Checks if the given lease is owned by the prefix whose uuid is in
the given path
Note:
The prefix must be also in the same path it was when it took the
lease
Args:
lease: The lease to check; its metadata holds the uuid path and uuid it was taken with
current_uuid_path (str): Path to the uuid to check ownership of
Returns:
bool: ``True`` if the given lease in owned by the prefix,
``False`` otherwise
|
codesearchnet
|
def are_equal(self, sp1, sp2):
comp1 = Composition(sp1)
comp2 = Composition(sp2)
return comp1.get_el_amt_dict() == comp2.get_el_amt_dict()
|
True if element:amounts are exactly the same, i.e.,
oxidation state is not considered.
Args:
sp1: First species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
sp2: Second species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
Returns:
Boolean indicating whether species are the same based on element
and amounts.
|
juraj-google-style
|
def reply(self, user, msg, errors_as_replies=True):
return self._brain.reply(user, msg, errors_as_replies)
|
Fetch a reply from the RiveScript brain.
Arguments:
user (str): A unique user ID for the person requesting a reply.
This could be e.g. a screen name or nickname. It's used internally
to store user variables (including topic and history), so if your
bot has multiple users each one should have a unique ID.
msg (str): The user's message. This is allowed to contain
punctuation and such, but any extraneous data such as HTML tags
should be removed in advance.
errors_as_replies (bool): When errors are encountered (such as a
deep recursion error, no reply matched, etc.) this will make the
reply be a text representation of the error message. If you set
this to ``False``, errors will instead raise an exception, such as
a ``DeepRecursionError`` or ``NoReplyError``. By default, no
exceptions are raised and errors are set in the reply instead.
Returns:
str: The reply output.
|
codesearchnet
|
async def execute_method(self, method, **params):
url = self.url_builder(method, url_params=params)
logger.info('Executing method %r', method)
response = (await aiohttp.get(url))
logger.info('Status: %r', response.status)
if (response.status == 200):
json = (await response.json())
logger.debug('...with JSON %r', json)
if json.get('ok'):
return json
raise SlackApiError(json['error'])
else:
raise_for_status(response)
|
Execute a specified Slack Web API method.
Arguments:
method (:py:class:`str`): The name of the method.
**params (:py:class:`dict`): Any additional parameters
required.
Returns:
:py:class:`dict`: The JSON data from the response.
Raises:
:py:class:`aiohttp.web_exceptions.HTTPException`: If the HTTP
request returns a code other than 200 (OK).
SlackApiError: If the Slack API is reached but the response
contains an error message.
|
codesearchnet
|
def _scan(self, fs, dir_path, namespaces=None):
try:
for info in fs.scandir(dir_path, namespaces=namespaces):
(yield info)
except FSError as error:
if (not self.on_error(dir_path, error)):
six.reraise(type(error), error)
|
Get an iterator of `Info` objects for a directory path.
Arguments:
fs (FS): A filesystem instance.
dir_path (str): A path to a directory on the filesystem.
namespaces (list): A list of additional namespaces to
include in the `Info` objects.
Returns:
~collections.Iterator: iterator of `Info` objects for
resources within the given path.
|
codesearchnet
|
def IsDerivedFunction(clean_lines, linenum):
for i in xrange(linenum, max((- 1), (linenum - 10)), (- 1)):
match = Match('^([^()]*\\w+)\\(', clean_lines.elided[i])
if match:
(line, _, closing_paren) = CloseExpression(clean_lines, i, len(match.group(1)))
return ((closing_paren >= 0) and Search('\\boverride\\b', line[closing_paren:]))
return False
|
Check if current line contains an inherited function.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains a function with "override"
virt-specifier.
|
codesearchnet
|
def oauth_aware(self, method):
def setup_oauth(request_handler, *args, **kwargs):
if self._in_error:
self._display_error_message(request_handler)
return
user = users.get_current_user()
if (not user):
request_handler.redirect(users.create_login_url(request_handler.request.uri))
return
self._create_flow(request_handler)
self.flow.params['state'] = _build_state_value(request_handler, user)
self.credentials = self._storage_class(self._credentials_class, None, self._credentials_property_name, user=user).get()
try:
resp = method(request_handler, *args, **kwargs)
finally:
self.credentials = None
return resp
return setup_oauth
|
Decorator that sets up for OAuth 2.0 dance, but doesn't do it.
Does all the setup for the OAuth dance, but doesn't initiate it.
This decorator is useful if you want to create a page that knows
whether or not the user has granted access to this application.
From within a method decorated with @oauth_aware the has_credentials()
and authorize_url() methods can be called.
Args:
method: callable, to be decorated method of a webapp.RequestHandler
instance.
|
codesearchnet
|
def matches(self, applied_ptransform):
raise NotImplementedError
|
Determines whether the given AppliedPTransform matches.
Note that the matching will happen *after* Runner API proto translation.
If matching is done via type checks, to/from_runner_api[_parameter] methods
must be implemented to preserve the type (and other data) through proto
serialization.
Consider URN-based translation instead.
Args:
applied_ptransform: AppliedPTransform to be matched.
Returns:
a bool indicating whether the given AppliedPTransform is a match.
|
github-repos
|
def enroll_users_in_program(cls, enterprise_customer, program_details, course_mode, emails, cohort=None):
(existing_users, unregistered_emails) = cls.get_users_by_email(emails)
course_ids = get_course_runs_from_program(program_details)
successes = []
pending = []
failures = []
for user in existing_users:
succeeded = cls.enroll_user(enterprise_customer, user, course_mode, *course_ids)
if succeeded:
successes.append(user)
else:
failures.append(user)
for email in unregistered_emails:
pending_user = enterprise_customer.enroll_user_pending_registration(email, course_mode, *course_ids, cohort=cohort)
pending.append(pending_user)
return (successes, pending, failures)
|
Enroll existing users in all courses in a program, and create pending enrollments for nonexisting users.
Args:
enterprise_customer: The EnterpriseCustomer which is sponsoring the enrollment
program_details: The details of the program in which we're enrolling
course_mode (str): The mode with which we're enrolling in the program
emails: An iterable of email addresses which need to be enrolled
Returns:
successes: A list of users who were successfully enrolled in all courses of the program
pending: A list of PendingEnterpriseCustomerUsers who were successfully linked and had
pending enrollments created for them in the database
failures: A list of users who could not be enrolled in the program
|
codesearchnet
|
def import_image(self, src=None, repository=None, tag=None, image=None, changes=None, stream_src=False):
if (not (src or image)):
raise errors.DockerException('Must specify src or image to import from')
u = self._url('/images/create')
params = _import_image_params(repository, tag, image, src=(src if isinstance(src, six.string_types) else None), changes=changes)
headers = {'Content-Type': 'application/tar'}
if (image or (params.get('fromSrc') != '-')):
return self._result(self._post(u, data=None, params=params))
elif isinstance(src, six.string_types):
with open(src, 'rb') as f:
return self._result(self._post(u, data=f, params=params, headers=headers, timeout=None))
else:
if stream_src:
headers['Transfer-Encoding'] = 'chunked'
return self._result(self._post(u, data=src, params=params, headers=headers))
|
Import an image. Similar to the ``docker import`` command.
If ``src`` is a string or unicode string, it will first be treated as a
path to a tarball on the local system. If there is an error reading
from that file, ``src`` will be treated as a URL instead to fetch the
image from. You can also pass an open file handle as ``src``, in which
case the data will be read from that file.
If ``src`` is unset but ``image`` is set, the ``image`` parameter will
be taken as the name of an existing image to import from.
Args:
src (str or file): Path to tarfile, URL, or file-like object
repository (str): The repository to create
tag (str): The tag to apply
image (str): Use another image like the ``FROM`` Dockerfile
parameter
|
codesearchnet
|
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, is_index_masked=None, is_index_global_attn=None, is_global_attn=None, output_attentions=False):
residual = hidden_states
attn_outputs = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, output_attentions=output_attentions)
hidden_states = attn_outputs[0]
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
return (hidden_states,) + attn_outputs[1:]
|
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
attention_mask (`torch.FloatTensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
*(encoder_attention_heads,)*.
|
github-repos
|
def Rsync(url, tgt_name, tgt_root=None):
if (tgt_root is None):
tgt_root = str(CFG['tmp_dir'])
from benchbuild.utils.cmd import rsync
tgt_dir = (local.path(tgt_root) / tgt_name)
if (not source_required(tgt_dir)):
Copy(tgt_dir, '.')
return
rsync('-a', url, tgt_dir)
update_hash(tgt_dir)
Copy(tgt_dir, '.')
|
RSync a folder.
Args:
url (str): The url of the SOURCE location.
tgt_name (str): The name of the TARGET.
tgt_root (str): Path of the target location.
Defaults to ``CFG["tmp_dir"]``.
|
codesearchnet
|
def cat_adb_log(self, tag, begin_time):
if (not self.adb_logcat_file_path):
raise Error(self._ad, 'Attempting to cat adb log when none has been collected.')
end_time = mobly_logger.get_log_line_timestamp()
self._ad.log.debug('Extracting adb log from logcat.')
adb_excerpt_path = os.path.join(self._ad.log_path, 'AdbLogExcerpts')
utils.create_dir(adb_excerpt_path)
f_name = os.path.basename(self.adb_logcat_file_path)
out_name = f_name.replace('adblog,', '').replace('.txt', '')
out_name = (',%s,%s.txt' % (begin_time, out_name))
out_name = out_name.replace(':', '-')
tag_len = (utils.MAX_FILENAME_LEN - len(out_name))
tag = tag[:tag_len]
out_name = (tag + out_name)
full_adblog_path = os.path.join(adb_excerpt_path, out_name)
with io.open(full_adblog_path, 'w', encoding='utf-8') as out:
in_file = self.adb_logcat_file_path
with io.open(in_file, 'r', encoding='utf-8', errors='replace') as f:
in_range = False
while True:
line = None
try:
line = f.readline()
if (not line):
break
except:
continue
line_time = line[:mobly_logger.log_line_timestamp_len]
if (not mobly_logger.is_valid_logline_timestamp(line_time)):
continue
if self._is_timestamp_in_range(line_time, begin_time, end_time):
in_range = True
if (not line.endswith('\n')):
line += '\n'
out.write(line)
elif in_range:
break
|
Takes an excerpt of the adb logcat log from a certain time point to
current time.
Args:
tag: An identifier of the time period, usually the name of a test.
begin_time: Logline format timestamp of the beginning of the time
period.
|
codesearchnet
|
def address(self, num):
url_root = 'company/{}/registered-office-address'
baseuri = (self._BASE_URI + url_root.format(num))
res = self.session.get(baseuri)
self.handle_http_error(res)
return res
|
Search for company addresses by company number.
Args:
num (str): Company number to search on.
|
codesearchnet
|
def point_probability(self, threshold):
point_prob = np.zeros(self.data.shape[1:])
for t in range(self.data.shape[1]):
point_prob[t] = np.where(self.data[:, t] >= threshold, 1.0, 0.0).mean(axis=0)
return EnsembleConsensus(point_prob, "point_probability", self.ensemble_name,
self.run_date, self.variable + "_{0:0.2f}_{1}".format(threshold,
self.units.replace(" ", "_")),
self.start_date, self.end_date, "")
|
Determine the probability of exceeding a threshold at a grid point based on the ensemble forecasts at
that point.
Args:
threshold: If >= threshold assigns a 1 to member, otherwise 0.
Returns:
EnsembleConsensus
|
juraj-google-style
|
def disassemble(self, start=None, end=None, arch_mode=None):
if arch_mode is None:
arch_mode = self.binary.architecture_mode
curr_addr = start if start else self.binary.ea_start
end_addr = end if end else self.binary.ea_end
while curr_addr < end_addr:
encoding = self.__fetch_instr(curr_addr)
asm_instr = self.disassembler.disassemble(encoding, curr_addr, architecture_mode=arch_mode)
if not asm_instr:
return
yield curr_addr, asm_instr, asm_instr.size
curr_addr += asm_instr.size
|
Disassemble native instructions.
Args:
start (int): Start address.
end (int): End address.
arch_mode (int): Architecture mode.
Returns:
(int, Instruction, int): A tuple of the form (address, assembler instruction, instruction size).
|
juraj-google-style
|
def assert_splits_match(nested_splits_lists):
error_msg = 'Inputs must have identical ragged splits'
for splits_list in nested_splits_lists:
if len(splits_list) != len(nested_splits_lists[0]):
raise ValueError(error_msg)
return [check_ops.assert_equal(s1, s2, message=error_msg) for splits_list in nested_splits_lists[1:] for s1, s2 in zip(nested_splits_lists[0], splits_list)]
|
Checks that the given splits lists are identical.
Performs static tests to ensure that the given splits lists are identical,
and returns a list of control dependency op tensors that check that they are
fully identical.
Args:
nested_splits_lists: A list of nested_splits_lists, where each split_list is
a list of `splits` tensors from a `RaggedTensor`, ordered from outermost
ragged dimension to innermost ragged dimension.
Returns:
A list of control dependency op tensors.
Raises:
ValueError: If the splits are not identical.
|
github-repos
|
def _starts_with(field, filter_value):
valid = False
if field.startswith(filter_value):
valid = True
return valid
|
Validate field starts with provided value.
Args:
field (string): The value to check.
filter_value (string): The prefix that field must start with.
Returns:
(boolean): Results of validation
|
juraj-google-style
|
def get_min_max_value(self) -> tuple[float, float]:
total_freq = sum(self._hist_freq)
hist_freq_cumsum = np.cumsum(self._hist_freq) / total_freq
min_quantile, max_quantile = (self._calib_opts.calibration_parameters.min_percentile / 100.0, self._calib_opts.calibration_parameters.max_percentile / 100.0)
min_quantile_idx, max_quantile_idx = (np.searchsorted(hist_freq_cumsum, min_quantile, side='right'), np.searchsorted(hist_freq_cumsum, max_quantile, side='left'))
min_value, max_value = (self._hist_mids[min_quantile_idx], self._hist_mids[max_quantile_idx])
return (min_value, max_value)
|
Calculates min and max from statistics using calibration options.
A "percentile" is a statistical concept that represents the value below
which a given percentage of data falls in a dataset. It involves sorting the
data from smallest to largest and then finding the value at a specified
percentage position. For example, the 0.01 percentile represents the value
in a given data set that corresponds to the lowest 0.01% of the data.
HistogramPercentile calibration uses min_percentile and max_percentile to
find min and max.
min_percentile and max_percentile must be in range [0, 100].
min_percentile is 0.001 by default.
max_percentile is 99.999 by default.
Returns:
(min_value, max_value): Min and max calculated using HistogramPercentile
|
github-repos
|
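A standalone NumPy sketch of the histogram-percentile idea described above: build a histogram, take the cumulative frequency, and read off the bin mid-points at the requested percentiles. The function and variable names here are mine, not the library's.

```python
import numpy as np

def histogram_percentile_range(values, min_percentile=0.001, max_percentile=99.999, bins=512):
    freq, edges = np.histogram(values, bins=bins)
    mids = (edges[:-1] + edges[1:]) / 2.0            # bin mid-points
    cumsum = np.cumsum(freq) / freq.sum()            # cumulative fraction per bin
    lo_idx = np.searchsorted(cumsum, min_percentile / 100.0, side='right')
    hi_idx = np.searchsorted(cumsum, max_percentile / 100.0, side='left')
    return mids[lo_idx], mids[hi_idx]

data = np.random.default_rng(0).normal(0.0, 1.0, size=100_000)
print(histogram_percentile_range(data))  # roughly (-4.x, 4.x) for a standard normal sample
```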
def get_bin_test(self, hashes):
all_responses = {}
if self._cache:
api_name = 'shadowserver-bin-test'
all_responses = self._cache.bulk_lookup(api_name, hashes)
hashes = [key for key in hashes if key not in all_responses.keys()]
all_responses = dict([(key, val) for key, val in all_responses.items() if len(val) >= 2])
HASHES_PER_REQ = 25
hash_chunks = ['\n'.join(hashes[pos:pos + HASHES_PER_REQ]) for pos in range(0, len(hashes), HASHES_PER_REQ)]
responses = self._requests.multi_post(self.BINTEST_URL, data=hash_chunks, to_json=False, send_as_file=True)
for response in responses:
if response is not None and 200 == response.status_code:
response_lines = response.text.split('\n')
for line in response_lines:
val = {}
index_of_first_space = line.find(' ')
if -1 == index_of_first_space:
index_of_first_space = len(line)
key = line[:index_of_first_space].lower()
json_text = line[index_of_first_space + 1:]
if len(json_text):
try:
val = simplejson.loads(json_text)
if len(val.keys()) >= 2:
all_responses[key] = val
except ValueError:
pass
if self._cache:
self._cache.cache_value(api_name, key, val)
return all_responses
|
Test hashes against a list of known software applications.
Known hashes will return a dictionary of information.
Unknown hashes will return nothing.
Args:
hashes: list of string hashes.
Returns:
A dict with the hash as key and the shadowserver report as value.
|
juraj-google-style
|
def Process(self, parser_mediator, cache=None, database=None, **kwargs):
if (database is None):
raise ValueError('Invalid database.')
super(ESEDBPlugin, self).Process(parser_mediator)
self.GetEntries(parser_mediator, cache=cache, database=database, **kwargs)
|
Determines if this is the appropriate plugin for the database.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache (Optional[ESEDBCache]): cache.
database (Optional[pyesedb.file]): ESE database.
Raises:
ValueError: If the database attribute is not valid.
|
codesearchnet
|
def get_logfile_name(tags):
if (not os.path.exists(sd.LOG_DIR)):
os.mkdir(sd.LOG_DIR)
filename = 'log'
for tag in tags:
filename += '_{}'.format(tag)
filename += '.txt'
filename = os.path.join(sd.LOG_DIR, filename)
return filename
|
Formulates a log file name that incorporates the provided tags.
The log file will be located in ``scgpm_seqresults_dnanexus.LOG_DIR``.
Args:
tags: `list` of tags to append to the log file name. Each tag will be '_' delimited. Each tag
will be added in the same order as provided.
|
codesearchnet
|
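For illustration, assuming `sd.LOG_DIR` is `'logs'` and the helper above is in scope, a hypothetical tag list produces a predictable file name:
# Hypothetical call; tags are appended in order, each prefixed with '_'.
logfile = get_logfile_name(['runA', '2024'])
# -> os.path.join(sd.LOG_DIR, 'log_runA_2024.txt'), i.e. 'logs/log_runA_2024.txt'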
def restore(self, state):
selector = DataStreamSelector.FromString(state.get(u'selector'))
if self.selector != selector:
raise ArgumentError("Attempted to restore an InvalidStreamWalker with a different selector",
selector=self.selector, serialized_data=state)
if state.get(u'type') != u'invalid':
raise ArgumentError("Invalid serialized state for InvalidStreamWalker", serialized_data=state)
|
Restore the contents of this virtual stream walker.
Args:
state (dict): The previously serialized state.
Raises:
ArgumentError: If the serialized state does not have
a matching selector.
|
juraj-google-style
|
def _get_cert_expiration_time(headers):
cache_control = headers.get('Cache-Control', '')
for entry in cache_control.split(','):
match = _MAX_AGE_REGEX.match(entry)
if match:
cache_time_seconds = int(match.group(1))
break
else:
return 0
age = headers.get('Age')
if age is not None:
try:
age = int(age)
except ValueError:
age = 0
cache_time_seconds -= age
return max(0, cache_time_seconds)
|
Get the expiration time for a cert, given the response headers.
Get expiration time from the headers in the result. If we can't get
a time from the headers, this returns 0, indicating that the cert
shouldn't be cached.
Args:
headers: A dict containing the response headers from the request to get
certs.
Returns:
An integer with the number of seconds the cert should be cached. This
value is guaranteed to be >= 0.
|
juraj-google-style
|
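A small illustration of the cache arithmetic, assuming the helper above is importable and that the module-level `_MAX_AGE_REGEX` matches the usual `max-age=<seconds>` directive; the header values here are made up:
# max-age of 3600s, already 250s old when proxied -> cache for 3350s.
headers = {'Cache-Control': 'public, max-age=3600', 'Age': '250'}
print(_get_cert_expiration_time(headers))                        # expected: 3350

# No max-age directive -> 0, meaning the cert should not be cached.
print(_get_cert_expiration_time({'Cache-Control': 'no-store'}))  # expected: 0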
def IsCppString(line):
line = line.replace(r'\\', 'XX')
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
|
Determines whether the line terminates such that the next symbol would be inside a string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: a partial line of code, from column 0 up to the current position.
Returns:
True, if the next character appended to 'line' would be inside a
string constant.
|
juraj-google-style
|
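A few illustrative calls, assuming the helper above is in scope, showing how the quote counting decides whether the next character would sit inside a string constant:
print(IsCppString('int x = 0; // "comment'))   # True: one unmatched double quote
print(IsCppString('const char* s = "ab";'))    # False: quotes are balanced
print(IsCppString('putchar(\'"\');'))          # False: '"' is a character literal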
def leaky_relu(x, name=None):
with tf.name_scope(name, 'leaky_relu', [x]) as scope:
x = tf.convert_to_tensor(x, name='x')
return tf.where(tf.less(x, 0.0), 0.01 * x, x, name=scope)
|
Creates a leaky_relu.
This is an alternate non-linearity to relu. The leaky part of the relu may
prevent dead Neurons in a model since the gradient doesn't go completely to
0.
Args:
x: The input tensor.
name: Optional name for this op.
Returns:
x if x > 0 otherwise 0.01 * x.
|
juraj-google-style
|
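The non-linearity itself is just a piecewise scaling. A NumPy rendering of the same formula (independent of the TF op above, with the 0.01 slope kept as in the original) makes the behaviour explicit:
import numpy as np

def leaky_relu_np(x, alpha=0.01):
    # x for x > 0, alpha * x otherwise -- the same rule as the TF op above.
    x = np.asarray(x, dtype=np.float64)
    return np.where(x < 0.0, alpha * x, x)

print(leaky_relu_np([-2.0, -0.5, 0.0, 3.0]))   # [-0.02, -0.005, 0.0, 3.0]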
def r_edges(step):
rbot, rtop = misc.get_rbounds(step)
centers = step.rprof.loc[:, 'r'].values + rbot
edges = (centers[:-1] + centers[1:]) / 2
edges = np.insert(edges, 0, rbot)
edges = np.append(edges, rtop)
return edges, edges
|
Cell border.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the position of the bottom and top walls
of the cells. The two elements of the tuple are identical.
|
juraj-google-style
|
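The edge construction is simply "midpoints between neighbouring centres, padded with the domain walls". A standalone NumPy sketch with made-up radii (no StagyyData required):
import numpy as np

rbot, rtop = 1.0, 2.0
centers = np.array([1.1, 1.3, 1.6, 1.9])      # hypothetical cell centres

edges = (centers[:-1] + centers[1:]) / 2       # interior walls
edges = np.insert(edges, 0, rbot)              # bottom wall
edges = np.append(edges, rtop)                 # top wall
print(edges)                                   # [1.   1.2  1.45 1.75 2.  ]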
def process_usufy(self, data):
mode = 'usufy'
info = []
try:
verifier = self.modes.get(mode, {}).get('extra_fields', {})
for field in verifier.keys():
regexp = verifier[field]
values = re.findall(regexp, data)
for val in values:
aux = {}
aux['type'] = field
aux['value'] = val
aux['attributes'] = []
if (aux not in info):
info.append(aux)
except AttributeError as e:
for field in self.fieldsRegExp[mode].keys():
try:
regexp = ((self.fieldsRegExp[mode][field]['start'] + '([^\\)]+)') + self.fieldsRegExp[mode][field]['end'])
tmp = re.findall(regexp, data)
values = []
for t in tmp:
if (self.fieldsRegExp[mode][field]['end'] in t):
values.append(t.split(self.fieldsRegExp[mode][field]['end'])[0])
else:
values.append(t)
except:
regexp = self.fieldsRegExp[mode][field]
values = re.findall(regexp, data)
for val in values:
aux = {}
aux['type'] = field
aux['value'] = val
aux['attributes'] = []
if (aux not in info):
info.append(aux)
return info
|
Method to process and extract the entities of a usufy
Args:
-----
data: The information from which the info will be extracted.
Return:
-------
A list of the entities found.
|
codesearchnet
|
def weights_to_cpu(state_dict):
state_dict_cpu = OrderedDict()
for (key, val) in state_dict.items():
state_dict_cpu[key] = val.cpu()
return state_dict_cpu
|
Copy a model state_dict to cpu.
Args:
state_dict (OrderedDict): Model weights on GPU.
Returns:
OrderedDict: Model weights on CPU.
|
codesearchnet
|
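A typical call site, assuming PyTorch is installed and the helper above is importable; the model here is a hypothetical stand-in:
import torch

model = torch.nn.Linear(4, 2)
if torch.cuda.is_available():
    model = model.cuda()

cpu_state = weights_to_cpu(model.state_dict())
# The checkpoint is now safe to serialize without pinning it to a GPU device.
torch.save(cpu_state, 'checkpoint.pth')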
def ParseReportDescriptor(rd, desc):
rd = bytearray(rd)
pos = 0
report_count = None
report_size = None
usage_page = None
usage = None
while (pos < len(rd)):
key = rd[pos]
(key_size, value_length) = GetValueLength(rd, pos)
if ((key & REPORT_DESCRIPTOR_KEY_MASK) == INPUT_ITEM):
if (report_count and report_size):
byte_length = ((report_count * report_size) // 8)
desc.internal_max_in_report_len = max(desc.internal_max_in_report_len, byte_length)
report_count = None
report_size = None
elif ((key & REPORT_DESCRIPTOR_KEY_MASK) == OUTPUT_ITEM):
if (report_count and report_size):
byte_length = ((report_count * report_size) // 8)
desc.internal_max_out_report_len = max(desc.internal_max_out_report_len, byte_length)
report_count = None
report_size = None
elif ((key & REPORT_DESCRIPTOR_KEY_MASK) == COLLECTION_ITEM):
if usage_page:
desc.usage_page = usage_page
if usage:
desc.usage = usage
elif ((key & REPORT_DESCRIPTOR_KEY_MASK) == REPORT_COUNT):
if (len(rd) >= ((pos + 1) + value_length)):
report_count = ReadLsbBytes(rd, (pos + 1), value_length)
elif ((key & REPORT_DESCRIPTOR_KEY_MASK) == REPORT_SIZE):
if (len(rd) >= ((pos + 1) + value_length)):
report_size = ReadLsbBytes(rd, (pos + 1), value_length)
elif ((key & REPORT_DESCRIPTOR_KEY_MASK) == USAGE_PAGE):
if (len(rd) >= ((pos + 1) + value_length)):
usage_page = ReadLsbBytes(rd, (pos + 1), value_length)
elif ((key & REPORT_DESCRIPTOR_KEY_MASK) == USAGE):
if (len(rd) >= ((pos + 1) + value_length)):
usage = ReadLsbBytes(rd, (pos + 1), value_length)
pos += (value_length + key_size)
return desc
|
Parse the binary report descriptor.
Parse the binary report descriptor into a DeviceDescriptor object.
Args:
rd: The binary report descriptor
desc: The DeviceDescriptor object to update with the results
from parsing the descriptor.
Returns:
The updated DeviceDescriptor object (the same `desc` passed in).
|
codesearchnet
|
def get_env_spec(self, filters=None):
spec = {
'domains':
{
vm_name: deepcopy(vm_object.spec)
for vm_name, vm_object in self._vms.viewitems()
},
'nets':
{
net_name: deepcopy(net_object.spec)
for net_name, net_object in self._nets.viewitems()
}
}
if filters:
utils.filter_spec(spec, filters)
return spec
|
Get the spec of the current env.
The spec will hold the info about all the domains and
networks associated with this env.
Args:
filters (list): list of paths to keys that should be removed from
the init file
Returns:
dict: the spec of the current env
|
juraj-google-style
|
def get_checklists(self, **query_params):
checklists = self.get_checklist_json(self.base_uri, query_params=query_params)
checklists_list = []
for checklist_json in checklists:
checklists_list.append(self.create_checklist(checklist_json))
return checklists_list
|
Get the checklists for this card. Returns a list of Checklist objects.
Returns:
list(Checklist): The checklists attached to this card
|
codesearchnet
|
def update(self, information, timeout=(- 1)):
return self._client.update(information, timeout=timeout)
|
Edit an IPv4 Range.
Args:
information (dict): Information to update.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Updated IPv4 range.
|
codesearchnet
|
def serialize_workflow(self):
self.workflow.refresh_waiting_tasks()
return CompactWorkflowSerializer().serialize_workflow(self.workflow, include_spec=False)
|
Serializes the current WF.
Returns:
WF state data.
|
codesearchnet
|
def with_dtype(self, dtype):
dtype = dtypes.as_dtype(dtype)
if dtype not in (dtypes.int32, dtypes.int64):
raise ValueError('dtype must be int32 or int64')
if self.dtype == dtype:
return self
return RowPartition(row_splits=_cast_if_not_none(self._row_splits, dtype), row_lengths=_cast_if_not_none(self._row_lengths, dtype), value_rowids=_cast_if_not_none(self._value_rowids, dtype), nrows=_cast_if_not_none(self._nrows, dtype), uniform_row_length=_cast_if_not_none(self._uniform_row_length, dtype), internal=_row_partition_factory_key)
|
Returns a copy of this RowPartition with the given encoding dtype.
Args:
dtype: The dtype for encoding tensors, such as `row_splits` and `nrows`.
One of `tf.int32` or `tf.int64`.
Returns:
A copy of this RowPartition, with the encoding tensors cast to the given
type.
|
github-repos
|
def is_predecessor_of_other(self, predecessor, others):
return any(((predecessor in self._predecessors_by_id[o]) for o in others))
|
Returns whether the predecessor is a predecessor or a predecessor
of a predecessor...of any of the others.
Args:
predecessor (str): The txn id of the predecessor.
others (list(str)): The txn id of the successor.
Returns:
(bool)
|
codesearchnet
|
def inspect(obj: object) -> None:
root = nodes.Node.from_obj(obj)
html_content = IPython.display.HTML(f'\n {resource_utils.resource_import("theme.css")}\n {pyjs_com.js_import()}\n {resource_utils.resource_import("main.js")}\n\n {main_inspect_html(root)}\n <script>\n load_content("{root.id}");\n </script>\n ')
IPython.display.display(html_content)
|
Inspect all attributes of a Python object interactively.
Args:
obj: Any object to inspect (module, class, dict,...).
|
github-repos
|
def recompose(src: Path, target_file: Path):
(mission_folder, assets_folder) = NewMiz._get_subfolders(src)
base_info = ujson.loads(Path(mission_folder, 'base_info.json').read_text(encoding=ENCODING))
version = base_info['__version__']
with Miz(target_file) as miz:
LOGGER.info('re-composing mission table from folder: "%s"', mission_folder)
miz.mission.d = NewMiz._recreate_dict_from_folder(mission_folder, version)
for item in assets_folder.iterdir():
target = Path(miz.temp_dir, item.name).absolute()
if item.is_dir():
if target.exists():
shutil.rmtree(target)
shutil.copytree(item.absolute(), target)
elif item.is_file():
shutil.copy(item.absolute(), target)
miz.zip(target_file, encode=False)
|
Recompose a Miz from json object
Args:
src: folder containing the json structure
target_file: target Miz file
|
codesearchnet
|
def get_variables_in_module(module, collection=tf.GraphKeys.TRAINABLE_VARIABLES):
return module.get_variables(collection=collection)
|
Returns tuple of `tf.Variable`s declared inside an `snt.Module`.
Note that this operates by searching the variable scope a module contains,
and so does not know about any modules which were constructed elsewhere but
used inside this module.
Args:
module: `snt.Module` instance to query the scope of.
collection: Collection to restrict query to. By default this is
`tf.Graphkeys.TRAINABLE_VARIABLES`, which doesn't include non-trainable
variables such as moving averages.
Returns:
A tuple of `tf.Variable` objects.
Raises:
NotConnectedError: If the module is not connected to the Graph.
|
codesearchnet
|
def put(self, item):
if not item:
return
self._queue.put(item)
if self._queue.qsize() >= self._max_queue_length:
self.flush()
|
Adds the passed in item object to the queue and calls :func:`flush` once the queue size reaches :func:`max_queue_length`. This method does nothing if the passed in item is None.
Args:
item (:class:`contracts.Envelope`): the telemetry envelope object to send to the service.
|
juraj-google-style
|
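A minimal self-contained sketch of the same batching pattern; the class name and the print statement are illustrative stand-ins, not the telemetry client's real types:
import queue

class BatchingQueue:
    """Collects items and flushes them once max_queue_length is reached."""

    def __init__(self, max_queue_length=3):
        self._queue = queue.Queue()
        self._max_queue_length = max_queue_length

    def put(self, item):
        if not item:
            return
        self._queue.put(item)
        if self._queue.qsize() >= self._max_queue_length:
            self.flush()

    def flush(self):
        batch = []
        while not self._queue.empty():
            batch.append(self._queue.get())
        print('sending batch:', batch)   # stand-in for the real transport

q = BatchingQueue(max_queue_length=2)
q.put('a')
q.put('b')   # triggers flush -> sending batch: ['a', 'b']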
def create(cls, name, config=None, kind='spark'):
conn = Qubole.agent()
return conn.post(cls.rest_entity_path, data={'name': name, 'config': config, 'kind': kind})
|
Create a new app.
Args:
`name`: the name of the app
`config`: a dictionary of key-value pairs
`kind`: kind of the app (default=spark)
|
codesearchnet
|
def _address_content(self, x):
mem_keys = tf.layers.dense(self.mem_vals, self.key_depth, bias_initializer=tf.constant_initializer(1.0), name='mem_key')
mem_query = tf.layers.dense(x, self.key_depth, bias_initializer=tf.constant_initializer(1.0), name='mem_query')
norm = tf.matmul(self._norm(mem_query), self._norm(mem_keys), transpose_b=True)
dot_product = tf.matmul(mem_query, mem_keys, transpose_b=True)
cos_dist = tf.div(dot_product, (norm + 1e-07), name='cos_dist')
access_logits = (self.sharpen_factor * cos_dist)
return access_logits
|
Address the memory based on content similarity.
Args:
x: a tensor in the shape of [batch_size, length, depth].
Returns:
the logits for each memory entry [batch_size, length, memory_size].
|
codesearchnet
|
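The addressing step is cosine similarity between each query position and every memory key, scaled by a sharpening factor. A plain-NumPy rendering of the same math with toy shapes (no TF layers; the names are illustrative):
import numpy as np

def content_address_logits(queries, keys, sharpen_factor=2.0, eps=1e-07):
    # queries: [length, depth], keys: [memory_size, depth]
    dot = queries @ keys.T
    norm = (np.linalg.norm(queries, axis=-1, keepdims=True)
            * np.linalg.norm(keys, axis=-1, keepdims=True).T)
    cos_dist = dot / (norm + eps)
    return sharpen_factor * cos_dist          # [length, memory_size]

queries = np.random.randn(5, 8)
keys = np.random.randn(16, 8)
print(content_address_logits(queries, keys).shape)   # (5, 16)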
def set_mtu(self, name, value=None, default=False, disable=False):
if (value is not None):
value = int(value)
if (not (68 <= value <= 65535)):
raise ValueError('invalid mtu value')
commands = [('interface %s' % name)]
commands.append(self.command_builder('mtu', value=value, default=default, disable=disable))
return self.configure(commands)
|
Configures the interface IP MTU
Args:
name (string): The interface identifier to apply the interface
config to
value (integer): The MTU value to set the interface to. Accepted
values include 68 to 65535
default (bool): Configures the mtu parameter to its default
value using the EOS CLI default command
disable (bool): Negate the mtu parameter value using the EOS
CLI no command
Returns:
True if the operation succeeds otherwise False.
Raises:
ValueError: If the value for MTU is not an integer value or
outside of the allowable range
|
codesearchnet
|
def get_tensor_dependencies(tensor):
dependencies = set()
dependencies.update(tensor.op.inputs)
for sub_op in tensor.op.inputs:
dependencies.update(get_tensor_dependencies(sub_op))
return dependencies
|
Utility method to get all dependencies (including placeholders) of a tensor (backwards through the graph).
Args:
tensor (tf.Tensor): The input tensor.
Returns: Set of all dependencies (including needed placeholders) for the input tensor.
|
juraj-google-style
|
def get_forced_variation(self, experiment, user_id):
forced_variations = experiment.forcedVariations
if forced_variations and user_id in forced_variations:
variation_key = forced_variations.get(user_id)
variation = self.config.get_variation_from_key(experiment.key, variation_key)
if variation:
self.logger.info('User "%s" is forced in variation "%s".' % (user_id, variation_key))
return variation
return None
|
Determine if a user is forced into a variation for the given experiment and return that variation.
Args:
experiment: Object representing the experiment for which user is to be bucketed.
user_id: ID for the user.
Returns:
Variation in which the user with ID user_id is forced into. None if no variation.
|
juraj-google-style
|
def __init__(self, vocab, unk_token, normalize_text=True):
self.vocab = vocab
self.unk_token = unk_token
self.normalize_text = normalize_text
|
Constructs a CharacterTokenizer.
Args:
**vocab**:
Vocabulary object.
**unk_token**: str
A special symbol for out-of-vocabulary token.
**normalize_text**: (`optional`) boolean (default True)
Whether to apply unicode normalization to text before tokenization.
|
github-repos
|
def rectify_acquaintance_strategy(
circuit: circuits.Circuit,
acquaint_first: bool=True
) -> None:
if not is_acquaintance_strategy(circuit):
raise TypeError('not is_acquaintance_strategy(circuit)')
rectified_moments = []
for moment in circuit:
gate_type_to_ops = collections.defaultdict(list)
for op in moment.operations:
    gate_type_to_ops[isinstance(op.gate, AcquaintanceOpportunityGate)].append(op)
if len(gate_type_to_ops) == 1:
rectified_moments.append(moment)
continue
for acquaint_first in sorted(gate_type_to_ops.keys(), reverse=acquaint_first):
    rectified_moments.append(ops.Moment(gate_type_to_ops[acquaint_first]))
circuit._moments = rectified_moments
|
Splits moments so that they contain either only acquaintance gates
or only permutation gates. Orders resulting moments so that the first one
is of the same type as the previous one.
Args:
circuit: The acquaintance strategy to rectify.
acquaint_first: Whether to put the acquaintance moment first when
splitting the first mixed moment.
|
juraj-google-style
|
def register_entity(self, entity_value, entity_type, alias_of=None, domain=0):
if domain not in self.domains:
self.register_domain(domain=domain)
self.domains[domain].register_entity(entity_value=entity_value,
entity_type=entity_type,
alias_of=alias_of)
|
Register an entity to be tagged in potential parse results.
Args:
entity_value(str): the value/proper name of an entity instance
(Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show")
domain(str): a string representing the domain you wish to add the entity to
|
juraj-google-style
|
class ActivityRegularization(Layer):
def __init__(self, l1=0.0, l2=0.0, **kwargs):
super(ActivityRegularization, self).__init__(activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs)
self.supports_masking = True
self.l1 = l1
self.l2 = l2
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {'l1': self.l1, 'l2': self.l2}
base_config = super(ActivityRegularization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
Layer that applies an update to the cost function based on input activity.
Args:
l1: L1 regularization factor (positive float).
l2: L2 regularization factor (positive float).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
|
github-repos
|
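A short usage sketch with the Sequential API, assuming TensorFlow's bundled Keras; the layer sizes and regularization factor are arbitrary:
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu', input_shape=(16,)),
    tf.keras.layers.ActivityRegularization(l1=0.0, l2=1e-4),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer='adam', loss='mse')
# During training the L2 penalty on the regularized layer's activations is
# added to the total loss alongside the MSE term.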
def SvelteComponent(name, path):
if (path[(- 3):] == '.js'):
js_path = path
elif (path[(- 5):] == '.html'):
print('Trying to build svelte component from html...')
js_path = build_svelte(path)
js_content = read(js_path, mode='r')
def inner(data):
id_str = js_id(name)
html = _template.replace('$js', js_content).replace('$name', name).replace('$data', json.dumps(data)).replace('$id', id_str)
_display_html(html)
return inner
|
Display svelte components in iPython.
Args:
name: name of svelte component (must match component filename when built)
path: path to compiled svelte .js file or source svelte .html file.
(If html file, we try to call svelte and build the file.)
Returns:
A function mapping data to a rendered svelte component in ipython.
|
codesearchnet
|
def get_path(url):
url = urlsplit(url)
path = url.path
if url.query:
path += '?{}'.format(url.query)
return path
|
Get the path from a given url, including the querystring.
Args:
url (str)
Returns:
str
|
codesearchnet
|
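Example behaviour, assuming the helper above is imported; the query string is preserved while scheme and host are dropped:
print(get_path('https://example.com/api/items?page=2&sort=asc'))
# -> /api/items?page=2&sort=asc
print(get_path('https://example.com/plain/path'))
# -> /plain/path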