code | docstring | source
---|---|---|
def from_features(cls, features, types):
params = cls()
if features:
for key in sorted(features.keys()):
feature = features[key]
if not isinstance(feature, tuple(types)):
raise ValueError(f"Unsupported {type(feature).__name__} {feature} for key '{key}'")
params._add_feature(key, feature)
params._validate()
return params
|
Builds _ParseOpParams for a given set of features and allowed types.
Args:
features: A `dict` mapping feature keys to objects of a type in `types`.
types: Type of features to allow, among `FixedLenFeature`,
`VarLenFeature`, `SparseFeature`, and `FixedLenSequenceFeature`.
Returns:
A `_ParseOpParams` containing the raw parameters for `gen_parsing_ops`.
Raises:
ValueError: if `features` contains an item not in `types`, or an invalid
feature.
ValueError: if sparse and dense key sets intersect.
ValueError: if input lengths do not match up.
|
github-repos
|
def download(self, folder=None):
url = self.data.get('url', None)
if not url:
raise HDXError('No URL to download!')
logger.debug('Downloading %s' % url)
filename = self.data['name']
format = '.%s' % self.data['format']
if format not in filename:
filename = '%s%s' % (filename, format)
with Download(full_agent=self.configuration.get_user_agent()) as downloader:
path = downloader.download_file(url, folder, filename)
return url, path
|
Download resource and store it in the provided folder, or a temporary folder if no folder is supplied
Args:
folder (Optional[str]): Folder to download resource to. Defaults to None.
Returns:
Tuple[str, str]: (URL downloaded, Path to downloaded file)
|
juraj-google-style
|
def abort_collective_ops(self, code, message):
self.ensure_initialized()
pywrap_tfe.TFE_AbortCollectiveOps(self._handle, code, message)
|
Abort the collective ops.
This is intended to be used when a peer failure is detected, which allows
the user to handle the case instead of hanging. This aborts all ongoing
collectives. Afterwards, all subsequent collectives will error immediately, and you
need to call reset_context() to use collectives again.
Args:
code: a `tf.errors` error code.
message: a string. The error message.
|
github-repos
|
def base256_encode(n, minwidth=0):
if n > 0:
arr = []
while n:
n, rem = divmod(n, 256)
arr.append(rem)
b = bytearray(reversed(arr))
elif n == 0:
b = bytearray(b'\x00')
else:
raise ValueError("Negative numbers not supported")
if minwidth > 0 and len(b) < minwidth:
padding = (minwidth - len(b)) * b'\x00'
b = bytearray(padding) + b
b.reverse()
return b
|
Encode the input with base256.
Args:
n (int): input value.
minwidth: minimum return value length.
Raises:
ValueError: if a negative number is provided.
Returns:
bytearray: the encoded value (least-significant byte first), padded to at least `minwidth` bytes.
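Example (a hypothetical doctest-style sketch; expected outputs inferred from the code above):
>>> base256_encode(258)
bytearray(b'\x02\x01')
>>> base256_encode(1, minwidth=4)
bytearray(b'\x01\x00\x00\x00')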
|
juraj-google-style
|
def search_track(self, artist, album=None, track=None,
full_album_art_uri=False):
subcategories = [artist]
subcategories.append(album or '')
result = self.get_album_artists(
full_album_art_uri=full_album_art_uri,
subcategories=subcategories, search_term=track,
complete_result=True)
result._metadata['search_type'] = 'search_track'
return result
|
Search for an artist, an artist's albums, or a specific track.
Args:
artist (str): an artist's name.
album (str, optional): an album name. Default `None`.
track (str, optional): a track name. Default `None`.
full_album_art_uri (bool): whether the album art URI should be
absolute (i.e. including the IP address). Default `False`.
Returns:
A `SearchResult` instance.
|
juraj-google-style
|
def diff_compute(self, text1, text2, checklines, deadline):
if (not text1):
return [(self.DIFF_INSERT, text2)]
if (not text2):
return [(self.DIFF_DELETE, text1)]
if (len(text1) > len(text2)):
(longtext, shorttext) = (text1, text2)
else:
(shorttext, longtext) = (text1, text2)
i = longtext.find(shorttext)
if (i != (- 1)):
diffs = [(self.DIFF_INSERT, longtext[:i]), (self.DIFF_EQUAL, shorttext), (self.DIFF_INSERT, longtext[(i + len(shorttext)):])]
if (len(text1) > len(text2)):
diffs[0] = (self.DIFF_DELETE, diffs[0][1])
diffs[2] = (self.DIFF_DELETE, diffs[2][1])
return diffs
if (len(shorttext) == 1):
return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
hm = self.diff_halfMatch(text1, text2)
if hm:
(text1_a, text1_b, text2_a, text2_b, mid_common) = hm
diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline)
diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline)
return ((diffs_a + [(self.DIFF_EQUAL, mid_common)]) + diffs_b)
if (checklines and (len(text1) > 100) and (len(text2) > 100)):
return self.diff_lineMode(text1, text2, deadline)
return self.diff_bisect(text1, text2, deadline)
|
Find the differences between two texts. Assumes that the texts do not
have any common prefix or suffix.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
checklines: Speedup flag. If false, then don't run a line-level diff
first to identify the changed areas.
If true, then run a faster, slightly less optimal diff.
deadline: Time when the diff should be complete by.
Returns:
Array of changes.
|
codesearchnet
|
def __init__(self, exclude_columns, coder=coders.registry.get_coder(Any)):
self.coder = coder
self.exclude_columns = exclude_columns
|
Encodes/decodes items of a dictionary into a single element.
Args:
exclude_columns: list of columns to exclude from the encoding.
coder: coder used for the encoding. Defaults to the registry coder for `Any`.
|
github-repos
|
def old_format(self, content: BeautifulSoup) -> List[str]:
b = content.find('body')
sender, date, nxt, rep_to = None, None, None, None
strongs = b.findAll('strong', recursive=False)
for s in strongs:
field = str(s).split(">")[1].split("<")[0]
if 'From' in field:
sender = s.next_sibling.split("(")[0].strip()
elif 'Date' in field:
date_str = s.next_sibling.strip().replace("-","").replace(" "," ").strip()
try:
date = parsedate_to_datetime(date_str).isoformat()[:19]
except:
date = None
sender = b.find('b').text if sender == None else sender
sender = b.find('a').text if len(sender) == 0 else sender
date = b.find('i').text[:19] if date == None else date
try:
nav = content.find('ul').findAll('li')
except:
nav = None
if nav != None:
for l in nav:
s = l.text
if 'Next in thread' in s:
nxt = '/'.join(self.email_url.split('/')[:-1]) + '/' + l.find('a')['href']
nxt = nxt[1:] if nxt[0] == '/' else nxt
elif 'reply to' in s:
rep_to = '/'.join(self.email_url.split('/')[:-1]) + '/' + l.find('a')['href']
rep_to = rep_to[1:] if rep_to[0] == '/' else rep_to
body = content.find('pre')
body = body.text.strip() if body != None else None
return [str(i) for i in [sender, date, body, nxt, rep_to]]
|
Extracts email message information if it uses the old Mailman format
Args:
content: BeautifulSoup
Returns: List[str]
|
juraj-google-style
|
def CompileReport(self, mediator):
lines_of_text = ['Listing domains visited by all users']
for domain in sorted(self._domains):
lines_of_text.append(domain)
lines_of_text.append('')
report_text = '\n'.join(lines_of_text)
return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)
|
Compiles an analysis report.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
Returns:
AnalysisReport: the analysis report.
|
juraj-google-style
|
def subprogram_prototype(vo):
plist = '; '.join(str(p) for p in vo.parameters)
if isinstance(vo, VhdlFunction):
if len(vo.parameters) > 0:
proto = 'function {}({}) return {};'.format(vo.name, plist, vo.return_type)
else:
proto = 'function {} return {};'.format(vo.name, vo.return_type)
else:
proto = 'procedure {}({});'.format(vo.name, plist)
return proto
|
Generate a canonical prototype string
Args:
vo (VhdlFunction, VhdlProcedure): Subprogram object
Returns:
Prototype string.
|
juraj-google-style
|
def _pretty_print(data_item, summarize):
if isinstance(data_item, tensor_lib.Tensor):
arr = data_item.numpy()
if np.isscalar(arr):
return str(arr)
else:
flat = arr.reshape((-1,))
lst = [str(x) for x in flat[:summarize]]
if len(lst) < flat.size:
lst.append('...')
return str(lst)
else:
return str(data_item)
|
Format a data item for use in an error message in eager mode.
Args:
data_item: One of the items in the "data" argument to an assert_* function.
Can be a Tensor or a scalar value.
summarize: How many elements to retain of each tensor-valued entry in data.
Returns:
An appropriate string representation of data_item
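Example (a minimal hedged sketch, assuming TensorFlow eager mode; values are illustrative):
import tensorflow as tf
_pretty_print(tf.constant([1, 2, 3, 4]), summarize=2)  # -> "['1', '2', '...']"
_pretty_print(3.5, summarize=2)                        # -> '3.5'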
|
github-repos
|
def transform_kernels(kernels, func, n_gates):
return np.hstack([func(k) for k in np.hsplit(kernels, n_gates)])
|
Transforms kernel for each gate separately using given function.
Args:
kernels: Stacked array of kernels for individual gates.
func: Function applied to kernel of each gate.
n_gates: Number of gates (4 for LSTM, 3 for GRU).
Returns:
Stacked array of transformed kernels.
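Example (a hedged NumPy sketch with illustrative shapes):
import numpy as np
kernels = np.arange(8.0).reshape(2, 4)  # two gates, each of width 2
transform_kernels(kernels, lambda k: k * 10, n_gates=2)
# -> array([[ 0., 10., 20., 30.],
#           [40., 50., 60., 70.]])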
|
github-repos
|
def create_asset_accesspolicy(access_token, name, duration, permission='1'):
path = '/AccessPolicies'
endpoint = ''.join([ams_rest_endpoint, path])
body = (((((('{ \t\t"Name": "' + str(name)) + '", \t\t"DurationInMinutes": "') + duration) + '", \t\t"Permissions": "') + permission) + '" \t}')
return do_ams_post(endpoint, path, body, access_token)
|
Create Media Service Asset Access Policy.
Args:
access_token (str): A valid Azure authentication token.
name (str): A Media Service Asset Access Policy Name.
duration (str): A Media Service duration.
permission (str): A Media Service permission.
Returns:
HTTP response. JSON body.
|
codesearchnet
|
def _infer_hints_allowing_override(op1, op2, hints):
hints = hints or _Hints()
if hints.is_self_adjoint is None:
is_self_adjoint = op1.is_self_adjoint and op2.is_self_adjoint
else:
is_self_adjoint = hints.is_self_adjoint
if hints.is_positive_definite is None:
is_positive_definite = op1.is_positive_definite and op2.is_positive_definite
else:
is_positive_definite = hints.is_positive_definite
if is_positive_definite and hints.is_positive_definite is None:
is_non_singular = True
else:
is_non_singular = hints.is_non_singular
return _Hints(is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite)
|
Infer hints from op1 and op2. hints argument is an override.
Args:
op1: LinearOperator
op2: LinearOperator
hints: _Hints object holding "is_X" boolean hints to use for returned
operator.
If some hint is None, try to set using op1 and op2. If the
hint is provided, ignore op1 and op2 hints. This allows an override
of previous hints, but does not allow forbidden hints (e.g. you still
cannot say a real diagonal operator is not self-adjoint).
Returns:
_Hints object.
|
github-repos
|
def visible(self):
query_results = self.map((lambda el: el.is_displayed()), 'visible').results
if query_results:
return all(query_results)
return False
|
Check whether all matched elements are visible.
Returns:
bool
|
codesearchnet
|
def export_json(data, status, headers):
dumped = json.dumps(data, ensure_ascii=False)
resp = current_app.response_class(dumped, status=status, headers=headers, content_type='application/json; charset=utf-8')
return resp
|
Creates a JSON response.
The JSON content is encoded in UTF-8, not Unicode-escaped.
Args:
data: any type object that can dump to json
status (int): http status code
headers (dict): http headers
|
codesearchnet
|
def handle_enterprise_logistration(backend, user, **kwargs):
request = backend.strategy.request
enterprise_customer = get_enterprise_customer_for_running_pipeline(request, {'backend': backend.name, 'kwargs': kwargs})
if (enterprise_customer is None):
return
(enterprise_customer_user, _) = EnterpriseCustomerUser.objects.update_or_create(enterprise_customer=enterprise_customer, user_id=user.id)
enterprise_customer_user.update_session(request)
|
Perform the linking of the user to the Enterprise Customer during the login process.
Args:
backend: The class handling the SSO interaction (SAML, OAuth, etc)
user: The user object in the process of being logged in with
**kwargs: Any remaining pipeline variables
|
codesearchnet
|
def add(self, text, checked=False, sort=None):
node = ListItem(parent_id=self.id, parent_server_id=self.server_id)
node.checked = checked
node.text = text
if sort is not None:
node.sort = sort
self.append(node, True)
self.touch(True)
return node
|
Add a new item to the list.
Args:
text (str): The text.
checked (bool): Whether this item is checked.
sort (int): Item id for sorting.
|
juraj-google-style
|
def discount_bond_price(self, state: types.RealTensor, times: types.RealTensor, maturities: types.RealTensor, name: str=None) -> types.RealTensor:
name = name or self._name + '_discount_bond_prices'
with tf.name_scope(name):
x_t = tf.convert_to_tensor(state, self._dtype)
times = tf.convert_to_tensor(times, self._dtype)
maturities = tf.convert_to_tensor(maturities, self._dtype)
input_shape_times = tf.shape(times)
mean_reversion = self._mean_reversion
y_t = self.state_y(times)
y_t = tf.reshape(tf.transpose(y_t), tf.concat([input_shape_times, [self._dim, self._dim]], axis=0))
values = self._bond_reconstitution(times, maturities, mean_reversion, x_t, y_t, 1, tf.shape(times)[0])
return values[0][0]
|
Returns zero-coupon bond prices `P(t,T)` conditional on `x(t)`.
Args:
state: A `Tensor` of real dtype and shape compatible with
`(num_times, dim)` specifying the state `x(t)`.
times: A `Tensor` of real dtype and shape `(num_times,)`. The time `t`
at which discount bond prices are computed.
maturities: A `Tensor` of real dtype and shape `(num_times,)`. The time
to maturity of the discount bonds.
name: Str. The name to give this op.
Default value: `discount_bond_prices`.
Returns:
A `Tensor` of real dtype and the same shape as `(num_times,)`
containing the price of zero-coupon bonds.
|
github-repos
|
def get_summary(result):
summary = {'success': result.wasSuccessful(), 'stat': {'total': result.testsRun, 'failures': len(result.failures), 'errors': len(result.errors), 'skipped': len(result.skipped), 'expectedFailures': len(result.expectedFailures), 'unexpectedSuccesses': len(result.unexpectedSuccesses)}}
summary['stat']['successes'] = (((((summary['stat']['total'] - summary['stat']['failures']) - summary['stat']['errors']) - summary['stat']['skipped']) - summary['stat']['expectedFailures']) - summary['stat']['unexpectedSuccesses'])
summary['time'] = {'start_at': result.start_at, 'duration': result.duration}
summary['records'] = result.records
return summary
|
get summary from test result
Args:
result (instance): HtmlTestResult() instance
Returns:
dict: summary extracted from result.
{
"success": True,
"stat": {},
"time": {},
"records": []
}
|
codesearchnet
|
def encode(self, input_audio, start_level=0, end_level=None, bs_chunks=1):
audio_chunks = torch.chunk(input_audio, bs_chunks, dim=0)
music_tokens_list = []
for chunk_i in audio_chunks:
music_tokens_i = self._encode(chunk_i, start_level=start_level, end_level=end_level)
music_tokens_list.append(music_tokens_i)
music_tokens = [torch.cat(music_tokens_level, dim=0) for music_tokens_level in zip(*music_tokens_list)]
return music_tokens
|
Transforms the `input_audio` to a discrete representation made out of `music_tokens`.
Args:
input_audio (`torch.Tensor`):
Raw audio which will be encoded to its discrete representation using the codebook. The closest `code`
from the codebook will be computed for each sequence of samples.
start_level (`int`, *optional*, defaults to 0):
Level at which the encoding process will start. Defaults to 0.
end_level (`int`, *optional*):
Level at which the encoding process will end. Defaults to None.
bs_chunks (int, *optional*, defaults to 1):
Number of chunks of raw audio to process at the same time.
|
github-repos
|
def __init__(self, time_elements_tuple=None):
fraction_of_second = None
if time_elements_tuple:
if len(time_elements_tuple) < 7:
raise ValueError((
'Invalid time elements tuple at least 7 elements required,'
'got: {0:d}').format(len(time_elements_tuple)))
milliseconds = time_elements_tuple[6]
time_elements_tuple = time_elements_tuple[:6]
if (milliseconds < 0 or
milliseconds >= definitions.MILLISECONDS_PER_SECOND):
raise ValueError('Invalid number of milliseconds.')
fraction_of_second = (
decimal.Decimal(milliseconds) / definitions.MILLISECONDS_PER_SECOND)
super(TimeElementsInMilliseconds, self).__init__(
fraction_of_second=fraction_of_second,
time_elements_tuple=time_elements_tuple)
self._precision = definitions.PRECISION_1_MILLISECOND
|
Initializes time elements.
Args:
time_elements_tuple (Optional[tuple[int, int, int, int, int, int, int]]):
time elements, contains year, month, day of month, hours, minutes,
seconds and milliseconds.
Raises:
ValueError: if the time elements tuple is invalid.
|
juraj-google-style
|
def useQt(qtLib: str = 'PyQt5', period: float = 0.01):
def qt_step():
loop.call_later(period, qt_step)
if not stack:
qloop = QEventLoop()
timer = QTimer()
timer.timeout.connect(qloop.quit)
stack.append((qloop, timer))
qloop, timer = stack.pop()
timer.start(0)
qloop.exec_()
timer.stop()
stack.append((qloop, timer))
if qtLib not in ('PyQt5', 'PySide2'):
raise RuntimeError(f'Unknown Qt library: {qtLib}')
if qtLib == 'PyQt5':
from PyQt5.Qt import QApplication, QTimer, QEventLoop
else:
from PySide2.QtWidgets import QApplication
from PySide2.QtCore import QTimer, QEventLoop
global qApp
qApp = QApplication.instance() or QApplication(sys.argv)
loop = asyncio.get_event_loop()
stack: list = []
qt_step()
|
Run combined Qt5/asyncio event loop.
Args:
qtLib: Name of Qt library to use, can be 'PyQt5' or 'PySide2'.
period: Period in seconds to poll Qt.
|
juraj-google-style
|
def percentile(self, percent):
if percent >= 100:
percent = 100
target = len(self) - len(self) * (percent / 100)
for k in reversed(sorted(self._data.keys())):
target -= self._data[k]
if target < 0:
return k
return 10
|
Return the value that is the Nth percentile in the histogram.
Args:
percent (Union[int, float]): The percentile being sought. The
default consumer implementations consistently use ``99``.
Returns:
int: The value corresponding to the requested percentile.
|
juraj-google-style
|
def _new_population_genalg(population, fitnesses, mutation_chance=0.02, crossover_chance=0.7, selection_function=gaoperators.tournament_selection, crossover_function=gaoperators.one_point_crossover):
intermediate_population = selection_function(population, fitnesses)
new_population = _crossover(intermediate_population, crossover_chance, crossover_function)
gaoperators.random_flip_mutate(new_population, mutation_chance)
return new_population
|
Perform all genetic algorithm operations on a population, and return a new population.
population must have an even number of chromosomes.
Args:
population: A list of binary lists, ex. [[0,1,1,0], [1,0,1,0]]
fitnesses: A list of fitnesses that correspond with chromosomes in the population,
ex. [1.2, 10.8]
mutation_chance: the chance that a bit will be flipped during mutation
crossover_chance: the chance that two parents will be crossed during crossover
selection_function: A function that will select parents for crossover and mutation
crossover_function: A function that will cross two parents
Returns:
list; A new population of chromosomes, that should be more fit.
|
codesearchnet
|
def Unregister(self, name):
precondition.AssertType(name, Text)
try:
del self._constructors[name]
except KeyError:
raise ValueError(("Constructor with name '%s' is not registered" % name))
|
Unregisters a constructor.
Args:
name: A name of the constructor to unregister.
Raises:
ValueError: If constructor with specified name has never been registered.
|
codesearchnet
|
class MaxTimeCriteria(StoppingCriteria):
def __init__(self, max_time: float, initial_timestamp: Optional[float]=None):
self.max_time = max_time
self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
is_done = time.time() - self.initial_timestamp > self.max_time
return torch.full((input_ids.shape[0],), is_done, device=input_ids.device, dtype=torch.bool)
|
This class can be used to stop generation whenever the full generation exceeds some amount of time. By default, the
time will start being counted when you initialize this class. You can override this by passing an
`initial_timestamp`.
Args:
max_time (`float`):
The maximum allowed time in seconds for the generation.
initial_timestamp (`float`, *optional*, defaults to `time.time()`):
The start of the generation allowed time.
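Example (a hedged usage sketch; assumes the surrounding `transformers` generation utilities):
from transformers import StoppingCriteriaList
# Stop generation once 5 seconds have elapsed since construction.
stopping_criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=5.0)])
# outputs = model.generate(**inputs, stopping_criteria=stopping_criteria)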
|
github-repos
|
def ch_duration(self, *channels: List[Channel]) -> int:
return self.timeslots.ch_duration(*channels)
|
Return duration of supplied channels.
Args:
*channels: Supplied channels
|
juraj-google-style
|
def add_arguments(cls, parser):
parser.add_argument(
'-c', '--create-missing-tasks',
action='store_true',
dest='create_missing_tasks',
help="[sync] create asana tasks for issues without tasks"
)
parser.add_argument(
'-l', '--sync-labels',
action='store_true',
dest='sync_labels',
help="[sync] sync labels and milestones for each issue"
)
|
Add arguments to the parser for collection in app.args.
Args:
parser:
`argparse.ArgumentParser`. Parser.
Arguments added here are available on
self.args.
|
juraj-google-style
|
def quick_execute(op_name, num_outputs, inputs, attrs, ctx, name=None):
device_name = ctx.device_name
try:
ctx.ensure_initialized()
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name, inputs, attrs, num_outputs)
except core._NotOkStatusException as e:
if name is not None:
e.message += ' name: ' + name
raise core._status_to_exception(e) from None
except TypeError as e:
keras_symbolic_tensors = [x for x in inputs if _is_keras_symbolic_tensor(x)]
if keras_symbolic_tensors:
raise core._SymbolicException('Inputs to eager execution function cannot be Keras symbolic tensors, but found {}'.format(keras_symbolic_tensors))
raise e
return tensors
|
Execute a TensorFlow operation.
Args:
op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
execute.
num_outputs: The number of outputs of the operation to fetch. (Explicitly
provided instead of being inferred for performance reasons).
inputs: A list of inputs to the operation. Each entry should be a Tensor, or
a value which can be passed to the Tensor constructor to create one.
attrs: A tuple with alternating string attr names and attr values for this
operation.
ctx: The value of context.context().
name: Customized name for the operation.
Returns:
List of output Tensor objects. The list is empty if there are no outputs
Raises:
An exception on error.
|
github-repos
|
def load_object(obj) -> object:
if isinstance(obj, str):
if (':' in obj):
(module_name, obj_name) = obj.split(':')
if (not module_name):
module_name = '.'
else:
module_name = obj
obj = importlib.import_module(module_name)
if obj_name:
attrs = obj_name.split('.')
for attr in attrs:
obj = getattr(obj, attr)
return obj
|
Load an object.
Args:
obj (str|object): Load the indicated object if this is a string;
otherwise, return the object as is.
To load a module, pass a dotted path like 'package.module';
to load an object from a module pass a path like
'package.module:name'.
Returns:
object
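Example (a hedged sketch; module and attribute names are illustrative, output shown for a POSIX system):
>>> join = load_object('os.path:join')
>>> join('a', 'b')
'a/b'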
|
codesearchnet
|
def map_err(self, op: Callable[[E], F]) -> 'Union[Result[T, F], Result[T, E]]':
return self if self._is_ok else cast(
'Result[T, F]',
self._type.Err(op(cast(E, self._val)))
)
|
Applies a function to the contained :meth:`Result.Err` value.
Args:
op: The function to apply to the :meth:`Result.Err` value.
Returns:
A :class:`Result` with its error value as the function result
if `self` is a :meth:`Result.Err` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map_err(lambda x: x * 2)
Ok(1)
>>> Err(1).map_err(lambda x: x * 2)
Err(2)
|
juraj-google-style
|
def cv_squared(x):
epsilon = 1e-10
float_size = (tf.to_float(tf.size(x)) + epsilon)
mean = (tf.reduce_sum(x) / float_size)
variance = (tf.reduce_sum(tf.squared_difference(x, mean)) / float_size)
return (variance / (tf.square(mean) + epsilon))
|
The squared coefficient of variation of a sample.
Useful as a loss to encourage a positive distribution to be more uniform.
Epsilons added for numerical stability.
Returns 0 for an empty Tensor.
Args:
x: a `Tensor`.
Returns:
a `Scalar`.
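For intuition, a hedged NumPy analogue of the same quantity (epsilon terms omitted; values are illustrative):
import numpy as np
x = np.array([1.0, 2.0, 3.0])
np.var(x) / np.mean(x) ** 2  # population variance over squared mean, as computed above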
|
codesearchnet
|
def load_module(self, namespace, module_name):
try:
filename, src = self.typeshed.get_module_file(namespace, module_name, self.options.python_version)
except OSError:
return (None, None)
ast = parser.parse_string(src, filename=filename, name=module_name, options=self.options)
return (filename, ast)
|
Load and parse a *.pyi from typeshed.
Args:
namespace: one of "stdlib" or "third_party"
module_name: the module name (without any file extension or "__init__"
suffix).
Returns:
(None, None) if the module doesn't have a definition.
Else a tuple of the filename and the AST of the module.
|
github-repos
|
def add_spin_by_site(self, spins):
if (len(spins) != len(self.sites)):
raise ValueError('Spin of all sites must be specified in the dictionary.')
for (site, spin) in zip(self.sites, spins):
new_sp = {}
for (sp, occu) in site.species.items():
sym = sp.symbol
oxi_state = getattr(sp, 'oxi_state', None)
new_sp[Specie(sym, oxidation_state=oxi_state, properties={'spin': spin})] = occu
site.species = new_sp
|
Add spin states to a structure by site.
Args:
spins (list): List of spins
E.g., [+5, -5, 0, 0]
|
codesearchnet
|
def get_flights(self, search_key):
url = AIRLINE_FLT_BASE.format(search_key, 100)
return self._fr24.get_airline_flight_data(url)
|
Get the flights for a particular airline.
Given a full or partial flight number string, this method returns the first 100 flights matching that string.
Please note this method was different in earlier versions: the older versions took an airline code and returned all scheduled flights for that airline.
Args:
search_key (str): Full or partial flight number for any airline e.g. MI47 to get all SilkAir flights starting with MI47
Returns:
A list of dicts, one for each scheduled flight in the airline's network
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_flights('MI47')
|
codesearchnet
|
def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha, use_tpu=False):
return self._beam_decode_slow(features, decode_length, beam_size, top_beams, alpha, use_tpu)
|
Beam search decoding.
Models should ideally implement a more efficient version of this function.
Args:
features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. The larger the alpha, the
stronger the preference for longer translations.
use_tpu: A bool, whether to do beam decode on TPU.
Returns:
samples: an integer `Tensor`. Top samples from the beam search
|
codesearchnet
|
def _verify_setup(self):
if not self._is_chief:
for op in self._graph.get_operations():
if op.type in ['Variable', 'VariableV2'] and (not op.device):
raise ValueError('When using replicas, all Variables must have their device set: %s' % op)
|
Check that all is good.
Raises:
ValueError: If something is not good.
|
github-repos
|
def delete_customer(self, customer_id):
return self.client._delete(self.url + 'customers/{}'.format(customer_id), headers=self.get_headers())
|
Removes a user from the system.
Args:
customer_id: Identifier of the client to be deleted.
Returns:
The response from the delete request.
|
juraj-google-style
|
def __init__(self, latitude, longitude, comment=None):
super(Xearth, self).__init__(latitude, longitude)
self.comment = comment
|
Initialise a new ``Xearth`` object.
Args:
latitude (float): Location's latitude
longitude (float): Location's longitude
comment (str): Comment for location
|
juraj-google-style
|
def list_files(file_directory,
file_extensions=None,
include_subfolders=True,
include_root=True,
root_dir=None):
log = logging.getLogger("%s" % (inspect.stack()[0][3]))
log.setLevel(__LOG_LEVEL__)
rtn_list = []
if not root_dir:
root_dir = file_directory
root_dir = root_dir.strip()
if root_dir.endswith(os.path.sep):
root_dir = root_dir.strip()[:-1]
dir_parts_len = len(root_dir.split(os.path.sep))
level = 0
for root, dirnames, filenames in os.walk(file_directory):
root_str = root
if level > 0 and not include_subfolders:
break
if not include_root:
root_str = os.path.sep.join(root.split(os.path.sep)[dir_parts_len:])
if file_extensions:
files = [(x,
os.path.join(root_str, x),
os.path.getmtime(os.path.join(root, x)),
os.path.join(root, x))
for x in filenames \
if "." in x \
and x.split(".")[len(x.split("."))-1] in file_extensions]
else:
files = [(x,
os.path.join(root_str, x),
os.path.getmtime(os.path.join(root, x)),
os.path.join(root, x))
for x in filenames]
rtn_list += files
level += 1
rtn_list.sort(key=lambda tup: tup[0], reverse=True)
return rtn_list
|
Returns a list of files
args:
file_directory: a string path to the file directory
file_extensions: a list of file extensions to filter by, e.g.
['xml', 'rdf']. If None, include all files
include_subfolders: whether to include files in subfolders
include_root: whether to include the root in the path
root_dir: the root directory to remove if include_root is False
returns:
list of tuples (file_name, file_path_with_root_mod, modified_time, full_path)
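Example (a minimal hedged sketch; the directory path and extensions are illustrative):
# XML/RDF files under ./data, paths made relative to ./data, sorted by file name descending
files = list_files('./data', ['xml', 'rdf'], include_root=False)
for name, rel_path, mtime, full_path in files:
    print(name, rel_path)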
|
juraj-google-style
|
def dict_load(self, ns_dict):
for (prefix, uri) in ns_dict.items():
self.bind(prefix, uri, override=False, calc=False)
self.__make_dicts__
|
Reads a dictionary of namespaces and binds them to the manager
Args:
ns_dict: dictionary with the key as the prefix and the value
as the uri
|
codesearchnet
|
def nth(series, n, order_by=None):
if (order_by is not None):
series = order_series_by(series, order_by)
try:
return series.iloc[n]
except:
return np.nan
|
Returns the nth value of a series.
Args:
series (pandas.Series): column to summarize.
n (integer): position of desired value. Returns `NaN` if out of range.
Kwargs:
order_by: a pandas.Series or list of series (can be symbolic) to order
the input series by before summarization.
|
codesearchnet
|
def get_log_file_timestamp(delta=None):
return _get_timestamp('%m-%d-%Y_%H-%M-%S-%f', delta)
|
Returns a timestamp in the format used for log file names.
Default is current time. If a delta is set, the return value will be
the current time offset by delta seconds.
Args:
delta: Number of seconds to offset from current time; can be negative.
Returns:
A timestamp in log file name format with an offset.
|
github-repos
|
def Decompress(self, compressed_data):
try:
if hasattr(lzma, 'LZMA_VERSION'):
uncompressed_data = self._lzma_decompressor.decompress(
compressed_data, 0)
else:
uncompressed_data = self._lzma_decompressor.decompress(compressed_data)
remaining_compressed_data = getattr(
self._lzma_decompressor, 'unused_data', b'')
except (EOFError, IOError, LZMAError) as exception:
raise errors.BackEndError((
'Unable to decompress XZ compressed stream with error: '
'{0!s}.').format(exception))
return uncompressed_data, remaining_compressed_data
|
Decompresses the compressed data.
Args:
compressed_data (bytes): compressed data.
Returns:
tuple(bytes, bytes): uncompressed data and remaining compressed data.
Raises:
BackEndError: if the XZ compressed stream cannot be decompressed.
|
juraj-google-style
|
def map_kegg_all_genes(organism_code, target_db):
mapping = bs_kegg.conv(target_db, organism_code)
new_mapping = {}
for (k, v) in mapping.items():
new_mapping[k.replace((organism_code + ':'), '')] = str(v.split(':')[1])
return new_mapping
|
Map all of an organism's gene IDs to the target database.
This is faster than supplying a specific list of genes to map,
plus there seems to be a limit on the number you can map with a manual REST query anyway.
Args:
organism_code: the three letter KEGG code of your organism
target_db: ncbi-proteinid | ncbi-geneid | uniprot
Returns:
Dictionary of ID mapping
|
codesearchnet
|
def __init__(self, option=None, default=None, *args, **kwargs):
super(ListOption, self).__init__(*args, **kwargs)
if not isinstance(option, opt.Option):
raise TypeError("Option must be an option type.")
self._option = option
self._default = default
if default is not None:
self._value = self.coerce(default)
|
Initialize the option with an option type.
Args:
option (option.Option): The option which is used to validate all
list options.
Raises:
TypeError: If the given option is not an instance of option.Option.
TypeError: If the default value is set but not an iterable.
|
juraj-google-style
|
def copy(self, dest):
if os.path.isfile(self.path):
shutil.copy2(self.path, dest)
else:
shutil.copytree(self.path, dest, symlinks=False, ignore=None)
|
Copy item to the given `dest` path.
Args:
* dest: destination path to copy.
|
codesearchnet
|
def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
size = get_size_dict(size, default_to_square=False)
if 'shortest_edge' in size:
output_size = get_resize_output_image_size(image, size['shortest_edge'], default_to_square=False, input_data_format=input_data_format)
elif 'height' in size and 'width' in size:
output_size = (size['height'], size['width'])
else:
raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
|
Resize an image.
Args:
image (`np.ndarray`):
Image to resize.
size (`Dict[str, int]`):
Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will
have the size `(h, w)`. If `size` is of the form `{"shortest_edge": s}`, the output image will have its
shortest edge of length `s` while keeping the aspect ratio of the original image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
|
github-repos
|
def _group_chunks_by_entities(self, chunks, entities):
for entity in entities:
chunks_to_concat = chunks.get_overlaps(
entity['beginOffset'], len(entity['content']))
if not chunks_to_concat:
continue
new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])
new_chunk = Chunk(new_chunk_word)
chunks.swap(chunks_to_concat, new_chunk)
return chunks
|
Groups chunks by entities retrieved from NL API Entity Analysis.
Args:
chunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed.
entities (:obj:`list` of :obj:`dict`): List of entities.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
|
juraj-google-style
|
def get_dropout_mask_for_cell(self, inputs, training, count=1):
if self.dropout == 0:
return None
init_kwargs = dict(inputs=inputs, training=training, count=count)
return self._dropout_mask_cache.setdefault(kwargs=init_kwargs)
|
Get the dropout mask for RNN cell's input.
It will create mask based on context if there isn't any existing cached
mask. If a new mask is generated, it will update the cache in the cell.
Args:
inputs: The input tensor whose shape will be used to generate dropout
mask.
training: Boolean tensor, whether it's in training mode; dropout will be
ignored in non-training mode.
count: Int, how many dropout masks will be generated. It is useful for cells
that have internal weights fused together.
Returns:
List of mask tensor, generated or cached mask based on context.
|
github-repos
|
def _SanitizedMRO(obj):
return_list = []
for cls in tf_inspect.getmro(obj):
if cls.__name__ == '_NewClass':
continue
str_repr = _NormalizeType(str(cls))
return_list.append(str_repr)
if 'tensorflow' not in str_repr and 'keras' not in str_repr:
break
if 'StubOutForTesting' in str_repr:
break
return return_list
|
Get a list of superclasses with minimal amount of non-TF classes.
Based on many parameters like python version, OS, protobuf implementation
or changes in google core libraries the list of superclasses of a class
can change. We only return the first non-TF class to be robust to non API
affecting changes. The Method Resolution Order returned by `tf_inspect.getmro`
is still maintained in the return value.
Args:
obj: A python object for which to create the sanitized MRO.
Returns:
list of strings, string representation of the class names.
|
github-repos
|
def SetTimelineOwner(self, username):
self._timeline_owner = username
logger.info('Owner of the timeline: {0!s}'.format(self._timeline_owner))
|
Sets the username of the user that should own the timeline.
Args:
username (str): username.
|
codesearchnet
|
def add_property(self, set_property, name, starting_value, tag_name=None):
def del_property(self, tag_name):
try:
del self._content[tag_name]
except KeyError:
pass
def get_property(self, tag_name):
try:
return self._content[tag_name]
except KeyError:
return None
tag_name = (name if (tag_name is None) else tag_name)
fget = (lambda self: get_property(self, tag_name))
fdel = (lambda self: del_property(self, tag_name))
fset = (lambda self, value: set_property(value))
setattr(self.__class__, name, property(fget, fset, fdel))
set_property(starting_value)
|
Set properties of attributes stored in content using stored common fdel and fget and the given fset.
Args:
set_property -- Function that sets the given property.
name -- Name of the attribute this property must simulate. Used as key in the content dict by default.
starting_value -- Starting value of the given property.
Keyword args:
tag_name -- The tag name stored in the content dict as a key, if different from name.
|
codesearchnet
|
def setup_prefix_logging(logdir):
if not os.path.exists(logdir):
os.mkdir(logdir)
file_handler = logging.FileHandler(
filename=os.path.join(logdir, 'lago.log'),
)
file_formatter = get_default_log_formatter()
file_handler.setFormatter(file_formatter)
logging.root.addHandler(file_handler)
hide_paramiko_logs()
hide_stevedore_logs()
|
Sets up a file logger that will create a log in the given logdir (usually a
lago prefix)
Args:
logdir (str): path to create the log into, will be created if it does
not exist
Returns:
None
|
juraj-google-style
|
def query(
self,
url: Union[str, methods],
data: Optional[MutableMapping] = None,
headers: Optional[MutableMapping] = None,
as_json: Optional[bool] = None,
) -> dict:
url, body, headers = sansio.prepare_request(
url=url,
data=data,
headers=headers,
global_headers=self._headers,
token=self._token,
)
return self._make_query(url, body, headers)
|
Query the slack API
When using :class:`slack.methods` the request is made `as_json` if available
Args:
url: :class:`slack.methods` or url string
data: JSON encodable MutableMapping
headers: Custom headers
as_json: Post JSON to the slack API
Returns:
dictionary of slack API response data
|
juraj-google-style
|
async def msetup(self, text_channel):
if self.mready:
logger.warning('Attempt to init music when already initialised')
return
if (self.state != 'starting'):
logger.error("Attempt to init from wrong state ('{}'), must be 'starting'.".format(self.state))
return
self.logger.debug('Setting up gui')
self.mchannel = text_channel
self.new_embed_ui()
(await self.embed.send())
(await self.embed.usend())
(await self.add_reactions())
self.mready = True
|
Creates the gui
Args:
text_channel (discord.Channel): The channel for the embed ui to run in
|
codesearchnet
|
async def on_message(message):
server = message.server
author = message.author
channel = message.channel
content = message.content
data = datatools.get_data()
if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
return
if server is not None and author != channel.server.me:
prefix = data["discord"]["servers"][server.id]["prefix"]
if content.startswith(prefix):
package = content.split(" ")
command = package[0][len(prefix):]
args = package[1:]
alias_steam = ["steam", "pc"]
alias_ps = ["ps", "psn", "playstation", "ps4", "playstation 4"]
alias_xbox = ["xbox", "xb", "xb1", "xbone", "xbox one", "xbox one"]
platform = "steam"
if len(args) > 0:
player_name = args[0]
else:
return
if len(args) > 1:
platform = ' '.join(args[1:]).lower()
if platform in alias_steam:
platform = "steam"
elif platform in alias_ps:
platform = "ps"
elif platform in alias_xbox:
platform = "xbox"
if command == 'rlstats':
await client.send_typing(channel)
success, rldata = api_rocketleaguestats.check_rank(player_name, platform)
if success:
embed = ui_embed.success(channel, rldata[0], rldata[1], rldata[2], rldata[3])
else:
embed = ui_embed.fail_api(channel)
await embed.send()
|
The on_message event handler for this module
Args:
message (discord.Message): Input message
|
juraj-google-style
|
def Matches(self, registry_key, search_depth):
if (self._key_path_segments is None):
key_path_match = None
else:
key_path_match = self._CheckKeyPath(registry_key, search_depth)
if (not key_path_match):
return (False, key_path_match)
if (search_depth != self._number_of_key_path_segments):
return (False, key_path_match)
return (True, key_path_match)
|
Determines if the Windows Registry key matches the find specification.
Args:
registry_key (WinRegistryKey): Windows Registry key.
search_depth (int): number of key path segments to compare.
Returns:
tuple: contains:
bool: True if the Windows Registry key matches the find specification,
False otherwise.
bool: True if the key path matches, False if not or None if no key path
specified.
|
codesearchnet
|
def get_data_dirs(__pkg: str) -> List[str]:
dirs = [user_data(__pkg)]
dirs.extend((path.expanduser(path.sep.join([d, __pkg])) for d in getenv('XDG_DATA_DIRS', '/usr/local/share/:/usr/share/').split(':')))
return [d for d in dirs if path.isdir(d)]
|
Return all data directories for given package.
Args:
__pkg: Package name
|
codesearchnet
|
def _init_pfor(self, parent_pfor, indices, cond_stacked, inputs, inputs_stacked):
num_outputs = len(self._outputs)
assert len(inputs) == len(self._enters)
assert len(inputs_stacked) == len(self._enters)
loop_var = parent_pfor.loop_var
loop_len = array_ops.size(indices)
pfor = PFor(loop_var, loop_len, pfor_ops=self._pfor_ops, all_indices=indices, all_indices_partitioned=cond_stacked, fallback_to_while_loop=self._fallback_to_while_loop, pfor_config=self._pfor_config)
for enter in self._direct_enters:
enter_input = enter.op.inputs[0]
converted_enter, stacked, is_sparse_stacked = parent_pfor._convert_helper(enter_input)
assert not stacked and (not is_sparse_stacked), (enter, converted_enter)
pfor._add_conversion(enter, wrap(converted_enter, False))
for enter, inp, stacked in zip(self._enters, inputs, inputs_stacked):
pfor._add_conversion(enter, wrap(inp, stacked))
for i in range(num_outputs):
wrapped_inp = wrap(inputs[i], inputs_stacked[i])
merge = self._enter_merges[i]
pfor._add_conversion(merge.outputs[0], wrapped_inp)
pfor._add_conversion(merge.outputs[1], wrap(constant_op.constant(-1.0), False))
switch = self._exit_switches[i]
pfor._add_conversion(switch.outputs[1], wrapped_inp)
return pfor
|
Create a PFor object for converting parts of the while_loop.
Args:
parent_pfor: PFor object being used for converting the while_loop.
indices: int32 Tensor of ids for the iterations that are still active
(i.e. did not exit the while_loop).
cond_stacked: True if the while_loop condition is stacked.
inputs: list of input Tensors corresponding 1-to-1 with self._enters. Note
that these Tensors are a subset of the loop variables for the generated
while_loop.
inputs_stacked: List of booleans corresponding 1-to-1 with `inputs`,
indicating if the value is stacked or not.
Returns:
A PFor instance. The instance is initialized by adding conversion mappings
of nodes that will be external to the conversion that the returned
instance will be used for. e.g. Enter nodes as well as Merge and Switch
outputs are mapped to converted values.
|
github-repos
|
def stop_on_exception(self):
return self._coord.stop_on_exception()
|
Context handler to stop the supervisor when an exception is raised.
See `Coordinator.stop_on_exception()`.
Returns:
A context handler.
|
github-repos
|
def observations(self, main_type, sub_type, unique_id, owner=None, params=None):
params = params or {}
if owner:
params['owner'] = owner
if not sub_type:
url = '/v2/{}/{}/observations'.format(main_type, unique_id)
else:
url = '/v2/{}/{}/{}/observations'.format(main_type, sub_type, unique_id)
return self.tcex.session.get(url, json=params)
|
Args:
main_type:
sub_type:
unique_id:
owner:
params:
Return:
|
juraj-google-style
|
def _pick_unused_port_without_server():
rng = random.Random()
for _ in range(10):
port = int(rng.randrange(15000, 25000))
if is_port_free(port):
_random_ports.add(port)
return port
for _ in range(10):
port = bind(0, _PROTOS[0][0], _PROTOS[0][1])
if (port and bind(port, _PROTOS[1][0], _PROTOS[1][1])):
_random_ports.add(port)
return port
raise NoFreePortFoundError()
|
Pick an available network port without the help of a port server.
This code ensures that the port is available on both TCP and UDP.
This function is an implementation detail of PickUnusedPort(), and
should not be called by code outside of this module.
Returns:
A port number that is unused on both TCP and UDP.
Raises:
NoFreePortFoundError: No free port could be found.
|
codesearchnet
|
def send_log_message(self, message: LogMessage) -> None:
print(message)
|
Prints the log message to be captured by cloud logging.
Args:
* message: LogMessage dictionary
Returns:
* None
|
github-repos
|
def write_worksheets(workbook, data_list, result_info_key, identifier_keys):
worksheet_keys = get_worksheet_keys(data_list[0], result_info_key)
for key in worksheet_keys:
title = key.split('/')[1]
title = utilities.convert_snake_to_title_case(title)
title = KEY_TO_WORKSHEET_MAP.get(title, title)
if (key == 'property/nod'):
create_property_nod_worksheets(workbook, data_list, result_info_key, identifier_keys)
else:
worksheet = workbook.create_sheet(title=title[:31])
processed_data = process_data(key, data_list, result_info_key, identifier_keys)
write_data(worksheet, processed_data)
workbook.remove_sheet(workbook.active)
|
Writes rest of the worksheets to workbook.
Args:
workbook: workbook to write into
data_list: Analytics API data as a list of dicts
result_info_key: the key in api_data dicts that contains the data results
identifier_keys: the list of keys used as requested identifiers
(address, zipcode, block_id, etc)
|
codesearchnet
|
def _ParseFileData(self, knowledge_base, file_object):
text_file_object = dfvfs_text_file.TextFile(file_object, encoding='utf-8')
system_product = text_file_object.readline()
if system_product.startswith('Debian GNU/Linux '):
system_product, _, _ = system_product.partition('\\')
system_product = system_product.rstrip()
else:
system_product = None
if not knowledge_base.GetValue('operating_system_product'):
if system_product:
knowledge_base.SetValue('operating_system_product', system_product)
|
Parses file content (data) for system product preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_object (dfvfs.FileIO): file-like object that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
|
juraj-google-style
|
def read_binary_array(self, key, b64decode=True, decode=False):
data = None
if (key is not None):
data = self.db.read(key.strip())
if (data is not None):
data_decoded = []
for d in json.loads(data, object_pairs_hook=OrderedDict):
if b64decode:
dd = base64.b64decode(d)
if decode:
try:
dd = dd.decode('utf-8')
except UnicodeDecodeError:
dd = dd.decode('latin-1')
data_decoded.append(dd)
else:
data_decoded.append(d)
data = data_decoded
else:
self.tcex.log.warning(u'The key field was None.')
return data
|
Read method of CRUD operation for binary array data.
Args:
key (string): The variable to read from the DB.
b64decode (bool): If true the data will be base64 decoded.
decode (bool): If true the data will be decoded to a String.
Returns:
(list): Results retrieved from DB.
|
codesearchnet
|
def js_adaptor(buffer):
buffer = re.sub('true', 'True', buffer)
buffer = re.sub('false', 'False', buffer)
buffer = re.sub('none', 'None', buffer)
buffer = re.sub('NaN', '"NaN"', buffer)
return buffer
|
Convert JavaScript literals like true, false, none, and NaN to their
Python equivalents (or quoted words).
Arguments:
buffer: string to be converted
Returns:
string after conversion
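Example (a hypothetical doctest-style sketch):
>>> js_adaptor('{"ok": true, "score": NaN}')
'{"ok": True, "score": "NaN"}'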
|
juraj-google-style
|
def extraterrestrial_horizontal_radiation(self, value=9999.0):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `extraterrestrial_horizontal_radiation`'.format(value))
if value < 0.0:
raise ValueError(
'value need to be greater or equal 0.0 '
'for field `extraterrestrial_horizontal_radiation`')
self._extraterrestrial_horizontal_radiation = value
|
Corresponds to IDD Field `extraterrestrial_horizontal_radiation`
Args:
value (float): value for IDD Field `extraterrestrial_horizontal_radiation`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def get_elements_iterable(self, make_copy: bool=False) -> Iterable[WindowedValue]:
if not self._stacked:
elements = cast('List[WindowedValue]', self._elements)
if self._committed and (not make_copy):
return elements
return list(elements)
def iterable_stacked_or_elements(elements):
for e in elements:
if isinstance(e, _Bundle._StackedWindowedValues):
for w in e.windowed_values():
yield w
else:
yield e
if self._committed and (not make_copy):
return iterable_stacked_or_elements(self._elements)
return [e for e in iterable_stacked_or_elements(self._elements)]
|
Returns iterable elements.
Args:
make_copy: whether to force returning copy or yielded iterable.
Returns:
unstacked elements,
in the form of iterable if committed and make_copy is not True,
or as a list of copied WindowedValues.
|
github-repos
|
def find_labels(model_class):
model_name = model_class.__name__
framework = infer_framework(model_class)
if framework == 'tf':
signature = inspect.signature(model_class.call)
elif framework == 'pt':
signature = inspect.signature(model_class.forward)
else:
signature = inspect.signature(model_class.__call__)
if 'QuestionAnswering' in model_name:
return [p for p in signature.parameters if 'label' in p or p in ('start_positions', 'end_positions')]
else:
return [p for p in signature.parameters if 'label' in p]
|
Find the labels used by a given model.
Args:
model_class (`type`): The class of the model.
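Example (a hedged illustration; the class names assume the `transformers` library is available):
>>> from transformers import BertForSequenceClassification, BertForQuestionAnswering
>>> find_labels(BertForSequenceClassification)
['labels']
>>> find_labels(BertForQuestionAnswering)
['start_positions', 'end_positions']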
|
github-repos
|
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
self._validate_kwargs(kwargs)
dtype = _assert_float_dtype(dtype)
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return self._random_generator.truncated_normal(shape, self.mean, self.stddev, dtype)
|
Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
**kwargs: Additional keyword arguments.
Raises:
ValueError: If the dtype is not floating point
|
github-repos
|
def get_form_energy(self, entry):
c = entry.composition
return entry.energy - sum([c[el] * self.el_refs[el].energy_per_atom
for el in c.elements])
|
Returns the formation energy for an entry (NOT normalized) from the
elemental references.
Args:
entry: A PDEntry-like object.
Returns:
Formation energy from the elemental references.
|
juraj-google-style
|
def files_upload(self, *, file: Union[(str, IOBase)]=None, content: str=None, **kwargs) -> SlackResponse:
if ((file is None) and (content is None)):
raise e.SlackRequestError('The file or content argument must be specified.')
if ((file is not None) and (content is not None)):
raise e.SlackRequestError('You cannot specify both the file and the content argument.')
if file:
return self.api_call('files.upload', files={'file': file}, data=kwargs)
elif content:
data = kwargs.copy()
data.update({'content': content})
return self.api_call('files.upload', data=data)
|
Uploads or creates a file.
Args:
file (str): Supply a file path.
when you'd like to upload a specific file. e.g. 'dramacat.gif'
content (str): Supply content when you'd like to create an
editable text file containing the specified text. e.g. 'launch plan'
Raises:
SlackRequestError: If neither or both of the `file` and `content` args are specified.
|
codesearchnet
|
def _build(self, inputs, is_training=True, dropout_keep_prob=0.5):
self._input_shape = tuple(inputs.get_shape().as_list())
net = inputs
final_index = (self._num_layers - 1)
for layer_id in xrange(self._num_layers):
net = self._layers[layer_id](net)
if ((final_index != layer_id) or self._activate_final):
if self._use_dropout:
keep_prob = utils.smart_cond(is_training, true_fn=(lambda : dropout_keep_prob), false_fn=(lambda : tf.constant(1.0)))
net = tf.nn.dropout(net, keep_prob=keep_prob)
net = self._activation(net)
return net
|
Assembles the `MLP` and connects it to the graph.
Args:
inputs: A 2D Tensor of size `[batch_size, input_size]`.
is_training: A bool or tf.Bool Tensor. Indicates whether we are
currently training. Defaults to `True`.
dropout_keep_prob: The probability that each element is kept when
both `use_dropout` and `is_training` are True. Defaults to 0.5.
Returns:
A 2D Tensor of size `[batch_size, output_sizes[-1]]`.
|
codesearchnet
|
def insert(cls, cur, table: str, values: dict):
keys = cls._COMMA.join(values.keys())
value_place_holder = cls._PLACEHOLDER * len(values)
query = cls._insert_string.format(table, keys, value_place_holder[:-1])
yield from cur.execute(query, tuple(values.values()))
return (yield from cur.fetchone())
|
Creates an insert statement with only chosen fields
Args:
table: a string indicating the name of the table
values: a dict of fields and values to be inserted
Returns:
A 'Record' object with table columns as properties
|
juraj-google-style
|
def _get_corrupted_example(self, x):
corruption_type = self.builder_config.corruption_type
severity = self.builder_config.severity
return {
'gaussian_noise': corruptions.gaussian_noise,
'shot_noise': corruptions.shot_noise,
'impulse_noise': corruptions.impulse_noise,
'defocus_blur': corruptions.defocus_blur,
'frosted_glass_blur': corruptions.frosted_glass_blur,
'zoom_blur': corruptions.zoom_blur,
'fog': corruptions.fog,
'brightness': corruptions.brightness,
'contrast': corruptions.contrast,
'elastic': corruptions.elastic,
'pixelate': corruptions.pixelate,
'jpeg_compression': corruptions.jpeg_compression,
}[corruption_type](x, severity)
|
Return corrupted images.
Args:
x: numpy array, uncorrupted image.
Returns:
numpy array, corrupted images.
|
juraj-google-style
|
def mme_nodes(mme_base_url, token):
nodes = []
if not mme_base_url or not token:
return nodes
url = ''.join([mme_base_url, '/nodes'])
nodes = matchmaker_request(url=url, token=token, method='GET')
LOG.info('Matchmaker has the following connected nodes:{}'.format(nodes))
return nodes
|
Return the available MatchMaker nodes
Args:
mme_base_url(str): base URL of MME service
token(str): MME server authorization token
Returns:
nodes(list): a list of node dictionaries
|
juraj-google-style
|
def get_vcs_root():
for vcs in (git, hg):
repo_root = vcs.repository_root()
if repo_root:
return (vcs, repo_root)
return (None, None)
|
Returns the vcs module and the root of the repo.
Returns:
A tuple containing the vcs module to use (git, hg) and the root of the
repository. If no repository exists then (None, None) is returned.
|
codesearchnet
|
class GraniteMoeSharedMLP(nn.Module):
def __init__(self, config: GraniteMoeSharedConfig):
super(GraniteMoeSharedMLP, self).__init__()
self.input_size = config.hidden_size
self.hidden_size = config.shared_intermediate_size
self.activation = ACT2FN[config.hidden_act]
self.input_linear = nn.Linear(self.input_size, self.hidden_size * 2, bias=False)
self.output_linear = nn.Linear(self.hidden_size, self.input_size, bias=False)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.input_linear(hidden_states)
chunked_hidden_states = hidden_states.chunk(2, dim=-1)
hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]
hidden_states = self.output_linear(hidden_states)
return hidden_states
|
MLP layer for shared experts
Args:
config:
Configuration object with model hyperparameters.
|
github-repos
|
def get_system_memory():
docker_limit = None
memory_limit_filename = '/sys/fs/cgroup/memory/memory.limit_in_bytes'
if os.path.exists(memory_limit_filename):
with open(memory_limit_filename, 'r') as f:
docker_limit = int(f.read())
psutil_memory_in_bytes = None
try:
import psutil
psutil_memory_in_bytes = psutil.virtual_memory().total
except ImportError:
pass
if (psutil_memory_in_bytes is not None):
memory_in_bytes = psutil_memory_in_bytes
elif ((sys.platform == 'linux') or (sys.platform == 'linux2')):
bytes_in_kilobyte = 1024
memory_in_bytes = (vmstat('total memory') * bytes_in_kilobyte)
else:
memory_in_bytes = sysctl(['sysctl', 'hw.memsize'])
if (docker_limit is not None):
return min(docker_limit, memory_in_bytes)
else:
return memory_in_bytes
|
Return the total amount of system memory in bytes.
Returns:
The total amount of system memory in bytes.
|
codesearchnet
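An added sketch of the same respect-the-container-limit idea, assuming psutil is installed and using the cgroup v1 path from the snippet above.
import os
import psutil

def system_memory_bytes():
    total = psutil.virtual_memory().total
    limit_file = '/sys/fs/cgroup/memory/memory.limit_in_bytes'
    if os.path.exists(limit_file):
        with open(limit_file) as f:
            # inside a container the cgroup limit can be lower than host RAM
            total = min(total, int(f.read()))
    return total

print(system_memory_bytes())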
|
def split(pcoll, regex, outputEmpty=False):
regex = Regex._regex_compile(regex)
outputEmpty = bool(outputEmpty)
def _process(element):
r = regex.split(element)
if r and (not outputEmpty):
r = list(filter(None, r))
yield r
return pcoll | FlatMap(_process)
|
Returns a list of strings produced by splitting the input on the given regular
expression. It will not output empty items (by default).
Args:
regex: the regular expression string or (re.compile) pattern.
outputEmpty: (optional) Whether empty items should be output. True to output empties
and False if not.
|
github-repos
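A hedged usage sketch of the split transform above inside a small Apache Beam pipeline; note that each output element is the full list produced for one input string.
import apache_beam as beam

with beam.Pipeline() as p:
    _ = (
        p
        | beam.Create(['a,b,,c', 'x,,y'])
        | beam.Regex.split(r',')   # empty items dropped by default
        | beam.Map(print)          # ['a', 'b', 'c'] then ['x', 'y']
    )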
|
def write_graph(graph_or_graph_def, logdir, name, as_text=True):
if isinstance(graph_or_graph_def, ops.Graph):
graph_def = graph_or_graph_def.as_graph_def()
else:
graph_def = graph_or_graph_def
if sys.byteorder == 'big':
if hasattr(graph_def, 'node'):
byte_swap_tensor.swap_tensor_content_in_graph_node(graph_def, 'big', 'little')
else:
byte_swap_tensor.swap_tensor_content_in_graph_function(graph_def, 'big', 'little')
if not logdir.startswith('gs:'):
file_io.recursive_create_dir(logdir)
path = os.path.join(logdir, name)
if as_text:
file_io.atomic_write_string_to_file(path, text_format.MessageToString(graph_def, float_format=''))
else:
file_io.atomic_write_string_to_file(path, graph_def.SerializeToString(deterministic=True))
return path
|
Writes a graph proto to a file.
The graph is written as a text proto unless `as_text` is `False`.
```python
v = tf.Variable(0, name='my_variable')
sess = tf.compat.v1.Session()
tf.io.write_graph(sess.graph_def, '/tmp/my-model', 'train.pbtxt')
```
or
```python
v = tf.Variable(0, name='my_variable')
sess = tf.compat.v1.Session()
tf.io.write_graph(sess.graph, '/tmp/my-model', 'train.pbtxt')
```
Args:
graph_or_graph_def: A `Graph` or a `GraphDef` protocol buffer.
logdir: Directory where to write the graph. This can refer to remote
filesystems, such as Google Cloud Storage (GCS).
name: Filename for the graph.
as_text: If `True`, writes the graph as an ASCII proto.
Returns:
The path of the output proto file.
|
github-repos
|
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
def __init__(self, top_p: float, filter_value: float=-float('Inf'), min_tokens_to_keep: int=1):
if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}')
if not isinstance(min_tokens_to_keep, int) or min_tokens_to_keep < 1:
raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}')
self.top_p = top_p
self.filter_value = filter_value
self.min_tokens_to_keep = min_tokens_to_keep
def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
mask_scores = jnp.full_like(scores, self.filter_value)
cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
score_mask = cumulative_probs < self.top_p
score_mask = jnp.roll(score_mask, 1)
score_mask |= score_mask.at[:, 0].set(True)
score_mask = score_mask.at[:, :self.min_tokens_to_keep].set(True)
topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
return next_scores
|
[`FlaxLogitsWarper`] that performs top-p filtering, i.e. restricting to the smallest set of most probable tokens whose cumulative probability is at least `top_p`.
Args:
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
filter_value (`float`, *optional*, defaults to -inf):
All filtered values will be set to this float value.
min_tokens_to_keep (`int`, *optional*, defaults to 1):
Minimum number of tokens that cannot be filtered.
|
github-repos
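A hedged usage sketch, assuming transformers is installed with Flax/JAX support; the scores are made-up log-probabilities.
import jax.numpy as jnp
from transformers import FlaxTopPLogitsWarper

warper = FlaxTopPLogitsWarper(top_p=0.9)
scores = jnp.log(jnp.array([[0.5, 0.3, 0.15, 0.05]]))   # fake distribution over 4 tokens
input_ids = jnp.zeros((1, 1), dtype=jnp.int32)
filtered = warper(input_ids, scores, cur_len=1)
print(filtered)   # tokens outside the 0.9 nucleus are pushed to -inf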
|
def cpu_halt_reasons(self):
buf_size = self.MAX_NUM_MOES
buf = (structs.JLinkMOEInfo * buf_size)()
num_reasons = self._dll.JLINKARM_GetMOEs(buf, buf_size)
if (num_reasons < 0):
raise errors.JLinkException(num_reasons)
return list(buf)[:num_reasons]
|
Retrieves the reasons that the CPU was halted.
Args:
self (JLink): the ``JLink`` instance
Returns:
A list of ``JLinkMOEInfo`` instances specifying the reasons for which
the CPU was halted. This list may be empty in the case that the CPU
is not halted.
Raises:
JLinkException: on hardware error.
|
codesearchnet
|
def split_image(self, image: np.ndarray, input_data_format: Optional[Union[str, ChannelDimension]]=None):
height, width = get_image_size(image, input_data_format)
mid_width = width // 2
mid_height = height // 2
return [self._crop(image, 0, 0, mid_width, mid_height, input_data_format), self._crop(image, mid_width, 0, width, mid_height, input_data_format), self._crop(image, 0, mid_height, mid_width, height, input_data_format), self._crop(image, mid_width, mid_height, width, height, input_data_format), image]
|
Split an image into 4 equal sub-images, and then concatenate that sequence with the original image.
That means that a single image becomes a sequence of 5 images.
This is a "trick" to spend more compute on each image with no changes in the vision encoder.
Args:
image (`np.ndarray`):
Images to split.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
|
github-repos
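An added standalone sketch of the four-quadrants-plus-original idea with plain numpy slicing on a channels-last image; it mirrors what split_image returns once the midpoints are halved.
import numpy as np

def split_image_np(image):
    h, w = image.shape[:2]
    mh, mw = h // 2, w // 2
    return [image[:mh, :mw], image[:mh, mw:],
            image[mh:, :mw], image[mh:, mw:], image]

parts = split_image_np(np.zeros((64, 48, 3), dtype=np.uint8))
print([p.shape for p in parts])   # four quadrants followed by the full image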
|
def _disconnect_from_device(self, uuid, key, client, unsolicited=False):
conn_id = self._validate_connection('disconnect', uuid, key)
if (conn_id is None):
return
conn_data = self._connections[uuid]
slug = self._build_device_slug(uuid)
message = {'client': client, 'type': 'response', 'operation': 'disconnect'}
self.client.reset_sequence(self.topics.gateway_topic(slug, 'control/connect'))
self.client.reset_sequence(self.topics.gateway_topic(slug, 'control/action'))
try:
resp = (yield self._manager.disconnect(conn_id))
except Exception as exc:
self._logger.exception('Error in manager disconnect')
resp = {'success': False, 'reason': ('Internal error: %s' % str(exc))}
self._manager.remove_monitor(conn_data['report_monitor'])
self._manager.remove_monitor(conn_data['trace_monitor'])
if resp['success']:
del self._connections[uuid]
message['success'] = True
else:
message['success'] = False
message['failure_reason'] = resp['reason']
self._logger.info('Client %s disconnected from device 0x%X', client, uuid)
if (unsolicited and resp['success']):
self._publish_response(slug, {'client': client, 'type': 'notification', 'operation': 'disconnect'})
elif (not unsolicited):
self._publish_response(slug, message)
|
Disconnect from a device that we have previously connected to.
Args:
uuid (int): The unique id of the device
key (string): A 64 byte string used to secure this connection
client (string): The client id for who is trying to connect
to the device.
unsolicited (bool): Whether the client asked us to disconnect or we
are forcibly doing it. Forcible disconnections are sent as notifications
instead of responses.
|
codesearchnet
|
def fetches(self):
return self._final_fetches
|
Return the unique names of tensors to fetch.
Returns:
A list of strings.
|
github-repos
|
def parse_table_name(name, project_id=None, dataset_id=None):
_project_id = _dataset_id = _table_id = _decorator = None
if isinstance(name, basestring):
m = re.match(_ABS_TABLE_NAME_PATTERN, name, re.IGNORECASE)
if (m is not None):
(_project_id, _dataset_id, _table_id, _decorator) = m.groups()
else:
m = re.match(_REL_TABLE_NAME_PATTERN, name)
if (m is not None):
groups = m.groups()
(_project_id, _dataset_id, _table_id, _decorator) = (project_id, groups[0], groups[1], groups[2])
else:
m = re.match(_TABLE_NAME_PATTERN, name)
if (m is not None):
groups = m.groups()
(_project_id, _dataset_id, _table_id, _decorator) = (project_id, dataset_id, groups[0], groups[1])
elif isinstance(name, dict):
try:
_table_id = name['table_id']
_dataset_id = name['dataset_id']
_project_id = name['project_id']
except KeyError:
pass
elif (len(name) == 4):
(_project_id, _dataset_id, _table_id, _decorator) = name
elif (len(name) == 3):
(_project_id, _dataset_id, _table_id) = name
elif (len(name) == 2):
(_dataset_id, _table_id) = name
if (not _table_id):
raise Exception(('Invalid table name: ' + str(name)))
if (not _project_id):
_project_id = project_id
if (not _dataset_id):
_dataset_id = dataset_id
if (not _decorator):
_decorator = ''
return TableName(_project_id, _dataset_id, _table_id, _decorator)
|
Parses a table name into its individual parts.
Args:
name: the name to parse, or a tuple, dictionary or array containing the parts.
project_id: the expected project ID. If the name does not contain a project ID,
this will be used; if the name does contain a project ID and it does not match
this, an exception will be thrown.
dataset_id: the expected dataset ID. If the name does not contain a dataset ID,
this will be used; if the name does contain a dataset ID and it does not match
this, an exception will be thrown.
Returns:
A TableName named tuple consisting of the full name and individual name parts.
Raises:
Exception: raised if the name doesn't match the expected formats, or a project_id and/or
dataset_id was provided that does not match that in the name.
|
codesearchnet
|
def read_handler(Model, name=None, **kwds):
async def action_handler(service, action_type, payload, props, **kwds):
if action_type == get_crud_action('read', name or Model):
message_props = {}
if 'correlation_id' in props:
message_props['correlation_id'] = props['correlation_id']
try:
resolved = service.schema.execute(payload)
response = json.dumps({
'data': {key:value for key,value in resolved.data.items()},
'errors': resolved.errors
})
await service.event_broker.send(
payload=response,
action_type=change_action_status(action_type, success_status()),
**message_props
)
except Exception as err:
await service.event_broker.send(
payload=str(err),
action_type=change_action_status(action_type, error_status()),
**message_props
)
return action_handler
|
This factory returns an action handler that responds to read requests
by resolving the payload as a graphql query against the internal schema.
Args:
Model (nautilus.BaseModel): The model to query when the action is
received.
Returns:
function(type, payload): The action handler for this model
|
juraj-google-style
|
def SetValue(self, identifier, value):
if (not isinstance(identifier, py2to3.STRING_TYPES)):
raise TypeError('Identifier not a string type.')
identifier = identifier.lower()
self._values[identifier] = value
|
Sets a value by identifier.
Args:
identifier (str): case insensitive unique identifier for the value.
value (object): value.
Raises:
TypeError: if the identifier is not a string type.
|
codesearchnet
|
def find_equivalent_sites(self, site):
for sites in self.equivalent_sites:
if (site in sites):
return sites
raise ValueError('Site not in structure')
|
Finds all symmetrically equivalent sites for a particular site
Args:
site (PeriodicSite): A site in the structure
Returns:
([PeriodicSite]): List of all symmetrically equivalent sites.
|
codesearchnet
|
def substitute(expr, var_map):
try:
if isinstance(expr, SympyBasic):
sympy_var_map = {
k: v for (k, v) in var_map.items()
if isinstance(k, SympyBasic)}
return expr.subs(sympy_var_map)
else:
return expr.substitute(var_map)
except AttributeError:
if expr in var_map:
return var_map[expr]
return expr
|
Substitute symbols or (sub-)expressions with the given replacements and
re-evaluate the result
Args:
expr: The expression in which to perform the substitution
var_map (dict): The substitution dictionary.
|
juraj-google-style
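A brief added usage sketch of the substitute helper above with plain sympy expressions, assuming the helper is in scope with SympyBasic aliased to sympy.Basic; non-sympy values that are not in the map pass through unchanged.
import sympy

x, y = sympy.symbols('x y')
print(substitute(x**2 + y, {x: 2, y: 3}))   # 7
print(substitute(5, {x: 2}))                # 5 -- plain values fall through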
|
def get_concepts_to_recalculate(self, users, lang, concepts=None):
only_one_user = False
if (not isinstance(users, list)):
only_one_user = True
users = [users]
mapping = self.get_item_concept_mapping(lang)
current_user_stats = defaultdict((lambda : {}))
user_stats_qs = UserStat.objects.filter(user__in=users, stat='answer_count')
if (concepts is not None):
user_stats_qs = user_stats_qs.filter(concept__in=concepts)
for user_stat in user_stats_qs:
current_user_stats[user_stat.user_id][user_stat.concept_id] = user_stat
concepts_to_recalculate = defaultdict((lambda : set()))
for (user, item, time) in Answer.objects.filter(user__in=users).values_list('user_id', 'item').annotate(Max('time')):
if (item not in mapping):
continue
time_expiration_lower_bound = get_config('proso_models', 'knowledge_overview.time_shift_hours', default=4)
time_expiration_factor = get_config('proso_models', 'knowledge_overview.time_expiration_factor', default=2)
for concept in mapping[item]:
if ((user in current_user_stats) and (concept in current_user_stats[user]) and (current_user_stats[user][concept].time > time)):
if (not self.has_time_expired(current_user_stats[user][concept].time, time, time_expiration_lower_bound, time_expiration_factor)):
continue
if ((concepts is None) or (concept in ([c.pk for c in concepts] if (type(concepts[0]) == Concept) else concepts))):
concepts_to_recalculate[user].add(concept)
if only_one_user:
return concepts_to_recalculate[users[0]]
return concepts_to_recalculate
|
Get concepts which have some changes and have to be recalculated
Args:
users (list of users or user): users whose user stats we are interested in
lang (str): language of used concepts
concepts (Optional[list of concepts]): list of primary keys of concepts or concepts
Defaults to None meaning all concepts.
Returns:
dict: user -> set of concepts (int) - in case of list of users
set of concepts (int) - in case of one user
|
codesearchnet
|
def _or_join(self, terms):
if isinstance(terms, (tuple, list)):
if len(terms) > 1:
return '(' + ' OR '.join(terms) + ')'
else:
return terms[0]
else:
return terms
|
Joins terms using OR operator.
Args:
terms (list): terms to join
Examples:
self._or_join(['term1', 'term2']) -> '(term1 OR term2)'
Returns:
str
|
juraj-google-style
|
def wait_for_jobs(jobs):
all_running = False
while (not all_running):
all_running = True
time.sleep(5)
for job in jobs:
job.refresh()
scheduled = getattr(job, 'scheduled_at', None)
if (scheduled is not None):
logger.info(('Waiting for %s on %s [%s]' % (job.uid, job.site, _date2h(scheduled))))
all_running = (all_running and (job.state == 'running'))
if (job.state == 'error'):
raise Exception(('The job %s is in error state' % job))
logger.info('All jobs are Running !')
|
Waits for all the jobs to be running.
Args:
jobs(list): list of the python-grid5000 jobs to wait for
Raises:
Exception: if one of the job gets in error state.
|
codesearchnet
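A generic added polling sketch of the same wait-until-all-running loop, with a made-up job interface (refresh()/state) standing in for python-grid5000 jobs.
import time

def wait_until_running(jobs, poll_seconds=5):
    while True:
        for job in jobs:
            job.refresh()
            if job.state == 'error':
                raise RuntimeError(f'job {job} is in error state')
        if all(job.state == 'running' for job in jobs):
            return
        time.sleep(poll_seconds)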
|
def get_pluggable_module_information(self, id_or_uri):
uri = self._client.build_uri(id_or_uri) + "/pluggableModuleInformation"
return self._client.get(uri)
|
Gets all the pluggable module information.
Args:
id_or_uri: Can be either the interconnect id or uri.
Returns:
array: dicts of the pluggable module information.
|
juraj-google-style
|
def _batch_prepare_for_model_boxes(self, batch_text_or_text_pairs, is_pair: Optional[bool]=None, boxes: Optional[List[List[int]]]=None, word_labels: Optional[List[List[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
batch_outputs = {}
for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
batch_text_or_text_pair, boxes_example = example
outputs = self.prepare_for_model_boxes(batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, boxes_example, word_labels=word_labels[idx] if word_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return batch_outputs
|
Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user defined stride) for overflowing tokens
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
|
github-repos
|
def iter_package_families(paths=None):
for path in (paths or config.packages_path):
repo = package_repository_manager.get_repository(path)
for resource in repo.iter_package_families():
yield PackageFamily(resource)
|
Iterate over package families, in no particular order.
Note that multiple package families with the same name can be returned.
Unlike packages, families later in the searchpath are not hidden by earlier
families.
Args:
paths (list of str, optional): paths to search for package families,
defaults to `config.packages_path`.
Returns:
`PackageFamily` iterator.
|
juraj-google-style
|