code (string, lengths 20 to 4.93k) | docstring (string, lengths 33 to 1.27k) | source (3 classes) |
---|---|---|
def _ParseShVariables(self, lines):
paths = {}
for line in lines:
for entry in line:
if ('=' in entry):
(target, vals) = (entry.split('=', 1) + [''])[:2]
if vals:
path_vals = vals.split(':')
else:
path_vals = []
self._ExpandPath(target, path_vals, paths)
elif (entry not in self._SH_CONTINUATION):
break
return paths
|
Extract env_var and path values from sh derivative shells.
Iterates over each line, word by word, searching for statements that set the
path. These are either variable assignments, or keywords (e.g. export) that
allow a variable to be set later in the line.
Args:
lines: A list of lines, each of which is a list of space separated words.
Returns:
a dictionary of path names and values.
|
codesearchnet
|
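For illustration, a minimal standalone sketch of the same parsing idea. It skips the original's `_ExpandPath` expansion, and the `_SH_CONTINUATION` keyword set here is an assumption:

```python
_SH_CONTINUATION = frozenset(['export', 'set', 'setenv'])  # assumed keyword set

def parse_sh_paths(lines):
    """Simplified stand-in for _ParseShVariables (no path expansion)."""
    paths = {}
    for line in lines:
        for word in line:
            if '=' in word:
                name, _, vals = word.partition('=')
                paths[name] = vals.split(':') if vals else []
            elif word not in _SH_CONTINUATION:
                break  # any other word ends the scan of this line
    return paths

print(parse_sh_paths([['PATH=/usr/bin:/usr/local/bin'],
                      ['export', 'LD_LIBRARY_PATH=/opt/lib']]))
# {'PATH': ['/usr/bin', '/usr/local/bin'], 'LD_LIBRARY_PATH': ['/opt/lib']}
```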
def _contains_nd(nodes, point):
min_vals = np.min(nodes, axis=1)
if not np.all(min_vals <= point):
return False
max_vals = np.max(nodes, axis=1)
if not np.all(point <= max_vals):
return False
return True
|
r"""Predicate indicating if a point is within a bounding box.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): A set of points.
point (numpy.ndarray): A 1D NumPy array representing a point
in the same dimension as ``nodes``.
Returns:
bool: Indicating containment.
|
juraj-google-style
|
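A quick NumPy illustration of the containment check, with `nodes` holding one point per column (matching the `axis=1` reductions above):

```python
import numpy as np

nodes = np.array([[0.0, 2.0, 1.0],
                  [0.0, 0.0, 3.0]])  # 2D bounding box spanning [0, 2] x [0, 3]

def contains_nd(nodes, point):
    min_vals = np.min(nodes, axis=1)
    max_vals = np.max(nodes, axis=1)
    return bool(np.all(min_vals <= point) and np.all(point <= max_vals))

print(contains_nd(nodes, np.array([1.0, 1.0])))  # True
print(contains_nd(nodes, np.array([3.0, 1.0])))  # False
```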
def _apply_sparse_duplicate_indices(self, grad, var):
summed_values, unique_indices = _deduplicate_indexed_slices(values=grad.values, indices=grad.indices)
gradient_no_duplicate_indices = indexed_slices.IndexedSlices(indices=unique_indices, values=summed_values, dense_shape=grad.dense_shape)
return self._apply_sparse(gradient_no_duplicate_indices, var)
|
Add ops to apply sparse gradients to `var`, with repeated sparse indices.
Optimizers which override this method must deal with IndexedSlices objects
such as the following:
IndexedSlicesValue(values=[1, 1], indices=[0, 0], dense_shape=[1])
The correct interpretation is:
IndexedSlicesValue(values=[2], indices=[0], dense_shape=[1])
Many optimizers deal incorrectly with repeated indices when updating based
on sparse gradients (e.g. summing squares rather than squaring the sum, or
applying momentum terms multiple times). Adding first is always the correct
behavior, so this is enforced here by reconstructing the IndexedSlices to
have only unique indices, then calling _apply_sparse.
Optimizers which deal correctly with repeated indices may instead override
this method to avoid the overhead of summing indices.
Args:
grad: `IndexedSlices`.
var: A `Variable` object.
Returns:
An `Operation`.
|
github-repos
|
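The deduplication step can be sketched in plain NumPy (this mirrors the idea behind `_deduplicate_indexed_slices`, not its actual implementation):

```python
import numpy as np

values = np.array([1.0, 1.0])   # gradient values with a repeated index
indices = np.array([0, 0])

unique_indices, positions = np.unique(indices, return_inverse=True)
summed_values = np.zeros(len(unique_indices))
np.add.at(summed_values, positions, values)  # sum duplicates, as required

print(unique_indices, summed_values)  # [0] [2.]
```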
def VerifyStructure(self, parser_mediator, line):
try:
structure = self._DPKG_LOG_LINE.parseString(line)
except pyparsing.ParseException as exception:
logger.debug('Unable to parse Debian dpkg.log file with error: {0!s}'.format(exception))
return False
return (('date_time' in structure) and ('body' in structure))
|
Verifies if a line from a text file is in the expected format.
Args:
parser_mediator (ParserMediator): parser mediator.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
|
codesearchnet
|
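A simplified, hypothetical grammar in the same spirit (the actual `_DPKG_LOG_LINE` definition is not shown in this snippet) illustrates how the `date_time` and `body` keys end up in the parsed structure:

```python
import pyparsing

# Hypothetical, simplified dpkg.log grammar: an ISO-like timestamp plus the rest of the line.
DPKG_LOG_LINE = (
    pyparsing.Regex(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}')('date_time')
    + pyparsing.restOfLine('body'))

structure = DPKG_LOG_LINE.parseString(
    '2016-08-03 15:25:53 install base-passwd:amd64 <none> 3.5.39')
print('date_time' in structure and 'body' in structure)  # True
```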
def get_temp_dir(self) -> str:
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
|
Returns a unique temporary directory for the test to use.
If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This ensures that tests cannot pollute each other's environment
across different runs.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
|
github-repos
|
def traverse_levelorder(self, leaves=True, internal=True):
q = deque()
q.append(self)
while (len(q) != 0):
n = q.popleft()
if ((leaves and n.is_leaf()) or (internal and (not n.is_leaf()))):
(yield n)
q.extend(n.children)
|
Perform a levelorder traversal starting at this ``Node`` object
Args:
``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``
``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
|
codesearchnet
|
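A self-contained sketch of the same breadth-first traversal, using a minimal `Node` stand-in:

```python
from collections import deque

class Node:
    def __init__(self, label, children=None):
        self.label = label
        self.children = children or []
    def is_leaf(self):
        return not self.children

def traverse_levelorder(root, leaves=True, internal=True):
    q = deque([root])
    while q:
        n = q.popleft()
        if (leaves and n.is_leaf()) or (internal and not n.is_leaf()):
            yield n
        q.extend(n.children)

tree = Node('r', [Node('a', [Node('c'), Node('d')]), Node('b')])
print([n.label for n in traverse_levelorder(tree)])                 # ['r', 'a', 'b', 'c', 'd']
print([n.label for n in traverse_levelorder(tree, internal=False)]) # ['b', 'c', 'd']
```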
def search(self, terms):
messages = self._connection.get(('search/%s' % urllib.quote_plus(terms)), key='messages')
if messages:
messages = [Message(self, message) for message in messages]
return messages
|
Search transcripts.
Args:
terms (str): Terms for search
Returns:
array. Messages
|
codesearchnet
|
def get_type(self, index):
if index < 0 or index >= len(self._types):
raise ValueError("Index for getting order parameter type"
" out-of-bounds!")
return self._types[index]
|
Return type of order parameter at the index provided and
represented by a short string.
Args:
index (int): index of order parameter for which type is
to be returned.
Returns:
str: OP type.
|
juraj-google-style
|
def copy_default_config_to_user_directory(basename, clobber=False, dst_dir='~/.config/scriptabit'):
dst_dir = os.path.expanduser(dst_dir)
dst = os.path.join(dst_dir, basename)
src = resource_filename(Requirement.parse('scriptabit'), os.path.join('scriptabit', basename))
if (not os.path.exists(dst_dir)):
os.makedirs(dst_dir)
if (clobber or (not os.path.isfile(dst))):
shutil.copy(src, dst)
|
Copies the default configuration file into the user config directory.
Args:
basename (str): The base filename.
clobber (bool): If True, the default will be written even if a user
config already exists.
dst_dir (str): The destination directory.
|
codesearchnet
|
def turtle_to_texture(turtle_program, turn_amount=DEFAULT_TURN,
initial_angle=DEFAULT_INITIAL_ANGLE, resolution=1):
generator = branching_turtle_generator(
turtle_program, turn_amount, initial_angle, resolution)
return texture_from_generator(generator)
|
Makes a texture from a turtle program.
Args:
turtle_program (str): a string representing the turtle program; see the
docstring of `branching_turtle_generator` for more details
turn_amount (float): amount to turn in degrees
initial_angle (float): initial orientation of the turtle
resolution (int): if provided, interpolation amount for visible lines
Returns:
texture: A texture.
|
juraj-google-style
|
def run_step(self, representer):
assert representer, 'ObjectRepresenter instance required to run ObjectRewriterStep.'
rewriter = ObjectRewriter(self.context.get_formatted_iterable, representer)
super().run_step(rewriter)
|
Do the object in-out rewrite.
Args:
representer: A pypyr.filesystem.ObjectRepresenter instance.
|
codesearchnet
|
def p44(msg):
d = hex2bin(data(msg))
if (d[34] == '0'):
return None
p = bin2int(d[35:46])
return p
|
Static pressure.
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
int: static pressure in hPa
|
codesearchnet
|
def generate_timing_stats(file_list, var_list):
timing_result = dict()
timing_summary = dict()
for file in file_list:
timing_result[file] = functions.parse_gptl(file, var_list)
for var in var_list:
var_time = []
for f, data in timing_result.items():
try:
var_time.append(data[var])
except:
continue
if len(var_time):
timing_summary[var] = {'mean': np.mean(var_time),
'max': np.max(var_time),
'min': np.min(var_time),
'std': np.std(var_time)}
return timing_summary
|
Parse all of the timing files, and generate some statistics
about the run.
Args:
file_list: A list of timing files to parse
var_list: A list of variables to look for in the timing file
Returns:
A dict mapping each variable to a dict of statistics with keys:
'mean', 'max', 'min', and 'std' (standard deviation).
|
juraj-google-style
|
def dt_dt(sdat, tstart=None, tend=None):
tseries = sdat.tseries_between(tstart, tend)
time = tseries['t'].values
temp = tseries['Tmean'].values
dtdt = ((temp[1:] - temp[:(- 1)]) / (time[1:] - time[:(- 1)]))
return (dtdt, time[:(- 1)])
|
Derivative of temperature.
Compute dT/dt as a function of time using an explicit Euler scheme.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): time at which the computation should start. Use the
beginning of the time series data if set to None.
tend (float): time at which the computation should end. Use the
end of the time series data if set to None.
Returns:
tuple of :class:`numpy.array`: derivative of temperature and time
arrays.
|
codesearchnet
|
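The finite-difference core of `dt_dt` is easy to check with plain NumPy arrays (stand-in data, not a StagyyData time series):

```python
import numpy as np

time = np.array([0.0, 1.0, 2.0, 4.0])
temp = np.array([1.0, 2.0, 4.0, 4.0])

dtdt = (temp[1:] - temp[:-1]) / (time[1:] - time[:-1])
print(dtdt)       # [1. 2. 0.]
print(time[:-1])  # [0. 1. 2.]  -- the derivative is reported at the left-hand time
```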
def GetUsedMemory(self):
try:
memory_info = self._process.memory_info()
except psutil.NoSuchProcess:
return None
memory_data = getattr(memory_info, 'data', 0)
memory_shared = getattr(memory_info, 'shared', 0)
return (memory_data + memory_shared)
|
Retrieves the amount of memory used by the process.
Returns:
int: amount of memory in bytes used by the process or None
if not available.
|
codesearchnet
|
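The same measurement can be reproduced against the current process with `psutil` directly; the `data` and `shared` fields only exist on some platforms, which is why the method falls back to 0:

```python
import psutil

process = psutil.Process()  # current process
memory_info = process.memory_info()

memory_data = getattr(memory_info, 'data', 0)
memory_shared = getattr(memory_info, 'shared', 0)
print(memory_data + memory_shared)  # bytes; 0 on platforms without these fields
```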
def normal(key, shape, dtype=dtypes.float32):
key = tf_np.asarray(key, dtype=_RNG_KEY_DTYPE)
return tf_np.asarray(stateless_random_ops.stateless_random_normal(shape, seed=_key2seed(key), dtype=dtype))
|
Sample standard-normal random values.
Args:
key: the RNG key.
shape: the shape of the result.
dtype: the dtype of the result.
Returns:
Random values in standard-normal distribution.
|
github-repos
|
def plot_feature_correlation_heatmap(df, features, font_size=9, figsize=(15, 15), save_filename=None):
features = features[:]
features += ['target']
mcorr = df[features].corr()
mask = np.zeros_like(mcorr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
cmap = sns.diverging_palette(220, 10, as_cmap=True)
fig = plt.figure(figsize=figsize)
heatmap = sns.heatmap(
mcorr,
mask=mask,
cmap=cmap,
square=True,
annot=True,
fmt='0.2f',
annot_kws={'size': font_size},
)
heatmap.tick_params(axis='both', which='major', labelsize=font_size)
heatmap.tick_params(axis='both', which='minor', labelsize=font_size)
heatmap.set_xticklabels(features, rotation=90)
heatmap.set_yticklabels(reversed(features))
plt.show()
if save_filename is not None:
fig.savefig(save_filename, dpi=300)
|
Plot a correlation heatmap between every feature pair.
Args:
df: Pandas dataframe containing the target column (named 'target').
features: The list of features to include in the correlation plot.
font_size: Font size for heatmap cells and axis labels.
figsize: The size of the plot.
save_filename: (Optional) The path of the file to save a high-res version of the plot to.
|
juraj-google-style
|
def verify_docker_image_sha(chain, link):
cot = link.cot
task = link.task
errors = []
if isinstance(task['payload'].get('image'), dict):
docker_image_task_id = task['extra']['chainOfTrust']['inputs']['docker-image']
log.debug('Verifying {} {} against docker-image {}'.format(link.name, link.task_id, docker_image_task_id))
if (docker_image_task_id != task['payload']['image']['taskId']):
errors.append("{} {} docker-image taskId isn't consistent!: {} vs {}".format(link.name, link.task_id, docker_image_task_id, task['payload']['image']['taskId']))
else:
path = task['payload']['image']['path']
image_hash = cot['environment']['imageArtifactHash']
(alg, sha) = image_hash.split(':')
docker_image_link = chain.get_link(docker_image_task_id)
upstream_sha = docker_image_link.cot['artifacts'].get(path, {}).get(alg)
if (upstream_sha is None):
errors.append('{} {} docker-image docker sha {} is missing! {}'.format(link.name, link.task_id, alg, docker_image_link.cot['artifacts'][path]))
elif (upstream_sha != sha):
errors.append("{} {} docker-image docker sha doesn't match! {} {} vs {}".format(link.name, link.task_id, alg, sha, upstream_sha))
else:
log.debug('Found matching docker-image sha {}'.format(upstream_sha))
else:
prebuilt_task_types = chain.context.config['prebuilt_docker_image_task_types']
if ((prebuilt_task_types != 'any') and (link.task_type not in prebuilt_task_types)):
errors.append('Task type {} not allowed to use a prebuilt docker image!'.format(link.task_type))
raise_on_errors(errors)
|
Verify that built docker shas match the artifact.
Args:
chain (ChainOfTrust): the chain we're operating on.
link (LinkOfTrust): the task link we're checking.
Raises:
CoTError: on failure.
|
codesearchnet
|
def _BuildFindSpecsFromGroupName(self, group_name, environment_variables):
definition = self._artifacts_registry.GetDefinitionByName(group_name)
if (not definition):
return None
return self._BuildFindSpecsFromArtifact(definition, environment_variables)
|
Builds find specifications from an artifact group name.
Args:
group_name (str): artifact group name.
environment_variables (list[str]): environment variable attributes used to
dynamically populate environment variables in file and registry
artifacts.
Returns:
list[dfwinreg.FindSpec|dfvfs.FindSpec]: find specifications or None if no
artifact with the given name can be retrieved.
|
codesearchnet
|
def _Open(self, path_spec, mode='rb'):
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
try:
file_object.seek(0, os.SEEK_SET)
tar_file = tarfile.open(mode='r:', fileobj=file_object)
except:
file_object.close()
raise
self._file_object = file_object
self._tar_file = tar_file
|
Opens the file system defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
juraj-google-style
|
def from_json(cls, json_value: Any, *, value_spec: Optional[pg_typing.Dict]=None, allow_partial: bool=False, root_path: Optional[utils.KeyPath]=None, **kwargs) -> 'Dict':
return cls({k: base.from_json(v, root_path=utils.KeyPath(k, root_path), allow_partial=allow_partial, **kwargs) for k, v in json_value.items()}, value_spec=value_spec, root_path=root_path, allow_partial=allow_partial)
|
Class method that loads a symbolic Dict from a JSON value.
Args:
json_value: Input JSON value, only JSON dict is acceptable.
value_spec: An optional value spec to apply.
allow_partial: Whether to allow members of the dict to be partial.
root_path: KeyPath of loaded object in its object tree.
**kwargs: Allow passing through keyword arguments that are not applicable.
Returns:
A schemaless symbolic dict. For example::
d = Dict.from_json({
'a': {
'_type': '__main__.Foo',
'f1': 1,
'f2': {
'f21': True
}
}
})
assert d.value_spec is None
# Okay:
d.b = 1
# a.f2 is bound by class Foo's field 'f2' definition (assume it defines
# a schema for the Dict field).
assert d.a.f2.value_spec is not None
# Not okay:
d.a.f2.abc = 1
|
github-repos
|
def get_account_name(config, auth, account):
account_id, advertiser_ids = parse_account(config, auth, account)
is_superuser, profile_id = get_profile_for_api(config, auth, account_id)
response = API_DCM(config, auth, internal=is_superuser).accounts().get(id=account_id, profileId=profile_id).execute()
return response['name']
|
Return the name of a DCM account given the account ID.
Args:
* auth: (string) Either user or service.
* account: (string) [account:advertiser@profile] token.
Returns:
* The account name.
Raises:
* If current credentials do not have a profile for this account.
|
github-repos
|
def build_srpm(specfile, save_dir):
logger.info('Starting rpmbuild to build: {0} SRPM.'.format(specfile))
if save_dir != get_default_save_path():
try:
msg = subprocess.Popen(
['rpmbuild',
'--define', '_sourcedir {0}'.format(save_dir),
'--define', '_builddir {0}'.format(save_dir),
'--define', '_srcrpmdir {0}'.format(save_dir),
'--define', '_rpmdir {0}'.format(save_dir),
'-bs', specfile], stdout=subprocess.PIPE).communicate(
)[0].strip()
except OSError:
logger.error(
"Rpmbuild failed for specfile: {0} and save_dir: {1}".format(
specfile, save_dir), exc_info=True)
msg = 'Rpmbuild failed. See log for more info.'
return msg
else:
if not os.path.exists(save_dir):
raise IOError("Specify folder to store a file (SAVE_DIR) "
"or install rpmdevtools.")
try:
msg = subprocess.Popen(
['rpmbuild',
'--define', '_sourcedir {0}'.format(save_dir + '/SOURCES'),
'--define', '_builddir {0}'.format(save_dir + '/BUILD'),
'--define', '_srcrpmdir {0}'.format(save_dir + '/SRPMS'),
'--define', '_rpmdir {0}'.format(save_dir + '/RPMS'),
'-bs', specfile], stdout=subprocess.PIPE).communicate(
)[0].strip()
except OSError:
logger.error("Rpmbuild failed for specfile: {0} and save_dir: "
"{1}".format(specfile, save_dir), exc_info=True)
msg = 'Rpmbuild failed. See log for more info.'
return msg
|
Builds an SRPM from the given specfile using rpmbuild.
The generated SRPM is stored in the directory specified by save_dir.
Args:
specfile: path to a specfile
save_dir: path to source and build tree
|
juraj-google-style
|
def pop_parameter(key):
names = key.split('/')
if len(names) > 1:
with parameter_scope(names[0]):
return pop_parameter('/'.join(names[1:]))
global current_scope
param = current_scope.get(key, None)
if param is not None:
del current_scope[key]
return param
|
Remove and get parameter by key.
Args:
key(str): Key of parameter.
Returns: ~nnabla.Variable
Parameter if key found, otherwise None.
|
juraj-google-style
|
def _collect_paths(element):
output = []
path = vectors.el_to_path_vector(element)
root = path[0]
params = element.params if element.params else None
match = root.find(element.getTagName(), params)
if len(match) == 1:
output.append(
PathCall("find", 0, [element.getTagName(), params])
)
output.extend(path_patterns.neighbours_pattern(element))
output.extend(path_patterns.predecesors_pattern(element, root))
index_backtrack = []
last_index_backtrack = []
params_backtrack = []
last_params_backtrack = []
for el in reversed(path):
if not el.parent:
continue
tag_name = el.getTagName()
match = el.parent.wfind(tag_name).childs
index = match.index(el)
index_backtrack.append(
PathCall("wfind", index, [tag_name])
)
last_index_backtrack.append(
PathCall("wfind", index - len(match), [tag_name])
)
if el.params:
match = el.parent.wfind(tag_name, el.params).childs
index = match.index(el)
params_backtrack.append(
PathCall("wfind", index, [tag_name, el.params])
)
last_params_backtrack.append(
PathCall("wfind", index - len(match), [tag_name, el.params])
)
else:
params_backtrack.append(
PathCall("wfind", index, [tag_name])
)
last_params_backtrack.append(
PathCall("wfind", index - len(match), [tag_name])
)
output.extend([
Chained(reversed(params_backtrack)),
Chained(reversed(last_params_backtrack)),
Chained(reversed(index_backtrack)),
Chained(reversed(last_index_backtrack)),
])
return output
|
Collect all possible paths which lead to `element`.
The function returns the standard path from the root element to this one, a
reverse path which uses negative indexes, and some pattern matches, such as
"this is the element which has a neighbour with id 7", and so on.
Args:
element (obj): HTMLElement instance.
Returns:
list: List of :class:`.PathCall` and :class:`.Chained` objects.
|
juraj-google-style
|
def _GetDelayImportTimestamps(self, pefile_object):
delay_import_timestamps = []
if (not hasattr(pefile_object, 'DIRECTORY_ENTRY_DELAY_IMPORT')):
return delay_import_timestamps
for importdata in pefile_object.DIRECTORY_ENTRY_DELAY_IMPORT:
dll_name = importdata.dll
try:
dll_name = dll_name.decode('ascii')
except UnicodeDecodeError:
dll_name = dll_name.decode('ascii', errors='replace')
timestamp = getattr(importdata.struct, 'dwTimeStamp', 0)
delay_import_timestamps.append([dll_name, timestamp])
return delay_import_timestamps
|
Retrieves timestamps from delay import entries, if available.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
list[list[str, int]]: pairs of the name of the DLL being imported and
the timestamp of the delay import entry.
|
codesearchnet
|
def get_discount_curve(discount_curve_types: List[Union[curve_types_lib.RiskFreeCurve, curve_types_lib.RateIndexCurve]], market: pmd.ProcessedMarketData, mask: List[int]) -> rate_curve.RateCurve:
discount_curves = [market.yield_curve(curve_type) for curve_type in discount_curve_types]
discounts = []
dates = []
interpolation_method = None
interpolate_rates = None
for curve in discount_curves:
discount, date = curve.discount_factors_and_dates()
discounts.append(discount)
dates.append(date)
interpolation_method = curve.interpolation_method
interpolate_rates = curve.interpolate_rates
all_discounts = tf.stack(pad.pad_tensors(discounts), axis=0)
all_dates = pad.pad_date_tensors(dates)
all_dates = dateslib.DateTensor.stack(dates, axis=0)
prepare_discounts = tf.gather(all_discounts, mask)
prepare_dates = dateslib.dates_from_ordinals(tf.gather(all_dates.ordinal(), mask))
discount_curve = rate_curve.RateCurve(prepare_dates, prepare_discounts, market.date, interpolator=interpolation_method, interpolate_rates=interpolate_rates)
return discount_curve
|
Builds a batched discount curve.
Given a list of discount curve an integer mask, creates a discount curve
object to compute discount factors against the list of discount curves.
#### Example
```none
curve_types = [RiskFreeCurve("USD"), RiskFreeCurve("AUD")]
# A mask to price a batch of 7 instruments with the corresponding discount
# curves ["USD", "AUD", "AUD", "AUD" "USD", "USD", "AUD"].
mask = [0, 1, 1, 1, 0, 0, 1]
market = MarketDataDict(...)
get_discount_curve(curve_types, market, mask)
# Returns a RateCurve object that can compute a discount factors for a
# batch of 7 dates.
```
Args:
discount_curve_types: A list of curve types.
market: an instance of the processed market data.
mask: An integer mask.
Returns:
An instance of `RateCurve`.
|
github-repos
|
def get(self):
with warnings.catch_warnings(record=False):
warnings.simplefilter('ignore')
return np.nanquantile(self._queue, self._q)
|
Calculates and returns the specified quantile of the current sliding
window.
Returns:
float: The specified quantile of the values in the current sliding window.
Returns NaN if the window is empty.
|
github-repos
|
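A minimal sketch of the sliding-window quantile, assuming a deque window and a quantile `q`, standing in for the `_queue` and `_q` attributes above:

```python
import warnings
from collections import deque

import numpy as np

window = deque([3.0, float('nan'), 1.0, 2.0], maxlen=4)
with warnings.catch_warnings(record=False):
    warnings.simplefilter('ignore')  # silence all-NaN warnings, as above
    print(np.nanquantile(window, 0.5))  # 2.0, the NaN is ignored
```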
def get_position_encoding(length, hidden_size, min_timescale=1.0, max_timescale=10000.0):
position = tf.to_float(tf.range(length))
num_timescales = hidden_size // 2
log_timescale_increment = (math.log((float(max_timescale) / float(min_timescale))) / (tf.to_float(num_timescales) - 1))
inv_timescales = (min_timescale * tf.exp((tf.to_float(tf.range(num_timescales)) * (- log_timescale_increment))))
scaled_time = (tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0))
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
return signal
|
Return positional encoding.
Calculates the position encoding as a mix of sine and cosine functions with
geometrically increasing wavelengths.
Defined and formulized in Attention is All You Need, section 3.5.
Args:
length: Sequence length.
hidden_size: Size of the hidden dimension of the encoding.
min_timescale: Minimum scale that will be applied at each position
max_timescale: Maximum scale that will be applied at each position
Returns:
Tensor with shape [length, hidden_size]
|
codesearchnet
|
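The same sinusoidal encoding can be sketched in NumPy to see the shapes involved (a stand-in for the TF version, not a drop-in replacement):

```python
import math

import numpy as np

def position_encoding(length, hidden_size, min_timescale=1.0, max_timescale=10000.0):
    position = np.arange(length, dtype=np.float64)
    num_timescales = hidden_size // 2
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - 1)
    inv_timescales = min_timescale * np.exp(
        np.arange(num_timescales, dtype=np.float64) * -log_timescale_increment)
    scaled_time = position[:, None] * inv_timescales[None, :]
    return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)

print(position_encoding(length=16, hidden_size=8).shape)  # (16, 8)
```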
def install_hook(self, hook_name, hook_content):
hook_path = os.path.join(self.path, '.git/hooks', hook_name)
with open(hook_path, 'w') as f:
f.write(hook_content)
os.chmod(hook_path, ((stat.S_IEXEC | stat.S_IREAD) | stat.S_IWRITE))
|
Install the repository hook for this repo.
Args:
hook_name (str)
hook_content (str)
|
codesearchnet
|
def get_variable_name_from_bird(bird_conf):
bird_variable_pattern = re.compile(
    # Approximate pattern (assumed reconstruction of the elided raw string):
    # matches lines such as "define ACAST_PS_ADVERTISE =" and captures the name.
    r'\s* define \s+ (?P<name>\S+) \s+ =', re.VERBOSE
)
with open(bird_conf, 'r') as content:
for line in content.readlines():
variable_match = bird_variable_pattern.search(line)
if variable_match:
return variable_match.group('name')
return None
|
Return the variable name set in Bird configuration.
The variable name in Bird configuration is set with the keyword 'define',
here is an example:
define ACAST_PS_ADVERTISE =
and we extract the string between the word 'define' and the equals sign.
Arguments:
bird_conf (str): The absolute file name path of Bird configuration.
Returns:
The variable name as a string or None if it isn't found.
|
juraj-google-style
|
def add_forwarding_rules(self, forwarding_rules):
rules_dict = [rule.__dict__ for rule in forwarding_rules]
return self.get_data(('load_balancers/%s/forwarding_rules/' % self.id), type=POST, params={'forwarding_rules': rules_dict})
|
Adds new forwarding rules to a LoadBalancer.
Args:
forwarding_rules (obj:`list`): A list of `ForwardingRules` objects
|
codesearchnet
|
class XLMPoolerAnswerClass(nn.Module):
def __init__(self, config: XLMConfig):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)
def forward(self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, cls_index: Optional[torch.LongTensor]=None) -> torch.FloatTensor:
hsz = hidden_states.shape[-1]
assert start_states is not None or start_positions is not None, 'One of start_states, start_positions should be not None'
if start_positions is not None:
start_positions = start_positions[:, None, None].expand(-1, -1, hsz)
start_states = hidden_states.gather(-2, start_positions).squeeze(-2)
if cls_index is not None:
cls_index = cls_index[:, None, None].expand(-1, -1, hsz)
cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2)
else:
cls_token_state = hidden_states[:, -1, :]
x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
x = self.activation(x)
x = self.dense_1(x).squeeze(-1)
return x
|
Compute SQuAD 2.0 answer class from classification and start tokens hidden states.
Args:
config ([`XLMConfig`]):
The config used by the model, will be used to grab the `hidden_size` of the model.
|
github-repos
|
def _inplace_helper(x, i, v, op):
x = ops.convert_to_tensor(x)
v = ops.convert_to_tensor(v, x.dtype)
if i is None:
return array_ops.reshape(op(array_ops.reshape(x, [1, -1]), [0], array_ops.reshape(v, [1, -1])), array_ops.shape(x))
i = math_ops.cast(i, dtypes.int32)
if i.get_shape().ndims == 0:
return op(x, array_ops.reshape(i, [1]), array_ops.expand_dims(v, 0))
return op(x, i, v)
|
Applies an inplace op on (x, i, v).
op is one of gen_array_ops.alias_inplace_update,
gen_array_ops.alias_inplace_add, or gen_array_ops.alias_inplace_sub.
If i is None, x and v must be the same shape. Computes
x op v;
If i is a scalar, x has a rank 1 higher than v's. Computes
x[i, :] op v;
Otherwise, x and v must have the same rank. Computes
x[i, :] op v;
Args:
x: A Tensor.
i: None, a scalar or a vector.
v: A Tensor.
op: alias_inplace_update, alias_inplace_add, or alias_inplace_sub.
Returns:
Returns x.
|
github-repos
|
def signUserCsr(self, xcsr, signas, outp=None):
pkey = xcsr.get_pubkey()
name = xcsr.get_subject().CN
return self.genUserCert(name, csr=pkey, signas=signas, outp=outp)
|
Signs a user CSR with a CA keypair.
Args:
xcsr (OpenSSL.crypto.X509Req): The certificate signing request.
signas (str): The CA keypair name to sign the CSR with.
outp (synapse.lib.output.Output): The output buffer.
Examples:
cdir.signUserCsr(mycsr, 'myca')
Returns:
((OpenSSL.crypto.PKey, OpenSSL.crypto.X509)): Tuple containing the public key and certificate objects.
|
codesearchnet
|
def attach(self, engine, log_handler, event_name):
if event_name not in State.event_to_attr:
raise RuntimeError("Unknown event name '{}'".format(event_name))
engine.add_event_handler(event_name, log_handler, self, event_name)
|
Attach the logger to the engine and execute `log_handler` function at `event_name` events.
Args:
engine (Engine): engine object.
log_handler (callable): a logging handler to execute
event_name: event to attach the logging handler to. Valid events are from :class:`~ignite.engine.Events`
or any `event_name` added by :meth:`~ignite.engine.Engine.register_events`.
|
juraj-google-style
|
def save_screenshot(self, filename, quietly=False):
imgData = self.take_screenshot()
try:
with open(filename, 'wb') as f:
f.write(b64decode(imgData.encode('ascii')))
except IOError as err:
if (not quietly):
raise err
|
Save the screenshot to local.
Support:
Android iOS Web(WebView)
Args:
filename(str): The path to save the image.
quietly(bool): If True, omit the IOError when
failed to save the image.
Returns:
WebElement Object.
Raises:
WebDriverException.
IOError.
|
codesearchnet
|
def _ParsePerformanceOptions(self, options):
self._buffer_size = getattr(options, 'buffer_size', 0)
if self._buffer_size:
try:
if (self._buffer_size[(- 1)].lower() == 'm'):
self._buffer_size = int(self._buffer_size[:(- 1)], 10)
self._buffer_size *= self._BYTES_IN_A_MIB
else:
self._buffer_size = int(self._buffer_size, 10)
except ValueError:
raise errors.BadConfigOption('Invalid buffer size: {0!s}.'.format(self._buffer_size))
self._queue_size = self.ParseNumericOption(options, 'queue_size')
|
Parses the performance options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
|
codesearchnet
|
def _ConsumeInteger(tokenizer, is_signed=False, is_long=False):
try:
result = ParseInteger(tokenizer.token, is_signed=is_signed, is_long=is_long)
except ValueError as e:
raise tokenizer.ParseError(str(e))
tokenizer.NextToken()
return result
|
Consumes an integer number from tokenizer.
Args:
tokenizer: A tokenizer used to parse the number.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer parsed.
Raises:
ParseError: If an integer with given characteristics couldn't be consumed.
|
juraj-google-style
|
def get_sample_dataset(dataset_properties):
kwargs = dataset_properties.copy()
data_type = kwargs.pop('type')
if data_type == 'multiclass':
try:
X, y = datasets.make_classification(random_state=8, **kwargs)
splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)
except Exception as e:
raise exceptions.UserError(repr(e))
elif data_type == 'iris':
X, y = datasets.load_iris(return_X_y=True)
splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)
elif data_type == 'mnist':
X, y = datasets.load_digits(return_X_y=True)
splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)
elif data_type == 'breast_cancer':
X, y = datasets.load_breast_cancer(return_X_y=True)
splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)
elif data_type == 'boston':
X, y = datasets.load_boston(return_X_y=True)
splits = model_selection.KFold(n_splits=2, random_state=8).split(X)
elif data_type == 'diabetes':
X, y = datasets.load_diabetes(return_X_y=True)
splits = model_selection.KFold(n_splits=2, random_state=8).split(X)
else:
raise exceptions.UserError('Unknown dataset type {}'.format(dataset_properties['type']))
return X, y, splits
|
Returns sample dataset
Args:
dataset_properties (dict): Dictionary corresponding to the properties of the dataset
used to verify the estimator and metric generators.
Returns:
X (array-like): Features array
y (array-like): Labels array
splits (iterator): This is an iterator that returns train test splits for
cross-validation purposes on ``X`` and ``y``.
|
juraj-google-style
|
def getEntity(self, name):
return lock_and_call(
lambda: Entity(self._impl.getEntity(name)),
self._lock
)
|
Get entity corresponding to the specified name (looks for it in all
types of entities).
Args:
name: Name of the entity.
Raises:
TypeError: if the specified entity does not exist.
Returns:
The AMPL entity with the specified name.
|
juraj-google-style
|
def user_picklist(i_info, command):
valid_entry = False
awsc.get_all_aminames(i_info)
list_instances(i_info, "", True)
msg_txt = ("Enter {0}
" [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI,
command, len(i_info)))
while not valid_entry:
entry_raw = obtain_input(msg_txt)
try:
entry_int = int(entry_raw)
except ValueError:
entry_int = 999
(tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command)
return tar_idx
|
Display list of instances matching args and ask user to select target.
Instance list displayed and user asked to enter the number corresponding
to the desired target instance, or '0' to abort.
Args:
i_info (dict): information on instances and details.
command (str): command specified on the command line.
Returns:
tar_idx (int): the dictionary index number of the targeted instance.
|
juraj-google-style
|
def convert_phase(component, subcomponent=SubComponent.UNSPECIFIED):
if component not in Component:
raise ValueError('Given component name not found')
if subcomponent not in SubComponent:
raise ValueError('Given subcomponent name not found')
if subcomponent != SubComponent.UNSPECIFIED and subcomponent.component != component:
raise ValueError("component and subcomponent name don't match")
def report_error(error_data: converter_error_data_pb2.ConverterErrorData):
error_data.component = component.value
if not error_data.subcomponent:
error_data.subcomponent = subcomponent.name
tflite_metrics = metrics.TFLiteConverterMetrics()
tflite_metrics.set_converter_error(error_data)
def report_error_message(error_message: Text):
error_data = converter_error_data_pb2.ConverterErrorData()
error_data.error_message = error_message
report_error(error_data)
def actual_decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except ConverterError as converter_error:
if converter_error.errors:
for error_data in converter_error.errors:
report_error(error_data)
else:
report_error_message(str(converter_error))
raise converter_error from None
except Exception as error:
report_error_message(str(error))
raise error from None
return wrapper
return actual_decorator
|
The decorator to identify converter component and subcomponent.
Args:
component: Converter component name.
subcomponent: Converter subcomponent name.
Returns:
Forward the result from the wrapped function.
Raises:
ValueError: if component and subcomponent name is not valid.
|
github-repos
|
def dropout_no_scaling(x, keep_prob):
if (keep_prob == 1.0):
return x
mask = tf.less(tf.random_uniform(tf.shape(x)), keep_prob)
return (x * cast_like(mask, x))
|
Like tf.nn.dropout, but does not scale up. Works on integers also.
Args:
x: a Tensor
keep_prob: a floating point number
Returns:
Tensor of the same shape as x.
|
codesearchnet
|
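A NumPy sketch of the same behaviour; because there is no 1/keep_prob rescaling, integer inputs stay integers:

```python
import numpy as np

def dropout_no_scaling_np(x, keep_prob, seed=0):
    if keep_prob == 1.0:
        return x
    rng = np.random.default_rng(seed)
    mask = rng.random(x.shape) < keep_prob   # keep each element with probability keep_prob
    return x * mask.astype(x.dtype)          # dropped elements become 0, kept ones are unchanged

print(dropout_no_scaling_np(np.arange(1, 9), keep_prob=0.5))
```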
def stopService(self):
self._service.factory.stopTrying()
(yield self._service.factory.stopFactory())
(yield service.MultiService.stopService(self))
|
Gracefully stop the service.
Returns:
defer.Deferred: a Deferred which is triggered when the service has
finished shutting down.
|
codesearchnet
|
def patch_traces(self, project_id, traces):
traces_pb = _traces_mapping_to_pb(traces)
self._gapic_api.patch_traces(project_id, traces_pb)
|
Sends new traces to Stackdriver Trace or updates existing traces.
Args:
project_id (Optional[str]): ID of the Cloud project where the trace
data is stored.
traces (dict): Required. The traces to be patched in the API call.
|
juraj-google-style
|
def dependency_to_rpm(dep, runtime):
logger.debug('Dependencies provided: {0} runtime: {1}.'.format(
dep, runtime))
converted = []
if not len(dep.specs):
converted.append(['Requires', dep.project_name])
else:
for ver_spec in dep.specs:
if ver_spec[0] == '!=':
converted.append(
['Conflicts', dep.project_name, '=', ver_spec[1]])
elif ver_spec[0] == '==':
converted.append(
['Requires', dep.project_name, '=', ver_spec[1]])
else:
converted.append(
['Requires', dep.project_name, ver_spec[0], ver_spec[1]])
if not runtime:
for conv in converted:
conv[0] = "Build" + conv[0]
logger.debug('Converted dependencies: {0}.'.format(converted))
return converted
|
Converts a dependency obtained by pkg_resources.Requirement.parse()
to RPM format.
Args:
dep - a dependency retrieved by pkg_resources.Requirement.parse()
runtime - whether the returned dependency should be runtime (True)
or build time (False)
Returns:
List of semi-SPECFILE dependencies (package names are not properly
converted yet).
For example: [['Requires', 'jinja2'],
['Conflicts', 'jinja2', '=', '2.0.1']]
|
juraj-google-style
|
def get_arg_value(node, arg_name, arg_pos=None):
if arg_name is not None:
for kw in node.keywords:
if kw.arg == arg_name:
return (True, kw.value)
if arg_pos is not None:
idx = 0
for arg in node.args:
if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred):
continue
if idx == arg_pos:
return (True, arg)
idx += 1
return (False, None)
|
Get the value of an argument from a ast.Call node.
This function goes through the positional and keyword arguments to check
whether a given argument was used, and if so, returns its value (the node
representing its value).
This cannot introspect *args or **args, but it safely handles *args in
Python3.5+.
Args:
node: The ast.Call node to extract arg values from.
arg_name: The name of the argument to extract.
arg_pos: The position of the argument (in case it's passed as a positional
argument).
Returns:
A tuple (arg_present, arg_value) containing a boolean indicating whether
the argument is present, and its value in case it is.
|
github-repos
|
def speed_difference(points):
data = [0]
for before, after in pairwise(points):
data.append(before.vel - after.vel)
return data
|
Computes the speed difference between each adjacent point
Args:
points (:obj:`Point`)
Returns:
:obj:`list` of float: Speed difference between each pair of adjacent points; the first element is 0.
|
juraj-google-style
|
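A self-contained version with a minimal `Point` stand-in and the usual `pairwise` recipe (the real `Point` class has more fields):

```python
from collections import namedtuple
from itertools import tee

Point = namedtuple('Point', ['vel'])  # hypothetical minimal stand-in

def pairwise(iterable):
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)

def speed_difference(points):
    data = [0]
    for before, after in pairwise(points):
        data.append(before.vel - after.vel)
    return data

print(speed_difference([Point(5.0), Point(3.5), Point(4.0)]))  # [0, 1.5, -0.5]
```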
def get_meas_los(self, user_lo_config):
try:
_m_los = self.default_meas_los.copy()
except KeyError:
raise PulseError('Default measurement frequencies not exist.')
for channel, lo_freq in user_lo_config.meas_lo_dict().items():
_m_los[channel.index] = lo_freq
if _m_los == self.default_meas_los:
return None
return _m_los
|
Embed default meas LO frequencies from backend and format them to list object.
If configured lo frequency is the same as default, this method returns `None`.
Args:
user_lo_config (LoConfig): A dictionary of LOs to format.
Returns:
list: A list of meas LOs.
Raises:
PulseError: when LO frequencies are missing.
|
juraj-google-style
|
def _make_3d(field, twod):
shp = list(field.shape)
if twod and 'X' in twod:
shp.insert(1, 1)
elif twod:
shp.insert(0, 1)
return field.reshape(shp)
|
Add a dimension to field if necessary.
Args:
field (numpy.array): the field that need to be 3d.
twod (str): 'XZ', 'YZ' or None depending on what is relevant.
Returns:
numpy.array: reshaped field.
|
juraj-google-style
|
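The reshape trick is easy to see on a toy array; this mirrors the branch of the function above where `twod` contains 'X':

```python
import numpy as np

field = np.zeros((4, 7))         # a 2D field

shp = list(field.shape)
shp.insert(1, 1)                 # twod contains 'X': insert a singleton second axis
print(field.reshape(shp).shape)  # (4, 1, 7); the other branch would give (1, 4, 7)
```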
def generate_reciprocal_vectors_squared(a1, a2, a3, encut):
for vec in genrecip(a1, a2, a3, encut):
yield np.dot(vec, vec)
|
Generate reciprocal vector magnitudes within the cutoff along the specified
lattice vectors.
Args:
a1: Lattice vector a (in Bohrs)
a2: Lattice vector b (in Bohrs)
a3: Lattice vector c (in Bohrs)
encut: Reciprocal vector energy cutoff
Returns:
[[g1^2], [g2^2], ...] Square of reciprocal vectors (1/Bohr)^2
determined by a1, a2, a3 and whose magnitude is less than gcut^2.
|
juraj-google-style
|
def load_from_files(files, globs: Optional[Dict[str, Any]]=None, set_up: Optional[Callable[[Any], None]]=None, tear_down: Optional[Callable[[Any], None]]=None) -> doctest.DocFileSuite:
if globs is None:
globs = {}
files = [os.fspath(f) for f in files]
globs['_print_if_not_none'] = _print_if_not_none
return doctest.DocFileSuite(*files, module_relative=False, parser=FencedCellParser(fence_label='python'), globs=globs, setUp=set_up, tearDown=tear_down, checker=FencedCellOutputChecker(), optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.IGNORE_EXCEPTION_DETAIL | doctest.DONT_ACCEPT_BLANKLINE)
|
Creates a doctest suite from the files list.
Args:
files: A list of file paths to test.
globs: The global namespace the tests are run in.
set_up: Run before each test, receives the test as argument.
tear_down: Run after each test, receives the test as argument.
Returns:
A DocFileSuite containing the tests.
|
github-repos
|
def load_feather(protein_feather, length_filter_pid=None, copynum_scale=False, copynum_df=None):
protein_df = pd.read_feather(protein_feather).set_index('index')
from ssbio.protein.sequence.properties.residues import _aa_property_dict_one, EXTENDED_AA_PROPERTY_DICT_ONE
aggregators = {'aa_count_bulk': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Bulky'], 'subseqs': ['metal_2_5D', 'metal_3D']}, 'aa_count_carb': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Carbonylation susceptible'], 'subseqs': ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']}, 'aa_count_chrg': {'residues': _aa_property_dict_one['Charged'], 'subseqs': ['metal_2_5D', 'metal_3D', 'csa_2_5D', 'sites_2_5D', 'acc_2D', 'acc_3D', 'surface_3D']}, 'aa_count_poschrg': {'residues': _aa_property_dict_one['Basic'], 'subseqs': ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']}, 'aa_count_negchrg': {'residues': _aa_property_dict_one['Acidic'], 'subseqs': ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']}, 'aa_count_tmstab': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM stabilizing'], 'subseqs': ['tm_2D', 'tm_3D']}, 'aa_count_tmunstab': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM to Thr stabilizing'], 'subseqs': ['tm_2D', 'tm_3D']}, 'aa_count_dis': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Disorder promoting'], 'subseqs': ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D', 'dna_2_5D']}, 'aa_count_ord': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Order promoting'], 'subseqs': ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D', 'dna_2_5D']}}
for (suffix, info) in aggregators.items():
agg_residues = info['residues']
for prefix in info['subseqs']:
to_add_idxes = []
for agg_res in agg_residues:
to_add_idx = ((prefix + '_aa_count_') + agg_res)
if (to_add_idx in protein_df.index):
to_add_idxes.append(to_add_idx)
subseq_agged_col = protein_df.loc[(to_add_idxes, :)].sum()
protein_df.loc[((prefix + '_') + suffix)] = subseq_agged_col
if length_filter_pid:
keep_cols = protein_df.loc['aa_count_total'][(protein_df.loc['aa_count_total'] > (protein_df.at[('aa_count_total', 'K12')] * length_filter_pid))].index
protein_df = protein_df[keep_cols]
if copynum_scale:
if (not isinstance(copynum_df, pd.DataFrame)):
raise ValueError('Please supply copy numbers')
protein_id = op.basename(protein_feather).split('_protein')[0]
if (protein_id in copynum_df.index):
copynum = copynum_df.at[(protein_id, 'copynum')]
if (copynum > 0):
protein_df = (protein_df * copynum)
return protein_df
|
Load a feather of amino acid counts for a protein.
Args:
protein_feather (str): path to feather file
length_filter_pid (float): if set, only keep strain columns whose total amino
acid count exceeds this fraction of the K12 reference count
copynum_scale (bool): if counts should be multiplied by protein copy number
copynum_df (DataFrame): DataFrame of copy numbers
Returns:
DataFrame: of counts with some aggregated together
|
codesearchnet
|
def getText(page, output = "text"):
CheckParent(page)
dl = page.getDisplayList()
formats = ("text", "html", "json", "xml", "xhtml", "dict", "rawdict")
images = (0, 1, 1, 0, 1, 1, 1)
try:
f = formats.index(output.lower())
except:
f = 0
flags = TEXT_PRESERVE_LIGATURES | TEXT_PRESERVE_WHITESPACE
if images[f] :
flags |= TEXT_PRESERVE_IMAGES
tp = dl.getTextPage(flags)
t = tp._extractText(f)
del dl
del tp
return t
|
Extract a document page's text.
Args:
output: (str) text, html, dict, json, rawdict, xhtml or xml.
Returns:
the output of the TextPage methods extractText, extractHTML, extractDICT, extractJSON, extractRAWDICT, extractXHTML or extractXML respectively. The default (also used for misspelled choices) is "text".
|
juraj-google-style
|
def getattr(self, c, attr, default=None, match_only=None):
matching_decor = self.get_decor(c, match_only=match_only)
try:
return getattr(matching_decor, attr)
except AttributeError:
return default
|
Get the attribute of a component.
Args:
c (component): The component to look up.
attr (str): The attribute to get.
default (str): What to return in the event of no match.
match_only (list of str): The component attributes to include in the
comparison. Default: All of them.
Returns:
obj. The specified attribute of the matching Decor in the Legend.
|
codesearchnet
|
def FromString(val):
if isinstance(val, bytes):
val = val.decode('utf-8')
try:
return ContractParameterType[val]
except Exception as e:
pass
try:
if isinstance(val, (bytearray, bytes)):
int_val = int.from_bytes(val, 'little')
else:
int_val = int.from_bytes(binascii.unhexlify(val), 'little')
except (binascii.Error, TypeError) as e:
int_val = int(val)
return ContractParameterType(int_val)
|
Create a ContractParameterType object from a str
Args:
val (str): the value to be converted to a ContractParameterType.
val can be hex encoded (b'07'), int (7), string int ("7"), or string literal ("String")
Returns:
ContractParameterType
|
juraj-google-style
|
def DeregisterHelper(cls, helper_class):
helper_name = helper_class.NAME.lower()
if (helper_name not in cls._helper_classes):
raise KeyError('Helper class not set for name: {0:s}.'.format(helper_class.NAME))
del cls._helper_classes[helper_name]
|
Deregisters a helper class.
The helper classes are identified based on their lower case name.
Args:
helper_class (type): class object of the argument helper.
Raises:
KeyError: if helper class is not set for the corresponding name.
|
codesearchnet
|
def destroy_unit(self, unit):
if isinstance(unit, Unit):
unit = unit.name
else:
unit = str(unit)
self._single_request('Units.Delete', unitName=unit)
return True
|
Delete a unit from the cluster
Args:
unit (str, Unit): The Unit, or name of the unit to delete
Returns:
True: The unit was deleted
Raises:
fleet.v1.errors.APIError: Fleet returned a response code >= 400
|
codesearchnet
|
def scatter(self, indices, value, name=None):
return self._implementation.scatter(indices, value, name=name)
|
Scatter the values of a `Tensor` in specific indices of a `TensorArray`.
Args:
indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If the
`TensorArray` is not dynamic, `max_value=size()`.
value: (N+1)-D. Tensor of type `dtype`. The Tensor to unpack.
name: A name for the operation (optional).
Returns:
A new TensorArray object with flow that ensures the scatter occurs.
Use this object for all subsequent operations.
Raises:
ValueError: if the shape inference fails.
|
github-repos
|
def circuit_to_dag(circuit):
dagcircuit = DAGCircuit()
dagcircuit.name = circuit.name
for register in circuit.qregs:
dagcircuit.add_qreg(register)
for register in circuit.cregs:
dagcircuit.add_creg(register)
for (instruction, qargs, cargs) in circuit.data:
if (instruction.control is None):
control = None
else:
control = (instruction.control[0], instruction.control[1])
dagcircuit.apply_operation_back(instruction.copy(), qargs, cargs, control)
return dagcircuit
|
Build a ``DAGCircuit`` object from a ``QuantumCircuit``.
Args:
circuit (QuantumCircuit): the input circuit.
Return:
DAGCircuit: the DAG representing the input circuit.
|
codesearchnet
|
def to_dict(self):
return {'node': [v.to_dict() for v in self.vertices], 'edge': [e.to_dict() for e in self.edges]}
|
Returns a simplified dictionary representing the Graph.
Returns:
A dictionary that can easily be serialized to JSON.
|
codesearchnet
|
def _prepare_4d_attention_mask(attention_mask: Optional[torch.Tensor], sequence_length: int, dtype: torch.dtype, device: torch.device, is_causal: bool=True) -> Optional[torch.Tensor]:
min_value = torch.finfo(dtype).min if dtype.is_floating_point else torch.iinfo(dtype).min
if attention_mask is not None:
attention_mask = attention_mask.view(attention_mask.shape[0], 1, 1, -1)
attention_mask = attention_mask * min_value
if is_causal:
causal_mask = torch.triu(torch.ones((sequence_length, sequence_length), dtype=dtype, device=device) * min_value, diagonal=1)
causal_mask = causal_mask.view(1, 1, sequence_length, sequence_length)
if attention_mask is not None:
attention_mask = torch.minimum(attention_mask, causal_mask)
else:
attention_mask = causal_mask
return attention_mask
|
Creates 4D attention mask and combines causal and padding masks if needed.
Args:
attention_mask: Optional tensor of shape (batch_size, seq_length) containing padding mask
sequence_length: Length of the sequence
dtype: Data type of the mask
device: Device of the mask
is_causal: Whether to apply causal masking
Returns:
4D attention mask of shape (batch_size, 1, seq_length, seq_length)
|
github-repos
|
def set_weights(distribution_strategy, dist_model, weights):
assign_ops = []
for layer in dist_model.layers:
num_param = len(layer.weights)
layer_weights = weights[:num_param]
for sw, w in zip(layer.weights, layer_weights):
if ops.executing_eagerly_outside_functions():
sw.assign(w)
else:
assign_ops.append(distribution_strategy.unwrap(sw.assign(w)))
weights = weights[num_param:]
if not ops.executing_eagerly_outside_functions():
backend.get_session(assign_ops).run(assign_ops)
|
Sets the weights of the replicated models.
The weights of the replicated models are set to the weights of the original
model. The weights of the replicated model are Mirrored variables and hence
we need to use the `update` call within a DistributionStrategy scope.
Args:
distribution_strategy: DistributionStrategy used to distribute training
and validation.
dist_model: The replicated models on the different devices.
weights: The weights of the original model.
|
github-repos
|
def _upload_artifacts_to_path(self, mirror=False):
if not os.listdir(self.artifact_path) or not self.artifact_path:
raise S3ArtifactNotFound
uploaded = False
if self.s3props.get("content_metadata"):
LOG.info("Uploading in multiple parts to set metadata")
uploaded = self.content_metadata_uploads(mirror=mirror)
if not uploaded:
cmd = self._get_upload_cmd(mirror=mirror)
result = subprocess.run(cmd, check=True, shell=True, stdout=subprocess.PIPE)
LOG.debug("Upload Command Ouput: %s", result.stdout)
LOG.info("Uploaded artifacts to %s bucket", self.bucket)
|
Recursively upload directory contents to S3.
Args:
mirror (bool): If true, uses a flat directory structure instead of nesting under a version.
|
juraj-google-style
|
def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, past_key_values_length=0, training=False):
assert not (input_ids is None and inputs_embeds is None)
if input_ids is not None:
check_embeddings_within_bounds(input_ids, self.config.vocab_size)
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
if position_ids is None:
if input_ids is not None:
position_ids = self.create_position_ids_from_input_ids(input_ids=input_ids, past_key_values_length=past_key_values_length)
else:
position_ids = tf.expand_dims(tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings = inputs_embeds + position_embeds + token_type_embeds
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings
|
Applies embedding based on inputs tensor.
Returns:
final_embeddings (`tf.Tensor`): output embedding tensor.
|
github-repos
|
def _count_condition(values, weights=None, metrics_collections=None, updates_collections=None):
check_ops.assert_type(values, dtypes.bool)
count = metric_variable([], dtypes.float32, name='count')
values = math_ops.cast(values, dtypes.float32)
if weights is not None:
with ops.control_dependencies((check_ops.assert_rank_in(weights, (0, array_ops.rank(values))),)):
weights = math_ops.cast(weights, dtypes.float32)
values = math_ops.multiply(values, weights)
value_tensor = _aggregate_variable(count, metrics_collections)
update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return (value_tensor, update_op)
|
Sums the weights of cases where the given values are True.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `bool` `Tensor` of arbitrary size.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
|
github-repos
|
def add_byte_counter(self, reader):
def update_bytes_read(record_size, is_record_size=False, **kwargs):
if is_record_size:
self.read_counter.add_bytes_read(record_size)
if isinstance(reader, observable.ObservableMixin):
reader.register_observer(update_bytes_read)
|
Adds byte counter observer to a side input reader.
Args:
reader: A reader that should inherit from ObservableMixin to have
bytes tracked.
|
github-repos
|
def read(self, size=None):
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if self._uncompressed_stream_size is None:
self._uncompressed_stream_size = self._GetUncompressedStreamSize()
if self._uncompressed_stream_size < 0:
raise IOError('Invalid uncompressed stream size.')
if self._current_offset >= self._uncompressed_stream_size:
return b''
if self._realign_offset:
self._AlignUncompressedDataOffset(self._current_offset)
self._realign_offset = False
if size is None:
size = self._uncompressed_stream_size
if self._current_offset + size > self._uncompressed_stream_size:
size = self._uncompressed_stream_size - self._current_offset
uncompressed_data = b''
if size == 0:
return uncompressed_data
while size > self._uncompressed_data_size:
uncompressed_data = b''.join([
uncompressed_data,
self._uncompressed_data[self._uncompressed_data_offset:]])
remaining_uncompressed_data_size = (
self._uncompressed_data_size - self._uncompressed_data_offset)
self._current_offset += remaining_uncompressed_data_size
size -= remaining_uncompressed_data_size
if self._current_offset >= self._uncompressed_stream_size:
break
read_count = self._ReadCompressedData(self._COMPRESSED_DATA_BUFFER_SIZE)
self._uncompressed_data_offset = 0
if read_count == 0:
break
if size > 0:
slice_start_offset = self._uncompressed_data_offset
slice_end_offset = slice_start_offset + size
uncompressed_data = b''.join([
uncompressed_data,
self._uncompressed_data[slice_start_offset:slice_end_offset]])
self._uncompressed_data_offset += size
self._current_offset += size
return uncompressed_data
|
Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
|
juraj-google-style
|
def apply_to_structure(self, structure):
def_struct = structure.copy()
old_latt = def_struct.lattice.matrix
new_latt = np.transpose(np.dot(self, np.transpose(old_latt)))
def_struct.lattice = Lattice(new_latt)
return def_struct
|
Apply the deformation gradient to a structure.
Args:
structure (Structure object): the structure object to
be modified by the deformation
|
juraj-google-style
|
def _call_api(self, verb, url, **request_kwargs):
api = 'https://api.github.com{}'.format(url)  # assumed reconstruction of the truncated GitHub API v3 base URL
auth_headers = {'Authorization': 'token {}'.format(self.api_token)}
headers = {**auth_headers, **request_kwargs.pop('headers', {})}
return getattr(requests, verb)(api, headers=headers, **request_kwargs)
|
Perform a github API call
Args:
verb (str): Can be "post", "put", or "get"
url (str): The base URL with a leading slash for Github API (v3)
auth (str or HTTPBasicAuth): A Github API token or a HTTPBasicAuth object
|
codesearchnet
|
def prepare_question_encoder(inputs, hparams):
encoder_input = inputs
encoder_padding = common_attention.embedding_to_padding(encoder_input)
ignore_padding = common_attention.attention_bias_ignore_padding(
encoder_padding)
encoder_self_attention_bias = ignore_padding
if hparams.pos == "timing":
encoder_input = common_attention.add_timing_signal_1d(encoder_input)
elif hparams.pos == "emb":
encoder_input = common_attention.add_positional_embedding(
encoder_input, hparams.max_length, "inputs_positional_embedding",
None)
return (encoder_input, encoder_self_attention_bias)
|
Prepare question encoder.
Args:
inputs: a Tensor.
hparams: run hyperparameters
Returns:
encoder_input: a Tensor, bottom of encoder stack
encoder_self_attention_bias: a bias tensor for use in encoder self-attention
|
juraj-google-style
|
def ExtractEvents(
self, parser_mediator, registry_key, codepage='cp1252', **kwargs):
self._ParseMRUListExKey(parser_mediator, registry_key, codepage=codepage)
if registry_key.name == 'RecentDocs':
for subkey in registry_key.GetSubkeys():
self._ParseMRUListExKey(parser_mediator, subkey, codepage=codepage)
|
Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage.
|
juraj-google-style
|
def easeOutBounce(n):
_checkRange(n)
if (n < (1 / 2.75)):
return ((7.5625 * n) * n)
elif (n < (2 / 2.75)):
n -= (1.5 / 2.75)
return (((7.5625 * n) * n) + 0.75)
elif (n < (2.5 / 2.75)):
n -= (2.25 / 2.75)
return (((7.5625 * n) * n) + 0.9375)
else:
n -= (2.65 / 2.75)
return (((7.5625 * n) * n) + 0.984375)
|
A bouncing tween function that hits the destination and then bounces to rest.
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
Returns:
(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
|
codesearchnet
|
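A short usage sketch for the easing function above, assuming easeOutBounce (and its _checkRange helper) are importable from their module, which is not named in the entry.
# Sample the tween at evenly spaced time steps; the curve reaches 1.0 early and
# rebounds like a bouncing ball before settling at exactly 1.0 at n == 1.0.
for step in range(11):
    n = step / 10.0
    print('t={:.1f} -> {:.4f}'.format(n, easeOutBounce(n)))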
def _load_certificate(location):
    if location.startswith('https://'):
_log.info('Downloading x509 certificate from %s', location)
with requests.Session() as session:
            session.mount('https://', requests.adapters.HTTPAdapter())  # second argument truncated in the source dump; a default HTTPAdapter is assumed
response = session.get(location, timeout=30)
response.raise_for_status()
return response.text
else:
_log.info('Loading local x509 certificate from %s', location)
with open(location, 'rb') as fd:
return fd.read().decode('ascii')
|
Load a certificate from the given location.
Args:
location (str): The location to load. This can either be an HTTPS URL or an absolute file
path. This is intended to be used with PEM-encoded certificates and therefore assumes
ASCII encoding.
Returns:
str: The PEM-encoded certificate as a unicode string.
Raises:
requests.exception.RequestException: Any exception requests could raise.
IOError: If the location provided could not be opened and read.
|
codesearchnet
|
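A usage sketch for the loader above, assuming it is importable; the file path and URL below are hypothetical placeholders.
# Local file: read from disk and decoded as ASCII PEM text.
pem_text = _load_certificate('/etc/pki/ca-trust/source/anchors/my-ca.pem')
print(pem_text.splitlines()[0])  # typically '-----BEGIN CERTIFICATE-----'

# HTTPS location: fetched with a 30 second timeout; HTTP errors raise immediately.
remote_pem = _load_certificate('https://example.com/certs/my-ca.pem')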
def _schedule_shards(cls, spec, readers, queue_name, base_path, mr_state):
shard_states = []
for (shard_number, input_reader) in enumerate(readers):
shard_state = model.ShardState.create_new(spec.mapreduce_id, shard_number)
shard_state.shard_description = str(input_reader)
shard_states.append(shard_state)
existing_shard_states = db.get((shard.key() for shard in shard_states))
existing_shard_keys = set((shard.key() for shard in existing_shard_states if (shard is not None)))
db.put((shard for shard in shard_states if (shard.key() not in existing_shard_keys)), config=util.create_datastore_write_config(spec))
writer_class = spec.mapper.output_writer_class()
writers = ([None] * len(readers))
if writer_class:
for (shard_number, shard_state) in enumerate(shard_states):
writers[shard_number] = writer_class.create(mr_state.mapreduce_spec, shard_state.shard_number, (shard_state.retries + 1), mr_state.writer_state)
for (shard_number, (input_reader, output_writer)) in enumerate(zip(readers, writers)):
shard_id = model.ShardState.shard_id_from_number(spec.mapreduce_id, shard_number)
task = MapperWorkerCallbackHandler._state_to_task(model.TransientShardState(base_path, spec, shard_id, 0, input_reader, input_reader, output_writer=output_writer, handler=spec.mapper.handler), shard_states[shard_number])
MapperWorkerCallbackHandler._add_task(task, spec, queue_name)
|
Prepares shard states and schedules their execution.
Even though this method does not schedule shard task and save shard state
transactionally, it's safe for taskqueue to retry this logic because
the initial shard_state for each shard is the same from any retry.
This is an important yet reasonable assumption on model.ShardState.
Args:
spec: mapreduce specification as MapreduceSpec.
readers: list of InputReaders describing shard splits.
queue_name: The queue to run this job on.
base_path: The base url path of mapreduce callbacks.
mr_state: The MapReduceState of current job.
|
codesearchnet
|
def compare_jsone_task_definition(parent_link, rebuilt_definitions):
diffs = []
for compare_definition in rebuilt_definitions['tasks']:
if ('taskId' in compare_definition):
del compare_definition['taskId']
compare_definition = remove_empty_keys(compare_definition)
runtime_definition = remove_empty_keys(parent_link.task)
diff = list(dictdiffer.diff(compare_definition, runtime_definition))
if diff:
diffs.append(pprint.pformat(diff))
continue
log.info('{}: Good.'.format(parent_link.name))
break
else:
error_msg = "{} {}: the runtime task doesn't match any rebuilt definition!\n{}".format(parent_link.name, parent_link.task_id, pprint.pformat(diffs))
log.critical(error_msg)
raise CoTError(error_msg)
|
Compare the json-e rebuilt task definition vs the runtime definition.
Args:
parent_link (LinkOfTrust): the parent link to test.
rebuilt_definitions (dict): the rebuilt task definitions.
Raises:
CoTError: on failure.
|
codesearchnet
|
def multisorted(items, *keys):
if len(keys) == 0:
keys = [asc()]
for key in reversed(keys):
items = sorted(items, key=key.func, reverse=key.reverse)
return items
|
Sort by multiple attributes.
Args:
items: An iterable series to be sorted.
*keys: Key objects which extract key values from the items.
The first key will be the most significant, and the
last key the least significant. If no key functions
are provided, the items will be sorted in ascending
natural order.
Returns:
A list of items sorted according to keys.
|
juraj-google-style
|
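A usage sketch for multisorted, with hypothetical asc/desc key helpers exposing the .func and .reverse attributes the implementation expects; the real key constructors are not shown in this entry, and multisorted itself is assumed to be importable.
from collections import namedtuple

Key = namedtuple('Key', ['func', 'reverse'])

def asc(func=lambda item: item):
    return Key(func, False)

def desc(func=lambda item: item):
    return Key(func, True)

people = [('bob', 30), ('ann', 30), ('cid', 25)]
# Most significant key first: age descending, then name ascending for ties.
print(multisorted(people, desc(lambda p: p[1]), asc(lambda p: p[0])))
# -> [('ann', 30), ('bob', 30), ('cid', 25)]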
def process(self, elem: t.Any):
pass
|
Process is the operation that will be rate limited.
Results will be yielded each time the process method is called.
Args:
elem: The individual element to process.
Returns:
Output can be anything; it will be emitted as the output of the RateLimit
PTransform.
|
github-repos
|
def convert_videos_to_summaries(input_videos, output_videos, target_videos, tag, decode_hparams, display_ground_truth=False):
fps = decode_hparams.frames_per_second
border_percent = decode_hparams.border_percent
max_outputs = decode_hparams.max_display_outputs
target_steps = target_videos.shape[1]
all_summaries = []
input_videos = create_border(input_videos, color='blue', border_percent=border_percent)
target_videos = create_border(target_videos, color='red', border_percent=border_percent)
output_videos = create_border(output_videos, color='red', border_percent=border_percent)
all_input = np.concatenate((input_videos, target_videos), axis=1)
all_output = np.concatenate((input_videos, output_videos), axis=1)
(output_summ_vals, _) = common_video.py_gif_summary(('%s/output' % tag), all_output, max_outputs=max_outputs, fps=fps, return_summary_value=True)
all_summaries.extend(output_summ_vals)
if display_ground_truth:
(input_summ_vals, _) = common_video.py_gif_summary(('%s/input' % tag), all_input, max_outputs=max_outputs, fps=fps, return_summary_value=True)
all_summaries.extend(input_summ_vals)
    iterable = zip(output_videos[:max_outputs, :target_steps], target_videos[:max_outputs])
for (ind, (input_video, output_video)) in enumerate(iterable):
(t, h, w, c) = input_video.shape
input_frames = np.reshape(input_video, ((t * h), w, c))
output_frames = np.reshape(output_video, ((t * h), w, c))
all_frames = np.concatenate((input_frames, output_frames), axis=1)
tag = ('input/output/%s_sample_%d' % (tag, ind))
frame_by_frame_summ = image_utils.image_to_tf_summary_value(all_frames, tag=tag)
all_summaries.append(frame_by_frame_summ)
return all_summaries
|
Converts input, output and target videos into video summaries.
Args:
input_videos: 5-D NumPy array, (NTHWC) conditioning frames.
output_videos: 5-D NumPy array, (NTHWC) model predictions.
target_videos: 5-D NumPy array, (NTHWC) target frames.
tag: tf summary tag.
decode_hparams: HParams.
display_ground_truth: Whether or not to display ground truth videos.
Returns:
summaries: a list of tf frame-by-frame and video summaries.
|
codesearchnet
|
def find(self, username):
filter = ['(uid={})'.format(username)]
results = self.client.search(filter)
    if (len(results) < 1):
        raise ldap_tools.exceptions.NoUserFound('User ({}) not found'.format(username))
    elif (len(results) > 1):
        raise ldap_tools.exceptions.TooManyResults('Multiple users found. Please narrow your search.')
    else:
        return results
|
Find user with given username.
Args:
username: Username of the user to search for
Raises:
ldap_tools.exceptions.NoUserFound: No users returned by LDAP
ldap_tools.exceptions.TooManyResults:
Multiple users returned by LDAP
|
codesearchnet
|
def _prepare_summary_table(rows):
if (not rows):
return []
key_field = 'job-name'
if (key_field not in rows[0]):
key_field = 'job-id'
grouped = collections.defaultdict((lambda : collections.defaultdict((lambda : []))))
for row in rows:
grouped[row.get(key_field, '')][row.get('status', '')] += [row]
new_rows = []
for job_key in sorted(grouped.keys()):
group = grouped.get(job_key, None)
canonical_status = ['RUNNING', 'SUCCESS', 'FAILURE', 'CANCEL']
for status in (canonical_status + sorted(group.keys())):
if (status not in group):
continue
task_count = len(group[status])
del group[status]
if task_count:
summary_row = collections.OrderedDict()
summary_row[key_field] = job_key
summary_row['status'] = status
summary_row['task-count'] = task_count
new_rows.append(summary_row)
return new_rows
|
Create a new table that is a summary of the input rows.
Rows with the same (job-name or job-id, status) are grouped together.
Args:
rows: the input rows, a list of dictionaries.
Returns:
A new row set of summary information.
|
codesearchnet
|
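A small illustration of the grouping above, assuming the function is importable; the job names are hypothetical.
rows = [
    {'job-name': 'align', 'status': 'SUCCESS'},
    {'job-name': 'align', 'status': 'SUCCESS'},
    {'job-name': 'align', 'status': 'FAILURE'},
    {'job-name': 'call', 'status': 'RUNNING'},
]
for summary in _prepare_summary_table(rows):
    print(dict(summary))
# Expected groups: align/SUCCESS (2 tasks), align/FAILURE (1), call/RUNNING (1),
# with statuses emitted in canonical order within each job.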
def on_test_begin(self, logs=None):
logs = self._process_logs(logs)
for callback in self.callbacks:
callback.on_test_begin(logs)
|
Calls the `on_test_begin` methods of its callbacks.
Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
|
github-repos
|
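A minimal sketch of how the dispatcher above is typically driven, assuming it is the tf.keras CallbackList wrapper (the enclosing class name is not shown in the entry).
import tensorflow as tf

history = tf.keras.callbacks.History()
callback_list = tf.keras.callbacks.CallbackList([history])
# Fans the hook out to every registered callback with the (optional) logs dict.
callback_list.on_test_begin(logs={})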
class BaseModelOutputWithCLSToken(ModelOutput):
last_hidden_state: Optional[torch.FloatTensor] = None
cls_token_value: Optional[torch.FloatTensor] = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
|
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
cls_token_value (`torch.FloatTensor` of shape `(batch_size, 1, hidden_size)`):
Classification token at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
plus the initial embedding outputs.
|
github-repos
|
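A construction sketch for the output container above, assuming it keeps the dataclass/ModelOutput behaviour of its Transformers base class (the decorator is not visible in the entry); tensor shapes are illustrative only.
import torch

outputs = BaseModelOutputWithCLSToken(
    last_hidden_state=torch.zeros(2, 16, 64),
    cls_token_value=torch.zeros(2, 1, 64),
)
# ModelOutput containers support both attribute and key access.
print(outputs.last_hidden_state.shape, outputs['cls_token_value'].shape)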
def __init__(self, session_root, watch_fn=None, thread_name_filter=None):
self._session_root = session_root
self._watch_fn = watch_fn
self._thread_name_filter = thread_name_filter
self._session_wrapper = None
|
Create a local debugger command-line interface (CLI) hook.
Args:
session_root: See doc of
`dumping_wrapper.DumpingDebugWrapperSession.__init__`.
watch_fn: See doc of
`dumping_wrapper.DumpingDebugWrapperSession.__init__`.
thread_name_filter: Regular-expression white list for threads on which the
wrapper session will be active. See doc of `BaseDebugWrapperSession` for
more details.
|
github-repos
|
def rename(self, name):
return self.client.api.rename(self.id, name)
|
Rename this container. Similar to the ``docker rename`` command.
Args:
name (str): New name for the container
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
juraj-google-style
|
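A usage sketch with the Docker SDK for Python; the container names below are hypothetical.
import docker

client = docker.from_env()
container = client.containers.get('old-name')
container.rename('new-name')
container.reload()       # refresh cached attributes from the daemon
print(container.name)    # 'new-name'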
async def _get_person_json(self, id_, url_params=None):
url = self.url_builder(
'person/{person_id}',
dict(person_id=id_),
url_params=url_params or OrderedDict(),
)
data = await self.get_data(url)
return data
|
Retrieve raw person JSON by ID.
Arguments:
id_ (:py:class:`int`): The person's TMDb ID.
url_params (:py:class:`dict`): Any additional URL parameters.
Returns:
:py:class:`dict`: The JSON data.
|
juraj-google-style
|
def get_evaluation_parameter(self, parameter_name, default_value=None):
if (('evaluation_parameters' in self._expectations_config) and (parameter_name in self._expectations_config['evaluation_parameters'])):
return self._expectations_config['evaluation_parameters'][parameter_name]
else:
return default_value
|
Get an evaluation parameter value that has been stored in meta.
Args:
parameter_name (string): The name of the parameter to store.
default_value (any): The default value to be returned if the parameter is not found.
Returns:
The current value of the evaluation parameter.
|
codesearchnet
|
def _send_data(self, data, start_offset, file_len):
headers = {}
end_offset = ((start_offset + len(data)) - 1)
if data:
headers['content-range'] = ('bytes %d-%d/%s' % (start_offset, end_offset, file_len))
else:
headers['content-range'] = ('bytes */%s' % file_len)
(status, response_headers, content) = self._api.put_object(self._path_with_token, payload=data, headers=headers)
if (file_len == '*'):
expected = 308
else:
expected = 200
errors.check_status(status, [expected], self._path, headers, response_headers, content, {'upload_path': self._path_with_token})
|
Send the block to the storage service.
This is a utility method that does not modify self.
Args:
data: data to send in str.
start_offset: start offset of the data in relation to the file.
file_len: an int if this is the last data to append to the file.
Otherwise '*'.
|
codesearchnet
|
def all_v2_summary_ops():
if context.executing_eagerly():
return None
return ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION)
|
Returns all V2-style summary ops defined in the current default graph.
This includes ops from TF 2.0 tf.summary and TF 1.x tf.contrib.summary (except
for `tf.contrib.summary.graph` and `tf.contrib.summary.import_event`), but
does *not* include TF 1.x tf.summary ops.
Returns:
List of summary ops, or None if called under eager execution.
|
github-repos
|
def build_inception_graph(self):
image_str_tensor = tf.placeholder(tf.string, shape=[None])
image = tf.map_fn(_util.decode_and_resize, image_str_tensor, back_prop=False, dtype=tf.uint8)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
with slim.arg_scope(_inceptionlib.inception_v3_arg_scope()):
(_, end_points) = _inceptionlib.inception_v3(image, is_training=False)
inception_embeddings = end_points['PreLogits']
inception_embeddings = tf.squeeze(inception_embeddings, [1, 2], name='SpatialSqueeze')
return (image_str_tensor, inception_embeddings)
|
Builds an inception graph and add the necessary input & output tensors.
To use other Inception models modify this file. Also preprocessing must be
modified accordingly.
See tensorflow/contrib/slim/python/slim/nets/inception_v3.py for
details about InceptionV3.
Returns:
input_jpeg: A placeholder for jpeg string batch that allows feeding the
Inception layer with image bytes for prediction.
inception_embeddings: The embeddings tensor.
|
codesearchnet
|
def set_dataset_date(self, dataset_date, dataset_end_date=None, date_format=None):
parsed_date = self._parse_date(dataset_date, date_format)
if dataset_end_date is None:
self.set_dataset_date_from_datetime(parsed_date)
else:
parsed_end_date = self._parse_date(dataset_end_date, date_format)
self.set_dataset_date_from_datetime(parsed_date, parsed_end_date)
|
Set dataset date from string using specified format. If no format is supplied, the function will guess.
For unambiguous formats, this should be fine.
Args:
dataset_date (str): Dataset date string
dataset_end_date (Optional[str]): Dataset end date string
date_format (Optional[str]): Date format. If None is given, will attempt to guess. Defaults to None.
Returns:
None
|
juraj-google-style
|
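A usage sketch for the setter above, assuming `dataset` is an instance of the class this method belongs to (an HDX-style Dataset object is implied but not named in the entry); the dates are illustrative.
# Single date with an explicit format.
dataset.set_dataset_date('2016-06-01', date_format='%Y-%m-%d')

# Date range; both endpoints are parsed with the same format string.
dataset.set_dataset_date('01/06/2016', dataset_end_date='30/06/2016',
                         date_format='%d/%m/%Y')

# No format supplied: the parser attempts to guess unambiguous strings.
dataset.set_dataset_date('June 1, 2016')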
def add(self, payload=None):
try:
db = self._client[self.database]
col = db[WORKFLOW_DATA_COLLECTION_NAME]
return str(col.insert_one({
DataStoreDocumentSection.Meta:
payload if isinstance(payload, dict) else {},
DataStoreDocumentSection.Data: {}
}).inserted_id)
except ConnectionFailure:
raise DataStoreNotConnected()
|
Adds a new document to the data store and returns its id.
Args:
payload (dict): Dictionary of initial data that should be stored
in the new document in the meta section.
Raises:
DataStoreNotConnected: If the data store is not connected to the server.
Returns:
str: The id of the newly created document.
|
juraj-google-style
|
def _countIdentities(self, nodes):
return len([x for x in nodes if x.op == 'Identity'])
|
Count the number of "Identity" op types in the list of proto nodes.
Args:
nodes: NodeDefs of the graph.
Returns:
The number of nodes with op type "Identity" found.
|
github-repos
|
def add_comment(self, app_id, record_id, field_id, message):
self._swimlane.request('post', 'app/{0}/record/{1}/{2}/comment'.format(app_id, record_id, field_id), json={'message': message, 'createdDate': pendulum.now().to_rfc3339_string()})
|
Directly add a comment to a record without retrieving the app or record first
Warnings:
Does not perform any app, record, or field ID validation
Args:
app_id (str): Full App ID string
record_id (str): Full parent Record ID string
field_id (str): Full field ID to target reference field on parent Record string
message (str): New comment message body
|
codesearchnet
|
def load_user_config(vcs):
config_path = os.path.join(vcs.path, 'eci.yaml')
if (not os.path.exists(config_path)):
raise ConfigNotFoundError
with open(config_path, 'r') as f:
try:
config = yaml.safe_load(f)
except yaml.YAMLError:
raise ConfigFormatError
if (not isinstance(config, dict)):
raise ConfigFormatError
for (k, v) in _default_config.iteritems():
config.setdefault(k, v)
for (k, v) in _config_types.iteritems():
if (not isinstance(config[k], v)):
raise ConfigFormatError
return config
|
Load the user config
Args:
vcs (easyci.vcs.base.Vcs) - the vcs object for the current project
Returns:
dict - the config
Raises:
ConfigFormatError
ConfigNotFoundError
|
codesearchnet
|
def should_collapse(self, value: Any, name: Optional[str], root_path: KeyPath, parent: Any, collapse_level: Optional[int]=1, uncollapse: Union[KeyPathSet, base.NodeFilter]=None) -> bool:
if collapse_level is None or collapse_level > 0:
return False
if callable(uncollapse):
return not uncollapse(root_path, value, parent)
if root_path in uncollapse:
return False
if name is not None and isinstance(value, (bool, int, float, str, type(None))):
return False
return True
|
Returns True if the object should be collapsed.
Args:
value: The value to render.
name: The referred field name of the value.
root_path: The root path of the value.
parent: The parent of the value.
collapse_level: The level of collapsing. If 0, the object will be
collapsed (without showing its sub-nodes). If 1, the immediate sub-nodes
will be shown in collapsed form. If None, all sub-tree will be shown.
uncollapse: Individual nodes to uncollapse. It can be a KeyPathSet or a
function that takes (root_path, value, parent) and returns a KeyPathSet.
Returns:
True if the object should be collapsed.
|
github-repos
|
def _peer_get_bfd(self, tx, rx, multiplier):
tx = self._callback(tx, handler='get_config')
rx = self._callback(rx, handler='get_config')
multiplier = self._callback(multiplier, handler='get_config')
tx = pynos.utilities.return_xml(str(tx))
rx = pynos.utilities.return_xml(str(rx))
multiplier = pynos.utilities.return_xml(str(multiplier))
config = pynos.utilities.merge_xml(tx, rx)
return pynos.utilities.merge_xml(config, multiplier)
|
Get and merge the `bfd` config from global BGP.
You should not use this method.
You probably want `BGP.bfd`.
Args:
tx: XML document with the XML to get the transmit interval.
rx: XML document with the XML to get the receive interval.
multiplier: XML document with the XML to get the interval
multiplier.
Returns:
Merged XML document.
Raises:
None
|
codesearchnet
|
def adjoint(self, name: str='adjoint') -> 'LinearOperator':
if self.is_self_adjoint is True:
return self
with self._name_scope(name):
return self._linop_adjoint()
|
Returns the adjoint of the current `LinearOperator`.
Given `A` representing this `LinearOperator`, return `A*`.
Note that calling `self.adjoint()` and `self.H` are equivalent.
Args:
name: A name for this `Op`.
Returns:
`LinearOperator` which represents the adjoint of this `LinearOperator`.
|
github-repos
|
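A usage sketch with a concrete tf.linalg LinearOperator subclass, which is the framework this method belongs to.
import tensorflow as tf

matrix = tf.constant([[1.0, 2.0], [3.0, 4.0]])
operator = tf.linalg.LinearOperatorFullMatrix(matrix)

adjoint_op = operator.adjoint()   # equivalent to operator.H
print(adjoint_op.to_dense())      # transpose (and conjugate for complex dtypes)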
def slice_inputs(indices_dataset, inputs):
inputs = array_slicing.convert_to_sliceable(self._inputs, target_backend='tensorflow')
inputs = tree.lists_to_tuples(inputs)
dataset = tf.data.Dataset.zip((indices_dataset, tf.data.Dataset.from_tensors(inputs).repeat()))
def grab_batch(i, data):
def grab_one(x):
if isinstance(x, array_slicing.TensorflowSparseWrapper):
return array_slicing.slice_tensorflow_sparse_wrapper(x, i)
if isinstance(x, (list, tuple, dict)):
return None
if tf.is_tensor(x):
return tf.gather(x, i, axis=0)
return x
return tree.traverse(grab_one, data)
dataset = dataset.map(grab_batch, num_parallel_calls=tf.data.AUTOTUNE)
options = tf.data.Options()
options.experimental_optimization.apply_default_optimizations = False
if self._shuffle:
options.experimental_external_state_policy = tf.data.experimental.ExternalStatePolicy.IGNORE
dataset = dataset.with_options(options)
return dataset
|
Slice inputs into a Dataset of batches.
Given a Dataset of batch indices and the unsliced inputs,
this step slices the inputs in a parallelized fashion
and produces a dataset of input batches.
Args:
indices_dataset: A Dataset of batched indices.
inputs: A python data structure that contains the inputs,
targets, and possibly sample weights.
Returns:
A Dataset of input batches matching the batch indices.
|
github-repos
|