code | docstring | source
---|---|---|
def get_instances(serials):
_validate_device_existence(serials)
results = []
for s in serials:
results.append(AndroidDevice(s))
return results
|
Create AndroidDevice instances from a list of serials.
Args:
serials: A list of android device serials.
Returns:
A list of AndroidDevice objects.
|
github-repos
|
def Extract(self, components):
rundll_index = -1
for index, component in enumerate(components):
if component.lower().endswith("rundll32.exe"):
rundll_index = index
if rundll_index == -1:
return []
components = components[(rundll_index + 1):]
last_component = components[-1].rsplit(",", 1)[0]
extracted_path = " ".join(components[0:-1] + [last_component])
return [extracted_path]
|
Extracts interesting paths from a given path.
Args:
components: Source string represented as a list of components.
Returns:
A list of extracted paths (as strings).
|
juraj-google-style
|
def company(self, **kwargs):
path = self._get_path('company')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Search for companies by name.
Args:
query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
Returns:
A dict representation of the JSON returned from the API.
|
juraj-google-style
|
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(Certificate, self).read(istream, kmip_version=kmip_version)
tstream = BytearrayStream(istream.read(self.length))
self.certificate_type = CertificateType()
self.certificate_value = CertificateValue()
self.certificate_type.read(tstream, kmip_version=kmip_version)
self.certificate_value.read(tstream, kmip_version=kmip_version)
self.is_oversized(tstream)
|
Read the data encoding the Certificate object and decode it into its
constituent parts.
Args:
istream (Stream): A data stream containing encoded object data,
supporting a read method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
|
codesearchnet
|
def deconv_elems_1d(x, factor, out_depth=None):
out_depth = out_depth or x.get_shape().as_list()[-1]
x = tf.expand_dims(x, 1)
x = layers().Conv2DTranspose(
filters=out_depth,
kernel_size=(1, factor),
strides=(1, factor),
padding="valid",
data_format="channels_last",
)(x)
x = tf.squeeze(x, 1)
return x
|
Increase the length and change the dimensionality.
Expand/project each position of dim depth of the input into
factor*tokens of dim out_depth
Args:
x (tf.Tensor): shape [batch_size, length, depth]
factor (int): Multiplicative factor for each token.
out_depth (int): Output depth (if None, keep depth constant)
Returns:
tf.Tensor: shape [batch_size, length*factor, out_depth]
|
juraj-google-style
|
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
if input_length is None:
return None
assert padding in {'same', 'valid', 'full'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if padding == 'same':
output_length = input_length
elif padding == 'valid':
output_length = input_length - dilated_filter_size + 1
elif padding == 'full':
output_length = input_length + dilated_filter_size - 1
return (output_length + stride - 1) // stride
|
Determines output length of a convolution given input length.
Args:
input_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full".
stride: integer.
dilation: dilation rate, integer.
Returns:
The output length (integer).
|
github-repos
|
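A quick worked example for the `conv_output_length` entry above: this is a usage sketch that assumes the function as shown (with the integer division by `stride` on the final line) is in scope; the sizes are chosen purely for illustration.
# Usage sketch for conv_output_length (assumes the function above is in scope).
# A 1-D input of length 10, kernel size 3, stride 2:
print(conv_output_length(10, 3, padding='same', stride=2))    # 5
print(conv_output_length(10, 3, padding='valid', stride=2))   # 4
print(conv_output_length(10, 3, padding='full', stride=2))    # 6
# With dilation=2 the effective kernel size is 3 + (3 - 1) * (2 - 1) = 5:
print(conv_output_length(10, 3, padding='valid', stride=1, dilation=2))   # 6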
def wind_direction(self, value=999.0):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `wind_direction`'.format(value))
if value < 0.0:
raise ValueError('value need to be greater or equal 0.0 '
'for field `wind_direction`')
if value > 360.0:
raise ValueError('value need to be smaller 360.0 '
'for field `wind_direction`')
self._wind_direction = value
|
Corresponds to IDD Field `wind_direction`
Args:
value (float): value for IDD Field `wind_direction`
Unit: degrees
value >= 0.0
value <= 360.0
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
class Sum(Metric):
def __init__(self, name='sum', dtype=None):
super().__init__(name=name, dtype=dtype)
self.total = self.add_variable(shape=(), initializer=initializers.Zeros(), dtype=self.dtype, name='total')
def update_state(self, values, sample_weight=None):
values, _ = reduce_to_samplewise_values(values, sample_weight, reduce_fn=ops.sum, dtype=self.dtype)
self.total.assign_add(ops.sum(values))
def reset_state(self):
self.total.assign(0)
def result(self):
return ops.cast(self.total, self.dtype)
|
Compute the (weighted) sum of the given values.
For example, if `values` is `[1, 3, 5, 7]` then their sum is 16.
If `sample_weight` was specified as `[1, 1, 0, 0]` then the sum would be 4.
This metric creates one variable, `total`.
This is ultimately returned as the sum value.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Example:
>>> m = metrics.Sum()
>>> m.update_state([1, 3, 5, 7])
>>> m.result()
16.0
>>> m = metrics.Sum()
>>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
>>> m.result()
4.0
|
github-repos
|
def to_grid_locator(latitude, longitude, precision='square'):
if precision not in ('square', 'subsquare', 'extsquare'):
raise ValueError('Unsupported precision value %r' % precision)
if not -90 <= latitude <= 90:
raise ValueError('Invalid latitude value %r' % latitude)
if not -180 <= longitude <= 180:
raise ValueError('Invalid longitude value %r' % longitude)
latitude += 90.0
longitude += 180.0
locator = []
field = int(longitude / LONGITUDE_FIELD)
locator.append(chr(field + 65))
longitude -= field * LONGITUDE_FIELD
field = int(latitude / LATITUDE_FIELD)
locator.append(chr(field + 65))
latitude -= field * LATITUDE_FIELD
square = int(longitude / LONGITUDE_SQUARE)
locator.append(str(square))
longitude -= square * LONGITUDE_SQUARE
square = int(latitude / LATITUDE_SQUARE)
locator.append(str(square))
latitude -= square * LATITUDE_SQUARE
if precision in ('subsquare', 'extsquare'):
subsquare = int(longitude / LONGITUDE_SUBSQUARE)
locator.append(chr(subsquare + 97))
longitude -= subsquare * LONGITUDE_SUBSQUARE
subsquare = int(latitude / LATITUDE_SUBSQUARE)
locator.append(chr(subsquare + 97))
latitude -= subsquare * LATITUDE_SUBSQUARE
if precision == 'extsquare':
extsquare = int(longitude / LONGITUDE_EXTSQUARE)
locator.append(str(extsquare))
extsquare = int(latitude / LATITUDE_EXTSQUARE)
locator.append(str(extsquare))
return ''.join(locator)
|
Calculate Maidenhead locator from latitude and longitude.
Args:
latitude (float): Position's latitude
longitude (float): Position's longitude
precision (str): Precision with which to generate the locator string
Returns:
str: Maidenhead locator for latitude and longitude
Raises:
ValueError: Invalid precision identifier
ValueError: Invalid latitude or longitude value
|
juraj-google-style
|
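For readers unfamiliar with the Maidenhead system, the sketch below shows the first two encoding steps of `to_grid_locator` as a self-contained function. The module constants are not included in the entry above, so the standard Maidenhead cell sizes (20°x10° fields, 2°x1° squares) are assumed here.
# Minimal sketch, assuming the standard Maidenhead cell sizes for the
# constants referenced (but not shown) in to_grid_locator above.
LONGITUDE_FIELD, LATITUDE_FIELD = 20.0, 10.0    # fields: 20 deg x 10 deg
LONGITUDE_SQUARE, LATITUDE_SQUARE = 2.0, 1.0    # squares: 2 deg x 1 deg

def square_locator(latitude, longitude):
    """Return the 4-character ('square' precision) Maidenhead locator."""
    latitude += 90.0
    longitude += 180.0
    locator = chr(int(longitude / LONGITUDE_FIELD) + 65)
    locator += chr(int(latitude / LATITUDE_FIELD) + 65)
    locator += str(int((longitude % LONGITUDE_FIELD) / LONGITUDE_SQUARE))
    locator += str(int((latitude % LATITUDE_FIELD) / LATITUDE_SQUARE))
    return locator

print(square_locator(51.5, -0.12))   # IO91 (central London)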
def __init__(self, pyof_class, items=None):
self._pyof_class = pyof_class
super().__init__(items)
|
Create a FixedTypeList with the parameters that follow.
Args:
pyof_class (:obj:`type`): Class of the items to be stored.
items (iterable, ``pyof_class``): Items to be stored.
|
juraj-google-style
|
def run_build_model(self, num_runs=5, silent=False, force_rerun=False):
self.mutation_ddG_avg_outfile = 'Average_{}.fxout'.format(op.splitext(self.repaired_pdb_outfile)[0])
self.mutation_ddG_raw_outfile = 'Raw_{}.fxout'.format(op.splitext(self.repaired_pdb_outfile)[0])
foldx_build_model = 'foldx --command=BuildModel --pdb={} --mutant-file={} --numberOfRuns={}'.format(self.repaired_pdb_outfile, op.basename(self.mutation_infile), num_runs)
ssbio.utils.command_runner(shell_command=foldx_build_model, force_rerun_flag=force_rerun, silent=silent, outfile_checker=self.mutation_ddG_avg_outfile, cwd=self.foldx_dir)
|
Run FoldX BuildModel command with a mutant file input.
Original command::
foldx --command=BuildModel --pdb=4bxi_Repair.pdb --mutant-file=individual_list.txt --numberOfRuns=5
Args:
num_runs (int): Number of BuildModel runs to execute. Defaults to 5.
silent (bool): If FoldX output should be silenced from printing to the shell.
force_rerun (bool): If FoldX BuildModel should be rerun even if the results file exists.
|
codesearchnet
|
def replace(template, **replacements):
if not isinstance(template, str):
raise ValueError('Expected string template, got %s' % type(template))
for k in replacements:
replacements[k] = _convert_to_ast(replacements[k])
template_str = parser.STANDARD_PREAMBLE + textwrap.dedent(template)
nodes = parser.parse(template_str, preamble_len=parser.STANDARD_PREAMBLE_LEN, single_node=False)
results = []
for node in nodes:
node = ReplaceTransformer(replacements).visit(node)
if isinstance(node, (list, tuple)):
results.extend(node)
else:
results.append(node)
results = [qual_names.resolve(r) for r in results]
return results
|
Replaces placeholders in a Python template.
AST Name and Tuple nodes always receive the context that is inferred from
the template. However, when replacing more complex nodes (that can potentially
contain Name children), the caller is responsible for setting the
appropriate context.
Args:
template: A string representing Python code. Any symbol name that appears
in the template code can be used as a placeholder.
**replacements: A mapping from placeholder names to (lists of) AST nodes
that these placeholders will be replaced by. String values are also
supported as a shorthand for AST Name nodes with the respective ID.
Returns:
An AST node or list of AST nodes with the replacements made. If the
template was a function, a list will be returned. If the template was a
node, the same node will be returned. If the template was a string, an
AST node will be returned (a `Module` node in the case of a multi-line
string, an `Expr` node otherwise).
Raises:
ValueError: if the arguments are incorrect.
|
github-repos
|
def Get(self, request, global_params=None):
config = self.GetMethodConfig('Get')
return self._RunMethod(config, request, global_params=global_params)
|
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
Args:
request: (CloudbuildOperationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
|
github-repos
|
def FromDictionary(cls, msg_dict):
level = msg_dict.get('level')
msg = msg_dict.get('message')
now = msg_dict.get('now_time')
created = msg_dict.get('created_time')
count = msg_dict.get('count', 1)
msg_id = msg_dict.get('id', 0)
new_msg = ServiceMessage(level, msg, msg_id, created, now)
if count > 1:
new_msg.count = count
return new_msg
|
Create from a dictionary with kv pairs.
Args:
msg_dict (dict): A dictionary with information as created by to_dict()
Returns:
ServiceMessage: the converted message
|
juraj-google-style
|
def get_symmetrized_structure(self):
ds = self.get_symmetry_dataset()
sg = SpacegroupOperations(self.get_space_group_symbol(), self.get_space_group_number(), self.get_symmetry_operations())
return SymmetrizedStructure(self._structure, sg, ds['equivalent_atoms'], ds['wyckoffs'])
|
Get a symmetrized structure. A symmetrized structure is one where the
sites have been grouped into symmetrically equivalent groups.
Returns:
:class:`pymatgen.symmetry.structure.SymmetrizedStructure` object.
|
codesearchnet
|
def score_cosine(self, term1, term2, **kwargs):
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.cosine(t1_kde, t2_kde)
|
Compute a weighting score based on the cosine distance between the
kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
|
juraj-google-style
|
def Open(self, path, ascii_codepage='cp1252'):
path_specification = self._path_resolver.ResolvePath(path)
if path_specification is None:
return None
return self._OpenPathSpec(path_specification)
|
Opens the Windows Registry file specified by the path.
Args:
path (str): path of the Windows Registry file.
ascii_codepage (Optional[str]): ASCII string codepage.
Returns:
WinRegistryFile: Windows Registry file or None.
|
juraj-google-style
|
def _ScanFileSystemForWindowsDirectory(self, path_resolver):
result = False
for windows_path in self._WINDOWS_DIRECTORIES:
windows_path_spec = path_resolver.ResolvePath(windows_path)
result = (windows_path_spec is not None)
if result:
self._windows_directory = windows_path
break
return result
|
Scans a file system for a known Windows directory.
Args:
path_resolver (WindowsPathResolver): Windows path resolver.
Returns:
bool: True if a known Windows directory was found.
|
codesearchnet
|
def _encode_constraints(self, builder: expressions.Builder, element_definition: ElementDefinition) -> List[validation_pb2.SqlRequirement]:
result: List[validation_pb2.SqlRequirement] = []
constraints: List[Constraint] = cast(Any, element_definition).constraint
root_constraints: List[Constraint] = []
if isinstance(builder.return_type, _fhir_path_data_types.StructureDataType):
root_constraints = cast(Any, builder.return_type.root_element_definition).constraint
dedup_constraint_keys: Set[str] = set()
for constraint in itertools.chain(constraints, root_constraints):
constraint_key: str = cast(Any, constraint).key.value
if constraint_key in dedup_constraint_keys:
continue
dedup_constraint_keys.add(constraint_key)
if constraint_key in self._options.skip_keys:
continue
fhir_path_expression: str = cast(Any, constraint).expression.value
if '%resource' in fhir_path_expression or 'comparator' in fhir_path_expression:
continue
element_definition_path = self._abs_path_invocation(builder)
constraint_key_column_name: str = _key_to_sql_column_name(constraint_key)
column_name_base: str = _path_to_sql_column_name(element_definition_path)
column_name = f'{column_name_base}_{constraint_key_column_name}'
if column_name in self._requirement_column_names:
self._error_reporter.report_fhir_path_error(element_definition_path, fhir_path_expression, f'Duplicate FHIRPath requirement: {column_name}.')
continue
if cast(Any, constraint).severity.value == 0:
self._error_reporter.report_fhir_path_error(element_definition_path, fhir_path_expression, 'Constraint severity must be set.')
continue
if self._options.expr_replace_list:
for replacement in self._options.expr_replace_list.replacement:
if (not replacement.element_path or replacement.element_path == element_definition_path) and replacement.expression_to_replace == fhir_path_expression:
fhir_path_expression = replacement.replacement_expression
struct_def = cast(_fhir_path_data_types.StructureDataType, builder.get_root_builder().return_type)
result_constraint = self._encode_fhir_path_constraint(struct_def, fhir_path_expression, builder)
if result_constraint is None:
continue
if any((node.return_type.url in ('http:
self._error_reporter.report_fhir_path_error(self._abs_path_invocation(builder), result_constraint.builder.fhir_path, 'Constraints involving Extension or Resource fields are not supported. Unable to enforce this constraint because it references a field with an unsupported "Extension" or "Resource" type field which is not included in the database schema.')
continue
type_ = validation_pb2.ValidationType.VALIDATION_TYPE_FHIR_PATH_CONSTRAINT
severity = cast(Any, constraint).severity
severity_value_field = severity.DESCRIPTOR.fields_by_name.get('value')
severity_str = codes.enum_value_descriptor_to_code_string(severity_value_field.enum_type.values_by_number[severity.value])
try:
validation_severity = validation_pb2.ValidationSeverity.Value(f'SEVERITY_{severity_str.upper()}')
except ValueError:
self._error_reporter.report_fhir_path_warning(element_definition_path, fhir_path_expression, f'Unknown validation severity conversion: {severity_str}.')
validation_severity = validation_pb2.ValidationSeverity.SEVERITY_WARNING
requirement = validation_pb2.SqlRequirement(column_name=column_name, sql_expression=result_constraint.sql, fhir_path_sql_expression=result_constraint.fhir_path_sql, severity=validation_severity, type=type_, element_path=element_definition_path, description=cast(Any, constraint).human.value, fhir_path_key=constraint_key, fhir_path_expression=result_constraint.builder.fhir_path, fields_referenced_by_expression=sorted(result_constraint.builder.node.find_paths_referenced()))
self._requirement_column_names.add(column_name)
result.append(requirement)
return result
|
Returns a list of `SqlRequirement`s for FHIRPath constraints.
Args:
builder: The builder containing the element to encode constraints for.
element_definition: Element definition passed from the parent.
Returns:
A list of `SqlRequirement`s expressing FHIRPath constraints defined on the
`element_definition` and `builder` if applicable.
|
github-repos
|
def create(self, uri, local_path):
matches = self.schema_pattern.search(uri)
if not matches:
logger.error("Unknown uri schema: '%s'. Added schemas: %s", uri, list(self.handlers.keys()))
return None
schema = matches.group(1)
url = matches.group(2)
return self.handlers[schema](url, local_path)
|
Create a project handler
Args:
uri (str): schema://something formatted uri
local_path (str): the project configs directory
Returns:
ProjectHandler derived class instance
|
juraj-google-style
|
def _SanitizedArgSpec(obj):
output_string = ''
unsanitized_arg_spec = tf_inspect.getargspec(obj)
for clean_attr in ('args', 'varargs', 'keywords'):
output_string += '%s=%s, ' % (clean_attr, getattr(unsanitized_arg_spec, clean_attr))
if unsanitized_arg_spec.defaults:
sanitized_defaults = []
for val in unsanitized_arg_spec.defaults:
str_val = str(val)
if ' at 0x' in str_val:
sanitized_defaults.append('%s instance>' % str_val.split(' at ')[0])
else:
sanitized_defaults.append(str_val)
output_string += 'defaults=%s, ' % sanitized_defaults
else:
output_string += 'defaults=None'
return output_string
|
Get an ArgSpec string that is free of addresses.
We have callables as function arg defaults. This results in addresses in
getargspec output. This function returns a sanitized string representation
of the argspec, free of addresses.
Args:
obj: A Python routine for which to create the sanitized argspec.
Returns:
string, a string representation of the argspec.
|
github-repos
|
def timTuVi(cuc, ngaySinhAmLich):
cungDan = 3
cucBanDau = cuc
if cuc not in [2, 3, 4, 5, 6]:
raise Exception("Số cục phải là 2, 3, 4, 5, 6")
while cuc < ngaySinhAmLich:
cuc += cucBanDau
cungDan += 1
saiLech = cuc - ngaySinhAmLich
if saiLech % 2 == 1:
saiLech = -saiLech
return dichCung(cungDan, saiLech)
|
Find the position of the Tử Vi star.
Args:
cuc (int): The cục number; must be one of 2, 3, 4, 5, 6.
ngaySinhAmLich (int): Day of birth in the lunar calendar.
Returns:
The palace position returned by dichCung.
Raises:
Exception: If `cuc` is not one of 2, 3, 4, 5, 6.
|
juraj-google-style
|
def filter_lines(code, line_spec):
code_lines = code.splitlines()
line_specs = [line_denom.strip() for line_denom in line_spec.split(',')]
single_lines = set(map(int, filter((lambda line: ('-' not in line)), line_specs)))
line_ranges = set(filter((lambda line: ('-' in line)), line_specs))
for line_range in line_ranges:
(begin, end) = line_range.split('-')
if (not begin):
begin = 1
if (not end):
end = len(code_lines)
single_lines.update(range(int(begin), (int(end) + 1)))
keep_lines = []
for (line_number, line) in enumerate(code_lines, 1):
if (line_number in single_lines):
keep_lines.append(line)
return '\n'.join(keep_lines)
|
Removes all lines not matching the line_spec.
Args:
code: The code to filter
line_spec: The line specification. This should be a comma-separated
string of lines or line ranges, e.g. 1,2,5-12,15
If a line range starts with -, all lines up to this line are
included.
If a line range ends with -, all lines from this line on are
included.
All lines mentioned (ranges are inclusive) are used.
Returns:
Only the specified lines.
|
codesearchnet
|
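To illustrate the line-spec format described in the `filter_lines` entry above, here is a usage sketch; it assumes the function as shown is in scope, and the sample string is invented.
# Usage sketch (assumes filter_lines from the entry above is in scope).
sample = "alpha\nbeta\ngamma\ndelta\nepsilon"
print(filter_lines(sample, "1,3-4"))   # keeps lines 1, 3 and 4: alpha, gamma, delta
print(filter_lines(sample, "-2"))      # open start: lines 1-2 (alpha, beta)
print(filter_lines(sample, "4-"))      # open end: lines 4-5 (delta, epsilon)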
def post_create_app(cls, app, **settings):
super(MarshmallowAwareApp, cls).post_create_app(app, **settings)
marsh.init_app(app)
return app
|
Automatically register and init the Flask Marshmallow extension.
Args:
app (flask.Flask): The application instance in which to initialize
Flask Marshmallow upon.
Kwargs:
settings (dict): The settings passed to this method from the
parent app.
Returns:
flask.Flask: The Flask application that was passed in.
|
codesearchnet
|
def _ln_rnn(x, gamma, beta):
mean, variance = tf.nn.moments(x, axes=[len(x.get_shape()) - 1], keep_dims=True)
x = (x - mean) / tf.sqrt(variance + tf.sg_eps)
return gamma * x + beta
|
r"""Applies layer normalization.
Normalizes the last dimension of the tensor `x`.
Args:
x: A `Tensor`.
gamma: A constant `Tensor`. Scale parameter. Default is 1.
beta: A constant `Tensor`. Offset parameter. Default is 0.
Returns:
A `Tensor` with the same shape as `x`.
|
juraj-google-style
|
def batch_decode(self, sequences: Union[List[int], List[List[int]], 'np.ndarray', 'torch.Tensor', 'tf.Tensor'], skip_special_tokens: bool=False, clean_up_tokenization_spaces: Optional[bool]=None, **kwargs) -> List[str]:
return [self.decode(seq, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs) for seq in sequences]
|
Convert a list of lists of token ids into a list of strings by calling decode.
Args:
sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (`bool`, *optional*):
Whether or not to clean up the tokenization spaces. If `None`, will default to
`self.clean_up_tokenization_spaces`.
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`List[str]`: The list of decoded sentences.
|
github-repos
|
def to_json(self, from_api: dict=None, from_json: dict=None, parents: dict={}) -> dict:
if from_api:
from_json = deepcopy(from_api)
for key, value in from_json.items():
if not isinstance(value, dict):
continue
if '$ref' in value:
ref = value['$ref']
parents.setdefault(ref, 0)
if parents[ref] < self.recursion_depth:
parents[ref] += 1
from_json[key] = self.to_json(from_api=self.api_document['schemas'][ref]['properties'], parents=parents)
parents[ref] -= 1
else:
from_json[key] = None
else:
self.to_json(from_json=value, parents=parents)
return from_json
|
Returns a Discovery API Document schema with all references extrapolated.
Recursively crawls the discovery document reference tree to build document.
Leverages recursion depth passed in constructor to stop if necessary.
Args:
from_api: the api schema to extrapolate
from_json: new object with references replaced, not passed by caller
parents: used to track recursion depth for a specific schema branch
Returns:
A Discovery API Document schema object.
|
github-repos
|
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is None:
return [1] + [0] * len(token_ids_0) + [1]
return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]
|
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
|
github-repos
|
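The mask layout produced by `get_special_tokens_mask` above is easiest to see on small inputs. The sketch below reproduces the same arithmetic as a standalone function, independent of any tokenizer class; the token id values are arbitrary, and the boundary tokens in the comments are written as `<s>`/`</s>` purely for illustration, mirroring the `[1, 1]` pair of adjacent special tokens in the code above.
# Standalone sketch of the mask arithmetic above (token ids are arbitrary).
def special_tokens_mask(token_ids_0, token_ids_1=None):
    if token_ids_1 is None:
        # <s> A </s>
        return [1] + [0] * len(token_ids_0) + [1]
    # <s> A </s></s> B </s>
    return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]

print(special_tokens_mask([10, 11, 12]))         # [1, 0, 0, 0, 1]
print(special_tokens_mask([10, 11], [20, 21]))   # [1, 0, 0, 1, 1, 0, 0, 1]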
def price(self, valuation_date, market, model=None, pricing_context=None, name=None):
del model, pricing_context
name = name or self._name + '_price'
with tf.name_scope(name):
discount_curve = market.discount_curve
reference_curve = market.reference_curve
libor_rate = rc.get_rate_index(market, self._start_date, rc.RateIndexType.LIBOR, dtype=self._dtype)
libor_rate = tf.repeat(tf.convert_to_tensor(libor_rate, dtype=self._dtype), self._num_cashflows)
discount_factors = discount_curve.get_discount_factor(self._payment_dates)
forward_rates = reference_curve.get_forward_rate(self._accrual_start_date, self._accrual_end_date, self._daycount_fractions)
forward_rates = tf.where(self._daycount_fractions > 0.0, forward_rates, tf.zeros_like(forward_rates))
forward_rates = tf.where(self._coupon_end_dates < valuation_date, tf.constant(0.0, dtype=self._dtype), tf.where(self._coupon_start_dates < valuation_date, libor_rate, forward_rates))
coupon_rate = self._coupon_multiplier * (forward_rates + self._coupon_basis)
cashflow_pvs = self._notional * (self._daycount_fractions * coupon_rate * discount_factors)
return tf.math.reduce_sum(tf.reshape(cashflow_pvs, (self._batch_size, self._num_cashflows)), axis=1)
|
Returns the present value of the stream on the valuation date.
Args:
valuation_date: A scalar `DateTensor` specifying the date on which
valuation is being desired.
market: A namedtuple of type `InterestRateMarket` which contains the
necessary information for pricing the cashflow stream.
model: Reserved for future use.
pricing_context: Additional context relevant for pricing.
name: Python str. The name to give to the ops created by this function.
Default value: `None`, which maps to `<name>_price`.
Returns:
A Rank 1 `Tensor` of real type containing the modeled price of each stream
contract based on the input market data.
|
github-repos
|
def Serialize(self, writer):
super(SpentCoinState, self).Serialize(writer)
writer.WriteUInt256(self.TransactionHash)
writer.WriteUInt32(self.TransactionHeight)
writer.WriteVarInt(len(self.Items))
for item in self.Items:
writer.WriteUInt16(item.index)
writer.WriteUInt32(item.height)
|
Serialize full object.
Args:
writer (neo.IO.BinaryWriter):
|
juraj-google-style
|
def stringify_default(default: Any) -> str:
if isinstance(default, bool):
return f'`{default}`'
elif isinstance(default, enum.Enum):
return f'`{str(default)}`'
elif isinstance(default, int):
return str(default)
elif isinstance(default, float):
result = str(default)
return str(round(default, 2)) if len(result) > 6 else result
elif isinstance(default, str):
return str(default) if default.isnumeric() else f'`"{default}"`'
elif isinstance(default, type):
return f'`{default.__name__}`'
else:
return f'`{default}`'
|
Returns the string representation of a default value, as used in docstrings: numbers are left as is, all other
objects are in backticks.
Args:
default (`Any`): The default value to process
Returns:
`str`: The string representation of that default.
|
github-repos
|
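A few example inputs make the formatting rules in `stringify_default` above easier to read; this usage sketch assumes the function as shown is in scope.
# Usage sketch (assumes stringify_default from the entry above is in scope).
print(stringify_default(True))       # `True`
print(stringify_default(7))          # 7
print(stringify_default(3.14159))    # 3.14  (floats printed longer than 6 chars are rounded)
print(stringify_default("adamw"))    # `"adamw"`
print(stringify_default("123"))      # 123   (numeric strings are left bare)
print(stringify_default(dict))       # `dict`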
def from_shape(cls, shape):
if shape.__class__ is cls:
return shape
else:
error = linearization_error(shape.nodes)
if error < _ERROR_VAL:
linearized = cls(shape, error)
return linearized
else:
return shape
|
Try to linearize a curve (or an already linearized curve).
Args:
shape (Union[SubdividedCurve, \
~bezier._geometric_intersection.Linearization]): A curve or an
already linearized curve.
Returns:
Union[SubdividedCurve, \
~bezier._geometric_intersection.Linearization]: The
(potentially linearized) curve.
|
juraj-google-style
|
def db_dp010(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `db_dp010`'.format(value))
self._db_dp010 = value
|
Corresponds to IDD Field `db_dp010`
Mean coincident dry-bulb temperature corresponding to the dew-point
temperature at 1.0% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `db_dp010`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def transition_complete(self, pipeline_key):
def txn():
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
logging.warning(
'Tried to mark pipeline ID "%s" as complete but it does not exist.',
pipeline_key.name())
raise db.Rollback()
if pipeline_record.status not in (
_PipelineRecord.WAITING, _PipelineRecord.RUN):
logging.warning(
'Tried to mark pipeline ID "%s" as complete, found bad state: %s',
pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
pipeline_record.status = _PipelineRecord.DONE
pipeline_record.finalized_time = self._gettime()
pipeline_record.put()
db.run_in_transaction(txn)
|
Marks the given pipeline as complete.
Does nothing if the pipeline is no longer in a state that can be completed.
Args:
pipeline_key: db.Key of the _PipelineRecord that has completed.
|
juraj-google-style
|
def add_or_update(self, section, key, value):
updates = self.update(section, key, value)
if (updates == 0):
self.add(section, key, value)
return updates
|
Update the key or, if no previous value existed, add it.
Args:
section: The section the key belongs to.
key: The key to update or add.
value: The value to set.
Returns:
int: Number of updated lines.
|
codesearchnet
|
def delete_many(self, keys, noreply=None):
if (not keys):
return True
if (noreply is None):
noreply = self.default_noreply
cmds = []
for key in keys:
cmds.append((((b'delete ' + self.check_key(key)) + (b' noreply' if noreply else b'')) + b'\r\n'))
self._misc_cmd(cmds, b'delete', noreply)
return True
|
A convenience function to delete multiple keys.
Args:
keys: list(str), the list of keys to delete.
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True. If an exception is raised then all, some or none of the keys
may have been deleted. Otherwise all the keys have been sent to
memcache for deletion and if noreply is False, they have been
acknowledged by memcache.
|
codesearchnet
|
def changed(dirname, filename='.md5', args=None, glob=None):
root = Path(dirname)
if not root.exists():
return True
cachefile = root / filename
current_digest = cachefile.open().read() if cachefile.exists() else ""
_digest = digest(dirname, glob=glob)
if args and args.verbose:
print("md5:", _digest)
has_changed = current_digest != _digest
if has_changed:
with open(os.path.join(dirname, filename), 'w') as fp:
fp.write(_digest)
return has_changed
|
Has the content matching `glob` changed in `dirname`?
Args:
dirname: directory to measure
filename: filename to store checksum
args: optional namespace; if `args.verbose` is set, the digest is printed
glob: optional glob pattern passed to `digest`
Returns:
bool: True if the contents have changed (or `dirname` does not exist).
|
juraj-google-style
|
def __init__(self, client, base_path):
self._client = client
self._base_path = base_path
self._queue_path = posixpath.join(self._base_path, 'queue', '')
self._counter_path = posixpath.join(self._queue_path, 'counter')
self._ensure_counter()
self._ensure_queue()
|
Initialise the class.
Args:
client (:class:`consulate.Consul`): A :class:`consulate.Consul` instance.
base_path (str): the base path to use in Consul.
|
juraj-google-style
|
def save(self, force=False):
from time import time
from datetime import datetime
savefreq = TaskDB.get_option("savefreq", 2, int)
if self.lastsave is not None:
delta = (datetime.fromtimestamp(time()) -
datetime.fromtimestamp(self.lastsave))
elapsed = int(delta.total_seconds()/60)
else:
elapsed = savefreq + 1
if elapsed > savefreq or force:
if not writeable:
self.lastsave = time()
msg.std("Skipping database write to disk by setting.", 2)
return
import json
try:
entities, compkeys = _json_clean(self.entities)
jdb = {"entities": entities,
"compkeys": compkeys,
"uuids": self.uuids}
with open(self.dbpath, 'w') as f:
json.dump(jdb, f)
except:
from acorn.msg import err
import sys
err("{}: {}".format(*sys.exc_info()[0:2]))
raise
self.lastsave = time()
|
Serializes the database file to disk.
Args:
force (bool): when True, the elapsed time since last save is ignored
and the database is saved anyway (subject to global
:data:`writeable` setting).
|
juraj-google-style
|
def sg_argmax(tensor, opt):
opt += tf.sg_opt(axis=tensor.get_shape().ndims-1)
return tf.argmax(tensor, opt.axis, opt.name)
|
r"""Returns the indices of the maximum values along the specified axis.
See `tf.argmax()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis: Target axis. Default is the last one.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
|
juraj-google-style
|
def all(self, data={}, **kwargs):
return super(Invoice, self).all(data, **kwargs)
|
Fetch all Invoice entities
Returns:
Dictionary of Invoice data
|
codesearchnet
|
def _format_variant(self, case_id, gemini_variant, individual_objs, index=0, add_all_info=False):
chrom = gemini_variant['chrom']
if (chrom.startswith('chr') or chrom.startswith('CHR')):
chrom = chrom[3:]
variant_dict = {'CHROM': chrom, 'POS': str(gemini_variant['start']), 'ID': gemini_variant['rs_ids'], 'REF': gemini_variant['ref'], 'ALT': gemini_variant['alt'], 'QUAL': gemini_variant['qual'], 'FILTER': gemini_variant['filter']}
variant = Variant(**variant_dict)
variant.update_variant_id(gemini_variant['variant_id'])
logger.debug('Creating a variant object of variant {0}'.format(variant.variant_id))
variant['index'] = index
self._add_most_severe_consequence(variant, gemini_variant)
self._add_impact_severity(variant, gemini_variant)
variant.start = int(gemini_variant['start'])
variant.stop = int(gemini_variant['end'])
if (self.variant_type == 'sv'):
variant.sv_type = gemini_variant['sub_type']
variant.stop = int(gemini_variant['end'])
self._add_sv_coordinates(variant)
else:
self._add_transcripts(variant, gemini_variant)
self._add_thousand_g(variant, gemini_variant)
self._add_exac(variant, gemini_variant)
self._add_gmaf(variant, gemini_variant)
if gemini_variant['cadd_scaled']:
variant.cadd_score = gemini_variant['cadd_scaled']
polyphen = gemini_variant['polyphen_pred']
if polyphen:
variant.add_severity('Polyphen', polyphen)
sift = gemini_variant['sift_pred']
if sift:
variant.add_severity('SIFT', sift)
self._add_hgnc_symbols(variant)
if (self.variant_type == 'snv'):
self._add_genes(variant)
self._add_consequences(variant)
if add_all_info:
self._add_genotypes(variant, gemini_variant, case_id, individual_objs)
if (self.variant_type == 'sv'):
self._add_genes(variant)
return variant
|
Make a puzzle variant from a gemini variant
Args:
case_id (str): related case id
gemini_variant (GeminiQueryRow): The gemini variant
individual_objs (list(dict)): A list of Individuals
index(int): The index of the variant
Returns:
variant (dict): A Variant object
|
codesearchnet
|
def _ConvertDictToObject(cls, json_dict):
class_type = json_dict.get('__type__', None)
if (not class_type):
return json_dict
if (class_type == 'bytes'):
return binascii.a2b_qp(json_dict['stream'])
if (class_type == 'tuple'):
return tuple(cls._ConvertListToObject(json_dict['values']))
if (class_type == 'collections.Counter'):
return cls._ConvertDictToCollectionsCounter(json_dict)
if (class_type == 'AttributeContainer'):
container_type = json_dict.get('__container_type__', None)
elif (class_type == 'PathSpec'):
return cls._ConvertDictToPathSpec(json_dict)
else:
raise ValueError('Unsupported class type: {0:s}'.format(class_type))
container_class = containers_manager.AttributeContainersManager.GetAttributeContainer(container_type)
if (not container_class):
raise ValueError('Unsupported container type: {0:s}'.format(container_type))
container_object = container_class()
supported_attribute_names = container_object.GetAttributeNames()
for (attribute_name, attribute_value) in iter(json_dict.items()):
if ((container_type not in ('event', 'event_data')) and (attribute_name not in supported_attribute_names)):
if (attribute_name not in ('__container_type__', '__type__')):
logger.debug('[ConvertDictToObject] unsupported attribute name: {0:s}.{1:s}'.format(container_type, attribute_name))
continue
if isinstance(attribute_value, dict):
attribute_value = cls._ConvertDictToObject(attribute_value)
elif isinstance(attribute_value, list):
attribute_value = cls._ConvertListToObject(attribute_value)
setattr(container_object, attribute_name, attribute_value)
return container_object
|
Converts a JSON dict into an object.
The dictionary of the JSON serialized objects consists of:
{
'__type__': 'AttributeContainer'
'__container_type__': ...
...
}
Here '__type__' indicates the object base type. In this case
'AttributeContainer'.
'__container_type__' indicates the attribute container type.
The rest of the elements of the dictionary make up the attributes.
Args:
json_dict (dict[str, object]): JSON serialized objects.
Returns:
AttributeContainer|dict|list|tuple: deserialized object.
Raises:
ValueError: if the class type or container type is not supported.
|
codesearchnet
|
def _deferred_pool_runner(has_chief, num_workers, initializer=None, share_gpu=True):
container = []
def get_or_create():
if not container:
cluster_spec = multi_worker_test_base.create_cluster_spec(has_chief=has_chief, num_workers=num_workers, num_ps=0, has_eval=False)
runner = multi_process_runner.MultiProcessPoolRunner(cluster_spec, initializer=initializer, share_gpu=share_gpu)
container.append(runner)
return container[0]
return get_or_create
|
Returns a callable that returns the pool runner.
It creates the pool runner only upon first invocation. This avoids creating it
when this file is imported.
Args:
has_chief: whether there should be a chief.
num_workers: the number of workers excluding the chief.
initializer: initializer of each process.
share_gpu: whether to share GPU between the workers.
Returns:
A callable that returns the runner.
|
github-repos
|
def normalize_version(version):
rv = []
for x in version.split("."):
try:
rv.append(int(x))
except ValueError:
for y in re.split("([0-9]+)", x):
if y == '':
continue
try:
rv.append(int(y))
except ValueError:
rv.append(y)
return rv
|
Helper function to normalize version.
Returns a comparable object.
Args:
version (str): version string, e.g. "0.1.0"
|
juraj-google-style
|
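The point of `normalize_version` returning a list is that lists compare numerically where raw version strings do not; the sketch below assumes the function as shown is in scope.
# Usage sketch (assumes normalize_version from the entry above is in scope).
print(normalize_version("0.1.0"))        # [0, 1, 0]
print(normalize_version("1.2.10rc3"))    # [1, 2, 10, 'rc', 3]
# Plain string comparison gets the ordering wrong; the normalized form does not.
print("1.2.10" > "1.2.9")                                        # False
print(normalize_version("1.2.10") > normalize_version("1.2.9"))  # True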
def __init__(self, use_resource_alias: bool=False, value_set_codes_table: Optional[str]=None, value_set_codes_definitions: Optional[fhir_package.FhirPackageManager]=None) -> None:
self._use_resource_alias = use_resource_alias
self._value_set_codes_table = value_set_codes_table
self._value_set_codes_definitions = value_set_codes_definitions
|
Creates a SparkSqlInterpreter.
Args:
use_resource_alias: Determines whether it is necessary to call the
resource table directly through an alias.
value_set_codes_table: The name of the database table containing value set
code definitions. Used when building SQL for memberOf expressions. If
given, value set definitions needed for memberOf expressions will be
retrieved from this table if they can not be found in
`value_set_codes_definitions`. If neither this nor
`value_set_codes_definitions` is given, no memberOf SQL will be
generated.
value_set_codes_definitions: A package manager containing value set
definitions which can be used to build SQL for memberOf expressions.
These value set definitions can be consulted in favor of using an
external `value_set_codes_table`. If neither this nor
`value_set_codes_definitions` is given, no memberOf SQL will be
generated.
|
github-repos
|
def tf_solve(self, fn_x, x_init, base_value, target_value, estimated_improvement=None):
return super(LineSearch, self).tf_solve(fn_x, x_init, base_value, target_value, estimated_improvement)
|
Iteratively optimizes $f(x)$ for $x$ on the line between $x'$ and $x_0$.
Args:
fn_x: A callable returning the value $f(x)$ at $x$.
x_init: Initial solution guess $x_0$.
base_value: Value $f(x')$ at $x = x'$.
target_value: Value $f(x_0)$ at $x = x_0$.
estimated_improvement: Estimated improvement for $x = x_0$, $f(x')$ if None.
Returns:
A solution $x$ to the problem as given by the solver.
|
juraj-google-style
|
def list(self, *args, **kwargs):
return [self.prepare_model(n) for n in self.client.api.nodes(*args, **kwargs)]
|
List swarm nodes.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id``, ``name``, ``membership`` and ``role``.
Default: ``None``
Returns:
A list of :py:class:`Node` objects.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.nodes.list(filters={'role': 'manager'})
|
codesearchnet
|
def _MaybeNewName(self, name):
if not name:
return name
if name == self._old[:-1]:
return self._module_name
before, match, after = name.partition(self._old)
if match and (not before):
return self._new + after
else:
return name
|
Decides if a name should be replaced.
Args:
name: A name for which a prefix should be changed.
Returns:
If `name` is local to the module described by `old_module_name`, the old
module prefix will be replaced by `new_module_name` and the result
returned; otherwise `name` will be returned unchanged.
|
github-repos
|
def __init__(self, state_regex, regex, actions, next_state, flags=re.I):
self.state_regex = re.compile(
state_regex, re.DOTALL | re.M | re.S | re.U | flags)
self.regex = re.compile(regex, re.DOTALL | re.M | re.S | re.U | flags)
self.re_str = regex
self.actions = []
if actions:
self.actions = actions.split(',')
self.next_state = next_state
|
Initializes the token object.
Args:
state_regex: If this regular expression matches the current state this
rule is considered.
regex: A regular expression to try and match from the current point.
actions: A comma-separated list of method names in the Lexer to call.
next_state: The next state we transition to if this Token matches.
flags: re flags.
|
juraj-google-style
|
def shared_s3_app_bucket(self, include_region=False):
if include_region:
shared_s3_app_bucket = self.format['shared_s3_app_region_bucket'].format(**self.data)
else:
shared_s3_app_bucket = self.format['shared_s3_app_bucket'].format(**self.data)
return shared_s3_app_bucket
|
Generate shared s3 application bucket name.
Args:
include_region (bool): Include region in the name generation.
|
codesearchnet
|
def checksum(self, path):
try:
return self._gcsIO().checksum(path)
except Exception as e:
raise BeamIOError('Checksum operation failed', {path: e})
|
Fetch checksum metadata of a file on the
:class:`~apache_beam.io.filesystem.FileSystem`.
Args:
path: string path of a file.
Returns: string containing checksum
Raises:
``BeamIOError``: if path isn't a file or doesn't exist.
|
github-repos
|
def MultiDeleteAttributes(self,
subjects,
attributes,
start=None,
end=None,
sync=True):
for subject in subjects:
self.DeleteAttributes(
subject, attributes, start=start, end=end, sync=sync)
|
Remove all specified attributes from a list of subjects.
Args:
subjects: The list of subjects that will have these attributes removed.
attributes: A list of attributes.
start: A timestamp, attributes older than start will not be deleted.
end: A timestamp, attributes newer than end will not be deleted.
sync: If true we block until the operation completes.
|
juraj-google-style
|
def _get_dump_file_path(dump_root, device_name, debug_node_name):
dump_root = os.path.join(dump_root, debug_data.device_name_to_device_path(device_name))
if '/' in debug_node_name:
dump_dir = os.path.join(dump_root, os.path.dirname(debug_node_name))
dump_file_name = re.sub(':', '_', os.path.basename(debug_node_name))
else:
dump_dir = dump_root
dump_file_name = re.sub(':', '_', debug_node_name)
now_microsec = int(round(time.time() * 1000 * 1000))
dump_file_name += '_%d' % now_microsec
return os.path.join(dump_dir, dump_file_name)
|
Get the file path of the dump file for a debug node.
Args:
dump_root: (str) Root dump directory.
device_name: (str) Name of the device that the debug node resides on.
debug_node_name: (str) Name of the debug node, e.g.,
cross_entropy/Log:0:DebugIdentity.
Returns:
(str) Full path of the dump file.
|
github-repos
|
def parse_table_name(bigquery_table):
id_name = bigquery_table.split(':')
if len(id_name) != 2:
raise ValueError('Bigquery table name should be in the form '
'project_id:dataset.table_name. Got %s' % bigquery_table)
return id_name[1]
|
Given a string a:b.c, returns b.c.
Args:
bigquery_table: full table name in the form project_id:dataset.table_name
Returns:
dataset.table_name
Raises:
ValueError: if the name is not in the form project_id:dataset.table_name.
|
juraj-google-style
|
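A one-line usage sketch for `parse_table_name` above; the table name is made up for illustration.
# Usage sketch (assumes parse_table_name from the entry above is in scope;
# the table name is invented for illustration).
print(parse_table_name('myproject:mydataset.mytable'))   # mydataset.mytable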
def setSeasonSchedules(self, cmd_dict=None, password="00000000"):
result = False
self.setContext("setSeasonSchedules")
if not cmd_dict:
cmd_dict = self.m_seasons_sched_params
try:
if not self.request(False):
self.writeCmdMsg("Bad read CRC on setting")
else:
if not self.serialCmdPwdAuth(password):
self.writeCmdMsg("Password failure")
else:
req_table = ""
req_table += binascii.hexlify(str(cmd_dict["Season_1_Start_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Season_1_Start_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Season_1_Schedule"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Season_2_Start_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Season_2_Start_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Season_2_Schedule"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Season_3_Start_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Season_3_Start_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Season_3_Schedule"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Season_4_Start_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Season_4_Start_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Season_4_Schedule"]).zfill(2))
req_table += binascii.hexlify(str(0).zfill(24))
req_str = "015731023030383028" + req_table + "2903"
req_str += self.calc_crc16(req_str[2:].decode("hex"))
self.m_serial_port.write(req_str.decode("hex"))
if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
self.writeCmdMsg("Success(setSeasonSchedules): 06 returned.")
result = True
self.serialPostEnd()
except:
ekm_log(traceback.format_exc(sys.exc_info()))
self.setContext("")
return result
|
Serial command to set seasons table.
If no dictionary is passed, the meter object buffer is used.
Args:
cmd_dict (dict): Optional dictionary of season schedules.
password (str): Optional password
Returns:
bool: True on completion and ACK.
|
juraj-google-style
|
def stations(self, station, limit=10):
query = {'start': 1, 'S': (station + '?'), 'REQ0JourneyStopsB': limit}
rsp = requests.get('http:
return parse_stations(rsp.text)
|
Find stations for the given search query
Args:
station (str): search query
limit (int): limit number of results
|
codesearchnet
|
def _parse_batch_get(get_doc_response, reference_map, client):
result_type = get_doc_response.WhichOneof('result')
if (result_type == 'found'):
reference = _get_reference(get_doc_response.found.name, reference_map)
data = _helpers.decode_dict(get_doc_response.found.fields, client)
snapshot = DocumentSnapshot(reference, data, exists=True, read_time=get_doc_response.read_time, create_time=get_doc_response.found.create_time, update_time=get_doc_response.found.update_time)
elif (result_type == 'missing'):
snapshot = DocumentSnapshot(None, None, exists=False, read_time=get_doc_response.read_time, create_time=None, update_time=None)
else:
raise ValueError('`BatchGetDocumentsResponse.result` (a oneof) had a field other than `found` or `missing` set, or was unset')
return snapshot
|
Parse a `BatchGetDocumentsResponse` protobuf.
Args:
get_doc_response (~google.cloud.proto.firestore.v1beta1.\
firestore_pb2.BatchGetDocumentsResponse): A single response (from
a stream) containing the "get" response for a document.
reference_map (Dict[str, .DocumentReference]): A mapping (produced
by :func:`_reference_info`) of fully-qualified document paths to
document references.
client (~.firestore_v1beta1.client.Client): A client that has
a document factory.
Returns:
[.DocumentSnapshot]: The retrieved snapshot.
Raises:
ValueError: If the response has a ``result`` field (a oneof) other
than ``found`` or ``missing``.
|
codesearchnet
|
def _replay(self, trial_id: int, dna: DNA, reward: Union[None, float, Tuple[float]]):
del trial_id
if reward is not None:
self._feedback(dna, reward)
|
Replay a single DNA from the history for state recovery.
The default implementation calls `DNAGenerator._feedback`. Subclasses that
have state that can be recovered by replaying the history should override
this method. See class `Sweeping` as an example.
Args:
trial_id: A zero-based integer as the trial ID for the DNA.
dna: A historically proposed DNA.
reward: The reward for the DNA. If None, the reward is not yet fed back
to the optimizer.
|
github-repos
|
def start(period: int) -> threading.Event:
global _heartbeat_timer
if _heartbeat_timer is not None:
logging.warning('A heartbeat thread is already running, skipping this one.')
return _heartbeat_timer
task_id = config.client_id()
num_tasks = config.num_clients()
if task_id == 0:
token = np.random.randint(0, pow(2, 16) - 1)
signal = np.full([num_tasks], token, dtype=np.int32)
else:
signal = np.zeros([num_tasks], dtype=np.int32)
logging.info('Initial heartbeat signal: %s', signal)
device = tf_device.DeviceSpec(job=config.job_name(), replica=0, task=task_id, device_type='CPU', device_index=0)
with ops.device(device):
signal = all_reduce(constant_op.constant(signal), group_size=num_tasks, group_key=0, instance_key=0, timeout=max(period - 10, 2)).numpy()
logging.info('Merged heartbeat signal %s', signal)
if task_id == 0:
if not np.all(signal == token):
logging.fatal('Merged heartbeat signal has value != %d', token)
else:
if len(set(signal)) != 1:
logging.fatal('Merged heartbeat signal has unequal elements')
token = signal[0]
_heartbeat_timer = threading.Event()
def stop_heartbeat():
logging.info('Stopping the heartbeat thread')
_heartbeat_timer.set()
time.sleep(max(period
atexit.register(stop_heartbeat)
thread = threading.Thread(target=_heartbeat, args=[period, _heartbeat_timer, token, num_tasks, task_id, device], daemon=True)
thread.start()
return _heartbeat_timer
|
Starts a persistent thread exchanging heartbeats between workers.
Args:
period: Heartbeat interval in seconds. Heartbeat timeout is set to the
larger of `period` - 10 and 2s.
Returns:
A threading.Event object. Users can choose to call its set() method to shut
down the heartbeat service gracefully. This isn't necessary in most cases,
because the heartbeat service automatically shuts down at successful program
exit through atexit handlers. But in situations when atexit handlers are not
invoked, such as when multiprocessing processes exit in tests, users can
manually request a shutdown.
|
github-repos
|
def get_by_index(self, index):
if index >= len(self._datasets):
raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))
return self._datasets[index]
|
Return a dataset by its index.
Args:
index (int): The index of the dataset that should be returned.
Raises:
DataInvalidIndex: If the index does not represent a valid dataset.
|
juraj-google-style
|
def adaptive_enc_mask(x_len, chunk_start_idx, left_window=0, right_window=0):
chunk_start_idx = torch.Tensor(chunk_start_idx).long()
start_pad = torch.nn.functional.pad(chunk_start_idx, (1, 0))
end_pad = torch.nn.functional.pad(chunk_start_idx, (0, 1), value=x_len)
seq_range = torch.arange(0, x_len).unsqueeze(-1)
idx = ((seq_range < end_pad) & (seq_range >= start_pad)).nonzero()[:, 1]
seq_range_expand = torch.arange(0, x_len).unsqueeze(0).expand(x_len, -1)
idx_left = idx - left_window
idx_left[idx_left < 0] = 0
boundary_left = start_pad[idx_left]
mask_left = seq_range_expand >= boundary_left.unsqueeze(-1)
idx_right = idx + right_window
idx_right[idx_right > len(chunk_start_idx)] = len(chunk_start_idx)
boundary_right = end_pad[idx_right]
mask_right = seq_range_expand < boundary_right.unsqueeze(-1)
return mask_left & mask_right
|
The function is very important for Transformer Transducer Streaming mode
Args:
x_len (int): sequence length
chunk_start_idx (list): first idx of each chunk, such as [0,18,36,48]. It also supports adaptive chunk size [0,10,15,45]
left_window (int): how many left chunks can be seen
right_window (int): how many right chunks can be seen. It is used for chunk overlap model.
Returns:
mask (torch.Tensor): a mask tensor for streaming model
|
github-repos
|
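With no left or right context, the mask built by `adaptive_enc_mask` above is block-diagonal. The example below is a sketch that assumes PyTorch is installed and the function as shown is in scope; two chunks cover frames 0-2 and 3-5.
# Sketch (assumes PyTorch is installed and adaptive_enc_mask above is in scope).
# Two chunks over 6 frames: [0, 1, 2] and [3, 4, 5], no extra context.
mask = adaptive_enc_mask(x_len=6, chunk_start_idx=[0, 3])
print(mask.int())
# tensor([[1, 1, 1, 0, 0, 0],
#         [1, 1, 1, 0, 0, 0],
#         [1, 1, 1, 0, 0, 0],
#         [0, 0, 0, 1, 1, 1],
#         [0, 0, 0, 1, 1, 1],
#         [0, 0, 0, 1, 1, 1]])
# Allowing one chunk of left context lets frames 3-5 also attend to frames 0-2.
mask_left = adaptive_enc_mask(6, [0, 3], left_window=1)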
def register_command(self, name, handler, validator):
self._commands[name] = (handler, validator)
|
Register a coroutine command handler.
This handler will be called whenever a command message whose operation
key matches ``name`` is received from the client. The handler
will be called as::
response_payload = await handler(cmd_payload, context)
If the coroutine returns, it will be assumed to have completed
correctly and its return value will be sent as the result of the
command. If the coroutine wishes to signal an error handling the
command, it must raise a ServerCommandError exception that contains a
string reason code for the error. This will generate an error
response to the command.
The cmd_payload is first verified using the SchemaVerifier passed in
``validator`` and handler is only called if verification succeeds. If
verification fails, a failure response to the command is returned
automatically to the client.
Args:
name (str): The unique command name that will be used to dispatch
client command messages to this handler.
handler (coroutine function): A coroutine function that will be
called whenever this command is received.
validator (SchemaVerifier): A validator object for checking the
command payload before calling this handler.
|
codesearchnet
|
class PromptDepthAnythingReassembleStage(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layers = nn.ModuleList()
for channels, factor in zip(config.neck_hidden_sizes, config.reassemble_factors):
self.layers.append(PromptDepthAnythingReassembleLayer(config, channels=channels, factor=factor))
def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]:
out = []
for i, hidden_state in enumerate(hidden_states):
hidden_state = hidden_state[:, 1:]
batch_size, _, num_channels = hidden_state.shape
hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
hidden_state = self.layers[i](hidden_state)
out.append(hidden_state)
return out
|
This class reassembles the hidden states of the backbone into image-like feature representations at various
resolutions.
This happens in 3 stages:
1. Take the patch embeddings and reshape them to image-like feature representations.
2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`.
3. Resize the spatial dimensions (height, width).
Args:
config (`[PromptDepthAnythingConfig]`):
Model configuration class defining the model architecture.
|
github-repos
|
def add_server(self, hostname, port, use_ssl, tls_ctx=None):
if not use_ssl and tls_ctx:
raise ValueError("Cannot specify a TLS context and not use SSL!")
server = ldap3.Server(
hostname,
port=port,
use_ssl=use_ssl,
tls=tls_ctx
)
self._server_pool.add(server)
return server
|
Add an additional server to the server pool and return the
freshly created server.
Args:
hostname (str): Hostname of the server
port (int): Port of the server
use_ssl (bool): True if SSL is to be used when connecting.
tls_ctx (ldap3.Tls): An optional TLS context object to use
when connecting.
Returns:
ldap3.Server: The freshly created server object.
|
juraj-google-style
|
def GetSourceStrings(cls, event):
formatter_object = cls.GetFormatterObject(event.data_type)
return formatter_object.GetSources(event)
|
Retrieves the formatted source strings for a specific event object.
Args:
event (EventObject): event.
Returns:
list[str, str]: short and long version of the source of the event.
|
codesearchnet
|
def _FormatIPToken(self, token_data):
data = ''.join(['{0:02x}'.format(byte) for byte in token_data.data])
return {'IPv4_Header': data}
|
Formats an IPv4 packet header token as a dictionary of values.
Args:
token_data (bsm_token_data_ip): AUT_IP token data.
Returns:
dict[str, str]: token values.
|
codesearchnet
|
def delete(self, json=None):
return self._call('delete', url=self.endpoint, json=json)
|
Send a DELETE request and return the JSON decoded result.
Args:
json (dict, optional): Object to encode and send in request.
Returns:
mixed: JSON decoded response data.
|
juraj-google-style
|
def set_imu_callback(self, callback, data=None):
self.imu_callback = callback
self.imu_callback_data = data
|
Register a callback for incoming IMU data packets.
This method allows you to pass in a callable which will be called on
receipt of each IMU data packet sent by this SK8 device. Set to `None`
to disable it again.
Args:
callback: a callable with the following signature:
(acc, gyro, mag, imu_index, seq, timestamp, data)
where:
acc, gyro, mag = sensor data ([x,y,z] in each case)
imu_index = originating IMU number (int, 0-4)
seq = packet sequence number (int, 0-255)
timestamp = value of time.time() when packet received
data = value of `data` parameter passed to this method
data: an optional arbitrary object that will be passed as a
parameter to the callback
|
codesearchnet
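A short sketch of registering an IMU callback with the documented signature; the connected `sk8` device object is assumed to exist already.
def on_imu(acc, gyro, mag, imu_index, seq, timestamp, data):
    # acc/gyro/mag are [x, y, z] readings; data is the object passed below.
    print(imu_index, seq, acc)

sk8.set_imu_callback(on_imu, data={'label': 'left wrist'})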
|
def _GenerateZipInfo(self, arcname=None, compress_type=None, st=None):
if (st is None):
st = os.stat_result((33188, 0, 0, 0, 0, 0, 0, 0, 0, 0))
mtime = time.localtime((st.st_mtime or time.time()))
date_time = mtime[0:6]
if (arcname is None):
raise ValueError('An arcname must be provided.')
zinfo = zipfile.ZipInfo(arcname, date_time)
zinfo.external_attr = ((st[0] & 65535) << 16)
if (compress_type is None):
zinfo.compress_type = self._compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.flag_bits = 8
zinfo.CRC = 134695760
zinfo.extra = struct.pack('<HHIIHH', 22613, 12, 0, 0, 0, 0)
return zinfo
|
Generate ZipInfo instance for the given name, compression and stat.
Args:
arcname: The name in the archive this should take.
compress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED)
st: An optional stat object to be used for setting headers.
Returns:
ZipInfo instance.
Raises:
ValueError: If arcname is not provided.
|
codesearchnet
|
def authorization_url(self, **kwargs):
kwargs.setdefault('access_type', 'offline')
(url, state) = self.oauth2session.authorization_url(self.client_config['auth_uri'], **kwargs)
return (url, state)
|
Generates an authorization URL.
This is the first step in the OAuth 2.0 Authorization Flow. The user's
browser should be redirected to the returned URL.
This method calls
:meth:`requests_oauthlib.OAuth2Session.authorization_url`
and specifies the client configuration's authorization URI (usually
Google's authorization server) and specifies that "offline" access is
desired. This is required in order to obtain a refresh token.
Args:
kwargs: Additional arguments passed through to
:meth:`requests_oauthlib.OAuth2Session.authorization_url`
Returns:
Tuple[str, str]: The generated authorization URL and state. The
user must visit the URL to complete the flow. The state is used
when completing the flow to verify that the request originated
from your application. If your application is using a different
:class:`Flow` instance to obtain the token, you will need to
specify the ``state`` when constructing the :class:`Flow`.
|
codesearchnet
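A hedged usage sketch with google-auth-oauthlib; how `flow` was constructed (for example via `Flow.from_client_secrets_file`) is an assumption, and any extra kwargs are simply forwarded to oauthlib.
url, state = flow.authorization_url(prompt='consent')
print('Please visit:', url)
# Persist `state` so it can be verified when the redirect comes back.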
|
def _concat(prefix, suffix, static=False):
if isinstance(prefix, tensor.Tensor):
p = prefix
p_static = tensor_util.constant_value(prefix)
if p.shape.ndims == 0:
p = array_ops.expand_dims(p, 0)
elif p.shape.ndims != 1:
raise ValueError('prefix tensor must be either a scalar or vector, but saw tensor: %s' % p)
else:
p = tensor_shape.TensorShape(prefix)
p_static = p.as_list() if p.ndims is not None else None
p = constant_op.constant(p.as_list(), dtype=dtypes.int32) if p.is_fully_defined() else None
if isinstance(suffix, tensor.Tensor):
s = suffix
s_static = tensor_util.constant_value(suffix)
if s.shape.ndims == 0:
s = array_ops.expand_dims(s, 0)
elif s.shape.ndims != 1:
raise ValueError('suffix tensor must be either a scalar or vector, but saw tensor: %s' % s)
else:
s = tensor_shape.TensorShape(suffix)
s_static = s.as_list() if s.ndims is not None else None
s = constant_op.constant(s.as_list(), dtype=dtypes.int32) if s.is_fully_defined() else None
if static:
shape = tensor_shape.TensorShape(p_static).concatenate(s_static)
shape = shape.as_list() if shape.ndims is not None else None
else:
if p is None or s is None:
raise ValueError('Provided a prefix or suffix of None: %s and %s' % (prefix, suffix))
shape = array_ops.concat((p, s), 0)
return shape
|
Concat that enables int, Tensor, or TensorShape values.
This function takes a size specification, which can be an integer, a
TensorShape, or a Tensor, and converts it into a concatenated Tensor
(if static = False) or a list of integers (if static = True).
Args:
prefix: The prefix; usually the batch size (and/or time step size).
(TensorShape, int, or Tensor.)
suffix: TensorShape, int, or Tensor.
static: If `True`, return a python list with possibly unknown dimensions.
Otherwise return a `Tensor`.
Returns:
shape: the concatenation of prefix and suffix.
Raises:
    ValueError: if `prefix` or `suffix` is not a scalar or vector (or TensorShape).
ValueError: if prefix or suffix was `None` and asked for dynamic
Tensors out.
|
github-repos
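The same shape concatenation can be sketched with public TensorFlow ops; the tensors below are hypothetical stand-ins for a dynamic batch size and a static state size.
import tensorflow as tf

batch_size = tf.shape(tf.zeros([8, 3]))[0]       # dynamic scalar prefix
state_size = 16                                  # static suffix
full_shape = tf.concat([tf.expand_dims(batch_size, 0), [state_size]], axis=0)
zeros_state = tf.zeros(full_shape)               # shape (8, 16)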
|
def delete(self, key):
    dct = self
    keys = key.split('.')
    for i, k in enumerate(keys):
        # Delete only once the final component is reached; comparing the index
        # (rather than the key name) avoids deleting early when an intermediate
        # key happens to share the last component's name.
        if i == len(keys) - 1:
            del dct[k]
            break
        if isinstance(dct, DotDict):
            dct = super(DotDict, dct).__getitem__(k)
        else:
            dct = dct.__getitem__(k)
        if not isinstance(dct, (DotDict, dict)):
            raise KeyError(
                'Subkey "{}" in "{}" invalid for deletion'.format(k, key)
            )
|
Remove a value from the `DotDict`.
The `key` parameter can either be a regular string key,
e.g. "foo", or it can be a string key with dot notation,
e.g. "foo.bar.baz", to signify a nested element.
If the key does not exist in the `DotDict`, it will continue
silently.
Args:
key (str): The key to remove.
|
juraj-google-style
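A small usage sketch; constructing a DotDict directly from a nested plain dict is an assumption about the class's constructor.
config = DotDict({'app': {'db': {'host': 'localhost', 'port': 5432}}})
config.delete('app.db.port')     # removes only the nested 'port' entry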
|
def max_intensity(item_a, time_a, item_b, time_b, max_value):
intensity_a = item_a.max_intensity(time_a)
intensity_b = item_b.max_intensity(time_b)
diff = np.sqrt((intensity_a - intensity_b) ** 2)
return np.minimum(diff, max_value) / float(max_value)
|
RMS difference in maximum intensity
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
|
juraj-google-style
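A worked example of the scaled intensity distance, using hypothetical maximum intensities of 55 and 40 and a scaling value of 100.
import numpy as np

diff = np.sqrt((55.0 - 40.0) ** 2)            # absolute difference: 15.0
distance = np.minimum(diff, 100.0) / 100.0    # scaled to [0, 1] -> 0.15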
|
def filter_on_submodules(all_modules, submodules):
filtered_modules = []
for mod in all_modules:
for submodule in submodules:
for package in PACKAGES:
if package + submodule in mod.__name__:
filtered_modules.append(mod)
return filtered_modules
|
Filters all the modules based on the modules flag.
The module flag has to be relative to the core package imported.
For example, if `module=keras.layers`, this function will return
all the modules in that submodule.
Args:
all_modules: All the modules in the core package.
submodules: Submodules to filter from all the modules.
Returns:
All the modules in the submodule.
|
github-repos
|
def get_transformed_output_time(self, window: 'BoundedWindow', input_timestamp: Timestamp) -> Timestamp:
return input_timestamp
|
Given input time and output window, returns output time for window.
If TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED is used in the
Windowing, the output timestamp for the given window will be the earliest
of the timestamps returned by get_transformed_output_time() for elements
of the window.
Arguments:
window: Output window of element.
input_timestamp: Input timestamp of element as a timeutil.Timestamp
object.
Returns:
Transformed timestamp.
|
github-repos
|
def _TensorArrayScatterGrad(op: ops.Operation, flow):
handle = op.inputs[0]
indices = op.inputs[1]
dtype = op.get_attr('T')
grad_source = _GetGradSource(flow)
flow_out = array_ops.identity(op.outputs[0], 'flow_out')
with ops.control_dependencies([flow_out]):
flow = array_ops.identity(flow, 'write_barrier')
g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow, colocate_with_first_write_call=False).grad(source=grad_source, flow=flow)
grad = g.gather(indices)
return [None, None, grad, flow]
|
Gradient for TensorArrayScatter.
Args:
op: Forward TensorArrayScatter op.
flow: Gradient `Tensor` flow to TensorArrayScatter.
Returns:
A grad `Tensor`, the gradient created in upstream ReadGrads or PackGrad.
|
github-repos
|
def fetch(self, invoice_id, data={}, **kwargs):
return super(Invoice, self).fetch(invoice_id, data, **kwargs)
|
Fetch Invoice for given Id
Args:
invoice_id : Id for which invoice object has to be retrieved
Returns:
Invoice dict for given invoice Id
|
codesearchnet
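A hedged usage sketch with the Razorpay Python client; the credentials and invoice id are placeholders.
import razorpay

client = razorpay.Client(auth=('KEY_ID', 'KEY_SECRET'))
invoice = client.invoice.fetch('inv_00000000000001')   # dict of invoice fields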
|
def split_once(self, horizontal: bool, position: int) -> None:
cdata = self._as_cdata()
lib.TCOD_bsp_split_once(cdata, horizontal, position)
self._unpack_bsp_tree(cdata)
|
Split this partition into 2 sub-partitions.
Args:
horizontal (bool):
position (int):
|
juraj-google-style
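A minimal python-tcod sketch: split a 40x30 region vertically at x = 20. The BSP constructor and the `children` attribute follow the tcod.bsp module.
import tcod.bsp

root = tcod.bsp.BSP(x=0, y=0, width=40, height=30)
root.split_once(horizontal=False, position=20)
left, right = root.children      # two sub-partitions of the original node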
|
def _get_new_global_index(self, index_override):
if index_override is None:
global_index = self._next_global_index
else:
if index_override in self._used_global_indices:
            raise ValueError('Index %d was already used by another call to add' % index_override)
global_index = index_override
self._used_global_indices.add(global_index)
while self._next_global_index in self._used_global_indices:
self._next_global_index += 1
return global_index
|
Return the next unused argument index in order or use an override.
Args:
index_override: An index to use instead of the next available or None
to use the next available.
Returns:
A valid global_index to use for the next hint argument.
Raises:
ValueError: If the index_override is already used by another hint.
|
github-repos
|
def restore_saved_local_scope(self, saved_variables, args_mapping, line_number):
    restore_nodes = list()
    for var in saved_variables:
        if var.RHS in args_mapping:
            restore_nodes.append(RestoreNode(var.RHS + ' = ' + args_mapping[var.RHS], var.RHS, [var.LHS], line_number=line_number, path=self.filenames[-1]))
        else:
            restore_nodes.append(RestoreNode(var.RHS + ' = ' + var.LHS, var.RHS, [var.LHS], line_number=line_number, path=self.filenames[-1]))
    for node, successor in zip(restore_nodes, restore_nodes[1:]):
        node.connect(successor)
    if restore_nodes:
        self.nodes[-1].connect(restore_nodes[0])
    self.nodes.extend(restore_nodes)
    return restore_nodes
|
Restore the previously saved variables to their original values.
Args:
saved_variables(list[SavedVariable])
args_mapping(dict): A mapping of call argument to definition argument.
line_number(int): Of the def of the function call about to be entered into.
Note: We do not need connect_if_allowed because of the
preceding call to save_local_scope.
|
codesearchnet
|
def default_pass_manager_simulator(basis_gates):
pass_manager = PassManager()
pass_manager.append(Unroller(basis_gates))
pass_manager.append([RemoveResetInZeroState(), Depth(), FixedPoint('depth')],
do_while=lambda property_set: not property_set['depth_fixed_point'])
return pass_manager
|
The default pass manager without a coupling map.
Args:
basis_gates (list[str]): list of basis gate names to unroll to.
Returns:
PassManager: A passmanager that just unrolls, without any optimization.
|
juraj-google-style
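A hedged usage sketch for the (legacy) Qiskit pass manager above; the `circuit` object is assumed to be a QuantumCircuit built elsewhere, and the basis gate list is only an example.
pm = default_pass_manager_simulator(basis_gates=['u1', 'u2', 'u3', 'cx', 'id'])
unrolled = pm.run(circuit)       # circuit expressed only in the basis gates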
|
def convert(self, obj):
if isinstance(obj, pobjects.SymmetricKey):
return self._build_core_key(obj, secrets.SymmetricKey)
elif isinstance(obj, secrets.SymmetricKey):
return self._build_pie_key(obj, pobjects.SymmetricKey)
elif isinstance(obj, pobjects.PublicKey):
return self._build_core_key(obj, secrets.PublicKey)
elif isinstance(obj, secrets.PublicKey):
return self._build_pie_key(obj, pobjects.PublicKey)
elif isinstance(obj, pobjects.PrivateKey):
return self._build_core_key(obj, secrets.PrivateKey)
elif isinstance(obj, secrets.PrivateKey):
return self._build_pie_key(obj, pobjects.PrivateKey)
elif isinstance(obj, pobjects.Certificate):
return self._build_core_certificate(obj)
elif isinstance(obj, secrets.Certificate):
return self._build_pie_certificate(obj)
elif isinstance(obj, pobjects.SecretData):
return self._build_core_secret_data(obj)
elif isinstance(obj, secrets.SecretData):
return self._build_pie_secret_data(obj)
elif isinstance(obj, pobjects.OpaqueObject):
return self._build_core_opaque_object(obj)
elif isinstance(obj, secrets.OpaqueObject):
return self._build_pie_opaque_object(obj)
else:
raise TypeError("object type unsupported and cannot be converted")
|
Convert a Pie object into a core secret object and vice versa.
Args:
obj (various): A Pie or core secret object to convert into the
opposite object space. Required.
Raises:
TypeError: if the object type is unrecognized or unsupported.
|
juraj-google-style
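A hedged sketch of converting a Pie-layer symmetric key with PyKMIP; the `factory` instance (an ObjectFactory-like object) and the 16-byte key value are assumptions.
from kmip.core import enums
from kmip.pie import objects as pobjects

key = pobjects.SymmetricKey(
    enums.CryptographicAlgorithm.AES, 128, b'\x00' * 16)
core_key = factory.convert(key)   # core secret representation of the key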
|
def info_qry(tickers, flds) -> str:
full_list = '\n'.join(([f'tickers: {tickers[:8]}'] + [f' {tickers[n:(n + 8)]}' for n in range(8, len(tickers), 8)]))
    return f'{full_list}\nfields: {flds}'
|
Logging info for given tickers and fields
Args:
tickers: tickers
flds: fields
Returns:
str
Examples:
>>> print(info_qry(
... tickers=['NVDA US Equity'], flds=['Name', 'Security_Name']
... ))
tickers: ['NVDA US Equity']
fields: ['Name', 'Security_Name']
|
codesearchnet
|
def raise_io_error(self, errno, filename=None):
raise IOError(errno, self._error_message(errno), filename)
|
Raises IOError.
The error message is constructed from the given error code and shall
start with the error in the real system.
Args:
errno: A numeric error code from the C variable errno.
filename: The name of the affected file, if any.
|
juraj-google-style
|
def parse_coach_ec_df(infile):
ec_df = pd.read_table(infile, delim_whitespace=True,
names=['pdb_template', 'tm_score', 'rmsd', 'seq_ident', 'seq_coverage',
'c_score', 'ec_number', 'binding_residues'])
ec_df['pdb_template_id'] = ec_df['pdb_template'].apply(lambda x: x[:4])
ec_df['pdb_template_chain'] = ec_df['pdb_template'].apply(lambda x: x[4])
ec_df = ec_df[['pdb_template_id', 'pdb_template_chain', 'tm_score', 'rmsd',
'seq_ident', 'seq_coverage', 'c_score', 'ec_number', 'binding_residues']]
ec_df['c_score'] = pd.to_numeric(ec_df.c_score, errors='coerce')
return ec_df
|
Parse the EC.dat output file of COACH and return a dataframe of results
EC.dat contains the predicted EC number and active residues.
The columns are: PDB_ID, TM-score, RMSD, Sequence identity,
Coverage, Confidence score, EC number, and Active site residues
Args:
infile (str): Path to EC.dat
Returns:
DataFrame: Pandas DataFrame summarizing EC number predictions
|
juraj-google-style
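A short usage sketch; the EC.dat path is a placeholder.
ec_df = parse_coach_ec_df('coach_results/EC.dat')
best_hit = ec_df.sort_values('c_score', ascending=False).head(1)
print(best_hit[['pdb_template_id', 'ec_number', 'c_score']])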
|
def file_md5( filename ):
with zopen( filename, 'r' ) as f:
file_string = f.read()
try:
file_string = file_string.decode()
except AttributeError:
pass
return( md5sum( file_string ) )
|
Generate the md5 checksum for a file
Args:
filename (Str): The file to be checksummed.
Returns:
(Str): The hex checksum
Notes:
If the file is gzipped, the md5 checksum returned is
for the uncompressed ASCII file.
|
juraj-google-style
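A minimal sketch of checksumming a small file with the helper above.
with open('example.txt', 'w') as f:
    f.write('some data\n')
print(file_md5('example.txt'))    # hex digest of the file contents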
|
def underlying_variable(t):
t = underlying_variable_ref(t)
assert t is not None
if not hasattr(tf.get_default_graph(), "var_index"):
tf.get_default_graph().var_index = {}
var_index = tf.get_default_graph().var_index
for v in tf.global_variables()[len(var_index):]:
var_index[v.name] = v
return var_index[t.name]
|
Find the underlying tf.Variable object.
Args:
t: a Tensor
Returns:
tf.Variable.
|
juraj-google-style
|
def push(self, document=None):
if (self.document is None):
if (document is None):
doc = Document()
else:
doc = document
elif (document is None):
doc = self.document
else:
raise ValueError('Cannot push() a different document from existing session.document')
self.connect()
if (not self.connected):
raise IOError("Cannot push session document because we failed to connect to the server (to start the server, try the 'bokeh serve' command)")
self._connection.push_doc(doc)
if (self._document is None):
self._attach_document(doc)
|
Push the given document to the server and record it as session.document.
If this is called more than once, the Document has to be the same (or None
to mean "session.document").
.. note::
Automatically calls :func:`~connect` before pushing.
Args:
document (:class:`~bokeh.document.Document`, optional) :
The document which will be kept in sync with the server document.
None to use session.document or create a new document.
|
codesearchnet
|
def get_mail_keys(message, complete=True):
if complete:
log.debug('Get all headers')
all_headers_keys = {i.lower() for i in message.keys()}
all_parts = ((ADDRESSES_HEADERS | OTHERS_PARTS) | all_headers_keys)
else:
log.debug('Get only mains headers')
all_parts = (ADDRESSES_HEADERS | OTHERS_PARTS)
log.debug('All parts to get: {}'.format(', '.join(all_parts)))
return all_parts
|
Given an email.message.Message, return a set with all email parts to get
Args:
message (email.message.Message): email message object
complete (bool): if True returns all email headers
Returns:
set with all email parts
|
codesearchnet
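A small sketch, assuming the ADDRESSES_HEADERS and OTHERS_PARTS constants from the same module are available alongside the function.
import email

msg = email.message_from_string('From: a@example.com\nSubject: hi\n\nbody')
parts = get_mail_keys(msg, complete=True)   # set of header/part names to pull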
|
def dumpfile(item, path):
with io.open(path, 'wb') as fd:
fd.write(en(item))
|
Dump an object to a file by path.
Args:
item (object): The object to serialize.
path (str): The file path to save.
Returns:
None
|
codesearchnet
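A usage sketch, assuming `en` is the msgpack-style encoder defined in the same module.
dumpfile({'name': 'node01', 'size': 10}, '/tmp/item.mpk')
# The file now holds the serialized bytes produced by en().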
|
def supported_device(self, index=0):
if ((not util.is_natural(index)) or (index >= self.num_supported_devices())):
raise ValueError('Invalid index.')
info = structs.JLinkDeviceInfo()
result = self._dll.JLINKARM_DEVICE_GetInfo(index, ctypes.byref(info))
return info
|
Gets the device at the given ``index``.
Args:
self (JLink): the ``JLink`` instance
index (int): the index of the device whose information to get
Returns:
A ``JLinkDeviceInfo`` describing the requested device.
Raises:
ValueError: if index is less than 0 or >= supported device count.
|
codesearchnet
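A hedged pylink sketch; it assumes the J-Link shared library is installed and that the returned JLinkDeviceInfo exposes a `name` attribute.
import pylink

jlink = pylink.JLink()                 # loads the J-Link library
info = jlink.supported_device(0)       # first entry of the device table
print(info.name)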
|
def WaitUntilDone(self, timeout=None):
utils.Poll(generator=self.GetState, condition=(lambda s: (s != self.__class__.STATE_RUNNING)), timeout=timeout)
self.target_file = self.target_file.Get()
return self
|
Wait until the operation is done.
Args:
timeout: timeout in seconds. None means default timeout (1 hour).
0 means no timeout (wait forever).
Returns:
Operation object with refreshed target_file.
Raises:
PollTimeoutError: if timeout is reached.
|
codesearchnet
|
def ExportClientsByKeywords(keywords, filename, token=None):
index = client_index.CreateClientIndex(token=token)
client_list = index.LookupClients(keywords)
logging.info("found %d clients", len(client_list))
if not client_list:
return
writer = csv.DictWriter([
u"client_id",
u"hostname",
u"last_seen",
u"os",
u"os_release",
u"os_version",
u"users",
u"ips",
u"macs",
])
writer.WriteHeader()
for client in aff4.FACTORY.MultiOpen(client_list, token=token):
s = client.Schema
writer.WriteRow({
u"client_id": client.urn.Basename(),
u"hostname": client.Get(s.HOSTNAME),
u"os": client.Get(s.SYSTEM),
u"os_release": client.Get(s.OS_RELEASE),
u"os_version": client.Get(s.OS_VERSION),
u"ips": client.Get(s.HOST_IPS),
u"macs": client.Get(s.MAC_ADDRESS),
u"users": "\n".join(client.Get(s.USERNAMES, [])),
u"last_seen": client.Get(s.PING),
})
with io.open(filename, "w") as csv_out:
csv_out.write(writer.Content())
|
A script to export client summaries selected by a keyword search.
This script does a client search for machines matching all of keywords and
writes a .csv summary of the results to filename. Multi-value fields are '\n'
separated.
Args:
keywords: a list of keywords to search for
filename: the name of the file to write to, will be replaced if already
present
token: datastore token.
|
juraj-google-style
|
def ravel(x):
if any_symbolic_tensors((x,)):
return Ravel().symbolic_call(x)
return backend.numpy.ravel(x)
|
Return a contiguous flattened tensor.
A 1-D tensor, containing the elements of the input, is returned.
Args:
x: Input tensor.
Returns:
Output tensor.
|
github-repos
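A Keras 3 usage sketch flattening a 2-D tensor with keras.ops.
from keras import ops

x = ops.reshape(ops.arange(6), (2, 3))
flat = ops.ravel(x)                    # shape (6,)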
|
def properties_with_values(self, include_defaults=True):
return self.query_properties_with_values((lambda prop: prop.serialized), include_defaults)
|
Collect a dict mapping property names to their values.
This method *always* traverses the class hierarchy and includes
properties defined on any parent classes.
Non-serializable properties are skipped and property values are in
"serialized" format which may be slightly different from the values
you would normally read from the properties; the intent of this method
is to return the information needed to losslessly reconstitute the
object instance.
Args:
include_defaults (bool, optional) :
Whether to include properties that haven't been explicitly set
since the object was created. (default: True)
Returns:
dict : mapping from property names to their values
|
codesearchnet
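A short Bokeh sketch; Range1d is used here only as a convenient model with a couple of explicitly set properties.
from bokeh.models import Range1d

rng = Range1d(start=0, end=10)
props = rng.properties_with_values(include_defaults=False)
print(props)                           # includes 'start' and 'end'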
|
def global_step(device=''):
global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)
if global_step_ref:
return global_step_ref[0]
else:
collections = [
VARIABLES_TO_RESTORE,
tf.GraphKeys.GLOBAL_VARIABLES,
tf.GraphKeys.GLOBAL_STEP,
]
with tf.device(variable_device(device, 'global_step')):
return tf.get_variable('global_step', shape=[], dtype=tf.int64,
initializer=tf.zeros_initializer(),
trainable=False, collections=collections)
|
Returns the global step variable.
Args:
device: Optional device to place the variable. It can be an string or a
function that is called to get the device for the variable.
Returns:
the tensor representing the global step variable.
|
juraj-google-style
|
def retransmit(self, data):
if data["euuid"] in self.event_uuids:
self.event_uuids[data["euuid"]] += 1
if (self.event_uuids[data["euuid"]] > self.max_retries or
data["cuuid"] not in self.registry):
logger.warning("<%s> Retry limit exceeded. "
"Timed out waiting for client for "
"event: %s" % (data["cuuid"], data["euuid"]))
logger.warning("<%s> Deleting event from currently processing "
"event uuids" % data["cuuid"])
del self.event_uuids[data["euuid"]]
else:
logger.debug("<%s> Timed out waiting for response. Retry %s. "
"Retransmitting message: "
"%s" % (data["cuuid"],
pformat(self.event_uuids[data["euuid"]]),
data["response"]))
host = self.registry[data["cuuid"]]["host"]
port = self.registry[data["cuuid"]]["port"]
self.listener.send_datagram(data["response"], (host, port))
logger.debug("<%s> Scheduling to retry in %s "
"seconds" % (data["cuuid"], str(self.timeout)))
self.listener.call_later(self.timeout, self.retransmit, data)
|
Processes messages that have been delivered from the listener.
Args:
data (dict): A dictionary containing the uuid, euuid, and message
response. E.g. {"cuuid": x, "euuid": y, "response": z}.
Returns:
None
|
juraj-google-style
|
def time_stats(self, **kwargs):
if ('time_stats' in self.attributes):
return self.attributes['time_stats']
path = ('%s/%s/time_stats' % (self.manager.path, self.get_id()))
return self.manager.gitlab.http_get(path, **kwargs)
|
Get time stats for the object.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done
|
codesearchnet
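A hedged python-gitlab sketch; `issue` is assumed to be a ProjectIssue fetched elsewhere.
stats = issue.time_stats()
print(stats.get('time_estimate'), stats.get('total_time_spent'))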
|
def random_subsets(self, relative_sizes, by_duration=False, balance_labels=False, label_list_ids=None):
resulting_sets = {}
next_bigger_subset = self.corpus
for relative_size in reversed(relative_sizes):
generator = SubsetGenerator(next_bigger_subset, random_seed=self.random_seed)
if by_duration:
sv = generator.random_subset_by_duration(relative_size, balance_labels=balance_labels, label_list_ids=label_list_ids)
else:
sv = generator.random_subset(relative_size, balance_labels=balance_labels, label_list_ids=label_list_ids)
resulting_sets[relative_size] = sv
return resulting_sets
|
Create a bunch of subsets with the given sizes relative to the size or duration of the full corpus.
Basically the same as calling ``random_subset`` or ``random_subset_by_duration`` multiple times
with different values. But this method makes sure that every subset contains only utterances
that are also contained in the next bigger subset.
Args:
relative_sizes (list): A list of numbers between 0 and 1 indicating the sizes of the desired subsets,
relative to the full corpus.
by_duration (bool): If True the size measure is the duration of all utterances in a subset/corpus.
balance_labels (bool): If True the labels contained in a subset are chosen to be balanced
as far as possible.
label_list_ids (list): List of label-list ids. If none is given, all label-lists are considered
for balancing. Otherwise only the ones that are in the list are considered.
Returns:
dict : A dictionary containing all subsets with the relative size as key.
|
codesearchnet
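A hedged usage sketch; `generator` stands for an instance of the class this method belongs to, wrapping an already-loaded corpus.
subsets = generator.random_subsets(relative_sizes=[0.1, 0.25, 0.5])
small, medium = subsets[0.1], subsets[0.25]
# Every utterance in `small` is also contained in `medium` and in the 0.5 subset.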
|