code (string, lengths 20 to 4.93k) | docstring (string, lengths 33 to 1.27k) | source (3 classes)
---|---|---|
def __init__(
self,
pooling_type='max',
window=2,
stride=2,
padding='SAME',
named_tensors=None,
scope='pool2d',
summary_labels=()
):
self.pooling_type = pooling_type
if isinstance(window, int):
self.window = (1, window, window, 1)
elif len(window) == 2:
self.window = (1, window[0], window[1], 1)
else:
raise TensorForceError('Invalid window {} for pool2d layer, must be of size 2'.format(window))
if isinstance(stride, int):
self.stride = (1, stride, stride, 1)
elif len(stride) == 2:
self.stride = (1, stride[0], stride[1], 1)
else:
raise TensorForceError('Invalid stride {} for pool2d layer, must be of size 2'.format(stride))
self.padding = padding
super(Pool2d, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)
|
2-dimensional pooling layer.
Args:
pooling_type: Either 'max' or 'average'.
window: Pooling window size, either an integer or pair of integers.
stride: Pooling stride, either an integer or pair of integers.
padding: Pooling padding, one of 'VALID' or 'SAME'.
|
juraj-google-style
|
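A minimal construction sketch for the Pool2d layer above. The import path is an assumption (older TensorForce releases exposed layer classes roughly like this), so treat it as illustrative only.

from tensorforce.core.networks.layer import Pool2d  # assumed import path

# A 3x3 max pool with stride 2; window/stride are normalized internally
# to NHWC form, i.e. (1, 3, 3, 1) and (1, 2, 2, 1).
pool = Pool2d(pooling_type='max', window=(3, 3), stride=2, padding='VALID')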
def _prepare_4d_causal_attention_mask(attention_mask: Optional[torch.Tensor], input_shape: Union[torch.Size, tuple, list], inputs_embeds: torch.Tensor, past_key_values_length: int, sliding_window: Optional[int]=None):
attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window)
key_value_length = input_shape[-1] + past_key_values_length
if attention_mask is not None and len(attention_mask.shape) == 2:
attention_mask = attn_mask_converter.to_4d(attention_mask, input_shape[-1], key_value_length=key_value_length, dtype=inputs_embeds.dtype)
elif attention_mask is not None and len(attention_mask.shape) == 4:
expected_shape = (input_shape[0], 1, input_shape[1], key_value_length)
if tuple(attention_mask.shape) != expected_shape:
raise ValueError(f'Incorrect 4D attention_mask shape: {tuple(attention_mask.shape)}; expected: {expected_shape}.')
else:
inverted_mask = 1.0 - attention_mask
attention_mask = inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(inputs_embeds.dtype).min)
else:
attention_mask = attn_mask_converter.to_causal_4d(input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device)
return attention_mask
|
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`
Args:
attention_mask (`torch.Tensor` or `None`):
A 2D attention mask of shape `(batch_size, key_value_length)`
input_shape (`tuple(int)` or `list(int)` or `torch.Size`):
The input shape should be a tuple that defines `(batch_size, query_length)`.
inputs_embeds (`torch.Tensor`):
The embedded inputs as a torch Tensor.
past_key_values_length (`int`):
The length of the key value cache.
sliding_window (`int`, *optional*):
If the model uses windowed attention, a sliding window should be passed.
|
github-repos
|
def usufyToPngExport(d, fPath):
newGraph = _generateGraphData(d)
import matplotlib.pyplot as plt
nx.draw(newGraph)
plt.savefig(fPath)
|
Workaround to export to a png file.
Args:
-----
d: Data to export.
fPath: File path for the output file.
|
juraj-google-style
|
def nonzero(x):
if any_symbolic_tensors((x,)):
return Nonzero().symbolic_call(x)
return backend.numpy.nonzero(x)
|
Return the indices of the elements that are non-zero.
Args:
x: Input tensor.
Returns:
Indices of elements that are non-zero.
|
github-repos
|
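A small illustrative call. Assuming this `nonzero` is the one exposed publicly as `keras.ops.nonzero` in Keras 3 (the public alias is an assumption here), it mirrors `np.nonzero`:

import numpy as np
from keras import ops  # assumption: Keras 3 public ops namespace

x = np.array([0, 3, 0, 5])
print(ops.nonzero(x))  # indices of the non-zero entries, i.e. positions 1 and 3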
def __build_helper_map(cls):
ret = {}
for name in dir(cls):
obj = getattr(cls, name)
if ishelper(obj):
for cmd in obj.__help_targets__:
if (cmd in ret.keys()):
raise PyShellError("The command '{}' already has helper method '{}', cannot register a second method '{}'.".format(cmd, ret[cmd], obj.__name__))
ret[cmd] = obj.__name__
return ret
|
Build a mapping from command names to helper names.
One command name maps to at most one helper method.
Multiple command names can map to the same helper method.
Only used by __init__() to initialize self._cmd_map. MUST NOT be used
elsewhere.
Raises:
PyShellError: A command maps to multiple helper methods.
|
codesearchnet
|
def _extract_filename(self, flagfile_str):
if flagfile_str.startswith('--flagfile='):
return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip())
elif flagfile_str.startswith('-flagfile='):
return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip())
else:
raise _exceptions.Error(
'Hit illegal --flagfile type: %s' % flagfile_str)
|
Returns filename from a flagfile_str of form -[-]flagfile=filename.
The cases of --flagfile foo and -flagfile foo shouldn't be hitting
this function, as they are dealt with in the level above this
function.
Args:
flagfile_str: str, the flagfile string.
Returns:
str, the filename from a flagfile_str of form -[-]flagfile=filename.
Raises:
Error: Raised when illegal --flagfile is provided.
|
juraj-google-style
|
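A simplified standalone re-implementation of the parsing rule documented above, for demonstration only (not the absl-py internals):

import os

def extract_filename(flagfile_str):
    # Mirrors the -[-]flagfile=filename parsing described above.
    for prefix in ('--flagfile=', '-flagfile='):
        if flagfile_str.startswith(prefix):
            return os.path.expanduser(flagfile_str[len(prefix):].strip())
    raise ValueError('Hit illegal --flagfile type: %s' % flagfile_str)

print(extract_filename('--flagfile=~/flags.txt'))  # expands ~ to the home directory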
def find_user(cls, session, mailbox, user):
return cls(
'/mailboxes/%d/users/%s/conversations.json' % (
mailbox.id, user.id,
),
session=session,
)
|
Return conversations for a specific user in a mailbox.
Args:
session (requests.sessions.Session): Authenticated session.
mailbox (helpscout.models.Mailbox): Mailbox to search.
user (helpscout.models.User): User to search for.
Returns:
RequestPaginator(output_type=helpscout.models.Conversation):
Conversations iterator.
|
juraj-google-style
|
def create_from_options(cls, pipeline_options):
from apache_beam.options.pipeline_options import PipelineOptions
if not isinstance(pipeline_options, PipelineOptions):
raise ValueError('Element of class {}.{} does not subclass PipelineOptions'.format(pipeline_options.__module__, pipeline_options.__class__.__name__))
items = {k: v if DisplayDataItem._get_value_type(v) is not None else str(v) for k, v in pipeline_options.display_data().items()}
return cls(pipeline_options._get_display_data_namespace(), items)
|
Creates :class:`~apache_beam.transforms.display.DisplayData` from a
:class:`~apache_beam.options.pipeline_options.PipelineOptions` instance.
When creating :class:`~apache_beam.transforms.display.DisplayData`, this
method will convert the value of any item of a non-supported type to its
string representation.
The normal :meth:`.create_from()` method rejects those items.
Returns:
~apache_beam.transforms.display.DisplayData:
A :class:`~apache_beam.transforms.display.DisplayData` instance with
populated items.
Raises:
ValueError: If the **has_display_data** argument is
not an instance of :class:`HasDisplayData`.
|
github-repos
|
def _list_profile_filter(profile_datum, node_name_regex, file_path_regex, op_type_regex, op_time_interval, exec_time_interval, min_lineno=-1, max_lineno=-1):
if node_name_regex and (not node_name_regex.match(profile_datum.node_exec_stats.node_name)):
return False
if file_path_regex:
if not profile_datum.file_path or not file_path_regex.match(profile_datum.file_path):
return False
if min_lineno > 0 and profile_datum.line_number and (profile_datum.line_number < min_lineno):
return False
if max_lineno > 0 and profile_datum.line_number and (profile_datum.line_number >= max_lineno):
return False
if profile_datum.op_type is not None and op_type_regex and (not op_type_regex.match(profile_datum.op_type)):
return False
if op_time_interval is not None and (not op_time_interval.contains(profile_datum.op_time)):
return False
if exec_time_interval and (not exec_time_interval.contains(profile_datum.node_exec_stats.all_end_rel_micros)):
return False
return True
|
Filter function for list_profile command.
Args:
profile_datum: A `ProfileDatum` object.
node_name_regex: Regular expression pattern object to filter by name.
file_path_regex: Regular expression pattern object to filter by file path.
op_type_regex: Regular expression pattern object to filter by op type.
op_time_interval: `Interval` for filtering op time.
exec_time_interval: `Interval` for filtering exec time.
min_lineno: Lower bound for 1-based line number, inclusive.
If <= 0, has no effect.
max_lineno: Upper bound for 1-based line number, exclusive.
If <= 0, has no effect.
# TODO(cais): Maybe filter by function name.
Returns:
True iff profile_datum should be included.
|
github-repos
|
def _tf_extension_type_with_packed(self, value):
copy = _create_object_from_type_and_dict(type(self), self.__dict__)
copy.__dict__['_tf_extension_type_is_packed'] = value
return copy
|
Returns a copy of this `TypeSpec` with `packed=value`.
Args:
value: A boolean value.
Returns:
A copy of `self` with `_tf_extension_type_is_packed=value`.
|
github-repos
|
def stats(self, *args):
result = self._fetch_cmd(b'stats', args, False)
for key, value in six.iteritems(result):
converter = STAT_TYPES.get(key, int)
try:
result[key] = converter(value)
except Exception:
pass
return result
|
The memcached "stats" command.
The returned keys depend on what the "stats" command returns.
A best effort is made to convert values to appropriate Python
types, defaulting to strings when a conversion cannot be made.
Args:
*args: extra string arguments to the "stats" command. See the
memcached protocol documentation for more information.
Returns:
A dict of the returned stats.
|
juraj-google-style
|
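A hedged usage sketch, assuming a pymemcache-style client that exposes this `stats` method; the host, port, and stat key shown are placeholders:

from pymemcache.client.base import Client  # assumption: pymemcache-style API

client = Client(('127.0.0.1', 11211))
print(client.stats())         # plain "stats"
print(client.stats('slabs'))  # "stats slabs"; extra args are passed through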
def flat_values_spec(self):
return self._flat_values_spec
|
The `TypeSpec` of the flat_values of RaggedTensor.
Returns:
- The TypeSpec of flat_values.
- None when the flat_values is a Tensor.
|
github-repos
|
def init(args):
dir_path = Path().absolute()
if not args.project_name or args.project_name.find("/") >= 0:
print(
"{}You should specify a valid project name{}".format(
utils.colors.FAIL, utils.colors.ENDC
)
)
return
project_path = dir_path / args.project_name
if not project_path.exists():
project_path.mkdir()
else:
print(
"{}This project already exists{}".format(
utils.colors.FAIL, utils.colors.ENDC
)
)
return
home_doc_path = project_path / "docs"
home_doc_path.mkdir()
help_doc_path = home_doc_path / "help"
help_doc_path.mkdir()
file_path = Path(__file__).resolve().parent / "include"
copyfile(file_path / "index.md", home_doc_path / "index.md")
copyfile(file_path / "How_To_Use_Mkinx.md", help_doc_path / "How_To_Use_Mkinx.md")
copyfile(
file_path / "Writing_Sphinx_Documentation.md",
help_doc_path / "Writing_Sphinx_Documentation.md",
)
with open(file_path / "mkdocs.yml", "r") as f:
lines = f.readlines()
input_text = "What is your Documentation's name"
input_text += " (it can be changed later in mkdocs.yml)?\n"
input_text += "[Default: {} - Home Documentation]\n"
site_name = input(input_text.format(args.project_name.capitalize()))
if not site_name:
site_name = "{} - Home Documentation".format(args.project_name.capitalize())
lines[0] = "site_name: {}\n".format(site_name)
with open(project_path / "mkdocs.yml", "w") as f:
f.writelines(lines)
example_project_path = project_path / "example_project" / "example_project"
windows = "y" if sys.platform in {"win32", "cygwin"} else "n"
copytree(file_path / "example_project", example_project_path)
move(str(example_project_path / "source"), str(project_path / "example_project"))
move(
str(project_path / "example_project" / "example_project" / "Makefile"),
str(project_path / "example_project"),
)
if windows == "y":
move(
str(project_path / "example_project" / "example_project" / "make.bat"),
str(project_path / "example_project"),
)
else:
os.remove(
str(project_path / "example_project" / "example_project" / "make.bat")
)
static = project_path / "example_project" / "source"
static /= "_static"
if not static.exists():
static.mkdir()
_ = subprocess.check_output(
"cd {} && mkinx build -F -A > /dev/null".format(args.project_name), shell=True
)
print(
"\n\n",
utils.colors.OKBLUE,
"{}/{} created as a showcase of how mkinx works".format(
args.project_name, "example_project"
),
utils.colors.ENDC,
)
print(
"\n",
utils.colors.OKGREEN,
"Success!",
utils.colors.ENDC,
"You can now start your Docs in ./{}\n".format(args.project_name),
utils.colors.HEADER,
"$ cd ./{}".format(args.project_name),
utils.colors.ENDC,
)
print(
" Start the server from within your Docs to see them \n (default",
"port is 8443 but you can change it with the -s flag):",
)
print(
utils.colors.HEADER,
" {} $ mkinx serve\n".format(args.project_name),
utils.colors.ENDC,
)
|
Initialize a Home Documentation's folder
Args:
args (ArgumentParser): Flags from the CLI
|
juraj-google-style
|
def _init_local_init_op(self, local_init_op=USE_DEFAULT):
if local_init_op is Supervisor.USE_DEFAULT:
local_init_op = self._get_first_op_from_collection(ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [variables.local_variables_initializer(), lookup_ops.tables_initializer()]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
self._local_init_op = local_init_op
|
Initializes local_init_op.
Args:
local_init_op: `Operation` run for every new supervisor instance. If set
to USE_DEFAULT, use the first op from the GraphKeys.LOCAL_INIT_OP
collection. If the collection is empty, create an op that initializes
all local variables and all tables.
|
github-repos
|
def num_samples(self, sr=None):
native_sr = self.sampling_rate
num_samples = units.seconds_to_sample(self.duration, native_sr)
if (sr is not None):
ratio = (float(sr) / native_sr)
num_samples = int(np.ceil((num_samples * ratio)))
return num_samples
|
Return the number of samples.
Args:
sr (int): Calculate the number of samples with the given
sampling-rate. If None use the native sampling-rate.
Returns:
int: Number of samples
|
codesearchnet
|
def execute(self, triple_map, output, **kwargs):
subjects = []
found_elements = self.source.xpath(
str(triple_map.logicalSource.iterator),
namespaces=self.xml_ns)
for element in found_elements:
subject = self.generate_term(term_map=triple_map.subjectMap,
element=element,
**kwargs)
start = len(output)
for row in triple_map.predicateObjectMap:
predicate = row.predicate
if row.template is not None:
obj_ = self.generate_term(term_map=row, **kwargs)
output.add((subject, predicate, obj_))
if row.parentTriplesMap is not None:
self.__handle_parents__(
output,
parent_map=row.parentTriplesMap,
subject=subject,
predicate=predicate,
**kwargs)
new_subjects = self.__reference_handler__(
output,
predicate_obj_map=row,
element=element,
subject=subject)
subjects.extend(new_subjects)
if row.constant is not None:
output.add((subject, predicate, row.constant))
if start < len(output):
if triple_map.subjectMap.class_ is not None:
output.add((subject,
NS_MGR.rdf.type.rdflib,
triple_map.subjectMap.class_))
subjects.append(subject)
return subjects
|
Method executes the mapping for the given triple map between the XML source and the output graph.
Args:
-----
triple_map: SimpleNamespace, Triple Map
|
juraj-google-style
|
def MakeSimpleProtoClass(fields, full_name=None, pool=None):
factory = message_factory.MessageFactory(pool=pool)
if (full_name is not None):
try:
proto_cls = _GetMessageFromFactory(factory, full_name)
return proto_cls
except KeyError:
pass
field_items = fields.items()
if (not isinstance(fields, OrderedDict)):
field_items = sorted(field_items)
fields_hash = hashlib.sha1()
for (f_name, f_type) in field_items:
fields_hash.update(f_name.encode('utf-8'))
fields_hash.update(str(f_type).encode('utf-8'))
proto_file_name = (fields_hash.hexdigest() + '.proto')
if (full_name is None):
full_name = ('net.proto2.python.public.proto_builder.AnonymousProto_' + fields_hash.hexdigest())
try:
proto_cls = _GetMessageFromFactory(factory, full_name)
return proto_cls
except KeyError:
pass
factory.pool.Add(_MakeFileDescriptorProto(proto_file_name, full_name, field_items))
return _GetMessageFromFactory(factory, full_name)
|
Create a Protobuf class whose fields are basic types.
Note: this doesn't validate field names!
Args:
fields: dict of {name: field_type} mappings for each field in the proto. If
this is an OrderedDict the order will be maintained, otherwise the
fields will be sorted by name.
full_name: optional str, the fully-qualified name of the proto type.
pool: optional DescriptorPool instance.
Returns:
a class, the new protobuf class with a FileDescriptor.
|
codesearchnet
|
def _generate_subtokens(token_counts, alphabet, min_count, num_iterations=4, reserved_tokens=None):
if (reserved_tokens is None):
reserved_tokens = RESERVED_TOKENS
subtoken_list = (reserved_tokens + list(alphabet))
max_subtoken_length = 1
for i in xrange(num_iterations):
tf.logging.info(('\tGenerating subtokens: iteration %d' % i))
subtoken_dict = _list_to_index_dict(subtoken_list)
subtoken_counts = _count_and_gen_subtokens(token_counts, alphabet, subtoken_dict, max_subtoken_length)
(subtoken_list, max_subtoken_length) = _gen_new_subtoken_list(subtoken_counts, min_count, alphabet, reserved_tokens)
tf.logging.info(('\tVocab size: %d' % len(subtoken_list)))
return subtoken_list
|
Create a list of subtokens in decreasing order of frequency.
Args:
token_counts: dict mapping str tokens -> int count
alphabet: set of characters
min_count: int minimum number of times a subtoken must appear before it is
added to the vocabulary.
num_iterations: int number of iterations to generate new tokens.
reserved_tokens: list of tokens that will be added to the beginning of the
returned subtoken list.
Returns:
Sorted list of subtokens (most frequent first)
|
codesearchnet
|
def pad_to_square(self, images: 'torch.Tensor', background_color: Union[int, Tuple[int, int, int]]=0) -> 'torch.Tensor':
height, width = get_image_size(images, ChannelDimension.FIRST)
if height == width:
return images
num_channels = images.shape[1] if len(images.shape) == 4 else images.shape[0]
if isinstance(background_color, int):
background_color = [background_color] + [0] * (num_channels - 1)
elif len(background_color) != num_channels:
raise ValueError(f'background_color must have no more than {num_channels} elements to match the number of channels')
max_dim = max(height, width)
paste_x_left = (max_dim - width) // 2
paste_y_left = (max_dim - height) // 2
paste_x_right = max_dim - width - paste_x_left
paste_y_right = max_dim - height - paste_y_left
padded_images = F.pad(images, padding=[paste_x_left, paste_y_left, paste_x_right, paste_y_right], fill=background_color)
return padded_images
|
Pads an image to a square based on the longest edge.
Args:
images (`torch.Tensor`):
The images to pad.
background_color (`int` or `Tuple[int, int, int]`, *optional*, defaults to 0):
The color to use for the padding. Can be an integer for single-channel images or a
tuple of integers for multi-channel images. If passed as an integer
in multi-channel mode, it will default to `0` in subsequent channels.
Returns:
`torch.Tensor`: The padded images.
|
github-repos
|
def _sd_of_runs(stats, mean, key='runs'):
num_runs = len(stats[key])
first = stats[key][0]
standard_deviation = {}
for stat_key in first:
if isinstance(first[stat_key], numbers.Number):
standard_deviation[stat_key] = math.sqrt(
sum((run[stat_key] - mean[stat_key])**2
for run in stats[key]) / float(num_runs))
return standard_deviation
|
Obtain the standard deviation of stats.
Args:
stats: dict; A set of stats, structured as above.
mean: dict; Mean for each key in stats.
key: str; Optional key to determine where list of runs is found in stats
|
juraj-google-style
|
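A small worked example for the helper above (assuming it is in scope), with numbers chosen so the result is easy to verify by hand:

stats = {'runs': [{'loss': 1.0, 'name': 'a'}, {'loss': 3.0, 'name': 'b'}]}
mean = {'loss': 2.0}
# sqrt(((1 - 2)**2 + (3 - 2)**2) / 2) == 1.0; non-numeric fields like 'name' are skipped
print(_sd_of_runs(stats, mean))  # {'loss': 1.0}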
def convert_transpose(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting transpose ...')
if (params['perm'][0] != 0):
if (inputs[0] in layers):
print('!!! Cannot permute batch dimension. Result may be wrong !!!')
layers[scope_name] = layers[inputs[0]]
else:
print('Skip weight matrix transpose, result may be wrong.')
else:
if names:
tf_name = ('PERM' + random_string(4))
else:
tf_name = (w_name + str(random.random()))
permute = keras.layers.Permute(params['perm'][1:], name=tf_name)
layers[scope_name] = permute(layers[inputs[0]])
|
Convert transpose layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
|
codesearchnet
|
def set_energy(self, spins, target_energy):
spin_energy = self.energy(spins)
self.assertions.add(Equals(spin_energy, limitReal(target_energy)))
|
Set the energy of Theta with spins fixed to target_energy.
Args:
spins (dict): Spin values for a subset of the variables in Theta.
target_energy (float): The desired energy for Theta with spins fixed.
Notes:
Add equality constraint to assertions.
|
juraj-google-style
|
def create_branch(self, branch_name: str):
LOGGER.info('creating branch: %s', branch_name)
self._validate_branch_name(branch_name)
if branch_name in self.list_branches():
LOGGER.error('branch already exists')
sys.exit(-1)
new_branch = self.repo.create_head(branch_name)
new_branch.commit = self.repo.head.commit
|
Creates a new branch
Args:
branch_name: name of the branch
|
juraj-google-style
|
def _ParseAttribute(self, file_object):
file_offset = file_object.tell()
attribute_map = self._GetDataTypeMap('cups_ipp_attribute')
try:
(attribute, _) = self._ReadStructureFromFileObject(file_object, file_offset, attribute_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError('Unable to parse attribute with error: {0!s}'.format(exception))
value = None
if (attribute.tag_value in self._INTEGER_TAG_VALUES):
value = self._ParseIntegerValue(attribute.value_data, file_offset)
elif (attribute.tag_value == self._TAG_VALUE_BOOLEAN):
value = self._ParseBooleanValue(attribute.value_data)
elif (attribute.tag_value == self._TAG_VALUE_DATE_TIME):
value = self._ParseDateTimeValue(attribute.value_data, file_offset)
elif (attribute.tag_value in self._STRING_WITHOUT_LANGUAGE_VALUES):
value = attribute.value_data.decode(self._last_charset_attribute)
elif (attribute.tag_value in self._ASCII_STRING_VALUES):
value = attribute.value_data.decode('ascii')
if (attribute.tag_value == self._TAG_VALUE_CHARSET):
self._last_charset_attribute = value
else:
value = attribute.value_data
return (attribute.name, value)
|
Parses a CUPS IPP attribute from a file-like object.
Args:
file_object (dfvfs.FileIO): file-like object.
Returns:
tuple[str, object]: attribute name and value.
Raises:
ParseError: if the attribute cannot be parsed.
|
codesearchnet
|
def _process_exception(e, body, tb):
msg = e.message if hasattr(e, "message") else str(e)
exception_type = str(e.__class__)
exception_name = str(e.__class__.__name__)
properties = pika.BasicProperties(
content_type="application/text",
delivery_mode=2,
headers={
"exception": msg,
"exception_type": exception_type,
"exception_name": exception_name,
"traceback": tb,
"UUID": str(uuid.uuid4())
}
)
send_message("harvester", body, properties=properties)
|
Process information about an exception and send it over AMQP.
Args:
e (obj): Exception instance.
body (str): Text which will be sent over AMQP.
tb (obj): Traceback object with information, which will be put into the
headers.
|
juraj-google-style
|
def to_insert(table, d):
columns = []
args = []
for (key, val) in d.items():
columns.append('"{}"'.format(key))
args.append(val)
stmt = 'insert into {table} ({columns}) values ({params})'.format(table=table, columns=', '.join(columns), params=', '.join((['?'] * len(columns))))
return (stmt, args)
|
Generate an insert statement using the given table and dictionary.
Args:
table (str): table name
d (dict): dictionary with column names as keys and values as values.
Returns:
tuple of statement and arguments
>>> to_insert('doc.foobar', {'name': 'Marvin'})
('insert into doc.foobar ("name") values (?)', ['Marvin'])
|
codesearchnet
|
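Extending the doctest above to two columns; parameter order follows dict insertion order on Python 3.7+:

stmt, args = to_insert('doc.users', {'name': 'Ada', 'age': 36})
# stmt == 'insert into doc.users ("name", "age") values (?, ?)'
# args == ['Ada', 36]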
def get_id(date: datetime.datetime) -> str:
date = date.strftime('%Y%m%d')
return 'PB-{}-{}-{:03d}'.format(date, 'sip', randint(0, 100))
|
Generate a Processing Block (PB) Instance ID.
Args:
date (datetime.datetime): UTC date of the PB
Returns:
str, Processing Block ID
|
juraj-google-style
|
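An illustrative call (assuming the function is in scope); the trailing counter comes from randint, so only the prefix is deterministic:

import datetime

pb_id = get_id(datetime.datetime(2024, 1, 2))
# e.g. 'PB-20240102-sip-042' (the last three digits are random)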
def _try_parse_datetime(time_str, fmts):
result = None
for fmt in fmts:
try:
result = datetime.strptime(time_str, fmt)
break
except ValueError:
pass
return result
|
A helper function that attempts to parse the input time_str as a date.
Args:
time_str (str): A string representing the time
fmts (list): A list of date format strings
Returns:
datetime: Returns a datetime object if parsed properly, otherwise None
|
juraj-google-style
|
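A quick illustration of the fallback behaviour (assuming the helper is in scope):

fmts = ['%Y-%m-%d %H:%M:%S', '%Y-%m-%d']
print(_try_parse_datetime('2023-07-04', fmts))  # datetime.datetime(2023, 7, 4, 0, 0)
print(_try_parse_datetime('not a date', fmts))  # None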
def raster_erosion(rasterfile):
if is_string(rasterfile):
origin_raster = RasterUtilClass.read_raster(str(rasterfile))
elif isinstance(rasterfile, Raster):
origin_raster = rasterfile.data
elif isinstance(rasterfile, numpy.ndarray):
origin_raster = rasterfile
else:
return 'Your rasterfile has a wrong type. Type must be string or numpy.array or class Raster in pygeoc.'
max_value_raster = origin_raster.max()
erosion_raster = numpy.zeros((origin_raster.shape[0], origin_raster.shape[1]))
add_row = numpy.full((1, origin_raster.shape[1]), max_value_raster)
temp_origin_raster = numpy.vstack((numpy.vstack((add_row, origin_raster)), add_row))
add_col = numpy.full(((origin_raster.shape[0] + 2), 1), max_value_raster)
expand_origin_raster = numpy.hstack((numpy.hstack((add_col, temp_origin_raster)), add_col))
for i in range(origin_raster.shape[0]):
for j in range(origin_raster.shape[1]):
min_pixel_value = max_value_raster
for k in range(3):
for l in range(3):
if (expand_origin_raster[((i + k), (j + l))] <= min_pixel_value):
min_pixel_value = expand_origin_raster[((i + k), (j + l))]
erosion_raster[(i, j)] = min_pixel_value
return erosion_raster
|
Erode the raster image.
Find the minimum pixel value in the 8-neighborhood of each pixel, then set
that pixel's value to the minimum.
Args:
rasterfile: input original raster image, type can be filename(string,
like "test1.tif"), rasterfile(class Raster) or numpy.ndarray.
Returns:
erosion_raster: raster image after erosion, type is numpy.ndarray.
|
codesearchnet
|
def _parse_access_vlan(self, config):
value = re.search(r'switchport access vlan (\d+)', config)
return dict(access_vlan=value.group(1))
|
Scans the specified config and parses the access-vlan value
Args:
config (str): The interface configuration block to scan
Returns:
dict: A Python dict object with the value of switchport access
value. The dict returned is intended to be merged into the
resource dict
|
juraj-google-style
|
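The regex at the heart of the method can be exercised standalone; a sketch of what gets extracted from an interface block (the config text is made up):

import re

config = 'interface Ethernet1\n   switchport access vlan 10\n'
match = re.search(r'switchport access vlan (\d+)', config)
print(dict(access_vlan=match.group(1)))  # {'access_vlan': '10'}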
def assert_consumed(self):
pretty_printer = ObjectGraphProtoPrettyPrinter(self._checkpoint.object_graph_proto)
self.assert_existing_objects_matched()
ignore_node_ids = []
if self._options.experimental_skip_slot_variables:
for node in self._checkpoint.object_graph_proto.nodes:
for sv in node.slot_variables:
ignore_node_ids.append(sv.slot_variable_node_id)
for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):
if not node.attributes:
continue
if node_id in ignore_node_ids:
continue
trackable = self._checkpoint.object_by_proto_id.get(node_id, None)
if trackable is None:
raise AssertionError(f'Unresolved object in checkpoint {pretty_printer.node_names[node_id]}: {node}')
if not self._options.experimental_skip_slot_variables and self._checkpoint.slot_restorations:
raise AssertionError(f'Unresolved slot restorations: {self._checkpoint.slot_restorations}')
if self._checkpoint.unused_attributes:
unused_attribute_messages = []
for node_id, attribute in self._checkpoint.unused_attributes.items():
obj = self._checkpoint.object_by_proto_id[node_id]
unused_attribute_messages.append(f'{pretty_printer.node_names[node_id]} ({obj}): {attribute}')
joined_attribute_messages = '\n'.join(unused_attribute_messages)
raise AssertionError(f'Unused attributes in these objects (the attributes exist in the checkpoint but were not restored):\n{joined_attribute_messages}')
return self
|
Asserts that all objects in the checkpoint have been created/matched.
Returns:
`self` for chaining.
Raises:
AssertionError: If there are any Python objects in the dependency graph
which have not been restored from this checkpoint or a later `restore`,
or if there are any checkpointed values which have not been matched to
Python objects.
|
github-repos
|
def get_contrib_features(project_root):
project = Project(project_root)
contrib = project._resolve('.features.contrib')
return _get_contrib_features(contrib)
|
Get contributed features for a project at project_root
For a project ``foo``, walks modules within the ``foo.features.contrib``
subpackage. A single object that is an instance of ``ballet.Feature`` is
imported if present in each module. The resulting ``Feature`` objects are
collected.
Args:
project_root (str, path-like): Path to project root
Returns:
List[ballet.Feature]: list of Feature objects
|
juraj-google-style
|
def global_step(sess, global_step_tensor):
if context.executing_eagerly():
return int(global_step_tensor.numpy())
return int(sess.run(global_step_tensor))
|
Small helper to get the global step.
```python
# Create a variable to hold the global_step.
global_step_tensor = tf.Variable(10, trainable=False, name='global_step')
# Create a session.
sess = tf.compat.v1.Session()
# Initialize the variable
sess.run(global_step_tensor.initializer)
# Get the variable value.
print('global_step: %s' % tf.compat.v1.train.global_step(sess,
global_step_tensor))
global_step: 10
```
Args:
sess: A TensorFlow `Session` object.
global_step_tensor: `Tensor` or the `name` of the operation that contains
the global step.
Returns:
The global step value.
|
github-repos
|
def sg_summary_audio(tensor, sample_rate=16000, prefix=None, name=None):
prefix = '' if prefix is None else prefix + '/'
name = prefix + _pretty_name(tensor) if name is None else prefix + name
if not tf.get_variable_scope().reuse:
tf.summary.audio(name + '-au', tensor, sample_rate)
|
r"""Register `tensor` to summary report as audio
Args:
tensor: A `Tensor` to log as audio
sample_rate : An int. Sample rate to report. Default is 16000.
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None
|
juraj-google-style
|
def update(self, session, arrays=None, frame=None):
new_config = self._get_config()
if self._enough_time_has_passed(self.previous_config['FPS']):
self.visualizer.update(new_config)
self.last_update_time = time.time()
final_image = self._update_frame(session, arrays, frame, new_config)
self._update_recording(final_image, new_config)
|
Creates a frame and writes it to disk.
Args:
arrays: a list of np arrays. Use the "custom" option in the client.
frame: a 2D np array. This way the plugin can be used for video of any
kind, not just the visualization that comes with the plugin.
frame can also be a function, which only is evaluated when the
"frame" option is selected by the client.
|
codesearchnet
|
def _ReadTablesArray(self, file_object, tables_array_offset):
data_type_map = self._GetDataTypeMap('keychain_tables_array')
tables_array, _ = self._ReadStructureFromFileObject(
file_object, tables_array_offset, data_type_map)
tables = collections.OrderedDict()
for table_offset in tables_array.table_offsets:
self._ReadTable(tables, file_object, tables_array_offset + table_offset)
return tables
|
Reads the tables array.
Args:
file_object (file): file-like object.
tables_array_offset (int): offset of the tables array relative to
the start of the file.
Returns:
dict[int, KeychainDatabaseTable]: tables per identifier.
Raises:
ParseError: if the tables array cannot be read.
|
juraj-google-style
|
def sin(x):
if any_symbolic_tensors((x,)):
return Sin().symbolic_call(x)
return backend.numpy.sin(x)
|
Trigonometric sine, element-wise.
Arguments:
x: Input tensor.
Returns:
Output tensor of same shape as `x`.
|
github-repos
|
def _setup_test_logger(log_path, prefix=None):
log = logging.getLogger()
kill_test_logger(log)
log.propagate = False
log.setLevel(logging.DEBUG)
terminal_format = log_line_format
if prefix:
terminal_format = '[%s] %s' % (prefix, log_line_format)
c_formatter = logging.Formatter(terminal_format, log_line_time_format)
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(c_formatter)
ch.setLevel(logging.INFO)
f_formatter = logging.Formatter(log_line_format, log_line_time_format)
fh_info = logging.FileHandler(
os.path.join(log_path, records.OUTPUT_FILE_INFO_LOG))
fh_info.setFormatter(f_formatter)
fh_info.setLevel(logging.INFO)
fh_debug = logging.FileHandler(
os.path.join(log_path, records.OUTPUT_FILE_DEBUG_LOG))
fh_debug.setFormatter(f_formatter)
fh_debug.setLevel(logging.DEBUG)
log.addHandler(ch)
log.addHandler(fh_info)
log.addHandler(fh_debug)
log.log_path = log_path
logging.log_path = log_path
|
Customizes the root logger for a test run.
The logger object has a stream handler and a file handler. The stream
handler logs INFO level to the terminal, the file handler logs DEBUG
level to files.
Args:
log_path: Location of the log file.
prefix: A prefix for each log line in terminal.
filename: Name of the log file. The default is the time the logger
is requested.
|
juraj-google-style
|
def standardize(self, x):
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= np.std(x, keepdims=True) + 1e-06
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn("This ImageDataGenerator specifies `featurewise_center`, but it hasn't been fit on any training data. Fit it first by calling `.fit(numpy_data)`.")
if self.featurewise_std_normalization:
if self.std is not None:
x /= self.std + 1e-06
else:
warnings.warn("This ImageDataGenerator specifies `featurewise_std_normalization`, but it hasn't been fit on any training data. Fit it first by calling `.fit(numpy_data)`.")
if self.zca_whitening:
if self.zca_whitening_matrix is not None:
flat_x = x.reshape(-1, np.prod(x.shape[-3:]))
white_x = flat_x @ self.zca_whitening_matrix
x = np.reshape(white_x, x.shape)
else:
warnings.warn("This ImageDataGenerator specifies `zca_whitening`, but it hasn't been fit on any training data. Fit it first by calling `.fit(numpy_data)`.")
return x
|
Applies the normalization configuration in-place to a batch of
inputs.
`x` is changed in-place since the function is mainly used internally
to standardize images and feed them to your network. If a copy of `x`
would be created instead it would have a significant performance cost.
If you want to apply this method without changing the input in-place
you can call the method creating a copy before:
standardize(np.copy(x))
Args:
x: Batch of inputs to be normalized.
Returns:
The inputs, normalized.
|
github-repos
|
def set_active(self, username, active_state):
if active_state not in (True, False):
raise ValueError("active_state must be True or False")
user = self.get_user(username)
if user is None:
return None
if user['active'] is active_state:
return True
user['active'] = active_state
response = self._put(self.rest_url + "/user",
params={"username": username},
data=json.dumps(user))
if response.status_code == 204:
return True
return None
|
Set the active state of a user
Args:
username: The account username
active_state: True or False
Returns:
True: If successful
None: If no user or failure occurred
|
juraj-google-style
|
async def get_all(self, url, params=None):
if not params:
params = {}
items = []
next_page_token = None
while True:
if next_page_token:
params['pageToken'] = next_page_token
response = await self.get_json(url, params=params)
items.append(response)
next_page_token = response.get('nextPageToken')
if not next_page_token:
break
return items
|
Aggregate data from all pages of an API query.
Args:
url (str): Google API endpoint URL.
params (dict): (optional) URL query parameters.
Returns:
list: Parsed JSON query response results.
|
juraj-google-style
|
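A hedged asyncio usage sketch. `client` is assumed to be an instance of the (unnamed here) API client class that defines this coroutine, and the URL is a placeholder:

import asyncio

async def main(client):
    url = 'https://www.googleapis.com/books/v1/volumes'  # placeholder endpoint
    pages = await client.get_all(url, params={'q': 'python'})
    print(len(pages), 'pages fetched')  # pagination follows 'nextPageToken'

# asyncio.run(main(client))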
def typical_or_extreme_period_name(self, value=None):
if (value is not None):
try:
value = str(value)
except ValueError:
raise ValueError('value {} need to be of type str for field `typical_or_extreme_period_name`'.format(value))
if (',' in value):
raise ValueError('value should not contain a comma for field `typical_or_extreme_period_name`')
self._typical_or_extreme_period_name = value
|
Corresponds to IDD Field `typical_or_extreme_period_name`
Args:
value (str): value for IDD Field `typical_or_extreme_period_name`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
async def get_tournaments(self, subdomain: str=None, force_update: bool=False) -> list:
if (self.tournaments is None):
force_update = True
self._subdomains_searched.append(('' if (subdomain is None) else subdomain))
elif ((subdomain is None) and ('' not in self._subdomains_searched)):
force_update = True
self._subdomains_searched.append('')
elif ((subdomain is not None) and (subdomain not in self._subdomains_searched)):
force_update = True
self._subdomains_searched.append(subdomain)
if force_update:
params = {'include_participants': (1 if AUTO_GET_PARTICIPANTS else 0), 'include_matches': (1 if AUTO_GET_MATCHES else 0)}
if (subdomain is not None):
params['subdomain'] = subdomain
res = (await self.connection('GET', 'tournaments', **params))
if (len(res) == 0):
self.tournaments = []
else:
for t_data in res:
self._refresh_tournament_from_json(t_data)
return self.tournaments
|
Gets all of the user's tournaments.
|methcoro|
Args:
subdomain: *optional* subdomain needs to be given explicitly to get tournaments in a subdomain
force_update: *optional* set to True to force the data update from Challonge
Returns:
list[Tournament]: list of all the user tournaments
Raises:
APIException
|
codesearchnet
|
def sample_node_list(self, low, high, generator):
statements = []
for _ in range(np.random.randint(low, high)):
statements.append(generator())
return statements
|
Generate a list of statements of random length.
Args:
low: Fewest number of statements to generate.
high: Highest number of statements to generate.
generator: Function to call to generate nodes.
Returns:
A list of statements.
|
github-repos
|
def add(self, origin):
digest = self._calc_digest(origin)
if self.exists(digest):
self.logger.debug('Added File: [{0}] ( Already exists. Skipping transfer)'.format(digest))
return digest
absPath = self.get_file_path(digest)
absFolderPath = os.path.dirname(absPath)
self._makedirs(absFolderPath)
self._copy_content(origin, absPath)
self.logger.debug('Added file: "{0}" [{1}]'.format(digest, absPath))
return digest
|
Add new element to fsdb.
Args:
origin -- could be the path of a file or a readable/seekable object ( fileobject, stream, stringIO...)
Returns:
String representing the digest of the file
|
juraj-google-style
|
def convert_variable_to_constant(self, incoming_edge, tensor_data):
index = incoming_edge.destination.index
for edge in self.outgoing_edges:
if edge.source.index == index:
edge.destination.convertible.convert_variable_to_constant(edge, tensor_data)
function = self.converted_self().function
function.signature.input_arg[index].type = tensor_data.dtype
if '_input_shapes' in function.attr:
function.attr['_input_shapes'].list.shape[index].unknown_rank = True
del function.attr['_input_shapes'].list.shape[index].dim[:]
arg_attrs = function.arg_attr[index].attr
if '_output_shapes' in arg_attrs:
arg_attrs['_output_shapes'].list.shape[0].unknown_rank = True
del arg_attrs['_output_shapes'].list.shape[0].dim[:]
|
Converts one function argument into a constant.
Args:
incoming_edge: The edge into the argument to be converted.
tensor_data: The constant value.
|
github-repos
|
def enum(cls):
assert (cls.__bases__ == (object,))
d = dict(cls.__dict__)
new_type = type(cls.__name__, (int,), d)
new_type.__module__ = cls.__module__
map_ = {}
for (key, value) in iteritems(d):
if ((key.upper() == key) and isinstance(value, integer_types)):
value_instance = new_type(value)
setattr(new_type, key, value_instance)
map_[value] = key
def str_(self):
if (self in map_):
return ('%s.%s' % (type(self).__name__, map_[self]))
return ('%d' % int(self))
def repr_(self):
if (self in map_):
return ('<%s.%s: %d>' % (type(self).__name__, map_[self], int(self)))
return ('%d' % int(self))
setattr(new_type, '__repr__', repr_)
setattr(new_type, '__str__', str_)
return new_type
|
A decorator for creating an int enum class.
Makes the values a subclass of the type and implements repr/str.
The new class will be a subclass of int.
Args:
cls (type): The class to convert to an enum
Returns:
type: A new class
::
@enum
class Foo(object):
FOO = 1
BAR = 2
|
codesearchnet
|
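A quick demonstration of the generated behaviour, reusing the Foo example from the docstring (assuming the decorator is in scope):

@enum
class Foo(object):
    FOO = 1
    BAR = 2

print(str(Foo.FOO))   # 'Foo.FOO'
print(repr(Foo.BAR))  # '<Foo.BAR: 2>'
print(Foo.FOO + 1)    # 2, since the new class subclasses int
print(str(Foo(3)))    # '3', unknown values fall back to the plain integer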
def trigger(self, attr, old, new, hint=None, setter=None):
def invoke():
callbacks = self._callbacks.get(attr)
if callbacks:
for callback in callbacks:
callback(attr, old, new)
if hasattr(self, '_document') and self._document is not None:
self._document._notify_change(self, attr, old, new, hint, setter, invoke)
else:
invoke()
|
Trigger callbacks for ``attr`` on this object.
Args:
attr (str) :
old (object) :
new (object) :
Returns:
None
|
juraj-google-style
|
def AddClient(self, client):
keywords = self.AnalyzeClient(client)
keywords.add(self._NormalizeKeyword(client.client_id))
data_store.REL_DB.AddClientKeywords(client.client_id, keywords)
|
Adds a client to the index.
Args:
client: A Client object record.
|
juraj-google-style
|
def createDirStruct(paths, verbose=True):
for k, path in paths.items():
p = None
try:
pathlist = path if type(path) is list else [ path ]
for p in pathlist:
os.makedirs(p)
if verbose:
log.info('Creating directory: ' + p)
except OSError, e:
if e.errno == errno.EEXIST and os.path.isdir(p):
pass
else:
raise
return True
|
Loops ait.config._datapaths from AIT_CONFIG and creates a directory.
Replaces year and doy with the respective year and day-of-year.
If neither are given as arguments, current UTC day and year are used.
Args:
paths:
[optional] dict of directory paths you would like to create; each value may be a single path or a list of paths.
doy and year will be replaced by the datetime day and year, respectively.
datetime:
UTC Datetime string in ISO 8601 Format YYYY-MM-DDTHH:mm:ssZ
|
juraj-google-style
|
def get_max_bond_distance(self, el1_sym, el2_sym):
return sqrt(
(self.el_radius[el1_sym] + self.el_radius[el2_sym] + self.tol) ** 2)
|
Use Jmol algorithm to determine bond length from atomic parameters
Args:
el1_sym: (str) symbol of atom 1
el2_sym: (str) symbol of atom 2
Returns: (float) max bond length
|
juraj-google-style
|
def get_cells_iterator(bq_read_client: BigQueryReadClient, table_metadata: TableMetadata, column: str) -> Generator[Any, None, None]:
if '.' not in column and '[' not in column:
rows = get_readrows_iterator(bq_read_client, table_metadata, [column], data_format=DataFormat.AVRO)
for row in rows:
yield row.get(column)
else:
nested_columns = parse_column_path(column)
parent_column = nested_columns[0][0]
rows = get_readrows_iterator(bq_read_client, table_metadata, [parent_column], data_format=DataFormat.AVRO)
for current_value in rows:
for column_name, key in nested_columns:
if isinstance(current_value, dict):
current_value = current_value.get(column_name)
elif isinstance(current_value, list) and key:
current_value = next((item for item in current_value if item.get(column_name) == key), None)
if isinstance(current_value, dict) and 'value' in current_value:
extracted_value = next((value for key, value in current_value['value'].items() if value is not None), None)
current_value = extracted_value if extracted_value is not None else current_value
if current_value is None:
break
yield current_value
|
Retrieves an iterator of cell values for a specified column, optimized
for both simple and nested column
access, including handling special value structures with dynamic value types
for nested columns.
Args:
bq_read_client (BigQueryReadClient): The BigQuery Storage API Read client.
table_metadata (TableMetadata): The table's metadata.
column (str): The column name, supporting nested fields and array indices
for complex cases.
Returns:
Generator[Any, None, None]: An iterator over cell values.
|
github-repos
|
def _prefix_from_prefix_int(self, prefixlen):
if not isinstance(prefixlen, (int, long)):
raise NetmaskValueError('%r is not an integer' % prefixlen)
prefixlen = int(prefixlen)
if not (0 <= prefixlen <= self._max_prefixlen):
raise NetmaskValueError('%d is not a valid prefix length' %
prefixlen)
return prefixlen
|
Validate and return a prefix length integer.
Args:
prefixlen: An integer containing the prefix length.
Returns:
The input, possibly converted from long to int.
Raises:
NetmaskValueError: If the input is not an integer, or out of range.
|
juraj-google-style
|
def easeInOutCubic(n):
_checkRange(n)
n = 2 * n
if n < 1:
return 0.5 * n**3
else:
n = n - 2
return 0.5 * (n**3 + 2)
|
A cubic tween function that accelerates, reaches the midpoint, and then decelerates.
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
Returns:
(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
|
juraj-google-style
|
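A few sample values showing the symmetric acceleration and deceleration (assuming the function is in scope):

print(easeInOutCubic(0.0))   # 0.0
print(easeInOutCubic(0.25))  # 0.0625, i.e. 0.5 * 0.5**3
print(easeInOutCubic(0.5))   # 0.5, the midpoint
print(easeInOutCubic(1.0))   # 1.0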
def validate_primitive_without_value(fhir_primitive: message.Message) -> None:
name = fhir_primitive.DESCRIPTOR.full_name
if len(extensions.get_fhir_extensions(fhir_primitive)) < 2:
raise fhir_errors.InvalidFhirError(f'{name!r} must have either extensions or a value present.')
for field, _ in fhir_primitive.ListFields():
if field.name not in extensions.NON_VALUE_FIELDS:
raise fhir_errors.InvalidFhirError(f'{name!r} contains PrimitiveHasNoValue but {field.name!r} is set.')
|
Validates a Message which has the PrimitiveWithoutValue extension.
Given that there is a PrimitiveWithoutValue extension present, there must be
at least one other extension. Otherwise, there is truly no value set other
than id and/or extension (non-value fields).
Args:
fhir_primitive: The FHIR primitive Message to validate.
Raises:
fhir_errors.InvalidFhirError: In the event that there is less than one
extension present, or there are values set other than id and/or extension.
|
github-repos
|
def decorate_event_js(js_code):
def add_annotation(method):
setattr(method, '__is_event', True)
setattr(method, '_js_code', js_code)
return method
return add_annotation
|
Set up a method as an event, also adding the javascript code to generate it.
Args:
js_code (str): javascript code to generate the event client-side.
js_code is added to the widget html as
widget.attributes['onclick'] = js_code%{'emitter_identifier':widget.identifier, 'event_name':'onclick'}
|
codesearchnet
|
def get_reconciler(config, metrics, rrset_channel, changes_channel, **kw):
builder = reconciler.GDNSReconcilerBuilder(config, metrics, rrset_channel, changes_channel, **kw)
return builder.build_reconciler()
|
Get a GDNSReconciler client.
A factory function that validates configuration, creates an auth
and :class:`GDNSClient` instance, and returns a GDNSReconciler
provider.
Args:
config (dict): Google Cloud Pub/Sub-related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
rrset_channel (asyncio.Queue): Queue from which to consume
record set messages to validate.
changes_channel (asyncio.Queue): Queue to publish message to
make corrections to Cloud DNS.
kw (dict): Additional keyword arguments to pass to the
Reconciler.
Returns:
A :class:`GDNSReconciler` instance.
|
codesearchnet
|
def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1):
N_input = (N_input or 1)
N_output = (N_output or 1)
N_hidden = (N_hidden or tuple())
if isinstance(N_hidden, (int, float, basestring)):
N_hidden = (int(N_hidden),)
hidden_layer_type = (hidden_layer_type or tuple())
hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type))
if (verbosity > 0):
print(N_hidden, ' layers of type ', hidden_layer_type)
assert (len(N_hidden) == len(hidden_layer_type))
nn = pb.structure.FeedForwardNetwork()
nn.addInputModule(pb.structure.BiasUnit(name='bias'))
nn.addInputModule(pb.structure.LinearLayer(N_input, name='input'))
for (i, (Nhid, hidlaytype)) in enumerate(zip(N_hidden, hidden_layer_type)):
Nhid = int(Nhid)
nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden')))
nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output'))
nn.addConnection(pb.structure.FullConnection(nn['bias'], (nn['hidden'] if N_hidden else nn['output'])))
nn.addConnection(pb.structure.FullConnection(nn['input'], (nn['hidden'] if N_hidden else nn['output'])))
for (i, (Nhid, hidlaytype)) in enumerate(zip(N_hidden[:(- 1)], hidden_layer_type[:(- 1)])):
Nhid = int(Nhid)
nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')], nn['hidden-{}'.format((i + 1))]))
i = (len(N_hidden) - 1)
nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')], nn['output']))
nn.sortModules()
if FAST:
try:
nn.convertToFastNetwork()
except:
if (verbosity > 0):
print('Unable to convert slow PyBrain NN to a fast ARAC network...')
if (verbosity > 0):
print(nn.connections)
return nn
|
Build a neural net with the indicated input, hidden, and output dimensions
Arguments:
params (dict or PyBrainParams namedtuple):
default: {'N_hidden': 6}
(this is the only parameter that affects the NN build)
Returns:
FeedForwardNetwork with N_input + N_hidden + N_output nodes in 3 layers
|
codesearchnet
|
def stop(self):
self._logger.info('Cleaning up remaining connection threads.')
for thread in threading.enumerate():
if (thread is not threading.current_thread()):
try:
thread.join(10.0)
except Exception as e:
self._logger.info('Error occurred while attempting to cleanup thread: {0}'.format(thread.name))
self._logger.exception(e)
else:
if thread.is_alive():
self._logger.warning('Cleanup failed for thread: {0}. Thread is still alive'.format(thread.name))
else:
self._logger.info('Cleanup succeeded for thread: {0}'.format(thread.name))
self._logger.info('Shutting down server socket handler.')
try:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
except Exception as e:
self._logger.exception(e)
raise exceptions.NetworkingError('Server failed to shutdown socket handler.')
if hasattr(self, 'policy_monitor'):
try:
self.policy_monitor.stop()
self.policy_monitor.join()
except Exception as e:
self._logger.exception(e)
raise exceptions.ShutdownError('Server failed to clean up the policy monitor.')
|
Stop the server.
Halt server client connections and clean up any existing connection
threads.
Raises:
NetworkingError: Raised if a failure occurs while shutting down
or closing the TLS server socket.
|
codesearchnet
|
def do_batch_status(args):
rest_client = RestClient(args.url, args.user)
batch_ids = args.batch_ids.split(',')
if args.wait and args.wait > 0:
statuses = rest_client.get_statuses(batch_ids, args.wait)
else:
statuses = rest_client.get_statuses(batch_ids)
if args.format == 'yaml':
fmt.print_yaml(statuses)
elif args.format == 'json':
fmt.print_json(statuses)
else:
raise AssertionError('Missing handler: {}'.format(args.format))
|
Runs the batch-status command, printing output to the console
Args:
args: The parsed arguments sent to the command at runtime
|
juraj-google-style
|
def git_checkout(branch_name, create=False):
log.info('Checking out <33>{}'.format(branch_name))
shell.run('git checkout {} {}'.format(('-b' if create else ''), branch_name))
|
Checkout or create a given branch
Args:
branch_name (str):
The name of the branch to checkout or create.
create (bool):
If set to **True** it will create the branch instead of checking it
out.
|
codesearchnet
|
def __init__(self, sv, sess):
super(SVSummaryThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
|
Create a SVSummaryThread.
Args:
sv: A `Supervisor`.
sess: A `Session`.
|
github-repos
|
def __call__(self, batch: List[List[str]], mean: bool = None) -> List[Union[list, np.ndarray]]:
batch = [self._encode(sample, mean) for sample in batch]
if self.pad_zero:
batch = zero_pad(batch)
return batch
|
Embed sentences from batch
Args:
batch: list of tokenized text samples
mean: whether to return mean embedding of tokens per sample
Returns:
embedded batch
|
juraj-google-style
|
def _GetFlagValues(self, flags):
event_types = []
for (event_flag, description) in self._FLAG_VALUES.items():
if (event_flag & flags):
event_types.append(description)
return ', '.join(event_types)
|
Determines which events are indicated by a set of fsevents flags.
Args:
flags (int): fsevents record flags.
Returns:
str: a comma separated string containing descriptions of the flag values
stored in an fsevents record.
|
codesearchnet
|
def format_rpc(data):
address, rpc_id, args, resp, _status = data
name = rpc_name(rpc_id)
if isinstance(args, (bytes, bytearray)):
arg_str = hexlify(args)
else:
arg_str = repr(args)
if isinstance(resp, (bytes, bytearray)):
resp_str = hexlify(resp)
else:
resp_str = repr(resp)
return "%s called on address %d, payload=%s, response=%s" % (name, address, arg_str, resp_str)
|
Format an RPC call and response.
Args:
data (tuple): A tuple containing the address, rpc_id, argument and
response payloads and any error code.
Returns:
str: The formatted RPC string.
|
juraj-google-style
|
def from_url(cls, path):
if os.path.isfile(path):
with open(path) as fd:
data = fd.read()
else:
try:
response = urllib.urlopen(path)
if response.code >= 300:
raise RuntimeError('Unable to load repo from %s' % path)
data = response.read()
response.close()
except IOError:
raise RuntimeError(
'Unable to load repo from %s (IO error)' % path
)
return cls(json.loads(data), path)
|
Instantiate a :class:`TemplateRepository` instance from the data in a
file or url
Args:
path (str): Path or url to the json file to load
Returns:
TemplateRepository: A new instance
|
juraj-google-style
|
def find_code_in_transformers(object_name: str, base_path: Optional[str]=None, return_indices: bool=False) -> Union[str, Tuple[List[str], int, int]]:
parts = object_name.split('.')
i = 0
if base_path is None:
base_path = TRANSFORMERS_PATH
if base_path == MODEL_TEST_PATH:
base_path = 'tests'
module = parts[i]
while i < len(parts) and (not os.path.isfile(os.path.join(base_path, f'{module}.py'))):
i += 1
if i < len(parts):
module = os.path.join(module, parts[i])
if i >= len(parts):
raise ValueError(f'`object_name` should begin with the name of a module of transformers but got {object_name}.')
with open(os.path.join(base_path, f'{module}.py'), 'r', encoding='utf-8', newline='\n') as f:
lines = f.readlines()
indent = ''
line_index = 0
for name in parts[i + 1:]:
while line_index < len(lines) and re.search(f'^{indent}(class|def)\\s+{name}(\\(|\\:)', lines[line_index]) is None:
line_index += 1
indent += ' '
line_index += 1
if line_index >= len(lines):
raise ValueError(f' {object_name} does not match any function or class in {module}.')
start_index = line_index - 1
end_index = find_block_end(lines, start_index, len(indent))
code = ''.join(lines[start_index:end_index])
return (code, (lines, start_index, end_index)) if return_indices else code
|
Find and return the source code of an object.
Args:
object_name (`str`):
The name of the object we want the source code of.
base_path (`str`, *optional*):
The path to the base folder where files are checked. If not set, it will be set to `TRANSFORMERS_PATH`.
return_indices(`bool`, *optional*, defaults to `False`):
If `False`, will only return the code (as a string), otherwise it will also return the whole lines of the
file where the object specified by `object_name` is defined, together with the start/end indices of the block in
the file that defines the object.
Returns:
`Union[str, Tuple[List[str], int, int]]`: If `return_indices=False`, only the source code of the object will be
returned. Otherwise, it also returns the whole lines of the file where the object specified by `object_name` is
defined, together with the start/end indices of the block in the file that defines the object.
|
github-repos
|
def _contains_composite_function_call(self, graphdef: graph_pb2.GraphDef) -> bool:
return any(map(self._is_composite_function, graphdef.library.function))
|
Determines if the graph def has composite function call.
Args:
graphdef: A GraphDef object.
Returns:
True if and only if the graph def contains a composite function call.
|
github-repos
|
def _single_shard_restore(file_prefix: tensor_lib.Tensor, shardable_tensors: Sequence[sharding_util.ShardableTensor], options: 'checkpoint_options.CheckpointOptions | None'=None) -> sharding_util.Shard:
options = options or checkpoint_options.CheckpointOptions()
tensor_names = []
tensor_dtypes = []
slice_specs = []
for shardable_tensor in shardable_tensors:
if shardable_tensor._tensor_save_spec:
name = shardable_tensor._tensor_save_spec.name
spec = shardable_tensor._tensor_save_spec.slice_spec
else:
name, spec = (shardable_tensor.checkpoint_key, shardable_tensor.slice_spec)
tensor_names.append(name)
slice_specs.append(spec)
tensor_dtypes.append(shardable_tensor.dtype)
restore_device = options.experimental_io_device or 'cpu:0'
with ops.device(restore_device):
restored_tensors = io_ops.restore_v2(file_prefix, tensor_names, slice_specs, tensor_dtypes)
restored_tensor_dict = {}
for shardable_tensor in shardable_tensors:
restored_tensor = restored_tensors.pop(0)
restored_tensor_dict.setdefault(shardable_tensor.checkpoint_key, {})[shardable_tensor.slice_spec] = restored_tensor
return restored_tensor_dict
|
Restore the saveable objects from a checkpoint with `file_prefix`.
Args:
file_prefix: A string or scalar string Tensor containing the prefix for
files to read from.
shardable_tensors: A list of ShardableTensors to restore.
options: Optional `CheckpointOptions` object.
Returns:
A restored tensor dict (maps checkpoint_key -> slice_spec -> tensor).
|
github-repos
|
def ensure_list_size(list_, size_):
lendiff = (size_ - len(list_))
if (lendiff > 0):
extension = [None for _ in range(lendiff)]
list_.extend(extension)
|
Allocates more space if need be.
Extends ``list_`` with ``None`` entries so that len(``list_``) is at least ``size_``.
Args:
list_ (list): ``list`` to extend
size_ (int): target length to extend the list to
|
codesearchnet
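A minimal sketch of the padding behaviour described above.
# Extend a short list in place with None entries.
values = [1, 2]
ensure_list_size(values, 5)
assert values == [1, 2, None, None, None]
ensure_list_size(values, 3)  # already long enough, so the list is left unchanged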
|
def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:
rot_mats = self.get_rot_mats()
inv_rot_mats = invert_rot_mat(rot_mats)
return rot_vec_mul(inv_rot_mats, pts)
|
The inverse of the apply() method.
Args:
pts:
A [*, 3] set of points
Returns:
[*, 3] inverse-rotated points
|
github-repos
|
def shape(self):
return self._ragged_shape._to_tensor_shape()
|
The static shape of this StructuredTensor.
The returned `TensorShape` is guaranteed to have a known rank, but the
individual dimension sizes may be unknown.
Returns:
`tf.TensorShape`
|
github-repos
|
def get(self, catID, includeRelationships=False):
url = ('%(base_url)s/record/%(catID)s' % {'base_url': self.base_url, 'catID': catID})
r = self.gbdx_connection.get(url)
r.raise_for_status()
return r.json()
|
Retrieves the strip footprint WKT string given a cat ID.
Args:
catID (str): The source catalog ID from the platform catalog.
includeRelationships (bool): whether to include graph links to related objects. Default False.
Returns:
record (dict): A dict object identical to the json representation of the catalog record
|
codesearchnet
|
def _combine_handle_data(handle, initial_value):
assert handle.dtype == dtypes.resource
variable_handle_data = get_eager_safe_handle_data(handle)
if initial_value.dtype != dtypes.variant:
return variable_handle_data
extra_handle_data = get_eager_safe_handle_data(initial_value)
if extra_handle_data is not None and extra_handle_data.is_set:
if variable_handle_data is None or not variable_handle_data.is_set or len(variable_handle_data.shape_and_type) != 1:
raise RuntimeError(f"Expected VarHandleOp to return a length==1 shape_and_type, but saw: '{variable_handle_data}'")
variable_handle_data.shape_and_type.extend(extra_handle_data.shape_and_type)
return variable_handle_data
|
Concats HandleData from tensors `handle` and `initial_value`.
Args:
handle: A `Tensor` of dtype `resource`.
initial_value: A `Tensor`.
Returns:
A `CppShapeInferenceResult.HandleData`. If `initial_value` has dtype
`variant`, the `HandleData` contains the concatenation of the shape_and_type
from both `handle` and `initial_value`.
Raises:
RuntimeError: If handle, which was returned by VarHandleOp, either has
no handle data, or its len(handle_data.shape_and_type) != 1.
|
github-repos
|
def _GetAnalysisPlugins(self, analysis_plugins_string):
if not analysis_plugins_string:
return []
analysis_plugins_list = [
name.strip() for name in analysis_plugins_string.split(',')]
analysis_plugins = self._analysis_manager.GetPluginObjects(
analysis_plugins_list)
return analysis_plugins.values()
|
Retrieves analysis plugins.
Args:
analysis_plugins_string (str): comma separated names of analysis plugins
to enable.
Returns:
list[AnalysisPlugin]: analysis plugins.
|
juraj-google-style
|
def _get_job_metadata(provider, user_id, job_name, script, task_ids,
user_project, unique_job_id):
create_time = dsub_util.replace_timezone(datetime.datetime.now(), tzlocal())
user_id = user_id or dsub_util.get_os_user()
job_metadata = provider.prepare_job_metadata(script.name, job_name, user_id,
create_time)
if unique_job_id:
job_metadata['job-id'] = uuid.uuid4().hex
job_metadata['create-time'] = create_time
job_metadata['script'] = script
job_metadata['user-project'] = user_project
if task_ids:
job_metadata['task-ids'] = dsub_util.compact_interval_string(list(task_ids))
return job_metadata
|
Allow provider to extract job-specific metadata from command-line args.
Args:
provider: job service provider
user_id: user submitting the job
job_name: name for the job
script: the script to run
task_ids: a set of the task-ids for all tasks in the job
user_project: name of the project to be billed for the request
unique_job_id: generate a unique job id
Returns:
A dictionary of job-specific metadata (such as job id, name, etc.)
|
juraj-google-style
|
def histogram(namespace: Union[Type, str], name: str, bucket_type: 'BucketType', logger: Optional['MetricLogger']=None) -> 'Metrics.DelegatingHistogram':
namespace = UserMetrics.get_namespace(namespace)
return Metrics.DelegatingHistogram(MetricName(namespace, name), bucket_type, logger)
|
Obtains or creates a Histogram metric.
Args:
namespace: A class or string that gives the namespace to a metric
name: A string that gives a unique name to a metric
bucket_type: A type of bucket used in a histogram. A subclass of
apache_beam.utils.histogram.BucketType
logger: MetricLogger for logging locally aggregated metric
Returns:
A Histogram object.
|
github-repos
|
def view_structure(self, only_chains=None, opacity=1.0, recolor=False, gui=False):
if ssbio.utils.is_ipynb():
import nglview as nv
else:
raise EnvironmentError('Unable to display structure - not running in a Jupyter notebook environment')
if (not self.structure_file):
raise ValueError('Structure file not loaded')
only_chains = ssbio.utils.force_list(only_chains)
to_show_chains = '( '
for c in only_chains:
to_show_chains += ':{} or'.format(c)
to_show_chains = to_show_chains.strip(' or ')
to_show_chains += ' )'
if ((self.file_type == 'mmtf') or (self.file_type == 'mmtf.gz')):
view = nv.NGLWidget()
view.add_component(self.structure_path)
else:
view = nv.show_structure_file(self.structure_path, gui=gui)
if recolor:
view.clear_representations()
if only_chains:
view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity)
else:
view.add_cartoon(selection='protein', color='silver', opacity=opacity)
elif only_chains:
view.clear_representations()
view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity)
return view
|
Use NGLviewer to display a structure in a Jupyter notebook
Args:
only_chains (str, list): Chain ID or IDs to display
opacity (float): Opacity of the structure
recolor (bool): If structure should be cleaned and recolored to silver
gui (bool): If the NGLview GUI should show up
Returns:
NGLviewer object
|
codesearchnet
|
def _from_signer_and_info(cls, signer, info, **kwargs):
return cls(
signer,
service_account_email=info['client_email'],
token_uri=info['token_uri'],
project_id=info.get('project_id'), **kwargs)
|
Creates a Credentials instance from a signer and service account
info.
Args:
signer (google.auth.crypt.Signer): The signer used to sign JWTs.
info (Mapping[str, str]): The service account info.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.jwt.Credentials: The constructed credentials.
Raises:
ValueError: If the info is not in the expected format.
|
juraj-google-style
|
def get_atlas_per_gene_mutation_df(self, gene_id):
g = self.reference_gempro.genes.get_by_id(gene_id)
(single, fingerprint) = g.protein.sequence_mutation_summary(alignment_type='seqalign')
structure_type_suffix = 'NA'
appender = []
for (k, strains) in single.items():
to_append = {}
orig_res = k[0]
resnum = int(k[1])
mutated_res = k[2]
num_strains_mutated = len(strains)
strain_ids = [str(x.split((g.id + '_'))[1]) for x in strains]
to_append['ref_residue'] = orig_res
to_append['ref_resnum'] = resnum
to_append['strain_residue'] = mutated_res
to_append['num_strains_mutated'] = num_strains_mutated
to_append['strains_mutated'] = ';'.join(strain_ids)
to_append['at_disulfide_bridge'] = False
origres_props = ssbio.protein.sequence.properties.residues.residue_biochemical_definition(orig_res)
mutres_props = ssbio.protein.sequence.properties.residues.residue_biochemical_definition(mutated_res)
to_append['ref_residue_prop'] = origres_props
to_append['strain_residue_prop'] = mutres_props
(grantham_s, grantham_txt) = ssbio.protein.sequence.properties.residues.grantham_score(orig_res, mutated_res)
to_append['grantham_score'] = grantham_s
to_append['grantham_annotation'] = grantham_txt
to_append.update(g.protein.get_residue_annotations(seq_resnum=resnum, use_representatives=True))
if g.protein.representative_structure:
if g.protein.representative_structure.is_experimental:
to_append['structure_type'] = 'EXP'
else:
to_append['structure_type'] = 'HOM'
repchain = g.protein.representative_chain
repchain_annotations = g.protein.representative_structure.chains.get_by_id(repchain).seq_record.annotations
if ('SSBOND-biopython' in repchain_annotations):
structure_resnum = g.protein.map_seqprop_resnums_to_structprop_resnums(resnums=resnum, use_representatives=True)
if (resnum in structure_resnum):
ssbonds = repchain_annotations['SSBOND-biopython']
ssbonds_res = []
for x in ssbonds:
ssbonds_res.append(x[0])
ssbonds_res.append(x[1])
if (structure_resnum in ssbonds_res):
to_append['at_disulfide_bridge'] = True
appender.append(to_append)
if (not appender):
return pd.DataFrame()
    cols = ['ref_residue', 'ref_resnum', 'strain_residue', 'num_strains_mutated', 'strains_mutated', 'ref_residue_prop', 'strain_residue_prop', 'grantham_score', 'grantham_annotation', 'at_disulfide_bridge', 'seq_SS-sspro', 'seq_SS-sspro8', 'seq_RSA-accpro', 'seq_RSA-accpro20', 'seq_TM-tmhmm', 'struct_SS-dssp', 'struct_RSA-dssp', 'struct_ASA-dssp', 'struct_CA_DEPTH-msms', 'struct_RES_DEPTH-msms', 'struct_PHI-dssp', 'struct_PSI-dssp', 'struct_resnum', 'struct_residue']
df_gene_summary = pd.DataFrame.from_records(appender, columns=cols)
df_gene_summary.dropna(axis=1, how='all', inplace=True)
df_gene_summary.sort_values(by='ref_resnum', inplace=True)
df_gene_summary = df_gene_summary.set_index('ref_resnum')
return df_gene_summary
|
Create a single data frame which summarizes a gene and its mutations.
Args:
gene_id (str): Gene ID in the base model
Returns:
DataFrame: Pandas DataFrame of the results
|
codesearchnet
|
def compose_tree_url(tree, issn_url=False):
url = compose_tree_path(tree, issn_url)
if (WEB_PORT == 80):
        # Assumed reconstruction: the format strings were truncated in this dump at '//';
        # WEB_PROTOCOL and WEB_ADDR are presumed module-level constants alongside WEB_PORT.
        return ('%s://%s%s' % (WEB_PROTOCOL, WEB_ADDR, url))
    return ('%s://%s:%d%s' % (WEB_PROTOCOL, WEB_ADDR, WEB_PORT, url))
|
Compose full url for given `tree`, with protocol, server's address and
port.
Args:
tree (obj): :class:`.Tree` instance.
issn_url (bool, default False): Compose URL using ISSN.
Returns:
str: URL of the tree
|
codesearchnet
|
def get_channel(self, chan_name, coll_name, exp_name):
chan = ChannelResource(chan_name, coll_name, exp_name)
return self.get_project(chan)
|
Helper that gets a fully initialized ChannelResource for an *existing* channel.
Args:
chan_name (str): Name of channel.
coll_name (str): Name of channel's collection.
exp_name (str): Name of channel's experiment.
Returns:
(intern.resource.boss.ChannelResource)
|
juraj-google-style
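A hedged usage sketch; `rmt` stands in for an already-configured remote client that exposes this helper, and the resource names are illustrative.
# Hypothetical client and names.
chan = rmt.get_channel('em_channel', 'my_collection', 'my_experiment')
# `chan` is a fully initialized ChannelResource for the existing channel.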
|
def FindFileByName(self, file_name):
try:
return self._file_descriptors[file_name]
except KeyError:
pass
try:
file_proto = self._internal_db.FindFileByName(file_name)
except KeyError as error:
if self._descriptor_db:
file_proto = self._descriptor_db.FindFileByName(file_name)
else:
raise error
if not file_proto:
raise KeyError('Cannot find a file named %s' % file_name)
return self._ConvertFileProtoToFileDescriptor(file_proto)
|
Gets a FileDescriptor by file name.
Args:
file_name: The path to the file to get a descriptor for.
Returns:
A FileDescriptor for the named file.
Raises:
KeyError: if the file cannot be found in the pool.
|
juraj-google-style
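A short sketch of the same lookup against the standard protobuf descriptor pool, which exposes the same method name; importing a generated module is assumed to register its file with the default pool.
from google.protobuf import descriptor_pool
from google.protobuf import timestamp_pb2  # importing registers the .proto file

pool = descriptor_pool.Default()
fd = pool.FindFileByName('google/protobuf/timestamp.proto')
print(fd.package, list(fd.message_types_by_name))  # 'google.protobuf' ['Timestamp']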
|
def _create_trial_info(self, expr_dir):
meta = self._build_trial_meta(expr_dir)
self.logger.debug("Create trial for %s" % meta)
trial_record = TrialRecord.from_json(meta)
trial_record.save()
|
Create information for given trial.
The meta file will be loaded if it exists, and the trial information
will be saved in the db backend.
Args:
expr_dir (str): Directory path of the experiment.
|
juraj-google-style
|
def bbox_line_intersect(nodes, line_start, line_end):
(left, right, bottom, top) = _helpers.bbox(nodes)
if (_helpers.in_interval(line_start[0], left, right) and _helpers.in_interval(line_start[1], bottom, top)):
return BoxIntersectionType.INTERSECTION
if (_helpers.in_interval(line_end[0], left, right) and _helpers.in_interval(line_end[1], bottom, top)):
return BoxIntersectionType.INTERSECTION
(s_bottom, t_bottom, success) = segment_intersection(np.asfortranarray([left, bottom]), np.asfortranarray([right, bottom]), line_start, line_end)
if (success and _helpers.in_interval(s_bottom, 0.0, 1.0) and _helpers.in_interval(t_bottom, 0.0, 1.0)):
return BoxIntersectionType.INTERSECTION
(s_right, t_right, success) = segment_intersection(np.asfortranarray([right, bottom]), np.asfortranarray([right, top]), line_start, line_end)
if (success and _helpers.in_interval(s_right, 0.0, 1.0) and _helpers.in_interval(t_right, 0.0, 1.0)):
return BoxIntersectionType.INTERSECTION
(s_top, t_top, success) = segment_intersection(np.asfortranarray([right, top]), np.asfortranarray([left, top]), line_start, line_end)
if (success and _helpers.in_interval(s_top, 0.0, 1.0) and _helpers.in_interval(t_top, 0.0, 1.0)):
return BoxIntersectionType.INTERSECTION
return BoxIntersectionType.DISJOINT
|
r"""Determine intersection of a bounding box and a line.
We do this by first checking if either the start or end node of the
segment are contained in the bounding box. If they aren't, then
checks if the line segment intersects any of the four sides of the
bounding box.
.. note::
This function is "half-finished". It makes no distinction between
"tangent" intersections of the box and segment and other types
of intersection. However, the distinction is worthwhile, so this
function should be "upgraded" at some point.
Args:
nodes (numpy.ndarray): Points (``2 x N``) that determine a
bounding box.
line_start (numpy.ndarray): Beginning of a line segment (1D
``2``-array).
line_end (numpy.ndarray): End of a line segment (1D ``2``-array).
Returns:
int: Enum from ``BoxIntersectionType`` indicating the type of
bounding box intersection.
|
codesearchnet
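A hedged sketch assuming the module's BoxIntersectionType enum and helpers are importable; the control net below spans the unit square and the segment crosses its right edge.
import numpy as np

nodes = np.asfortranarray([[0.0, 1.0, 1.0, 0.0],
                           [0.0, 0.0, 1.0, 1.0]])   # 2 x N points bounding the unit square
line_start = np.asfortranarray([-0.5, 0.5])
line_end = np.asfortranarray([1.5, 0.5])
kind = bbox_line_intersect(nodes, line_start, line_end)
assert kind == BoxIntersectionType.INTERSECTION     # the segment crosses the box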
|
def allocate(self, amount, child=None, update=True):
if (child is not None):
if (child not in self.children):
c = SecurityBase(child)
c.setup(self._universe)
c.update(self.now)
self._add_child(c)
self.children[child].allocate(amount)
else:
if (self.parent == self):
self.parent.adjust((- amount), update=False, flow=True)
else:
self.parent.adjust((- amount), update=False, flow=False)
self.adjust(amount, update=False, flow=True)
if (self.children is not None):
[c.allocate((amount * c._weight), update=False) for c in self._childrenv]
if update:
self.root.stale = True
|
Allocate capital to Strategy. By default, capital is allocated
recursively down the children, proportionally to the children's
weights. If a child is specified, capital will be allocated
to that specific child.
Allocations also have a side-effect. They will deduct the same amount
from the parent's "account" to offset the allocation. If there is
remaining capital after allocation, it will remain in Strategy.
Args:
* amount (float): Amount to allocate.
* child (str): If specified, allocation will be directed to child
only. Specified by name.
* update (bool): Force update.
|
codesearchnet
|
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
local_stream = utils.BytearrayStream()
if self._unique_identifier:
self._unique_identifier.write(local_stream, kmip_version=kmip_version)
if self._cryptographic_parameters:
self._cryptographic_parameters.write(local_stream, kmip_version=kmip_version)
if self._data:
self._data.write(local_stream, kmip_version=kmip_version)
if self._digested_data:
self._digested_data.write(local_stream, kmip_version=kmip_version)
if self._signature_data:
self._signature_data.write(local_stream, kmip_version=kmip_version)
if self._correlation_value:
self._correlation_value.write(local_stream, kmip_version=kmip_version)
if self._init_indicator:
self._init_indicator.write(local_stream, kmip_version=kmip_version)
if self._final_indicator:
self._final_indicator.write(local_stream, kmip_version=kmip_version)
self.length = local_stream.length()
super(SignatureVerifyRequestPayload, self).write(output_stream, kmip_version=kmip_version)
output_stream.write(local_stream.buffer)
|
Write the data encoding the SignatureVerify request payload to a
stream.
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is not defined.
|
codesearchnet
|
def util_granulate_time_series(time_series, scale):
n = len(time_series)
b = int(np.fix((n / scale)))
temp = np.reshape(time_series[0:(b * scale)], (b, scale))
cts = np.mean(temp, axis=1)
return cts
|
Extract coarse-grained time series
Args:
time_series: Time series
scale: Scale factor
Returns:
Vector of coarse-grained time series with given scale factor
|
codesearchnet
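A self-contained sketch of the coarse-graining step.
import numpy as np

ts = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
cts = util_granulate_time_series(ts, 3)
# Windows of length 3 are averaged; the trailing element that does not fill a window is dropped.
print(cts)  # [2. 5.]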
|
class RunEnsembleDetector(beam.PTransform[beam.PCollection[NestedKeyedInputT], beam.PCollection[NestedKeyedOutputT]]):
def __init__(self, ensemble_detector: EnsembleAnomalyDetector):
self._ensemble_detector = ensemble_detector
def expand(self, input: beam.PCollection[NestedKeyedInputT]) -> beam.PCollection[NestedKeyedOutputT]:
model_uuid = f'{self._ensemble_detector._model_id}:{uuid.uuid4().hex[:6]}'
assert self._ensemble_detector._sub_detectors is not None
if not self._ensemble_detector._sub_detectors:
raise ValueError(f'No detectors found at {model_uuid}')
results = []
for idx, detector in enumerate(self._ensemble_detector._sub_detectors):
if isinstance(detector, EnsembleAnomalyDetector):
results.append(input | f'Run Ensemble Detector at index {idx} ({model_uuid})' >> RunEnsembleDetector(detector))
elif isinstance(detector, OfflineDetector):
results.append(input | f'Run Offline Detector at index {idx} ({model_uuid})' >> RunOfflineDetector(detector))
else:
results.append(input | f'Run One Detector at index {idx} ({model_uuid})' >> RunOneDetector(detector))
if self._ensemble_detector._aggregation_strategy is None:
aggregation_type = 'Simple'
else:
aggregation_type = 'Custom'
ret = results | beam.Flatten() | f'Run {aggregation_type} Aggregation Strategy ({model_uuid})' >> RunAggregationStrategy(self._ensemble_detector._aggregation_strategy, self._ensemble_detector._model_id)
if self._ensemble_detector._threshold_criterion:
ret = ret | f'Run Threshold Criterion ({model_uuid})' >> RunThresholdCriterion(self._ensemble_detector._threshold_criterion)
return ret
|
Runs an ensemble of anomaly detectors on a PCollection of data.
This PTransform applies an `EnsembleAnomalyDetector` to the input data,
running each sub-detector and aggregating the results.
Args:
ensemble_detector: The `EnsembleAnomalyDetector` to run.
|
github-repos
|
def _page_to_title(page):
start_tag = u"<title>"
end_tag = u"</title>"
start_pos = page.find(start_tag)
end_pos = page.find(end_tag)
assert start_pos != -1
assert end_pos != -1
start_pos += len(start_tag)
return page[start_pos:end_pos]
|
Extract the title from a page.
Args:
page: a unicode string
Returns:
a unicode string
|
juraj-google-style
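A minimal sketch of the title extraction.
page = u'<page><title>Albert Einstein</title><text>...</text></page>'
assert _page_to_title(page) == u'Albert Einstein'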
|
def get_course_certificate(self, course_id, username):
return self.client.certificates(username).courses(course_id).get()
|
Retrieve the certificate for the given username for the given course_id.
Args:
* ``course_id`` (str): The string value of the course's unique identifier
* ``username`` (str): The username ID identifying the user for which to retrieve the certificate
Raises:
HttpNotFoundError if no certificate found for the given user+course.
Returns:
a dict containing:
* ``username``: A string representation of a user's username passed in the request.
* ``course_id``: A string representation of a Course ID.
* ``certificate_type``: A string representation of the certificate type.
* ``created_date``: Datetime the certificate was created (tz-aware).
* ``status``: A string representation of the certificate status.
* ``is_passing``: True if the certificate has a passing status, False if not.
* ``download_url``: A string representation of the certificate url.
* ``grade``: A string representation of a float for the user's course grade.
|
codesearchnet
|
def delete(self, *names: str, pipeline=False):
if pipeline:
self._pipeline.delete(*names)
else:
self._db.delete(*names)
|
Delete one or more keys specified by names.
Args:
names (str): Names of keys to delete
pipeline (bool): If True, queue the delete on the transaction pipeline instead of executing it immediately. Defaults to False.
|
juraj-google-style
|
def write_to_fp(self, fp):
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
text_parts = self._tokenize(self.text)
log.debug("text_parts: %i", len(text_parts))
assert text_parts, 'No text to send to TTS API'
for idx, part in enumerate(text_parts):
try:
part_tk = self.token.calculate_token(part)
except requests.exceptions.RequestException as e:
log.debug(str(e), exc_info=True)
raise gTTSError(
"Connection error during token calculation: %s" %
str(e))
payload = {'ie': 'UTF-8',
'q': part,
'tl': self.lang,
'ttsspeed': self.speed,
'total': len(text_parts),
'idx': idx,
'client': 'tw-ob',
'textlen': _len(part),
'tk': part_tk}
log.debug("payload-%i: %s", idx, payload)
try:
r = requests.get(self.GOOGLE_TTS_URL,
params=payload,
headers=self.GOOGLE_TTS_HEADERS,
proxies=urllib.request.getproxies(),
verify=False)
log.debug("headers-%i: %s", idx, r.request.headers)
log.debug("url-%i: %s", idx, r.request.url)
log.debug("status-%i: %s", idx, r.status_code)
r.raise_for_status()
except requests.exceptions.HTTPError:
raise gTTSError(tts=self, response=r)
except requests.exceptions.RequestException as e:
raise gTTSError(str(e))
try:
for chunk in r.iter_content(chunk_size=1024):
fp.write(chunk)
log.debug("part-%i written to %s", idx, fp)
except (AttributeError, TypeError) as e:
raise TypeError(
"'fp' is not a file-like object or it does not take bytes: %s" %
str(e))
|
Do the TTS API request and write bytes to a file-like object.
Args:
fp (file object): Any file-like object to write the ``mp3`` to.
Raises:
:class:`gTTSError`: When there's an error with the API request.
TypeError: When ``fp`` is not a file-like object that takes bytes.
|
juraj-google-style
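A hedged usage sketch assuming a gTTS-style object with this method; writing into an in-memory buffer keeps the example free of side effects until the final save.
import io

tts = gTTS(text='hello world', lang='en')  # constructor signature assumed
buf = io.BytesIO()
tts.write_to_fp(buf)                       # performs the API requests and writes mp3 bytes
with open('hello.mp3', 'wb') as f:
    f.write(buf.getvalue())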
|
def parse_default_property_value(property_name, property_type_id, default_value_string):
if ((property_type_id == PROPERTY_TYPE_EMBEDDED_SET_ID) and (default_value_string == '{}')):
return set()
elif ((property_type_id == PROPERTY_TYPE_EMBEDDED_LIST_ID) and (default_value_string == '[]')):
return list()
elif ((property_type_id == PROPERTY_TYPE_STRING_ID) and isinstance(default_value_string, six.string_types)):
return default_value_string
elif (property_type_id == PROPERTY_TYPE_BOOLEAN_ID):
return _parse_bool_default_value(property_name, default_value_string)
elif (property_type_id == PROPERTY_TYPE_DATETIME_ID):
return _parse_datetime_default_value(property_name, default_value_string)
elif (property_type_id == PROPERTY_TYPE_DATE_ID):
return _parse_date_default_value(property_name, default_value_string)
else:
raise AssertionError(u'Unsupported default value for property "{}" with type id {}: {}'.format(property_name, property_type_id, default_value_string))
|
Parse the default value string into its proper form given the property type ID.
Args:
property_name: string, the name of the property whose default value is being parsed.
Used primarily to construct meaningful error messages, should the default
value prove invalid.
property_type_id: int, one of the property type ID constants defined in this file that
OrientDB uses to designate the native type of a given property.
default_value_string: string, the textual representation of the default value
for the property, as returned by OrientDB's schema introspection code.
Returns:
an object of type matching the property that can be used as the property's default value.
For example, if the property is of string type, the return type will be a string, and if
the property is of list type, the return type will be a list.
Raises:
AssertionError, if the default value is not supported or does not match the
property's declared type (e.g. if a default of "[]" is set on an integer property).
|
codesearchnet
|
def image_preprocessing(image_buffer, bbox, train, thread_id=0):
if (bbox is None):
raise ValueError('Please supply a bounding box.')
image = decode_jpeg(image_buffer)
height = FLAGS.image_size
width = FLAGS.image_size
if train:
image = distort_image(image, height, width, bbox, thread_id)
else:
image = eval_image(image, height, width)
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
|
Decode and preprocess one image for evaluation or training.
Args:
image_buffer: JPEG encoded string Tensor
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
train: boolean
thread_id: integer indicating preprocessing thread
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
|
codesearchnet
|
def camel_to_title(name):
split = re.findall('[A-Z]?[a-z0-9]+|[A-Z]+(?=[A-Z]|$)', name)
ret = ' '.join(split)
ret = (ret[0].upper() + ret[1:])
return ret
|
Takes a camelCaseFieldName and returns an Title Case Field Name
Args:
name (str): E.g. camelCaseFieldName
Returns:
str: Title Case converted name. E.g. Camel Case Field Name
|
codesearchnet
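Two quick checks of the conversion above.
assert camel_to_title('camelCaseFieldName') == 'Camel Case Field Name'
assert camel_to_title('parseHTTPResponse2') == 'Parse HTTP Response2'  # runs of capitals stay grouped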
|
def sorted(field_name, ascending=True, fields=None, count=5):
if field_name is None:
raise Exception('Sort field must be specified')
direction = '' if ascending else ' DESC'
projection = Sampling._create_projection(fields)
return lambda sql: 'SELECT %s FROM (%s) ORDER BY %s%s LIMIT %d' % (projection, sql, field_name,
direction, count)
|
Provides a sampling strategy that picks from an ordered set of rows.
Args:
field_name: the name of the field to sort the rows by.
ascending: whether to sort in ascending direction or not.
fields: an optional list of field names to retrieve.
count: optional number of rows to limit the sampled results to.
Returns:
A sampling function that can be applied to get the initial few rows.
|
juraj-google-style
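A hedged sketch of how the returned closure wraps an inner query; Sampling.sorted is the assumed home of this helper (it references Sampling._create_projection), and the exact projection formatting is not guaranteed.
# Hypothetical call site.
sample = Sampling.sorted('timestamp', ascending=False, fields=['timestamp', 'status'], count=3)
query = sample('SELECT * FROM logs')
# roughly: SELECT <projection> FROM (SELECT * FROM logs) ORDER BY timestamp DESC LIMIT 3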
|
def process_cgmlst_results(df):
assert isinstance(df, pd.DataFrame)
markers = []
alleles = []
for x in df['qseqid']:
(marker, allele) = x.split('|')
markers.append(marker)
alleles.append(int(allele))
    df.loc[:, 'marker'] = markers
    df.loc[:, 'allele'] = alleles
    df.loc[:, 'is_match'] = (((df['coverage'] >= 1.0) & (df['pident'] >= 90.0)) & (~ df['is_trunc']))
    df.loc[:, 'allele_name'] = df.apply((lambda x: allele_name(x.sseq.replace('-', ''))), axis=1)
    df.loc[:, 'is_perfect'] = ((df['coverage'] == 1.0) & (df['pident'] == 100.0))
    df_perf = df[df['is_perfect']]
    perf_markers = df_perf['marker'].unique()
    df.loc[:, 'has_perfect_match'] = df['marker'].isin(perf_markers)
    (start_idxs, end_idxs, needs_revcomps, trunc, is_extended) = extend_subj_match_vec(df)
    df.loc[:, 'start_idx'] = start_idxs
    df.loc[:, 'end_idx'] = end_idxs
    df.loc[:, 'needs_revcomp'] = needs_revcomps
    df.loc[:, 'trunc'] = trunc
    df.loc[:, 'is_extended'] = is_extended
    df.loc[:, 'sseq_msa_gaps'] = np.zeros(df.shape[0], dtype=np.int64)
    df.loc[:, 'sseq_msa_p_gaps'] = np.zeros(df.shape[0], dtype=np.float64)
    df.loc[:, 'too_many_gaps'] = trunc
return df
|
Append informative fields to cgMLST330 BLAST results DataFrame
The `qseqid` column must contain cgMLST330 query IDs with `{marker name}|{allele number}` format.
The allele numbers and marker names parsed from `qseqid` are appended as new fields.
`is_perfect` column contains boolean values for whether an allele result is 100% identity and coverage.
`has_perfect_match` denotes if a cgMLST330 marker has a perfect allele match.
The top result with the largest bitscore for a marker with no perfect match is used to retrieve the allele present
at that marker locus.
Args:
df (pandas.DataFrame): DataFrame of cgMLST330 BLAST results
Returns:
pandas.DataFrame: cgMLST330 BLAST results DataFrame with extra fields (`marker`, `allele`, `is_perfect`, `has_perfect_match`)
|
codesearchnet
|
def metar_to_speech(metar: str) -> str:
LOGGER.info('getting speech text from METAR: %s', metar)
(metar_data, metar_units) = emiz.avwx.metar.parse_in(metar)
speech = emiz.avwx.speech.metar(metar_data, metar_units)
speech = str(speech).replace('Altimeter', 'Q N H')
LOGGER.debug('resulting speech: %s', speech)
return speech
|
Creates speakable text from a METAR
Args:
metar: METAR string to use
Returns: speakable METAR for TTS
|
codesearchnet
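A hedged usage sketch; the METAR string is illustrative and the exact speech wording depends on the avwx version in use.
metar = 'KJFK 051951Z 18004KT 10SM FEW250 26/12 A3001'
speech = metar_to_speech(metar)
# Spoken-style rendition with 'Altimeter' replaced by 'Q N H'.
print(speech)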
|
def title_of_design_condition(self, value=None):
if value is not None:
try:
value = str(value)
except ValueError:
raise ValueError(
'value {} need to be of type str '
'for field `title_of_design_condition`'.format(value))
if ',' in value:
raise ValueError('value should not contain a comma '
'for field `title_of_design_condition`')
self._title_of_design_condition = value
|
Corresponds to IDD Field `title_of_design_condition`
Args:
value (str): value for IDD Field `title_of_design_condition`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|