code (string, 20-4.93k chars) | docstring (string, 33-1.27k chars) | source (string, 3 classes) |
---|---|---|
def _bytestringToValuelist(bytestring, numberOfRegisters):
_checkInt(numberOfRegisters, minvalue=1, description='number of registers')
numberOfBytes = _NUMBER_OF_BYTES_PER_REGISTER * numberOfRegisters
_checkString(bytestring, 'byte string', minlength=numberOfBytes, maxlength=numberOfBytes)
values = []
for i in range(numberOfRegisters):
offset = _NUMBER_OF_BYTES_PER_REGISTER * i
substring = bytestring[offset : offset + _NUMBER_OF_BYTES_PER_REGISTER]
values.append(_twoByteStringToNum(substring))
return values
|
Convert a bytestring to a list of numerical values.
The bytestring is interpreted as 'unsigned INT16'.
Args:
* bytestring (str): The string from the slave. Length = 2*numberOfRegisters
* numberOfRegisters (int): The number of registers. For error checking.
Returns:
A list of integers.
Raises:
TypeError, ValueError
|
juraj-google-style
|
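For readers without access to minimalmodbus' private helpers, a minimal sketch of the same big-endian unsigned 16-bit decoding using only the standard library (`_twoByteStringToNum` is assumed to behave like `struct.unpack('>H', ...)`; the payload below is hypothetical):

import struct

raw = b"\x03\xe8\x00\x0a"  # two registers worth of bytes from a slave (hypothetical)
values = [struct.unpack(">H", raw[i:i + 2])[0] for i in range(0, len(raw), 2)]
print(values)  # [1000, 10]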
def _get_music_services_data_xml(soco=None):
device = soco or discovery.any_soco()
log.debug("Fetching music services data from %s", device)
available_services = device.musicServices.ListAvailableServices()
descriptor_list_xml = available_services[
'AvailableServiceDescriptorList']
log.debug("Services descriptor list: %s", descriptor_list_xml)
return descriptor_list_xml
|
Fetch the music services data xml from a Sonos device.
Args:
soco (SoCo): a SoCo instance to query. If none is specified, a
random device will be used. Defaults to `None`.
Returns:
str: a string containing the music services data xml
|
juraj-google-style
|
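A hedged usage sketch for `_get_music_services_data_xml`, assuming at least one Sonos device is discoverable on the local network (the function is a private helper, so this is purely illustrative):

import xml.dom.minidom

descriptor_xml = _get_music_services_data_xml()
print(xml.dom.minidom.parseString(descriptor_xml).toprettyxml()[:500])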
def _bfd_tx(self, **kwargs):
int_type = kwargs['int_type']
method_name = 'interface_%s_bfd_interval_min_tx' % int_type
bfd_tx = getattr(self._interface, method_name)
config = bfd_tx(**kwargs)
if kwargs['delete']:
tag = 'min-tx'
config.find('.//*%s' % tag).set('operation', 'delete')  # assumed completion following the usual pynos delete pattern
return config
|
Return the BFD minimum transmit interval XML.
You should not use this method.
You probably want `BGP.bfd`.
Args:
min_tx (str): BFD transmit interval in milliseconds (300, 500, etc)
delete (bool): Remove the configuration if ``True``.
Returns:
XML to be passed to the switch.
Raises:
None
|
juraj-google-style
|
def download_file_from_google_drive(file_id, root, filename=None, md5=None):
import requests
url = 'https://docs.google.com/uc?export=download'
root = os.path.expanduser(root)
if (not filename):
filename = file_id
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
if (os.path.isfile(fpath) and check_integrity(fpath, md5)):
print(('Using downloaded and verified file: ' + fpath))
else:
session = requests.Session()
response = session.get(url, params={'id': file_id}, stream=True)
token = _get_confirm_token(response)
if token:
params = {'id': file_id, 'confirm': token}
response = session.get(url, params=params, stream=True)
_save_response_content(response, fpath)
|
Download a Google Drive file and place it in root.
Args:
file_id (str): id of file to be downloaded
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the id of the file.
md5 (str, optional): MD5 checksum of the download. If None, do not check
|
codesearchnet
|
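A hedged usage sketch for `download_file_from_google_drive`; the file id below is a placeholder, not a real Drive id:

download_file_from_google_drive(
    file_id="0B_placeholder_id",
    root="~/datasets",
    filename="archive.tar.gz",
    md5=None,
)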
def CopyFromDateTimeString(self, time_string):
date_time_values = self._CopyDateTimeFromString(time_string)
year = date_time_values.get('year', 0)
month = date_time_values.get('month', 0)
day_of_month = date_time_values.get('day_of_month', 0)
hours = date_time_values.get('hours', 0)
minutes = date_time_values.get('minutes', 0)
seconds = date_time_values.get('seconds', 0)
microseconds = date_time_values.get('microseconds', 0)
milliseconds, _ = divmod(
microseconds, definitions.MICROSECONDS_PER_MILLISECOND)
if year < 1601 or year > 30827:
raise ValueError('Unsupported year value: {0:d}.'.format(year))
self._normalized_timestamp = None
self._number_of_seconds = self._GetNumberOfSecondsFromElements(
year, month, day_of_month, hours, minutes, seconds)
self.year = year
self.month = month
self.day_of_month = day_of_month
self.day_of_week = None
self.hours = hours
self.minutes = minutes
self.seconds = seconds
self.milliseconds = milliseconds
self.is_local_time = False
|
Copies a SYSTEMTIME structure from a date and time string.
Args:
time_string (str): date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds
fraction and time zone offset are optional. The default time zone
is UTC.
Raises:
ValueError: if the date string is invalid or not supported.
|
juraj-google-style
|
def read_label_file(path):
labels = []
for record in textfile.read_separated_lines_generator(path, separator='\t', max_columns=3):
value = ''
if (len(record) > 2):
value = str(record[2])
labels.append([float(_clean_time(record[0])), float(_clean_time(record[1])), value])
return labels
|
Read the labels from an Audacity label file.
Args:
path (str): Path to the label file.
Returns:
list: List of labels (start [sec], end [sec], label)
Example::
>>> read_label_file('/path/to/label/file.txt')
[
[0.0, 0.2, 'sie'],
[0.2, 2.2, 'hallo']
]
|
codesearchnet
|
def default_matrix(self):
matrix = (c_float * 6)()
rc = self._libinput.libinput_device_config_calibration_get_default_matrix(self._handle, matrix)
return (rc, tuple(matrix))
|
The default calibration matrix for this device.
On most devices, this is the identity matrix. If the udev property
``LIBINPUT_CALIBRATION_MATRIX`` is set on the respective udev device,
that property's value becomes the default matrix, see
`Static device configuration via udev`_.
Returns:
(bool, (float, float, float, float, float, float)): :obj:`False` if
no calibration is set and
the returned matrix is the identity matrix, :obj:`True`
otherwise. :obj:`tuple` representing the first two rows of
a 3x3 matrix as described
in :meth:`config_calibration_set_matrix`.
|
codesearchnet
|
def get_factors(n):
def factor(n, i, combi, res):
while i * i <= n:
if n % i == 0:
res += combi + [i, int(n/i)],
factor(n/i, i, combi+[i], res)
i += 1
return res
return factor(n, 2, [], [])
|
Compute all multiplicative factorizations of a number.
Arguments:
n {int} -- the number to analyse
Returns:
[list of lists] -- all factor combinations of the number n
|
juraj-google-style
|
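Tracing `get_factors` on small inputs shows the shape of the output (each inner list multiplies back to n):

print(get_factors(12))  # [[2, 6], [2, 2, 3], [3, 4]]
print(get_factors(16))  # [[2, 8], [2, 2, 4], [2, 2, 2, 2], [4, 4]]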
def songs(self, *, uploaded=True, purchased=True):
if ((not uploaded) and (not purchased)):
raise ValueError("'uploaded' and 'purchased' cannot both be False.")
if (purchased and uploaded):
song_list = []
for chunk in self.songs_iter(export_type=1):
song_list.extend(chunk)
elif purchased:
song_list = []
for chunk in self.songs_iter(export_type=2):
song_list.extend(chunk)
elif uploaded:
purchased_songs = []
for chunk in self.songs_iter(export_type=2):
purchased_songs.extend(chunk)
song_list = [song for chunk in self.songs_iter(export_type=1) for song in chunk if (song not in purchased_songs)]
return song_list
|
Get a listing of Music Library songs.
Returns:
list: Song dicts.
|
codesearchnet
|
def find_connected_atoms(struct, tolerance=0.45, ldict=JmolNN().el_radius):
n_atoms = len(struct.species)
fc = np.array(struct.frac_coords)
fc_copy = np.repeat(fc[:, :, np.newaxis], 27, axis=2)
neighbors = np.array(list(itertools.product([0, 1, (- 1)], [0, 1, (- 1)], [0, 1, (- 1)]))).T
neighbors = np.repeat(neighbors[np.newaxis, :, :], 1, axis=0)
fc_diff = (fc_copy - neighbors)
species = list(map(str, struct.species))
for (i, item) in enumerate(species):
if (not (item in ldict.keys())):
species[i] = str(Specie.from_string(item).element)
latmat = struct.lattice.matrix
connected_matrix = np.zeros((n_atoms, n_atoms))
for i in range(n_atoms):
for j in range((i + 1), n_atoms):
max_bond_length = ((ldict[species[i]] + ldict[species[j]]) + tolerance)
frac_diff = (fc_diff[j] - fc_copy[i])
distance_ij = np.dot(latmat.T, frac_diff)
if (sum((np.linalg.norm(distance_ij, axis=0) < max_bond_length)) > 0):
connected_matrix[(i, j)] = 1
connected_matrix[(j, i)] = 1
return connected_matrix
|
Finds bonded atoms and returns an adjacency matrix of bonded atoms.
Author: "Gowoon Cheon"
Email: "gcheon@stanford.edu"
Args:
struct (Structure): Input structure
tolerance: length in angstroms used in finding bonded atoms. Two atoms
are considered bonded if (radius of atom 1) + (radius of atom 2) +
(tolerance) < (distance between atoms 1 and 2). Default
value = 0.45, the value used by JMol and Cheon et al.
ldict: dictionary of bond lengths used in finding bonded atoms. Values
from JMol are used as default
Returns:
(np.ndarray): A numpy array of shape (number of atoms, number of atoms);
If any image of atom j is bonded to atom i with periodic boundary
conditions, the matrix element [atom i, atom j] is 1.
|
codesearchnet
|
def validate_json_against_schema(json_dict, schema, err_msg=None):
try:
if isinstance(schema, str):
schema_name = schema
schema = _SCHEMAS[schema_name]
validator = _get_validator(schema_name)
validator.validate(json_dict)
else:
jsonschema.validate(json_dict, schema)
except jsonschema.ValidationError as err:
if (err_msg is None):
err_msg = 'JSON failed validation. Set Qiskit log level to DEBUG for further information.'
newerr = SchemaValidationError(err_msg)
newerr.__cause__ = _SummaryValidationError(err)
logger.debug('%s', _format_causes(err))
raise newerr
|
Validates JSON dict against a schema.
Args:
json_dict (dict): JSON to be validated.
schema (dict or str): JSON schema dictionary or the name of one of the
standards schemas in Qiskit to validate against it. The list of
standard schemas is: ``backend_configuration``,
``backend_properties``, ``backend_status``,
``default_pulse_configuration``, ``job_status``, ``qobj``,
``result``.
err_msg (str): Optional error message.
Raises:
SchemaValidationError: Raised if validation fails.
|
codesearchnet
|
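A hedged usage sketch for `validate_json_against_schema` with an inline schema dictionary, which exercises only the plain `jsonschema.validate` branch (the named-schema branch needs Qiskit's bundled schemas):

schema = {
    "type": "object",
    "required": ["shots"],
    "properties": {"shots": {"type": "integer", "minimum": 1}},
}
validate_json_against_schema({"shots": 1024}, schema)    # passes silently
validate_json_against_schema({"shots": "many"}, schema)  # raises SchemaValidationError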
def traverse_preorder(self, leaves=True, internal=True):
s = deque()
s.append(self)
while (len(s) != 0):
n = s.pop()
if ((leaves and n.is_leaf()) or (internal and (not n.is_leaf()))):
(yield n)
s.extend(n.children)
|
Perform a preorder traversal starting at this ``Node`` object
Args:
``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``
``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
|
codesearchnet
|
def _default_tolerance(dtype):
if dtype == np.float16:
return 0.005
elif dtype in (np.float32, np.complex64):
return 0.001
elif dtype in (np.float64, np.complex128):
return 1e-05
else:
return None
|
Returns a sensible default tolerance for comparing results of a given type.
Args:
dtype: A datatype.
|
github-repos
|
def __init__(self, channel):
self.Watch = channel.stream_stream(
'/etcdserverpb.Watch/Watch',
request_serializer=rpc__pb2.WatchRequest.SerializeToString,
response_deserializer=rpc__pb2.WatchResponse.FromString,
)
|
Constructor.
Args:
channel: A grpc.Channel.
|
juraj-google-style
|
def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
line = clean_lines.elided[linenum]
match = Match(r'^(.*\S)&&', line)
if not match:
match = Match(r'(.*)&&\S', line)
if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)):
return
typenames = GetTemplateArgs(clean_lines, linenum)
and_pos = len(match.group(1))
if IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos):
if not IsRValueAllowed(clean_lines, linenum, typenames):
error(filename, linenum, 'build/c++11', 3,
'RValue references are an unapproved C++ feature.')
else:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around &&')
|
Check for rvalue references.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
|
juraj-google-style
|
def add(self, index, var):
import nnabla as nn
from nnabla.utils.image_utils import imsave
if ((index != 0) and (((index + 1) % self.interval) != 0)):
return
if isinstance(var, nn.Variable):
data = var.d.copy()
elif isinstance(var, nn.NdArray):
data = var.data.copy()
else:
assert isinstance(var, np.ndarray)
data = var.copy()
assert (data.ndim > 2)
channels = data.shape[(- 3)]
data = data.reshape((- 1), *data.shape[(- 3):])
data = data[:min(data.shape[0], self.num_images)]
data = self.normalize_method(data)
if (channels > 3):
data = data[:, :3]
elif (channels == 2):
data = np.concatenate([data, np.ones(((data.shape[0], 1) + data.shape[(- 2):]))], axis=1)
path_tmpl = os.path.join(self.save_dir, '{:06d}-{}.png')
for j in range(min(self.num_images, data.shape[0])):
img = data[j].transpose(1, 2, 0)
if (img.shape[(- 1)] == 1):
img = img[(..., 0)]
path = path_tmpl.format(index, '{:03d}'.format(j))
imsave(path, img)
if self.verbose:
logger.info('iter={} {{{}}} are written to {}.'.format(index, self.name, path_tmpl.format(index, '*')))
|
Add a minibatch of images to the monitor.
Args:
index (int): Index.
var (:obj:`~nnabla.Variable`, :obj:`~nnabla.NdArray`, or :obj:`~numpy.ndarray`):
A minibatch of images with ``(N, ..., C, H, W)`` format.
If C == 2, blue channel is appended with ones. If C > 3,
the array will be sliced to remove C > 3 sub-array.
|
codesearchnet
|
def query_snl(self, criteria):
try:
payload = {'criteria': json.dumps(criteria)}
response = self.session.post('{}/snl/query'.format(self.preamble), data=payload)
if (response.status_code in [200, 400]):
resp = json.loads(response.text)
if resp['valid_response']:
if resp.get('warning'):
warnings.warn(resp['warning'])
return resp['response']
else:
raise MPRestError(resp['error'])
raise MPRestError('REST error with status code {} and error {}'.format(response.status_code, response.text))
except Exception as ex:
raise MPRestError(str(ex))
|
Query for submitted SNLs.
.. note::
As of now, this MP REST feature is open only to a select group of
users. Opening up submissions to all users is being planned for
the future.
Args:
criteria (dict): Query criteria.
Returns:
A dict, with a list of submitted SNLs in the "response" key.
Raises:
MPRestError
|
codesearchnet
|
def register_hooked(self, hooks, func, args_gen=None):
if (self.hooked is None):
self.hooked = {}
if (args_gen is None):
args_gen = getattr(func, 'call_types', {}).keys
if (not isinstance(hooks, Sequence)):
hooks = [hooks]
for hook_cls in hooks:
self.hooked[hook_cls] = (func, args_gen)
|
Register func to be run when any of the hooks are run by parent
Args:
hooks: A Hook class or list of Hook classes of interest
func: The callable that should be run on that Hook
args_gen: Optionally specify the argument names that should be
passed to func. If not given then use func.call_types.keys
|
codesearchnet
|
def SetName(obj, name):
precondition.AssertType(name, str)
if PY2:
obj.__name__ = name.encode("ascii")
else:
obj.__name__ = name
|
A compatibility wrapper for setting object's name.
See documentation for `GetName` for more information.
Args:
obj: A type or function object to set the name for.
name: A name to set.
|
juraj-google-style
|
def segment_pofile(filename, segments):
reading_msg = 'Reading {num} entries from {file}'
writing_msg = 'Writing {num} entries to {file}'
source_po = polib.pofile(filename)
LOG.info(reading_msg.format(file=filename, num=len(source_po)))
remaining_po = copy.deepcopy(source_po)
remaining_po[:] = []
segment_po_files = {filename: remaining_po}
segment_patterns = []
for (segmentfile, patterns) in segments.items():
segment_po_files[segmentfile] = copy.deepcopy(remaining_po)
segment_patterns.extend(((pat, segmentfile) for pat in patterns))
for msg in source_po:
msg_segments = set()
for (occ_file, _) in msg.occurrences:
for (pat, segment_file) in segment_patterns:
if fnmatch.fnmatch(occ_file, pat):
msg_segments.add(segment_file)
break
else:
msg_segments.add(filename)
assert msg_segments
if (len(msg_segments) == 1):
segment_file = msg_segments.pop()
segment_po_files[segment_file].append(msg)
else:
remaining_po.append(msg)
files_written = set()
for (segment_file, pofile) in segment_po_files.items():
out_file = (filename.dirname() / segment_file)
if (not pofile):
LOG.error('No messages to write to %s, did you run segment twice?', out_file)
else:
LOG.info(writing_msg.format(file=out_file, num=len(pofile)))
pofile.save(out_file)
files_written.add(out_file)
return files_written
|
Segment a .po file using patterns in `segments`.
The .po file at `filename` is read, and the occurrence locations of its
messages are examined. `segments` is a dictionary: the keys are segment
.po filenames, the values are lists of patterns::
{
'django-studio.po': [
'cms/*',
'some-other-studio-place/*',
],
'django-weird.po': [
'*/weird_*.*',
],
}
If all a message's occurrences match the patterns for a segment, then that
message is written to the new segmented .po file.
Any message that matches no segments, or more than one, is written back to
the original file.
Arguments:
filename (path.path): a path object referring to the original .po file.
segments (dict): specification of the segments to create.
Returns:
a set of path objects, all the segment files written.
|
codesearchnet
|
def load(png_filename):
png_filename = os.path.expanduser(png_filename)
try:
img = Image.open(png_filename)
except Exception as e:
raise ValueError("Could not load file {0} for conversion."
.format(png_filename)) from e
return numpy.array(img)
|
Import a png file into a numpy array.
Arguments:
png_filename (str): A string filename of a png datafile
Returns:
A numpy array with data from the png file
|
juraj-google-style
|
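A hedged usage sketch for `load`; the path is hypothetical and any PNG readable by Pillow will do:

arr = load("~/scans/slice_0001.png")
print(arr.shape, arr.dtype)  # e.g. (512, 512) uint8 for an 8-bit grayscale image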
def _ParseJournalEntry(self, file_object, file_offset):
entry_object = self._ParseEntryObject(file_object, file_offset)
entry_item_map = self._GetDataTypeMap('systemd_journal_entry_item')
file_offset += 64
data_end_offset = file_offset + entry_object.data_size - 64
fields = {'real_time': entry_object.real_time}
while file_offset < data_end_offset:
try:
entry_item, entry_item_data_size = self._ReadStructureFromFileObject(
file_object, file_offset, entry_item_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse entry item at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
file_offset += entry_item_data_size
if entry_item.object_offset < self._maximum_journal_file_offset:
raise errors.ParseError(
'object offset should be after hash tables ({0:d} < {1:d})'.format(
entry_item.object_offset, self._maximum_journal_file_offset))
event_data = self._ParseDataObject(file_object, entry_item.object_offset)
event_string = event_data.decode('utf-8')
key, value = event_string.split('=', 1)
fields[key] = value
return fields
|
Parses a journal entry.
This method will generate an event per ENTRY object.
Args:
file_object (dfvfs.FileIO): a file-like object.
file_offset (int): offset of the entry object relative to the start
of the file-like object.
Returns:
dict[str, objects]: entry items per key.
Raises:
ParseError: when an object offset is out of bounds.
|
juraj-google-style
|
def _Open(self, path_spec, mode='rb'):
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
try:
fsnfts_volume = pyfsntfs.volume()
fsnfts_volume.open_file_object(file_object)
except:
file_object.close()
raise
self._file_object = file_object
self._fsntfs_volume = fsnfts_volume
|
Opens the file system object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
juraj-google-style
|
def format_cert_name(env='', account='', region='', certificate=None):
cert_name = None
if certificate:
if certificate.startswith('arn'):
LOG.info("Full ARN provided...skipping lookup.")
cert_name = certificate
else:
generated_cert_name = generate_custom_cert_name(env, region, account, certificate)
if generated_cert_name:
LOG.info("Found generated certificate %s from template", generated_cert_name)
cert_name = generated_cert_name
else:
LOG.info("Using default certificate name logic")
cert_name = ('arn:aws:iam::{account}:server-certificate/{name}'.format(
account=account, name=certificate))
LOG.debug('Certificate name: %s', cert_name)
return cert_name
|
Format the SSL certificate name into ARN for ELB.
Args:
env (str): Account environment name
account (str): Account number for ARN
region (str): AWS Region.
certificate (str): Name of SSL certificate
Returns:
str: Fully qualified ARN for SSL certificate
None: if no certificate is desired.
|
juraj-google-style
|
def seek(self, offset, whence=os.SEEK_SET):
if not self._is_open:
raise IOError('Not opened.')
if self._fsntfs_data_stream:
self._fsntfs_data_stream.seek(offset, whence)
else:
self._fsntfs_file_entry.seek(offset, whence)
|
Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek to.
whence (Optional(int)): value that indicates whether offset is an absolute
or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed.
|
juraj-google-style
|
def GetProperties(cls, path_spec):
properties = {}
for property_name in cls.PROPERTY_NAMES:
if hasattr(path_spec, property_name):
properties[property_name] = getattr(path_spec, property_name)
return properties
|
Retrieves a dictionary containing the path specification properties.
Args:
path_spec (PathSpec): path specification.
Returns:
dict[str, str]: path specification properties.
|
juraj-google-style
|
def UsesArtifact(self, artifacts):
if isinstance(artifacts, string_types):
return (artifacts in self.artifacts)
else:
return any((True for artifact in artifacts if (artifact in self.artifacts)))
|
Determines if the check uses the specified artifact.
Args:
artifacts: Either a single artifact name, or a list of artifact names
Returns:
True if the check uses any of the specified artifacts.
|
codesearchnet
|
def create(self, project_id=None):
if not self.exists():
if project_id is None:
project_id = self._api.project_id
try:
self._info = self._api.buckets_insert(self._name, project_id=project_id)
except Exception as e:
raise e
return self
|
Creates the bucket.
Args:
project_id: the project in which to create the bucket.
Returns:
The bucket.
Raises:
Exception if there was an error creating the bucket.
|
juraj-google-style
|
def set_attribute(self, key, value):
if ((not isinstance(key, str)) or (not isinstance(value, str))):
raise ValueError("The arguments 'key' and 'value' must both be strings. Instead they are {} and {}.".format(key, value))
self.extra_data[key] = value
|
Add a key-value pair to the extra_data dict.
This can be used to add attributes that are not available when
ray.profile was called.
Args:
key: The attribute name.
value: The attribute value.
|
codesearchnet
|
def sum(x, axis=None, keepdims=False):
if any_symbolic_tensors((x,)):
return Sum(axis=axis, keepdims=keepdims).symbolic_call(x)
return backend.numpy.sum(x, axis=axis, keepdims=keepdims)
|
Sum of a tensor over the given axes.
Args:
x: Input tensor.
axis: Axis or axes along which the sum is computed. The default is to
compute the sum of the flattened tensor.
keepdims: If this is set to `True`, the axes which are reduced are left
in the result as dimensions with size one.
Returns:
Output tensor containing the sum.
|
github-repos
|
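A usage sketch, assuming Keras 3 where this operation is exposed as `keras.ops.sum`:

import numpy as np
from keras import ops

x = np.array([[1.0, 2.0], [3.0, 4.0]])
print(ops.sum(x))                         # 10.0
print(ops.sum(x, axis=0))                 # [4. 6.]
print(ops.sum(x, axis=1, keepdims=True))  # [[3.] [7.]]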
def make_tex_table(inputlist, outputfile, close=False, fmt=None,
**kwargs):
output_str = ""
if fmt is None:
fmt = {}
for row in inputlist:
for key, val in enumerate(row):
if val is None:
output_str += r'\text{{{}}}'.format(
str(kwargs.get("nonestring", "None"))
)
else:
if np.isscalar(val):
temp_str_fmt = "$\\num{{" + fmt.get(
key, "{:g}") + "}}$"
else:
temp_str_fmt = fmt.get(key, "{}")
temp_str = temp_str_fmt.format(val).replace("+", "")
output_str += temp_str + "&"
output_str = output_str[:-1]
output_str += "\\\\\n"
outputfile.write(output_str)
if close:
outputfile.close()
|
Parse table from inputlist
Args:
inputlist: list
List to parse
outputfile: file
.tex file to write
fmt: dictionary
key: integer
column index starting with 0
values: string
format string. eg "{:g}"
**kwargs:
nonestring: string
string written when a value is None
Returns:
None
|
juraj-google-style
|
def generate_workflow_description(self):
if (not self.tasks):
raise WorkflowError('Workflow contains no tasks, and cannot be executed.')
self.definition = self.workflow_skeleton()
if self.batch_values:
self.definition['batch_values'] = self.batch_values
all_input_port_values = [t.inputs.__getattribute__(input_port_name).value for t in self.tasks for input_port_name in t.inputs._portnames]
for task in self.tasks:
output_multiplex_ports_to_exclude = []
multiplex_output_port_names = [portname for portname in task.outputs._portnames if task.outputs.__getattribute__(portname).is_multiplex]
for p in multiplex_output_port_names:
output_port_reference = ((('source:' + task.name) + ':') + p)
if (output_port_reference not in all_input_port_values):
output_multiplex_ports_to_exclude.append(p)
task_def = task.generate_task_workflow_json(output_multiplex_ports_to_exclude=output_multiplex_ports_to_exclude)
self.definition['tasks'].append(task_def)
if self.callback:
self.definition['callback'] = self.callback
return self.definition
|
Generate workflow json for launching the workflow against the gbdx api
Args:
None
Returns:
json string
|
codesearchnet
|
def id(self, value):
if value == self._defaults['ai.device.id'] and 'ai.device.id' in self._values:
del self._values['ai.device.id']
else:
self._values['ai.device.id'] = value
|
The id property.
Args:
value (string): the property value.
|
juraj-google-style
|
def argsort(*args, **kwargs):
if len(args) == 1 and isinstance(args[0], dict):
dict_ = args[0]
index_list = list(dict_.keys())
value_list = list(dict_.values())
return sortedby2(index_list, value_list)
else:
index_list = list(range(len(args[0])))
return sortedby2(index_list, *args, **kwargs)
|
like np.argsort but for lists
Args:
*args: multiple lists to sort by
**kwargs:
reverse (bool): sort order is descending if True else ascending
CommandLine:
python -m utool.util_list argsort
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> result = ut.argsort({'a': 3, 'b': 2, 'c': 100})
>>> print(result)
|
juraj-google-style
|
def profile_update_args_v2(self, profile):
ij = self.load_install_json(profile.get('install_json', 'install.json'))
if ((profile.get('args', {}).get('app') is None) and (profile.get('args', {}).get('default') is None)):
_args = profile.pop('args')
profile['args'] = {}
profile['args']['app'] = {}
profile['args']['default'] = {}
for arg in self.profile_settings_args_install_json(ij, None):
try:
profile['args']['app'][arg] = _args.pop(arg)
except KeyError:
if self.args.verbose:
print('{}{}Input "{}" not found in profile "{}".'.format(c.Style.BRIGHT, c.Fore.YELLOW, arg, profile.get('profile_name')))
profile['args']['default'] = _args
print('{}{}Updating args section to v2 schema for profile {}.'.format(c.Style.BRIGHT, c.Fore.YELLOW, profile.get('profile_name')))
|
Update v1 profile args to v2 schema for args.
.. code-block:: javascript
"args": {
"app": {
"input_strings": "capitalize",
"tc_action": "Capitalize"
}
},
"default": {
"api_access_id": "$env.API_ACCESS_ID",
"api_default_org": "$env.API_DEFAULT_ORG",
},
Args:
profile (dict): The dictionary containing the profile settings.
|
codesearchnet
|
def _validate_instantiation_options(self, datafile, skip_json_validation):
if not skip_json_validation and not validator.is_datafile_valid(datafile):
raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('datafile'))
if not validator.is_event_dispatcher_valid(self.event_dispatcher):
raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('event_dispatcher'))
if not validator.is_logger_valid(self.logger):
raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('logger'))
if not validator.is_error_handler_valid(self.error_handler):
raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('error_handler'))
|
Helper method to validate all instantiation parameters.
Args:
datafile: JSON string representing the project.
skip_json_validation: Boolean representing whether JSON schema validation needs to be skipped or not.
Raises:
Exception: if provided instantiation options are not valid.
|
juraj-google-style
|
def get_task_ops(task_type=TaskType.ALG_CTRL):
try:
return LearnToExecuteState.TASK_TYPE_OPS[task_type]
except KeyError:
raise KeyError("Bad task_type '%s', check config." % task_type)
|
Returns an operations list based on the specified task index.
Args:
task_type: indicates the task type used.
Returns:
List of the eligible ops.
|
juraj-google-style
|
def _get_object_from_python_path(python_path):
python_path = python_path.split('.')
module_path = python_path[:(- 1)]
object_class = python_path[(- 1)]
if isinstance(module_path, list):
module_path = '.'.join(module_path)
module = import_module(module_path)
schema = getattr(module, object_class)
if isclass(schema):
schema = schema()
return schema
|
Method that will fetch a Marshmallow schema from a path to it.
Args:
python_path (str): The string path to the Marshmallow schema.
Returns:
marshmallow.Schema: The schema matching the provided path.
Raises:
TypeError: This is raised if the specified object isn't
a Marshmallow schema.
|
codesearchnet
|
def __init__(self, location):
super(ContextFieldExistence, self).__init__(location)
self.location = location
self.validate()
|
Construct a new ContextFieldExistence object for a vertex field from the global context.
Args:
location: Location, specifying where the field was declared. Must point to a vertex.
Returns:
new ContextFieldExistence expression which evaluates to True iff the vertex exists
|
juraj-google-style
|
def to_dict(self) -> Dict[str, Any]:
output = copy.deepcopy(self.__dict__)
return output
|
Serializes this instance to a Python dictionary.
Returns:
Dict[str, Any]: Dictionary of all the attributes that make up this configuration instance.
|
github-repos
|
def __init__(self, server_port, stream_handler_class):
self._server_port = server_port
self._stream_handler_class = stream_handler_class
self._server_lock = threading.Lock()
self._server_started = False
self._stop_requested = False
self._debug_ops_state_change_queue = queue.Queue()
self._gated_grpc_debug_watches = set()
self._breakpoints = set()
|
Constructor.
Args:
server_port: (int) Port number to bind to.
stream_handler_class: A class of the base class
`EventListenerBaseStreamHandler` that will be used to construct
stream handler objects during `SendEvents` calls.
|
github-repos
|
def put_many(self, items: Iterable[T], context: PipelineContext = None) -> None:
LOGGER.info("Creating transform generator for items \"{items}\" for sink \"{sink}\"".format(items=items, sink=self._sink))
transform_generator = (self._transform(data=item, context=context) for item in items)
LOGGER.info("Putting transform generator for items \"{items}\" into sink \"{sink}\"".format(items=items, sink=self._sink))
self._sink.put_many(self._store_type, transform_generator, context)
|
Puts multiple objects of the same type into the data sink. The objects may be transformed into a new type for insertion if necessary.
Args:
items: An iterable (e.g. list) of objects to be inserted into the data sink.
context: The context of the insertions (mutable).
|
juraj-google-style
|
def epoch_to_log_line_timestamp(epoch_time, time_zone=None):
s, ms = divmod(epoch_time, 1000)
d = datetime.datetime.fromtimestamp(s, tz=time_zone)
return d.strftime('%m-%d %H:%M:%S.') + str(ms)
|
Converts an epoch timestamp in ms to log line timestamp format, which
is readable for humans.
Args:
epoch_time: integer, an epoch timestamp in ms.
time_zone: instance of tzinfo, time zone information.
Using pytz rather than python 3.2 time_zone implementation for
python 2 compatibility reasons.
Returns:
A string that is the corresponding timestamp in log line timestamp
format.
|
github-repos
|
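A quick check of `epoch_to_log_line_timestamp`, pinning the time zone to UTC so the output is reproducible (1578002400123 ms is 2020-01-02 22:00:00 UTC):

import datetime

print(epoch_to_log_line_timestamp(1578002400123, time_zone=datetime.timezone.utc))
# 01-02 22:00:00.123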
def upload(s3_conn, filepath, s3_path):
(bucket_name, prefix) = split_s3_path(s3_path)
bucket = s3_conn.get_bucket(bucket_name)
filename = os.path.basename(filepath)
key = boto.s3.key.Key(bucket=bucket, name='{}/{}'.format(prefix, filename))
logging.info('uploading from %s to %s', filepath, key)
key.set_contents_from_filename(filepath)
|
Uploads the given file to s3
Args:
s3_conn: (boto.s3.connection) an s3 connection
filepath (str) the local filename
s3_path (str) the destination path on s3
|
codesearchnet
|
def fileSave(self, filePath=None, updatePath=False):
if (not filePath):
filePath = self.filePath
if (not os.path.isfile(filePath)):
print(("Data file '%s' does not exist, will create new file." % filePath))
if (not os.path.exists(os.path.split(filePath)[0])):
os.makedirs(os.path.split(filePath)[0])
dataJsonString = json.dumps(self.data, indent=4, sort_keys=True)
print(("Writing to file '%s' ... " % filePath), end='', flush=True)
with open(filePath, 'w') as fileout:
fileout.write(dataJsonString)
print('Wrote file!')
if updatePath:
self.filePath = filePath
|
Write the internal JSON data dictionary to a JSON data file.
If no file path is provided, the stored data file path will be used.
Args:
filePath (Optional[str]): A relative or absolute path to a
'.json' file. Defaults to None.
updatePath (Optional[bool]): Specifies whether or not to update
the stored data file path. Defaults to False.
|
codesearchnet
|
def get_substring_idxs(substr, string):
return [match.start() for match in re.finditer(substr, string)]
|
Return a list of indexes of substr. If substr not found, list is
empty.
Arguments:
substr (str): Substring to match.
string (str): String to match in.
Returns:
list of int: Start indices of substr.
|
codesearchnet
|
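A usage sketch for `get_substring_idxs`; note that because `re.finditer` is used, `substr` is treated as a regular expression, so metacharacters need escaping:

print(get_substring_idxs("an", "banana"))  # [1, 3]
print(get_substring_idxs("x", "banana"))   # []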
def remove_padding(sequence):
length = sequence.pop('length')
sequence = tools.nested.map(lambda tensor: tensor[:length], sequence)
return sequence
|
Selects the used frames of a sequence, up to its length.
This function does not expect a batch of sequences, but a single sequence.
The sequence must be a dict with `length` key, which will removed from the
result.
Args:
sequence: Nested dict of tensors with time dimension.
Returns:
Nested dict of tensors with padding elements and `length` key removed.
|
juraj-google-style
|
def pprnt(input, return_data=False):
HEADER = '\x1b[95m'
OKBLUE = '\x1b[94m'
OKGREEN = '\x1b[32m'
WARNING = '\x1b[93m'
FAIL = '\x1b[91m'
ENDC = '\x1b[0m'
BOLD = '\x1b[1m'
UNDERLINE = '\x1b[4m'
import json, re
result = json.dumps(input, sort_keys=True, indent=4)
result = re.sub('(")(\\w*?_id)(":)', ('\\1%s%s\\2%s\\3' % (BOLD, HEADER, ENDC)), result)
result = re.sub('(")(\\w*?_set)(":)', ('\\1%s%s\\2%s\\3' % (BOLD, HEADER, ENDC)), result)
result = re.sub('(\\n *?")(\\w*?)(":)', ('\\1%s%s\\2%s\\3' % (BOLD, OKGREEN, ENDC)), result)
if (not return_data):
print(result)
else:
return result
|
Prettier print for nested data
Args:
input: Input data
return_data (bool): Default False. Prints the output if False, returns it if True.
Returns:
None | Pretty formatted text representation of input data.
|
codesearchnet
|
def _get_args_and_defaults(args, defaults):
defaults = defaults or []
args_and_defaults = [(argument, default) for (argument, default)
in zip_longest(args[::-1], defaults[::-1],
fillvalue=NoDefault)]
return args_and_defaults[::-1]
|
Return a list of 2-tuples - the argument name and its default value or
a special value that indicates there is no default value.
Args:
args: list of argument name
defaults: tuple of default values
|
juraj-google-style
|
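Working through a small call to `_get_args_and_defaults` shows the right-alignment of defaults (`NoDefault` is the module's sentinel for "no default value"):

_get_args_and_defaults(["first", "second", "third"], (10, 20))
# [('first', NoDefault), ('second', 10), ('third', 20)]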
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
artists_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'])
with open(artists_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.artists_encoder, ensure_ascii=False))
genres_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'])
with open(genres_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.genres_encoder, ensure_ascii=False))
lyrics_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'])
with open(lyrics_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))
return (artists_file, genres_file, lyrics_file)
|
Saves the tokenizer's vocabulary dictionary to the provided save_directory.
Args:
save_directory (`str`):
A path to the directory where the vocabulary will be saved. It will be created if it doesn't exist.
filename_prefix (`Optional[str]`, *optional*):
A prefix to add to the names of the files saved by the tokenizer.
|
github-repos
|
def _convert_tflite_enum_type_to_tf_type(tflite_enum_type):
tf_type = _MAP_TFLITE_ENUM_TO_TF_TYPES.get(tflite_enum_type)
if tf_type is None:
raise ValueError('Unsupported enum {}. The valid map of enum to tf types is : {}'.format(tflite_enum_type, _MAP_TFLITE_ENUM_TO_TF_TYPES))
return tf_type
|
Converts tflite enum type (eg: 0) to tf type (eg: tf.float32).
Args:
tflite_enum_type: tflite enum type (eg: 0, that corresponds to float32)
Raises:
ValueError: If an invalid tflite enum type is provided.
Returns:
tf type (eg: tf.float32)
|
github-repos
|
def _get_int_removals_helper(self, spec_amts_oxi, oxid_el, oxid_els, numa):
oxid_old = min([spec.oxi_state for spec in spec_amts_oxi if spec.symbol == oxid_el.symbol])
oxid_new = math.floor(oxid_old + 1)
if oxid_new > oxid_el.max_oxidation_state:
return numa
spec_old = Specie(oxid_el.symbol, oxid_old)
spec_new = Specie(oxid_el.symbol, oxid_new)
specamt = spec_amts_oxi[spec_old]
spec_amts_oxi = {sp: amt for sp, amt in spec_amts_oxi.items() if sp != spec_old}
spec_amts_oxi[spec_new] = specamt
spec_amts_oxi = Composition(spec_amts_oxi)
oxi_noA = sum([spec.oxi_state * spec_amts_oxi[spec] for spec in spec_amts_oxi if
spec.symbol not in self.cation.symbol])
a = max(0, -oxi_noA / self.cation_charge)
numa = numa.union({a})
if a == 0:
return numa
else:
for oxid_el in oxid_els:
numa = numa.union(
self._get_int_removals_helper(spec_amts_oxi.copy(), oxid_el, oxid_els, numa))
return numa
|
This is a helper method for get_removals_int_oxid!
Args:
spec_amts_oxi - a dict of species to their amounts in the structure
oxid_el - the element to oxidize
oxid_els - the full list of elements that might be oxidized
numa - a running set of numbers of A cation at integer oxidation steps
Returns:
a set of numbers A; steps for oxidizing oxid_el first, then the other oxid_els in this list
|
juraj-google-style
|
def __init__(self, instruments, scripts = None, name=None, settings=None, log_function=None, data_path = None):
Script.__init__(self, name, settings=settings, scripts=scripts, instruments=instruments, log_function=log_function, data_path = data_path)
self.data = {'plant_output': deque(maxlen=self.settings['buffer_length']),
'control_output': deque(maxlen=self.settings['buffer_length'])}
|
Example of a script that emits a QT signal for the gui
Args:
name (optional): name of script, if empty same as class name
settings (optional): settings for this script, if empty same as default settings
|
juraj-google-style
|
def do_labels_update(self, info, labels):
if self.update_label_func:
self.update_label_func(self.label_name, info, labels)
|
Updates a dictionary of labels using the assigned update_op_func
Args:
info (:class:`endpoints_management.control.report_request.Info`): the
info instance to update
labels (dict[string[string]]): the labels dictionary
|
codesearchnet
|
def convert_tanh(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting tanh ...')
if (names == 'short'):
tf_name = ('TANH' + random_string(4))
elif (names == 'keep'):
tf_name = w_name
else:
tf_name = (w_name + str(random.random()))
tanh = keras.layers.Activation('tanh', name=tf_name)
layers[scope_name] = tanh(layers[inputs[0]])
|
Convert tanh layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
|
codesearchnet
|
def query(self, watch_key, time_indices=None, slicing=None, mapping=None):
if (watch_key not in self._tensor_data):
raise KeyError(('watch_key not found: %s' % watch_key))
if (time_indices is None):
time_indices = '-1'
time_slicing = tensor_helper.parse_time_indices(time_indices)
all_time_indices = list(range(self._tensor_data[watch_key].num_total()))
sliced_time_indices = all_time_indices[time_slicing]
if (not isinstance(sliced_time_indices, list)):
sliced_time_indices = [sliced_time_indices]
recombine_and_map = False
step_mapping = mapping
if ((len(sliced_time_indices) > 1) and (mapping not in (None,))):
recombine_and_map = True
step_mapping = None
output = []
for index in sliced_time_indices:
value = self._tensor_data[watch_key].query(index)[0]
if ((value is not None) and (not isinstance(value, debug_data.InconvertibleTensorProto))):
output.append(tensor_helper.array_view(value, slicing=slicing, mapping=step_mapping)[2])
else:
output.append(None)
if recombine_and_map:
if (mapping == 'image/png'):
output = tensor_helper.array_to_base64_png(output)
elif (mapping and (mapping != 'none')):
logger.warn('Unsupported mapping mode after recomining time steps: %s', mapping)
return output
|
Query tensor store for a given watch_key.
Args:
watch_key: The watch key to query.
time_indices: A numpy-style slicing string for time indices. E.g.,
`-1`, `:-2`, `[::2]`. If not provided (`None`), will use -1.
slicing: A numpy-style slicing string for individual time steps.
mapping: An mapping string or a list of them. Supported mappings:
`{None, 'image/png', 'health-pill'}`.
Returns:
The potentially sliced values as a nested list of values or its mapped
format. A `list` of nested `list` of values.
Raises:
ValueError: If the shape of the sliced array is incompatible with mapping
mode. Or if the mapping type is invalid.
|
codesearchnet
|
def _capture_by_ref(self, graph: Any, lam: Callable[[], Any], key: Hashable=None) -> Any:
if key is not None and key in self._by_ref_internal:
return self._by_ref_internal[key]
if key is None:
key = len(self._by_ref_internal)
while key in self._by_ref_internal:
key += 1
value_nested = lam()
capture_trace_type = trace_type.from_value(value_nested)
ctx = trace_type.InternalPlaceholderContext(graph)
internal = capture_trace_type.placeholder_value(ctx)
def lam_fn():
value = lam()
return capture_trace_type.to_tensors(value)
self._by_ref_external[key] = lam_fn
self._by_ref_internal[key] = internal
self._by_ref_tracetype[key] = capture_trace_type
return self._by_ref_internal[key]
|
Used during the tracing process to create/retrieve by-ref captures.
Args:
graph: The FuncGraph that captures this tensor.
lam: A callable that takes no arguments and returns tensor captures.
key: A hashable identifier.
Returns:
Tensor from this FuncGraph.
|
github-repos
|
def materialize(self, ref, table_name=None, index_columns=None, logger=None):
from ambry.library import Library
assert isinstance(self._library, Library)
logger.debug('Materializing warehouse partition.\n partition: {}'.format(ref))
partition = self._library.partition(ref)
connection = self._backend._get_connection()
return self._backend.install(connection, partition, table_name=table_name,
index_columns=index_columns, materialize=True, logger=logger)
|
Creates materialized table for given partition reference.
Args:
ref (str): id, vid, name or vname of the partition.
Returns:
str: name of the partition table in the database.
|
juraj-google-style
|
def get_proj(prj_code):
if prj_code in CUSTOM_PRJ:
proj = pyproj.Proj(CUSTOM_PRJ[prj_code])
else:
proj = pyproj.Proj(init=prj_code)
return proj
|
Helper method for handling projection codes that are unknown to pyproj
Args:
prj_code (str): an epsg proj code
Returns:
projection: a pyproj projection
|
juraj-google-style
|
def requires_submit(func):
@functools.wraps(func)
def _wrapper(self, *args, **kwargs):
if self._future is None:
raise JobError("Job not submitted yet!. You have to .submit() first!")
return func(self, *args, **kwargs)
return _wrapper
|
Decorator to ensure that a submit has been performed before
calling the method.
Args:
func (callable): test function to be decorated.
Returns:
callable: the decorated function.
|
juraj-google-style
|
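A self-contained sketch of how `requires_submit` guards a method; `JobError` here is a stand-in for Qiskit's exception class and is assumed to live in the same module as the decorator:

class JobError(Exception):  # stand-in for qiskit's JobError
    pass

class FakeJob:
    def __init__(self):
        self._future = None

    def submit(self):
        self._future = object()  # pretend something was scheduled

    @requires_submit
    def result(self):
        return "done"

job = FakeJob()
# job.result() here would raise JobError("Job not submitted yet!...")
job.submit()
print(job.result())  # done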
def get(self, addresses):
with self._lock:
results = []
for add in addresses:
self.validate_read(add)
results.append(self._get(add))
return results
|
Returns the value in this context, or None, for each address in
addresses. Useful for gets on the context manager.
Args:
addresses (list of str): The addresses to return values for, if
within this context.
Returns:
results (list of bytes): The values in state for these addresses.
|
juraj-google-style
|
def get_template_name(env, pipeline_type):
pipeline_base = 'pipeline/pipeline'
template_name_format = '{pipeline_base}'
if env.startswith('prod'):
template_name_format = (template_name_format + '_{env}')
else:
template_name_format = (template_name_format + '_stages')
if (pipeline_type != 'ec2'):
template_name_format = (template_name_format + '_{pipeline_type}')
template_name_format = (template_name_format + '.json.j2')
template_name = template_name_format.format(pipeline_base=pipeline_base, env=env, pipeline_type=pipeline_type)
return template_name
|
Generates the correct template name based on pipeline type
Args:
env (str): environment to generate templates for
pipeline_type (str): Type of pipeline like ec2 or lambda
Returns:
str: Name of template
|
codesearchnet
|
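Tracing `get_template_name` for two common cases:

print(get_template_name("prod", "lambda"))  # pipeline/pipeline_prod_lambda.json.j2
print(get_template_name("dev", "ec2"))      # pipeline/pipeline_stages.json.j2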
def inception_v3_arg_scope(weight_decay=0.00004,
stddev=0.1,
batch_norm_var_collection='moving_vars'):
batch_norm_params = {
'decay': 0.9997,
'epsilon': 0.001,
'updates_collections': tf.GraphKeys.UPDATE_OPS,
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': [batch_norm_var_collection],
'moving_variance': [batch_norm_var_collection],
}
}
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope([slim.conv2d],
weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params) as sc:
return sc
|
Defines the default InceptionV3 arg scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
stddev: The standard deviation of the truncated normal weight initializer.
batch_norm_var_collection: The name of the collection for the batch norm
variables.
Returns:
An `arg_scope` to use for the inception v3 model.
|
juraj-google-style
|
def remove_user(username):
users = passwd_reader.load_users()
assert (username in users), ("Username '%s' not found!" % username)
del users[username]
passwd_reader.save_users(users)
home_dir = (settings.DATA_PATH + username)
if os.path.exists(home_dir):
shutil.rmtree(home_dir)
reload_configuration()
|
Remove the user, their home directory, and related configuration.
Args:
username (str): User's name.
|
codesearchnet
|
def upload_files(self, file_list):
counter = 0
files_to_upload = list(set(file_list) - set(self.uploaded_files))
try:
for f in files_to_upload:
with open(config.get_storage_path(f), 'rb') as file_obj:
response = config.SESSION.post(config.file_upload_url(), files={'file': file_obj})
if response.status_code == 200:
response.raise_for_status()
self.uploaded_files.append(f)
counter += 1
config.LOGGER.info("\tUploaded {0} ({count}/{total}) ".format(f, count=counter, total=len(files_to_upload)))
else:
self.failed_uploads[f] = response._content.decode('utf-8')
finally:
config.PROGRESS_MANAGER.set_uploading(self.uploaded_files)
|
upload_files: uploads files to the server
Args:
file_list (list): list of files to upload
Returns: None
|
juraj-google-style
|
def shape_type_conversion(fn):
def wrapper(instance, input_shape):
if input_shape is not None:
input_shape = convert_shapes(input_shape, to_tuples=True)
output_shape = fn(instance, input_shape)
if output_shape is not None:
output_shape = convert_shapes(output_shape, to_tuples=False)
return output_shape
return wrapper
|
Decorator that handles tuple/TensorShape conversion.
Used in `compute_output_shape` and `build`.
Args:
fn: function to wrap.
Returns:
Wrapped function.
|
github-repos
|
def __init__(self, device=''):
self._resource_handle_value = None
self._resource_device = device
self._self_destruction_context = context.eager_mode if context.executing_eagerly() else ops.get_default_graph().as_default
|
Initialize the `CapturableResource`.
Args:
device: A string indicating a required placement for this resource,
e.g. "CPU" if this resource must be created on a CPU device. A blank
device allows the user to place resource creation, so generally this
should be blank unless the resource only makes sense on one device.
|
github-repos
|
class PerceiverTextPreprocessor(AbstractPreprocessor):
def __init__(self, config: PerceiverConfig) -> None:
super().__init__()
self.config = config
self.embeddings = nn.Embedding(num_embeddings=config.vocab_size, embedding_dim=config.d_model)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model)
@property
def num_channels(self) -> int:
return self.config.d_model
def forward(self, inputs: torch.LongTensor, pos: Optional[torch.Tensor]=None, network_input_is_1d: bool=True, interpolate_pos_encoding: bool=False):
embeddings_without_pos = self.embeddings(inputs)
seq_length = inputs.shape[1]
position_ids = torch.arange(0, seq_length, device=inputs.device)
embeddings = embeddings_without_pos + self.position_embeddings(position_ids)
return (embeddings, None, embeddings_without_pos)
|
Text preprocessing for Perceiver Encoder. Can be used to embed `inputs` and add positional encodings.
The dimensionality of the embeddings is determined by the `d_model` attribute of the configuration.
Args:
config ([`PerceiverConfig`]):
Model configuration.
|
github-repos
|
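A hedged usage sketch for `PerceiverTextPreprocessor`, assuming `transformers` with the Perceiver family installed; the default dimensions come from `PerceiverConfig`:

import torch
from transformers import PerceiverConfig

config = PerceiverConfig()
preprocessor = PerceiverTextPreprocessor(config)

token_ids = torch.randint(0, config.vocab_size, (2, 16))  # batch of 2, sequence length 16
embeddings, _, embeddings_without_pos = preprocessor(token_ids)
print(embeddings.shape)  # torch.Size([2, 16, config.d_model])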
def map_fn(*columns):
features = collections.OrderedDict(zip(column_names, columns))
if label_name is not None:
label = features.pop(label_name)
return (features, label)
return features
|
Organizes columns into a features dictionary.
Args:
*columns: list of `Tensor`s corresponding to one csv record.
Returns:
An OrderedDict of feature names to values for that particular record. If
label_name is provided, extracts the label feature to be returned as the
second element of the tuple.
|
github-repos
|
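A standalone illustration of `map_fn`; the real closure captures `column_names` and `label_name` from its enclosing scope, so they are pinned here as module-level names:

import collections

column_names = ["sepal_length", "sepal_width", "label"]
label_name = "label"

features, label = map_fn(5.1, 3.5, 0)
print(features)  # OrderedDict([('sepal_length', 5.1), ('sepal_width', 3.5)])
print(label)     # 0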
def LoadSecondaryConfig(self, filename=None, parser=None):
if filename:
self.files.append(filename)
parser_cls = self.GetParserFromFilename(filename)
parser = parser_cls(filename=filename)
logging.debug('Loading configuration from %s', filename)
self.secondary_config_parsers.append(parser)
elif (parser is None):
raise ValueError('Must provide either a filename or a parser.')
clone = self.MakeNewConfig()
clone.MergeData(parser.RawData())
clone.initialized = True
for file_to_load in clone['Config.includes']:
if (not os.path.isabs(file_to_load)):
if (not filename):
raise ConfigFileNotFound(('While loading %s: Unable to include a relative path (%s) from a config without a filename' % (filename, file_to_load)))
file_to_load = os.path.join(os.path.dirname(filename), file_to_load)
clone_parser = clone.LoadSecondaryConfig(file_to_load)
if (not clone_parser.parsed):
raise ConfigFileNotFound(('Unable to load include file %s' % file_to_load))
self.MergeData(clone.raw_data)
self.files.extend(clone.files)
return parser
|
Loads an additional configuration file.
The configuration system has the concept of a single Primary configuration
file, and multiple secondary files. The primary configuration file is the
main file that is used by the program. Any writebacks will only be made to
the primary configuration file. Secondary files contain additional
configuration data which will be merged into the configuration system.
This method adds an additional configuration file.
Args:
filename: The configuration file that will be loaded. For example
file:///etc/grr.conf or reg://HKEY_LOCAL_MACHINE/Software/GRR.
parser: An optional parser can be given. In this case, the parser's data
will be loaded directly.
Returns:
The parser used to parse this configuration source.
Raises:
ValueError: if both filename and parser arguments are None.
ConfigFileNotFound: If a specified included file was not found.
|
codesearchnet
|
def set_callback_parameters(callback_list, model, do_validation=False, batch_size=None, epochs=None, steps_per_epoch=None, samples=None, verbose=1, mode=ModeKeys.TRAIN):
metric_names = model.metrics_names
for cbk in callback_list:
if isinstance(cbk, (BaseLogger, ProgbarLogger)):
cbk.stateful_metrics = metric_names[1:]
callback_metrics = []
if mode != ModeKeys.PREDICT:
callback_metrics = copy.copy(metric_names)
if do_validation:
callback_metrics += ['val_' + n for n in metric_names]
callback_params = {'batch_size': batch_size, 'epochs': epochs, 'steps': steps_per_epoch, 'samples': samples, 'verbose': verbose, 'do_validation': do_validation, 'metrics': callback_metrics}
callback_list.set_params(callback_params)
|
Sets callback parameters.
Args:
callback_list: CallbackList instance.
model: Model being trained.
do_validation: Whether or not validation loop will be run.
batch_size: Number of samples per batch.
epochs: Number of epoch to train.
steps_per_epoch: Number of batches to run per training epoch.
samples: Number of training samples.
verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.
mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
Which loop mode to configure callbacks for.
|
github-repos
|
def GetQueryResults(self, request, global_params=None):
config = self.GetMethodConfig('GetQueryResults')
return self._RunMethod(config, request, global_params=global_params)
|
Retrieves the results of a query job.
Args:
request: (BigqueryJobsGetQueryResultsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GetQueryResultsResponse) The response message.
|
github-repos
|
def matches(self, desc):
return (self.metric_name == desc.name and
self.kind == desc.metricKind and
self.value_type == desc.valueType)
|
Determines if a given metric descriptor matches this enum instance
Args:
desc (:class:`endpoints_management.gen.servicecontrol_v1_messages.MetricDescriptor`): the
instance to test
Return:
`True` if desc is supported, otherwise `False`
|
juraj-google-style
|
def get_records(self, name):
if name in self._cache:
return self._cache[name].values()
else:
return []
|
Return all the records for the given name in the cache.
Args:
name (string): The name which the required models are stored under.
Returns:
list: A list of :class:`cinder_data.model.CinderModel` models.
|
juraj-google-style
|
def serialize(self, user=None):
return {
'content': self.body,
'type': self.typ,
'updated_at': self.updated_at,
'timestamp': self.updated_at,
'is_update': not hasattr(self, 'unsaved'),
'attachments': [attachment.serialize() for attachment in self.attachment_set],
'title': self.msg_title,
'url': self.url,
'sender_name': self.sender.full_name,
'sender_key': self.sender.key,
'channel_key': self.channel.key,
'cmd': 'message',
'avatar_url': self.sender.avatar,
'key': self.key,
}
|
Serializes message for given user.
Note:
Should be called before the first save(). Otherwise "is_update" will get the wrong value.
Args:
user: User object
Returns:
Dict. JSON serialization ready dictionary object
|
juraj-google-style
|
def lookup_package(self, definition_name):
while True:
descriptor = self.lookup_descriptor(definition_name)
if isinstance(descriptor, FileDescriptor):
return descriptor.package
else:
index = definition_name.rfind('.')
if (index < 0):
return None
definition_name = definition_name[:index]
|
Determines the package name for any definition.
Determine the package that any definition name belongs to. May
check parent for package name and will resolve missing
descriptors if provided descriptor loader.
Args:
definition_name: Definition name to find package for.
|
codesearchnet
|
def run_amylpred2(self, seq, outdir, run_amylmuts=False):
outdir_amylpred = op.join(outdir, 'AMYLPRED2_results')
if (not op.exists(outdir_amylpred)):
os.mkdir(outdir_amylpred)
url = 'http:
cj = CookieJar()
opener = build_opener(HTTPCookieProcessor(cj))
formdata = {'email': self.email, 'password': self.password}
data_encoded = urlencode(formdata)
data_encoded = data_encoded.encode('ASCII')
response = opener.open(url, data_encoded)
methods = ['AGGRESCAN', 'NETCSSP', 'PAFIG', 'APD', 'AMYLPATTERN', 'SECSTR', 'BSC', 'WALTZ', 'CONFENERGY', 'TANGO']
if run_amylmuts:
methods.append('AMYLMUTS')
output = {}
timeCounts = 0
for met in methods:
existing_results = glob.glob(op.join(outdir_amylpred, '*_{}.txt'.format(met)))
if existing_results:
results_file = existing_results[0]
else:
values = {'seq_data': seq, 'method': met}
data = urlencode(values)
data = data.encode('ASCII')
url_input = 'http:
response = opener.open(url_input, data)
result = str(response.read())
ind = str.find(result, 'Job ID')
result2 = result[ind:(ind + 50)]
ind1 = str.find(result2, ':')
ind2 = str.find(result2, '<BR>')
job_id = result2[(ind1 + 2):ind2]
url_result = (('http:
print(url_result)
print(('Waiting for %s results' % met), end='.')
while True:
result = urlopen(url_result).read()
if (not result):
time.sleep(1)
timeCounts += 1
print('.', end='')
else:
response = requests.get(url_result)
break
results_file = op.join(outdir_amylpred, '{}_{}.txt'.format(url_result.split('/')[(- 1)].strip('.txt'), met))
with open(results_file, 'wb') as handle:
for data in response.iter_content():
handle.write(data)
print('')
(method, hits) = self.parse_method_results(results_file, met)
output[met] = hits
if (timeCounts != 0):
print(('Time spent: %d seconds' % timeCounts))
return output
|
Run all methods on the AMYLPRED2 web server for an amino acid sequence and gather results.
Result files are cached in ``/path/to/outdir/AMYLPRED2_results``.
Args:
seq (str): Amino acid sequence as a string
outdir (str): Directory to where output files should be saved
run_amylmuts (bool): If AMYLMUTS method should be run, default False
Returns:
dict: Result for each method run
|
codesearchnet
|
def extract(self, destination):
if os.path.exists(destination):
raise OSError(20, 'Destination exists', destination)
self.__extract_directory(
'.',
self.files['files'],
destination
)
|
Extracts the contents of the archive to the specified directory.
Args:
destination (str):
Path to an empty directory to extract the files to.
|
juraj-google-style
|
def remove(self, uids: Iterable[int]) -> None:
for uid in uids:
self._recent.discard(uid)
self._flags.pop(uid, None)
|
Remove any session flags for the given message.
Args:
uids: The message UID values.
|
juraj-google-style
|
def write_script(script, tempdir):
name = ('script' + self.suffix)
path = os.path.join(tempdir, name)
with open(path, 'w') as f:
f.write('\n'.join(script))
return path
|
Write script to a temporary directory
Arguments:
script (list): Commands to put into the file
tempdir (str): Directory to write the script into
Returns:
Absolute path to script
|
codesearchnet
|
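As shown, write_script refers to self.suffix even though it is a plain function, so it cannot run as-is. Below is a corrected, self-contained sketch that takes the suffix as an explicit argument; the '.sh' default is an assumption, not taken from the source.

import os
import tempfile

def write_script(script, tempdir, suffix='.sh'):  # suffix made explicit; '.sh' is assumed
    # Join the commands into one file inside tempdir and return its absolute path.
    path = os.path.join(tempdir, 'script' + suffix)
    with open(path, 'w') as f:
        f.write('\n'.join(script))
    return os.path.abspath(path)

with tempfile.TemporaryDirectory() as tmp:
    print(write_script(['echo hello', 'echo world'], tmp))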
def deserialize_pem(cert_pem):
if isinstance(cert_pem, str):
cert_pem = cert_pem.encode("utf-8")
return cryptography.x509.load_pem_x509_certificate(
data=cert_pem, backend=cryptography.hazmat.backends.default_backend()
)
|
Deserialize PEM (Base64) encoded X.509 v3 certificate.
Args:
cert_pem: str or bytes
PEM (Base64) encoded X.509 v3 certificate
Returns:
cert_obj: cryptography.Certificate
|
juraj-google-style
|
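A hedged usage sketch for deserialize_pem: load_pem_x509_certificate is the cryptography call used above, but the 'cert.pem' path is a placeholder.

import cryptography.x509
import cryptography.hazmat.backends

def deserialize_pem(cert_pem):
    if isinstance(cert_pem, str):
        cert_pem = cert_pem.encode('utf-8')
    return cryptography.x509.load_pem_x509_certificate(
        data=cert_pem,
        backend=cryptography.hazmat.backends.default_backend(),
    )

# Hypothetical usage: read a PEM-encoded certificate from disk and inspect it.
with open('cert.pem', 'rb') as f:  # 'cert.pem' is a placeholder path
    cert = deserialize_pem(f.read())
print(cert.subject)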
def show(self, obj=None, browser=None, new='tab'):
if (obj and (obj not in self.document.roots)):
self.document.add_root(obj)
show_session(session=self, browser=browser, new=new)
|
Open a browser displaying this session.
Args:
obj (LayoutDOM object, optional) : a Layout (Row/Column),
Plot or Widget object to display. The object will be added
to the session's document.
browser (str, optional) : browser to show with (default: None)
For systems that support it, the **browser** argument allows
specifying which browser to display in, e.g. "safari", "firefox",
"opera", "windows-default" (see the ``webbrowser`` module
documentation in the standard lib for more details).
new (str, optional) : new file output mode (default: "tab")
For file-based output, opens or raises the browser window
showing the current output file. If **new** is 'tab', then
opens a new tab. If **new** is 'window', then opens a new window.
|
codesearchnet
|
def _DropCommonSuffixes(filename):
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
|
Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
|
juraj-google-style
|
def unregisterObserver(self, observer):
if observer in self.m_observers:
self.m_observers.remove(observer)
pass
|
Remove an observer from the meter update() chain.
Args:
observer (MeterObserver): Subclassed MeterObserver.
|
juraj-google-style
|
def _CheckType(value, check_type, name, allow_none=True):
if value is None and allow_none:
return
if not isinstance(value, check_type):
raise TypeError('%s type doesn\'t match %s.' % (name, check_type))
|
Check that the type of an object is acceptable.
Args:
value: The object whose type is to be checked.
check_type: The type that the object must be an instance of.
name: Name of the object, to be placed in any error messages.
allow_none: True if value can be None, false if not.
Raises:
TypeError: If value is not an acceptable type.
|
juraj-google-style
|
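A small demonstration of _CheckType in isolation (renamed here without the leading underscore), showing the passing case, the allow_none escape hatch, and the failure path.

def check_type(value, check_type, name, allow_none=True):
    # Accept None when allowed, otherwise insist on an instance of check_type.
    if value is None and allow_none:
        return
    if not isinstance(value, check_type):
        raise TypeError("%s type doesn't match %s." % (name, check_type))

check_type('abc', str, 'label')            # OK
check_type(None, str, 'label')             # OK: None is allowed by default
try:
    check_type(42, str, 'label', allow_none=False)
except TypeError as exc:
    print(exc)                             # label type doesn't match <class 'str'>.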
def _ExtractOAuth2Client(product_yaml_key, product_data, proxy_config):
oauth2_kwargs = {
'proxy_config': proxy_config
}
if all(config in product_data for config in _OAUTH2_INSTALLED_APP_KEYS):
oauth2_args = [
product_data['client_id'], product_data['client_secret'],
product_data['refresh_token']
]
oauth2_client = googleads.oauth2.GoogleRefreshTokenClient
for key in _OAUTH2_INSTALLED_APP_KEYS:
del product_data[key]
elif all(config in product_data for config in _OAUTH2_SERVICE_ACCT_KEYS):
oauth2_args = [
product_data['path_to_private_key_file'],
googleads.oauth2.GetAPIScope(product_yaml_key),
]
oauth2_kwargs.update({
'sub': product_data.get('delegated_account')
})
oauth2_client = googleads.oauth2.GoogleServiceAccountClient
for key in _OAUTH2_SERVICE_ACCT_KEYS:
del product_data[key]
for optional_key in _OAUTH2_SERVICE_ACCT_KEYS_OPTIONAL:
if optional_key in product_data:
del product_data[optional_key]
else:
raise googleads.errors.GoogleAdsValueError(
'Your yaml file is incorrectly configured for OAuth2. You need to '
'specify credentials for either the installed application flow (%s) '
'or service account flow (%s).' %
(_OAUTH2_INSTALLED_APP_KEYS, _OAUTH2_SERVICE_ACCT_KEYS))
return oauth2_client(*oauth2_args, **oauth2_kwargs)
|
Generates a GoogleOAuth2Client subclass using the given product_data.
Args:
product_yaml_key: a string key identifying the product being configured.
product_data: a dict containing the configurations for a given product.
proxy_config: a ProxyConfig instance.
Returns:
An instantiated GoogleOAuth2Client subclass.
Raises:
A GoogleAdsValueError if the OAuth2 configuration for the given product is
misconfigured.
|
juraj-google-style
|
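The two credential shapes _ExtractOAuth2Client accepts, sketched as plain dicts. The key names follow the checks in the code above; every value is a placeholder.

# Installed-application flow: client_id / client_secret / refresh_token.
installed_app_config = {
    'client_id': 'INSERT_CLIENT_ID',
    'client_secret': 'INSERT_CLIENT_SECRET',
    'refresh_token': 'INSERT_REFRESH_TOKEN',
}

# Service-account flow: path to a key file, plus an optional delegated account.
service_account_config = {
    'path_to_private_key_file': '/path/to/key.json',
    'delegated_account': 'user@example.com',  # optional
}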
def x_www_form_urlencoded(post_data):
if isinstance(post_data, dict):
return '&'.join([u'{}={}'.format(key, value) for (key, value) in post_data.items()])
else:
return post_data
|
Convert a dict to x-www-form-urlencoded format.
Args:
post_data (dict):
{"a": 1, "b":2}
Returns:
str:
a=1&b=2
|
codesearchnet
|
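A quick, self-contained check of the behaviour described in the docstring. Note that, unlike urllib.parse.urlencode, this helper does not percent-encode keys or values.

def x_www_form_urlencoded(post_data):
    # Dicts become key=value pairs joined by '&'; anything else passes through.
    if isinstance(post_data, dict):
        return '&'.join('{}={}'.format(key, value) for key, value in post_data.items())
    return post_data

print(x_www_form_urlencoded({'a': 1, 'b': 2}))   # a=1&b=2
print(x_www_form_urlencoded('already=encoded'))  # already=encoded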
def parse_napp(napp_id):
regex = r'([a-zA-Z][a-zA-Z0-9_]{2,})/([a-zA-Z][a-zA-Z0-9_]{2,}):?(.+)?'
compiled_regex = re.compile(regex)
matched = compiled_regex.fullmatch(napp_id)
if not matched:
msg = '"{}" NApp has not the form username/napp_name[:version].'
raise KytosException(msg.format(napp_id))
return matched.groups()
|
Convert a napp_id into a tuple with username, napp name and version.
Args:
napp_id: String with the form 'username/napp[:version]' (version is
optional). If no version is found, it will be None.
Returns:
tuple: A tuple with (username, napp, version)
Raises:
KytosException: If a NApp does not have the form _username/name_.
|
juraj-google-style
|
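The regex in parse_napp can be exercised on its own; this sketch raises ValueError in place of the project's KytosException.

import re

NAPP_REGEX = r'([a-zA-Z][a-zA-Z0-9_]{2,})/([a-zA-Z][a-zA-Z0-9_]{2,}):?(.+)?'

def parse_napp(napp_id):
    matched = re.compile(NAPP_REGEX).fullmatch(napp_id)
    if not matched:
        raise ValueError('"{}" does not have the form username/napp_name[:version].'.format(napp_id))
    return matched.groups()

print(parse_napp('kytos/of_core'))        # ('kytos', 'of_core', None)
print(parse_napp('kytos/of_core:1.2.3'))  # ('kytos', 'of_core', '1.2.3')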
def merge_scores(self, df_addition, reference_markers='all', addition_markers='all', on=['project_name', 'sample_name', 'frame_name', 'cell_index']):
if isinstance(reference_markers, str):
reference_markers = self.scored_names
elif (reference_markers is None):
reference_markers = []
if isinstance(addition_markers, str):
addition_markers = df_addition.scored_names
elif (addition_markers is None):
addition_markers = []
df_addition = df_addition.copy()
df_addition['_key'] = 1
df = self.merge(df_addition[(['scored_calls', '_key'] + on)].rename(columns={'scored_calls': '_addition'}), on=on, how='left')
df['_sub1'] = df['scored_calls'].apply((lambda x: dict(((k, x[k]) for k in reference_markers))))
df['_sub2'] = df['_addition'].apply((lambda x: (dict({}) if (x != x) else dict(((k, x[k]) for k in addition_markers)))))
df['scored_calls'] = df.apply((lambda x: {**x['_sub1'], **x['_sub2']}), 1)
df = df.drop(columns=['_sub1', '_sub2', '_addition'])
df = (df.drop(columns='_key').copy(), df[df['_key'].isna()].drop(columns='_key').copy())
if self.microns_per_pixel:
df[0].microns_per_pixel = self.microns_per_pixel
if self.microns_per_pixel:
df[1].microns_per_pixel = self.microns_per_pixel
return df
|
Combine CellDataFrames that differ by score composition
Args:
df_addition (CellDataFrame): The CellDataFrame to merge scores in from
reference_markers (list): which scored call names to keep in this object (default: all)
addition_markers (list): which scored call names to merge in (default: all)
on (list): the features to merge cells on
Returns:
CellDataFrame, CellDataFrame: a passing CellDataFrame of cells where the merge criteria were met, and a failing CellDataFrame of cells where they were not.
|
codesearchnet
|
def get_member(self, id='me', name=None):
return self.create_member(dict(id=id, fullName=name))
|
Get a member or your current member if `id` wasn't given.
Returns:
Member: The member with the given `id`, defaults to the
logged in member.
|
codesearchnet
|
def process_streamer(self, streamer, callback=None):
index = streamer.index
if index in self._in_progress_streamers:
raise InternalError("You cannot add a streamer again until it has finished streaming.")
queue_item = QueuedStreamer(streamer, callback)
self._in_progress_streamers.add(index)
self._logger.debug("Streamer %d: queued to send %d readings", index, queue_item.initial_count)
self._queue.put_nowait(queue_item)
|
Start streaming a streamer.
Args:
streamer (DataStreamer): The streamer itself.
callback (callable): An optional callable that will be called as:
callable(index, success, highest_id_received_from_other_side)
|
juraj-google-style
|
def add_command(self, command):
if self._commands and command == self._commands[-1]:
return
if not isinstance(command, str):
raise TypeError('Attempt to enter non-str entry to command history')
self._commands.append(command)
if len(self._commands) > self._limit:
self._commands = self._commands[-self._limit:]
self._add_command_to_history_file(command)
|
Add a command to the command history.
Args:
command: The history command, as a str.
Raises:
TypeError: if command is not a str.
|
github-repos
|
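A stripped-down, in-memory sketch of the history behaviour in add_command; the history-file write is the part left out here, and the class name is made up.

class CommandHistory(object):
    # Minimal stand-in: keeps the last `limit` commands, skipping immediate repeats.
    def __init__(self, limit=100):
        self._limit = limit
        self._commands = []

    def add_command(self, command):
        if self._commands and command == self._commands[-1]:
            return  # ignore a command repeated back-to-back
        if not isinstance(command, str):
            raise TypeError('Attempt to enter non-str entry to command history')
        self._commands.append(command)
        if len(self._commands) > self._limit:
            self._commands = self._commands[-self._limit:]

history = CommandHistory(limit=2)
for cmd in ['run', 'run', 'lt', 'help']:
    history.add_command(cmd)
print(history._commands)  # ['lt', 'help']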
def StartFlowAndWorker(client_id, flow_name, **kwargs):
queue = rdfvalue.RDFURN(('DEBUG-%s-' % getpass.getuser()))
if ('token' in kwargs):
token = kwargs.pop('token')
else:
token = access_control.ACLToken(username='GRRConsole')
session_id = flow.StartAFF4Flow(client_id=client_id, flow_name=flow_name, queue=queue, token=token, **kwargs)
worker_thrd = worker_lib.GRRWorker(queues=[queue], token=token, threadpool_size=1)
while True:
try:
worker_thrd.RunOnce()
except KeyboardInterrupt:
print('exiting')
worker_thrd.thread_pool.Join()
break
time.sleep(2)
with aff4.FACTORY.Open(session_id, token=token) as flow_obj:
if (not flow_obj.GetRunner().IsRunning()):
break
worker_thrd.thread_pool.Join()
return session_id
|
Launches the flow and worker and waits for it to finish.
Args:
client_id: The client common name we issue the request.
flow_name: The name of the flow to launch.
**kwargs: passthrough to flow.
Returns:
A flow session id.
Note: you need raw access to run this flow as it requires running a worker.
|
codesearchnet
|
def reset_from_seed(self, seed):
state = create_rng_state(seed, self.algorithm)
self._state_var.assign(state)
|
Resets the generator by a new seed.
See `from_seed` for the meaning of "seed".
Args:
seed: the new seed.
|
github-repos
|
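A small usage sketch, assuming TensorFlow 2.x's tf.random.Generator: resetting with the same seed rewinds the generator so the random stream repeats.

import tensorflow as tf

g = tf.random.Generator.from_seed(1234)
first = g.normal(shape=[3])
g.reset_from_seed(1234)  # rewind the generator to its initial state
second = g.normal(shape=[3])
print(tf.reduce_all(first == second).numpy())  # True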
def lint_command(name, program, arguments, filter_regex, filename, lines):
output = utils.get_output_from_cache(name, filename)
if (output is None):
call_arguments = (([program] + arguments) + [filename])
try:
output = subprocess.check_output(call_arguments, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as error:
output = error.output
except OSError:
return {filename: {'error': [(('Could not execute "%s".%sMake sure all ' + 'required programs are installed') % (' '.join(call_arguments), os.linesep))]}}
output = output.decode('utf-8')
utils.save_output_in_cache(name, filename, output)
output_lines = output.split(os.linesep)
if (lines is None):
lines_regex = '\\d+'
else:
lines_regex = '|'.join(map(str, lines))
lines_regex = ('(%s)' % lines_regex)
groups = ('line', 'column', 'message', 'severity', 'message_id')
filtered_lines = utils.filter_lines(output_lines, filter_regex.format(lines=lines_regex, filename=re.escape(filename)), groups=groups)
result = []
for data in filtered_lines:
comment = dict((p for p in zip(groups, data) if (p[1] is not None)))
if ('line' in comment):
comment['line'] = int(comment['line'])
if ('column' in comment):
comment['column'] = int(comment['column'])
if ('severity' in comment):
comment['severity'] = comment['severity'].title()
result.append(comment)
return {filename: {'comments': result}}
|
Executes a lint program and filter the output.
Executes the lint tool 'program' with arguments 'arguments' over the file
'filename' returning only those lines matching the regular expression
'filter_regex'.
Args:
name: string: the name of the linter.
program: string: lint program.
arguments: list[string]: extra arguments for the program.
filter_regex: string: regular expression to filter lines.
filename: string: filename to lint.
lines: list[int]|None: list of lines that we want to capture. If None,
then all lines will be captured.
Returns: dict: a dict with the extracted info from the message.
|
codesearchnet
|
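What a filter_regex for lint_command looks like in practice: it must contain {filename} and {lines} placeholders and the named groups the function extracts. The regex and sample linter output below are illustrative only, not taken from any particular linter configuration.

import re

output_lines = [
    'foo.py:12:4: C0103 Invalid name "x"',
    'foo.py:30:0: E1101 Instance has no "bar" member',
]
filter_regex = (r'{filename}:(?P<line>{lines}):(?P<column>\d+): '
                r'(?P<message_id>\S+) (?P<message>.+)')
pattern = filter_regex.format(filename=re.escape('foo.py'), lines=r'\d+')

for text in output_lines:
    match = re.search(pattern, text)
    if match:
        comment = {k: v for k, v in match.groupdict().items() if v is not None}
        comment['line'] = int(comment['line'])
        comment['column'] = int(comment['column'])
        print(comment)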
def __init__(self, proto_id=None, debug=False):
facility = logging.handlers.SysLogHandler.LOG_DAEMON
self.logger = logger.Logger(
name='google-ip-forwarding', debug=debug, facility=facility)
self.ip_forwarding_utils = ip_forwarding_utils.IpForwardingUtils(
logger=self.logger, proto_id=proto_id)
|
Constructor.
Args:
proto_id: string, the routing protocol identifier for Google IP changes.
debug: bool, True if debug output should write to the console.
|
juraj-google-style
|
def __init__(self, prefs, kappa=2.0, omega=0.5, beta=1.0, mu=1.0,
phi=scipy.ones(N_NT) / N_NT,
freeparams=['kappa', 'omega', 'beta', 'mu', 'eta']):
self._nsites = len(prefs)
assert self.nsites > 0, "No preferences specified"
assert all(map(lambda x: x in self.ALLOWEDPARAMS, freeparams)),\
"Invalid entry in freeparams\nGot: {0}\nAllowed: {1}".format(
', '.join(freeparams), ', '.join(self.ALLOWEDPARAMS))
self._freeparams = list(freeparams)
self.pi = scipy.ndarray((self.nsites, N_AA), dtype='float')
assert (isinstance(prefs, list) and
all([isinstance(x, dict) for x in prefs])),\
"prefs is not a list of dicts"
for r in range(self.nsites):
assert set(prefs[r].keys()) == set(AA_TO_INDEX.keys()),\
"prefs not keyed by amino acids for site {0}".format(r)
assert abs(1 - sum(prefs[r].values())) <= ALMOST_ZERO,\
"prefs don't sum to one for site {0}".format(r)
for (a, aa) in INDEX_TO_AA.items():
_checkParam('pi', prefs[r][aa], self.PARAMLIMITS, self.PARAMTYPES)
self.pi[r][a] = prefs[r][aa]
self.pi[r] /= self.pi[r].sum()
self.pi_codon = scipy.full((self.nsites, N_CODON), -1, dtype='float')
self.ln_pi_codon = scipy.full((self.nsites, N_CODON), -1, dtype='float')
self.piAx_piAy = scipy.full((self.nsites, N_CODON, N_CODON), -1,
dtype='float')
_checkParam('phi', phi, self.PARAMLIMITS, self.PARAMTYPES)
assert abs(1 - phi.sum()) <= ALMOST_ZERO, "phi doesn't sum to 1"
self.phi = phi.copy()
self.phi /= self.phi.sum()
self._eta_from_phi()
self._mu = mu
self.kappa = kappa
self.omega = omega
self.beta = beta
for (name, value) in [('kappa', self.kappa), ('omega', self.omega),
('beta', self.beta), ('eta', self.eta), ('mu', self.mu)]:
_checkParam(name, value, self.PARAMLIMITS, self.PARAMTYPES)
self.piAx_piAy_beta = scipy.zeros((self.nsites, N_CODON, N_CODON),
dtype='float')
self.ln_piAx_piAy_beta = scipy.zeros((self.nsites, N_CODON, N_CODON),
dtype='float')
self.Prxy = scipy.zeros((self.nsites, N_CODON, N_CODON), dtype='float')
self.prx = scipy.zeros((self.nsites, N_CODON), dtype='float')
self.Qxy = scipy.zeros((N_CODON, N_CODON), dtype='float')
self.Frxy = scipy.ones((self.nsites, N_CODON, N_CODON), dtype='float')
self.Frxy_no_omega = scipy.ones((self.nsites, N_CODON, N_CODON),
dtype='float')
self.D = scipy.zeros((self.nsites, N_CODON), dtype='float')
self.A = scipy.zeros((self.nsites, N_CODON, N_CODON), dtype='float')
self.Ainv = scipy.zeros((self.nsites, N_CODON, N_CODON), dtype='float')
self.dPrxy = {}
self.B = {}
self.dprx = {}
for param in self.freeparams:
if param == 'mu':
self.dprx['mu'] = 0.0
elif self.PARAMTYPES[param] == float:
self.dPrxy[param] = scipy.zeros((self.nsites, N_CODON, N_CODON),
dtype='float')
self.B[param] = scipy.zeros((self.nsites, N_CODON, N_CODON),
dtype='float')
self.dprx[param] = scipy.zeros((self.nsites, N_CODON), dtype='float')
else:
assert self.PARAMTYPES[param][0] == scipy.ndarray
paramshape = self.PARAMTYPES[param][1]
assert len(paramshape) == 1, "Can't handle multi-dimensional ndarray"
paramlen = paramshape[0]
self.dPrxy[param] = scipy.zeros((paramlen, self.nsites, N_CODON,
N_CODON), dtype='float')
self.B[param] = scipy.zeros((paramlen, self.nsites, N_CODON,
N_CODON), dtype='float')
self.dprx[param] = scipy.zeros((paramlen, self.nsites, N_CODON),
dtype='float')
self._diag_indices = scipy.diag_indices(N_CODON)
self.updateParams({}, update_all=True)
|
Initialize an `ExpCM` object.
Args:
`prefs` (list)
List of dicts giving amino-acid preferences for
each site. Each dict keyed by amino acid letter
codes, value is pref > 0 and < 1. Must sum to 1
at each site.
`kappa`, `omega`, `beta`, `mu`, `phi`
Model params described in main class doc string.
`freeparams` (list of strings)
Specifies free parameters.
|
juraj-google-style
|
def AsyncSleep(delay, name=None):
return examples_async_sleep(delay=delay, name=name)
|
Pause for `delay` seconds (which need not be an integer).
This is an asynchronous (non-blocking) version of a sleep op. It includes
any time spent being blocked by another thread in `delay`. If it is blocked
for a fraction of the time specified by `delay`, it calls `sleep`
(actually `usleep`) only for the remainder. If it is blocked for the full
time specified by `delay` or more, it returns without explicitly calling
`sleep`.
Args:
delay: tf.Tensor which is a scalar of type float.
name: An optional name for the op.
Returns:
The `delay` value.
|
github-repos
|
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
match = Search(' (if\\(|for\\(|while\\(|switch\\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5, ('Missing space before ( in %s' % match.group(1)))
match = Search('\\b(if|for|while|switch)\\s*\\(([ ]*)(.).*[^ ]+([ ]*)\\)\\s*{\\s*$', line)
if match:
if (len(match.group(2)) != len(match.group(4))):
if (not (((match.group(3) == ';') and (len(match.group(2)) == (1 + len(match.group(4))))) or ((not match.group(2)) and Search('\\bfor\\s*\\(.*; \\)', line)))):
error(filename, linenum, 'whitespace/parens', 5, ('Mismatching spaces inside () in %s' % match.group(1)))
if (len(match.group(2)) not in [0, 1]):
error(filename, linenum, 'whitespace/parens', 5, ('Should have zero or one spaces inside ( and ) in %s' % match.group(1)))
|
Checks for horizontal spacing around parentheses.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
|
codesearchnet
|
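The first check in CheckParenthesisSpacing ('missing space before (') reduced to a self-contained snippet.

import re

def missing_space_before_paren(line):
    # Flag `if(`, `for(`, `while(` and `switch(` written without a space.
    match = re.search(r' (if\(|for\(|while\(|switch\()', line)
    if match:
        return 'Missing space before ( in %s' % match.group(1)
    return None

print(missing_space_before_paren('  if(x > 0) {'))   # Missing space before ( in if(
print(missing_space_before_paren('  if (x > 0) {'))  # None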
def parse_meta(meta):
resources = {}
for name in meta:
if name.startswith('$'):
continue
resources[name] = resource = {}
for action in meta[name]:
if action.startswith('$'):
continue
(url, httpmethod) = res_to_url(name, action)
resource[action] = {'url': url, 'method': httpmethod}
url_prefix = meta.get('$url_prefix', '').rstrip('/')
return (url_prefix, meta['$auth']['header'].lower(), resources)
|
Parse metadata of API
Args:
meta: metadata of API
Returns:
tuple(url_prefix, auth_header, resources)
|
codesearchnet
|
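A sketch of the metadata shape parse_meta consumes. res_to_url is not shown above, so a simplified placeholder is used here; the meta dict is invented for illustration.

def res_to_url(name, action):
    # Simplified placeholder for the real res_to_url helper.
    method = 'GET' if action.startswith('get') else 'POST'
    return '/' + name.lower() + '/' + action, method

def parse_meta(meta):
    resources = {}
    for name in meta:
        if name.startswith('$'):
            continue
        resources[name] = resource = {}
        for action in meta[name]:
            if action.startswith('$'):
                continue
            url, httpmethod = res_to_url(name, action)
            resource[action] = {'url': url, 'method': httpmethod}
    url_prefix = meta.get('$url_prefix', '').rstrip('/')
    return url_prefix, meta['$auth']['header'].lower(), resources

meta = {
    '$url_prefix': '/api/',
    '$auth': {'header': 'Authorization'},
    'User': {'get': {}, 'post_login': {}, '$desc': 'user resource'},
}
url_prefix, auth_header, resources = parse_meta(meta)
print(url_prefix)                # /api
print(auth_header)               # authorization
print(resources['User']['get'])  # {'url': '/user/get', 'method': 'GET'}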