code | docstring | source
---|---|---|
def equals(self, other):
if not isinstance(other, self.__class__):
return False
else:
return self.properties_with_values() == other.properties_with_values()
|
Structural equality of models.
Args:
other (HasProps) : the other instance to compare to
Returns:
True, if properties are structurally equal, otherwise False
|
juraj-google-style
|
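A minimal usage sketch for the structural-equality check above; the `HasProps` base and `Circle` subclass below are illustrative stand-ins, not the library's real classes:

```python
class HasProps:
    # equals() as shown in the row above
    def equals(self, other):
        if not isinstance(other, self.__class__):
            return False
        else:
            return self.properties_with_values() == other.properties_with_values()

class Circle(HasProps):
    def __init__(self, radius=1.0, color="red"):
        self.radius = radius
        self.color = color

    def properties_with_values(self):
        return {"radius": self.radius, "color": self.color}

assert Circle(radius=2.0).equals(Circle(radius=2.0))      # same property values -> True
assert not Circle(radius=2.0).equals(Circle(radius=3.0))  # differing value -> False
assert not Circle().equals("not a model")                 # different type -> False
```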
def __init__(self, fail_on_unset: bool = False, default: str = 'none', **_vars: Any):
self.fail_on_unset = bool(fail_on_unset)
self.default = str(default)
self.vars = _vars
|
Initializer.
Args:
fail_on_unset (bool): If set to True an exception will be raised when the environment
variable is unset; otherwise the default value (see next) will be used instead.
default (str): If an environment variable is unset, it will get this value instead.
|
juraj-google-style
|
def _AddEnumValues(descriptor, cls):
for enum_type in descriptor.enum_types:
setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type))
for enum_value in enum_type.values:
setattr(cls, enum_value.name, enum_value.number)
|
Sets class-level attributes for all enum fields defined in this message.
Also exports a class-level object that can be used to look up enum value names.
Args:
descriptor: Descriptor object for this message type.
cls: Class we're constructing for this message type.
|
juraj-google-style
|
class Activation(Layer):
def __init__(self, activation, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self.activation = activations.get(activation)
self._build_at_init()
def call(self, inputs):
return self.activation(inputs)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {'activation': activations.serialize(self.activation)}
base_config = super().get_config()
return {**base_config, **config}
|
Applies an activation function to an output.
Args:
activation: Activation function. It could be a callable, or the name of
an activation from the `keras.activations` namespace.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Example:
>>> layer = keras.layers.Activation('relu')
>>> layer(np.array([-3.0, -1.0, 0.0, 2.0]))
[0.0, 0.0, 0.0, 2.0]
>>> layer = keras.layers.Activation(keras.activations.relu)
>>> layer(np.array([-3.0, -1.0, 0.0, 2.0]))
[0.0, 0.0, 0.0, 2.0]
|
github-repos
|
def compile_reward(self, scope: Dict[str, TensorFluent]) -> TensorFluent:
reward_expr = self.rddl.domain.reward
with self.graph.as_default():
with tf.name_scope('reward'):
return self._compile_expression(reward_expr, scope)
|
Compiles the reward function given the fluent `scope`.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for reward evaluation.
Returns:
A :obj:`rddl2tf.fluent.TensorFluent` representing the reward function.
|
juraj-google-style
|
def download_from_url(path, url):
filename = url.split("/")[-1]
found_file = find_file(path, filename, max_depth=0)
if found_file is None:
filename = os.path.join(path, filename)
tf.logging.info("Downloading from %s to %s." % (url, filename))
inprogress_filepath = filename + ".incomplete"
inprogress_filepath, _ = urllib.request.urlretrieve(
url, inprogress_filepath, reporthook=download_report_hook)
print()
tf.gfile.Rename(inprogress_filepath, filename)
return filename
else:
tf.logging.info("Already downloaded: %s (at %s)." % (url, found_file))
return found_file
|
Download content from a url.
Args:
path: string directory where file will be downloaded
url: string url
Returns:
Full path to downloaded file
|
juraj-google-style
|
def slice_constant(data, batch_size=32, name='constant_data', global_step=None):
with tf.name_scope(name):
all_data = tf.convert_to_tensor(data)
global_step = (global_step or bookkeeper.global_step())
        count = len(data) // batch_size
        extra = len(data) - (count * batch_size)
        if extra:
            # A partial final batch exists: cycle over count + 1 offsets and
            # shrink the last slice to `extra` elements.
            offset = tf.mod(global_step, count + 1)
            return tf.slice(all_data, (offset * batch_size),
                            tf.where(tf.equal(offset, count), extra, batch_size))
        else:
            # Data divides evenly: cycle over `count` full batches.
            offset = tf.mod(global_step, count)
            return tf.slice(all_data, (offset * batch_size), batch_size)
|
Provide a slice based on the global_step.
This is useful when the entire data array can be stored in memory because it
allows you to feed the data very efficiently.
Args:
data: A numpy array or tensor.
batch_size: The batch size for the produced data.
name: An optional name for this data.
global_step: A global step variable that is used to read the data. If None
then the default prettytensor global_step is used.
Returns:
A tensor that produces the given data.
|
codesearchnet
|
def UpdateMapping(self, filename, mapping_update):
if filename not in self._file_mapping:
raise problems.NonexistentMapping(filename)
mapping = self._file_mapping[filename]
mapping.update(mapping_update)
|
Updates an entry in the list of known filenames.
An entry is identified by its filename.
Args:
filename: The filename whose mapping is to be updated
mapping_update: A dictionary containing the fields to update and their
new values.
Raises:
NonexistentMapping: if the filename does not exist in the mapping
|
juraj-google-style
|
def has_auth_params(self, scheme):
for (k, v) in iteritems(self.schemes[scheme][u'params']):
if (not v):
return False
return True
|
Check whether all information required for a given auth scheme has
been supplied.
Args:
scheme (str): Name of the authentication scheme to check. One of
Gem-Identify, Gem-Device, Gem-Application
Returns:
True if all required parameters for the specified scheme are present
or False otherwise.
|
codesearchnet
|
def load_addon(username, package_name, _globals):
addon_module = get_or_create_module_r(username)
package_module = __import__(package_name)
add_tasks_r(addon_module, package_module, package_name)
_globals.update({username: addon_module})
del package_module
del addon_module
|
Load a fabsetup addon given by 'package_name' and hook it into the
base task namespace 'username'.
Args:
username(str)
package_name(str)
_globals(dict): the globals() namespace of the fabric script.
Return: None
|
juraj-google-style
|
class PerceiverDecoderOutput(ModelOutput):
logits: Optional[torch.FloatTensor] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
|
Base class for Perceiver decoder outputs, with potential cross-attentions.
Args:
logits (`torch.FloatTensor` of shape `(batch_size, num_labels)`):
Output of the basic decoder.
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
used to compute the weighted average in the cross-attention heads.
|
github-repos
|
def sendCommand(self, command):
command_data = [ord(x) for x in buffer(command)]
self.hid.write(command_data)
response_data = ''.join((chr(x) for x in self.hid.read(64)))
response = command.RESPONSE.from_buffer_copy(response_data)
if (response.status != 0):
raise CommandException(response.status)
return response
|
Sends a Command object to the MCP2210 and returns its response.
Arguments:
A commands.Command instance
Returns:
A commands.Response instance, or raises a CommandException on error.
|
codesearchnet
|
def spec_filled(self, pos_args, kw_args):
req_names = self.arg_names
if len(self.arg_defaults) > 0:
req_names = req_names[:-len(self.arg_defaults)]
req = [x for x in req_names if x not in kw_args]
return len(req) <= len(pos_args)
|
Check if we have enough arguments to call this function.
Args:
pos_args (list): A list of all the positional values we have.
kw_args (dict): A dict of all of the keyword args we have.
Returns:
bool: True if we have a filled spec, False otherwise.
|
juraj-google-style
|
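An illustrative sketch of how the spec-filling check behaves, using a hypothetical minimal spec object that carries only the `arg_names` and `arg_defaults` attributes the method reads (the real class holds more state):

```python
class FunctionSpec:
    """Hypothetical stand-in holding just the fields spec_filled() reads."""
    def __init__(self, arg_names, arg_defaults):
        self.arg_names = arg_names        # all parameter names, in order
        self.arg_defaults = arg_defaults  # defaults for the trailing parameters

    def spec_filled(self, pos_args, kw_args):
        req_names = self.arg_names
        if len(self.arg_defaults) > 0:
            req_names = req_names[:-len(self.arg_defaults)]
        req = [x for x in req_names if x not in kw_args]
        return len(req) <= len(pos_args)

# Equivalent of: def connect(host, port, timeout=30): ...
spec = FunctionSpec(arg_names=["host", "port", "timeout"], arg_defaults=[30])

assert spec.spec_filled(["example.org"], {"port": 8080})           # host positional, port keyword
assert not spec.spec_filled(["example.org"], {})                   # port still missing
assert spec.spec_filled([], {"host": "example.org", "port": 8080}) # everything by keyword
```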
def my_solid_angle(center, coords):
o = np.array(center)
r = [np.array(c) - o for c in coords]
r.append(r[0])
n = [np.cross(r[i + 1], r[i]) for i in range(len(r) - 1)]
n.append(np.cross(r[1], r[0]))
phi = 0.0
for i in range(len(n) - 1):
try:
value = math.acos(-np.dot(n[i], n[i + 1]) / (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1])))
except ValueError:
mycos = -np.dot(n[i], n[i + 1]) / (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1]))
if 0.999999999999 < mycos < 1.000000000001:
value = math.acos(1.0)
elif -0.999999999999 > mycos > -1.000000000001:
value = math.acos(-1.0)
else:
raise SolidAngleError(mycos)
phi += value
return phi + (3 - len(r)) * math.pi
|
Helper method to calculate the solid angle of a set of coords from the
center.
Args:
center:
Center to measure solid angle from.
coords:
List of coords to determine solid angle.
Returns:
The solid angle.
|
juraj-google-style
|
def extract_wavs(utterances: List[Utterance], tgt_dir: Path, lazy: bool) -> None:
tgt_dir.mkdir(parents=True, exist_ok=True)
for utter in utterances:
wav_fn = '{}.{}'.format(utter.prefix, 'wav')
out_wav_path = (tgt_dir / wav_fn)
if (lazy and out_wav_path.is_file()):
logger.info('File {} already exists and lazy == {}; not writing.'.format(out_wav_path, lazy))
continue
logger.info('File {} does not exist and lazy == {}; creating it.'.format(out_wav_path, lazy))
trim_wav_ms(utter.org_media_path, out_wav_path, utter.start_time, utter.end_time)
|
Extracts WAVs from the media files associated with a list of Utterance
objects and stores it in a target directory.
Args:
utterances: A list of Utterance objects, which include information
about the source media file, and the offset of the utterance in the
media_file.
tgt_dir: The directory in which to write the output WAVs.
lazy: If True, then existing WAVs will not be overwritten if they have
the same name
|
codesearchnet
|
def get_serialization_context(self, driver_id):
with self.lock:
if (driver_id not in self.serialization_context_map):
_initialize_serialization(driver_id)
return self.serialization_context_map[driver_id]
|
Get the SerializationContext of the driver that this worker is processing.
Args:
driver_id: The ID of the driver that indicates which driver to get
the serialization context for.
Returns:
The serialization context of the given driver.
|
codesearchnet
|
def add_citations(voevent, event_ivorns):
if not voevent.xpath('Citations'):
etree.SubElement(voevent, 'Citations')
voevent.Citations.extend(_listify(event_ivorns))
|
Add citations to other voevents.
The schema mandates that the 'Citations' section must either be entirely
absent, or non-empty - hence we require this wrapper function for its
creation prior to listing the first citation.
Args:
voevent(:class:`Voevent`): Root node of a VOEvent etree.
event_ivorns (:class:`voeventparse.misc.EventIvorn`): List of EventIvorn
elements to add to citation list.
|
juraj-google-style
|
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
raise NotImplementedError
|
Subtracts `tf.IndexedSlices` from this variable.
Args:
sparse_delta: `tf.IndexedSlices` to be subtracted from this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
|
github-repos
|
def getctime(self, path=None, client_kwargs=None, header=None):
return self._getctime_from_header(self.head(path, client_kwargs, header))
|
Return the creation time of path.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
float: The number of seconds since the epoch
(see the time module).
|
codesearchnet
|
def _key2seed(a):
def int64_to_int32s(a):
a = math_ops.cast(a, dtypes.uint64)
fst = math_ops.cast(a, dtypes.uint32)
snd = math_ops.cast(gen_bitwise_ops.right_shift(a, constant_op.constant(32, dtypes.uint64)), dtypes.uint32)
a = [fst, snd]
a = nest.map_structure(lambda x: math_ops.cast(x, dtypes.int32), a)
a = array_ops_stack.stack(a)
return a
return int64_to_int32s(a)
|
Converts an RNG key to an RNG seed.
Args:
a: an RNG key, an ndarray of shape [] and dtype `np.int64`.
Returns:
an RNG seed, a tensor of shape [2] and dtype `tf.int32`.
|
github-repos
|
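The TensorFlow ops above just split a 64-bit key into its low and high 32-bit words; a NumPy-only sketch of the same arithmetic (an illustration, not the library code) may make that clearer:

```python
import numpy as np

def key_to_seed_np(key):
    """Split an int64 RNG key into [low 32 bits, high 32 bits] as int32."""
    key = np.uint64(key)
    low = np.uint32(key & np.uint64(0xFFFFFFFF))   # fst: cast to uint32 keeps the low word
    high = np.uint32(key >> np.uint64(32))         # snd: right shift exposes the high word
    return np.array([low, high], dtype=np.uint32).astype(np.int32)  # shape [2]

print(key_to_seed_np(0x1234ABCD5678EF01))  # [1450766081  305441741]
```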
def get_gap(self, tol=0.001, abs_tol=False, spin=None):
(cbm, vbm) = self.get_cbm_vbm(tol, abs_tol, spin)
return max(cbm - vbm, 0.0)
|
Expects a DOS object and finds the gap.
Args:
tol: tolerance in occupations for determining the gap
abs_tol: Use an absolute tolerance (True) or a relative one (False)
spin: Possible values are None - finds the gap in the summed
densities, Up - finds the gap in the up spin channel,
Down - finds the gap in the down spin channel.
Returns:
gap in eV
|
juraj-google-style
|
def cluster_from_file(filename):
atoms_string = Atoms.atoms_string_from_file(filename)
line_list = [l.split() for l in atoms_string.splitlines()[3:]]
coords = []
symbols = []
for l in line_list:
if l:
coords.append([float(i) for i in l[:3]])
symbols.append(l[4])
return Molecule(symbols, coords)
|
Parse the feff input file and return the atomic cluster as a Molecule
object.
Args:
filename (str): path the feff input file
Returns:
Molecule: the atomic cluster as Molecule object. The absorbing atom
is the one at the origin.
|
juraj-google-style
|
def reset_partition_offset(self, partition):
LATEST = -1
EARLIEST = -2
if self.auto_offset_reset == 'largest':
reqs = [OffsetRequestPayload(self.topic, partition, LATEST, 1)]
elif self.auto_offset_reset == 'smallest':
reqs = [OffsetRequestPayload(self.topic, partition, EARLIEST, 1)]
else:
if sys.exc_info() == (None, None, None):
raise OffsetOutOfRangeError('Cannot reset partition offsets without a '
'valid auto_offset_reset setting '
'(largest|smallest)')
raise
log.info('Resetting topic-partition offset to %s for %s:%d',
self.auto_offset_reset, self.topic, partition)
try:
(resp, ) = self.client.send_offset_request(reqs)
except KafkaError as e:
log.error('%s sending offset request for %s:%d',
e.__class__.__name__, self.topic, partition)
else:
self.offsets[partition] = resp.offsets[0]
self.fetch_offsets[partition] = resp.offsets[0]
return resp.offsets[0]
|
Update offsets using auto_offset_reset policy (smallest|largest)
Arguments:
partition (int): the partition for which offsets should be updated
Returns: Updated offset on success, None on failure
|
juraj-google-style
|
def read_local_config(cfg):
try:
if os.path.exists(cfg):
config = import_file_object(cfg)
return config
else:
logger.warning(
'%s: local config file (%s) not found, cannot be read' %
(inspect.stack()[0][3], str(cfg)))
except IOError as e:
logger.warning(
'import_file_object: %s error opening %s' % (str(e), str(cfg))
)
return {}
|
Parses local config file for override values
Args:
cfg (str): filename of the local config file
Returns:
dict object of values contained in local config file
|
juraj-google-style
|
def create_app(config=None, config_obj=None):
app = Flask(__name__)
configure_app(app, config=config, config_obj=config_obj)
register_blueprints(app)
bind_extensions(app)
return app
|
Flask app factory function.
Args:
config (Optional[path]): path to a Python module config file
config_obj (Optional[class]): Python config object
|
codesearchnet
|
def form_uri(item_id, service, is_track):
if is_track:
uri = service.sonos_uri_from_id(item_id)
else:
uri = ('x-rincon-cpcontainer:' + item_id)
return uri
|
Form and return a music service item uri
Args:
item_id (str): The item id
service (MusicService): The music service that the item originates from
is_track (bool): Whether the item_id is from a track or not
Returns:
str: The music service item uri
|
codesearchnet
|
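A hedged sketch of the two branches above, calling the `form_uri` shown in this row; `FakeMusicService` is a stand-in for the real `MusicService` and its return value is made up for illustration:

```python
class FakeMusicService:
    """Hypothetical stand-in exposing only sonos_uri_from_id()."""
    def sonos_uri_from_id(self, item_id):
        return 'x-sonos-http:' + item_id

service = FakeMusicService()

# Track ids go through the service's URI builder...
assert form_uri('track-123', service, is_track=True) == 'x-sonos-http:track-123'
# ...while containers (albums, playlists) get the container prefix.
assert form_uri('album-456', service, is_track=False) == 'x-rincon-cpcontainer:album-456'
```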
def __init__(self, host: str, port: int, command: Optional[str]=None, batch_size: int=100, embedded_columns: list=[]):
self._host = host
self._port = port
self._command = command
self._batch_size = batch_size
self.embedded_columns = embedded_columns
|
Args:
host (str): The redis host
port (int): The redis port
command (str): command to be executed with redis client
batch_size (int): Number of key, values pairs to write at once
embedded_columns (list): list of columns whose embeddings need to be generated
Returns:
:class:`~apache_beam.transforms.ptransform.PTransform`
|
github-repos
|
def create_releasenotes(project_dir=os.curdir, bugtracker_url=''):
pkg_info_file = os.path.join(project_dir, 'PKG-INFO')
if os.path.exists(pkg_info_file):
return
with open('RELEASE_NOTES', 'wb') as releasenotes_fd:
releasenotes_fd.write((get_releasenotes(project_dir=project_dir, bugtracker_url=bugtracker_url).encode('utf-8') + b'\n'))
|
Creates the release notes file, if not in a package.
Args:
project_dir(str): Path to the git repo of the project.
bugtracker_url(str): Url to the bug tracker for the issues.
Returns:
None
Raises:
RuntimeError: If the release notes could not be retrieved
|
codesearchnet
|
def get_pull_request_number(task, source_env_prefix):
pull_request = _extract_from_env_in_payload(task, source_env_prefix + '_PULL_REQUEST_NUMBER')
if pull_request is not None:
pull_request = int(pull_request)
return pull_request
|
Get what Github pull request created the graph.
Args:
task (ChainOfTrust or LinkOfTrust): the trust object to inspect
source_env_prefix (str): The environment variable prefix that is used
to get repository information.
Returns:
int: the pull request number.
None: if not defined for this task.
|
juraj-google-style
|
def remove_collisions(self, min_dist=0.5):
vfcoords = [v.frac_coords for v in self.vnodes]
sfcoords = self.structure.frac_coords
dist_matrix = self.structure.lattice.get_all_distances(vfcoords, sfcoords)
all_dist = np.min(dist_matrix, axis=1)
new_vnodes = []
for (i, v) in enumerate(self.vnodes):
if (all_dist[i] > min_dist):
new_vnodes.append(v)
self.vnodes = new_vnodes
|
Remove vnodes that are too close to existing atoms in the structure
Args:
min_dist(float): The minimum distance that a vertex needs to be
from existing atoms.
|
codesearchnet
|
def _GetParser(self):
usage = 'nsscache synchronises a local NSS cache against a remote data source.\n\nUsage: nsscache [global options] command [command options]\n\ncommands:\n'
command_descriptions = []
for name, cls in list(command.__dict__.items()):
if name == 'Command':
continue
if hasattr(cls, 'Help'):
short_help = cls().Help(short=True)
command_descriptions.append(' %-21s %.40s' % (name.lower(), short_help.lower()))
usage += '\n'.join(command_descriptions)
version_string = 'nsscache ' + nss_cache.__version__ + '\n\nCopyright (c) 2007 Google, Inc.\nThis is free software; see the source for copying conditions. There is NO\nwarranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n\nWritten by Jamie Wilkinson and Vasilios Hoffman.'
parser = optparse.OptionParser(usage, version=version_string)
parser.disable_interspersed_args()
parser.set_defaults(verbose=False, debug=False)
parser.add_option('-v', '--verbose', action='store_true', help='enable verbose output')
parser.add_option('-d', '--debug', action='store_true', help='enable debugging output')
parser.add_option('-c', '--config-file', type='string', help='read configuration from FILE', metavar='FILE')
old_get_usage = parser.get_usage
def get_usage():
return old_get_usage()[7:]
parser.get_usage = get_usage
return parser
|
Sets up our parser for global options.
Args: None
Returns:
# OptionParser is from standard python module optparse
OptionParser
|
github-repos
|
def rApply(d, f):
remainingDicts = [(d, ())]
while (len(remainingDicts) > 0):
(current, prevKeys) = remainingDicts.pop()
for (k, v) in current.iteritems():
keys = (prevKeys + (k,))
if isinstance(v, dict):
remainingDicts.insert(0, (v, keys))
else:
f(v, keys)
|
Recursively applies f to the values in dict d.
Args:
d: The dict to recurse over.
f: A function to apply to values in d that takes the value and a list of
keys from the root of the dict to the value.
|
codesearchnet
|
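A small illustration of the recursive traversal above. Note the original uses Python 2's `dict.iteritems`; the sketch below restates the same logic with Python 3's `items()` so it runs as-is:

```python
def r_apply(d, f):
    # Python 3 restatement of rApply above: walk nested dicts, calling f(value, key_path).
    remaining = [(d, ())]
    while remaining:
        current, prev_keys = remaining.pop()
        for k, v in current.items():
            keys = prev_keys + (k,)
            if isinstance(v, dict):
                remaining.insert(0, (v, keys))
            else:
                f(v, keys)

flat = {}
r_apply({'a': 1, 'b': {'c': 2, 'd': {'e': 3}}},
        lambda value, keys: flat.__setitem__('.'.join(keys), value))
print(flat)  # {'a': 1, 'b.c': 2, 'b.d.e': 3}
```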
def delete(self, paths):
raise NotImplementedError
|
Deletes files or directories at the provided paths.
Directories will be deleted recursively.
Args:
paths: list of paths that give the file objects to be deleted
Raises:
``BeamIOError``: if any of the delete operations fail
|
github-repos
|
def create_new(mapreduce_id=None,
gettime=datetime.datetime.now):
if not mapreduce_id:
mapreduce_id = MapreduceState.new_mapreduce_id()
state = MapreduceState(key_name=mapreduce_id,
last_poll_time=gettime())
state.set_processed_counts([], [])
return state
|
Create a new MapreduceState.
Args:
mapreduce_id: Mapreduce id as string.
gettime: Used for testing.
|
juraj-google-style
|
def parse_args(
bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors
) -> Tuple[Parsed, Errors]:
commas = char_locs["commas"]
for span in parsed:
if parsed[span]["type"] != "Function" or "parens_span" not in parsed[span]:
continue
sp, ep = parsed[span]["parens_span"]
if ep == -1:
args_end = len(bels) - 1
else:
args_end = ep - 1
args = []
arg_start = sp + 1
each_arg_end_list = sorted([end - 1 for end in commas.get(sp, [])] + [args_end])
for arg_end in each_arg_end_list:
while arg_start < args_end and bels[arg_start] == " ":
arg_start += 1
trimmed_arg_end = arg_end
while trimmed_arg_end > arg_start and bels[trimmed_arg_end] == " ":
trimmed_arg_end -= 1
if trimmed_arg_end < arg_start:
trimmed_arg_end = arg_start
arg = "".join(bels[arg_start : trimmed_arg_end + 1])
args.append({"arg": arg, "span": (arg_start, trimmed_arg_end)})
arg_start = arg_end + 2
parsed[span]["args"] = args
return parsed, errors
|
Parse arguments from functions
Args:
bels: BEL string as list of chars
char_locs: char locations for parens, commas and quotes
parsed: function locations
errors: error messages
Returns:
(functions, errors): function and arg locations plus error messages
|
juraj-google-style
|
def from_surface(renderer, surface):
texture = object.__new__(Texture)
texture._ptr = check_ptr_err(lib.SDL_CreateTextureFromSurface(renderer._ptr, surface._ptr))
return texture
|
Create a texture from an existing surface.
Args:
renderer (Renderer): The renderer used to create the texture.
surface (Surface): The surface containing pixel data used to fill the texture.
Returns:
Texture: A texture containing the pixels from surface.
Raises:
SDLError: If an error is encountered.
|
codesearchnet
|
def update(self, resource, timeout=-1):
return self._client.update(resource, timeout=timeout, default_values=self.DEFAULT_VALUES, uri=self.URI)
|
Updates a User.
Args:
resource (dict): Object to update.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView, just stop waiting for its completion.
Returns:
dict: Updated resource.
|
juraj-google-style
|
def run_callback(self):
if (self._callback_func is not None):
try:
self._callback_func(self._request, self._result)
except Exception:
LOGGER.exception('An unhandled error occurred while running future callback')
|
Calls the callback_func, passing in the two positional arguments,
conditionally waiting if the callback function hasn't been set yet.
Meant to be run in a threadpool owned by the FutureCollection.
Returns:
None
|
codesearchnet
|
def cancel(self, consumers):
for consumer in consumers:
del self._consumers[consumer.queue]
protocol = (yield self.when_connected())
(yield protocol.cancel(consumer))
|
Cancel consumers that were previously started with consume.
Args:
consumers (list of fedora_messaging.api.Consumer): The consumers to cancel.
|
codesearchnet
|
def _update_version(connection, version):
if connection.engine.name == 'sqlite':
connection.execute('PRAGMA user_version = {}'.format(version))
elif connection.engine.name == 'postgresql':
connection.execute(DDL('CREATE SCHEMA IF NOT EXISTS {};'.format(POSTGRES_SCHEMA_NAME)))
connection.execute(DDL('CREATE SCHEMA IF NOT EXISTS {};'.format(POSTGRES_PARTITION_SCHEMA_NAME)))
connection.execute('CREATE TABLE IF NOT EXISTS {}.user_version(version INTEGER NOT NULL);'
.format(POSTGRES_SCHEMA_NAME))
if connection.execute('SELECT * FROM {}.user_version;'.format(POSTGRES_SCHEMA_NAME)).fetchone():
connection.execute('UPDATE {}.user_version SET version = {};'
.format(POSTGRES_SCHEMA_NAME, version))
else:
connection.execute('INSERT INTO {}.user_version (version) VALUES ({})'
.format(POSTGRES_SCHEMA_NAME, version))
else:
raise DatabaseMissingError('Do not know how to migrate {} engine.'
.format(connection.engine.driver))
|
Updates version in the db to the given version.
Args:
connection (sqlalchemy connection): sqlalchemy session where to update version.
version (int): version of the migration.
|
juraj-google-style
|
def build_byte_align_buff(bits):
bitmod = len(bits)%8
if bitmod == 0:
rdiff = bitarray()
else:
rdiff = bitarray(8-bitmod)
rdiff.setall(False)
return rdiff+bits
|
Pad the left side of a bitarray with 0s to align its length with byte boundaries.
Args:
bits: A bitarray to be padded and aligned.
Returns:
A newly aligned bitarray.
|
juraj-google-style
|
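For instance, padding a 5-bit pattern on the left to reach a byte boundary, using the `bitarray` package the function already depends on and the `build_byte_align_buff` shown above:

```python
from bitarray import bitarray

bits = bitarray('10111')            # 5 bits -> needs 3 zero bits of left padding
aligned = build_byte_align_buff(bits)
print(aligned)                      # bitarray('00010111'), length 8

already_aligned = bitarray('11110000')
print(build_byte_align_buff(already_aligned))  # unchanged: bitarray('11110000')
```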
def format_image(path, options):
image = Image.open(path)
image_pipeline_results = __pipeline_image(image, options)
return image_pipeline_results
|
Formats an image.
Args:
path (str): Path to the image file.
options (dict): Options to apply to the image.
Returns:
(list) A list of PIL images. The list will always be of length
1 unless resolutions for resizing are provided in the options.
|
codesearchnet
|
def __init__(self):
super(JLinkStraceEventInfo, self).__init__()
self.SizeOfStruct = ctypes.sizeof(self)
|
Initializes the ``JLinkStraceEventInfo`` instance.
Sets the size of the structure.
Args:
self (JLinkStraceEventInfo): the ``JLinkStraceEventInfo`` instance
Returns:
``None``
|
juraj-google-style
|
def login(self, email, password):
r = requests.post('{0}/v2/session'.format(self._api_endpoint), json={'email': email, 'password': password})
r.raise_for_status()
return r.json()['accessToken']
|
Authenticate a user with SignalFx to acquire a session token.
Note that data ingest can only be done with an organization or team API
access token, not with a user token obtained via this method.
Args:
email (string): the email login
password (string): the password
Returns a new, immediately-usable session token for the logged in user.
|
codesearchnet
|
def is_enrolled(self, username, course_run_id):
enrollment = self.get_course_enrollment(username, course_run_id)
return enrollment is not None and enrollment.get('is_active', False)
|
Query the enrollment API and determine if a learner is enrolled in a course run.
Args:
username (str): The username by which the user goes on the OpenEdX platform
course_run_id (str): The string value of the course's unique identifier
Returns:
bool: Indicating whether the user is enrolled in the course run. Returns False under any errors.
|
juraj-google-style
|
def output(self, _filename):
for contract in self.contracts:
txt = "\nContract %s\n"%contract.name
table = PrettyTable(["Function", "State variables written", "Conditions on msg.sender"])
for function in contract.functions:
state_variables_written = [v.name for v in function.all_state_variables_written()]
msg_sender_condition = self.get_msg_sender_checks(function)
table.add_row([function.name, str(state_variables_written), str(msg_sender_condition)])
self.info(txt + str(table))
|
_filename is not used
Args:
_filename(string)
|
juraj-google-style
|
def empty(element_spec):
return _OptionalImpl(gen_optional_ops.optional_none(), element_spec)
|
Returns an `Optional` that has no value.
NOTE: This method takes an argument that defines the structure of the value
that would be contained in the returned `Optional` if it had a value.
>>> optional = tf.experimental.Optional.empty(
... tf.TensorSpec(shape=(), dtype=tf.int32, name=None))
>>> print(optional.has_value())
tf.Tensor(False, shape=(), dtype=bool)
Args:
element_spec: A (nested) structure of `tf.TypeSpec` objects matching the
structure of an element of this optional.
Returns:
A `tf.experimental.Optional` with no value.
|
github-repos
|
def compstat(sdat, tstart=None, tend=None):
data = sdat.tseries_between(tstart, tend)
time = data['t'].values
delta_time = time[-1] - time[0]
data = data.iloc[:, 1:].values
mean = np.trapz(data, x=time, axis=0) / delta_time
rms = np.sqrt(np.trapz((data - mean)**2, x=time, axis=0) / delta_time)
with open(misc.out_name('statistics.dat'), 'w') as out_file:
mean.tofile(out_file, sep=' ', format="%10.5e")
out_file.write('\n')
rms.tofile(out_file, sep=' ', format="%10.5e")
out_file.write('\n')
|
Compute statistics from series output by StagYY.
Create a file 'statistics.dat' containing the mean and standard deviation
of each series on the requested time span.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): starting time. Set to None to start at the beginning of
available data.
tend (float): ending time. Set to None to stop at the end of available
data.
|
juraj-google-style
|
def assert_processor_available(processor: str) -> None:
if (processor not in [Processors.XHTML2PDF, Processors.WEASYPRINT, Processors.PDFKIT]):
raise AssertionError('rnc_pdf.set_pdf_processor: invalid PDF processor specified')
if ((processor == Processors.WEASYPRINT) and (not weasyprint)):
raise RuntimeError('rnc_pdf: Weasyprint requested, but not available')
if ((processor == Processors.XHTML2PDF) and (not xhtml2pdf)):
raise RuntimeError('rnc_pdf: xhtml2pdf requested, but not available')
if ((processor == Processors.PDFKIT) and (not pdfkit)):
raise RuntimeError('rnc_pdf: pdfkit requested, but not available')
|
Assert that a specific PDF processor is available.
Args:
processor: a PDF processor type from :class:`Processors`
Raises:
AssertionError: if bad ``processor``
RuntimeError: if requested processor is unavailable
|
codesearchnet
|
def validate_token(key, token, user_id, action_id='', current_time=None):
if (not token):
return False
try:
decoded = base64.urlsafe_b64decode(token)
token_time = int(decoded.split(DELIMITER)[(- 1)])
except (TypeError, ValueError, binascii.Error):
return False
if (current_time is None):
current_time = time.time()
if ((current_time - token_time) > DEFAULT_TIMEOUT_SECS):
return False
expected_token = generate_token(key, user_id, action_id=action_id, when=token_time)
if (len(token) != len(expected_token)):
return False
different = 0
for (x, y) in zip(bytearray(token), bytearray(expected_token)):
different |= (x ^ y)
return (not different)
|
Validates that the given token authorizes the user for the action.
Tokens are invalid if the time of issue is too old or if the token
does not match what generateToken outputs (i.e. the token was forged).
Args:
key: secret key to use.
token: a string of the token generated by generateToken.
user_id: the user ID of the authenticated user.
action_id: a string identifier of the action they requested
authorization for.
current_time: time in seconds since the epoch to validate the token
against; defaults to the current time.
Returns:
A boolean - True if the user is authorized for the action, False
otherwise.
|
codesearchnet
|
def allreduce_grads_hierarchical(all_grads, devices, average=False):
num_gpu = len(devices)
assert num_gpu == 8, num_gpu
assert len(all_grads) == num_gpu, len(all_grads)
    group_size = num_gpu // 2
agg_all_grads = []
for varid, grads in enumerate(zip(*all_grads)):
g0_main_gpu = varid % num_gpu
g1_main_gpu = (g0_main_gpu + group_size) % num_gpu
g0_start = 0 if g0_main_gpu < group_size else group_size
g1_start = 0 if g1_main_gpu < group_size else group_size
assert g0_start != g1_start
g0_grads = grads[g0_start: g0_start + group_size]
g1_grads = grads[g1_start: g1_start + group_size]
with tf.device(devices[g0_main_gpu]):
g0_agg = tf.add_n(g0_grads, name='group0_agg')
with tf.device(devices[g1_main_gpu]):
g1_agg = tf.add_n(g1_grads, name='group1_agg')
g1_total_agg = tf.add(g0_agg, g1_agg, name='group1_total_agg')
with tf.device(devices[g0_main_gpu]):
g0_total_agg = tf.identity(g1_total_agg, name='group0_total_agg')
agg_grads = []
for k in range(num_gpu):
if (k < group_size) == (g0_main_gpu < group_size):
main_gpu = g0_total_agg
else:
main_gpu = g1_total_agg
with tf.device(devices[k]):
if not average:
device_total_agg = tf.identity(
main_gpu, name='device{}_total_agg'.format(k))
else:
device_total_agg = tf.multiply(
main_gpu, 1.0 / num_gpu, name='device{}_total_agg'.format(k))
agg_grads.append(device_total_agg)
agg_all_grads.append(agg_grads)
agg_all_grads = list(zip(*agg_all_grads))
return agg_all_grads
|
Hierarchical allreduce for DGX-1 system.
Args:
all_grads (K x N): List of list of gradients. N is the number of variables.
devices ([str]): K str for the K devices.
average (bool): average gradients or not.
Returns:
(K x N): same as input, but each grad is replaced by the average over K lists.
|
juraj-google-style
|
def assert_split_at_fraction_binary(source, expected_items, num_items_to_read_before_split, left_fraction, left_result, right_fraction, right_result, stats, start_position=None, stop_position=None):
assert right_fraction > left_fraction
if right_fraction - left_fraction < 0.001:
return
middle_fraction = (left_fraction + right_fraction) / 2
if left_result is None:
left_result = _assert_split_at_fraction_behavior(source, expected_items, num_items_to_read_before_split, left_fraction, ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS)
if right_result is None:
right_result = _assert_split_at_fraction_behavior(source, expected_items, num_items_to_read_before_split, right_fraction, ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS)
middle_result = _assert_split_at_fraction_behavior(source, expected_items, num_items_to_read_before_split, middle_fraction, ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS)
if middle_result[1] != -1:
stats.successful_fractions.append(middle_fraction)
if middle_result[1] > 0:
stats.non_trivial_fractions.append(middle_fraction)
if left_result[0] != middle_result[0]:
assert_split_at_fraction_binary(source, expected_items, num_items_to_read_before_split, left_fraction, left_result, middle_fraction, middle_result, stats)
if right_fraction == 1.0 or middle_result[0] != right_result[0]:
assert_split_at_fraction_binary(source, expected_items, num_items_to_read_before_split, middle_fraction, middle_result, right_fraction, right_result, stats)
|
Performs dynamic work rebalancing for fractions within a given range.
Asserts that given a start position, a source can be split at every
interesting fraction (halfway between two fractions that differ by at
least one item) and the results are consistent if a split succeeds.
Args:
source: source to perform dynamic splitting on.
expected_items: total set of items expected when reading the source.
num_items_to_read_before_split: number of items to read before splitting.
left_fraction: left fraction for binary splitting.
left_result: result received by splitting at left fraction.
right_fraction: right fraction for binary splitting.
right_result: result received by splitting at right fraction.
stats: a ``SplitFractionStatistics`` for storing results.
|
github-repos
|
def protein_only_and_noH(self, keep_ligands=None, force_rerun=False):
log.debug('{}: running protein receptor isolation...'.format(self.id))
if not self.dockprep_path:
        raise ValueError('Please run dockprep')
receptor_mol2 = op.join(self.dock_dir, '{}_receptor.mol2'.format(self.id))
receptor_noh = op.join(self.dock_dir, '{}_receptor_noH.pdb'.format(self.id))
prly_com = op.join(self.dock_dir, "prly.com")
if ssbio.utils.force_rerun(flag=force_rerun, outfile=receptor_noh):
with open(prly_com, "w") as f:
f.write('open {}\n'.format(self.dockprep_path))
keep_str = 'delete ~protein'
if keep_ligands:
keep_ligands = ssbio.utils.force_list(keep_ligands)
for res in keep_ligands:
keep_str += ' & ~:{} '.format(res)
keep_str = keep_str.strip() + '\n'
f.write(keep_str)
f.write('write format mol2 0 {}\n'.format(receptor_mol2))
f.write('delete element.H\n')
f.write('write format pdb 0 {}\n'.format(receptor_noh))
cmd = 'chimera --nogui {}'.format(prly_com)
os.system(cmd)
os.remove(prly_com)
if ssbio.utils.is_non_zero_file(receptor_mol2) and ssbio.utils.is_non_zero_file(receptor_noh):
self.receptormol2_path = receptor_mol2
self.receptorpdb_path = receptor_noh
log.debug('{}: successful receptor isolation (mol2)'.format(self.receptormol2_path))
log.debug('{}: successful receptor isolation (pdb)'.format(self.receptorpdb_path))
else:
log.critical('{}: protein_only_and_noH failed to run on dockprep file'.format(self.dockprep_path))
|
Isolate the receptor by stripping everything except protein and specified ligands.
Args:
keep_ligands (str, list): Ligand(s) to keep in PDB file
force_rerun (bool): If method should be rerun even if output file exists
|
juraj-google-style
|
def __init__(self, mu, sigma, output_shape):
self.__mu = mu
self.__sigma = sigma
self.__output_shape = output_shape
|
Init.
Args:
mu: `float` or `array_like of floats`.
Mean (`centre`) of the distribution.
sigma: `float` or `array_like of floats`.
Standard deviation (spread or `width`) of the distribution.
output_shape: Output shape.
the shape is `(batch size, d1, d2, d3, ...)`.
|
juraj-google-style
|
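A hedged sketch of what such a parameter holder is typically used for: drawing Gaussian samples of the stored shape. The `draw` method below is illustrative and not part of the class shown, which only exposes its initializer:

```python
import numpy as np

class GaussianNoiseSketch:
    """Illustrative restatement: holds mu/sigma/output_shape and samples from them."""
    def __init__(self, mu, sigma, output_shape):
        self.__mu = mu
        self.__sigma = sigma
        self.__output_shape = output_shape

    def draw(self):
        # Hypothetical sampling step, assuming the parameters feed a normal distribution.
        return np.random.normal(loc=self.__mu, scale=self.__sigma, size=self.__output_shape)

noise = GaussianNoiseSketch(mu=0.0, sigma=0.1, output_shape=(32, 100))
print(noise.draw().shape)  # (32, 100)
```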
def list_objects(self, path='', relative=False, first_level=False,
max_request_entries=None):
entries = 0
max_request_entries_arg = None
if not relative:
path = self.relpath(path)
if not path:
locators = self._list_locators()
if first_level:
for locator in locators:
entries += 1
yield locator
if entries == max_request_entries:
return
return
for loc_path, loc_header in locators:
loc_path = loc_path.strip('/')
entries += 1
yield loc_path, loc_header
if entries == max_request_entries:
return
if max_request_entries is not None:
max_request_entries_arg = max_request_entries - entries
try:
for obj_path, obj_header in self._list_objects(
self.get_client_kwargs(loc_path), '',
max_request_entries_arg):
entries += 1
yield ('/'.join((loc_path, obj_path.lstrip('/'))),
obj_header)
if entries == max_request_entries:
return
except ObjectPermissionError:
continue
return
locator, path = self.split_locator(path)
if first_level:
seen = set()
if max_request_entries is not None:
max_request_entries_arg = max_request_entries - entries
for obj_path, header in self._list_objects(
self.get_client_kwargs(locator), path, max_request_entries_arg):
if path:
try:
obj_path = obj_path.split(path, 1)[1]
except IndexError:
continue
obj_path = obj_path.lstrip('/')
if not obj_path:
continue
if first_level:
try:
obj_path, _ = obj_path.strip('/').split('/', 1)
obj_path += '/'
header = dict()
except ValueError:
pass
if obj_path not in seen:
entries += 1
yield obj_path, header
if entries == max_request_entries:
return
seen.add(obj_path)
else:
entries += 1
yield obj_path, header
if entries == max_request_entries:
return
|
List objects.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
first_level (bool): It True, returns only first level objects.
Else, returns full tree.
max_request_entries (int): If specified, maximum entries returned
by request.
Returns:
generator of tuple: object name str, object header dict
|
juraj-google-style
|
def configure(cls, api_token,
api_url="https:
poll_interval=5, skip_ssl_cert_check=False, cloud_name="AWS"):
cls._auth = QuboleAuth(api_token)
cls.api_token = api_token
cls.version = version
cls.baseurl = api_url
if poll_interval < Qubole.MIN_POLL_INTERVAL:
log.warn("Poll interval cannot be less than %s seconds. Setting it to %s seconds.\n" % (Qubole.MIN_POLL_INTERVAL, Qubole.MIN_POLL_INTERVAL))
cls.poll_interval = Qubole.MIN_POLL_INTERVAL
else:
cls.poll_interval = poll_interval
cls.skip_ssl_cert_check = skip_ssl_cert_check
cls.cloud_name = cloud_name.lower()
cls.cached_agent = None
|
Set parameters governing interaction with QDS
Args:
`api_token`: authorization token for QDS. required
`api_url`: the base URL for QDS API. configurable for testing only
`version`: QDS REST api version. Will be used throughout unless overridden in Qubole.agent(..)
`poll_interval`: interval in secs when polling QDS for events
|
juraj-google-style
|
def list_directory_v2(path):
if not is_directory(path):
raise errors.NotFoundError(node_def=None, op=None, message='Could not find directory {}'.format(path))
return [compat.as_str_any(filename) for filename in _pywrap_file_io.GetChildren(compat.path_to_bytes(path))]
|
Returns a list of entries contained within a directory.
The list is in arbitrary order. It does not contain the special entries "."
and "..".
Args:
path: string, path to a directory
Returns:
[filename1, filename2, ... filenameN] as strings
Raises:
errors.NotFoundError if directory doesn't exist
|
github-repos
|
def load_tf_shard(model, model_layer_map, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
saved_weight_names_set = set()
saved_weights = {}
mismatched_keys = set()
unexpected_keys = set()
try:
with h5py.File(resolved_archive_file, 'r') as sharded_checkpoint_file:
saved_h5_model_layers_name = set(load_attributes_from_hdf5_group(sharded_checkpoint_file, 'layer_names'))
weight_value_tuples = []
for layer_name in saved_h5_model_layers_name:
h5_layer_object = sharded_checkpoint_file[layer_name]
saved_weights[layer_name] = np.asarray(h5_layer_object)
saved_weight_names_set.add(layer_name)
if layer_name not in model_layer_map:
unexpected_keys.add(layer_name)
else:
symbolic_weight = model.weights[model_layer_map[layer_name]]
saved_weight_value = saved_weights[layer_name]
if saved_weight_value is not None:
if K.int_shape(symbolic_weight) != saved_weight_value.shape:
try:
array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))
except ValueError as e:
if ignore_mismatched_sizes:
mismatched_keys.add((layer_name, saved_weight_value.shape, K.int_shape(symbolic_weight)))
continue
else:
raise e
else:
array = saved_weight_value
weight_value_tuples.append((symbolic_weight, array))
K.batch_set_value(weight_value_tuples)
return (saved_weight_names_set, unexpected_keys, mismatched_keys)
except Exception as e:
try:
with open(resolved_archive_file) as f:
if f.read().startswith('version'):
raise OSError('You seem to have cloned a repository without having git-lfs installed. Please install git-lfs and run `git lfs install` followed by `git lfs pull` in the folder you cloned.')
else:
raise ValueError(f'Unable to locate the file {resolved_archive_file} which is necessary to load this pretrained model. Make sure you have saved the model properly.') from e
except (UnicodeDecodeError, ValueError):
raise OSError(f"Unable to load weights from TF checkpoint file for '{resolved_archive_file}' at '{resolved_archive_file}'. If you tried to load a TF model from a sharded checkpoint, you should try converting the model by loading it in pytorch and saving it locally. A conversion script should be released soon.")
|
Loads a shard from a sharded checkpoint file. Can be either H5 or Safetensors.
Handles missing keys and unexpected keys.
Args:
model (`keras.models.Model`): Model in which the weights are loaded
model_layer_map (`Dict`): A dictionary mapping the layer name to the index of the layer in the model.
resolved_archive_file (`str`): Path to the checkpoint file from which the weights will be loaded
ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether to ignore the mismatched keys
Returns:
Three sets: the layer names that were found and successfully restored (from the shard
file), the unexpected layer names, and the mismatched layers.
|
github-repos
|
def __init__(self, key, attributes):
self.key = key
self.attributes = attributes
|
Object initialization
Args:
key: String name of an attributes key that represents the unique identify of the request
attributes: Dictionary whose keys match the string values of the request attribute's names and values correspond to the request attribute values
|
juraj-google-style
|
def fix_speech_decoder_output(self, speech_ids: torch.LongTensor) -> torch.LongTensor:
decoder_fixing_codes = self.config.decoder_config.decoder_fixing_codes
speech_ids = speech_ids[:, 1:]
stop_token_indices = torch.where(speech_ids == self.speech_decoder_model.config.eos_token_id, 1, 0)
speech_ids = torch.masked_fill(speech_ids, mask=stop_token_indices.bool(), value=decoder_fixing_codes[0])
for i, each_seq_stop_token_index in enumerate(stop_token_indices):
if each_seq_stop_token_index.sum() == 0:
continue
stm = each_seq_stop_token_index.argmax()
speech_ids[i, stm:] = decoder_fixing_codes[0]
if stm - 3 < speech_ids.shape[1]:
speech_ids[i, -3:] = torch.tensor([decoder_fixing_codes[1:]], device=speech_ids.device, dtype=torch.long)
return speech_ids
|
This method modifies the output of the decoder model, such as replacing the `eos_token_id` and changing the
last few tokens of each sequence.
Args:
speech_ids (`torch.LongTensor`):
This refers to the output of the decoder model.
|
github-repos
|
def get_num_chunks(length, chunksize):
n_chunks = int(math.ceil((length / chunksize)))
return n_chunks
|
r"""
Returns the number of chunks that a list will be split into given a
chunksize.
Args:
length (int):
chunksize (int):
Returns:
int: n_chunks
CommandLine:
python -m utool.util_progress --exec-get_num_chunks:0
Example0:
>>> # ENABLE_DOCTEST
>>> from utool.util_progress import * # NOQA
>>> length = 2000
>>> chunksize = 256
>>> n_chunks = get_num_chunks(length, chunksize)
>>> result = ('n_chunks = %s' % (six.text_type(n_chunks),))
>>> print(result)
n_chunks = 8
|
codesearchnet
|
def list_bindings(site):
ret = dict()
sites = list_sites()
if (site not in sites):
log.warning('Site not found: %s', site)
return ret
ret = sites[site]['bindings']
if (not ret):
log.warning('No bindings found for site: %s', site)
return ret
|
Get all configured IIS bindings for the specified site.
Args:
site (str): The name of the IIS Site
Returns:
dict: A dictionary of the binding names and properties.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_bindings site
|
codesearchnet
|
def deserialize_report(self, serialized):
type_map = self.known_formats
if serialized['report_format'] not in type_map:
raise ArgumentError("Unknown report format in DeserializeReport", format=serialized['report_format'])
report = type_map[serialized['report_format']](serialized['encoded_report'])
report.received_time = serialized['received_time']
return report
|
Deserialize a report that has been serialized by calling report.serialize()
Args:
serialized (dict): A serialized report object
|
juraj-google-style
|
def register_piece(self, from_address, to_address, hash, password, min_confirmations=6, sync=False, ownership=True):
(file_hash, file_hash_metadata) = hash
(path, from_address) = from_address
verb = Spoolverb()
unsigned_tx = self.simple_spool_transaction(from_address, [file_hash, file_hash_metadata, to_address], op_return=verb.piece, min_confirmations=min_confirmations)
signed_tx = self._t.sign_transaction(unsigned_tx, password)
txid = self._t.push(signed_tx)
return txid
|
Register a piece
Args:
from_address (Tuple[str]): Federation address. All register transactions
originate from the the Federation wallet
to_address (str): Address registering the edition
hash (Tuple[str]): Hash of the piece. (file_hash, file_hash_metadata)
password (str): Federation wallet password. For signing the transaction
edition_num (int): The number of the edition to register. User
edition_num=0 to register the master edition
min_confirmations (int): Override the number of confirmations when
chosing the inputs of the transaction. Defaults to 6
sync (bool): Perform the transaction in synchronous mode, the call to the
function will block until there is at least on confirmation on
the blockchain. Defaults to False
ownership (bool): Check ownsership in the blockchain before pushing the
transaction. Defaults to True
Returns:
str: transaction id
|
codesearchnet
|
def is_broadcast_compatible(shape_x, shape_y):
if shape_x.ndims is None or shape_y.ndims is None:
return False
return _broadcast_shape_helper(shape_x, shape_y) is not None
|
Returns True if `shape_x` and `shape_y` are broadcast compatible.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
True if a shape exists that both `shape_x` and `shape_y` can be broadcasted
to. False otherwise.
|
github-repos
|
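A quick usage sketch with concrete `TensorShape` values, assuming the `is_broadcast_compatible` above is in scope:

```python
import tensorflow as tf

# (2, 1) broadcasts against (1, 3) to (2, 3); (2, 2) and (2, 3) do not broadcast.
print(is_broadcast_compatible(tf.TensorShape([2, 1]), tf.TensorShape([1, 3])))  # True
print(is_broadcast_compatible(tf.TensorShape([2, 2]), tf.TensorShape([2, 3])))  # False
# Unknown ranks are conservatively reported as incompatible.
print(is_broadcast_compatible(tf.TensorShape(None), tf.TensorShape([2])))       # False
```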
def env(cls, separator=None, match=None, whitelist=None, parse_values=None, to_lower=None, convert_underscores=None):
cls.__hierarchy.append(env.Env(separator, match, whitelist, parse_values, to_lower, convert_underscores))
|
Set environment variables as a source.
By default all environment variables available to the process are used.
This can be narrowed by the args.
Args:
separator: Keys are split along this character, the resulting
splits are considered nested values.
match: Regular expression for key matching. Keys matching the
expression are considered whitelisted.
whitelist: Only use environment variables that are listed in this
list.
parse_values: Try to parse all variable for well-known types.
to_lower: Convert all variable names to lower case.
convert_underscores: Convert all underscores in the name to dashes,
this takes place after separation via the separator option.
|
codesearchnet
|
def alloc_buffer(self, length):
buf = Buffer(sum(len(v) for v in six.iterkeys(self.data)) + sum(v.length for v in self.buffers), length)
self.buffers.append(buf)
return buf
|
Allocate a buffer (a range of uninitialized memory).
Arguments:
length(int): The length of the buffer to allocate.
Returns:
~pwnypack.types.Buffer: The object used to address this buffer.
|
juraj-google-style
|
def end_container(self, header_buf):
if (not self.__container_nodes):
raise ValueError('Attempted to end container with none active.')
self.__container_node.add_leaf(_Node(header_buf))
self.__container_node = self.__container_nodes.pop()
parent_container_length = self.__container_lengths.pop()
self.current_container_length = ((parent_container_length + self.current_container_length) + len(header_buf))
|
Add a node containing the container's header to the current subtree.
This node will be added as the leftmost leaf of the subtree that was
started by the matching call to start_container.
Args:
header_buf (bytearray): bytearray containing the container header.
|
codesearchnet
|
def delete_permissions(self, grp_name, resource):
self.service.delete_permissions(
grp_name, resource, self.url_prefix, self.auth, self.session, self.session_send_opts)
|
Removes permissions from the group for the given resource.
Args:
grp_name (string): Name of group.
resource (intern.resource.boss.BossResource): Identifies which data model object to operate on.
Raises:
requests.HTTPError on failure.
|
juraj-google-style
|
def received(self, limit=None):
return list(itertools.islice((itertools.filterfalse(lambda x: x[1].sent, self.store)), limit))[::-1]
|
Returns all the events that have been received (excluding sent events), until a limit if defined
Args:
limit (int, optional): the max length of the events to return (Default value = None)
Returns:
list: a list of received events
|
juraj-google-style
|
def flatten_zip_dataset(*args):
flattened = tf.data.Dataset.from_tensors(args[0])
for ex in args[1:]:
flattened = flattened.concatenate(tf.data.Dataset.from_tensors(ex))
return flattened
|
A list of examples to a dataset containing mixed examples.
Given a list of `n` dataset examples, flatten them by converting
each element into a dataset and concatenating them to convert into a
single dataset.
Args:
*args: A list containing one example each from `n` different datasets.
Returns:
flattened: A new dataset containing the examples from the list as part
of a single dataset.
|
codesearchnet
|
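A brief usage sketch: zip several datasets, then flat-map through the helper above so their examples are interleaved into a single stream:

```python
import tensorflow as tf

a = tf.data.Dataset.from_tensor_slices([1, 2, 3])
b = tf.data.Dataset.from_tensor_slices([10, 20, 30])

# Each zipped tuple (x, y) is expanded into a tiny dataset [x, y] and concatenated.
mixed = tf.data.Dataset.zip((a, b)).flat_map(flatten_zip_dataset)
print(list(mixed.as_numpy_iterator()))  # [1, 10, 2, 20, 3, 30]
```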
def set_number_of_atoms(self, n, selected_sites=None):
self.number_of_atoms = n
self.atoms = species.Species(self.lattice.populate_sites(self.number_of_atoms, selected_sites=selected_sites))
|
Set the number of atoms for the simulation, and populate the simulation lattice.
Args:
n (Int): Number of atoms for this simulation.
selected_sites (:obj:(List|Set|String), optional): Selects a subset of site types to be populated with atoms. Defaults to None.
Returns:
None
|
codesearchnet
|
def delete_data(self, url, *args, **kwargs):
res = self._conn.delete(url, headers=self._prepare_headers(**kwargs))
if ((res.status_code == 200) or (res.status_code == 202)):
return True
else:
return False
|
Deletes data under provided url
Returns status as boolean.
Args:
**url**: address of file to be deleted
.. versionadded:: 0.3.2
**additional_headers**: (optional) Additional headers
to be used with request
Returns:
Boolean. True if request was successful. False if not.
|
codesearchnet
|
def write_variables(app_configs=None, out_file='', git_short=''):
generated = gogoutils.Generator(*gogoutils.Parser(git_short).parse_url(), formats=APP_FORMATS)
json_configs = {}
for (env, configs) in app_configs.items():
if (env != 'pipeline'):
instance_profile = generated.iam()['profile']
rendered_configs = json.loads(get_template('configs/configs.json.j2', env=env, app=generated.app_name(), profile=instance_profile, formats=generated))
json_configs[env] = dict(DeepChainMap(configs, rendered_configs))
region_list = configs.get('regions', rendered_configs['regions'])
json_configs[env]['regions'] = region_list
for region in region_list:
region_config = json_configs[env][region]
json_configs[env][region] = dict(DeepChainMap(region_config, rendered_configs))
else:
default_pipeline_json = json.loads(get_template('configs/pipeline.json.j2', formats=generated))
json_configs['pipeline'] = dict(DeepChainMap(configs, default_pipeline_json))
LOG.debug('Compiled configs:\n%s', pformat(json_configs))
config_lines = convert_ini(json_configs)
with open(out_file, 'at') as jenkins_vars:
LOG.info('Appending variables to %s.', out_file)
jenkins_vars.write('\n'.join(config_lines))
with open((out_file + '.exports'), 'wt') as export_vars:
LOG.info('Writing sourceable variables to %s.', export_vars.name)
export_vars.write('\n'.join(('export {0}'.format(line) for line in config_lines)))
with open((out_file + '.json'), 'wt') as json_handle:
LOG.info('Writing JSON to %s.', json_handle.name)
LOG.debug('Total JSON dict:\n%s', json_configs)
json.dump(json_configs, json_handle)
return json_configs
|
Append _application.json_ configs to _out_file_, .exports, and .json.
Variables are written in INI style, e.g. UPPER_CASE=value. The .exports file
contains 'export' prepended to each line for easy sourcing. The .json file
is a minified representation of the combined configurations.
Args:
app_configs (dict): Environment configurations from _application.json_
files, e.g. {'dev': {'elb': {'subnet_purpose': 'internal'}}}.
out_file (str): Name of INI file to append to.
git_short (str): Short name of Git repository, e.g. forrest/core.
Returns:
dict: Configuration equivalent to the JSON output.
|
codesearchnet
|
def export_constant(self, module_name: str, name: str) -> None:
module = sys.modules[module_name]
api_constants_attr = API_ATTRS[self._api_name].constants
api_constants_attr_v1 = API_ATTRS_V1[self._api_name].constants
if not hasattr(module, api_constants_attr):
setattr(module, api_constants_attr, [])
getattr(module, api_constants_attr).append((self._names, name))
if not hasattr(module, api_constants_attr_v1):
setattr(module, api_constants_attr_v1, [])
getattr(module, api_constants_attr_v1).append((self._names_v1, name))
|
Store export information for constants/string literals.
Export information is stored in the module where constants/string literals
are defined.
e.g.
```python
foo = 1
bar = 2
tf_export("consts.foo").export_constant(__name__, 'foo')
tf_export("consts.bar").export_constant(__name__, 'bar')
```
Args:
module_name: (string) Name of the module to store constant at.
name: (string) Current constant name.
|
github-repos
|
def FirstEventTimestamp(self):
if (self._first_event_timestamp is not None):
return self._first_event_timestamp
with self._generator_mutex:
try:
event = next(self._generator.Load())
self._ProcessEvent(event)
return self._first_event_timestamp
except StopIteration:
raise ValueError('No event timestamp could be found')
|
Returns the timestamp in seconds of the first event.
If the first event has been loaded (either by this method or by `Reload`),
this returns immediately. Otherwise, it will load in the first event. Note
that this means that calling `Reload` will cause this to block until
`Reload` has finished.
Returns:
The timestamp in seconds of the first event that was loaded.
Raises:
ValueError: If no events have been loaded and there were no events found
on disk.
|
codesearchnet
|
def transform(self, X):
assert np.shape(X)[0] == len(self._weights), (
'BlendingOptimizer: Number of models to blend its predictions and weights does not match: '
'n_models={}, weights_len={}'.format(np.shape(X)[0], len(self._weights)))
blended_predictions = np.average(np.power(X, self._power),
weights=self._weights,
axis=0) ** (1.0 / self._power)
return {'y_pred': blended_predictions}
|
Performs predictions blending using the trained weights.
Args:
X (array-like): Predictions of different models.
Returns: dict with blended predictions (key is 'y_pred').
|
juraj-google-style
|
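The blend above is a weighted power mean over the model axis; a NumPy-only sketch of the same formula, with hypothetical weights and power, shows the shape convention (models on axis 0):

```python
import numpy as np

preds = np.array([[0.2, 0.8, 0.5],    # model 1 predictions
                  [0.4, 0.6, 0.7]])   # model 2 predictions
weights, power = [0.7, 0.3], 2.0

# Raise to `power`, take the weighted average across models, then undo the power.
blended = np.average(np.power(preds, power), weights=weights, axis=0) ** (1.0 / power)
print(blended)  # one blended prediction per sample, roughly [0.28, 0.75, 0.57]
```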
def execute_status(args, root_dir=None):
status = command_factory('status')({}, root_dir=root_dir)
if status['status'] == 'running':
status['status'] = Color('{autogreen}' + '{}'.format(status['status']) + '{/autogreen}')
elif status['status'] in ['paused']:
status['status'] = Color('{autoyellow}' + '{}'.format(status['status']) + '{/autoyellow}')
print('Daemon: {}\n'.format(status['status']))
data = status['data']
if isinstance(data, str):
print(data)
elif isinstance(data, dict):
formatted_data = []
formatted_data.append(['Index', 'Status', 'Code',
'Command', 'Path', 'Start', 'End'])
for key, entry in sorted(data.items(), key=operator.itemgetter(0)):
formatted_data.append(
[
                        '#{}'.format(key),
entry['status'],
'{}'.format(entry['returncode']),
entry['command'],
entry['path'],
entry['start'],
entry['end']
]
)
table = AsciiTable(formatted_data)
table.outer_border = False
table.inner_column_border = False
terminal_width = terminal_size()
customWidth = table.column_widths
if (reduce(lambda a, b: a+b, table.column_widths) + 10) > terminal_width[0]:
left_space = math.floor((terminal_width[0] - customWidth[0] - customWidth[1] - customWidth[2] - customWidth[5] - customWidth[6] - 14)/2)
if customWidth[3] < left_space:
customWidth[4] = 2*left_space - customWidth[3]
elif customWidth[4] < left_space:
customWidth[3] = 2*left_space - customWidth[4]
else:
customWidth[3] = left_space
customWidth[4] = left_space
for i, entry in enumerate(table.table_data):
for j, string in enumerate(entry):
max_width = customWidth[j]
wrapped_string = '\n'.join(wrap(string, max_width))
if j == 1:
if wrapped_string == 'done' or wrapped_string == 'running' or wrapped_string == 'paused':
wrapped_string = Color('{autogreen}' + '{}'.format(wrapped_string) + '{/autogreen}')
elif wrapped_string in ['queued', 'stashed']:
wrapped_string = Color('{autoyellow}' + '{}'.format(wrapped_string) + '{/autoyellow}')
elif wrapped_string in ['failed', 'stopping', 'killing']:
wrapped_string = Color('{autored}' + '{}'.format(wrapped_string) + '{/autored}')
elif j == 2:
if wrapped_string == '0' and wrapped_string != 'Code':
wrapped_string = Color('{autogreen}' + '{}'.format(wrapped_string) + '{/autogreen}')
elif wrapped_string != '0' and wrapped_string != 'Code':
wrapped_string = Color('{autored}' + '{}'.format(wrapped_string) + '{/autored}')
table.table_data[i][j] = wrapped_string
print(table.table)
print('')
|
Print the status of the daemon.
This function displays the current status of the daemon as well
as the whole queue and all available information about every entry
in the queue.
`terminaltables` is used to format and display the queue contents.
`colorclass` is used to color format the various items in the queue.
Args:
args (dict): Parsed command line arguments forwarded by the caller.
root_dir (string): The path to the root directory the daemon is running in.
|
juraj-google-style
|
class MaxPooling3D(keras_layers.MaxPooling3D, base.Layer):
def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs):
if strides is None:
raise ValueError('Argument `strides` must not be None.')
super(MaxPooling3D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)
|
Max pooling layer for 3D inputs (e.g. volumes).
Args:
pool_size: An integer or tuple/list of 3 integers:
(pool_depth, pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string. The ordering of the dimensions in the inputs.
`channels_last` (default) and `channels_first` are supported.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
name: A string, the name of the layer.
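Example (a hedged sketch using the equivalent Keras layer, since this class wraps the same pooling behaviour; shapes are illustrative):
```python
import numpy as np
import tensorflow as tf

# 5-D input: (batch, depth, height, width, channels).
volumes = np.random.rand(2, 8, 8, 8, 3).astype('float32')

pool = tf.keras.layers.MaxPooling3D(pool_size=2, strides=2, padding='valid')
print(pool(volumes).shape)  # (2, 4, 4, 4, 3)
```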
|
github-repos
|
def load_with_vocab(fin, vocab, dtype=np.float32):
arr = None
for line in fin:
try:
token, v = _parse_line(line, dtype)
except (ValueError, IndexError):
raise ParseError(b'Parsing error in line: ' + line)
if token in vocab:
if arr is None:
arr = np.empty((len(vocab), len(v)), dtype=dtype)
arr.fill(np.NaN)
elif arr.shape[1] != len(v):
raise ParseError(b'Vector size did not match in line: ' + line)
arr[vocab[token], :] = np.array(v, dtype=dtype).reshape(1, -1)
return arr
|
Load word embedding file with predefined vocabulary
Args:
fin (File): File object to read. File should be open for reading ascii.
vocab (dict): Mapping from words (``bytes``) to vector indices
(``int``).
dtype (numpy.dtype): Element data type to use for the array.
Returns:
numpy.ndarray: Word embedding representation vectors
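Example (a small sketch assuming `_parse_line` splits a whitespace-separated line into a byte token and float values; file content and vocabulary are made up):
```python
import io

# Two tiny embedding rows in plain word2vec text layout.
fin = io.BytesIO(b"cat 0.1 0.2 0.3\ndog 0.4 0.5 0.6\n")
vocab = {b"cat": 0, b"dog": 1}   # byte token -> row index

arr = load_with_vocab(fin, vocab)
print(arr.shape)                 # expected (2, 3)
```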
|
juraj-google-style
|
def of(seconds: DurationTypes) -> 'Duration':
if isinstance(seconds, Timestamp):
raise TypeError('Cannot interpret %s as Duration.' % seconds)
if isinstance(seconds, Duration):
return seconds
return Duration(seconds)
|
Return the Duration for the given number of seconds since Unix epoch.
If the input is already a Duration, the input itself will be returned.
Args:
seconds: Number of seconds as int, float or Duration.
Returns:
Corresponding Duration object.
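Example (assuming Apache Beam's `apache_beam.utils.timestamp` module is available):
```python
from apache_beam.utils.timestamp import Duration

d = Duration.of(30)     # built from an int number of seconds
same = Duration.of(d)   # an existing Duration is returned unchanged
print(same is d)        # True: the same object is returned
```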
|
github-repos
|
def _validate_min_version(min_version):
if (min_version is not None):
try:
parsed_min_version = version.StrictVersion(min_version)
except ValueError:
return ExtensionVersionResult(error_reason=ExtensionValidationError.UNPARSEABLE_REQUESTED_VERSION, requested_extension_version=min_version)
if (parsed_min_version > HANDLER_VERSION):
return ExtensionVersionResult(error_reason=ExtensionValidationError.OUTDATED_VERSION, requested_extension_version=str(parsed_min_version))
return ExtensionVersionResult(error_reason=None, requested_extension_version=min_version)
|
Validates the extension version matches the requested version.
Args:
min_version: Minimum version passed as a query param when establishing the
connection.
Returns:
An ExtensionVersionResult indicating validation status. If there is a
problem, the error_reason field will be non-empty.
|
codesearchnet
|
def optimized_trace_matmul(rho, sigma):
return tf.reduce_sum(tf.multiply(tf.cast(rho, tf.complex128), tf.transpose(tf.cast(sigma, tf.complex128))))
|
Returns optimized version of tf.linalg.trace(tf.matmul(rho, sigma)).
Assuming both have the same shape.
Args:
rho: 2-D `tf.Tensor` of dtype `complex64` representing the left density
matrix in the trace-matmul calculation.
sigma: 2-D `tf.Tensor` of dtype `complex64` representing the right density
matrix in the trace-matmul calculation.
Returns:
A tf.Tensor float64 trace value between the two given density matrices.
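Example (a plain-numpy check of the identity trace(rho @ sigma) == sum(rho * sigma.T) that this op relies on; the matrices are illustrative):
```python
import numpy as np

rho = np.array([[0.5, 0.0], [0.0, 0.5]], dtype=np.complex128)
sigma = np.array([[0.25, 0.1], [0.1, 0.75]], dtype=np.complex128)

# Both expressions compute the same trace value.
assert np.isclose(np.trace(rho @ sigma), np.sum(rho * sigma.T))
```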
|
github-repos
|
def cancel(batch_fn, cancel_fn, ops):
canceled_ops = []
error_messages = []
max_batch = 256
total_ops = len(ops)
for first_op in range(0, total_ops, max_batch):
(batch_canceled, batch_messages) = _cancel_batch(batch_fn, cancel_fn, ops[first_op:(first_op + max_batch)])
canceled_ops.extend(batch_canceled)
error_messages.extend(batch_messages)
return (canceled_ops, error_messages)
|
Cancel operations.
Args:
batch_fn: API-specific batch function.
cancel_fn: API-specific cancel function.
ops: A list of operations to cancel.
Returns:
A list of operations canceled and a list of error messages.
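Example (an illustration of just the chunking behaviour of the loop above, with placeholder operation handles):
```python
ops = list(range(600))        # placeholder operation handles
max_batch = 256
batches = [ops[i:i + max_batch] for i in range(0, len(ops), max_batch)]
print([len(b) for b in batches])   # [256, 256, 88]
```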
|
codesearchnet
|
def hard_shrink(x, threshold=0.5):
return ops.hard_shrink(x, threshold=threshold)
|
Hard Shrink activation function.
It is defined as:
`hard_shrink(x) = x` if `|x| > threshold`,
`hard_shrink(x) = 0` otherwise.
Args:
x: Input tensor.
threshold: Threshold value. Defaults to 0.5.
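Example (assuming a Keras 3 release that ships `keras.ops.hard_shrink`; values chosen to exercise both branches):
```python
import numpy as np
from keras import ops

x = np.array([-1.0, -0.3, 0.0, 0.4, 2.0])
print(ops.hard_shrink(x, threshold=0.5))
# values with |x| <= 0.5 are zeroed: [-1., 0., 0., 0., 2.]
```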
|
github-repos
|
def refactor_string(self, data, name):
features = _detect_future_features(data)
if "print_function" in features:
self.driver.grammar = pygram.python_grammar_no_print_statement
try:
tree = self.driver.parse_string(data)
except Exception as err:
self.log_error("Can't parse %s: %s: %s",
name, err.__class__.__name__, err)
return
finally:
self.driver.grammar = self.grammar
tree.future_features = features
self.log_debug("Refactoring %s", name)
self.refactor_tree(tree, name)
return tree
|
Refactor a given input string.
Args:
data: a string holding the code to be refactored.
name: a human-readable name for use in error/log messages.
Returns:
An AST corresponding to the refactored input stream; None if
there were errors during the parse.
|
juraj-google-style
|
def do_not_convert(func=None):
if func is None:
return do_not_convert
def wrapper(*args, **kwargs):
with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED):
return func(*args, **kwargs)
if inspect.isfunction(func) or inspect.ismethod(func):
wrapper = functools.update_wrapper(wrapper, func)
return autograph_artifact(wrapper)
|
Decorator that suppresses the conversion of a function.
Args:
func: function to decorate.
Returns:
If `func` is not None, returns a `Callable` which is equivalent to
`func`, but is not converted by AutoGraph.
If `func` is None, returns a decorator that, when invoked with a
single `func` argument, returns a `Callable` equivalent to the
above case.
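Example (using the public `tf.autograph.experimental.do_not_convert` alias):
```python
import tensorflow as tf

@tf.autograph.experimental.do_not_convert
def plain_python(x):
    # Executed as written; AutoGraph will not rewrite this body.
    return x + 1

@tf.function
def graph_fn(x):
    return plain_python(x)

print(graph_fn(tf.constant(1)))   # tf.Tensor(2, ...)
```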
|
github-repos
|
def RebuildHttpConnections(http):
if getattr(http, 'connections', None):
for conn_key in list(http.connections.keys()):
if ':' in conn_key:
del http.connections[conn_key]
|
Rebuilds all http connections in the httplib2.Http instance.
httplib2 overloads the map in http.connections to contain two different
types of values:
{ scheme string: connection class } and
{ scheme + authority string : actual http connection }
Here we remove all of the entries for actual connections so that on the
next request httplib2 will rebuild them from the connection types.
Args:
http: An httplib2.Http instance.
|
juraj-google-style
|
def _to_dict(self, include=None, exclude=None):
if ((include is not None) and (not isinstance(include, (list, tuple, set, frozenset)))):
raise TypeError('include should be a list, tuple or set')
if ((exclude is not None) and (not isinstance(exclude, (list, tuple, set, frozenset)))):
raise TypeError('exclude should be a list, tuple or set')
values = {}
for prop in self._properties.itervalues():
name = prop._code_name
if ((include is not None) and (name not in include)):
continue
if ((exclude is not None) and (name in exclude)):
continue
try:
values[name] = prop._get_for_dict(self)
except UnprojectedPropertyError:
pass
return values
|
Return a dict containing the entity's property values.
Args:
include: Optional set of property names to include, default all.
exclude: Optional set of property names to skip, default none.
A name contained in both include and exclude is excluded.
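Example (a plain-dict illustration of the include/exclude filtering rules, independent of the NDB model machinery):
```python
props = {'name': 'widget', 'price': 10, 'secret': 'x'}
include, exclude = {'name', 'price', 'secret'}, {'secret'}

values = {k: v for k, v in props.items()
          if (include is None or k in include)
          and (exclude is None or k not in exclude)}
print(values)   # {'name': 'widget', 'price': 10}: exclude wins over include
```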
|
codesearchnet
|
def get_renderer(option: Optional[str]=None) -> Type[PipelineGraphRenderer]:
if option is None:
if os.name == 'nt':
exists = subprocess.call(['where', 'dot.exe']) == 0
else:
exists = subprocess.call(['which', 'dot']) == 0
if exists:
option = 'graph'
else:
option = 'text'
renderer = [r for r in PipelineGraphRenderer.get_all_subclasses() if option == r.option()]
if len(renderer) == 0:
        raise ValueError('No renderer found for option: %s' % option)
elif len(renderer) == 1:
return renderer[0]()
else:
        raise ValueError('Found more than one renderer for option: %s' % option)
|
Get an instance of PipelineGraphRenderer given rendering option.
Args:
option: (str) the rendering option.
Returns:
(PipelineGraphRenderer)
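Example (the same Graphviz availability probe, shown standalone):
```python
import os
import subprocess

cmd = ['where', 'dot.exe'] if os.name == 'nt' else ['which', 'dot']
has_graphviz = subprocess.call(cmd) == 0
print('graph' if has_graphviz else 'text')   # the option that would be chosen
```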
|
github-repos
|
def plot_predictions_histogram(Y_ph, Y, title=None):
labels = list(set(Y).union(set(Y_ph)))
edges = [x - 0.5 for x in range(min(labels), max(labels) + 2)]
plt.hist([Y_ph, Y], bins=edges, label=["Predicted", "Gold"])
ax = plt.gca()
ax.set_xticks(labels)
plt.xlabel("Label")
    plt.ylabel("# Predictions")
plt.legend(loc="upper right")
if isinstance(title, str):
plt.title(title)
plt.show()
|
Plot a histogram comparing int predictions vs true labels by class
Args:
Y_ph: An [n] or [n, 1] np.ndarray of predicted int labels
Y: An [n] or [n, 1] np.ndarray of gold labels
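Example (random labels for illustration; requires matplotlib with an interactive backend):
```python
import numpy as np

Y_gold = np.random.randint(1, 4, size=100)   # gold labels in {1, 2, 3}
Y_pred = np.random.randint(1, 4, size=100)   # predicted labels

plot_predictions_histogram(Y_pred, Y_gold, title="Example")
```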
|
juraj-google-style
|
def validate(self):
missing = self.missing_property_names()
if (len(missing) > 0):
raise validators.ValidationError("'{0}' are required attributes for {1}".format(missing, self.__class__.__name__))
for (prop, val) in six.iteritems(self._properties):
if (val is None):
continue
if isinstance(val, ProtocolBase):
val.validate()
elif (getattr(val, 'isLiteralClass', None) is True):
val.validate()
elif isinstance(val, list):
for subval in val:
subval.validate()
else:
setattr(self, prop, val)
return True
|
Applies all defined validation to the current
state of the object, and raises an error if
they are not all met.
Raises:
ValidationError: if validations do not pass
|
codesearchnet
|
def get_concept(self, conceptId, lang='en'):
url = urljoin(self.concept_service + '/', conceptId)
res, status_code = self.get(url, params={'lang': lang})
if status_code != 200:
logger.debug('Fetch concept failed.')
return self.decode(res), status_code
|
Fetch the concept from the Knowledge base
Args:
conceptId (str): The concept id to be fetched; it can be a Wikipedia
page id or a Wikidata id.
lang (str): Language code of the requested concept data. Defaults to 'en'.
Returns:
dict, int: A dict containing the concept information; an integer
representing the response code.
|
juraj-google-style
|
def register_hooked(self,
hooks,
func,
args_gen=None
):
if self.hooked is None:
self.hooked = {}
if args_gen is None:
args_gen = getattr(func, "call_types", {}).keys
if not isinstance(hooks, Sequence):
hooks = [hooks]
for hook_cls in hooks:
self.hooked[hook_cls] = (func, args_gen)
|
Register func to be run when any of the hooks are run by parent
Args:
hooks: A Hook class or list of Hook classes of interest
func: The callable that should be run on that Hook
args_gen: Optionally specify the argument names that should be
passed to func. If not given then use func.call_types.keys
|
juraj-google-style
|
def register_subcommand(parser: ArgumentParser):
train_parser = parser.add_parser('convert', help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.')
train_parser.add_argument('--model_type', type=str, required=True, help="Model's type.")
train_parser.add_argument('--tf_checkpoint', type=str, required=True, help='TensorFlow checkpoint path or folder.')
train_parser.add_argument('--pytorch_dump_output', type=str, required=True, help='Path to the PyTorch saved model output.')
train_parser.add_argument('--config', type=str, default='', help='Configuration file path or folder.')
train_parser.add_argument('--finetuning_task_name', type=str, default=None, help='Optional fine-tuning task name if the TF model was a finetuned model.')
train_parser.set_defaults(func=convert_command_factory)
|
Register this command to argparse so it's available for the transformer-cli
Args:
parser: Root parser to register command-specific arguments
|
github-repos
|
def _create_array(self, arr: np.ndarray) -> int:
if not isinstance(arr, np.ndarray):
raise ValueError('Array is not a numpy ndarray.')
try:
c_arr = np.ctypeslib.as_ctypes(arr)
except (KeyError, NotImplementedError):
raise ValueError(
'Array has unsupported dtype {}.'.format(arr.dtype))
raw_arr = RawArray(c_arr._type_, c_arr)
with self._lock:
if self._count >= len(self._arrays):
self._arrays += len(self._arrays) * [None]
self._get_next_free()
self._arrays[self._current] = (raw_arr, arr.shape)
self._count += 1
return self._current
|
Returns the handle of a RawArray created from the given numpy array.
Args:
arr: A numpy ndarray.
Returns:
The handle (int) of the array.
Raises:
ValueError: if arr is not a ndarray or of an unsupported dtype. If
the array is of an unsupported type, using a view of the array to
another dtype and then converting on get is often a work around.
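Example (a standalone sketch of the numpy -> ctypes -> RawArray round trip used above):
```python
import numpy as np
from multiprocessing.sharedctypes import RawArray

arr = np.arange(6, dtype=np.float64).reshape(2, 3)
c_arr = np.ctypeslib.as_ctypes(arr)        # ctypes view over the buffer
raw = RawArray(c_arr._type_, c_arr)        # shared-memory copy

# Rebuild a numpy view from the shared buffer plus the remembered shape.
restored = np.frombuffer(raw, dtype=arr.dtype).reshape(arr.shape)
print(np.array_equal(arr, restored))       # True
```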
|
juraj-google-style
|
def evaluate_bound(distribution, x_data, parameters=None, cache=None):
assert (len(x_data) == len(distribution))
assert (len(x_data.shape) == 2)
cache = (cache if (cache is not None) else {})
parameters = load_parameters(distribution, '_bnd', parameters=parameters, cache=cache)
out = numpy.zeros(((2,) + x_data.shape))
(lower, upper) = distribution._bnd(x_data.copy(), **parameters)
    out.T[:, :, 0] = numpy.asfarray(lower).T
    out.T[:, :, 1] = numpy.asfarray(upper).T
cache[distribution] = out
return out
|
Evaluate lower and upper bounds.
Args:
distribution (Dist):
Distribution to evaluate.
x_data (numpy.ndarray):
Locations for where evaluate bounds at. Relevant in the case of
multivariate distributions where the bounds are affected by the
output of other distributions.
parameters (:py:data:typing.Any):
Collection of parameters to override the default ones in the
distribution.
cache (:py:data:typing.Any):
A collection of previous calculations in case the same distribution
turns up on more than one occasion.
Returns:
The lower and upper bounds of ``distribution`` at location
``x_data`` using parameters ``parameters``.
|
codesearchnet
|
def _GetTimeValues(self, number_of_seconds):
number_of_seconds = int(number_of_seconds)
(number_of_minutes, seconds) = divmod(number_of_seconds, 60)
(number_of_hours, minutes) = divmod(number_of_minutes, 60)
(number_of_days, hours) = divmod(number_of_hours, 24)
return (number_of_days, hours, minutes, seconds)
|
Determines time values.
Args:
number_of_seconds (int|decimal.Decimal): number of seconds.
Returns:
tuple[int, int, int, int]: days, hours, minutes, seconds.
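Example (the same divmod chain worked by hand):
```python
number_of_seconds = 93784              # 1 day, 2 hours, 3 minutes, 4 seconds
minutes, seconds = divmod(number_of_seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
print(days, hours, minutes, seconds)   # 1 2 3 4
```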
|
codesearchnet
|
def ReadSerialized(cls, json_string):
if json_string:
json_dict = json.loads(json_string)
return cls.ReadSerializedDict(json_dict)
return None
|
Reads an attribute container from serialized form.
Args:
json_string (str): JSON serialized attribute container.
Returns:
AttributeContainer: attribute container or None.
|
codesearchnet
|
def from_file(filename, use_cores=True, thresh=0.0001):
with zopen(filename, 'rt') as f:
return Xr.from_string(f.read(), use_cores=use_cores, thresh=thresh)
|
Reads an xr-formatted file to create an Xr object.
Args:
filename (str): name of file to read from.
use_cores (bool): use core positions and discard shell
positions if set to True (default). Otherwise,
use shell positions and discard core positions.
thresh (float): relative threshold for consistency check
between cell parameters (lengths and angles) from
header information and cell vectors, respectively.
Returns:
xr (Xr): Xr object corresponding to the input
file.
|
codesearchnet
|