content (string, lengths 22–815k) | id (int64, 0–4.91M)
---|---
def mosaic_cut(image, original_width, original_height, width, height, center,
ptop, pleft, pbottom, pright, shiftx, shifty):
"""Generates a random center location to use for the mosaic operation.
Given a center location, cuts the input image into a slice that will be
concatenated with other slices with the same center in order to construct
a final mosaicked image.
Args:
image: `Tensor` of shape [None, None, 3] that needs to be altered.
original_width: `float` value indicating the original width of the image.
original_height: `float` value indicating the original height of the image.
width: `float` value indicating the final width of the image.
height: `float` value indicating the final height of the image.
center: `float` value indicating the desired center of the final patched
image.
ptop: `float` value indicating the top of the image without padding.
pleft: `float` value indicating the left of the image without padding.
pbottom: `float` value indicating the bottom of the image without padding.
pright: `float` value indicating the right of the image without padding.
shiftx: `float` 0.0 or 1.0 value indicating if the image is on the left or
right.
shifty: `float` 0.0 or 1.0 value indicating if the image is at the top or
bottom.
Returns:
image: The cropped image in the same datatype as the input image.
crop_info: `float` tensor that is applied to the boxes in order to select
the boxes still contained within the image.
"""
def cast(values, dtype):
return [tf.cast(value, dtype) for value in values]
with tf.name_scope('mosaic_cut'):
center = tf.cast(center, width.dtype)
zero = tf.cast(0.0, width.dtype)
cut_x, cut_y = center[1], center[0]
# Select the crop of the image to use
left_shift = tf.minimum(
tf.minimum(cut_x, tf.maximum(zero, -pleft * width / original_width)),
width - cut_x)
top_shift = tf.minimum(
tf.minimum(cut_y, tf.maximum(zero, -ptop * height / original_height)),
height - cut_y)
right_shift = tf.minimum(
tf.minimum(width - cut_x,
tf.maximum(zero, -pright * width / original_width)), cut_x)
bot_shift = tf.minimum(
tf.minimum(height - cut_y,
tf.maximum(zero, -pbottom * height / original_height)),
cut_y)
(left_shift, top_shift, right_shift, bot_shift,
zero) = cast([left_shift, top_shift, right_shift, bot_shift, zero],
tf.float32)
# Build a crop offset and a crop size tensor to use for slicing.
crop_offset = [zero, zero, zero]
crop_size = [zero - 1, zero - 1, zero - 1]
if shiftx == 0.0 and shifty == 0.0:
crop_offset = [top_shift, left_shift, zero]
crop_size = [cut_y, cut_x, zero - 1]
elif shiftx == 1.0 and shifty == 0.0:
crop_offset = [top_shift, cut_x - right_shift, zero]
crop_size = [cut_y, width - cut_x, zero - 1]
elif shiftx == 0.0 and shifty == 1.0:
crop_offset = [cut_y - bot_shift, left_shift, zero]
crop_size = [height - cut_y, cut_x, zero - 1]
elif shiftx == 1.0 and shifty == 1.0:
crop_offset = [cut_y - bot_shift, cut_x - right_shift, zero]
crop_size = [height - cut_y, width - cut_x, zero - 1]
# Contain and crop the image.
ishape = tf.cast(tf.shape(image)[:2], crop_size[0].dtype)
crop_size[0] = tf.minimum(crop_size[0], ishape[0])
crop_size[1] = tf.minimum(crop_size[1], ishape[1])
crop_offset = tf.cast(crop_offset, tf.int32)
crop_size = tf.cast(crop_size, tf.int32)
image = tf.slice(image, crop_offset, crop_size)
crop_info = tf.stack([
tf.cast(ishape, tf.float32),
tf.cast(tf.shape(image)[:2], dtype=tf.float32),
tf.ones_like(ishape, dtype=tf.float32),
tf.cast(crop_offset[:2], tf.float32)
])
return image, crop_info
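A minimal usage sketch (not part of the original module), assuming TensorFlow 2.x; the image size, mosaic center, and zero padding values below are made up, and the size arguments are passed as float tensors because the function reads `width.dtype`:
import tensorflow as tf

# Hypothetical call: take the top-left slice (shiftx=0, shifty=0) of a 480x640
# source image that will become one quadrant of a 640x640 mosaic.
image = tf.random.uniform([480, 640, 3])
width = tf.constant(640.0)
height = tf.constant(640.0)
center = tf.stack([height * 0.5, width * 0.5])  # (cut_y, cut_x)
patch, crop_info = mosaic_cut(
    image,
    original_width=tf.constant(640.0), original_height=tf.constant(480.0),
    width=width, height=height, center=center,
    ptop=0.0, pleft=0.0, pbottom=0.0, pright=0.0,
    shiftx=0.0, shifty=0.0)
print(patch.shape)  # (320, 320, 3): the slice above and left of the center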
| 5,900 |
def get_children_info(category_id: str) -> list[dict]:
"""Get information about children categories of the current category.
    :param category_id: Category id.
:return: info about children categories.
"""
# Create the URL
url = f'{POINT}/resources/v2/title/domains/{DOMAIN}/' \
f'categories/{category_id}/children'
# Request
response = requests.get(url, params=REQUEST_PARAMS,
headers=REQUEST_HEADERS)
# If error
if not response:
# Raise exception to retry request by decorator
raise RequestException()
# Extract data
children_data = response.json().get('data')
if children_data:
return children_data['categories']
return []
| 5,901 |
def GetTypedValue(field_type, value):
"""Returns a typed value based on a schema description and string value.
BigQuery's Query() method returns a JSON string that has all values stored
as strings, though the schema contains the necessary type information. This
method provides conversion services to make it easy to persist the data in
your JSON as "typed" data.
Args:
field_type: The field type (as defined by BigQuery).
value: The field value, typed as a string.
Returns:
A value of the appropriate type.
Raises:
NotSupportedError: Raised if the field type is not supported.
"""
if value is None:
return None
if field_type == FieldTypes.STRING:
return value
if field_type == FieldTypes.INTEGER:
if value == 'NaN':
return None
else:
return int(value)
if field_type == FieldTypes.FLOAT:
if value == 'NaN':
return None
else:
return float(value)
if field_type == FieldTypes.TIMESTAMP:
if value == 'NaN':
return None
else:
dt = datetime.datetime.utcfromtimestamp(float(value))
return dt.isoformat(' ')
if field_type == FieldTypes.BOOLEAN:
return value.lower() == 'true'
else:
raise NotSupportedError(
'Type {field_type} is not supported.'.format(field_type=field_type))
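A few illustrative calls (expected results in the trailing comments), relying on the same `FieldTypes` constants the branches above use:
GetTypedValue(FieldTypes.INTEGER, '42')       # -> 42
GetTypedValue(FieldTypes.FLOAT, 'NaN')        # -> None (NaN maps to None)
GetTypedValue(FieldTypes.BOOLEAN, 'TRUE')     # -> True
GetTypedValue(FieldTypes.TIMESTAMP, '0')      # -> '1970-01-01 00:00:00'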
| 5,902 |
def migrate_users(session):
"""
select us.id, us.user_id, us.school_id
from user_school us
inner join `user` u on u.user_id = us.user_id and u.active
inner join school s on s.school_id = us.school_id and s.hidden_pd_school = 0;
"""
for user in _get_file_data('user.csv'):
typer.echo(f'adding {user["id"]}')
user_obj = general_schema.User(
id=user['id'],
email=user['email'] if user['email'] != '' else None,
username=user['username'] if user['username'] != '' else None,
password=user['password'] if user['password'] != '' else None,
first_name=user['first_name'],
last_name=user['last_name']
)
session.add(user_obj)
role = user['privilege']
if role == 'creator':
user_obj.role_refs = [general_schema.UserRole(role_id=1)]
elif role == 'student':
user_obj.role_refs = [general_schema.UserRole(role_id=3)]
elif role == 'teacher':
user_obj.role_refs = [general_schema.UserRole(role_id=2)]
elif role == 'admin':
user_obj.role_refs = [general_schema.UserRole(role_id=4)]
| 5,903 |
def predict_from_file(audio_file,
hop_length=None,
fmin=50.,
fmax=MAX_FMAX,
model='full',
decoder=torchcrepe.decode.viterbi,
return_harmonicity=False,
return_periodicity=False,
batch_size=None,
device='cpu',
pad=True):
"""Performs pitch estimation from file on disk
Arguments
audio_file (string)
The file to perform pitch tracking on
hop_length (int)
The hop_length in samples
fmin (float)
The minimum allowable frequency in Hz
fmax (float)
The maximum allowable frequency in Hz
model (string)
The model capacity. One of 'full' or 'tiny'.
decoder (function)
The decoder to use. See decode.py for decoders.
return_harmonicity (bool) [DEPRECATED]
Whether to also return the network confidence
return_periodicity (bool)
Whether to also return the network confidence
batch_size (int)
The number of frames per batch
device (string)
The device used to run inference
pad (bool)
Whether to zero-pad the audio
Returns
pitch (torch.tensor [shape=(1, 1 + int(time // hop_length))])
(Optional) periodicity (torch.tensor
[shape=(1, 1 + int(time // hop_length))])
"""
# Load audio
audio, sample_rate = torchcrepe.load.audio(audio_file)
# Predict
return predict(audio,
sample_rate,
hop_length,
fmin,
fmax,
model,
decoder,
return_harmonicity,
return_periodicity,
batch_size,
device,
pad)
| 5,904 |
def test():
"""Runs the unit tests without test coverage."""
tests = unittest.TestLoader().discover('cabotage/tests', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1
| 5,905 |
def nm_to_uh(s):
"""Get the userhost part of a nickmask.
(The source of an Event is a nickmask.)
"""
return s.split("!")[1]
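For example, given the nickmask of a typical IRC event source:
nm_to_uh("nick!user@example.host")  # -> "user@example.host"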
| 5,906 |
def write(app, cnx):
"""
Given an app and a SQL connection, write the app features
into the feature table.
:param app: The Android Application object with the data
:param cnx: SQL Connection
:return:
"""
cursor = cnx.cursor()
results = app.features
table_name = 'features'
split = app.name.split("-")
if len(split) != 3:
exit()
logging.debug("getting foregin id")
foreign_key_id = get_version_id(split[0], split[1], split[2], cnx)
add_feature_query = ("INSERT INTO version_features "
"(app_version_id, internet, account_manager, uses_ssl, sharing_sending, translation) "
"VALUES (%s, %s, %s, %s, %s, %s)")
feature_data = (
foreign_key_id,
results['Internet'],
results['Account Manager'],
results['Use SSL'],
results['Sharing-Sending'],
results['Internationalization']
)
try:
cursor.execute(add_feature_query, feature_data)
except IntegrityError as e:
logging.warning("It seems we already analyzed this app." + str(e))
# commit & actually save
cnx.commit()
| 5,907 |
def upload_example_done(request, object_id):
"""
This view is a callback that receives POST data
from uploadify when the download is complete.
See also /media/js/uploadify_event_handlers.js.
"""
example = get_object_or_404(Example, id=object_id)
#
# Grab the post data sent by our OnComplete handler and parse it. Set the fields
# on our example object as appropriate and save.
#
if request.method == 'POST':
post_response = request.POST['s3_response']
location_rexp = '<Location>(.*)</Location>'
example.file_url = unquote_plus(re.search(location_rexp, post_response).group(1))
example.file_name = request.POST['file_obj[name]']
example.file_size = request.POST['file_obj[size]']
example.file_upload_speed = request.POST['upload_data[speed]']
example.file_uploaded = datetime.now()
example.save()
        print(example.file_url)
        print(example.file_name)
        print(example.file_uploaded)
return HttpResponse((reverse('examples_example_detail', args=[example.id])))
| 5,908 |
def epoch_in_milliseconds(epoch):
"""
>>> epoch_in_milliseconds(datetime_from_seconds(-12345678999.0001))
-12345679000000
"""
return epoch_in_seconds(epoch) * 1000
| 5,909 |
def ForestSorter(basename, isortorder = 'random', ibackup = True,
icompress = False):
"""
Sorts a forest file and remaps halo IDs.
    The sort fields (or sort keys) are ordered such that the first key performs the
    outer-most sort and the last key performs the inner-most sort.
    Parameters
    ----------
    basename : String
        Base file name of the forest file.
        Opens the HDF5 file of the forest, reading meta information, and
        assumes the HDF5 file to have the following data structure:
        HDF5_file -> Snapshot_Keys -> Halo properties.
        The file is updated with new IDs and stores the old IDs as IDs_old,
        plus saves meta information mapping the IDs.
    isortorder : String
        Order in which forests are sorted: one of 'ids', 'sizes' or 'random'.
    ibackup : bool
        Whether to back up the file before updating it.
    icompress : bool
        Whether to compress datasets written back to the file.
    Returns
    ----------
    void
    Examples
    ----------
    sort_fields = ["ForestID", "Mass_200mean"]
    ForestID = [1, 4, 39, 1, 1, 4]
    Mass_200mean = [4e9, 10e10, 8e8, 7e9, 3e11, 5e6]
    Then the indices would be [0, 3, 4, 5, 1, 2]
    """
# data fields that will need values updated as ids will be mapped.
# as some fields are aliased, don't update them
temporalkeys = [
#'RootHead',
#'Head',
#'RootTail',
#'Tail',
'FinalDescendant',
'Descendant',
'FirstProgenitor',
'Progenitor',
#'LeftTail',
#'RightTail',
'PreviousProgenitor',
'NextProgenitor',
]
subhalokeys = [
'hostHaloID',
'NextSubhalo',
'PreviousSubhalo',
]
sortorderkeys = ['ids', 'sizes', 'random']
    if (isortorder not in sortorderkeys):
        print('Error: ',isortorder, 'not valid. Sort order can be ',sortorderkeys, flush=True)
        print('Exiting without sorting', flush=True)
        return
# fields used to determine ordering of halos in file
sort_fields = ['ForestID', 'hostHaloID', 'npart']
#open old files to get necessary information
fname = basename+'.foreststats.hdf5'
hdffile = h5py.File(fname, 'r')
forestids = np.array(hdffile['ForestInfo']['ForestIDs'])
forestsizes = np.array(hdffile['ForestInfo']['ForestSizes'])
    if (isortorder == 'ids'):
        forestordering = np.argsort(forestids)
elif (isortorder == 'sizes'):
forestordering = np.argsort(forestsizes)
elif (isortorder == 'random'):
forestordering = np.random.choice(np.argsort(forestsizes),
forestids.size, replace=False)
numsnaps = np.int64(hdffile['Header'].attrs["NSnaps"])
nfiles = np.int64(hdffile['Header'].attrs["NFiles"])
hdffile.close()
fname = basename+'.hdf5.%d'%0
hdffile = h5py.File(fname, 'r')
TEMPORALHALOIDVAL = np.int64(hdffile['Header/TreeBuilder'].attrs['Temporal_halo_id_value'])
snapkey = "Snap_%03d" % (numsnaps-1)
allpropkeys = list(hdffile[snapkey].keys())
idkeylist = []
propkeys = []
aliasedkeys = []
for propkey in allpropkeys:
if (hdffile[snapkey][propkey].id not in idkeylist):
idkeylist.append(hdffile[snapkey][propkey].id)
propkeys.append(propkey)
else:
aliasedkeys.append(propkey)
hdffile.close()
# back up files if necessary
if (ibackup):
print('Backing up original data', flush=True)
fname = basename+'.foreststats.hdf5'
newfname = fname+'.backup'
subprocess.call(['cp', fname, newfname])
for ifile in range(nfiles):
fname = basename+'.hdf5.%d'%ifile
newfname = fname+'.backup'
subprocess.call(['cp', fname, newfname])
# reorder file containing meta information
print('Reordering forest stats data ...', flush=True)
time1 = time.process_time()
fname = basename+'.foreststats.hdf5'
hdffile = h5py.File(fname, 'r+')
forestgrp = hdffile['ForestInfo']
data = forestgrp['ForestIDs']
forestids = forestids[forestordering]
data[:] = forestids
data = forestgrp['ForestSizes']
data[:] = forestsizes[forestordering]
snapskeys = list(forestgrp['Snaps'].keys())
for snapkey in snapskeys:
snapgrp = forestgrp['Snaps'][snapkey]
numhalos = np.array(snapgrp['NumHalosInForest'])[forestordering]
numfofs = np.array(snapgrp['NumFOFGroupsInForest'])[forestordering]
data = snapgrp['NumHalosInForest']
data[:] = numhalos
data = snapgrp['NumFOFGroupsInForest']
data[:] = numfofs
hdffile.close()
print('Done', time.process_time()-time1, flush=True)
for ifile in range(nfiles):
fname = basename+'.hdf5.%d'%ifile
hdffile = h5py.File(fname, 'a')
print('First pass building id map for file', fname, flush=True)
#first pass to resort arrays
#store the ids and the newids to map stuff
alloldids = np.array([], dtype=np.int64)
allnewids = np.array([], dtype=np.int64)
time1 = time.process_time()
for i in range(numsnaps):
snapkey = "Snap_%03d" % i
numhalos = np.int64(hdffile[snapkey].attrs['NHalos'])
if (numhalos == 0): continue
ids = np.array(hdffile[snapkey]['ID'], dtype=np.int64)
sort_data = np.zeros([len(sort_fields),ids.size], dtype=np.int64)
sort_data[0] = -np.array(hdffile[snapkey]['npart'], dtype=np.int64)
sort_data[1] = np.array(hdffile[snapkey]['hostHaloID'], dtype=np.int64)
activeforestids = np.array(hdffile[snapkey]['ForestID'], dtype=np.int64)
xy, x_ind, y_ind = np.intersect1d(activeforestids, forestids, return_indices=True)
unique, inverse = np.unique(activeforestids, return_inverse=True)
sort_data[2] = y_ind[inverse]
indices = np.array(np.lexsort(sort_data))
newids = i*TEMPORALHALOIDVAL+np.arange(numhalos, dtype=np.int64)+1
alloldids = np.concatenate([alloldids,np.array(ids[indices], dtype=np.int64)])
allnewids = np.concatenate([allnewids,newids])
for propkey in propkeys:
if (propkey == 'NHalosPerForestInSnap'): continue
if (propkey == 'ID'): continue
if (propkey in aliasedkeys): continue
newdata = np.array(hdffile[snapkey][propkey])[indices]
data = hdffile[snapkey][propkey]
data[:] = newdata
HDF5WriteDataset(hdffile[snapkey], 'ID_old', ids[indices], icompress)
# hdffile[snapkey].create_dataset('ID_old',
# data=ids[indices], dtype=np.int64, compression='gzip', compression_opts=6)
data = hdffile[snapkey]['ID']
data[:] = newids
#now go over temporal and subhalo fields and update as necessary
print('Finished pass and now have map of new ids to old ids', time.process_time()-time1, flush=True)
time1 = time.process_time()
for i in range(numsnaps):
snapkey = "Snap_%03d" % i
numhalos = np.int32(hdffile[snapkey].attrs['NHalos'])
if (numhalos == 0): continue
print('Processing',snapkey, flush=True)
time2 = time.process_time()
for propkey in temporalkeys:
olddata = np.array(hdffile[snapkey][propkey])
olddata_unique, olddata_unique_inverse = np.unique(olddata, return_inverse = True)
xy, x_ind, y_ind = np.intersect1d(alloldids, olddata_unique, return_indices=True)
newdata = allnewids[x_ind[olddata_unique_inverse]]
data = hdffile[snapkey][propkey]
data[:] = newdata
for propkey in subhalokeys:
olddata = np.array(hdffile[snapkey][propkey])
if (propkey == 'hostHaloID'):
newdata = -np.ones(numhalos, dtype=np.int64)
wdata = np.where(olddata !=-1)[0]
if (wdata.size >0):
olddata_unique, olddata_unique_inverse = np.unique(olddata[wdata], return_inverse = True)
xy, x_ind, y_ind = np.intersect1d(alloldids, olddata_unique, return_indices=True)
newdata[wdata] = allnewids[x_ind[olddata_unique_inverse]]
else:
olddata = np.array(hdffile[snapkey][propkey])
olddata_unique, olddata_unique_inverse = np.unique(olddata, return_inverse = True)
xy, x_ind, y_ind = np.intersect1d(alloldids, olddata_unique, return_indices=True)
newdata = allnewids[x_ind[olddata_unique_inverse]]
data = hdffile[snapkey][propkey]
data[:] = newdata
print('Done', snapkey, 'containing', numhalos, 'in', time.process_time()-time2, flush=True)
#now update the forest info in the file
forestgrp = hdffile['ForestInfoInFile']
data = forestgrp['ForestIDsInFile']
activeforestids = np.array(data)
xy, x_ind, y_ind = np.intersect1d(activeforestids, forestids, return_indices=True)
ordering = x_ind[np.argsort(y_ind)]
data[:] = activeforestids[ordering]
data = forestgrp['ForestSizesInFile']
data[:] = np.array(data)[ordering]
for i in range(numsnaps):
snapkey = "Snap_%03d" % i
data = hdffile[snapkey]['NHalosPerForestInSnap']
newdata = np.array(data)[ordering]
data[:] = newdata
# for i in range(numsnaps):
# snapkey = "Snap_%03d" % i
# snapgrp = forestgrp[snapkey]
# numhalos = np.array(snapgrp['NumHalosInForest'])[ordering]
# numfofs = np.array(snapgrp['NumFOFGroupsInForest'])[ordering]
# data = snapgrp['NumHalosInForest']
# data[:] = numhalos
# data = snapgrp['NumFOFGroupsInForest']
# data[:] = numfofs
hdffile.create_group('ID_mapping')
HDF5WriteDataset(hdffile['ID_mapping'], 'IDs_old', alloldids, icompress)
HDF5WriteDataset(hdffile['ID_mapping'], 'IDs_new', allnewids, icompress)
# hdffile['ID_mapping'].create_dataset('IDs_old', data = alloldids)
# hdffile['ID_mapping'].create_dataset('IDs_new', data = allnewids)
print('Finished updating data ', time.process_time()-time1, flush=True)
hdffile.close()
| 5,910 |
def timeout(time: int = 30):
"""
Raise a timeout if a function does not return in time `time`.
    Use as a context manager, so that the signal module can reset its alarm for
    `SIGALRM`.
:param int time:
Time in seconds to wait for timeout. Default is 30 seconds.
"""
assert time >= 0, 'Time specified in timeout must be nonnegative.'
def _handler(signum, frame):
raise TimeoutError
signal.signal(signal.SIGALRM, _handler)
signal.alarm(time)
try:
yield
except TimeoutError as e:
raise e
finally:
signal.signal(signal.SIGALRM, signal.SIG_IGN)
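A usage sketch, assuming the surrounding module wraps this generator with `contextlib.contextmanager` (the docstring implies `with`-statement use, but the decorator is not part of this snippet) and that it runs on a Unix platform where `signal.SIGALRM` is available:
import time

try:
    with timeout(1):
        time.sleep(5)  # SIGALRM fires after 1 second and _handler raises
except TimeoutError:
    print('operation timed out')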
| 5,911 |
def get_workspaces(clue, workspaces):
"""
Imports all workspaces if none were provided.
Returns list of workspace names
"""
if workspaces is None:
logger.info("no workspaces specified, importing all toggl workspaces...")
workspaces = clue.get_toggl_workspaces()
logger.info("The following workspaces will be imported: %s", str(workspaces))
return workspaces
| 5,912 |
def create_hparams(hparams_string=None, hparams_json=None, verbose=True):
"""Create model hyperparameters. Parse nondefault from given string."""
hparams = tf.contrib.training.HParams(
training_stage='train_style_extractor',#['train_text_encoder','train_style_extractor','train_style_attention','train_refine_layernorm']
full_refine=False,
################################
# Experiment Parameters #
################################
epochs=500,
iters=1000000,
iters_per_checkpoint=5000,
log_per_checkpoint=1,
seed=1234,
dynamic_loss_scaling=True,
fp16_run=False,
distributed_run=False,
dist_backend="nccl",
dist_url="tcp://localhost:54321",
cudnn_enabled=True,
cudnn_benchmark=False,
numberworkers=8,
ignore_layers=['embedding.weight'],
################################
# Data Parameters #
################################
load_mel=True,
training_files='../../../spk_ttsdatafull_libri500_unpacked/training_with_mel_frame.txt',
mel_dir='../../../spk_ttsdatafull_libri500_unpacked/',
text_cleaners=['english_cleaners'],
is_partial_refine=False,
is_refine_style=False,
use_GAN=False,
GAN_type='wgan-gp',#['lsgan', 'wgan-gp']
GAN_alpha=1.0,
GP_beata=10.0,
Generator_pretrain_step=1,
add_noise=False,
################################
# Audio Parameters #
################################
max_wav_value=32768.0,
num_mels=80,
num_freq=1025,
min_mel_freq=0,
max_mel_freq=8000,
sample_rate=16000,
frame_length_ms=50,
frame_shift_ms=12.5,
preemphasize=0.97,
min_level_db=-100,
ref_level_db=0, # suggest use 20 for griffin-lim and 0 for wavenet
max_abs_value=4,
symmetric_specs=True, # if true, suggest use 4 as max_abs_value
# Eval:
griffin_lim_iters=60,
power=1.5, # Power to raise magnitudes to prior to Griffin-Lim
threshold=0.5, # for stop token
minlenratio=0.0, # Minimum length ratio in inference.
maxlenratio=50.0, # Maximum length ratio in inference.
use_phone=True,
phone_set_file="../../../spk_ttsdatafull_libri500_unpacked/phone_set.json",
n_symbols=5000, # len(symbols),
embed_dim=512, # Dimension of character embedding.
pretrained_model=None,
# VQVAE
use_vqvae=False,
aux_encoder_kernel_size=3,
aux_encoder_n_convolutions=2,
aux_encoder_embedding_dim=512,
speaker_embedding_dim=256,
commit_loss_weight=1.0, # Contribution of commitment loss, between 0.1 and 2.0 (default: 1.0)
eprenet_conv_layers=3, # Number of encoder prenet convolution layers.
eprenet_conv_chans=512, # Number of encoder prenet convolution channels.
eprenet_conv_filts=5, # Filter size of encoder prenet convolution.
dprenet_layers=2, # Number of decoder prenet layers.
dprenet_units=256, # Number of decoder prenet hidden units.
positionwise_layer_type="linear", # FFN or conv or (conv+ffn) in encoder after self-attention
positionwise_conv_kernel_size=1, # Filter size of conv
elayers=6, # Number of encoder layers.
eunits=1536, # Number of encoder hidden units.
adim=384, # Number of attention transformation dimensions.
aheads=4, # Number of heads for multi head attention.
dlayers=6, # Number of decoder layers.
dunits=1536, # Number of decoder hidden units.
duration_predictor_layers=2,
duration_predictor_chans=384,
duration_predictor_kernel_size=3,
use_gaussian_upsampling=False,
postnet_layers=5, # Number of postnet layers.
postnet_chans=512, # Number of postnet channels.
postnet_filts=5, # Filter size of postnet.
use_scaled_pos_enc=True, # Whether to use trainable scaled positional encoding.
        use_batch_norm=True, # Whether to use batch normalization in postnet.
encoder_normalize_before=True, # Whether to perform layer normalization before encoder block.
decoder_normalize_before=True, # Whether to perform layer normalization before decoder block.
encoder_concat_after=False, # Whether to concatenate attention layer's input and output in encoder.
decoder_concat_after=False, # Whether to concatenate attention layer's input and output in decoder.
reduction_factor=1, # Reduction factor.
is_multi_speakers=True,
is_spk_layer_norm=True,
pretrained_spkemb_dim=512,
n_speakers=8000,
        spk_embed_dim=128, # Number of speaker embedding dimensions.
spk_embed_integration_type="concat", # concat or add, How to integrate speaker embedding.
use_ssim_loss=True,
use_f0=False,
log_f0=False,
f0_joint_train=False,
f0_alpha=0.1,
stop_gradient_from_pitch_predictor=False,
pitch_predictor_layers=2,
pitch_predictor_chans=384,
pitch_predictor_kernel_size=3,
pitch_predictor_dropout=0.5,
pitch_embed_kernel_size=9,
pitch_embed_dropout=0.5,
is_multi_styles=False,
n_styles=6,
        style_embed_dim=128, # Number of style embedding dimensions.
style_embed_integration_type="concat", # concat or add, How to integrate style embedding.
style_vector_type='mha',#gru or mha, How to generate style vector.
style_query_level='sentence',#phone or sentence
# value: pytorch, xavier_uniform, xavier_normal, kaiming_uniform, kaiming_normal
transformer_init="pytorch", # How to initialize transformer parameters.
initial_encoder_alpha=1.0,
initial_decoder_alpha=1.0,
transformer_enc_dropout_rate=0.1, # Dropout rate in encoder except attention & positional encoding.
transformer_enc_positional_dropout_rate=0.1, # Dropout rate after encoder positional encoding.
transformer_enc_attn_dropout_rate=0.1, # Dropout rate in encoder self-attention module.
transformer_dec_dropout_rate=0.1, # Dropout rate in decoder except attention & positional encoding.
transformer_dec_positional_dropout_rate=0.1, # Dropout rate after decoder positional encoding.
        transformer_dec_attn_dropout_rate=0.1, # Dropout rate in decoder self-attention module.
        transformer_enc_dec_attn_dropout_rate=0.1, # Dropout rate in encoder-decoder attention module.
duration_predictor_dropout_rate=0.1,
eprenet_dropout_rate=0.5, # Dropout rate in encoder prenet.
dprenet_dropout_rate=0.5, # Dropout rate in decoder prenet.
postnet_dropout_rate=0.5, # Dropout rate in postnet.
use_masking=True, # Whether to apply masking for padded part in loss calculation.
use_weighted_masking=False, # Whether to apply weighted masking in loss calculation.
bce_pos_weight=1.0, # Positive sample weight in bce calculation (only for use_masking=true).
loss_type="L2", # L1, L2, L1+L2, How to calculate loss.
# Reference:
# Efficiently Trainable Text-to-Speech System Based on Deep Convolutional Networks with Guided Attention
# https://arxiv.org/abs/1710.08969
use_gst=False,
use_mutual_information=False,
mutual_information_lambda=0.1,
mi_loss_type='unbias',#['bias','unbias']
style_extractor_presteps=300000,
choosestl_steps=100000,
gst_train_att=False,
att_name='100k_noshuffle_gru',
shuffle=False,
gst_reference_encoder='multiheadattention',#'multiheadattention' or 'convs'
gst_reference_encoder_mha_layers=4,
gst_tokens=10,
gst_heads=4,
gst_conv_layers=6,
gst_conv_chans_list=(32, 32, 64, 64, 128, 128),
gst_conv_kernel_size=3,
gst_conv_stride=2,
gst_gru_layers=1,
gst_gru_units=128,
step_use_predicted_dur=20000,
################################
# Optimization Hyperparameters #
################################
learning_rate_decay_scheme='noam',
use_saved_learning_rate=True,
warmup_steps=10000, # Optimizer warmup steps.
decay_steps=12500, # halves the learning rate every 12.5k steps
decay_rate=0.5, # learning rate decay rate
# decay_end=300000,
# decay_rate=0.01,
initial_learning_rate=0.5, # Initial value of learning rate.
final_learning_rate=1e-5,
weight_decay=1e-6,
grad_clip_thresh=1.0,
batch_criterion='utterance',
batch_size=2,
mask_padding=True # set model's padded outputs to padded values
)
if hparams_json:
print('Parsing hparams in json # {}'.format(hparams_json))
with open(hparams_json) as json_file:
hparams.parse_json(json_file.read())
if hparams_string:
print('Parsing command line hparams # {}'.format(hparams_string))
hparams.parse(hparams_string)
# if hparams.use_phone:
# from text.phones import Phones
# phone_class = Phones(hparams.phone_set_file)
# hparams.n_symbols = len(phone_class._symbol_to_id)
# del phone_class
if verbose:
print('Final parsed hparams:')
pprint(hparams.values())
return hparams
| 5,913 |
def set_log_extras(record):
"""set_log_extras [summary].
[extended_summary]
Args:
record ([type]): [description]
"""
record["extra"]["datetime"] = datetime.now(
timezone.utc
) # Log datetime in UTC time zone, even if server is using another timezone
record["extra"]["host"] = os.getenv(
"HOSTNAME", os.getenv("COMPUTERNAME", platform.node())
).split(".")[0]
record["extra"]["pid"] = os.getpid()
record["extra"]["request_id"] = correlation_id.get()
record["extra"]["app_name"] = settings.PROJECT_NAME
| 5,914 |
def ChromiumFetchSync(name, work_dir, git_repo, checkout='origin/master'):
"""Some Chromium projects want to use gclient for clone and dependencies."""
if os.path.isdir(work_dir):
        print('%s directory already exists' % name)
else:
# Create Chromium repositories one deeper, separating .gclient files.
parent = os.path.split(work_dir)[0]
Mkdir(parent)
proc.check_call(['gclient', 'config', git_repo], cwd=parent)
proc.check_call(['git', 'clone', git_repo], cwd=parent)
proc.check_call(['git', 'fetch'], cwd=work_dir)
proc.check_call(['git', 'checkout', checkout], cwd=work_dir)
proc.check_call(['gclient', 'sync'], cwd=work_dir)
return (name, work_dir)
| 5,915 |
def use_proxy_buffer(snippets_stack, vstate):
"""
Forward all changes made in the buffer to the current snippet stack while
function call.
"""
buffer_proxy = VimBufferProxy(snippets_stack, vstate)
old_buffer = vim_helper.buf
try:
vim_helper.buf = buffer_proxy
yield
finally:
vim_helper.buf = old_buffer
buffer_proxy.validate_buffer()
| 5,916 |
def AddAlphaAddonsFlags(parser):
"""Adds the --addons flag to the parser for the alpha track."""
AddAddonsFlagsWithOptions(parser, api_adapter.ALPHA_ADDONS_OPTIONS)
| 5,917 |
def f(path, n):
"""Hierarchical Clustering
"""
p_d.prepare_data(path, n)
data = pd.DataFrame.from_dict(p_d.globaldict)
X = data.iloc[[0, 1], :].values
X = X.transpose()
print(X.shape)
X = X[0:1000]
print(X)
hiclust(X)
| 5,918 |
def kld_error(res, error='simulate', rstate=None, return_new=False,
approx=False):
"""
Computes the `Kullback-Leibler (KL) divergence
<https://en.wikipedia.org/wiki/Kullback-Leibler_divergence>`_ *from* the
discrete probability distribution defined by `res` *to* the discrete
probability distribution defined by a **realization** of `res`.
Parameters
----------
res : :class:`~dynesty.results.Results` instance
:class:`~dynesty.results.Results` instance for the distribution we
are computing the KL divergence *from*.
error : {`'jitter'`, `'resample'`, `'simulate'`}, optional
The error method employed, corresponding to :meth:`jitter_run`,
:meth:`resample_run`, and :meth:`simulate_run`, respectively.
Default is `'simulate'`.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance.
return_new : bool, optional
Whether to return the realization of the run used to compute the
KL divergence. Default is `False`.
approx : bool, optional
Whether to approximate all sets of uniform order statistics by their
associated marginals (from the Beta distribution). Default is `False`.
Returns
-------
kld : `~numpy.ndarray` with shape (nsamps,)
The cumulative KL divergence defined *from* `res` *to* a
random realization of `res`.
new_res : :class:`~dynesty.results.Results` instance, optional
The :class:`~dynesty.results.Results` instance corresponding to
the random realization we computed the KL divergence *to*.
"""
# Define our original importance weights.
logp2 = res.logwt - res.logz[-1]
# Compute a random realization of our run.
if error == 'jitter':
new_res = jitter_run(res, rstate=rstate, approx=approx)
elif error == 'resample':
new_res, samp_idx = resample_run(res, rstate=rstate, return_idx=True)
logp2 = logp2[samp_idx] # re-order our original results to match
elif error == 'simulate':
new_res, samp_idx = resample_run(res, rstate=rstate, return_idx=True)
new_res = jitter_run(new_res)
logp2 = logp2[samp_idx] # re-order our original results to match
else:
raise ValueError("Input `'error'` option '{0}' is not valid."
.format(error))
# Define our new importance weights.
logp1 = new_res.logwt - new_res.logz[-1]
# Compute the KL divergence.
kld = np.cumsum(np.exp(logp1) * (logp1 - logp2))
if return_new:
return kld, new_res
else:
return kld
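Spelled out, with log p1 and log p2 the normalized log-importance-weights of the realization and of the original run, the returned array is the running sum
\[ \mathrm{kld}_k = \sum_{i \le k} e^{\log p_{1,i}} \left( \log p_{1,i} - \log p_{2,i} \right), \]
whose last element is the full discrete sum \(\sum_i p_{1,i}\,(\log p_{1,i} - \log p_{2,i})\), i.e. the KL divergence between the two weight distributions.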
| 5,919 |
def article_detail(request, slug):
"""
Show details of the article
"""
article = get_article_by_slug(slug=slug, annotate=True)
comment_form = CommentForm()
total_views = r.incr(f'article:{article.id}:views')
return render(request, 'articles/post/detail.html',
{'article': article,
'section': article.category,
'comment_form': comment_form,
'total_views': total_views})
| 5,920 |
def deactivate(userid, tfa_response):
"""
Deactivate 2FA for a specified user.
Turns off 2FA by nulling-out the ``login.twofa_secret`` field for the user record,
    and clears any remaining recovery codes.
Parameters:
userid: The user for which 2FA should be disabled.
tfa_response: User-supplied response. May be either the Google Authenticator
(or other app) supplied code, or a recovery code.
Returns: Boolean True if 2FA was successfully disabled, otherwise Boolean False if the
verification of `tfa_response` failed (bad challenge-response or invalid recovery code).
"""
# Sanity checking for length requirement of recovery code/TOTP is performed in verify() function
if verify(userid, tfa_response):
# Verification passed, so disable 2FA
force_deactivate(userid)
return True
else:
return False
| 5,921 |
def hsi_normalize(data, max_=4096, min_ = 0, denormalize=False):
"""
Using this custom normalizer for RGB and HSI images.
    Normalizes to [-1, 1]. It also denormalizes (with denormalize=True).
"""
HSI_MAX = max_
HSI_MIN = min_
NEW_MAX = 1
NEW_MIN = -1
if(denormalize):
scaled = (data - NEW_MIN) * (HSI_MAX - HSI_MIN)/(NEW_MAX - NEW_MIN) + HSI_MIN
return scaled.astype(np.float32)
scaled = (data - HSI_MIN) * (NEW_MAX - NEW_MIN)/(HSI_MAX - HSI_MIN) + NEW_MIN
return scaled.astype(np.float32)
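A quick round-trip check (the default `max_=4096` suggests 12-bit data; the array here is random):
import numpy as np

x = np.random.randint(0, 4096, size=(4, 4)).astype(np.float32)
scaled = hsi_normalize(x)                          # mapped into [-1, 1]
restored = hsi_normalize(scaled, denormalize=True)
assert np.allclose(x, restored, atol=1e-2)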
| 5,922 |
def measure_complexity(export_dir_root: str = './benchmark'):
""" Run keyword extraction benchmark """
_file = '{}/complexity.json'.format(export_dir_root)
if os.path.exists(_file):
with open(_file, 'r') as f:
complexity = json.load(f)
else:
model_list = kex.VALID_ALGORITHMS
logging.info('Measure complexity')
complexity = {}
for model_name in model_list:
logging.info(' - algorithm: {}'.format(model_name))
complexity[model_name] = {}
n = 0
elapse_list = []
elapse_prior_list = []
while n < trial:
elapse, elapse_prior = run_model(model_name)
elapse_list.append(elapse)
if elapse_prior is not None:
elapse_prior_list.append(elapse_prior)
n += 1
complexity[model_name]['elapse'] = sum(elapse_list) / len(elapse_list)
if len(elapse_prior_list) > 0:
complexity[model_name]['elapse_prior'] = sum(elapse_prior_list) / len(elapse_prior_list)
else:
complexity[model_name]['elapse_prior'] = 0
with open(_file, 'w') as f:
json.dump(complexity, f)
df = pd.DataFrame(complexity).T
df_tmp = df['elapse'].round(1)
df_tmp.name = 'Time (sec.)'
df_tmp.to_csv('{}/complexity.csv'.format(export_dir_root))
pd.DataFrame({
'TF': {'Model': 'TF,LexSpec,LexRank', 'Time (sec.)': df['elapse_prior']['TF'].round(1)},
'TFIDF': {'Model': 'TFIDF,TFIDFRank', 'Time (sec.)': df['elapse_prior']['TFIDF'].round(1)},
'LDA': {'Model': 'SingleTPR', 'Time (sec.)': df['elapse_prior']['SingleTPR'].round(1)}
}).to_csv('{}/complexity.prior.csv'.format(export_dir_root))
| 5,923 |
def safe_elem_text(elem: Optional[ET.Element]) -> str:
"""Return the stripped text of an element if available. If not available, return the empty string"""
text = getattr(elem, "text", "")
return text.strip()
| 5,924 |
def resource(filename):
"""Returns the URL a static resource, including versioning."""
return "/static/{0}/{1}".format(app.config["VERSION"], filename)
| 5,925 |
def get_legacy_description(location):
"""
Return the text of a legacy DESCRIPTION.rst.
"""
location = os.path.join(location, 'DESCRIPTION.rst')
if os.path.exists(location):
with open(location) as i:
return i.read()
| 5,926 |
def read_q_stats(csv_path):
"""Return list of Q stats from file"""
q_list = []
with open(csv_path, newline='') as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
q_list.append(float(row['q']))
return q_list
| 5,927 |
def _validate(config):
"""Validate the configuation.
"""
diff = set(REQUIRED_CONFIG_KEYS) - set(config.keys())
if len(diff) > 0:
        raise ValueError(
            "config is missing required keys: {}".format(diff))
elif config['state_initial']['status'] not in config['status_values']:
raise ValueError(
"initial status '{}' is not among the allowed status values"
.format(config['state_initial']['status']))
else:
return config
| 5,928 |
def test_view_connect_task_with_user_different_email(
task, authorized_client, customer_user):
"""Task was placed from different email, than user's
we are trying to assign it to."""
task.user = None
task.user_email = 'example_email@email.email'
task.save()
assert task.user_email != customer_user.email
url = reverse(
'task:connect-task-with-user', kwargs={'token': task.token})
response = authorized_client.post(url)
redirect_location = get_redirect_location(response)
assert redirect_location == reverse('account:details')
task.refresh_from_db()
assert task.user is None
| 5,929 |
def exp_by_squaring(x, n):
"""
Compute x**n using exponentiation by squaring.
"""
if n == 0:
return 1
if n == 1:
return x
if n % 2 == 0:
return exp_by_squaring(x * x, n // 2)
return exp_by_squaring(x * x, (n - 1) // 2) * x
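A few quick checks; the recursion needs only O(log n) multiplications because the exponent is halved at every step:
assert exp_by_squaring(3, 13) == 3 ** 13   # 1594323
assert exp_by_squaring(2.0, 10) == 1024.0
assert exp_by_squaring(5, 0) == 1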
| 5,930 |
def pipInstall(upgrade: bool = False):
"""pipInstall.
    Automatically handle third-party dependencies.
"""
    # Configure the pip index to use the Tsinghua (TUNA) mirror
os.system(
'pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple'
)
    # Uncomment the return below to stop after updating pip only
    # return 0
    # Update pip to >= 10.0.0
    os.system('pip install pip -U')
    # List of third-party packages
depList = [
'numpy',
'pandas',
        'tushare', # financial data API
        'vaex', # big-data storage structure
'scipy',
'sympy',
'matplotlib',
'pygal',
'scienceplots',
'seaborn',
'pyecharts',
'ggplot',
'plotnine',
'plotly',
'cufflinks',
'chart_studio',
        'bqplot', # 2-D interactive plotting
'pylatex',
'pillow',
'jupyter',
'jupyterlab',
        'rise', # adds slideshow (PPT-style) presentation support to Jupyter
'ipython',
'spyder',
'openpyxl',
'xlwt',
'xlrd',
'pyqt5',
'pyqtgraph',
'wxpython',
'sklearn',
        'pytorch', # often fails to download; get the install command from the official site (may need a proxy)
'keras',
'tensorflow',
        'fbprophet', # pip builds may fail on Windows; install via conda, then copy the files from conda's site-packages into pip's
        'orange3', # GUI tool for data mining
        'psycopg2', # PostgreSQL adapter
        'pgxnclient', # PostgreSQL extension management
        'requests',
'parsel',
'scrapy',
'flask',
'tablib',
'pyinstaller',
'fbs',
'nuitka',
'jieba',
'virtualenv',
'virtualenvwrapper',
'pipenv',
]
if upgrade:
for each in depList:
os.system('pip install -U {0}'.format(each))
else:
for each in depList:
os.system('pip install {0}'.format(each))
exeHello('pipInstall')
| 5,931 |
def schedule_item_update(item):
"""
Synchronises a JWP video to the passed :py:class:`mediaplatform.models.MediaItem`. If this
necessitates creating a new JWP video, a new upload endpoint is also created. Upload endpoints
are not created if the JWP video already exists.
"""
# TODO: split this into an asynchronous task
_perform_item_update(item)
| 5,932 |
def correct_tree_leaf_names(filename_in, filename_out):
"""Correct a single tree
    - Replaces the first _ with @: transition between orthofinder and pdc
- Removes the ENA|id| since pdc removes it too
"""
tree = Phylo.read(filename_in, "newick")
ena_regex = re.compile(r"ENA\|[A-Z0-9]*\|")
for terminal in tree.get_terminals():
terminal.name = terminal.name.replace("_", "@", 1)
terminal.name = ena_regex.sub("", terminal.name)
Phylo.write(tree, filename_out, "newick")
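The per-leaf renaming amounts to the following two steps, shown here on a made-up leaf name:
import re

ena_regex = re.compile(r"ENA\|[A-Z0-9]*\|")
name = "speciesA_gene1_ENA|AB123456|tail"  # hypothetical OrthoFinder-style leaf name
name = name.replace("_", "@", 1)           # first "_" becomes "@"
name = ena_regex.sub("", name)             # strip the ENA accession prefix
print(name)                                # speciesA@gene1_tail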
| 5,933 |
def _compare(expected, actual):
"""
Compare SslParams object with dictionary
"""
if expected is None and actual is None:
return True
if isinstance(expected, dict) and isinstance(actual, SslParams):
return expected == actual.__dict__
return False
| 5,934 |
def update_token(refresh_token, user_id):
"""
Refresh the tokens for a given user
:param: refresh_token
Refresh token of the user
:param: user_id
ID of the user for whom the token is to be generated
:returns:
Generated JWT token
"""
token = Token.query.filter_by(refresh_token=refresh_token).first()
token.access_token = Token.encode_token(user_id, "access").decode("utf-8")
token.refresh_token = Token.encode_token(user_id, "refresh").decode(
"utf-8"
)
db.session.commit()
return token
| 5,935 |
def minimax(board):
"""
Returns the optimal action for the current player on the board.
"""
def max_value(state, depth=0):
if ttt.terminal(state):
return (None, ttt.utility(state))
v = (None, -2)
for action in ttt.actions(state):
v = max(v, (action, min_value(ttt.result(state, action), depth+1)[1] - (depth/10)), key=lambda x: x[1])
return v
def min_value(state, depth=0):
if ttt.terminal(state):
return (None, ttt.utility(state))
v = (None, 2)
for action in ttt.actions(state):
v = min(v, (action, max_value(ttt.result(state, action), depth+1)[1] + (depth/10)), key=lambda x: x[1])
return v
if ttt.player(board) == X:
return max_value(board)[0]
elif ttt.player(board) == O:
return min_value(board)[0]
| 5,936 |
def random_order_dic_keys_into_list(in_dic):
"""
Read in dictionary keys, and return random order list of IDs.
"""
id_list = []
for key in in_dic:
id_list.append(key)
random.shuffle(id_list)
return id_list
| 5,937 |
def package(config_file, destination):
"""Package an existing Docker image following the UP42 specification. (cf. https://docs.up42.com/).
\b
Arguments:
config_file: A path to a valid YAML config file.
destination: A path to the output directory for the generated files.
"""
loguru.logger.info('Package the application...')
functionnal.package(config_file, destination)
| 5,938 |
def read_json(json_path: Union[str, Path]) -> Dict:
"""
Read json file from a path.
Args:
json_path: File path to a json file.
Returns:
Python dictionary
"""
with open(json_path, "r") as fp:
data = json.load(fp)
return data
| 5,939 |
def igraph_to_csc(g, save=False, fn="csc_matlab"):
"""
Convert an igraph to scipy.sparse.csc.csc_matrix
Positional arguments:
=====================
g - the igraph graph
Optional arguments:
===================
save - save file to disk
fn - the file name to be used when writing (appendmat = True by default)
"""
assert isinstance(g, igraph.Graph), "Arg1 'g' must be an igraph graph"
print "Creating CSC from igraph object ..."
gs = csc_matrix(g.get_adjacency().data) # Equiv of calling to_dense so may case MemError
print "CSC creation complete ..."
if save:
print "Saving to MAT file ..."
sio.savemat(fn, {"data":gs}, True) # save as MAT format only. No other options!
return gs
| 5,940 |
def xml2dict(data):
"""Turn XML into a dictionary."""
converter = XML2Dict()
if hasattr(data, 'read'):
# Then it's a file.
data = data.read()
return converter.fromstring(data)
| 5,941 |
def balance_dataset(data, size=60000):
"""Implements upsampling and downsampling for the three classes (low, medium, and high)
Parameters
----------
data : pandas DataFrame
A dataframe containing the labels indicating the different nightlight intensity bins
size : int
The number of samples per classes for upsampling and downsampling
Returns
-------
pandas DataFrame
The data with relabelled and balanced nightlight intensity classes
"""
bin_labels = data.label.unique()
classes = []
for label in bin_labels:
class_ = data[data.label == label].reset_index()
if len(class_) >= size:
sample = class_.sample(
n=size, replace=False, random_state=SEED
)
elif len(class_) < size:
sample = class_.sample(
n=size, replace=True, random_state=SEED
)
classes.append(sample)
data_balanced = pd.concat(classes)
data_balanced = data_balanced.sample(
frac=1, random_state=SEED
).reset_index(drop=True)
data_balanced = data_balanced.iloc[:, 1:]
return data_balanced
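A small sketch of how it might be called, assuming the module defines `SEED` and the frame has a `label` column; `size` is shrunk from the 60,000 default purely for illustration:
import pandas as pd

df = pd.DataFrame({
    'image_id': range(10),
    'label': ['low'] * 6 + ['medium'] * 3 + ['high'] * 1,
})
balanced = balance_dataset(df, size=4)   # each class up-/down-sampled to 4 rows
print(balanced.label.value_counts())     # low, medium, high -> 4 each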
| 5,942 |
def updateStopList(userId, newStop):
"""
Updates the list of stops for the user in the dynamodb table
"""
response = dynamodb_table.query(
KeyConditionExpression=Key('userId').eq(userId))
if response and len(response["Items"]) > 0:
stops = response["Items"][0]['stops']
else:
stops = {}
if newStop['code'] in stops:
existingStop = stops[newStop['code']]
if 'buses' in existingStop:
newStop['buses'] = list(
set(existingStop['buses'] + newStop['buses']))
stops[newStop['code']] = newStop
response = dynamodb_table.update_item(
Key={
'userId': userId
},
UpdateExpression="set stops = :s",
ExpressionAttributeValues={
':s': stops
}
)
card_title = render_template('card_title')
responseText = render_template(
"add_bus_success", stop=newStop['code'], route=",".join(newStop['buses']))
return statement(responseText).simple_card(card_title, responseText)
| 5,943 |
def create_indices(new_data):
"""
Updates index files for faster FILTER functionality.
:param new_data: New data to enter into data indexes. Of the form
{
"stb": box_id_string,
"date": date_string YYYY-MM-DD,
"title": title_string,
"provider": provider_string,
"rev": revenue_string,
"time": time_string HH:MM
}
    Data has been checked prior to calling this function. Ideally there would be checks here
    if this function were used elsewhere.
:return:
"""
try:
os.makedirs(DIR)
except FileExistsError:
pass
for each in ['date', 'title', 'provider', 'rev', 'time']:
key = new_data[each]
filename = os.path.normpath(DIR + '/' + each + '_index.json')
try:
with open(filename, 'r') as data_file:
data = json.load(data_file)
except IOError:
data = {}
if key not in data:
data[key] = []
if new_data['stb'] not in data[key]:
data[key].append(new_data['stb'])
with open(filename, 'w') as data_file:
json.dump(data, data_file)
return
| 5,944 |
def is_blacklisted_module(module: str) -> bool:
"""Return `True` if the given module matches a blacklisted pattern."""
# Exclude stdlib modules such as the built-in "_thread"
if is_stdlib_module(module):
return False
# Allow user specified exclusions via CLI
blacklist = set.union(MODULE_BLACKLIST_PATTERNS, config.excluded_imports)
return any(re.fullmatch(p, module) for p in blacklist)
| 5,945 |
def api_detach(sess, iqn):
"""
Detach the given volume from the instance using OCI API calls.
Parameters
----------
sess: OCISession
The OCISEssion instance..
iqn: str
The iSCSI qualified name.
Returns
-------
bool
True on success, False otherwise.
"""
if sess is None:
_logger.error("Need OCI Service to detach volume.\n"
"Make sure to install and configure "
"OCI Python SDK (python-oci-sdk)\n")
return False
for v in sess.this_instance().all_volumes():
if v.get_iqn() == iqn:
try:
print "Detaching volume"
v.detach()
return True
except OCISDKError as e:
_logger.debug("Failed to disconnect volume", exc_info=True)
_logger.error("Failed to disconnect volume %s from this instance: %s" % (iqn, e))
return False
_logger.error("Volume not found...\n")
return False
| 5,946 |
def ComponentLibrary(self, lib_name, *args, **kwargs):
"""Pseudo-builder for library to handle platform-dependent type.
Args:
self: Environment in which we were called.
lib_name: Library name.
args: Positional arguments.
kwargs: Keyword arguments.
Returns:
Passthrough return code from env.StaticLibrary() or env.SharedLibrary().
"""
# Clone and modify environment
env = _ComponentPlatformSetup(self, 'ComponentLibrary', **kwargs)
# Make appropriate library type
if env.get('COMPONENT_STATIC'):
lib_outputs = env.StaticLibrary(lib_name, *args, **kwargs)
else:
lib_outputs = env.SharedLibrary(lib_name, *args, **kwargs)
# Add dependencies on includes
env.Depends(lib_outputs, env['INCLUDES'])
# Scan library outputs for files we need to link against this library, and
# files we need to run executables linked against this library.
need_for_link = []
need_for_debug = []
need_for_run = []
for o in lib_outputs:
if o.suffix in env['COMPONENT_LIBRARY_LINK_SUFFIXES']:
need_for_link.append(o)
if o.suffix in env['COMPONENT_LIBRARY_DEBUG_SUFFIXES']:
need_for_debug.append(o)
if o.suffix == env['SHLIBSUFFIX']:
need_for_run.append(o)
all_outputs = lib_outputs
# Install library in intermediate directory, so other libs and programs can
# link against it
all_outputs += env.Replicate('$LIB_DIR', need_for_link)
# Publish output
env.Publish(lib_name, 'link', need_for_link)
env.Publish(lib_name, 'run', need_for_run)
env.Publish(lib_name, 'debug', need_for_debug)
# Add an alias to build and copy the library, and add it to the right groups
a = self.Alias(lib_name, all_outputs)
for group in env['COMPONENT_LIBRARY_GROUPS']:
SCons.Script.Alias(group, a)
# Store list of components for this library
env._StoreComponents(lib_name)
# Let component_targets know this target is available in the current mode.
env.SetTargetProperty(lib_name, TARGET_PATH=lib_outputs[0])
# If library should publish itself, publish as if it was a program
if env.get('COMPONENT_LIBRARY_PUBLISH'):
env['PROGRAM_BASENAME'] = lib_name
env.Defer(ComponentProgramDeferred)
# Return the library
return lib_outputs[0]
| 5,947 |
def write_file(directory, file_name, data):
"""Write data to file."""
directory = get_directory(directory)
with open(directory + file_name, "w") as open_file:
open_file.write(str(data).strip())
| 5,948 |
def _build_geojson_query(query):
"""
See usages below.
"""
# this is basically a translation of the postgis ST_AsGeoJSON example into sqlalchemy/geoalchemy2
return func.json_build_object(
"type",
"FeatureCollection",
"features",
func.json_agg(func.ST_AsGeoJSON(query.subquery(), maxdecimaldigits=5).cast(JSON)),
)
| 5,949 |
async def on_device_state_changed(
coordinator: OverkizDataUpdateCoordinator, event: Event
) -> None:
"""Handle device state changed event."""
if not event.device_url:
return
for state in event.device_states:
device = coordinator.devices[event.device_url]
device.states[state.name] = state
| 5,950 |
def test_command_line_interface() -> None:
"""Test the CLI."""
assert "Usage: nessie" in execute_cli_command([])
assert "Usage: nessie" in execute_cli_command(["--help"])
assert __version__ in execute_cli_command(["--version"])
references = ReferenceSchema().loads(execute_cli_command(["--json", "branch", "-l"]), many=True)
assert len(references) == 1
assert references[0].name == "main"
assert isinstance(references[0], Branch)
| 5,951 |
def p_quanexpr_logic_or(p):
"""expr : expr OR expr"""
p[0] = ( 'z3.Or( ' + val(p[1]) + ' , ' + val(p[3]) +' ) ' , body(p[1]) + body(p[3]) )
| 5,952 |
def test_non_empty_proto():
"""Build a graph proto from an example proto."""
proto = pbutil.FromFile(TEST_PROTO, xla_pb2.HloProto())
graph = xla.BuildProgramGraphProto(proto)
assert len(graph.node) == 155
assert len(graph.function) == 5
| 5,953 |
def url_encode(obj, charset='utf-8', encode_keys=False, sort=False, key=None,
separator='&'):
"""URL encode a dict/`MultiDict`. If a value is `None` it will not appear
in the result string. Per default only values are encoded into the target
charset strings. If `encode_keys` is set to ``True`` unicode keys are
supported too.
If `sort` is set to `True` the items are sorted by `key` or the default
sorting algorithm.
.. versionadded:: 0.5
`sort`, `key`, and `separator` were added.
:param obj: the object to encode into a query string.
:param charset: the charset of the query string.
:param encode_keys: set to `True` if you have unicode keys.
:param sort: set to `True` if you want parameters to be sorted by `key`.
:param separator: the separator to be used for the pairs.
:param key: an optional function to be used for sorting. For more details
check out the :func:`sorted` documentation.
"""
if isinstance(obj, MultiDict):
items = obj.lists()
elif isinstance(obj, dict):
items = []
for k, v in obj.iteritems():
if not isinstance(v, (tuple, list)):
v = [v]
items.append((k, v))
else:
items = obj or ()
if sort:
items.sort(key=key)
tmp = []
for key, values in items:
if encode_keys and isinstance(key, unicode):
key = key.encode(charset)
else:
key = str(key)
for value in values:
if value is None:
continue
elif isinstance(value, unicode):
value = value.encode(charset)
else:
value = str(value)
tmp.append('%s=%s' % (urllib.quote(key),
urllib.quote_plus(value)))
return separator.join(tmp)
| 5,954 |
def test_return_ruleobj_list_with_no_pyobj_specified(reinitialize_ruleclass_variables):
"""Raises NoRulesError if no rules list is specified as argument."""
with pytest.raises(SystemExit):
_return_ruleobj_list_from_listrules(pyobj=None)
| 5,955 |
def copy_variables(infile, outfile):
"""Create variables corresponding to the file dimensions
by copying from infile"""
for var_name in ['TIME', 'LATITUDE', 'LONGITUDE']:
varin = infile.variables[var_name]
outVar = outfile.createVariable(var_name, varin.datatype,
varin.dimensions,
fill_value=varin._FillValue)
outVar[:] = varin[:]
var_atts = {}
for att in varin.ncattrs():
if not att == '_FillValue':
                var_atts[att] = getattr(varin, att)
outVar.setncatts(var_atts)
| 5,956 |
def create_export_and_wait_for_completion(name, bucket, prefix, encryption_config, role_arn=None):
"""
Request QLDB to export the contents of the journal for the given time period and S3 configuration. Before calling
this function the S3 bucket should be created, see
:py:class:`pyqldbsamples.export_journal.create_s3_bucket_if_not_exists`
:type name: str
:param name: Name of the ledger to create a journal export for.
:type bucket: str
:param bucket: S3 bucket to write the data to.
:type prefix: str
:param prefix: S3 prefix to be prefixed to the files being written.
:type encryption_config: dict
:param encryption_config: Encryption configuration for S3.
:type role_arn: str
:param role_arn: The IAM role ARN to be used when exporting the journal.
:rtype: dict
:return: The result of the request.
"""
if role_arn is None:
role_arn = create_export_role(EXPORT_ROLE_NAME, encryption_config.get('KmsKeyArn'), ROLE_POLICY_NAME, bucket)
try:
start_time = datetime.utcnow() - timedelta(minutes=JOURNAL_EXPORT_TIME_WINDOW_MINUTES)
end_time = datetime.utcnow()
result = create_export(name, start_time, end_time, bucket, prefix, encryption_config, role_arn)
wait_for_export_to_complete(Constants.LEDGER_NAME, result.get('ExportId'))
logger.info('JournalS3Export for exportId {} is completed.'.format(result.get('ExportId')))
return result
except Exception as e:
logger.exception('Unable to create an export!')
raise e
| 5,957 |
async def test_opp_binary_sensor_notification(opp, numato_fixture):
"""Test regular operations from within Open Peer Power."""
assert await async_setup_component(opp, "numato", NUMATO_CFG)
await opp.async_block_till_done() # wait until services are registered
assert opp.states.get("binary_sensor.numato_binary_sensor_mock_port2").state == "on"
await opp.async_add_executor_job(numato_fixture.devices[0].callbacks[2], 2, False)
await opp.async_block_till_done()
assert (
opp.states.get("binary_sensor.numato_binary_sensor_mock_port2").state == "off"
)
| 5,958 |
def sample_student(user, **kwargs):
"""create and return sample student"""
return models.Student.objects.create(user=user, **kwargs)
| 5,959 |
def normalise_field_value(value):
""" Converts a field value to a common type/format to make comparable to another. """
if isinstance(value, datetime):
return make_timezone_naive(value)
elif isinstance(value, Decimal):
return decimal_to_string(value)
return value
| 5,960 |
def edit_wn_list(item_list, list_name, all_values, tenant_file_name):
"""
Edit WAN network list
:param item_list: Item list to save
:param list_name: Name of List
:param all_values: All values
:param tenant_file_name: File-system friendly tenant_name
:return: shallow copy of item_list.
"""
loop = True
while loop:
action = [
("View list", 'view'),
("Add to list", 'add'),
("Remove items from list", 'remove'),
("Load/Save list", 'file'),
("Go Back", 'back')
]
banner = "\nSelect Action:"
line_fmt = "{0}: {1}"
# just pull 2nd value
selected_action = menus.quick_menu(banner, line_fmt, action)[1]
if selected_action == 'view':
print("\n{0} ({1} entries):".format(list_name, len(item_list)))
for item in item_list:
print("\t{0}".format(item))
elif selected_action == 'add':
item_list = add_to_list(item_list, list_name, all_values)
elif selected_action == 'remove':
item_list = remove_from_list(item_list, list_name, all_values)
elif selected_action == 'file':
item_list = load_save_list(item_list, list_name, all_values, tenant_file_name)
elif selected_action == 'back':
loop = False
else:
sys.exit()
# return a shallow copy of site list
return item_list[:]
| 5,961 |
def startserver():
"""
Python Hydrus CLI.
"""
pass
| 5,962 |
def parse_yaml(stream: Any) -> Tuple[Swagger, List[str]]:
"""
Parse the Swagger specification from the given text.
:param stream: YAML representation of the Swagger spec satisfying file interface
:return: (parsed Swagger specification, parsing errors if any)
"""
# adapted from https://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts
# and https://stackoverflow.com/questions/13319067/parsing-yaml-return-with-line-number
object_pairs_hook = collections.OrderedDict
class OrderedLoader(yaml.SafeLoader):
def compose_node(self, parent, index):
# the line number where the previous token has ended (plus empty lines)
node = Composer.compose_node(self, parent, index)
node.__lineno__ = self.line + 1
return node
def construct_mapping(loader, node, deep=False):
loader.flatten_mapping(node)
mapping = Constructor.construct_pairs(loader, node, deep=deep)
ordered_hook = object_pairs_hook(mapping)
# assert not hasattr(ordered_hook, "__lineno__"), \
# "Expected ordered mapping to have no __lineno__ attribute set before"
# setattr(ordered_hook, "__lineno__", node.__lineno__)
return RawDict(adict=ordered_hook, source=stream.name, lineno=node.__lineno__)
OrderedLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping)
raw_dict = yaml.load(stream, OrderedLoader)
swagger = Swagger()
errors = [] # type: List[str]
adict = raw_dict.adict
tag_exists: bool = False
if 'tags' in adict:
if len(adict['tags']) > 0:
tag_exists = True
for tag in adict['tags']:
for key, value in tag.adict.items():
if key == 'name':
swagger.name = value
if swagger.name == '':
if not (OptionKey.PermitAbsenseOfTagNameIfNoTagsExist in parse_options
and not tag_exists):
errors.append('missing tag "name" in the swagger specification')
swagger.base_path = adict.get('basePath', '')
for path_id, path_dict in adict.get('paths', RawDict()).adict.items():
path, path_errors = _parse_path(raw_dict=path_dict)
path.identifier = path_id
path.swagger = swagger
errors.extend(['in path {!r}: {}'.format(path_id, error) for error in path_errors])
if not path_errors:
swagger.paths[path_id] = path
for def_id, def_dict in adict.get('definitions', RawDict()).adict.items():
typedef, def_errors = _parse_typedef(raw_dict=def_dict)
errors.extend(['in definition {!r}: {}'.format(def_id, error) for error in def_errors])
adef = Definition()
adef.swagger = swagger
adef.identifier = def_id
adef.typedef = typedef
if not def_errors:
swagger.definitions[def_id] = adef
for param_id, param_dict in adict.get('parameters', RawDict()).adict.items():
param, param_errors = _parse_parameter(raw_dict=param_dict)
errors.extend(['in parameter {!r}: {}'.format(param_id, error) for error in param_errors])
if not param_errors:
swagger.parameters[param_id] = param
swagger.raw_dict = raw_dict
return swagger, errors
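# Hedged usage sketch for parse_yaml() above, not part of the original module.
# Assumes a Swagger 2.0 file at the hypothetical path "swagger.yaml"; the stream
# must be a real file object because parse_yaml() reads `stream.name` when it
# builds each RawDict.
def _example_parse_swagger(path="swagger.yaml"):
    with open(path) as stream:
        swagger, errors = parse_yaml(stream)
    for error in errors:
        print("parse error:", error)
    return swagger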
| 5,963 |
def createOptFlow_Farneback(): # real signature unknown; restored from __doc__
""" createOptFlow_Farneback() -> retval """
pass
| 5,964 |
def dataset_first_n(dataset, n, show_classes=False, class_labels=None, **kw):
"""
Plots first n images of a dataset containing tensor images.
"""
    # [(img0, cls0), ..., (imgN, clsN)]
first_n = list(itertools.islice(dataset, n))
# Split (image, class) tuples
first_n_images, first_n_classes = zip(*first_n)
if show_classes:
titles = first_n_classes
if class_labels:
titles = [class_labels[cls] for cls in first_n_classes]
else:
titles = []
return tensors_as_images(first_n_images, titles=titles, **kw)
| 5,965 |
def unit_conversion(thing, units, length=False):
"""converts base data between metric, imperial, or nautical units"""
if 'n/a' == thing:
return 'n/a'
try:
thing = round(thing * CONVERSION[units][0 + length], 2)
except TypeError:
thing = 'fubar'
return thing, CONVERSION[units][2 + length]
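# Hedged sketch of the lookup table unit_conversion() above expects. The real
# CONVERSION dict lives elsewhere in the original module; this toy stand-in only
# illustrates the assumed layout: [speed factor, length factor, speed label, length label].
CONVERSION = {
    "metric": [1.852, 1.852, "km/h", "km"],      # converting from knots / nautical miles
    "imperial": [1.151, 1.151, "mph", "mi"],
}

# unit_conversion(10, "metric")              -> (18.52, "km/h")
# unit_conversion(10, "metric", length=True) -> (18.52, "km")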
| 5,966 |
def timed(func):
"""Decorate function to print elapsed time upon completion."""
@functools.wraps(func)
def wrap(*args, **kwargs):
t1 = default_timer()
result = func(*args, **kwargs)
t2 = default_timer()
print('func:{} args:[{}, {}] took: {:.4f} sec'.format(
func.__name__, args, kwargs, t2 - t1))
return result
return wrap
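# Minimal usage sketch for the timed decorator above; the decorated function is
# made up purely for illustration.
@timed
def example_workload(n):
    """Toy function that just sums the first n integers."""
    return sum(range(n))

# example_workload(1_000_000) prints something like:
# func:example_workload args:[(1000000,), {}] took: 0.0312 sec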
| 5,967 |
def plot_rollouts_segment_wise(
segments_ground_truth: List[List[StepSequence]],
segments_multiple_envs: List[List[List[StepSequence]]],
segments_nominal: List[List[StepSequence]],
use_rec: bool,
idx_iter: int,
idx_round: Optional[int] = None,
state_labels: Optional[List[str]] = None,
save_dir: Optional[str] = None,
) -> List[plt.Figure]:
"""
Plot the different rollouts in separate figures and the different state dimensions along the columns.
:param segments_ground_truth: list of lists containing rollout segments from the ground truth environment
:param segments_multiple_envs: list of lists of lists containing rollout segments from different environment
instances, e.g. samples from a posterior coming from `NDPR`
:param segments_nominal: list of lists containing rollout segments from the nominal environment
:param use_rec: `True` if pre-recorded actions have been used to generate the rollouts
:param idx_iter: selected iteration
:param idx_round: selected round
:param state_labels: y-axes labels to override the default value which is extracted from the state space's labels
:param save_dir: if not `None` create a subfolder plots in `save_dir` and save the plots in there
:return: list of handles to the created figures
"""
# Extract the state dimension, and the number of most likely samples from the data
dim_state = segments_ground_truth[0][0].get_data_values("states")[0, :].size
num_samples = len(segments_multiple_envs[0][0])
# Extract the state labels if not explicitly given
if state_labels is None:
env_spec = segments_ground_truth[0][0].rollout_info.get("env_spec", None)
state_labels = env_spec.state_space.labels if env_spec is not None else np.empty(dim_state, dtype=object)
else:
if len(state_labels) != dim_state:
raise pyrado.ShapeErr(given=state_labels, expected_match=(dim_state,))
colors = plt.get_cmap("Reds")(np.linspace(0.5, 1.0, num_samples))
fig_list = []
for idx_r in range(len(segments_ground_truth)):
fig, axs = plt.subplots(nrows=dim_state, figsize=(16, 9), tight_layout=True, sharex="col")
for idx_state in range(dim_state):
# Plot the real segments
cnt_step = [0]
for segment_real in segments_ground_truth[idx_r]:
axs[idx_state].plot(
np.arange(cnt_step[-1], cnt_step[-1] + segment_real.length),
segment_real.get_data_values("states", truncate_last=True)[:, idx_state],
c="black",
label="real" if cnt_step[-1] == 0 else "", # only print once
)
cnt_step.append(cnt_step[-1] + segment_real.length)
# Plot the maximum likely simulated segments
for idx_seg, sml in enumerate(segments_multiple_envs[idx_r]):
for idx_dp, smdp in enumerate(sml):
axs[idx_state].plot(
np.arange(cnt_step[idx_seg], cnt_step[idx_seg] + smdp.length),
smdp.get_data_values("states", truncate_last=True)[:, idx_state],
c=colors[idx_dp],
ls="--",
label=f"sim ml {idx_dp}" if cnt_step[idx_seg] == 0 else "", # only print once for each dp set
)
# Plot the nominal simulation's segments
for idx_seg, sn in enumerate(segments_nominal[idx_r]):
axs[idx_state].plot(
np.arange(cnt_step[idx_seg], cnt_step[idx_seg] + sn.length),
sn.get_data_values("states", truncate_last=True)[:, idx_state],
c="steelblue",
ls="-.",
label="sim nom" if cnt_step[idx_seg] == 0 else "", # only print once
)
axs[idx_state].set_ylabel(state_labels[idx_state])
        # Set the window title and the legend, placing the latter above the plot and expanding it fully.
        # Use separate local names so the use_rec / idx_round arguments are not overwritten across rollouts.
        rec_str = ", using rec actions" if use_rec else ""
        rnd_str = f"round {idx_round}, " if idx_round is not None else ""
        fig.canvas.set_window_title(
            f"Target Domain and Simulated Rollouts (iteration {idx_iter}, {rnd_str}rollout {idx_r}{rec_str})"
        )
lg = axs[0].legend(
ncol=2 + num_samples,
bbox_to_anchor=(0.0, 1.02, 1.0, 0.102),
loc="lower left",
mode="expand",
borderaxespad=0.0,
)
# Save if desired
if save_dir is not None:
for fmt in ["pdf", "pgf"]:
os.makedirs(os.path.join(save_dir, "plots"), exist_ok=True)
use_rec = "_use_rec" if use_rec else ""
rnd = f"_round_{idx_round}" if idx_round is not None else ""
fig.savefig(
os.path.join(save_dir, "plots", f"posterior_iter_{idx_iter}{rnd}_rollout_{idx_r}{use_rec}.{fmt}"),
bbox_extra_artists=(lg,),
dpi=500,
)
# Append current figure
fig_list.append(fig)
return fig_list
| 5,968 |
def fade_out(s, fade=cf.output.fade_out):
"""
Apply fade-out to waveform time signal.
Arguments:
ndarray:s -- Audio time series
float:fade (cf.output.fade_out) -- Fade-out length in seconds
Returns faded waveform.
"""
length = int(fade * sr)
shape = [1] * len(s.shape)
shape[0] = length
win = np.hanning(length * 2)[length:]
win = win.reshape(shape)
if length < len(s):
s[-length:] = s[-length:] * win
return s
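# Hedged usage sketch for fade_out() above. `sr` and the config object `cf` are
# module globals in the original code; the values below are assumptions made
# only so the call can be illustrated (fade is passed explicitly to sidestep the
# cf-based default).
import numpy as np

sr = 22050                                            # assumed sample rate in Hz
tone = np.sin(2 * np.pi * 440 * np.arange(sr) / sr)   # one second of a 440 Hz sine
faded = fade_out(tone.copy(), fade=0.25)              # taper the final 0.25 s with a half-Hann window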
| 5,969 |
def AddAsyncFlag(parser, default_async_for_cluster=False):
"""Add an async flag."""
if default_async_for_cluster:
modified_async_flag = base.Argument(
'--async',
action=arg_parsers.StoreTrueFalseAction,
dest='async_',
help="""\
Return immediately, without waiting for the operation in progress to
complete. Defaults to --no-async for Cloud Run (fully managed) and --async
for Cloud Run for Anthos.""")
modified_async_flag.AddToParser(parser)
else:
base.ASYNC_FLAG.AddToParser(parser)
| 5,970 |
def _build_theme_template(template_name, env, files, config, nav):
""" Build a template using the theme environment. """
log.debug("Building theme template: {}".format(template_name))
try:
template = env.get_template(template_name)
except TemplateNotFound:
log.warning("Template skipped: '{}' not found in theme directories.".format(template_name))
return
output = _build_template(template_name, template, files, config, nav)
if output.strip():
output_path = os.path.join(config['site_dir'], template_name)
utils.write_file(output.encode('utf-8'), output_path)
if template_name == 'sitemap.xml':
log.debug("Gzipping template: %s", template_name)
gz_filename = '{}.gz'.format(output_path)
with open(gz_filename, 'wb') as f:
timestamp = utils.get_build_timestamp()
with gzip.GzipFile(fileobj=f, filename=gz_filename, mode='wb', mtime=timestamp) as gz_buf:
gz_buf.write(output.encode('utf-8'))
else:
log.info("Template skipped: '{}' generated empty output.".format(template_name))
| 5,971 |
def create_arch(T, D, units=64, alpha=0, dr_rate=.3):
"""Creates the architecture of miint"""
X = K.Input(shape=(T, D))
active_mask = K.Input(shape=(T, 1))
edges = K.Input(shape=(T, None))
ycell = netRNN(T=T, D=D, units=units, alpha=alpha, dr_rate=dr_rate)
yrnn = K.layers.RNN(ycell, return_sequences=True)
Y = yrnn((X, edges, active_mask))
return K.Model(inputs=[X, active_mask, edges], outputs=Y)
| 5,972 |
def redact(str_to_redact, items_to_redact):
""" return str_to_redact with items redacted
"""
if items_to_redact:
for item_to_redact in items_to_redact:
str_to_redact = str_to_redact.replace(item_to_redact, '***')
return str_to_redact
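# Minimal usage sketch for redact() above; the secret value is made up.
log_line = "connecting with token=abc123 for user=alice"
print(redact(log_line, ["abc123"]))   # -> "connecting with token=*** for user=alice"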
| 5,973 |
def FibreDirections(mesh):
"""
    Compute the per-element fibre directions at integration points for the Florence
    material model and for the auxiliary routines in this script. The first directions
    are used to build the rotation matrix, so they must always be supplied in the order
    Normal, Tangential, Axial.
"""
ndim = mesh.InferSpatialDimension()
nfibre = 2
# Geometric definitions per element
divider = mesh.elements.shape[1]
directrix = [0.,1.,0.]
fibre_direction = np.zeros((mesh.nelem,nfibre,ndim),dtype=np.float64)
    # Loop through the elements in the mesh
for elem in range(mesh.nelem):
# Geometric definitions per element
center = np.sum(mesh.points[mesh.elements[elem,:],:],axis=0)/divider
tangential = np.cross(directrix,center)
tangential = tangential/np.linalg.norm(tangential)
normal = np.cross(tangential,directrix)
# Define the anisotropic orientations
fibre_direction[elem,0,:]=np.multiply(directrix,np.cos(np.pi/4.)) + np.multiply(tangential,np.sin(np.pi/4.))
fibre_direction[elem,1,:]=np.multiply(directrix,np.cos(np.pi/4.)) - np.multiply(tangential,np.sin(np.pi/4.))
return fibre_direction
| 5,974 |
def put(consul_url=None, token=None, key=None, value=None, **kwargs):
"""
Put values into Consul
:param consul_url: The Consul server URL.
    :param key: The key to set.
:param value: The value to set the key to.
:param flags: This can be used to specify an unsigned value
between 0 and 2^64-1. Clients can choose to use
this however makes sense for their application.
:param cas: This flag is used to turn the PUT into a
Check-And-Set operation.
:param acquire: This flag is used to turn the PUT into a
lock acquisition operation.
:param release: This flag is used to turn the PUT into a
lock release operation.
:return: Boolean & message of success or failure.
CLI Example:
.. code-block:: bash
salt '*' consul.put key='web/key1' value="Hello there"
salt '*' consul.put key='web/key1' value="Hello there" acquire='d5d371f4-c380-5280-12fd-8810be175592'
salt '*' consul.put key='web/key1' value="Hello there" release='d5d371f4-c380-5280-12fd-8810be175592'
"""
ret = {}
if not consul_url:
consul_url = _get_config()
if not consul_url:
log.error("No Consul URL found.")
ret["message"] = "No Consul URL found."
ret["res"] = False
return ret
if not key:
raise SaltInvocationError('Required argument "key" is missing.')
# Invalid to specified these together
conflicting_args = ["cas", "release", "acquire"]
for _l1 in conflicting_args:
for _l2 in conflicting_args:
if _l1 in kwargs and _l2 in kwargs and _l1 != _l2:
raise SaltInvocationError(
"Using arguments `{}` and `{}` together is invalid.".format(
_l1, _l2
)
)
query_params = {}
available_sessions = session_list(consul_url=consul_url, return_list=True)
_current = get(consul_url=consul_url, token=token, key=key)
if "flags" in kwargs:
if kwargs["flags"] >= 0 and kwargs["flags"] <= 2 ** 64:
query_params["flags"] = kwargs["flags"]
if "cas" in kwargs:
if _current["res"]:
if kwargs["cas"] == 0:
ret["message"] = "Key {} exists, index must be non-zero.".format(key)
ret["res"] = False
return ret
if kwargs["cas"] != _current["data"]["ModifyIndex"]:
ret["message"] = "Key {} exists, but indexes do not match.".format(key)
ret["res"] = False
return ret
query_params["cas"] = kwargs["cas"]
else:
ret[
"message"
] = "Key {} does not exists, CAS argument can not be used.".format(key)
ret["res"] = False
return ret
if "acquire" in kwargs:
if kwargs["acquire"] not in available_sessions:
ret["message"] = "{} is not a valid session.".format(kwargs["acquire"])
ret["res"] = False
return ret
query_params["acquire"] = kwargs["acquire"]
if "release" in kwargs:
if _current["res"]:
if "Session" in _current["data"]:
if _current["data"]["Session"] == kwargs["release"]:
query_params["release"] = kwargs["release"]
else:
ret["message"] = "{} locked by another session.".format(key)
ret["res"] = False
return ret
            else:
                ret["message"] = "Key {} is not locked by a session.".format(key)
                ret["res"] = False
else:
log.error("Key {0} does not exist. Skipping release.")
data = value
function = "kv/{}".format(key)
method = "PUT"
res = _query(
consul_url=consul_url,
token=token,
function=function,
method=method,
data=data,
query_params=query_params,
)
if res["res"]:
ret["res"] = True
ret["data"] = "Added key {} with value {}.".format(key, value)
else:
ret["res"] = False
ret["data"] = "Unable to add key {} with value {}.".format(key, value)
if "error" in res:
ret["error"] = res["error"]
return ret
| 5,975 |
def test_not_found_error(client):
"""Tests response for invalid URLs"""
response = client.get('/')
assert b'Resource not found' in response.data
assert response.status_code == 404
| 5,976 |
def prepare_w16():
"""
Prepare a 16-qubit W state using sqrt(iswaps) and local gates,
respecting linear topology
"""
ket = qf.zero_state(16)
circ = w16_circuit()
ket = circ.run(ket)
return ket
| 5,977 |
def get_All_Endpoints(config):
"""
    :return: list of endpoint resources returned by the ISE ERS endpoint API
"""
url = 'https://{}:9060/ers/config/endpoint'.format(config['hostname'])
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
body = {}
response = requests.request('GET', url, headers=headers, data=body, auth=HTTPBasicAuth('Admin', 'C1sco12345'), verify=False)
result = response.json()
return result['SearchResult']['resources']
| 5,978 |
def download_if_needed(folder_name):
""" Folder name will be saved as `.cache/textattack/[folder name]`. If it
doesn't exist on disk, the zip file will be downloaded and extracted.
Args:
folder_name (str): path to folder or file in cache
Returns:
str: path to the downloaded folder or file on disk
"""
cache_dest_path = path_in_cache(folder_name)
os.makedirs(os.path.dirname(cache_dest_path), exist_ok=True)
# Use a lock to prevent concurrent downloads.
cache_dest_lock_path = cache_dest_path + '.lock'
cache_file_lock = filelock.FileLock(cache_dest_lock_path)
cache_file_lock.acquire()
# Check if already downloaded.
if os.path.exists(cache_dest_path):
cache_file_lock.release()
return cache_dest_path
# If the file isn't found yet, download the zip file to the cache.
downloaded_file = tempfile.NamedTemporaryFile(
dir=config('CACHE_DIR'),
suffix='.zip', delete=False)
http_get(folder_name, downloaded_file)
# Move or unzip the file.
downloaded_file.close()
if zipfile.is_zipfile(downloaded_file.name):
unzip_file(downloaded_file.name, cache_dest_path)
else:
get_logger().info(f'Copying {downloaded_file.name} to {cache_dest_path}.')
shutil.copyfile(downloaded_file.name, cache_dest_path)
cache_file_lock.release()
# Remove the temporary file.
os.remove(downloaded_file.name)
get_logger().info(f'Successfully saved {folder_name} to cache.')
return cache_dest_path
| 5,979 |
def get_local_repository_directory():
""" Return settins.LOCAL_REPO_DIR.
Ruturn None on any errors.
"""
if os.path.isdir(settings.LOCAL_REPO_DIR):
return settings.LOCAL_REPO_DIR
else:
logger.error("Local repository directory not found. LOCAL_REPO_DIR: '{}'.".format(settings.LOCAL_REPO_DIR))
return None
| 5,980 |
def synthetic_peptides_by_subsequence(
num_peptides,
fraction_binders=0.5,
lengths=range(8, 20),
binding_subsequences=["A?????Q"]):
"""
Generate a toy dataset where each peptide is a binder if and only if it
has one of the specified subsequences.
Parameters
----------
num_peptides : int
Number of rows in result
fraction_binders : float
Fraction of rows in result where "binder" col is 1
lengths : dict, Series, or list
If a dict or Series, then this should map lengths to the fraction of the
result to have the given peptide length. If it's a list of lengths then
all lengths are given equal weight.
binding_subsequences : list of string
Peptides with any of the given subsequences will be considered binders.
Question marks ("?") in these sequences will be replaced by random
amino acids.
Returns
----------
pandas.DataFrame, indexed by peptide sequence. The "binder" column is a
binary indicator for whether the peptide is a binder.
"""
if not isinstance(lengths, dict):
lengths = dict((length, 1.0) for length in lengths)
    lengths_series = pandas.Series(lengths)
    lengths_series /= lengths_series.sum()  # normalize the weights so they sum to 1
num_binders = int(round(num_peptides * fraction_binders))
num_non_binders = num_peptides - num_binders
print(num_binders, num_non_binders)
peptides = []
# Generate non-binders
    for (length, weight) in lengths_series.items():
peptides.extend(
random_peptides(round(weight * num_non_binders), round(length)))
for binding_core in binding_subsequences:
# Generate binders
        lengths_binders = lengths_series.loc[
            lengths_series.index >= len(binding_core)
        ]
normalized_lengths_binders = (
lengths_binders /
lengths_binders.sum() /
len(binding_subsequences))
        for (length, weight) in normalized_lengths_binders.items():
if length >= len(binding_core):
num_peptides_to_make = int(round(weight * num_binders))
if length == len(binding_core):
start_positions = [0] * num_peptides_to_make
else:
start_positions = numpy.random.choice(
length - len(binding_core), num_peptides_to_make)
peptides.extend(
"".join([
random_peptides(1, length=start_position)[0],
binding_core,
random_peptides(1, length=length - len(
binding_core) - start_position)[0],
])
for start_position in start_positions)
df = pandas.DataFrame(index=set(peptides))
df["binder"] = False
for binding_core in binding_subsequences:
df["binder"] = df["binder"] | df.index.str.contains(
binding_core,
regex=False)
def replace_question_marks(s):
while "?" in s:
s = s.replace("?", numpy.random.choice(AMINO_ACIDS))
return s
df.index = df.index.map(replace_question_marks)
df_shuffled = df.sample(frac=1)
return df_shuffled
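# Hedged usage sketch for synthetic_peptides_by_subsequence() above. Relies on
# the module's own random_peptides() / AMINO_ACIDS helpers and its pandas/numpy
# imports; the numbers are only illustrative.
toy_df = synthetic_peptides_by_subsequence(
    num_peptides=200,
    fraction_binders=0.5,
    lengths=range(8, 12),
    binding_subsequences=["A?????Q"],
)
print(toy_df["binder"].mean())   # close to 0.5, up to rounding and deduplication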
| 5,981 |
def gce_zones() -> list:
"""Returns the list of GCE zones"""
_bcds = dict.fromkeys(['us-east1', 'europe-west1'], ['b', 'c', 'd'])
_abcfs = dict.fromkeys(['us-central1'], ['a', 'b', 'c', 'f'])
_abcs = dict.fromkeys(
[
'us-east4',
'us-west1',
'europe-west4',
'europe-west3',
'europe-west2',
'asia-east1',
'asia-southeast1',
'asia-northeast1',
'asia-south1',
'australia-southeast1',
'southamerica-east1',
'asia-east2',
'asia-northeast2',
'europe-north1',
'europe-west6',
'northamerica-northeast1',
'us-west2',
],
['a', 'b', 'c'],
)
_zones_combo = {**_bcds, **_abcfs, **_abcs}
zones = [f'{loc}-{zone}' for loc, zones in _zones_combo.items() for zone in zones]
return zones
| 5,982 |
def main() -> None:
"""Main function"""
# get command-line args
args = parseargs()
# load configuration
if is_none_or_empty(args.conf):
raise ValueError('config file not specified')
with open(args.conf, 'rb') as f:
config = json.load(f)
logger.debug('loaded config from {}: {}'.format(args.conf, config))
del args
# create federation processor
fed_processor = FederationProcessor(config)
# run the poller
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(
fed_processor.poll_for_federations(loop)
)
except Exception as exc:
logger.exception(str(exc))
finally:
handlers = logger.handlers[:]
for handler in handlers:
handler.close()
logger.removeHandler(handler)
try:
fed_processor.fdh.unmount_file_storage()
except Exception as exc:
logger.exception(str(exc))
| 5,983 |
def empty_netbox_query():
"""Return an empty list to a list query."""
value = {
"count": 0,
"next": None,
"previous": None,
"results": [],
}
return value
| 5,984 |
def get_process_metrics(proc):
""" Extracts CPU times, memory infos and connection infos about a given
process started via Popen(). Also obtains the return code. """
p = psutil.Process(proc.pid)
max_cpu = [0, 0]
max_mem = [0, 0]
conns = []
while proc.poll() is None:
try:
cpu = list(p.cpu_times())
mem = list(p.memory_info())
conns = p.connections('all')
for child in p.children(recursive=True):
c_cpu = list(child.cpu_times())
c_mem = list(child.memory_info())
cpu[0] += c_cpu[0]
cpu[1] += c_cpu[1]
mem[0] += c_mem[0]
mem[1] += c_mem[1]
if max_cpu[0] < cpu[0]:
max_cpu = cpu
if max_mem[0] < mem[0]:
max_mem = mem
except (psutil.AccessDenied, psutil.NoSuchProcess):
pass
time.sleep(1)
retcode = proc.wait()
return retcode, max_cpu, max_mem, conns
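# Hedged usage sketch for get_process_metrics() above: profile a short-lived
# child process. Assumes psutil and time are imported as in the snippet above.
import subprocess
import sys

child = subprocess.Popen([sys.executable, "-c", "import time; time.sleep(2)"])
retcode, max_cpu, max_mem, conns = get_process_metrics(child)
print(retcode, max_cpu[0], max_mem[0], len(conns))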
| 5,985 |
def tau_from_T(Tobs, Tkin):
"""
Line optical depth from observed temperature and excitation temperature in Kelvin
"""
tau = -np.log(1.-(Tobs/Tkin))
return tau
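# Quick numerical check of tau_from_T() above: for Tobs = 5 K and Tkin = 10 K
# the optical depth is -ln(1 - 0.5) = ln(2) ≈ 0.693.
print(tau_from_T(5.0, 10.0))   # ~0.6931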
| 5,986 |
def check_missing_files():
"""
check for files that are in exposure_files but not on S3
"""
from grizli.aws import db
import boto3
from tqdm import tqdm
from grizli import utils
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
bkt = s3.Bucket('grizli-v2')
files = db.SQL("select assoc, file,extension from exposure_files group by assoc, file,extension order by assoc")
exists = []
for a, f, e in tqdm(zip(files['assoc'], files['file'], files['extension'])):
s3_prefix = f'HST/Pipeline/{a}/Prep/{f}_{e}.fits'
xfiles = [obj.key for obj in bkt.objects.filter(Prefix=s3_prefix)]
exists.append(len(xfiles))
if len(xfiles) == 0:
print(a, f, e)
exists = np.array(exists)
miss = exists == 0
m = utils.GTable()
m['miss_file'] = files['file'][miss]
m['miss_assoc'] = files['assoc'][miss]
db.send_to_database('missing_files', m)
db.execute("""UPDATE exposure_files e
set modtime = 99999
from missing_files m
where assoc = m.miss_assoc
""")
db.execute("""UPDATE assoc_table e
set status = 0
from missing_files m
where assoc_name = m.miss_assoc
""")
db.SQL(f"""SELECT count(*)
FROM mosaic_tiles_exposures t, exposure_files e
WHERE t.expid = e.eid AND e.modtime > 90000""")
db.execute(f"""DELETE FROM mosaic_tiles_exposures t
USING exposure_files e
WHERE t.expid = e.eid AND e.modtime > 90000""")
db.execute(f"""DELETE FROM exposure_files
WHERE modtime > 90000""")
db.execute(f"""DELETE FROM shifts_log
USING missing_files m
WHERE shift_dataset = m.miss_file""")
db.execute(f"""DELETE FROM wcs_log
USING missing_files m
WHERE wcs_assoc = m.miss_assoc""")
db.execute('DROP TABLE missing_files')
# EXPTIME= 0
db.execute(f"""DELETE FROM mosaic_tiles_exposures t
USING exposure_files e
WHERE t.expid = e.eid AND e.exptime <= 0.""")
db.execute(f"""DELETE FROM exposure_files
WHERE exptime <= 0""")
| 5,987 |
def direct_by_type(hostname, check_type, ports):
"""Send request to correct type checker """
try:
if check_type == 'tcp':
tcp_port_check(hostname, ports)
elif check_type == 'http':
http_port_check(hostname, ports)
else:
            dict_insert(errors, hostname, check_type + ' Unknown type detected')
except (AttributeError, TypeError) as e:
print("Error occurred:", e)
sys.exit()
| 5,988 |
def drateint(rate, totc, c, gammac, phim, drtinc, iaq):
"""
drateint(rate, totc, c, gammac, phim, drtinc, iaq)
Defined at ../src/drateint.f lines 131-267
Parameters
----------
rate : float
totc : float
c : float
gammac : float
phim : float
drtinc : float
iaq : int
"""
_min3p.f90wrap_drateint(rate=rate, totc=totc, c=c, gammac=gammac, phim=phim, \
drtinc=drtinc, iaq=iaq)
| 5,989 |
def mod_init(mlogger=None):
"""Oh please, please, turn me into a class ;-)
"""
global logger
logger = mlogger
| 5,990 |
def test_type_error_one(six_key_ht):
"""Type Error check when only one input is not HashTable."""
    with pytest.raises(TypeError) as err:
        left_join(six_key_ht, 15)
    assert 'Input must be HashTable.' in str(err.value)
| 5,991 |
def create_classes_names_list(training_set):
"""
:param training_set: dict(list, list)
:return: (list, list)
"""
learn_classes_list = []
for k, v in training_set.items():
learn_classes_list.extend([str(k)] * len(v))
return learn_classes_list
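# Minimal usage sketch for create_classes_names_list() above: one label per
# training sample, keyed by class; the sample values are made up.
example_set = {"cat": ["img0", "img1"], "dog": ["img2"]}
print(create_classes_names_list(example_set))   # -> ['cat', 'cat', 'dog']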
| 5,992 |
def getSpecialDistribution(queries, kind, burstCount=1, requestsPerBurst=1,
pauseLength=1.0):
"""get a distribution that virtualizes some specifique network situation.
totalTime is the total amount of time the query transmission will take.
Used parameters for the distributions:
- bursts: burstCount, requestsPerBurst, pauseLength(pause between bursts)"""
ret = []
i = 0
query = None
c = 0
if burstCount < 1 or requestsPerBurst < 1:
raise Exception("Invalid parameter for bursts mode")
if kind == "bursts":
for i in range(burstCount):
for j in range(requestsPerBurst):
if len(queries) != 0:
query = queries.pop()
else:
c += 1
if j == requestsPerBurst - 1:
ret.append( (query, pauseLength) )
else:
ret.append( (query, 0) )
if c > 0:
log.warning("Filled up with the last name {} times".format(c))
return ret
elif kind == "infinite":
# return a generator
        return loopList([(query, 0.0001) for query in queries])
elif kind == "file":
# TODO: take timestamps from some kind of file
raise Exception("Not yet implemented")
else:
raise Exception("Invalid kind of distribution: {}".format(kind))
| 5,993 |
async def bound_fetch(sem, session, url, method="GET", postdata="", **headers):
"""Deprecated, search aiohttp semaphore.
"""
async with sem:
await fetch(session, url, method, postdata, **headers)
| 5,994 |
def test_cat(index):
"""Perform several tests with varying execution times."""
time.sleep(1 + (index * 0.1))
assert True
| 5,995 |
def get_petastorm_dataset(cache_dir: str, partitions: int=4):
"""
This Dataloader assumes that the dataset has been converted to Delta table already
The Delta Table Schema is:
root
|-- sample_id: string (nullable = true)
|-- value: string (nullable = true)
|-- sample_label: string (nullable = true)
|-- filepath: string (nullable = true)
|-- filename: string (nullable = true)
|-- extension: string (nullable = true)
|-- set: string (nullable = true)
|-- label: integer (nullable = true)
See: TBD to Load and convert the aclImdb dataset from the tf sample dataset lib
Args:
        cache_dir: Cache directory for Petastorm
        partitions: Number of Petastorm partitions; needs to match the number of horovod threads / GPUs (TO CHECK)
    Returns:
        df_train: spark df of training data
        df_val: spark df of val data
        df_test: spark df of test data
        size_train: size of the training dataset for use in batch step calcs
        size_val: size of the val dataset for use in validation batch step calcs
        size_test: size of the test dataset for use in test batch step calcs
"""
from petastorm.spark import SparkDatasetConverter, make_spark_converter
spark.conf.set(SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF, cache_dir)
train_frame = spark.sql("select value, `label` \
from brian_petastorm_datasets.aclImdb_label \
where `set` = 'train'")
df_test = spark.sql("select value, `label` \
from brian_petastorm_datasets.aclImdb_label \
where `set` = 'test'")
df_train, df_val = train_frame.randomSplit([0.8,0.2], seed=12345)
    # repartition() returns a new DataFrame, so the result must be reassigned
    df_train = df_train.repartition(partitions)
    df_val = df_val.repartition(partitions)
    df_test = df_test.repartition(partitions)
size_train = df_train.count()
size_val = df_val.count()
size_test = df_test.count()
return df_train, df_val, df_test, size_train, size_val, size_test
| 5,996 |
def get_corners(square_to_edges, edge_to_squares):
"""Get squares ids of squares which place in grid in corner."""
return get_squares_with_free_edge(square_to_edges, edge_to_squares, 2)
| 5,997 |
def test_main_u_opt_one_glyph_in_common(fixtures_dir, tmp_path):
"""
The fonts have one glyph in common; the option is to use the union of
names, so all three SVGs should be saved.
"""
ab_font_path = os.path.join(fixtures_dir, 'ab.ttf')
bc_font_path = os.path.join(fixtures_dir, 'bc.ttf')
output_folder = str(tmp_path)
main([ab_font_path, bc_font_path, '-c', 'ff00ff,00ff00', '-u',
'-o', output_folder])
assert os.path.exists(os.path.join(output_folder, 'a.svg'))
assert os.path.exists(os.path.join(output_folder, 'b.svg'))
assert os.path.exists(os.path.join(output_folder, 'c.svg'))
| 5,998 |
async def get_untagged_joke():
"""
    Gets an untagged joke from the jokes table and returns it.
:return: json = {joke_id, joke}
"""
df = jokes.get_untagged_joke()
if not df.empty:
response = {"joke": df["joke"][0], "joke_id": int(df["id"][0])}
else:
response = {"joke": "No more jokes to tag", "joke_id": -1}
return response
| 5,999 |