| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
| --- | --- |
def command_line_parsing():
""" Parse the command line arguments, set global TESTING and return the
current position as a tuple (either default or one given on command line """
global TESTING
parser = argparse.ArgumentParser(description='Food Truck Finder.')
parser.add_argument('latlong', metavar='latlong', type=str, nargs='?',
help='current location as latitude,longitude ' \
'(no spaces)')
parser.add_argument('--test', dest='am_testing', action='store_const',
const=True, default=False,
help='testing mode with canned data')
args = parser.parse_args()
TESTING = args.am_testing
if args.latlong is None:
return DEFAULT_POSITION
parts = args.latlong.split(',')
return (float(parts[0]), float(parts[1]))
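# A minimal usage sketch; the script name and coordinates below are hypothetical,
# and DEFAULT_POSITION / TESTING are assumed to be defined at module level, e.g.
#   python food_truck_finder.py 37.7793,-122.4192
#   python food_truck_finder.py --test
if __name__ == "__main__":
    position = command_line_parsing()
    print("Searching near", position)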
| 5,100 |
def read_samplesheet(config):
"""
    Read the flowcell SampleSheet.csv and return the barcode kit plus a per-sample dictionary keyed by Sample_ID.
"""
sample_sheet = pd.read_csv(config["info_dict"]["flowcell_path"]+"/SampleSheet.csv",
sep = ",", skiprows=[0])
# sample_sheet = sample_sheet.fillna("no_bc")
sample_sheet['I7_Index_ID'] = sample_sheet['I7_Index_ID'].str.replace('No_index1','no_bc', regex = True) # TODO!! need to be applied on bc kit too!
# assert(len(sample_sheet["barcode_kits"].unique())==1)
# bc_kit = sample_sheet["barcode_kits"].unique()[0]
if any(sample_sheet['I7_Index_ID'].str.contains('no_bc')):
bc_kit = "no_bc"
else:
bc_kit = "SQK-PCB109" # TODO just for testing
print(sample_sheet)
data=dict()
for index, row in sample_sheet.iterrows():
assert(row["Sample_ID"] not in data.keys())
data[row["Sample_ID"]] = dict({"Sample_Name": row["Sample_Name"],
"Sample_Project": row["Sample_Project"],
# "barcode_kits": row["barcode_kits"], TODO
"barcode_kits": bc_kit, # TODO just for testing
"index_id": row["I7_Index_ID"],
"Sample_ID": row["Sample_ID"]})
print(bc_kit)
return bc_kit, data
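# A minimal usage sketch; the flowcell path is hypothetical and must contain a
# SampleSheet.csv with the columns accessed above.
example_config = {"info_dict": {"flowcell_path": "/data/flowcells/FC001"}}
bc_kit, samples = read_samplesheet(example_config)
for sample_id, info in samples.items():
    print(sample_id, info["Sample_Project"], info["barcode_kits"])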
| 5,101 |
def convert_to_float_if_possible(x, elsevalue=MISSING):
"""
    Return the float version of value x, or elsevalue (MISSING, or another specified value)
    if the conversion fails.
"""
if isnonnumeric(x):
return elsevalue
else:
return float(x)
| 5,102 |
def resolve(
names: Union[list, pd.Series, str],
data_source_ids: list = None,
resolve_once: bool = False,
best_match_only: bool = False,
with_context: bool = False,
with_vernaculars: bool = False,
with_canonical_ranks: bool = False
) -> pd.DataFrame:
"""
Receives a list of names and resolves each against the entire resolver
database or against specific data sources using the Global Names
Resolver (GNR) API. Underlying resolving and scoring algorithms are
described at: http://resolver.globalnames.org/about
Parameters
----------
names
List of species names to resolve.
data_source_ids
List of specific data sources IDs to resolve against. A list of
all the available data sources and their IDs can be found at:
http://resolver.globalnames.org/data_sources.
resolve_once
Find the first available match instead of matches across all data
sources with all possible renderings of a name.
best_match_only
Returns just one result with the highest score.
with_context
Reduce the likelihood of matches to taxonomic homonyms. When True,
a common taxonomic context is calculated for all supplied names
from matches in data sources that have classification tree paths.
Names out of determined context are penalized during score
calculation.
with_vernaculars
Return 'vernacular' field to present common names provided by a
data source for a particular match.
with_canonical_ranks
Returns 'canonical_form' with infraspecific ranks, if they are
present.
Returns
-------
pd.DataFrame
DataFrame where rows are the result for each match.
"""
if isinstance(names, str):
names = [names]
if data_source_ids is None:
data_source_ids = []
# Apparently, the GNR API does not accept Booleans so they need to be
# converted to lowercase strings first.
params = {
"data": "\n".join(names),
"data_source_ids": "|".join(data_source_ids),
"resolve_once": str(resolve_once).lower(),
"best_match_only": str(best_match_only).lower(),
"with_context": str(with_context).lower(),
"with_vernaculars": str(with_vernaculars).lower(),
"with_canonical_ranks": str(with_canonical_ranks).lower()
}
try:
response = requests.post(API_URL, json=params)
response.raise_for_status()
except requests.exceptions.HTTPError as err:
raise Exception(f"Error calling Global Name Resolver API. {err}")
data = response.json()["data"]
# The pd.json_normalize() function does not work when record_path
# is not found in every single item inside the list of elements
# passed. In some cases, the GNR API returns items without this key,
# so it needs to be added (including an empty dictionary) before
# normalizing the result.
for item in data:
if "results" not in item:
item["results"] = [{}]
return pd.json_normalize(data, record_path="results", meta="supplied_name_string")
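# A minimal usage sketch; it requires network access to the GNR API and the
# module-level API_URL, and the columns of the returned DataFrame depend on the
# fields sent back by the resolver.
matches = resolve(["Puma concolor", "Felis concolor"], best_match_only=True)
print(matches.columns.tolist())
print(matches.head())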
| 5,103 |
def ResultObject(): # real signature unknown; restored from __doc__
"""
ResultObject()
ResultObject(success: bool)
ResultObject(success: bool,message: str)
"""
pass
| 5,104 |
def polarisation_frame_from_wcs(wcs, shape) -> PolarisationFrame:
"""Convert wcs to polarisation_frame
See FITS definition in Table 29 of https://fits.gsfc.nasa.gov/standard40/fits_standard40draft1.pdf
or subsequent revision
1 I Standard Stokes unpolarized
2 Q Standard Stokes linear
3 U Standard Stokes linear
4 V Standard Stokes circular
−1 RR Right-right circular
−2 LL Left-left circular
−3 RL Right-left cross-circular
−4 LR Left-right cross-circular
−5 XX X parallel linear
−6 YY Y parallel linear
−7 XY XY cross linear
−8 YX YX cross linear
stokesI [1]
stokesIQUV [1,2,3,4]
circular [-1,-2,-3,-4]
linear [-5,-6,-7,-8]
For example::
pol_frame = polarisation_frame_from_wcs(im.wcs, im.shape)
:param wcs: World Coordinate System
:param shape: Shape corresponding to wcs
:returns: Polarisation_Frame object
"""
# The third axis should be stokes:
polarisation_frame = None
if len(shape) == 2:
polarisation_frame = PolarisationFrame("stokesI")
else:
npol = shape[1]
pol = wcs.sub(['stokes']).wcs_pix2world(range(npol), 0)[0]
pol = numpy.array(pol, dtype='int')
for key in PolarisationFrame.fits_codes.keys():
keypol = numpy.array(PolarisationFrame.fits_codes[key])
if numpy.array_equal(pol, keypol):
polarisation_frame = PolarisationFrame(key)
return polarisation_frame
if polarisation_frame is None:
raise ValueError("Cannot determine polarisation code")
assert isinstance(polarisation_frame, PolarisationFrame)
return polarisation_frame
| 5,105 |
def _update_traffic_class(class_name, class_type, **kwargs):
"""
Perform a PUT call to version-up a traffic class. This is required whenever entries of a traffic class are changed
in any way.
:param class_name: Alphanumeric name of the traffic class
:param class_type: Class type should be one of "ipv4," "ipv6," or "mac"
:param kwargs:
keyword s: requests.session object with loaded cookie jar
keyword url: URL in main() function
:return: True if successful, False otherwise
"""
traffic_class_data = _get_traffic_class(class_name, class_type, **kwargs)
# # must remove these fields from the data since they can't be modified
# traffic_class_data.pop('origin', None)
# traffic_class_data.pop('name', None)
# traffic_class_data.pop('type', None)
traffic_class_data['cfg_version'] = random.randrange(9007199254740991)
target_url = kwargs["url"] + "system/classes/%s,%s" % (class_name, class_type)
put_data = json.dumps(traffic_class_data, sort_keys=True, indent=4)
response = kwargs["s"].put(target_url, data=put_data, verify=False)
if not common_ops._response_ok(response, "PUT"):
logging.warning("FAIL: Updating %s traffic class '%s' failed with status code %d: %s"
% (class_type, class_name, response.status_code, response.text))
return False
else:
logging.info("SUCCESS: Updating %s traffic class '%s' succeeded" % (class_type, class_name))
return True
| 5,106 |
def eff_w_error(n_before, n_after):
"""
    Compute the efficiency n_after / n_before and its binomial error.
    n_before = entries before the cut
    n_after = entries after the cut
"""
eff = n_after/n_before
eff_error = np.sqrt(eff*(1-eff)/n_before)
return (eff, eff_error)
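# A small worked example of the binomial-error formula above, assuming
# eff_w_error() is in scope: 800 of 1000 events survive a cut.
eff, err = eff_w_error(n_before=1000, n_after=800)
print(f"efficiency = {eff:.3f} +/- {err:.4f}")  # 0.800 +/- 0.0126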
| 5,107 |
def hurst(x):
"""Estimate Hurst exponent on a timeseries.
The estimation is based on the second order discrete derivative.
Parameters
----------
    x : 2D numpy array
        The timeseries to estimate the Hurst exponent for, one signal per row
        (the filters below operate along axis=1).
    Returns
    -------
    h : 1D numpy array
        The estimated Hurst exponent for each row of x.
"""
y = np.cumsum(np.diff(x, axis=1), axis=1)
b1 = [1, -2, 1]
b2 = [1, 0, -2, 0, 1]
# second order derivative
y1 = scipy.signal.lfilter(b1, 1, y, axis=1)
y1 = y1[:, len(b1) - 1:-1] # first values contain filter artifacts
# wider second order derivative
y2 = scipy.signal.lfilter(b2, 1, y, axis=1)
y2 = y2[:, len(b2) - 1:-1] # first values contain filter artifacts
s1 = np.mean(y1 ** 2, axis=1)
s2 = np.mean(y2 ** 2, axis=1)
return 0.5 * np.log2(s2 / s1)
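# A minimal usage sketch, assuming hurst() above is in scope. Each row of x is
# treated as a separate signal; for ordinary Brownian-motion paths the estimate
# should come out close to 0.5.
import numpy as np

rng = np.random.default_rng(0)
x = np.cumsum(rng.standard_normal((3, 2048)), axis=1)  # 3 Brownian-motion paths
print(hurst(x))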
| 5,108 |
def get_models(download_models=None):
"""
    Download and extract all models from their remote servers. To add a new model,
    append it to the 'models' dict with a 'server_location' that points to a zip
    file and a 'local_location' to download and extract it to. The optional
    'root_folder_names' entry lists the folders the zip extracts to and is used to
    skip models that have already been downloaded. To download only a subset of
    models, pass their names in download_models.
"""
down_models = {}
if download_models:
for i in download_models:
if i not in models:
raise KeyError(
"{} does not exist, please chose from the following models: {}".format(
i, [i for i in models.keys()]
)
)
else:
down_models[i] = models[i]
else:
down_models = models
for model_name, model_info in down_models.items():
# Only download if the file doesn't already exist
exists = False
if model_info["root_folder_names"]:
for folder in model_info["root_folder_names"]:
if Path(current_path, model_info["local_location"], folder).exists():
exists = True
break
if not exists:
print(f"Downloading {model_name} model...")
with request.urlopen(model_info["server_location"]) as url:
length = int(url.headers.get("content-length"))
stream = BytesIO()
block_size = max(4096, length // 20)
with tqdm(total=length, position=0) as tq:
while True:
data = url.read(block_size)
if not data:
break
stream.write(data)
tq.update(block_size)
print(f"Extracting {model_name} model...")
with ZipFile(stream) as zipped:
zipped.extractall(Path(current_path, model_info["local_location"]))
| 5,109 |
def explode_on_matched_columns(df, safe_columns, other_columns):
"""Given the name of multiple columns where each entry is a string encoding
a list, and where for each row the lists in all columns are the same length,
    return a dataframe where each row is transformed into len(list)
rows, each of which contains one entry of the various lists and the
remaining columns are identical.
The columns are split into 'safe_columns', which must always contain strings
that encode lists and 'other_columns' which can sometimes be np.nan. If
a column from other_columns has a np.nan entry in some row, it will be
replaced with a list of np.nan values, with the list the same length
as the lists in safe_columns for that row.
Lists from different rows need not have the same number of elements."""
stringlist_columns = safe_columns + other_columns
copied_df = df.copy()
# Only keep rows where at least one of the stringlist columns is present
copied_df = copied_df.dropna(subset=stringlist_columns, how='all')
# Map the safe columns from strings (strings encoding lists) to lists
for stringlist_column in safe_columns:
copied_df[stringlist_column] = copied_df[stringlist_column].map(yaml.safe_load)
for column in other_columns:
# Replace any nan values with an empty list, matching the list lengths
# from one of the safe columns
copied_df[column] = replace_nan_with_empty_list(column,
safe_columns[0],
copied_df)
exploded = pd.DataFrame({
col:np.repeat(copied_df[col].values, copied_df[stringlist_columns[0]].str.len())
for col in copied_df.columns.drop(stringlist_columns)}
)
exploded_with_col = exploded.assign(**{column_to_expand:np.concatenate(copied_df[column_to_expand].values)
for column_to_expand in stringlist_columns})[df.columns]
return exploded_with_col
| 5,110 |
def plot_time_series_graph(val_matrix,
var_names=None,
fig_ax=None,
figsize=None,
sig_thres=None,
link_matrix=None,
link_colorbar_label='MCI',
save_name=None,
link_width=None,
arrow_linewidth=20.,
vmin_edges=-1,
vmax_edges=1.,
edge_ticks=.4,
cmap_edges='RdBu_r',
order=None,
node_size=10,
arrowhead_size=20,
curved_radius=.2,
label_fontsize=10,
alpha=1.,
node_label_size=10,
label_space_left=0.1,
label_space_top=0.,
network_lower_bound=0.2,
undirected_style='dashed'
):
"""Creates a time series graph.
This is still in beta. The time series graph's links are colored by
val_matrix.
Parameters
----------
val_matrix : array_like
Matrix of shape (N, N, tau_max+1) containing test statistic values.
var_names : list, optional (default: None)
List of variable names. If None, range(N) is used.
fig_ax : tuple of figure and axis object, optional (default: None)
Figure and axes instance. If None they are created.
figsize : tuple
Size of figure.
sig_thres : array-like, optional (default: None)
Matrix of significance thresholds. Must be of same shape as val_matrix.
Either sig_thres or link_matrix has to be provided.
link_matrix : bool array-like, optional (default: None)
Matrix of significant links. Must be of same shape as val_matrix. Either
sig_thres or link_matrix has to be provided.
save_name : str, optional (default: None)
Name of figure file to save figure. If None, figure is shown in window.
link_colorbar_label : str, optional (default: 'MCI')
Test statistic label.
link_width : array-like, optional (default: None)
Array of val_matrix.shape specifying relative link width with maximum
given by arrow_linewidth. If None, all links have same width.
order : list, optional (default: None)
order of variables from top to bottom.
    arrow_linewidth : float, optional (default: 20)
Linewidth.
vmin_edges : float, optional (default: -1)
Link colorbar scale lower bound.
vmax_edges : float, optional (default: 1)
Link colorbar scale upper bound.
edge_ticks : float, optional (default: 0.4)
Link tick mark interval.
cmap_edges : str, optional (default: 'RdBu_r')
Colormap for links.
    node_size : int, optional (default: 10)
Node size.
arrowhead_size : int, optional (default: 20)
Size of link arrow head. Passed on to FancyArrowPatch object.
    curved_radius : float, optional (default: 0.2)
Curvature of links. Passed on to FancyArrowPatch object.
label_fontsize : int, optional (default: 10)
Fontsize of colorbar labels.
alpha : float, optional (default: 1.)
Opacity.
node_label_size : int, optional (default: 10)
Fontsize of node labels.
link_label_fontsize : int, optional (default: 6)
Fontsize of link labels.
label_space_left : float, optional (default: 0.1)
Fraction of horizontal figure space to allocate left of plot for labels.
label_space_top : float, optional (default: 0.)
Fraction of vertical figure space to allocate top of plot for labels.
network_lower_bound : float, optional (default: 0.2)
Fraction of vertical space below graph plot.
undirected_style : string, optional (default: 'dashed')
Style of undirected contemporaneous links.
"""
import networkx
if fig_ax is None:
fig = pyplot.figure(figsize=figsize)
ax = fig.add_subplot(111, frame_on=False)
else:
fig, ax = fig_ax
if sig_thres is None and link_matrix is None:
raise ValueError("Need to specify either sig_thres or link_matrix")
elif sig_thres is not None and link_matrix is None:
link_matrix = np.abs(val_matrix) >= sig_thres
if link_width is not None and not np.all(link_width >= 0.):
raise ValueError("link_width must be non-negative")
N, N, dummy = val_matrix.shape
tau_max = dummy - 1
max_lag = tau_max + 1
if var_names is None:
var_names = range(N)
if order is None:
order = range(N)
if set(order) != set(range(N)):
raise ValueError("order must be a permutation of range(N)")
def translate(row, lag):
return row * max_lag + lag
# Define graph links by absolute maximum (positive or negative like for
# partial correlation)
tsg = np.zeros((N * max_lag, N * max_lag))
tsg_attr = np.zeros((N * max_lag, N * max_lag))
for i, j, tau in np.column_stack(np.where(link_matrix)):
# print '\n',i, j, tau
# print np.where(nonmasked[:,j])[0]
for t in range(max_lag):
if (0 <= translate(i, t - tau) and
translate(i, t - tau) % max_lag <= translate(j, t) % max_lag):
# print translate(i, t-tau), translate(j, t), val_matrix[i,j,tau]
tsg[translate(i, t - tau), translate(j, t)
] = val_matrix[i, j, tau]
tsg_attr[translate(i, t - tau), translate(j, t)
] = val_matrix[i, j, tau]
G = networkx.DiGraph(tsg)
# node_color = np.zeros(N)
# list of all strengths for color map
all_strengths = []
# Add attributes, contemporaneous and directed links are handled separately
for (u, v, dic) in G.edges(data=True):
dic['directed_attribute'] = None
if u != v:
if u % max_lag == v % max_lag:
dic['undirected'] = True
dic['directed'] = False
else:
dic['undirected'] = False
dic['directed'] = True
dic['undirected_alpha'] = alpha
dic['undirected_color'] = _get_absmax(
np.array([[[tsg_attr[u, v],
tsg_attr[v, u]]]])
).squeeze()
dic['undirected_width'] = arrow_linewidth
all_strengths.append(dic['undirected_color'])
dic['directed_alpha'] = alpha
dic['directed_width'] = arrow_linewidth
# value at argmax of average
dic['directed_color'] = tsg_attr[u, v]
all_strengths.append(dic['directed_color'])
dic['label'] = None
dic['directed_edge'] = False
dic['directed_edgecolor'] = None
dic['undirected_edge'] = False
dic['undirected_edgecolor'] = None
# If no links are present, set value to zero
if len(all_strengths) == 0:
all_strengths = [0.]
posarray = np.zeros((N * max_lag, 2))
for i in range(N * max_lag):
posarray[i] = np.array([(i % max_lag), (1. - i // max_lag)])
pos_tmp = {}
for i in range(N * max_lag):
# for n in range(N):
# for tau in range(max_lag):
# i = n*N + tau
pos_tmp[i] = np.array([((i % max_lag) - posarray.min(axis=0)[0]) /
(posarray.max(axis=0)[0] -
posarray.min(axis=0)[0]),
((1. - i // max_lag) -
posarray.min(axis=0)[1]) /
(posarray.max(axis=0)[1] -
posarray.min(axis=0)[1])])
pos = {}
for n in range(N):
for tau in range(max_lag):
pos[n * max_lag + tau] = pos_tmp[order[n] * max_lag + tau]
node_rings = {0: {'sizes': None, 'color_array': None,
'label': '', 'colorbar': False,
}
}
# ] for v in range(max_lag)]
node_labels = ['' for i in range(N * max_lag)]
_draw_network_with_curved_edges(
fig=fig, ax=ax,
G=deepcopy(G), pos=pos,
# dictionary of rings: {0:{'sizes':(N,)-array, 'color_array':(N,)-array
# or None, 'cmap':string,
node_rings=node_rings,
# 'vmin':float or None, 'vmax':float or None, 'label':string or None}}
node_labels=node_labels, node_label_size=node_label_size,
node_alpha=alpha, standard_size=node_size,
standard_cmap='OrRd', standard_color='grey',
log_sizes=False,
cmap_links=cmap_edges, links_vmin=vmin_edges,
links_vmax=vmax_edges, links_ticks=edge_ticks,
cmap_links_edges='YlOrRd', links_edges_vmin=-1., links_edges_vmax=1.,
links_edges_ticks=.2, link_edge_colorbar_label='link_edge',
arrowstyle='simple', arrowhead_size=arrowhead_size,
curved_radius=curved_radius, label_fontsize=label_fontsize,
label_fraction=.5,
link_colorbar_label=link_colorbar_label, undirected_curved=True,
network_lower_bound=network_lower_bound,
undirected_style=undirected_style
)
for i in range(N):
trans = transforms.blended_transform_factory(
fig.transFigure, ax.transData)
ax.text(label_space_left, pos[order[i] * max_lag][1],
'%s' % str(var_names[order[i]]), fontsize=label_fontsize,
horizontalalignment='left', verticalalignment='center',
transform=trans)
for tau in np.arange(max_lag - 1, -1, -1):
trans = transforms.blended_transform_factory(
ax.transData, fig.transFigure)
if tau == max_lag - 1:
ax.text(pos[tau][0], 1.-label_space_top, r'$t$',
fontsize=label_fontsize,
horizontalalignment='center',
verticalalignment='top', transform=trans)
else:
ax.text(pos[tau][0], 1.-label_space_top,
r'$t-%s$' % str(max_lag - tau - 1),
fontsize=label_fontsize,
horizontalalignment='center', verticalalignment='top',
transform=trans)
# fig.subplots_adjust(left=0.1, right=.98, bottom=.25, top=.9)
# savestring = os.path.expanduser(save_name)
if save_name is not None:
pyplot.savefig(save_name)
else:
pyplot.show()
| 5,111 |
def get_configuration_docname(doctype=None, txt=None, searchfield=None, start=None, page_len=None, filters=None):
"""get relevant fields of the configuration doctype"""
return frappe.db.sql("""select soi.configuration_docname, so.name, so.customer from `tabSales Order Item` soi
inner join `tabSales Order` so on soi.parent=so.name where
soi.configuration_doctype = %(configuration_doctype)s and soi.configuration_docname is not null
and (soi.configuration_docname like %(txt)s or so.name like %(txt)s)""",
{'configuration_doctype':filters.get('configuration_doctype'),
'txt': "%%%s%%" % txt})
| 5,112 |
def run_cnn_dist(
X_bytes: bytes,
) -> bytes:
"""Run distributed CNN on bytes_in and return the calculated result."""
X = pickle.loads(X_bytes)
# TODO: <He> Process the X data with the fancy neural network.
result_data = X
# MARK: Metadata could be added here to mark the processing status of the
# data.
bytes_out = pickle.dumps(result_data)
return bytes_out
| 5,113 |
def bootstrap(command, conf, vars):
"""Place any commands to setup weeehire here"""
    # <websetup.bootstrap.before.auth>
from sqlalchemy.exc import IntegrityError
try:
a = model.User()
a.user_name = env['ADMIN_USERNAME']
a.display_name = env['ADMIN_USERNAME']
a.email_address = env['ADMIN_EMAIL']
a.password = env['ADMIN_PASS']
a.created = datetime.now()
model.DBSession.add(a)
g = model.Group()
g.group_name = 'managers'
g.display_name = 'Managers Group'
g.users.append(a)
model.DBSession.add(g)
p = model.Permission()
p.permission_name = 'manage'
p.description = 'This permission gives an administrative right'
p.groups.append(g)
model.DBSession.add(p)
r = model.Recruiter()
r.name = "Hyd3L"
r.telegram = "@Hyd3L"
model.DBSession.add(r)
model.DBSession.flush()
transaction.commit()
except IntegrityError:
print('Warning, there was a problem adding your auth data, '
'it may have already been added:')
import traceback
print(traceback.format_exc())
transaction.abort()
print('Continuing with bootstrapping...')
# <websetup.bootstrap.after.auth>
| 5,114 |
def skymapper_search(searchrad,waveband,targetra,targetdec):
""" Search for stars within search radius of target in Skymapper
catalogue
"""
# set up arrays and url
star_ra = []
star_dec = []
star_mag = []
star_magerr = []
sky_ra = []
sky_dec = []
sky_u_petro = []
sky_u_petro_err = []
sky_u_psf = []
sky_u_psf_err = []
sky_v_petro = []
sky_v_petro_err = []
sky_v_psf = []
sky_v_psf_err = []
sky_g_petro = []
sky_g_petro_err = []
sky_g_psf = []
sky_g_psf_err = []
sky_r_petro = []
sky_r_petro_err = []
sky_r_psf = []
sky_r_psf_err = []
sky_i_petro = []
sky_i_petro_err = []
sky_i_psf = []
sky_i_psf_err = []
sky_z_petro = []
sky_z_petro_err = []
sky_z_psf = []
sky_z_psf_err = []
sr_deg = float(searchrad*0.0166667)
sky_url = "http://skymapper.anu.edu.au/sm-cone/query?RA={0}&DEC={1}&SR={2}"
sky_url = sky_url.format(targetra,targetdec,sr_deg)
# Attempt to parse url to find stars within search radius of filter
try:
skytable = requests.get(sky_url,timeout=30).text
sc = 0
for lines in skytable.split('<TR>'):
sc += 1
if sc >= 2:
columns = re.split("<TD>|</TD>|\n",lines)
sky_ra.append(columns[5])
sky_dec.append(columns[7])
sky_u_petro.append(columns[33])
sky_u_petro_err.append(columns[35])
sky_u_psf.append(columns[29])
sky_u_psf_err.append(columns[31])
sky_v_petro.append(columns[41])
sky_v_petro_err.append(columns[43])
sky_v_psf.append(columns[37])
sky_v_psf_err.append(columns[39])
sky_g_petro.append(columns[49])
sky_g_petro_err.append(columns[51])
sky_g_psf.append(columns[45])
sky_g_psf_err.append(columns[47])
sky_r_petro.append(columns[57])
sky_r_petro_err.append(columns[59])
sky_r_psf.append(columns[53])
sky_r_psf_err.append(columns[55])
sky_i_petro.append(columns[65])
sky_i_petro_err.append(columns[67])
sky_i_psf.append(columns[61])
sky_i_psf_err.append(columns[63])
sky_z_petro.append(columns[73])
sky_z_petro_err.append(columns[75])
sky_z_psf.append(columns[69])
sky_z_psf_err.append(columns[71])
# Raise error if something goes wrong
except requests.exceptions.RequestException as e:
print ('\nException raised for Skymapper url!!')
print (e)
print ('')
# Save parsed star properties for a given filter and remove extended
# shaped sources
for i in range(len(sky_ra)):
if (sky_g_psf[i] != '' and sky_g_petro[i] != '' and
sky_r_psf[i] != '' and sky_r_petro[i] != ''):
if (np.abs(float(sky_g_psf[i]) - float(sky_g_petro[i])) < 0.25
and np.abs(float(sky_r_psf[i]) - float(sky_r_petro[i]))
< 0.25):
if waveband == 'V':
V_mag = float(sky_g_psf[i])-0.0038
V_mag = (V_mag-0.5784*(float(sky_g_psf[i])
-float(sky_r_psf[i])))
gerr = float(sky_g_psf_err[i])**2
rerr = float(sky_r_psf_err[i])**2
V_magerr = np.sqrt((0.5784*rerr)**2+(0.4216*gerr)**2)
star_mag.append(V_mag)
star_magerr.append(V_magerr)
star_ra.append(float(sky_ra[i]))
star_dec.append(float(sky_dec[i]))
if waveband == 'B':
B_mag = float(sky_g_psf[i])+0.2271
B_mag = (B_mag+0.3130*(float(sky_g_psf[i])-
float(sky_r_psf[i])))
gerr = float(sky_g_psf_err[i])**2
rerr = float(sky_r_psf_err[i])**2
B_magerr = np.sqrt((0.3130*rerr)**2+(1.3130*gerr)**2)
star_mag.append(B_mag)
star_magerr.append(B_magerr)
star_ra.append(float(sky_ra[i]))
star_dec.append(float(sky_dec[i]))
if waveband == 'R':
R_mag = float(sky_r_psf[i])-0.0971
R_mag = (R_mag-0.1837*(float(sky_g_psf[i])-
float(sky_r_psf[i])))
gerr = float(sky_g_psf_err[i])**2
rerr = float(sky_r_psf_err[i])**2
R_magerr = np.sqrt((1.1837*rerr)**2+(0.1837*gerr)**2)
star_mag.append(R_mag)
star_magerr.append(R_magerr)
star_ra.append(float(sky_ra[i]))
star_dec.append(float(sky_dec[i]))
if waveband == 'u':
if (sky_u_psf[i] != '' and sky_u_petro[i] != ''):
if (np.abs(float(sky_u_psf[i]) - float(sky_u_petro[i]))<0.25):
star_mag.append(float(sky_u_psf[i]))
star_magerr.append(float(sky_u_psf_err[i]))
star_ra.append(float(sky_ra[i]))
star_dec.append(float(sky_dec[i]))
if waveband == 'g':
if (sky_g_psf[i] != '' and sky_g_petro[i] != ''):
if (np.abs(float(sky_g_psf[i]) - float(sky_g_petro[i]))<0.25):
star_mag.append(float(sky_g_psf[i]))
star_magerr.append(float(sky_g_psf_err[i]))
star_ra.append(float(sky_ra[i]))
star_dec.append(float(sky_dec[i]))
if waveband == 'r':
if (sky_r_psf[i] != '' and sky_r_petro[i] != ''):
if (np.abs(float(sky_r_psf[i]) - float(sky_r_petro[i]))<0.25):
star_mag.append(float(sky_r_psf[i]))
star_magerr.append(float(sky_r_psf_err[i]))
star_ra.append(float(sky_ra[i]))
star_dec.append(float(sky_dec[i]))
if waveband == 'i' :
if (sky_i_psf[i] != '' and sky_i_petro[i] != ''):
if (np.abs(float(sky_i_psf[i]) - float(sky_i_petro[i]))<0.25):
star_mag.append(float(sky_i_psf[i]))
star_magerr.append(float(sky_i_psf_err[i]))
star_ra.append(float(sky_ra[i]))
star_dec.append(float(sky_dec[i]))
if waveband == 'z' :
if (sky_z_psf[i] != '' and sky_z_petro[i] != ''):
if (np.abs(float(sky_z_psf[i]) - float(sky_z_petro[i]))<0.25):
star_mag.append(float(sky_z_psf[i]))
star_magerr.append(float(sky_z_psf_err[i]))
star_ra.append(float(sky_ra[i]))
star_dec.append(float(sky_dec[i]))
# Create list with catalogue name
star_cat = ['SkyMapper'] * len(star_ra)
return star_ra,star_dec,star_mag,star_magerr,star_cat
| 5,115 |
def Decimal_to_Hexadecimal(x : str) -> str:
"""
    Converts the given decimal number (passed as a `str`) into the hexadecimal number system (base 16).
    Args:
        x `(str)` : Positional argument holding the decimal input from the user.
    Returns (str) : The output is a `str` containing the converted hexadecimal number.
"""
""" For Recognising the Dot """
list1 = list(x)
left = []
right = []
flag = False
for val in range(len(list1)):
if list1[val] == "." or flag == True:
if list1[val] != ".":
right.append(list1[val])
else:
flag = True
continue
else:
num = int(list1[val])
left.append(num)
""" For Shifting the left elements in list into a variable """
leftmost = 0
for val in left:
leftmost = leftmost*10 + val
""" For Shifting the right elements in list into a variable """
rightmost = ''
for val in right:
rightmost = rightmost + val
dict = {10: "A", 11 : "B", 12 : "C", 13 : "D", 14 : "E", 15 : "F"}
""" Calculation of the left part """
cur = 0
rem = 0
next = leftmost
list_of_numbers = []
while next != 0:
rem = next%16
if rem > 9:
if rem in dict:
rem = dict[rem]
list_of_numbers.append(rem)
else:
pass
else:
list_of_numbers.append(rem)
cur = next//16
next = cur
list_of_numbers.reverse()
numbers = ''
for val in range(len(list_of_numbers)):
string = str(list_of_numbers[val])
numbers = numbers + string
""" Calculation of the right part """
    if rightmost == '':
        rightmost = '0'  # guard: the input had no fractional part
    zeros = '1' + len(rightmost)*'0'
    length = int(zeros)
    next = int(rightmost)/length
list_of_numbers = []
length = 0
while length <= 20:
if next * 16< 1:
list_of_numbers.append(0)
next = (next * 16)
else:
next = (next * 16)
num2 = int(next)
if num2 > 9:
if num2 in dict:
alter = dict[num2]
list_of_numbers.append(alter)
else:
pass
else:
list_of_numbers.append(num2)
num = int(next)
next = next - num
pass
length += 1
numbers2 = ''
for val in range(len(list_of_numbers)):
number = str(list_of_numbers[val])
numbers2 = numbers2 + number
# print(f"The Decimal -> Hexadecimal Conversion is {numbers}.{numbers2.rstrip('0')}")
color = random.choice([RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN])
return f" {BOLD} {color} The Decimal -> Hexadecimal Conversion is {numbers}.{numbers2.rstrip('0')} {RESET}"
| 5,116 |
def hardcorenas_d(pretrained=False, **kwargs):
""" hardcorenas_D """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
['ir_r1_k5_s2_e3_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'],
['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25',
'ir_r1_k3_s1_e3_c80_se0.25'],
['ir_r1_k3_s1_e4_c112_se0.25', 'ir_r1_k5_s1_e4_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25',
'ir_r1_k5_s1_e3_c112_se0.25'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25',
'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_d', arch_def=arch_def, **kwargs)
return model
| 5,117 |
def refine(weights, trees, X, Y, epochs, lr, batch_size, optimizer, verbose):
"""Performs SGD using the MSE loss over the leaf nodes of the given trees on the given data. The weights of each tree are respected during optimization but not optimized.
Args:
weights (np.array): The weights of the trees.
trees (list of Tree): The trees.
X (2d np.array): The data.
        Y (np.array): The target.
epochs (int): The number of epochs SGD is performed.
lr (float): The learning rate of SGD.
batch_size (int): The batch size of SGD
optimizer (str): The optimizer used for optimization. Can be {{"sgd", "adam"}}.
verbose (bool): If True outputs the loss during optimization.
Returns:
list of trees: The refined trees.
"""
n_classes = trees[0].n_classes
if batch_size > X.shape[0]:
if verbose:
print("WARNING: The batch size for SGD is larger than the dataset supplied: batch_size = {} > X.shape[0] = {}. Using batch_size = X.shape[0]".format(batch_size, X.shape[0]))
batch_size = X.shape[0]
# To make the following SGD somewhat efficient this code extracts all the leaf nodes and gathers them in an array. To do so it iterates over all trees and all nodes in the trees. Each leaf node is added to the leafs array and the corresponding node.id is stored in mappings. For scikit-learn trees this would be much simpler as they already offer a dedicated leaf field:
# leafs = []
# for tree in trees:
# tmp = tree.tree_.value / tree.tree_.value.sum(axis=(1,2))[:,np.newaxis,np.newaxis]
# leafs.append(tmp.squeeze(1))
mappings = []
leafs = []
for t, w in zip(trees, weights):
leaf_mapping = {}
l = []
for i, n in enumerate(t.nodes):
if n.prediction is not None:
leaf_mapping[n.id] = len(l)
# Normalize the values in the leaf nodes for SGD. This is usually a better initialization
pred = np.array(n.prediction) / sum(n.prediction)
l.append(pred)
mappings.append(leaf_mapping)
leafs.append(np.array(l))
if optimizer == "adam":
m = []
v = []
t = 1
for l in leafs:
m.append(np.zeros_like(l))
v.append(np.zeros_like(l))
for epoch in range(epochs):
mini_batches = create_mini_batches(X, Y, batch_size, True)
batch_cnt = 0
loss_sum = 0
accuracy_sum = 0
with tqdm(total=X.shape[0], ncols=150, disable = not verbose) as pbar:
for x,y in mini_batches:
# Prepare the target and apply all trees
target_one_hot = np.array( [ [1.0 if yi == i else 0.0 for i in range(n_classes)] for yi in y] )
indices = [apply(t, m, x) for t,m in zip(trees, mappings)]
pred = []
for i, idx, w in zip(range(len(trees)), indices, weights):
pred.append(w * leafs[i][idx])
pred = np.array(pred)
fbar = pred.sum(axis=0)
# SGD
if optimizer == "sgd":
deriv = 2 * (fbar - target_one_hot) * 1.0 / x.shape[0] * 1.0 / n_classes #* 1.0 / len(trees)
for i, idx in zip(range(len(trees)), indices):
np.add.at(leafs[i], idx, - lr * deriv)
else:
# Adam
deriv = 2 * (fbar - target_one_hot) * 1.0 / x.shape[0] * 1.0 / n_classes #* 1.0 / len(trees)
beta1 = 0.9
beta2 = 0.999
for i, idx in zip(range(len(trees)), indices):
grad = np.zeros_like(leafs[i])
np.add.at(grad, idx, deriv)
m[i] = beta1 * m[i] + (1-beta1) * grad
v[i] = beta2 * v[i] + (1-beta2) * (grad ** 2)
m_corrected = m[i] / (1-beta1**t)
v_corrected = v[i] / (1-beta2**t)
leafs[i] += - lr * m_corrected / (np.sqrt(v_corrected) + 1e-8)
t += 1
# compute some statistics
loss_sum += ((fbar - target_one_hot)**2).mean()
accuracy_sum += (fbar.argmax(axis=1) == y).mean() * 100.0
batch_cnt += 1
pbar.update(x.shape[0])
desc = '[{}/{}] loss {:2.4f} accuracy {:2.4f}'.format(
epoch,
epochs-1,
loss_sum / batch_cnt,
accuracy_sum / batch_cnt,
)
pbar.set_description(desc)
# Copy the optimized leafs back into the trees with the pre-computed mapping
for t, m, l in zip(trees, mappings, leafs):
for nid, i in m.items():
t.nodes[nid].prediction = l[i].tolist()
return trees
| 5,118 |
def test_ebi_goa_dnld(run_full=False):
"""Test downloading files from GOA source http://www.ebi.ac.uk/GOA."""
obj = DnldGoa()
dnld_files = dnld_goa(obj, run_full, ['gpa']) # 'gpi', 'gaf'
for fout in dnld_files:
assert os.path.isfile(fout), "FILE({F}) NOT PROPERLY DOWNLOADED FROM {FTP}".format(
F=fout, FTP=obj.ftp_pub)
| 5,119 |
def test_aggregate_median_allvar():
"""
Testing aggregate pycytominer function
"""
aggregate_result = aggregate(
population_df=data_df, strata=["g"], features="infer", operation="median"
)
expected_result = pd.concat(
[
pd.DataFrame({"g": "a", "Cells_x": [3], "Nuclei_y": [3]}),
pd.DataFrame({"g": "b", "Cells_x": [3], "Nuclei_y": [3]}),
]
).reset_index(drop=True)
expected_result = expected_result.astype(dtype_convert_dict)
assert aggregate_result.equals(expected_result)
# Test output
aggregate(
population_df=data_df,
strata=["g"],
features="infer",
operation="median",
output_file=test_output_file,
)
test_df = pd.read_csv(test_output_file)
pd.testing.assert_frame_equal(test_df, expected_result)
| 5,120 |
def fetch_db_object(cls: ClassVar, body: Any):
"""Fetch a database object via SQLAlchemy.
:param cls: the class of object to fetch.
:param body: the body of the object. If the body is None then None is returned (for the case where no object
exists), if the body is already of type cls then the body is returned as the object and if the body is a dictionary
with the key 'id' a query is made to fetch the given object.
:return: the object.
"""
if body is None:
item = None
elif isinstance(body, cls):
item = body
elif isinstance(body, Dict):
if "id" not in body:
raise AttributeError(f"id not found in {body}")
id = body["id"]
item = session_.query(cls).filter(cls.id == id).one_or_none()
if item is None:
raise ValueError(f"{item} with id {id} not found")
else:
raise ValueError(f"Unknown item type {body}")
return item
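# A hypothetical usage sketch; Organisation is an assumed SQLAlchemy-mapped class
# and session_ must already be configured at module level.
org = fetch_db_object(Organisation, {"id": 1})   # fetched by primary key
same = fetch_db_object(Organisation, org)        # already an instance, returned as-is
nothing = fetch_db_object(Organisation, None)    # no body given, returns None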
| 5,121 |
def should_skip_cred_test():
"""
Returns `True` if a test requiring credentials should be skipped.
Otherwise returns `False`
"""
if username is None or password is None:
return True
return False
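# A hypothetical usage sketch with pytest, assuming should_skip_cred_test() and the
# module-level username/password globals are importable.
import pytest

@pytest.mark.skipif(should_skip_cred_test(), reason="username/password not configured")
def test_login_with_credentials():
    ...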
| 5,122 |
def update_registry_location():
"""Handle changes to the container image registry.
Monitor the image registry location. If it changes, manage flags to ensure
our image-related handlers will be invoked with an accurate registry.
"""
registry_location = get_registry_location()
if registry_location:
runtime = endpoint_from_flag('endpoint.container-runtime.available')
if runtime:
# Construct and send the sandbox image (pause container) to our runtime
uri = '{}/pause:3.4.1'.format(registry_location)
runtime.set_config(
sandbox_image=uri
)
if data_changed('registry-location', registry_location):
remove_state('kubernetes-worker.config.created')
remove_state('kubernetes-worker.ingress.available')
remove_state('nfs.configured')
set_state('kubernetes-worker.restart-needed')
| 5,123 |
def write_project_name_file(
annofab_service: annofabapi.Resource, project_id: str, command_line_args: CommnadLineArgs, output_project_dir: Path
):
"""
    Generate a JSON file whose file name is the project name.
"""
project_info = annofab_service.api.get_project(project_id)[0]
project_title = project_info["title"]
logger.info(f"project_title = {project_title}")
filename = annofabcli.utils.to_filename(project_title)
output_project_dir.mkdir(exist_ok=True, parents=True)
project_summary = ProjectSummary(
project_id=project_id,
project_title=project_title,
measurement_datetime=annofabapi.utils.str_now(),
args=command_line_args,
)
with open(str(output_project_dir / f"{filename}.json"), "w", encoding="utf-8") as f:
f.write(project_summary.to_json(ensure_ascii=False, indent=2))
| 5,124 |
def print_genome_matrix(hits, fastas, id2desc, file_name):
"""
    Write matrices of pairwise percent identity and percent of orthologous ORFs between genomes to file_name.
    (Optimize later? Slow; should be combined with the calculate_threshold module.)
"""
out = open(file_name, 'w')
fastas = sorted(fastas)
print('## percent identity between genomes', file=out)
print('# - \t %s' % ('\t'.join(fastas)), file=out)
for fasta in fastas:
line = [fasta]
for other in fastas:
if other == fasta:
average = '-'
else:
average = numpy.average([hits[fasta][other][i][3] for i in hits[fasta][other]])
line.append(str(average))
print('\t'.join(line), file=out)
print('', file=out)
print('## percent of orfs that are orthologous between genomes', file=out)
print('# - \t %s' % ('\t'.join(fastas)), file=out)
for fasta in fastas:
line = [fasta]
for other in fastas:
if other == fasta:
percent = '-'
else:
orthologs = float(len(hits[fasta][other]))
orfs = float(len([i for i in id2desc if id2desc[i][0] == fasta]))
percent = float(orthologs / orfs) * 100
line.append(str(percent))
        print('\t'.join(line), file=out)
    out.close()
| 5,125 |
def test_unicode_ipdir():
"""Check that IPython starts with non-ascii characters in the IP dir."""
ipdir = tempfile.mkdtemp(suffix=u"€")
# Create the config file, so it tries to load it.
with open(os.path.join(ipdir, 'ipython_config.py'), "w") as f:
pass
old_ipdir1 = os.environ.pop("IPYTHONDIR", None)
old_ipdir2 = os.environ.pop("IPYTHON_DIR", None)
os.environ["IPYTHONDIR"] = ipdir.encode("utf-8")
try:
app = Application()
# The lines below are copied from Application.initialize()
app.create_default_config()
app.log_default_config()
app.set_default_config_log_level()
# Find resources needed for filesystem access, using information from
# the above two
app.find_ipython_dir()
app.find_resources()
app.find_config_file_name()
app.find_config_file_paths()
# File-based config
app.pre_load_file_config()
app.load_file_config(suppress_errors=False)
finally:
if old_ipdir1:
os.environ["IPYTHONDIR"] = old_ipdir1
if old_ipdir2:
os.environ["IPYTHONDIR"] = old_ipdir2
| 5,126 |
def list_input_images(img_dir_or_csv: str,
bucket_name: str = None,
glob_patterns: List = None):
"""
Create list of images from given directory or csv file.
:param img_dir_or_csv: (str) directory containing input images or csv with list of images
:param bucket_name: (str, optional) name of aws s3 bucket
:param glob_patterns: (list of str) if directory is given as input (not csv), these are the glob patterns that will be used
to find desired images
returns list of dictionaries where keys are "tif" and values are paths to found images. "meta" key is also added
if input is csv and second column contains a metadata file. Then, value is path to metadata file.
"""
if bucket_name:
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucket_name)
if img_dir_or_csv.endswith('.csv'):
bucket.download_file(img_dir_or_csv, 'img_csv_file.csv')
list_img = read_csv('img_csv_file.csv')
else:
raise NotImplementedError(
'Specify a csv file containing images for inference. Directory input not implemented yet')
else:
if img_dir_or_csv.endswith('.csv'):
list_img = read_csv(img_dir_or_csv)
elif is_url(img_dir_or_csv):
list_img = []
img_path = Path(img_dir_or_csv)
img = {}
img['tif'] = img_path
list_img.append(img)
else:
img_dir = Path(img_dir_or_csv)
assert img_dir.is_dir() or img_dir.is_file(), f'Could not find directory/file "{img_dir_or_csv}"'
list_img_paths = set()
if img_dir.is_dir():
for glob_pattern in glob_patterns:
assert isinstance(glob_pattern, str), f'Invalid glob pattern: "{glob_pattern}"'
list_img_paths.update(sorted(img_dir.glob(glob_pattern)))
else:
                list_img_paths.add(img_dir)
list_img = []
for img_path in list_img_paths:
img = {}
img['tif'] = img_path
list_img.append(img)
            assert len(list_img) > 0, f'No .tif files found in {img_dir_or_csv}'
return list_img
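# A minimal usage sketch; the directory and glob patterns below are hypothetical.
found = list_input_images("data/images", glob_patterns=["*.tif", "*.TIF"])
for entry in found:
    print(entry["tif"])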
| 5,127 |
def get_insta_links(L: Instaloader, url: str) -> tuple:
"""
    Resolve an Instagram URL to its Post object
    :param url: URL
    :return: success status and the Post object (an empty list on failure)
"""
try:
shortcode = get_insta_shortcode(url)
post = Post.from_shortcode(L.context, shortcode)
return True, post
except Exception as e:
print(str(e))
return False, []
| 5,128 |
def read_snli(data_dir, is_train):
"""将SNLI数据集解析为前提、假设和标签"""
def extract_text(s):
        # Remove information we will not use
s = re.sub('\\(', '', s)
s = re.sub('\\)', '', s)
        # Replace two or more consecutive spaces with a single space
s = re.sub('\\s{2,}', ' ', s)
return s.strip()
label_set = {'entailment': 0, 'contradiction': 1, 'neutral': 2}
file_name = os.path.join(data_dir, 'snli_1.0_train.txt'
if is_train else 'snli_1.0_test.txt')
with open(file_name, 'r') as f:
rows = [row.split('\t') for row in f.readlines()[1:]]
premises = [extract_text(row[1]) for row in rows if row[0] in label_set]
hypotheses = [extract_text(row[2]) for row in rows if row[0] \
in label_set]
labels = [label_set[row[0]] for row in rows if row[0] in label_set]
return premises, hypotheses, labels
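# A minimal usage sketch, assuming the SNLI 1.0 text files were extracted to the
# directory below (the path is hypothetical).
premises, hypotheses, labels = read_snli("data/snli_1.0", is_train=True)
print(premises[0], "|", hypotheses[0], "|", labels[0])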
| 5,129 |
async def http_request_callback(_request: HttpRequest) -> HttpResponse:
"""A response handler which returns some text"""
with open(__file__, 'rb') as file_pointer:
buf = file_pointer.read()
headers = [
(b'content-type', b'text/plain'),
(b'content-length', str(len(buf)).encode('ascii'))
]
return HttpResponse(200, headers, bytes_writer(buf, chunk_size=-1))
| 5,130 |
def ensure_directory_exists(directory, domain=None, permissions=0o777):
"""Create a directory and give access rights to all
Args:
directory (str): Root directory
domain (str): Domain. Basically a subdirectory to prevent things like
overlapping signal filenames.
        permissions (int): Directory permissions (default is 0o777)
Returns:
(str) a path to the directory
"""
if domain:
directory = os.path.join(directory, domain)
# Expand and normalize the path
directory = os.path.normpath(directory)
directory = os.path.expanduser(directory)
if not os.path.isdir(directory):
try:
save = os.umask(0)
os.makedirs(directory, permissions)
except OSError:
LOG.warning("Failed to create: " + directory)
finally:
os.umask(save)
return directory
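# A minimal usage sketch; the application name and domain below are hypothetical.
signals_dir = ensure_directory_exists("~/.cache/myapp", domain="signals")
print(signals_dir)  # e.g. /home/<user>/.cache/myapp/signals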
| 5,131 |
def on_state_changed_farm(parent, state):
"""Callback event handler for changed "AWS" checkbox on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
state (str): Identifier of the callback state.
"""
parent.is_farm = state > 0
if not parent.is_refreshing_data:
if "update_frame_range_dropdowns" in dir(parent):
parent.update_frame_range_dropdowns()
if "update_run_button_text" in dir(parent):
parent.update_run_button_text()
parent.update_flagfile(parent.flagfile_fn)
| 5,132 |
def item_coverage(
possible_users_items: Tuple[List[Union[int, str]], List[Union[int, str]]],
recommendations: List[Tuple[Union[int, str], Union[int, str]]],
) -> float:
"""
Calculates the coverage value for items in possible_users_items[1] given the collection of recommendations.
Recommendations over users/items not in possible_users_items are discarded.
Args:
possible_users_items (Tuple[List[Union[int, str]], List[Union[int, str]]]): contains exactly TWO sub-lists,
first one with users, second with items
recommendations (List[Tuple[Union[int, str], Union[int, str]]]): contains user-item recommendation tuples,
e.g. [(user1, item1),(user2, item2),]
Returns: item coverage (float): a metric showing the fraction of items which got recommended at least once.
"""
if len(possible_users_items) != 2:
raise ValueError("possible_users_items must be of length 2: [users, items]")
if np.any([len(x) == 0 for x in possible_users_items]):
raise ValueError("possible_users_items cannot hold empty lists!")
possible_items = set(possible_users_items[1])
items_with_recommendations = set([x[1] for x in recommendations])
items_without_recommendations = possible_items.difference(items_with_recommendations)
item_cov = 1 - len(items_without_recommendations) / len(possible_items)
return round(item_cov, 3)
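# A small worked example, assuming item_coverage() above is importable:
# only 2 of the 4 possible items ever get recommended, so coverage is 0.5.
possible = (["u1", "u2", "u3"], ["i1", "i2", "i3", "i4"])
recommendations = [("u1", "i1"), ("u2", "i1"), ("u3", "i3")]
print(item_coverage(possible, recommendations))  # 0.5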
| 5,133 |
def test_create_alias(mock_es_client):
"""Test create_alias()."""
index_name = 'test-index'
alias_name = 'test-alias'
client = mock_es_client.return_value
elasticsearch.associate_index_with_alias(alias_name, index_name)
client.indices.put_alias.assert_called_with(index_name, alias_name)
| 5,134 |
def display_budgets(budgets_tab, max_resources, reduction_factor):
"""Display hyperband budget as a table in debug log"""
num_brackets = len(budgets_tab[0])
table_str = "Display Budgets:\n"
col_format_str = "{:<4}" + " {:<12}" * num_brackets + "\n"
col_title_list = ["i "] + ["n_i r_i"] * num_brackets
col_sub_list = ["---"] + ["---------"] * num_brackets
table_str += col_format_str.format(*col_sub_list)
table_str += col_format_str.format(*col_title_list)
table_str += col_format_str.format(*col_sub_list)
total_trials = 0
for key, values in sorted(budgets_tab.items()):
table_row = "{:<4} ".format(key)
for value in values:
n_i, r_i = value
total_trials += n_i
st = "{:<5} {:<7}".format(n_i, r_i)
table_row += st
table_str += table_row + "\n"
table_str += col_format_str.format(*col_sub_list)
table_str += "max resource={}, eta={}, trials number of one execution={}\n".format(
max_resources, reduction_factor, total_trials
)
logger.info(table_str)
| 5,135 |
def test_scope():
"""Test that the use of scope dictionary works as intended"""
cip = TwoWheelerInputParameters()
cip.static()
scope = {"powertrain": ["ICEV-d"], "size": ["Lower medium"]}
_, array = fill_xarray_from_input_parameters(cip, scope=scope)
assert "BEV" not in array.coords["powertrain"].values
assert "Large" not in array.coords["size"].values
| 5,136 |
def calc_area(img_it, contours, conv_sq, list_save):
"""
    Sum the areas of the given contours, convert them with conv_sq and append
    [img_it, total area] to list_save.
    Parameters
    ----------
    img_it : identifier of the image the contours belong to.
    contours : list of OpenCV contours.
    conv_sq : conversion factor from pixel area to the desired squared units.
    list_save : list to which the [img_it, total area] entry is appended.
    Returns
    -------
    list_save : the input list with the new [img_it, total area] entry appended.
"""
# Calculate areas
sum_file = 0
for c in contours:
M = cv2.moments(c)
area = M['m00']
area_conv = area * conv_sq
sum_file = sum_file + area_conv
# print(sum_file)
list_save.append([img_it, sum_file])
    return list_save
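# A hypothetical usage sketch with OpenCV; the mask filename and conversion factor
# are assumptions, and cv2.findContours follows the OpenCV 4 return signature here.
import cv2

mask = cv2.imread("mask_2001.png", cv2.IMREAD_GRAYSCALE)
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
results = calc_area("mask_2001.png", contours, conv_sq=0.25, list_save=[])
print(results)  # [["mask_2001.png", total area in converted units]]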
| 5,137 |
def truth_seed_box(true_params, init_range, az_ind=4, zen_ind=5):
"""generate initial box limits from the true params
Parameters
----------
true_params : np.ndarray
init_range : np.ndarray
Returns
-------
np.ndarray
shape is (n_params, 2); returned energy limits are in units of log energy
"""
n_params = len(true_params)
true_params = np.copy(true_params[:, np.newaxis])
# clip true energies between 0.3 GeV and 1000 GeV
true_params[-2:] = true_params[-2:].clip(0.3, 1000)
limits = np.empty((n_params, 2), np.float32)
limits[:-2] = true_params[:-2] + init_range[:-2]
limits[-2:] = np.log10(true_params[-2:]) + init_range[-2:]
limits[az_ind] = limits[az_ind].clip(0, 2 * np.pi)
limits[zen_ind] = limits[zen_ind].clip(0, np.pi)
return limits
| 5,138 |
def unlike_post(entry_entity, unliker):
"""Deletes a PostLike entity"""
likes_query = PostLike.all()
likes_query.filter('liker =', unliker)
likes_query.filter('blogEntry =', entry_entity)
    like_entity = likes_query.get()
    if like_entity is not None:
        like_entity.delete()
| 5,139 |
def Report(DriverType=None):
"""A factory for ReportWrapper classes."""
from xia2.Driver.DriverFactory import DriverFactory
DriverInstance = DriverFactory.Driver(DriverType)
class ReportWrapper(DriverInstance.__class__):
def __init__(self):
DriverInstance.__class__.__init__(self)
self.set_executable("dials.report")
self._experiments_filename = None
self._reflections_filename = None
self._html_filename = None
def set_experiments_filename(self, experiments_filename):
self._experiments_filename = experiments_filename
def set_reflections_filename(self, reflections_filename):
self._reflections_filename = reflections_filename
def set_html_filename(self, html_filename):
self._html_filename = html_filename
def run(self, wait_for_completion=False):
from xia2.Handlers.Streams import Debug
Debug.write("Running dials.report")
self.clear_command_line()
assert (
self._experiments_filename is not None
or self._reflections_filename is not None
)
if self._experiments_filename is not None:
self.add_command_line(self._experiments_filename)
if self._reflections_filename is not None:
self.add_command_line(self._reflections_filename)
if self._html_filename is not None:
self.add_command_line("output.html=%s" % self._html_filename)
self.start()
if wait_for_completion:
self.close_wait()
else:
self.close()
self.check_for_errors()
return ReportWrapper()
| 5,140 |
def summary(task):
"""Given an ImportTask, produce a short string identifying the
object.
"""
if task.is_album:
return u'{0} - {1}'.format(task.cur_artist, task.cur_album)
else:
return u'{0} - {1}'.format(task.item.artist, task.item.title)
| 5,141 |
def ds_tc_resnet_model_params(use_tf_fft=False):
"""Generate parameters for ds_tc_resnet model."""
# model parameters
model_name = 'ds_tc_resnet'
params = model_params.HOTWORD_MODEL_PARAMS[model_name]
params.causal_data_frame_padding = 1 # causal padding on DataFrame
params.clip_duration_ms = 160
params.use_tf_fft = use_tf_fft
params.mel_non_zero_only = not use_tf_fft
params.feature_type = 'mfcc_tf'
params.window_size_ms = 5.0
params.window_stride_ms = 2.0
params.wanted_words = 'a,b,c'
params.ds_padding = "'causal','causal','causal','causal'"
params.ds_filters = '4,4,4,2'
params.ds_repeat = '1,1,1,1'
params.ds_residual = '0,1,1,1' # no residuals on strided layers
params.ds_kernel_size = '3,3,3,1'
params.ds_dilation = '1,1,1,1'
params.ds_stride = '2,1,1,1' # streaming conv with stride
params.ds_pool = '1,2,1,1' # streaming conv with pool
params.ds_filter_separable = '1,1,1,1'
# convert ms to samples and compute labels count
params = model_flags.update_flags(params)
# compute total stride
pools = model_utils.parse(params.ds_pool)
strides = model_utils.parse(params.ds_stride)
time_stride = [1]
for pool in pools:
if pool > 1:
time_stride.append(pool)
for stride in strides:
if stride > 1:
time_stride.append(stride)
total_stride = np.prod(time_stride)
# override input data shape for streaming model with stride/pool
params.data_stride = total_stride
params.data_shape = (total_stride * params.window_stride_samples,)
# set desired number of frames in model
frames_number = 16
frames_per_call = total_stride
frames_number = (frames_number // frames_per_call) * frames_per_call
# number of input audio samples required to produce one output frame
framing_stride = max(
params.window_stride_samples,
max(0, params.window_size_samples -
params.window_stride_samples))
signal_size = framing_stride * frames_number
# desired number of samples in the input data to train non streaming model
params.desired_samples = signal_size
params.batch_size = 1
return params
| 5,142 |
def expected_calibration_error_evaluator(test_data: pd.DataFrame,
prediction_column: str = "prediction",
target_column: str = "target",
eval_name: str = None,
n_bins: int = 100,
bin_choice: str = "count") -> EvalReturnType:
"""
Computes the expected calibration error (ECE), given true label and prediction scores.
See "On Calibration of Modern Neural Networks"(https://arxiv.org/abs/1706.04599) for more information.
The ECE is the distance between the actuals observed frequency and the predicted probabilities,
for a given choice of bins.
Perfect calibration results in a score of 0.
For example, if for the bin [0, 0.1] we have the three data points:
1. prediction: 0.1, actual: 0
2. prediction: 0.05, actual: 1
3. prediction: 0.0, actual 0
Then the predicted average is (0.1 + 0.05 + 0.00)/3 = 0.05, and the empirical frequency is (0 + 1 + 0)/3 = 1/3.
Therefore, the distance for this bin is::
|1/3 - 0.05| ~= 0.28.
Graphical intuition::
Actuals (empirical frequency between 0 and 1)
| *
| *
| *
    ______ Predictions (probabilities between 0 and 1)
Parameters
----------
test_data : Pandas' DataFrame
        A Pandas' DataFrame with target and prediction scores.
    prediction_column : String
The name of the column in `test_data` with the prediction scores.
target_column : String
The name of the column in `test_data` with the binary target.
eval_name : String, optional (default=None)
The name of the evaluator as it will appear in the logs.
n_bins: Int (default=100)
The number of bins.
This is a trade-off between the number of points in each bin and the probability range they span.
You want a small enough range that still contains a significant number of points for the distance to work.
bin_choice: String (default="count")
Two possibilities:
"count" for equally populated bins (e.g. uses `pandas.qcut` for the bins)
"prob" for equally spaced probabilities (e.g. uses `pandas.cut` for the bins),
with distance weighed by the number of samples in each bin.
Returns
-------
log: dict
A log-like dictionary with the expected calibration error.
"""
if eval_name is None:
eval_name = "expected_calibration_error_evaluator__" + target_column
if bin_choice == "count":
bins = pd.qcut(test_data[prediction_column], q=n_bins)
elif bin_choice == "prob":
bins = pd.cut(test_data[prediction_column], bins=n_bins)
else:
raise AttributeError("Invalid bin_choice")
metric_df = pd.DataFrame({"bins": bins,
"predictions": test_data[prediction_column],
"actuals": test_data[target_column]})
agg_df = metric_df.groupby("bins").agg({"bins": "count", "predictions": "mean", "actuals": "mean"})
sample_weight = None
if bin_choice == "prob":
sample_weight = agg_df["bins"].values
distance = mean_absolute_error(agg_df["actuals"].values, agg_df["predictions"].values, sample_weight=sample_weight)
return {eval_name: distance}
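# A minimal usage sketch, assuming the evaluator above and its dependencies
# (pandas, scikit-learn's mean_absolute_error) are importable.
import pandas as pd

scores = pd.DataFrame({
    "prediction": [0.10, 0.05, 0.00, 0.90, 0.80, 0.70],
    "target":     [0,    1,    0,    1,    1,    0],
})
print(expected_calibration_error_evaluator(scores, n_bins=2, bin_choice="count"))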
| 5,143 |
def test_unify_lifted_to_ground():
"""Tests for unify() when lifted atoms are the first argument and ground
atoms are the second argument."""
cup_type = Type("cup_type", ["feat1"])
cup0 = cup_type("cup0")
cup1 = cup_type("cup1")
cup2 = cup_type("cup2")
var0 = cup_type("?var0")
var1 = cup_type("?var1")
var2 = cup_type("?var2")
pred0 = Predicate("Pred0", [cup_type], lambda s, o: True)
pred1 = Predicate("Pred1", [cup_type, cup_type], lambda s, o: True)
pred2 = Predicate("Pred2", [cup_type], lambda s, o: True)
kb0 = frozenset({pred0([cup0])})
q0 = frozenset({pred0([var0])})
found, assignment = utils.unify(kb0, q0)
assert found
assert assignment == {cup0: var0}
q1 = frozenset({pred0([var0]), pred0([var1])})
found, assignment = utils.unify(kb0, q1)
assert not found
assert assignment == {}
kb1 = frozenset({pred0([cup0]), pred0([cup1])})
found, assignment = utils.unify(kb1, q0)
assert not found # different number of predicates/objects
assert assignment == {}
kb2 = frozenset({pred0([cup0]), pred2([cup2])})
q2 = frozenset({pred0([var0]), pred2([var2])})
found, assignment = utils.unify(kb2, q2)
assert found
assert assignment == {cup0: var0, cup2: var2}
kb3 = frozenset({pred0([cup0])})
q3 = frozenset({pred0([var0]), pred2([var2])})
found, assignment = utils.unify(kb3, q3)
assert not found
assert assignment == {}
kb4 = frozenset({pred1([cup0, cup1]), pred1([cup1, cup2])})
q4 = frozenset({pred1([var0, var1])})
found, assignment = utils.unify(kb4, q4)
assert not found # different number of predicates
assert assignment == {}
kb5 = frozenset({pred0([cup2]), pred1([cup0, cup1]), pred1([cup1, cup2])})
q5 = frozenset({pred1([var0, var1]), pred0([var1]), pred0([var0])})
found, assignment = utils.unify(kb5, q5)
assert not found
assert assignment == {}
kb6 = frozenset({
pred0([cup0]),
pred2([cup1]),
pred1([cup0, cup2]),
pred1([cup2, cup1])
})
q6 = frozenset({pred0([var0]), pred2([var1]), pred1([var0, var1])})
found, assignment = utils.unify(kb6, q6)
assert not found
assert assignment == {}
kb7 = frozenset({pred0([cup0]), pred2([cup1])})
q7 = frozenset({pred0([var0]), pred2([var0])})
found, assignment = utils.unify(kb7, q7)
assert not found # different number of objects
assert assignment == {}
kb8 = frozenset({pred0([cup0]), pred2([cup0])})
q8 = frozenset({pred0([var0]), pred2([var0])})
found, assignment = utils.unify(kb8, q8)
assert found
assert assignment == {cup0: var0}
kb9 = frozenset({pred1([cup0, cup1]), pred1([cup1, cup2]), pred2([cup0])})
q9 = frozenset({pred1([var0, var1]), pred1([var2, var0]), pred2([var0])})
found, assignment = utils.unify(kb9, q9)
assert not found
assert assignment == {}
| 5,144 |
def format_bytes(size):
"""
Takes a byte size (int) and returns a formatted, human-interpretable string
"""
# 2**10 = 1024
power = 2 ** 10
n = 0
power_labels = {0: " bytes", 1: "KB", 2: "MB", 3: "GB", 4: "TB"}
while size >= power:
size /= power
n += 1
return str(round(size, 2)) + power_labels[n]
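# Quick usage check for format_bytes (pure function, no external dependencies).
print(format_bytes(512))          # "512 bytes"
print(format_bytes(1536))         # "1.5KB"
print(format_bytes(3 * 2 ** 20))  # "3.0MB"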
| 5,145 |
def plot_features(df):
"""
:param df:
:return:
"""
dfp = df.toPandas()
column_list = ["avgSessionsMonth", "avgSessionMonthDuration", "avgSessionitemsMonth", "avgSessionsDay",
"avgSessionDayDuration", "avgSessionitemsDay", "activeDuration",
"AboutPageMonth", "AddFriendPageMonth", "AddtoPlaylistPageMonth", "DowngradePageMonth",
"ErrorPageMonth", "HelpPageMonth", "HomePageMonth", "LogoutPageMonth",
"NextSongPageMonth", "RollAdvertPageMonth", "SaveSettingsPageMonth", "SettingsPageMonth",
"SubmitDowngradePageMonth", "SubmitUpgradePageMonth", "ThumbsDownPageMonth",
"ThumbsUpPageMonth", "UpgradePageMonth", "itemInSession", "avgLength"]
for c in column_list:
fig = plt.figure(figsize=(8, 5))
ax = fig.gca()
h = dfp[dfp.churn == 0.0][c].plot.hist(color='b', ax=ax, )
h = dfp[dfp.churn == 1.0][c].plot.hist(color='g', ax=ax)
h, l = ax.get_legend_handles_labels()
ax.set_xlabel(c)
ax.legend(h, ["Active", "Canceled"], title="user status")
fig = plt.figure(figsize=(30, 25))
ax = fig.gca()
h = dfp.hist(ax=ax)
| 5,146 |
def load_input_data(filenames, Ag_class):
"""
Load the files specified in filenames.
Parameters
---
filenames: a list of names that specify the files to
be loaded.
Ag_class: classification of sequences from MiXCR txt file
(i.e., antigen binder = 1, non-binder = 0)
"""
# Combine the non-binding sequence data sets.
# Non-binding data sets include Ab+ data and Ag-
# sorted data for all 3 libraries
l_data = []
for file in filenames:
l_data.append(
mixcr_input('data/' + file, Ag_class, seq_len=15)
)
mHER_H3 = pd.concat(l_data)
# Drop duplicate sequences
mHER_H3 = mHER_H3.drop_duplicates(subset='AASeq')
# Remove 'CAR/CSR' motif and last two amino acids
mHER_H3['AASeq'] = [x[3:-2] for x in mHER_H3['AASeq']]
# Shuffle sequences and reset index
mHER_H3 = mHER_H3.sample(frac=1).reset_index(drop=True)
return mHER_H3
| 5,147 |
def get_maximum_value(
inclusive: Optional[Edge] = None,
exclusive: Optional[Edge] = None,
ignore_unlimited: bool = False,
) -> Result[Boundary, TestplatesError]:
"""
Gets maximum boundary.
:param inclusive: inclusive boundary value or None
:param exclusive: exclusive boundary value or None
:param ignore_unlimited: indicates whether to ignore unlimited values or not
"""
return get_value_boundary(
MAXIMUM_EXTREMUM,
inclusive=inclusive,
exclusive=exclusive,
ignore_unlimited=ignore_unlimited,
)
| 5,148 |
def draw_maglites(bmap,**kwargs):
"""
Plot the MagLiteS Phase-I footprint.
Parameters:
-----------
bmap : The basemap object
kwargs : Various plotting arguments
Returns:
--------
None
"""
# Plot the wide-field survey footprint
logging.debug("Plotting MagLiteS footprint")
infile = os.path.join(get_datadir(),'maglites-poly.txt')
perim = np.loadtxt(infile,dtype=[('ra',float),('dec',float)])
proj = safe_proj(bmap,perim['ra'],perim['dec'])
bmap.plot(*proj,**kwargs)
| 5,149 |
def build_graph(order: int, edges: List[List[int]]) -> List[List[int]]:
"""Builds an adjacency list from the edges of an undirected graph."""
adj = [[] for _ in range(order)]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
return adj
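# Example: adjacency list of a 4-node path graph 0-1-2-3.
adj = build_graph(4, [[0, 1], [1, 2], [2, 3]])
assert adj == [[1], [0, 2], [1, 3], [2]]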
| 5,150 |
def construct_scheduler(
optimizer,
cfg: OmegaConf,
):
"""
    Creates a learning rate scheduler for a given optimizer
    :param optimizer: the optimizer to be used
    :param cfg: OmegaConf config providing cfg.train.scheduler and cfg.train.scheduler_params
    :return: scheduler (or None if the scheduler type is unknown)
"""
# Unpack values from cfg.train.scheduler_params
scheduler_type = cfg.train.scheduler
decay_factor = cfg.train.scheduler_params.decay_factor
decay_steps = cfg.train.scheduler_params.decay_steps
patience = cfg.train.scheduler_params.patience
warmup_epochs = cfg.train.scheduler_params.warmup_epochs
warmup = warmup_epochs != -1
if scheduler_type == "multistep":
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer,
milestones=decay_steps,
gamma=1.0 / decay_factor,
)
elif scheduler_type == "plateau":
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer,
mode="max",
factor=1.0 / decay_factor,
patience=patience,
verbose=True,
# threshold_mode="rel",
# min_lr=2.5e-4,
)
elif scheduler_type == "exponential":
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
optimizer,
gamma=decay_factor,
last_epoch=-1,
)
elif scheduler_type == "cosine":
size_dataset = DATASET_SIZES[cfg.dataset]
if warmup:
            # If warmup is used, then we need to subtract this from T_max.
T_max = (cfg.train.epochs - warmup_epochs) * math.ceil(
size_dataset / float(cfg.train.batch_size)
) # - warmup epochs
else:
T_max = cfg.train.epochs * math.ceil(
size_dataset / float(cfg.train.batch_size)
)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer,
T_max=T_max,
eta_min=1e-6,
)
else:
lr_scheduler = None
print(
f"WARNING! No scheduler will be used. cfg.train.scheduler = {scheduler_type}"
)
if warmup and lr_scheduler is not None:
size_dataset = DATASET_SIZES[cfg.dataset]
lr_scheduler = ckconv.nn.LinearWarmUp_LRScheduler(
optimizer=optimizer,
lr_scheduler=lr_scheduler,
warmup_iterations=warmup_epochs
* math.ceil(size_dataset / float(cfg.train.batch_size)),
)
return lr_scheduler
| 5,151 |
def dump_eetf_info(eetf):
"""
    Dump information such as the 18% Gray and 100% White levels.
"""
x_18_gray = tf.oetf_from_luminance(18.0, tf.ST2084)
x_100_white = tf.oetf_from_luminance(100.0, tf.ST2084)
x_ref_white = tf.oetf_from_luminance(250.0, tf.ST2084)
x_18_idx = int(np.round(x_18_gray * 1023))
x_100_idx = int(np.round(x_100_white * 1023))
x_ref_idx = int(np.round(x_ref_white * 1023))
print(x_18_gray, x_100_white, x_ref_white)
print(x_18_idx, x_100_idx, x_ref_idx)
y = eetf[st_pos_v, st_pos_h:st_pos_h+h_sample, 1]
print("18 Gray = {}".format(y[x_18_idx]))
print("100 white = {}".format(y[x_100_idx]))
print("250 white = {}".format(y[x_ref_idx]))
a = (y[x_100_idx] - y[x_18_idx]) / (x_100_white - x_18_gray)
b = a * -x_18_gray + y[x_18_idx]
print(a)
print(b)
a = 0.74
b = a * -x_100_white + y[x_100_idx]
print(a)
print(b)
print(tf.eotf_to_luminance(0.0117522745451, tf.ST2084))
print(tf.eotf_to_luminance(0.3877303064680016, tf.ST2084))
| 5,152 |
def GenerateSerialGraph(num_samples, block_size):
""" Generates a (consistent) serial graph. """
N = num_samples
num_blocks = N // block_size
if N % block_size != 0:
err = "num_samples(%d) must be a multiple of block_size (%d)" % (num_samples, block_size)
raise Exception(err)
if num_blocks < 2:
err = "the number of blocks %d should be at least 2 (%d/%d)" % (num_blocks, num_samples, block_size)
raise Exception(err)
node_weights = numpy.ones(N) * 2.0
node_weights[:block_size] = 1.0
node_weights[-block_size:] = 1.0
edge_weights = {}
w = 1.0
for block in range(num_blocks - 1):
for i in range(block_size):
for j in range(block_size):
edge_weights[(i + block * block_size, j + (block + 1) * block_size)] = w
edge_weights[(j + (block + 1) * block_size, i + block * block_size)] = w # Loops are simply overwritten
return node_weights, edge_weights
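# Example: 20 samples split into 4 consecutive blocks of 5 nodes; every node of
# one block is connected to every node of the next block (both directions).
node_weights, edge_weights = GenerateSerialGraph(num_samples=20, block_size=5)
assert node_weights.shape == (20,)
assert len(edge_weights) == 3 * 5 * 5 * 2   # 3 block pairs, 25 pairs each, 2 directions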
| 5,153 |
def load_ref_system():
""" Returns alpha-d-rhamnopyranose as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
C -0.8728 1.4263 -0.3270
O -1.5909 0.3677 0.2833
C -1.1433 -0.9887 0.0086
C 0.3390 -1.0821 0.4414
O 0.8751 -2.3755 0.1209
C 1.1868 -0.1193 -0.4147
C 0.6705 1.3308 -0.3098
O 1.0480 1.9270 0.9344
O 2.5224 -0.0436 0.1069
C -2.0793 -1.8494 0.8365
O -1.2047 1.4329 -1.7148
H -1.2520 -1.1765 -1.0826
H 0.4676 -0.8772 1.5266
H 1.2377 -0.4682 -1.4721
H 1.1005 1.9572 -1.1305
H -1.2644 2.3269 0.1977
H -2.1732 1.3552 -1.8431
H 1.9510 1.6015 1.1977
H 2.8904 -0.9551 0.1994
H 0.5074 -3.0553 0.7225
H -2.0383 -1.5878 1.9031
H -3.1236 -1.6900 0.5276
H -1.8591 -2.9160 0.7258
""")
| 5,154 |
def load_aggregates(affiliations, a_agg, d_agg, table, dry_run=False):
"""
    Description: Takes a dict of per-affiliation statuses, the per-carId affiliation aggregates,
    and the no-affiliation (individual) aggregates, and loads them into the VP database.
Args:
affiliations (dict): Keyed on carId. Expects a similar format to
'<carId>': {
'<AFFIL>: {
'i': 0 # The number of interpretations associated with this carId, affiliation combo
# which are in progress.
'a': 1 # Number of combos that have a snapshot in Approved status.
'p': 1 # Number of combos that have a Provisioned status.
}
}
a_agg (dict): Keyed on carId, sum of all Affiliation dicts for this carId
'<carId>': {
'i': 3,
'a': 5,
'p': 6
}
d_agg (dict): Keyed on carId, sum of all interpretations without an affiliation which has a snapshot in the recorded status.
'<carId>': {
'i': 2,
'a': 3,
'p': 4,
}
Returns:
success (bool): True on success
"""
unique_keys = set( list(affiliations) + list(a_agg) + list(d_agg) )
total_statuses_loaded = 0
for carId in unique_keys:
# Get the PK
pk = get_pk_by_carId(carId, table)
if pk is None:
logger.info("Did not find PK in VPT Table for %s. Skipping.", carId)
continue
logger.debug("Found PK %s for carId %s", pk, carId)
# Construct the VCI Status Object
vciStatus = {}
if carId in affiliations:
vciStatus = affiliations[carId]
if carId in a_agg:
vciStatus['a'] = a_agg[carId]
if carId in d_agg:
vciStatus['d'] = d_agg[carId]
logger.debug("vciStatus: %s", vciStatus)
if dry_run:
logger.info("Dry Run: Not loading:")
logger.info("[%s] %s: %s", carId, pk, vciStatus)
else:
total_statuses_loaded += 1
load_vci_status(vciStatus, pk, table)
logger.info("Loaded vciStatus for %d VP variants", total_statuses_loaded)
return True
| 5,155 |
def find_python_root_dir(possibles):
"""Find a python root in a list of dirs
If all dirs have the same name, and one of them has setup.py
    then it is probably a common Python project tree, like
/path/to/projects/cd
/path/to/projects/cd/cd
Or, if all dirs are the same,
except that one has an egg suffix, like
/path/to/dotsite/dotsite
/path/to/dotsite/dotsite.egg-info
then ignore the egg
"""
names = {_.basename() for _ in possibles}
if len(names) == 1:
for possible in possibles:
setup = possible / "setup.py"
if setup.isfile():
return possible
eggless = {paths.path(p.replace(".egg-info", "")) for p in possibles}
if len(eggless) == 1:
return eggless.pop()
return None
| 5,156 |
def _unzip_and_handle_result(zip_content, run, output_handler, benchmark):
"""
Call handle_result with appropriate parameters to fit into the BenchExec expectations.
"""
result_values = collections.OrderedDict()
def _open_output_log(output_path):
log_file = open(run.log_file, 'wb')
log_header = " ".join(
run.cmdline()
) + "\n\n\n--------------------------------------------------------------------------------\n"
log_file.write(log_header.encode('utf-8'))
return log_file
def _handle_run_info(values):
def parseTimeValue(s):
if s[-1] != 's':
raise ValueError(
'Cannot parse "{0}" as a time value.'.format(s))
return float(s[:-1])
for key, value in values.items():
if key == "memory":
result_values["memory"] = int(value.strip('B'))
elif key in ["walltime", "cputime"]:
result_values[key] = parseTimeValue(value)
elif key == "exitcode":
result_values["exitcode"] = int(value)
elif (key == "terminationreason" or key.startswith("blkio-") or
key.startswith("cpuenergy") or key.startswith("energy-") or
key.startswith("cputime-cpu")):
result_values[key] = value
elif key not in IGNORED_VALUES:
result_values['vcloud-' + key] = value
return None
def _handle_host_info(values):
host = values.pop("name", "-")
output_handler.store_system_info(
values.get("os", "-"),
values.get("cpuModel", "-"),
values.get("cores", "-"),
values.get("frequency", "-"),
values.get("memory", "-"),
host,
runSet=run.runSet)
for key, value in values.items():
result_values['vcloud-' + key] = value
result_values["host"] = host
def _handle_stderr_file(result_zip_file, files, output_path):
if RESULT_FILE_STDERR in files:
result_zip_file.extract(RESULT_FILE_STDERR, output_path)
shutil.move(
os.path.join(output_path, RESULT_FILE_STDERR),
run.log_file + ".stdError")
os.rmdir(output_path)
handle_result(
zip_content,
run.log_file + ".output",
run.identifier,
result_files_patterns=benchmark.result_files_patterns,
open_output_log=_open_output_log,
handle_run_info=_handle_run_info,
handle_host_info=_handle_host_info,
handle_special_files=_handle_stderr_file)
if result_values:
with _print_lock:
output_handler.output_before_run(run)
run.set_result(result_values, ["host"])
output_handler.output_after_run(run)
| 5,157 |
def findEndpoint():
"""
    Scroll to the bottom of the school list to read the last position number (the total school count).
"""
print("Fetching school count")
clickToOpen()
# get scroller
scrollbar = driver.find_elements_by_class_name("scrollbar-inner")[1]
driver.execute_script("arguments[0].scrollBy(0,2);", scrollbar)
inner = driver.find_elements_by_class_name("scroll-bar")
time.sleep(2)
top = float(inner[1].get_attribute("style").split("top: ")[-1].replace("px;", ""))
# scroll until
while top < 159:
driver.execute_script("arguments[0].scrollBy(0,200);", scrollbar)
time.sleep(0.3)
top = float(inner[1].get_attribute("style").split("top: ")[-1].replace("px;", ""))
time.sleep(2)
# get point-inset
vis = driver.find_element_by_class_name("visibleGroup")
children = vis.find_elements_by_xpath(".//div[@class='slicerItemContainer']")
last = children[-1].get_attribute("aria-posinset")
print(f"School count: {last}")
time.sleep(1)
return int(last)
| 5,158 |
def _shake_shake_block(x, output_filters, stride, is_training):
"""Builds a full shake-shake sub layer."""
batch_size = tf.shape(x)[0]
# Generate random numbers for scaling the branches
rand_forward = [
tf.random_uniform(
[batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32)
for _ in range(2)
]
rand_backward = [
tf.random_uniform(
[batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32)
for _ in range(2)
]
# Normalize so that all sum to 1
total_forward = tf.add_n(rand_forward)
total_backward = tf.add_n(rand_backward)
rand_forward = [samp / total_forward for samp in rand_forward]
rand_backward = [samp / total_backward for samp in rand_backward]
zipped_rand = zip(rand_forward, rand_backward)
branches = []
for branch, (r_forward, r_backward) in enumerate(zipped_rand):
with tf.variable_scope('branch_{}'.format(branch)):
b = _shake_shake_branch(x, output_filters, stride, r_forward, r_backward,
is_training)
branches.append(b)
res = _shake_shake_skip_connection(x, output_filters, stride)
return res + tf.add_n(branches)
| 5,159 |
def find_wp_dir(con, clean_wp_path):
""" Finds the WP file directory based on the directory list of the WP version.
        Changes the FTPutil connection directory if a matching directory list exists.
Otherwise, gracefully exits the program and closes the FTP connection.
Keyword Arguments:
con -- <Object> FTPutil connection Instance.
clean_wp_path -- <String> Path of the raw WP version
"""
print 'SEARCHING WP parent directory...'
# 1. Get the directory list of the raw WP given the path
raw_dirs = walk(clean_wp_path).next()[1]
print raw_dirs
    # 2. Walk the given FTP directory to verify the directory exists
for root, dirs, files, in con.walk('.'):
if all(x in dirs for x in raw_dirs):
print '[DONE] WP parent directory found in {}'.format(root)
# print root
con.chdir(root)
return
print '[ERROR] WP key directories not found.'
quit()
| 5,160 |
def get_all_paged_events(decision, conn, domain, task_list, identity, maximum_page_size):
"""
Given a poll_for_decision_task response, check if there is a nextPageToken
    and if so, iteratively poll for all workflow events, and assemble a final
decision response to return
"""
# First check if there is no nextPageToken, if there is none
# return the decision, nothing to page
next_page_token = None
try:
next_page_token = decision["nextPageToken"]
except KeyError:
next_page_token = None
if next_page_token is None:
return decision
# Continue, we have a nextPageToken. Assemble a full array of events by continually polling
all_events = decision["events"]
while next_page_token is not None:
try:
next_page_token = decision["nextPageToken"]
if next_page_token is not None:
decision = conn.poll_for_decision_task(domain, task_list,
identity, maximum_page_size,
next_page_token)
for event in decision["events"]:
all_events.append(event)
except KeyError:
next_page_token = None
# Finally, reset the original decision response with the full set of events
decision["events"] = all_events
return decision
| 5,161 |
def get_pools():
""" gets json of pools. schema follows
#{
# "kind": "tm:ltm:pool:poolcollectionstate",
# "selfLink": "https://localhost/mgmt/tm/ltm/pool?ver=11.5.3",
# "items": [
# {
# "kind": "tm:ltm:pool:poolstate",
# "name": "mypoolname",
# "partition": "mypartition",
# "fullPath": "/mypartition/mypoolname",
# "generation": 1,
# "selfLink": "https://localhost/mgmt/tm/ltm/pool/~mypartition~mypoolname?ver=11.5.3",
# "allowNat": "yes",
# "allowSnat": "yes",
# "ignorePersistedWeight": "disabled",
# "ipTosToClient": "pass-through",
# "ipTosToServer": "pass-through",
# "linkQosToClient": "pass-through",
# "linkQosToServer": "pass-through",
# "loadBalancingMode": "round-robin",
# "minActiveMembers": 0,
# "minUpMembers": 0,
# "minUpMembersAction": "failover",
# "minUpMembersChecking": "disabled",
# "monitor": "/Common/gateway_icmp ",
# "queueDepthLimit": 0,
# "queueOnConnectionLimit": "disabled",
# "queueTimeLimit": 0,
# "reselectTries": 0,
# "slowRampTime": 10,
# "membersReference": {
# "link": "url-for-rest-request-for-pool-members",
# "isSubcollection": true
# }
# }
## ,(repeated as needed for additional pools)
# ]
#}
"""
global f5rest_url
return (get_f5json(f5rest_url + 'ltm/pool'))
| 5,162 |
def set_runner_properties(timestep, infguard=False, profile_nodenet=False, profile_world=False, log_levels={}, log_file=None):
"""Sets the speed of the nodenet calculation in ms.
Argument:
timestep: sets the calculation speed.
"""
if log_file:
if not tools.is_file_writeable(log_file):
return False, "Can not write to specified log file."
logger.set_logfile(log_file)
runner_config['log_file'] = log_file
if log_levels:
set_logging_levels(log_levels)
runner_config['runner_timestep'] = timestep
runner_config['runner_infguard'] = bool(infguard)
runner_config['profile_nodenet'] = bool(profile_nodenet)
runner_config['profile_world'] = bool(profile_world)
runner['timestep'] = timestep
return True, ""
| 5,163 |
def merge_aeroMap(tixi, aeromap_uid_1,aeromap_uid_2,aeromap_uid_merge,
keep_originals = True):
""" Merge two existing aeroPerformanceMap into a new one
Function 'merge_aeroMap' merge two aeroMap into one, an option
allow to keep or not the orignal ones.
Args:
tixi (handles): TIXI Handle of the CPACS file
aeromap_uid_1 (str): UID of the first aeroMap to merge
aeromap_uid_2 (str): UID of the second aeroMap to merge
aeromap_uid_merge (str): UID of the merged aeroMap
delete (boolean): Delete orignal aeroMap
"""
# Check aeroMaps
check_aeromap(tixi, aeromap_uid_1)
check_aeromap(tixi, aeromap_uid_2)
# Create AeroCoefficient objects
Aero1 = get_aeromap(tixi,aeromap_uid_1)
Aero2 = get_aeromap(tixi,aeromap_uid_2)
    # Create an empty aeroMap and AeroCoefficient object to receive values
description = 'This aeroMap is a merge of ' + aeromap_uid_1 + ' and ' + aeromap_uid_2
create_empty_aeromap(tixi, aeromap_uid_merge, description)
MergeAero = AeroCoefficient()
MergeAero.alt = Aero1.alt + Aero2.alt
MergeAero.mach = Aero1.mach + Aero2.mach
MergeAero.aoa = Aero1.aoa + Aero2.aoa
MergeAero.aos = Aero1.aos + Aero2.aos
MergeAero.cl = Aero1.cl + Aero2.cl
MergeAero.cd = Aero1.cd + Aero2.cd
MergeAero.cs = Aero1.cs + Aero2.cs
MergeAero.cml = Aero1.cml + Aero2.cml
MergeAero.cmd = Aero1.cmd + Aero2.cmd
MergeAero.cms = Aero1.cms + Aero2.cms
MergeAero.sort_by_key('aoa')
save_parameters(tixi,aeromap_uid_merge,MergeAero)
save_coefficients(tixi,aeromap_uid_merge,MergeAero)
if not keep_originals:
aeroMap_xpath_1 = tixi.uIDGetXPath(aeromap_uid_1)
tixi.removeElement(aeroMap_xpath_1)
log.info(aeromap_uid_1 + ' has been removed from the CPACS file')
aeroMap_xpath_2 = tixi.uIDGetXPath(aeromap_uid_2)
tixi.removeElement(aeroMap_xpath_2)
log.info(aeromap_uid_2 + ' has been removed from the CPACS file')
| 5,164 |
def chunk_sum(vec, chunksize):
"""Computes the sums of chunks of points in a vector.
"""
Nchunks = len(vec)//chunksize
end = Nchunks*chunksize
arr = np.reshape(vec[:end], [Nchunks, chunksize])
sums = np.sum(arr, 1)
return sums
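# Example: sums over consecutive chunks of 5 points; a trailing remainder
# shorter than `chunksize` is dropped by the integer division above.
import numpy as np
print(chunk_sum(np.arange(10), 5))   # [10 35]
print(chunk_sum(np.arange(12), 5))   # [10 35]  (last two points ignored)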
| 5,165 |
def test_catalog_generate_failures(tmp_trestle_dir: pathlib.Path, monkeypatch: MonkeyPatch) -> None:
"""Test failures of author catalog."""
# disallowed output name
test_args = 'trestle author catalog-generate -n foo -o profiles'.split()
monkeypatch.setattr(sys, 'argv', test_args)
assert Trestle().run() == 1
# catalog doesn't exist
test_args = 'trestle author catalog-generate -n foo -o my_md'.split()
monkeypatch.setattr(sys, 'argv', test_args)
assert Trestle().run() == 1
# bad yaml
bad_yaml_path = str(test_utils.YAML_TEST_DATA_PATH / 'bad_simple.yaml')
test_args = f'trestle author catalog-generate -n foo -o my_md -y {bad_yaml_path}'.split()
monkeypatch.setattr(sys, 'argv', test_args)
assert Trestle().run() == 1
| 5,166 |
def install_custom_app(app, app_url, app_trigger = "False"):
"""this function is used to install custom apps"""
if app.endswith(".zip"):
app = app.split(".")[0]
if app.endswith(".git"):
app = app.split(".")[0]
if not os.path.exists(wapps_dir_path):
os.mkdir(wapps_dir)
app_url = app_url.split(" ")
directory = app
if app_url[0].endswith(".git"):
if len(app_url) == 3:
repo_url = app_url[0]
user_branch = app_url[2]
else:
repo_url = app_url[0]
user_branch = 'master'
tempdir = os.path.join(BASE_DIR, directory)
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
os.mkdir(tempdir)
try:
Repo.clone_from(repo_url, tempdir, branch=user_branch)
except:
try:
print(colored("\nFailed to fetch "+app+" app data from git due to poor internet connection, retrying in a moment...", "red"))
time.sleep(4)
Repo.clone_from(repo_url, tempdir, branch=user_branch)
except:
raise
#check if app already exists then compare version and ask for user input
existing_app_path = os.path.join(BASE_DIR, "wapps", directory)
new_app_path = os.path.join(tempdir)
if os.path.exists(existing_app_path):
usr_choice, message = pre_install_checks(app, existing_app_path, new_app_path, app_trigger)
if usr_choice not in ["y", "Y", "yes", "YES", "n", "N", "NO", "no"]:
if message != "Reinstall":
print(colored("Invalid choice!, continuing with the default choice: (Y)", "yellow"))
usr_choice = "Y"
else:
print(colored("Invalid choice!, continuing with the default choice: (N)", "yellow"))
usr_choice = "N"
if usr_choice in ["Y", "y", "yes", "YES"]:
if message == "Reinstall":
print("Reinstalling the " + app + " app")
elif message == "Upgrade":
print("Upgrading the " + app + " app")
elif message == "Downgrade":
print("Downgrading the " + app + " app")
remove_appurl_from_urls_custom(app, "wapps")
remove_app_from_settings_custom(app, "wapps")
remove_cust_app_source(app, "wapps")
source = os.path.join(tempdir)
destination = os.path.join(wapps_dir_path, directory)
shutil.move(source, destination)
configure_urls_file_custom(app, "wapps")
configure_settings_file_custom_app(app)
return "True"
else:
print(colored(message +": Skipped", "yellow"))
return "False"
else:
print("Installing: " + app)
create_log("Installing: " + app)
source = os.path.join(tempdir)
destination = os.path.join(wapps_dir_path, directory)
if os.path.exists(destination):
shutil.rmtree(destination)
shutil.move(source, destination)
configure_urls_file_custom(app, "wapps")
configure_settings_file_custom_app(app)
return "True"
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
else:
#check if app already exists then compare version and ask for user input
existing_app_path = os.path.join(wapps_dir_path, directory)
new_app_path = app_url[0]
destination = existing_app_path
app_path = app_url[0]
tempdir = os.path.join(BASE_DIR, directory)
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
os.mkdir(tempdir)
if app_path.endswith(".zip"):
if os.path.exists(app_path):
zip_ref = zipfile.ZipFile(app_path, 'r')
zip_ref.extractall(tempdir)
zip_ref.close()
app_path = new_app_path = os.path.join(tempdir, app)
if os.path.exists(existing_app_path):
usr_choice, message = pre_install_checks(app, existing_app_path, new_app_path, app_trigger)
if usr_choice not in ["y", "Y", "yes", "YES", "n", "N", "NO", "no"]:
print(colored("Invalid choice!, it must be (y/n)", "yellow"))
usr_choice, message = pre_install_checks(app, existing_app_path, new_app_path, app_trigger)
if usr_choice not in ["y", "Y", "yes", "YES", "n", "N", "NO", "no"]:
print(colored("Invalid choice!, continuing with the default choice: (y)", "yellow"))
usr_choice = "Y"
if usr_choice in ["Y", "y", "yes", "YES"]:
if message == "Reinstall":
print("Reinstalling the " + app + " app")
elif message == "Upgrade":
print("Upgrading the " + app + " app")
elif message == "Downgrade":
print("Downgrading the " + app + " app")
remove_appurl_from_urls_custom(app, "wapps")
remove_app_from_settings_custom(app, "wapps")
remove_cust_app_source(app, "wapps")
if os.path.isdir(app_path):
shutil.copytree(app_path, destination)
configure_urls_file_custom(app, "wapps")
configure_settings_file_custom_app(app)
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
return "True"
else:
print(colored(message +": Skipped", "yellow"))
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
return "False"
else:
print("Installing: " + app)
create_log("Installing: " + app)
if os.path.isdir(app_path):
shutil.copytree(app_path, destination)
configure_urls_file_custom(app, "wapps")
configure_settings_file_custom_app(app)
return "True"
| 5,167 |
def get_feature_definitions(df, feature_group):
"""
Get datatypes from pandas DataFrame and map them
to Feature Store datatypes.
:param df: pandas.DataFrame
:param feature_group: FeatureGroup
:return: list
"""
# Dtype int_, int8, int16, int32, int64, uint8, uint16, uint32
# and uint64 are mapped to Integral feature type.
# Dtype float_, float16, float32 and float64
# are mapped to Fractional feature type.
# string dtype is mapped to String feature type.
# Our schema of our data that we expect
# _after_ SageMaker Processing
feature_definitions = []
for column in df.columns:
feature_type = feature_group._DTYPE_TO_FEATURE_DEFINITION_CLS_MAP.get(
str(df[column].dtype), None
)
feature_definitions.append(
FeatureDefinition(column, feature_type)
) # you can alternatively define your own schema
return feature_definitions
| 5,168 |
def draw_2d_wp_basis(shape, keys, fmt='k', plot_kwargs={}, ax=None,
label_levels=0):
"""Plot a 2D representation of a WaveletPacket2D basis."""
coords, centers = _2d_wp_basis_coords(shape, keys)
if ax is None:
fig, ax = plt.subplots(1, 1)
else:
fig = ax.get_figure()
for coord in coords:
ax.plot(coord[0], coord[1], fmt)
ax.set_axis_off()
ax.axis('square')
if label_levels > 0:
for key, c in centers.items():
if len(key) <= label_levels:
ax.text(c[0], c[1], key,
horizontalalignment='center',
verticalalignment='center')
return fig, ax
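# Usage sketch, assuming PyWavelets (pywt) is installed and the module-level
# helper `_2d_wp_basis_coords` used above is available.
import numpy as np
import pywt

shape = (64, 64)
wp = pywt.WaveletPacket2D(data=np.zeros(shape), wavelet='db2')
keys = [node.path for node in wp.get_level(2)]   # full level-2 basis
fig, ax = draw_2d_wp_basis(shape, keys, label_levels=2)
fig.savefig('wp2d_basis.png')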
| 5,169 |
def contributor_translations(settings, user_a, project_a):
"""
    Set up a sample contributor with a random set of translations.
"""
translations = OrderedDict()
for i in range(6):
date = make_aware(datetime(2016, 12, 1) - timedelta(days=i))
translations_count = 2
translations.setdefault((date, translations_count), []).append(
sorted(
TranslationFactory.create_batch(
translations_count,
date=date,
user=user_a,
entity__resource__project=project_a,
),
key=lambda t: t.pk,
reverse=True,
)
)
settings.CONTRIBUTORS_TIMELINE_EVENTS_PER_PAGE = 2
yield translations
| 5,170 |
def build_normalizer(signal, sample_weight=None):
"""Prepares normalization function for some set of values
transforms it to uniform distribution from [0, 1]. Example of usage:
>>>normalizer = build_normalizer(signal)
>>>pylab.hist(normalizer(background))
>>># this one should be uniform in [0,1]
>>>pylab.hist(normalizer(signal))
:param numpy.array signal: shape = [n_samples] with floats
:param numpy.array sample_weight: shape = [n_samples], non-negative weights associated to events.
"""
sample_weight = check_sample_weight(signal, sample_weight)
assert numpy.all(sample_weight >= 0.), 'sample weight must be non-negative'
sorter = numpy.argsort(signal)
signal, sample_weight = signal[sorter], sample_weight[sorter]
predictions = numpy.cumsum(sample_weight) / numpy.sum(sample_weight)
def normalizing_function(data):
return numpy.interp(data, signal, predictions)
return normalizing_function
| 5,171 |
def main(target_dir=".", depth=2):
"""Catch main function."""
print(target_dir)
if target_dir == ".":
td_name = os.getcwd().split(
"/")[-1]
elif target_dir == "..":
td_name = os.getcwd().split(
"/")[-2]
else:
td_name = target_dir
directory = 'generated/'
if not os.path.exists(directory):
os.makedirs(directory)
output = directory + 'folder-tree-for-' + \
td_name.replace("/", "-")
while os.path.isfile(f'{output}.dot'):
output += "0"
build_tree(output, td_name, target_dir, depth)
print(f'Wrote folder tree as [{output}.dot]\nComputing pdf...')
g = pgv.AGraph(f'{output}.dot')
g.draw(f'{output}.pdf', prog="dot", args='-Grankdir=LR')
print(f'Wrote pdf as [{output}.pdf]')
| 5,172 |
def plot_tseries_together(data, onset=None, years=None, suptitle='',
figsize=(14,10), legendsize=10,
legendloc='lower right', nrow=3, ncol=4,
yearnm='year', daynm='day', standardize=True,
label_attr=None, data_style=None, onset_style=None,
show_days=False):
"""Plot multiple daily timeseries together each year.
Parameters
----------
data : xray.Dataset
Dataset of timeseries variables to plot together.
onset : ndarray or dict of ndarrays, optional
Array of onset day for each year, or dict of onset arrays (e.g.
to compare onset days from different methods).
years : ndarray, optional
Subset of years to include. If omitted, all years are included.
suptitle : str, optional
Supertitle for plot.
figsize : 2-tuple, optional
Size of each figure.
legendsize : int, optional
Font size for legend
legendloc : str, optional
Legend location
nrow, ncol : int, optional
Number of rows, columns in each figure.
yearnm, daynm : str, optional
Name of year and day dimensions in data.
standardize : bool, optional
If True, standardize each timeseries by dividing by its
standard deviation.
label_attr : str, optional
Attribute of each data variable to use for labels. If omitted,
then the variable name is used.
data_style, onset_style : list or dict, optional
Matlab-style strings for each data variable or onset index.
show_days : bool, optional
If True, annotate each subplot with a textbox showing the
onset days.
"""
if years is None:
# All years
years = data[yearnm].values
data = atm.subset(data, {yearnm : (years, None)})
if label_attr is not None:
labels = {nm : data[nm].attrs[label_attr] for nm in data.data_vars}
if onset is not None:
if isinstance(onset, dict):
if onset_style is None:
onset_style = {key : 'k' for key in onset.keys()}
else:
onset = {'onset' : onset}
if onset_style is None:
onset_style = {'onset' : 'k'}
textpos = {key : (0.05, 0.9 - 0.1*i) for i, key in enumerate(onset)}
# Plot each year
for y, year in enumerate(years):
df = atm.subset(data, {yearnm : (year, None)}).to_dataframe()
df.drop(yearnm, axis=1, inplace=True)
if label_attr is not None:
df.rename(columns=labels, inplace=True)
if standardize:
for key in df.columns:
df[key] = (df[key] - np.nanmean(df[key])) / np.nanstd(df[key])
ylabel = 'Standardized Timeseries'
else:
ylabel = 'Timeseries'
if y % (nrow * ncol) == 0:
fig, axes = plt.subplots(nrow, ncol, figsize=figsize, sharex=True)
plt.subplots_adjust(left=0.08, right=0.95, wspace=0.2, hspace=0.2)
plt.suptitle(suptitle)
yplot = 1
else:
yplot += 1
i, j = atm.subplot_index(nrow, ncol, yplot)
ax = axes[i-1, j-1]
df.plot(ax=ax, style=data_style)
ax.grid()
if yplot == 1:
ax.legend(fontsize=legendsize, loc=legendloc)
else:
ax.legend_.remove()
if onset is not None:
for key in onset:
d0 = onset[key][y]
ax.plot([d0, d0], ax.get_ylim(), onset_style[key])
if show_days:
atm.text(d0, textpos[key], ax=ax, color=onset_style[key])
if j == 1:
ax.set_ylabel(ylabel)
if i == nrow:
ax.set_xlabel('Day')
else:
ax.set_xlabel('')
ax.set_title(year)
| 5,173 |
def get_angler_tag_recoveries(project_slug, tagstat="A"):
"""This is a helper function used by tags_applied_project(). It uses
raw sql to retrieve all of the non-MNR recoveries of tags applied
in a particular project. Only recap's with both a lat and lon and
of the same species as the original tagging event are returned.
Arguments:
- `project_slug`: unique identify for project in which tags were applied
- `tagstat`: the tag status of the tags in project identified by
project slug. 'A' returns agler recaps of tags applied in the
project, 'C' will return angler recaps of tags also recaptured
by the OMNR
Returns dictionary with the following elements:
queryset - a raw sql queryset.
Nobs - the number of records in the queryset
TODO - TEST tagstat argument
"""
sql = """
SELECT species.spc_nmco as common_name,
angler.first_name || ' ' || angler.last_name as reported_by,
angler.id as reported_by_id,
recovery.*
FROM tfat_recovery recovery
join tfat_report report on report.id=recovery.report_id
join tfat_joepublic angler on angler.id=report.reported_by_id
JOIN tfat_encounter encounter
ON encounter.tagid=recovery.tagid
AND encounter.species_id=recovery.species_id
JOIN tfat_project proj ON proj.id=encounter.project_id
join common_species species on species.id=recovery.species_id
WHERE encounter.tagstat='{tagstat}'
AND proj.slug=%s
ORDER BY recovery.recovery_date
"""
# sql = '''
# select recovery.* from tfat_recovery recovery where tagid in (
# select tagid from tfat_encounter encounter join tfat_project project on project.id=encounter.project_id where slug=%s and tagstat='{tagstat}'
# ) order by recovery_date
# '''
sql = sql.format(**{"tagstat": tagstat})
queryset = Recovery.objects.raw(sql, [project_slug])
prefetch_related_objects(queryset, "species", "report", "report__reported_by")
nobs = len([x.id for x in queryset])
return {"queryset": queryset, "nobs": nobs}
| 5,174 |
def sort(pipe: Pipe):
"""Sort values by columns."""
pipe.matrix.sort_values(by=pipe.matrix.columns.values.tolist(), axis=0, inplace=True)
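# Minimal sketch: `Pipe` is only accessed through its `matrix` attribute (a
# pandas DataFrame), so a duck-typed stand-in is enough for illustration.
import pandas as pd
from types import SimpleNamespace

pipe = SimpleNamespace(matrix=pd.DataFrame({"a": [3, 1, 2], "b": [9, 7, 8]}))
sort(pipe)            # sorts the rows in place by column "a", then "b"
print(pipe.matrix)    # column "a" now reads 1, 2, 3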
| 5,175 |
def get_phased_trajectory(init_state: np.ndarray,
update_fn: Callable) -> Tuple[np.ndarray, HashableNdArray]:
"""
evolve an initial state until it reaches a limit cycle
Parameters
----------
init_state
update_fn
Returns
-------
trajectory, phase-point pair
"""
state = init_state
trajectory = list()
trajectory_set = set() # set lookup should be faster
# compute state by state until we have a repeat
hashable_state = HashableNdArray(state)
while hashable_state not in trajectory_set:
trajectory.append(hashable_state)
trajectory_set.add(hashable_state)
state = update_fn(state)
hashable_state = HashableNdArray(state)
# separate trajectory into in-bound and limit-cycle parts
repeated_state = HashableNdArray(state)
repeated_state_index = trajectory.index(repeated_state)
limit_cycle = trajectory[repeated_state_index:]
# find state in limit cycle with smallest hash (i.e. smallest lexicographic
# ordering if there is no integer overflow)
# this is our phase fixing point
cycle_min_index: int = 0
cycle_min: int = hash(limit_cycle[0])
for idx in range(1, len(limit_cycle)):
nxt_hash: int = hash(limit_cycle[idx])
if nxt_hash < cycle_min:
cycle_min_index = idx
cycle_min = nxt_hash
# get trajectory with phase
phase_idx: int = len(trajectory) - len(limit_cycle) + cycle_min_index
phased_trajectory = np.array(
[hashable.array for hashable in trajectory[:phase_idx]], dtype=np.int64
)
return phased_trajectory, trajectory[phase_idx]
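# Usage sketch, assuming `HashableNdArray` (used inside the function) comes from
# the same module; the update rule here is an arbitrary cyclic shift chosen only
# to produce a short limit cycle.
import numpy as np

init_state = np.array([1, 0, 0], dtype=np.int64)
update_fn = lambda state: np.roll(state, 1)
inbound, phase_point = get_phased_trajectory(init_state, update_fn)
# `inbound` holds the states visited before the phase-fixing point;
# `phase_point` is the limit-cycle state with the smallest hash.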
| 5,176 |
def _execute_gramtools_cpp_build(build_report, action, build_paths, args):
"""Executes `gram build` backend."""
log.info("Running backend build")
command = [
common.gramtools_exec_fpath,
"build",
"--gram_dir",
str(args.gram_dir),
"--ref",
str(args.reference),
"--kmer_size",
str(args.kmer_size),
"--max_threads",
str(args.max_threads),
"--all_kmers", # Currently always build all kmers of given size
]
if args.debug:
command += ["--debug"]
command_result = common.run_subprocess(command)
# Add extra reporting
build_report["processes"][action] = collections.OrderedDict(
[
("command", " ".join(command)),
("stdout", command_result.stdout.splitlines()),
("stderr", command_result.stderr.splitlines()),
]
)
if not command_result.success:
raise Exception(f"while running backend build:\n{command_result.stderr}")
| 5,177 |
async def test_get_user_by_id(
client: _TestClient,
mocker: MockFixture,
token: MockFixture,
) -> None:
"""Should return OK, and a body containing one user."""
mocker.patch(
"user_service.adapters.users_adapter.UsersAdapter.get_user_by_id",
side_effect=mock_user_object,
)
mocker.patch(
"user_service.adapters.users_adapter.UsersAdapter.get_user_by_username",
side_effect=mock_user,
)
mocker.patch(
"user_service.services.AuthorizationService.authorize",
side_effect=mock_authorize,
)
headers = {
hdrs.AUTHORIZATION: f"Bearer {token}",
}
resp = await client.get(f"/users/{ID}", headers=headers)
assert resp.status == 200
assert "application/json" in resp.headers[hdrs.CONTENT_TYPE]
user = await resp.json()
assert type(user) is dict
assert user["id"] == ID
| 5,178 |
async def test_reauth_unknown_error(hass: HomeAssistant) -> None:
"""Test we show user form on unknown error."""
with patch(
"homeassistant.components.mazda.config_flow.MazdaAPI.validate_credentials",
side_effect=Exception,
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "reauth"}, data=FIXTURE_USER_INPUT
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
FIXTURE_USER_INPUT_REAUTH,
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "reauth"
assert result2["errors"] == {"base": "unknown"}
| 5,179 |
def list_attribute(db_ip, db_user, db_pass, db_name, log_object):
""" List attributes in database """
from my_library import test_socket, ClassDB
if test_socket(db_ip, 3306, log_object) == 0:
o_db = ClassDB('mysql', (db_ip, '3306', db_user, db_pass, db_name), log_object)
result = o_db.select('attribute', "id > 0")
print(result)
| 5,180 |
def test_country_unicode_insert(session):
"""Country 002: Insert a single record with Unicode characters into Countries table and verify data."""
ivory_coast = mco.Countries(name=u"Côte d'Ivoire", confederation=enums.ConfederationType.africa)
session.add(ivory_coast)
country = session.query(mco.Countries).filter_by(confederation=enums.ConfederationType.africa).one()
assert country.name == u"Côte d'Ivoire"
assert country.confederation.value == 'CAF'
| 5,181 |
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the OpenWeatherMap weather platform."""
name = config.get(CONF_NAME)
phone_id = config.get(PHONE_ID)
device_ids = []
for device_id in config[CONF_DEVICE_ID]:
device_ids.append(device_id)
mad = MobileAlertsData(phone_id, device_ids)
async_add_entities(
[MobileAlertsWeather(name, mad)],
True,
)
| 5,182 |
def exponential_backoff(func):
"""
Retries a Boto3 call up to 5 times if request rate limits are hit.
The time waited between retries increases exponentially. If rate limits are
hit 5 times, exponential_backoff raises a
:py:class:sceptre.exceptions.RetryLimitExceededException().
:param func: a function that uses boto calls
:type func: func
:returns: The decorated function.
:rtype: func
:raises: sceptre.exceptions.RetryLimitExceededException
"""
logger = logging.getLogger(__name__)
@wraps(func)
def decorated(*args, **kwargs):
max_retries = 5
attempts = 0
while attempts < max_retries:
try:
return func(*args, **kwargs)
except ClientError as e:
if e.response["Error"]["Code"] == "Throttling":
logger.error("Request limit exceeded, pausing...")
time.sleep(2 ** attempts)
attempts += 1
else:
raise e
raise RetryLimitExceededError(
"Exceeded request limit {0} times. Aborting.".format(max_retries)
)
return decorated
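# Usage sketch: wrap any function whose body makes boto3 calls so that
# "Throttling" errors are retried with exponential backoff. The client and the
# stack-listing logic below are illustrative placeholders.
@exponential_backoff
def list_stack_names(cloudformation_client):
    response = cloudformation_client.describe_stacks()
    return [stack["StackName"] for stack in response["Stacks"]]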
| 5,183 |
def create_result_as_html(template_data):
"""
Generates html file containing results of web pages comparison
:param template_data: Contains data required to create html file.
:type template_data: TemplateData.
"""
template = JINJA_ENVIRONMENT.get_template(template_data.template_path)
template_values = {
'first': template_data.first_web_page,
'second': template_data.second_web_page,
'categories': template_data.text_similarity,
'images': template_data.image_similarity,
'first_terms': sorted(
template_data.first_web_page.content.terms_membership.iteritems(),
key=itemgetter(1), reverse=True
)[0:10],
'second_terms': sorted(
template_data.second_web_page.content.terms_membership.iteritems(),
key=itemgetter(1), reverse=True
)[0:10]
}
html = template.render(template_values)
with open(template_data.html_path, "w") as text_file:
text_file.write(html)
| 5,184 |
def post_config_adobe_granite_saml_authentication_handler(key_store_password=None, key_store_password_type_hint=None, service_ranking=None, service_ranking_type_hint=None, idp_http_redirect=None, idp_http_redirect_type_hint=None, create_user=None, create_user_type_hint=None, default_redirect_url=None, default_redirect_url_type_hint=None, user_id_attribute=None, user_id_attribute_type_hint=None, default_groups=None, default_groups_type_hint=None, idp_cert_alias=None, idp_cert_alias_type_hint=None, add_group_memberships=None, add_group_memberships_type_hint=None, path=None, path_type_hint=None, synchronize_attributes=None, synchronize_attributes_type_hint=None, clock_tolerance=None, clock_tolerance_type_hint=None, group_membership_attribute=None, group_membership_attribute_type_hint=None, idp_url=None, idp_url_type_hint=None, logout_url=None, logout_url_type_hint=None, service_provider_entity_id=None, service_provider_entity_id_type_hint=None, assertion_consumer_service_url=None, assertion_consumer_service_url_type_hint=None, handle_logout=None, handle_logout_type_hint=None, sp_private_key_alias=None, sp_private_key_alias_type_hint=None, use_encryption=None, use_encryption_type_hint=None, name_id_format=None, name_id_format_type_hint=None, digest_method=None, digest_method_type_hint=None, signature_method=None, signature_method_type_hint=None, user_intermediate_path=None, user_intermediate_path_type_hint=None): # noqa: E501
"""post_config_adobe_granite_saml_authentication_handler
# noqa: E501
:param key_store_password:
:type key_store_password: str
:param key_store_password_type_hint:
:type key_store_password_type_hint: str
:param service_ranking:
:type service_ranking: int
:param service_ranking_type_hint:
:type service_ranking_type_hint: str
:param idp_http_redirect:
:type idp_http_redirect: bool
:param idp_http_redirect_type_hint:
:type idp_http_redirect_type_hint: str
:param create_user:
:type create_user: bool
:param create_user_type_hint:
:type create_user_type_hint: str
:param default_redirect_url:
:type default_redirect_url: str
:param default_redirect_url_type_hint:
:type default_redirect_url_type_hint: str
:param user_id_attribute:
:type user_id_attribute: str
:param user_id_attribute_type_hint:
:type user_id_attribute_type_hint: str
:param default_groups:
:type default_groups: List[str]
:param default_groups_type_hint:
:type default_groups_type_hint: str
:param idp_cert_alias:
:type idp_cert_alias: str
:param idp_cert_alias_type_hint:
:type idp_cert_alias_type_hint: str
:param add_group_memberships:
:type add_group_memberships: bool
:param add_group_memberships_type_hint:
:type add_group_memberships_type_hint: str
:param path:
:type path: List[str]
:param path_type_hint:
:type path_type_hint: str
:param synchronize_attributes:
:type synchronize_attributes: List[str]
:param synchronize_attributes_type_hint:
:type synchronize_attributes_type_hint: str
:param clock_tolerance:
:type clock_tolerance: int
:param clock_tolerance_type_hint:
:type clock_tolerance_type_hint: str
:param group_membership_attribute:
:type group_membership_attribute: str
:param group_membership_attribute_type_hint:
:type group_membership_attribute_type_hint: str
:param idp_url:
:type idp_url: str
:param idp_url_type_hint:
:type idp_url_type_hint: str
:param logout_url:
:type logout_url: str
:param logout_url_type_hint:
:type logout_url_type_hint: str
:param service_provider_entity_id:
:type service_provider_entity_id: str
:param service_provider_entity_id_type_hint:
:type service_provider_entity_id_type_hint: str
:param assertion_consumer_service_url:
:type assertion_consumer_service_url: str
:param assertion_consumer_service_url_type_hint:
:type assertion_consumer_service_url_type_hint: str
:param handle_logout:
:type handle_logout: bool
:param handle_logout_type_hint:
:type handle_logout_type_hint: str
:param sp_private_key_alias:
:type sp_private_key_alias: str
:param sp_private_key_alias_type_hint:
:type sp_private_key_alias_type_hint: str
:param use_encryption:
:type use_encryption: bool
:param use_encryption_type_hint:
:type use_encryption_type_hint: str
:param name_id_format:
:type name_id_format: str
:param name_id_format_type_hint:
:type name_id_format_type_hint: str
:param digest_method:
:type digest_method: str
:param digest_method_type_hint:
:type digest_method_type_hint: str
:param signature_method:
:type signature_method: str
:param signature_method_type_hint:
:type signature_method_type_hint: str
:param user_intermediate_path:
:type user_intermediate_path: str
:param user_intermediate_path_type_hint:
:type user_intermediate_path_type_hint: str
:rtype: None
"""
return 'do some magic!'
| 5,185 |
def _Disable( module_file ):
"""Disables the loading of a module for the current session."""
_module_for_module_file[ module_file ] = None
| 5,186 |
def describe_shape(tree: KDTree, mass: float, name: Optional[str] = None, pole: Optional[str] = None):
"""
Describe the statistics of a tessellated shape to std out.
:param tree: The KDTree containing the tesselated shapes
:param mass: The mass of the object, typically computed from GM
:param name: The optional name of the target
:param pole: The optional pole file for the object
"""
com, volume, surface_area, inertia, com_inertia, moments, rotation_matrix = compute_stats(tree.shapes, mass)
if name is not None:
print(name)
if pole is not None:
print('Pole: {}'.format(pole))
print('COM (km): {}'.format(com.ravel()))
print('Volume (km3): {}'.format(volume))
print('Surface Area (km2): {}'.format(surface_area))
print('Inertia Matrix:')
print(inertia)
print('COM Relative Inertia Matrix:')
print(com_inertia)
print('moments of inertia:')
print(moments)
print('rotation to inertia frame')
print(rotation_matrix)
print('com in inertia frame: {}'.format(rotation_matrix @ com.ravel()))
| 5,187 |
def test_ll2xy_edge():
"""Testing edge cases, literally"""
x = [0, 0, 0, 100, 181, 181, 181]
y = [0, 100, 191, 191, 191, 100, 0]
lon, lat = xy2ll(A, x, y)
x1, y1 = ll2xy(A, lon, lat)
# print(x1)
# print(y1)
assert (~np.any(np.isnan(x1)))
assert (~np.any(np.isnan(y1)))
| 5,188 |
def test_dataproc_operator_execute_failure_async(mock_submit_job, event):
"""Tests that an AirflowException is raised in case of error event"""
mock_submit_job.return_value.reference.job_id = TEST_JOB_ID
task = DataprocSubmitJobOperatorAsync(
task_id="task-id", job=SPARK_JOB, region=TEST_REGION, project_id=TEST_PROJECT_ID
)
with pytest.raises(AirflowException):
task.execute_complete(context=None, event=event)
| 5,189 |
def get_req_env(var_name: str) -> str:
"""
    Try to get an environment variable and exit if it is not available.
"""
try:
return os.environ[var_name]
except KeyError:
print(f"Missing required environment variable '{var_name}'.")
exit(1)
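# Example: fail fast at startup if a required setting is missing
# (the variable name below is hypothetical).
database_url = get_req_env("DATABASE_URL")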
| 5,190 |
def print_value_function(value_func, grid_width):
"""Print the value function in human-readable format.
Parameters
----------
value_func: np.ndarray
      Array of state-to-value mappings
grid_width: int
width of the map: eg, 4 (4x4 environment), 8 (8x8 environment)
"""
delimiter = '\t'
row = []
    for state in range(len(value_func)):
row.append("%.5f" % value_func[state])
if np.mod(state + 1, grid_width) == 0:
print(delimiter.join(row))
row = []
print(" ")
| 5,191 |
def test_order(problem, method):
"""Test order of time discretization.
"""
# TODO add test for spatial order
# Methods together with the expected order of convergence.
helpers.assert_time_order(problem, method)
return
| 5,192 |
def _pcheck(block):
""" Helper for multiprocesses: check a block of logs
Args:
block List[List[str], int]: lines, block_id
Returns:
[type]: [description]
"""
results = []
lines, block_id = block
for li, line in enumerate(lines):
json_line = json.loads(line)
result = [
"%s: %s" % (e.error_type, e.message)
for e in [
validate_normalized(json_line),
check_timestamp_digits(json_line["timestamp"])
if "timestamp" in json_line
else None,
check_time(json_line),
]
if e
]
global_line_number = block_id * BLOCK_SIZE + li
results.append((global_line_number, result))
return results
| 5,193 |
def fetch_exon_locations(database):
""" Queries the database to create a dictionary mapping exon IDs to
the chromosome, start, end, and strand of the exon """
conn = sqlite3.connect(database)
cursor = conn.cursor()
query = """
SELECT
e.edge_ID,
loc1.chromosome,
MIN(loc1.position,loc2.position),
MAX(loc1.position,loc2.position),
e.strand
FROM edge e
LEFT JOIN location loc1 ON e.v1 = loc1.location_ID
LEFT JOIN location loc2 ON e.v2 = loc2.location_ID
WHERE e.edge_type = 'exon';"""
cursor.execute(query)
exon_location_tuples = cursor.fetchall()
# Create dictionary
exon_locations = {}
for loc_tuple in exon_location_tuples:
exon_ID = loc_tuple[0]
exon_locations[exon_ID] = loc_tuple[1:]
conn.close()
return exon_locations
| 5,194 |
def question_1f_sanity_check(model, src_sents, tgt_sents, vocab):
""" Sanity check for question 1f.
Compares student output to that of model with dummy data.
"""
print ("-"*80)
print("Running Sanity Check for Question 1f: Step")
print ("-"*80)
reinitialize_layers(model)
# Inputs
Ybar_t = torch.load('./sanity_check_en_es_data/Ybar_t.pkl')
dec_init_state = torch.load('./sanity_check_en_es_data/dec_init_state.pkl')
enc_hiddens = torch.load('./sanity_check_en_es_data/enc_hiddens.pkl')
enc_masks = torch.load('./sanity_check_en_es_data/enc_masks.pkl')
enc_hiddens_proj = torch.load('./sanity_check_en_es_data/enc_hiddens_proj.pkl')
# Output
dec_state_target = torch.load('./sanity_check_en_es_data/dec_state.pkl')
o_t_target = torch.load('./sanity_check_en_es_data/o_t.pkl')
e_t_target = torch.load('./sanity_check_en_es_data/e_t.pkl')
# Run Tests
with torch.no_grad():
dec_state_pred, o_t_pred, e_t_pred= model.step(Ybar_t, dec_init_state, enc_hiddens, enc_hiddens_proj, enc_masks)
assert(np.allclose(dec_state_target[0].numpy(), dec_state_pred[0].numpy())), "decoder_state[0] is incorrect: it should be:\n {} but is:\n{}".format(dec_state_target[0], dec_state_pred[0])
print("dec_state[0] Sanity Checks Passed!")
assert(np.allclose(dec_state_target[1].numpy(), dec_state_pred[1].numpy())), "decoder_state[1] is incorrect: it should be:\n {} but is:\n{}".format(dec_state_target[1], dec_state_pred[1])
print("dec_state[1] Sanity Checks Passed!")
assert(np.allclose(o_t_target.numpy(), o_t_pred.numpy())), "combined_output is incorrect: it should be:\n {} but is:\n{}".format(o_t_target, o_t_pred)
print("combined_output Sanity Checks Passed!")
assert(np.allclose(e_t_target.numpy(), e_t_pred.numpy())), "e_t is incorrect: it should be:\n {} but is:\n{}".format(e_t_target, e_t_pred)
print("e_t Sanity Checks Passed!")
print("-"*80)
print("All Sanity Checks Passed for Question 1f: Step!")
print("-"*80)
| 5,195 |
def add(request):
"""Displays/processes a form to create a collection."""
data = {}
if request.method == 'POST':
form = forms.CollectionForm(
request.POST, request.FILES,
initial=initial_data_from_request(request))
aform = forms.AddonsForm(request.POST)
if form.is_valid():
collection = form.save(default_locale=request.LANG)
collection.save()
if aform.is_valid():
aform.save(collection)
collection_message(request, collection, 'add')
statsd.incr('collections.created')
log.info('Created collection %s' % collection.id)
return http.HttpResponseRedirect(collection.get_url_path())
else:
data['addons'] = Addon.objects.filter(pk__in=aform.clean_addon())
data['comments'] = aform.clean_addon_comment()
else:
form = forms.CollectionForm()
data.update(form=form, filter=get_filter(request))
return render_cat(request, 'bandwagon/add.html', data)
| 5,196 |
def get_camera_pose_cpp():
"""
Returns camera pose
"""
rospy.wait_for_service('/asr_robot_model_services/GetCameraPose', timeout=5)
pose = rospy.ServiceProxy('/asr_robot_model_services/GetCameraPose',GetPose)
return pose().pose
| 5,197 |
def _convert_model_from_bytearray_to_object(model_bytearray):
"""Converts a tflite model from a bytearray into a parsable object."""
model_object = schema_fb.Model.GetRootAsModel(model_bytearray, 0)
model_object = schema_fb.ModelT.InitFromObj(model_object)
model_object = copy.deepcopy(model_object)
return model_object
| 5,198 |
def hardenPointCurve(*args, **kwargs):
"""
The hardenPointCurve command changes the knots of a curve given a list of control point indices so that the knot corresponding to that control point gets the specified multiplicity.
Returns: `string[]` Object name and node name
"""
pass
| 5,199 |