content
stringlengths 22
815k
| id
int64 0
4.91M
|
---|---|
def unit_parameters(_bb_spine_db_export: dict, _grid_name: str, _node_name: str, _unit_name: str, _time_index,
                    _alternative='Base', _eff_level=1, _p_unit=False,
                    _node_name_if_output=None, _node_name_if_input=None
                    ):
    """
    Translate one Backbone (grid, node, unit) triple into SpineOpt entities.

    :param _bb_spine_db_export: dict export of a Backbone Spine database; keys
        used here: 'relationship_parameter_values', 'relationships' and
        'object_parameter_values'
    :param _grid_name: Backbone grid name (translated to a SpineOpt commodity)
    :param _node_name: name used to search in _bb_spine_db_export
    :param _unit_name: Backbone unit name (translated to a SpineOpt unit)
    :param _time_index: used only for time-variant fuel prices
    :param _alternative: alternative name recorded with every created value
    :param _eff_level: default efficiency level for units without effLevelGroupUnit definition,
        should be set to the highest level in practice
    :param _p_unit: default False, True to enable parsing unit specific values from p_unit
    :param _node_name_if_output: str, set a new name for the sought node (_node_name) if the unit outputs to it
    :param _node_name_if_input: str, set a new name for the sought node (_node_name) if the unit receives its input
    :return: a SpineDBImporter populated with the translated objects,
        relationships and parameter values
    """
    _temp_importer = SpineDBImporter()
    # Parameters 1: from BB_gdx p_gnu_io (SpineDB grid__node__unit__io)
    _parameters_1 = [
        x for x in _bb_spine_db_export['relationship_parameter_values']
        if all([x[0] == 'grid__node__unit__io', x[1][:3] == [_grid_name, _node_name, _unit_name]])
    ]
    # Names of all nodes registered as (fuel) commodities.
    _fuel_commodity = [
        x[1][0] for x in _bb_spine_db_export['relationships'] if x[0] == 'commodity'
    ]
    _unit_capacity = 0
    # Parameters 2: from BB_gdx effLevelGroupUnit (SpineDB efflevel__group__unit)
    # unit_online_type
    _parameters_2 = [
        x for x in _bb_spine_db_export['relationships']
        if all([x[0] == 'efflevel__group__unit', _unit_name in x[1]])
    ]
    # Parameters 3: from BB_gdx p_unit (SpineDB unit)
    _parameters_3 = [
        x for x in _bb_spine_db_export['object_parameter_values']
        if all([x[0] == 'unit', x[1] == _unit_name])
    ]
    # Translate Parameter 1
    # TODO: other bb parameter under the category p_gnu_io, unitSize for investment
    for par in _parameters_1:
        if par[1][3] == 'output':
            if _node_name_if_output:
                # NOTE(review): this rebinds _node_name for the rest of the
                # function (fuel lookup and object creation below), not just
                # for this iteration — confirm that is intended.
                _node_name = _node_name_if_output
            # add unit__to_node relationship for output
            _temp_importer.relationships.append(("unit__to_node", (_unit_name, _node_name)))
            if par[2] == 'capacity':
                # capacity is aggregated in Backbone but SpineOpt requires unit capacity
                _number_of_units = __get_number_of_units(_bb_spine_db_export, _unit_name, default=1)
                _unit_capacity = par[3] / _number_of_units[0]
                _temp_importer.relationship_parameter_values += [
                    ("unit__to_node", [_unit_name, _node_name], "unit_capacity", _unit_capacity, _alternative),
                ]
            elif par[2] == 'conversionCoeff':
                _temp_importer.relationship_parameter_values += [
                    ("unit__to_node", [_unit_name, _node_name], "unit_conv_cap_to_flow", par[3], _alternative),
                ]
            elif par[2] == 'vomCosts':
                _temp_importer.relationship_parameter_values += [
                    ("unit__to_node", [_unit_name, _node_name], "vom_cost", par[3], _alternative),
                ]
        elif par[1][3] == 'input':
            if _node_name_if_input:
                # NOTE(review): same rebinding caveat as for the output case.
                _node_name = _node_name_if_input
            # add unit__from_node relationship for input
            _temp_importer.relationships.append(("unit__from_node", (_unit_name, _node_name)))
            # build parameters
            if par[2] == 'capacity':
                # capacity is aggregated in Backbone but SpineOpt requires unit capacity
                _number_of_units = __get_number_of_units(_bb_spine_db_export, _unit_name, default=1)
                _unit_capacity = par[3] / _number_of_units[0]
                _temp_importer.relationship_parameter_values += [
                    ("unit__from_node", [_unit_name, _node_name], "unit_capacity", _unit_capacity, _alternative),
                ]
            elif par[2] == 'conversionCoeff':
                _temp_importer.relationship_parameter_values += [
                    ("unit__from_node", [_unit_name, _node_name], "unit_conv_cap_to_flow", par[3], _alternative),
                ]
            elif par[2] == 'vomCosts':
                _temp_importer.relationship_parameter_values += [
                    ("unit__from_node", [_unit_name, _node_name], "vom_cost", par[3], _alternative),
                ]
            # For nodes which are created to supply fuels
            if _node_name in _fuel_commodity:
                # add an additional dummy unit to balance the input node that is particularly for fuel source
                _temp_importer = dummy_unit_for_node(
                    _temp_importer, _node_name, f"Fueling_unit_{_node_name}", "to_node"
                )
                # build fuel price, in either TimeSeries or constant value
                _fuel_price_dict = __restore_fuel_price_map(_bb_spine_db_export, _node_name, _alternative=_alternative)
                # NOTE(review): an empty price map also takes the time-series
                # branch and would emit an empty series — confirm that is safe.
                if len(_fuel_price_dict) != 1:
                    __time_index = [str(x) for x in _time_index]
                    _fuel_price_ts = dict(zip(__time_index, list(_fuel_price_dict.values())[:len(__time_index)]))
                    _temp_importer.relationship_parameter_values += [
                        (
                            "unit__from_node", [_unit_name, _node_name], "fuel_cost",
                            {"type": "time_series", "data": _fuel_price_ts, "index": {"repeat": timeseries_repeat}},
                            _alternative
                        ),
                    ]
                # constant value
                else:
                    _fuel_price = list(_fuel_price_dict.values())[0]
                    _temp_importer.relationship_parameter_values += [
                        ("unit__from_node", [_unit_name, _node_name], "fuel_cost", _fuel_price, _alternative),
                    ]
    # grid in bb_format translated as commodity in spineopt
    # node in bb_format translated as node in spineopt
    # unit in bb_format translated as unit in spineopt
    _temp_importer.objects += [("commodity", _grid_name), ("node", _node_name), ("unit", _unit_name)]
    # add node__commodity relationship
    _temp_importer.relationships.append(("node__commodity", (_node_name, _grid_name)))
    # Translate Parameters 2
    if _parameters_2:
        # TODO: level?, lambda eff type?
        # NOTE(review): `'level1' in _parameters_2` is a membership test on a
        # list of relationship tuples, so it is (almost) always False and the
        # expression always selects index 0 — confirm whether a lookup of the
        # 'level1' entry was intended instead.
        _unit_on = _parameters_2['level1' in _parameters_2]
        if _unit_on[1][1] == 'directOnMIP':
            _temp_importer.object_parameter_values.append(
                ("unit", _unit_name, "online_variable_type", "unit_online_variable_type_integer", _alternative),
            )
        elif _unit_on[1][1] == 'directOnLP':
            _temp_importer.object_parameter_values.append(
                ("unit", _unit_name, "online_variable_type", "unit_online_variable_type_linear", _alternative),
            )
        elif _unit_on[1][1] == 'directOff':
            # 'directOff' is mapped to a fixed online count — TODO confirm
            # this matches the Backbone semantics of directOff.
            _number_of_units = __get_number_of_units(_bb_spine_db_export, _unit_name, default=1)
            _temp_importer.object_parameter_values.append(
                ("unit", _unit_name, "fix_units_on", _number_of_units[0], _alternative),
            )
    # Translate Parameters 3
    _operating_points = [0]
    _min_operating_point = _operating_points[0]
    # for units with efficiency levels
    if _parameters_2:
        _eff_level = len(_parameters_2)
    _direction = set([x[1][3] for x in _parameters_1])
    # TODO: what about the units with _direction containing both input and output?
    _constraint_name = f"Eff_{_unit_name}"
    if 'output' in _direction and len(_direction) == 1:
        if ("unit_constraint", _constraint_name) not in _temp_importer.objects:
            _temp_importer.objects += [("unit_constraint", _constraint_name), ]
            # specify constraint settings
            _temp_importer.object_parameter_values += [
                ("unit_constraint", _constraint_name, "constraint_sense", "==", _alternative),
                ("unit_constraint", _constraint_name, "right_hand_side", 0.0, _alternative),
            ]
            _temp_importer.relationships += [
                ("unit__unit_constraint", (_unit_name, _constraint_name)),
                ("unit__to_node__unit_constraint", (_unit_name, _node_name, _constraint_name)),
            ]
        # data for units with constant efficiency is stored in the highest effLevel
        if all(['directOff' in x[1] for x in _parameters_2]):
            # max(range(0, _eff_level)) is simply _eff_level - 1.
            _max_level = max(range(0, _eff_level))
            _operating_points = [x[3] for x in _parameters_3 if x[2] == f"op{_max_level:02d}"]
            # eff = output/input
            _unit_flow_coefficient = [-(x[3] ** -1) for x in _parameters_3 if x[2] == f"eff{_max_level:02d}"]
        else:
            _operating_points = list(
                map(lambda i: [x[3] for x in _parameters_3 if x[2] == f"op{i:02d}"][0],
                    range(0, _eff_level))
            )
            # TODO: to be confirmed
            _min_operating_point = _operating_points[0]
            # eff = output/input
            _unit_flow_coefficient = list(
                map(lambda i: [-(x[3] ** -1) for x in _parameters_3 if x[2] == f"eff{i:02d}"][0],
                    range(0, _eff_level))
            )
        _temp_importer.relationship_parameter_values += [
            ("unit__to_node", (_unit_name, _node_name),
             "operating_points", {"type": "array", "value_type": "float", "data": _operating_points}, _alternative),
            ("unit__to_node", (_unit_name, _node_name),
             "minimum_operating_point", _min_operating_point, _alternative),
            ("unit__to_node__unit_constraint", (_unit_name, _node_name, _constraint_name),
             "unit_flow_coefficient", {"type": "array", "value_type": "float", "data": _unit_flow_coefficient},
             _alternative)
        ]
    elif 'input' in _direction and len(_direction) == 1:
        if ("unit_constraint", _constraint_name) not in _temp_importer.objects:
            _temp_importer.objects += [("unit_constraint", _constraint_name), ]
            # specify constraint settings
            _temp_importer.object_parameter_values += [
                ("unit_constraint", _constraint_name, "constraint_sense", "==", _alternative),
                ("unit_constraint", _constraint_name, "right_hand_side", 0.0, _alternative),
            ]
            _temp_importer.relationships += [
                ("unit__unit_constraint", (_unit_name, _constraint_name)),
                ("unit__from_node__unit_constraint", (_unit_name, _node_name, _constraint_name)),
            ]
        _unit_flow_coefficient = [
            x[3] for x in _parameters_1 if all([x[2] == "conversionCoeff"])
        ][0]
        _temp_importer.relationship_parameter_values += [
            ("unit__from_node__unit_constraint", (_unit_name, _node_name, _constraint_name),
             "unit_flow_coefficient", _unit_flow_coefficient, _alternative),
        ]
    # Whether to parse unit specific values from p_unit
    if _p_unit:
        # parameters directly translatable
        def _rename_unit_para(obj_para_value_entity: tuple, _target_para_name: str, __alternative: str):
            # Copy a (class, object, parameter, value, alternative) record,
            # replacing the parameter name and the alternative.
            _para = list(obj_para_value_entity)
            _para[2] = _target_para_name
            _para[4] = __alternative
            return _para
        for par in _parameters_3:
            if par[2] == 'availability':
                _parameter = _rename_unit_para(par, "unit_availability_factor", _alternative)
                _temp_importer.object_parameter_values.append(tuple(_parameter))
            elif par[2] == 'minOperationHours':
                _parameter = _rename_unit_para(par, "min_up_time", _alternative)
                _parameter[3] = {"type": "duration", "data": f"{int(_parameter[3])}h"}
                _temp_importer.object_parameter_values.append(tuple(_parameter))
            elif par[2] == 'minShutdownHours':
                _parameter = _rename_unit_para(par, "min_down_time", _alternative)
                _parameter[3] = {"type": "duration", "data": f"{int(_parameter[3])}h"}
                _temp_importer.object_parameter_values.append(tuple(_parameter))
            # number of units, 1 is default value defined in SpineOpt database template
            elif par[2] == 'unitCount':
                _parameter = _rename_unit_para(par, "number_of_units", _alternative)
                _temp_importer.object_parameter_values.append(tuple(_parameter))
            # EUR/start/unit, start per unit capacity
            elif par[2] == 'startCostCold':
                # Relies on _unit_capacity computed from the capacity record
                # above; stays 0 when no capacity record exists.
                _start_up_cost = par[3] * _unit_capacity
                _temp_importer.object_parameter_values.append(
                    ("unit", _unit_name, "start_up_cost", _start_up_cost, _alternative)
                )
            elif par[2] == 'startFuelConsCold':
                # MWh fuel/unit startup
                _start_up_fuel_consumption = - par[3] * _unit_capacity
                # Parameters 4: from BB_gdx p_uStartupfuel (SpineDB unit__startupFuel)
                _parameters_4 = [
                    x for x in _bb_spine_db_export['relationship_parameter_values']
                    if all([x[0] == 'unit__startupFuel', _unit_name in x[1]])
                ]
                if _parameters_4:
                    # the corresponding fuel node for te startup fuel
                    _start_up_fuel = _parameters_4[0][1][1]
                    # explicit startup fuel node for clarity
                    _startup_fuel_node = f"{_start_up_fuel}_for_unit_startup"
                    # _startup_fuel_node links to the same fueling unit as the fuel node
                    if ("node", _startup_fuel_node) not in _temp_importer.objects:
                        _temp_importer.objects.append(("node", _startup_fuel_node))
                        _temp_importer = dummy_unit_for_node(
                            _temp_importer, _startup_fuel_node, f"Fueling_unit_{_start_up_fuel}", "to_node"
                        )
                        # add commodity for the fuel node if there is any
                        # _startup_fuel_node shares the same commodity with the fuel node
                        _grid_for_fuel_commodity = [
                            x[1][0] for x in _bb_spine_db_export['relationships']
                            if all([x[0] == 'grid__node', _start_up_fuel in x[1]])
                        ]
                        if _grid_for_fuel_commodity:
                            _fuel_commodity = _grid_for_fuel_commodity[0]
                            if ("commodity", _fuel_commodity) not in _temp_importer.objects:
                                _temp_importer.objects.append(("commodity", _fuel_commodity))
                            _temp_importer.relationships += [
                                ("node__commodity", (_startup_fuel_node, _fuel_commodity)),
                            ]
                        else:
                            print(f"The corresponding grid for fuel node {_start_up_fuel} is missing.")
                    # build unit_constraint for startup fuel flow
                    _constraint_name = f"Startup_fuel_{_unit_name}"
                    _temp_importer.objects += [
                        ("unit_constraint", _constraint_name),
                    ]
                    _temp_importer.relationships += [
                        ("unit__from_node", (_unit_name, _startup_fuel_node)),
                        ("unit__unit_constraint", (_unit_name, _constraint_name)),
                        ("unit__from_node__unit_constraint", (_unit_name, _startup_fuel_node, _constraint_name)),
                    ]
                    _temp_importer.object_parameter_values += [
                        ("unit_constraint", _constraint_name, "constraint_sense", "==", _alternative),
                        ("unit_constraint", _constraint_name, "right_hand_side", 0.0, _alternative),
                    ]
                    _temp_importer.relationship_parameter_values += [
                        ("unit__unit_constraint", (_unit_name, _constraint_name),
                         "units_started_up_coefficient", _start_up_fuel_consumption, _alternative),
                        ("unit__from_node__unit_constraint", (_unit_name, _startup_fuel_node, _constraint_name),
                         "unit_flow_coefficient", 1.0, _alternative),
                    ]
                    # build fuel price, in either TimeSeries or constant value, if there is any
                    _fuel_price_dict = __restore_fuel_price_map(
                        _bb_spine_db_export, _start_up_fuel, _alternative=_alternative
                    )
                    if len(_fuel_price_dict) != 1:
                        __time_index = [str(x) for x in _time_index]
                        _fuel_price_ts = dict(
                            zip(__time_index, list(_fuel_price_dict.values())[:len(__time_index)]))
                        _temp_importer.relationship_parameter_values += [
                            ("unit__from_node", [_unit_name, _startup_fuel_node], "fuel_cost",
                             {"type": "time_series", "data": _fuel_price_ts, "index": {"repeat": timeseries_repeat}},
                             _alternative),
                        ]
                    # constant value
                    else:
                        _fuel_price = list(_fuel_price_dict.values())[0]
                        _temp_importer.relationship_parameter_values += [
                            ("unit__from_node", [_unit_name, _startup_fuel_node], "fuel_cost", _fuel_price,
                             _alternative),
                        ]
            # set default number_of_units and unit_availability_factor to 1.0 if not explicitly recorded in the database
            # else:
            #     _temp_importer.object_parameter_values += [
            #         ("unit", _unit_name, "unit_availability_factor", 1.0, _alternative),
            #         ("unit", _unit_name, "number_of_units", 1.0, _alternative),
            #     ]
    return _temp_importer
| 7,800 |
def read_from_file(file_path):
    """Read a text file and return a list of all its lines.

    Each element keeps its trailing newline, matching ``file.readlines``.

    :param file_path: path of the file to read
    :return: list of lines in file order
    """
    # readlines() already produces the list; no need for a manual append loop.
    with open(file_path, 'r') as f:
        return f.readlines()
| 7,801 |
def count(predicate, iterable):
    """Count the elements of *iterable* for which *predicate* is truthy.

    @param predicate: Predicate function applied to each element.
    @param iterable: Iterable containing the elements to count.
    @return: The number of elements the predicate considers True.
    """
    # The previous Python 2 long literal (0L) is a SyntaxError on Python 3;
    # sum() over a generator counts matches without an explicit accumulator.
    return sum(1 for item in iterable if predicate(item))
| 7,802 |
def get_object_from_path(path):
    """Import and return the object addressed by a dotted path.

    :param path:
        dot separated path. Assumes last item is the object and first part is module
        path(str) -
        example:
            cls = get_object_from_path("a.module.somewhere.MyClass")
        you can create a path like this:
            class_path = "{0}.{1}".format(MyClass.__module__, MyClass.__name__)
    :return: the resolved object, or None when the module lacks that attribute
    """
    import importlib
    module_path, _, obj_name = path.rpartition(".")
    # importlib replaces the legacy __import__(..., -1) call: the level=-1
    # (implicit relative import) argument is invalid on Python 3.
    module = importlib.import_module(module_path)
    return getattr(module, obj_name, None)
| 7,803 |
def _get_collection_memcache_key(collection_id, version=None):
"""Returns a memcache key for the collection.
Args:
collection_id: str. ID of the collection.
version: int. Schema version of the collection.
Returns:
str. The memcache key of the collection.
"""
if version:
return 'collection-version:%s:%s' % (collection_id, version)
else:
return 'collection:%s' % collection_id
| 7,804 |
def test_landsat_id_pre_invalid():
    """Parsing an invalid pre-collection scene id must raise."""
    bad_scene_id = "L0300342017083LGN00"
    with pytest.raises(InvalidLandsatSceneId):
        landsat8._landsat_parse_scene_id(bad_scene_id)
| 7,805 |
def test_kubeadm_binary_which(host):
    """Confirm that `which` resolves kubeadm to the expected binary path."""
    resolved_path = host.check_output('which kubeadm')
    assert resolved_path == PACKAGE_BINARY
| 7,806 |
def biKmeans(dataSet, k, distMeas=calcEuclideanDistance):
    """
    Bisecting K-means clustering.

    Starts from a single cluster and repeatedly bisects the cluster whose
    split yields the largest drop in total SSE until k clusters exist.

    :param dataSet: np.matrix of shape (m, n), one observation per row
    :param k: desired number of clusters
    :param distMeas: distance function taking two row vectors
    :return: (matrix of k centroids,
              m x 2 matrix of [cluster index, squared error] per point)
    """
    m = np.shape(dataSet)[0]
    clusterAssment = np.mat(np.zeros((m, 2)))
    centroid0 = np.mean(dataSet, axis=0).tolist()[0]
    centList = [centroid0]  # create a list with one centroid
    for j in range(m):  # calc initial Error
        clusterAssment[j, 1] = distMeas(np.mat(centroid0), dataSet[j, :]) ** 2
    while len(centList) < k:
        lowestSSE = np.inf
        for i in range(len(centList)):
            # get the data points currently in cluster i
            ptsInCurrCluster = dataSet[np.nonzero(clusterAssment[:, 0].A == i)[0], :]
            centroidMat, splitClustAss = kMeans(ptsInCurrCluster, 2, distMeas)
            sseSplit = sum(splitClustAss[:, 1])  # compare the SSE to the current minimum
            sseNotSplit = sum(clusterAssment[np.nonzero(clusterAssment[:, 0].A != i)[0], 1])
            # Python 2 print statements converted to Python 3 print() calls.
            print("sseSplit, and notSplit: ", sseSplit, sseNotSplit)
            if (sseSplit + sseNotSplit) < lowestSSE:
                bestCentToSplit = i
                bestNewCents = centroidMat
                bestClustAss = splitClustAss.copy()
                lowestSSE = sseSplit + sseNotSplit
        # relabel the winning split: child 1 gets a brand-new cluster id,
        # child 0 keeps the id of the cluster that was split
        bestClustAss[np.nonzero(bestClustAss[:, 0].A == 1)[0], 0] = len(centList)
        bestClustAss[np.nonzero(bestClustAss[:, 0].A == 0)[0], 0] = bestCentToSplit
        print('the bestCentToSplit is: ', bestCentToSplit)
        print('the len of bestClustAss is: ', len(bestClustAss))
        centList[bestCentToSplit] = bestNewCents[0, :].tolist()[0]  # replace a centroid with two best centroids
        centList.append(bestNewCents[1, :].tolist()[0])
        # reassign new clusters, and SSE
        clusterAssment[np.nonzero(clusterAssment[:, 0].A == bestCentToSplit)[0], :] = bestClustAss
    return np.mat(centList), clusterAssment
| 7,807 |
def get(directory):
    """Prepare df and gdf with solar atlas tiled data.

    :param directory: root directory containing one sub-directory per period,
        each holding headerless CSV tile indexes
    :return: (enermaps_data, spatial) — a DataFrame in the ENERMAPS layout and
        a GeoDataFrame with one bounding-box polygon per tile
    """
    files_list = glob.glob(os.path.join(directory, "*", "*.csv"))
    data = []
    for file in files_list:
        logging.info(file)
        # Tile index files are headerless: tile name plus extent coordinates.
        tiles = pd.read_csv(file, header=None)
        tiles.columns = ["tilename", "minx", "maxx", "miny", "maxy"]
        tiles["extentBox"] = tiles.apply(
            lambda x: box(x.minx, x.miny, x.maxx, x.maxy), axis=1
        )
        # Prefix the tile name with its parent directory to keep it unique
        # across sub-directories.
        tiles["tilename"] = (
            os.path.basename(os.path.dirname(file)) + "/" + tiles["tilename"]
        )
        # The segment after "_" in the directory name is parsed as a month;
        # 2099 is a placeholder year.
        tiles["start_at"] = pd.to_datetime(
            "2099-" + os.path.dirname(file).split("_")[1], format="%Y-%m"
        )
        data.append(tiles)
    data = pd.concat(data, ignore_index=True)
    # NOTE(review): this mutates the shared utilities.ENERMAPS_DF template
    # in place rather than a copy — confirm that is intended.
    enermaps_data = utilities.ENERMAPS_DF
    enermaps_data["fid"] = data["tilename"]
    enermaps_data["start_at"] = data["start_at"]
    enermaps_data["variable"] = VARIABLE
    enermaps_data["unit"] = UNIT
    enermaps_data["israster"] = ISRASTER
    enermaps_data["dt"] = DT
    spatial = gpd.GeoDataFrame(geometry=data["extentBox"], crs="EPSG:3035",)
    spatial["fid"] = data["tilename"]
    return enermaps_data, spatial
| 7,808 |
def cleanup(args, repo):
    """Clean up undeployed pods."""
    if args.keep < 0:
        raise ValueError('negative keep: %d' % args.keep)

    def _in_use(pod):
        # A pod is in use when any of its instance units is enabled or active.
        return any(
            scripts.systemctl_is_enabled(instance.unit_name)
            or scripts.systemctl_is_active(instance.unit_name)
            for instance in pod.iter_instances()
        )

    for pod_dir_name in repo.get_pod_dir_names():
        LOG.info('%s - cleanup', pod_dir_name)
        pods = list(repo.iter_pods(pod_dir_name))
        remaining = len(pods)
        for pod in pods:
            # Keep at least args.keep pods per directory.
            if remaining <= args.keep:
                break
            if _in_use(pod):
                LOG.info('refuse to undeploy pod: %s', pod)
                continue
            _undeploy_pod(repo, pod)
            remaining -= 1
    return 0
| 7,809 |
def optimizer_builder(
        config: Dict):
    """
    Instantiate an RMSprop optimizer with an exponential-decay LR schedule.

    :param config: dictionary holding decay_rate, decay_steps, learning_rate
        and gradient_clipping_by_norm
    :return: (optimizer, learning-rate schedule) tuple
    """
    # --- argument checking
    if not isinstance(config, dict):
        raise ValueError("config must be a dictionary")
    # --- read configuration (all keys are required)
    decay_rate = config["decay_rate"]
    decay_steps = config["decay_steps"]
    learning_rate = config["learning_rate"]
    gradient_clipping_by_norm = config["gradient_clipping_by_norm"]
    # --- set up schedule
    lr_schedule = keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=learning_rate,
        decay_steps=decay_steps,
        decay_rate=decay_rate)
    optimizer = keras.optimizers.RMSprop(
        learning_rate=lr_schedule,
        global_clipnorm=gradient_clipping_by_norm)
    return optimizer, lr_schedule
| 7,810 |
def convertfile(filename, orig=1):
    """Convert the filename given from using Numeric to using NumPy

    Copies the file to filename.orig and then over-writes the file
    with the updated code
    """
    # Context manager closes the handle as soon as the content is read.
    with open(filename) as fid:
        filestr = fid.read()
    filestr, changed = fromstr(filestr)
    if not changed:
        return
    if orig:
        base, ext = os.path.splitext(filename)
        os.rename(filename, base + ".orig")
    else:
        os.remove(filename)
    makenewfile(filename, filestr)
| 7,811 |
def psi(z: float, a: float, b: float) -> float:
    """Penalty function with uniformly bounded derivative (Eq. 20)

    Args:
        z: Relative distance
        a: Cohesion strength
        b: Separation strength
    """
    c = np.abs(a - b) / (2 * np.sqrt(a * b))
    # Smooth, sigmoid-like term shifted by c so that psi(0) == 0.
    bounded_term = np.sqrt(1 + (z + c) ** 2) - np.sqrt(1 + c ** 2)
    return 0.5 * (a + b) * bounded_term + 0.5 * (a - b) * z
| 7,812 |
def caselessSort(alist):
    """Return a sorted copy of a list. If there are only strings
    in the list, it will not consider case.
    """
    try:
        return sorted(alist, key=lambda a: (a.lower(), a))
    except (AttributeError, TypeError):
        # Non-string elements lack .lower() (AttributeError) — previously
        # only TypeError was caught, so e.g. a list of ints raised instead
        # of falling back to the natural ordering as the docstring implies.
        return sorted(alist)
| 7,813 |
def EnableRing(serialPort):
    """ Enable the ISU to listen for SBD Ring Alerts. When SBD Ring Alert indication is enabled, the 9602 asserts the RI line and issues the unsolicited result
    code SBDRING when an SBD Ring Alert is received. """
    # Enables Ring message to indicate there's a message to read.
    Log("EnableRing()")
    if WriteAndCheck(serialPort, "AT+SBDMTA=1\r", "OK", 30):
        Log("OK.")
        return True
    Log("Issue enabling ring notifications.")
    return False
| 7,814 |
def verify_image(filename_or_obj, format, resolution):
    """
    Verify that the image in filename_or_obj has the specified format and
    resolution.
    """
    width, height = resolution
    if format in RAW_FORMATS:
        bytes_per_pixel = RAW_FORMATS[format]
        # The raw frame may be rounded up to either a 16x16 or a 32x16 block
        # boundary, so both candidate sizes are accepted.
        size_16x16 = (
            math.ceil(width / 16) * 16
            * math.ceil(height / 16) * 16
            * bytes_per_pixel
        )
        size_32x16 = (
            math.ceil(width / 32) * 32
            * math.ceil(height / 16) * 16
            * bytes_per_pixel
        )
        if isinstance(filename_or_obj, str):
            stream = io.open(filename_or_obj, 'rb')
        else:
            stream = filename_or_obj
        stream.seek(0, os.SEEK_END)
        assert stream.tell() in (size_16x16, size_32x16)
    else:
        img = Image.open(filename_or_obj)
        assert img.size == resolution
        assert img.format.lower() == format.lower()
        img.verify()
| 7,815 |
def render(template, **context):
    """Render the given template.

    :param template: The template file name or string to render.
    :param **context: Context keyword-arguments.
    """
    class Undefined(BaseUndefined):
        def _fail_with_undefined_error(self, *args, **kwargs):
            try:
                super(Undefined, self)._fail_with_undefined_error(*args,
                    **kwargs)
            except Exception as error:
                # Re-emit unresolved placeholders verbatim instead of failing.
                return "{{{{ {0} }}}}".format(error)
    try:
        try:
            # Python 3 fix: the removed `file()` builtin becomes open(), and
            # the `except X, e` syntax becomes `except X as e` throughout.
            template_file = open(expanduser(template))
            loader = FileSystemLoader(dirname(template_file.name))
            environment = Environment(loader=loader, undefined=Undefined)
            try:
                template = environment.get_template(basename(
                    template_file.name))
            except TemplateSyntaxError as error:
                message = "Template {0}:{1}, {2}{3}".format(error.filename,
                    error.lineno, error.message[:1].lower(), error.message[1:])
                exit(message)
        except IOError:
            # Not a readable file: treat the argument as a template string.
            try:
                template = Template(template, undefined=Undefined)
            except TemplateSyntaxError as error:
                message = "Template \"{0}\" {1}{2}".format(template,
                    error.message[:1].lower(), error.message[1:])
                exit(message)
    except TemplateError as error:
        message = "Template error: {0}".format(error.message)
        exit(message)
    return template.render(context)
| 7,816 |
def read(pth, format=None, encoding=None, cols=None, **kwargs):
    """Returns the contents of a file into a string or format-dependent data
    type (with special handling for json and csv files).

    The format will either be inferred from the file extension or can be set
    explicitly using the `format` arg. Text will be read using the specified
    `encoding` or default to UTF-8.

    JSON files will be parsed and an appropriate python type will be selected
    based on the top-level object defined in the file. The optional keyword
    argument `dict` can be set to `adict` or `odict` if you'd prefer not to use
    the standard python dictionary for decoded objects.

    CSV files will return a list of rows. By default each row will be an ordered
    list of column values. If the first line of the file defines column names,
    you can call read() with cols=True in which case each row will be a namedtuple
    using those names as keys. If the file doesn't define its own column names,
    you can pass a list of strings as the `cols` parameter. Rows can be formatted
    as column-keyed dictionaries by passing True as the `dict` parameter.
    """
    # Remote path: fetch over HTTP and wrap the payload in an in-memory stream.
    if re.match(r'https?:', pth):
        resp = HTTP.get(pth)
        resp.raise_for_status()
        extension_type = splitext(urlparse(pth).path)[-1]
        content_type = resp.headers.get('content-type', extension_type).lower()
        # A json/csv content-type overrides the URL's file extension.
        for data_t in ['json', 'csv']:
            if data_t in content_type:
                extension_type = data_t
        if binaryish(content_type, format):
            fd = BytesIO(resp.content)
        else:
            if encoding:
                resp.encoding = encoding
            elif 'charset' not in content_type:
                # No declared charset: let the HTTP client guess one.
                resp.encoding = resp.apparent_encoding
            fd = StringIO(resp.text)
    # Local path: open in binary or text mode depending on the extension.
    else:
        enc = encoding or 'utf-8'
        extension_type = splitext(pth)[-1].lower()
        if binaryish(extension_type, format):
            fd = open(os.path.expanduser(pth), 'rb')
        else:
            fd = open(os.path.expanduser(pth), 'rt', encoding=enc)
    # Normalize the `dict` kwarg: True -> builtin dict, False -> unset.
    if kwargs.get('dict') is True:
        kwargs['dict'] = dict
    elif kwargs.get('dict') is False:
        del kwargs['dict']
    dict_type = kwargs.get('dict', dict)
    format = (format or extension_type).lstrip('.')
    if format=='json':
        return json.load(fd, object_pairs_hook=dict_type)
    elif format=='csv':
        dialect = csv_dialect(fd)
        if cols:
            if kwargs.get('dict'):
                return list(csv_dict(fd, dialect=dialect, cols=cols, dict=dict_type))
            else:
                return list(csv_tuple(fd, dialect=dialect, cols=cols))
        return list(csv_rows(fd, dialect=dialect))
    else:
        # Any other format: return the raw (text or bytes) content.
        return fd.read()
| 7,817 |
def audio(src: str) -> str:
    """ Insert audio tag
    The tag is currently not supported by Nuance, please use `audio_player` kit:
    docs/use_kits_and_actions.md
    :param src:
    :return:
    """
    return '<audio src="{}"/>'.format(src)
| 7,818 |
def read(*rnames):
    """
    Read content of a file. We assume the file to be in utf8

    :param rnames: path components relative to this module's directory
    :return: the file content as a str
    """
    # Use a context manager so the handle is always closed; the previous
    # version leaked the open file object.
    path = os.path.join(os.path.dirname(__file__), *rnames)
    with open(path, encoding="utf8", mode="r") as f:
        return f.read()
| 7,819 |
def PerpendicularDistanceToFinish(point_b_angle: float,
                                  point_b: gps_pb2.Point) -> float:
    """
    cos(B) = Adjacent / Hypotenuse
    https://www.mathsisfun.com/algebra/trig-finding-side-right-triangle.html
    """
    # Project the hypotenuse (distance to start/finish) onto the adjacent side.
    angle_radians = math.radians(point_b_angle)
    return point_b.start_finish_distance * math.cos(angle_radians)
| 7,820 |
def get_revolut_stocks() -> List[str]:
    """
    Gets all tickers offered on Revolut trading platform.

    Returns:
        list(str)
    """
    import requests
    response = requests.get("https://globefunder.com/revolut-stocks-list/")
    # First table on the page holds the listing; normalise "." to "-"
    # so the symbols match the usual ticker format.
    symbols = list(pd.read_html(response.content)[0]["Symbol"])
    return [symbol.replace(".", "-") for symbol in symbols]
| 7,821 |
def move_recipes_by_condition(path, directory_name, condition):
    """Move the recipes under *path* that satisfy *condition* into a newly
    created directory called *directory_name*.

    :param path: directory searched for matching recipes
    :param directory_name: name of the destination directory (created here,
        relative to the current working directory)
    :param condition: predicate forwarded to list_recipes_by_condition
    """
    os.mkdir(directory_name)
    # os.path.join is portable, unlike manual "/" string concatenation.
    destination = os.path.join(os.getcwd(), directory_name)
    for recipe in list_recipes_by_condition(path, condition):
        shutil.move(recipe, destination)
| 7,822 |
def response_text(response_class):
    """
    Return the UTF-8 encoding of the API response.

    :param response_class: class to cast the response to
    :return: Text of the response casted to the specified class
    """
    def decorator(func):
        @wraps(func)
        def wrapper(obj, *args, **kwargs):
            raw = func(obj, *args, **kwargs)
            # Already the requested type: pass it through untouched.
            if isinstance(raw, response_class):
                return raw
            try:
                return response_class(raw.text)
            except Exception:
                logger.debug("Exception during response parsing.", exc_info=True)
                raise APIError("Exception during response parsing")
        return wrapper
    return decorator
| 7,823 |
def create_rankings(
    a: Dataset, b: Dataset, n_samples: int = 100, unravel: bool = False, **kwargs: int
) -> Tuple[ndarray, ndarray]:
    """
    Sample a dataset 'a' with 'n' negative samples given interactions in dataset 'a'
    and 'b'.
    Practically, this function allows you to generate evaluation data as described in
    the work of He et al. [1]. The evaluation procedure assumes that the input datasets
    'a' and 'b' have been generated with a leave 'n' out policy, such that dataset 'b'
    corresponds to the 'training' dataset (i.e. dataset with 'left out' samples removed)
    and 'a' corresponds to the 'test' dataset with 'n' for each user with
    n_interactions > n. For each user in 'a', the function will return that user's 'n'
    left-out interactions, plus 'n_samples' negative samples (items the user has not
    interacted with in both the 'train' and 'test' datasets).
    Parameters
    ----------
    a: Dataset
        The 'test' dataset (the dataset you wish to use for evaluation).
    b: Dataset
        The 'train' dataset (the dataset you wish to include for purposes of sampling
        items the user has not interacted with -- negative samples).
    n_samples: int
        The total number of negative samples per user to generate. For example, if the
        dataset 'a' was generated from a leave-one-out split, and n_samples=100, that
        user would receive 101 samples.
    unravel: bool
        If 'True', the function will return two arrays, where the first element of the
        first array corresponds to the user _vector_ (i.e. user ID + optional metadata),
        the first element of the first array corresponds to an associated sampled item
        vector(i.e. item ID + optional metadata).
    Returns
    -------
    output: (ndarray, List[ndarray])
        If 'unravel=False', the first element corresponds to an array of _ordered_ user
        ids, the second the `n_samples+1`per-user samples.
        If `unravel=True`, the first element corresponds to an array of _ordered_ user
        vectors, the second to each individual item vector. See `unravel` argument and
        `_unravel_ranked`, below. This function is provided for use when evaluating
        Keras Models with the `predict` method.
    References
    ----------
    [1] He et al. https://dl.acm.org/doi/10.1145/3038912.3052569
    """
    # Draw n_samples negatives per user; the train matrix 'b' is passed as
    # aux so items seen in training are excluded from the negatives too.
    users, items, _ = a.to_components(
        negative_samples=n_samples,
        aux_matrix=b.interactions.tocsr(),
        shuffle=False,
        sampling_mode="absolute",
    )
    unique_users = unique(users)
    # The slicing below assumes to_components returns one positive row per
    # unique user first, followed by the negative samples — NOTE(review):
    # confirm this layout against Dataset.to_components.
    sampled_users, sampled_items = (
        users[len(unique_users) :],
        items[len(unique_users) :],
    )
    _, grouped = groupby(sampled_users, sampled_items)
    # Append each user's held-out (positive) item as the final column.
    grouped = c_[grouped, items[: len(unique_users)]]
    if unravel:
        return _unravel_sampled(unique_users, grouped, a, **kwargs)
    else:
        return unique_users, grouped
| 7,824 |
def make_even(x):
    """Make number divisible by 2"""
    # Odd values are reduced by one; even values pass through unchanged.
    return x - 1 if x % 2 != 0 else x
| 7,825 |
def get_num_vehicles(session, query_filters):
    """Gets the total number of annotations.

    :param session: SQLAlchemy session used to issue the query
    :param query_filters: extra filter clauses ANDed onto the base query
    :return: count of Vehicle rows on test photos matching all filters
    """
    # pylint: disable-msg=E1101
    # Note: the previous version ended the .filter(...) line with a stray
    # backslash continuation followed by a comment, which is a SyntaxError.
    # `== True` is kept deliberately: it is the SQLAlchemy column-comparison
    # idiom, not a Python boolean check.
    num_vehicles_query = session.query(
        func.count(Vehicle.id)) \
        .join(Photo) \
        .filter(Photo.test == True)
    # pylint: enable-msg=E1101
    for query_filter in query_filters:
        num_vehicles_query = num_vehicles_query.filter(query_filter)
    num_vehicles, = num_vehicles_query.one()
    return num_vehicles
| 7,826 |
def create_python_src_file(basedir, daystr, sample_count):
    """Creates a skeleton python file.

    :param basedir: project root that already contains a ``src`` directory
    :param daystr: basename (without extension) of the generated module
    :param sample_count: number of sample inputs; selects the default
        sample file referenced by the generated argparse code
    """
    if sample_count == 1:
        filename = '../input/sample.txt'
    else:
        filename = '../input/sample1.txt'
    with open(os.path.join(basedir, 'src', daystr + '.py'), 'w') as file:
        message = (
            "import argparse\n"
            "\n"
            "def main():\n"
            "    parser = argparse.ArgumentParser()\n"
            # Bug fix: `filename` was computed above but never interpolated
            # into the generated source (the default was a literal string).
            f"    parser.add_argument('-f', '--filename', default='{filename}')\n"
            "    parser.add_argument('-p', '--part', choices=[1, 2], default=1, type=int)\n"
            "    args = parser.parse_args()\n"
            "\n"
            "if __name__ == \"__main__\":\n"
            "    main()\n"
        )
        file.write(message)
| 7,827 |
def task_status_edit(request, status_id, response_format='html'):
    """TaskStatus edit view: show the edit form and process its submission.

    :param request: Django HttpRequest
    :param status_id: primary key of the TaskStatus being edited
    :param response_format: passed through to render_to_response
    """
    status = get_object_or_404(TaskStatus, pk=status_id)
    # Require write permission on this particular status object.
    if not request.user.profile.has_permission(status, mode='w'):
        return user_denied(request, message="You don't have access to this Task Status")
    if request.POST:
        if 'cancel' not in request.POST:
            form = TaskStatusForm(
                request.user.profile, request.POST, instance=status)
            if form.is_valid():
                status = form.save()
                return HttpResponseRedirect(reverse('projects_index_by_status', args=[status.id]))
            # Invalid form: fall through and re-render it with errors.
        else:
            # Cancel pressed: skip saving, go straight back to the index.
            return HttpResponseRedirect(reverse('projects_index_by_status', args=[status.id]))
    else:
        form = TaskStatusForm(request.user.profile, instance=status)
    context = _get_default_context(request)
    context.update({'form': form,
                    'status': status})
    return render_to_response('projects/status_edit', context,
                              context_instance=RequestContext(request), response_format=response_format)
| 7,828 |
def is_windows_system():
    """Return True when the host operating system is Windows.

    (Docstring translated from the original Chinese notes; author: jhuang.)
    """
    system_name = platform.system()
    return 'Windows' in system_name
| 7,829 |
def seepage_from_unitary(U):
    """
    Calculates seepage by summing over all in and output states in the
    computational subspace, each dressed with the |2> leakage level:
    L1 = 1 - (1/2) * sum_i sum_j abs(|<phi_i|U|phi_j>|)**2
    """
    total_prob = 0
    for i in range(2):
        # The bra does not depend on j, so build it once per i.
        bra_i = qtp.tensor(qtp.ket([i], dim=[2]),
                           qtp.ket([2], dim=[3])).dag()
        for j in range(2):
            ket_j = qtp.tensor(qtp.ket([j], dim=[2]),
                               qtp.ket([2], dim=[3]))
            total_prob += np.abs((bra_i * U * ket_j).data[0, 0]) ** 2
    # Normalize by the dimension of the computational subspace.
    return 1 - total_prob / 2
| 7,830 |
def auto_get(*args):
    """
    auto_get(type, lowEA, highEA) -> ea_t

    Retrieve an address from the queues regarding their priority. Returns
    'BADADDR' if no addresses not lower than 'lowEA' and less than
    'highEA' are found in the queues. Otherwise *type will have the queue
    type.

    @param type (C++: atype_t *)
    @param lowEA (C++: ea_t)
    @param highEA (C++: ea_t)
    """
    # Thin SWIG wrapper: all real work happens in the native _ida_auto module.
    return _ida_auto.auto_get(*args)
| 7,831 |
def isLineForUser(someLine=None, username=None):
    """Determine whether a raw output line belongs to the given user.

    :param someLine: raw output line to inspect (may be None).
    :param username: user name to match against (may be None).
    :return: result of ``utils.isLineForMatch``; False when matching raises.
    """
    try:
        return utils.isLineForMatch(someLine, username)
    except Exception as matchErr:
        # Log the failure in the same three-entry format used elsewhere,
        # then fall back to "no match". (Removed the original's dead
        # ``matchErr = None; del matchErr`` statements.)
        logs.log(str(type(matchErr)), "Error")
        logs.log(str(matchErr), "Error")
        logs.log(str((matchErr.args)), "Error")
        return False
| 7,832 |
def get_view_class(callback):
    """
    Resolve the class behind the given callback.

    Checks the ``view_class`` and ``cls`` attributes first; otherwise falls
    back to looking the callback's name up in its defining module.
    """
    for attr in ('view_class', 'cls'):
        if hasattr(callback, attr):
            return getattr(callback, attr)
    # TODO: Below fallback seems to not do anything beyond returning the
    # callback itself for plain functions.
    module = importlib.import_module(callback.__module__)
    return getattr(module, callback.__name__)
| 7,833 |
def create_multipart_upload(s3_obj, bucketname, object_key):
    """
    Initiates Multipart Upload

    Args:
        s3_obj (obj): MCG or OBC object
        bucketname (str): Name of the bucket on which multipart upload to be initiated on
        object_key (str): Unique object Identifier

    Returns:
        str : Multipart Upload-ID
    """
    response = s3_obj.s3_client.create_multipart_upload(
        Bucket=bucketname, Key=object_key
    )
    return response["UploadId"]
| 7,834 |
def test_dispatcher_config_needed_problem():
    """Command needs a config, which is not there."""
    class MyCommand(BaseCommand):
        help_msg = "some help"
        name = 'cmdname'
        needs_config = True

        def run(self, parsed_args):
            pass

    dispatcher = Dispatcher(['cmdname'], [('test-group', 'title', [MyCommand])])
    with pytest.raises(CommandError) as err:
        dispatcher.run()
    expected = (
        "The specified command needs a valid 'charmcraft.yaml' configuration file (in the "
        "current directory or where specified with --project-dir option); see the reference: "
        "https://discourse.charmhub.io/t/charmcraft-configuration/4138")
    assert str(err.value) == expected
| 7,835 |
def cn(DB):
    """Open the SQLite database at ``DB`` and return (cursor, connection)."""
    connection = sqlite3.connect(DB)
    cursor = connection.cursor()
    return (cursor, connection)
| 7,836 |
def _build_category_tree(slug, reference=None, items=None):
    """
    Build the category tree rooted at ``slug`` recursively.

    Every category in ``reference`` whose ``parent`` equals ``slug`` gets its
    own subtree attached under the ``children`` key and is appended to
    ``items`` (mutating it in place when supplied).
    """
    if items is None:
        items = []
    for category in reference.values():
        if category["parent"] != slug:
            continue
        category["children"] = _build_category_tree(
            category["nicename"], reference=reference)
        items.append(category)
    return items
| 7,837 |
def test_parse_config():
    """Test noded.conf parsing."""
    config = noded.parse_config("conf/noded.conf")
    assert config.get("defaults", "redis_host") == "localhost"
    # Table-driven check of the integer options, in the original order.
    expected_ints = {
        "redis_port": 6379,
        "redis_db": 0,
        "redis_timeout": 1,
        "sleep_time": 30,
        "expire_time": 300,
        "rb_maxlen": 4,
    }
    for option, value in expected_ints.items():
        assert config.getint("defaults", option) == value
| 7,838 |
def partially_matched_crossover(random, mom, dad, args):
    """Return the offspring of partially matched crossover on the candidates.

    This function performs partially matched crossover (PMX). This type of
    crossover assumes that candidates are composed of discrete values that
    are permutations of a given set (typically integers). It produces offspring
    that are themselves permutations of the set.

    .. Arguments:
       random -- the random number generator object
       mom -- the first parent candidate
       dad -- the second parent candidate
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *crossover_rate* -- the rate at which crossover is performed
      (default 1.0)
    """
    crossover_rate = args.setdefault('crossover_rate', 1.0)
    if random.random() < crossover_rate:
        size = len(mom)
        # Pick two distinct cut points; x <= y delimit the swapped segment.
        points = random.sample(range(size), 2)
        x, y = min(points), max(points)
        # Each child starts as a copy of one parent with the other parent's
        # segment spliced in.
        bro = copy.copy(dad)
        bro[x:y+1] = mom[x:y+1]
        sis = copy.copy(mom)
        sis[x:y+1] = dad[x:y+1]
        # Repair step: restore the permutation property. For every value of
        # the parent displaced by the splice, follow the mapping chain until
        # a position outside the segment is found, and place it there.
        for parent, child in zip([dad, mom], [bro, sis]):
            for i in range(x, y+1):
                if parent[i] not in child[x:y+1]:
                    spot = i
                    while x <= spot <= y:
                        spot = parent.index(child[spot])
                    child[spot] = parent[i]
        return [bro, sis]
    else:
        # No crossover: return the parents unchanged.
        return [mom, dad]
| 7,839 |
def add(eq, sign):
    """Adds a character to the "equation" list, consisting of two numbers and an operation sign between them"""
    # Echo the sign being appended (trailing space keeps output on one line).
    print(sign, end=" ")
    # Delegate the actual append; presumably mutates ``eq`` in place -- TODO confirm.
    add_more(eq, sign)
    print(eq)
    # Once the list holds more than three tokens (number, op, number, ...),
    # reduce it; ``count`` appears to evaluate/collapse the expression -- verify.
    if len(eq) > 3:
        count(eq)
    print(eq)
| 7,840 |
def regex_trim(input, regex, replace=''):
    """
    Trim or replace regex matches in an input string.

    input (string): the input string to search for matches
    regex (string): regex to match
    replace (string - optional): replacement for any matches; the default
        empty string simply removes each match.
    """
    pattern = re.compile(regex)
    return pattern.sub(replace, input)
| 7,841 |
def test_d2scan():
    """
    Test ``d2scan`` scan (1D step scan) along two axes with ZP motors.

    Smoke test: runs the plan through the RunEngine and fills the resulting
    table; it asserts nothing and fails only if an exception is raised.
    """
    print("Running scan ..")
    # 10 steps over [-1, 1] on both zpssx and zpssy, 0.1 exposure per point.
    uid, = RE(d2scan([sclr1,zebra,merlin1,xspress3],10,zpssx,-1,1,zpssy,-1,1,0.1))
    print("Scan is completed")
    print("Filling the table ...")
    # fill=True presumably loads externally-stored detector data -- confirm.
    _ = db[uid].table(fill=True)
    print("Table is filled")
| 7,842 |
def getENVIframeDir(strPathScene, sSubDir=None):
    """Return the single directory containing ENVI frames.

    Frame bsqs in this dir are named FR_yyyy.mm.dd_X.bsq. The optional
    ``sSubDir`` name is a workaround for non-standard directory layouts.
    Raises when zero or more than one directory matches.
    """
    strWild = strPathScene + r'SEQhdr\ENVI_FR*'
    if sSubDir is not None:
        strWild = strWild + os.sep + sSubDir
    matches = [entry for entry in glob.glob(strWild) if os.path.isdir(entry)]
    if not matches:
        raise Exception('No match found for: ' + strWild)
    if len(matches) > 1:
        raise Exception('Multiple matchs found for: ' + strWild)
    return matches[0]
| 7,843 |
def LoadAllSuitesOfProject(project_name):
    """Loads all of the suites of a project.

    :param project_name: name of the BiteProject whose suites are fetched.
    :return: a query for all BiteSuite entities that are descendants of the
        project's datastore key.
    """
    # Build the datastore key of the parent project from its kind and name.
    project_key = db.Key.from_path(bite_project.BiteProject.kind(),
                                   project_name)
    return BiteSuite.all().ancestor(project_key)
| 7,844 |
def debug(line: str = None, cell: str = None, local_ns = None):
    """Toggle debugging mode for the current cell."""
    # Intentionally empty: the signature matches a cell-magic style callback
    # (line, cell, local_ns) but the toggle behavior is not implemented here.
| 7,845 |
def calc_temps(start_date, end_date):
    """TMIN, TAVG, and TMAX for a list of dates.

    Args:
        start_date (string): A date string in the format %Y-%m-%d
        end_date (string): A date string in the format %Y-%m-%d

    Returns:
        TMIN, TAVG, and TMAX
    """
    query = session.query(
        func.min(Measurement.tobs),
        func.avg(Measurement.tobs),
        func.max(Measurement.tobs),
    )
    query = query.filter(Measurement.date >= start_date)
    query = query.filter(Measurement.date <= end_date)
    return query.all()
| 7,846 |
def __check_rse_usage(rse: RseData, greedy: bool = False, logger: "Callable[..., Any]" = logging.log) -> 'Tuple[int, bool]':
    """
    Internal method to check RSE usage and limits.

    :param rse: The RSE data object; its limits, usage and attributes are
        loaded on demand.
    :param greedy: If True, needed_free_space will be set to 1TB regardless of actual rse usage.
    :param logger: Logging callable with the signature of ``logging.log``.
    :returns: needed_free_space, only_delete_obsolete.
    """
    # First of all check if greedy mode is enabled for this RSE
    if greedy:
        return 1000000000000, False

    rse.ensure_loaded(load_limits=True, load_usage=True, load_attributes=True)

    # Get RSE limits
    min_free_space = rse.limits.get('MinFreeSpace', 0)

    # Check from which sources to get used and total spaces
    # Default is storage
    source_for_total_space = rse.attributes.get('source_for_total_space', 'storage')
    source_for_used_space = rse.attributes.get('source_for_used_space', 'storage')

    logger(logging.DEBUG, 'RSE: %s, source_for_total_space: %s, source_for_used_space: %s',
           rse.name, source_for_total_space, source_for_used_space)

    # Get total, used and obsolete space: single pass over the usage entries,
    # stopping early once all three have been found.
    total_space_entry = None
    used_space_entry = None
    obsolete_entry = None
    for entry in rse.usage:
        if total_space_entry and used_space_entry and obsolete_entry:
            break

        entry_source = entry['source']
        if not total_space_entry and entry_source == source_for_total_space:
            total_space_entry = entry

        if not used_space_entry and entry_source == source_for_used_space:
            used_space_entry = entry

        if not obsolete_entry and entry_source == 'obsolete':
            obsolete_entry = entry

    obsolete = 0
    if obsolete_entry:
        obsolete = obsolete_entry['used']

    # If no information is available about disk space, do nothing except if there are replicas with Epoch tombstone
    needed_free_space = 0
    if not total_space_entry:
        if not obsolete:
            return needed_free_space, False
        return obsolete, True
    if not used_space_entry:
        return needed_free_space, False

    # Extract the total and used space
    total, used = total_space_entry['total'], used_space_entry['used']
    free = total - used

    if min_free_space:
        needed_free_space = min_free_space - free

    # If needed_free_space negative, nothing to delete except if some Epoch tombstoned replicas
    if needed_free_space <= 0:
        return obsolete, True
    else:
        return needed_free_space, False
| 7,847 |
def train_model(ad, rsrc_loc, algo='IR', log_dir=None):
    """
    Train a CellO model based on the genes of an
    input dataset.

    Parameters
    ----------
    ad : AnnData object
        Expression matrix of n cells by m genes

    rsrc_loc: String
        The location of the "resources" directory downloaded
        via the ''

    algo : String
        The name of the algorithm used to train the model. 'IR'
        trains a model using isotonic regression. 'CLR' trains
        a model using cascaded logistic regression.

    log_dir : String
        Path to a directory in which to write logging information

    Returns
    -------
    A trained CellO model
    """
    _download_resources(rsrc_loc)

    genes = ad.var.index

    # Load the training data. ``load`` returns a large tuple; only the
    # entries used below are bound to names (the remaining six were unused).
    r = load_training_data.load(UNITS, rsrc_loc)
    label_graph = r[1]
    the_exps = r[3]
    exp_to_labels = r[5]
    exp_to_study = r[7]
    X = r[10]
    all_genes = r[11]

    # Match genes in test data to those in training
    # data
    train_genes, gene_to_indices = _match_genes(
        genes,
        all_genes,
        rsrc_loc,
        log_dir=log_dir
    )

    # Take a subset of the columns for the training-genes. Note
    # that if a given gene in the test set maps to multiple training
    # genes, then we sum over the training genes.
    X_train = []
    for gene in train_genes:
        indices = gene_to_indices[gene]
        X_train.append(np.sum(X[:,indices], axis=1))
    X_train = np.array(X_train).T
    assert X_train.shape[1] == len(train_genes)

    # Train the model on these genes
    print('Training model...')
    mod = model.train_model(
        ALGO_TO_INTERNAL[algo],
        ALGO_TO_PARAMS[algo],
        X_train,
        the_exps,
        exp_to_labels,
        label_graph,
        item_to_group=exp_to_study,
        features=train_genes,
        preprocessor_names=PREPROCESSORS,
        preprocessor_params=PREPROCESSOR_PARAMS
    )
    print('done.')
    return mod
| 7,848 |
def ps_roi_max_align_2d(
        x, rois, roi_indices, outsize,
        spatial_scale, group_size, sampling_ratio=None
):
    """Position Sensitive Region of Interest (ROI) Max align function.

    Computes position-sensitive max values of the input spatial patch for the
    given regions of interest. Each ROI is split into
    :math:`(group\_size, group\_size)` regions and a position-sensitive value
    is computed in each region.

    Args:
        x (~chainer.Variable): Input 4-D variable (batch, channel, height,
            width).
        rois (array): Input rois of shape :math:`(R, 4)`, each row being
            (y_min, x_min, y_max, x_max), dtype :obj:`numpy.float32`.
        roi_indices (array): Roi indices of shape :math:`(R, )`, dtype
            :obj:`numpy.int32`.
        outsize ((int, int, int) or (int, int) or int): Expected output size
            after pooling: (channel, height, width), (height, width) or a
            single int ``o`` (equivalent to ``(o, o)``). The channel entry is
            used to assert the input shape.
        spatial_scale (float): Scale of the roi is resized.
        group_size (int): Position sensitive group size.
        sampling_ratio ((int, int) or int): Sampling step for the alignment;
            an integer >= 1 or :obj:`None` (auto). A tuple
            ``(sampling_ratio_h, sampling_ratio_w)`` selects different ratios
            per axis; ``s`` and ``(s, s)`` are equivalent.

    Returns:
        ~chainer.Variable: Output variable.

    See the original papers proposing PSROIPooling
    (`R-FCN <https://arxiv.org/abs/1605.06409>`_) and ROIAlign
    (`Mask R-CNN <https://arxiv.org/abs/1703.06870>`_).
    """
    align = PSROIMaxAlign2D(
        outsize, spatial_scale, group_size, sampling_ratio)
    return align(x, rois, roi_indices)
| 7,849 |
def received_date_date(soup):
    """
    Find the received date in human readable form
    """
    raw_date = history_date(soup, date_type="received")
    return utils.date_text(raw_date)
| 7,850 |
def show_avail_tasks() -> None:
    """
    Print the available and callable tasks (FabSim3 APIs or plugins tasks)
    """
    # Group task names by "<plugin> <type>" (or just "<type>" for built-ins).
    grouped_tasks = {}
    for task_name, task_obj in env.avail_tasks.items():
        if not hasattr(task_obj, "task_type"):
            continue
        if hasattr(task_obj, "plugin_name"):
            key = "{} {}".format(task_obj.plugin_name, task_obj.task_type)
        else:
            key = "{}".format(task_obj.task_type)
        grouped_tasks.setdefault(key, []).append(task_name)

    table = Table(
        title="\n\nList of available Tasks",
        show_header=True,
        show_lines=True,
        # expand=True,
        box=box.ROUNDED,
        header_style="dark_cyan",
    )
    table.add_column("Task Type", style="blue")
    table.add_column("Tasks name", style="magenta")
    for task_type, tasks_name in grouped_tasks.items():
        table.add_row(
            "{}".format(task_type),
            "{}".format(", ".join(tasks_name)),
        )
    Console().print(table)
| 7,851 |
def create_observation_from_inat_data(inaturalist_data):
    """Creates an observation in our local database according to the data from iNaturalist API.

    :param inaturalist_data: dict as returned by the iNaturalist API for one
        observation.
    :returns: the observation (instance of Nest or Individual) created.

    Raises:
        TaxonMatchError: when the iNat taxon id has no local Taxon.
        ParseDateError: when no observation time could be parsed at all.
    """
    # Primary path: parse the free-form date string in the observation's zone.
    observation_time = dateparser.parse(inaturalist_data['observed_on_string'],
                                        settings={'TIMEZONE': inaturalist_data['observed_time_zone']})
    if observation_time is None:
        # Sometimes, dateparser doesn't understand the string but we have the bits and pieces in
        # inaturalist_data['observed_on_details']
        details = inaturalist_data['observed_on_details']
        observation_time = datetime(year=details['year'],
                                    month=details['month'],
                                    day=details['day'],
                                    hour=details['hour'])  # in the observed cases, we had nothing more precise than the
                                                           # hour

    # Sometimes, the time is naive (even when specifying it to dateparser), because (for the detected cases, at least)
    # The time is 00:00:00. In that case we make it aware to avoid Django warnings (in the local time zone since all
    # observations occur in Belgium
    if is_naive(observation_time):
        # Some dates (apparently)
        observation_time = make_aware(observation_time)

    if observation_time:
        # Reconcile the taxon
        try:
            taxon = get_taxon_from_inat_taxon_id(inaturalist_data['taxon']['id'])
        except Taxon.DoesNotExist:
            raise TaxonMatchError

        inat_vv_confirmed = inat_data_confirms_vv(inaturalist_data)

        # Check if it has the vespawatch_evidence observation field value and if it's set to "nest"
        if 'ofvs' in inaturalist_data:
            is_nest_ofv = next((item for item in inaturalist_data['ofvs'] if item["field_id"] == settings.VESPAWATCH_EVIDENCE_OBS_FIELD_ID), None)
        else:
            is_nest_ofv = None

        if is_nest_ofv and is_nest_ofv['value'] == "nest":
            created = Nest.objects.create(
                inat_vv_confirmed=inat_vv_confirmed,
                originates_in_vespawatch=False,
                inaturalist_id=inaturalist_data['id'],
                taxon=taxon,
                latitude=inaturalist_data['geojson']['coordinates'][1],
                longitude=inaturalist_data['geojson']['coordinates'][0],
                observation_time=observation_time)  # TODO: What to do with iNat observations without (parsable) time?
        else:  # Default is specimen
            created = Individual.objects.create(
                inat_vv_confirmed=inat_vv_confirmed,
                originates_in_vespawatch=False,
                inaturalist_id=inaturalist_data['id'],
                taxon=taxon,
                latitude=inaturalist_data['geojson']['coordinates'][1],
                longitude=inaturalist_data['geojson']['coordinates'][0],
                observation_time=observation_time)  # TODO: What to do with iNat observations without (parsable) time?

        # Attach all iNaturalist photos to the newly created observation.
        for photo in inaturalist_data['photos']:
            created.assign_picture_from_url(photo['url'])

        return created
    else:
        raise ParseDateError
| 7,852 |
def get_cert_sha256_by_openssl(certraw: str) -> str:
    """Calculate the SHA-256 fingerprint of a certificate via openssl.

    Fixes vs. the original: the docstring claimed SHA-1 while the command uses
    -sha256; the exception is now chained; and cleanup no longer raises when
    the temp file was never written.

    :param certraw: PEM certificate text.
    :return: openssl's fingerprint output string.
    :raises Exception: wrapping any failure while writing or invoking openssl.
    """
    res: str = None
    tmpname = None
    try:
        # Pick a temp filename that does not collide with an existing one.
        tmpname = tmppath / f"{uuid.uuid1()}.crt"
        while tmpname.exists():
            tmpname = tmppath / f"{uuid.uuid1()}.crt"
        tmpname.write_text(certraw, encoding="utf-8")
        cmd = f"openssl x509 -in {tmpname} -fingerprint -noout -sha256"
        res = exec_openssl(cmd)
    except Exception as ex:
        # Preserve the original cause for debugging.
        raise Exception(f"Parse ssl data error, err:{ex}") from ex
    finally:
        if tmpname is not None:
            # missing_ok: the write itself may have failed before creation.
            tmpname.unlink(missing_ok=True)
    return res
| 7,853 |
def analytic_pi(x, c, w, h):
    """Analytic response function for an even pair of Lorentz distributions.

    Correspond to

    .. math::
        \\Pi(x) = \\int_{-\infty}^{\\infty}
        \\frac{\\omega^2}{\\omega^2+x^2}\sigma()_{i}

    where :math:`\\sigma(\\omega)` is :func:`~even_lorentzian`.

    Args:
        x (array): matsubara at which the response function is evaluated
        c (float): Center of the distribution (+ or -)
        w (float): Width of the distribution (variance)
        h (float): Height/weight of the distribtuion (area under the curve)

    Returns:
        array: Values of the integral at imaginary `x`
    """
    shifted = x + w
    denominator = c ** 2 + shifted ** 2
    return 2 * h * c / denominator
| 7,854 |
def run(canvas):
    """Apply the game rules to every point and return the next generation.

    @Args:
    --
    canvas : canvas of population to run the rules on.

    @returns:
    --
    The next-generation canvas as a nested list.
    """
    current = np.array(canvas)
    next_gen = np.array(create_canvas(current.shape[0]))
    for r, row in enumerate(current):
        for c, point in enumerate(row):
            # Judge each point against its 3x3 neighborhood slice.
            next_gen[r][c] = __judge_point(
                point, current[r - 1: r + 2, c - 1: c + 2]
            )
    return next_gen.tolist()
| 7,855 |
def initialize_vocabulary(vocabulary_path):
    """Initialize vocabulary from file.

    We assume the vocabulary is stored one-item-per-line, so a file:
      dog
      cat
    will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
    also return the reversed-vocabulary ["dog", "cat"].

    Args:
      vocabulary_path: path to the file containing the vocabulary.

    Returns:
      a pair: the vocabulary (a dictionary mapping string to integers), and
      the reversed vocabulary (a list, which reverses the vocabulary mapping).

    Raises:
      ValueError: if the provided vocabulary_path does not exist.
    """
    if not tf.gfile.Exists(vocabulary_path):
        # Bug fix: the path is now interpolated into the message; previously
        # it was passed as a stray second argument to ValueError.
        raise ValueError("Vocabulary file %s not found." % vocabulary_path)
    with tf.gfile.GFile(vocabulary_path, mode="rb") as f:
        rev_vocab = [line.strip() for line in f.readlines()]
    vocab = {x: y for (y, x) in enumerate(rev_vocab)}
    return vocab, rev_vocab
| 7,856 |
def ravel_space(space):
    """
    Convert the space into a Discrete space.
    """
    flat_size = _nested_dim_helper(space)[0]
    return Discrete(flat_size)
| 7,857 |
def _add_col(dataframe, metadata, col_limits, families, weights, random_state):
    """Add a new column to the end of the dataframe by sampling a distribution
    from ``families`` according to the column limits and distribution weights
    and sampling the required number of values from that distribution.

    :param dataframe: the dataframe being extended with one new column.
    :param metadata: list of per-column distribution instances; the new
        column's distribution is appended to it.
    :param col_limits: column limits; when the upper limit is a tuple it caps
        the number of columns drawn from each family.
    :param families: candidate distribution families.
    :param weights: per-family sampling probabilities.
    :param random_state: random state used for all sampling.
    :return: the renamed dataframe and the updated metadata.
    """
    nrows, ncols = dataframe.shape
    if isinstance(col_limits[1], tuple):
        family_counts = get_family_counts(metadata, families)
        # Keep drawing families until one below its per-family cap is found.
        while len(dataframe.columns) != ncols + 1:
            family = random_state.choice(families, p=weights)
            idx = families.index(family)
            if family_counts[family] < col_limits[1][idx]:
                return _sample_and_append_col(
                    dataframe, metadata, family, nrows, ncols, random_state)

    family = random_state.choice(families, p=weights)
    return _sample_and_append_col(
        dataframe, metadata, family, nrows, ncols, random_state)


def _sample_and_append_col(dataframe, metadata, family, nrows, ncols, random_state):
    """Sample an instance of ``family``, append its sample as a new column,
    record it in ``metadata`` and return the renamed dataframe + metadata."""
    pdf = family.make_instance(random_state)
    dataframe[ncols] = pdf.sample(nrows, random_state)
    metadata.append(pdf)
    return _rename(dataframe), metadata
| 7,858 |
def read_and_decrypt_mylogin_cnf(f):
    """Read and decrypt the contents of .mylogin.cnf.

    This decryption algorithm mimics the code in MySQL's
    mysql_config_editor.cc.

    The login key is 20-bytes of random non-printable ASCII.
    It is written to the actual login path file. It is used
    to generate the real key used in the AES cipher.

    :param f: an I/O object opened in binary mode
    :return: the decrypted login path file
    :rtype: io.BytesIO or None
    """
    # Number of bytes used to store the length of ciphertext.
    MAX_CIPHER_STORE_LEN = 4

    LOGIN_KEY_LEN = 20

    # Move past the unused buffer.
    buf = f.read(4)

    if not buf or len(buf) != 4:
        logger.error('Login path file is blank or incomplete.')
        return None

    # Read the login key.
    key = f.read(LOGIN_KEY_LEN)

    # Generate the real key: fold the 20 key bytes into 16 bytes by XOR.
    rkey = [0] * 16
    for i in range(LOGIN_KEY_LEN):
        try:
            rkey[i % 16] ^= ord(key[i:i+1])
        except TypeError:
            # ord() was unable to get the value of the byte.
            logger.error('Unable to generate login path AES key.')
            return None
    rkey = struct.pack('16B', *rkey)

    # Create a decryptor object using the key.
    decryptor = _get_decryptor(rkey)

    # Create a bytes buffer to hold the plaintext.
    plaintext = BytesIO()

    while True:
        # Read the length of the ciphertext.
        len_buf = f.read(MAX_CIPHER_STORE_LEN)
        if len(len_buf) < MAX_CIPHER_STORE_LEN:
            break
        cipher_len, = struct.unpack("<i", len_buf)

        # Read cipher_len bytes from the file and decrypt.
        cipher = f.read(cipher_len)
        plain = _remove_pad(decryptor.update(cipher))
        # _remove_pad signals a bad pad with False; skip that chunk.
        if plain is False:
            continue
        plaintext.write(plain)

    if plaintext.tell() == 0:
        logger.error('No data successfully decrypted from login path file.')
        return None

    # Rewind so callers can read the plaintext from the start.
    plaintext.seek(0)
    return plaintext
| 7,859 |
def list_files(tag=None, inst_id=None, data_path=None, format_str=None,
               supported_tags=None, file_cadence=dt.timedelta(days=1),
               two_digit_year_break=None, delimiter=None, file_type=None):
    """Return a Pandas Series of every file for chosen Instrument data.

    Parameters
    ----------
    tag : string or NoneType
        Denotes type of file to load.  Accepted types are <tag strings>.
        (default=None)
    inst_id : string or NoneType
        Specifies the satellite ID for a constellation.  Not used.
        (default=None)
    data_path : string or NoneType
        Path to data directory.  If None is specified, the value previously
        set in Instrument.files.data_path is used.  (default=None)
    format_str : string or NoneType
        User specified file format.  If None is specified, the default
        formats associated with the supplied tags are used. (default=None)
    supported_tags : dict or NoneType
        keys are inst_id, each containing a dict keyed by tag
        where the values file format template strings. (default=None)
    file_cadence : dt.timedelta or pds.DateOffset
        pysat assumes a daily file cadence, but some instrument data file
        contain longer periods of time.  This parameter allows the specification
        of regular file cadences greater than or equal to a day (e.g., weekly,
        monthly, or yearly). (default=dt.timedelta(days=1))
    two_digit_year_break : int or NoneType
        If filenames only store two digits for the year, then '1900' will be
        added for years >= two_digit_year_break and '2000' will be added for
        years < two_digit_year_break. If None, then four-digit years are
        assumed. (default=None)
    delimiter : string or NoneType
        Delimiter string upon which files will be split (e.g., '.'). If None,
        filenames will be parsed presuming a fixed width format. (default=None)
    file_type : str or NoneType
        File format for Madrigal data.  Load routines currently accepts 'hdf5',
        'simple', and 'netCDF4', but any of the Madrigal options may be used
        here. If None, will look for all known file types. (default=None)

    Returns
    -------
    out : pds.Series
        A pandas Series containing the verified available files

    """
    # Initialize the transitional variables
    list_file_types = file_types.keys() if file_type is None else [file_type]
    sup_tags = {inst_id: {tag: supported_tags[inst_id][tag]}}
    out_series = list()

    # Cycle through each requested file type, loading the requested files
    for ftype in list_file_types:
        # Substitute the concrete file-type suffix into the template.
        # NOTE(review): ``> 0`` skips templates that START with '{file_type}'
        # (find would return 0) -- looks like it should be ``>= 0``; confirm
        # no template begins with the placeholder before changing.
        if supported_tags[inst_id][tag].find('{file_type}') > 0:
            sup_tags[inst_id][tag] = supported_tags[inst_id][tag].format(
                file_type=file_types[ftype])

        out_series.append(pysat.instruments.methods.general.list_files(
            tag=tag, inst_id=inst_id, data_path=data_path,
            format_str=format_str, supported_tags=sup_tags,
            file_cadence=file_cadence,
            two_digit_year_break=two_digit_year_break, delimiter=delimiter))

    # Combine the file lists, ensuring the files are correctly ordered
    if len(out_series) == 0:
        out = pds.Series(dtype=str)
    elif len(out_series) == 1:
        out = out_series[0]
    else:
        out = pds.concat(out_series).sort_index()

    return out
| 7,860 |
def is_on_top(bb1, bb2):
    """ For obj 1 to be on top of obj 2:
         - obj1 must be above obj 2
         - the bottom of obj 1 must be close to the top of obj 2
    """
    lower_corner_1, _ = bb1
    _, upper_corner_2 = bb2
    bottom_z = lower_corner_1[2]
    top_z = upper_corner_2[2]
    return bottom_z < top_z + ONTOP_EPSILON and is_above(bb1, bb2)
| 7,861 |
def main_menu(args):
    """ Display main menu
    """
    # Prompt for a menu entry; each choice maps to a handler function.
    selection = qust.select('Public Building Manager - Main Menu',
                            choices=load_main_entires(), style=st).ask()
    # A cancelled prompt returns a falsy value -- do nothing in that case.
    if selection:
        selection(args)
| 7,862 |
def finishing():
    """
    Finish the deployment, clean up server(s).
    """
    # Intentionally empty: placeholder hook for post-deployment cleanup.
| 7,863 |
def prepare_definitions(defs, prefix=None):
    """
    prepares definitions from a dictionary

    Builds a definition list from key-value pairs: a key ``foo`` with value
    ``bar`` yields ``'foo=bar'``; a truthy-less (but non-None) value yields
    just the key; a ``None`` value drops the entry entirely. When ``prefix``
    is given, it is prepended to every key.

    Args:
        defs: the arguments to process
        prefix (optional): prefix value to prefix each definition

    Returns:
        list of arguments
    """
    if not defs:
        return []
    entries = []
    for name, value in defs.items():
        if value is None:
            continue
        full_name = (prefix + name) if prefix else name
        entries.append('{}={}'.format(full_name, value) if value else full_name)
    return entries
| 7,864 |
def load_training_data(mapsize=512, grfized=False, exclude_fid=False,
                       dense_grid=False, random_split=False,
                       from_files=False):
    """Load data for different training scenarios.

    :param mapsize: pixel size of the maps to load.
    :param grfized: load the equivalent gaussian random field maps instead.
    :param exclude_fid: drop the fiducial cosmology from the training set.
    :param dense_grid: load the grid with extra points around one cosmology.
    :param random_split: use a random train/test split instead of the default.
    :param from_files: load from files where the loader supports it.
    :return: (X_train, X_test, X_new_idf, y_train, y_test, y_new_idf).
    """
    if not grfized and (not dense_grid) and (not random_split):
        # the default data to load
        X_train, X_test, y_train, y_test = load_sparse_grid(imsize=mapsize,
                                                            from_files=from_files)
    elif grfized:
        # equivalent gaussian random field maps
        assert not from_files
        X_train, X_test, y_train, y_test = load_grf_sparse_grid()
    elif dense_grid:
        assert not from_files
        # data with additional points around a cosmology
        X_train, X_test, y_train, y_test = load_dense_grid(imsize=mapsize)
    elif random_split:
        # random train and test split
        X_train, X_test, y_train, y_test = load_randomsplit_grid(
            imsize=mapsize, from_files=from_files)
    # Always predict the new fiducial maps as well -- it costs nothing and is
    # not used in the experiments otherwise.
    fn = '../../data/columbia_data_fiducial_new_idf_pix'+str(mapsize)+'.npy'
    X_new_idf = np.load(fn)
    y_new_idf = np.ones((len(y_test),2))
    # Fiducial cosmology parameters (Omega_m, sigma_8) -- presumably; confirm.
    y_new_idf[:,0], y_new_idf[:,1] = 0.309, 0.816
    if exclude_fid:  # exclude fiducial cosmo params if asked for
        idx = (y_train[:,0] == 0.309) & (y_train[:,1] == 0.816)
        X_train, y_train = X_train[~idx], y_train[~idx]
    return X_train, X_test, X_new_idf, y_train, y_test, y_new_idf
| 7,865 |
def concat_features(args, feature_dim_name='feature'):
    """Concatenate Xs along a set of feature dimensions

    Parameters
    ----------
    args : iterable
        list of tuples of the form (dims, DataArray) where dims is a tuple of
        dimensions that will be considered feature dimensions

    Returns
    -------
    stacked : xr.DataArray
        The output where the data has been stacked along the feature_dim_name
    """
    stacked_arrays = []
    stacked_indexes = []
    # Single pass so that generator inputs are consumed only once.
    for dims, xarr in args:
        stacked = xarr.stack(**{feature_dim_name: dims})
        stacked_arrays.append(stacked)
        stacked_indexes.append(stacked.indexes[feature_dim_name])

    combined_index = concat_multi_indexes(stacked_indexes)
    return xr.concat(stacked_arrays, dim=combined_index)
| 7,866 |
def _inspect_mixin(
    self, geoctx=None, format="pyarrow", file=None, timeout=30, client=None, **params
):
    """
    Quickly compute this proxy object using a low-latency, lower-reliability backend.

    Inspect is meant for getting simple computations out of Workflows, primarily for interactive use.
    It's quicker but less resilient, won't be retried if it fails, and has no progress updates.

    If you have a larger computation (longer than ~30sec), or you want to be sure the computation will succeed,
    use `~.compute` instead. `~.compute` creates a `.Job`, which runs asynchronously, will be retried if it fails,
    and stores its results for later retrieval.

    Parameters
    ----------
    geoctx: `.scenes.geocontext.GeoContext`, `~.workflows.types.geospatial.GeoContext`, or None
        The GeoContext parameter under which to run the computation.
        Almost all computations will require a `~.workflows.types.geospatial.GeoContext`,
        but for operations that only involve non-geospatial types,
        this parameter is optional.
    format: str or dict, default "pyarrow"
        The serialization format for the result.
        See the `formats
        <https://docs.descarteslabs.com/descarteslabs/workflows/docs/formats.html#output-formats>`_
        documentation for more information.
        If "pyarrow" (the default), returns an appropriate Python object, otherwise returns raw bytes.
    file: path or file-like object, optional
        If specified, writes results to the path or file instead of returning them.
    timeout: int, optional, default 30
        The number of seconds to wait for the result.
        Raises `~descarteslabs.workflows.models.JobTimeoutError` if the timeout passes.
    client: `.workflows.inspect.InspectClient`, optional
        Allows you to use a specific InspectClient instance with non-default
        auth and parameters
    **params: Proxytype
        Parameters under which to run the computation.

    Returns
    -------
    result: Python object or bytes
        When ``format="pyarrow"`` (the default), returns an appropriate Python object representing
        the result, either as a plain Python type, or object from `descarteslabs.workflows.result_types`.
        For other formats, returns raw bytes. Consider using `file` in that case to save the results to a file.
    """
    # Promote the geocontext to the Workflows GeoContext type when provided.
    if geoctx is not None:
        params["geoctx"] = GeoContext._promote(geoctx)
    # Fall back to the shared module-level client when none is supplied.
    if client is None:
        client = _get_global_inspect_client()
    return client.inspect(self, format=format, file=file, timeout=timeout, **params)
| 7,867 |
def list_dvs(service_instance):
    """
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    """
    # Delegates to the generic object lister with the DVS managed-object type.
    return utils_common.list_objects(service_instance, vim.DistributedVirtualSwitch)
| 7,868 |
def setup(app):
    """Call methods during builder initiated.

    Sphinx extension entry point: registers ``build_mapping_tables`` to run
    on the ``builder-inited`` event.
    """
    app.connect("builder-inited", build_mapping_tables)
| 7,869 |
def generate(env):
    """Add Builders and construction variables for LaTeX to an Environment."""
    env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes)
    # Python-2-style implicit relative imports of the sibling tool modules;
    # deferred to avoid import cycles at module load time -- confirm.
    import dvi
    dvi.generate(env)

    import pdf
    pdf.generate(env)

    # Register the LaTeX aux-file action/emitter for both source suffixes.
    bld = env['BUILDERS']['DVI']
    bld.add_action('.ltx', LaTeXAuxAction)
    bld.add_action('.latex', LaTeXAuxAction)
    bld.add_emitter('.ltx', SCons.Tool.tex.tex_eps_emitter)
    bld.add_emitter('.latex', SCons.Tool.tex.tex_eps_emitter)

    SCons.Tool.tex.generate_common(env)
| 7,870 |
def gdb_cli_args(request: FixtureRequest) -> Optional[str]:
    """
    Return gdb cli args, preferring a parametrized value over the config
    option. Enables parametrization for the same cli option.
    """
    parametrized = getattr(request, 'param', None)
    if parametrized:
        return parametrized
    return request.config.getoption('gdb_cli_args', None)
| 7,871 |
def coerce_rfc_3339_date(input_date):
    """Parse an RFC 3339 'Z'-suffixed timestamp string into a datetime.

    Returns a ``datetime.datetime`` on success and ``False`` when
    ``input_date`` is empty or None. Raises ValueError if the string is
    non-empty but malformed.
    """
    if not input_date:
        return False
    return datetime.datetime.strptime(input_date, "%Y-%m-%dT%H:%M:%SZ")
| 7,872 |
def extract_job_url(job):
    """
    Extract the URL of the posted job from its bs4 representation.

    params:
        job str: html str representation from bs4
    returns:
        url str: relative URL path of the job ad
    """
    # The first <a> tag of the job card carries the ad's link.
    anchor = job.a
    return anchor["href"]
| 7,873 |
def test_upsert_page(mocker):
    """Base case: page doesn't already exist, so it is created under its parent."""
    confluence = mocker.Mock(spec=Confluence)
    parent_page_mock = mocker.Mock()
    parent_page_mock.id = mocker.sentinel.parent_page_id
    # First get_page call (the page itself) returns None -> page is missing;
    # second call (the parent lookup) returns the parent mock.
    confluence.get_page.side_effect = [None, parent_page_mock]
    page = Page(
        space=mocker.sentinel.space,
        title=mocker.sentinel.title,
        body=mocker.sentinel.body,
        parent_title=mocker.sentinel.parent,
    )
    message = mocker.sentinel.message
    main.upsert_page(
        confluence=confluence, page=page, message=message,
    )
    # The page lookup must happen before the parent lookup (any_order=False).
    confluence.get_page.assert_has_calls(
        [
            mocker.call(title=page.title, space_key=page.space, page_id=None),
            mocker.call(title=page.parent_title, space_key=page.space),
        ],
        any_order=False,
    )
    # Missing page -> created (not updated) under the resolved parent id.
    confluence.create_page.assert_called_once_with(
        space=page.space,
        title=page.title,
        body=page.body,
        parent_id=mocker.sentinel.parent_page_id,
        update_message=message,
    )
| 7,874 |
def commit(experiment_name, time):
    """
    Try to commit the repo exactly as it is when starting the experiment,
    for reproducibility.

    Returns the commit hash on success, or the placeholder string
    '<Unable to commit>' if any git operation fails.
    """
    try:
        sh.git.commit('-a',
                      m='"auto commit tracked files for new experiment: {} on {}"'.format(experiment_name, time),
                      allow_empty=True
                      )
        commit_hash = sh.git('rev-parse', 'HEAD').strip()
        return commit_hash
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; best-effort behavior is otherwise preserved.
        return '<Unable to commit>'
| 7,875 |
def weighted_loss(class_weights):
    """
    Create a weighted loss function. Penalise the misclassification
    of classes more with the higher usage.

    class_weights: mapping class -> weight; only the values are used, in the
    dict's iteration order (must match the class-column order of y_true/y_pred).
    """
    weight_values = list(class_weights.values())
    def weighted_binary_crossentropy(y_true, y_pred):
        # add another dimension to compute dot product
        # NOTE(review): K.expand_dims is applied to a plain Python list here;
        # presumably the backend converts it to a tensor — confirm with the
        # Keras backend in use.
        expanded_weights = K.expand_dims(weight_values, axis=-1)
        return K.dot(K.binary_crossentropy(y_true, y_pred), expanded_weights)
    return weighted_binary_crossentropy
| 7,876 |
def delete_cluster(resource_root, name):
    """
    Delete a cluster by name.
    @param resource_root: The root Resource object.
    @param name: Cluster name
    @return: The deleted ApiCluster object
    """
    path = "%s/%s" % (CLUSTERS_PATH, name)
    response = resource_root.delete(path)
    return ApiCluster.from_json_dict(response, resource_root)
| 7,877 |
def reload_plugin(name):
    """Reloads the MIBI plug-in 'name'.

    NOTE(review): `reload` is called with a dotted module-path string, which
    the builtin importlib.reload does not accept — presumably this is a
    project-local helper; verify its definition.
    """
    reload('mibi.plugin.' + name)
| 7,878 |
def mk_test(x, alpha = 0.05):
    """Perform the Mann-Kendall (MK) test for a monotonic trend in the data.

    Args:
        x: a vector of data
        alpha: significance level

    Returns:
        pd.Series with fields:
            trend: 'increasing', 'decreasing' or 'no trend'
            varS:  tie-corrected variance of the S statistic (rounded)
            p:     two-tailed p value (rounded)
            z:     normalized test statistic (rounded)
            s:     the S statistic (rounded)
            n:     sample size
            ta:    n*(n-1)/2, the maximum possible S

    Examples::
        >>> x = np.random.rand(100)
        >>> trend = mk_test(x, 0.05)
        >>> print(trend.trend)
        increasing

    Credit: http://pydoc.net/Python/ambhas/0.4.0/ambhas.stats/
    """
    n = len(x)
    ta = n * (n - 1) / 2
    # S statistic: sum of signs of all pairwise later-minus-earlier differences.
    # (Fix: was `xrange`, a Python-2-only builtin that raises NameError on py3.)
    s = 0
    for k in range(n - 1):
        for j in range(k + 1, n):
            s += np.sign(x[j] - x[k])
    unique_x = np.unique(x)
    g = len(unique_x)
    # var(S): apply the tie correction only when duplicate values exist.
    if n == g:  # there is no tie
        var_s = (n * (n - 1) * (2 * n + 5)) / 18
    else:  # there are some ties in data
        tp = np.zeros(unique_x.shape)
        for i in range(len(unique_x)):
            tp[i] = sum(unique_x[i] == x)
        var_s = (n * (n - 1) * (2 * n + 5) - np.sum(tp * (tp - 1) * (2 * tp + 5))) / 18
    # Continuity-corrected normalization of S (z = 0 when s == 0 or s is NaN).
    if s > 0:
        z = (s - 1) / np.sqrt(var_s)
    elif s < 0:
        z = (s + 1) / np.sqrt(var_s)
    else:
        z = 0
    p = 2 * (1 - norm.cdf(abs(z)))  # two-tailed test
    h = abs(z) > norm.ppf(1 - alpha / 2)
    if (z < 0) and h:
        trend = 'decreasing'
    elif (z > 0) and h:
        trend = 'increasing'
    else:
        trend = 'no trend'
    return pd.Series({'trend': trend, 'varS': round(var_s, 3), 'p': round(p, 3),
                      'z': round(z, 3), 's': round(s, 3), 'n': n, 'ta': ta})
| 7,879 |
def convert_event_to_boxes(event: Event) -> List[EventBox]:
    """Convert an event into day-aligned boxes fully covering its time span.

    Most events start and end on the same day and yield a single EventBox;
    an event crossing midnight is split into one box per day touched: a
    partial first day, full 24h middle days, and a partial last day.
    """
    first_day = event.start_datetime.date()
    last_day = event.end_datetime.date()
    start_f = time_to_float(event.start_datetime.time())
    end_f = time_to_float(event.end_datetime.time())
    num_days = (last_day - first_day).days + 1
    if num_days == 1:
        return [EventBox(0, start_f, end_f)]
    boxes = [EventBox(0, start_f, 24.0)]
    boxes.extend(EventBox(day, 0.0, 24.0) for day in range(1, num_days - 1))
    boxes.append(EventBox(num_days - 1, 0.0, end_f))
    return boxes
| 7,880 |
def limit_bone(bone, x, y, z, ik=True):
    """
    Clamp a pose bone's local rotation via a LIMIT_ROTATION constraint and,
    optionally, mirror the same limits onto the bone's IK settings.

    bone: PoseBone
    x: (float, float)  (min, max) rotation limit around X
    y: (float, float)  (min, max) rotation limit around Y
    z: (float, float)  (min, max) rotation limit around Z
    ik: bool  also apply the limits to IK solving
    """
    # Bone Constraints
    limit = bone.constraints.new(type='LIMIT_ROTATION')
    limit.use_limit_x = True
    limit.use_limit_y = True
    limit.use_limit_z = True
    limit.min_x, limit.max_x = x
    limit.min_y, limit.max_y = y
    limit.min_z, limit.max_z = z
    # Limits are interpreted in the bone's local space.
    limit.owner_space = 'LOCAL'
    if ik:
        # IK Constraints: duplicate the same ranges so the IK solver respects them.
        bone.use_ik_limit_x = True
        bone.use_ik_limit_y = True
        bone.use_ik_limit_z = True
        bone.ik_min_x, bone.ik_max_x = x
        bone.ik_min_y, bone.ik_max_y = y
        bone.ik_min_z, bone.ik_max_z = z
| 7,881 |
def _reduce_consecutive_layers(conv_defs, start_id, end_id, multiplier=0.5):
"""Reduce the outputs of consecutive layers with multiplier.
Args:
conv_defs: Mobilenet conv_defs.
start_id: 0-based index of the starting conv_def to be reduced.
end_id: 0-based index of the last conv_def to be reduced.
multiplier: The multiplier by which to reduce the conv_defs.
Returns:
Mobilenet conv_defs where the output sizes from layers [start_id, end_id],
inclusive, are reduced by multiplier.
Raises:
ValueError if any layer to be reduced does not have the 'num_outputs'
attribute.
"""
defs = copy.deepcopy(conv_defs)
for d in defs['spec'][start_id:end_id+1]:
d.params.update({
'num_outputs': np.int(np.round(d.params['num_outputs'] * multiplier))
})
return defs
| 7,882 |
def ajax_save_content(request):
    """Persist front-end-edited CMS content and acknowledge with 'SUCCESS'."""
    content_name = request.POST['content_name']
    record = CmsContent.objects.get(
        site=get_current_site(request), name=content_name,
    )
    record.content = request.POST['content']
    record.save()
    return HttpResponse('SUCCESS')
| 7,883 |
def add_regional_group_costs(ws, data_sheet):
    """
    Style the regional-costs worksheet and add four bar charts sourced from
    the 'Estimates' sheet.

    ws: the worksheet to decorate (tab colored, gridlines hidden,
        blue/red header strips, charts at B4/L4/B20/L20).
    data_sheet: unused here — presumably kept for a uniform sheet-builder
        signature; confirm with callers.

    Returns the modified worksheet.
    """
    ws.sheet_properties.tabColor = "92D050"
    ##Color white
    ws.sheet_view.showGridLines = False
    #Set blue and red border strips
    set_cell_color(ws, 'A1:AZ1', "004C97")
    set_cell_color(ws, 'A2:AZ2', "C00000")
    ws = bar_chart(ws, "Estimates!$C$64:$C$71", "Estimates!$B$65:$B$71", "Total Cost by Region", 'Cost ($Bn)', "B4")
    ws = bar_chart(ws, "Estimates!$F$64:$F$71", "Estimates!$E$65:$E$71", "Mean Annual 10-Year GDP by Region",'GDP ($Bn)', "L4")
    ws = bar_chart(ws, "Estimates!$I$64:$I$71", "Estimates!$H$65:$H$71", "Initial Investment by Region",'Cost ($Bn)', "B20")
    ws = bar_chart(ws, "Estimates!$C$75:$C$82", "Estimates!$B$76:$B$82", "GDP Share by Region",'Percent of GDP (%)', "L20")
    return ws
| 7,884 |
def normal(loc=0.0, scale=1.0, size=(1,1), sparsity=1.0):
    """
    Draw random samples from a normal (Gaussian) distribution.

    Parameters
    ----------
    loc: Mean ("centre") of the distribution.
    scale: Standard deviation (spread or "width") of the distribution.
    size: Output shape (only tuple of length 2, i.e. (m, n), supported).
    sparsity: Sparsity (between 0.0 and 1.0).

    Examples
    --------
    >>> import systemml as sml
    >>> import numpy as np
    >>> sml.setSparkContext(sc)
    >>> from systemml import random
    >>> m1 = sml.random.normal(loc=3, scale=2, size=(3,3))
    >>> m1.toNumPyArray()
    array([[ 3.48857226,  6.17261819,  2.51167259],
           [ 3.60506708, -1.90266305,  3.97601633],
           [ 3.62245706,  5.9430881 ,  2.53070413]])
    """
    if len(size) != 2:
        raise TypeError('Incorrect type for size. Expected tuple of length 2')
    rows, cols = asStr(size[0]), asStr(size[1])
    loc, scale, sparsity = asStr(loc), asStr(scale), asStr(sparsity)
    # Generated DML shifts/scales a standard normal: loc + scale * N(0, 1).
    return constructSamplingNode([], [
        OUTPUT_ID, ' = ', loc, ' + ', scale,
        ' * random.normal(', rows, ',', cols, ',', sparsity, ')\n'])
| 7,885 |
def random_data(num):
    """Return a dict of `num` entries of random hex, int, float and password values.

    {0: {
        'float': 186.66541583209647,
        'hex': '43435c553c722359e386804f6b28d2c2ee3754456c38f5e7e68f',
        'int': 851482763158959204,
        'password': '5AJ]-02X0J'
        }
    }"""
    data = {}
    # Keys run 0..num-1; generator call order matches the original
    # (hex, int, float, password) so seeded RNG sequences are identical.
    for idx in range(num):
        data[idx] = {
            "hex": random_hex(),
            "int": randint(1, 10**18),
            "float": uniform(0.1, 10**3.01),
            "password": randPwStr(),
        }
    return data
| 7,886 |
def test_local_branch_exists(mock_repo):
    """
    GIVEN GitRepo is initialized with a path and repo
    WHEN branch.exists is called with a valid branch and None remote
    THEN True is returned
    """
    git_repo = GitRepo(repo=mock_repo)
    mock_repo.branches = ["master", "test"]
    branch_found = git_repo.branch.exists("test")
    assert branch_found is True
| 7,887 |
def denied(request):
    """Authentication failed and user was denied.

    Renders the static djangosaml2 'denied' template for the SAML flow.
    """
    return render(request, 'djangosaml2/denied.html')
| 7,888 |
def getFontCoverage(f, glyphCache=None):
    """
    Calculate a weighted average of all glyph coverages.

    Uses character frequencies of multiple languages to average out
    language-specific bias; only the glyphs listed in
    coverage.data.coverageNames are considered.

    f: a font object exposing `in`, item access by glyph name, `path`
       and `info` (e.g. a defcon Font).
    glyphCache: optional dict reused across calls to memoize per-glyph
       coverage results.
    Returns the averaged coverage, or None when no supported language
    is found.
    """
    total = []
    if glyphCache is None:
        glyphCache = {}
    supportedLanguages = coverage.data.checkLanguages(f)
    # Collect the coverage-relevant glyph names this font actually has.
    availableGlyphs = []
    for name in coverage.data.coverageNames:
        if name not in f:
            continue
        g = f[name]
        availableGlyphs.append(name)
    if not supportedLanguages:
        return None
    for lang in supportedLanguages:
        table = coverage.data.frequencies[lang]
        languageTotal = 0
        for glyphName in availableGlyphs:
            if glyphName not in table:
                continue
            weight = table[glyphName]
            if glyphName in f:
                g = f[glyphName]
            else:
                continue
            try:
                a = calculateGlyphCoverage(g, f, cache=glyphCache)
            except Exception:
                # Fix: was a bare `except:` that also swallowed
                # KeyboardInterrupt/SystemExit. Failures are reported and
                # treated as zero coverage, as before.
                if f.path is not None:
                    fontName = os.path.basename(f.path)
                else:
                    fontName = "object: %s-%s" % (f.info.familyName, f.info.styleName)
                print("failed calculating the coverage for %s in %s" % (g.name, fontName))
                traceback.print_exc(file=sys.stdout)
                a = 0
            if a > 0:
                languageTotal += a * weight
        total.append(languageTotal / len(table))
    return sum(total) / len(supportedLanguages)
| 7,889 |
def monday_of_week(year, week):
    """
    Returns a UTC datetime for the Monday of the given ISO week of the
    given year.
    """
    # %W numbers weeks starting Monday, with week 1 = first week containing
    # a Monday; '1' selects Monday within that week.
    str_time = time.strptime('{0} {1} 1'.format(year, week), '%Y %W %w')
    date = timezone.datetime(year=str_time.tm_year, month=str_time.tm_mon,
                             day=str_time.tm_mday, tzinfo=timezone.utc)
    if timezone.datetime(year, 1, 4).isoweekday() > 4:
        # ISO 8601 where week 1 is the first week that has at least 4 days in
        # the current year
        date -= timezone.timedelta(days=7)
    return date
| 7,890 |
def preview_image(instance, sender, *args, **kwargs):
    """Updates book preview images when a review or rating changes."""
    # Only act for Review/ReviewRating saves, and only if the feature is on.
    if not ENABLE_PREVIEW_IMAGES or sender not in (Review, ReviewRating):
        return
    if instance.field_tracker.changed():
        generate_edition_preview_image_task.delay(instance.book.id)
| 7,891 |
def object_meta(metadata: Metadata) -> Mapping[str, Any]:
    """
    Build a minimal ObjectMeta mapping from the supplied metadata.
    Spec: https://github.com/argoproj/argo-workflows/blob/v3.0.4/docs/fields.md#objectmeta
    """
    meta: Dict[str, Any] = {}
    # A prefix-generated name uses "generateName"; otherwise a fixed "name".
    name_key = "generateName" if metadata.generate_name_from_prefix else "name"
    meta[name_key] = metadata.name
    # Optional fields are emitted only when truthy, keeping the mapping minimal.
    for key, value in (
        ("annotations", metadata.annotations),
        ("labels", metadata.labels),
        ("namespace", metadata.namespace),
    ):
        if value:
            meta[key] = value
    return meta
| 7,892 |
def assert_equal(actual: Literal["statespace"], desired: Literal["statespace"]):
    """
    usage.statsmodels: 1
    """
    # Auto-generated API-usage stub: records that this signature is called
    # once by statsmodels; the body is intentionally empty.
    ...
| 7,893 |
def clear_cmd():
    """
    Utility function that clears the terminal screen.
    """
    # Fix: os.system() does not raise when a command is missing (it just
    # returns a non-zero status), so the original try/except pairs never
    # selected between `clear` and `cls` -- both always ran, one of them
    # printing an error. Pick the right command for the platform instead.
    os.system('cls' if os.name == 'nt' else 'clear')
| 7,894 |
def vectorize_timing(n_targets):
    """
    Time the rise-time computation for ``n_targets`` copies of Vega and
    return the elapsed wall-clock run time in seconds.
    """
    from time import time
    vega = FixedTarget(
        name="Vega",
        coord=SkyCoord(279.23473479*u.degree, 38.78368896*u.degree),
    )
    targets = [vega] * n_targets
    when = Time("2008-02-27 22:00:00")
    observer = Observer(location=EarthLocation(10*u.deg, 20*u.deg, 0*u.m))
    t0 = time()
    observer.target_rise_time(when, targets)
    return time() - t0
| 7,895 |
def print_stats(fromdir):
    """Print collected statistics, merging per-subprocess dump files.

    fromdir: directory containing ``*.dump`` JSON files, each holding a
    two-element list [amqp_stats, layer_stats]. List-valued entries are
    latency samples; int-valued entries are plain call counters.
    """
    # Include statistics from subprocesses.
    for statfile in glob.glob('%s/*.dump' % fromdir):
        with open(statfile) as f:
            statdata = json.loads(f.read())
        for num, stat in enumerate([amqp_stats, layer_stats]):
            for k, v in statdata[num].items():
                if isinstance(v, list):
                    stat.setdefault(k, [])
                    stat[k].extend(v)
                else:
                    stat.setdefault(k, 0)
                    stat[k] += v
    headers = ['method', 'calls', 'mean', 'median', 'stdev', '95%', '99%']
    for num, stats in enumerate([amqp_stats, layer_stats], start=1):
        if not stats:
            print("%d) No statistic available" % num)
            continue
        # Build one table row per method, most-called first.
        data = []
        for method, latencies in stats.items():
            if isinstance(latencies, list):
                data.append([
                    method,
                    len(latencies),
                    statistics.mean(latencies),
                    statistics.median(latencies),
                    # stdev needs at least two samples.
                    statistics.stdev(latencies)
                    if len(latencies) > 1 else None,
                    percentile(latencies, 0.95),
                    percentile(latencies, 0.99),
                ])
            elif isinstance(latencies, int):
                data.append(
                    [method, latencies, None, None, None, None, None],
                )
            else:
                # Fix: error message typo was "currupted".
                raise Exception(
                    'Stat(%d) was corrupted at method %s' % (num, method),
                )
        data = sorted(data, key=itemgetter(1), reverse=True)
        print()
        print(tabulate(data, headers))
| 7,896 |
def get_game(name, all=False):
    """
    Get the game information for a particular game.

    For response object structure, see:
    https://dev.twitch.tv/docs/v5/reference/search/#search-games

    May throw exceptions on network/Twitch error.
    """
    # NOTE(review): the `all` parameter shadows the builtin, but renaming it
    # would break keyword callers, so it is kept as-is.
    search_opts = {
        'query': name,
        'type': 'suggest',
        'live': 'false',
    }
    headers = {
        'Client-ID': config['twitch_clientid'],
        'Accept': 'application/vnd.twitchtv.v5+json',
    }
    raw = common.http.request("https://api.twitch.tv/kraken/search/games", search_opts, headers=headers)
    games = json.loads(raw)['games'] or []
    if all:
        return games
    return next((game for game in games if game['name'] == name), None)
| 7,897 |
def compute_correlation_prob_class_target(candidates_per_query_target):
    """Compute the overall Pearson correlation between each candidate's
    positive-class probability ('pred_prob') and its target-column value,
    pooled across every query target.
    """
    pooled_probs = []
    pooled_gains = []
    for candidates in candidates_per_query_target.values():
        for candidate_info in candidates.values():
            pooled_probs.append(candidate_info['pred_prob'])
            pooled_gains.append(candidate_info[TARGET_COLUMN])
    return pearsonr(pooled_probs, pooled_gains)
| 7,898 |
def get(sql: str):
    """ Execute a select SQL statement and return its single scalar result.

    Examples:
        select count(1) from meters
        select last(ts) from meters where tag = 'xxx'

    :return: the single value, or None when the query returns no rows or
             fails with an operational error
    :raises MultiColumnsError: if the first row has more than one column
    """
    result = _query(sql)
    try:
        value = result.next()
    except StopIteration:
        # No rows returned.
        return None
    except taos.error.OperationalError:
        # NOTE(review): operational errors are mapped to None rather than
        # propagated -- callers cannot distinguish "no data" from "error".
        return None
    if len(value) == 1:
        return value[0]
    else:
        raise MultiColumnsError('Expect only one column.')
| 7,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.