# ===== Dalwy/Ayame :: /cogs/NOT_USING/Rules.py =====
# Source: https://github.com/Dalwy/Ayame (no license)
import discord
from discord.ext import commands
import json
class Rules(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.emojis =['✅']
self.DalwyRules = 'https://cdn.discordapp.com/attachments/659652540488482837/659658620547629058/Dalwy.png'
@commands.command()
@commands.has_permissions(administrator=True)
async def Rules(self, ctx):
# Dalwy Rules
if ctx.guild.id == 692940723955302451:
Ai = self.bot.get_emoji(692952284354576427)
ST = self.bot.get_emoji(692952385856995339)
PaS = self.bot.get_emoji(692952284581199892)
DB = self.bot.get_emoji(692952284312633455)
embed = discord.Embed()
            with open('JSON_Files/DalwyRules.json', encoding='utf-8') as rules_file:
                my_dic = json.load(rules_file)
embed.description = '\n'.join(['{}'.format(x) for x in my_dic["Description"]])
embed.colour = 0xf9f9f9
embed.add_field(name="Ai", value=Ai)
embed.add_field(name="Software Testing", value=ST)
embed.add_field(name="Probability and Stats", value=PaS)
embed.add_field(name="Databases", value=DB)
embed.title = ''.join(my_dic["Title"])
# embed.set_image(url=self.DalwyRules)
msg = await ctx.send(embed=embed)
await msg.add_reaction(Ai)
await msg.add_reaction(PaS)
await msg.add_reaction(DB)
await msg.add_reaction(ST)
message = ctx.message
await message.delete()
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
message_id = payload.message_id
if message_id == 692986216005632060:
guild_id = payload.guild_id
guild = discord.utils.find(lambda g : g.id == guild_id, self.bot.guilds)
role = discord.utils.get(guild.roles, name=payload.emoji.name)
if role is not None:
member = discord.utils.find(lambda m: m.id == payload.user_id, guild.members)
if member is not None:
await member.add_roles(role)
else:
await guild.channel.send("Member not found")
else:
await guild.channel.send("Role not found")
@commands.Cog.listener()
async def on_raw_reaction_remove(self, payload):
message_id = payload.message_id
if message_id == 692986216005632060:
guild_id = payload.guild_id
guild = discord.utils.find(lambda g: g.id == guild_id, self.bot.guilds)
role = discord.utils.get(guild.roles, name=payload.emoji.name)
if role is not None:
member = discord.utils.find(lambda m: m.id == payload.user_id, guild.members)
if member is not None:
await member.remove_roles(role)
else:
await guild.channel.send("Member not found")
else:
await guild.channel.send("Role not found")
def setup(bot):
bot.add_cog(Rules(bot))
# ===== DerekGloudemans/Nutrition-Optimization-New :: /2 - linear_solver.py =====
# Source: https://github.com/DerekGloudemans/Nutrition-Optimization-New (no license)
# optimize as a linear program with nutrient guidelines as constraints
from scipy.optimize import linprog
from scipy.optimize import minimize
import _pickle as cPickle
import numpy as np
import pandas as pd
def prep_data():
# 1. load data from pickle files
f = open("Data/Nutrition_Data_Matrix.cpkl", 'rb')
data_all = cPickle.load(f)
f.close()
food_names = data_all[0]
nut_names = data_all[1]
data = data_all[2]
# 2. Load exclusion lists
# Read constraint and nutrient inclusion values from excel
df = pd.read_excel("Data/nut_constraints.xlsx")
constraints = df.values
nutrient_exclusion_list = constraints[2,:]
constraints = constraints[0:2,:]
#Read in food exclusion list from excel
food_exclusion_list = []
df2 = pd.read_excel("Data/food_exclusions.xlsx")
food_exclusion_list = df2.values
weights = food_exclusion_list[1,:]
food_mins = food_exclusion_list[2,:]/100
food_maxs = food_exclusion_list[3,:]/100
food_exclusion_list = food_exclusion_list[0,:]
# 3. Simplify data using exclusion lists
food_idx = []
for i in range(0,len(food_exclusion_list)):
if food_exclusion_list[i] == 1:
food_idx.append(i)
nut_idx = []
for i in range(0,len(nutrient_exclusion_list)):
if nutrient_exclusion_list[i] == 1:
nut_idx.append(i)
# Selects only relevant data based on exclusion values
data = data[food_idx,:]
data = data[:,nut_idx]
constraints = constraints[:,nut_idx]
weights = weights[food_idx]
food_mins = food_mins[food_idx]
food_maxs = food_maxs[food_idx]
#pairs mins and maxes into (min, max) bounds
bounds = []
for i in range(0,len(food_mins)):
bounds.append((food_mins[i],food_maxs[i]))
# Updates label lists to remove excluded food items
new_food_names = []
for item in food_idx:
new_food_names.append(food_names[item])
food_names = new_food_names
new_nut_names = []
for item in nut_idx:
new_nut_names.append(nut_names[item])
nut_names = new_nut_names
return data, nut_names, food_names, constraints, weights, bounds
def display_result(data_in):
result = data_in[0]
food_names = data_in[3]
nut_names = data_in[2]
data = data_in[1]
x = result.x
included_foods = []
for i in range(0,len(x)):
if x[i] > 0.0001:
included_foods.append((food_names[i]['food_name'], x[i]))
print("Warning: highly optimal diet comin' up:\n")
for item in included_foods:
print("Eat {} grams of {}.".format(np.round(item[1]*float(100),decimals=1),item[0]))
data = np.transpose(data)
x = x.reshape(-1,1)
print("\nThis diet contains the following nutrients:\n")
nuts = np.matmul(data,x)
for i in range(0, len(nuts)):
print("{} {} of {}".format(np.round(nuts[i,0],decimals=1),nut_names[i]['unit'], \
nut_names[i]['nutrient_name']))
print("Total weight: {}g ({} g non-water)".format(np.sum(x*100),(np.sum(x*100)-nuts[4])))
def lin_solver(data,nut_names, food_names, constraints, weights,bounds, weighting = -1, show = False):
# Problem formulation notes:
# each x-value corresponds to the amount (in 100 of grams) of one food
# each constraint corresponds to a bound on a nutrient
# Thus, for 2000 foods and 50 nutrients:
# A should be 100 rows by 2000 columns
# b should be 100 rows by 1 column
# c should be 2000 columns by 1 row
# Define upper bound constraints
# idx will store indices of all defined max constraints
idx = []
A_max = np.transpose(data)
b_max = constraints[1,:]
for i in range(0,len(b_max)):
# constraint is defined if not a nan
if not(np.isnan(b_max[i])):
idx.append(i)
# Thus, A_max times x must be less than b_max
A_max = A_max[idx,:]
b_max = b_max[idx]
# Define lower bound constraints
idx = []
A_min = np.transpose(data)
b_min = constraints[0,:]
for i in range(0,len(b_min)):
# constraint is defined if not a nan
if not(np.isnan(b_min[i])):
idx.append(i)
    # Thus, A_min times x must be greater than b_min
A_min = A_min[idx,:]
b_min = b_min[idx]
# So -A_min times x must be less than -b_min
A_min = -A_min
b_min = -b_min
# Finished LP formulation
A = np.concatenate((A_min,A_max), axis = 0)
b = np.concatenate((b_min,b_max), axis = 0)
# define objective (min weight, none, min all but selected food weights)
if weighting == -1:
c = np.ravel(np.ones([np.size(A,1), 1]))
elif weighting == 0:
c = np.ravel(np.zeros([np.size(A,1), 1]))
else:
c = np.ravel(np.ones([np.size(A,1), 1]))
for i in range (0,len(weights)):
if weights[i] != 0:
c[i] = 0
#Run linear program solver
result = linprog(c, A_ub=A, b_ub=b, bounds = bounds,method = 'interior-point')
if show:
#6. Display results
display_result((result,data, nut_names,food_names))
return result, (A,b,c)
def iter_removal_solver(data, nut_names, food_names, constraints, weights, bounds, weighting=1, step_size=1, show=False):
    # NOTE: 'bounds' parameter added so the lin_solver call below receives all of its required arguments
#let's try solving, removing all variables below some threshold weight, solve again, etc.
solveable = True
all_results = []
count = 1
while solveable:
if show:
print("On iteration {}.".format(count))
        result, (A, b, c) = lin_solver(data, nut_names, food_names, constraints, weights, bounds, weighting=weighting, show=False)
if result.status != 0:#unsolveable
solveable = False
else:
count = count + 1
all_results.append((result,data,nut_names,food_names))
            # remove the step_size lowest-weight foods before re-solving
            x1 = result.x
            x1 = x1.reshape(-1,1)
            sort = np.argsort(x1[:,0])
            remaining = sort[step_size:]
            A = A[:,remaining]
            c = c[remaining]
            data = data[remaining,:]
            # keep the per-food weights and bounds aligned with the remaining foods
            weights = weights[remaining]
            bounds = [bounds[i] for i in remaining]
food_names2 = []
for i in range(0,len(remaining)):
food_names2.append(food_names[remaining[i]])
food_names = food_names2
if show:
display_result((all_results[-1]))
    return all_results[-1] if all_results else None  # original returned an undefined 'result2'
# returns percentage of min for each nutrient for each ingredient
def nutrients_per_ingredient(x,data,nut_names,food_names):
perfoodmat = data
for i in range(0,len(food_names)):
for j in range (0,len(nut_names)):
perfoodmat[i,j] = data[i,j] * x[i]
return perfoodmat
################################ BEGIN BODY CODE ##############################
#load data
data, nut_names, food_names, constraints, weights, bounds = prep_data()
#run linear program solver
result, junk = lin_solver(data,nut_names, food_names, constraints,weights,bounds, weighting = 1, show = True)
x = result.x
weight = result.fun * 100
# run iterative removal solver
#result2 = iter_removal_solver(data, nut_names, food_names, constraints, weights, bounds, weighting=-1, step_size=1, show=True)
temp = np.nan_to_num(nutrients_per_ingredient(x,data,nut_names,food_names))
#totals = np.matmul(data,x) / np.nan_to_num(constraints[0,:])
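# --- Illustrative sketch (not part of the original repository): the same "-A x <= -b"
# --- trick that lin_solver uses, on a made-up 3-food / 2-nutrient problem. Every number
# --- below is invented for illustration only, and the numpy/linprog imports from the top
# --- of this file are reused. Newer SciPy versions may prefer method='highs'.
toy_foods = np.array([[2.0, 10.0],   # nutrient content per 100 g of food 1
                      [8.0, 1.0],    # food 2
                      [3.0, 5.0]])   # food 3
toy_minimums = np.array([20.0, 30.0])      # required amount of each nutrient
toy_A_ub = -np.transpose(toy_foods)        # ">= minimum" rewritten as "<= -minimum"
toy_b_ub = -toy_minimums
toy_c = np.ones(3)                         # objective: minimise total food weight
toy_result = linprog(toy_c, A_ub=toy_A_ub, b_ub=toy_b_ub,
                     bounds=[(0, None)] * 3, method='interior-point')
print("toy diet (in 100 g units):", np.round(toy_result.x, 2))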
# ===== ruvamd/CodePractice-Python :: /UW/assignment_6/lesson6 code examples/classes.py =====
# Source: https://github.com/ruvamd/CodePractice-Python (no license)
class Person:
pass
andy = Person()
assert str(type(andy)) == "<class '__main__.Person'>"
class Vehicle:
def __init__(self, owner_name):
self.owner_name = owner_name
my_car = Vehicle("andy's")
assert my_car.owner_name == "andy's"
class Rectangle:
def __init__(self, width, height):
self.width = width
self.height = height
def area(self):
return self.width * self.height
def perimeter(self):
return 2 * (self.width + self.height)
block_1 = Rectangle(12, 5)
assert block_1.area() == 60
assert block_1.perimeter() == 34
# ===== xpybuild/xpybuild :: /tests/correctness/targets/FilteredCopy_UnusedMappersDisallowed/Input/test.xpybuild.py =====
# Source: https://github.com/xpybuild/xpybuild (Apache-2.0)
from xpybuild.buildcommon import enableLegacyXpybuildModuleNames
enableLegacyXpybuildModuleNames()
from propertysupport import *
from buildcommon import *
from pathsets import *
from targets.copy import *
defineOutputDirProperty('OUTPUT_DIR', None)
FilteredCopy('${OUTPUT_DIR}/unused-mapper.txt', 'input.txt', StringReplaceLineMapper('x', 'X'))
# ===== eickenberg/super-duper-octo-disco :: /paradigm.py =====
# Source: https://github.com/eickenberg/super-duper-octo-disco (no license)
# -*- coding: utf-8 -*-
import numpy as np
from collections import OrderedDict
import warnings
# Taken from nistats.hemodynamic_model
def _sample_condition(exp_condition, frame_times, oversampling=16,
min_onset=-24):
"""Make a possibly oversampled event regressor from condition information.
Parameters
----------
exp_condition : arraylike of shape (3, n_events)
yields description of events for this condition as a
(onsets, durations, amplitudes) triplet
frame_times : array of shape(n_scans)
sample time points
over_sampling : int, optional
factor for oversampling event regressor
min_onset : float, optional
minimal onset relative to frame_times[0] (in seconds)
events that start before frame_times[0] + min_onset are not considered
Returns
-------
regressor: array of shape(over_sampling * n_scans)
possibly oversampled event regressor
hr_frame_times : array of shape(over_sampling * n_scans)
time points used for regressor sampling
"""
# Find the high-resolution frame_times
n = frame_times.size
min_onset = float(min_onset)
n_hr = ((n - 1) * 1. / (frame_times.max() - frame_times.min()) *
(frame_times.max() * (1 + 1. / (n - 1)) - frame_times.min() -
min_onset) * oversampling) + 1
hr_frame_times = np.linspace(frame_times.min() + min_onset,
frame_times.max() * (1 + 1. / (n - 1)), n_hr)
# Get the condition information
onsets, durations, values = tuple(map(np.asanyarray, exp_condition))
    if (onsets < frame_times[0] + min_onset).any():
        warnings.warn('Some stimulus onsets are earlier than %d in the '
                      'experiment and are thus not considered in the model'
                      % (frame_times[0] + min_onset), UserWarning)
# Set up the regressor timecourse
tmax = len(hr_frame_times)
regressor = np.zeros_like(hr_frame_times).astype(np.float)
t_onset = np.minimum(np.searchsorted(hr_frame_times, onsets), tmax - 1)
regressor[t_onset] += values
t_offset = np.minimum(
np.searchsorted(hr_frame_times, onsets + durations),
tmax - 1)
# Handle the case where duration is 0 by offsetting at t + 1
for i, t in enumerate(t_offset):
if t < (tmax - 1) and t == t_onset[i]:
t_offset[i] += 1
regressor[t_offset] -= values
regressor = np.cumsum(regressor)
return regressor, hr_frame_times
def check_stim_durations(stim_onsets, stimDurations):
""" If no durations specified (stimDurations is None or empty np.array)
then assume spiked stimuli: return a sequence of zeros with same
shape as onsets sequence.
Check that durations have same shape as onsets.
"""
nbc = len(stim_onsets)
nbs = len(stim_onsets[stim_onsets.keys()[0]])
if (stimDurations is None or
(type(stimDurations) == list and
all([d is None for d in stimDurations]))):
dur_seq = [[np.array([]) for s in xrange(nbs)] for i in xrange(nbc)]
stimDurations = OrderedDict(zip(stim_onsets.keys(), dur_seq))
if stimDurations.keys() != stim_onsets.keys():
raise Exception('Conditions in stimDurations (%s) differ '
'from stim_onsets (%s)' % (stimDurations.keys(),
stim_onsets.keys()))
for cn, sdur in stimDurations.iteritems():
for i, dur in enumerate(sdur):
if dur is None:
stimDurations[cn][i] = np.zeros_like(stim_onsets[cn][i])
elif hasattr(dur, 'len') and len(dur) == 0:
stimDurations[cn][i] = np.zeros_like(stim_onsets[cn][i])
elif hasattr(dur, 'size') and dur.size == 0:
stimDurations[cn][i] = np.zeros_like(stim_onsets[cn][i])
else:
if not isinstance(stimDurations, np.ndarray):
stimDurations[cn][i] = np.array(dur)
assert len(stimDurations[cn][i]) == len(stim_onsets[cn][i])
return stimDurations
def extend_sampled_events(sampled_events, sampled_durations):
""" Add events to encode stimulus duration
"""
extended_events = set(sampled_events)
for io, o in enumerate(sampled_events):
extended_events.update(range(o + 1, o + sampled_durations[io]))
return np.array(sorted(list(extended_events)), dtype=int)
def restarize_events(events, durations, dt, t_max):
""" build a binary sequence of events. Each event start is approximated
to the nearest time point on the time grid defined by dt and t_max.
"""
smpl_events = np.array(np.round_(np.divide(events, dt)), dtype=int)
smpl_durations = np.array(np.round_(np.divide(durations, dt)), dtype=int)
smpl_events = extend_sampled_events(smpl_events, smpl_durations)
if np.allclose(t_max % dt, 0):
bin_seq = np.zeros(int(t_max / dt) + 1)
else:
bin_seq = np.zeros(int(np.round((t_max + dt) / dt)))
bin_seq[smpl_events] = 1
return bin_seq
class Paradigm:
def __init__(self, stimOnsets, sessionDurations=None, stimDurations=None):
"""
Args:
*stimOnsets* (dict of list) :
dictionary mapping a condition name to a list of session
stimulus time arrivals.
eg:
{'cond1' : [<session 1 onsets>, <session 2 onsets>]
'cond2' : [<session 1 onsets>, <session 2 onsets>]
}
*sessionDurations* (1D numpy float array): durations for all sessions
*stimDurations* (dict of list) : same structure as stimOnsets.
If None, spiked stimuli are assumed (ie duration=0).
"""
self.stimOnsets = stimOnsets
self.stimDurations = check_stim_durations(stimOnsets, stimDurations)
self.nbSessions = len(self.stimOnsets[self.stimOnsets.keys()[0]])
self.sessionDurations = sessionDurations
def get_stimulus_names(self):
return self.stimOnsets.keys()
def get_t_max(self):
ns = len(self.sessionDurations)
return max([self.sessionDurations[i] for i in xrange(ns)])
def get_rastered(self, dt, tMax=None):
""" Return binary sequences of stimulus arrivals. Each stimulus event
is approximated to the closest time point on the time grid defined
by dt. eg return:
{ 'cond1' : [np.array([ 0 0 0 1 0 0 1 1 1 0 1]),
np.array([ 0 1 1 1 0 0 1 0 1 0 0])] },
'cond2' : [np.array([ 0 0 0 1 0 0 1 1 1 0 0]),
np.array([ 1 1 0 1 0 1 0 0 0 0 0])] },
Arg:
- dt (float): temporal resolution of the target grid
- tMax (float): total duration of the paradigm
If None, then use the session lengths
"""
rasteredParadigm = OrderedDict({})
if tMax is None:
tMax = self.get_t_max()
for cn in self.stimOnsets.iterkeys():
par = []
for iSess, ons in enumerate(self.stimOnsets[cn]):
dur = self.stimDurations[cn][iSess]
binaryEvents = restarize_events(ons, dur, dt, tMax)
par.append(binaryEvents)
rasteredParadigm[cn] = np.vstack(par)
return rasteredParadigm
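# --- Illustrative sketch (not part of the original repository): building a Paradigm for a
# --- single session with two conditions and rasterising it on a 0.5 s grid. All onset and
# --- duration values are invented; the class above uses Python 2 idioms (dict.keys()[0],
# --- iteritems), so this sketch assumes the same interpreter. The __main__ guard keeps the
# --- demo from running when the module is imported.
if __name__ == '__main__':
    example_onsets = OrderedDict([('cond1', [np.array([2.0, 10.0, 18.0])]),
                                  ('cond2', [np.array([5.0, 14.0])])])
    example_durations = OrderedDict([('cond1', [np.array([1.0, 1.0, 1.0])]),
                                     ('cond2', [np.array([2.0, 2.0])])])
    example_paradigm = Paradigm(example_onsets,
                                sessionDurations=np.array([25.0]),
                                stimDurations=example_durations)
    rastered = example_paradigm.get_rastered(dt=0.5)
    # rastered maps each condition name to a (n_sessions, n_time_bins) binary array
    print(rastered['cond1'].shape)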
# ===== Sbarbagnem/User_Identify_Inertial_Sensor :: /util/utils.py =====
# Source: https://github.com/Sbarbagnem/User_Identify_Inertial_Sensor (no license)
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
import pywt
from pprint import pprint
from sklearn import utils as skutils
from sklearn.model_selection import train_test_split
import math
from sklearn.preprocessing import scale as scale_sklearn
from scipy import signal, fftpack
from scipy.signal import find_peaks as find_peaks_scipy
from scipy.interpolate import CubicSpline
from scipy.interpolate import interp1d
def plot_performance(ActivityAccuracy, UserAccuracy, fold, path_to_save, save=False):
plt.plot(ActivityAccuracy)
plt.plot(UserAccuracy)
plt.title('Fold {} for test'.format(fold))
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['Activity_accuracy', 'User_accuracy'], loc='lower right')
if save:
plt.savefig(path_to_save + 'plot_{}.png'.format(fold))
plt.show()
def mean_cross_performance(history):
mean_activity_accuracy = np.mean(history[:, 1], axis=0)
mean_user_accuracy = np.mean(history[:, 3], axis=0)
return mean_activity_accuracy, mean_user_accuracy
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
def split_balanced_data(lu, la, folders, di=None, log=False):
if log:
        print('Total number of examples: {}'.format(len(lu)))
    # dict to store the example indexes for every fold
indexes = {}
for i in np.arange(folders):
indexes[str(i)] = []
last_folder = 0
if di is not None:
for displace in np.unique(di):
temp_index_label_displace = [index for index, x in enumerate(
di) if x == displace] # index of specific displace
for user in np.unique(lu):
temp_index_label_user = [index for index, x in enumerate(
lu) if x == user and index in temp_index_label_displace] # index of specific user
for act in np.unique(la):
temp_index_label_act = [index for index, x in enumerate(
la) if x == act and index in temp_index_label_user] # index of specific activity of user
# same percentage data in every folder
while(len(temp_index_label_act) > 0):
for folder in range(last_folder, folders):
if len(temp_index_label_act) > 0:
indexes[str(folder)].append(
temp_index_label_act[0])
del temp_index_label_act[0]
if folder == folders - 1:
last_folder = 0
else:
last_folder = folder
else:
continue
else:
for user in np.unique(lu):
temp_index_label_user = [index for index, x in enumerate(
lu) if x == user] # index of specific user
for act in np.unique(la):
temp_index_label_act = [index for index, x in enumerate(
la) if x == act and index in temp_index_label_user] # index of specific activity of user
# same percentage data in every folder
while(len(temp_index_label_act) > 0):
for folder in range(last_folder, folders):
if len(temp_index_label_act) > 0:
indexes[str(folder)].append(
temp_index_label_act[0])
del temp_index_label_act[0]
if folder == folders - 1:
last_folder = 0
else:
last_folder = folder
else:
continue
if log:
for key in indexes.keys():
            print(f'Number of samples in fold {key}: {len(indexes[key])}')
return indexes
def delete_overlap(train_id, val_id, distances_to_delete):
overlap_ID = np.empty([0], dtype=np.int32)
for distance in distances_to_delete:
overlap_ID = np.concatenate(
(overlap_ID, val_id+distance, val_id-distance))
overlap_ID = np.unique(overlap_ID)
invalid_idx = np.array([i for i in np.arange(
len(train_id)) if train_id[i] in overlap_ID])
return invalid_idx
def to_delete(overlapping):
"""
Return a list of distance to overlapping sequence.
Parameters
----------
overlapping : float
Overlap percentage used.
"""
if overlapping == 5.0:
return [1]
if overlapping == 6.0:
return [1, 2]
if overlapping == 7.0:
return [1, 2, 3]
if overlapping == 8.0:
return [1, 2, 3, 4]
if overlapping == 9.0:
return [1, 2, 3, 4, 5, 6, 7, 8, 9]
def mapping_act_label(dataset_name):
if 'unimib' in dataset_name or 'shar' in dataset_name:
return ['StandingUpFS', 'StandingUpFL', 'Walking', 'Running', 'GoingUpS',
'Jumping', 'GoingDownS', 'LyingDownFS', 'SittingDown']
if 'sbhar' in dataset_name:
return ['Walking', 'Walking upstairs', 'Walking downstairs', 'Sitting', 'Standing',
'Laying', 'Stand to sit', 'Sit to stand', 'Sit to lie', 'Lie to sit',
                'Stand to lie', 'Lie to stand']
if 'realdisp' in dataset_name:
return ['Walking', 'Jogging', 'Running', 'Jump up', 'Jump front & back', 'Jump sideways', 'Jump leg/arms open/closed',
'Jump rope', 'Trunk twist (arms outstretched)', 'Trunk twist (elbows bended)', 'Waist bends forward', 'Waist rotation',
'Waist bends opposite hands', 'Reach heels backwards', 'Lateral bend', 'Lateral bend arm up', 'Repetitive forward stretching',
'Upper trunk and lower body', 'Arms lateral elevation', 'Arms frontal elevation', 'Frontal hand claps', 'Arms frontal crossing',
                'Shoulders high rotation', 'Shoulders low rotation', 'Arms inner rotation', 'Knees to breast', 'Heels to backside', 'Knees bending',
'Knees bend forward', 'Rotation on knees', 'Rowing', 'Elliptic bike', 'Cycling']
def plot_pred_based_act(correct_predictions, label_act, folds=1, title='', dataset_name='', file_name='', colab_path=None, save_plot=False, save_txt=False, show_plot=False):
if np.array(correct_predictions).ndim == 1:
correct = correct_predictions
else:
correct = np.sum(correct_predictions, axis=0)/folds
width = 0.35
plt.bar(np.arange(0, len(label_act)), correct, width, color='g')
plt.ylabel('% correct prediction')
plt.xlabel('Activity')
plt.title(title, pad=5)
plt.xticks(np.arange(0, len(label_act)), label_act, rotation='vertical')
plt.yticks(np.arange(0, 1.1, 0.1))
plt.tight_layout()
if colab_path is not None:
path_to_save = colab_path + f'/plot/{dataset_name}/'
else:
path_to_save = f'plot/{dataset_name}/'
if save_plot:
if not os.path.exists(path_to_save):
os.makedirs(path_to_save)
plt.savefig(path_to_save + f'{file_name}.png')
if save_txt:
if os.path.isfile(path_to_save + 'performance_based_act.txt'):
f = open(path_to_save + 'performance_based_act.txt', 'a+')
else:
f = open(path_to_save + 'performance_based_act.txt', 'w+')
for l, p in zip(label_act, correct):
f.write(f"{l}: {p}\r\n")
f.close()
if show_plot:
plt.show()
def save_mean_performance_txt(performances, dataset_name, colab_path):
if colab_path is not None:
path_to_save = colab_path + f'/mean_performance/{dataset_name}/'
else:
path_to_save = f'mean_performance/{dataset_name}/'
if not os.path.exists(path_to_save):
os.makedirs(path_to_save)
if os.path.isfile(path_to_save + 'mean_performance.txt'):
f = open(path_to_save + 'mean_performance.txt', 'a+')
else:
f = open(path_to_save + 'mean_performance.txt', 'w+')
for key in list(performances.keys()):
f.write(f"{key}: {performances[key]}\r\n")
f.close()
def smooth(coef):
window_len = 5
s = np.r_[coef[window_len-1:0:-1], coef, coef[-1:-window_len:-1]]
w = np.ones(window_len, 'd')
y = np.convolve(w/w.sum(), s, mode='valid')
return y
def detectGaitCycle(data, plot_peak=False, plot_auto_corr_coeff=False, gcLen=None):
selected_data = data[:,2] # z axis
samples = data.shape[0]
autocorr = False if gcLen != None else True
peaks = find_thresh_peak(selected_data)
# compute gcLen based on autocorrelation of signal if not given by default
gcLen, auto_corr_coeff, peak_auto_corr = find_gcLen(data)
if plot_auto_corr_coeff:
plt.figure(figsize=(12, 3))
plt.style.use('seaborn-darkgrid')
plt.plot(np.arange(len(auto_corr_coeff)), auto_corr_coeff, 'b-')
plt.scatter(peak_auto_corr, auto_corr_coeff[peak_auto_corr], c='red')
plt.tight_layout()
plt.show()
peaks, to_plot = find_peaks(peaks, selected_data, gcLen, autocorr)
    if plot_peak:
plt.figure(figsize=(12, 3))
plt.style.use('seaborn-darkgrid')
plt.subplot(3,1,1)
plt.plot(np.arange(samples), data[:,0], 'b-', label='x')
plt.vlines(peaks, ymin=min(data[:,0]), ymax=max(data[:,0]), color='black', ls='dotted')
plt.legend(loc='upper right')
plt.subplot(3,1,2)
plt.plot(np.arange(samples), data[:,1], 'g-', label='y')
plt.vlines(peaks, ymin=min(data[:,1]), ymax=max(data[:,1]), color='black', ls='dotted')
plt.legend(loc='upper right')
plt.subplot(3,1,3)
plt.plot(np.arange(samples), data[:,2], 'r-', label='z')
plt.vlines(peaks, ymin=min(data[:,2]), ymax=max(data[:,2]), color='black', ls='dotted')
plt.legend(loc='upper right')
plt.tight_layout()
plt.show()
return peaks
def segment2GaitCycle(peaks, segment, plot_split):
cycles = []
for i in range(0, len(peaks)-1):
cycle = segment[peaks[i]:peaks[i+1],:]
cycles.append(cycle)
if plot_split:
for cycle in cycles:
            plt.figure(figsize=(12, 3))
            plt.style.use('seaborn-darkgrid')
            plt.plot(np.arange(cycle.shape[0]), cycle[:,0], 'b-', label='x')
            plt.plot(np.arange(cycle.shape[0]), cycle[:,1], 'g-', label='y')
            plt.plot(np.arange(cycle.shape[0]), cycle[:,2], 'r-', label='z')
            plt.legend(loc='upper right')
            plt.tight_layout()
            plt.show()
return cycles
def split_data_train_val_test_gait(data,
label_user,
id_window,
sessions,
method,
overlap,
split,
plot_split):
train_data = []
val_data = []
test_data = []
train_label = []
val_label = []
test_label = []
if method == 'cycle_based':
for user in np.unique(label_user):
# filter for user
idx = np.where(label_user == user)
data_temp = data[idx]
user_temp = label_user[idx]
# shuffle cycles to take random between first and second session
data_temp, user_temp = skutils.shuffle(data_temp, user_temp)
samples = data_temp.shape[0]
# split cycles based on paper, 8 gait for train, 0.5 of remain for val and 0.5 for test
if split == 'paper':
# gait cycle for train
train_gait = 8
# to have at least one sample for every user in train, val and test
if samples < 10:
train_gait = samples - 2
val_gait = 1
else:
val_gait = round((samples - train_gait)/2)
# split cycles in a standard way 70% train, 20% val and 10% test
else:
if samples <= 5:
train_gait = samples - 2
val_gait = 1
else:
train_gait = round(samples*0.7)
val_gait = round(samples*0.2)
# train
train_data.append(data_temp[:train_gait])
train_label.extend(user_temp[:train_gait])
# val
val_data.append(data_temp[train_gait:val_gait+train_gait])
val_label.extend(user_temp[train_gait:val_gait+train_gait])
# test
test_data.append(data_temp[val_gait+train_gait:])
test_label.extend(user_temp[val_gait+train_gait:])
# plot train val and test cycle for user
if plot_split:
plt.figure(figsize=(12, 3))
plt.style.use('seaborn-darkgrid')
for i,c in enumerate(train_data[-1][:5]):
plt.subplot(3, 5, i+1)
plt.plot(np.arange(c.shape[0]), c[:, 0], 'g-', label='x')
plt.plot(np.arange(c.shape[0]), c[:, 1], 'r-', label='y')
plt.plot(np.arange(c.shape[0]), c[:, 2], 'b-', label='z')
for i,c in enumerate(val_data[-1][:5]):
plt.subplot(3, 5, i+1+5)
plt.plot(np.arange(c.shape[0]), c[:, 0], 'g-', label='x')
plt.plot(np.arange(c.shape[0]), c[:, 1], 'r-', label='y')
plt.plot(np.arange(c.shape[0]), c[:, 2], 'b-', label='z')
for i,c in enumerate(test_data[-1][:5]):
plt.subplot(3, 5, i+1+10)
plt.plot(np.arange(c.shape[0]), c[:, 0], 'g-', label='x')
plt.plot(np.arange(c.shape[0]), c[:, 1], 'r-', label='y')
plt.plot(np.arange(c.shape[0]), c[:, 2], 'b-', label='z')
plt.tight_layout()
plt.show()
elif method == 'window_based':
if overlap == None:
raise Exception('Overlap must not be empty for window base method')
if overlap == 50:
distances_to_delete = [1]
elif overlap == 75:
distances_to_delete = [1, 2, 3]
# 70% train, 20% val, 10% test
for user in np.unique(label_user):
idx = np.where(label_user == user)
data_temp = data[idx]
user_temp = label_user[idx]
id_temp = id_window[idx]
# shuffle for random pick
data_temp, user_temp, id_temp = skutils.shuffle(data_temp, user_temp, id_temp)
# number of window for user and session, in train, val and test
samples = data_temp.shape[0]
train_val_percentage = round(samples*0.9)
if train_val_percentage == samples:
train_val_percentage -= 1
# train_val
train = data_temp[:train_val_percentage]
user_train = user_temp[:train_val_percentage]
id_train = id_temp[:train_val_percentage]
# test
test = data_temp[train_val_percentage:]
user_test = user_temp[train_val_percentage:]
id_test = id_temp[train_val_percentage:]
# delete overlap sequence between train and test
if overlap != 0:
overlap_idx = delete_overlap(
id_train, id_test, distances_to_delete)
train_temp = np.delete(train, overlap_idx, axis=0)
user_train_temp = np.delete(user_train, overlap_idx, axis=0)
else:
train_temp = train
user_train_temp = user_train
# split train in train and val
train_percentage = int(train_temp.shape[0] * 0.8)
if train_percentage == train_temp.shape[0]:
train_percentage -= 1
train = train_temp[:train_percentage]
user_train = user_train_temp[:train_percentage]
val = train_temp[train_percentage:]
user_val = user_train_temp[train_percentage:]
# train
train_data.append(train)
train_label.extend(user_train)
# val
val_data.append(val)
val_label.extend(user_val)
# test
test_data.append(test)
test_label.extend(user_test)
if plot_split:
plt.figure(figsize=(12, 3))
plt.style.use('seaborn-darkgrid')
for i,c in enumerate(train_data[-1][:5]):
plt.subplot(3, 5, i+1)
plt.plot(np.arange(c.shape[0]), c[:, 0], 'g-', label='x')
plt.plot(np.arange(c.shape[0]), c[:, 1], 'r-', label='y')
plt.plot(np.arange(c.shape[0]), c[:, 2], 'b-', label='z')
for i,c in enumerate(val_data[-1][:5]):
plt.subplot(3, 5, i+1+5)
plt.plot(np.arange(c.shape[0]), c[:, 0], 'g-', label='x')
plt.plot(np.arange(c.shape[0]), c[:, 1], 'r-', label='y')
plt.plot(np.arange(c.shape[0]), c[:, 2], 'b-', label='z')
for i,c in enumerate(test_data[-1][:5]):
plt.subplot(3, 5, i+1+10)
plt.plot(np.arange(c.shape[0]), c[:, 0], 'g-', label='x')
plt.plot(np.arange(c.shape[0]), c[:, 1], 'r-', label='y')
plt.plot(np.arange(c.shape[0]), c[:, 2], 'b-', label='z')
plt.tight_layout()
plt.show()
elif method == 'window_based_svm':
# 70% train, 30% test
for user in np.unique(label_user):
idx = np.where(label_user == user)
data_temp = data[idx]
user_temp = label_user[idx]
id_temp = id_window[idx]
# number of window for user and session, in train, val and test
samples = data_temp.shape[0]
train_percentage = int(samples*0.7)
if train_percentage == samples:
train_percentage -= 1
# shuffle for random pick
data_temp, user_temp, id_temp = skutils.shuffle(data_temp, user_temp, id_temp)
# train
train = data_temp[:train_percentage]
user_train = user_temp[:train_percentage]
id_train = id_temp[:train_percentage]
# test
test = data_temp[train_percentage:]
user_test = user_temp[train_percentage:]
id_test = id_temp[train_percentage:]
train_data.append(train)
train_label.extend(user_train)
test_data.append(test)
test_label.extend(user_test)
train_data = np.concatenate(train_data, axis=0)
if val_data != []:
val_data = np.concatenate(val_data, axis=0)
test_data = np.concatenate(test_data, axis=0)
train_label = np.asarray(train_label)
if val_data != []:
val_label = np.asarray(val_label)
test_label = np.asarray(test_label)
train_data, train_label = skutils.shuffle(train_data, train_label)
if val_data != []:
val_data, val_label = skutils.shuffle(val_data, val_label)
test_data, test_label = skutils.shuffle(test_data, test_label)
return train_data, val_data, test_data, train_label, val_label, test_label
def normalize_data(train, val, test=None, return_mean_std=False):
mean = np.mean(np.reshape(train, [-1, train.shape[2]]), axis=0)
std = np.std(np.reshape(train, [-1, train.shape[2]]), axis=0)
train = (train - mean)/std
val = (val - mean)/std
train = np.expand_dims(train, 3)
val = np.expand_dims(val, 3)
if test is not None:
test = (test - mean)/std
test = np.expand_dims(test, 3)
if not return_mean_std:
return train, val, test
else:
return train, val, test, mean, std
############################################
### From paper Data Augmentation for gait ##
############################################
def scale(x, out_range):
domain = np.min(x), np.max(x)
y = (x - (domain[1] + domain[0]) / 2) / (domain[1] - domain[0])
return y * (out_range[1] - out_range[0]) + (out_range[1] + out_range[0]) / 2
def denoiseData(signal, plot=False):
denoise = np.empty_like(signal)
original_shape = signal.shape[0]
for dim in np.arange(signal.shape[1]):
original_extent = tuple(slice(s) for s in signal[:,dim].shape)
coeffs = pywt.wavedec(signal[:,dim], wavelet='db6', level=2)
        # zero out the two finest detail-coefficient levels ('==' in the original was a no-op comparison)
        coeffs[-1] = np.zeros_like(coeffs[-1])
        coeffs[-2] = np.zeros_like(coeffs[-2])
denoise[:,dim] = pywt.waverec(coeffs, 'db6')[original_extent]
if plot:
plt.figure(figsize=(12, 3))
plt.style.use('seaborn-darkgrid')
plt.subplot(3, 2, 1)
plt.title(f'noise')
plt.plot(np.arange(original_shape), signal[:, 0], 'b-', label='x')
plt.legend(loc='upper right')
plt.subplot(3, 2, 3)
plt.plot(np.arange(original_shape), signal[:, 1], 'r-', label='y')
plt.legend(loc='upper right')
plt.subplot(3, 2, 5)
plt.plot(np.arange(original_shape), signal[:, 2], 'g-', label='z')
plt.legend(loc='upper right')
plt.subplot(3, 2, 2)
plt.title(f'denoise')
plt.plot(np.arange(original_shape), denoise[:, 0], 'b-', label='x')
plt.legend(loc='upper right')
plt.subplot(3, 2, 4)
plt.plot(np.arange(original_shape), denoise[:, 1], 'r-', label='y')
plt.legend(loc='upper right')
plt.subplot(3, 2, 6)
plt.plot(np.arange(original_shape), denoise[:, 2], 'g-', label='z')
plt.legend(loc='upper right')
plt.tight_layout()
plt.show()
return denoise
def calAutoCorrelation(data):
n = len(data)
autocorrelation_coeff = np.zeros((n,3))
for i in range(3):
autocorrelation_coeff[0,i] = np.sum(data[:,i]**2)/n
for i in range(3):
for t in range(1, n):
for j in range(1, n-t):
autocorrelation_coeff[t,i] = autocorrelation_coeff[t,i] + \
data[j,i]*data[j+t,i]
autocorrelation_coeff[t,i] = autocorrelation_coeff[t,i] / (n-t)
autocorrelation_coeff[:,i] = autocorrelation_coeff[:,i]/autocorrelation_coeff[0,i]
return np.mean(autocorrelation_coeff, axis=1)
def find_peaks(peaks, data, gcLen, autocorr):
# find first possible peak to start search
first_peak = [peaks[0]]
for i,_ in enumerate(peaks[1:]):
if abs(peaks[i] - peaks[0]) <= 0.8*gcLen:
first_peak.append(peaks[i])
else:
break
first_peak = peaks.index(first_peak[np.argmin(data[first_peak])])
# splice peaks from first possible detected peak
peaks = peaks[first_peak:]
# neighbour search of minimum at given gcLen from the first peak
peak_filtered = [peaks[0]]
i = 0
while i < len(peaks[:-1]):
peak_cluster = []
j = 1
while i + j < len(peaks):
if abs(peaks[i] - peaks[i + j]) > 1.1*gcLen:
break
if abs(peaks[i] - peaks[i + j]) >= 0.5*gcLen and abs(peaks[i] - peaks[i + j]) <= 1.1*gcLen:
peak_cluster.append(peaks[i + j])
j += 1
if i + j >= len(peaks) and peak_cluster == []:
break
if peak_cluster == []:
j = 1
while i + j < len(peaks):
if abs(peaks[i] - peaks[i + j]) > 1.6*gcLen:
break
if abs(peaks[i] - peaks[i + j]) >= 0.5*gcLen and abs(peaks[i] - peaks[i + j]) <= 1.6*gcLen:
peak_cluster.append(peaks[i + j])
j += 1
if peak_cluster == []:
j = 1
while i + j < len(peaks):
if abs(peaks[i] - peaks[i + j]) > 2.5*gcLen:
break
if abs(peaks[i] - peaks[i + j]) >= 0.5*gcLen and abs(peaks[i] - peaks[i + j]) <= 2.5*gcLen:
peak_cluster.append(peaks[i + j])
j += 1
if peak_cluster == []:
break
index_min = np.argmin(data[peak_cluster])
min_peak = peak_cluster[index_min]
# from min peak found peak on the right if they are at max 0.1*gcLen
'''
for peak in peak_cluster[peak_cluster.index(min_peak):]:
if abs(peak - min_peak) <= 0.05*gcLen:
min_peak = peak
'''
peak_filtered.append(min_peak)
i = peaks.index(min_peak)
# check on first-second peak distance, and least two peaks distance
if abs(peak_filtered[0] - peak_filtered[1]) > 1.2*gcLen:
peak_filtered = peak_filtered[1:]
if abs(peak_filtered[-1] - peak_filtered[-2]) > 1.2*gcLen:
peak_filtered = peak_filtered[:-1]
if len(peak_filtered) < 5 or len(peak_filtered) > 15:
to_plot = True
else:
to_plot = False
return peak_filtered, to_plot
def find_gcLen(data):
# compute autcorrelation to estimate len cycle
auto_corr_coeff = calAutoCorrelation(data)
# smooth the auto_correlation_coefficient
for i in range(10):
auto_corr_coeff = smooth(auto_corr_coeff)
# approximate the length of a gait cycle by selecting the 2nd peak (positive) in the auto correlation signal
peak_auto_corr = []
gcLen = 0
flag = 0
mean_auto_corr = np.mean(auto_corr_coeff[:200])
for i in range(1, 200):
if auto_corr_coeff[i] > auto_corr_coeff[i-1] and \
auto_corr_coeff[i] > auto_corr_coeff[i+1] and \
auto_corr_coeff[i] > mean_auto_corr:
flag += 1
peak_auto_corr.append(i)
if flag == 2:
gcLen = i - 1
break
if gcLen < 10:
peak_auto_corr = []
flag = 0
for i in range(1, len(auto_corr_coeff)-1):
if auto_corr_coeff[i] > auto_corr_coeff[i-1] and \
auto_corr_coeff[i] > auto_corr_coeff[i+1]:
if flag == 0 or (flag == 1 and i > 10):
flag += 1
peak_auto_corr.append(i)
if flag == 2:
gcLen = i - 1
break
return gcLen, auto_corr_coeff, peak_auto_corr
def find_thresh_peak(data):
plot = False
# all peaks
all_peak_pos = []
for i in range(0, data.shape[0]-1):
if i==0 and data[i] <= data[i+1]:
all_peak_pos.append(i)
if data[i] <= data[i-1] and data[i] <= data[i+1]:
all_peak_pos.append(i)
# filter list of peaks based on mean and standard deviation of detected peaks
_mean = np.mean(data)
_std = np.std(data)
filter_peaks_pos = []
for peak in all_peak_pos:
if(data[peak] < _mean - 0.6*_std):
filter_peaks_pos.append(peak)
if plot:
plt.figure(figsize=(12, 3))
plt.style.use('seaborn-darkgrid')
plt.plot(np.arange(len(data)), data, 'b-')
plt.scatter(all_peak_pos, data[all_peak_pos], c='red')
plt.scatter(filter_peaks_pos, data[filter_peaks_pos], c='black')
plt.tight_layout()
plt.show()
return filter_peaks_pos
def remove_g_component(signal, sampling_rate, plot):
# get gravity component g(t)
sos = butter_lowpass(cutoff=0.3, nyq_freq=sampling_rate*0.5, order=3, sampling_rate=sampling_rate)
g = butter_lowpass_filter(signal, sos)
# get linear acceleration s(t) = s(t) - g(t)
no_g = signal - g
if plot:
plt.figure(figsize=(12, 3))
plt.style.use('seaborn-darkgrid')
plt.subplot(2,1,1)
plt.title('With gravity component')
plt.plot(np.arange(signal.shape[0]), signal[:,0], 'g-', label='x')
plt.plot(np.arange(signal.shape[0]), signal[:,1], 'r-', label='y')
plt.plot(np.arange(signal.shape[0]), signal[:,2], 'b-', label='z')
plt.legend(loc='upper right')
plt.subplot(2,1,2)
plt.title('No gravity component')
plt.plot(np.arange(no_g.shape[0]), no_g[:,0], 'g-', label='x')
plt.plot(np.arange(no_g.shape[0]), no_g[:,1], 'r-', label='y')
plt.plot(np.arange(no_g.shape[0]), no_g[:,2], 'b-', label='z')
plt.legend(loc='upper right')
plt.tight_layout()
plt.show()
return no_g
def butter_lowpass(cutoff, nyq_freq, order, sampling_rate):
normal_cutoff = float(cutoff) / nyq_freq
sos = signal.butter(order, normal_cutoff, btype='lowpass', output='sos', fs=sampling_rate)
return sos
def butter_lowpass_filter(data, sos):
y = signal.sosfiltfilt(sos, data, axis=0, padtype=None)
return y
def interpolated(cycles, to_interp, plot_interpolated):
cycles_interpolated = []
for cycle in cycles:
interpolated = np.empty((to_interp, cycle.shape[1]))
for dim in np.arange(cycle.shape[1]):
'''
interpolated[:, dim] = CubicSpline(np.arange(0, cycle.shape[0]), cycle[:, dim])(
np.linspace(0, cycle.shape[0]-1, to_interp))
'''
interpolated[:, dim] = interp1d(np.arange(0, cycle.shape[0]), cycle[:, dim])(np.linspace(0, cycle.shape[0]-1, to_interp))
if plot_interpolated:
plt.figure(figsize=(12, 3))
plt.style.use('seaborn-darkgrid')
plt.subplot(1, 2, 1)
plt.title(f'original')
plt.plot(np.arange(cycle.shape[0]),
cycle[:, 0], 'b-', label='noise')
plt.plot(np.arange(cycle.shape[0]),
cycle[:, 1], 'r-', label='noise')
plt.plot(np.arange(cycle.shape[0]),
cycle[:, 2], 'g-', label='noise')
plt.subplot(1, 2, 2)
plt.title(f'interpolated')
plt.plot(
np.arange(interpolated.shape[0]), interpolated[:, 0], 'b-', label='denoise')
plt.plot(
np.arange(interpolated.shape[0]), interpolated[:, 1], 'r-', label='denoise')
plt.plot(
np.arange(interpolated.shape[0]), interpolated[:, 2], 'g-', label='denoise')
plt.tight_layout()
plt.show()
cycles_interpolated.append(interpolated)
    return cycles_interpolated
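# --- Illustrative sketch (not part of the original repository): z-normalising dummy
# --- accelerometer windows with normalize_data defined above. The shapes are invented
# --- (100/20/10 windows of 100 samples x 3 axes); the helper standardises with the
# --- training mean/std and appends a channel axis. The __main__ guard keeps the demo
# --- from running when the module is imported.
if __name__ == '__main__':
    dummy_train = np.random.randn(100, 100, 3)
    dummy_val = np.random.randn(20, 100, 3)
    dummy_test = np.random.randn(10, 100, 3)
    dummy_train, dummy_val, dummy_test = normalize_data(dummy_train, dummy_val, dummy_test)
    print(dummy_train.shape, dummy_val.shape, dummy_test.shape)  # (100, 100, 3, 1) (20, 100, 3, 1) (10, 100, 3, 1)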
# ===== prajjawalkashyap/shopping-project :: /shop/shopViews/payment.py =====
# Source: https://github.com/prajjawalkashyap/shopping-project (no license)
from django.shortcuts import render, redirect
import razorpay
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from shop.models import Product, Cart, CartItem, OrderProduct, Order, Payment, Refund, Coupon, \
CouponUsed
from authy.models import User, Address
from django.contrib import messages
from django.contrib.auth.decorators import login_required
# ==============CART CHECKOUT==============
def checkout(request):
if request.user.is_authenticated:
cart, created = Cart.objects.get_or_create(user=request.user)
cartItem = CartItem.objects.filter(cart=cart)
if cartItem:
amt = 0
for its in cartItem:
amt += (its.product.price * its.quantity)
address = Address.objects.filter(user=request.user)
amount = amt * 100
context = {
'total_amt': amt,
'cartItems': cartItem,
'amount': amount,
'addresses': address,
}
return render(request, 'shop/checkout.html', context)
else:
messages.error(request, 'Add product to Cart')
return redirect('/')
else:
messages.error(request, 'Login required to Checkout')
return redirect('/auth/login/')
# ============ END SECTION ====================
# ======================== SAVE ADDRESS AND PAYMENT=================
def orderPay(request):
if request.user.is_authenticated:
if request.method == 'POST':
coupon = False
addressId = request.POST.get('radio')
code = request.POST.get('couponCode')
address = Address.objects.get(id=addressId)
user = request.user
order = Order.objects.create(user=user, address=address, status="1")
order.save()
cart = Cart.objects.get(user=request.user)
cartItem = CartItem.objects.filter(cart=cart)
amt = 0
for its in cartItem:
amt += its.amt
print(amt)
ord_Prod = OrderProduct(order=order, qty=its.quantity, product=its.product, amt=its.amt, status="1")
ord_Prod.save()
subTotal = amt
if code:
coupon = Coupon.objects.get(code=code)
coupUsed = CouponUsed(user=user, order=order, coupon=coupon)
coupUsed.save()
if amt >= coupon.condition:
amt -= coupon.discount
order.amt = amt
order.save()
orderItem = OrderProduct.objects.filter(order=order)
amt = amt * 100
client = razorpay.Client(auth=(settings.KEY_ID, settings.KEY_SECRET))
payment = client.order.create({'amount': amt, 'currency': 'INR', 'payment_capture': '1'})
pay = Payment(order=order, razorpay_order_id=payment['id'], payment_id="")
pay.save()
context = {
'order': order,
'orderItems': orderItem,
'paymentId': payment['id'],
'key_id': settings.KEY_ID,
'address': address,
'coupon': coupon,
'subTot': subTotal
}
return render(request, 'shop/razorpay.html', context)
else:
messages.error(request, 'Login to place order')
return redirect('/auth/login/')
# ============ END SECTION ====================
# ==========RAZORPAY REDIRECT SUCCESS===========
@csrf_exempt
def handleRequest(request):
response = request.POST
client = razorpay.Client(auth=(settings.KEY_ID, settings.KEY_SECRET))
params_dict = {
'razorpay_order_id': response['razorpay_order_id'],
'razorpay_payment_id': response['razorpay_payment_id'],
'razorpay_signature': response['razorpay_signature']
}
try:
client.utility.verify_payment_signature(params_dict)
pay = Payment.objects.get(razorpay_order_id=response['razorpay_order_id'])
pay.successful = True
pay.save()
order = Order.objects.get(id=pay.order.id)
order.active = True
order.paid = True
order.save()
pay.payment_id = response['razorpay_payment_id']
pay.save()
cart = Cart.objects.get(user=request.user)
cart.delete()
messages.success(request, 'Payment Completed')
return redirect('/')
except:
return redirect('/checkout/')
# ============ END SECTION ====================
# ============= CANCEL ORDERS ==================
@login_required
def cancelOrder(request, oid):
order = Order.objects.get(id=oid, paid=True)
payment = Payment.objects.get(order=order)
client = razorpay.Client(auth=(settings.KEY_ID, settings.KEY_SECRET))
amt = int(order.amt * 100)
c = client.payment.refund(payment.payment_id, amt)
refund_id = c['id']
refund_status = c['status']
refund = Refund(order=order, payment=payment, refund_id=refund_id, status=refund_status)
refund.save()
order.paid = False
order.status="404"
order.save()
payment.refund=True
payment.save()
messages.success(request, 'Refund processed')
return redirect('/')
# ===== FoxRobotLab/catkin_ws :: /src/match_seeker/scripts/olri_classifier/olin_visualizer.py =====
# Source: https://github.com/FoxRobotLab/catkin_ws (no license)
"""--------------------------------------------------------------------------------
olin_visualizer.py
Author: Jinyoung Lim
Date: July 2018
A simple messy program to visualize a model. Takes in a directory of images and the model path
and visualizes each layer.
Acknowledgement:
https://www.analyticsvidhya.com/blog/2018/03/essentials-of-deep-learning-visualizing-convolutional-neural-networks/
https://blog.keras.io/how-convolutional-neural-networks-see-the-world.html
--------------------------------------------------------------------------------"""
import keras
import matplotlib.pyplot as plt
from src.match_seeker.scripts.olri_classifier.DataManipulations import olin_factory as factory
import numpy as np
import os
import cv2
from keras.utils import plot_model
### https://github.com/raghakot/keras-vis
from vis.regularizers import TotalVariation, LPNorm
# model_dir="0724181052_olin-CPDr-CPDr-DDDDr-L_lr0.001-bs256"
# model_hdf5_name="00-90-0.72.hdf5",
# model_hdf5_name="00-190-0.87.hdf5"
model_dir = "0725181447_olin-CPDr-CPDr-CPDr-DDDDr-L_lr0.001-bs128-weighted"
model_hdf5_name = "00-745-0.71.hdf5"
model_path = os.path.join(model_dir, model_hdf5_name) #TODO: should factory.path be used instead?
model = keras.models.load_model(model_path)
model.load_weights(model_path)
model.summary()
print("*** Model restored: {}".format(model_path))
layer_dict = dict([(layer.name, layer) for layer in model.layers])
conv_names = ["conv2d_1", "conv2d_2", "conv2d_3"]
dense_names = ["dense_1", "dense_2", "dense_3", "dense_4"]
## Saves the model summary into the specified file
def save_model_architecture():
plot_model(
model,
to_file=model_dir + "/" + 'model_architecture.png',
show_shapes=True,
show_layer_names=True,
rankdir='TB' # top to bottom, "LR" for left to right
)
def show_conv_filters():
for layer_name in conv_names:
layer = model.get_layer(name=layer_name)
filters = layer.get_weights()
# print(layer.input_shape, layer.output_shape)
# # print(np.array(layer.get_weights()[0]).shape) # (5, 5, 1, 128) (128, )
print("Showing layer {} with input shape {} and output shape {}".format(layer_name, layer.input_shape, layer.output_shape))
fig = plt.figure()
# n = int(np.ceil(np.sqrt(layer.output_shape[-1])))
# for i in range(layer.output_shape[-1]-1):
# ax = fig.add_subplot(n,n,i+1)
# ax.set_axis_off()
# ax.imshow(filters[0][:, :, :, i].squeeze(), cmap="gray")
n = 8
for i in range(np.array(filters[0]).shape[-1]):
ax = fig.add_subplot(n, n, i+1)
ax.set_axis_off()
ax.imshow(filters[0][:, :, 0, i].squeeze(), cmap="gray")
plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0.1, hspace=0.1) # https://www.programcreek.com/python/example/102282/matplotlib.pyplot.subplots_adjust
plt.show()
plt.clf()
cell_to_intlabel_dict = np.load(factory.paths.one_hot_dict_path).item()
intlabel_to_cell_dict = dict()
for cell in cell_to_intlabel_dict.keys():
intlabel_to_cell_dict[cell_to_intlabel_dict[cell]] = cell
def clean_image(image_path):
"""Preprocess image just as the cnn"""
mean = np.load(factory.paths.train_mean_path)
image_raw = cv2.imread(image_path)
gray_img = cv2.cvtColor(image_raw, cv2.COLOR_BGR2GRAY)
resized_img = cv2.resize(gray_img, (factory.image.size, factory.image.size))
submean_image = resized_img - mean
cleaned_image = np.array([submean_image], dtype="float") \
.reshape(1, factory.image.size, factory.image.size, factory.image.depth)
# pred = softmax.predict(cleaned_image)
# pred_class = np.argmax(pred)
# pred_cell = intlabel_to_cell_dict[int(pred_class)]
return submean_image
def show_activation(cleaned_image, num_images=8):
"""
Visualize activation of convolutional layers. Heavily referred to
https://github.com/ardendertat/Applied-Deep-Learning-with-Keras/blob/master/notebooks/Part%204%20%28GPU%29%20-%20Convolutional%20Neural%20Networks.ipynb
"""
layer_outputs = [layer.output for layer in model.layers if layer.name in conv_names]
activation_model = keras.models.Model(inputs=model.input, outputs=layer_outputs)
intermediate_activations = activation_model.predict(cleaned_image)
# Now let's display our feature maps
for layer_name, layer_activation in zip(conv_names, intermediate_activations):
# This is the number of features in the feature map
n_features = layer_activation.shape[-1]
images_per_row = int(np.ceil(np.sqrt(n_features)))
# The feature map has shape (1, size, size, n_features)
size = layer_activation.shape[1]
# We will tile the activation channels in this matrix
n_cols = n_features // int(np.ceil(np.sqrt(n_features)))
display_grid = np.zeros((size * n_cols, images_per_row * size))
# We'll tile each filter into this big horizontal grid
for col in range(n_cols):
for row in range(images_per_row):
channel_image = layer_activation[0, :, :, col * images_per_row + row]
# Post-process the feature to make it visually palatable
channel_image -= channel_image.mean()
channel_image /= channel_image.std()
channel_image *= 64
channel_image += 128
channel_image = np.clip(channel_image, 0, 255).astype('uint8')
display_grid[col * size: (col + 1) * size,
row * size: (row + 1) * size] = channel_image
# Display the grid
scale = 2. / size
plt.figure(figsize=(scale * display_grid.shape[1],
scale * display_grid.shape[0]))
plt.axis('off')
plt.title(layer_name)
plt.grid(False)
plt.imshow(display_grid, aspect='auto', cmap='viridis')
# plt.show()
# save_model_architecture()
# show_conv_filters()
frame_name = "frame4593"
image_path = "/home/macalester/PycharmProjects/olri_classifier/frames/moreframes/{}.jpg".format(frame_name)
image_raw = cv2.imread(image_path)
# plt.figure()
# plt.axis("off")
# plt.grid(False)
# plt.title(frame_name)
# plt.imshow(np.array(image_raw))
# # plt.show()
#
# plt.figure()
# plt.axis("off")
# plt.grid(False)
# plt.title("Cleaned Image")
cleaned_image = clean_image(image_path)
cv2.imshow("Cleaned Image",cleaned_image)
cv2.imwrite("0725181447_olin-CPDr-CPDr-CPDr-DDDDr-L_lr0.001-bs128-weighted/image_cleaned.jpg", cleaned_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# plt.imshow(cleaned_image.squeeze(), cmap="gray")
# plt.show()
#
# show_activation(cleaned_image, num_images=8)
# plt.show()
# images_paths = []
# i = 0
# for filename in os.listdir("/home/macalester/PycharmProjects/olri_classifier/frames/moreframes/"):
# if (filename.endswith(".jpg")):
# if (i % 100 == 0):
# images_paths.append("/home/macalester/PycharmProjects/olri_classifier/frames/moreframes/"+filename)
# i += 1
# pred_cells = []
# for image_path in images_paths:
# img = cv2.imread(image_path)
# gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# resized_img = cv2.resize(gray_img, (factory.image.size, factory.image.size))
# submean_image = np.subtract(resized_img, mean)
# cleaned_image = np.array([submean_image], dtype="float") \
# .reshape(1, factory.image.size, factory.image.size, factory.image.depth)
# pred = softmax.predict(cleaned_image)
# pred_class = np.argmax(pred)
# pred_cell = intlabel_to_cell_dict[int(pred_class)]
# pred_cells.append(pred_cell)
#
#
# tsne = TSNE(n_components=2, perplexity=30, verbose=1).fit(np.array(pred_cells))
#
# layer_name = "conv2d_2"
# layer = model.get_layer(name=layer_name)
# filters = layer.get_weights()
#
# print(np.array(filters[0]).shape, np.array(filters[1]).shape)
# print(np.array(filters[1]))
# # print(layer.input_shape, layer.output_shape)
# # # print(np.array(layer.get_weights()[0]).shape) # (5, 5, 1, 128) (128, )
# print("Showing layer {} with input shape {} and output shape {}".format(layer_name, layer.input_shape, layer.output_shape))
# fig = plt.figure()
# n = int(np.ceil(np.sqrt(layer.output_shape[-1])))
# for i in range(layer.output_shape[-1] - 1):
# ax = fig.add_subplot(n,n,i+1)
# ax.set_axis_off()
# ax.imshow(filters[0][:, :, 10, i].squeeze(), cmap="gray")
#
#
#
# plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0.1, hspace=0.1) # https://www.programcreek.com/python/example/102282/matplotlib.pyplot.subplots_adjust
#
#
# for i in range(layer.output_shape[-1] - 1):
# ax = fig.add_subplot(n,n,i+1)
# ax.set_axis_off()
# ax.imshow(filters[0][:, :, 20, i].squeeze(), cmap="gray")
#
# plt.show()
# plt.clf()
| UTF-8 | Python | false | false | 8,862 | py | 285 | olin_visualizer.py | 158 | 0.641729 | 0.613631 | 0 | 246 | 35.02439 | 172 |
lsst-camera-dh/EO-utilities | 4,054,449,148,372 | 039f695627750cb54364f84813971868f29f41cf | 257361b57c34729bbe1ad58bea823f9cb7f2c540 | /python/lsst/eo_utils/qe/qe.py | 07c41f1be8a2f07aa442774d1f57aa23e8a83dad | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/lsst-camera-dh/EO-utilities | 749432b0b68f05e89b8bedacd23f01011265e32c | 28418284fdaf2b2fb0afbeccd4324f7ad3e676c8 | refs/heads/master | 2023-03-04T23:46:09.843146 | 2020-12-03T18:44:06 | 2020-12-03T18:44:06 | 117,150,415 | 2 | 1 | NOASSERTION | false | 2020-12-03T18:44:07 | 2018-01-11T20:34:40 | 2020-12-03T17:56:13 | 2020-12-03T18:44:06 | 5,417 | 1 | 1 | 0 | Python | false | false | """Class to analyze the FFT of the bias frames"""
import numpy as np
from lsst.eotest.sensor.QE import QE_Data
from lsst.eo_utils.base.config_utils import EOUtilOptions
from lsst.eo_utils.base.data_utils import TableDict
from lsst.eo_utils.base.factory import EO_TASK_FACTORY
from .meta_analysis import QeSlotTableAnalysisConfig, QeSlotTableAnalysisTask
class QEConfig(QeSlotTableAnalysisConfig):
"""Configuration for QETask"""
infilekey = EOUtilOptions.clone_param('infilekey', default='qe-med')
filekey = EOUtilOptions.clone_param('filekey', default='qe')
class QETask(QeSlotTableAnalysisTask):
"""Analyze some monochromatic data to extract the wavelength dependence of the QE"""
ConfigClass = QEConfig
_DefaultName = "QETask"
plot_names = []
def extract(self, butler, data, **kwargs):
"""Extract data
Parameters
----------
butler : `Butler`
The data butler
data : `dict`
Dictionary (or other structure) contain the input data
kwargs
Used to override default configuration
Returns
-------
dtables : `TableDict`
Output data tables
"""
self.safe_update(**kwargs)
self.log_info_slot_msg(self.config, "")
pd_calib_file = self.get_pd_calib_file()
gains = np.ones((17))
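        # Unit gains (i.e. no gain correction applied); presumably one entry per
        # amplifier segment of the sensor.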
basename = data[0]
qe_data = QE_Data()
qe_data.read_fits(basename)
qe_data.incidentPower(pd_calib_file)
qe_data.calculate_QE(gains)
qe_curves_data_dict = qe_data.make_qe_curves_data_dict()
bands_data_dict = qe_data.make_bands_data_dict()
dtables = TableDict()
dtables.make_datatable('qe_curves', qe_curves_data_dict)
dtables.make_datatable('bands', bands_data_dict)
return dtables
def plot(self, dtables, figs, **kwargs):
"""Make plots
Parameters
----------
dtables : `TableDict`
The data produced by this task
figs : `FigureDict`
The resulting figures
kwargs
Used to override default configuration
"""
self.safe_update(**kwargs)
# Analysis goes here.
# you should use the data in dtables to make a bunch of figures in figs
EO_TASK_FACTORY.add_task_class('QE', QETask)
| UTF-8 | Python | false | false | 2,363 | py | 144 | qe.py | 125 | 0.620821 | 0.619551 | 0 | 89 | 25.550562 | 88 |
Open-CAS/ocf | 154,618,858,188 | 89c63ec4ed6d994f4438ce380732f087d4cbe2bf | 4b15f318ba3332ee946cb0b2838c93e7935b9b89 | /tests/functional/tests/management/test_disable_cleaner.py | 5077397dbcd10079287f2efee3680aa3683686f3 | [
"BSD-3-Clause"
]
| permissive | https://github.com/Open-CAS/ocf | c4f8a5c9c1b254a905fda75be2c19bd7c8ebd450 | 016d7a8ee2822d672c308264e79bae4081e7930e | refs/heads/master | 2023-05-28T08:40:51.328181 | 2023-05-11T08:11:57 | 2023-05-11T08:11:57 | 152,160,836 | 168 | 94 | BSD-3-Clause | false | 2023-09-14T08:01:50 | 2018-10-08T23:46:10 | 2023-09-12T13:23:31 | 2023-09-14T08:01:49 | 3,737 | 158 | 78 | 21 | C | false | false | #
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import pytest
from pyocf.types.volume import RamVolume
from pyocf.types.cache import Cache, CacheMetadataSegment, CleaningPolicy
from pyocf.types.core import Core
from pyocf.types.shared import OcfError, OcfCompletion
from pyocf.utils import Size as S
from pyocf.helpers import get_metadata_segment_size
from ctypes import c_int
def test_attach_cleaner_disabled(pyocf_ctx):
"""
title: Attach cache with cleaner_disabled option set.
description: |
Check that cache can be attached when cleaner_disabled option is
selected and that "cleaning" metadata section is not allocated.
pass_criteria:
- Cache attaches properly.
- The "cleaning" metadata section is not allocated.
- Cache stops properly.
steps:
- Start the cache with default config.
- Prepare default attach config and set cleaner_disabled field to true.
- Attach cache device using prepared config.
- Verify that cache was attached properly.
- Verify that "cleaning" metadata section was not allocated.
- Stop the cache.
- Verify that the cache stopped properly.
requirements:
- disable_cleaner::set_cleaner_disabled
- disable_cleaner::cleaning_section_alocation
"""
cache_device = RamVolume(S.from_MiB(50))
core_device = RamVolume(S.from_MiB(10))
cache = Cache.start_on_device(cache_device, disable_cleaner=True)
core = Core.using_device(core_device)
stats = cache.get_stats()
assert stats["conf"]["attached"] is True, "checking whether cache is attached properly"
cleaning_size = get_metadata_segment_size(cache, CacheMetadataSegment.CLEANING)
assert (
cleaning_size == 0
), f'Metadata cleaning segment size expected: "0", got: "{cleaning_size}"'
cache.stop()
assert Cache.get_by_name("cache1", pyocf_ctx) != 0, "Try getting cache after stopping it"
def test_load_cleaner_disabled(pyocf_ctx):
"""
title: Load cache in cleaner_disabled mode.
description: |
Check that loading the cache that was previously attached with
cleaner_disabled option preserves cleaner_disabled setting.
pass_criteria:
- Cache loads properly.
- The "cleaning" metadata section is not allocated.
- Cache stops properly.
steps:
- Start the cache with default config.
- Prepare default attach config and set cleaner_disabled field to true.
- Attach cache device using prepared config.
- Stop the cache.
- Load the cache.
- Verify that cache was loaded properly.
- Verify that "cleaning" metadata section was not allocated.
- Stop the cache.
- Verify that the cache stopped properly.
requirements:
- disable_cleaner::load_cleaner_disabled
- disable_cleaner::cleaning_section_alocation
"""
cache_device = RamVolume(S.from_MiB(50))
core_device = RamVolume(S.from_MiB(10))
cache = Cache.start_on_device(cache_device, disable_cleaner=True)
core = Core.using_device(core_device)
cache.add_core(core)
cache.stop()
cache = Cache.load_from_device(cache_device, open_cores=False, disable_cleaner=True)
cache.add_core(core, try_add=True)
stats = cache.get_stats()
assert stats["conf"]["attached"] is True, "checking whether cache is attached properly"
cleaning_size = get_metadata_segment_size(cache, CacheMetadataSegment.CLEANING)
assert (
cleaning_size == 0
), f'Metadata cleaning segment size expected: "0", got: "{cleaning_size}"'
cache.stop()
assert Cache.get_by_name("cache1", pyocf_ctx) != 0, "Try getting cache after stopping it"
def test_cleaner_disabled_nop(pyocf_ctx):
"""
    title: NOP enforcement in cleaner_disabled mode.
description: |
        Check that after attaching cache with the cleaner_disabled option set, the
cleaning policy is by default set to NOP and that it is not possible
to change it.
pass_criteria:
- Cleaning policy is set to NOP after cache attach.
- It is not possible to change cleaning policy to other than NOP.
steps:
- Start the cache with default config.
- Prepare default attach config and set cleaner_disabled field to true.
- Attach cache device using prepared config.
- Verify that cleaning policy is NOP.
- Try to set cleaning policy to [ALRU, ACP] and verify that operation failed.
- Try to set cleaning policy to NOP and verify that operation succeeded.
- Stop the cache.
requirements:
- disable_cleaner::starting_with_nop_policy
- disable_cleaner::nop_enforcement
"""
cache_device = RamVolume(S.from_MiB(50))
cache = Cache.start_on_device(cache_device, disable_cleaner=True)
assert cache.get_cleaning_policy() == CleaningPolicy.NOP, (
"Cleaning policy should be NOP after starting cache with disabled cleaner"
)
with pytest.raises(OcfError):
cache.set_cleaning_policy(CleaningPolicy.ALRU)
assert cache.get_cleaning_policy() == CleaningPolicy.NOP, (
"It shouldn't be possible to switch cleaning policy to ALRU when cleaner is disabled"
)
with pytest.raises(OcfError):
cache.set_cleaning_policy(CleaningPolicy.ACP)
assert cache.get_cleaning_policy() == CleaningPolicy.NOP, (
"It shouldn't be possible to switch cleaning policy to ACP when cleaner is disabled"
)
cache.set_cleaning_policy(CleaningPolicy.NOP)
def test_attach_cleaner_disabled_non_default(pyocf_ctx):
"""
    title: Attach cache with default config does not set cleaner_disabled.
description: |
Check that when attaching cache with default attach config the
cleaner_disabled option is not selected.
pass_criteria:
- Cache attaches properly.
- The "cleaning" metadata section is not allocated.
- Cache stops properly.
steps:
- Start the cache with default config.
- Attach cache device using default config.
- Verify that "cleaning" metadata section was allocated.
- Stop the cache.
requirements:
- disable_cleaner::default_setting
"""
cache_device = RamVolume(S.from_MiB(50))
cache = Cache.start_on_device(cache_device)
cleaning_size = get_metadata_segment_size(cache, CacheMetadataSegment.CLEANING)
assert (
cleaning_size > 0
), f'Metadata cleaning segment size expected: "> 0", got: "{cleaning_size}"'
| UTF-8 | Python | false | false | 6,495 | py | 298 | test_disable_cleaner.py | 57 | 0.693457 | 0.689299 | 0 | 178 | 35.488764 | 93 |
theAnton-forks/Competitive-Programming-Portfolio | 14,937,896,288,325 | b1db994236118e2b4df09dca1b3892d88e1c53e5 | c30c3466c34c41b49e8c8b2791e0d44ae6277cb2 | /Adjacent Numbers Forming Squares/main.py | 8839b7995a77545ac90461787e0fcc16f3d40248 | []
| no_license | https://github.com/theAnton-forks/Competitive-Programming-Portfolio | 784eb9ff5441f1a81f5501d690f9094698bc34c7 | fb3f099d7ecc37b9117d64faa4c1bdf89e1f18d2 | refs/heads/master | 2022-12-14T03:18:04.941318 | 2020-09-03T05:22:46 | 2020-09-03T05:22:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | square = [False for i in range(100)]
i = 1
while i**2 < 100:
square[i**2] = True
i += 1
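# square[s] is True iff s is a perfect square below 100 (large enough, since the
# biggest possible adjacent sum is 14 + 15 = 29).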
def f(pos, array, used):
    # A complete arrangement fills array[0..14] with all 15 numbers, so we are
    # done (and print it) once pos, the index of the last number placed, reaches 14.
    if pos == 14:
        print array[:15]
        return
for i in range(1, 16):
if used[i] is False and square[array[pos] + i] is True:
array[pos + 1] = i
used[i] = True
f(pos + 1, array, used)
used[i] = False
array[pos + 1] = 0
array = [0 for i in range(16)]
used = [False for i in range(16)]
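# Backtracking driver: try every number 1..15 as the first element of the chain.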
for i in range(1, 16):
array[0] = i
used[i] = True
f(0, array, used)
used[i] = False
| UTF-8 | Python | false | false | 501 | py | 1,256 | main.py | 998 | 0.572854 | 0.51497 | 0 | 27 | 17.555556 | 57 |
savagedude3/comp-glia-book | 11,819,750,012,594 | 476907d0fb310f16b573026294a53b7999008720 | 3b9898c95102c35ac05079ebbff68a2553951c65 | /Ch10.DePitta/code/gliotransmission_models.py | d8c9569ee3ccd716434443a9a1848ce7495a4746 | []
| no_license | https://github.com/savagedude3/comp-glia-book | 902460f1137c4ee0d9fc53993412376eec08668e | c2e3fa54c22c9c2e04376e9af4cb3491ad1dfe91 | refs/heads/master | 2022-03-29T04:38:16.565159 | 2019-04-27T20:48:29 | 2019-04-27T20:48:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
gliotransmission_models.py
Library of models of gliotransmitter-regulated synapses:
- lr_model : Li-Rinzel astrocyte simulator (deterministic and stochastic);
- exocytosis_model : Tsodyks-Markram model of synaptic release (spiking and mean-field);
- ca_gtrel_model : calcium-dependent gliotransmitter release;
- asn_model : gliotransmitter-regulated (tripartite) synapse (spiking and mean-field);
- gtrs : wrapper class to set up, simulate, and reconstruct solutions of the above models.
Maurizio De Pitta', The University of Chicago, Feb 27th, 2015.
'''
from __future__ import division
import numpy as np
import copy as cp
from scipy import *
import weave
from weave import converters
import sys, os
base_dir = '/home/maurizio/Ch10.DePitta'
sys.path.append(os.path.join(os.path.expanduser('~'),base_dir))
import pycustommodules.general_utils as gu
import pycustommodules.solvers.solver_utils as su
import pycustommodules.spk_generators as spg
import matplotlib.pylab as plt
###############################################################################
# General functions
###############################################################################
normalize = lambda x : (x - x.min())/(x.max()-x.min())
unnormalize = lambda xn, mm : mm[0] + xn*(np.diff(mm)) # mm must be in [min, max]
# Some lambdas to reconstruct solutions
xsol = lambda xn, dt, tau: 1 + (xn - 1) * np.exp(-dt / tau)
usol = lambda un, dt, tau: un * np.exp(-dt / tau)
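# Closed-form solutions of the TM equations between two events: x recovers towards 1
# as x(t0+dt) = 1 + (x(t0)-1)*exp(-dt/tau), while u (and y) decay to zero as
# u(t0+dt) = u(t0)*exp(-dt/tau). They are reused by last_point() and reconstruct_solution().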
def Hill(x,k,n):
    """Hill function of x with half-activation constant k and Hill coefficient n."""
    return np.asarray(x**n/(x**n+k**n), dtype=float)
###############################################################################
# Parameters definition
###############################################################################
def lra_parameters(**kwargs):
"""
Parameters for the Li-Rinzel astrocyte (Li and Rinzel, JTB 1994).
Input:
    - noise : {False} | True : tells the simulator whether to simulate the stochastic LR model (Default: False)
Return: Dictionary of model parameters.
Maurizio De Pitta', Basque Center of Applied Mathematics, June 8, 2017.
"""
pars = {'d1' : 0.1,
'd2' : 2.1,
'd3' : 0.9967,
'd5' : 0.2,
'a2' : 0.4,
'c1' : 0.4,
'c0' : 4,
'rc' : 7,
'rl' : 0.05,
'ver' : 0.9,
'Ker' : 0.1,
'ip3' : 0.1,
'noise' : False,
            'ICs' : [0.05,0.99]   # [c, h] : Ca2+ concentration [uM] and IP3R gating variable
}
## User-defined parameters
pars = gu.varargin(pars,**kwargs)
    # Parameters must be floats; booleans (e.g. 'noise') are cast to C ints
    # (the bool check must come first, since np.isscalar(True) is also True)
    for k,item in pars.iteritems():
        if isinstance(item, bool):
            pars[k] = np.intc(item)
        elif isscalar(item):
            pars[k] = float(item)
        else:
            pars[k] = array(item,dtype=float)
return pars
def exocytosis_parameters(**kwargs):
"""
Parameters for the Tsodyks-Markram model of exocytosis and synaptic currents (1- or 2-equations) (Tsodyks, Les Houches 2005).
Input:
- ICs : Must be an array-like of three elements for (x,y,u).
Return: Dictionary of model parameters.
Maurizio De Pitta', Basque Center for Applied Mathematics, June 11, 2018.
"""
pars = {'u0' : 0.5, # Basal release probability
'taud' : 0.5, # Depression time constant
'tauf' : 1.0, # Facilitation time constant
'taui' : 0.02, # Time decay of AMPA-like currents
'psc0' : 30, # Max PSC in [pA]
'ICs' : [1.,0.,0.] #x,y,u
}
## User-defined parameters
pars = gu.varargin(pars,**kwargs)
for k,item in pars.iteritems():
if isscalar(item):
pars[k] = float(item)
else:
pars[k] = array(item,dtype=float)
return pars
def stimulus_params(**kwargs):
"""
Build pars vector with stimulus and system details (used in gtrs class for calls to models including exocytosis)
Input:
**kwargs:
- rate : Array-like or scalar for rate of presynaptic APs
- twin : Array-like [t0,tf] for duration of presynaptic stimulation
- stimulus : 'poisson' | 'periodic' | 'fixed' (requires specification of spikes_pre)
- spikes_pre : {None} | array-like with AP instants to pass to the simulator (if stimulus='fixed')
- Nsyn : Number of synapses (>0)
- Nvar : Number of variables in the synaptic model (1 or 2)
- rate_gtr : Array-like or scalar for rate of GREs
- twin_gtr : Array-like [t0,tf] for duration of gliotransmitter release
- stimulus_gtr : 'poisson' | 'periodic' | 'fixed' (requires specification of pre_gtr)
- pre_gtr : {None} | array-like with AP instants to pass to the simulator (if stimulus_gtr='fixed')
Return: Dictionary of model parameters.
Maurizio De Pitta', Basque Center for Applied Mathematics, June 12, 2018.
"""
pars = {'rate' : 1.,
'twin' : [0.,10.],
'stimulus' : 'poisson',
'spikes_pre': None,
'Nsyn' : 1,
'Nvar' : 2,
# GRE parameters
'rate_gtr' : None,
'twin_gtr' : None,
'stimulus_gtr' : None,
'pre_gtr' : None
}
## User-defined parameters
pars = gu.varargin(pars,**kwargs)
# Parameters must be floats
for k,item in pars.iteritems():
if k in ['Nsyn','Nvar']:
pars[k] = int(item)
elif k=='rate':
if np.isscalar(item):
pars[k] = float(item)
else:
pars[k] = np.array(item,dtype=float)
elif k=='twin':
if not np.isscalar(pars['rate']):
assert len(pars['rate'])==len(item), "twin must be of the same size of rate"
pars[k] = np.array(item, dtype=float)
elif (k=='rate_gtr') and (item!=None):
if np.isscalar(item):
pars[k] = float(item)
else:
pars[k] = np.array(item,dtype=float)
elif (k=='twin_gtr') and (item!=None):
if (not pars['rate_gtr']) and (not np.isscalar(pars['rate_gtr'])):
assert len(pars['rate_gtr'])==len(item), "twin_gtr must be of the same size of rate_gtr"
pars[k] = np.array(item, dtype=float)
return pars
def gtrelease_parameters(**kwargs):
"""
Calcium-dependent gliotransmitter release.
Input:
**kwargs:
- lra_parameters kwargs
- cthr : calcium threshold for exocytosis [uM]
- ua : basal Gt. release probability
- taua : recovery time constant [s]
Return:
- pars : Parameter dictionary
Maurizio De Pitta', Basque Center of Applied Mathematics, June 17, 2018.
"""
pars = {'cthr' : 0.5,
'ua' : 0.6,
'taua' : 1./0.6}
pars = gu.merge_dicts(pars,lra_parameters())
pars['ICs'] = np.r_[pars['ICs'],1.0]
## User-defined parameters
pars = gu.varargin(pars, **kwargs)
for k,item in pars.iteritems():
if isscalar(item):
pars[k] = float(item)
else:
pars[k] = array(item,dtype=float)
return pars
def asn_parameters(model='spk',**kwargs):
"""
    Parameters for the model of a gliotransmitter-modulated synapse.
    Input:
    - model : 'spk' (spiking, default) | 'ave' (mean-field); selects the set of ICs that is returned
**kwargs:
- exocytosis_parameters **kwargs
- gtrelease_parameters **kwargs
- rhoe : Volume ratio
- Gtot : Total Gt. vesicular concentration [mM] [!]
- Ou : Max uptake rate [uM/s]
- Ku : Transporter affinity [uM]
- Og : Presynaptic receptor binding rate [1/(uM.s)]
        - taug : Presynaptic receptor decay time constant [s]
- alpha : Effect parameter
The value of 'js' will be computed internally and automatically included in the returned dictionary.
Return: Dictionary of model parameters.
Maurizio De Pitta', Basque Center for Applied Mathematics, June 18, 2017.
"""
pars = {'rhoe' : 6.5e-4,
'Ou' : 0.,
'Ku' : 100.,
'taue' : 1./60,
'Gtot' : 200., # MUST BE in [mM]
'Og' : 1.5,
'taug' : 30.,
'alpha': 0.5
}
pars = gu.merge_dicts(pars, gtrelease_parameters(),exocytosis_parameters())
pars['ICs'] = np.asarray([0.,0.,0.05,0.99]) # [G_A,\Gamma_S,c,h]
pars['ICr'] = np.asarray([1,0.,0.,1.]) # [x_S,y_S,u_S,x_A]
## User-defined parameters
pars = gu.varargin(pars, **kwargs)
## Takes only the first two elements of ICs in the MF model
if model=='ave':
pars['ICs'] = pars['ICs'][:2]
if 'js' in kwargs:
pars['js'] = kwargs['js']
else:
pars['js'] = pars['rhoe']*pars['Og']*1e3*pars['Gtot']*pars['taue']
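        # js = rhoe*Og*Gtot*taue, with the 1e3 factor converting Gtot from [mM]
        # (see the 'Gtot' entry above) to [uM].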
for k,item in pars.iteritems():
if isscalar(item):
pars[k] = float(item)
else:
pars[k] = array(item,dtype=float)
# pars['Gtot'] *= 1e3 # Convert to [uM]
return pars
###############################################################################
# Simulators
###############################################################################
def exocytosis_model(rate,twin,N_syn,
pars,N_var,
recompile=0,
model='spiking',
solver_options=None,
stimulus='poisson',spikes_pre=None):
"""
Simulator of the Tsodyks-Markram model (Tsodyks, Les Houches 2005)
Input:
- rate : Array-like or scalar for rate of presynaptic APs
- twin : Array-like [t0,tf] for duration of presynaptic stimulation
- N_syn : Number of independent synapses / release sites to simulate
- pars : exocytosis_parameters
- N_var : Number of variables for the exocytosis model (1 | 2)
- recompile : 0 | 1 (debug flag)
- model : 'spiking' | 'average' (mean-field)
- solver_options : solver options (if model=='average')
- stimulus : 'poisson' | 'periodic' | 'fixed' (requires specification of spikes_pre)
- spikes_pre : {None} | array-like with AP instants to pass to the simulator (if stimulus='fixed')
Return: Solution dictionary.
Maurizio De Pitta', Basque Center of Applied Mathematics, June 11, 2018.
"""
# First check that N_var is compatible
assert N_var<3 and N_var>=1, "Number of variables of exocytosis model must be either 1 (x) or 2 (x,u)"
# Assures that twin is Numpy array for later handling
twin = np.asarray(twin,dtype=float)
    # Also make sure to recast N_var in a way that is suitable for C
N_var = np.intc(N_var)
if not N_syn: N_syn = 0 # Dummy value of N_syn in the case of the mean-field model
N_syn = np.intc(N_syn)
if model=='spiking':
# Create input_spikes
if twin.size == 2:
# One rate or multiple ones in the same interval
spikes = spg.input_spikes(N_syn, twin[1], rate, 0, stimulus=stimulus, spikes_pre=spikes_pre)
else:
# Multiple rates in different intervals
spikes = spg.input_spikes(N_syn, twin, rate, 0, stimulus=stimulus, spikes_pre=spikes_pre)
twin = np.r_[0, np.sum(twin)]
N_spk = int(np.shape(spikes)[1])
# Check that ICs are of size N_var x N_syn
# NOTE: you will have to pass the whole vector of ICs to compute solutions from last point
if pars['ICs'].size != (N_var+1)*N_syn:
pars['ICs'] = np.tile(pars['ICs'][:N_var+1],(1,N_syn))[0]
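            # ICs are stored as (x, y[, u]) per synapse, tiled over the N_syn release sites.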
support_code = """
#include "gliotransmission_models.h"
"""
source_files = [os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/pycapi_utils.cpp'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers/solver_options.cpp'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers/stochastic_solvers.cpp'),
os.path.join(os.path.expanduser('~'), base_dir + '/code/gliotransmission_models.cpp')]
code = """
// Version
double version = 1.0;
// Define astrocyte model
release synapse(N_var,N_syn,N_spk,pars);
synapse.set_ics(pars);
// Declare output structure
out_release out(N_spk,N_var);
// Simulator
out = synapse.simulate(spikes.data());
//Output
return_val = out.make_PyDict();
"""
libs = ['gsl', 'gslcblas', 'm']
dirs = [os.path.join(os.path.expanduser('~'), base_dir + '/code/'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers')]
vars = ['pars', 'spikes','N_spk','N_var','N_syn']
otm = weave.inline(code,
vars,
support_code=support_code,
sources=source_files,
libraries=libs,
library_dirs=dirs,
include_dirs=dirs,
runtime_library_dirs=dirs,
type_converters=converters.blitz,
compiler='gcc',
extra_compile_args=['-std=c++11'],
force=recompile)
# Post-stimulus processing
otm['spk'] = spikes[0] # Spike instants
otm['is'] = spikes[-1] # Synapse indexes in the spike train
otm['ICs'] = pars['ICs']
if N_var>1:
u_ = otm['u']
else:
u_ = None
otm['LCs'] = last_point(pars,otm['spk'],twin[-1],otm['x'],otm['y'],otm['is'],uval=u_)
elif model=='average':
# Check that rate is scalar
assert isscalar(rate), "Mean-field rate must be scalar"
support_code = """
#include "gliotransmission_models.h"
"""
source_files = [os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/pycapi_utils.cpp'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers/solver_options.cpp'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers/stochastic_solvers.cpp'),
os.path.join(os.path.expanduser('~'), base_dir + '/code/gliotransmission_models.cpp')]
code = """
// Version
double version = 0.0;
// Define astrocyte model
release_ave synapse(N_var);
// Declare output structure
out_release out;
// Simulator
out = synapse.simulate(rate,pars,solver_options);
//Output
return_val = out.make_PyDict();
"""
libs = ['gsl', 'gslcblas', 'm']
dirs = [os.path.join(os.path.expanduser('~'), base_dir + '/code/'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers')]
vars = ['rate', 'pars', 'solver_options', 'N_var']
otm = weave.inline(code,
vars,
support_code=support_code,
sources=source_files,
libraries=libs,
library_dirs=dirs,
include_dirs=dirs,
runtime_library_dirs=dirs,
type_converters=converters.blitz,
compiler='gcc',
extra_compile_args=['-std=c++11'],
force=recompile)
# Post-stimulus processing
        otm['twin'] = twin # Simulated time interval
# Add released resources
if (N_var < 2):
otm['r'] = pars['u0'] * otm['x']
else:
otm['r'] = np.multiply(otm['x'], otm['u'])
return otm
def lr_model(pars, solver_options,
recompile=0):
"""
Simulator for the plain Li-Rinzel model (Li and Rinzel, JTB 1994) and stochastic Li-Rinzel model (Shuai and Jung, Biophys. J. 2002)
Input:
- pars : lra_parameters
- solver_options : solver options
- recompile : 0 | 1 (debug routine)
Return: Solution dictionary.
Maurizio De Pitta', Basque Center of Applied Mathematics, June 11, 2018.
"""
support_code = """
#include "gliotransmission_models.h"
"""
source_files = [os.path.join(os.path.expanduser('~'), base_dir+'/pycustommodules/pycapi_utils.cpp'),
os.path.join(os.path.expanduser('~'), base_dir+'/pycustommodules/solvers/solver_options.cpp'),
os.path.join(os.path.expanduser('~'), base_dir+'/pycustommodules/solvers/stochastic_solvers.cpp'),
os.path.join(os.path.expanduser('~'), base_dir+'/code/gliotransmission_models.cpp')]
code = """
// Version
double version = 0.0;
// Define astrocyte model
lra astrocyte;
// Declare output structure
out_lra out;
// Simulator
out = astrocyte.simulate(pars,solver_options);
//Output
return_val = out.make_PyDict();
"""
libs = ['gsl', 'gslcblas', 'm']
dirs = [os.path.join(os.path.expanduser('~'), base_dir+'/code/'),
os.path.join(os.path.expanduser('~'), base_dir+'/pycustommodules'),
os.path.join(os.path.expanduser('~'), base_dir+'/pycustommodules/solvers')]
vars = ['pars', 'solver_options']
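    # The C++ simulation kernel (class 'lra', declared in gliotransmission_models.h and
    # compiled from the source files above) is built on the fly by weave.inline and
    # returns its results as a Python dictionary.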
olr = weave.inline(code,
vars,
support_code=support_code,
sources=source_files,
libraries=libs,
library_dirs=dirs,
include_dirs=dirs,
runtime_library_dirs=dirs,
type_converters=converters.blitz,
compiler='gcc',
extra_compile_args=['-std=c++11'],
force=recompile)
# Post-stimulus processing
return olr
def ca_gtrel_model(pars, solver_options,
recompile=0):
"""
    Simulator of calcium-dependent gliotransmitter exocytosis, i.e. the Li-Rinzel model coupled to a 1-variable TM release model.
Input:
- pars : gtrelease_parameters
- solver_options : solver options
- recompile : 0 | 1 (debug routine)
Return: Solution dictionary.
Maurizio De Pitta', Basque Center of Applied Mathematics, June 11, 2018.
"""
support_code = """
#include "gliotransmission_models.h"
"""
source_files = [os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/pycapi_utils.cpp'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers/solver_options.cpp'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers/stochastic_solvers.cpp'),
os.path.join(os.path.expanduser('~'), base_dir + '/code/gliotransmission_models.cpp')]
code = """
// Version
double version = 0.0;
// Define astrocyte model
gtrelease gliot;
// Declare output structure
out_gtrelease out;
// Simulator
out = gliot.simulate(pars,solver_options);
//Output
return_val = out.make_PyDict();
"""
libs = ['gsl', 'gslcblas', 'm']
dirs = [os.path.join(os.path.expanduser('~'), base_dir + '/code/'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers')]
vars = ['pars', 'solver_options']
gtr = weave.inline(code,
vars,
support_code=support_code,
sources=source_files,
libraries=libs,
library_dirs=dirs,
include_dirs=dirs,
runtime_library_dirs=dirs,
type_converters=converters.blitz,
compiler='gcc',
extra_compile_args=['-std=c++11'],
force=recompile)
# Post-stimulus processing
gtr['twin'] = np.asarray([solver_options['t0'],solver_options['tfin']],dtype=float)
gtr['twin_gtr'] = np.asarray([solver_options['t0'], solver_options['tfin']], dtype=float) # Time window used in the reconstruction
# Clean 'x' and provide GRE vector
i_gre = gtr['xa']>0
gtr['xa'] = gtr['xa'][i_gre]
gtr['gre'] = gtr['t'][i_gre]
# A vector with all the indexes of Gt. CONVENTION: we use negative indexes for astrocytic release. Only one release
# site in this implementation, i.e. index -1
gtr['ig'] = -1*np.ones(len(gtr['gre']))
# Add released GTRs
gtr['ra'] = pars['ua']*gtr['xa']
return gtr
def asn_model(pars, solver_options,
N_syn,N_var,rate_syn,twin_syn,
model='spiking',
stimulus_syn='poisson',pre_syn=None,
gtr=None,
rate_gtr=None,twin_gtr=None,
stimulus_gtr=None,pre_gtr=None,
recompile=0):
"""
    Simulator of the gliotransmitter-regulated (tripartite) synapse: TM synaptic release modulated by gliotransmitter release, with GREs either prescribed or generated by Li-Rinzel calcium dynamics.
Input:
- pars : asn_parameters
- solver_options: solver options
- N_syn : Number of independent synapses regulated by the same astrocytic domain
- N_var : Number of variables in the synaptic model
- rate_syn : Array-like or scalar for rate of presynaptic APs
- twin_syn : Array-like [t0,tf] for duration of presynaptic stimulation
        - model : 'spiking' | 'average' (mean-field)
- stimulus_syn : 'poisson' | 'periodic' | 'fixed' (requires specification of pre_syn)
- pre_syn : {None} | array-like with AP instants to pass to the simulator (if stimulus_pre='fixed')
- gtr : None | 'ca_dep' If specified uses Li-Rinzel model to find GREs (not properly tested!)
- rate_gtr : Array-like or scalar for rate of GREs
- twin_gtr : Array-like [t0,tf] for duration of gliotransmitter release
- stimulus_gtr : 'poisson' | 'periodic' | 'fixed' (requires specification of pre_gtr)
- pre_gtr : {None} | array-like with AP instants to pass to the simulator (if stimulus_gtr='fixed')
- recompile : 0 | 1 (debug flag)
Return: Solution dictionary (model specific).
Maurizio De Pitta', Basque Center of Applied Mathematics, June 21, 2018.
"""
# First check that N_var is compatible
assert N_var<3 and N_var>=1, "Number of variables of exocytosis model must be either 1 (x) or 2 (x,u)"
# Assures that twin is Numpy array for later handling
twin_syn = np.asarray(twin_syn,dtype=float)
twin_gtr = np.asarray(twin_gtr,dtype=float)
    # Also make sure to recast N_var in a way that is suitable for C
N_var = int(N_var)
if not N_syn: N_syn = 0 # Dummy value of N_syn in the case of the mean-field model
N_syn = int(N_syn)
if model=='spiking':
twin = {}
# Create input_spikes
if twin_syn.size == 2:
# One rate or multiple ones in the same interval
spikes = spg.input_spikes(N_syn, twin_syn[1], rate_syn, 0, stimulus=stimulus_syn, spikes_pre=pre_syn)
else:
# Multiple rates in different intervals
spikes = spg.input_spikes(N_syn, twin_syn, rate_syn, 0, stimulus=stimulus_syn, spikes_pre=pre_syn)
twin['syn'] = np.r_[0, np.sum(twin_syn)]
# To simplify simulations and avoid complex event handling, we smear spike instants in bins of 'dt'
spikes[0] = np.round(spikes[0],int(np.abs(np.floor(np.log10(solver_options['dt']))))) # Rounds at 10^(-x) (where x=0,1,...)
# Handle the case where GREs are specified
        # Setting of twin_gtr should be either identical to twin_syn or be such that twin['gtr']==twin['syn']
        # Currently this only issues a warning
gres = np.empty((2,0))
if (rate_gtr!=None) or (stimulus_gtr!= None):
# Handle the case of a fixed stimulus with respect to a Poisson one
if stimulus_gtr!='fixed':
assert (twin_gtr!=None), "twin_gtr not specified"
assert (stimulus_gtr != None), "stimulus_gtr not specified"
else:
assert (pre_gtr!=None), "pre_gtr not specified" # NOTE: will SEGFAULT if pre_gtr contains values >twin[1]
if stimulus_gtr!='fixed':
if twin_gtr.size == 2:
# One rate or multiple ones in the same interval
gres = spg.input_spikes(1, twin_gtr[1], rate_gtr, 0, stimulus=stimulus_gtr, spikes_pre=pre_gtr)
else:
# Multiple rates in different intervals
gres = spg.input_spikes(1, twin_gtr, rate_gtr, 0, stimulus=stimulus_gtr, spikes_pre=pre_gtr)
twin['gtr'] = np.r_[0, np.sum(twin_gtr)]
if (twin['gtr'][1]!=twin['syn'][1]): print "WARNING: twin['gtr'][1] != twin['syn'][1]"
else:
gres = spg.input_spikes(1, [], [], 0, stimulus=stimulus_gtr, spikes_pre=pre_gtr)
twin['gtr'] = np.asarray([solver_options['t0'], solver_options['tfin']], dtype=float)
# To simplify simulations and avoid complex event handling, we smear GRE instants in bins of 'dt'
gres[0] = np.round(gres[0], int(np.abs(np.floor(np.log10(solver_options['dt']))))) # Rounds at 10^(-x) (where x=0,1,...)
else:
gtr = 'ca_dep'
# In the case of calcium-dependent GTR the time window to perform reconstruction (if requested) will be the
# whole integration window
twin['gtr'] = np.asarray([solver_options['t0'],solver_options['tfin']],dtype=float)
# Number of equations
if gtr=='ca_dep':
NEQ = 4
else:
NEQ = 2
# Compute size of spike vectors
N_spk = int(np.shape(spikes)[1])
N_gre = int(np.shape(gres)[1])
# Check on ICs based on NEQ
if gtr=='ca_dep':
assert len(pars['ICs'])==NEQ, "ICs with calcium-dependent GTR must be of size 4"
else:
if len(pars['ICs'])!=NEQ: # This check allows to pass ICs from a previous simulation and suppose to continue with a different model
pars['ICs'] = pars['ICs'][:2] # Takes only the last two elements in the original ICs
# Check that ICr are of size N_var x N_syn
# NOTE: you will have to pass the whole vector of ICs to compute solutions from last point
if (pars['ICr'].size != (N_var+1)*N_syn + 1):
pars['ICr'] = np.r_[np.tile(pars['ICr'][:N_var+1],(1,N_syn))[0],pars['ICr'][-1]]
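            # ICr holds the synaptic variables (x, y[, u]) tiled over the N_syn synapses,
            # followed by the single gliotransmitter resource variable x_A.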
# C-kernel
support_code = """
#include "gliotransmission_models.h"
"""
source_files = [os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/pycapi_utils.cpp'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers/solver_options.cpp'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers/stochastic_solvers.cpp'),
os.path.join(os.path.expanduser('~'), base_dir + '/code/gliotransmission_models.cpp')]
code = """
// Version
double version = 0.0;
// Define astrocyte model
asn tsn(NEQ,N_var,N_syn,N_spk,N_gre);
// Declare output structure
out_asn out;
// Simulator
out = tsn.simulate(pars,solver_options,spikes.data(),gres.data());
//Output
return_val = out.make_PyDict();
"""
libs = ['gsl', 'gslcblas', 'm']
dirs = [os.path.join(os.path.expanduser('~'), base_dir + '/code/'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers')]
vars = ['pars','solver_options',
'spikes','N_spk',
'gres','N_gre',
'NEQ','N_var','N_syn',]
asn = weave.inline(code,
vars,
support_code=support_code,
sources=source_files,
libraries=libs,
library_dirs=dirs,
include_dirs=dirs,
runtime_library_dirs=dirs,
type_converters=converters.blitz,
compiler='gcc',
extra_compile_args=['-std=c++11'],
force=recompile)
# Process synaptic stimuli
asn['spk'] = spikes[0] # Spike instants
asn['is'] = spikes[-1] # Synapse indexes in the spike train
if (N_var < 2):
asn['r'] = pars['u0']*asn['x']
else:
asn['r'] = np.multiply(asn['x'],asn['u'])
if not stimulus_gtr:
# No specified spikes are fed into the GTR model, so this means that we are using the Ca2+-dependent model
# of GTR
# Clean 'x' and provide GRE vector
i_gre = asn['xa'] > 0
asn['xa'] = asn['xa'][i_gre]
asn['gre'] = asn['t'][i_gre]
else:
asn['gre'] = gres[0]
# A vector with all the indexes of Gt. CONVENTION: we use negative indexes for astrocytic release. Only one release
# site in this implementation, i.e. index -1
asn['ig'] = -1*np.ones(len(asn['gre']))
# Add released Gt.
asn['ra'] = pars['ua']*asn['xa']
# Append Last point
if N_var>1:
u_ = asn['u']
else:
u_ = None
        # Check that the spike train is not empty; if it is, the last point is simply the initial condition
        # (needed because last_point cannot handle asn['is']=[])
if (asn['is'].size>0) :
LCr_syn = last_point(pars,asn['spk'],twin['syn'][1],asn['x'],asn['y'],asn['is'],uval=u_,gtr=False)
else:
LCr_syn = pars['ICr'][:-1]
if (asn['ig'].size>0) :
LCr_gtr = last_point(pars,asn['gre'],twin['gtr'][1],asn['xa'],asn['xa'],asn['ig'],uval=None,gtr=True)
else:
LCr_gtr = pars['ICr'][-1]
asn['LCr'] = np.r_[LCr_syn,LCr_gtr]
elif model=='average':
# Check that rate is scalar
assert isscalar(rate_syn), "Mean-field synaptic rate must be a non-negative scalar"
assert isscalar(rate_gtr), "Mean-field gliotransmitter release rate must be a non-negative scalar"
support_code = """
#include "gliotransmission_models.h"
"""
source_files = [os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/pycapi_utils.cpp'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers/solver_options.cpp'),
os.path.join(os.path.expanduser('~'),
base_dir + '/pycustommodules/solvers/stochastic_solvers.cpp'),
os.path.join(os.path.expanduser('~'), base_dir + '/code/gliotransmission_models.cpp')]
code = """
// Version
double version = 0.0;
// Define astrocyte model
asn_ave tsn(N_var);
// Declare output structure
out_asn out;
// Simulator
out = tsn.simulate(rate_syn,rate_gtr,pars,solver_options);
//Output
return_val = out.make_PyDict();
"""
libs = ['gsl', 'gslcblas', 'm']
dirs = [os.path.join(os.path.expanduser('~'), base_dir + '/code/'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules'),
os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers')]
vars = ['pars', 'solver_options','N_var',
'rate_syn', 'rate_gtr']
asn = weave.inline(code,
vars,
support_code=support_code,
sources=source_files,
libraries=libs,
library_dirs=dirs,
include_dirs=dirs,
runtime_library_dirs=dirs,
type_converters=converters.blitz,
compiler='gcc',
extra_compile_args=['-std=c++11'],
force=recompile)
# Post processing of twin
asn['twin'] = np.asarray([solver_options['t0'], solver_options['tfin']], dtype=float)
if model=='spiking':
asn['twin_syn'] = twin['syn']
asn['twin_gtr'] = twin['gtr']
# Provide ICs (needed for reconstruction)
asn['ICs'] = pars['ICs']
asn['ICr'] = pars['ICr']
return asn
#-------------------------------------------------------------------------------------------------
# Utils to handle solution
#-------------------------------------------------------------------------------------------------
def last_point(pars,spikes,tfin,xval,yval,idx_,uval=None,gtr=False):
"""
Estimate the last point of continuous solution.
Input:
- pars : Dictionary of model parameters (must contain 'u0', 'taud', 'tauf', 'taui')
- spikes : Spike train
- tfin : Last instant of simulation
- xval : x(t_i^-) value array
- yval : y(t_i^+) value array
- idx : Indexes of synapses
- uval : {0.5} | u(t_i^+) or 'u0' value
Return:
- LCs : array of Last points in time for provided input.
Maurizio De Pitta', Basque Center of Applied Mathematics, June 25, 2018.
"""
    # Make a temporary copy of the synaptic indexes (needed to avoid modifying the index argument in place when
    # treating the case gtr=True)
idx = cp.copy(idx_)
if not gtr:
Nsyn = int(max(idx))+1
else:
Nsyn = 1
        idx += 1 # Shift the gliotransmitter index (-1) to 0 so it can be used as an array index
idx = np.asarray(idx, dtype=int)
x_last,y_last,dt = np.zeros(Nsyn),np.zeros(Nsyn),np.zeros(Nsyn)
if hasattr(uval,'__len__') or (uval!=None):
u_last = np.zeros(Nsyn)
i,j = 0,len(spikes)-1
# Find the last instant of spike for each synapse
while (i<Nsyn) and (j>=0):
if dt[idx[j]]==0:
dt[idx[j]] = spikes[j]
if hasattr(uval,'__len__') or (uval!=None):
u_last[idx[j]] = uval[j]
x_last[idx[j]] = xval[j]*(1-uval[j])
else:
if not gtr:
x_last[idx[j]] = xval[j]*(1-pars['u0'])
else:
x_last[idx[j]] = xval[j]*(1-pars['ua'])
if not gtr: y_last[idx[j]] = yval[j]
i += 1 # as soon as i==Nsyn we found the last spike for all synapses
j -= 1
# Retrieve the interval from last spike to the end of simulation
dt = tfin-dt
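    # dt now holds, for each release site, the time elapsed from its last event to the end of the simulation (tfin)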
# Compute last value
if not gtr:
x_last = xsol(x_last,dt,pars['taud'])
else:
x_last = xsol(x_last, dt, pars['taua'])
if not gtr:
y_last = usol(y_last, dt, pars['taui'])
if hasattr(uval,'__len__') or (uval!=None):
u_last = usol(u_last, dt, pars['tauf'])
# Recast last values in the order of ICs
if not gtr:
if hasattr(uval,'__len__') or (uval!=None):
LCs = (np.vstack((x_last, y_last, u_last))).flatten(order='F')
else:
LCs = (np.vstack((x_last,y_last))).flatten(order='F')
else:
LCs = (np.asarray(x_last)).flatten(order='F')
return LCs
def reconstruct_solution(spikes,sol,uval,twin,ics,tau,variable,**kwargs):
"""
Reconstruct solution from spiking solutions.
Input:
- spikes : AP/GRE train
- sol : Solution points (aka xn, un, yn)
- uval : value for un (if needed)
- twin : time interval within which to reconstruct the solution
- ics : initial conditions for given sol vector
- tau : decay time for sol
- variable : 'x' | 'u' | 'y'
**kwargs:
- dt : time bin for reconstructed solution
Return:
- solution : a list of 2xT/dt arrays where the first row is a time vector, and the second row is the reconstructed
solution for the specified variable.
Maurizio De Pitta', Basque Center of Applied Mathematics, June 13, 2018.
"""
# Model parameters
pars = {'dt' : 1e-3}
pars = gu.varargin(pars,**kwargs)
# Generate time vector
time = np.arange(twin[0],twin[-1],pars['dt'])
time = np.sort(np.r_[time,spikes])
# Generate spike vector
tspk = np.copy(time)
for i in range(1,len(spikes)):
tspk[np.where(np.logical_and(time>=spikes[i-1],time<spikes[i]))[0]] = spikes[i-1]
tspk[np.where(time >= spikes[len(spikes)-1])[0]] = spikes[len(spikes)-1]
tspk[np.where(time < spikes[0])[0]] = 0
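    # tspk[j] now holds the time of the last event preceding time[j] (0 before the first event)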
# Generate general solution vector
vsol = np.ones(time.size)
if (variable=='x') and isscalar(uval):
uval = uval * np.ones(sol.size)
if variable=='x':
for i in range(1, len(spikes)):
# x must be given at x(t_i^+) according to xsol
vsol[np.where(np.logical_and(time >= spikes[i - 1], time < spikes[i]))[0]] = sol[i-1]*(1-uval[i-1])
vsol[np.where(time >= spikes[len(spikes) - 1])[0]] = sol[len(spikes) - 1]*(1-uval[len(spikes)-1])
else:
for i in range(1, len(spikes)):
vsol[np.where(np.logical_and(time >= spikes[i - 1], time < spikes[i]))[0]] = sol[i-1]
vsol[np.where(time >= spikes[len(spikes) - 1])[0]] = sol[len(spikes) - 1]
vsol[np.where(time < spikes[0])[0]] = ics
# Compute effective solution
solution = np.zeros((2, time.size))
solution[0] = time
if variable=='x':
# Assumes that the first ICs is x(0)
solution[1] = xsol(vsol,time-tspk,tau)
else:
solution[1] = usol(vsol,time-tspk,tau)
return solution
#-------------------------------------------------------------------------------------------------
# Gliotransmitter-regulated synapse (GTRS) class
#-------------------------------------------------------------------------------------------------
"""
GTRS is the class that allows simulating all the above models. It is essentially a wrapper of the above routines with
some additional features such as reconstruction of solutions by default for all synapses and the astrocyte. Class
definition is as follows:
gtrs model_object(model=...)
where model is
-'lra' : Li-Rinzel astrocyte;
-'exo_spk' : TM spiking model;
-'exo_ave' : TM mean field model;
-'lra_exo' : Calcium-dependent release model;
-'asn_spk' : Tripartite synapse spiking model;
-'asn_ave' : Tripartite synapse mean field model.
Methods:
self.stimulation : wrapper of stimulation_pars;
self.simulate : simulate the model
self.reconstruct : reconstruct time solution from spiking solution.
Maurizio De Pitta', Basque Center of Applied Mathematics, June 20, 2018.
"""
class gtrs(object):
def __init__(self,model,**kwargs):
self._model = model
if model=='lra':
self.pars = lra_parameters(**kwargs)
elif (model!='lra_exo') and 'exo' in model:
self.pars = exocytosis_parameters(**kwargs)
elif model=='lra_exo':
self.pars = gtrelease_parameters(**kwargs)
elif 'asn' in model:
if model=='asn_ave':
self.pars = asn_parameters('ave',**kwargs)
else:
self.pars = asn_parameters(**kwargs)
# Assign initial conditions
self.ICs = self.pars['ICs']
if 'asn' in model:
self.ICr = self.pars['ICr']
def stimulation(self,**kwargs):
self.stim = stimulus_params(**kwargs)
def simulate(self,
algparams=None,
reconstruct=False,
recompile=0,
**kwargs):
# Solver options handling
self._algparams = algparams # These are the solver_options
if (not self._algparams):
self._algparams = su.solver_opts(method='gsl_rk2imp',**kwargs)
# Make sure to pass last ICs (necessary if we start simulation from last step)
self.pars['ICs'] = self.ICs
if 'asn' in self._model:
self.pars['ICr'] = self.ICr
# Handling of different cases
if self._model=='lra':
assert self._algparams!=None, "Solver options must be specified"
self.sol = lr_model(self.pars,self._algparams,recompile=recompile)
elif self._model=='exo_spk':
try:
self.sol = exocytosis_model(self.stim['rate'],self.stim['twin'],
self.stim['Nsyn'],self.pars,self.stim['Nvar'],
model='spiking',
solver_options=None,
stimulus=self.stim['stimulus'],spikes_pre=self.stim['spikes_pre'],
recompile=recompile)
if reconstruct:
# Provide a default reconstruction of all spiking solutions
self.ICs = self.sol['ICs']
self.reconstruct(synapse_index=np.arange(self.stim['Nsyn']),var='all')
except AttributeError:
print "Stimulation was not set"
elif self._model=='exo_ave':
assert self._algparams!=None, "Solver options must be specified"
try:
self.sol = exocytosis_model(self.stim['rate'],self.stim['twin'],
None,self.pars,self.stim['Nvar'],
model='average',
solver_options=algparams,
recompile=recompile)
except AttributeError:
print "Stimulation was not set"
elif self._model=='lra_exo':
assert self._algparams!=None, "Solver options must be specified"
self.sol = ca_gtrel_model(self.pars, self._algparams, recompile=recompile)
if reconstruct:
self.reconstruct(synapse_index=[-1], var='x') # Negative indexes are used for astrocytic release
elif self._model=='asn_spk':
assert self._algparams!=None, "Solver options must be specified"
try:
self.sol = asn_model(self.pars,algparams,
self.stim['Nsyn'],self.stim['Nvar'],
self.stim['rate'],self.stim['twin'],
model='spiking',gtr=None,
stimulus_syn=self.stim['stimulus'],pre_syn=self.stim['spikes_pre'],
rate_gtr=self.stim['rate_gtr'],twin_gtr=self.stim['twin_gtr'],
stimulus_gtr=self.stim['stimulus_gtr'],pre_gtr=self.stim['pre_gtr'],
recompile=recompile)
if reconstruct:
# Update ICr with the one produced inside the simulator
self.ICr = self.sol['ICr']
# Provide a default reconstruction of all spiking solutions
if self.stim['Nvar'] > 1:
self.reconstruct(synapse_index=np.arange(self.stim['Nsyn']),var='all') # Reconstruct synaptic inputs
else:
print "WARNING: 'reconstruct' does not support 1-var synaptic model in the presence of GTR. Producing only xa."
self.reconstruct(synapse_index=[-1], var='x') # Reconstruct GTR
except AttributeError:
print "Stimulation was not set"
elif self._model=='asn_ave':
try:
self.sol = asn_model(self.pars,algparams,
self.stim['Nsyn'],self.stim['Nvar'],
self.stim['rate'],self.stim['twin'],
model='average',gtr=None,
rate_gtr=self.stim['rate_gtr'],
recompile=recompile)
except AttributeError:
print "Stimulation was not set"
def reconstruct(self,synapse_index=0,var='all',**kwargs):
"""
Reconstruct specific solution from spiking model.
Input:
synapse_index : Scalar or array of indexes of synapses
var : which variable to reconstruct
Return:
sol : Modified solution dictionary with 'ts','xs','us', 'ys' additional entries
"""
try:
# Fix dt for reconstruction
DT = 1e-3
# First check that synapse_index is not out of range
synapse_index = np.asarray(np.atleast_1d(synapse_index),dtype=int)
# assert (synapse_index.min() >=0)&(synapse_index.max()<=self.stim['Nsyn']), "Synapse index out of range [0,N_syn]"
# Basic handling of ICs (since ICs for the discrete compartments are handled differently in 'asn_spk'
if self._model=='asn_spk':
ICs = self.ICr
else:
ICs = self.ICs
for i,si in enumerate(synapse_index):
                # Generate a 'tag' to append to dict keys to distinguish between synaptic ('s') and gliotransmitter ('g') release
if si>=0:
tag = 's'
twin = self.sol['twin']
else:
tag = 'g'
twin = self.sol['twin_gtr']
# Retrieve spike indexes
index = self.sol['i'+tag]==si # Spike indexes per given synapse
# Reconstruction is performed at fixed dt for any solution
if var in ['all','x']:
if si>=0:
if self.stim['Nvar']>1:
u_ = self.sol['u'][index]
else:
u_ = self.pars['u0'] # Will not work in the case of GTR
x = reconstruct_solution(self.sol['spk'][index], self.sol['x'][index], u_,
twin, ICs[si*(self.stim['Nvar']+1)],
self.pars['taud'],variable='x',dt=DT)
else:
x = reconstruct_solution(self.sol['gre'][index], self.sol['xa'][index], self.pars['ua'],
twin, ICs[-1],
self.pars['taua'], variable='x', dt=DT)
if (i==0):
if 't'+tag not in self.sol: self.sol['t'+tag] = [x[0]]
self.sol['x'+tag] = [x[-1]]
else:
if len(self.sol['t'+tag]) <= i: self.sol['t'+tag].append(x[0])
self.sol['x'+tag].append(x[-1])
if (hasattr(self,'stim'))and(self.stim['Nvar']>1)and(var in ['all','u']):
u = reconstruct_solution(self.sol['spk'][index], self.sol['u'][index], [],
twin, ICs[si*(self.stim['Nvar']+1)+2],
self.pars['tauf'], variable='u',dt=DT)
if (i==0):
if 't'+tag not in self.sol: self.sol['t'+tag] = [u[0]]
self.sol['u'+tag] = [u[-1]]
else:
if len(self.sol['t'+tag]) <= i: self.sol['t'+tag].append(u[0])
self.sol['u'+tag].append(u[-1])
if var in ['all','y']:
y = reconstruct_solution(self.sol['spk'][index],self.sol['y'][index], [],
twin, ICs[si*(self.stim['Nvar']+1)+1],
self.pars['taui'], variable='y',dt=DT)
if (i == 0):
if 't'+tag not in self.sol: self.sol['t'+tag] = [y[0]]
self.sol['y'+tag] = [y[-1]]
else:
if len(self.sol['t'+tag]) <= i: self.sol['t'+tag].append(y[0])
self.sol['y'+tag].append(y[-1])
except AttributeError:
print "Spiking solution does not exist"
def __del__(self):
del self.pars
del self.ICs
if hasattr(self,'stim'):
del self.stim
if hasattr(self,'ICr'):
del self.ICr
if hasattr(self,'sol'):
del self.sol
if __name__ == "__main__":
# -------------------------------------------------------------------------------------------------
# Testing Astrocyte class
# -------------------------------------------------------------------------------------------------
####################################################################################################################
# #LRA model
####################################################################################################################
# options = su.solver_opts(t0=0.0, tfin=10.0, dt=1e-2, atol=1e-8, rtol=1e-8, method="gsl_rk2imp")
# astro = gtrs(model='lra',ip3=0.4,noise=True)
# astro.simulate(algparams=options,recompile=1)
# plt.plot(astro.sol['t'], astro.sol['ca'], 'y-', astro.sol['t'], astro.sol['h'], 'm-')
# ####################################################################################################################
# # # Exocytosis (TM model)
# ####################################################################################################################
# exo = gtrs(model='exo_spk',u0=0.2, ICs=[1.0,0.,0.0])
# # exo.stimulation(rate=5,twin=[0.,2.],Nsyn=3,Nvar=1)
# # exo.stimulation(rate=5,twin=[0.,2.],Nsyn=3,Nvar=1)
# exo.stimulation(rate=5, twin=[0., 2.], Nsyn=3, Nvar=2, stimulus='poisson')
# exo.simulate(reconstruct=True,recompile=0)
# # print exo.sol['LCs']
# # # 1-VAR
# # index = exo.sol['is'] == 0
# # plt.plot(exo.sol['ts'][0], exo.sol['xs'][0], 'y-', exo.sol['spk'][index], exo.sol['x'][index], 'yo')
# # index = exo.sol['is']==1
# # plt.plot(exo.sol['ts'][1], exo.sol['xs'][1], 'y-', exo.sol['spk'][index], exo.sol['x'][index], 'yo')
# # index = exo.sol['is']==2
# # plt.plot(exo.sol['ts'][2], exo.sol['xs'][2], 'y-', exo.sol['spk'][index], exo.sol['x'][index], 'yo')
#
# # 2-VAR
# index = exo.sol['is']==0
# plt.plot(exo.sol['ts'][0], exo.sol['xs'][0], 'y-', exo.sol['spk'][index], exo.sol['x'][index], 'yo',
# exo.sol['ts'][0], exo.sol['us'][0], 'm-', exo.sol['spk'][index], exo.sol['u'][index], 'mo')
# index = exo.sol['is']==1
# plt.plot(exo.sol['ts'][1], exo.sol['xs'][1], 'y-', exo.sol['spk'][index], exo.sol['x'][index], 'yo',
# exo.sol['ts'][1], exo.sol['us'][1], 'm--' ,exo.sol['spk'][index], exo.sol['u'][index], 'mo')
# index = exo.sol['is']==2
# plt.plot(exo.sol['ts'][2], exo.sol['xs'][2], 'y-', exo.sol['spk'][index], exo.sol['x'][index], 'yo',
# exo.sol['ts'][2], exo.sol['us'][2], 'm--' ,exo.sol['spk'][index], exo.sol['u'][index], 'mo')
####################################################################################################################
# # Exocytosis (MF model)
####################################################################################################################
# exo = gtrs(model='exo_ave')
# exo.stimulation(rate=3,twin=[0.,1.0],Nvar=1)
# options = su.solver_opts(t0=exo.stim['twin'][0], tfin=exo.stim['twin'][1], dt=1e-4, atol=1e-8, rtol=1e-8, method="gsl_rk2imp")
# exo.simulate(algparams=options,recompile=0)
# if exo.stim['Nvar']>1:
# plt.plot(exo.sol['t'], exo.sol['x'], 'y', exo.sol['t'], exo.sol['u'],'m')
# else:
# plt.plot(exo.sol['t'], exo.sol['x'], 'y')
####################################################################################################################
# # #Calcium-dependent gliotransmitter exocytosis model
####################################################################################################################
# options = su.solver_opts(t0=0.0, tfin=60.0, dt=1e-3, atol=1e-8, rtol=1e-8, method="gsl_rk2imp")
# astro = gtrs(model='lra_exo',ip3=0.4,noise=False,ICs=[0.05,0.9,1.0])
# astro.simulate(algparams=options,reconstruct=True,recompile=0)
# # astro.simulate(algparams=options,reconstruct=True,recompile=1)
# # You must take the first element regardless
# plt.plot(astro.sol['tg'][0], astro.sol['xg'][0], 'y-',astro.sol['gre'],astro.sol['xa'], 'yo',
# astro.sol['t'], astro.sol['ca'],'k-',astro.sol['t'], astro.sol['h'],'m-') # You must take the first element regardless
####################################################################################################################
# Tripartite synapse model (with calcium-dependent GTR)
####################################################################################################################
options = su.solver_opts(t0=0.0, tfin=20.0, dt=1e-3, atol=1e-8, rtol=1e-8, method="gsl_rk2imp")
# tsn = gtrs(model='asn_spk',ip3=0.4,alpha=0.0,noise=False)
# # # Case of NEQ>2 (i.e. calcium-dependent GTR)
# # tsn.stimulation(rate=5, twin=[0., 2.], Nsyn=1, Nvar=2)
# # tsn.simulate(algparams=options,reconstruct=True,recompile=0)
# # # Case of NEQ<3 (GREs given)
# # tsn.stimulation(rate=5, twin=[0., 10.], Nsyn=1, Nvar=2,
# # rate_gtr=1, twin_gtr=[0.,10.],stimulus_gtr='poisson')
# # tsn.simulate(algparams=options,reconstruct=False,recompile=1)
#
# # General Output
# plt.plot(tsn.sol['t'], tsn.sol['ga'], 'y-', tsn.sol['t'], tsn.sol['gammas'], 'm-')
# if not tsn.stim['rate_gtr']: plt.plot(tsn.sol['t'], tsn.sol['ca'], 'k-', tsn.sol['t'],tsn.sol['h'],'b-')
# ## Checking on the effect of GTR on u0 using a periodic/poisson stimulus
# tsn = gtrs(model='asn_spk',ip3=0.4,alpha=1.0,u0=0.1,taud=0.5,tauf=1.,Og=1.5,
# noise=False)
# tsn.stimulation(rate=5, twin=[0., 10.], stimulus='poisson',Nsyn=1, Nvar=2,
# rate_gtr=1, twin_gtr=[0.,10.],stimulus_gtr='fixed',pre_gtr=[4.,6.])
# # tsn.stimulation(rate=1, twin=[0., 10.], stimulus='periodic', Nsyn=1, Nvar=2)
# tsn.simulate(algparams=options,reconstruct=True,recompile=0)
# # Comparison between gammas and y
# plt.plot(tsn.sol['t'], tsn.sol['gammas'], 'm-', tsn.sol['ts'][0],tsn.sol['ys'][0],'k-')
# ## Checking on the effect of GTR on u0 using a periodic/poisson stimulus
# tsn = gtrs(model='asn_spk',noise=False,ua=0.)
# tsn.stimulation(rate=1, twin=[0., 20.], stimulus='periodic',Nsyn=1, Nvar=2, rate_gtr=0.1, twin_gtr=[0.,20.], stimulus_gtr='periodic')
# # tsn.stimulation(rate=1, twin=[0., 10.], stimulus='periodic', Nsyn=1, Nvar=2)
# tsn.simulate(algparams=options,reconstruct=False,recompile=0)
# # Comparison between gammas and y
# plt.plot(tsn.sol['t'], tsn.sol['gammas'], 'm-', tsn.sol['t'],tsn.sol['ga'],'k-')
# Nsyn = 10
# options = su.solver_opts(t0=0.0, tfin=20.0, dt=1e-3, atol=1e-8, rtol=1e-8, method="gsl_rk2imp")
# tsn = gtrs(model='asn_spk',Ou=1e2, Ku=0.1, taue=1./30.,Og=0.1,taug=30.,alpha=1.0,u0=0.05,taud=0.3,tauf=1.0)
# tsn.stimulation(rate=1, twin=[0., 20.], stimulus='periodic',Nsyn=Nsyn, Nvar=2,
# stimulus_gtr='fixed',pre_gtr=[7.0])
# tsn.simulate(algparams=options,reconstruct=True,recompile=0)
# # plt.plot(tsn.sol['t'], tsn.sol['gammas'], 'm-', tsn.sol['ts'][0], tsn.sol['ys'][0], 'k-')
# # for i in xrange(Nsyn):
# # plt.plot(tsn.sol['ts'][i], tsn.sol['us'][i])
# plt.plot(tsn.sol['spk'], tsn.sol['x'], 'ko')
# plt.plot(tsn.sol['spk'], tsn.sol['u'], 'ro')
# # Check on synaptic input reconstruction (single variable)
# index = tsn.sol['is']==0
# print tsn.sol['x'][index]
# plt.plot(tsn.sol['ts'][0], tsn.sol['xs'][0], 'k-',tsn.sol['spk'][index],tsn.sol['x'][index],'ko')
# # plt.plot(tsn.sol['ts'][0], tsn.sol['ys'][0], 'm-',tsn.sol['spk'][index],tsn.sol['y'][index],'yo')
# plt.plot(tsn.sol['ts'][0], tsn.sol['us'][0], 'k--',tsn.sol['spk'][index],tsn.sol['u'][index],'ks')
# index = tsn.sol['is']==1
# print tsn.sol['x'][index]
# plt.plot(tsn.sol['ts'][1], tsn.sol['xs'][1], 'r-',tsn.sol['spk'][index],tsn.sol['x'][index],'ro')
# plt.plot(tsn.sol['ts'][1], tsn.sol['us'][1], 'r--', tsn.sol['spk'][index], tsn.sol['u'][index],'rs')
# # Check on reconstruction of xa (even in the case of a single variable such as xa, the reconstructed variable must be indexed)
# plt.plot(tsn.sol['tg'][0], tsn.sol['xg'][0],'y-',tsn.sol['gre'], tsn.sol['xa'], 'yo')
# if not tsn.stim['rate_gtr']: plt.plot(tsn.sol['t'], tsn.sol['ca'], 'k-', tsn.sol['t'],tsn.sol['h'],'b-')
####################################################################################################################
# MF Tripartite synapse model
####################################################################################################################
options = su.solver_opts(t0=0.0, tfin=30.0, dt=1e-3, atol=1e-8, rtol=1e-8, method="gsl_bsimp")
tsn = gtrs(model='asn_ave',u0=0.6,alpha=0.0)
tsn.stimulation(rate=5, twin=[0., 30.], Nvar=2,rate_gtr=2)
tsn.simulate(algparams=options,recompile=1)
# plt.plot(tsn.sol['t'],tsn.sol['xa'],'y-',tsn.sol['t'],tsn.sol['gammas'],'k-',tsn.sol['t'],tsn.sol['x'],'r-')
plt.plot(tsn.sol['t'], tsn.sol['ga'], 'k-')
# plt.plot(tsn.sol['t'],tsn.sol['xa'],'y-',tsn.sol['t'],tsn.sol['gammas'],'k-',tsn.sol['t'],tsn.sol['x'],'r-')
# if tsn.stim['Nvar']>1:
# plt.plot(tsn.sol['t'],tsn.sol['u'],'b-')
# # Show plot
plt.show() | UTF-8 | Python | false | false | 58,734 | py | 137 | gliotransmission_models.py | 87 | 0.508462 | 0.495692 | 0 | 1,282 | 44.815133 | 151 |
sjabby/home-assistant-config | 1,666,447,360,381 | 51055d988c7ee74a6dac60fbb1843757726dd344 | abac98f525ac744847bfb7a52ebad702b95d5df7 | /custom_components/hacs/repositories/hacsrepositoryintegration.py | 6fa6f4f9cbed607d1e078c0f7ee01f1cd4ab1bdb | [
"MIT"
]
| permissive | https://github.com/sjabby/home-assistant-config | e7d2990df201bb1cc331647e0377ac2dffbc0b23 | 89f4bcea5daeddc1e657bc0d7f6db3197f6bddb8 | refs/heads/master | 2021-06-17T01:11:39.034847 | 2021-03-20T12:40:06 | 2021-03-20T12:40:06 | 170,991,151 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Blueprint for HacsRepositoryIntegration."""
# pylint: disable=too-many-instance-attributes,invalid-name,broad-except,access-member-before-definition
import logging
import json
from .hacsrepositorybase import HacsRepositoryBase
from ..hacsbase.exceptions import HacsRequirement
_LOGGER = logging.getLogger("custom_components.hacs.repository")
class HacsRepositoryIntegration(HacsRepositoryBase):
"""
Set up a HacsRepositoryIntegration object.
repository_name(str): The full name of a repository
(example: awesome-dev/awesome-repo)
"""
def __init__(self, repository_name: str, repositoryobject=None):
"""Initialize a HacsRepositoryIntegration object."""
super().__init__()
self.repository = repositoryobject
self.repository_name = repository_name
self.repository_type = "integration"
self.manifest_content = None
self.domain = None
self.name = repository_name.split("/")[-1]
@property
def config_flow(self):
"""Return bool if integration has config_flow."""
        if self.manifest_content is not None:
return self.manifest_content.get("config_flow", False)
return False
async def reload_config_flows(self):
"""Reload config flows in HA."""
async def update(self):
"""Run update tasks."""
        if await self.common_update():
return
await self.set_repository_content()
await self.set_manifest_content()
async def set_repository_content(self):
"""Set repository content attributes."""
contentfiles = []
if self.content_path is None:
first = await self.repository.get_contents("custom_components", self.ref)
self.content_path = first[0].path
self.content_objects = await self.repository.get_contents(
self.content_path, self.ref
)
if not isinstance(self.content_objects, list):
raise HacsRequirement("Repository structure does not meet the requirements")
for filename in self.content_objects:
contentfiles.append(filename.name)
if contentfiles:
self.content_files = contentfiles
async def set_manifest_content(self):
"""Set manifest content."""
manifest_path = "{}/manifest.json".format(self.content_path)
manifest = None
if "manifest.json" not in self.content_files:
raise HacsRequirement("manifest.json is missing.")
manifest = await self.repository.get_contents(manifest_path, self.ref)
manifest = json.loads(manifest.content)
if manifest:
self.manifest_content = manifest
self.authors = manifest["codeowners"]
self.name = manifest["name"]
self.domain = manifest["domain"]
self.homeassistant_version = manifest.get("homeassistant")
return
raise HacsRequirement("manifest.json does not contain expected values.")
| UTF-8 | Python | false | false | 3,006 | py | 523 | hacsrepositoryintegration.py | 26 | 0.652029 | 0.651031 | 0 | 88 | 33.159091 | 104 |
fangqyi/garage | 7,327,214,249,835 | 482c79cd77b3de30a926a88e2ecc41ea76b32953 | 51d7e8c09793b50d45731bd5ab9b531b525cf6db | /src/garage/torch/algos/discriminator/mlp_discriminator.py | 28d0d389cc2108450890bf310399c454bef8eda2 | [
"MIT"
]
| permissive | https://github.com/fangqyi/garage | 454247849a6a3f547557b3fac3787ba9eeb0391f | ddafba385ef005f46f913ab352f9638760e5b412 | refs/heads/master | 2023-02-25T00:43:18.903328 | 2021-01-26T01:52:15 | 2021-01-26T01:52:15 | 267,667,220 | 0 | 0 | MIT | true | 2020-05-28T18:35:08 | 2020-05-28T18:35:07 | 2020-05-28T16:12:11 | 2020-05-28T18:02:35 | 64,463 | 0 | 0 | 0 | null | false | false | import numpy as np
import torch
import garage.torch.utils as tu
from garage.torch.modules import MLPModule
class MLPDiscriminator(MLPModule):
def __init__(self, env_spec, skills_num, **kwargs):
self._obs_dim = env_spec.observation_space.flat_dim
self._skills_num = skills_num
super().__init__(input_dim=self._obs_dim,
output_dim=skills_num,
**kwargs)
def forward(self, states):
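        # Accept either a NumPy array or a torch tensor; NumPy input is moved to
        # the current global device first. The MLP logits are converted into a
        # categorical distribution over skills with a softmax on the last dim.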
if not isinstance(states, torch.Tensor):
states = torch.from_numpy(states).float().to(
tu.global_device())
# print("in forward")
# print(states.size())
# states = torch.from_numpy(np.array([1, 2, 3])).float().to(tu.global_device())
x = super().forward(states)
return torch.softmax(x, dim=-1)
def infer_skills(self, states):
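        # For a batch of states, sample one skill index per state from the
        # categorical distribution predicted by forward(); no gradients needed.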
with torch.no_grad():
# if not isinstance(states, torch.Tensor):
# states = torch.from_numpy(states).float().to(
# tu.global_device())
# print("in infer_skills")
# print(states.size())
x = self.forward(torch.Tensor(states))
return np.array([np.random.choice(self._skills_num, p=x.numpy()[idx])
for idx in range(x.numpy().shape[0])])
def infer_skill(self, state):
with torch.no_grad():
# if not isinstance(state, torch.Tensor):
# state = torch.from_numpy(state).float().to(
# tu.global_device())
# print("in infer_skill")
# print(state.size())
x = self.forward(torch.Tensor(state).unsqueeze(0))
return np.random.choice(self._skills_num, p=x.squeeze(0).numpy())
| UTF-8 | Python | false | false | 1,768 | py | 110 | mlp_discriminator.py | 94 | 0.549208 | 0.545249 | 0 | 46 | 37.434783 | 87 |
jccode/djcems | 5,712,306,508,709 | 03a38991cd07a84fb1c1992f17ab3639a8b85280 | a2a1c55bdbe58e0b441f067ccbba02f87e275409 | /vehicle/migrations/0006_auto_20160101_1606.py | 123804189801afaaf09ca64b658bf90d9517a1c2 | []
| no_license | https://github.com/jccode/djcems | 41418efbd8c54c2aac660be6d0460f0b6576ad6f | b44965f3357d23073d508b82ea57d44e3edb809d | refs/heads/master | 2021-01-21T13:57:06.632804 | 2016-06-02T08:56:12 | 2016-06-02T08:56:12 | 48,077,422 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-01 08:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vehicle', '0005_auto_20151223_0015'),
]
operations = [
migrations.AlterField(
model_name='bus',
name='drivers',
field=models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL, verbose_name='\u53f8\u673a'),
),
]
| UTF-8 | Python | false | false | 540 | py | 67 | 0006_auto_20160101_1606.py | 58 | 0.62963 | 0.561111 | 0 | 21 | 24.714286 | 111 |
kaushikcfd/mirgecom | 18,382,460,041,884 | f87bf09acb0efa0d1733874c532e0c61b206c601 | 2d6aafcb0926b386243fa2de14745e88984af745 | /test/test_restart.py | 287598e01299db11b07012e8ca43a4f1ff734306 | [
"X11",
"MIT"
]
| permissive | https://github.com/kaushikcfd/mirgecom | 750007dfd918f9c1600eb2e81c7666d6ee4c15af | 47f144782258eae2b1fb39520e96f414ae176ff4 | refs/heads/main | 2023-09-05T23:39:27.929740 | 2021-10-21T00:23:39 | 2021-10-21T00:23:39 | 353,848,748 | 0 | 0 | NOASSERTION | true | 2021-04-01T23:10:52 | 2021-04-01T23:10:52 | 2021-03-30T16:54:13 | 2021-04-01T20:58:32 | 1,746 | 0 | 0 | 0 | null | false | false | """Test the restart module."""
__copyright__ = """
Copyright (C) 2020 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import numpy.random
import logging
import pytest
from pytools.obj_array import make_obj_array
from grudge.eager import EagerDGDiscretization
from meshmode.array_context import ( # noqa
pytest_generate_tests_for_pyopencl_array_context
as pytest_generate_tests)
logger = logging.getLogger(__name__)
@pytest.mark.parametrize("nspecies", [0, 10])
def test_restart_cv(actx_factory, nspecies):
"""Test that restart can read a CV array container."""
actx = actx_factory()
nel_1d = 4
dim = 3
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=(-0.5,) * dim, b=(0.5,) * dim, nelements_per_axis=(nel_1d,) * dim
)
order = 3
discr = EagerDGDiscretization(actx, mesh, order=order)
from meshmode.dof_array import thaw
nodes = thaw(actx, discr.nodes())
mass = nodes[0]
energy = nodes[1]
mom = make_obj_array([nodes[2]*(i+3) for i in range(dim)])
species_mass = None
if nspecies > 0:
mass_fractions = make_obj_array([i*nodes[0] for i in range(nspecies)])
species_mass = mass * mass_fractions
rst_filename = f"test_{nspecies}.pkl"
from mirgecom.fluid import make_conserved
test_state = make_conserved(dim, mass=mass, energy=energy, momentum=mom,
species_mass=species_mass)
rst_data = {"state": test_state}
from mirgecom.restart import write_restart_file
write_restart_file(actx, rst_data, rst_filename)
from mirgecom.restart import read_restart_data
restart_data = read_restart_data(actx, rst_filename)
resid = test_state - restart_data["state"]
assert discr.norm(resid.join(), np.inf) == 0
| UTF-8 | Python | false | false | 2,886 | py | 104 | test_restart.py | 56 | 0.716563 | 0.708593 | 0 | 79 | 35.531646 | 78 |
lyulu/tautology_judge | 3,066,606,687,376 | 34533c891cf143744de62d6e4189dad5111aecff | a1fdc212b47f48f0d553742c84d4c470253862b6 | /tautology_judge.py | 1b56823d9caed2c8ec488b498dff94d65c98e87f | []
| no_license | https://github.com/lyulu/tautology_judge | bfd4d9b369dafdd9395c7d337197135be974932b | babe4f879d63503e5f22a33668cda1b839af96ab | refs/heads/main | 2023-02-26T20:57:04.911757 | 2021-02-09T17:04:27 | 2021-02-09T17:04:27 | 337,470,514 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | wff = input("論理式を入れてください。") # 命題記号はAから順に、 ()を用いて優先順位をわかりやすく。¬だけにも()を使う。余計な()は用いない。
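# English gloss of the Japanese line above: the prompt asks the user to enter a
# well-formed formula; proposition symbols are used in order starting from A,
# parentheses make precedence explicit (even ¬ gets its own parentheses), and no
# redundant parentheses are allowed.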
# Examples: (A→B)→((¬B)→(¬A)), ((A∧B)→C)→((A→C)∨(B→C)), (×∧A)→((C∨D)∨(¬B))
nwff = 0 # number of distinct proposition letters used (A-Z)
ntree = 0 # number of bool values needed, i.e. nodes in the parse tree
floor = 0 # open-parenthesis counter, i.e. current depth in the parse tree
contradiction = 0
tmpprocess = [] # evaluation order of the operators
process = [] # overall evaluation order
tautology = True
for i in range(len(wff)): # wff is the input formula string
	if wff[i] >= "A" and wff[i] <= "Z": # the i-th character is a proposition symbol
		nwff = max(nwff, ord(wff[i]) - ord("A") + 1) # track the number of distinct proposition symbols (nwff)
		process.append([ntree, ord(wff[i]) - ord("A")]) # record the node index and identity of the symbol
		ntree += 1 # count the nodes of the parse tree (ntree)
	elif wff[i] == "(":
		floor += 1 # track the tree depth (floor)
	elif wff[i] == ")":
		floor -= 1 # track the tree depth (floor)
	elif wff[i] == "×": # the i-th character is the contradiction symbol
		process.append([ntree]) # record the node index of the contradiction symbol
		ntree += 1 # count the nodes of the parse tree (ntree)
	else: # the i-th character is a logical operator
		tmpprocess.append([floor, ntree, i])
		ntree += 1 # count the nodes of the parse tree (ntree)
for i in range(len(tmpprocess)):
	tmpfloor = tmpprocess[i][0] # depth of the operator being examined
	tmpplace = tmpprocess[i][1] # node index of the operator being examined
	tmpx = tmpprocess[i][2] # position of the operator in the input string
	tmpleft = 0 # node index of the operator's left child (ignored for ¬)
	tmpright = 0 # node index of the operator's right child
	if wff[tmpx] != "¬": # find tmpleft
if (wff[tmpx-1] >= "A" and wff[tmpx-1] <= "Z") or wff[tmpx-1] == "×":
tmpleft = tmpplace - 1
else:
j = i - 1
while j >= 0:
if len(tmpprocess[j]) != 2 and tmpprocess[j][0] == tmpfloor + 1:
tmpleft = tmpprocess[j][1]
break
else:
j -= 1
if (wff[tmpx+1] >= "A" and wff[tmpx+1] <= "Z") or wff[tmpx+1] == "×": # tmprightを求める
tmpright = tmpplace + 1
else:
j = i + 1
while j < len(tmpprocess):
if len(tmpprocess[j]) != 2 and tmpprocess[j][0] == tmpfloor + 1:
tmpright = tmpprocess[j][1]
break
else:
j += 1
	tmpprocess[i] = [tmpfloor, tmpplace, tmpleft, tmpright, tmpx] # record the result
tmpprocess.sort()
tmpprocess.reverse() # these two lines order the operators from deepest to shallowest
for i in range(len(tmpprocess)):
	process.append(tmpprocess[i]) # all proposition symbols come first so each truth-table row can assign their values; the operators are then evaluated in this order, and process records it
# solve by brute-forcing every truth assignment with a bit mask
for i in range(2**nwff):
makearray = i
tmparray = [False]
wfftree = []
for j in range(nwff):
if makearray % 2 == 1:
tmparray.append(True)
else:
tmparray.append(False)
makearray //= 2
for j in range(ntree):
wfftree.append(True)
for j in range(ntree):
if len(process[j]) == 1:
wfftree[process[j][0]] = False
elif len(process[j]) == 2:
wfftree[process[j][0]] = tmparray[process[j][1] + 1]
else:
tp = process[j][1]
tl = process[j][2]
tr = process[j][3]
tx = process[j][4]
if wff[tx] == "¬":
wfftree[tp] = not(wfftree[tr])
elif wff[tx] == "∧":
wfftree[tp] = wfftree[tl] and wfftree[tr]
elif wff[tx] == "∨":
wfftree[tp] = wfftree[tl] or wfftree[tr]
elif wff[tx] == "→":
wfftree[tp] = not(wfftree[tl]) or wfftree[tr]
if j == ntree - 1 and wfftree[tp] == False:
				tautology = False
if tautology == False:
break
if tautology:
print("この論理式はトートロジーです。")
else:
print("この論理式はトートロジーではありません。")
| UTF-8 | Python | false | false | 4,478 | py | 3 | tautology_judge.py | 1 | 0.52582 | 0.510984 | 0 | 97 | 35.134021 | 105 |
dnoonan08/Plotting_TTGammaRun2 | 2,499,670,993,746 | 0bcfe3fda63fb522df4c1466e2bdc1b62b25b242 | 4ee76700ddf90c10b37021c398e69cb0681403bb | /makePlot_M3Control.py | a59a1d5b5a13adb833ca335b34e2d63b229774a3 | []
| no_license | https://github.com/dnoonan08/Plotting_TTGammaRun2 | f502467ab2635ecf6ef6efe1babd320624dd19f5 | c53b313a893b3563e66186dba07fc8bb4e6ec92b | refs/heads/master | 2021-04-09T02:55:28.017927 | 2020-03-20T03:27:14 | 2020-03-20T03:27:14 | 248,832,053 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from ROOT import TFile, TLegend, TCanvas, TPad, THStack, TF1, TPaveText, TGaxis, SetOwnership, TObject, gStyle,TH1F, gROOT, kViolet,kBlack,kOrange,kRed,kGreen,kBlue,gApplication,kGray,kYellow,kCyan,kSpring
#from ROOT import *
import os
import numpy
import sys
from optparse import OptionParser
from sampleInformation import sampleList
import sampleInformation
from numpy import log10
from array import array
#from getFullYearMisIDEleSF import getFullYearMisIDEleSF
from getMisIDEleSF import getMisIDEleSF
from getZJetsSF import getZJetsSF
padRatio = 0.25
padOverlap = 0.15
padGap = 0.01
parser = OptionParser()
parser.add_option("-y", "--year", dest="Year", default="",type='str',
help="Specify which year 2016, 2017 or 2018?" )
parser.add_option("-c", "--channel", dest="channel", default="",type='str',
help="Specify which channel Mu or Ele? default is Mu" )
parser.add_option("--postfitPlots", dest="postfitPlots", default=False,action="store_true",
help="post fit plots" )
parser.add_option("--M3Plot", dest="M3Plot",default=False,action="store_true",
help="Specify M3 or ChIso" )
parser.add_option("--ChIsoPlot", dest="ChIsoPlot",default=False,action="store_true",
help="Specify M3 or ChIso" )
parser.add_option("--M3pho0Plot", dest="M3pho0Plot",default=False,action="store_true",
help="M3 control region" )
parser.add_option("--tight", dest="tight", default=False,action="store_true",
help="draw photon Category for tight selection" )
parser.add_option("--looseCRge2ge0", dest="looseCRge2ge0", default=False,action="store_true",
help="draw photon Category for loose CR ge2 ge0" )
parser.add_option("--looseCRge2e0", dest="looseCRge2e0", default=False,action="store_true",
help="draw photon Category for loose CR ge2 =0" )
parser.add_option("--LooseCRe2e0","--looseCRe2e0", dest="looseCRe2e0", default=False,action="store_true",
help="Use ==2 jets + ==0 bjets selection" )
parser.add_option("--LooseCRe2e1","--looseCRe2e1", dest="looseCRe2e1", default=False,action="store_true",
help="Use ==2 jets + ==1 bjets selection" )
parser.add_option("--LooseCRe3e0","--looseCRe3e0", dest="looseCRe3e0", default=False,action="store_true",
help="Use ==3 jets + ==0 bjets selection" )
parser.add_option("--LooseCRge4e0","--looseCRge4e0", dest="looseCRge4e0", default=False,action="store_true",
help="Use >=4 jets + ==0 bjets selection" )
parser.add_option("--LooseCRe3e1","--looseCRe3e1", dest="looseCRe3e1", default=False,action="store_true",
help="Use ==3 jets + ==1 bjets selection" )
parser.add_option("--LooseCRe2e2","--looseCRe2e2", dest="looseCRe2e2", default=False,action="store_true",
help="Use ==2 jets + ==2 bjets selection" )
parser.add_option("--LooseCRe3ge2","--looseCRe3ge2", dest="looseCRe3ge2", default=False,action="store_true",
help="Use ==3 jets + >=2 bjets selection" )
parser.add_option("--useQCDMC","--qcdMC",dest="useQCDMC", default=False, action="store_true",
help="")
parser.add_option("--useQCDCR",dest="useQCDCR", default=False, action="store_true",
help="to make plots in QCDCR region")
(options, args) = parser.parse_args()
selYear = options.Year
if selYear=="":
print "Specify which year 2016, 2017 or 2018?"
sys.exit()
finalState = options.channel
postfitPlots = options.postfitPlots
tight = options.tight
looseCRge2ge0=options.looseCRge2ge0
looseCRge2e0 =options.looseCRge2e0
looseCRe2e0 =options.looseCRe2e0
looseCRe2e1 =options.looseCRe2e1
looseCRe3e0 =options.looseCRe3e0
looseCRge4e0 =options.looseCRge4e0
looseCRe3e1 =options.looseCRe3e1
looseCRe2e2 =options.looseCRe2e2
looseCRe3ge2 =options.looseCRe3ge2
useQCDMC = options.useQCDMC
useQCDCR = options.useQCDCR
M3Plot = options.M3Plot
ChIsoPlot=options.ChIsoPlot
M3pho0Plot=options.M3pho0Plot
if finalState=='Mu':
channel = 'mu'
channelText = "#mu+jets"
if finalState=='Ele':
channel = 'ele'
channelText = "e+jets"
rebinCenter = 2
rebinLeftRight = 10
eosFolder="root://cmseos.fnal.gov//store/user/npoudyal/"
#######
########
if tight: #SR8
isSelection = "looseCRge2e0"
if selYear =='2016': ZJetSF = 1.23; MisIDEleSF,ZGammaSF,WGammaSF = getMisIDEleSF(selYear,isSelection);
elif selYear=='2017': ZJetSF = 1.30; MisIDEleSF,ZGammaSF,WGammaSF = getMisIDEleSF(selYear,isSelection);
else : ZJetSF = 1.26; MisIDEleSF,ZGammaSF,WGammaSF = getMisIDEleSF(selYear,isSelection);
if channel=='ele':
isoTTGammaSF = 0.871212 #-0.0421535/+0.0423445 # WGamma non prompt came out empty so fitting failed.
nonPromptTTGammaSF = 0.88642 # +/- 1.36e+00
else:
isoTTGammaSF = 0.916049 #-0.022929/+0.0230016 ; 0.970489 #-0.0275155/+0.0275579
nonPromptTTGammaSF = 3.9909 # +/- 6.48e-01# 3.1048 # +/- 8.47e-01
fileDir = "%shistograms_%s/%s/hists_tight/"%(eosFolder,selYear, channel)
#fileDir = "histograms_%s/%s/hists_tight/"%(selYear, channel)
plotDirectory = "M3ChIso_tightplots_%s_%s/"%(channel, selYear)
regionText = "N_{j}#geq4, N_{b}#geq1"
print fileDir
if looseCRge2ge0: #AR
isSelection = "looseCRge2ge0"
if selYear =='2016': ZJetSF = 1.23; MisIDEleSF,ZGammaSF,WGammaSF = getMisIDEleSF(selYear,isSelection);
elif selYear=='2017': ZJetSF = 1.30; MisIDEleSF,ZGammaSF,WGammaSF = getMisIDEleSF(selYear,isSelection);
else : ZJetSF = 1.26; MisIDEleSF,ZGammaSF,WGammaSF = getMisIDEleSF(selYear,isSelection);
fileDir = "histograms_%s/%s/hists_looseCRge2ge0/"%(selYear, channel)
plotDirectory = "M3ChIso_looseCRge2ge0plots_%s_%s/"%(channel, selYear)
regionText = "N_{j}#geq2, N_{b}#geq0"
if looseCRge2e0: #CR1+CR2+CR3
isSelection = "looseCRge2e0"
if selYear =='2016': ZJetSF = 1.23; MisIDEleSF,ZGammaSF,WGammaSF = getMisIDEleSF(selYear,isSelection);
elif selYear=='2017': ZJetSF = 1.30; MisIDEleSF,ZGammaSF,WGammaSF = getMisIDEleSF(selYear,isSelection);
else : ZJetSF = 1.26; MisIDEleSF,ZGammaSF,WGammaSF = getMisIDEleSF(selYear,isSelection);
fileDir = "histograms_%s/%s/hists_looseCRge2e0/"%(selYear, channel)
plotDirectory = "M3ChIso_looseCRge2e0plots_%s_%s/"%(channel, selYear)
regionText = "N_{j}#geq2, N_{b}=0"
###
if looseCRe2e0: #CR1
isSelection = "looseCRe2e0"
if selYear =='2016': ZJetSF = 1.23; MisIDEleSF,ZGammaSF,WGammaSF = getMisIDEleSF(selYear,isSelection);
elif selYear=='2017': ZJetSF = 1.30; MisIDEleSF,ZGammaSF,WGammaSF = getMisIDEleSF(selYear,isSelection);
else : ZJetSF = 1.26; MisIDEleSF,ZGammaSF,WGammaSF = getMisIDEleSF(selYear,isSelection);
fileDir = "histograms_%s/%s/hists_looseCRe2e0/"%(selYear, channel)
plotDirectory = "M3ChIso_looseCRe2e0plots_%s_%s/"%(channel, selYear)
regionText = "N_{j}=2, N_{b}=0"
if looseCRe3e0: #CR2
isSelection = "looseCRe3e0"
if selYear =='2016': ZJetSF = 1.23; MisIDEleSF,ZGammaSF,WGammaSF = getMisIDEleSF(selYear,isSelection);
elif selYear=='2017': ZJetSF = 1.30; MisIDEleSF,ZGammaSF,WGammaSF = getMisIDEleSF(selYear,isSelection);
else : ZJetSF = 1.26; MisIDEleSF,ZGammaSF,WGammaSF = getMisIDEleSF(selYear,isSelection);
fileDir = "histograms_%s/%s/hists_looseCRe3e0/"%(selYear, channel)
plotDirectory = "M3ChIso_looseCRe3e0plots_%s_%s/"%(channel, selYear)
regionText = "N_{j}=3, N_{b}=0"
if looseCRge4e0: #CR3
isSelection = "looseCRge4e0"
if selYear =='2016': ZJetSF = 1.23; MisIDEleSF,ZGammaSF,WGammaSF = getMisIDEleSF(selYear,isSelection);
elif selYear=='2017': ZJetSF = 1.30; MisIDEleSF,ZGammaSF,WGammaSF = getMisIDEleSF(selYear,isSelection);
else : ZJetSF = 1.26; MisIDEleSF,ZGammaSF,WGammaSF = getMisIDEleSF(selYear,isSelection);
fileDir = "histograms_%s/%s/hists_looseCRge4e0/"%(selYear, channel)
plotDirectory = "M3ChIso_looseCRge4e0plots_%s_%s/"%(channel, selYear)
regionText = "N_{j}#geq4, N_{b}=0"
if looseCRe2e1: #CR4
isSelection = "looseCRe2e1"
if selYear =='2016': ZJetSF = 1.23; MisIDEleSF,ZGammaSF,WGammaSF = (1,1,1);
elif selYear=='2017': ZJetSF = 1.30; MisIDEleSF,ZGammaSF,WGammaSF = (1,1,1);
else : ZJetSF = 1.26; MisIDEleSF,ZGammaSF,WGammaSF = (1,1,1);
fileDir = "histograms_%s/%s/hists_looseCRe2e1/"%(selYear, channel)
plotDirectory = "M3ChIso_looseCRe2e1plots_%s_%s/"%(channel, selYear)
regionText = "N_{j}=2, N_{b}=1"
if looseCRe3e1: #CR5
isSelection = "looseCRe3e1"
if selYear =='2016': ZJetSF = 1.23; MisIDEleSF,ZGammaSF,WGammaSF = (1,1,1);
elif selYear=='2017': ZJetSF = 1.30; MisIDEleSF,ZGammaSF,WGammaSF = (1,1,1);
else : ZJetSF = 1.26; MisIDEleSF,ZGammaSF,WGammaSF = (1,1,1);
fileDir = "histograms_%s/%s/hists_looseCRe3e1/"%(selYear, channel)
plotDirectory = "M3ChIso_looseCRe3e1plots_%s_%s/"%(channel, selYear)
regionText = "N_{j}=3, N_{b}=1"
if looseCRe2e2: #
isSelection = "looseCRe2e2" #
if selYear =='2016': ZJetSF = 1.23; MisIDEleSF,ZGammaSF,WGammaSF = (1,1,1);
elif selYear=='2017': ZJetSF = 1.30; MisIDEleSF,ZGammaSF,WGammaSF = (1,1,1);
else : ZJetSF = 1.26; MisIDEleSF,ZGammaSF,WGammaSF = (1,1,1);
fileDir = "histograms_%s/%s/hists_looseCRe2e2/"%(selYear, channel)
plotDirectory = "M3ChIso_looseCRe2e2plots_%s_%s/"%(channel, selYear)
regionText = "N_{j}=2, N_{b}=2"
if looseCRe3ge2: #CR7
isSelection = "looseCRe3ge2"
if selYear =='2016': ZJetSF = 1.23; MisIDEleSF,ZGammaSF,WGammaSF = (1,1,1);
elif selYear=='2017': ZJetSF = 1.30; MisIDEleSF,ZGammaSF,WGammaSF = (1,1,1);
else : ZJetSF = 1.26; MisIDEleSF,ZGammaSF,WGammaSF = (1,1,1);
fileDir = "histograms_%s/%s/hists_looseCRe3ge2/"%(selYear, channel)
plotDirectory = "M3ChIso_looseCRe3ge2plots_%s_%s/"%(channel, selYear)
regionText = "N_{j}=3, N_{b}#geq2"
###
####
if not os.path.exists(plotDirectory):
os.mkdir(plotDirectory)
gROOT.SetBatch(True)
gStyle.SetOptStat(0)
from Style import *
gROOT.ForceStyle()
sampleList = ['TTGamma', 'TTbar', 'TGJets','SingleTop', 'WJets', 'ZJets', 'WGamma','ZGamma','Diboson','TTV','GJets',"QCD"]
sampleListColor = {'TTGamma':kOrange, 'TTbar':kRed+1, 'TGJets':kGray,'SingleTop':kOrange-3, 'WJets':kCyan-3, 'ZJets':kCyan-5, 'WGamma':kBlue-4,'ZGamma':kBlue-2,'Diboson':kCyan-7,'TTV':kRed-7,'GJets':kGreen+1,"QCD":kGreen+3}
template_category = {"TTGamma":kOrange,
"TTbar": kRed+1,
"WGamma": kBlue-4,
"ZGamma": kBlue-2,
"Other": kGreen+3}
template_categoryName = {"TTGamma":"TT#gamma",
"TTbar": "T#barT",
"WGamma": "W#gamma",
"ZGamma": "Z#gamma",
"Other": "Other"}
_file = {}
import CMS_lumi
if selYear == '2016': CMS_lumi.lumi_13TeV = "35.92 fb^{-1}"
if selYear == '2017': CMS_lumi.lumi_13TeV = "41.53 fb^{-1}"
if selYear == '2018': CMS_lumi.lumi_13TeV = "59.74 fb^{-1}"
if useQCDMC:
if channel=="mu": sampleList[-1] = "QCDMu"
if channel=="ele": sampleList[-1] = "QCDEle"
elif useQCDCR:
sampleList[-1] = "QCD_DD"
stackList.remove("GJets")
else:
print "use --useQCDMC or --useQCDCR!"
sys.exit()
H = 600;
W = 800;
T = 0.08*H
B = 0.12*H
L = 0.12*W
R = 0.1*W
legendHeightPer = 0.04
legendStart = 0.69
legendEnd = 0.97-(R/W)
#legend = TLegend(2*legendStart - legendEnd, 1-T/H-0.01 - legendHeightPer*(len(legList)+1), legendEnd, 0.99-(T/H)-0.01)
legend = TLegend(2*legendStart - legendEnd , 0.99 - (T/H)/(1.-padRatio+padOverlap) - legendHeightPer/(1.-padRatio+padOverlap)*round((7+1)/2.), legendEnd, 0.99-(T/H)/(1.-padRatio+padOverlap))
legend.SetNColumns(2)
#legendR = TLegend(0.71, 0.99 - (T/H)/(1.-padRatio+padOverlap) - legendHeightPer/(1.-padRatio+padOverlap)*(len(legList)+1), 0.99-(R/W), 0.99-(T/H)/(1.-padRatio+padOverlap))
legendR = TLegend(2*legendStart - legendEnd , 0.99 - (T/H)/(1.-padRatio+padOverlap) - legendHeightPer/(1.-padRatio+padOverlap)*round((7+1)/2.)-0.1, legendEnd, 0.99-(T/H)/(1.-padRatio+padOverlap))
legendR.SetNColumns(2)
legendR.SetBorderSize(0)
legendR.SetFillColor(0)
legend.SetBorderSize(0)
legend.SetFillColor(0)
histName = "presel_M3_%s"
histNameData= "presel_M3_%s"
mydistributionName = histNameData[7:-3]+"_Pho0"
if finalState=='Ele':
sample = "DataEle"
_file[sample] = TFile.Open("%s%s.root"%(fileDir,sample),"read")
dataHist = _file[sample].Get(histNameData%(sample))
dataHist.SetLineColor(kBlack)
dataHist.SetMarkerStyle(8)
elif finalState=='Mu':
sample = "DataMu"
_file[sample] = TFile.Open("%s%s.root"%(fileDir,sample),"read")
dataHist = _file[sample].Get(histNameData%(sample))
dataHist.SetLineColor(kBlack)
dataHist.SetMarkerStyle(8)
else:
print "Select the channel !!!"
sys.exit()
templateHist ={}
for sample in sampleList:
if finalState == 'Ele' and sample == 'QCD': sample = 'QCDEle'
if finalState == 'Mu' and sample == 'QCD': sample = 'QCDMu'
_file[sample] = TFile.Open('%s%s.root'%(fileDir,sample),'read')
templateHist = {}
templateHist["TTGamma" ] = None
templateHist["TTbar" ] = None
templateHist["WGamma" ] = None
templateHist["ZGamma" ] = None
templateHist["Other" ] = None
for sample in sampleList:
tempHist = _file[sample].Get(histName%(sample))
if sample=='ZJets':
tempHist.Scale(ZJetSF)
print "ZJetSF", ZJetSF
if sample=='TTGamma': templateHist["TTGamma"]= tempHist.Clone("TTGamma")
elif sample=='TTbar' : templateHist["TTbar"] = tempHist.Clone("TTbar")
elif sample=='WGamma' : templateHist["WGamma"] = tempHist.Clone("WGamma")
elif sample=='ZGamma' : templateHist["ZGamma"] = tempHist.Clone("ZGamma")
else:
if templateHist["Other"] is None:
templateHist["Other"] = tempHist.Clone("Other")
templateHist["Other"].SetDirectory(0)
else:
templateHist["Other"].Add(tempHist)
#gApplication.Run()
#print "exited"
#sys.exit()
# apply SF before plotting or feeding into combine
templateHist["WGamma"].Scale(WGammaSF)
templateHist["ZGamma"].Scale(ZGammaSF)
print "WGammaSF and ZGammaSF", WGammaSF," ",ZGammaSF
data_obs = dataHist.Clone("data_obs")
rebin=20
rebinCenter = 10 #2 #4
rebinLeftRight =20 # 10 #20
rebinLeftRightRight =40 # 10 #20
#binning = numpy.arange(50,500.1,rebin)
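# Variable-width M3 binning: 10 GeV bins in the 100-200 GeV region around the
# expected hadronic-top peak, 20 GeV bins in the side regions, and 40 GeV bins
# in the high tail (310-500 GeV).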
binningLeft = list(numpy.arange(50,90.1,rebinLeftRight)) # 40, 20 start
binningCenter = list(numpy.arange(100,200.1,rebinCenter))
binningRight = list(numpy.arange(210,300.1,rebinLeftRight))
binningRightRight = list(numpy.arange(310,500.1,rebinLeftRightRight))
binning = numpy.array(binningLeft + binningCenter + binningRight+binningRightRight)
rebinnedData = data_obs.Rebin(len(binning)-1,"",binning)
rebinnedHist ={}
if postfitPlots:
print"no need save root file"
else:
myfile = TFile(plotDirectory+"promptTemplate_%s_%s_Prefit.root"%(channel,mydistributionName),"recreate")
for ih in templateHist:
rebinnedHist[ih] = templateHist[ih].Rebin(len(binning)-1,"",binning)
rebinnedHist[ih].SetLineColor(template_category[ih])
rebinnedHist[ih].SetFillColor(template_category[ih])
rebinnedHist[ih].Write()
rebinnedData.Write()
# forget about plotting right now. Just make a template.
## purpose for plotting
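# Scaling by "width" divides each bin by its width so the distributions are
# drawn as events per GeV, keeping the variable-width bins comparable.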
rebinnedData.Scale(1.,"width")
stack = THStack()
print rebinnedHist.keys()
for ih in rebinnedHist:
rebinnedHist[ih].Scale(1.,"width")
stack.Add(rebinnedHist["Other"])
stack.Add(rebinnedHist["ZGamma"])
stack.Add(rebinnedHist["WGamma"])
stack.Add(rebinnedHist["TTbar"])
stack.Add(rebinnedHist["TTGamma"])
if postfitPlots:
MC = stack.GetStack().Last().Clone("MC")
x = rebinnedData.Chi2Test(MC,"UW CHI2/NDF")
chi2Text = "#chi^{2}/NDF=%.2f"%x
canvasRatio = TCanvas('c1Ratio','c1Ratio',W,H)
canvasRatio.SetFillColor(0)
canvasRatio.SetBorderMode(0)
canvasRatio.SetFrameFillStyle(0)
canvasRatio.SetFrameBorderMode(0)
canvasRatio.SetLeftMargin( L/W )
canvasRatio.SetRightMargin( R/W )
canvasRatio.SetTopMargin( T/H )
canvasRatio.SetBottomMargin( B/H )
canvasRatio.SetTickx(0)
canvasRatio.SetTicky(0)
canvasRatio.Draw()
canvasRatio.cd()
pad1 = TPad("zxc_p1","zxc_p1",0,padRatio-padOverlap,1,1)
pad2 = TPad("qwe_p2","qwe_p2",0,0,1,padRatio+padOverlap)
pad1.SetLeftMargin( L/W )
pad1.SetRightMargin( R/W )
pad1.SetTopMargin( T/H/(1-padRatio+padOverlap) )
pad1.SetBottomMargin( (padOverlap+padGap)/(1-padRatio+padOverlap) )
pad1.SetFillColor(0)
pad1.SetBorderMode(0)
pad1.SetFrameFillStyle(0)
pad1.SetFrameBorderMode(0)
pad1.SetTickx(0)
pad1.SetTicky(0)
pad2.SetLeftMargin( L/W )
pad2.SetRightMargin( R/W )
pad2.SetTopMargin( (padOverlap)/(padRatio+padOverlap) )
pad2.SetBottomMargin( B/H/(padRatio+padOverlap) )
pad2.SetFillColor(0)
pad2.SetFillStyle(4000)
pad2.SetBorderMode(0)
pad2.SetFrameFillStyle(0)
pad2.SetFrameBorderMode(0)
pad2.SetTickx(0)
pad2.SetTicky(0)
pad1.Draw()
pad2.Draw()
noData = False
oneLine = TF1("oneline","1",-9e9,9e9)
oneLine.SetLineColor(kBlack)
oneLine.SetLineWidth(1)
oneLine.SetLineStyle(2)
maxVal = stack.GetMaximum()
if not noData:
maxVal = max(rebinnedData.GetMaximum(),maxVal)
minVal = 1
# minVal = max(stack.GetStack()[0].GetMinimum(),1)
stack.SetMaximum(1.25*maxVal)
stack.SetMinimum(minVal)
errorband=stack.GetStack().Last().Clone("error")
errorband.Sumw2()
errorband.SetLineColor(kBlack)
errorband.SetFillColor(kBlack)
errorband.SetFillStyle(3245)
errorband.SetMarkerSize(0)
legend.AddEntry(rebinnedData,"Data", 'pe')
legend.AddEntry(errorband,"Uncertainty","f")
for ih in rebinnedHist:
legend.AddEntry(rebinnedHist[ih],template_categoryName[ih],'f')
pad1.cd()
stack.Draw('HIST')
rebinnedData.Draw('E,X0,SAME')
legend.Draw("same")
stack.GetXaxis().SetTitle('')
stack.GetXaxis().SetLabelSize(0)
stack.GetYaxis().SetLabelSize(gStyle.GetLabelSize()/(1.-padRatio+padOverlap))
stack.GetYaxis().SetTitleSize(gStyle.GetTitleSize()/(1.-padRatio+padOverlap))
stack.GetYaxis().SetTitleOffset(gStyle.GetTitleYOffset()*(1.-padRatio+padOverlap))
stack.SetTitle(';;<Events/GeV>')# '%rebin)
#CMS_lumi.channelText = (channelText+"\\n"+regionText)
#if postfitPlots: CMS_lumi.channelText =channelText+"\\n "+regionText+"\\n "+chi2Text
CMS_lumi.channelText = "#splitline{%s}{%s}"%(channelText,regionText)
if postfitPlots: CMS_lumi.channelText = "#splitline{%s}{%s}"%(channelText+";"+regionText,chi2Text)
CMS_lumi.writeChannelText = True
CMS_lumi.writeExtraText = True
CMS_lumi.CMS_lumi(pad1, 4, 11)
if not noData:
ratio = rebinnedData.Clone("temp")
temp = stack.GetStack().Last().Clone("temp")
for i_bin in range(1,temp.GetNbinsX()+1):
temp.SetBinError(i_bin,0.)
ratio.Divide(temp)
else:
ratio = rebinnedData.Clone("temp")
temp = stack.GetStack().Last().Clone("temp")
ratio.SetTitle('')
ratio.GetXaxis().SetLabelSize(gStyle.GetLabelSize()/(padRatio+padOverlap))
ratio.GetYaxis().SetLabelSize(gStyle.GetLabelSize()/(padRatio+padOverlap))
ratio.GetXaxis().SetTitleSize(gStyle.GetTitleSize()/(padRatio+padOverlap))
ratio.GetYaxis().SetTitleSize(gStyle.GetTitleSize()/(1.-padRatio+padOverlap))
ratio.GetYaxis().SetTitleOffset(gStyle.GetTitleYOffset()*(1.-padRatio+padOverlap-padGap))
maxRatio = ratio.GetMaximum()
minRatio = ratio.GetMinimum()
for i_bin in range(1,ratio.GetNbinsX()):
if ratio.GetBinError(i_bin)<1:
if ratio.GetBinContent(i_bin)>maxRatio:
maxRatio = ratio.GetBinContent(i_bin)
if ratio.GetBinContent(i_bin)<minRatio:
minRatio = ratio.GetBinContent(i_bin)
if maxRatio > 1.8:
ratio.GetYaxis().SetRangeUser(0,round(0.5+maxRatio))
elif maxRatio < 1:
ratio.GetYaxis().SetRangeUser(0,1.2)
elif maxRatio-1 < 1-minRatio:
ratio.GetYaxis().SetRangeUser((1-(1-minRatio)*1.2),1.1*maxRatio)
else:
ratio.GetYaxis().SetRangeUser(2-1.1*maxRatio,1.1*maxRatio)
ratio.GetYaxis().SetRangeUser(0.8,1.2)
ratio.GetYaxis().SetNdivisions(504)
ratio.GetXaxis().SetTitle('%s(GeV)'%mydistributionName)
ratio.GetYaxis().SetTitle("Data/MC")
ratio.GetYaxis().SetTitleOffset(.4)
ratio.GetYaxis().SetTitleSize(.09)
ratio.GetYaxis().SetNdivisions(2)
CMS_lumi.CMS_lumi(pad2, 4, 11)
pad2.cd()
maxRatio = 1.5
minRatio = 0.5
ratio.SetMarkerStyle(rebinnedData.GetMarkerStyle())
ratio.SetMarkerSize(rebinnedData.GetMarkerSize())
ratio.SetLineColor(rebinnedData.GetLineColor())
ratio.SetLineWidth(rebinnedData.GetLineWidth())
ratio.Draw('e,x0')
errorbandRatio = errorband.Clone("errorRatio")
errorbandRatio.Divide(temp)
errorbandRatio.Draw('e2,same')
oneLine.Draw("same")
canvasRatio.Update()
canvasRatio.RedrawAxis()
if postfitPlots:
canvasRatio.SaveAs("%s%s_%s_postfit.root"%(plotDirectory,plotDirectory[:-1],mydistributionName))
canvasRatio.Print("%s%s_%s_postfit.pdf" %(plotDirectory,plotDirectory[:-1],mydistributionName))
else:
canvasRatio.SaveAs("%s%s_%s.root"%(plotDirectory,plotDirectory[:-1],mydistributionName))
canvasRatio.Print("%s%s_%s.pdf" %(plotDirectory,plotDirectory[:-1],mydistributionName))
myfile.Close()
canvasRatio.Close()
| UTF-8 | Python | false | false | 20,574 | py | 99 | makePlot_M3Control.py | 70 | 0.708564 | 0.66307 | 0 | 566 | 35.337456 | 223 |
gfcarbonell/app_main | 17,703,855,198,359 | deed5ce9edde65f79675ff0825eb38f988a62365 | 467f7f01de1a7995e4a9fe97ec880edb9e64af3c | /app_main/web_headers/views.py | cdfb49e748f8e7fd17bd01f2c9597832a25ef7a4 | []
| no_license | https://github.com/gfcarbonell/app_main | 00f0fd53b883d7a119852b7a282e22612ac0593c | 0cdee47c717078f9e78a687a1e0da8f4304c7f02 | refs/heads/master | 2020-06-17T22:24:09.399604 | 2017-02-15T23:23:20 | 2017-02-15T23:23:20 | 74,966,359 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- encoding: utf-8 -*-
from django.shortcuts import render
from rest_framework import viewsets
from .models import WebHeader
from .serializers import WebHeaderModelSerializer
class WebHeaderModelViewSet(viewsets.ModelViewSet):
model = WebHeader
serializer_class = WebHeaderModelSerializer
queryset = WebHeader.objects.all()
| UTF-8 | Python | false | false | 360 | py | 83 | views.py | 80 | 0.75 | 0.747222 | 0 | 11 | 31.727273 | 51 |
Vijendrapratap/Machine-Learning | 19,000,935,322,208 | 9d5282b60db04b75e6357f8a23f97e5edf94e604 | 331bcb7a843a75299338bcab6868e75a2ee456cd | /week1/1. WAP which accepts the user's first and last name and print them in reverse order with a space between them.py | 5ff6b30141c139d0e75158605fd82c974957af99 | []
| no_license | https://github.com/Vijendrapratap/Machine-Learning | 63d2a955ab04f226f243a94f4972aa06857f25b4 | e0a6acf5def491705a803b15b03db85a7c3b875c | refs/heads/master | 2022-06-11T19:29:33.615535 | 2019-06-03T07:07:46 | 2019-06-03T07:07:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Take user's name as input and print in reverse order
Example:
name: alfa singh
output : afla hgnis
Author : vijendra pratsp singh
Email : pratap.vijendrasingh96@gmail.com
"""
def reverse_name():
f_name = input("Enter your first name : ")
l_name = input("Enter your last name : ")
f_name.split()
l_name.split()
f_name = f_name[::-1]
l_name = l_name[::-1]
print("Hello! {} {}".format(f_name, l_name))
reverse_name()
| UTF-8 | Python | false | false | 470 | py | 182 | 1. WAP which accepts the user's first and last name and print them in reverse order with a space between them.py | 146 | 0.604255 | 0.595745 | 0 | 24 | 18.541667 | 52 |
wuxvsuizhong/Li_pro | 13,211,319,443,489 | 48e07f2020fe2436543a0dd342a29ec891126e54 | 42e0305c8cc9e20fee14d359ec3d466fb4608607 | /django/django_1/project/everydayfresh/df_goods/admin.py | f3d7e5ff4ee5e1c4bc05bada128d0448fbaa450a | []
| no_license | https://github.com/wuxvsuizhong/Li_pro | 976159583927823464d4576efb59aaf86ef65e13 | 7facd87e67f767412917d9b8668746f1d87ec28f | refs/heads/master | 2023-08-08T23:13:08.226873 | 2023-07-22T10:09:25 | 2023-07-22T10:09:25 | 107,368,788 | 0 | 0 | null | false | 2017-10-18T06:50:33 | 2017-10-18T06:42:18 | 2017-10-18T06:42:18 | 2017-10-18T06:50:32 | 0 | 0 | 0 | 0 | null | null | null | from django.contrib import admin
from models import TypeInfo,GoodsInfo
class TypeInfoAdmin(admin.ModelAdmin):
list_display=['id','ttitle']
class GoodsInfoAdmin(admin.ModelAdmin):
list_display=['id','gtitle','gpic','gprice','gunit','gbrief','gtype']
# Register your models here.
admin.site.register(TypeInfo,TypeInfoAdmin)
admin.site.register(GoodsInfo,GoodsInfoAdmin)
| UTF-8 | Python | false | false | 379 | py | 224 | admin.py | 195 | 0.770449 | 0.770449 | 0 | 12 | 30.583333 | 73 |
Gerry1208/Level4Project | 11,175,504,932,569 | 9d4d9bdb5f71851207208a5c40c2669bcf59bfd0 | e70bf2c1ed6b0f36efcd2777cf4f3e6bb295d9f7 | /names/views.py | 1acfc619e17093e5a7681486502fd423db0cd08e | []
| no_license | https://github.com/Gerry1208/Level4Project | 40e8cb851b14c0d94fc8ab42183942ffd76bcabf | dce8eef5ad7b0ff3e084d94e6ed20d84c86de994 | refs/heads/master | 2021-01-10T17:31:21.633574 | 2016-03-19T20:43:06 | 2016-03-19T20:43:06 | 43,259,166 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from lib2to3.fixes.fix_input import context
from names.forms import UserForm, UserProfileForm, cardForm, groupsForm, picForm, bulkUpload
from django.shortcuts import render, render_to_response, redirect
from django.contrib.auth import authenticate, login, logout, update_session_auth_hash
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib import messages
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.views.decorators.csrf import csrf_protect
from names.models import groupModel, card, cardPicture
from django.views.generic.edit import FormView
from .forms import pictureForm
from .models import cardPicture
import random
import logging
logger = logging.getLogger(__name__)
#form.cleaned_data for all?
@login_required
def change_password(request):
if request.method == 'POST':
form = PasswordChangeForm(request.user, data=request.POST)
if form.is_valid():
form.save()
update_session_auth_hash(request, form.user)
messages.success(request, "Password changed.")
return redirect("/names/index")
else:
form = PasswordChangeForm(request.user)
data = {
'form': form
}
return render(request, "changepass.html", data)
def index(request):
return render(request, 'index.html', {})
def register(request):
# A boolean value for telling the template whether the registration was successful.
# Set to False initially. Code changes value to True when registration succeeds.
registered = False
# If it's a HTTP POST, we're interested in processing form data.
if request.method == 'POST':
# Attempt to grab information from the raw form information.
# Note that we make use of both UserForm and UserProfileForm.
user_form = UserForm(data=request.POST)
profile_form = UserProfileForm(data=request.POST)
# If the two forms are valid...
if user_form.is_valid() and profile_form.is_valid():
# Save the user's form data to the database.
user = user_form.save()
# Now we hash the password with the set_password method.
# Once hashed, we can update the user object.
user.set_password(user.password)
user.save()
# Now sort out the UserProfile instance.
# Since we need to set the user attribute ourselves, we set commit=False.
# This delays saving the model until we're ready to avoid integrity problems.
profile = profile_form.save(commit=False)
profile.user = user
user = authenticate(username=user_form.cleaned_data['username'],password=user_form.cleaned_data['password'],)
login(request,user)
# Did the user provide a profile picture?
# If so, we need to get it from the input form and put it in the UserProfile model.
if 'picture' in request.FILES:
profile.picture = request.FILES['picture']
# Now we save the UserProfile model instance.
profile.save()
# Update our variable to tell the template registration was successful.
registered = True
# Invalid form or forms - mistakes or something else?
# Print problems to the terminal.
# They'll also be shown to the user.
else:
print user_form.errors, profile_form.errors
# Not a HTTP POST, so we render our form using two ModelForm instances.
# These forms will be blank, ready for user input.
else:
user_form = UserForm()
profile_form = UserProfileForm()
# Render the template depending on the context.
return render(request,
'register.html',
{'user_form': user_form, 'profile_form': profile_form, 'registered': registered} )
@login_required
def upload(request):
if request.method == "POST":
form = bulkUpload(request.POST, request.FILES)
if form.is_valid():
form.save(request)
context = {"form":form}
return render_to_response('upload.html', context, context_instance=RequestContext(request))
else:
form = bulkUpload()
context = {"form":form}
return render_to_response('upload.html', context, context_instance=RequestContext(request))
@csrf_protect
@login_required
def create_cards(request):
if request.method == 'POST':
card_form = cardForm(data = request.POST)
pic_form = picForm(request.POST, request.FILES)
if card_form.is_valid() and pic_form.is_valid():
card = card_form.save()
card.save()
pic = pic_form.save(commit=False)
pic.student = card
pic.file = request.FILES['file']
pic.save()
else:
card_form = cardForm()
pic_form = picForm()
return render_to_response('create.html', {'card_form': card_form, 'pic_form':pic_form},
context_instance=RequestContext(request))
def user_login(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request,user)
return HttpResponseRedirect('/names/index/')
else:
return HttpResponse("Your rango account is disabled")
else:
print "invalid login details: {0}, {1}".format(username, password)
return HttpResponse("Invalid login details supplied")
else:
return render(request, 'login.html', {})
@csrf_protect
@login_required
def groups(request):
if request.method == 'POST':
group = groupsForm(request.POST)
if group.is_valid():
g = group.save(commit=False)
g.group_name = request.POST.get('groupname')
g.save()
else:
group = groupsForm()
return render_to_response('groups.html',{"group":group},context_instance=RequestContext(request))
@csrf_protect
@login_required
def cardview(request):
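    # Collect every card in the requested group together with the pictures
    # linked to each student, and pass both lists to the template.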
group_name = request.GET.get('name')
cards = card.objects.filter(group=group_name)
pictures = []
group = group_name
for c in cards:
pictures += cardPicture.objects.filter(student=c.student)
return render_to_response('cardview.html', {'cards':cards, 'pictures':pictures, 'group':group}, context_instance=RequestContext(request))
@login_required
def quiz(request):
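    # Set up a quiz for the requested group: shuffle the group's cards and the
    # pictures, cache them in the session, and reset the question counter
    # before rendering the "ready" page.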
count = -1
group_name=request.GET.get('name')
quiz_type=request.GET.get('quiz')
cards = card.objects.values_list(flat=True).filter(group=group_name).order_by('?')
pictures = cardPicture.objects.values_list(flat=True).order_by('?')
cards = list(cards)
pictures = list(pictures)
request.session['cards'] = cards
request.session['pictures'] = pictures
request.session['count'] = count
return render_to_response('readyquiz.html', {'cards':cards, 'pictures':pictures, 'count': count, 'quiz_type':quiz_type}, context_instance=RequestContext(request))
def nextQuestion(request):
cards = request.session.get('cards')
pictures = request.session.get('pictures')
count = request.session.get('count')
score = 0
if(request.GET.get('score')):
score = request.GET.get('score')
# Gets the correct question number, and finishes the quiz if 10 questions have been answered
if len(cards) < 10:
if count==(len(cards)-1):
count = 10
if count == 10:
return render_to_response('quiz.html', {'score':score, 'count':count}, context_instance=RequestContext(request))
if len(cards) < 4:
return render(request, 'index.html')
count += 1
request.session['count'] = count
# Gets the correct card and corresponding photo
card = cards[count]
for p in pictures:
if p[1] == card[0]:
pictures = p
if len(pictures) > 1:
pictures = []
# Gets three random names to go along with it
# Adds in the correct answer, and shuffles
rndNames = []
rndNames.append(card)
while len(rndNames) < 4:
choice = random.choice(cards)
if choice not in rndNames:
rndNames.append(choice)
random.shuffle(rndNames)
return render_to_response('quiz.html', {'cards':card, 'pictures':pictures, 'names':rndNames, 'score':score, 'count':count}, context_instance=RequestContext(request))
@login_required
def SelfMarkQuiz(request):
cards = request.session.get('cards')
pictures = request.session.get('pictures')
count = request.session.get('count')
score = 0
if(request.GET.get('score')):
score = request.GET.get('score')
if len(cards) < 10:
if count==(len(cards)-1):
count = 10
if count == 10:
return render_to_response('selfmark.html', {'score':score, 'count':count}, context_instance=RequestContext(request))
# Gets the correct question number, and finishes the quiz if 10 questions have been answered
count += 1
request.session['count'] = count
cards = cards[count]
for p in pictures:
if p[1] == cards[0]:
pictures = p
if len(pictures) > 1:
pictures = []
return render_to_response('selfmark.html', {'cards':cards, 'pictures':pictures, 'score':score, 'count':count}, context_instance=RequestContext(request))
@login_required
def user_logout(request):
logout(request)
return HttpResponseRedirect('/names/index/')
@csrf_protect
@login_required
def groupview(request):
if request.method == 'POST':
group_form = groupsForm(data = request.POST)
if group_form.is_valid():
g = group_form.save(commit=False)
g.user.add(request.user.id)
g.save()
groups = groupModel.objects.filter(user = request.user)
group_form = groupsForm()
return render_to_response('groups.html', {'groups':groups, 'group_form':group_form}, context_instance=RequestContext(request))
class addPicture(FormView):
template_name='addpicture.html'
form_class = pictureForm
success_url = '/names/addpicture/'
def form_valid(self, form):
for each in form.cleaned_data['files']:
studArray= each.name.split(".")
studName=studArray[0]
studCard = card.objects.filter(student=studName).first()
cardPicture.objects.create(file=each, student=studCard)
return super(addPicture,self).form_valid(form)
@login_required
def complete(request):
score = request.GET.get('score')
return render_to_response('complete.html', {'score':score}, context_instance=RequestContext(request))
| UTF-8 | Python | false | false | 10,910 | py | 22 | views.py | 9 | 0.646013 | 0.642713 | 0 | 316 | 33.525316 | 169 |
CUrW-SL/curw_wrf_data_pusher | 18,485,539,258,464 | 23657255fa6eaae9984e15956eb352ec626026fb | 5faa1998e4eba7f8e1da09b1a469ef268292b4f7 | /to_be_deprecated/wrf_data_pusher_v1.py | ebc79efdae6ec15ff93b95eb5a368d9666a30e26 | []
| no_license | https://github.com/CUrW-SL/curw_wrf_data_pusher | 01285a0a49abe4cafac847be9d26acf2f8303e47 | 9d4838d2726944abe421b42a6998ee3fc24d1930 | refs/heads/master | 2022-07-10T15:56:26.164786 | 2020-10-27T11:19:07 | 2020-10-27T11:19:07 | 203,581,602 | 1 | 0 | null | false | 2022-06-22T02:55:25 | 2019-08-21T12:40:36 | 2020-10-27T11:19:53 | 2022-06-22T02:55:25 | 281 | 0 | 0 | 4 | Python | false | false | #!/home/uwcc-admin/curw_wrf_data_pusher/venv/bin/python3
# before new directory convention
import traceback
from netCDF4 import Dataset
import numpy as np
import os
import json
from datetime import datetime, timedelta
import time
import paramiko
import multiprocessing as mp
import sys
import getopt
from db_adapter.base import get_Pool, destroy_Pool
from db_adapter.curw_fcst.source import get_source_id, add_source
from db_adapter.curw_fcst.variable import get_variable_id, add_variable
from db_adapter.curw_fcst.unit import get_unit_id, add_unit, UnitType
from db_adapter.curw_fcst.station import StationEnum, get_station_id, add_station, get_wrf_stations
from db_adapter.curw_fcst.timeseries import Timeseries
from db_adapter.constants import COMMON_DATE_TIME_FORMAT
from db_adapter.constants import (
CURW_FCST_DATABASE, CURW_FCST_PASSWORD, CURW_FCST_USERNAME, CURW_FCST_PORT,
CURW_FCST_HOST,
)
from db_adapter.logger import logger
SRI_LANKA_EXTENT = [79.5213, 5.91948, 81.879, 9.83506]
wrf_v3_stations = {}
email_content = {}
def usage():
usageText = """
Usage: python wrf_data_pusher.py -c "wrf_d1_18_config"
-h --help Show usage
-c --config Config file name
"""
print(usageText)
def read_attribute_from_config_file(attribute, config):
"""
:param attribute: key name of the config json file
:param config: loaded json file
:return:
"""
if attribute in config and (config[attribute] != ""):
return config[attribute]
else:
msg = "{} not specified in config file.".format(attribute)
logger.error(msg)
email_content[datetime.now().strftime(COMMON_DATE_TIME_FORMAT)] = msg
sys.exit(1)
def get_per_time_slot_values(prcp):
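    # RAINNC in the WRF output is cumulative precipitation, so differencing
    # consecutive time slices gives the rainfall accumulated in each interval.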
per_interval_prcp = (prcp[1:] - prcp[:-1])
return per_interval_prcp
def get_file_last_modified_time(file_path):
# returns local time (UTC + 5 30)
modified_time = time.gmtime(os.path.getmtime(file_path) + 19800)
return time.strftime('%Y-%m-%d %H:%M:%S', modified_time)
def datetime_utc_to_lk(timestamp_utc, shift_mins=0):
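    # Sri Lanka local time is UTC+05:30; shift_mins adds an extra offset.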
return timestamp_utc + timedelta(hours=5, minutes=30 + shift_mins)
def ssh_command(ssh, command):
ssh.invoke_shell()
stdin, stdout, stderr = ssh.exec_command(command)
    if stdout.channel.recv_exit_status() != 0:
return False
return True
# for line in stdout.readlines():
# logger.info(line)
# for line in stderr.readlines():
# logger.error(line)
def run_remote_command(host, user, key, command):
"""
:return: True if successful, False otherwise
"""
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname=host, username=user, key_filename=key)
return ssh_command(ssh, command)
except Exception as e:
msg = "Connection failed :: {} :: {}".format(host, command.split('2>&1')[0])
logger.error(msg)
email_content[datetime.now().strftime(COMMON_DATE_TIME_FORMAT)] = msg
return False
finally:
ssh.close()
def gen_kelani_basin_rfields(source_names, version, sim_tag, rfield_host, rfield_key, rfield_user):
"""
Generate kelani basin rfields
:param source_names: e.g.: WRF_A,WRF_C
:param version: e.g.: v4.0
:param rfield_host:
:param sim_tag: e.g.: "evening_18hrs"
:param rfield_key:
:param rfield_user:
:return: True if successful, False otherwise
"""
rfield_command_kelani_basin = "nohup ./curw_rfield_extractor/gen_kelani_basin_rfield.py -m {} -v {} -s {} " \
"2>&1 ./curw_rfield_extractor/rfield.log".format(source_names, version, sim_tag)
logger.info("Generate {} kelani basin rfield files.".format(source_names))
return run_remote_command(host=rfield_host, key=rfield_key, user=rfield_user,
command=rfield_command_kelani_basin)
def gen_all_d03_rfields(source_names, version, sim_tag, rfield_host, rfield_key, rfield_user):
"""
Generate d03 rfields for SL extent
:param source_names: e.g.: WRF_A,WRF_C
:param version: e.g.: v4.0
:param sim_tag: e.g.: "evening_18hrs"
:param rfield_host:
:param rfield_key:
:param rfield_user:
:return: True if successful, False otherwise
"""
rfield_command_d03 = "nohup ./curw_rfield_extractor/gen_SL_d03_rfield.py -m {} -v {} -s {} 2>&1 " \
"./curw_rfield_extractor/rfield.log".format(source_names, version, sim_tag)
logger.info("Generate {} d03 rfield files.".format(source_names))
return run_remote_command(host=rfield_host, key=rfield_key, user=rfield_user,
command=rfield_command_d03)
def gen_kelani_basin_rfields_locally(source_names, version, sim_tag):
"""
Generate kelani basin rfields
:param source_names: e.g.: WRF_A,WRF_C
:param version: e.g.: v4.0
:param sim_tag: e.g.: "evening_18hrs"
:return: True if successful, False otherwise
"""
rfield_command_kelani_basin = "nohup /home/uwcc-admin/curw_rfield_extractor/gen_kelani_basin_rfield.py -m {} -v {} -s {} " \
"2>&1 /home/uwcc-admin/curw_rfield_extractor/rfield.log".format(source_names, version,
sim_tag)
logger.info("Generate {} kelani basin rfield files.".format(source_names))
output = os.system(rfield_command_kelani_basin)
    if output != 0:
return False
return True
def gen_all_d03_rfields_locally(source_names, version, sim_tag):
"""
Generate d03 rfields for SL extent
:param source_names: e.g.: WRF_A,WRF_C
:param version: e.g.: v4.0
:param sim_tag: e.g.: "evening_18hrs"
:return: True if successful, False otherwise
"""
rfield_command_d03 = "nohup /home/uwcc-admin/curw_rfield_extractor/gen_SL_d03_rfield.py -m {} -v {} -s {} 2>&1 " \
"/home/uwcc-admin/curw_rfield_extractor/rfield.log".format(source_names, version, sim_tag)
logger.info("Generate {} d03 rfield files.".format(source_names))
output = os.system(rfield_command_d03)
    if output != 0:
return False
return True
def push_rainfall_to_db(ts, ts_data, tms_id, fgt, wrf_email_content):
"""
    :param ts: Timeseries instance used to write to the curw_fcst database
    :param ts_data: formatted rows of the form [tms_id, time, fgt, value]
    :param tms_id: timeseries id whose latest fgt is updated after the insert
    :param fgt: generated time stamped on the inserted values
    :param wrf_email_content: dict collecting error messages for the status email
    :return: the (possibly updated) wrf_email_content dict
"""
try:
ts.insert_formatted_data(ts_data, True) # upsert True
ts.update_latest_fgt(id_=tms_id, fgt=fgt)
except Exception:
time.sleep(5)
try:
ts.insert_formatted_data(ts_data, True) # upsert True
ts.update_latest_fgt(id_=tms_id, fgt=fgt)
except Exception:
msg = "Inserting the timseseries for tms_id {} and fgt {} failed.".format(ts_data[0][0], ts_data[0][2])
logger.error(msg)
traceback.print_exc()
wrf_email_content[datetime.now().strftime(COMMON_DATE_TIME_FORMAT)] = msg
finally:
return wrf_email_content
def read_netcdf_file(pool, rainnc_net_cdf_file_path, tms_meta, wrf_email_content):
"""
:param pool: database connection pool
    :param rainnc_net_cdf_file_path: path to the RAINNC netcdf file of the WRF run
    :param tms_meta: dict with sim_tag, version and the source/variable/unit ids;
        latitude and longitude are filled in here for every grid point
    :param wrf_email_content: dict collecting error messages for the status email
    :return: the (possibly updated) wrf_email_content dict
rainc_unit_info: mm
lat_unit_info: degree_north
time_unit_info: minutes since 2019-04-02T18:00:00
"""
if not os.path.exists(rainnc_net_cdf_file_path):
msg = 'no rainnc netcdf :: {}'.format(rainnc_net_cdf_file_path)
logger.warning(msg)
wrf_email_content[datetime.now().strftime(COMMON_DATE_TIME_FORMAT)] = msg
return wrf_email_content
else:
try:
"""
RAINNC netcdf data extraction
"""
fgt = get_file_last_modified_time(rainnc_net_cdf_file_path)
nnc_fid = Dataset(rainnc_net_cdf_file_path, mode='r')
time_unit_info = nnc_fid.variables['XTIME'].units
time_unit_info_list = time_unit_info.split(' ')
lats = nnc_fid.variables['XLAT'][0, :, 0]
lons = nnc_fid.variables['XLONG'][0, 0, :]
lon_min = lons[0].item()
lat_min = lats[0].item()
lon_max = lons[-1].item()
lat_max = lats[-1].item()
lat_inds = np.where((lats >= lat_min) & (lats <= lat_max))
lon_inds = np.where((lons >= lon_min) & (lons <= lon_max))
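            # note: the index masks are built from the min/max of the same lat/lon arrays,
            # so they currently select the full d03 domain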
rainnc = nnc_fid.variables['RAINNC'][:, lat_inds[0], lon_inds[0]]
times = nnc_fid.variables['XTIME'][:]
start_date = fgt
end_date = fgt
nnc_fid.close()
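            # RAINNC holds accumulated precipitation; convert it into per-time-slot rainfall values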
diff = get_per_time_slot_values(rainnc)
width = len(lons)
height = len(lats)
ts = Timeseries(pool)
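            # walk every grid point: make sure a station and a run entry exist for it,
            # then build and push its rainfall timeseries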
for y in range(height):
for x in range(width):
lat = float('%.6f' % lats[y])
lon = float('%.6f' % lons[x])
tms_meta['latitude'] = str(lat)
tms_meta['longitude'] = str(lon)
station_prefix = 'wrf_{}_{}'.format(lat, lon)
station_id = wrf_v3_stations.get(station_prefix)
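                    # register this grid point as a WRF station the first time it is seen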
if station_id is None:
add_station(pool=pool, name=station_prefix, latitude=lat, longitude=lon,
description="WRF point", station_type=StationEnum.WRF)
station_id = get_station_id(pool=pool, latitude=lat, longitude=lon,
station_type=StationEnum.WRF)
tms_id = ts.get_timeseries_id_if_exists(tms_meta)
if tms_id is None:
tms_id = ts.generate_timeseries_id(tms_meta)
run_meta = {
'tms_id': tms_id,
'sim_tag': tms_meta['sim_tag'],
'start_date': start_date,
'end_date': end_date,
'station_id': station_id,
'source_id': tms_meta['source_id'],
'unit_id': tms_meta['unit_id'],
'variable_id': tms_meta['variable_id']
}
try:
ts.insert_run(run_meta)
except Exception:
logger.error("Exception occurred while inserting run entry {}".format(run_meta))
traceback.print_exc()
data_list = []
# generate timeseries for each station
for i in range(len(diff)):
ts_time = datetime.strptime(time_unit_info_list[2], '%Y-%m-%dT%H:%M:%S') + timedelta(
minutes=times[i + 1].item())
t = datetime_utc_to_lk(ts_time, shift_mins=0)
data_list.append([tms_id, t.strftime('%Y-%m-%d %H:%M:%S'), fgt, float('%.3f' % diff[i, y, x])])
push_rainfall_to_db(ts=ts, ts_data=data_list, tms_id=tms_id, fgt=fgt,
wrf_email_content=wrf_email_content)
        except Exception:
            msg = "Error while reading the netcdf file at {}.".format(rainnc_net_cdf_file_path)
logger.error(msg)
traceback.print_exc()
wrf_email_content[datetime.now().strftime(COMMON_DATE_TIME_FORMAT)] = msg
finally:
return wrf_email_content


def extract_wrf_data(wrf_system, config_data, tms_meta):
logger.info("-- {} --".format(wrf_system))
wrf_email_content = {}
source_name = "{}_{}".format(config_data['model'], wrf_system)
source_id = None
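    # look up the source id for this WRF system, retrying once, and create the source if it does not exist yet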
try:
source_id = get_source_id(pool=pool, model=source_name, version=tms_meta['version'])
except Exception:
try:
time.sleep(5)
source_id = get_source_id(pool=pool, model=source_name, version=tms_meta['version'])
except Exception:
msg = "Exception occurred while loading source meta data for WRF_{} from database.".format(wrf_system)
logger.error(msg)
wrf_email_content[datetime.now().strftime(COMMON_DATE_TIME_FORMAT)] = msg
return wrf_email_content
if source_id is None:
try:
add_source(pool=pool, model=source_name, version=tms_meta['version'])
source_id = get_source_id(pool=pool, model=source_name, version=tms_meta['version'])
except Exception:
msg = "Exception occurred while addding new source {} {} to database.".format(source_name,
tms_meta['version'])
logger.error(msg)
wrf_email_content[datetime.now().strftime(COMMON_DATE_TIME_FORMAT)] = msg
return wrf_email_content
tms_meta['model'] = source_name
tms_meta['source_id'] = source_id
for date in config_data['dates']:
# /wrf_nfs/wrf/4.0/18/A/2019-07-30/d03_RAINNC.nc
output_dir = os.path.join(config_data['wrf_dir'], config_data['version'], config_data['gfs_data_hour'],
wrf_system, date)
rainnc_net_cdf_file = 'd03_RAINNC.nc'
rainnc_net_cdf_file_path = os.path.join(output_dir, rainnc_net_cdf_file)
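        # NOTE: the function returns inside the loop, so only the first date in config_data['dates'] is processed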
return read_netcdf_file(pool=pool, rainnc_net_cdf_file_path=rainnc_net_cdf_file_path, tms_meta=tms_meta,
wrf_email_content=wrf_email_content)
if __name__ == "__main__":
"""
Config.json
{
"wrf_dir": "/wrf_nfs/wrf",
"gfs_data_hour": "18",
"version": "4.0",
"model": "WRF",
"wrf_systems": "A,C,E,SE",
"run_date": ["2019-04-18","2019-04-17"],
"sim_tag": "gfs_d1_18",
"unit": "mm",
"unit_type": "Accumulative",
"variable": "Precipitation",
"rfield_host": "233.646.456.78",
"rfield_user": "blah",
"rfield_key": "/home/uwcc-admin/.ssh/blah"
}
/wrf_nfs/wrf/4.0/18/A/2019-07-30/d03_RAINNC.nc
tms_meta = {
'sim_tag' : sim_tag,
'latitude' : latitude,
'longitude' : longitude,
'model' : model,
'version' : version,
'variable' : variable,
'unit' : unit,
'unit_type' : unit_type
}
"""
try:
config_name = None
try:
            # -h/--help takes no argument; -c/--config expects the config file name (without the .json extension)
            opts, args = getopt.getopt(sys.argv[1:], "hc:",
                                       ["help", "config="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-c", "--config"):
config_name = arg.strip()
if config_name is None:
msg = "Config file name is not specified."
logger.error(msg)
email_content[datetime.now().strftime(COMMON_DATE_TIME_FORMAT)] = msg
sys.exit(1)
config = json.loads(open('{}.json'.format(config_name)).read())
# source details
wrf_dir = read_attribute_from_config_file('wrf_dir', config)
model = read_attribute_from_config_file('model', config)
version = read_attribute_from_config_file('version', config)
gfs_data_hour = read_attribute_from_config_file('gfs_data_hour', config)
wrf_systems = read_attribute_from_config_file('wrf_systems', config)
wrf_systems_list = wrf_systems.split(',')
# sim_tag
sim_tag = read_attribute_from_config_file('sim_tag', config)
# unit details
unit = read_attribute_from_config_file('unit', config)
unit_type = UnitType.getType(read_attribute_from_config_file('unit_type', config))
# variable details
variable = read_attribute_from_config_file('variable', config)
# rfield params
# rfield_host = read_attribute_from_config_file('rfield_host', config)
# rfield_user = read_attribute_from_config_file('rfield_user', config)
# rfield_key = read_attribute_from_config_file('rfield_key', config)
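        # run dates: use the configured 'run_date' list when present, otherwise default to the current
        # date shifted by +5:30 (Sri Lanka time, assuming the host clock is UTC)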
dates = []
if 'run_date' in config and (config['run_date'] != ""):
dates = config['run_date']
else:
dates.append((datetime.now() + timedelta(hours=5, minutes=30)).strftime('%Y-%m-%d'))
pool = get_Pool(host=CURW_FCST_HOST, port=CURW_FCST_PORT, user=CURW_FCST_USERNAME, password=CURW_FCST_PASSWORD,
db=CURW_FCST_DATABASE)
try:
wrf_v3_stations = get_wrf_stations(pool)
variable_id = get_variable_id(pool=pool, variable=variable)
unit_id = get_unit_id(pool=pool, unit=unit, unit_type=unit_type)
except Exception:
msg = "Exception occurred while loading common metadata from database."
logger.error(msg)
email_content[datetime.now().strftime(COMMON_DATE_TIME_FORMAT)] = msg
sys.exit(1)
tms_meta = {
'sim_tag': sim_tag,
'version': version,
'variable': variable,
'unit': unit,
'unit_type': unit_type.value,
'variable_id': variable_id,
'unit_id': unit_id
}
config_data = {
'model': model,
'version': version,
'dates': dates,
'wrf_dir': wrf_dir,
'gfs_data_hour': gfs_data_hour
}
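        # extract every configured WRF system (e.g. A, C, E, SE) in parallel, one worker per system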
mp_pool = mp.Pool(mp.cpu_count())
# wrf_results = mp_pool.starmap_async(extract_wrf_data,
# [(wrf_system, config_data, tms_meta) for wrf_system in wrf_systems_list]).get()
wrf_results = mp_pool.starmap(extract_wrf_data,
[(wrf_system, config_data, tms_meta) for wrf_system in
wrf_systems_list])
source_list = ""
for wrf_system in wrf_systems_list:
source_list += "WRF_{},".format(wrf_system)
source_list = source_list[:-1]
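        # comma-separated source names, e.g. "WRF_A,WRF_C,WRF_E,WRF_SE", passed to the rfield generators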
# kelani_basin_rfield_status = gen_kelani_basin_rfields(source_names=source_list, version=version, sim_tag=sim_tag,
# rfield_host=rfield_host, rfield_key=rfield_key, rfield_user=rfield_user)
kelani_basin_rfield_status = gen_kelani_basin_rfields_locally(source_names=source_list, version=version,
sim_tag=sim_tag)
if not kelani_basin_rfield_status:
email_content[datetime.now().strftime(
COMMON_DATE_TIME_FORMAT)] = "Kelani basin rfiled generation for {} failed".format(source_list)
# d03_rfield_status = gen_all_d03_rfields(source_names=source_list, version=version, sim_tag=sim_tag,
# rfield_host=rfield_host, rfield_key=rfield_key, rfield_user=rfield_user)
d03_rfield_status = gen_all_d03_rfields_locally(source_names=source_list, version=version, sim_tag=sim_tag)
if not d03_rfield_status:
email_content[
datetime.now().strftime(COMMON_DATE_TIME_FORMAT)] = "SL d03 rfiled generation for {} failed".format(
source_list)
except Exception as e:
msg = 'Multiprocessing error.'
logger.error(msg)
email_content[datetime.now().strftime(COMMON_DATE_TIME_FORMAT)] = msg
traceback.print_exc()
finally:
mp_pool.close()
destroy_Pool(pool)
logger.info("Process finished.")
logger.info("Email Content {}".format(json.dumps(email_content)))
logger.info("############ wrf extraction results ########## ")
        for result in wrf_results:
            logger.info(result)
| UTF-8 | Python | false | false | 20,263 | py | 16 | wrf_data_pusher_v1.py | 13 | 0.557913 | 0.547056 | 0 | 555 | 35.508108 | 128 |
vijaykumarnegi/advanced-netops | 12,704,513,284,415 | 20083054028d2907cc188ef29d0ce3e0097d9d2b | 5a0fb10f4aabfb40a927929adf9035e48eb7346e | /Config Builder/bgp_underlay_trainer.py | 0965fbf962af507bc897dc80070aa05747222905 | []
| no_license | https://github.com/vijaykumarnegi/advanced-netops | 45fee9dfd736a56d105b50a28acf1f6df703e484 | 68d1408e447f0bb91b64cf69fa75062d87215c47 | refs/heads/main | 2023-06-17T10:52:40.589585 | 2021-07-15T13:18:40 | 2021-07-15T13:18:40 | 385,142,362 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import yaml
from cvplibrary import CVPGlobalVariables, GlobalVariableNames
labels = CVPGlobalVariables.getValue(GlobalVariableNames.CVP_ALL_LABELS)
hostname = [x for x in labels if 'hostname' in x][0]
hostname = hostname.split(':', 1)
hostname = hostname[1]
underlay_yaml = """
global:
DC1:
spine_ASN: 65100
spine_lo0:
- 192.168.101.101
- 192.168.101.102
- 192.168.101.103
DC2:
spine_ASN: 65200
spine_lo0:
- 192.168.201.101
- 192.168.201.102
- 192.168.201.103
MTU: 9214
spine1-DC1:
interfaces:
loopback0:
ipv4: 192.168.101.101
mask: 32
Ethernet2:
ipv4: 192.168.103.1
mask: 31
Ethernet3:
ipv4: 192.168.103.7
mask: 31
Ethernet4:
ipv4: 192.168.103.13
mask: 31
Ethernet5:
ipv4: 192.168.103.19
mask: 31
Ethernet6:
ipv4: 192.168.103.25
mask: 31
Ethernet7:
ipv4: 192.168.103.31
mask: 31
BGP:
ASN: 65100
spine2-DC1:
interfaces:
loopback0:
ipv4: 192.168.101.102
mask: 32
Ethernet2:
ipv4: 192.168.103.3
mask: 31
Ethernet3:
ipv4: 192.168.103.9
mask: 31
Ethernet4:
ipv4: 192.168.103.15
mask: 31
Ethernet5:
ipv4: 192.168.103.21
mask: 31
Ethernet6:
ipv4: 192.168.103.27
mask: 31
Ethernet7:
ipv4: 192.168.103.33
mask: 31
BGP:
ASN: 65100
spine3-DC1:
interfaces:
loopback0:
ipv4: 192.168.101.103
mask: 32
Ethernet2:
ipv4: 192.168.103.5
mask: 31
Ethernet3:
ipv4: 192.168.103.11
mask: 31
Ethernet4:
ipv4: 192.168.103.17
mask: 31
Ethernet5:
ipv4: 192.168.103.23
mask: 31
Ethernet6:
ipv4: 192.168.103.29
mask: 31
Ethernet7:
ipv4: 192.168.103.35
mask: 31
BGP:
ASN: 65100
leaf1-DC1:
interfaces:
loopback0:
ipv4: 192.168.101.11
mask: 32
loopback1:
ipv4: 192.168.102.11
mask: 32
Ethernet3:
ipv4: 192.168.103.0
mask: 31
Ethernet4:
ipv4: 192.168.103.2
mask: 31
Ethernet5:
ipv4: 192.168.103.4
mask: 31
BGP:
ASN: 65101
spine-peers:
- 192.168.103.1
- 192.168.103.3
- 192.168.103.5
spine-ASN: 65100
MLAG: Odd
leaf2-DC1:
interfaces:
loopback0:
ipv4: 192.168.101.12
mask: 32
loopback1:
ipv4: 192.168.102.11
mask: 32
Ethernet3:
ipv4: 192.168.103.6
mask: 31
Ethernet4:
ipv4: 192.168.103.8
mask: 31
Ethernet5:
ipv4: 192.168.103.10
mask: 31
BGP:
ASN: 65101
spine-peers:
- 192.168.103.7
- 192.168.103.9
- 192.168.103.11
spine-ASN: 65100
MLAG: Even
leaf3-DC1:
interfaces:
loopback0:
ipv4: 192.168.101.13
mask: 32
loopback1:
ipv4: 192.168.102.13
mask: 32
Ethernet3:
ipv4: 192.168.103.12
mask: 31
Ethernet4:
ipv4: 192.168.103.14
mask: 31
Ethernet5:
ipv4: 192.168.103.16
mask: 31
BGP:
ASN: 65102
spine-peers:
- 192.168.103.13
- 192.168.103.15
- 192.168.103.17
spine-ASN: 65100
MLAG: Even
leaf4-DC1:
interfaces:
loopback0:
ipv4: 192.168.101.14
mask: 32
loopback1:
ipv4: 192.168.102.13
mask: 32
Ethernet3:
ipv4: 192.168.103.18
mask: 31
Ethernet4:
ipv4: 192.168.103.20
mask: 31
Ethernet5:
ipv4: 192.168.103.22
mask: 31
BGP:
ASN: 65102
spine-peers:
- 192.168.103.19
- 192.168.103.21
- 192.168.103.23
spine-ASN: 65100
MLAG: Odd
borderleaf1-DC1:
interfaces:
loopback0:
ipv4: 192.168.101.21
mask: 32
loopback1:
ipv4: 192.168.102.21
mask: 32
Ethernet3:
ipv4: 192.168.103.24
mask: 31
Ethernet4:
ipv4: 192.168.103.26
mask: 31
Ethernet5:
ipv4: 192.168.103.28
mask: 31
Ethernet12:
ipv4: 192.168.254.0
mask: 31
BGP:
ASN: 65103
spine-peers:
- 192.168.103.25
- 192.168.103.27
- 192.168.103.29
spine-ASN: 65100
DCI-peers:
- 192.168.254.1
MLAG: Odd
borderleaf2-DC1:
interfaces:
loopback0:
ipv4: 192.168.101.21
mask: 32
loopback1:
ipv4: 192.168.102.21
mask: 32
Ethernet3:
ipv4: 192.168.103.30
mask: 31
Ethernet4:
ipv4: 192.168.103.32
mask: 31
Ethernet5:
ipv4: 192.168.103.34
mask: 31
Ethernet12:
ipv4: 192.168.254.2
mask: 31
BGP:
ASN: 65103
spine-peers:
- 192.168.103.31
- 192.168.103.33
- 192.168.103.35
spine-ASN: 65100
DCI-peers:
- 192.168.254.3
MLAG: Even
spine1-DC2:
interfaces:
loopback0:
ipv4: 192.168.201.101
mask: 32
Ethernet2:
ipv4: 192.168.203.1
mask: 31
Ethernet3:
ipv4: 192.168.203.7
mask: 31
Ethernet4:
ipv4: 192.168.203.13
mask: 31
Ethernet5:
ipv4: 192.168.203.19
mask: 31
Ethernet6:
ipv4: 192.168.203.25
mask: 31
Ethernet7:
ipv4: 192.168.203.31
mask: 31
BGP:
ASN: 65200
spine2-DC2:
interfaces:
loopback0:
ipv4: 192.168.201.102
mask: 32
Ethernet2:
ipv4: 192.168.203.3
mask: 31
Ethernet3:
ipv4: 192.168.203.9
mask: 31
Ethernet4:
ipv4: 192.168.203.15
mask: 31
Ethernet5:
ipv4: 192.168.203.21
mask: 31
Ethernet6:
ipv4: 192.168.203.27
mask: 31
Ethernet7:
ipv4: 192.168.203.33
mask: 31
BGP:
ASN: 65200
spine3-DC2:
interfaces:
loopback0:
ipv4: 192.168.201.103
mask: 32
Ethernet2:
ipv4: 192.168.203.5
mask: 31
Ethernet3:
ipv4: 192.168.203.11
mask: 31
Ethernet4:
ipv4: 192.168.203.17
mask: 31
Ethernet5:
ipv4: 192.168.203.23
mask: 31
Ethernet6:
ipv4: 192.168.203.29
mask: 31
Ethernet7:
ipv4: 192.168.203.35
mask: 31
BGP:
ASN: 65200
leaf1-DC2:
interfaces:
loopback0:
ipv4: 192.168.201.11
mask: 32
loopback1:
ipv4: 192.168.202.11
mask: 32
Ethernet3:
ipv4: 192.168.203.0
mask: 31
Ethernet4:
ipv4: 192.168.203.2
mask: 31
Ethernet5:
ipv4: 192.168.203.4
mask: 31
BGP:
ASN: 65201
spine-peers:
- 192.168.203.1
- 192.168.203.3
- 192.168.203.5
spine-ASN: 65200
MLAG: Odd
leaf2-DC2:
interfaces:
loopback0:
ipv4: 192.168.201.12
mask: 32
loopback1:
ipv4: 192.168.202.11
mask: 32
Ethernet3:
ipv4: 192.168.203.6
mask: 31
Ethernet4:
ipv4: 192.168.203.8
mask: 31
Ethernet5:
ipv4: 192.168.203.10
mask: 31
BGP:
ASN: 65201
spine-peers:
- 192.168.203.7
- 192.168.203.9
- 192.168.203.11
spine-ASN: 65200
MLAG: Even
leaf3-DC2:
interfaces:
loopback0:
ipv4: 192.168.201.13
mask: 32
loopback1:
ipv4: 192.168.202.13
mask: 32
Ethernet3:
ipv4: 192.168.203.12
mask: 31
Ethernet4:
ipv4: 192.168.203.14
mask: 31
Ethernet5:
ipv4: 192.168.203.16
mask: 31
BGP:
ASN: 65202
spine-peers:
- 192.168.203.13
- 192.168.203.15
- 192.168.203.17
spine-ASN: 65200
MLAG: Odd
leaf4-DC2:
interfaces:
loopback0:
ipv4: 192.168.201.14
mask: 32
loopback1:
ipv4: 192.168.202.13
mask: 32
Ethernet3:
ipv4: 192.168.203.18
mask: 31
Ethernet4:
ipv4: 192.168.203.20
mask: 31
Ethernet5:
ipv4: 192.168.203.22
mask: 31
BGP:
ASN: 65202
spine-peers:
- 192.168.203.19
- 192.168.203.21
- 192.168.203.23
spine-ASN: 65200
MLAG: Even
borderleaf1-DC2:
interfaces:
loopback0:
ipv4: 192.168.201.21
mask: 32
loopback1:
ipv4: 192.168.202.21
mask: 32
Ethernet3:
ipv4: 192.168.203.24
mask: 31
Ethernet4:
ipv4: 192.168.203.26
mask: 31
Ethernet5:
ipv4: 192.168.203.28
mask: 31
Ethernet12:
ipv4: 192.168.254.4
mask: 31
BGP:
ASN: 65203
spine-peers:
- 192.168.203.25
- 192.168.203.27
- 192.168.203.29
spine-ASN: 65200
DCI-peers:
- 192.168.254.5
MLAG: Odd
borderleaf2-DC2:
interfaces:
loopback0:
ipv4: 192.168.201.21
mask: 32
loopback1:
ipv4: 192.168.202.21
mask: 32
Ethernet3:
ipv4: 192.168.203.30
mask: 31
Ethernet4:
ipv4: 192.168.203.32
mask: 31
Ethernet5:
ipv4: 192.168.203.34
mask: 31
Ethernet12:
ipv4: 192.168.254.6
mask: 31
BGP:
ASN: 65203
spine-peers:
- 192.168.203.31
- 192.168.203.33
- 192.168.203.35
spine-ASN: 65200
DCI-peers:
- 192.168.254.7
MLAG: Even
"""
route_maps = """
ip prefix-list LOOPBACK
seq 10 permit 192.168.101.0/24 eq 32
seq 20 permit 192.168.102.0/24 eq 32
seq 30 permit 192.168.201.0/24 eq 32
seq 40 permit 192.168.202.0/24 eq 32
seq 50 permit 192.168.253.0/24 eq 32
route-map LOOPBACK permit 10
match ip address prefix-list LOOPBACK
"""
bgp_peer_filter = """
peer-filter LEAF-AS-RANGE
10 match as-range 65000-65535 result accept
"""
bgp_vars_config = """
no bgp default ipv4-unicast
maximum-paths 3
distance bgp 20 200 200
"""
underlay_dict = yaml.load(underlay_yaml)
MTU = underlay_dict['global']['MTU']
def generate_interface_config(hostname):
for interface in underlay_dict[hostname]['interfaces']:
print("interface %s") % (interface)
print(" ip address %s/%s") % (underlay_dict[hostname]['interfaces'][interface]['ipv4'], underlay_dict[hostname]['interfaces'][interface]['mask'])
if 'thernet' in interface:
print(" mtu %s") % MTU
print(" no switchport")
def gen_bgp_config_leaf(hostname):
ASN = underlay_dict[hostname]['BGP']['ASN']
router_id = underlay_dict[hostname]['interfaces']['loopback0']['ipv4']
spine_peers = underlay_dict[hostname]['BGP']['spine-peers']
MLAG = underlay_dict[hostname]['MLAG']
spine_ASN = underlay_dict[hostname]['BGP']['spine-ASN']
print("")
print(route_maps)
print("")
print("router bgp %s") % ASN
print(" router-id %s") % router_id
print(" no bgp default ipv4-unicast")
print(" distance bgp 20 200 200")
print(" maximum-paths 3")
print(" neighbor LEAF_Peer peer group")
print(" neighbor LEAF_Peer remote-as %s") % ASN
print(" neighbor LEAF_Peer next-hop-self")
print(" neighbor LEAF_Peer maximum-routes 12000")
print(" neighbor SPINE_Underlay peer group")
print(" neighbor SPINE_Underlay remote-as %s") % spine_ASN
print(" neighbor SPINE_Underlay send-community")
print(" neighbor SPINE_Underlay maximum-routes 12000")
print(" neighbor EVPN peer group")
print(" neighbor EVPN remote-as %s") % spine_ASN
print(" neighbor EVPN send-community")
print(" neighbor EVPN source-interface loopback0")
print(" neighbor EVPN maximum-routes 0")
for peer in spine_peers:
print(" neighbor %s peer group SPINE_Underlay") % peer
if MLAG == "Odd":
print(" neighbor 192.168.255.2 peer group LEAF_Peer")
if MLAG == "Even":
print(" neighbor 192.168.255.1 peer group LEAF_Peer")
for switch in underlay_dict:
if "spine" in switch:
if "DC1" in hostname:
if "DC1" in switch:
lo0_peer = underlay_dict[switch]['interfaces']['loopback0']['ipv4']
print(" neighbor %s peer group EVPN") % lo0_peer
if "DC2" in hostname:
if "DC2" in switch:
lo0_peer = underlay_dict[switch]['interfaces']['loopback0']['ipv4']
print(" neighbor %s peer group EVPN") % lo0_peer
print(" address-family ipv4")
print(" neighbor SPINE_Underlay activate")
print(" neighbor LEAF_Peer activate")
print(" redistribute connected route-map LOOPBACK")
print(" address-family evpn")
print(" neighbor EVPN activate")
print(" redistribute connected")
def gen_bgp_config_spine(hostname):
print(route_maps)
print(bgp_peer_filter)
print("router bgp %s") % underlay_dict[hostname]['BGP']['ASN']
print(" router-id %s") % underlay_dict[hostname]['interfaces']['loopback0']['ipv4']
print(bgp_vars_config)
print(" bgp listen range 192.168.103.0/24 peer-group LEAF_Underlay peer-filter LEAF-AS-RANGE")
print(" bgp listen range 192.168.203.0/24 peer-group LEAF_Underlay peer-filter LEAF-AS-RANGE")
print(" bgp listen range 192.168.101.0/24 peer-group EVPN peer-filter LEAF-AS-RANGE")
print(" bgp listen range 192.168.201.0/24 peer-group EVPN peer-filter LEAF-AS-RANGE")
print(" neighbor LEAF_Underlay peer group")
print(" neighbor LEAF_Underlay send-community")
print(" neighbor LEAF_Underlay maximum-routes 12000")
print(" neighbor EVPN peer group")
print(" neighbor EVPN send-community")
print(" neighbor EVPN maximum-routes 0")
print(" redistribute connected route-map LOOPBACK")
print(" address-family ipv4")
print(" neighbor LEAF_Underlay activate")
print(" redistribute connected route-map LOOPBACK")
print(" address-family evpn")
print(" neighbor EVPN activate")
print(" redistribute connected")
if 'spine' in hostname or 'leaf' in hostname:
print("service routing protocols model multi-agent")
print("")
generate_interface_config(hostname)
if 'leaf' in hostname:
gen_bgp_config_leaf(hostname)
if 'spine' in hostname:
gen_bgp_config_spine(hostname)
| UTF-8 | Python | false | false | 13,811 | py | 6 | bgp_underlay_trainer.py | 3 | 0.582289 | 0.401274 | 0 | 638 | 20.647335 | 150 |
NKrvavica/Advent-of-Code-2020 | 13,950,053,792,742 | 1c5804c24b30b56cc463acecc76a5e8b1c5d7454 | ee68b7bc8e7b72b5a61a73fd8795f5783f0d4f62 | /Day08/day08.py | 72e16bf13afc2b68140c141cc552510f5ed62b3c | []
| no_license | https://github.com/NKrvavica/Advent-of-Code-2020 | e6faa4f7526be272daf01405bbf3aa35909e4ad2 | 1aa8c7aa4840f06490814e5418fc44b85140eac8 | refs/heads/main | 2023-02-04T21:11:43.130227 | 2020-12-26T15:51:53 | 2020-12-26T15:51:53 | 317,564,094 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 8 09:19:05 2020
@author: Nino
"""
import pandas as pd
# load data
program = pd.read_csv('input.txt', header=None, sep=' ',
names=['op', 'arg'])
program_len = len(program)
# run the program
def run_program(program, program_len):
idx, accumulator = 0, 0
program['visited']= False
while True:
op, arg, visited = program.loc[idx]
if visited:
return program, accumulator, idx
else:
program.loc[idx, 'visited'] = True
if op == 'nop':
idx += 1
elif op == 'acc':
accumulator += arg
idx += 1
else: # op == 'jmp'
idx += arg
if idx >= program_len: # this is needed for part 2
print('Warning! Attempting to run the instruction below the last',
'instruction in the file')
return program, accumulator, idx
# part 1
program, accumulator, idx = run_program(program, program_len)
print(f'Solution to part 1: {accumulator} \n')
# part 2
def attempt_brute_forces_changes(program):
program_len = len(program)
for i, row in program.iterrows():
op, arg, visited = row
if op == 'nop':
program_copy = program.copy()
program_copy.loc[i, 'op'] = 'jmp'
print(f'attempt changing line {i}, from nop to jmp')
elif op == 'jmp':
program_copy = program.copy()
program_copy.loc[i, 'op'] = 'nop'
print(f'attempt changing line {i}, from jmp to nop,')
else:
continue
program_copy, accumulator, idx = run_program(program_copy, program_len)
# print(f'program terminated at line {idx}')
if idx >= program_len:
return accumulator
accumulator = attempt_brute_forces_changes(program)
print(f'Solution to part 2: {accumulator}')
| UTF-8 | Python | false | false | 1,926 | py | 33 | day08.py | 31 | 0.555036 | 0.544133 | 0 | 72 | 25.75 | 79 |
adehad/klustaviewa | 18,073,222,399,174 | ab08d0e9a10d34bee47cba595a325885aeebb220 | 158a5effa9247ec13845c2da9c734c9f22fc01b8 | /postinstall.py | cb20d6befb57872f728c3d6dbdb7e31fd34a89e5 | [
"BSD-3-Clause"
]
| permissive | https://github.com/adehad/klustaviewa | 5b334de15664c463b8754e0dcf22dbaf4ed13d1f | d1cf3ddb341f5cc6273771a20f40e1f4cc9b31d1 | refs/heads/master | 2021-01-01T16:31:54.856613 | 2017-03-27T13:00:49 | 2017-03-27T13:00:49 | 97,851,396 | 0 | 0 | null | true | 2017-07-20T15:30:08 | 2017-07-20T15:30:08 | 2017-02-04T02:50:34 | 2017-03-27T13:00:50 | 3,406 | 0 | 0 | 0 | null | null | null | import os
import sys
import shutil
import klustaviewa
DESKTOP_FOLDER = get_special_folder_path("CSIDL_DESKTOPDIRECTORY")
STARTMENU_FOLDER = get_special_folder_path("CSIDL_STARTMENU")
NAME = 'KlustaViewa.lnk'
if sys.argv[1] == '-install':
create_shortcut(
os.path.join(sys.prefix, 'pythonw.exe'), # program
'KlustaViewa: graphical user interface for semi-automatic spike sorting',
NAME, # filename
os.path.join(os.path.dirname(klustaviewa.__file__), 'scripts/runklustaviewa.py'),
'', # workdir
# to create ICO from PNG: http://www.icoconverter.com/
os.path.join(os.path.dirname(klustaviewa.__file__), 'icons/favicon.ico'), # iconpath
)
# move shortcut from current directory to folders
shutil.copyfile(os.path.join(os.getcwd(), NAME),
os.path.join(DESKTOP_FOLDER, NAME))
shutil.move(os.path.join(os.getcwd(), NAME),
os.path.join(STARTMENU_FOLDER, NAME))
# tell windows installer that we created another
# file which should be deleted on uninstallation
file_created(os.path.join(DESKTOP_FOLDER, NAME))
file_created(os.path.join(STARTMENU_FOLDER, NAME))
# This will be run on uninstallation. Nothing to do.
if sys.argv[1] == '-remove':
pass
| UTF-8 | Python | false | false | 1,284 | py | 103 | postinstall.py | 75 | 0.669003 | 0.667445 | 0 | 33 | 37.606061 | 92 |
hgq521/autest | 5,239,860,103,137 | bb200692a15c49680096f0f8d6785d84be3d0108 | c216445777dc269418550382d455fef9137083b1 | /mytime/mytime.py | f3f10014187775e8d74cc20e495708f687d2f15a | []
| no_license | https://github.com/hgq521/autest | eb801ea1adc2a7573ecddd1a5b457ff50a2bf86f | 15fe62c9bd99b36cd68a736290b08804d13c8e64 | refs/heads/master | 2020-08-02T17:59:13.632616 | 2019-11-16T10:40:27 | 2019-11-16T10:40:27 | 211,456,341 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
def day_hours():
return 24*60*60
def hour_sec():
return 3600
def today_hour(hour): #24小时制
sec = time.time()
return (sec - sec % day_hours() + hour * hour_sec() - 8 * hour_sec())
def hour(sec):
tmp = time.localtime(sec)
#return sec - tmp.tm_min * 60 - tmp.tm_sec
return sec - sec % hour_sec()
def wday(sec):
tt = time.localtime(sec)
return (tt.tm_wday+1)
if __name__ == '__main__':
sec = today_hour(5)
sec = time.time()
print(sec)
sec = hour(sec)
print(time.localtime(sec))
print(wday(sec))
| UTF-8 | Python | false | false | 531 | py | 21 | mytime.py | 20 | 0.622857 | 0.590476 | 0 | 28 | 17.714286 | 70 |
DipendraDLS/Python_OOP | 5,059,471,494,830 | de6366a9695f9f980158d8d577482dd923683c32 | 3b50c16eeb4b4cca7bc75cba6d36a21e4e288a07 | /01. Class_and_Object/02. Simple_Class_&_Object.py | 7408be6fcf9ddbf855c902b59fd85476b5152c0c | []
| no_license | https://github.com/DipendraDLS/Python_OOP | 1349dbb1f87eb16c18059c774b9e7b34b04edc29 | b253801270530cdda8c70f1ae445ebc1cc52694e | refs/heads/master | 2023-06-24T05:29:32.066673 | 2021-07-17T06:26:59 | 2021-07-17T06:26:59 | 380,717,255 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Creating the Class named 'Mobile'
class Mobile:
# Initializing the member variable. This is self calling constructor whenever the object is created this constructor is self called.
def __init__(self):
self.model = 'Redmi Note 5 Pro'
# Defining the member function/method
def show_model(self):
print('Model Name: ', self.model)
obj_1 = Mobile() # Creating the object named as obj_1 for class Mobile.
obj_1.show_model() # Accessing the method using the object which is inside the 'Mobile' class.
obj_1.model # Accessing the member variable using object. | UTF-8 | Python | false | false | 661 | py | 67 | 02. Simple_Class_&_Object.py | 66 | 0.636914 | 0.629349 | 0 | 16 | 40.3125 | 136 |
jrsk23/BridgeIT | 6,837,587,948,557 | edc26c135ad820522807d5e5063cf632e7c7ad92 | 6fa180d9e0c2073dec7604e07c97332b4c4de7f9 | /backend/geotabs/migrations/0001_initial.py | ed497c89c1be39b6ecf1ffaa3f07e4eb86c83e84 | []
| no_license | https://github.com/jrsk23/BridgeIT | 1347c355953668d899ec267e7fe1c72c666395ec | 021bd482690a8de89b2cfbcd91a9bd393ef67124 | refs/heads/main | 2023-03-15T04:41:18.802417 | 2021-01-12T03:35:56 | 2021-01-12T03:35:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.0 on 2021-01-09 05:26
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Hazardous',
fields=[
('id', models.TextField(primary_key=True, serialize=False)),
('lat', models.DecimalField(decimal_places=4, max_digits=10)),
('lon', models.DecimalField(decimal_places=4, max_digits=10)),
('city', models.CharField(max_length=50)),
('severity_score', models.DecimalField(decimal_places=4, max_digits=10)),
('incidents_total', models.IntegerField()),
],
options={
'ordering': ('-incidents_total',),
},
),
]
| UTF-8 | Python | false | false | 844 | py | 73 | 0001_initial.py | 70 | 0.541469 | 0.511848 | 0 | 28 | 29.142857 | 89 |
AlexHerry/studyPython | 7,258,494,731,119 | 7e8c306e607326e1d933c17154166f1f620b6072 | 97659ab2a9a6aed6f6393d3578c1f457dc99cd2d | /equ.py | 0c7899dfe8699753ca679883bf50ff642ba56312 | []
| no_license | https://github.com/AlexHerry/studyPython | 4a58fb616821ed2fc10718d43b1b703b59dd071b | 0121e1876d2e68b55af043d67aa4a78362f128a4 | refs/heads/master | 2020-07-05T12:47:07.847008 | 2019-08-16T03:22:55 | 2019-08-16T03:22:55 | 202,651,856 | 0 | 0 | null | false | 2019-11-01T02:42:35 | 2019-08-16T03:27:40 | 2019-08-16T03:39:24 | 2019-11-01T02:42:26 | 16 | 0 | 0 | 1 | Python | false | false | import math
def quadratic(a, b, c):
x = b ** 2 - (4 * a * c)
if x < 0:
return '不存在实数根'
else:
x1 = (-b + math.sqrt(x)) / (2 * a)
x2 = (-b - math.sqrt(x)) / (2 * a)
return x1, x2
print('quadratic(2, 3, 1) =', quadratic(2, 3, 1))
print('quadratic(1, 3, -4) =', quadratic(1, 3, -4))
if quadratic(2, 3, 1) != (-0.5, -1.0):
print('测试失败')
elif quadratic(1, 3, -4) != (1.0, -4.0):
print('测试失败')
else:
print('测试成功') | UTF-8 | Python | false | false | 502 | py | 15 | equ.py | 15 | 0.463519 | 0.388412 | 0 | 21 | 21.238095 | 51 |
TaeHun-Lee/Summareader_web | 9,809,705,343,475 | 3bec961e6d85e2e918d5ab7b981b191fbec7f3f7 | 28b82f9b6914a1b9de044764972498b141b0c718 | /board/summareader.py | ced6e16b48b85b8f53b702376bd6fc8e4e628fe1 | []
| no_license | https://github.com/TaeHun-Lee/Summareader_web | e3a1bb948e7ebf51e133a73ecddae5d77f318f47 | d9f356c03056632cd18264d4046b370747b66d45 | refs/heads/master | 2023-01-07T05:56:45.591961 | 2019-11-12T10:39:07 | 2019-11-12T10:39:07 | 221,191,343 | 0 | 0 | null | false | 2023-01-04T13:07:16 | 2019-11-12T10:29:51 | 2019-11-12T10:39:20 | 2023-01-04T13:07:16 | 7,276 | 0 | 0 | 24 | Python | false | false | # -*- coding: utf-8 -*-
from re import split
from networkx import Graph
from networkx import pagerank
from itertools import combinations
import urllib.request as urlopen
import ssl
import bs4
import re
from textrankr import TextRank
from collections import Counter
from konlpy.tag import Okt
from newspaper import Article
class Sentence(object):
okt = Okt()
def __init__(self, text, index=0):
self.index = index
self.text = text.strip()
self.tokens = self.okt.phrases(self.text)
self.bow = Counter(self.tokens)
def __str__(self):
return self.text
def __hash__(self):
return self.index
class TextRank(object):
def __init__(self, text):
self.text = text.strip()
self.build()
self.stopwords = []
def build(self):
self._build_sentences()
self._build_graph()
self.pageranks = pagerank(self.graph, weight='weight')
self.reordered = sorted(self.pageranks, key=self.pageranks.get, reverse=True)
def _build_sentences(self):
dup = {}
candidates = split(r'(?:(?<=[^0-9])\.|\n)', self.text)
self.sentences = []
index = 0
for candidate in candidates:
while len(candidate) and (candidate[-1] == '.' or candidate[-1] == ' '):
candidate = candidate.strip(' ').strip('.')
if len(candidate) and candidate not in dup:
dup[candidate] = True
self.sentences.append(Sentence(candidate + '.', index))
index += 1
del dup
del candidates
def _build_graph(self):
self.graph = Graph()
self.graph.add_nodes_from(self.sentences)
for sent1, sent2 in combinations(self.sentences, 2):
weight = self._jaccard(sent1, sent2)
if weight:
self.graph.add_edge(sent1, sent2, weight=weight)
def _jaccard(self, sent1, sent2):
p = sum((sent1.bow & sent2.bow).values())
q = sum((sent1.bow | sent2.bow).values())
return p / q if q else 0
def summarize(self, count=6 , verbose=True):
results = sorted(self.reordered[:count], key=lambda sentence: sentence.index)
results = [result.text for result in results]
if verbose:
return '\n'.join(results)
else:
return results
def summareader():
url="https://news.naver.com"
context=ssl._create_unverified_context()
response=urlopen.urlopen(url, context=context)
objBS= bs4.BeautifulSoup(response, "html.parser")
news_item=objBS.find_all("ul",{"class":"section_list_ranking"})
naverurl=[]
newstitle=[]
article=[]
for nws in news_item:
txt=nws.find_all("a")
for we in txt:
k = "https://news.naver.com"+we.get('href')
title=we.text.strip()
naverurl.append(k)
newstitle.append(title)
for i in range(len(naverurl)):
news = Article(naverurl[i], language='ko')
news.download()
news.parse()
textrank = TextRank(news.text)
suma=textrank.summarize(3)
article.append(suma)
# print(article[i],"\n-----------------------------------------------------------------------------------\n")
# print(newstitle[i], "\n-----------------------------------------------------------------------------------\n")
return newstitle, article
# test = summareader()
# print(test[0][0])
# print(test[1][0]) | UTF-8 | Python | false | false | 3,497 | py | 7 | summareader.py | 5 | 0.557907 | 0.549328 | 0 | 122 | 27.672131 | 120 |
mtonjes/usercode | 335,007,471,917 | eefe5bcc6735a518d3ca285aefd32781da169842 | d3d5e6cf3899b1d81df4f9f40770cbf1cbe74806 | /ZDC2015src/crab/crabConfig.py | d48bdbca9d09d7c1ce6d26a68a04cb9a4ad60fd4 | []
| no_license | https://github.com/mtonjes/usercode | 1b17653c99733e5c56f1b5bdebb6d2749cbbc4af | 860d284cb3aed1aea5360d125b0ff8adde20fd00 | refs/heads/master | 2021-01-25T08:37:14.880819 | 2016-11-23T18:20:01 | 2016-11-23T18:20:01 | 13,154,805 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | from CRABClient.UserUtilities import config
config = config()
#config.General.requestName = 'HIRun2015-PromptReco-v1_YYYYMMDD'
config.General.transferLogs = True
config.JobType.pluginName = 'Analysis'
# Name of the CMSSW configuration file
config.JobType.psetName = 'RunForwardAnalyzer_PbPb2015_all.py'
config.Data.inputDataset = '/HIExpressPhysics/HIRun2015-Express-v1/FEVT'
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 1000
config.Data.publication = False
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'HIExpress_HIRun2015-v1_HIL1MinimumBiasHFstarTrig'
#config.outputFiles = 'ForwardAnalyzerRun262694_ZeroBiasPromptReco_HF.root' #not necessary
# These values only make sense for processing data
# Select input data based on a lumi mask
#config.Data.lumiMask = 'Cert_190456-208686_8TeV_PromptReco_Collisions12_JSON.txt'
# Select input data based on run-ranges
config.Data.runRange = '262811'
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_US_MIT' | UTF-8 | Python | false | false | 1,051 | py | 169 | crabConfig.py | 40 | 0.80019 | 0.750714 | 0 | 26 | 39.461538 | 91 |
pooyahrtn/django-qarzi | 14,766,097,583,426 | 0ab6c111db39aeddf6776f41e9fe203eaf912266 | ea51ccb739b8ababca5f266b7fc1d84d2547f342 | /api/feeds/views.py | 14a87f5e197b5e819205c47b57fa9b668c9cce66 | []
| no_license | https://github.com/pooyahrtn/django-qarzi | c43f44724cb86014a72ba6ec6c0ddf6aeee5a5c3 | f8fc3ec748c082a14c00a2606aeff4565b336bc1 | refs/heads/master | 2022-12-15T06:49:49.707226 | 2020-03-15T20:47:49 | 2020-03-15T20:47:49 | 196,826,021 | 0 | 0 | null | false | 2022-12-08T05:55:47 | 2019-07-14T11:02:46 | 2020-03-15T20:48:06 | 2022-12-08T05:55:47 | 31,784 | 0 | 0 | 10 | Python | false | false | from rest_framework import mixins, viewsets, generics
from rest_framework.permissions import IsAuthenticatedOrReadOnly, AllowAny, IsAuthenticated
from . import permissions
from . import models
from . import serializers
from utils.CursorPagination import CreatedTimeCursorPagination
class BaseFeedViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.CreateModelMixin):
pagination_class = CreatedTimeCursorPagination
def perform_create(self, serializer):
serializer.validated_data['user'] = self.request.user
serializer.save()
def filter_queryset(self, queryset):
return queryset.filter(checked=True, user__blocked=False, user__is_active=True)
class LendFeedsViewSet(BaseFeedViewSet):
queryset = models.LendFeed.objects.all()
serializer_class = serializers.LendSerializer
permission_classes = (AllowAny,)
# def filter_queryset(self, queryset):
class BorrowFeedsViewSet(BaseFeedViewSet):
queryset = models.BorrowFeed.objects.all()
serializer_class = serializers.BorrowSerializer
permission_classes = (IsAuthenticatedOrReadOnly,)
# def filter_queryset(self, queryset):
# return queryset.filter(checked=True)
class MyFeedsViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
):
queryset = models.BaseFeed.objects.all()
serializer_class = serializers.CombinedSerializer
permission_classes = (IsAuthenticated,)
pagination_class = CreatedTimeCursorPagination
def filter_queryset(self, queryset):
return queryset.filter(user=self.request.user)
class ReportViewSet(viewsets.GenericViewSet,
mixins.CreateModelMixin):
queryset = models.ReportFeed.objects.all()
serializer_class = serializers.ReportSerializer
permission_classes = (IsAuthenticated,)
def perform_create(self, serializer):
serializer.save(reporter=self.request.user)
class DeleteMyFeed(viewsets.GenericViewSet, mixins.DestroyModelMixin):
queryset = models.BaseFeed.objects.all()
serializer_class = serializers.CombinedSerializer
permission_classes = (IsAuthenticated, permissions.IsUser)
| UTF-8 | Python | false | false | 2,270 | py | 51 | views.py | 48 | 0.727753 | 0.727753 | 0 | 66 | 33.348485 | 91 |
ojongchul/studyPython | 5,368,709,166,844 | c0dd899f4f4e5e79e84cc5534d6be08025197f6e | 952238cd68953a4c9c5edb37f47f845afaf84495 | /string.py | 6aba582fda6100aba711a25f5af6fd2f518e971d | []
| no_license | https://github.com/ojongchul/studyPython | 67d4a673e9620a3731408c5aa360bd1eb117bcaf | b419f584f794243286f43163e3cd9a7e34af6611 | refs/heads/master | 2021-03-29T17:57:24.200288 | 2020-03-18T21:03:05 | 2020-03-18T21:03:05 | 247,973,402 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | print ("help me")
print ('help me' + " " + 'dohnald')
print ("help 'me'")
print ('help "me"')
a = 'help me'
print (a)
print (len(a))
print (a[0])
print (a[1])
print (a[2])
print ((a+'\n')*3)
print ('dohnald says ' + a )
print ('{} says {}.'.format( 'dohnald', a))
print (' {name} says {says}'.format(says=a, name='dohnald'))
| UTF-8 | Python | false | false | 327 | py | 7 | string.py | 7 | 0.553517 | 0.541284 | 0 | 16 | 19.4375 | 60 |
etano/simpimc | 11,467,562,722,287 | d6cae8e284e1306ff03120f3e54a47859ffd80ee | f43e0439caac176e8c843677b142bdb2e94f0aea | /inputs/h-atom/gen_h_pa_david.py | 1f7b636c6270bfed8c1ec982a1436047ac4e2145 | []
| no_license | https://github.com/etano/simpimc | 78034b3caf7905231ef537df3c92035f93928b04 | 943fb6e3ec399b1f7b9ea075a447806f4522a6fd | refs/heads/master | 2020-06-01T07:36:03.448913 | 2017-01-03T18:16:07 | 2017-01-03T18:16:07 | 1,943,220 | 9 | 2 | null | false | 2016-02-25T12:49:30 | 2011-06-23T18:13:08 | 2015-12-20T06:11:14 | 2016-02-25T12:49:30 | 4,444 | 2 | 3 | 0 | C++ | null | null | import sys, os
from math import sqrt
# Exact location of PAGEN scripts
PAGEN_HOME = '../../scripts/pagen'
sys.path.append(PAGEN_HOME)
from GenPairAction import *
# Units
units = {'energy':'H', 'distance':'A'}
# Constants
tau = 0.125 # Time step
L = 10.0 # Box size
D = 3 # physical dimension
# Species
e = {'type': 'e', 'lambda': 0.5, 'Z': -1.0}
p = {'type': 'p', 'lambda': 0.0002723089072243553, 'Z': 1.0}
# Potential
potential = {}
potential['function'] = lambda Z1,Z2,r: Z1*Z2/r
potential['r_min'] = 0.0001 # first grid point
potential['r_max'] = 100. # last grid point
potential['n_grid'] = 1000 # number grid points
potential['grid_type'] = "OPTIMIZED" # grid type (LINEAR, LOG, LOGLIN (David only!), OPTIMIZED (Ilkka only!))
# Squarer
squarer = {}
squarer['type'] = "David" # Ilkka, David, or None
squarer['tau'] = tau # desired timestep of PIMC simulation
squarer['n_d'] = D # dimension
squarer['r_max'] = 10.0 # maximum distance on grid
squarer['n_grid'] = 100 # number of grid points
squarer['grid_type'] = "LOG" # grid type (LINEAR, LOG, LOGLIN (David only!), OPTIMIZED (Ilkka only!))
squarer['n_square'] = 14 # total number of squarings to reach lowest temperature
squarer['n_order'] = 2 # order of off-diagonal PA fit: -1 = no fit (direct spline, Ilkka only!), 0 = only diagonal, 1-3 = fit off-diagonal to 1-3 order
squarer['n_temp'] = 1 # number of temperatures for which to calculate the pair action (David only!)
# Long-range breakup
breakup = {}
breakup['type'] = 'None' # OptimizedEwald, StandardEwald, or None
breakup['n_d'] = D # dimension
breakup['L'] = L # length of box
breakup['tau'] = tau # desired timestep of PIMC simulation
breakup['r_min'] = 0.0001 # first grid point
breakup['r_max'] = sqrt(breakup['n_d'])*breakup['L']/2. # last grid point
breakup['r_paste'] = breakup['L']/4. # pasting grid point, ONLY FOR LOGLIN GRID!
breakup['r_cut'] = breakup['L']/2. # r cutoff for ewald
breakup['k_cut'] = 14./(L/2.) # k cutoff for ewald
breakup['n_grid'] = 100 # number of grid points
breakup['grid_type'] = "LOG" # grid type (LINEAR, LOG, LOGLIN (David only!), OPTIMIZED (Ilkka only!))
breakup['n_knots'] = 10 # number of knots in spline (probably fine)
breakup['n_images'] = 10 # Naive check
# Pair action objects
pa_objects = [
{'species_a': e, 'species_b': p, 'potential': potential, 'breakup': breakup, 'squarer': squarer},
]
# Run
run(pa_objects)
| UTF-8 | Python | false | false | 2,383 | py | 79 | gen_h_pa_david.py | 64 | 0.669744 | 0.634494 | 0 | 63 | 36.825397 | 151 |
JakeRivett31/Year9DesignPythonJR | 824,633,753,087 | 08e768aed11a1425368cbcfb8d78296c5a15161c | 4d35172e68c74fba6eb5ff8b39f03cbd3e1ed975 | /J1_2012.py | 0d06018da0a4a49f1a04b33fa08b991df518b034 | []
| no_license | https://github.com/JakeRivett31/Year9DesignPythonJR | 9c8c4ffc2b5ad583315de89b18fc801ad964a76f | 6117dca73a411912165dea2ab317d946276c81e1 | refs/heads/master | 2020-07-20T07:50:43.375899 | 2020-06-04T13:06:45 | 2020-06-04T13:06:45 | 206,602,109 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
limit = float(input("Enter the speed limit: "))
speed = float(input("Enter the recorded speed of the car: "))
fine = 0
overspeedlimit = speed - limit
if 0 < overspeedlimit <= 20:
fine = 100
if 20 < overspeedlimit <= 30:
fine = 270
if overspeedlimit > 30:
fine = 500
if speed > limit:
print("You are speeding and your fine is $"+str(fine)+".")
if speed <= limit:
print("Congratulations, you are within the speed limit!")
| UTF-8 | Python | false | false | 450 | py | 47 | J1_2012.py | 42 | 0.671111 | 0.628889 | 0 | 22 | 19.181818 | 61 |
Monu7053/monu-chaurasiya | 14,113,262,560,720 | 8a539f69761961887f16a4714b71867c05bcb2f0 | bf8894eaa66a701de1707101125bd37c63145160 | /dis2.py | 35075a633b3571633a0adefe60434840fdeaf686 | []
| no_license | https://github.com/Monu7053/monu-chaurasiya | 227de44ac500d2e4a8404d674abc48061ace033d | e2f3eaec664938da94b8565d4c58485865b04693 | refs/heads/master | 2023-05-11T15:51:05.138423 | 2023-05-04T03:45:23 | 2023-05-04T03:45:23 | 111,294,520 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import matplotlib.pyplot as plt
import numpy as np
fig, (ax1, ax2) = plt.subplots(1,2)
fig.suptitle("Density of States")
y = np.zeros(101)
x = np.arange(0,101,1)
x0 = (x[0]+x[-1])/2
for i in range(0,len(y)):
if x[i] == x0:
y[i] = 1
ax1.plot(x,y)
ax1.set_ylabel(r"$\frac{G(\nu)}{N_Af}$")
ax1.set_xlabel(r"$\frac{\nu}{\nu_{\epsilon}}$")
ax1.set_title("DOS : Einstein's theory")
ax1.grid()
x = np.linspace(0,1,100)
y = x**2
ax2.plot(x,y)
ax2.set_ylabel(r"$\frac{G(\nu)}{N_Af}$")
ax2.set_xlabel(r"$\frac{\nu}{\nu_{\epsilon}}$")
ax2.set_title("DOS : Debye's theory")
ax2.grid()
plt.show() | UTF-8 | Python | false | false | 616 | py | 13 | dis2.py | 13 | 0.587662 | 0.530844 | 0 | 26 | 21.769231 | 47 |
zxwqxtu/python | 7,516,192,800,929 | 585a4d9faccc69cfd97f40564d851961bdd36055 | 4611b2a17c2e5f619c4fb45d5bdc26acb66aa080 | /socket/test.py | 4b9ce02432a64c8838fdaefca30ed090fd340eed | []
| no_license | https://github.com/zxwqxtu/python | 9ee992905dc98e5f2fb67e4adba0c762a2cc3753 | b79a410c19f00c9ffe5f0d9db2d3577912edf487 | refs/heads/master | 2021-01-15T16:53:28.292715 | 2015-04-03T03:22:31 | 2015-04-03T03:22:31 | 32,973,175 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import socket
print socket.getaddrinfo('www.xrschzs.com', 80)
print socket.gethostbyname('www.baidu.com')
print socket.gethostname()
print socket.gethostbyaddr('61.135.169.125')
| UTF-8 | Python | false | false | 179 | py | 32 | test.py | 29 | 0.787709 | 0.715084 | 0 | 6 | 28.833333 | 47 |
Tardo/TeeMo | 2,980,707,327,247 | 421b4a0262942d58769bc144e08bee5119a5fed5 | c65fa81f4d2a265fc77c079d544710209d798ce8 | /scripts/cmd5.py | b22ed16ebc2ddad45beb546616deb6efc9b545ef | [
"LicenseRef-scancode-other-permissive",
"Zlib",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/Tardo/TeeMo | 5db9228cc1716b0bae3067139469012331a85c5c | eb78fe9bcd6f23f22177633fe73ce3aa0157f115 | refs/heads/master | 2017-12-05T03:49:31.573323 | 2017-06-25T18:11:06 | 2017-06-25T18:11:06 | 95,375,906 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #TeeMo Force Hash Version
hash = "626fce9a778df4d4"
print('#define GAME_NETVERSION_HASH "%s"' % hash)
| UTF-8 | Python | false | false | 102 | py | 10 | cmd5.py | 9 | 0.735294 | 0.647059 | 0 | 3 | 33 | 49 |
Zojax/zojax.banner | 2,482,491,145,901 | d3d66b90cf59467a3c3621520738ce547b9fee73 | 9558973f6948adff8da23d4e03e7b294915be671 | /src/zojax/banner/portlets/interfaces.py | 04244eb123bdcf5a73f0994cddd6ca6ea562f2e0 | [
"ZPL-2.1"
]
| permissive | https://github.com/Zojax/zojax.banner | 1da4c051139bd566822d0344e192d899f957c9a5 | 6789e9b25b311e72fe9cfa2f601fff4b9a68ee76 | refs/heads/master | 2021-01-20T12:00:30.110442 | 2011-12-16T07:10:40 | 2011-12-16T07:10:40 | 2,035,345 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id$
"""
from zope import interface, schema
from zojax.banner.interfaces import _
class IBannerPortlet(interface.Interface):
""" Banner portlet """
label = schema.TextLine(
title = _(u'Label'),
default = _(u'Banner'),
required = False)
decoration = schema.Bool(
title = _(u'Portlet decoration'),
description = _(u'Show portlet decoration, or just banner.'),
default = True,
required = False)
place = schema.Choice(
title= _(u'Banner Place'),
vocabulary="zojax.banner.places")
class ISponsorsPortlet(interface.Interface):
""" Sponsors portlet """
label = schema.TextLine(
title = _(u'Label'),
default = _(u'Sponsors'),
required = False)
decoration = schema.Bool(
title = _(u'Portlet decoration'),
description = _(u'Show portlet decoration, or just sponsors.'),
default = True,
required = False)
| UTF-8 | Python | false | false | 1,600 | py | 24 | interfaces.py | 9 | 0.585 | 0.58125 | 0 | 53 | 29.188679 | 78 |
QingqingSun-Bao/GitResp2 | 3,770,981,324,602 | b8d430ec44bb4279f40fae36a07a61def210d879 | 9316f0d31de1bdfdbffaf86156b9bbc4f2c30d56 | /zexmp/yang-jj.py | 6487c620274bebb2bc888e83940dfb1469e60445 | []
| no_license | https://github.com/QingqingSun-Bao/GitResp2 | 405372ddc7f08a90d60874c08ac05bb3f35cf15e | 39c2d8538475fa64110734384f37f6e922b6d07b | refs/heads/master | 2023-06-26T00:19:32.951231 | 2021-07-08T01:15:43 | 2021-07-08T01:15:43 | 349,898,472 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# @Time:2021/3/99:15
# @File:yang-jj.py
# @Software:PyCharm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import make_interp_spline
df=pd.read_excel("C://Users/97899/Desktop/N/YJJ/生物量和多样性的抵抗力.xls")
print(df)
gb=df.groupby("年份")
Y=[]
for g in gb:
# 生物量抵抗力(%)
# 物种数抵抗力(%)
Y.append(np.mean(g[1].loc[:,"物种数抵抗力(%)"]))
X=np.linspace(2009,2020,12)
# plt.subplots(4,3,1)
plt.scatter(X,Y)
x_new = np.linspace(np.min(X), np.max(X), 300)
y_smooth = make_interp_spline(X, Y)(x_new)
plt.plot(x_new,y_smooth)
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams['font.sans-serif'] = ['SimHei']
# plt.xlim(12,[2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020])
plt.xlabel("年份")
plt.ylabel("物种数抵抗力(%)")
plt.show() | UTF-8 | Python | false | false | 891 | py | 132 | yang-jj.py | 132 | 0.676286 | 0.5734 | 0 | 30 | 25.6 | 76 |
OliynykPro/invbase | 11,278,584,155,412 | 20af652fc40b6ccd0542289feaf6d60ce766291e | c6ba862fc3e7d39ea35c792e57b5e305d143a513 | /investorsmanager/models.py | 9eaea61396aab9ac6c314270b1a7a158cc3d6f87 | []
| no_license | https://github.com/OliynykPro/invbase | 835aa85613349376d535910a860d686781e8242b | b3ebbc1151e37a516c25aa22ad64050bbdbb89f5 | refs/heads/master | 2023-02-22T12:00:28.631999 | 2021-01-27T00:00:08 | 2021-01-27T00:00:08 | 329,417,287 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
from django.contrib.auth.models import User
# Create your models here.
class InteractionStatus(models.Model):
interaction_status = models.CharField(max_length=100, help_text='Enter interaction status (Need to check, Sended, Wait for answer, Denied, With answer, In progress)')
class TypeOfInvestor(models.Model):
type_of_investor = models.CharField(max_length=100, help_text='Enter type of investor (Ukraine VC, Ukraine Angel, International VC, International Angel, Aggregators)')
class Investor(models.Model):
company_name = models.CharField(max_length=200)
company_description = models.CharField(max_length=2000)
#type_of_investor = models.CharField(max_length=100)
type_of_investor = models.ManyToManyField(TypeOfInvestor, help_text='Select a type of this investor')
email = models.EmailField(max_length=100, unique=True)
investment_focus = models.CharField(max_length=1000)
#interaction_status = models.CharField(max_length=100)
# owner = models.ForeignKey(User, related_name="investors", on_delete=models.CASCADE, null=True)
interaction_status = models.ManyToManyField(InteractionStatus, help_text='Select a status for this investor')
investor_rating = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(10)])
investor_portfolio = models.CharField(max_length=500)
investor_location = models.CharField(max_length=200)
website_url = models.CharField(max_length=200, default='NO EMAIl')
created_at = models.DateTimeField(auto_now_add=True)
| UTF-8 | Python | false | false | 1,632 | py | 23 | models.py | 18 | 0.769608 | 0.746324 | 0 | 26 | 61.730769 | 171 |
psai-github/MagnitudoWebsite2011_CreatedByPranavSai2011 | 15,547,781,619,531 | 26b0f4b07de9a624f82e7babed84a288412512c8 | 9d8f981bc0b7d06acbd4e884f50f6eea12f8e140 | /buying.py | 8d2b9eec467192af234e20f4e8ebe9494f78ceea | []
| no_license | https://github.com/psai-github/MagnitudoWebsite2011_CreatedByPranavSai2011 | 2db333ea545967fac04d46e0f6c555748ffa8cb8 | f0c2540f9b696b305c59c1ba4fb1bab097e746d7 | refs/heads/main | 2023-07-03T22:33:17.282627 | 2022-08-24T15:24:21 | 2022-08-24T15:24:21 | 370,544,846 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from tkinter import *
import smtplib
import time
from validate_email import validate_email
import base64
def error():
screen1=Toplevel(screen)
screen1.geometry('150x90')
screen1.title('Warning')
warning=Label(screen1,text='All fields need to be valid',bg="red",fg="yellow").pack()
def error1():
screen1=Toplevel(screen)
screen1.geometry('150x90')
screen1.title('Warning')
warning=Label(screen1,text='Sorry,you dont have enough points to buy this product in your account' ,bg="red",fg="yellow").pack()
def save():
price=int(0)
f=0
a=0
global p
print("Validating...")
le_info=le.get()
u_info=u.get()
p_info=p.get()
encode=p_info.encode("utf-8")
encoded=str(base64.b16encode(encode))
#a=base64.b16decode(encoded.decode("utf-8"))
f3=open('user.txt','r')
contex=f3.read()
ok=u_info and encoded in contex
f3.close()
if ok==False:
error()
f=1
if le_info=='':
error()
f=1
if p_info=='':
error()
f=1
if u_info=='':
error()
f=1
encode2=p_info.encode("utf-8")
encoded2=base64.b16encode(encode2)
encoded2=str(encoded2)
#What the product is
if le_info=='1':
price=int(14.99)
le_info='Paper 500 sheets'
if le_info=='2':
le_info=='The Deep End Book'
print(encoded2)
file=open(u_info+'.txt','r')
content=file.read()
content=int(content)
for chr in content:
a=int(a+1)
print(content)
file.close()
file1=open(u_info+'.txt','a')
print(content)
if content<price:
error1()
f=1
if content>price:
file1.truncate(0)
content1=content-price
content1=str(content1)
file1.write(content1)
if f==0:
a=Toplevel(screen)
a.title('Validating')
a.geometry("100x150")
b=Label(a,text='You have purchased an item!You will recive your item in less than a week.Thankyou for ordering!.',bg='powder blue',fg='red')
b.place()
gmailaddress = 'pranavS31899@gmail.com'
gmailpassword = 'PRANAVSAI'
#gmailpassword=base64.b16decode(encoded.decode("utf-8"))
mailto = 'pranavS31899@gmail.com'
msg = 'Product'+p_info
mailServer = smtplib.SMTP('smtp.gmail.com' , 587)
mailServer.starttls()
mailServer.login(gmailaddress , gmailpassword)
mailServer.sendmail(gmailaddress, mailto , msg)
mailServer.quit()
screen=Tk()
screen.title('Form')
screen.geometry("500x500")
heading=Label(text='Form',bg="yellow",fg="blue",width="500",height="3")
heading.pack()
lastname=Label(text="Id of product")
lastname.place(x=15,y=140)
Username=Label(text='Username')
Username.place(x=15,y=200)
password=Label(text='Password')
password.place(x=15,y=280)
firstname=StringVar()
lastname=StringVar()
gmail=StringVar()
le=Entry(textvariable=lastname,width="30")
u=Entry(textvariable=gmail,width="30")
u.place(x=15,y=250)
p=Entry(text='Id of product',width='30')
p.place(x=15,y=315)
le.place(x=15,y=180)
save=Button(text="Buy",width="20",height="2",bg='powder blue',fg="black",command=save)
save.place(x=15,y=400)
| UTF-8 | Python | false | false | 3,379 | py | 24 | buying.py | 4 | 0.593667 | 0.548979 | 0 | 131 | 23.793893 | 148 |
matsub/sandbox | 8,693,013,827,490 | 164516c3594e1e6cae5ad830c3c0fe90454feb0c | 226071f65572b9b53585a21972844fcfce86c7fa | /python/basic_tips/mydict.py | d91702f4cd5122e1d9ed609d37743a54d7d45ff7 | []
| no_license | https://github.com/matsub/sandbox | 3ded3399e6151ecfb3e290d5fb17e16a446935ae | dc016c54d092ac068e031a8df60eafb8fb85376f | refs/heads/master | 2023-01-28T20:18:34.443130 | 2020-02-10T08:31:13 | 2020-02-11T07:46:48 | 28,806,809 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | dic = {
'a': 10,
'b': 'test',
'c': 0.9,
}
class MyDict(dict):
def __init__(self, dic):
super().__init__(dic)
for k, v in self.items():
self.__setattr__(k, v)
def echo(self):
return dir(self)
d = MyDict(dic)
print(d, d.echo())
print('=== checking the id of values')
print('--- list of k/v of dictionary')
for k, v in d.items():
print(id(k), id(v))
print('--- list of k/v of attr')
for k in d.keys():
print(id(k), id(d.__getattribute__(k)))
| UTF-8 | Python | false | false | 508 | py | 184 | mydict.py | 129 | 0.507874 | 0.5 | 0 | 26 | 18.538462 | 43 |
jimcadden/ebbrt-contrib | 8,220,567,406,314 | 4e865023acc4e1f1f934a462ba1a12306f371cc3 | f5bab0feb337491bb9d6c1a7658818238f87f690 | /scripts/osdi14/colstats.py | c6ade06e7e5fafdae572e3abe7068013a2ddf4d4 | []
| no_license | https://github.com/jimcadden/ebbrt-contrib | 0cb18653a396e59bc894556f394537de0f82b57a | 76a1fe0c96a7fccc4958ad6cc5916d7923b6f7a4 | refs/heads/master | 2021-09-20T03:37:38.826638 | 2018-08-02T23:00:38 | 2018-08-02T23:00:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
''' Input format must be as follows:
# Trial 123
12 33 442.5 595 3343.55
5 13 42.33 54495 3243.44
12 31 442.5 595 3343.55
- Comment identifies the start of a data set
- Script can support any number of tab-separated columns
Values are assumed to be floats
- Statistics are output in CSV format, each row containing
  the results for all columns of a trial. Column data is separated by an empty field.
- Order of results min,max,mean,var,std
'''
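# Example invocation (hypothetical file names):
#   ./colstats.py < trials.txt > stats.csv
# For each trial the output row lists min, max, mean, var, std for every column,
# with a blank field between columns.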
import csv
import sys
import time
import numpy as n
cols = []
ncols = []
numcols = 0
title = ""
in_trial = False
try:
for line in sys.stdin:
if in_trial == True:
if line[0] == "#":
# signals the start of another trial, lets dump previous data
writerlist = [title]
for i in range(numcols):
ncols.append(n.array(cols[i], float))
writerlist += [ncols[i].min(), ncols[i].max(),
ncols[i].mean(), ncols[i].var(), ncols[i].std(), " "]
sys.stdout.flush()
csv.writer(sys.stdout).writerow(writerlist)
# reset trial variables
numcols = 0
cols[:] = []
ncols[:] = []
                title = line.rstrip() # remove the trailing newline
elif not line == "\n": # skip blank lines
v = line.split('\t')
if numcols == 0:
numcols = len(v) - 1 #minus removes newline
for i in range(numcols):
cols.append([])
for i in range(numcols):
cols[i].append(v[i])
else: # not yet in trial
if line[0] == "#":
                title = line.rstrip() # remove the trailing newline
in_trial = True
except KeyboardInterrupt:
sys.stdout.flush()
pass
# a final dump
writerlist = [title]
for i in range(numcols):
ncols.append(n.array(cols[i], float))
writerlist += [ncols[i].min(), ncols[i].max(),
ncols[i].mean(), ncols[i].var(), ncols[i].std(), " "]
sys.stdout.flush()
csv.writer(sys.stdout).writerow(writerlist)
| UTF-8 | Python | false | false | 1,986 | py | 78 | colstats.py | 60 | 0.583082 | 0.55136 | 0 | 66 | 29.090909 | 89 |
MushinMiscellanea/Music-ally | 13,297,218,749,311 | 59e1892418c7ec3a7be796fe56f059a0dc5762ce | 22441229d7b333247503f4324c6bc78be8bbad7c | /test.py | 1ec48483cf6a812b6fdeaee84a44fdb371df5a34 | [
"MIT"
]
| permissive | https://github.com/MushinMiscellanea/Music-ally | 9495695eda15da1c7bcc06e41c63594229ca7ea1 | f1bd8df3e43317213b149bda255445d6ac3a2c09 | refs/heads/master | 2022-04-12T04:11:18.062849 | 2020-02-25T01:46:17 | 2020-02-25T01:46:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import csv
import pandas as pd
import matplotlib as mt
pitch = pd.read_csv('/Users/spencerfinkel/repos/musically/pitch_freq.csv')
pitch = pitch.rename(columns={'Unnamed: 2': 'Octive'})
p = (list(pitch))
freq = pitch['Frequency (Hz)']
print(freq.head())  # the original line was the incomplete 'freq.()'; printing a preview of the series is assumed here
'''#frequency of all notes in a scale
for i in range(13):
freq = 16.35
freq1 = freq*((2**(1/12))**i)
print(f'{freq1:.2f}')
#list comprehension, whole octive frequencies
freq = 73.42
freqsincomprehension = [float(freq*((2**(1/12))**i)) for i in range(13)]
for item in freqsincomprehension:
print('{:.2f}'.format(item))
print()
#frequencies of minor scale
def minor_freq(*args):
for i in range(13):
freq1=freq
freq1=freq*((2**(1/12))**i)
if i == 1 or i ==4 or i == 6 or i == 9 or i ==11:
continue
else:
print(f'{freq1:.2f}')
minor_freq(freq)
print()
#all the octaves of the frequency
def octive(arg):
print()
series = [arg]
if arg > 16.35:
arg1 = arg
for oct in range(8):
arg1/=2
series.insert(0, arg1)
for oct in range(8):
arg *= 2
series.append(arg)
else:
for oct in range(8):
arg *= 2
series.append(arg)
series1 = [item for item in series if item <= 7902.13 and item >= 1.00]
#[enumerate(item) for item in series if item >= 16.35]
print(series1)
def main():
freq = float(input('What frequency are you looking for: '))
octive(freq)
main()'''
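# Illustrative check of the equal-temperament formula used in the commented-out
# experiments above: each semitone multiplies the frequency by 2**(1/12), so
# twelve semitones double it, e.g. 440.0 * (2 ** (1 / 12)) ** 12 is ~880.0 (A4 -> A5).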
| UTF-8 | Python | false | false | 1,522 | py | 7 | test.py | 5 | 0.571616 | 0.525624 | 0 | 70 | 20.742857 | 75 |
reed-only/flask_app | 14,654,428,445,340 | 9451549ad8c44d67ae1b756c405c625e99e6017c | 55c928d6ee5374f0f21d1e925fc7993c24e59bee | /flask_app/__init__.py | 8fb2a50782f1364cab65cb1f2b6f565479610771 | [
"MIT"
]
| permissive | https://github.com/reed-only/flask_app | 2ee717a55fa6a41a893bea246eaa099cf7d3fd50 | 52010732478d4faa9318a7d76cbf2802671e8378 | refs/heads/master | 2021-09-21T18:04:42.917137 | 2018-08-30T04:35:28 | 2018-08-30T04:35:28 | 114,563,711 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Flask app
All models and resources should be registered here
"""
from flask_app.extensions import app, db
from flask_app import models, resources
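# Importing `models` and `resources` above is presumably what performs the
# registration: those modules are expected to attach their models/routes to the
# shared `app` and `db` objects as an import side effect.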
| UTF-8 | Python | false | false | 152 | py | 23 | __init__.py | 15 | 0.769737 | 0.769737 | 0 | 8 | 18 | 50 |
PyLadiesDC/opportunities | 17,145,509,469,910 | 85a23562c62945eccb29f905c5be79a5db9a2850 | f74faa679e67b1776ece16bb4aa295b897acdfcf | /diversity_webpage/opportunities_app/migrations/0003_auto_20150910_1611.py | 4777f362611e2e70f02fa83aaf116d798b781549 | [
"Apache-2.0"
]
| permissive | https://github.com/PyLadiesDC/opportunities | d904587d6b22b6906d5515e653974d04f7144a31 | e2a7653d3f285a98075ba3fc8cad31bd6cf5c8d0 | refs/heads/master | 2020-12-02T17:34:02.860074 | 2015-09-14T14:58:42 | 2015-09-14T14:58:42 | 38,165,390 | 1 | 0 | null | false | 2015-09-14T00:17:56 | 2015-06-27T15:43:19 | 2015-09-12T17:57:02 | 2015-09-14T00:17:56 | 10,256 | 0 | 0 | 0 | Python | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('opportunities_app', '0002_post_content'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='phone',
),
migrations.AddField(
model_name='post',
name='last_updated',
field=models.DateTimeField(default=datetime.datetime(2015, 9, 10, 16, 11, 8, 196149, tzinfo=utc), auto_now=True),
preserve_default=False,
),
migrations.AlterField(
model_name='post',
name='address',
field=models.CharField(help_text=b'Address of the business', max_length=100, null=True, blank=True),
),
migrations.AlterField(
model_name='post',
name='email',
field=models.EmailField(max_length=254),
),
migrations.AlterField(
model_name='post',
name='name',
field=models.CharField(help_text=b'Title of the job post', max_length=100),
),
migrations.AlterField(
model_name='post',
name='point_of_contact',
field=models.CharField(help_text=b'Posting contact point', max_length=100),
),
]
| UTF-8 | Python | false | false | 1,423 | py | 19 | 0003_auto_20150910_1611.py | 11 | 0.567814 | 0.543219 | 0 | 46 | 29.934783 | 125 |
JetBrains/intellij-community | 1,142,461,341,902 | 83baf0728981f728df1290f8c5dd2117a1413ed8 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/codeInsight/mlcompletion/scopeFileDontConsiderFunctionBodies.py | 37a84a129d4aa24dad330ed70cd5a65f7d5208dc | [
"Apache-2.0"
]
| permissive | https://github.com/JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | false | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | 2023-09-12T03:37:30 | 2023-09-12T06:46:46 | 4,523,919 | 15,754 | 4,972 | 237 | null | false | false | def some_fun(some_param):
some_var_1 = 22
some_var_2 = 23
some_var_1 = 22
some_var_2 = 23
SOME_VAR_3 = 24
print(<caret>)
def some_fun_2(some_param):
some_var_1 = 22
some_var_2 = 23 | UTF-8 | Python | false | false | 190 | py | 127,182 | scopeFileDontConsiderFunctionBodies.py | 70,394 | 0.626316 | 0.510526 | 0 | 12 | 14.916667 | 27 |
P79N6A/WeCloud | 5,248,450,055,608 | d9e2fcfaed5103980c4c2c69fe5bdbc041de00e7 | 4fb524755fb8997a8cc845917e6d5194ed737672 | /platform/bin/http_status.py | a747f1666b2ba00319789aa6b43f9a475715a969 | []
| no_license | https://github.com/P79N6A/WeCloud | 75b0d53f2e72fb9762ec43977927f23d87e88e8a | 7836382e7a47c0b6f24de1813527130e051af97f | refs/heads/master | 2020-07-14T16:35:29.678985 | 2019-08-30T09:42:05 | 2019-08-30T09:42:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
#-*- coding:utf-8 -*-
__author__ = 'iambocai'
import urllib2
import requests
import json
import time
import socket
import os
import re
import telnetlib
import sys
import commands
def main():
today = time.strftime("%Y.%m.%d", time.gmtime())
url = 'http://elk-es-6.cluster.koolearn.com/nginx_access_log_' + today + '/_search'
now = time.time();
now_int = int(now) * 1000
#
before_minute = now_int - 1000 * 30
data = []
query_data = {
"size": 0,
"query": {
"bool": {
"must": {
"query_string": {
"query": "status:[0 TO 1000] AND (server_name:www.koo.cn OR server_name:weixin.koo.cn OR server_name:m.koo.cn )",
"analyze_wildcard": "true",
"minimum_should_match": "100%"
}
},
"filter": {
"bool": {
"must": [
{
"range": {
"@timestamp": {
"gte": before_minute,
"lte": now_int,
"format": "epoch_millis"
}
}
}
]
}
}
}
},
"aggs": {
"2": {
"terms": {
"field": "server_name",
"size": 20
},
"aggs": {
"3": {
"terms": {
"field": "status",
"size": 5
}
}
}
}
}
}
query_data = json.dumps(query_data)
headers = {"Content-Type": "application/json; charset=UTF-8"}
response = requests.get(url=url, headers=headers, data=query_data)
result = response.text
response.close()
resultJson = json.loads(result)
buckets = resultJson["aggregations"]["2"]["buckets"]
for each_bucket in buckets:
http_stats={}
status_200 = 0
status_3xx = 0
status_4xx = 0
status_5xx = 0
total = each_bucket["doc_count"]
domain = each_bucket["key"]
status_each_bucket = each_bucket["3"]["buckets"]
for each_status in status_each_bucket:
status = each_status["key"]
status_count=each_status["doc_count"]
if int(status) == 200 :
status_200 += status_count
elif int(status) >= 300 and int(status) < 400:
status_3xx += status_count
elif int(status) >= 400 and int(status) < 500:
status_4xx += status_count
elif int(status) >= 500 and int(status) < 600:
status_5xx += status_count
if int(total) == 0 :
total = 1
falcon_data = {
"metric": "nginx.http.status.200.ratio",
"endpoint": "koo-http-status",
"timestamp": int(now),
"step": 30,
"value": float(status_200)/float(total)*100,
"counterType": "GAUGE",
"tags": "domain="+ domain
}
data.append(falcon_data)
falcon_data = {
"metric": "nginx.http.status.3xx.ratio",
"endpoint": "koo-http-status",
"timestamp": int(now),
"step": 30,
"value": float(status_3xx)/float(total)*100,
"counterType": "GAUGE",
"tags": "domain="+ domain
}
data.append(falcon_data)
ratio_4xx = float(status_4xx)/float(total)*100
if total <= 10 :
ratio_4xx = 0;
falcon_data = {
"metric": "nginx.http.status.4xx.ratio",
"endpoint": "koo-http-status",
"timestamp": int(now),
"step": 30,
"value": ratio_4xx,
"counterType": "GAUGE",
"tags": "domain="+ domain
}
data.append(falcon_data)
falcon_data = {
"metric": "nginx.http.status.5xx.ratio",
"endpoint": "koo-http-status",
"timestamp": int(now),
"step": 30,
"value": float(status_5xx)/float(total)*100,
"counterType": "GAUGE",
"tags": "domain="+ domain
}
data.append(falcon_data)
return data
if __name__ == '__main__':
proc = commands.getoutput(''' ps -ef|grep '30_sharks_http_status.py'|grep -v grep|wc -l ''')
if int(proc) < 3:
print json.dumps(main())
| UTF-8 | Python | false | false | 4,382 | py | 93 | http_status.py | 87 | 0.463943 | 0.438841 | 0 | 153 | 27.633987 | 140 |
avdhesh9316/PythonGit | 18,210,661,335,723 | e89b1e1a9017eb9d98ef56b17193da87d6a0aecf | d02e57bdfa831cb53c6173b9ffa3588f603b1797 | /program32.py | efc9468e7eda0b9730bd6e727632325414255f69 | []
| no_license | https://github.com/avdhesh9316/PythonGit | 1406c6c4148a50ea8ec8acc6f8d38655470165ce | 64f5d6485c158183153f32c3419a9d6b45d8fda0 | refs/heads/master | 2021-05-07T18:15:31.611011 | 2017-10-29T18:45:03 | 2017-10-29T18:45:03 | 108,761,308 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/python3
from sys import stdout
list1 = [4, 5, 1, 6, 78]
list2 = [9, 15, 53, 82, 7]
list3 = [10, 12, 16, 18]
print('Length of List1 : {}'.format(len(list1)))
print('Length of List2 : {}'.format(len(list2)))
print('Length of List3 : {}'.format(len(list3)))
min1 = max1 = 0
min2 = max2 = 0
min3 = max3 = 0
for i in range(len(list1)):
if list1[i] <= list1[min1]:
min1 = i
if list1[i] >= list1[max1]:
max1 = i
for i in range(len(list2)):
if list2[i] <= list2[min2]:
min2 = i
if list2[i] >= list2[max2]:
max2 = i
for i in range(len(list3)):
if list3[i] <= list3[min3]:
min3 = i
if list3[i] >= list3[max3]:
max3 = i
if list1[min1] < list2[min2] and list1[min1] < list3[min3]:
print("List 1 is the smallest")
elif list2[min2] < list1[min1] and list2[min2] < list3[min3]:
print("List 2 is the smallest")
else:
print("List 3 is the smallest")
if list1[max1] > list2[max2] and list1[max1] > list3[max3]:
print("List 1 is the biggest")
elif list2[max2] > list1[max1] and list2[max2] > list3[max3]:
print("List 2 is the biggest")
else:
print("List 3 is the biggest")
print('{} {} {} {} {} {}'.format(list1[min1],list1[max1],list2[min2],list2[max2],list3[min3],list3[max3]))
#### Deleting first and last Element
list1.pop()
list2.pop()
list3.pop()
list1.remove(list1[0])
list2.remove(list2[0])
list3.remove(list3[0])
print("New List1 : ")
for i in range(len(list1)):
stdout.write('{} '.format(list1[i]))
print("\nNew List 2 : ")
for i in range(len(list2)):
stdout.write('{} '.format(list2[i]))
print('\nNew List 3 : ')
for i in range(len(list3)):
stdout.write('{} '.format(list3[i]))
| UTF-8 | Python | false | false | 1,702 | py | 37 | program32.py | 33 | 0.60282 | 0.521152 | 0 | 67 | 24.402985 | 106 |
akhramshaik/Machine-Learning | 16,277,926,088,682 | bfe5c847a395ebc57e42be3cb921f1bd7db9abaa | 44895b2af5051557d44ea573ef93d0266c87d242 | /My Notes/US - Dimentionality Reduction/PCA.py | 4c331770949b48a1dd18d09273014b6bdfa04008 | []
| no_license | https://github.com/akhramshaik/Machine-Learning | a5a4a1c720e8159332ece8a9dfba1b5033d22eda | eefa23262701167be6142d30f81183228da343e8 | refs/heads/master | 2020-09-17T00:32:32.428413 | 2020-01-19T15:24:16 | 2020-01-19T15:24:16 | 223,933,162 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
path = 'C:/Users/akhram/Desktop/AIML/Machine Learning/Utils'
sys.path.append(path)
import common_utils as utils
import pca_utils as putils
import tsne_utils as tutils
import classification_utils as cutils
import pandas as pd
import numpy as np
from sklearn import decomposition
#pca effect on linearly related data
X, y = cutils.generate_linear_synthetic_data_classification(n_samples=1000, n_features=2, n_redundant=0, n_classes=2, class_sep=0, weights=[.5,.5])
X = pd.DataFrame(X, columns=['X1', 'X2'])
utils.plot_data_2d(X)
print(X.corr())
lpca = decomposition.PCA(n_components=0.96)
lpca.fit(X)
print(lpca.components_)
print(lpca.explained_variance_)
print(lpca.explained_variance_ratio_)
np.cumsum(lpca.explained_variance_ratio_)
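# Note: passing a float to PCA (n_components=0.96 above) keeps the smallest number
# of components whose cumulative explained variance reaches that fraction, which is
# what the cumulative sum computed here illustrates.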
putils.plot_pca_result(lpca, X)
#pca effect on linearly related data(1 redundant feature)
X, y = cutils.generate_linear_synthetic_data_classification(n_samples=1000, n_features=3, n_redundant=1, n_classes=2, weights=[.5,.5])
X = pd.DataFrame(X, columns=['X1', 'X2', 'X3'])
utils.plot_data_3d(X)
print(X.corr())
lpca = decomposition.PCA(2)
lpca.fit(X)
print(lpca.explained_variance_)
print(lpca.explained_variance_ratio_)
np.cumsum(lpca.explained_variance_ratio_)
putils.plot_pca_result(lpca, X)
#pca effect on linearly related data(2 redundant featues)
X, y = cutils.generate_linear_synthetic_data_classification(n_samples=1000, n_features=3, n_redundant=2, n_classes=2, weights=[.5,.5])
X = pd.DataFrame(X, columns=['X1', 'X2', 'X3'])
utils.plot_data_3d(X)
print(X.corr())
lpca = decomposition.PCA(1)
lpca.fit(X)
print(lpca.explained_variance_)
print(lpca.explained_variance_ratio_)
np.cumsum(lpca.explained_variance_ratio_)
putils.plot_pca_result(lpca, X)
#pca effect on non-linearly related data
X, y = cutils.generate_nonlinear_synthetic_data_classification2(n_samples=1000)
X = pd.DataFrame(X, columns=['X1', 'X2'])
utils.plot_data_2d(X)
print(X.corr())
lpca = decomposition.PCA(2)
lpca.fit(X)
print(lpca.explained_variance_)
print(lpca.explained_variance_ratio_)
np.cumsum(lpca.explained_variance_ratio_)
putils.plot_pca_result(lpca, X)
tutils.plot_tsne_result(X, y, 2)
| UTF-8 | Python | false | false | 2,129 | py | 14 | PCA.py | 10 | 0.752466 | 0.726163 | 0 | 62 | 33.33871 | 147 |
ttcool/league_matches | 10,840,497,457,958 | b0e147ce17cbaf12fd4cb3c0812012aae719c0d1 | 2ca3c1374a188782b05f4283914ef309d429ca2a | /import_mongo.py | c9a751bf2c24223e94831e5864c89c1e88363f9d | []
| no_license | https://github.com/ttcool/league_matches | be520c7129d2ff6f15effa6e4be0c872f3ebcaaf | 3b5660e1fc99c96eb16765f99ef9370170a01838 | refs/heads/master | 2021-01-20T04:42:25.092166 | 2020-10-19T03:48:48 | 2020-10-19T03:48:48 | 41,991,677 | 9 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import pymongo
con = pymongo.MongoClient('localhost', port=27017)  # pymongo.Connection was removed in pymongo 3.x; MongoClient is the replacement
matches = con.db.matches
match = json.load(open('match.json'))
for i in match:
matches.save(i)
| UTF-8 | Python | false | false | 177 | py | 14 | import_mongo.py | 6 | 0.728814 | 0.700565 | 0 | 7 | 24 | 48 |
mlanden/Useful-Code-Snippits | 17,257,178,607,667 | 7d71ab173ef751924dc2748972edb75f4b13ce65 | 4fa081ed8f01386885591745ef8aa0bd47f60cf7 | /classification/utils.py | 77dc10a2e1913160034ee7229fc8ac1aa14cec36 | []
| no_license | https://github.com/mlanden/Useful-Code-Snippits | 3a8a401356609e7caff1bfd546acf53bec4fce7d | d3fe3be7baf875003defc0001a0a38ddd846b42b | refs/heads/master | 2020-03-20T22:07:29.378780 | 2018-12-29T20:07:37 | 2018-12-29T20:07:37 | 137,780,455 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib.pyplot as plt
def vectorize_data(data, dimension = 10000):
vectors = np.zeros((len(data), dimension))
for i, sample in enumerate(data):
vectors[i, sample] = 1
return vectors
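# Note: vectorize_data above builds a multi-hot encoding -- row i gets a 1 at every
# index listed in data[i] -- so integer-index sequences (e.g. word indices) can be
# fed to a dense network.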
def graph_train_history(history):
import matplotlib.pyplot as plt
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
acc = history_dict['acc']
val_acc = history_dict['val_acc']
epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, 'bo', label = 'Training loss')
plt.plot(epochs, val_loss_values, 'b', label = 'Validation loss')
plt.plot(epochs, acc, 'ro', label = 'Training accuracy')
plt.plot(epochs, val_acc, 'r', label = 'Validation accuracy')
plt.title('Training and Validation Loss and Accuracy')
plt.xlabel('Epochs')
plt.legend()
plt.show() | UTF-8 | Python | false | false | 916 | py | 12 | utils.py | 10 | 0.66048 | 0.651747 | 0 | 28 | 31.75 | 69 |
TheOneTest/Manage | 11,184,094,874,092 | e9841af435765491f5e0a561c50c65a51672ec77 | 2925b7c26be1c4f407dc7ca3fa5f185faae5b91a | /Interface/interface_project/main.py | ffbc714911c108000b3d2dda50a12dc22e8bb7da | []
| no_license | https://github.com/TheOneTest/Manage | 06429848973c99ed9d77b015e7d5e53477aabd78 | 7f73aebb4401d29408928a75bbff379d78e55ae0 | refs/heads/master | 2021-01-22T12:17:12.724366 | 2017-10-23T07:55:41 | 2017-10-23T07:55:41 | 102,346,083 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'shouke'
import datetime
import json
import time
import configparser
import os.path
from globalpkg.log import logger
from globalpkg.globalpy import testdb
from globalpkg.globalpy import mytestlink
from globalpkg.globalpy import testcase_report_tb
from globalpkg.globalpy import case_step_report_tb
from globalpkg.globalpy import other_tools
from globalpkg.globalpy import executed_history_id
from config.runmodeconfig import RunModeConfig
from testsuite import TestSuite
from testplan import TestPlan
from testproject import TestProject
from httpprotocol import MyHttp
from htmlreporter import HtmlReport
from sendmail import MyMail
if __name__ == '__main__':
    # Record the test start time
start_time = datetime.datetime.now()
create_testcase_reporter_tb_sql = 'CREATE TABLE IF NOT EXISTS ' + testcase_report_tb + '\
(id INT NOT NULL PRIMARY KEY AUTO_INCREMENT,\
executed_history_id varchar(50) NOT NULL,\
testcase_id int NOT NULL,\
testcase_name varchar(40) NOT NULL,\
testsuit varchar(40),\
testplan varchar(40),\
project varchar(40),\
runresult varchar(20),\
runtime datetime)'
create_case_step_reporter_tb_sql = 'CREATE TABLE IF NOT EXISTS ' + case_step_report_tb + '\
(id INT NOT NULL PRIMARY KEY AUTO_INCREMENT,\
executed_history_id varchar(50) NOT NULL,\
testcase_id int NOT NULL,\
testcase_name varchar(40) NOT NULL,\
testplan varchar(40) NOT NULL,\
project varchar(40) NOT NULL,\
step_id int,\
step_num int NOT NULL,\
step_action varchar(1000), \
expected_results varchar(1000),\
runresult varchar(10),\
reason varchar(2000),\
protocol_method varchar(40) ,\
protocol varchar(40),\
host varchar(40),\
port int,\
runtime datetime)'
    logger.info('Creating the test case report table')
    testdb.execute_create(create_testcase_reporter_tb_sql)
    logger.info('Creating the test step report table')
    testdb.execute_create(create_case_step_reporter_tb_sql)
    logger.info('Reading the run-mode configuration')
run_mode_conf = RunModeConfig('./config/runmodeconfig.conf')
run_mode = int(run_mode_conf.get_run_mode())
if 1 == run_mode:
        logger.info('Running tests by project')
project_mode = run_mode_conf.get_project_mode()
testplans_name_list = []
if 1 == project_mode:
            logger.info('Running all projects')
projects = mytestlink.getProjects()
for project in projects:
                # Build the project object
active_status = project['active']
project_name = project['name']
project_notes = other_tools.conver_date_from_testlink(project['notes'])
project_id = int(project['id'])
project_obj = TestProject(active_status, project_name, project_notes, project_id)
                logger.info('Reading the protocol/host/port configuration of test project [id:%s, project:%s]...' % (project_id, project_name))
                testproject_conf = project_notes
                logger.info('Successfully read the configuration: %s' % testproject_conf)
                if '' == testproject_conf:
                    logger.error('Test project [id:%s, project:%s] has no protocol/host/port configured and cannot be executed for now' % (project_id, project_name))
continue
try:
notes = json.loads(testproject_conf)
protocol = notes['protocol']
host = notes['host']
port = notes['port']
except Exception as e:
                    logger.error('Test project [id:%s, project:%s] has an invalid protocol/host/port configuration and cannot be executed for now: %s' % (project_id, project_name, e))
continue
                # Build the http object
                myhttp = MyHttp(protocol, host, port)
                logger.info('Executing test project [id:%s, project:%s]' % (project_id, project_name))
project_obj.run_testproject(myhttp)
elif 2 == project_mode:
            logger.info('Running the specified projects')
testprojects_name_list = eval(run_mode_conf.get_projects())
for testproject_name in testprojects_name_list:
try:
testproject = mytestlink.getTestProjectByName(testproject_name)
except Exception as e:
                    logger.error('Failed to fetch test project [project:%s], cannot execute it for now: %s' % (testproject_name, e))
continue
                # Build the project object
active_status = testproject['active']
project_name = testproject['name']
project_notes = other_tools.conver_date_from_testlink(testproject['notes'])
project_id = int(testproject['id'])
project_obj = TestProject(active_status, project_name, project_notes, project_id)
                logger.info('Reading the protocol/host/port configuration of test project [id:%s, project:%s]...' % (project_id, project_name))
                testproject_conf = project_notes
                logger.info('Successfully read the configuration: %s' % testproject_conf)
                if '' == testproject_conf:
                    logger.error('Test project [id:%s, project:%s] has no protocol/host/port configured and cannot be executed for now' % (project_id, project_name))
continue
try:
notes = json.loads(testproject_conf)
protocol = notes['protocol']
host = notes['host']
port = notes['port']
except Exception as e:
                    logger.error('Test project [id:%s, project:%s] has an invalid protocol/host/port configuration and cannot be executed for now: %s' % (project_id, project_name, e))
continue
                # Build the http object
                myhttp = MyHttp(protocol, host, port)
                logger.info('Executing test project [id:%s, project:%s]' % (project_id, project_name))
project_obj.run_testproject(myhttp)
elif 2 == run_mode:
        logger.info('Running tests by test plan')
project_of_plans = run_mode_conf.get_project_of_testplans()
testplans_name_list = run_mode_conf.get_testplans()
testplans_name_list = eval(testplans_name_list)
        logger.info('Got the configured project name [name:%s] and its test plan name list [list=%s]' % (project_of_plans, testplans_name_list))
for testplan in testplans_name_list:
            # Build the test plan object
try:
testplan_info = mytestlink.getTestPlanByName(project_of_plans, testplan)
except Exception as e:
                logger.error('Failed to fetch test plan [project:%s,testplan:%s], cannot execute it for now: %s' % (project_of_plans, testplan, e))
continue
testplan_name = testplan_info[0]['name']
testplan_id = int(testplan_info[0]['id'])
active_status = int(testplan_info[0]['active'])
notes = other_tools.conver_date_from_testlink(testplan_info[0]['notes'])
testplan_obj = TestPlan(testplan_name, testplan_id, active_status, notes, project_of_plans)
            logger.info('Reading the protocol/host/port configuration of test plan [project:%s,testplan:%s]...' % (project_of_plans, testplan))
            testplan_conf = testplan_obj.notes # get the plan's basic configuration info
            logger.info('Successfully read the configuration: %s' % testplan_conf)
            if '' == testplan_conf:
                logger.error('Test plan [project:%s,testplan:%s] has no protocol/host/port configured and cannot be executed for now' % (project_of_plans, testplan))
continue
try:
notes = json.loads(testplan_conf)
protocol = notes['protocol']
host = notes['host']
port = notes['port']
except Exception as e:
                logger.error('Test plan [project:%s,testplan:%s] has an invalid protocol/host/port configuration and cannot be executed for now: %s' % (project_of_plans, testplan, e))
                continue
            # Build the http object
            myhttp = MyHttp(protocol, host, port)
            logger.info('Executing test plan [project:%s,testplan:%s]' % (project_of_plans, testplan))
testplan_obj.run_testplan(myhttp)
elif 3 == run_mode:
        logger.info('Running tests by test suite')
        testsuits_id_list = run_mode_conf.get_testsuits()
        logger.info('Got the configured test suite id list: %s' % testsuits_id_list)
testsuits_id_list = eval(testsuits_id_list)
for testsuite_id in testsuits_id_list:
            # Build the test suite object
try:
testsuite_info = mytestlink.getTestSuiteByID(testsuite_id)
except Exception as e:
                logger.error('Test suite [id=%s] does not exist, cannot execute it for now' % testsuite_id)
continue
testsuite_name = testsuite_info['name']
testsuite_details = other_tools.conver_date_from_testlink(testsuite_info['details'])
project = mytestlink.getFullPath(testsuite_id)
project = project[str(testsuite_id)][0]
testsuite_obj = TestSuite(testsuite_id, testsuite_name, testsuite_details, project)
            logger.info('Reading the protocol/host/port configuration of test suite [id=%s,name=%s]...' % (testsuite_id, testsuite_name))
            testsuite_conf = testsuite_obj.get_testsuite_conf() # get the suite's basic configuration info
            if '' == testsuite_conf:
                logger.error('Test suite [id=%s ,name=%s] has no protocol/host/port configured and cannot be executed for now' % (testsuite_id, testsuite_name))
continue
try:
details = json.loads(testsuite_conf)
protocol = details['protocol']
host = details['host']
port = details['port']
except Exception as e:
                logger.error('Test suite [id=%s ,name=%s] has an invalid protocol/host/port configuration, not executed: %s'% (testsuite_id, testsuite_name, e))
                continue
            # Build the http object
            myhttp = MyHttp(protocol, host, port)
            logger.info('Executing test suite [id=%s ,name=%s]' % (testsuite_id, testsuite_name))
testsuite_obj.run_testsuite(myhttp)
elif 4 == run_mode:
pass
        # TBD
    logger.info('Interface tests finished, closing the database connection')
testdb.close()
    # Record the test end time
end_time = datetime.datetime.now()
    # Build the test report
html_report = HtmlReport('test report', 'interface_autotest_report')
    html_report.set_time_took(str(end_time - start_time)) # compute how long the tests took
    # Read the test report path and file name
config = configparser.ConfigParser()
config.read('./config/report.conf', encoding='utf-8')
dir_of_report = config['REPORT']['dir_of_report']
report_name = config['REPORT']['report_name']
    # Set the report output path
html_report.mkdir_of_report(dir_of_report)
    # Generate the test report
html_report.generate_html(report_name)
    logger.info('Test report generated successfully')
    # Send the report email --- commented out for now 2017-10-11
# mymail = MyMail('./config/mail.conf')
# mymail.connect()
# mymail.login()
    # mail_content = 'Hi, the attached file is the interface test report, please review it'
    # mail_tiltle = '[Test Report] Interface test report '+ str(executed_history_id)
# logger.info(html_report.get_filename())
# attachments = set([html_report.get_filename()])
#
    # logger.info('Sending the test report email...')
# mymail.send_mail(mail_tiltle, mail_content, attachments)
# mymail.quit()
#
    # logger.info('Email sent successfully')
| UTF-8 | Python | false | false | 12,989 | py | 64 | main.py | 47 | 0.538369 | 0.533127 | 0 | 267 | 42.558052 | 120 |
Serjeel-Ranjan-911/CSES-Solution | 3,977,139,736,577 | 85099a36f03f52693b1735b3714cfd68d93e0068 | 09269ba53ccd65b562712dbaad48b27dd173a835 | /Introductory Problem/Two Knights/sol.py | fa808d2bc6b01433242dea58c7266b64624d5cb3 | []
| no_license | https://github.com/Serjeel-Ranjan-911/CSES-Solution | 85f6b88e55a8586f672c6d0db810a13422404684 | 3117b37766c035b6dababd4d6b8055321d8e1b37 | refs/heads/main | 2023-09-04T23:03:42.588457 | 2021-11-25T10:07:25 | 2021-11-25T10:07:25 | 404,083,112 | 7 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | n = int(input())
for k in range(1,n+1):
print(k*k*(k*k-1)//2 - 4*(k-1)*(k-2)) | UTF-8 | Python | false | false | 82 | py | 220 | sol.py | 121 | 0.47561 | 0.390244 | 0 | 4 | 19.75 | 41 |
imsardine/learning | 15,977,278,363,609 | 6e3ed7dac21cab595c08eb4005b6ad681e5238bd | b43c6c03eea348d68d6582c3594760bbe0ecaa08 | /gitlab/tests/test_projects.py | 5974390eecf83e66fea1ccf592b043330888fc5f | [
"MIT"
]
| permissive | https://github.com/imsardine/learning | 1b41a13a4c71c8d9cdd8bd4ba264a3407f8e05f5 | 925841ddd93d60c740a62e12d9f57ef15b6e0a20 | refs/heads/master | 2022-12-22T18:23:24.764273 | 2020-02-21T01:35:40 | 2020-02-21T01:35:40 | 24,145,674 | 0 | 0 | MIT | false | 2022-12-14T20:43:28 | 2014-09-17T13:24:37 | 2020-02-21T01:35:54 | 2022-12-14T20:43:25 | 2,751 | 0 | 0 | 15 | Python | false | false | def test_projects(gitlab):
projects = gitlab.get('/projects')
anyproj = projects[0]
assert anyproj['ssh_url_to_repo'].startswith('git@')
assert anyproj['http_url_to_repo'].startswith('https://') | UTF-8 | Python | false | false | 211 | py | 313 | test_projects.py | 218 | 0.668246 | 0.663507 | 0 | 6 | 34.333333 | 61 |
xiaoting610/lab-program | 2,173,253,477,449 | 0df4e4c1f83ab7696b2f23484aca628e361e1948 | 74204883e72167eeedefc6b407c61f25b49ba07c | /imagej.script/find.trakem2.neurons_.py | c3a78803dc09a0cbd1ffaf3c3010293d0fcae7bd | []
| no_license | https://github.com/xiaoting610/lab-program | d2bb6b81b043e2b39ed1cb9c44199d98e60f85cb | 2a1e9a31776b1dbe02fd33690a0223c194552536 | refs/heads/master | 2021-01-07T18:48:42.608245 | 2015-09-08T15:35:21 | 2015-09-08T15:35:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # For output a list of neurons in currently active TrakEM2 project
# with the same sequence as seen in project tab
# Change output filename at the bottom
#
# author Bangyu Zhou, 2013 Dec 10
from ini.trakem2.display import AreaList, Display, AreaTree, Connector
import csv
from jarray import array
def getNodeXYZ(nd, cal, at):
# get node coordinates, from tut on web
fp = array([nd.getX(), nd.getY()], 'f')
at.transform(fp, 0, fp, 0, 1)
x = fp[0] * cal.pixelWidth
y = fp[1] * cal.pixelHeight
z = nd.getLayer().getZ() * cal.pixelWidth # a TrakEM2 oddity
return [x, y, z]
def getTreeDistanceTable(tree):
# calculate and store each node's distance to its parent node
dTable = {}
layerset = tree.getLayerSet()
calibration = layerset.getCalibration()
affine = tree.getAffineTransform()
root = tree.getRoot()
if root is None:
return
for nd in root.getSubtreeNodes():
x, y, z = getNodeXYZ(nd, calibration, affine)
if nd.getParent() is None:
# root
xp, yp, zp = [x, y, z]
else:
xp, yp, zp = getNodeXYZ(nd.getParent(), calibration, affine)
d = ((x-xp)**2 + (y-yp)**2 + (z-zp)**2) ** (0.5)
dTable[nd] = d
return dTable
header = ['neurite', 'neuron', 'areatreeIds', 'nNodes', 'length']
foundNeurons = [header]
# get the first open project, project root, and 'drosophila_brain'
project = Project.getProjects().get(0)
projectRoot = project.getRootProjectThing()
brain = projectRoot.findChildrenOfType("drosophila_brain")[0]
neurons = brain.findChildrenOfType("neuron")
for neuron in neurons:
neurites = neuron.findChildrenOfType("neurite")
for neurite in neurites:
areatrees = neurite.findChildrenOfType("areatree")
nNodes = 0
areatreeIds = []
length = 0
for areatree in areatrees:
areatree = areatree.getObject()
areatreeIds.append(areatree.getId())
dt = getTreeDistanceTable(areatree)
length += sum([d for nd, d in dt.iteritems()])
root = areatree.getRoot()
if root is None:
continue
nNodes += len(root.getSubtreeNodes())
foundNeurons.append([neurite.getTitle(), neuron.getTitle(), areatreeIds, nNodes, length])
outfile = open('neurons.csv','wb')
writer = csv.writer(outfile)
writer.writerows(foundNeurons)
outfile.close()
| UTF-8 | Python | false | false | 2,426 | py | 92 | find.trakem2.neurons_.py | 46 | 0.637675 | 0.628195 | 0 | 70 | 33.657143 | 97 |
SirGnip/arcade_screensaver_framework | 3,195,455,694,802 | 77b6453265e710a91e77691e0ff45951a9e8ca7f | 2bbf88d60c6100238a004579fcbf514ed61eaf0e | /src/arcade_screensaver_framework/screensaver_framework.py | 157e4f3e8cf4c1e26465e83a0c003c50df92e39c | []
| no_license | https://github.com/SirGnip/arcade_screensaver_framework | 842a8096866170b0413c7154352a8a62bdff0041 | e48bab7908b42221e249f3f5c80f8b12a333d27d | refs/heads/master | 2023-01-02T06:14:00.015972 | 2020-10-26T21:58:20 | 2020-10-26T21:58:20 | 296,334,077 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import ctypes
from pathlib import Path
import pyglet
_all_windows = []
# Event handlers that can be applied to instances of Arcade.Window and Pyglet.window.Window
def _on_keyboard_press(self, symbol, modifiers):
_close_all_windows()
def _on_mouse_press(self, x, y, button, modifiers):
_close_all_windows()
def _on_mouse_motion(self, x, y, dx, dy):
# A Window almost always gets an initial on_mouse_motion event when window opens.
# Ignore the first motion event. I think a motion event is triggered when the
# window is opening and the mouse cursor is already inside the window's boundary.
if self.first_mouse_motion_event:
self.first_mouse_motion_event = False
return
_close_all_windows()
def _on_close(self):
_close_all_windows()
def _close_all_windows():
for win in _all_windows:
win.close()
def _get_preferred_screen(screens):
"""Choose the screen with the most pixels to show the screensaver on"""
ordered_screens = [(s.width*s.height, idx, s) for idx, s in enumerate(screens)]
ordered_screens.sort() # sort by # of pixels, then screen index, then object
return ordered_screens[-1][2] # return screen object from end of sorted list
def _make_windows(screensaver_window_class, is_fullscreen, win_kwargs):
# Monkeypatch Arcade and Pyglet window classes (for easier code-reuse)
screensaver_window_class.on_key_press = _on_keyboard_press
screensaver_window_class.on_mouse_press = _on_mouse_press
screensaver_window_class.on_mouse_motion = _on_mouse_motion
screensaver_window_class.on_close = _on_close
pyglet.window.Window.on_key_press = _on_keyboard_press
pyglet.window.Window.on_mouse_press = _on_mouse_press
pyglet.window.Window.on_mouse_motion = _on_mouse_motion
pyglet.window.Window.on_close = _on_close
display = pyglet.canvas.get_display()
screens = display.get_screens()
preferred_screen = _get_preferred_screen(screens)
main_win = None
for screen in screens:
if screen == preferred_screen:
# Arcade managed screen with screen saver on it
print("Preferred screen:", screen)
win = screensaver_window_class(fullscreen=is_fullscreen, screen=screen, **win_kwargs)
main_win = win
else:
# Blank Pyglet windows will be used for all non-primary screens
print("Secondary screen:", screen)
win = pyglet.window.Window(fullscreen=is_fullscreen, screen=screen)
win.set_mouse_visible(False)
win.first_mouse_motion_event = True
_all_windows.append(win)
return main_win
def create_screensaver_window(screensaver_window_class, **win_kwargs):
forbidden_kwargs = {"fullscreen", "screen"}
invalid_kwargs = forbidden_kwargs.intersection(set(win_kwargs))
if any(invalid_kwargs):
raise Exception(f"Detected forbidden keyword argument(s) passed to create_screensaver_window() in 'win_kwargs': {invalid_kwargs}. These arguments are controlled by arcade_screensaver_framework.")
# Microsoft Windows screen saver command line arguments: https://docs.microsoft.com/en-us/troubleshoot/windows/win32/screen-saver-command-line
if len(sys.argv) >= 2 and sys.argv[1].startswith("/p"):
# generate mini-screen preview for screen saver
pass # skip preview
elif len(sys.argv) >= 2 and sys.argv[1].startswith("/c"):
# settings dialog box
name = Path(sys.argv[0]).stem
MB_ICONINFORMATION = 0x00000040
ctypes.windll.user32.MessageBoxW(0, "This screen saver has no options that you can set.", f"{name} Screen Saver", MB_ICONINFORMATION)
elif len(sys.argv) >= 2 and sys.argv[1] == "/s":
# run screen saver in fullscreen mode
main_win = _make_windows(screensaver_window_class, True, win_kwargs)
return main_win
else:
# run screen saver in windowed mode (no arguments)
main_win = _make_windows(screensaver_window_class, False, win_kwargs)
return main_win
| UTF-8 | Python | false | false | 4,050 | py | 9 | screensaver_framework.py | 6 | 0.686914 | 0.681235 | 0 | 98 | 40.326531 | 203 |
rueckstiess/mtools | 6,614,249,649,121 | d9bbcc602f15c01decaa8ec32cd070f3246ab8ab | 3cf8454257ce93926a39bd379a2f0f7a61a860e8 | /mtools/mlogfilter/filters/mask_filter.py | bd6935690462df9fee110a4c63b75e3f14b3ad31 | [
"Apache-2.0"
]
| permissive | https://github.com/rueckstiess/mtools | 12e6e73a06934bd49dc53623162fd70f6f676905 | 370e598527949077859dbe4a9fcfe0b9c9f2d1d2 | refs/heads/develop | 2023-08-25T02:16:05.206326 | 2023-05-02T04:49:18 | 2023-05-02T05:57:19 | 5,806,020 | 1,654 | 396 | Apache-2.0 | false | 2023-09-04T03:59:53 | 2012-09-14T07:28:16 | 2023-09-02T16:17:10 | 2023-08-08T11:34:54 | 21,521 | 1,824 | 379 | 74 | Python | false | false | from datetime import timedelta
from .datetime_filter import DateTimeFilter
from mtools.util.cmdlinetool import InputSourceAction
class MaskFilter(DateTimeFilter):
"""
MaskFilter class.
This filter takes an argument `--mask <LOGFILE>` and another optional
argument `--mask-size <SECS>`. It will read <LOGFILE> and for each of the
lines extract the datetimes (let's call these "events"). It will add some
padding for each of these events, <SECS>/2 seconds on either side.
MaskFilter will then accept every line from the original log file
(different to <LOGFILE>), that lies within one of these masked intervals.
This feature is very useful to find all correlating lines to certain
events.
For example, find all assertions in a log file, then find all log lines
surrounding these assertions:
grep "assert" mongod.log > assertions.log
mlogfilter mongod.log --mask assertions.log --mask-size 60
"""
filterArgs = [
('--mask', {'action': 'store', 'type': InputSourceAction('rb'),
'help': ('source (log file or system.profile db) '
'to create the filter mask.')}),
('--mask-size', {'action': 'store', 'type': int, 'default': 60,
'help': ('mask size in seconds around each filter '
'point (default: 60 secs, 30 on each side '
'of the event)')}),
('--mask-center', {'action': 'store',
'choices': ['start', 'end', 'both'],
'default': 'end',
'help': ('mask center point for events with '
'duration (default: end). If both is '
'chosen, all events from start to end '
'are returned.')})
]
def __init__(self, mlogfilter):
"""
Constructor.
Init superclass and mark this filter active if `mask`
argument is present.
"""
DateTimeFilter.__init__(self, mlogfilter)
self.active = ('mask' in self.mlogfilter.args and
self.mlogfilter.args['mask'] is not None)
if self.active:
self.mask_end_reached = False
self.mask_source = self.mlogfilter.args['mask']
self.mask_list = []
def setup(self):
"""
Create mask list.
Consists of all tuples between which this filter accepts lines.
"""
# get start and end of the mask and set a start_limit
if not self.mask_source.start:
raise SystemExit("Can't parse format of %s. Is this a log file or "
"system.profile collection?"
% self.mlogfilter.args['mask'])
self.mask_half_td = timedelta(seconds=self.mlogfilter.args
['mask_size'] / 2)
# load filter mask file
logevent_list = list(self.mask_source)
# define start and end of total mask
self.mask_start = self.mask_source.start - self.mask_half_td
self.mask_end = self.mask_source.end + self.mask_half_td
# consider --mask-center
if self.mlogfilter.args['mask_center'] in ['start', 'both']:
if logevent_list[0].duration:
self.mask_start -= timedelta(milliseconds=logevent_list[0]
.duration)
if self.mlogfilter.args['mask_center'] == 'start':
if logevent_list[-1].duration:
self.mask_end -= timedelta(milliseconds=logevent_list[-1]
.duration)
self.start_limit = self.mask_start
# different center points
if 'mask_center' in self.mlogfilter.args:
if self.mlogfilter.args['mask_center'] in ['start', 'both']:
starts = ([(le.datetime - timedelta(milliseconds=le.duration))
if le.duration is not None else le.datetime
for le in logevent_list if le.datetime])
if self.mlogfilter.args['mask_center'] in ['end', 'both']:
ends = [le.datetime for le in logevent_list if le.datetime]
if self.mlogfilter.args['mask_center'] == 'start':
event_list = sorted(starts)
elif self.mlogfilter.args['mask_center'] == 'end':
event_list = sorted(ends)
elif self.mlogfilter.args['mask_center'] == 'both':
event_list = sorted(zip(starts, ends))
mask_list = []
if len(event_list) == 0:
return
start_point = end_point = None
for e in event_list:
if start_point is None:
start_point, end_point = self._pad_event(e)
continue
next_start = (e[0] if type(e) == tuple else e) - self.mask_half_td
if next_start <= end_point:
end_point = ((e[1] if type(e) == tuple else e) +
self.mask_half_td)
else:
mask_list.append((start_point, end_point))
start_point, end_point = self._pad_event(e)
if start_point:
mask_list.append((start_point, end_point))
self.mask_list = mask_list
def _pad_event(self, event):
if type(event) == tuple:
start_point = event[0] - self.mask_half_td
end_point = event[1] + self.mask_half_td
else:
start_point = event - self.mask_half_td
end_point = event + self.mask_half_td
return start_point, end_point
def accept(self, logevent):
"""
Process line.
Overwrite BaseFilter.accept() and return True if the provided
logevent should be accepted (causing output), or False if not.
"""
dt = logevent.datetime
if not dt:
return False
mask = next((mask for mask in self.mask_list
if mask[0] < dt and mask[1] > dt), None)
return True if mask else False
def skipRemaining(self):
"""
Skip remaining lines.
Overwrite BaseFilter.skipRemaining() and return True if all lines
from here to the end of the file should be rejected (no output).
"""
return self.mask_end_reached
| UTF-8 | Python | false | false | 6,464 | py | 94 | mask_filter.py | 67 | 0.539913 | 0.536665 | 0 | 170 | 37.023529 | 79 |
rafanthx13/ultimate_ds | 11,510,512,390,761 | 8c3e70cca66aa354932e09e7bc34a12df6e558aa | af2c959f928f9a5167cc0a4c3a9529403f7d4768 | /Courses/Old/udemy-scrapy-web-scraping/src/my_code/coursera/coursera/spiders/edx_login.py | ebc365f3a8b8e52e2848464e1a118d17b4cc562d | []
| no_license | https://github.com/rafanthx13/ultimate_ds | 14acc99de6d8cecec0286896a290d4dc2b9efa3b | 6ee7ae47c97b2b97139a916736d1c1fc9703ece6 | refs/heads/master | 2020-07-03T00:11:48.036639 | 2020-07-02T02:51:48 | 2020-07-02T02:51:48 | 201,718,909 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import scrapy
from courses import settings
class EdxLoginSpider(scrapy.Spider):
name = 'edx_login'
allowed_domains = ['courses.edx.org', 'edx.org']
start_urls = [
'https://courses.edx.org/login?next=/dashboard'
]
def parse(self, response):
        # data that will be submitted through the login form
formdata = {
            # settings.* => values defined in the project's settings module
'email': settings.EDX_EMAIL,
'password': settings.EDX_PASSWORD,
}
headers = {}
        # when the dashboard page is requested, the server sends back a CSRF
        # token; it is stored in the cookies, so grab it from there
cookies = response.headers.getlist('Set-Cookie')
        # then search the returned cookie list (it is a list and we don't know which entry holds the token),
        # split the matching cookie in two parts and take the second one
        # note: cookies are formatted as COOKIE: key=value;key2=value2;
csrf_token = ''
for cookie in cookies:
cookie = cookie.decode('utf-8')
if 'csrf' in cookie:
csrf_token = cookie.split(';')[0].split('=')[1]
break
self.log(csrf_token)
        # put the token into the headers, ready for the POST request
headers['X-CSRFToken'] = csrf_token
        # just in case, also send the X-Requested-With header to avoid problems
headers['X-Requested-With'] = 'XMLHttpRequest'
yield scrapy.FormRequest(
url='https://courses.edx.org/user_api/v1/account/login_session/',
method='POST', formdata=formdata, callback=self.parse_login,
headers=headers
)
def parse_login(self, response):
yield scrapy.Request(
url='https://courses.edx.org/dashboard',
callback=self.parse_dashboard
)
def parse_dashboard(self, response):
self.log(response.xpath('//title/text()').extract_first()) | UTF-8 | Python | false | false | 2,033 | py | 110 | edx_login.py | 15 | 0.589277 | 0.585342 | 0 | 52 | 37.134615 | 104 |
czofficial/Price-Prediction-of-Mercedes-Benz-Used-Cars | 13,477,607,389,229 | 9b5dc556d946ade4a14db9ac32321069b334a97f | 63b8836f9fbce5ea98da6844d3c46186e34d24ac | /train.py | b894c1e4975406a134d87db8e6f7b7b343297b58 | []
| no_license | https://github.com/czofficial/Price-Prediction-of-Mercedes-Benz-Used-Cars | 0989a0266d19b81a67268e4c1dececcb677127ec | f254062d593878594b38a321ff84499b2e605e3a | refs/heads/master | 2023-04-08T19:17:17.384080 | 2021-04-20T14:40:53 | 2021-04-20T14:40:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from azureml.data.dataset_factory import TabularDatasetFactory
from azureml.core.run import Run
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import mean_absolute_error
import argparse
import os
import numpy as np
import joblib
import pandas as pd
# Imports dataset
mbdataset = TabularDatasetFactory.from_delimited_files("https://raw.githubusercontent.com/czofficial/nd00333-capstone/4a6a4924bdd4c6a188aeb24e0c282bae11c8933b/mercedes.csv")
df = mbdataset.to_pandas_dataframe()
# Cleans dataset
def clean_data(df):
x_df = df
x_df = pd.get_dummies(df, columns=['model', 'transmission', 'fuelType'])
y_df = x_df.pop("price")
return x_df,y_df
x, y = clean_data(df)
# Splits dataset into train and test
x_train,x_test,y_train,y_test = train_test_split(x,y)
run = Run.get_context()
def main():
# Adds arguments
parser = argparse.ArgumentParser()
parser.add_argument('--max_depth',
type=int,
default=1,
help="The maximum depth of the tree.")
parser.add_argument('--min_samples_split',
type=int,
default=2,
help="The minimum number of samples required to split an internal node.")
parser.add_argument('--min_samples_leaf',
type=int,
default=1,
help="The minimum number of samples required to be at a leaf node.")
args = parser.parse_args()
run.log("max_depth:", np.int(args.max_depth))
run.log("min_samples_split:", np.int(args.min_samples_split))
run.log("min_samples_leaf:", np.int(args.min_samples_leaf))
# Trains random forest
model = RandomForestRegressor(
max_depth=args.max_depth,
min_samples_split=args.min_samples_split,
min_samples_leaf=args.min_samples_leaf).fit(x_train, y_train)
# Calculates MAE
y_pred = model.predict(x_test)
mae = mean_absolute_error(y_test, y_pred)
run.log('mae', np.int(mae))
# Saves the model
os.makedirs('outputs', exist_ok=True)
joblib.dump(value=model, filename='outputs/hd-model.pkl')
if __name__ == '__main__':
main() | UTF-8 | Python | false | false | 2,332 | py | 4 | train.py | 1 | 0.64494 | 0.631647 | 0 | 76 | 29.697368 | 173 |
sarcDV/tmp-PMC-scripts | 15,762,530,015,498 | aaf8ab241c32080e6ab8749ad73fd05da4d1a252 | 9216cf0a1122302ed3f61fe005f0d25c266e402d | /statistics-logs-PMC/plot_peaks_patterns_PMC.py | 1194bb361f2404f404d75838181a32908a35d397 | []
| no_license | https://github.com/sarcDV/tmp-PMC-scripts | 0725df1fb538507b184c83a6e9c71db72dcc3b51 | c57296b4abd8c442eee7351f1be7288ae658fc5c | refs/heads/main | 2023-03-29T11:49:45.410576 | 2021-04-11T17:36:50 | 2021-04-11T17:36:50 | 327,372,301 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import glob, os
import math
import scipy.io as sio
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import find_peaks
from scipy.signal import medfilt
from skimage.feature.peak import peak_local_max
import scipy.stats as stats
def data_loader(input_off, input_on):
time_x = np.squeeze(np.array(input_off['logdata_OFF_realtime']))
y_off_tr_x =np.squeeze(np.array(input_off['logdata_OFF_realtimex']))
y_off_tr_y =np.squeeze(np.array(input_off['logdata_OFF_realtimey']))
y_off_tr_z =np.squeeze(np.array(input_off['logdata_OFF_realtimez']))
y_on_tr_x =np.squeeze(np.array(input_on['logdata_ON_realtimex']))
y_on_tr_y =np.squeeze(np.array(input_on['logdata_ON_realtimey']))
y_on_tr_z =np.squeeze(np.array(input_on['logdata_ON_realtimez']))
y_off_rot_x =np.squeeze(np.array(input_off['logdata_OFF_realtimeRx']))
y_off_rot_y =np.squeeze(np.array(input_off['logdata_OFF_realtimeRy']))
y_off_rot_z =np.squeeze(np.array(input_off['logdata_OFF_realtimeRz']))
y_on_rot_x =np.squeeze(np.array(input_on['logdata_ON_realtimeRx']))
y_on_rot_y =np.squeeze(np.array(input_on['logdata_ON_realtimeRy']))
y_on_rot_z =np.squeeze(np.array(input_on['logdata_ON_realtimeRz']))
check_size_off_ = int(np.asarray(y_off_tr_x.shape))
check_size_on_ = int(np.asarray(y_on_tr_x.shape))
if (check_size_off_ > check_size_on_):
y_off_tr_x = y_off_tr_x[check_size_off_ -check_size_on_:]
y_off_tr_y = y_off_tr_y[check_size_off_ -check_size_on_:]
y_off_tr_z = y_off_tr_z[check_size_off_ -check_size_on_:]
y_off_rot_x = y_off_rot_x[check_size_off_ -check_size_on_:]
y_off_rot_y = y_off_rot_y[check_size_off_ -check_size_on_:]
y_off_rot_z = y_off_rot_z[check_size_off_ -check_size_on_:]
elif (check_size_off_ < check_size_on_):
y_on_tr_x = y_on_tr_x[check_size_on_ -check_size_off_:]
y_on_tr_y = y_on_tr_y[check_size_on_ -check_size_off_:]
y_on_tr_z = y_on_tr_z[check_size_on_ -check_size_off_:]
y_on_rot_x = y_on_rot_x[check_size_on_ -check_size_off_:]
y_on_rot_y = y_on_rot_y[check_size_on_ -check_size_off_:]
y_on_rot_z = y_on_rot_z[check_size_on_ -check_size_off_:]
return y_off_tr_x, y_off_tr_y, y_off_tr_z, y_off_rot_x, y_off_rot_y, y_off_rot_z, \
y_on_tr_x, y_on_tr_y, y_on_tr_z, y_on_rot_x, y_on_rot_y, y_on_rot_z, time_x
sessions = ['Off']#['Off', 'On']
contrasts = ['PD']#['PD', 'T1', 'T2', 'T2s05', 'T2s035', 'T2s025']
offtrx, offtry, offtrz, offrx, offry, offrz = [], [], [], [], [], []
ontrx, ontry, ontrz, onrx, onry, onrz = [], [], [], [], [], []
## PD analysis:
corr_size_ = 19502 ## PD and T2 :19502
# corr_size_ = 80807 ## T1
# corr_size_ = 11038 ## T2s05
# corr_size_ = 15358 ## T2s035
# corr_size_ = 21118 ## T2s025
#filetxt = str(contrasts[0])+"_"+str(sessions[0])+"_haralick.txt"
#myfile = open(filetxt, 'w')
for cont_ in contrasts:
for sess in sessions:
for file in sorted(os.listdir("./"+str(sess)+"/zz_tmp_mat/"+str(cont_)+"/")):
if file.endswith(".mat"):
# print(file)
pmc_off = os.path.join("./"+str(sess)+"/zz_tmp_mat/"+str(cont_)+"/", file)
pmc_on = os.path.join("./On/zz_tmp_mat/"+str(cont_)+"/", file.replace('OFF','ON'))
#print(pmc_off, pmc_on)
tmp_data_OFF = sio.loadmat(pmc_off)
tmp_data_ON = sio.loadmat(pmc_on)
ftrx, ftry, ftrz, frx, fry, frz, ntrx, ntry, ntrz, nrx, nry, nrz, time_x = data_loader(tmp_data_OFF, tmp_data_ON)
# print(time_x.shape)
#print(ftrx.shape, ntrx.shape)
# print(frx.shape, nrx.shape)
## -----------------------------------------
# - pmc off -
ftrx = ftrx[len(ftrx)-corr_size_:]
ftry = ftry[len(ftry)-corr_size_:]
ftrz = ftrz[len(ftrz)-corr_size_:]
frx = frx[len(frx)-corr_size_:]
fry = fry[len(fry)-corr_size_:]
frz = frz[len(frz)-corr_size_:]
# - pmc on -
ntrx = ntrx[len(ntrx)-corr_size_:]
ntry = ntry[len(ntry)-corr_size_:]
ntrz = ntrz[len(ntrz)-corr_size_:]
nrx = nrx[len(nrx)-corr_size_:]
nry = nry[len(nry)-corr_size_:]
nrz = nrz[len(nrz)-corr_size_:]
## -----------------------------------------
offtrx.append(ftrx), offtry.append(ftry), offtrz.append(ftrz),
offrx.append(frx), offry.append(fry), offrz.append(frz)
ontrx.append(ntrx), ontry.append(ntry), ontrz.append(ntrz),
onrx.append(nrx), onry.append(nry), onrz.append(nrz)
## ---------------------------------------------
time_x = time_x[len(time_x)-corr_size_:]
time_x = time_x - time_x[0]
offtrx, offtry, offtrz = np.asarray(offtrx), np.asarray(offtry), np.asarray(offtrz)
offrx, offry, offrz = np.asarray(offrx), np.asarray(offry), np.asarray(offrz)
ontrx, ontry, ontrz = np.asarray(ontrx), np.asarray(ontry), np.asarray(ontrz)
onrx, onry, onrz = np.asarray(onrx), np.asarray(onry), np.asarray(onrz)
# print(ontrx.shape)
# print(np.mean(offtrx, axis=0).shape)
## testing
# ------------------------------------------------- #
# ------------------------------------------------- #
# ------------------------------------------------- #
def peaks_finder(input_):#=[intrx, intry, intrz, inrx, inry, inrz]):
peaks_ = np.array([0])
for ii in input_:
# print(ii.shape)
test_ = ii-ii[0]
prom_ = .2*np.abs(stats.kurtosis(test_))
# 100*np.abs(np.var(test_))
#.2*np.abs(stats.kurtosis(test_)) #2.*np.abs(np.mean(test_))
test_ = medfilt(test_, kernel_size=25)
# peaks, _= find_peaks(test_, threshold=0.5*np.abs(np.mean(test_)), prominence=prom_)
peaks, _= find_peaks(test_, prominence=prom_)
npeaks, _ = find_peaks(1-test_, prominence=prom_)
concpeaks_ = np.concatenate((peaks, npeaks))
peaks_ = np.concatenate((peaks_, concpeaks_))
# print(peaks_.shape)
# peaks_ = np.unique
return np.unique(peaks_[1:])
# --- x ----
# offtrx_ = np.mean(offtrx, axis=0)
# offtrx_ = offtrx_ - offtrx_[0]
# offtrxstd_ = np.std(offtrx, axis=0)
# offtrxstd_ = offtrxstd_ - offtrxstd_[0]
"""
print(offtrx.shape)
test_ = offtrx[0,:]-offtrx[0,0]
print(np.mean(test_), np.std(test_))
prom_ = np.abs(np.mean(test_))
#peaks, properties = find_peaks(test_, prominence=prom_)#0.15)# height=-0.2)#, prominence=1, width=1.5)
#npeaks, nproperties = find_peaks(1-test_, prominence=prom_)#0.15)#, height=0.3)
peaks, _= find_peaks(test_, prominence=prom_)
npeaks, _ = find_peaks(1-test_, prominence=prom_)
concpeaks_ = np.concatenate((peaks, npeaks))
#print(peaks)#, properties)
#skpeaks =peak_local_max(test_, min_distance=500)
plt.figure()
plt.plot(test_)
plt.plot(concpeaks_, test_[concpeaks_],"o")
#plt.plot(peaks, test_[peaks],"x")
#plt.plot(npeaks, test_[npeaks],"o")
#plt.plot(skpeaks, test_[skpeaks], "d")
plt.show()
"""
# print(offtrx.shape)
ssub = 0
eee_ = peaks_finder(input_=[offtrx[ssub,:], offtry[ssub,:], offtrz[ssub,:],
offrx[ssub,:], offry[ssub,:], offrz[ssub,:]])
fff_ = peaks_finder(input_=[ontrx[ssub,:], ontry[ssub,:], ontrz[ssub,:],
onrx[ssub,:], onry[ssub,:], onrz[ssub,:]])
testx_ = offtrx[ssub,:]-offtrx[0,0]
testy_ = offtry[ssub,:]-offtry[0,0]
testz_ = offtrz[ssub,:]-offtrz[0,0]
testrx_ = offrx[ssub,:]-offrx[0,0]
testry_ = offry[ssub,:]-offry[0,0]
testrz_ = offrz[ssub,:]-offrz[0,0]
testfx_ = ontrx[ssub,:]-ontrx[0,0]
testfy_ = ontry[ssub,:]-ontry[0,0]
testfz_ = ontrz[ssub,:]-ontrz[0,0]
testfrx_ = onrx[ssub,:]-onrx[0,0]
testfry_ = onry[ssub,:]-onry[0,0]
testfrz_ = onrz[ssub,:]-onrz[0,0]
print(eee_.shape, fff_.shape)
#plt.figure()
#plt.subplot(211)
#plt.plot(test_)
#plt.subplot(212)
#plt.plot(medfilt(test_, kernel_size=25))
plt.figure()
plt.subplot(231)
plt.plot(testx_)
plt.plot(eee_, testx_[eee_],"o")
plt.plot(testfx_)
plt.plot(fff_, testfx_[fff_], "o")
plt.grid()
plt.subplot(232)
plt.plot(testy_)
plt.plot(eee_, testy_[eee_],"o")
plt.plot(testfy_)
plt.plot(fff_, testfy_[fff_], "o")
plt.grid()
plt.subplot(233)
plt.plot(testz_)
plt.plot(eee_, testz_[eee_],"o")
plt.plot(testfz_)
plt.plot(fff_, testfz_[fff_], "o")
plt.grid()
plt.subplot(234)
plt.plot(testrx_)
plt.plot(eee_, testrx_[eee_],"o")
plt.plot(testfrx_)
plt.plot(fff_, testfrx_[fff_], "o")
plt.grid()
plt.subplot(235)
plt.plot(testry_)
plt.plot(eee_, testry_[eee_],"o")
plt.plot(testfry_)
plt.plot(fff_, testfry_[fff_], "o")
plt.grid()
plt.subplot(236)
plt.plot(testrz_)
plt.plot(eee_, testrz_[eee_],"o")
plt.plot(testfrz_)
plt.plot(fff_, testfrz_[fff_], "o")
plt.grid()
plt.show()
| UTF-8 | Python | false | false | 8,219 | py | 28 | plot_peaks_patterns_PMC.py | 28 | 0.615038 | 0.596788 | 0 | 240 | 33.241667 | 117 |
QuackenbushLab/grand | 9,457,517,996,480 | 41432bae4b6223d3d5389038f5a25021907e399d | be0c6e2071945edcb47ee4f3fadc1f4629a2c6aa | /grandapp/migrations/0173_sendto.py | 02391a0438d68803b7214420c58170325b9e51b0 | []
| no_license | https://github.com/QuackenbushLab/grand | 9719a395e6a30951c3ffdef1eccdb5e422da737c | f23031d1f240550d25c2842b4af0aae08c653bae | refs/heads/master | 2023-08-10T09:58:58.381264 | 2023-07-25T18:23:26 | 2023-07-25T18:23:26 | 201,113,575 | 5 | 2 | null | false | 2022-06-24T19:11:29 | 2019-08-07T19:18:58 | 2022-04-19T17:09:53 | 2022-06-24T19:11:20 | 32,819 | 3 | 1 | 2 | JavaScript | false | false | # Generated by Django 3.0.2 on 2021-04-20 04:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('grandapp', '0172_auto_20210420_0159'),
]
operations = [
migrations.CreateModel(
name='Sendto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('preload', models.IntegerField(default=0)),
('idd', models.IntegerField(default=0)),
],
),
]
| UTF-8 | Python | false | false | 573 | py | 278 | 0173_sendto.py | 233 | 0.5637 | 0.506108 | 0 | 21 | 26.285714 | 114 |
RKrahl/photo-tools | 3,685,081,978,000 | a6c93d720c358a85794ca1b7d9adca76c0e66435 | 6f84a3a2d0aac9b83d4edf2d1f4b901cf83fdeab | /tests/test_05_createupdate.py | 11530d8483671e3bae07d91b33fd15741db415bf | [
"Apache-2.0"
]
| permissive | https://github.com/RKrahl/photo-tools | b025d50b8353ed499ae4c8f8f242aeeb65cb5cae | 732bad94d2c68f934f48ce3a4a5a25a01a3974c2 | refs/heads/master | 2022-08-31T03:09:29.768681 | 2020-05-03T12:06:07 | 2020-05-03T12:06:07 | 48,946,666 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Add images to an index.
"""
import filecmp
import shutil
import pytest
import photo.index
from conftest import tmpdir, gettestdata
testimgs = [
"dsc_4623.jpg", "dsc_4664.jpg", "dsc_4831.jpg",
"dsc_5126.jpg", "dsc_5167.jpg"
]
testimgfiles = [ gettestdata(i) for i in testimgs ]
refindex = gettestdata("index-create.yaml")
def test_createupdate(tmpdir):
for fname in testimgfiles[:3]:
shutil.copy(fname, str(tmpdir))
with photo.index.Index(imgdir=tmpdir) as idx:
idx.write()
for fname in testimgfiles[3:]:
shutil.copy(fname, str(tmpdir))
with photo.index.Index(idxfile=tmpdir, imgdir=tmpdir) as idx:
idx.write()
idxfile = str(tmpdir / ".index.yaml")
assert filecmp.cmp(refindex, idxfile), "index file differs from reference"
| UTF-8 | Python | false | false | 797 | py | 45 | test_05_createupdate.py | 36 | 0.678795 | 0.651192 | 0 | 28 | 27.464286 | 78 |
Stalinho/Machine-Learning-Web-App-Predicts-Survival-On-The-Titanic | 11,622,181,523,244 | 02ea1e47d02e288c420ce098de927618ec9f5a8d | 6bb3219b1d78cb765186c9467c8820b32e8f94c6 | /titanic.py | 5870a7027f7f6f2b0ec886f4d14c08b9ceaffcfc | []
| no_license | https://github.com/Stalinho/Machine-Learning-Web-App-Predicts-Survival-On-The-Titanic | 9d2660d655df90ff7dbcf0f1deee15a34699942c | 2d35bab7807d2f143a13a07428b7191c9d911328 | refs/heads/main | 2023-01-21T07:31:20.248888 | 2022-09-08T08:17:32 | 2022-09-08T08:17:32 | 318,293,326 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat May 14 18:54:50 2022
@author: hp
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import load_model
from sklearn.model_selection import train_test_split
import streamlit as st
import streamlit.components.v1 as components
from PIL import Image
import pickle
st.title('PREDICT YOUR SURVIVAL ON THE TITANIC')
img = Image.open('titanic.jpg')
st.image(img,width=600, channels='RGB',caption=None)
df = pd.read_csv('titanic.csv')
#drop the name columns
df = df.drop(columns='Name')
#encode the sec column
df.loc[df['Sex']=='male', 'Sex'] = 1
df.loc[df['Sex']=='female', 'Sex'] = 0
#split the data in to independent x and y variables
X = df.drop('Survived', axis =1)
y=df['Survived'].values.astype(np.float32)
X = X.values.astype(np.float32)
X_train, X_val, y_train, y_val = train_test_split(X,y, test_size=0.2, random_state=1)
model = keras.Sequential([
keras.layers.Flatten(input_shape=(6,)),
keras.layers.Dense(6, activation=tf.nn.relu),
keras.layers.Dense(4, activation=tf.nn.relu),
keras.layers.Dense(2, activation=tf.nn.relu),
keras.layers.Dense(1, activation=tf.nn.sigmoid)
])
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['acc'])
model.save('titanic.h5')
history = model.fit(X_train, y_train, epochs=119, batch_size=1,validation_data=(X_val, y_val))
with open('training_history','wb') as file:
pickle.dump(history.history, file)
model = load_model('titanic.h5')
history = pickle.load(open('training_history','rb'))
def survival():
Pclass = st.sidebar.slider('Pclass',1,3)
sex = st.sidebar.selectbox('Sex', ('Male','Female'))
age = st.sidebar.slider('Age',0,80)
n_siblings_spouses = st.sidebar.slider('N_siblings/spouses',0,3)
n_parents_children = st.sidebar.slider('N_parents/children',0,3)
fair = st.sidebar.slider('Fair',0,200)
if sex == 'Male':
sex = 1
else:
sex = 0
data = [[Pclass, sex, age, n_siblings_spouses,n_parents_children,fair]]
data = tf.constant(data)
return data
survive_or_not=survival()
prediction = model.predict(survive_or_not, steps=1)
pred = [round(x[0]) for x in prediction]
if pred ==[0]:
st.write('You did not survive')
else:
st.write('You survived')
def plot_data():
loss_train=history['loss']
loss_val = history['val_loss']
epochs = range(1,120)
fig, ax = plt.subplots()
ax.scatter([0.25],[0.25])
plt.plot(epochs, loss_train,'g', label = 'Training Loss')
plt.plot(epochs, loss_val,'b', label = 'Validation Loss')
    plt.title('Training and Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
st.pyplot(fig)
loss_train =history['acc']
loss_val = history['val_acc']
epochs = range(1,120)
fig, ax = plt.subplots()
ax.scatter([1], [1])
plt.plot(epochs, loss_train, 'g', label='Training Accuracy')
plt.plot(epochs, loss_val, 'b', label='Validation Accuracy')
plt.title('Training and Validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
st.pyplot(fig)
plot_data()
| UTF-8 | Python | false | false | 3,414 | py | 2 | titanic.py | 1 | 0.639426 | 0.618336 | 0 | 116 | 27.431034 | 94 |
MercenariesEngineering/conan_recipes | 6,133,213,311,817 | b906758d73b3f022ae10e54b2f0062c74c28d562 | 7032fd0d1652cc1bec1bff053af4f486a5704cd5 | /old/partio_1.7.4/conanfile.py | b5e278b67b8ee6a98b33b7dd2ee5548d57307e45 | []
| no_license | https://github.com/MercenariesEngineering/conan_recipes | c8f11ddb3bd3eee048dfd476cdba1ef84b85af5e | 514007facbd1777799d17d041fc34dffef61eff8 | refs/heads/master | 2023-07-09T08:10:35.941112 | 2023-04-19T13:36:38 | 2023-04-19T13:36:38 | 169,575,224 | 7 | 1 | null | false | 2023-04-19T14:11:35 | 2019-02-07T13:23:02 | 2023-04-19T13:36:13 | 2023-04-19T14:11:34 | 9,804 | 4 | 0 | 1 | C++ | false | false | from conans import ConanFile, CMake, tools
import os
# mkdir partio_1.7.4
# cd partio_1.7.4/
# conan new partio/1.7.4 --bare
# write this content to conanfile.py
# conan create partio/1.7.4@pierousseau/stable
class partioConan(ConanFile):
name = "partio"
version_base = "1.7"
version_patch = "4"
version = version_base + "." + version_patch
license = ""
url = "https://github.com/wdas/partio"
description = "partio - A library for particle IO and manipulation"
#requires = "SeExpr/2.11@pierousseau/stable"
requires = "zlib/1.2.11@conan/stable"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False]}
default_options = "shared=False"
generators = "cmake"
def source(self):
# https://github.com/wdas/partio/archive/v1.7.4.tar.gz
filename = "v%s.tar.gz" % self.version
tools.download("https://github.com/wdas/partio/archive/%s" % filename, filename)
tools.untargz(filename)
os.unlink(filename)
tools.replace_in_file("partio-%s/CMakeLists.txt" % self.version,
"PROJECT(partio LANGUAGES CXX VERSION 1.5.2)",
"""PROJECT(partio LANGUAGES CXX VERSION 1.5.2)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()""")
tools.replace_in_file("partio-%s/CMakeLists.txt" % self.version,
"find_package(GLUT REQUIRED)",
"""find_package(GLUT QUIET)""")
tools.replace_in_file("partio-%s/CMakeLists.txt" % self.version,
"ADD_SUBDIRECTORY (src/py)",
"""""")
tools.replace_in_file("partio-%s/CMakeLists.txt" % self.version,
"ADD_SUBDIRECTORY (src/tools)",
"""""")
def build(self):
cmake = CMake(self)
cmake.configure(source_dir="%s/partio-%s" % (self.source_folder, self.version))
cmake.build()
def package(self):
self.copy("*.h", dst="include/partio/", keep_path=False)
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
| UTF-8 | Python | false | false | 2,159 | py | 187 | conanfile.py | 135 | 0.614636 | 0.600278 | 0 | 58 | 36.224138 | 88 |
nsssayom/traffic-semi-auto | 2,800,318,680,822 | 4de551ccc04a37ffb658caeeee2bcb342a98f6bf | 1541e71f1cade91cdcd946b35541ae598a1a95c8 | /src/control.py | 194d70236a30d0ff311e68f7b814bc068ffd1ad9 | []
| no_license | https://github.com/nsssayom/traffic-semi-auto | 49169dbaeda73c718a3e7cdb1e85c4d5588ff91c | 5b8a44be304d1e976cd108ba016500c14b298008 | refs/heads/master | 2020-07-09T04:46:52.412075 | 2020-01-12T18:14:26 | 2020-01-12T18:14:26 | 203,882,652 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | version https://git-lfs.github.com/spec/v1
oid sha256:adf6d06867c1a70cb0b00dea233bcecc9375ea4e77d07b01606345ac65b18b66
size 81
| UTF-8 | Python | false | false | 127 | py | 225 | control.py | 139 | 0.88189 | 0.535433 | 0 | 3 | 41.333333 | 75 |
YashashreeKolhe/InformationSecurity | 10,479,720,210,528 | 2ef8eea2e4cafbf5af2ca5efdbc79a5dd65e8a99 | d7ca99cc3eec8ad2afc7ba567ff603c2af4e5e97 | /111508041_Assignment1/decryption.py | be3b8974d00e04d1e26b2eab218089ccd13eb6f3 | []
| no_license | https://github.com/YashashreeKolhe/InformationSecurity | 0fb968771232677ba826ff0a5d7dfcbbc728fd8f | 89677a39b68cfaf5848c72d2a52e859b1931bd96 | refs/heads/master | 2020-03-28T19:43:10.539962 | 2018-12-08T04:48:38 | 2018-12-08T04:48:38 | 149,002,976 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
def main():
fw = open(sys.argv[2], 'w')
fr = open(sys.argv[1], 'r')
for line in fr:
flag = 0
for char in line:
if flag == 0:
decryption_line = char
flag = 1
first_char = ord(char)%10
else:
decryption_line = decryption_line + chr(ord(char) + first_char)
first_char = ord(chr(ord(char) + first_char))%10
fw.write(decryption_line)
fw.flush()
fw.close()
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 581 | py | 17 | decryption.py | 12 | 0.44062 | 0.425129 | 0 | 21 | 26.666667 | 83 |
aryankh1409/Misc-Programs | 15,393,162,790,926 | 30f4aa1eb6888296f278e215a2983d0b2fe74be0 | 47a9e194427015dcd9d05abf8ba93bb37c257aa6 | /Beautiful Soup/navigable_string.py | c1d809385617a92fe87e9821a05810bbbd02021a | [
"MIT"
]
| permissive | https://github.com/aryankh1409/Misc-Programs | 617ec53231deddf7517738a7649127935a976535 | d50a1689a89d331d2567486a5dd22fb2311a0d87 | refs/heads/master | 2023-05-27T20:58:54.876607 | 2021-03-21T14:09:29 | 2021-03-21T14:09:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from bs4 import BeautifulSoup
soup = BeautifulSoup('<p class="nice">Helloooo</p>', 'lxml')
print(soup.prettify())
ptag = soup.find_all('p')[0]
# print(ptag.string) # Navigable String
# print(type(ptag.string))
# # ptag.string = "Meeee"
# # OR
# ptag.string.replace_with('Hilo')
# print(ptag)
u = str(ptag.string)
print(u)
u = "Noo"
print(ptag) | UTF-8 | Python | false | false | 347 | py | 397 | navigable_string.py | 275 | 0.67147 | 0.665706 | 0 | 15 | 22.2 | 60 |
hadasAizik/Closure | 6,734,508,737,913 | a521f3e186634d4ac719557545c2035d087e5099 | 3d01306b88458b484021119cbb8566271c0b6033 | /Closure_Project/Parser/OfflineParser.py | 5ee68d9cb5fd72290293d96dbcd464e9e32b3088 | [
"MIT"
]
| permissive | https://github.com/hadasAizik/Closure | f5c6abba53487baaa932a344e4b110945cdffedb | a69357733c2026e9560b4b95acc0e4988743a00f | refs/heads/master | 2023-06-09T14:33:20.316741 | 2021-06-14T16:54:18 | 2021-06-14T16:54:18 | 377,073,663 | 0 | 0 | MIT | true | 2021-06-15T07:24:35 | 2021-06-15T07:24:34 | 2021-06-14T16:54:21 | 2021-06-14T19:48:26 | 23,594 | 0 | 0 | 0 | null | false | false | import os
from typing import List, Tuple, Dict
from tqdm import tqdm
import utils
from CornerStoneParser import fetch_insert_corner_stones_into_db
from MoonParser import parse_course_detail_page, NothingToParseException, parse_moon, \
NoTrackParsedException
LOGGED_NOT_PARSED = 'bad.txt'
LOGGED_PARSED = 'good.txt'
utils.setup_django_pycharm()
from rest_api.models import Course, Track, CourseGroup
COURSE_DUMP = 'parsed_courses.json'
def parse_track_folder(html_folder: str, data_year: int, dump: bool = False) -> \
Tuple[List[Dict],
List[List[Dict]],
List[List[int]]]:
"""
Parses a folder of html files for tracks, returning parsed tracks, groups and course ids
:param html_folder: folder with huji track files named `<track_id>.json`
:param data_year: year to which the data is relevant
:param dump: whether to dump results
"""
# print(f'x = parsed with tracks\t (x) = parsed without track')
all_tracks: List[Dict] = []
all_groups: List[List[Dict]] = []
all_course_ids: List[List[int]] = []
for file_name in tqdm(os.listdir(html_folder)):
track_id = int(file_name.split('.')[0])
with open(f'{html_folder}/{file_name}', encoding='utf8') as f:
body = f.read()
if len(body) == 6160: # empty file
continue
try:
track, groups, courses = parse_moon(body, track_id, data_year, dump)
all_tracks.append(track)
all_groups.append(groups)
all_course_ids.append(courses)
except NoTrackParsedException:
pass
except ValueError as e:
if str(e) != 'No tables found':
raise e
return all_tracks, all_groups, all_course_ids
def _parse_course_details_html(file_path: str) -> Dict:
result = None
with open(file_path, 'rt', encoding='utf8') as open_file:
try:
read = open_file.read()
result = parse_course_detail_page(read, 2021)
except NothingToParseException:
pass
except Exception as e:
print(file_path + ' ERROR ' + str(e))
raise e
return result
def parse_course_details_folder(dump: bool) -> List[Dict]:
"""
Parses a folder of html files for courses, returning the course details as dictionary
:param write_log_files: log (append) parsing staus to LOGGED_PARSED,LOGGED_NOT_PARSED
:param dump: should dump into COURSE_DUMP, for faster (no need to parse) loading later
:return: list of dictionaries representing courses
"""
print('parsing course detail folder')
results = []
all_file_paths = [os.path.join('course_details_html', f)
for f in os.listdir('course_details_html')]
nonempty_paths = [p for p in all_file_paths if os.stat(p).st_size != 29_589]
for file_path in tqdm(nonempty_paths):
results.append(_parse_course_details_html(file_path))
if dump:
utils.dump_json(results, COURSE_DUMP)
return results
def load_parsed_track(track_values: Dict) -> None:
# print('inserting track #' + str(track_values['track_number']))
Track.objects.update_or_create(**track_values)
def load_parsed_track_folder(folder: str = 'parsed_tracks') -> None:
for f in tqdm(os.listdir(folder)):
load_parsed_track(utils.load_json(os.path.join(folder, f)))
def load_parsed_group(group_values: Dict) -> None:
track_id = group_values['track_id']
track = Track.objects.get(track_number=track_id)
del group_values['track_id']
group_values['track'] = track
course_ids = group_values['course_ids']
del group_values['course_ids']
group, _ = CourseGroup.objects.update_or_create(
track=track,
year_in_studies=group_values['year_in_studies'],
index_in_track_year=group_values['index_in_track_year'],
defaults=group_values)
group.courses.set(Course.objects.filter(course_id__in=course_ids))
group.save()
def load_parsed_groups_folder(folder_path: str = 'parsed_groups'):
for f in tqdm(os.listdir(folder_path)):
path = os.path.join(folder_path,f)
load_parsed_group(utils.load_json(path))
def load_parsed_course(course_values: Dict) -> None:
Course.objects.update_or_create(course_id=course_values['course_id'],
defaults=course_values)
def load_dumped_courses(only_add_new: bool) -> None:
print('loading parsed courses to db')
# noinspection PyTypeChecker
parsed = [c for c in utils.load_json(COURSE_DUMP)
if c is not None] # some are None because of parsing issues
if only_add_new:
existing_ids = {v[1] for v in Course.objects.values_list()}
parsed = [p for p in parsed if p and p['course_id'] not in existing_ids]
for c in tqdm(parsed):
load_parsed_course(c)
def parse_dump_load_all():
parse_course_details_folder(dump=True)
fetch_insert_corner_stones_into_db()
parse_track_folder('tracks_html', 2021, True) # parses groups too
load_all_dumped()
def load_all_dumped():
load_dumped_courses(False)
fetch_insert_corner_stones_into_db()
load_parsed_track_folder()
load_parsed_groups_folder()
if __name__ == '__main__':
parse_dump_load_all()
| UTF-8 | Python | false | false | 5,367 | py | 4 | OfflineParser.py | 3 | 0.634246 | 0.630334 | 0 | 172 | 30.197674 | 92 |
jimbo95-beep/initio | 8,203,387,542,724 | 3c95cf0f67778a85f4d2288d5ac450aa00f8d1db | e5efe81304ab765594b2a88e336a3bd8f67abbe4 | /test/testspeed.py | 98196765c1ba8aa886f2607df712decb9430a5d3 | [
"MIT"
]
| permissive | https://github.com/jimbo95-beep/initio | c4778fd077360b3770d02c1145ed091a27286308 | f5a9d64044d60e8fe0f3f30e91d90464c0cf2e7f | refs/heads/master | 2021-05-27T23:30:30.883099 | 2014-11-09T07:19:21 | 2014-11-09T07:19:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #PiRoCon - 4Tronix Initio - Motor Controller
#Martin O'Hanlon
#www.stuffaboutcode.com
import sys
import time
import RPi.GPIO as GPIO
#motor pins
MOTORAFWRDPIN = 19
MOTORABWRDPIN = 21
MOTORBFWRDPIN = 24
MOTORBBWRDPIN = 26
#encoder pins
MOTORAENCODERPIN = 7
MOTORBENCODERPIN = 11
#motor speed equivalents
# use this if one motor is significantly faster than the other
# to slow down one motor more than the other
#Settings when only powered by the Pi
#MOTORAMAX = 0.8
#MOTORBMAX = 1
MOTORAMAX = 1
MOTORBMAX = 1
#motor states
STATEFORWARD = 1
STATESTOPPED = 0
STATEBACKWARD = -1
#The controller class which manages the motors and encoders
class MotorController:
def __init__(self,
motorAForwardPin = MOTORAFWRDPIN,
motorABackwardPin = MOTORABWRDPIN,
motorBForwardPin = MOTORBFWRDPIN,
motorBBackwardPin = MOTORBBWRDPIN,
motorAEncoderPin = MOTORAENCODERPIN,
motorBEncoderPin = MOTORBENCODERPIN,):
#setup motor classes
self.motorA = Motor(motorAForwardPin, motorABackwardPin, motorAEncoderPin)
self.motorB = Motor(motorBForwardPin, motorBBackwardPin, motorBEncoderPin)
#motor properties
@property
def motorA(self):
return self.motorA
@property
def motorB(self):
return self.motorB
#start
def start(self, powerA, powerB = None):
#if a second power isnt passed in, both motors are set the same
if powerB == None: powerB = powerA
self.motorA.start(powerA * MOTORAMAX)
self.motorB.start(powerB * MOTORBMAX)
#stop
def stop(self):
self.motorA.stop()
self.motorB.stop()
#rotate left
def rotateLeft(self, power):
self.start(power * -1, power)
#rotate right
def rotateRight(self, power):
self.start(power, power * -1)
#class for controlling a motor
class Motor:
def __init__(self, forwardPin, backwardPin, encoderPin):
#persist values
self.forwardPin = forwardPin
self.backwardPin = backwardPin
self.encoderPin = encoderPin
#setup GPIO pins
GPIO.setup(forwardPin, GPIO.OUT)
GPIO.setup(backwardPin, GPIO.OUT)
GPIO.setup(encoderPin,GPIO.IN,pull_up_down=GPIO.PUD_DOWN)
#add encoder pin event
GPIO.add_event_detect(encoderPin, GPIO.RISING, callback=self._encoderCallback,bouncetime=2)
#setup pwm
self.forwardPWM = GPIO.PWM(forwardPin,50)
self.backwardPWM = GPIO.PWM(backwardPin,50)
#setup encoder/speed ticks
self.totalTicks = 0
self.currentTicks = 0
self.state = STATESTOPPED
#motor state property
@property
def state(self):
return self.state
#start
def start(self, power):
#forward or backward?
# backward
if(power < 0):
if(self.state != STATEBACKWARD): self.stop()
self._backward(power)
self.state = STATEBACKWARD
# forward
if(power > 0):
if(self.state != STATEFORWARD): self.stop()
self._forward(power)
self.state = STATEFORWARD
# stop
if(power == 0):
self.stop()
#stop
def stop(self):
#stop motor
self.forwardPWM.stop()
self.backwardPWM.stop()
self.state = STATESTOPPED
#reset ticks
self.currentTicks = 0
#reset ticks
def resetTotalTicks(self):
self.totalTicks = 0
#private function to calculate the freq for the PWM
def _calcPowerAndFreq(self, power):
# make power between 0 and 100
power = max(0,min(100,abs(power)))
#my fudge factor
power = power - ((100 - power) / 7)
# make half of freq, minimum of 11
freq = max(11,abs(power/2))
#freq = 50
return power, freq
#forward
def _forward(self, power):
#start forward motor
power, freq = self._calcPowerAndFreq(power)
self.forwardPWM.ChangeFrequency(freq)
self.forwardPWM.start(power)
#backward
def _backward(self, power):
#start backward motor
power, freq = self._calcPowerAndFreq(power)
self.backwardPWM.ChangeFrequency(freq)
self.backwardPWM.start(power)
#encoder callback
def _encoderCallback(self, pin):
self.totalTicks += 1
self.currentTicks += 1
#tests
if __name__ == '__main__':
try:
#setup gpio
GPIO.setmode(GPIO.BOARD)
#create motor control
motors = MotorController()
#run 1
motors.motorA.start(100)
time.sleep(3)
motors.motorA.stop()
run1ticks = motors.motorA.totalTicks
print "run1 " + str(run1ticks)
motors.motorA.resetTotalTicks()
#run 2
motors.motorA.start(50)
time.sleep(3)
motors.motorA.stop()
run2ticks = motors.motorA.totalTicks
print "run2 " + str(run2ticks)
print "ratio " + str(float(run2ticks) / float(run1ticks))
#Ctrl C
except KeyboardInterrupt:
print "User cancelled"
#Error
except:
print "Unexpected error:", sys.exc_info()[0]
raise
finally:
print ("cleanup")
#cleanup gpio
GPIO.cleanup()
| UTF-8 | Python | false | false | 5,603 | py | 11 | testspeed.py | 10 | 0.582724 | 0.569695 | 0 | 205 | 25.282927 | 99 |
gwacter/faucet | 249,108,129,214 | 42f7511dc8b93318e5d1f3498e34bbbed7780419 | 2bc5ea1754e1d885f5ae63785d211617d85f7d69 | /src/ryu_faucet/org/onfsdn/faucet/dp.py | 57cb13c4b3e059abfc3d59f85e1792e04e757816 | [
"Apache-2.0"
]
| permissive | https://github.com/gwacter/faucet | 55dec95308787f40689cd9604ffca84e9211719e | f21b3469e4fd4dae1a3b7b06fde0f09baad8210a | refs/heads/master | 2021-01-15T08:48:50.505601 | 2016-10-12T04:43:49 | 2016-10-12T04:43:49 | 55,377,278 | 1 | 0 | null | true | 2016-04-04T01:03:32 | 2016-04-04T01:03:32 | 2016-04-02T06:30:21 | 2016-03-28T20:26:54 | 2,741 | 0 | 0 | 0 | null | null | null | # Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from conf import Conf
from vlan import VLAN
from port import Port
import networkx
class DP(Conf):
"""Object to hold the configuration for a faucet controlled datapath."""
acls = None
vlans = None
ports = None
running = False
influxdb_stats = False
name = None
dp_id = None
configured = False
table_offset = None
vlan_table = None
acl_table = None
eth_src_table = None
ipv4_fib_table = None
ipv6_fib_table = None
eth_dst_table = None
flood_table = None
priority_offset = None
low_priority = None
high_priority = None
stack = None
# Values that are set to None will be set using set_defaults
# they are included here for testing and informational purposes
defaults = {
'dp_id': None,
# Name for this dp, used for stats reporting and configuration
'name': None,
'table_offset': 0,
# The table for internally associating vlans
'vlan_table': None,
'acl_table': None,
'eth_src_table': None,
'ipv4_fib_table': None,
'ipv6_fib_table': None,
'eth_dst_table': None,
'flood_table': None,
# How much to offset default priority by
'priority_offset': 0,
# Some priority values
'lowest_priority': None,
'low_priority': None,
'high_priority': None,
'highest_priority': None,
# Identification cookie value to allow for multiple controllers to
# control the same datapath
'cookie': 1524372928,
# inactive MAC timeout
'timeout': 300,
# description, strictly informational
'description': None,
        # The hardware maker (for choosing an openflow driver)
'hardware': 'Open vSwitch',
# ARP and neighbor timeout (seconds)
'arp_neighbor_timeout': 500,
# OF channel log
'ofchannel_log': None,
# stacking config, when cross connecting multiple DPs
'stack': None,
}
def __init__(self, _id, conf):
self._id = _id
self.update(conf)
self.set_defaults()
self.acls = {}
self.vlans = {}
self.ports = {}
self.acl_in = {}
def sanity_check(self):
        # TODO: this shouldn't use asserts
assert 'dp_id' in self.__dict__
assert isinstance(self.dp_id, (int, long))
for vid, vlan in self.vlans.iteritems():
assert isinstance(vid, int)
assert isinstance(vlan, VLAN)
assert all(isinstance(p, Port) for p in vlan.get_ports())
for portnum, port in self.ports.iteritems():
assert isinstance(portnum, int)
assert isinstance(port, Port)
def set_defaults(self):
for key, value in self.defaults.iteritems():
self._set_default(key, value)
# fix special cases
self._set_default('dp_id', self._id)
self._set_default('name', str(self._id))
self._set_default('vlan_table', self.table_offset)
self._set_default('acl_table', self.table_offset + 1)
self._set_default('eth_src_table', self.acl_table + 1)
self._set_default('ipv4_fib_table', self.eth_src_table + 1)
self._set_default('ipv6_fib_table', self.ipv4_fib_table + 1)
self._set_default('eth_dst_table', self.ipv6_fib_table + 1)
self._set_default('flood_table', self.eth_dst_table + 1)
self._set_default('lowest_priority', self.priority_offset)
self._set_default('low_priority', self.priority_offset + 9000)
self._set_default('high_priority', self.low_priority + 1)
self._set_default('highest_priority', self.high_priority + 98)
self._set_default('description', self.name)
def add_acl(self, acl_ident, acl_conf=None):
if acl_conf is not None:
self.acls[acl_ident] = [x['rule'] for x in acl_conf]
def add_port(self, port):
port_num = port.number
self.ports[port_num] = port
if port.mirror is not None:
# other configuration entries ignored
return
if port.acl_in is not None:
self.acl_in[port_num] = port.acl_in
def add_vlan(self, vlan):
self.vlans[vlan.vid] = vlan
def resolve_stack_topology(self, dps):
def canonical_edge(dp, port):
peer_dp = port.stack['dp']
peer_port = port.stack['port']
sort_edge_a = (
dp.name, port.name, dp, port)
sort_edge_z = (
peer_dp.name, peer_port.name, peer_dp, peer_port)
sorted_edge = sorted((sort_edge_a, sort_edge_z))
edge_a, edge_b = sorted_edge[0][2:], sorted_edge[1][2:]
return edge_a, edge_b
def make_edge_name(edge_a, edge_z):
edge_a_dp, edge_a_port = edge_a
edge_z_dp, edge_z_port = edge_z
return '%s:%s-%s:%s' % (
edge_a_dp.name, edge_a_port.name,
edge_z_dp.name, edge_z_port.name)
def make_edge_attr(edge_a, edge_z):
edge_a_dp, edge_a_port = edge_a
edge_z_dp, edge_z_port = edge_z
return {
'dp_a': edge_a_dp, 'port_a': edge_a_port,
'dp_z': edge_z_dp, 'port_z': edge_z_port}
root_dp = None
for dp in dps:
if dp.stack is not None:
if 'priority' in dp.stack:
assert root_dp is None, 'multiple stack roots'
root_dp = dp
if root_dp is None:
return
edge_count = {}
graph = networkx.MultiGraph()
for dp in dps:
graph.add_node(dp.name)
for port in dp.ports.itervalues():
if port.stack is not None:
edge = canonical_edge(dp, port)
edge_a, edge_z = edge
edge_name = make_edge_name(edge_a, edge_z)
edge_attr = make_edge_attr(edge_a, edge_z)
edge_a_dp, _ = edge_a
edge_z_dp, _ = edge_z
if edge_name not in edge_count:
edge_count[edge_name] = 0
edge_count[edge_name] += 1
graph.add_edge(
edge_a_dp.name, edge_z_dp.name, edge_name, edge_attr)
if len(graph.edges()):
for edge_name, count in edge_count.iteritems():
assert count == 2, '%s defined only in one direction' % edge_name
if self.stack is None:
self.stack = {}
self.stack['root_dp'] = root_dp
self.stack['graph'] = graph
def finalize_config(self, dps):
def resolve_port_no(port_name):
if port_name in port_by_name:
return port_by_name[port_name].number
elif port_name in self.ports:
return port_name
return None
def resolve_stack_dps():
port_stack_dp = {}
for port in self.ports.itervalues():
if port.stack is not None:
stack_dp = port.stack['dp']
port_stack_dp[port] = dp_by_name[stack_dp]
for port, dp in port_stack_dp.iteritems():
port.stack['dp'] = dp
stack_port_name = port.stack['port']
port.stack['port'] = dp.ports[stack_port_name]
def resolve_mirror_destinations():
            # Associate mirrored ports with their destinations.
mirror_from_port = {}
for port in self.ports.itervalues():
if port.mirror is not None:
if port.mirror in port_by_name:
mirror_from_port[port] = port_by_name[port.mirror]
else:
mirror_from_port[self.ports[port.mirror]] = port
for port, mirror_destination_port in mirror_from_port.iteritems():
port.mirror = mirror_destination_port.number
mirror_destination_port.mirror_destination = True
def resolve_port_names_in_acls():
for acl in self.acls.itervalues():
for rule_conf in acl:
for attrib, attrib_value in rule_conf.iteritems():
if attrib == 'actions':
if 'mirror' in attrib_value:
port_name = attrib_value['mirror']
port_no = resolve_port_no(port_name)
# in V2 config, we might have an ACL that does
# not apply to a DP.
if port_no is not None:
attrib_value['mirror'] = port_no
port = self.ports[port_no]
port.mirror_destination = True
if 'output' in attrib_value:
port_name = attrib_value['output']['port']
port_no = resolve_port_no(port_name)
if port_no is not None:
attrib_value['output']['port'] = port_no
port_by_name = {}
for port in self.ports.itervalues():
port_by_name[port.name] = port
dp_by_name = {}
for dp in dps:
dp_by_name[dp.name] = dp
resolve_stack_dps()
resolve_mirror_destinations()
resolve_port_names_in_acls()
def get_native_vlan(self, port_num):
if port_num not in self.ports:
return None
port = self.ports[port_num]
for vlan in self.vlans.values():
if port in vlan.untagged:
return vlan
return None
def __str__(self):
return self.name
| UTF-8 | Python | false | false | 10,567 | py | 29 | dp.py | 17 | 0.537239 | 0.531655 | 0 | 284 | 36.207746 | 81 |
ivanelisandro/PythonStudiesJobAgency | 2,336,462,232,601 | e26b0cf29b123841c5e209abae69b61090a4a189 | c615384fbf8b10ce3cdefc008509454460def72f | /Topics/The pprint module/A better format/main.py | a28987b7d5c5d7bf9fc46af80955123d75e438be | []
| no_license | https://github.com/ivanelisandro/PythonStudiesJobAgency | 07e1981601c5314dc48239cc220db9849468b946 | 28f6ff4d1fc2592f4dd740e3aa68af0bac105f5d | refs/heads/main | 2023-08-25T19:32:10.306188 | 2021-10-22T01:34:21 | 2021-10-22T01:34:21 | 391,798,794 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pprint import pprint
dogs = [
{
"name": "Max",
"breed": "Yorkshire",
"age": 1,
"owners": ["Susan, Camila, Paul"],
},
{
"name": "Duke",
"breed": "Bulldog",
"age": 4,
"owners": ["Thomas, David, Lucia"],
},
]
pprint(dogs, indent=4, width=30)
| UTF-8 | Python | false | false | 326 | py | 336 | main.py | 91 | 0.435583 | 0.420245 | 0 | 18 | 17.111111 | 43 |
xglh/PyDev | 2,757,369,007,176 | dacf7cb7b1b38222e87d824e884d5fdb4b6b492b | dbc440817ef796bcc7e06248b50bcce17231cb36 | /PyDev/cdn/cdn_autotest/base/testCheck.py | 5d2a977ac479f311c2e9c979399e9649fe6c1cb9 | []
| no_license | https://github.com/xglh/PyDev | 64304a8372805cae2a533dd531920444cd7d001b | fb83b7d8e61465f338c9d250b86286dfb44af444 | refs/heads/master | 2018-10-16T18:13:58.051880 | 2018-09-15T13:53:15 | 2018-09-15T13:53:15 | 134,224,933 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # encoding: UTF-8
import re
ss = {
"apimonitor数据结构(#)":"",
"22(#)":"",
"33(#)":""
}
tt = {
"L1.ngx.accessLog.http_origin_host":"1228580",
"L1.tcpdump.to80.response.apimonitor数据结构":-1,
"L1.file.fileSize":"1228580",
"apimonitor数据结构":"122858hello",
"22":"1228580",
"33":"1228580"
}
def format_expectResult(ss):
    '''
    Operator usage: key(op param):value
    @: compare keys against each other, key1 = key2 = value; keys sharing the same param form one group, param defaults to 0 when omitted
    #: compare keys against each other, key1 != key2; keys sharing the same param form one group, param defaults to 0 when omitted
    >: key >= param + value
    <: key <= param + value
    =: key = param + value
    ~: value - param <= key <= value + param
    !: key must not exist
    $: key must exist
    r: regular-expression match
    '''
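    # Illustrative expectation dict using these operators (hypothetical keys, not from the test data above):
    #   {"status(=)": "200",                  # status must equal 200 (+ param, default 0)
    #    "sizeA(@1)": "", "sizeB(@1)": "",    # group 1: sizeA and sizeB must be equal to each other
    #    "error(!)": ""}                      # key "error" must not exist in the actual result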
operaters = ["@","#",">","<","=","~","!","$","r"]
ff = {
"cmpKey":{},
"notEqualKey":{}
}
parmaJson = {}
notEqualJson = {}
mKyes = []
for k in ss:
#print k
str_1 = "";key = "";op = "";parma = ""
p = re.compile(r"\(.*\)")
find_str = p.findall(k)
#print find_str
if len(find_str) != 0:
key = k[0:k.index(find_str[0])]
str_1 = find_str[0][1:-1]
op = str_1[0]
parma = "0"
if len(str_1) != 1:
parma = str_1[1:]
else:
key = k
ff[key] = {}
ff[key]["op"] = ""
ff[key]["parma"] = 0
ff[key]["value"] = ss.get(k)
#print key,op,parma
if not key in mKyes:
mKyes.append(key)
if op in operaters:
if op == "@":
if not parmaJson.has_key(parma):
parmaJson[parma] = {}
parmaJson[parma][key] = ss[k]
elif op == "#":
if not notEqualJson.has_key(parma):
notEqualJson[parma] = {}
notEqualJson[parma][key] = ss[k]
else:
ff[key] = {}
ff[key]["op"] = op
ff[key]["parma"] = int(parma)
ff[key]["value"] = ss[k]
ff["cmpKey"] = parmaJson
    # if a notEqualJson group has more than one key, clear their expected values; a single key is left unchanged
if len(notEqualJson) != 0:
for k in notEqualJson:
#print k
if len(notEqualJson.get(k)) > 1:
for kk in notEqualJson.get(k):
notEqualJson[k][kk] = ""
ff["notEqualKey"] = notEqualJson
return ff,mKyes
def check_expectResult(expectJson,actualJson):
check_result = 1
#print expectJson
for k in expectJson:
if not k in ("cmpKey","notEqualKey") :
#print expectJson.get(k)
op = expectJson.get(k).get("op")
            # when op is empty, compare the value directly without type conversion
            if op == "":
                actualResult = actualJson.get(k)
                value = expectJson.get(k).get("value")
                if actualResult != value:
                    print "%s comparison failed"%k
return 0
else:
parma = int(expectJson.get(k).get("parma"))
try:
actualResult = int(actualJson.get(k))
value = int(expectJson.get(k).get("value"))
except Exception:
actualResult = 0
value = 0
if op == ">":
if actualResult < value + parma or actualResult == -1:
check_result = 0
print "%s对比失败"%k
break
if op == "<":
if actualResult > value + parma or actualResult == -1:
check_result = 0
print "%s对比失败"%k
break
if op == "=":
if actualResult != value + parma or actualResult == -1:
check_result = 0
print "%s对比失败"%k
break
if op == "~":
if actualResult > value + parma or actualResult < value - parma or actualResult == -1:
check_result = 0
print "%s对比失败"%k
break
if op == "!":
if actualResult != -1:
check_result = 0
print "%s对比失败"%k
break
if op == "$":
if actualResult == -1:
check_result = 0
print "%s对比失败"%k
break
if op == "r":
value = expectJson.get(k).get("value")
actualResult = actualJson.get(k)
p = re.compile(value)
find_str = p.findall(actualResult)
if len(find_str) == 0:
check_result = 0
break
elif k == "cmpKey":
cmpKey = expectJson.get(k)
#print cmpKey
for kk in cmpKey:
json_1 = cmpKey.get(kk)
#print json_1
cmpResult,failKey = cmpKeyJson(json_1, actualJson)
if not cmpResult:
check_result = 0
print "@%s的%s对比失败"%(kk,failKey)
break
elif k == "notEqualKey":
for kk in expectJson.get(k):
keys_array = expectJson.get(k).get(kk).keys()
for i in range(0,len(keys_array) - 1):
for j in range(i+1,len(keys_array)):
key_name1 = keys_array[i]
key_name2 = keys_array[j]
actual_value1 = actualJson.get(key_name1)
actual_value2 = actualJson.get(key_name2)
print key_name1,key_name2,actual_value1,actual_value2
if actual_value1 == actual_value2:
print "#%s的%s %s对比失败"%(kk,key_name1,key_name2)
check_result = 0
break
return check_result
def cmpKeyJson(expectJson,actualJson):
flag = True
keys = expectJson.keys()
failKey = ""
#print keys
target_value = ""
for k in expectJson:
if expectJson.get(k) != "":
target_value = expectJson.get(k)
break
#print "target_value = %s"%target_value
if len(keys) == 1:
if expectJson.get(keys[0]) != actualJson.get(keys[0]) or actualJson.get(keys[0]) == -1:
failKey = keys[0]
flag = False
else:
for i in range(0,len(keys)-1):
#print actualJson.get(keys[i]),actualJson.get(keys[i+1])
if target_value =="":
if actualJson.get(keys[i]) != actualJson.get(keys[i+1]) or actualJson.get(keys[0]) == -1:
failKey = keys[i]
flag = False
else:
if not (actualJson.get(keys[i]) == target_value and \
actualJson.get(keys[i+1]) == target_value) or actualJson.get(keys[0]) == -1:
failKey = keys[i]
flag = False
return flag,failKey
ff,keys = format_expectResult(ss)
print ff,keys
print check_expectResult(ff,tt)
| UTF-8 | Python | false | false | 8,211 | py | 179 | testCheck.py | 118 | 0.395249 | 0.379037 | 0 | 226 | 34.075221 | 109 |
natslins007/DefsAndSyns | 601,295,436,981 | 1ebab9187c5a0243cb8ffcba59c3aafef5f63070 | d25fe89863c9bb464350804bb99288cfa327427b | /defsandsyns_shell.py | 85491424dd7ed473e1a4586df50d49edc718be20 | []
| no_license | https://github.com/natslins007/DefsAndSyns | a1548d0c4ee3c99f6007b4305ebefe68d7b7cbaf | 90ca4c9857d883d0845b0fca2fd2119680c7249c | refs/heads/master | 2022-09-30T02:42:18.104706 | 2020-06-06T00:57:28 | 2020-06-06T00:57:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import dictionary
# main loop
dictionary.initialize()
while True:
s = input('dict: ')
if s == 'quit()':
dictionary.quit_browser()
break
parts = s.split(' ')
if len(parts) != 2:
print('invalid syntax\n')
continue
command = parts[0]
word = parts[1]
if ';' in word:
print('no results found for ' + word + '\n')
continue
if command == 'def':
out = dictionary.definition(word, 2, 2)
for o in out: print(o)
elif command == 'syn':
out = dictionary.synonym(word, 3)
for o in out: print(o)
else:
print('unknown command')
print('') | UTF-8 | Python | false | false | 661 | py | 5 | defsandsyns_shell.py | 3 | 0.527988 | 0.518911 | 0 | 34 | 18.470588 | 52 |
savva-kotov/python-prog | 17,248,588,671,703 | 0c2c9da96c9f9d8d844922bac710d39570a9a430 | 1b7318c7606ef3131f5cc9698b7f0dd7292d846e | /3-7-5.py | afaac6bdc0bd0d6e32ec9c08408c06938ea86778 | []
| no_license | https://github.com/savva-kotov/python-prog | 0349e8c9f12737be663a8a7842972f2b69de5a13 | a9191f0ca13fd5c20c4dff099288576a88d98dc3 | refs/heads/master | 2021-02-23T17:01:45.805976 | 2020-03-10T19:24:09 | 2020-03-10T19:24:09 | 245,405,201 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
A file contains a TSV table with the heights of school students from different grades.
Write a program that reads this file and computes the average student height for each grade.
The file consists of lines, each of which contains three fields:
Grade Surname Height
The grade is given as a number only; letter modifiers are not used. The grade number can range from 1 to 11 inclusive.
Surnames contain no spaces, and the height is a natural number, but the average must be
computed as a floating-point value.
The average heights must be printed in ascending order of grade number (for grades one through eleven).
If there is no information for some grade, a dash must be printed next to it, for example:
Sample Input:
6 Вяххи 159
11 Федотов 172
7 Бондарев 158
6 Чайкина 153
Sample Output:
1 -
2 -
3 -
4 -
5 -
6 156.0
7 158.0
8 -
9 -
10 -
11 172.0
"""
f = open('dataset_3380_5.txt','r')
a = []
for i in f:
a.append(i.split())
f.close()
res = [[] for i in range(11)]  # a list of 11 empty lists, one per grade
for i in a:
    res[int(i[0])-1].append(int(i[2]))  # collect heights per grade
res1 = []
for i in res:
if len(i) == 0:
        res1.append(0)  # no height data for this grade, store 0
    else:
        res1.append(sum(i)/len(i))  # data exists, compute the average
for idx, i in enumerate(res1):  # enumerate instead of res1.index(i): index() returns the first match, which is wrong for duplicate averages
    if i == 0:
        print(idx + 1, '-')  # if 0, print a dash
    else:
        print(idx + 1, i)  # otherwise print the grade number and the average value
| UTF-8 | Python | false | false | 2,339 | py | 32 | 3-7-5.py | 32 | 0.708115 | 0.661649 | 0 | 56 | 26.285714 | 123 |
avoorhis/vamps-node.js | 16,638,703,315,075 | ee71a00354ec85804c6bba59018e9a3e48859d50 | eaa8e10b84dcd96db3495b6fa9d178a1696802b6 | /public/scripts/node_process_scripts/vamps_script_spingo_run.py | 0bdee4959c594e0c1c2e6e328dce8d606469af28 | [
"MIT"
]
| permissive | https://github.com/avoorhis/vamps-node.js | ec783f10ecaca3be054c9ddd8171a50781d937a4 | 7071da6e569a669a06a6d21c23cc9c667ec49250 | refs/heads/master | 2022-07-16T13:09:15.620119 | 2021-08-09T12:52:20 | 2021-08-09T12:52:20 | 20,567,960 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
##!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011, Marine Biological Laboratory
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import os
from stat import * # ST_SIZE etc
import sys
import shutil
import types
import time
import random
import logging
import csv
from time import sleep
import configparser as ConfigParser
import subprocess
import datetime
datetime = str(datetime.date.today())
def start_spingo(args):
"""
Doc string
"""
logging.info('CMD> '+' '.join(sys.argv))
if args.verbose:
print('CMD> ',sys.argv)
datasets = {}
config_path = os.path.join(args.project_dir, args.config_file)
if not os.path.isfile(config_path):
print( "Could not find config file ("+config_path+") **Exiting**")
sys.exit()
config = ConfigParser.ConfigParser()
config.optionxform=str
config.read(config_path)
general_config_items = {}
# CL take precedence for domain and dna_region
for name, value in config.items('MAIN'):
#print(' %s = %s' % (name, value) )
general_config_items[name] = value
#print(config.items('MAIN.dataset'))
file_prefix = 'testing-fp'
dir_prefix = general_config_items['project_dir']
total_uniques = 0
for dataset_item in config.items('MAIN.dataset'):
dataset = dataset_item[0]
dscount = dataset_item[1] # raw count
print("\nDS KNT",dataset,dscount)
unique_file = os.path.join(args.project_dir, 'analysis', dataset,'seqfile.unique.fa')
spingo_out_file = os.path.join(args.project_dir, 'analysis', dataset, 'spingo_out.txt') # to be created
spingo_args = [ '-i', unique_file, '-d', args.ref_database, '-w', '>', spingo_out_file ]
spingo_cmd = args.path_to_spingo + ' ' + ' '.join(spingo_args)
if args.verbose:
print(spingo_cmd)
subprocess.call(spingo_cmd, shell=True)
"""/Users/avoorhis/programming/SPINGO//spingo
-i /Users/avoorhis/programming/vamps-node.js/user_data/avoorhis/project-avoorhis_365797/analysis/H56Di.736010/seqfile.unique.fa
-d /Users/avoorhis/programming/SPINGO/database/RDP_11.2.species.fa
> /Users/avoorhis/programming/vamps-node.js/user_data/avoorhis/project-avoorhis_365797/analysis/H56Di.736010/spingo.out
"""
if __name__ == '__main__':
import argparse
myusage = """usage: spingo.py [options]
         This will start the (customized) python_pipeline
for the GAST process, creating the vamps_* files
for input to the new_vamps database.
where
-path_to_spingo/--path_to_spingo REQ
-db/--ref_database REQ
-config/--config REQ
-p/--project
-project_dir/--project_dir
"""
parser = argparse.ArgumentParser(description="" ,usage=myusage)
parser.add_argument("-project_dir", "--project_dir",
required=True, action="store", dest = "project_dir",
help = 'project directory')
parser.add_argument("-p", "--project",
required=True, action='store', dest = "project",
help="Project Name")
parser.add_argument("-host", "--host",
required=False, action='store', choices=['vamps','vampsdev','local'], dest = "host", default='local',
help="")
parser.add_argument("-db", "--ref_database",
required=True, action="store", dest = "ref_database",
help = 'See SPINGO README')
parser.add_argument("-path_to_spingo", "--path_to_spingo",
required=True, action="store", dest = "path_to_spingo",
help = 'SPINGO Executable with full path')
parser.add_argument("-config", "--config",
required=True, action="store", dest = "config_file",
help = 'config file name')
parser.add_argument("-v", "--verbose",
required=False, action="store_true", dest = "verbose", default=False,
help = 'chatty')
args = parser.parse_args()
start_spingo(args)
#sys.exit('END: vamps_script_rdp_run.py')
| UTF-8 | Python | false | false | 4,795 | py | 402 | vamps_script_spingo_run.py | 180 | 0.565798 | 0.557456 | 0 | 135 | 34.496296 | 144 |
shenxuehao/A-C-language-novice | 6,674,379,187,652 | 9f7af24e405cb4d9c39f67c66e65f56d85cf7082 | 1f237c3fde884789a8ed82ee877620c45c7efa0a | /code_python/2020-12-30zhushi.py | a0f912a2cae7f46c2d6611f7d0739f38737ba809 | []
| no_license | https://github.com/shenxuehao/A-C-language-novice | 302cc708b78a31edd1a7870c35b1683692ca81d8 | af1a963d6e7857685f9ae3d846a286317c8a6763 | refs/heads/master | 2023-04-08T15:41:42.809758 | 2021-04-12T02:45:11 | 2021-04-12T02:45:11 | 303,631,938 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 单行,输出hello world
print("hello world")
print("hello python")
"""
First line of a multi-line comment
Second line of a multi-line comment
Third line of a multi-line comment
"""
'''
comment
comment
comment
''' | UTF-8 | Python | false | false | 158 | py | 229 | 2020-12-30zhushi.py | 80 | 0.622642 | 0.622642 | 0 | 15 | 6.133333 | 21 |
dadasko/projektRswta | 9,990,093,977,957 | 696cd8460017e5690ea3235fa670bdb2e89743ff | eff757313f89e21dd26fd007996104d3e7f00608 | /Mecze/migrations/0001_initial.py | 9e38aaa46f193d31e546d2890ffedaab4eb0777d | []
| no_license | https://github.com/dadasko/projektRswta | 47e123d43f2283f7b8c1a0179c4b2bfa67770cdd | 1b2337f82fecf113dfd604f24b78088fee888478 | refs/heads/master | 2016-08-11T14:06:06.323202 | 2016-01-18T21:29:26 | 2016-01-18T21:29:26 | 49,905,377 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Bramka',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('minuta', models.IntegerField()),
],
),
migrations.CreateModel(
name='Kolejka',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('data', models.DateTimeField()),
('nr_kolejki', models.IntegerField()),
],
),
migrations.CreateModel(
name='Mecz',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('uwagi', models.CharField(max_length=128)),
],
),
migrations.CreateModel(
name='Pozycja',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('nazwa', models.CharField(max_length=15)),
],
),
migrations.CreateModel(
name='Sklad',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('id_pozycji', models.ForeignKey(to='Mecze.Pozycja')),
],
),
migrations.CreateModel(
name='Sklad_na_mecz',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('id_meczu', models.ForeignKey(to='Mecze.Mecz')),
('id_skladu', models.ForeignKey(to='Mecze.Sklad')),
],
),
migrations.CreateModel(
name='Wynik',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('gole_gospodarza', models.IntegerField()),
('gole_goscia', models.IntegerField()),
('id_kolejki', models.ForeignKey(to='Mecze.Kolejka')),
('id_meczu', models.ForeignKey(to='Mecze.Mecz')),
],
),
migrations.CreateModel(
name='Zawodnik',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('imie', models.CharField(max_length=50)),
('nazwisko', models.CharField(max_length=50)),
('id_pozycji', models.ForeignKey(to='Mecze.Pozycja')),
],
),
migrations.CreateModel(
name='Zespol',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('nazwa', models.CharField(max_length=50)),
],
),
migrations.AddField(
model_name='zawodnik',
name='id_zespolu',
field=models.ForeignKey(to='Mecze.Zespol'),
),
migrations.AddField(
model_name='sklad_na_mecz',
name='id_zespolu',
field=models.ForeignKey(to='Mecze.Zespol'),
),
migrations.AddField(
model_name='sklad',
name='id_zawodnika',
field=models.ForeignKey(to='Mecze.Zawodnik'),
),
migrations.AddField(
model_name='sklad',
name='id_zespolu',
field=models.ForeignKey(to='Mecze.Zespol'),
),
migrations.AddField(
model_name='mecz',
name='id_goscia',
field=models.ForeignKey(to='Mecze.Zespol', related_name='mecz_gosc'),
),
migrations.AddField(
model_name='mecz',
name='id_gospodarza',
field=models.ForeignKey(to='Mecze.Zespol', related_name='mecz_gospodarz'),
),
migrations.AddField(
model_name='mecz',
name='id_kolejki',
field=models.ForeignKey(to='Mecze.Kolejka'),
),
migrations.AddField(
model_name='mecz',
name='id_wyniku',
field=models.ForeignKey(to='Mecze.Wynik'),
),
migrations.AddField(
model_name='bramka',
name='id_meczu',
field=models.ForeignKey(to='Mecze.Mecz'),
),
migrations.AddField(
model_name='bramka',
name='id_zawodnika',
field=models.ForeignKey(to='Mecze.Zawodnik'),
),
]
| UTF-8 | Python | false | false | 4,846 | py | 29 | 0001_initial.py | 19 | 0.512381 | 0.509905 | 0 | 133 | 35.43609 | 114 |
aeroniero33/ddpg_scheduler | 16,020,228,052,578 | 8e6b015c6b9f3f397cbbf710c9da6085a416f0bf | e6b67e821cd22820520959e3d918cd875058b1bd | /ddpg/ddpg.py | 518a54d6f43116f4613c459b8add8e48cd437263 | []
| no_license | https://github.com/aeroniero33/ddpg_scheduler | 802637850a4a35be5db585bc83ff917f82f34b11 | 8356960814b333e7b678a84e1b076fc3bf6bb6e7 | refs/heads/master | 2022-11-27T13:31:09.171134 | 2020-08-08T09:59:53 | 2020-08-08T09:59:53 | 238,006,336 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import tensorflow as tf
import numpy as np
import gym
from replay_buffer import ReplayBuffer
from actor import ActorNetwork
from critic import CriticNetwork
from ou_noise import OUNoise
from simulation import IoT_Simulation
# Base learning rate for the Actor network
ACTOR_LEARNING_RATE = 0.00005
# Base learning rate for the Critic Network
CRITIC_LEARNING_RATE = 0.001
# Soft target update param
TAU = 0.001
RANDOM_SEED = 1234
EXPLORE = 60
DEVICE = '/cpu:0'
class DDPG_Trainer:
def train(self, env, epochs=1, MINIBATCH_SIZE=30, GAMMA = 0.99, epsilon=1.0, min_epsilon=0.01, BUFFER_SIZE=10000):
with tf.Session() as sess:
# configuring the random processes
np.random.seed(RANDOM_SEED)
tf.set_random_seed(RANDOM_SEED)
env.seed(RANDOM_SEED)
# info of the environment to pass to the agent
state_dim = env.observation_space.shape[1]
action_dim = env.action_space.shape[1]
action_bound = np.float64(720)
# Creating agent
ruido = OUNoise(action_dim, mu = 0.4) # this is the Ornstein-Uhlenbeck Noise
actor = ActorNetwork(sess, state_dim, action_dim, action_bound, ACTOR_LEARNING_RATE, TAU, DEVICE)
critic = CriticNetwork(sess, state_dim, action_dim, CRITIC_LEARNING_RATE, TAU, actor.get_num_trainable_vars(), DEVICE)
sess.run(tf.global_variables_initializer())
# Initialize target network weights
actor.update_target_network()
critic.update_target_network()
# Initialize replay memory
replay_buffer = ReplayBuffer(BUFFER_SIZE, RANDOM_SEED)
rewards = []
for i in range(epochs):
state = env.reset()
state = np.hstack(state)
ep_reward = 0
done = False
step = 0
epsilon -= (epsilon/EXPLORE)
epsilon = np.maximum(min_epsilon,epsilon)
while (not done):
action_original = actor.predict(np.reshape(state, (1, state_dim)))
action = action_original + max(epsilon, 0)*ruido.noise()
print(f"Step: {step}")
print(f"Action: {action}")
next_state, reward, done = env.step(action)
print(f"Reward: {reward}")
replay_buffer.add(np.reshape(state, (actor.s_dim,)), np.reshape(action, (actor.a_dim,)), reward,
done, np.reshape(next_state, (actor.s_dim,)))
if replay_buffer.size() > MINIBATCH_SIZE:
s_batch, a_batch, r_batch, t_batch, s2_batch = replay_buffer.sample_batch(MINIBATCH_SIZE)
# Calculate targets
target_q = critic.predict_target(s2_batch, actor.predict_target(s2_batch))
y_i = []
for k in range(MINIBATCH_SIZE):
if t_batch[k]:
y_i.append(r_batch[k])
else:
y_i.append(r_batch[k] + GAMMA * target_q[k])
critic.train(s_batch, a_batch, np.reshape(y_i, (MINIBATCH_SIZE, 1)))
a_outs = actor.predict(s_batch)
grads = critic.action_gradients(s_batch, a_outs)
actor.train(s_batch, grads[0])
# Update target networks
actor.update_target_network()
critic.update_target_network()
state = next_state
ep_reward = ep_reward + reward
rewards.append(reward)
step +=1
if done:
ruido.reset()
if np.average(rewards[-10:]) > -200:
print(f"Number of steps: {step + i*100}")
return rewards
return rewards
| UTF-8 | Python | false | false | 4,281 | py | 6 | ddpg.py | 4 | 0.498014 | 0.483065 | 0 | 120 | 34.666667 | 130 |
Mygithubtejas-nayak/Hospital_management_system | 2,774,548,878,381 | 0ad9820d75fb6e81242a3e6128a6a52d68581d80 | 6194dc16882de531bfddc5c8bd33bada47ac4497 | /Hospital/Hospital_crm/models.py | 2e6960a8c940334c5cb58491e2a86f0fdbc9f41f | []
| no_license | https://github.com/Mygithubtejas-nayak/Hospital_management_system | b322d378a1736715b00b3e2386024104074f7b6e | c06a03e08f87546b77ca94a960e897dfd7867311 | refs/heads/master | 2023-04-20T05:35:38.502091 | 2021-05-14T17:49:43 | 2021-05-14T17:49:43 | 367,440,017 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from django.core.exceptions import ValidationError
from phone_field import PhoneField
from django.contrib.auth.models import User
# Create your models here.
class Patient(models.Model):
genderchoices = (
('male', 'Male'),
('female','Female'),
('other','Other')
)
patientchoices = (
('Inpatient', 'Inpatient'),
('Outpatient', 'Outpatient')
)
user = models.OneToOneField(User,null=True, on_delete = models.CASCADE)
Patient_Type = models.CharField(max_length=10, choices=patientchoices, default='Inpatient')
name = models.CharField(max_length=200)
email = models.EmailField(max_length=200, null = True)
Mobile_number = PhoneField(blank=False)
Address = models.CharField(max_length=200, null = True)
gender = models.CharField(max_length=10, default='Male', choices=genderchoices)
def __str__(self):
return self.name
class Receptionist(models.Model):
genderchoices = (('male', 'Male'),('female','Female'),('other','Other'))
user = models.OneToOneField(User,null=True, on_delete = models.CASCADE)
name = models.CharField(max_length=200)
email = models.EmailField(max_length=200, null = True)
Mobile_number = PhoneField(blank=False)
Address = models.CharField(max_length=200, null = True)
gender = models.CharField(max_length=10, default='Male', choices=genderchoices)
def __str__(self):
return self.name
class Doctor(models.Model):
genderchoices = (
('male', 'Male'),
('female','Female'),
('other','Other')
)
user = models.OneToOneField(User,null=True, on_delete = models.CASCADE)
name = models.CharField(max_length=200)
email = models.EmailField(max_length=200, null = True)
Mobile_number = PhoneField(blank=False)
Address = models.CharField(max_length=200, null = True)
gender = models.CharField(max_length=10, default='Male', choices=genderchoices)
bio = models.TextField(max_length=500)
def __str__(self):
return self.name
class Appointment(models.Model):
STATUS = (
('Pending', 'Pending'),
('Approved', 'Approved'),
('Rejected', 'rejected'),
('Completed', 'Completed'),
)
Patient = models.ForeignKey(Patient, null = True, on_delete = models.CASCADE)
Doctor = models.ForeignKey(Doctor, null = True, on_delete = models.CASCADE)
Date = models.DateTimeField(null=True)
Status = models.CharField(max_length=15, null = True, choices=STATUS, default='Pending')
def __str__(self):
return "{0} - {1}".format(self.Patient, self.Doctor)
class Inventory_item(models.Model):
categorychoices = (
('Essential', 'Essential'),
('Sanitization', 'Sanitization')
)
item = models.TextField(max_length=100)
quantity = models.IntegerField()
category = models.CharField(max_length=50, choices=categorychoices, null=True)
supplier = models.CharField(max_length=50, null=True)
supplier_contact = PhoneField(null = True)
def __str__(self):
return "{0} - {1}".format(self.item, self.quantity)
class Bills(models.Model):
billchoices = (
('Lab Test','Lab Test'),
('Room Charges','Room Charges'),
('Doctor Visits','Doctor Visits'),
('Lunch','Lunch'),
('Medicines', 'Medicines'),
('First Visit', 'First Visit'),
('Followup Visit', 'Followup Visit'),
('In Comments', 'In Comments')
)
patient = models.ForeignKey(Patient, on_delete = models.CASCADE)
Date = models.DateField()
amount = models.IntegerField(null=True)
type = models.CharField(max_length=200, choices=billchoices)
Comments = models.TextField(max_length=500, null=True)
def __str__(self):
return "{0} - {1}".format(self.patient, self.type) | UTF-8 | Python | false | false | 3,855 | py | 3 | models.py | 3 | 0.644877 | 0.629572 | 0 | 116 | 32.241379 | 95 |
khalilelghoul01/loader | 5,385,888,998,983 | c244f305c4183b3dd6bdeacc359117a4d7b792b7 | b4bfad1257e3bb1667c61c824d1d488b4abf662a | /backdoor/start.py | 8100277de569593c72287bafb697623d71f3aa9f | []
| no_license | https://github.com/khalilelghoul01/loader | fe73a81af9759fa1031f00299b7b8f0c41ad0e4c | 34fdc97173705438cce8a9527e2bf6a35d3c35ee | refs/heads/master | 2020-05-16T04:42:02.557288 | 2019-04-22T13:08:02 | 2019-04-22T13:08:02 | 182,788,928 | 0 | 0 | null | false | 2021-04-01T20:28:57 | 2019-04-22T13:06:47 | 2019-04-22T13:08:04 | 2021-04-01T20:28:37 | 523 | 0 | 0 | 1 | Python | false | false | import logger1
import keyboard
logger1.logger3()
| UTF-8 | Python | false | false | 54 | py | 12 | start.py | 8 | 0.759259 | 0.703704 | 0 | 4 | 11.5 | 17 |
editorgit/station | 15,685,220,607,759 | 5ca77221ed735d97353ced4df6f3c34bca7e30b1 | 67f57c73809c05e422a66f039fc9a5a8f9e2da43 | /apps/content/admin.py | 83c89f18d26a3f6b12068ed4f93841265f4fd3b3 | []
| no_license | https://github.com/editorgit/station | 8b7377c11370234b79aaaa89eb0c15f39fd4629c | 0e330faf08d17e131cbdb998c9d874a6c41d27df | refs/heads/master | 2019-12-27T07:28:18.477926 | 2018-10-21T07:13:56 | 2018-10-21T07:13:56 | 92,037,002 | 0 | 0 | null | false | 2019-10-21T17:24:30 | 2017-05-22T09:40:24 | 2018-10-21T07:14:07 | 2019-10-21T17:24:29 | 1,958 | 0 | 0 | 1 | HTML | false | false | from django.contrib import admin
from django.utils.safestring import mark_safe
from django.conf import settings
from .models import Text
FILE_PATH = settings.BASE_DIR + '/texts/org/'
def generate_txtfiles(modeladmin, request, queryset):
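    # Write each not-yet-translated text (that has a language and a creation date)
    # to its own "<pk>_<LANG>.txt" file under FILE_PATH so it can be handed to translators.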
texts = queryset.filter(translated_at__isnull=True) \
.exclude(created_at__isnull=True) \
.exclude(lang__isnull=True)
for text in texts:
if text.title:
title = f'Title: {text.title} \r\n\r\n'
else:
title = ''
filename = f'{text.pk}_{text.lang.tld_end.upper()}.txt'
print(filename)
with open(FILE_PATH+filename, 'w') as the_file:
the_file.write(title + text.text)
generate_txtfiles.short_description = "Generate txt files for translate"
class TextAdmin(admin.ModelAdmin):
raw_id_fields = ('site', )
list_display = ('task_copywriter', 'site', 'lang', 'created_at',
'translated_at', 'published_at')
list_filter = ('lang', 'site')
# search_fields = ('url', 'short_name')
ordering = ('-created_at',)
actions = [generate_txtfiles]
admin.site.register(Text, TextAdmin)
| UTF-8 | Python | false | false | 1,167 | py | 106 | admin.py | 101 | 0.622108 | 0.622108 | 0 | 40 | 28.175 | 72 |
karthikpappu/pyc_source | 1,340,029,834,916 | dab505e2143c4fb618762da8cf7e519ec4f95521 | 91fa095f423a3bf47eba7178a355aab3ca22cf7f | /pypi_install_script/tinycm_vim-0.1.4.tar/setup.py | 06492535e63a08ad004df0701cdf0490ac2eee2d | []
| no_license | https://github.com/karthikpappu/pyc_source | 0ff4d03e6d7f88c1aca7263cc294d3fa17145c9f | 739e7e73180f2c3da5fd25bd1304a3fecfff8d6e | refs/heads/master | 2023-02-04T11:27:19.098827 | 2020-12-27T04:51:17 | 2020-12-27T04:51:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
from setuptools import setup
setup(
name='tinycm_vim',
version='0.1.4',
packages=['tinycm_vim'],
url='https://github.com/MartijnBraam/TinyCM',
license='MIT',
author='Martijn Braam',
author_email='martijn@brixit.nl',
description='VIM definition for TinyCM',
keywords=["configuration management", "puppet"],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Development Status :: 4 - Beta",
"Operating System :: POSIX :: Linux",
"License :: OSI Approved :: MIT License"
],
install_requires=[
'tinycm'
]
)
| UTF-8 | Python | false | false | 742 | py | 114,545 | setup.py | 111,506 | 0.543127 | 0.53504 | 0 | 25 | 28.68 | 56 |
Kevinrobot34/atcoder | 1,975,684,999,986 | de7039a33c395fd6c7a12ce037185174f4d3e9ec | 54d2887e3c910f68366bd0aab3c692d54245e22a | /arc/arc014/c.py | 3ba05bcccab2b1b8a857985913abff62f86603f2 | []
| no_license | https://github.com/Kevinrobot34/atcoder | 7aec367fd2c6b589e9d583dae7b3c7520ce9fa12 | 482ea508f098f81e4f19522fe518dd22c781aca9 | refs/heads/master | 2022-07-10T23:44:45.290022 | 2022-06-29T11:30:26 | 2022-06-29T11:30:26 | 158,081,477 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import Counter
n = int(input())
s = input()
ans = sum(v % 2 for v in Counter(s).values())
print(ans)
| UTF-8 | Python | false | false | 119 | py | 1,613 | c.py | 1,304 | 0.663866 | 0.655462 | 0 | 6 | 18.833333 | 45 |
jeongmin14/16-2nd-bearbnb-backend | 10,840,497,463,763 | 24ae14fb965ba343a1d1fcfd574e600fe8f37c52 | 0c50db4b1c2c2e21f73eeb81362a749b5c630609 | /users/models.py | 98d40497cb395d70fe5a461dcbba343cfaaf5bb0 | []
| no_license | https://github.com/jeongmin14/16-2nd-bearbnb-backend | 5c5d5c4f0d2bc9df033033f674da2cb6b4a7ec54 | a814c19c7a03eb5a36921d05fa75a371c2a35311 | refs/heads/main | 2023-04-01T08:35:36.746155 | 2021-03-30T08:18:12 | 2021-03-30T08:18:12 | 348,627,416 | 0 | 0 | null | true | 2021-03-30T08:18:12 | 2021-03-17T08:06:03 | 2021-03-17T08:06:04 | 2021-03-30T08:18:12 | 80 | 0 | 0 | 0 | null | false | false | from django.db import models
class User(models.Model):
country = models.ForeignKey('users.Country', on_delete = models.SET_NULL, null=True)
phone = models.CharField(max_length=20, null=True)
name = models.CharField(max_length=45)
password = models.CharField(max_length=1000, default='')
gender = models.CharField(max_length=20, null=True)
birthdate = models.DateField(null=True)
email = models.EmailField(max_length=200)
profile_photo = models.URLField(max_length=2000, null=True)
is_email_valid = models.BooleanField(default=0)
class Meta:
db_table = 'users'
class SocialUser(models.Model):
social_user = models.CharField(max_length=45)
email = models.EmailField(max_length=100, null=True)
profile_photo = models.URLField(max_length=2000, null=True)
class Meta:
db_table = 'social_users'
class Host(models.Model):
user = models.ForeignKey('users.User', on_delete=models.CASCADE)
profile_photo = models.URLField(max_length=2000, null=True)
id_card_photo = models.URLField(max_length=2000, null=True)
class Meta:
db_table = 'hosts'
class Country(models.Model):
name = models.CharField(max_length=45)
class Meta:
db_table = 'countries'
| UTF-8 | Python | false | false | 1,336 | py | 16 | models.py | 13 | 0.651198 | 0.623503 | 0 | 41 | 31.585366 | 95 |
sgg10/cassie_backend | 2,645,699,865,501 | 954a43086c57b91ebe0ba4e856ff83b970ce71e8 | 0d122ee6615b53167f18b96f4fe16179135d5359 | /cassie/licenses/permissions/__init__.py | e0424dd402d0de2d85a494131bf17564bc65b8a6 | []
| no_license | https://github.com/sgg10/cassie_backend | 383e1d426fa337cee6de429a4d94dc62beb966e6 | 8acacb30470626e7f009cd1ca36ad8fe2d3b1286 | refs/heads/master | 2023-05-13T17:45:16.699646 | 2021-05-30T00:05:39 | 2021-05-30T00:05:39 | 372,090,778 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .licenses import * | UTF-8 | Python | false | false | 23 | py | 45 | __init__.py | 43 | 0.782609 | 0.782609 | 0 | 1 | 23 | 23 |
Jankinnn/Relation-Classification-based-on-Feature-Selection | 14,233,521,651,817 | 8206e3d7930e581327b61e299350e812366db093 | 970a1aab7e900f0675a2b4e80a1363957c451fb5 | /chi_square_test.py | 1ec17ed339c156cfbeac5b20c05b6055f52e645d | []
| no_license | https://github.com/Jankinnn/Relation-Classification-based-on-Feature-Selection | 17d17aaba25038ea1cd675fa996f4dac6e1252e9 | a51e0556ac624044cb6318f804adf7223c08cd60 | refs/heads/master | 2020-03-24T20:25:19.349033 | 2018-07-31T07:20:15 | 2018-07-31T07:20:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #coding=utf-8
import sys
import numpy as np
reload(sys)
sys.setdefaultencoding('utf-8')
thre = int(sys.argv[1])
fr = open('../data/corpus_train.txt','r')
class_types = {}
class_types_num = 0
all_text = []
while True:
line = fr.readline()
if not line:
break
content = line.strip().split()
all_text.append(content)
if content[0] not in class_types:
class_types[content[0]] = class_types_num
class_types_num+=1
fr.close()
word_text_map = {}
text_num_of_each_class = [0]*class_types_num
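# Count document frequencies: for every word, in how many texts of each class it
# appears (a text counts a word at most once, enforced by `duplicate`), while also
# tallying the number of texts per class.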
for text in all_text:
duplicate = {}
text_num_of_each_class[class_types[text[0]]] +=1
for word in text[1:]:
if word in duplicate:
continue
duplicate[word] = 1
if word in word_text_map:
text_map = word_text_map[word]
else:
text_map = [0]*class_types_num
text_map[class_types[text[0]]] += 1
word_text_map[word] = text_map
total_text_num = len(all_text)
word_text_list = []
word_list = []
for k in word_text_map.keys():
word_list.append(k)
word_text_list.append(word_text_map[k])
word_text_list = np.array(word_text_list,dtype=np.float32)
text_num_of_each_class = np.array(text_num_of_each_class,dtype=np.float32)
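# For every (word, class) pair the next four arrays hold the cells of a 2x2 contingency table:
#   A = word_text_list          -> texts of this class that contain the word
#   B = word_in_other_class     -> texts of other classes that contain the word
#   C = other_word_in_class     -> texts of this class that lack the word
#   D = not_the_word_and_class  -> texts of other classes that lack the word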
word_total_frequence = np.reshape(np.sum(word_text_list,1),(-1,1))
word_in_other_class = word_total_frequence - word_text_list
other_word_in_class = text_num_of_each_class - word_text_list
not_the_word_and_class = total_text_num - word_text_list - word_in_other_class - other_word_in_class
word_num = len(word_list)
print('word num:\t%d' % word_num)
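# Chi-square score per (word, class): (A*D - B*C)^2 / ((A+B) * (C+D)).
# The remaining factors of the full chi-square statistic, N and (A+C)*(B+D),
# are constant within a class, so omitting them leaves the per-class ranking unchanged.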
chi_square_value = np.divide(np.square(np.multiply(word_text_list,not_the_word_and_class) - np.multiply(word_in_other_class,other_word_in_class)) , np.multiply(word_text_list+word_in_other_class, other_word_in_class+not_the_word_and_class))
chosed_word = {}
for cla in range(class_types_num):
cur_chi = chi_square_value[:,cla]
order = np.argsort(-cur_chi)
for i in order[:thre]:
if i in chosed_word:
continue
chosed_word[i] = 1
fw = open('../data/chi_square_word_%d.txt' % thre,'w')
for k in chosed_word:
fw.write(word_list[k]+'\n')
fw.close()
| UTF-8 | Python | false | false | 2,062 | py | 6 | chi_square_test.py | 5 | 0.682347 | 0.671193 | 0 | 78 | 25.307692 | 240 |
vishalbelsare/creme | 13,408,887,908,432 | 3521d3af40b992e081f42b6b5d2e22c93adad3c1 | 199f9f936507a10f875902cd94876f6a77a170e5 | /creme/stats/mean.py | 42cd5b4fee65764321323f3243ee2be36adbf819 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/vishalbelsare/creme | 048a5cbadcda5e9213a2f305700d124206770e7e | 582926b42c487ffd05d687932c839dbd5adcb192 | refs/heads/master | 2020-04-21T09:38:06.402754 | 2019-02-06T16:07:22 | 2019-02-06T16:07:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from . import base
from . import count
class Mean(base.RunningStatistic):
"""Computes a running mean.
Attributes:
count (stats.Count)
mu (float): The current estimated mean.
"""
def __init__(self):
self.count = count.Count()
self.mu = 0
@property
def name(self):
return 'mean'
def update(self, x):
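        # Online mean update: mu_n = mu_{n-1} + (x_n - mu_{n-1}) / n,
        # with n taken from the running counter incremented by self.count.update().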
self.mu += (x - self.mu) / self.count.update().get()
return self
def get(self):
return self.mu
| UTF-8 | Python | false | false | 497 | py | 5 | mean.py | 4 | 0.555332 | 0.55332 | 0 | 27 | 17.407407 | 60 |
kort/kort-to-osm | 747,324,321,017 | 417337df05175fa30243c47dd1bfb3507d2cb55f | 0966f5916b78d2f18c7e0ae5b511ed4727545a6a | /kort2osm.py | 21c846be557fd40b5dd203b48074e1865ca895e3 | [
"MIT"
]
| permissive | https://github.com/kort/kort-to-osm | 54d9a3f16aa1d91ab79f36d87f9f256205317c04 | aef7f91380da09c86160948445c4d584f7d7f884 | refs/heads/develop | 2020-12-24T15:40:20.752401 | 2015-07-07T05:56:24 | 2015-07-07T05:56:24 | 11,957,615 | 2 | 0 | null | false | 2015-07-07T05:56:24 | 2013-08-07T18:43:47 | 2014-05-20T16:22:02 | 2015-07-07T05:56:24 | 242 | 3 | 2 | 4 | Python | null | null | """
kort-to-osm
Usage:
kort2osm.py [-d] [-q] [-v] [-c COUNT]
kort2osm.py -h | --help
kort2osm.py --version
Options:
-h, --help Show this help message and exit.
-d, --dry Do not actually make changes, only a dry run
-q, --quiet Run quietly, without any output.
-v, --verbose Show more verbose output.
-c COUNT, --count=COUNT Count of fixes to run through from kort to OSM.
--version Show the version and exit.
"""
import os
import logging
import logging.config
from ConfigParser import ConfigParser
import docopt
import yaml
from helper import osm_fix
__location__ = os.path.realpath(
os.path.join(
os.getcwd(),
os.path.dirname(__file__)
)
)
def setup_logging(
path=os.path.join(__location__, 'logging.yml'),
default_level=logging.INFO):
"""
Setup logging configuration
"""
if os.path.exists(path):
with open(path, 'rt') as f:
config = yaml.load(f.read())
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level)
if __name__ == '__main__':
arguments = docopt.docopt(__doc__, version='kort-to-osm 0.1')
# Set up logging
if arguments['--quiet']:
logging.basicConfig(level=logging.WARNING)
elif arguments['--verbose']:
logging.basicConfig(level=logging.DEBUG)
else:
setup_logging()
# Handle a dry run
if arguments['--dry']:
print '### Dry run: ###'
# Parse the configuration
config = ConfigParser()
config.read(os.path.join(__location__, 'setup.cfg'))
# Apply the fixes from kort to OSM
try:
limit = int(arguments['--count'])
except TypeError:
limit = 1
osm = osm_fix.OsmFix(config)
osm.apply_kort_fix(limit, arguments['--dry'])
| UTF-8 | Python | false | false | 1,855 | py | 10 | kort2osm.py | 6 | 0.59407 | 0.590836 | 0 | 76 | 23.407895 | 74 |
CorvustKodi/slurpee-server-docker | 15,779,709,870,003 | ab834ed927958556349114bbf29995f8ef9360d9 | 3b84b554a64594b69c6d942b7c59f46bd5615f5f | /app/search.py | 83f6e3af870687e7fb27b40d7d58cbf78a28592f | []
| no_license | https://github.com/CorvustKodi/slurpee-server-docker | cc7444d29856c8d3b185787b8928e440fbc418ce | 51afc815fec9fd011116faf7c0168ceeb805018e | refs/heads/master | 2023-05-12T09:43:59.298905 | 2022-03-21T15:51:27 | 2022-03-21T15:51:27 | 176,562,244 | 0 | 0 | null | false | 2023-05-01T20:33:27 | 2019-03-19T17:10:36 | 2022-03-21T14:55:40 | 2023-05-01T20:33:27 | 838 | 0 | 0 | 3 | Python | false | false | import sys
from torrent.scrape import scraper
from slurpee.utilities import settingsFromFile, settingsFromEnv
from slurpee.dataTypes import ShowDB
if __name__ == '__main__':
if len(sys.argv) > 1:
settings = settingsFromFile(sys.argv[1])
else:
settings = settingsFromEnv()
allshows = ShowDB(settings['SHOWS_DB_PATH'])
scraper(settings,allshows)
exit(0)
| UTF-8 | Python | false | false | 391 | py | 26 | search.py | 17 | 0.693095 | 0.685422 | 0 | 13 | 28.923077 | 63 |
dougvj/ODM2PythonAPI | 12,463,995,119,151 | e66e77871a7309137020b85dc3a753bb897db154 | 7aeb19a3a4dde82056e988e9e857ee835db778f0 | /Examples/Sample 1.1.py | 03b986eec4822de7d0dd5f28e5b3d098ad15ac4d | [
"BSD-3-Clause"
]
| permissive | https://github.com/dougvj/ODM2PythonAPI | 12d06caf18aa42ad9f27db2c325d9e618060bbef | 9945421691555d22ac5802e241d76257a53fa324 | refs/heads/master | 2020-03-28T20:35:50.703143 | 2018-03-02T22:19:53 | 2018-03-02T22:19:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import (absolute_import, division, print_function)
import sys
import os
from odm2api.ODMconnection import dbconnection
import pprint
from odm2api.ODM1_1_1.services import SeriesService
__author__ = 'stephanie'
this_file = os.path.realpath(__file__)
directory = os.path.dirname(this_file)
sys.path.insert(0, directory)
# ----------------------------------------
conns = [
#connection to the ODM1 database
dbconnection.createConnection('mysql', 'jws.uwrl.usu.edu', 'odm', "ODM", "ODM123!!", 1.1),
#connection to the ODM2 database
dbconnection.createConnection('mssql', '(local)', 'odm2', 'ODM', 'odm', 2.0)]
for conn in conns:
    pp = pprint.PrettyPrinter(indent=8)
    print()
    print("************************************************")
    print("\t\tODM2 -> ODM1 Demo: ")
    print("************************************************")
    print()
    odm1service = SeriesService(conn)
    pp.pprint(conn)
    print()
    print("************************************************")
    print("\t\tUnits: get_all_units()")
    print("************************************************")
    print()
    pp.pprint(odm1service.get_unit_by_id(321))
    print()
    print("************************************************")
    print("\t\tSites: get_all_sites()")
    print("************************************************")
    print()
    pp.pprint(odm1service.get_all_sites())
    print()
    print("************************************************")
    print("\t\tMethods: get_all_methods()")
    print("************************************************")
    print()
    pp.pprint(odm1service.get_method_by_id(8))
    print()
    print("************************************************")
    print("\t\tVariables: get_all_variables()")
    print("************************************************")
    print()
    pp.pprint(odm1service.get_variable_by_id(10))
    print()
    print("************************************************")
    print("\t\tData Sources: get_all_Source()")
    print("************************************************")
    print()
    pp.pprint(odm1service.get_all_sources())
    print()
    print("************************************************")
    print("\t\tSeries: get_all_series()")
    print("************************************************")
    print()
    ser = odm1service.get_all_series()
    pp.pprint(ser)
    print()
    print("************************************************")
    print("\t\tData Values: get_all_DataValues()")
    print("************************************************")
    print()
    pp.pprint(odm1service.get_values_by_series(ser[0].id))
print("The end")
| UTF-8 | Python | false | false | 2,630 | py | 2 | Sample 1.1.py | 1 | 0.431939 | 0.419011 | 0 | 97 | 26.113402 | 94 |
JZY11/weiboSpider | 15,075,335,229,296 | a300f43155465d0f731d50502d9dd5f6f81ea778 | 2d40a51d3fe9d641cfa0757f338910484389b139 | /weiboSpider.py | 4fee3b7e446f570aa74162ba5919fd0ce902e932 | []
| no_license | https://github.com/JZY11/weiboSpider | 178ffe1888ed2215dbb83f0f1ee855d441ecd92f | 10341073c8314495bb63ab9b92a9b09cf5c61d64 | refs/heads/master | 2023-06-01T23:36:25.535559 | 2019-08-27T05:57:38 | 2019-08-27T05:57:38 | 202,484,138 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#@Time : 2019/8/15 14:40
#@Author: jzy
#@File : weiboSpider.py
import codecs
import csv
import os
import random
import re
import sys
import traceback
from collections import OrderedDict
from datetime import datetime,timedelta
from time import sleep
import requests
from lxml import etree
from requests.adapters import HTTPAdapter
from tqdm import tqdm
class weibo(object):
    cookie = {'Cookie': 'your cookie'}  # replace 'your cookie' with your own weibo.cn cookie
    def __init__(self, user_id, filter=0, pic_download=0, video_download=0):
        """Initialize the weibo crawler."""
if not isinstance(user_id,int):
sys.exit(u'user_id应为一串数字,请重新输入')
if filter != 0 and filter != 1:
sys.exit(u'filter值应为0或1,请重新输入')
if pic_download != 0 and pic_download != 1:
sys.exit(u'pic_download值应为0或1,请重新输入')
if video_download != 0 and video_download != 1:
sys.exit(u'video_download值应为0或1,请重新输入')
        self.user_id = user_id  # numeric user id to crawl, e.g. the id of the user nicknamed "Dear-迪丽热巴" is 1669879400
        self.filter = filter  # 0 (default) crawls all of the user's weibo; 1 crawls only original weibo
        self.pic_download = pic_download  # 0 (default) does not download the original pictures; 1 downloads them
        self.video_download = video_download  # 0 (default) does not download weibo videos; 1 downloads them
        self.nickname = ''  # user nickname, e.g. "Dear-迪丽热巴"
        self.weibo_num = 0  # total number of weibo the user has posted
        self.got_num = 0  # number of weibo crawled so far
        self.following = 0  # number of users this user follows
        self.followers = 0  # number of followers
        self.weibo = []  # stores all crawled weibo information
    def deal_html(self, url):
        """Fetch a URL and return the parsed HTML."""
        try:
            html = requests.get(url, cookies=self.cookie).content  # raw bytes of the page returned by the server
            # Parse the HTML document into a DOM tree
            selector = etree.HTML(html)  # etree.HTML() builds an XPath-capable parse tree and auto-repairs the markup
return selector
except Exception as e:
print('Error:', e)
traceback.print_exc()
    def deal_garbled(self, info):
        """Extract a node's text and strip characters that would garble the output."""
try:
info = (info.xpath('string(.)').replace(u'\u200b', '').encode(
sys.stdout.encoding, 'ignore').decode(sys.stdout.encoding))
return info
except Exception as e:
print('Error:', e)
traceback.print_exc()
    def get_nickname(self):
        """Get the user's nickname."""
        try:
            url = 'https://weibo.cn/%d/info' % (self.user_id)
selector = self.deal_html(url)
nickname = selector.xpath('//title/text()')[0]
self.nickname = nickname[:-3]
if self.nickname == u'登录 - 新' or self.nickname == u'新浪':
sys.exit(u'cookie错误或已过期,请按照README中方法重新获取')
            print(u'用户昵称: ' + self.nickname)  # the u prefix keeps the Chinese output printing correctly
except Exception as e:
print('Error: ', e)
traceback.print_exc()
    def get_user_info(self, selector):
        """Get the user's nickname, weibo count, following count and follower count."""
        try:
            self.get_nickname()  # fetch the nickname first
user_info = selector.xpath("//div[@class='tip2']/*/text()")
self.weibo_num = int(user_info[0][3:-1])
print(u'微博数: ' + str(self.weibo_num))
self.following = int(user_info[1][3:-1])
print(u'关注数: ' + str(self.following))
self.followers = int(user_info[2][3:-1])
print(u'粉丝数: ' + str(self.followers))
print('*' * 100)
except Exception as e:
print('Error: ', e)
traceback.print_exc()
    def get_page_num(self, selector):
        """Get the total number of weibo pages."""
try:
if selector.xpath("//input[@name='mp']") == []:
page_num = 1
else:
page_num = (int)(selector.xpath("//input[@name='mp']")[0].attrib['value'])
return page_num
except Exception as e:
print('Error: ', e)
traceback.print_exc()
    def get_long_weibo(self, weibo_link):
        "Get the full text of a long original weibo"
try:
selector = self.deal_html(weibo_link)
info = selector.xpath("//div[@class='c']")[1]
wb_content = self.deal_garbled(info)
wb_time = info.xpath("//span[@class='ct']/text()")[0]
weibo_content = wb_content[wb_content.find(':') +
1:wb_content.rfind(wb_time)]
return weibo_content
except Exception as e:
            print('Error:', e)
            traceback.print_exc()
            return u'网络出错'
    def get_original_weibo(self, info, weibo_id):
        "Get an original (non-retweeted) weibo"
try:
weibo_content = self.deal_garbled(info)
weibo_content = weibo_content[:weibo_content.rfind(u'赞')]
a_text = info.xpath('div//a/text()')
if u'全文' in a_text:
weibo_link = 'https://weibo.cn/comment/' + weibo_id
wb_content = self.get_long_weibo(weibo_link)
if wb_content:
weibo_content = wb_content
return weibo_content
except Exception as e:
print('Error: ', e)
traceback.print_exc()
    def get_long_retweet(self, weibo_link):
        "Get the full text of a long retweeted weibo"
try:
wb_content = self.get_long_weibo(weibo_link)
weibo_content = wb_content[:wb_content.rfind(u'原文转发')]
return weibo_content
except Exception as e:
print('Error: ' , e)
traceback.print_exc()
    def get_retweet(self, info, weibo_id):
        "Get a retweeted weibo"
try:
original_user = info.xpath("div/span[@class='cmt']/a/text()")
if not original_user:
wb_content = u'转发微博已被删除'
return wb_content
else:
original_user = original_user[0]
wb_content = self.deal_garbled(info)
wb_content = wb_content[wb_content.find(':') +
1:wb_content.rfind(u'赞')]
wb_content = wb_content[:wb_content.rfind(u'赞')]
a_text = info.xpath('div//a/text()')
if u'全文' in a_text:
weibo_link = 'http://weibo.cn/comment/' + weibo_id
weibo_content = self.get_long_retweet(weibo_link)
if weibo_content:
wb_content = weibo_content
retweet_reason = self.deal_garbled(info.xpath('div')[-1])
retweet_reason = retweet_reason[:retweet_reason.rindex(u'赞')]
wb_content = (retweet_reason + '\n' + u'原始用户: ' + original_user +
'\n' + u'转发内容: ' + wb_content)
return wb_content
except Exception as e:
print('Error: ', e)
traceback.print_exc()
    def is_original(self, info):
        """Determine whether a weibo is an original post."""
is_original = info.xpath("div/span[@class='cmt']")
if len(is_original) > 3:
return False
else:
return True
    def get_weibo_content(self, info, is_original):
        """Get the weibo text content."""
try:
weibo_id = info.xpath('@id')[0][2:]
if is_original:
weibo_content = self.get_original_weibo(info, weibo_id)
else:
                weibo_content = self.get_retweet(info, weibo_id)
print(weibo_content)
return weibo_content
except Exception as e:
print('Error: ', e)
traceback.print_exc()
    def get_publish_place(self, info):
        """Get the publish location of a weibo."""
try:
div_first = info.xpath('div')[0]
a_list = div_first.xpath('a')
publish_place = u'无'
for a in a_list:
if ('place.weibo.com' in a.xpath('@href')[0]
and a.xpath('text()')[0] == u'显示地图'):
weibo_a = div_first.xpath("span[@class='ctt']/a")
if len(weibo_a) >= 1:
publish_place = weibo_a[-1]
if (u'视频' == div_first.xpath(
"span[@class='ctt']/a/text()")[-1][-2:]):
if len(weibo_a) >= 2:
publish_place = weibo_a[-2]
else:
publish_place = u'无'
publish_place = self.deal_garbled(publish_place)
break
print(u'微博发布位置:' + publish_place)
return publish_place
except Exception as e:
print('Error: ', e)
traceback.print_exc()
    def get_publish_tool(self, info):
        """Get the client/tool used to publish the weibo."""
try:
str_time = info.xpath("div/span[@class='ct']")
str_time = self.deal_garbled(str_time[0])
if len(str_time.split(u'来自')) > 1:
publish_tool = str_time.split(u'来自')[1]
else:
publish_tool = u'无'
print(u'微博发布工具: ' + publish_tool)
return publish_tool
except Exception as e:
print('Error: ', e)
traceback.print_exc()
| UTF-8 | Python | false | false | 10,830 | py | 1 | weiboSpider.py | 1 | 0.473507 | 0.463929 | 0 | 244 | 39.221311 | 145 |
mr-atharva-kulkarni/Algorithms-for-Sea-Route-Optimizaiton | 11,785,390,307,394 | abceba9c52a6980dcdc425db9274539d054e0fc2 | 89d6673dbb5c23648072f3cddac6a4e5c35457fd | /routeplanning/ReadingExcelFile.py | 8a95522c553aacedd327c484b3b3c4fd95457b41 | [
"MIT"
]
| permissive | https://github.com/mr-atharva-kulkarni/Algorithms-for-Sea-Route-Optimizaiton | eefa260be0c6c65c3607af4da48511dbbe575798 | 48d90ad2445b60d2228ed0aac60e1acba773b345 | refs/heads/main | 2023-02-23T07:53:26.808342 | 2021-01-26T05:42:08 | 2021-01-26T05:42:08 | 308,237,932 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
def readLatLong(path):
df = pd.read_csv(path)
lat = df['Lat']
long = df['Long']
return lat, long
| UTF-8 | Python | false | false | 135 | py | 51 | ReadingExcelFile.py | 35 | 0.592593 | 0.592593 | 0 | 8 | 15.875 | 26 |
ericli0419/Transcrypt | 15,315,853,384,171 | a144351d940841b5bd5c67d27fdf6387cab9d084 | c73efc12a16cd0973c637a9cac2c143cdf87862f | /transcrypt/demos/turtle_demos/snowflake.py | 3574c72d7721d11c485ea29338e48a3939f644bd | [
"Apache-2.0"
]
| permissive | https://github.com/ericli0419/Transcrypt | dff539e4565a8ed3125e59233431baa06488a54c | f2ca7ed8ac5f2ef43910548df5c70902b1a30dd4 | refs/heads/master | 2020-04-11T18:40:54.862595 | 2018-12-05T15:50:19 | 2018-12-05T15:50:19 | 162,007,582 | 1 | 0 | Apache-2.0 | true | 2018-12-16T14:11:02 | 2018-12-16T14:11:01 | 2018-12-16T14:11:01 | 2018-12-05T15:50:41 | 121,939 | 0 | 0 | 0 | null | false | null | from turtle import *
josh = Turtle ()
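# draw() is the classic Koch-curve recursion: any segment longer than 9 px is replaced
# by four thirds-length segments with 60/-120/60 degree turns; the loop at the bottom
# repeats it three times with 120-degree turns, tracing a Koch-snowflake figure.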
def draw (length):
if length > 9:
draw (length / 3)
josh.left (60)
draw (length / 3)
josh.right (120)
draw (length / 3)
josh.left (60)
draw (length / 3)
else:
josh.forward (length)
length = 150
josh.up ()
josh.forward (length / 2)
josh.left (90)
josh.forward (length / 4)
josh.right (90)
josh.down ()
for i in range (3):
josh.right (120)
draw (length)
josh.done ()
| UTF-8 | Python | false | false | 492 | py | 260 | snowflake.py | 197 | 0.542683 | 0.49187 | 0 | 29 | 15.965517 | 29 |
eumsys/eum | 2,963,527,478,705 | 264b521669012148ca3f580f61fd3a1e9cbd29a8 | bf05d4fe1a66eb72c4d044ede15bbd66bd3f5605 | /tests/CAJA_1_Monterrey/cobroVoluntario.py | 229d9db6f2e5abcc581a3895dbde2cb1a65c0a54 | []
| no_license | https://github.com/eumsys/eum | 975789a11f0469a6e5b26244485ac24c1bcb5bcf | e37d1e7660b14ff6d0391c9688d3d1062baeebdc | refs/heads/master | 2021-11-24T09:18:03.971986 | 2021-11-12T18:27:04 | 2021-11-12T18:27:04 | 165,714,507 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
from PyQt4 import QtCore, QtGui, uic
# Load our .ui file
form_class = uic.loadUiType("/home/pi/Documents/CAJA_1_Monterrey/Interfaces_Caja/tarifaVoluntaria.ui")[0]
class MyWindowClass(QtGui.QMainWindow, form_class):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.setupUi(self)
self.pushButton.clicked.connect(self.btn_event)
    # Button click event handler
def btn_event(self):
valor = self.lineEdit.text()
estado = "bien"
puntos = 0
for letra in valor:
if letra != "1" and letra != "2" and letra != "3" and letra != "4" and letra != "5" and letra != "6" and letra != "7" and letra != "8" and letra != "9" and letra != "0" and letra != ".":
estado = "error"
if letra == ".":
puntos = puntos + 1
if valor == "":
estado = "vacio"
if estado == "error" or puntos > 1:
self.label_3.setText("Cantidad no valida")
elif estado == "vacio":
self.label_3.setText("Ingresa una cantidad")
else:
archivo=open("/home/pi/Documents/CAJA_1_Monterrey/montoVoluntario.txt", "w")
archivo.write(valor)
archivo.close()
exit()
pass
app = QtGui.QApplication(sys.argv)
MyWindow = MyWindowClass(None)
MyWindow.show()
app.exec_()
| UTF-8 | Python | false | false | 1,219 | py | 240 | cobroVoluntario.py | 134 | 0.653815 | 0.638228 | 0 | 40 | 29.475 | 199 |
dragonchain/dragonchain-sdk-python | 5,437,428,646,177 | 45896bac7a34ae529acaadd5dc856da89f83e4de | ef5e7f7d593e550c074d3dea469d6d3d43b6da60 | /dragonchain_sdk/async_helpers.py | 107dad3c230cf4df02b18492259a69aea81a91cf | [
"Apache-2.0"
]
| permissive | https://github.com/dragonchain/dragonchain-sdk-python | d2806e6f8aaa9cf6d1d7d69fdd3ce97e7708b186 | 84606e5fb9ee85cc9ea69e35c189c63c7f9ea1a4 | refs/heads/master | 2021-08-08T12:06:08.486491 | 2020-02-05T20:08:12 | 2020-02-05T20:08:12 | 203,888,623 | 8 | 1 | Apache-2.0 | false | 2020-07-20T20:14:03 | 2019-08-22T23:31:59 | 2020-02-05T20:08:16 | 2020-07-20T20:10:53 | 386 | 7 | 0 | 1 | Python | false | false | # Copyright 2020 Dragonchain, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module should never be imported on python <3.5, as it contains syntax that is not valid before 3.5
import types
import logging
import asyncio
from typing import cast, Optional, Dict, Any, TYPE_CHECKING
import aiohttp
import dragonchain_sdk
from dragonchain_sdk import exceptions
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from dragonchain_sdk import request
from dragonchain_sdk import dragonchain_client
from dragonchain_sdk.types import request_response
async def create_aio_client(*args: Any, **kwargs: Any) -> "dragonchain_client.Client":
"""Construct a new async ``Client`` object
Args:
Refer to dragonchain_sdk.create_client for arguments
Returns:
A new Dragonchain client which makes async requests.
"""
client = dragonchain_sdk.create_client(*args, **kwargs)
# Change out the client's request internals to become async-capable with aiohttp
client.request.session = aiohttp.ClientSession(loop=asyncio.get_event_loop())
client.request._make_request = types.MethodType(_make_request, client.request) # type: ignore
# Add close function to the client for aiohttp cleanup
client.close = types.MethodType(client_close, client) # type: ignore
return client
async def client_close(self: "dragonchain_client.Client") -> None:
"""
Close any aiohttp sessions associated with an instantiated async client
"""
await self.request.session.close()
async def _make_request(
self: "request.Request",
http_verb: str,
path: str,
json_content: Optional[Dict[Any, Any]] = None,
timeout: int = 30,
verify: bool = True,
parse_response: bool = True,
additional_headers: Optional[Dict[str, str]] = None,
) -> "request_response":
"""
Make an async http request to a dragonchain with the given information
Should take and handle exactly like dragonchain_sdk.request.Request._make_request, but asynchronous
"""
full_url, content, header_dict = self._generate_request_data(
http_verb=http_verb, path=path, json_content=json_content, additional_headers=additional_headers
)
# Make request with appropriate data
try:
logger.debug("Making request. Verify SSL: {}, Timeout: {}".format(verify, timeout))
async with self.session.request(
method=http_verb, url=full_url, data=content, headers=header_dict, ssl=verify, timeout=aiohttp.ClientTimeout(total=timeout)
) as r:
try:
return_dict = {}
return_dict["status"] = r.status
logger.debug("Response status code: {}".format(r.status))
return_dict["ok"] = r.status // 100 == 2
return_dict["response"] = await r.json() if parse_response else await r.text()
return cast("request_response", return_dict)
except Exception as e:
raise exceptions.UnexpectedResponseException("Unexpected response from Dragonchain. Error: {}".format(e))
except exceptions.UnexpectedResponseException:
raise
except Exception as e:
raise exceptions.ConnectionException("Error while communicating with the Dragonchain: {}".format(e))
    # Can get here if context manager doesn't throw exceptions.UnexpectedResponseException which could have been raised
    raise exceptions.UnexpectedResponseException("Unknown error processing result from dragonchain")
| UTF-8 | Python | false | false | 4,053 | py | 45 | async_helpers.py | 25 | 0.70491 | 0.700469 | 0 | 95 | 41.663158 | 135 |
UrbanoFonseca/abnormal_activations | 11,665,131,202,919 | 1bf0ff7a0b53c9f67cf5c5008b9708648547a802 | 72ea3b93cabdcf42acf256b5c7c067dd4e5950e9 | /abnormal_activations/abnormal_activations.py | ac1b2fc0a40615e5e15d95b8a74825e7610f6fba | [
"MIT"
]
| permissive | https://github.com/UrbanoFonseca/abnormal_activations | f3066b9f8bf9db894c67367ca637b03bb69751bb | aaec68fd694a56785143779fd90c4fd4f0749769 | refs/heads/master | 2021-08-30T11:03:39.747066 | 2017-12-17T16:10:13 | 2017-12-17T16:10:13 | 109,316,784 | 0 | 0 | MIT | false | 2017-12-17T16:10:14 | 2017-11-02T20:42:34 | 2017-11-02T21:07:02 | 2017-12-17T16:10:14 | 11 | 0 | 0 | 0 | Python | false | null | import numpy as np
from keras import backend as K
class ActivationFunctions():
def alpha_linear(x, alpha=0.5):
# The linear function Y = A * X
return alpha * x
def step(x, threshold=0.0):
# The step function returns 0 for x < threshold
# and 1 otherwise.
return 1 if x > threshold else 0
def LeCunSigmoid(x, alpha=0.01):
# As presented in the 'Generalization and Network Design Strategies'
# from Y. LeCun
# The alpha parameter is supposed to be a small linear term to avoid
# flat spots.
# f(x) = 1.7159 * tanh(2/3*x) + a*x
return (1.7159 * K.tanh(2 * x / 3) + alpha * x)
def ReSech(x):
# As presented in 'A novel activation for multilayer feed-forward neural networks'
# from Njikam, A.B.S. and Zhao, H.
# where sech is the hyperbolic secant
cosh = (K.exp(x)+K.exp(-x))/2
sech_x = 1 / cosh
return x * sech_x
def scaled_sigmoid(x):
# As presented in 'Revise Saturated Activation Functions'
# by Xu, B, and Huang, R. and Li, M.
return 4 / ( 1 + K.exp(-x)) - 2
def penalized_tanh(x, alpha=0.25):
# As presented in 'Revise Saturated Activation Functions'
# by Xu, B, and Huang, R. and Li, M.
return K.switch(x > K.variable(0), K.tanh(x), alpha * K.tanh(x))
def trunc_sin(x):
# Based on the 'Taming the Waves:
# Sine as Activation Function in Deep Neural Networks'
# from Parascandolo, G. and Huttunen, H. and Virtanen, T.
pi = 3.14159265359
return (K.switch(x < K.constant(-pi/2), K.constant(0), K.switch(x > K.constant(pi/2), K.constant(1), K.sin(x))))
def sin(x):
return K.sin(x)
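# Minimal usage sketch (not part of the original module; assumes a Keras `model`
# built elsewhere) - these callables can be passed directly as an activation:
#   from keras.layers import Dense
#   model.add(Dense(64, activation=ActivationFunctions.penalized_tanh))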
| UTF-8 | Python | false | false | 1,563 | py | 4 | abnormal_activations.py | 3 | 0.65515 | 0.623161 | 0 | 56 | 26.910714 | 114 |
archivesspace/api-training | 4,509,715,668,621 | a6cda1a8a6e62390dbbe2a473a63bcbd4e836a9d | 5d267dbd0c4daa7baeab82c5c494038bccc76dc3 | /getMARCfromOCLCandPOSTtoASpace.py | 63a2cb3d84dcd585204fc6fccea4fa615c9da974 | [
"MIT"
]
| permissive | https://github.com/archivesspace/api-training | f132684b77e8f6a0166421fd34e49bf70954cda4 | 3aa088c1300cf4f950015d3c3c91a8e229ed41a9 | refs/heads/master | 2020-03-20T15:24:40.633892 | 2018-08-10T11:37:17 | 2018-08-10T11:37:17 | 137,512,373 | 7 | 3 | MIT | false | 2018-08-05T20:27:11 | 2018-06-15T16:56:51 | 2018-06-19T19:38:07 | 2018-08-05T20:25:24 | 26,208 | 1 | 1 | 1 | Python | false | null | import pathlib, authenticate, requests, json, glob, supersecrets, runtime, sys
print("Demo: you can use this simple script to download MARC records from OCLC. However, you will need an API key to authenticate. Once you have one, you can update the 'wskey' variable.")
# See: https://platform.worldcat.org/api-explorer/apis/wcapi
oclcURL = 'http://www.worldcat.org/webservices/catalog/content/'
directory = "marc-files"
wskey = supersecrets.wskey
oclc_numbers = []
oclc_numbers.extend(('647845821', '648010759', '51024910', '647834263', '647909502', '647824468', '647844488', '34336539'))
pathlib.Path(directory).mkdir(exist_ok=True)
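# Each request below asks the WorldCat Search API for the catalog record of one
# OCLC number and saves the response body as "<number>.xml" for the upload step.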
for num in oclc_numbers:
response = requests.get(oclcURL + num + '?wskey=' + wskey)
with open(directory+'/'+num+'.xml', 'wb') as file:
file.write(response.content)
file.close()
print("\nNow that we've downloaded the MARC files. Let's post them to ArchivesSpace.\n")
repository = "2"
max_files = 10
import_type = "marcxml"
file_list = []
for f in glob.iglob(directory + '/*.xml'):
file_list.append(("files[]", open(f, "rb")))
if len(oclc_numbers) > max_files:
print("Woah. Let's not get too crazy. Try again with less files, or update this script so that it will split up the jobs so that each post has no more than {0} files each.".format(max_files))
sys.exit(1)
print("Okay. We're going to upload the following filenames: {0}".format(oclc_numbers))
if not input("Are you sure? (y/n): ").lower().strip()[:1] == "y":
print("You'd be wise to check out the MARC files first, anyway!")
sys.exit(1)
baseURL, headers = authenticate.login()
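# Build the import-job description, then POST it together with the downloaded MARC
# files: the files are sent as multipart "files[]" parts and the serialized job JSON
# goes in the "job" parameter of the repository's jobs_with_files endpoint.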
job = json.dumps(
{
"job_type": "import_job",
"job": {
"import_type": import_type,
"jsonmodel_type": "import_job",
"filenames": oclc_numbers
}
}
)
upload = requests.post(baseURL + "/repositories/" + repository + "/jobs_with_files"
, files=file_list
, params={"job": job}
, headers=headers).json()
print("Import job started...")
print("\nCheck out " + baseURL + upload['uri'] + " in ArchivesSpace.\n")
| UTF-8 | Python | false | false | 2,132 | py | 27 | getMARCfromOCLCandPOSTtoASpace.py | 16 | 0.652439 | 0.615854 | 0 | 59 | 35.135593 | 196 |