Dataset schema (one row per source file):

| Column | Dtype | Values |
|---|---|---|
| repo_name | string | lengths 7-111 |
| __id__ | int64 | 16.6k-19,705B |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | lengths 5-151 |
| content_id | string | length 40 |
| detected_licenses | list | n/a |
| license_type | string | 2 classes |
| repo_url | string | lengths 26-130 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | lengths 4-42 |
| visit_date | timestamp[ns] | n/a |
| revision_date | timestamp[ns] | n/a |
| committer_date | timestamp[ns] | n/a |
| github_id | int64 | 14.6k-687M, nullable |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 12 classes |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | n/a |
| gha_created_at | timestamp[ns] | n/a |
| gha_updated_at | timestamp[ns] | n/a |
| gha_pushed_at | timestamp[ns] | n/a |
| gha_size | int64 | 0-10.2M, nullable |
| gha_stargazers_count | int32 | 0-178k, nullable |
| gha_forks_count | int32 | 0-88.9k, nullable |
| gha_open_issues_count | int32 | 0-2.72k, nullable |
| gha_language | string | lengths 1-16, nullable |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | string | lengths 10-2.95M |
| src_encoding | string | 5 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10-2.95M |
| extension | string | 19 classes |
| num_repo_files | int64 | 1-202k |
| filename | string | lengths 4-112 |
| num_lang_files | int64 | 1-202k |
| alphanum_fraction | float64 | 0.26-0.89 |
| alpha_fraction | float64 | 0.2-0.89 |
| hex_fraction | float64 | 0-0.09 |
| num_lines | int32 | 1-93.6k |
| avg_line_length | float64 | 4.57-103 |
| max_line_length | int64 | 7-931 |
---
repo: Dumebiz/ciscodevnet | path: /add_device.py | license: none | url: https://github.com/Dumebiz/ciscodevnet

```python
#!/usr/bin/env python
"""
Author: Dumebi Umezinne
Purpose: Demonstrate Python "requests" to get an access token
from Cisco DNA Center using the REST API.
"""
import requests
from auth_token import get_token
import time
import json
from pprint import pprint as pprint
new_device_dict = {
"ipAddress": ["172.20.20.20"],
"snmpVersion": "v3",
"snmpROCommunity": "readonly",
"snmpRWCommunity": "readwrite",
"snmpRetry": "1",
"snmpAuthPassphrase": "kjdiDI89",
"snmpAuthProtocol": "",
"snmpMode": "AUTHPRIV",
"snmpPrivPassphrase": "hjdahDue88299",
"snmpTimeout": "120",
"snmpUserName": "admin",
"cliTransport": "ssh",
"userName": "ambrana",
"password": "diablo419!",
"enablePassword": "diavolo678!"
}
def add_device():
token = get_token()
api_path = "https://sandboxdnac.cisco.com/dna"
headers ={"Content-type": "application/json", "X-Auth-Token" : token}
# POST request to add a new device with device details from
# dictionary created earlier
add_resp = requests.post(
f'{api_path}/intent/api/v1/network-device',
json=new_device_dict,
headers=headers
)
print(add_resp.status_code)
#print(add_resp)
#add_data = add_resp.json()
#print(add_data)
#print(add_resp.json()["response"])
#print(add_resp.headers)
print("***********************")
if add_resp.ok:
# Wait a few seconds as this is an aysnc process
print(f"Request accepted: status code {add_resp.status_code}")
time.sleep(10)
# Query DNA center to GET the status of task (task url gotten from response to add)
task_path = add_resp.json()["response"]["url"]
print(f'{api_path}/intent{task_path}')
task_resp = requests.get (
f'{api_path}/intent{task_path}',
headers = headers
)
#Check if task GET is successful
if task_resp.ok:
task_data = task_resp.json()["response"]
#Check if device add async task completed successfully
if not task_data["isError"]:
print("Successfully added new device")
else:
print(f"Async task error see: {task_data['progress']}")
print(f"Aysnc task failure: {task_data['failureReason']}")
else:
print(f'Async GET failed: status code {task_resp.status_code}')
else:
#The initial new device POST failed with details below
print(f"Device addition failed with code {add_resp.status_code}")
print(f"Failure body: {add_resp.text}")
def main():
add_device()
if __name__ == "__main__":
main()
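
The script imports `get_token` from a local `auth_token` module that is not part of this record. A minimal sketch of what such a helper could look like follows; the DNA Center auth endpoint path and the DevNet sandbox credentials are assumptions for illustration, not taken from the repo:

```python
# Hypothetical auth_token.py: fetch an access token from DNA Center.
import requests
from requests.auth import HTTPBasicAuth


def get_token():
    # Token endpoint and credentials below are assumed sandbox values.
    resp = requests.post(
        "https://sandboxdnac.cisco.com/dna/system/api/v1/auth/token",
        auth=HTTPBasicAuth("devnetuser", "Cisco123!"),
    )
    resp.raise_for_status()
    # The token comes back in the JSON body; add_device() then sends it
    # in the X-Auth-Token header.
    return resp.json()["Token"]
```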
---
repo: yunkb/fibrosis | path: /source/solvers/ns_steady/nsestimator.py | license: none | url: https://github.com/yunkb/fibrosis

```python
''' Steady Navier-Stokes estimator module
Author: David Nolte (dnolte@dim.uchile.cl)
Date: 20-09-2016
'''
# TODO: Throw away bctype option??
from dolfin import *
from functions import inout
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize, brute
from nsproblem import NSProblem
from nssolver import NSSolver
import warnings


class NSEstimator:
    ''' Estimate the coefficients of Navier-slip boundary conditions using the
    NSSolver class.

    The Navier-slip BCs need to be specified for every boundary segment
    (defined as a gmsh physical group) in the estimator input file with
    'preset: navierslip'. Only the Nitsche method is currently supported.

    Note: in the iteration process, the previous solution is used as the
    initial solution for each solve.

    Currently implemented optimization methods:
        - scipy.optimize.minimize
    '''
    def __init__(self, opt_ref, opt_est):
        ''' Initialize the Navier-slip estimator class.

        Args:
            opt_ref     path to yaml file with settings for the reference case
            opt_est     path to yaml file with settings for the estimation
        '''
        self.optfile_ref = opt_ref
        self.optfile_est = opt_est
        self.pb_est = None
        self.pb_ref = None
        self.options = inout.read_parameters(opt_est)
        self.uref = None
        self.pref = None
        self.pref_meas = None
        self.uref_meas = None
        self.u_meas = None
        # TODO: in the end self.u_meas is the LAST iterate, not the optimal
        # one!!
        self.u_opt = None
        self.p_opt = None
        self._x0 = None
        self._xfun = None
        self._bounds = None
        self.xlegend = None
        self.x_opt = None
        self.f_opt = None
        self.fval = []
        self.x = []
        self.beta = []
        self.result = None
        self.p_stei = None
        self.BO = None
        self._end = False

        self.init_problems()
        pass

    def init_problems(self):
        ''' Create the reference problem and the estimator problem.

        Automatically called by self.__init__(). Decoupled from the actual
        pb.init() calls at the beginning of self.estimate(), so parameters can
        be changed more easily between calls.
        '''
        self.pb_ref = NSProblem(self.optfile_ref)
        self.pb_est = NSProblem(self.optfile_est)
        return self

    def _init_measurement(self):
        ''' Initialize the measurement function space and functions. '''
        opt_meas = self.options['estimation']['measurement']
        mesh, _, bnds = inout.read_mesh(opt_meas['mesh'])
        self.bnds_meas = bnds   # needed for pressure_drop
        if opt_meas['elements'] == 'P1':
            deg = 1
        elif opt_meas['elements'] == 'P2':
            deg = 2
        else:
            raise ValueError('Element type unknown. Available options: P1, P2')

        V = FunctionSpace(mesh,
                          VectorElement('Lagrange', mesh.ufl_cell(), deg))
        Q = FunctionSpace(mesh, FiniteElement('Lagrange', mesh.ufl_cell(), 1))
        self.pref_meas = Function(Q)
        self.uref_meas = Function(V)
        self.u_meas = Function(V)
        return self

    def _interp_measurement(self, u, ref=False):
        ''' Interpolate a velocity field to the measurement function space.

        Args:
            u       velocity field to interpolate
            ref     reference solution flag
        '''
        if ref:
            if not self.uref_meas:
                raise Exception('uref_meas is None. Call _init_measurement '
                                'first!')
            uinp = self.uref_meas
        else:
            if not self.u_meas:
                raise Exception('u_meas is None. Call _init_measurement '
                                'first!')
            uinp = self.u_meas

        LI = LagrangeInterpolator()
        LI.interpolate(uinp, u)
        return self

    def add_gaussian_noise(self, u, scal_umax):
        ''' Add Gaussian noise to a velocity field u, with amplitude
        scal_umax*umax.

        Args:
            u (dolfin function)     velocity field
            scal_umax               scaling factor for the noise amplitude
        '''
        assert self.u_meas, 'self.u_meas doesn\'t exist yet!'
        if 'random_seed' in self.pb_est.options['estimation']:
            np.random.seed(self.pb_est.options['estimation']['random_seed'])
        dim = u.vector().size()
        umax = abs(u.vector().array()).max()
        noise = np.random.normal(0., scal_umax*umax, dim)
        noise_fun = Function(u.function_space())
        noise_fun.vector()[:] = noise
        u.vector().axpy(1.0, noise_fun.vector())
        pass

    def _update_parameters_inflow(self, x):
        ''' Update the coefficient of the inflow profile. Modifies Expressions
        stored in NSProblem.bcs.

        Procedure: 1) Find the inlet BC in the list of strong Dirichlet BCs.
                   2) Update the value at that position. According to the
                      settings, change U only, or U and R.

        # TODO: PROBLEM IS, R = R + dR WILL BE OVERWRITTEN !
        # SOLVED?: SPLIT options dict and bc_lst, which is modified by BCs in
        # class NSProblem

        ATTENTION: This is quite fragile and depends on the fact that the
        boundary conditions are processed in order ...

        Args:
            x   parameter: max inflow velocity x[0], dR x[1] (if set)
        '''
        bc_lst = self.pb_est.bc_lst
        param = self.options['estimation']['parameters']['inflow']
        assert param['use']

        val = None
        count_dbc = 0
        for i_bc, bc in enumerate(bc_lst):
            # count_dbc counts the Dirichlet BCs before the inlet
            if 'preset' in bc and bc['preset'] == 'inlet':
                val = bc['value']
                bid = bc['id']
                break
            elif 'method' in bc and bc['method'] == 'essential':
                count_dbc += 1
        else:
            raise KeyError('Inlet BC not found in input file.')

        assert bc_lst[i_bc]['id'] == bid
        assert bc_lst[i_bc]['preset'] == 'inlet'

        val.U = x[0]
        if param['use'] == 2:
            # use_slip does not matter. dR will always be in second position
            # in the parameters vector. Maybe necessary TODO this in the
            # future.
            warnings.warn('Take care. inlet>value>R needs to be INNER radius.')
            R_inlet = self.options['boundary_conditions'][i_bc]['value']['R']
            val.R = x[1] + R_inlet

        V = self.pb_est.W.sub(0)
        if self.pb_est.is_enriched(V):
            val = project(val, V.collapse(), solver_type='lu')

        self.pb_est.bcs[count_dbc].set_value(val)
        return self

    def _update_parameters_navierslip(self, x):
        ''' Update the coefficients of the Navier-slip BCs.

        Modifies Expressions stored in NSProblem.bcs_navierslip. Selects
        the gamma prefactor or dR according to the 'parameters' setting in the
        options file.

        Args:
            x   parameter vector
        '''
        param = self.options['estimation']['parameters']['navierslip']
        assert param['use']

        if 'boundary_id' in self.options['estimation']:
            boundary_selection = self.options['estimation']['boundary_id']
            if type(boundary_selection) is int:
                boundary_selection = [boundary_selection]
        else:
            boundary_selection = [0]

        if boundary_selection[0] or len(self.pb_est.bcs_navierslip) > 1:
            raise NotImplementedError('Only one parameter per boundary '
                                      'supported currently.')
        if not self.pb_est.bcs_navierslip:
            raise Exception('No Navier-slip boundary found.')

        # find the position of the Navier-slip coefficient in the parameter
        # vector
        index = self.xlegend.index('navierslip')
        val = x[index]
        # for val, bc in zip(, self.pb_est.bcs_navierslip):
        #     if bc[0] in boundary_selection or boundary_selection == [0]:
        for bc in self.pb_est.bcs_navierslip:
            if param['use'] == 1:
                bc[1].a = val
            elif param['use'] == 2:
                bc[1].dR = val
        return self

    def _update_parameters_nitsche(self, beta):
        ''' Update the coefficient of the Nitsche boundary conditions.

        NOTE: Careful, this updates the betas of ALL Nitsche BCs!!!

        Args:
            beta    new value for beta
        '''
        self.pb_est.options['nitsche']['beta1'] = beta
        raise Exception('Nitsche optimization not supported in the current '
                        'version')
        return self

    def _update_parameters_transpiration(self, x):
        ''' Update the coefficients of the transpiration BC.

        Modifies the Expression stored in NSProblem.bcs_transpiration.

        Args:
            x   parameters vector
        '''
        param = self.options['estimation']['parameters']['transpiration']
        assert param['use']

        if 'boundary_id' in self.options['estimation']:
            boundary_selection = self.options['estimation']['boundary_id']
            if type(boundary_selection) is int:
                boundary_selection = [boundary_selection]
        else:
            boundary_selection = [0]

        if boundary_selection[0] or len(self.pb_est.bcs_transpiration) > 1:
            raise NotImplementedError('Only one parameter per boundary '
                                      'supported currently.')
        if not self.pb_est.bcs_transpiration:
            raise Exception('No transpiration boundary found.')

        # find the correct index in the parameters array
        index = self.xlegend.index('transpiration')
        val = x[index]
        for bc in self.pb_est.bcs_transpiration:
            assert self.pb_est.is_Constant(bc[1])
            bc[1].assign(val)
        return self

    def _update_parameters(self, x):
        ''' Update the coefficients of the BCs. Depending on the problem:
            no-slip:     bctype == 0  -> inflow (U)
            navier-slip: bctype == 1  -> any combination of inflow (U, dR),
                         dR or gamma, beta (Nitsche/transpiration)

        Args:
            x   parameter
        '''
        param = self.options['estimation']['parameters']
        if param['inflow']['use']:
            self._update_parameters_inflow(x)
        if param['navierslip']['use']:
            # navier-slip
            self._update_parameters_navierslip(x)
        if param['transpiration']['use']:
            # transpiration
            self._update_parameters_transpiration(x)

        self.pb_est.variational_form()
        return self

    def _apply_xfun(self, x):
        ''' Apply 'xfun' to the parameters x.

            xfun == 0:  linear,       y = x
            xfun == 1:  exponential,  y = 2**x
            xfun == 2:  tanh,         y = a + b*0.5*(np.tanh(x) + 1)

        Args:
            x (ndarray)     parameter

        Returns:
            y (ndarray)     result of xfun(x)
        '''
        # check for the bruteforce one-parameter corner case
        if type(x) in (np.ndarray, np.float64) and not x.shape:
            x = np.array([x])

        yi = []
        for i, (xi, fi, bi) in enumerate(zip(x, self._xfun, self._bounds)):
            if fi == 0:
                # linear
                yi.append(xi)
            if fi == 1:
                # exponential
                yi.append(2**xi)
            if fi == 2:
                # tanh
                yi.append(self.tanh_xfun(xi, bi))
        return np.array(yi)

    def _apply_inv_xfun(self, x):
        ''' Apply the inverse xfun to the initial parameters.
        See _apply_xfun().

        Args:
            x   parameters

        Returns:
            y   inv_xfun(x)
        '''
        yi = []
        for i, (xi, fi, bi) in enumerate(zip(x, self._xfun, self._bounds)):
            if fi == 0:
                # linear
                yi.append(xi)
            if fi == 1:
                # exponential
                yi.append(np.log2(xi))
            if fi == 2:
                # tanh
                yi.append(self.inv_tanh_xfun(xi, bi))
        return np.array(yi)

    def _tikhonov_regularization(self, val):
        ''' Tikhonov regularization.

        Returns:
            val     contribution to fval
        '''
        raise NotImplementedError()
        # if opt_est['xfun'] == 1:
        #     val0 = 2**self.x0
        # elif opt_est['xfun'] == 2:
        #     val0 = abs(self.x0)
        # elif opt_est['xfun'] == 3:
        #     val0 = self.x0**2
        # else:
        #     val0 = self.x0
        # tikh = opt_est['tikhonov']*np.linalg.norm(val0 - val)**2
        # if opt_est['error'] == 'rel':
        #     tikh /= np.linalg.norm(val0)**2
        return val

    def _compute_error(self):
        ''' Compute the L2 error of the calculated velocity field w.r.t. the
        measurement.

        Returns:
            fval    error
        '''
        u, _ = self.pb_est.w.split(deepcopy=True)
        self._interp_measurement(u)     # -> stored to self.u_meas
        fval = norm(self.u_meas.vector() - self.uref_meas.vector(), 'l2')
        if self.options['estimation']['error'] == 'rel':
            fval /= norm(self.uref_meas.vector(), 'l2')
        return fval

    def _solve(self, x):
        ''' Solve function called by the optimization method.

        # TODO: clean up the mess (Tikhonov??)

        Args:
            x   estimation parameter

        Returns:
            fval    value to be minimized: error w.r.t. the measurement
        '''
        opt_est = self.options['estimation']

        val = self._apply_xfun(x)
        self._update_parameters(val)

        solver = NSSolver(self.pb_est)
        solver.solve()
        assert id(self.pb_est.w) == id(solver.w) and self.pb_est.w == solver.w

        fval = self._compute_error()

        if opt_est['tikhonov']:
            fval += self._tikhonov_regularization(val)

        if not self._end:
            print('Parameters:\t {0}'.format(str(val)))
            print('Fval:\t\t {0}'.format(fval))
            self.fval.append(fval)
            self.x.append(x)
        return fval

    def _setup_bruteforce(self):
        ''' Set up the bruteforce arguments. If as_slice is set, make slices
        according to Npts (int: uniform, or list) and bounds. Otherwise ensure
        that Npts is an integer.

        Returns:
            Npts    (int) number of points (no slices)
            bounds  tuple of slices or list of limits for each parameter
        '''
        opt_est = self.options['estimation']
        bounds = self._bounds
        Npts = opt_est['bruteforce']['numpts']
        if opt_est['bruteforce']['as_slice']:
            if not type(Npts) is list:
                Npts = [Npts]*len(bounds)
            slices = []
            for (n, bnd) in zip(Npts, bounds):
                step = (bnd[1] - bnd[0])/(n - 1)
                slices.append(slice(bnd[0], bnd[1] + 1.e-10, step))
            bounds = tuple(slices)
        else:
            assert type(Npts) is int, (
                'If [a, b] ranges are given, numpts must be int. Use '
                'slices for nonuniform grids.')
        return Npts, bounds

    def _parse_parameters(self):
        ''' Cast the parameters into the required form.

        Processes inflow (U, (dR)), navierslip (gamma or dR), and the
        transpiration coefficient. The inflow dR can be taken from the
        navierslip estimate, if dR is chosen to be optimized in the
        navierslip section, via the use_slip switch.

        For all optimization parameters, the initial value x0, the parameter
        function xfun, and the limits (if any) are added to the instance
        variables:
            self._x0, self._xfun, self._bounds

        The initial values x0 are expected to be the 'true' physical values,
        BEFORE re-parametrization. The inverse of 'xfun'
        (self._apply_inv_xfun) is applied to x0 in the end in order to get
        the correct values.

        The order is:
            [u_inflow, dR_inflow, navierslip, transpiration]
        '''
        param = self.options['estimation']['parameters']

        # create start vector, x0
        self.xlegend = []
        self._x0 = []
        self._xfun = []
        self._bounds = []
        if param['inflow']['use']:
            self._xfun.append(param['inflow']['velocity']['xfun'])
            self._x0.append(param['inflow']['velocity']['x0'])
            self._bounds.append(param['inflow']['velocity']['bounds'])
            self.xlegend.append('Uin')
            if (param['inflow']['use'] == 2 and
                    param['inflow']['dR']['use_slip'] == 0):
                self._x0.append(param['inflow']['dR']['x0'])
                self._xfun.append(param['inflow']['dR']['xfun'])
                self._bounds.append(param['inflow']['dR']['bounds'])
                self.xlegend.append('dR_in')
            elif (param['inflow']['dR']['use_slip'] == 1 and not
                    param['navierslip']['use'] == 2):
                raise Exception('Inflow dR is to be taken from the '
                                'Navier-slip dR, but dR estimation via '
                                'Navier-slip is not set!')

        estim_boundaries = ['navierslip', 'transpiration']
        for bnd in estim_boundaries:
            if param[bnd]['use']:
                self._x0.append(param[bnd]['x0'])
                self._xfun.append(param[bnd]['xfun'])
                self._bounds.append(param[bnd]['bounds'])
                self.xlegend.append(bnd)

        self._x0 = self._apply_inv_xfun(self._x0)
        return self

    def gpyopt_optimization(self):
        ''' Optimization using GPyOpt.

        Returns:
            result  x_opt, f_opt dict
        '''
        import GPyOpt

        bounds = self.options['estimation']['gpyopt']['bounds']
        if not bounds:
            gpbounds = None
        else:
            gpbounds = [
                {'name': 'x{0}'.format(i), 'type': 'continuous', 'domain':
                 tuple(gpbnd)} for (i, gpbnd) in enumerate(bounds)]

        if ('x0' in self.options['estimation']['gpyopt'] and
                type(self.options['estimation']['gpyopt']['x0']) is list):
            Xinit = np.array(self.options['estimation']['gpyopt']['x0'])
        else:
            Xinit = None

        acq_type = self.options['estimation']['gpyopt']['acq_type']
        model_type = self.options['estimation']['gpyopt']['model_type']
        myBO = GPyOpt.methods.BayesianOptimization(
            f=self._solve,
            domain=gpbounds,
            acquisition_type=acq_type,
            model_type=model_type,
            X=Xinit
        )
        max_iter = self.options['estimation']['gpyopt']['max_iter']
        max_time = self.options['estimation']['gpyopt']['max_time']
        eps = 1e-6
        myBO.run_optimization(max_iter, max_time, eps)

        plt.ion()
        myBO.plot_acquisition()
        self.BO = myBO

        result = {'x': myBO.x_opt, 'f': myBO.fx_opt}
        self.x = np.array(self.x).squeeze()
        return result

    def measurement(self):
        ''' Make the measurement: first compute the reference solution, then
        interpolate it to the measurement mesh and add noise. '''
        self.reference_solution()
        self._init_measurement()
        self._interp_measurement(self.uref, ref=True)
        noise_intensity = self.options['estimation']['noise']
        if noise_intensity:
            self.add_gaussian_noise(self.uref_meas, noise_intensity)
        return self

    def reference_solution(self):
        ''' Compute the reference solution and produce the measurement
        (u_meas). '''
        self.pb_ref.init()
        sol = NSSolver(self.pb_ref)
        sol.solve()
        self.uref, self.pref = sol.w.split(deepcopy=True)
        return self

    def estimate(self):
        ''' Estimate the parameters of the Navier-slip BC.

        Set up the problem from the yaml file and call the optimization
        method with the set of initial values.

        Note: NSSolver initialization takes 1.4us, so there is no reason to
        set it up beforehand.

        TODO NOTE: now includes beta optimization via a switch in the yaml
        file.
        '''
        opt_est = self.options['estimation']

        self._parse_parameters()
        self.measurement()
        self.pb_est.init()

        method = opt_est['method']
        if method == 'Powell':
            result = minimize(self._solve, self._x0, method='Powell')
            self.x_opt = result['x']
            self.f_opt = result['fun']
        elif method == 'Nelder-Mead':
            result = minimize(self._solve, self._x0, method='Nelder-Mead')
            # options={'disp': True,
            #          'xtol': 1e-2, 'ftol': 1e-2})
            self.x_opt = result['x']
            self.f_opt = result['fun']
        elif method == 'BFGS':
            result = minimize(self._solve, self._x0, method='BFGS',
                              tol=opt_est['bfgs']['tol'])
            # options={
            #     'disp': True, 'gtol': 1e-5, 'eps': 1e-3
            # })
            self.x_opt = result['x']
            self.f_opt = result['fun']
        elif method == 'bruteforce':
            Npts, bfbounds = self._setup_bruteforce()
            result = brute(self._solve, bfbounds, Ns=Npts, disp=True,
                           finish=None, full_output=True)
            # finish (default) = scipy.optimize.fmin to polish the result
            self.x_opt = result[0]
            self.f_opt = result[1]
        elif method == 'gpyopt':
            raise Exception('GPyOpt dropped. Adapt...')
            result = self.gpyopt_optimization()
            self.x_opt = result['x']
            self.f_opt = result['f']

        # optimization done.
        self._end = True
        self.fval = np.array(self.fval)
        self.x = np.array(self.x)

        print(result)
        self.result = result
        return self

    def solve_opt(self, x=None, init=False):
        ''' Solve with the optimal parameters.

        Args:
            x (optional, numpy.ndarray)     parameters; if not given, use x_opt
            init (optional, bool)           reinitialize solution w
        '''
        if x is None:
            x = self.x_opt
        # else:
        #     x = self._apply_inv_xfun(x)
        if init:
            self.pb_est.w.vector().zero()
            print('zeroed')
        self._solve(x)
        self.u_opt, self.p_opt = self.pb_est.w.split(deepcopy=True)
        self.u_meas_opt = self.u_meas
        return self

    def get_radius_at_vert_boundary(self, bnds, bid):
        ''' Get the radius of a vertical boundary patch.

        Args:
            bnds    boundary domain object
            bid     boundary id

        Returns:
            radius
        '''
        It_facet = SubsetIterator(bnds, bid)
        ycoord = []
        for c in It_facet:
            for v in vertices(c):
                ycoord.append(v.point().y())
        ycoord = np.array(ycoord)
        if np.allclose(ycoord.min(), 0) or np.allclose(ycoord.max(),
                                                       -ycoord.min()):
            # symmetric or full (-R, R)
            radius = ycoord.max()
        else:
            warnings.warn('Pressure_drop: careful, geometry not symmetric! '
                          'ymin = {0}, ymax = {1}'.format(ycoord.min(),
                                                          ycoord.max()))
            radius = 0.5*(ycoord.max() - ycoord.min())
        return radius

    def pressure_drop(self, p, sin=1, sout=2):
        ''' Calculate the pressure drop for the optimized NSE solution or the
        reference pressure on the respective meshes, between two boundaries
        sin, sout.

        The pressure is integrated over the boundaries and divided by the
        respective measure (integral mean), then subtracted.

        The function detects automatically if the given pressure field p is
        defined on a) the reference mesh, b) the estimation mesh, or c) the
        measurement mesh, and the boundary FacetFunction is chosen
        appropriately.

        Args:
            p       pressure field
            sin     index of the inlet boundary
            sout    index of the outlet boundary
        '''
        # detect reference or estimator problem
        mesh = p.function_space().mesh()
        if mesh.id() == self.pref.function_space().mesh().id():
            # reference case
            bnds = self.pb_ref.bnds
        elif (mesh.id() ==
              self.pb_est.w.split(deepcopy=True)[1].function_space().
              mesh().id()):
            bnds = self.pb_est.bnds
        elif mesh.id() == self.pref_meas.function_space().mesh().id():
            bnds = self.bnds_meas
        else:
            raise Exception('p not identified.')

        ds = Measure('ds', domain=mesh, subdomain_data=bnds)
        measure_sin = Constant(self.get_radius_at_vert_boundary(bnds, sin))
        measure_sout = Constant(self.get_radius_at_vert_boundary(bnds, sout))
        # print('measure_sin: {0}'.format(measure_sin.values()[0]))
        # print('measure_sout: {0}'.format(measure_sin.values()[0]))
        dP = assemble(p/measure_sin*ds(sin) - p/measure_sout*ds(sout))
        return dP

    def direct_estimator(self, method='STEint', return_pressure=False):
        ''' Compute a "standalone" pressure estimate; caller function to
        encompass the Navier-slip optimization via self.estimate().

        Args:
            method  pressure estimation method: STE, STEint, PPE

        Returns:
            dP      estimated pressure drop
        '''
        if not self.uref_meas:
            self.measurement()
        if not self.pb_est.w:
            self.pb_est.init()

        fun = getattr(self, method)
        dP, p_est = fun()

        if return_pressure:
            ret = (dP, p_est)
        else:
            ret = dP
        return ret

    def PPE(self, sin=1, sout=2):
        ''' Compute the PPE pressure approximation and pressure drop.

        Args (optional):
            sin     inlet boundary id
            sout    outlet boundary id

        Returns:
            dP
        '''
        assert self.uref_meas, 'Reference measurement does not exist.'
        rho = self.pb_ref.options['rho']
        mesh = self.uref_meas.function_space().mesh()
        E1 = FiniteElement('Lagrange', mesh.ufl_cell(), 1)
        P1 = FunctionSpace(mesh, E1)
        p = TrialFunction(P1)
        q = TestFunction(P1)
        bc = DirichletBC(P1, Constant(0.), self.bnds_meas, sout)
        u0 = self.uref_meas
        a = inner(grad(p), grad(q))*dx
        L = - rho*inner(grad(u0)*u0, grad(q))*dx
        A, b = assemble_system(a, L, bc)
        p_est = Function(P1)
        solve(A, p_est.vector(), b, 'mumps')
        self.p_est = p_est

        dP = self.pressure_drop(p_est)
        return dP, p_est

    def STE(self, sin=1, sout=2):
        ''' Compute the STE pressure approximation and pressure drop.

        Args (optional):
            sin     inlet boundary id
            sout    outlet boundary id

        Returns:
            dP
        '''
        assert self.uref_meas, 'Reference measurement does not exist.'
        rho = self.pb_ref.options['rho']
        ndim = self.pb_ref.ndim
        mesh = self.uref_meas.function_space().mesh()
        P1 = FiniteElement('Lagrange', mesh.ufl_cell(), 1)
        B = FiniteElement('Bubble', mesh.ufl_cell(), 1 + ndim)
        W = FunctionSpace(mesh, MixedElement(ndim*[P1 + B])*P1)
        (w, p) = TrialFunctions(W)
        (v, q) = TestFunctions(W)
        zero = Constant((0,)*ndim)
        noslip = project(zero, W.sub(0).collapse())
        bc = DirichletBC(W.sub(0), noslip, 'on_boundary')
        u0 = self.uref_meas
        a = inner(grad(w), grad(v))*dx - p*div(v)*dx + div(w)*q*dx
        L = - rho*inner(grad(u0)*u0, v)*dx
        # A = assemble(a)
        # b = assemble(L)
        # bc.apply(A, b)
        A, b = assemble_system(a, L, bc)
        w1 = Function(W)
        solve(A, w1.vector(), b, 'mumps')
        _, p_est = w1.split(deepcopy=True)
        self.p_est = p_est

        dP = self.pressure_drop(p_est)
        return dP, p_est

    def STEint(self, sin=1, sout=2):
        ''' Compute the STEint pressure approximation and pressure drop.

        Args (optional):
            sin     inlet boundary id
            sout    outlet boundary id

        Returns:
            dP
        '''
        assert self.uref_meas, 'Reference measurement does not exist.'
        mu = self.pb_ref.options['mu']
        rho = self.pb_ref.options['rho']
        ndim = self.pb_ref.ndim
        mesh = self.uref_meas.function_space().mesh()
        P1 = FiniteElement('Lagrange', mesh.ufl_cell(), 1)
        B = FiniteElement('Bubble', mesh.ufl_cell(), 1 + ndim)
        W = FunctionSpace(mesh, MixedElement(ndim*[P1 + B])*P1)
        (w, p) = TrialFunctions(W)
        (v, q) = TestFunctions(W)
        zero = Constant((0,)*ndim)
        noslip = project(zero, W.sub(0).collapse())
        bc = DirichletBC(W.sub(0), noslip, 'on_boundary')
        u0 = self.uref_meas
        a = inner(grad(w), grad(v))*dx - p*div(v)*dx + div(w)*q*dx
        L = - mu*inner(grad(u0), grad(v))*dx + rho*inner(grad(v)*u0, u0)*dx
        # A = assemble(a)
        # b = assemble(L)
        # bc.apply(A, b)
        A, b = assemble_system(a, L, bc)
        w1 = Function(W)
        solve(A, w1.vector(), b, 'mumps')
        _, p_est = w1.split(deepcopy=True)
        self.p_est = p_est

        dP = self.pressure_drop(p_est)
        return dP, p_est

    def gamma(self, x, R_i=0.95, R_o=1.0):
        ''' Utility function for calculating the Navier-slip gamma from the
        optimization parameters. First the Poiseuille base gamma is computed,
        then the gamma based on the estimation parameters.

        Since the Navier-slip BC is defined via x*gamma_pois, where gamma_pois
        is the Poiseuille gamma obtained from the physical parameters and only
        the proportionality factor x is set, both gammas are returned for
        comparability.

        Args:
            x       parameter(s)
            R_i     Poiseuille gamma inner radius
            R_o     Poiseuille gamma outer radius

        Returns:
            gamma_pois  Poiseuille gamma
            gamma_opt   optimized gamma
        '''
        use = self.options['estimation']['parameters']['navierslip']['use']
        mu = self.options['mu']
        gamma_pois = 2.*mu*R_i/(R_i**2 - R_o**2)
        if use == 1:
            # x*gamma optimization
            gamma_opt = x*2.*mu*R_i/(R_i**2 - R_o**2)
        elif use == 2:
            # dR optimization
            R_o = R_i + x
            gamma_opt = 2.*mu*R_i/(R_i**2 - R_o**2)
        return gamma_pois, gamma_opt

    def tanh_xfun(self, x, bounds):
        ''' Compute the tanh(x) mapping.

        Args:
            x       evaluation location
            bounds  tuple with (lower, width) limits

        Returns:
            the bounded value
        '''
        return bounds[0] + bounds[1]*0.5*(np.tanh(x) + 1)

    def inv_tanh_xfun(self, x, bounds):
        ''' Compute the inverse of the tanh(x) mapping.

        Args:
            x       evaluation location
            bounds  tuple with (lower, width) limits

        Returns:
            the unconstrained value
        '''
        return np.arctanh(2./bounds[1]*(x - bounds[0]) - 1.)
```
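
The `xfun == 2` re-parametrization used by `_apply_xfun`/`_apply_inv_xfun` keeps a physical parameter inside a bounded interval while the optimizer works on an unconstrained variable. A self-contained illustration of that round trip (only `numpy` needed; the bounds are made up):

```python
import numpy as np

def tanh_xfun(x, bounds):
    # squash an unconstrained x into (bounds[0], bounds[0] + bounds[1])
    return bounds[0] + bounds[1] * 0.5 * (np.tanh(x) + 1)

def inv_tanh_xfun(y, bounds):
    # inverse mapping, used to convert initial guesses to optimizer space
    return np.arctanh(2. / bounds[1] * (y - bounds[0]) - 1.)

bounds = (0.5, 2.0)                  # parameter constrained to (0.5, 2.5)
x = np.linspace(-3., 3., 7)          # unconstrained optimizer iterates
y = tanh_xfun(x, bounds)
assert np.all((y > 0.5) & (y < 2.5))
assert np.allclose(inv_tanh_xfun(y, bounds), x)   # exact round trip
```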
---
repo: hack4impact/vision-zero-philly | path: /app/reports/forms.py | license: MIT | url: https://github.com/hack4impact/vision-zero-philly

```python
import datetime
from flask_wtf import Form
from flask_wtf.file import FileField, FileAllowed
from wtforms.fields import (
    SelectField,
    StringField,
    SubmitField,
    IntegerField,
    TextAreaField,
    HiddenField,
    DateField,
    RadioField,
    FieldList,
    BooleanField
)
from wtforms_components import TimeField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import (
    InputRequired,
    Length,
    Optional,
    NumberRange,
    URL,
    Regexp
)
from app.custom_validators import StrippedLength, ValidLocation, RequiredIf, RequireDescription
from .. import db


class IncidentReportForm(Form):
    address = StringField('Address', validators=[
        InputRequired('Address is required.'),
        ValidLocation()
    ])
    latitude = HiddenField('Latitude')
    longitude = HiddenField('Longitude')
    car = BooleanField('Car', validators=[
        Optional()
    ])
    bus = BooleanField('Bus', validators=[
        Optional()
    ])
    truck = BooleanField('Truck', validators=[
        Optional()
    ])
    bicycle = BooleanField('Bicycle', validators=[
        Optional()
    ])
    pedestrian = BooleanField('Pedestrian', validators=[
        Optional()
    ])
    injuries = RadioField('Did an injury occur?', choices=[
        ('Yes', 'Yes'),
        ('No', 'No')
    ], validators=[InputRequired()])
    injuries_description = TextAreaField('Injuries Description', validators=[
        RequireDescription('injuries'),
        Length(max=5000)
    ])
    witness = RadioField('Did you observe or experience the incident?', choices=[
        ('Observed', 'Observed'),
        ('Experienced', 'Experienced')
    ], validators=[InputRequired()])
    category = SelectField('Category',
                           choices=[("Failure to stop", "Failure to stop"),
                                    ("Running a red light", "Running a red light"),
                                    ("Swerving vehicle", "Swerving vehicle"),
                                    ("Tailgating", "Tailgating"),
                                    ("Cycling on sidewalk", "Cycling on sidewalk"),
                                    ("Car door", "Car door"),
                                    ("Crossing against signal", "Crossing against signal"),
                                    ("Other", "Other")],
                           validators=[
                               InputRequired()
                           ])
    description = TextAreaField('Description', validators=[
        Optional(),
        Length(max=5000)
    ])
    road_conditions = TextAreaField('Weather/Road Conditions', validators=[
        Optional(),
        Length(max=5000)
    ])
    today = datetime.datetime.today()
    date = DateField('Date of Event (year-month-day)',
                     default=today.strftime('%m-%d-%Y'),
                     validators=[InputRequired()])
    time = TimeField('Time of Event (hours:minutes am/pm)',
                     default=today.strftime('%I:%M %p'),
                     validators=[InputRequired()])
    picture_file = FileField(
        'Upload a Photo',
        validators=[
            Optional(),
            FileAllowed(['jpg', 'jpe', 'jpeg', 'png', 'gif', 'svg', 'bmp'],
                        'Only images are allowed.')
        ]
    )
    picture_url = StringField('Picture URL', validators=[
        Optional(),
        URL(message='Picture URL must be a valid URL. '
                    'Please upload the image to an image hosting website '
                    'and paste the link here.')
    ])
    deaths = IntegerField('Number of Deaths', validators=[Optional()])
    contact_name = StringField('Contact Name', validators=[
        Optional(),
        Length(max=1000)
    ])
    contact_phone = StringField('Contact Phone', validators=[
        Optional(),
        Length(max=1000)
    ])
    contact_email = StringField('Contact E-mail', validators=[
        Optional(),
        Length(max=100)
    ])
    submit = SubmitField('Create Report')


class EditIncidentReportForm(IncidentReportForm):
    submit = SubmitField('Update Report')
```
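
`RequireDescription` and the other custom validators come from `app.custom_validators`, which is not included in this record. A plausible minimal sketch follows; the class name matches the import, but the field-name convention and message are guesses:

```python
from wtforms.validators import StopValidation


class RequireDescription(object):
    """Require this field when the named radio field was answered 'Yes'."""

    def __init__(self, fieldname, message=None):
        self.fieldname = fieldname
        self.message = message or 'Please describe the {}.'.format(fieldname)

    def __call__(self, form, field):
        other = form._fields.get(self.fieldname)
        # Only enforce a description when the related question was 'Yes'
        if other is not None and other.data == 'Yes' \
                and not (field.data or '').strip():
            raise StopValidation(self.message)
```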
---
repo: TushaarGVS/SC_Lab | path: /Lab3/K_Means_Clustering.py | license: none | url: https://github.com/TushaarGVS/SC_Lab

```python
from csv import reader
from math import sqrt
from random import randrange


def load_csv(filename):
    dataset = list()
    check = 0
    with open(filename, 'r') as file:
        csv_reader = reader(file)
        for row in csv_reader:
            if not row:
                continue
            # skip the header row (check == 0)
            if check != 0:
                dataset.append(row)
            check += 1
    change_dataset(dataset)
    return dataset


def change_dataset(dataset):
    for element in dataset:
        for i in range(len(element) - 1):
            element[i] = float(element[i].strip())
        element[-1] = 1.0 if element[-1] == 'Yes' else 0.0


def pick_centers(dataset, k):
    dataset_copy = list(dataset)
    clusters = []
    for i in range(k):
        index = randrange(len(dataset_copy))
        clusters.append(dataset_copy.pop(index))
    return clusters


def euclidian_distance(datapoint_1, datapoint_2):
    value = 0
    for i in range(len(datapoint_1) - 1):
        value += (datapoint_1[i] - datapoint_2[i])**2
    return sqrt(value)


def dataset_segregation(dataset, assigned_clusters):
    segregated_dataset = {}
    for i in range(len(dataset)):
        if (assigned_clusters[i] not in segregated_dataset):
            segregated_dataset[assigned_clusters[i]] = []
        segregated_dataset[assigned_clusters[i]].append(dataset[i])
    return segregated_dataset


def compute_new_clusters(dataset, assigned_clusters):
    clusters = []
    segregated_dataset = dataset_segregation(dataset, assigned_clusters)
    for cluster_num, data_points in segregated_dataset.iteritems():
        clusters.append([float(sum(i)) / len(i) for i in zip(*data_points)])
    return clusters


def assign_clusters(clusters, dataset, k):
    assigned_clusters = []
    for data_point in dataset:
        distances = []
        for i in range(k):
            distances.append(euclidian_distance(data_point, clusters[i]))
        cluster = distances.index(min(distances))
        assigned_clusters.append(cluster)
    return assigned_clusters


def k_means_clustering(dataset, k, num_iterations):
    clusters = pick_centers(dataset, k)
    prev_clusters = []
    for i in range(num_iterations):
        # stop early once the centers no longer move
        if (prev_clusters == clusters):
            break
        assigned_clusters = assign_clusters(clusters, dataset, k)
        new_clusters = compute_new_clusters(dataset, assigned_clusters)
        prev_clusters = clusters
        clusters = new_clusters
    final_segregation = dataset_segregation(dataset, assigned_clusters)
    print("Total iterations used: %s" % (i + 1))
    return final_segregation


def accuracy(final_segregation):
    correct = 0
    for cluster_num, data_points in final_segregation.iteritems():
        count_0 = count_1 = 0
        for data_point in data_points:
            if (data_point[-1] == 0):
                count_0 = count_0 + 1
            else:
                count_1 = count_1 + 1
        # assign each cluster the majority class of its members
        if (count_0 > count_1):
            class_assigned = 0
        else:
            class_assigned = 1
        print("Cluster: %s; Class Assigned: %s; Number of elements: %s"
              % (cluster_num, class_assigned, len(data_points)))
        for data_point in data_points:
            if (data_point[-1] == class_assigned):
                correct = correct + 1
    return correct


filename = raw_input("Enter file name: ")
k = 2
num_iterations = int(raw_input("Enter the maximum number of iterations: "))
dataset = load_csv(filename)
final_segregation = k_means_clustering(dataset, k, num_iterations)
print("Clusters: %s" % final_segregation)
correct = accuracy(final_segregation)
print("Accuracy: %s" % (correct/float(len(dataset)) * 100))
```
---
repo: Xav83/Python | path: /roboc/tst/test_Robot.py | license: none | url: https://github.com/Xav83/Python

```python
import unittest
from src import robot


class TestRobot(unittest.TestCase):
    pass
```
---
repo: cuplv/verivita | path: /cbverifier/traces/__init__.py | license: none | url: https://github.com/cuplv/verivita

```python
# package cbverifier.traces
```
---
repo: romin991/rocketSales | path: /devices/serializers.py | license: none | url: https://github.com/romin991/rocketSales

```python
from rest_framework import serializers
from devices.models import *


class DeviceSerializer(serializers.ModelSerializer):
    class Meta:
        model = Device
```
---
repo: PrincessGods/deco3801 | path: /application/main/routes.py | license: none | url: https://github.com/PrincessGods/deco3801

```python
from flask import render_template, request, Blueprint, flash, redirect, url_for
from application.models import User, Sample_Information, Sample_Location, Search_Results
from application import db, bcrypt
from application.main.forms import HomeSearchForm
from flask_login import current_user

main = Blueprint('main', __name__)


@main.route("/")
@main.route("/home", methods=['GET', 'POST'])
def home():
    form = HomeSearchForm()
    user_icon = getUserIcon()
    if form.validate_on_submit():
        c_name = form.search.data
        return redirect(url_for('main.search', name=c_name))
    return render_template('index.html', title="QAEHS", form=form,
                           icon=user_icon)


@main.route("/help")
def help():
    user_icon = getUserIcon()
    return render_template('tutorial.html', title="Help", icon=user_icon)


@main.route("/search/<name>", methods=['GET', 'POST'])
def search(name):
    form = HomeSearchForm()
    user_icon = getUserIcon()
    samples = Sample_Information.query.filter_by(sample_type=name).all()
    return render_template('chemical_search.html', title="Search Result",
                           form=form, icon=user_icon, samples=samples)


@main.route("/searchDetails/<id>", methods=['GET', 'POST'])
def searchDetails(id):
    form = HomeSearchForm()
    user_icon = getUserIcon()
    sample = Sample_Information.query.filter_by(id=id).first()
    location = Sample_Location.query.filter_by(sample_id=id).first()
    return render_template('search_result_details.html',
                           title="Search Result Details",
                           form=form, icon=user_icon, sample=sample,
                           location=location)


def getUserIcon():
    # returns None for anonymous users
    if current_user.is_authenticated:
        user_icon = url_for('static', filename='imgs/' + current_user.user_icon)
        return user_icon
```
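
`HomeSearchForm` is imported from `application.main.forms`, which is not shown in this record. Given that the routes only read `form.search.data`, a minimal sketch could look like this (field names chosen to match that access; everything else is an assumption):

```python
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired


class HomeSearchForm(FlaskForm):
    # the routes above read form.search.data after validate_on_submit()
    search = StringField('Search', validators=[DataRequired()])
    submit = SubmitField('Search')
```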
---
repo: nwrocketman64/sales-tax-calculator | path: /app.py | license: MIT | url: https://github.com/nwrocketman64/sales-tax-calculator

```python
# Import the needed libraries from Flask and sessions.
from flask import Flask, render_template, request, session, redirect, url_for
from flask_session import Session

# Create the web app in Flask.
app = Flask(__name__)

# Configure the sessions in the web app.
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
app.secret_key = "\xd5$\xa2\xd5\xd8\x06\xab\xa4\xb5\x86\xec\xf1Tn[s"
Session(app)


# Define the main route as both a POST and GET route.
@app.route('/', methods=['POST', 'GET'])
def index():
    # If the method is POST, the user will be entering the calculations.
    if request.method == 'POST':
        # Clear all the values from the session if there are any.
        session.pop('message', None)
        session.pop('total', None)
        session.pop('subtotal', None)
        session.pop('taxAmount', None)

        # Try to receive the input from the user and validate the input.
        try:
            price = float(request.form['price'])
            amount = float(request.form['amount'])
            tax = float(request.form['tax'])
        # If it fails to validate, add the error message to the session and
        # redirect back to the GET page.
        except:
            session['message'] = 'You must fill out all input fields.'
            return redirect(url_for('index'))

        # If everything worked so far, calculate the subtotal, tax, and total.
        subtotal = (price * amount)
        tax_amount = (price * amount) * (tax / 100.0)
        total = (price * amount) * (1 + (tax / 100.0))

        # Save all the results to the session.
        session['subtotal'] = "${:,.2f}".format(subtotal)
        session['taxAmount'] = "${:,.2f}".format(tax_amount)
        session['total'] = "${:,.2f}".format(total)

        # Then redirect the user back to the GET form of the page.
        return redirect(url_for('index'))
    else:
        # Just render the page if it is a GET request.
        return render_template('index.html')


# The 404 handler.
@app.errorhandler(404)
def not_found(e):
    return render_template('404.html'), 404


# Start the web application.
if __name__ == '__main__':
    app.run()
```
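
The arithmetic inside the POST branch reduces to one pure function; extracting it makes the rounding behavior easy to check (this refactor is illustrative, not part of the repo):

```python
def totals(price, amount, tax_percent):
    # subtotal, absolute tax, and grand total for a line item
    subtotal = price * amount
    tax_amount = subtotal * tax_percent / 100.0
    return subtotal, tax_amount, subtotal + tax_amount

# e.g. 3 items at $19.99 with 8.25% sales tax
subtotal, tax_amount, total = totals(19.99, 3, 8.25)
print("${:,.2f} / ${:,.2f} / ${:,.2f}".format(subtotal, tax_amount, total))
# -> $59.97 / $4.95 / $64.92
```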
---
repo: akangupt/a-fiery-vengeance | path: /source code/hero_exit.py | license: none | url: https://github.com/akangupt/a-fiery-vengeance

```python
import sys
import os
import math
import cPickle as pk
import pygame
import random
try:
    import _path
except:
    pass
import tiledtmxloader


# Checks whether the hero collides with a portal.
# If it does, returns the index of that portal; otherwise returns -1.
def exit_map(hero, portal):
    plen = len(portal)
    for i in range(0, plen):
        if pygame.Rect.colliderect(hero.rect, portal[i]):
            return i
    return -1


# If the hero collides with a portal, returns the next map (and spawn
# portal) corresponding to that portal.
def next_map(map_name, portal_num):
    sv = pk.load(open("./save.p", "rb"))
    if map_name == './maps/village1.tmx':
        if portal_num == 0:
            return ['./maps/tunnel.tmx', 0]
        elif portal_num == 1:
            return ['./maps/tunnel2_4.tmx', 0]
        elif portal_num == 2:
            return ['./maps/tunnel3.tmx', 0]
        elif portal_num == 3:
            return ['./maps/tunnel2_4.tmx', 1]
    elif map_name == './maps/tunnel2_4.tmx':
        if (portal_num == 0 or portal_num == 1):
            return ['./maps/village1.tmx', 1]
    elif map_name == './maps/tunnel3.tmx':
        if portal_num == 0:
            return ['./maps/ship.tmx', 0]
    elif map_name == './maps/mountainclimbing.tmx':
        if portal_num == 0:
            return ['./maps/mountain_top.tmx', 0]
    elif map_name == './maps/village2_out1.tmx':
        if portal_num == 0:
            return ['./maps/village2_inside.tmx', 0]
    elif map_name == './maps/village2_inside.tmx':
        if portal_num == 0:
            return ['./maps/village2_out1.tmx', 1]
        elif (portal_num == 1 and sv['spook'] == 1):
            # sv['spook'] == 1 denotes that the hero has talked to the spooky
            # guy. 'village2_inside' contains two paths out of this map;
            # portal_num == 1 denotes the first path.
            return ['./maps/mountainclimbing.tmx', 0]
        elif (portal_num == 2 and sv['spook'] == 1):
            # portal_num == 2 denotes the second path
            return ['./maps/mountainclimbing.tmx', 0]
        elif portal_num == 3:
            # portal_num == 3 denotes the hotel map
            return ['./maps/hotel.tmx', 0]
    elif map_name == './maps/tunnel.tmx':
        if portal_num == 0:
            return ['./maps/village1.tmx', 1]
        elif portal_num == 1:
            return ['./maps/tunnel2.tmx', 0]
    elif map_name == './maps/ship.tmx':
        if (portal_num == 0 and sv['pirate'] == 1):
            # sv['pirate'] == 1 denotes that the hero has talked to the
            # pirate guy; the maze is then picked at random
            if (random.randint(1, 2) == 1):
                return ['./maps/maze.tmx', 0]
            else:
                return ['./maps/maze2.tmx', 0]
    elif map_name == './maps/maze.tmx':
        if portal_num == 0:
            return ['./maps/safe1.tmx', 0]
    elif map_name == './maps/maze2.tmx':
        if portal_num == 0:
            return ['./maps/safe1.tmx', 0]
```
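
`next_map` unpickles `./save.p` and reads the `'spook'` and `'pirate'` progress flags. A save file with the shape this module expects can be produced like this (key names taken from the lookups above; everything else is assumed):

```python
# Python 2, matching the cPickle import used by hero_exit.py
import cPickle as pk

save_state = {'spook': 1, 'pirate': 0}   # talked to the spooky guy only
with open("./save.p", "wb") as f:
    pk.dump(save_state, f)
```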
---
repo: feyfree/Summary | path: /DataStructure/sort/select_sort.py | license: none | url: https://github.com/feyfree/Summary

```python
def select_sort(nums):
    n = len(nums)
    for i in range(n):
        # k tracks the index of the smallest remaining element
        k = i
        for j in range(i, n):
            if nums[j] < nums[k]:
                k = j
        if i != k:
            nums[i], nums[k] = nums[k], nums[i]
    return nums


nums = [1, 3, 6, 7, 2, 4]
print(select_sort(nums))
```
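
A quick property check of the function above against Python's built-in sort (test harness only, not part of the original file):

```python
import random

data = [random.randint(0, 100) for _ in range(20)]
assert select_sort(list(data)) == sorted(data)
```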
---
repo: QuailAutomation/yahoo_fantasy_bot | path: /yahoo_fantasy_bot/nhl.py | license: MIT | url: https://github.com/QuailAutomation/yahoo_fantasy_bot

```python
#!/usr/bin/python
import pandas as pd
import numpy as np
from nhl_scraper import nhl
import logging
import datetime

logger = logging.getLogger()


class Builder:
    """Class that constructs prediction datasets for hockey players.

    The datasets it generates are fully populated with projected stats taken
    from csv files.

    :param lg: Yahoo! league
    :type lg: yahoo_fantasy_api.league.League
    :param skaters_csv: csv file containing skater predictions
    :type skaters_csv: str
    :param goalies_csv: csv file containing goalie predictions
    :type goalies_csv: str
    """
    def __init__(self, lg, skaters_csv, goalies_csv):
        skaters = pd.read_csv(skaters_csv, index_col='name')
        goalies = pd.read_csv(goalies_csv, index_col='name')
        self.ppool = pd.concat([skaters, goalies], sort=True)
        self.nhl_scraper = nhl.Scraper()
        wk_start_date = lg.edit_date()
        assert(wk_start_date.weekday() == 0)
        wk_end_date = wk_start_date + datetime.timedelta(days=6)
        self.team_game_count = self.nhl_scraper.games_count(wk_start_date,
                                                            wk_end_date)
        self.nhl_players = self.nhl_scraper.players()

    def predict(self, roster_cont):
        """Build a dataset of hockey predictions for the week

        The pool of players is passed into this function through roster_cont.
        It will generate a DataFrame for these players with their predictions.

        The returned DataFrame has rows for each player, and columns for each
        prediction stat.

        :param roster_cont: Roster of players to generate predictions for
        :type roster_cont: roster.Container object
        :return: Dataset of predictions
        :rtype: DataFrame
        """
        # Produce a DataFrame using preds as the base.  We'll filter out
        # all of the players not in roster_cont by doing a join of the two
        # data frames.  This also has the effect of attaching eligible
        # positions and the Yahoo! player ID from the input player pool.
        my_roster = pd.DataFrame(roster_cont.get_roster())
        df = my_roster.join(self.ppool, on='name')

        # Then we'll figure out the number of games each player is playing
        # this week.  To do this, we'll verify the team each player plays
        # for, then use the game count added as a column.
        team_ids = []
        wk_g = []
        for plyr_series in df.iterrows():
            plyr = plyr_series[1]
            (team_id, g) = self._find_players_schedule(plyr['name'])
            team_ids.append(team_id)
            wk_g.append(g)
        df['team_id'] = team_ids
        df['WK_G'] = wk_g

        return df

    def _find_players_schedule(self, plyr_name):
        """Find a player's schedule for the upcoming week

        :param plyr_name: Name of the player
        :type plyr_name: str
        :return: Pair of team_id (from NHL) and the number of games
        :rtype: (int, int)
        """
        df = self.nhl_players[self.nhl_players['name'] == plyr_name]
        if len(df.index) == 1:
            team_id = df['teamId'].iloc(0)[0]
            return (team_id, self.team_game_count[team_id])
        else:
            return (np.nan, 0)


def init_prediction_builder(lg, cfg):
    return Builder(lg, "espn.skaters.proj.csv", "espn.goalies.proj.csv")


class PlayerPrinter:
    def __init__(self, cfg):
        pass

    def printRoster(self, lineup, bench, injury_reserve):
        """Print out the roster to standard out

        :param lineup: Roster to print out
        :type lineup: List
        :param bench: Players on the bench
        :type bench: List
        :param injury_reserve: Players on the injury reserve
        :type injury_reserve: List
        """
        first_goalie = True
        print("{:4}: {:20} "
              "{:4} {}/{}/{}/{}/{}".
              format('B', '', 'WK_G', 'G', 'A', 'PPP', 'SOG', 'PIM'))
        for pos in ['C', 'LW', 'RW', 'D', 'G']:
            for plyr in lineup:
                if plyr['selected_position'] == pos:
                    if pos in ["G"]:
                        if first_goalie:
                            print("")
                            print("{:4}: {:20} "
                                  "{:4} {}/{}".
                                  format('G', '', 'WK_G', 'W', 'SV%'))
                            first_goalie = False
                        print("{:4}: {:20} "
                              "{:4} {:.1f}/{:.3f}".
                              format(plyr['selected_position'],
                                     plyr['name'], plyr['WK_G'], plyr['W'],
                                     plyr['SV%']))
                    else:
                        print("{:4}: {:20} "
                              "{:4} {:.1f}/{:.1f}/{:.1f}/{:.1f}/{:.1f}".
                              format(plyr['selected_position'], plyr['name'],
                                     plyr['WK_G'], plyr['G'], plyr['A'],
                                     plyr['PPP'], plyr['SOG'], plyr['PIM']))
        print("")
        print("Bench")
        for plyr in bench:
            print(plyr['name'])
        print("")
        print("Injury Reserve")
        for plyr in injury_reserve:
            print(plyr['name'])

    def printListPlayerHeading(self, pos):
        if pos in ['G']:
            print("{:20} {} {}/{}".format('name', 'WK_G', 'W', 'SV%'))
        else:
            print("{:20} {} {}/{}/{}/{}/{}".format('name', 'WK_G', 'G', 'A',
                                                   'PPP', 'SOG', 'PIM'))

    def printPlayer(self, pos, plyr):
        if pos in ['G']:
            if self._does_player_have_valid_stats(plyr, ['W', 'SV%']):
                print("{:20} {:.1f}/{:.3f}".
                      format(plyr[1]['name'], plyr[1]['W'], plyr[1]['SV%']))
        else:
            if self._does_player_have_valid_stats(plyr, ['G', 'A', 'PPP',
                                                         'SOG', 'PIM']):
                print("{:20} {} {:.1f}/{:.1f}/{:.1f}/{:.1f}/{:.1f}".
                      format(plyr[1]['name'], plyr[1]['WK_G'], plyr[1]['G'],
                             plyr[1]['A'], plyr[1]['PPP'], plyr[1]['SOG'],
                             plyr[1]['PIM']))

    def _does_player_have_valid_stats(self, plyr, stats):
        for stat in stats:
            if np.isnan(plyr[1][stat]):
                return False
        return True


class Scorer:
    """Class that scores rosters that it is given"""
    def __init__(self, cfg):
        self.cfg = cfg
        self.use_weekly_sched = cfg['Scorer'].getboolean('useWeeklySchedule')

    def summarize(self, df):
        """Summarize the dataframe into individual stat categories

        :param df: Roster predictions to summarize
        :type df: DataFrame
        :return: Summarized predictions
        :rtype: Series
        """
        temp_stat_cols = ['GA', 'SV']
        stat_cols = ['G', 'A', 'SOG', 'PPP', 'PIM', 'W'] + temp_stat_cols

        res = dict.fromkeys(stat_cols, 0)
        for plyr in df.iterrows():
            p = plyr[1]
            for stat in stat_cols:
                if not np.isnan(p[stat]):
                    # pro-rate full-season projections by games this week
                    if self.use_weekly_sched:
                        res[stat] += p[stat] / 82 * p['WK_G']
                    else:
                        res[stat] += p[stat]

        # Handle ratio stats
        if res['SV'] > 0:
            res['SV%'] = res['SV'] / (res['SV'] + res['GA'])
        else:
            res['SV%'] = None

        # Drop the temporary values used to calculate the ratio stats
        for stat in temp_stat_cols:
            del res[stat]

        return res

    def is_counting_stat(self, stat):
        return stat not in ['SV%']

    def is_highest_better(self, stat):
        return True
```
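
`Builder` reads per-player projection CSVs indexed by name; the stat columns consumed elsewhere in the module imply a layout like the following (player and numbers are invented):

```python
import io
import pandas as pd

skaters_csv = io.StringIO(
    "name,G,A,PPP,SOG,PIM\n"
    "Some Skater,41,75,60,250,20\n"
)
skaters = pd.read_csv(skaters_csv, index_col='name')
# full-season projections; Scorer.summarize() pro-rates them by WK_G / 82
print(skaters.loc['Some Skater', 'G'])   # 41
```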
---
repo: wax911/anime-meta | path: /di/__init__.py | license: Apache-2.0 | url: https://github.com/wax911/anime-meta

```python
from .dependencies import LocalSourceProvider, \
    RemoteSourceProvider, \
    RepositoryProvider, \
    UseCaseProvider, \
    UtilityClientScopeProvider, \
    MapperScopeProvider, \
    SourceUtilityProvider
```
---
repo: rishabmamgai/PCA | path: /main.py | license: none | url: https://github.com/rishabmamgai/PCA

```python
from matplotlib.pyplot import plot
from model import pca, find_k
import numpy as np
import functions

# Loading data and plotting faces
X = functions.load_data(r'D:\ML\PCA\faces.mat')
functions.plot_faces(X)

# Normalizing data
X_normalized = X / 255

# Running PCA
U, S, V = pca(X_normalized)

# Plotting the first 36 eigenvectors
functions.plot_faces(np.transpose(U[:, :36]) * 255)

# Finding the number of principal components
k, variance_retained = find_k(S)
print(f"\nnumber of principal components = {k}")

# Reduction
U_reduce = U[:, :k]
z = np.dot(X_normalized, U_reduce)

# Recovering features
X_recovered = np.dot(z, np.transpose(U_reduce))
X_recovered *= 255
functions.plot_faces(X_recovered)
```
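
`pca` and `find_k` are imported from a `model` module that is not part of this record. A standard SVD-based sketch consistent with how `U`, `S`, and `variance_retained` are used above (rows of `X` assumed to be examples; the 99% threshold is a guess):

```python
import numpy as np

def pca(X):
    m = X.shape[0]
    Sigma = (X.T @ X) / m            # covariance matrix of the examples
    U, S, V = np.linalg.svd(Sigma)   # columns of U are the eigenfaces
    return U, S, V

def find_k(S, threshold=0.99):
    ratios = np.cumsum(S) / np.sum(S)             # variance retained per k
    k = int(np.argmax(ratios >= threshold)) + 1   # smallest k reaching it
    return k, ratios[k - 1]
```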
---
repo: kamau96/Binary-Tree | path: /main.py | license: none | url: https://github.com/kamau96/Binary-Tree

```python
class Node:
def __init__(self,data):
self.left=None
self.right=None
self.data=data
def inorder(temp):
if (not temp):
return
inorder(temp.left)
print(temp.key,end = " ")
inorder(temp.right)
def insert(temp, key):
if not temp:
root= Node(key)
return
queue=[]
queue.append(temp)
while (len(queue)):
temp=queue[0]
queue.pop(0)
if (not temp.left):
temp.left=Node(key)
break
else:
queue.append(temp.left)
if (not temp.right):
temp.right=Node(key)
break
else:
queue.append(temp.right)
def deleteDeepest(root,d_node):
queue=[]
queue.append(root)
while len(queue):
temp=queue.pop(0)
if temp is d_node:
temp=None
return
if temp.right:
if temp.right is d_node:
temp.right=None
else:
queue.append(temp.right)
if temp.left:
if temp.left is d_node:
temp.left=None
else:
queue.append(temp.left)
def deletion(root,key):
if root==None:
return None
if root.left==None and root.right==None:
if root.data==key:
return None
else:
return root
key_node=None
queue=[]
queue.append(root)
while len(queue):
temp=queue.pop(0)
if temp.data==key:
key_node=temp
if temp.left:
queue.append(temp.left)
if temp.right:
queue.append(temp.right)
if key_node:
x=temp.data
deleteDeepest(root,temp)
key_node.data=x
return root | UTF-8 | Python | false | false | 1,470 | py | 2 | main.py | 1 | 0.597279 | 0.594558 | 0 | 74 | 18.878378 | 42 |
juiyangchang/LeetCoding | 7,636,451,892,843 | 034810de3a5718b8cdde54dc14bade2dfd505d42 | a357fa6608a03f86a9511fac7f7678a94120b366 | /python/166_Fraction_to_Recurring_Decimal.py | eb1dfadf2e4d1c953fbb1533ca09a721d122c081 | [] | no_license | https://github.com/juiyangchang/LeetCoding | e33e52b256c54da9a7bf007272c891fe11f8da24 | d9590bf791ece34e391bca0055c8536ee2c8061e | refs/heads/master | 2021-09-09T11:43:24.371160 | 2018-03-15T19:10:52 | 2018-03-15T19:10:52 | 110,507,013 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def fractionToDecimal(self, numerator, denominator):
"""
:type numerator: int
:type denominator: int
:rtype: str
"""
if numerator == 0:
return "0"
if (numerator < 0) != (denominator < 0):
sgn = True
else:
sgn = False
numerator, denominator = abs(numerator), abs(denominator)
lookup = {}
quotient, remainder = numerator // denominator, numerator % denominator
if remainder == 0:
return '-' + str(quotient) if sgn else str(quotient)
int_quotient = quotient
fraction = []
while remainder not in lookup and remainder != 0:
lookup[remainder] = len(fraction)
remainder *= 10
quotient, remainder = remainder // denominator, remainder % denominator
fraction.append(quotient)
if remainder == 0:
return ''.join(map(str, ['-' if sgn else ''] + [int_quotient] + ['.'] + fraction))
else:
return ''.join(map(str, ['-' if sgn else ''] + [int_quotient] + ['.'] + fraction[:lookup[remainder]] + ['('] +
fraction[lookup[remainder]:] + [')'])) | UTF-8 | Python | false | false | 1,306 | py | 64 | 166_Fraction_to_Recurring_Decimal.py | 63 | 0.491577 | 0.484686 | 0 | 37 | 34.324324 | 123 |
desingdeveloperdayday/PickedUpMole_ForEarthForUs | 635,655,196,055 | 0922cc651fa73eb9c0a4241206d9dc65f1d7816a | 8de379a6efeb54ee6df94109feb258e9839f4ab2 | /backend/ForEarthForUs_backend/api/models/category_models.py | 158e6bd31c87f5168b27834cf22032b50e547fc8 | [] | no_license | https://github.com/desingdeveloperdayday/PickedUpMole_ForEarthForUs | b8c557037846b4863f985f6b21aaa4cd5495f9b4 | 85e5ab4ab3f5e7011aa6ede983cc66ece6e489bd | refs/heads/master | 2022-12-12T15:19:42.553227 | 2019-09-30T08:28:47 | 2019-09-30T08:28:47 | 181,449,109 | 6 | 3 | null | false | 2022-12-08T05:04:07 | 2019-04-15T08:54:55 | 2022-04-06T15:21:15 | 2022-12-08T05:04:06 | 45,501 | 6 | 2 | 16 | Kotlin | false | false | from django.db import models
from django.db.models.signals import post_delete
from api.utils.media_clean import file_cleanup
class Category(models.Model):
categoryId = models.IntegerField(primary_key=True, unique=True)
image = models.FileField(null=False, upload_to='images/category/')
completeMessage = models.CharField(max_length=100)
class Meta:
db_table = 'categories'
verbose_name = 'category'
verbose_name_plural = 'categories'
post_delete.connect(file_cleanup, sender=Category, dispatch_uid="category.file_cleanup")
| UTF-8 | Python | false | false | 566 | py | 249 | category_models.py | 44 | 0.736749 | 0.731449 | 0 | 15 | 36.733333 | 88 |
kozakusek/ipp-2020-testy | 12,850,542,185,002 | b3cb8d168c990f1c3209c1b19652dc126fde4dc3 | ca75f7099b93d8083d5b2e9c6db2e8821e63f83b | /z2/part2/interactive/jm/random_fuzzy_arrows_1/753097229.py | a747d802b452446032d933bc6ad307ea5b7daecc | [
"MIT"
] | permissive | https://github.com/kozakusek/ipp-2020-testy | 210ed201eaea3c86933266bd57ee284c9fbc1b96 | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | refs/heads/master | 2022-10-04T18:55:37.875713 | 2020-06-09T21:15:37 | 2020-06-09T21:15:37 | 262,290,632 | 0 | 0 | MIT | true | 2020-06-09T21:15:38 | 2020-05-08T10:10:47 | 2020-05-12T20:07:47 | 2020-06-09T21:15:38 | 218,753 | 0 | 0 | 0 | C | false | false | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 753097229
"""
"""
random actions, total chaos
"""
board = gamma_new(4, 6, 6, 3)
assert board is not None
assert gamma_move(board, 1, 2, 3) == 1
assert gamma_move(board, 2, 0, 1) == 1
assert gamma_busy_fields(board, 2) == 1
assert gamma_free_fields(board, 2) == 22
assert gamma_golden_move(board, 2, 3, 2) == 0
assert gamma_move(board, 3, 3, 2) == 1
assert gamma_move(board, 4, 1, 1) == 1
assert gamma_move(board, 4, 0, 5) == 1
assert gamma_free_fields(board, 4) == 19
assert gamma_move(board, 5, 4, 3) == 0
assert gamma_move(board, 5, 1, 0) == 1
assert gamma_busy_fields(board, 5) == 1
assert gamma_move(board, 6, 5, 1) == 0
assert gamma_move(board, 6, 2, 2) == 1
assert gamma_move(board, 1, 3, 3) == 1
assert gamma_move(board, 2, 3, 1) == 1
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 4, 1, 5) == 1
assert gamma_move(board, 4, 0, 2) == 1
assert gamma_move(board, 6, 2, 2) == 0
assert gamma_free_fields(board, 6) == 13
assert gamma_golden_possible(board, 6) == 1
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_move(board, 1, 2, 1) == 1
assert gamma_move(board, 2, 0, 4) == 1
assert gamma_move(board, 3, 4, 1) == 0
assert gamma_move(board, 3, 2, 2) == 0
assert gamma_move(board, 4, 2, 1) == 0
assert gamma_move(board, 5, 3, 0) == 1
board717240267 = gamma_board(board)
assert board717240267 is not None
assert board717240267 == ("44..\n"
"2...\n"
"..11\n"
"4.63\n"
"2412\n"
".5.5\n")
del board717240267
board717240267 = None
assert gamma_move(board, 6, 2, 1) == 0
assert gamma_move(board, 6, 1, 5) == 0
assert gamma_golden_possible(board, 6) == 1
assert gamma_move(board, 1, 1, 4) == 1
assert gamma_free_fields(board, 1) == 4
assert gamma_move(board, 2, 3, 3) == 0
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 3, 4, 2) == 0
assert gamma_move(board, 3, 0, 2) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 0, 4) == 0
assert gamma_move(board, 4, 3, 4) == 0
assert gamma_move(board, 5, 4, 3) == 0
assert gamma_move(board, 5, 0, 3) == 1
assert gamma_golden_possible(board, 5) == 1
assert gamma_move(board, 6, 4, 2) == 0
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_busy_fields(board, 3) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 0, 2) == 0
assert gamma_move(board, 5, 4, 2) == 0
assert gamma_golden_possible(board, 5) == 1
assert gamma_move(board, 6, 4, 3) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 3, 4, 2) == 0
assert gamma_move(board, 4, 1, 4) == 0
assert gamma_busy_fields(board, 4) == 4
assert gamma_move(board, 5, 3, 1) == 0
assert gamma_move(board, 5, 3, 0) == 0
assert gamma_busy_fields(board, 5) == 3
assert gamma_move(board, 6, 1, 5) == 0
assert gamma_move(board, 1, 3, 5) == 0
assert gamma_free_fields(board, 1) == 4
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 5, 2) == 0
assert gamma_move(board, 5, 5, 2) == 0
assert gamma_move(board, 5, 3, 1) == 0
assert gamma_move(board, 6, 4, 3) == 0
assert gamma_move(board, 6, 2, 1) == 0
assert gamma_move(board, 1, 1, 5) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_golden_move(board, 2, 0, 1) == 0
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 4, 5, 2) == 0
assert gamma_move(board, 4, 0, 1) == 0
assert gamma_busy_fields(board, 4) == 4
assert gamma_move(board, 5, 2, 1) == 0
assert gamma_move(board, 6, 0, 2) == 0
assert gamma_busy_fields(board, 6) == 1
assert gamma_free_fields(board, 6) == 8
gamma_delete(board)
| UTF-8 | Python | false | false | 4,058 | py | 1,183 | 753097229.py | 278 | 0.630606 | 0.537457 | 0 | 123 | 31.99187 | 46 |
LuckyGan/LeetCode | 3,521,873,218,548 | d8fe165a5b592f50e37291b4db34f2b8e8834e63 | ebba35952f5499d273168658505a17dc717354a6 | /Python/066.Plus One/Solution.py | a6c0955fd3b90a3ef8075964a6544ef47f318741 | [] | no_license | https://github.com/LuckyGan/LeetCode | 3aaa3ea69776b060ece6dc7380b2dee8b1b2a07f | bdd2808db9d629e84523e203f55b4493c3fc286f | refs/heads/master | 2018-12-24T15:34:37.817690 | 2018-11-09T08:30:21 | 2018-11-09T08:30:21 | 108,117,709 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #coding=utf-8
class Solution:
def plusOne(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
for idx in range(len(digits) - 1, -1, -1):
if digits[idx] != 9:
digits[idx] += 1
break
else:
digits[idx] = 0
if 0 == digits[0]:
digits.insert(0, 1)
return digits
if __name__ == "__main__":
digits = [1, 2, 3]
print(Solution().plusOne(digits))
digits = [4, 3, 2, 1]
print(Solution().plusOne(digits)) | UTF-8 | Python | false | false | 566 | py | 333 | Solution.py | 332 | 0.45053 | 0.418728 | 0 | 23 | 23.652174 | 50 |
jacksonyoudi/smt | 12,704,513,271,349 | 7542637f49a8de6c745b172702949f4ec062070e | fa27204c5822091039eed29cf6180a631f0d949e | /controller/lib/csv_handle.py | 36ee95fd10be76c01aa6be201e4d948b08f5b37d | [] | no_license | https://github.com/jacksonyoudi/smt | d1040e6455e0519214843865c33ce9bb9e379182 | 3f240da266bb1735e3ec2c9007569c46ed9458b2 | refs/heads/master | 2022-12-05T06:19:16.112029 | 2020-08-31T15:04:04 | 2020-08-31T15:04:04 | 282,649,914 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import csv
import datetime
import time
def table(data):
import sqlite3
con = sqlite3.connect('/Users/changyouliang/project/others/smt/first.db')
cur = con.cursor()
sql = "insert into acv_tab (wo_no,time,c3 ,c4 ,R70 ,c6 ,C158 ,c8 ,R69 ,c10 ,R3 ,c12 ,IC8 ,c14 ,IC3 ,c16 ,D11 ,c18 ,L5 ,c20 ,C150 ,c22 ,C100 ,C24) values ('{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}')".format(
*data)
try:
cur.execute(sql)
except Exception as e:
print(e)
finally:
con.commit()
cur.close()
con.close()
def parse_csv(csv_file, encoding=None):
"""
读取数据返回列表的数据
:param csv_file:
:return:
"""
data = []
with open(csv_file, encoding=encoding) as f:
f_csv = csv.reader(f)
# 去除头部数据
header = next(f_csv)
for row in f_csv:
data.append(row)
return header, data
if __name__ == '__main__':
f = "../../1HZ9011113-05_6433997B.csv"
header, data = parse_csv(f)
result = []
print(header)
length = len(data)
stops = 0
pre_time = None
cur_time = None
start_time = data[0][1]
end_time = data[-1][1]
row = None
for i in range(0, length):
row = data[i]
time_array = time.strptime(row[1], "%Y/%m/%d %H:%M:%S")
other_style_time = int(time.mktime(time_array))
cur_time = other_style_time
if pre_time:
if (cur_time - pre_time) <= 60 * 5 and (cur_time - pre_time) > 0:
stops += 1
pre_time = other_style_time
item = {
"type": "detail",
"品番": row[0][10:19],
"工单号": row[0][22:30],
"面番": row[0][-1],
"开始时间": row[1],
"结束时间": end_time,
"批量": length,
"导入成功时间": datetime.datetime.now()
}
item = {
"type": "agg",
"品番": row[0][10:19],
"工单号": row[0][22:30],
"面番": row[0][-1],
"开始时间": start_time,
"结束时间": end_time,
"批量": length,
"短暂停回数": stops,
"导入成功时间": datetime.datetime.now()
}
result.insert(0, item)
# print(result[0])
# print(stops)
# one = data[0]
# table(one)
| UTF-8 | Python | false | false | 2,447 | py | 12 | csv_handle.py | 10 | 0.465599 | 0.422761 | 0 | 98 | 22.581633 | 287 |
MakerSpaceLeiden/AccesSystem | 15,496,242,013,734 | 64889a76dfc429fc85d1d8d7df95eb2c0a39b63d | b38c5eb548636af44b4c82fc2b57e3103114f2e0 | /lib-python/DrumbeatNode.py | fe98878e705f09201be08927adfacf384daa53f2 | [
"LicenseRef-scancode-other-permissive",
"MIT",
"NTP",
"LicenseRef-scancode-rsa-1990",
"LicenseRef-scancode-rsa-md4",
"Beerware",
"RSA-MD",
"HPND-sell-variant",
"Spencer-94",
"LicenseRef-scancode-zeusbench",
"metamail",
"Apache-2.0"
] | permissive | https://github.com/MakerSpaceLeiden/AccesSystem | e652a343c86ac904de8a1a08adc13bbc3ee6de7d | 5e39572f51fafca71750660fcf5f737670f17d54 | refs/heads/master | 2023-05-27T01:44:57.893580 | 2023-05-16T11:27:48 | 2023-05-16T11:27:48 | 54,308,309 | 4 | 4 | Apache-2.0 | false | 2022-02-21T22:37:34 | 2016-03-20T08:46:08 | 2021-12-16T21:13:13 | 2022-02-21T22:37:33 | 18,697 | 4 | 6 | 1 | C++ | false | false | #!/usr/bin/env python3.4
import os
import sys
import time
import hmac
import hashlib
sys.path.append('../lib')
import ACNode
class DrumbeatNode(ACNode.ACNode):
# import SharedSecret
# class DrumbeatNode(SharedSecret.SharedSecret):
default_interval = 60
default_node = "drumbeat"
def parseArguments(self):
self.parser.add('--interval','-i',default=self.default_interval,action='store',type=int,
help='DrumbeatNode interval, in seconds (default: '+str(self.default_interval)+' seconds)'),
super().parseArguments()
last_time = 0
def loop(self):
if time.time() - self.last_time > self.cnf.interval:
self.last_time = time.time()
self.send(self.cnf.node, "beat")
if self.cnf.secrets:
for node in self.cnf.secrets.keys():
self.send(node, "beat")
super().loop()
# Allow this class to auto instanciate if
# we run it on its own.
#
if __name__ == "__main__":
drumbeat = DrumbeatNode()
if not drumbeat:
sys.exit(1)
exitcode = drumbeat.run()
sys.exit(exitcode)
| UTF-8 | Python | false | false | 1,041 | py | 172 | DrumbeatNode.py | 127 | 0.667627 | 0.661864 | 0 | 43 | 23.186047 | 101 |
takuseno/d3rlpy | 1,013,612,322,672 | 13ba43e853eed6fc8455cc68984f18f56b26ab1d | c3ca0bcea4d1b4013a0891f014928922fc81fe7a | /tests/logging/test_logger.py | 55ca4443c89926bbd134af8e7bf8317832b9f761 | [
"MIT"
] | permissive | https://github.com/takuseno/d3rlpy | 47894b17fc21fab570eca39fe8e6925a7b5d7d6f | 4ba297fc6cd62201f7cd4edb7759138182e4ce04 | refs/heads/master | 2023-08-23T12:27:45.305758 | 2023-08-14T12:07:03 | 2023-08-14T12:07:03 | 266,369,147 | 1,048 | 222 | MIT | false | 2023-09-02T08:12:48 | 2020-05-23T15:51:51 | 2023-09-01T07:59:50 | 2023-09-02T08:12:47 | 21,748 | 1,042 | 194 | 47 | Python | false | false | from typing import Any, Dict
import pytest
from d3rlpy.logging import D3RLPyLogger
from d3rlpy.logging.logger import SaveProtocol
class StubLoggerAdapter:
def __init__(self, experiment_name: str):
self.experiment_name = experiment_name
self.is_write_params_called = False
self.is_before_write_metric_called = False
self.is_write_metric_called = False
self.is_after_write_metric_called = False
self.is_save_model_called = False
self.is_close_called = False
def write_params(self, params: Dict[str, Any]) -> None:
self.is_write_params_called = True
def before_write_metric(self, epoch: int, step: int) -> None:
self.is_before_write_metric_called = True
def write_metric(
self, epoch: int, step: int, name: str, value: float
) -> None:
assert self.is_before_write_metric_called
self.is_write_metric_called = True
def after_write_metric(self, epoch: int, step: int) -> None:
assert self.is_before_write_metric_called
assert self.is_write_metric_called
self.is_after_write_metric_called = True
def save_model(self, epoch: int, algo: SaveProtocol) -> None:
self.is_save_model_called = True
def close(self) -> None:
self.is_close_called = True
class StubLoggerAdapterFactory:
def create(self, experiment_name: str) -> StubLoggerAdapter:
return StubLoggerAdapter(experiment_name)
class StubAlgo:
def save(self, fname: str) -> None:
pass
@pytest.mark.parametrize("with_timestamp", [False, True])
def test_d3rlpy_logger(with_timestamp: bool) -> None:
logger = D3RLPyLogger(StubLoggerAdapterFactory(), "test", with_timestamp)
# check experiment_name
adapter = logger.adapter
assert isinstance(adapter, StubLoggerAdapter)
if with_timestamp:
assert adapter.experiment_name != "test"
else:
assert adapter.experiment_name == "test"
assert not adapter.is_write_params_called
logger.add_params({"test": 1})
assert adapter.is_write_params_called
logger.add_metric("test", 1)
with logger.measure_time("test"):
pass
assert not adapter.is_before_write_metric_called
assert not adapter.is_write_metric_called
assert not adapter.is_after_write_metric_called
metrics = logger.commit(1, 1)
assert "test" in metrics
assert "time_test" in metrics
assert adapter.is_before_write_metric_called
assert adapter.is_write_metric_called
assert adapter.is_after_write_metric_called
assert not adapter.is_save_model_called
logger.save_model(1, StubAlgo())
assert adapter.is_save_model_called
assert not adapter.is_close_called
logger.close()
assert adapter.is_close_called
| UTF-8 | Python | false | false | 2,779 | py | 233 | test_logger.py | 187 | 0.6819 | 0.678302 | 0 | 89 | 30.224719 | 77 |
pyvista/pyvista | 16,561,393,909,929 | 2caaf84a7b41af9341a35362708346aaa21cc127 | 2d6d5424e881252898b898fbfbc47fe1487371cf | /examples_trame/advanced/contour.py | a3d129d0c74b6efd9c6542eb0d434ed4d8edc78a | [
"MIT"
] | permissive | https://github.com/pyvista/pyvista | 333e55bfaa6b8bcdb47e2df04c823d35f05db364 | 1b450b23340f367315fc914075d551e0a4df8cc3 | refs/heads/main | 2023-08-20T08:04:27.146062 | 2023-08-20T01:14:03 | 2023-08-20T01:14:03 | 92,974,124 | 1,885 | 389 | MIT | false | 2023-09-14T21:09:28 | 2017-05-31T18:01:42 | 2023-09-14T07:45:35 | 2023-09-14T21:09:28 | 239,155 | 1,956 | 365 | 377 | Python | false | false | from trame.app import get_server
from trame.ui.vuetify import SinglePageLayout
from trame.widgets import vuetify
from vtkmodules.vtkFiltersCore import vtkContourFilter
import pyvista as pv
from pyvista import examples
from pyvista.trame.ui import plotter_ui
# -----------------------------------------------------------------------------
# Trame initialization
# -----------------------------------------------------------------------------
pv.OFF_SCREEN = True
server = get_server()
state, ctrl = server.state, server.controller
state.trame__title = "Contour"
ctrl.on_server_ready.add(ctrl.view_update)
# -----------------------------------------------------------------------------
# Pipeline
# -----------------------------------------------------------------------------
volume = examples.download_head_2()
contour = vtkContourFilter()
contour.SetInputDataObject(volume)
# contour.SetComputeNormals(True)
# contour.SetComputeScalars(False)
# Extract data range => Update store/state
data_range = tuple(volume.get_data_range())
contour_value = 0.5 * (data_range[0] + data_range[1])
state.contour_value = contour_value
state.data_range = (float(data_range[0]), float(data_range[1]))
# Configure contour with valid values
contour.SetNumberOfContours(1)
contour.SetValue(0, contour_value)
# -----------------------------------------------------------------------------
# Plotting
# -----------------------------------------------------------------------------
pl = pv.Plotter()
actor = pl.add_mesh(contour, cmap="viridis", clim=data_range)
# -----------------------------------------------------------------------------
# Callbacks
# -----------------------------------------------------------------------------
@state.change("contour_value")
def update_contour(contour_value, **kwargs):
contour.SetValue(0, contour_value)
ctrl.view_update_image()
# -----------------------------------------------------------------------------
# GUI
# -----------------------------------------------------------------------------
with SinglePageLayout(server) as layout:
layout.title.set_text("Contour")
with layout.toolbar:
vuetify.VSpacer()
vuetify.VSlider(
v_model="contour_value",
min=("data_range[0]",),
max=("data_range[1]",),
hide_details=True,
dense=True,
style="max-width: 300px",
start="trigger('demoAnimateStart')",
end="trigger('demoAnimateStop')",
change=ctrl.view_update,
)
vuetify.VProgressLinear(
indeterminate=True,
absolute=True,
bottom=True,
active=("trame__busy",),
)
with layout.content:
with vuetify.VContainer(
fluid=True,
classes="pa-0 fill-height",
):
# Use PyVista UI template for Plotters
view = plotter_ui(pl, namespace='demo')
ctrl.view_update = view.update
ctrl.view_update_image = view.update_image
server.start()
| UTF-8 | Python | false | false | 3,062 | py | 488 | contour.py | 358 | 0.483018 | 0.477792 | 0 | 106 | 27.886792 | 79 |
sharathghosh/Python-Intro | 14,594,298,886,934 | c4c64dd560975926abcc44f72fa49d0841b4e755 | 054696b547808b89ea11c34aa30107ef232b4819 | /tutorial_python_basics.py | f3f8e3fda03ac1e03faf9e802385b5742180acfb | [] | no_license | https://github.com/sharathghosh/Python-Intro | d3c746d41997ff0c8b28d45706e63bfaa12aab9b | 30d5ece2fcc882f467f0bff432c0b89617194e17 | refs/heads/master | 2020-07-05T18:36:19.507514 | 2016-11-17T03:29:27 | 2016-11-17T03:29:27 | 73,986,643 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 16 14:26:46 2016
@author: sharath
"""
# Variables
# a,b,c are integer variables
a = 5 # assignment operator
b = 6
c = -8
# d,e,f are floating point variables
d = 5.6
e = 9.0000e-4
f = -1.4
# Find out the type of a variable
print type(a)
print type(e)
# Boolean types - True and False are defined keywords in Python
True
False
# Operators
print (a + b)
print (c - d)
print (a * c)
print (a / b)
print (a % 2)
print (c ** a)
print (b // a)
x = 1
a = a + 1 # shorthand x += 1
# Conditions, make sure the indentation is correct
if b >= 5:
print("if clause is true")
elif b >= 3:
print("elif clause is true")
else:
print("else clause is true")
# conditional operators
# == equal to
# > greater than
# < less than
# >= greater than or equal to
# <= less than or equal to
# != not equal to
# Functions
def function_dummy_1():
# This function does not accept any arguments
# Function body that does something meaningful
return 0 # return some results
def function_add(x, y):
# This function accepts 2 arguments
return x + y # return sum of the two arguments
def function_mul(a, b):
# This function accepts 2 arguments, note that the a and b in the function
# definition is not the same as the variables a and b
return a * b # return product of the two arguments
# Scope of variables within functions
def function_dummy_2():
# This function does not accept any arguments
print y # y is not declared before and is not visible within the function
return y # return some results
def function_dummy_3():
# This function does not accept any arguments
y = 10
z = 15
return True # return some results
print y, z # y and z are declared within the function, and are not visible outside
# Loops, note the indent
# whatever is indented is considered to be a part of the repeatable section
# of the loop
for a in range(1, 4):
print (a)
# range() generates a sequence of integers, often helpful for iterating over a
# series of items
print range(5, 10)
print range(5, 10, 2)
print range(5, 10, -2)
print range(10, 5, -2)
# Another way of looping in Python, note the increment step
# If you forget the increment step, this code will run forever printing values of u
u = 1
while u < 10:
print (u)
u+=1
# Strings
foo = "This is my string"
print foo
bar = "This is your string"
print bar
baz = "This is our string"
print baz
bat = 'We can use single quotes; this is also a string'
print bat
woot = 'We can "mix" quotes'
print woot
toot = " We can 'mix' quotes like this too "
print toot
zoot = 'People\'s Republic'
print zoot
# counts the number of occurrences of 'x'
print woot.count('x')
# returns the position of character 'x'
print foo.find('x')
# returns the stringVar in lowercase (this is temporary)
print foo.lower()
# returns the stringVar in uppercase (this is temporary)
print foo.upper()
# replaces all occurrences of 'my' with 'your' in the string
print foo.replace('my', 'your')
# remove preceeding and trailing spaces
print toot.strip()
# Slicing strings
print foo[0:4]
print foo[4:7]
print foo[4:]
print foo[:-1]
print foo[4:-1]
print foo[-5:]
# Lists
int_list = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
print int_list
# iterating over list items
for item in int_list:
print item, item**2
for idx in range(len(int_list)):
print int_list[idx], int_list[idx]**2
# appends element to end of the list
int_list.append(16)
print int_list
# counts the number of occurrences of 4s in the list
int_list.count(4)
# returns the index of 10 in the list
int_list.index(10)
# inserts 4 at location 2
int_list.insert(2, 4)
print int_list
# returns last element then removes it from the list
int_list.pop()
print int_list
# finds and removes first 4 from list
int_list.remove(4)
print int_list
# reverses the elements in the list
int_list.reverse()
#sorts the list alphabetically in ascending order, or numerical in ascending order
int_list.sort()
# Lists could also be sliced
# Strings could be interpreted as lists
for letter in foo:
print letter
# replace multiple instances in lists - to do
t = (1, 2, 3)
print (t)
t.append(4) # will fail because tuples are immutable
print t[0], t[1], t[2]
t[0] = -1 # will fail because tupes are immutable
# Dictionaries
# Key value data structure, unordered - what you type in need not be the order
# in which the data gets stored
my_dictionary = {'name': 'Foo Bar',
'Age': -220,
'Profession': 'Jobless',
'Qualifications': 'Worthless'}
print(my_dictionary['Profession'])
# Iterating and accessing dictionary keys and values
for k in my_dictionary:
print 'Key:', k, ', - Value:', my_dictionary[k]
# The dictionary data type provides in built methods to access keys and values
print my_dictionary.keys()
print my_dictionary.values()
# Another way of accessing keys and values from a dictionary
for key in my_dictionary.keys():
print key, ' - ', my_dictionary[key]
# Adding a new key value pair
my_dictionary['IQ'] = -911
print my_dictionary
# Removing a key value pair
my_dictionary.pop('IQ')
# pop-ing a non-existent key will result in an error
my_dictionary.pop('IQ') # already pop-ed, cannot pop 'IQ' again
# Update a value for a key
my_dictionary['name'] = 'Foo Baz'
# References and copying objects, mutable types
# We had an int_list
# Let's create a new_list by assignment
new_list = int_list
print new_list
print int_list
# Let's change the first item of new_list
new_list[0] = -10
print new_list # nothing unexpected
print int_list # Why did the first item change here?
# For mutable composite data types, python creates references
# If another copy is reuqired, it must be explicitly created
import copy
new_list = copy.deepcopy(int_list)
new_list[0] = 1
print new_list
print int_list
# This behaviour is not applicable for basic data types like integers, floats etc.
my_input = input('Type in something: ')
print my_input, len(my_input)
my_raw_input = raw_input('Type in some more: ')
print my_raw_input, len(my_raw_input)
file_handle = open('temp.txt', 'w')
file_handle.writelines(['this is a string\n', 'this is another string\n'])
file_handle.close()
file_handle = open('temp.txt', 'r')
x = file_handle.readlines()
file_handle.close()
# there are the readline(), writeline() functions
# there are the read() and write() functions too
#enumerate
list_of_strings = ['aa', 'bb', 'cc', 'dd']
for idx, string in enumerate(list_of_strings):
if idx%2 == 1:
print idx, string
#generators
mygenerator = (x*x for x in range(3))
for i in mygenerator:
print i
#importing modules, code organization etc.
# create folder, two files with classes, __init__ method and another method
# __init__.py file
# __name__ variable, significance
| UTF-8 | Python | false | false | 7,203 | py | 5 | tutorial_python_basics.py | 4 | 0.66028 | 0.641955 | 0 | 313 | 21 | 83 |
Nyumat/Pathfinding-Algorithm-Tool | 8,761,733,325,406 | d73f46cf55fd463a821374e470236b2a706f01be | 203025acaac82b5ba2d135dac704f290ea814ab5 | /astar/astar.py | 04b4a4cd592d500c9d70a3d46d22b29c2762d3df | [
"MIT"
] | permissive | https://github.com/Nyumat/Pathfinding-Algorithm-Tool | d4ce9184996732348d6ecf2f680691097704211b | 164a94c68ece4e990dcb154ff7a21f5d0f1efcbb | refs/heads/master | 2023-03-17T23:22:19.861676 | 2023-03-06T08:01:03 | 2023-03-06T08:01:03 | 286,133,872 | 3 | 0 | MIT | false | 2020-09-11T06:44:49 | 2020-08-08T23:20:08 | 2020-09-11T01:15:31 | 2020-09-11T06:44:48 | 39 | 2 | 0 | 0 | Python | false | false | import pygame
import math
from queue import PriorityQueue
# RGB Colors
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
PURPLE = (128, 0, 128)
ORANGE = (255, 165 ,0)
GREY = (128, 128, 128)
TURQUOISE = (64, 224, 208)
# Node class, which contains the object initizalizers and methods that will be used throughout the project.
class Node:
def __init__(self, row, col, window_size, total_rows):
self.row = row
self.col = col
self.x = row * window_size
self.y = col * window_size
self.color = BLACK
self.neighbors = []
self.window_size = window_size
self.total_rows = total_rows
def get_pos(self):
return self.row, self.col
def is_closed(self):
return self.color == TURQUOISE
def is_open(self):
return self.color == RED
def is_barrier(self):
return self.color == WHITE
def is_start(self):
return self.color == ORANGE
def is_end(self):
return self.color == RED
def reset(self):
self.color = BLACK
def make_start(self):
self.color = ORANGE
def make_closed(self):
self.color = TURQUOISE
def make_open(self):
self.color = RED
def make_barrier(self):
self.color = WHITE
def make_end(self):
self.color = RED
def make_path(self):
self.color = PURPLE
def draw(self, client):
pygame.draw.rect(client, self.color, (self.x, self.y, self.window_size, self.window_size))
# Manhattan Heuristic Implementation
# By using this Heuristic, we can only move along the grid in four directions and not "Diagonally" (up,down,left,right)
def update_neighbors(self, grid):
self.neighbors = []
if self.row < self.total_rows - 1 and not grid[self.row + 1][self.col].is_barrier(): # DOWN
self.neighbors.append(grid[self.row + 1][self.col])
if self.row > 0 and not grid[self.row - 1][self.col].is_barrier(): # UP
self.neighbors.append(grid[self.row - 1][self.col])
if self.col < self.total_rows - 1 and not grid[self.row][self.col + 1].is_barrier(): # RIGHT
self.neighbors.append(grid[self.row][self.col + 1])
if self.col > 0 and not grid[self.row][self.col - 1].is_barrier(): # LEFT
self.neighbors.append(grid[self.row][self.col - 1])
def __lt__(self, other):
return False
window_size = 600
client = pygame.display.set_mode((window_size, window_size))
pygame.display.set_caption("[Thomas's Pathfinding Visualization Tool V1] Made by @Nyumat")
def h(p1, p2):
x1, y1 = p1
x2, y2 = p2
return abs(x1 - x2) + abs(y1 - y2)
# Creates path from node to node.
def reconstruct_path(came_from, current, draw):
while current in came_from:
current = came_from[current]
current.make_path()
draw()
# A* Pathfinding Search
def algorithm(draw, grid, start, end):
count = 0
open_set = PriorityQueue()
open_set.put((0, count, start))
came_from = {}
# G score is the cost "so far" to reach node n, which is why it starts at 0.
g_score = {node: float("inf") for row in grid for node in row}
g_score[start] = 0
# F score will represent the total estimated cost of the path through the neighbors
f_score = {node: float("inf") for row in grid for node in row}
f_score[start] = h(start.get_pos(), end.get_pos())
open_set_hash = {start}
while not open_set.empty():
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
current = open_set.get()[2]
open_set_hash.remove(current)
if current == end:
# Draw the optimal path once the search reaches the second placed node.
reconstruct_path(came_from, end, draw)
end.make_end()
start.make_start()
return True
# Algo to simultaneously evaluate the neighbor and search for the ending node in the open set
for neighbor in current.neighbors:
temp_g_score = g_score[current] + 1
# SEE README.md to understand this algorithm.
# To put it simply, it traces through each neighbor to find the end node.
if temp_g_score < g_score[neighbor]:
came_from[neighbor] = current
g_score[neighbor] = temp_g_score
f_score[neighbor] = temp_g_score + h(neighbor.get_pos(), end.get_pos())
# If the node isnt found within the open set, the search spreads.
if neighbor not in open_set_hash:
count += 1
open_set.put((f_score[neighbor], count, neighbor))
open_set_hash.add(neighbor)
neighbor.make_open()
draw()
if current != start:
current.make_closed()
return False
# Function controls how our interface, or "grid" will be created.
def make_grid(rows, window_size):
grid = []
gap = window_size // rows
for i in range(rows):
grid.append([])
for j in range(rows):
node = Node(i, j, gap, rows)
grid[i].append(node)
return grid
# Function draws the border and lines for the tool
def draw_grid(client, rows, window_size):
gap = window_size // rows
for i in range(rows):
pygame.draw.line(client, GREY, (0, i * gap), (window_size, i * gap))
for j in range(rows):
pygame.draw.line(client, GREY, (j * gap, 0), (j * gap, window_size))
# Function that will draw the plane for the tool to be used in
def draw(client, grid, rows, window_size):
client.fill(BLACK)
for row in grid:
for node in row:
node.draw(client)
draw_grid(client, rows, window_size)
pygame.display.update()
# Determines the part of the grid we're clicking so we can interact with it.
def get_clicked_pos(pos, rows, window_size):
gap = window_size // rows
y, x = pos
row = y // gap
col = x // gap
return row, col
# Main function to hold a lot of the logic and controls.
def main(client, window_size):
ROWS = 40
grid = make_grid(ROWS, window_size)
start = None
end = None
run = True
while run:
draw(client, grid, ROWS, window_size)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
# Left click
if pygame.mouse.get_pressed()[0]:
pos = pygame.mouse.get_pos()
row, col = get_clicked_pos(pos, ROWS, window_size)
node = grid[row][col]
if not start and node != end:
start = node
start.make_start()
elif not end and node != start:
end = node
end.make_end()
elif node != end and node != start:
node.make_barrier()
# Right Click
elif pygame.mouse.get_pressed()[2]:
pos = pygame.mouse.get_pos()
row, col = get_clicked_pos(pos, ROWS, window_size)
node = grid[row][col]
node.reset()
if node == start:
start = None
elif node == end:
end = None
if event.type == pygame.KEYDOWN:
# Draw Path to the other node on run (space bar)
if event.key == pygame.K_SPACE and start and end:
for row in grid:
for node in row:
node.update_neighbors(grid)
# Call algorithm object for pathfinding.
algorithm(lambda: draw(client, grid, ROWS, window_size), grid, start, end)
if event.key == pygame.K_r:
start = None
end = None
grid = make_grid(ROWS, window_size)
pygame.quit()
if __name__ == "__main__":
main(client,window_size)
| UTF-8 | Python | false | false | 6,926 | py | 2 | astar.py | 1 | 0.659111 | 0.643806 | 0 | 250 | 26.688 | 120 |
Coburn37/NASS-Search | 8,787,503,115,780 | cfc7a44dbeaa54b823eb1c652403c9ccffa28a96 | cbba218ea18839f221595c8466838c87b2b1d9cd | /nassAPI/nassGlobal.py | c739c54727aec76208b00663d80e5cb09a80ae5f | [] | no_license | https://github.com/Coburn37/NASS-Search | f1d86ca4dcd4366d481e5cdf6b4712f4c283bfe9 | c6db7dea4c9e79e0f9dc9e1caa2ba1e273a1fc70 | refs/heads/master | 2021-01-17T09:18:20.294506 | 2018-10-21T01:05:11 | 2018-10-21T01:05:11 | 35,383,516 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
NASS Search Tool Global Preferences and Data
This module holds the global preferences and data structures. All prefs & data
should be read from nassGlobal.prefs and nassGlobal.data.
"""
import os.path
import json
#DummyReadOnlyDict - A dict that once read will call init() but only once across all instances
#Only implements __getitem__ so should be read only
class DummyReadOnlyDict():
triggered = False
def __init__(self, target):
self.target = target
def noCheck__getitem__(self, key):
return self.target[key]
def __getitem__(self, key):
#Multiple dicts might be accessed at different times
#Only trigger if none before
if not DummyReadOnlyDict.triggered:
init()
DummyReadOnlyDict.triggered = True
#Replace this function with the noCheck version if a trigger has occured previously
if DummyReadOnlyDict.triggered:
self.__getitem__ = self.noCheck__getitem__
return self.target[key]
#PassThroughDict - Dictionary that will __getitem__ the values of a different dict if they exist
#Finalization will join the two dicts, overwriting everything in self with keys from target
#This allows for values in one dict to be used if they exist in calculating new values (specifically in init())
#but to also allow the programmer to give a different value in place of these calculated values which will
#be applied in the finalization.
class PassThroughDict(dict):
def __init__(self, target, *args, **kwargs):
super().__init__(*args, **kwargs)
self.target = target
def __getitem__(self, key):
if key in self.target:
return self.target.__getitem__(key)
return dict.__getitem__(self, key)
#Turn this dict back into a normal dict, overwriting all values with the ones in target dict
def finalizeDict(self):
self.update(self.target)
self.__getitem__ = dict.__getitem__
#Default configuration
#USER PREFERENCES
userPrefs = {}
_prefs = PassThroughDict(userPrefs) #The final values for preferences that userPrefs will be joined over top of once an init() occurs
prefs = DummyReadOnlyDict(_prefs) #Where all API values are read from (if one is read, we trigger an init())
#GLOBAL DATA
_data = {}
data = DummyReadOnlyDict(_data)
def updateUserPrefs(moreUserPrefs):
"""
Overwrites global API prefs (only before init() is called)
Takes a dict with key-value pairs representing the prefs to be overridden
and the value to override with
"""
#They can only update the preferences (or should only update them) when we haven't init'd
if DummyReadOnlyDict.triggered:
raise RuntimeError("NASS has already been inited. User preferences shouldn't be changed now")
#Join the user prefs overwriting the default prefs
userPrefs.update(moreUserPrefs)
def init():
"""
Inits the global state of prefs and data
Called automatically when either nassGlobal.prefs and nassGlobal.data is
accessed. Calculates all default preferences, substituting those specified
by the programmer in updateUserPrefs when necessary (stored in userPrefs dict)
"""
#DEFAULT USER PREFERENCES
#Folders and files configuration
_prefs["rootPath"] = os.path.realpath(".")
_prefs["dbPath"] = os.path.join(_prefs["rootPath"], "nassDB")
_prefs["configPath"] = _prefs["rootPath"]
_prefs["preprocessJSONFile"] = os.path.join(_prefs["configPath"], "preprocessDBInfo.json")
_prefs["staticJSONFile"] = os.path.join(_prefs["configPath"], "staticDBInfo.json")
#Directories that we're looking for in the nassDB folder
_prefs["dataDirNames"] = [
"ASCII",
"Unformatted Data",
"Expanded SAS",
os.path.normpath("Expanded SAS/UNFORMATTED")]
#Keys kept for stub cases
_prefs["stubKeys"] = ["CASENO", "PSU", "VEHNO", "OCCNO"]
#Default compare functions
#TODO: Shouldn't go in API, should be in web application
def stringIn(found, find):
return str(find) in str(found)
def equal(found, find):
return str(find) == str(found)
def startsWith(found, find):
return str(found).startswith(str(find))
_prefs["supportedCompareFuncs"] = {
"String Inside" : stringIn,
"Equal" : equal,
"Starts With" : startsWith
}
_prefs.finalizeDict()
#GLOBAL DATA
#Json info on dbs
fstaticDBInfo = open(_prefs["staticJSONFile"], "r")
_data["staticDBInfo"] = json.loads(fstaticDBInfo.read())
if not os.path.isfile(_prefs["preprocessJSONFile"]):
raise RuntimeError("No preprocessDBInfo found! Run the preprocessor first!")
else:
fpreprocessDBInfo = open(_prefs["preprocessJSONFile"],"r")
_data["preprocessDBInfo"] = json.loads(fpreprocessDBInfo.read())
#COMMON FUNCTIONALITY
def userYN(msg):
while True:
userIn = input(msg)
if userIn.lower() == "y":
return True
elif userIn.lower() == "n":
return False
print("Invalid response, please choose y or n")
class NASSJSONEncoder(json.JSONEncoder):
def default(self, o, *args, **kwargs):
if getattr(o, "toJSONHelper", None) and callable(o.toJSONHelper):
return o.toJSONHelper()
else:
return super().default(o, *args, **kwargs) | UTF-8 | Python | false | false | 5,412 | py | 26 | nassGlobal.py | 17 | 0.66833 | 0.66833 | 0 | 148 | 35.574324 | 133 |
vramana30/newSnia | 9,887,014,732,928 | 5e833be53867aedc38cbe6e5c02bc273fc73a516 | f6a20ee677ece1d60741d525b22541510754ab06 | /api_emulator/redfish/templates/Subscription.py | baca607c6e1308695327d9356e834e666bef4744 | [
"BSD-3-Clause"
] | permissive | https://github.com/vramana30/newSnia | 99a100c8d0275421f68f279fad2dead213775fdf | 720a96a54d4e4177856ab57977388f7aa1f00643 | refs/heads/master | 2020-03-24T22:49:01.502670 | 2018-08-14T07:17:39 | 2018-08-14T07:17:39 | 143,104,272 | 0 | 2 | null | true | 2018-08-01T04:34:22 | 2018-08-01T04:34:22 | 2018-07-11T04:59:26 | 2018-07-11T04:59:24 | 2,450 | 0 | 0 | 0 | null | false | null | # Copyright Notice:
# Copyright 2017 Distributed Management Task Force, Inc. All rights reserved.
# License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/Redfish-Interface-Emulator/LICENSE.md
# Example Resoruce Template
import copy
import strgen
_TEMPLATE = \
{
"@Redfish.Copyright":"Copyright 2014-2016 Distributed Management Task Force, Inc. (DMTF). All rights reserved.",
"@odata.context": "{rb}$metadata#EventDestination.EventDestination",
"@odata.id": "{rb}EventService/Subscriptions/{id}",
"@odata.type": "#EventDestination.v1_0_0.EventDestination",
"Id": "{id}",
"Name": "EventSubscription {id}",
"Destination": "http://www.dnsname.com/Destination{id}",
"EventTypes": [
"Alert"
],
"Context": "ABCDEFGHJLKJ",
"Protocol": "Redfish"
}
def get_Subscription_instance(wildcards):
"""
Instantiate and format the template
Arguments:
wildcard - A dictionary of wildcards strings and their repalcement values
"""
c = copy.deepcopy(_TEMPLATE)
c['@odata.context'] = c['@odata.context'].format(**wildcards)
c['@odata.id'] = c['@odata.id'].format(**wildcards)
c['Id'] = c['Id'].format(**wildcards)
c['Destination'] = c['Destination'].format(**wildcards)
c['Name'] = c['Name'].format(**wildcards)
return c
| UTF-8 | Python | false | false | 1,334 | py | 110 | Subscription.py | 59 | 0.663418 | 0.651424 | 0 | 41 | 31.536585 | 118 |
cocofile/mango-explorer | 6,966,436,996,631 | 399113eb584129a19a3c9ec430b22e6f02a75bb4 | 9f47af1548446fa09d947498f3416d6c88dfffdc | /mango/logmessages.py | d429aecad4a8b9d1015737e2605de6e0dd78202b | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | https://github.com/cocofile/mango-explorer | 281a831647b4d589510dff397b06869774899661 | 2560994b4e2014bdf0c899d833b8f45076619498 | refs/heads/main | 2023-08-27T18:49:47.507309 | 2021-11-14T01:39:11 | 2021-11-14T01:39:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:hello@blockworks.foundation)
import typing
from .idl import IdlParser, lazy_load_cached_idl_parser
def expand_log_messages(original_messages: typing.Sequence[str]) -> typing.Sequence[str]:
idl_parser: IdlParser = lazy_load_cached_idl_parser("mango_logs.json")
expanded_messages: typing.List[str] = []
parse_next_line: bool = False
for message in original_messages:
if parse_next_line:
encoded: str = message[len("Program log: "):]
name, parsed = idl_parser.decode_and_parse(encoded)
expanded_messages += ["Mango " + name + " " + str(parsed)]
parse_next_line = False
elif message == "Program log: mango-log":
parse_next_line = True
else:
expanded_messages += [message]
parse_next_line = False
return expanded_messages
| UTF-8 | Python | false | false | 1,620 | py | 60 | logmessages.py | 50 | 0.683591 | 0.682353 | 0 | 37 | 42.648649 | 104 |
SplashTheBatya/netologia_homeworks | 9,242,769,622,265 | c27bbf87b9c6e565f72086b6f23a6a31624fa5e3 | 24dc6ad7cbbb697f5ac3be26be90c6efa761c081 | /databases_2/m2m-relations/articles/admin.py | 47c4cca99bcc61cab668f2886eb13bf4474f3dd9 | [] | no_license | https://github.com/SplashTheBatya/netologia_homeworks | a1c2694f17bd31229d14c016d09d6ca4375c04e2 | 11bf6482285783a33685ae0053ae80ae5098f074 | refs/heads/master | 2023-03-26T10:57:33.193559 | 2021-03-19T22:06:46 | 2021-03-19T22:06:46 | 334,151,258 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from django.core.exceptions import ValidationError
from django.forms import BaseInlineFormSet
from .models import Article, Thematics, ArticleThematics
class ArticleThematicsInlineFormset(BaseInlineFormSet):
def clean(self):
counter = 0
for form in self.forms:
if form.cleaned_data.get('main_thematic', False):
counter += 1
if counter > 1:
raise ValidationError('Выберите только 1 основную тематику')
elif counter < 1:
raise ValidationError('Выберите хотя-бы 1 основную тематику')
return super().clean()
class ArticleThematicsInline(admin.TabularInline):
model = ArticleThematics
formset = ArticleThematicsInlineFormset
@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
inlines = [ArticleThematicsInline]
@admin.register(Thematics)
class ThematicsAdmin(admin.ModelAdmin):
pass
| UTF-8 | Python | false | false | 1,005 | py | 23 | admin.py | 19 | 0.715344 | 0.708995 | 0 | 35 | 26 | 73 |
grillzwitu/alx-higher_level_programming | 2,576,980,423,377 | 2f1043ff921d72c2aa3d3d6073bba411cb5a9795 | c8a243e0ee7ac36d9a046e0ca3a8a2454deb1f91 | /0x01-python-if_else_loops_functions/1-last_digit.py | 89db842fc6b123a3111b88adc3617b9759c9dae0 | [] | no_license | https://github.com/grillzwitu/alx-higher_level_programming | 3dbcdc0205e09334f648ebbdb5a4000fd9666b3b | 30db6c6796ca8540cc73a2b6055c3eee42d529f9 | refs/heads/main | 2023-08-11T13:00:39.220136 | 2021-09-23T06:57:57 | 2021-09-23T06:57:57 | 361,830,326 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
import random
number = random.randint(-10000, 10000)
if number < 0:
lastDigit = (int(repr(-number)[-1])) * -1
else:
lastDigit = int(repr(number)[-1])
if lastDigit > 5:
print("Last digit of {} is {} and is greater than 5"
.format(number, lastDigit))
elif lastDigit == 0:
print("Last digit of {} is {} and is 0"
.format(number, lastDigit))
elif lastDigit < 6 and not 0:
print("Last digit of {} is {} and is less than 6 and not 0"
.format(number, lastDigit))
| UTF-8 | Python | false | false | 527 | py | 48 | 1-last_digit.py | 45 | 0.611006 | 0.567362 | 0 | 18 | 28.277778 | 63 |
DesignInformaticsLab/adversarial_challenge | 13,889,924,272,470 | 4ddbb9fd6627c0bcd7bc3e656ea143b2fc79bf5c | 32613645e7e14eaa53ce0906d57c6ffd4a111ef5 | /imagenet_sample_code/centerloss.py | 72e37ae7101fcc38ec600488dc65423c534031ec | [] | no_license | https://github.com/DesignInformaticsLab/adversarial_challenge | a880be7d0f8a1bb8552d3ce7464cef6e9ab8d458 | 139baceaae81f1d61c523231390212792947a5c0 | refs/heads/master | 2020-03-28T00:51:47.532432 | 2018-10-10T10:56:05 | 2018-10-10T10:56:05 | 147,455,430 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | LAMBDA = 1e-1
CENTER_LOSS_ALPHA = 0.5
NUM_CLASSES = 200
batch_size = 128
import os
import numpy as np
import tensorflow as tf
import tflearn
slim = tf.contrib.slim
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
with tf.name_scope('input'):
input_images = tf.placeholder(tf.float32, shape=(batch_size,64,64,3), name='input_images')
labels = tf.placeholder(tf.int64, shape=(batch_size), name='labels')
global_step = tf.Variable(0, trainable=False, name='global_step')
def get_center_loss(features, labels, alpha, num_classes):
"""获取center loss及center的更新op
Arguments:
features: Tensor,表征样本特征,一般使用某个fc层的输出,shape应该为[batch_size, feature_length].
labels: Tensor,表征样本label,非one-hot编码,shape应为[batch_size].
alpha: 0-1之间的数字,控制样本类别中心的学习率,细节参考原文.
num_classes: 整数,表明总共有多少个类别,网络分类输出有多少个神经元这里就取多少.
Return:
loss: Tensor,可与softmax loss相加作为总的loss进行优化.
centers: Tensor,存储样本中心值的Tensor,仅查看样本中心存储的具体数值时有用.
centers_update_op: op,用于更新样本中心的op,在训练时需要同时运行该op,否则样本中心不会更新
"""
# 获取特征的维数,例如256维
len_features = features.get_shape()[1]
# 建立一个Variable,shape为[num_classes, len_features],用于存储整个网络的样本中心,
# 设置trainable=False是因为样本中心不是由梯度进行更新的
centers = tf.get_variable('centers', [num_classes, len_features], dtype=tf.float32,
initializer=tf.constant_initializer(0), trainable=False)
# 将label展开为一维的,输入如果已经是一维的,则该动作其实无必要
labels = tf.reshape(labels, [-1])
# 根据样本label,获取mini-batch中每一个样本对应的中心值
centers_batch = tf.gather(centers, labels)
# 计算loss
loss = tf.nn.l2_loss(features - centers_batch)
# 当前mini-batch的特征值与它们对应的中心值之间的差
diff = centers_batch - features
# 获取mini-batch中同一类别样本出现的次数,了解原理请参考原文公式(4)
unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
appear_times = tf.gather(unique_count, unique_idx)
appear_times = tf.reshape(appear_times, [-1, 1])
diff = diff / tf.cast((1 + appear_times), tf.float32)
diff = alpha * diff
centers_update_op = tf.scatter_sub(centers, labels, diff)
return loss, centers, centers_update_op
def inference(input_images, num_class=200, reuse=False):
with slim.arg_scope([slim.conv2d], kernel_size=3, padding='SAME'):
with slim.arg_scope([slim.max_pool2d], kernel_size=2):
x = slim.conv2d(input_images, num_outputs=64, scope='conv1_1')
x = slim.conv2d(x, num_outputs=64, scope='conv1_2')
x = slim.max_pool2d(x, scope='pool1')
x = slim.conv2d(x, num_outputs=128, scope='conv2_1')
x = slim.conv2d(x, num_outputs=128, scope='conv2_2')
x = slim.max_pool2d(x, scope='pool2')
x = slim.conv2d(x, num_outputs=256, scope='conv3_1')
x = slim.conv2d(x, num_outputs=256, scope='conv3_2')
x = slim.max_pool2d(x, scope='pool3')
x = slim.conv2d(x, num_outputs=512, scope='conv4_1')
x = slim.conv2d(x, num_outputs=512, scope='conv4_2')
x = slim.max_pool2d(x, scope='pool3')
x = slim.flatten(x, scope='flatten')
feature3 = x = slim.fully_connected(x, num_outputs=512, activation_fn=None, scope='fc0')
feature2 = x = slim.fully_connected(x, num_outputs=32, activation_fn=None, scope='fc1')
feature1 = x =slim.fully_connected(x, num_outputs=2, activation_fn=None, scope='fc2')
x = tflearn.prelu(feature3)
x = slim.fully_connected(x, num_outputs=num_class, activation_fn=None, scope='fc3')
feature_list = [feature1, feature2]
return x, feature_list
def build_network(input_images, labels, ratio=0.5, reuse=False):
logits, feature_list = inference(input_images, num_class=NUM_CLASSES)
with tf.name_scope('loss'):
with tf.variable_scope('center_loss1'):
center_loss1, centers1, centers_update_op1 = get_center_loss(feature_list[0], labels, CENTER_LOSS_ALPHA, NUM_CLASSES)
with tf.variable_scope('center_loss2'):
center_loss2, centers2, centers_update_op2 = get_center_loss(feature_list[1], labels, CENTER_LOSS_ALPHA, NUM_CLASSES)
with tf.name_scope('softmax_loss'):
softmax_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
with tf.name_scope('total_loss'):
total_loss = softmax_loss + ratio * center_loss1#(center_loss1*0.8 + center_loss2*0.2) * 4
with tf.name_scope('acc'):
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.arg_max(logits, 1), labels), tf.float32))
with tf.name_scope('loss/'):
tf.summary.scalar('CenterLoss1', center_loss1)
tf.summary.scalar('CenterLoss2', center_loss2)
tf.summary.scalar('SoftmaxLoss', softmax_loss)
tf.summary.scalar('TotalLoss', total_loss)
centers_update_op_list = [centers_update_op1, centers_update_op2]
return logits, feature_list, total_loss, accuracy, centers_update_op_list
with tf.variable_scope("build_network", reuse=False):
logits, feature_list, total_loss, accuracy, centers_update_op_list = build_network(input_images, labels, ratio=LAMBDA)
features = feature_list[0]
centers_update_op1 = centers_update_op_list[0]
centers_update_op2 = centers_update_op_list[1]
train_images = np.load('/home/doi6/Documents/Guangyu/tiny-imagenet-200/train_data.npy',encoding=('latin1')).item()['image'] / 255.
train_labels = np.load('/home/doi6/Documents/Guangyu/tiny-imagenet-200/train_data.npy',encoding=('latin1')).item()['label']
if 0:
train_image_5 = train_images[train_labels==5]
train_image_7 = train_images[train_labels==7]
train_images = np.concatenate([train_image_5, train_image_7],0)
train_labels = np.asarray( [0]*len(train_image_5) + [1]*len(train_image_7) )
from random import shuffle
idx = list(range(len(train_images)))
shuffle(idx)
train_images = train_images[idx]
train_labels = train_labels[idx]
test_images = train_images[:200]
test_labels = train_labels[:200]
val_images = np.load('/home/doi6/Documents/Guangyu/tiny-imagenet-200/val_data.npy',encoding=('latin1')).item()['image'] / 255.
val_labels = np.load('/home/doi6/Documents/Guangyu/tiny-imagenet-200/val_data.npy',encoding=('latin1')).item()['label']
if 0:
val_image_5 = val_images[val_labels==5]
val_image_7 = val_images[val_labels==7]
val_images = np.concatenate([val_image_5, val_image_7],0)
val_labels = np.asarray( [0]*len(val_image_5) + [1]*len(val_image_7) )
idx_v = list(range(len(val_images)))
shuffle(idx_v)
val_images = val_images[idx_v]
val_labels = val_labels[idx_v]
val_images = val_images[:200]
val_labels = val_labels[:200]
optimizer = tf.train.AdamOptimizer(0.0001)
with tf.control_dependencies([centers_update_op1, centers_update_op2]):
train_op = optimizer.minimize(total_loss, global_step=global_step)
summary_op = tf.summary.merge_all()
saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
step = sess.run(global_step)
for ep_i in range(50):
train_acc = []
train_loss = []
for jj in range(train_images.shape[0]//batch_size):
_, summary_str, train_acc_i, train_loss_i = sess.run(
[train_op, summary_op, accuracy, total_loss],
feed_dict={
input_images: train_images[jj*batch_size:(1+jj)*batch_size],
labels: train_labels[jj*batch_size:(1+jj)*batch_size]
})
train_acc += [train_acc_i]
train_loss += [train_loss_i]
testing_acc = sess.run(accuracy,feed_dict={input_images: val_images[0:128],labels: val_labels[0:128]})
print(("epoch: {}, train_acc:{:.4f}, train_loss:{:.4f},testing_acc:{:.4f}".
format(ep_i, np.mean(train_acc), np.mean(train_loss),testing_acc)))
saver.save(sess,'./model_save/center_loss.ckpt')
| UTF-8 | Python | false | false | 8,427 | py | 22 | centerloss.py | 4 | 0.659553 | 0.629649 | 0 | 194 | 39.319588 | 130 |
Laleee/poke-gan | 13,907,104,150,789 | 47067030c3a78561ad0556c206cfd9bdb26ad1af | bc18a8177b584c2c3b15520b0486d4ee3538a062 | /training.py | f83dc914f243431baf9956b329f4b58d878d24e1 | [
"MIT"
] | permissive | https://github.com/Laleee/poke-gan | a0b2ad3de8e98c48b3627601672fbb25687b1293 | d93506d249ac37fc971ca6504053a9d3461ebf84 | refs/heads/main | 2023-07-24T11:23:03.147461 | 2021-08-31T14:14:44 | 2021-08-31T15:06:48 | 392,968,909 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch
import wandb
from Trainer import Trainer
MAX_SUMMARY_IMAGES = 4
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
assert torch.cuda.is_available()
# LR = 2e-4
EPOCHS = 100
# BATCH_SIZE = 64
NUM_WORKERS = 4
# LAMBDA_L1 = 100
sweep_config = {
'method': 'bayes', # grid, random
'metric': {
'name': 'loss_g',
'goal': 'minimize'
},
'parameters': {
'lambda_l1': {
'values': [80, 90, 100, 110, 120, 130]
},
'batch_size': {
'values': [64]
},
'learning_rate': {
'values': [1e-5, 1e-4, 2e-4, 3e-4]
}
}
}
if __name__ == '__main__':
def train_wrapper():
wandb.init()
config = wandb.config
print(f'Config: {config}')
trainer = Trainer(
lr=config.learning_rate,
device=DEVICE,
batch_size=config.batch_size,
epochs=EPOCHS,
            lambda_l1=config.lambda_l1,  # L1 weight taken from the sweep's lambda_l1 values
dataloader_num_workers=NUM_WORKERS,
max_summary_images=MAX_SUMMARY_IMAGES
)
trainer.train()
sweep_id = wandb.sweep(sweep_config, project="poke-gan")
wandb.agent(sweep_id, train_wrapper)
| UTF-8 | Python | false | false | 1,232 | py | 9 | training.py | 6 | 0.530844 | 0.497565 | 0 | 54 | 21.814815 | 69 |
arXiv-research/DevLab-III-1 | 10,436,770,535,820 | d0da01e52b4047ee3f2debee6a8a8d4f7fc3074d | 35f9def6e6d327d3a4a4f2959024eab96f199f09 | /developer/lab/tools/NVIDIA/FasterTransformer/sample/tensorflow/tensorflow_bert/ckpt_quantization.py | 56a42063cf6c906980119a8c016c48d34fb17f9b | [
"Apache-2.0",
"CAL-1.0-Combined-Work-Exception",
"CAL-1.0",
"MIT",
"CC-BY-SA-4.0",
"LicenseRef-scancode-free-unknown"
] | permissive | https://github.com/arXiv-research/DevLab-III-1 | ec10aef27e1ca75f206fea11014da8784752e454 | c50cd2b9154c83c3db5e4a11b9e8874f7fb8afa2 | refs/heads/main | 2023-04-16T19:24:58.758519 | 2021-04-28T20:21:23 | 2021-04-28T20:21:23 | 362,599,929 | 2 | 0 | MIT | true | 2021-04-28T20:36:11 | 2021-04-28T20:36:11 | 2021-04-28T20:35:45 | 2021-04-28T20:23:46 | 560,071 | 0 | 0 | 0 | null | false | false | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# usage example
#python ckpt_quantization.py --init_checkpoint=squad_model/QAT_noresidualQuant/model.ckpt-5474 --quantized_checkpoint=squad_model/QAT_noresidualQuant_quantized/model.ckpt
import tensorflow as tf
import numpy as np
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.python.ops import io_ops
from tensorflow.python.training.saver import BaseSaverBuilder
import os
import re
build_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../lib')
transformer_op_module = tf.load_op_library(
os.path.join(build_path, 'libtf_weight_quantize.so'))
ACTIVATION_AMAX_NUM = 80
INT8O_GEMM_NUM = 8
TRT_FUSED_MHA_AMAX_NUM = 3
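# Layout of the per-layer amaxList written below (derived from this script):
#   [0 .. ACTIVATION_AMAX_NUM)            activation scales, 4 values per quantizer
#                                         (amax, amax/127, amax/127/127, 127/amax)
#   [ACTIVATION_AMAX_NUM .. +9*hidden)    per-channel kernel amaxes for the 6 GEMMs
#                                         (9 * hidden_size values in total)
#   next INT8O_GEMM_NUM values            int8-output GEMM dequantization scales
#   last TRT_FUSED_MHA_AMAX_NUM values    amaxes for the TensorRT fused MHA plugin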
def checkpoint_quantization(in_checkpoint_file, out_checkpoint_file, per_channel_quantization):
var_list = checkpoint_utils.list_variables(tf.flags.FLAGS.init_checkpoint)
def init_graph():
restore_vars = []
layer_num = 0
regex = re.compile('layer_\d+')
amaxTotalNum = 0
for name, shape in var_list:
var = checkpoint_utils.load_variable(tf.flags.FLAGS.init_checkpoint, name)
if "intermediate/dense/kernel" in name and amaxTotalNum == 0:
amaxTotalNum = ACTIVATION_AMAX_NUM + 9*shape[0] + INT8O_GEMM_NUM + TRT_FUSED_MHA_AMAX_NUM
print(amaxTotalNum, shape[0])
recon_dtype = var.dtype
restore_vars.append(tf.get_variable(name, shape=shape, dtype=var.dtype))
tmp = regex.findall(name)
if len(tmp) < 1:
continue
num_tmp = int(tmp[0].replace("layer_", ""))
if layer_num < num_tmp:
layer_num = num_tmp
layer_num = layer_num + 1
#add new var for amax
for i in range(layer_num):
tf.get_variable("bert/encoder/layer_{}/amaxList".format(i), shape=[amaxTotalNum], dtype=tf.float32)
return layer_num, amaxTotalNum, restore_vars
layer_num, amaxTotalNum, restore_vars = init_graph()
restorer = tf.train.Saver(restore_vars)
saver = tf.train.Saver()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
restorer.restore(sess, in_checkpoint_file)
kernel_name_list = ["attention/self/query", "attention/self/key", "attention/self/value", "attention/output/dense", "intermediate/dense", "output/dense"]
#input_scale, 0
amax_name_list = ["attention/self/query/input_quantizer",
#Q_aftergemm_scale, 1
"attention/self/query/aftergemm_quantizer",
#Qbias_scale, 2
"attention/self/matmul_q_input_quantizer",
#K_aftergemm_scale, 3
"attention/self/key/aftergemm_quantizer",
#Kbias_scale, 4
"attention/self/matmul_k_input_quantizer",
#V_aftergemm_scale, 5
"attention/self/value/aftergemm_quantizer",
#Vbias_scale, 6
"attention/self/matmul_v_input_quantizer",
#bmm1_scale, 7
"attention/self/softmax_input_quantizer",
#Softmax_scale, 8
"attention/self/matmul_a_input_quantizer",
#bmm2_scale, 9
"attention/output/dense/input_quantizer",
#Proj_aftergemm_scale, 10
"attention/output/dense/aftergemm_quantizer",
#ProjBiasNorm_scale, 11
"intermediate/dense/input_quantizer",
#FC1_aftergemm_scale, 12
"intermediate/dense/aftergemm_quantizer",
#F1Bias_scale, 13
"output/dense/input_quantizer",
#FC2_aftergemm_scale, 14
"output/dense/aftergemm_quantizer",
#F2Bias_scale, 15
"special_F2Bias_scale",
]
int8O_gemm_weight_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
#Q_aftergemm
int8O_gemm_weight_list = ["attention/self/query",
#K_aftergemm
"attention/self/key",
#V_aftergemm
"attention/self/value",
#bmm1_aftergemm
"attention/self/matmul_k_input_quantizer",
#bmm2_aftergemm
"attention/self/matmul_v_input_quantizer",
#Proj_aftergemm
"attention/output/dense",
#FC1_aftergemm
"intermediate/dense",
#FC2_aftergemm
"output/dense"]
int8O_gemm_input_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
#Q_aftergemm
int8O_gemm_input_list = ["attention/self/query/input_quantizer",
#K_aftergemm
"attention/self/key/input_quantizer",
#V_aftergemm
"attention/self/value/input_quantizer",
#bmm1_aftergemm
"attention/self/matmul_q_input_quantizer",
#bmm2_aftergemm
"attention/self/matmul_a_input_quantizer",
#Proj_aftergemm
"attention/output/dense/input_quantizer",
#FC1_aftergemm
"intermediate/dense/input_quantizer",
#FC2_aftergemm
"output/dense/input_quantizer"]
int8O_gemm_output_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
#Q_aftergemm
int8O_gemm_output_list = ["attention/self/query/aftergemm_quantizer",
#K_aftergemm
"attention/self/key/aftergemm_quantizer",
#V_aftergemm
"attention/self/value/aftergemm_quantizer",
#bmm1_aftergemm
"attention/self/softmax_input_quantizer",
#bmm2_aftergemm
"attention/output/dense/input_quantizer",
#Proj_aftergemm
"attention/output/dense/aftergemm_quantizer",
#FC1_aftergemm
"intermediate/dense/aftergemm_quantizer",
#FC2_aftergemm
"output/dense/aftergemm_quantizer"]
factor = 1000000.0
for i in range(layer_num):
amaxList = np.zeros([amaxTotalNum])
amax_id = 0
for amax_name in amax_name_list:
if amax_name == "special_F2Bias_scale":
if i != layer_num - 1:
name = "bert/encoder/layer_{}/{}/quant_max:0".format(i+1, amax_name_list[0])
quant_max = checkpoint_utils.load_variable(tf.flags.FLAGS.init_checkpoint, name)
name = "bert/encoder/layer_{}/{}/quant_min:0".format(i+1, amax_name_list[0])
quant_min = checkpoint_utils.load_variable(tf.flags.FLAGS.init_checkpoint, name)
if abs(quant_max) > abs(quant_min):
amax = abs(quant_max)#int(abs(quant_max)*factor)/factor
else:
amax = abs(quant_min)#int(abs(quant_min)*factor)/factor
else:
#not used, placeholder
amax = 1.0
amaxList[amax_id] = amax
amax_id += 1
amaxList[amax_id] = amax/127.0
amax_id += 1
amaxList[amax_id] = amax/127.0/127.0
amax_id += 1
amaxList[amax_id] = 127.0/amax
amax_id += 1
continue
name = "bert/encoder/layer_{}/{}/quant_max:0".format(i, amax_name)
quant_max = checkpoint_utils.load_variable(tf.flags.FLAGS.init_checkpoint, name)
name = "bert/encoder/layer_{}/{}/quant_min:0".format(i, amax_name)
quant_min = checkpoint_utils.load_variable(tf.flags.FLAGS.init_checkpoint, name)
if abs(quant_max) > abs(quant_min):
amax = abs(quant_max)#int(abs(quant_max)*factor)/factor
else:
amax = abs(quant_min)#int(abs(quant_min)*factor)/factor
if amax_name in int8O_gemm_input_list:
int8O_gemm_input_amax_list[int8O_gemm_input_list.index(amax_name)] = amax
if amax_name == "attention/self/query/input_quantizer":
int8O_gemm_input_amax_list[int8O_gemm_input_list.index("attention/self/key/input_quantizer")] = amax
int8O_gemm_input_amax_list[int8O_gemm_input_list.index("attention/self/value/input_quantizer")] = amax
if amax_name in int8O_gemm_output_list:
int8O_gemm_output_amax_list[int8O_gemm_output_list.index(amax_name)] = amax
if amax_name in int8O_gemm_weight_list:
int8O_gemm_weight_amax_list[int8O_gemm_weight_list.index(amax_name)] = amax
amaxList[amax_id] = amax
amax_id += 1
amaxList[amax_id] = amax/127.0
amax_id += 1
amaxList[amax_id] = amax/127.0/127.0
amax_id += 1
amaxList[amax_id] = 127.0/amax
amax_id += 1
print("done process layer_{} activation amax".format(i))
#kernel amax starts from ACTIVATION_AMAX_NUM
amax_id = ACTIVATION_AMAX_NUM
for kernel_id, kernel_name in enumerate(kernel_name_list):
kernel = tf.get_default_graph().get_tensor_by_name("bert/encoder/layer_{}/{}/kernel:0".format(i, kernel_name))
name = "bert/encoder/layer_{}/{}/kernel_quantizer/quant_max:0".format(i, kernel_name)
quant_max2 = tf.convert_to_tensor(checkpoint_utils.load_variable(tf.flags.FLAGS.init_checkpoint, name))
name = "bert/encoder/layer_{}/{}/kernel_quantizer/quant_min:0".format(i, kernel_name)
quant_min2 = tf.convert_to_tensor(checkpoint_utils.load_variable(tf.flags.FLAGS.init_checkpoint, name))
kernel_processed, quant_max_processed = transformer_op_module.weight_quantize(kernel, quant_max2, quant_min2, per_channel_quantization = per_channel_quantization)
kernel_processed_, quant_max_processed_ = sess.run([kernel_processed, quant_max_processed])
sess.run(tf.assign(kernel, kernel_processed_))
if kernel_name in int8O_gemm_weight_list:
int8O_gemm_weight_amax_list[int8O_gemm_weight_list.index(kernel_name)] = quant_max_processed_[0]
for e in quant_max_processed_:
amaxList[amax_id] = e
amax_id += 1
#for int8O gemm deQuant
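            # Scale for dequantizing an int8-in/int8-out GEMM: (in_amax/127) times
            # (weight_amax/127), normalized by the output step (out_amax/127), which
            # reduces to in_amax*weight_amax / (127*out_amax) as used below.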
for j in range(INT8O_GEMM_NUM):
amaxList[amax_id] = (int8O_gemm_input_amax_list[j]*int8O_gemm_weight_amax_list[j])/(127.0*int8O_gemm_output_amax_list[j])
amax_id += 1
#for trt fused MHA amax
#### QKV_addBias_amax
amaxList[amax_id] = np.maximum(np.maximum(amaxList[8],amaxList[16]), amaxList[24])
amax_id += 1
#### softmax amax
amaxList[amax_id] = amaxList[32]
amax_id += 1
#### bmm2 amax
amaxList[amax_id] = amaxList[36]
amax_id += 1
amaxL = tf.get_default_graph().get_tensor_by_name("bert/encoder/layer_{}/amaxList:0".format(i))
sess.run(tf.assign(amaxL, amaxList))
print("done process layer_{} kernel weight".format(i))
saver.save(sess, out_checkpoint_file)
if __name__ == '__main__':
tf.flags.DEFINE_string("quantized_checkpoint", None, "quantized checkpoint file")
tf.flags.DEFINE_string("init_checkpoint", None, "initial checkpoint file")
tf.flags.DEFINE_integer("int8_mode", 1, "int8 mode in FasterTransformer, default as 1")
if tf.flags.FLAGS.int8_mode == 1:
per_channel_quantization = True
elif tf.flags.FLAGS.int8_mode == 2 or tf.flags.FLAGS.int8_mode == 3:
per_channel_quantization = False
else:
raise ValueError("wrong int8_mode argument")
quantized_checkpoint_folder = "/".join(tf.flags.FLAGS.quantized_checkpoint.split("/")[:-1])
if not os.path.exists(quantized_checkpoint_folder):
os.system("mkdir -p " + quantized_checkpoint_folder)
checkpoint_quantization(tf.flags.FLAGS.init_checkpoint, tf.flags.FLAGS.quantized_checkpoint, per_channel_quantization)
| UTF-8 | Python | false | false | 14,605 | py | 1,491 | ckpt_quantization.py | 957 | 0.516535 | 0.502568 | 0 | 276 | 51.916667 | 178 |
dengjunchao/ks_orders | 9,577,777,085,888 | 34fd1ba3ec1befd5cc6b19fcdae2e3ef2a957a0d | 7b23b30f67455249681da56d9bee45cb386a2585 | /libs/query_orders.py | 1a1698f52a866d33fc230fbf7761bd5db88f6864 | [] | no_license | https://github.com/dengjunchao/ks_orders | 829454ac4676498bd025c6ccda3314eb721be970 | f5e07d119f56bc2c801676b2505d4377a2bfdda9 | refs/heads/master | 2022-12-09T17:39:59.059285 | 2020-03-04T17:55:47 | 2020-03-04T17:55:47 | 244,966,718 | 2 | 0 | null | false | 2022-12-08T06:19:08 | 2020-03-04T17:49:53 | 2020-03-04T18:01:36 | 2022-12-08T06:19:08 | 114 | 1 | 0 | 7 | Python | false | false | """
Order cancellation (unsubscribe) module
"""
from flask import request
from config import Status
from flask_restful import Resource
from ky_omm.common.query_order import QueryOrder
class QueryOrders(Resource):
def post(self):
"""
        Query an order number.
        Business logic: check that the input matches the expected video-id / order-id length.
        If it does, call QueryOrder.query_order().
        Inspect the query result: count == 1 means an order id, greater than 1 means a video id.
        :return: status code and data
"""
if request.json:
            orders_id = (request.json.get("orders_id") or "").strip()
            if not orders_id:
                return {'status': Status.FAILED, 'data': "video_type or order_id none"}
query_result = QueryOrder(orders_id=orders_id).query_order()
if query_result.get('count', None) is None:
return {'status': Status.FAILED, 'data': "订单号不存在"}
return {'status': Status.SUCCEED, 'data': query_result.get('data')}
else:
return {'status': Status.FAILED, 'msg': 'no data'}
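        # Example request body this endpoint expects (illustrative values only):
        #   POST /... with Content-Type: application/json
        #   {"orders_id": "202001010001"}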
| UTF-8 | Python | false | false | 1,119 | py | 25 | query_orders.py | 19 | 0.59171 | 0.589637 | 0 | 34 | 27.382353 | 87 |
ncfeo/mystie | 7,816,840,494,725 | 635f1354bfcc8b52df9aae134430c4b7b43d97e0 | aa25e9abeb99d499a57024a3e604187b6d89faf7 | /blog/migrations/0003_blogcomment.py | 211e2bc71fb5a9a4fc94c51f1bb19a68e84fb3b6 | [] | no_license | https://github.com/ncfeo/mystie | 4daa2a483808ce0a45f65280d28e7b3056a41ceb | 1f7d295a8a97fae65a680a3a5db236a35a15cb33 | refs/heads/master | 2016-09-24T00:03:27.499958 | 2016-09-13T16:29:46 | 2016-09-13T16:29:46 | 68,126,256 | 0 | 0 | null | false | 2016-09-21T05:26:33 | 2016-09-13T16:28:17 | 2016-09-13T16:34:00 | 2016-09-21T05:26:33 | 455 | 0 | 0 | 1 | JavaScript | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-31 14:42
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20160830_0732'),
]
operations = [
migrations.CreateModel(
name='BlogComment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_name', models.CharField(max_length=100, verbose_name=b'\xe8\xaf\x84\xe8\xae\xba\xe8\x80\x85\xe5\x90\x8d\xe5\xad\x97')),
('user_email', models.EmailField(max_length=255, verbose_name=b'\xe8\xaf\x84\xe8\xae\xba\xe8\x80\x85\xe9\x82\xae\xe7\xae\xb1')),
('body', models.TextField(verbose_name=b'\xe8\xaf\x84\xe8\xae\xba\xe5\x86\x85\xe5\xae\xb9')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name=b'\xe8\xaf\x84\xe8\xae\xba\xe5\x8f\x91\xe8\xa1\xa8\xe6\x97\xb6\xe9\x97\xb4')),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Article', verbose_name=b'\xe8\xaf\x84\xe8\xae\xba\xe6\x89\x80\xe5\xb1\x9e\xe6\x96\x87\xe7\xab\xa0')),
],
),
]
| UTF-8 | Python | false | false | 1,318 | py | 7 | 0003_blogcomment.py | 5 | 0.632018 | 0.543247 | 0.059181 | 27 | 47.814815 | 201 |
dzintars2/mongoDB_sample | 6,760,278,547,128 | 5205ce115cd8f74b5c5ffbabb222ffc6e59df6e8 | 954123b7360d9671f843e854d41aef19288b75a7 | /MD3.py | dca7167c448fbbc6e6feb4dd79d62224368486f3 | [] | no_license | https://github.com/dzintars2/mongoDB_sample | 122044e9662235c25a4a076c6732734db080afff | df39cf6905e202e591b9e219b57d5c4c07b471d6 | refs/heads/master | 2020-04-16T08:19:58.853081 | 2019-01-12T18:21:15 | 2019-01-12T18:21:15 | 165,420,921 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #MongoDB
import os, sys, json, pymongo, random
from json import dumps
from flask import Flask, g, Response, request, render_template
from pathlib import Path
from dateutil import parser
import sampleData
from faker import Faker  # random (fake) data generator
app = Flask(__name__)
app.debug = True
# DB: MongoDB (local instance)
client = pymongo.MongoClient("localhost", 27017)
db = client.test
mydb = client["lietvediba"]
tableDokVeidi = mydb["dokumentu_veidi"]
tableDarbinieki = mydb["darbinieki"]
tableUznemumi = mydb["uznemumi"]
tableDokumenti = mydb["dokumenti"]
fake = Faker('lv_LV')  # fake-data generator with the Latvian locale
@app.teardown_appcontext
def close_db(error):
    # appears to be left over from a Neo4j-based template; harmless when no neo4j_db is set
    if hasattr(g, 'neo4j_db'):
        g.neo4j_db.close()
@app.route("/")
def get_index():
text = '<img src="https://webassets.mongodb.com/_com_assets/cms/mongodb-logo-rgb-j6w271g1xn.jpg" width="100%">'
return render_template('index.html', saturs=text)
# generation of classifier (lookup) records
@app.route("/generateData")
def get_generateData():
text = '<br>'
tableDarbinieki.insert_many(sampleData.datiPersonas)
tableDokVeidi.insert_many(sampleData.datiDokumentuVeidi)
tableUznemumi.insert_many(sampleData.datiUznemumi)
get_generateDocuments()
text = '<br><table class="table table-hover table-sm"><thead><tr><th>Izveidota kolekcija</th><th>Ierakstu skaits</th></tr></thead><tbody>'
    # print each collection and the number of records it holds
for collection in mydb.list_collection_names():
text += '<tr><td>' + collection + '</td><td>'+str(mydb[collection].count()) + '</td></tr>'
text += '</tbody></table>'
return render_template('index.html', saturs='<b>Klasifikatoru datu ģenerēšana</b>'+text)
# generation of document records
@app.route("/generateDocuments")
def get_generateDocuments():
text = '<br>'
mongoData = []
for i in range(500):
sys.stdout.write("\rUzģenerēti %d ieraksti no 500" % i)
sys.stdout.flush()
isParent = 1
while (isParent==1):
            # only document types that have a "parent_id" are of interest, i.e. those that are not document-type groups
dokumentaTips = tableDokVeidi.aggregate([{ "$sample": {"size": 1} }]).next()
if ('parent_id' in dokumentaTips): isParent = 0
nejaussDarbinieks = tableDarbinieki.aggregate([{ "$sample": {"size": 1} }]).next()
datums = fake.date_between(start_date='-1y', end_date='now')
datums = parser.parse(str(datums))
if (dokumentaTips["_id"]=="rikojums_darb"):
rikojumaNr = "2018-" + dokumentaTips["case_id"] + "/"+str(random.randint(1, 999))
apraksts = fake.text()
data = {"dokumentaTips": dokumentaTips["_id"], "persona": nejaussDarbinieks["_id"],
"numurs": rikojumaNr, "temats": "Rīkojums par darbu", "datums":datums, "apraksts": apraksts}
elif (dokumentaTips["_id"]=="rikojums_visp"):
rikojumaNr = "2018-" + dokumentaTips["case_id"] + "/"+str(random.randint(1, 999))
data = {"dokumentaTips": dokumentaTips["_id"], "persona": nejaussDarbinieks["_id"],
"numurs": rikojumaNr, "temats": "Rīkojums", "datums": datums}
elif (dokumentaTips["_id"]=="ligums_darba"):
epastaAdrese = fake.email()
amats = fake.job()
bankasKonts = fake.iban()
data = {"dokumentaTips": dokumentaTips["_id"], "ligumsledzejs": nejaussDarbinieks["_id"],
"epasts": epastaAdrese, "temats": "Darba līgums", "datums": datums,
"amats": amats, "bankas_konts":bankasKonts}
elif (dokumentaTips["_id"]=="ligums_kredits" or dokumentaTips["_id"]=="ligums_saimn"):
ligumsledzejs = tableUznemumi.aggregate([{ "$sample": {"size": 1} }]).next()
if (dokumentaTips["_id"]=="ligums_kredits"):
summa = (random.randint(1, 1000))*1000
temats = "Aizdevuma līgums"
else:
summa = (random.randint(1, 9999999))/100
temats = "Saimnieciskais līgums"
apraksts = fake.text()
apmaksasTermins = fake.date_between(start_date='+1y', end_date='+20y')
apmaksasTermins = parser.parse(str(apmaksasTermins))
ligumsledzejaPersona = fake.name()
bankasKonts = fake.iban()
pastaAdrese = fake.address()
epastaAdrese = fake.email()
amats = fake.job()
data = {"dokumentaTips": dokumentaTips["_id"], "ligumsledzejs": ligumsledzejs["_id"], "pasta_adrese":pastaAdrese,
"epasts": epastaAdrese, "darbinieks": nejaussDarbinieks["_id"],
"summa": summa, "temats": temats, "datums": datums, "termins": apmaksasTermins, "apraksts": apraksts,
"ligumsledzejaPersona": ligumsledzejaPersona, "amats": amats, "bankas_konts":bankasKonts}
mongoData.append(data)
tableDokumenti.insert_many(mongoData)
return render_template('index.html', saturs='<b>Dokumentu datu ģenerēšana</b>'+text)
@app.route("/deleteData")
def get_deleteData():
tableDarbinieki.drop()
tableDokVeidi.drop()
tableUznemumi.drop()
tableDokumenti.drop()
return render_template('index.html', saturs='<b>Datu dzēšana</b><br>Kolekcijas izdzēstas')
# information about the number of records in each collection
@app.route("/statistics")
def get_statistics():
text = '<br><table class="table table-hover table-sm"><thead><tr><th>Kolekcija</th><th>Ierakstu skaits</th></tr></thead><tbody>'
for collection in mydb.list_collection_names():
text += '<tr><td>' + collection + '</td><td>'+str(mydb[collection].count()) + '</td></tr>'
text += '</tbody></table><a class="nav-link" href="/generateDocuments">Papildus dokumentu ģenerēšana</a>'
return render_template('index.html', saturs='<b>Statistika</b>'+text)
@app.route("/report1")
def get_report1():
apraksts = """<b>1.atskaite</b><br>
Kopsavilkums pa dokumentiem<br>
(grupēts pa dokumementiem - summa (ja tāda ir noteikta), skaits (dati sakārtoti pēc dokumentu skaita)"""
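    # Pipeline below: $lookup joins the document-type collection, $group counts
    # documents and sums their amounts per type, and $sort orders by count (desc).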
result = tableDokumenti.aggregate([
{"$lookup": {
"from": "dokumentu_veidi",
"localField": "dokumentaTips",
"foreignField": "_id",
"as": "dokumenta_veidi_dati"
}
},
{ "$group": {
"_id": "$dokumentaTips",
"skaits": { "$sum": 1 },
"summa": {"$sum": "$summa"},
"dokumentaTipaNosaukums": {"$min":"$dokumenta_veidi_dati.name"}}
},
{ "$sort":{
"skaits": -1}
}])
table = '<br><table class="table table-hover table-sm"><thead><tr><th>Dokumenta tips</th><th>Dokumentu skaits</th><th>Dokumentos norādīta kopsumma (ja norādīts)</th></tr></thead><tbody>'
for ieraksts in result:
table += '<tr><td>'+ieraksts["dokumentaTipaNosaukums"][0]+'</td><td>'+str(ieraksts["skaits"])+'</td><td>'+str('{:5.2f}'.format(ieraksts["summa"]))+'</td></tr>'
table += '</tbody></table>'
return render_template('index.html', saturs=apraksts+table)
@app.route("/report2")
def get_report2():
apraksts = """<b>2.atskaite</b><br>
Aizdevuma līgumu kopsavilkums<br>
Atmaksas termiņa gads, atmaksājamā summa. Iekļauti aizdevumi ar atmaksas termiņu līdz 31.12.2029"""
table = '<br><table class="table table-hover table-sm"><thead><tr><th>Termiņs (gads)</th><th>Atmaksājamā summa</th></tr></thead><tbody>'
datums = parser.parse(str("2030-01-01"))
result = tableDokumenti.aggregate([
{"$match": {"dokumentaTips":"ligums_kredits", "termins": { "$lte" : datums}}},
{ "$group": {
"_id": { "gads": { "$year": "$termins" } },
"summa": {"$sum": "$summa"}}},
{"$sort":{"_id": 1}}
])
for ieraksts in result:
table += '<tr><td>'+str(ieraksts["_id"]["gads"])+'.gads</td><td>'+str(ieraksts["summa"])+'</td></tr>'
table += '</tbody></table>'
return render_template('index.html', saturs=apraksts+table)
@app.route("/report3")
def get_report3():
apraksts = """<b>3.atskaite</b><br>
Kopsavilkums pa rīkojumiem (darbinieks, rīkojumu skaits, TOP10 darbinieki pēc rīkojumu skaita)"""
table = '<br><table class="table table-hover table-sm"><thead><tr><th>#</th><th>Darbinieks</th><th>Rīkojumu skaits</th></tr></thead><tbody>'
result = tableDokumenti.aggregate([
{"$lookup": {
"from": "darbinieki",
"localField": "persona",
"foreignField": "_id",
"as": "darbinieks"
}
},
{"$lookup": {
"from": "dokumentu_veidi",
"localField": "dokumentaTips",
"foreignField": "_id",
"as": "dokumenta_veidi_dati"
}
},
{"$addFields": {
"dok_veida_grupa": "$dokumenta_veidi_dati.parent_id",
"darbinieks_vards": "$darbinieks.name"
}},
{"$match": {"dok_veida_grupa": "rikojums"}},
{"$group":{
"_id": "$persona",
"vards_uzvards": {"$min":"$darbinieks_vards"},
"dokumentu_skaits": {"$sum": 1}
}
},
{"$sort":{"dokumentu_skaits": -1}},
{"$limit": 10}
])
i = 1
for ieraksts in result:
table += '<tr><td>'+str(i)+'</td><td>' + ieraksts["vards_uzvards"][0] + '</td><td>' + str(ieraksts["dokumentu_skaits"]) + '</td></tr>'
i += 1
table += '</tbody></table>'
return render_template('index.html', saturs=apraksts+table)
@app.route("/report4")
def get_report4():
apraksts = """<b>4.atskaite</b><br>
Līgumi pa līgumslēdzējiem<br>
Darījuma partnera nosaukums, līguma veids, līgumu slēgšanas periods (līguma datums min-max),
līgumu skaits, TOP10 partneri pēc līgumu skaita
"""
table = '<br><table class="table table-hover table-sm"><thead><tr><th>#</th><th>Partneris</th><th>Līguma veids</th><th>Līgumu periods</th><th>Līgumu skaits</th><th>Līgumu kopsumma</th></tr></thead><tbody>'
result = tableDokumenti.aggregate([
{"$lookup": {
"from": "uznemumi",
"localField": "ligumsledzejs",
"foreignField": "_id",
"as": "uznemums"
}
},
{"$lookup": {
"from": "dokumentu_veidi",
"localField": "dokumentaTips",
"foreignField": "_id",
"as": "dokumenta_veidi_dati"
}
},
{"$addFields": {
"dok_veida_grupa": "$dokumenta_veidi_dati.parent_id",
"dok_veida_nosaukums": "$dokumenta_veidi_dati.name",
"partneris_nos": "$uznemums.name_in_quotes",
"partnera_tips": "$uznemums.type"
}},
{"$match":
{"dokumentaTips": {"$in": ["ligums_kredits","ligums_saimn"]}}
},
{"$group":{
"_id": {"partneraId":"$ligumsledzejs",
"dok_veids": "$dokumentaTips"},
"dok_veida_nosaukums": {"$min": "$dok_veida_nosaukums"},
"partneris": {"$min":"$partneris_nos"},
"partn_tips": {"$min":"$partnera_tips"},
"ligumsumma": {"$sum":"$summa"},
"datums_no": {"$min":"$datums"},
"datums_lidz": {"$max": "$datums"},
"dokumentu_skaits": {"$sum": 1}
}
},
{"$sort":{"dokumentu_skaits": -1}},
{"$limit": 10}
])
i = 1
for ieraksts in result:
table += ('<tr><td>'+str(i)+'</td><td>'+ieraksts["partneris"][0]+' '+ieraksts["partn_tips"][0]+'</td><td>'+
ieraksts["dok_veida_nosaukums"][0] + '</td><td>'+ str(ieraksts["datums_no"])[0:10] + ' - ' + str(ieraksts["datums_lidz"])[0:10] +
'</td><td>'+str(ieraksts["dokumentu_skaits"]) + '</td><td>'+str('{:5.2f}'.format(ieraksts["ligumsumma"])) + '</td></tr>')
i += 1
table += '</tbody></table>'
return render_template('index.html', saturs=apraksts+table)
@app.route("/report5")
def get_report5():
apraksts = """<b>5.atskaite</b><br>
Lielākie līgumi pa darbiniekiem, kuri tos ir slēguši.
Sakārtots pēc lielākās līgumsummas. TOP10 darbinieki.
"""
table = '<br><table class="table table-hover table-sm"><thead><tr><th>#</th><th>Darbinieks</th><th>Līgumu skaits</th><th>Līgumsummu kopsumma</th><th>Maksimālā līgumsumma</th></tr></thead><tbody>'
result = tableDokumenti.aggregate([
{"$lookup": {
"from": "darbinieki",
"localField": "darbinieks",
"foreignField": "_id",
"as": "darbinieka_dati"
}
},
{"$lookup": {
"from": "dokumentu_veidi",
"localField": "dokumentaTips",
"foreignField": "_id",
"as": "dokumenta_veidi_dati"
}
},
{"$addFields": {
"dok_veida_grupa": "$dokumenta_veidi_dati.parent_id",
"darbinieka_vards": "$darbinieka_dati.name"
}},
{"$match":
{"dokumentaTips": {"$in": ["ligums_kredits","ligums_saimn"]}}
},
{"$group":{
"_id": {"partneraId":"$darbinieks"},
"darbinieks": {"$min":"$darbinieka_vards"},
"ligumu_kopsumma": {"$sum":"$summa"},
"max_ligums": {"$max":"$summa"},
"dokumentu_skaits": {"$sum": 1}
}
},
{"$sort":{"max_ligums": -1}},
{"$limit": 10}
])
i = 1
for ieraksts in result:
table += ('<tr><td>'+str(i)+'</td><td>'+ieraksts["darbinieks"][0] + '</td><td>' +
str(ieraksts["dokumentu_skaits"]) + '</td><td>' + str('{:5.2f}'.format(ieraksts["ligumu_kopsumma"])) +
'</td><td>' + str('{:5.2f}'.format(ieraksts["max_ligums"])) + '</td></tr>')
i += 1
table += '</tbody></table>'
return render_template('index.html', saturs=apraksts+table)
if __name__ == '__main__':
app.run(port=8080)
| UTF-8 | Python | false | false | 12,747 | py | 4 | MD3.py | 2 | 0.619262 | 0.60749 | 0 | 322 | 38.304348 | 206 |
Bojanovski/ChessANN | 3,006,477,112,209 | f8a221702b659cec01321f4c6f681ab6bbc33f82 | 72bf677ee746546f08cc9b65430443dd0d67d2e5 | /Code/dependencies/chess/polyglot.py | 9cb06fde1184fd7925208c495b737634beae6665 | [
"MIT"
] | permissive | https://github.com/Bojanovski/ChessANN | e9cb63800958d37ebeca7c1868d810f96a1754cd | 0181c80c36031f95925f2649c91343664ee00c67 | refs/heads/master | 2020-06-13T22:07:47.918890 | 2017-07-22T16:05:31 | 2017-07-22T16:05:31 | 75,547,719 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of the python-chess library.
# Copyright (C) 2012-2016 Niklas Fiekas <niklas.fiekas@backscattering.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import chess
import struct
import os
import mmap
import random
try:
import backport_collections as collections
except ImportError:
import collections
ENTRY_STRUCT = struct.Struct(">QHHI")
class Entry(collections.namedtuple("Entry", ["key", "raw_move", "weight", "learn"])):
"""An entry from a polyglot opening book."""
def move(self, chess960=False):
"""Gets the move (as a :class:`~chess.Move` object)."""
# Extract source and target square.
to_square = self.raw_move & 0x3f
from_square = (self.raw_move >> 6) & 0x3f
# Extract the promotion type.
promotion_part = (self.raw_move >> 12) & 0x7
promotion = promotion_part + 1 if promotion_part else None
# Convert castling moves.
if not chess960 and not promotion:
if from_square == chess.E1:
if to_square == chess.H1:
return chess.Move(chess.E1, chess.G1)
elif to_square == chess.A1:
return chess.Move(chess.E1, chess.C1)
elif from_square == chess.E8:
if to_square == chess.H8:
return chess.Move(chess.E8, chess.G8)
elif to_square == chess.A8:
return chess.Move(chess.E8, chess.C8)
return chess.Move(from_square, to_square, promotion)
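    # Worked example (illustrative): raw_move 0x0107 has to_square = 0x07 (h1),
    # from_square = (0x0107 >> 6) & 0x3f = 0x04 (e1) and no promotion bits set,
    # so it decodes as the castling-encoded e1h1 and returns Move(E1, G1).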
class MemoryMappedReader(object):
"""Maps a polyglot opening book to memory."""
def __init__(self, filename):
self.fd = os.open(filename, os.O_RDONLY | os.O_BINARY if hasattr(os, "O_BINARY") else os.O_RDONLY)
try:
self.mmap = mmap.mmap(self.fd, 0, access=mmap.ACCESS_READ)
except (ValueError, mmap.error):
            # Cannot memory-map empty opening books.
self.mmap = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
return self.close()
def close(self):
"""Closes the reader."""
if self.mmap is not None:
self.mmap.close()
try:
os.close(self.fd)
except OSError:
pass
def __len__(self):
if self.mmap is None:
return 0
else:
return self.mmap.size() // ENTRY_STRUCT.size
def __getitem__(self, key):
if self.mmap is None:
raise IndexError()
if key < 0:
key = len(self) + key
try:
key, raw_move, weight, learn = ENTRY_STRUCT.unpack_from(self.mmap, key * ENTRY_STRUCT.size)
except struct.error:
raise IndexError()
return Entry(key, raw_move, weight, learn)
def __iter__(self):
i = 0
size = len(self)
while i < size:
yield self[i]
i += 1
def bisect_key_left(self, key):
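        """Returns the index of the first entry whose key is >= *key*
        (a standard binary search over the sorted, fixed-size records)."""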
lo = 0
hi = len(self)
while lo < hi:
mid = (lo + hi) // 2
mid_key, _, _, _ = ENTRY_STRUCT.unpack_from(self.mmap, mid * ENTRY_STRUCT.size)
if mid_key < key:
lo = mid + 1
else:
hi = mid
return lo
def __contains__(self, entry):
return any(current == entry for current in self.find_all(entry.key, entry.weight))
def find_all(self, board, minimum_weight=1, exclude_moves=()):
"""Seeks a specific position and yields corresponding entries."""
try:
zobrist_hash = board.zobrist_hash()
except AttributeError:
zobrist_hash = int(board)
board = None
i = self.bisect_key_left(zobrist_hash)
size = len(self)
while i < size:
entry = self[i]
i += 1
if entry.key != zobrist_hash:
break
if entry.weight < minimum_weight:
continue
if board:
move = entry.move(chess960=board.chess960)
elif exclude_moves:
move = entry.move()
if exclude_moves and move in exclude_moves:
continue
if board and not board.is_legal(move):
continue
yield entry
def find(self, board, minimum_weight=1, exclude_moves=()):
"""
Finds the main entry for the given position or zobrist hash.
The main entry is the first entry with the highest weight.
By default entries with weight ``0`` are excluded. This is a common way
to delete entries from an opening book without compacting it. Pass
*minimum_weight* ``0`` to select all entries.
Raises :exc:`IndexError` if no entries are found.
"""
try:
return max(self.find_all(board, minimum_weight, exclude_moves), key=lambda entry: entry.weight)
except ValueError:
raise IndexError()
def choice(self, board, minimum_weight=1, exclude_moves=(), random=random):
"""
Uniformly selects a random entry for the given position.
Raises :exc:`IndexError` if no entries are found.
"""
chosen_entry = None
for i, entry in enumerate(self.find_all(board, minimum_weight, exclude_moves)):
if chosen_entry is None or random.randint(0, i) == i:
chosen_entry = entry
if chosen_entry is None:
raise IndexError()
return chosen_entry
def weighted_choice(self, board, exclude_moves=(), random=random):
"""
Selects a random entry for the given position, distributed by the
weights of the entries.
Raises :exc:`IndexError` if no entries are found.
"""
total_weights = sum(entry.weight for entry in self.find_all(board, exclude_moves=exclude_moves))
if not total_weights:
raise IndexError()
choice = random.randint(0, total_weights - 1)
current_sum = 0
for entry in self.find_all(board, exclude_moves=exclude_moves):
current_sum += entry.weight
if current_sum > choice:
return entry
assert False
def open_reader(path):
"""
Creates a reader for the file at the given path.
>>> with open_reader("data/polyglot/performance.bin") as reader:
... for entry in reader.find_all(board):
... print(entry.move(), entry.weight, entry.learn)
e2e4 1 0
d2d4 1 0
c2c4 1 0
"""
return MemoryMappedReader(path)
| UTF-8 | Python | false | false | 7,227 | py | 14 | polyglot.py | 9 | 0.580047 | 0.569531 | 0 | 235 | 29.753191 | 107 |
VovaStasiuk/PostApplication | 6,279,242,194,694 | c744535bfa5a093b3ec767d45e7e6553708505a7 | b6f00dce0ad7c56bf3b4806150b638bb03d56f18 | /apps/accounts/models.py | d476ce1dda11e5802a5e9cb9300a7af29c318de5 | [] | no_license | https://github.com/VovaStasiuk/PostApplication | 3a9b8a369e0e968944bd565591484d8c5d1bdfdc | 55c6eccf714eb1c2623fe246ec745f1df338dd92 | refs/heads/master | 2022-12-13T01:18:09.379451 | 2020-03-09T18:48:06 | 2020-03-09T18:48:49 | 246,096,840 | 0 | 0 | null | false | 2022-12-08T03:46:10 | 2020-03-09T17:15:38 | 2020-03-09T18:49:00 | 2022-12-08T03:46:09 | 19 | 0 | 0 | 5 | Python | false | false | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from .managers import CustomUserManager
class Profile(AbstractBaseUser, PermissionsMixin):
email = models.CharField(max_length=128, unique=True)
date_joined = models.DateTimeField(auto_now_add=True)
first_name = models.CharField(max_length=40, default='')
second_name = models.CharField(max_length=40, default='')
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
difficult_password = models.PositiveSmallIntegerField(default=0)
objects = CustomUserManager()
USERNAME_FIELD = 'email'
class Meta:
verbose_name = 'Profile'
verbose_name_plural = 'Profiles'
| UTF-8 | Python | false | false | 759 | py | 19 | models.py | 15 | 0.740448 | 0.729908 | 0 | 20 | 36.95 | 73 |
lucy74310/PythonPracticeProjects | 11,630,771,479,623 | b5fce16f3779433e8977710e059f1143f8a50b11 | a8cd585753283b31d1ad38da89e75e12f569ae66 | /practice01/prac04.py | 888b2a8ec14310bbf8a87ff05481f857489536f2 | [] | no_license | https://github.com/lucy74310/PythonPracticeProjects | efe3084e4412649bca41f274fa6d8ac4c1f56624 | a233f371adecee68fa0cc8f71c46f69b9acdccf7 | refs/heads/master | 2020-06-05T06:19:52.810826 | 2019-06-17T12:29:23 | 2019-06-17T12:29:23 | 192,342,793 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 문제4.
# Problem 4: write the multiplication (gugudan) table so that it prints as below. (nested for..in)
for dan in range(1, 10):
for gob in range(1, 10):
print(dan, 'x', gob, '=', dan * gob, end='\t')
print() | UTF-8 | Python | false | false | 217 | py | 21 | prac04.py | 21 | 0.526627 | 0.485207 | 0 | 7 | 23.285714 | 54 |
mpses/AtCoder | 10,350,871,204,481 | 240402afa8130d13d32708e9f9dce77d1d27d194 | 2caa47f0bdb2f03469a847c3ba39496de315d992 | /Contest/ABC077/c/main.py | cf7153bbeb6f6096d7529a44f021c3d6ece8882d | [
"CC0-1.0"
] | permissive | https://github.com/mpses/AtCoder | 9023e44885dc67c4131762281193c24b69d3b6da | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | refs/heads/master | 2023-03-23T17:00:11.646508 | 2021-03-20T12:21:19 | 2021-03-20T12:21:19 | 287,489,233 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
from bisect import*
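# Input: n on the first line, then the three size-n lists A, B, C; each line is
# sorted after parsing. For every middle part b, count upper parts a < b via
# bisect_left and lower parts c > b via n - bisect_right, and sum the products.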
(n,), a, b, c = [sorted(map(int, o.split())) for o in open(0)]
print(sum(bisect_left(a, i) * (n - bisect(c, i)) for i in b)) | UTF-8 | Python | false | false | 167 | py | 540 | main.py | 488 | 0.60479 | 0.592814 | 0 | 4 | 41 | 62 |
hughfeehan353/conformal-cooling | 16,853,451,701,784 | ec462eda14ee459f2a063125d4377e5eedb07338 | a0930595ef925309c3ecccd9444f9dca14552911 | /Testing/test_paccman.py | 39fb0ebe662b4dc845b9629d8bcaa984e79b40c1 | [] | no_license | https://github.com/hughfeehan353/conformal-cooling | 2f0a4deb001708406be9e0318193fd90dff6ec97 | 9d87f50cea0365daafcf89e0b3ab4a69bf2dccaa | refs/heads/main | 2023-03-13T08:44:11.786500 | 2021-07-29T18:20:24 | 2021-07-29T18:20:24 | 309,420,775 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from math import isclose
from _pytest.monkeypatch import MonkeyPatch
def test_input(monkeypatch):
monkeypatch.setattr('builtins.input', lambda _: "M")
test_input(MonkeyPatch())
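# The patch above feeds "M" to any input() prompt, so that importing the module
# under test (which seems to prompt for a mode at import time) does not block.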
testTC = 15.
#coolant temperature celsius
testPP = 980.
#density of plastic part kg/m^3
testCP = 1300.
#specific heat capacity of plastic part J/KG*K
testLP = 0.001
#half the plastic part thickness m
testW = 0.010
#cooling line pitch distance m
testD = 0.005
#cooling line diameter m
testLM = 0.004
#distance from cooling line to mold wall
testTMelt = 180.
#Part melted temperature
testTEject = 64.9
#Part ejection temperature
testTCycle = 10.
#Cycle time seconds
testTMO = 13.
#Initial mold temperature
testCVV = 0.227
#coolant velocity liters/sec
testDV = 1.002 * 10**-3
#coolant dynamic viscosity
testWDV = 0.0009775
#coolant dynamic viscosity when near wall
testKC = 0.5918
#thermal conductivity of coolant
testPC = 998.2
#coolant density
testCC = 4187
#specific heat capacity of coolant
testL = 1.15
#coolant line length
testrho_m = 7930.
#First comparison Mold density kg/m^3: 316 steel
testCp_m = 510.
#First comparison Mold specific heat 316 steel
testeps = 0.00015
#First comparison average height of pipe surface irregularities (m) 316 steel
testKM = 16.5
#First comparison thermal conductivity of mold: 316 steel
testPR = 7.089175397093613
#Prandtl number reference value (matches the PRfunc computation tested below)
testCD = 0.057
#coil diameter (m) used by the helical-coil correlations
from ..paccman import FVfunc
from ..paccman import KVfunc
from ..paccman import REfunc
from ..paccman import PRfunc
from ..paccman import DFfunc
from ..paccman import htc
from ..paccman import GNU
from ..paccman import ATMfunc
from ..paccman import TConstantfunc
from ..paccman import pdropfunc
from ..paccman import helicalDFfunc_lam_bigv
from ..paccman import helicalDFfunc_turb
from ..paccman import helicalNU_lam
from ..paccman import helicalNU_turb
class TestClass:
def testFV(self):
testFV = FVfunc(testCVV, testD)
assert testFV == 11.561015066195278
return testFV
def testKV(self):
testKV = KVfunc(testDV,testPC)
assert testKV == 1.0038068523342016e-06
return testKV
def testRE(self):
testRE = REfunc(11.561015066195278,testD,1.0038068523342016e-06)
assert testRE == 57585.854486407814
return testRE
def testPR(self):
testPR = PRfunc(testDV,testCC,testKC)
assert testPR == 7.089175397093613
return testPR
def testDF(self):
testDF = DFfunc(testeps,testD,57585.854486407814)
assert testDF == 0.010906214575733224
return testDF
def testhtc(self):
testh = htc(testKC,testD,GNU(0.010906214575733224,57585.854486407814,7.089175397093613))
assert testh == 28621.292587276246
return testh
def testATM(self):
testATM = ATMfunc(testPP,testCP,testLP,testKM,testW,28621.292587276246,testD,testLM,testTMelt,testTEject,testTCycle,testTC)
assert testATM == 19.207173469816603
return testATM
def testTConstant(self):
testTConstant = TConstantfunc(testrho_m,testCp_m,testLM,testKM,testW,28621.292587276246,testD)
assert testTConstant == 4.641400260500878
return testTConstant
def testpdrop(self):
testpdrop = pdropfunc(0.010906214575733224,testL,testD,testPC,11.561015066195278)
assert testpdrop == 167332.91558708664
return testpdrop
def testhelicalDFfunc_lam(self):
testhelicalDF_lam = helicalDFfunc_lam_bigv(2492,12/112,1)
testhelicalDF_lam2 = helicalDFfunc_lam_bigv(7912,12/112,1)
assert isclose(testhelicalDF_lam, 0.07922, abs_tol=2.1e-3)
assert isclose(testhelicalDF_lam2, 0.04899, abs_tol=1e-3)
#data from Hydraulic Performance... (2001) by Xu, et al. pulled with WebPlotDigitizer
def testhelicalDFfunc_turb(self):
testhelicalDF_turb = helicalDFfunc_turb(0,10394,12/112,1)
testhelicalDF_turb2 = helicalDFfunc_turb(0,21310,12/112,1)
assert isclose(testhelicalDF_turb, 0.05457, abs_tol=2.5e-3)
assert isclose(testhelicalDF_turb2, 0.05246, abs_tol=5.5e-3)
#data from Hydraulic Performance... (2001) by Xu, et al. pulled with WebPlotDigitizer
def testhelicalNU_lam(self):
testhelicalNU_lam = helicalNU_lam(765.17,testPR)
testhelicalNU_lam2 = helicalNU_lam(15.227,testPR)
assert isclose(testhelicalNU_lam, 23.751*testPR**0.175, abs_tol=3)
assert isclose(testhelicalNU_lam2, 3.9104*testPR**0.175, abs_tol=1.4e-1)
#data from The Effects of... (1997) by Xin, et al. pulled with WebPlotDigitizer and tested using generic Prandtl
def testhelicalNU_turb(self):
testhelicalNU_turb = helicalNU_turb(18152.641,testPR,testD,testCD)
testhelicalNU_turb2 = helicalNU_turb(112074.9,testPR,testD,testCD)
assert isclose(testhelicalNU_turb, 51.304*(testPR**0.4*(1+3.455*(testD/testCD))), abs_tol=1e-1)
assert isclose(testhelicalNU_turb2, 279.538*(testPR**0.4*(1+3.455*(testD/testCD))), abs_tol=17)
#data from The Effects of... (1997) by Xin, et al. pulled with WebPlotDigitizer and tested using generic Prandtl, Diameter, Coil Diameter
| UTF-8 | Python | false | false | 5,136 | py | 14 | test_paccman.py | 6 | 0.714369 | 0.5919 | 0 | 143 | 34.902098 | 137 |
shsingh/secureCodeBox | 11,201,274,726,209 | f5d8b928fadf8f0d32baaa312c6e44a57a1ea9e9 | a02d2ab626e3485add173176efd4613c8f50320b | /scanners/zap-advanced/scanner/tests/test_zap_context.py | 1e0162239d42f44f92597dc9e71001ba97a3f199 | [
"Apache-2.0"
] | permissive | https://github.com/shsingh/secureCodeBox | 401fae49ba8e49d8bf1c12b3b89b71965cfc7453 | f8364d46493ce2ff03c3df3f8640ab7cda3b8d0b | refs/heads/master | 2023-01-28T19:34:55.719040 | 2023-01-11T17:04:21 | 2023-01-12T14:31:02 | 163,702,909 | 0 | 0 | Apache-2.0 | true | 2018-12-31T23:30:12 | 2018-12-31T23:30:12 | 2018-12-28T09:03:13 | 2018-12-19T15:57:38 | 5,401 | 0 | 0 | 0 | null | false | null | #!/usr/bin/env python
# SPDX-FileCopyrightText: the secureCodeBox authors
#
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
import pytest
from unittest.mock import MagicMock, Mock, patch
from unittest import TestCase
from zapv2 import ZAPv2
from zapclient.configuration import ZapConfiguration
from zapclient.context.zap_context import ZapConfigureContext
class ZapScannerTests(TestCase):
@pytest.mark.unit
def test_context_empty(self):
pass
        # # build our dependencies (sketch; would also need `from unittest import mock`,
        # # and the autospec target should be the ZAPv2 class itself)
        # mock_zap = mock.create_autospec(ZAPv2)
        # mock_config = mock.create_autospec(ZapConfiguration)
        # testobject = ZapConfigureContext(mock_zap, mock_config)
        # testobject.configure_contexts()
| UTF-8 | Python | false | false | 758 | py | 496 | test_zap_context.py | 154 | 0.729551 | 0.721636 | 0 | 30 | 24.266667 | 69 |
JacekPierzchlewski/fsCS-repro | 8,057,358,657,136 | f571d409d2d24cc4f6dbe72fef723abd406ba07c | 22779033a9176d1f96485dcb82100effb12daad5 | /RxCS-15Jan2015/rxcs/ana/SNR.py | 2fcb0b1bb8cfe7da46c285d3bd8d5e5b86717d8d | [
"BSD-2-Clause"
] | permissive | https://github.com/JacekPierzchlewski/fsCS-repro | 4101a6e72062c0e06e06fb2c3fc87b77b087968e | 9a90e61edb73433a847d63b2b69f6327b71a73c5 | refs/heads/master | 2016-08-05T04:37:58.266461 | 2015-01-16T15:17:42 | 2015-01-16T15:17:42 | 29,351,869 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
This module contains SNR evaluation function of the reconstructed signals. |br|
*Author*:
Jacek Pierzchlewski, Aalborg University, Denmark. <jap@es.aau.dk>
*Version*:
0.1 | 20-MAY-2014 : * Initial version. |br|
0.2 | 21-MAY-2014 : * Success Ratio computation is added. |br|
0.3 | 21-MAY-2014 : * Docstrings added. |br|
0.4 | 21-MAY-2014 : * Configuration with a dictionary |br|
0.5 | 21-MAY-2014 : * Progress and results printing |br|
1.0 | 21-MAY-2014 : * Version 1.0 released. |br|
*License*:
BSD 2-Clause
"""
from __future__ import division
import numpy as np
import rxcs
def main(dSigOrig, dSigRecon, dAna, dAnaConf):
"""
    This is the main function of the analysis module and the only one which
    should be accessed by a user. |br|
    The function computes the reconstruction noise for every signal;
    this noise is equal to the difference between the reconstructed and
    the original signals.
Afterwards the function computes the signal-to-noise ratio of
the reconstruction for every signal.
The ratio is computed as: SNR = 10log10(iPs/iPn),
where:
iPs - power of the original signal
iPn - power of noise
The function computes also the average signal-to-noise ratio of the
reconstruction. |br|
Additionally. the function computes the success ratio, which is equal to
the ratio of reconstructed signal with reconstruction SNR higher than
success threshold. |br|
Args:
dSigOrig (dict): dict. with the original signals
dSigRecon (dict): dict. with the reconstructed signals
dAna (dict): dict. with results of system analysis
dAnaConf (dict): dict. with configuration for system analysis
Returns:
dAna (dict): dict. with results of system analysis
"""
# -------------------------------------------------------------------
# Get the signals
(mSig_orig, mSig_recon, nSigs_orig, iSiz_orig) = \
_getSignals(dSigOrig, dSigRecon)
# Get the configuration
# bMute - 'mute the console output' flag
# iSNRSuccess - success threshold
(bMute,
iSNRSuccess) = _getConf(dAnaConf)
# -------------------------------------------------------------------
# Print out the header of the SNR analysis
if bMute == 0:
rxcs.console.progress('System analysis',
'SNR of the reconstructed signal')
tStart = rxcs.console.module_progress('SNR analysis starts!!!')
# -------------------------------------------------------------------
# Compute the SNR
# Compute the noise
mNoise = np.abs(mSig_orig - mSig_recon)
(_, iSizNoise) = mNoise.shape # Size of the noise
# Compute the power of noise
vNoiseP = (np.sum(mNoise * mNoise, axis=1) / iSizNoise)
    # Compute the power of the original signals
vSigP = (np.sum(mSig_orig * mSig_orig, axis=1) / iSiz_orig)
# Compute the SNR for every reconstructed signal and the average SNR
vSNR = 10 * np.log10(vSigP / vNoiseP)
iSNR = vSNR.mean()
# -------------------------------------------------------------------
# Compute the success ratio
iSR = (vSNR >= iSNRSuccess).mean()
# -------------------------------------------------------------------
# Add the vector with computed SNR to the dictionary with system
# analysis results
dAna['vSNR'] = vSNR
# Add the average SNR to the dictionary with system analysis results
dAna['iSNR'] = iSNR
# Add the success ratio to the dictionary with system analysis results
dAna['iSR'] = iSR
# -------------------------------------------------------------------
# SNR analysis is done
if bMute == 0:
rxcs.console.module_progress_done(tStart)
# -------------------------------------------------------------------
# Print results
_printResults(iSNR, iSR, iSNRSuccess, bMute)
# -------------------------------------------------------------------
return dAna
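# -------------------------------------------------------------------
# Worked example (illustrative only; the signals below are made up and the
# helper name _snr_example is not part of the original module):
def _snr_example():
    vSig = np.sin(2 * np.pi * np.arange(100) / 100)   # original signal
    vNoise = 0.01 * np.ones(100)                      # reconstruction error
    iPs = np.sum(vSig * vSig) / vSig.size             # signal power (~0.5)
    iPn = np.sum(vNoise * vNoise) / vNoise.size       # noise power (1e-4)
    return 10 * np.log10(iPs / iPn)                   # SNR, roughly 37 dB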
# =================================================================
# Get the signals
# =================================================================
def _getSignals(dSigOrig, dSigRecon):
"""
This function gets the reconstructed and the original signals from
    the data dictionaries.
The function checks if:
- the signals are present in the dictionaries
- the signals have the same length
- there is the same number of signals
Args:
dSigOrig (dict): dict. with the original signals
dSigRecon (dict): dict. with the reconstructed signals
Returns:
mSig_orig (matrix): the original non noisy signal
mSig_recon (matrix): the reconstructed signal
nSigs (float): the number of signals
iSigSiz (float): the length of signals
"""
# -------------------------------------------------------------------
    # Get the original noise-free signals, the number of original signals
# and their length
strErr = 'The original signals (mSigNN) are missing in the "dSigOrig"'
if not 'mSigNN' in dSigOrig:
raise NameError(strErr)
mSig_orig = dSigOrig['mSigNN']
(nSigs_orig, iSiz_orig) = mSig_orig.shape
# -------------------------------------------------------------------
# Get the reconstructed signals, the number of reconstructed signals,
# and their length
strErr = 'The reconstructed signals (mSig) are missing in the "dSigRecon"'
if not 'mSig' in dSigRecon:
raise NameError(strErr)
mSig_recon = dSigRecon['mSig']
(nSigs_recon, iSiz_recon) = mSig_orig.shape
# -------------------------------------------------------------------
# Check if the original and the reconstructed signals have the same
# length
strErr = 'The original and reconstructed signals must have the same length'
if iSiz_orig != iSiz_recon:
raise ValueError(strErr)
# -------------------------------------------------------------------
# Check if there is the same number of original and reconstructed
# signals
strErr = 'There are more original signals than reconstructed signals!'
if nSigs_orig > nSigs_recon:
raise ValueError(strErr)
strErr = 'There are more reconstructed signals than original signals!'
if nSigs_recon > nSigs_orig:
raise ValueError(strErr)
# -------------------------------------------------------------------
nSigs = nSigs_orig
iSigSiz = iSiz_orig
return (mSig_orig, mSig_recon, nSigs, iSigSiz)
# =================================================================
# Get the configuration
# =================================================================
def _getConf(dAnaConf):
"""
This function gets the configuration of the module from the
system analysis configuration dictionary.
The function checks if the correct configuration fields are given in
the configuration dictionary. If not, the default values are assigned to
the configuration values.
Args:
dAnaConf (dict): dict. with the system analysis configuration
Returns:
bMute (float): 'mute the console output' flag
iSNRSuccess (float): success threshold
"""
# -------------------------------------------------------------------
# Get the mute flag
if not 'bMute' in dAnaConf:
bMute = 0
else:
bMute = dAnaConf['bMute']
# -------------------------------------------------------------------
# Get the success threshold
if not 'iSNRSuccess' in dAnaConf:
iSNRSuccess = 20
else:
iSNRSuccess = dAnaConf['iSNRSuccess']
return (bMute, iSNRSuccess)
# =================================================================
# Print results of the analysis
# =================================================================
def _printResults(iSNR, iSR, iSNRSuccess, bMute):
"""
    This function prints the results of the SNR analysis to the console,
if the 'mute' flag is not set.
Args:
        iSNR (float): the measured average SNR of the reconstruction
iSR (float): success ratio
iSNRSuccess (float): success threshold
bMute (float): 'mute the console output' flag
Returns:
nothing
"""
if bMute == 0:
rxcs.console.bullet_param('The average SNR of the reconstruction',
iSNR, '-', 'dB')
rxcs.console.bullet_param('The Success Ratio', iSR, ' ', '')
rxcs.console.param('(success threshold)', iSNRSuccess, '-', 'dB')
return
| UTF-8 | Python | false | false | 8,591 | py | 46 | SNR.py | 32 | 0.542195 | 0.534396 | 0 | 253 | 32.956522 | 79 |
eddie-dunn/test-selection | 12,249,246,773,711 | f95a08e2a0cbca1ddc7be2326d6bce6de1a2776e | 6a8d6c3dd81b7220d56429c4e26fe6dabefc4670 | /scripts/correlation_parser.py | 7280a82f4f34df903738d21609c0f39be6c87e65 | [] | no_license | https://github.com/eddie-dunn/test-selection | 81be0fc9302abb26dda0efb1b7dc3b00b709abbf | 05a216e50e41eaa18d1b5b9855ed394b4e1c08d5 | refs/heads/master | 2016-12-13T01:07:43.609479 | 2016-04-18T11:23:31 | 2016-04-18T11:24:10 | 37,119,829 | 0 | 0 | null | false | 2016-01-29T17:24:58 | 2015-06-09T08:24:22 | 2015-09-24T14:00:51 | 2016-01-29T17:24:57 | 56 | 0 | 0 | 0 | Python | null | null | #!/usr/bin/env python3
"""Correlation parser
Query a correlation file with a package name and get recommended tests to run.
Written for Python 3 (the function annotations used below are Python 3 only).
"""
from collections import defaultdict
import argparse
import json
import sys
import operator
MAX_NBR_OF_TESTS = 1203  # total size of the full regression suite (project-specific constant)
MODE_CHOICES = ['WIDE', 'wide', 'NARROW', 'narrow']
def read_data(filename):
"""Read json data from filename"""
data = {}
try:
with open(filename, 'r') as fileh:
data = json.loads(fileh.read())
except (OSError, IOError):
pass
return data
def get_tests(module, data):
"""Get tests correlated to module in dict data"""
tests = {}
try:
tests = data[module]
except KeyError:
pass
return tests
def parse_args():
"""Setup argparser"""
parser = argparse.ArgumentParser()
parser.add_argument('modules', nargs='+', help='module(s) to get '
'recommendations on. Space separated list of modules. '
'Ignored if wide mode is specified.')
parser.add_argument('-f', '--correlation-data', required=True,
help='json file to analyze')
parser.add_argument('-c', '--cutoff', help='cutoff limit for correlation '
'weights', default=0, type=int)
parser.add_argument('--mode', default='NARROW',
choices=MODE_CHOICES, help='regression test strategy.')
parser.add_argument('-v', '--verbose', action="store_true",
help='prints additional information')
parser.add_argument(
'--sort', default='weight',
help="Option to sort output in different ways",
choices=['weight', 'weight-reverse', 'alphabet', 'alphabet-reverse'],
dest='order',
)
return parser.parse_args()
def sort_tests(tests: dict, order: str) -> list:
"""Takes a dict containing str/int key/value pairs like:
{'testsname': correlation_weight}
Returns a list ordered as specified by parameter `order`.
"""
if order == 'weight':
return sorted(tests.items(), key=operator.itemgetter(1, 0))
elif order == 'weight-reverse':
return sorted(tests.items(), key=operator.itemgetter(1, 0))[::-1]
elif order == 'alphabet':
return sorted(tests.items())
elif order == 'alphabet-reverse':
return sorted(tests.items())[::-1]
raise ValueError("Order '%s' is not supported" % order)
def narrow(filename, args):
"""Perform narrow test selection."""
# TODO: Refactor this
modules = args.modules
if args.verbose:
print("\nParsing using narrow selection on file '{}' for "
"recommendations on {}\n".format(filename, modules))
data = read_data(filename)
if not data:
print("ERROR: File {} not found".format(filename))
sys.exit(1)
tests = {}
empty_tests = []
for module in modules:
current_module_tests = get_tests(module, data)
if not current_module_tests:
empty_tests.append(module)
tests = {k: tests.get(k, 0) + current_module_tests.get(k, 0) for k in
set(tests) | set(current_module_tests)}
if not tests:
print("WARNING: No tests correlated to specified module(s):"
"{}".format(modules))
sys.exit(1)
ordered_tests = sort_tests(tests, args.order)
if args.verbose:
print("Recommended tests:")
for test in ordered_tests:
if test[1] >= args.cutoff:
print("{: <5} {}".format(test[1], test[0]))
if args.cutoff:
print("(cutoff at weight {})".format(args.cutoff))
print("\nTotal recommended tests: {}".format(len(ordered_tests)))
time_saved = MAX_NBR_OF_TESTS - len(ordered_tests)
print("\nTime savings running only recommended tests: {} units "
"".format(time_saved))
if empty_tests:
print("[INFO]: no tests found for {}".format(', '.join(empty_tests)))
else:
print_list([item[0] for item in ordered_tests])
def wide(filename, args):
"""Perform wide test selection."""
# TODO: Refactor this
if args.verbose:
print("\nParsing using wide selection on file '{}'"
"\n".format(filename))
with open(filename, 'r') as fileh:
string_data = fileh.read()
data = json.loads(string_data)
tests = sum_tests(data)
sorted_tests = sort_tests(tests, args.order)
if args.verbose:
for item in sorted_tests:
print("{: >6} | {}".format(item[1], item[0]))
print("weight | name")
time_savings_percentage = ((1 - len(sorted_tests) / MAX_NBR_OF_TESTS)
* 100)
print("Nbr of correlated tests: {}".format(len(sorted_tests)))
print("Time savings: {:.1f}%".format(time_savings_percentage))
else:
print_list([item[0] for item in sorted_tests])
def print_list(test_list, sep='\n'):
"""Print a list of test items, newline separated by default."""
print(sep.join(test_list))
def sum_tests(data):
"""Go through each package in data, get the tests and their correlations,
and add test and correlation to a list. If test already exists, increment
weight by the weight of the test found."""
tests = defaultdict(int)
for package in data:
for test in data[package]:
tests[test] += data[package][test]
return tests
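# Example (hypothetical correlation data): for
#     {'pkg_a': {'test_x': 2}, 'pkg_b': {'test_x': 1, 'test_y': 4}}
# sum_tests returns {'test_x': 3, 'test_y': 4}.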
def main():
"""Main method"""
args = parse_args()
filename = args.correlation_data
if args.mode.lower() == 'narrow':
narrow(filename, args)
if args.mode.lower() == 'wide':
wide(filename, args)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 5,779 | py | 27 | correlation_parser.py | 21 | 0.593701 | 0.588164 | 0 | 193 | 28.943005 | 81 |
pankajkhatiwada/Sen_Machine_Deep_Learning | 4,011,499,465,660 | 49c8590eaff0a64d772eef12300f35bb0523944f | 84d3368a4536d9a1b3cd7548b298ebc28f6b1e81 | /Regression.py | 80a6a9b4700268017d60da355f90bb33f59a1811 | [] | no_license | https://github.com/pankajkhatiwada/Sen_Machine_Deep_Learning | 306ed6158707af9eea12bb18227e190cea7f1e2e | 86f9d284d9538c6a4e845cdf991ecdea8dc0c748 | refs/heads/master | 2022-12-11T08:38:16.490245 | 2020-09-10T13:18:46 | 2020-09-10T13:18:46 | 294,118,706 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import math
import quandl
df = quandl.get("WIKI/GOOGL")
df = df[["Adj. Open", "Adj. High", "Adj. Low", "Adj. Close", "Adj. Volume"]]
df["HCL_PCT"] = (df["Adj. High"] - df["Adj. Close"]) / df["Adj. Close"] * 100
df["PCT_change"] = (df["Adj. Close"] - df["Adj. Open"]) / df["Adj. Open"] * 100
df = df[["Adj. Close", "HCL_PCT", "PCT_change", "Adj. Volume"]]
forecast_col = "Adj. Close"
df.fillna(-99999, inplace=True)
forecast_out = int(math.ceil(0.1*len(df)))
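# forecast_out is 10% of the dataset length; each row's label is the
# adjusted close that many rows into the future.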
df["label"] = df[forecast_col].shift(-forecast_col)
print(df.head()) | UTF-8 | Python | false | false | 552 | py | 1 | Regression.py | 1 | 0.608696 | 0.585145 | 0 | 19 | 28.105263 | 79 |
vandemaelefelix/sudoku_solver | 1,065,151,892,797 | f820cc020cdf338f9d09aff0bfe344db8389daae | 682e2b36cda6df4e046b7006a9c5b731e2692d41 | /python code/extract_sudoku.py | 508618193118f387b04c7e8593f3843f06984d1a | [] | no_license | https://github.com/vandemaelefelix/sudoku_solver | 410637dd681fe6506807fbf2a027cb9b48984222 | 862ed6881de6ad3993a102d639d7e4423f9734cc | refs/heads/master | 2022-07-09T22:40:54.265131 | 2020-05-20T11:15:02 | 2020-05-20T11:15:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import numpy as np
import matplotlib.pyplot as plt
import tkinter as tk
from tkinter import filedialog
import glob, os
import time
import operator
def preprocess_image(image):
# Make image grayscale to remove colors
processed_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Blur image so lines stand out more
processed_img = cv2.GaussianBlur(processed_img.copy(), (9, 9), 3)
# Use tresholding to differentiate background and foreground
processed_img = cv2.adaptiveThreshold(processed_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
# Invert black and white
processed_img = cv2.bitwise_not(processed_img, processed_img)
return processed_img
def find_corners(image):
contours, _ = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contour = sorted(contours, key=cv2.contourArea, reverse=True)[0]
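    # Corner trick: top-left minimises x+y, bottom-right maximises x+y,
    # top-right maximises x-y, bottom-left minimises x-y.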
bottom_right, _ = max(enumerate([pt[0][0] + pt[0][1] for pt in contour]), key=operator.itemgetter(1))
top_left, _ = min(enumerate([pt[0][0] + pt[0][1] for pt in contour]), key=operator.itemgetter(1))
bottom_left, _ = min(enumerate([pt[0][0] - pt[0][1] for pt in contour]), key=operator.itemgetter(1))
top_right, _ = max(enumerate([pt[0][0] - pt[0][1] for pt in contour]), key=operator.itemgetter(1))
return [contour[top_left][0], contour[top_right][0], contour[bottom_right][0], contour[bottom_left][0]]
def four_point_transform(pts, image):
    # shape is (rows, cols): rows = height, cols = width
    height = image.shape[0]
    width = image.shape[1]
    # Corner coordinates in the original image
    pts1 = np.float32(pts)
    # Destination coordinates
pts2 = np.float32([[0, 0], [width, 0], [width, height], [0, height]])
# Apply Perspective Transform Algorithm
matrix = cv2.getPerspectiveTransform(pts1, pts2)
result = cv2.warpPerspective(image, matrix, (width, height))
return result
root = tk.Tk()
root.withdraw()
file_paths = filedialog.askopenfilenames()
for file in file_paths:
# Read image of sudoku and resize
image = cv2.imread(file)
image = cv2.resize(image, (720, 720))
# print('Starting...')
start = time.time()
# Some preprocessing to eliminate everything but the sudoku from the picture
preprocessed_image = preprocess_image(image)
index = 0
contours, _ = cv2.findContours(preprocessed_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contour = sorted(contours, key=cv2.contourArea, reverse=True)[index]
approx = cv2.approxPolyDP(contour, 0.01*cv2.arcLength(contour, True), True)
print(approx)
while len(approx) > 4:
index += 1
contours, _ = cv2.findContours(preprocessed_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contour = sorted(contours, key=cv2.contourArea, reverse=True)[index]
approx = cv2.approxPolyDP(contour, 0.01*cv2.arcLength(contour, True), True)
cv2.drawContours(image, [contour], 0, (0, 255, 0), 5)
corners = find_corners(preprocessed_image)
result = four_point_transform(corners, image)
cv2.imshow('sudoku', image)
key = cv2.waitKey(0)
if key == 27:
break | UTF-8 | Python | false | false | 3,137 | py | 16 | extract_sudoku.py | 10 | 0.67815 | 0.644657 | 0 | 103 | 29.446602 | 119 |
jtokaz/checkio-mission-univocalic-davasaan | 5,299,989,646,147 | e5992a4eaaa7cbed403c2cc3b03205548366f161 | 9b0eac9a0f264a1e4968cfd40f7daa203948dba3 | /verification/tests.py | 4dc73f34ed38802a0295a7c2402f42b76d850c20 | [] | no_license | https://github.com/jtokaz/checkio-mission-univocalic-davasaan | bba35d00bd1b2b0d6f0f999d8bc7f984712481cb | 997de1894e11f757cf1f68fbbb48f1d395e66878 | refs/heads/master | 2021-01-23T14:05:25.984848 | 2015-09-02T12:13:22 | 2015-09-02T12:13:22 | 41,778,812 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
TESTS = {"Basic": [
{"input": 0, "answer": 0},
{"input": 9, "answer": 0},
{"input": 41, "answer": 4},
{"input": 65, "answer": 6},
{"input": 79, "answer": 7},
]}
for x in random.sample(range(0,1000),10):
TESTS["Basic"].append({"input": x, "answer": x//10})
R = range(0,2000000001)
for x in random.sample(range(0,2000000000),30):
TESTS["Basic"].append({"input": x, "answer": x//10})
| UTF-8 | Python | false | false | 426 | py | 3 | tests.py | 1 | 0.551643 | 0.438967 | 0 | 16 | 25.625 | 56 |
rohit-devops-test/python_training | 14,156,212,229,762 | 207033c22b1d59cce57db3ba689b36d00350040d | d01954425c95f59e96b0abae335e2c06ad400ae6 | /day_01/labs/lab_04.py | ca2d2b9aff4c575d5a2e0dca01ffcb524c9e4915 | [] | no_license | https://github.com/rohit-devops-test/python_training | 2ec20f1a03ea8df75d4706b024670822232d4786 | d65ebed4f2c7ef5724614a35bc111bdf5d473d26 | refs/heads/master | 2023-02-18T22:06:13.367733 | 2021-01-21T07:56:20 | 2021-01-21T07:56:20 | 291,370,991 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Program to filter the 2 digit numbers whose sum of individual
# digits is some value say 10
# [100 random numbers]
# [filtered values]
# 64 => 6 + 4 = 10
# 33 => X
# 73 => 7 + 3 = 10
import random
# Input
RN = []
for i in range(100):
RN.append(random.randint(10, 99))
print(RN)
print('_'*60)
# Process
FN = []
for n in RN:
if((n//10 + n%10) == 10): # Manjeeth
FN.append(n)
# Output
print(FN)
| UTF-8 | Python | false | false | 463 | py | 64 | lab_04.py | 57 | 0.531317 | 0.457883 | 0 | 31 | 12.548387 | 63 |
JoaoMFachinetto/servicedrivenapplication | 19,207,093,785,393 | 99e733cbe67f78ed43329a1b9c13e802c71a66b7 | 8ca8fc63e9db0916a4fec4a54da9888ef9f2fc2e | /flaskr/modules/__init__.py | 8025758ebe25a1be351535b970a2de8f7b499097 | [] | no_license | https://github.com/JoaoMFachinetto/servicedrivenapplication | 3d3465de64a3d118a31611d50dac61810d4ed21e | 0b370166c4198bdb8a6d5b689b0da233cda5d6c4 | refs/heads/master | 2021-05-28T06:23:27.505496 | 2015-01-14T13:10:09 | 2015-01-14T13:10:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.path.append('/modules')
| UTF-8 | Python | false | false | 39 | py | 27 | __init__.py | 24 | 0.74359 | 0.74359 | 0 | 2 | 18.5 | 27 |
supernnova/SuperNNova | 9,526,237,485,474 | bd22ee37d99aeb642b0ab6399e9cddc87e628beb | 13b7614d34150fcaa2f4cd7d379bf4769a2e4423 | /supernnova/utils/experiment_settings.py | 6b98247767d59c8583fdd9a6ca1b7c7d2b6ab755 | [
"MIT"
] | permissive | https://github.com/supernnova/SuperNNova | 4b06395a21b6e6fb7117b54527cbcec52cf49968 | fcf8584b64974ef7a238eac718e01be4ed637a1d | refs/heads/master | 2023-06-21T21:56:07.052935 | 2023-06-13T09:22:53 | 2023-06-13T09:22:53 | 179,472,905 | 31 | 11 | MIT | false | 2023-06-13T09:22:55 | 2019-04-04T10:08:22 | 2023-05-18T19:17:04 | 2023-06-13T09:22:54 | 5,347 | 27 | 7 | 11 | Python | false | false | import os
import json
import h5py
import itertools
import numpy as np
from pathlib import Path
from collections import OrderedDict
class ExperimentSettings:
"""Mother class to control experiment parameters
This class is responsible for the following
- Defining paths and model names
- Choosing the device on which to run computations
- Specifying all hyperparameters such as model configuration, datasets, features etc
Args:
cli_args (argparse.Namespace) command line arguments
"""
def __init__(self, cli_args):
# Transfer attributes
if isinstance(cli_args, dict):
self.__dict__.update(cli_args)
self.cli_args = cli_args
else:
self.__dict__.update(cli_args.__dict__)
self.cli_args = cli_args.__dict__
self.device = "cpu"
if self.use_cuda:
self.device = "cuda"
if self.model == "variational":
self.weight_decay = self.weight_decay
else:
self.weight_decay = 0.0
# Load simulation and training settings and prepare directories
if self.no_dump:
pass
else:
self.setup_dir()
# Set the database file names
self.set_database_file_names()
self.randomforest_features = self.get_randomforest_features()
# Set the feature lists
if "all_features" not in cli_args:
self.set_feature_lists()
self.overwrite = not self.no_overwrite
# filter combination
list_filters_combination = []
for i in range(1, len(self.list_filters) + 1):
tmp = [
"".join(t)
for t in list(itertools.combinations(self.list_filters, i))
]
list_filters_combination = list_filters_combination + tmp
self.list_filters_combination = list_filters_combination
self.set_randomforest_model_name()
self.set_pytorch_model_name()
# Get the feature normalization dict
self.load_normalization()
def get_randomforest_features(self):
"""Specify list of features to be used for RandomForest training"""
features = [
"x1",
"x1ERR",
"c",
"cERR",
"mB",
"mBERR",
"x0",
"x0ERR", # 'COV_x1_c', 'COV_x1_x0','COV_c_x0', 'NDOF',
"FITCHI2",
"m0obs_r",
"m0obs_i",
"m0obs_g",
"m0obs_z",
"em0obs_i",
"em0obs_r",
"em0obs_g",
"em0obs_z",
]
if self.redshift == "zpho":
features += ["HOSTGAL_PHOTOZ", "HOSTGAL_PHOTOZ_ERR"]
elif self.redshift == "zspe":
features += ["HOSTGAL_SPECZ", "HOSTGAL_SPECZ_ERR"]
return features
def setup_dir(self):
"""Configure directories where data is read from or dumped to
during the course of an experiment
"""
for path in [
# f"{self.raw_dir}",
# f"{self.fits_dir}",
f"{self.dump_dir}/explore",
f"{self.dump_dir}/stats",
f"{self.dump_dir}/figures",
f"{self.dump_dir}/lightcurves",
f"{self.dump_dir}/latex",
f"{self.dump_dir}/processed",
f"{self.dump_dir}/preprocessed",
f"{self.dump_dir}/models",
]:
setattr(self, Path(path).name + "_dir", path)
Path(path).mkdir(exist_ok=True, parents=True)
def set_pytorch_model_name(self):
"""Define the model name for all NN based classifiers"""
name = f"{self.model}_S_{self.seed}_CLF_{self.nb_classes}"
name += f"_R_{self.redshift}"
name += f"_{self.source_data}_DF_{self.data_fraction}_N_{self.norm}"
name += f"_{self.layer_type}_{self.hidden_dim}x{self.num_layers}"
name += f"_{self.dropout}"
name += f"_{self.batch_size}"
name += f"_{self.bidirectional}"
name += f"_{self.rnn_output_option}"
if "bayesian" in self.model:
name += (
f"_Bayes_{self.pi}_{self.log_sigma1}_{self.log_sigma2}"
f"_{self.rho_scale_lower}_{self.rho_scale_upper}"
f"_{self.log_sigma1_output}_{self.log_sigma2_output}"
f"_{self.rho_scale_lower_output}_{self.rho_scale_upper_output}"
)
if self.cyclic:
name += "_C"
if self.weight_decay > 0:
name += f"_WD_{self.weight_decay}"
self.pytorch_model_name = name
self.rnn_dir = f"{self.models_dir}/{self.pytorch_model_name}"
        # convert numpy arrays to plain lists so the dict is JSON-serializable
d_tmp = {}
for k, v in self.__dict__.items():
if isinstance(v, np.ndarray):
v = v.tolist()
d_tmp[k] = v
if self.train_rnn:
os.makedirs(self.rnn_dir, exist_ok=True)
# Dump the command line arguments (for model restoration)
with open(Path(self.rnn_dir) / "cli_args.json", "w") as f:
json.dump(d_tmp, f, indent=4, sort_keys=True)
def set_randomforest_model_name(self):
"""Define the model name for all RandomForest based classifiers"""
name = f"randomforest_S_{self.seed}_CLF_{self.nb_classes}"
name += f"_R_{self.redshift}"
name += f"_{self.source_data}_DF_{self.data_fraction}_N_{self.norm}"
self.randomforest_model_name = name
self.rf_dir = f"{self.models_dir}/{self.randomforest_model_name}"
if self.train_rf:
os.makedirs(self.rf_dir, exist_ok=True)
# Dump the command line arguments (for model restoration)
with open(Path(self.rf_dir) / "cli_args.json", "w") as f:
json.dump(self.cli_args, f, indent=4, sort_keys=True)
return name
def check_data_exists(self):
"""Utility to check the database has been built"""
database_file = f"{self.processed_dir}/database.h5"
assert os.path.isfile(database_file)
def set_feature_lists(self):
"""Utility to define the features used to train NN=based models"""
self.training_features_to_normalize = [
f"FLUXCAL_{f}" for f in self.list_filters
]
self.training_features_to_normalize += [
f"FLUXCALERR_{f}" for f in self.list_filters
]
self.training_features_to_normalize += ["delta_time"]
if not self.data:
# If the database has been created, add the list of all features
with h5py.File(self.hdf5_file_name, "r") as hf:
self.all_features = hf["features"][:].astype(str)
self.non_redshift_features = [
f for f in self.all_features if "HOSTGAL" not in f
]
# Optionally add redshift
self.redshift_features = []
if self.redshift == "zpho":
self.redshift_features = [
f for f in self.all_features if "HOSTGAL_PHOTOZ" in f
]
elif self.redshift == "zspe":
self.redshift_features = [
f for f in self.all_features if "HOSTGAL_SPECZ" in f
]
self.training_features = (
self.non_redshift_features + self.redshift_features
)
if self.additional_train_var:
self.training_features += [
k
for k in self.additional_train_var
if k not in self.training_features
]
def set_database_file_names(self):
"""Create a unique database name based on the dataset required
by the settings
"""
out_file = f"{self.processed_dir}/database"
self.pickle_file_name = out_file + ".pickle"
self.hdf5_file_name = out_file + ".h5"
def load_normalization(self):
"""Create an array holding the data-normalization parameters
used to normalize certain features in the NN-based classification
pipeline
"""
if not self.data:
self.idx_features = [
i
for (i, f) in enumerate(self.all_features)
if f in self.training_features
]
self.idx_specz = [
i
for (i, f) in enumerate(self.training_features)
if "HOSTGAL_SPECZ" in f
]
self.idx_flux = [
i for (i, f) in enumerate(self.training_features) if "FLUXCAL_" in f
]
self.idx_fluxerr = [
i for (i, f) in enumerate(self.training_features) if "FLUXCALERR_" in f
]
self.idx_delta_time = [
i for (i, f) in enumerate(self.training_features) if "delta_time" in f
]
self.idx_features_to_normalize = [
i
for (i, f) in enumerate(self.all_features)
if f in self.training_features_to_normalize
]
self.d_feat_to_idx = {f: i for i, f in enumerate(self.all_features)}
list_norm = []
with h5py.File(self.hdf5_file_name, "r") as hf:
for f in self.training_features_to_normalize:
if self.norm == "perfilter":
minv = np.array(hf[f"normalizations/{f}/min"])
meanv = np.array(hf[f"normalizations/{f}/mean"])
stdv = np.array(hf[f"normalizations/{f}/std"])
list_norm.append([minv, meanv, stdv])
else:
if "FLUX" in f:
prefix = f.split("_")[0]
minv = np.array(hf[f"normalizations_global/{prefix}/min"])
meanv = np.array(hf[f"normalizations_global/{prefix}/mean"])
stdv = np.array(hf[f"normalizations_global/{prefix}/std"])
else:
minv = np.array(hf[f"normalizations/{f}/min"])
meanv = np.array(hf[f"normalizations/{f}/mean"])
stdv = np.array(hf[f"normalizations/{f}/std"])
list_norm.append([minv, meanv, stdv])
self.arr_norm = np.array(list_norm)
| UTF-8 | Python | false | false | 10,612 | py | 78 | experiment_settings.py | 42 | 0.516491 | 0.513004 | 0 | 306 | 33.679739 | 88 |
i-DAT-Qualia/Card-Backend | 1,090,921,734,428 | 45572a6b73969b0546193b5381c5007f09f84d7c | ba6c71bc21319a190165b7c2969745c99b3cd6d4 | /cards/management/commands/updateScans.py | 3d1d725b3bb6133e3c812dbf51c7a2117b474ccc | [
"Apache-2.0"
] | permissive | https://github.com/i-DAT-Qualia/Card-Backend | 7926277712d6f6151c00352a77ded80f8ca6dd1d | ad11c4bdecc20f7f8c386c8f3f452cbfd9c9aa73 | refs/heads/master | 2021-01-01T18:22:22.586576 | 2014-10-06T05:07:30 | 2014-10-06T05:07:30 | 17,941,329 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.core.management.base import BaseCommand, CommandError
from cards.models import *
import datetime, math, time
from decimal import Decimal
def updater(reader_id,start_date,end_date,reader_location_id):
scans = Scan.objects.filter(readerLocation__reader__id = reader_id)
scans = scans.filter(added__range=[start_date, end_date])
new_reader_location = ReaderLocation.objects.get(id=reader_location_id)
for scan in scans:
print scan
scan.readerLocation = new_reader_location
scan.save()
print scan
class Command(BaseCommand):
help = 'Updates scans to the correct location'
def handle(self, *args, **options):
print "Updating scans"
#Use this if the scheduler fails
updater('1', '2014-03-21', '2014-03-24','2')
| UTF-8 | Python | false | false | 816 | py | 17 | updateScans.py | 12 | 0.678922 | 0.656863 | 0 | 29 | 27.103448 | 75 |
Sparsh239/Python-Decision-Analysis | 13,872,744,398,923 | 86a144b7c97369cdf96cdd1b74349a125764dc8c | 593a3ead56f15b0a9fa2601e8c6f961d07f5e98b | /Decision_Tree_Analysis.py | 3708f932af983252199775da46c483fc8e916eb0 | [] | no_license | https://github.com/Sparsh239/Python-Decision-Analysis | 309fe74e0c99a88f17e90cf28218b4ddc85c7595 | 38e9431017d188be4b0f330d9a730d2b09514a1f | refs/heads/master | 2021-04-03T19:17:43.438997 | 2020-03-20T23:45:20 | 2020-03-20T23:45:20 | 248,389,379 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 3 17:59:17 2020
@author: skans
"""
import json
from operator import itemgetter
class Node:
def __init__(self,node_name,node_type,parent_name,cost,benefits, probability):
self.node_type = node_type
self.node_name = node_name
self.parent_name = parent_name
self.data = {'Cost': cost, 'Benefits': benefits, 'Probability': probability}
self.branches = []
def printJSON(self):
print( json.dumps(self, default=lambda o: o.__dict__,
sort_keys=False, indent=4))
def toJSON(self):
json_string = json.dumps(self, default=lambda o: o.__dict__, sort_keys=False, indent=4)
return json.loads(json_string)
def solve(self):
if self.node_type == "Payoff":
payoff = self.data['Benefits']-self.data['Cost']
self.data['Payoff'] = payoff
return payoff
elif self.node_type == "Chance":
sum = 0
for child in self.branches:
prob = child.data['Probability']
value = child.solve()
final_value = prob*(value - self.data['Cost'])
sum = sum + final_value
self.data['EV'] = sum
return sum
else:
maximization_list = []
for child in self.branches:
value = child.solve()
node_recognition ={'name':child.node_name,'value':value}
maximization_list.append(node_recognition)
print(maximization_list)
sorted_list = sorted(maximization_list, key=itemgetter('value'), reverse = True)
return sorted_list[0]
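    # Note: "Payoff"/"Chance" branches of solve() return a number, while a
    # decision node returns the best branch as {'name', 'value'}, so solve()
    # is intended to be called on the root decision node.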
    def insert(self, parent_name, child_node):
        if self.node_name == None:
            try:
                raise KeyboardInterrupt
            finally:
                print("There is no node")
        elif self.node_name != parent_name:
            # recurse down every branch until the named parent is found
            for child in self.branches:
                child.insert(parent_name, child_node)
        else:
            self.branches.append(child_node)
@staticmethod
def read_json_formation_node(input_json_data):
nodes_dict = {}
for nodes in input_json_data:
if nodes['parent_node'] == "": ## Basically trying to access the root node andI think we can have a better condition
node_type = nodes['node_type']
node_name = nodes['node_name']
cost = nodes['data']['Cost']
benefits = nodes['data']['Benefits']
probability = nodes['data']['Probability']
parent_node = nodes['parent_node']
new_node = Node(node_name,node_type,parent_node, cost,benefits,probability)
nodes_dict[node_name] = new_node
else:
node_type = nodes['node_type']
node_name = nodes['node_name']
cost = nodes['data']['Cost']
benefits = nodes['data']['Benefits']
probability = nodes['data']['Probability']
parent_node = nodes['parent_node']
new_node = Node(node_name,node_type,parent_node,cost,benefits,probability)
if node_type == "Chance":
node_data = new_node.data
node_data['probability_checker'] = []
nodes_dict[node_name] = new_node
else:
nodes_dict[node_name] = new_node
        for decision_tree_node in nodes_dict.keys():
            decision_tree_node_class = nodes_dict[decision_tree_node]
            if decision_tree_node_class.parent_name == "":
                root_node = decision_tree_node_class  # the root node, returned at the end
                continue
            else:
                nodes_parent_name = decision_tree_node_class.parent_name
                parent_node_class = nodes_dict.get(nodes_parent_name)
                if parent_node_class.node_type == "Chance": # here we access the probability checker list
                    data_info = decision_tree_node_class.data # the data dict of the child node
                    probability_childnode = data_info['Probability'] # the child's probability value
                    probability_checker_list = parent_node_class.data['probability_checker'] # the parent's probability checker list
                    if sum(probability_checker_list) > 1:
                        raise NameError("""The probability sum is greater than 1,
                                        thus can't add a new node anymore """)
                    else:
                        probability_checker_list.append(probability_childnode)
                        parent_node_class.branches.append(decision_tree_node_class)
                elif parent_node_class.node_type == "Payoff":
                    raise NameError("We can't add a child to a payoff node")
                else:
                    parent_node_class.branches.append(decision_tree_node_class)
return root_node
# Question 1
# final_decision = Node("Computer System", "Final Decision", 0 , 0 , 0)
# advanced_computer_system = Node("Advanced Computer System", "Chance",20,0, 0)
# current_computer_system = Node("Current Computer System", "Payoff", 20, 30, 0 )
# high_prob_adc = Node("High Probability", "Payoff", 0, 60 ,0.70)
# low_prob_adc = Node("Low Probability", "Payoff", 0,30,0.30)
# advanced_computer_system.insert("Advanced Computer System", high_prob_adc)
# advanced_computer_system.insert("Advanced Computer System", low_prob_adc)
# final_decision.insert("Computer System",advanced_computer_system)
# final_decision.insert("Computer System", current_computer_system)
# print("The decision should be:",final_decision.solve())
# final_decision.printJSON()
input_json_data = [
{
"node_type":"Root|Choice",
"node_name":"Computer System",
"data":{
"Cost":0,
"Benefits":0,
"Probability":0
},
"parent_node":""},
{
"node_type":"Chance",
"node_name":"Advanced Computer System",
"data":{
"Cost":20,
"Benefits":0,
"Probability":0
},
"parent_node":"Computer System"},
{
"node_type":"Payoff",
"node_name":"High Probability",
"data":{
"Cost":0,
"Benefits":60,
"Probability":0.7
},
"parent_node":"Advanced Computer System"},
{
"node_type":"Payoff",
"node_name":"Low Probability",
"data":{
"Cost":0,
"Benefits":30,
"Probability":0.3
},
"parent_node":"Advanced Computer System"},
{
"node_type": "Payoff",
"node_name": "Current Computer System",
"data": {
"Cost": 20,
"Benefits": 30,
"Probability": 0,
"Payoff": 10
},
"parent_node":"Computer System"}
]
decision_tree = Node.read_json_formation_node(input_json_data)
decision_tree.printJSON()
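# Hand-checked expectation for the sample tree above:
#   Advanced Computer System EV = 0.7*(60-20) + 0.3*(30-20) = 31
#   Current Computer System payoff = 30 - 20 = 10
# so solve() below should pick 'Advanced Computer System' with value 31.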
print(decision_tree.solve()) | UTF-8 | Python | false | false | 7,594 | py | 1 | Decision_Tree_Analysis.py | 1 | 0.537135 | 0.526468 | 0 | 189 | 39.185185 | 151 |
lilium513/competition_programing | 4,801,773,457,621 | 4494e468b47b66e9f68ec790d3bec855c0534fb0 | eee647635af1583d9b1150b7cd3195336291e1d2 | /74ABC/c.py | d89d46ed774a494c67cc84bac8828f0540821447 | [] | no_license | https://github.com/lilium513/competition_programing | 42f69222290b09b491477b8a2b9c2d4513ebe301 | 45082bf542224b667e753ad357cf145f683fde54 | refs/heads/master | 2020-06-22T03:16:34.510906 | 2019-07-31T18:22:31 | 2019-07-31T18:22:31 | 197,619,005 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import itertools
import math
LIM = 50
def do():
ans = 0
A,B,C,D,E,F= list(map(int,input().split(" ")))
waters = []
for i in range(31):
for j in range(31):
water = A*i* 100+B*j* 100
if water <= F and water != 0:
waters.append(water)
solts = []
for i in range(3000):
for j in range(3000):
solt = C * i + D * j
if solt > E * F /100:
break
solts.append(solt)
max_nodo = -1
ans_water = 0
ans_solt = 0
for solt in solts:
for water in waters:
if solt/(water/100) <= E:
if (100 * solt)/(solt+water) >= max_nodo and water + solt <= F:
max_nodo =(100 * solt)/(solt+water)
ans_water = water + solt
ans_solt = solt
print(ans_water,ans_solt)
if __name__ == "__main__":
do() | UTF-8 | Python | false | false | 923 | py | 250 | c.py | 248 | 0.445287 | 0.4052 | 0 | 43 | 20.488372 | 79 |
alexweav/Deep-Learning | 3,942,779,995,766 | a40c85b29c1dd2e409a51bdb7c24b0a5d9c98de8 | 171fe9df8b2b257fe5898a344e85a51a518c4e0b | /main.py | 8be46c0c64964fb2c9eb8354fae38afefb5dc9dd | [
"MIT"
] | permissive | https://github.com/alexweav/Deep-Learning | 1d7f68501295a6732e77c2689fca64ec507b0976 | 50263de4d472a5e5918d1d6a73c3d3f6eeda58ac | refs/heads/master | 2016-09-14T12:43:58.432584 | 2016-05-22T23:19:54 | 2016-05-22T23:19:54 | 58,339,101 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri May 06 17:56:07 2016
@author: Alexander Weaver
"""
import numpy as np
import LearnyMcLearnface as lml
def main():
d = lml.layers.DropoutLayer(10, 1)
opts = {
'input_dim' : 700,
'init_scheme' : 'xavier'
}
nn = lml.NeuralNetwork(opts)
nn.add_layer('Affine', {'neurons':500})
nn.add_layer('PReLU', {})
nn.add_layer('Dropout', {'dropout_param':0.9})
nn.add_layer('Affine', {'neurons':10})
nn.add_layer('SoftmaxLoss', {})
test_data = np.random.randn(100, 700)
test_y = np.random.randint(1, 10, 100)
d.forward_train(test_data)
data = {
'X_train' : test_data,
'y_train' : test_y,
'X_val' : test_data,
'y_val' : test_y
}
opts = {
'update_options' : {'update_rule' : 'sgd', 'learning_rate' : 1},
'reg_param' : 0,
'num_epochs' : 6
}
trainer = lml.Trainer(nn, data, opts)
accuracy = trainer.accuracy(test_data, test_y)
print('Initial model accuracy:', accuracy)
trainer.train()
accuracy = trainer.accuracy(test_data, test_y)
print('Final model accuracy:', accuracy)
if __name__ == '__main__':
    main()
stuartarchibald/awkward-1.0 | 8,418,135,901,958 | 12751a0712946692288b30f2834fe70d3a7d66ed | c85ba56fb347f0ae42c2113c6af4653d888cb649 | /tests/test_0331-pandas-indexedarray.py | 539b2e8a5e164f3161c6938a44a2832cd632456f | [
"BSD-3-Clause"
] | permissive | https://github.com/stuartarchibald/awkward-1.0 | 96887e4e5327679dcaf856cfaf2218e131daacb6 | 0a378a1dd8fe93bf82859437116b21225844d365 | refs/heads/master | 2023-01-10T18:29:39.866273 | 2020-11-10T20:25:07 | 2020-11-10T20:25:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import sys
import pytest
import numpy
import awkward1
pandas = pytest.importorskip("pandas")
def test():
simple = awkward1.Array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5])
assert awkward1.to_pandas(simple)["values"].values.tolist() == [0.0, 1.1, 2.2, 3.3, 4.4, 5.5]
index = awkward1.layout.Index64(numpy.array([3, 3, 1, 5], dtype=numpy.int64))
indexed = awkward1.Array(awkward1.layout.IndexedArray64(index, simple.layout))
assert indexed.tolist() == [3.3, 3.3, 1.1, 5.5]
assert awkward1.to_pandas(indexed)["values"].values.tolist() == [3.3, 3.3, 1.1, 5.5]
tuples = awkward1.Array(awkward1.layout.RecordArray([simple.layout, simple.layout]))
assert awkward1.to_pandas(tuples)["1"].values.tolist() == [0.0, 1.1, 2.2, 3.3, 4.4, 5.5]
offsets = awkward1.layout.Index64(numpy.array([0, 1, 1, 3, 4], dtype=numpy.int64))
nested = awkward1.Array(awkward1.layout.ListOffsetArray64(offsets, indexed.layout))
assert awkward1.to_pandas(nested)["values"].values.tolist() == [3.3, 3.3, 1.1, 5.5]
offsets2 = awkward1.layout.Index64(numpy.array([0, 3, 3, 4, 6], dtype=numpy.int64))
nested2 = awkward1.Array(awkward1.layout.ListOffsetArray64(offsets2, tuples.layout))
assert awkward1.to_pandas(nested2)["1"].values.tolist() == [0.0, 1.1, 2.2, 3.3, 4.4, 5.5]
recrec = awkward1.Array([{"x": {"y": 1}}, {"x": {"y": 2}}, {"x": {"y": 3}}])
assert awkward1.to_pandas(recrec)["x", "y"].values.tolist() == [1, 2, 3]
recrec2 = awkward1.Array([{"x": {"a": 1, "b": 2}, "y": {"c": 3, "d": 4}}, {"x": {"a": 10, "b": 20}, "y": {"c": 30, "d": 40}}])
assert awkward1.to_pandas(recrec2)["y", "c"].values.tolist() == [3, 30]
recrec3 = awkward1.Array([{"x": 1, "y": {"c": 3, "d": 4}}, {"x": 10, "y": {"c": 30, "d": 40}}])
assert awkward1.to_pandas(recrec3)["y", "c"].values.tolist() == [3, 30]
tuptup = awkward1.Array([(1.0, (1.1, 1.2)), (2.0, (2.1, 2.2)), (3.0, (3.1, 3.2))])
assert awkward1.to_pandas(tuptup)["1", "0"].values.tolist() == [1.1, 2.1, 3.1]
recrec4 = awkward1.Array([[{"x": 1, "y": {"c": 3, "d": 4}}], [{"x": 10, "y": {"c": 30, "d": 40}}]])
assert awkward1.to_pandas(recrec4)["y", "c"].values.tolist() == [3, 30]
def test_broken():
ex = awkward1.Array([[1, 2, 3], [], [4, 5]])
p4 = awkward1.zip({"x": ex})
p4c = awkward1.cartesian({"a": p4, "b": p4})
df = awkward1.to_pandas(p4c)
assert df["a", "x"].values.tolist() == [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5]
assert df["b", "x"].values.tolist() == [1, 2, 3, 1, 2, 3, 1, 2, 3, 4, 5, 4, 5]
def test_union_to_record():
recordarray1 = awkward1.Array([{"x": 1, "y": 1.1}, {"x": 3, "y": 3.3}]).layout
recordarray2 = awkward1.Array([{"y": 2.2, "z": 999}]).layout
tags = awkward1.layout.Index8(numpy.array([0, 1, 0], dtype=numpy.int8))
index = awkward1.layout.Index64(numpy.array([0, 0, 1], dtype=numpy.int64))
unionarray = awkward1.layout.UnionArray8_64(tags, index, [recordarray1, recordarray2])
assert awkward1.to_list(unionarray) == [{"x": 1, "y": 1.1}, {"y": 2.2, "z": 999}, {"x": 3, "y": 3.3}]
converted = awkward1._util.union_to_record(unionarray, "values")
assert isinstance(converted, awkward1.layout.RecordArray)
assert awkward1.to_list(converted) == [{"x": 1, "y": 1.1, "z": None}, {"x": None, "y": 2.2, "z": 999}, {"x": 3, "y": 3.3, "z": None}]
otherarray = awkward1.Array(["one", "two"]).layout
tags2 = awkward1.layout.Index8(numpy.array([0, 2, 1, 2, 0], dtype=numpy.int8))
index2 = awkward1.layout.Index64(numpy.array([0, 0, 0, 1, 1], dtype=numpy.int64))
unionarray2 = awkward1.layout.UnionArray8_64(tags2, index2, [recordarray1, recordarray2, otherarray])
assert awkward1.to_list(unionarray2) == [{"x": 1, "y": 1.1}, "one", {"y": 2.2, "z": 999}, "two", {"x": 3, "y": 3.3}]
converted2 = awkward1._util.union_to_record(unionarray2, "values")
assert isinstance(converted2, awkward1.layout.RecordArray)
assert awkward1.to_list(converted2) == [{"x": 1, "y": 1.1, "z": None, "values": None}, {"x": None, "y": None, "z": None, "values": "one"}, {"x": None, "y": 2.2, "z": 999, "values": None}, {"x": None, "y": None, "z": None, "values": "two"}, {"x": 3, "y": 3.3, "z": None, "values": None}]
df_unionarray = awkward1.to_pandas(unionarray)
numpy.testing.assert_array_equal(df_unionarray["x"].values, numpy.array([1, numpy.nan, 3]))
numpy.testing.assert_array_equal(df_unionarray["y"].values, numpy.array([1.1, 2.2, 3.3]))
numpy.testing.assert_array_equal(df_unionarray["z"].values, numpy.array([numpy.nan, 999, numpy.nan]))
df_unionarray2 = awkward1.to_pandas(unionarray2)
numpy.testing.assert_array_equal(df_unionarray2["x"].values, [1, numpy.nan, numpy.nan, numpy.nan, 3])
numpy.testing.assert_array_equal(df_unionarray2["y"].values, [1.1, numpy.nan, 2.2, numpy.nan, 3.3])
numpy.testing.assert_array_equal(df_unionarray2["z"].values, [numpy.nan, numpy.nan, 999, numpy.nan, numpy.nan])
numpy.testing.assert_array_equal(df_unionarray2["values"].values, ["nan", "one", "nan", "two", "nan"])
| UTF-8 | Python | false | false | 5,185 | py | 22 | test_0331-pandas-indexedarray.py | 20 | 0.603086 | 0.524012 | 0 | 91 | 55.978022 | 290 |
elvincalex/Assignment1 | 17,961,553,266,997 | da9be0a6f69bcdce02e1a14d2a922e5983b506d1 | 0c2d0d4afeb1adb7546a3bb187d07f3e8840bd74 | /main.py | b7deff4db8852742f89d46668b15c58d92284f95 | [] | no_license | https://github.com/elvincalex/Assignment1 | a53cd901425cb13e5c5f722957fbd7e713891499 | 9e18af2a4e049fd4c1bbb24b5a812036a5948bbc | refs/heads/master | 2023-07-28T17:56:30.436422 | 2021-09-11T10:20:10 | 2021-09-11T10:20:10 | 404,648,507 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
import sys
history = {0:''}
choices_dict = {"r": "Rock", "p": "Paper", "s": "Scissors"}
beats = {'r': 'p', 's': 'r', 'p': 's'}
choices = ["r", "p", "s"]
def play_again():
while True:
print("\n\nPlay another game?\nEnter y for Yes or n for No")
print("\n--------------------------------")
play_again_user_response = input("...")
if (play_again_user_response == "y") or (play_again_user_response == "Y"):
num_games_func()
break
if (play_again_user_response == "n") or (play_again_user_response == "N"):
print("Are you sure you want to quit?\nEnter y to confirm")
play_again_user_response_confirm = input("...")
if (play_again_user_response_confirm == "y") or (play_again_user_response_confirm == "Y"):
sys.exit()
continue
print("Error: Invalid input\nPlease enter y or n")
def result(initial_rounds_num, final_scores):
print("\n--------------------------------")
if final_scores[0] > final_scores[1]:
print("Player Wins with score:"+ str(final_scores[0]) + "/" + str(initial_rounds_num)+"\nComputer lost with a score of " + str(final_scores[1]) +"/"+str(initial_rounds_num) )
elif (final_scores[0] < final_scores[1]):
print("Computer Wins with a score of " + str(final_scores[1]) +"/"+str(initial_rounds_num)+"\nPlayer lost with score:"+ str(final_scores[0]) + "/" + str(initial_rounds_num))
else:
print("Player and Computer have drawn at a score of " + str(final_scores[0]) + "/" + str( initial_rounds_num))
while True:
print("Enter the round which you need more information, to exit enter 99")
round_check = input("..")
if (round_check.isdecimal()):
round_check = int(round_check)
if(round_check==99):
play_again()
            elif (round_check > 0) and (round_check <= initial_rounds_num):
                uc = history[round_check]
                uc = list(uc)
                ucc = uc[0][1]
                cc = uc[0][0]
                outc = uc[0][2]
                print("Player choice:"+ucc+"\nComputer Choice:"+cc+"\n"+outc+" the round\n")
            else:
                print("Error: Round out of range")
def game_run(rounds, scores):
initial_rounds_num = rounds
decrease_rounds = True
i = 0
while True:
comp_choice = random.choice(choices)
user_input = input("...")
if (user_input not in choices):
print("Enter a valid input")
decrease_rounds = False
else:
decrease_rounds = True
if (comp_choice == user_input):
i = i + 1
history[i] = {(choices_dict[comp_choice],choices_dict[user_input],'Tied')}
elif comp_choice == beats[user_input]:
i = i + 1
scores[1] += 1
history[i] = {(choices_dict[comp_choice], choices_dict[user_input],'Computer wins')}
else:
i = i + 1
scores[0] += 1
history[i] = {(choices_dict[comp_choice], choices_dict[user_input],"Player wins")}
if (rounds == 1 and decrease_rounds):
final_scores = scores
result(initial_rounds_num, final_scores)
if decrease_rounds:
rounds -= 1
def start(rounds, scores):
print( "\n-------------------\nBest of " + str(rounds) + ":\n-------------------\nEnter:\n\nr for Rock\np for Paper\ns for Scissors")
game_run(rounds, scores)
def num_games_func():
print("This is a new game :\n")
scores = [0, 0]
while True:
num_games_input = input("...")
        if (num_games_input.isdecimal()):
            num_games_input = int(num_games_input)
            if (num_games_input > 0):
                start(num_games_input, scores)
            else:
                print("Error: Invalid input\nEnter a valid input")
        else:
            print("Error: Invalid input\nEnter a valid input")
print("\nEnter the number of rounds for Rock, Paper, Scissors\n")
num_games_func()
| UTF-8 | Python | false | false | 4,254 | py | 2 | main.py | 1 | 0.5315 | 0.522332 | 0 | 95 | 43.778947 | 183 |
harimohanraj/neural-network-reference | 1,958,505,102,947 | 8a26dc4245dee6bf8cbdc3595108794935d88a56 | 101a060b2521ec451984128f2056666077cc010f | /networks.py | da13ed424bb2853b3f988c5e5f713e5f2cfeaedd | [] | no_license | https://github.com/harimohanraj/neural-network-reference | 9c8c75c9f38ed56c5f21b4e3d1c36d28a19a4a9b | c946c825fb703675ad4af982dcd85c911d1d4421 | refs/heads/master | 2021-01-09T21:42:45.879330 | 2016-03-09T20:57:27 | 2016-03-09T20:57:27 | 49,536,393 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 17:37:32 2016
@author: Hari
"""
import numpy as np
from layers import *
from optimizations import *
from costs import *
class Network():
def __init__(self, name, optimizer, cost):
self.name = name
self.layers = []
self.optimizer = optimizer
self.cost = cost
self.weights = None
self.biases = None
# self.dropout = True
# self.regularization_type = "L2"
def add_layer(self, layer):
self.layers.append(layer)
def generate_weights_and_biases(self, training_x):
input_size = training_x[0].shape[0]
layer_sizes = [input_size] + [layer.size for layer in self.layers]
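        # scale initial weights by 1/sqrt(fan_in) (Xavier-style) so early
        # activations keep roughly unit variance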
weights = [np.random.randn(y, x) / np.sqrt(x) \
for x, y in zip(layer_sizes, layer_sizes[1:])]
biases = [np.random.randn(1) for y in layer_sizes[1:]]
return weights, biases
def backpropagation(self, x, y):
# 0. Initialize empty arrays to hold gradients
gradients_at_w = [np.zeros(layer.size) for layer in self.layers]
gradients_at_b = [np.zeros(1) for layer in self.layers]
# 1. Initialize first activation as training example
activations = []
weighted_inputs = []
activation = x
# 2. Feedforward
for layer in self.layers:
weighted_input = np.dot(layer.weights, activation)
weighted_inputs.append(weighted_input)
activation = layer.function(weighted_input)
activations.append(activation)
# 3. Compute output "error" (node delta) and gradients at output
sigma_prime_of_wi = self.layers[-1].derivative(weighted_inputs[-1])
error_delta = self.cost.derivative(y, activations[-1]) * sigma_prime_of_wi
gradients_at_w[-1] = np.dot(error_delta, activations[-2].T)
gradients_at_b[-1] = error_delta
# 4. Backpropagate error (node deltas)
for i in range(2,len(self.layers)):
sigma_prime_of_wi = self.layers[-i].derivative(weighted_inputs[-i])
error_delta = np.dot(self.layers[-i+1].weights.T, error_delta) * sigma_prime_of_wi
gradients_at_w[-i] = np.dot(error_delta, activations[-i-1].T)
gradients_at_b[-i]= error_delta
# 5. Output gradient
return gradients_at_w, gradients_at_b
def train(self, training_x, training_y, learning_rate, \
batch_size=10, iterations=5000):
# initialize weights
self.weights, self.biases = self.generate_weights_and_biases(training_x)
# stochastic gradient descent
        for i in range(0, iterations, batch_size):
            # shuffle data and create batch
            data = np.concatenate((training_x, training_y), axis=1)
            np.random.shuffle(data)
            batch = data[:batch_size]
            grad_b = [np.zeros(b.shape) for b in self.biases]
            grad_w = [np.zeros(w.shape) for w in self.weights]
            n_features = training_x.shape[1]
            for row in batch:
                # split each shuffled row back into its input and target parts
                x = row[:n_features]
                y = row[n_features:]
                delta_w, delta_b = self.backpropagation(x, y)
                grad_w = [nw+dnw for nw, dnw in zip(grad_w, delta_w)]
                grad_b = [nb+dnb for nb, dnb in zip(grad_b, delta_b)]
# update weights and biases
self.weights = [w-learning_rate*grad for w,grad in zip(self.weights, grad_w)]
self.biases = [b-learning_rate*grad for b,grad in zip(self.biases, grad_b)]
# calculate training cost, etc for epoch
def __str__(self):
opt_method = "Optimizer: " + self.optimizer + "\n"
cost_func = "Cost Function: " + self.cost.__class__.__name__ + "\n"
architecture = "Input Layer => " + " => ".join([layer.name for layer in self.layers]) + "\n\n"
layer_list = "\n\n".join([str(layer) for layer in self.layers]) + "\n\n"
return "~" + self.name + "~" + "\n" + opt_method + cost_func + "\n" + \
architecture + layer_list
# tests
x = np.array([[1,0],[1,1],[0,1],[0,0]])
y = np.array([[1,0],[1,1],[0,1],[1,1]])
network = Network(name="Test Network", \
optimizer="SGD", \
cost=MeanSquaredError())
hidden_layer1 = sigmoidLayer(3, "Hidden Layer 1")
output_layer = sigmoidLayer(2, "Output Layer")
network.add_layer(hidden_layer1)
network.add_layer(output_layer)
network.train(x, y, 0.5)  # learning rate is required; 0.5 here is an arbitrary smoke-test value
| UTF-8 | Python | false | false | 4,662 | py | 5 | networks.py | 4 | 0.556628 | 0.5429 | 0 | 121 | 37.520661 | 102 |
kp258/gitrepo | 17,179,886,986 | 51b1896030fe9ede6b386aaf632c736ae1c459e6 | 9432e9c485c2055b0bd0559e3ef8ffee2e3af9d0 | /package/alerts.py | 15e4d0dffee265468914db1199a0271c19af4344 | [] | no_license | https://github.com/kp258/gitrepo | 4b9abd62bcf1f1af22e6bda0c29127b1e2a7d1d8 | caa4cbf52489ff957ebbb4477aa23882cda00b62 | refs/heads/master | 2016-09-19T08:12:16.137000 | 2016-09-08T12:28:03 | 2016-09-08T12:28:03 | 67,767,036 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def get_alert_level(obj_package):
"""
    determines the alert level based on the current location vs the
    destination of the given package:
    if the package is at its destination, the level is L1; otherwise, L2
"""
if (obj_package.get('cn', '') in obj_package.cs.get('sl', '') and
(obj_package.get('cn', None))):
level = u'L1'
else:
level = u'L2'
return level
| UTF-8 | Python | false | false | 411 | py | 1,103 | alerts.py | 744 | 0.588808 | 0.579075 | 0 | 13 | 30.615385 | 76 |
aarjavjain1/Team-Website | 5,592,047,459,049 | 6f957e9579f63f9fab43897b00a60ab74248062c | 4754e61096d32950a06be9c77f44bb15fca43d68 | /WebApp/urls.py | 26217e172dbb7adddaaa086501bcf3a72b28df48 | [
"Apache-2.0"
] | permissive | https://github.com/aarjavjain1/Team-Website | c94e173de68a43cd8986534d8bf9b351f762dd34 | e74e10a95d1ffad89eaebb8cc530f3df1280e691 | refs/heads/master | 2020-06-10T03:17:58.222249 | 2019-06-24T19:36:16 | 2019-06-24T19:36:16 | 193,565,760 | 0 | 0 | Apache-2.0 | true | 2019-06-24T19:23:44 | 2019-06-24T19:23:43 | 2019-06-23T22:16:44 | 2019-06-23T22:16:42 | 20,126 | 0 | 0 | 0 | null | false | false | from django.urls import path
from . import views
app_name = 'webapp'
urlpatterns = [
path('', views.index, name='index'),
path('timeline/', views.timeline, name='timeline'),
]
| UTF-8 | Python | false | false | 186 | py | 9 | urls.py | 3 | 0.66129 | 0.66129 | 0 | 9 | 19.666667 | 55 |
dmarcos1982/Felicidades_Miguel_Telegram | 2,353,642,104,277 | 4e007161004928b5194676160cef5690ad02dce3 | 8828c6ea2cbc7ccbb3210a38ea13855f016b1f49 | /game_NoToken.py | 85592b40aceb5dbb5576cf87ad4f45accf4eb92a | [] | no_license | https://github.com/dmarcos1982/Felicidades_Miguel_Telegram | 4d15855b39ed57e82cc87d6dc128bc3f0a41e5a7 | f44321737032640287f30cb465e309954c494668 | refs/heads/master | 2020-03-19T16:32:19.156314 | 2018-06-10T08:46:37 | 2018-06-10T08:46:37 | 136,718,772 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# Import the map
import World.map as map
# Bot API library
import telebot
# Types for the bot API
from telebot import types
# Library to keep the program that controls the bot from ending
import time
# Library for the timers
from threading import Timer
# Library for random numbers
from random import randint
# Libraries to run the bot in threads
import threading
from time import sleep
# Token and other bot parameters
TOKEN = ''
BOT_INTERVAL = 3
BOT_TIMEOUT = 30
# Create the player class
class player():
    def __init__(self):
        self.location = 'a0'
        self.gameOver = False
        self.inventory = []
        self.examenStarted = False
        # handle to the running exam timer (set in room_examen) so that
        # later messages can cancel the timer that is actually running
        self.tExamen = None
        self.bjPlayedRounds = 0
        self.s3PlayedRounds = 0
        self.trasteroQuestionsAnswered = 0
        self.testarrosaSung = 0
        self.diabolicHen = 0
        self.falloAlfonso8 = 0
# Instantiate the player
myPlayer = player()
# Function to reset the game state
def game_initialize():
    myPlayer.location = 'a0'
    myPlayer.gameOver = False
    myPlayer.inventory = []
    myPlayer.examenStarted = False
    # cancel any exam timer that may still be running from a previous game
    if myPlayer.tExamen is not None:
        myPlayer.tExamen.cancel()
        myPlayer.tExamen = None
    myPlayer.bjPlayedRounds = 0
    myPlayer.s3PlayedRounds = 0
    myPlayer.trasteroQuestionsAnswered = 0
    myPlayer.testarrosaSung = 0
    myPlayer.diabolicHen = 0
    myPlayer.falloAlfonso8 = 0
# List of Spanish articles, used when parsing object names from player input
articleList = ['el', 'la', 'los', 'las', 'un', 'una']
# Create our bot object.
bot = telebot.TeleBot(TOKEN)
def bot_polling():
    global bot  # rebind the module-level bot so the rest of the code talks to the live instance
print("Starting bot polling now")
while True:
try:
print("New @jblasbot instance started")
bot = telebot.TeleBot(TOKEN) #Generate new bot instance
bot.set_update_listener(listener)
bot.polling(none_stop=True, interval=BOT_INTERVAL, timeout=BOT_TIMEOUT)
except Exception as ex: #Error in polling
print("Bot polling failed, restarting in {}sec. Error:\n{}".format(BOT_TIMEOUT, ex))
bot.stop_polling()
sleep(BOT_TIMEOUT)
else: #Clean exit
bot.stop_polling()
print("Bot polling loop finished")
break #End loop
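# Minimal usage sketch (an assumption -- the real entry point and the
# `listener` callback are expected to live further down this file):
# run the polling loop in a background thread so the main thread stays free.
#
#   polling_thread = threading.Thread(target=bot_polling)
#   polling_thread.daemon = True
#   polling_thread.start()
#   while True:
#       sleep(120)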
# Introduction text
display_intro = """Has tenido la inmensa suerte de encontrar a tu media naranja y, además, esta chica que te comprende, que llena tus noches y tus sueños, ha accedido a casarse contigo.\n
Organizar la boda no ha sido tan fácil como te imaginabas en un principio, pero todas las dificultades ya han sido vencidas y el gran día ha llegado.\n
Esta mañana Estefanía te ha dado el 'Sí, quiero' en su resplandeciente vestido blanco, los ojos le brillaban como nunca y tú… Tú estás en una nube de la que no te quieres bajar. Estás en el salón de tu boda, ha empezado la barra libre hace un ratito y todos tus invitados bailan o, al menos, intentan mover la cabeza y los dedos de los pies al ritmo de la música mientras consumen con fruición gin-tonics, whysky-colas y vodka-naranjas.\n
Tu deslumbrante esposa está dándolo todo en la pista con sus amigas mientras las inconfundibles voces de 'Siempre Así' a todo volumen hacen casi imposible el mantener una conversación. Así, no has entendido una sola palabra de las que te ha dicho un joven, más o menos de la edad de Estefanía, al tiempo que te encasquetaba un voluminoso paquete envuelto en papel de regalo. No tienes ni idea de quién era el chico, pero bueno, te pasa con muchos de los invitados, así que das por supuesto que se trata de algún primo lejano de tu mujer.\n
Como la canción de 'Siempre Así' es larguísima y tú no tienes nada mejor que hacer en ese momento, empiezas a romper el envoltorio del regalo que acabas de recibir, mientras recuerdas la sospechosa mirada de la persona que te lo ha dado… No sabes decir el qué pero hay algo raro en ese chico… Tienes ante ti una caja de cartón normal y corriente. Levantas la tapa y…\n\n"""
# Function to show the help
def display_help(m):
bot.send_message(m.chat.id, '\n- Siempre que quieras indicar algo con un verbo, usa el *verbo en infinitivo*.\n- Las opciones para moverte son los 4 puntos cardinales.\n- Hay preguntas que se pueden responder con *sí* o *no*.', parse_mode='Markdown')
# Function to show the room name and description
def introduce_room(m):
bot.send_message(m.chat.id, '\n' + map.zonemap[myPlayer.location]['NAME'], parse_mode='Markdown')
time.sleep(1)
bot.send_message(m.chat.id, '\n' + map.zonemap[myPlayer.location]['DESCRIPTION'], parse_mode='Markdown')
# Teleco
# I WILL PROBABLY CHANGE IT SO THE ANSWER CAN BE GIVEN DIRECTLY WITH THE VALUE,
# WITHOUT HAVING TO TYPE RESPONDER, CONTESTAR OR ESCRIBIR FIRST
def room_examen(m):
    # If the timer expires, we go to the Alfonso VIII
    def timeout():
        if myPlayer.examenStarted is True:
            myPlayer.examenStarted = False
            bot.send_message(m.chat.id, "El tiempo del examen ha expirado. Juan Blas va recogiendo los exámenes por los pupitres. Cuando llega a tu puesto, tú te aferras al folio porque no lo has rellenado y realmente quieres hacer ese examen y terminar la carrera de una puñetera vez. El breve tira y afloja es vencido por Juan Blas, que tira con decisión de la hoja de papel y te arranca el examen de las manos. ¿Y qué vas a hacer ahora? Como no tienes respuesta a esa pregunta, decides ir a un lugar en el que te sientes seguro...")
            map.zonemap[myPlayer.location]['VISITED'] = True
            myPlayer.location = 'z0'
            time.sleep(2)
            introduce_room(m)
    # Start the exam timer only on the first message in this room, and keep
    # the handle on the player so later messages cancel the running timer
    # instead of a freshly created one
    if myPlayer.examenStarted is False:
        myPlayer.examenStarted = True
        myPlayer.tExamen = Timer(300.0, timeout)
        myPlayer.tExamen.start()
    # Open the image containing the exam problem
    cuadripolo = open('Recursos/cuadripolo.png', 'rb')
    # Commands we understand in this room
    acceptableExamenActions = ['coger', 'responder', 'contestar', 'escribir', 'decir']
    # Take the text we were sent and split it into words
    mSplit = m.text.lower().split()
    # If the first word is not in the list of commands we understand, do nothing
    if mSplit[0] not in acceptableExamenActions:
        bot.send_message(m.chat.id, "No entiendo eso que dices")
    # If they want to pick something up
    elif mSplit[0] == "coger":
        # If they want to pick up the pen
        if "boli" in mSplit:
            # If they haven't picked up the pen yet
            if 'boli' not in myPlayer.inventory:
                # Add the pen to the inventory
                myPlayer.inventory.append("boli")
                # Show them the exam question
                bot.send_message(m.chat.id, "Con mano temblorosa coges el bolígrafo y lees el enunciado de la única pregunta que hay en el folio: Una instalación de telefonía está compuesta por un cuadripolo transmisor, un generador y un receptor. Determinar la potencia máxima que puede recibir el receptor.")
                bot.send_photo(m.chat.id, cuadripolo)
            else:
                bot.send_message(m.chat.id, "Ya tienes el boli")
        # If what they want to pick up doesn't exist, do nothing
        else:
            bot.send_message(m.chat.id, "No veo eso que dices")
    # If they want to answer the exam
    elif ((mSplit[0] == "responder") or (mSplit[0] == "contestar") or (mSplit[0] == "escribir")):
        # If they haven't picked up the pen first, they can't do it
        if 'boli' not in myPlayer.inventory:
            bot.send_message(m.chat.id, "No se como vas a hacer eso sin el boli")
        else:
            # If they answer correctly, stop the timer and move on to the Tenere
            # (the len() checks guard against answers with too few words)
            if (len(mSplit) > 1 and ((mSplit[1] == '49uw') or (len(mSplit) > 2 and (mSplit[1] == '49') and (mSplit[2] == 'uw')))):
                myPlayer.tExamen.cancel()
                myPlayer.examenStarted = False
                bot.send_message(m.chat.id, "¡Enhorabuena, Miguel! Has terminado la carrera y sales a celebrarlo con tus amigos (bueno, sabes que tienes que intentar volver a tu boda de alguna manera, pero ¿a quién no le apetece reverdecer viejos laureles de vez en cuando?).")
                map.zonemap[myPlayer.location]['VISITED'] = True
                myPlayer.location = 'b3'
                time.sleep(2)
                introduce_room(m)
            # If they answer incorrectly, they die miserably
            else:
                bot.send_message(m.chat.id, 'Tu respuesta es tan absurda que cuando Juan Blas corrige el examen monta en cólera. En tantos años de exposición a ondas electromagnéticas, ha desarrollado superpoderes como los de Hulk y el mal humor le hace multiplicar su tamaño por 10000 y su fuerza por 1E-06. El edificio de teleco revienta con su crecimiento y toda Castilla y León desaparece con el primer paso que da. Con el segundo paso hace desestabilizar el eje de La Tierra, que interrumpe su rotación y se sale de su órbita. Aún mucho antes de que el planeta azul llegue a chocar contra el rojo, la vida en La Tierra ya se ha hecho imposible debido a los desórdenes climatológicos. Todos los seres vivos, incluidos Estefanía y tú, *desaparecen miserablemente*.\n\nIntroduce _/start_ para iniciar de nuevo el juego.', parse_mode='Markdown')
                game_initialize()
    # If they want to talk to Juan Blas
    elif mSplit[0] == "decir":
        # If they decide to "no presentar", stop the timer and go to the Alfonso VIII
        if (len(mSplit) > 2 and (mSplit[1] == 'no') and (mSplit[2] == 'presentar')):
            myPlayer.tExamen.cancel()
            myPlayer.examenStarted = False
            bot.send_message(m.chat.id, "Entregas el examen con una mezcla entre alivio y tristeza por no haber sido capaz de completarlo. Con la mente hecha un lío decides vagar sin rumbo fijo...")
            map.zonemap[myPlayer.location]['VISITED'] = True
            myPlayer.location = 'z0'
            time.sleep(2)
            introduce_room(m)
        # Anything else they say, Juan Blas tells them there is only one thing he understands
        else:
            bot.send_message(m.chat.id, "Juan Blas te mira con mala cara y te dice: _Si quieres irte y 'no presentar' no tienes mas que decirlo_", parse_mode='Markdown')
# Alfonso VIII
def room_alfonso8(m):
    # Commands we understand in this room
    acceptableAlfonso8Actions = ['poner']
    # Take the text we were sent and split it into words
    mSplit = m.text.lower().split()
    # Mark the room as visited
    map.zonemap[myPlayer.location]['VISITED'] = True
    # If the first word is not in the list of commands we understand, do nothing
    if mSplit[0] not in acceptableAlfonso8Actions:
if 0 <= myPlayer.falloAlfonso8 <= 2:
bot.send_message(m.chat.id, "No entiendo eso que dices.")
sleep(2)
bot.send_message(m.chat.id, "Vamos, que ya venden turrones en los supermercados.")
myPlayer.falloAlfonso8 += 1
else:
bot.send_message(m.chat.id, 'El director te arrea el collejazo del milenio, el cual te deja sin sentido. Nunca lo vuelves a recuperar y *mueres miserablemente*.\n\nIntroduce _/start_ para iniciar de nuevo el juego.', parse_mode='Markdown')
game_initialize()
else:
if ((('decoracion' in mSplit) or (u'decoración' in mSplit)) and (('navidad' in mSplit) or (u'navideña' in mSplit))):
bot.send_message(m.chat.id, 'El suelo de la Alfonso VIII siempre ha tenido fama por su perpetuo lustre. La escalera a la que te has subido para poner guirnaldas en el techo resbala, se abre como una cáscara de plátano y caes al suelo. El golpe te *mata miserablemente*.\n\nIntroduce _/start_ para iniciar de nuevo el juego.', parse_mode='Markdown')
game_initialize()
else:
bot.send_message(m.chat.id, 'El director te arrea el collejazo del milenio, el cual te deja sin sentido. Nunca lo vuelves a recuperar y *mueres miserablemente*.\n\nIntroduce _/start_ para iniciar de nuevo el juego.', parse_mode='Markdown')
game_initialize()
# Tenere
def room_tenere(m):
    # Commands we understand in this room
    acceptableTenereActions = ['si', u'sí', 'no', 'plantarme', 'plantarse', 'pedir']
    # Take the text we were sent and split it into words
    mSplit = m.text.lower().split()
    # Blackjack loop
def blackjack_loop():
rNumber = randint(0, 9)
if 0 <= rNumber <= 4:
bot.send_message(m.chat.id, 'El crupier reparte de nuevo. Tienes una buena jugada en la mesa, así que pides otra carta. Pero desde luego hoy no es tu noche y te pasas de nuevo. Pero este estúpido juego no va a poder contigo, ¿o sí? *¿Deseas jugar otra partida?*', parse_mode='Markdown')
elif 5 <= rNumber <= 9:
bot.send_message(m.chat.id, 'Esta vez decides ser más conservador y te quedas cerca del BlackJack. Cuando el crupier levanta su carta observas con incredulidad cómo la suma de sus cartas es 21. Encima pone una sonrisilla de suficiencia que le borrarías de la cara con un guantazo. El crupier recoge la mesa y te dice: *¿Deseas jugar otra partida? Esto al final es cuestión de estadística...*', parse_mode='Markdown')
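    # The blackjack sequence is a small state machine driven by
    # myPlayer.bjPlayedRounds: round 0 deals the opening hand, rounds 1-3
    # are rigged losses, round 4 is the forced win and round 5 lets the
    # player claim the prize.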
    # Mark the room as visited
    map.zonemap[myPlayer.location]['VISITED'] = True
    # If the first word is not in the list of commands we understand, do nothing
    if mSplit[0] not in acceptableTenereActions:
        bot.send_message(m.chat.id, "No entiendo eso que dices")
    # The first time they answer, only 'pedir carta', 'plantarme' or 'plantarse' are valid
elif myPlayer.bjPlayedRounds == 0:
        # If they stand, we go to the Alfonso VIII
if ((mSplit[0] == 'plantarme') or (mSplit[0] == 'plantarse')):
bot.send_message(m.chat.id, "No estás para jueguecitos, así que te plantas y que sea lo que dios quiera. Evidentemente pierdes, pero casi mejor, ¿no? Asqueado de cómo se está desarrollando el día en esta realidad paralela decides ir a un lugar seguro y reconfortante.")
map.zonemap[myPlayer.location]['VISITED'] = True
myPlayer.location = 'z0'
time.sleep(2)
introduce_room(m)
elif ((mSplit[0] == 'si') or (mSplit[0] == u'sí')):
bot.send_message(m.chat.id, '¿Sí qué?')
elif mSplit[0] == 'no':
bot.send_message(m.chat.id, '¿No qué?')
elif mSplit[0] == 'pedir':
if "carta" in mSplit:
bot.send_message (m.chat.id, 'Parece que la suerte del exámen no te ha acompañado ahora, sacas una figura y te pasas. El crupier te desea mejor suerte la próxima vez y antes de volver a repartir te pregunta: *¿Deseas jugar otra partida?*', parse_mode='Markdown')
myPlayer.bjPlayedRounds += 1
else:
bot.send_message(m.chat.id, "No veo eso que dices")
else:
bot.send_message(m.chat.id, "No entiendo eso que dices")
elif 1 <= myPlayer.bjPlayedRounds <= 3:
        # If the player decides to play, run the blackjack loop
if ((mSplit[0] == 'si') or (mSplit[0] == u'sí')):
blackjack_loop()
myPlayer.bjPlayedRounds += 1
        # If the player decides not to play, go to the Alfonso VIII
elif mSplit[0] == 'no':
bot.send_message(m.chat.id, "Parece que el croupier está riéndose de tí, o haciendo trampas (o ambas cosas), así que decides que ya es hora de irte a descansar...")
map.zonemap[myPlayer.location]['VISITED'] = True
myPlayer.location = 'z0'
time.sleep(2)
introduce_room(m)
else:
bot.send_message(m.chat.id, "No entiendo eso que dices")
    # Once the required number of rounds is reached, the player can choose the prize
elif myPlayer.bjPlayedRounds == 4:
bot.send_message(m.chat.id, 'El crupier reparte las cartas y tienes un 4 y un 6 sobre la mesa. No te queda otra que pedir carta... así que la pides ¡y sale un as! Por fin la suerte (esa perra caprichosa) ha decidido cambiar de bando. El crupier, ya cansado y con ganas de irse a su casa te da la enhorabuena y te pregunta: *¿Qué quieres pedir de premio?*', parse_mode='Markdown')
myPlayer.bjPlayedRounds += 1
elif myPlayer.bjPlayedRounds == 5:
if mSplit[0] == 'pedir':
if ((' '.join(mSplit[1::]) == 'dos botellas de bourbon') or (' '.join(mSplit[1::]) == '2 botellas de bourbon')):
bot.send_message(m.chat.id, 'El camarero te da tus dos botellas de Bourbon. Con ellas bajo el brazo decides que es hora de cambiar de garito, así que sales a la plaza a decidir cual será tu próximo destino.')
                # Add the two bottles of bourbon to the inventory
myPlayer.inventory.append("dos botellas de bourbon")
                # Mark the room as solved
map.zonemap[myPlayer.location]['SOLVED'] = True
                # Move the player to the square
myPlayer.location = 'b0'
time.sleep(2)
introduce_room(m)
else:
bot.send_message(m.chat.id, 'De eso no tenemos, pide otra cosa')
else:
bot.send_message(m.chat.id, "No entiendo eso que dices")
# La Ducha
def room_ducha(m):
    # Commands we understand in this room
acceptableDuchaActions = ['tirar']
    # Take the received text and split it into words
mSplit = m.text.lower().split()
    # Mark the room as visited
map.zonemap[myPlayer.location]['VISITED'] = True
    # If the first word is not in the list of commands we understand, do nothing
if mSplit[0] not in acceptableDuchaActions:
bot.send_message(m.chat.id, "No entiendo eso que dices")
else:
if 'dados' in mSplit:
if 0 <= myPlayer.s3PlayedRounds <= 4:
dice1Number = randint(1, 6)
dice2Number = randint(1, 6)
                # If the dice sum is a multiple of 3, the player drinks
if ((dice1Number + dice2Number)%3) == 0:
bot.send_message(m.chat.id, '¡Has sacado un ' + str(dice1Number+dice2Number) + '! Procedes a beberte ese sol y sombra que te toca.')
myPlayer.s3PlayedRounds += 1
                # Otherwise, show the sum
else:
bot.send_message(m.chat.id, 'Sacas un ' + str(dice1Number+dice2Number))
            # After 5 rounds, move on to La Ducha's bathroom
if myPlayer.s3PlayedRounds == 5:
bot.send_message(m.chat.id, 'Ha sido divertido pero basta ya de jueguecitos por hoy. La verdad es que ya llevas un buen rato bebiendo y el señor Roca te llama a gritos, por lo que entras en el baño.')
                # Move the player to the bathroom
myPlayer.location = 'b4a'
time.sleep(2)
introduce_room(m)
        # Nothing but the dice can be thrown
else:
bot.send_message(m.chat.id, "No entiendo eso que dices")
# La Ducha's bathroom
def room_bano_ducha(m):
    # Commands we understand in this room
acceptableBanoDuchaActions = ['subir', 'subirte', 'abrir', 'tirar']
    # Take the received text and split it into words
mSplit = m.text.lower().split()
    # If the first word is not in the list of commands we understand, do nothing
if mSplit[0] not in acceptableBanoDuchaActions:
bot.send_message(m.chat.id, "No entiendo eso que dices")
    # Climbing onto the toilet kills the player miserably
elif ((mSplit[0] == 'subir') or (mSplit[0] == 'subirte')):
if 'retrete' in mSplit:
bot.send_message(m.chat.id, 'La endeble tapa se hunde y te quedas atascado. Gritas pidiendo auxilio pero nadie puede entrar a rescatarte porque la puerta del baño está atascada. Pasa el tiempo, dejas de sentir las piernas. En cuestión de horas la gangrena te corroe y *mueres miserablemente*.\n\nIntroduce _/start_ para iniciar de nuevo el juego.', parse_mode='Markdown')
game_initialize()
else:
bot.send_message(m.chat.id, 'No veo eso que dices.')
    # Pulling the chain kills the player miserably
elif mSplit[0] == 'tirar':
if 'cadena' in mSplit:
bot.send_message(m.chat.id, 'Como estás un poco borracho, te haces un lío con la cadena y acabas ahorcándote. *Mueres miserablemente*.\n\nIntroduce _/start_ para iniciar de nuevo el juego.', parse_mode='Markdown')
game_initialize()
else:
bot.send_message(m.chat.id, 'No veo eso que dices.')
    # Opening the tap solves the puzzle and returns the player to the square
elif mSplit[0] == 'abrir':
if 'grifo' in mSplit:
bot.send_message(m.chat.id, '¡Qué astuto, Miguel! Esta solución digna de dibujos animados es la que te salva la vida. El baño se va inundando poco a poco. Te mantienes a flote y, cuando el nivel del agua es lo suficientemente elevado, consigues salir por la ventana. ¡Enhorabuena! Pero ahora estás completamente calado… ni que acabaras de salir de La Ducha (LoL). Así no puedes volver a tu boda, de ninguna manera, qué dirá tu suegra. Lo mejor es que sigas de bares a ver si algún camarero amigo te puede dejar algo con lo que secarte, aunque sea el trapo de secar los vasos, que no ha visto una lavadora desde 1999.')
myPlayer.location = 'b0'
time.sleep(2)
introduce_room(m)
else:
bot.send_message(m.chat.id, 'No veo eso que dices.')
# El Trastero
def room_trastero(m):
    # Commands we understand in this room
acceptableTrasteroActions = ['pedir', 'decir', 'hablar', 'si', u'sí', 'no', '24', 'veinticuatro', '10', 'diez', '4', 'cuatro']
    # Take the received text and split it into words
mSplit = m.text.lower().split()
    # Mark the room as visited
map.zonemap[myPlayer.location]['VISITED'] = True
    # If the first word is not in the list of commands we understand, do nothing
if mSplit[0] not in acceptableTrasteroActions:
bot.send_message(m.chat.id, "No entiendo eso que dices")
    # If the player has not yet said whether they dare
elif myPlayer.trasteroQuestionsAnswered == 0:
if ((mSplit[0] == 'si') or (mSplit[0] == u'sí')):
bot.send_message(m.chat.id, '¿Sí qué?')
elif mSplit[0] == 'no':
bot.send_message(m.chat.id, '¿No qué?')
elif ((mSplit[0] == 'pedir') and ('llaves' not in mSplit)):
bot.send_message(m.chat.id, 'No veo eso que dices.')
else:
bot.send_message(m.chat.id, 'Sí, efectivamente tus amigos me han dejado tus llaves, pero me han pagado muy bien para que no te las dé a no ser que aciertes la respuesta a 3 preguntas… un poco estúpidas la verdad… pero qué le voy a hacer, no seré yo el que discuta el color del dinero… ¿Te atreves?')
myPlayer.trasteroQuestionsAnswered += 1
    # Once the game has been proposed. First question
elif myPlayer.trasteroQuestionsAnswered == 1:
if ((mSplit[0] == 'si') or (mSplit[0] == u'sí')):
bot.send_message(m.chat.id, '_Primera pregunta:_ *¿Cuál es el record mundial, en días, de tuppers olvidados en la nevera?*', parse_mode='Markdown')
myPlayer.trasteroQuestionsAnswered += 1
        # If they say no, they go back to the square - TODO review (location is not updated)
elif mSplit[0] == 'no':
bot.send_message(m.chat.id, 'Con toda la cogorza te vuelves a la plaza.')
        # If they say anything else, they go back to the square - TODO review (location is not updated)
else:
bot.send_message(m.chat.id, 'A Jose se le acaba la paciencia y decide que no va a darte las llaves, así que te vuelves a la plaza.')
    # Second question
elif myPlayer.trasteroQuestionsAnswered == 2:
if ((mSplit[0] == '24') or (mSplit[0] == 'veinticuatro')):
bot.send_message(m.chat.id, '_Segunda pregunta:_ *¿Cuál es el récord mundial, en días, de ropa tendida y olvidada en el tendedero?*', parse_mode='Markdown')
myPlayer.trasteroQuestionsAnswered += 1
else:
bot.send_message(m.chat.id, 'Jose te obliga a beber un chupito de Jack Daniels por haber respondido mal, pero tu cuerpo no soporta más cantidad de alcohol en sangre y *mueres miserablemente* de un coma etílico.\n\nIntroduce _/start_ para iniciar de nuevo el juego.', parse_mode='Markdown')
game_initialize()
    # Third question
elif myPlayer.trasteroQuestionsAnswered == 3:
if ((mSplit[0] == '10') or (mSplit[0] == 'diez')):
bot.send_message(m.chat.id, '_Tercera pregunta:_ *¿Cuál es el record mundial, en días, de ropa olvidada en el tambor de la lavadora?*', parse_mode='Markdown')
myPlayer.trasteroQuestionsAnswered += 1
else:
bot.send_message(m.chat.id, 'Jose te obliga a beber un chupito de Jack Daniels por haber respondido mal, pero tu cuerpo no soporta más cantidad de alcohol en sangre y *mueres miserablemente* de un coma etílico.\n\nIntroduce _/start_ para iniciar de nuevo el juego.', parse_mode='Markdown')
game_initialize()
elif myPlayer.trasteroQuestionsAnswered == 4:
if ((mSplit[0] == '4') or (mSplit[0] == 'cuatro')):
bot.send_message(m.chat.id, '_Toma las llaves chico, te lo has ganado. Yo no daba un duro por tí y has conseguido acertar las 3 preguntas._\nUn largo escalofrío recorre tu espalda mientras Jose saca del centro de sus pantalones las llaves de tu piso. El sudor de su entrepierna adherido al metal las hace relucir como nunca antes. Tragas saliva, extiendes la mano, las coges y con un cuidado extremo las metes en tu bolsillo mientras te convences de que no volverás nunca a este antro y vuelves a la plaza.', parse_mode='Markdown')
map.zonemap[myPlayer.location]['SOLVED'] = True
myPlayer.location = 'b0'
time.sleep(2)
introduce_room(m)
else:
bot.send_message(m.chat.id, 'Jose te obliga a beber un chupito de Jack Daniels por haber respondido mal, pero tu cuerpo no soporta más cantidad de alcohol en sangre y *mueres miserablemente* de un coma etílico.\n\nIntroduce _/start_ para iniciar de nuevo el juego.', parse_mode='Markdown')
game_initialize()
# Testarrosa
def room_testarrosa(m):
    # Commands we understand in this room
acceptableTestarrosaActions = [u'pacharán', 'pacharan', u'patxarán', 'patxaran']
    # Take the received text and split it into words
mSplit = m.text.lower().split()
def horrible_singing_die():
bot.send_message(m.chat.id, 'Menos mal que no te ganas la vida como vocalista, lo haces fatal. La clientela del bar, enfurecida por tu actuación, te despelleja vivo allí mismo, *muriendo miserablemente*.\n\nIntroduce _/start_ para iniciar de nuevo el juego.', parse_mode='Markdown')
game_initialize()
    # Mark the room as visited
map.zonemap[myPlayer.location]['VISITED'] = True
    # Karaoke state machine: branch on how many lines the player has sung so far
if myPlayer.testarrosaSung == 0:
if ((mSplit[0] == u'pacharán') or (mSplit[0] == 'pacharan') or (mSplit[0] == u'patxarán') or (mSplit[0] == 'patxaran')):
bot.send_message(m.chat.id, "Con tu tubo de pacharan en la mano, escuchas cómo el DJ pincha una canción. Todos los asistentes enardecen con ella y empiezan a sacudir violentamente sus cabezas. Cuando empieza la letra, todo el mundo corea:\n\n_Ohhhh!! De nuevo solos tú y yo. Un lago y una canción,\necho de menos oír tu voz_\n\nLa música se interrumpe bruscamente, todo el mundo calla. Todas las miradas se centran en un punto. ¡Tú! *¡Están esperando a que cantes!*", parse_mode='Markdown')
myPlayer.testarrosaSung += 1
else:
bot.send_message(m.chat.id, "De eso no tenemos, pide otra cosa.")
elif myPlayer.testarrosaSung == 1:
if ((m.text.lower() == u'una estrella te eclipsó') or (m.text.lower() == 'una estrella te eclipso')):
bot.send_message(m.chat.id, "_Los momentos que no volverá a sentir tu piel,\nella no deja de pensar que un día te encontrará..._\n\nDe nuevo todo el mundo calla y te mira, *es tu turno de nuevo para cantar.*", parse_mode='Markdown')
myPlayer.testarrosaSung += 1
else:
            # Dies miserably
horrible_singing_die()
elif myPlayer.testarrosaSung == 2:
if ((m.text.lower() == u'acércate') or (m.text.lower() == 'acercate')):
bot.send_message(m.chat.id, "_A veces siento al despertar como un susurro, tu calor,\nella no deja de pensar que un día te encontrará..._\n\n¡Ella! ¡Estefanía! Te estará buscando, se preguntará dónde estás… Basta de karaokes y de tragos, es hora de volver a tu boda. ¿Dónde se encontrará la salida de este bucle temporal? No tienes ni idea. De momento, intentas salir del bar, pero una montonera de objetos bloquea la salida: un *oso panda*, un *palé*, un *enano*, una *oveja*, una *gallina* y un *señor disfrazado de hitita*.", parse_mode='Markdown')
myPlayer.testarrosaSung += 1
else:
            # Dies miserably
horrible_singing_die()
elif myPlayer.testarrosaSung == 3:
if (('oso' in mSplit) and ('panda' in mSplit)):
bot.send_message(m.chat.id, '¡Pero cómo se te ocurre meterte con un oso panda, por mucho que parezca un peluche! El primer zarpazo te secciona la vena subclavia y el segundo, la yugular. *Mueres miserablemente*.\n\nIntroduce _/start_ para iniciar de nuevo el juego.', parse_mode='Markdown')
game_initialize()
elif 'gallina' in mSplit:
bot.send_message(m.chat.id, 'Te has topado con un animal especialmente feroz. Esta gallina está cosiéndote a picotazos y más vale que encuentres un remedio rápido o no sabes si podrás sobrevivir. *¿Qué haces?*', parse_mode='Markdown')
myPlayer.testarrosaSung = 10
    # The hen loop
elif myPlayer.testarrosaSung == 10:
if 0 <= myPlayer.diabolicHen <= 9:
if mSplit[0] == 'darle':
if 'bourbon' in mSplit:
if 'dos botellas de bourbon' in myPlayer.inventory:
bot.send_message(m.chat.id, 'Has emborrachado a la gallina y por fin te ha dejado de molestar, pero el resto de objetos aún bloquean la salida: un *oso panda*, un *palé*, un *enano*, una *oveja* y un *señor disfrazado de hitita*.', parse_mode='Markdown')
                        myPlayer.inventory.remove('dos botellas de bourbon')
myPlayer.testarrosaSung = 4
else:
bot.send_message(m.chat.id, 'No encuentro eso que dices.')
                        sleep(1)
bot.send_message(m.chat.id, 'La gallina continúa picoteándote.')
myPlayer.diabolicHen += 1
else:
bot.send_message(m.chat.id, 'No encuentro eso que dices.')
sleep(1)
bot.send_message(m.chat.id, 'La gallina continúa picoteándote.')
myPlayer.diabolicHen += 1
else:
bot.send_message(m.chat.id, 'No entiendo eso que dices.')
sleep(1)
bot.send_message(m.chat.id, 'La gallina continúa picoteándote.')
myPlayer.diabolicHen += 1
else:
bot.send_message(m.chat.id, '*Mueres miserablemente* picoteado por la gallina.\n\nIntroduce _/start_ para iniciar de nuevo el juego.', parse_mode='Markdown')
game_initialize()
################################################################################
# La Plaza
def room_plaza(m):
    # Commands we understand in this room
acceptablePlazaActions = ['norte', 'sur', 'este', 'oeste']
    # Take the received text and split it into words
mSplit = m.text.lower().split()
# Tenere
if mSplit[0] == 'sur':
bot.send_message(m.chat.id, 'Ya has estado mucho tiempo en el Teneré, casi mejor ir a otro garito, ¿no?')
# La Ducha
elif mSplit[0] == 'oeste':
if map.zonemap['b4']['VISITED'] == True:
bot.send_message(m.chat.id, '¿Estás seguro de que es una buena idea volver a entrar en La Ducha? Elige otro sitio.')
else:
myPlayer.location = 'b4'
time.sleep(2)
introduce_room(m)
room_ducha(m)
# Trastero
elif mSplit[0] == 'norte':
if ((map.zonemap['b1']['VISITED'] == True) and (map.zonemap['b1']['SOLVED'] == False)):
bot.send_message(m.chat.id, 'Ha quedado muy claro que Jose no piensa darte las llaves, así que es mejor que elijas otro sitio.')
elif ((map.zonemap['b1']['VISITED'] == True) and (map.zonemap['b1']['SOLVED'] == True)):
bot.send_message(m.chat.id, 'Una vez has recuperado tus llaves, ¿no sería mejor ir a otro sitio?.')
else:
myPlayer.location = 'b1'
time.sleep(2)
introduce_room(m)
room_trastero(m)
# Testarrosa
elif mSplit[0] == 'este':
myPlayer.location = 'b2'
time.sleep(2)
introduce_room(m)
        room_testarrosa(m)
    # any other input gets the standard fallback reply used in the other rooms
    else:
        bot.send_message(m.chat.id, "No entiendo eso que dices")
# Main game loop that dispatches to the room the player is currently in
def play_rooms(m):
if myPlayer.location == 'a0':
room_examen(m)
elif myPlayer.location == 'b3':
room_tenere(m)
elif myPlayer.location == 'b0':
room_plaza(m)
elif myPlayer.location == 'b4':
room_ducha(m)
elif myPlayer.location == 'b4a':
room_bano_ducha(m)
elif myPlayer.location == 'b1':
room_trastero(m)
elif myPlayer.location == 'b2':
room_testarrosa(m)
elif myPlayer.location == 'z0':
room_alfonso8(m)
# Define a function called 'listener' that receives a parameter called 'messages'
def listener(messages):
    # For each message 'm' in 'messages'
for m in messages:
        # Only handle text messages
if m.content_type == 'text':
            # Store the chat ID
cid = m.chat.id
            # Print something like -> [52033876]: /start
print "[" + str(cid) + "]: " + m.text
            # The /start command begins the game
if m.text == '/start':
                # Initialise the game and send the introduction and the Teleco text
game_initialize()
bot.send_message(cid, display_intro)
time.sleep(2)
introduce_room(m)
            # Show the help
elif m.text == '/help':
display_help(m)
            # FOR TESTING ONLY - THESE ELIF BRANCHES MUST BE REMOVED
elif m.text == '/tenere':
myPlayer.location = 'b3'
introduce_room(m)
elif m.text == '/ducha':
myPlayer.location = 'b4'
introduce_room(m)
elif m.text == '/banoducha':
myPlayer.location = 'b4a'
introduce_room(m)
elif m.text == '/trastero':
myPlayer.location = 'b1'
introduce_room(m)
elif m.text == '/testarrosa':
myPlayer.inventory.append('dos botellas de bourbon')
myPlayer.location = 'b2'
introduce_room(m)
elif m.text == '/testarrosagallina':
myPlayer.inventory.append('dos botellas de bourbon')
myPlayer.testarrosaSung = 10
myPlayer.location = 'b2'
introduce_room(m)
elif m.text == '/alfonso8':
myPlayer.location = 'z0'
introduce_room(m)
else:
play_rooms(m)
# Tell the bot to use our 'listener' function as its update listener
# bot.set_update_listener(listener)
# Tell the bot to keep running even if it hits an error
# bot.infinity_polling()
polling_thread = threading.Thread(target=bot_polling)
polling_thread.daemon = True
polling_thread.start()
# Keep the main program running while the bot polls in its daemon thread
if __name__ == "__main__":
while True:
try:
sleep(120)
except KeyboardInterrupt:
print("\n@jblasbot instance ended")
break | UTF-8 | Python | false | false | 36,808 | py | 2 | game_NoToken.py | 2 | 0.649844 | 0.642951 | 0 | 634 | 56.664038 | 846 |
flashlightli/math_question | 12,008,728,584,284 | 761e6434328825d75e3b72f40a9ec562a70fe756 | ff1ab60460bc1102c1dc2a540fad92b4e562ff54 | /leetcode_question/easy_question/83_Remove_Duplicates_from_Sorted_List.py | 424dcd73d4edb4666bc516e70cb62885fd5a9734 | [] | no_license | https://github.com/flashlightli/math_question | 0009e707c43f36b8bbbfa98725191e8810e70126 | 9a4a5fc02821f0570170b3c18ef79ec4ad29a56d | refs/heads/master | 2023-05-25T07:12:57.571498 | 2022-04-17T01:43:53 | 2022-04-17T01:43:53 | 214,326,799 | 0 | 0 | null | false | 2023-05-22T22:44:56 | 2019-10-11T02:33:57 | 2021-10-08T16:41:19 | 2023-05-22T22:44:56 | 185 | 0 | 0 | 4 | Python | false | false | """
Given a sorted linked list, delete all duplicate elements so that each element appears only once.
Example 1:
Input: 1->1->2
Output: 1->2
Example 2:
Input: 1->1->2->3->3
Output: 1->2->3
Source: LeetCode
Link: https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list
"""
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def deleteDuplicates(self, head: ListNode) -> ListNode:
        # LeetCode submission stats: 44 ms runtime, 3.7 MB memory
        if head is None or head.next is None:
return head
temp, temp.next = ListNode(0), head
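        # tuple assignment binds targets left to right: the dummy node 'temp' exists before 'temp.next' is set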
while head and head.next:
if head.val == head.next.val:
head.next = head.next.next
else:
head = head.next
return temp.next
head = ListNode(1)
second = ListNode(2)
third = ListNode(2)
head.next = second
second.next = third
test = Solution()
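# note: this prints the returned head node's repr; follow .next from it to see the deduplicated values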
print(test.deleteDuplicates(
head
)) | UTF-8 | Python | false | false | 1,000 | py | 141 | 83_Remove_Duplicates_from_Sorted_List.py | 139 | 0.587444 | 0.561659 | 0 | 50 | 16.86 | 70 |
HttpRunner/HttpRunner | 1,331,439,889,983 | d166c5ad4feccc34ab43b5c658e96191be8387a8 | 6cb37fa9be336090eb1a8e1c10c417bc4634be0c | /httprunner/loader.py | d7f51502df20d3a380db194d6ecdc54d40292fd9 | [
"MIT"
] | permissive | https://github.com/HttpRunner/HttpRunner | a1bea86db1a2ecba1b98201b02192ad6653fd98f | 67c49d0cbe604c73aff538b2785518600209b673 | refs/heads/master | 2018-10-22T15:04:55.622867 | 2018-10-10T12:49:27 | 2018-10-10T12:49:27 | 94,166,852 | 1,246 | 486 | null | null | null | null | null | null | null | null | null | null | null | null | null | import collections
import csv
import importlib
import io
import json
import os
import sys
import yaml
from httprunner import built_in, exceptions, logger, parser, utils, validator
from httprunner.compat import OrderedDict
###############################################################################
## file loader
###############################################################################
def _check_format(file_path, content):
""" check testcase format if valid
"""
# TODO: replace with JSON schema validation
if not content:
# testcase file content is empty
err_msg = u"Testcase file content is empty: {}".format(file_path)
logger.log_error(err_msg)
raise exceptions.FileFormatError(err_msg)
elif not isinstance(content, (list, dict)):
# testcase file content does not match testcase format
err_msg = u"Testcase file content format invalid: {}".format(file_path)
logger.log_error(err_msg)
raise exceptions.FileFormatError(err_msg)
def load_yaml_file(yaml_file):
""" load yaml file and check file content format
"""
with io.open(yaml_file, 'r', encoding='utf-8') as stream:
        # SafeLoader avoids arbitrary python object construction (and the unsafe-load warning in PyYAML >= 5.1)
        yaml_content = yaml.load(stream, Loader=yaml.SafeLoader)
_check_format(yaml_file, yaml_content)
return yaml_content
def load_json_file(json_file):
""" load json file and check file content format
"""
with io.open(json_file, encoding='utf-8') as data_file:
try:
json_content = json.load(data_file)
except exceptions.JSONDecodeError:
err_msg = u"JSONDecodeError: JSON file format error: {}".format(json_file)
logger.log_error(err_msg)
raise exceptions.FileFormatError(err_msg)
_check_format(json_file, json_content)
return json_content
def load_csv_file(csv_file):
""" load csv file and check file content format
    Args:
        csv_file (str): csv file path
            e.g. csv file content:
                username,password
                test1,111111
                test2,222222
                test3,333333
    Returns:
        list: list of parameters, each parameter in dict format
            e.g.
            [
                {'username': 'test1', 'password': '111111'},
                {'username': 'test2', 'password': '222222'},
                {'username': 'test3', 'password': '333333'}
            ]
"""
csv_content_list = []
with io.open(csv_file, encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
csv_content_list.append(row)
return csv_content_list
def load_file(file_path):
if not os.path.isfile(file_path):
raise exceptions.FileNotFound("{} does not exist.".format(file_path))
file_suffix = os.path.splitext(file_path)[1].lower()
if file_suffix == '.json':
return load_json_file(file_path)
elif file_suffix in ['.yaml', '.yml']:
return load_yaml_file(file_path)
elif file_suffix == ".csv":
return load_csv_file(file_path)
else:
# '' or other suffix
err_msg = u"Unsupported file format: {}".format(file_path)
logger.log_warning(err_msg)
return []
def load_folder_files(folder_path, recursive=True):
""" load folder path, return all files endswith yml/yaml/json in list.
Args:
folder_path (str): specified folder path to load
recursive (bool): load files recursively if True
Returns:
list: files endswith yml/yaml/json
"""
if isinstance(folder_path, (list, set)):
files = []
for path in set(folder_path):
files.extend(load_folder_files(path, recursive))
return files
if not os.path.exists(folder_path):
return []
file_list = []
for dirpath, dirnames, filenames in os.walk(folder_path):
filenames_list = []
for filename in filenames:
if not filename.endswith(('.yml', '.yaml', '.json')):
continue
filenames_list.append(filename)
for filename in filenames_list:
file_path = os.path.join(dirpath, filename)
file_list.append(file_path)
if not recursive:
break
return file_list
def load_dot_env_file(dot_env_path):
""" load .env file.
Args:
dot_env_path (str): .env file path
Returns:
dict: environment variables mapping
{
"UserName": "debugtalk",
"Password": "123456",
"PROJECT_KEY": "ABCDEFGH"
}
Raises:
exceptions.FileFormatError: If .env file format is invalid.
"""
if not os.path.isfile(dot_env_path):
raise exceptions.FileNotFound(".env file path is not exist.")
logger.log_info("Loading environment variables from {}".format(dot_env_path))
env_variables_mapping = {}
with io.open(dot_env_path, 'r', encoding='utf-8') as fp:
for line in fp:
# maxsplit=1
if "=" in line:
variable, value = line.split("=", 1)
elif ":" in line:
variable, value = line.split(":", 1)
else:
raise exceptions.FileFormatError(".env format error")
env_variables_mapping[variable.strip()] = value.strip()
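    # expose the loaded variables through the current process environment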
utils.set_os_environ(env_variables_mapping)
return env_variables_mapping
def locate_file(start_path, file_name):
""" locate filename and return file path.
searching will be recursive upward until current working directory.
Args:
start_path (str): start locating path, maybe file path or directory path
Returns:
str: located file path. None if file not found.
Raises:
exceptions.FileNotFound: If failed to locate file.
"""
if os.path.isfile(start_path):
start_dir_path = os.path.dirname(start_path)
elif os.path.isdir(start_path):
start_dir_path = start_path
else:
raise exceptions.FileNotFound("invalid path: {}".format(start_path))
file_path = os.path.join(start_dir_path, file_name)
if os.path.isfile(file_path):
return file_path
# current working directory
if os.path.abspath(start_dir_path) in [os.getcwd(), os.path.abspath(os.sep)]:
raise exceptions.FileNotFound("{} not found in {}".format(file_name, start_path))
# locate recursive upward
return locate_file(os.path.dirname(start_dir_path), file_name)
###############################################################################
## debugtalk.py module loader
###############################################################################
def load_python_module(module):
""" load python module.
Args:
module: python module
Returns:
dict: variables and functions mapping for specified python module
{
"variables": {},
"functions": {}
}
"""
debugtalk_module = {
"variables": {},
"functions": {}
}
for name, item in vars(module).items():
if validator.is_function((name, item)):
debugtalk_module["functions"][name] = item
elif validator.is_variable((name, item)):
if isinstance(item, tuple):
continue
debugtalk_module["variables"][name] = item
return debugtalk_module
def load_builtin_module():
""" load built_in module
"""
built_in_module = load_python_module(built_in)
return built_in_module
def load_debugtalk_module():
""" load project debugtalk.py module
debugtalk.py should be located in project working directory.
Returns:
dict: debugtalk module mapping
{
"variables": {},
"functions": {}
}
"""
# load debugtalk.py module
imported_module = importlib.import_module("debugtalk")
debugtalk_module = load_python_module(imported_module)
return debugtalk_module
def get_module_item(module_mapping, item_type, item_name):
""" get expected function or variable from module mapping.
Args:
module_mapping(dict): module mapping with variables and functions.
{
"variables": {},
"functions": {}
}
item_type(str): "functions" or "variables"
item_name(str): function name or variable name
Returns:
object: specified variable or function object.
Raises:
exceptions.FunctionNotFound: If specified function not found in module mapping
exceptions.VariableNotFound: If specified variable not found in module mapping
"""
try:
return module_mapping[item_type][item_name]
except KeyError:
err_msg = "{} not found in debugtalk.py module!\n".format(item_name)
err_msg += "module mapping: {}".format(module_mapping)
if item_type == "functions":
raise exceptions.FunctionNotFound(err_msg)
else:
raise exceptions.VariableNotFound(err_msg)
###############################################################################
## testcase loader
###############################################################################
def _load_test_file(file_path, project_mapping):
""" load testcase file or testsuite file
Args:
file_path (str): absolute valid file path. file_path should be in the following format:
[
{
"config": {
"name": "",
"def": "suite_order()",
"request": {}
}
},
{
"test": {
"name": "add product to cart",
"api": "api_add_cart()",
"validate": []
}
},
{
"test": {
"name": "add product to cart",
"suite": "create_and_check()",
"validate": []
}
},
{
"test": {
"name": "checkout cart",
"request": {},
"validate": []
}
}
]
project_mapping (dict): project_mapping
Returns:
dict: testcase dict
{
"config": {},
"teststeps": [teststep11, teststep12]
}
"""
testcase = {
"config": {},
"teststeps": []
}
for item in load_file(file_path):
# TODO: add json schema validation
if not isinstance(item, dict) or len(item) != 1:
raise exceptions.FileFormatError("Testcase format error: {}".format(file_path))
key, test_block = item.popitem()
if not isinstance(test_block, dict):
raise exceptions.FileFormatError("Testcase format error: {}".format(file_path))
if key == "config":
testcase["config"].update(test_block)
elif key == "test":
def extend_api_definition(block):
ref_call = block["api"]
def_block = _get_block_by_name(ref_call, "def-api", project_mapping)
_extend_block(block, def_block)
# reference api
if "api" in test_block:
extend_api_definition(test_block)
testcase["teststeps"].append(test_block)
# reference testcase
elif "suite" in test_block: # TODO: replace suite with testcase
ref_call = test_block["suite"]
block = _get_block_by_name(ref_call, "def-testcase", project_mapping)
# TODO: bugfix lost block config variables
for teststep in block["teststeps"]:
if "api" in teststep:
extend_api_definition(teststep)
testcase["teststeps"].append(teststep)
# define directly
else:
testcase["teststeps"].append(test_block)
else:
logger.log_warning(
"unexpected block key: {}. block key should only be 'config' or 'test'.".format(key)
)
return testcase
def _get_block_by_name(ref_call, ref_type, project_mapping):
""" get test content by reference name.
Args:
ref_call (str): call function.
e.g. api_v1_Account_Login_POST($UserName, $Password)
ref_type (enum): "def-api" or "def-testcase"
project_mapping (dict): project_mapping
Returns:
dict: api/testcase definition.
Raises:
exceptions.ParamsError: call args number is not equal to defined args number.
"""
function_meta = parser.parse_function(ref_call)
func_name = function_meta["func_name"]
call_args = function_meta["args"]
block = _get_test_definition(func_name, ref_type, project_mapping)
def_args = block.get("function_meta", {}).get("args", [])
if len(call_args) != len(def_args):
err_msg = "{}: call args number is not equal to defined args number!\n".format(func_name)
err_msg += "defined args: {}\n".format(def_args)
err_msg += "reference args: {}".format(call_args)
logger.log_error(err_msg)
raise exceptions.ParamsError(err_msg)
args_mapping = {}
for index, item in enumerate(def_args):
if call_args[index] == item:
continue
args_mapping[item] = call_args[index]
if args_mapping:
block = parser.substitute_variables(block, args_mapping)
return block
def _get_test_definition(name, ref_type, project_mapping):
""" get expected api or testcase.
Args:
name (str): api or testcase name
ref_type (enum): "def-api" or "def-testcase"
project_mapping (dict): project_mapping
Returns:
dict: expected api/testcase info if found.
Raises:
exceptions.ApiNotFound: api not found
exceptions.TestcaseNotFound: testcase not found
"""
block = project_mapping.get(ref_type, {}).get(name)
if not block:
err_msg = "{} not found!".format(name)
if ref_type == "def-api":
raise exceptions.ApiNotFound(err_msg)
else:
# ref_type == "def-testcase":
raise exceptions.TestcaseNotFound(err_msg)
return block
def _extend_block(ref_block, def_block):
""" extend ref_block with def_block.
Args:
        ref_block (dict): reference block to extend (modified in place)
        def_block (dict): api definition dict.
Returns:
dict: extended reference block.
Examples:
>>> def_block = {
"name": "get token 1",
"request": {...},
"validate": [{'eq': ['status_code', 200]}]
}
>>> ref_block = {
"name": "get token 2",
"extract": [{"token": "content.token"}],
"validate": [{'eq': ['status_code', 201]}, {'len_eq': ['content.token', 16]}]
}
        >>> _extend_block(ref_block, def_block)
{
"name": "get token 2",
"request": {...},
"extract": [{"token": "content.token"}],
"validate": [{'eq': ['status_code', 201]}, {'len_eq': ['content.token', 16]}]
}
"""
# TODO: override variables
def_validators = def_block.get("validate") or def_block.get("validators", [])
ref_validators = ref_block.get("validate") or ref_block.get("validators", [])
def_extrators = def_block.get("extract") \
or def_block.get("extractors") \
or def_block.get("extract_binds", [])
ref_extractors = ref_block.get("extract") \
or ref_block.get("extractors") \
or ref_block.get("extract_binds", [])
    # the reference block overrides the definition for common keys (see the
    # Examples above); the definition only fills in the missing ones
    for key, value in def_block.items():
        ref_block.setdefault(key, value)
ref_block["validate"] = _merge_validator(
def_validators,
ref_validators
)
ref_block["extract"] = _merge_extractor(
def_extrators,
ref_extractors
)
def _convert_validators_to_mapping(validators):
""" convert validators list to mapping.
Args:
validators (list): validators in list
Returns:
dict: validators mapping, use (check, comparator) as key.
Examples:
>>> validators = [
{"check": "v1", "expect": 201, "comparator": "eq"},
{"check": {"b": 1}, "expect": 200, "comparator": "eq"}
]
>>> _convert_validators_to_mapping(validators)
{
("v1", "eq"): {"check": "v1", "expect": 201, "comparator": "eq"},
('{"b": 1}', "eq"): {"check": {"b": 1}, "expect": 200, "comparator": "eq"}
}
"""
validators_mapping = {}
for validator in validators:
validator = parser.parse_validator(validator)
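        # dict/list check values are not hashable, so serialise them before using them as mapping keys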
if not isinstance(validator["check"], collections.Hashable):
check = json.dumps(validator["check"])
else:
check = validator["check"]
key = (check, validator["comparator"])
validators_mapping[key] = validator
return validators_mapping
def _merge_validator(def_validators, ref_validators):
""" merge def_validators with ref_validators.
Args:
def_validators (list):
ref_validators (list):
Returns:
list: merged validators
Examples:
>>> def_validators = [{'eq': ['v1', 200]}, {"check": "s2", "expect": 16, "comparator": "len_eq"}]
>>> ref_validators = [{"check": "v1", "expect": 201}, {'len_eq': ['s3', 12]}]
>>> _merge_validator(def_validators, ref_validators)
[
{"check": "v1", "expect": 201, "comparator": "eq"},
{"check": "s2", "expect": 16, "comparator": "len_eq"},
{"check": "s3", "expect": 12, "comparator": "len_eq"}
]
"""
if not def_validators:
return ref_validators
elif not ref_validators:
return def_validators
else:
def_validators_mapping = _convert_validators_to_mapping(def_validators)
ref_validators_mapping = _convert_validators_to_mapping(ref_validators)
def_validators_mapping.update(ref_validators_mapping)
return list(def_validators_mapping.values())
def _merge_extractor(def_extrators, ref_extractors):
""" merge def_extrators with ref_extractors
Args:
def_extrators (list): [{"var1": "val1"}, {"var2": "val2"}]
ref_extractors (list): [{"var1": "val111"}, {"var3": "val3"}]
Returns:
list: merged extractors
Examples:
>>> def_extrators = [{"var1": "val1"}, {"var2": "val2"}]
>>> ref_extractors = [{"var1": "val111"}, {"var3": "val3"}]
>>> _merge_extractor(def_extrators, ref_extractors)
[
{"var1": "val111"},
{"var2": "val2"},
{"var3": "val3"}
]
"""
if not def_extrators:
return ref_extractors
elif not ref_extractors:
return def_extrators
else:
extractor_dict = OrderedDict()
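        # OrderedDict keeps first-seen order while reference extractors override definition ones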
for api_extrator in def_extrators:
if len(api_extrator) != 1:
logger.log_warning("incorrect extractor: {}".format(api_extrator))
continue
var_name = list(api_extrator.keys())[0]
extractor_dict[var_name] = api_extrator[var_name]
for test_extrator in ref_extractors:
if len(test_extrator) != 1:
logger.log_warning("incorrect extractor: {}".format(test_extrator))
continue
var_name = list(test_extrator.keys())[0]
extractor_dict[var_name] = test_extrator[var_name]
extractor_list = []
for key, value in extractor_dict.items():
extractor_list.append({key: value})
return extractor_list
def load_folder_content(folder_path):
""" load api/testcases/testsuites definitions from folder.
Args:
folder_path (str): api/testcases/testsuites files folder.
Returns:
dict: api definition mapping.
{
"tests/api/basic.yml": [
{"api": {"def": "api_login", "request": {}, "validate": []}},
{"api": {"def": "api_logout", "request": {}, "validate": []}}
]
}
"""
items_mapping = {}
for file_path in load_folder_files(folder_path):
items_mapping[file_path] = load_file(file_path)
return items_mapping
def load_api_folder(api_folder_path):
""" load api definitions from api folder.
Args:
api_folder_path (str): api files folder.
api file should be in the following format:
[
{
"api": {
"def": "api_login",
"request": {},
"validate": []
}
},
{
"api": {
"def": "api_logout",
"request": {},
"validate": []
}
}
]
Returns:
dict: api definition mapping.
{
"api_login": {
"function_meta": {"func_name": "api_login", "args": [], "kwargs": {}}
"request": {}
},
"api_logout": {
"function_meta": {"func_name": "api_logout", "args": [], "kwargs": {}}
"request": {}
}
}
"""
api_definition_mapping = {}
api_items_mapping = load_folder_content(api_folder_path)
for api_file_path, api_items in api_items_mapping.items():
# TODO: add JSON schema validation
for api_item in api_items:
key, api_dict = api_item.popitem()
api_def = api_dict.pop("def")
function_meta = parser.parse_function(api_def)
func_name = function_meta["func_name"]
if func_name in api_definition_mapping:
logger.log_warning("API definition duplicated: {}".format(func_name))
api_dict["function_meta"] = function_meta
api_definition_mapping[func_name] = api_dict
return api_definition_mapping
def load_test_folder(test_folder_path):
""" load testcases definitions from folder.
Args:
test_folder_path (str): testcases files folder.
testcase file should be in the following format:
[
{
"config": {
"def": "create_and_check",
"request": {},
"validate": []
}
},
{
"test": {
"api": "get_user",
"validate": []
}
}
]
Returns:
dict: testcases definition mapping.
{
"create_and_check": [
{"config": {}},
{"test": {}},
{"test": {}}
],
"tests/testcases/create_and_get.yml": [
{"config": {}},
{"test": {}},
{"test": {}}
]
}
"""
test_definition_mapping = {}
test_items_mapping = load_folder_content(test_folder_path)
for test_file_path, items in test_items_mapping.items():
# TODO: add JSON schema validation
testcase = {
"config": {},
"teststeps": []
}
for item in items:
key, block = item.popitem()
if key == "config":
testcase["config"].update(block)
if "def" not in block:
test_definition_mapping[test_file_path] = testcase
continue
testcase_def = block.pop("def")
function_meta = parser.parse_function(testcase_def)
func_name = function_meta["func_name"]
if func_name in test_definition_mapping:
logger.log_warning("API definition duplicated: {}".format(func_name))
testcase["function_meta"] = function_meta
test_definition_mapping[func_name] = testcase
else:
# key == "test":
testcase["teststeps"].append(block)
return test_definition_mapping
def locate_debugtalk_py(start_path):
""" locate debugtalk.py file.
Args:
start_path (str): start locating path, maybe testcase file path or directory path
"""
try:
debugtalk_path = locate_file(start_path, "debugtalk.py")
return os.path.abspath(debugtalk_path)
except exceptions.FileNotFound:
return None
def load_project_tests(test_path, dot_env_path=None):
""" load api, testcases, .env, builtin module and debugtalk.py.
api/testcases folder is relative to project_working_directory
Args:
test_path (str): test file/folder path, locate pwd from this path.
dot_env_path (str): specified .env file path
Returns:
dict: project loaded api/testcases definitions, environments and debugtalk.py module.
"""
project_mapping = {}
debugtalk_path = locate_debugtalk_py(test_path)
# locate PWD with debugtalk.py path
if debugtalk_path:
# The folder contains debugtalk.py will be treated as PWD.
project_working_directory = os.path.dirname(debugtalk_path)
else:
# debugtalk.py is not found, use os.getcwd() as PWD.
project_working_directory = os.getcwd()
# add PWD to sys.path
sys.path.insert(0, project_working_directory)
# load .env
dot_env_path = dot_env_path or os.path.join(project_working_directory, ".env")
if os.path.isfile(dot_env_path):
project_mapping["env"] = load_dot_env_file(dot_env_path)
else:
project_mapping["env"] = {}
# load debugtalk.py
if debugtalk_path:
project_mapping["debugtalk"] = load_debugtalk_module()
else:
project_mapping["debugtalk"] = {
"variables": {},
"functions": {}
}
project_mapping["def-api"] = load_api_folder(os.path.join(project_working_directory, "api"))
# TODO: replace suite with testcases
project_mapping["def-testcase"] = load_test_folder(os.path.join(project_working_directory, "suite"))
return project_mapping
def load_tests(path, dot_env_path=None):
""" load testcases from file path, extend and merge with api/testcase definitions.
Args:
path (str/list): testcase file/foler path.
path could be in several types:
- absolute/relative file path
- absolute/relative folder path
- list/set container with file(s) and/or folder(s)
dot_env_path (str): specified .env file path
Returns:
list: testcases list, each testcase is corresponding to a file
[
{ # testcase data structure
"config": {
"name": "desc1",
"path": "testcase1_path",
"variables": [], # optional
"request": {} # optional
"refs": {
"debugtalk": {
"variables": {},
"functions": {}
},
"env": {},
"def-api": {},
"def-testcase": {}
}
},
"teststeps": [
# teststep data structure
{
'name': 'test step desc2',
'variables': [], # optional
'extract': [], # optional
'validate': [],
'request': {},
'function_meta': {}
},
teststep2 # another teststep dict
]
},
testcase_dict_2 # another testcase dict
]
"""
if isinstance(path, (list, set)):
testcases_list = []
for file_path in set(path):
testcases = load_tests(file_path, dot_env_path)
if not testcases:
continue
testcases_list.extend(testcases)
return testcases_list
if not os.path.exists(path):
err_msg = "path not exist: {}".format(path)
logger.log_error(err_msg)
raise exceptions.FileNotFound(err_msg)
if not os.path.isabs(path):
path = os.path.join(os.getcwd(), path)
if os.path.isdir(path):
files_list = load_folder_files(path)
testcases_list = load_tests(files_list, dot_env_path)
elif os.path.isfile(path):
try:
project_mapping = load_project_tests(path, dot_env_path)
testcase = _load_test_file(path, project_mapping)
testcase["config"]["path"] = path
testcase["config"]["refs"] = project_mapping
testcases_list = [testcase]
except exceptions.FileFormatError:
testcases_list = []
return testcases_list
| UTF-8 | Python | false | false | 29,564 | py | 9 | loader.py | 9 | 0.523204 | 0.51786 | 0 | 967 | 29.572906 | 105 |
maximoskp/mel_ble_examples_ICMNC2019 | 5,970,004,590,026 | cb164d758af1627e26cec593222bc68cf5c3fedf | 9d270a42a5bd4b6259560215734bc883a4203556 | /MelVis_styles_N_blends.py | 28456a9faa4ba8cda4dd4679b5d47b75bd3281b2 | [] | no_license | https://github.com/maximoskp/mel_ble_examples_ICMNC2019 | 212b30f2fea267c1f5900d11f6ad863451bd4585 | 720c00038f3fe789a3838ea051dc65e5823b3c17 | refs/heads/master | 2020-04-21T16:58:19.239860 | 2019-02-11T12:38:19 | 2019-02-11T12:38:19 | 169,720,459 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 4 06:43:32 2018
@author: maximoskaliakatsos-papakostas
"""
import os
cwd = os.getcwd()
import glob
import music21 as m21
import numpy as np
from sklearn.decomposition import PCA
import MBL_melody_features_functions as mff
import pickle
import matplotlib.pyplot as plt
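# remakedata: recompute the features from the xml files and refresh the pickles;
# when False, the previously pickled features, pca projection and names are loaded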
remakedata = True
test_plot = True
# the four blending sessions: each entry pairs a han input piece with a jazz input piece
blending_sessions = [['han0120','fried_bananas'] , ['han0351','i_fall_in_love_too_easy'] , ['han0238','i_hear_rapsody'] , ['han0207','my_silient_love']]
if remakedata:
    mainFolder = cwd + os.sep + 'all_xmls' + os.sep
    styles_folders = ['han' + os.sep, 'jazz' + os.sep]
    session_names = ['han', 'jazz']
all_names = []
all_features = []
all_features_np = []
    # each blending session comprises 14 blends, named blend_0.xml ... blend_13.xml
    blend_names = [ 'blend_' + str(j) + '.xml' for j in range(14) ]
blending_indexes = []
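    # blending_indexes[i] will hold the row indexes of session i's blends within all_features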
# first construct the features matrix of all pieces in both styles
for i in range( len( styles_folders ) ):
print('Processing initial: ', styles_folders[i])
folderName = mainFolder + styles_folders[i]
all_files = glob.glob(folderName + "*.xml")
# for all pieces extract features and put them in respective np.arrays
for j in range( len( all_files ) ):
fileName = all_files[j]
all_names.append( fileName.split(os.sep)[-1] )
# print('Processing initial: ', fileName)
p = m21.converter.parse( fileName )
tmp_val = mff.get_features_of_stream( p )
all_features.append( tmp_val )
# end for styles
    # for each blending session, append features
for i in range( len( blending_sessions ) ):
session_folder = 'bl'+str(i+1)+'_'+blending_sessions[i][1]+'_'+blending_sessions[i][0]+os.sep
blending_indexes.append( range( len(all_features), len(all_features)+len(blend_names), 1 ) )
print('Processing blend: ', session_folder)
for j in range( len( blend_names ) ):
# print('Processing blend: ', blend_names[j])
            fileName = cwd + os.sep + 'full' + os.sep + session_folder + blend_names[j]
p = m21.converter.parse( fileName )
tmp_val = mff.get_features_of_stream( p )
all_features.append( tmp_val )
    # do pca on all features (original pieces followed by the session blends)
pca = PCA(n_components=2)
all_features_np = np.vstack( all_features )
    # min-max normalise each feature column to [0, 1] so that no feature dominates the pca
x = all_features_np
x_max = np.max(x, axis=0)
x_min = np.min(x, axis=0)
    y = (x - x_min) / (x_max - x_min)
# all_pca = pca.fit_transform( np.vstack( all_features_np ) )
    all_pca = pca.fit_transform( y )
    # fit_transform has already fitted pca, so the ratios can be read directly
    explained = pca.explained_variance_ratio_
print('PCA explained variances: ', explained)
print('PCA axes correlations:')
for i in range(2):
for j in range(4):
print('PCA_', i, ' - f_', j, ': ', np.corrcoef( all_pca[:,i], all_features_np[:,j] )[0][1])
print('PCA_0 - f_0+f_2: ', np.corrcoef( all_pca[:,0], all_features_np[:,0]+all_features_np[:,2] )[0][1])
# keep the pca coordinates of the original (not blended) pieces
all_original_pca = all_pca[ :len(all_names) , : ]
for i in range( len( blending_sessions ) ):
# keep indexes of the pieces to be highlighted
han2show = blending_sessions[i][0]
jazz2show = blending_sessions[i][1]
# get indexes to highlight
han_idx = all_names.index( han2show+'.xml' )
jazz_idx = all_names.index( jazz2show+'.xml' )
# get pca of blends on features matrix
blends_pca = all_pca[ blending_indexes[i] , : ]
# plot pca of original pieces
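        # the first 488 entries of all_original_pca correspond to the han pieces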
plt.plot(all_original_pca[:488,0], all_original_pca[:488,1], '|', color='grey', alpha=0.5, label='Han')
plt.plot(all_original_pca[488:,0], all_original_pca[488:,1], '_', color='grey', alpha=0.5, label='Jazz')
# highlight inputs
        plt.plot(all_original_pca[han_idx,0], all_original_pca[han_idx,1], 'd', color='grey', markersize=10, label='Han input')
        plt.plot(all_original_pca[jazz_idx,0], all_original_pca[jazz_idx,1], 's', color='grey', markersize=10, label='Jazz input')
# plot blends
for j in range(14):
if j==0:
plt.plot(blends_pca[j,0], blends_pca[j,1], '.', color='black', label='blend')
else:
plt.plot(blends_pca[j,0], blends_pca[j,1], '.', color='black')
plt.text(blends_pca[j,0], blends_pca[j,1], str(j), color='black', fontsize=9, bbox=dict(facecolor='white', alpha=0.2))
plt.xticks([])
plt.yticks([])
plt.legend()
plt.savefig('figs/pca_'+blending_sessions[i][1]+'_'+blending_sessions[i][0]+'.png', dpi=500); plt.clf()
# save
with open('saved_data/all_features.pickle', 'wb') as handle:
pickle.dump(all_features, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('saved_data/all_pca.pickle', 'wb') as handle:
pickle.dump(all_pca, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('saved_data/all_names.pickle', 'wb') as handle:
pickle.dump(all_names, handle, protocol=pickle.HIGHEST_PROTOCOL)
else:
with open('saved_data/all_features.pickle', 'rb') as handle:
all_features = pickle.load(handle)
with open('saved_data/all_pca.pickle', 'rb') as handle:
all_pca = pickle.load(handle)
with open('saved_data/all_names.pickle', 'rb') as handle:
all_names = pickle.load(handle)
    # the pickled all_pca was fitted on the min-max normalised features,
    # so use it directly instead of refitting pca on the raw features
    # keep the pca coordinates of the original (not blended) pieces
    all_original_pca = all_pca[ :len(all_names) , : ]
    # recover the indexes of the blending input pieces from the stored names
    han_highlight = [ all_names.index( s[0]+'.xml' ) for s in blending_sessions ]
    jazz_highlight = [ all_names.index( s[1]+'.xml' ) for s in blending_sessions ]
    # the first 488 original pieces are han, the remaining ones jazz
    plt.plot(all_original_pca[:488,0], all_original_pca[:488,1], '|', color='grey', alpha=0.5)
    plt.plot(all_original_pca[488:,0], all_original_pca[488:,1], '_', color='grey', alpha=0.5)
    for i in range( len( han_highlight ) ):
        plt.plot(all_original_pca[han_highlight[i],0], all_original_pca[han_highlight[i],1], 'd', color='black')
        plt.plot(all_original_pca[jazz_highlight[i],0], all_original_pca[jazz_highlight[i],1], 's', color='black')
    plt.savefig('figs/pca_all.png', dpi=500); plt.clf()
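# Legacy variant, disabled via the string literal below: it sorted the pieces of
# each style by distance to the other style's centroid and pickled the sorted
# results; the test_plot flag above is only referenced inside this disabled code.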
'''
# PCA
pca = PCA(n_components=2)
all_features_np = np.vstack( all_features )
all_pca = pca.fit_transform( np.vstack( all_features_np ) )
# sort by distance to other centroid
np_styles_idx = np.array( all_styles_idx )
pca_1 = all_pca[ np_styles_idx == 0 , : ]
pca_2 = all_pca[ np_styles_idx == 1 , : ]
features_1 = all_features_np[ np_styles_idx == 0 , : ]
features_2 = all_features_np[ np_styles_idx == 1 , : ]
centr_1 = np.mean(pca_1, axis = 0)
centr_2 = np.mean(pca_2, axis = 0)
# distances
x = np.linalg.norm(pca_1 - centr_2, axis=1)
y = 1/(np.linalg.norm(pca_1 - centr_1, axis=1)+1)
d_pca_1 = x/np.max(x) + 0.3*y/np.max(y)
x = np.linalg.norm(pca_2 - centr_1, axis=1)
y = 1/(np.linalg.norm(pca_2 - centr_2, axis=1)+1)
d_pca_2 = x/np.max(x) + 0.3*y/np.max(y)
# get indexes of sorted distances
sidx1 = np.argsort( d_pca_1 )[::-1]
sidx2 = np.argsort( d_pca_2 )[::-1]
# keep names of each style
idxs_1 = np.where( np_styles_idx == 0 )[0]
names_1 = [all_names[i] for i in idxs_1]
idxs_2 = np.where( np_styles_idx == 1 )[0]
names_2 = [all_names[i] for i in idxs_2]
# keep shorted names
s_names_1 = [names_1[i] for i in sidx1]
s_names_2 = [names_2[i] for i in sidx2]
# keep sorted pcas
s_pca_1 = pca_1[ sidx1, : ]
s_pca_2 = pca_2[ sidx2, : ]
s_features_1 = features_1[ sidx1, : ]
s_features_2 = features_2[ sidx2, : ]
with open('saved_data/all_names.pickle', 'wb') as handle:
pickle.dump(all_names, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('saved_data/all_styles.pickle', 'wb') as handle:
pickle.dump(all_styles, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('saved_data/all_styles_idx.pickle', 'wb') as handle:
pickle.dump(all_styles_idx, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('saved_data/all_features.pickle', 'wb') as handle:
pickle.dump(all_features, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('saved_data/all_pca.pickle', 'wb') as handle:
pickle.dump(all_pca, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('saved_data/style_folders.pickle', 'wb') as handle:
pickle.dump(style_folders, handle, protocol=pickle.HIGHEST_PROTOCOL)
# save sorted pcas and names
with open('saved_data/s_pca_1.pickle', 'wb') as handle:
pickle.dump(s_pca_1, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('saved_data/s_pca_2.pickle', 'wb') as handle:
pickle.dump(s_pca_2, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('saved_data/s_features_1.pickle', 'wb') as handle:
pickle.dump(s_features_1, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('saved_data/s_features_2.pickle', 'wb') as handle:
pickle.dump(s_features_2, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('saved_data/s_names_1.pickle', 'wb') as handle:
pickle.dump(s_names_1, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('saved_data/s_names_2.pickle', 'wb') as handle:
pickle.dump(s_names_2, handle, protocol=pickle.HIGHEST_PROTOCOL)
else:
with open('saved_data/all_names.pickle', 'rb') as handle:
all_names = pickle.load(handle)
with open('saved_data/all_styles.pickle', 'rb') as handle:
all_styles = pickle.load(handle)
with open('saved_data/all_styles_idx.pickle', 'rb') as handle:
all_styles_idx = pickle.load(handle)
with open('saved_data/all_features.pickle', 'rb') as handle:
all_features = pickle.load(handle)
with open('saved_data/all_pca.pickle', 'rb') as handle:
all_pca = pickle.load(handle)
with open('saved_data/style_folders.pickle', 'rb') as handle:
style_folders = pickle.load(handle)
# load sorted pcas and names
with open('saved_data/s_pca_1.pickle', 'rb') as handle:
s_pca_1 = pickle.load(handle)
with open('saved_data/s_pca_2.pickle', 'rb') as handle:
s_pca_2 = pickle.load(handle)
with open('saved_data/s_features_1.pickle', 'rb') as handle:
s_features_1 = pickle.load(handle)
with open('saved_data/s_features_2.pickle', 'rb') as handle:
s_features_2 = pickle.load(handle)
with open('saved_data/s_names_1.pickle', 'rb') as handle:
s_names_1 = pickle.load(handle)
with open('saved_data/s_names_2.pickle', 'rb') as handle:
s_names_2 = pickle.load(handle)
# end if remakedata
if test_plot:
how_many = 100
# style 1
hm = min( [how_many, s_pca_1.shape[0]] )
plt.plot( s_pca_1[ :hm , 0 ], s_pca_1[ :hm , 1 ], 'o' , label=style_folders[0] )
# style 2
hm = min( [how_many, s_pca_2.shape[0]] )
plt.plot( s_pca_2[ :hm , 0 ], s_pca_2[ :hm , 1 ], 'x' , label=style_folders[1] )
plt.legend()
plt.savefig('figs/pca.png', dpi=300); plt.clf()
''' | UTF-8 | Python | false | false | 11,142 | py | 21 | MelVis_styles_N_blends.py | 5 | 0.613445 | 0.590558 | 0 | 239 | 45.623431 | 156 |
poojirules180/Python-Learning- | 4,131,758,547,239 | 4c89fc41abb848fe2df555c209c38af0aaf197e6 | f49498ec6b53221f8cfdfcc1a33aa614d6441f4b | /Data points.py | 56e5c1fd75bf79566359497576b6539e79cb4168 | [] | no_license | https://github.com/poojirules180/Python-Learning- | 460a97847d32af6f08854c9b15fdc4203f5972ad | c3ae8ada8a8a9c590e49fd13827e0d5568b6625f | refs/heads/master | 2022-11-02T09:45:43.390577 | 2020-06-19T05:06:16 | 2020-06-19T05:06:16 | 270,888,898 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Opening the file and reading all of its text; the with block closes it automatically
with open("poojith.txt", "r") as readFile:
    data = readFile.read()
#number of words
#using data.split() to break the text into individual words
words = data.split()
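#note: split() with no arguments splits on any run of whitespace (spaces, tabs, newlines)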
numberofWord = 'Number of words in text file: ' + str(len(words))
print(numberofWord)
#code for number of lines
lines = 0
with open("poojith.txt", 'r') as f:
for line in f:
lines += 1
numberofLines = "Total number of lines is: " + str(lines)
print(numberofLines)
#total number of whitespace characters in the file
spaces = 0
for spacesinText in data:
    #the isspace method basically checks whether a character is whitespace, returning True or False. If True, add 1 to spaces.
    if spacesinText.isspace():
spaces += 1
numberofSpaces = "The number of whitespace characters is: " + str(spaces)
print(numberofSpaces)
#total number of characters in the file
characters = 0
for character in data:
    #iterating over a string yields one character at a time, so add 1 per character
    characters = characters + 1
numberofChar = "Number of characters in the file: " + str(characters)
print(numberofChar)
#total number of times Python is mentioned in the file
pythoninText = 0
#Check each word and add 1 every time the word contains "Python".
for word in words:
    if 'Python' in word:
        pythoninText = pythoninText + 1
numberofPython = "Number of times Python was mentioned: " + str(pythoninText)
print(numberofPython)
| UTF-8 | Python | false | false | 1,472 | py | 18 | Data points.py | 15 | 0.718071 | 0.711957 | 0 | 38 | 37.684211 | 151 |
uw-it-cte/uw-restclients | 19,688,130,107,599 | a2523a18061ff12518edb568489e0df70db02fb7 | 551dc5c9b361ee2ca2f41c762e101980983f8523 | /restclients/canvas/courses.py | 407fd51c939698d3ee6b143f8bcfecfd9cccaafa | [
"Apache-2.0"
] | permissive | https://github.com/uw-it-cte/uw-restclients | d20fe667bea13d0b146af90764579c4cf665ed72 | 2b09348bf066e5508304401f93f281805e965af5 | refs/heads/master | 2021-01-17T05:59:19.818654 | 2017-01-10T23:01:39 | 2017-01-10T23:01:39 | 26,886,439 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from restclients.canvas import Canvas
from restclients.models.canvas import CanvasCourse, CanvasTerm
import re
class Courses(Canvas):
def get_course(self, course_id, params={}):
"""
Return course resource for given canvas course id.
https://canvas.instructure.com/doc/api/courses.html#method.courses.show
"""
        # copy, so that neither the caller's dict nor the shared default is mutated
        params = dict(params)
        include = list(params.get("include", []))
if "term" not in include:
include.append("term")
params["include"] = include
url = "/api/v1/courses/%s" % (course_id)
return self._course_from_json(self._get_resource(url, params=params))
def get_course_by_sis_id(self, sis_course_id, params={}):
"""
Return course resource for given sis id.
"""
return self.get_course(self._sis_id(sis_course_id, sis_field="course"),
params)
def get_courses_in_account(self, account_id, params={}):
"""
Returns a list of courses for the passed account ID.
https://canvas.instructure.com/doc/api/accounts.html#method.accounts.courses_api
"""
if "published" in params:
params["published"] = "true" if params["published"] else ""
url = "/api/v1/accounts/%s/courses" % (account_id)
courses = []
for data in self._get_paged_resource(url, params=params):
courses.append(self._course_from_json(data))
return courses
def get_courses_in_account_by_sis_id(self, sis_account_id, params={}):
"""
Return a list of courses for the passed account SIS ID.
"""
return self.get_courses_in_account(
self._sis_id(sis_account_id, sis_field="account"), params)
def get_published_courses_in_account(self, account_id, params={}):
"""
Return a list of published courses for the passed account ID.
"""
params["published"] = True
return self.get_courses_in_account(account_id, params)
def get_published_courses_in_account_by_sis_id(self, sis_account_id,
params={}):
"""
Return a list of published courses for the passed account SIS ID.
"""
return self.get_published_courses_in_account(
self._sis_id(sis_account_id, sis_field="account"), params)
def get_courses_for_regid(self, regid, params={}):
"""
Return a list of courses for the passed regid.
https://canvas.instructure.com/doc/api/courses.html#method.courses.index
"""
params["as_user_id"] = self._sis_id(regid, sis_field="user")
data = self._get_resource("/api/v1/courses", params=params)
del params["as_user_id"]
courses = []
for datum in data:
if "sis_course_id" in datum:
courses.append(self._course_from_json(datum))
else:
courses.append(self.get_course(datum["id"], params))
return courses
def create_course(self, account_id, course_name):
"""
Create a canvas course with the given subaccount id and course name.
https://canvas.instructure.com/doc/api/courses.html#method.courses.create
"""
url = "/api/v1/accounts/%s/courses" % account_id
body = {"course": {"name": course_name}}
data = self._post_resource(url, body)
return self._course_from_json(data)
def update_sis_id(self, course_id, sis_course_id):
"""
Updates the SIS ID for the course identified by the passed course ID.
https://canvas.instructure.com/doc/api/courses.html#method.courses.update
"""
url = "/api/v1/courses/%s" % course_id
body = {"course": {"sis_course_id": sis_course_id}}
data = self._put_resource(url, body)
return self._course_from_json(data)
def _course_from_json(self, data):
course = CanvasCourse()
course.course_id = data["id"]
course.sis_course_id = data.get("sis_course_id", None)
course.account_id = data["account_id"]
course.code = data["course_code"]
course.name = data["name"]
course.workflow_state = data["workflow_state"]
course.public_syllabus = data["public_syllabus"]
course_url = data["calendar"]["ics"]
course_url = re.sub(r"(.*?[a-z]/).*", r"\1", course_url)
course.course_url = "%scourses/%s" % (course_url, data["id"])
# Optional attributes specified in the course URL
if "term" in data:
canvas_term = data["term"]
course.term = CanvasTerm(
term_id=canvas_term.get("id"),
sis_term_id=canvas_term.get("sis_term_id", None),
name=canvas_term.get("name"))
if "syllabus_body" in data:
course.syllabus_body = data["syllabus_body"]
return course
| UTF-8 | Python | false | false | 4,911 | py | 87 | courses.py | 80 | 0.583181 | 0.581959 | 0 | 138 | 34.586957 | 88 |
liamattard/Language_model-1.0. | 7,395,933,698,088 | 86770e62429c797de7c720bab30d4769cb6a45c8 | 6d74a3665181f78defe9b8ccf44c64c22e202c9f | /language_model/probabilityCalc.py | 49a0177c7be14c617d7a9516cdeaaa5bac97460a | [] | no_license | https://github.com/liamattard/Language_model-1.0. | 62f067cfe9180ca44da3562539d3b35c26da2ee5 | d22cdfa7c0c8d7c0fb1eeefe6a98f34cf95ee013 | refs/heads/master | 2022-04-22T23:55:26.943656 | 2020-04-16T08:11:30 | 2020-04-16T08:11:30 | 256,150,601 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
import numpy as np
from collections import Counter
def calculateProbabilityFromUnigram(unigram,sentence,word):
return unigram[word]
def calculateProbabilityFromBigram(bigram,sentence,word):
if (sentence[-1],word) in bigram:
return bigram[sentence[-1],word]
else:
return 0
def calculateProbabilityFromTrigram(trigram,sentence,word):
if (sentence[-2],sentence[-1],word) in trigram:
return trigram[sentence[-2],sentence[-1],word]
else:
return 0
def calculateProbabilityFromLaplaceBigram(trainCount,bigramCounts,sentence,lastWord):
if (sentence[-1],lastWord) in bigramCounts:
return (bigramCounts[sentence[-1],lastWord] +1)/(trainCount[sentence[-1]] + len(trainCount))
else:
if (sentence[-1] in trainCount) and (lastWord in trainCount):
return 1/(trainCount[sentence[-1]] + len(trainCount))
else:
return 0
def calculateProbabilityFromLaplaceTrigram(trainCount, bigramCounts,trigramCounts,sentence,lastWord):
if (sentence[-2],sentence[-1],lastWord) in trigramCounts:
return (trigramCounts[sentence[-2],sentence[-1],lastWord]+1)/(bigramCounts[sentence[-2],sentence[-1]] + len(trainCount))
else:
if ((sentence[-2],sentence[-1]) in bigramCounts) and (lastWord in trainCount):
return 1/(bigramCounts[sentence[-2],sentence[-1]] + len(trainCount))
else:
return 0
def calculateProbabilityInterpolation(unigram,bigram,trigram,sentence,word):
probabilityUnigram = 0.1* (unigram[word])
probabilityBigram = 0.3 * (bigram[sentence[-1],word])
probabilityTrigram = 0.6 * (trigram[sentence[-2],sentence[-1],word])
return probabilityUnigram + probabilityBigram + probabilityTrigram
def calculateProbabilityLaplaceInterpolation(trainCount,unigramCount,bigramCounts,trigramCounts,sentence,lastWord):
probabilityUnigram = 0.1 * (unigramCount[lastWord])
probabilityBigram = 0.3 * (calculateProbabilityFromLaplaceBigram(trainCount,bigramCounts,sentence,lastWord))
probabilityTrigram = 0.6 * (calculateProbabilityFromLaplaceTrigram(trainCount,bigramCounts,trigramCounts,sentence,lastWord))
return probabilityUnigram + probabilityBigram + probabilityTrigram
| UTF-8 | Python | false | false | 2,264 | py | 17 | probabilityCalc.py | 7 | 0.72659 | 0.707155 | 0 | 55 | 40.163636 | 129 |
olse/faq | 12,687,333,426,614 | 3e965c3b4d50571084b5cedb88491cfde9c4e57c | 49572f3e1beb5cbdf97262c772251743783a1274 | /faq/admin.py | 00ec6f0b055b826e9c7b2b8c31a58be5d6b57ca3 | [] | no_license | https://github.com/olse/faq | fac1eeef09a1007e1de3dfb5d5a85cf93dfff740 | c4ae87c38e5f719f3b8beb27beae22a515ec8c24 | refs/heads/master | 2015-07-31T17:38:09.731216 | 2012-07-17T16:16:39 | 2012-07-17T16:16:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from faq.models import Language
class LanguageAdmin(admin.ModelAdmin):
list_display= ['name','code']
admin.site.register(Language,LanguageAdmin)
| UTF-8 | Python | false | false | 183 | py | 35 | admin.py | 15 | 0.79235 | 0.79235 | 0 | 5 | 35.4 | 43 |
Festus254/commentsprediction | 13,520,557,091,926 | e3c1ec07af8fcf75af35de176ce419c9d1c25ddb | f55ce0cf1573c95db6b7c07098d6bb6c2ecb113e | /app.py | c6b4ffc9d84c5f9919335a8f7d585cfc6bc56be5 | [] | no_license | https://github.com/Festus254/commentsprediction | 9abe6e8678e245744dc1eadf7f64e8d53cd13a73 | fc6bf88369381503229c73a7ce6ad40698696bd5 | refs/heads/main | 2023-05-02T09:56:59.004484 | 2021-05-24T14:55:13 | 2021-05-24T14:55:13 | 345,622,505 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import streamlit as st
import pandas as pd
import numpy as np
import nltk
import re
import string
import pickle
import joblib
import timeit
#from sklearn import
#from prob_svm import LinearSVC_proba
nltk.download('popular')
stopwords = nltk.corpus.stopwords.words('english')
ps = nltk.PorterStemmer()
wn = nltk.WordNetLemmatizer()
option = st.sidebar.selectbox(
'Which ML model would you like to use?',
('Logistic Regression', 'Linear SVC', 'Naive Bayes'))
st.write('You selected:', option)
st.title('Toxic Comment Analysis.')
st.markdown('''
Used Natural language Processing to clean and vectorize input data and
Machine learning algorithmto predict if a comment is toxic or not.
The implemented models named on the sidebar had a F1 accuracy of 72.1%, 72.3% and 65.1% respectively. ''')
# function to remove punctuation, tokenize, remove stopwords and stem
@st.cache
def clean_text(text):
text = ''.join([i for i in text if not i.isdigit()]) #remove integer values
text = "".join([word.lower() for word in text if word not in string.punctuation])#make lowercase and remove punctuation
text = ' '.join( [word for word in text.split() if len(word)>2] )#remove words less than 2 letters
tokens = re.split('\W+', text)
#words = [wn.lemmatize(word, 'v') for word in tokens]
text = [ps.stem(word) for word in tokens if word not in stopwords]
text = [wn.lemmatize(word) for word in text]
text = " ".join(text)
return text
@st.cache
def vectorizing(text):
new_question = text
tfidf_vectorizer = pickle.load(open("tfidf.pickle", "rb"))
vectorized_question = tfidf_vectorizer.transform([new_question])
return vectorized_question
@st.cache
def create_features(cleaned_text, vectorized_text):
text = cleaned_text
vectorized_text = vectorized_text
label = ['toxic', 'severe_toxic', 'obscene', 'threat','insult', 'identity_hate']
toxic = ['fuck', 'shit', 'suck', 'stupid', 'bitch', 'idiot', 'asshol', 'gay', 'dick']
severe_toxic = ['fuck', 'bitch', 'suck', 'shit', 'asshol', 'dick', 'cunt', 'faggot', 'cock']
obscene =['fuck', 'shit', 'suck', 'bitch', 'asshol', 'dick', 'cunt', 'faggot', 'stupid']
threat =['kill', 'die', 'fuck', 'shit', 'rape', 'hope', 'bitch', 'death', 'hell']
insult = ['fuck', 'bitch', 'suck', 'shit', 'idiot', 'asshol', 'stupid', 'faggot', 'cunt']
identity_hate = ['fuck', 'gay', 'nigger', 'faggot', 'shit', 'jew', 'bitch', 'homosexu', 'suck']
contains_toxic = []
contains_severe_toxic = []
contains_obscene = []
contains_threat = []
contains_insult = []
contains_identity_hate =[]
for col in range(len(label)):
toxic_list = vars()[label[col]]
#st.write(toxic_list)
value = "contains_"+label[col]
check = any(substring in text for substring in toxic_list)
if check is True:
vars()[value].append(1)
#st.write("True")
else:
vars()[value].append(0)
#st.write("False")
inp = list([contains_toxic[0],contains_severe_toxic[0],contains_obscene[0], contains_threat[0], contains_insult[0], contains_identity_hate[0]])
df = pd.DataFrame([inp], columns=['contains_toxic_word', 'contains_severe_toxic_word', 'contains_obscene_word', 'contains_threat_word', 'contains_insult_word', 'contains_identity_hate_word'])
X = pd.concat([df, pd.DataFrame(vectorized_text.toarray())], axis=1)
return X
def predict(features, model = 'Linear SVC'):
start_time = timeit.default_timer()
if model == 'Logistic Regression':
svc_from_joblib = joblib.load('lintoxicmodel.pkl')
y = svc_from_joblib.predict_proba(features)
elapsed = timeit.default_timer() - start_time
if model == 'Linear SVC':
svc_from_joblib = joblib.load('svctoxicmodel.pkl')
y = svc_from_joblib.decision_function(features)
elapsed = timeit.default_timer() - start_time
if model == 'Naive Bayes':
svc_from_joblib = joblib.load('bayestoxicmodel.pkl')
y = svc_from_joblib.predict_proba(features)
elapsed = timeit.default_timer() - start_time
return y,elapsed
def main():
message = st.text_area('write a comment here:')
if st.button('Predict'):
#st.write(message)
cleaned_text = clean_text(message)
#st.write(cleaned_text)
vectorized_text = vectorizing(cleaned_text)
#st.write(vectorized_text)
features = create_features(cleaned_text, vectorized_text)
#st.write(features)
prediction, elapsed = predict(features, model = option)
st.write("Time elapsed to predict is {:2f} minutes". format(elapsed/60))
df = pd.DataFrame({
"contains_toxic": prediction[:, 0],
"contains_severe_toxic": prediction[:, 1],
"contains_obscene": prediction[:, 2],
"contains_threat": prediction[:, 3],
"contains_insult":prediction[:, 4],
"contains_identity_hate": prediction[:, 5]
}, index=['Comment'])
st.write(df.T)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 4,765 | py | 7 | app.py | 1 | 0.682057 | 0.675761 | 0 | 120 | 38.708333 | 192 |
poeks/twitterbelle | 19,292,993,118,772 | 8e7ec29ee1a5c318a97412a8de534cdb02c52ac6 | f3c0ec2252db6a5f9ec1418a56ab67bee5e75c55 | /lib/parser.py | 0b4f87ef323c12a4fd60e5671238fa3db26fd242 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | https://github.com/poeks/twitterbelle | b10d8cd78019dc88e5953ab881d609488fcb3f95 | 48d7de8bbfa089391607b3089890128446447056 | refs/heads/master | 2021-01-25T00:16:29.496568 | 2010-10-18T14:35:45 | 2010-10-18T14:35:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from lib.BeautifulSoup import BeautifulSoup
import urllib2
from django.conf import settings
import re
from lib.exception import PoeksException
from lib.BeautifulSoup import BeautifulSoup
import urllib2
class Parser:
def __init__(self):
pass
def open_url(self, page):
try:
this_url = "%s%s" % (self.url, page)
return urllib2.urlopen(this_url)
except:
raise Exception
def get_element(self, search_string, search_type='id', search_contents='string', search_method='method'):
try:
el = self.soup.findAll(**{search_type:search_string})
except:
raise Exception
retval = ""
try:
if search_method == 'dict':
retval = el[0][search_contents]
else:
retval = getattr(el[0], search_contents)
except Exception, e:
#PoeksException(e, "Couldn't do that thing with el %s" % el)
#print "get_element: Couldn't do that thing with el %s search_string %s" % (el, search_string)
pass
return retval
def get_elements(self, search_string, search_type='id', search_contents='string', search_method='method'):
els = self.soup.findAll(**{search_type:search_string})
elements = []
for el in els:
retval = ""
try:
if search_method == 'dict':
retval = el[search_contents]
#print "dict: "+retval
else:
retval = getattr(el, search_contents)
#print "method: "+retval
except Exception, e:
#PoeksException(e, "Couldn't do that thing with el %s" % el)
#print "Oops"
pass
elements.append(retval)
return elements
| UTF-8 | Python | false | false | 1,539 | py | 31 | parser.py | 15 | 0.65822 | 0.654971 | 0 | 67 | 21.955224 | 107 |
sdsubhajitdas/Algorithm-Collection | 18,717,467,491,332 | c364989c97265f02feb3694f53af8a9e106e2e7b | b87387634f2ab0497210513a727addb94a06b1a0 | /Data Structures/Stack/Stack.py | 75aa795171e93a56408514c3fda3baa6a0e50075 | [] | no_license | https://github.com/sdsubhajitdas/Algorithm-Collection | 93036fa3f08bed26dc87bf727a65e99a61e05b4b | 85d644cd4f3737bfdf2ff115d2d56b7d01f7bbb3 | refs/heads/master | 2020-05-30T06:50:21.641213 | 2019-07-13T18:11:11 | 2019-07-13T18:11:11 | 189,586,819 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Stack():
def __init__(self,length):
self.length = length
self.stack = [None]*length
self.pointer = -1
def push(self, element):
if self.pointer >=self.length-1:
print("Stack Overflow")
return
self.pointer+=1
self.stack[self.pointer]=element
def pop(self):
if self.pointer < 0:
print("Stack Underflow")
return
self.pointer-=1
return self.stack[self.pointer + 1]
def peek(self):
return self.stack[self.pointer] if self.pointer >= 0 else None
def print(self):
print(*self.stack if self.pointer>=0 else None)
if __name__ == "__main__":
stack = Stack(3)
stack.push(2)
stack.push(4)
stack.push(6)
stack.push(8)
print(stack.peek())
stack.print()
print(stack.pop())
print(stack.pop())
print(stack.pop())
print(stack.pop()) | UTF-8 | Python | false | false | 937 | py | 37 | Stack.py | 37 | 0.545358 | 0.531483 | 0 | 39 | 23.051282 | 70 |
openstack/cloudkitty-dashboard | 8,160,437,880,606 | e85dcc92430f21c97ecb2e54fe1659cabe9f837b | f18df31d4ba8569b420219f5d52da311a32581d6 | /cloudkittydashboard/dashboards/admin/modules/forms.py | dd0388810d497c6c71a8159d3db8829c2cd83498 | [
"Apache-2.0"
] | permissive | https://github.com/openstack/cloudkitty-dashboard | 418b54a59a93201c79e422ee4571c9f24b6234e5 | 4ed8863c1b15d489a2a78e767b737402647bc4da | refs/heads/master | 2023-08-23T06:09:10.473334 | 2023-07-12T15:40:17 | 2023-07-12T15:40:17 | 23,157,716 | 25 | 14 | Apache-2.0 | false | 2022-01-18T10:16:11 | 2014-08-20T17:35:14 | 2021-12-28T21:37:49 | 2022-01-18T10:15:37 | 525 | 46 | 15 | 0 | Python | false | false | # Copyright 2017 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import gettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from cloudkittydashboard.api import cloudkitty as api
class EditPriorityForm(forms.SelfHandlingForm):
priority = forms.IntegerField(label=_("Priority"))
def handle(self, request, data):
ck_client = api.cloudkittyclient(request)
try:
priority = ck_client.rating.update_module(
module_id=self.initial["module_id"], priority=data["priority"])
messages.success(
request,
_('Successfully updated priority'))
return priority
except Exception:
exceptions.handle(request,
_("Unable to update priority."))
| UTF-8 | Python | false | false | 1,407 | py | 53 | forms.py | 26 | 0.677328 | 0.671642 | 0 | 37 | 37.027027 | 79 |
lrutl/nudgebc.com | 6,562,710,032,845 | d8bcafcdeccd5ff2b0275517c8236bc77bace9d8 | 3b6042325da6dbf24fdbed8e1f35fcadf2c34140 | /nudgeproject/urls.py | 101db6d589f8687eca6e6dd315a84daef66e01f1 | [] | no_license | https://github.com/lrutl/nudgebc.com | 45c731f8340b8a4d24a1255f59b0a253f3850768 | 710eb94e72070bb01b734e85d32d8a7fa31db907 | refs/heads/master | 2023-08-25T18:10:01.941857 | 2021-10-28T20:51:54 | 2021-10-28T20:51:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from django.urls import include, path
from django.conf.urls import include, url
from django.views import generic
from material.frontend import urls as frontend_urls
from django.conf import settings
from django.conf.urls.static import static
urlpatterns =[
url(r'', include(frontend_urls)),
path('', include('nudge.urls')),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| UTF-8 | Python | false | false | 523 | py | 21 | urls.py | 8 | 0.782027 | 0.782027 | 0 | 14 | 36.357143 | 78 |
pyfibot/pyfibot3 | 11,613,591,585,495 | 39a7926d82f32bc9b3318b8a5fa669c42cde9ffb | d17863f84626328917c36ad36622e4ef4ce26bb4 | /pyfibot/plugins/available/posti.py | 5ddff43102396e61988942112e3bd61a1c5f4368 | [] | no_license | https://github.com/pyfibot/pyfibot3 | 48b479e7b099e6743b23c62e0d9670a8066ddcad | 75b63e60f17735150fdcdb1a72c181efbef9ca08 | refs/heads/master | 2021-01-20T19:34:34.014768 | 2018-12-28T12:33:30 | 2018-12-28T12:33:30 | 63,968,611 | 0 | 0 | null | false | 2019-03-06T07:56:34 | 2016-07-22T16:42:20 | 2018-12-28T12:33:42 | 2018-12-28T12:33:40 | 124 | 0 | 0 | 6 | Python | false | null | """
Get shipment tracking info from Posti
"""
from pyfibot.plugin import Plugin
from pyfibot.url import URL
from pyfibot.utils import parse_datetime, get_relative_time_string
from urllib.parse import quote_plus
class Posti(Plugin):
def init(self):
self.lang = self.config.get('language', 'en')
@Plugin.command('posti')
def posti(self, sender, message, raw_message):
''' Get latest tracking event for a shipment from Posti. Usage: .posti JJFI00000000000000 '''
if not message:
return self.bot.respond('Tracking ID is required.', raw_message)
url = 'http://www.posti.fi/henkiloasiakkaat/seuranta/api/shipments/%s' % quote_plus(message)
data = URL.get_json(url)
if not data or not data.get('shipments'):
return self.bot.respond('Error while getting tracking data. Check the tracking ID or try again later.', raw_message)
shipment = data['shipments'][0]
phase = shipment['phase']
eta_timestamp = shipment.get('estimatedDeliveryTime')
latest_event = shipment['events'][0]
event_time = get_relative_time_string(parse_datetime(latest_event['timestamp']), lang=self.lang)
description = latest_event['description'][self.lang]
location = '%s %s' % (latest_event['locationCode'], latest_event['locationName'])
msg = ' - '.join([event_time, description, location])
if phase != 'DELIVERED' and eta_timestamp:
eta_dt = parse_datetime(eta_timestamp)
eta_txt = eta_dt.strftime('%d.%m.%Y %H:%M')
msg = 'ETA %s - %s' % (eta_txt, msg)
self.bot.respond(msg, raw_message)
| UTF-8 | Python | false | false | 1,662 | py | 42 | posti.py | 34 | 0.643201 | 0.633574 | 0 | 43 | 37.651163 | 128 |
New-arkssac/dwm | 2,027,224,574,870 | 9e3df0d0c84e91baf236f74e9365429034b5962e | 41331559f499fdcefaec97a31abbfaceff42e43a | /statusbar/bar/vol.py | 2e84c8e9dcc165c0825fe3cfa09262c22c62a68e | [
"MIT"
] | permissive | https://github.com/New-arkssac/dwm | 967cfdf387d3acaab9c62eb4b1b25a42b75f7cef | d2ee335a6730c46873cd13cdfe30a631a339258f | refs/heads/master | 2023-03-15T15:16:52.743029 | 2023-02-28T12:25:03 | 2023-02-28T12:25:03 | 580,392,527 | 0 | 0 | MIT | true | 2022-12-20T12:58:40 | 2022-12-20T12:58:39 | 2022-12-19T17:11:37 | 2022-12-15T04:10:42 | 21,485 | 0 | 0 | 0 | null | false | false | #!/bin/python3
import os
import subprocess
import re
class MyVol:
def __init__(self, *args) -> None:
self.this = "vol"
self.dwm = os.environ["DWM"]
self.s2d_reset = "^d^"
# self.color = "^c#1A1A1A^^b#516FAB^"
self.color = "^c#babbf1^^b#1a1b26^"
self.signal = f"^s{self.this}^"
self.handle()
match args[0]:
case "update":
self.update()
case "notify":
self.notify()
case _:
self.click(args[1])
def handle(self):
byte, _ = subprocess.Popen(
["/bin/pactl", "info"], stdout=subprocess.PIPE
).communicate()
sink_stdout = byte.decode()
sink_stdout = re.search("Default Sink: .*", sink_stdout)
self.sink_stdout = sink_stdout and sink_stdout.group().replace(
"\n", ""
).replace("Default Sink: ", "").replace("\n", "")
byte, _ = subprocess.Popen(
[
"/bin/bash",
"-c",
f"pactl list sinks | grep {self.sink_stdout} -A 6 | sed -n '7p' | grep 'Mute: no'",
],
stdout=subprocess.PIPE,
).communicate()
mute_stdout = byte.decode()
byte, _ = subprocess.Popen(
[
"/bin/bash",
"-c",
f"pactl list sinks | grep {self.sink_stdout} -A 7 | sed -n '8p' | awk '{{printf int($5)}}'",
],
stdout=subprocess.PIPE,
).communicate()
vol = int(byte.decode())
self.num = vol
self.vol, self.icon = (
not mute_stdout
and ("--", "ﱝ")
or vol == 0
and ("00" + "%", "婢")
or vol < 10
and ("0" + str(vol) + "%", "奄")
or vol <= 50
and (str(vol) + "%", "奔")
or (str(vol) + "%", "墳")
)
def update(self) -> None:
text = f"{self.icon} {self.vol} "
print(text)
with open(self.dwm + "/statusbar/tmp.py", "r+") as f:
lines = f.readlines()
tmp = []
f.seek(0)
for line in lines:
_ = re.search(rf"{self.this} = .*$", line) or tmp.append(line)
tmp.append(
f'{self.this} = "{self.color}{self.signal}{text}{self.s2d_reset}"\n'
)
f.truncate()
f.writelines(tmp)
def notify(self):
byte, _ = subprocess.Popen(
[
"/bin/bash",
"-c",
f"pactl list sinks | grep '{self.sink_stdout}' -A 10 | grep 'Description: ' | awk -F 'Description: ' '{{print $2}}'",
],
stdout=subprocess.PIPE,
).communicate()
card_name = byte.decode().split("\n")[0]
subprocess.Popen(
[
"/bin/bash",
"-c",
f"notify-send -r 9527 -h int:value:{self.num} -h string:hlcolor:#7F7FFF ' {card_name}[{self.icon} {self.vol}]'",
],
)
def click(self, mode):
match mode:
case "L":
self.notify()
case "M":
subprocess.Popen(
["/bin/bash", "-c", "pactl set-sink-mute @DEFAULT_SINK@ toggle"],
)
case "R":
subprocess.Popen(
["/bin/bash", "-c", "killall pavucontrol || pavucontrol &"],
)
case "U":
subprocess.Popen(
["/bin/bash", "-c", "pactl set-sink-volume @DEFAULT_SINK@ +5%"],
)
self.notify()
case "D":
subprocess.Popen(
["/bin/bash", "-c", "pactl set-sink-volume @DEFAULT_SINK@ -5%"],
)
self.notify()
| UTF-8 | Python | false | false | 3,878 | py | 14 | vol.py | 10 | 0.417486 | 0.406622 | 0 | 126 | 29.68254 | 133 |
arivolispark/datastructuresandalgorithms | 14,001,593,420,932 | 351f109958fb7c9c118e4b5acaa3185639d20b1f | 9a5ad43ce6add59f266074c463c402f4ff717dc5 | /leetcode/30_day_leetcoding_challenge/202004/20200423_bitwise_AND_of_numbers_range/bitwise_AND_of_numbers_range.py | c264df0e5e67c2a936282a3b72f630884411ba39 | [] | no_license | https://github.com/arivolispark/datastructuresandalgorithms | 9cb1cd66f61ab22471d7378fce51f29fcf0ef553 | 57534898c17d058ef1dba2b1cb8cdcd8d1d2a41c | refs/heads/master | 2021-06-24T15:51:04.438627 | 2021-01-12T05:14:37 | 2021-01-12T05:14:37 | 84,909,655 | 0 | 1 | null | false | 2021-01-12T05:14:38 | 2017-03-14T05:38:16 | 2021-01-05T10:18:42 | 2021-01-12T05:14:38 | 2,529 | 0 | 1 | 0 | Python | false | false | """
Title: Bitwise AND of Numbers Range
Given a range [m, n] where 0 <= m <= n <= 2147483647, return
the bitwise AND of all numbers in this range, inclusive.
Example 1:
Input: [5,7]
Output: 4
Example 2:
Input: [0,1]
Output: 0
"""
class Solution:
def rangeBitwiseAnd(self, m: int, n: int) -> int:
result = 0
while m > 0 and n > 0:
msb_p1 = most_significant_bit_position(m)
msb_p2 = most_significant_bit_position(n)
if msb_p1 != msb_p2:
break
# add 2^msb_p1 to result
msb_val = (1 << msb_p1)
result += msb_val
# subtract 2^msb_p1 from m and n.
m -= msb_val
n -= msb_val
return result
def most_significant_bit_position(n: int):
msb_p = -1
while n > 0:
n = n >> 1
msb_p += 1
return msb_p
def get_test_case_1() -> (int, int):
m, n = 0, 1
return m, n
def get_test_case_2() -> (int, int):
m, n = 5, 7
return m, n
def get_test_case_3() -> (int, int):
m, n = 1, 10
return m, n
def get_test_case_4() -> (int, int):
m, n = 1, 1
return m, n
def get_test_case_5() -> (int, int):
m, n = 2, 2
return m, n
def get_test_case_6() -> (int, int):
m, n = 3, 3
return m, n
def get_test_case_7() -> (int, int):
m, n = 0, 2147483647
return m, n
def get_test_case_8() -> (int, int):
m, n = 20000, 2147483647
return m, n
if __name__ == "__main__":
solution = Solution()
#m, n = get_test_case_1()
#m, n = get_test_case_2()
#m, n = get_test_case_3()
#m, n = get_test_case_4()
#m, n = get_test_case_5()
#m, n = get_test_case_6()
#m, n = get_test_case_7()
m, n = get_test_case_8()
print("\n m: ", m)
print(" n: ", n)
result = solution.rangeBitwiseAnd(m, n)
print("\n result: ", result)
| UTF-8 | Python | false | false | 1,889 | py | 370 | bitwise_AND_of_numbers_range.py | 265 | 0.499206 | 0.451032 | 0 | 106 | 16.820755 | 60 |
Fritas/destroyer-airplanes | 12,094,627,945,867 | 9a75523ff021b40476f9d5d78adef40240309692 | dba9dc48a6d97ac4fdce93ffc89fdfe689773d1d | /model/objeto_aeronave.py | 566bfaa36eee44435cc759da737c17afcfabba99 | [] | no_license | https://github.com/Fritas/destroyer-airplanes | 569679f5df0007e6f54b15e634662d9a5fdc4bf5 | 628279541d7f89adb2e9c5eb24e05dfd884146b0 | refs/heads/master | 2020-03-28T00:09:44.831421 | 2018-10-23T19:16:00 | 2018-10-23T19:16:00 | 147,380,524 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Created on jun de 2017
@author: Adriano
@author: Andrei
@author: Joao
"""
from .objeto import Objeto
class ObjetoAeronave(Objeto):
"""
Objeto base para aeronaves do jogo
"""
def __init__(self, ambiente, img_aeronave, img_projetil, dano, resistencia, vida, velocidade_tiro, velocidade, pos_x=0, pos_y=0):
"""
O metodo inicia um objeto aeronave
:param ambiente: ambiente pygame que o jogo esta rodando
:param img_aeronave: caminho da imagem da aeronave
:param img_projetil: caminho da imagem do projetil
:param dano: valor numerico do dano
:param resistencia: valor numerico da resistencia
:param vida: valor numerico da vida
:param velocidade_tiro: valor numerico da velocidade do tiro
:param velocidade: tupla com as velocidades da nave (velocidade_x, velocidade_y)
:param pos_x: posicao x inicial no mapa
:param pos_y: posicao y inicial no mapa
"""
super().__init__(ambiente, img_aeronave, velocidade[0], velocidade[1], pos_x, pos_y)
self.definir_velocidade_tiro(velocidade_tiro)
self.dano = dano
self.resistencia = resistencia
self.vida = vida
self.img_projetil = img_projetil
self.grup_tiros = self.ambiente.sprite.Group()
def definir_velocidade_tiro(self, velocidade):
"""
O metodo define velocidade como um numero inteiro
:param velocidade:
:return: None
"""
self.velocidade_tiro = int(velocidade)
def update(self):
"""
Metodo que deve ser reimplementado em toda aeronave para definir as atualizaoes dela a cada loop do jogo
:return:
"""
pass
def tratar_evento(self, key=None, evento=None):
"""
Metodo que deve ser reimplementado em toda aeronave para definir os comportamentos
:param key:
:param evento:
:return:
"""
pass
def status_vida(self):
"""
verifica se a aeronave ainda esta viva
:return: se esta vivo retorna True, se estiver morto retorna False
"""
if self.vida > 0:
return True
return False
def atirar(self):
"""
Metodo que deve ser reimplementado com o tiro da aeronave
:return:
"""
pass
| UTF-8 | Python | false | false | 2,364 | py | 17 | objeto_aeronave.py | 17 | 0.612521 | 0.608714 | 0 | 76 | 30.105263 | 133 |
collinwat/doku | 16,028,817,982,602 | a97d26c54b9660b1f690f9a0a31a725b508b9ceb | 910564164b1dccfec90ebcbbf427472fd0e1fd7c | /doku/sudoku/game.py | b81db36f8a7694d7a898d9131e64563cc3c9116c | [
"MIT"
] | permissive | https://github.com/collinwat/doku | 4d6465108c58c0c10d5840881423f38f990af302 | 77da5a2c71db187c32f765681df08efdc0f47d6c | refs/heads/master | 2021-01-25T05:15:21.684140 | 2013-04-19T21:08:58 | 2013-04-19T21:08:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import doku.utils as utils
import solve
import parse
class Board(object):
def __init__(self, board, size=None):
board = board.strip()
if size is None:
size = utils.box_size(len(board))
if not size:
msg = 'Size not specified and board string is not a perfect square'
raise ValueError(msg)
self.size = size
self.box_size = boxes = utils.box_size(self.size)
self.hr = '+%s+' % '+'.join(['-' * (boxes * 2 + 1)] * boxes)
self.parser = parse.StringParser(self.size)
self.known = self.parser.parse(board)
self.rebuild()
@property
def solutions(self):
if not getattr(self, '_solutions', None):
self._solutions = self.solver.solve()
return self._solutions
def rebuild(self):
self.solver = solve.DLXSolver(self.size, known=self.known)
self._solutions = None
self.reset()
def reset(self):
self.guesses = set()
self.grid = [[None] * self.size for i in xrange(self.size)]
for known in self.known:
self.grid[known[0]][known[1]] = known[2] + 1
def guess(self, guess):
row, column, number = guess
if guess in self.known or \
guess in self.guesses or \
row < 0 or row >= self.size or \
column < 0 or column >= self.size or \
number < 0 or number >= self.size:
return
row = self.grid[row]
old = row[column]
row[column] = number + 1
self.guesses.add(guess)
if old:
old = (guess[0], guess[1], old)
if old in self.guesses:
self.guesses.remove(old)
@property
def ilines(self):
boxes = self.box_size
for row_index, row in enumerate(self.grid):
if row_index % self.box_size == 0:
yield self.hr
row = ['%s' % i if i else '.' for i in row]
segments = []
for i in xrange(boxes):
cells = row[i * boxes:i * boxes + boxes]
segments.append(' '.join(cells))
yield '| %s |' % ' | '.join(segments)
if row_index == len(self.grid) - 1:
yield self.hr
@property
def lines(self):
return list(self.ilines)
@property
def text(self):
return '\n'.join(self.ilines)
@property
def line(self):
return ''.join(['%s' % cell if cell else '.'
for row in self.grid
for cell in row])
def solve(self, index=0):
if len(self.solutions) < 1:
return
for solution in self.solutions[index]:
self.guess(solution)
| UTF-8 | Python | false | false | 2,747 | py | 17 | game.py | 12 | 0.517292 | 0.511467 | 0 | 101 | 26.19802 | 79 |
dllm-pk/ETI_Team02_assignment1 | 12,163,347,400,245 | c53a2d3a9f2322f8d92b3c7a3beecb7f456aad2e | 8f3031b2ec53e9c4f728156e9986faadfb20e6a3 | /code/movement.py | d31a01bd4f1faddf53d6989ed13b6cf25d24b8a6 | [] | no_license | https://github.com/dllm-pk/ETI_Team02_assignment1 | b452d75a8bafcf48196c0ac2f7d4bd498097ed6c | e515dc72ee334310f7d98ca07173e656f0df8402 | refs/heads/main | 2023-03-04T20:37:55.470642 | 2021-02-19T04:35:46 | 2021-02-19T04:35:46 | 312,141,726 | 0 | 0 | null | false | 2021-02-19T04:25:17 | 2020-11-12T02:06:05 | 2021-02-19T04:18:51 | 2021-02-19T04:24:57 | 7,201 | 0 | 0 | 18 | HTML | false | false | from new_save_exit_game import *
from map import *
#User inputs move
def get_move():
moves = [W, A, S, D]
while True:
s = " "+W+ " = up; " + A+ " = left; " + S+ " = down; " + D+ " = right"
print(s)
prompt="Enter move: "
move = input(prompt).upper()
if move in moves: return move
print("invalid input")
#Hero moves
def game_move(game):
view_map(game)
position = 1
x = game[X_INDEX]
y = game[Y_INDEX]
game_map = game[MAP_INDEX]
n = len(game_map [0])
print()
while True:
move = get_move()
bad = False
if move == S:
if x == n-1:
bad = True
print("Cannot move DOWN")
else: x = x+1
if move == W:
if x == 0:
bad = True
print("Cannot move UP")
else: x = x-1
if move == D:
if y == n-1:
bad = True
print("Cannot move RIGHT")
else: y = y+1
if move == A:
if y == 0:
bad = True
print("Cannot move :LEFT")
else: y = y-1
if not bad: break
game[X_INDEX] = x
game[Y_INDEX] = y
def town_move(game):
game_move(game)
game[STATE_INDEX] = OUT_DOOR
view_map(game)
game[DAY_INDEX] = game[DAY_INDEX] + 1
town_move(game) | UTF-8 | Python | false | false | 1,396 | py | 26 | movement.py | 17 | 0.44341 | 0.43553 | 0 | 59 | 22.677966 | 78 |
tkincaid/tkincaid.github.com | 4,466,766,020,246 | 3712de82e2fc9912d170ca7b5b9a67983494a736 | 7b7c81e39169b7769d2b14618b7fb8f3ef5ea1dc | /tests/ModelingMachine/test_Numeric_impute.py | b69de47b9de19c3f8205fa577498c24986b4ac26 | [] | no_license | https://github.com/tkincaid/tkincaid.github.com | cf349c143056b847c8281d8d363b686a679f6499 | 8a9ab9ea4a061573328b5fcca6706536062e3be5 | refs/heads/master | 2016-09-05T09:24:48.325828 | 2014-09-24T16:49:14 | 2014-09-24T16:49:14 | 21,217,649 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #########################################################
#
# Unit Test for Numeric_impute_arbitrary Task
#
# Author: Sergey Yurgenson
#
# Copyright DataRobot, Inc. 2014
#
########################################################
import unittest
import pandas as pd
import numpy as np
import os
import sys
tests_dir = os.path.dirname(os.path.abspath(__file__) )
modeling_machine_dir = os.path.join(tests_dir, '../..')
sys.path.append(modeling_machine_dir)
from ModelingMachine.engine.tasks.converters import Numeric_impute_arbitrary
from ModelingMachine.engine.tasks.converters import Numeric_impute
from ModelingMachine.engine.container import Container
class TestNumericImputeArbitrary(unittest.TestCase):
def setUp(self):
self.c = pd.DataFrame(data=np.array([
[ 1, 2, 11, float('NaN') ],
[ 2, 3, 17, 18 ],
[ 3, 2, float('NaN'), 16 ],
[ 4, 1, 13, 14 ],
[ 20, 1, 45, 46 ]
]),columns=['a','b','c','d'])
self.correct1 = np.array([
[ 1, 2, 11, -9999 ],
[ 2, 3, 17, 18 ],
[ 3, 2, -9999, 16 ],
[ 4, 1, 13, 14 ],
[ 20, 1, 45, 46 ]
])
self.correct2 = np.array([
[ 1, 2, 11, 100 ],
[ 2, 3, 17, 18 ],
[ 3, 2, 100, 16 ],
[ 4, 1, 13, 14 ],
[ 20, 1, 45, 46 ]
])
def test_transform_arbitrary_imputation(self):
nip = Numeric_impute_arbitrary()
X = nip.fit_transform(Container(self.c))
print 'Result is \n{}'.format(X())
self.assertTrue(np.all(X() - self.correct1 < 0.0001))
nip = Numeric_impute_arbitrary('m=100')
X = nip.fit_transform(Container(self.c))
print 'Result is \n{}'.format(X())
self.assertTrue(np.all(X() - self.correct2 < 0.0001))
class TestNumericImpute(unittest.TestCase):
def test_transform_imputation_with_object_type(self):
self.c = pd.DataFrame(data=np.array([
[ 1, 2, 11, float('NaN') ],
[ 2, 3, 17, 18 ],
[ 3, 2, float('NaN'), 16 ],
[ 4, 1, 13, 14 ],
[ 20, 1, 45, 46 ]
]).astype('object'),columns=['a','b','c','d'])
nip = Numeric_impute()
X = nip.fit_transform(Container(self.c))
# passes if no error, should probably assert something
def test_transform_missing_test_only(self):
train = pd.DataFrame(data=np.array([
[ 1, 2, 11, 11 ],
[ 2, 3, 17, 18 ],
[ 3, 2, 14, 16 ],
[ 4, 1, 13, 14 ],
[ 20, 1, 45, 46 ]], dtype=np.float))
test = pd.DataFrame(data=np.array([
[ 1, 2, 11, np.nan ],
[ 2, 3, 17, np.nan ],
[ 3, 2, np.nan, np.nan ],
[ 4, 1, 13, np.nan ],
[ 20, 1, 45, np.nan ]], dtype=np.float))
print(test)
nip = Numeric_impute()
nip.fit(Container(train))
np.testing.assert_array_equal(nip.nan_count_, np.zeros(4))
out = nip.transform(Container(test))
print(out())
self.assertTrue(np.all(np.isfinite(out())))
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 3,293 | py | 394 | test_Numeric_impute.py | 319 | 0.478895 | 0.419982 | 0 | 94 | 34.031915 | 77 |
hima03/log-decorator | 11,244,224,410,100 | 1a7bef81653059f354d5b777b44fe0bfc1d25917 | 9a935990abcadf94abbc2ce32ab693a0ebb6a87a | /log-decorators/calculator.py | 4304451c6a64f43ea8e9fa95f6e68979715febcb | [] | no_license | https://github.com/hima03/log-decorator | d0aabefe74dce4ab920fcad62323b1f9899c9921 | 63697d914a5cd423a16dd74fb3cf774498da7197 | refs/heads/master | 2023-05-03T06:33:22.897743 | 2020-08-28T10:30:26 | 2020-08-28T10:30:26 | 290,425,444 | 21 | 12 | null | false | 2023-02-18T07:02:39 | 2020-08-26T07:21:07 | 2022-09-02T14:01:28 | 2021-05-28T14:15:36 | 9 | 18 | 9 | 1 | Python | false | false | import log_decorator
import log
class Calculator():
def __init__(self, first=0, second=0, log_file_name='', log_file_dir=''):
self.first = first
self.second = second
#log file name and directory which we want to keep
self.log_file_name = log_file_name
self.log_file_dir = log_file_dir
# Initializing logger object to write custom logs
self.logger_obj = log.get_logger(log_file_name=self.log_file_name, log_sub_dir=self.log_file_dir)
@log_decorator.log_decorator()
def add(self, third=0, fourth=0):
# writing custom logs specific to function, outside of log decorator, if needed
self.logger_obj.info("Add function custom log, outside decorator")
try:
return self.first + self.second + third + fourth
except:
raise
@log_decorator.log_decorator()
def divide(self):
self.logger_obj.info("Divide function custom log, outside decorator")
try:
return self.first / self.second
except:
raise
if __name__ == '__main__':
calculator = Calculator(5, 0, 'calculator_file', 'calculator_dir')
calculator.add(third=2,fourth=3)
calculator.divide()
| UTF-8 | Python | false | false | 1,226 | py | 3 | calculator.py | 2 | 0.627243 | 0.620718 | 0 | 34 | 35.058824 | 105 |
Sentinel-One/jenkinsapi | 12,902,081,799,300 | f8d38d1ff8dbcb205d41d09f3f6af4fc5c38176d | fefd3cc9ae20245ca53c05bec40bac00ce1172ac | /examples/how_to/search_artifacts.py | fcd745170f08da2c38a3117efbb6a67fe4be003a | [
"MIT"
] | permissive | https://github.com/Sentinel-One/jenkinsapi | 800254c1fbb31af987daebbf9d7cf48097bbdfc1 | deaaef56dc582bd32d31a54b873c8eda5ffdce9c | refs/heads/master | 2021-01-18T10:01:13.555912 | 2015-04-19T12:21:06 | 2015-04-19T12:21:06 | 34,204,081 | 0 | 0 | null | true | 2015-04-19T11:30:23 | 2015-04-19T11:30:23 | 2015-04-18T23:21:50 | 2015-04-18T23:21:50 | 2,661 | 0 | 0 | 0 | null | null | null | from __future__ import print_function
from jenkinsapi.api import search_artifacts
jenkinsurl = "http://localhost:8080"
jobid = "foo"
artifact_ids = ["test1.txt", "test2.txt"] # I need a build that contains all of these
result = search_artifacts(jenkinsurl, jobid, artifact_ids)
print((repr(result)))
| UTF-8 | Python | false | false | 302 | py | 1 | search_artifacts.py | 1 | 0.741722 | 0.721854 | 0 | 8 | 36.75 | 86 |
ashutosh-narkar/LeetCode | 2,937,757,664,406 | efcd4539b7975db77d19cb84380a811191726603 | aa49120740b051eed9b7199340b371a9831c3050 | /level_order_zigzag.py | 6d52246e70c27e8e4b98b2d57a9c6acb5f899fe1 | [] | no_license | https://github.com/ashutosh-narkar/LeetCode | cd8d75389e1ab730b34ecd860b317b331b1dfa97 | b62862b90886f85c33271b881ac1365871731dcc | refs/heads/master | 2021-05-07T08:37:42.536436 | 2017-11-22T05:18:23 | 2017-11-22T05:18:23 | 109,366,819 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
"""
Given a binary tree, return the zigzag level order traversal of its nodes' values.
(ie, from left to right, then right to left for the next level and alternate between).
For example:
Given binary tree {3,9,20,#,#,15,7},
3
/ \
9 20
/ \
15 7
return its zigzag level order traversal as:
[
[3],
[20,9],
[15,7]
]
"""
# Solution 1 : Runtime O(n)
#1) pop from stack "current" and store the node’s value.
#2) Whenever the current level’s order is from left->right, you push the node’s left child,
# then its right child to stack "next".
# Remember a Stack is a Last In First OUT (LIFO) structure,
# so the next time when nodes are popped off nextLevel, it will be in the reverse order.
# 3) On the other hand, when the current level’s order is from right->left,
# you would push the node’s right child first, then its left child.
# 4) Finally, don’t forget to swap those two stacks at the
# end of each level (ie, when currentLevel is empty).
def zigzagLevelOrder_1(root):
if not root:
return []
current, next = [], []
current.append(root)
leftToRight = True
result = []
temp = []
while current:
node = current.pop()
temp.append(node.val)
if leftToRight:
if node.left:
next.append(node.left)
if node.right:
next.append(node.right)
else:
if node.right:
next.append(node.right)
if node.left:
next.append(node.left)
if not current:
result.append(temp)
temp = []
leftToRight = not leftToRight
current, next = next, current
return result
"""
Solution 1: Minor change in levelOrder.py
Runtime - O(n^2)
"""
def zigzagLevelOrder_2(root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
result = []
level = [root]
direction = 1
while level:
result.append([node.val for node in level][::direction])
# reverse the direction
direction *= -1
LRpair = [(node.left, node.right) for node in level]
level = []
for pair in LRpair:
for node in pair:
if node:
level.append(node)
return result
| UTF-8 | Python | false | false | 2,100 | py | 418 | level_order_zigzag.py | 414 | 0.655172 | 0.639847 | 0 | 115 | 17.130435 | 91 |
Dom88Finch/recipe-scraper | 1,099,511,643,777 | 7491904374bb38a6dab77f51e8f53ecbb2faeed5 | 4eaa0a86ab8dcd96584afa5d37f36824e4212f5d | /migrations/versions/5e78cc772642_.py | 1a540557a8f0b56cd624877296fd14d1e52515ef | [
"MIT"
] | permissive | https://github.com/Dom88Finch/recipe-scraper | 55c72f4513f500fccef0b6e583b1c5b824a08835 | 9a41c3db75fd28ebd57d442ed9a12635605cd810 | refs/heads/master | 2023-04-22T14:36:00.126343 | 2021-04-30T06:01:27 | 2021-04-30T06:01:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """empty message
Revision ID: 5e78cc772642
Revises: ec21bd75ea92
Create Date: 2020-07-22 22:44:45.754328
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5e78cc772642'
down_revision = 'ec21bd75ea92'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('recipe',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('recipe_name', sa.Text(), nullable=True),
sa.Column('recipe_link', sa.Text(), nullable=True),
sa.Column('image_link', sa.Text(), nullable=True),
sa.Column('instructions', sa.Text(), nullable=True),
sa.Column('servings', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
op.drop_index('ix_users_email', table_name='users')
op.drop_index('ix_users_username', table_name='users')
op.drop_table('users')
op.drop_table('recipes')
op.drop_constraint(None, 'ingredients', type_='foreignkey')
op.create_foreign_key(None, 'ingredients', 'recipe', ['recipe_id'], ['id'])
op.drop_constraint(None, 'saved_recipes', type_='foreignkey')
op.drop_constraint(None, 'saved_recipes', type_='foreignkey')
op.create_foreign_key(None, 'saved_recipes', 'user', ['user_id'], ['id'])
op.create_foreign_key(None, 'saved_recipes', 'recipe', ['recipe_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'saved_recipes', type_='foreignkey')
op.drop_constraint(None, 'saved_recipes', type_='foreignkey')
op.create_foreign_key(None, 'saved_recipes', 'users', ['user_id'], ['id'])
op.create_foreign_key(None, 'saved_recipes', 'recipes', ['recipe_id'], ['id'])
op.drop_constraint(None, 'ingredients', type_='foreignkey')
op.create_foreign_key(None, 'ingredients', 'recipes', ['recipe_id'], ['id'])
op.create_table('recipes',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('recipe_name', sa.TEXT(), nullable=True),
sa.Column('recipe_link', sa.TEXT(), nullable=True),
sa.Column('image_link', sa.TEXT(), nullable=True),
sa.Column('instructions', sa.TEXT(), nullable=True),
sa.Column('servings', sa.TEXT(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('username', sa.VARCHAR(length=64), nullable=True),
sa.Column('email', sa.VARCHAR(length=120), nullable=True),
sa.Column('password_hash', sa.VARCHAR(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_users_username', 'users', ['username'], unique=1)
op.create_index('ix_users_email', 'users', ['email'], unique=1)
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
op.drop_table('recipe')
# ### end Alembic commands ###
| UTF-8 | Python | false | false | 3,487 | py | 41 | 5e78cc772642_.py | 10 | 0.652423 | 0.632922 | 0 | 82 | 41.52439 | 82 |
Alex-zhai/learn_practise | 704,374,662,115 | 145dd011f88daf202f084c88dd8755815936bfb2 | 07604219235be913dc4deccfdba454a4a2e12456 | /kaggle_prac/Titanic/pred_xgboost1.py | 8f45826d2324477bf1a9f527abf47a80ae97a617 | [] | no_license | https://github.com/Alex-zhai/learn_practise | e5a9fc1f9d2791d450f76c4087a74713a5359251 | 994630abd67c1403893f9a027855abca736dfcd1 | refs/heads/master | 2020-12-01T17:01:46.128171 | 2019-12-29T05:15:00 | 2019-12-29T05:15:00 | 230,705,620 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.feature_extraction import DictVectorizer
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
# print(train.info())
# print(test.info())
# choose useful features by hand
selected_feat = ['Pclass', 'Sex', 'Age', 'Embarked', 'SibSp', 'Parch', 'Fare']
x_train = train[selected_feat]
x_test = test[selected_feat]
y_train = train['Survived']
print(x_train['Embarked'].value_counts())
print(x_test['Embarked'].value_counts())
# 对于类别形特征,使用频率最高的特征值来填充缺失值
x_train['Embarked'].fillna('S', inplace=True)
x_test['Embarked'].fillna('S', inplace=True)
# 对于数值型特征,使用平均值来填充缺失值
x_train['Age'].fillna(x_train['Age'].mean(), inplace=True)
x_test['Age'].fillna(x_test['Age'].mean(), inplace=True)
x_test['Fare'].fillna(x_test['Fare'].mean(), inplace=True)
# print(x_train.info())
# print(x_test.info())
# 特征向量化
dict_vec = DictVectorizer(sparse=False)
x_train = dict_vec.fit_transform(x_train.to_dict(orient='record'))
print(dict_vec.feature_names_)
x_test = dict_vec.transform(x_test.to_dict(orient='record'))
params = {'max_depth': list(range(2, 7)), 'n_estimators': list(range(100, 1100, 200)), 'learning_rate': [0.05, 0.1, 0.25, 0.5, 1.0]}
xgbc_best = XGBClassifier()
gs = GridSearchCV(xgbc_best, params, verbose=1)
gs.fit(x_train, y_train)
print(gs.best_score_)
print(gs.best_params_)
pred_values = gs.predict(x_test)
xgbc_submission = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': pred_values})
xgbc_submission.to_csv("submission_xgbc_best.csv", index=False) | UTF-8 | Python | false | false | 1,788 | py | 131 | pred_xgboost1.py | 128 | 0.683215 | 0.667849 | 0 | 53 | 29.962264 | 132 |
Cottonwood-Technology/ValidX | 13,288,628,863,751 | 539d5d00855bed491e93f3bbc285e09ee83af71e | 3f388bf0cfcc9f6cc6e0554ba839abe0b35f17af | /tests/unittests/test_containers.py | 64ec98c76eda69f014e6de623abda7869e364b59 | [
"BSD-2-Clause"
] | permissive | https://github.com/Cottonwood-Technology/ValidX | 179e75e8d6d93be0f532b36ecee844e7a60b3bfd | 4798c3ce8ed462c1f16253e9b238e2b5b518e036 | refs/heads/master | 2023-08-09T09:26:47.269971 | 2023-07-20T17:48:27 | 2023-07-20T17:48:27 | 206,943,401 | 23 | 5 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pickle
from collections import OrderedDict, defaultdict, deque
from collections.abc import Sequence, Mapping, Iterable
import pytest
from webob.multidict import MultiDict as WebObMultiDict
from werkzeug.datastructures import MultiDict as WerkzeugMultiDict
from multidict import MultiDict
from validx import exc
NoneType = type(None)
@pytest.fixture(params=[WebObMultiDict, WerkzeugMultiDict, MultiDict])
def multidict_class(request):
return request.param
class CustomSequence(Sequence):
def __init__(self, *items):
self.items = items
def __getitem__(self, index):
return self.items[index]
def __len__(self):
return len(self.items)
class CustomIterable(Iterable):
def __init__(self, *items):
self.items = items
def __iter__(self):
return iter(self.items)
class CustomMapping(Mapping):
def __init__(self, content):
self.content = content
def __getitem__(self, key):
return self.content[key]
def __iter__(self):
return iter(self.content)
def __len__(self):
return len(self.content)
# =============================================================================
def test_list(module):
v = module.List(module.Int())
assert v([1, 2, 3]) == [1, 2, 3]
assert v((1, 2, 3)) == [1, 2, 3]
assert v({1}) == [1]
assert v(frozenset([1])) == [1]
assert v(CustomSequence(1, 2, 3)) == [1, 2, 3]
assert v(CustomIterable(1, 2, 3)) == [1, 2, 3]
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
# Any ``Iterable``, but not ``str`` is allowed
with pytest.raises(exc.InvalidTypeError) as info:
v("1, 2, 3")
assert info.value.expected == Iterable
assert info.value.actual == str
# Any ``Iterable``, but not ``bytes`` is allowed
with pytest.raises(exc.InvalidTypeError) as info:
v(b"1, 2, 3")
assert info.value.expected == Iterable
assert info.value.actual == bytes
# Any ``Iterable``, but not ``dict`` is allowed
with pytest.raises(exc.InvalidTypeError) as info:
v({1: 1, 2: 2, 3: 3})
assert info.value.expected == Iterable
assert info.value.actual == dict
# Any ``Iterable``, but not ``Mapping`` is allowed
with pytest.raises(exc.InvalidTypeError) as info:
v(CustomMapping({1: 1, 2: 2, 3: 3}))
assert info.value.expected == Iterable
assert info.value.actual == CustomMapping
# Test error context from sequence
with pytest.raises(exc.SchemaError) as info:
v([1, "2", 3, None])
assert len(info.value) == 2
assert isinstance(info.value[0], exc.InvalidTypeError)
assert info.value[0].context == deque([1])
assert info.value[0].expected == int
assert info.value[0].actual == str
assert isinstance(info.value[1], exc.InvalidTypeError)
assert info.value[1].context == deque([3])
assert info.value[1].expected == int
assert info.value[1].actual == NoneType
# Test error context from iterable
with pytest.raises(exc.SchemaError) as info:
v(CustomIterable(1, "2", 3, None))
assert len(info.value) == 2
assert isinstance(info.value[0], exc.InvalidTypeError)
assert info.value[0].context == deque([None])
assert info.value[0].expected == int
assert info.value[0].actual == str
assert isinstance(info.value[1], exc.InvalidTypeError)
assert info.value[1].context == deque([None])
assert info.value[1].expected == int
assert info.value[1].actual == NoneType
@pytest.mark.parametrize("nullable", [None, False, True])
def test_list_nullable(module, nullable):
v = module.List(module.Int(), nullable=nullable)
assert v([1, 2, 3]) == [1, 2, 3]
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
if nullable:
assert v(None) is None
else:
with pytest.raises(exc.InvalidTypeError) as info:
v(None)
assert info.value.expected == Iterable
assert info.value.actual == NoneType
@pytest.mark.parametrize("sort", [None, 1, -1])
def test_list_sort(module, sort):
v = module.List(module.Int(), sort=sort)
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
if not sort:
assert v([3, 1, 2]) == [3, 1, 2]
elif sort > 0:
assert v([3, 1, 2]) == [1, 2, 3]
elif sort < 0:
assert v([3, 1, 2]) == [3, 2, 1]
v = module.List(
module.Dict({"x": module.Int()}),
sort=sort,
sort_key=lambda item: item["x"],
)
assert v.clone() == v
# assert pickle.loads(pickle.dumps(v)) == v # lambda is not pickleable
if not sort:
assert v([{"x": 1}, {"x": 3}, {"x": 2}]) == [{"x": 1}, {"x": 3}, {"x": 2}]
elif sort > 0:
assert v([{"x": 1}, {"x": 3}, {"x": 2}]) == [{"x": 1}, {"x": 2}, {"x": 3}]
elif sort < 0:
assert v([{"x": 1}, {"x": 3}, {"x": 2}]) == [{"x": 3}, {"x": 2}, {"x": 1}]
@pytest.mark.parametrize("minlen", [None, 2])
@pytest.mark.parametrize("maxlen", [None, 5])
def test_list_minlen_maxlen(module, minlen, maxlen):
v = module.List(module.Int(), minlen=minlen, maxlen=maxlen)
assert v([1, 2, 3]) == [1, 2, 3]
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
if minlen is None:
assert v([1]) == [1]
else:
with pytest.raises(exc.MinLengthError) as info:
v([1])
assert info.value.expected == minlen
assert info.value.actual == 1
# First item doesn't pass validation, so the result length is 1.
# However, it should not raise MinLengthError, but SchemaError instead.
with pytest.raises(exc.SchemaError) as info:
v(["1", 2])
assert len(info.value) == 1
if maxlen is None:
assert v([1, 2, 3, 4, 5, 6]) == [1, 2, 3, 4, 5, 6]
else:
with pytest.raises(exc.MaxLengthError) as info:
v([1, 2, 3, 4, 5, 6])
assert info.value.expected == maxlen
assert info.value.actual == 6
def test_list_minlen_maxlen_unique(module):
v = module.List(module.Int(), minlen=2, maxlen=5, unique=True)
assert v([1, 2, 3]) == [1, 2, 3]
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
assert v([1, 1, 1, 2, 2, 2, 3, 3, 3]) == [1, 2, 3]
with pytest.raises(exc.MinLengthError) as info:
v([1, 1, 1])
assert info.value.expected == 2
assert info.value.actual == 1
with pytest.raises(exc.MaxLengthError) as info:
v([1, 1, 1, 2, 3, 4, 5, 6])
assert info.value.expected == 5
assert info.value.actual == 6
@pytest.mark.parametrize("unique", [None, False, True])
def test_list_unique(module, unique):
v = module.List(module.Int(), unique=unique)
assert v([1, 2, 3]) == [1, 2, 3]
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
if unique:
assert v([1, 2, 3, 3, 2, 1]) == [1, 2, 3]
else:
assert v([1, 2, 3, 3, 2, 1]) == [1, 2, 3, 3, 2, 1]
def test_list_context(module):
class MarkContext(module.Validator):
def __call__(self, value, __context=None):
__context["marked"] = True
return value
v = module.List(MarkContext())
context = {}
v([None], context)
assert context["marked"]
# =============================================================================
def test_set(module):
v = module.Set(module.Int())
assert v([1, 2, 3]) == {1, 2, 3}
assert v((1, 2, 3)) == {1, 2, 3}
assert v({1, 2, 3}) == {1, 2, 3}
assert v(frozenset([1, 2, 3])) == {1, 2, 3}
assert v(CustomSequence(1, 2, 3)) == {1, 2, 3}
assert v(CustomIterable(1, 2, 3)) == {1, 2, 3}
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
# Any ``Iterable``, but not ``str`` is allowed
with pytest.raises(exc.InvalidTypeError) as info:
v("1, 2, 3")
assert info.value.expected == Iterable
assert info.value.actual == str
# Any ``Iterable``, but not ``bytes`` is allowed
with pytest.raises(exc.InvalidTypeError) as info:
v(b"1, 2, 3")
assert info.value.expected == Iterable
assert info.value.actual == bytes
# Any ``Iterable``, but not ``dict`` is allowed
with pytest.raises(exc.InvalidTypeError) as info:
v({1: 1, 2: 2, 3: 3})
assert info.value.expected == Iterable
assert info.value.actual == dict
# Any ``Iterable``, but not ``Mapping`` is allowed
with pytest.raises(exc.InvalidTypeError) as info:
v(CustomMapping({1: 1, 2: 2, 3: 3}))
assert info.value.expected == Iterable
assert info.value.actual == CustomMapping
# Test error context from sequence
with pytest.raises(exc.SchemaError) as info:
v([1, "2", 3, None])
assert len(info.value) == 2
assert isinstance(info.value[0], exc.InvalidTypeError)
assert info.value[0].context == deque([1])
assert info.value[0].expected == int
assert info.value[0].actual == str
assert isinstance(info.value[1], exc.InvalidTypeError)
assert info.value[1].context == deque([3])
assert info.value[1].expected == int
assert info.value[1].actual == NoneType
# Test error context from iterable
with pytest.raises(exc.SchemaError) as info:
v(CustomIterable(1, "2", 3, None))
assert len(info.value) == 2
assert isinstance(info.value[0], exc.InvalidTypeError)
assert info.value[0].context == deque([None])
assert info.value[0].expected == int
assert info.value[0].actual == str
assert isinstance(info.value[1], exc.InvalidTypeError)
assert info.value[1].context == deque([None])
assert info.value[1].expected == int
assert info.value[1].actual == NoneType
@pytest.mark.parametrize("nullable", [None, False, True])
def test_set_nullable(module, nullable):
v = module.Set(module.Int(), nullable=nullable)
assert v([1, 2, 3]) == {1, 2, 3}
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
if nullable:
assert v(None) is None
else:
with pytest.raises(exc.InvalidTypeError) as info:
v(None)
assert info.value.expected == Iterable
assert info.value.actual == NoneType
@pytest.mark.parametrize("minlen", [None, 2])
@pytest.mark.parametrize("maxlen", [None, 5])
def test_set_minlen_maxlen(module, minlen, maxlen):
v = module.Set(module.Int(), minlen=minlen, maxlen=maxlen)
assert v([1, 2, 3]) == {1, 2, 3}
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
if minlen is None:
assert v([1]) == {1}
else:
with pytest.raises(exc.MinLengthError) as info:
v([1])
assert info.value.expected == minlen
assert info.value.actual == 1
# First item doesn't pass validation, so the result length is 1.
# However, it should not raise MinLengthError, but SchemaError instead.
with pytest.raises(exc.SchemaError) as info:
v(["1", 2])
assert len(info.value) == 1
if maxlen is None:
assert v([1, 2, 3, 4, 5, 6]) == {1, 2, 3, 4, 5, 6}
else:
with pytest.raises(exc.MaxLengthError) as info:
v([1, 2, 3, 4, 5, 6])
assert info.value.expected == maxlen
assert info.value.actual == 6
def test_set_context(module):
class MarkContext(module.Validator):
def __call__(self, value, __context=None):
__context["marked"] = True
return value
v = module.Set(MarkContext())
context = {}
v([None], context)
assert context["marked"]
# =============================================================================
def test_tuple(module):
v = module.Tuple(module.Int(), module.Int())
assert v([1, 2]) == (1, 2)
assert v((1, 2)) == (1, 2)
assert v(CustomSequence(1, 2)) == (1, 2)
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
with pytest.raises(exc.InvalidTypeError) as info:
v("1, 2")
assert info.value.expected == Sequence
assert info.value.actual == str
with pytest.raises(exc.TupleLengthError) as info:
v([1, 2, 3])
assert info.value.expected == 2
assert info.value.actual == 3
with pytest.raises(exc.SchemaError) as info:
v(["1", None])
assert len(info.value) == 2
assert isinstance(info.value[0], exc.InvalidTypeError)
assert info.value[0].context == deque([0])
assert info.value[0].expected == int
assert info.value[0].actual == str
assert isinstance(info.value[1], exc.InvalidTypeError)
assert info.value[1].context == deque([1])
assert info.value[1].expected == int
assert info.value[1].actual == NoneType
@pytest.mark.parametrize("nullable", [None, False, True])
def test_tuple_nullable(module, nullable):
v = module.Tuple(module.Int(), module.Int(), nullable=nullable)
assert v([1, 2]) == (1, 2)
assert v((1, 2)) == (1, 2)
assert v(CustomSequence(1, 2)) == (1, 2)
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
if nullable:
assert v(None) is None
else:
with pytest.raises(exc.InvalidTypeError) as info:
v(None)
assert info.value.expected == Sequence
assert info.value.actual == NoneType
def test_tuple_context(module):
class MarkContext(module.Validator):
def __call__(self, value, __context=None):
__context["marked"] = True
return value
v = module.Tuple(MarkContext())
context = {}
v((None,), context)
assert context["marked"]
# =============================================================================
def test_dict(module):
v = module.Dict({"x": module.Int(), "y": module.Int()})
assert v({"x": 1, "y": 2}) == {"x": 1, "y": 2}
assert v(OrderedDict({"x": 1, "y": 2})) == {"x": 1, "y": 2}
assert v(defaultdict(None, {"x": 1, "y": 2})) == {"x": 1, "y": 2}
assert v(CustomMapping({"x": 1, "y": 2})) == {"x": 1, "y": 2}
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
with pytest.raises(exc.InvalidTypeError) as info:
v([("x", 1), ("y", 2)])
assert info.value.expected == Mapping
assert info.value.actual == list
with pytest.raises(exc.SchemaError) as info:
v({"x": "1", "y": None})
assert len(info.value) == 2
info.value.sort()
assert isinstance(info.value[0], exc.InvalidTypeError)
assert info.value[0].context == deque(["x"])
assert info.value[0].expected == int
assert info.value[0].actual == str
assert isinstance(info.value[1], exc.InvalidTypeError)
assert info.value[1].context == deque(["y"])
assert info.value[1].expected == int
assert info.value[1].actual == NoneType
@pytest.mark.parametrize("nullable", [None, False, True])
def test_dict_nullable(module, nullable):
v = module.Dict({"x": module.Int(), "y": module.Int()}, nullable=nullable)
assert v({"x": 1, "y": 2}) == {"x": 1, "y": 2}
assert v(OrderedDict({"x": 1, "y": 2})) == {"x": 1, "y": 2}
assert v(defaultdict(None, {"x": 1, "y": 2})) == {"x": 1, "y": 2}
assert v(CustomMapping({"x": 1, "y": 2})) == {"x": 1, "y": 2}
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
if nullable:
assert v(None) is None
else:
with pytest.raises(exc.InvalidTypeError) as info:
v(None)
assert info.value.expected == Mapping
assert info.value.actual == NoneType
@pytest.mark.parametrize("minlen", [None, 2])
@pytest.mark.parametrize("maxlen", [None, 3])
def test_dict_minlen_maxlen(module, minlen, maxlen):
v = module.Dict(extra=(module.Str(), module.Int()), minlen=minlen, maxlen=maxlen)
assert v({"x": 1, "y": 2}) == {"x": 1, "y": 2}
assert v(OrderedDict({"x": 1, "y": 2})) == {"x": 1, "y": 2}
assert v(defaultdict(None, {"x": 1, "y": 2})) == {"x": 1, "y": 2}
assert v(CustomMapping({"x": 1, "y": 2})) == {"x": 1, "y": 2}
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
if minlen is None:
assert v({"x": 1}) == {"x": 1}
else:
with pytest.raises(exc.MinLengthError) as info:
v({"x": 1})
assert info.value.expected == minlen
assert info.value.actual == 1
# First key doesn't pass validation, so the result length is 1.
# However, it should not raise MinLengthError, but SchemaError instead.
with pytest.raises(exc.SchemaError) as info:
v({"x": "1", "y": 2})
assert len(info.value) == 1
if maxlen is None:
assert v({"x": 1, "y": 2, "z": 3, "a": 4}) == {
"x": 1,
"y": 2,
"z": 3,
"a": 4,
}
else:
with pytest.raises(exc.MaxLengthError) as info:
v({"x": 1, "y": 2, "z": 3, "a": 4})
assert info.value.expected == maxlen
assert info.value.actual == 4
def default_x():
"""Pickable version of callable default value"""
return 0
@pytest.mark.parametrize("defaults", [None, {"x": 0}, {"x": default_x}])
@pytest.mark.parametrize("optional", [None, ["x"]])
def test_dict_defaults_and_optional(module, defaults, optional):
v = module.Dict(
{"x": module.Int(), "y": module.Int()}, defaults=defaults, optional=optional
)
assert v({"x": 1, "y": 2}) == {"x": 1, "y": 2}
assert v(OrderedDict({"x": 1, "y": 2})) == {"x": 1, "y": 2}
assert v(defaultdict(None, {"x": 1, "y": 2})) == {"x": 1, "y": 2}
assert v(CustomMapping({"x": 1, "y": 2})) == {"x": 1, "y": 2}
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
with pytest.raises(exc.SchemaError) as info:
v({"x": 1})
assert len(info.value) == 1
assert isinstance(info.value[0], exc.MissingKeyError)
assert info.value[0].context == deque(["y"])
if defaults:
assert v({"y": 2}) == {"x": 0, "y": 2}
elif optional:
assert v({"y": 2}) == {"y": 2}
else:
with pytest.raises(exc.SchemaError) as info:
v({"y": 2})
assert len(info.value) == 1
assert isinstance(info.value[0], exc.MissingKeyError)
assert info.value[0].context == deque(["x"])
def test_dict_defaults_validation(module):
v = module.Dict(
{"x": module.Dict({"y": module.Int()}, defaults={"y": 1})},
defaults={"x": {}},
)
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
assert v({}) == {"x": {"y": 1}}
v = module.Dict(
{"x": module.Dict({"y": module.Int()}, defaults={"y": 1})},
defaults={"x": []},
)
with pytest.raises(exc.SchemaError) as info:
v({})
assert len(info.value) == 1
assert isinstance(info.value[0], exc.InvalidTypeError)
assert info.value[0].context == deque(["x"])
assert info.value[0].expected == Mapping
assert info.value[0].actual == list
def test_dict_defaults_and_minlen_maxlen(module):
v = module.Dict(
{"x": module.Int()},
defaults={"x": 1},
extra=(module.Str(), module.Int()),
minlen=2,
maxlen=3,
)
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
assert v({"y": 2, "z": 3}) == {"x": 1, "y": 2, "z": 3}
with pytest.raises(exc.MinLengthError) as info:
v({})
assert info.value.expected == 2
assert info.value.actual == 1
with pytest.raises(exc.MaxLengthError) as info:
v({"y": 2, "z": 3, "too_much": 4})
assert info.value.expected == 3
assert info.value.actual == 4
@pytest.mark.parametrize("extra", [None, True])
def test_dict_extra(module, extra):
if extra:
extra = (module.Str(), module.Int())
v = module.Dict({"x": module.Int(), "y": module.Int()}, extra=extra)
assert v({"x": 1, "y": 2}) == {"x": 1, "y": 2}
assert v(OrderedDict({"x": 1, "y": 2})) == {"x": 1, "y": 2}
assert v(defaultdict(None, {"x": 1, "y": 2})) == {"x": 1, "y": 2}
assert v(CustomMapping({"x": 1, "y": 2})) == {"x": 1, "y": 2}
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
if extra:
assert v({"x": 1, "y": 2, "z": 3}) == {"x": 1, "y": 2, "z": 3}
with pytest.raises(exc.SchemaError) as info:
v({"x": 1, "y": 2, 3: None})
assert len(info.value) == 2
info.value.sort()
assert isinstance(info.value[0], exc.InvalidTypeError)
assert info.value[0].context == deque([3, exc.EXTRA_KEY])
assert info.value[0].expected == str
assert info.value[0].actual == int
assert isinstance(info.value[1], exc.InvalidTypeError)
assert info.value[1].context == deque([3, exc.EXTRA_VALUE])
assert info.value[1].expected == int
assert info.value[1].actual == NoneType
else:
with pytest.raises(exc.SchemaError) as info:
v({"x": 1, "y": 2, "z": 3})
assert len(info.value) == 1
assert isinstance(info.value[0], exc.ForbiddenKeyError)
assert info.value[0].context == deque(["z"])
@pytest.mark.parametrize("dispose", [None, ["z"]])
def test_dict_dispose(module, dispose):
v = module.Dict({"x": module.Int(), "y": module.Int()}, dispose=dispose)
assert v({"x": 1, "y": 2}) == {"x": 1, "y": 2}
assert v(OrderedDict({"x": 1, "y": 2})) == {"x": 1, "y": 2}
assert v(defaultdict(None, {"x": 1, "y": 2})) == {"x": 1, "y": 2}
assert v(CustomMapping({"x": 1, "y": 2})) == {"x": 1, "y": 2}
assert v.clone() == v
assert pickle.loads(pickle.dumps(v)) == v
if dispose:
assert v({"x": 1, "y": 2, "z": 3}) == {"x": 1, "y": 2}
else:
with pytest.raises(exc.SchemaError) as info:
v({"x": 1, "y": 2, "z": 3})
assert len(info.value) == 1
assert isinstance(info.value[0], exc.ForbiddenKeyError)
assert info.value[0].context == deque(["z"])
def test_dict_multikeys(module, multidict_class):
v1 = module.Dict({"x": module.Int(), "y": module.Int()})
v2 = module.Dict(
{"x": module.Int(), "y": module.List(module.Int())}, multikeys=["y"]
)
data = multidict_class([("x", 1), ("y", 2), ("y", 3)])
assert v1(data) == {"x": 1, "y": 3} or v1(data) == {"x": 1, "y": 2}
assert v2(data) == {"x": 1, "y": [2, 3]}
assert v1.clone() == v1
assert v2.clone() == v2
def test_dict_context(module):
class MarkContext(module.Validator):
def __call__(self, value, __context=None):
__context["marked"] = True
return value
v = module.Dict({"x": MarkContext()})
context = {}
v({"x": None}, context)
assert context["marked"]
v = module.Dict({"x": MarkContext()}, defaults={"x": None})
context = {}
v({}, context)
assert context["marked"]
v = module.Dict(extra=(module.Str(), MarkContext()))
context = {}
v({"x": None}, context)
assert context["marked"]
v = module.Dict(extra=(MarkContext(), module.Any()))
context = {}
v({"x": None}, context)
assert context["marked"]
| UTF-8 | Python | false | false | 22,973 | py | 90 | test_containers.py | 56 | 0.566143 | 0.539764 | 0 | 704 | 31.632102 | 85 |
Pandinosaurus/ensae_teaching_cs | 5,360,119,189,138 | 3e1363227ef5ce33e2e1e818a3dabe043b367a1d | 7bfbc82845f91a0bf9e56bf4226ea0d445a4e4f8 | /src/ensae_teaching_cs/special/tsp_bresenham.py | d4fe63357d17017fe8769a23f365628d29f7526e | [
"MIT"
] | permissive | https://github.com/Pandinosaurus/ensae_teaching_cs | d79daa2654b97ea94e9c7e98121d0c6f56cbd8a0 | 2abbc7a20c7437f9ab91d1ec83a6aecdefceb028 | refs/heads/master | 2023-04-29T10:13:00.636012 | 2023-04-21T11:37:18 | 2023-04-21T11:37:18 | 160,346,838 | 1 | 0 | MIT | true | 2023-04-21T16:47:24 | 2018-12-04T11:27:27 | 2022-01-11T03:09:41 | 2023-04-21T16:47:22 | 302,512 | 0 | 0 | 0 | Jupyter Notebook | false | false | # -*- coding: utf-8 -*-
"""
@file
@brief Ce module contient la fonction trace_ligne qui retourne l'ensemble des pixels
concernés par le tracé d'une ligne en 8-connexité entre deux pixels.
"""
def trace_ligne_simple(x1, y1, x2, y2):
"""
Trace une ligne entre les points de coordonnées *(x1,y1)* et *(x2,y2)*,
on suppose que *x2 > x1*, *y2 >= y1*,
retourne la ligne sous la forme d'un ensemble de pixels *(x,y)*."""
if y2 - y1 <= x2 - x1: # droite en dessous de la première bissectrice
vx = x2 - x1
vy = y2 - y1
b = vx / 2
y = y1
x = x1
ligne = []
while x <= x2:
ligne.append((x, y))
b -= vy
x += 1
if b < 0:
b += vx
y += 1
return ligne
    else:  # line above the first bisector (slope > 1)
vx = x2 - x1
vy = y2 - y1
b = vy / 2
y = y1
x = x1
ligne = []
while y <= y2:
ligne.append((x, y))
b -= vx
y += 1
if b < 0:
b += vy
x += 1
return ligne
def draw_line(x1, y1, x2, y2):
"""
Trace une ligne entre les points de coordonnées *(x1,y1)* et *(x2,y2)*,
aucune contrainte sur les coordonnées,
retourne la ligne sous la forme d'un ensemble de pixels *(x,y)*.
Utilise l'algorithme de :epkg:`Bresenham`.
"""
if x1 == x2:
if y1 <= y2:
return [(x1, i) for i in range(y1, y2 + 1)]
else:
return [(x1, i) for i in range(y2, y1 + 1)]
if y1 == y2:
if x1 <= x2:
return [(i, y1) for i in range(x1, x2 + 1)]
else:
return [(i, y1) for i in range(x2, x1 + 1)]
if x1 < x2:
if y1 < y2:
return trace_ligne_simple(x1, y1, x2, y2)
else:
ligne = trace_ligne_simple(x1, y2, x2, y1)
return [(x, y1 + y2 - y) for (x, y) in ligne]
if x2 < x1:
if y1 < y2:
ligne = trace_ligne_simple(x2, y1, x1, y2)
return [(x1 + x2 - x, y) for (x, y) in ligne]
else:
ligne = trace_ligne_simple(x2, y2, x1, y1)
return [(x1 + x2 - x, y1 + y2 - y) for (x, y) in ligne]
raise RuntimeError("All cases have already been processed.")
def draw_ellipse(xc, yc, a, b):
"""
Dessine une ellipse de centre *xc, yc*, de demi axe horizontal *a*,
de demi-axe vertical b, l'ellipse a pour équation x²/a² + y²/b² = 1
si l'origine est placée en *xc, yc*,
l'équation de la tangente au point *x0, y0* est :
:math:`\frac{x x_0}{a^2} + \frac{y y_0}{b^2}=0`,
ou :math:`x x_0 b^2 + y y_0 a^2 = 0`.
Utilise l'algorithme de :epkg:`Bresenham`.
"""
# on évite les cas litigieux
if a == 0:
return [(xc, yc + y) for y in range(-b, b)]
if b == 0:
return [(xc + x, yc) for x in range(-a, a)]
bb = b * b
aa = a * a
    # draw the ellipse centered at (0, 0)
ellipse = []
    # first octant
vx = a * bb
vy = 0
x = a
y = 0
bl = vx / 2
while vx >= vy and x >= 0:
ellipse.append((x, y))
y += 1
vy += aa # vy = y * aa
bl -= vy
if bl < 0:
x -= 1
vx -= bb # vx = x * bb
bl += vx
    # second octant
while x >= 0:
ellipse.append((x, y))
x -= 1
vx -= bb # vx = x * bb
bl += vx
if bl > 0:
y += 1
vy += aa # vy = y * aa
bl -= vy
    # second quarter: symmetric with respect to the y-axis
ellipse2 = [(-x, y) for (x, y) in ellipse]
ellipse2.reverse()
ellipse.extend(ellipse2)
    # third and fourth quarters: symmetric with respect to the x-axis
ellipse2 = [(x, -y) for (x, y) in ellipse]
ellipse2.reverse()
ellipse.extend(ellipse2)
return [(x + xc, y + yc) for (x, y) in ellipse]
def display_line(ligne, screen, pygame):
"""
Affiche une ligne à l'écran.
"""
color = 0, 0, 0
for p in ligne:
pygame.draw.line(screen, color, p, p)
pygame.display.flip()
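

# A minimal usage sketch (not part of the original module); it exercises only
# the pure functions above, so no pygame window is required.
if __name__ == "__main__":
    # 8-connected line from (0, 0) to (3, 1): the y step happens halfway along.
    assert draw_line(0, 0, 3, 1) == [(0, 0), (1, 0), (2, 1), (3, 1)]
    # A degenerate ellipse with a == 0 collapses to a vertical segment.
    assert draw_ellipse(0, 0, 0, 2) == [(0, y) for y in range(-2, 2)]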
| UTF-8 | Python | false | false | 4,242 | py | 1,174 | tsp_bresenham.py | 368 | 0.47758 | 0.443179 | 0 | 158 | 25.677215 | 84 |
kshitjtyagi814/registration | 8,169,027,820,117 | 9688ed0fa8ccc5cc81e98d9c6a3f572f17cf1522 | ff87c3a54713f29cc179ac606db275391fd86b0f | /upload/migrations/0002_auto_20200820_0958.py | 7c64b130096a0915427d763e7d084603890f4aef | [] | no_license | https://github.com/kshitjtyagi814/registration | b52c5033c883f2ce7272b7eaf47a03a5e46049fa | ffa79cfb5028bc579a9feedaee61dd0504a8810b | refs/heads/main | 2023-04-14T09:09:14.063949 | 2021-04-17T11:07:24 | 2021-04-17T11:07:24 | 301,374,710 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.0.2 on 2020-08-20 09:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('upload', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='document',
old_name='description',
new_name='name',
),
]
| UTF-8 | Python | false | false | 356 | py | 9 | 0002_auto_20200820_0958.py | 6 | 0.564607 | 0.511236 | 0 | 18 | 18.777778 | 47 |
DistributedSystemsGroup/zoe-applications | 3,633,542,339,297 | 9b363cbc498a47bab118c70ace02e48ef4f15f50 | fa5b4e6ccbfdbe0c9a9411216dfbcf2cc6fd1ccb | /applications/tensorflow/tf-google.py | f3be6173fd9f6af3b9c7e6631d9d8176232f69f3 | [
"Apache-2.0"
] | permissive | https://github.com/DistributedSystemsGroup/zoe-applications | 6360f579c38ca55feb69a4809d2943d71429c148 | 04a2be2602a9037afbf7f107c0be3b6fc396b9ad | refs/heads/master | 2017-10-16T13:03:25.931550 | 2017-07-11T11:23:28 | 2017-07-11T11:23:28 | 55,168,985 | 3 | 3 | null | false | 2017-03-06T10:39:28 | 2016-03-31T17:14:24 | 2017-01-12T14:26:43 | 2017-03-06T10:39:28 | 17,783 | 2 | 3 | 0 | Python | null | null | import json
import sys
sys.path.append('../..')
import applications.app_base
def tf_batch_service(mem_limit, image):
"""
:type mem_limit: int
:type image: str
:rtype: dict
"""
service = {
'name': "tensorflow",
'docker_image': image,
'monitor': True,
'required_resources': {"memory": mem_limit},
'ports': [
{
'name': "Tensorboard web interface",
'protocol': "http",
'port_number': 6006,
'path': "/",
'is_main_endpoint': False,
'expose': True
},
{
'name': "Notebook web interface",
'protocol': "http",
'port_number': 8888,
'path': "/",
'is_main_endpoint': False,
'expose': True
}
],
'networks': [],
'total_count': 1,
'essential_count': 1,
'startup_order': 0
}
return service
APP_NAME = 'tensorflow'
def gen_app(mem_limit, tf_image):
services = [tf_batch_service(mem_limit, tf_image)]
return applications.app_base.fill_app_template(APP_NAME, False, services)
DOCKER_REGISTRY = 'docker-registry:5000/'
options = [
('mem_limit', 16 * (1024**3), 'Tensorflow memory limit (bytes)'),
('tf_image', 'gcr.io/tensorflow/tensorflow', 'Tensorflow image'),
]
if __name__ == "__main__":
args = {}
for opt in options:
args[opt[0]] = opt[1]
app_dict = gen_app(**args)
json.dump(app_dict, sys.stdout, sort_keys=True, indent=4)
sys.stdout.write('\n')
| UTF-8 | Python | false | false | 1,607 | py | 75 | tf-google.py | 49 | 0.508401 | 0.492844 | 0 | 61 | 25.327869 | 77 |
inventree/InvenTree | 13,520,557,053,422 | 56d67561858ea517483ad47f76c4b85a1913e0ba | a902290fb3b911676358ae4d93f83061a6c2bd0f | /InvenTree/plugin/samples/integration/custom_panel_sample.py | 0ef2086029dd9857881d1155653fc0e06b727286 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | https://github.com/inventree/InvenTree | a15e54182c9bfafdf5348cc9a66da1004e23e760 | e88a8e99a5f0b201c67a95cba097c729f090d5e2 | refs/heads/master | 2023-09-03T19:32:35.438375 | 2023-08-30T00:25:40 | 2023-08-30T00:25:40 | 85,894,461 | 3,077 | 549 | MIT | false | 2023-09-14T14:21:01 | 2017-03-23T01:44:10 | 2023-09-14T11:33:37 | 2023-09-14T14:21:01 | 114,453 | 3,026 | 504 | 190 | Python | false | false | """Sample plugin which renders custom panels on certain pages."""
from part.views import PartDetail
from plugin import InvenTreePlugin
from plugin.mixins import PanelMixin, SettingsMixin
from stock.views import StockLocationDetail
class CustomPanelSample(PanelMixin, SettingsMixin, InvenTreePlugin):
"""A sample plugin which renders some custom panels."""
NAME = "CustomPanelExample"
SLUG = "samplepanel"
TITLE = "Custom Panel Example"
DESCRIPTION = "An example plugin demonstrating how custom panels can be added to the user interface"
VERSION = "0.1"
SETTINGS = {
'ENABLE_HELLO_WORLD': {
'name': 'Enable Hello World',
'description': 'Enable a custom hello world panel on every page',
'default': False,
'validator': bool,
},
'ENABLE_BROKEN_PANEL': {
'name': 'Enable Broken Panel',
'description': 'Enable a panel with rendering issues',
'default': False,
'validator': bool,
}
}
def get_panel_context(self, view, request, context):
"""Returns enriched context."""
ctx = super().get_panel_context(view, request, context)
# If we are looking at a StockLocationDetail view, add location context object
if isinstance(view, StockLocationDetail):
ctx['location'] = view.get_object()
return ctx
def get_custom_panels(self, view, request):
"""You can decide at run-time which custom panels you want to display!
- Display on every page
- Only on a single page or set of pages
- Only for a specific instance (e.g. part)
- Based on the user viewing the page!
"""
panels = [
{
# Simple panel without any actual content
'title': 'No Content',
}
]
if self.get_setting('ENABLE_HELLO_WORLD'):
# We can use template rendering in the raw content
content = """
<strong>Hello world!</strong>
<hr>
<div class='alert-alert-block alert-info'>
<em>We can render custom content using the templating system!</em>
</div>
<hr>
<table class='table table-striped'>
<tr><td><strong>Path</strong></td><td>{{ request.path }}</tr>
<tr><td><strong>User</strong></td><td>{{ user.username }}</tr>
</table>
"""
panels.append({
# This 'hello world' panel will be displayed on any view which implements custom panels
'title': 'Hello World',
'icon': 'fas fa-boxes',
'content': content,
'description': 'A simple panel which renders hello world',
'javascript': 'console.log("Hello world, from a custom panel!");',
})
if self.get_setting('ENABLE_BROKEN_PANEL'):
# Enabling this panel will cause panel rendering to break,
# due to the invalid tags
panels.append({
'title': 'Broken Panel',
'icon': 'fas fa-times-circle',
'content': '{% tag_not_loaded %}',
'description': 'This panel is broken',
'javascript': '{% another_bad_tag %}',
})
# This panel will *only* display on the PartDetail view
if isinstance(view, PartDetail):
panels.append({
'title': 'Custom Part Panel',
'icon': 'fas fa-shapes',
'content': '<em>This content only appears on the PartDetail page, you know!</em>',
})
# This panel will *only* display on the StockLocation view,
# and *only* if the StockLocation has *no* child locations
if isinstance(view, StockLocationDetail):
try:
loc = view.get_object()
if not loc.get_descendants(include_self=False).exists():
panels.append({
'title': 'Childless Location',
'icon': 'fa-user',
'content_template': 'panel_demo/childless.html', # Note that the panel content is rendered using a template file!
})
except Exception: # pragma: no cover
pass
return panels
| UTF-8 | Python | false | false | 4,420 | py | 1,334 | custom_panel_sample.py | 743 | 0.547964 | 0.547511 | 0 | 119 | 36.142857 | 138 |
theromis/mlpiper | 13,185,549,624,802 | 85aa93c6f8c7d5cdb84ff824aea036f42110d245 | 6e423cddd8698bc662bcc3208eb7a8fdb2eb0d72 | /mlcomp/parallelm/components/spark_data_component.py | ea16b845c2ed020a8c1a026fbccbfde8fa2e90ec | [
"Apache-2.0"
] | permissive | https://github.com/theromis/mlpiper | 7d435343af7b739767f662b97a988c2ccc7665ed | 738356ce6d5e691a5d813acafa3f0ff730e76136 | refs/heads/master | 2020-05-05T04:44:00.494105 | 2019-04-03T19:53:01 | 2019-04-03T22:02:53 | 179,722,926 | 0 | 0 | Apache-2.0 | true | 2019-04-05T17:06:02 | 2019-04-05T17:06:01 | 2019-04-03T22:02:57 | 2019-04-05T17:02:52 | 14,857 | 0 | 0 | 0 | null | false | false | import abc
from pyspark.sql import DataFrame
from parallelm.common.mlcomp_exception import MLCompException
from parallelm.components.spark_session_component import SparkSessionComponent
import traceback
class SparkDataComponent(SparkSessionComponent):
def __init__(self, ml_engine):
super(SparkDataComponent, self).__init__(ml_engine)
def _materialize(self, spark, parent_data_objs, user_data):
df = self._dataframe(spark, user_data)
df_list = df if type(df) is list else [df]
self._logger.debug("Data component '{}' returns: {}".format(self.name(), df_list))
return df_list # Used by child connectable component
def _post_validation(self, df):
self._ml_engine.set_dataframe(df) # Used by Spark ml pipeline
return df
@abc.abstractmethod
def _dataframe(self, spark, user_data):
"""
Supposed to return spark data-frame
"""
pass
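

# Illustrative sketch (not part of the project): a minimal concrete subclass.
# The class name, CSV path and reader options below are assumptions.
class CsvDataComponent(SparkDataComponent):
    def _dataframe(self, spark, user_data):
        # Load a Spark DataFrame from a hypothetical CSV location.
        return spark.read.csv("/path/to/data.csv", header=True)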
| UTF-8 | Python | false | false | 941 | py | 712 | spark_data_component.py | 323 | 0.678002 | 0.678002 | 0 | 27 | 33.851852 | 90 |
obligate/python3-king | 10,179,072,540,897 | 31fb639c772f975c34c3909ef6cdd23800bf5ae5 | 271dbb5f0c23ae40f19a8df7dd3f15a44fbe5ae1 | /it-king/day07-面向对象进阶/lib/aa.py | e13ef81b73e297781abd494c1247b1060f8e1180 | [] | no_license | https://github.com/obligate/python3-king | a4d1c5c145c3b1c42efe059cf2bbd797d0b3c528 | 2b31400468c7a2621f29f24f82e682eb07c0e17d | refs/heads/master | 2020-05-02T11:45:16.218771 | 2019-03-27T08:05:39 | 2019-03-27T08:05:39 | 177,938,256 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = "Peter"
class C:
def __init__(self):
self.name = 'peter'
| UTF-8 | Python | false | false | 92 | py | 164 | aa.py | 124 | 0.445652 | 0.445652 | 0 | 7 | 11.142857 | 27 |
T-K-233/k-load | 14,886,356,672,928 | b22ecb2b0e67c61d38c11365694808f6ca70806d | fed565ae2aa4c33e239661f2c42994ecb3466259 | /build/lib/k_load/downloaders.py | 374907e23a7145f2f4b2582c40eebd15655fd63d | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | https://github.com/T-K-233/k-load | 6a0810cb61844a1509148df43df3f4dab263186c | 6a1cdcfb4a1890779eadb4701cde2d3c44ac3c7e | refs/heads/master | 2020-03-28T21:16:13.882597 | 2018-09-22T06:40:41 | 2018-09-22T06:40:41 | 149,143,002 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
import json
import datetime
import requests
class Downloader:
def log_info(self):
print('='*36)
print('= K-Load ')
print('='*36)
print('= Name:\t', self.name)
print('= Type:\t', self.platform)
print('= URL:\t', self.url)
print('= Format:\t', self.format)
print('='*36)
def __repr__(self):
return '<object \'%s\' name=\'%s\'>' % (self.__class__.__name__, self.name)
class KSong(Downloader):
platform = '全民K歌'
def __init__(self, url):
self.url = url
res = requests.get(self.url)
pattern = re.compile(r'(?<=window\.__DATA__ = ).+(?=;.+</script>)')
json_str = re.search(pattern, res.text).group()
self.info = json.loads(json_str)
self.info.update(self.info['detail'])
self.info['ctime'] = datetime.datetime.fromtimestamp(int(self.info['ctime'])).strftime("%Y-%m-%d %H:%M:%S")
self.download_url = self.info['playurl']
self.name = '%s-%s' % (self.info['song_name'], self.info['nick'])
self.format = '.m4a'
def download(self):
print('Downloading...')
res = requests.get(self.download_url)
filename = '.\\%s_k-load%s' % (self.name, self.format)
with open(filename, 'wb') as f:
f.write(res.content)
print('Finished.')
print('Saved as %s' % filename)
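

# Usage sketch (illustrative; the share URL below is a placeholder, not a
# real song link):
#   song = KSong('https://kg.qq.com/node/play?s=<share_id>')
#   song.log_info()
#   song.download()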
| UTF-8 | Python | false | false | 1,396 | py | 9 | downloaders.py | 6 | 0.53741 | 0.532374 | 0 | 42 | 32.095238 | 115 |
Veeresh-Hiremath/project20_07042021 | 6,554,120,130,352 | ac5c53481b3b5860596eb0e6649411eaa44ee07b | f403e829d8d193a2b032f026bcf0e8b5f0c81b0d | /app1/urls.py | ce9aeb113bfb1fe1c65091f4cf625e02caee5de5 | [] | no_license | https://github.com/Veeresh-Hiremath/project20_07042021 | 11857c71ce2c6e760e266e05a278d722ac671302 | 00ac48121bd8331ce7d34d2c0a2ac67621edd1a0 | refs/heads/main | 2023-04-06T21:47:22.547473 | 2021-04-07T03:47:37 | 2021-04-07T03:47:37 | 355,401,261 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from app1 import views
app_mame="app1"
urlpatterns = [
path("",views.samp1, name="samp1")
]
| UTF-8 | Python | false | false | 125 | py | 6 | urls.py | 6 | 0.704 | 0.672 | 0 | 6 | 19.833333 | 38 |
hitandaway100/caba | 996,432,415,821 | e2f4a1cda8d145dfa4fcdd20a501e12a3fde942b | d83118503614bb83ad8edb72dda7f449a1226f8b | /src/dprj/platinumegg/app/cabaret/management/commands/orig_migrate.py | c18bc0faee939f9c763f1446d5b8efb9f9d5fb7c | [] | no_license | https://github.com/hitandaway100/caba | 686fe4390e182e158cd9714c90024a082deb8c69 | 492bf477ac00c380f2b2758c86b46aa7e58bbad9 | refs/heads/master | 2021-08-23T05:59:28.910129 | 2017-12-03T19:03:15 | 2017-12-03T19:03:15 | 112,512,044 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
import os
import glob
from django.db import connection
from django.db import connections
from django.core.management.base import BaseCommand
import settings
import settings_sub
from platinumegg.app.cabaret.models import sql
class Command(BaseCommand):
"""マイグレーション用.
django の 1.4 に既存のテーブルに対しての変更を検知して Alter Table を当ててくれるものが無いので.
>> python manage.py orig_migrate
このコマンドは, バージョン番号がファイルの先頭に付いている sql を実行します.
バージョン番号の定義は models/sql/__init__.py に書いていますので,
次のカラム変更を行ないたい日をバージョンに設定しておくと良いです.
下記例のファイルの様に, どのモデルに対して何をするのか (add, delete, ... 等) を名前に付ける事を推奨.
ex) dprj/platinumegg/app/cabaret/models/sql/20150914_add_column_gachamaster.sql
"""
def handle(self, *args, **options):
self.stdout.write('Change table ...\n')
if os.path.isdir(sql.DIR):
migrates = glob.glob(sql.DIR + sql.VERSION + '*.sql')
if migrates:
cursor = connections[settings.DB_DEFAULT].cursor()
self.stdout.write(
'!!! Migrate Version: {}, MasterDB: {} !!!'.format(
sql.VERSION,
settings.DATABASES[cursor.db.alias]['HOST']
)
)
for migrate in migrates:
self.stdout.write('\033[0m# {}'.format(os.path.basename(migrate)))
with open(migrate, 'r') as sql_file:
success_count = 0
errors = []
for sql_line in sql_file.readlines():
try:
self.stdout.write('\033[0mexec: {}'.format(sql_line))
cursor.execute(sql_line)
success_count += 1
self.stdout.write('\033[32mexec OK.')
except Exception as errno:
errors.append('\033[31mExecError: {} \n'.format(errno))
self.stdout.write('Success {0}, Error : {1}.'.format(success_count, len(errors)))
for error in errors:
self.stdout.write(error)
else:
self.stdout.write('Not Change SQL ...\n')
| UTF-8 | Python | false | false | 2,602 | py | 1,590 | orig_migrate.py | 1,029 | 0.527361 | 0.512798 | 0 | 53 | 41.754717 | 105 |
fwq777/siut | 4,544,075,414,878 | 11282218a6588f67ef3215c2c407b74c0fe422f1 | 6d9876c5d6d8650afd3ca4574d64db5c2f603a06 | /SIUT/train_G.py | 764a2159be3f20ebabd1420275b3fc0562b46643 | [] | no_license | https://github.com/fwq777/siut | cdc34075d5a940dfc9ae1a64ac10462666dcabef | 71c1dfefed010adba860dc87ca70548a1cbf62f5 | refs/heads/master | 2020-06-12T10:05:25.323223 | 2019-06-28T12:04:52 | 2019-06-28T12:04:52 | 194,266,746 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import os
import numpy as np
from torch.utils import data as data_
from tqdm import tqdm
from utils.config import opt
from data.datasetG import Dataset
from utils.vis_tool import Visualizer
from model.DeviceAdapt_Net import Gtrainer
def test_model(dataloader, model, epoch, ifsave=False, test_num=100000):
LOSS = 0.0
num = 0
model.eval()
dir = './result/resultG0405/'+str(epoch)+'/'
if not os.path.exists(dir) and ifsave:
os.makedirs(dir)
for ii, (img, oriimg) in enumerate(dataloader):
outputimg, loss = model(img, oriimg, vis=True)
LOSS += loss
num += 1
if ifsave:
# for i in range(len(outputimg)):
i = 0
img = img[i][0].numpy()
img = img*255
outimg = outputimg[i]
outimg = outimg.transpose((1, 2, 0))
outimg = outimg*255
img = img.astype(np.uint8)
outimg = outimg.astype(np.uint8)
cv2.imwrite(dir + 'out' + str(ii) + '_' + str(i) + '.jpg', outimg)
cv2.imwrite(dir + 'input' + str(ii) + '_' + str(i) + '.jpg', img)
if ii > test_num:
break
return {"SNR": round(LOSS/num, 5)}
def train():
opt._parse()
vis_tool = Visualizer(env=opt.env)
print("init vis_tool")
print('load data')
train_dataset = Dataset(opt.rootpath, mode="train/")
val_dataset = Dataset(opt.rootpath, mode="val/")
trainer = Gtrainer(opt, image_size=opt.image_size)
# if opt.load_G:
# trainer.load_G(opt.load_G)
# print('model construct completed')
best_map = 0.0
for epoch in range(opt.epoch):
trainer.train()
train_dataloader = data_.DataLoader(train_dataset,
batch_size=opt.train_batch_size,
num_workers=opt.num_workers,
shuffle=True)
val_dataloader = data_.DataLoader(val_dataset,
batch_size=opt.test_batch_size,
num_workers=opt.num_workers,
shuffle=False)
# test_model(test_dataloader, trainer, epoch, ifsave=True, test_num=opt.test_num)
for ii, (img, oriseg) in tqdm(enumerate(train_dataloader), total=len(train_dataloader)):
trainer.train_onebatch(img, oriseg)
if ii % 50 == 0:
trainer.eval()
outputimg, loss = trainer(img, oriseg, vis=True)
vis_tool.plot("loss", loss)
input = img[0][0].numpy()
input = (input*255).astype(np.uint8)
vis_tool.img("input", input)
label = oriseg[0].numpy()
label = (label*255).astype(np.uint8)
vis_tool.img("label", label)
trainer.train()
ifsave=False
if (epoch+1)%1 == 0:
ifsave=True
eval_result = test_model(val_dataloader, trainer, epoch, ifsave=ifsave, test_num=opt.test_num)
print('eval_loss: ', eval_result)
best_map = eval_result["SNR"]
best_path = trainer.save_G(best_map=best_map)
print("save to %s !" % best_path)
if __name__ == '__main__':
train()
| UTF-8 | Python | false | false | 3,311 | py | 10 | train_G.py | 10 | 0.529749 | 0.514648 | 0 | 91 | 35.373626 | 102 |
iamaaditya/advent_of_code | 16,913,581,246,716 | 707e2a32e8955879ddac30f4e417bd505785c85e | 18b7e4d3833b61377ac7219778a73d32353f4a7a | /day14.py | 6f94f91edd7888738dae5f72a627a4ce3c0407a9 | [] | no_license | https://github.com/iamaaditya/advent_of_code | 3fb7bcdd25d15456709876ef0257793221432ba8 | 9f77ea45d95c0921b69178b5592e235dbcf5db50 | refs/heads/master | 2021-01-10T10:16:16.134709 | 2015-12-21T06:54:04 | 2015-12-21T06:54:04 | 47,854,069 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
from itertools import repeat
input = open('./input_data/input14.txt').read().splitlines()
regex = r'(\w+) can fly (\d+) km/s for (\d+) seconds, but then must rest for (\d+) seconds.'
data = [(name, int(speed),int(fly),int(sleep)) for ip in input for name,speed,fly,sleep in re.findall(regex, ip)]
distance = list(repeat(0, len(data)))
points = list(repeat(0, len(data)))
deer_fly = [ik[2] for ik in data]
deer_sleep = [ik[3] for ik in data]
time_ = 2503
for t in xrange(1,time_+1):
for index, r in enumerate(data):
if deer_fly[index] :
distance[index] += r[1]
deer_fly[index] -= 1
elif deer_sleep[index] :
deer_sleep[index] -= 1
else:
deer_fly[index] = r[2] -1
deer_sleep[index] = r[3]
distance[index] += r[1]
    md = max(distance)
    for i, d in enumerate(distance):
        if d == md:
            points[i] += 1
print max(distance)
print max( points )
| UTF-8 | Python | false | false | 998 | py | 21 | day14.py | 21 | 0.568136 | 0.548096 | 0 | 38 | 25.184211 | 113 |
maayan20-meet/meet2018y1lab5 | 283,467,866,611 | d55babf4ce00cb728bcb0a3b4548ef16cd2a7611 | a3f381360745d146ca9a98c480b7d00db0d263a3 | /fruit_sorter.py | 1426cf10650bca239b3db2d9e868e7a57c2e4a4c | [
"MIT"
] | permissive | https://github.com/maayan20-meet/meet2018y1lab5 | f6bd44f7606fca1ca2aa7cedb8d3737af5521130 | 18e9d8c9947c9d568f61f32545ba4b246a36ad3f | refs/heads/master | 2020-03-22T13:26:08.852159 | 2018-08-15T13:59:45 | 2018-08-15T13:59:45 | 140,107,870 | 0 | 0 | null | true | 2018-07-07T18:21:53 | 2018-07-07T18:21:53 | 2018-06-23T22:48:38 | 2018-06-23T22:48:37 | 1 | 0 | 0 | 0 | null | false | null | u_fruit = input('What fruit am I sorting')
if u_fruit == 'apples' or u_fruit == 'Apples':
    print('Bin 1 (Apples)')
elif u_fruit == 'oranges' or u_fruit == 'Oranges':
    print('Bin 2 (Oranges)')
elif u_fruit == 'olives' or u_fruit == 'Olives':
    print('Bin 3 (Olives)')
else:
    print("don't know that fruit")
| UTF-8 | Python | false | false | 307 | py | 26 | fruit_sorter.py | 26 | 0.625407 | 0.615635 | 0 | 13 | 22.615385 | 46 |
vicky12348/avil1245 | 7,232,724,960,317 | f3e7970cd85399bcaf51b3f2768591c4f5c85ff2 | bb07f2f846e96b3a354466f061798139289465fc | /MOv.py | bb9f42727b8ee290a719217c9063548298eaede0 | [] | no_license | https://github.com/vicky12348/avil1245 | b62feb3753dd445875d88147bd0eddcb8ea12bfe | a80d8a60e9ec5905b95256c2ba3ea496af1a0ffa | refs/heads/master | 2022-12-17T22:38:19.186375 | 2020-08-30T06:13:04 | 2020-08-30T06:13:04 | 291,409,008 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class parent():
def fees(self):
print("fees paying")
class child(parent):
a="movies"
def fees(self):
print("movies")
c=child()
c.fees()
print("this is a variable",c.a)
| UTF-8 | Python | false | false | 201 | py | 10 | MOv.py | 9 | 0.572139 | 0.572139 | 0 | 10 | 18.6 | 31 |
excess30/CompetitiveProgramming | 5,497,558,147,766 | 83de3a611f7e48c28524cb557ecd58131d07a9fb | 7d34183fc38360631bd67def135e48be43f0a2f3 | /possiblePaths.py | a01125e521f3fe6d7bbe3da47c4bc5a07a91ba47 | [] | no_license | https://github.com/excess30/CompetitiveProgramming | 4703ff49d6bb5cecd052a3c18bb70c72eb712e25 | 30dec16e5434fd024919572bba570b168608a62f | refs/heads/master | 2020-11-24T01:33:36.549023 | 2019-12-13T19:09:47 | 2019-12-13T19:09:47 | 227,905,509 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # function to return count of possible paths
# to reach cell at row number m and column
# number n from the topmost leftmost
# cell (cell at 1, 1)
def numberOfPaths(m, n):
# If either given row number is first
# or given column number is first
if(m == 1 or n == 1):
return 1
    # Otherwise, sum the paths that arrive from the cell above
    # and the paths that arrive from the cell to the left.
return numberOfPaths(m-1, n) + numberOfPaths(m, n-1)
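
# Illustrative sketch (not in the original): the same recurrence with
# memoization, which avoids the exponential blow-up of the naive recursion.
from functools import lru_cache

@lru_cache(maxsize=None)
def numberOfPathsFast(m, n):
    if m == 1 or n == 1:
        return 1
    return numberOfPathsFast(m - 1, n) + numberOfPathsFast(m, n - 1)
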
# Main function
m = 3
n = 3
print(numberOfPaths(m, n)) | UTF-8 | Python | false | false | 485 | py | 7 | possiblePaths.py | 7 | 0.678351 | 0.659794 | 0 | 20 | 23.3 | 56 |
atulvidyarthi29/BlueTech | 6,528,350,319,203 | 4f3686b58bfa376548f0982cae5387ef57feb783 | 021a37a1b948d017975566613219cd287d713038 | /BlueTech/Users/views.py | a554f7d5dd825853ba0d8acbd0d6be2b739d7884 | [] | no_license | https://github.com/atulvidyarthi29/BlueTech | 23efd89ae5a68e8dfa5d27635809fc22a82d4838 | a837a3c851b6f80ece4faac3e2d47b2806364f75 | refs/heads/master | 2022-12-10T11:00:46.118588 | 2020-10-27T20:02:55 | 2020-10-27T20:02:55 | 209,316,140 | 3 | 2 | null | false | 2022-12-08T11:42:45 | 2019-09-18T13:30:46 | 2022-10-26T12:11:56 | 2022-12-08T11:42:44 | 37,794 | 1 | 2 | 7 | JavaScript | false | false | from django.contrib.auth import login, authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMessage
from django.http import HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.views.decorators.csrf import csrf_exempt
from . import paytm_checksum
from . import payment_config
from .content import team_content, index_content
from .forms import UserRegistrationForm, ProductKeyForm, ProfileEditForm, PaymentForm
from .models import License, Employee
from .models import User
def buy_erp(request):
if request.method == 'POST':
payment_form = PaymentForm(request.POST)
if payment_form.is_valid():
payment_form = payment_form.save(commit=False)
payment_form.validated = False
payment_form.save()
absolute_url = request.build_absolute_uri()[0:-1]
CALLBACK_URL = absolute_url[0:absolute_url.rindex('/')] + '/handle_request/'
print(CALLBACK_URL)
param_dict = {
'MID': payment_config.MID,
'ORDER_ID': str(payment_form.licence),
'TXN_AMOUNT': payment_config.TXN_AMOUNT,
'CUST_ID': payment_form.email,
'INDUSTRY_TYPE_ID': payment_config.INDUSTRY_TYPE_ID,
'WEBSITE': payment_config.WEBSITE,
'CHANNEL_ID': payment_config.CHANNEL_ID,
'CALLBACK_URL': CALLBACK_URL,
}
print(param_dict)
param_dict['CHECKSUMHASH'] = paytm_checksum.generateSignature(param_dict, payment_config.MERCHANT_KEY)
return render(request, 'users/payment_processing.html', {'param_dict': param_dict})
payment_form = PaymentForm()
return render(request, "users/checkout_page.html", {'payment_form': payment_form})
@csrf_exempt
def handle_request(request):
form = request.POST
response = {}
for key in form.keys():
response[key] = form[key]
check = response['CHECKSUMHASH']
verify = paytm_checksum.verifySignature(response, payment_config.MERCHANT_KEY, check)
print(response)
if verify:
if response['RESPCODE'] == '01':
license_object = License.objects.get(licence=response['ORDERID'])
send_email(request, license_object)
return HttpResponse('Thank you for the payment. Please check your email for further instructions.')
else:
return HttpResponse("Something went wrong.")
else:
return HttpResponse("Checksum Verification failed")
def send_email(request, license_object):
tup = str(license_object.email) + ' ' + 'CEO' + ' ' + str(license_object.licence)
current_site = get_current_site(request)
mail_subject = 'Join using this link!'
message = render_to_string('hr/recruitment_email.html', {
'unique_code': license_object.licence,
'domain': current_site.domain,
'uid': urlsafe_base64_encode(force_bytes(tup)),
'token': str(tup),
})
email = EmailMessage(
mail_subject, message, to=[license_object.email]
)
email.send()
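

# Round-trip sketch (illustrative, not a view; the address below is a
# placeholder): the uid embedded in the email link is just the url-safe
# base64 of the "email role licence" string, e.g.
#   uid = urlsafe_base64_encode(force_bytes('ceo@example.com CEO 1234'))
#   force_text(urlsafe_base64_decode(uid))  # -> 'ceo@example.com CEO 1234'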
def home(request):
if not request.user.is_anonymous:
return redirect('users:dashboard')
try:
license_obj = License.objects.first()
validated = license_obj.validated
except:
validated = False
return render(request, 'home/homepage.html',
context={'validated': validated, 'true': True, 'content': index_content})
def team(request):
if not request.user.is_anonymous:
return redirect('users:dashboard')
try:
license_obj = License.objects.first()
validated = license_obj.validated
except:
validated = False
return render(request, 'home/team.html', context={'content': team_content, 'validated': validated})
def terms_of_service(request):
if not request.user.is_anonymous:
return redirect('users:dashboard')
try:
license_obj = License.objects.first()
validated = license_obj.validated
except:
validated = False
return render(request, 'home/terms_of_service.html', context={'validated': validated, })
def privacy_policy(request):
if not request.user.is_anonymous:
return redirect('users:dashboard')
try:
license_obj = License.objects.first()
validated = license_obj.validated
except:
validated = False
return render(request, 'home/privacy_policy.html', context={'validated': validated, })
def disclaimer(request):
if not request.user.is_anonymous:
return redirect('users:dashboard')
try:
license_obj = License.objects.first()
validated = license_obj.validated
except:
validated = False
return render(request, 'home/disclaimer.html', context={'validated': validated, })
def add_user(request, uidb64, token):
try:
uid = force_text(urlsafe_base64_decode(uidb64).decode())
tup = uid.split()
print(tup)
if tup[1] == 'CEO':
element = License.objects.get(licence=tup[2])
element.validated = True
element.save()
except(TypeError, ValueError, OverflowError):
return HttpResponse('Could not verify you!')
if request.method == 'POST':
user_form = UserRegistrationForm(request.POST)
if user_form.is_valid():
user_form2 = user_form.save(commit=False)
user_form2.is_active = True
user_form2.save()
user = authenticate(username=user_form.cleaned_data['username'],
password=user_form.cleaned_data['password1'])
login(request, user)
return redirect('users:post_login', tup[1])
return render(request, 'users/register.html',
{'user_form': user_form, 'errors': "Unauthorized"})
else:
p = User()
p.email = tup[0]
user_form = UserRegistrationForm(instance=p)
key_form = ProductKeyForm()
        return render(request, 'users/register.html', {'user_form': user_form, 'key_form': key_form, 'dept': tup[1]})
@login_required
def to_post_login(request):
return redirect('users:post_login', '-')
@login_required
def post_login(request, dept):
try:
is_profile_complete = request.user.employee.is_complete
except:
is_profile_complete = False
if is_profile_complete:
return redirect('users:dashboard')
return redirect('users:profile', dept)
@login_required
def profile(request, dept):
if request.method == 'POST':
try:
profile_edit_form = ProfileEditForm(request.POST, request.FILES,
instance=Employee.objects.get(user=request.user))
except:
profile_edit_form = ProfileEditForm(request.POST, request.FILES)
user_form = UserRegistrationForm(request.POST, instance=request.user)
if profile_edit_form.is_valid():
form_object = profile_edit_form.save(commit=False)
form_object.dept = dept
form_object.user = request.user
if dept == 'CEO':
form_object.reporting_to = None
if form_object.dept and profile_edit_form.cleaned_data['gender'] and \
profile_edit_form.cleaned_data['date'] and profile_edit_form.cleaned_data['date_of_joining'] and \
profile_edit_form.cleaned_data['phone_no']:
form_object.is_complete = True
else:
form_object.is_complete = False
form_object.is_verified = False
form_object.save()
return redirect('users:dashboard')
print(profile_edit_form.errors)
return render(request, 'users/profile.html',
{'profile_edit_form': profile_edit_form, 'user_form': user_form,
'errors': profile_edit_form.errors, 'deptat': dept, })
else:
profile_edit_form = ProfileEditForm()
user_form = UserRegistrationForm(instance=request.user)
return render(request, 'users/profile.html',
{'profile_edit_form': profile_edit_form, 'user_form': user_form, 'dept': dept})
@login_required
def dashboard(request):
try:
department = request.user.employee.dept
if department == 'CEO':
return redirect('users:ceo_dashboard')
elif department == 'HR':
return redirect('users:hr:hr_dashboard')
elif department == 'ACCOUNTS':
return redirect('finance:finance_home')
elif department == 'SALES':
return redirect('sales:sales_dashboard')
except:
return render(request, 'home/404.html')
@login_required
def ceo_dashboard(request):
department = request.user.employee.dept
return render(request, 'home/dashboard.html',
context={'department': department, 'user': request.user})
@login_required
def update_profile(request, pk):
employee = get_object_or_404(Employee, id=pk)
profile_update_form = ProfileEditForm(request.POST, request.FILES, instance=employee)
if request.method == 'POST':
if profile_update_form.is_valid():
profile_update_form.save()
return redirect(request.META.get('HTTP_REFERER'))
context = {
'profile_update_form': profile_update_form,
'department': request.user.employee.dept,
'profile': employee,
'user': request.user,
}
return render(request, 'users/profile_update.html', context)
# def boot_start(request):
# if request.method == 'GET':
# get_response = [request.GET.get('payment_id'), request.GET.get('status')]
# print(get_response)
# if len(get_response) == 2 and get_response[1] == 'success':
# un = uuid.uuid4()
# License.objects.create(licence=un, validated=False)
# email_form = EmailForm()
# return render(request, 'users/ceo_email_info.html', context={'email_form': email_form, 'un': str(un)})
# else:
# return render(request, 'home/404.html')
# else:
# return HttpResponse("Something went wrong!")
#
#
# def send_ceo_method(request, un):
# if request.method == 'POST':
# email_form = EmailForm(request.POST)
# if email_form.is_valid():
# to_email = email_form.cleaned_data['email']
# tup = str(to_email) + ' ' + 'CEO' + ' ' + str(un)
# current_site = get_current_site(request)
# mail_subject = 'Join using this link!'
# message = render_to_string('hr/recruitment_email.html', {
# 'unique_code': un,
# 'domain': current_site.domain,
# 'uid': urlsafe_base64_encode(force_bytes(tup)),
# 'token': str(tup),
# })
# email = EmailMessage(
# mail_subject, message, to=[to_email]
# )
# email.send()
# return HttpResponse('Thank you for the payment. Please check your email for further instructions.')
# redirect(request.META.get('HTTP_REFERER'))
# else:
# redirect(request.META.get('HTTP_REFERER'))
# def register(request):
# if request.method == 'POST':
# user_form = UserRegistrationForm(request.POST)
# key_form = ProductKeyForm(request.POST)
# if key_form.is_valid() and user_form.is_valid():
# pd_key = key_form.cleaned_data['product_key']
# lic_obj = License.objects.first()
# if lic_obj.licence == pd_key:
# lic_obj.validated = True
# lic_obj.save()
# user_form2 = user_form.save(commit=False)
# user_form2.is_active = False
# user_form2.save()
# current_site = get_current_site(request)
# mail_subject = 'Please, verify your Email!'
# message = render_to_string('users/activate_email.html', {
# 'user': user_form2,
# 'domain': current_site.domain,
# 'uid': urlsafe_base64_encode(force_bytes(user_form2.pk)).decode(),
# 'token': account_activation_token.make_token(user_form2),
# })
# to_email = user_form.cleaned_data.get('email')
# email = EmailMessage(
# mail_subject, message, to=[to_email]
# )
# email.send()
# return HttpResponse('Please confirm your email address to complete the registration')
# return render(request, 'users/register.html',
# {'user_form': user_form, 'key_form': key_form, 'errors': "Unauthorized"})
# else:
# user_form = UserRegistrationForm()
# key_form = ProductKeyForm()
# return render(request, 'users/register.html', {'user_form': user_form, 'key_form': key_form})
#
#
# def activate(request, uidb64, token):
# try:
# print(uidb64)
# uid = force_text(urlsafe_base64_decode(uidb64).decode())
# print(uid)
# user = User.objects.get(pk=uid)
# except(TypeError, ValueError, OverflowError, User.DoesNotExist):
# user = None
# if user is not None and account_activation_token.check_token(user, token):
# user.is_active = True
# user.save()
# login(request, user)
# return redirect('users:post_login', 'CEO')
# else:
# return HttpResponse('Activation link is invalid!')
| UTF-8 | Python | false | false | 14,218 | py | 93 | views.py | 32 | 0.589323 | 0.585174 | 0 | 352 | 38.392045 | 118 |