repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
swftszn/lostandfounddatabase | 16,853,451,710,491 | 1eca108ee82f564856d4de3b9e7c07d3d4bb37a0 | fcff6eb8040679663b887907f1ed5317f9416a1e | /events/models.py | f8d7db547404e0b4a8d638cd94cfbcb772e590fa | []
| no_license | https://github.com/swftszn/lostandfounddatabase | 1660211228d5dfa36a42b40ce9d98ca4e4913b74 | 8d4d5c4e1beb184220b77636a15e613a9dae85a3 | refs/heads/master | 2023-09-04T17:23:57.999388 | 2021-11-18T04:46:10 | 2021-11-18T04:46:10 | 429,298,289 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
# Create your models here.
class Event(models.Model):
    """Django model for a single event record: a date, a short title and
    free-form details."""
    date = models.DateField()                     # day the event takes place
    title = models.CharField(max_length=200)      # short human-readable name
    details = models.TextField()                  # unbounded description text
| UTF-8 | Python | false | false | 197 | py | 5 | models.py | 2 | 0.700508 | 0.685279 | 0 | 8 | 23.125 | 44 |
ascetic-monk/Lane-Detection-with-ERFNet | 12,352,325,988,965 | d94a5dfddf65ea2dedffa5ed8ae912a7244356e0 | 9562193956ef6c08e463f5e880152bd6e2d5d16e | /datasets/__init__.py | 07ae4c364239da4de1d24df165e44ba9c3c981e7 | [
"Apache-2.0"
]
| permissive | https://github.com/ascetic-monk/Lane-Detection-with-ERFNet | 0dca99c7cd8b4b255d04c4f0b2663cbf34b81d24 | 7ec860502e725b2420ea27973308c5b44bd69ccc | refs/heads/main | 2023-02-03T07:14:12.576657 | 2020-12-23T07:16:41 | 2020-12-23T07:16:41 | 325,172,973 | 1 | 0 | Apache-2.0 | true | 2020-12-29T03:08:15 | 2020-12-29T03:08:14 | 2020-12-28T09:13:52 | 2020-12-23T07:16:41 | 26,081 | 0 | 0 | 0 | null | false | false | from .lane_det import LaneDataSet | UTF-8 | Python | false | false | 33 | py | 12 | __init__.py | 10 | 0.848485 | 0.848485 | 0 | 1 | 33 | 33 |
dcollins4096/p19_newscripts | 16,913,581,232,713 | 13bd907dc7e92d267d0cac28688c4599e8fb2f7b | 04a643a77927bc56ab58c7df91d4733321e61e51 | /p66_brho/explore/blos_ytvscyl.py | 2e7debd3cac3523b400d3f88098788fa072347d1 | []
| no_license | https://github.com/dcollins4096/p19_newscripts | d2fae1807170a4d70cf4c87222a6258211f993ff | 23c780dd15b60944ed354406706de85282d0bee6 | refs/heads/master | 2023-07-21T11:53:55.188383 | 2023-07-18T17:38:21 | 2023-07-18T17:38:21 | 215,159,839 | 0 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
synthetic observations; version yt
print(ds.field_info['gas', 'magnetic_field_los'].get_source())
def _los_field(field, data):
if data.has_field_parameter(f"bulk_{basename}"):
fns = [(fc[0], f"relative_{fc[1]}") for fc in field_comps]
else:
fns = field_comps
ax = data.get_field_parameter("axis")
if is_sequence(ax):
# Make sure this is a unit vector
ax /= np.sqrt(np.dot(ax, ax))
ret = data[fns[0]] * ax[0] + data[fns[1]] * ax[1] + data[fns[2]] * ax[2]
elif ax in [0, 1, 2]:
ret = data[fns[ax]]
else:
raise NeedsParameter(["axis"])
return ret
'''
from cmath import log
from re import X
from turtle import width
from starter2 import *
import annotate_particles_4_cpy
reload(annotate_particles_4_cpy)
import multiplots
reload(multiplots)
import davetools
reload(davetools)
from scipy.ndimage import gaussian_filter
from scipy.signal import savgol_filter
from matplotlib.ticker import PercentFormatter
np.set_printoptions(threshold=sys.maxsize)
from icecream import ic
# --- --- --- --- --- --- ---
class telescope():
    """Synthetic observation helper.

    For each core tracked by a looper, integrates density and the
    line-of-sight magnetic field through cylinders ("disks") oriented along
    x, y and z, comparing a hand-rolled zone sum against yt's
    'magnetic_field_los' field.  Results are stored on self and optionally
    cached to an HDF5 file.
    """
    def __init__(self, the_loop):
        self.this_looper = the_loop
        self.cores_used = []

    def qtyRun(self, sim, core_list=None):
        """Compute synthetic column densities and B_los per core.

        sim: tag interpolated into the HDF5 cache filename.
        core_list: cores to process; defaults to every core in the tracker.
        """
        print('inside qtyRun')
        thtr = self.this_looper.tr

        # CORE_LIST: default to every core present in the tracker.
        all_cores = np.unique(thtr.core_ids)
        if core_list is None:
            core_list = all_cores

        # FIELDS STORED: one array per projection axis (x, y, z).
        self.synthRho = [np.zeros(len(core_list)) for x in range(3)]
        self.synthRho_mid = [np.zeros(len(core_list)) for x in range(3)]
        self.synthField = [np.zeros(len(core_list)) for x in range(3)]
        self.synthField_mid = [np.zeros(len(core_list)) for x in range(3)]
        self.synthField_yt_i = [np.zeros(len(core_list)) for x in range(3)]
        self.synthField_yt_ii = [np.zeros(len(core_list)) for x in range(3)]
        self.synthField_midyt_i = [np.zeros(len(core_list)) for x in range(3)]
        self.synthField_midyt_ii = [np.zeros(len(core_list)) for x in range(3)]

        # THE FINAL FRAME of the simulation only.
        the_frame = thtr.frames[-1:]

        # CORE-LOOP
        for nc, core_id in enumerate(core_list):
            ds = self.this_looper.load(the_frame[0])
            ms = trackage.mini_scrubber(thtr, core_id, do_velocity=True)
            ms.particle_pos(core_id)
            self.ms = ms

            # PIECES FOR THE OBJECTS: disk center, the three axes, radius, area.
            the_center = ms.mean_center[:, -1]
            the_normal = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
            the_radius = 1/128
            the_area = np.pi * (the_radius**2)

            # MAKE THE OBJECTS: a full-box-length cylinder and a short
            # ("mid") one of height = radius, for each axis.
            xyz = [0, 1, 2]
            the_cyl = {}
            the_mid_cyl = {}
            for i in range(3):
                the_cyl[xyz[i]] = ds.disk(the_center, the_normal[i], the_radius, height=(1, 'code_length'))
                the_mid_cyl[xyz[i]] = ds.disk(the_center, the_normal[i], the_radius, height=the_radius)
                # The 'axis' field parameter steers yt's magnetic_field_los.
                the_cyl[xyz[i]].set_field_parameter('axis', i)
                the_mid_cyl[xyz[i]].set_field_parameter('axis', i)

            # THE DENSITY & FIELD, ZONE METHOD & YT BLOS: 2D
            # Rho: mass / cross-section area; Field: density-weighted mean B;
            # *_yt_i: plain volume integral of magnetic_field_los (no density
            # weight, no area division); *_yt_ii: density-weighted mean of
            # magnetic_field_los.
            B = ['magnetic_field_x', 'magnetic_field_y', 'magnetic_field_z']
            for j in range(3):
                self.synthRho[j][nc] = (the_cyl[j]['density'] * the_cyl[j]['cell_volume']).sum()/the_area
                self.synthRho_mid[j][nc] = (the_mid_cyl[j]['density'] * the_mid_cyl[j]['cell_volume']).sum()/the_area
                self.synthField[j][nc] = (the_cyl[j]['density'] * the_cyl[j][B[j]] * the_cyl[j]['cell_volume']).sum()/the_cyl[j]['gas','cell_mass'].sum()
                self.synthField_yt_i[j][nc] = (the_cyl[j]['magnetic_field_los'] * the_cyl[j]['cell_volume']).sum()
                self.synthField_yt_ii[j][nc] = (the_cyl[j]['density'] * the_cyl[j]['magnetic_field_los'] * the_cyl[j]['cell_volume']).sum()/the_cyl[j]['gas','cell_mass'].sum()
                self.synthField_mid[j][nc] = (the_mid_cyl[j]['density'] * the_mid_cyl[j][B[j]] * the_mid_cyl[j]['cell_volume']).sum()/the_mid_cyl[j]['gas','cell_mass'].sum()
                self.synthField_midyt_i[j][nc] = (the_mid_cyl[j]['magnetic_field_los'] * the_mid_cyl[j]['cell_volume']).sum()
                self.synthField_midyt_ii[j][nc] = (the_mid_cyl[j]['density'] * the_mid_cyl[j]['magnetic_field_los'] * the_mid_cyl[j]['cell_volume']).sum()/the_mid_cyl[j]['gas','cell_mass'].sum()

        # CACHE TO DISK (flip 'if 0' to 'if 1' to enable).
        if 0:
            hfivename = 'p66_brho/h5files/blos_ytvscyls_%s.h5'%(sim)
            Fptr = h5py.File(hfivename,'w')
            Fptr['synthrho'] = self.synthRho
            Fptr['synthrhomid'] = self.synthRho_mid
            Fptr['synthblos'] = self.synthField
            Fptr['synthblosmid'] = self.synthField_mid
            Fptr['synthblos_yt_i'] = self.synthField_yt_i
            Fptr['synthblos_yt_ii'] = self.synthField_yt_ii
            Fptr['synthblosmid_yt_i'] = self.synthField_midyt_i
            Fptr['synthblosmid_yt_ii'] = self.synthField_midyt_ii
            # Fix: the original said 'Fptr.close' (attribute access, a no-op);
            # the file was never actually closed/flushed.
            Fptr.close()
# MAIN
# Driver: build one telescope per simulation, optionally (re)compute the
# synthetic quantities, then plot the cached results from the HDF5 files.
import three_loopers_six as TL6
# The 'clobber'/'scopeN' guards support interactive re-runs: tools are
# rebuilt only when absent from the session or when clobber is set.
if 'clobber' not in dir():
    clobber=True
if 'scope1' not in dir() or clobber:
    scope1=telescope(TL6.loops['u601'])
if 'scope2' not in dir() or clobber:
    scope2=telescope(TL6.loops['u602'])
if 'scope3' not in dir() or clobber:
    scope3=telescope(TL6.loops['u603'])

simnames = ['u601','u602', 'u603']
for nt,tool in enumerate([scope1,scope2,scope3]):
    # WHICH CORES & RUN IF NOT CACHED
    # (flip 'if 0' to 'if 1' to regenerate the HDF5 cache)
    if 0:
        all_cores = np.unique(tool.this_looper.tr.core_ids)
        #core_list = all_cores[:2] #DEBUG
        core_list = all_cores
        tool.qtyRun(nt,core_list=core_list)

    #ONCE CACHED, PLOTS
    if 1:
        # NOTE(review): cache filename is keyed by the sim index nt, not the
        # sim name -- confirm this matches the files actually on disk.
        hfivename = 'p66_brho/h5files/blos_ytvscyls_%s.h5'%(nt)
        Fptr = h5py.File(hfivename,'r')
        synth_rho = Fptr['synthrho'][()]
        synth_rhomid = Fptr['synthrhomid'][()]
        synth_blos = Fptr['synthblos'][()]
        synth_blosmid = Fptr['synthblosmid'][()]
        synth_blosyt_one = Fptr['synthblos_yt_i'][()]
        synth_blosyt_two = Fptr['synthblos_yt_ii'][()]
        synth_blosmidyt_one = Fptr['synthblosmid_yt_i'][()]
        synth_blosmidyt_two = Fptr['synthblosmid_yt_ii'][()]

        dxyz = ['x','y','z']
        color=['b','g','orange']
        # One scatter per line-of-sight axis (x, y, z), accumulated on one
        # figure; the axline is the 1:1 reference.
        for i in range(3):
            if 1:
                plt.scatter(synth_blosmid[i],synth_blosmidyt_one[i],c=color[i],alpha=0.7)
            plt.axline((0, 0), slope=1, c='k', linewidth=0.8)
            #plt.xscale('log')
            #plt.yscale('log')
            plt.xlabel(r'$B_{los:x,y,z}$ SHORT')
            plt.ylabel(r'$B_{los:x,y,z}$ SHORT, YTM1')
            #plt.ylabel(r'$B_{los:x,y,z}$ LONG, YTrho')
            #outname ='Bpol_vs_Btol_longlog_%d_%s'%(i,simnames[nt])
            outname ='Blos_ytm1_vs_us_short_%s'%(simnames[nt])
            # SAVE ONE AXIS AT A TIME
            if 0:
                plt.savefig(outname)
                print('plotted_%d'%i)
                plt.close('all')
        # SAVING ALL AXES AT ONCE
        if 1:
            plt.savefig(outname)
            print('plotted_%d'%i)
            plt.close('all')

        # AND CLOSE H5 FILE
        Fptr.close()
| UTF-8 | Python | false | false | 7,288 | py | 530 | blos_ytvscyl.py | 500 | 0.55365 | 0.539243 | 0 | 192 | 36.947917 | 194 |
xandrasings/UstaCertificationManager | 11,931,419,155,800 | 721598170ced6387fbbd4d2ddc44937c15a88775 | 8957f9cb874c85c3e380e3cdd34babf1cea3c28c | /Imports/Modules/displayOfficial.py | d0d51316c3d6c12ee216afd524e023c1f3fb7682 | []
| no_license | https://github.com/xandrasings/UstaCertificationManager | 362c2e4044c4fda85290c48b8ac86102908d86d6 | 3eb1da3bdc0f608c21ebb2c3c986f75aea7361b6 | refs/heads/master | 2021-09-05T00:31:19.753542 | 2018-01-23T04:19:28 | 2018-01-23T04:19:28 | 108,066,678 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from ..Utilities.input import *
from ..Utilities.output import *
def displayOfficial(officials, disciplines, requirements, achievements, certifications):
    """Display an official's record.

    Currently only echoes the action name via output() and pauses with
    promptContinue(); all five parameters are accepted but unused here.
    # NOTE(review): looks like a placeholder awaiting the real rendering
    # logic -- confirm before relying on it.
    """
    output('displayOfficial')
    promptContinue()
astroknots/database | 10,703,058,547,843 | 072f813a4a84a6b55aa76dcc0db81c8185a78701 | 4336696c123f3d82b50d8c13e367cba429401c08 | /emptyApertureAnalysis_share/math_fits.py | 20089db4262909591f0628532570ab87172b67d0 | []
| no_license | https://github.com/astroknots/database | 6713015f6a29ab29f6104b5f182f6abb49d99789 | bd802b7e70cf2b3b7c0b1a3e9675fe1fdd178e14 | refs/heads/master | 2021-07-17T12:21:22.857666 | 2020-11-12T00:50:30 | 2020-11-12T00:50:30 | 227,710,177 | 0 | 0 | null | false | 2020-06-14T00:15:22 | 2019-12-12T22:45:03 | 2020-06-13T22:05:34 | 2020-06-14T00:15:22 | 3 | 0 | 0 | 0 | Jupyter Notebook | false | false | import numpy as N
import math_models as mm
from scipy.optimize import leastsq
import scipy.stats as stats
import bootstrap as bs
from statsmodels.formula.api import rlm
def robust_linefit(x, y):
    """Robust linear fit to (x, y) via iteratively reweighted least squares
    (statsmodels RLM, formula 'y ~ x').

    Returns six values: (slope, sig_slope, yint, sig_yint, s_yint, cov),
    i.e. the slope and intercept, their standard errors, a scaled residual
    scatter s_yint, and one element of the scaled parameter covariance.
    """
    regression = rlm("y ~ x", data=dict(y=y, x=x)).fit()
    slope = regression.params[1]; yint = regression.params[0] # slope and intercept fit values
    # NOTE(review): bcov_scaled.x[0] pulls a single covariance element via
    # the formula column label 'x' -- confirm this is the intended term.
    cov = regression.bcov_scaled.x[0]
    # Standard errors; the intercept error is normalized by sqrt(N) and an
    # empirical factor of 0.5.
    sig_slope = regression.bse[1]; sig_yint = regression.bse[0]/N.sqrt(regression.nobs)*.5
    # normalized standard deviation errors of the central values for the given sample.
    s_yint = N.sqrt(N.sum(regression.resid**2)/regression.nobs)*.6745#/1.105
    # s_yint and the 1.105 is an empirical value taken from robust_linefit.pro to match IDL's routine
    # for the standard deviations of the given sample.
    return slope, sig_slope, yint, sig_yint, s_yint, cov
def linearfit(x, y, runs=1e4):
    """Linear fit to (x, y) with bootstrap error estimates.

    A plain least-squares line is fit once; then `runs` bootstrap
    resamplings (sigma-clipped around the median offset via Smad) give the
    spread of the slope and intercept.
    Returns (slope, sd_slope, yint, sd_yint).
    """
    # Coerce scalars to 1-element arrays (in-place no-op for float arrays).
    x *= N.ones(1); y *= N.ones(1)
    slope, yint, r, prob2, see = stats.linregress(x, y)
    # Median offset between y and x (data presumably in log10 units --
    # confirm: the clipping below works on 10**x / 10**y ratios).
    scale = N.mean(y) - N.mean(x)
    nsig = Smad(N.log10(10**y/(10**x*10**scale)))
    bs_slope = N.zeros(runs); bs_yint = N.zeros(runs)
    for i in range(bs_slope.size):
        # Resample indices with replacement, keep points within the robust
        # scatter band, and refit.
        bsrun = bs.bootstrap_resample(N.arange(x.size))
        bsuse = N.where((10**y[bsrun]/(10**x[bsrun]*10**scale)>=10**(-1.*nsig)) & (10**y[bsrun]/(10**x[bsrun]*10**scale)<=10.**nsig))
        bs_slope[i], bs_yint[i] = stats.linregress(x[bsuse], y[bsuse])[:2]
    sd_slope = bs_slope.std(); sd_yint = N.sqrt(sd_slope**2/bs_yint.size*N.sum(bs_yint**2))
    return slope, sd_slope, yint, sd_yint
def linearfit_s1(x, y):
    """Linear fit with the slope pinned to 1.

    The intercept is simply mean(y) - mean(x); the uncertainty estimates
    (sd_slope, sd_yint, s_yint, cov) are taken from robust_linefit.
    Returns (slope, sd_slope, yint, sd_yint, s_yint, cov).
    """
    # Coerce scalars to 1-element arrays.
    x *= N.ones(1); y *= N.ones(1)
    slope = 1.
    yint = N.mean(y) - N.mean(x)
    # Only the error terms of the unconstrained robust fit are used.
    _, sd_slope, _, sd_yint, s_yint, cov = robust_linefit(x, y)
    return slope, sd_slope, yint, sd_yint, s_yint, cov
def Smad(x):
    """Robust scale estimate from the median absolute deviation.

    Computes MAD = median(|x - median(x)|) and rescales by 0.6745 so the
    result estimates a Gaussian sigma (see Beers et al. 1990).

    Fix: the original coerced input with `x *= N.ones(1)`, which mutates a
    caller's float array in place and raises a casting error for integer
    arrays; asarray(..., dtype=float) handles scalars, lists and int
    arrays without side effects.
    """
    x = N.asarray(x, dtype=float).ravel()
    MAD = N.median(N.abs(x - N.median(x)))
    return MAD / 0.6745
def gaussfit(x, y, peak=1., center=0., std=.1):
    """Least-squares fit of a single Gaussian to the (x, y) samples.

    peak, center and std seed the optimizer; the best-fit array
    [peak, center, std] is returned.
    """
    def _residuals(params, yy, xx):
        amp, mu, width = params
        return yy - mm.gauss(xx, amp, mu, width)

    initial_guess = [peak, center, std]
    fit = leastsq(_residuals, initial_guess, args=(y, x), maxfev=2000)
    return fit[0]
def gaussfit2(x, y, peak=(1., 1.), center=(0., 0.), std=(.1, .1)):
    """Least-squares fit of the sum of two Gaussians to (x, y).

    peak, center and std are length-2 sequences of initial guesses, one
    entry per component.  Returns the best-fit parameter array
    [peak1, peak2, center1, center2, std1, std2].

    Fix: the original scalar defaults (peak=1., center=0., std=.1) crashed
    with a TypeError at peak[0]; the defaults are now length-2 tuples.
    """
    def res(p, y, x):
        top1, top2, m1, m2, std1, std2 = p
        y_fit = mm.gauss(x, top1, m1, std1) + mm.gauss(x, top2, m2, std2)
        return y - y_fit
    p = [peak[0], peak[1], center[0], center[1], std[0], std[1]]  # initial guesses for leastsq
    plsq = leastsq(res, p, args=(y, x), maxfev=2000)
    return plsq[0]
def gaussfit3(x, y, peak=(1., 1., 1.), center=(0., 0., 0.), std=(.1, .1, .1)):
    """Least-squares fit of the sum of three Gaussians to (x, y).

    peak, center and std are length-3 sequences of initial guesses, one
    entry per component.  Returns the best-fit parameter array
    [peak1..3, center1..3, std1..3].

    Fix: the original scalar defaults crashed with a TypeError at peak[0];
    the defaults are now length-3 tuples.
    """
    def res(p, y, x):
        top1, top2, top3, m1, m2, m3, std1, std2, std3 = p
        y_fit = mm.gauss(x, top1, m1, std1) + mm.gauss(x, top2, m2, std2) + mm.gauss(x, top3, m3, std3)
        return y - y_fit
    p = [peak[0], peak[1], peak[2], center[0], center[1], center[2], std[0], std[1], std[2]]  # initial guesses
    plsq = leastsq(res, p, args=(y, x), maxfev=2000)
    return plsq[0]
def lorentzfit(x, y, peak=1., center=0., std=.1):
    """Least-squares fit of a single Lorentzian to the (x, y) samples.

    peak, center and std seed the optimizer; the best-fit array
    [peak, center, std] is returned.
    """
    def _residuals(params, yy, xx):
        amp, mu, width = params
        return yy - mm.lorentz(xx, amp, mu, width)

    initial_guess = [peak, center, std]
    fit = leastsq(_residuals, initial_guess, args=(y, x), maxfev=2000)
    return fit[0]
| UTF-8 | Python | false | false | 4,912 | py | 887 | math_fits.py | 14 | 0.614414 | 0.582248 | 0 | 109 | 43.06422 | 133 |
derekbekoe/azure-cli-interactive-shell | 19,447,611,935,585 | fb3966bd03de951551146ca0c5dff27e1bc84bfa | c878aec599a3812993f5eb047ac13403313e71e7 | /setup.py | a59adc636573cb80a7d78f26ced1188b2cb9fd2f | []
| no_license | https://github.com/derekbekoe/azure-cli-interactive-shell | c48b3d15c73a6ed8f2b3e2eb70e744a0105747f0 | 40b32c9d8e3e562d2a19e6a523df40e813f60b70 | refs/heads/master | 2021-01-22T21:27:51.471301 | 2017-03-17T23:47:13 | 2017-03-17T23:47:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from os import path
from setuptools import setup
here = path.abspath(path.dirname(__file__))
# Runtime requirements installed alongside the package.
DEPENDENCIES = [
    'azure-cli',
    'prompt_toolkit',
    'six',
    'pyyaml',
    'pytest',
    'jmespath',
]

# Packaging metadata for the az-cli interactive shell.
setup(
    name='az-cli-shell',
    version='0.1.1a27',
    author='Microsoft Corporation',
    scripts=['dev_setup.py', 'az-cli'],      # installed as executables
    packages=[
        "azclishell", "test"
    ],
    namespace_packages=[
        'azclishell',
    ],
    url='https://github.com/oakeyc/azure-cli-interactive-shell',
    install_requires=DEPENDENCIES,
)
| UTF-8 | Python | false | false | 548 | py | 12 | setup.py | 9 | 0.60219 | 0.593066 | 0 | 28 | 18.571429 | 64 |
jake-orielly/sierpinskiTriangle | 5,420,248,755,369 | 7be62bf5597ec1be398e77f1901f83173f688d92 | 8904f4b16968353b50b6532422280ae5e7160b6f | /triangles.py | bd193afa7c3d88b9862d0c226e56c3461704b693 | []
| no_license | https://github.com/jake-orielly/sierpinskiTriangle | d7f84c8b3d89005aa88cf8d214d2db16cd5dc708 | d267945651f9c4b679341bf25112f754de44fc01 | refs/heads/master | 2021-01-17T18:08:16.501713 | 2016-10-17T20:43:14 | 2016-10-17T20:43:14 | 71,179,489 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
def getAverage(q0, q1, pc0, pc1):
    """Return the midpoint [x, y] between points (q0, q1) and (pc0, pc1)."""
    mid_x = (q0 + pc0) / 2
    mid_y = (q1 + pc1) / 2
    return [mid_x, mid_y]
def main():
    # Chaos game: repeatedly jump halfway toward a randomly chosen triangle
    # vertex; the visited points converge onto the Sierpinski triangle.
    # makeEmptyPicture/show/getPixel/set*/repaint are presumably JES
    # (Jython Environment for Students) builtins -- TODO confirm.
    w = 1000
    h = 1000
    canvas = makeEmptyPicture(w,h)
    show(canvas)
    # Triangle vertices: top middle, bottom-left, bottom-right.
    p0 = [w / 2, 0]
    p1 = [0, h - 1]
    p2 = [w - 1, h - 1]
    pc = [p0[0],p0[1]]          # current point, starts at the top vertex
    counter = 0
    points = [p0,p1,p2]
    while True:
        n = random.randrange(3)                 # pick a random vertex
        pc = getAverage(points[n][0],points[n][1],pc[0],pc[1])
        px = getPixel(canvas,pc[0],pc[1])
        # Color varies smoothly with position (red/green by x, blue by y).
        setRed(px,((float(pc[0])/w)*256))
        setGreen(px,(256-(float(pc[0])/w)*256))
        setBlue(px,(256-(float(pc[1])/w)*256))
        counter = counter + 1
        if counter > 1000:
            # Throttle screen updates to one repaint per 1000 points.
            repaint(canvas)
            counter = 0
    # NOTE(review): unreachable -- the loop above never breaks.
    return
jiteshfulwariya/gleam_n_flex_web | 9,569,187,167,699 | 42c99835890ff6618de828052e24601a3052a5d7 | 8a648f66af4f7e7c26c96fb743930c0a06cb0b5c | /helper_functions.py | 5e817973679210e6fb5a2daa1a94a12ef459ed2b | []
| no_license | https://github.com/jiteshfulwariya/gleam_n_flex_web | 3493b38c30666a02decf24e64c0a323b7e94e3da | 20dea7fbf3efb57aa1e71ebb0e7ae73d3c884133 | refs/heads/master | 2021-05-09T15:28:49.321336 | 2017-12-19T18:10:35 | 2017-12-19T18:10:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import datetime
def convert_date_to_epoch(date):
    """Convert a datetime to epoch milliseconds; returns None for a falsy date.

    Fixes: time.mktime replaces the non-portable, undocumented
    strftime('%s') directive (missing on some platforms), and int()
    replaces the Python-2-only long().  Like '%s', mktime interprets a
    naive datetime in the local timezone.
    """
    if not date:
        return None
    import time  # local import keeps the module's top-level imports unchanged
    return int(time.mktime(date.timetuple())) * 1000
def convert_epoch_to_date(epoch):
    """Convert epoch milliseconds to a local datetime; returns None for a falsy epoch.

    Fix: int() replaces the Python-2-only long().
    NOTE(review): an epoch of 0 (1970-01-01 UTC) is falsy and also maps to
    None -- behavior preserved from the original; confirm callers want that.
    """
    if not epoch:
        return None
    return datetime.datetime.fromtimestamp(int(epoch) / 1000.0)
| UTF-8 | Python | false | false | 229 | py | 45 | helper_functions.py | 28 | 0.742358 | 0.703057 | 0 | 9 | 24.444444 | 81 |
rbhanot4739/PythonPrograms | 6,975,026,899,481 | 535b01b72715993366adf25b5c4b97f4bad7043c | 0cac5687ddf6a7948f9d3f8a238551af8238adaf | /python3/oop/advanced_topics/property_decorators.py | 53f6c246fce61f9ea1d96bba4549e13524dd62e9 | []
| no_license | https://github.com/rbhanot4739/PythonPrograms | e1b2a1502f7886a343887cb71882bd5511b9ae24 | 84b5084d873589f948d40d47bfa7d77c27e6184d | refs/heads/master | 2021-05-09T22:55:48.500313 | 2021-01-20T12:56:39 | 2021-01-20T12:56:39 | 118,767,434 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Employee:
def __init__(self, first, last):
self.first = first
self.last = last
@property
def email(self):
return "{}.{}@email.com".format(self.first, self.last)
@property
def fullname(self):
return "{} {}".format(self.first, self.last)
@fullname.setter
def fullname(self, name):
fname, lname = name.split(' ')
self.first = fname
self.last = lname
# Demo: exercise the fullname property, its setter, and the derived email.
emp1 = Employee('Sid', 'Sharma')
print(emp1.fullname)
emp1.fullname = 'Vibhu Kaul'
print(emp1.email)
| UTF-8 | Python | false | false | 543 | py | 413 | property_decorators.py | 310 | 0.589319 | 0.581952 | 0 | 24 | 21.625 | 62 |
alk3ckwd/work | 17,394,617,587,036 | 2705c48021e7328c1a208ad166d1195254deaaab | a4efdaad32bab94838cc26639a8826d1795cd3d5 | /test.py | d1b81f3f6a056be72c7d5f18fc4c9ed0ba1532cb | []
| no_license | https://github.com/alk3ckwd/work | 942c157941983ccbcbbc78152b50508ac9936683 | 68879bf28d6efc65983ee0949a0f2d3a744f9060 | refs/heads/master | 2021-01-23T06:34:06.056001 | 2017-03-27T19:10:09 | 2017-03-27T19:10:09 | 86,371,108 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
# Collapse the B values of each A-group into one comma-separated string.
df = pd.DataFrame({'A': ['A0', 'A1', 'A1'],
                   'B': ['B0', 'B1', 'B2']})
grouped = df.groupby(['A'])['B'].apply(", ".join)
print(grouped)
| UTF-8 | Python | false | false | 144 | py | 16 | test.py | 16 | 0.472222 | 0.430556 | 0 | 7 | 19.428571 | 46 |
dsitum/code-eval-challenges | 5,007,931,869,241 | 40a7dfc87ec3a90937924b819fe170db21979710 | 07153374a25fdd26d7db55971f7c277ba4908ea4 | /medium/NumberOfOnes.py | be10a65af1abcf019210e1fbb03e040a072a3f86 | []
| no_license | https://github.com/dsitum/code-eval-challenges | 23e294a3206a9c47727309b082e4b128d2add592 | 1ffaf044fcfd23019e2b62d1d1079ca6e15b1045 | refs/heads/master | 2021-01-18T14:58:48.205423 | 2015-02-09T01:02:08 | 2015-02-09T01:04:45 | 30,512,215 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
from sys import argv
with open(argv[1], "r") as f:
for line in f:
n = int(line.rstrip())
ones = 0
while n > 0:
ones += n % 2
n = int(n/2)
print(ones)
| UTF-8 | Python | false | false | 192 | py | 84 | NumberOfOnes.py | 82 | 0.557292 | 0.526042 | 0 | 14 | 12.714286 | 29 |
mourice-oduor/Python-projects | 14,877,766,751,600 | 7c4331e06ebbc9dd7ac8a612fb8e021b379c15b3 | 9a95b2015b978dc89e4c8f3aab5d43b1af4a16bf | /extract-audio-from-video/ffmpeg.py | 3b0e849d748fd29e11cf36835ab6e69cfa3a09e3 | []
| no_license | https://github.com/mourice-oduor/Python-projects | 0712aae1d237fb2f9e0ed900cc432e19250a29c8 | 3c6e27ac2aafe058bb8627e49064aa0f52574e0d | refs/heads/master | 2020-08-31T18:00:52.931117 | 2020-04-22T20:30:14 | 2020-04-22T20:30:14 | 218,750,487 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Download the FFmpeg-python file;
#on the terminal, run this commands:
#FFmpeg -i "name of your video file" -vn output_audio.mp3
#Extracting mp4 vid to ogg
FILE="1.mp4";
ffmpeg -i "${1.mp4}" -vn -acodec libvorbis -y "${1.mp4%.mp4}.ogg"
#In case we want to automatically process (batch process) all .MP4 video files in a folder we can use the following:
for FILE in *.mp4;
do
echo -e "Processing video '\e[32m$FILE\e[0m'";
ffmpeg -i "${FILE}" -vn -acodec libvorbis -y "${FILE%.mp4}.ogg";
done
| UTF-8 | Python | false | false | 520 | py | 33 | ffmpeg.py | 15 | 0.661538 | 0.632692 | 0 | 16 | 31.5 | 116 |
MNDSG/opencv | 1,443,109,054,434 | 5ffedbd03af190a872f92f1cfeec4b7caae94743 | a9faa1a2c3a8385b9ad873e5c05b817c3d3f300e | /不同图片质量保存.py | a33f27409e5c80e1159420db7924dccda4d63122 | []
| no_license | https://github.com/MNDSG/opencv | 33b82d7e31513a9183aec56a706b85292bcfa19c | 8f48f0d219932daada52a6d96d5aa3b662d8ae7e | refs/heads/master | 2021-05-21T14:30:01.255984 | 2020-04-07T08:49:52 | 2020-04-07T08:49:52 | 252,681,941 | 0 | 0 | null | false | 2020-04-03T09:38:11 | 2020-04-03T09:00:50 | 2020-04-03T09:26:06 | 2020-04-03T09:35:42 | 0 | 0 | 0 | 1 | Python | false | false | import cv2
img = cv2.imread("images/cat.jpg",1)
# jpg格式压缩:参数3表示压缩比(0~100),0质量最差
cv2.imwrite("images/cat3.jpg",img,[cv2.IMWRITE_JPEG_QUALITY,100])
# png格式压缩:参数3表示压缩比(0~9),o质量最好
cv2.imwrite("images/test3.png",img,[cv2.IMWRITE_PNG_COMPRESSION,0]) | UTF-8 | Python | false | false | 304 | py | 29 | 不同图片质量保存.py | 28 | 0.745902 | 0.655738 | 0 | 7 | 34 | 67 |
Sommysab/DataScience-MachineLearning-AI | 5,248,450,066,563 | 5bf0344ac9db1d6b6b331f12cde110a06bd64f7d | e7e9dfd78789db82859451c00f33a4145b9eee0a | /Python Projects/AI - Spam Detector/is_spam_or_not.py | bc574ed281c7eeb1521a5e10cce6db3bfc53af08 | []
| no_license | https://github.com/Sommysab/DataScience-MachineLearning-AI | 9c54a20e03456455747ddc2692075629c7f53a10 | e8a0dd42b19997b7ec905ae26119dba733189ba5 | refs/heads/master | 2022-12-24T16:43:51.717208 | 2020-09-25T14:07:22 | 2020-09-25T14:07:22 | 298,590,483 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
spam_df = pd.read_csv('emails.csv')
# VISUAL
ham = spam_df[ spam_df['spam'] == 0]
spam = spam_df[ spam_df['spam'] == 1]
print('Spam Percentage =', (len(spam)/len(spam_df))*100, '%')
print('Ham Percentage =', (len(ham)/len(spam_df))*100, '%')
sns.countplot(spam_df['spam'], label = 'Count Spam vs. Ham')
# TEST, TRAIN & CLEAN
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()
spam_ham_countvectorizer = vectorizer.fit_transform(spam_df['text'])
label = spam_df['spam'].values
from sklearn.naive_bayes import MultinomialNB
NB_c = MultinomialNB()
NB_c.fit(spam_ham_countvectorizer, label)
# Testing a Record
# EX 1
t_s = ['Free Money!! Grab and Grab for the last time!', 'Hi Obi, Please let me know if you need any further info.']
t_s_counvectorizer = vectorizer.transform(t_s)
prediction = NB_c.predict(t_s_counvectorizer)
print(prediction)
# EX 2
t_s = ['Hello, this is Sab. Am currently in town, lets met!', 'Make money, get Viagra!, be the superman.']
t_s_countvectorizer = vectorizer.transform(t_s)
prediction = NB_c.predict(t_s_countvectorizer)
print(prediction)
# NORM
X = spam_ham_countvectorizer
y = label
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
from sklearn.naive_bayes import MultinomialNB
NB_c = MultinomialNB()
NB_c.fit(X_train, y_train)
from sklearn.metrics import classification_report, confusion_matrix
y_predict_train = NB_c.predict(X_train)
cm = confusion_matrix(y_train, y_predict_train)
sns.heatmap(cm, annot=True)
y_predict_test = NB_c.predict(X_test)
cm = confusion_matrix(y_test, y_predict_test)
sns.heatmap(cm, annot=True)
print(classification_report(y_test, y_predict_test)) | UTF-8 | Python | false | false | 1,856 | py | 10 | is_spam_or_not.py | 8 | 0.719289 | 0.712823 | 0 | 75 | 23.76 | 115 |
shahrozimtiaz/codingPractice | 15,315,853,400,776 | 91e80651b51b8d45b49086df1c7619783edfd996 | a03ad4bbd84238124e07dfe378a82bcbbaf18597 | /python Practice/allComboCoverage.py | 763498f79a3ed17741fe1ebefb0b54bfdc7a26db | []
| no_license | https://github.com/shahrozimtiaz/codingPractice | 3f2b8ede7663f3610d901877656b546bc6aa7d28 | 9bd1161dd4c4cc425629ff0c39f051eea74acc54 | refs/heads/master | 2020-07-13T10:14:05.958991 | 2020-03-14T18:57:39 | 2020-03-14T18:57:39 | 205,062,836 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #REQUIREMENTS
# a1,a2,a3,a4 = 'non-empty string without special characters', 'non-empty string with special characters', 'more than 1 string as input','empty string'
# b1,b2,b3 = 'clear chosen strings', 'submit', 'restart'
# c1,c2 = 'with replacement', 'without replacement'
# d1,d2 = 'clicked', 'not clicked'
#TEST CASES
a1,a2,a3,a4 = '"software testing"', '"software\\ntesting"', '"software""testing"','""'
b1,b2,b3 = 'clear chosen strings', 'submit', 'restart'
c1,c2 = 'with replacement', 'without replacement'
d1,d2 = 'click refresh', 'don\'t click refresh'
a = [a1,a2,a3,a4]
b = [b1,b2,b3]
c = [c1,c2]
d = [d1,d2]
i = 1
for ab in a:
s = '{ ' + ab
for bb in b:
s1 = ', ' + bb
s+= s1
for cb in c:
s2 = ', ' + cb
s+= s2
for db in d:
s3 = ', ' + db + ' }'
s += s3
print('TC{}:\n inputs ={}\n expected output:\n input box:\n output box:\n string chosen box:\n'.format(i, s))
i += 1
s = s[:-len(s3)]
s = s[:-len(s2)]
s = s[:-len(s1)] | UTF-8 | Python | false | false | 1,108 | py | 97 | allComboCoverage.py | 85 | 0.51444 | 0.473827 | 0 | 33 | 32.606061 | 151 |
AfifCeshter/ccp | 19,086,834,704,300 | f7ca746f8a7284013f385e42b977934097e35176 | eeb0df786140d749f178d24f78c7fe7466e9ab9b | /app/databases/models/computer.py | a8be80bbe427b52f177739f6e65d50d7af7a193d | []
| no_license | https://github.com/AfifCeshter/ccp | daa8feb690487c0d912783dce5e95a88eee4ab10 | a69404429eee6d443535c60fee32fb9bd53e0d11 | refs/heads/master | 2023-03-01T18:26:01.017852 | 2021-02-16T07:15:21 | 2021-02-16T07:15:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from app.databases.db_sql import db_sql
from app.utils.time_utils import datetime_jakarta, timestamp_jakarta
class Computer(db_sql.Model,):
    """SQLAlchemy model for a remotely managed computer: identity, location,
    reported power state, the pending command, and bookkeeping timestamps."""
    id = db_sql.Column(db_sql.Integer(), primary_key=True)
    computer_id = db_sql.Column(db_sql.String(40), nullable=False, unique=True)
    computer_name = db_sql.Column(db_sql.String(32), nullable=False)
    computer_location = db_sql.Column(db_sql.String(32), nullable=False)
    computer_power_status = db_sql.Column(db_sql.Integer(), default=0, nullable=False)  # 0: off, 1: on
    computer_cmd = db_sql.Column(db_sql.Integer(), default=1, nullable=False)  # 0: off, 1: on, 2: restart
    # Fix: pass the callable (no parentheses) so the timestamp is evaluated
    # at row-insert time; the original called timestamp_jakarta() once at
    # import time, stamping every new row with the process start time.
    computer_cmd_date = db_sql.Column(db_sql.Integer(), default=timestamp_jakarta, nullable=False)
    computer_ping_timestamp = db_sql.Column(db_sql.Integer(), default=timestamp_jakarta, nullable=False)
    computer_instance = db_sql.Column(db_sql.String(32), nullable=False)
    created = db_sql.Column(db_sql.DateTime())
    updated = db_sql.Column(db_sql.DateTime())

    def add_timestamp(self):
        """Stamp created/updated with the current Jakarta time (new rows)."""
        self.created = datetime_jakarta()
        self.updated = self.created

    def update_timestamp(self):
        """Refresh only the updated timestamp (existing rows)."""
        self.updated = datetime_jakarta()

    @staticmethod
    def add(data):
        """Insert a Computer row; returns True on success, False on failure."""
        try:
            data.add_timestamp()
            db_sql.session.add(data)
            db_sql.session.commit()
            return True
        except Exception as e:
            print(e)
            db_sql.session.rollback()
            db_sql.session.flush()
            return False

    @staticmethod
    def update(data):
        """Commit pending changes on an attached row; True on success."""
        try:
            data.update_timestamp()
            db_sql.session.commit()
            return True
        except Exception as e:
            print(e)
            db_sql.session.rollback()
            db_sql.session.flush()
            return False

    @staticmethod
    def delete(id_data):
        """Delete the row with the given primary key; True on success."""
        try:
            data = Computer.query.get(id_data)
            db_sql.session.delete(data)
            db_sql.session.commit()
            return True
        except Exception as e:
            print(e)
            db_sql.session.rollback()
            db_sql.session.flush()
            return False

    def to_dict(self):
        """Serialize the fields exposed to API clients."""
        data = {
            'id': self.computer_id,
            'name': self.computer_name,
            'location': self.computer_location,
            'power_status': self.computer_power_status,
            'instance': self.computer_instance,
            'cmd': self.computer_cmd,
            'cmd_date': self.computer_cmd_date
        }
        return data

    def on_ping(self):
        """Record a heartbeat: mark the machine on and commit immediately."""
        try:
            self.computer_ping_timestamp = timestamp_jakarta()
            self.computer_power_status = 1
            db_sql.session.commit()
        except Exception as e:
            print(e)
            db_sql.session.rollback()
            db_sql.session.flush()

    def on_action(self):
        """Stamp when the current command was issued.
        # NOTE(review): unlike on_ping, this does not commit -- confirm the
        # caller commits afterwards."""
        self.computer_cmd_date = timestamp_jakarta()
| UTF-8 | Python | false | false | 2,944 | py | 15 | computer.py | 8 | 0.586617 | 0.581182 | 0 | 86 | 33.232558 | 106 |
Elzewire/class_board_images_recognition | 13,812,614,827,103 | ec3ba1bb1ab5bb26d9a506f17938c6f5d6f5ce58 | b3804e689c0fb9bf7c93d10fc3c1ffa21b9cd652 | /recognition/decoder.py | 0ba80cf96e08049663c650147a81e2aa5f3472bc | []
| no_license | https://github.com/Elzewire/class_board_images_recognition | badbc8e47ad5dcf12a8d809dd29863eece97b5d5 | 43079048cc0932f0beabddf474a34f6160727693 | refs/heads/main | 2023-06-05T00:12:47.107730 | 2021-06-21T17:28:13 | 2021-06-21T17:28:13 | 378,292,535 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from difflib import get_close_matches
import numpy as np
def load_dict(lang='ru'):
    """Load the word corpus for *lang* and return it as a list of words.

    'ru' maps to data/corpus_ru.txt; any other value falls back to
    data/corpus.txt.

    Fixes over the original version:
    * the file handle is closed deterministically (``with``); it used to leak,
    * words are split on any whitespace, so entries no longer carry the
      trailing newline that previously polluted difflib matching.
    """
    path = 'data/corpus_ru.txt' if lang == 'ru' else 'data/corpus.txt'
    words = []
    with open(path, encoding='utf-8') as corpus:
        for line in corpus:
            words.extend(line.split())
    return words
def dict_decode(sequences, lang='ru'):
    """Fuzzy-match every recognised sequence against the *lang* corpus.

    For each sequence the close matches (difflib, cutoff 0.7, de-duplicated
    per sequence) are appended to one flat result list.

    Fix over the original version: the corpus is loaded from disk once
    instead of once per sequence.
    """
    corpus = load_dict(lang)
    results = []
    for s in sequences:
        matches = set(get_close_matches(s, corpus, cutoff=.7))
        # extend() on an empty set is a no-op, so no emptiness check is needed.
        results.extend(matches)
    return results
| UTF-8 | Python | false | false | 594 | py | 61 | decoder.py | 18 | 0.563973 | 0.560606 | 0 | 28 | 20.214286 | 71 |
3xepy/binance-hackathon | 2,199,023,274,339 | 59c7cee48312b91b23e8520078c81a3bace81193 | 1e71eb6c691815aa2f7ce42e053d35e4109ca2ce | /config/states.py | b89b097c6fc4cad040e60a9272cf3db5f624e4bb | []
| no_license | https://github.com/3xepy/binance-hackathon | 9d73f333fe06eac5c52fe9828d2949d3c39d33a3 | a63b28fefba4ca6e6683c645e62312888511590c | refs/heads/master | 2023-04-10T05:09:18.937306 | 2021-09-12T08:13:08 | 2021-09-12T08:13:08 | 356,512,081 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from aiogram.dispatcher.filters.state import State, StatesGroup
class CheckAddress(StatesGroup):
    """aiogram FSM state group for validating a wallet address."""
    # One conversation state per supported chain.
    eth = State()
    bsc = State()
| UTF-8 | Python | false | false | 135 | py | 7 | states.py | 5 | 0.733333 | 0.733333 | 0 | 6 | 21.5 | 63 |
gystar/ml_hws | 8,340,826,517,521 | 6a8179cf413c113c59b714ef022bdd77d23766eb | 38cb30acd3074e30e21e7ac668af2ec515506bfa | /hw4/sentiment.py | 5dda888860d1eeeafb6f6ba7b29903af22f0f557 | []
| no_license | https://github.com/gystar/ml_hws | 0add17e0e9527ecae588836e750b417fa02280a8 | bd2487e796f602c67329cdda385ae089360e7a4d | refs/heads/main | 2023-04-08T18:15:23.282596 | 2021-04-06T12:52:39 | 2021-04-06T12:52:39 | 316,701,471 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 语义识别的RNN模型定义
# Input: sentence word-vector sequences; output: probability that the sentence is positive.
from torch import nn
import torch
import math
import numpy as np
import matplotlib.pyplot as plt
import importlib
import utils
importlib.reload(utils)
# Decide once whether CUDA is usable; the rest of the module keys off this flag.
cuda_ok = torch.cuda.is_available()
if cuda_ok:
    print("Cuda is available")
else:
    print("There is no gpu available.")
class SentimentModel(nn.Module):
    """Sentiment classifier: a sentence as a sequence of word vectors
    -> probability that the sentence is positive."""

    def __init__(self, dim_word):
        super(SentimentModel, self).__init__()
        hidden_dim = 512
        # Stacked LSTM over the word vectors; input is (seq_len, batch, dim_word).
        self.rnns = nn.LSTM(input_size=dim_word,
                            hidden_size=hidden_dim,
                            num_layers=3,
                            batch_first=False,  # second dimension is the batch
                            )
        # Project the LSTM hidden state down to a single sigmoid probability.
        self.fcs = nn.Sequential(
            nn.Linear(hidden_dim, 256),
            nn.Linear(256, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        # Only the final hidden state h_n of the LSTM stack is used.
        _, (hidden, _) = self.rnns(x)
        hidden = hidden.squeeze(0)
        probs = self.fcs(hidden)
        # hidden holds one row per LSTM layer; keep the top layer's output.
        return probs[-1]
def predict(model, sentenses, wv):
    """Score a large sentence collection in fixed-size batches so it never
    has to fit in (GPU) memory all at once; returns the CPU tensor of scores."""
    BATCH = 5000
    scores = torch.tensor([]).cuda() if cuda_ok else torch.tensor([])
    device_model = model.cuda() if cuda_ok else model.cpu()
    device_model.eval()
    with torch.no_grad():
        for start in range(0, len(sentenses), BATCH):
            stop = min(len(sentenses), start + BATCH)
            batch_vecs = utils.sens2vecs(sentenses[start:stop], wv)
            if cuda_ok:
                batch_vecs = batch_vecs.cuda()
            batch_pred = device_model(batch_vecs).squeeze()
            scores = torch.cat((scores, batch_pred), 0)
    return scores.cpu()
def train_model(sentenses, labels, wv, dim_word, model=None, epochs=10, nbatch=100):
    """Train (or continue training) a SentimentModel with BCE loss.

    :param sentenses: array of tokenised sentences
    :param labels: 0/1 sentiment labels aligned with *sentenses*
    :param wv: word-vector lookup used by utils.sens2vecs
    :param dim_word: dimensionality of one word vector
    :param model: pass an existing model to continue training it
    :param epochs: number of passes over the data
    :param nbatch: mini-batch size. BUG FIX: the batch used to be hard-coded
        to 5 samples, which also made the progress counter and the per-epoch
        average below (both computed from nbatch) wrong.
    :return: the trained model, moved back to the CPU
    """
    if model is None:  # otherwise: continue training the supplied model
        model = SentimentModel(dim_word)
    if cuda_ok:
        model = model.cuda()
    loss_func = nn.BCELoss()  # binary cross-entropy on the sigmoid output
    opt = torch.optim.Adam(model.parameters(), lr=1e-3)
    iters = math.ceil(len(sentenses)/nbatch)
    loss_all = np.zeros(epochs)
    for i in range(epochs):
        model.train()
        loss_sum = 0
        for j in range(iters):
            # Random mini-batch of nbatch sentences (sampled with replacement).
            idx = np.random.randint(0, len(sentenses), nbatch)
            x_sentenses = sentenses[idx]
            x_labels = torch.tensor(labels[idx]).float()
            vecs = utils.sens2vecs(x_sentenses, wv)
            if cuda_ok:
                x_labels, vecs = x_labels.cuda(), vecs.cuda()
            y_pred = model(vecs).squeeze()
            loss = loss_func(y_pred, x_labels)
            loss_sum += loss.item()
            if (j+1) % 100 == 0:
                print((j+1)*nbatch, " sentenses input, avarage loss:", loss_sum/(j+1))
            if j+1 == iters:
                loss_all[i] = loss_sum/(j+1)
                print("[epochs", i+1, "/", epochs,
                      "], avarage loss:", loss_all[i])
            opt.zero_grad()
            loss.backward()
            opt.step()
    # Plot the per-epoch average loss curve.
    plt.figure()
    plt.plot([x for x in range(0, epochs)], loss_all)
    plt.xlabel("epochs")
    plt.ylabel("loss")
    plt.show()
    return model.cpu()
| UTF-8 | Python | false | false | 3,157 | py | 37 | sentiment.py | 30 | 0.525091 | 0.511133 | 0 | 97 | 30.020619 | 86 |
ArtemAAA/FortyTwoTestTask | 12,987,981,121,251 | d886ab3c74d405be5eaf0eaa9736a77830a16ffd | 5a5df9520bb0f375078cb4d0d25dcfc87a85ccda | /apps/hello/tests/test_hello_commands.py | af06f21d78c03856bac4662de3932efbdc6a0d43 | []
| no_license | https://github.com/ArtemAAA/FortyTwoTestTask | 0b2c6bc426deb7e9a22411bb23f03b86331e6dad | d19ba5f9317478f4cc25da02820cfcf05366b9ee | refs/heads/master | 2018-03-27T08:49:24.172311 | 2018-01-23T15:03:25 | 2018-01-23T15:03:25 | 65,746,167 | 0 | 0 | null | true | 2016-08-15T16:11:59 | 2016-08-15T16:11:59 | 2016-05-13T12:24:57 | 2016-06-17T11:22:29 | 6,564 | 0 | 0 | 0 | null | null | null | from django.test import TestCase
from hello.models import Bio, Requests
from django.core.management import call_command
from django.utils.six import StringIO
from model_mommy import mommy
import sys
class CommandsTest(TestCase):
    """Tests for the ``objectscount`` management command."""

    def setUp(self):
        # One instance of each model so the command has something to count.
        self.object_bio = mommy.make(Bio)
        self.object_request = mommy.make(Requests)

    def test_objects_count_stdout_stderr(self):
        """command outputs correct objects count in stdout and stderr"""
        stdout = StringIO()
        stderr = StringIO()
        # Capture both streams, but restore them afterwards: the original
        # version replaced sys.stdout/sys.stderr and never put them back,
        # leaking the redirection into every later test (and swallowing
        # assertion output).
        orig_stdout, orig_stderr = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = stdout, stderr
        try:
            call_command('objectscount')
        finally:
            sys.stdout, sys.stderr = orig_stdout, orig_stderr
        objects_count_bio = str(Bio.objects.all().count())
        model_name_bio = Bio.objects.get(id=1).__class__.__name__.lower()
        objects_count_requests = str(Requests.objects.all().count())
        model_name_requests = (
            Requests.objects.get(id=1).__class__.__name__.lower()
        )
        bio_count = '%s - %s' % (model_name_bio, objects_count_bio)
        requests_count = '%s - %s' % (model_name_requests,
                                      objects_count_requests)
        self.assertIn(bio_count, stdout.getvalue())
        self.assertIn(requests_count, stdout.getvalue())
        self.assertIn(bio_count, stderr.getvalue())
        self.assertIn(requests_count, stderr.getvalue())
| UTF-8 | Python | false | false | 1,338 | py | 39 | test_hello_commands.py | 23 | 0.627803 | 0.626308 | 0 | 38 | 34.210526 | 73 |
pmisters/django-code-example | 10,934,986,763,957 | b5bcf6804ee717d2123c62eca9ed96079be88bef | dabc981714dd9297e811355fbb2f9f9a45c2281f | /board/models/_reservation_room.py | 80fce76c677caf545bb88a67e056f4a39c490ca0 | []
| no_license | https://github.com/pmisters/django-code-example | 4c9c8c7edb174875ae4df4d32ae73b0897fc2333 | 745ac9d0c89d8ee44885cc862882d6c4d13993a0 | refs/heads/master | 2023-01-07T11:55:55.670943 | 2020-11-14T11:14:19 | 2020-11-14T11:14:19 | 312,801,074 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from decimal import Decimal
from django.db import models
from django.db.models.fields.json import JSONField
from django.utils import timezone
from common.db import TimeableModel
class ReservationRoomQuerySet(models.QuerySet):
    """Custom queryset for ReservationRoom."""

    def active(self):
        """Return only rooms that have not been soft-deleted."""
        return self.filter(is_deleted=False)
class ReservationRoom(TimeableModel):
    """One room line of a Reservation, as received from an OTA channel.

    Rows are soft-deleted: ``delete()`` only flags the record (see
    ``is_deleted`` / ``deleted_at``), so channel history is preserved.
    """

    reservation = models.ForeignKey('board.Reservation', on_delete=models.CASCADE, related_name='rooms')
    channel_id = models.CharField(max_length=20)
    rate_id = models.PositiveIntegerField(blank=True, null=True)
    rate_plan_id = models.PositiveIntegerField(blank=True, null=True)
    rate_plan_id_original = models.PositiveIntegerField(blank=True, null=True)
    channel_rate_id = models.CharField(max_length=20)
    channel_rate_id_changed = models.CharField(max_length=20)
    # Current vs originally booked stay dates (the *_original pair keeps the
    # values first received from the channel).
    checkin = models.DateField()
    checkin_original = models.DateField()
    checkout = models.DateField()
    checkout_original = models.DateField()
    # original price from OTA
    price = models.DecimalField(max_digits=10, decimal_places=2, blank=True, default=0)
    # price accepted by Hotelier
    price_accepted = models.DecimalField(max_digits=10, decimal_places=2, blank=True, default=0)
    # original price from OTA
    netto_price = models.DecimalField(max_digits=10, decimal_places=2, blank=True, default=0)
    # price accepted by Hotelier
    netto_price_accepted = models.DecimalField(max_digits=10, decimal_places=2, blank=True, default=0)
    external_id = models.CharField(max_length=20, blank=True, null=True)
    external_name = models.CharField(max_length=250, blank=True, default='')
    guest_name = models.CharField(max_length=150, blank=True, default='')
    guest_count = models.PositiveSmallIntegerField(blank=True, default=1)
    adults = models.PositiveSmallIntegerField(blank=True, default=0)
    children = models.PositiveSmallIntegerField(blank=True, default=0)
    max_children = models.PositiveSmallIntegerField(blank=True, default=0)
    extra_bed = models.PositiveSmallIntegerField(blank=True, default=0)
    with_breakfast = models.BooleanField(default=False)
    currency = models.CharField(max_length=3, blank=True, null=True)
    tax = models.DecimalField(max_digits=10, decimal_places=2, blank=True, default=0)
    fees = models.DecimalField(max_digits=10, decimal_places=2, blank=True, default=0)
    # Free-form notes forwarded from the channel.
    notes_extra = models.TextField(blank=True, default='')
    notes_facilities = models.TextField(blank=True, default='')
    notes_info = models.TextField(blank=True, default='')
    notes_meal = models.TextField(blank=True, default='')
    # Cancellation/payment policy payloads (current vs as first received).
    policy = JSONField(blank=True, default=dict)
    policy_original = JSONField(blank=True, default=dict)
    # Soft-delete flag; see delete() below.
    is_deleted = models.BooleanField(default=False, db_index=True)
    deleted_at = models.DateTimeField(blank=True, null=True)

    objects = ReservationRoomQuerySet.as_manager()

    class Meta:
        app_label = 'board'

    def __str__(self) -> str:
        return f"CHANNEL ID={self.channel_id} RATE={self.channel_rate_id}"

    def save(self, **kwargs) -> None:
        """Normalise None values to sane defaults before persisting."""
        self.guest_count = self.guest_count or 1
        # Counts default to 0 when the channel sent nothing.
        for name in ('adults', 'children', 'max_children', 'extra_bed'):
            if getattr(self, name) is None:
                setattr(self, name, 0)
        # Money fields default to Decimal(0).
        for name in ('price', 'price_accepted', 'tax', 'fees', 'netto_price', 'netto_price_accepted'):
            if getattr(self, name) is None:
                setattr(self, name, Decimal(0))
        # Text fields default to the empty string.
        for name in ('external_name', 'guest_name', 'notes_extra', 'notes_facilities', 'notes_info', 'notes_meal'):
            if getattr(self, name) is None:
                setattr(self, name, '')
        # Partial saves must also persist the bookkeeping timestamp.
        if 'update_fields' in kwargs and kwargs['update_fields']:
            kwargs['update_fields'].append('updated_at')
        super().save(**kwargs)

    def delete(self, **kwargs) -> None:
        """Soft delete: flag the row instead of removing it from the table."""
        self.is_deleted = True
        self.deleted_at = timezone.now()
        self.save(update_fields=['is_deleted', 'deleted_at'])
| UTF-8 | Python | false | false | 4,019 | py | 121 | _reservation_room.py | 111 | 0.692212 | 0.680518 | 0 | 92 | 42.684783 | 115 |
rupeshpurum/automation | 7,610,682,094,749 | 5c393e1818b7b09144b03d54acf8863da16b29e9 | 4da481c5d2746a2b2bf34b87bad03288b67edc9f | /pluginimpl/OIDautomation/oid.py | 82af43693db41fb103a6d5e7d657300155e0b1c7 | []
| no_license | https://github.com/rupeshpurum/automation | fc25bbce752996341f0c2d7ac3c1b3b6e89854c6 | bd2d8f66c76699d9c88df3e321dba614a7175981 | refs/heads/master | 2020-04-21T07:22:09.704456 | 2019-04-09T05:49:36 | 2019-04-09T05:49:36 | 169,390,601 | 0 | 0 | null | true | 2019-02-06T10:44:17 | 2019-02-06T10:44:17 | 2019-01-02T13:12:51 | 2019-01-02T13:12:48 | 8,659 | 0 | 0 | 0 | null | false | null | from typing import List, Any, Union
__author__ = 'Shanmukh'
import sys
from array import *
import xml.dom.minidom
from lxml import etree
# Function to generate the OID
def get_oid(doc, col):
    """Build a dotted OID string for every <entry> element named *col*.

    :param col: colomn (entry name) which requires the oid
    :param doc: the mib repository object (xml.dom.minidom Document)
    :return: list of dotted OID strings, one per matching entry (root-to-leaf
             order, e.g. ".1.3.5"), or None when nothing matches (a message
             is printed in that case).

    Fixes over the original version:
    * the stale ``id`` value is no longer re-appended when an ancestor has no
      ``getAttribute`` (the Document node), which used to duplicate the root
      id in every OID;
    * the two bare ``except:`` clauses are narrowed to the exceptions that
      can actually occur;
    * the builtin ``id`` is no longer shadowed.
    """
    result = []
    for entry in doc.getElementsByTagName("entry"):
        if entry.getAttribute("name") != col:
            continue
        suffix = entry.getAttribute("id")
        # Collect the numeric ids from this entry up to the document root.
        ancestors = [int(suffix)]
        parent = entry.parentNode
        while parent is not None:
            try:
                node_id = parent.getAttribute("id")
            except AttributeError:
                node_id = None  # the Document node has no attributes
            if node_id:
                try:
                    ancestors.append(int(node_id))
                except ValueError:
                    pass  # non-numeric id: skip it, as before
            parent = parent.parentNode
        # Root-to-leaf order, rendered as ".a.b.c".
        arg = ''.join('.' + str(part) for part in reversed(ancestors))
        result.append(arg)
    if result:
        print(suffix)  # suffix of the last match, as in the original
        return result
    print("Didn't find a match for \"", col, "\"")
# Global Initializations
result = []
# NOTE(review): hard-coded workstation path -- should become configurable.
path='/home/sthummala/workspace/vsure/centina/sa/profiles/'
# prompts and stores mib-repo name
# mib_repository = input('Enter the mib-repository name: ')
# @todo get the mib-repository name from the user
# Parse the MIB repository with the stdlib DOM parser.
doc = xml.dom.minidom.parse("eltek-mib-repository.xml")
# searchString = input()
# Smoke test: resolve one known trap name.
oid = get_oid(doc, 'alarmUserConfigurable8Trap')
print("OID: ", oid)
profileXml='/home/sthummala/workspace/vsure/centina/sa/profiles/eltek.xml'
# lxml parser keeps CDATA sections intact.
parser = etree.XMLParser(strip_cdata=False)
root = etree.parse(profileXml, parser)
dependency=root.find('dependencies')
# Walk every dependency file of the profile and resolve OIDs for its entries.
for i in dependency.findall('file'):
    temp=i.get('path')
    if 'snmp/inventory/' in temp:
        if '.dtd' not in temp and 'if-mib' not in temp and 'pm/templates/' not in temp:
            parser=etree.XMLParser(strip_cdata=False)
            inventoryFile=etree.parse(path+temp,parser)
            table=inventoryFile.findall('table')
            # NOTE(review): the inner loop reuses ``i`` and shadows the
            # outer dependency-file loop variable.
            for i in table:
                print (i.get('name'))
                print (get_oid(doc, i.get('name')))
            # print (temp)
    if 'pm' in temp:
        if '.dtd' not in temp and 'if-mib' not in temp and 'pm/templates/' not in temp:
            pmRoot = etree.parse(path+temp, parser)
            for mg in pmRoot.iter('metricGroup'):
                for met in mg.findall('parameter'):
                    # NOTE(review): get_oid returns a list (or None), but
                    # lxml attribute values must be strings -- confirm this
                    # assignment actually works as intended.
                    met.attrib['oid']=get_oid(doc, met.get('name'))
                    print (met.get('oid') )
| UTF-8 | Python | false | false | 2,978 | py | 10 | oid.py | 3 | 0.558428 | 0.557421 | 0 | 98 | 29.346939 | 97 |
FTLiao/AlgoCrawl | 4,209,067,974,262 | 5cb6d0c994984784ee921e40d045c47ce383087b | 2bcea666ed191177d1d3d0d766c4810949d41898 | /leetcode/lc271_Encode_and_Decode_Strings.py | d2ecf4709dfc52d18615340ddc680b5aa06ef61b | []
| no_license | https://github.com/FTLiao/AlgoCrawl | 44f8245ddca422ff96cddf1c76fe0f4cb358b60c | 4d8aa8088a6fe8efdeacbcfa33d4c93aaf3252f8 | refs/heads/master | 2020-12-12T21:30:21.784793 | 2020-10-06T15:18:13 | 2020-10-06T15:18:13 | 234,232,039 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Codec:
def encode(self, strs: [str]) -> str:
"""Encodes a list of strings to a single string.
"""
string = ''
for word in strs:
string += word + chr(257)
return string
def decode(self, s: str) -> [str]:
"""Decodes a single string to a list of strings.
"""
output = []
start = 0
for end in range(len(s)):
if s[end] == chr(257):
output.append(s[start:end])
start = end + 1
return output
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.decode(codec.encode(strs))
| UTF-8 | Python | false | false | 659 | py | 481 | lc271_Encode_and_Decode_Strings.py | 475 | 0.514416 | 0.502276 | 0 | 23 | 27.652174 | 60 |
sthompsonxyz/Rosalind_Algs | 6,124,623,382,090 | ab18044cdaef3bffb902957eb029c8b0eb2b0f53 | 8b59f690ecfe60191e9e4c80bc7bfe877b7f1c19 | /alg9/ros_alg9_DNAmotif.py | 9d837517716bc545e1afcd325e79fb7a26a7d11d | []
| no_license | https://github.com/sthompsonxyz/Rosalind_Algs | ecb53a822b57bfe741fcafb43dc5d62381069d6f | 1246dbc634dd2897378c3facfeceb1f78a51ed30 | refs/heads/master | 2016-09-05T19:11:56.458900 | 2015-02-28T23:24:11 | 2015-02-28T23:24:11 | 30,469,023 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import re
import pprint
import sys
from Bio import SeqIO
class sequence:
    """Simple record for one FASTA entry: header line plus concatenated bases."""
    # Class-level defaults; the reader loop below assigns per-instance
    # name/bases attributes on each record it stores.
    name = ''
    bases = ''
# Accumulates one ``sequence`` record per FASTA entry in the input file.
seqs = list()
# FASTA header lines start with '>'.
regx = re.compile("^>[^\\n]+", re.I)
# NOTE(review): myF is never closed -- consider a ``with`` block.
myF = open("./fastaseqs.fa")
next_seq = sequence()
# gets fasta sequences from file and puts them into seq object for manip
# look at using biopython class here..
while 1:
    line = myF.readline()
    if not line:
        # EOF: flush the record that was being accumulated, then stop.
        store_seq = sequence()
        store_seq.name = next_seq.name
        store_seq.bases = next_seq.bases
        seqs.append(store_seq)
        break
    if regx.search(line):
        # New header: store the previous record (if any) before starting over.
        if next_seq.name:
            store_seq = sequence()
            store_seq.name = next_seq.name
            store_seq.bases = next_seq.bases
            seqs.append(store_seq)
            next_seq.bases = ''
        next_seq.name = line.rstrip()
    else:
        # Sequence line: append the bases to the current record.
        next_seq.bases = next_seq.bases + line.rstrip()
#check all sequences are the same length
length = len(seqs[0].bases)
for z in seqs:
    if length != len(z.bases):
        sys.exit("all sequences need to be same length!\n")
# Per-position base counts across all sequences (one list per nucleotide).
Aprofile = list()
Cprofile = list()
Gprofile = list()
Tprofile = list()
profile = list()
# NOTE(review): range(0, length-1) skips the final column -- looks like an
# off-by-one; confirm whether the last base position is meant to be counted.
for pos in range(0, length-1):
    a = c = g = t = 0
    for seq in seqs:
        if seq.bases[pos] == 'A':
            a = a+1
        elif seq.bases[pos] == 'C':
            c = c+1
        elif seq.bases[pos] == 'G':
            g = g+1
        elif seq.bases[pos] == 'T':
            t = t+1
    Aprofile.append(a)
    Cprofile.append(c)
    Gprofile.append(g)
    Tprofile.append(t)
# Print the profile matrix, one row per nucleotide (Python 2 print statements).
print "A: ",
for i in range(0, length-1):
    print Aprofile[i],
print "\nC: ",
for i in range(0, length-1):
    print Cprofile[i],
print "\nG: ",
for i in range(0, length-1):
    print Gprofile[i],
print "\nT: ",
for i in range(0, length-1):
    print Tprofile[i],
# Re-read the same file via Biopython and echo each record.
for seq_record in SeqIO.parse("./fastaseqs.fa", "fasta"):
    print seq_record.id
    print seq_record.seq
print(len(seq_record)) | UTF-8 | Python | false | false | 1,739 | py | 15 | ros_alg9_DNAmotif.py | 12 | 0.645198 | 0.635423 | 0 | 84 | 19.714286 | 72 |
justjake12/mom-chatbot | 11,536,282,204,153 | f30b294481fe313f81a96d6835c71da9ce736ec2 | 787e416394fbdf14bbacb7aa82daadf3a8ef8065 | /ProjectTemplate/my_module/functions.py | 41fbd4da2aebe99ac8011befee5d27fd8f4c1a7e | []
| no_license | https://github.com/justjake12/mom-chatbot | cb9d9b9f15fadfe027cc1351c46230999791de78 | 7d7b124aeab49cae422d89437aedda6d2822b68a | refs/heads/master | 2020-04-11T01:13:18.034677 | 2018-12-12T00:03:47 | 2018-12-12T00:03:47 | 161,409,061 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """A collection of functions for doing my project."""
import string
import nltk
import random
# Canned chatbot vocabulary: *_IN lists are trigger keywords matched against
# the user's input, *_OUT lists are reply pools picked from at random.
BYE = ['I will talk to you later', 'Be safe', \
       'Call me after class', 'Adios mi amorcito']
ADVICE_IN = ['help', 'stressed', 'depressed', 'nervous', 'advice', 'anxious', 'sad', 'feeling down']
QUESTION_OUT = ["Oh honey, I don't know", "Let me ask Dad", "I'll just call you later"]
RANDOM = ['Oh okay', 'Call me when you can', 'What?', 'Sounds good', 'Watch your tone']
GREETING_IN = ['morning', 'hey', 'whatsup', 'hi', 'hello', 'hola', 'yo']
GREETING_OUT = ['Good morning, honey!', 'How are you?', \
                'yo yo yo', 'que pasa', \
                'I was just thinking about you', \
                'Dad and I were just talking about you']
HUMOR_IN = ['joke', 'haha', 'funny', 'lmao', 'lol']
HUMOR_OUT = ['you get your humor from me', \
             'dont be surprised. you know im the funny one', \
             'why was that funny?', 'what does that even mean']
LOVEY_IN = ['love', 'miss']
LOVEY_OUT = ['I love you more, sweetie', 'I miss you so much. Come home soon', \
             'Not as much as I love you']
HOW_IN = ['How are you?', 'How was your day?']
HOW_OUT = ['Im good, sweetie! Thanks for checking up on me', \
           'Everything is good so far today!', 'Better than yesterday, but still not great', \
           'Ugh Im a little frustrated but everything will pass!']
def prepare_text(input_string):
    """Lower-case the input, strip its punctuation, and split it into tokens."""
    cleaned = remove_punctuation(input_string.lower())
    return cleaned.split()
def is_question(input_string):
    """Pick a question-style reply when the input contains '?', otherwise a
    generic reply.

    Fix over the original version: it looped over every character, re-rolling
    the random reply each time, and raised UnboundLocalError on empty input.
    One membership test is enough, and empty input now gets a generic reply.
    """
    if '?' in input_string:
        return random.choice(QUESTION_OUT)
    return random.choice(RANDOM)
def selector(input_list, check_list, return_list):
    """Return a random item from *return_list* when any token of *input_list*
    appears in *check_list*; otherwise return None.

    Fix over the original version: it re-tested ``item in input_list`` (always
    true while iterating that very list) and re-rolled the random choice once
    per matching token.
    """
    if any(item in check_list for item in input_list):
        return random.choice(return_list)
    return None
def remove_punctuation(input_string):
    """Return *input_string* with every ASCII punctuation character removed."""
    kept = (char for char in input_string if char not in string.punctuation)
    return ''.join(kept)
def end_chat(input_list):
    """True when the user said 'bye' -- signals the chat loop to stop."""
    return 'bye' in input_list
def list_to_string(input_list, separator):
    """Join *input_list* into one string with *separator* between the items.

    Fix over the original version: it only joined the first three items
    (``input_list[1:3]``) and raised IndexError on an empty list. This joins
    every item and returns '' for an empty list.
    """
    return separator.join(input_list)
def string_concatenator(string1, string2, separator):
    """Glue string1 and string2 together with separator in the middle."""
    return string1 + separator + string2
def is_in_list(list_one, list_two):
    """Check whether list_one and list_two share at least one element."""
    return any(element in list_two for element in list_one)
def find_in_list(list_one, list_two):
    """Return the first element of list_one that also occurs in list_two,
    or None when there is no overlap."""
    shared = (element for element in list_one if element in list_two)
    return next(shared, None)
return None | UTF-8 | Python | false | false | 3,860 | py | 5 | functions.py | 3 | 0.621503 | 0.617617 | 0 | 131 | 28.473282 | 116 |
duhoang00/python-data-analysis-lessons | 3,736,621,584,099 | e35b49482b3143a669dcae95c0bd6e0c2b20b97c | 7a24bc8b68e3ced92701da47eb41c3e32fdeb83a | /HomeWork_Function/Bai1_NamNhuan.py | 4020ffdc693b35301d4c3408b3adfcb8b378fe59 | []
| no_license | https://github.com/duhoang00/python-data-analysis-lessons | 4e45a0ade1e638a203f00fabededeb464b06affc | 22e89d517787f6166e4f2089e66a6eb7f83027d6 | refs/heads/main | 2023-06-25T02:43:06.390134 | 2021-07-20T14:08:42 | 2021-07-20T14:08:42 | 382,753,931 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n = 0
# Keep prompting until the user types a positive integer.
while True:
    user_input = input("Nhập N = ")
    try:
        n = int(user_input)
    except ValueError:
        continue
    if n > 0:
        break
# Gregorian leap-year rule: divisible by 4, except century years,
# which must also be divisible by 400.
if n % 4 == 0 and (n % 100 != 0 or n % 400 == 0):
    print(n, "là năm nhuận")
else:
    print(n, "không phải là năm nhuận")
| UTF-8 | Python | false | false | 406 | py | 32 | Bai1_NamNhuan.py | 30 | 0.486911 | 0.455497 | 0 | 20 | 18.1 | 43 |
NimraSadaqat/dynamicwebsitescraper | 7,456,063,253,137 | 2babbf1a5b95eff39c193deb9943de0c207c1d0e | 19f511e826f3f1285d49087fa8a37c4317f44180 | /koovs/koovs/spiders/koov.py | 31600428c4d5bb635ceb6c38ee8143ccb094e29b | []
| no_license | https://github.com/NimraSadaqat/dynamicwebsitescraper | 4131061d00f3b1a83332c45d22146b29f49ab269 | 7fac2f2a4549a005a076fd3720830b17c96d6d85 | refs/heads/master | 2022-12-06T05:21:30.167678 | 2020-08-19T07:49:01 | 2020-08-19T07:49:01 | 288,490,222 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import scrapy
from selenium.common.exceptions import TimeoutException
from ..items import KoovsItem
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import time
class KoovSpider(scrapy.Spider):
    """Scrapy spider that drives a Selenium Chrome browser to expand the
    dynamically loaded Koovs product list, then scrapes the final page."""
    name = 'koov'
    start_urls = ['https://www.koovs.com/tags/sweet-summer-vibes']
    def __init__(self, name=None, **kwargs):
        # NOTE(review): hard-coded Windows chromedriver path -- should be
        # configurable/parameterised.
        super(KoovSpider, self).__init__(name, **kwargs)
        self.browser = webdriver.Chrome('D:/chromedriver_win32/chromedriver.exe')
    @staticmethod
    def get_selenium_response(browser, url):
        """Open *url*, repeatedly click "load more" until the product list
        stops growing, and return the fully expanded page source (bytes)."""
        browser.get(url)
        WebDriverWait(browser, 10).until(EC.presence_of_all_elements_located((By.XPATH, "//*[@id='prodBox']/li")))
        table = browser.find_element_by_css_selector("#prodBox")
        get_number = 0
        while True:
            count = get_number
            products = table.find_elements_by_css_selector("li.imageView")
            # print(products)
            browser.execute_script("arguments[0].scrollIntoView();", products[-1]) # scroll to last row
            try:
                button = WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.ID, "loadMoreList")))
                button.click()
            except TimeoutException:
                # No clickable "load more" button within 10s: list is complete.
                print("No more LOAD MORE RESULTS button to be clicked")
                break
            get_number = len(products)
            print(get_number)
            time.sleep(1)
            if get_number == count:
                # Clicking did not add any products: stop expanding.
                break
        return browser.page_source.encode('utf-8')
    def parse(self, response):
        """Extract title/price/image per product; also appends each row to
        data.txt, and yields the items for the configured pipeline."""
        koov_response = scrapy.Selector(text=self.get_selenium_response(self.browser, response.url)) # Sending url to selenium webdriver and collecting the selenium response
        item = KoovsItem()
        products = koov_response.css('li.imageView')
        a=1
        for x in products:
            item['no'] = a
            a = a+1
            title = x.css('.productName::text').extract()
            price = x.css('.product_price').css('::text').extract()
            image = x.css('img.prodImg::attr(src)').extract()
            item['title'] = ''.join(title)
            item['price'] = ''.join(price)
            item['image'] = ''.join(image)
            with open('data.txt', 'a', encoding="utf-8") as f: # Writing data to file
                f.write('Item No: {0}, Title: {1},Price: {2}, Image: {3}\n'.format(item['no'], item['title'], item['price'], item['image']))
            yield item # Writing data to sqlite database file
| UTF-8 | Python | false | false | 2,643 | py | 3 | koov.py | 1 | 0.603859 | 0.597049 | 0 | 61 | 42.327869 | 173 |
Armando123x/JuegosPygame | 12,841,952,232,188 | 38b72257525cc5ecad52e81d3695bc211f7170eb | 10cf226c1481d83274939d6a2fe8b7582b30e992 | /Sprites/BreakBlocks/__init__.py | 6110e0cc9d06dea66c344d2e3d20e4e52f279c32 | []
| no_license | https://github.com/Armando123x/JuegosPygame | 3f9bbbdd3b031731849010f48e4463f4034c4f1c | b497a94fcc4e79ab23d8d5f06320a80b2b3e0588 | refs/heads/master | 2021-09-15T00:35:59.447370 | 2018-03-04T06:38:36 | 2018-03-04T06:38:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame
import random
from sprites import *
# RGB colours and window dimensions shared by the whole game.
COLOR_FONDO = (40, 40, 220)
ROJO = (180, 20, 40)
COLOR_PROYECTIL = (120, 130, 20)
AMARILLO = (200, 200, 10)
ANCHO_PANTALLA = 700
ALTO_PANTALLA = 500
class Juego(object):
    """Game state and logic for the Break-Blocks pygame mini game."""
    def __init__(self):
        """Reset the game: build the block grid and place the player sprite."""
        self.game_over = False
        self.puntuacion = 0
        self.lista_colores = [(200, 50, 10), (220, 250, 10), (20, 230, 10), (250, 200, 10)]
        self.lista_sprites = pygame.sprite.Group()
        self.lista_bloques = pygame.sprite.Group()
        self.lista_proyectiles = pygame.sprite.Group()
        # Grid of randomly coloured blocks across the top of the screen.
        for y in range(10, 125, 25):
            for x in range(10, 680, 62):
                bloque = Bloque(self.lista_colores[random.randint(0, 3)])
                bloque.position(x, y)
                self.lista_bloques.add(bloque)
                self.lista_sprites.add(bloque)
        self.protagonista = Protagonista()
        self.protagonista.rect.y = 470
        self.lista_sprites.add(self.protagonista)
    def procesa_eventos(self):
        """Handle input events; return True when the window was closed."""
        for evento in pygame.event.get():
            if evento.type == pygame.QUIT:
                return True
            elif evento.type == pygame.MOUSEBUTTONDOWN:
                if self.game_over:
                    # Click after game over restarts the game.
                    self.__init__()
                else:
                    # Fire a projectile of the player's current colour,
                    # centred on the player, then re-roll the player colour.
                    proyectil = Proyectil(self.protagonista.get_color())
                    proyectil.rect.x = (self.protagonista.rect.x + self.protagonista.image.get_width() / 2) - 4
                    proyectil.rect.y = self.protagonista.rect.y
                    self.lista_sprites.add(proyectil)
                    self.lista_proyectiles.add(proyectil)
                    self.protagonista.change_color(self.lista_colores[random.randint(0, 3)])
        return False
    def logica_ejecucion(self):
        """Advance sprites, resolve projectile/block collisions, detect win."""
        if not self.game_over:
            self.lista_sprites.update()
            for proyectil in self.lista_proyectiles:
                lista_bloques_alcanzados = pygame.sprite.spritecollide(proyectil, self.lista_bloques, False)
                for bloque in lista_bloques_alcanzados:
                    # Projectile is consumed on any hit; the block is only
                    # destroyed (and the score raised) on a colour match.
                    self.lista_proyectiles.remove(proyectil)
                    self.lista_sprites.remove(proyectil)
                    if proyectil.get_color() == bloque.get_color():
                        self.lista_sprites.remove(bloque)
                        self.lista_bloques.remove(bloque)
                        self.puntuacion += 1
                if proyectil.rect.y < -10:
                    # Projectile left the screen without hitting anything.
                    self.lista_proyectiles.remove(proyectil)
                    self.lista_sprites.remove(proyectil)
            if len(self.lista_bloques) == 0:
                self.game_over = True
    def display_frame(self, pantalla):
        """Draw the current frame: sprites in play, title screen on game over."""
        pantalla.fill(COLOR_FONDO)
        if self.game_over:
            fuente = pygame.font.SysFont("Dimitri Swank", 45)
            texto = fuente.render("..::BREAK-BLOCKs::..", True, AMARILLO)
            centrar_x = (ANCHO_PANTALLA // 2) - (texto.get_width() // 2)
            centrar_y = (ALTO_PANTALLA // 2) - (texto.get_height() // 2)
            pantalla.blit(texto, [centrar_x, centrar_y])
            fuente = pygame.font.SysFont("Dimitri Swank", 25)
            texto = fuente.render("Haz click para volver a jugar", True, AMARILLO)
            centrar_x = (ANCHO_PANTALLA // 2) - (texto.get_width() // 2)
            pantalla.blit(texto, [centrar_x, centrar_y + 50])
        if not self.game_over:
            self.lista_sprites.draw(pantalla)
        pygame.display.flip()
def main():
    """Set up the pygame window and run the event/logic/draw loop at 60 FPS."""
    pygame.init()
    dimensiones = [ANCHO_PANTALLA, ALTO_PANTALLA]
    pantalla = pygame.display.set_mode(dimensiones)
    pygame.display.set_caption(".::BREAK-BLOCK::.")
    #pygame.mouse.set_visible(False)
    game_loop = False
    reloj = pygame.time.Clock()
    juego = Juego()
    # procesa_eventos() returns True on window close, which ends the loop.
    while not game_loop:
        game_loop = juego.procesa_eventos()
        juego.logica_ejecucion()
        juego.display_frame(pantalla)
        reloj.tick(60)
    pygame.quit()
if __name__ == "__main__":
main() | UTF-8 | Python | false | false | 4,012 | py | 49 | __init__.py | 49 | 0.566052 | 0.539133 | 0 | 114 | 34.201754 | 111 |
genomeannotation/reformat | 8,693,013,832,860 | 44f8d971028a8e86446ce0d63770a1291c4040c9 | 28e6cd8e80849392cb372a828c61049f0ab7fff8 | /src/vcf.py | c5601e85a2dc54e5c1c97118373d1af69f84fc61 | []
| no_license | https://github.com/genomeannotation/reformat | e683bf5bd61908af679a4ac682fe590d1ab1f3c8 | 58bbaccfc1c018db7cbc79d2dec07321b4fab9ff | refs/heads/master | 2021-01-17T12:37:16.260366 | 2016-06-22T20:16:16 | 2016-06-22T20:16:16 | 59,068,256 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import os
import sys
class Vcf:
    """Minimal VCF reader that fills a caller-supplied ``data`` container."""

    def __init__(self):
        """Nothing to initialise; kept for API compatibility."""

    #read info from vcf
    def read_vcf(self, vcf_filename, data):
        """Parse *vcf_filename* and append the parsed rows onto *data*.

        ``data`` must expose list attributes ``cleaned_map``, ``SNP_labels``,
        ``REF_ALT`` and ``SNPs`` plus an ``ind_names`` attribute. Individual
        names come from the ``#CHROM`` header row; every later row yields one
        SNP label ("S<ID>_<POS>"), its REF+ALT string and per-sample genotypes.

        Exits the process when the file does not exist (original behaviour).

        Fixes over the original version: the file handle is managed with
        ``with`` (it used to stay open for the whole parse and leaked on
        error), and the dead manual ``i = i+1`` inside the ``enumerate``
        loop was removed.
        """
        # Verify and read VCF file
        if not os.path.isfile(vcf_filename):
            sys.stderr.write("Failed to find " + vcf_filename + ".\n")
            sys.exit()
        with open(vcf_filename, 'r') as fi:
            allLines = fi.readlines()
        g = 0  # becomes 1 once the #CHROM header row has been seen
        for i, words in enumerate(allLines):
            words = words.strip('\n')
            splits = words.split("\t")
            data.cleaned_map.append([])
            #take individual names
            if splits[0] == "#CHROM":
                data.ind_names = splits[9:]
                data.cleaned_map[-1].append(words)
                g = 1
            #take SNP data
            elif g == 1:
                #SNP label
                data.SNP_labels.append("S" + splits[2] + "_" + splits[1])
                #SNP reference and alternative
                data.REF_ALT.append([])
                data.REF_ALT[-1].append(splits[3]+splits[4])
                #SNP data (genotype is the first ':'-separated field)
                data.SNPs.append([])
                for snp in splits[9:]:
                    data.SNPs[-1].append(snp.split(":")[0])
                data.cleaned_map[-1].append(splits[0].split("|")[3])
                data.cleaned_map[-1].append(splits[1])
            #progress output
            sys.stdout.write('\r')
            sys.stdout.write("[reading " + vcf_filename + ": " + str(round(((i+1)*100/len(allLines)))) + "%]")
            sys.stdout.flush()
        sys.stdout.write("\n")
| UTF-8 | Python | false | false | 1,298 | py | 10 | vcf.py | 10 | 0.595532 | 0.577042 | 0 | 52 | 23.942308 | 101 |
ZachMillsap/PyCharm | 10,393,820,892,028 | 085638a3af9420413427aef04300a65732b46266 | a228c12564e6dfdd6a0770b2ae008319bd89490d | /Module7/fun_with_collections/Topic 3/write.py | 6fa75a0aac426bd0f819c47bffb77e8f4bbd21f3 | []
| no_license | https://github.com/ZachMillsap/PyCharm | 27158e4e5a06f87d03d0a4c069ae908e19108a4a | 870a81120c55f134ffaa2a22b79a5c3f2de9c6c8 | refs/heads/master | 2022-11-25T09:11:58.504544 | 2020-08-03T01:38:33 | 2020-08-03T01:38:33 | 274,780,004 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def write_to_file(args, kwargs):
f = open('student_info.txt', 'w')
f.write(kwargs)
tuple = (args)
input_list = tuple
f.writelines(input_list)
f.close()
def get_student_info(*args, **kwargs):
    """Echo the keyword info, collect score tuples from stdin until the user
    types 'done', then persist everything via write_to_file."""
    for key, value in kwargs.items():
        print("%s == %s" % (key, value))
    kwargs = str(kwargs)
    entries = []
    response = input('Enter the list of scores. Enter done to end\n')
    while response != 'done':
        entries.append(tuple(response.split()))
        response = input()
    args = str(entries)
    write_to_file(args, kwargs)
def read_from_file():
    """Print the contents of student_info.txt located next to this script.

    Fix over the original version: the file is opened with ``with`` so the
    handle is closed even when ``read()`` raises (it used to leak on error).
    """
    import os
    file_dir = os.path.dirname(__file__)
    file_name = "student_info.txt"
    with open(os.path.join(file_dir, file_name), "r") as f:
        print(f.read())
if __name__ == '__main__':
get_student_info(name = 'zach')
get_student_info(name = 'steven')
get_student_info(name = 'tim')
get_student_info(name = 'rachel')
read_from_file()
input("Press any key to end")
| UTF-8 | Python | false | false | 1,010 | py | 25 | write.py | 23 | 0.576238 | 0.574257 | 0 | 38 | 25.578947 | 65 |
blha303/mcmoddb | 13,039,520,721,714 | 99df44ee446eac203cadf045048909dead995580 | 0e2e3c46caa1d048ad6d63408508ed0d45895929 | /platform_utils/moddb_config.py | 3bfa7fae8f81dcea592aeec9bfcf471a0708fd46 | []
| no_license | https://github.com/blha303/mcmoddb | b50b087975284d2e1e77db309c7ac5a57d75ca95 | d87e2c5f047082dde77ba45a7382f3a1966a76f1 | refs/heads/master | 2020-04-06T06:56:24.633246 | 2013-01-13T18:38:51 | 2013-01-13T18:38:51 | 7,591,803 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python2
Config = {
'autodl': True, # Automatic downloading of the ModDB. Valid options: True False . Case sensitive.
'checkdl': True, # Automatic checking of the ModDB. Valid options: True False . Case sensitive.
'showchecksum': False, # Show the checksums of the DBs downloaded. Valid options: True False . Case sensitive.
'tmpdir': "tmp", # Directory for storing temporary files.
'moddir': "mods", # Directory for storing downloaded mods.
'dbdir': "db", # Directory for storing the DBs
'tmpdbfile': 'moddb_tmp.yml', # Temporary db file
'localdbfile': 'moddb_local.yml', # Local db file
'globaldbfile': 'moddb_global.yml', # Global db file
'serverdb': "http://elemeno.dyndns.org/moddb/moddb_global.yml",
'installdbfile': 'installed.yml',
'force-platform': ''
}
| UTF-8 | Python | false | false | 790 | py | 5 | moddb_config.py | 4 | 0.712658 | 0.711392 | 0 | 16 | 48.375 | 111 |
arischow/Tweetbo | 8,031,588,887,628 | e8784c3f2c9a73e04f3efcdd01e0cb1001e9d5f9 | 6d0a2136d480c41b63b813037606ffe2d31094ac | /auto_json.py | 129b1b77ba65c142041ef99ea9f97543d974f9e1 | []
| no_license | https://github.com/arischow/Tweetbo | f47cf36bccf902152d2d8fe2a30b92454b28151d | 7aad68389f3d616bcb500ddf8e035335e5a4ff5f | refs/heads/master | 2016-09-12T13:54:15.679082 | 2016-07-23T02:58:48 | 2016-07-23T02:58:48 | 63,911,246 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class JSONRepr(object):
def json(self):
raise NotImplementedError
def formatted_json(self, d):
raise NotImplementedError
def excluded(self):
return []
def serialize(self):
return self.__dict__
class SignUpJSON(JSONRepr):
def __init__(self):
self.success = False
self.message = None
self.error = None
#
# sj = SignUpJSON()
# print(sj.__dict__, type(sj.__dict__))
| UTF-8 | Python | false | false | 443 | py | 10 | auto_json.py | 4 | 0.589165 | 0.589165 | 0 | 22 | 19.136364 | 39 |
Raymolu/DjangoDev1 | 12,704,513,267,409 | 1d69fece6dd81cdbae11f9a6529339e1e7a0d4a2 | 8ae01e427d9302abde2768bb6db35fcb16ff9c68 | /DjangoT1/trackit/TicketManager/migrations/0005_auto_20200312_1457.py | 9d74b6fed10236b918b9fadb6f583e1f6a712f0a | []
| no_license | https://github.com/Raymolu/DjangoDev1 | d98e6b372da4812fd26e9ef27e29ee063ae2710a | 4ddd61565db7185c70363f585dede3027c10f796 | refs/heads/master | 2021-03-29T21:20:34.670126 | 2020-03-20T21:53:27 | 2020-03-20T21:53:27 | 247,985,843 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.2.5 on 2020-03-12 18:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('TicketManager', '0004_auto_20200312_1454'),
]
operations = [
migrations.RenameField(
model_name='ticket',
old_name='status1',
new_name='status',
),
]
| UTF-8 | Python | false | false | 370 | py | 40 | 0005_auto_20200312_1457.py | 26 | 0.575676 | 0.489189 | 0 | 18 | 19.555556 | 53 |
mwales/security | 12,317,966,220,764 | 84a777bfedfc7f080e36456287684d89c77896d2 | 5a3c7d80cb3c96c91953d53f1e2965fce619db55 | /ctf/tamu_ctf_22/pwn/trivial/solve.py | 852896652b315b0baac7cba07b645bea36052811 | []
| no_license | https://github.com/mwales/security | 6aa45f8938de402b460628654b4cb07e94bd2339 | a532866fc0e747f2c935f64495d97fdf8a364fa0 | refs/heads/master | 2023-09-01T21:41:20.329823 | 2023-08-27T03:45:28 | 2023-08-27T03:45:28 | 68,429,771 | 8 | 9 | null | false | 2018-12-01T06:11:20 | 2016-09-17T03:31:15 | 2018-12-01T06:11:17 | 2018-12-01T06:11:16 | 4,276 | 2 | 3 | 0 | C++ | false | null | #!/usr/bin/env python3
# Simple buffer overflow. Just call win function and then use remote shell
# to dump out the flag.txt
from pwn import *
p = remote("tamuctf.com", 443, ssl=True, sni="trivial")
#p = process("./trivial")
#p = gdb.debug("./trivial",'''
# b main
# run
# ''')
e = ELF("./trivial")
winAddr = e.symbols["win"]
print("Win addr {}".format(hex(winAddr)))
payload = b'a' * 0x58
payload += p64(winAddr)
p.send(payload)
p.send(b"\n")
p.interactive()
| UTF-8 | Python | false | false | 494 | py | 212 | solve.py | 106 | 0.611336 | 0.593117 | 0 | 30 | 15.466667 | 75 |
arthur900530/Python-practice | 14,972,256,001,577 | f7c8bd53a801dcfdf1b73ee2bb26e9bb0b182915 | e70d6ef3a7c44a088e6b4fcaa8c73cac9e85bfa8 | /Ch23/example1.py | 8462237eb45dc0328afc8a759524cf58a8f20dfd | []
| no_license | https://github.com/arthur900530/Python-practice | 731dd3597f6493d95c77c2ed04f6905e7592ac9a | 73e8b106e639964077dc638275d96c830f51afc7 | refs/heads/master | 2023-07-29T04:06:56.556199 | 2021-09-17T12:30:37 | 2021-09-17T12:30:37 | 407,533,594 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import bs4
import requests
url = 'https://www.taiwanlottery.com.tw/index_new.aspx'
html = requests.get(url)
print('Downloading...')
html.raise_for_status()
print('Finish download !')
objSoup = bs4.BeautifulSoup(html.text,'lxml')
dataTag = objSoup.select('.contents_box02')
print('Length:',len(dataTag))
# for i in dataTag:
# print(i)
balls = dataTag[0].find_all('div',{'class':'ball_tx ball_green'})
redBall = dataTag[0].find_all('div',{'class':'ball_red'})
for i in range(6):
print(balls[i].text, end=' ')
print('\n',redBall[0].text) | UTF-8 | Python | false | false | 544 | py | 46 | example1.py | 39 | 0.681985 | 0.667279 | 0 | 19 | 27.684211 | 65 |
dinhluongzalora/Practical-Security-Automation-and-Testing | 4,217,657,912,703 | 6cbc83a95312fe0ebc9a5c448e9d4426c6e79d2c | 55ff0e0d23feb2684bbdf7b29d2302d12d3d1501 | /Chapter06/userregistration_SB.py | ddd0f36f37140f06eb30708a6a4e2e45dcee2ad0 | [
"MIT"
]
| permissive | https://github.com/dinhluongzalora/Practical-Security-Automation-and-Testing | 1b525b5458446b3f92767d08322eb695c39a1b78 | bd864948115d01c55ddf8a2c0d4f5507730cb085 | refs/heads/master | 2021-07-14T02:13:56.315273 | 2021-01-18T08:06:55 | 2021-01-18T08:06:55 | 173,987,517 | 0 | 0 | MIT | true | 2019-03-05T17:05:07 | 2019-03-05T17:05:07 | 2019-02-07T06:16:17 | 2019-02-07T06:16:12 | 72 | 0 | 0 | 0 | null | false | null | # -*- coding: utf-8 -*-
from seleniumbase import BaseCase
class UserRegistration(BaseCase):
def test_user_registration(self):
self.open('http://hackazon.webscantest.com/')
self.click("link=Sign In / Sign Up")
self.click('#username')
self.click("link=New user?")
self.click('#first_name')
self.update_text('#first_name', 'myFirstName')
self.update_text('#last_name', 'myLastName')
self.update_text('#username', 'myUserName1')
self.update_text('#email', 'abc@a.com')
self.update_text('#password', 'pass1234')
self.update_text('#password_confirmation', 'pass1234')
self.click("//input[@value='Register']")
| UTF-8 | Python | false | false | 708 | py | 13 | userregistration_SB.py | 5 | 0.612994 | 0.59887 | 0 | 17 | 40.411765 | 62 |
mizuno-group/enan | 13,460,427,524,877 | c538eb8ece6dd45d95dab67f71f7f14bfd6d2eda | 7e820b016306d13caeca4092f40fc0d179a12685 | /enan/data/data.py | e18c18a12ac3d97df9e53dee4d81aac0b58759a9 | [
"MIT"
]
| permissive | https://github.com/mizuno-group/enan | 821045cb87648a789d39c5e36feab1c0c769494c | 3c9dbe60bebf98e384e858db56980928b5897775 | refs/heads/master | 2023-01-24T20:27:43.966650 | 2020-12-05T07:29:39 | 2020-12-05T07:29:39 | 318,722,639 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 23 13:05:34 2019
Data class
Analyzer o-- DataControl *-- Data
@author: tadahaya
"""
import pandas as pd
import numpy as np
from .adjuster import *
__all__ = ["Data","SeqData","SetData","SetTSData","VectorData"]
# abstract class
class Data():
def __init__(self):
self.data = None
self.whole = set()
def set_data(self,data):
""" set data """
raise NotImplementedError
def set_whole(self,whole):
""" set whole """
self.whole = whole
def get_data(self):
""" get data """
return self.data
def get_whole(self):
""" get whole """
return self.whole
def adjust(self,**kwargs):
""" adjust data to the indicated whole """
raise NotImplementedError
# concrete class
class SeqData(Data):
""" data given as a set """
def __init__(self):
super().__init__()
self.data = {}
self.__adj = SeqAdjuster()
def set_data(self,data):
""" load data """
if type(data)!=set:
raise TypeError("!! data should be a set !!")
self.data = data
def adjust(self,**kwargs):
""" adjust data to the indicated whole """
self.data = self.__adj.adjust(self.data,self.whole,**kwargs)
# concrete class
class SetData(Data):
""" data given as a dict of {term:sets of group} """
def __init__(self):
super().__init__()
self.data = dict()
self.__adj = SetAdjuster() # private
def set_data(self,data):
""" load data """
if type(data)!=dict:
raise TypeError("!! data should be a dict !!")
self.data = data
def adjust(self,**kwargs):
""" adjust data to the indicated whole """
self.data = self.__adj.adjust(self.data,self.whole,**kwargs)
# concrete class
class SetTSData(Data):
""" data given as a dict of {term:tuples of up/down tags} """
def __init__(self):
super().__init__()
self.data = dict()
self.__adj = SetTSAdjuster() # private
def set_data(self,data):
""" load data """
if type(data)!=dict:
raise TypeError("!! data should be a dict !!")
elif type(list(data.values())[0])!=tuple:
raise TypeError("!! data should be a dict of tuples of up/down tags !!")
self.data = data
def adjust(self,**kwargs):
""" adjust data to the indicated whole """
self.data = self.__adj.adjust(self.data,self.whole,**kwargs)
# concrete class
class VectorData(Data):
""" data given as a dataframe """
def __init__(self):
super().__init__()
self.data = pd.DataFrame()
self.__adj = VectorAdjuster() # private
def set_data(self,data):
""" load data """
if type(data)==pd.core.series.Series:
self.data = pd.DataFrame(data)
elif type(data)==pd.core.frame.DataFrame:
self.data = data
else:
raise TypeError("!! data should be a dataframe !!")
def adjust(self,**kwargs):
""" adjust data to the indicated whole """
self.data = self.__adj.adjust(self.data,self.whole,**kwargs) | UTF-8 | Python | false | false | 3,335 | py | 20 | data.py | 18 | 0.533733 | 0.529535 | 0 | 123 | 25.130081 | 84 |
lallmanish90/coltSteelePythonCourse | 1,271,310,358,701 | b60af5a98bcd9855e47c382001043221e38cc8ba | b8921d9f1823d3a9e4e1142ee7ae5ea9b7729dae | /OOP/creating_classes.py | 69c267d77322dd06839400f5d82c57b3b4d1c694 | []
| no_license | https://github.com/lallmanish90/coltSteelePythonCourse | ef843073057ccc8d01c9df1c8533043e0e706375 | 204b89e489ad119eea98d749dbb821dab3177837 | refs/heads/master | 2023-04-25T01:41:29.501147 | 2021-05-04T04:00:24 | 2021-05-04T04:00:24 | 269,444,978 | 0 | 0 | null | true | 2020-06-04T19:17:54 | 2020-06-04T19:17:53 | 2020-06-04T15:51:37 | 2020-06-04T15:51:34 | 111 | 0 | 0 | 0 | null | false | false | """
Defining the simplest possible class
whenever creating a class , python
"""
# classes are singular with camel case
# self referse to the that current instance
# init will auto run when a class is called
class User:
def __init__(self, first, last, age):
#print("A new user has been made!")
self.first = first
self.last = last
self.age = age
user1 = User("Joe", "Flaco", 32)
user2 = User("Blanca", "Smith", 17)
print(user1.first)
print(user1.last)
print(user1.age)
print(user2.first)
print(user2.last)
print(user2.age)
| UTF-8 | Python | false | false | 564 | py | 125 | creating_classes.py | 122 | 0.666667 | 0.64539 | 0 | 28 | 19.142857 | 43 |
prateek5794/hackerrank | 14,147,622,304,791 | 05ba169f3593d5fff579a364e92e810541973793 | 3f493480bb6d1fde73db470598687d13e818f14f | /strings/anagram-English/anagram.py | f2401094cc5833d0d35420712cc35d6aa4c6e4bb | []
| no_license | https://github.com/prateek5794/hackerrank | b837b0971436778740512927631fa198820505bd | bc6e916f8b5986677d4e9ee5a3a22305818f84e0 | refs/heads/master | 2020-05-20T19:27:52.789177 | 2016-07-08T19:25:29 | 2016-07-08T19:25:29 | 59,769,610 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | t=int(input())
for _ in range(t):
s=input()
if len(s)%2==1:
print('-1')
continue
else:
x=int(len(s)/2)
for k in range(x):
s=s[:x]+s[x:].replace(s[k],'',1)
print(len(s[x:]))
| UTF-8 | Python | false | false | 237 | py | 80 | anagram.py | 78 | 0.413502 | 0.392405 | 0 | 11 | 20.545455 | 48 |
chakilamnaresh/pythonlearning | 16,080,357,593,314 | a5ce881eeff17d01ebcb538100f094ac2f7c04df | d017d6aeebac14dc4ec364a0892cd0e334071364 | /sum/com/naresh/fileOperations.py | c55cd0d29d0e6e396d8204b1929c5dcc4a64eedb | []
| no_license | https://github.com/chakilamnaresh/pythonlearning | f4d4d4a5e42262728963cb93e6f85ef682bae404 | 0c02e0ed34b4f08e4ebb30962d8b131ec2761154 | refs/heads/master | 2020-04-01T11:31:53.139956 | 2018-10-15T19:47:40 | 2018-10-15T19:47:40 | 153,166,453 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
with open('E:\states.json') as f:
data=json.load(f)
for state in data['states']:
del state['area_codes']
print(state)
with open('E:\states2.json','w') as f:
json.dump(data,f)
| UTF-8 | Python | false | false | 231 | py | 64 | fileOperations.py | 61 | 0.558442 | 0.554113 | 0 | 11 | 19.181818 | 43 |
AtlasWHHV/ATLASMCValidation | 12,000,138,673,268 | 7723b844195eb1b581fef7a98b76d4f0b3636fcc | 5f8482008795504e17db32c814586567d8ad8483 | /mcjo/MC12.191300.Pythia8_AU2MSTW2008LO_HV_ggH_mH300_mVPI100.py | a2b12a97f4e8b11054720df85151842a36b53662 | []
| no_license | https://github.com/AtlasWHHV/ATLASMCValidation | 4694cf096f39be3f393ec6f41656aca9e110fd1d | 3b33e86eadb577f00ff9d5201d4d7288b0499820 | refs/heads/master | 2020-12-31T07:11:49.959589 | 2017-02-01T01:53:58 | 2017-02-01T01:53:58 | 80,581,202 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ###############################################################
# Pythia8 H_v -> pi_v pi_v
# contact: Gordon Watts (gwatts@uw.edu)
#===============================================================
evgenConfig.description = "Higgs (mH300) to pi_v (mVPI100) to displaced heavy fermions"
evgenConfig.keywords = ["exotic", "hiddenValley", "longLived"]
evgenConfig.contact = ["gwatts@uw.edu"]
# Specify MSTW2008LO PDF
include("MC12JobOptions/Pythia8_AU2_MSTW2008LO_Common.py")
topAlg.Pythia8.Commands += ["ParticleDecays:limitTau0 = off"] # Allow long-lived particles to decay
topAlg.Pythia8.Commands += ["35:name = H_v"] # Set H_v name
topAlg.Pythia8.Commands += ["36:name = pi_v"] # Set pi_v name
topAlg.Pythia8.Commands += ["Higgs:useBSM = on"] # Turn on BSM Higgses
topAlg.Pythia8.Commands += ["HiggsBSM:gg2H2 = on"] # Turn on gg->H_v production
topAlg.Pythia8.Commands += ["35:onMode = off"] # Turn off all H_v decays
topAlg.Pythia8.Commands += ["35:onIfAll = 36 36"] # Turn back on H_v->pi_vpi_v
topAlg.Pythia8.Commands += ["35:m0 = 300"] # Set H_v mass
topAlg.Pythia8.Commands += ["36:m0 = 100"] # Set pi_v mass
topAlg.Pythia8.Commands += ["36:tau0 = 1330"] # Set pi_v lifetime
# Turn off checks for displaced vertices. Include the filter
# to make sure it is appended and we can access it (the code
# won't include it again).
include("EvgenJobTransforms/Generate_TestHepMC.py")
topAlg.TestHepMC.MaxVtxDisp = 1000*1000 #In mm
topAlg.TestHepMC.MaxTransVtxDisp = 1000*1000
| UTF-8 | Python | false | false | 1,523 | py | 8 | MC12.191300.Pythia8_AU2MSTW2008LO_HV_ggH_mH300_mVPI100.py | 6 | 0.636901 | 0.58503 | 0 | 33 | 45.151515 | 99 |
jackey-qiu/DaFy_P23 | 5,059,471,499,037 | 58478c25216f39bcbed2eb92bafbb1d821072b01 | cb6e5c8a91dce5911afbbbb7a8a4b55bc0c7687e | /scripts/SuperRod/superrod_GUI_pyqtgraph.py | c3e05a7192da23928273300240712403b6111f66 | []
| no_license | https://github.com/jackey-qiu/DaFy_P23 | 3870d4e436b0e9df7f1dcb747caaf38589274f92 | ad2ca8e16e92935233e84c2d9fe2b59f4f114444 | refs/heads/master | 2022-04-10T18:32:24.392046 | 2020-03-22T18:22:46 | 2020-03-22T18:22:46 | 198,180,139 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys,os, qdarkstyle
import traceback
from io import StringIO
from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QMessageBox
from PyQt5 import uic
import random
import numpy as np
import pandas as pd
import types,copy
import matplotlib.pyplot as plt
try:
from . import locate_path
except:
import locate_path
script_path = locate_path.module_path_locator()
DaFy_path = os.path.dirname(os.path.dirname(script_path))
sys.path.append(DaFy_path)
sys.path.append(os.path.join(DaFy_path,'dump_files'))
sys.path.append(os.path.join(DaFy_path,'EnginePool'))
sys.path.append(os.path.join(DaFy_path,'FilterPool'))
sys.path.append(os.path.join(DaFy_path,'util'))
sys.path.append(os.path.join(DaFy_path,'scripts'))
from UtilityFunctions import locate_tag
from UtilityFunctions import apply_modification_of_code_block as script_block_modifier
from models.structure_tools.sxrd_dafy import AtomGroup
from models.utils import UserVars
import diffev
from fom_funcs import *
import parameters
import data_superrod as data
import model
import solvergui
import time
import matplotlib
matplotlib.use("Qt5Agg")
import pyqtgraph as pg
import pyqtgraph.exporters
from PyQt5 import QtCore
from PyQt5.QtWidgets import QCheckBox, QRadioButton, QTableWidgetItem, QHeaderView, QAbstractItemView, QInputDialog
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtGui import QTransform, QFont, QBrush, QColor, QIcon
from pyqtgraph.Qt import QtGui
import syntax_pars
from models.structure_tools import sorbate_tool
# from chemlab.graphics.renderers import AtomRenderer
# from chemlab.db import ChemlabDB
#from matplotlib.backends.backend_qt5agg import (NavigationToolbar2QT as NavigationToolbar)
class RunFit(QtCore.QObject):
    """Qt worker object that drives the diffev fit on a background thread.

    The ``updateplot`` signal is handed to the solver, which emits it
    periodically during fitting so the GUI can refresh plots and tables
    from the main thread.
    """

    updateplot = QtCore.pyqtSignal(str, object)

    def __init__(self, solver):
        super().__init__()
        self.solver = solver
        self.running = False

    def run(self):
        """Start the fit; returns only when the optimizer finishes or stops."""
        self.running = True
        self.solver.optimizer.stop = False
        self.solver.StartFit(self.updateplot)

    def stop(self):
        """Ask the running optimizer to halt at its next opportunity."""
        self.running = False
        self.solver.optimizer.stop = True
class MyMainWindow(QMainWindow):
    def __init__(self, parent = None):
        """Build the main SuperRod window.

        Loads the Qt Designer UI file, creates the model and the background
        fit worker/thread, and wires every toolbar action, push button,
        combo box and timer to its handler method.

        Parameters
        ----------
        parent : QWidget or None
            Optional parent widget forwarded to QMainWindow.
        """
        super(MyMainWindow, self).__init__(parent)
        pg.setConfigOptions(imageAxisOrder='row-major', background = 'k')
        pg.mkQApp()
        uic.loadUi(os.path.join(DaFy_path,'scripts','SuperRod','superrod3.ui'),self)
        self.setWindowTitle('Data analysis factory: CTR data modeling')
        icon = QIcon(os.path.join(script_path,"DAFY.png"))
        self.setWindowIcon(icon)
        self.comboBox_all_motif.insertItems(0, sorbate_tool.ALL_MOTIF_COLLECTION)
        # self.show()
        #state flags and per-session bookkeeping
        self.stop = False
        self.show_checkBox_list = []
        self.domain_tag = 1
        self.data_profiles = []
        #set fom_func
        #self.fom_func = chi2bars_2
        #parameters
        #self.parameters = parameters.Parameters()
        #scripts
        #self.script = ''
        #script module
        #self.script_module = types.ModuleType('genx_script_module')
        self.model = model.Model()
        # self.solver = solvergui.SolverController(self)
        #fit worker lives on its own QThread so the GUI stays responsive
        self.run_fit = RunFit(solvergui.SolverController(self.model))
        self.fit_thread = QtCore.QThread()
        # self.structure_view_thread = QtCore.QThread()
        # self.widget_edp.moveToThread(self.structure_view_thread)
        self.run_fit.moveToThread(self.fit_thread)
        #self.run_fit.updateplot.connect(self.update_plot_data_view_upon_simulation)
        self.run_fit.updateplot.connect(self.update_par_during_fit)
        self.run_fit.updateplot.connect(self.update_status)
        #self.run_fit.updateplot.connect(self.update_structure_view)
        # self.run_fit.updateplot.connect(self.start_timer_structure_view)
        self.fit_thread.started.connect(self.run_fit.run)
        #tool bar buttons to operate modeling
        self.actionNew.triggered.connect(self.init_new_model)
        self.actionOpen.triggered.connect(self.open_model)
        self.actionSave.triggered.connect(self.save_model)
        self.actionSimulate.triggered.connect(self.simulate_model)
        self.actionRun.triggered.connect(self.run_model)
        self.actionStop.triggered.connect(self.stop_model)
        self.actionCalculate.triggered.connect(self.calculate_error_bars)
        #pushbuttons for data handeling
        self.pushButton_load_data.clicked.connect(self.load_data_ctr)
        self.pushButton_append_data.clicked.connect(self.append_data)
        self.pushButton_delete_data.clicked.connect(self.delete_data)
        self.pushButton_save_data.clicked.connect(self.save_data)
        # self.pushButton_calculate.clicked.connect(self.calculate)
        self.pushButton_update_mask.clicked.connect(self.update_mask_info_in_data)
        #pushbuttons for structure view
        self.pushButton_azimuth_0.clicked.connect(self.azimuth_0)
        self.pushButton_azimuth_90.clicked.connect(self.azimuth_90)
        self.pushButton_elevation_0.clicked.connect(self.elevation_0)
        self.pushButton_elevation_90.clicked.connect(self.elevation_90)
        self.pushButton_parallel.clicked.connect(self.parallel_projection)
        self.pushButton_projective.clicked.connect(self.projective_projection)
        #spinBox to save the domain_tag
        self.spinBox_domain.valueChanged.connect(self.update_domain_index)
        #pushbutton for changing plotting style
        # self.pushButton_toggle_bkg_color.clicked.connect(self.change_plot_style)
        #pushbutton to load/save script
        self.pushButton_load_script.clicked.connect(self.load_script)
        self.pushButton_save_script.clicked.connect(self.save_script)
        self.pushButton_modify_script.clicked.connect(self.modify_script)
        #pushbutton to load/save parameter file
        self.pushButton_load_table.clicked.connect(self.load_par)
        self.pushButton_save_table.clicked.connect(self.save_par)
        self.pushButton_remove_rows.clicked.connect(self.remove_selected_rows)
        self.pushButton_add_one_row.clicked.connect(self.append_one_row)
        self.pushButton_update_plot.clicked.connect(self.update_structure_view)
        self.pushButton_update_plot.clicked.connect(self.update_plot_data_view_upon_simulation)
        self.pushButton_update_plot.clicked.connect(self.update_par_bar_during_fit)
        self.pushButton_add_par_set.clicked.connect(self.append_par_set)
        #select dataset in the viewer
        self.comboBox_dataset.activated.connect(self.update_data_view)
        #syntax highlight
        self.plainTextEdit_script.setStyleSheet("""QPlainTextEdit{
                                 font-family:'Consolas';
                                 font-size:14pt;
                                 color: #ccc;
                                 background-color: #2b2b2b;}""")
        self.plainTextEdit_script.setTabStopWidth(self.plainTextEdit_script.fontMetrics().width(' ')*4)
        #self.data = data.DataList()
        #table view for parameters set to selecting row basis
        # self.tableWidget_pars.itemChanged.connect(self.update_par_upon_change)
        self.tableWidget_pars.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.tableWidget_data.setSelectionBehavior(QAbstractItemView.SelectRows)
        #timers: periodic model autosave and periodic structure-view refresh
        self.timer_save_data = QtCore.QTimer(self)
        self.timer_save_data.timeout.connect(self.save_model)
        self.timer_update_structure = QtCore.QTimer(self)
        self.timer_update_structure.timeout.connect(self.pushButton_update_plot.click)
        self.setup_plot()
def update_domain_index(self):
self.domain_tag = int(self.spinBox_domain.text())
if self.model.compiled:
self.widget_edp.items = []
self.widget_msv_top.items = []
self.init_structure_view()
else:
pass
def parallel_projection(self):
self.widget_edp.opts['distance'] = 2000
self.widget_edp.opts['fov'] = 1
self.widget_msv_top.opts['distance'] = 2000
self.widget_msv_top.opts['fov'] = 1
self.update_structure_view()
def projective_projection(self):
self.widget_edp.opts['distance'] = 25
self.widget_edp.opts['fov'] = 60
self.widget_msv_top.opts['distance'] = 25
self.widget_msv_top.opts['fov'] = 60
self.update_structure_view()
def update_camera_position(self,widget_name = 'widget_edp', angle_type="azimuth", angle=0):
#getattr(self,widget_name)
getattr(self,widget_name).setCameraPosition(pos=None, distance=None, \
elevation=[None,angle][int(angle_type=="elevation")], \
azimuth=[None,angle][int(angle_type=="azimuth")])
def azimuth_0(self):
self.update_camera_position(angle_type="azimuth", angle=0)
def azimuth_90(self):
self.update_camera_position(angle_type="azimuth", angle=90)
def elevation_0(self):
self.update_camera_position(angle_type="elevation", angle=0)
def elevation_90(self):
self.update_camera_position(angle_type="elevation", angle=90)
#do this after model is loaded, so that you know len(data)
def update_plot_dimension(self, columns = 2):
self.widget_data.clear()
self.widget_data.ci.currentRow = 0
self.widget_data.ci.currentCol = 0
self.data_profiles = []
total_datasets = len(self.model.data)
#current list of ax handle
# ax_list_now = list(range(len(self.data_profiles)))
for i in range(total_datasets):
# if i not in ax_list_now:
if 1:
hk_label = '{}{}L'.format(str(int(self.model.data[i].extra_data['h'][0])),str(int(self.model.data[i].extra_data['k'][0])))
if (i%columns)==0 and (i!=0):
self.widget_data.nextRow()
self.data_profiles.append(self.widget_data.addPlot(title=hk_label))
else:
self.data_profiles.append(self.widget_data.addPlot(title=hk_label))
def setup_plot(self):
self.fom_evolution_profile = self.widget_fom.addPlot()
self.par_profile = self.widget_pars.addPlot()
self.fom_scan_profile = self.widget_fom_scan.addPlot()
def update_data_check_attr(self):
re_simulate = False
for i in range(len(self.model.data)):
self.model.data[i].show = self.tableWidget_data.cellWidget(i,1).isChecked()
self.model.data[i].use_error = self.tableWidget_data.cellWidget(i,3).isChecked()
if self.model.data[i].use!=self.tableWidget_data.cellWidget(i,2).isChecked():
re_simulate = True
self.model.data[i].use = self.tableWidget_data.cellWidget(i,2).isChecked()
if re_simulate:
self.simulate_model()
def update_plot_data_view(self):
if self.model.compiled:
self.update_data_check_attr()
self.update_plot_data_view_upon_simulation()
else:
# plot_data_index = []
for i in range(len(self.model.data)):
# if self.tableWidget_data.cellWidget(i,1).isChecked():
fmt = self.tableWidget_data.item(i,4).text()
fmt_symbol = list(fmt.rstrip().rsplit(';')[0].rsplit(':')[1])
# self.selected_data_profile.plot(self.model.data[i].x, self.model.data[i].y,pen = None, symbolBrush=fmt_symbol[1], symbolSize=int(fmt_symbol[0]),symbolPen=fmt_symbol[2], clear = (len(plot_data_index) == 0))
self.data_profiles[i].plot(self.model.data[i].x, self.model.data[i].y,pen = None, symbolBrush=fmt_symbol[1], symbolSize=int(fmt_symbol[0]),symbolPen=fmt_symbol[2], clear = True)
#plot_data_index.append(i)
[each.setLogMode(x=False,y=True) for each in self.data_profiles]
[each.autoRange() for each in self.data_profiles]
#self.selected_data_profile.autoRange()
def update_plot_data_view_upon_simulation(self):
for i in range(len(self.model.data)):
# if hasattr(self.model.data[i],'mask'):
# mask_index = (self.model.data[i].mask==True)
# else:
# mask_index = np.array([True]*len(self.model.data[i].x))
# if self.tableWidget_data.cellWidget(i,1).isChecked():
if 1:
fmt = self.tableWidget_data.item(i,4).text()
fmt_symbol = list(fmt.rstrip().rsplit(';')[0].rsplit(':')[1])
line_symbol = list(fmt.rstrip().rsplit(';')[1].rsplit(':')[1])
# self.selected_data_profile.plot(self.data[i].x, self.data[i].y, clear = True)
# self.selected_data_profile.plot(self.model.data[i].x, self.model.data[i].y,pen={'color': 'y', 'width': 0}, symbolBrush=(255,0,0), symbolSize=5,symbolPen='w', clear = (len(plot_data_index) == 0))
self.data_profiles[i].plot(self.model.data[i].x, self.model.data[i].y,pen = None, symbolBrush=fmt_symbol[1], symbolSize=int(fmt_symbol[0]),symbolPen=fmt_symbol[2], clear = True)
if self.tableWidget_data.cellWidget(i,2).isChecked():
self.data_profiles[i].plot(self.model.data[i].x, self.model.data[i].y_sim,pen={'color': line_symbol[1], 'width': int(line_symbol[0])}, clear = False)
else:
pass
# plot_data_index.append(i)
[each.setLogMode(x=False,y=True) for each in self.data_profiles]
[each.autoRange() for each in self.data_profiles]
# self.selected_data_profile.setLogMode(x=False,y=True)
# self.selected_data_profile.autoRange()
fom_log = np.array(self.run_fit.solver.optimizer.fom_log)
#print(fom_log)
self.fom_evolution_profile.plot(fom_log[:,0],fom_log[:,1],pen={'color': 'r', 'width': 2}, clear = True)
self.fom_evolution_profile.autoRange()
def update_par_bar_during_fit(self):
if self.run_fit.running:
par_max = self.run_fit.solver.optimizer.par_max
par_min = self.run_fit.solver.optimizer.par_min
vec_best = copy.deepcopy(self.run_fit.solver.optimizer.best_vec)
vec_best = (vec_best-par_min)/(par_max-par_min)
pop_vec = np.array(copy.deepcopy(self.run_fit.solver.optimizer.pop_vec))
trial_vec_min =[]
trial_vec_max =[]
for i in range(len(par_max)):
trial_vec_min.append((np.min(pop_vec[:,i])-par_min[i])/(par_max[i]-par_min[i]))
trial_vec_max.append((np.max(pop_vec[:,i])-par_min[i])/(par_max[i]-par_min[i]))
trial_vec_min = np.array(trial_vec_min)
trial_vec_max = np.array(trial_vec_max)
bg = pg.BarGraphItem(x=range(len(vec_best)), y=(trial_vec_max + trial_vec_min)/2, height=(trial_vec_max - trial_vec_min)/2, brush='b', width = 0.8)
# best_ = pg.ScatterPlotItem(size=10, pen=(200,200,200), brush=pg.mkBrush(255, 255, 255, 120))
# best_.addPoints([{'pos':range(len(vec_best)),'data':vec_best}])
# print(trial_vec_min)
# print(trial_vec_max)
# print(par_min)
# print(par_max)
self.par_profile.clear()
self.par_profile.addItem(bg)
# self.par_profile.addItem(best_)
# p1 = self.par_profile.addPlot()
self.par_profile.plot(vec_best, pen=(0,0,0), symbolBrush=(255,0,0), symbolPen='w')
else:
pass
def calculate_error_bars(self):
try:
error_bars = self.run_fit.solver.CalcErrorBars()
total_num_par = len(self.model.parameters.data)
index_list = [i for i in range(total_num_par) if self.model.parameters.data[i][2]]
for i in range(len(error_bars)):
self.model.parameters.data[index_list[i]][-1] = error_bars[i]
self.update_par_upon_load()
except diffev.ErrorBarError as e:
_ = QMessageBox.question(self, 'Runtime error message', str(e), QMessageBox.Ok)
def init_new_model(self):
reply = QMessageBox.question(self, 'Message', 'Would you like to save the current model first?', QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.Yes:
self.save_model()
self.model = model.Model()
self.run_fit.solver.model = self.model
self.tableWidget_data.setRowCount(0)
self.tableWidget_pars.setRowCount(0)
self.plainTextEdit_script.setPlainText('')
self.comboBox_dataset.clear()
self.tabelWidget_data_view.setRowCount(0)
self.update_plot_data_view()
    def open_model(self):
        """Load a model from a user-chosen .rod/.rar file and rebuild the GUI.

        Restores datasets (adding a default all-True mask where missing),
        solver configuration, the parameter table, the script editor and the
        3D structure views, and exposes model/solver objects in the embedded
        terminal.  A failure while restoring the solver config is reported in
        the status bar but does not abort the load.
        """
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", "","rod file (*.rod);;zip Files (*.rar)", options=options)
        load_add_ = 'success'
        if fileName:
            self.setWindowTitle('Data analysis factory: CTR data modeling-->{}'.format(fileName))
            self.model.load(fileName)
            self.update_plot_dimension()
            try:
                self.load_addition()
            except:
                # Older model files may lack solver config; keep loading anyway.
                load_add_ = 'failure'
            #add a mask attribute to each dataset
            for each in self.model.data_original:
                if not hasattr(each,'mask'):
                    each.mask = np.array([True]*len(each.x))
            for each in self.model.data:
                if not hasattr(each,'mask'):
                    each.mask = np.array([True]*len(each.x))
            #add model space to terminal
            self.widget_terminal.update_name_space("model",self.model)
            self.widget_terminal.update_name_space("solver",self.run_fit.solver)
            self.widget_terminal.update_name_space("win",self)
            #remove items in the msv and re-initialize it
            self.widget_edp.items = []
            self.widget_msv_top.items = []
            #update other pars
            self.update_table_widget_data()
            self.update_combo_box_dataset()
            self.update_plot_data_view()
            self.update_par_upon_load()
            self.update_script_upon_load()
            # self.init_structure_view()
            #model is simulated at the end of next step
            self.init_mask_info_in_data_upon_loading_model()
            #add name space for cal bond distance after simulation
            self.widget_terminal.update_name_space("report_distance",self.model.script_module.sample.inter_atom_distance_report)
            #now set the comboBox for par set
            self.update_combo_box_list_par_set()
            self.statusbar.clearMessage()
            self.statusbar.showMessage("Model is loaded, and {} in config loading".format(load_add_))
        # self.update_mask_info_in_data()
def update_combo_box_list_par_set(self):
attrs = self.model.script_module.__dir__()
attr_wanted = [each for each in attrs if type(getattr(self.model.script_module, each)) in [AtomGroup, UserVars]]
num_items = self.comboBox_register_par_set.count()
for i in range(num_items):
self.comboBox_register_par_set.removeItem(0)
self.comboBox_register_par_set.insertItems(0,attr_wanted)
def append_par_set(self):
par_selected = self.comboBox_register_par_set.currentText()
#attrs = getattr(self.model.script_module, par_selected)
attrs = eval("self.model.script_module.{}.__dir__()".format(par_selected))
attrs_wanted = [each for each in attrs if each.startswith("set")]
rows = self.tableWidget_pars.selectionModel().selectedRows()
if len(rows) == 0:
row_index = self.tableWidget_pars.rowCount()
else:
row_index = rows[-1].row()
for ii in range(len(attrs_wanted)):
self.tableWidget_pars.insertRow(row_index)
current_value = eval("self.model.script_module."+par_selected+'.'+attrs_wanted[ii].replace('set','get')+"()")
for i in range(6):
if i==2:
check_box = QCheckBox()
check_box.setChecked(False)
self.tableWidget_pars.setCellWidget(row_index,2,check_box)
else:
if i == 0:
qtablewidget = QTableWidgetItem(".".join([par_selected,attrs_wanted[ii]]))
qtablewidget.setFont(QFont('Times',10,QFont.Bold))
elif i in [1]:
qtablewidget = QTableWidgetItem(str(round(current_value,4)))
qtablewidget.setForeground(QBrush(QColor(255,0,255)))
elif i ==5:
qtablewidget = QTableWidgetItem('(0,0)')
elif i ==3:
qtablewidget = QTableWidgetItem(str(round(current_value*0.5,4)))
elif i ==4:
qtablewidget = QTableWidgetItem(str(round(current_value*1.5,4)))
self.tableWidget_pars.setItem(row_index,i,qtablewidget)
self.update_model_parameter()
def save_model(self):
path, _ = QFileDialog.getSaveFileName(self, "Save file", "", "rod file (*.rod);;zip files (*.rar)")
if path:
self.model.script = (self.plainTextEdit_script.toPlainText())
self.model.save(path)
save_add_ = 'success'
try:
self.save_addition()
except:
save_add_ = "failure"
self.statusbar.clearMessage()
self.statusbar.showMessage("Model is saved, and {} in config saving".format(save_add_))
#here save also the config pars for diffev solver
def save_addition(self):
values=\
[self.widget_solver.par.param('Diff.Ev.').param('k_m').value(),
self.widget_solver.par.param('Diff.Ev.').param('k_r').value(),
self.widget_solver.par.param('Diff.Ev.').param('Method').value(),
self.widget_solver.par.param('FOM').param('Figure of merit').value(),
self.widget_solver.par.param('FOM').param('Auto save, interval').value(),
self.widget_solver.par.param('Fitting').param('start guess').value(),
self.widget_solver.par.param('Fitting').param('Generation size').value(),
self.widget_solver.par.param('Fitting').param('Population size').value()]
pars = ['k_m','k_r','Method','Figure of merit','Auto save, interval','start guess','Generation size','Population size']
for i in range(len(pars)):
self.model.save_addition(pars[i],str(values[i]))
def load_addition(self):
funcs=\
[self.widget_solver.par.param('Diff.Ev.').param('k_m').setValue,
self.widget_solver.par.param('Diff.Ev.').param('k_r').setValue,
self.widget_solver.par.param('Diff.Ev.').param('Method').setValue,
self.widget_solver.par.param('FOM').param('Figure of merit').setValue,
self.widget_solver.par.param('FOM').param('Auto save, interval').setValue,
self.widget_solver.par.param('Fitting').param('start guess').setValue,
self.widget_solver.par.param('Fitting').param('Generation size').setValue,
self.widget_solver.par.param('Fitting').param('Population size').setValue]
types= [float,float,str,str,int,bool,int,int]
pars = ['k_m','k_r','Method','Figure of merit','Auto save, interval','start guess','Generation size','Population size']
for i in range(len(pars)):
type_ = types[i]
if type_ == float:
value = np.round(float(self.model.load_addition(pars[i])),2)
elif type_==str:
value = self.model.load_addition(pars[i]).decode("utf-8")
else:
value = type_(self.model.load_addition(pars[i]))
funcs[i](value)
def simulate_model(self):
self.update_par_upon_change()
self.model.script = (self.plainTextEdit_script.toPlainText())
self.widget_solver.update_parameter_in_solver(self)
try:
self.model.simulate()
self.label_2.setText('FOM {}:{}'.format(self.model.fom_func.__name__,self.model.fom))
self.update_plot_data_view_upon_simulation()
self.init_structure_view()
self.statusbar.clearMessage()
self.update_combo_box_list_par_set()
self.statusbar.showMessage("Model is simulated successfully!")
except model.ModelError as e:
_ = QMessageBox.question(self, 'Runtime error message', str(e), QMessageBox.Ok)
#print("test error message!")
#print(str(e))
'''
self.compile_script()
# self.update_pars()
(funcs, vals) = self.get_sim_pars()
# Set the parameter values in the model
#[func(val) for func,val in zip(funcs, vals)]
i = 0
for func, val in zip(funcs,vals):
try:
func(val)
except Exception as e:
(sfuncs_tmp, vals_tmp) = self.parameters.get_sim_pars()
raise ParameterError(sfuncs_tmp[i], i, str(e), 1)
i += 1
self.evaluate_sim_func()
'''
#print(self.widget_solver.par.param("Fitting").param("start guess").value())
#print(self.widget_solver.par.param("Fitting").param("Population size").value())
    def run_model(self):
        """Start a fit: push the solver settings, start the fit thread, and
        start the 2-second timer that periodically refreshes the views."""
        # self.solver.StartFit()
        # self.start_timer_structure_view()
        # self.structure_view_thread.start()
        #button will be clicked every 2 second to update figures
        self.timer_update_structure.start(2000)
        self.widget_solver.update_parameter_in_solver(self)
        self.fit_thread.start()
    def stop_model(self):
        """Stop the running fit thread and the structure-view refresh timer."""
        self.run_fit.stop()
        self.fit_thread.terminate()
        self.timer_update_structure.stop()
        # self.stop_timer_structure_view()
def load_data(self, loader = 'ctr'):
self._empty_data_pool()
exec('self.load_data_{}()'.format(loader))
    def append_data(self):
        """Load more CTR datasets without clearing the existing data pool."""
        self.load_data_ctr()
    def _empty_data_pool(self):
        """Reset the data pool to a single empty placeholder dataset.

        The internal counter is reset so datasets added afterwards are
        numbered starting from 1 again.
        """
        #now empty the data pool
        self.model.data.items = [data.DataSet(name='Data 0')]
        self.model.data._counter = 1
def load_data_ctr(self):
#8 columns in total
#X, H, K, Y, I, eI, LB, dL
#for CTR data, X column is L column, Y column all 0
#for RAXR data, X column is energy column, Y column is L column
# self.data = data.DataList()
#self.model.compiled = False
self.model.compiled = False
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", "","csv Files (*.csv);;data Files (*.dat);txt Files (*.txt)", options=options)
current_data_set_name = [self.tableWidget_data.item(i,0).text() for i in range(self.tableWidget_data.rowCount())]
if fileName:
with open(fileName,'r') as f:
data_loaded = np.loadtxt(f,comments = '#',delimiter=None)
data_loaded_pd = pd.DataFrame(data_loaded, columns = ['X','h','k','Y','I','eI','LB','dL'])
data_loaded_pd['h'] = data_loaded_pd['h'].apply(lambda x:int(np.round(x)))
data_loaded_pd['k'] = data_loaded_pd['k'].apply(lambda x:int(np.round(x)))
data_loaded_pd.sort_values(by = ['h','k'], inplace = True)
# print(data_loaded_pd)
hk_unique = list(set(zip(list(data_loaded_pd['h']), list(data_loaded_pd['k']))))
hk_unique.sort()
h_unique = [each[0] for each in hk_unique]
k_unique = [each[1] for each in hk_unique]
for i in range(len(h_unique)):
h_temp, k_temp = h_unique[i], k_unique[i]
name = 'Data-{}{}L'.format(h_temp, k_temp)
tag = sum([int(name in each) for each in current_data_set_name])+1
#if name in current_data_set_name:
name = name + '_{}'.format(tag)
self.model.data.add_new(name = name)
self.model.data.items[-1].x = data_loaded_pd[(data_loaded_pd['h']==h_temp) & (data_loaded_pd['k']==k_temp)]['X'].to_numpy()
self.model.data.items[-1].y = data_loaded_pd[(data_loaded_pd['h']==h_temp) & (data_loaded_pd['k']==k_temp)]['I'].to_numpy()
self.model.data.items[-1].error = data_loaded_pd[(data_loaded_pd['h']==h_temp) & (data_loaded_pd['k']==k_temp)]['eI'].to_numpy()
self.model.data.items[-1].x_raw = data_loaded_pd[(data_loaded_pd['h']==h_temp) & (data_loaded_pd['k']==k_temp)]['X'].to_numpy()
self.model.data.items[-1].y_raw = data_loaded_pd[(data_loaded_pd['h']==h_temp) & (data_loaded_pd['k']==k_temp)]['I'].to_numpy()
self.model.data.items[-1].error_raw = data_loaded_pd[(data_loaded_pd['h']==h_temp) & (data_loaded_pd['k']==k_temp)]['eI'].to_numpy()
self.model.data.items[-1].set_extra_data(name = 'h', value = data_loaded_pd[(data_loaded_pd['h']==h_temp) & (data_loaded_pd['k']==k_temp)]['h'].to_numpy())
self.model.data.items[-1].set_extra_data(name = 'k', value = data_loaded_pd[(data_loaded_pd['h']==h_temp) & (data_loaded_pd['k']==k_temp)]['k'].to_numpy())
self.model.data.items[-1].set_extra_data(name = 'Y', value = data_loaded_pd[(data_loaded_pd['h']==h_temp) & (data_loaded_pd['k']==k_temp)]['Y'].to_numpy())
self.model.data.items[-1].set_extra_data(name = 'LB', value = data_loaded_pd[(data_loaded_pd['h']==h_temp) & (data_loaded_pd['k']==k_temp)]['LB'].to_numpy())
self.model.data.items[-1].set_extra_data(name = 'dL', value = data_loaded_pd[(data_loaded_pd['h']==h_temp) & (data_loaded_pd['k']==k_temp)]['dL'].to_numpy())
self.model.data.items[-1].mask = np.array([True]*len(self.model.data.items[-1].x))
#now remove the empty datasets
empty_data_index = []
i=0
for each in self.model.data.items:
if len(each.x_raw) == 0:
empty_data_index.append(i)
i += 1
for i in range(len(empty_data_index)):
self.model.data.delete_item(empty_data_index[i])
for ii in range(len(empty_data_index)):
if empty_data_index[ii]>empty_data_index[i]:
empty_data_index[ii] = empty_data_index[ii]-1
else:
pass
self.model.data_original = copy.deepcopy(self.model.data)
#update script_module
#self.model.script_module.__dict__['data'] = self.data
#update the view
self.update_table_widget_data()
self.update_combo_box_dataset()
self.update_plot_dimension()
self.update_plot_data_view()
def delete_data(self):
self.model.compiled = False
# Delete the selected mytable lines
row_index = [each.row() for each in self.tableWidget_data.selectionModel().selectedRows()]
row_index = sorted(row_index, reverse=True)
for each in row_index:
self.model.data.delete_item(each)
#self._deleteRows(self.tableWidget_data.selectionModel().selectedRows(), self.tableWidget_data)
self.update_table_widget_data()
self.update_combo_box_dataset()
self.update_plot_dimension()
self.update_plot_data_view()
def update_table_widget_data(self):
self.tableWidget_data.clear()
self.tableWidget_data.setRowCount(len(self.model.data))
self.tableWidget_data.setColumnCount(5)
self.tableWidget_data.setHorizontalHeaderLabels(['DataID','Show','Use','Errors','fmt'])
# self.tableWidget_pars.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
for i in range(len(self.model.data)):
current_data = self.model.data[i]
name = current_data.name
for j in range(5):
if j == 0:
qtablewidget = QTableWidgetItem(name)
self.tableWidget_data.setItem(i,j,qtablewidget)
elif j == 4:
qtablewidget = QTableWidgetItem('sym:6yr;l:2g')
self.tableWidget_data.setItem(i,j,qtablewidget)
else:
#note j=1 to j=3 corresponds to data.show, data.use, data.use_error
check = getattr(current_data, ['show', 'use', 'use_error'][j-1])
check_box = QCheckBox()
#self.show_checkBox_list.append(check_box)
check_box.setChecked(check)
check_box.stateChanged.connect(self.update_plot_data_view)
self.tableWidget_data.setCellWidget(i,j,check_box)
def update_combo_box_dataset(self):
new_items = [each.name for each in self.model.data]
self.comboBox_dataset.clear()
self.comboBox_dataset.addItems(new_items)
def update_data_view(self):
dataset_name = self.comboBox_dataset.currentText()
dataset = None
for each in self.model.data_original:
if each.name == dataset_name:
dataset = each
break
else:
pass
column_labels_main = ['x','y','error','mask']
extra_labels = ['h', 'k', 'dL', 'LB']
all_labels = ['x','y','error','h','k','dL','LB','mask']
self.tableWidget_data_view.setRowCount(len(dataset.x))
self.tableWidget_data_view.setColumnCount(len(all_labels))
self.tableWidget_data_view.setHorizontalHeaderLabels(all_labels)
for i in range(len(dataset.x)):
for j in range(len(all_labels)):
if all_labels[j] in column_labels_main:
# print(getattr(dataset,'x')[i])
# qtablewidget = QTableWidgetItem(str(round(getattr(dataset,all_labels[j])[i],4)))
item_ = getattr(dataset,all_labels[j])[i]
if all_labels[j] == 'mask':
qtablewidget = QTableWidgetItem(str(item_))
else:
qtablewidget = QTableWidgetItem(str(round(item_,4)))
elif all_labels[j] in extra_labels:
qtablewidget = QTableWidgetItem(str(dataset.get_extra_data(all_labels[j])[i]))
else:
qtablewidget = QTableWidgetItem('True')
self.tableWidget_data_view.setItem(i,j,qtablewidget)
def update_mask_info_in_data(self):
dataset_name = self.comboBox_dataset.currentText()
dataset = None
for each in self.model.data_original:
if each.name == dataset_name:
dataset = each
break
else:
pass
for i in range(len(dataset.x)):
dataset.mask[i] = (self.tableWidget_data_view.item(i,7).text() == 'True')
self.model.data = copy.deepcopy(self.model.data_original)
[each.apply_mask() for each in self.model.data]
self.simulate_model()
    def init_mask_info_in_data_upon_loading_model(self):
        """Rebuild model.data as a masked deep copy of the pristine datasets, then re-simulate."""
        self.model.data = copy.deepcopy(self.model.data_original)
        [each.apply_mask() for each in self.model.data]
        self.simulate_model()
def init_structure_view(self):
domain_tag = int(self.spinBox_domain.text())
size_domain = len(self.model.script_module.sample.domain)
if size_domain<(1+domain_tag):
domain_tag = size_domain -1
else:
pass
# print(domain_tag)
xyz = self.model.script_module.sample.extract_xyz(domain_tag)
self.widget_edp.show_structure(xyz)
self.update_camera_position(widget_name = 'widget_edp', angle_type="azimuth", angle=0)
self.update_camera_position(widget_name = 'widget_edp', angle_type = 'elevation', angle = 0)
xyz,bond_index = self.model.script_module.sample.extract_xyz_top(domain_tag)
self.widget_msv_top.show_structure(xyz,bond_index)
self.update_camera_position(widget_name = 'widget_msv_top', angle_type="azimuth", angle=0)
self.update_camera_position(widget_name = 'widget_msv_top', angle_type = 'elevation', angle = 90)
def update_structure_view(self):
try:
domain_tag = int(self.spinBox_domain.text())
size_domain = len(self.model.script_module.sample.domain)
if size_domain<(1+domain_tag):
domain_tag = size_domain -1
else:
pass
# print(size_domain,domain_tag)
xyz = self.model.script_module.sample.extract_xyz(domain_tag)
self.widget_edp.update_structure(xyz)
xyz, bond_index = self.model.script_module.sample.extract_xyz_top(domain_tag)
self.widget_msv_top.update_structure(xyz, bond_index)
except Exception as e:
outp = StringIO()
traceback.print_exc(200, outp)
val = outp.getvalue()
outp.close()
_ = QMessageBox.question(self, "",'Runtime error message:\n{}'.format(str(val)), QMessageBox.Ok)
    def start_timer_structure_view(self):
        """Start the 2-second timer that refreshes the 3D structure views."""
        self.timer_update_structure.start(2000)
    def stop_timer_structure_view(self):
        """Stop the structure-view refresh timer."""
        self.timer_update_structure.stop()
#save data plus best fit profile
def save_data(self):
#potential = input('The potential corresponding to this dataset is:')
potential, done = QInputDialog.getDouble(self, 'Potential_info', 'Enter the potential for this dataset (in V):')
if not done:
potential = None
path, _ = QFileDialog.getSaveFileName(self, "Save file", "", "model file (*.*)")
if path!="":
keys_attri = ['x','y','y_sim','error']
keys_extra = ['h','k']
lib_map = {'x': 'L', 'y':'I','y_sim':'I_model','error':'error','h':'H','k':'K'}
export_data = {}
for key in ['x','h','k','y','y_sim','error']:
export_data[lib_map[key]] = []
export_data['use'] = []
export_data['I_bulk'] = []
export_data['potential'] = []
for each in self.model.data:
if each.use:
for key in ['x','h','k','y','y_sim','error']:
if key in keys_attri:
export_data[lib_map[key]] = np.append(export_data[lib_map[key]], getattr(each,key))
elif key in keys_extra:
export_data[lib_map[key]] = np.append(export_data[lib_map[key]], each.extra_data[key])
export_data['use'] = np.append(export_data['use'],[True]*len(each.x))
else:
for key in ['x','h','k','y','y_sim','error']:
if key in keys_attri:
if key=='y_sim':
export_data[lib_map[key]] = np.append(export_data[lib_map[key]], [0]*len(getattr(each,'x')))
else:
export_data[lib_map[key]] = np.append(export_data[lib_map[key]], getattr(each,key))
elif key in keys_extra:
export_data[lib_map[key]] = np.append(export_data[lib_map[key]], each.extra_data[key])
export_data['use'] = np.append(export_data['use'],[False]*len(each.x))
export_data['potential'] = np.append(export_data['potential'],[float(potential)]*len(each.x))
beta = self.model.script_module.rgh.beta
#rough = (1-beta)/((1-beta)**2 + 4*beta*np.sin(np.pi*(each.x-each.extra_data['LB'])/each.extra_data['dL'])**2)**0.5
rough = 1
export_data['I_bulk'] = np.append(export_data['I_bulk'],rough**2*np.array(self.model.script_module.sample.calc_f_ideal(each.extra_data['h'], each.extra_data['k'], each.x)**2))
df_export_data = pd.DataFrame(export_data)
writer_temp = pd.ExcelWriter([path+'.xlsx',path][int(path.endswith('.xlsx'))])
df_export_data.to_excel(writer_temp, columns =['potential']+[lib_map[each_] for each_ in ['x','h','k','y','y_sim','error']]+['I_bulk','use'])
writer_temp.save()
#self.writer = pd.ExcelWriter([path+'.xlsx',path][int(path.endswith('.xlsx'))],engine = 'openpyxl',mode ='a')
#not implemented!
def change_plot_style(self):
if self.background_color == 'w':
self.widget_data.getViewBox().setBackgroundColor('k')
self.widget_edp.getViewBox().setBackgroundColor('k')
self.widget_msv_top.getViewBox().setBackgroundColor('k')
self.background_color = 'k'
else:
self.widget_data.getViewBox().setBackgroundColor('w')
self.widget_edp.getViewBox().setBackgroundColor('w')
self.widget_msv_top.getViewBox().setBackgroundColor('w')
self.background_color = 'w'
def load_script(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", "","script Files (*.py);;text Files (*.txt)", options=options)
if fileName:
with open(fileName,'r') as f:
self.plainTextEdit_script.setPlainText(f.read())
self.model.script = (self.plainTextEdit_script.toPlainText())
#self.compile_script()
    def update_script_upon_load(self):
        """Mirror the model's script into the script-editor widget."""
        self.plainTextEdit_script.setPlainText(self.model.script)
def save_script(self):
path, _ = QFileDialog.getSaveFileName(self, "Save script file", "", "script file (*.py)")
with open(path,'w') as f:
f.write(self.model.script)
    def modify_script(self):
        """Rewrite blocks of the model script from the GUI domain/motif inputs.

        Updates the 'slabnumber' block with the requested number of surface
        slabs and rebuilds the 'sorbateproperties' and 'sorbatestructure'
        blocks from the comma-separated sorbate motif names (looked up as
        attributes of sorbate_tool).  The number of motifs must equal the
        domain count.  The edited script replaces both the model script and
        the editor text.
        """
        assert self.model.script!="","No script to work on, please load script first!"
        domain_num = int(self.lineEdit_domain_number.text().rstrip())
        motif_chain = self.lineEdit_sorbate_motif.text().strip().rsplit(",")
        #print(self.lineEdit_sorbate_motif.text().strip().rsplit(","))
        #print(self.lineEdit_sorbate_motif.text().strip().rsplit(","))
        assert domain_num == len(motif_chain), "Number of domain not match with the motif number. Fix it first!!"
        lines = script_block_modifier(self.model.script.rsplit("\n"), 'slabnumber',["num_surface_slabs"],[domain_num])
        # Collect the per-motif properties that feed the 'sorbateproperties' block.
        els_sorbate = []
        anchor_index_list = []
        flat_down_index = []
        xyzu_oc_m = []
        structure = []
        for each in motif_chain:
            each = each.strip()
            properties_temp = getattr(sorbate_tool,each)
            for each_key in properties_temp:
                if each_key == "els_sorbate":
                    els_sorbate.append(properties_temp[each_key])
                elif each_key == "anchor_index_list":
                    anchor_index_list.append(properties_temp[each_key])
                elif each_key == "flat_down_index":
                    flat_down_index.append(properties_temp[each_key])
                elif each_key == "structure":
                    structure.append("#"+each+properties_temp[each_key])
        # One default geometry row (x, y, z, u, occupancy, multiplier) per motif.
        xyzu_oc_m = [[0.5, 0.5, 1.5, 0.1, 1, 1]]*len(els_sorbate)
        tag_list = ['els_sorbate', 'anchor_index_list', 'flat_down_index', 'xyzu_oc_m']
        tag_value_list = [els_sorbate, anchor_index_list, flat_down_index, xyzu_oc_m]
        lines = script_block_modifier(lines, 'sorbateproperties',tag_list, tag_value_list)
        # Replace the whole 'sorbatestructure' block with the collected motif structures.
        left_, right_ = locate_tag(lines,'sorbatestructure')
        del(lines[left_:right_])
        if structure[-1][-1] == "\n":
            structure[-1] = structure[-1][0:-1]
        lines.insert(left_,"\n".join(structure))
        self.model.script = '\n'.join(lines)
        self.plainTextEdit_script.setPlainText(self.model.script)
    def remove_selected_rows(self):
        """Delete the selected parameter-table rows, then re-sync the model."""
        # Delete the selected mytable lines
        self._deleteRows(self.tableWidget_pars.selectionModel().selectedRows(), self.tableWidget_pars)
        self.update_model_parameter()
# DeleteRows function
def _deleteRows(self, rows, table):
# Get all row index
indexes = []
for row in rows:
indexes.append(row.row())
# Reverse sort rows indexes
indexes = sorted(indexes, reverse=True)
# Delete rows
for rowidx in indexes:
table.removeRow(rowidx)
def append_one_row(self):
rows = self.tableWidget_pars.selectionModel().selectedRows()
if len(rows) == 0:
row_index = self.tableWidget_pars.rowCount()
else:
row_index = rows[-1].row()
self.tableWidget_pars.insertRow(row_index)
for i in range(6):
if i==2:
check_box = QCheckBox()
check_box.setChecked(False)
self.tableWidget_pars.setCellWidget(row_index,2,check_box)
else:
qtablewidget = QTableWidgetItem('')
if i == 0:
qtablewidget.setFont(QFont('Times',10,QFont.Bold))
elif i == 1:
qtablewidget.setForeground(QBrush(QColor(255,0,255)))
self.tableWidget_pars.setItem(row_index,i,qtablewidget)
self.update_model_parameter()
def update_model_parameter(self):
self.model.parameters.data = []
vertical_label = []
label_tag=1
for i in range(self.tableWidget_pars.rowCount()):
if self.tableWidget_pars.item(i,0)==None:
items = ['',0,False,0,0,'-']
vertical_label.append('')
elif self.tableWidget_pars.item(i,0).text()=='':
items = ['',0,False,0,0,'-']
vertical_label.append('')
else:
items = [self.tableWidget_pars.item(i,0).text(),float(self.tableWidget_pars.item(i,1).text()),self.tableWidget_pars.cellWidget(i,2).isChecked(),\
float(self.tableWidget_pars.item(i,3).text()), float(self.tableWidget_pars.item(i,4).text()), self.tableWidget_pars.item(i,5).text()]
self.model.parameters.data.append(items)
vertical_label.append(str(label_tag))
label_tag += 1
self.tableWidget_pars.setVerticalHeaderLabels(vertical_label)
    def update_par_upon_load(self):
        """Rebuild the parameter table widget from model.parameters.data.

        Blank rows act as visual spacers and get an empty vertical-header
        label; real rows are numbered consecutively (the numbering skips
        over spacer rows by looking back past empty labels).  Column 2 is
        rendered as a checkbox, the name column bold, the value column
        magenta with 5-digit rounding.
        """
        vertical_labels = []
        lines = self.model.parameters.data
        how_many_pars = len(lines)
        self.tableWidget_pars.clear()
        self.tableWidget_pars.setRowCount(how_many_pars)
        self.tableWidget_pars.setColumnCount(6)
        self.tableWidget_pars.setHorizontalHeaderLabels(['Parameter','Value','Fit','Min','Max','Error'])
        # self.tableWidget_pars.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
        for i in range(len(lines)):
            # print("test{}".format(i))
            items = lines[i]
            #items = line.rstrip().rsplit('\t')
            j = 0
            if items[0] == '':
                # Spacer row: no content, empty vertical label.
                #self.model.parameters.data.append([items[0],0,False,0, 0,'-'])
                vertical_labels.append('')
                j += 1
            else:
                #add items to parameter attr
                #self.model.parameters.data.append([items[0],float(items[1]),items[2]=='True',float(items[3]), float(items[4]),items[5]])
                #add items to table view
                # Number real rows 1, 2, ... skipping spacer labels.
                if len(vertical_labels)==0:
                    vertical_labels.append('1')
                else:
                    if vertical_labels[-1] != '':
                        vertical_labels.append('{}'.format(int(vertical_labels[-1])+1))
                    else:
                        vertical_labels.append('{}'.format(int(vertical_labels[-2])+1))
                for item in items:
                    if j == 2:
                        check_box = QCheckBox()
                        check_box.setChecked(item==True)
                        self.tableWidget_pars.setCellWidget(i,2,check_box)
                    else:
                        if j == 1:
                            qtablewidget = QTableWidgetItem(str(round(item,5)))
                        else:
                            qtablewidget = QTableWidgetItem(str(item))
                        # qtablewidget.setTextAlignment(Qt.AlignCenter)
                        if j == 0:
                            qtablewidget.setFont(QFont('Times',10,QFont.Bold))
                        elif j == 1:
                            qtablewidget.setForeground(QBrush(QColor(255,0,255)))
                        self.tableWidget_pars.setItem(i,j,qtablewidget)
                    j += 1
        self.tableWidget_pars.resizeColumnsToContents()
        self.tableWidget_pars.resizeRowsToContents()
        self.tableWidget_pars.setShowGrid(False)
        self.tableWidget_pars.setVerticalHeaderLabels(vertical_labels)
    def load_par(self):
        """Load a tab-separated parameter table from file into model and table.

        Lines starting with '#' are skipped.  Blank first fields act as
        spacer rows; real rows are appended to model.parameters.data with
        fields cast to their native types, and the table widget mirrors them
        (checkbox in column 2, bold name, magenta value).
        """
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", "","Table Files (*.tab);;text Files (*.txt)", options=options)
        vertical_labels = []
        if fileName:
            with open(fileName,'r') as f:
                lines = f.readlines()
                # self.parameters.set_ascii_input(f)
                lines = [each for each in lines if not each.startswith('#')]
                how_many_pars = len(lines)
                self.tableWidget_pars.setRowCount(how_many_pars)
                self.tableWidget_pars.setColumnCount(6)
                self.tableWidget_pars.setHorizontalHeaderLabels(['Parameter','Value','Fit','Min','Max','Error'])
                # self.tableWidget_pars.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
                for i in range(len(lines)):
                    line = lines[i]
                    items = line.rstrip().rsplit('\t')
                    j = 0
                    if items[0] == '':
                        # Spacer row: placeholder entry, empty vertical label.
                        self.model.parameters.data.append([items[0],0,False,0, 0,'-'])
                        vertical_labels.append('')
                        j += 1
                    else:
                        #add items to parameter attr
                        self.model.parameters.data.append([items[0],float(items[1]),items[2]=='True',float(items[3]), float(items[4]),items[5]])
                        #add items to table view
                        # Number real rows 1, 2, ... skipping spacer labels.
                        if len(vertical_labels)==0:
                            vertical_labels.append('1')
                        else:
                            if vertical_labels[-1] != '':
                                vertical_labels.append('{}'.format(int(vertical_labels[-1])+1))
                            else:
                                vertical_labels.append('{}'.format(int(vertical_labels[-2])+1))
                        for item in items:
                            if j == 2:
                                check_box = QCheckBox()
                                check_box.setChecked(item=='True')
                                self.tableWidget_pars.setCellWidget(i,2,check_box)
                            else:
                                qtablewidget = QTableWidgetItem(item)
                                # qtablewidget.setTextAlignment(Qt.AlignCenter)
                                if j == 0:
                                    qtablewidget.setFont(QFont('Times',10,QFont.Bold))
                                elif j == 1:
                                    qtablewidget.setForeground(QBrush(QColor(255,0,255)))
                                self.tableWidget_pars.setItem(i,j,qtablewidget)
                            j += 1
            self.tableWidget_pars.resizeColumnsToContents()
            self.tableWidget_pars.resizeRowsToContents()
            self.tableWidget_pars.setShowGrid(False)
            self.tableWidget_pars.setVerticalHeaderLabels(vertical_labels)
    @QtCore.pyqtSlot(str,object)
    def update_par_during_fit(self,string,model):
        """Slot: refresh the value column of the parameter table from the
        model object emitted by the fit thread while fitting is running.

        Rows whose name field is empty (spacer rows) are skipped; row order
        is assumed to match model.parameters.data.
        """
        #labels = [data[0] for each in self.model.parameters.data]
        for i in range(len(model.parameters.data)):
            if model.parameters.data[i][0]!='':
                # print(self.model.parameters.data[i][0])
                #print(len(self.model.parameters.data))
                # print(model.parameters.data[i][0])
                item_temp = self.tableWidget_pars.item(i,1)
                #print(type(item_temp))
                item_temp.setText(str(model.parameters.data[i][1]))
        self.tableWidget_pars.resizeColumnsToContents()
        self.tableWidget_pars.resizeRowsToContents()
        self.tableWidget_pars.setShowGrid(False)
        # self.update_structure_view()
def update_par_upon_change(self):
#print("before update:{}".format(len(self.model.parameters.data)))
self.model.parameters.data = []
for each_row in range(self.tableWidget_pars.rowCount()):
if self.tableWidget_pars.item(each_row,0)==None:
items = ['',0,False,0,0,'-']
elif self.tableWidget_pars.item(each_row,0).text()=='':
items = ['',0,False,0,0,'-']
else:
# print(each_row,type(self.tableWidget_pars.item(each_row,0)))
items = [self.tableWidget_pars.item(each_row,0).text()] + [float(self.tableWidget_pars.item(each_row,i).text()) for i in [1,3,4]] + [self.tableWidget_pars.item(each_row,5).text()]
items.insert(2, self.tableWidget_pars.cellWidget(each_row,2).isChecked())
self.model.parameters.data.append(items)
#print("after update:{}".format(len(self.model.parameters.data)))
    @QtCore.pyqtSlot(str,object)
    def update_status(self,string,model):
        """Slot: show fit progress text in the status bar and refresh the FOM label."""
        self.statusbar.clearMessage()
        self.statusbar.showMessage(string)
        self.label_2.setText('FOM {}:{}'.format(self.model.fom_func.__name__,self.run_fit.solver.optimizer.best_fom))
def save_par(self):
path, _ = QFileDialog.getSaveFileName(self, "Save tab file", "", "table file (*.*)")
with open(path,'w') as f:
f.write(self.model.parameters.get_ascii_output())
if __name__ == "__main__":
    # Application entry point: build the Qt app, attach syntax highlighting
    # to the script editor, apply the dark stylesheet, and run the event loop.
    QApplication.setStyle("windows")
    app = QApplication(sys.argv)
    main_window = MyMainWindow()
    main_window.setWindowIcon(QtGui.QIcon('DAFY.png'))
    # Keep a reference to the highlighter so it is not garbage collected.
    highlighter = syntax_pars.PythonHighlighter(main_window.plainTextEdit_script.document())
    main_window.plainTextEdit_script.show()
    # Re-setting the text presumably triggers an initial highlight pass — TODO confirm.
    main_window.plainTextEdit_script.setPlainText(main_window.plainTextEdit_script.toPlainText())
    app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
    main_window.show()
    sys.exit(app.exec_())
| UTF-8 | Python | false | false | 55,146 | py | 126 | superrod_GUI_pyqtgraph.py | 65 | 0.583633 | 0.576379 | 0 | 1,097 | 49.269827 | 224 |
thomastrg/Price_prediction_footballers | 9,234,179,709,423 | 8d84ce7dea64c3795347950a98cd0d89177937ae | 56842faabc8275101568ec1894e5e759d48a1c57 | /Scrapping/Scrapping_fifaindex_function.py | 5261d6c161481bc82af686489423030b1c381b81 | [
"MIT"
]
| permissive | https://github.com/thomastrg/Price_prediction_footballers | 125f7f12abec4fe9c127238f479abdc7e48a250c | 00fe5677f470d0a1a1f15e118a07df049a9a247d | refs/heads/main | 2023-04-28T15:44:59.135279 | 2021-05-15T15:06:10 | 2021-05-15T15:06:10 | 367,439,041 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import requests
import re
import pandas as pd
def scrapp_fifa_index(page_start=1, page_end=2):
assert page_start >= 1
assert page_end >= page_start
all_data = []
url_base ="https://www.fifaindex.com"
#Wanted description informations
wanted = ['Age','Valeur','Salaire']
url = f'https://www.fifaindex.com/fr/players/fifa19/?page={page_start}'
while(page_start <= page_end and requests.get(url).status_code == 200):
main_page = requests.get(url).content
main_page_html = BeautifulSoup(main_page, 'html.parser')
players_soup = main_page_html.find_all('a', class_='link-player')
players = {player['href'] for player in players_soup}
for player in players:
data_dict = {}
page_html = requests.get(url_base+player).content #go to player's page and retrieve content
soup = BeautifulSoup(page_html,'html.parser')
#Get the player's name
result = soup.find('div', class_='card mb-5').find('h5', class_='card-header').get_text()
player_name = re.split(r'(\d+)', result)[0]
data_dict['Name'] = player_name
# Get the player's description
result = soup.find('div', class_='card-body')
description = result.get_text()
description_data = re.split(r'\n+', description)
for elm in description_data:
if any(word in elm for word in wanted):
temp = re.split(r'([\d\.]+)', elm)
temp[1] = temp[1].replace('.','')
temp[0] = re.sub(r'\s+$','', temp[0]).replace(r'\s+',' ')
temp[0] = re.sub(r'\s+',' ', temp[0])
data_dict[temp[0]] = int(temp[1])
# Get player's stats
result = soup.find_all('div', class_='col-12 col-md-4 item')
first_text = [elm.get_text() for elm in result]
data = [re.split(r'\n+', sub_text) for sub_text in first_text]
for sub_data in data:
for attribute in sub_data :
if bool(re.search(r'\d', attribute)):
temp = re.split(r' (\d+)', attribute)
data_dict[temp[0]] = int(re.search('(\d+)$', attribute).group(1))
# Add player data to our main list
all_data.append(data_dict)
# Go to the next page
page_start += 1
url = f'https://www.fifaindex.com/fr/players/fifa19/?page={page_start}'
#RETURN THE FINAL LIST
return all_data
if __name__ =="__main__":
data = scrapp_fifa_index(1, 10)
columns_names = ['Name', 'Age', 'Value €', 'Value $', 'Value £', 'Wage €', 'Wage $',
'Wage £', 'Ball Control', 'Dribbling', 'Marking', 'Slide Tackle',
'Stand Tackle', 'Aggression', 'Reactions', 'Att. Position',
'Interceptions', 'Vision', 'Composure', 'Crossing', 'Short Pass',
'Long pass', 'Acceleration', 'Stamina', 'Strength', 'Balance',
'Sprint Speed', 'Agility', 'Jumping', 'Heading', 'Shot Power', 'Finishing',
'Long Shots', 'Curve', 'FK Acc.', 'Penalties', 'Volleys',
'GK Positioning', 'GK Diving', 'GK Handling', 'GK Kicking', 'GK Reflexes']
df = pd.DataFrame(data)
df.columns = columns_names
df.to_csv('fifaindex_21.csv', index=False)
| UTF-8 | Python | false | false | 3,554 | py | 10 | Scrapping_fifaindex_function.py | 2 | 0.52593 | 0.516347 | 0 | 91 | 37.989011 | 103 |
Tocknicsu/bot | 19,550,691,150,709 | 83963237600cab1446c04579899fb07113c2ad95 | 998a9fa97dfb9512d2b2debd4cdd8ce02ccc0ab7 | /telegram_bot/server.py | c40c249abf9e882499885dd1834ecc78d72c8a84 | []
| no_license | https://github.com/Tocknicsu/bot | c65c59c02279dd3345b03948a26fd7ed49016e99 | 09e0d3220de83f501b565b34fc91745b2ecb13af | refs/heads/master | 2021-01-17T16:56:32.357497 | 2016-07-27T05:45:15 | 2016-07-27T05:45:15 | 64,179,588 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import telepot
import config
# Create the Telegram bot from the configured API token and print its
# identity (getMe) as a startup sanity check.
bot = telepot.Bot(config.key)
print(bot.getMe())
def handle(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
print(content_type, chat_type, chat_id)
if content_type == 'text':
print(msg['text'])
bot.sendMessage(chat_id, msg['text'])
bot.message_loop(handle)
while True:
x = input()
print("User Input: ", x)
| UTF-8 | Python | false | false | 412 | py | 10 | server.py | 3 | 0.643204 | 0.640777 | 0 | 20 | 19.6 | 58 |
DongliangLu1995/Summer_Research-CBS | 1,176,821,081,155 | ee04d9e963ea027bf034e80fd3e7c7d2ab73f316 | c810c8c09e244a5810223bad95c55e0ad535cdbd | /merge_similarity_citation.py | 74c00b4fa0cd272055ce52e6148b1d644b4acb16 | []
| no_license | https://github.com/DongliangLu1995/Summer_Research-CBS | f98bca71d21981d252e07fd1cd4a9487ad858983 | 050116e542dc8b3738be2a07fdcf728e99b13cdd | refs/heads/master | 2020-03-29T00:50:05.393332 | 2019-03-29T19:06:04 | 2019-03-29T19:06:04 | 149,358,414 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 16 15:14:55 2018
@author: DongliangLu
"""
#merge similarity citation
import pandas as pd
import numpy as np
import pickle
# Input locations: per-rule similarity pickles and the citation/regression CSV.
similarity_save_basic_add=r"C:\Users\DongliangLu\Desktop\Columbia\research\CBS\lobby\data_cleaned\comments_similarity\One"
old_data_save_basic_add = r"C:\Users\DongliangLu\Desktop\Columbia\research\CBS\lobby\data_cleaned"
old_data_save_add = old_data_save_basic_add + "\\SEC_regression.csv"
citation_df=pd.read_csv(old_data_save_add, index_col=0)
# Rule types that have usable ("good") final-rule data.
final_rule_good = [0,2,5,6,7,8,9,10,12,16,30,31,32,33,35,36]
#rule_type_i=0
# Columns to keep from each per-rule comments DataFrame.
inf_need_list = ['rule type','identify org','comment len','set similarity', 'count similarity', 'tfidf similarity',
                 'pca set similarity', 'pca count similarity', 'pca tfidf similarity',
                 'count distance', 'set distance', 'tfidf distance',
                 'pca count distance', 'pca set distance', 'pca tfidf distance']
new_df=pd.DataFrame()
for rule_type_i in final_rule_good:
    # Each pickle holds the comment-level similarity DataFrame for one rule.
    similarity_save_add = similarity_save_basic_add +"\\similarity_"+"One"+"_" + str(rule_type_i) +".pickle"
    with open(similarity_save_add,"rb") as f:
        tem=pickle.load(f, encoding='latin1')
    comments_inf = tem
    comments_inf_needed = comments_inf[inf_need_list]
    # Drop individual commenters; keep organizations only.
    comments_inf_needed = comments_inf_needed[ comments_inf_needed['identify org'] != "Individual" ]
    # Aggregate per organization: total comment length, mean similarities,
    # mean distances.
    total_comment_len = comments_inf_needed.groupby(['identify org'])['comment len'].sum()
    comment_similarity = comments_inf_needed.groupby(['identify org'])[['tfidf similarity','set similarity','count similarity','pca set similarity',
           'pca count similarity', 'pca tfidf similarity',]].mean()
    comment_distance = comments_inf_needed.groupby(['identify org'])[['tfidf distance','set distance','count distance',
           'pca count distance', 'pca set distance', 'pca tfidf distance']].mean()
    merge_1=pd.concat([total_comment_len,comment_similarity,comment_distance], axis=1)
    merge_1["organization"] = merge_1.index
    merge_1['rule type'] = rule_type_i
    merge_1.reset_index(inplace=True)
    new_df = new_df.append(merge_1)
# Reorder columns and rename the rule key to match citation_df.
new_df = new_df[['organization','tfidf similarity','set similarity','count similarity',
                 'pca set similarity', 'pca count similarity', 'pca tfidf similarity',
                 'count distance', 'set distance', 'tfidf distance',
                 'pca count distance', 'pca set distance', 'pca tfidf distance',
                 "rule type"]]
new_df = new_df.rename(columns={'rule type': 'rule type num'})
# Fisher-style transform: -log(1 - r^2) of each similarity.
new_df['count similarity transformed'] = -np.log(1 - new_df['count similarity']**2)
new_df['set similarity transformed'] = -np.log(1 - new_df['set similarity']**2)
new_df['tfidf similarity transformed'] = -np.log(1 - new_df['tfidf similarity']**2)
new_df['pca count similarity transformed'] = -np.log(1 - new_df['pca count similarity']**2)
new_df['pca set similarity transformed'] = -np.log(1 - new_df['pca set similarity']**2)
new_df['pca tfidf similarity transformed'] = -np.log(1 - new_df['pca tfidf similarity']**2)
# NOTE(review): fillna here is NOT in place and its result is discarded;
# the later data_reg.fillna(0) is what actually takes effect — confirm intent.
new_df.fillna(0)
#has some problem on pca set similarity, and all pca transformed similarity
# Left-join aggregated similarities onto the citation data and export.
data_reg=citation_df.merge(new_df, how="left", on=['organization', 'rule type num'])
data_reg = data_reg.fillna(0)
data_reg.to_csv(r"C:\Users\DongliangLu\Desktop\Columbia\research\CBS\lobby\data_cleaned"+"\\new_reg.csv")
| UTF-8 | Python | false | false | 3,626 | py | 46 | merge_similarity_citation.py | 43 | 0.63872 | 0.621622 | 0 | 72 | 47.333333 | 149 |
RobyJacob/Coding-problems | 19,628,000,563,278 | e6548c8c7143e1ab85b13f7061c6c4ea5c69d3eb | ed359ad7c72a6c1c924c0ae012f881517c70e9f8 | /foobar_1.py | eb050061da187b0c28ad053a73ab15615eac2bcd | []
| no_license | https://github.com/RobyJacob/Coding-problems | 485ff6cc88e90b7f672bb33ccccac19139fba4b8 | 418b2207025540f78a3e0c574eea5237b06a1579 | refs/heads/master | 2022-10-28T06:49:48.433806 | 2020-06-18T08:49:34 | 2020-06-18T08:49:34 | 260,744,336 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def inifinite_sequence():
num = 2
while True:
yield num
num += 1
def is_prime(num):
    """Return True if *num* is prime, by trial division up to sqrt(num).

    Bug fix: the old divisor-counting version returned True for 1 and 0
    (fewer than two divisors in [1, sqrt]) and raised ValueError for
    negatives.  Numbers below 2 are now rejected up front; behavior for
    num >= 2 is unchanged.
    """
    import math
    if num < 2:
        return False
    for n in range(2, int(math.sqrt(num)) + 1):
        if num % n == 0:
            return False
    return True
def solution(i):
    """Return the five characters of the concatenated-prime digit string
    ("23571113...") starting at offset *i*.

    Generating i+6 primes guarantees at least i+5 digits are available.
    """
    digits = ""
    primes_seen = 0
    for candidate in inifinite_sequence():
        if is_prime(candidate):
            digits += str(candidate)
            primes_seen += 1
            if primes_seen > i + 5:
                break
    return digits[i:i + 5]
# Demo: print five-digit windows of the prime digit string at assorted offsets.
print(solution(3))
print(solution(0))
print(solution(5))
print(solution(10))
print(solution(50))
print(solution(100))
print(solution(300))
print(solution(500))
print(solution(1000))
print(solution(1500))
print(solution(10000))
| UTF-8 | Python | false | false | 745 | py | 28 | foobar_1.py | 26 | 0.551678 | 0.496644 | 0 | 39 | 18.102564 | 45 |
bmrb-io/ligand-expo | 8,443,905,741,359 | 4ab31475679356aa2c3e987efe2cfab6b567ec03 | 3d7873ecb3668e1f5be5693c2daf977ce08c7b5b | /ligandexpo2bmrb/checker.py | 66d1ab58096a8eecb0cdb465a1e87ea6b570eb4b | [
"Unlicense"
]
| permissive | https://github.com/bmrb-io/ligand-expo | 3f78cc1b68a9c0bc151c6acf102f4fce262e2acf | 6b5a2f60e1c5d73a03ae236685d81ece55660b2c | refs/heads/master | 2022-11-28T20:20:18.836020 | 2020-08-05T01:03:01 | 2020-08-05T01:03:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python -u
#
# -*- coding: utf-8 -*-
#
# Read modified date from mmCIF
#
from __future__ import absolute_import
import sys
import os
import re
import datetime
#import traceback
try :
import sas
except ImportError :
sys.path.append( "/projects/BMRB/SAS/python" )
import sas
#
#
#
class UpdateChecker( sas.ContentHandler, sas.ErrorHandler ) :
    """Extract pdbx initial and modified dates, replaced by and subcomponent list.
    Either date will do (it's an error if both are missing). For subcomponent list and
    replaced by we only care if they exits: if they do, the chem comp is 'compound' or
    obsolete -- we don't want either."""
    @classmethod
    def read_file( cls, filename, verbose = False ) :
        """Open *filename* and return a populated UpdateChecker."""
        rc = None
        # "rU" universal-newline mode — NOTE(review): deprecated on Python 3;
        # kept as-is for the existing runtime.
        with open( os.path.realpath( filename ), "rU" ) as f :
            rc = cls.read( fp = f, verbose = verbose )
        return rc
    @classmethod
    def read( cls, fp, verbose = False ) :
        """Run the CIF parser over *fp*, using one checker instance as both
        content handler and error handler; return that instance."""
        chk = cls( verbose = verbose )
        lex = sas.StarLexer( fp, bufsize = 0, verbose = verbose )
        p = sas.CifParser.parse( lexer = lex, content_handler = chk, error_handler = chk, verbose = verbose )
        return chk
    #
    #
    def __init__( self, verbose = False ) :
        """Initialize parse state; all findings start empty/False."""
        self._verbose = bool( verbose )
        self._re_date = re.compile( r"^(\d{4})-(\d{1,2})-(\d{1,2})$" ) # 2011-05-31
        self._modified = None    # pdbx_modified_date (datetime.date) or None
        self._initial = None     # pdbx_initial_date (datetime.date) or None
        self._compound = False   # True if a subcomponent list was present
        self._obsolete = False   # True if replaced/withdrawn/obsolete
        self._errs = False       # True after any parse error/warning
        self._blockid = ""       # current data block name (chem comp id)
    @property
    def verbose( self ) :
        """verbose flag"""
        return bool( self._verbose )
    @verbose.setter
    def verbose( self, flag ) :
        self._verbose = bool( flag )
    @property
    def has_errors( self ) :
        return self._errs
    @property
    def initial_date( self ) :
        return self._initial
    @property
    def modified_date( self ) :
        return self._modified
    @property
    def is_compound( self ) :
        return self._compound
    @property
    def is_obsolete( self ) :
        return self._obsolete
    @property
    def __data__( self ) :
        # Plain-dict summary of everything extracted, for pprint/debugging.
        return {
            "id" : self._blockid,
            "created" : self._initial,
            "modified" : self._modified,
            "obsolete" : self._obsolete,
            "compound" : self._compound,
            "error" : self._errs
        }
    # SAS callbacks
    #
    def fatalError( self, line, msg ) :
        sys.stderr.write( "ERR: fatal parse error in line %d: %s\n" % (line, msg,) )
        self._errs = True
    def error( self, line, msg ) :
        # Returning True tells the parser to stop.
        sys.stderr.write( "ERR: parse error in line %d: %s\n" % (line, msg,) )
        self._errs = True
        return True
    # warnings are errors
    #
    def warning( self, line, msg ) :
        sys.stderr.write( "ERR: parse warning in line %d: %s\n" % (line, msg,) )
        self._errs = True
        return True
    #
    #
    def comment( self, line, text ) :
        return False
    def startSaveFrame( self, line, name ) :
        return False
    def endSaveFrame( self, line, name ) :
        return False
    def endData( self, line, name ) :
        pass
    def startLoop( self, line ) :
        return False
    def endLoop( self, line ) :
        return False
    #
    #
    def startData( self, line, name ) :
        # Remember the block id (chem comp id) for later messages/summary.
        self._blockid = name
        return False
    #
    #
    def data( self, tag, tagline, val, valline, delim, inloop ) :
        """Per-value callback: harvest the pdbx metadata tags we care about.

        Returns True (stop parsing) only when both dates are missing by
        the time atom records start; False otherwise (keep parsing).
        """
        if self._verbose :
            sys.stdout.write( "Data: %s - %s\n" % (tag,val,) )
        # Normalize CIF null markers ("", ".", "?") to None.
        if (val is None) or (str( val ).strip() in ("", ".", "?",)) :
            val = None
        if tag == "_chem_comp.pdbx_initial_date" :
            if self._verbose :
                sys.stdout.write( "initial date (raw): %s\n" % (val,) )
            if val is None :
                self._initial = None
                return False
            m = self._re_date.search( val )
            if not m :
                sys.stderr.write( "ERR: Invalid initial date: %s\n" % (val,) )
                self._initial = None
                return False
            # Non-zero-padded month/day is tolerated but reported.
            if (len( m.group( 2 ) ) < 2) or (len( m.group( 3 ) ) < 2) :
                sys.stderr.write( "WARN: bad initial date format: %s (%s)\n" % (val, self._blockid) )
            self._initial = datetime.date( int( m.group( 1 ) ), int( m.group( 2 ) ), int( m.group( 3 ) ) )
            if self._verbose :
                sys.stdout.write( "initial date (parsed): %s\n" % (self._initial,) )
            return False
        if tag == "_chem_comp.pdbx_modified_date" :
            if val is None :
                self._modified = None
                return False
            m = self._re_date.search( val )
            if not m :
                sys.stderr.write( "ERR: Invalid last-modified date: %s\n" % (val,) )
                self._modified = None
                return False
            self._modified = datetime.date( int( m.group( 1 ) ), int( m.group( 2 ) ), int( m.group( 3 ) ) )
            if self._verbose :
                sys.stdout.write( "modified date (parsed): %s\n" % (self._modified,) )
            return False
        # don't care what it was repalced with, just that it's no longer current
        #
        if tag == "_chem_comp.pdbx_replaced_by" :
            if val is not None :
                self._obsolete = True
            return False
        if tag == "_chem_comp.pdbx_release_status" :
            if str( val ).strip().upper() in ("OBS","WDRN",) :
                self._obsolete = True
            return False
        # ditto for subcomponents
        #
        if tag == "_chem_comp.pdbx_subcomponent_list" :
            if val is not None :
                self._compound = True
            return False
        # done with metadata
        # could probably ingnore errors if it's obsolete or compound
        #
        if tag [:16] == "_chem_comp_atom." :
            if (self._initial is None) and (self._modified is None) :
                sys.stderr.write( "ERR: Chem comp %s has neither inital nor modified date\n" % (self._blockid,) )
                self._errs = True
                return True
            return False
        return False
#
#
#
#
#
# CLI entry point: parse the mmCIF file named on the command line and
# pretty-print the extracted metadata summary.
if __name__ == "__main__" :
    import pprint
    chk = UpdateChecker.read_file( sys.argv[1] )
    pprint.pprint( chk.__data__ )
# eof
#
| UTF-8 | Python | false | false | 6,317 | py | 8 | checker.py | 7 | 0.527624 | 0.523191 | 0 | 223 | 27.327354 | 113 |
Rutvik-C/SE-Hackathon-20 | 17,188,459,133,629 | 1f489f5d6f1e9bd3f3b0fa6f2ddbc72fdb20c085 | 09e05c4f477bf721c008eb080497929f9501947e | /Solveasy/Student/migrations/0001_initial.py | 9ebbb07e27dc78748a44f445090a2844a53caf4a | []
| no_license | https://github.com/Rutvik-C/SE-Hackathon-20 | 3f4d0bca4c9273b5ea4ce32911aa7e2106b3429e | b661c69670469b7f3b2850ac57bccfdc59e16292 | refs/heads/main | 2023-01-29T08:18:29.078083 | 2020-12-07T05:01:40 | 2020-12-07T05:01:40 | 318,972,210 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.2 on 2020-12-06 13:54
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the Student app.

    Creates the ``rate`` (1-5 rating + comment per user) and
    ``problem_selected`` (user's chosen Authority problem) tables.
    Do not hand-edit applied migrations; add a new migration instead.
    """
    initial = True
    dependencies = [
        ('Authority', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='rate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ratings', models.PositiveIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5)])),
                ('comments', models.TextField(max_length=200)),
                ('user', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='foods11', related_query_name='foods11', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='problem_selected',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('p_id', models.IntegerField(default=0)),
                ('s', models.IntegerField(default=1)),
                ('problem_title', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Authority.problem')),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='order_foods', related_query_name='order_foods', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| UTF-8 | Python | false | false | 1,729 | py | 23 | 0001_initial.py | 16 | 0.633892 | 0.615963 | 0 | 38 | 44.5 | 203 |
sagoyanfisic/NuevoProyectoDisSist | 6,339,371,729,598 | 6ecc716153bf195fc72a8add6dcb07ab4f5bb2bf | 1cf947450e39131fc571ef953138e292db495c2f | /project/principal/views.py | f1cb10b901c51e61297107f633fa8022ed577282 | []
| no_license | https://github.com/sagoyanfisic/NuevoProyectoDisSist | bed93d1d6786402a21b987c32a57b79d29b00190 | f5c3a12050aca62ced48483c218452a16dce5756 | refs/heads/master | 2020-06-05T07:08:53.642356 | 2014-07-01T23:39:05 | 2014-07-01T23:39:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from principal.models import Bebida , Receta , Comentario
from django.contrib.auth.models import User
from django.shortcuts import render_to_response , get_object_or_404
from django.http import HttpResponse, Http404
def lista_bebidas(request):
    """Render the full catalogue of drinks."""
    todas = Bebida.objects.all()
    return render_to_response('lista_bebidas.html', {'lista': todas})
def sobre(request):
    """Static 'about' page served as a raw HTML snippet.

    Bug fix: the closing tag was misspelled ("</hmtl>"), producing
    malformed HTML in every response.
    """
    html = "<html><body>Proyecto akakkaka en MDW</body></html>"
    return HttpResponse(html)
def inicio(request):
    """Render the landing page with every published recipe."""
    recetas = Receta.objects.all()
    return render_to_response('inicio.html', {'recetas': recetas})
# (removed a stray no-op `pass` statement that followed the return)
| UTF-8 | Python | false | false | 577 | py | 3 | views.py | 2 | 0.762565 | 0.752166 | 0 | 17 | 32.823529 | 67 |
YangStark/33ics | 472,446,424,894 | 4d60a731971a1c7a02565de409f37509747cfb87 | 30b635f7798bea6e02746ddad322ff04d883a03a | /program1/sbsbsbsbsbsbsb.py | e86156e9e289fb6cf80658ab9ae9bdbac658e2e6 | []
| no_license | https://github.com/YangStark/33ics | 74ee85d1bd2a406b981d4380295e254ebfb4a41a | a7d3b68ad965ee7e08365731ec203a0cb778221f | refs/heads/master | 2020-04-13T17:26:01.685496 | 2019-09-27T23:19:25 | 2019-09-27T23:19:25 | 163,348,001 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from random import choice
from collections import defaultdict
# For use in read_corpus: leave unchanged in this file
def word_at_a_time(file : open):
    """Yield whitespace-separated tokens from *file*, one at a time.

    Bug fix: the parameter used to be immediately shadowed by a
    hard-coded ``open("wginput1.txt", "r")`` (a debugging leftover), so
    the caller's stream was ignored and the function crashed whenever
    that file was absent.  The passed-in file object is now used.
    """
    for line in file:
        for item in line.strip().split():
            yield item
def read_corpus(os : int, file : open) -> {(str):[str]}:
    """Build an order-*os* Markov corpus: each os-word tuple maps to the
    list of distinct words observed to follow it in *file*.

    NOTE(review): `os` shadows the `os` module name here (it is the
    Markov order, an int).  Also, for the final window the defaultdict
    subscript creates the key *before* word_list[i+os] raises
    IndexError, so the last os-word tuple ends up mapped to an empty
    list — preserved as-is since callers may rely on it.
    """
    return_dict = defaultdict(set)
    word_list = [result_item for result_item in word_at_a_time(file)]
    for i in range(len(word_list)-os+1):
        key_tuple = tuple(word_list[i:i+os])
        try:
            # Subscript first (creates the entry), then the follower
            # lookup may raise IndexError on the very last window.
            return_dict[key_tuple].add(word_list[i+os])
        except IndexError:
            pass
    # Freeze: plain dict with sorted-insertion lists instead of sets.
    return_dict = dict(return_dict)
    for keys in return_dict.keys():
        return_dict[keys] = list(return_dict[keys])
    return return_dict
def corpus_as_str(corpus : {(str):[str]}) -> str:
    """Human-readable dump of a corpus mapping, one key per line, ending
    with the max/min follower-list lengths.

    Bug fix: the min/max lengths were computed from the module-level
    global ``k`` instead of the ``corpus`` argument, so the function
    only worked on that one global (and raised NameError otherwise).
    """
    prt_str = "Corpus\n"
    len_min = min([len(i) for i in list(corpus.values())])
    len_max = max([len(i) for i in list(corpus.values())])
    for keys in sorted(list(corpus.keys())):
        prt_str += "  "+str(keys)+ " can be followed by any of "+str(corpus[keys])+"\n"
    prt_str += "max/min list lengths = "+str(len_max)+"/"+str(len_min)
    return prt_str
def produce_text(corpus : {(str):[str]}, start : [str], count : int) -> [str]:
    """Extend *start* (mutated in place and returned) with up to *count*
    randomly chosen follower words.  If the current tail tuple is not a
    key of *corpus*, append None and stop early."""
    order = len(next(iter(corpus.keys())))
    for _ in range(count):
        tail = tuple(start[-order:])
        if tail in corpus:
            start.append(choice(corpus[tail]))
        else:
            start.append(None)
            break
    return start
# Demo run.  NOTE(review): "asdf" is passed where a file object is
# expected — presumably this only "worked" because word_at_a_time
# historically ignored its argument and opened wginput1.txt; verify.
k = read_corpus(2,"asdf")
print(k)
print(corpus_as_str(k))
print(produce_text(k,["a","d"],10))
| UTF-8 | Python | false | false | 1,765 | py | 8 | sbsbsbsbsbsbsb.py | 8 | 0.581303 | 0.577904 | 0 | 51 | 32.607843 | 87 |
SEED-platform/seed | 19,533,511,285,937 | fd0264509a64e18295c77b7d38954c4f6b85776b | 7e6f0efd6f4733d09e61b4c6658455e6727cd48f | /seed/tests/api/seed_readingtools.py | e3148cf9fa0d5a2aae181964d35463a5b41755fb | [
"BSD-2-Clause"
]
| permissive | https://github.com/SEED-platform/seed | 0e4a6a2fa93f4c2528d0c295163a91f836a4253d | 680b6a2b45f3c568d779d8ac86553a0b08c384c8 | refs/heads/develop | 2023-09-01T10:46:25.502697 | 2023-08-30T18:44:21 | 2023-08-30T18:44:21 | 25,450,714 | 108 | 75 | NOASSERTION | false | 2023-09-13T22:18:47 | 2014-10-20T04:26:53 | 2023-09-11T19:24:59 | 2023-09-13T22:18:47 | 160,901 | 101 | 60 | 335 | Python | false | false | # !/usr/bin/env python
# encoding: utf-8
"""
SEED Platform (TM), Copyright (c) Alliance for Sustainable Energy, LLC, and other contributors.
See also https://github.com/seed-platform/seed/main/LICENSE.md
"""
import csv
import logging
import os
import pathlib
import pprint
import time
from http.client import RemoteDisconnected
import psutil
import requests
import urllib3
def report_memory():
    """Print a snapshot of system memory and flag low-memory conditions,
    then list the python/celery processes currently running."""
    mem = psutil.virtual_memory()
    print(mem)
    print(f'Free mem (MB): {mem.available / 1024}')
    low_water_mark = 100 * 1024 * 1024  # 100MB
    if mem.available <= low_water_mark:
        print("WARNING: Memory is low on system")
        # also report the processes (that we care about)
        for label, needle in (('Python', 'python'), ('Celery', 'celery')):
            procs = [p.info for p in psutil.process_iter(attrs=['pid', 'name'])
                     if needle in p.info['name']]
            print(f'{label} processes: {procs}')
# Three-step upload process
def upload_file(upload_header, organization_id, upload_filepath, main_url, upload_dataset_id, upload_datatype):
    """
    Proceeds with the filesystem upload.

    Args:
        upload_header: GET request headers
        organization_id: organization to upload under
        upload_filepath: full path to file
        main_url: Host
        upload_dataset_id: What ImportRecord to associate file with.
        upload_datatype: Type of data in file (Assessed Raw, Portfolio Raw)

    Returns:
        the requests.Response; on success its JSON carries
        {"import_file_id": ..., "success": true, "filename": ...}
    """
    endpoint = f"{main_url}/api/v3/upload/?organization_id={organization_id}"
    source = pathlib.Path(upload_filepath)
    return requests.post(
        endpoint,
        params={
            'import_record': upload_dataset_id,
            'source_type': upload_datatype,
        },
        files=[
            ('file', (source.name, source.read_bytes())),
        ],
        headers=upload_header,
    )
def check_status(result_out, part_msg, log, piid_flag=None):
    """Checks the status of the API endpoint and makes the appropriate print outs.

    Logs ``part_msg`` + pass/fail (ANSI-colored) and raises RuntimeError
    on any failure.  ``piid_flag`` selects how the response body is
    summarized: 'export', 'organizations', 'users', 'mappings', or None.
    """
    passed = '\033[1;32m...passed\033[1;0m'
    failed = '\033[1;31m...failed\033[1;0m'
    if result_out.status_code in [200, 201, 403, 401]:
        try:
            if piid_flag == 'export':
                content_str = result_out.content.decode()
                if content_str.startswith('id'):
                    msg = "Data exported successfully"
                    # the data are returned as text. No easy way to check the status. If ID
                    # exists, then claim success.
                else:
                    msg = content_str
            elif 'status' in result_out.json() and result_out.json()['status'] == 'error':
                msg = result_out.json()['message']
                log.error(part_msg + failed)
                log.debug(msg)
                raise RuntimeError
            elif 'success' in result_out.json() and not result_out.json()['success']:
                msg = result_out.json()
                log.error(part_msg + failed)
                log.debug(msg)
                raise RuntimeError
            else:
                if piid_flag == 'organizations':
                    msg = 'Number of organizations:\t' + str(
                        len(result_out.json()['organizations'][0]))
                elif piid_flag == 'users':
                    msg = 'Number of users:\t' + str(len(result_out.json()['users'][0]))
                elif piid_flag == 'mappings':
                    msg = pprint.pformat(result_out.json()['suggested_column_mappings'],
                                         indent=2, width=70)
                else:
                    msg = pprint.pformat(result_out.json(), indent=2, width=70)
        # NOTE(review): the RuntimeErrors raised above are themselves caught
        # here and re-raised after an extra "failed" log line — so a logical
        # failure is logged twice.  Preserved as-is.
        except BaseException:
            log.error(part_msg + failed)
            log.debug('Unknown error during request results recovery')
            raise RuntimeError
        log.info(part_msg + passed)
        log.debug(msg)
    elif result_out.status_code in [204]:
        # 204 No Content is a pass with no body to summarize.
        msg = result_out.content
        log.info(part_msg + passed)
        log.debug(msg)
    else:
        # Any other HTTP status is a hard failure.
        msg = result_out.reason
        log.error(part_msg + failed)
        log.debug(msg)
        raise RuntimeError
    return
def check_progress(main_url, header, progress_key):
    """Delays the sequence until progress is at 100 percent.

    Polls /api/v3/progress/<key>/ every ~2s (by recursion) until the
    endpoint reports 100, then returns the final response.

    Bug fix: ``except [A, B, C]:`` used a *list*; Python requires an
    exception class or a *tuple* of classes, so when one of those
    connection errors actually occurred the handler raised TypeError
    instead of retrying.  Now a tuple.
    """
    time.sleep(2)
    print("checking progress {}".format(progress_key))
    try:
        progress_result = requests.get(
            main_url + '/api/v3/progress/{}/'.format(progress_key),
            headers=header
        )
        print("... {} ...".format(progress_result.json()['progress']))
    except (urllib3.exceptions.ProtocolError, RemoteDisconnected, requests.exceptions.ConnectionError):
        print("Server is not responding... trying again in a few seconds")
        progress_result = None
    except Exception:
        print("Other unknown exception caught!")
        progress_result = None
    if progress_result and progress_result.json()['progress'] == 100:
        return progress_result
    else:
        # Not done (or request failed): poll again recursively.
        progress_result = check_progress(main_url, header, progress_key)
    return progress_result
def read_map_file(mapfile_path):
    """Parse a column-mapping CSV into a list of mapping dicts.

    The first row is treated as a header and skipped; each remaining row
    must hold (from_field, from_units, to_table_name, to_field).

    Fix: the file handle is now closed deterministically via ``with``
    (the old code left the open file to the garbage collector).
    """
    assert (os.path.isfile(mapfile_path)), "Cannot find file:\t" + mapfile_path
    with open(mapfile_path, 'r') as fh:
        map_reader = csv.reader(fh)
        next(map_reader)  # Skip the header
        return [
            {
                'from_field': row[0],
                'from_units': row[1],
                'to_table_name': row[2],
                'to_field': row[3],
            }
            for row in map_reader
        ]
def setup_logger(filename, write_file=True):
    """Configure and return the root logger.

    Quiets the chatty ``requests`` logger, logs everything (DEBUG+) to
    *filename* when *write_file* is true, and mirrors INFO+ to the
    console with a "LEVEL - message" format.
    """
    logging.getLogger("requests").setLevel(logging.WARNING)
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    if write_file:
        file_handler = logging.FileHandler(filename, mode='a')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter('%(message)s'))
        root.addHandler(file_handler)
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(logging.Formatter('%(levelname)s - %(message)s'))
    root.addHandler(console_handler)
    return root
def write_out_django_debug(partmsg, result):
    """On a non-200 response, dump the HTML body to <partmsg>_fail.html
    so the Django debug page can be inspected in a browser."""
    if result.status_code == 200:
        return
    filename = f'{partmsg}_fail.html'
    with open(filename, 'w') as fail:
        fail.writelines(result.text)
    print(f'Wrote debug -> {filename}')
| UTF-8 | Python | false | false | 6,779 | py | 998 | seed_readingtools.py | 801 | 0.587576 | 0.574886 | 0 | 206 | 31.898058 | 111 |
jkingben/minos | 12,996,571,071,360 | f54646505539aa00ff7c9a4e93b4fcdb014c3dae | c0d63b25f5ae6c0dc18d80a2fa8b14d7681e027d | /supervisor/supervisor/medusa/demo/start_medusa.py | 2c72437b61c42dd9fa8e4000dfda8cb39fe7191e | [
"Apache-2.0",
"HPND",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
]
| permissive | https://github.com/jkingben/minos | e8804dc03503497fc55c9570e8495c563587ab69 | 23d26fda71fcd1068e65958b3aef03d9cafd5453 | refs/heads/master | 2020-05-07T13:33:44.962162 | 2019-04-10T10:47:27 | 2019-04-10T10:47:27 | 180,554,473 | 0 | 0 | Apache-2.0 | true | 2019-04-10T10:03:25 | 2019-04-10T10:03:24 | 2019-04-08T06:34:02 | 2015-05-28T09:15:32 | 13,381 | 0 | 0 | 0 | null | false | false | # -*- Mode: Python -*-
#
# Sample/Template Medusa Startup Script.
#
# This file acts as a configuration file and startup script for Medusa.
#
# You should make a copy of this file, then add, change or comment out
# appropriately. Then you can start up the server by simply typing
#
# $ python start_medusa.py
#
import os
import sys
from supervisor.medusa import asyncore_25 as asyncore
from supervisor.medusa import http_server
from supervisor.medusa import ftp_server
from supervisor.medusa import chat_server
from supervisor.medusa import monitor
from supervisor.medusa import filesys
from supervisor.medusa import default_handler
from supervisor.medusa import status_handler
from supervisor.medusa import resolver
from supervisor.medusa import logger
# NOTE(review): when any CLI arguments are given, exactly three are
# required (hostname, ip, root) or this unpacking raises ValueError.
if len(sys.argv) > 1:
    # process a few convenient arguments
    [HOSTNAME, IP_ADDRESS, PUBLISHING_ROOT] = sys.argv[1:]
else:
    HOSTNAME = 'www.nightmare.com'
    # This is the IP address of the network interface you want
    # your servers to be visible from.  This can be changed to ''
    # to listen on all interfaces.
    IP_ADDRESS = '205.160.176.5'
    # Root of the http and ftp server's published filesystems.
    PUBLISHING_ROOT = '/home/www'
# Non-privileged ports so the script can run without root.
HTTP_PORT = 8080 # The standard port is 80
FTP_PORT = 8021 # The standard port is 21
CHAT_PORT = 8888
MONITOR_PORT = 9999
# ===========================================================================
# Caching DNS Resolver
# ===========================================================================
# The resolver is used to resolve incoming IP address (for logging),
# and also to resolve hostnames for HTTP Proxy requests. I recommend
# using a nameserver running on the local machine, but you can also
# use a remote nameserver.
# Caching DNS resolver backed by the nameserver at 127.0.0.1.
rs = resolver.caching_resolver ('127.0.0.1')
# ===========================================================================
# Logging.
# ===========================================================================
# There are several types of logging objects. Multiple loggers may be combined,
# See 'logger.py' for more details.
# This will log to stdout:
# Base logger writing to stdout (swap for syslog_logger below if desired).
lg = logger.file_logger (sys.stdout)
# This will log to syslog:
#lg = logger.syslog_logger ('/dev/log')
# This will wrap the logger so that it will
# 1) keep track of the last 500 entries
# 2) display an entry in the status report with a hyperlink
# to view these log entries.
#
# If you decide to comment this out, be sure to remove the
# logger object from the list of status objects below.
#
# Wrap the logger so the last entries are viewable from the status page.
lg = status_handler.logger_for_status (lg)
# ===========================================================================
# Filesystem Object.
# ===========================================================================
# An abstraction for the file system. Filesystem objects can be
# combined and implemented in interesting ways. The default type
# simply remaps a directory to root.
# Filesystem abstraction remapping PUBLISHING_ROOT to '/'.
fs = filesys.os_filesystem (PUBLISHING_ROOT)
# ===========================================================================
# Default HTTP handler
# ===========================================================================
# The 'default' handler for the HTTP server is one that delivers
# files normally - this is the expected behavior of a web server.
# Note that you needn't use it: Your web server might not want to
# deliver files!
# This default handler uses the filesystem object we just constructed.
# Standard file-delivery handler over the filesystem object above.
dh = default_handler.default_handler (fs)
# ===========================================================================
# HTTP Server
# ===========================================================================
# HTTP server with the default file handler installed.
hs = http_server.http_server (IP_ADDRESS, HTTP_PORT, rs, lg)
# Here we install the default handler created above.
hs.install_handler (dh)
# ===========================================================================
# Unix user `public_html' directory support
# ===========================================================================
# On POSIX, add support for per-user ~/public_html directories.
if os.name == 'posix':
    from supervisor.medusa import unix_user_handler
    uh = unix_user_handler.unix_user_handler ('public_html')
    hs.install_handler (uh)
# ===========================================================================
# FTP Server
# ===========================================================================
# Here we create an 'anonymous' ftp server.
# Note: the ftp server is read-only by default. [in this mode, all
# 'write-capable' commands are unavailable]
# Anonymous, read-only FTP server rooted at PUBLISHING_ROOT.
ftp = ftp_server.ftp_server (
        ftp_server.anon_authorizer (
                PUBLISHING_ROOT
                ),
        ip=IP_ADDRESS,
        port=FTP_PORT,
        resolver=rs,
        logger_object=lg
        )
# ===========================================================================
# Monitor Server:
# ===========================================================================
# This creates a secure monitor server, binding to the loopback
# address on port 9999, with password 'fnord'. The monitor server
# can be used to examine and control the server while it is running.
# If you wish to access the server from another machine, you will
# need to use '' or some other IP instead of '127.0.0.1'.
# Password-protected monitor bound to loopback only.
ms = monitor.secure_monitor_server ('fnord', '127.0.0.1', MONITOR_PORT)
# ===========================================================================
# Chat Server
# ===========================================================================
# The chat server is a simple IRC-like server: It is meant as a
# demonstration of how to write new servers and plug them into medusa.
# It's a very simple server (it took about 2 hours to write), but it
# could be easily extended. For example, it could be integrated with
# the web server, perhaps providing navigational tools to browse
# through a series of discussion groups, listing the number of current
# users, authentication, etc...
# Demonstration IRC-like chat server.
cs = chat_server.chat_server (IP_ADDRESS, CHAT_PORT)
# ===========================================================================
# Status Handler
# ===========================================================================
# These are objects that can report their status via the HTTP server.
# You may comment out any of these, or add more of your own. The only
# requirement for a 'status-reporting' object is that it have a method
# 'status' that will return a producer, which will generate an HTML
# description of the status of the object.
# Servers/loggers whose status() producers appear on the /status page.
status_objects = [
        hs,
        ftp,
        ms,
        cs,
        rs,
        lg
        ]
# Create a status handler.  By default it binds to the URI '/status'...
sh = status_handler.status_extension(status_objects)
# ... and install it on the web server.
hs.install_handler (sh)
# become 'nobody'
if os.name == 'posix':
if hasattr (os, 'seteuid'):
import pwd
[uid, gid] = pwd.getpwnam ('nobody')[2:4]
os.setegid (gid)
os.seteuid (uid)
# Finally, start up the server loop! This loop will not exit until
# all clients and servers are closed. You may cleanly shut the system
# down by sending SIGINT (a.k.a. KeyboardInterrupt).
asyncore.loop()
| UTF-8 | Python | false | false | 7,216 | py | 241 | start_medusa.py | 148 | 0.551414 | 0.542544 | 0 | 196 | 35.816327 | 79 |
guoyandan/anwen | 17,952,963,305,980 | f7dd6af610d915611ce46cc385cce995a7d2e75e | a69507f4f022cd7fec4eed7f6ea182124af343e7 | /admin/fix_hit.py | 75693faf98352ffc23a1f8eedbac1c0625499571 | []
| no_license | https://github.com/guoyandan/anwen | ce195a8457fac2156e7c20bca2335509ea230dbb | 05218fc6cdea8c7b67744459f0d78928ef27ceb0 | refs/heads/master | 2022-03-27T05:00:57.801153 | 2020-01-01T11:29:47 | 2020-01-01T11:29:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# encoding:utf-8
import sys
import options
from pymongo import MongoClient
conn = MongoClient()
adb = conn.anwen
adb.authenticate(options.db['username'], options.db['password'])
def fix():
    """Copy every document from Hit_Col_v2 into Hit_Col in _id order,
    renumbering ``id`` from 1 and resetting each ``hitnum`` to 1."""
    next_id = 1
    for doc in adb.Hit_Col_v2.find().sort('_id', 1):
        doc['id'] = next_id
        doc['hitnum'] = 1
        adb.Hit_Col.insert(doc)
        next_id += 1
fix()
| UTF-8 | Python | false | false | 374 | py | 147 | fix_hit.py | 78 | 0.596257 | 0.57754 | 0 | 22 | 16 | 64 |
ttwoodbury/IWTBA | 9,242,769,657,239 | 532c5e40b0a141d875123dafe2d78e12d1ebf4c3 | 3f3a485c04fb16c7d351319dc8ed315ef92b6ab4 | /scrapers/github_scrape.py | ed768d624f7d3423dba5b09e09431933f27a0c92 | []
| no_license | https://github.com/ttwoodbury/IWTBA | 1c370e45d919fa08f92d022ffa516068dccca318 | 6bb410ee0191bb43d599119bf9a9e8f8dcf67ce9 | refs/heads/master | 2021-01-20T23:40:21.772031 | 2015-07-17T20:37:19 | 2015-07-17T20:37:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # quick and dirty github scrape
# only 265 jobs, pages 0 to 5 (50 per page)
postings = []
for x in range(6):
r = requests.get('https://jobs.github.com/positions.json?description=&page=%s' % x)
postings.extend(r.json()) #r.json is a list of dicts
json.dump(postings, open('github_postings', 'w')) | UTF-8 | Python | false | false | 305 | py | 13 | github_scrape.py | 6 | 0.678689 | 0.652459 | 0 | 9 | 33 | 87 |
theo-ardouin/impose_ton_anonymat | 10,161,892,666,313 | 07bb9f2fc837d5bb9ff45d8d02507f1827c962ae | 5bfb0657ef9b72192d408e6b28e3a70f70a7330e | /impose/interfaces/__init__.py | 31b157e1370deb9e7cb02e372be0cbefd6ab99e2 | []
| no_license | https://github.com/theo-ardouin/impose_ton_anonymat | 807ea87ad1bfd1ffbfb1abfe39eaa9d2f921ab9b | 2aede8307974ff84564439ad5af5aa056c932334 | refs/heads/master | 2023-05-18T12:10:54.582556 | 2020-07-29T18:17:10 | 2020-07-29T18:17:10 | 275,205,197 | 0 | 0 | null | false | 2021-02-26T02:56:12 | 2020-06-26T16:54:38 | 2020-07-29T18:17:13 | 2021-02-26T02:56:12 | 39 | 0 | 0 | 1 | Python | false | false | from .database import IDatabase, ISession
from .discord import IDiscord
from .gateway import IImageGateway, ITaskGateway, IPermissionGateway
| UTF-8 | Python | false | false | 144 | py | 43 | __init__.py | 38 | 0.833333 | 0.833333 | 0 | 3 | 46 | 68 |
pavel-odintsov/RPKI-toolkit | 13,563,506,759,678 | 37ddaa415f79d0b1a06d84dbba712bf5775949d0 | 6494ad67744da5347db30940454261a226439937 | /rpki/gui/cacheview/misc.py | 544312240abec63f3809e0302161d68485401770 | []
| no_license | https://github.com/pavel-odintsov/RPKI-toolkit | 88f16027bf9dcb498fb6a5a1805cec06368439a8 | c55a0694c637acfd08b42f20366aba387b110adf | refs/heads/master | 2021-01-10T05:30:44.507868 | 2016-01-17T21:03:45 | 2016-01-17T21:03:45 | 49,835,419 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright (C) 2011 SPARTA, Inc. dba Cobham Analytic Solutions
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND SPARTA DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL SPARTA BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
from rpki.resource_set import resource_range_ipv4, resource_range_ipv6
from rpki.exceptions import BadIPResource
def parse_ipaddr(s):
# resource_set functions only accept str
if isinstance(s, unicode):
s = s.encode()
s = s.strip()
r = resource_range_ipv4.parse_str(s)
try:
r = resource_range_ipv4.parse_str(s)
return 4, r
except BadIPResource:
r = resource_range_ipv6.parse_str(s)
return 6, r
# vim:sw=4 ts=8 expandtab
| UTF-8 | Python | false | false | 1,261 | py | 92 | misc.py | 62 | 0.733545 | 0.723236 | 0 | 31 | 39.677419 | 77 |
sami-adam/odoo-hr | 8,598,524,546,097 | 2b29d5bb2bbee85864d2b70d57fd70d2c95ba632 | c18c0d2ffda84f605cfca91290ff16ba37ff3bb2 | /hr_attendance_zkteco/controllers/controllers.py | cf8bf03e08346cb41b6594117179eb98745a7544 | []
| no_license | https://github.com/sami-adam/odoo-hr | 0f80f26a7850b472db3c90a71df82e7975c08ff1 | 65b9dc36edba484a886dd7f1bd0943ba02905e8f | refs/heads/main | 2023-05-31T22:51:08.884588 | 2021-07-07T11:42:09 | 2021-07-07T11:42:09 | 383,777,896 | 0 | 0 | null | false | 2021-07-07T11:42:10 | 2021-07-07T11:40:05 | 2021-07-07T11:40:09 | 2021-07-07T11:42:09 | 0 | 0 | 0 | 0 | null | false | false | # -*- coding: utf-8 -*-
from odoo import http
# class HrAttendanceZkteco(http.Controller):
# @http.route('/hr_attendance_zkteco/hr_attendance_zkteco/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/hr_attendance_zkteco/hr_attendance_zkteco/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('hr_attendance_zkteco.listing', {
# 'root': '/hr_attendance_zkteco/hr_attendance_zkteco',
# 'objects': http.request.env['hr_attendance_zkteco.hr_attendance_zkteco'].search([]),
# })
# @http.route('/hr_attendance_zkteco/hr_attendance_zkteco/objects/<model("hr_attendance_zkteco.hr_attendance_zkteco"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('hr_attendance_zkteco.object', {
# 'object': obj
# }) | UTF-8 | Python | false | false | 892 | py | 12 | controllers.py | 7 | 0.623318 | 0.622197 | 0 | 20 | 43.65 | 144 |
romankurnovskii/parsers | 14,370,960,616,689 | 044df45844951beb12e8750b37eb0a70cba9221a | 0f30ebdb88c67361a45a6677806237a7eb78363f | /parseLeroymerlin/leroymerlinparser/items.py | 8c35eef1ec00a4a3916a187d5f1a680dfad7c991 | [
"MIT"
]
| permissive | https://github.com/romankurnovskii/parsers | c8daacbd3650dd167c7f4d2ffe8c90479f113de2 | 9e908fff7fbbfbc9654008d947cfeb0b45318238 | refs/heads/master | 2022-12-25T15:18:04.461059 | 2020-10-13T18:47:31 | 2020-10-13T18:47:31 | 292,517,773 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import scrapy
from scrapy.loader.processors import TakeFirst, MapCompose
import re
def convert_to_int(num):
if num:
return int(num)
def convert_to_float(num):
if num:
num = num.replace(" ", "")
return float(num)
def cleaner_photo(value):
if value[:2] == '//':
return f'http:{value}'
return value
def split_characteristics(value):
characteristic = {}
try:
characteristic['name'] = value.xpath(
'./dt[@class="def-list__term"]/text()').extract_first()
characteristic['value'] = formatting_string(
value.xpath('./dd[@class="def-list__definition"]/text()').extract_first())
except Exception as ex:
print(f'split_characteristics - {ex}')
return characteristic
def formatting_string(string):
if string:
res = re.sub('^\s+|\n|\r|\s+$', '', string)
return res
class LeroymerlinparserItem(scrapy.Item):
name = scrapy.Field(output_processor=TakeFirst())
photo = scrapy.Field(input_processor=MapCompose(cleaner_photo))
price = scrapy.Field(input_processor=MapCompose(
convert_to_float), output_processor=TakeFirst())
currency = scrapy.Field(output_processor=TakeFirst())
link = scrapy.Field(output_processor=TakeFirst())
unit = scrapy.Field(output_processor=TakeFirst())
characteristics = scrapy.Field(
input_processor=MapCompose(split_characteristics))
_id = scrapy.Field(input_processor=MapCompose(
convert_to_int), output_processor=TakeFirst())
| UTF-8 | Python | false | false | 1,536 | py | 37 | items.py | 29 | 0.651693 | 0.651042 | 0 | 52 | 28.538462 | 86 |
erp5/erp5 | 15,315,853,382,223 | 6c697b2378ac20b3c576404aefff9c50184a98fc | f2c29558b6c2c28e30e307c6eb8f1d7a06dffbb4 | /bt5/erp5_base/SkinTemplateItem/portal_skins/erp5_base/CurrencyExchangeLine_asCellRange.py | 80ad4e332733012c91d3f8b1c24d4597c31f738f | []
| no_license | https://github.com/erp5/erp5 | 253e8053b39af58dbce56658ba8eb990ea720cda | 94db42159ce41f3cf7ad285d04d372948064dcb0 | refs/heads/master | 2021-01-23T15:06:33.385280 | 2021-01-21T22:20:46 | 2021-01-22T12:11:07 | 1,761,476 | 20 | 8 | null | false | 2014-12-04T09:07:04 | 2011-05-17T16:00:11 | 2014-12-04T01:43:56 | 2014-12-04T06:26:47 | 516,896 | 29 | 18 | 1 | Python | null | null | currency_exchange_type_list = context.portal_categories.currency_exchange_type.getCategoryChildRelativeUrlList()
resource_list = ['resource/%s' % context.getParentValue().getRelativeUrl()]
price_currency_list = [context.getPriceCurrency(base=True)]
return (currency_exchange_type_list, resource_list, price_currency_list)
| UTF-8 | Python | false | false | 324 | py | 5,838 | CurrencyExchangeLine_asCellRange.py | 4,341 | 0.805556 | 0.805556 | 0 | 5 | 63.8 | 113 |
BeefCakes/CS112-Spring2012 | 9,328,668,982,350 | 35abfc9f5388949c391bb373766dc815a386bfb2 | d8bfd7b8bbb6a5fc98428458d6fe5d9556cdbeb1 | /hw04/sect4_for.py | 241ee4c114e0cdc4c808691278e78f5fd9afe055 | []
| no_license | https://github.com/BeefCakes/CS112-Spring2012 | d6030ae71598a4ad4a1bf2e343a51d4ff47d4176 | 2f403f17b62d79dbf4bdc43e6f14f870324460ec | refs/heads/master | 2021-01-18T10:08:26.074959 | 2012-03-28T15:44:18 | 2012-03-28T15:44:18 | 3,266,345 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
from hwtools import *
print "Section 4: For Loops"
print "-----------------------------"
nums = input_nums()
# 1. What is the sum of all the numbers in nums?
print "1. Sum of",sum(nums)
# 2. Print every even number in nums
print "2. Even numbers:",
evens = []
for i in nums:
if i%2 == 0:
evens.append(i)
print evens
#CODE GOES HERE
# 3. Does nums only contain even numbers?
for i in nums:
if i%2 !=0:
only_even = False
else:
only_even = True
#CODE GOES HERE
print "3.",
if only_even:
print "only even"
else:
print "some odd"
# 4. Generate a list every odd number less than 100. Hint: use range()
odds = []
print "4. Every odd number under 100:",
for i in range(100):
if i%2 !=0:
odds.append(i)
print odds
| UTF-8 | Python | false | false | 795 | py | 43 | sect4_for.py | 43 | 0.597484 | 0.567296 | 0 | 45 | 16.666667 | 70 |
junyang10734/leetcode-python | 16,071,767,648,371 | 5f0f1a2a6908fc7fd996e1fbf7693773ff725234 | 5ce1c0ab1b6147428fc30bcd1698e4d0e53b688e | /40.py | 50a82013ec3e3e79cb254b7bb4d9a9bff1131265 | []
| no_license | https://github.com/junyang10734/leetcode-python | 035b12df3f7d9fc33553140d1eb0692750b44f0a | eff322f04d22ffbc4f9b10e77f97c28aac5c7004 | refs/heads/master | 2023-07-22T11:16:38.740863 | 2023-07-14T00:22:00 | 2023-07-14T00:22:00 | 189,197,380 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 40. Combination Sum II
# backtrack
# 模板
class Solution:
def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
candidates.sort()
res = []
path, pathSum = [], 0
def backtrack(start):
nonlocal pathSum
if pathSum == target:
res.append(path.copy())
return
elif pathSum > target:
return
else:
for i in range(start, len(candidates)):
if i > start and candidates[i] == candidates[i-1]:
continue
path.append(candidates[i])
pathSum += candidates[i]
backtrack(i+1)
path.pop()
pathSum -= candidates[i]
backtrack(0)
return res | UTF-8 | Python | false | false | 866 | py | 651 | 40.py | 650 | 0.454756 | 0.446636 | 0 | 29 | 28.758621 | 85 |
tlktoash/dailycodingproblem | 13,657,996,033,971 | 2d29719925754b587bea43a313f9c865cc4b6ba8 | e31650aeba55b0ce70b789d6609c2d1b37698369 | /python/dynamic-programming/dp_stairs_climbing.py | fd0d5bf123a117f5a7c78ef579cc339808fb0ab7 | []
| no_license | https://github.com/tlktoash/dailycodingproblem | ef1703b7d70d0063b0b57756e8f059fdb8ef0bd2 | 2d4ecccd5cf7ce0a5b430b7aba9598687f863b8d | refs/heads/master | 2018-10-31T02:51:11.509591 | 2018-10-30T04:16:03 | 2018-10-30T04:16:03 | 142,648,363 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # A child is climbing up a staircase with n steps, and can hop either 1 step, 2 steps, or 3
# steps at a time. Implement a method to count how many possible ways the child can jump up the stairs.
#
# Example:
#
# Number of stairs : 3
#
# Number of ways = 4 ( {1,1,1}, {1,2}, {2,1}, {3} )
def possible_ways_rec(n):
if n < 1:
return 0
return 1 + possible_ways_rec(n - 1) + possible_ways_rec(n - 2) + possible_ways_rec(n - 3)
def possible_ways_dp(n, memz):
if n < 1:
return 0
if memz[n] > 0:
return memz[n]
memz[n] = 1 + possible_ways_dp(n - 1, memz) + possible_ways_dp(n - 2, memz) + \
possible_ways_dp(n - 3, memz)
return memz[n]
print(possible_ways_dp(500, [0] * 501))
| UTF-8 | Python | false | false | 744 | py | 107 | dp_stairs_climbing.py | 101 | 0.581989 | 0.537634 | 0 | 31 | 22.967742 | 104 |
jepemo/basic-agent | 12,962,211,347,926 | 53edbd0fc4008919c306daf6e3716e3650625df1 | 837d9427101ab1e904a602096c637ff4525577bf | /tests/test_messages.py | 2ef9ab7a46ae538c481cd5f6ddbe1e9edf3e04b1 | [
"MIT"
]
| permissive | https://github.com/jepemo/basic-agent | bead69fddc5812f6bfa3f0dacc6fc8f76a6c7e53 | 4c0d82ed4c68ddc0138374538fba3cec9090dc16 | refs/heads/master | 2020-03-07T06:08:03.272881 | 2018-04-30T15:57:45 | 2018-04-30T15:57:45 | 127,314,120 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# This file is part of bagent
#
# Copyright (C) 2018-present Jeremies Pérez Morata
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import unittest
from bagent import *
MSG_RECEIVED1 = None
MSG_RECEIVED2 = None
async def agent1a(ctx):
pid = await ctx.start(agent1b)
await ctx.send(pid, "msg1")
async def agent1b(ctx):
global MSG_RECEIVED1
(sender, msg) = await ctx.recv()
MSG_RECEIVED1 = msg
async def agent2a(ctx):
pid = await ctx.start(agent2b)
await ctx.send(pid, "msg2")
async def agent2b(ctx):
global MSG_RECEIVED2
async with ctx.get_message() as m:
MSG_RECEIVED2 = m.msg
class TestMessages(unittest.TestCase):
def test_recv(self):
with get_agent_context() as ctx:
ctx.start(agent1a)
self.assertEqual(MSG_RECEIVED1, "msg1")
def test_get_message(self):
with get_agent_context() as ctx:
ctx.start(agent2a)
self.assertEqual(MSG_RECEIVED2, "msg2")
| UTF-8 | Python | false | false | 1,571 | py | 10 | test_messages.py | 9 | 0.678981 | 0.663057 | 0 | 49 | 30.040816 | 71 |
Yang-Jianlin/python-learn | 14,431,090,145,403 | ed7c6777d4a41dcd76d27153ab4f5b79507a460a | 7b8fd24cc6dbed385173a3857c06f2935724ace6 | /LeetCode/T-7.py | bc1028ffb35ed9c61bb2955f3ee58de8e18dc4b8 | []
| no_license | https://github.com/Yang-Jianlin/python-learn | eb1cfd731039a8e375827e80b8ef311f9ed75bfb | 048cde2d87e4d06a48bd81678f6a82b02e7c4cb4 | refs/heads/master | 2023-07-12T16:35:13.489422 | 2021-08-23T11:54:10 | 2021-08-23T11:54:10 | 357,464,365 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def reverse(self, x: int) -> int:
if x == 0:
return 0
res = 0
if x < 0:
x = str(x)[1:]
x = x[::-1]
x = '-' + x
res = int(x)
elif x > 0:
x = str(x)
x = x[::-1]
res = int(x)
if res > 2**31-1 or res <-2**31:
res = 0
return res
if __name__ == '__main__':
s = Solution()
x = -120
print(s.reverse(x))
| UTF-8 | Python | false | false | 485 | py | 320 | T-7.py | 307 | 0.327835 | 0.28866 | 0 | 23 | 20.086957 | 40 |
happyhairfish/ctg2 | 5,102,421,194,646 | b8c7a5946583de741f1bdde261c524192b871d99 | 54288f6ba493d650d67e10ef0505170260f5abda | /test.py | 90b4db72a001ce266cf4f1ccb8998faa59cef9e7 | [
"MIT"
]
| permissive | https://github.com/happyhairfish/ctg2 | e70d436ea129b61c7950bce803c50b05cc269d44 | d5b5ed1281b55e3a8b00ef4551fe6b233d153aad | refs/heads/master | 2020-08-05T14:26:09.517140 | 2016-11-25T00:34:08 | 2016-11-25T00:34:08 | 67,298,314 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
dado1 = random.randint(1,6)
print('dado1',dado1)
dado2 = random.randint(1,6)
print('dado2',dado2)
suma=dado1+dado2
print('suma',suma)
while ((suma==7) or (dado1==6 and dado2==6) or (((dado1==6) and (dado2%2==1)) or ((dado2==6) and (dado1%2==1)))):
dado1 = random.randint(1,6)
dado2 = random.randint(1,6)
suma=dado1+dado2
print('dado1',dado1)
print('dado2',dado2)
print('suma',suma)
| UTF-8 | Python | false | false | 422 | py | 22 | test.py | 2 | 0.637441 | 0.545024 | 0 | 16 | 25.375 | 113 |
SeattleTestbed/integrationtests | 12,738,873,039,846 | 4f652d048724dd25ca7129b6a432da01e3ce42a0 | 83721ff65be6fce299d74b9e0741bd838c679c5d | /pushsoftwareupdate/resetblackboxupdate.py | bc75aa054dea578f706be6a6ebac0c02b7e1288c | [
"MIT"
]
| permissive | https://github.com/SeattleTestbed/integrationtests | 602f0db994013b7d0fb02402306d94bf4b87390e | ae6cf2f819ff7e95ec60abeac8dc3886295825c7 | refs/heads/master | 2020-12-25T17:16:26.291603 | 2017-05-15T11:41:07 | 2017-05-15T11:41:07 | 20,136,902 | 0 | 3 | null | false | 2016-08-01T18:15:07 | 2014-05-24T18:44:48 | 2015-01-12T16:58:29 | 2016-08-01T18:15:07 | 297 | 0 | 4 | 9 | Python | null | null | """
<Program Name>
resetblackboxupdate.py
<Started>
December 17, 2008
<Author>
Brent Couvrette
couvb@cs.washington.edu
<Purpose>
This script sets up the environment for pushsoftareupdate to run correctly.
This includes running it through the repy preprocessor, making sure it knows
where all its imports are, clearing out the blackbox update site, initializing
the current_folder_num, current_key_num, and current_update_num files, creating
the initial keypair, and creating the initial installers to be used.
<Usage>
This script should be run once before starting the update blackbox test cron
job. After that, it should only be run again if the blackbox test system
becomes so corrupt that it needs to be completely started over.
Note that it makes use of the constants in pushsoftwareupdate, so that should
be confirmed correct before running this.
Also note that for simplicity, this uses some linux specific features.
"""
import subprocess
import os
# Runs the repy preprocessor on pushsoftwareupdate.mix, then imports
# the resulting pushsoftwareupdate.py. Assumes that we are running from
# within the integrationtests folder in a standard svn checkout of trunk.
def preprocess():
# Move to the seattlelib directory
orig_dir = os.getcwd()
os.chdir('../seattlelib/')
# Run the preprocessor
subprocess.call('python repypp.py ../integrationtests/pushsoftwareupdate.mix \
../integrationtests/pushsoftwareupdate.py', shell=True)
# Get back to our original directory
os.chdir(orig_dir)
# Need to copy over certain files so that the import repyportability works.
subprocess.call('cp ../portability/repyportability.py ../repy/* .',
shell=True)
# Need to copy over certain files so that the import make_base_installers
# works.
subprocess.call('cp ../dist/make_base_installers.py ../dist/clean_folder.py \
../dist/build_installers.py .', shell=True)
# Importing pushsoftwareupdate should now be successful.
print "Finished preprocessing"
# clears out the folder specified as the root of the update site in
# pushsoftwareupdate.py. Also clears out the folder containing the keypairs
# in the current directory. We only remove the specific things we make,
# just in case say the update_base_directory gets set to /. Just doing
# rm -r * there would almost certainly be extremely devastating.
def clear_blackbox():
import pushsoftwareupdate
# Remove the file keeping track of the folder number
subprocess.call('rm ' + pushsoftwareupdate.update_base_directory +
pushsoftwareupdate.foldernum_fn, shell=True)
# Remove the file keeping track of the key number
subprocess.call('rm ' + pushsoftwareupdate.update_base_directory +
pushsoftwareupdate.keynum_fn, shell=True)
# Remove the file keeping track of the update number
subprocess.call('rm ' + pushsoftwareupdate.update_base_directory +
pushsoftwareupdate.updatenum_fn, shell=True)
# Remove all update folders
subprocess.call('rm -r ' + pushsoftwareupdate.update_base_directory +
'update_location*', shell=True)
# Remove any current installed files
subprocess.call('rm -r ' + pushsoftwareupdate.update_base_directory +
pushsoftwareupdate.installer_folder, shell=True)
# Remove all previous keypairs
subprocess.call('rm -r ' + pushsoftwareupdate.key_folder, shell=True)
print "Finished clearing"
# Creates and initializes the counting files
def init_counts():
import pushsoftwareupdate
# init the folder count
numfile = open(pushsoftwareupdate.update_base_directory +
pushsoftwareupdate.foldernum_fn, 'w')
numfile.write('0')
numfile.close()
# init the key count
numfile = open(pushsoftwareupdate.update_base_directory +
pushsoftwareupdate.keynum_fn, 'w')
numfile.write('0')
numfile.close()
# init the update count
numfile = open(pushsoftwareupdate.update_base_directory +
pushsoftwareupdate.updatenum_fn, 'w')
numfile.write('0')
numfile.close()
print "Finished creating count files"
# Creates the folder to contain the keys, as well as the first keypair
def init_first_keys():
import pushsoftwareupdate
os.mkdir(pushsoftwareupdate.key_folder)
pushsoftwareupdate.makekeys(0)
print "Finished making keys"
# Creates the installer folder, as well as the initial installers
def init_first_installers():
import pushsoftwareupdate
os.mkdir(pushsoftwareupdate.update_base_directory +
pushsoftwareupdate.installer_folder)
trunk = pushsoftwareupdate.svn_trunk_location
pubkey = pushsoftwareupdate.key_folder + 'pubkey0'
privkey = pushsoftwareupdate.key_folder + 'privkey0'
updatesite = pushsoftwareupdate.update_base_url + 'update_location0'
pushsoftwareupdate.create_installers(trunk, pubkey, privkey, updatesite,
pubkey, 0)
print "Finished making installers"
def main():
preprocess()
clear_blackbox()
init_counts()
init_first_keys()
init_first_installers()
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 5,006 | py | 25 | resetblackboxupdate.py | 23 | 0.748102 | 0.745306 | 0 | 135 | 36.081481 | 81 |
entrepreneur-interet-general/CIS-front | 3,530,463,143,551 | 7439df6d23e0759658515dd07bfc86ac53d75166 | 6fa8d2766d5738c11cec8064f04e70ca884221c5 | /cis/app/settings/app_nomenclature_tags.py | 47c1889cd702ff62777b16aab6a5eba6ca23b167 | [
"MIT"
]
| permissive | https://github.com/entrepreneur-interet-general/CIS-front | 2b0875288f1488051eece1637ee6075d3eec407d | 1d07e2d25e9da54a0ba6868d251a072028de4e0c | refs/heads/master | 2020-03-07T22:26:23.284724 | 2019-03-28T15:23:29 | 2019-03-28T15:23:29 | 127,753,949 | 8 | 3 | MIT | false | 2019-03-28T15:24:09 | 2018-04-02T12:35:26 | 2019-03-28T15:23:35 | 2019-03-28T15:24:08 | 218,132 | 4 | 4 | 155 | JavaScript | false | null | # -*- encoding: utf-8 -*-
# import os
# import json
from .. import os, inspect, log_cis, pformat, json, json_util
cwd = os.getcwd()
json_folderpath = cwd + "/app/static/json/"
### + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ###
### NORMALIZATION TAGS / UTILS
### + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ###
def normalization_as_json( var_name, norm_dict, filepath=json_folderpath ) :
"""
dumps all norm_dict in a pseudo-json file
"""
# locals_function = locals()
# log_cis.info("... normalization_as_json / locals_function : %s", locals_function )
# intro_var = [ k for k,v in locals_function.iteritems() ]
# log_cis.info("... normalization_as_json / intro_var : %s", intro_var )
log_cis.info("... normalization_as_json / file : %s.js", var_name )
full_filepath = filepath + var_name + '.js'
json_file = open(full_filepath, "w")
json_file.write( "var " + var_name + " = " )
# json_file.write('[')
json_file.write(json.dumps(norm_dict,indent=4, default=json_util.default))
# json_file.write(',')
# json_file.write(']')
### + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ###
### NORMALIZATION / CHOICES FOR CHECKBOXES INPUTS
### + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ###
# CHOICES_DOMAINS = [
# (u"life_env" , u"Cadre de vie"),
# (u"sust_env" , u"Développement durable"),
# (u"eco_dev" , u"Développement économique"),
# (u"habitat" , u"Habitat"),
# (u"inclusion " , u"Inclusion"),
# (u"social_net" , u"Lien social"),
# (u"health_sport" , u"Santé et sport"),
# (u"employment" , u"Travail"),
# ]
# CHOICES_METHODS = [
# (u"cooperate" , u"Coopération"),
# (u"evaluate" , u"Evaluation"),
# (u"govern" , u"Gouvernance"),
# (u"philantropy" , u"Mécénat"),
# (u"mediate" , u"Médiation"),
# (u"participate" , u"Participation"),
# (u"research" , u"Recherche"),
# ]
# CHOICES_PUBLICS = [
# (u"" , u"Tous publics"),
# (u"handicap" , u"Handicap"),
# (u"youth" , u"Jeunesse"),
# (u"senior" , u"Seniors"),
# ]
# CHOICES_FILTERS_TAGS = [
# { "domains" : {
# "fullname" : u"Domaines",
# "choices" : CHOICES_DOMAINS ,
# }
# },
# # { "geoloc" : {
# # "fullname" : u"Localisations",
# # "choices" : [] ,
# # }
# # },
# # { "partners" : { "fullname" : u"Sourceurs",
# # "choices" : [] ,
# # }
# # },
# { "publics" : { "fullname" : u"Publics",
# "choices" : CHOICES_PUBLICS ,
# }
# },
# { "methods" : {
# "fullname" : u"Méthodes",
# "choices" : CHOICES_METHODS ,
# }
# },
# ]
# CHOICES_FILTERS_PARTNERS = [
# { "partners" : { "fullname" : u"Sourceurs",
# "choices" : [] ,
# }
# },
# ]
# CHOICES_FILTERS_GEOLOC = [
# { "geoloc" : {
# "fullname" : u"Localisations",
# "choices" : [] ,
# }
# },
# ]
# normalization_as_json( "CHOICES_FILTERS_TAGS", CHOICES_FILTERS_TAGS )
# normalization_as_json( "CHOICES_FILTERS_PARTNERS", CHOICES_FILTERS_PARTNERS )
# normalization_as_json( "CHOICES_FILTERS_GEOLOC", CHOICES_FILTERS_GEOLOC )
CHOICES_FILTERS_TAGS = [
{ "name" : u"domains_",
"fullname" : u"Domaines",
"choices" : [
{"name" : u"life_env" , "fullname" : u"Cadre de vie"},
{"name" : u"sust_env" , "fullname" : u"Développement durable"},
{"name" : u"eco_dev" , "fullname" : u"Développement économique"},
{"name" : u"habitat" , "fullname" : u"Habitat"},
{"name" : u"inclusion" , "fullname" : u"Inclusion"},
{"name" : u"social_net" , "fullname" : u"Lien social"},
{"name" : u"health_sport" , "fullname" : u"Santé et sport"},
{"name" : u"employment" , "fullname" : u"Travail"},
]
},
{ "name" : u"pubics_",
"fullname" : u"Publics",
"choices" : [
# {"name" : u"" , "fullname" : u"Tous publics"},
{"name" : u"handicap" , "fullname" : u"Handicap"},
{"name" : u"youth" , "fullname" : u"Jeunesse"},
{"name" : u"senior" , "fullname" : u"Seniors"},
]
},
{ "name" : u"methods_",
"fullname" : u"Méthodes",
"choices" : [
{"name" : u"cooperate" , "fullname" : u"Coopération"},
{"name" : u"evaluate" , "fullname" : u"Evaluation"},
{"name" : u"govern" , "fullname" : u"Gouvernance"},
{"name" : u"philantropy" , "fullname" : u"Mécénat"},
{"name" : u"mediate" , "fullname" : u"Médiation"},
{"name" : u"participate" , "fullname" : u"Participation"},
{"name" : u"research" , "fullname" : u"Recherche"},
] ,
},
]
CHOICES_FILTERS_PARTNERS = [
{ "name" : u"sources_",
"fullname" : u"Sourceurs",
"choices" : [
# {"id" : "aaaaaaaaaaaaaaaaaaaaaaaa" , "name" : u"test", "fullname" : u"test"},
{"id" : "5a9da08d0a82868f49cd3816" , "name" : u"AG2R la mondiale", "fullname" : u"AG2R la mondiale"},
{"id" : "5aa3de630a828651b2ebfe3e" , "name" : u"Apriles", "fullname" : u"Apriles"},
{"id" : "5a9d9fd80a82868f4973d2d3" , "name" : u"Avise", "fullname" : u"Avise"},
{"id" : "5ab3f5fe0a8286585d3a307e" , "name" : u"Bretagne Creative", "fullname" : u"Bretagne Creative"},
{"id" : "5aabf8250a8286bf097cec82" , "name" : u"Fondation Daniel et Nina Carasso", "fullname" : u"Fondation Daniel et Nina Carasso"},
{"id" : "5aaff7360a82860b06c94a6d" , "name" : u"Fondation Veolia", "fullname" : u"Fondation Veolia"},
{"id" : "5aaff8aa0a82860b06c94a6e" , "name" : u"Fondation Vinci", "fullname" : u"Fondation Vinci"},
{"id" : "5aaff93f0a82860b06c94a6f" , "name" : u"MOT", "fullname" : u"MOT"},
{"id" : "5aaffa390a82860b06c94a70" , "name" : u"Semeoz", "fullname" : u"Semeoz"},
{"id" : "5ab410c00a82863566d5fd05" , "name" : u"My Positive Impact", "fullname" : u"My Positive Impact"},
] ,
},
]
CHOICES_FILTERS_GEOLOC = [
{ "name" : u"sources_",
"fullname" : u"Sourceurs",
"choices" : [] ,
},
]
### create ids:
def add_id_key( list_of_dicts ) :
    """Derive a stable "id" for every filter choice.

    For each filter group in *list_of_dicts*, every dict in its
    ``"choices"`` list gets an ``"id"`` built as
    ``group_name + choice_name`` (e.g. ``"pubics_" + "youth"`` ->
    ``"pubics_youth"``), matching the keys of CATEGORIES_CIS_DICT_FLAT.

    The structure is mutated in place AND returned, so callers may use
    either the return value or the original reference.

    Note: the previous ``filter_ != []`` / ``else`` branch was removed —
    a choice dict never compares equal to ``[]``, so the branch was
    unreachable, and had it ever run on a literal ``[]`` choice it would
    have raised TypeError (``[]["id"] = u""``).
    """
    for filters_list in list_of_dicts :
        for filter_ in filters_list["choices"] :
            filter_["id"] = filters_list["name"] + filter_["name"]
    return list_of_dicts
# Generate synthetic ids for tag and geoloc filters; partner filters keep
# their hard-coded database ids and are therefore left untouched (see the
# commented-out line below, kept deliberately disabled).
CHOICES_FILTERS_TAGS = add_id_key(CHOICES_FILTERS_TAGS)
# CHOICES_FILTERS_PARTNERS = add_id_key(CHOICES_FILTERS_PARTNERS)
CHOICES_FILTERS_GEOLOC = add_id_key(CHOICES_FILTERS_GEOLOC)
# Persist each filter structure under its variable name.
# NOTE(review): normalization_as_json is defined earlier in this file —
# presumably it serializes the structure to a JSON file; confirm its contract.
normalization_as_json( "CHOICES_FILTERS_TAGS", CHOICES_FILTERS_TAGS )
normalization_as_json( "CHOICES_FILTERS_PARTNERS", CHOICES_FILTERS_PARTNERS )
normalization_as_json( "CHOICES_FILTERS_GEOLOC", CHOICES_FILTERS_GEOLOC )
### + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ###
### NORMALIZATION / VARS
### + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ###
### Maps each filter-category prefix ("pubics_", "domains_", "methods_") and
### tag name to the list of internal 3-letter CIS nomenclature codes it
### covers (codes described in NOMENCLATURE_CIS_DICT).
CATEGORIES_CIS_DICT = {
    "pubics_": {
        u"handicap": ["HAN"],
        u"youth": ["JEU"],
        u"senior": ["SEN"],
    },
    "domains_": {
        u"life_env": ["NUM", "LOI", "SAN", "CUL", "AME", "URB", "HAB"],
        u"sust_env": ["BIO", "EAU", "CLI", "DEC", "ENE", "AGR"],
        u"eco_dev": ["ECO", "FIN", "COM", "INS", "SER", "EMP"],
        u"habitat": ["URB", "HAB", "MOB", "AME", "ENE"],
        u"inclusion": ["HAB", "INS", "DRO", "CIT", "PAR", "EDU", "MED", "MOB", "SAN", "EMP"],
        u"social_net": ["DRO", "CIT", "COH", "CUL", "SPO", "LOI"],
        u"health_sport": ["SPO", "SAN", "AGR", "HAN", "SEN", "JEU"],
        u"employment": ["INS", "EMP", "FOR", "EDU"],
    },
    "methods_": {
        u"cooperate": ["COO"],
        u"evaluate": ["EVA"],
        u"govern": ["GOU"],
        u"philantropy": ["MEC"],
        u"mediate": ["MED"],
        u"participate": ["PAR"],
        u"research": ["REC"],
    },
}
# Persist the raw two-level category mapping before flattening it.
normalization_as_json( "CATEGORIES_CIS_DICT", CATEGORIES_CIS_DICT )
# Flatten the two-level mapping into one dict keyed by "<category><tag>"
# (e.g. "pubics_youth"), matching the ids produced by add_id_key().
# NOTE: .iteritems() is Python 2 only; under Python 3 this would need .items().
CATEGORIES_CIS_DICT_FLAT = {}
for categ, metatags in CATEGORIES_CIS_DICT.iteritems() :
    for k,v in metatags.iteritems() :
        CATEGORIES_CIS_DICT_FLAT[ categ + k ] = v
normalization_as_json( "CATEGORIES_CIS_DICT_FLAT", CATEGORIES_CIS_DICT_FLAT )
# Reference nomenclature: 3-letter CIS code -> human-readable French label
# ("fullname") plus a short free-text scope ("description").  These codes are
# the values referenced by CATEGORIES_CIS_DICT and by the "codes" lists of
# NORMALIZATION_TAGS_SOURCES_CIS below.
NOMENCLATURE_CIS_DICT = {
	"AGR":{"fullname":u"Agriculture","description":u"alimentation, foret"},
	"AME":{"fullname":u"Aménagement","description":u"espaces publics, gestion de l'espace"},
	"AUT":{"fullname":u"Autres","description":u"société civile, Europe, observation, équipements publics"},
	"BIO":{"fullname":u"Biodiversité","description":u"environnement, espaces verts"},
	"CIT":{"fullname":u"Citoyenneté","description":u"citoyenneté, engagement, mobilisation citoyenne"},
	"CLI":{"fullname":u"Climat","description":u"risques naturels"},
	# NOTE(review): fullname u"energie" looks wrong for COH — the description
	# is about cohesion/solidarity and ENE already means energy; probably
	# intended as u"Cohésion".  Left unchanged pending confirmation.
	"COH":{"fullname":u"energie","description":u"solidarités, lien social, vivre ensemble, autonomie, isolement"},
	"COM":{"fullname":u"Communication","description":u""},
	"COP":{"fullname":u"Achats","description":u"commande publique, achats durables"},
	"COR":{"fullname":u"Commerce","description":u"commerce équitable"},
	"CUL":{"fullname":u"Culture","description":u"arts, patrimoine"},
	"DEC":{"fullname":u"Déchets","description":u"recyclage, économie circulaire"},
	"EAU":{"fullname":u"Eau","description":u""},
	"ECO":{"fullname":u"Economie","description":u"développement économique, ESS, entreprenariat, monnaie locale, assurance, auto-production "},
	"EDU":{"fullname":u"Education","description":u"orientation, mobilité internationale, enseignement supérieur, éducation populaire"},
	"EMP":{"fullname":u"Emploi","description":u"groupement employeurs"},
	"ENE":{"fullname":u"Energie","description":u""},
	"EVA":{"fullname":u"Evaluation","description":u""},
	"FIN":{"fullname":u"Finances","description":u"finance solidaire, micro crédit, financements européens"},
	"FOR":{"fullname":u"Formation","description":u"apprentissage, enseignement"},
	"GOU":{"fullname":u"Gouvernance","description":u"organisation, élus, RH, RSE"},
	"HAB":{"fullname":u"Habitat","description":u"logement, éco-construction, travaux publics, rénovation"},
	"HAN":{"fullname":u"Handicap","description":u""},
	"INS":{"fullname":u"Insertion","description":u"insertion professionnelle"},
	"JEU":{"fullname":u"Jeunesse","description":u"enfance"},
	"DRO":{"fullname":u"Droits","description":u"justice, accès aux droits, lutte contre discriminations, harcèlement, sécurité, protection, égalité homme femmes, non recours"},
	"LOI":{"fullname":u"Loisirs","description":u"vacances"},
	"MED":{"fullname":u"Médiation","description":u""},
	"MOB":{"fullname":u"Mobilité","description":u"permis de conduire, transports"},
	"NUM":{"fullname":u"Numérique","description":u""},
	"PAR":{"fullname":u"Participation","description":u"démarches participatives, mobilisation locale, participation usagers"},
	"REC":{"fullname":u"Recherche ","description":u""},
	"SAN":{"fullname":u"Santé","description":u""},
	"SEN":{"fullname":u"Seniors","description":u"intergénérationnel"},
	"SER":{"fullname":u"Services","description":u"services à la population, accueil de jour, accompagnement, animation"},
	"SPO":{"fullname":u"Sports","description":u""},
	"TOU":{"fullname":u"Tourisme","description":u""},
	"URB":{"fullname":u"Urbanisme","description":u""},
	"COO":{"fullname":u"Coopération","description":u"coopération, partenariats, réseaux"},
	"MEC":{"fullname":u"Mécénat","description":u"mécénat, mécénat d'entreprises"}
}
normalization_as_json( "NOMENCLATURE_CIS_DICT", NOMENCLATURE_CIS_DICT )
# List-of-dicts view of the same nomenclature, convenient for templating.
# NOTE: .iteritems() is Python 2 only; under Python 3 this would need .items().
NOMENCLATURE_CIS_LIST = [ {"code" : k , "fullname" : v["fullname"], "description" : v["description"] } for k,v in NOMENCLATURE_CIS_DICT.iteritems() ]
### TEMPORARY SOLUTION -- tag-to-code mapping maintained by hand in this spreadsheet:
### https://docs.google.com/spreadsheets/d/1c5m5sxza2ehtBPB7mCc8GWKG21pxTVPSuIy0xQ6mYSo/edit#gid=694276620
NORMALIZATION_TAGS_SOURCES_CIS = [
{"name":u"Energie","codes":["ENE"],"from":u"ADEME innovation sociale"},
{"name":u"Alimentation","codes":["AGR"],"from":u"ADEME innovation sociale"},
{"name":u"Partage","codes":["AUT"],"from":u"ADEME innovation sociale"},
{"name":u"Autonomie","codes":["SEN","HAN","COH"],"from":u"ODAS - APRILES"},
{"name":u"Insertion","codes":["INS","FOR","COH"],"from":u"ODAS - APRILES"},
{"name":u"Enfance / Famille","codes":["JEU","EDU","COH"],"from":u"ODAS - APRILES"},
{"name":u"Vie des territoires","codes":["COO","PAR","COH"],"from":u"ODAS - APRILES"},
{"name":u"Santé","codes":["SAN","SER","COH"],"from":u"ODAS - APRILES"},
{"name":u"identité","codes":["AUT"],"from":u"Atlaas - villes internet"},
{"name":u"vie associative","codes":["AUT"],"from":u"Atlaas - villes internet"},
{"name":u"services publics","codes":["AUT"],"from":u"Atlaas - villes internet"},
{"name":u"démocratie locale","codes":["GOU","PAR","COO"],"from":u"Atlaas - villes internet"},
{"name":u"vie quotidienne","codes":["AUT"],"from":u"Atlaas - villes internet"},
{"name":u"cohésion sociale","codes":["COH"],"from":u"Atlaas - villes internet"},
{"name":u"solidarités","codes":["COH"],"from":u"Atlaas - villes internet"},
{"name":u"territoire durable","codes":["GOU","AME"],"from":u"Atlaas - villes internet"},
{"name":u"éducation","codes":["EDU"],"from":u"Atlaas - villes internet"},
{"name":u"médiation","codes":["MED"],"from":u"Atlaas - villes internet"},
{"name":u"coopération","codes":["GOU","COO"],"from":u"Atlaas - villes internet"},
{"name":u"culture et patrimoine","codes":["CUL"],"from":u"Atlaas - villes internet"},
{"name":u"emploi","codes":["EMP"],"from":u"Atlaas - villes internet"},
{"name":u"innovation","codes":["AUT"],"from":u"Atlaas - villes internet"},
{"name":u"développement économique ","codes":["ECO"],"from":u"Atlaas - villes internet"},
{"name":u"attractivité","codes":["ECO"],"from":u"Atlaas - villes internet"},
{"name":u"Alimentation","codes":["AGR"],"from":u"Avise"},
{"name":u"Commerce équitable","codes":["COR"],"from":u"Avise"},
{"name":u"Développement durable","codes":["BIO","ECO","COH"],"from":u"Avise"},
{"name":u"Économie circulaire","codes":["ECO"],"from":u"Avise"},
{"name":u"Égalité femmes hommes","codes":["DRO"],"from":u"Avise"},
{"name":u"Emploi","codes":["EMP"],"from":u"Avise"},
{"name":u"Entrepreneuriat social","codes":["ECO"],"from":u"Avise"},
{"name":u"Environnement","codes":["BIO"],"from":u"Avise"},
{"name":u"Evaluation de l'impact social","codes":["EVA"],"from":u"Avise"},
{"name":u"Finance solidaire","codes":["FIN"],"from":u"Avise"},
{"name":u"Groupement d'employeurs","codes":["EMP"],"from":u"Avise"},
{"name":u"Habitat","codes":["HAB"],"from":u"Avise"},
{"name":u"Handicap","codes":["HAN"],"from":u"Avise"},
{"name":u"Innovation sociale","codes":["GOU","AUT"],"from":u"Avise"},
{"name":u"Insertion par l’activité économique","codes":["INS"],"from":u"Avise"},
{"name":u"Jeunesse","codes":["JEU"],"from":u"Avise"},
{"name":u"Lien social","codes":["COH"],"from":u"Avise"},
{"name":u"Mécénat","codes":["MEC"],"from":u"Avise"},
{"name":u"Mobilisation citoyenne","codes":["CIT"],"from":u"Avise"},
{"name":u"Mobilité","codes":["MOB"],"from":u"Avise"},
{"name":u"Numérique","codes":["NUM"],"from":u"Avise"},
{"name":u"Partenariats associations entreprises","codes":["GOU","COO"],"from":u"Avise"},
{"name":u"Quartiers prioritaires","codes":["COH"],"from":u"Avise"},
{"name":u"Silver économie","codes":["SEN"],"from":u"Avise"},
{"name":u"Accompagnement vers l’emploi coaching tri textile","codes":["EMP"],"from":u"Avise"},
{"name":u"Action sanitaire et sociale","codes":["SAN","COH"],"from":u"Avise"},
{"name":u"Agriculture","codes":["AGR"],"from":u"Avise"},
{"name":u"Agriculture biologique","codes":["AGR"],"from":u"Avise"},
{"name":u"Agriculture tourisme et aménagement du territoire","codes":["AGR","TOU","AME"],"from":u"Avise"},
{"name":u"Agro-alimentaire","codes":["AGR"],"from":u"Avise"},
{"name":u"Agroécologie et permaculture","codes":["AGR"],"from":u"Avise"},
{"name":u"Aide alimentaire","codes":["SAN"],"from":u"Avise"},
{"name":u"Alimentation","codes":["AGR"],"from":u"Avise"},
{"name":u"Alimentation solidaire","codes":["AGR","COH"],"from":u"Avise"},
{"name":u"Animation urbanisme expérimentation","codes":["AME","AUT"],"from":u"Avise"},
{"name":u"Archivage papier et numérique GED","codes":["NUM"],"from":u"Avise"},
{"name":u"Boulangerie","codes":["AGR"],"from":u"Avise"},
{"name":u"Boulangerie artisanale","codes":["AGR","COM"],"from":u"Avise"},
{"name":u"Café associatif","codes":["COM"],"from":u"Avise"},
{"name":u"Collecte tri réparation vente d’occasion","codes":["DEC","ECO"],"from":u"Avise"},
{"name":u"Commerce multiservice","codes":["COM","ECO"],"from":u"Avise"},
{"name":u"Commerce équitable (distribution de produits)","codes":["COM","COH"],"from":u"Avise"},
{"name":u"Commercialisation de produits agricoles en circuits courts","codes":["COM"],"from":u"Avise"},
{"name":u"Conciergerie","codes":["SER"],"from":u"Avise"},
{"name":u"Conciergerie d’entreprises","codes":["SER","ECO"],"from":u"Avise"},
{"name":u"Culture","codes":["CUL"],"from":u"Avise"},
{"name":u"Distribution de produits biologiques et écologiques","codes":["ECO","BIO"],"from":u"Avise"},
{"name":u"Eco-construction","codes":["HAB","CLI"],"from":u"Avise"},
{"name":u"Education au développement durable","codes":["EDU","CLI","BIO"],"from":u"Avise"},
{"name":u"Emploi insertion","codes":["EMP","INS"],"from":u"Avise"},
{"name":u"Environnement et déchets","codes":["BIO","DEC"],"from":u"Avise"},
{"name":u"Enérgie","codes":["ENE"],"from":u"Avise"},
{"name":u"Filière brassicole (agriculture)","codes":["AGR"],"from":u"Avise"},
{"name":u"Finance","codes":["FIN"],"from":u"Avise"},
{"name":u"Gestion d'équipement informatique","codes":["NUM"],"from":u"Avise"},
{"name":u"Gestion des déchets","codes":["DEC"],"from":u"Avise"},
{"name":u"Habitat (bailleur social)","codes":["HAB"],"from":u"Avise"},
{"name":u"Habitat social","codes":["HAB","COH"],"from":u"Avise"},
{"name":u"Informatique","codes":["NUM"],"from":u"Avise"},
{"name":u"Insertion","codes":["INS"],"from":u"Avise"},
{"name":u"Insertion par l’activité économique recyclage et vente de vêtements","codes":["INS","ECO"],"from":u"Avise"},
{"name":u"Insertion sociale","codes":["INS"],"from":u"Avise"},
{"name":u"Logement","codes":["HAB"],"from":u"Avise"},
{"name":u"Lutte contre le gaspillage alimentaire","codes":["AGR","DEC"],"from":u"Avise"},
{"name":u"Maraîchage scierie éco-construction","codes":["AGR","HAB"],"from":u"Avise"},
{"name":u"Microfinance","codes":["FIN"],"from":u"Avise"},
{"name":u"Mobilité","codes":["MOB"],"from":u"Avise"},
{"name":u"Mécénat de compétences","codes":["MEC"],"from":u"Avise"},
{"name":u"Médiation sociale","codes":["COH"],"from":u"Avise"},
{"name":u"NTIC personnes âgées","codes":["NUM","SEN"],"from":u"Avise"},
{"name":u"Numérique","codes":["NUM"],"from":u"Avise"},
{"name":u"Numérique et inclusion sociale","codes":["NUM","COH"],"from":u"Avise"},
{"name":u"Numérique et multimédia","codes":["NUM"],"from":u"Avise"},
{"name":u"PA/PH","codes":["SER"],"from":u"Avise"},
{"name":u"Paysagisme","codes":["AME"],"from":u"Avise"},
{"name":u"Petite enfance","codes":["JEU"],"from":u"Avise"},
{"name":u"Petite enfance jeunesse","codes":["JEU"],"from":u"Avise"},
{"name":u"Promotion des arts des techniques et des professions liés à la Production et à l’utilisation de l’ocre et des matériaux de la couleur.","codes":["CUL"],"from":u"Avise"},
{"name":u"Recyclage","codes":["DEC"],"from":u"Avise"},
{"name":u"Recyclage des encombrants","codes":["DEC"],"from":u"Avise"},
{"name":u"Restauration","codes":["AGR"],"from":u"Avise"},
{"name":u"Robotique et santé","codes":["SAN","NUM"],"from":u"Avise"},
{"name":u"Réinsertion par la culture et le tourisme","codes":["CUL"],"from":u"Avise"},
{"name":u"Rénovation et location de logements sociaux","codes":["HAB","COH"],"from":u"Avise"},
{"name":u"Réparation et recyclage de vélos","codes":["DEC"],"from":u"Avise"},
{"name":u"Santé","codes":["SAN"],"from":u"Avise"},
{"name":u"Santé et action sociale","codes":["SAN","COH"],"from":u"Avise"},
{"name":u"Sciences citoyennes et technologies participatives","codes":["CIT","PAR","REC"],"from":u"Avise"},
{"name":u"Second-œuvre bâtiment éco-construction","codes":["HAB","CLI"],"from":u"Avise"},
{"name":u"Sensibilisation expérimentation et animation socio-culturelle","codes":["PAR"],"from":u"Avise"},
{"name":u"Services à la personne","codes":["SER"],"from":u"Avise"},
{"name":u"Silver économie","codes":["SEN","ECO"],"from":u"Avise"},
{"name":u"Solidarité","codes":["COH"],"from":u"Avise"},
{"name":u"Technologie innovante revalorisation de produits invendables ou invendus","codes":["DEC","NUM"],"from":u"Avise"},
{"name":u"Technologies de l’information et de la communication","codes":["NUM"],"from":u"Avise"},
{"name":u"Transport","codes":["MOB"],"from":u"Avise"},
{"name":u"Transport intra-urbain de marchandises insertion professionnelle","codes":["MOB"],"from":u"Avise"},
{"name":u"Upcycling","codes":["EDU","AUT"],"from":u"Avise"},
{"name":u"Vie étudiante","codes":["JEU","EDU"],"from":u"Avise"},
{"name":u"coworking","codes":["EMP"],"from":u"Avise"},
{"name":u"Écoconception","codes":["ECO","BIO"],"from":u"Avise"},
{"name":u"Politiques publiques","codes":["CIT","GOU"],"from":u"Bretagne Créative"},
{"name":u"Professionnalisation / formation","codes":["AUT"],"from":u"Bretagne Créative"},
{"name":u"PTCE","codes":["CUL"],"from":u"Bretagne Créative"},
{"name":u"Quartiers prioritaires","codes":["AUT"],"from":u"Bretagne Créative"},
{"name":u"Reprise d'entreprise","codes":["BIO"],"from":u"Bretagne Créative"},
{"name":u"Ressources humaines","codes":["GOU"],"from":u"Bretagne Créative"},
{"name":u"RSE","codes":["GOU"],"from":u"Bretagne Créative"},
{"name":u"Silver économie","codes":["SAN","SEN"],"from":u"Bretagne Créative"},
{"name":u"Social","codes":["COH"],"from":u"Bretagne Créative"},
{"name":u"Solidarité","codes":["COH"],"from":u"Bretagne Créative"},
{"name":u"Organisation sociale des territoires et des villes","codes":["GOU"],"from":u"Citego"},
{"name":u"Capital humain des territoires","codes":["CUL","AUT"],"from":u"Citego"},
{"name":u"Inégalités sociales et territoire","codes":["COH","DRO"],"from":u"Citego"},
{"name":u"conflits sociaux et luttes sociales dans un territoire","codes":["COH"],"from":u"Citego"},
{"name":u"Modes de vie","codes":["CIT","AUT"],"from":u"Citego"},
{"name":u"Identité territoriale","codes":["AUT"],"from":u"Citego"},
{"name":u"Structure économique des territoires","codes":["ECO"],"from":u"Citego"},
{"name":u"Structures commerciales","codes":["COM"],"from":u"Citego"},
{"name":u"Structures foncières, propriété foncière et marchés fonciers","codes":["ECO"],"from":u"Citego"},
{"name":u"Emploi et marché de l'emploi","codes":["EMP"],"from":u"Citego"},
{"name":u"Agriculture et production agricole","codes":["AGR"],"from":u"Citego"},
{"name":u"Actvités économiques","codes":["ECO"],"from":u"Citego"},
{"name":u"Marché foncier","codes":["ECO"],"from":u"Citego"},
{"name":u"Écosystème des territoires","codes":["GOU"],"from":u"Citego"},
{"name":u"Empreinte écologique des territoires","codes":["BIO","EAU","DEC"],"from":u"Citego"},
{"name":u"ressources naturelles, capital naturel et territoire","codes":["BIO","EAU","DEC"],"from":u"Citego"},
{"name":u"air et territoire","codes":["BIO"],"from":u"Citego"},
{"name":u"Sols et territoire","codes":["BIO"],"from":u"Citego"},
{"name":u"Biodiversité et territoire","codes":["BIO"],"from":u"Citego"},
{"name":u"Climat et territoire","codes":["CLI"],"from":u"Citego"},
{"name":u"Eau et territoire","codes":["BIO"],"from":u"Citego"},
{"name":u"Patrimoine naturel","codes":["BIO"],"from":u"Citego"},
{"name":u"Équipements collectifs","codes":["AUT"],"from":u"Citego"},
{"name":u"Habitat et marché du logement","codes":["HAB"],"from":u"Citego"},
{"name":u"Réseaux urbains et infrastructures","codes":["URB"],"from":u"Citego"},
{"name":u"Culture, diversité culturelle et territoires","codes":["CUL"],"from":u"Citego"},
{"name":u"capital immatériel des territoires","codes":["CUL"],"from":u"Citego"},
{"name":u"capacités d'adaptation, résilience","codes":["GOU"],"from":u"Citego"},
{"name":u"patrimoine historique des territoires","codes":["CUL"],"from":u"Citego"},
{"name":u"patrimoine culturel","codes":["CUL"],"from":u"Citego"},
{"name":u"Patrimoine bâti","codes":["HAB","CUL"],"from":u"Citego"},
{"name":u"Espace public","codes":["URB"],"from":u"Citego"},
{"name":u"paysages","codes":["BIO"],"from":u"Citego"},
{"name":u"Formes urbaines","codes":["URB"],"from":u"Citego"},
{"name":u"Centralité","codes":["AUT"],"from":u"Citego"},
{"name":u"Flux et réseaux de transports des personnes (déplacements internes)","codes":["MOB"],"from":u"Citego"},
{"name":u"flux d'argent (circulation interne )","codes":["ECO"],"from":u"Citego"},
{"name":u"Flux de matière (circulation interne et échanges avec l'extérieur) et réseaux de transports","codes":["MOB"],"from":u"Citego"},
{"name":u"flux d’information et réseaux de télécommunication (circulation interne et échanges avec l’extérieur)","codes":["COM"],"from":u"Citego"},
{"name":u"flux d’énergie et territoires","codes":["ENE"],"from":u"Citego"},
{"name":u"Systèmes démocratiques","codes":["GOU"],"from":u"Citego"},
{"name":u"Systèmes politiques","codes":["GOU"],"from":u"Citego"},
{"name":u"Territoires dans l'économie mondiale, territoires et filières mondiales de production","codes":["AUT"],"from":u"Citego"},
{"name":u"Relations entre territoires: interdépendance, dépendance, concurrence","codes":["COO"],"from":u"Citego"},
{"name":u"Atouts économiques des territoires","codes":["ECO"],"from":u"Citego"},
{"name":u"flux de personnes (échanges avec l'extérieur)","codes":["MOB"],"from":u"Citego"},
{"name":u"Flux d'argent (échanges avec l'extérieur)","codes":["ECO"],"from":u"Citego"},
{"name":u"flux de matières (échanges avec l’extérieur)","codes":["ECO"],"from":u"Citego"},
{"name":u"flux d'information (échanges avec l'extérieur)","codes":["COM"],"from":u"Citego"},
{"name":u"Types de territoires selon leurs caractéristiques géographiques, naturelles et morphologiques","codes":["AUT"],"from":u"Citego"},
{"name":u"Types de villes","codes":["AUT"],"from":u"Citego"},
{"name":u"Échelles territoriales","codes":["AUT"],"from":u"Citego"},
{"name":u"Dynamiques de la société","codes":["AUT"],"from":u"Citego"},
{"name":u"Transition vers des sociétés durables","codes":["AUT"],"from":u"Citego"},
{"name":u"Dynamiques de l'économie","codes":["ECO"],"from":u"Citego"},
{"name":u"Dynamiques de l'écosystème","codes":["AUT"],"from":u"Citego"},
{"name":u"Dynamiques culturelles (cohabitation des cultures, héritage culturel, évolutions culturelles)","codes":["CUL"],"from":u"Citego"},
{"name":u"Dynamique des idées et des systèmes de pensée","codes":["AUT"],"from":u"Citego"},
{"name":u"Dynamiques spatiales","codes":["AUT"],"from":u"Citego"},
{"name":u"Dynamiques de développement et d'évolution des territoires","codes":["AUT"],"from":u"Citego"},
{"name":u"Dynamiques des systèmes techniques","codes":["AUT"],"from":u"Citego"},
{"name":u"Dynamiques de la gouvernance","codes":["GOU"],"from":u"Citego"},
{"name":u"Mouvements migratoires, flux migratoires et territoires","codes":["MOB"],"from":u"Citego"},
{"name":u"Acteurs de la société","codes":["AUT"],"from":u"Citego"},
{"name":u"Acteurs de l'économie","codes":["ECO"],"from":u"Citego"},
{"name":u"Acteurs de la gestion des écosystèmes territoriaux","codes":["AUT"],"from":u"Citego"},
{"name":u"Acteurs de la gestion des infrastructures et des équipements","codes":["AUT"],"from":u"Citego"},
{"name":u"Acteurs de la culture, de la connaissance et de l'information","codes":["CUL","COM"],"from":u"Citego"},
{"name":u"Acteurs de l'organisation de l'espace, de l'aménagement","codes":["URB"],"from":u"Citego"},
{"name":u"Acteurs de la gestion des flux","codes":["AUT"],"from":u"Citego"},
{"name":u"Acteurs de la gouvernance","codes":["GOU"],"from":u"Citego"},
{"name":u"Acteurs de la mondialisation","codes":["AUT"],"from":u"Citego"},
{"name":u"Société","codes":["AUT"],"from":u"Citego"},
{"name":u"Développement économique","codes":["ECO"],"from":u"Citego"},
{"name":u"Pensée et modèles de développement","codes":["ECO"],"from":u"Citego"},
{"name":u"Gestion des écosystèmes territoriaux","codes":["GOU"],"from":u"Citego"},
{"name":u"Gestion des ressources naturelles","codes":["BIO","EAU","DEC"],"from":u"Citego"},
{"name":u"Recherche et production de connaissances","codes":["REC"],"from":u"Citego"},
{"name":u"Arts et culture","codes":["CUL"],"from":u"Citego"},
{"name":u"Éducation et diffusion de connaissances","codes":["EDU","FOR"],"from":u"Citego"},
{"name":u"Aménagement du territoire","codes":["URB"],"from":u"Citego"},
{"name":u"Gestion des flux","codes":["AUT"],"from":u"Citego"},
{"name":u"Relations du territoire avec l'extérieur","codes":["GOU","COO"],"from":u"Citego"},
{"name":u"Organisation politique et institutionnelle","codes":["GOU"],"from":u"Citego"},
{"name":u"Élaboration et mise en œuvre des droits et des règles","codes":["DRO","AUT"],"from":u"Citego"},
{"name":u"Organisation et délivrance de services d'intérêt général","codes":["AUT"],"from":u"Citego"},
{"name":u"Fiscalité et financement","codes":["FIN"],"from":u"Citego"},
{"name":u"Coordination des acteurs","codes":["GOU","COO"],"from":u"Citego"},
{"name":u"Établissement de préférences collectives","codes":["AUT"],"from":u"Citego"},
{"name":u"Outils de l’action","codes":["AUT"],"from":u"Citego"},
{"name":u"Démocratie et citoyenneté","codes":["CIT"],"from":u"Citego"},
{"name":u"Légitimité de la gouvernance","codes":["GOU"],"from":u"Citego"},
{"name":u"Pertinence des dispositifs de gouvernance","codes":["GOU"],"from":u"Citego"},
{"name":u"Co-production du bien public","codes":["GOU"],"from":u"Citego"},
{"name":u"Articulation des échelles de gouvernance","codes":["GOU","COO"],"from":u"Citego"},
{"name":u"Oasis de vie / Habitat participatif","codes":["HAB"],"from":u"Colibris / carte des oasis"},
{"name":u"Oasis ressources","codes":["AUT"],"from":u"Colibris / carte des oasis"},
{"name":u"Autre","codes":["HAB"],"from":u"Colibris / carte des oasis"},
{"name":u"Emploi & Citoyenneté","codes":["EMP","CIT"],"from":u"COORACE - Nos territoires ont de l'avenir"},
{"name":u"Mobilisation locale & Coopérations innovantes ","codes":["GOU","PAR"],"from":u"COORACE - Nos territoires ont de l'avenir"},
{"name":u"Environnement & Nouvelle économie","codes":["ECO","BIO"],"from":u"COORACE - Nos territoires ont de l'avenir"},
{"name":u"Alimentation durable","codes":["AGR","DEC"],"from":u"Fondation Daniel et Nina Carasso"},
{"name":u"Art citoyen","codes":["CUL","CIT"],"from":u"Fondation Daniel et Nina Carasso"},
{"name":u"Alternance","codes":["FOR","EMP","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"ASE - PJJ - Justice ","codes":["DRO","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"Culture","codes":["CUL","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"Décrochage scolaire","codes":["EDU","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"Insertion professionnelle","codes":["INS","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"Logement","codes":["HAB","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"Lutte contre les discriminations ","codes":["DRO","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"Mobilité internationale ","codes":["EDU","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"Orientation","codes":["EDU","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"Permis de conduire","codes":["MOB","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"Santé ","codes":["SAN","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"Engagement des jeunes","codes":["CIT","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"Décrochage universitaire ","codes":["EDU","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"Livret de compétences ","codes":["EDU","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"Réussite scolaire ","codes":["EDU","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"Revenu contractualisé d’autonomie","codes":["EMP","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"Education populaire","codes":["EDU","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"Lutte contre le harcèlement à l'école","codes":["DRO","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"Numérique","codes":["NUM","JEU"],"from":u"Fonds d'expérimentation pour la jeunesse"},
{"name":u"Espace agricole","codes":["AGR"],"from":u"Fondation RTE"},
{"name":u"Employabilité","codes":["EMP","INS"],"from":u"Fondation RTE"},
{"name":u"Patrimoine rural","codes":["CUL"],"from":u"Fondation RTE"},
{"name":u"Échanges ville & campagne","codes":["GOU","COO"],"from":u"Fondation RTE"},
{"name":u"Habitat","codes":["HAB"],"from":u"Fondation RTE"},
{"name":u"Égalité territoriale","codes":["AUT"],"from":u"Fondation RTE"},
{"name":u"Urgence & Développement","codes":["COH"],"from":u"Fondation Veolia"},
{"name":u"Emploi & Lien social","codes":["EMP","COH"],"from":u"Fondation Veolia"},
{"name":u"Environnement & Biodiversité","codes":["BIO"],"from":u"Fondation Veolia"},
{"name":u"Mécénat de compétences","codes":["MEC"],"from":u"Fondation Veolia"},
{"name":u"Accès à l'emploi","codes":["EMP"],"from":u"Fondation Vinci pour la cité"},
{"name":u"Mobilité solidaire","codes":["MOB"],"from":u"Fondation Vinci pour la cité"},
{"name":u"Quartiers prioritaires et lien social","codes":["COH"],"from":u"Fondation Vinci pour la cité"},
{"name":u"Insertion par le logement","codes":["HAB"],"from":u"Fondation Vinci pour la cité"},
{"name":u"Patrimoine & culture","codes":["CUL"],"from":u"FNCE"},
{"name":u"Patrimoine & culture","codes":["CUL"],"from":u"FNCE"},
{"name":u"Solidarité","codes":["COH","INS","HAN","EDU"],"from":u"FNCE"},
{"name":u"Solidarité","codes":["COH","INS","HAN","EDU"],"from":u"FNCE"},
{"name":u"Solidarité","codes":["COH","INS","HAN"],"from":u"FNCE"},
{"name":u"Solidarité","codes":["COH","INS","HAN"],"from":u"FNCE"},
{"name":u"Solidarité","codes":["COH","INS","HAN","EDU","HAB","AGR"],"from":u"FNCE"},
{"name":u"Education financière","codes":["EDU","FIN"],"from":u"FNCE"},
{"name":u"Soutien à la création d'entreprise","codes":["EMP"],"from":u"FNCE"},
{"name":u"Autres","codes":["AUT","SPO"],"from":u"FNCE"},
{"name":u"Autres","codes":["AUT","EDU","REC"],"from":u"FNCE"},
{"name":u"Autres","codes":["AUT","BIO","EAU","DEC"],"from":u"FNCE"},
{"name":u"Agriculture ","codes":["AGR"],"from":u"Le Labo de l'ESS"},
{"name":u"Alimentation","codes":["AGR"],"from":u"Le Labo de l'ESS"},
{"name":u"Circuits courts","codes":["ECO"],"from":u"Le Labo de l'ESS"},
{"name":u"Commerce équitable","codes":["COR"],"from":u"Le Labo de l'ESS"},
{"name":u"Communs","codes":["GOU","COO"],"from":u"Le Labo de l'ESS"},
{"name":u"Culture","codes":["CUL"],"from":u"Le Labo de l'ESS"},
{"name":u"Démocratie","codes":["GOU","PAR"],"from":u"Le Labo de l'ESS"},
{"name":u"Développement durable","codes":["BIO","ECO","COH"],"from":u"Le Labo de l'ESS"},
{"name":u"Eco-construction","codes":["HAB"],"from":u"Le Labo de l'ESS"},
{"name":u"Economie circulaire","codes":["ECO","DEC"],"from":u"Le Labo de l'ESS"},
{"name":u"Economie collaborative","codes":["ECO"],"from":u"Le Labo de l'ESS"},
{"name":u"Education","codes":["EDU"],"from":u"Le Labo de l'ESS"},
{"name":u"Emploi ","codes":["EMP"],"from":u"Le Labo de l'ESS"},
{"name":u"Europe","codes":["AUT"],"from":u"Le Labo de l'ESS"},
{"name":u"Evaluation","codes":["EVA"],"from":u"Le Labo de l'ESS"},
{"name":u"Finance solidaire","codes":["FIN"],"from":u"Le Labo de l'ESS"},
{"name":u"Financement","codes":["FIN"],"from":u"Le Labo de l'ESS"},
{"name":u"Insertion","codes":["INS"],"from":u"Le Labo de l'ESS"},
{"name":u"Jeunes","codes":["JEU"],"from":u"Le Labo de l'ESS"},
{"name":u"Logement","codes":["HAB"],"from":u"Le Labo de l'ESS"},
{"name":u"Mécénat","codes":["MEC"],"from":u"Le Labo de l'ESS"},
{"name":u"Mobilité","codes":["MOB"],"from":u"Le Labo de l'ESS"},
{"name":u"Monnaie locale","codes":["ECO"],"from":u"Le Labo de l'ESS"},
{"name":u"Numérique","codes":["NUM"],"from":u"Le Labo de l'ESS"},
{"name":u"PTCE","codes":["GOU","ECO"],"from":u"Le Labo de l'ESS"},
{"name":u"Responsabilité sociale des entreprises (RSE)","codes":["GOU"],"from":u"Le Labo de l'ESS"},
{"name":u"Rural","codes":["AUT"],"from":u"Le Labo de l'ESS"},
{"name":u"Santé","codes":["SAN"],"from":u"Le Labo de l'ESS"},
{"name":u"Seniors","codes":["SEN"],"from":u"Le Labo de l'ESS"},
{"name":u"Services à la personne","codes":["SER"],"from":u"Le Labo de l'ESS"},
{"name":u"Solidarité","codes":["COH"],"from":u"Le Labo de l'ESS"},
{"name":u"Tiers-lieux","codes":["HAB","AUT"],"from":u"Le Labo de l'ESS"},
{"name":u"Tourisme","codes":["TOU"],"from":u"Le Labo de l'ESS"},
{"name":u"Transition énergétique","codes":["ENE"],"from":u"Le Labo de l'ESS"},
{"name":u"Changement climatique, gestion et prévention des risques","codes":["CLI"],"from":u"MOT"},
{"name":u"Culture","codes":["CUL"],"from":u"MOT"},
{"name":u"Culture et société civile","codes":["CUL"],"from":u"MOT"},
{"name":u"Développement économique transfrontalier","codes":["ECO"],"from":u"MOT"},
{"name":u"Education, formation, langues","codes":["EDU","FOR"],"from":u"MOT"},
{"name":u"Emploi","codes":["EMP"],"from":u"MOT"},
{"name":u"Energies","codes":["ENE"],"from":u"MOT"},
{"name":u"Environnement, ressources, déchets","codes":["BIO","DEC"],"from":u"MOT"},
{"name":u"Inclusion sociale","codes":["COH"],"from":u"MOT"},
{"name":u"Recherche et innovation","codes":["REC"],"from":u"MOT"},
{"name":u"Santé","codes":["SAN"],"from":u"MOT"},
{"name":u"Sécurité, police","codes":["DRO"],"from":u"MOT"},
{"name":u"Société civile","codes":["AUT"],"from":u"MOT"},
{"name":u"Sport","codes":["SPO"],"from":u"MOT"},
{"name":u"TIC, télécommunications et services postaux","codes":["COM","NUM"],"from":u"MOT"},
{"name":u"Tourisme","codes":["TOU"],"from":u"MOT"},
{"name":u"Transports","codes":["MOB"],"from":u"MOT"},
{"name":u"Alimentation","codes":["AGR"],"from":u"My Positive Impact (Fondation Nicolas Hulot)"},
{"name":u"Citoyenneté","codes":["CIT"],"from":u"My Positive Impact (Fondation Nicolas Hulot)"},
{"name":u"Déchets","codes":["DEC"],"from":u"My Positive Impact (Fondation Nicolas Hulot)"},
{"name":u"Eau et biodiversité","codes":["BIO","EAU"],"from":u"My Positive Impact (Fondation Nicolas Hulot)"},
{"name":u"Énergie","codes":["ENE"],"from":u"My Positive Impact (Fondation Nicolas Hulot)"},
{"name":u"Habitat et cadre de vie","codes":["HAB"],"from":u"My Positive Impact (Fondation Nicolas Hulot)"},
{"name":u"Mobilité","codes":["MOB"],"from":u"My Positive Impact (Fondation Nicolas Hulot)"},
{"name":u"Agriculture","codes":["AGR"],"from":u"Réseau rural "},
{"name":u"Coopération, recherche et innovation","codes":["GOU","REC","COO"],"from":u"Réseau rural "},
{"name":u"Environnement","codes":["BIO"],"from":u"Réseau rural "},
{"name":u"Economie","codes":["ECO"],"from":u"Réseau rural "},
{"name":u"Alimentation","codes":["AGR"],"from":u"Réseau rural "},
{"name":u"Accueil et services à la population et aux entreprises","codes":["COH","ECO","SER"],"from":u"Réseau rural "},
{"name":u"Forêt","codes":["AGR"],"from":u"Réseau rural "},
{"name":u"Territoire et gestion de l'espace","codes":["AME"],"from":u"Réseau rural "},
{"name":u"alimentation durable","codes":["AGR"],"from":u"RTES"},
{"name":u"communs","codes":["GOU"],"from":u"RTES"},
{"name":u"culture","codes":["CUL"],"from":u"RTES"},
{"name":u"économie circulaire","codes":["ECO","DEC"],"from":u"RTES"},
{"name":u"entrepreneuriat","codes":["ECO"],"from":u"RTES"},
{"name":u"foncier","codes":["AME"],"from":u"RTES"},
{"name":u"habitat","codes":["HAB"],"from":u"RTES"},
{"name":u"immigration","codes":["AUT"],"from":u"RTES"},
{"name":u"lieux partagés","codes":["COO","AUT"],"from":u"RTES"},
{"name":u"logement","codes":["HAB"],"from":u"RTES"},
{"name":u"marchés publics","codes":["FIN"],"from":u"RTES"},
{"name":u"mobilité durable","codes":["MOB"],"from":u"RTES"},
{"name":u"numérique","codes":["NUM"],"from":u"RTES"},
{"name":u"PTCE","codes":["ECO","GOU","COO"],"from":u"RTES"},
{"name":u"rénovation thermique","codes":["HAB","ENE"],"from":u"RTES"},
{"name":u"rural","codes":["AUT"],"from":u"RTES"},
{"name":u"Scic","codes":["ECO","GOU","COO"],"from":u"RTES"},
{"name":u"transition énergétique","codes":["ENE"],"from":u"RTES"},
{"name":u"utilité sociale","codes":["COH"],"from":u"RTES"},
{"name":u"(Biens) communs","codes":["COO"],"from":u"Semeoz"},
{"name":u"Accessibilité","codes":["COH","INS"],"from":u"Semeoz"},
{"name":u"Actions citoyennes","codes":["CIT"],"from":u"Semeoz"},
{"name":u"Agriculture","codes":["AGR"],"from":u"Semeoz"},
{"name":u"Agriculture biologique","codes":["AGR","BIO"],"from":u"Semeoz"},
{"name":u"Agriculture urbaine","codes":["AGR"],"from":u"Semeoz"},
{"name":u"Agroécologie","codes":["AGR","BIO"],"from":u"Semeoz"},
{"name":u"Alimentation","codes":["AGR"],"from":u"Semeoz"},
{"name":u"Alternatives","codes":["AUT"],"from":u"Semeoz"},
{"name":u"AMAP","codes":["AGR","ECO"],"from":u"Semeoz"},
{"name":u"Apprentissages","codes":["EDU"],"from":u"Semeoz"},
{"name":u"Arts / Culture","codes":["CUL"],"from":u"Semeoz"},
{"name":u"Associatif / Humanitaire","codes":["COO"],"from":u"Semeoz"},
{"name":u"Auto-construction","codes":["HAB"],"from":u"Semeoz"},
{"name":u"Auto-gestion","codes":["GOU"],"from":u"Semeoz"},
{"name":u"Autonomie","codes":["COH"],"from":u"Semeoz"},
{"name":u"Banque éthique","codes":["FIN"],"from":u"Semeoz"},
{"name":u"Bien-être / Beauté","codes":["SAN"],"from":u"Semeoz"},
{"name":u"Biodiversité","codes":["BIO"],"from":u"Semeoz"},
{"name":u"Biodynamie","codes":["AGR"],"from":u"Semeoz"},
{"name":u"Blockchain","codes":["NUM"],"from":u"Semeoz"},
{"name":u"Bonheur","codes":["SAN"],"from":u"Semeoz"},
{"name":u"Bonheur au travail","codes":["SAN","EMP"],"from":u"Semeoz"},
{"name":u"Bonheur National Brut","codes":["GOU"],"from":u"Semeoz"},
{"name":u"Budget participatif","codes":["FIN","CIT"],"from":u"Semeoz"},
{"name":u"Cartographie","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Circuits courts","codes":["AGR"],"from":u"Semeoz"},
{"name":u"Climat","codes":["CLI"],"from":u"Semeoz"},
{"name":u"Co-création","codes":["COO"],"from":u"Semeoz"},
{"name":u"Co-production","codes":["COO","ECO"],"from":u"Semeoz"},
{"name":u"Collapsologie","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Commerce","codes":["COM"],"from":u"Semeoz"},
{"name":u"Commerce équitable","codes":["COM"],"from":u"Semeoz"},
{"name":u"CommonsTous2017","codes":["GOU","COO"],"from":u"Semeoz"},
{"name":u"Communaux collaboratifs","codes":["GOU","COO"],"from":u"Semeoz"},
{"name":u"Communication / Presse","codes":["COM"],"from":u"Semeoz"},
{"name":u"Compost","codes":["DEC"],"from":u"Semeoz"},
{"name":u"Consommaction","codes":["CIT","BIO"],"from":u"Semeoz"},
{"name":u"Consommation collaborative","codes":["CIT","COO"],"from":u"Semeoz"},
{"name":u"Consommation responsable","codes":["CIT","COO"],"from":u"Semeoz"},
{"name":u"Conspirateurs positifs","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Construction écologique","codes":["HAB"],"from":u"Semeoz"},
{"name":u"Coopérative","codes":["GOU","COO"],"from":u"Semeoz"},
{"name":u"Copyleft","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Cosmétiques naturels","codes":["SAN"],"from":u"Semeoz"},
{"name":u"Covoiturage","codes":["MOB"],"from":u"Semeoz"},
{"name":u"Coworking","codes":["EMP"],"from":u"Semeoz"},
{"name":u"Créatifs culturels","codes":["CUL"],"from":u"Semeoz"},
{"name":u"Création participative","codes":["CUL"],"from":u"Semeoz"},
{"name":u"Crowdfunding (Financement participatif)","codes":["FIN"],"from":u"Semeoz"},
{"name":u"Crowdlending","codes":["ECO"],"from":u"Semeoz"},
{"name":u"Décroissance","codes":["ECO"],"from":u"Semeoz"},
{"name":u"Démocratie directe","codes":["GOU","PAR"],"from":u"Semeoz"},
{"name":u"Démocratie liquide","codes":["GOU","PAR"],"from":u"Semeoz"},
{"name":u"Démocratie ouverte (OpenGov)","codes":["GOU","PAR"],"from":u"Semeoz"},
{"name":u"Démocratie participative","codes":["GOU","PAR"],"from":u"Semeoz"},
{"name":u"Développement durable","codes":["ECO","COH","BIO"],"from":u"Semeoz"},
{"name":u"Développement rural","codes":["AME"],"from":u"Semeoz"},
{"name":u"Do it yourself","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Don","codes":["FIN"],"from":u"Semeoz"},
{"name":u"Droit ouvert (Open Law)","codes":["DRO"],"from":u"Semeoz"},
{"name":u"Echange","codes":["COO"],"from":u"Semeoz"},
{"name":u"Eco-conception","codes":["ECO"],"from":u"Semeoz"},
{"name":u"Eco-habitat","codes":["HAB"],"from":u"Semeoz"},
{"name":u"Ecole alternative","codes":["EDU"],"from":u"Semeoz"},
{"name":u"Ecologie","codes":["BIO","DEC","CLI"],"from":u"Semeoz"},
{"name":u"Economie circulaire","codes":["ECO","DEC"],"from":u"Semeoz"},
{"name":u"Economie collaborative","codes":["ECO","COO"],"from":u"Semeoz"},
{"name":u"Economie contributive","codes":["ECO","COO"],"from":u"Semeoz"},
{"name":u"Economie de partage","codes":["ECO","COO"],"from":u"Semeoz"},
{"name":u"Economie ouverte","codes":["ECO"],"from":u"Semeoz"},
{"name":u"Economie positive","codes":["ECO"],"from":u"Semeoz"},
{"name":u"Economie responsable","codes":["ECO"],"from":u"Semeoz"},
{"name":u"Economie sociale","codes":["ECO","COH"],"from":u"Semeoz"},
{"name":u"Economie solidaire","codes":["ECO"],"from":u"Semeoz"},
{"name":u"Economie verte","codes":["ECO","BIO"],"from":u"Semeoz"},
{"name":u"Economie / finance","codes":["ECO","FIN"],"from":u"Semeoz"},
{"name":u"Ecosystème","codes":["GOU"],"from":u"Semeoz"},
{"name":u"Ecotourisme","codes":["TOU"],"from":u"Semeoz"},
{"name":u"Ecovillage","codes":["HAB","GOU"],"from":u"Semeoz"},
{"name":u"Ecriture participative","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Education populaire","codes":["EDU","COH"],"from":u"Semeoz"},
{"name":u"Education positive","codes":["EDU"],"from":u"Semeoz"},
{"name":u"Education / parentalisé","codes":["EDU"],"from":u"Semeoz"},
{"name":u"Energie","codes":["ENE"],"from":u"Semeoz"},
{"name":u"Energie solaire","codes":["ENE"],"from":u"Semeoz"},
{"name":u"Energies renouvelables","codes":["ENE"],"from":u"Semeoz"},
{"name":u"Engagement","codes":["CIT"],"from":u"Semeoz"},
{"name":u"Entraide","codes":["COO"],"from":u"Semeoz"},
{"name":u"Entreprenariat","codes":["ECO"],"from":u"Semeoz"},
{"name":u"Entreprenariat social","codes":["ECO"],"from":u"Semeoz"},
{"name":u"Environnement","codes":["BIO"],"from":u"Semeoz"},
{"name":u"Epargne solidaire","codes":["FIN"],"from":u"Semeoz"},
{"name":u"Epicerie vrac","codes":["ECO","DEC"],"from":u"Semeoz"},
{"name":u"Espace collaboratif","codes":["COO"],"from":u"Semeoz"},
{"name":u"Fab Lab","codes":["COO"],"from":u"Semeoz"},
{"name":u"Faircoin","codes":["FIN"],"from":u"Semeoz"},
{"name":u"Féminisme","codes":["DRO"],"from":u"Semeoz"},
{"name":u"Finance équitable","codes":["FIN","COH"],"from":u"Semeoz"},
{"name":u"Finance responsable ","codes":["FIN"],"from":u"Semeoz"},
{"name":u"Finance solidaire","codes":["FIN","COH"],"from":u"Semeoz"},
{"name":u"Fleur biologique","codes":["BIO"],"from":u"Semeoz"},
{"name":u"Freegan","codes":["AGR"],"from":u"Semeoz"},
{"name":u"Gouvernance","codes":["GOU"],"from":u"Semeoz"},
{"name":u"Gouvernance collective","codes":["GOU","PAR"],"from":u"Semeoz"},
{"name":u"Gratuit","codes":["FIN"],"from":u"Semeoz"},
{"name":u"Habitat","codes":["HAB"],"from":u"Semeoz"},
{"name":u"Habitat léger","codes":["HAB"],"from":u"Semeoz"},
{"name":u"Habitat partagé","codes":["HAB","COO"],"from":u"Semeoz"},
{"name":u"Habitat participatif","codes":["HAB","PAR"],"from":u"Semeoz"},
{"name":u"Holacratie","codes":["GOU"],"from":u"Semeoz"},
{"name":u"Informatique durable","codes":["NUM"],"from":u"Semeoz"},
{"name":u"Initiatives","codes":["CIT"],"from":u"Semeoz"},
{"name":u"Innovation sociale","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Insertion","codes":["INS"],"from":u"Semeoz"},
{"name":u"Instruction en famille","codes":["EDU"],"from":u"Semeoz"},
{"name":u"Intérêt général","codes":["GOU"],"from":u"Semeoz"},
{"name":u"Internet libre","codes":["NUM"],"from":u"Semeoz"},
{"name":u"Investissement d'impact","codes":["FIN"],"from":u"Semeoz"},
{"name":u"Investissement d'éthique","codes":["FIN"],"from":u"Semeoz"},
{"name":u"Investissement solidaire","codes":["FIN"],"from":u"Semeoz"},
{"name":u"Jardinage / Bricolage","codes":["AGR","BIO"],"from":u"Semeoz"},
{"name":u"Jardins partagés","codes":["AGR"],"from":u"Semeoz"},
{"name":u"Jeux / Loisirs","codes":["CUL"],"from":u"Semeoz"},
{"name":u"Journalisme constructif","codes":["COM"],"from":u"Semeoz"},
{"name":u"Journalisme participatif","codes":["COM","PAR"],"from":u"Semeoz"},
{"name":u"Juridique","codes":["DRO"],"from":u"Semeoz"},
{"name":u"Légumes bio","codes":["AGR"],"from":u"Semeoz"},
{"name":u"Licences libres","codes":["NUM"],"from":u"Semeoz"},
{"name":u"Lien social","codes":["COH"],"from":u"Semeoz"},
{"name":u"Livre","codes":["CUL"],"from":u"Semeoz"},
{"name":u"Logiciel libre","codes":["NUM"],"from":u"Semeoz"},
{"name":u"Low Tech","codes":["NUM"],"from":u"Semeoz"},
{"name":u"Makers","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Matériel libre","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Mécénat","codes":["MEC"],"from":u"Semeoz"},
{"name":u"Médecine / Santé","codes":["SAN"],"from":u"Semeoz"},
{"name":u"Medialab","codes":["COM"],"from":u"Semeoz"},
{"name":u"Méthode Montessori","codes":["EDU"],"from":u"Semeoz"},
{"name":u"Micro-crédit","codes":["FIN"],"from":u"Semeoz"},
{"name":u"Microdon","codes":["FIN"],"from":u"Semeoz"},
{"name":u"Microfinance","codes":["FIN"],"from":u"Semeoz"},
{"name":u"mobilité","codes":["MOB"],"from":u"Semeoz"},
{"name":u"Mode de vie","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Monnaies alternatives","codes":["FIN"],"from":u"Semeoz"},
{"name":u"Mooc","codes":["EDU","NUM"],"from":u"Semeoz"},
{"name":u"Musique libre","codes":["CUL"],"from":u"Semeoz"},
{"name":u"No Poo","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Non violence","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Objets connectés","codes":["NUM"],"from":u"Semeoz"},
{"name":u"Open data","codes":["NUM"],"from":u"Semeoz"},
{"name":u"Open education","codes":["EDU"],"from":u"Semeoz"},
{"name":u"Open Science","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Open-Access","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Open-source","codes":["NUM"],"from":u"Semeoz"},
{"name":u"Optimisme","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Partage","codes":["COO"],"from":u"Semeoz"},
{"name":u"Pédagogie / Instruction","codes":["EDU"],"from":u"Semeoz"},
{"name":u"Peer Production Licence","codes":["NUM"],"from":u"Semeoz"},
{"name":u"Peer-to-peer (pair à pair)","codes":["COO"],"from":u"Semeoz"},
{"name":u"Pensée / Savoirs","codes":["REC"],"from":u"Semeoz"},
{"name":u"Permaculture","codes":["AGR"],"from":u"Semeoz"},
{"name":u"Produits bio","codes":["AGR"],"from":u"Semeoz"},
{"name":u"Produits locaux","codes":["AGR"],"from":u"Semeoz"},
{"name":u"Recherche collaborative","codes":["REC"],"from":u"Semeoz"},
{"name":u"Récup","codes":["DEC"],"from":u"Semeoz"},
{"name":u"Recyclage","codes":["DEC"],"from":u"Semeoz"},
{"name":u"Repair Café","codes":["DEC"],"from":u"Semeoz"},
{"name":u"Réseaux sociaux","codes":["COM"],"from":u"Semeoz"},
{"name":u"Résilience","codes":["GOU"],"from":u"Semeoz"},
{"name":u"Revenu contributif","codes":["FIN"],"from":u"Semeoz"},
{"name":u"Revenu de base","codes":["FIN"],"from":u"Semeoz"},
{"name":u"Savoir libre","codes":["REC"],"from":u"Semeoz"},
{"name":u"Sciences collaboratives","codes":["REC"],"from":u"Semeoz"},
{"name":u"Sciences libres ","codes":["REC"],"from":u"Semeoz"},
{"name":u"Sciences / Technologies","codes":["REC"],"from":u"Semeoz"},
{"name":u"Self-management","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Sobriété","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Société civile","codes":["CIT"],"from":u"Semeoz"},
{"name":u"Sociocratie","codes":["GOU"],"from":u"Semeoz"},
{"name":u"Solidarité","codes":["COH"],"from":u"Semeoz"},
{"name":u"Solutions","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Supermarché coopératif","codes":["ECO"],"from":u"Semeoz"},
{"name":u"Système d'échange local","codes":["COO"],"from":u"Semeoz"},
{"name":u"Think Tank","codes":["REC"],"from":u"Semeoz"},
{"name":u"Tiers lieux","codes":["HAB"],"from":u"Semeoz"},
{"name":u"Tirage au sort","codes":["GOU"],"from":u"Semeoz"},
{"name":u"Tourisme équitable","codes":["TOU"],"from":u"Semeoz"},
{"name":u"Tourisme solidaire","codes":["TOU"],"from":u"Semeoz"},
{"name":u"Transition","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Transition écologique","codes":["BIO","CLI","ENE"],"from":u"Semeoz"},
{"name":u"Transition économique","codes":["ECO"],"from":u"Semeoz"},
{"name":u"Transition énergétique","codes":["ENE"],"from":u"Semeoz"},
{"name":u"Transition sociale","codes":["COH"],"from":u"Semeoz"},
{"name":u"Transports","codes":["MOB"],"from":u"Semeoz"},
{"name":u"Transports doux","codes":["MOB"],"from":u"Semeoz"},
{"name":u"Travail collaboratif","codes":["EMP","COO"],"from":u"Semeoz"},
{"name":u"Troc","codes":["COO"],"from":u"Semeoz"},
{"name":u"Urbanisme","codes":["AME"],"from":u"Semeoz"},
{"name":u"Urbanisme participatif","codes":["AME","PAR"],"from":u"Semeoz"},
{"name":u"Vacances / Voyages","codes":["CUL"],"from":u"Semeoz"},
{"name":u"Veganisme","codes":["AGR"],"from":u"Semeoz"},
{"name":u"Végétalisme","codes":["AGR"],"from":u"Semeoz"},
{"name":u"Végétarisme","codes":["AGR"],"from":u"Semeoz"},
{"name":u"Vie de quartier","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Vie pratique","codes":["AUT"],"from":u"Semeoz"},
{"name":u"Vie sociale","codes":["COH"],"from":u"Semeoz"},
{"name":u"Web sémantique","codes":["NUM"],"from":u"Semeoz"},
{"name":u"Wiki","codes":["NUM","COO"],"from":u"Semeoz"},
{"name":u"Zéro déchet","codes":["DEC"],"from":u"Semeoz"},
{"name":u"Activités à la maison, Auto-production, Travail domestique","codes":["ECO","AUT"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Activités professionnelles, scientifiques et techniques","codes":["EMP","AUT"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Activités sociales, Réparations, Bien-être","codes":["COH"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Administration et management, tourisme et location","codes":["TOU","GOU"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Administration publique et sécurité sociale","codes":["SAN","COH"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Agriculture et environnement","codes":["AGR","BIO"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Art, culture, loisirs et sports","codes":["CUL","LOI","SPO"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Artisanat et manifacturier","codes":["ECO"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Commerce et distribution","codes":["COM"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Construction, travaux publics et rénovation","codes":["HAB"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Diplomatie internationale et Coopération","codes":["GOU","COO"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Éducation et formation","codes":["EDU","FOR"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Finance, assurance et activités connexes","codes":["FIN"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Habitat et logement","codes":["HAB"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Information, communication et technologies","codes":["COM","NUM"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Mines et carrières","codes":["ECO"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Production et distribution de l'énergie","codes":["ENE","ECO"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Recyclage, traitement des déchets, cycle de l'eau et la restauration écologique","codes":["DEC","EAU","BIO"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Restauration, hôtellerie, catering","codes":["TOU"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Services sociaux et santé","codes":["COH","SAN"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Transport, logistique et entreposage","codes":["MOB"],"from":u"Socioéco / Carte ESS global"},
{"name":u"Vivre ensemble","codes":["COH"],"from":u"Fondation Cognacq Jay"},
{"name":u"Vivre ensemble","codes":["COH"],"from":u"Fondation Cognacq Jay"},
{"name":u"Vivre ensemble","codes":["COH","DRO"],"from":u"Fondation Cognacq Jay"},
{"name":u"Besoins essentiels","codes":["AGR","SAN","HAB","EDU"],"from":u"Fondation Cognacq Jay"},
{"name":u"Besoins essentiels","codes":["AGR","SAN","HAB"],"from":u"Fondation Cognacq Jay"},
{"name":u"Besoins essentiels","codes":["AGR","SAN","HAB","DRO"],"from":u"Fondation Cognacq Jay"},
{"name":u"Inclusion sociale","codes":["COH","INS"],"from":u"Fondation Cognacq Jay"},
{"name":u"Inclusion sociale","codes":["COH"],"from":u"Fondation Cognacq Jay"},
{"name":u"Inclusion sociale","codes":["COH"],"from":u"Fondation Cognacq Jay"},
{"name":u"Santé","codes":["SAN"],"from":u"Fondation Cognacq Jay"},
{"name":u"Santé","codes":["SAN"],"from":u"Fondation Cognacq Jay"},
{"name":u"Santé","codes":["SAN"],"from":u"Fondation Cognacq Jay"},
{"name":u"Culture et savoirs","codes":["CUL","FOR"],"from":u"Fondation Cognacq Jay"},
{"name":u"Culture et savoirs","codes":["CUL"],"from":u"Fondation Cognacq Jay"},
{"name":u"Culture et savoirs","codes":["CUL","FOR"],"from":u"Fondation Cognacq Jay"},
{"name":u"Bâtiment durable","codes":["HAB"],"from":u"Synergies, réseau de collectivités 53 (Mayenne)"},
{"name":u"Démarches participatives","codes":["PAR"],"from":u"Synergies, réseau de collectivités 53 (Mayenne)"},
{"name":u"Energie","codes":["ENE"],"from":u"Synergies, réseau de collectivités 53 (Mayenne)"},
{"name":u"Environnement","codes":["BIO"],"from":u"Synergies, réseau de collectivités 53 (Mayenne)"},
{"name":u"Gouvernance","codes":["GOU"],"from":u"Synergies, réseau de collectivités 53 (Mayenne)"},
{"name":u"Mobilite","codes":["MOB"],"from":u"Synergies, réseau de collectivités 53 (Mayenne)"},
{"name":u"Urbanisme","codes":["URB"],"from":u"Synergies, réseau de collectivités 53 (Mayenne)"},
{"name":u"Social - Santé","codes":["SAN","COH"],"from":u"Synergies, réseau de collectivités 53 (Mayenne)"},
{"name":u" Développement économique - Tourisme","codes":["ECO","TOU"],"from":u"Territoires Conseils (ex Mairie Conseils)"},
{"name":u" Environnement - Energie - Transports","codes":["BIO","ENE","MOB"],"from":u"Territoires Conseils (ex Mairie Conseils)"},
{"name":u" Culture - Sports - Loisirs","codes":["CUL","LOI","SPO"],"from":u"Territoires Conseils (ex Mairie Conseils)"},
{"name":u" Habitat - Urbanisme - Paysage","codes":["HAB"],"from":u"Territoires Conseils (ex Mairie Conseils)"},
{"name":u" Organisation territoriale - Elus","codes":["GOU"],"from":u"Territoires Conseils (ex Mairie Conseils)"},
{"name":u" Numérique - Communication","codes":["COM","NUM"],"from":u"Territoires Conseils (ex Mairie Conseils)"},
{"name":u" Finances - Commande publique","codes":["FIN","COP"],"from":u"Territoires Conseils (ex Mairie Conseils)"},
{"name":u" Citoyenneté - Associations - Jeunesse","codes":["CIT","COH","JEU"],"from":u"Territoires Conseils (ex Mairie Conseils)"},
{"name":u" Education - Enseignement Supérieur","codes":["EDU"],"from":u"Territoires Conseils (ex Mairie Conseils)"},
{"name":u" Emploi - Formation","codes":["EMP","FOR"],"from":u"Territoires Conseils (ex Mairie Conseils)"},
{"name":u"accès à l’énergie","codes":["COH","ENE"],"from":u"UNCCAS"},
{"name":u"accès aux loisirs","codes":["COH","LOI"],"from":u"UNCCAS"},
{"name":u"accès aux soins","codes":["COH","SAN"],"from":u"UNCCAS"},
{"name":u"accès aux transports","codes":["COH","MOB"],"from":u"UNCCAS"},
{"name":u"accompagnement","codes":["SER"],"from":u"UNCCAS"},
{"name":u"accompagnement social au logement","codes":["COH","HAB"],"from":u"UNCCAS"},
{"name":u"accueil de jour","codes":["SER"],"from":u"UNCCAS"},
{"name":u"accueil parents enfants","codes":["SER","JEU"],"from":u"UNCCAS"},
{"name":u"accueil temporaire","codes":["SER"],"from":u"UNCCAS"},
{"name":u"action d’éducation","codes":["EDU"],"from":u"UNCCAS"},
{"name":u"activité d’animation","codes":["SER"],"from":u"UNCCAS"},
{"name":u"activité de prévention","codes":["COH"],"from":u"UNCCAS"},
{"name":u"adaptation à la perte d’autonomie de l’occupant","codes":["SER"],"from":u"UNCCAS"},
{"name":u"aide à la mobilité et transport","codes":["SER","MOB"],"from":u"UNCCAS"},
{"name":u"aide à la parentalité","codes":["SER"],"from":u"UNCCAS"},
{"name":u"aide alimentaire","codes":["SER","AGR"],"from":u"UNCCAS"},
{"name":u"aide aux aidants","codes":["SER"],"from":u"UNCCAS"},
{"name":u"aide et activités à domicile","codes":["COH","SER"],"from":u"UNCCAS"},
{"name":u"aide juridique","codes":["DRO"],"from":u"UNCCAS"},
{"name":u"allocation logement temporaire","codes":["HAB"],"from":u"UNCCAS"},
{"name":u"alzheimer","codes":["SAN"],"from":u"UNCCAS"},
{"name":u"amélioration de la qualité","codes":["COH"],"from":u"UNCCAS"},
{"name":u"aménagement de l’habitat","codes":["HAB","AME"],"from":u"UNCCAS"},
{"name":u"aménagement durable","codes":["AME"],"from":u"UNCCAS"},
{"name":u"analyse des besoins sociaux","codes":["COH"],"from":u"UNCCAS"},
{"name":u"animation et convivialité","codes":["SER"],"from":u"UNCCAS"},
{"name":u"auto-réhabilitation","codes":["HAB"],"from":u"UNCCAS"},
{"name":u"certification","codes":["AUT"],"from":u"UNCCAS"},
{"name":u"chantier d’insertion","codes":["HAB"],"from":u"UNCCAS"},
{"name":u"chèque d’accompagnement personnalisé","codes":["SER"],"from":u"UNCCAS"},
{"name":u"CHRS","codes":["COH"],"from":u"UNCCAS"},
{"name":u"CLIC","codes":["COH"],"from":u"UNCCAS"},
{"name":u"communication","codes":["COM"],"from":u"UNCCAS"},
{"name":u"coordination des aides","codes":["COH"],"from":u"UNCCAS"},
{"name":u"coordination et organisation du service","codes":["GOU"],"from":u"UNCCAS"},
{"name":u"crèches","codes":["COH"],"from":u"UNCCAS"},
{"name":u"dépendance","codes":["COH"],"from":u"UNCCAS"},
{"name":u"développement durable","codes":["COH","BIO","ECO"],"from":u"UNCCAS"},
{"name":u"domiciliation","codes":["HAB"],"from":u"UNCCAS"},
{"name":u"économie durable et solidaire","codes":["ECO"],"from":u"UNCCAS"},
{"name":u"économies d’énergie","codes":["ENE"],"from":u"UNCCAS"},
{"name":u"éducation nutritionnelle","codes":["EDU","AGR"],"from":u"UNCCAS"},
{"name":u"enfance","codes":["JEU"],"from":u"UNCCAS"},
{"name":u"établissement et service social et médico-social","codes":["COH","SAN"],"from":u"UNCCAS"},
{"name":u"étude / diagnostic / évaluation","codes":["EVA"],"from":u"UNCCAS"},
{"name":u"évaluation","codes":["EVA"],"from":u"UNCCAS"},
{"name":u"exclusions financières","codes":["FIN"],"from":u"UNCCAS"},
{"name":u"expulsion","codes":["HAB"],"from":u"UNCCAS"},
{"name":u"famille","codes":["COH"],"from":u"UNCCAS"},
{"name":u"foyer de jeunes travailleurs","codes":["HAB","JEU"],"from":u"UNCCAS"},
{"name":u"foyer logement","codes":["HAB"],"from":u"UNCCAS"},
{"name":u"garde et téléassistance","codes":["SER"],"from":u"UNCCAS"},
{"name":u"groupement de coopération sociale et médico-sociale","codes":["COH","SAN"],"from":u"UNCCAS"},
{"name":u"habitat","codes":["HAB"],"from":u"UNCCAS"},
{"name":u"halte-garderie","codes":["COH"],"from":u"UNCCAS"},
{"name":u"hébergement d’urgence","codes":["HAB"],"from":u"UNCCAS"},
{"name":u"insertion","codes":["INS"],"from":u"UNCCAS"},
{"name":u"insertion par l’activité économique","codes":["EMP","INS"],"from":u"UNCCAS"},
{"name":u"insertion par l’emploi","codes":["EMP","INS"],"from":u"UNCCAS"},
{"name":u"insertion par la culture","codes":["INS","CUL"],"from":u"UNCCAS"},
{"name":u"insertion par le sport","codes":["INS","SPO"],"from":u"UNCCAS"},
{"name":u"intercommunalité","codes":["GOU","AUT"],"from":u"UNCCAS"},
{"name":u"intergénérations","codes":["SEN"],"from":u"UNCCAS"},
{"name":u"isolement","codes":["COH"],"from":u"UNCCAS"},
{"name":u"jeunesse","codes":["JEU"],"from":u"UNCCAS"},
{"name":u"logement","codes":["HAB"],"from":u"UNCCAS"},
{"name":u"logement social","codes":["COH","HAB"],"from":u"UNCCAS"},
{"name":u"lutte contre le non-recours","codes":["DRO","SER"],"from":u"UNCCAS"},
{"name":u"lutte contre les exclusions","codes":["COH"],"from":u"UNCCAS"},
{"name":u"maltraitance","codes":["COH"],"from":u"UNCCAS"},
{"name":u"médiation familiale","codes":["MED"],"from":u"UNCCAS"},
{"name":u"médiation locative","codes":["MED","HAB"],"from":u"UNCCAS"},
{"name":u"mesure de protection","codes":["DRO"],"from":u"UNCCAS"},
{"name":u"microcrédit","codes":["FIN"],"from":u"UNCCAS"},
{"name":u"observation des territoires et des publics","codes":["COH","AUT"],"from":u"UNCCAS"},
{"name":u"organisation de manifestations et d’évènements","codes":["AUT"],"from":u"UNCCAS"},
{"name":u"organisation du CCAS","codes":["GOU"],"from":u"UNCCAS"},
{"name":u"participation des usagers","codes":["PAR"],"from":u"UNCCAS"},
{"name":u"personnes âgées","codes":["SEN"],"from":u"UNCCAS"},
{"name":u"personnes handicapées","codes":["HAN"],"from":u"UNCCAS"},
{"name":u"petits travaux et entretien de la maison","codes":["HAB"],"from":u"UNCCAS"},
{"name":u"plate-forme de services","codes":["COH","SER"],"from":u"UNCCAS"},
{"name":u"point information famille","codes":["SER"],"from":u"UNCCAS"},
{"name":u"pôles d’accueil en réseau pour l’accès aux droits sociaux (PARADS)","codes":["DRO"],"from":u"UNCCAS"},
{"name":u"politique de la ville","codes":["COH"],"from":u"UNCCAS"},
{"name":u"prévention canicule","codes":["SER"],"from":u"UNCCAS"},
{"name":u"prévention coupures","codes":["SER"],"from":u"UNCCAS"},
{"name":u"prévention santé","codes":["SAN"],"from":u"UNCCAS"},
{"name":u"programmes/financements européens","codes":["AUT"],"from":u"UNCCAS"},
{"name":u"REAAP","codes":["COH"],"from":u"UNCCAS"},
{"name":u"relais assistantes maternelles","codes":["COH"],"from":u"UNCCAS"},
{"name":u"repas et restauration","codes":["AGR"],"from":u"UNCCAS"},
{"name":u"restauration de l’image de soi","codes":["COH"],"from":u"UNCCAS"},
{"name":u"santé","codes":["SAN"],"from":u"UNCCAS"},
{"name":u"santé mentale","codes":["SAN"],"from":u"UNCCAS"},
{"name":u"semaine bleue","codes":["COH"],"from":u"UNCCAS"},
{"name":u"services aux familles","codes":["COH"],"from":u"UNCCAS"},
{"name":u"structure d’accueil de la petite enfance","codes":["COH"],"from":u"UNCCAS"},
{"name":u"technologies de l’information et de la communication","codes":["COM","NUM"],"from":u"UNCCAS"},
{"name":u"urgence sociale","codes":["COH"],"from":u"UNCCAS"},
{"name":u"vacances","codes":["LOI"],"from":u"UNCCAS"},
{"name":u"veille sociale","codes":["COH"],"from":u"UNCCAS"},
{"name":u"Achats durables","codes":["COP"],"from":u"Bruded - réseau breton collectivités DD"},
{"name":u"Culture et tourisme","codes":["CUL","TOU"],"from":u"Bruded - réseau breton collectivités DD"},
{"name":u"Démarche globale","codes":["GOU"],"from":u"Bruded - réseau breton collectivités DD"},
{"name":u"Déplacement ","codes":["MOB"],"from":u"Bruded - réseau breton collectivités DD"},
{"name":u"Eau et assainissement","codes":["EAU"],"from":u"Bruded - réseau breton collectivités DD"},
{"name":u"Economie locale, actions sociales","codes":["COH","ECO"],"from":u"Bruded - réseau breton collectivités DD"},
{"name":u"Energie","codes":["ENE"],"from":u"Bruded - réseau breton collectivités DD"},
{"name":u"Engagements","codes":["CIT"],"from":u"Bruded - réseau breton collectivités DD"},
{"name":u"Equipement public","codes":["AUT"],"from":u"Bruded - réseau breton collectivités DD"},
{"name":u"Espaces publics","codes":["AME","URB"],"from":u"Bruded - réseau breton collectivités DD"},
{"name":u"Espaces verts et biodiversité","codes":["BIO"],"from":u"Bruded - réseau breton collectivités DD"},
{"name":u"Quartiers d'habitation et renouvellement","codes":["HAB","AME","URB"],"from":u"Bruded - réseau breton collectivités DD"},
{"name":u"Restauration collective","codes":["AGR"],"from":u"Bruded - réseau breton collectivités DD"},
{"name":u"AGIR POUR LA BIODIVERSITE","codes":["BIO"],"from":u"cerdd"},
{"name":u"ALIMENTATION DURABLE","codes":["AGR"],"from":u"cerdd"},
{"name":u"CHANGEMENT CLIMATIQUE","codes":["CLI"],"from":u"cerdd"},
{"name":u"TERRITOIRES DURABLES","codes":["COH","ECO","BIO"],"from":u"cerdd"},
{"name":u"TRANSITIONS ECONOMIQUES","codes":["ECO"],"from":u"cerdd"},
{"name":u"URBANISME ET PLANIFICATION DURABLES","codes":["URB","AME"],"from":u"cerdd"},
{"name":u"L'Européen d'à côté","codes":["AUT"],"from":u"Europe en France"},
{"name":u"Agriculture","codes":["AGR"],"from":u"Europe en France"},
{"name":u"Agriculture et pêche","codes":["AGR","AGR"],"from":u"Europe en France"},
{"name":u"Aides aux entreprises","codes":["ECO"],"from":u"Europe en France"},
{"name":u"Attractivité du territoire","codes":["AUT","ECO"],"from":u"Europe en France"},
{"name":u"Coopération","codes":["GOU","COO"],"from":u"Europe en France"},
{"name":u"Culture","codes":["CUL"],"from":u"Europe en France"},
{"name":u"Culture et tourisme","codes":["CUL","TOU"],"from":u"Europe en France"},
{"name":u"Développement durable","codes":["ECO","BIO","COH"],"from":u"Europe en France"},
{"name":u"Développement durable et environnement","codes":["BIO"],"from":u"Europe en France"},
{"name":u"Développement local et cohésion territoriale","codes":["COH","AME","URB"],"from":u"Europe en France"},
{"name":u"Développement rural","codes":["AME","ECO","URB"],"from":u"Europe en France"},
{"name":u"Développement urbain","codes":["AME","ECO","URB"],"from":u"Europe en France"},
{"name":u"Eco-tourisme","codes":["TOU","BIO"],"from":u"Europe en France"},
{"name":u"Emploi et inclusion sociale","codes":["EMP","COH"],"from":u"Europe en France"},
{"name":u"Energie","codes":["ENE"],"from":u"Europe en France"},
{"name":u"Environnement","codes":["BIO"],"from":u"Europe en France"},
{"name":u"Espaces naturels à enjeux","codes":["BIO"],"from":u"Europe en France"},
{"name":u"Formation","codes":["FOR"],"from":u"Europe en France"},
{"name":u"Institutions","codes":["GOU"],"from":u"Europe en France"},
{"name":u"Les grands projets européens 2007-2013","codes":["AUT"],"from":u"Europe en France"},
{"name":u"Prévention des risques","codes":["CLI"],"from":u"Europe en France"},
{"name":u"Recherche et Innovation","codes":["REC"],"from":u"Europe en France"},
{"name":u"Santé","codes":["SAN"],"from":u"Europe en France"},
{"name":u"Services au public","codes":["SER"],"from":u"Europe en France"},
{"name":u"TIC","codes":["COM","NUM"],"from":u"Europe en France"},
{"name":u"Tourisme","codes":["TOU"],"from":u"Europe en France"},
{"name":u"Transports","codes":["MOB"],"from":u"Europe en France"},
{"name":u"Urbain","codes":["AUT"],"from":u"Europe en France"},
{"name":u"Inclusion sociale","codes":["COH","INS","EMP"],"from":u"Europe en France V2"},
{"name":u"Innovation","codes":["NUM","REC","AUT"],"from":u"Europe en France V2"},
{"name":u"Protection de l'environnement","codes":["CLI","BIO","ENE"],"from":u"Europe en France V2"},
{"name":u"Energies renouvelables","codes":["ENE"],"from":u"Europe en France V2"},
{"name":u"Emploi ","codes":["EMP","INS","FOR"],"from":u"Europe en France V2"},
{"name":u"Développement régional","codes":["AME","ECO","URB"],"from":u"Europe en France V2"},
{"name":u"Egalité femmes-hommes","codes":["DRO"],"from":u"Europe en France V2"},
{"name":u"Elections européennes","codes":["CIT","PAR","GOU"],"from":u"Europe en France V2"},
{"name":u"Recherche ","codes":["REC"],"from":u"Europe en France V2"},
{"name":u"Sylviculture","codes":["AGR","BIO","AME"],"from":u"Europe en France V2"},
{"name":u"TIC, numérique","codes":["NUM"],"from":u"Europe en France V2"},
{"name":u"Transition énergétique","codes":["ENE","CLI","HAB"],"from":u"Europe en France V2"},
{"name":u"Transports","codes":["MOB","SER","AME"],"from":u"Europe en France V2"},
{"name":u"Agriculture","codes":["AGR"],"from":u"Europe en France V2"},
{"name":u"Alimentation","codes":["AGR"],"from":u"Europe en France V2"},
{"name":u"Aménagement du territoire et cadre de vie","codes":["AME","SER"],"from":u"Europe en France V2"},
{"name":u"Développement urbain","codes":["URB","AME","ECO"],"from":u"Europe en France V2"},
{"name":u"Infrastructures locales","codes":["AME","SER"],"from":u"Europe en France V2"},
{"name":u"Patrimoine","codes":["CUL"],"from":u"Europe en France V2"},
{"name":u"Santé","codes":["SAN"],"from":u"Europe en France V2"},
{"name":u"Développement rural","codes":["AME","AGR","ECO"],"from":u"Europe en France V2"},
{"name":u"Culture","codes":["CUL"],"from":u"Europe en France V2"},
{"name":u"Services au public","codes":["SER"],"from":u"Europe en France V2"},
{"name":u"Sport","codes":["SPO"],"from":u"Europe en France V2"},
{"name":u"Développement durable","codes":["BIO","ECO","COH"],"from":u"Europe en France V2"},
{"name":u"Développement économique","codes":["ECO"],"from":u"Europe en France V2"},
{"name":u"Tourisme","codes":["TOU"],"from":u"Europe en France V2"},
{"name":u"Entreprises en difficulté","codes":["FIN","ECO"],"from":u"Europe en France V2"},
{"name":u"Economie sociale et solidaire","codes":["ECO","INS","COH"],"from":u"Europe en France V2"},
{"name":u"Activité économique nouvelle","codes":["NUM","ECO"],"from":u"Europe en France V2"},
{"name":u"Aide aux PME","codes":["ECO","FIN"],"from":u"Europe en France V2"},
{"name":u"Changement climatique","codes":["CLI"],"from":u"Europe en France V2"},
{"name":u"Coopération","codes":["COO"],"from":u"Europe en France V2"},
{"name":u"Tranfrontalier (hors programme de CTE)","codes":["GOU"],"from":u"Europe en France V2"},
{"name":u"Stratégies macro-régionales","codes":["AME","GOU","AUT"],"from":u"Europe en France V2"},
{"name":u"Coopération territoriale européenne","codes":["AME","GOU"],"from":u"Europe en France V2"},
{"name":u"Affaires maritimes, pêche, aquaculture","codes":["AGR","EAU","BIO"],"from":u"Europe en France V2"},
{"name":u"Collectif citoyen & mise en réseau","codes":["CIT","GOU","COO"],"from":u"GNIAC"},
{"name":u"Communauté d'internautes","codes":["GOU","COO"],"from":u"GNIAC"},
{"name":u"Conseil","codes":["SER"],"from":u"GNIAC"},
{"name":u"Echange de services","codes":["SER"],"from":u"GNIAC"},
{"name":u"Entreprises solidaires","codes":["ECO"],"from":u"GNIAC"},
{"name":u"Expertise pratique collaborative","codes":["AUT"],"from":u"GNIAC"},
{"name":u"Finance participative","codes":["FIN"],"from":u"GNIAC"},
{"name":u"Lien social dans les quartiers","codes":["COH"],"from":u"GNIAC"},
{"name":u"Logement, santé, éducation, environnement","codes":["HAB","SAN","EDU"],"from":u"GNIAC"},
{"name":u"Plateforme fédératrice d'entreprises","codes":["ECO","GOU","COO"],"from":u"GNIAC"},
{"name":u"Plateformes web","codes":["NUM"],"from":u"GNIAC"},
{"name":u"Promoteur d'initiatives","codes":["AUT"],"from":u"GNIAC"},
{"name":u"Réseaux","codes":["GOU","COO"],"from":u"GNIAC"},
{"name":u"Soutien création d'entreprise","codes":["ECO"],"from":u"GNIAC"},
{"name":u"Soutien recherche d'emploi","codes":["EMP"],"from":u"GNIAC"},
{"name":u"Think Tank / Do Tank","codes":["AUT"],"from":u"GNIAC"},
# {"name":u"Pas de catégories thématiques","codes":[""],"from":u"La part du colibri"},
]
# Persist the raw tag list, then build and persist the reverse index
# mapping each thematic code -> sorted list of source tag names.
normalization_as_json( "NORMALIZATION_TAGS_SOURCES_CIS", NORMALIZATION_TAGS_SOURCES_CIS )

# One (initially empty) bucket per known nomenclature code.
NORMALIZATION_TAGS_SOURCES_CIS_DICT = {k: [] for k in NOMENCLATURE_CIS_DICT}
for tag in NORMALIZATION_TAGS_SOURCES_CIS:
    for code in tag["codes"]:
        # A tag may carry several codes; avoid duplicate names per code.
        if tag["name"] not in NORMALIZATION_TAGS_SOURCES_CIS_DICT[code]:
            NORMALIZATION_TAGS_SOURCES_CIS_DICT[code].append(tag["name"])

# Sort each bucket in place. ``.items()`` works on both Python 2 and 3
# (the original ``.iteritems()`` was Python 2 only).
for v in NORMALIZATION_TAGS_SOURCES_CIS_DICT.values():
    v.sort()

normalization_as_json( "NORMALIZATION_TAGS_SOURCES_CIS_DICT", NORMALIZATION_TAGS_SOURCES_CIS_DICT )
| UTF-8 | Python | false | false | 76,755 | py | 166 | app_nomenclature_tags.py | 41 | 0.624402 | 0.621298 | 0 | 1,162 | 64.435456 | 180 |
SamiraHuber/Python-Mini-Games | 17,978,733,125,516 | 70ae8fd4f597b0208aeaee2dbfc6706ce068af25 | 0134d0262626fb6fa50550160decf25a5cb43de4 | /tic-tac-toe/board.py | 5707699b094711470a1a9704b10dcbc34996732e | []
| no_license | https://github.com/SamiraHuber/Python-Mini-Games | 98bd0c0fd480692cd90c3945dc5d00ae034ec809 | 4e440b4aee6f1a983052b140a6f61b2c2dc9a3fb | refs/heads/master | 2020-09-14T13:57:45.251126 | 2019-11-25T15:43:05 | 2019-11-25T15:43:05 | 223,148,357 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class board:
array = [[" ", " ", " "], [" ", " ", " "], [" ", " ", " "]]
def print_it(self):
for row in range(3):
print(self.array[row])
def make_turn(self, sign, position):
self.array[position[0]][position[1]] = sign
def is_empty(self, position):
return self.array[position[0]][position[1]] == ' '
def is_player(self, position):
return self.array[position[0]][position[1]] == 'O'
def is_cpu(self, position):
return self.array[position[0]][position[1]] == 'X'
def is_finished(self):
for row in range(3):
amount = 0
for col in range(3):
if self.is_player([row, col]):
amount = amount + 1
elif self.is_cpu([row, col]):
amount = amount - 1
if amount == 3:
print('YOU WON')
return 1
if amount == -3:
print('YOU LOST')
return 1
for row in range(3):
amount = 0
for col in range(3):
if self.is_player([col, row]):
amount = amount + 1
elif self.is_cpu([col, row]):
amount = amount - 1
if amount == 3:
print('YOU WON')
return 1
if amount == -3:
print('YOU LOST')
return 1
amount = 0
for pos in range(3):
if self.is_player([pos, pos]):
amount = amount + 1
if self.is_cpu([pos, pos]):
amount = amount - 1
if amount == 3:
print('YOU WON')
return 1
if amount == -3:
print('YOU LOST')
return 1
if self.is_player([0, 2]) and self.is_player([2, 0]) and self.is_player([1, 1]):
print('YOU WON')
return 1
if self.is_cpu([0, 2]) and self.is_cpu([2, 0]) and self.is_cpu([1, 1]):
print('YOU LOST')
return 1
for row in range(3):
for col in range(3):
if self.is_empty([row, col]):
return 0
print('NO WINNER')
return 1
| UTF-8 | Python | false | false | 2,244 | py | 9 | board.py | 7 | 0.424242 | 0.400624 | 0 | 76 | 28.513158 | 88 |
0therGuys/exile | 11,519,102,317,647 | 970f01d4356946dce1ae88e628618d0cf29ca694 | 596f1e2eaf3198c4fc157ed9115edcad25753666 | /exile/scard/const.py | 86eccfbd671f743061d41d5574b56881e5fbfe1c | [
"Apache-2.0"
]
| permissive | https://github.com/0therGuys/exile | 19aff9ab7f316295ec622e6d1a401ea3e5e85a7a | 38fd9c4d22edcecae0b6fd4dbc9a9d894f366b2f | refs/heads/master | 2023-08-18T05:30:43.858100 | 2021-09-24T16:13:39 | 2021-09-24T16:13:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from enum import Enum
class SCardConstants:
    """Buffer and name size limits for the winscard API.

    See https://docs.microsoft.com/en-us/windows/desktop/api/winscard
    """

    # Maximum Tx/Rx buffer size for a short APDU.
    MAX_BUFFER_SIZE = 264
    # Enhanced buffer: 64K payload + APDU + Lc + Le.
    MAX_BUFFER_SIZE_EXTENDED = 4 + 3 + (1 << 16) + 3
    MAX_ATR_SIZE = 33
    MAX_READERNAME = 52
class SCardStatus(Enum):
    """Smart-card (winscard-style) status and error codes.

    Each member's value is the numeric return code; the bare string after
    each member is its human-readable description (note these strings are
    discarded at runtime -- they are not attached as member docstrings).
    """

    S_SUCCESS = 0x00000000
    """No error was encountered."""
    F_INTERNAL_ERROR = 0x80100001
    """An internal consistency check failed."""
    E_CANCELLED = 0x80100002
    """The action was cancelled by an SCardCancel request."""
    E_INVALID_HANDLE = 0x80100003
    """The supplied handle was invalid."""
    E_INVALID_PARAMETER = 0x80100004
    """One or more of the supplied parameters could not be properly interpreted."""
    E_INVALID_TARGET = 0x80100005
    """Registry startup information is missing or invalid."""
    E_NO_MEMORY = 0x80100006
    """Not enough memory available to complete this command."""
    F_WAITED_TOO_LONG = 0x80100007
    """An internal consistency timer has expired."""
    E_INSUFFICIENT_BUFFER = 0x80100008
    """The data buffer to receive returned data is too small for the returned data."""
    E_UNKNOWN_READER = 0x80100009
    """The specified reader name is not recognized."""
    E_TIMEOUT = 0x8010000A
    """The user-specified timeout value has expired."""
    E_SHARING_VIOLATION = 0x8010000B
    """The smart card cannot be accessed because of other connections outstanding."""
    E_NO_SMARTCARD = 0x8010000C
    """The operation requires a Smart Card, but no Smart Card is currently in the device."""
    E_UNKNOWN_CARD = 0x8010000D
    """The specified smart card name is not recognized."""
    E_CANT_DISPOSE = 0x8010000E
    """The system could not dispose of the media in the requested manner."""
    E_PROTO_MISMATCH = 0x8010000F
    """The requested protocols are incompatible with the protocol currently in use with the smart card."""
    E_NOT_READY = 0x80100010
    """The reader or smart card is not ready to accept commands."""
    E_INVALID_VALUE = 0x80100011
    """One or more of the supplied parameters values could not be properly interpreted."""
    E_SYSTEM_CANCELLED = 0x80100012
    """The action was cancelled by the system, presumably to log off or shut down."""
    F_COMM_ERROR = 0x80100013
    """An internal communications error has been detected."""
    F_UNKNOWN_ERROR = 0x80100014
    """An internal error has been detected, but the source is unknown."""
    E_INVALID_ATR = 0x80100015
    """An ATR obtained from the registry is not a valid ATR string."""
    E_NOT_TRANSACTED = 0x80100016
    """An attempt was made to end a non-existent transaction."""
    E_READER_UNAVAILABLE = 0x80100017
    """The specified reader is not currently available for use."""
    P_SHUTDOWN = 0x80100018
    """The operation has been aborted to allow the server application to exit."""
    E_PCI_TOO_SMALL = 0x80100019
    """The PCI Receive buffer was too small."""
    E_READER_UNSUPPORTED = 0x8010001A
    """The reader driver does not meet minimal requirements for support."""
    E_DUPLICATE_READER = 0x8010001B
    """The reader driver did not produce a unique reader name."""
    E_CARD_UNSUPPORTED = 0x8010001C
    """The smart card does not meet minimal requirements for support."""
    E_NO_SERVICE = 0x8010001D
    """The Smart card resource manager is not running."""
    E_SERVICE_STOPPED = 0x8010001E
    """The Smart card resource manager has shut down."""
    E_UNEXPECTED = 0x8010001F
    """An unexpected card error has occurred."""
    E_ICC_INSTALLATION = 0x80100020
    """No primary provider can be found for the smart card."""
    E_ICC_CREATEORDER = 0x80100021
    """The requested order of object creation is not supported."""
    E_UNSUPPORTED_FEATURE = 0x80100022
    """This smart card does not support the requested feature."""
    E_DIR_NOT_FOUND = 0x80100023
    """The identified directory does not exist in the smart card."""
    E_FILE_NOT_FOUND = 0x80100024
    """The identified file does not exist in the smart card."""
    E_NO_DIR = 0x80100025
    """The supplied path does not represent a smart card directory."""
    E_NO_FILE = 0x80100026
    """The supplied path does not represent a smart card file."""
    E_NO_ACCESS = 0x80100027
    """Access is denied to this file."""
    E_WRITE_TOO_MANY = 0x80100028
    """The smart card does not have enough memory to store the information."""
    E_BAD_SEEK = 0x80100029
    """There was an error trying to set the smart card file object pointer."""
    E_INVALID_CHV = 0x8010002A
    """The supplied PIN is incorrect."""
    E_UNKNOWN_RES_MNG = 0x8010002B
    """An unrecognized error code was returned from a layered component."""
    E_NO_SUCH_CERTIFICATE = 0x8010002C
    """The requested certificate does not exist."""
    E_CERTIFICATE_UNAVAILABLE = 0x8010002D
    """The requested certificate could not be obtained."""
    E_NO_READERS_AVAILABLE = 0x8010002E
    """Cannot find a smart card reader."""
    E_COMM_DATA_LOST = 0x8010002F
    """A communications error with the smart card has been detected. Retry the operation."""
    E_NO_KEY_CONTAINER = 0x80100030
    """The requested key container does not exist on the smart card."""
    E_SERVER_TOO_BUSY = 0x80100031
    """The Smart Card Resource Manager is too busy to complete this operation."""
    W_UNSUPPORTED_CARD = 0x80100065
    """The reader cannot communicate with the card, due to ATR string configuration conflicts."""
    W_UNRESPONSIVE_CARD = 0x80100066
    """The smart card is not responding to a reset."""
    W_UNPOWERED_CARD = 0x80100067
    """Power has been removed from the smart card, so that further communication is not possible."""
    W_RESET_CARD = 0x80100068
    """The smart card has been reset, so any shared state information is invalid."""
    W_REMOVED_CARD = 0x80100069
    """The smart card has been removed, so further communication is not possible."""
    W_SECURITY_VIOLATION = 0x8010006A
    """Access was denied because of a security violation."""
    W_WRONG_CHV = 0x8010006B
    """The card cannot be accessed because the wrong PIN was presented."""
    W_CHV_BLOCKED = 0x8010006C
    """The card cannot be accessed because the maximum number of PIN entry attempts has been reached."""
    W_EOF = 0x8010006D
    """The end of the smart card file has been reached."""
    W_CANCELLED_BY_USER = 0x8010006E
    """The user pressed "Cancel" on a Smart Card Selection Dialog."""
    W_CARD_NOT_AUTHENTICATED = 0x8010006F
    """No PIN was presented to the smart card."""
class Scope:
    """Scopes for establishing a smart-card context."""

    USER = 0x0000      # scope in user space
    TERMINAL = 0x0001  # scope in terminal
    SYSTEM = 0x0002    # scope in system
class Protocol:
    """Card transmission protocol flags (bit mask)."""

    UNDEFINED = 0x0000  # protocol not set
    UNSET = UNDEFINED   # alias of UNDEFINED
    T0 = 0x0001         # T=0 active protocol
    T1 = 0x0002         # T=1 active protocol
    RAW = 0x0004        # raw active protocol
    T15 = 0x0008        # T=15 protocol
    ANY = T0 | T1       # IFD determines protocol
class ShareMode:
    """Connection sharing modes."""

    EXCLUSIVE = 0x0001  # exclusive mode only
    SHARED = 0x0002     # shared mode only
    DIRECT = 0x0003     # raw mode only
class Disposition:
    """What to do with the card when the connection is closed."""

    LEAVE_CARD = 0x0000    # do nothing on close
    RESET_CARD = 0x0001    # reset on close
    UNPOWER_CARD = 0x0002  # power down on close
    EJECT_CARD = 0x0003    # eject on close
class CardState:
    """Card status bits (bit mask)."""

    UNKNOWN = 0x0001     # unknown state
    ABSENT = 0x0002      # card is absent
    PRESENT = 0x0004     # card is present
    SWALLOWED = 0x0008   # card not powered
    POWERED = 0x0010     # card is powered
    NEGOTIABLE = 0x0020  # ready for PTS
    SPECIFIC = 0x0040    # PTS has been set
class ReaderState:
    """Reader status flags (bit mask)."""

    UNAWARE = 0x0000      # app wants status
    IGNORE = 0x0001       # ignore this reader
    CHANGED = 0x0002      # state has changed
    UNKNOWN = 0x0004      # reader unknown
    UNAVAILABLE = 0x0008  # status unavailable
    EMPTY = 0x0010        # card removed
    PRESENT = 0x0020      # card inserted
    ATRMATCH = 0x0040     # ATR matches card
    EXCLUSIVE = 0x0080    # exclusive mode
    INUSE = 0x0100        # shared mode
    MUTE = 0x0200         # unresponsive card
    UNPOWERED = 0x0400    # unpowered card
| UTF-8 | Python | false | false | 9,217 | py | 10 | const.py | 8 | 0.607573 | 0.529456 | 0 | 222 | 40.518018 | 110 |
mspgeek/Client_Portal | 9,887,014,755,413 | badae2c31f367b5bf777aec2a78be50a1404f3f9 | bf683eb4a6080cf67669de90d1afdad53fccb738 | /Lib/site-packages/viewflow/flow/nodes.py | daaf1d6897e4b14a4646c0cf180149dadc1a30ab | [
"MIT"
]
| permissive | https://github.com/mspgeek/Client_Portal | cd513308840aa4203554ebc1160f17f0dd4b17cf | 0267168bb90e8e9c85aecdd715972b9622b82384 | refs/heads/master | 2023-03-07T21:33:22.767108 | 2020-04-08T01:43:19 | 2020-04-08T01:43:19 | 253,946,635 | 6 | 0 | MIT | false | 2022-12-31T07:01:43 | 2020-04-08T00:43:07 | 2020-08-15T17:12:06 | 2022-12-31T07:01:41 | 71,679 | 4 | 0 | 3 | HTML | false | false | from .. import nodes
from . import views
from .activation import ManagedStartViewActivation, ManagedViewActivation
class StartFunction(nodes.StartFunction):
    """Start node that is triggered directly from Python code.

    Usage::

        class MyFlow(Flow):
            start = flow.StartFunction(this.create_request)

            def create_request(self, activation, **kwargs):
                activation.prepare()
                activation.done()

        MyFlow.create_request.run(**kwargs)

    Every keyword argument passed to ``run()`` is forwarded to the
    wrapped function.
    """

    activate_next_view_class = views.ActivateNextTaskView
    cancel_view_class = views.CancelTaskView
    detail_view_class = views.DetailTaskView
    perform_view_class = views.PerformTaskView
    undo_view_class = views.UndoTaskView
class Function(nodes.Function):
    """Task node that is executed directly from Python code.

    Usage::

        class MyFlow(Flow):
            my_task = flow.Function(this.perform_my_task)

            @method_decorator(flow.flow_func(task_loader=lambda flow_task, **kwargs: ...))
            def perform_my_task(self, activation, **kwargs):
                activation.prepare()
                activation.done()

        MyFlow.my_task.run(**kwargs)

    Every keyword argument passed to ``run()`` is forwarded to the
    wrapped function.
    """

    activate_next_view_class = views.ActivateNextTaskView
    cancel_view_class = views.CancelTaskView
    detail_view_class = views.DetailTaskView
    perform_view_class = views.PerformTaskView
    undo_view_class = views.UndoTaskView
class Handler(nodes.Handler):
    """Task node executed automatically right after its task is created.

    Unlike :class:`.Function`, a :class:`.Handler` is never called
    explicitly from user code -- the flow engine runs it by itself.

    Usage::

        class MyFlow(Flow):
            my_task = (
                flow.Handler(this.my_handler)
                .Next(this.end)
            )

            def my_handler(self, activation):
                # Your custom code
                pass

    The callback does not need to call ``prepare()`` or ``done()`` on the
    activation; the node takes care of that.
    """

    activate_next_view_class = views.ActivateNextTaskView
    cancel_view_class = views.CancelTaskView
    detail_view_class = views.DetailTaskView
    perform_view_class = views.PerformTaskView
    undo_view_class = views.UndoTaskView
class If(nodes.If):
    """Gateway that activates one of two outgoing paths based on a condition.

    Usage::

        class MyFlow(Flow):
            check_approve = (
                flow.If(lambda activation: activation.process.is_approved)
                .Then(this.send_message)
                .Else(this.end_rejected)
            )
    """

    cancel_view_class = views.CancelTaskView
    detail_view_class = views.DetailTaskView
    perform_view_class = views.PerformTaskView
    undo_view_class = views.UndoTaskView
class Switch(nodes.Switch):
    """Gateway that activates the first outgoing path whose condition matches."""

    cancel_view_class = views.CancelTaskView
    detail_view_class = views.DetailTaskView
    perform_view_class = views.PerformTaskView
    undo_view_class = views.UndoTaskView
class Join(nodes.Join):
    """Gateway that waits for one or all incoming paths before continuing.

    A join should be connected to exactly one split task.

    Usage::

        join_on_warehouse = flow.Join().Next(this.next_task)
    """

    cancel_view_class = views.CancelTaskView
    detail_view_class = views.DetailTaskView
    perform_view_class = views.PerformTaskView
    undo_view_class = views.UndoTaskView
class Split(nodes.Split):
    """Gateway that activates outgoing paths in parallel, each one guarded
    by an optional per-path condition.

    Usage::

        split_on_decision = (
            flow.Split()
            .Next(check_post, cond=lambda p: p.is_check_post_required)
            .Next(this.perform_task_always)
        )
    """

    cancel_view_class = views.CancelTaskView
    detail_view_class = views.DetailTaskView
    perform_view_class = views.PerformTaskView
    undo_view_class = views.UndoTaskView
class AbstractJob(nodes.AbstractJob):
    """Base node for tasks executed as background jobs."""

    cancel_view_class = views.CancelTaskView
    detail_view_class = views.DetailTaskView
    perform_view_class = views.PerformTaskView
    undo_view_class = views.UndoTaskView
class StartSignal(nodes.StartSignal):
    """Start node triggered by a django signal.

    Usage::

        def my_start_receiver(activation, **signal_kwargs):
            activation.prepare()
            # Your custom code
            activation.done()

        class MyFlow(Flow):
            start = flow.StartSignal(post_save, my_start_receiver, sender=MyModelCls)

    The receiver gets the activation as its first argument.
    """

    cancel_view_class = views.CancelTaskView
    detail_view_class = views.DetailTaskView
    undo_view_class = views.UndoTaskView
class Signal(nodes.Signal):
    """Task node triggered by a django signal.

    Usage::

        create_model = flow.Signal(post_create, my_receiver, sender=MyModelCls)

    Unlike :class:`.StartSignal`, the receiver is responsible for obtaining
    the activation itself; this can be done with the :func:`.flow_signal`
    decorator.
    """

    cancel_view_class = views.CancelTaskView
    detail_view_class = views.DetailTaskView
    undo_view_class = views.UndoTaskView
class Start(nodes.Start):
    """Start-process node backed by a django view.

    Usage::

        start = (
            flow.Start(StartView, fields=["some_process_field"])
            .Available(lambda user: user.is_super_user)
            .Next(this.next_task)
        )

    With a function based view::

        start = flow.Start(start_process)

        @flow_start_view()
        def start_process(request, activation):
            if not activation.has_perm(request.user):
                raise PermissionDenied

            activation.prepare(request.POST or None)
            form = SomeForm(request.POST or None)
            if form.is_valid():
                form.save()
                activation.done()
                return redirect('/')
            return render(request, {'activation': activation, 'form': form})

    Include ``{{ activation.management_form }}`` in the template so that the
    task start time and other performance statistics are tracked properly::

        <form method="POST">
            {{ form }}
            {{ activation.management_form }}
            <button type="submit"/>
        </form>
    """

    activate_next_view_class = views.ActivateNextTaskView
    cancel_view_class = views.CancelTaskView
    detail_view_class = views.DetailTaskView
    undo_view_class = views.UndoTaskView

    start_view_class = views.CreateProcessView
    activation_class = ManagedStartViewActivation
class View(nodes.View):
    """Human task node backed by a django view.

    Usage::

        task = (
            flow.View(some_view)
            .Permission('my_app.can_do_task')
            .Next(this.next_task)
        )

    With a function based view::

        task = flow.View(task_view)

        @flow_view()
        def task_view(request, activation):
            if not activation.flow_task.has_perm(request.user):
                raise PermissionDenied

            activation.prepare(request.POST or None)
            form = SomeForm(request.POST or None)
            if form.is_valid():
                form.save()
                activation.done()
                return redirect('/')
            return render(request, {'activation': activation, 'form': form})

    Include ``{{ activation.management_form }}`` in the template so that the
    task start time and other performance statistics are tracked properly::

        <form method="POST">
            {{ form }}
            {{ activation.management_form }}
            <button type="submit"/>
        </form>
    """

    activate_next_view_class = views.ActivateNextTaskView
    cancel_view_class = views.CancelTaskView
    detail_view_class = views.DetailTaskView
    undo_view_class = views.UndoTaskView

    assign_view_class = views.AssignTaskView
    unassign_view_class = views.UnassignTaskView

    activation_class = ManagedViewActivation
class End(nodes.End):
    """Node that marks the process as finished."""

    cancel_view_class = views.CancelTaskView
    detail_view_class = views.DetailTaskView
    perform_view_class = views.PerformTaskView
    undo_view_class = views.UndoTaskView
| UTF-8 | Python | false | false | 8,713 | py | 425 | nodes.py | 277 | 0.62401 | 0.62401 | 0 | 322 | 26.059006 | 91 |
yuanyuhua/1807-2 | 2,456,721,336,272 | 83f447e2e31e2dd72a0deb860f368fbf13a0f152 | 72ae2d96751a4ebba9ddd9a4ead92cea0ec78445 | /08day/飞机大战主类.py | c3b3470642b08b2d32150a4a5bc60ef923928467 | []
| no_license | https://github.com/yuanyuhua/1807-2 | ef667167d4fce242f1963001ba9bb30bf00b2305 | 1a3efcfa6898cfa1e650ae3c90681a9b921ac1f8 | refs/heads/master | 2020-03-25T10:05:03.969310 | 2018-08-20T07:48:57 | 2018-08-20T07:48:57 | 143,683,243 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame
from jinglingzu import *
pygame.init()  # initialise all pygame modules

yh = pygame.Rect(480, 300, 120, 120)  # NOTE(review): unused rect -- confirm before removing

# Game window: width 480, height 700.
screen = pygame.display.set_mode((480, 700))
bg = pygame.image.load("./images/background.png")    # background image
hero = pygame.image.load("./images/hero1.png")       # hero plane image
# Wrap the irregular plane sprite in a regular rectangle for positioning.
herorect = pygame.Rect(200, 500, 120, 120)
# Clock to throttle the loop (a bare ``while True`` would run unbounded).
clock = pygame.time.Clock()

# Enemy sprites, collected in a group so they update/draw together.
enemy = EnemySprite()
enemy1 = EnemySprite()
enemy1.rect.x = 50   # offset to the right so the planes do not overlap
enemy1.rect.y = 700
enemy1.speed = -2    # different speed from the first enemy
enemy_group = pygame.sprite.Group(enemy, enemy1)

while True:  # game loop
    clock.tick(60)       # cap at 60 frames per second
    herorect.y -= 10     # move the hero upwards
    screen.blit(bg, (0, 0))       # draw the background first
    screen.blit(hero, herorect)   # then the hero on top
    if herorect.bottom <= 0:      # hero flew off the top edge
        herorect.top = 700        # wrap around to the bottom (y = 700)
    enemy_group.update()
    enemy_group.draw(screen)
    # Event handling.
    for event in pygame.event.get():
        # Did the user click the close button?
        if event.type == pygame.QUIT:
            print("退出游戏...")
            pygame.quit()
            # Stop the script here: without this the loop kept calling
            # pygame functions after quit() and raised an error.
            raise SystemExit
    pygame.display.update()  # refresh the screen
| UTF-8 | Python | false | false | 1,391 | py | 43 | 飞机大战主类.py | 42 | 0.699717 | 0.640227 | 0 | 48 | 21 | 56 |
StephenGodard/python | 8,186,207,681,131 | f4f82d532dd27afa11cc6903527be9ca0e1fd3ea | c17a680277827b940668e3cc4381886331c55f61 | /python/scratch.py | b8326994c9ef5cdc0654578afa9127ecef619294 | []
| no_license | https://github.com/StephenGodard/python | 731f20580e421022c7c2f2a03924db6519537f9f | 831d12ab392ac08fea96dce3f54567b9269ba834 | refs/heads/master | 2020-04-07T13:34:05.864993 | 2018-12-08T21:25:44 | 2018-12-08T21:25:44 | 158,412,283 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | t=6.892
# Average speed: distance d over time t (t defined above) -- units are
# not stated in the source; presumably metres and seconds.
d=19.7
v=d/t
# Print the speed rounded to one decimal place.
print(round(v,1))
yashgoyal07/contacts_manager | 16,406,775,082,665 | fa723c85f774578142dba93b6c666c2d167a7247 | 9d24b43fb248ebac151fc143960245522e6932d0 | /src/models/mysql_model.py | c3f5d4c1971eaacfc2e2d4ff49a960565917f34d | []
| no_license | https://github.com/yashgoyal07/contacts_manager | 49699c6940448a60fea528453b7884e0f0ffeda7 | ef834b0bde04ec6cc785be19e597e65d8a94884f | refs/heads/main | 2023-06-04T10:49:39.518863 | 2021-06-28T17:11:46 | 2021-06-28T17:11:46 | 319,005,596 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import mysql.connector
from helpers.utils import get_environment
from configs.mysql_config import MysqlConfig
# model class for communication with mysql or any database.
class ModelMysql(object):
    """Thin data-access helper around ``mysql.connector``.

    Connection parameters are looked up in ``MysqlConfig.INSTANCE_CONFIG``
    keyed by the current environment (local, QA or prod).
    """

    def __init__(self):
        # Environment name (helper from the ``helpers`` package).
        self.infra_env = get_environment()
        # MySQL settings for that environment; empty dict if the
        # environment is unknown.
        self.instance_config = MysqlConfig.INSTANCE_CONFIG.get(self.infra_env, {})

    def get_connection(self):
        """Open and return a new connection to the configured MySQL server."""
        return mysql.connector.connect(host=self.instance_config.get("host"),
                                       port=self.instance_config.get("port"),
                                       user=self.instance_config.get("username"),
                                       password=self.instance_config.get("password"),
                                       )

    def insert_query(self, query, query_params):
        """Execute a statement that returns no rows (INSERT/UPDATE/DELETE ...).

        The statement is committed; cursor and connection are always closed
        (the original code leaked the connection).
        """
        connection = self.get_connection()
        try:
            cur = connection.cursor()
            try:
                cur.execute(query, query_params)
                connection.commit()
            finally:
                cur.close()
        finally:
            connection.close()

    def extract_query(self, query, query_params):
        """Execute a query and return all rows as a list of tuples."""
        connection = self.get_connection()
        try:
            cur = connection.cursor()
            try:
                cur.execute(query, query_params)
                result = cur.fetchall()  # fetchall returns a list of tuples
                connection.commit()
            finally:
                cur.close()
        finally:
            connection.close()
        return result
| UTF-8 | Python | false | false | 1,819 | py | 14 | mysql_model.py | 7 | 0.631116 | 0.631116 | 0 | 40 | 44.475 | 94 |
mstfbl/data | 9,251,359,574,460 | a79f36a65d962c9a799917a7214addbe5e7b9f13 | 31bdf96faa216a43c3712210797fe9e2c8c02a7f | /torchdata/datapipes/iter/util/combining.py | a17df721e5d2ae311e87e3c3a182651ab0ea48ff | [
"BSD-3-Clause"
]
| permissive | https://github.com/mstfbl/data | 93d58053bc26b80915f4c92ae8041e6c7636f997 | 4e48de45c28159054589eb017d2e9445e079b0bf | refs/heads/main | 2023-08-26T21:59:03.897135 | 2021-10-20T15:05:54 | 2021-10-20T15:06:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
import warnings
from collections import OrderedDict
from torch.utils.data import IterDataPipe, MapDataPipe, functional_datapipe
from typing import Callable
@functional_datapipe("zip_by_key")
class KeyZipperIterDataPipe(IterDataPipe):
    r""":class:`KeyZipperIterDataPipe`.

    Iterable DataPipe to zip two DataPipes based on the matching key.

    Args:
        source_datapipe: KeyZipper will yield data based on the order of this DataPipe
        ref_datapipe: Reference datapipe to find matching key for `source_datapipe`
        key_fn: Callable to extract key of data from source_datapipe
        ref_key_fn: Callable to extract key of data from ref_datapipe.
            If it's not specified, the `key_fn` would be applied to reference data
        keep_key: Option to yield matching key
        buffer_size: The size of buffer used to hold key-data pair from reference DataPipe.
            If it's specified as None, the buffer size becomes infinite
    """

    def __init__(
        self, source_datapipe, ref_datapipe, key_fn, ref_key_fn=None, keep_key=False, buffer_size=10000,
    ):
        self.source_datapipe = source_datapipe
        self.ref_datapipe = ref_datapipe
        self.key_fn = key_fn
        # Fall back to key_fn for the reference side when ref_key_fn is absent.
        self.ref_key_fn = key_fn if ref_key_fn is None else ref_key_fn
        self.keep_key = keep_key
        if buffer_size is not None and buffer_size <= 0:
            raise ValueError("'buffer_size' is required to be either None or a positive integer.")
        self.buffer_size = buffer_size

    def __iter__(self):
        # Maps reference key -> reference data. OrderedDict so the oldest
        # entry can be evicted first (FIFO) when the buffer overflows.
        buffer: OrderedDict = OrderedDict()
        ref_it = iter(self.ref_datapipe)
        warn_once_flag = True  # warn about eviction at most once per iteration
        for data in self.source_datapipe:
            key = self.key_fn(data)
            # Consume the reference pipe until the matching key appears.
            while key not in buffer:
                try:
                    ref_data = next(ref_it)
                except StopIteration:
                    raise BufferError(
                        f"No matching key can be found from reference DataPipe for the data {data}. "
                        "Please consider increasing the buffer size."
                    )
                ref_key = self.ref_key_fn(ref_data)
                if ref_key in buffer:
                    raise ValueError("Duplicate key is found in reference DataPipe")
                if self.buffer_size is not None and len(buffer) > self.buffer_size:
                    if warn_once_flag:
                        warn_once_flag = False
                        warnings.warn(
                            "Buffer reaches the upper limit, so reference key-data pair begins to "
                            "be removed from buffer in FIFO order. Please consider increase buffer size."
                        )
                    # Evict the oldest reference entry (FIFO).
                    buffer.popitem(last=False)
                buffer[ref_key] = ref_data
            if self.keep_key:
                yield key, data, buffer.pop(key)
            else:
                yield data, buffer.pop(key)

    def __len__(self):
        # Length follows the source side; the reference length is irrelevant.
        return len(self.source_datapipe)
def tuple_merge(item, item_from_map):
    """Default merge function: pair the source item with its map item."""
    return item, item_from_map
@functional_datapipe("zip_with_map")
class MapZipperIterDataPipe(IterDataPipe):
    r""" :class:`MapZipperIterDataPipe`.

    IterDataPipe that joins the items from the source IterDataPipe with items from a MapDataPipe. The
    matching is done by the key function, which maps an item from source IterDataPipe to
    a key that exists in MapDataPipe. The return value is created by the merge function, which returns
    a tuple of the two items by default.

    Args:
        source_iterdatapipe: IterDataPipe from which items are yield and will be combined with an item from map_datapipe
        map_datapipe: MapDataPipe that takes a key from key_fn, and returns an item
        key_fn: Function that maps each item from source_iterdatapipe to a key that exists in map_datapipe
        merge_fn: Function that combines the item from source_iterdatapipe and the item from map_datapipe,
            by default a tuple is created
    """

    def __init__(
        self,
        source_iterdatapipe: IterDataPipe,
        map_datapipe: MapDataPipe,
        key_fn: Callable,
        merge_fn: Callable = tuple_merge,
    ):
        if not isinstance(map_datapipe, MapDataPipe):
            raise TypeError(f"map_datapipe must be a MapDataPipe, but its type is {type(map_datapipe)} instead.")
        self.source_iterdatapipe = source_iterdatapipe
        self.map_datapipe = map_datapipe
        self.key_fn = key_fn
        self.merge_fn = merge_fn
        self.length = -1  # lazily computed by __len__

    def __iter__(self):
        for item in self.source_iterdatapipe:
            key = self.key_fn(item)
            try:
                map_item = self.map_datapipe[key]
            except (KeyError, IndexError) as e:
                # Chain the original lookup error for easier debugging.
                raise KeyError(
                    f"key_fn maps {item} to {key}, which is not a valid key in the given MapDataPipe."
                ) from e
            yield self.merge_fn(item, map_item)

    def __len__(self) -> int:
        # Cache the source length on first use.
        if self.length == -1:
            self.length = len(self.source_iterdatapipe)
        return self.length
| UTF-8 | Python | false | false | 5,146 | py | 46 | combining.py | 34 | 0.617761 | 0.616207 | 0 | 121 | 41.528926 | 120 |
ArashMoshirnia/store_with_smart_recommender | 18,562,848,656,030 | 1cbcd9efcae9abec270e53a190ef502746a37916 | 8f619dc8adb894d92590b14af0e5a4b6f7981c44 | /products/admin.py | 2406114d609e89a1dc359bf9c3c7dc4df701f91e | []
| no_license | https://github.com/ArashMoshirnia/store_with_smart_recommender | e427c97ae18945bb3a0d63b6a8efd2620c048314 | ccfddd3c03963d820054f2f418037ae0b81b10f5 | refs/heads/master | 2023-07-05T03:09:50.181457 | 2021-08-10T14:20:34 | 2021-08-10T14:20:34 | 352,128,514 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from products.models import Product, Category, ProductRating
class ProductAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'is_enabled')
class CategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'name')
class RatingAdmin(admin.ModelAdmin):
list_display = ('user', 'product', 'rating')
admin.site.register(Product, ProductAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(ProductRating, RatingAdmin)
| UTF-8 | Python | false | false | 484 | py | 13 | admin.py | 11 | 0.747934 | 0.747934 | 0 | 20 | 23.2 | 60 |
promediacorp/fbnames | 12,807,592,523,288 | a85504b757a939285bff2debb6f8ac6ed624f833 | d415ba2dfd6a74c2cc4e2002badf0f22d77853a4 | /fbnames/spiders/test.py | 99f519206b8c4096fb2ea6bfcc4fb6d096d04e7b | []
| no_license | https://github.com/promediacorp/fbnames | 2635e465ab14a96b5badd3f81b48bd7683ce2a68 | 76d8e519d729af58516a832461b402cf68622752 | refs/heads/master | 2021-01-21T02:59:18.291244 | 2014-12-19T04:09:12 | 2014-12-19T04:09:12 | 28,012,560 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.spider import BaseSpider
#from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from fbnames.items import FbnamesItem
from scrapy.contrib.linkextractors import LinkExtractor
class MySpider(CrawlSpider):
name = "fb"
allowed_domains = ["uphail.com"]
start_urls = ["http://www.uphail.com"]
# rules = (Rule (LxmlLinkExtractor(allow=(r"",) deny=())
# , callback="parseo", follow= False),
# )
rules = (
Rule(
LinkExtractor(allow=('',),
deny=('vt/',
),
),
callback='parse_item',
follow=True
),
)
def parse_item(self, response):
hxs = HtmlXPathSelector(response)
urls = hxs.xpath('//a')
items = []
for url in urls:
item = FbnamesItem()
item["anchor"] = url.xpath("text()").extract()
item["link"] = url.xpath("@href").extract()
items.append(item)
return items
#crawls | UTF-8 | Python | false | false | 1,137 | py | 10 | test.py | 9 | 0.565523 | 0.565523 | 0 | 43 | 25.465116 | 65 |
StanfordAHA/garnet | 1,211,180,790,812 | 71cc91dbf6a9f91cc7f2537a62bffdcc4c21b812 | 8e3321fecc0135d694c30f79f726205692f4763c | /global_buffer/design/SRAM.py | 6b25c83a5fc7d2fa16c1132ced3104c5627e9309 | [
"BSD-3-Clause"
]
| permissive | https://github.com/StanfordAHA/garnet | ea178b93244c2be41d04d90725e61a4f8b760254 | 70f71eeda2812d53116aa09580f837b2e0d2558a | refs/heads/master | 2023-08-31T16:08:49.068457 | 2023-07-28T14:21:20 | 2023-07-28T14:21:20 | 140,670,939 | 83 | 17 | BSD-3-Clause | false | 2023-09-14T14:38:04 | 2018-07-12T06:30:10 | 2023-08-27T12:58:31 | 2023-09-14T14:38:04 | 19,595 | 82 | 13 | 80 | Python | false | false | from kratos import Generator, always_ff, posedge
class SRAM(Generator):
def __init__(self, process: str, name: str, word_size: int, num_words: int):
super().__init__(name)
self.word_size = word_size
self.num_words = num_words
self.data_array = self.var("data_array", word_size, size=num_words)
if process == "TSMC":
self.CLK = self.clock("CLK")
self.CEB = self.input("CEB", 1)
self.WEB = self.input("WEB", 1)
self.A = self.input("A", self.num_words.bit_length() - 1)
self.D = self.input("D", self.word_size)
self.BWEB = self.input("BWEB", self.word_size)
self.Q = self.output("Q", self.word_size)
self.RTSEL = self.input("RTSEL", 2)
self.WTSEL = self.input("WTSEL", 2)
self.add_always(self.tsmc_ff)
elif process == "GF":
self.CLK = self.clock("CLK")
self.CEN = self.input("CEN", 1)
self.RDWEN = self.input("RDWEN", 1)
self.A = self.input("A", self.num_words.bit_length() - 1)
self.D = self.input("D", self.word_size)
self.BW = self.input("BW", self.word_size)
self.Q = self.output("Q", self.word_size)
self.T_LOGIC = self.input("T_LOGIC", 1)
self.T_Q_RST = self.input("T_Q_RST", 1)
self.MA_SAWL1 = self.input("MA_SAWL1", 1)
self.MA_SAWL0 = self.input("MA_SAWL0", 1)
self.MA_WL1 = self.input("MA_WL1", 1)
self.MA_WL0 = self.input("MA_WL0", 1)
self.MA_WRAS1 = self.input("MA_WRAS1", 1)
self.MA_WRAS0 = self.input("MA_WRAS0", 1)
self.MA_VD1 = self.input("MA_VD1", 1)
self.MA_VD0 = self.input("MA_VD0", 1)
self.MA_WRT = self.input("MA_WRT", 1)
self.MA_STABAS1 = self.input("MA_STABAS1", 1)
self.MA_STABAS0 = self.input("MA_STABAS0", 1)
self.add_always(self.gf_ff)
@always_ff((posedge, "CLK"))
def tsmc_ff(self):
if self.CEB == 0:
self.Q = self.data_array[self.A]
if self.WEB == 0:
for i in range(64):
if self.BWEB[i] == 0:
self.data_array[self.A][i] = self.D[i]
@always_ff((posedge, "CLK"))
def gf_ff(self):
if self.CEN == 0:
self.Q = self.data_array[self.A]
if self.RDWEN == 0:
for i in range(64):
if self.BW[i]:
self.data_array[self.A][i] = self.D[i]
| UTF-8 | Python | false | false | 2,580 | py | 562 | SRAM.py | 199 | 0.498837 | 0.479457 | 0 | 63 | 39.952381 | 80 |
emchoko/SuperMario- | 3,470,333,588,442 | d37ced85327360cd81009d80df4e5fdd85f0f0f7 | 7bb9bb8c56ff5abfc9d5f3f2a3ec3fc0434c5af5 | /animators/startscreenloader.py | 4a568affc5e36fa8b0f3556d271f1458079f7ed7 | []
| no_license | https://github.com/emchoko/SuperMario- | c577dd6b98f6d5f04b04f6394a75792087af0e57 | f8d5cfdb3005ed635635be754f5004dcb7a4fc32 | refs/heads/master | 2021-01-25T11:39:06.599909 | 2018-03-16T03:49:07 | 2018-03-16T03:49:07 | 123,413,132 | 0 | 0 | null | false | 2018-03-16T03:49:07 | 2018-03-01T09:31:58 | 2018-03-16T00:14:22 | 2018-03-16T03:49:07 | 65 | 0 | 0 | 0 | Python | false | null | from constants import Constants
from handlers.imageinfo import ImageInfo
try:
import simplegui
except ImportError:
import SimpleGUICS2Pygame.simpleguics2pygame as simplegui
# <Leila code>
class StartScreenLoader:
def __init__(self, state, is_gameover, progress, score):
self.background = ImageInfo([320, 240], [640, 480])
self.backgroundImage = simplegui.load_image("https://i.imgur.com/fY8di.jpg")
self.game_over_logo = "http://www.powerpointhintergrund.com/uploads/red-text-game-over-png-transparent-4.png"
# image
self.bkinfo = ImageInfo([200, 150], [400, 300])
self.bkimg = simplegui.load_image(
"https://png.pngtree.com/thumb_back/fw800/back_pic/00/06/34/7956298ae43266e.jpg")
if not is_gameover:
self.logo = simplegui.load_image(
"https://fontmeme.com/permalink/180307/3d9848417b5eb71c112dcb18cf23b2d0.png")
else:
self.logo = simplegui.load_image(
"http://www.powerpointhintergrund.com/uploads/red-text-game-over-png-transparent-4.png")
self.logo_info = ImageInfo([self.logo.get_width() / 2, self.logo.get_height() / 2],
[self.logo.get_width(), self.logo.get_height()])
self.time = 0
self.state = state
self.progress = progress
self.score = score
def draw(self, canvas):
center = self.background.get_center()
size = self.background.get_size()
self.time += 1
wtime = (self.time / 2) % Constants.WIDTH
# two images which create a seemless looping background
canvas.draw_image(self.backgroundImage, center, size, [(Constants.WIDTH / 2) + wtime, Constants.HEIGHT / 2],
[Constants.WIDTH, Constants.HEIGHT])
canvas.draw_image(self.backgroundImage, center, size, [(-Constants.WIDTH / 2) + wtime, Constants.HEIGHT / 2],
[Constants.WIDTH, Constants.HEIGHT])
# draw the lives and score to the screen
canvas.draw_text("Score", [50, 50], 22, "White", "sans-serif")
canvas.draw_text("Progress", [680, 50], 22, "White", "sans-serif")
# TODO draw lives and score
canvas.draw_text(str(self.score), [50, 80], 22, "White", "sans-serif")
canvas.draw_text(str(self.progress), [680, 80], 22, "White", "sans-serif")
canvas.draw_image(self.logo, self.logo_info.get_center(), self.logo_info.get_size(),
[Constants.WIDTH / 2, Constants.HEIGHT / 2], self.logo_info.get_size())
# </Leila code>
# <Emil code>
def click(self, pos):
self.state.load_playground()
| UTF-8 | Python | false | false | 2,682 | py | 19 | startscreenloader.py | 18 | 0.616331 | 0.574198 | 0 | 61 | 42.967213 | 117 |
thimontenegro/Dataquest | 5,746,666,261,686 | b8f801f3760cfed507f57a0e9336cab530ae4b47 | cb24e4fb75371fa1078fa786ebf88fd3c1ec388c | /Conditional Probability/Bayes Theorem-431.py | 2eabc93f0d2bb947e39e62229a72a328b387e4d5 | []
| no_license | https://github.com/thimontenegro/Dataquest | 19d91856b002ba0d574d4fb78a9291462fbe0a21 | abb768698835d77868050f7d0672133dfa1c74e4 | refs/heads/master | 2021-07-25T10:54:45.335791 | 2021-01-07T13:01:04 | 2021-01-07T13:01:04 | 236,377,362 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ## 1. Independence vs. Exclusivity ##
statement_1 = False
statement_2 = True
statement_3 = True
## 2. Example Walk-through ##
p_spam = 0.2388
p_secret_given_spam = 0.4802
p_secret_given_non_spam = 0.1284
p_non_spam = 1 - p_spam
p_spam_and_secret = p_spam * p_secret_given_spam
p_non_spam_and_secret = p_non_spam * p_secret_given_non_spam
p_secret = p_spam_and_secret + p_non_spam_and_secret
## 3. A General Formula ##
p_boeing = 0.73
p_airbus = 0.27
p_delay_given_boeing = 0.03
p_delay_given_airbus = 0.08
p_delay = p_boeing * p_delay_given_boeing + p_airbus * p_delay_given_airbus
## 4. Formula for Three Events ##
p_boeing = 0.62
p_airbus = 0.35
p_erj = 0.03
p_delay_boeing = 0.06
p_delay_airbus = 0.09
p_delay_erj = 0.01
p_delay = p_boeing * p_delay_boeing + p_airbus * p_delay_airbus + p_erj * p_delay_erj
## 6. Bayes' Theorem ##
p_boeing = 0.73
p_airbus = 0.27
p_delay_given_boeing = 0.03
p_delay_given_airbus = 0.08
p_delay = p_boeing * p_delay_given_boeing + p_airbus * p_delay_given_airbus
p_airbus_delay = (p_airbus * p_delay_given_airbus) / p_delay
## 7. Prior and Posterior Probability ##
p_spam = 0.2388
p_secret_given_spam = 0.4802
p_secret_given_non_spam = 0.1284
p_non_spam = 1 - p_spam
p_secret = p_spam * p_secret_given_spam + p_non_spam * p_secret_given_non_spam
p_spam_given_secret = (p_spam * p_secret_given_spam) / p_secret
prior = p_spam
posterior = p_spam_given_secret
ratio = posterior / prior | UTF-8 | Python | false | false | 1,441 | py | 26 | Bayes Theorem-431.py | 19 | 0.674532 | 0.616933 | 0 | 65 | 21.184615 | 85 |
icompsofteduardos/Treino-ICS | 15,573,551,423,555 | 68e7c9e2cf745c5a221fd58d88173eb28f2c7431 | d07dbecd446b6e4fd51dce4694d528ff0efd73c1 | /git_curso/ddd.py | 0cbf3939426bba3989017fedb3d4f2647338ce4e | []
| no_license | https://github.com/icompsofteduardos/Treino-ICS | 784ce9c91a54af7e312524c3cba7b5c36cf75de4 | f0af6c5b5ed90e61820200ede157f39937ddafe8 | refs/heads/master | 2020-12-26T06:03:51.348670 | 2020-02-07T12:08:02 | 2020-02-07T12:08:02 | 237,410,777 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | sp = 45
pr = 46
jk = 50 | UTF-8 | Python | false | false | 23 | py | 23 | ddd.py | 15 | 0.521739 | 0.26087 | 0 | 3 | 7 | 7 |
not7cd/circulation | 11,012,296,187,001 | cda3ac3e964b54ea51e1f63714f355c6386e7dfb | 22d481ff60d722fb0bbd6b0cb66a8727e48391c1 | /circulation/main/index/views.py | 9ae8c849a181c998b412fefe275940b1b63787f7 | [
"MIT"
]
| permissive | https://github.com/not7cd/circulation | b7a745a38fb55ab7266a0a04ab52abc180f5d6ad | 19a48d04354ffeba4be2b56c0437f69b8220d89c | refs/heads/master | 2020-03-18T01:20:03.932825 | 2019-05-28T15:41:26 | 2019-05-28T15:41:26 | 134,137,937 | 2 | 1 | MIT | true | 2019-05-28T15:41:27 | 2018-05-20T09:43:12 | 2019-05-14T08:14:44 | 2019-05-28T15:41:27 | 1,109 | 2 | 1 | 0 | Python | false | false | from circulation.web import db
from circulation.models import User, Book, Comment, Log, Permission
from flask import render_template
from flask_login import current_user
from circulation.main.index import main
from circulation.main.book.forms import SearchForm
@main.app_context_processor
def inject_permissions():
    """Make the ``Permission`` class available inside every template."""
    return {'Permission': Permission}
@main.route('/')
def index():
    """Render the home page: popular books, active users, recent comments."""
    search_form = SearchForm()
    the_books = Book.query
    # Users who may not edit book info only see non-hidden books.
    if not current_user.can(Permission.UPDATE_BOOK_INFORMATION):
        the_books = the_books.filter_by(hidden=0)
    # Top five books/users ranked by number of associated Log rows.
    popular_books = the_books.outerjoin(Log).group_by(Book.id).order_by(db.func.count(Log.id).desc()).limit(5)
    popular_users = User.query.outerjoin(Log).group_by(User.id).order_by(db.func.count(Log.id).desc()).limit(5)
    # Five most recently edited, non-deleted comments.
    recently_comments = Comment.query.filter_by(deleted=0).order_by(Comment.edit_timestamp.desc()).limit(5)
    return render_template("index.html", books=popular_books, users=popular_users, recently_comments=recently_comments,
                           search_form=search_form)
| UTF-8 | Python | false | false | 1,064 | py | 59 | views.py | 34 | 0.733083 | 0.728383 | 0 | 24 | 43.333333 | 119 |
Aasthaengg/IBMdataset | 7,318,624,281,701 | 7f0babab19d27aa49583ff919a1f973eb0612380 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03042/s581701801.py | e99b5d359defcd18629bb2ca245e4fd623077242 | []
| no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n=input()
n1,n2=int(n[:2]),int(n[2:])
#if (n1>12 and n2>12) or (n1==0 and n2==0):
# print("NA")
if (n1!=0 and n1<=12) and (n2!=0 and n2<=12):
print("AMBIGUOUS")
elif n1!=0 and n1<=12:
print("MMYY")
elif n2!=0 and n2<=12:
print("YYMM")
else:
print("NA")
| UTF-8 | Python | false | false | 275 | py | 202,060 | s581701801.py | 202,055 | 0.534545 | 0.410909 | 0 | 13 | 19.923077 | 45 |
xydinesh/learnpythonthehardway | 17,489,106,851,404 | 9ca7093ca2fe2c1dddc04b5788bbbdc3bd963e89 | 699016c085d3a6a30916f559fb28957cc67ff3ba | /ex11.1.py | a5a3df42483ea8f4f00cba4b799086df28726285 | []
| no_license | https://github.com/xydinesh/learnpythonthehardway | 1879bf08e474b5a05d6559e426fd0ef91d6171cd | 556e96f264d4720f2de01ae31d7aa05eebf3d4d8 | refs/heads/master | 2021-01-01T18:03:21.007674 | 2013-07-20T04:45:50 | 2013-07-20T04:45:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # comma ',' at the end of line is important
# as it allow you to enter answer next to the question
# i.e. it won't insert a \n at the end of the line
print "What is your eye color?",
eye_color = raw_input()
print "What is your hair color?",
hair_color = raw_input()
print "Enter a number you can think of",
n1 = int(raw_input())
print "Enter another number",
n2 = int(raw_input())
print "You have %s eys, %s hair and sum of numbers you entered %d" %\
(eye_color, hair_color, n1 + n2)
| UTF-8 | Python | false | false | 485 | py | 20 | ex11.1.py | 17 | 0.684536 | 0.676289 | 0 | 14 | 33.642857 | 69 |
abeaumont/competitive-programming | 2,216,203,166,669 | 830a39b6b967bfb21ea5b9ea0bf421f634454cf5 | 51f2492a5c207e3664de8f6b2d54bb93e313ca63 | /codejam/2018-qualification/c.py | 22410ab8fef593c45d9e72cfa65d9c2db6659125 | [
"WTFPL",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/abeaumont/competitive-programming | 23c5aabd587d7bb15a61efd3428838cb934233dd | a24c9b89941a59d344b51dc1010de66522b1a0dd | refs/heads/master | 2023-09-01T09:50:58.267361 | 2023-07-31T18:00:10 | 2023-07-31T18:00:10 | 117,589,708 | 618 | 262 | WTFPL | false | 2023-07-12T17:36:20 | 2018-01-15T20:00:56 | 2023-07-07T10:24:49 | 2023-07-12T17:36:20 | 12,728 | 511 | 192 | 1 | C++ | false | false | #!/usr/bin/env python3
# https://codejam.withgoogle.com/2018/challenges/00000000000000cb/dashboard/0000000000007a30
import sys
# Interactive solution: one test case per line of input.
for _ in range(int(input())):
    a = int(input())  # target number of cells to cover
    b = (a - 1) // 3 + 1  # grid width: 3 rows of b columns suffices
    # m[row][col] marks cells reported as covered by the judge.
    m = [None] * 3
    for i in range(3): m[i] = [False] * b
    i = 1  # current deployment column (we always aim at row 2)
    while True:
        # Ask the judge to deploy around cell (i+1, 2); flush is required
        # for interactive problems.
        print(i + 1, 2)
        sys.stdout.flush()
        x, y = map(int, input().split())
        if x == -1 and y == -1: sys.exit(0)  # judge signalled an error
        if x == 0 and y == 0: break          # all cells covered: next case
        m[y - 1][x - 1] = True
        # Advance the target column while the column to our left is full
        # (stop 2 columns short of the right edge so the 3x3 stamp fits).
        while i < b - 2:
            ok = True
            for j in range(3):
                if not m[j][i - 1]:
                    ok = False
                    break
            if ok: i += 1
            else: break
| UTF-8 | Python | false | false | 715 | py | 2,049 | c.py | 1,363 | 0.441958 | 0.367832 | 0 | 25 | 27.6 | 92 |
Orisland/HibiAPI | 16,269,336,134,807 | 5662d5a4ba053426372b0b066f07b0b7754a9975 | bb20684f29922d28124adf30a639ebe215dccf75 | /utils/routing.py | 2eca704385157933eca92535e58453a300305f8d | [
"Apache-2.0"
]
| permissive | https://github.com/Orisland/HibiAPI | c38207e1b0bd1f120268c72fa10dcb8380ac3cd3 | eb54dfebae0e7431daa42cd0567936927d2d9f66 | refs/heads/main | 2023-03-09T20:04:11.779272 | 2021-02-28T03:25:27 | 2021-02-28T03:25:27 | 341,846,278 | 0 | 0 | Apache-2.0 | true | 2021-02-24T09:28:02 | 2021-02-24T09:28:01 | 2021-02-24T09:27:59 | 2021-02-23T04:37:35 | 605 | 0 | 0 | 0 | null | false | false | import inspect
from enum import Enum
from fnmatch import fnmatch
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple
from urllib.parse import ParseResult, urlparse
from fastapi.routing import APIRouter
from httpx import URL
from pydantic import AnyHttpUrl, validate_arguments
from pydantic.errors import UrlHostError
from .net import AsyncHTTPClient
def exclude_params(func: Callable, params: Mapping[str, Any]) -> Dict[str, Any]:
    """Filter *params* down to only the keys accepted by *func*'s signature."""
    accepted = inspect.signature(func).parameters
    filtered: Dict[str, Any] = {}
    for key, value in params.items():
        if key in accepted:
            filtered[key] = value
    return filtered
class SlashRouter(APIRouter):
    """APIRouter that tolerates route paths given without a leading slash."""

    def api_route(self, path: str, **kwargs):
        # Normalise so every registered route path starts with "/".
        if not path.startswith("/"):
            path = "/" + path
        return super().api_route(path, **kwargs)
class BaseEndpoint:
    """Base class for API endpoint wrappers sharing an async HTTP client."""
    # When True, public methods are wrapped with pydantic's
    # validate_arguments on attribute access (see __getattribute__).
    type_checking: bool = True
    def __init__(self, client: AsyncHTTPClient):
        self.client = client
    @staticmethod
    def _join(base: str, endpoint: str, params: Dict[str, Any]) -> URL:
        """Build a URL from *base* host, an *endpoint* template and params.

        None-valued params are dropped and Enum values are unwrapped to
        their ``.value`` before being used for both path formatting and
        the query string.
        """
        host: ParseResult = urlparse(base)
        params = {
            k: (v.value if isinstance(v, Enum) else v)
            for k, v in params.items()
            if v is not None
        }
        return URL(
            url=ParseResult(
                scheme=host.scheme,
                netloc=host.netloc,
                # The endpoint may contain {placeholders} filled from params.
                path=endpoint.format(**params),
                params="",
                query="",
                fragment="",
            ).geturl(),
            params=params,
        )
    def __getattribute__(self, name: str) -> Any:
        # Private names and non-callables pass through untouched; public
        # callables are wrapped (anew on each access) with pydantic
        # argument validation unless type_checking is disabled.
        obj = super().__getattribute__(name)
        if name.startswith("_"):
            return obj
        elif not callable(obj):
            return obj
        elif not self.type_checking:
            return obj
        return validate_arguments(obj)
class BaseHostUrl(AnyHttpUrl):
    """HTTP URL whose host must match one of the ``allowed_hosts`` patterns."""

    allowed_hosts: List[str] = []

    @classmethod
    def validate_host(
        cls, parts: Dict[str, str]
    ) -> Tuple[str, Optional[str], str, bool]:
        # Run pydantic's normal host validation first, then the whitelist.
        host, tld, host_type, rebuild = super().validate_host(parts)
        if not cls._check_domain(host):
            raise UrlHostError(allowed=cls.allowed_hosts)
        return host, tld, host_type, rebuild

    @classmethod
    def _check_domain(cls, host: str) -> bool:
        # Patterns use shell-style wildcards, e.g. "*.example.com".
        return any(fnmatch(host, pattern) for pattern in cls.allowed_hosts)
| UTF-8 | Python | false | false | 2,422 | py | 1 | routing.py | 1 | 0.586705 | 0.586705 | 0 | 82 | 28.536585 | 80 |
amahfouz/python-prog-challenge | 15,367,393,010,216 | 8742c2c97f07ee0eb70624eea44d2081ec908c17 | a03ada31a063a36588ce6a1d0a5a4701905ee0e9 | /geeks4geeks/sum-of-bitwise-or-in-power-set.py | 1933c67bbf7254efacec4aeff598eaa331232e7e | []
| no_license | https://github.com/amahfouz/python-prog-challenge | 5cb97299f827b884eff557e2580fd5eb7ad8afe3 | d79de2a4e394048cf75c58110571be26178e8051 | refs/heads/master | 2021-01-10T19:44:16.005639 | 2020-04-22T21:09:12 | 2020-04-22T21:09:12 | 35,351,298 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/bin/python
#
# Solution for
# https://www.geeksforgeeks.org/sum-of-bitwise-or-of-all-possible-subsets-of-given-set/
# Let 'n' be the number of members of the original set
#
# Total number of subsets is 2^n (including empty set)
#
# In any given subset, for the i[th] bit to be
# zero all corresponding bits in set members must
# be zero. Let the number of members where that
# bit is zero to be 'zi'. So the number of sets
# made up of these numbers is 2^zi. Negating the
# condition, the number of sets where this bit is
# 1 is equal 2^n - 2^zi.
#
# So all has to be done is to compute the number of
# members that have the i[th] bit as zero for every
# i from 0 to 31.
#
# length of integers in the problem
NUM_SIZE = 32
def or_sum(arr, num_size=32):
    """Return the sum of bitwise ORs over all subsets of arr.

    For bit i, a subset's OR has bit i clear only when every member has
    bit i clear.  If z_i members have bit i clear, then exactly
    2**len(arr) - 2**z_i subsets contribute 2**i each.

    arr      -- list of non-negative integers
    num_size -- number of bits to consider (default 32, matching the
                module constant NUM_SIZE); parameterized so callers can
                handle wider or narrower integers.
    """
    # zero_count[i] = number of members of arr with bit i equal to 0.
    zero_count = [0 for _ in range(num_size)]
    for index in range(num_size):
        for num in arr:
            if not (num & (1 << index)):
                zero_count[index] += 1
    # (The original had a no-op "num >> 1" here whose result was
    # discarded; it has been removed.)
    result = 0
    two_pow_n = 1 << len(arr)
    for i in range(num_size):
        # Subsets in which bit i of the OR is set.
        num_subsets = two_pow_n - (1 << zero_count[i])
        val_of_bit = 1 << i
        result += num_subsets * val_of_bit
    return result
if __name__ == "__main__":
    # Example from the article; expected output: 18 (Python 2 print).
    arr = [ 1, 2, 3 ]
    print or_sum(arr)
taojin0505/hawq-toolkit | 6,691,559,068,316 | 411fad1e4f94c3e4bd27c20624de6aac85b5bea5 | 89806713beb2b22a5b0ae47dc83b9afc69408a21 | /word-segment-algorithm/boundarystab-prallal.py | 2f3d6ad34e57c56a73821894d0e222933f58d14f | []
| no_license | https://github.com/taojin0505/hawq-toolkit | a8830964c3c2f54351970a1ff5758ae427f5e5c7 | e7787164b3944b1edda4a3303e6231dcb47ffe65 | refs/heads/master | 2020-12-25T14:58:21.019085 | 2017-01-05T08:23:03 | 2017-01-05T08:23:03 | 66,072,580 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*
from __future__ import division
import os.path
import os
import getopt
import sys
import string
import time
import math
import multiprocessing
import Queue
starttime=int(time.time())  # wall-clock start, used for progress logging
shortargs='h'               # getopt short options
longargs=['help']           # getopt long options
argvcount=2                 # expected number of positional arguments
numprocess=8                # size of the worker process pool
worddict={}                 # word -> occurrence count (filled in __main__)
wordcount=0                 # total occurrences across all words
wordset=set()               # NOTE(review): appears unused in this file
maxwordlength=6             # longest word length considered
boundarysize=1              # NOTE(review): appears unused in this file
wordgroup=[]                # words bucketed by length (index = length - 1)
def printhelp():
    """Print usage information with the actual script name substituted in."""
    usage_text = 'Script Usage:\n \
    python template.py word.txt wordcount.txt'
    print(usage_text.replace('template.py', sys.argv[0]))
def ischs(s):
    """Return True when *s* falls inside the CJK Unified Ideographs range."""
    return u'\u4e00' <= s <= u'\u9fa5'
def getentropy(pid, wordqueue, wordgroup):
    """Worker: compute left/right boundary entropy for words from a queue.

    For each candidate word, scan every longer word in ``wordgroup`` that
    contains it, tally the characters appearing immediately to its left
    and right, and compute the (base-10) Shannon entropy of each boundary
    distribution.

    pid       -- worker id, used only for progress logging
    wordqueue -- shared queue of candidate words (consumed until empty)
    wordgroup -- known words bucketed by length (index = length - 1)

    Returns a list of tuples:
    (word, left_entropy, right_entropy, left_variety, right_variety)
    with all numeric values already converted to str.
    """
    returnlist = []
    counter = 0
    print('start process %d' % pid)
    while not wordqueue.empty():
        try:
            word = wordqueue.get(False)
        except Queue.Empty:
            print('process %d finished' % pid)
            return returnlist
        wordlen = len(word)
        # BUGFIX: these accumulators were previously initialised once
        # before the loop, so every word's counts and entropy leaked into
        # the next word's result; they must be reset for each word.
        lboundaryword = {}
        rboundaryword = {}
        lboundaryset = set()
        rboundaryset = set()
        wordappears = 0
        lbe = 1.0
        rbe = 1.0
        for length in range(wordlen + 1, maxwordlength + 1):
            for keyword in wordgroup[length - 1]:
                startidx = keyword.find(word)
                # Occurrences at position 0 are ignored (no left neighbor),
                # matching the original behavior.
                if startidx > 0:
                    # Tally the character immediately left of the match.
                    if keyword[startidx - 1] in lboundaryset:
                        lboundaryword[keyword[startidx - 1]] += 1
                    else:
                        lboundaryword[keyword[startidx - 1]] = 1
                        lboundaryset.add(keyword[startidx - 1])
                    # Tally the character immediately right, if any.
                    if startidx + wordlen < len(keyword):
                        if keyword[startidx + wordlen] in rboundaryset:
                            rboundaryword[keyword[startidx + wordlen]] += 1
                        else:
                            rboundaryword[keyword[startidx + wordlen]] = 1
                            rboundaryset.add(keyword[startidx + wordlen])
                    wordappears += 1
        # Shannon entropy (base 10) of each boundary distribution.
        for i in lboundaryword.values():
            lbe += -(i / wordappears) * math.log10(i / wordappears)
        for i in rboundaryword.values():
            rbe += -(i / wordappears) * math.log10(i / wordappears)
        counter += 1
        if counter % 1000 == 0:
            print('process %d run for %d words and %d seconds'
                  % (pid, counter, int(time.time()) - starttime))
        returnlist.append((word, str(lbe), str(rbe),
                           str(len(lboundaryset)), str(len(rboundaryset))))
    return returnlist
if __name__ == '__main__':
    # Parse command-line options; show usage and quit on a bad option.
    try:
        option, args = getopt.getopt(sys.argv[1:], shortargs, longargs)
    except getopt.GetoptError:
        # BUGFIX: this previously did print(usageMsg), but usageMsg is a
        # local variable of printhelp() and raised a NameError here.
        printhelp()
        sys.exit()
    for name, value in option:
        if name in ('-h', '--help'):
            printhelp()
            sys.exit()
    if len(args) != argvcount:
        printhelp()
        sys.exit()
    else:
        wordfilename = args[0]
        wordcountfilename = args[1]
    # One bucket per word length (index = length - 1).
    for i in xrange(maxwordlength):
        wordgroup.append([])
    wordfile = open(wordfilename, 'r')
    wordtext = wordfile.readlines()
    wordcountfile = open(wordcountfilename, 'r')
    text = wordcountfile.readlines()
    procspool = multiprocessing.Pool(processes=numprocess)
    mgr = multiprocessing.Manager()
    wordqueue = mgr.Queue()
    # Queue every Chinese candidate word for the workers.
    for line in wordtext:
        line = line.decode('utf-8').replace('\n', '').strip()
        if ischs(line):
            wordqueue.put(line)
    # Load "word count" lines and bucket Chinese words by length.
    for line in text:
        words = line.split(' ')
        word0 = words[0].decode('utf-8').strip()
        word1 = int(words[1])
        worddict[word0] = word1
        wordcount += word1
        if ischs(word0[0]):
            wordlen = len(word0)
            wordgroup[wordlen - 1].append(word0)
    print('totaly %d word ,count is %s ' % (wordqueue.qsize(), wordcount))
    # Fan the queue out to the worker pool.
    procsrel = []
    for i in xrange(numprocess):
        procsrel.append(procspool.apply_async(getentropy, args=(i, wordqueue, wordgroup)))
    procspool.close()
    procspool.join()
    # Collect results from every worker and write them as CSV rows.
    outfile = open('boundary-p.txt', 'wb')
    for res in procsrel:
        for resword in res.get():
            outfile.write(u','.join(resword).encode('utf-8') + '\n')
    #print 'mi word count is %d,actually word count is %s '%(len(wordset),len(worddict))
    #print 'it runs %d seconds '%(int(time.time())-starttime)
| UTF-8 | Python | false | false | 4,792 | py | 14 | boundarystab-prallal.py | 10 | 0.588689 | 0.576169 | 0 | 159 | 29.138365 | 103 |
etjoa003/medical_imaging | 5,686,536,734,674 | 8582d83bbe07cb343540164f30a490669fa51b0e | f122e9d57a2664c1d7189cf36e06afcfc8397a36 | /isles2017/pipeline/get_results/results_publication_1/main_result.py | d7173f5f72b8745e79ad48e40983b11c6c1277bb | []
| no_license | https://github.com/etjoa003/medical_imaging | a8eda7e0da6e971310ab54c8d2671a9cb69c4357 | 9e55b80b3b4dde2127983447d3e2937ed5a84fed | refs/heads/master | 2021-07-01T14:23:18.664651 | 2021-05-26T05:08:02 | 2021-05-26T05:08:02 | 193,107,888 | 8 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from utils.utils import *
# DEBUG MODE?
# See utils/debug_switche
import pipeline.training as tr
import pipeline.evaluation as ev
import pipeline.lrp as lr
import pipeline.visual as vh
def example_process_get_main_figure(config_data):
    """Train, evaluate, run LRP filter sweeps and visualize for one model.

    Mutates *config_data* in place (resize, model label, LRP case numbers
    and submode) before driving the training/evaluation/LRP pipeline.
    """
    print('pipeline/get_results/restuls_publication_1.example_process_get_main_figure(). \
        You can hardcode parameters or config here.')
    # In the paper, other sizes are used as well.
    config_data['dataloader']['resize'] = [48,48,19]
    config_data['model_label_name'] = "UNet3D_AXXXS1"
    tr.training_UNet3D(config_data)
    # DEBUG_VERSION / DEBUG_EVAL_TRAINING_CASE_NUMBERS are presumably
    # star-imported from utils.utils -- confirm (see "DEBUG MODE?" note above).
    if not DEBUG_VERSION:
        config_data['LRP']['filter_sweeper']['case_numbers'] = [1,2,4,7,11,15,28,27,45]
    else:
        config_data['LRP']['filter_sweeper']['case_numbers'] = DEBUG_EVAL_TRAINING_CASE_NUMBERS
    ev.evaluation_UNet3D_overfit(config_data)
    lr.lrp_UNet3D_filter_sweeper_0002(config_data,verbose=0)
    lr.lrp_UNet3D_filter_sweeper_0003(config_data,verbose=0)
    config_data['LRP']['filter_sweeper']['submode'] = '0002'
    vh.lrp_UNet3D_filter_sweep_visualizer(config_data)
| UTF-8 | Python | false | false | 1,034 | py | 103 | main_result.py | 88 | 0.737911 | 0.697292 | 0 | 30 | 33.466667 | 89 |
Cainuriel/Training-Python- | 16,484,084,487,322 | 2dcb945616f541f56f6b36e657b15bb0deeadf80 | 491002828e6d21bba3ef95561fd5a6de73579c0a | /metodo maketrans.py | 6e7a784e3edceb088260bab960988b9c1e7b2ae5 | []
| no_license | https://github.com/Cainuriel/Training-Python- | bc892b3edec4b5e55083ef81697603643ef89a01 | 750235d2d515aa76da49002e6d3f2a90f573d3b7 | refs/heads/master | 2020-04-18T19:17:47.861335 | 2019-02-13T21:49:36 | 2019-02-13T21:49:36 | 167,708,625 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | string = "This website is for losers LOL!"
vocals = "AaEeIiOoUu"
def disemvowel(string):
    """Return *string* with every vowel (either case) removed."""
    vocals = "AaEeIiOoUu"
    # A character survives only if it matches none of the ten vowels --
    # the same test the original expressed by counting ten mismatches.
    return "".join(ch for ch in string if ch not in vocals)
# Translated comment: "it's this easy with the translate method".
# BUGFIX: str.maketrans(x, y) requires x and y of equal length, so the
# original maketrans(vocals, " ") raised ValueError.  To strip the vowels
# (matching disemvowel above) use the three-argument form, whose third
# argument lists characters to delete.
maketrans = string.maketrans("", "", vocals)
print(maketrans)
result = string.translate(maketrans)
print(result)
| UTF-8 | Python | false | false | 465 | py | 75 | metodo maketrans.py | 70 | 0.615054 | 0.606452 | 0 | 24 | 17.291667 | 49 |
AndreButhner/xoola | 18,683,107,756,443 | b64a14e42dc9a36b819321bee0da26c069d5cb74 | 98f33f4d21266da2a53c03a270f9d876691d5680 | /app/controllers/auth/forms.py | 7d6f8ea61ba8fba410c60b2985cf3e999d713ba5 | [
"Apache-2.0"
]
| permissive | https://github.com/AndreButhner/xoola | 78b13fe06d01290caaa497529fd9f6c344852723 | cb606eb7340f56377659c2f2e5d5a51cff1a9854 | refs/heads/master | 2021-03-19T11:27:27.811951 | 2018-06-26T02:13:39 | 2018-06-26T02:13:39 | 104,412,518 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask_wtf import Form
from wtforms import StringField, BooleanField, FloatField, IntegerField, DateField,TextAreaField, SelectField, FileField, PasswordField,validators
from wtforms.validators import DataRequired, EqualTo
from app.model import Empresa
class LoginForm(Form):
    """Login form: a required login name and a required password."""
    login = StringField('login', validators=[DataRequired()])
    password = PasswordField('Password',validators=[DataRequired()])
| UTF-8 | Python | false | false | 424 | py | 52 | forms.py | 33 | 0.783019 | 0.783019 | 0 | 8 | 51.875 | 146 |
PyJay/advent_of_code | 17,712,445,162,375 | 4000e2656f13129b092b91c4797efac9a72cd2b8 | 8cd0611b87bf84a46a671032b85b5f661e0aa29f | /2018/day02/solve_2.py | cc910010408356e590babbba53535b0d10b723a7 | []
| no_license | https://github.com/PyJay/advent_of_code | 5af362b1783c333a8fcb030726470ef05e53afff | b1d6bc48cf82ef3b9076430f788eef3208654771 | refs/heads/master | 2021-07-12T09:34:26.059770 | 2020-07-05T10:15:59 | 2020-07-05T10:15:59 | 161,941,370 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | def similar_ids_common_letters(box_ids):
for i in range(len(box_ids)):
check_id = box_ids.pop()
for box_id in box_ids:
diff_idxs = []
for i in range(len(check_id)):
if box_id[i] != check_id[i]:
diff_idxs.append(i)
if len(diff_idxs) == 2:
continue
if len(diff_idxs) == 1:
id_chrs = list(box_id)
id_chrs.pop(diff_idxs[0])
return "".join(id_chrs)
def test_similar_ids_common_letters():
    """Puzzle example: 'fghij' and 'fguij' differ only at index 2."""
    sample = [
        'abcde', 'fghij', 'klmno', 'pqrst', 'fguij', 'axcye', 'wvxyz',
    ]
    assert similar_ids_common_letters(sample) == 'fgij'
if __name__ == '__main__':
    # Read one box ID per line and print the part-2 answer.
    with open('input_1.txt', 'r') as f:
        box_ids = f.read().split('\n')
    print(similar_ids_common_letters(box_ids))
luis4ngel09/sistema_web_comida | 1,563,368,130,161 | 77d89e5e1a79d511114f274885e799f7f819101b | f26f69d7fff5b5279892814dc72441ef19608bae | /comida/models.py | 21a5764a7cd3846295df866dbca972071c4cafba | []
| no_license | https://github.com/luis4ngel09/sistema_web_comida | 28f9ff325982c199ecded36b0196b31384075b26 | 1443b9ecbe85e98087a7f31d0e9a2e416d68563f | refs/heads/master | 2021-08-23T17:41:51.800961 | 2017-12-06T05:06:07 | 2017-12-06T05:06:07 | 113,204,922 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
class comidas (models.Model):
    """A dish: author, name, description, price, photo, creation date."""
    author = models.ForeignKey('auth.User')
    nombre = models.CharField(max_length=200)
    descripcion = models.TextField()
    costo = models.IntegerField()
    imagen = models.ImageField(upload_to='comida/images/')
    fecha_creacion = models.DateTimeField(default=timezone.now)
    def publish(self):
        # Stamp the record with the current time and persist it.
        self.fecha_creacion = timezone.now()
        self.save()
    def __str__(self):
        return self.nombre
class usuarios(models.Model):
    """An application user record (its own model, not Django's auth.User)."""
    nombre = models.CharField(max_length=200)  # user name
    password = models.CharField(max_length=200)  # NOTE(review): stored in plain text -- should be hashed
    #email = models.EmailField(max_length=200)
    descripcion = models.TextField()  # profile description
    imagen = models.ImageField(upload_to='comida/images/')  # profile image
    tipo_usuario= models.CharField(max_length=200)  # user role/type as a free-form string
    def __str__(self):
        """Human-readable representation of the user."""
        return self.nombre
class pedidos(models.Model):
    """An order ('pedido') referencing one dish."""
    #author=models.ForeignKey('auth.User')
    comida_id = models.ForeignKey(comidas, on_delete=models.CASCADE)  # ordered dish
    #usuario_id = models.ForeignKey(usuarios,on_delete=models.CASCADE)
    nombre = models.CharField(max_length=200)  # name attached to the order
    cantidad = models.IntegerField()  # quantity ordered
    descripcion = models.TextField()  # order notes
    fecha_creacion = models.DateTimeField(default=timezone.now)  # creation timestamp
    def publish(self):
        """Refresh the creation timestamp to 'now' and save the row."""
        self.fecha_creacion = timezone.now()
        self.save()
def __str__(self):
return self.nombre | UTF-8 | Python | false | false | 1,422 | py | 12 | models.py | 6 | 0.71308 | 0.700422 | 0 | 45 | 30.622222 | 71 |
jaycrowww/COM3110 | 11,081,015,660,549 | 942b6941656c1dd49833acce27997a5c203e5a3f | 06684f6c17422e0c08c0271a54c0a08012f4d460 | /my_retriever.py | a2f3dbcbf0694d7e8367ab75beb329f286a7c2ca | []
| no_license | https://github.com/jaycrowww/COM3110 | bc7812d74c8a602c2340e5b276b077a1ebc17986 | 132ebbff5f927c632f4aaf2908d057f77765c55a | refs/heads/master | 2021-09-27T16:12:01.333810 | 2018-11-09T13:37:40 | 2018-11-09T13:37:40 | 156,698,812 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import defaultdict
import math
class Retrieve:
    """Vector-space document retrieval over an inverted index.

    Takes an inverted index of a document collection ({term: {docid: freq}}),
    a term weighting scheme ('binary', 'tf' or 'tfidf') and, per query,
    returns the ids of the ten documents ranked highest by cosine similarity.

    Bug fix vs. the previous version: in forQuery the dot-product update ran
    for *every* query term, re-adding stale weights for terms the document
    did not contain (and compounding '+= 1' counters in the binary scheme).
    Contributions are now added only for matched terms, once each.
    """
    # Create new Retrieve object storing index and termWeighting scheme
    def __init__(self, index, termWeighting):
        """Store the index/scheme and precompute per-document statistics.

        index -- inverted index: {term: {docid: frequency of term in doc}}
        termWeighting -- 'binary', 'tf' or 'tfidf'
        """
        self.index = index
        self.termWeighting = termWeighting
        # Total number of documents in the collection.
        # NOTE: docids are assumed to be 1..collection_size (see forQuery).
        self.collection_size = 0
        # IDF value per term (populated only for 'tfidf').
        self.collection_idf_values = defaultdict(float)
        # Per docid: total number of term occurrences ('tf'/'tfidf') or the
        # number of distinct terms ('binary').
        self.docid_num_elements = defaultdict(int)
        # Per docid: raw vector elements (tf or tf*idf values).
        self.docid_vector_elements = defaultdict(list)
        # Per docid: Euclidean length of the length-normalised doc vector.
        self.docid_magnitude = defaultdict(float)
        # --------------------------------------------------------------
        # Collection size = number of distinct docids in the whole index.
        distinct_docids = {docid
                           for postings in self.index.values()
                           for docid in postings}
        self.collection_size = len(distinct_docids)
        # --------------------------------------------------------------
        # Precompute the document-vector magnitudes for the chosen scheme.
        if self.termWeighting == 'tf':
            self._prepare_tf(use_idf=False)
        elif self.termWeighting == 'tfidf':
            self._prepare_tf(use_idf=True)
        else:
            # Binary: each distinct term contributes 1, so the magnitude
            # is sqrt(number of distinct terms in the document).
            for term in self.index:
                for docid in self.index[term]:
                    self.docid_num_elements[docid] += 1
            for docid, num_terms in self.docid_num_elements.items():
                self.docid_magnitude[docid] = math.sqrt(num_terms)

    def _prepare_tf(self, use_idf):
        """Fill the per-document statistics for 'tf' / 'tfidf' weighting."""
        for term, postings in self.index.items():
            idf = 1.0
            if use_idf:
                # idf = log10(N / df), df = number of docs containing term.
                idf = math.log10(self.collection_size / len(postings))
                self.collection_idf_values[term] = idf
            for docid, freq in postings.items():
                self.docid_num_elements[docid] += freq
                self.docid_vector_elements[docid].append(freq * idf)
        # Magnitude of each vector of length-normalised elements.
        for docid, elements in self.docid_vector_elements.items():
            num_elems = self.docid_num_elements[docid]
            total = sum((elem / num_elems) ** 2 for elem in elements)
            self.docid_magnitude[docid] = math.sqrt(total)

    # Method performing retrieval for specified query
    def forQuery(self, query):
        """Rank documents for *query* ({term: frequency}) by cosine similarity.

        Returns the docids of the ten best documents, best first. The
        (constant) query magnitude is omitted from the cosine because it
        does not affect the ranking.
        """
        cosine_sim_values = defaultdict(float)
        len_query = len(query)
        for docid in range(1, self.collection_size + 1):
            d_vector_magnitude = self.docid_magnitude[docid]
            # Skip zero-magnitude vectors (would divide by zero below).
            if d_vector_magnitude == 0:
                continue
            qd_dot_product = 0
            for term in query:
                # FIX: only terms present in both the query and this document
                # may contribute; previously the update also ran for
                # unmatched terms, re-using stale query_i/doc_i values.
                if term not in self.index or docid not in self.index[term]:
                    continue
                if self.termWeighting == 'tf':
                    # Document tf values were stored raw; normalise here.
                    norm_scale = 1 / self.docid_num_elements[docid]
                    query_i = query[term] / len_query
                    doc_i = self.index[term][docid] * norm_scale
                elif self.termWeighting == 'tfidf':
                    norm_scale = 1 / self.docid_num_elements[docid]
                    idf = self.collection_idf_values[term]
                    query_i = query[term] / len_query * idf
                    doc_i = self.index[term][docid] * norm_scale * idf
                else:
                    # Binary: presence only -- each shared term adds 1 * 1
                    # (FIX: the counters used to accumulate across terms).
                    query_i = 1
                    doc_i = 1
                qd_dot_product += query_i * doc_i
            cosine_sim_values[docid] = qd_dot_product / d_vector_magnitude
        # Ten highest-scoring documents, best first.
        return sorted(cosine_sim_values, key=cosine_sim_values.get,
                      reverse=True)[:10]
| UTF-8 | Python | false | false | 7,934 | py | 1 | my_retriever.py | 1 | 0.524704 | 0.521427 | 0 | 175 | 44.337143 | 108 |
E06HPM/DeployInv | 2,765,958,968,605 | 6b4635d52a956309984419a8df9d0e5ddaa51891 | b9106f93d6bc5404ee04d141b676e07c8c61f50c | /prediction/migrations/0005_modelsave.py | 5801b949cfe04e19669a7e7ea537bd641225f2d9 | []
| no_license | https://github.com/E06HPM/DeployInv | 54376393afd8ba63e0d66ce54298a8ca10a62ef6 | 1dc6ab7422de187116b7c3155ae01a44ca813366 | refs/heads/master | 2023-07-25T00:29:46.383746 | 2021-09-08T23:28:21 | 2021-09-08T23:28:21 | 403,999,285 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.2 on 2021-06-24 00:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('prediction', '0004_auto_20210623_0415'),
]
operations = [
migrations.CreateModel(
name='ModelSave',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(auto_now=True)),
('model_name', models.CharField(max_length=50)),
('model_mape', models.DecimalField(decimal_places=2, max_digits=5)),
],
),
]
| UTF-8 | Python | false | false | 669 | py | 25 | 0005_modelsave.py | 14 | 0.571001 | 0.518685 | 0 | 22 | 29.409091 | 114 |
0xcccccccccccc/tranhost-server | 16,277,926,072,277 | 0b7c46ea6219dcbcb8354668588ebbfafdd26725 | 6297a4ba41f0c30d5411a1c5cab67814305f109c | /tran/apps.py | bd80c9076186f3a30b1eefae2f6ac33e1a29a99f | [
"MIT"
]
| permissive | https://github.com/0xcccccccccccc/tranhost-server | c7609683b11ec93859558442a6b02756ae486878 | 20f9abfa3365f500efd04b69aea3fca2db5520e9 | refs/heads/main | 2023-05-06T02:45:49.269561 | 2021-05-26T09:04:03 | 2021-05-26T09:04:03 | 363,026,948 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.apps import AppConfig
class TranConfig(AppConfig):
    """Configuration for the 'tran' Django application."""
    default_auto_field = 'django.db.models.BigAutoField'  # default primary-key type for this app's models
    name = 'tran'  # application name
| UTF-8 | Python | false | false | 140 | py | 8 | apps.py | 5 | 0.735714 | 0.735714 | 0 | 6 | 22.333333 | 56 |
s-c-vaz/GoogleCodeJam | 18,519,898,999,482 | e28b5e940d3b06385570d8514ff1199e2115dec0 | d50a81a548f1729d7cd618906296397433cb19d2 | /gWheels.py | 5390a221f8406351039f49c19581f9c58c9c5b7e | []
| no_license | https://github.com/s-c-vaz/GoogleCodeJam | 9f798b6d7f5ed380de12b346a22c5a43939b79e2 | 7c3a7ca783aa892e3d7c7671226fb7c8a3c34936 | refs/heads/master | 2020-05-21T13:29:30.823889 | 2018-08-30T07:15:46 | 2018-08-30T07:15:46 | 65,164,734 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Created on 16-May-2018
@author: annervaz
Couldn't get better than O(n^3 log n); the large input does not complete at that complexity.
'''
def check_possibility(P,Q,pedal_gear_teeths,extra_gear_teeths,tire_gear_teeths):
    """Return 'Yes' if a gear combination with ratio P/Q exists, else 'No'.

    We need a pedal gear a, two *different* extra gears b and c (distinct
    positions in the list), and a tire gear d such that
        P * b * d == Q * a * c.

    Improvements over the original O(n^3 log n) search:
      * the tire teeth are kept in a set, turning the innermost binary
        search into an O(1) membership test (overall O(n^3));
      * the caller's tire list is no longer sorted in place.

    Gear teeth counts are assumed to be positive integers (Code Jam
    constraint), so P * b is never zero and the modulo below is safe.
    """
    tire_teeth = set(tire_gear_teeths)
    for a in pedal_gear_teeths:
        for c_index, c in enumerate(extra_gear_teeths):
            rhs = a * c * Q
            for b_index, b in enumerate(extra_gear_teeths):
                if b_index == c_index:
                    continue  # b and c must be physically different gears
                pedal_side = P * b
                # We need d with pedal_side * d == rhs, i.e. d = rhs / pedal_side:
                # it must divide evenly and exist among the tire gears.
                if rhs % pedal_side == 0 and rhs // pedal_side in tire_teeth:
                    return 'Yes'
    return 'No'
if __name__ == '__main__':
    # Code Jam style driver: first line is the number of test cases.
    testcases = int(input())
    for testcase in range(1, testcases+1):
        input()  # discard one line before each case (presumably a separator -- per the input format)
        print('Case #'+ str(testcase) + ':')
        # Counts of pedal, extra and tire gears (the lists follow).
        N_p,N_e,N_t = map(lambda x: int(x),input().split(' '))
        pedal_gear_teeths = list(map(lambda x: int(x),input().split(' ')))
        extra_gear_teeths = list(map(lambda x: int(x),input().split(' ')))
        tire_gear_teeths = list(map(lambda x: int(x),input().split(' ')))
        M = int(input())  # number of (P, Q) ratio queries for this case
        for query in range(1,M+1):
            P, Q = map(lambda x: int(x),input().split(' '))
            print(check_possibility(P,Q,pedal_gear_teeths,extra_gear_teeths,tire_gear_teeths))
| UTF-8 | Python | false | false | 1,838 | py | 93 | gWheels.py | 60 | 0.504353 | 0.494015 | 0 | 52 | 34.346154 | 94 |
coderSkyChen/Action_Recognition_Zoo | 6,674,379,199,770 | d479e4cd8c16a941015941ff25c17c4ac555e975 | 2d7979c3d5931c61d0a728aa8395942da6fc7e72 | /model_zoo/models/namignizer/data_utils.py | fcb0f257fb21ffcbdd8ce6306755feef6be1d78d | [
"MIT",
"Apache-2.0"
]
| permissive | https://github.com/coderSkyChen/Action_Recognition_Zoo | 5b450a5a80eeb034fb4e619a6218c764137dda4c | 92ec5ec3efeee852aec5c057798298cd3a8e58ae | refs/heads/master | 2021-01-25T12:59:58.595288 | 2019-03-07T08:34:45 | 2019-03-07T08:34:45 | 123,521,178 | 246 | 41 | MIT | false | 2019-04-08T19:01:36 | 2018-03-02T02:46:27 | 2019-04-03T21:27:21 | 2019-03-07T08:35:31 | 45,517 | 115 | 18 | 1 | Python | false | false | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for parsing Kaggle baby names files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import tensorflow as tf
import pandas as pd
# Sentinel appended after each name in the integer encoding; zero marks
# "end of name" (letters map to 1..26 via _letter_to_number).
_EON = 0
def read_names(names_path):
  """Load and de-duplicate a Kaggle baby-names csv.

  See SmallNames.txt for an example of the format, or
  https://www.kaggle.com/kaggle/us-baby-names for full lists.

  Args:
    names_path: path to a csv file with 'Name' and 'Count' columns

  Returns:
    Dataset: namedtuple of two elements -- the deduped, lower-cased names
      and their summed counts (aligned arrays).
  """
  frame = pd.read_csv(names_path)
  frame.Name = frame.Name.str.lower()
  summed = frame.groupby(by=["Name"])["Count"].sum()
  Dataset = collections.namedtuple('Dataset', ['Name', 'Count'])
  return Dataset(np.array(summed.index.tolist()), np.array(summed.tolist()))
def _letter_to_number(letter):
"""converts letters to numbers between 1 and 27"""
# ord of lower case 'a' is 97
return ord(letter) - 96
def namignizer_iterator(names, counts, batch_size, num_steps, epoch_size):
  """Yield random batches of encoded names for language-model training.

  Takes names and counts like those output from read_names and yields
  epoch_size batches. Each batch is a batch_size x num_steps array of
  randomly chosen names (encoded as ints, separated by the end-of-name
  token _EON), drawn with probability proportional to their counts.
  A batch may end mid-name.

  Args:
    names: array of lowercase names composed of 26 characters
    counts: numpy array with the frequency of those names
    batch_size: int
    num_steps: int
    epoch_size: number of batches to yield

  Yields:
    (x, y): batch_size x num_steps arrays of ints representing letters,
      where x is the input and y is the target (x shifted left by one).
  """
  name_distribution = counts / counts.sum()
  batch_len = batch_size * num_steps
  for _ in range(epoch_size):
    data = np.zeros(batch_len + 1)
    # Half as many names as slots is a heuristic; surplus letters are
    # simply dropped once the buffer is full (and unfilled slots stay 0).
    samples = np.random.choice(names, size=batch_len // 2,
                               replace=True, p=name_distribution)
    data_index = 0
    for sample in samples:
      if data_index >= batch_len:
        break
      # BUG FIX: `map(...) + [_EON]` worked on Python 2 (map returned a
      # list) but raises TypeError on Python 3; materialise the map first.
      for letter in list(map(_letter_to_number, sample)) + [_EON]:
        if data_index >= batch_len:
          break
        data[data_index] = letter
        data_index += 1
    x = data[:batch_len].reshape((batch_size, num_steps))
    y = data[1:batch_len + 1].reshape((batch_size, num_steps))
    yield (x, y)
def name_to_batch(name, batch_size, num_steps):
  """Encode a single name into an otherwise-zero batch.

  Args:
    name: lowercase name composed of 26 characters
    batch_size: int
    num_steps: int

  Returns:
    x, y: batch_size x num_steps arrays of ints representing letters, where
      x is the input and y is the target. The arrays hold the encoded name
      followed by _EON; the remaining positions stay zero.
  """
  batch_len = batch_size * num_steps
  data = np.zeros(batch_len + 1)
  data_index = 0
  # BUG FIX: `map(...) + [_EON]` only worked on Python 2; materialise the
  # map before concatenating so this also runs on Python 3.
  for letter in list(map(_letter_to_number, name)) + [_EON]:
    if data_index > batch_len:
      break  # name longer than the batch: truncate instead of overflowing
    data[data_index] = letter
    data_index += 1
  x = data[:batch_len].reshape((batch_size, num_steps))
  y = data[1:batch_len + 1].reshape((batch_size, num_steps))
  return x, y
| UTF-8 | Python | false | false | 4,239 | py | 233 | data_utils.py | 162 | 0.651333 | 0.643548 | 0 | 119 | 34.621849 | 83 |
s3nh/plate-detection | 395,137,030,242 | f3d36340e2a23c2d458879d6d1427dfe9a00b55d | 601f8bae2846023fe8caee2d86f01417c4d888f4 | /dataset/dataloader.py | 0c77c96bba6c72c29d4372b881ef09acad315403 | []
| no_license | https://github.com/s3nh/plate-detection | 1f13f39839bac2a0296592809c6206e24156dff9 | ac1b14663b9bc106c9f002b774140ab61b1bd01d | refs/heads/master | 2020-07-25T20:23:17.394164 | 2019-09-21T11:27:28 | 2019-09-21T11:27:28 | 208,413,976 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import sys
import re
from PIL import Image
import json
def boxes_detect(image):
    """Detect licence-plate bounding boxes in *image*.

    Placeholder -- not implemented yet.
    """
    pass
class LicencePlateLoader():
    """Skeleton dataset loader for licence-plate images and labels.

    All loading methods below are unimplemented placeholders.
    """
    def __init__(self, data, labels, ratio):
        """Store the dataset inputs.

        Argument semantics are not fixed by any implementation yet
        (presumably data/labels are paths and ratio a train/val split) --
        TODO confirm once the loaders are written.
        """
        self.data = data
        self.labels = labels
        self.ratio = ratio
    @staticmethod
    def load_image(path):
        """Load a single image from *path* (not implemented)."""
        pass
    @staticmethod
    def load_batch(path):
        """Load a batch of images from *path* (not implemented)."""
        pass
    @staticmethod
    def get_labels(path):
        """Load labels from *path* (not implemented)."""
pass | UTF-8 | Python | false | false | 457 | py | 9 | dataloader.py | 5 | 0.575492 | 0.575492 | 0 | 33 | 12.878788 | 45 |
shashankmittra/python_automation | 14,542,759,303,946 | 61515672062a966be0701218c6356ccba95b214c | 143ecc0d7269aff9fd63febc2dbd354d631ee934 | /moving_files/main.py | 93cdf6679ab2081c5d27b6e96b8d3a3008c80671 | []
| no_license | https://github.com/shashankmittra/python_automation | 18b4cbea69eb82c81ca39f0ef6663f1540db3472 | 909fcbc3f9f7460f0146eb5002b5967b73514fe6 | refs/heads/master | 2023-03-27T22:16:08.459512 | 2021-03-23T10:23:27 | 2021-03-23T10:23:27 | 350,668,146 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import shutil
import datetime
# scan the working directory for files to process
def findNewFile():
    """List the current directory and return [1, <name>, ...], or [] if empty.

    The leading 1 is a flag consumed by filterDirectory meaning
    "directory is non-empty".
    """
    entries = os.listdir('.')
    if not entries:
        return []
    return [1] + entries
# drop the leading flag and main.py from the listing, then move the rest
def filterDirectory(namesOfFiles):
    """Filter a findNewFile() result and hand the file names to movefiles.

    namesOfFiles -- list shaped like findNewFile()'s result: a leading 1
    flag followed by file names. Empty or unflagged lists are ignored.
    """
    # Guard against an empty listing: the original indexed [0]
    # unconditionally and raised IndexError on an empty directory.
    if namesOfFiles and namesOfFiles[0] == 1:
        finalList = [name for name in namesOfFiles[1:] if name != 'main.py']
        movefiles(finalList)
# to cut/paste files from the source directory into per-extension destinations
def movefiles(filteredList):
    """Move each file to the directory for its extension, then rename it.

    Files whose extension is neither 'py' nor 'txt' are skipped.
    BUG FIX: previously 'destination' was only assigned for py/txt files,
    so any other extension either raised NameError or silently re-used the
    destination of the previous file; the extension was also taken from
    the *first* dot, misclassifying names like 'a.b.py'.
    """
    destinations = {
        'py': '/home/shashank/shashank/practise/python/test/python/',
        'txt': '/home/shashank/shashank/practise/python/test/text/',
    }
    for name in filteredList:
        extension = name.rpartition('.')[2]  # text after the *last* dot
        destination = destinations.get(extension)
        if destination is None:
            continue  # unsupported file type -- leave it where it is
        source = f'/home/shashank/shashank/prg/python_automation/{name}'
        shutil.move(source, destination)
        renameFile(source, destination, name)
# rename a moved file to <date>_<NN>_<ext> inside its destination directory
def renameFile(source, destination, fileName):
    """Rename destination/fileName to '<YYYY-MM-DD>_<NN>_<ext>'.

    NN is the next free two-digit sequence number from fileCount().
    *source* is kept for interface compatibility but is not needed here.
    """
    today = datetime.date.today()
    count = fileCount(destination, fileName)
    # BUG FIX: the extension used to be hard-coded to '.txt', so moved .py
    # files were silently renamed to .txt; keep the file's own extension.
    extension = os.path.splitext(fileName)[1]
    # :02d reproduces the old zero-padding for counts below 10.
    newDestination = f'{destination}{today}_{count:02d}_{extension}'
    os.rename(f'{destination}{fileName}', newDestination)
# determine the next sequence number for renamed files in a directory
def fileCount(destination, fileName):
    """Return the next free sequence number for date-stamped names in *destination*.

    Renamed files look like '<date>_<NN>_<ext>': the token between the first
    two underscores is the sequence number. Names that do not follow the
    pattern (e.g. the freshly moved *fileName* itself) are skipped.

    BUG FIXES vs. the original:
      * non-matching names made int('') raise ValueError;
      * the *highest existing* number was returned instead of the next one,
        so a subsequent rename collided with an existing file.
    """
    highest = 0
    for name in os.listdir(destination):
        token = name.partition('_')[2].partition('_')[0]
        if token.isdigit():
            highest = max(highest, int(token))
    return highest + 1
# Module entry point (runs on import): list the working directory, then
# hand the result to filterDirectory, which moves and renames the files.
newList = findNewFile()
filterDirectory(newList) | UTF-8 | Python | false | false | 2,164 | py | 1 | main.py | 1 | 0.707024 | 0.69963 | 0 | 88 | 23.602273 | 75 |
Yurie-Koga/Python | 16,690,242,923,239 | 7c61fbe5507d29f7f94a0802a857ff05c22fc6b8 | a6eee6335273f29f62048747125d89f04aa691aa | /IntroToAlgorithms-master/6_lists/0_lists_basics.py | ae819455eb7a8933dc6ec791f859b8a5829501bd | []
| no_license | https://github.com/Yurie-Koga/Python | b008e73d184a0c20558477b7b25eeae6253f9685 | 38e76735253c819c164effb4700f3be776fde1f7 | refs/heads/main | 2023-01-14T12:47:30.175012 | 2020-10-09T00:44:54 | 2020-10-09T00:44:54 | 300,479,340 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # List, Array
# A sequence of items (elements)
# 1. create a list
squares = [1, 4, 9, 16, 25, 36, 49]
print(squares)
# 2. list operations
squares += [64, 81] # add two elements at the end of the list
print(squares)
# 3. list methods
animals = ["Tiger", "Beaver", "Eagle", "Jaguar", "Bull", "Condor", "Panda", "Koi"]
animals.append("Dog") # add "Dog" at the end of the list
animals.insert(0, "Cat") # insert "Cat" at index 0
animals.remove("Panda") # remove the first "Panda" from the list, returns None
print(animals.pop(0)) # pop (remove) the element at index 0, returns the popped element
num_koi = animals.count("Koi") # count the number of "Koi" in the list
print(num_koi)
animals.index("Beaver") # return the index of the first occurrence of "Beaver" (result unused here)
print(animals)
# Indexing a list
print("-------- Indexing a list --------")
countries = ["Canada", "Japan", "Germany",
             "Brazil", "India", "Spain",
             "Ecuador", "South Korea", "China"]
print(f"index 0: {countries[0]}")
print(f"len(countries): {len(countries)}, index len(countries)-1: {countries[len(countries)-1]}")
print(f"index -1: {countries[-1]}")
# negative indices count from the end; '-1' is the idiom for the last element
# Slicing a list (sublist)
print("-------- Slicing a list --------")
print(countries[0:3])
print(countries[3:])
print(countries[:2])
countries[5:8] = [] # removes the elements at indices 5 <= i < 8
countries[0:3] = ["UN"] # replaces the first 3 elements with the single element "UN"
print(countries)
print(["1", 2, True] * 5) # repetition: five concatenated copies of the list
# String vs List
# Strings are IMMUTABLE (cannot change)
# Lists are MUTABLE
city = "Vancouver"
# city[0] = "B"  # would raise TypeError: strings do not support item assignment
print(city)
l = ["String", 10, True, 3.14, [1, 2, 3], "Hello"]
for item in l:
    print(item)
l[0] = "Nicer String"
print(l)
# subscript [] chains: the first index selects the element, the second indexes into it
print(l[4][1]) # 2 (second element of the nested list)
print(l[5][0]) # H (first character of "Hello")
| UTF-8 | Python | false | false | 1,778 | py | 48 | 0_lists_basics.py | 48 | 0.634983 | 0.59955 | 0 | 61 | 28.147541 | 97 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.