repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
franck-roland/django-gitlean
| 5,617,817,234,034 |
5e339e4a26e5560a931710a9bade5d86fb913c69
|
5049bf1c915459d8b7deae532843a452d63da478
|
/gitlean/apps.py
|
406f5f4a9ccefb9ce64d0561af843c3ccafd4c27
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/franck-roland/django-gitlean
|
27f5eeb97fd7cdd8083cdf5c85e4b4c0d81575de
|
76260186d2d78bbe5ba3b2c6222a437d6d535d7e
|
refs/heads/master
| 2020-12-03T05:16:01.855877 | 2016-08-03T17:07:55 | 2016-08-03T17:07:55 | 63,898,827 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.apps import AppConfig
class GitleanConfig(AppConfig):
    """Django AppConfig for the ``gitlean`` application.

    ``name`` is the dotted Python path Django uses to locate the app.
    """
    name = 'gitlean'
|
UTF-8
|
Python
| false | false | 89 |
py
| 10 |
apps.py
| 8 | 0.752809 | 0.752809 | 0 | 5 | 16.8 | 33 |
earlew/py_scripts
| 13,915,694,064,361 |
afc00ad664dc3b2c0219509b593cf4bac6489b96
|
66f4b432ba38dbd75fbd54e12577b096ea76c4cb
|
/BoB_work/compare_hycom_argo.py
|
c84218a9065b5bb572b9064a9205c5a7573f09e1
|
[] |
no_license
|
https://github.com/earlew/py_scripts
|
7eadeaa5724f55bd3ea9c5dd83f56cf77ee51d8a
|
269780a4453956b2af46d3b4dda466f51491e469
|
refs/heads/master
| 2016-09-06T05:28:08.791427 | 2015-03-18T18:37:24 | 2015-03-18T18:37:24 | 27,857,664 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import netCDF4 as nc4
import os
import numpy as np
from IPython.core.debugger import Tracer
import nc_functions as ncf
import sys
import timeit
# Re-import local helpers so interactive edits are picked up (Python 2 builtin reload).
reload(ncf)
# IPython breakpoint hook; call debug_here() anywhere to drop into the debugger.
debug_here = Tracer()
def drawPolyOnMap(m, verts, facecolor='orange', alpha=0.3):
    """Shade a polygon on the Basemap instance ``m``.

    ``verts`` is a sequence of (lon, lat) pairs.  The polygon is drawn in
    map-projection coordinates; both the lon/lat Path and the projected
    Path are returned so callers can do point-in-polygon tests later.
    """
    import matplotlib.pyplot as plt
    import matplotlib.patches as patches
    from matplotlib.path import Path

    xs = [v[0] for v in verts]
    ys = [v[1] for v in verts]
    xs_m, ys_m = m(xs, ys)

    path = Path(verts)
    path_m = Path(zip(xs_m, ys_m))

    shaded = patches.PathPatch(path_m, facecolor=facecolor, lw=2, alpha=alpha)
    plt.gca().add_patch(shaded)
    return path, path_m
def overlayRegionShading(m):
    """Shade the four Bay-of-Bengal subregions on map ``m``.

    Returns a dict mapping subregion key ('swbb', 'nbb', 'cbb', 'sbb') to
    its lon/lat Path, plus the list of facecolors used (in the same order).
    """
    subregion_defs = [
        ('swbb', [(80,10), (80,15), (83.5,18), (86.5,18), (82.5,15), (82.5,7), (80,10)]),
        ('nbb', [(83.5,18), (94.5,18), (91.5,22), (88,22), (83.5,18)]),
        ('cbb', [(82.5,10), (82.5,15), (86.5,18), (94.5,18), (93,14), (93,10), (83,10)]),
        ('sbb', [(82.5,10), (93,10), (93,7), (82.5,7), (82.5,10)]),
    ]
    fcolors = ['mediumblue', 'mediumseagreen', 'crimson', 'darkorange']

    region_paths = {}
    for (key, verts), color in zip(subregion_defs, fcolors):
        lonlat_path, _projected_path = drawPolyOnMap(m, verts, facecolor=color)
        region_paths[key] = lonlat_path
    return region_paths, fcolors
def getshortname(vblename):
    """Map a long variable name to its short form.

    Parameters:
        vblename -- 'salinity' or 'temperature'

    Returns the short name used in the comparison netCDF file
    ('sal' or 'temp').  Raises AssertionError for any other input.
    """
    vblename_short = {'salinity': 'sal', 'temperature': 'temp'}
    # membership test on the dict directly; building .keys() is redundant
    assert vblename in vblename_short, "vblename must be 'salinity' or 'temperature'"
    return vblename_short[vblename]
def convertArgoTime(tnum):
    """Convert Argo day numbers (days since 1770-1-1) to date arrays.

    Parameters:
        tnum -- sequence of day offsets from the epoch

    Returns:
        dvec      -- matlab-like datevec array, dvec.shape = (len(tnum), 3),
                     columns are [year, month, day]
        dtnum_vec -- matlab-like datenum array (proleptic Gregorian ordinals),
                     shape (len(tnum), 1)

    NOTE(review): the original docstring said the epoch was 1700-1-1 while
    the code uses 1770-1-1; the code's value is kept here -- confirm which
    epoch the Argo files actually use.
    """
    # bug fix: `datetime` was never imported at module scope, so this
    # function raised NameError when called; import it locally.
    from datetime import datetime

    dt0_num = datetime(1770, 1, 1).toordinal()
    dvec = np.zeros((len(tnum), 3))
    dtnum_vec = np.zeros((len(tnum), 1))
    for i, t in enumerate(tnum):
        dti = datetime.fromordinal(int(t) + dt0_num)
        dtnum_vec[i, :] = int(t) + dt0_num
        dvec[i, :] = np.array([dti.year, dti.month, dti.day])
    return dvec, dtnum_vec
def findNearestIndex(array, value):
    """Return the index of the element of ``array`` closest to ``value``."""
    #TODO: ensure the argo profile is actually in the nearest grid box
    #neg_diff = (array-value) < 0
    #take only values
    distance = np.abs(array - value)
    return distance.argmin()
def loadHycomError(vname, ncfile='/Users/ewilson2011/python_work/research/nio_wod_data_stdlvl/nio_hycom_argo.nc'):
    """Open the HYCOM/Argo comparison file and return the paired variables.

    Parameters:
        vname  -- variable suffix to match, e.g. 'sal' or 'temp'
        ncfile -- path to the comparison netCDF file (generalized from the
                  previously hard-coded path; default preserves old behavior)

    Returns:
        f           -- the open netCDF4 Dataset (caller must close it)
        argo_vble   -- the 'argo_<vname>' variable object
        hycom_vble  -- the 'hycom_<vname>' variable object
    """
    f = nc4.Dataset(ncfile, 'r')
    # find the matching argo_*/hycom_* variable pair for this suffix
    vble_list = f.variables.keys()
    vbles_match = [vble for vble in vble_list if vble.endswith(vname)]
    argo_vble_name = [vble for vble in vbles_match if vble.startswith('argo')]
    hycom_vble_name = [vble for vble in vbles_match if vble.startswith('hycom')]
    assert len(argo_vble_name) == 1, "too many (or not enough) variables starting with 'argo' "
    assert len(hycom_vble_name) == 1, "too many (or not enough) variables starting with 'hycom' "
    argo_vble = f.variables[argo_vble_name[0]]
    hycom_vble = f.variables[hycom_vble_name[0]]
    return f, argo_vble, hycom_vble
def selectRegion(f, region):
    """Return a flat boolean index selecting profiles inside a named region.

    Parameters:
        f      -- netCDF4 Dataset (or any object with a ``variables`` mapping)
                  providing 'Longitude' and 'Latitude' arrays of shape (n, 1)
        region -- 'nio', 'BoB' or 'AS'

    Returns a boolean ndarray of shape (n,).
    """
    #TODO: Add custom region selector. Maybe using polygons/patches
    regions = ['nio', 'BoB', 'AS']
    assert region in regions, "region keyword argument needs to be 'nio', 'BoB' or 'AS'. "
    lon = f.variables['Longitude'][:]
    lat = f.variables['Latitude'][:]
    # bug fix: the original compared strings with `is`, which only "works"
    # through CPython string interning; use `==` for correct comparison.
    if region == 'BoB':
        lonmin = 75
        latmin = 5
        bobi = (lon > lonmin) & (lat > latmin)
    elif region == 'AS':
        lonmax = 75
        latmin = 5
        bobi = (lon < lonmax) & (lat > latmin)
    else:  # 'nio' (guaranteed by the assert above)
        lonmin = 40
        latmin = 5
        bobi = (lon > lonmin) & (lat > latmin)
    return bobi.flatten()
def getColorVble(color_vble, lat=None, lon=None):
    """Build a descriptor dict for the scatter-plot color variable.

    Parameters:
        color_vble -- 'lat' or 'lon'
        lat, lon   -- the coordinate arrays to attach as the color values
                      (new optional parameters; bug fix below)

    Returns a dict with keys 'vble', 'vname' and 'units', or an empty dict
    for an unrecognized ``color_vble``.
    """
    #TODO: finish implementing
    # bug fix: the original read module globals `lat`/`lon` that do not
    # exist, so it raised NameError; they are now explicit parameters.
    # Also replaced `is` string comparisons (interning accident) with `==`.
    col = {}
    if color_vble == 'lat':
        col['vble'] = lat
        col['vname'] = 'Latitude'
        col['units'] = 'degrees north'
    elif color_vble == 'lon':
        col['vble'] = lon
        col['vname'] = 'Longitude'
        col['units'] = 'degrees east'
    return col
def plotError_scatter(vname='sal', zlayers = [10., 30., 50., 125.], region='BoB', color_vble='lat', savefmt='png', saveplot=False):
    """Scatter-plot HYCOM model values against co-located Argo observations.

    Draws a 2x2 panel figure (one panel per depth in ``zlayers``) of
    HYCOM-vs-Argo values colored by latitude or longitude and annotated
    with STD/RMSE statistics, then a second 2x2 figure with the same
    points grouped by k-means clusters (via the local ``test`` module).

    Parameters:
        vname      -- variable suffix: 'sal' or 'temp'
        zlayers    -- depth levels (m); must appear in the file's 'zlvl'
        region     -- 'BoB', 'AS' or 'nio' (see selectRegion)
        color_vble -- 'lat' or 'lon', variable mapped to the colorbar
        savefmt    -- image file extension for saved figures
        saveplot   -- when True, write figures to ../py_plots/

    NOTE(review): Python 2 code (print statements, xrange).  String
    comparisons use `is`, which only works via CPython interning -- should
    be `==`.  `xlims`/`units`/`col` are only bound for some vname/region/
    color_vble combinations; others raise NameError further down -- TODO
    confirm intended usage.
    """
    #vname = 'sal' or 'temp'
    import matplotlib.pyplot as plt
    #load data and output
    f, argo_vble, hycom_vble = loadHycomError(vname)
    #select for levels
    lvls = f.variables['zlvl'][0,:]
    zi = np.in1d(lvls, zlayers)  # boolean mask of the requested depths
    argo_zi = argo_vble[:,zi]
    hycom_zi = hycom_vble[:,zi]
    #select for BoB
    bobi = selectRegion(f, region)
    lat = f.variables['Latitude'][:]; lat = lat[bobi]
    lon = f.variables['Longitude'][:]; lon = lon[bobi]
    argo_zi = argo_zi[bobi,:]
    hycom_zi = hycom_zi[bobi,:]
    #mask any lingering nans
    argo_zi = np.ma.masked_where(np.isnan(argo_zi), argo_zi)
    hycom_zi = np.ma.masked_where(np.isnan(hycom_zi), hycom_zi)
    #catch spurious salinity measurements in arabian sea
    if vname is 'sal' and region is 'AS':
        bad = argo_zi < 28  # salinity below 28 treated as spurious in the AS
        num_bad = argo_zi[bad].flatten()
        debug_here()  # NOTE(review): interactive breakpoint left in the code
        argo_zi = np.ma.masked_where(bad, argo_zi)
        print "number of bad: %s" %len(num_bad)
    #set color variable
    if color_vble is 'lat':
        col = lat
        color_vname = 'Latitude'
        color_units = 'degrees north'
    elif color_vble is 'lon':
        col = lon
        color_vname = 'Longitude'
        color_units = 'degrees east'
    #debug_here()
    plt_lbls = ['a', 'b', 'c', 'd']
    plt.close('all')
    fig,axes = plt.subplots(2,2, figsize=(12,8))
    # file's temperature units attribute is wrong, so hard-code degrees C
    if vname is 'temp':
        units = r'$^{\circ}$C'
    else:
        units = argo_vble.units
    axes = axes.flatten()
    fntsz = 14
    dx = 0.5 #for text box offset
    dy = 0.5 #for text box offset
    # per-panel axis limits hand-tuned for each variable/region combination
    if region is 'BoB':
        if vname is 'sal':
            xlims = np.array([[28, 36],[28, 36], [30, 37], [30, 37]])
        elif vname is 'temp':
            xlims = np.array([[23, 33], [23, 33], [13, 33], [10, 30]])
    elif region is 'AS':
        if vname is 'sal':
            xlims = np.array([[33, 38],[33, 38], [33, 38], [33, 38]])
            dx = 0.2 #for text box offset
            dy = 0.2 #for text box offset
        elif vname is 'temp':
            xlims = np.array([[22, 32], [20, 32], [13, 33], [10, 30]])
    ylims = xlims
    for i,ax in enumerate(axes):
        plt.sca(ax)
        im1 = ax.scatter(argo_zi[:,i], hycom_zi[:,i], s=8, c=col, cmap=plt.cm.RdYlBu, alpha=0.7, lw=0)
        ax.set_title('(%s) Hycom vs. Argo at %.0fm (%s)' %(plt_lbls[i], zlayers[i], region), fontsize = fntsz+2)
        one2one = np.linspace(xlims[i,0], xlims[i,1],50)  # 1:1 reference line
        ax.plot(one2one, one2one, '-k')
        ax.set_xlim(xlims[i,:])
        ax.set_ylim(ylims[i,:])
        argo_std = np.std(argo_zi[:,i])
        hycom_std = np.std(hycom_zi[:,i])
        sq_err = (hycom_zi[:,i] - argo_zi[:,i])**2
        rmse = np.sqrt(np.mean(sq_err))
        #place text with stats in top right corner of plot
        text_xpos = xlims[i,0]+dx
        text_ypos = ylims[i,1]-dy
        stats = "Argo STD: %.2f \nHYCOM STD: %.2f \nRMSE: %.2f" %(argo_std, hycom_std, rmse)
        ax.text(text_xpos, text_ypos, stats, fontsize=8, verticalalignment='top', horizontalalignment='left',
                bbox=dict(facecolor='#F5BCA9', alpha=0.5))
        # axis labels only on the outer edges of the 2x2 grid
        if i==2 or i==3:
            ax.set_xlabel(argo_vble.long_name + ' (%s)' %units, fontsize=fntsz)
        if i==0 or i==2:
            ax.set_ylabel(hycom_vble.long_name + ' (%s)' %units, fontsize=fntsz)
    cbar_ax = fig.add_axes([0.82, 0.15, 0.025, 0.7]) #make a new axes for the colorbar
    fig.subplots_adjust(right=0.8) #adjust sublot to make colorbar fit
    cb = fig.colorbar(im1, cax=cbar_ax, format='%i') #plot colorbar in cbar_ax using colormap of im1
    cb.set_label('%s (%s)' %(color_vname, color_units),fontsize=fntsz)
    cb.ax.tick_params(labelsize=fntsz)
    plt.show()
    #debug_here()
    if saveplot==True:
        plt.savefig('../py_plots/hycom_vs_argo_%s_%s.%s' %(vname, region, savefmt), bbox_inches='tight', dpi=300)
    # second figure: same panels, but points grouped by k-means clusters
    import test  # NOTE(review): local module providing kmeans_cluster; shadows a stdlib name
    fig,axes = plt.subplots(2,2, figsize=(12,8))
    axes = axes.flatten()
    cvec = ['red','purple','green']
    for i,ax in enumerate(axes):
        k=3  # number of k-means clusters
        centroids, idx, good_data = test.kmeans_cluster(argo_zi[:,i], hycom_zi[:,i], k=k, makeplot=False)
        for j in xrange(k):
            ax.scatter(good_data[idx==j,0], good_data[idx==j,1], s=8, c=cvec[j], alpha=0.7, lw=0)
            #ax.plot(centroids[j,0],centroids[j,1],'*', color=cvec[j], markersize=15)
        ax.set_title('(%s) Hycom vs. Argo at %.0fm (%s)' %(plt_lbls[i], zlayers[i], region), fontsize = fntsz+2)
        one2one = np.linspace(xlims[i,0], xlims[i,1],50)
        ax.plot(one2one, one2one, '-k')
        ax.set_xlim(xlims[i,:])
        ax.set_ylim(ylims[i,:])
        if i==2 or i==3:
            ax.set_xlabel(argo_vble.long_name + ' (%s)' %units, fontsize=fntsz)
        if i==0 or i==2:
            ax.set_ylabel(hycom_vble.long_name + ' (%s)' %units, fontsize=fntsz)
    if saveplot==True:
        plt.savefig('../py_plots/hycom_vs_argo_%s_%s_clustered.%s' %(vname, region, savefmt), bbox_inches='tight', dpi=300)
    # f.close()
    # cbar_ax = fig.add_axes([0.82, 0.15, 0.025, 0.7]) #make a new axes for the colorbar
    # fig.subplots_adjust(right=0.8) #adjust sublot to make colorbar fit
    #
    # cb = fig.colorbar(im1, cax=cbar_ax, format='%i') #plot colorbar in cbar_ax using colormap of im1
    # cb.set_label('%s (%s)' %(color_vname, color_units),fontsize=fntsz)
    # cb.ax.tick_params(labelsize=fntsz)
    plt.show()
    f.close()
    #return argo_zi, hycom_zi
    #
    #
    # hycom_err = hycom_zi-argo_zi
    # fig,axes = plt.subplots(2,2, figsize=(12,8))
    # axes = axes.flatten()
    # xlims = [0,25]
    # for i,ax in enumerate(axes):
    #
    # #plt.sca(ax)
    #
    # im1 = ax.scatter(lat, np.abs(hycom_err[:,i]), s=8, c='k', alpha=0.7, lw=0)
    #
    # ax.set_title('(%s) Hycom error vs. Latitude at %.0fm (%s)' %(plt_lbls[i], zlayers[i], region), fontsize = fntsz+2)
    # ax.set_ylim(0, 2.5)
    # ax.set_xlim(*xlims)
    #
    # #ax.hlines(0,*xlims, color='b')
    #
    # if i==2 or i==3:
    # ax.set_xlabel('Latitude (%s)' %color_units, fontsize=fntsz)
    # if i==0 or i==2:
    # ax.set_ylabel('HYCOM error (%s)' %units, fontsize=fntsz)
def plotError_tseries(vname='sal', zlayers=[10., 50.], region='BoB', ylims = (-6,6), color_vble='lat', savefmt='png'):
    """Plot time series of HYCOM-minus-Argo errors at two depth levels.

    Produces a 2x1 panel figure, one panel per depth in ``zlayers``, with
    each profile's model error scattered against its date, colored by
    latitude or longitude, annotated with STD/RMSE, then saves the figure.

    Parameters:
        vname      -- variable suffix: 'sal' or 'temp'
        zlayers    -- depth levels (m); must appear in the file's 'zlvl'
        region     -- 'BoB', 'AS' or 'nio' (see selectRegion)
        ylims      -- y-axis (error) limits
        color_vble -- 'lat' or 'lon', variable mapped to the colorbar
        savefmt    -- image file extension for the saved figure

    NOTE(review): Python 2 code; `is` string comparisons should be `==`.
    `datetime.fromordinal(dnum_i)` assumes the 'Time' variable holds
    integer-valued proleptic ordinals -- confirm against findHycomArgo().
    """
    from datetime import datetime
    import matplotlib.dates as mdates
    import matplotlib.pyplot as plt
    f, argo_vble, hycom_vble = loadHycomError(vname)
    #select for zlvl
    lvls = f.variables['zlvl'][0,:]
    zi = np.in1d(lvls, zlayers)
    argo_zi = argo_vble[:,zi]
    hycom_zi = hycom_vble[:,zi]
    #select for BoB
    bobi = selectRegion(f, region)
    lat = f.variables['Latitude'][:]
    lon = f.variables['Longitude'][:]
    lat = lat[bobi,:]
    lon = lon[bobi,:]
    hycom_zi = hycom_zi[bobi,:]
    argo_zi = argo_zi[bobi,:]
    hycom_error_zi = hycom_zi - argo_zi  # model minus observation
    #mask any lingering nans
    argo_zi = np.ma.masked_where(np.isnan(argo_zi), argo_zi)
    hycom_zi = np.ma.masked_where(np.isnan(hycom_zi), hycom_zi)
    #mask spurious salinity measurements in arabian sea
    if vname is 'sal' and region is 'AS':
        bad = argo_zi < 28
        num_bad = argo_zi[bad].flatten()
        debug_here()  # NOTE(review): interactive breakpoint left in the code
        argo_zi = np.ma.masked_where(bad, argo_zi)
        print "number of bad: %s" %len(num_bad)
    #get dates
    dnum = f.variables['Time'][:]
    dnum = dnum[bobi,:]
    dnum_nomask = dnum[dnum.mask==False]
    dt_vec = np.array([datetime.fromordinal(dnum_i) for dnum_i in dnum_nomask])
    # drop rows whose date is masked so data and dt_vec stay aligned
    maski = dnum.mask==False
    hycom_error_zi = hycom_error_zi[maski[:,0], :]
    lat = lat[maski[:,0], :]
    lon = lon[maski[:,0], :]
    #set color variable
    if color_vble is 'lat':
        col = lat
        color_vname = 'Latitude'
        color_units = 'degrees north'
    elif color_vble is 'lon':
        col = lon
        color_vname = 'Longitude'
        color_units = 'degrees east'
    plt.close('all')
    fig,axes = plt.subplots(2,1, figsize=(11,8))
    plt_lbls = ['a', 'b', 'c', 'd']
    fntsz = 14
    # file's temperature units attribute is wrong, so hard-code degrees C
    if vname is 'temp':
        units = r'$^{\circ}$C'
    else:
        units = argo_vble.units
    for i,ax in enumerate(axes):
        plt.sca(ax)
        im1 = ax.scatter(dt_vec, hycom_error_zi[:,i], s=8, c=col, cmap=plt.cm.RdYlBu, alpha=0.7, lw=0)
        ax.set_title('(%s) Hycom minus Argo at %.0fm (%s)' %(plt_lbls[i], zlayers[i], region), fontsize = fntsz+2)
        ax.set_ylabel(hycom_vble.long_name + ' error (%s)' %units, fontsize=fntsz)
        ax.set_ylim(ylims)
        ax.grid(True)
        #format dates
        years = mdates.YearLocator(1,month=9)
        major_months = mdates.MonthLocator(bymonth=(3,9))
        minor_months = mdates.MonthLocator()
        myFmt = mdates.DateFormatter('%b%y')
        ax.xaxis.set_major_locator(major_months)
        ax.xaxis.set_minor_locator(minor_months)
        ax.xaxis.set_major_formatter(myFmt)
        # fixed window covering the HYCOM run period
        datemin = datetime(2008, 9, 1)
        datemax = datetime(2013, 9, 1)
        ax.set_xlim((datemin, datemax))
        ax.hlines(0, datemin, datemax)
        #compute stats
        argo_std = np.std(argo_zi[:,i])
        hycom_std = np.std(hycom_zi[:,i])
        sq_err = (hycom_zi[:,i] - argo_zi[:,i])**2
        rmse = np.sqrt(np.mean(sq_err))
        #place text with stats in top right corner of plot
        text_xpos = datetime(2008, 10, 1)
        text_ypos = ylims[1]-0.5
        stats = "Argo STD: %.2f \nHYCOM STD: %.2f \nRMSE: %.2f" %(argo_std, hycom_std, rmse)
        ax.text(text_xpos, text_ypos, stats, fontsize=8, verticalalignment='top', horizontalalignment='left',
                bbox=dict(facecolor='#F5BCA9', alpha=0.5))
    cbar_ax = fig.add_axes([0.84, 0.15, 0.025, 0.7]) #make a new axes for the colorbar
    fig.subplots_adjust(right=0.8, hspace=0.35) #adjust sublot to make colorbar fit
    cb = fig.colorbar(im1, cax=cbar_ax, format='%i') #plot colorbar in cbar_ax using colormap of im1
    cb.set_label('%s (%s)' %(color_vname, color_units),fontsize=fntsz)
    cb.ax.tick_params(labelsize=fntsz)
    plt.show()
    f.close()
    debug_here()  # NOTE(review): interactive breakpoint before saving
    plt.savefig('../py_plots/hycom_error_tseries_%s_%s.%s' %(vname, region, savefmt), bbox_inches='tight', dpi=300)
def plotErrorMap(vname='sal', plotlvls=[10,50], months=[(2,3), (5,6), (8,9), (11,12)], clims=(-1.5,1.5), savefmt='png'):
    """Map HYCOM-minus-Argo errors in the Bay of Bengal by season and depth.

    Produces a 2x4 panel figure: one row per depth in ``plotlvls``, one
    column per two-month season in ``months``, each panel a Basemap scatter
    of per-profile model errors, then saves the figure.

    Parameters:
        vname    -- variable suffix: 'sal' or 'temp'
        plotlvls -- exactly two depth levels (m) from the file's 'zlvl'
        months   -- list of (month, month) pairs defining each season
        clims    -- shared color limits for the error colormap
        savefmt  -- image file extension for the saved figure

    NOTE(review): the loop variable ``zi`` below shadows the boolean level
    mask also named ``zi`` -- works because the mask is no longer needed,
    but fragile.  `vname is 'temp'` relies on CPython string interning.
    """
    import string
    from datetime import datetime
    import calendar as cal
    import matplotlib.pyplot as plt
    import plotting_fun as pfun
    reload(pfun)
    region='BoB'
    latmin=5
    lonmin=75
    assert len(plotlvls) == 2, "plotlvls must have two elements"
    f, argo_vble, hycom_vble = loadHycomError(vname)
    #select for zlvl
    lvls = f.variables['zlvl'][0,:]
    zi = np.in1d(lvls, plotlvls)
    msg = "Choose plotlvls from available levels: " +str(lvls)
    assert np.any(zi==True), msg
    argo_zi = argo_vble[:,zi]
    hycom_zi = hycom_vble[:,zi]
    #select for BoB
    bobi = (f.variables['Longitude'][:]>lonmin) & (f.variables['Latitude'][:]>latmin)
    bobi = bobi.flatten()
    lat = f.variables['Latitude'][:];
    lon = f.variables['Longitude'][:]
    dnum = f.variables['Time'][:]
    dnum = dnum[bobi,:]
    lat = lat[bobi,:];
    lon = lon[bobi,:]
    hycom_error_zi = hycom_zi[bobi,:] - argo_zi[bobi,:]
    #mask any lingering nans
    hycom_error_zi = np.ma.masked_where(np.isnan(hycom_error_zi), hycom_error_zi)
    #eliminate profiles with masked dates
    maski = dnum.mask==False
    dnum_nomask = dnum[maski]
    hycom_error_zi = hycom_error_zi[maski[:,0], :]
    lat = lat[maski[:,0], :]
    lon = lon[maski[:,0], :]
    #create array of datetime objects
    dt_vec = np.array([datetime.fromordinal(dnum_i) for dnum_i in dnum_nomask])
    #get the indicies for each selected month
    moni_dict = {}
    for mon in months:
        key = cal.month_abbr[mon[0]] + "-" + cal.month_abbr[mon[1]]
        moni_dict[key] = np.array([(dt.month==mon[0]) or (dt.month==mon[1]) for dt in dt_vec])
    month_names_sorted = [cal.month_abbr[mon[0]] + "-" + cal.month_abbr[mon[1]] for mon in months]
    #debug_here()
    #now prep for plotting
    plt.close('all')
    fig,axes = plt.subplots(2,4, figsize=(16,5.3))
    plt_lbls = string.lowercase  # NOTE(review): Python 2 only (string.ascii_lowercase in Py3)
    fntsz = 14
    #temperature units is stated as K when it is degrees C. Fixed in findHycomArgo() but function needs to re-run.
    if vname is 'temp':
        units = r'$^{\circ}$C'
    else:
        units = argo_vble.units
    axes = axes.flatten()
    i=0  # running panel index (row-major over depth x season)
    for zi, dep in enumerate(plotlvls):
        for month_name in month_names_sorted:
            #select for correct month
            moni = moni_dict[month_name][:]
            lon_m = lon[moni]
            lat_m = lat[moni]
            hycom_error_zi_m = hycom_error_zi[moni,zi]
            hycom_error_zi_m = hycom_error_zi_m[:,np.newaxis]
            #switch to right plot
            plt.sca(axes[i])
            #create Basemap intance
            m = pfun.createNIOmap(region=region)
            im1 = m.scatter(lon_m, lat_m, s=40, c=hycom_error_zi_m, cmap=plt.cm.coolwarm, lw=0, latlon=True)
            plt.clim(clims)
            #plt.clim(clim[0],clim[1])
            plt.title('%s at %sm' %(month_name, int(dep)), fontsize=fntsz)
            i+=1
    plt.suptitle('%s errors' %hycom_vble.long_name, fontsize=fntsz+3)
    cbar_ax = fig.add_axes([0.81, 0.12, 0.025, 0.73]) #make a new axes for the colorbar
    fig.subplots_adjust(right=0.8, top=0.85, hspace=0.4) #adjust sublot to make colorbar fit
    cb = fig.colorbar(im1, cax=cbar_ax) #plot colorbar in cbar_ax using colormap of im1
    cb.set_label(units, fontsize=fntsz)
    cb.ax.tick_params(labelsize=fntsz)
    debug_here()  # NOTE(review): interactive breakpoint before saving
    plt.savefig('../py_plots/hycom_error_%s_map_%s.%s' %(region, vname, savefmt), bbox_inches='tight', dpi=300)
def plotSubRegions(months=[(2,3), (5,6), (8,9), (11,12)], zmax=(200,300), savefmt='png'):
    """Map BoB subregions and plot seasonal HYCOM error profiles per subregion.

    Draws (1) a map of all Argo profile locations with the four subregion
    polygons shaded, (2) a map with profiles colored by subregion, and
    (3) a 2x4 grid of mean error profiles (salinity top row, temperature
    bottom row; one column per season) with t-test confidence bands.

    Parameters:
        months  -- list of (month, month) pairs defining each season
        zmax    -- (salinity, temperature) maximum depths (m) to plot
        savefmt -- image file extension for the saved figures

    NOTE(review): Python 2 code.  The loop index ``m`` below shadows the
    Basemap instance also named ``m`` (the map is not used afterwards, but
    this is fragile).
    """
    import plotting_fun as pfun
    import matplotlib.pyplot as plt
    from datetime import datetime
    import string
    import calendar as cal
    from matplotlib.ticker import MultipleLocator, FormatStrFormatter
    reload(pfun)
    #definitions
    fntsz = 14
    region='BoB'
    subregions = ['swbb', 'nbb', 'cbb', 'sbb']
    latmin=5; lonmin=75
    ncfile = '/Users/ewilson2011/python_work/research/nio_wod_data_stdlvl/nio_hycom_argo.nc'
    f = nc4.Dataset(ncfile, 'r')
    ##read argo and hycom data
    argo_temp = f.variables['argo_temp'][:]
    argo_sal = f.variables['argo_sal'][:]
    hycom_temp = f.variables['hycom_temp'][:]
    hycom_sal = f.variables['hycom_sal'][:]
    ##select for BoB
    bobi = (f.variables['Longitude'][:]>lonmin) & (f.variables['Latitude'][:]>latmin)
    bobi = bobi.flatten()
    lat = f.variables['Latitude'][:]
    lon = f.variables['Longitude'][:]
    dnum = f.variables['Time'][:]
    lat = lat[bobi,:];
    lon = lon[bobi,:]
    dnum = dnum[bobi,:]
    ##get errors
    hycom_temp_error = hycom_temp[bobi,:] - argo_temp[bobi,:]
    hycom_sal_error = hycom_sal[bobi,:] - argo_sal[bobi,:]
    ##mask any lingering nans
    hycom_temp_error = np.ma.masked_where(np.isnan(hycom_temp_error), hycom_temp_error)
    hycom_sal_error = np.ma.masked_where(np.isnan(hycom_sal_error), hycom_sal_error)
    ##eliminate profiles with masked dates
    maski = dnum.mask==False
    dnum_nomask = dnum[maski]
    hycom_temp_error = hycom_temp_error[maski[:,0], :]
    hycom_sal_error = hycom_sal_error[maski[:,0], :]
    lat = lat[maski[:,0], :]
    lon = lon[maski[:,0], :]
    coords = np.hstack((lon,lat))  # (n, 2) lon/lat pairs for point-in-polygon tests
    plt.close('all')
    # figure 1: all profile locations with shaded subregion polygons
    fig = plt.figure()
    m = pfun.createNIOmap(region=region)
    im1 = m.scatter(lon, lat, s=20, c='k', lw=0, latlon=True)
    subregion_paths, fcolors = overlayRegionShading(m)
    plt.title('Map showing location of Argo profiles and subregions', fontsize=fntsz)
    ##Draw map of points
    # figure 2: profiles colored by the subregion that contains them
    fig = plt.figure()
    m = pfun.createNIOmap(region=region)
    for i,subregion in enumerate(subregions):
        subregion_select = subregion_paths[subregion].contains_points(coords)
        numprof = len(lat[subregion_select])
        m.scatter(lon[subregion_select], lat[subregion_select], s=20, c=fcolors[i], lw=0, label= "%s (%s profiles)" %(subregion.upper(), numprof), latlon=True)
    plt.title('Map showing location of all Argo profiles in the BoB \nbetween September 2008 and July 2013', fontsize=fntsz+3)
    plt.legend(fontsize='small', loc=2)
    debug_here()  # NOTE(review): interactive breakpoint left in the code
    plt.savefig('../py_plots/argo_loc_%s_regions.%s' %(region, savefmt), bbox_inches='tight', dpi=300)
    debug_here()
    #Now plot seasonal error profiles
    ##select for zlvl
    lvls = f.variables['zlvl'][0,:]
    zi_sal = lvls <= zmax[0]
    zi_temp = lvls <= zmax[1]
    sal_plot_lvls = lvls[zi_sal]
    temp_plot_lvls = lvls[zi_temp]
    hycom_temp_error = hycom_temp_error[:,zi_temp]
    hycom_sal_error = hycom_sal_error[:,zi_sal]
    #create array of datetime objects
    dt_vec = np.array([datetime.fromordinal(dnum_i) for dnum_i in dnum_nomask])
    #get the indicies for each selected month
    moni_dict = {}
    for mon in months:
        key = cal.month_abbr[mon[0]] + "-" + cal.month_abbr[mon[1]]
        moni_dict[key] = np.array([(dt.month==mon[0]) or (dt.month==mon[1]) for dt in dt_vec])
    month_names_sorted = [cal.month_abbr[mon[0]] + "-" + cal.month_abbr[mon[1]] for mon in months]
    fig,axes = plt.subplots(2,4, figsize=(16,8))
    plt_lbls = string.lowercase  # NOTE(review): Python 2 only (string.ascii_lowercase in Py3)
    fntsz = 14
    yticks_top = [0,10,25,50,75,100,150,200]
    yticks_bottom = [0,20,50,75,100,150,200,250,300]
    sal_ticks = np.arange(-1.5, 1.51,0.5);
    temp_ticks = np.arange(-3, 3.01,1)
    majorlocator_top = MultipleLocator(0.5)
    minorlocator_top = MultipleLocator(0.1)
    majorlocator_bottom = MultipleLocator(1)
    minorlocator_bottom = MultipleLocator(0.2)
    for m,month_name in enumerate(month_names_sorted):
        #select for correct month and take average of all points
        moni = moni_dict[month_name][:]
        hycom_temp_error_m = hycom_temp_error[moni,:]
        hycom_sal_error_m = hycom_sal_error[moni,:]
        lon_m = lon[moni,:]
        lat_m = lat[moni,:]
        coords_m = np.hstack((lon_m,lat_m))
        #set axis propeties
        ax_top = axes[0,m]
        ax_top.set_xticks(sal_ticks)
        ax_top.set_yticks(yticks_top)
        ax_top.xaxis.set_major_locator(majorlocator_top)
        ax_top.xaxis.set_minor_locator(minorlocator_top)
        #ax_top.invert_yaxis()
        # depth increases downward: y-limits reversed instead of invert_yaxis
        ax_top.set_ylim(max(yticks_top), min(yticks_top))
        ax_top.set_xlim(min(sal_ticks), max(sal_ticks))
        ax_top.grid(True)
        ax_top.set_xlabel('Sal (PSS)', fontsize=fntsz)
        ax_bottom = axes[1,m]
        ax_bottom.set_xticks(temp_ticks)
        ax_bottom.set_yticks(yticks_bottom)
        ax_bottom.set_ylim(max(yticks_bottom), min(yticks_bottom))
        ax_bottom.set_xlim(min(temp_ticks), max(temp_ticks))
        ax_bottom.xaxis.set_major_locator(majorlocator_bottom)
        ax_bottom.xaxis.set_minor_locator(minorlocator_bottom)
        #ax_bottom.invert_yaxis()
        ax_bottom.grid(True)
        ax_bottom.set_xlabel('Temp ($^{\circ}$ C)', fontsize=fntsz)
        if m==0:
            ax_bottom.set_ylabel('Depth (m)', fontsize=fntsz)
            ax_top.set_ylabel('Depth (m)', fontsize=fntsz)
        for i,subregion in enumerate(subregions):
            #select for subregion
            subregion_select = subregion_paths[subregion].contains_points(coords_m)
            hycom_sal_error_mr = hycom_sal_error_m[subregion_select,:]
            hycom_temp_error_mr = hycom_temp_error_m[subregion_select,:]
            #compute mean and confidence intervals for means based on t-test
            sal_subregion_mean = np.mean(hycom_sal_error_mr, axis=0)
            temp_subregion_mean = np.mean(hycom_temp_error_mr, axis=0)
            sal_conf_int = pfun.t_interval(hycom_sal_error_mr, axis=0, alpha=0.05)
            temp_conf_int = pfun.t_interval(hycom_temp_error_mr, axis=0, alpha=0.05)
            #plot salinity error profile on top row
            ax_top.plot(sal_subregion_mean, sal_plot_lvls, color=fcolors[i], lw=2, label=subregion.upper())
            ax_top.fill_betweenx(sal_plot_lvls, sal_conf_int[0], sal_conf_int[1], color=fcolors[i], alpha=0.5)
            ax_top.set_title('Sal errors for %s' %month_name, fontsize=fntsz)
            #plot temp error profile on top row
            ax_bottom.plot(temp_subregion_mean, temp_plot_lvls, color=fcolors[i], lw=2, label=subregion.upper())
            ax_bottom.fill_betweenx(temp_plot_lvls, temp_conf_int[0], temp_conf_int[1], color=fcolors[i], alpha=0.5)
            ax_bottom.set_title('Temp errors for %s' %month_name, fontsize=fntsz)
        # legends only on first and last columns
        if m==0 or m==3:
            ax_top.legend(fontsize='small', loc=3)
            ax_bottom.legend(fontsize='small', loc=3)
        #add vertical zero lines after profile errors are drawn
        ax_top.vlines(0, min(yticks_top), max(yticks_top))
        ax_bottom.vlines(0, min(yticks_bottom), max(yticks_bottom))
    plt.suptitle('Seasonal HYCOM error profiles', fontsize=fntsz+3)
    fig.subplots_adjust(hspace=0.4) #adjust sublot to make colorbar fit
    plt.show()
    f.close()
    debug_here()
    plt.savefig('../py_plots/hycom_error_seas_profiles_%s.%s' %(region, savefmt), bbox_inches='tight', dpi=300)
def findHycomArgo(testing=True):
    """Find co-locating HYCOM grid-point values for each Argo profile.

    Reads the combined Argo file, and for each profile within the HYCOM run
    period locates the daily HYCOM salinity/temperature file for that date,
    extracts the profile at the nearest model grid point, and writes paired
    argo_*/hycom_* variables into a new comparison file (nio_hycom_argo.nc).

    Parameters:
        testing -- when True (default), process profiles until the first
                   successful write and then return ("test mode").

    NOTE(review): Python 2 code (print statements, xrange, e.message).
    `datetime` is used below but never imported at module scope -- this
    function raises NameError unless datetime was injected interactively;
    should import it like the sibling plotting functions do.
    """
    if testing is True:
        print "Running in test mode..."
    elif testing is False:
        print "Starting full run. Getting time estimate..."
    #get list of hycom files
    hycom_temp_dir = '/Volumes/Free Space/NIO_hycom/temp_nc/'
    all_temp_files = os.listdir(hycom_temp_dir)
    hycom_temp_files = [file for file in all_temp_files if file.endswith('.nc')]
    hycom_sal_dir = '/Volumes/Free Space/NIO_hycom/sal_nc/'
    all_sal_files = os.listdir(hycom_sal_dir)
    hycom_sal_files = [file for file in all_sal_files if file.endswith('.nc')]
    #load argo data
    argo_file_dir = '/Users/ewilson2011/python_work/research/nio_wod_data_stdlvl/'
    argoFilePath = argo_file_dir + 'nio_combined_stdlvl.nc'
    f_argo = nc4.Dataset(argoFilePath, 'r')
    lat_full = f_argo.variables['Latitude'][:]
    lon_full = f_argo.variables['Longitude'][:]
    tnum_full = f_argo.variables['Time'][:]
    temp_full = f_argo.variables['Temperature'][:]
    sal_full = f_argo.variables['Salinity'][:]
    argo_zlvls = f_argo.variables['zlvl'][0,:]
    #take only data have time values if there are masked times
    if type(tnum_full) is np.ma.core.MaskedArray:
        tmask = tnum_full.mask
        argo_tnum = tnum_full[tmask==False]
        argo_lat = lat_full[tmask==False]
        argo_lon = lon_full[tmask==False]
        argo_temp_full = temp_full[:,np.newaxis]
        argo_sal_full = sal_full[:,np.newaxis]
        argo_temp = argo_temp_full[tmask==False]
        argo_sal = argo_sal_full[tmask==False]
    else:
        argo_tnum = tnum_full[:]
        argo_lat = lat_full[:]
        argo_lon = lon_full[:]
        argo_temp = temp_full[:,np.newaxis]
        argo_sal = sal_full[:,np.newaxis]
    # free the full arrays before the main loop
    del temp_full, sal_full, lon_full, lat_full, tnum_full
    #convert argo "time since" vector to actual date vector
    argo_dvec, argo_dtnum_vec = convertArgoTime(argo_tnum)
    #limit argo profiles to those that fall within HYCOM run time
    min_hytime = datetime(2008,9,17).toordinal()
    max_hytime = datetime(2013,8,31).toordinal()
    valid_argo = (argo_dtnum_vec>min_hytime) & (argo_dtnum_vec<max_hytime)
    valid_argo = valid_argo.flatten()
    argo_dvec = argo_dvec[valid_argo,:]
    argo_dtnum_vec = argo_dtnum_vec[valid_argo,:]
    argo_lat = argo_lat[valid_argo]
    argo_lon = argo_lon[valid_argo]
    argo_temp = argo_temp[valid_argo,:]
    argo_sal = argo_sal[valid_argo,:]
    #create new argo_hycom comparison file. Dimensions need to match f_argo. Maybe write a function that does this
    newFilename = 'nio_hycom_argo.nc'
    newFilePath = argo_file_dir+newFilename
    try:
        fnew = nc4.Dataset(newFilePath, 'w')
    except RuntimeError as e:
        print e.message
        sys.exit("RuntimeError: Close fnew if it is open in the command line workspace.")
    fnew.createDimension('numProf', None) #specify here, so copy_ncDataset skips over it
    newVariables = ('Salinity', 'Temperature', 'Time', 'Longitude', 'Latitude', 'zlvl')
    fnew = ncf.copy_ncDataset(f_argo, fnew, newVariables)
    fnew.renameVariable('Salinity', 'argo_sal')
    fnew.renameVariable('Temperature', 'argo_temp')
    fnew.createVariable('hycom_sal', 'f4', ('numProf', 'z'))
    fnew.createVariable('hycom_temp', 'f4', ('numProf', 'z'))
    f_argo.close()
    numProf = len(argo_dtnum_vec)
    #create loop that iterates through each argo profile
    checkpoint = 20  # number of profiles used to estimate total run time
    start_time = timeit.default_timer()
    for i in xrange(numProf):
        # find hycom daily output corresponding day of argo profile
        ## get day number for year
        yri = int(argo_dvec[i,0])
        argo_dti_num = argo_dtnum_vec[i,0]
        yri_jan01 = datetime(yri,1,1)
        yri_jan01_num = datetime.toordinal(yri_jan01)
        yr_day_num = argo_dti_num - yri_jan01_num + 1  # 1-based day of year
        yr_day_num_str = "%03d" %yr_day_num
        ## load correct daily files
        hycom_fname_sal = 'salinity-%s_%s_00_3zs-nio.nc' %(yri,yr_day_num_str)
        hycom_fname_temp = 'temperature-%s_%s_00_3zt-nio.nc' %(yri,yr_day_num_str)
        # skip profiles whose daily model output is missing
        if hycom_fname_sal not in hycom_sal_files:
            continue
        if hycom_fname_temp not in hycom_temp_files:
            continue
        f_hytemp = nc4.Dataset(hycom_temp_dir+hycom_fname_temp, 'r')
        f_hysal = nc4.Dataset(hycom_sal_dir+hycom_fname_sal, 'r')
        #find the model grid point corresponding to the location of the location of the float.
        hy_lon = f_hytemp.variables['longitude'][:]
        hy_lat = f_hytemp.variables['latitude'][:]
        closest_lati = findNearestIndex(hy_lat, argo_lat[i])
        closest_loni = findNearestIndex(hy_lon, argo_lon[i])
        ## make sure hycom grid box is within 0.08 degrees of the argo profile location
        assert np.abs(hy_lon[closest_loni]-argo_lon[i]) < 0.08, 'hycom grid point not close enough'
        assert np.abs(hy_lat[closest_lati]-argo_lat[i]) < 0.08, 'hycom grid point not close enough'
        hytemp_prof = f_hytemp.variables['temperature'][0, :, closest_lati, closest_loni]
        hysal_prof = f_hysal.variables['salinity'][0, :, closest_lati, closest_loni]
        #find the difference between hycom and argo at the levels where argo data are available
        ## find zlvls in hycom the correspond to zlvls in argo
        hy_zlvls = f_hytemp.variables['zlevels'][:]
        ahi = np.in1d(hy_zlvls, argo_zlvls) #argo in hycom
        hai = np.in1d(argo_zlvls, hy_zlvls) #hycom in argo
        #read data into argo_hycom comparison file
        # fnew.variables["sal_error"][i,hai] = argo_sal[i, hai] - hysal_prof[ahi]
        # fnew.variables["temp_error"][i,hai] = argo_temp[i, hai] - hytemp_prof[ahi]
        fnew.variables["hycom_sal"][i,hai] = hysal_prof[ahi]
        fnew.variables["hycom_temp"][i,hai] = hytemp_prof[ahi]
        fnew.variables["argo_sal"][i,hai] = argo_sal[i, hai]
        fnew.variables["argo_temp"][i,hai] = argo_temp[i, hai]
        fnew.variables["Time"][i] = argo_dti_num
        fnew.variables["Longitude"][i] = argo_lon[i]
        fnew.variables["Latitude"][i] = argo_lat[i]
        fnew.variables["zlvl"][0,:] = argo_zlvls
        #set attributes
        # NOTE(review): attributes are (re)assigned on every iteration --
        # harmless but could be hoisted out of the loop.
        fnew.variables["hycom_sal"].units = 'PSS'
        fnew.variables["argo_sal"].units = 'PSS'
        fnew.variables["hycom_temp"].units = r'$^{\circ}$C'
        fnew.variables["argo_temp"].units = r'$^{\circ}$C'
        fnew.variables["hycom_sal"].long_name = 'Hycom Salinity'
        fnew.variables["argo_sal"].long_name = 'Argo Salinity'
        fnew.variables["hycom_temp"].long_name = 'Hycom Temperature'
        fnew.variables["argo_temp"].long_name = 'Argo Temperature'
        fnew.variables["zlvl"].long_name = 'Depth levels'
        fnew.variables["zlvl"].units = 'm'
        fnew.variables["Time"].units = 'days since Jan 01, 0001'
        #close hycom nc dataset objects
        f_hysal.close()
        f_hytemp.close()
        #give completion time estimate
        if i==checkpoint:
            check_time = timeit.default_timer()
            elapsed_time = check_time - start_time
            av_time = elapsed_time/checkpoint
            files_rem = numProf-checkpoint
            est_time_left = av_time*files_rem/60.
            print '%s files processed. %s remaining. \nEstimated completion time: %.1f minutes' %(checkpoint, files_rem, est_time_left)
        # test mode: stop after the first successfully processed profile
        # (placement reconstructed from mangled indentation -- TODO confirm)
        if testing:
            fnew.close()
            print "Test complete!"
            return
    fnew.close()
    end_time = timeit.default_timer()
    completion_time = (end_time - start_time)/60
    print "Success!"
    print "Actual completion time: %.1f minutes" %completion_time
|
UTF-8
|
Python
| false | false | 35,283 |
py
| 25 |
compare_hycom_argo.py
| 24 | 0.583822 | 0.562084 | 0 | 961 | 35.683663 | 159 |
cash2one/xai
| 4,827,543,283,195 |
fddbc023b9f6a8f42b59d8bfaccb0abb005b0824
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_foxiest.py
|
86bda8a51b4f3712a3b5d317196bd67ada4da945
|
[
"MIT"
] |
permissive
|
https://github.com/cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#class header
class _FOXIEST():
    """Auto-generated word-form record: "FOXIEST" as a form of the base word "foxy".

    All attributes are plain data fields populated in ``__init__``; consumers
    read them directly (no methods beyond the constructor).
    """

    def __init__(self):
        self.name = "FOXIEST"
        # BUG FIX: the generator emitted the bare name ``foxy`` here, which is
        # undefined and raised NameError on instantiation.  Quote it so the
        # attribute holds the base-word string, matching ``self.basic`` below.
        self.definitions = "foxy"
        self.parents = []
        # NOTE(review): "childen" looks like a typo for "children", but the
        # attribute name is part of the generated interface — kept as-is.
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['foxy']
UTF-8
|
Python
| false | false | 218 |
py
| 37,275 |
_foxiest.py
| 37,266 | 0.591743 | 0.591743 | 0 | 13 | 15.615385 | 25 |
vaidasj/alg-mod-rev
| 13,262,859,044,321 |
8725d6ce30f582d73cb757be71c45aa1bd0fc6d5
|
c4249ce9e7cb26ae006bc9951ea676ae2250777b
|
/gamslib/indus/indus-scalar.py
|
4d24cd61866fb8560cd12be80bc2f5134b876969
|
[] |
no_license
|
https://github.com/vaidasj/alg-mod-rev
|
79de3ef1e110f4bd07cbdef6951de2e4216f47f1
|
a3ec6b5c21700a2f28ac6bf7db6aa22540748c6e
|
refs/heads/master
| 2021-06-27T14:06:39.997411 | 2020-10-19T15:47:54 | 2020-10-19T15:47:54 | 180,074,989 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# LP written by GAMS Convert at 12/13/18 10:24:46
#
# Equation counts
# Total E G L N X C B
# 275 183 18 74 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 404 404 0 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 4145 4145 0 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
# Model container plus decision variables x1..x404.  Per the GAMS Convert
# header above, one variable (x356) was removed by the reformulation, so it
# is skipped.  Every variable is initialized to 0; x1..x4 are free, all
# others are non-negative, and a handful carry finite upper bounds.
model = m = ConcreteModel()

# Finite upper bounds that deviate from the default of None (unbounded above),
# keyed by variable index — copied verbatim from the converted GAMS model.
_finite_ub = {217: 511, 228: 392}
_finite_ub.update(dict.fromkeys(range(241, 253), 2168.4510478624))
_finite_ub.update(dict.fromkeys(range(253, 265), 929.3361633696))
_finite_ub.update(dict.fromkeys(range(268, 280), 41354.33401568))
_finite_ub.update(dict.fromkeys(range(280, 292), 17723.28600672))

for _i in range(1, 405):
    if _i == 356:
        continue  # eliminated by the reformulation noted in the file header
    _lb = None if _i <= 4 else 0
    # setattr with the name "xN" registers the component on the model exactly
    # as the original per-line assignments m.xN = Var(...) did.
    setattr(m, "x%d" % _i,
            Var(within=Reals, bounds=(_lb, _finite_ub.get(_i)), initialize=0))
del _finite_ub, _i, _lb
# Objective (maximize), machine-generated from the GAMS model.  x292..x355
# all carry the identical coefficient -0.00044516833461718, so that long tail
# is written as a single sum instead of 64 explicit terms; the resulting
# linear expression has exactly the same coefficients as the original.
m.obj = Objective(
    expr=0.001*m.x1 + 0.001*m.x2
         - 0.0574470093527871*(m.x5 + m.x6)
         - 0.00044516833461718*sum(getattr(m, "x%d" % _j)
                                   for _j in range(292, 356)),
    sense=maximize)
# Machine-generated equality constraints (emitted by GAMS Convert; see the
# file header).  The numeric coefficients are data from the converted model,
# so they are documented rather than restructured.
m.c2 = Constraint(expr= m.x1 - 1.086*m.x55 - 1.544*m.x56 - 0.45*m.x57 - 0.485*m.x58 - 0.45*m.x59 - 0.72*m.x60
                        - 0.07*m.x61 - 0.65*m.x62 - 0.5*m.x63 - 0.45*m.x73 - 0.485*m.x74 - 0.45*m.x75 - 0.72*m.x76
                        - 0.65*m.x77 - 0.5*m.x78 + 0.675*m.x85 + 0.7275*m.x86 + 0.675*m.x87 + 1.08*m.x88 + 0.975*m.x89
                        + 0.75*m.x90 - 1.25*m.x97 - 0.95*m.x98 - 2.5*m.x99 - 1.25*m.x103 - 0.95*m.x104 - 2.5*m.x105
                        + 2.5*m.x109 + 1.9*m.x110 + 5*m.x111 + m.x115 == 0)
# NOTE: c3 mirrors c2 with the same coefficients over a shifted variable set
# (x2/x64..x116 instead of x1/x55..x115); likewise c5 mirrors c4.
m.c3 = Constraint(expr= m.x2 - 1.086*m.x64 - 1.544*m.x65 - 0.45*m.x66 - 0.485*m.x67 - 0.45*m.x68 - 0.72*m.x69
                        - 0.07*m.x70 - 0.65*m.x71 - 0.5*m.x72 - 0.45*m.x79 - 0.485*m.x80 - 0.45*m.x81 - 0.72*m.x82
                        - 0.65*m.x83 - 0.5*m.x84 + 0.675*m.x91 + 0.7275*m.x92 + 0.675*m.x93 + 1.08*m.x94 + 0.975*m.x95
                        + 0.75*m.x96 - 1.25*m.x100 - 0.95*m.x101 - 2.5*m.x102 - 1.25*m.x106 - 0.95*m.x107 - 2.5*m.x108
                        + 2.5*m.x112 + 1.9*m.x113 + 5*m.x114 + m.x116 == 0)
m.c4 = Constraint(expr= - m.x1 + m.x3 + 0.225*m.x85 + 0.2425*m.x86 + 0.225*m.x87 + 0.36*m.x88 + 0.325*m.x89 + 0.25*m.x90
                        - 1.25*m.x109 - 0.95*m.x110 - 2.5*m.x111 == 0)
m.c5 = Constraint(expr= - m.x2 + m.x4 + 0.225*m.x91 + 0.2425*m.x92 + 0.225*m.x93 + 0.36*m.x94 + 0.325*m.x95 + 0.25*m.x96
                        - 1.25*m.x112 - 0.95*m.x113 - 2.5*m.x114 == 0)
# c6..c21 couple x209..x219 to pairs (x292..x307, x324..x339); each has the
# form <linear expression> - m.x(292+k) + m.x(324+k) == 0.
m.c6 = Constraint(expr= 74.4583*m.x209 + 70.01583*m.x211 + 34.507355*m.x212 + 74.4583*m.x213 + 70.01583*m.x214
                        + 78.806915*m.x215 - 30.72187*m.x216 + 40.263795*m.x219 - m.x292 + m.x324 == 0)
m.c7 = Constraint(expr= 65.19794*m.x209 - 37.635855*m.x211 + 6.38214*m.x212 + 65.19794*m.x213 - 37.635855*m.x214
                        - 53.716345*m.x215 - 42.48503*m.x216 + 23.71403*m.x219 - m.x293 + m.x325 == 0)
m.c8 = Constraint(expr= - 71.20466*m.x209 - 49.962145*m.x210 - 87.97342*m.x211 + 7.039125*m.x212 - 71.20466*m.x213
                        - 87.97342*m.x214 - 60.50519*m.x215 - 17.300605*m.x216 + 163.43284*m.x217 + 163.43284*m.x218
                        - 36.822445*m.x219 - m.x294 + m.x326 == 0)
m.c9 = Constraint(expr= - 38.1677*m.x209 - 53.528635*m.x210 - 109.18465*m.x211 + 26.122975*m.x212 - 38.1677*m.x213
                        - 109.18465*m.x214 + 76.366685*m.x215 + 4.724035*m.x216 + 56.81356*m.x217 + 56.81356*m.x218
                        - 29.53304*m.x219 - m.x295 + m.x327 == 0)
m.c10 = Constraint(expr= - 48.8046*m.x209 + 178.355785*m.x210 - 67.5756*m.x211 - 41.98447*m.x212 - 48.8046*m.x213
                         - 67.5756*m.x214 - 51.18226*m.x215 - 39.950945*m.x216 - 325.9897*m.x217 - 325.9897*m.x218
                         + 1.783245*m.x219 - m.x296 + m.x328 == 0)
m.c11 = Constraint(expr= - 106.900845*m.x209 - 19.803405*m.x210 + 36.60345*m.x211 + 33.97551*m.x212 - 106.900845*m.x213
                         + 36.60345*m.x214 - 10.042485*m.x215 + 50.087285*m.x216 - 34.72635*m.x217 - 34.72635*m.x218
                         - 4.94303*m.x219 - m.x297 + m.x329 == 0)
m.c12 = Constraint(expr= - 42.42246*m.x209 - 3.87934*m.x210 + 54.717465*m.x211 - 61.913015*m.x212 - 42.42246*m.x213
                         + 54.717465*m.x214 + 3.785485*m.x215 + 72.64377*m.x216 + 13.7654*m.x217 + 13.7654*m.x218
                         - 104.898605*m.x219 - m.x298 + m.x330 == 0)
m.c13 = Constraint(expr= - 35.72747*m.x209 - 129.426045*m.x210 + 56.281715*m.x211 - 26.560965*m.x212 - 35.72747*m.x213
                         + 56.281715*m.x214 - 50.587845*m.x215 + 52.93422*m.x216 - 610.99605*m.x217 - 610.99605*m.x218
                         + 88.8494*m.x219 - m.x299 + m.x331 == 0)
m.c14 = Constraint(expr= 299.27231*m.x209 - 199.03517*m.x210 + 51.463825*m.x211 - 48.49175*m.x212 + 299.27231*m.x213
                         + 51.463825*m.x214 + 81.810275*m.x215 - 67.70074*m.x216 - 79.83932*m.x217 - 79.83932*m.x218
                         + 16.58105*m.x219 - m.x300 + m.x332 == 0)
m.c15 = Constraint(expr= - 50.24371*m.x209 - 133.77466*m.x210 + 127.611515*m.x211 + 25.09057*m.x212 - 50.24371*m.x213
                         + 127.611515*m.x214 + 58.283955*m.x215 + 45.80124*m.x216 + 473.90518*m.x217 + 473.90518*m.x218
                         - 5.44359*m.x219 - m.x301 + m.x333 == 0)
m.c16 = Constraint(expr= - 18.176585*m.x209 - 90.60136*m.x210 + 31.78556*m.x211 + 55.0616*m.x212 - 18.176585*m.x213
                         + 31.78556*m.x214 - 1.18883*m.x215 + 106.74442*m.x216 + 577.802665*m.x217 + 577.802665*m.x218
                         + 57.845965*m.x219 - m.x302 + m.x334 == 0)
m.c17 = Constraint(expr= - 21.71179*m.x209 + 115.097515*m.x210 - 7.602255*m.x211 - 46.207945*m.x212 - 21.71179*m.x213
                         - 7.602255*m.x214 - 88.943255*m.x215 + 71.548795*m.x216 - 29.814605*m.x217 - 29.814605*m.x218
                         - 55.530875*m.x219 - m.x303 + m.x335 == 0)
m.c18 = Constraint(expr= 8.57209*m.x209 + 96.138805*m.x210 + 63.101845*m.x211 - 9.01008*m.x212 + 8.57209*m.x213
                         + 63.101845*m.x214 + 38.91854*m.x215 - 116.286345*m.x216 - 430.4816*m.x217 - 430.4816*m.x218
                         + 44.11185*m.x219 - m.x304 + m.x336 == 0)
m.c19 = Constraint(expr= 5.725155*m.x209 + 126.45397*m.x210 - 4.536325*m.x211 + 29.72075*m.x212 + 5.725155*m.x213
                         - 4.536325*m.x214 - 21.242515*m.x215 - 53.716345*m.x216 + 591.9122*m.x217 + 591.9122*m.x218
                         - 29.65818*m.x219 - m.x305 + m.x337 == 0)
m.c20 = Constraint(expr= - 25.121855*m.x209 + 245.993955*m.x210 - 72.14321*m.x211 - 54.905175*m.x212 - 25.121855*m.x213
                         - 72.14321*m.x214 - 160.648475*m.x215 - 79.27619*m.x216 - 67.231465*m.x217 - 67.231465*m.x218
                         - 63.539835*m.x219 - m.x306 + m.x338 == 0)
m.c21 = Constraint(expr= 5.287165*m.x209 - 310.753905*m.x210 - 105.18017*m.x211 + 71.110805*m.x212 + 5.287165*m.x213
                         - 105.18017*m.x214 + 160.11663*m.x215 + 42.92302*m.x216 - 414.2134*m.x217 - 414.2134*m.x218
                         + 57.25155*m.x219 - m.x307 + m.x339 == 0)
# c22..c27 repeat the coefficient patterns of c6..c11 over the shifted index
# block x220..x230, coupled to (x308.., x340..).
m.c22 = Constraint(expr= 74.4583*m.x220 + 70.01583*m.x222 + 34.507355*m.x223 + 74.4583*m.x224 + 70.01583*m.x225
                         + 78.806915*m.x226 - 30.72187*m.x227 + 40.263795*m.x230 - m.x308 + m.x340 == 0)
m.c23 = Constraint(expr= 65.19794*m.x220 - 37.635855*m.x222 + 6.38214*m.x223 + 65.19794*m.x224 - 37.635855*m.x225
                         - 53.716345*m.x226 - 42.48503*m.x227 + 23.71403*m.x230 - m.x309 + m.x341 == 0)
m.c24 = Constraint(expr= - 71.20466*m.x220 - 49.962145*m.x221 - 87.97342*m.x222 + 7.039125*m.x223 - 71.20466*m.x224
                         - 87.97342*m.x225 - 60.50519*m.x226 - 17.300605*m.x227 + 163.43284*m.x228 + 163.43284*m.x229
                         - 36.822445*m.x230 - m.x310 + m.x342 == 0)
m.c25 = Constraint(expr= - 38.1677*m.x220 - 53.528635*m.x221 - 109.18465*m.x222 + 26.122975*m.x223 - 38.1677*m.x224
                         - 109.18465*m.x225 + 76.366685*m.x226 + 4.724035*m.x227 + 56.81356*m.x228 + 56.81356*m.x229
                         - 29.53304*m.x230 - m.x311 + m.x343 == 0)
m.c26 = Constraint(expr= - 48.8046*m.x220 + 178.355785*m.x221 - 67.5756*m.x222 - 41.98447*m.x223 - 48.8046*m.x224
                         - 67.5756*m.x225 - 51.18226*m.x226 - 39.950945*m.x227 - 325.9897*m.x228 - 325.9897*m.x229
                         + 1.783245*m.x230 - m.x312 + m.x344 == 0)
m.c27 = Constraint(expr= - 106.900845*m.x220 - 19.803405*m.x221 + 36.60345*m.x222 + 33.97551*m.x223 - 106.900845*m.x224
                         + 36.60345*m.x225 - 10.042485*m.x226 + 50.087285*m.x227 - 34.72635*m.x228 - 34.72635*m.x229
                         - 4.94303*m.x230 - m.x313 + m.x345 == 0)
m.c28 = Constraint(expr= - 42.42246*m.x220 - 3.87934*m.x221 + 54.717465*m.x222 - 61.913015*m.x223 - 42.42246*m.x224
+ 54.717465*m.x225 + 3.785485*m.x226 + 72.64377*m.x227 + 13.7654*m.x228 + 13.7654*m.x229
- 104.898605*m.x230 - m.x314 + m.x346 == 0)
m.c29 = Constraint(expr= - 35.72747*m.x220 - 129.426045*m.x221 + 56.281715*m.x222 - 26.560965*m.x223 - 35.72747*m.x224
+ 56.281715*m.x225 - 50.587845*m.x226 + 52.93422*m.x227 - 610.99605*m.x228 - 610.99605*m.x229
+ 88.8494*m.x230 - m.x315 + m.x347 == 0)
m.c30 = Constraint(expr= 299.27231*m.x220 - 199.03517*m.x221 + 51.463825*m.x222 - 48.49175*m.x223 + 299.27231*m.x224
+ 51.463825*m.x225 + 81.810275*m.x226 - 67.70074*m.x227 - 79.83932*m.x228 - 79.83932*m.x229
+ 16.58105*m.x230 - m.x316 + m.x348 == 0)
m.c31 = Constraint(expr= - 50.24371*m.x220 - 133.77466*m.x221 + 127.611515*m.x222 + 25.09057*m.x223 - 50.24371*m.x224
+ 127.611515*m.x225 + 58.283955*m.x226 + 45.80124*m.x227 + 473.90518*m.x228 + 473.90518*m.x229
- 5.44359*m.x230 - m.x317 + m.x349 == 0)
m.c32 = Constraint(expr= - 18.176585*m.x220 - 90.60136*m.x221 + 31.78556*m.x222 + 55.0616*m.x223 - 18.176585*m.x224
+ 31.78556*m.x225 - 1.18883*m.x226 + 106.74442*m.x227 + 577.802665*m.x228 + 577.802665*m.x229
+ 57.845965*m.x230 - m.x318 + m.x350 == 0)
m.c33 = Constraint(expr= - 21.71179*m.x220 + 115.097515*m.x221 - 7.602255*m.x222 - 46.207945*m.x223 - 21.71179*m.x224
- 7.602255*m.x225 - 88.943255*m.x226 + 71.548795*m.x227 - 29.814605*m.x228 - 29.814605*m.x229
- 55.530875*m.x230 - m.x319 + m.x351 == 0)
m.c34 = Constraint(expr= 8.57209*m.x220 + 96.138805*m.x221 + 63.101845*m.x222 - 9.01008*m.x223 + 8.57209*m.x224
+ 63.101845*m.x225 + 38.91854*m.x226 - 116.286345*m.x227 - 430.4816*m.x228 - 430.4816*m.x229
+ 44.11185*m.x230 - m.x320 + m.x352 == 0)
m.c35 = Constraint(expr= 5.725155*m.x220 + 126.45397*m.x221 - 4.536325*m.x222 + 29.72075*m.x223 + 5.725155*m.x224
- 4.536325*m.x225 - 21.242515*m.x226 - 53.716345*m.x227 + 591.9122*m.x228 + 591.9122*m.x229
- 29.65818*m.x230 - m.x321 + m.x353 == 0)
m.c36 = Constraint(expr= - 25.121855*m.x220 + 245.993955*m.x221 - 72.14321*m.x222 - 54.905175*m.x223 - 25.121855*m.x224
- 72.14321*m.x225 - 160.648475*m.x226 - 79.27619*m.x227 - 67.231465*m.x228 - 67.231465*m.x229
- 63.539835*m.x230 - m.x322 + m.x354 == 0)
m.c37 = Constraint(expr= 5.287165*m.x220 - 310.753905*m.x221 - 105.18017*m.x222 + 71.110805*m.x223 + 5.287165*m.x224
- 105.18017*m.x225 + 160.11663*m.x226 + 42.92302*m.x227 - 414.2134*m.x228 - 414.2134*m.x229
+ 57.25155*m.x230 - m.x323 + m.x355 == 0)
m.c38 = Constraint(expr= - 75*m.x7 - 75*m.x8 - 75*m.x9 - 75*m.x10 - 75*m.x11 - 75*m.x12 - 75*m.x13 - 75*m.x14 - 75*m.x15
- 75*m.x16 - 75*m.x17 - 75*m.x18 - 20*m.x31 - 20*m.x32 - 20*m.x33 - 20*m.x34 - 20*m.x35
- 20*m.x36 - 20*m.x37 - 20*m.x38 - 20*m.x39 - 20*m.x40 - 20*m.x41 - 20*m.x42 + m.x115
- 183.268417301075*m.x117 - 183.268417301075*m.x118 - 163.43261191348*m.x119
- 163.43261191348*m.x120 - 163.43261191348*m.x121 - 163.43261191348*m.x122 - 60.966*m.x123
- 60.966*m.x124 - 30.096*m.x125 - 30.096*m.x126 - 254.360840590973*m.x127 - 180.6012*m.x129
- 180.6012*m.x130 - 109.002*m.x131 - 109.002*m.x132 - 16.8*m.x133 - 16.8*m.x134
- 504.114*m.x135 - 504.114*m.x136 - 504.114*m.x137 - 504.114*m.x138 - 155.269590643677*m.x139
- 119.649644624064*m.x140 - 140.349836320895*m.x141 - 119.649644624064*m.x142
- 143.169496895445*m.x143 - 111.632543664972*m.x144 - 129.513271502313*m.x145
- 111.632543664972*m.x146 - 155.269590643677*m.x147 - 119.649644624064*m.x148
- 140.349836320895*m.x149 - 119.649644624064*m.x150 - 155.269590643677*m.x151
- 119.649644624064*m.x152 - 140.349836320895*m.x153 - 119.649644624064*m.x154
- 143.169496895445*m.x155 - 111.632543664972*m.x156 - 129.513271502313*m.x157
- 111.632543664972*m.x158 - 155.269590643677*m.x159 - 119.649644624064*m.x160
- 140.349836320895*m.x161 - 119.649644624064*m.x162 - 2.25*m.x237 - 2.25*m.x238 - 0.75*m.x241
- 0.75*m.x242 - 0.75*m.x243 - 1.5*m.x244 - 1.5*m.x245 - 0.75*m.x246 - 0.75*m.x247 - 0.75*m.x248
- 0.75*m.x249 - 1.5*m.x250 - 1.5*m.x251 - 0.75*m.x252 - 5300*m.x265 - 15890*m.x266 == 0)
m.c39 = Constraint(expr= - 75*m.x19 - 75*m.x20 - 75*m.x21 - 75*m.x22 - 75*m.x23 - 75*m.x24 - 75*m.x25 - 75*m.x26
- 75*m.x27 - 75*m.x28 - 75*m.x29 - 75*m.x30 - 20*m.x43 - 20*m.x44 - 20*m.x45 - 20*m.x46
- 20*m.x47 - 20*m.x48 - 20*m.x49 - 20*m.x50 - 20*m.x51 - 20*m.x52 - 20*m.x53 - 20*m.x54
+ m.x116 - 183.268417301075*m.x163 - 183.268417301075*m.x164 - 163.43261191348*m.x165
- 163.43261191348*m.x166 - 163.43261191348*m.x167 - 163.43261191348*m.x168 - 60.966*m.x169
- 60.966*m.x170 - 30.096*m.x171 - 30.096*m.x172 - 254.360840590973*m.x173 - 180.6012*m.x175
- 180.6012*m.x176 - 109.002*m.x177 - 109.002*m.x178 - 16.8*m.x179 - 16.8*m.x180
- 504.114*m.x181 - 504.114*m.x182 - 504.114*m.x183 - 504.114*m.x184 - 155.269590643677*m.x185
- 119.649644624064*m.x186 - 140.349836320895*m.x187 - 119.649644624064*m.x188
- 143.169496895445*m.x189 - 111.632543664972*m.x190 - 129.513271502313*m.x191
- 111.632543664972*m.x192 - 155.269590643677*m.x193 - 119.649644624064*m.x194
- 140.349836320895*m.x195 - 119.649644624064*m.x196 - 155.269590643677*m.x197
- 119.649644624064*m.x198 - 140.349836320895*m.x199 - 119.649644624064*m.x200
- 143.169496895445*m.x201 - 111.632543664972*m.x202 - 129.513271502313*m.x203
- 111.632543664972*m.x204 - 155.269590643677*m.x205 - 119.649644624064*m.x206
- 140.349836320895*m.x207 - 119.649644624064*m.x208 - 2.25*m.x239 - 2.25*m.x240 - 0.75*m.x253
- 0.75*m.x254 - 0.75*m.x255 - 1.5*m.x256 - 1.5*m.x257 - 0.75*m.x258 - 0.75*m.x259 - 0.75*m.x260
- 0.75*m.x261 - 1.5*m.x262 - 1.5*m.x263 - 0.75*m.x264 - 15890*m.x267 == 0)
m.c40 = Constraint(expr= - m.x55 + 854.719106519546*m.x117 + 854.719106519546*m.x118 == 0)
m.c41 = Constraint(expr= - m.x56 + 967.287547768436*m.x119 + 967.287547768436*m.x120 + 967.287547768436*m.x121
+ 967.287547768436*m.x122 == 0)
m.c42 = Constraint(expr= - m.x57 - m.x73 + m.x85 + 452.573*m.x125 + 452.573*m.x126 == 0)
m.c43 = Constraint(expr= - m.x58 - m.x74 + m.x86 + 1772.19713847098*m.x127 == 0)
m.c44 = Constraint(expr= - m.x59 - m.x75 + m.x87 + 822.86*m.x131 + 822.86*m.x132 == 0)
m.c45 = Constraint(expr= - m.x60 - m.x76 + m.x88 + 493.716*m.x133 + 493.716*m.x134 == 0)
m.c46 = Constraint(expr= - m.x61 + 38063.9849154703*m.x135 + 38063.9849154703*m.x136 == 0)
m.c47 = Constraint(expr= - m.x62 - m.x77 + m.x89 + 3045.11879323763*m.x137 + 3045.11879323763*m.x138 == 0)
m.c48 = Constraint(expr= - m.x63 - m.x78 + m.x90 + 1736.62555730054*m.x139 + 1125.73553711019*m.x140
+ 1479.20467962168*m.x141 + 1125.73553711019*m.x142 + 1530.48508703852*m.x143
+ 987.97116168261*m.x144 + 1297.68854460025*m.x145 + 987.97116168261*m.x146
+ 1736.62555730054*m.x147 + 1125.73553711019*m.x148 + 1479.20467962168*m.x149
+ 1125.73553711019*m.x150 + 1736.62555730054*m.x151 + 1125.73553711019*m.x152
+ 1479.20467962168*m.x153 + 1125.73553711019*m.x154 + 1530.48508703852*m.x155
+ 987.97116168261*m.x156 + 1297.68854460025*m.x157 + 987.97116168261*m.x158
+ 1736.62555730054*m.x159 + 1125.73553711019*m.x160 + 1479.20467962168*m.x161
+ 1125.73553711019*m.x162 == 0)
m.c49 = Constraint(expr= - m.x64 + 854.719106519546*m.x163 + 854.719106519546*m.x164 == 0)
m.c50 = Constraint(expr= - m.x65 + 967.287547768436*m.x165 + 967.287547768436*m.x166 + 967.287547768436*m.x167
+ 967.287547768436*m.x168 == 0)
m.c51 = Constraint(expr= - m.x66 - m.x79 + m.x91 + 452.573*m.x171 + 452.573*m.x172 == 0)
m.c52 = Constraint(expr= - m.x67 - m.x80 + m.x92 + 1772.19713847098*m.x173 == 0)
m.c53 = Constraint(expr= - m.x68 - m.x81 + m.x93 + 822.86*m.x177 + 822.86*m.x178 == 0)
m.c54 = Constraint(expr= - m.x69 - m.x82 + m.x94 + 493.716*m.x179 + 493.716*m.x180 == 0)
m.c55 = Constraint(expr= - m.x70 + 38063.9849154703*m.x181 + 38063.9849154703*m.x182 == 0)
m.c56 = Constraint(expr= - m.x71 - m.x83 + m.x95 + 3045.11879323763*m.x183 + 3045.11879323763*m.x184 == 0)
m.c57 = Constraint(expr= - m.x72 - m.x84 + m.x96 + 1736.62555730054*m.x185 + 1125.73553711019*m.x186
+ 1479.20467962168*m.x187 + 1125.73553711019*m.x188 + 1530.48508703852*m.x189
+ 987.97116168261*m.x190 + 1297.68854460025*m.x191 + 987.97116168261*m.x192
+ 1736.62555730054*m.x193 + 1125.73553711019*m.x194 + 1479.20467962168*m.x195
+ 1125.73553711019*m.x196 + 1736.62555730054*m.x197 + 1125.73553711019*m.x198
+ 1479.20467962168*m.x199 + 1125.73553711019*m.x200 + 1530.48508703852*m.x201
+ 987.97116168261*m.x202 + 1297.68854460025*m.x203 + 987.97116168261*m.x204
+ 1736.62555730054*m.x205 + 1125.73553711019*m.x206 + 1479.20467962168*m.x207
+ 1125.73553711019*m.x208 == 0)
m.c58 = Constraint(expr= - m.x97 - m.x103 + m.x109 + 750*m.x232 == 0)
m.c59 = Constraint(expr= - m.x98 - m.x104 + m.x110 + 550*m.x233 == 0)
m.c60 = Constraint(expr= - m.x99 - m.x105 + m.x111 + 55.4*m.x231 + 35.2*m.x232 + 26*m.x233 == 0)
m.c61 = Constraint(expr= - m.x100 - m.x106 + m.x112 + 750*m.x235 == 0)
m.c62 = Constraint(expr= - m.x101 - m.x107 + m.x113 + 550*m.x236 == 0)
m.c63 = Constraint(expr= - m.x102 - m.x108 + m.x114 + 55.4*m.x234 + 35.2*m.x235 + 26*m.x236 == 0)
m.c64 = Constraint(expr= - 0.0254473333333333*m.x3 + m.x73 >= 1590.45712440009)
m.c65 = Constraint(expr= - 0.0152031340206186*m.x3 + m.x74 >= 36928.459489312)
m.c66 = Constraint(expr= - 0.00602046666666667*m.x3 + m.x75 >= 11522.9955108483)
m.c67 = Constraint(expr= - 0.0214065347222222*m.x3 + m.x76 >= -4471.05545332801)
m.c68 = Constraint(expr= - 0.0638093076923077*m.x3 + m.x77 >= -3933.32854216354)
m.c69 = Constraint(expr= - 0.1485876*m.x3 + m.x78 >= 138311.983715238)
m.c70 = Constraint(expr= - 0.0254473333333333*m.x4 + m.x79 >= 681.624481885752)
m.c71 = Constraint(expr= - 0.0152031340206186*m.x4 + m.x80 >= 15826.4826382766)
m.c72 = Constraint(expr= - 0.00602046666666667*m.x4 + m.x81 >= 4938.42664750639)
m.c73 = Constraint(expr= - 0.0214065347222222*m.x4 + m.x82 >= -1916.16662285486)
m.c74 = Constraint(expr= - 0.0638093076923077*m.x4 + m.x83 >= -1685.7122323558)
m.c75 = Constraint(expr= - 0.1485876*m.x4 + m.x84 >= 59276.5644493876)
m.c76 = Constraint(expr= - 0.119026488*m.x3 + m.x103 >= -20137.6590455391)
m.c77 = Constraint(expr= - 0.1043406*m.x3 + m.x104 >= -20268.5473801287)
m.c78 = Constraint(expr= - 0.007127736*m.x3 + m.x105 >= 2011.33342770906)
m.c79 = Constraint(expr= - 0.119026488*m.x4 + m.x106 >= -8630.42530523105)
m.c80 = Constraint(expr= - 0.1043406*m.x4 + m.x107 >= -8686.52030576945)
m.c81 = Constraint(expr= - 0.007127736*m.x4 + m.x108 >= 862.000040446738)
m.c82 = Constraint(expr= - 691.2024*m.x117 - 691.2024*m.x118 - 691.2024*m.x119 - 691.2024*m.x120 - 691.2024*m.x121
- 691.2024*m.x122 - 409.78428*m.x125 - 409.78428*m.x126 - 691.2024*m.x127 - 691.2024*m.x128
- 2212.999684*m.x129 - 2212.999684*m.x130 - 123.429*m.x133 - 123.429*m.x134 - 691.2024*m.x135
- 691.2024*m.x136 - 691.2024*m.x137 - 691.2024*m.x138 - 1173.5118246115*m.x139
- 760.515992627875*m.x140 - 997.603156023924*m.x141 - 760.515992627875*m.x142
- 1031.78769912709*m.x143 - 670.102179228205*m.x144 - 876.584314895534*m.x145
- 670.102179228205*m.x146 - 1173.5118246115*m.x147 - 760.515992627875*m.x148
- 997.603156023924*m.x149 - 760.515992627875*m.x150 - 1173.5118246115*m.x151
- 760.515992627875*m.x152 - 997.603156023924*m.x153 - 760.515992627875*m.x154
- 1031.78769912709*m.x155 - 670.102179228205*m.x156 - 876.584314895534*m.x157
- 670.102179228205*m.x158 - 1173.5118246115*m.x159 - 760.515992627875*m.x160
- 997.603156023924*m.x161 - 760.515992627875*m.x162 + 2800*m.x231 + 2300*m.x232 + 1500*m.x233
<= 0)
m.c83 = Constraint(expr= - 997.17229093947*m.x117 - 997.17229093947*m.x118 - 5425.93884*m.x123 - 5425.93884*m.x124
- 1432.61445325499*m.x127 - 1234.29*m.x131 - 1234.29*m.x132 - 1205.54675394556*m.x135
- 1205.54675394556*m.x136 - 1205.54675394556*m.x137 - 1205.54675394556*m.x138 - 345.6012*m.x139
- 224.64078*m.x140 - 293.76102*m.x141 - 224.64078*m.x142 - 304.129056*m.x143
- 196.992684*m.x144 - 258.048896*m.x145 - 196.992684*m.x146 - 345.6012*m.x147
- 224.64078*m.x148 - 293.76102*m.x149 - 224.64078*m.x150 - 345.6012*m.x151 - 224.64078*m.x152
- 293.76102*m.x153 - 224.64078*m.x154 - 304.129056*m.x155 - 196.992684*m.x156
- 258.048896*m.x157 - 196.992684*m.x158 - 345.6012*m.x159 - 224.64078*m.x160 - 293.76102*m.x161
- 224.64078*m.x162 + 2800*m.x231 + 2300*m.x232 + 1500*m.x233 <= 0)
m.c84 = Constraint(expr= - 691.2024*m.x163 - 691.2024*m.x164 - 691.2024*m.x165 - 691.2024*m.x166 - 691.2024*m.x167
- 691.2024*m.x168 - 409.78428*m.x171 - 409.78428*m.x172 - 691.2024*m.x173 - 691.2024*m.x174
- 2212.999684*m.x175 - 2212.999684*m.x176 - 123.429*m.x179 - 123.429*m.x180 - 691.2024*m.x181
- 691.2024*m.x182 - 691.2024*m.x183 - 691.2024*m.x184 - 1173.5118246115*m.x185
- 760.515992627875*m.x186 - 997.603156023924*m.x187 - 760.515992627875*m.x188
- 1031.78769912709*m.x189 - 670.102179228205*m.x190 - 876.584314895534*m.x191
- 670.102179228205*m.x192 - 1173.5118246115*m.x193 - 760.515992627875*m.x194
- 997.603156023924*m.x195 - 760.515992627875*m.x196 - 1173.5118246115*m.x197
- 760.515992627875*m.x198 - 997.603156023924*m.x199 - 760.515992627875*m.x200
- 1031.78769912709*m.x201 - 670.102179228205*m.x202 - 876.584314895534*m.x203
- 670.102179228205*m.x204 - 1173.5118246115*m.x205 - 760.515992627875*m.x206
- 997.603156023924*m.x207 - 760.515992627875*m.x208 + 2800*m.x234 + 2300*m.x235 + 1500*m.x236
<= 0)
m.c85 = Constraint(expr= - 997.17229093947*m.x163 - 997.17229093947*m.x164 - 5425.93884*m.x169 - 5425.93884*m.x170
- 1432.61445325499*m.x173 - 1234.29*m.x177 - 1234.29*m.x178 - 1205.54675394556*m.x181
- 1205.54675394556*m.x182 - 1205.54675394556*m.x183 - 1205.54675394556*m.x184 - 345.6012*m.x185
- 224.64078*m.x186 - 293.76102*m.x187 - 224.64078*m.x188 - 304.129056*m.x189
- 196.992684*m.x190 - 258.048896*m.x191 - 196.992684*m.x192 - 345.6012*m.x193
- 224.64078*m.x194 - 293.76102*m.x195 - 224.64078*m.x196 - 345.6012*m.x197 - 224.64078*m.x198
- 293.76102*m.x199 - 224.64078*m.x200 - 304.129056*m.x201 - 196.992684*m.x202
- 258.048896*m.x203 - 196.992684*m.x204 - 345.6012*m.x205 - 224.64078*m.x206 - 293.76102*m.x207
- 224.64078*m.x208 + 2800*m.x234 + 2300*m.x235 + 1500*m.x236 <= 0)
m.c86 = Constraint(expr= - 83.93172*m.x117 - 83.93172*m.x118 - 83.93172*m.x119 - 83.93172*m.x120 - 83.93172*m.x121
- 83.93172*m.x122 - 68.29738*m.x125 - 68.29738*m.x126 - 83.93172*m.x127 - 83.93172*m.x128
- 268.7213902*m.x129 - 268.7213902*m.x130 - 1.23429*m.x133 - 1.23429*m.x134 - 83.93172*m.x135
- 83.93172*m.x136 - 83.93172*m.x137 - 83.93172*m.x138 - 11.735118246115*m.x139
- 7.60515992627875*m.x140 - 9.97603156023924*m.x141 - 7.60515992627875*m.x142
- 10.3178769912709*m.x143 - 6.70102179228205*m.x144 - 8.76584314895534*m.x145
- 6.70102179228205*m.x146 - 11.735118246115*m.x147 - 7.60515992627875*m.x148
- 9.97603156023924*m.x149 - 7.60515992627875*m.x150 - 11.735118246115*m.x151
- 7.60515992627875*m.x152 - 9.97603156023924*m.x153 - 7.60515992627875*m.x154
- 10.3178769912709*m.x155 - 6.70102179228205*m.x156 - 8.76584314895534*m.x157
- 6.70102179228205*m.x158 - 11.735118246115*m.x159 - 7.60515992627875*m.x160
- 9.97603156023924*m.x161 - 7.60515992627875*m.x162 + 256*m.x231 + 210*m.x232 + 135*m.x233
- m.x237 <= 0)
m.c87 = Constraint(expr= - 9.97172290939471*m.x117 - 9.97172290939471*m.x118 - 968.91765*m.x123 - 968.91765*m.x124
- 14.3261445325499*m.x127 - 246.858*m.x131 - 246.858*m.x132 - 67.2583762925165*m.x135
- 67.2583762925165*m.x136 - 67.2583762925165*m.x137 - 67.2583762925165*m.x138 - 41.96586*m.x139
- 27.277809*m.x140 - 35.670981*m.x141 - 27.277809*m.x142 - 36.9299568*m.x143
- 23.9205402*m.x144 - 31.3345088*m.x145 - 23.9205402*m.x146 - 41.96586*m.x147
- 27.277809*m.x148 - 35.670981*m.x149 - 27.277809*m.x150 - 41.96586*m.x151 - 27.277809*m.x152
- 35.670981*m.x153 - 27.277809*m.x154 - 36.9299568*m.x155 - 23.9205402*m.x156
- 31.3345088*m.x157 - 23.9205402*m.x158 - 41.96586*m.x159 - 27.277809*m.x160 - 35.670981*m.x161
- 27.277809*m.x162 + 256*m.x231 + 210*m.x232 + 135*m.x233 - m.x238 <= 0)
m.c88 = Constraint(expr= - 83.93172*m.x163 - 83.93172*m.x164 - 83.93172*m.x165 - 83.93172*m.x166 - 83.93172*m.x167
- 83.93172*m.x168 - 68.29738*m.x171 - 68.29738*m.x172 - 83.93172*m.x173 - 83.93172*m.x174
- 268.7213902*m.x175 - 268.7213902*m.x176 - 1.23429*m.x179 - 1.23429*m.x180 - 83.93172*m.x181
- 83.93172*m.x182 - 83.93172*m.x183 - 83.93172*m.x184 - 11.735118246115*m.x185
- 7.60515992627875*m.x186 - 9.97603156023924*m.x187 - 7.60515992627875*m.x188
- 10.3178769912709*m.x189 - 6.70102179228205*m.x190 - 8.76584314895534*m.x191
- 6.70102179228205*m.x192 - 11.735118246115*m.x193 - 7.60515992627875*m.x194
- 9.97603156023924*m.x195 - 7.60515992627875*m.x196 - 11.735118246115*m.x197
- 7.60515992627875*m.x198 - 9.97603156023924*m.x199 - 7.60515992627875*m.x200
- 10.3178769912709*m.x201 - 6.70102179228205*m.x202 - 8.76584314895534*m.x203
- 6.70102179228205*m.x204 - 11.735118246115*m.x205 - 7.60515992627875*m.x206
- 9.97603156023924*m.x207 - 7.60515992627875*m.x208 + 256*m.x234 + 210*m.x235 + 135*m.x236
- m.x239 <= 0)
m.c89 = Constraint(expr= - 9.97172290939471*m.x163 - 9.97172290939471*m.x164 - 968.91765*m.x169 - 968.91765*m.x170
- 14.3261445325499*m.x173 - 246.858*m.x177 - 246.858*m.x178 - 67.2583762925165*m.x181
- 67.2583762925165*m.x182 - 67.2583762925165*m.x183 - 67.2583762925165*m.x184 - 41.96586*m.x185
- 27.277809*m.x186 - 35.670981*m.x187 - 27.277809*m.x188 - 36.9299568*m.x189
- 23.9205402*m.x190 - 31.3345088*m.x191 - 23.9205402*m.x192 - 41.96586*m.x193
- 27.277809*m.x194 - 35.670981*m.x195 - 27.277809*m.x196 - 41.96586*m.x197 - 27.277809*m.x198
- 35.670981*m.x199 - 27.277809*m.x200 - 36.9299568*m.x201 - 23.9205402*m.x202
- 31.3345088*m.x203 - 23.9205402*m.x204 - 41.96586*m.x205 - 27.277809*m.x206 - 35.670981*m.x207
- 27.277809*m.x208 + 256*m.x234 + 210*m.x235 + 135*m.x236 - m.x240 <= 0)
m.c90 = Constraint(expr= - 691.2024*m.x117 - 691.2024*m.x118 - 691.2024*m.x119 - 691.2024*m.x120 - 691.2024*m.x121
- 691.2024*m.x122 - 691.2024*m.x127 - 691.2024*m.x128 - 2212.999684*m.x129 - 2212.999684*m.x130
- 691.2024*m.x135 - 691.2024*m.x136 - 691.2024*m.x137 - 691.2024*m.x138 + 840*m.x231
+ 690*m.x232 + 450*m.x233 <= 0)
m.c91 = Constraint(expr= - 5425.93884*m.x123 - 5425.93884*m.x124 - 345.6012*m.x135 - 345.6012*m.x136 - 345.6012*m.x137
- 345.6012*m.x138 - 345.6012*m.x139 - 224.64078*m.x140 - 293.76102*m.x141 - 224.64078*m.x142
- 304.129056*m.x143 - 196.992684*m.x144 - 258.048896*m.x145 - 196.992684*m.x146
- 345.6012*m.x147 - 224.64078*m.x148 - 293.76102*m.x149 - 224.64078*m.x150 - 345.6012*m.x151
- 224.64078*m.x152 - 293.76102*m.x153 - 224.64078*m.x154 - 304.129056*m.x155
- 196.992684*m.x156 - 258.048896*m.x157 - 196.992684*m.x158 - 345.6012*m.x159
- 224.64078*m.x160 - 293.76102*m.x161 - 224.64078*m.x162 + 840*m.x231 + 690*m.x232 + 450*m.x233
<= 0)
m.c92 = Constraint(expr= - 691.2024*m.x163 - 691.2024*m.x164 - 691.2024*m.x165 - 691.2024*m.x166 - 691.2024*m.x167
- 691.2024*m.x168 - 691.2024*m.x173 - 691.2024*m.x174 - 2212.999684*m.x175 - 2212.999684*m.x176
- 691.2024*m.x181 - 691.2024*m.x182 - 691.2024*m.x183 - 691.2024*m.x184 + 840*m.x234
+ 690*m.x235 + 450*m.x236 <= 0)
m.c93 = Constraint(expr= - 5425.93884*m.x169 - 5425.93884*m.x170 - 345.6012*m.x181 - 345.6012*m.x182 - 345.6012*m.x183
- 345.6012*m.x184 - 345.6012*m.x185 - 224.64078*m.x186 - 293.76102*m.x187 - 224.64078*m.x188
- 304.129056*m.x189 - 196.992684*m.x190 - 258.048896*m.x191 - 196.992684*m.x192
- 345.6012*m.x193 - 224.64078*m.x194 - 293.76102*m.x195 - 224.64078*m.x196 - 345.6012*m.x197
- 224.64078*m.x198 - 293.76102*m.x199 - 224.64078*m.x200 - 304.129056*m.x201
- 196.992684*m.x202 - 258.048896*m.x203 - 196.992684*m.x204 - 345.6012*m.x205
- 224.64078*m.x206 - 293.76102*m.x207 - 224.64078*m.x208 + 840*m.x234 + 690*m.x235 + 450*m.x236
<= 0)
m.c94 = Constraint(expr= 2*m.x123 + m.x133 + 16.85*m.x135 + 17.85*m.x137 + 11*m.x138 - 96*m.x231 <= 0)
m.c95 = Constraint(expr= 4*m.x123 + m.x133 + 15.1*m.x135 + 2.5*m.x136 + 11.6*m.x137 + 6.5*m.x138 - 96*m.x231 <= 0)
m.c96 = Constraint(expr= 4*m.x123 + 7*m.x125 + 16*m.x129 + m.x133 + 15*m.x135 + 8.2*m.x137 + 2.5*m.x138 - 96*m.x231
<= 0)
m.c97 = Constraint(expr= 16*m.x120 + 2*m.x123 + m.x129 + 12*m.x135 + 8.4*m.x139 + 5.5*m.x140 + 7.1*m.x141 + 5.5*m.x142
+ 7.4*m.x143 + 4.8*m.x144 + 6.3*m.x145 + 4.8*m.x146 + 16.8*m.x147 + 10.9*m.x148 + 14.3*m.x149
+ 10.9*m.x150 + 5.9*m.x151 + 3.8*m.x152 + 5*m.x153 + 3.8*m.x154 + 5.2*m.x155 + 3.4*m.x156
+ 4.4*m.x157 + 3.4*m.x158 + 11.8*m.x159 + 7.7*m.x160 + 10*m.x161 + 7.7*m.x162 - 96*m.x231 <= 0)
m.c98 = Constraint(expr= 17.09*m.x119 + 4*m.x120 + 2*m.x123 + m.x129 + 8*m.x135 + 1.75*m.x136 + 1.75*m.x137
+ 1.75*m.x138 + 8.4*m.x139 + 5.5*m.x140 + 7.1*m.x141 + 5.5*m.x142 + 7.4*m.x143 + 4.8*m.x144
+ 6.3*m.x145 + 4.8*m.x146 + 5.9*m.x151 + 3.8*m.x152 + 5*m.x153 + 3.8*m.x154 + 5.2*m.x155
+ 3.4*m.x156 + 4.4*m.x157 + 3.4*m.x158 - 77*m.x231 <= 0)
m.c99 = Constraint(expr= 22*m.x117 + 15.2*m.x119 + 12.3*m.x120 + 18.9*m.x127 + 16*m.x129 + 10.8*m.x131 + 1.75*m.x135
+ 1.75*m.x136 + 1.75*m.x137 + 1.75*m.x138 - 77*m.x231 <= 0)
m.c100 = Constraint(expr= 17.2*m.x117 + 19.2*m.x127 + 1.5*m.x129 + 4.5*m.x131 - 96*m.x231 <= 0)
m.c101 = Constraint(expr= 15*m.x129 + 14.2*m.x131 + 3*m.x132 - 96*m.x231 <= 0)
m.c102 = Constraint(expr= m.x119 + m.x120 + 10.1*m.x123 + 10.8*m.x125 + m.x129 - 96*m.x231 <= 0)
m.c103 = Constraint(expr= m.x119 + m.x120 + 13.01*m.x123 + 5.6*m.x125 + 0.5*m.x129 + 10.8*m.x133 + 18.6*m.x139
+ 18.6*m.x140 + 18.6*m.x141 + 18.6*m.x142 + 18.6*m.x147 + 18.6*m.x148 + 18.6*m.x149
+ 18.6*m.x150 - 96*m.x231 <= 0)
m.c104 = Constraint(expr= 2*m.x117 + m.x119 + m.x120 + 7.6*m.x123 + 1.5*m.x127 + 10.1*m.x133 + 10*m.x135 + 11.5*m.x137
+ 11.5*m.x138 + 20.6*m.x139 + 20.6*m.x140 + 20.6*m.x141 + 20.6*m.x142 + 39.2*m.x143
+ 39.2*m.x144 + 39.2*m.x145 + 39.2*m.x146 + 20.6*m.x147 + 20.6*m.x148 + 20.6*m.x149
+ 20.6*m.x150 - 96*m.x231 <= 0)
m.c105 = Constraint(expr= 15.6*m.x117 + 13.6*m.x118 + m.x119 + m.x120 + 2*m.x123 + 18.4*m.x127 + 16.4*m.x128
+ 5*m.x131 + m.x133 + 12.5*m.x135 + 13.5*m.x137 + 11*m.x138 - 96*m.x231 <= 0)
m.c106 = Constraint(expr= 2*m.x169 + m.x179 + 16.85*m.x181 + 17.85*m.x183 + 11*m.x184 - 96*m.x234 <= 0)
m.c107 = Constraint(expr= 4*m.x169 + m.x179 + 15.1*m.x181 + 2.5*m.x182 + 11.6*m.x183 + 6.5*m.x184 - 96*m.x234 <= 0)
m.c108 = Constraint(expr= 4*m.x169 + 7*m.x171 + 16*m.x175 + m.x179 + 15*m.x181 + 8.2*m.x183 + 2.5*m.x184 - 96*m.x234
<= 0)
m.c109 = Constraint(expr= 16*m.x166 + 2*m.x169 + m.x175 + 12*m.x181 + 8.4*m.x185 + 5.5*m.x186 + 7.1*m.x187
+ 5.5*m.x188 + 7.4*m.x189 + 4.8*m.x190 + 6.3*m.x191 + 4.8*m.x192 + 16.8*m.x193 + 10.9*m.x194
+ 14.3*m.x195 + 10.9*m.x196 + 5.9*m.x197 + 3.8*m.x198 + 5*m.x199 + 3.8*m.x200 + 5.2*m.x201
+ 3.4*m.x202 + 4.4*m.x203 + 3.4*m.x204 + 11.8*m.x205 + 7.7*m.x206 + 10*m.x207 + 7.7*m.x208
- 96*m.x234 <= 0)
m.c110 = Constraint(expr= 17.09*m.x165 + 4*m.x166 + 2*m.x169 + m.x175 + 8*m.x181 + 1.75*m.x182 + 1.75*m.x183
+ 1.75*m.x184 + 8.4*m.x185 + 5.5*m.x186 + 7.1*m.x187 + 5.5*m.x188 + 7.4*m.x189 + 4.8*m.x190
+ 6.3*m.x191 + 4.8*m.x192 + 5.9*m.x197 + 3.8*m.x198 + 5*m.x199 + 3.8*m.x200 + 5.2*m.x201
+ 3.4*m.x202 + 4.4*m.x203 + 3.4*m.x204 - 77*m.x234 <= 0)
m.c111 = Constraint(expr= 22*m.x163 + 15.2*m.x165 + 12.3*m.x166 + 18.9*m.x173 + 16*m.x175 + 10.8*m.x177 + 1.75*m.x181
+ 1.75*m.x182 + 1.75*m.x183 + 1.75*m.x184 - 77*m.x234 <= 0)
m.c112 = Constraint(expr= 17.2*m.x163 + 19.2*m.x173 + 1.5*m.x175 + 4.5*m.x177 - 96*m.x234 <= 0)
m.c113 = Constraint(expr= 15*m.x175 + 14.2*m.x177 + 3*m.x178 - 96*m.x234 <= 0)
m.c114 = Constraint(expr= m.x165 + m.x166 + 10.1*m.x169 + 10.8*m.x171 + m.x175 - 96*m.x234 <= 0)
m.c115 = Constraint(expr= m.x165 + m.x166 + 13.01*m.x169 + 5.6*m.x171 + 0.5*m.x175 + 10.8*m.x179 + 18.6*m.x185
+ 18.6*m.x186 + 18.6*m.x187 + 18.6*m.x188 + 18.6*m.x193 + 18.6*m.x194 + 18.6*m.x195
+ 18.6*m.x196 - 96*m.x234 <= 0)
m.c116 = Constraint(expr= 2*m.x163 + m.x165 + m.x166 + 7.6*m.x169 + 1.5*m.x173 + 10.1*m.x179 + 10*m.x181 + 11.5*m.x183
+ 11.5*m.x184 + 20.6*m.x185 + 20.6*m.x186 + 20.6*m.x187 + 20.6*m.x188 + 39.2*m.x189
+ 39.2*m.x190 + 39.2*m.x191 + 39.2*m.x192 + 20.6*m.x193 + 20.6*m.x194 + 20.6*m.x195
+ 20.6*m.x196 - 96*m.x234 <= 0)
m.c117 = Constraint(expr= 15.6*m.x163 + 13.6*m.x164 + m.x165 + m.x166 + 2*m.x169 + 18.4*m.x173 + 16.4*m.x174
+ 5*m.x177 + m.x179 + 12.5*m.x181 + 13.5*m.x183 + 11*m.x184 - 96*m.x234 <= 0)
m.c118 = Constraint(expr= m.x231 - 1.25*m.x233 <= 0)
m.c119 = Constraint(expr= m.x234 - 1.25*m.x236 <= 0)
m.c120 = Constraint(expr= - m.x31 + 1.5*m.x124 + m.x134 + 6.4*m.x136 + 2.4*m.x138 == 0)
m.c121 = Constraint(expr= - m.x32 + 1.5*m.x124 + m.x134 + 6.6*m.x136 + 0.6*m.x138 == 0)
m.c122 = Constraint(expr= - m.x33 + 1.5*m.x124 + 3.7*m.x126 + 2.5*m.x130 + m.x134 + 5.2*m.x136 + 0.7*m.x138 == 0)
m.c123 = Constraint(expr= - m.x34 + 1.5*m.x124 + 0.5*m.x130 + 4.5*m.x136 + 0.7*m.x151 + 0.5*m.x152 + 0.6*m.x153
+ 0.5*m.x154 + 0.6*m.x155 + 0.4*m.x156 + 0.5*m.x157 + 0.4*m.x158 + 1.5*m.x159 + m.x160
+ 1.3*m.x161 + m.x162 == 0)
m.c124 = Constraint(expr= - m.x35 + 2.5*m.x121 + 4.7*m.x122 + 1.5*m.x124 + 1.8*m.x130 + 4*m.x136 + 0.8*m.x151
+ 0.5*m.x152 + 0.7*m.x153 + 0.5*m.x154 + 0.7*m.x155 + 0.4*m.x156 + 0.6*m.x157 + 0.4*m.x158
== 0)
m.c125 = Constraint(expr= - m.x36 + 2.7*m.x118 + 2.2*m.x121 + 2.6*m.x128 + m.x130 == 0)
m.c126 = Constraint(expr= - m.x37 + 1.7*m.x118 + 2.2*m.x128 + 2.2*m.x130 + 1.8*m.x132 == 0)
m.c127 = Constraint(expr= - m.x38 + 0.5*m.x122 + 1.5*m.x130 + 1.6*m.x132 == 0)
m.c128 = Constraint(expr= - m.x39 + 0.5*m.x121 + 0.5*m.x122 + 1.3*m.x124 + 1.3*m.x126 + m.x130 == 0)
m.c129 = Constraint(expr= - m.x40 + 0.5*m.x121 + 0.5*m.x122 + 2*m.x124 + 0.8*m.x126 + 0.5*m.x130 + 1.3*m.x134 == 0)
m.c130 = Constraint(expr= - m.x41 + 1.5*m.x118 + 0.5*m.x121 + 0.5*m.x122 + 2.3*m.x124 + m.x128 + 1.3*m.x134 + 5.8*m.x136
+ 7.6*m.x151 + 7.6*m.x152 + 7.6*m.x153 + 7.6*m.x154 + 4*m.x155 + 4*m.x156 + 4*m.x157
+ 4*m.x158 + 7.6*m.x159 + 7.6*m.x160 + 7.6*m.x161 + 7.6*m.x162 == 0)
m.c131 = Constraint(expr= - m.x42 + 0.5*m.x118 + 0.5*m.x121 + 1.5*m.x124 + 0.5*m.x128 + 2.5*m.x132 + m.x134 + 5.8*m.x136
+ 0.3*m.x138 + 3.6*m.x155 + 3.6*m.x156 + 3.6*m.x157 + 3.6*m.x158 == 0)
m.c132 = Constraint(expr= - m.x43 + 1.5*m.x170 + m.x180 + 6.4*m.x182 + 2.4*m.x184 == 0)
m.c133 = Constraint(expr= - m.x44 + 1.5*m.x170 + m.x180 + 6.6*m.x182 + 0.6*m.x184 == 0)
m.c134 = Constraint(expr= - m.x45 + 1.5*m.x170 + 3.7*m.x172 + 2.5*m.x176 + m.x180 + 5.2*m.x182 + 0.7*m.x184 == 0)
m.c135 = Constraint(expr= - m.x46 + 1.5*m.x170 + 0.5*m.x176 + 4.5*m.x182 + 0.7*m.x197 + 0.5*m.x198 + 0.6*m.x199
+ 0.5*m.x200 + 0.6*m.x201 + 0.4*m.x202 + 0.5*m.x203 + 0.4*m.x204 + 1.5*m.x205 + m.x206
+ 1.3*m.x207 + m.x208 == 0)
m.c136 = Constraint(expr= - m.x47 + 2.5*m.x167 + 4.7*m.x168 + 1.5*m.x170 + 1.8*m.x176 + 4*m.x182 + 0.8*m.x197
+ 0.5*m.x198 + 0.7*m.x199 + 0.5*m.x200 + 0.7*m.x201 + 0.4*m.x202 + 0.6*m.x203 + 0.4*m.x204
== 0)
m.c137 = Constraint(expr= - m.x48 + 2.7*m.x164 + 2.2*m.x167 + 2.6*m.x174 + m.x176 == 0)
m.c138 = Constraint(expr= - m.x49 + 1.7*m.x164 + 2.2*m.x174 + 2.2*m.x176 + 1.8*m.x178 == 0)
m.c139 = Constraint(expr= - m.x50 + 0.5*m.x168 + 1.5*m.x176 + 1.6*m.x178 == 0)
m.c140 = Constraint(expr= - m.x51 + 0.5*m.x167 + 0.5*m.x168 + 1.3*m.x170 + 1.3*m.x172 + m.x176 == 0)
m.c141 = Constraint(expr= - m.x52 + 0.5*m.x167 + 0.5*m.x168 + 2*m.x170 + 0.8*m.x172 + 0.5*m.x176 + 1.3*m.x180 == 0)
m.c142 = Constraint(expr= - m.x53 + 1.5*m.x164 + 0.5*m.x167 + 0.5*m.x168 + 2.3*m.x170 + m.x174 + 1.3*m.x180 + 5.8*m.x182
+ 7.6*m.x197 + 7.6*m.x198 + 7.6*m.x199 + 7.6*m.x200 + 4*m.x201 + 4*m.x202 + 4*m.x203
+ 4*m.x204 + 7.6*m.x205 + 7.6*m.x206 + 7.6*m.x207 + 7.6*m.x208 == 0)
m.c143 = Constraint(expr= - m.x54 + 0.5*m.x164 + 0.5*m.x167 + 1.5*m.x170 + 0.5*m.x174 + 2.5*m.x178 + m.x180 + 5.8*m.x182
+ 0.3*m.x184 + 3.6*m.x201 + 3.6*m.x202 + 3.6*m.x203 + 3.6*m.x204 == 0)
m.c144 = Constraint(expr= 32.3*m.x123 + 31.1*m.x124 + 21.7*m.x133 + 21.7*m.x134 + 90*m.x135 + 86.5*m.x136 + 159*m.x137
+ 151*m.x138 + 4.3*m.x139 + 4.3*m.x140 + 4.3*m.x141 + 4.3*m.x142 + 4.3*m.x143 + 4.3*m.x144
+ 4.3*m.x145 + 4.3*m.x146 + 4.3*m.x147 + 4.3*m.x148 + 4.3*m.x149 + 4.3*m.x150 + 4.3*m.x151
+ 4.3*m.x152 + 4.3*m.x153 + 4.3*m.x154 + 4.3*m.x155 + 4.3*m.x156 + 4.3*m.x157 + 4.3*m.x158
+ 4.3*m.x159 + 4.3*m.x160 + 4.3*m.x161 + 4.3*m.x162 + 30.1*m.x231 + 33.6*m.x232 + 25.1*m.x233
- m.x241 - m.x268 == 0)
m.c145 = Constraint(expr= 41.5*m.x123 + 39.8*m.x124 + 20.5*m.x133 + 20.2*m.x134 + 85*m.x135 + 81.5*m.x136
+ 80.4*m.x137 + 80.6*m.x138 + 3.9*m.x139 + 3.9*m.x140 + 3.9*m.x141 + 3.9*m.x142 + 3.9*m.x143
+ 3.9*m.x144 + 3.9*m.x145 + 3.9*m.x146 + 3.9*m.x147 + 3.9*m.x148 + 3.9*m.x149 + 3.9*m.x150
+ 3.9*m.x151 + 3.9*m.x152 + 3.9*m.x153 + 3.9*m.x154 + 3.9*m.x155 + 3.9*m.x156 + 3.9*m.x157
+ 3.9*m.x158 + 3.9*m.x159 + 3.9*m.x160 + 3.9*m.x161 + 3.9*m.x162 + 30.1*m.x231 + 33.6*m.x232
+ 25.1*m.x233 - m.x242 - m.x269 == 0)
m.c146 = Constraint(expr= 41.8*m.x123 + 40.3*m.x124 + 21.7*m.x125 + 20*m.x126 + 18.5*m.x129 + 6*m.x130 + 6*m.x133
+ 6*m.x134 + 95*m.x135 + 90*m.x136 + 54.4*m.x137 + 48.9*m.x138 + 3.9*m.x139 + 3.9*m.x140
+ 3.9*m.x141 + 3.9*m.x142 + 3.9*m.x143 + 3.9*m.x144 + 3.9*m.x145 + 3.9*m.x146 + 3.9*m.x147
+ 3.9*m.x148 + 3.9*m.x149 + 3.9*m.x150 + 3.9*m.x151 + 3.9*m.x152 + 3.9*m.x153 + 3.9*m.x154
+ 3.9*m.x155 + 3.9*m.x156 + 3.9*m.x157 + 3.9*m.x158 + 3.9*m.x159 + 3.9*m.x160 + 3.9*m.x161
+ 3.9*m.x162 + 30.1*m.x231 + 33.6*m.x232 + 25.1*m.x233 - m.x243 - m.x270 == 0)
m.c147 = Constraint(expr= 26.8*m.x120 + 29.4*m.x123 + 28.7*m.x124 + 2*m.x129 + 1.3*m.x130 + 72.1*m.x135 + 64*m.x136
+ 22.5*m.x137 + 22.5*m.x138 + 64.4*m.x139 + 41.9*m.x140 + 54.7*m.x141 + 41.9*m.x142
+ 51.4*m.x143 + 33.2*m.x144 + 43.4*m.x145 + 33.2*m.x146 + 88.1*m.x147 + 57.3*m.x148
+ 74.9*m.x149 + 57.3*m.x150 + 60.1*m.x151 + 39.1*m.x152 + 51.1*m.x153 + 39.1*m.x154
+ 52.8*m.x155 + 34.3*m.x156 + 44.9*m.x157 + 34.3*m.x158 + 79.6*m.x159 + 44*m.x160
+ 67.7*m.x161 + 44*m.x162 + 30.1*m.x231 + 33.6*m.x232 + 25.1*m.x233 - m.x244 - m.x271 == 0)
m.c148 = Constraint(expr= 35.6*m.x119 + 6.7*m.x120 + 5*m.x121 + 18.2*m.x123 + 17.6*m.x124 + 3*m.x129 + 4.5*m.x130
+ 30*m.x135 + 34.5*m.x136 + 4.75*m.x137 + 4.75*m.x138 + 23.8*m.x139 + 15.5*m.x140
+ 20.2*m.x141 + 15.5*m.x142 + 18.9*m.x143 + 12.3*m.x144 + 16.1*m.x145 + 12.3*m.x146
+ 19.5*m.x151 + 12.7*m.x152 + 16.6*m.x153 + 12.7*m.x154 + 17.2*m.x155 + 11.2*m.x156
+ 14.6*m.x157 + 11.2*m.x158 + 30.1*m.x231 + 33.6*m.x232 + 25.1*m.x233 - m.x245 - m.x272 == 0)
m.c149 = Constraint(expr= 29.1*m.x117 + 8.4*m.x118 + 18.4*m.x119 + 20.5*m.x120 + 9.4*m.x121 + 14.4*m.x122
+ 22.9*m.x127 + 5.3*m.x128 + 18.7*m.x129 + 6.5*m.x130 + 10.8*m.x131 + 5.05*m.x135
+ 5.05*m.x136 + 5.05*m.x137 + 5.05*m.x138 + 30.1*m.x231 + 33.6*m.x232 + 25.1*m.x233 - m.x246
- m.x273 == 0)
m.c150 = Constraint(expr= 88.8*m.x117 + 71.6*m.x118 + 2.5*m.x119 + 2.5*m.x120 + 2.5*m.x121 + 2.5*m.x122 + 122.3*m.x127
+ 105.5*m.x128 + 4*m.x129 + 4.2*m.x130 + 4.5*m.x131 + 2.4*m.x132 + 3*m.x135 + 3*m.x136
+ 3*m.x137 + 3*m.x138 + 30.1*m.x231 + 33.6*m.x232 + 25.1*m.x233 - m.x247 - m.x274 == 0)
m.c151 = Constraint(expr= 65.9*m.x117 + 65.9*m.x118 + 7.6*m.x119 + 7.6*m.x120 + 7.6*m.x121 + 7.6*m.x122 + 35.9*m.x127
+ 29.9*m.x128 + 18*m.x129 + 5.5*m.x130 + 16.7*m.x131 + 8.4*m.x132 + 3*m.x135 + 3*m.x136
+ 3*m.x137 + 3*m.x138 + 30.1*m.x231 + 33.6*m.x232 + 25.1*m.x233 - m.x248 - m.x275 == 0)
m.c152 = Constraint(expr= 5.6*m.x117 + 5.6*m.x118 + 13.2*m.x119 + 13.2*m.x120 + 12.7*m.x121 + 12.7*m.x122
+ 10.1*m.x123 + 7.5*m.x124 + 10.8*m.x125 + 1.6*m.x126 + 8.4*m.x127 + 8.4*m.x128 + 3*m.x129
+ 2*m.x130 + 44.2*m.x131 + 44.2*m.x132 + 3*m.x135 + 3*m.x136 + 3*m.x137 + 3*m.x138
+ 30.1*m.x231 + 33.6*m.x232 + 25.1*m.x233 - m.x249 - m.x276 == 0)
m.c153 = Constraint(expr= 5.6*m.x117 + 5.6*m.x118 + 41.3*m.x119 + 41.3*m.x120 + 40.8*m.x121 + 40.8*m.x122
+ 15.5*m.x123 + 13.2*m.x124 + 8.9*m.x125 + 4.4*m.x126 + 6.4*m.x127 + 6.4*m.x128 + m.x129
+ 1.5*m.x130 + 2.5*m.x131 + 2.5*m.x132 + 10.8*m.x133 + 1.6*m.x134 + 1.5*m.x135 + 1.5*m.x136
+ 1.5*m.x137 + 1.5*m.x138 + 18.6*m.x139 + 18.6*m.x140 + 18.6*m.x141 + 18.6*m.x142
+ 18.6*m.x147 + 18.6*m.x148 + 18.6*m.x149 + 18.6*m.x150 + 30.1*m.x231 + 33.6*m.x232
+ 25.1*m.x233 - m.x250 - m.x277 == 0)
m.c154 = Constraint(expr= 47.9*m.x117 + 47.4*m.x118 + 57.4*m.x119 + 57.4*m.x120 + 56.9*m.x121 + 56.9*m.x122
+ 23.5*m.x123 + 21.6*m.x124 + 0.7*m.x125 + 0.7*m.x126 + 43.9*m.x127 + 43.4*m.x128
+ 26.8*m.x131 + 26*m.x132 + 13.6*m.x133 + 5.1*m.x134 + 85*m.x135 + 80.5*m.x136 + 150.3*m.x137
+ 148.3*m.x138 + 23.9*m.x139 + 23.9*m.x140 + 23.9*m.x141 + 23.9*m.x142 + 45.2*m.x143
+ 45.2*m.x144 + 45.2*m.x145 + 45.2*m.x146 + 23.9*m.x147 + 23.9*m.x148 + 23.9*m.x149
+ 23.9*m.x150 + 15.6*m.x151 + 15.6*m.x152 + 15.6*m.x153 + 15.6*m.x154 + 8*m.x155 + 8*m.x156
+ 8*m.x157 + 8*m.x158 + 15.6*m.x159 + 15.6*m.x160 + 15.6*m.x161 + 15.6*m.x162 + 30.1*m.x231
+ 33.6*m.x232 + 25.1*m.x233 - m.x251 - m.x278 == 0)
m.c155 = Constraint(expr= 17.6*m.x117 + 16.1*m.x118 + 22.1*m.x119 + 22.1*m.x120 + 21.6*m.x121 + 21.6*m.x122
+ 29.1*m.x123 + 28.4*m.x124 + 2.5*m.x125 + 2.5*m.x126 + 20.4*m.x127 + 17.9*m.x128 + 27*m.x131
+ 26.8*m.x132 + 13.9*m.x133 + 13.9*m.x134 + 95*m.x135 + 90.5*m.x136 + 148.5*m.x137
+ 142.3*m.x138 + 3.9*m.x139 + 3.9*m.x140 + 3.9*m.x141 + 3.9*m.x142 + 3.9*m.x143 + 3.9*m.x144
+ 3.9*m.x145 + 3.9*m.x146 + 3.9*m.x147 + 3.9*m.x148 + 3.9*m.x149 + 3.9*m.x150 + 3.9*m.x151
+ 3.9*m.x152 + 3.9*m.x153 + 3.9*m.x154 + 11.5*m.x155 + 11.5*m.x156 + 11.5*m.x157 + 11.5*m.x158
+ 3.9*m.x159 + 3.9*m.x160 + 3.9*m.x161 + 3.9*m.x162 + 30.1*m.x231 + 33.6*m.x232 + 25.1*m.x233
- m.x252 - m.x279 == 0)
# --- Constraints c156-c167: second-group balance equations ---
# Structurally identical to the preceding group's constraints, with every
# variable index shifted by +46 (x117-x162 -> x163-x208), the common terms
# using x234-x236 instead of x231-x233, and the paired balance variables drawn
# from x253-x264 and x280-x291. The coefficient values are the same as in the
# corresponding earlier constraints (e.g. c157 repeats c145's coefficients).
m.c156 = Constraint(expr= 32.3*m.x169 + 31.1*m.x170 + 21.7*m.x179 + 21.7*m.x180 + 90*m.x181 + 86.5*m.x182 + 159*m.x183
+ 151*m.x184 + 4.3*m.x185 + 4.3*m.x186 + 4.3*m.x187 + 4.3*m.x188 + 4.3*m.x189 + 4.3*m.x190
+ 4.3*m.x191 + 4.3*m.x192 + 4.3*m.x193 + 4.3*m.x194 + 4.3*m.x195 + 4.3*m.x196 + 4.3*m.x197
+ 4.3*m.x198 + 4.3*m.x199 + 4.3*m.x200 + 4.3*m.x201 + 4.3*m.x202 + 4.3*m.x203 + 4.3*m.x204
+ 4.3*m.x205 + 4.3*m.x206 + 4.3*m.x207 + 4.3*m.x208 + 30.1*m.x234 + 33.6*m.x235 + 25.1*m.x236
- m.x253 - m.x280 == 0)
m.c157 = Constraint(expr= 41.5*m.x169 + 39.8*m.x170 + 20.5*m.x179 + 20.2*m.x180 + 85*m.x181 + 81.5*m.x182
+ 80.4*m.x183 + 80.6*m.x184 + 3.9*m.x185 + 3.9*m.x186 + 3.9*m.x187 + 3.9*m.x188 + 3.9*m.x189
+ 3.9*m.x190 + 3.9*m.x191 + 3.9*m.x192 + 3.9*m.x193 + 3.9*m.x194 + 3.9*m.x195 + 3.9*m.x196
+ 3.9*m.x197 + 3.9*m.x198 + 3.9*m.x199 + 3.9*m.x200 + 3.9*m.x201 + 3.9*m.x202 + 3.9*m.x203
+ 3.9*m.x204 + 3.9*m.x205 + 3.9*m.x206 + 3.9*m.x207 + 3.9*m.x208 + 30.1*m.x234 + 33.6*m.x235
+ 25.1*m.x236 - m.x254 - m.x281 == 0)
m.c158 = Constraint(expr= 41.8*m.x169 + 40.3*m.x170 + 21.7*m.x171 + 20*m.x172 + 18.5*m.x175 + 6*m.x176 + 6*m.x179
+ 6*m.x180 + 95*m.x181 + 90*m.x182 + 54.4*m.x183 + 48.9*m.x184 + 3.9*m.x185 + 3.9*m.x186
+ 3.9*m.x187 + 3.9*m.x188 + 3.9*m.x189 + 3.9*m.x190 + 3.9*m.x191 + 3.9*m.x192 + 3.9*m.x193
+ 3.9*m.x194 + 3.9*m.x195 + 3.9*m.x196 + 3.9*m.x197 + 3.9*m.x198 + 3.9*m.x199 + 3.9*m.x200
+ 3.9*m.x201 + 3.9*m.x202 + 3.9*m.x203 + 3.9*m.x204 + 3.9*m.x205 + 3.9*m.x206 + 3.9*m.x207
+ 3.9*m.x208 + 30.1*m.x234 + 33.6*m.x235 + 25.1*m.x236 - m.x255 - m.x282 == 0)
m.c159 = Constraint(expr= 26.8*m.x166 + 29.4*m.x169 + 28.7*m.x170 + 2*m.x175 + 1.3*m.x176 + 72.1*m.x181 + 64*m.x182
+ 22.5*m.x183 + 22.5*m.x184 + 64.4*m.x185 + 41.9*m.x186 + 54.7*m.x187 + 41.9*m.x188
+ 51.4*m.x189 + 33.2*m.x190 + 43.4*m.x191 + 33.2*m.x192 + 88.1*m.x193 + 57.3*m.x194
+ 74.9*m.x195 + 57.3*m.x196 + 60.1*m.x197 + 39.1*m.x198 + 51.1*m.x199 + 39.1*m.x200
+ 52.8*m.x201 + 34.3*m.x202 + 44.9*m.x203 + 34.3*m.x204 + 79.6*m.x205 + 44*m.x206
+ 67.7*m.x207 + 44*m.x208 + 30.1*m.x234 + 33.6*m.x235 + 25.1*m.x236 - m.x256 - m.x283 == 0)
m.c160 = Constraint(expr= 35.6*m.x165 + 6.7*m.x166 + 5*m.x167 + 18.2*m.x169 + 17.6*m.x170 + 3*m.x175 + 4.5*m.x176
+ 30*m.x181 + 34.5*m.x182 + 4.75*m.x183 + 4.75*m.x184 + 23.8*m.x185 + 15.5*m.x186
+ 20.2*m.x187 + 15.5*m.x188 + 18.9*m.x189 + 12.3*m.x190 + 16.1*m.x191 + 12.3*m.x192
+ 19.5*m.x197 + 12.7*m.x198 + 16.6*m.x199 + 12.7*m.x200 + 17.2*m.x201 + 11.2*m.x202
+ 14.6*m.x203 + 11.2*m.x204 + 30.1*m.x234 + 33.6*m.x235 + 25.1*m.x236 - m.x257 - m.x284 == 0)
m.c161 = Constraint(expr= 29.1*m.x163 + 8.4*m.x164 + 18.4*m.x165 + 20.5*m.x166 + 9.4*m.x167 + 14.4*m.x168
+ 22.9*m.x173 + 5.3*m.x174 + 18.7*m.x175 + 6.5*m.x176 + 10.8*m.x177 + 5.05*m.x181
+ 5.05*m.x182 + 5.05*m.x183 + 5.05*m.x184 + 30.1*m.x234 + 33.6*m.x235 + 25.1*m.x236 - m.x258
- m.x285 == 0)
m.c162 = Constraint(expr= 88.8*m.x163 + 71.6*m.x164 + 2.5*m.x165 + 2.5*m.x166 + 2.5*m.x167 + 2.5*m.x168 + 122.3*m.x173
+ 105.5*m.x174 + 4*m.x175 + 4.2*m.x176 + 4.5*m.x177 + 2.4*m.x178 + 3*m.x181 + 3*m.x182
+ 3*m.x183 + 3*m.x184 + 30.1*m.x234 + 33.6*m.x235 + 25.1*m.x236 - m.x259 - m.x286 == 0)
m.c163 = Constraint(expr= 65.9*m.x163 + 65.9*m.x164 + 7.6*m.x165 + 7.6*m.x166 + 7.6*m.x167 + 7.6*m.x168 + 35.9*m.x173
+ 29.9*m.x174 + 18*m.x175 + 5.5*m.x176 + 16.7*m.x177 + 8.4*m.x178 + 3*m.x181 + 3*m.x182
+ 3*m.x183 + 3*m.x184 + 30.1*m.x234 + 33.6*m.x235 + 25.1*m.x236 - m.x260 - m.x287 == 0)
m.c164 = Constraint(expr= 5.6*m.x163 + 5.6*m.x164 + 13.2*m.x165 + 13.2*m.x166 + 12.7*m.x167 + 12.7*m.x168
+ 10.1*m.x169 + 7.5*m.x170 + 10.8*m.x171 + 1.6*m.x172 + 8.4*m.x173 + 8.4*m.x174 + 3*m.x175
+ 2*m.x176 + 44.2*m.x177 + 44.2*m.x178 + 3*m.x181 + 3*m.x182 + 3*m.x183 + 3*m.x184
+ 30.1*m.x234 + 33.6*m.x235 + 25.1*m.x236 - m.x261 - m.x288 == 0)
m.c165 = Constraint(expr= 5.6*m.x163 + 5.6*m.x164 + 41.3*m.x165 + 41.3*m.x166 + 40.8*m.x167 + 40.8*m.x168
+ 15.5*m.x169 + 13.2*m.x170 + 8.9*m.x171 + 4.4*m.x172 + 6.4*m.x173 + 6.4*m.x174 + m.x175
+ 1.5*m.x176 + 2.5*m.x177 + 2.5*m.x178 + 10.8*m.x179 + 1.6*m.x180 + 1.5*m.x181 + 1.5*m.x182
+ 1.5*m.x183 + 1.5*m.x184 + 18.6*m.x185 + 18.6*m.x186 + 18.6*m.x187 + 18.6*m.x188
+ 18.6*m.x193 + 18.6*m.x194 + 18.6*m.x195 + 18.6*m.x196 + 30.1*m.x234 + 33.6*m.x235
+ 25.1*m.x236 - m.x262 - m.x289 == 0)
m.c166 = Constraint(expr= 47.9*m.x163 + 47.4*m.x164 + 57.4*m.x165 + 57.4*m.x166 + 56.9*m.x167 + 56.9*m.x168
+ 23.5*m.x169 + 21.6*m.x170 + 0.7*m.x171 + 0.7*m.x172 + 43.9*m.x173 + 43.4*m.x174
+ 26.8*m.x177 + 26*m.x178 + 13.6*m.x179 + 5.1*m.x180 + 85*m.x181 + 80.5*m.x182 + 150.3*m.x183
+ 148.3*m.x184 + 23.9*m.x185 + 23.9*m.x186 + 23.9*m.x187 + 23.9*m.x188 + 45.2*m.x189
+ 45.2*m.x190 + 45.2*m.x191 + 45.2*m.x192 + 23.9*m.x193 + 23.9*m.x194 + 23.9*m.x195
+ 23.9*m.x196 + 15.6*m.x197 + 15.6*m.x198 + 15.6*m.x199 + 15.6*m.x200 + 8*m.x201 + 8*m.x202
+ 8*m.x203 + 8*m.x204 + 15.6*m.x205 + 15.6*m.x206 + 15.6*m.x207 + 15.6*m.x208 + 30.1*m.x234
+ 33.6*m.x235 + 25.1*m.x236 - m.x263 - m.x290 == 0)
m.c167 = Constraint(expr= 17.6*m.x163 + 16.1*m.x164 + 22.1*m.x165 + 22.1*m.x166 + 21.6*m.x167 + 21.6*m.x168
+ 29.1*m.x169 + 28.4*m.x170 + 2.5*m.x171 + 2.5*m.x172 + 20.4*m.x173 + 17.9*m.x174 + 27*m.x177
+ 26.8*m.x178 + 13.9*m.x179 + 13.9*m.x180 + 95*m.x181 + 90.5*m.x182 + 148.5*m.x183
+ 142.3*m.x184 + 3.9*m.x185 + 3.9*m.x186 + 3.9*m.x187 + 3.9*m.x188 + 3.9*m.x189 + 3.9*m.x190
+ 3.9*m.x191 + 3.9*m.x192 + 3.9*m.x193 + 3.9*m.x194 + 3.9*m.x195 + 3.9*m.x196 + 3.9*m.x197
+ 3.9*m.x198 + 3.9*m.x199 + 3.9*m.x200 + 11.5*m.x201 + 11.5*m.x202 + 11.5*m.x203 + 11.5*m.x204
+ 3.9*m.x205 + 3.9*m.x206 + 3.9*m.x207 + 3.9*m.x208 + 30.1*m.x234 + 33.6*m.x235 + 25.1*m.x236
- m.x264 - m.x291 == 0)
# --- Constraints c168-c179: availability equations for the first group ---
# Each constraint sums a subset of x117-x162 (some with weight 0.5 or 0.25)
# plus one dedicated variable from x357-x368, equal to the constant 862.6652.
# The x357-x368 terms appear once each and presumably act as slack so the
# weighted sum cannot exceed 862.6652 -- TODO confirm x357-x368 are
# nonnegative in the variable declarations earlier in the file.
m.c168 = Constraint(expr= m.x123 + m.x124 + m.x125 + m.x126 + m.x133 + m.x134 + m.x135 + m.x136 + m.x137 + m.x138
+ m.x139 + m.x140 + m.x141 + m.x142 + m.x143 + m.x144 + m.x145 + m.x146 + m.x147 + m.x148
+ m.x149 + m.x150 + m.x151 + m.x152 + m.x153 + m.x154 + m.x155 + m.x156 + m.x157 + m.x158
+ m.x159 + m.x160 + m.x161 + m.x162 + m.x357 == 862.6652)
m.c169 = Constraint(expr= m.x123 + m.x124 + m.x125 + m.x126 + m.x133 + m.x134 + m.x135 + m.x136 + m.x137 + m.x138
+ m.x139 + m.x140 + m.x141 + m.x142 + m.x143 + m.x144 + m.x145 + m.x146 + m.x147 + m.x148
+ m.x149 + m.x150 + m.x151 + m.x152 + m.x153 + m.x154 + m.x155 + m.x156 + m.x157 + m.x158
+ m.x159 + m.x160 + m.x161 + m.x162 + m.x358 == 862.6652)
m.c170 = Constraint(expr= m.x123 + m.x124 + m.x125 + m.x126 + 0.5*m.x129 + 0.5*m.x130 + m.x133 + m.x134 + m.x135
+ m.x136 + m.x137 + m.x138 + m.x139 + m.x140 + m.x141 + m.x142 + m.x143 + m.x144 + m.x145
+ m.x146 + m.x147 + m.x148 + m.x149 + m.x150 + m.x151 + m.x152 + m.x153 + m.x154 + m.x155
+ m.x156 + m.x157 + m.x158 + m.x159 + m.x160 + m.x161 + m.x162 + m.x359 == 862.6652)
m.c171 = Constraint(expr= 0.5*m.x120 + m.x123 + m.x124 + 0.5*m.x129 + 0.5*m.x130 + m.x135 + m.x136 + m.x137 + m.x138
+ m.x139 + m.x140 + m.x141 + m.x142 + m.x143 + m.x144 + m.x145 + m.x146 + 0.5*m.x147
+ 0.5*m.x148 + 0.5*m.x149 + 0.5*m.x150 + m.x151 + m.x152 + m.x153 + m.x154 + m.x155 + m.x156
+ m.x157 + m.x158 + 0.5*m.x159 + 0.5*m.x160 + 0.5*m.x161 + 0.5*m.x162 + m.x360 == 862.6652)
m.c172 = Constraint(expr= m.x119 + m.x120 + m.x121 + m.x123 + m.x124 + 0.5*m.x129 + 0.5*m.x130 + m.x135 + m.x136
+ m.x137 + m.x138 + m.x361 == 862.6652)
m.c173 = Constraint(expr= m.x117 + m.x118 + m.x119 + m.x120 + m.x121 + m.x122 + m.x127 + m.x128 + m.x129 + m.x130
+ 0.5*m.x131 + m.x135 + m.x136 + m.x137 + m.x138 + m.x362 == 862.6652)
m.c174 = Constraint(expr= m.x117 + m.x118 + m.x119 + m.x120 + m.x121 + m.x122 + m.x127 + m.x128 + m.x129 + m.x130
+ m.x131 + m.x132 + m.x135 + m.x136 + m.x137 + m.x138 + m.x363 == 862.6652)
m.c175 = Constraint(expr= m.x117 + m.x118 + m.x119 + m.x120 + m.x121 + m.x122 + m.x127 + m.x128 + m.x129 + m.x130
+ m.x131 + m.x132 + m.x135 + m.x136 + m.x137 + m.x138 + m.x364 == 862.6652)
m.c176 = Constraint(expr= m.x117 + m.x118 + m.x119 + m.x120 + m.x121 + m.x122 + 0.5*m.x123 + 0.25*m.x124 + 0.5*m.x125
+ 0.25*m.x126 + m.x127 + m.x128 + 0.5*m.x129 + 0.5*m.x130 + m.x131 + m.x132 + m.x135 + m.x136
+ m.x137 + m.x138 + m.x365 == 862.6652)
m.c177 = Constraint(expr= m.x117 + m.x118 + m.x119 + m.x120 + m.x121 + m.x122 + m.x123 + m.x124 + m.x125 + m.x126
+ m.x127 + m.x128 + 0.5*m.x129 + 0.5*m.x130 + m.x131 + m.x132 + m.x133 + m.x134 + m.x135
+ m.x136 + m.x137 + m.x138 + 0.5*m.x139 + 0.5*m.x140 + 0.5*m.x141 + 0.5*m.x142 + 0.5*m.x147
+ 0.5*m.x148 + 0.5*m.x149 + 0.5*m.x150 + m.x366 == 862.6652)
m.c178 = Constraint(expr= m.x117 + m.x118 + m.x119 + m.x120 + m.x121 + m.x122 + m.x123 + m.x124 + m.x125 + m.x126
+ m.x127 + m.x128 + m.x131 + m.x132 + m.x133 + m.x134 + m.x135 + m.x136 + m.x137 + m.x138
+ m.x139 + m.x140 + m.x141 + m.x142 + m.x143 + m.x144 + m.x145 + m.x146 + m.x147 + m.x148
+ m.x149 + m.x150 + m.x151 + m.x152 + m.x153 + m.x154 + 0.5*m.x155 + 0.5*m.x156 + 0.5*m.x157
+ m.x159 + m.x160 + m.x161 + m.x162 + m.x367 == 862.6652)
m.c179 = Constraint(expr= 0.5*m.x119 + 0.5*m.x120 + 0.5*m.x121 + 0.5*m.x122 + m.x123 + m.x124 + m.x125 + m.x126
+ 0.5*m.x131 + 0.5*m.x132 + m.x133 + m.x134 + m.x135 + m.x136 + m.x137 + m.x138 + m.x139
+ m.x140 + m.x141 + m.x142 + m.x143 + m.x144 + m.x145 + m.x146 + m.x147 + m.x148 + m.x149
+ m.x150 + m.x151 + m.x152 + m.x153 + m.x154 + m.x155 + m.x156 + m.x157 + m.x158 + m.x159
+ m.x160 + m.x161 + m.x162 + m.x368 == 862.6652)
# --- Constraints c180-c191: availability equations for the second group ---
# Mirror of c168-c179 with all indices shifted by +46 (x163-x208), dedicated
# variables x369-x380, and the smaller constant 369.7136 on the right-hand
# side. The 0.5/0.25 weighting pattern matches the first group exactly.
m.c180 = Constraint(expr= m.x169 + m.x170 + m.x171 + m.x172 + m.x179 + m.x180 + m.x181 + m.x182 + m.x183 + m.x184
+ m.x185 + m.x186 + m.x187 + m.x188 + m.x189 + m.x190 + m.x191 + m.x192 + m.x193 + m.x194
+ m.x195 + m.x196 + m.x197 + m.x198 + m.x199 + m.x200 + m.x201 + m.x202 + m.x203 + m.x204
+ m.x205 + m.x206 + m.x207 + m.x208 + m.x369 == 369.7136)
m.c181 = Constraint(expr= m.x169 + m.x170 + m.x171 + m.x172 + m.x179 + m.x180 + m.x181 + m.x182 + m.x183 + m.x184
+ m.x185 + m.x186 + m.x187 + m.x188 + m.x189 + m.x190 + m.x191 + m.x192 + m.x193 + m.x194
+ m.x195 + m.x196 + m.x197 + m.x198 + m.x199 + m.x200 + m.x201 + m.x202 + m.x203 + m.x204
+ m.x205 + m.x206 + m.x207 + m.x208 + m.x370 == 369.7136)
m.c182 = Constraint(expr= m.x169 + m.x170 + m.x171 + m.x172 + 0.5*m.x175 + 0.5*m.x176 + m.x179 + m.x180 + m.x181
+ m.x182 + m.x183 + m.x184 + m.x185 + m.x186 + m.x187 + m.x188 + m.x189 + m.x190 + m.x191
+ m.x192 + m.x193 + m.x194 + m.x195 + m.x196 + m.x197 + m.x198 + m.x199 + m.x200 + m.x201
+ m.x202 + m.x203 + m.x204 + m.x205 + m.x206 + m.x207 + m.x208 + m.x371 == 369.7136)
m.c183 = Constraint(expr= 0.5*m.x166 + m.x169 + m.x170 + 0.5*m.x175 + 0.5*m.x176 + m.x181 + m.x182 + m.x183 + m.x184
+ m.x185 + m.x186 + m.x187 + m.x188 + m.x189 + m.x190 + m.x191 + m.x192 + 0.5*m.x193
+ 0.5*m.x194 + 0.5*m.x195 + 0.5*m.x196 + m.x197 + m.x198 + m.x199 + m.x200 + m.x201 + m.x202
+ m.x203 + m.x204 + 0.5*m.x205 + 0.5*m.x206 + 0.5*m.x207 + 0.5*m.x208 + m.x372 == 369.7136)
m.c184 = Constraint(expr= m.x165 + m.x166 + m.x167 + m.x169 + m.x170 + 0.5*m.x175 + 0.5*m.x176 + m.x181 + m.x182
+ m.x183 + m.x184 + m.x373 == 369.7136)
m.c185 = Constraint(expr= m.x163 + m.x164 + m.x165 + m.x166 + m.x167 + m.x168 + m.x173 + m.x174 + m.x175 + m.x176
+ 0.5*m.x177 + m.x181 + m.x182 + m.x183 + m.x184 + m.x374 == 369.7136)
m.c186 = Constraint(expr= m.x163 + m.x164 + m.x165 + m.x166 + m.x167 + m.x168 + m.x173 + m.x174 + m.x175 + m.x176
+ m.x177 + m.x178 + m.x181 + m.x182 + m.x183 + m.x184 + m.x375 == 369.7136)
m.c187 = Constraint(expr= m.x163 + m.x164 + m.x165 + m.x166 + m.x167 + m.x168 + m.x173 + m.x174 + m.x175 + m.x176
+ m.x177 + m.x178 + m.x181 + m.x182 + m.x183 + m.x184 + m.x376 == 369.7136)
m.c188 = Constraint(expr= m.x163 + m.x164 + m.x165 + m.x166 + m.x167 + m.x168 + 0.5*m.x169 + 0.25*m.x170 + 0.5*m.x171
+ 0.25*m.x172 + m.x173 + m.x174 + 0.5*m.x175 + 0.5*m.x176 + m.x177 + m.x178 + m.x181 + m.x182
+ m.x183 + m.x184 + m.x377 == 369.7136)
m.c189 = Constraint(expr= m.x163 + m.x164 + m.x165 + m.x166 + m.x167 + m.x168 + m.x169 + m.x170 + m.x171 + m.x172
+ m.x173 + m.x174 + 0.5*m.x175 + 0.5*m.x176 + m.x177 + m.x178 + m.x179 + m.x180 + m.x181
+ m.x182 + m.x183 + m.x184 + 0.5*m.x185 + 0.5*m.x186 + 0.5*m.x187 + 0.5*m.x188 + 0.5*m.x193
+ 0.5*m.x194 + 0.5*m.x195 + 0.5*m.x196 + m.x378 == 369.7136)
m.c190 = Constraint(expr= m.x163 + m.x164 + m.x165 + m.x166 + m.x167 + m.x168 + m.x169 + m.x170 + m.x171 + m.x172
+ m.x173 + m.x174 + m.x177 + m.x178 + m.x179 + m.x180 + m.x181 + m.x182 + m.x183 + m.x184
+ m.x185 + m.x186 + m.x187 + m.x188 + m.x189 + m.x190 + m.x191 + m.x192 + m.x193 + m.x194
+ m.x195 + m.x196 + m.x197 + m.x198 + m.x199 + m.x200 + 0.5*m.x201 + 0.5*m.x202 + 0.5*m.x203
+ m.x205 + m.x206 + m.x207 + m.x208 + m.x379 == 369.7136)
m.c191 = Constraint(expr= 0.5*m.x165 + 0.5*m.x166 + 0.5*m.x167 + 0.5*m.x168 + m.x169 + m.x170 + m.x171 + m.x172
+ 0.5*m.x177 + 0.5*m.x178 + m.x179 + m.x180 + m.x181 + m.x182 + m.x183 + m.x184 + m.x185
+ m.x186 + m.x187 + m.x188 + m.x189 + m.x190 + m.x191 + m.x192 + m.x193 + m.x194 + m.x195
+ m.x196 + m.x197 + m.x198 + m.x199 + m.x200 + m.x201 + m.x202 + m.x203 + m.x204 + m.x205
+ m.x206 + m.x207 + m.x208 + m.x380 == 369.7136)
# --- Constraints c192-c213: aggregate (defining) equations ---
# Each defines one aggregate variable as the sum of a small group of
# underlying variables: x209-x219 aggregate pieces of x117-x162 (first
# group), and x220-x230 aggregate the corresponding pieces of x163-x208
# (second group, same grouping pattern shifted by +46).
m.c192 = Constraint(expr= - m.x117 - m.x118 + m.x209 == 0)
m.c193 = Constraint(expr= - m.x119 - m.x120 - m.x121 - m.x122 + m.x210 == 0)
m.c194 = Constraint(expr= - m.x123 - m.x124 + m.x211 == 0)
m.c195 = Constraint(expr= - m.x125 - m.x126 + m.x212 == 0)
m.c196 = Constraint(expr= - m.x127 - m.x128 + m.x213 == 0)
m.c197 = Constraint(expr= - m.x129 - m.x130 + m.x214 == 0)
m.c198 = Constraint(expr= - m.x131 - m.x132 + m.x215 == 0)
m.c199 = Constraint(expr= - m.x133 - m.x134 + m.x216 == 0)
m.c200 = Constraint(expr= - m.x135 - m.x136 + m.x217 == 0)
m.c201 = Constraint(expr= - m.x137 - m.x138 + m.x218 == 0)
m.c202 = Constraint(expr= - m.x139 - m.x140 - m.x141 - m.x142 - m.x143 - m.x144 - m.x145 - m.x146 - m.x147 - m.x148
- m.x149 - m.x150 - m.x151 - m.x152 - m.x153 - m.x154 - m.x155 - m.x156 - m.x157 - m.x158
- m.x159 - m.x160 - m.x161 - m.x162 + m.x219 == 0)
m.c203 = Constraint(expr= - m.x163 - m.x164 + m.x220 == 0)
m.c204 = Constraint(expr= - m.x165 - m.x166 - m.x167 - m.x168 + m.x221 == 0)
m.c205 = Constraint(expr= - m.x169 - m.x170 + m.x222 == 0)
m.c206 = Constraint(expr= - m.x171 - m.x172 + m.x223 == 0)
m.c207 = Constraint(expr= - m.x173 - m.x174 + m.x224 == 0)
m.c208 = Constraint(expr= - m.x175 - m.x176 + m.x225 == 0)
m.c209 = Constraint(expr= - m.x177 - m.x178 + m.x226 == 0)
m.c210 = Constraint(expr= - m.x179 - m.x180 + m.x227 == 0)
m.c211 = Constraint(expr= - m.x181 - m.x182 + m.x228 == 0)
m.c212 = Constraint(expr= - m.x183 - m.x184 + m.x229 == 0)
m.c213 = Constraint(expr= - m.x185 - m.x186 - m.x187 - m.x188 - m.x189 - m.x190 - m.x191 - m.x192 - m.x193 - m.x194
- m.x195 - m.x196 - m.x197 - m.x198 - m.x199 - m.x200 - m.x201 - m.x202 - m.x203 - m.x204
- m.x205 - m.x206 - m.x207 - m.x208 + m.x230 == 0)
# --- Constraints c214-c225: requirement equations linking x7-x18 ---
# Each constraint ties one of x7-x18 (with a negative scale factor of
# -0.67545, -0.595, or -0.52) to a weighted sum over the first-group
# variables x117-x162, plus one dedicated variable from x381-x392, equal to
# a constant. The long decimal coefficients within a constraint share a
# common fractional tail (e.g. *.x...745657289 in c214), which suggests a
# generator-applied per-constraint offset -- NOTE(review): generated data,
# do not edit by hand.
m.c214 = Constraint(expr= - 0.67545*m.x7 + 0.186275745657289*m.x123 + 0.186275745657289*m.x124
+ 0.086275745657289*m.x125 + 0.086275745657289*m.x126 + 0.186275745657289*m.x133
+ 0.186275745657289*m.x134 + 0.080275745657289*m.x135 + 0.080275745657289*m.x136
+ 0.080275745657289*m.x137 + 0.080275745657289*m.x138 + 0.220275745657289*m.x139
+ 0.267275745657289*m.x140 + 0.220275745657289*m.x141 + 0.220275745657289*m.x143
+ 0.267275745657289*m.x144 + 0.220275745657289*m.x145 + 0.220275745657289*m.x147
+ 0.267275745657289*m.x148 + 0.220275745657289*m.x149 + 0.220275745657289*m.x151
+ 0.267275745657289*m.x152 + 0.220275745657289*m.x153 + 0.220275745657289*m.x155
+ 0.267275745657289*m.x156 + 0.220275745657289*m.x157 + 0.220275745657289*m.x159
+ 0.267275745657289*m.x160 + 0.220275745657289*m.x161 + m.x381 == 33.1648634053726)
m.c215 = Constraint(expr= - 0.67545*m.x8 + 0.283170516228474*m.x123 + 0.283170516228474*m.x124
+ 0.033170516228474*m.x125 + 0.033170516228474*m.x126 + 0.133170516228474*m.x133
+ 0.133170516228474*m.x134 + 0.123170516228474*m.x135 + 0.123170516228474*m.x136
+ 0.123170516228474*m.x137 + 0.123170516228474*m.x138 + 0.357170516228474*m.x139
+ 0.357170516228474*m.x141 + 0.264170516228474*m.x142 + 0.357170516228474*m.x143
+ 0.357170516228474*m.x145 + 0.264170516228474*m.x146 + 0.357170516228474*m.x147
+ 0.357170516228474*m.x149 + 0.264170516228474*m.x150 + 0.357170516228474*m.x151
+ 0.357170516228474*m.x153 + 0.264170516228474*m.x154 + 0.357170516228474*m.x155
+ 0.357170516228474*m.x157 + 0.264170516228474*m.x158 + 0.357170516228474*m.x159
+ 0.357170516228474*m.x161 + 0.264170516228474*m.x162 + m.x382 == 38.6431466113418)
m.c216 = Constraint(expr= - 0.67545*m.x9 + 0.427861483676044*m.x123 + 0.427861483676044*m.x124
+ 0.0778614836760444*m.x129 + 0.0778614836760444*m.x130 + 0.211861483676044*m.x135
+ 0.211861483676044*m.x136 + 0.211861483676044*m.x137 + 0.211861483676044*m.x138
+ 0.445861483676044*m.x139 + 0.445861483676044*m.x143 + 0.445861483676044*m.x147
+ 0.445861483676044*m.x151 + 0.445861483676044*m.x155 + 0.445861483676044*m.x159 + m.x383
== 40.7781297689446)
m.c217 = Constraint(expr= - 0.67545*m.x10 + 0.167441181790281*m.x120 + 0.530441181790282*m.x123
+ 0.530441181790282*m.x124 + 0.130441181790281*m.x129 + 0.130441181790281*m.x130
+ 0.354441181790281*m.x135 + 0.354441181790281*m.x136 + 0.354441181790281*m.x137
+ 0.354441181790281*m.x138 + m.x384 == 79.6609082081682)
m.c218 = Constraint(expr= - 0.595*m.x11 + 0.167778380917306*m.x119 + 0.0747783809173062*m.x120
+ 0.167778380917306*m.x121 + 0.354778380917306*m.x122 + 0.230778380917306*m.x123
+ 0.230778380917306*m.x124 + 0.230778380917306*m.x129 + 0.230778380917306*m.x130
+ 0.354778380917306*m.x135 + 0.354778380917306*m.x136 + 0.354778380917306*m.x137
+ 0.354778380917306*m.x138 + m.x385 == 121.291256973504)
m.c219 = Constraint(expr= - 0.52*m.x12 + 0.351666519938619*m.x117 + 0.351666519938619*m.x118 + 0.258666519938619*m.x119
+ 0.351666519938619*m.x120 + 0.258666519938619*m.x121 + 0.304666519938619*m.x122
+ 0.351666519938619*m.x127 + 0.351666519938619*m.x128 + 0.277666519938619*m.x129
+ 0.277666519938619*m.x130 + 0.0776665199386191*m.x131 + 0.304666519938619*m.x135
+ 0.304666519938619*m.x136 + 0.304666519938619*m.x137 + 0.304666519938619*m.x138 + m.x386
== 110.737946854126)
m.c220 = Constraint(expr= - 0.52*m.x13 + 0.755537393186701*m.x117 + 0.755537393186701*m.x118 + 0.240537393186701*m.x119
+ 0.240537393186701*m.x120 + 0.240537393186701*m.x121 + 0.287537393186701*m.x122
+ 0.755537393186701*m.x127 + 0.755537393186701*m.x128 + 0.313537393186701*m.x129
+ 0.313537393186701*m.x130 + 0.0135373931867009*m.x131 + 0.0635373931867009*m.x132
+ 0.287537393186701*m.x135 + 0.287537393186701*m.x136 + 0.287537393186701*m.x137
+ 0.287537393186701*m.x138 + m.x387 == 126.060763601253)
m.c221 = Constraint(expr= - 0.52*m.x14 + 0.861953354052856*m.x117 + 0.861953354052856*m.x118 + 0.300953354052856*m.x119
+ 0.300953354052856*m.x120 + 0.300953354052856*m.x121 + 0.394953354052856*m.x122
+ 0.861953354052856*m.x127 + 0.861953354052856*m.x128 + 0.226953354052856*m.x129
+ 0.226953354052856*m.x130 + 0.226953354052856*m.x131 + 0.226953354052856*m.x132
+ 0.394953354052856*m.x135 + 0.394953354052856*m.x136 + 0.394953354052856*m.x137
+ 0.394953354052856*m.x138 + m.x388 == 118.20855444677)
m.c222 = Constraint(expr= - 0.595*m.x15 + 0.917501344750213*m.x117 + 0.917501344750213*m.x118 + 0.450501344750213*m.x119
+ 0.450501344750213*m.x120 + 0.450501344750213*m.x121 + 0.309501344750213*m.x122
+ 0.182501344750213*m.x123 + 0.182501344750213*m.x124 + 0.232501344750213*m.x125
+ 0.232501344750213*m.x126 + 0.917501344750213*m.x127 + 0.917501344750213*m.x128
+ 0.182501344750213*m.x129 + 0.182501344750213*m.x130 + 0.282501344750213*m.x131
+ 0.282501344750213*m.x132 + 0.450501344750213*m.x135 + 0.450501344750213*m.x136
+ 0.450501344750213*m.x137 + 0.450501344750213*m.x138 + m.x389 == 153.550323696193)
m.c223 = Constraint(expr= - 0.67545*m.x16 + 0.460213503365729*m.x117 + 0.460213503365729*m.x118
+ 0.319213503365729*m.x119 + 0.319213503365729*m.x120 + 0.319213503365729*m.x121
+ 0.086213503365729*m.x122 + 0.242213503365729*m.x123 + 0.242213503365729*m.x124
+ 0.142213503365729*m.x125 + 0.142213503365729*m.x126 + 0.460213503365729*m.x127
+ 0.460213503365729*m.x128 + 0.392213503365729*m.x131 + 0.392213503365729*m.x132
+ 0.292213503365729*m.x133 + 0.292213503365729*m.x134 + 0.273213503365729*m.x135
+ 0.273213503365729*m.x136 + 0.273213503365729*m.x137 + 0.273213503365729*m.x138
+ 0.179213503365729*m.x139 + 0.179213503365729*m.x140 + 0.179213503365729*m.x141
+ 0.179213503365729*m.x142 + 0.179213503365729*m.x147 + 0.179213503365729*m.x148
+ 0.179213503365729*m.x149 + 0.179213503365729*m.x150 + m.x390 == 138.238819242656)
m.c224 = Constraint(expr= - 0.67545*m.x17 + 0.0842575717715261*m.x119 + 0.0842575717715261*m.x120
+ 0.0842575717715261*m.x121 + 0.140257571771526*m.x123 + 0.140257571771526*m.x124
+ 0.0902575717715261*m.x125 + 0.0902575717715261*m.x126 + 0.190257571771526*m.x131
+ 0.240257571771526*m.x132 + 0.190257571771526*m.x133 + 0.190257571771526*m.x134
+ 0.364257571771526*m.x135 + 0.364257571771526*m.x136 + 0.364257571771526*m.x137
+ 0.364257571771526*m.x138 + 0.177257571771526*m.x139 + 0.177257571771526*m.x140
+ 0.177257571771526*m.x141 + 0.177257571771526*m.x142 + 0.271257571771526*m.x143
+ 0.271257571771526*m.x144 + 0.271257571771526*m.x145 + 0.271257571771526*m.x146
+ 0.177257571771526*m.x147 + 0.177257571771526*m.x148 + 0.177257571771526*m.x149
+ 0.177257571771526*m.x150 + 0.271257571771526*m.x151 + 0.271257571771526*m.x152
+ 0.271257571771526*m.x153 + 0.271257571771526*m.x154 + 0.224257571771526*m.x155
+ 0.224257571771526*m.x156 + 0.224257571771526*m.x157 + 0.224257571771526*m.x158
+ 0.271257571771526*m.x159 + 0.271257571771526*m.x160 + 0.271257571771526*m.x161
+ 0.271257571771526*m.x162 + m.x391 == 68.1739205167211)
m.c225 = Constraint(expr= - 0.67545*m.x18 + 0.138242344*m.x123 + 0.138242344*m.x124 + 0.0882423440000001*m.x125
+ 0.0882423440000001*m.x126 + 0.188242344*m.x133 + 0.188242344*m.x134 + 0.175242344*m.x135
+ 0.175242344*m.x136 + 0.175242344*m.x137 + 0.175242344*m.x138 + 0.175242344*m.x139
+ 0.175242344*m.x140 + 0.175242344*m.x141 + 0.175242344*m.x142 + 0.175242344*m.x143
+ 0.175242344*m.x144 + 0.175242344*m.x145 + 0.175242344*m.x146 + 0.175242344*m.x147
+ 0.175242344*m.x148 + 0.175242344*m.x149 + 0.175242344*m.x150 + 0.175242344*m.x151
+ 0.175242344*m.x152 + 0.175242344*m.x153 + 0.175242344*m.x154 + 0.222242344*m.x155
+ 0.222242344*m.x156 + 0.222242344*m.x157 + 0.222242344*m.x158 + 0.175242344*m.x159
+ 0.175242344*m.x160 + 0.175242344*m.x161 + 0.175242344*m.x162 + m.x392 == 35.5890481828349)
# --- Constraints c226-c237: second-group requirement equations ---
# Parallel to c214-c225, over the second-group variables x163-x208 with
# dedicated variables x393-x404. Unlike c214-c225 these have no x7-x18
# term: each is just a weighted sum plus one x39x/x40x variable equal to a
# constant. Same common-fractional-tail coefficient pattern as above.
m.c226 = Constraint(expr= 0.178345303867403*m.x169 + 0.178345303867403*m.x170 + 0.0783453038674033*m.x171
+ 0.0783453038674033*m.x172 + 0.178345303867403*m.x179 + 0.178345303867403*m.x180
+ 0.0723453038674033*m.x181 + 0.0723453038674033*m.x182 + 0.0723453038674033*m.x183
+ 0.0723453038674033*m.x184 + 0.212345303867403*m.x185 + 0.259345303867403*m.x186
+ 0.212345303867403*m.x187 + 0.212345303867403*m.x189 + 0.259345303867403*m.x190
+ 0.212345303867403*m.x191 + 0.212345303867403*m.x193 + 0.259345303867403*m.x194
+ 0.212345303867403*m.x195 + 0.212345303867403*m.x197 + 0.259345303867403*m.x198
+ 0.212345303867403*m.x199 + 0.212345303867403*m.x201 + 0.259345303867403*m.x202
+ 0.212345303867403*m.x203 + 0.212345303867403*m.x205 + 0.259345303867403*m.x206
+ 0.212345303867403*m.x207 + m.x393 == 32.8373413501853)
m.c227 = Constraint(expr= 0.270787548066298*m.x169 + 0.270787548066298*m.x170 + 0.0207875480662983*m.x171
+ 0.0207875480662983*m.x172 + 0.120787548066298*m.x179 + 0.120787548066298*m.x180
+ 0.110787548066298*m.x181 + 0.110787548066298*m.x182 + 0.110787548066298*m.x183
+ 0.110787548066298*m.x184 + 0.344787548066298*m.x185 + 0.344787548066298*m.x187
+ 0.251787548066298*m.x188 + 0.344787548066298*m.x189 + 0.344787548066298*m.x191
+ 0.251787548066298*m.x192 + 0.344787548066298*m.x193 + 0.344787548066298*m.x195
+ 0.251787548066298*m.x196 + 0.344787548066298*m.x197 + 0.344787548066298*m.x199
+ 0.251787548066298*m.x200 + 0.344787548066298*m.x201 + 0.344787548066298*m.x203
+ 0.251787548066298*m.x204 + 0.344787548066298*m.x205 + 0.344787548066298*m.x207
+ 0.251787548066298*m.x208 + m.x394 == 57.1078093758598)
m.c228 = Constraint(expr= 0.407347881399632*m.x169 + 0.407347881399632*m.x170 + 0.0573478813996317*m.x175
+ 0.0573478813996317*m.x176 + 0.191347881399632*m.x181 + 0.191347881399632*m.x182
+ 0.191347881399632*m.x183 + 0.191347881399632*m.x184 + 0.425347881399632*m.x185
+ 0.425347881399632*m.x189 + 0.425347881399632*m.x193 + 0.425347881399632*m.x197
+ 0.425347881399632*m.x201 + 0.425347881399632*m.x205 + m.x395 == 73.4259111626889)
m.c229 = Constraint(expr= 0.13696977053407*m.x166 + 0.49996977053407*m.x169 + 0.49996977053407*m.x170
+ 0.09996977053407*m.x175 + 0.09996977053407*m.x176 + 0.32396977053407*m.x181
+ 0.32396977053407*m.x182 + 0.32396977053407*m.x183 + 0.32396977053407*m.x184 + m.x396
== 58.438744224694)
m.c230 = Constraint(expr= 0.129080668876611*m.x165 + 0.0360806688766114*m.x166 + 0.129080668876611*m.x167
+ 0.316080668876611*m.x168 + 0.192080668876611*m.x169 + 0.192080668876611*m.x170
+ 0.192080668876611*m.x175 + 0.192080668876611*m.x176 + 0.316080668876611*m.x181
+ 0.316080668876611*m.x182 + 0.316080668876611*m.x183 + 0.316080668876611*m.x184 + m.x397
== 54.46015010971)
m.c231 = Constraint(expr= 0.307336995948435*m.x163 + 0.307336995948435*m.x164 + 0.214336995948435*m.x165
+ 0.307336995948435*m.x166 + 0.214336995948435*m.x167 + 0.260336995948435*m.x168
+ 0.307336995948435*m.x173 + 0.307336995948435*m.x174 + 0.233336995948435*m.x175
+ 0.233336995948435*m.x176 + 0.0333369959484346*m.x177 + 0.260336995948435*m.x181
+ 0.260336995948435*m.x182 + 0.260336995948435*m.x183 + 0.260336995948435*m.x184 + m.x398
== 73.5908838818816)
m.c232 = Constraint(expr= 0.718245529281768*m.x163 + 0.718245529281768*m.x164 + 0.203245529281768*m.x165
+ 0.203245529281768*m.x166 + 0.203245529281768*m.x167 + 0.250245529281768*m.x168
+ 0.718245529281768*m.x173 + 0.718245529281768*m.x174 + 0.276245529281768*m.x175
+ 0.276245529281768*m.x176 + 0.0262455292817679*m.x178 + 0.250245529281768*m.x181
+ 0.250245529281768*m.x182 + 0.250245529281768*m.x183 + 0.250245529281768*m.x184 + m.x399
== 72.5947837053058)
m.c233 = Constraint(expr= 0.827680702025783*m.x163 + 0.827680702025783*m.x164 + 0.266680702025783*m.x165
+ 0.266680702025783*m.x166 + 0.266680702025783*m.x167 + 0.360680702025783*m.x168
+ 0.827680702025783*m.x173 + 0.827680702025783*m.x174 + 0.192680702025783*m.x175
+ 0.192680702025783*m.x176 + 0.192680702025783*m.x177 + 0.192680702025783*m.x178
+ 0.360680702025783*m.x181 + 0.360680702025783*m.x182 + 0.360680702025783*m.x183
+ 0.360680702025783*m.x184 + m.x400 == 87.4955408928402)
m.c234 = Constraint(expr= 0.885562467771639*m.x163 + 0.885562467771639*m.x164 + 0.418562467771639*m.x165
+ 0.418562467771639*m.x166 + 0.418562467771639*m.x167 + 0.277562467771639*m.x168
+ 0.150562467771639*m.x169 + 0.150562467771639*m.x170 + 0.200562467771639*m.x171
+ 0.200562467771639*m.x172 + 0.885562467771639*m.x173 + 0.885562467771639*m.x174
+ 0.150562467771639*m.x175 + 0.150562467771639*m.x176 + 0.250562467771639*m.x177
+ 0.250562467771639*m.x178 + 0.418562467771639*m.x181 + 0.418562467771639*m.x182
+ 0.418562467771639*m.x183 + 0.418562467771639*m.x184 + m.x401 == 120.952831010239)
m.c235 = Constraint(expr= 0.43861143038674*m.x163 + 0.43861143038674*m.x164 + 0.29761143038674*m.x165
+ 0.29761143038674*m.x166 + 0.29761143038674*m.x167 + 0.0646114303867403*m.x168
+ 0.22061143038674*m.x169 + 0.22061143038674*m.x170 + 0.12061143038674*m.x171
+ 0.12061143038674*m.x172 + 0.43861143038674*m.x173 + 0.43861143038674*m.x174
+ 0.37061143038674*m.x177 + 0.37061143038674*m.x178 + 0.27061143038674*m.x179
+ 0.27061143038674*m.x180 + 0.25161143038674*m.x181 + 0.25161143038674*m.x182
+ 0.25161143038674*m.x183 + 0.25161143038674*m.x184 + 0.15761143038674*m.x185
+ 0.15761143038674*m.x186 + 0.15761143038674*m.x187 + 0.15761143038674*m.x188
+ 0.15761143038674*m.x193 + 0.15761143038674*m.x194 + 0.15761143038674*m.x195
+ 0.15761143038674*m.x196 + m.x402 == 95.1225286879146)
m.c236 = Constraint(expr= 0.0709898519337017*m.x165 + 0.0709898519337017*m.x166 + 0.0709898519337017*m.x167
+ 0.126989851933702*m.x169 + 0.126989851933702*m.x170 + 0.0769898519337017*m.x171
+ 0.0769898519337017*m.x172 + 0.176989851933702*m.x177 + 0.226989851933702*m.x178
+ 0.176989851933702*m.x179 + 0.176989851933702*m.x180 + 0.350989851933702*m.x181
+ 0.350989851933702*m.x182 + 0.350989851933702*m.x183 + 0.350989851933702*m.x184
+ 0.163989851933702*m.x185 + 0.163989851933702*m.x186 + 0.163989851933702*m.x187
+ 0.163989851933702*m.x188 + 0.257989851933702*m.x189 + 0.257989851933702*m.x190
+ 0.257989851933702*m.x191 + 0.257989851933702*m.x192 + 0.163989851933702*m.x193
+ 0.163989851933702*m.x194 + 0.163989851933702*m.x195 + 0.163989851933702*m.x196
+ 0.257989851933702*m.x197 + 0.257989851933702*m.x198 + 0.257989851933702*m.x199
+ 0.257989851933702*m.x200 + 0.210989851933702*m.x201 + 0.210989851933702*m.x202
+ 0.210989851933702*m.x203 + 0.210989851933702*m.x204 + 0.257989851933702*m.x205
+ 0.257989851933702*m.x206 + 0.257989851933702*m.x207 + 0.257989851933702*m.x208 + m.x403
== 57.6379265410218)
m.c237 = Constraint(expr= 0.1196826*m.x169 + 0.1196826*m.x170 + 0.0696826*m.x171 + 0.0696826*m.x172 + 0.1696826*m.x179
+ 0.1696826*m.x180 + 0.1566826*m.x181 + 0.1566826*m.x182 + 0.1566826*m.x183 + 0.1566826*m.x184
+ 0.1566826*m.x185 + 0.1566826*m.x186 + 0.1566826*m.x187 + 0.1566826*m.x188 + 0.1566826*m.x189
+ 0.1566826*m.x190 + 0.1566826*m.x191 + 0.1566826*m.x192 + 0.1566826*m.x193 + 0.1566826*m.x194
+ 0.1566826*m.x195 + 0.1566826*m.x196 + 0.1566826*m.x197 + 0.1566826*m.x198 + 0.1566826*m.x199
+ 0.1566826*m.x200 + 0.2036826*m.x201 + 0.2036826*m.x202 + 0.2036826*m.x203 + 0.2036826*m.x204
+ 0.1566826*m.x205 + 0.1566826*m.x206 + 0.1566826*m.x207 + 0.1566826*m.x208 + m.x404
== 33.7923398929774)
m.c238 = Constraint(expr= m.x7 - 44.6*m.x265 <= 227)
m.c239 = Constraint(expr= m.x8 - 44.6*m.x265 <= 227)
m.c240 = Constraint(expr= m.x9 - 44.6*m.x265 <= 227)
m.c241 = Constraint(expr= m.x10 - 44.6*m.x265 <= 227)
m.c242 = Constraint(expr= m.x11 - 44.6*m.x265 <= 227)
m.c243 = Constraint(expr= m.x12 - 44.6*m.x265 <= 227)
m.c244 = Constraint(expr= m.x13 - 44.6*m.x265 <= 227)
m.c245 = Constraint(expr= m.x14 - 44.6*m.x265 <= 227)
m.c246 = Constraint(expr= m.x15 - 44.6*m.x265 <= 227)
m.c247 = Constraint(expr= m.x16 - 44.6*m.x265 <= 227)
m.c248 = Constraint(expr= m.x17 - 44.6*m.x265 <= 227)
m.c249 = Constraint(expr= m.x18 - 44.6*m.x265 <= 227)
m.c250 = Constraint(expr= - 0.756784608460846*m.x7 - 0.77064599459946*m.x8 - 0.799250855085509*m.x9
- 0.828989828982898*m.x10 - 0.787822371737174*m.x11 - 0.742*m.x12 - 0.73205400540054*m.x13
- 0.718831683168317*m.x14 - 0.766193208820882*m.x15 - 0.80076300630063*m.x16
- 0.773040234023402*m.x17 - 0.786019531953195*m.x18 + 1.5*m.x209 + 1.5*m.x213
+ 3.11739130434783E-6*m.x357 + 3.58840579710145E-6*m.x358 + 4.24927536231884E-6*m.x359
+ 2.64347826086957E-6*m.x360 + 1.98405797101449E-6*m.x361 + 2.64347826086957E-6*m.x362
+ 2.48260869565217E-5*m.x363 + 2.0768115942029E-5*m.x364 + 2.17246376811594E-6*m.x365
+ 1.41739130434783E-7*m.x366 + 1.51159420289855E-6*m.x367 + 1.7E-6*m.x368
== -1478.35221628198)
m.c251 = Constraint(expr= - m.x5 + m.x6 + 1.5*m.x220 + 1.5*m.x224 + 3.1182320441989E-6*m.x369
+ 3.54088397790055E-6*m.x370 + 4.10755064456722E-6*m.x371 + 2.55156537753223E-6*m.x372
+ 1.98489871086556E-6*m.x373 + 2.55156537753223E-6*m.x374 + 2.35182320441989E-5*m.x375
+ 1.99742173112339E-5*m.x376 + 2.12578268876611E-6*m.x377 + 1.4182320441989E-7*m.x378
+ 1.55911602209945E-6*m.x379 + 1.7E-6*m.x380 == -248.587050752182)
m.c252 = Constraint(expr= m.x31 - 250*m.x266 <= 274)
m.c253 = Constraint(expr= m.x32 - 250*m.x266 <= 274)
m.c254 = Constraint(expr= m.x33 - 250*m.x266 <= 274)
m.c255 = Constraint(expr= m.x34 - 250*m.x266 <= 274)
m.c256 = Constraint(expr= m.x35 - 250*m.x266 <= 274)
m.c257 = Constraint(expr= m.x36 - 250*m.x266 <= 274)
m.c258 = Constraint(expr= m.x37 - 250*m.x266 <= 274)
m.c259 = Constraint(expr= m.x38 - 250*m.x266 <= 274)
m.c260 = Constraint(expr= m.x39 - 250*m.x266 <= 274)
m.c261 = Constraint(expr= m.x40 - 250*m.x266 <= 274)
m.c262 = Constraint(expr= m.x41 - 250*m.x266 <= 274)
m.c263 = Constraint(expr= m.x42 - 250*m.x266 <= 274)
m.c264 = Constraint(expr= m.x43 - 250*m.x267 <= 117)
m.c265 = Constraint(expr= m.x44 - 250*m.x267 <= 117)
m.c266 = Constraint(expr= m.x45 - 250*m.x267 <= 117)
m.c267 = Constraint(expr= m.x46 - 250*m.x267 <= 117)
m.c268 = Constraint(expr= m.x47 - 250*m.x267 <= 117)
m.c269 = Constraint(expr= m.x48 - 250*m.x267 <= 117)
m.c270 = Constraint(expr= m.x49 - 250*m.x267 <= 117)
m.c271 = Constraint(expr= m.x50 - 250*m.x267 <= 117)
m.c272 = Constraint(expr= m.x51 - 250*m.x267 <= 117)
m.c273 = Constraint(expr= m.x52 - 250*m.x267 <= 117)
m.c274 = Constraint(expr= m.x53 - 250*m.x267 <= 117)
m.c275 = Constraint(expr= m.x54 - 250*m.x267 <= 117)
|
UTF-8
|
Python
| false | false | 114,776 |
py
| 1,335 |
indus-scalar.py
| 246 | 0.575373 | 0.237637 | 0 | 1,604 | 70.55611 | 120 |
vietodoo/course-rest-api
| 1,778,116,504,134 |
cec01723076770077aadb8b6c060c95fdbae0b14
|
f6171f08f645b7a50b8bac88d378f758eefe16e8
|
/src/profiles_project/profiles_api/views.py
|
46a60dc8d3fb7598841d11e3687f4861e2513acc
|
[
"MIT"
] |
permissive
|
https://github.com/vietodoo/course-rest-api
|
c5c63c8c756914a39a108ff5c8941f4671d7ee47
|
3872549a277bb85ae139968a8a0d2aa9dd7266dc
|
refs/heads/master
| 2023-01-27T15:58:08.882816 | 2020-12-16T09:42:00 | 2020-12-16T09:42:00 | 319,248,863 | 0 | 0 |
MIT
| true | 2020-12-07T08:15:07 | 2020-12-07T08:15:07 | 2020-10-27T15:28:13 | 2018-09-01T08:42:09 | 26 | 0 | 0 | 0 | null | false | false |
from django.shortcuts import render
from rest_framework.response import Response
from rest_framework import viewsets
# Create your views here.
from . import serializers, models
class UserProfileViewSet(viewsets.ModelViewSet):
"""Handles creating, reading and updating profiles."""
serializer_class = serializers.UserProfileSerializer
queryset = models.UserProfile.objects.all()
|
UTF-8
|
Python
| false | false | 395 |
py
| 5 |
views.py
| 3 | 0.792405 | 0.792405 | 0 | 14 | 27.214286 | 58 |
epidersis/olymps
| 8,899,172,252,460 |
fe7c5207350fb8f6c1b03445bcb0e1b68f35ce56
|
ba1061443f83d65033347c8e8896618005fbb32e
|
/617B/617B.py
|
56b9c0998828e4a64c10a5b00d28410758f34f57
|
[] |
no_license
|
https://github.com/epidersis/olymps
|
9388f690d4cc282bb5af2b8f57094a5bacce77eb
|
ff22f97d8cc7e6779dc8533e246d0d651e96033e
|
refs/heads/master
| 2022-07-31T01:50:42.950753 | 2022-07-18T21:51:47 | 2022-07-18T21:51:47 | 130,722,706 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
input()
nuts = input().replace(' ', '').replace('0', ' ').strip().split('1')
if len(nuts) == 1 and nuts[0] == '':
print(0)
exit(0)
mult = 1
for el in nuts:
mult *= len(el) + 1
print(mult)
|
UTF-8
|
Python
| false | false | 204 |
py
| 126 |
617B.py
| 125 | 0.509804 | 0.470588 | 0 | 13 | 14.692308 | 68 |
AhmadRazaAwan/Files
| 14,602,888,833,294 |
561daf012a698f8f1f05d424a1bfcc53bd7fac8e
|
f590fa124c2818297cdd44e5a6d8aeb5e0e71d6d
|
/dir_4.py
|
66267a75a26f64b0c3bb9c70092b9e01dfc7a637
|
[] |
no_license
|
https://github.com/AhmadRazaAwan/Files
|
45a7364e99048f78f9360bfb68b820409536d1f7
|
0e6bd0c18c085a8f9e021eb22fb160493ec42a18
|
refs/heads/master
| 2023-01-24T09:18:01.749087 | 2020-11-11T14:16:10 | 2020-11-11T14:16:10 | 311,991,721 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Removing a directory or a file
import os
# Print list of directory
print(os.listdir())
# rename name (old name), (new name)
os.rename("Allah", "My Allah")
# Again print list to check
print(os.listdir())
|
UTF-8
|
Python
| false | false | 208 |
py
| 40 |
dir_4.py
| 38 | 0.701923 | 0.701923 | 0 | 11 | 17.909091 | 36 |
aqp1234/gitVR
| 15,599,321,245,448 |
e2637dadaf41b8be05e286042a9994ece9ce677a
|
36957a9ce540846d08f151b6a2c2d582cff1df47
|
/VR/Python/Python36/Lib/encodings/mac_arabic.py
|
646fc054f035aa04b9eb5302bd543b89452cd093
|
[] |
no_license
|
https://github.com/aqp1234/gitVR
|
60fc952307ef413e396d31e0d136faffe087ed2b
|
e70bd82c451943c2966b8ad1bee620a0ee1080d2
|
refs/heads/master
| 2022-12-29T15:30:12.540947 | 2020-10-07T15:26:32 | 2020-10-07T15:26:32 | 290,163,043 | 0 | 1 | null | false | 2020-08-25T09:15:40 | 2020-08-25T08:47:36 | 2020-08-25T09:03:47 | 2020-08-25T09:15:40 | 0 | 0 | 1 | 0 |
C#
| false | false |
version https://git-lfs.github.com/spec/v1
oid sha256:f5c262f930f3b7d83466283347f8b0d7b5c7cbf18dd6fceb4faf93dbcd58839e
size 37165
|
UTF-8
|
Python
| false | false | 130 |
py
| 9,567 |
mac_arabic.py
| 1,742 | 0.884615 | 0.538462 | 0 | 3 | 42.333333 | 75 |
jirenuki-69/AlienStrike_RetributionDay
| 4,303,557,269,561 |
60417848444f0fc55351d733cbe17ad5d359a192
|
eca81659957eb096179e3dc992487e2ca7d882f7
|
/intro_LVL_2.py
|
ef6666886e9e5ff425cc0343d139156406532c3f
|
[] |
no_license
|
https://github.com/jirenuki-69/AlienStrike_RetributionDay
|
26e59abbda7ad831e72cdbfbde225e76fe0c9266
|
1c95349b14218630ca1a3efad8468410ecaf7821
|
refs/heads/main
| 2023-01-28T12:38:27.466690 | 2020-12-09T06:04:25 | 2020-12-09T06:04:25 | 310,120,201 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pygame, sys, const, LVL_2
from clases.Sound import Sound
from clases.Music import Music
from clases.Nave import Nave
from clases.Texto import Texto
def conseguir_nombre():
with open ("nombre.txt") as archivo:
for linea in archivo.readlines():
return str(linea.split("-")[0])
def intro_lvl_2(cursor, controller, difficulty, shields, vidas):
music = Music()
music.stop()
sound = Sound()
width = 1200
height = 800
size = (width, height)
screen = pygame.display.set_mode(size)
#Global values
background = pygame.image.load("assets/visual/gameplay_assets/mas_ciudad.png")
background = pygame.transform.scale(background, size)
dialogo = pygame.image.load("assets/visual/gameplay_assets/dialogo_negro.png")
get_ready = pygame.image.load("assets/visual/gameplay_assets/get_ready.png")
font = pygame.font.Font("fonts/Pixel LCD-7.ttf", 15)
clock = pygame.time.Clock()
fps = 60
cont = 0
cont2 = 0
dialogue_open = False
is_get_ready_opened = False
secs = 30
nombre = conseguir_nombre()
dialogo_intro = [
"Cada vez nos vamos adentrando mas a las fuerzas de los snatchers... se que eres un profesional pero ten cuidado.",
f"procura que tus balas sean certeras {nombre}, no pierdas tus escudos por tonteras.",
]
index = 0
texto = Texto(
dialogo_intro[index],
(int(width * 0.15), int(height * 0.9)),
font,
screen,
75,
const.WHITE,
)
nave = Nave(
(int(width * 0.50), int(height * 1.2)),
5,
size,
screen,
"assets/visual/gameplay_assets/main_ship.png"
)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN and dialogue_open:
if index + 1 == len(dialogo_intro):
if not is_get_ready_opened and dialogue_open:
sound.get_ready()
music.lvl_2()
is_get_ready_opened = True
dialogue_open = False
else:
sound.dialogue_change()
index += 1
texto.text = dialogo_intro[index]
elif event.type == pygame.MOUSEBUTTONDOWN:
if pygame.mouse.get_pressed()[0]:
if index + 1 == len(dialogo_intro):
if not is_get_ready_opened and dialogue_open:
sound.get_ready()
music.lvl_2()
is_get_ready_opened = True
dialogue_open = False
else:
sound.dialogue_change()
index += 1
texto.text = dialogo_intro[index]
elif event.type == pygame.JOYBUTTONDOWN:
if index + 1 == len(dialogo_intro):
if not is_get_ready_opened and dialogue_open:
sound.get_ready()
music.lvl_2()
is_get_ready_opened = True
dialogue_open = False
else:
sound.dialogue_change()
index += 1
texto.text = dialogo_intro[index]
screen.blit(background, [0 , 0])
screen.blit(nave.image, nave.rect)
if nave.rect.y != int(height * 0.9) - 60:
nave.rect.y -= nave.movementSpeed
if nave.rect.y == int(height * 0.9) - 60:
if not dialogue_open:
cont += 1
if cont >= secs and not is_get_ready_opened:
dialogue_open = True
if dialogue_open:
screen.blit(dialogo, [0, height - 200])
texto.show_text()
if is_get_ready_opened:
cont2 += 1
if cont2 >= secs * 2:
LVL_2.lvl_2(cursor, controller, difficulty, shields, vidas)
screen.blit(get_ready, [width / 2 - 150, height / 2 - 75])
pygame.display.flip()
clock.tick(fps)
pygame.quit()
|
UTF-8
|
Python
| false | false | 4,334 |
py
| 38 |
intro_LVL_2.py
| 35 | 0.506922 | 0.490309 | 0 | 128 | 32.859375 | 123 |
breenbo/tictactoe
| 10,522,669,902,094 |
f45faef60b43c4ada206e92da75a088f0eddfbf0
|
11e867eaabe3a8ab01b823696955ef3dd553d2aa
|
/ticTacToe.py
|
7a15ca3a3abe2ffbac91719765136fbd43a8c54d
|
[] |
no_license
|
https://github.com/breenbo/tictactoe
|
11c7b8a2730ca685200571210ebe293748a82cda
|
a0fc5d8fdcbead53af426ce6e45f75512d403383
|
refs/heads/master
| 2021-07-20T11:23:01.109958 | 2017-10-27T18:57:46 | 2017-10-27T18:57:46 | 108,582,801 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from random import *
def printBoard(liste, lenLine=3):
'''
liste : list of num or string
lenLine : length of the desired board
return : line to be printed to have the board
'''
counter = 1
line = ''
# set a counter and add \n every lenLine
for char in liste:
if counter % lenLine == 0:
line += '| ' + str(char) + ' |' + '\n'
else:
line += '| ' + str(char) + ' '
pass
counter += 1
return(line)
def playersNumberName():
'''
input : none
return : list with name(s) of player(s)
'''
playersNumber = '0'
while playersNumber != '1' and playersNumber != '2':
playersNumber = input("How many to play (choose 1 or 2 players) ?")
# control if number of players is correct :
if playersNumber == '2':
name1 = input("What's your name player 1 ?")
name2 = input("What's your name player 2 ?")
names = [name1, name2]
return(names)
else:
name = input("Who cares, but what's your name ?")
return([name])
def play(player, liste, marker):
'''
player : human or computer
liste : list of cases
return : list of played cases
take player or computer input, check if a int, in the good range, and if
the case is empty in the liste. Marker is defined by an external counter.
'''
# set game for computer player
if player == 'I':
# check if the case is empty
marker = 'O'
check = False
while not check:
case = randint(0,8)
if liste[case] == '.':
liste[case] = marker
check = True
else:
case = ''
check = False
# check if number entered by user, in the good range
while not check:
case = input('\n' + player + ', choose an empty box (1 to 9) :\n')
if case.isdigit():
if int(case) in range(1, 10):
case = int(case) - 1
if liste[case] == '.':
liste[case] = marker
check = True
return(liste)
def checkWin(liste, counter, name1='You', name2='I'):
'''
list : list of played case
name : name of players
counter : external counter to check if draw
return : win, lost or draw status depending of an external counter
'''
winList = ['123', '456', '789', '147', '258', '369', '159', '357']
resultX = ''
resultO = ''
for i in range(0, 9):
if liste[i] == 'X':
# add 1 to index iot compare to win list !!!!
resultX += str(i+1)
elif liste[i] == 'O':
resultO += str(i+1)
# check if one of the players have won
for win in winList:
if win in resultX:
return(name1 + ' win !')
elif win in resultO:
return(name2 + ' win !')
# set counter to 10 to be sure there is no win on last turn
elif counter == 10:
return("It's a draw !")
def printGame(names, intro):
if intro == 'yes':
print('\nPython TicTacToe Game\n=====================')
if len(names) == 2:
print("Ok, let's get to Rumble " + names[0] + " and " + names[1] + " !\n")
else:
print("Ok, let's fight, I'll crush you 'dear' " + names[0]
+ " !\nYou start, but I keep the O\n")
print('Reference Board\n')
print(printBoard(example))
if len(names) == 2:
print('The Battlefield\n')
else:
print('Your board of defeat\n')
print(printBoard(board))
##############################
example = range(1, 10)
text = '. '*9
emptyBoard = text.split()
board = emptyBoard
counter = 0
names = playersNumberName()
printGame(names, 'yes')
if len(names) == 2:
player1 = names[0]
player2 = names[1]
else:
player1 = 'You'
player2 = 'I'
while checkWin(board, counter) is None:
for player in [player1, player2]:
# check parity of counter to set marker
if counter % 2 == 0:
marker = 'X'
else:
marker = 'O'
# players play and print the board after each play
board = play(player, board, marker)
printGame(names, 'no')
# check if one of the player has won
if checkWin(board, counter) is None:
# if not, add 1 to counter to count the played turn
counter += 1
# check if it's the last turn
else:
# if there is a winner, break the loop and print him
break
if counter == 9:
# if so, without a winner, add 1 to counter iot print the draw
counter += 1
# break the loop and print the draw
break
print(checkWin(board, counter, player1, player2))
|
UTF-8
|
Python
| false | false | 4,826 |
py
| 2 |
ticTacToe.py
| 1 | 0.52528 | 0.506838 | 0 | 161 | 28.975155 | 86 |
apolopino/4G-Final-Backend
| 5,299,989,684,032 |
664c2173d95b4150dd20682d8301506a645bef64
|
4ef2f21bffc23336097fcb3b96b35c381ada0001
|
/src/admin.py
|
da4599b1954a435cf8b8b54e19925b27fe549c60
|
[] |
no_license
|
https://github.com/apolopino/4G-Final-Backend
|
2ee7920e02f940a9fbbf82eb25c65dbcb39fbfdb
|
56332592ae9aa877556e7a9d78166e1eab74e045
|
refs/heads/develop
| 2023-08-07T07:53:01.159833 | 2021-09-29T19:03:20 | 2021-09-29T19:03:20 | 396,526,383 | 0 | 3 | null | false | 2021-09-29T19:03:21 | 2021-08-15T23:04:35 | 2021-09-29T18:56:09 | 2021-09-29T19:03:20 | 4,684 | 0 | 1 | 0 |
Python
| false | false |
import os
from flask_admin import Admin
from models import db, User, ExtrasUsuarios, TodoUsuario, Extras, TemplateTodo, Dias, Desafios
from flask_admin.contrib.sqla import ModelView
def setup_admin(app):
app.secret_key = os.environ.get('FLASK_APP_KEY', 'sample key')
app.config['FLASK_ADMIN_SWATCH'] = 'cerulean'
admin = Admin(app, name='4Geeks Admin', template_mode='bootstrap3')
# Add your models here, for example this is how we add a the User model to the admin
admin.add_view(ModelView(User, db.session))
admin.add_view(ModelView(Desafios, db.session))
admin.add_view(ModelView(Dias, db.session))
admin.add_view(ModelView(Extras, db.session))
admin.add_view(ModelView(TemplateTodo, db.session))
admin.add_view(ModelView(TodoUsuario, db.session))
admin.add_view(ModelView(ExtrasUsuarios, db.session))
# You can duplicate that line to add mew models
# admin.add_view(ModelView(YourModelName, db.session))
|
UTF-8
|
Python
| false | false | 967 |
py
| 7 |
admin.py
| 5 | 0.730093 | 0.728025 | 0 | 22 | 43 | 94 |
DanielBok/copulae
| 11,175,504,926,900 |
490a2fd2ab96d565fe3ab89f83c015de672730eb
|
c8d98c2101a2932c4449183c9e8bd6501c57345f
|
/copulae/mixtures/gmc/summary.py
|
3486e2dca9475042b37b023f03d8bc8f02e0a6f4
|
[
"MIT"
] |
permissive
|
https://github.com/DanielBok/copulae
|
a9af8fa88a212a5436226a22d59799d671d78645
|
d48fbd064426605b8784684114844758e3ffc90d
|
refs/heads/master
| 2023-07-08T09:52:31.815899 | 2023-06-14T04:29:39 | 2023-06-14T05:22:31 | 165,516,660 | 131 | 30 |
MIT
| false | 2023-06-14T05:22:32 | 2019-01-13T14:43:39 | 2023-06-13T07:41:45 | 2023-06-14T05:22:31 | 1,888 | 113 | 24 | 7 |
Jupyter Notebook
| false | false |
import pandas as pd
from copulae.copula.summary import SummaryType
from .parameter import GMCParam
class Summary(SummaryType):
def __init__(self, params: GMCParam, fit_details: dict):
self.name = "Gaussian Mixture Copula"
self.params = params
self.fit = fit_details
def _repr_html_(self):
params = [f"<strong>{title}</strong>" + pd.DataFrame(values).to_html(header=False, index=False)
for title, values in [("Mixture Probability", self.params.prob),
("Means", self.params.means)]]
params.append(
f"<strong>Covariance</strong>" +
'<br/>'.join(
f"<div>Margin {i + 1}</div>{pd.DataFrame(c).to_html(header=False, index=False)}"
for i, c in enumerate(self.params.covs)
)
)
fit_details = ''
if self.fit['method'] is not None:
fit_details = f"""
<div>
<h3>Fit Details</h3>
<div>Algorithm: {self.fit['method']}</div>
</div>
""".strip()
html = f"""
<div>
<h2>{self.name} Summary</h2>
<div>{self.name} with {self.params.n_clusters} components and {self.params.n_dim} dimensions</div>
<hr/>
<div>
<h3>Parameters</h3>
{'<br/>'.join(params)}
</div>
{fit_details}
</div>
""".strip()
return html
def __str__(self):
return '\n'.join([
f"{self.name} Summary",
"=" * 80,
str(self.params)
])
|
UTF-8
|
Python
| false | false | 1,523 |
py
| 161 |
summary.py
| 116 | 0.523309 | 0.5174 | 0 | 54 | 27.203704 | 103 |
danmao124/url_shortener
| 3,530,463,150,810 |
bfdc6556190ee2b1a3c887f1908faba2bbed33c5
|
862edac0971f5b5449773b8a3419c2b005fc3eea
|
/link_shortener.py
|
e20f46cb6e87c1e78747de230d88bcdc8c11d049
|
[] |
no_license
|
https://github.com/danmao124/url_shortener
|
d2e093454c7f13bf6c0ad68dcc22f178648de7d2
|
a986e28831e491567708d4f2028b749cddb2cb27
|
refs/heads/master
| 2021-06-09T18:24:32.304005 | 2017-01-13T19:54:03 | 2017-01-13T19:54:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Flask, url_for, request, json, redirect
import MySQLdb, sys, ConfigParser
#Config reader helper method. Taken from online
def ConfigSectionMap(section):
dict1 = {}
options = Config.options(section)
for option in options:
try:
dict1[option] = Config.get(section, option)
if dict1[option] == -1:
DebugPrint("skip: %s" % option)
except:
print("exception on %s!" % option)
dict1[option] = None
return dict1
#Base62(0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ) encoding algorithm. Taken from online
def encode(num, alphabet="123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"):
if num == 0:
return alphabet[0]
arr = []
base = len(alphabet)
while num:
num, rem = divmod(num, base)
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr)
#Read in variable from settings.ini
Config = ConfigParser.ConfigParser()
Config.read("settings.ini")
host = ConfigSectionMap("Database")['host'] #your host, usually localhost
user = ConfigSectionMap("Database")['user'] #your username
password = ConfigSectionMap("Database")['passwd'] #your password
database_name = ConfigSectionMap("Database")['database_name'] #your database name
port = ConfigSectionMap("Server")['port'] #your port number
is_redirect = ConfigSectionMap("Server")['is_redirect'] #does the server redirect you to the url? Is either "ON" or "OFF"
#Connecting to the MySQL database. Set parameters in the ini file
try:
db = MySQLdb.connect(host, user, password, database_name)
cursor = db.cursor()
cursor.execute("""CREATE TABLE IF NOT EXISTS links (
link_id INT UNSIGNED NOT NULL AUTO_INCREMENT,
link varchar(255),
link_key varchar(64),
PRIMARY KEY (link_id))""")
except MySQLdb.Error:
sys.exit("ERROR IN DATABASE CONNECTION")
#Setting up the server.
app = Flask(__name__)
#Main page. User sends url post request and gets a encoded url as a response.
@app.route('/', methods = ['GET', 'POST'])
def api_root():
if request.method == 'GET':
return "yay it works"
elif request.method == 'POST' and request.headers['Content-Type'] == 'application/json':
dataDict = json.loads(request.data)
if 'url' in dataDict.keys():
try:
url = dataDict.get('url')
#check if url is in the table
cursor.execute("""SELECT * FROM links
WHERE link = %s""", (url,))
#if the url is not in the table, we add it and encode its key using the primary_id number
if cursor.rowcount == 0:
cursor.execute("""INSERT INTO links (link)
VALUES (%s)""", (url,))
key = encode(cursor.lastrowid)
cursor.execute("""UPDATE links SET link_key = %s
WHERE link_id = %s""", (key, cursor.lastrowid,))
db.commit()
return "{'response': 'localhost:" + port + "/" + key + "'}"
#if the url is in the table, we just return the link key
else:
return "{'response': 'localhost:" + port + "/" + cursor.fetchone()[2] + "'}"
except MySQLdb.Error:
return "{'response': 'Something blew up in the sql query'}"
else:
return "{'response': 'You need to have a url key in the json request. See the readme!'}"
else:
return "{'response': 'You need to send a json request. See the readme!'}";
#Encoded url page. Returns the website link.
@app.route('/<link_key>')
def api_link_key(link_key):
try:
cursor.execute("""SELECT * FROM links
WHERE link_key = %s""", (link_key,))
if cursor.rowcount == 0:
return "{'response': 'invalid key'}"
else:
if is_redirect == "OFF":
return "{'response': '" + cursor.fetchone()[1] + "'}"
else:
return redirect(cursor.fetchone()[1])
except MySQLdb.Error:
return "{'response': 'Something blew up in the sql query'}"
#Run the server!
app.run(port = port)
|
UTF-8
|
Python
| false | false | 3,928 |
py
| 3 |
link_shortener.py
| 1 | 0.641548 | 0.631619 | 0 | 109 | 35.018349 | 125 |
Vohsty/Awaards
| 18,047,452,615,234 |
a152c6643016f3dca7bd72009c365207438a3cd2
|
890a7db25425d21743199677fe79071b97934f78
|
/rating/views.py
|
0526a117ba302ce8bc9ad99ece6fe03f12fa31d7
|
[
"MIT"
] |
permissive
|
https://github.com/Vohsty/Awaards
|
841828523d4168c69c2cabca876071beaad99697
|
4add09ac6b48fd6963ad6f2b36f921530536a83c
|
refs/heads/master
| 2021-09-09T11:53:23.368667 | 2019-07-08T06:34:38 | 2019-07-08T06:34:38 | 194,614,928 | 0 | 1 |
MIT
| false | 2021-09-08T01:06:47 | 2019-07-01T06:43:00 | 2019-07-08T13:02:27 | 2021-09-08T01:06:45 | 37,212 | 0 | 1 | 4 |
Python
| false | false |
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404
from django.contrib.auth.decorators import login_required
from .models import Project, Profile, Rating, categories, technologies
from .forms import ProfileForm, UploadForm, RatingForm
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import ProfileSerializer, ProjectSerializer
from rest_framework import status
from .permissions import IsAdminOrReadOnly
from django.core.exceptions import ObjectDoesNotExist
@login_required(login_url='/accounts/login')
def index(request):
current_user = request.user
projects = Project.objects.order_by('-overall').all()
top = projects[0]
runners=Project.objects.all()[:4]
try:
current_user = request.user
profile =Profile.objects.get(user=current_user)
except ObjectDoesNotExist:
return redirect('edit')
return render(request, 'index.html', locals())
@login_required(login_url='/accounts/login')
def profile(request):
current_user=request.user
profile =Profile.objects.get(user=current_user)
projects = Project.objects.filter(user=current_user)
my_profile = Profile.objects.get(user=current_user)
return render(request, 'profile.html', locals())
@login_required(login_url='/accounts/login')
def edit_profile(request):
current_user = request.user
if request.method == 'POST':
form = ProfileForm(request.POST, request.FILES)
if form.is_valid():
prof = form.save(commit=False)
prof.user = current_user
prof.save()
return redirect('myprofile')
else:
form = ProfileForm()
return render(request, 'edit_profile.html', {'form': form, 'profile':profile})
@login_required(login_url='/accounts/login')
def new_project(request):
current_user = request.user
profile =Profile.objects.get(user=current_user)
if request.method == 'POST':
form = UploadForm(request.POST, request.FILES)
if form.is_valid():
image = form.save(commit=False)
image.user = current_user
image.save()
return redirect('index')
else:
form = UploadForm()
return render(request, 'new_project.html', {'form': form,'profile':profile})
@login_required(login_url='/accounts/login')
def project(request, project_id):
current_user = request.user
profile =Profile.objects.get(user=current_user)
message = "Thank you for voting"
try:
project = Project.objects.get(id=project_id)
except Project.DoesNotExist:
raise ObjectDoesNotExist()
total_design = 0
total_usability = 0
total_creativity = 0
total_content = 0
overall_score = 0
ratings = Rating.objects.filter(project=project_id)
if len(ratings) > 0:
users = len(ratings)
else:
users = 1
design = list(Rating.objects.filter(project=project_id).values_list('design',flat=True))
usability = list(Rating.objects.filter(project=project_id).values_list('usability',flat=True))
creativity = list(Rating.objects.filter(project=project_id).values_list('creativity',flat=True))
content = list(Rating.objects.filter(project=project_id).values_list('content',flat=True))
total_design=sum(design)/users
total_usability=sum(usability)/users
total_creativity=sum(creativity)/users
total_content=sum(content)/users
overall_score=(total_design+total_content+total_usability+total_creativity)/4
project.design = total_design
project.usability = total_usability
project.creativity = total_creativity
project.content = total_content
project.overall = overall_score
project.save()
if request.method == 'POST':
form = RatingForm(request.POST, request.FILES)
if form.is_valid():
rating = form.save(commit=False)
rating.project= project
rating.profile = profile
if not Rating.objects.filter(profile=profile, project=project).exists():
rating.overall_score = (rating.design+rating.usability+rating.creativity+rating.content)/4
rating.save()
else:
form = RatingForm()
return render(request, "project.html",{"project":project,"profile":profile,"ratings":ratings,"form":form, "message":message, 'total_design':total_design, 'total_usability':total_usability, 'total_creativity':total_creativity, 'total_content':total_content})
@login_required(login_url='/accounts/login')
def search(request):
current_user = request.user
profile =Profile.objects.get(user=current_user)
if 'project' in request.GET and request.GET["project"]:
search_term = request.GET.get("project")
projects = Project.search_project(search_term)
message = f"{search_term}"
return render(request, 'search.html', {"message":message, "projects":projects, 'profile':profile})
else:
message = "Please enter search term"
return render(request, 'search.html', {"message":message, "projects":projects,'profile':profile})
class ProfileList(APIView):
permission_classes = (IsAdminOrReadOnly,)
def get(self, request, format=None):
all_profiles = Profile.objects.all()
serializers = ProfileSerializer(all_profiles, many=True)
return Response(serializers.data)
def post(self, request, format=None):
serializers = ProfileSerializer(data=request.data)
if serializers.is_valid():
serializers.save()
return Response(serializers.data, status=status.HTTP_201_CREATED)
return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
class ProjectList(APIView):
permission_classes = (IsAdminOrReadOnly,)
def get(self, request, format=None):
all_projects = Project.objects.all()
serializers = ProjectSerializer(all_projects, many=True)
return Response(serializers.data)
def post(self, request, format=None):
serializers = ProfileSerializer(data=request.data)
if serializers.is_valid():
serializers.save()
return Response(serializers.data, status=status.HTTP_201_CREATED)
return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
class ProfileDescription(APIView):
def get_profile(self, pk):
try:
return Profile.objects.get(pk=pk)
except Profile.DoesNotExist:
return Http404
def get(self, request, pk, format=None):
profile = self.get_profile(pk)
serializers = ProfileSerializer(profile)
return Response(serializers.data)
class ProjectDescription(APIView):
def get_project(self, pk):
try:
return Project.objects.get(pk=pk)
except Project.DoesNotExist:
return Http404
def get(self, request, pk, format=None):
project = self.get_project(pk)
serializers = ProjectSerializer(project)
return Response(serializers.data)
|
UTF-8
|
Python
| false | false | 7,072 |
py
| 14 |
views.py
| 6 | 0.680713 | 0.676188 | 0 | 186 | 37.026882 | 261 |
Dantes-Shade/Code_Guild
| 7,507,602,862,481 |
3f0ef4909173fd3dd18a980f5a3fd4004ef0bd2c
|
6ad7763cfe3e3cbe9be5c0cf31fde4c88f79cc78
|
/IronEnclave/IronEnclave/iEaccounts/migrations/0002_auto_20200625_1846.py
|
be01eb85f1bb37a29c55606e380e67aaca5ac1b1
|
[] |
no_license
|
https://github.com/Dantes-Shade/Code_Guild
|
3766e4d063045444a110b56d712dee646058bc94
|
102a1f709796e6dff9431d38235e826a5a930ff3
|
refs/heads/master
| 2021-12-12T19:07:00.751277 | 2020-07-29T02:52:44 | 2020-07-29T02:52:44 | 243,426,674 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Generated by Django 3.0.7 on 2020-06-26 01:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('iEaccounts', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='profiles',
name='profile_img',
field=models.ImageField(blank=True, upload_to='profile_image'),
),
]
|
UTF-8
|
Python
| false | false | 410 |
py
| 93 |
0002_auto_20200625_1846.py
| 58 | 0.597561 | 0.55122 | 0 | 18 | 21.777778 | 75 |
michaelhe/micserver
| 16,183,436,787,908 |
768391c0f1ab11afbbad019cc5e346faf90991f6
|
78bc1cee3a9990e6f0601f6afaf275661413410e
|
/micsocket.py
|
662f47212e34ff14ed0c10ca880ed8fb22a27c56
|
[] |
no_license
|
https://github.com/michaelhe/micserver
|
07d9fdffadea7893564d6325d5e5d3d7121d825d
|
8897ead9c0d219bdcd0a08cff900b2324700ffc5
|
refs/heads/master
| 2021-01-19T20:30:23.124259 | 2017-05-02T01:35:29 | 2017-05-02T01:35:29 | 88,512,269 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import SocketServer
import logging
import time
logging.basicConfig(
level = logging.DEBUG,
format = '%(threadName)s | %(message)s'
)
class MicSocket(SocketServer.BaseRequestHandler):
def handle(self):
data = self.request[0].strip()
addr = self.request[1]
logging.debug('receive data %s from %s' % (data,self.client_address[0]))
if __name__=='__main__':
address = ('127.0.0.1',30000)
server = SocketServer.UDPServer(address, MicSocket)
server.serve_forever()
|
UTF-8
|
Python
| false | false | 518 |
py
| 14 |
micsocket.py
| 13 | 0.689189 | 0.660232 | 0 | 24 | 20.625 | 74 |
imsardine/learning
| 738,734,383,510 |
0ef6925e0bd4463b0f3233ac69a6fade52112fdc
|
b43c6c03eea348d68d6582c3594760bbe0ecaa08
|
/python/tests/test_enum.py
|
5e845a0f81191d2151135c9cff75f90b53924388
|
[
"MIT"
] |
permissive
|
https://github.com/imsardine/learning
|
1b41a13a4c71c8d9cdd8bd4ba264a3407f8e05f5
|
925841ddd93d60c740a62e12d9f57ef15b6e0a20
|
refs/heads/master
| 2022-12-22T18:23:24.764273 | 2020-02-21T01:35:40 | 2020-02-21T01:35:40 | 24,145,674 | 0 | 0 |
MIT
| false | 2022-12-14T20:43:28 | 2014-09-17T13:24:37 | 2020-02-21T01:35:54 | 2022-12-14T20:43:25 | 2,751 | 0 | 0 | 15 |
Python
| false | false |
import pytest
from enum import Enum
class Season(Enum):
SPRING = 1
SUMMER = 9
FALL = 3
WINTER = 5
AUTUMN = 3 # alias
def test_str_name_repr():
assert str(Season.SPRING) == 'Season.SPRING' # w/ type
assert Season.SPRING.name == 'SPRING' # w/o type
assert repr(Season.SPRING) == '<Season.SPRING: 1>' # w/ value
def test_type__is_enum_class():
assert type(Season.SPRING) == Season
assert isinstance(Season.SPRING, Season)
assert isinstance(Season.SPRING, Enum)
def test_iteration__in_definition_or_order_exclusing_aliases(py2):
names = [season.name for season in Season]
if py2: # in value order
assert names == ['SPRING', 'FALL', 'WINTER', 'SUMMER']
else: # in definition order
assert names == ['SPRING', 'SUMMER', 'FALL', 'WINTER']
def test_hashable():
try:
hash(Season.SPRING)
except TypeError:
pytest.fail()
favorites = {Season.SPRING: 'wind', Season.FALL: 'temp.'}
assert favorites[Season.SPRING] == 'wind'
def test_member_from_value__or_raise_valueerror():
assert Season(1) == Season.SPRING
with pytest.raises(ValueError) as excinfo:
Season(99)
assert str(excinfo.value) == '99 is not a valid Season'
def test_member_from_name__or_raise_keyerror():
assert Season['AUTUMN'] == Season.FALL
with pytest.raises(KeyError) as excinfo:
Season['UNKNOWN']
assert str(excinfo.value) == "'UNKNOWN'"
def test_alias__same_identity_value_and_name():
assert id(Season.FALL) == id(Season.AUTUMN)
assert Season.FALL == Season.AUTUMN
assert Season.FALL.value == Season.AUTUMN.value == 3
assert Season.FALL.name == Season.AUTUMN.name == 'FALL'
def test_custom_value_attrs():
class Color(Enum):
RED = ('RD', (255, 0, 0))
GREEN = ('GN', (0, 255, 0))
BLUE = ('BL', (0, 0, 255))
def __new__(cls, code, rgb):
obj = object.__new__(cls)
obj._value_ = code
return obj
def __init__(self, code, rgb):
self.code = code
self.rgb = rgb
blue = Color('BL')
assert blue == Color['BLUE'] == Color.BLUE
assert blue.value == 'BL'
assert blue.rgb == (0, 0, 255)
|
UTF-8
|
Python
| false | false | 2,243 |
py
| 313 |
test_enum.py
| 218 | 0.599643 | 0.584485 | 0 | 81 | 26.703704 | 66 |
jbitshine/hacker-rank
| 3,633,542,333,829 |
50e7d9a228c4e4db390343dce45687b2b544e5b5
|
fce0bffdca4eddc15d6288ae4a204de666819bb6
|
/python/itertools/01-product.py
|
c89d105eb7707b176e75c993b3573a242ddcf774
|
[] |
no_license
|
https://github.com/jbitshine/hacker-rank
|
c8d7e7445e87b837f4fae3096e87e4dcb549a705
|
27b746c8a0b399b4f69eb40218fcb7b8795c69c7
|
refs/heads/master
| 2016-09-14T06:09:40.811046 | 2016-09-05T18:58:57 | 2016-09-05T18:58:57 | 64,609,894 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# itertools.product() - Python
from itertools import product
for element in product(map(int, raw_input().strip().split()), map(int, raw_input().strip().split())):
print element,
|
UTF-8
|
Python
| false | false | 184 |
py
| 143 |
01-product.py
| 137 | 0.690217 | 0.690217 | 0 | 6 | 29.666667 | 101 |
rsd13/wetherPLN
| 19,069,654,799,922 |
d09894bac6abc98362cf528142327e4ef0d213e7
|
2714ce48b53d7c1c614b9f481f223b9e1956230b
|
/src/BBDD/BDHora.py
|
4e3aa535294f5f7d3a41802560da90608be9f74a
|
[] |
no_license
|
https://github.com/rsd13/wetherPLN
|
96a84921a2f91cd7e74131c4de980bfb80db87b9
|
dab6aad5e20733f438c11b27d18ecbcbf63c26d8
|
refs/heads/master
| 2020-08-05T05:21:55.026807 | 2019-10-02T18:27:08 | 2019-10-02T18:27:08 | 212,411,438 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sqlite3
import os
from src.Dataset.Hora import Hora
from llvmlite.tests.test_binding import asm_inlineasm
class BDHora:
#hora,dai,mes,año,estadoCielo,precipitacion,probPreci,probTormenta,nieve,probNieve
#temperatura,sensTeermica,humedadRelativa,velocidadV,rachaMax,direcionViento,codLocalidad
#codProvincia
def __init__(self):
#ruta de la base de datos
dir_path = os.path.dirname(os.path.abspath(__file__))
self.bbdd = sqlite3.connect(dir_path + "/Weather.db",timeout=10)
self.bbdd.row_factory = sqlite3.Row
self.cursor = self.bbdd.cursor()
#TIENES QUE REVISAR LOS TIEMPOS RELACIONADOS CON EL VOIENTO
def insertHora(self,hora):
#comprobamos si existe
self.cursor.execute("select * " +
"from hora " +
"where hora=? and dia=? and mes=? and año=? and codProvincia=?" +
" and codLocalidad=?",(
hora.hora,
hora.fecha.dia,
hora.fecha.mes,
hora.fecha.año,
hora.codProvincia,
hora.codLocalidad))
lineas = self.cursor.fetchall()
#si no existe esa fecha se inserta
if len(lineas) == 0:
self.cursor.execute("INSERT INTO hora VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",(
hora.hora,
hora.fecha.dia,
hora.fecha.mes,
hora.fecha.año,
hora.estadoCielo,
hora.precipitacion,
hora.probPrecipitacion,
hora.probTormenta,
hora.nieve,
hora.probNieve,
hora.temperatura,
hora.sensTermica,
hora.humedadRelativa,
hora.velocidadViento,
hora.direccionViento,
hora.rachaMax,
hora.codLocalidad,
hora.codProvincia))
self.bbdd.commit()
def getCodProvincia(self,ciudad):
if(ciudad == "ALACANT/ALICANTE"): ciudad = "Alicante/Alacant"
if(ciudad == "TENERIFE"): ciudad = "Santa Cruz de Tenerife"
self.cursor.execute("Select codigo FROM Provincia where nombre like ?",(
ciudad,))
lineas = self.cursor.fetchall()
return lineas[0][0]
def getNube(self,ciudad,fecha):
fechas = fecha.split("-")
anyo = fechas[0]
mes = fechas[1]
dia = fechas[2]
codProvincia = self.getCodProvincia(ciudad)
self.cursor.execute("Select hora, estadoCielo FROM hora where dia=? and mes=? and año=? and codProvincia = ?",(
dia,
mes,
anyo,
codProvincia))
lineas = self.cursor.fetchall()
dicci ={}
list = []
sum = 0
for linea in lineas:
hora = linea[0]
estado = linea[1]
dicci[hora] = estado
#list.append(datos)
sum+= 1
if(sum == 24):
list.append(dicci)
sum = 0
dicci ={}
return list
def getViento(self,ciudad,fecha):
fechas = fecha.split("-")
anyo = fechas[0]
mes = fechas[1]
dia = fechas[2]
codProvincia = self.getCodProvincia(ciudad)
self.cursor.execute("Select hora, velocidadViento, direccionViento, rachaMax" +
" FROM hora where dia=? and mes=? and año=? and codProvincia = ?",(
dia,
mes,
anyo,
codProvincia))
lineas = self.cursor.fetchall()
dicVelocidad ={}
dicDirecion = {}
dicRacha = {}
listTotal = []
listPrecipitacion = []
listProbPrecipitacion = []
listProbTormenta = []
sum = 0
for linea in lineas:
hora = linea[0]
velocidad = linea[1]
direcion = linea[2]
racha = linea[3]
dicVelocidad[hora] = velocidad
dicDirecion[hora] = direcion
dicRacha[hora] = racha
sum+= 1
if(sum == 24):
listPrecipitacion.append(dicVelocidad)
listProbPrecipitacion.append(dicDirecion)
listProbTormenta.append(dicRacha)
sum = 0
dicVelocidad ={}
dicDirecion = {}
dicRacha = {}
listTotal.append(listPrecipitacion)
listTotal.append(listProbPrecipitacion)
listTotal.append(listProbTormenta)
return listTotal
def getPrecipitacion(self,ciudad,fecha):
fechas = fecha.split("-")
anyo = fechas[0]
mes = fechas[1]
dia = fechas[2]
codProvincia = self.getCodProvincia(ciudad)
self.cursor.execute("Select hora, precipitacion, probPrecipitacion, probTormenta, nieve, probNieve" +
" FROM hora where dia=? and mes=? and año=? and codProvincia = ?",(
dia,
mes,
anyo,
codProvincia))
lineas = self.cursor.fetchall()
dicPrecipitacion ={}
dicProbPrecipitacion = {}
dictProbTormenta = {}
dicNieve = {}
dicProbNieve = {}
listTotal = []
listPrecipitacion = []
listProbPrecipitacion = []
listProbTormenta = []
listNieve = []
listProbNieve = []
sum = 0
for linea in lineas:
hora = linea[0]
precipitacion = linea[1]
probPrecipitacion = linea[2]
probTormenta = linea[3]
nieve = linea[4]
probNieve = linea[5]
dicPrecipitacion[hora] = precipitacion
dicProbPrecipitacion[hora] = probPrecipitacion
dictProbTormenta[hora] = probTormenta
dicNieve[hora] = nieve
dicProbNieve[hora] = probNieve
sum+= 1
if(sum == 24):
listPrecipitacion.append(dicPrecipitacion)
listProbPrecipitacion.append(dicProbPrecipitacion)
listProbTormenta.append(dictProbTormenta)
listNieve.append(dicNieve)
listProbNieve.append(dicProbNieve)
sum = 0
dicPrecipitacion ={}
dicProbPrecipitacion = {}
dictProbTormenta = {}
dicNieve = {}
dicProbNieve = {}
listTotal.append(listPrecipitacion)
listTotal.append(listProbPrecipitacion)
listTotal.append(listProbTormenta)
listTotal.append(listNieve)
listTotal.append(listProbNieve)
return listTotal
def getCiudades(self,codProvincia):
self.cursor.execute("SELECT codigo,nombre" +
" FROM LOCALIDAD " +
" WHERE codprovincia= "+ codProvincia)
rows = self.cursor.fetchall()
ciudades = []
for row in rows:
item = (row[0],row[1])
ciudades.append(item)
return ciudades
def getTemperatura(self,provincia,fecha,esMayor):
fechas = fecha.split("-")
anyo = fechas[0]
mes = fechas[1]
dia = fechas[2]
result = []
dicHora ={}
codProvincia = self.getCodProvincia(provincia)
ciudades = self.getCiudades(codProvincia)
lista = []
for ciudad in ciudades:
codLocalidad = ciudad[0]
nombre = ciudad[1]
self.cursor.execute(" SELECT temperatura,hora " +
" FROM Hora,localidad " +
" WHERE hora.codProvincia = localidad.codProvincia and hora.codLocalidad = localidad.codigo " +
" and localidad.codigo=? and localidad.codProvincia =? " +
" and dia=? and mes=? and año=?",(
codLocalidad,
codProvincia,
dia,
mes,
anyo))
dic = {}
dicHora ={}
temperatura = []
rows = self.cursor.fetchall()
mayor = -999
menor = 999
for row in rows:
if esMayor:
dato = int(row[0])
if dato > mayor:
mayor = dato
if dato < menor:
menor = dato
else:
lista.append(row[0])
if esMayor:
result.append(nombre +":" + str(mayor) + ":" + str(menor))
else:
result = lista
return result
|
UTF-8
|
Python
| false | false | 9,968 |
py
| 233 |
BDHora.py
| 31 | 0.443574 | 0.437651 | 0 | 333 | 28.372372 | 123 |
elioudakis/COMP211_Software-development-tools-and-systems-programming
| 13,606,456,397,013 |
e8d32e18a5a18fe73366729d6dc9aa106c0dea92
|
cf618901be5bb4fb8c397a767b86022982bd2e90
|
/2_Python exercise/computeSales.py
|
cedb6adee6714a539ca5907aaaa5972206777b18
|
[] |
no_license
|
https://github.com/elioudakis/COMP211_Software-development-tools-and-systems-programming
|
d0c304399bed1119216607bde287f443c869d68b
|
0307a93c62339102efa18de16a3af5ce3f21c7fb
|
refs/heads/master
| 2020-12-30T04:44:16.308196 | 2020-02-07T07:45:10 | 2020-02-07T07:45:10 | 238,864,810 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
##
##Author: elioudakis
##A.M.:
##email: elioudakis@isc.tuc.gr
##
###################################################
#
#function checkFile(arg1, arg2)
#The function checkFile gets two arguments, the input file (which contains correct and incorrect receipts) and the output file.
#The correct receipts are appended to the output file.
#When the user choices 2 or 3 in the main menu, the program will work with the output file.
#
###################################################
def checkFile(input_File):
collected_receipts_file=open('tmp.txt', 'a+',encoding='utf-8')
rec_list=input_File.read().split()
startStopIndexes=[]
tmp=[]
for i in range(len(rec_list)):
if not rec_list[i].find('-') and len(set(rec_list[i]))==1: #Check that the lines with "---" contain only this character
startStopIndexes.append(i)
for i in range(len(startStopIndexes)-1):
for j in range(startStopIndexes[i]+1, startStopIndexes[i+1]): #The part of the list between two "----"
tmp.append(rec_list[j].upper())
#The list tmp contains all the fields of ONE receipt.
totalAmount=0.0
if not(len(tmp)%4==0):
tmp=[]
continue
if not ((tmp[0]=='ΑΦΜ:') and (len(tmp[1])==10) and tmp[1].isdigit()):
tmp=[]
continue
if (tmp.count('ΑΦΜ:')!=1): ##the line for AFM exists more than once
tmp=[]
continue
if (tmp.count('ΣΥΝΟΛΟ:')!=1): ##the line for total amount exists more than once
tmp=[]
continue
if not (tmp[-2]=='ΣΥΝΟΛΟ:'):
tmp=[]
continue
for k in range(2, len(tmp)-2, 4): ##the lines with the products
#we increment the counter by 4, to access the four fields of a product each time
#cast. Previously they were strings
if (abs(float(tmp[k+3])-float(float(tmp[k+2])*float(tmp[k+1])))<0.0001):
totalAmount=float(totalAmount+float(tmp[k+3]))##The product has right calculated total price. We add it to the total counter
else:##wrong total price in one of the products of the receipt
tmp[-1]=-1 #We set it to -1 in order to do the comparison
break
if not (abs(totalAmount-float(tmp[-1]))<0.0001):##wrongly calculated total amount
tmp=[]
continue
##If we have reached here, the receipt is correct. We will write it to the total receipts file.
##
##The function "write" supports only 1 argument, so we will place manually the spaces and the newline
##
collected_receipts_file.write('--\n')
collected_receipts_file.write(tmp[0])
collected_receipts_file.write(' ')
collected_receipts_file.write(tmp[1])
collected_receipts_file.write('\n')
for a in range(2, len(tmp)-2, 4):
collected_receipts_file.write(tmp[a])
collected_receipts_file.write(' ')
collected_receipts_file.write(tmp[a+1])
collected_receipts_file.write(' ')
collected_receipts_file.write(tmp[a+2])
collected_receipts_file.write(' ')
collected_receipts_file.write(tmp[a+3])
collected_receipts_file.write('\n')
collected_receipts_file.write('--\n')
collected_receipts_file.flush()
tmp=[] ##Re-initialize the list, to have it empty in the next loop
collected_receipts_file.close()#close the file, to update its content
def choice2():
collected_receipts_file=open('tmp.txt',encoding='utf-8') ##we had to close and re-open the file, to update its content
listOfLines=[]
collected_receipts_file.seek(0)
for line in collected_receipts_file:
listOfLines.append(line.split())
data={} #defining a dictionary
for i in listOfLines:
if len(i)==1: #the lines with ------
continue #do nothing
elif len(i)==2: #the lines with the AFM
if i[0]=="ΑΦΜ:":
afm=i[1]
continue
else: ##the lines with products
prodName=i[0]
totalPrice=i[3]
prodName=prodName.replace(":", "") #throw away the ':' which each prodName has
prodName=prodName.upper() #convert prodName to capitals
if prodName in data.keys():
tmp=data[prodName]
if afm in tmp.keys():
oldTotal=tmp[afm]
totalPrice=float(totalPrice)+float(oldTotal)
del tmp[afm]
data[prodName].update({afm:float(totalPrice)})
else:
data.update({prodName:{afm:float(totalPrice)}})
prodChoice=input('Give the product\'s name... ')
prodChoice=prodChoice.upper()
ToPrint=sorted(data[prodChoice])##list with the sorted keys of the dictionary data[prodChoice])
for i in ToPrint:
print(i,"%0.2f" % data[prodChoice][i])
collected_receipts_file.close()
def choice3():
collected_receipts_file=open('tmp.txt',encoding='utf-8') ##we had to close and re-open the file, to update its content
listOfLines=[]
collected_receipts_file.seek(0)
for line in collected_receipts_file:
listOfLines.append(line.split())
data={} #defining a dictionary
for i in listOfLines:
if len(i)==1: #the lines with ------
continue #do nothing
elif len(i)==2: #the lines with the AFM
if i[0]=="ΑΦΜ:":
afm=i[1]
continue
else: ##the lines with products
prodName=i[0]
totalPrice=i[3]
prodName=prodName.replace(":", "") #throw away the ':' which each prodName has
prodName=prodName.upper() #convert prodName to capitals
if afm in data.keys():
tmp=data[afm]
if prodName in tmp.keys():
oldTotal=tmp[prodName]
totalPrice=float(totalPrice)+float(oldTotal)
del tmp[prodName]
data[afm].update({prodName:float(totalPrice)})
else:
data.update({afm:{prodName:float(totalPrice)}})
afmChoice=input('Give the afm... ') #afm is used as a str
unsortedToPrintList=data[afmChoice]
ToPrint=sorted(data[afmChoice])##ist with the sorted keys of the dictionary data[afmChoice])
for i in ToPrint:
print(i,"%0.2f" % data[afmChoice][i])
collected_receipts_file.close()
def main():
while True:
try:
choice=int(input('Give your preference: (1: read new input file, 2: print statistics for a specific product, 3: print statistics for a specific AFM, 4: exit the program)'))
except:
continue
if choice==1:
input_File_Name=input('Please enter the file\'s name...')
try:
input_File=open(input_File_Name, 'r',encoding='utf-8')
checkFile(input_File)
input_File.close()
continue
except FileNotFoundError:
continue
elif choice==2:
try:
choice2()
continue
except:
continue
elif choice==3:
try:
choice3()
continue
except:
continue
elif choice==4:
import os
import sys
try:
os.remove('tmp.txt')
sys.exit(0)
except:
sys.exit(0)
else:
continue #simply ask again for a number
if __name__ == "__main__":
main()
|
UTF-8
|
Python
| false | false | 8,489 |
py
| 7 |
computeSales.py
| 3 | 0.507738 | 0.49746 | 0 | 229 | 35.965066 | 184 |
robin3773/Codeforces-Problem-Solution-in-Python-3
| 9,637,906,641,865 |
192928ee813f38faef877eb0e89debb7652cd4fc
|
ee1e6c0c2234387b9040527206a01b4b60587c48
|
/Type A/996A - Hit the Lottery.py
|
8ce20e61feeffbd54e0a70c0d2dd8c0ab3367923
|
[] |
no_license
|
https://github.com/robin3773/Codeforces-Problem-Solution-in-Python-3
|
2d8e7cdf11e4823c1a8fe64dad9af53211132d5f
|
9bb5e6cdf64fe0cf6628c40fd64324b70acc0cb9
|
refs/heads/master
| 2022-11-29T19:19:04.550904 | 2020-08-05T14:44:03 | 2020-08-05T14:44:03 | 276,883,825 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
n = int(input())
number_of_bills = 0
if n >= 100:
number_of_bills += n // 100
n = n % 100
if n >= 20:
number_of_bills += n // 20
n = n % 20
if n >= 10:
number_of_bills += n // 10
n = n % 10
if n >= 5:
number_of_bills += n // 5
n = n % 5
number_of_bills += n
print(number_of_bills)
|
UTF-8
|
Python
| false | false | 337 |
py
| 150 |
996A - Hit the Lottery.py
| 149 | 0.459941 | 0.385757 | 0 | 19 | 15.631579 | 31 |
enridaga/data-journey
| 5,592,047,460,680 |
652bd68cf5e3580861a3dfc96180decf3f25cf66
|
23ffdb532938efb80b4710b63b488af2f4d31224
|
/sources/what-makes-a-kaggler-valuable.py
|
035c60b1e78fac563d936d0cb9a93df424243d9b
|
[] |
no_license
|
https://github.com/enridaga/data-journey
|
6865622739d318dfd4e1157a2d535b3478d0ed94
|
252353a61874005cd0d8854a61a0caf3c0bc9671
|
refs/heads/master
| 2021-07-19T14:00:53.824896 | 2021-06-21T10:34:28 | 2021-06-21T10:34:28 | 250,588,858 | 5 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import pandas as pd
# Loading the multiple choices dataset, we will not look to the free form data on this study
mc = pd.read_csv('../input/kaggle-survey-2018/multipleChoiceResponses.csv', low_memory=False)
# Separating questions from answers
# This Series stores all questions
mcQ = mc.iloc[0,:]
# This DataFrame stores all answers
mcA = mc.iloc[1:,:]
# removing everyone that took less than 4 minutes or more than 600 minutes to answer the survey
less3 = mcA[round(mcA.iloc[:,0].astype(int) / 60) <= 4].index
mcA = mcA.drop(less3, axis=0)
more300 = mcA[round(mcA.iloc[:,0].astype(int) / 60) >= 600].index
mcA = mcA.drop(more300, axis=0)
# removing gender trolls, because we noticed from other kernels thata there are some ouliers here
gender_trolls = mcA[(mcA.Q1 == 'Prefer to self-describe') | (mcA.Q1 == 'Prefer not to say')].index
mcA = mcA.drop(list(gender_trolls), axis=0)
# removing student trolls, because a student won't make more than 250k a year.
student_trolls = mcA[((mcA.Q6 == 'Student') & (mcA.Q9 > '500,000+')) | \
((mcA.Q6 == 'Student') & (mcA.Q9 > '400-500,000')) | \
((mcA.Q6 == 'Student') & (mcA.Q9 > '300-400,000')) | \
((mcA.Q6 == 'Student') & (mcA.Q9 > '250-300,000'))].index
mcA = mcA.drop(list(student_trolls), axis=0)
# dropping all NaN and I do not wish to disclose my approximate yearly compensation, because we are only interested in respondents that revealed their earnings
mcA = mcA[~mcA.Q9.isnull()].copy()
not_disclosed = mcA[mcA.Q9 == 'I do not wish to disclose my approximate yearly compensation'].index
mcA = mcA.drop(list(not_disclosed), axis=0)
# Creating a table with personal data
personal_data = mcA.iloc[:,:13].copy()
# renaming columns
cols = ['survey_duration', 'gender', 'gender_text', 'age', 'country', 'education_level', 'undergrad_major', 'role', 'role_text',
'employer_industry', 'employer_industry_text', 'years_experience', 'yearly_compensation']
personal_data.columns = cols
# Drop text and survey_duration columns
personal_data.drop(['survey_duration', 'gender_text', 'role_text', 'employer_industry_text'], axis=1, inplace=True)
personal_data.head(3)
from pandas.api.types import CategoricalDtype
# transforming compensation into category type and ordening the values
categ = ['0-10,000', '10-20,000', '20-30,000', '30-40,000', '40-50,000',
'50-60,000', '60-70,000', '70-80,000', '80-90,000', '90-100,000',
'100-125,000', '125-150,000', '150-200,000', '200-250,000', '250-300,000',
'300-400,000', '400-500,000', '500,000+']
cat_type = CategoricalDtype(categories=categ, ordered=True)
personal_data.yearly_compensation = personal_data.yearly_compensation.astype(cat_type)
# Doing this we are transforming the category "I do not wish to disclose my approximate yearly compensation" into NaN
# transforming age into category type and sorting the values
categ = ['18-21', '22-24', '25-29', '30-34', '35-39', '40-44',
'45-49', '50-54', '55-59', '60-69', '70-79', '80+']
cat_type = CategoricalDtype(categories=categ, ordered=True)
personal_data.age = personal_data.age.astype(cat_type)
# transforming years of experience into category type and sorting the values
categ = ['0-1', '1-2', '2-3', '3-4', '4-5', '5-10',
'10-15', '15-20', '20-25', '25-30', '30+']
cat_type = CategoricalDtype(categories=categ, ordered=True)
personal_data.years_experience = personal_data.years_experience.astype(cat_type)
# transforming education level into category type and sorting the values
categ = ['No formal education past high school', 'Some college/university study without earning a bachelor’s degree',
'Professional degree', 'Bachelor’s degree', 'Master’s degree', 'Doctoral degree', 'I prefer not to answer']
cat_type = CategoricalDtype(categories=categ, ordered=True)
personal_data.education_level = personal_data.education_level.astype(cat_type)
personal_data.yearly_compensation.value_counts(dropna=False, sort=False)
compensation = personal_data.yearly_compensation.str.replace(',', '').str.replace('500000\+', '500-500000').str.split('-')
personal_data['yearly_compensation_numerical'] = compensation.apply(lambda x: (int(x[0]) * 1000 + int(x[1]))/ 2) / 1000 # it is calculated in thousand dollars
print('Dataset Shape: ', personal_data.shape)
personal_data.head(3)
# Finding the compensation that separates the Top 20% most welll paid from the Bottom 80%
top20flag = personal_data.yearly_compensation_numerical.quantile(0.8)
top20flag
# Creating a flag to identify who belongs to the Top 20%
personal_data['top20'] = personal_data.yearly_compensation_numerical > top20flag
# creating data for future mapping of values
top20 = personal_data.groupby('yearly_compensation', as_index=False)['top20'].min()
# Some helper functions to make our plots cleaner with Plotly
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
from plotly import tools
init_notebook_mode(connected=True)
def gen_xaxis(title):
"""
Creates the X Axis layout and title
"""
xaxis = dict(
title=title,
titlefont=dict(
color='#AAAAAA'
),
showgrid=False,
color='#AAAAAA',
)
return xaxis
def gen_yaxis(title):
"""
Creates the Y Axis layout and title
"""
yaxis=dict(
title=title,
titlefont=dict(
color='#AAAAAA'
),
showgrid=False,
color='#AAAAAA',
)
return yaxis
def gen_layout(charttitle, xtitle, ytitle, lmarg, h, annotations=None):
"""
Creates whole layout, with both axis, annotations, size and margin
"""
return go.Layout(title=charttitle,
height=h,
width=800,
showlegend=False,
xaxis=gen_xaxis(xtitle),
yaxis=gen_yaxis(ytitle),
annotations = annotations,
margin=dict(l=lmarg),
)
def gen_bars(data, color, orient):
"""
Generates the bars for plotting, with their color and orient
"""
bars = []
for label, label_df in data.groupby(color):
if orient == 'h':
label_df = label_df.sort_values(by='x', ascending=True)
if label == 'a':
label = 'lightgray'
bars.append(go.Bar(x=label_df.x,
y=label_df.y,
name=label,
marker={'color': label},
orientation = orient
)
)
return bars
def gen_annotations(annot):
"""
Generates annotations to insert in the chart
"""
if annot is None:
return []
annotations = []
# Adding labels
for d in annot:
annotations.append(dict(xref='paper', x=d['x'], y=d['y'],
xanchor='left', yanchor='bottom',
text= d['text'],
font=dict(size=13,
color=d['color']),
showarrow=False))
return annotations
def generate_barplot(text, annot_dict, orient='v', lmarg=120, h=400):
"""
Generate the barplot with all data, using previous helper functions
"""
layout = gen_layout(text[0], text[1], text[2], lmarg, h, gen_annotations(annot_dict))
fig = go.Figure(data=gen_bars(barplot, 'color', orient=orient), layout=layout)
return iplot(fig)
# Counting the quantity of respondents per compensation
barplot = personal_data.yearly_compensation.value_counts(sort=False).to_frame().reset_index()
barplot.columns = ['yearly_compensation', 'qty']
# mapping back to get top 20% label
barplot = barplot.merge(top20, on='yearly_compensation')
barplot.columns = ['x', 'y', 'top20']
# apply color for top 20% and bottom 80%
barplot['color'] = barplot.top20.apply(lambda x: 'mediumaquamarine' if x else 'lightgray')
# Create title and annotations
title_text = ['<b>How Much Does Kagglers Get Paid?</b>', 'Yearly Compensation (USD)', 'Quantity of Respondents']
annotations = [{'x': 0.06, 'y': 2200, 'text': '80% of respondents earn up to USD 90k','color': 'gray'},
{'x': 0.51, 'y': 1100, 'text': '20% of respondents earn more than USD 90k','color': 'mediumaquamarine'}]
# call function for plotting
generate_barplot(title_text, annotations)
# creating masks to identify students and not students
is_student_mask = (personal_data['role'] == 'Student') | (personal_data['employer_industry'] == 'I am a student')
not_student_mask = (personal_data['role'] != 'Student') & (personal_data['employer_industry'] != 'I am a student')
# Counting the quantity of respondents per compensation (where is student)
barplot = personal_data[is_student_mask].yearly_compensation.value_counts(sort=False).to_frame().reset_index()
barplot.columns = ['yearly_compensation', 'qty']
# mapping back to get top 20%
barplot.columns = ['x', 'y',]
barplot['highlight'] = barplot.x != '0-10,000'
# applying color
barplot['color'] = barplot.highlight.apply(lambda x: 'lightgray' if x else 'crimson')
# title and annotations
title_text = ['<b>Do Students Get Paid at All?</b><br><i>only students</i>', 'Yearly Compensation (USD)', 'Quantity of Respondents']
annotations = [{'x': 0.06, 'y': 1650, 'text': '75% of students earn up to USD 10k','color': 'crimson'}]
# ploting
generate_barplot(title_text, annotations)
# Finding the compensation that separates the Top 20% most welll paid from the Bottom 80% (without students)
top20flag_no_students = personal_data[not_student_mask].yearly_compensation_numerical.quantile(0.8)
top20flag_no_students
# Creating a flag for Top 20% when there are no students in the dataset
personal_data['top20_no_students'] = personal_data.yearly_compensation_numerical > top20flag_no_students
# creating data for future mapping of values
top20 = personal_data[not_student_mask].groupby('yearly_compensation', as_index=False)['top20_no_students'].min()
# Counting the quantity of respondents per compensation (where is not student)
barplot = personal_data[not_student_mask].yearly_compensation.value_counts(sort=False).to_frame().reset_index()
barplot.columns = ['yearly_compensation', 'qty']
# mapping back to get top 20%
barplot = barplot.merge(top20, on='yearly_compensation')
barplot.columns = ['x', 'y', 'top20']
barplot['color'] = barplot.top20.apply(lambda x: 'mediumaquamarine' if x else 'lightgray')
title_text = ['<b>How Much Does Kagglers Get Paid?</b><br><i>without students</i>', 'Yearly Compensation (USD)', 'Quantity of Respondents']
annotations = [{'x': 0.06, 'y': 1600, 'text': '80% of earn up to USD 100k','color': 'gray'},
{'x': 0.56, 'y': 800, 'text': '20% of earn more than USD 100k','color': 'mediumaquamarine'}]
generate_barplot(title_text, annotations)
# Creating a helper function to generate lineplot
def gen_lines(data, colorby):
    """
    Build one plotly Scatter trace (lines + markers + value labels) per
    group of `data` grouped by the `colorby` column.

    Groups with a falsy label are drawn in lightgray; truthy groups use
    mediumaquamarine when coloring by 'top20', deepskyblue otherwise.
    """
    highlight = 'mediumaquamarine' if colorby == 'top20' else 'deepskyblue'
    palette = {False: 'lightgray', True: highlight}
    traces = []
    for label, group in data.groupby(colorby):
        tone = palette[label]
        traces.append(
            go.Scatter(
                x=group.x,
                y=group.y,
                mode='lines+markers+text',
                line={'color': tone, 'width': 2},
                connectgaps=True,
                text=group.y.round(),
                hoverinfo='none',
                textposition='top center',
                textfont=dict(size=12, color=tone),
                marker={'color': tone, 'size': 8},
            )
        )
    return traces
# --- Compensation by gender, education, industry, role and country ---
# Grouping data to get compensation per gender of Top20% and Bottom 80%
barplot = personal_data[not_student_mask].groupby(['gender', 'top20_no_students'], as_index=False)['yearly_compensation_numerical'].mean()
barplot = barplot[(barplot['gender'] == 'Female') | (barplot['gender'] == 'Male')]
# NOTE(review): after this rename 'x' holds the gender and 'gender' holds the
# top20 flag, so gen_lines(..., 'gender') actually colors by the top20 flag.
barplot.columns = ['x', 'gender', 'y']
# Creates annotations
annot_dict = [{'x': 0.05, 'y': 180, 'text': 'The top 20% men are almost 12% better paid than the top 20% woman','color': 'deepskyblue'},
              {'x': 0.05, 'y': 60, 'text': 'At the bottom 80% there is almost no difference in payment','color': 'gray'}]
# Creates layout
layout = gen_layout('<b>What is the gender difference in compensation at the top 20%?</b><br><i>without students</i>',
                    'Gender',
                    'Average Yearly Compensation (USD)',
                    120,
                    400,
                    gen_annotations(annot_dict)
                   )
# Make plot
fig = go.Figure(data=gen_lines(barplot, 'gender'),
                layout=layout)
iplot(fig, filename='color-bar')
# Calculates compensation per education level
barplot = personal_data[not_student_mask].groupby(['education_level'], as_index=False)['yearly_compensation_numerical'].mean()
# NOTE(review): flag name says 'no_college' but it also includes doctorates
barplot['no_college'] = (barplot.education_level == 'No formal education past high school') | \
                        (barplot.education_level == 'Doctoral degree')
# creates a line break for better visualisation
barplot.education_level = barplot.education_level.str.replace('study without', 'study <br> without')
barplot.columns = ['y', 'x', 'no_college']
barplot = barplot.sort_values(by='x', ascending=True)
barplot['color'] = barplot.no_college.apply(lambda x: 'coral' if x else 'a')
# Add title and annotations
title_text = ['<b>Impact of Formal Education on Compenstaion</b><br><i>without students</i>', 'Average Yearly Compensation (USD)', 'Level of Education']
annotations = []
generate_barplot(title_text, annotations, orient='h', lmarg=300)
# Calculates compensation per industry
barplot = personal_data[not_student_mask].groupby(['employer_industry'], as_index=False)['yearly_compensation_numerical'].mean()
# Flags the top 5 industries to add color
barplot['best_industries'] = (barplot.employer_industry == 'Medical/Pharmaceutical') | \
                             (barplot.employer_industry == 'Insurance/Risk Assessment') | \
                             (barplot.employer_industry == 'Military/Security/Defense') | \
                             (barplot.employer_industry == 'Hospitality/Entertainment/Sports') | \
                             (barplot.employer_industry == 'Accounting/Finance')
barplot.columns = ['y', 'x', 'best_industries']
barplot = barplot.sort_values(by='x', ascending=True)
barplot['color'] = barplot.best_industries.apply(lambda x: 'darkgoldenrod' if x else 'a')
title_text = ['<b>Average Compensation per Industry | Top 5 in Color</b><br><i>without students</i>', 'Average Yearly Compensation (USD)', 'Industry']
annotations = []
generate_barplot(title_text, annotations, orient='h', lmarg=300, h=600)
# Calculates compensation per role
barplot = personal_data[not_student_mask].groupby(['role'], as_index=False)['yearly_compensation_numerical'].mean()
# Flags the top 5 roles to add color
barplot['role_highlight'] = (barplot.role == 'Data Scientist') | \
                            (barplot.role == 'Product/Project Manager') | \
                            (barplot.role == 'Consultant') | \
                            (barplot.role == 'Data Journalist') | \
                            (barplot.role == 'Manager') | \
                            (barplot.role == 'Principal Investigator') | \
                            (barplot.role == 'Chief Officer')
barplot.columns = ['y', 'x', 'role_highlight']
barplot = barplot.sort_values(by='x', ascending=True)
barplot['color'] = barplot.role_highlight.apply(lambda x: 'mediumvioletred' if x else 'lightgray')
title_text = ['<b>Average Compensation per Role | Top 7 in Color</b><br><i>without students</i>', 'Average Yearly Compensation (USD)', 'Job Title']
annotations = [{'x': 0.6, 'y': 11.5, 'text': 'The first step into the ladder<br>of better compensation is<br>becoming a Data Scientist','color': 'mediumvioletred'}]
generate_barplot(title_text, annotations, orient='h', lmarg=300, h=600)
# Replacing long country names
personal_data.country = personal_data.country.str.replace('United Kingdom of Great Britain and Northern Ireland', 'United Kingdom')
personal_data.country = personal_data.country.str.replace('United States of America', 'United States')
personal_data.country = personal_data.country.str.replace('I do not wish to disclose my location', 'Not Disclosed')
personal_data.country = personal_data.country.str.replace('Iran, Islamic Republic of...', 'Iran')
personal_data.country = personal_data.country.str.replace('Hong Kong \(S.A.R.\)', 'Hong Kong')
personal_data.country = personal_data.country.str.replace('Viet Nam', 'Vietnam')
personal_data.country = personal_data.country.str.replace('Republic of Korea', 'South Korea')
# Calculates compensation per country
barplot = personal_data[not_student_mask].groupby(['country'], as_index=False)['yearly_compensation_numerical'].mean()
# Flags the top 10 countries to add color
barplot['country_highlight'] = (barplot.country == 'United States') | \
                               (barplot.country == 'Switzerland') | \
                               (barplot.country == 'Australia') | \
                               (barplot.country == 'Israel') | \
                               (barplot.country == 'Denmark') | \
                               (barplot.country == 'Canada') | \
                               (barplot.country == 'Hong Kong') | \
                               (barplot.country == 'Norway') | \
                               (barplot.country == 'Ireland') | \
                               (barplot.country == 'United Kingdom')
barplot.columns = ['y', 'x', 'country_highlight']
barplot = barplot.sort_values(by='x', ascending=True)
barplot['color'] = barplot.country_highlight.apply(lambda x: 'mediumseagreen' if x else 'lightgray')
title_text = ['<b>Average Compensation per Country - Top 10 in Color</b><br><i>without students</i>', 'Average Yearly Compensation (USD)', 'Country']
annotations = []
generate_barplot(title_text, annotations, orient='h', lmarg=300, h=1200)
# Loading the cost of living
cost_living = pd.read_csv('../input/cost-of-living-per-country/cost_of_living.csv')
cost_living.columns = ['ranking', 'country', 'price_index']
cost_living.head()
# joining both tables
personal_data = personal_data.merge(cost_living, on='country') # doing an inner join to avoid nans on normalized compensation
# calculating the normalized compensation
personal_data['normalized_compensation'] = personal_data.yearly_compensation_numerical / personal_data.price_index * 10
personal_data['normalized_compensation'] = personal_data['normalized_compensation'].round() * 10
# recreating masks
# (the inner merge above re-indexed personal_data, so the masks must be rebuilt)
is_student_mask = (personal_data['role'] == 'Student') | (personal_data['employer_industry'] == 'I am a student')
not_student_mask = (personal_data['role'] != 'Student') & (personal_data['employer_industry'] != 'I am a student')
# Calculates compensation per country
barplot = personal_data[not_student_mask].groupby(['country'], as_index=False)['normalized_compensation'].mean()
# Flags the top 10 countries to add color
barplot['country_highlight'] = (barplot.country == 'United States') | \
                               (barplot.country == 'Australia') | \
                               (barplot.country == 'Israel') | \
                               (barplot.country == 'Switzerland') | \
                               (barplot.country == 'Canada') | \
                               (barplot.country == 'Tunisia') | \
                               (barplot.country == 'Germany') | \
                               (barplot.country == 'Denmark') | \
                               (barplot.country == 'Colombia') | \
                               (barplot.country == 'South Korea')
barplot.columns = ['y', 'x', 'country_highlight']
barplot = barplot.sort_values(by='x', ascending=True)
barplot['color'] = barplot.country_highlight.apply(lambda x: 'mediumseagreen' if x else 'lightgray')
title_text = ['<b>Normalized Average Compensation per Country - Top 10 in Color</b><br><i>without students</i>',
              'Normalized Average Yearly Compensation (USD)', 'Country']
annotations = []
generate_barplot(title_text, annotations, orient='h', lmarg=300, h=1200)
# Defining the threshold for top 20% most paid
top20_tresh = personal_data.normalized_compensation.quantile(0.8)
personal_data['top20'] = personal_data.normalized_compensation > top20_tresh
# creating data for future mapping of values
top20 = personal_data.groupby('normalized_compensation', as_index=False)['top20'].min()
# Calculates respondents per compensation
barplot = personal_data.normalized_compensation.value_counts(sort=False).to_frame().reset_index()
barplot.columns = ['normalized_compensation', 'qty']
# mapping back to get top 20% and 50%
barplot = barplot.merge(top20, on='normalized_compensation')
barplot.columns = ['x', 'y', 'top20']
barplot['color'] = barplot.top20.apply(lambda x: 'mediumaquamarine' if x else 'lightgray')
title_text = ['<b>How Much Does Kagglers Get Paid?<br></b><i>normalized by cost of living</i>', 'Normalized Yearly Compensation', 'Quantity of Respondents']
annotations = [{'x': 0.1, 'y': 1000, 'text': '20% Most well paid','color': 'mediumaquamarine'}]
generate_barplot(title_text, annotations)
# First we store all answers in a dict
# NOTE(review): the iloc column slices below hard-code the 2018 survey schema
# of multipleChoiceResponses.csv -- re-verify the indices if the file changes.
answers = {'Q1': mcA.iloc[:,1],
           'Q2': mcA.iloc[:,3],
           'Q3': mcA.iloc[:,4],
           'Q4': mcA.iloc[:,5],
           'Q5': mcA.iloc[:,6],
           'Q6': mcA.iloc[:,7],
           'Q7': mcA.iloc[:,9],
           'Q8': mcA.iloc[:,11],
           'Q9': mcA.iloc[:,12],
           'Q10': mcA.iloc[:,13],
           'Q11': mcA.iloc[:,14:21],
           'Q12': mcA.iloc[:,22],
           'Q13': mcA.iloc[:,29:44],
           'Q14': mcA.iloc[:,45:56],
           'Q15': mcA.iloc[:,57:64],
           'Q16': mcA.iloc[:,65:83],
           'Q17': mcA.iloc[:,84],
           'Q18': mcA.iloc[:,86],
           'Q19': mcA.iloc[:,88:107],
           'Q20': mcA.iloc[:,108],
           'Q21': mcA.iloc[:,110:123],
           'Q22': mcA.iloc[:,124],
           'Q23': mcA.iloc[:,126],
           'Q24': mcA.iloc[:,127],
           'Q25': mcA.iloc[:,128],
           'Q26': mcA.iloc[:,129],
           'Q27': mcA.iloc[:,130:150],
           'Q28': mcA.iloc[:,151:194],
           'Q29': mcA.iloc[:,195:223],
           'Q30': mcA.iloc[:,224:249],
           'Q31': mcA.iloc[:,250:262],
           'Q32': mcA.iloc[:,263],
           'Q33': mcA.iloc[:,265:276],
           'Q34': mcA.iloc[:, 277:283],
           'Q35': mcA.iloc[:, 284:290],
           'Q36': mcA.iloc[:,291:304],
           'Q37': mcA.iloc[:,305],
           'Q38': mcA.iloc[:,307:329],
           'Q39': mcA.iloc[:,330:332],
           'Q40': mcA.iloc[:,332],
           'Q41': mcA.iloc[:,333:336],
           'Q42': mcA.iloc[:,336:341],
           'Q43': mcA.iloc[:,342],
           'Q44': mcA.iloc[:,343:348],
           'Q45': mcA.iloc[:,349:355],
           'Q46': mcA.iloc[:,355],
           'Q47': mcA.iloc[:,356:371],
           'Q48': mcA.iloc[:,372],
           'Q49': mcA.iloc[:,373:385],
           'Q50': mcA.iloc[:,386:394]}
# Then we store all questions in another dict
# (the extra non-'Qn' keys map engineered feature names back to their question)
questions = {
    'Q1': 'What is your gender?',
    'Q2': 'What is your age (# years)?',
    'Q3': 'In which country do you currently reside?',
    'Q4': 'What is the highest level of formal education that you have attained or plan to attain within the next 2 years?',
    'Q5': 'Which best describes your undergraduate major?',
    'Q6': 'Select the title most similar to your current role (or most recent title if retired)',
    'Q7': 'In what industry is your current employer/contract (or your most recent employer if retired)?',
    'Q8': 'How many years of experience do you have in your current role?',
    'Q9': 'What is your current yearly compensation (approximate $USD)?',
    'Q10': 'Does your current employer incorporate machine learning methods into their business?',
    'Q11': 'Select any activities that make up an important part of your role at work',
    'Q12': 'What is the primary tool that you use at work or school to analyze data?',
    'Q13': 'Which of the following integrated development environments (IDEs) have you used at work or school in the last 5 years?',
    'Q14': 'Which of the following hosted notebooks have you used at work or school in the last 5 years?',
    'Q15': 'Which of the following cloud computing services have you used at work or school in the last 5 years?',
    'Q16': 'What programming languages do you use on a regular basis?',
    'Q17': 'What specific programming language do you use most often?',
    'Q18': 'What programming language would you recommend an aspiring data scientist to learn first?',
    'Q19': 'What machine learning frameworks have you used in the past 5 years?',
    'Q20': 'Of the choices that you selected in the previous question, which ML library have you used the most?',
    'Q21': 'What data visualization libraries or tools have you used in the past 5 years?',
    'Q22': 'Of the choices that you selected in the previous question, which specific data visualization library or tool have you used the most?',
    'Q23': 'Approximately what percent of your time at work or school is spent actively coding?',
    'Q24': 'How long have you been writing code to analyze data?',
    'Q25': 'For how many years have you used machine learning methods (at work or in school)?',
    'Q26': 'Do you consider yourself to be a data scientist?',
    'Q27': 'Which of the following cloud computing products have you used at work or school in the last 5 years?',
    'Q28': 'Which of the following machine learning products have you used at work or school in the last 5 years?',
    'Q29': 'Which of the following relational database products have you used at work or school in the last 5 years?',
    'Q30': 'Which of the following big data and analytics products have you used at work or school in the last 5 years?',
    'Q31': 'Which types of data do you currently interact with most often at work or school?',
    'Q32': 'What is the type of data that you currently interact with most often at work or school? ',
    'Q33': 'Where do you find public datasets?',
    'Q34': 'During a typical data science project at work or school, approximately what proportion of your time is devoted to the following?',
    'Q35': 'What percentage of your current machine learning/data science training falls under each category?',
    'Q36': 'On which online platforms have you begun or completed data science courses?',
    'Q37': 'On which online platform have you spent the most amount of time?',
    'Q38': 'Who/what are your favorite media sources that report on data science topics?',
    'Q39': 'How do you perceive the quality of online learning platforms and in-person bootcamps as compared to the quality of the education provided by traditional brick and mortar institutions?',
    'Q40': 'Which better demonstrates expertise in data science: academic achievements or independent projects? ',
    'Q41': 'How do you perceive the importance of the following topics?',
    'Q42': 'What metrics do you or your organization use to determine whether or not your models were successful?',
    'Q43': 'Approximately what percent of your data projects involved exploring unfair bias in the dataset and/or algorithm?',
    'Q44': 'What do you find most difficult about ensuring that your algorithms are fair and unbiased? ',
    'Q45': 'In what circumstances would you explore model insights and interpret your models predictions?',
    'Q46': 'Approximately what percent of your data projects involve exploring model insights?',
    'Q47': 'What methods do you prefer for explaining and/or interpreting decisions that are made by ML models?',
    'Q48': 'Do you consider ML models to be "black boxes" with outputs that are difficult or impossible to explain?',
    'Q49': 'What tools and methods do you use to make your work easy to reproduce?',
    'Q50': 'What barriers prevent you from making your work even easier to reuse and reproduce?',
    'top7_job_title': 'Select the title most similar to your current role (or most recent title if retired)',
    'job_title_student': 'Select the title most similar to your current role (or most recent title if retired)',
    'top10_country': 'In which country do you currently reside?',
    'age': 'What is your age (# years)?',
    'gender-Male': 'What is your gender?',
    'top2_education_level': 'What is the highest level of formal education that you have attained or plan to attain within the next 2 years?',
    'top5_industries': 'In what industry is your current employer/contract (or your most recent employer if retired)?',
    'industry_student': 'In what industry is your current employer/contract (or your most recent employer if retired)?',
    'years_experience': 'How many years of experience do you have in your current role?'}
def normalize_labels(full_label):
    """
    Extract the answer part of a dummified column name.

    `pd.get_dummies` joins prefix and value with '<>'; return the value
    after the separator when present, otherwise the whole label unchanged.
    """
    pieces = full_label.split('<>')
    return pieces[1] if len(pieces) > 1 else pieces[0]
def treat_data(data, idx, tresh):
    """
    One-hot encode the answers of question `idx` and drop rare answers.

    Parameters
    ----------
    data : Series or DataFrame with the raw answers for one question.
    idx : int question number, used to prefix the new column names.
    tresh : float in [0, 1]; dummy columns answered by a smaller share of
        respondents are dropped (guards against overfitting on rare answers).

    Returns
    -------
    DataFrame of 0/1 columns named 'Q{idx}-<answer>'.
    """
    # get dummies with a distinct separator
    result = pd.get_dummies(data, prefix_sep='<>', drop_first=False)
    # gets and normalize dummies names
    cols = [normalize_labels(str(x)) for x in result.columns]
    # Locate the question text (the column naming differs between single-
    # and multiple-choice questions).  NOTE(review): Qtext is never used
    # afterwards, but the lookup doubles as a schema-existence check.
    try:
        Qtext = mcQ['Q{}'.format(idx)]
    except KeyError:
        try:
            Qtext = mcQ['Q{}_Part_1'.format(idx)]
        except KeyError:
            Qtext = mcQ['Q{}_MULTIPLE_CHOICE'.format(idx)]
    # Build new columns names
    prefix = 'Q{}-'.format(idx)
    result.columns = [prefix + x for x in cols]
    # dropping columns that had less than `tresh` share of answers.
    # FIX: Series.iteritems() was removed in pandas 2.0 -- use .items().
    percent_answer = result.sum() / result.shape[0]
    for col, share in percent_answer.items():
        if share < tresh:
            result = result.drop(col, axis=1)
    return result
# selecting the questions
selected_questions = [1, 2, 3, 4, 6, 7, 8, 10, 11, 15, 16, 17, 18, 19, 21, 23, 24, 25, 26, 29, 31, 36, 38, 40, 42, 47, 48, 49]
treated_data = {}
# Formatting all answers from the selected questions, dropping answers with less than 5%
for sq in selected_questions:
    treated_data['Q{}'.format(sq)] = treat_data(answers['Q{}'.format(sq)], sq, 0.05)
# Done! Now we are able to rebuild a much cleaner dataset!
# Define target variable
# '500000+' is rewritten to '500-500000' so every band splits into two ints
compensation = mcA.Q9.str.replace(',', '').str.replace('500000\+', '500-500000').str.split('-')
# midpoint of each compensation band, in thousands of USD
mcA['yearly_compensation_numerical'] = compensation.apply(lambda x: (int(x[0]) * 1000 + int(x[1]))/ 2) / 1000 # it is calculated in thousand dollars
# NOTE(review): the top-20% label is hard-coded as "> 100k" here rather than
# derived from a quantile -- confirm it matches the threshold found earlier.
clean_dataset = (mcA.yearly_compensation_numerical > 100).reset_index().astype(int)
clean_dataset.columns = ['index', 'top20']
# Join with treated questions
for key, value in treated_data.items():
    value = value.reset_index(drop=True)
    clean_dataset = clean_dataset.join(value, how='left')
clean_dataset = clean_dataset.drop('index', axis=1)
# saving back to csv so others may use it
clean_dataset.to_csv('clean_dataset.csv')
clean_dataset.head()
shape = clean_dataset.shape
print('Our cleaned dataset has {} records and {} features'.format(shape[0], shape[1]))
# --- Drop one of every pair of highly correlated (>0.5) features ---
# Create correlation matrix
correl = clean_dataset.corr().abs()
# Select upper triangle of correlation matrix.
# FIX: the deprecated np.bool alias was removed in NumPy 1.24 -- use the
# builtin bool instead (behaviour is identical).
upper = correl.where(np.triu(np.ones(correl.shape), k=1).astype(bool))
# Find index of feature columns with correlation greater than 0.5
to_drop = [column for column in upper.columns if any(upper[column] > 0.5)]
# Drop features
clean_dataset_dropped = clean_dataset.drop(to_drop, axis=1)
shape = clean_dataset_dropped.shape
print('After dropping highly correlated features, our has {} records and {} features'.format(shape[0], shape[1]))
print('Dropped features: ', to_drop)
# Finding NANs
df = clean_dataset_dropped.isnull().sum().to_frame()
print('We found {} NaNs on the dataset after treatment'.format(df[df[0] > 0].shape[0]))
from sklearn.model_selection import train_test_split
# 80/20 train/test split with a fixed seed for reproducibility
train, test = train_test_split(clean_dataset_dropped, test_size=0.2, random_state=42)
print('Train Shape:', train.shape)
print('Test Shape:', test.shape)
# Separating X,y train and test sets
ytrain = train['top20'].copy()
Xtrain = train.drop(['top20'], axis=1).copy() # removing both target variables from features
ytest = test['top20'].copy()
Xtest = test.drop(['top20'], axis=1).copy() # removing both target variables from features
# Helper function to help evaluating the model
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
def display_scores(predictor, X, y):
    """
    Print confusion matrix, accuracy, ROC AUC and the type I/II error
    rates for `predictor` evaluated on (X, y).
    """
    model_name = str(type(predictor)).split('.')[-1][:-2]
    print('\n### -- ### -- ' + model_name + ' -- ### -- ###')
    # predicted labels and positive-class probabilities
    ypred = predictor.predict(X)
    ypred_score = predictor.predict_proba(X)
    # calculating metrics
    accuracy = accuracy_score(y, ypred)
    roc = roc_auc_score(y, pd.DataFrame(ypred_score)[1])
    confusion = confusion_matrix(y, ypred)
    print('Confusion Matrix: ', confusion)
    print('Accuracy: ', accuracy)
    print('AUC: ', roc)
    # False Positive rate - predicted top 20%, while it wasn't
    type1_error = confusion[0][1] / confusion[0].sum()
    # False Negative rate - predicted out of top 20%, while it was
    type2_error = confusion[1][0] / confusion[1].sum()
    print('Type 1 error: ', type1_error)
    print('Type 2 error: ', type2_error)
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
# Baseline classifiers with fixed seeds for reproducibility
rforest = RandomForestClassifier(n_estimators=100, random_state=42)
lreg = LogisticRegression(solver='liblinear', random_state=42)
# Fit the models
rforest.fit(Xtrain, ytrain)
lreg.fit(Xtrain, ytrain)
# Check some metrics (on the training set, so these are optimistic)
display_scores(rforest, Xtrain, ytrain)
display_scores(lreg, Xtrain, ytrain)
from sklearn.model_selection import cross_val_score
def do_cv(predictor, X, y, cv):
    """
    Run `cv`-fold cross-validation (scored by ROC AUC) and print the mean
    and per-fold scores.

    FIX: the `cv` argument was accepted but ignored (cross_val_score was
    hard-coded to cv=5).  It is now forwarded; all existing call sites
    pass cv=5, so their output is unchanged.
    """
    print('### -- ### -- ' + str(type(predictor)).split('.')[-1][:-2] + ' -- ### -- ###')
    cv_score = cross_val_score(predictor, X, y, scoring='roc_auc', cv=cv)
    print ('Mean AUC score after a {}-fold cross validation: '.format(cv), cv_score.mean())
    print ('AUC score of each fold: ', cv_score)
do_cv(rforest, Xtrain, ytrain, 5)
print('\n ----------------------------- \n')
do_cv(lreg, Xtrain, ytrain, 5)
from collections import Counter
from imblearn.under_sampling import RandomUnderSampler
print('Quantity of samples on each class BEFORE undersampling: ', sorted(Counter(ytrain).items()))
# Balance the classes by randomly dropping majority-class rows (fixed seed)
rus = RandomUnderSampler(random_state=42)
X_resampled, y_resampled = rus.fit_resample(Xtrain, ytrain)
print('Quantity of samples on each class AFTER undersampling: ', sorted(Counter(y_resampled).items()))
# refit the model
rforest.fit(X_resampled, y_resampled)
lreg.fit(X_resampled, y_resampled)
# do Cross Validation
# (CV and metrics below still evaluate on the full, unbalanced training set)
do_cv(rforest, Xtrain, ytrain, 5)
display_scores(rforest, Xtrain, ytrain)
print('\n ----------------------------- \n')
do_cv(lreg, Xtrain, ytrain, 5)
display_scores(lreg, Xtrain, ytrain)
display_scores(lreg, Xtest, ytest)
# --- Score distribution histogram and ROC curve on the held-out test set ---
# calculating scores
scores = pd.DataFrame(lreg.predict_proba(Xtest)).iloc[:,1]
scores = pd.DataFrame([scores.values, ytest.values]).transpose()
scores.columns = ['score', 'top20']
# Add histogram data
x0 = scores[scores['top20'] == 0]['score']
x1 = scores[scores['top20'] == 1]['score']
bottom80 = go.Histogram(
    x=x0,
    opacity=0.5,
    marker={'color': 'lightgray'},
    name='Bottom 80%'
)
top20 = go.Histogram(
    x=x1,
    opacity=0.5,
    marker={'color': 'mediumaquamarine'},
    name='Top 20%'
)
annot_dict = [{'x': 0.2, 'y': 180, 'text': 'The 80% less paid tend<br>to have lower scores','color': 'gray'},
              {'x': 0.75, 'y': 95, 'text': 'Top 20% tend to have<br>higher scores','color': 'mediumaquamarine'}]
layout = gen_layout('<b>Distribution of Scores From the Top 20% and Bottom 80%</b><br><i>test data</i>',
                    'Probability Score',
                    'Quantity of Respondents',
                    annotations=gen_annotations(annot_dict),
                    lmarg=150, h=400
                   )
# overlay both histograms on the same axes
layout['barmode'] = 'overlay'
data = [bottom80, top20]
layout = go.Layout(layout)
fig = go.Figure(data=data, layout=layout)
iplot(fig)
from sklearn.metrics import roc_curve
yscore = pd.DataFrame(lreg.predict_proba(Xtest)).iloc[:,1]
fpr, tpr, _ = roc_curve(ytest, yscore)
trace1 = go.Scatter(x=fpr, y=tpr,
                    mode='lines',
                    line=dict(color='mediumaquamarine', width=3),
                    name='ROC curve'
                   )
# diagonal reference line (random classifier)
trace2 = go.Scatter(x=[0, 1], y=[0, 1],
                    mode='lines',
                    line=dict(color='lightgray', width=1, dash='dash'),
                    showlegend=False)
layout = gen_layout('<b>Receiver Operating Characteristic Curve</b><br><i>test data</i>',
                    'False Positive Rate',
                    'True Positive Rate',
                    lmarg=50, h=600
                   )
fig = go.Figure(data=[trace1, trace2], layout=layout)
iplot(fig)
def calc_proba(model):
    """
    Estimate the probability of belonging to the Top 20% for each score
    range, measured on the held-out test set.

    Relies on the module-level Xtest/ytest.  Returns a Series indexed by
    score bin with the share of Top-20% respondents in each bin.
    """
    # positive-class scores on the test data, paired with the true labels
    proba = pd.DataFrame(model.predict_proba(Xtest)).iloc[:, 1]
    scores = pd.DataFrame([proba.values, ytest.values]).transpose()
    scores.columns = ['score', 'top20']
    # bucket the scores into hand-picked probability ranges
    edges = [-0.01, 0.05, 0.1, 0.2, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.975, 1]
    scores['bin'] = pd.cut(scores.score, edges)
    # count respondents of each class per bin, then pivot classes to columns
    prob = scores.groupby(['bin', 'top20'], as_index=False)['score'].count()
    prob = pd.pivot_table(prob, values='score', index=['bin'], columns=['top20'])
    # share of Top-20% respondents within each bin
    prob['probability'] = prob[1.0] / (prob[0.0] + prob[1.0])
    return prob['probability']
# Calculates the probabilities of belonging to Top20% per range of score based on test data
calc_proba(lreg).to_frame()
print('Our model\'s intercept is:', lreg.intercept_[0])
# treating the questions just to display better names
features = pd.DataFrame([Xtrain.columns, lreg.coef_[0]]).transpose()
features.columns = ['feature', 'coefficient']
features['abs_coefficient'] = features['coefficient'].abs()
features['question_number'] = features.feature.str.split('-').str[0]
features['answer'] = features.feature.str[3:]
# strip the leading '-' left over for single-digit question prefixes
features['answer'] = features.answer.apply(lambda x: x[1:] if x[0] == '-' else x)
features['question'] = features['question_number'].map(questions)
# NOTE(review): answers_dict is defined here but never used afterwards
answers_dict = {'age': 'continuous feature',
                'top10_country': 'live at one of the top 10 countries',
                'top7_job_title': 'has one of the top 7 job titles',
                }
# (repeated mapping -- identical to the assignment above answers_dict)
features['question'] = features['question_number'].map(questions)
features = features[['question_number', 'question', 'answer', 'coefficient', 'abs_coefficient']]
# Helper functions for building clean plots
def gen_yaxis(title):
    """
    Return the y-axis configuration dict shared by all charts in this
    notebook: muted gray title/ticks and no grid lines.
    """
    return dict(
        title=title,
        titlefont=dict(color='#AAAAAA'),
        showgrid=False,
        color='#AAAAAA',
        tickfont=dict(size=12, color='#444444'),
    )
def gen_layout(charttitle, xtitle, ytitle, annotations=None, lmarg=120, h=400):
    """
    Build the shared plotly Layout: fixed 800px width, legend hidden,
    the notebook's standard axes, optional annotations, and adjustable
    left margin / height.
    """
    axes = {'xaxis': gen_xaxis(xtitle), 'yaxis': gen_yaxis(ytitle)}
    return go.Layout(title=charttitle,
                     height=h,
                     width=800,
                     showlegend=False,
                     annotations=annotations,
                     margin=dict(l=lmarg),
                     **axes)
def split_string(string, lenght):
    """
    Insert an HTML line break ('<br>') before every `lenght`-th word of
    `string`; every other word is preceded by a single space.

    Note: like the original, the result starts with a separator (usually
    a space) before the first word.
    """
    pieces = []
    for position, word in enumerate(string.split(' '), start=1):
        separator = '<br>' if position % lenght == 0 else ' '
        pieces.append(separator + word)
    return ''.join(pieces)
def gen_bars_result(data, color, orient):
    """
    Create one plotly Bar trace per colour group of `data`; horizontal
    bars ('h') are sorted ascending by x.  The placeholder colour label
    'a' is rendered as lightgray.
    """
    bars = []
    for label, group in data.groupby(color):
        if orient == 'h':
            group = group.sort_values(by='x', ascending=True)
        bar_color = 'lightgray' if label == 'a' else label
        bars.append(go.Bar(x=group.x,
                           y=group.y,
                           name=bar_color,
                           marker={'color': bar_color},
                           orientation=orient,
                           text=group.x.astype(float).round(3),
                           hoverinfo='none',
                           textposition='auto',
                           textfont=dict(size=12, color='#444444')
                           )
                    )
    return bars
def plot_result(qnumber):
    """
    Plot the logistic-regression coefficients for a single question.

    Positive coefficients (answers pushing towards the Top 20%) are
    highlighted in cornflowerblue; the rest fall back to the default gray.

    Parameters
    ----------
    qnumber : str
        Question id such as 'Q1', matched against features.question_number.
    """
    data = features[features.question_number == qnumber]
    title = qnumber + '. ' + data.question.values[0]
    title = split_string(title, 8)
    barplot = data[['answer', 'coefficient']].copy()
    barplot.answer = barplot.answer.apply(lambda x: split_string(x, 5))
    barplot.columns = ['y', 'x']
    # BUG FIX: the sorted frame was previously assigned to a misspelled
    # name ('bartplot') and silently discarded, so the sort never applied.
    barplot = barplot.sort_values(by='x', ascending=False)
    barplot['model_highlight'] = barplot.x > 0
    barplot['color'] = barplot.model_highlight.apply(lambda x: 'cornflowerblue' if x else 'a')
    layout = gen_layout('<b>{}</b>'.format(title),
                        'Model Coefficient',
                        '',
                        lmarg=300,
                        h= 600)
    fig = go.Figure(data=gen_bars_result(barplot, 'color', orient='h'),
                    layout=layout)
    iplot(fig, filename='color-bar')
# Coefficient plots for every question kept in the final model
plot_result('Q1')
plot_result('Q2')
plot_result('Q3')
plot_result('Q4')
plot_result('Q6')
plot_result('Q7')
plot_result('Q8')
plot_result('Q10')
plot_result('Q11')
plot_result('Q15')
plot_result('Q16')
plot_result('Q17')
plot_result('Q18')
plot_result('Q19')
plot_result('Q21')
plot_result('Q23')
plot_result('Q24')
plot_result('Q26')
plot_result('Q29')
plot_result('Q31')
plot_result('Q36')
plot_result('Q38')
plot_result('Q40')
plot_result('Q42')
plot_result('Q47')
plot_result('Q48')
plot_result('Q49')
### Training the model again with fewer questions
# Selecting just the questions we are putting in production
selected_questions = [1, 2, 3, 4, 6, 7, 8, 10, 11, 15, 16, 23, 31, 42]
treated_data = {}
# Let's select answers that had more than 5% of answers
for sq in selected_questions:
    treated_data['Q{}'.format(sq)] = treat_data(answers['Q{}'.format(sq)], sq, 0.05)
# Done! Now we are able to rebuild a much cleaner dataset!
# Define target variable
# (same target construction as the exploratory model above)
compensation = mcA.Q9.str.replace(',', '').str.replace('500000\+', '500-500000').str.split('-')
mcA['yearly_compensation_numerical'] = compensation.apply(lambda x: (int(x[0]) * 1000 + int(x[1]))/ 2) / 1000 # it is calculated in thousand dollars
clean_dataset = (mcA.yearly_compensation_numerical > 100).reset_index().astype(int)
clean_dataset.columns = ['index', 'top20']
# Join with treated questions
for key, value in treated_data.items():
    value = value.reset_index(drop=True)
    clean_dataset = clean_dataset.join(value, how='left')
clean_dataset = clean_dataset.drop('index', axis=1)
# saving back to csv so others may use it
clean_dataset.to_csv('production_clean_dataset.csv')
# --- Drop one of every pair of highly correlated (>0.5) features ---
# Create correlation matrix
correl = clean_dataset.corr().abs()
# Select upper triangle of correlation matrix.
# FIX: the deprecated np.bool alias was removed in NumPy 1.24 -- use the
# builtin bool instead (behaviour is identical).
upper = correl.where(np.triu(np.ones(correl.shape), k=1).astype(bool))
# Find index of feature columns with correlation greater than 0.5
to_drop = [column for column in upper.columns if any(upper[column] > 0.5)]
# Drop features
clean_dataset_dropped = clean_dataset.drop(to_drop, axis=1)
# splitting train and test data (same seed as before for comparability)
train, test = train_test_split(clean_dataset_dropped, test_size=0.2, random_state=42)
ytrain = train['top20'].copy()
Xtrain = train.drop(['top20'], axis=1).copy() # removing both target variables from features
ytest = test['top20'].copy()
Xtest = test.drop(['top20'], axis=1).copy() # removing both target variables from features
# undersampling
X_resampled, y_resampled = rus.fit_resample(Xtrain, ytrain)
# fitting the model
lreg = LogisticRegression(solver='liblinear', random_state=42)
lreg.fit(X_resampled, y_resampled)
# validating on test data
display_scores(lreg, Xtest, ytest)
# Calculates the probabilities of belonging to Top20% per range of score based on test data
calc_proba(lreg).to_frame()
# Example payload from the web form: radio-button answers arrive as
# {"Qn": "<answer_key>"}; checkbox answers arrive as {"<answer_key>": "on"}.
input_json = {
    "Q1": "q1_other",
    "Q2": "q2_25_29",
    "Q3": "q3_united_",
    "Q4": "q4_other",
    "Q6": "q6_student",
    "Q7": "q7_other2",
    "Q8": "q8_2_3",
    "Q10": "q10_we_rec",
    "q11_analyz": "on",
    "q11_run_a_": "on",
    "q11_build_": "on",
    "q15_amazon": "on",
    "other": "on",
    "q16_python": "on",
    "q16_sql": "on",
    "Q23": "q23_25_to_",
    "q31_catego": "on",
    "q31_geospa": "on",
    "q31_numeri": "on",
    "q31_tabula": "on",
    "q31_text_d": "on",
    "q31_time_s": "on",
    "q42_revenu": "on"
}
import re
# treating the questions to match the input json
features = pd.DataFrame([Xtrain.columns, lreg.coef_[0]]).transpose()
features.columns = ['feature', 'coefficient']
features['answer'] = features.feature
# slugify: non-alphanumerics -> spaces -> underscores, lowercase,
# shorten two verbose fragments, then truncate to 10 characters
features['answer'] = features['answer'].apply(lambda x: re.sub(r"[^a-zA-Z0-9]+", ' ', x))
features['answer'] = features['answer'].str.replace(' ', '_')
features['answer'] = features['answer'].str.lower()
features['answer'] = features['answer'].str.replace('_build_and_or_', '_')
features['answer'] = features['answer'].str.replace('_metrics_that_consider_', '_')
features['answer'] = features['answer'].str[:10]
features['question_number'] = features['answer'].str.split('_').str[0]
features = features[['question_number', 'answer', 'coefficient']]
features.head(3)
# treating the input json to keep it in the same format as the coeficcients
def treat_input(input_json):
    """
    Flatten the web-form payload into {answer_key: 1}.

    Radio-button fields arrive as {"Qn": "<answer_key>"} -- keep the value;
    checkbox fields arrive as {"<answer_key>": "on"} -- keep the key.
    """
    return {(value if field[0] == 'Q' else field): 1
            for field, value in input_json.items()}
treated_input_json = treat_input(input_json)
print('First 8 elements of the treated input:', dict(list(treated_input_json.items())[0:8]))
# flag the answers present in the input; the score is the sum of the
# coefficients of the matched answers
features['positive'] = features['answer'].map(treated_input_json)
features.fillna(0, inplace=True)
features['points'] = features.positive * features.coefficient
features.head(5)
from math import exp
# Creating a function to normalize the scores between 0 and 1000
def normalize(points):
    """
    Squash a raw linear score through the logistic (sigmoid) function and
    rescale it to an integer in the range 0..1000.
    """
    probability = 1 / (1 + exp(-points))
    return int(probability * 1000)
# suming all points + intercept then normalizing between 0 and 1
score = features['points'].sum() + lreg.intercept_[0]
print('Calculated score is:', normalize(score))
import requests
import json
input_json = {
"Q1": "q1_other",
"Q2": "q2_25_29",
"Q3": "q3_united_",
"Q4": "q4_other",
"Q6": "q6_student",
"Q7": "q7_other2",
"Q8": "q8_2_3",
"Q10": "q10_we_rec",
"q11_analyz": "on",
"q11_run_a_": "on",
"q11_build_": "on",
"q15_amazon": "on",
"other": "on",
"q16_python": "on",
"q16_sql": "on",
"Q23": "q23_25_to_",
"q31_catego": "on",
"q31_geospa": "on",
"q31_numeri": "on",
"q31_tabula": "on",
"q31_text_d": "on",
"q31_time_s": "on",
"q42_revenu": "on"
}
treated_input_json = treat_input(input_json)
header = {'Content-Type': 'application/x-www-form-urlencoded'}
url = 'https://tk9k0fkvyj.execute-api.us-east-2.amazonaws.com/default/top20-predictor'
requests.post(url, params=treated_input_json, headers=header).json()
# Making a get to our API. It triggers a lambda function that counts the number of objects inside our bucket.
url = 'https://wucg3iz2r4.execute-api.us-east-2.amazonaws.com/default/count-kaggle-top20-objects'
requests.get(url).json()
|
UTF-8
|
Python
| false | false | 48,501 |
py
| 2,600 |
what-makes-a-kaggler-valuable.py
| 860 | 0.639344 | 0.606434 | 0 | 1,104 | 42.92663 | 193 |
MatthewSteen/Toolbox
| 9,964,324,135,621 |
37ae14f7b7399d369de09215ed3ca1c5fa492050
|
7c4f4568f2322c42d202f8e68e0e92692b4f9fee
|
/keeplines.py
|
3f74d60dc66ca4e3c3350b98d1144febafe7e09d
|
[] |
no_license
|
https://github.com/MatthewSteen/Toolbox
|
4048520c7c867489e04de5661adce9f7d5c42017
|
c3a34c1f44aeeb729c9b26f52b0a5f24f81e542c
|
refs/heads/master
| 2020-04-09T23:55:13.825326 | 2015-08-11T14:51:19 | 2015-08-11T14:51:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding: utf-8
# In[1]:
import os
# In[7]:
'''
cwd = os.getcwd()
file_name = "14095_Kilroy_T24_v3.log"
string_list = ["Warning:"]
suffix = "_errors"
ext = ".log"
'''
def remove_lines(string, read_file, read_file_suffix, read_file_ext):
    """Copy every line of *read_file* that contains *string* into a new file.

    Despite the function's name, matching lines are KEPT (written to the
    output); non-matching lines are dropped.  The output file is created in
    the current working directory and named
    ``read_file + read_file_suffix + read_file_ext``.
    """
    source_path = os.path.join(os.getcwd(), read_file)
    target_path = read_file + read_file_suffix + read_file_ext
    # Clear any stale output from a previous run before writing.
    if os.path.exists(target_path):
        os.remove(target_path)
    with open(source_path) as source, open(target_path, 'w') as target:
        target.writelines(line for line in source if string in line)
if __name__ == "__main__":
    import sys
    # CLI usage: keeplines.py <string> <read_file> <suffix> <ext>
    #arg1 = str(sys.argv[1]) #first command line argument
    #arg2 = str(sys.argv[2])
    remove_lines(str(sys.argv[1]), str(sys.argv[2]), str(sys.argv[3]), str(sys.argv[4]))
# In[ ]:
|
UTF-8
|
Python
| false | false | 1,339 |
py
| 2 |
keeplines.py
| 1 | 0.577296 | 0.563107 | 0 | 57 | 22.473684 | 88 |
1e16miin/backjoon
| 5,677,946,793,454 |
89cd983bf1ae2290b80951f214de52b0523a515e
|
d51ad1d67502e651c8d7aaba7f262109c4d188a9
|
/1222.py
|
d76ef07e305d69a73d62e0a98999e228a99aab00
|
[] |
no_license
|
https://github.com/1e16miin/backjoon
|
94fd8ff69d100a58a036788db21d665e3143edc7
|
78f6cdf4bd19b70371415cfcc468c7331c44e669
|
refs/heads/master
| 2023-06-12T14:11:49.764491 | 2021-06-22T04:08:20 | 2021-06-22T04:08:20 | 379,137,562 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# SWEA-style runner: ten fixed test cases, each giving a count line followed
# by a "+"-joined list of integers; prints "#<case> <sum>" per case.
for case_number in range(1, 11):
    N = int(input())  # operand count; not needed to compute the sum
    terms = [int(token) for token in input().split("+")]
    print("#" + str(case_number) + " " + str(sum(terms)))
|
UTF-8
|
Python
| false | false | 157 |
py
| 42 |
1222.py
| 42 | 0.547771 | 0.528662 | 0 | 4 | 38.5 | 57 |
adrianrojek/Bakalarka
| 8,392,366,128,478 |
a1dc30aaddb25dfa0cdb86d46561a9ff613bea3f
|
965a750cb28ac1a48c4ee5a7f1d5a0bc6aa2c722
|
/AttributeClasses/AnonymizaciaIpAdresy.py
|
695eb74c4f60d0177fa030d360408331a86108c1
|
[] |
no_license
|
https://github.com/adrianrojek/Bakalarka
|
11d2f187b73aa68c27ec227b0748c6386a523ebf
|
a2987bf16b75a0af00fa5be1a3718018153b43da
|
refs/heads/master
| 2023-05-04T06:51:50.279247 | 2021-05-16T23:28:29 | 2021-05-16T23:28:29 | 344,475,700 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import re
from AttributeClasses.AttributeExtractor import AttributeExtractor
class anonymizacia_ip_adresy(AttributeExtractor):
    """Attribute extractor: detects whether an IP address in a document is
    anonymized.

    extrahuj_txt returns 1 when the token two words after "IP" is masked
    with "XX", 0 when it contains digits (a concrete address), and None
    when no classifiable "IP" occurrence is found.
    """

    def __init__(self):
        pass

    def extrahuj_txt(self, Dokument):
        """Scan the document's word list for an "IP" marker and classify
        the token two positions after it.
        """
        words = Dokument.naSlova()
        # Stop two tokens early so words[i + 2] is always in range
        # (the original could raise IndexError near the end of the list).
        for i in range(max(len(words) - 2, 0)):
            if words[i] == "IP":
                value = words[i + 2]
                if "XX" in value:
                    return 1
                # Bug fix: the original wrote `if (char.isdigit() for char
                # in ...)`, which tests a generator object and is therefore
                # always truthy; any() is what was intended.
                if any(char.isdigit() for char in value):
                    return 0
        return None

    def extrahuj_json(self, Dokument):
        """Not implemented for JSON documents; always returns None."""
        return
|
UTF-8
|
Python
| false | false | 535 |
py
| 444 |
AnonymizaciaIpAdresy.py
| 24 | 0.543925 | 0.53271 | 0 | 22 | 23.272727 | 66 |
jztang/scalica-access-control
| 14,860,586,863,394 |
f8d2ef2054b50ab16a1bdb283f100fe37d8ac065
|
f8f22938533f04569d0256e7fa6304963be6d7b7
|
/groupDatabase-Django/accessControl/groupDB_server.py
|
15b3c513c281f4dbd00d71e6d11e79ce08f4a8b0
|
[] |
no_license
|
https://github.com/jztang/scalica-access-control
|
09061719058eda506197b44aefbd50fc400672c8
|
dbb6aeb1d6ea6c949e8b219c850ec708a55993a9
|
refs/heads/master
| 2021-06-23T11:59:02.350561 | 2019-12-16T01:28:52 | 2019-12-16T01:28:52 | 221,736,715 | 1 | 0 | null | false | 2021-06-10T22:15:37 | 2019-11-14T16:06:47 | 2019-12-16T01:29:11 | 2021-06-10T22:15:35 | 22,125 | 1 | 0 | 2 |
Python
| false | false |
import copy
import grpc
import logging
import os
import django
import sys
# Django must be configured and initialised BEFORE importing any model
# modules; the `from groupDatabase.models import ...` below depends on it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "accessControl.settings")
#from django.core.management import execute_from_command_line
django.setup()
from groupDatabase.models import user, group
from concurrent import futures
import groupDB_pb2
import groupDB_pb2_grpc
import groups_pb2
import groups_pb2_grpc
# Module-level client channel/stub to the groups-manager service.
# NOTE(review): created eagerly at import time and not referenced by the
# servicer methods below — presumably kept for the commented-out delete
# propagation; confirm before removing.
channel = grpc.insecure_channel("localhost:50051")
stub = groups_pb2_grpc.Groups_ManagerStub(channel)
class database(groupDB_pb2_grpc.databaseServicer):
    """gRPC servicer exposing CRUD operations on the Django-backed
    ``user`` and ``group`` models.

    Users are created lazily the first time a group is added for them;
    group names are unique per user.  (Cleanup vs. the original: removed
    leftover debug prints — including the misspelled "wsa able to delete" —
    and commented-out dead code; RPC responses are unchanged.)
    """

    def addGroup(self, request, context):
        """Create a group named request.groupName owned by request.userId.

        Returns success=False when the user already owns a group with that
        name; otherwise creates the group (and the user row, if needed)
        and returns success=True.
        """
        user_id = request.userId
        group_name = request.groupName
        try:
            owner = user.objects.get(userNumber=user_id)
            # One group name per user: reject duplicates.
            if group.objects.filter(user=owner, groupName=group_name).exists():
                return groupDB_pb2.addGroupReply(success=False)
        except user.DoesNotExist:
            # First group for this user: create the user row lazily.
            owner = user(userNumber=user_id)
            owner.save()
        group(groupName=group_name, user=owner).save()
        return groupDB_pb2.addGroupReply(success=True)

    def deleteGroup(self, request, context):
        """Delete the named group owned by request.userId.

        Returns success=False when either the user or the group does not
        exist.
        """
        user_id = request.userId
        group_name = request.groupName
        try:
            owner = user.objects.get(userNumber=user_id)
        except user.DoesNotExist:
            return groupDB_pb2.deleteGroupReply(success=False)
        for grp in group.objects.filter(user=owner):
            if grp.groupName == group_name:
                grp.delete()
                return groupDB_pb2.deleteGroupReply(success=True)
        return groupDB_pb2.deleteGroupReply(success=False)

    def getGroupId(self, request, context):
        """Return the database id of the named group, or 0 when the user
        or the group is unknown.
        """
        user_id = request.userId
        group_name = request.groupName
        try:
            owner = user.objects.get(userNumber=user_id)
        except user.DoesNotExist:
            return groupDB_pb2.getGroupReply(groupId=0)
        for grp in group.objects.filter(user=owner):
            if grp.groupName == group_name:
                return groupDB_pb2.getGroupReply(groupId=grp.id)
        return groupDB_pb2.getGroupReply(groupId=0)

    def removeAll(self, request, context):
        """Drop every user and group row (test/reset helper)."""
        user.objects.all().delete()
        group.objects.all().delete()
        return groupDB_pb2.removeAllReply(success=True)

    def getGroupNames(self, request, context):
        """Return the user's group names as a comma-separated string
        (empty string when the user owns no groups).

        Raises user.DoesNotExist for an unknown user, as the original did.
        """
        owner = user.objects.get(userNumber=request.userId)
        names = ",".join(str(grp.groupName)
                         for grp in group.objects.filter(user=owner))
        return groupDB_pb2.getGroupNamesReply(groupNames=names)
# NOTE(review): module-level counter that is never referenced in this
# module — presumably a leftover; confirm before removing.
groupIdCounter = 0
def serve():
    """Start the group-database gRPC endpoint on port 50052 and block
    until the server terminates."""
    rpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    groupDB_pb2_grpc.add_databaseServicer_to_server(database(), rpc_server)
    rpc_server.add_insecure_port('[::]:50052')
    rpc_server.start()
    rpc_server.wait_for_termination()
if __name__ == '__main__':
    logging.basicConfig()
    # Run Django management plumbing for the supplied argv, then serve RPCs.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
    serve()
|
UTF-8
|
Python
| false | false | 3,732 |
py
| 21 |
groupDB_server.py
| 16 | 0.748124 | 0.737138 | 0 | 131 | 27.48855 | 86 |
markillob/python_basics
| 10,883,447,151,435 |
d239fa7478813bf527216b14d3f2d1da104bab0c
|
75022fcc62508ebe5f9e075b4ad6c4826e145b97
|
/basics/strings_search.py
|
2c765d258a7a30fbf46c9d81e0694430a76bc63e
|
[] |
no_license
|
https://github.com/markillob/python_basics
|
5304a8f4733c94ba389ee333f691a6def4aade31
|
c285816e9432566337f832daa1493ff69df6c4f5
|
refs/heads/master
| 2021-07-01T14:02:22.575904 | 2021-03-20T13:07:28 | 2021-03-20T13:07:28 | 222,870,357 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
def get_longest_substring ( full_string : str):
    """Print (not return) what the author intended to be the longest run of
    distinct characters in *full_string*, followed by its length.

    NOTE(review): the algorithm is ad-hoc (it rewinds `i` and keeps two
    candidate buffers) and does not handle all inputs correctly — treat the
    printed answer as approximate.  Returns None; prints "empty" for "".
    """
    counter = 0            # length of the current candidate
    counter_temp = 0       # length of the previous (saved) candidate
    anchor_str_temp = ""   # previous candidate substring
    i = 0
    anchor_str = ""        # current candidate substring
    if full_string == "" :
        # print returns None, so this both reports and exits.
        return print("empty")
    while i < (len(full_string)-1): #7
        if full_string[i+1] != full_string[i] and full_string[i] not in anchor_str:
            # Next char differs and current char is new: extend the candidate.
            anchor_str = anchor_str + (full_string[i])
            counter +=1
        elif i > 1 and i < (len(full_string)-2) and full_string[i] not in anchor_str:
            # Save the current candidate and restart from this position.
            anchor_str_temp = anchor_str #ab
            anchor_str = ""
            counter_temp = counter # 2
            counter = 0
            i -=1
        elif i == 1 and full_string[i + 1] == full_string[i]:
            # Duplicate pair at the start: reset the candidate to this char.
            anchor_str = full_string[i]
        i +=1
        # After advancing, try to add the (new) current char to whichever
        # candidate does not yet contain it.
        if full_string[i] not in anchor_str:
            anchor_str = anchor_str + (full_string[i])
            counter +=1
        elif full_string[i] not in anchor_str_temp:
            anchor_str_temp = anchor_str_temp + full_string[i]
            counter_temp +=1
    # Report whichever candidate ended up longer.
    if counter_temp > counter:
        print( anchor_str_temp,counter_temp)
    else:
        print(anchor_str,counter)
    return
def get_longest_substring_one(full_string: str):
    """Print the longest substring of *full_string* with no repeated
    characters, followed by its length (e.g. "bda 3" for "dbdadb").

    Returns None, matching the print-based interface of the sibling
    get_longest_substring.  For "" it prints " 0", as the original did.

    Bug fixes vs. the original: the original referenced a misspelled name
    (``fuull_string`` -> NameError) and compared a character against
    ``len(str)`` (the *type* ``str`` -> TypeError), so it crashed on every
    input of length >= 2.  Replaced with the standard sliding-window scan.
    """
    best_start = 0      # start index of the best window found so far
    best_len = 0        # length of the best window
    window_start = 0    # start of the current duplicate-free window
    last_seen = {}      # char -> most recent index
    for idx, ch in enumerate(full_string):
        # If ch already occurs inside the current window, slide the window
        # just past its previous occurrence.
        if ch in last_seen and last_seen[ch] >= window_start:
            window_start = last_seen[ch] + 1
        last_seen[ch] = idx
        if idx - window_start + 1 > best_len:
            best_len = idx - window_start + 1
            best_start = window_start
    print(full_string[best_start:best_start + best_len], best_len)
    return
def get_list_sum_numbers(full_list: list):
    """Return the running (cumulative) sums of *full_list*.

    Bug fix: the original was a stub that returned the undefined name
    ``list_temporar`` and raised NameError on every call.  Implemented as
    cumulative sums per the function's name — NOTE(review): original
    intent is not recoverable from the source; confirm.
    """
    list_temporar = []
    running_total = 0
    for value in full_list:
        running_total += value
        list_temporar.append(running_total)
    return list_temporar
def main ():
    # Demo: report the longest run of distinct characters in "dbdadb".
    get_longest_substring("dbdadb")
if __name__ =="__main__":
    main()
|
UTF-8
|
Python
| false | false | 2,493 |
py
| 22 |
strings_search.py
| 19 | 0.532692 | 0.518652 | 0 | 78 | 30.961538 | 85 |
fabregas/nimbusfs-client
| 14,448,270,028,443 |
b2e188eda8e20610b242fcc18bbceab687d8bfec
|
67696fc94000dede419d0943cf6a98722addc641
|
/id_client/webdav_mounter.py
|
eb51847f2ac10269c97b6506c35cb35fb8821757
|
[] |
no_license
|
https://github.com/fabregas/nimbusfs-client
|
35ffca5efc421c44ffa57e1f8e03181d385cd3dc
|
2be2db7f6dce6f47407ba5537b64d56efa7f51de
|
refs/heads/master
| 2020-05-18T10:17:49.926114 | 2013-07-10T15:17:34 | 2013-07-10T15:17:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
"""
Copyright (C) 2013 Konstantin Andrusenko
See the documentation for further information on copyrights,
or contact the author. All Rights Reserved.
@package id_client.webdav_mounter
@author Konstantin Andrusenko
@date May 08, 2013
"""
import os
import sys
import string
from id_client.utils import Subprocess, logger
LINUX_MOUNTER_BIN = os.path.abspath(os.path.join(os.path.dirname(__file__), '../bin/webdav_mount'))
#-------- for win32 ----------
ALL_DRIVES_LIST = list(string.ascii_uppercase)
ALL_DRIVES_LIST.reverse() #for win32
#-----------------------------
OS_MAC = 'mac'
OS_LINUX = 'linux'
OS_WINDOWS = 'windows'
OS_UNKNOWN = 'unknown'
class WebdavMounter:
def __init__(self, nofork=False):
system = sys.platform
if system.startswith('linux'):
self.cur_os = OS_LINUX
elif system == 'darwin':
self.cur_os = OS_MAC
elif system == 'win32':
self.cur_os = OS_WINDOWS
else:
self.cur_os = OS_UNKNOWN
self.nofork = nofork
self.__mountpoint = ''
def get_mount_point(self):
return self.__mountpoint
def __run_linux_mounter(self, cmd):
proc = Subprocess('%s %s'%(LINUX_MOUNTER_BIN, cmd))
cout, cerr = proc.communicate()
if proc.returncode:
logger.error('webdav mounter error: %s %s'%(cout, cerr))
return proc.returncode
def mount(self, host, port):
if self.cur_os == OS_MAC:
return self.mount_mac(host, port)
elif self.cur_os == OS_LINUX:
try:
if not self.nofork:
return self.__run_linux_mounter('mount')
return self.mount_linux(host, port)
finally:
self.update_linux_mountpoint('%s:%s'%(host, port))
elif self.cur_os == OS_WINDOWS:
self.mount_windows(host, port)
def unmount(self):
try:
if self.cur_os == OS_LINUX:
if not self.nofork:
return self.__run_linux_mounter('umount')
if self.cur_os in (OS_MAC, OS_LINUX):
self.unmount_unix(self.get_mount_point())
elif self.cur_os == OS_WINDOWS:
self.umount_windows()
finally:
self.__mountpoint = ''
def update_linux_mountpoint(self, url):
p = Subprocess('df')
out, err = p.communicate()
for line in out.splitlines():
if url in line:
self.__mountpoint = line.split()[-1]
return
def mount_linux(self, bind_host, bind_port):
mount_point = '/media/iDepositBox'
if os.path.exists(mount_point):
self.unmount_unix(mount_point)
else:
os.makedirs(mount_point)
p = Subprocess('mount -t davfs -o rw,user,dir_mode=0777 http://%s:%s/ %s'\
% (bind_host, bind_port, mount_point), with_input=True)
out, err = p.communicate('anonymous\nanonymous')
if p.returncode:
sys.stderr.write('%s\n'%err)
return p.returncode
def mount_mac(self, bind_host, bind_port):
self.__mountpoint = mount_point = '/Volumes/iDepositBox'
if os.path.exists(mount_point):
os.system('umount %s'%mount_point)
else:
os.mkdir(mount_point)
if bind_host == '127.0.0.1':
bind_host = 'localhost'
return os.system('mount_webdav -v iDepositBox http://%s:%s/ %s'\
% (bind_host, bind_port, mount_point))
def __get_win_unused_drive(self):
import win32api
drives = win32api.GetLogicalDriveStrings()
drives = drives.split('\000')
a_drives = []
for s in drives:
s = s.strip()
if s: a_drives.append(s[0])
for drive in ALL_DRIVES_LIST:
if drive in a_drives:
continue
return '%s:'%drive
def mount_windows(self, host, port):
self.umount_windows()
drive = self.__get_win_unused_drive()
self.__mountpoint = 'drive %s'%drive
p = Subprocess(['sc', 'create', 'iDepositBoxMount', 'binPath=', 'cmd /b /c net use %s http://%s:%s/'%\
(drive, host, port), 'type=', 'share'])
p = Subprocess(['sc', 'create', 'iDepositBoxUnmount', 'binPath=', 'cmd /b /c net use /delete %s /Y'%\
drive, 'type=', 'share'])
out, err = p.communicate()
logger.debug('sc create iDepositBoxUnmount: [%s] %s %s'%(p.returncode, out, err))
p = Subprocess('net start iDepositBoxMount')
p.communicate()
return 0
def umount_windows(self):
p = Subprocess('sc query iDepositBoxUnmount')
out, err = p.communicate()
if p.returncode:
logger.debug('no iDepositBoxUnmount service found...')
return
p = Subprocess('net start iDepositBoxUnmount')
p.communicate()
p = Subprocess('sc delete iDepositBoxMount')
out, err = p.communicate()
logger.debug('sc delete iDepositBoxMount: %s %s'%(out, err))
p = Subprocess('sc delete iDepositBoxUnmount')
out, err = p.communicate()
logger.debug('sc delete iDepositBoxUnmount: %s %s'%(out, err))
def unmount_unix(self, mount_point):
if os.path.exists(mount_point):
p = Subprocess('umount %s'%mount_point)
out, err = p.communicate()
if p.returncode:
logger.debug('"umount %s" output: %s %s'%(mount_point, out, err))
if __name__ == '__main__':
if len(sys.argv) != 2:
sys.stderr.write('usage: webdav_mount mount|umount\n')
sys.exit(1)
from id_client.config import Config
wdm = WebdavMounter(nofork=True)
config = Config()
cmd = sys.argv[1]
if cmd == 'mount':
err = ''
try:
ret_code = wdm.mount('127.0.0.1', config.webdav_bind_port)
except Exception, err:
ret_code = 1
if ret_code:
sys.stderr.write('Webdav does not mounted locally! %s\n'%err)
sys.exit(1)
elif cmd == 'umount':
wdm.update_linux_mountpoint( '127.0.0.1:%s'%config.webdav_bind_port)
wdm.unmount()
else:
sys.stderr.write('unknown command "%s"!\n'%cmd)
sys.exit(1)
sys.stdout.write('ok\n')
sys.exit(0)
|
UTF-8
|
Python
| false | false | 6,426 |
py
| 66 |
webdav_mounter.py
| 63 | 0.552132 | 0.543573 | 0 | 199 | 31.286432 | 110 |
du-debug/tornado_SDK
| 15,899,968,937,486 |
6007af90ee28a9762e8b45ac15c6be3999bb20ed
|
c656411d42db388c805c14e3b46dd402d2048d16
|
/tornado_SDK/common/notify_url.py
|
5194bd76a1d373c789f7475ab58e9ce8dd1c4f4a
|
[] |
no_license
|
https://github.com/du-debug/tornado_SDK
|
ebff9606c5f9fb0e88430966b71e28ac1009d29b
|
8b78411413aae01e7ade0eec36f37746d0e54cd4
|
refs/heads/master
| 2020-09-11T00:14:34.456824 | 2019-11-27T05:28:55 | 2019-11-27T05:28:55 | 221,876,675 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
单线程的去维护 notify_url
"""
from utils.cache import CacheMixin
class NotifyUrl(CacheMixin):
    """Singleton holder that maintains the notify_url state.

    ``NotifyUrl()`` always yields the same instance; ``NotifyUrl.instance()``
    is an explicit accessor that creates the instance on first use.
    """

    # Class-level slot for the single shared instance.
    is_instance = None

    def __new__(cls, *args, **kwargs):
        if not cls.is_instance:
            # Bug fix: do not forward *args/**kwargs here — object.__new__
            # raises TypeError when handed extra arguments and no custom
            # __init__ consumes them.
            # NOTE(review): assumes CacheMixin does not define a __new__
            # that expects these arguments — confirm.
            cls.is_instance = super(NotifyUrl, cls).__new__(cls)
        return cls.is_instance

    @classmethod
    def instance(cls):
        """Return the singleton, creating it on first call."""
        if cls.is_instance is None:
            NotifyUrl()
        return cls.is_instance
if __name__ == "__main__":
test01 = NotifyUrl()
test02 = NotifyUrl()
print(id(test01))
print(id(test02))
|
UTF-8
|
Python
| false | false | 572 |
py
| 30 |
notify_url.py
| 30 | 0.584229 | 0.569892 | 0 | 27 | 19.62963 | 81 |
marcelocra/generic-code
| 4,054,449,170,878 |
5dfb6482ad03140f63a064a6245273442223e387
|
89d8a87f6f9eca558f874e7f9f9bc294f9b7b486
|
/sort_quicksort.py
|
7621c88805fa5e56cae805ddeec326e71a485ce2
|
[] |
no_license
|
https://github.com/marcelocra/generic-code
|
18cc8bfa44da0f856783be2f4f2c69104d03dbc9
|
9844a228f2a91e50625cdf72c49d23f8bcdaf06f
|
refs/heads/master
| 2016-09-06T14:32:59.000196 | 2013-02-28T04:08:22 | 2013-02-28T04:08:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import json
import random
import time
def quicksort(array):
if len(array) <= 1:
return array
pivot = array[len(array)/2]
array.remove(pivot)
smaller = []
bigger = []
for x in array:
if x <= pivot:
smaller.append(x)
else:
bigger.append(x)
return quicksort(smaller) + [pivot] + quicksort(bigger)
def calculate_time(number):
my_list = []
for i in range(number):
my_list.append(random.random())
inicio = time.time()
quicksort(my_list)
return time.time() - inicio
my_file = open('sort_quicksort_result.txt', 'r+')
try:
my_res = my_file.read()
my_result = json.loads(my_res)
except:
my_result = {}
for i in range(10000, 100000, 10000):
if i not in my_result.keys():
my_result[i] = [calculate_time(i)]
print 'if'
else:
my_result[i].append(calculate_time(i))
print 'else'
my_file.write(json.dumps(my_result))
my_file.close()
|
UTF-8
|
Python
| false | false | 980 |
py
| 15 |
sort_quicksort.py
| 13 | 0.585714 | 0.567347 | 0 | 45 | 20.777778 | 59 |
kool7/Data_Structures_And_Algorithms_nd256
| 13,984,413,527,640 |
095c551b0e3be8fcd4bc473eeb1aaa11641d0e38
|
fa264b31b1fdc13dc0532f99a86b8dd7615b0116
|
/interview_Cake/Array and strings/merging_meeting_times.py
|
498f9c30c4348e6a0639c9aec68b48039c057065
|
[] |
no_license
|
https://github.com/kool7/Data_Structures_And_Algorithms_nd256
|
7e718a1a8ae81f4d08c245a9a6a1bccf8c562fd3
|
24d37d011a3914b667a97efedd5f0078bdce9913
|
refs/heads/master
| 2021-02-18T01:48:51.462575 | 2021-01-16T11:03:51 | 2021-01-16T11:03:51 | 245,146,425 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Sample meeting time ranges as (start, end) pairs; (3, 5)/(4, 8) overlap
# and (9, 10)/(10, 12) touch.
data = [(0, 1), (3, 5), (4, 8), (10, 12), (9, 10)]
def merge_range(time: list):
    """Merge overlapping or touching meeting time ranges.

    Arguments
    time -- list of (start, end) meeting time ranges

    Returns a new list of (start, end) tuples, sorted by start time, in
    which overlapping/touching input ranges are condensed into one.

    Bug fixes vs. the original:
    - iterate over the ``time`` parameter (the original iterated the
      module-level ``data`` regardless of its argument);
    - rebuild merged ranges as new tuples — tuples are immutable, so the
      original ``output[-1][1] = ...`` raised TypeError on the first merge.
    """
    merged = []
    for start, end in sorted(time, key=lambda rng: rng[0]):
        if merged and start <= merged[-1][1]:
            # Overlaps (or touches) the previous range: extend it.
            prev_start, prev_end = merged[-1]
            merged[-1] = (prev_start, max(end, prev_end))
        else:
            merged.append((start, end))
    return merged
# Demo call on the sample data above (result is discarded).
merge_range(data)
|
UTF-8
|
Python
| false | false | 406 |
py
| 49 |
merging_meeting_times.py
| 48 | 0.504926 | 0.450739 | 0 | 19 | 20.368421 | 55 |
tinajer/personal
| 3,272,765,080,214 |
878aa24c3987356ef5694c8cac61c311581a4374
|
110150911ec1e4b54dc1d8b12c006b3636a766c9
|
/solopy/dorade.py
|
365e0d0097649327b47a731aebd8dcacb629acad
|
[] |
no_license
|
https://github.com/tinajer/personal
|
ee8400249afbbc4633096a4b09fd44874a768cfb
|
807e50ce3ba8a1287165fc28ab1da3274bb9f8fc
|
refs/heads/master
| 2021-01-18T12:36:31.320074 | 2015-06-07T03:09:05 | 2015-06-07T03:09:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import struct
import numpy as np
class DoradeFile:
def __init__(self, file_name, mode):
self._defaults = []
self._dorade = open(file_name, mode)
self._defaults = dir(self)
if 'r' in mode:
self._readHeaders()
self._readSweep()
return
def _readHeaders(self):
self._readSuperSweepIDBlock()
self._readVolumeDescriptor()
self._readSensorDescriptors(self.num_sens_descr)
return
def _readSuperSweepIDBlock(self):
_marker = self._read('s4')
if _marker != "SSWB":
print "Error: Expected volume descriptor marker (SSWB)"
super_sweep_id_block_length = self._read('i')
self.last_used = self._read('i')
self.start_time = self._read('i')
self.stop_time = self._read('i')
file_size = self._read('i')
self.compression_flag = self._read('i')
self.volume_time_stamp = self._read('i')
self.num_params = self._read('i')
self.radar_name = self._read('s8')
start_time = self._read('d')
stop_time = self._read('d')
self.version_number = self._read('i')
num_key_tables = self._read('i')
status = self._read('i')
for idx in range(7):
placeholder = self._read('i')
self.key_table = []
for idx in range(8):
key = {}
key['offset'] = self._read('i')
key['size'] = self._read('i')
key['type'] = self._read('i')
self.key_table.append(key)
return
def _readVolumeDescriptor(self):
_marker = self._read('s4')
if _marker != "VOLD":
print "Error: Expected volume descriptor marker (VOLD)"
_vol_descr_length = self._read('i')
self.revision_number = self._read('h')
self.volume_number = self._read('h')
self.max_record_length = self._read('i')
self.project_name = self._read('s20')
self.year_data = self._read('h')
self.month_data = self._read('h')
self.day_data = self._read('h')
self.hour_data = self._read('h')
self.minute_data = self._read('h')
self.second_data = self._read('h')
self.flight_number = self._read('s8')
self.record_source_id = self._read('s8')
self.year_recording = self._read('h')
self.month_recording = self._read('h')
self.day_recording = self._read('h')
self.num_sens_descr = self._read('h')
return
def _readSensorDescriptors(self, n_sensor_descr):
self.radar_descriptors = []
for sens_descr in range(n_sensor_descr):
descriptor = {}
_marker = self._read('s4')
if _marker != "RADD":
print "Error: Expected sensor descriptor marker (RADD)"
descriptor_length = self._read('i')
descriptor['name'] = self._read('s8')
descriptor['radar_constant'] = self._read('f')
descriptor['peak_power'] = self._read('f')
descriptor['noise_power'] = self._read('f')
descriptor['receiver_gain'] = self._read('f')
descriptor['antenna_gain'] = self._read('f')
descriptor['system_gain'] = self._read('f')
descriptor['horiz_beam_width'] = self._read('f')
descriptor['vert_beam_width'] = self._read('f')
descriptor['radar_type'] = self._read('h')
descriptor['scan_mode'] = self._read('h')
descriptor['antenna_rot_vel'] = self._read('f')
descriptor['scan_param_1'] = self._read('f')
descriptor['scan_param_2'] = self._read('f')
num_param_descr = self._read('h')
num_additional_descr = self._read('h')
data_compression = self._read('h')
data_reduction = self._read('h')
data_reduction_param_1 = self._read('f')
data_reduction_param_2 = self._read('f')
descriptor['radar_longitude'] = self._read('f')
descriptor['radar_latitude'] = self._read('f')
descriptor['radar_altitude'] = self._read('f')
# print descriptor['radar_longitude']
# print descriptor['radar_latitude']
# print descriptor['radar_altitude']
descriptor['unambig_velocity'] = self._read('f')
descriptor['unambig_range'] = self._read('f')
num_frequencies = self._read('h')
num_interpulse_per = self._read('h')
descriptor['frequencies'] = self._read('fffff')
descriptor['interpulse_per'] = self._read('fffff')
descriptor['parameters'] = self._readParameterDescriptors(num_param_descr)
descriptor['cell_range_vec'] = self._readCellRangeVector()
descriptor['corr_factor'] = self._readCorrectionFactorDescriptor()
self.radar_descriptors.append(descriptor)
return
def _readParameterDescriptors(self, n_parameter_descr):
parameter_descriptors = []
for parm_desc in range(n_parameter_descr):
descriptor = {}
_marker = self._read('s4')
if _marker != "PARM":
print "Error: Expected parameter descriptor marker (PARM)"
param_descr_length = self._read('i')
descriptor['name'] = self._read('s8')
descriptor['description'] = self._read('s40')
descriptor['units'] = self._read('s8')
descriptor['interpulse_used'] = self._read('h')
descriptor['frequency_used'] = self._read('h')
descriptor['receiver_bandwidth'] = self._read('f')
descriptor['pulse_width'] = self._read('h')
descriptor['polarization'] = self._read('h')
descriptor['num_samples'] = self._read('h')
descriptor['binary_format'] = self._read('h')
descriptor['threshold_param'] = self._read('s8')
descriptor['thershold_value'] = self._read('f')
descriptor['scale_factor'] = self._read('f')
descriptor['bias_factor'] = self._read('f')
descriptor['bad_data_flag'] = self._read('i')
print descriptor['name']
parameter_descriptors.append(descriptor)
return parameter_descriptors
def _readCellRangeVector(self):
_marker = self._read('s4')
if _marker != "CELV":
print "Error: Expected cell range vector marker (CELV)"
comment_length = self._read('i')
cell_vector_length = self._read('i')
cell_vector = self._read('f' * 1500) # is 1500 constant for all files?
return cell_vector
def _readCorrectionFactorDescriptor(self):
descriptor = {}
_marker = self._read('s4')
if _marker != "CFAC":
print "Error: Expected correction factor descriptor marker (CFAC)"
corr_fact_descr_length = self._read('i')
descriptor['azimuth'] = self._read('f')
descriptor['elevation'] = self._read('f')
descriptor['range_delay'] = self._read('f')
descriptor['longitude'] = self._read('f')
descriptor['latitude'] = self._read('f')
descriptor['pressure_alt'] = self._read('f')
descriptor['physical_alt'] = self._read('f')
descriptor['platform_u'] = self._read('f')
descriptor['platform_v'] = self._read('f')
descriptor['platform_w'] = self._read('f')
descriptor['platform_heading'] = self._read('f')
descriptor['platform_roll'] = self._read('f')
descriptor['platform_pitch'] = self._read('f')
descriptor['platform_drift'] = self._read('f')
descriptor['rotation_angle'] = self._read('f')
descriptor['tilt_angle'] = self._read('f')
return descriptor
def _readSweep(self):
self._readSweepDescriptor()
self._readRays(self.num_rays, self.radar_descriptors[0]['parameters'])
return
def _readSweepDescriptor(self):
_marker = self._read('s4')
if _marker != "SWIB":
print "Error: Expected sweep descriptor marker (SWIB)"
sweep_descr_length = self._read('i')
sweep_comment = self._read('s8')
sweep_number = self._read('i')
self.num_rays = self._read('i')
self.true_start_angle = self._read('f')
self.true_end_angle = self._read('f')
fixed_angle = self._read('f')
filter_flag = self._read('i')
return
def _readRays(self, n_rays, parameter_descriptors):
self._rays = []
for ray in range(n_rays):
descriptor = {}
descriptor['ray_info'] = self._readRayInfoBlock()
descriptor['platform_info'] = self._readPlatformInfoBlock()
descriptor['param_data'] = {}
for param_desc in parameter_descriptors:
_marker = self._read('s4')
if _marker != "RDAT":
print "Error: Expected radar data marker (RDAT): ray %d" % ray
radar_data_length = self._read('i')
parameter_name = self._read('s8')
if parameter_name != param_desc['name']:
print "Error: Expected parameter %s, but got %s" % (param_desc['name'], parameter_name)
data_type = {1:'b', 2:'h', 3:'i', 4:'f'}[ param_desc['binary_format'] ]
data_width = {1:1, 2:2, 3:4, 4:4}[ param_desc['binary_format'] ]
data_compressed = np.array(self._read(data_type * ((radar_data_length - 16) / data_width)))
data = self._decompressHRD(data_compressed)#, debug=(ray == 0))
data = self._remap(data, param_desc)
descriptor['param_data'][parameter_name] = data
self._rays.append(descriptor)
return
def _decompressHRD(self, compressed_data, debug=False):
decompressed_data = []
idx = 0
if debug:
print compressed_data
while idx < len(compressed_data) and compressed_data[idx] != 1:
count = compressed_data[idx] & int("0x7fff", 0)
good_data = compressed_data[idx] & int("0x8000", 0)
if debug:
print count, bool(good_data)
if good_data:
decompressed_data.extend(compressed_data[(idx + 1):(idx + count + 1)])
idx += count + 1
else:
decompressed_data.extend([ -int("0x8000", 0) for jdy in range(count) ])
idx += 1
return np.array(decompressed_data)
def _remap(self, data, parameter_desc):
return np.where(data > -10000, data / parameter_desc['scale_factor'] - parameter_desc['bias_factor'], data)
def _readRayInfoBlock(self):
descriptor = {}
_marker = self._read('s4')
if _marker != "RYIB":
print "Error: Expected ray info block marker (RYIB)"
ray_info_block_length = self._read('i')
descriptor['sweep_number'] = self._read('i')
descriptor['julian_day'] = self._read('i')
descriptor['hour'] = self._read('h')
descriptor['minute'] = self._read('h')
descriptor['second'] = self._read('h')
descriptor['millisecond'] = self._read('h')
descriptor['azimuth'] = self._read('f')
descriptor['elevation'] = self._read('f')
descriptor['peak_tx_power'] = self._read('f')
descriptor['scan_rate'] = self._read('f')
descriptor['ray_status'] = self._read('i')
return descriptor
def _readPlatformInfoBlock(self):
descriptor = {}
_marker = self._read('s4')
if _marker != "ASIB":
print "Error: Expected platform info block marker (ASIB)"
platform_info_block_length = self._read('i')
descriptor['longitude'] = self._read('f')
descriptor['latitude'] = self._read('f')
descriptor['altitude_msl'] = self._read('f')
descriptor['altitude_agl'] = self._read('f')
descriptor['antenna_u'] = self._read('f')
descriptor['antenna_v'] = self._read('f')
descriptor['antenna_w'] = self._read('f')
descriptor['heading'] = self._read('f')
descriptor['roll'] = self._read('f')
descriptor['pitch'] = self._read('f')
descriptor['drift'] = self._read('f')
descriptor['beam_sweep_angle'] = self._read('f')
descriptor['beam_scan_angle'] = self._read('f')
descriptor['air_u'] = self._read('f')
descriptor['air_v'] = self._read('f')
descriptor['air_w'] = self._read('f')
descriptor['heading_chg_rate'] = self._read('f')
descriptor['pitch_chg_rate'] = self._read('f')
return descriptor
    def _read(self, type_string):
        """Read one value (or fixed-size string) from the underlying file.

        type_string is either a struct format code (e.g. 'i', 'h', 'f'),
        read little-endian, or 's<N>' meaning "read N raw bytes and strip
        trailing NULs".  Single-value formats are unwrapped; multi-value
        formats return a list.
        """
        if type_string[0] != 's':
            size = struct.calcsize(type_string)
            data = struct.unpack("<%s" % type_string, self._dorade.read(size))
        else:
            # e.g. 's4' -> 4 raw bytes, NUL-stripped
            size = int(type_string[1:])
            data = tuple([ self._dorade.read(size).strip("\0") ])
        if len(data) == 1:
            return data[0]
        else:
            return list(data)
def getSweep(self, parameter_name):
sweep = np.empty((self.num_rays, len(self._rays[0]['param_data'][parameter_name])))
for idx in range(self.num_rays):
sweep[idx] = self._rays[idx]['param_data'][parameter_name]
return sweep
def open_file(file_name, mode):
    """Factory helper: open a DORADE sweep file and return a DoradeFile object."""
    return DoradeFile(file_name, mode)
if __name__ == "__main__":
    # Smoke test: read a sample sweep file and print the maximum reflectivity.
    dor = open_file("swp.1090605194442.KFTG.486.0.5_SUR_v531", 'r')
    print dor.getSweep("REF").max()
|
UTF-8
|
Python
| false | false | 14,594 |
py
| 39 |
dorade.py
| 34 | 0.509182 | 0.501919 | 0 | 382 | 37.201571 | 115 |
DracoMindz/holbertonschool-machine_learning
| 1,151,051,283,055 |
30f92306744f4c8844cbba02c8b1e6184e241008
|
8406a55dcd26a111486a99d4a7a0cd556bd8348c
|
/supervised_learning/0x06-keras/9-model.py
|
60d9690c5534ead83339476cf5bc442297368430
|
[] |
no_license
|
https://github.com/DracoMindz/holbertonschool-machine_learning
|
d486ad55865622d81527a31ee844c82b7d06286b
|
4ac942126918c7acaa9ef88d18efe299b2f726fe
|
refs/heads/master
| 2020-12-21T20:44:01.026482 | 2020-10-09T02:31:36 | 2020-10-09T02:31:36 | 236,553,914 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
"""
function save_model: saves entire model
function load_model: loads entire model
"""
import tensorflow.keras as K
def save_model(network, filename):
    """
    Persist an entire Keras model (architecture, weights, optimizer state).

    Args:
        network: the Keras model to save
        filename: path of the file to write
    Returns:
        None
    """
    network.save(filename)
def load_model(filename):
    """
    Load an entire model previously written by save_model.

    Args:
        filename: path of the saved model file
    Returns:
        the reconstructed Keras model
    """
    return K.models.load_model(filename)
|
UTF-8
|
Python
| false | false | 401 |
py
| 274 |
9-model.py
| 220 | 0.665835 | 0.663342 | 0 | 23 | 16.434783 | 48 |
zexpp5/houseAiApi
| 6,502,580,499,703 |
7876668c07fa8554328585ef61ae136cb2a4610c
|
332b2aad6c2ca2cafe4209c9789285b80ed76190
|
/src/AnalyAPI.py
|
dfa8b7dbd40625fcdf969143af68ea2c032678ab
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/zexpp5/houseAiApi
|
4e65c90973044ddf15f73cc7fd88ba00a3448fcc
|
8784c1bac82f21b5f8f96eb68e6aab9ea0694aa7
|
refs/heads/master
| 2020-05-27T21:14:12.108127 | 2016-06-02T09:52:02 | 2016-06-02T09:52:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#coding:utf-8
# Semantic-parsing API service; listens on port 8778.
import subprocess
import os
import logging
from flask import Flask,request
app = Flask(__name__)
@app.route('/fc/<text>')
def hello_world(text):
    """Run the semantic parser (ree.py) on `text` and return its raw output."""
    #param = request.args.get('text')
    # Pass arguments as a list with shell=False: `text` comes straight from
    # the URL, so interpolating it into a shell string (the previous
    # shell=True form) allowed arbitrary command injection.
    fc = subprocess.check_output(['/usr/bin/python', '../ree.py', text])
    return fc  #input
if __name__ == '__main__':
    # Listen on all interfaces on port 8778.
    app.run(host='0.0.0.0',port=8778)
|
UTF-8
|
Python
| false | false | 418 |
py
| 31 |
AnalyAPI.py
| 19 | 0.638191 | 0.605528 | 0 | 16 | 23.9375 | 82 |
fank-cd/python_leetcode
| 7,052,336,324,199 |
3f7ec0715afd128548c8af4a83d6ca7fbc85c600
|
8f7b7a910520ba49a2e614da72f7b6297f617409
|
/Problemset/jewels-and-stones/jewels-and-stones.py
|
f8ac7f2af5a27836f21b9364b4179ddd0dd6d87d
|
[] |
no_license
|
https://github.com/fank-cd/python_leetcode
|
69c4466e9e202e48502252439b4cc318712043a2
|
61f07d7c7e76a1eada21eb3e6a1a177af3d56948
|
refs/heads/master
| 2021-06-16T23:41:55.591095 | 2021-03-04T08:31:47 | 2021-03-04T08:31:47 | 173,226,640 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# @Title: 宝石与石头 (Jewels and Stones)
# @Author: 2464512446@qq.com
# @Date: 2020-10-02 20:38:40
# @Runtime: 40 ms
# @Memory: 13.4 MB
class Solution:
    def numJewelsInStones(self, J: str, S: str) -> int:
        """Count how many stones in S are jewels, i.e. characters occurring in J.

        The original used Counter without importing it (a NameError outside
        the LeetCode harness); a set gives O(1) membership with no import.
        """
        jewels = set(J)
        return sum(1 for stone in S if stone in jewels)
|
UTF-8
|
Python
| false | false | 317 |
py
| 287 |
jewels-and-stones.py
| 277 | 0.547231 | 0.449511 | 0 | 13 | 22.538462 | 55 |
ngoclam9415/all-authorization
| 9,947,144,270,572 |
9edd39c247a18a7ff33b558a764a38342956f8b8
|
4fe6b7a8f149cfbf16c3f47e7de607a05cc40157
|
/all_authentication.py
|
e726f76323389ecd78330afb60c1cc6fd2d46e06
|
[] |
no_license
|
https://github.com/ngoclam9415/all-authorization
|
4759ce50269deffb977cec846836dba683ff4999
|
e8e2b9eec5d576700bf9dafa15512f5f149294b0
|
refs/heads/master
| 2020-09-20T03:44:18.608663 | 2019-11-27T07:22:17 | 2019-11-27T07:22:17 | 224,369,382 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Flask, request, render_template, session, jsonify
import base64
import jwt
import time
import random
import re
import hashlib
app = Flask(__name__)
# NOTE(review): credentials and secret key are hard-coded; acceptable for a
# demo, but they should come from configuration/environment in real use.
app.config["BASIC_AUTH_USERNAME"] = "ngoclam_athena"
app.config["BASIC_AUTH_PASSWORD"] = "athenaforthewin"
app.secret_key = 'any random string'
# Pre-computed base64 of "user:password" expected in the Basic auth header.
data = base64.b64encode("ngoclam_athena:athenaforthewin".encode("utf-8"))
# print(data.decode("utf-8"))
def parse_diggest_header(diggest_header):
    """Extract key/value pairs (username, realm, nonce, uri, ...) from a Digest auth header."""
    pattern = re.compile(r'(\w+)[:=] ?"?([\w\/]+)"?')
    return {match.group(1): match.group(2)
            for match in pattern.finditer(diggest_header)}
def generate_random_value():
    """Return 128 random bits as a 32-character zero-padded hex string (nonce material)."""
    return format(random.getrandbits(128), '032x')
@app.route("/basic_auth", methods=["POST"])
def basic_auth():
auth_header = request.headers.get("Authorization", None)
if auth_header == "Basic " + data.decode("utf-8"):
current_time = time.time()
value = "%032x"% random.getrandbits(128)
encoded_data = jwt.encode({"from" : current_time, "to" : current_time + 60, "data" : value}, "secret", algorithm="HS256").decode("utf-8") # THIS OUTPUT IS BYTE
if "Bearer" not in session:
session["Bearer"] = []
session["Bearer"].append(encoded_data)
return jsonify({"access_token" : encoded_data})
else :
return "FAIL"
@app.route("/bearer_auth", methods=["POST"])
def bearer_auth():
bearer_header = request.headers.get("Authorization", None)
encoded_data = bearer_header.split("Bearer ")[-1]
if encoded_data in session["Bearer"]:
data = jwt.decode(encoded_data, "secret", algorithms=["HS256"])
if time.time() < data["to"]:
return "THIS IS WHAT YOU WANT"
else:
return "TOKEN EXPIRED"
else:
return "INVALID TOKEN"
return "FAIL"
@app.route("/diggest_auth", methods=["GET", "POST"])
def diggest_auth():
if request.method == "GET":
realm="diggest_auth"
nonce=generate_random_value()
algorithm="MD5"
qop="auth"
session["server_nonce"] = nonce
print(generate_random_value())
return jsonify({"realm" : realm, "nonce" : nonce, "algorithm" : algorithm, "qop" : qop})
elif request.method == "POST":
header_dict = parse_diggest_header(request.headers.get("Authorization"))
print(header_dict)
md1 = hashlib.md5("{}:{}:{}".format(header_dict["username"], header_dict["realm"], app.config.get("BASIC_AUTH_PASSWORD")).encode("utf-8")).digest()
md2 = hashlib.md5("{}:{}".format(request.method, header_dict["uri"]).encode("utf-8")).digest()
result = hashlib.md5("{}:{}:{}:{}:{}".format(md1, header_dict["nonce"], header_dict["nonceCount"], header_dict["cnonce"], md2).encode("utf-8")).hexdigest()
if result == header_dict["response"]:
return "THIS IS WHAT YOU WANT"
return "FAIL"
if __name__ == "__main__":
app.run(debug=True)
|
UTF-8
|
Python
| false | false | 2,873 |
py
| 2 |
all_authentication.py
| 2 | 0.619213 | 0.604595 | 0 | 75 | 37.32 | 167 |
Joakimad/IINI4014-Python-for-programmers
| 14,053,132,997,929 |
6f6728131d1b8e27c4b9587376edd4ce26eeb82b
|
6f84027b5d3cb29ccd75409bf713c5a5fe932aa5
|
/oving7/decrypt.py
|
156db6081e141374a647744a35c2c64d5c211d4d
|
[] |
no_license
|
https://github.com/Joakimad/IINI4014-Python-for-programmers
|
de77f093406bdbdbf53aef409e8fe8a48fab67c9
|
8b4959d9bcadce58e28c9264b30982767ee19a4f
|
refs/heads/master
| 2021-01-14T20:48:48.335124 | 2020-04-19T15:47:19 | 2020-04-19T15:47:19 | 242,755,030 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from oving7.rsa import decrypt
def decrypt_with_publickey(publickey, encrypted_msg, start, end):
    """Brute-force candidate private exponents in [start, end).

    Each candidate exponent is tried with the public modulus; decryptions
    whose plaintext starts with 'h' are kept as plausible solutions.
    Returns a list of (exponent, decrypted_msg) pairs.
    """
    e, n = publickey  # NOTE(review): e is unpacked but never used here
    possible_solutions = []
    counter = 0
    for i in range(start, end):
        # Progress indicator every 100 candidates.
        if counter == 100:
            print(i)
            counter = 0
        counter += 1
        decrypted_msg = decrypt((i, n), encrypted_msg)
        if decrypted_msg.startswith('h'):
            print(decrypted_msg)
            possible_solutions.append((i, decrypted_msg))
    return possible_solutions
def PrimeGen(n=10000):
    """Return the first n prime numbers, in ascending order.

    Uses trial division against the primes found so far, testing only
    divisors up to sqrt(candidate) and stopping at the first factor.  The
    original built a full list of divisors for every candidate (no early
    exit, all divisors from 2..candidate), which was needlessly quadratic.
    """
    primes = []
    candidate = 2
    while len(primes) < n:
        if all(candidate % p != 0 for p in primes if p * p <= candidate):
            primes.append(candidate)
        candidate += 1
    return primes
# Ciphertext blocks and the (e, n) public key for the exercise; the private
# exponent is searched in the narrow range below.
encrypted_msg = [84620, 66174, 66174, 5926, 9175, 87925, 54744, 54744, 65916, 79243, 39613, 9932, 70186, 85020, 70186,
                 5926, 65916, 72060, 70186, 21706, 39613, 11245, 34694, 13934, 54744, 9932, 70186, 85020, 70186, 54744,
                 81444, 32170, 53121, 81327, 82327, 92023, 34694, 54896, 5926, 66174, 11245, 9175, 54896, 9175, 66174,
                 65916, 43579, 64029, 34496, 53121, 66174, 66174, 21706, 92023, 85020, 9175, 81327, 21706, 13934, 21706,
                 70186, 79243, 9175, 66174, 81327, 5926, 74450, 21706, 70186, 79243, 81327, 81444, 32170, 53121]
publickey = (29815, 100127)
print(decrypt_with_publickey(publickey, encrypted_msg, 64300, 64400))
|
UTF-8
|
Python
| false | false | 1,418 |
py
| 10 |
decrypt.py
| 8 | 0.603667 | 0.324401 | 0 | 38 | 36.315789 | 120 |
Aasthaengg/IBMdataset
| 13,503,377,215,686 |
fa6d642ece22153be2315b6325b7db1547631299
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02797/s604469972.py
|
3c4c4d455f81e5f7cf9f26b9659943f6db4d8491
|
[] |
no_license
|
https://github.com/Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Build a length-N sequence with exactly K subarrays summing to S.
N,K,S=map(int,input().split())
# The first K elements equal S: each one alone is a sum-S subarray.
ans=[S]*K
# Filler for the remaining N-K slots must not create extra sum-S subarrays.
# If N-K >= S, filling with 1s is unsafe (S consecutive 1s sum to S), so use
# S+1 — valid there because S <= N-K <= N keeps S+1 within limits.
# Otherwise (N-K < S) there are too few 1s to ever reach S, so 1 is safe.
if N-K>=S:
    ans.extend([S+1]*(N-K))
else:
    ans.extend([1]*(N-K))
print(*ans)
|
UTF-8
|
Python
| false | false | 123 |
py
| 202,060 |
s604469972.py
| 202,055 | 0.536585 | 0.520325 | 0 | 9 | 12.777778 | 30 |
kigold/fullApp
| 1,640,677,547,667 |
f5236ce8ef8339bca3aad72f64f81aa9f3a9d799
|
a5937fd971d8c728c3652c49b96796db0f3da153
|
/api/fullapp/userprofile/migrations/0002_auto_20200409_0021.py
|
1c67ec7831f20898241bd8a1ac8a31f95d225279
|
[
"MIT"
] |
permissive
|
https://github.com/kigold/fullApp
|
a779b5354db335636f30b59820bbfd9b736a3770
|
0782648590524739df07eeeefdacf7e5ceb66332
|
refs/heads/master
| 2023-01-29T00:37:31.522077 | 2020-10-31T16:20:52 | 2020-10-31T16:20:52 | 214,624,839 | 0 | 0 | null | false | 2023-01-06T15:52:13 | 2019-10-12T10:07:39 | 2020-10-31T16:20:55 | 2023-01-06T15:52:12 | 1,766 | 0 | 0 | 36 |
Python
| false | false |
# Generated by Django 3.0.5 on 2020-04-09 00:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: make the game score and
    # penalty-shootout fields nullable.  Do not edit applied migrations.

    dependencies = [
        ('userprofile', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='game',
            name='away_score',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='game',
            name='home_score',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='game',
            name='penalty_shootout',
            field=models.BooleanField(null=True),
        ),
    ]
|
UTF-8
|
Python
| false | false | 694 |
py
| 71 |
0002_auto_20200409_0021.py
| 61 | 0.550432 | 0.523055 | 0 | 28 | 23.785714 | 49 |
lsjsss/PythonClass
| 5,437,428,598,816 |
416d4dd650f759dc53e1ed8909ed49f786a76c66
|
da99b8e2a22318f1cafb0c78adb17c8fdebe01df
|
/PythonBookAdditional/第09章 GUI编程/code/tkinter_RegionCapture.py
|
e43d485df7e75e9d4cfa0ac694857580a4384128
|
[
"MIT"
] |
permissive
|
https://github.com/lsjsss/PythonClass
|
f185873113d54ed6ae9b3ccc22cc5a71bf8f611d
|
0d38d2ca4d14d5e0e2062e22ae2dbbefea279179
|
refs/heads/master
| 2023-02-18T13:43:32.453478 | 2023-02-08T07:17:09 | 2023-02-08T07:17:09 | 247,711,629 | 0 | 0 | null | false | 2022-04-25T07:03:53 | 2020-03-16T13:38:15 | 2020-06-23T15:11:37 | 2022-04-25T07:03:53 | 29,236 | 0 | 0 | 1 |
Python
| false | false |
import tkinter
import tkinter.filedialog
import os
from PIL import ImageGrab
from time import sleep
# Small fixed-size launcher window at (400, 300) holding the capture button.
root = tkinter.Tk()
root.geometry('100x40+400+300')
root.resizable(False, False)
class MyCapture:
    """Full-screen overlay that lets the user drag-select a region of a
    screenshot and save it to a file chosen in a dialog."""
    def __init__(self, png):
        # X and Y record where the left mouse button was pressed.
        self.X = tkinter.IntVar(value=0)
        self.Y = tkinter.IntVar(value=0)
        # Screen dimensions.
        screenWidth = root.winfo_screenwidth()
        screenHeight = root.winfo_screenheight()
        # Create the top-level container window.
        self.top = tkinter.Toplevel(root, width=screenWidth, height=screenHeight)
        # Hide the maximize/minimize decorations (borderless full screen).
        self.top.overrideredirect(True)
        self.canvas = tkinter.Canvas(self.top,bg='white', width=screenWidth, height=screenHeight)
        # Show the full-screen screenshot; region selection happens on top of it.
        self.image = tkinter.PhotoImage(file=png)
        self.canvas.create_image(screenWidth//2, screenHeight//2, image=self.image)
        # Record where the left button went down.
        def onLeftButtonDown(event):
            self.X.set(event.x)
            self.Y.set(event.y)
            # Start selecting.
            self.sel = True
        self.canvas.bind('<Button-1>', onLeftButtonDown)
        # While the left button moves, draw the currently selected rectangle.
        # NOTE(review): self.sel is first assigned in onLeftButtonDown, so a
        # motion event before any click would raise AttributeError - confirm.
        def onLeftButtonMove(event):
            if not self.sel:
                return
            global lastDraw
            try:
                # Delete the previously drawn rectangle, otherwise the trail
                # of old rectangles blackens the screen.
                self.canvas.delete(lastDraw)
            except Exception as e:
                pass
            lastDraw = self.canvas.create_rectangle(self.X.get(), self.Y.get(), event.x, event.y, outline='black')
        self.canvas.bind('<B1-Motion>', onLeftButtonMove)
        # On release: capture the selected region and save it.
        def onLeftButtonUp(event):
            self.sel = False
            try:
                self.canvas.delete(lastDraw)
            except Exception as e:
                pass
            sleep(0.1)
            # Sort the coordinates so dragging from bottom-right to top-left
            # still yields a valid (left, top, right, bottom) box.
            left, right = sorted([self.X.get(), event.x])
            top, bottom = sorted([self.Y.get(), event.y])
            pic = ImageGrab.grab((left+1, top+1, right, bottom))
            # Ask the user where to save the captured region.
            fileName = tkinter.filedialog.asksaveasfilename(title='保存截图', filetypes=[('image', '*.jpg *.png')])
            if fileName:
                pic.save(fileName)
            # Close the overlay window.
            self.top.destroy()
        self.canvas.bind('<ButtonRelease-1>', onLeftButtonUp)
        self.canvas.pack(fill=tkinter.BOTH, expand=tkinter.YES)
# Start a capture session.
def buttonCaptureClick():
    """Take a full-screen screenshot, open the region-selection overlay,
    and clean up the temporary screenshot file when it closes."""
    # Minimize the main window so it is not in the screenshot.
    root.state('icon')
    sleep(0.2)
    filename = 'temp.png'
    im = ImageGrab.grab()
    im.save(filename)
    im.close()
    # Show the full-screen screenshot for region selection.
    w = MyCapture(filename)
    buttonCapture.wait_window(w.top)
    # Capture finished: restore the main window and delete the temp file.
    root.state('normal')
    os.remove(filename)
buttonCapture = tkinter.Button(root, text='截图', command=buttonCaptureClick)
buttonCapture.place(x=10, y=10, width=80, height=20)
# Enter the Tk event loop.
root.mainloop()
|
UTF-8
|
Python
| false | false | 3,333 |
py
| 252 |
tkinter_RegionCapture.py
| 244 | 0.597506 | 0.586422 | 0 | 86 | 32.569767 | 114 |
camillemonchicourt/Geotrek
| 12,962,211,343,817 |
e55ba4f4338edffbd29a5da0d2be1486f7d4d557
|
ba6ac9acfbf969eac1d6f3e06e9ce8174fef1cfd
|
/geotrek/trekking/tests/__init__.py
|
1a32c7cf858e7ec3d8b83fbe23815676d192ee60
|
[
"BSD-2-Clause"
] |
permissive
|
https://github.com/camillemonchicourt/Geotrek
|
550e71917ff577ccd99506432fde55d731c59475
|
c33eac7e4479e3aa5b16608c0aa7665c4a72e9a1
|
refs/heads/master
| 2023-08-03T13:16:51.929524 | 2014-11-28T16:16:21 | 2014-11-28T16:16:21 | 24,842,186 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# pylint: disable=W0401
from .base import *
from .test_views import *
from .test_filters import *
from .test_translation import *
from .test_trek_relationship import *
from .test_models import *
from .test_admin import *
|
UTF-8
|
Python
| false | false | 221 |
py
| 38 |
__init__.py
| 28 | 0.751131 | 0.733032 | 0 | 9 | 23.666667 | 37 |
lavaluv/hadoop-test
| 7,894,149,924,452 |
36a736c62830a5f8631a13b8aa966aab06a3959b
|
539eb0d494d22c67c5f0c4a075382acf1bbe322d
|
/pcap/pcap.py
|
6bce84b7449d8c98baf17a96c8d8b29e2f7626da
|
[] |
no_license
|
https://github.com/lavaluv/hadoop-test
|
9679142072aa0cecc6bf1ec662edc6e3366d921a
|
aec1e7ef760c1357de3eabd102ea2d20c272aec4
|
refs/heads/master
| 2020-04-01T20:13:54.080103 | 2019-03-08T07:37:59 | 2019-03-08T07:37:59 | 153,594,518 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding=utf-8
import copy
import PcapAnalyzer.protocol as protocol
inputFilePath = 'test.pcap'
outputFilePath = 'result.txt'
# Parse a pcap capture: 24-byte global header, then packet records, each with
# a 16-byte record header followed by `caplen` bytes of packet data.
fpcap = open(inputFilePath,'rb')
file = open(outputFilePath,'w')
input_data = fpcap.read()
#pcap header
pHeader = protocol.Pcap(input_data[0:24])
pHeader.writeIntoFile(file,'magicNum','verMajor','verMinor','thiszone','sigfigs','snaplen','linktype')
#data
pDataArray = []
i = 24
while (i < len(input_data)):
    #dataHeader
    pData = protocol.PcapData(input_data[i:])
    #write into pData
    pDataArray.append(copy.deepcopy(pData))
    # Advance past this record: 16-byte record header + captured length.
    i = i + pData.getValue('caplen') + 16
#pcap data packet
for data in pDataArray:
    data.writeIntoFile(file,'GMTTime','microTime','caplen','datalen','content')
# Fixed summary string: was 'Have'+n+"pcakets" (typo, missing spaces).
file.write('Have '+str(len(pDataArray))+' packets'+'\n')
file.close()
fpcap.close()
|
UTF-8
|
Python
| false | false | 793 |
py
| 28 |
pcap.py
| 20 | 0.726356 | 0.716267 | 0 | 31 | 24.612903 | 102 |
bgruening/ngsutils
| 17,214,228,958,724 |
1014cce40fffea7656ff2a81f473501c4f2da19a
|
94bd032bc21bfd24e6dcbcfe642331f58829e574
|
/ngsutils/bam/junctioncount.py
|
46114dfa66cdebeb374eb973bbbc4f4d575ecac1
|
[
"BSD-3-Clause",
"BSD-3-Clause-Open-MPI"
] |
permissive
|
https://github.com/bgruening/ngsutils
|
4c1d935eb0ff337de996ce9d71b8e79ebf2faee7
|
417e90dc1918fb553dd84990f2c54bd8cea8f44d
|
refs/heads/master
| 2021-01-21T20:33:45.678884 | 2019-06-25T20:48:45 | 2019-06-25T20:48:45 | 45,920,499 | 0 | 0 |
BSD-3-Clause
| true | 2019-07-16T10:09:01 | 2015-11-10T15:21:30 | 2015-11-10T15:21:32 | 2019-07-16T10:08:58 | 5,658 | 0 | 0 | 0 |
Python
| false | false |
#!/usr/bin/env python
## category General
## desc Counts the number of reads spanning individual junctions.
'''
Counts the number of reads that span each junction found in the BAM file.
You can specify a particular genome range to scan (like a gene region).
'''
import sys
import os
from ngsutils.bam import bam_iter, bam_open
def bam_junction_count(bam, ref=None, start=None, end=None, out=sys.stdout, quiet=False):
    '''
    Write one "chrom:start-end<TAB>count" line per splice junction found in
    the BAM file, counting distinct read names that span each junction.
    Junction tallies are flushed whenever the reference (tid) changes, and
    once more after the last read.

    Fix: output now goes to the `out` parameter; the original ignored it
    and wrote to sys.stdout unconditionally.
    '''
    last_tid = None
    junctions = {}

    for read in bam_iter(bam, ref=ref, start=start, end=end, quiet=quiet):
        if read.is_unmapped:
            continue

        if read.tid != last_tid and junctions:
            # New reference: flush the junctions accumulated so far.
            for junction in junctions:
                out.write('%s\t%s\n' % (junction, len(junctions[junction])))
            junctions = {}
        last_tid = read.tid

        hasgap = False
        pos = read.pos
        gap_end = None  # renamed from `end`, which shadowed the parameter

        for op, size in read.cigar:
            if op == 0:    # M: alignment match, consumes reference
                pos += size
            elif op == 1:  # I: insertion, reference position unchanged
                pass
            elif op == 2:  # D: deletion, consumes reference
                pos += size
            elif op == 3:  # N: skipped region = splice junction; record span
                hasgap = True
                gap_end = pos + size
                break
            elif op == 4:  # S: NOTE(review): soft clips don't consume the
                           # reference per the SAM spec, but the original
                           # advanced pos here; behavior kept - confirm intent.
                pos += size

        if not hasgap:
            continue

        junction = '%s:%s-%s' % (bam.references[read.tid], pos, gap_end)
        if junction not in junctions:
            junctions[junction] = set()

        junctions[junction].add(read.qname)

    # Flush the final reference's junctions.
    for junction in junctions:
        out.write('%s\t%s\n' % (junction, len(junctions[junction])))
def usage(msg=""):
if msg:
print msg
print
print __doc__
print """\
Usage: bamutils junctioncount {opts} bamfile {region}
Region should be: chr:start-end (start 1-based)
"""
sys.exit(1)
if __name__ == "__main__":
fname = None
ref = None
start = None
end = None
for arg in sys.argv[1:]:
if arg == '-h':
usage()
elif not fname:
if os.path.exists(arg):
fname = arg
else:
usage("%s doesn't exist!")
else:
chrom, se = arg.split(':')
start, end = [int(x) for x in se.split('-')]
start = start - 1
if not fname:
usage()
bamfile = bam_open(fname)
bam_junction_count(bamfile, ref, start, end)
bamfile.close()
|
UTF-8
|
Python
| false | false | 2,393 |
py
| 126 |
junctioncount.py
| 121 | 0.522357 | 0.518596 | 0 | 95 | 24.189474 | 89 |
fair-workflows/fairworkflows-ui
| 9,354,438,795,664 |
d1c66e8703c4177b9dd5c4b55b631fc22c0e2270
|
b1f816d36ec77ea4f7a622d5708cb0268844445b
|
/app/app.py
|
e30eec92dcdbe3a9ebc0328db1ae125d19b45a3a
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/fair-workflows/fairworkflows-ui
|
03380033a39705e9522f14814342c177cb9c2c75
|
0036e2ff9a2cd9039dc955d21b8175386a44c8d5
|
refs/heads/main
| 2022-12-27T05:56:13.975755 | 2020-10-06T10:40:42 | 2020-10-06T10:40:42 | 301,308,768 | 0 | 0 |
Apache-2.0
| false | 2020-10-06T10:40:44 | 2020-10-05T06:05:00 | 2020-10-05T13:37:17 | 2020-10-06T10:40:43 | 20 | 0 | 0 | 0 |
HTML
| false | false |
import time
from fairworkflows import FairWorkflow, FairStep
from flask import Flask, render_template, request, redirect, jsonify
cache = {}
def create_app():
    """Build the Flask app with routes for creating, extending and publishing FAIR workflows."""
    app = Flask(__name__)
    # Disable static-file caching so regenerated workflow diagrams show immediately.
    app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0

    @app.route("/", methods=['GET', 'POST'])
    def index():
        # Landing page; POST actions either create a workflow or redirect.
        all_count, in_hat_count = 2, 1
        if request.method == 'POST':
            if 'create_workflow' in request.form:
                description = request.form['workflow_description']
                cache['workflow'] = FairWorkflow(description=description)
                update_visualization()
                return redirect('/workflow')
            elif 'empty_hat' in request.form:
                return redirect('/')
            elif 'start_game' in request.form:
                return redirect('/game')
            elif 'fill_hat' in request.form:
                return redirect('/')
        return render_template('index.html',
                               workflow=cache.get('workflow'),
                               all_count=all_count,
                               in_hat_count=in_hat_count)

    def update_visualization():
        # Render the current workflow DAG to a timestamped image under
        # static/cache, and remember the resulting image path.
        workflow = cache['workflow']
        ts = time.time()
        filepath = 'static/cache/dag' + str(int(ts))
        cache['filepath'] = filepath + '.dot.png'
        workflow.draw('app/' + filepath)

    @app.route("/workflow", methods=['GET', 'POST'])
    def workflow():
        # Add a step (by URI or from a nanopub) or publish the workflow.
        if 'add_step' in request.form:
            uri = request.form['step_uri']
            if request.form.get('from_nanopub'):
                step = FairStep.from_nanopub(uri)
            else:
                step = FairStep(uri)
            workflow = cache['workflow']
            workflow.add(step)
            update_visualization()
            return redirect('/workflow')
        if 'publish' in request.form:
            publication_info = cache['workflow'].publish_as_nanopub()
            nanopub_uri = publication_info.get('nanopub_uri')
            if nanopub_uri is None:
                print('Failed to publish to nanopub')
            cache['nanopub_uri'] = nanopub_uri
        return render_template('workflow.html',
                               workflow=cache.get('workflow'),
                               image_path=cache.get('filepath'),
                               nanopub_uri=cache.get('nanopub_uri'))

    return app
def main():
    """Run the development server, reachable from other hosts."""
    app = create_app()
    app.run(debug=True, host='0.0.0.0')

if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,504 |
py
| 5 |
app.py
| 1 | 0.527955 | 0.52516 | 0 | 72 | 33.777778 | 73 |
afarrellsherman/Woolf
| 15,453,292,340,023 |
0b62a79825dbf28cc78a0e3062ecc5a2fea09982
|
e8a8b8c6b308ef7ee353e5207dcafddd125011b8
|
/tests/test_scripts.py
|
315d30765669500fd6271dfd1ba0985a3ebdcdc7
|
[
"MIT"
] |
permissive
|
https://github.com/afarrellsherman/Woolf
|
0cd112aefcb1c03ead97302165e6f0dad7331f1d
|
43fd5ba3ac74c115a7e59203a876701ab0aac03f
|
refs/heads/master
| 2020-03-30T08:04:03.230669 | 2019-06-27T17:07:41 | 2019-06-27T17:57:30 | 150,986,346 | 2 | 1 |
MIT
| false | 2019-06-27T17:57:32 | 2018-09-30T16:52:59 | 2019-06-27T17:52:19 | 2019-06-27T17:57:31 | 12,293 | 1 | 1 | 9 |
Python
| false | false |
import os
import tempfile
import wget
import zipfile
tut_file_url = 'https://osf.io/gtjfq/download'
def command_filter(line):
    """Return True when the shell line invokes one of the Woolf CLI entry points."""
    first_token = line.split()[0]
    return first_token in ('trainWoolf', 'featureTable')
tmp = tempfile.TemporaryDirectory()
tutorial_file = os.path.normpath(os.path.join(
    os.path.dirname(__file__),
    '..', 'docs', 'usermanual.md'
))
# Collect every "$ "-prefixed shell line from the user manual that invokes a
# Woolf command, so the tests below can replay them.
tutorial_commands = []
with open(tutorial_file) as tut:
    for line in tut:
        if line.startswith("$ "):
            line = line.lstrip('$ ')
            if command_filter(line):
                tutorial_commands.append(line)
# NOTE: runs at import time - downloads and unpacks the tutorial data files.
wget.download(tut_file_url, os.path.join(tmp.name, "files.zip"))
zipfile.ZipFile(os.path.join(tmp.name, "files.zip")).extractall(tmp.name)
def test_trainWoolf_help(script_runner):
    """trainWoolf --help should exit successfully with nothing on stderr."""
    ret = script_runner.run('trainWoolf', '--help')
    assert ret.success
    assert ret.stderr == ''
def test_featureTable_help(script_runner):
    """featureTable --help should exit successfully with nothing on stderr.

    Renamed: this was a second `def test_trainWoolf_help`, which silently
    shadowed the first definition so only one of the two tests was ever
    collected by pytest.
    """
    ret = script_runner.run('featureTable', '--help')
    assert ret.success
    assert ret.stderr == ''
def test_tutorial_commands(script_runner):
    """Replay every tutorial command inside the downloaded data directory; all must succeed."""
    os.chdir(tmp.name)
    for cmd in tutorial_commands:
        args = cmd.split()
        ret = script_runner.run(*args)
        assert ret.success
        assert ret.stderr == ''
|
UTF-8
|
Python
| false | false | 1,349 |
py
| 18 |
test_scripts.py
| 11 | 0.617494 | 0.616753 | 0 | 48 | 27.104167 | 73 |
shangqd/BlockChain
| 644,245,143,671 |
f788cff132f733767a6f01841866aae94608147c
|
e2ddc18286efd27ac7d59be64131302eca0fd731
|
/python/btc.py
|
b59f0a7aea765c8f77295d252d657f23d403650f
|
[] |
no_license
|
https://github.com/shangqd/BlockChain
|
cebceca6460be8f99684a22c667afc2b566d9808
|
d4d8db6fce415f5bf2ad650b40f5340acbd3e152
|
refs/heads/master
| 2023-04-07T10:54:28.294663 | 2023-03-27T14:54:35 | 2023-03-27T14:54:35 | 143,091,340 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: UTF-8 -*-
'''
富豪榜
https://btc.com/stats/rich-list
'''
import requests
import json
import time
import os
import pymysql.cursors
import sys
import threading
from decimal import Decimal
import re
import traceback
# Python 2: force utf-8 as the default string encoding.
reload(sys)
sys.setdefaultencoding('utf8')
# Make the project root importable (strip the trailing "btc" dir from cwd).
sys.path.append(re.sub("btc$","",os.getcwd(),1))
from config import config
class Work(threading.Thread):
    """Worker thread: fetch BTC blocks from blockchain.info and store each
    transaction's transfers in MySQL.  With `threads` workers, worker `tag`
    handles block heights congruent to tag (mod threads)."""
    def __init__(self,tag,threads):
        super(Work, self).__init__()
        self.tag = tag
        self.connection = pymysql.connect(host=config.host,user=config.user,password=config.password,db=config.db,cursorclass=pymysql.cursors.DictCursor,charset='utf8')
        self.cursor = self.connection.cursor()
        self.threads = threads
    def ExecSql(self,sql):
        """Execute one statement and commit immediately."""
        self.cursor.execute(sql)
        self.connection.commit()
    def GetBN(self):
        """Return the current best block height from blockchain.info."""
        req = requests.get("https://blockchain.info/latestblock")
        text = json.loads(req.text);
        return text["height"]
    def GetBN_(self):
        """Return the next block height recorded in the local currency table."""
        self.cursor.execute("SELECT next_block from currency where symbol = 'btc'");
        self.connection.commit()
        result = self.cursor.fetchone()
        return int(result["next_block"])
    def TxInsert(self,tx_hash,bn,from_addr,to_addr,token_transfer,ts):
        """Insert one transfer row into tx_btc.

        NOTE(review): values are interpolated straight into the SQL string;
        parameterized queries would be safer even for trusted chain data.
        """
        sql = ("INSERT INTO tx_btc (tx_hash, block_number, from_addr,`to_addr`, token_transfer, tx_time) VALUES ('%s','%s','%s','%s',%s,from_unixtime(%s))"
        % (tx_hash,bn,from_addr,to_addr,token_transfer,ts))
        self.ExecSql(sql);
    def run(self):
        """Main loop: walk this worker's stripe of block heights forever,
        inserting one tx_btc1 row per paired input/output of each tx."""
        bn = self.GetBN()
        # Align the starting height to this thread's stripe (tag mod threads).
        bn = bn - bn % self.threads + self.tag
        print "%s_%s_%s\n" % (bn,self.tag,self.threads)
        while (True):
            try:
                url = "https://blockchain.info/block-height/%s?format=json" % bn;
                req = requests.get(url)
                if req.text == "Unknown Error Fetching Blocks From Database":
                    # Block not available yet: back off and retry the same height.
                    print("%s_sleep(100)" % self.tag);
                    time.sleep(100)
                    continue;
                text = json.loads(req.text);
                if text.has_key("blocks"):
                    for b in text["blocks"]:
                        for tx in b["tx"]:
                            index = 0;
                            # Pair inputs and outputs positionally; the longer
                            # side drives the loop, the shorter side may run out,
                            # in which case only one side of the row is filled.
                            if len(tx["inputs"]) > len(tx["out"]):
                                for vin in tx["inputs"]:
                                    from_addr = ""
                                    from_token = 0
                                    if vin.has_key("prev_out"):
                                        from_addr = vin["prev_out"]["addr"];
                                        # Values are in satoshi; divide by 1e8 for BTC.
                                        from_token = Decimal(vin["prev_out"]["value"]) / Decimal(10 ** 8)
                                    sql = ""
                                    if index >= len(tx["out"]):
                                        sql = ("INSERT into tx_btc1(tx_hash,from_addr,from_token,block_number,tx_time)values('%s','%s',%s,%s,from_unixtime(%s))"
                                        % (tx["hash"],from_addr,from_token,bn,tx["time"]))
                                    else:
                                        to_addr = ""
                                        token_transfer = 0
                                        if tx["out"][index].has_key("addr"):
                                            to_addr = tx["out"][index]["addr"];
                                            token_transfer = Decimal(tx["out"][index]["value"]) / Decimal(10 ** 8);
                                        sql = ("INSERT into tx_btc1(tx_hash,from_addr,from_token,to_addr,token_transfer,block_number,tx_time)values('%s','%s',%s,'%s',%s,%s,from_unixtime(%s))"
                                        %(tx["hash"],from_addr,from_token,to_addr,token_transfer,bn,tx["time"]))
                                    self.ExecSql(sql)
                                    index = index + 1
                            else:
                                for vout in tx["out"]:
                                    to_addr = ""
                                    token_transfer = 0
                                    if vout.has_key("addr"):
                                        to_addr = vout["addr"]
                                        token_transfer = Decimal(vout["value"]) / Decimal(10 ** 8);
                                    sql = ""
                                    if index >= len(tx["inputs"]):
                                        sql = ("INSERT into tx_btc1(tx_hash,to_addr,token_transfer,block_number,tx_time)values('%s','%s',%s,%s,from_unixtime(%s))"
                                        % (tx["hash"],to_addr,token_transfer,bn,tx["time"]))
                                    else:
                                        from_addr = ""
                                        from_token = 0
                                        if tx["inputs"][index].has_key("prev_out"):
                                            from_addr = tx["inputs"][index]["prev_out"]["addr"]
                                            from_token = Decimal(tx["inputs"][index]["prev_out"]["value"]) / Decimal(10 ** 8)
                                        sql = ("INSERT into tx_btc1(tx_hash,from_addr,from_token,to_addr,token_transfer,block_number,tx_time)values('%s','%s',%s,'%s',%s,%s,from_unixtime(%s))"
                                        % (tx["hash"],from_addr,from_token,to_addr,token_transfer,bn,tx["time"]))
                                    self.ExecSql(sql)
                                    index = index + 1
                # Advance to this worker's next block and record progress.
                bn = bn + self.threads;
                sql = ("update currency set next_block = %d where symbol = 'btc'" % bn);
                self.ExecSql(sql);
                print("bn:%d;tag:%d" % (bn,self.tag));
            except Exception as e:
                print 'traceback.format_exc():\n%s' % traceback.format_exc()
                print e
if __name__ == '__main__':
    # Single-threaded mode: run one worker inline.
    threads = 1
    t = Work(0,threads)
    t.run()
    sys.exit();
    # NOTE(review): the loop below is unreachable because of sys.exit() above;
    # it appears to be the disabled multi-threaded mode.
    for i in xrange(threads):
        t = Work(i,threads)
        t.start()
|
UTF-8
|
Python
| false | false | 6,281 |
py
| 81 |
btc.py
| 53 | 0.423083 | 0.417504 | 0 | 125 | 48.2 | 191 |
hoon4233/Data-Science
| 13,597,866,464,027 |
e9711d74ef50e61a8a2a10c77ad039dbabbd3ea2
|
ed60b2983bd14601df31c283a40c9f560fb66865
|
/assignment1/apriori.py
|
fa3ab7e45a9d8d206a29f2a20ab801b77ee6b4f4
|
[] |
no_license
|
https://github.com/hoon4233/Data-Science
|
e01cd3988d69742afc1560f33e5089af3e17bde2
|
43b866e1bc97c0c864c762e1b6b9007639a2f6c9
|
refs/heads/master
| 2023-07-19T02:06:01.665867 | 2021-09-04T06:31:07 | 2021-09-04T06:31:07 | 364,768,689 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
from itertools import chain, combinations
# CLI: apriori.py <min_support_percent> <input_file> <output_file>
min_support = float(sys.argv[1]) / 100
input_file, output_file = sys.argv[2], sys.argv[3]
def apriori(trxs):
    """Find frequent itemsets over the transactions (Apriori algorithm).

    Returns {k: {itemset: support}} where each level-k dict maps a
    frozenset of k items to its support (fraction of transactions).
    Uses the module-level min_support threshold.
    """
    global min_support
    trxs_len = len(trxs)
    # Universe of items appearing in any transaction.
    idxs = set([])
    for trx in trxs :
        idxs = idxs.union(trx)
    # Plain sets are unhashable, so candidates must be frozensets.
    candidates = { frozenset({i}) for i in idxs }
    # candidates = { set({i}) for i in idxs }
    item_set = dict()
    K = 1
    while candidates :
        # Count how many transactions contain each candidate.
        count = dict()
        for trx in trxs :
            for candidate in candidates:
                if candidate.issubset(trx):
                    try :
                        count[candidate] += 1
                    except KeyError :
                        count[candidate] = 1
        # pruning: keep only candidates whose support meets the threshold
        after_pruning = { key : (float(value) / trxs_len) for (key, value) in count.items() if (float(value) / trxs_len) >= min_support }
        item_set[K] = after_pruning
        #self_joining: build (K+1)-item candidates from surviving K-item sets
        K += 1
        candidates = { i.union(j) for i in after_pruning for j in after_pruning if len(i.union(j)) == K }
    return item_set
def print_output(trxs, fps):
    """Write association rules derived from the frequent patterns to output_file.

    For every frequent pattern of length >= 2, each non-empty proper subset
    becomes a rule LHS with the remainder as RHS; support and confidence are
    written as tab-separated percentages (2 decimal places).

    trxs is unused but kept for interface compatibility.  The output file is
    now opened once in append mode instead of being re-opened for every rule
    line, which the original did inside the innermost loop.
    """
    with open(output_file, 'a') as f:
        for patt_len, patt_len_fps in fps.items():
            if patt_len == 1:
                continue
            for fp in patt_len_fps:
                # All non-empty subsets of the pattern, smallest first
                # (same order as the original nested combination loops).
                all_cases = [frozenset(case)
                             for length in range(1, len(fp) + 1, 1)
                             for case in combinations(fp, length)]
                for case in all_cases:
                    remainder = fp.difference(case)
                    if remainder:
                        confidence = fps[len(fp)][fp] / fps[len(case)][case]
                        prt_case = str(set(map(int, case))).replace(" ", "")
                        prt_remainder = str(set(map(int, remainder))).replace(" ", "")
                        prt_supp = str('%.2f' % round(fps[len(fp)][fp] * 100, 2))
                        prt_confi = str('%.2f' % round(confidence * 100, 2))
                        string = prt_case + '\t' + prt_remainder + '\t' + prt_supp + '\t' + prt_confi + '\n'
                        f.write(string)
with open(input_file, 'r') as f :
    # Each input line is one transaction: tab-separated item ids.
    trxs = [ trx.split('\t') for trx in f.read().splitlines() ]
fps = apriori(trxs)
print_output(trxs, fps)
|
UTF-8
|
Python
| false | false | 2,396 |
py
| 17 |
apriori.py
| 4 | 0.510924 | 0.50084 | 0 | 75 | 30.746667 | 137 |
awmace/Demo
| 15,607,911,161,878 |
70c1a32b2ab4ed6c44c4814f5f34726ccb9cd9be
|
f2ba48da8c66c454470dd1441904797e5fbc509c
|
/Crawl/tender2/crawl_data.py
|
d96e47232743f6e686a9cf2a492cf5bda9727348
|
[] |
no_license
|
https://github.com/awmace/Demo
|
862800b81e4088615a92d2da0a576a5660e0fec9
|
9734e50578bda139d3781478b7170af723c5881a
|
refs/heads/master
| 2023-03-06T09:01:02.321914 | 2021-02-20T07:41:25 | 2021-02-20T07:41:25 | 340,599,537 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import jieba, json
import scrapy, requests
import time, hashlib, re
from simhash import Simhash
from lxml import etree
from Crawl.tender2.read_data import filter_data, set_data
def md5_jm(v):
    """Return the hex MD5 digest of string *v* (used as a stable row key)."""
    hasher = hashlib.md5()
    hasher.update(v.encode())
    digest = str(hasher.hexdigest())
    return digest
# Crawl up to 99 result pages of the China Government Procurement search
# (URL-encoded keyword kw=%E9%92%A2, fixed date range) and extract one
# record per tender detail page.
for page in range(1, 100):
    page_url = 'http://search.ccgp.gov.cn/bxsearch?searchtype=1&page_index={}&bidSort=0&buyerName=&projectId=&pinMu=0&bidType=0&dbselect=bidx&kw=%E9%92%A2&start_time=2020%3A05%3A12&end_time=2020%3A11%3A10&timeType=5&displayZone=&zoneId=&pppStatus=0&agentName='.format(
        page)
    # Referer + browser User-Agent so the site serves the normal HTML page.
    headers = {
        'Referer': 'http://search.ccgp.gov.cn/bxsearch?searchtype=1&page_index=1&bidSort=0&buyerName=&projectId=&pinMu=0&bidType=0&dbselect=bidx&kw=%E9%92%A2&start_time=2020%3A08%3A10&end_time=2020%3A11%3A10&timeType=4&displayZone=&zoneId=&pppStatus=0&agentName=',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36'
    }
    response = requests.get(page_url, headers=headers).text
    ele = etree.HTML(response)
    # Links to the individual tender detail pages.
    li_urls = ele.xpath('//ul[@class="vT-srch-result-list-bid"]/li/a/@href')
    old_data = []
    for index, li_url in enumerate(li_urls):
        res = requests.get(li_url)
        res.encoding = 'utf-8'
        res = res.text
        data = dict()
        try:
            data['rowKey'] = md5_jm(li_url)  # unique identifier (MD5 of URL)
            data['crawl_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            data['title'] = re.findall('<title>(.*?)</title>', res)[0]
            print(data['title'])
            data['url'] = li_url
            data['source'] = '中国政府采购网'
            # Normalise the Chinese date "YYYY年MM月DD日" to "YYYY-MM-DD".
            publish_time = re.findall('<span id="pubTime">(.*?)</span>', res)[0]
            data['publish_time'] = publish_time.replace('年', '-').replace('月', '-').replace('日', '')
            data['d_type'] = re.findall('<a.*?class="CurrChnlCls">(.*?)</a>', res)[-1]
            data['elements'] = \
                re.findall('<div class="vF_deail_maincontent">(.*?)<div class="footer mt13">', res, re.S)[0]
            # Plain-text body: drop whitespace runs and HTML tags.
            data['text'] = re.sub(r'\s+|<.+?>', '', data['elements'])
            old_data.append(data)
        except:
            # Pages not matching the expected layout are skipped silently.
            pass
    # new_data = filter_data(old_data)
    # print(new_data)
    # set_data(old_data, new_data)
    # print(page)
    time.sleep(2)
|
UTF-8
|
Python
| false | false | 2,418 |
py
| 85 |
crawl_data.py
| 81 | 0.597071 | 0.550628 | 0 | 52 | 44.961538 | 268 |
Marilyth/mss-data-retrieval
| 5,282,809,820,131 |
7700cffca26dd1f0d664077dba0a4ffb06ac9f85
|
8497c58b1758925ed29726b69d8f00820b5e3afd
|
/bin/add_ancillary.py
|
ac59b73a79464cbbbb13f49f420a2e36575e8ebf
|
[] |
no_license
|
https://github.com/Marilyth/mss-data-retrieval
|
45da4ab9856819820adbda32a648b398bcb0c347
|
33fbacd4c787e583d0f63cde7924b756e48b346b
|
refs/heads/main
| 2023-04-05T18:15:21.437651 | 2021-04-22T09:48:17 | 2021-04-22T09:48:17 | 354,835,334 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Copyright (C) 2012 by Forschungszentrum Juelich GmbH
Author(s): Joern Ungermann
Please see docstring of main().
"""
import datetime
import itertools
import optparse
import os
import sys
from metpy.calc import potential_temperature, potential_vorticity_baroclinic, brunt_vaisala_frequency_squared, geopotential_to_height
import xarray as xr
import netCDF4
import numpy as np
import tqdm
# Catalogue of derived variables this script can add to the NetCDF file.
# Each entry maps variable name -> (dimensionality, units, CF standard_name,
# long_name).  "FULL" variables are 4-D (time, lev, lat, lon); "HORIZONTAL"
# ones are 3-D (time, lat, lon).  The generic SURFACE_UV / SURFACE_PV
# entries are templates completed with a level string by
# get_create_variable().
VARIABLES = {
    "pressure": ("FULL", "hPa", "air_pressure", "Pressure"),
    "pt": ("FULL", "K", "air_potential_temperature", "Potential Temperature"),
    "pv": ("FULL", "m^2 K s^-1 kg^-1 10E-6", "ertel_potential_vorticity", "Potential Vorticity"),
    "mod_pv": ("FULL", "m^2 K s^-1 kg^-1 10E-6", "", "Modified Potential Vorticity"),
    "EQLAT": ("FULL", "degree N", "equivalent_latitude", "Equivalent Latitude"),
    "zh": ("FULL", "km", "geopotential_height", "Geopotential Altitude"),
    "n2": ("FULL", "s^-2", "square_of_brunt_vaisala_frequency_in_air", "N^2"),
    "SURFACE_UV": ("HORIZONTAL", "m s^-1", "", "Horizontal Wind Speed at "),
    "SURFACE_PV": ("HORIZONTAL", "m^2 K s^-1 kg^-1", "", "Potential Vorticity at "),
    "TROPOPAUSE": ("HORIZONTAL", "km", "tropopause_altitude",
                   "vertical location of first WMO thermal tropopause"),
    "TROPOPAUSE_PRESSURE": ("HORIZONTAL", "Pa", "tropopause_air_pressure",
                            "vertical location of first WMO thermal tropopause"),
    "TROPOPAUSE_THETA": ("HORIZONTAL", "K", "tropopause_air_potential_temperature",
                         "vertical location of first WMO thermal tropopause"),
    "TROPOPAUSE_SECOND": ("HORIZONTAL", "km", "secondary_tropopause_altitude",
                          "vertical location of second WMO thermal tropopause"),
    "TROPOPAUSE_SECOND_PRESSURE": ("HORIZONTAL", "Pa", "secondary_tropopause_air_pressure",
                                   "vertical location of second WMO thermal tropopause"),
    "TROPOPAUSE_SECOND_THETA": ("HORIZONTAL", "K", "secondary_tropopause_air_potential_temperature",
                                "vertical location of second WMO thermal tropopause"),
}
def get_create_variable(ncin, name):
    """Return the NetCDF variable *name*, creating it first if absent.

    Known names are looked up directly in VARIABLES; per-level surface
    variables (e.g. "pressure_SURFACE_0200_UV") derive their metadata from
    the generic "SURFACE_UV"/"SURFACE_PV" template plus the level string
    embedded in the name.  New variables are compressed float32 with a NaN
    fill value, 4-D for "FULL" entries and 3-D for surface ones.
    """
    is_surface = False
    if name not in ncin.variables:
        if name in VARIABLES:
            dim, units, standard_name, long_name = VARIABLES[name]
        else:
            # e.g. "pt_SURFACE_0300_PV" -> template key "SURFACE_PV",
            # level string "0300" appended to the template long_name.
            fields = name.split("_")
            assert fields[1] == "SURFACE"
            # BUG FIX: VARIABLES entries are 4-tuples; the original unpacked
            # only three names here, raising ValueError for every surface
            # variable and leaving standard_name unbound.
            dim, units, standard_name, long_name = VARIABLES["_".join(fields[1:4:2])]
            long_name += fields[2]
            is_surface = True
        dims = ("time", "lev_2", "lat", "lon") if not is_surface else ("time", "lat", "lon")
        var_id = ncin.createVariable(name, "f4", dims,
                                     **{"zlib": 1, "shuffle": 1, "fletcher32": 1, "fill_value": np.nan})
        var_id.units = units
        var_id.long_name = long_name
        if standard_name:
            var_id.standard_name = standard_name
    return ncin.variables[name]
def find_tropopause(alts, temps):
    """
    Identifies position of thermal tropopauses in given altitude/temperature
    profile. Has some issues with inversions, which is circumventyed partly by
    setting seek to False, which is not strictly necessary by WMO definition.
    The thermal definition of the tropopause, WMO, 1957:
    (a) The first tropopause is defined as the lowest level at which the lapse
    rate decreases to 2 degree C/km or less, provided also the average lapse rate
    between this level and all higher levels within 2 km does not exceed 2 degree C/km.
    (b) If above the first tropopause the average lapse rate between any level
    and all higher levels within 1 km exceeds 3 degree C/km, then a second tropopause
    is defined by the same criterion as under (a). This tropopause may be either
    within or above the 1 km layer.

    alts and temps are 1-D profiles (altitudes in km, temperatures in K).
    Returns a list of tropopause altitudes (km), lowest first; empty list
    when fewer than three valid levels remain after filtering.
    """
    # WMO lapse-rate threshold (K/km) and the altitude window searched.
    dtdz_wmo = -2
    zmin = 5
    zmax = 22
    alts = np.asarray(alts)
    temps = np.asarray(temps)
    # Discard NaNs and levels outside 2-30 km.
    valid = (~(np.isnan(alts) | np.isnan(temps))) & (alts > 2.0) & (alts < 30.0)
    alts, temps = alts[valid], temps[valid]
    if len(alts) < 3:
        return []
    if alts[0] > alts[1]:  # check for proper order and reverse if necessary
        alts = alts[::-1]
        temps = temps[::-1]
    result = []
    # This differentiation is sufficient as we are looking at average lapse rate
    # with respect to higher levels anyway, so using a more accurate left/right
    # differentiation does not really improve things here.
    lapse_rate = (temps[1:] - temps[:-1]) / (alts[1:] - alts[:-1])
    lapse_alts = (alts[1:] + alts[:-1]) / 2.
    # seek is True while searching for the next tropopause; after a hit it is
    # re-armed only once criterion (b) (avg lapse rate < -3 K/km over 1 km)
    # is fulfilled.
    seek = True
    for j in range(1, len(lapse_rate)):
        if not seek and lapse_rate[j] < -3:
            ks = [k for k in range(len(temps)) if lapse_alts[j] <= alts[k] <= lapse_alts[j] + 1.]
            # This way of calculating the average lapse rate is optimal. Don't
            # try to improve. Integrate t'/(z1-z0) numerically (no trapez! do it
            # stupid way) with infinitesimal h. Differentiate numerically using
            # same h. Simplify. Voila. As h can be assumed as small as possible,
            # this is accurate.
            if len(ks) > 1:
                k, ks = ks[0], ks[1:]
                avg_lapse = (temps[ks] - temps[k]) / (alts[ks] - alts[k])
                if all(avg_lapse < -3):
                    seek = True
            else:
                seek = True
        if seek and lapse_rate[j - 1] <= dtdz_wmo < lapse_rate[j] \
                and zmin < lapse_alts[j] < zmax:
            # Candidate found: interpolate the altitude where the lapse rate
            # crosses -2 K/km, then verify criterion (a) over the next 2 km.
            alt = np.interp([dtdz_wmo],
                            lapse_rate[j - 1:j + 1], lapse_alts[j - 1:j + 1])[0]
            ks = [_k for _k in range(len(temps)) if alt <= alts[_k] <= alt + 2.]
            if len(ks) > 1:
                k, ks = ks[0], ks[1:]
                avg_lapse = (temps[ks] - temps[k]) / (alts[ks] - alts[k])
                if all(avg_lapse > dtdz_wmo):
                    result.append(alt)
                    seek = False
            else:
                result.append(alt)
                seek = False
    return result
def parse_args(args):
    """Parse command-line arguments.

    Returns (options, path) where *path* is the NetCDF model file.  Prints
    usage and exits with status 1 when the path is missing or does not
    exist on disk.
    """
    oppa = optparse.OptionParser(usage="""
add_pv.py
Adds PV and ancillary quantities to 4D model data given as NetCDF.
Supported model types are ECMWFP (ECMWF on pressure levels), ECMWFZ
(JURASSIC ECMWF format on altitude levels), FNL, WACCM.
Usage: add_pv.py [options] <model type> <netCDF file>
Example:
add_pv.py ECMWFP ecmwfr_ana_ml_06072912.nc
""")
    oppa.add_option('--theta', '', action='store_true',
                    help="Add pt potential temperature field")
    oppa.add_option('--n2', '', action='store_true',
                    help="Add n2 static stability.")
    oppa.add_option('--pv', '', action='store_true',
                    help="Add pv potential vorticity.")
    oppa.add_option('--tropopause', '', action='store_true',
                    help="Add first and second tropopause")
    oppa.add_option('--eqlat', '', action='store_true',
                    help="Add equivalent latitude")
    oppa.add_option('--surface_pressure', '', action='store', type=str,
                    help="Add PV and UV on given hPa surfaces, e.g., 200:300:400.")
    oppa.add_option('--surface_theta', '', action='store', type=str,
                    help="Add PV and UV on given theta surfaces, e.g., 200:300:400.")
    opt, arg = oppa.parse_args(args)
    if len(arg) != 1:
        print(oppa.get_usage())
        exit(1)
    if not os.path.exists(arg[0]):
        # BUG FIX: was arg[1], which raised IndexError since len(arg) == 1.
        print("Cannot find model data at", arg[0])
        exit(1)
    return opt, arg[0]
def add_eqlat(ncin):
    """Compute equivalent latitude from PV and write it to variable "EQLAT".

    PV is interpolated from model levels onto a fixed theta grid; on each
    theta surface PV values are sorted and mapped to the latitude enclosing
    the same area (the equivalent latitude), which is then interpolated
    back onto the model levels.  *ncin* is an open netCDF4 Dataset that
    must already contain "pv", "pt", "lat" and "lon"; it is modified in
    place.
    """
    print("Adding EQLAT...")
    pv = ncin.variables["pv"][:]
    theta = ncin.variables["pt"][:]
    eqlat = np.zeros(pv.shape)
    latc = ncin.variables["lat"][:]
    lonc = ncin.variables["lon"][:]
    if min(latc) > -75 or max(latc) < 75:
        print("WARNING:")
        print("    Not enough latitudes present for this to be a global set.")
        print("    EQLAT may not be meaningful.")
    # Area weight of each grid cell on the unit sphere, from the sine
    # difference of the cell-edge latitudes, divided evenly over longitudes.
    lats = np.zeros(len(latc) + 1)
    lats[:-1] = latc
    lats[1:] += latc
    lats[1:-1] /= 2
    lats = np.deg2rad(lats)
    area = np.absolute(np.sin(lats[:-1]) - np.sin(lats[1:])) / (2 * len(lonc))
    assert area[0] > 0
    # Cumulative area from the first pole to each grid latitude; ordered so
    # that baseareas is increasing regardless of latitude axis direction.
    if latc[0] > latc[1]:
        baseareas = (np.sin(np.deg2rad(latc[0])) -
                     np.sin(np.deg2rad(latc))) / 2.
    else:
        baseareas = (np.sin(np.deg2rad(latc[-1])) -
                     np.sin(np.deg2rad(latc)))[::-1] / 2.
        latc = latc[::-1]
    assert(baseareas[1] > baseareas[0])
    # Fixed theta grid, progressively coarser at higher theta.
    thetagrid = np.hstack([np.arange(250., 400., 2),
                           np.arange(400., 500., 5.),
                           np.arange(500., 750., 10.),
                           np.arange(750., 1000., 25.),
                           np.arange(1000., 3000., 100.)])
    log_thetagrid = np.log(thetagrid)
    newshape = list(pv.shape)
    newshape[1] = len(thetagrid)
    p_theta = np.zeros(newshape)
    p_theta.swapaxes(1, 3)[:] = thetagrid
    # convert u, v, theta to pressure grid
    theta_pv = np.zeros(newshape)
    lp = np.log(theta[0, :, 0, 0])
    # Interpolation in log(theta) requires an ascending vertical axis;
    # remember whether it was flipped so the result can be flipped back.
    reverse = False
    if lp[0] > lp[-1]:
        theta = theta[:, ::-1]
        pv = pv[:, ::-1]
        reverse = True
    for iti, ilo, ila in tqdm.tqdm(
            itertools.product(range(newshape[0]), range(newshape[3]), range(newshape[2])),
            total=newshape[0] * newshape[3] * newshape[2], ascii=True,
            desc="Interpolation to theta levels:"):
        lp = np.log(theta[iti, :, ila, ilo])
        theta_pv[iti, :, ila, ilo] = np.interp(
            log_thetagrid, lp, pv[iti, :, ila, ilo],
            left=np.nan, right=np.nan)
    theta_eqlat = np.zeros(newshape)
    for iti in range(newshape[0]):
        for lev in tqdm.tqdm(range(newshape[1]), desc="Integration", ascii=True):
            areas = np.zeros(len(latc) + 1)
            pv_limits = np.zeros(len(area))
            loc_thpv = theta_pv[iti, lev, :, :]
            # Latitude index of every grid point, flattened alongside PV.
            loc_lat = np.zeros(loc_thpv.shape, dtype="i8")
            loc_lat.swapaxes(0, 1)[:] = range(len(latc))
            loc_lat = loc_lat.reshape(-1)
            thpv_list = loc_thpv.reshape(-1)
            notnanpv = ~(np.isnan(thpv_list))
            if len(thpv_list[notnanpv]) == 0:
                theta_eqlat[iti, lev, :, :] = np.nan
                continue
            # Shrink the total area to account for grid points without PV
            # (theta surface below/above the model domain there).
            missing_area = area[loc_lat[np.isnan(thpv_list)]].sum()
            areas = baseareas.copy()
            missing_fac = (areas[-1] - missing_area) / areas[-1]
            if missing_fac < 0.99:
                areas *= missing_fac
                print("\nWARNING")
                print("    'Fixing' area due to nan in PV at theta ", thetagrid[lev], end=' ')
                print("by a factor of ", missing_fac)
            minpv, maxpv = thpv_list[notnanpv].min(), thpv_list[notnanpv].max()
            # Sort grid points by descending PV (negated for ascending sort).
            thpv_list = sorted(zip(-thpv_list[notnanpv], loc_lat[notnanpv]))
            aind_lat = np.asarray([x[1] for x in thpv_list], dtype="i8")
            apv = np.asarray([x[0] for x in thpv_list])[:-1]
            cum_areas = np.cumsum(area[aind_lat])[1:]
            if len(cum_areas) >= 2:
                # PV value enclosing the same area as each grid latitude,
                # then map each point's PV to its equivalent latitude.
                pv_limits = np.interp(areas, cum_areas, apv)
                pv_limits[0], pv_limits[-1] = -maxpv, -minpv
                loc_eqlat = np.interp(-loc_thpv, pv_limits, latc)
                theta_eqlat[iti, lev, :, :] = loc_eqlat
            else:
                print("\nWARNING")
                print("    Filling one level to NaN due to missing PV values")
                theta_eqlat[iti, lev, :, :] = np.nan
    # convert pv back to model grid
    for iti, ilo, ila in tqdm.tqdm(
            itertools.product(range(eqlat.shape[0]), range(eqlat.shape[3]), range(eqlat.shape[2])),
            total=eqlat.shape[0] * eqlat.shape[3] * eqlat.shape[2], ascii=True,
            desc="Interpolation back to model levels:"):
        lp = np.log(theta[iti, :, ila, ilo])
        eqlat[iti, :, ila, ilo] = np.interp(
            lp, log_thetagrid, theta_eqlat[iti, :, ila, ilo],
            left=np.nan, right=np.nan)
    if reverse:
        eqlat = eqlat[:, ::-1]
    get_create_variable(ncin, "EQLAT")[:] = eqlat
def add_surface(ncin, typ, levels):
    """
    This function takes PV and hor. Wind from a model and adds a variable where
    these entities are interpolated on the given horizontal hPa planes.

    ncin: open netCDF4 Dataset, modified in place.
    typ: vertical coordinate name to interpolate along ("pressure" -> hPa
         surfaces, "pt" -> theta surfaces, otherwise used as variable name).
    levels: colon-separated integer levels, e.g. "200:300:400", or None
            (then nothing is done).
    """
    if levels is None:
        return
    for p in [int(x) for x in levels.split(":")]:
        print("Adding PV, UV on", typ, "level", p)
        pv = ncin.variables["pv"][:]
        if typ == "pressure":
            # Model pressure is stored in Pa; levels are given in hPa.
            vert = ncin.variables["pressure"][:]/100
        elif typ == "pt":
            vert = ncin.variables["pt"][:]
        else:
            vert = ncin.variables[typ][:]
        u = ncin.variables["u"][:]
        v = ncin.variables["v"][:]
        pv_surf = np.zeros((pv.shape[0], pv.shape[2], pv.shape[3]))
        uv_surf = np.zeros(pv_surf.shape)
        uv = np.sqrt(u ** 2 + v ** 2)
        # np.interp needs ascending x values; flip the vertical axis via
        # the stride if the coordinate decreases with index.
        if vert[0, 0, 0, 0] < vert[0, -1, 0, 0]:
            order = 1
        else:
            order = -1
        for iti, ilo, ila in tqdm.tqdm(
                itertools.product(range(pv.shape[0]), range(pv.shape[3]), range(pv.shape[2])),
                total=pv.shape[0] * pv.shape[3] * pv.shape[2], ascii=True,
                desc="Interpolation to {} level {}".format(typ, p)):
            uv_surf[iti, ila, ilo] = np.interp(
                [p], vert[iti, ::order, ila, ilo], uv[iti, ::order, ila, ilo],
                left=np.nan, right=np.nan)
            pv_surf[iti, ila, ilo] = np.interp(
                [p], vert[iti, ::order, ila, ilo], pv[iti, ::order, ila, ilo],
                left=np.nan, right=np.nan)
        get_create_variable(ncin, "%s_SURFACE_%04d_UV" % (typ, p))[:] = uv_surf
        get_create_variable(ncin, "%s_SURFACE_%04d_PV" % (typ, p))[:] = pv_surf
def add_tropopauses(ncin):
    """
    Adds first and second thermal WMO tropopause to model. Fill value is -999.
    """
    # NOTE(review): the docstring mentions a fill value of -999, but the
    # result arrays below are initialised with NaN -- confirm which is meant.
    print("Adding first and second tropopause")
    temp = ncin.variables["t"][:]
    press = ncin.variables["pressure"][:]/100
    gph = ncin.variables["zh"][:]
    theta = ncin.variables["pt"][:]
    # Ensure the vertical axis is ascending in altitude.
    if gph[0, 1, 0, 0] < gph[0, 0, 0, 0]:
        gph = gph[:, ::-1, :, :]
        press = press[:, ::-1, :, :]
        temp = temp[:, ::-1, :, :]
        theta = theta[:, ::-1, :, :]
    valid = np.isfinite(gph[0, :, 0, 0])
    assert gph[0, valid, 0, 0][1] > gph[0, valid, 0, 0][0]
    assert press[0, valid, 0, 0][1] < press[0, valid, 0, 0][0]
    # One 2-D (time, lat, lon) field per tropopause property; NaN = not found.
    above_tropo1 = np.empty((gph.shape[0], gph.shape[2], gph.shape[3]))
    above_tropo1[:] = np.nan
    above_tropo2 = above_tropo1.copy()
    above_tropo1_press = above_tropo1.copy()
    above_tropo2_press = above_tropo1.copy()
    above_tropo1_theta = above_tropo1.copy()
    above_tropo2_theta = above_tropo1.copy()
    for iti, ilo, ila in tqdm.tqdm(
            itertools.product(range(gph.shape[0]), range(gph.shape[3]), range(gph.shape[2])),
            total=gph.shape[0] * gph.shape[3] * gph.shape[2], ascii=True):
        tropopauses = find_tropopause(gph[iti, :, ila, ilo], temp[iti, :, ila, ilo])
        # Keep only tropopauses between 5 and 22 km.
        tropopauses = [x for x in tropopauses if 5 < x < 22]
        if len(tropopauses) > 0:
            above_tropo1[iti, ila, ilo] = min(tropopauses)
            above_tropo1_press[iti, ila, ilo] = np.interp(
                above_tropo1[iti, ila, ilo], gph[iti, :, ila, ilo], press[iti, :, ila, ilo])
            above_tropo1_theta[iti, ila, ilo] = np.interp(
                above_tropo1[iti, ila, ilo], gph[iti, :, ila, ilo], theta[iti, :, ila, ilo])
            # Second tropopause: lowest one above the first.
            second = [x for x in tropopauses if x > above_tropo1[iti, ila, ilo]]
            if len(second) > 0:
                above_tropo2[iti, ila, ilo] = min(second)
                above_tropo2_press[iti, ila, ilo] = np.interp(
                    above_tropo2[iti, ila, ilo], gph[iti, :, ila, ilo], press[iti, :, ila, ilo])
                above_tropo2_theta[iti, ila, ilo] = np.interp(
                    above_tropo2[iti, ila, ilo], gph[iti, :, ila, ilo], theta[iti, :, ila, ilo])
    # NOTE(review): press was computed above as hPa (pressure/100) and
    # interpolated linearly, yet np.exp() is applied here as if those values
    # were log-pressure -- confirm the intended units/transform upstream.
    above_tropo1_press = np.exp(above_tropo1_press)
    above_tropo2_press = np.exp(above_tropo2_press)
    get_create_variable(ncin, "TROPOPAUSE")[:] = above_tropo1
    get_create_variable(ncin, "TROPOPAUSE_SECOND")[:] = above_tropo2
    get_create_variable(ncin, "TROPOPAUSE_PRESSURE")[:] = above_tropo1_press * 100
    get_create_variable(ncin, "TROPOPAUSE_SECOND_PRESSURE")[:] = above_tropo2_press * 100
    get_create_variable(ncin, "TROPOPAUSE_THETA")[:] = above_tropo1_theta
    get_create_variable(ncin, "TROPOPAUSE_SECOND_THETA")[:] = above_tropo2_theta
def add_metpy(option, filename):
    """
    Adds the variables possible through metpy (theta, pv, n2)

    The NetCDF file at *filename* is loaded with xarray, extended with the
    fields selected in *option* (theta, pv/mod_pv, n2), and written back to
    the same path.
    """
    with xr.load_dataset(filename) as xin:
        if option.theta or option.pv:
            # PV needs theta, so theta is computed for either flag.
            print("Adding potential temperature...")
            xin["pt"] = potential_temperature(xin["pressure"], xin["t"])
            xin["pt"].data = np.array(xin["pt"].data)
            xin["pt"].attrs["units"] = "K"
            xin["pt"].attrs["standard_name"] = VARIABLES["pt"][2]
        if option.pv:
            print("Adding potential vorticity...")
            # metpy needs an explicit CRS for the horizontal derivatives.
            xin = xin.metpy.assign_crs(grid_mapping_name='latitude_longitude',
                                       earth_radius=6.356766e6)
            xin["pv"] = potential_vorticity_baroclinic(xin["pt"], xin["pressure"], xin["u"], xin["v"])
            # Scale to PV units of 1e-6 (matching the VARIABLES metadata).
            xin["pv"].data = np.array(xin["pv"].data * 10 ** 6)
            xin = xin.drop("metpy_crs")
            xin["pv"].attrs["units"] = "kelvin * meter ** 2 / kilogram / second"
            xin["pv"].attrs["standard_name"] = VARIABLES["pv"][2]
            # Modified PV: PV scaled by (theta/360)^-4.5 -- presumably a
            # Lait (1994)-style scaling; confirm the reference value 360 K.
            xin["mod_pv"] = xin["pv"] * ((xin["pt"] / 360) ** (-4.5))
            xin["mod_pv"].attrs["standard_name"] = VARIABLES["mod_pv"][2]
        if option.n2:
            print("Adding N2...")
            xin["n2"] = brunt_vaisala_frequency_squared(geopotential_to_height(xin["zh"]), xin["pt"])
            xin["n2"].data = np.array(xin["n2"].data)
            xin["n2"].attrs["units"] = VARIABLES["n2"][1]
            xin["n2"].attrs["standard_name"] = "square_of_brunt_vaisala_frequency_in_air"
        xin.to_netcdf(filename)
def add_rest(option, filename):
    """
    Adds the variables not possible through metpy

    Opens the NetCDF file in place, records the invocation in the global
    "history" attribute, and adds equivalent latitude, surface-level
    PV/wind fields and tropopauses according to *option*.
    """
    # Open NetCDF file as passed from command line
    with netCDF4.Dataset(filename, "r+") as ncin:
        # Prepend this invocation to the file's history attribute.
        history = datetime.datetime.now().isoformat() + ":" + " ".join(sys.argv)
        if hasattr(ncin, "history"):
            history += "\n" + ncin.history
        ncin.history = history
        ncin.date_modified = datetime.datetime.now().isoformat()
        if option.eqlat:
            add_eqlat(ncin)
        add_surface(ncin, "pressure", option.surface_pressure)
        add_surface(ncin, "pt", option.surface_theta)
        if option.tropopause:
            add_tropopauses(ncin)
def main():
    """Entry point: parse CLI options, then add the requested fields."""
    option, filename = parse_args(sys.argv[1:])
    add_metpy(option, filename)
    add_rest(option, filename)
if __name__ == "__main__":
    main()
|
UTF-8
|
Python
| false | false | 19,228 |
py
| 9 |
add_ancillary.py
| 6 | 0.556012 | 0.535833 | 0 | 457 | 41.074398 | 133 |
wujonathan/517proj
| 12,541,304,542,538 |
a6838f1034c03516c9938a41f0f09b1a376fb4e3
|
f00b716a220810b80d0f09feee98f2480754be0b
|
/milestone_4/modelFitting.py
|
e98cdedc51433c5d45bedff8614a39d06cde34b2
|
[] |
no_license
|
https://github.com/wujonathan/517proj
|
453b607d4ce8f68d16a58f2e0e898ccbcd4ceb3d
|
78320eb9cda2fc30fcc1f80aef8c46eb6a82cedb
|
refs/heads/master
| 2021-01-24T16:19:33.751247 | 2018-05-05T06:55:30 | 2018-05-05T06:55:30 | 123,183,650 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
from sklearn import linear_model as lm
from sklearn import svm
from sklearn.decomposition import PCA
from sklearn.metrics import mean_squared_error as mse
import pyGPs
import json
# NOTE: Python 2 code (xrange / print statement below).
# Compare linear regression, SVR and Gaussian-process regression (with and
# without PCA) on the Boston dataset via repeated 10-fold cross-validation,
# collecting mean MSE samples for a later t-test.
xFilename = '../dataset/bostonX.csv'
yFilename = '../dataset/bostonY.csv'
XTrain = np.loadtxt(xFilename, delimiter=",")
yTrain = np.loadtxt(yFilename, delimiter=",")
# pyGPs expects targets as a column vector.
yTrain = np.array([[i] for i in yTrain])
K = 10
# Per-repetition fold scores and the per-repetition means, keyed by model.
scoresMSE = {"lr" : [], "clf": [], "gp": [], "lrPCA" : [], "gpPCA" : []}
finalScoresMSE = {"lr" : [], "clf": [], "gp": [], "lrPCA" : [], "gpPCA" : []}
for i in xrange(10):
    for x_train, x_test, y_train, y_test in pyGPs.Validation.valid.k_fold_validation(XTrain, yTrain, K, randomise = True):
        # Ordinary least-squares baseline.
        lr = lm.LinearRegression()
        lr.fit(x_train, y_train)
        y_pred = lr.predict(x_test)
        scoresMSE["lr"].append(mse(y_test, y_pred))
        # Support vector regression with default kernel.
        clf = svm.SVR()
        clf.fit(x_train, y_train)
        y_pred = clf.predict(x_test)
        scoresMSE["clf"].append(mse(y_test, y_pred))
        # Gaussian-process regression (pyGPs); ymu is the predictive mean.
        model = pyGPs.GPR()
        model.optimize(x_train, y_train)
        ymu, ys2, fmu, fs2, lp = model.predict(x_test, ys = y_test)
        scoresMSE["gp"].append(mse(y_test, ymu))
        # Repeat LR and GP after projecting onto the first 2 PCA components.
        pca = PCA(n_components=2)
        new_x_train = pca.fit_transform(x_train)
        new_x_test = pca.transform(x_test)
        lr = lm.LinearRegression()
        lr.fit(new_x_train, y_train)
        y_pred = lr.predict(new_x_test)
        scoresMSE["lrPCA"].append(mse(y_test, y_pred))
        model = pyGPs.GPR()
        model.optimize(new_x_train, y_train)
        ymu, ys2, fmu, fs2, lp = model.predict(new_x_test, ys = y_test)
        scoresMSE["gpPCA"].append(mse(y_test, ymu))
    # Record the mean MSE of this repetition and reset the fold scores.
    for key in scoresMSE:
        finalScoresMSE[key].append(np.mean(scoresMSE[key]))
    scoresMSE = {"lr" : [], "clf": [], "gp": [], "lrPCA" : [], "gpPCA" : []}
    print i
with open('ttest.txt', 'w') as o:
    json.dump(finalScoresMSE, o)
|
UTF-8
|
Python
| false | false | 1,796 |
py
| 30 |
modelFitting.py
| 9 | 0.642539 | 0.637528 | 0 | 68 | 25.411765 | 119 |
kxu68/Cell_model
| 4,105,988,778,363 |
89cdea91e740cf2cbfdfc0c8dcb88e43f3a0eb6d
|
036efa8e627e6ec5cada0a115dd42c5c50830525
|
/files made by Ke/testfiles/test_iml1515second_check.py
|
dd82f586bbb345070d9a3daae8d454b1cd004a29
|
[] |
no_license
|
https://github.com/kxu68/Cell_model
|
2c68f321b3a8e43bda169d23fcf77ef72ef7aed2
|
ce095d10760d4d3ec2f7d4d492fc34ab096c8ce8
|
refs/heads/main
| 2023-07-15T19:49:00.277231 | 2022-02-02T19:42:16 | 2022-02-02T19:42:16 | 436,499,505 | 0 | 0 | null | true | 2021-12-09T05:56:21 | 2021-12-09T05:56:21 | 2021-08-18T16:22:37 | 2021-08-18T16:22:35 | 11,060 | 0 | 0 | 0 | null | false | false |
#!/usr/bin/env python
# coding: utf-8
# In[21]:
import unittest
# import pytest
import cobra
import copy
# Loaded once at import time so every test case can reuse the solved models.
# generate a sample for loading the models in the origin way for comparison
model_origin= cobra.io.read_sbml_model('iML1515.xml')
OV_origin= model_origin.optimize()
#this is objective value
model_test = cobra.io.read_sbml_model('IMPROVED_iML1515.xml')
OV_test= model_test.optimize()
SM_test= model_test.summary()
class TestIML1515(unittest.TestCase):
    """Sanity checks comparing the improved iML1515 model to the original.

    The models and their optimisation results (model_origin / model_test,
    OV_origin / OV_test) are loaded once at module level; setUp cannot pass
    values between cases, so the tests read those module globals directly.
    """
    # TODO: add tests for flux changes and for 100% flux coverage, and a
    # gene-essentiality check via systematic deletions.

    def setUp(self):
        print('setup...')

    def test_objectivevaluepass(self):
        """Both optimisations must have produced a cobra Solution object."""
        self.assertTrue(isinstance(OV_test, cobra.core.Solution))
        self.assertTrue(isinstance(OV_origin, cobra.core.Solution))

    def test_randomreactiondeletioncheck(self):
        """Knocking out gene b4034 must not drive the objective to zero."""
        # Deep-copy so the shared module-level model stays untouched.
        modifymodel = copy.deepcopy(model_origin)
        print('complete model: ', modifymodel.optimize())
        # The context manager reverts the knock-out on exit.
        with modifymodel:
            modifymodel.genes.b4034.knock_out()
            print(' knocked out: ', modifymodel.optimize())
            self.assertNotEqual(modifymodel.optimize().objective_value, 0)
# this is for test script to be able to run in python as a normal python script
# if __name__ == '__main__':
# unittest.main()
# this is for test script to be able to run in ipython shell with notebook
# exit=False and the dummy argv keep the interpreter/kernel alive when this
# runs inside IPython or Jupyter.
if __name__ == '__main__':
    unittest.main(argv=['first-arg-is-ignored'], exit=False)
# In[22]:
# # testing the isinstance function
# import unittest
# import pytest
# import cobra
# import copy
# # generate a sample for loading the models in the origin way for comparison
# model_origin= cobra.io.read_sbml_model('iML1515.xml')
# OV_origin= model_origin.optimize()
# #this is objective value
# model_test = cobra.io.read_sbml_model('IMPROVED_iML1515.xml')
# OV_test= model_test.optimize()
# SM_test= model_test.summary()
# isinstance(OV_test, cobra.core.Solution)
# In[ ]:
# In[ ]:
# In[ ]:
|
UTF-8
|
Python
| false | false | 3,525 |
py
| 34 |
test_iml1515second_check.py
| 31 | 0.65617 | 0.638865 | 0 | 120 | 28.341667 | 99 |
shekhuverma/competitive-coding
| 7,567,732,425,798 |
b055019eb89a7c154fc3e148fc97633465ca2e06
|
f7e0fa8fc5e944e9f6da2e6382072ad27580cf53
|
/Gridland_metro/gridland_metro.py
|
d8561472b375ef7cf7e85f9a17057f7518976484
|
[] |
no_license
|
https://github.com/shekhuverma/competitive-coding
|
0b897b7602311826d31e0c266f2de47349d060b8
|
b263c401f3681e7c18970cff9420fa814b8c4064
|
refs/heads/master
| 2021-01-21T23:45:14.355595 | 2018-02-03T15:26:02 | 2018-02-03T15:26:02 | 102,180,926 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#gridland metro
# NOTE: Python 2 code (raw_input, dict.has_key, print statement).
# Read grid size and track count (n m k), then collect each track's column
# span [c1, c2] bucketed by its row number.
ip=[int(x) for x in raw_input().split()]
temp={}
j=0
ans=0
while(j<ip[2]):
    # One track per line: row, start column, end column.
    ip1=[int(y) for y in raw_input().split()]
    r=ip1[0]
    c1=ip1[1]
    c2=ip1[2]
    if temp.has_key(r):
        temp[r].extend([c1,c2])
    else:
        temp[r]=[c1,c2]
    j+=1
print temp
##for key,value in temp.iteritems():
## for a in range(len(value)):
## frm=a
## till=a+1
|
UTF-8
|
Python
| false | false | 394 |
py
| 30 |
gridland_metro.py
| 26 | 0.525381 | 0.479695 | 0 | 20 | 18.7 | 45 |
Kay212MD/breakfast_planner_online
| 2,628,520,009,560 |
8373e4772cae65cef36d59d5946cafbe4ff7209a
|
50f2277623e177dd3484a4e7a6a6381512bfe97f
|
/plan/views.py
|
d84d12ff16512c24d8e2c507d2eea95e6b24fbd1
|
[] |
no_license
|
https://github.com/Kay212MD/breakfast_planner_online
|
c43bf3a79abe3616ded0b0b3d909761d73bc681e
|
ea0a22a0de3d36643cf83234934ea2c7f686159b
|
refs/heads/master
| 2022-06-28T17:03:19.095487 | 2020-05-09T17:51:37 | 2020-05-09T17:51:37 | 260,449,615 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render, redirect
from .models import PersonalPlan, FoodMainInformation
from .forms import PersonalPlanForm, FoodMainInformationForm
# Create your views here.
def index(request):
    """Render the static home page of breakfast planner online."""
    return render(request, 'plan/index.html')
def personal_plans(request):
    """List every personal plan."""
    # order_by() with no arguments clears any default ordering.
    all_plans = PersonalPlan.objects.order_by()
    return render(request, 'plan/personal_plans.html',
                  {'personal_plans': all_plans})
def personal_plan(request, personal_plan_id):
    """Show all food labels and descriptions from oner personal plan."""
    plan = PersonalPlan.objects.get(id=personal_plan_id)
    # Reverse FK accessor: all FoodMainInformation rows of this plan.
    foods = plan.foodmaininformation_set.all()
    context = {'personal_plan': plan, 'food_informations': foods}
    return render(request, 'plan/personal_plan.html', context)
def new_personal_plan(request):
    """Add a new personal plan.

    GET shows a blank form; a valid POST saves the plan and redirects to
    the plan list.
    """
    # BUG FIX: the original compared the HttpRequest object itself to the
    # string 'POST' (always unequal), so submitted forms were never
    # processed.  The HTTP verb lives in request.method.
    if request.method != 'POST':
        # No data submitted, create a blank form
        form = PersonalPlanForm()
    else:
        # Post data submitted, process data
        form = PersonalPlanForm(data=request.POST)
        if form.is_valid():
            form.save()
            return redirect('plan:personal_plans')
    # Display a blank or invalid form.
    context={'form':form}
    return render(request, 'plan/new_personal_plan.html', context)
def new_food(request, personal_plan_id):
    """Add new food to an existing personal plan.

    GET shows a blank form; a valid POST attaches the food to the plan
    identified by *personal_plan_id* and redirects to that plan's page.
    """
    personal_plan = PersonalPlan.objects.get(id=personal_plan_id)
    # BUG FIX: the original compared the HttpRequest object itself to the
    # string 'POST' (always unequal), so submitted forms were never
    # processed.  The HTTP verb lives in request.method.
    if request.method != 'POST':
        # No data submitted, create a blank form
        form = FoodMainInformationForm()
    else:
        # Post data submitted, process data
        form = FoodMainInformationForm(data=request.POST)
        if form.is_valid():
            # Attach the parent plan before the final save.
            new_food = form.save(commit=False)
            new_food.personal_plan = personal_plan
            new_food.save()
            return redirect('plan:personal_plan', personal_plan_id=personal_plan_id)
    # Display a blank or invalid form.
    context={'personal_plan':personal_plan, 'form':form}
    return render(request, 'plan/new_food.html', context)
|
UTF-8
|
Python
| false | false | 2,208 |
py
| 10 |
views.py
| 5 | 0.675725 | 0.675725 | 0 | 58 | 37.017241 | 84 |
andersthuesen/DTU-Course-Project-02466
| 12,962,211,329,255 |
37f409f2448327a98efe599eefa07c013b1dd5e5
|
c42f1e68acac80855d79002d5ffe910656eec35e
|
/scripts/synthesize_dataset.py
|
87c2e5d09ae30a3831c891a6d1f61b2b23f9ec3e
|
[
"MIT"
] |
permissive
|
https://github.com/andersthuesen/DTU-Course-Project-02466
|
ff8a7bb77f34fed97b08640a7627bab3f0ef2167
|
67f16f0264fb2ec5e76d7b0edbafc92bced1f73c
|
refs/heads/master
| 2022-11-13T02:19:00.576789 | 2020-06-24T10:21:38 | 2020-06-24T10:21:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/bin/env python
import sys
import os
import argparse
import json
import sys
import numpy as np
import torch
import soundfile
from tqdm import tqdm
import shutil
import matplotlib.pyplot as plt
from torchaudio.transforms import Resample
# Flowtron expects to run from its own directory and uses relative imports,
# hence the chdir and the sys.path manipulation below.
os.chdir("flowtron")
sys.path.insert(0, ".")
from flowtron import Flowtron
from torch.utils.data import DataLoader
from data import Data
from train import update_params
# WaveGlow lives inside the bundled tacotron2 submodule.
sys.path.insert(0, "tacotron2")
sys.path.insert(0, "tacotron2/waveglow")
from glow import WaveGlow
from scipy.io.wavfile import write
def chunks(lst, n):
    """Yield consecutive slices of *lst*, each holding at most *n* items."""
    start = 0
    while start < len(lst):
        yield lst[start:start + n]
        start += n
# Sampling / synthesis hyper-parameters.
seed = 1234
sigma = 0.5  # std-dev scale of the Gaussian residual fed to Flowtron
gate_threshold = 0.5
n_frames = 400 * 4  # NOTE(review): unused below; frame count is derived per batch
flowtron_speaker_id = 0
params = []  # optional config overrides passed to update_params
target_sample_rate = 16000  # output clips are resampled to this rate
waveglow_path = "models/waveglow_256channels_universal_v4.pt"
flowtron_path = "models/flowtron_ljs.pt"
config_path = "config.json"
chunk_size = 1  # transcript lines synthesized per batch
# Read the Flowtron JSON config and apply any overrides.
with open(config_path) as f:
    data = f.read()
config = json.loads(data)
update_params(config, params)
data_config = config["data_config"]
model_config = config["model_config"]
samplerate = data_config["sampling_rate"]  # model's native sample rate
hop_length = data_config["hop_length"]     # samples per mel frame at native rate
# Load seeds
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# Resample function
resample = Resample(orig_freq=samplerate, new_freq=target_sample_rate)
# load waveglow
waveglow = torch.load(waveglow_path)['model'].cuda().eval()
waveglow.cuda().half()
# Invertible 1x1 convs stay in fp32 while the rest of WaveGlow runs in half.
for k in waveglow.convinv:
    k.float()
waveglow.eval()
# load flowtron
model = Flowtron(**model_config).cuda()
state_dict = torch.load(flowtron_path, map_location='cpu')['state_dict']
model.load_state_dict(state_dict)
model.eval()
print("Loaded checkpoint '{}')".format(flowtron_path))
# Dataset helper used only for its text/speaker encoding utilities.
ignore_keys = ['training_files', 'validation_files']
trainset = Data(
    data_config['training_files'],
    **dict((k, v) for k, v in data_config.items() if k not in ignore_keys))
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
if __name__ == "__main__":
    # Usage: synthesize_dataset.py <librispeech-style input dir> <output dir>
    # Walks speaker/chapter directories, reads each chapter transcript, and
    # synthesizes one FLAC clip per transcript line via Flowtron + WaveGlow.
    _, path, output_path = sys.argv
    for speaker_id in tqdm(os.listdir(path)):
        speaker_path = os.path.join(path, speaker_id)
        for chapter_id in os.listdir(speaker_path):
            chapter_path = os.path.join(speaker_path, chapter_id)
            transcript_filename = f"{speaker_id}-{chapter_id}.trans.txt"
            transcript_path = os.path.join(chapter_path, transcript_filename)
            audio_output_dir = os.path.join(output_path, speaker_id, chapter_id)
            # Create output directory
            if not os.path.isdir(audio_output_dir):
                os.makedirs(audio_output_dir)
                os.chmod(audio_output_dir, 0o775)
            # Mirror the transcript file into the output tree.
            transcript_output_path = os.path.join(audio_output_dir,
                                                  transcript_filename)
            if not os.path.exists(transcript_output_path):
                shutil.copy(transcript_path, transcript_output_path)
            with open(transcript_path, "r") as file:
                for lines in chunks(file.readlines(), chunk_size):
                    batch_size = len(lines)
                    # Each transcript line is "<utterance-id> <text>".
                    audio_names, texts = zip(*[line.split(" ", 1) for line in lines])
                    texts = [text.lower() for text in texts]
                    audio_filenames = [f"{audio_name}.flac" for audio_name in audio_names]
                    audio_output_paths = [
                        os.path.join(audio_output_dir, audio_filename)
                        for audio_filename in audio_filenames
                    ]
                    # Resume support: skip the batch if its first clip already exists.
                    if os.path.exists(audio_output_paths[0]):
                        continue
                    speaker_vecs = trainset.get_speaker_id(flowtron_speaker_id)
                    speaker_vecs = speaker_vecs.repeat(batch_size, 1)
                    speaker_vecs = speaker_vecs.cuda()
                    text_lengths = torch.tensor([len(text) for text in texts])
                    max_text_length = torch.max(text_lengths)
                    encoded_texts = [trainset.get_text(text) for text in texts]
                    encoded_text_lengths = torch.tensor(
                        [text.size(0) for text in encoded_texts])
                    max_encoded_text_length = torch.max(encoded_text_lengths)
                    # Zero-pad every encoded text to the longest one in the batch.
                    padded_texts = torch.LongTensor(batch_size, max_encoded_text_length)
                    padded_texts.zero_()
                    for i, encoded_text in enumerate(encoded_texts):
                        padded_texts[i, :encoded_text.size(0)] = encoded_text
                    padded_texts = padded_texts.cuda()
                    # Frame budget: 6 mel frames per raw input character
                    # (presumably a generous upper bound on utterance length).
                    frames = max_text_length * 6
                    with torch.no_grad():
                        residual = torch.cuda.FloatTensor(batch_size, 80,
                                                          frames).normal_() * sigma
                        mels, attentions, masks = model.infer(
                            residual,
                            speaker_vecs.T,
                            padded_texts,
                            gate_threshold=gate_threshold)
                        audio = waveglow.infer(mels.half(), sigma=0.8).float()
                        audio = resample(audio)
                        # Peak-normalise each clip to [-1, 1].
                        audio_max, _ = audio.abs().max(dim=1, keepdim=True)
                        audio = audio / audio_max
                        audio = audio.cpu().numpy()
                    for i, wav in enumerate(audio):
                        # Convert the model's frame-index mask into sample indices.
                        # NOTE(review): the ratio samplerate/target_sample_rate looks
                        # inverted for audio already resampled to target_sample_rate
                        # (expected hop * target/orig) -- verify against clip lengths.
                        resampled_hop_length = int(samplerate / target_sample_rate *
                                                   hop_length)
                        start_index = masks[i][0] * resampled_hop_length
                        stop_index = masks[i][1] * resampled_hop_length
                        wav = wav[start_index:stop_index]
                        soundfile.write(
                            audio_output_paths[i], wav, target_sample_rate, format="flac")
                        print(audio_output_paths[i])
|
UTF-8
|
Python
| false | false | 5,419 |
py
| 25 |
synthesize_dataset.py
| 15 | 0.635173 | 0.626684 | 0 | 178 | 29.449438 | 80 |
RainingNight0329/JerryHW
| 18,262,200,964,616 |
47f99317ff4b2899cd85bd498998dbd96e3fe5d5
|
f0d4bfac9918fc32656fa08542c8111ef1200f02
|
/0416/0416/mysite/cms/views.py
|
9e532a6b4f629b848f03631263709c56ef027718
|
[] |
no_license
|
https://github.com/RainingNight0329/JerryHW
|
299e09ac9fe062b932587e3dc5bd3b57c43c450f
|
3e0c939dd71d2ca30df76c917c31648738f4ef10
|
refs/heads/master
| 2021-03-30T20:57:14.049674 | 2018-05-21T04:18:21 | 2018-05-21T04:18:21 | 124,831,974 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render_to_response
from django.http import HttpResponse
from .models import Information
# Create your views here.
def index(request):
    """List every Information record on the CMS menu page."""
    informations = Information.objects.all()
    # Explicit context dict, equivalent to the locals() of this scope.
    return render_to_response(
        'cms/menu.html', {'informations': informations, 'request': request})
|
UTF-8
|
Python
| false | false | 308 |
py
| 15 |
views.py
| 6 | 0.772727 | 0.772727 | 0 | 8 | 37.625 | 55 |
dencesun/Algorithm
| 16,544,214,051,872 |
2f9612f3a53b0e86eefec90e89ed450dcfc23851
|
e4fdb9cd960e6366cc56417bd26a134d0d0b0073
|
/387.py
|
d4e96a5a0b272bbc1c41f6eaef4413b7bc99af7c
|
[] |
no_license
|
https://github.com/dencesun/Algorithm
|
9ea98d6baf13020ef1c9cca40e5e3e2b5b259d1a
|
846bcc0a304c12535fd353f78f041a2b8da89d9d
|
refs/heads/master
| 2020-07-13T02:36:27.353763 | 2017-11-04T02:24:52 | 2017-11-04T02:24:52 | 67,869,179 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import collections
class Solution(object):
    def firstUniqChar(self, s):
        """Return the index of the first non-repeating character in *s*,
        or -1 when every character repeats (or *s* is empty).

        :type s: str
        :rtype: int
        """
        if not s:
            return -1
        frequency = collections.Counter(s)
        # The first position whose character occurs exactly once wins.
        for position, ch in enumerate(s):
            if frequency[ch] == 1:
                return position
        return -1
# Quick manual checks. Converted from Python 2 print statements, which are
# syntax errors under Python 3; print() works on both 2.7 and 3.
test = Solution()
print(test.firstUniqChar('loveleetcode'))
print(test.firstUniqChar('leetcode'))
print(test.firstUniqChar('cc'))
print(test.firstUniqChar(""))
|
UTF-8
|
Python
| false | false | 470 |
py
| 80 |
387.py
| 79 | 0.582979 | 0.574468 | 0 | 21 | 21.380952 | 40 |
Engineering-Course/CIHP_PGN
| 10,496,900,074,529 |
e35b6b8da7683db2c7e5fd3c8a619f9f155aa50b
|
7e873b17a7e464ddb5b0e3367ef277f1254bdd6c
|
/train_pgn.py
|
328e766c17f8986e3be81c11f57c052126483e97
|
[
"MIT"
] |
permissive
|
https://github.com/Engineering-Course/CIHP_PGN
|
b230976dfffd8ab0bf4c39d08728aabbb90ed88c
|
0cf1cbe54a44fc86abe2023b0e762df3f9605242
|
refs/heads/master
| 2023-08-31T12:16:40.864057 | 2022-11-24T03:25:21 | 2022-11-24T03:25:21 | 143,100,053 | 408 | 122 |
MIT
| false | 2023-08-20T12:57:15 | 2018-08-01T03:38:07 | 2023-08-17T04:33:26 | 2023-08-20T12:57:05 | 843 | 386 | 104 | 40 |
Python
| false | false |
from __future__ import print_function
import os
import time
import tensorflow as tf
import numpy as np
import random
from utils import *
# Set gpus
gpus = [0]
os.environ["CUDA_VISIBLE_DEVICES"]=','.join([str(i) for i in gpus])
num_gpus = len(gpus) # number of GPUs to use
### parameters setting
DATA_DIR = './datasets/CIHP'
LIST_PATH = './datasets/CIHP/list/train_rev.txt'
DATA_ID_LIST = './datasets/CIHP/list/train_id.txt'
SNAPSHOT_DIR = './checkpoint/CIHP_pgn'   # checkpoints written here
LOG_DIR = './logs/CIHP_pgn'              # TensorBoard summaries written here
N_CLASSES = 20
INPUT_SIZE = (512, 512)
BATCH_I = 1                              # per-GPU batch size
BATCH_SIZE = BATCH_I * len(gpus)         # global batch size across towers
SHUFFLE = True
RANDOM_SCALE = True
RANDOM_MIRROR = True
LEARNING_RATE = 1e-5
MOMENTUM = 0.9
POWER = 0.9                              # exponent of the polynomial LR decay
# Relative weights combining the parsing and edge losses (and the extra
# weight given to positive edge pixels).
p_Weight = 50
e_Weight = 0.005
Edge_Pos_W = 2
# One training-id line per sample, so the line count is the dataset size.
with open(DATA_ID_LIST, 'r') as f:
    TRAIN_SET = len(f.readlines())
SAVE_PRED_EVERY = TRAIN_SET / BATCH_SIZE + 1 # save model per epoch (number of training set / batch)
NUM_STEPS = int(SAVE_PRED_EVERY) * 100 + 1 # 100 epoch
def main():
    """Build the multi-GPU PGN training graph and run the training loop.

    All hyper-parameters come from module-level constants. The global batch
    is sliced across the configured GPUs ("towers"), each tower computes the
    combined parsing + edge loss, the per-tower gradients are averaged, and
    the model is checkpointed roughly once per epoch to SNAPSHOT_DIR.
    """
    RANDOM_SEED = random.randint(1000, 9999)
    tf.set_random_seed(RANDOM_SEED)
    ## Create queue coordinator.
    coord = tf.train.Coordinator()
    h, w = INPUT_SIZE  # (unused below)
    ## Load reader.
    with tf.name_scope("create_inputs"):
        reader = ImageReaderPGN(DATA_DIR, LIST_PATH, DATA_ID_LIST, INPUT_SIZE, RANDOM_SCALE, RANDOM_MIRROR, SHUFFLE, coord)
        image_batch, label_batch, edge_batch = reader.dequeue(BATCH_SIZE)
    tower_grads = []
    reuse1 = False
    # Define loss and optimisation parameters.
    base_lr = tf.constant(LEARNING_RATE)
    step_ph = tf.placeholder(dtype=tf.float32, shape=())
    # Polynomial learning-rate decay driven by the fed-in step number.
    learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - step_ph / NUM_STEPS), POWER))
    optim = tf.train.MomentumOptimizer(learning_rate, MOMENTUM)
    for i in range(num_gpus):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('Tower_%d' % (i)) as scope:
                # Variables are created on tower 0 and reused on later towers.
                if i == 0:
                    reuse1 = False
                else:
                    reuse1 = True
                # Slice this tower's share out of the global batch.
                next_image = image_batch[i*BATCH_I:(i+1)*BATCH_I,:]
                next_label = label_batch[i*BATCH_I:(i+1)*BATCH_I,:]
                next_edge = edge_batch[i*BATCH_I:(i+1)*BATCH_I,:]
                # Create network.
                with tf.variable_scope('', reuse=reuse1):
                    net = PGNModel({'data': next_image}, is_training=False, n_classes=N_CLASSES, keep_prob=0.9)
                # parsing net
                parsing_out1 = net.layers['parsing_fc']
                parsing_out2 = net.layers['parsing_rf_fc']
                # edge net
                edge_out1_final = net.layers['edge_fc']
                edge_out1_res5 = net.layers['fc1_edge_res5']
                edge_out1_res4 = net.layers['fc1_edge_res4']
                edge_out1_res3 = net.layers['fc1_edge_res3']
                edge_out2_final = net.layers['edge_rf_fc']
                # combine resize: bring every edge map up to the input resolution
                edge_out1 = tf.image.resize_images(edge_out1_final, tf.shape(next_image)[1:3,])
                edge_out2 = tf.image.resize_images(edge_out2_final, tf.shape(next_image)[1:3,])
                edge_out1_res5 = tf.image.resize_images(edge_out1_res5, tf.shape(next_image)[1:3,])
                edge_out1_res4 = tf.image.resize_images(edge_out1_res4, tf.shape(next_image)[1:3,])
                edge_out1_res3 = tf.image.resize_images(edge_out1_res3, tf.shape(next_image)[1:3,])
                ### Predictions: ignoring all predictions with labels greater or equal than n_classes
                raw_prediction_p1 = tf.reshape(parsing_out1, [-1, N_CLASSES])
                raw_prediction_p2 = tf.reshape(parsing_out2, [-1, N_CLASSES])
                label_proc = prepare_label(next_label, tf.stack(parsing_out1.get_shape()[1:3]), one_hot=False) # [batch_size, h, w]
                raw_gt = tf.reshape(label_proc, [-1,])
                indices = tf.squeeze(tf.where(tf.less_equal(raw_gt, N_CLASSES - 1)), 1)
                gt = tf.cast(tf.gather(raw_gt, indices), tf.int32)
                prediction_p1 = tf.gather(raw_prediction_p1, indices)
                prediction_p2 = tf.gather(raw_prediction_p2, indices)
                # Pixels whose predicted edge probability exceeds 0.1 (and have a
                # valid label) get an extra, localised parsing loss term below.
                raw_edge = tf.reshape(tf.sigmoid(edge_out2_final), [-1,])
                edge_cond = tf.multiply(tf.cast(tf.greater(raw_edge, 0.1), tf.int32), tf.cast(tf.less_equal(raw_gt, N_CLASSES - 1), tf.int32))
                edge_mask = tf.squeeze(tf.where(tf.equal(edge_cond, 1)), 1)
                gt_edge = tf.cast(tf.gather(raw_gt, edge_mask), tf.int32)
                p1_lc = tf.gather(raw_prediction_p1, edge_mask)
                p2_lc = tf.gather(raw_prediction_p2, edge_mask)
                ### Pixel-wise softmax loss.
                loss_p1_gb = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction_p1, labels=gt))
                loss_p2_gb = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction_p2, labels=gt))
                loss_p1_lc = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=p1_lc, labels=gt_edge))
                loss_p2_lc = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=p2_lc, labels=gt_edge))
                loss_p1 = loss_p1_lc + loss_p1_gb
                loss_p2 = loss_p2_lc + loss_p2_gb
                ### Sigmoid cross entropy
                # Class-balance weights: positives are rare, so each side is
                # scaled by the other side's pixel fraction.
                edge_pos_mask = tf.equal(next_edge, 1)
                edge_neg_mask = tf.logical_not(edge_pos_mask)
                edge_pos_mask = tf.cast(edge_pos_mask, tf.float32)
                edge_neg_mask = tf.cast(edge_neg_mask, tf.float32)
                total_pixels = tf.cast(tf.shape(next_edge)[1] * tf.shape(next_edge)[2], tf.int32)
                pos_pixels = tf.reduce_sum(tf.to_int32(next_edge))
                neg_pixels = tf.subtract(total_pixels, pos_pixels)
                pos_weight = tf.cast(tf.divide(neg_pixels, total_pixels), tf.float32)
                neg_weight = tf.cast(tf.divide(pos_pixels, total_pixels), tf.float32)
                parsing_mask = tf.cast(tf.greater(next_label, 0), tf.float32)
                edge_gt = tf.cast(next_edge, tf.float32)
                # Weighted edge loss for the first-stage edge output.
                t_loss_e1 = tf.nn.sigmoid_cross_entropy_with_logits(logits=edge_out1, labels=edge_gt)
                loss_e1_pos_gb = tf.reduce_sum(tf.multiply(t_loss_e1, edge_pos_mask), [1, 2])
                loss_e1_neg_gb = tf.reduce_sum(tf.multiply(t_loss_e1, edge_neg_mask), [1, 2])
                loss_e1_pos_lc = tf.reduce_sum(tf.multiply(tf.multiply(t_loss_e1, parsing_mask), edge_pos_mask), [1, 2])
                loss_e1_neg_lc = tf.reduce_sum(tf.multiply(tf.multiply(t_loss_e1, parsing_mask), edge_neg_mask), [1, 2])
                loss_e1_pos = (loss_e1_pos_gb + loss_e1_pos_lc)* pos_weight
                loss_e1_neg = (loss_e1_neg_gb + loss_e1_neg_lc) * neg_weight
                loss_e1 = tf.reduce_mean(loss_e1_pos * Edge_Pos_W + loss_e1_neg)
                # Same weighting for the refined edge output.
                t_loss_e2 = tf.nn.sigmoid_cross_entropy_with_logits(logits=edge_out2, labels=edge_gt)
                loss_e2_pos_gb = tf.reduce_sum(tf.multiply(t_loss_e2, edge_pos_mask), [1, 2])
                loss_e2_neg_gb = tf.reduce_sum(tf.multiply(t_loss_e2, edge_neg_mask), [1, 2])
                loss_e2_pos_lc = tf.reduce_sum(tf.multiply(tf.multiply(t_loss_e2, parsing_mask), edge_pos_mask), [1, 2])
                loss_e2_neg_lc = tf.reduce_sum(tf.multiply(tf.multiply(t_loss_e2, parsing_mask), edge_neg_mask), [1, 2])
                loss_e2_pos = (loss_e2_pos_gb + loss_e2_pos_lc)* pos_weight
                loss_e2_neg = (loss_e2_neg_gb + loss_e2_neg_lc) * neg_weight
                loss_e2 = tf.reduce_mean(loss_e2_pos * Edge_Pos_W + loss_e2_neg)
                # Same weighting for the res5 side output.
                t_loss_e1_res5 = tf.nn.sigmoid_cross_entropy_with_logits(logits=edge_out1_res5, labels=edge_gt)
                loss_e1_res5_pos_gb = tf.reduce_sum(tf.multiply(t_loss_e1_res5, edge_pos_mask), [1, 2])
                loss_e1_res5_neg_gb = tf.reduce_sum(tf.multiply(t_loss_e1_res5, edge_neg_mask), [1, 2])
                loss_e1_res5_pos_lc = tf.reduce_sum(tf.multiply(tf.multiply(t_loss_e1_res5, parsing_mask), edge_pos_mask), [1, 2])
                loss_e1_res5_neg_lc = tf.reduce_sum(tf.multiply(tf.multiply(t_loss_e1_res5, parsing_mask), edge_neg_mask), [1, 2])
                loss_e1_res5_pos = (loss_e1_res5_pos_gb + loss_e1_res5_pos_lc)* pos_weight
                loss_e1_res5_neg = (loss_e1_res5_neg_gb + loss_e1_res5_neg_lc) * neg_weight
                loss_e1_res5 = tf.reduce_mean(loss_e1_res5_pos * Edge_Pos_W + loss_e1_res5_neg)
                # Same weighting for the res4 side output.
                t_loss_e1_res4 = tf.nn.sigmoid_cross_entropy_with_logits(logits=edge_out1_res4, labels=edge_gt)
                loss_e1_res4_pos_gb = tf.reduce_sum(tf.multiply(t_loss_e1_res4, edge_pos_mask), [1, 2])
                loss_e1_res4_neg_gb = tf.reduce_sum(tf.multiply(t_loss_e1_res4, edge_neg_mask), [1, 2])
                loss_e1_res4_pos_lc = tf.reduce_sum(tf.multiply(tf.multiply(t_loss_e1_res4, parsing_mask), edge_pos_mask), [1, 2])
                loss_e1_res4_neg_lc = tf.reduce_sum(tf.multiply(tf.multiply(t_loss_e1_res4, parsing_mask), edge_neg_mask), [1, 2])
                loss_e1_res4_pos = (loss_e1_res4_pos_gb + loss_e1_res4_pos_lc)* pos_weight
                loss_e1_res4_neg = (loss_e1_res4_neg_gb + loss_e1_res4_neg_lc) * neg_weight
                loss_e1_res4 = tf.reduce_mean(loss_e1_res4_pos * Edge_Pos_W + loss_e1_res4_neg)
                # Same weighting for the res3 side output.
                t_loss_e1_res3 = tf.nn.sigmoid_cross_entropy_with_logits(logits=edge_out1_res3, labels=edge_gt)
                loss_e1_res3_pos_gb = tf.reduce_sum(tf.multiply(t_loss_e1_res3, edge_pos_mask), [1, 2])
                loss_e1_res3_neg_gb = tf.reduce_sum(tf.multiply(t_loss_e1_res3, edge_neg_mask), [1, 2])
                loss_e1_res3_pos_lc = tf.reduce_sum(tf.multiply(tf.multiply(t_loss_e1_res3, parsing_mask), edge_pos_mask), [1, 2])
                loss_e1_res3_neg_lc = tf.reduce_sum(tf.multiply(tf.multiply(t_loss_e1_res3, parsing_mask), edge_neg_mask), [1, 2])
                loss_e1_res3_pos = (loss_e1_res3_pos_gb + loss_e1_res3_pos_lc)* pos_weight
                loss_e1_res3_neg = (loss_e1_res3_neg_gb + loss_e1_res3_neg_lc) * neg_weight
                loss_e1_res3 = tf.reduce_mean(loss_e1_res3_pos * Edge_Pos_W + loss_e1_res3_neg)
                # Combine everything into this tower's training objective.
                loss_parsing = loss_p1 + loss_p2
                loss_edge = loss_e1 + loss_e2 + loss_e1_res5 + loss_e1_res4 + loss_e1_res3
                reduced_loss = loss_parsing * p_Weight + loss_edge * e_Weight
                trainable_variable = tf.trainable_variables()
                grads = optim.compute_gradients(reduced_loss, var_list=trainable_variable)
                tower_grads.append(grads)
                tf.add_to_collection('loss_p', loss_parsing)
                tf.add_to_collection('loss_e', loss_edge)
                tf.add_to_collection('reduced_loss', reduced_loss)
    # Average the gradients
    grads_ave = average_gradients(tower_grads)
    # apply the gradients with our optimizers
    train_op = optim.apply_gradients(grads_ave)
    loss_p_ave = tf.reduce_mean(tf.get_collection('loss_p'))
    loss_e_ave = tf.reduce_mean(tf.get_collection('loss_e'))
    loss_ave = tf.reduce_mean(tf.get_collection('reduced_loss'))
    loss_summary_p = tf.summary.scalar("loss_p_ave", loss_p_ave)
    loss_summary_e = tf.summary.scalar("loss_e_ave", loss_e_ave)
    loss_summary_ave = tf.summary.scalar("loss_ave", loss_ave)
    loss_summary = tf.summary.merge([loss_summary_ave, loss_summary_p, loss_summary_e])
    summary_writer = tf.summary.FileWriter(LOG_DIR, graph=tf.get_default_graph())
    # Saver for storing checkpoints of the model.
    all_saver_var = tf.global_variables()
    # The loader restores only the shared backbone (no parsing/edge heads,
    # no optimizer slots), so pre-trained weights can bootstrap training.
    restore_var = [v for v in all_saver_var if 'parsing' not in v.name and 'edge' not in v.name and 'Momentum' not in v.name]
    saver = tf.train.Saver(var_list=all_saver_var, max_to_keep=100)
    loader = tf.train.Saver(var_list=restore_var)
    # Set up tf session and initialize variables.
    config = tf.ConfigProto(allow_soft_placement=True,log_device_placement=False)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    init = tf.global_variables_initializer()
    sess.run(init)
    if load(loader, sess, SNAPSHOT_DIR):
        print(" [*] Load SUCCESS")
    else:
        print(" [!] Load failed...")
    # Start queue threads.
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    # Iterate over training steps.
    for step in range(NUM_STEPS):
        start_time = time.time()
        loss_value = 0
        feed_dict = { step_ph : step }
        # Apply gradients. NOTE(review): loss_parsing/loss_edge here are the
        # tensors from the *last* tower built in the loop above (fine for a
        # single GPU; per-tower values differ with num_gpus > 1).
        summary, loss_value, par_loss, edge_loss, _ = sess.run([loss_summary, reduced_loss, loss_parsing, loss_edge, train_op], feed_dict=feed_dict)
        summary_writer.add_summary(summary, step)
        if step % SAVE_PRED_EVERY == 0:
            save(saver, sess, SNAPSHOT_DIR, step)
        duration = time.time() - start_time
        print('step {:d} \t loss = {:.3f}, parsing_loss = {:.3f}, edge_loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, par_loss, edge_loss, duration))
def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.
    Args:
      tower_grads: List of lists of (gradient, variable) tuples. The outer list
        is over individual gradients. The inner list is over the gradient
        calculation for each tower.
    Returns:
      List of pairs of (gradient, variable) where the gradient has been averaged
      across all towers.
    """
    averaged = []
    # zip(*...) regroups the per-tower lists so each iteration sees every
    # tower's (grad, var) pair for one variable.
    for per_tower in zip(*tower_grads):
        # Stack the tower gradients along a new leading axis, then average it.
        stacked = tf.concat(
            axis=0, values=[tf.expand_dims(g, 0) for g, _ in per_tower])
        mean_grad = tf.reduce_mean(stacked, 0)
        # Variables are shared across towers, so the first tower's handle
        # stands in for all of them.
        shared_var = per_tower[0][1]
        averaged.append((mean_grad, shared_var))
    return averaged
# Script entry point: build the graph and start training.
if __name__ == '__main__':
    main()
##########################################
|
UTF-8
|
Python
| false | false | 14,522 |
py
| 1,996 |
train_pgn.py
| 18 | 0.603154 | 0.576573 | 0 | 285 | 49.954386 | 161 |
jujoohwan/Algorithm_Problem
| 15,977,278,375,630 |
a6f91666acf722ce5f0119a32466ddc90040a145
|
abdee00961399404666d7d9e9c181b1f55e89767
|
/CodeUp/6067.py
|
caa9c69f49bf9b28de07952be8ed79579a2b8cb9
|
[] |
no_license
|
https://github.com/jujoohwan/Algorithm_Problem
|
e744240f155071038b75da217fb48541a178e4e9
|
bebe3f13ce16b9a821dce8057658bec41022d8aa
|
refs/heads/main
| 2023-08-01T11:47:23.750263 | 2022-12-21T14:24:26 | 2022-12-21T14:24:26 | 406,480,036 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# CodeUp 6067: classify a nonzero integer by sign and parity.
result = int(input())
if result < 0:
    # Negative: even -> A, odd -> B.
    print('A' if result % 2 == 0 else 'B')
elif result > 0:
    # Positive: even -> C, odd -> D.
    print('C' if result % 2 == 0 else 'D')
|
UTF-8
|
Python
| false | false | 214 |
py
| 74 |
6067.py
| 71 | 0.579439 | 0.523364 | 0 | 10 | 19.6 | 31 |
beshoyAtefZaki/diet
| 1,391,569,421,905 |
79e129c6fc8f0cc241a9710b521107bea8897878
|
6bdd77b2b716f738225768caa56f989209e600f2
|
/doctors/migrations/0006_auto_20201114_1233.py
|
ed36e9435b0de073605c01f613e7975747365ffe
|
[] |
no_license
|
https://github.com/beshoyAtefZaki/diet
|
9bbcfef732aa3d4bfe1dcb849fa8fff0a80634ba
|
891b4a0f48aaad8b5c43c324db78203c0e2b7acb
|
refs/heads/master
| 2023-01-27T14:00:10.104349 | 2020-11-20T21:22:10 | 2020-11-20T21:22:10 | 308,125,859 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-11-14 12:33
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add per-nutrient decimal columns to PatientTFR and refresh the
    auto-captured default of DoctorProfile.strat_date (generated 2020-11-14).
    """

    dependencies = [
        ('doctors', '0005_auto_20201114_1219'),
    ]

    # Every nutrient column shares the identical nullable DecimalField
    # shape, so the twenty AddField operations are built from one list of
    # column names (order preserved from the generated migration).
    operations = [
        migrations.AddField(
            model_name='patienttfr',
            name=column,
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True),
        )
        for column in (
            'ASH', 'Calcium', 'Carbohydrate', 'coper', 'enerygy',
            'fat', 'fiber', 'iron', 'magnisum', 'phorphorus',
            'potasium', 'protein', 'refuse', 'riboflabn', 'sodium',
            'thiamen', 'vitamen_a', 'vitamen_c', 'water', 'zinc',
        )
    ] + [
        migrations.AlterField(
            model_name='doctorprofile',
            name='strat_date',
            field=models.DateField(default=datetime.datetime(2020, 11, 14, 12, 33, 29, 422159)),
        ),
    ]
|
UTF-8
|
Python
| false | false | 4,516 |
py
| 23 |
0006_auto_20201114_1233.py
| 19 | 0.570195 | 0.545394 | 0 | 121 | 36.322314 | 96 |
evanchan92/learnpython
| 19,585,050,898,325 |
6a4afa4b9dba8e5d39547fe3eb0ed6852112278a
|
59f01588055395ae1b3b1981326a2b46780c6d56
|
/Oreilly-Scraper/ScrappingENV/lib/python3.7/ntpath.py
|
a4489ee454ed06a4c5d0c5f67ec9170035c3bb2d
|
[] |
no_license
|
https://github.com/evanchan92/learnpython
|
bf05d93771ebf21ba7bf628a44697947b9af3ce8
|
6174d0f650dbeb514ac7aee9eddba4481f1b3d6b
|
refs/heads/master
| 2020-09-02T13:12:12.736723 | 2019-04-02T15:57:02 | 2019-04-02T15:57:02 | 98,376,246 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
/Users/evan/anaconda3/lib/python3.7/ntpath.py
|
UTF-8
|
Python
| false | false | 45 |
py
| 49 |
ntpath.py
| 45 | 0.822222 | 0.755556 | 0 | 1 | 45 | 45 |
RevansChen/online-judge
| 8,581,344,679,116 |
5bec560d6dc704afe76756169b04ba23e1984ed1
|
abad82a1f487c5ff2fb6a84059a665aa178275cb
|
/Codewars/4kyu/codewars-style-ranking-system/Python/solution1.py
|
1d2a030572d8083ec55324c6b99ba85bb9598624
|
[
"MIT"
] |
permissive
|
https://github.com/RevansChen/online-judge
|
8ae55f136739a54f9c9640a967ec931425379507
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
refs/heads/master
| 2021-01-19T23:02:58.273081 | 2019-07-05T09:42:40 | 2019-07-05T09:42:40 | 88,911,035 | 9 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Python - 3.6.0
class User:
    """Codewars-style ranking: ranks run -8..-1 then 1..8 (no zero)."""

    RANK = [-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8]

    def __init__(self):
        # New users start at the lowest rank with no progress.
        self.progress = 0
        self.rank = User.RANK[0]

    def inc_progress(self, activityRank):
        """Credit a completed activity of rank *activityRank* to this user.

        Raises ValueError for ranks outside the valid scale. Progress rolls
        over into rank promotions per 100 points; the top rank accrues none.
        """
        if activityRank not in User.RANK:
            raise ValueError()
        if self.rank == User.RANK[-1]:
            return
        # Points depend on how far the activity sits above/below the user.
        gap = User.RANK.index(activityRank) - User.RANK.index(self.rank)
        if gap < 0:
            earned = 1
        elif gap == 0:
            earned = 3
        else:
            earned = 10 * gap * gap
        self.progress += earned
        if self.progress >= 100:
            # Each full 100 points is one rank promotion, capped at the top.
            promotions, self.progress = divmod(self.progress, 100)
            here = User.RANK.index(self.rank)
            self.rank = User.RANK[min(here + promotions, len(User.RANK) - 1)]
            if self.rank == User.RANK[-1]:
                self.progress = 0
|
UTF-8
|
Python
| false | false | 794 |
py
| 2,569 |
solution1.py
| 1,607 | 0.5 | 0.453401 | 0 | 26 | 29.538462 | 70 |
RachelElysia/ratings-v2
| 13,872,744,390,884 |
c74d4cd9c1a82cd0f3aae3aed1eedc40429a29c5
|
a9a050b4464b8de43e58900f7bdc2377909e6b9c
|
/seed_database.py
|
40c7ecd3e7e9cd7a748726003897a23083fae7d3
|
[] |
no_license
|
https://github.com/RachelElysia/ratings-v2
|
70a9e38b72ab07de444e5df8bbfd5826ceed2d3e
|
971503752a46a3dfacfdfc97ab11268801f7cc51
|
refs/heads/main
| 2023-02-26T03:07:11.234342 | 2021-02-04T07:45:53 | 2021-02-04T07:45:53 | 335,814,255 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""Script to seed database."""
import os
import json
from random import choice, randint
from datetime import datetime
import crud
import model
import server
os.system('dropdb ratings')
os.system('createdb ratings')
model.connect_to_db(server.app)
model.db.create_all()
# read in movie data to movie_data variable
with open('data/movies.json') as f:
movie_data = json.loads(f.read())
movie_list = []
# populate movies database
for movie in movie_data:
release_date = datetime.strptime(movie['release_date'], '%Y-%m-%d')
curr_movie = crud.create_movie(movie['title'], movie['overview'], release_date, movie['poster_path'])
movie_list.append(curr_movie)
# generate 10 random users
for n in range(10):
email = f'user{n}@test.com'
password = 'test123'
curr_user = crud.create_user(email, password)
# Make each user make 10 ratings
for userratings in range(10):
# Randomly choose a Movie to rate
random_movie = choice(movie_list)
# Rate 1-5
score = randint(1, 5)
# Use data: User, random movie chosen, and random score to make a fake rating
crud.create_rating(curr_user, random_movie, score)
|
UTF-8
|
Python
| false | false | 1,202 |
py
| 2 |
seed_database.py
| 2 | 0.671381 | 0.658902 | 0 | 45 | 25.511111 | 105 |
anishnarang/data_mining_project
| 12,498,354,843,121 |
1493f033a23b3ccd4b7edcadd0c3325552ce2f26
|
59401a6d106960f22edd54e47d8575c3216e7eb2
|
/remove_redundant.py
|
8532558257371c64bce7411b524d0b077f520605
|
[] |
no_license
|
https://github.com/anishnarang/data_mining_project
|
e666d309889da5eed5c477e273531ad6b01fb771
|
8b42fe2dc31943e82d4a305922dd1d6bed1d0cd0
|
refs/heads/master
| 2020-03-31T20:34:37.068794 | 2018-12-04T02:05:16 | 2018-12-04T02:05:16 | 152,546,099 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Collapse duplicate (user, category) rating rows into one averaged row.
totals = {}
with open("util_data/user_to_cuicine_test.csv") as source:
    for row in source:
        user, category, score = row.strip().split(",")
        per_user = totals.setdefault(user, {})
        # Track a running (sum, count) pair per (user, category).
        rating_sum, count = per_user.get(category, (0, 0))
        per_user[category] = (rating_sum + int(score), count + 1)

# Replace each (sum, count) pair with the mean rating.
for per_user in totals.values():
    for category in per_user.keys():
        rating_sum, count = per_user[category]
        per_user[category] = float(rating_sum) / count

with open("util_data/user_to_cuisine_unique_test.csv", "w+") as sink:
    sink.write("User,Category,Rating\n")
    for user in totals.keys():
        for category in totals[user].keys():
            sink.write(str(user) + "," + str(category) + "," + str(totals[user][category]) + "\n")
|
UTF-8
|
Python
| false | false | 973 |
py
| 23 |
remove_redundant.py
| 15 | 0.561151 | 0.553957 | 0 | 27 | 35.074074 | 103 |
veronikanska/pyladies
| 2,284,922,633,731 |
db1fc5c4c1195a9f9df2201e8386975e068d868a
|
ee10ae2dddfac07e2cfbbabfe41484b5989c16bf
|
/03/kalkulacka.py
|
29b80f14d60fc36ff6bc0a2090ac9d62ebee8ccc
|
[] |
no_license
|
https://github.com/veronikanska/pyladies
|
4f5e69199b9000b8fd15dfe75f2735a4bc67d27d
|
a8a191fceddff0bba8c8553e4326e3f85e6f2326
|
refs/heads/master
| 2020-05-04T12:42:17.507172 | 2019-04-15T18:15:30 | 2019-04-15T18:15:30 | 179,130,866 | 0 | 0 | null | false | 2019-06-11T19:31:38 | 2019-04-02T17:55:43 | 2019-04-15T18:15:33 | 2019-06-11T19:25:33 | 38 | 0 | 0 | 4 |
Python
| false | false |
# The user enters two numbers and an operation; the program evaluates it.
cislo_1 = int(input('Zadej prvni cislo: '))
cislo_2 = int(input('Zadej druhe cislo: '))
operace = input('Vyber operaci: +, -, * nebo /: ')

# Dispatch table mapping each supported operator symbol to its arithmetic.
vypocty = {
    '+': lambda a, b: a + b,
    '-': lambda a, b: a - b,
    '*': lambda a, b: a * b,
    '/': lambda a, b: a / b,
}

if operace not in vypocty:
    print('Nerozumim')
else:
    print('Prvni cislo: ', cislo_1)
    print('Druhe cislo: ', cislo_2)
    print('Operace: ', operace)
    print(cislo_1, ' ' + operace + ' ', cislo_2, ' = ', vypocty[operace](cislo_1, cislo_2))
|
UTF-8
|
Python
| false | false | 791 |
py
| 34 |
kalkulacka.py
| 34 | 0.532238 | 0.506953 | 0 | 21 | 36.52381 | 75 |
joewen85/mycmdb
| 9,483,287,799,487 |
f538932262f69303685311cdeac1ac7558e6dac0
|
20f37928911ec08475aa44c8226eb1671553b069
|
/apps/domain/urls.py
|
4198136fad8a4e1c806835d43da0e5fbe031a482
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/joewen85/mycmdb
|
b63a747780704eceab5c504d11dd9cc5c28293f7
|
cd5883b1a884fea645d5488d55bed0a0ce4d81c0
|
refs/heads/main
| 2023-09-01T12:54:15.587227 | 2023-08-21T02:12:58 | 2023-08-21T02:12:58 | 188,344,131 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2019-08-09 22:38
# @Author : Joe
# @Site :
# @File : urls.py
# @Software: PyCharm
# @function: URL routing for the domain app (domain list, blacklist CRUD, API docs)
from django.urls import path
from domain.views import DomainView, BlackListList, BlackListDetail
from rest_framework.documentation import include_docs_urls
# Title/description shown on the auto-generated DRF docs page at /docs/.
API_TITLE = 'devops api documentation'
API_DESCRIPTION = 'devops'
urlpatterns = [
    path('', DomainView.as_view()),
    path('blacklist/', BlackListList.as_view(), name='blacklist_list'),
    # NOTE(review): this detail route reuses the name 'blacklist_list'; it
    # probably should be 'blacklist_detail' -- confirm no reverse()/{% url %}
    # call depends on the duplicate before renaming.
    path('blacklist/<int:pk>', BlackListDetail.as_view(), name='blacklist_list'),
    path('docs/', include_docs_urls(title=API_TITLE, description=API_DESCRIPTION, authentication_classes=[], permission_classes=[]))
]
|
UTF-8
|
Python
| false | false | 711 |
py
| 207 |
urls.py
| 77 | 0.697609 | 0.679325 | 0 | 21 | 32.857143 | 132 |
thelearningcurves/ERINN
| 15,049,565,446,619 |
974c9bb57238e4d14ffb54c932863618539ddc92
|
768e5cd65886cc092f50e5507e94bf6aa4f66ae6
|
/erinn/preprocessing.py
|
659cac7fe6f7f1c115b286fd77024f034bf806fe
|
[
"MIT"
] |
permissive
|
https://github.com/thelearningcurves/ERINN
|
b6ca4170d159304777463430139ea41ae209e00f
|
8297db51f63d5ef961672ae7ccb01c5ef18c70a3
|
refs/heads/master
| 2022-04-12T00:04:56.506791 | 2020-03-20T08:36:21 | 2020-03-20T08:36:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""Custom preprocessing functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import generator_stop
from __future__ import print_function
import multiprocessing as mp
import os
import re
from functools import partial
import numba
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from erinn.utils.io_utils import read_config_file
from erinn.utils.io_utils import read_pkl
from erinn.utils.io_utils import write_pkl
def log_transform(arr, inverse=False, inplace=True):
    """
    Perform a logarithmic transformation or an inverse logarithmic transformation.

    new_array[i] = log10(arr[i] + 1), arr[i] >= 0
    new_array[i] = -log10(abs(arr[i] - 1)), arr[i] < 0

    Parameters
    ----------
    arr : numpy.ndarray
        An array which you want to perform logarithmic transformation or inverse logarithmic transformation.
    inverse : bool
        Whether to perform an inverse transformation.
    inplace : bool
        Whether to use inplace mode.

    Returns
    -------
    new_arr : numpy.ndarray, optional
        If `inplace` is False, then a transformed array is returned.

    References
    ----------
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html
    https://stackoverflow.com/questions/21610198/runtimewarning-divide-by-zero-encountered-in-log
    """
    # Work on the input itself in inplace mode, otherwise on a private copy.
    target = arr if inplace else arr.copy()
    # Boolean mask computed once, before any element is modified, so both
    # branches below see the original sign of every element.
    nonneg = target >= 0
    if inverse:
        # inverse transform: undo log10(x + 1) / -log10(|x - 1|)
        target[nonneg] = np.power(10, target[nonneg]) - 1
        target[~nonneg] = -np.power(10, -target[~nonneg]) + 1
    else:
        # forward transform: signed log10 with a +/-1 offset to avoid log(0)
        target[nonneg] = np.log10(target[nonneg] + 1)
        target[~nonneg] = -np.log10(np.abs(target[~nonneg] - 1))
    if not inplace:
        return target
@numba.njit()
def add_noise(x, scale=0.05, noise_type='normal'):
    """Add noise to each element of the array by a certain percentage.
    In order to handle large arrays under memory constraints, this function uses in-place mode.
    Parameters
    ----------
    x : numpy.ndarray
        Array that you wanted to add noise.
    scale : float, default 0.05
        If noise_type is 'normal', scale is represent the standard deviation.
        If noise_type is 'uniform', the noise added to element is proportional to this variable.
    noise_type: str, {'normal', 'uniform'}, default normal
        Noise type.
        "normal" indicates that the noise is sampled from a Gaussian probability distribution function.
        "uniform" indicates that the noise is sampled from a uniform probability distribution function.
    Returns
    -------
    None
    References
    ----------
    .. [1] https://stackoverflow.com/questions/44257931/fastest-way-to-add-noise-to-a-numpy-array
    .. [2] https://github.com/simpeg/simpeg/blob/178b54417af0b892a3920685056a489ab2b6cda1/SimPEG/Survey.py#L501-L502
    .. [3] https://stackoverflow.com/questions/14058340/adding-noise-to-a-signal-in-python/53688043#53688043
    .. [4] https://numba.pydata.org/numba-doc/latest/reference/numpysupported.html
    """
    # Since version 0.28.0, the generator is thread-safe and fork-safe.
    # Each thread and each process will produce independent streams of random numbers.
    # x = x.reshape(-1)  # flat view
    # NOTE(review): ravel() returns a *copy* for non-contiguous input, in which
    # case the noise would be added to the copy and silently lost — confirm
    # callers always pass contiguous arrays.
    x = x.ravel()  # flat view
    if noise_type == 'normal':
        # per-element multiplicative Gaussian noise: x += scale*|x|*N(0, 1)
        for i in range(len(x)):
            x[i] += scale * abs(x[i]) * np.random.normal(0.0, 1.0)
    elif noise_type == 'uniform':
        # per-element multiplicative uniform noise: x += scale*|x|*U(-1, 1)
        for i in range(len(x)):
            x[i] += scale * abs(x[i]) * np.random.uniform(-1.0, 1.0)
    else:
        raise(NotImplementedError('noise_type is not supported.'))
def source_receiver_midpoints(Tx_locations, Rx_locations):
    """
    Calculate source receiver midpoints.

    Parameters
    ----------
    Tx_locations : numpy.ndarray
        Transmitter locations.
    Rx_locations : numpy.ndarray
        Receiver locations.

    Returns
    -------
    midx : numpy.ndarray
        midpoints x location
    midz : numpy.ndarray
        midpoints z location

    References
    ----------
    https://github.com/simpeg/simpeg/blob/b8d716f86a4ea07ba3085fabb24c2bc974788040/SimPEG/EM/Static/Utils/StaticUtils.py#L128
    """
    mid_x = []
    mid_z = []
    n_pairs = len(Tx_locations)
    for i in range(n_pairs):
        # x-midpoint of the current-electrode (Tx) pair
        tx_mid_x = (Tx_locations[i, 0] + Tx_locations[i, 2]) / 2
        # x-midpoint of the potential-electrode (Rx) pair
        rx_mid_x = (Rx_locations[i, 0] + Rx_locations[i, 2]) / 2
        # z-midpoint of the current-electrode (Tx) pair
        tx_mid_z = (Tx_locations[i, 1] + Tx_locations[i, 3]) / 2
        # x-midpoint between the Tx and Rx midpoints
        mid_x.append((tx_mid_x + rx_mid_x) / 2)
        # half the Tx-Rx separation, shifted by the source depth (positive down)
        mid_z.append(np.abs(tx_mid_x - rx_mid_x) / 2 + tx_mid_z)
    # return both as (n, 1) column vectors
    return np.array(mid_x).reshape(-1, 1), np.array(mid_z).reshape(-1, 1)
def to_midpoint(array, Tx_locations, Rx_locations, value=0.0):
    """Reshape inputs tensor to midpoint image.
    shape = (accumulated number of same midpoint, number of midpoint, 1)
    Parameters
    ----------
    array : numpy.ndarray
        The array you want to reshape.
    Tx_locations : numpy.ndarray
        Transmitter locations.
    Rx_locations : numpy.ndarray
        Receiver locations.
    value : float
        The value of the blank element you want to fill in.
    Returns
    -------
    new_array : numpy.ndarray
        Reshaped array.
    """
    array = array.reshape(-1)  # flatten input arrays
    midx, midz = source_receiver_midpoints(Tx_locations, Rx_locations)  # calculate midpoint
    # group measurements by their unique x-midpoint; index_midx maps each
    # measurement to its group
    unique_midx, index_midx = np.unique(midx, return_inverse=True)
    num_unique_midx = len(unique_midx)
    num_midpoint = len(midx)  # number of midpoint
    new_array = [[] for i in range(num_unique_midx)]  # initialize new array (list of lists)
    # accumulate at same midpoint
    for i in range(num_midpoint):
        new_array[index_midx[i]].append([array[i], midz[i]])
    # sort by depth at the same midpoint
    for i in range(num_unique_midx):
        new_array[i].sort(key=lambda x: x[1])  # sort by midz (depth)
        new_array[i] = [ii[0] for ii in new_array[i]]  # drop midz
    # pad the list of lists to form an array
    # (columns with fewer measurements are right-padded with `value`;
    # requires tensorflow's keras preprocessing utilities)
    new_array = tf.keras.preprocessing.sequence.pad_sequences(new_array,
                                                              dtype='float64',
                                                              padding='post',
                                                              value=value)
    new_array = np.expand_dims(new_array.T, axis=2)  # reshape to 3D array
    return new_array
def to_txrx(array, Tx_locations, Rx_locations, value=0.0):
    """Reshape inputs tensor to Tx-Rx image.
    shape = (number of Tx pair, number of Rx pair, 1)
    Parameters
    ----------
    array : numpy.ndarray
        The array you want to reshape.
    Tx_locations : numpy.ndarray
        Transmitter locations.
    Rx_locations : numpy.ndarray
        Receiver locations.
    value : float
        The value of the blank element you want to fill in.
    Returns
    -------
    new_array : numpy.ndarray
        Reshaped array.
    """
    array = array.reshape(-1)  # flatten input arrays
    # find unique Tx pair and unique Rx pair; index_src/index_rec map each
    # measurement to its (Tx row, Rx column) in the image
    unique_Tx_locations, index_src = np.unique(Tx_locations, return_inverse=True, axis=0)
    unique_Rx_locations, index_rec = np.unique(Rx_locations, return_inverse=True, axis=0)
    # create a `value`-filled array and assign the measured values.
    # bug fix: the original used dtype=np.float, an alias deprecated in
    # NumPy 1.20 and removed in 1.24 (AttributeError on modern NumPy);
    # the builtin float is the exact equivalent (float64).
    num_index = len(index_src)
    new_array = np.full(
        (unique_Tx_locations.shape[0], unique_Rx_locations.shape[0]),
        value,
        dtype=float,
    )
    for i in range(num_index):
        new_array[index_src[i], index_rec[i]] = array[i]
    new_array = np.expand_dims(new_array, axis=2)  # reshape to 3D array
    return new_array
def to_section(array, nCx, nCy):
    """Reshape inputs tensor to section image.
    shape = (
        number of cell center mesh in the z (y) direction,
        number of cell center mesh in the x direction,
        1
    )
    Parameters
    ----------
    array : numpy.ndarray
        The array you want to reshape.
    nCx : int
        Number of cell center mesh in the x direction.
    nCy : int
        Number of cell center mesh in the z (y) direction.
    Returns
    -------
    new_array : numpy.ndarray
        Reshaped array.
    """
    # flatten, lay out as an (nCy, nCx) grid, and flip vertically (np.flipud)
    grid = array.reshape(-1).reshape((nCy, nCx))
    flipped = np.flipud(grid)
    # append a trailing channel axis -> (nCy, nCx, 1)
    return flipped[:, :, np.newaxis]
# TODO: use tfRecord
def make_processed_dataset(config_file):
    """
    Preprocess raw dataset and save it to processed directory.
    Parameters
    ----------
    config_file : str, pathlib.Path or dict
        The path to the configured yaml file or the dictionary for configuration.
    Returns
    -------
    None
    """
    config = read_config_file(config_file)
    raw_data_dir = config['raw_data_dir']
    save_processed_data_dir = config['save_processed_data_dir']
    preprocess = config['preprocess']
    simulator_pkl = os.path.join(raw_data_dir, 'simulator.pkl')
    save_simulator_pkl = os.path.join(save_processed_data_dir, 'simulator.pkl')
    # skip the directory walk entirely when no preprocessing step is enabled
    do_preprocess = any(value['perform'] for value in preprocess.values())
    simulator = read_pkl(simulator_pkl)
    # read nCx and nCy
    nCx = simulator.mesh.nCx  # number of cell center mesh in the x direction
    nCy = simulator.mesh.nCy  # number of cell center mesh in the z (y) direction
    # read Tx_locations and Rx_locations
    Tx_locations = simulator.urf.abmn_locations[:, :4]
    Rx_locations = simulator.urf.abmn_locations[:, 4:]
    # expand simulator.config and save it
    simulator.config = {
        'generate': simulator.config,  # config for generate data
        'preprocess': config  # config for preprocess data
    }
    os.makedirs(save_processed_data_dir, exist_ok=True)
    write_pkl(simulator, save_simulator_pkl)
    if do_preprocess:
        # bug fix: use a raw string and escape the dot. '\d' in a plain
        # string is an invalid escape sequence (SyntaxWarning on newer
        # Pythons) and the bare '.' matched any character, not a literal dot.
        pattern_raw_pkl = re.compile(r'raw_data_\d{6}\.pkl')
        for root_dir, sub_dirs, files in os.walk(raw_data_dir):
            # filter files list so the files list will contain pickle files that match the pattern
            files = list(filter(pattern_raw_pkl.match, files))
            # If the files list is empty, continue to the next iteration of the loop
            if not files:
                continue
            # mirror the raw sub-directory layout under the processed directory.
            # NOTE(review): raw_data_dir is used here as a regex pattern —
            # confirm the path never contains regex metacharacters.
            sub_dir_in_processed = re.sub(raw_data_dir, save_processed_data_dir, root_dir)
            os.makedirs(sub_dir_in_processed, exist_ok=True)
            # Preprocess each file in a worker pool. The per-file logic lives
            # in _make_processed_dataset (the previous duplicated commented-out
            # serial copy of it has been removed).
            par = partial(
                _make_processed_dataset,
                preprocess=preprocess,
                root_dir=root_dir,
                sub_dir_in_processed=sub_dir_in_processed,
                Tx_locations=Tx_locations, Rx_locations=Rx_locations,
                nCx=nCx, nCy=nCy
            )
            pool = mp.Pool(processes=mp.cpu_count(), maxtasksperchild=1)
            for data in tqdm(pool.imap_unordered(par, files),
                             desc=f'Preprocess data and save to {sub_dir_in_processed}',
                             total=len(files)):
                pass
            pool.close()
            pool.join()
        # show information about input / target tensor shape
        # ('data' is the last item yielded by the worker pool above)
        try:
            print("The shape of resistance (shape of NN input data): "
                  + f"{data['resistance'].shape}")
            print("The shape of resistivity (shape of NN target data): "
                  + f"{data['resistivity_log10'].shape}")
            print("IF YOU WANT TO GET THE RAW resistivity_log10, YOU SHOULD USE"
                  + " `raw_resistivity_log10 = np.flipud(resistivity_log10).flatten()`")
        except NameError:
            pass  # no pickle files were found, so 'data' was never bound
def _make_processed_dataset(filename, preprocess, root_dir, sub_dir_in_processed,
                            Tx_locations, Rx_locations, nCx, nCy):
    """Worker for make_processed_dataset: preprocess one raw pickle file.

    Reads `root_dir/filename`, applies each enabled step from the
    `preprocess` config to the 'resistance' / 'resistivity_log10' entries,
    writes the result under `sub_dir_in_processed` (with 'raw' renamed to
    'processed' in the file name), and returns the processed dict.
    """
    pkl_name = os.path.join(root_dir, filename)
    data = read_pkl(pkl_name)
    # check if the data is dict and have "resistance" and "resistivity_log10" keys
    if (not isinstance(data, dict)
            or data.get('resistance') is None
            or data.get('resistivity_log10') is None):
        raise Exception('data is not a dict or dict does not contain essential keys')
    # preprocess: each step mutates or replaces the arrays in `data`
    for k, v in preprocess.items():
        if k == 'add_noise' and v.get('perform'):
            add_noise(data['resistance'], **v.get('kwargs'))
        elif k == 'log_transform' and v.get('perform'):
            log_transform(data['resistance'], **v.get('kwargs'))
        elif k == 'to_midpoint' and v.get('perform'):
            data['resistance'] = to_midpoint(
                data['resistance'], Tx_locations, Rx_locations
            )
        elif k == 'to_txrx' and v.get('perform'):
            data['resistance'] = to_txrx(
                data['resistance'], Tx_locations, Rx_locations
            )
        elif k == 'to_section' and v.get('perform'):
            data['resistivity_log10'] = to_section(
                data['resistivity_log10'], nCx, nCy
            )
    # save pickle in processed dir
    new_pkl_name = os.path.join(
        sub_dir_in_processed,
        re.sub(r'raw', r'processed', filename)
    )
    write_pkl(data, new_pkl_name)
    return data
|
UTF-8
|
Python
| false | false | 17,009 |
py
| 13 |
preprocessing.py
| 10 | 0.568346 | 0.552943 | 0 | 440 | 36.656818 | 125 |
TheCulliganMan/flask-admin-profiler
| 5,334,349,425,705 |
0f912f432848c077e5cb71bad718f2da1ac35d47
|
ae349c0dea196b637632cb3220115e68b13d4895
|
/flask_admin_profiler/base.py
|
48ffc8dfe17c7867f2155b2e7340b7c6d672b633
|
[] |
no_license
|
https://github.com/TheCulliganMan/flask-admin-profiler
|
20a728f888d6a0a09c8332404522b675f06a6892
|
c343586c1155418fa95553c84e2cf1a3b2be0b95
|
refs/heads/master
| 2020-05-15T23:51:10.488962 | 2019-04-22T15:05:21 | 2019-04-22T15:05:21 | 182,563,464 | 0 | 0 | null | true | 2019-04-21T17:35:00 | 2019-04-21T17:34:59 | 2017-08-03T10:32:03 | 2015-05-29T15:44:35 | 124 | 0 | 0 | 0 | null | false | false |
import os
import os.path as op
from flask_admin import BaseView
class ProfilerBaseView(BaseView):
    """Base flask-admin view for the profiler.

    Resolves this package's directory so the bundled ``templates/`` and
    ``static/`` folders are served regardless of the working directory.
    """

    base_template = 'admin/base.html'

    def __init__(self, name, category=None, **kwargs):
        # Package directory; bundled assets live next to this module.
        # (bug fix: removed leftover debug print() calls)
        self.base_path = op.dirname(__file__)
        super(ProfilerBaseView, self).__init__(
            name,
            category=category,
            static_folder=op.join(self.base_path, 'static'),
            **kwargs)

    # Override template path
    def create_blueprint(self, admin):
        """Point the generated blueprint at the bundled template/static dirs."""
        blueprint = super(ProfilerBaseView, self).create_blueprint(admin)
        blueprint.template_folder = op.join(self.base_path, 'templates')
        blueprint.static_folder = op.join(self.base_path, 'static')
        return blueprint
|
UTF-8
|
Python
| false | false | 892 |
py
| 15 |
base.py
| 7 | 0.57287 | 0.57287 | 0 | 25 | 34.68 | 95 |
MichaelGlasbrenner/MGFileSystem
| 2,834,678,419,452 |
697b81c1f986f245b234cf85c66ab12ba79a856f
|
9401eda4be1d15a0fae20a33a3d68de3b20f8c11
|
/tests/test_touch.py
|
789dddf2bf9c9bccbbf446f5834cc4bf404aede0
|
[] |
no_license
|
https://github.com/MichaelGlasbrenner/MGFileSystem
|
6d23503508e09596e5e8204d6e41b65a2f702235
|
10fdd49eebb2a7ee06ebcab4191743c24f41ce4b
|
refs/heads/master
| 2020-06-28T08:51:52.030216 | 2020-06-26T21:31:48 | 2020-06-26T21:31:48 | 200,192,369 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import test_tools
import subprocess
def test_touch_new_file():
    """Check that `touch` creates a new file with the default permissions.

    Returns a (success, error_message) tuple rather than asserting, matching
    the project's custom test-runner convention. Cleans up testdir/new_file
    on every exit path.
    """
    success = False
    error_message = ""
    file_list = test_tools.get_list_of_files("testdir")
    if test_tools.file_exists("new_file", file_list):
        # a stale file from a previous run would invalidate the test
        error_message = "file already existed"
        subprocess.call(["rm", "testdir/new_file"])
        return success, error_message
    # bug fix: the original wrapped this call in `with open('temp_output', "w")`
    # but never used the handle, leaving an empty temp_output artifact behind.
    subprocess.call(["touch", "testdir/new_file"])
    file_list = test_tools.get_list_of_files("testdir")
    ls_output = test_tools.ls_output("testdir")
    if test_tools.file_exists("new_file", file_list):
        success = True
    else:
        error_message = "file was not created"
        return success, error_message
    if test_tools.get_file_property("permissions", "new_file", ls_output) != "-rw-r--r--":
        success = False
        error_message = "wrong file permissions"
        subprocess.call(["rm", "testdir/new_file"])
        return success, error_message
    test_tools.check_creation_time("new_file", ls_output)
    subprocess.call(["rm", "testdir/new_file"])
    return success, error_message
|
UTF-8
|
Python
| false | false | 1,227 |
py
| 22 |
test_touch.py
| 18 | 0.599022 | 0.599022 | 0 | 38 | 31.263158 | 100 |
AnimaTakeshi/windmill-django
| 7,627,861,966,880 |
3f08e00b9330ba69ba442a2c22b2247c79e6cb57
|
65e73c6c4a9e66715be2cbdd93339ebcab93976e
|
/windmill/ativos/migrations/0015_auto_20181018_1801.py
|
94d876e1e2de5cc99fed28dfc8bd1e2bdaf58cf3
|
[] |
no_license
|
https://github.com/AnimaTakeshi/windmill-django
|
3577f304d5e7f74750c7d95369e87d37209f1ac6
|
78bde49ace1ed215f6238fe94c142eac16e164dc
|
refs/heads/master
| 2022-12-13T11:13:21.859012 | 2019-02-07T20:50:01 | 2019-02-07T20:50:01 | 150,470,109 | 0 | 0 | null | false | 2022-12-08T01:29:36 | 2018-09-26T18:13:54 | 2019-02-07T20:53:11 | 2022-12-08T01:29:36 | 1,254 | 0 | 0 | 19 |
Python
| false | false |
# Generated by Django 2.0 on 2018-10-18 21:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: convert the aplicacao/resgate cotizacao and
    liquidacao fields on fundo_local and fundo_offshore to DurationField.

    NOTE(review): each field is dropped (RemoveField) and then re-added
    (AddField), so any existing data in these columns is discarded —
    confirm this is acceptable before applying to a populated database.
    """

    dependencies = [
        ('ativos', '0014_auto_20181018_1754'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='fundo_local',
            name='data_cotizacao_aplicacao',
        ),
        migrations.AddField(
            model_name='fundo_local',
            name='data_cotizacao_aplicacao',
            field=models.DurationField(),
        ),
        migrations.RemoveField(
            model_name='fundo_local',
            name='data_cotizacao_resgate',
        ),
        migrations.AddField(
            model_name='fundo_local',
            name='data_cotizacao_resgate',
            field=models.DurationField(),
        ),
        migrations.RemoveField(
            model_name='fundo_local',
            name='data_liquidacao_aplicacao',
        ),
        migrations.AddField(
            model_name='fundo_local',
            name='data_liquidacao_aplicacao',
            field=models.DurationField(),
        ),
        migrations.RemoveField(
            model_name='fundo_local',
            name='data_liquidacao_resgate',
        ),
        migrations.AddField(
            model_name='fundo_local',
            name='data_liquidacao_resgate',
            field=models.DurationField(),
        ),
        migrations.RemoveField(
            model_name='fundo_offshore',
            name='data_cotizacao_aplicacao',
        ),
        migrations.AddField(
            model_name='fundo_offshore',
            name='data_cotizacao_aplicacao',
            field=models.DurationField(),
        ),
        migrations.RemoveField(
            model_name='fundo_offshore',
            name='data_cotizacao_resgate',
        ),
        migrations.AddField(
            model_name='fundo_offshore',
            name='data_cotizacao_resgate',
            field=models.DurationField(),
        ),
        migrations.RemoveField(
            model_name='fundo_offshore',
            name='data_liquidacao_aplicacao',
        ),
        migrations.AddField(
            model_name='fundo_offshore',
            name='data_liquidacao_aplicacao',
            field=models.DurationField(),
        ),
        migrations.RemoveField(
            model_name='fundo_offshore',
            name='data_liquidacao_resgate',
        ),
        migrations.AddField(
            model_name='fundo_offshore',
            name='data_liquidacao_resgate',
            field=models.DurationField(),
        ),
    ]
|
UTF-8
|
Python
| false | false | 2,574 |
py
| 161 |
0015_auto_20181018_1801.py
| 149 | 0.539239 | 0.527584 | 0 | 85 | 29.282353 | 46 |
rlcjj/cequant
| 14,817,637,203,304 |
4f98922d0a0217c22f557853fc41092e2dea2067
|
adc1f09c948d4250e4cbfedfa2daf3158c4e954b
|
/service/dumpdata/main.py
|
19085f13210ddbf6f96dcaa5b43d0ff75a5c4599
|
[] |
no_license
|
https://github.com/rlcjj/cequant
|
e1dd85237826911c45ab1adb4e549bc7a0f8dcc8
|
597d3151a9991d35244ce5c061acd31742189d6f
|
refs/heads/master
| 2017-12-03T17:35:53.683787 | 2017-03-23T16:56:16 | 2017-03-23T16:56:16 | 86,044,280 | 1 | 0 | null | true | 2017-03-24T08:22:06 | 2017-03-24T08:22:06 | 2017-03-23T16:57:28 | 2017-03-23T16:57:26 | 45 | 0 | 0 | 0 | null | null | null |
#coding=utf-8
from core.scanner import set_trace
from core.settings import DATA_BASE_PATH
from .dumpdbtrace import dump_trace
@set_trace('dumpdata.stock_value_factor')
def dump_stock_value_factor(iostream,cmd):
    """Dump the 'StockValueFactor' model data under DATA_BASE_PATH via
    dump_trace; returns the iostream unchanged (pass-through for the
    trace pipeline). `cmd` is accepted but unused here.
    """
    model_class_name = 'StockValueFactor'
    dump_trace(model_class_name,DATA_BASE_PATH)
    return iostream
@set_trace('dumpdata.stock_report_factor')
def dump_stock_report_factor(iostream,cmd):
    """Dump the 'StockReportFactor' model data under DATA_BASE_PATH via
    dump_trace; returns the iostream unchanged (pass-through for the
    trace pipeline). `cmd` is accepted but unused here.
    """
    model_class_name = 'StockReportFactor'
    dump_trace(model_class_name,DATA_BASE_PATH)
    return iostream
|
UTF-8
|
Python
| false | false | 522 |
py
| 48 |
main.py
| 43 | 0.758621 | 0.756705 | 0 | 18 | 28.055556 | 47 |
TheVinhLuong102/FLAML
| 15,702,400,458,201 |
e174c6a09e3f485bca875140a45aca6f8dd3572a
|
ac5e821a3016d7157ed0558e32f6c7e379b31057
|
/test/tune/test_flaml_raytune_consistency.py
|
dee393c3a5d719e19a80cf92f93cfb5ebad1d784
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
https://github.com/TheVinhLuong102/FLAML
|
06d1d6227902e9345c995868f24d7188c69a0ea8
|
2f5d6169d3b5cc025eb2516cbd003fced924a88e
|
refs/heads/main
| 2023-06-23T23:22:22.749417 | 2021-12-26T00:13:39 | 2021-12-26T00:13:39 | 389,860,327 | 0 | 0 |
MIT
| true | 2021-07-27T05:32:00 | 2021-07-27T05:31:59 | 2021-07-27T05:31:58 | 2021-07-25T06:28:31 | 18,277 | 0 | 0 | 0 | null | false | false |
# import unittest
import numpy as np
# require: pip install flaml[blendsearch, ray]
# require: pip install flaml[ray]
import time
from flaml import tune
def evaluate_config(config):
    """evaluate a hyperparameter configuration"""
    # we use a toy example with 2 hyperparameters
    metric = (round(config["x"]) - 85000) ** 2 - config["x"] / config["y"]
    # usually the evaluation takes a non-negligible cost
    # and the cost could be related to certain hyperparameters
    # in this example, we assume it's proportional to x
    time.sleep(config["x"] / 100000)
    # use tune.report to report the metric to optimize
    tune.report(metric=metric)
# 2-D search space: x is sampled log-uniformly, y uniformly.
config_search_space = {
    "x": tune.lograndint(lower=1, upper=100000),
    "y": tune.randint(lower=1, upper=100000),
}
# evaluate_config's cost is proportional to x, so start the search cheap
low_cost_partial_config = {"x": 1}
def setup_searcher(searcher_name):
    """Build the flaml search algorithm selected by *searcher_name*.

    Any name containing 'cfo' -> CFO; 'bs' -> BlendSearch;
    'random' -> RandomSearch; anything else -> None.
    """
    from flaml.searcher.blendsearch import BlendSearch, CFO, RandomSearch
    if "cfo" in searcher_name:
        searcher = CFO(
            space=config_search_space, low_cost_partial_config=low_cost_partial_config
        )
    elif searcher_name == "bs":
        searcher = BlendSearch(
            metric="metric",
            mode="min",
            space=config_search_space,
            low_cost_partial_config=low_cost_partial_config,
        )
    elif searcher_name == "random":
        searcher = RandomSearch(space=config_search_space)
    else:
        return None
    return searcher
def _test_flaml_raytune_consistency(
    num_samples=-1, max_concurrent_trials=1, searcher_name="cfo"
):
    """Run the same search twice — once via flaml's tune.run, once via ray
    tune with the same searcher — and assert both explore identical configs
    and agree on the best one. Skips silently when ray is not installed.
    """
    try:
        from ray import tune as raytune
    except ImportError:
        print(
            "skip _test_flaml_raytune_consistency because ray tune cannot be imported."
        )
        return
    # fixed seed so both runs draw the same random stream
    np.random.seed(100)
    searcher = setup_searcher(searcher_name)
    analysis = tune.run(
        evaluate_config,  # the function to evaluate a config
        config=config_search_space,  # the search space
        low_cost_partial_config=low_cost_partial_config,  # a initial (partial) config with low cost
        metric="metric",  # the name of the metric used for optimization
        mode="min",  # the optimization mode, 'min' or 'max'
        num_samples=num_samples,  # the maximal number of configs to try, -1 means infinite
        time_budget_s=None,  # the time budget in seconds
        local_dir="logs/",  # the local directory to store logs
        search_alg=searcher,
        # verbose=0,  # verbosity
        # use_ray=True,  # uncomment when performing parallel tuning using ray
    )
    flaml_best_config = analysis.best_config
    flaml_config_in_results = [v["config"] for v in analysis.results.values()]
    print(analysis.best_trial.last_result)  # the best trial's result
    print("best flaml", searcher_name, flaml_best_config)  # the best config
    print("flaml config in results", searcher_name, flaml_config_in_results)
    # re-seed and rebuild the searcher so the ray run starts from the
    # identical state as the flaml run above
    np.random.seed(100)
    searcher = setup_searcher(searcher_name)
    from ray.tune.suggest import ConcurrencyLimiter
    search_alg = ConcurrencyLimiter(searcher, max_concurrent_trials)
    analysis = raytune.run(
        evaluate_config,  # the function to evaluate a config
        config=config_search_space,
        metric="metric",  # the name of the metric used for optimization
        mode="min",  # the optimization mode, 'min' or 'max'
        num_samples=num_samples,  # the maximal number of configs to try, -1 means infinite
        local_dir="logs/",  # the local directory to store logs
        # max_concurrent_trials=max_concurrent_trials,
        # resources_per_trial={"cpu": max_concurrent_trials, "gpu": 0},
        search_alg=search_alg,
    )
    ray_best_config = analysis.best_config
    ray_config_in_results = [v["config"] for v in analysis.results.values()]
    print(analysis.best_trial.last_result)  # the best trial's result
    print("ray best", searcher_name, analysis.best_config)  # the best config
    print("ray config in results", searcher_name, ray_config_in_results)
    assert ray_best_config == flaml_best_config, "best config should be the same"
    assert (
        flaml_config_in_results == ray_config_in_results
    ), "results from raytune and flaml should be the same"
def test_consistency():
    """Check flaml/raytune consistency for every supported searcher."""
    for searcher_name in ("random", "cfo", "bs"):
        _test_flaml_raytune_consistency(
            num_samples=5, max_concurrent_trials=1, searcher_name=searcher_name
        )
if __name__ == "__main__":
    # unittest.main()
    # run the flaml/raytune consistency checks when executed as a script
    test_consistency()
|
UTF-8
|
Python
| false | false | 4,684 |
py
| 84 |
test_flaml_raytune_consistency.py
| 44 | 0.661827 | 0.652007 | 0 | 123 | 37.081301 | 100 |
jnash2001/unfone
| 19,061,064,903,021 |
ecd9b1b3908d420d634545b57fc0eee5c995f533
|
6e81b08e7a8c64f7c92136acee8c80e8ee959f2c
|
/run2.py
|
dccf5ad3f0fab5b8596a04a3f3e9a2eaff10f942
|
[] |
no_license
|
https://github.com/jnash2001/unfone
|
b49650848f282c0bee38f113fbdf983eb9f736b7
|
ed1e063907bbadbd57499807773259f3b333b0b7
|
refs/heads/master
| 2022-11-27T14:16:20.168669 | 2020-08-04T08:13:12 | 2020-08-04T08:13:12 | 284,918,405 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import datetime
from pytz import timezone

# Put the wlan1 interface into monitor mode (creates wlan1mon).
os.system("sudo airmon-ng start wlan1")

# Current time in the Asia/Kolkata timezone. '.', ':' and ' ' are replaced
# with '_' so the timestamp is safe to use in a file name.
# (The original built the string character-by-character with `+=`, which is
# quadratic; str.translate performs the identical mapping in one pass.)
timestamp = str(datetime.datetime.now(timezone('Asia/Kolkata')))
newstamp = timestamp.translate(str.maketrans(".: ", "___"))

# Start the probe-request monitor, logging to a timestamped file.
os.system("sudo python probemon4.py -i wlan1mon -o logs/" + newstamp + " -f -r -s -l")
|
UTF-8
|
Python
| false | false | 475 |
py
| 1 |
run2.py
| 1 | 0.614737 | 0.608421 | 0 | 18 | 25.388889 | 82 |
ccjeremiahlin/DataAnalytics_96_777
| 16,758,962,401,227 |
a26115d87a2434b3e30ea64af33a8e958e9b2bf5
|
3232d72500afc4c0d23ad6f4d3cf6d51aa039804
|
/xlsx2csv_p3.py
|
c02b3dab49489b34a8d481b4308e295febeeb69e
|
[] |
no_license
|
https://github.com/ccjeremiahlin/DataAnalytics_96_777
|
805c687f126702340371ace7fa519418dd35c0ed
|
02a627f189035ac3b7d31abae53982ba53016e39
|
refs/heads/master
| 2021-01-17T06:33:47.247571 | 2015-04-26T00:03:55 | 2015-04-26T00:03:55 | 33,592,632 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Simple python script to convert XLSX sheets to CSV
Usage:
python xls2csv.py <excelfile.xlsx>
Restrictions:
Requires xlrd module:
sudo pip install xlrd
Sheet name hardcoded: EKO_NEFTDetailsAuto
"""
import xlrd
import csv
import sys
import os
import pandas as pd
def csv_from_excel(xlsx_file):
    """Convert the default sheet of *xlsx_file* to a CSV with the same stem.

    'NA' cells are parsed as missing values; the output is written next to
    the input as ``<stem>.csv``.
    """
    # bug fix: the original ended these prints with a stray trailing comma —
    # a Python-2 "suppress newline" leftover that in Python 3 just builds a
    # throwaway tuple and still prints a newline. end=" " restores the
    # intended same-line progress output.
    print("Reading Excel spreadsheet data...", end=" ", flush=True)
    xls = pd.ExcelFile(xlsx_file)
    print("Done!")
    file_name, _ = os.path.splitext(xlsx_file)  # extension was never used
    df = xls.parse(index_col=None, na_values=['NA'])
    print("Writing csv file...", end=" ", flush=True)
    df.to_csv(file_name + '.csv')
    print("Done!")
def main():
    """CLI entry point: convert the spreadsheet named on the command line."""
    try:
        csv_from_excel(sys.argv[1])
    except Exception as e:
        print('Something might have gone wrong. Did you called the program correctly?')
        print('Usage:')
        print('    python xls2csv.py <excelfile.xlsx>')
        print("Here is the error, in any case:")
        # bug fix: the message above promised the error but the original
        # never printed the caught exception
        print(e)


if __name__ == "__main__":
    main()
|
UTF-8
|
Python
| false | false | 962 |
py
| 5 |
xlsx2csv_p3.py
| 5 | 0.638254 | 0.635135 | 0 | 40 | 23.025 | 87 |
SPSingh1998/Banking
| 7,086,696,086,163 |
49384c5eb4ccb62713e7a8fd86dbf5f350bdc0ad
|
fa7ed7e7b97d7a4a7dfa78507855cdbc2f0a118b
|
/View.py
|
d0781041379114f9fecec570b81b647ac666c3be
|
[] |
no_license
|
https://github.com/SPSingh1998/Banking
|
a895a9319e3c202cfd61cf0a409f83ec889e378e
|
8f1b84d6ed4550f9eb4bf35e0757714485236cf1
|
refs/heads/master
| 2020-08-31T03:14:24.739620 | 2019-10-30T16:56:47 | 2019-10-30T16:56:47 | 218,570,554 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from pymysql import cursors
from tkinter import *
from tkinter import messagebox
from PIL import ImageTk,Image
import pymysql.cursors
from time import gmtime, strftime
import DB
import main
class view_class:
    """Tkinter window listing all bank accounts from the `tbuser` table.

    Selecting an account number in the listbox shows that account's details
    in a label grid. `id` identifies the logged-in user and is passed back
    to main.Main when the Back button is pressed.
    """
    def __init__(self,id):
        self.id=id
        self.root=Tk()
        self.root.geometry("600x500+400+175")
        self.root.resizable(False,False)
        self.root.title("View All Account Window")
        self.root.config(background="light blue")
        # NOTE(review): hard-coded localhost/root credentials — move to config.
        self.conn=pymysql.connect(host='localhost',user='root',password='root',db='dbbank')
        self.cursor=self.conn.cursor()
        # header image at the top of the window
        self.l0=Label(self.root,text='this is demo')
        self.i1=ImageTk.PhotoImage(Image.open("images\\view_all_accounts.png"))
        self.l0.config(image=self.i1,background="light blue")
        self.l0.place(relx=0.2,rely=0.05)
        # listbox with every account number (first column of tbuser)
        self.l11=Listbox(self.root,selectmode=SINGLE,height=40,width=30)
        qry="select * from tbuser"
        self.cursor.execute(qry)
        for row in self.cursor:
            self.l11.insert(END,str(row[0]))
        self.l11.bind("<<ListboxSelect>>", self.onSelect)
        self.l11.place(relx=0.72,rely=0.15)
        # detail panel: field labels (l1-l8) and value labels (t1-t8);
        # placed later by onSelect, once an account is chosen
        self.f1=Frame(self.root,height=240,width=360,background="light blue")
        self.l1=Label(self.f1,text="Account No",background="light blue")
        self.l2=Label(self.f1,text="Name",background="light blue")
        self.l3=Label(self.f1,text="Address",background="light blue")
        self.l4=Label(self.f1,text="Gender",background="light blue")
        self.l5=Label(self.f1,text="Phone No",background="light blue")
        self.l6=Label(self.f1,text="Email",background="light blue")
        self.l7=Label(self.f1,text="Opening Date",background="light blue")
        self.l8=Label(self.f1,text="Balance",background="light blue")
        self.t1=Label(self.f1,background="light blue")
        self.t2=Label(self.f1,background="light blue")
        self.t3=Label(self.f1,background="light blue")
        self.t4=Label(self.f1,background="light blue")
        self.t5=Label(self.f1,background="light blue")
        self.t6=Label(self.f1,background="light blue")
        self.t7=Label(self.f1,background="light blue")
        self.t8=Label(self.f1,background="light blue")
        self.l1.place(relx=0.09,rely=0.08)
        self.l2.place(relx=0.09,rely=0.18)
        self.l3.place(relx=0.09,rely=0.28)
        self.l4.place(relx=0.09,rely=0.38)
        self.l5.place(relx=0.09,rely=0.48)
        self.l6.place(relx=0.09,rely=0.58)
        self.l7.place(relx=0.09,rely=0.68)
        self.l8.place(relx=0.09,rely=0.78)
        self.t1.place(relx=0.4,rely=0.08)
        self.t2.place(relx=0.4,rely=0.18)
        self.t3.place(relx=0.4,rely=0.28)
        self.t4.place(relx=0.4,rely=0.38)
        self.t5.place(relx=0.4,rely=0.48)
        self.t6.place(relx=0.4,rely=0.58)
        self.t7.place(relx=0.4,rely=0.68)
        self.t8.place(relx=0.4,rely=0.78)
        self.b1=Button(self.root,text="Back",command=self.back)
        self.b1.place(relx=0,rely=0)
        self.root.mainloop()
    def back(self):
        """Close this window and return to the main window for this user."""
        self.root.destroy()
        obj2=main.Main(self.id)
    def onSelect(self,event):
        """Listbox callback: load the selected account's row into the detail panel."""
        widget = event.widget
        selection=widget.curselection()
        value=str((widget.get(ANCHOR)))
        #print(qry)
        #print(value)
        # parameterized query: `value` is passed as a query argument
        self.cursor.execute("select * from tbuser where accno=%s",value)
        self.f1.place(relx=0.1,rely=0.15)
        r=self.cursor.rowcount
        row=self.cursor.fetchone()
        if r>0:
            self.t1.config(text=row[0])
            self.t2.config(text=row[1])
            self.t3.config(text=row[2])
            self.t4.config(text=row[3])
            self.t5.config(text=row[4])
            self.t6.config(text=row[5])
            self.t7.config(text=row[6])
            self.t8.config(text=row[7])
        else:
            messagebox.showinfo("Info","Internal Error")
        #print(selection)
        #print(event)
#print(selection)
#print(event)
#obj=view_class("aa@gmail.com")
|
UTF-8
|
Python
| false | false | 4,114 |
py
| 19 |
View.py
| 19 | 0.599174 | 0.547642 | 0 | 110 | 36.409091 | 91 |
rdevnoah/python_practice03
| 1,451,698,964,784 |
273f7ad02fe54162e870154403e435e545156f54
|
199d9f0dfb86bd85f8b9cc975469c942eaeb3f94
|
/prob02.py
|
bdb21e7b627ef03f83413c0e647d835eaad41987
|
[] |
no_license
|
https://github.com/rdevnoah/python_practice03
|
1a7493caf00768cdeb033ad6f2d50243d633854d
|
223ed81ed06c08d721683ad0358c5669533172f0
|
refs/heads/master
| 2020-06-04T06:42:05.908596 | 2019-06-14T08:54:22 | 2019-06-14T08:54:22 | 191,909,129 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Write an frange() function analogous to range() that returns a list of floats.
def frange(stop, start=0.0, step=0.1):
    """Return floats from start (inclusive) up to stop (exclusive) in `step`
    increments, each rounded to 2 decimal places.

    Mirrors the original call style: a single argument is the stop value,
    and if stop < start the two are swapped (so frange(1.0, 3.0) counts
    from 1.0 up to 3.0).
    """
    if stop < start:
        stop, start = start, stop
    values = []
    index = 0
    current = start
    while current < stop:
        values.append(round(current, 2))
        index += 1
        # Multiply instead of repeatedly adding `step` so floating-point
        # error does not accumulate over long ranges.
        current = start + index * step
    return values
# Smoke checks: a single argument is the `stop` value; two-argument calls
# rely on the swap inside frange() because the first positional is stop.
print(frange(2))
print(frange(2.0))
print(frange(2))
print(frange(1.0, 2.0))
print(frange(1.0, 3.0, 0.5))
|
UTF-8
|
Python
| false | false | 438 |
py
| 3 |
prob02.py
| 3 | 0.560526 | 0.510526 | 0 | 19 | 18.894737 | 67 |
westzyan/blastpipeline
| 12,283,606,471,484 |
734abc963f56f952c3701fbffc0c359498f8a99c
|
d444aa5b6de348e63e0036c4c9c37de40714bd67
|
/logreader-dill.py
|
043d65c7670dced15f4d01c5de4f7b82954ac9be
|
[] |
no_license
|
https://github.com/westzyan/blastpipeline
|
1fbb3ae39b6821937f100ea1ba006d9d496f51d1
|
8dab8f86e8e861bab58e422c67898719a28ba04f
|
refs/heads/master
| 2021-02-12T11:57:06.751727 | 2020-02-07T05:05:21 | 2020-02-07T05:05:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#logreader.py produces 3 dill files "-small" "-large" "-open"
#this file reads them to get results
import time
import calendar
import re
##import parse
#change from tbrreader: Server contains both ent and ci, replacing them in Resources and Connection.
class Resource:
    """One HTTP transaction (request/response pair) reconstructed from the log.

    All time* attributes are unix-epoch floats, left as None until the
    matching log event is seen; counters default to 0.
    """

    def __init__(self):
        self.ptr = ""
        self.URI = ""
        self.countWritten = 0
        self.countRead = 0
        self.ind = None              #used by printout of referrer
        self.Connection = None
        self.Server = None
        self.started = 0             #DispatchTransaction
        self.ended = 0               #mResponseIsComplete
        self.dispatched = 0          #total dispatch count; dropped count is this - 1
        self.pipelined = 0           #is this pipelined? (the first resource counts too)
        self.context = None
        self.parent = None           #referrer is not necessarily parent
        self.parentrule = None       #1-6
        self.parentwritten = None
        self.timeCreated = None      #InitTransaction
        self.timeStarted = None      #DispatchTransaction
        self.timeRead = None         #first ReadRequestSegment
        self.timeWritten = None      #first WriteRequestSegment
        self.timeEnded = None        #mResponseIsComplete
        self.curActive = None        #resources active when this started (steps 1 and 2)
        self.neighbors = []          #used for step 4
        self.lastwrites = []         #resources last written to within 0.2s (steps 5 and 6)
        self.mUsingSpdy = 0          #0 = http/1.1, 1 = http/2

    def __str__(self):
        shown = self.URI if len(self.URI) <= 100 else self.URI[:97] + "..."
        parts = [shown, " at ptr ", self.ptr]
        if self.Connection is not None:
            parts.append(" on Connection " + self.Connection.ptr)
        else:
            parts.append(" on Connection (None) ")
        return "".join(parts)

    def __repr__(self):
        return self.__str__()
class Connection:
    """A persistent HTTP connection: its transactions, socket pair and
    lifecycle timestamps."""

    def __init__(self):
        self.ptr = ""
        self.Transactions = []
        self.SocketIn = None
        self.SocketOut = None
        self.Server = None
        self.timeCreated = None   #creation
        self.timeNPN = None       #npn negotiation completed
        self.timeSPDY = None      #earliest use of spdy
        self.timeClosed = None    #if closed.
        self.ind = None           #index in Connections

    def __str__(self):
        if self.Transactions:
            return "Connection {} carrying Transaction {} on Socket {} {}".format(
                self.ptr, self.Transactions, self.SocketIn.ptr, self.SocketOut.ptr)
        return "Connection {} carrying Transaction (None) on Socket {} {}".format(
            self.ptr, self.SocketIn.ptr, self.SocketOut.ptr)

    def __repr__(self):
        return self.__str__()
class Socket:
    """Byte counters for one socket endpoint pair."""

    def __init__(self):
        self.ptr = ""
        self.Connection = None
        self.totalInc = 0
        self.totalOut = 0

    def __str__(self):
        return "Socket {} ({} out, {} inc)".format(
            self.ptr, self.totalOut, self.totalInc)

    def __repr__(self):
        return self.__str__()
class Server:
    """One origin server keyed by its connection-info string.

    Per the header note, this merged Server holds both the entity and the
    connection info (ci), replacing them in Resources and Connection.
    """
    def __init__(self):
        self.ptr = None
        self.ci = ""
        self.is_tls = None
        self.is_http2 = None
        self.is_pipelined = None
        self.cert_length = None
        self.rec_length = 0
        self.events = []
    def printevents(self):
        # Python 2 print statements: this module predates Python 3.
        print "Events for " + repr(self)
        for e in self.events:
            print e
    def __str__(self):
        return "Server [ci={}, ptr={}]".format(self.ci, self.ptr)
    def __repr__(self):
        return str(self)
def str_to_epochs(string):
    """Convert a log timestamp like '2019-06-10 12:40:46.289654 UTC' to
    unix epoch seconds as a float (interpreted as UTC)."""
    trimmed = string.strip()[:-4]                 # drop the trailing " UTC"
    pieces = trimmed.split(".")
    whole, frac = pieces[0], pieces[1]            # seconds / sub-second digits
    parsed = time.strptime(whole, "%Y-%m-%d %H:%M:%S")
    return calendar.timegm(parsed) + float("0." + frac)
def epochs_to_str(t):
    """Inverse of str_to_epochs: format unix epoch seconds as
    'YYYY-MM-DD HH:MM:SS.micro UTC' with microsecond precision (UTC).

    The original recovered the fraction by parsing repr(t), which raised
    IndexError for ints and broke for floats whose repr has no '.'
    (e.g. scientific notation); derive it arithmetically instead.
    """
    whole = int(t)
    micro = int(round((t - whole) * 1e6))
    if micro == 1000000:  # rounding spilled over into the next second
        whole += 1
        micro = 0
    s = time.gmtime(whole)
    return "{}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}.{:06d} UTC".format(
        s.tm_year, s.tm_mon, s.tm_mday,
        s.tm_hour, s.tm_min, s.tm_sec, micro)
def parse(line):
    """Split one log line into a dict of fields.

    Returns {"t": <epoch float>, ...} where remaining keys come from
    tab-separated "key=value" tokens; tokens without '=' land under "text".
    NOTE(review): reads a module-level OLD_LOG flag that is not defined in
    this file -- confirm it is set before parse() is called.
    """
    timestamp = str_to_epochs(line.split(" - ")[0])
    payload = line.split(" - ")[1].split("]: ")[1]
    if OLD_LOG == 0:
        # new-style log lines carry one extra leading token; drop it
        payload = " ".join(payload.split(" ")[1:])
    payload = payload.strip()
    params = {"t": timestamp}
    try:
        for token in payload.split("\t"):
            if "=" in token:
                params[token.split("=")[0]] = token.split("=")[1]
            else:
                params["text"] = token
    except:
        params["text"] = payload
    return params
def URI_format(URI):
    """Strip the fragment identifier -- fragments never go on the wire."""
    return URI.partition("#")[0]
def ci_to_URI(ci):
    """Take the part of the connection-info string before the first ':' and
    drop its first 7 characters (presumably a scheme prefix like
    'http://' -- confirm against the ci format)."""
    head = ci.split(":", 1)[0]
    return head[7:]
import numpy
import dill
def proc_data(rets, results):
for k in results.keys():
[Resources, Connections, Servers, Sockets] = results[k]
## if "comp0" in k and not "3-35-comp0.tbrlog" in k:
## spdyServers = [0] * len(Servers)
## countServers = [0] * len(Servers)
## for r in Resources:
## if r.mUsingSpdy == 1 and r.Server != None:
## spdyServers[Servers.index(r.Server)] = 1
## if r.Server != None:
## countServers[Servers.index(r.Server)] += 1
##
##
## for i in range(len(Servers)):
## if spdyServers[i] == 0:
## if countServers[i] > 20:
## print k
## for r in Resources:
## if Servers.index(r.Server) == i:
## print r.timeCreated, r.timeStarted,
## print r.timeRead, r.timeWritten,
## print r.timeEnded, r.ptr, r.URI
## sys.exit(-1)
if len(Resources) == 0:
continue
## for r in Resources:
## #tempfix for possible time issues
## r_times = [r.timeCreated, r.timeStarted, r.timeRead, r.timeWritten, r.timeEnded]
## for i in reversed(range(4)):
## if r_times[i] > r_times[i+1]:
## r_times[i] = r_times[i+1]
## [r.timeCreated, r.timeStarted, r.timeRead, r.timeWritten, r.timeEnded] = r_times
## this_rets = []
## for r in Resources:
## this_rets.append([r.dispatched, r.mUsingSpdy])
## if not k in rets.keys():
## rets[k] = {}
## rets[k]["res.dispatch"] = this_rets
#PAGE LOAD TIME AND RES COUNT
sResources = []
for r in Resources:
if r.timeEnded != None:
sResources.append(r)
sResources = sorted(sResources, key = lambda r:r.timeEnded)
eind = int(len(sResources) * 0.95)
endtime = sResources[eind].timeEnded
starttime = Resources[0].timeCreated
for r in Resources:
if r.timeCreated != None:
if starttime == None:
starttime = r.timeCreated
starttime = min(r.timeCreated, starttime)
if (endtime != None and starttime != None):
if not k in rets.keys():
rets[k] = {}
rets[k]["page.t"] = endtime-starttime
rets[k]["res.count"] = len(Resources)
#Resource server ids and using spdy
this_rets = []
for r_i in range(len(Resources)):
if Resources[r_i].Server != None:
this_rets.append([Resources[r_i].mUsingSpdy, Servers.index(Resources[r_i].Server)])
rets[k]["res.spdy"] = this_rets
if len(this_rets) != 0:
count = 0
for i in range(len(this_rets)):
if this_rets[i][0] == 1:
count += 1
rets[k]["page.spdypct"] = float(count)/float(len(this_rets))
else:
rets[k]["page.spdypct"] = 0
## if rets[k]["page.spdypct"] != 0:
## print rets[k]["page.spdypct"]
pagesize = 0
for r in Resources:
if r.timeEnded != None:
pagesize += r.countWritten
if not k in rets.keys():
rets[k] = {}
rets[k]["page.size"] = pagesize
#TOTAL LOAD TIMES
sResources = sorted(Resources, key = lambda r:r.timeEnded)
eind = int(len(Resources) * 0.95)
er = sResources[eind]
listr = []
cr = er
while cr.parentind != -1:
listr.append(cr)
if cr.parentind >= Resources.index(cr):
print "Warning:", k, "has parent greater than child"
break
cr = Resources[cr.parentind]
listr.append(Resources[0]) #root was not included in above
listr = listr[::-1] #list of "critical" resources
this_times = [0, 0, 0, 0]
rtt = 0
for rind in range(len(listr)):
r = listr[rind]
if rind == len(listr) - 1:
nexttime = r.timeEnded
else:
nexttime = listr[rind+1].timeCreated
if nexttime == None:
continue
r_times = [r.timeCreated, r.timeStarted, r.timeRead, r.timeWritten, r.timeEnded]
if r_times[4] == None: #sometimes a resource does not declare itself finished
r_times[4] = nexttime
if not (None in r_times):
for i in range(4):
diff = min(r_times[i+1] - r_times[i], nexttime - r_times[i])
if diff < 0:
diff = 0
this_times[i] += diff
rets[k]["page.tcat"] = this_times
#RTT counting
#the following incur RTT:
#default = 1 RTT
#new connection = 1 RTT, 2 RTT if HTTPS
seenConnections = []
for r in listr:
rtt += 1
if not (r.Connection in seenConnections):
seenConnections.append(r.Connection)
rtt += 1
if len(r.URI) > 5 and r.URI[:5] == "https":
rtt += 1 #another one
rets[k]["page.rtt"] = rtt
## #RESOURCE LOAD TIMES
## this_rets = []
## for r in Resources:
## r_times = [r.timeCreated, r.timeStarted, r.timeRead, r.timeWritten, r.timeEnded]
## r_rets = []
## if not (None in r_times):
## for i in range(4):
## r_rets.append(r_times[i+1] - r_times[i])
## this_rets.append(r_rets)
## if not (k in rets.keys()):
## rets[k] = {}
## rets[k]["res.t"] = this_rets
##
## #SLOW RESOURCE LOAD TIMES
## this_rets = []
## for r in Resources:
## if r.mUsingSpdy == 0:
## continue
## accept = 0
## for r2 in Resources:
## if r != r2:
## if r.Connection == r2.Connection:
## if r2.timeWritten < r.timeCreated and \
## r2.timeEnded > r.timeCreated:
## accept = 1
## if accept == 0:
## continue
## r_times = [r.timeCreated, r.timeStarted, r.timeRead, r.timeWritten, r.timeEnded]
## r_rets = []
## if not (None in r_times):
## for i in range(4):
## r_rets.append(r_times[i+1] - r_times[i])
## this_rets.append(r_rets)
## if not (k in rets.keys()):
## rets[k] = {}
## rets[k]["res.slowt"] = this_rets
generations = [0] * len(Resources)
for rind in range(len(Resources)):
r = Resources[rind]
if r.parentind == -1:
generations[rind] = 0
else:
generations[rind] = generations[r.parentind] + 1
if not (k in rets.keys()):
rets[k] = {}
rets[k]["page.gencount"] = max(generations)
r = Resources[0]
r_times = [r.timeCreated, r.timeStarted, r.timeRead, r.timeWritten, r.timeEnded]
r_rets = []
if not (None in r_times):
for i in range(4):
r_rets.append(r_times[i+1] - r_times[i])
rets[k]["firstrestimes"] = r_rets
#RESOURCE TRANSFER RATES
this_rets = []
for r in Resources:
if r.timeWritten != None and r.timeEnded != None:
this_rets.append([r.countWritten, r.timeEnded - r.timeWritten, r.mUsingSpdy])
rets[k]["res.writetimes"] = this_rets
#DISPATCHED count
## this_rets = []
## for r in Resources:
## if r.timeEnded != None:
## this_rets.append([Servers.index(r.Server), r.pipelined, r.dispatched])
## rets.append(this_rets)
## for rind in range(len(Resources)):
## r = Resources[rind]
#find simultaneously dispatched previous resources
## waittime = 0
## for sind in range(rind):
## s = Resources[sind]
## if r.Server == s.Server and \
## r.timeStarted != None and \
## s.timeStarted != None and \
## s.timeEnded != None and\
## s.timeWritten != None and \
## abs(r.timeStarted - s.timeStarted) < 0.01:
## if r.timeEnded > s.timeStarted:
## waittime += s.timeEnded - s.timeWritten
## rets.append(waittime)
##
## for r in Resources:
## if not (None in [r.timeCreated, r.timeStarted,
## r.timeRead, r.timeWritten, r.timeEnded]):
## this_times[-1].append([r.timeStarted - r.timeCreated,
## r.timeRead - r.timeStarted,
## r.timeWritten - r.timeRead,
## r.timeEnded - r.timeWritten])
##
## dispatchtimes = [None] * len(Connections)
## for r in Resources:
## if r.Connection != None:
## conind = Connections.index(r.Connection)
## if dispatchtimes[conind] == None:
## dispatchtimes[conind] = r.timeStarted
## else:
## dispatchtimes[conind] = min(dispatchtimes[conind],
## r.timeStarted)
## for cind in range(len(Connections)):
## c = Connections[cind]
## if dispatchtimes[cind] != None and c.timeCreated != None:
## rets.append(dispatchtimes[cind] - c.timeCreated)
## print generations
## starttime = None
## for r in Resources:
## if r.timeCreated != None:
## starttime = r.timeCreated
## break
## for r in Resources:
## if r.timeCreated < starttime:
## starttime = r.timeCreated
## endtimes = []
## for r in Resources:
## if r.timeEnded != None:
## endtimes.append(r.timeEnded)
## ind = int(len(endtimes) * 0.95)
## endtime = endtimes[ind]
## this_times.append(endtime-starttime)
## Server_sizes = [0]*len(Servers)
## for r in Resources:
## if r.Server != None:
## Server_ind = Servers.index(r.Server)
## Server_sizes[Server_ind] += 1
## rets.append(max(Server_sizes))
## if len(this_times) == 5:
## times.append(this_times)
return rets
import numpy
# Counters left over from earlier experiments; only `rets` is filled below.
count = 0
rcount = 0
rsize = 0
rsizes = [0]*1000000
fold = "data/treebatch-new/"
fnames = ["comp-all-0.dill", "comp-all-1.dill", "comp-all-2.dill"]
rets = {} #dictionary of file name: relevant returns, just like the dill itself
results = []
# Load each batch dill and fold its per-page stats into `rets`.
# NOTE(review): dill pickles are binary; mode "r" works on Python 2 (this
# file uses Python 2 print statements) but would need "rb" on Python 3.
for fname in fnames:
    print "Loading dill..."
    f = open(fold + fname, "r")
    results = dill.load(f)
    f.close()
    print "Processing dill..."
    proc_data(rets, results)
del results
# Rebuild the log-file name prefixes the dills were generated from:
# "{i}-{j}-comp" for i<10,j<50, then i<200,j<5, then "{200+i}-comp".
rfnames = []
sfnames = []
word = "comp"
for i in range(10):
    for j in range(50):
        rfnames.append(fold + "{}-{}-{}".format(i, j, word))
for i in range(200):
    for j in range(5):
        rfnames.append(fold + "{}-{}-{}".format(i, j, word))
for i in range(1000):
    rfnames.append(fold + "{}-{}".format(i+200, word))
    sfnames.append(fold + "{}-{}".format(i+200, word))
##grfnames = []
##rfnames = []
##for i in range(10):
## for j in range(50):
## rfnames.append(fold + "{}-{}-{}".format(i, j, word))
##grfnames.append(rfnames)
##rfnames = []
##for i in range(200):
## for j in range(5):
## rfnames.append(fold + "{}-{}-{}".format(i, j, word))
##grfnames.append(rfnames)
##rfnames = []
##for i in range(1000):
## rfnames.append(fold + "{}-{}".format(i+200, word))
##grfnames.append(rfnames)
rts = []
# For each of the 3 log variants (file suffix 0/1/2): total bytes written and
# transfer time over resources larger than 500000 bytes; variant 0 also
# collects (size, time) pairs into `rts`.
for k in range(3):
    totalWritten = 0
    totalt = 0
    spdyWritten = 0
    spdyt = 0
    totalCount = 0
    spdyCount = 0
    for rfname in rfnames:
        fname = rfname + str(k) + ".tbrlog"
        if fname in rets.keys():
            this_rets = rets[fname]["res.writetimes"]
            for r in this_rets:
                # r == [countWritten, timeEnded - timeWritten, mUsingSpdy]
                if r[0] > 500000:
                    totalWritten += r[0]
                    totalt += r[1]
                    totalCount += 1
                    if k == 0:
                        rts.append([r[0], r[1]])
                    if r[2] == 1:
                        spdyWritten += r[0]
                        spdyt += r[1]
                        spdyCount += 1
##    print totalWritten, totalt, totalCount, totalWritten/totalt
    print totalWritten, totalt, totalCount
##    print spdyWritten, spdyt, spdyCount
##fout = open("rtt-time.txt", "w")
##for k in range(3):
## for rfname in rfnames:
## fname = rfname + str(k) + ".tbrlog"
## if fname in rets.keys():
## fout.write("\t".join(str(a) for a in [k, rets[fname]["page.rtt"], rets[fname]["page.t"]]) + "\n")
##fout.close()
##for k in range(3):
## totallen = 0
## totaltime = 0
## count = 0
## for rfname in rfnames:
## fname = rfname + str(k) + ".tbrlog"
## if fname in rets.keys():
## for s in rets[fname]["res.writetimes"]:
## if s[0] > 50000 and s[2] == 1:
## count += 1
## totallen += s[0]
## totaltime += s[1]
## print count, totallen, totaltime, totallen/totaltime
##for k in range(4):
## for m in range(4):
## print times[k][m]/counts[k]
##f = open("features.txt", "w")
##for rfname in rfnames:
## feats = []
## for k in range(2):
## fname = rfname + str(k) + ".tbrlog"
## if fname in rets.keys():
## feats.append([rets[fname]["page.t"],
## rets[fname]["page.size"],
## rets[fname]["page.gencount"],
## rets[fname]["page.spdypct"],
## rets[fname]["res.count"]])
## else:
## break
## if len(feats) == 2:
## combfeats = []
## for i in range(5):
## combfeats.append(feats[0][i] - feats[1][i])
## for i in range(5):
## combfeats.append((feats[0][i] + feats[1][i])/2.0)
## f.write("\t".join([str(fs) for fs in combfeats]) + "\n")
##f.close()
##counts = [0, 0, 0, 0]
##times = []
##slowtimes = []
##for k in range(4):
## times.append([0, 0, 0, 0])
## slowtimes.append([0, 0, 0, 0])
##for rfname in rfnames:
## for k in range(4):
## fname = rfname + str(k) + ".tbrlog"
## if fname in rets.keys():
#### for r in rets[fname]["res.t"]:
#### if len(r) == 4:
#### for m in range(4):
#### times[k][m] += r[m]
#### counts[k] += 1
##
## for r in rets[fname]["res.slowt"]:
## if len(r) == 4:
## for m in range(4):
## slowtimes[k][m] += r[m]
## counts[k] += 1
##
##for k in range(4):
## for m in range(4):
#### print times[k][m]/counts[k]
## print slowtimes[k][m]/counts[k]
##countpos = 0
##countneg = 0
##for rfname in rfnames:
## k = 0
## fname = rfname + str(k) + ".tbrlog"
## if fname in rets.keys():
## for r in rets[fname]["res.spdy"]:
## if r[0] == 1:
## countpos += 1
## if r[0] == 0:
## countneg += 1
##
##haspos = 0
##hasneg = 0
##for rfname in rfnames:
## k = 0
## fname = rfname + str(k) + ".tbrlog"
## if fname in rets.keys():
## foundpos = 0
## for r in rets[fname]["res.spdy"]:
## if r[0] == 1:
## foundpos = 1
## if foundpos == 1:
## haspos += 1
## else:
## hasneg += 1
##diffs = []
##for rfname in rfnames:
## this_times = []
## for k in range(2):
## fname = rfname + str(k) + ".tbrlog"
## if fname in rets.keys():
## this_times.append(rets[fname]["page.t"])
## else:
## break
## if len(this_times) == 2:
## diffs.append([rfname, this_times[1] - this_times[0]])
##for time in times:
## print time/count
# Average total page.tcat time per group of log prefixes (variant 1 only).
# NOTE(review): `grfnames` is only built in the commented-out block above,
# so as written this loop raises NameError -- re-enable that block (or
# otherwise define grfnames) before running this section.
for rfnames in grfnames:
    times = []
    count = 0
    for i in range(1, 2):
        times.append([0, 0, 0, 0])
    for rfname in rfnames:
        this_times = []
        for k in range(1, 2):
            fname = rfname + str(k) + ".tbrlog"
            if fname in rets.keys():
                this_times.append(rets[fname]["page.tcat"])
            else:
                continue
        if len(this_times) == 1:
            for k in range(0, 1):
                for m in range(4):
                    times[k][m] += this_times[k][m]
            count += 1
    # Sum of the four phase averages == mean total critical-path time.
    c = 0
    for t in times:
        for a in t:
            c += a/count
    print c
##tars = [18.371, 17.641, 18.7]
##for time in times:
## c = 0
## for m in range(4):
## c += time[m]/count
## print c,
## print tars[times.index(time)] - c, ""
##times = [0, 0, 0, 0]
##count = 0
##for rfname in rfnames:
## this_times = []
## for k in range(4):
## fname = rfname + str(k) + ".tbrlog"
## if fname in rets.keys():
## this_times.append(rets[fname]["page.t"])
## else:
## break
## if len(this_times) == 4:
## for k in range(4):
## times[k] += this_times[k]
## count += 1
##for time in times:
## print time/count
#for pipelining size experiment
#first, re-sort
##time_sizes = [[], [], [], []]
##for rets in total_rets:
## if len(rets) != 5:
## continue
## for i in range(4):
## time_sizes[i].append(rets[i+1])
##
##for i in range(4):
## ts = time_sizes[i]
## ts = sorted(ts, key = lambda k:k[1])
## for j in range(4):
## start = int(j/4.0 * len(ts))
## end = min(int((j+1)/4.0 * len(ts)), len(ts) - 1)
## print numpy.mean([t[0] for t in ts[start:end]])
##goodservercount = 0
##badservercount = 0
##for page in rets:
## maxserver_ind = 0
## for res in page:
## [server_ind, is_pipeline, dispatchcount] = res
## maxserver_ind = max(maxserver_ind, server_ind)
## goodservers = [-1] * (maxserver_ind+1)
## for res in page:
## [server_ind, is_pipeline, dispatchcount] = res
## if is_pipeline == 1:
## if goodservers[server_ind] == -1:
## goodservers[server_ind] = 1
## if dispatchcount > 1:
## goodservers[server_ind] = 0
## goodservercount += goodservers.count(1)
## badservercount += goodservers.count(0)
##times = [0, 0, 0, 0, 0]
##for x in total_rets:
## for i in range(5):
## times[i] += x[i]
##for t in times:
## print t/float(len(total_rets))
#code for getting transfer times
##times = [0, 0, 0, 0]
##for x in total_rets:
## for i in range(4):
## times[i] += x[i]
##c = 0
##for i in range(4):
## c += times[i]/len(total_rets)
## print c
## print times[i]/len(total_rets)
|
UTF-8
|
Python
| false | false | 24,986 |
py
| 8 |
logreader-dill.py
| 5 | 0.484511 | 0.468943 | 0 | 745 | 32.538255 | 111 |
will-hill/AI_Based_Hyperparameter_Tuning
| 4,398,046,521,257 |
f9b0a2590972d01026e13026e49425f603f4ccdc
|
2a9b6d13213a8a8e816cf0480ddee80ed6af5102
|
/src/rf_objective.py
|
b5ad4d1ccdd1059df4e7f0415ce0740f1fe6d17a
|
[] |
no_license
|
https://github.com/will-hill/AI_Based_Hyperparameter_Tuning
|
84ab65156bb0ce4be94b8d87005323207bfb4893
|
abd0090113fd1e90d5c47d9997a9920c5c41f946
|
refs/heads/master
| 2020-12-13T15:53:04.104367 | 2020-02-11T04:01:10 | 2020-02-11T04:01:10 | 234,463,468 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# %%
def rf_ojbective(trial):
    '''Optuna objective for RandomForest hyper-parameter search.

    Trains a RandomForestClassifier on each of the six pre-built
    train_i/test_i feather splits and scores ROC AUC on the held-out part.
    (The name's typo is kept: it is referenced by study.optimize below.)

    :args:
        - trial (optuna.trial.Trial): trial proposing the hyperparameters.

    :returns:
        - float: mean of (1 - AUC) over the six splits, so that Optuna's
          default minimisation maximises AUC.
    '''
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.metrics import roc_curve
    from sklearn.metrics import auc
    import pandas as pd

    # Random Forest Params
    rf_params = {
        'n_estimators': trial.suggest_int('n_estimators', 5, 2000),
        'max_depth': trial.suggest_int('max_depth', 1, 1000)
    }
    # Wider search space kept for reference (inert string literal):
    """
    'criterion': trial.suggest_categorical('criterion', ['gini', 'entropy']),
    'min_samples_split': trial.suggest_int('min_samples_split', 2, 2000),
    'min_samples_leaf': trial.suggest_int('min_samples_leaf', 1, 100),
    'min_weight_fraction_leaf': trial.suggest_int('min_weight_fraction_leaf', 0.0, 0.99),
    'max_features': trial.suggest_uniform('max_features', 0.01, 0.99),
    'max_leaf_nodes': trial.suggest_int('max_leaf_nodes', 0, 999),
    'min_impurity_decrease': trial.suggest_int('min_impurity_decrease', 0.0, 0.99),
    'min_impurity_split': trial.suggest_uniform('min_impurity_split', 0.0, 0.99),
    'bootstrap': trial.suggest_categorical('bootstrap', [True, False]),
    'oob_score': trial.suggest_categorical('oob_score', [False, True]),
    'warm_start': trial.suggest_categorical('warm_start', [False, True]),
    'class_weight': trial.suggest_categorical('class_weight', [None, "balanced", "balanced_subsample"]),
    'ccp_alpha': trial.suggest_uniform('ccp_alpha', 0.0, 0.99),
    'max_samples': trial.suggest_uniform('max_samples', 0.5, 1.0)
    }"""

    results = []
    # Induction
    for i in range(0, 6):
        train = pd.read_feather('../data/train_'+str(i)+'.ftr').set_index('TransactionID')
        X_train = train.drop(['isFraud', 'ProductCD'], axis=1)
        y_train = train['isFraud']
        print(y_train.sample(3))
        del train

        test = pd.read_feather('../data/test_' +str(i)+'.ftr').set_index('TransactionID')
        X_test = test.drop(['isFraud', 'ProductCD'], axis=1)
        y_test = test['isFraud']
        print(y_test.sample(3))
        del test

        print('rf')
        clf = RandomForestClassifier(random_state=0, **rf_params)
        print('rf fit')
        clf.fit(X_train, y_train)
        print('rf score')
        # auc, roc_auc_score, average_precision_score
        fpr, tpr, thresholds = roc_curve(y_test, [y_hat[1] for y_hat in clf.predict_proba(X_test)], pos_label=1)
        result = 1 - auc(fpr, tpr)
        results.append(result)
        print('result ')
    # BUG FIX: `results` is a plain list, which has no .mean() -- the
    # original raised AttributeError here; compute the mean explicitly.
    return sum(results) / len(results)
#%%
import optuna
# Optuna minimises by default and the objective returns mean(1 - AUC),
# so lower is better. n_trials=1 is just a smoke run.
study = optuna.create_study()
study.optimize(rf_ojbective, n_trials=1)
|
UTF-8
|
Python
| false | false | 2,656 |
py
| 13 |
rf_objective.py
| 4 | 0.596386 | 0.572289 | 0 | 62 | 41.854839 | 112 |
fkuhn/fedora_microservices
| 11,527,692,273,304 |
ef27d8d7944d741d5ef23146301da9632a98fbca
|
a29bf8fba2deb473580e12b9e24326df772f6ba6
|
/src/content_model_listeners/islandoradm.py
|
2757f771554ff0e93066dfece341b7c500f73d60
|
[] |
no_license
|
https://github.com/fkuhn/fedora_microservices
|
18b269f4d1e7a19dbf5b7415fc0a54208be486f8
|
b0701e258c77732fa0aff450c96f96acf6c6060a
|
refs/heads/master
| 2021-01-16T17:45:39.514082 | 2010-11-24T19:55:34 | 2010-11-24T19:55:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on 2010-10-16
@author: jesterhazy
'''
import os
import subprocess
import sys
import fcrepo.connection
import tempfile
from shutil import rmtree
from fcrepo.client import FedoraClient
from fcrepo.utils import NS
# Install location of the ABBYY FineReader CLI used for OCR; make_ocr()
# chdirs into this directory before invoking ./CLI.
abby_home = '/usr/local/ABBYYData/FRE80_M5_Linux_part_498-28_build_8-1-0-7030/Samples/CLI/'
def read_in_chunks(file_object, chunk_size=524288):
    """Yield successive chunks read from file_object (default 512 KiB).

    Lazy generator: keeps memory flat even for very large files.
    """
    while True:
        chunk = file_object.read(chunk_size)
        if not chunk:
            return
        yield chunk
def sysout(msg, end='\n'):
    """Write msg (stringified) followed by `end` to stdout."""
    sys.stdout.write('{}{}'.format(msg, end))
def make_jp2():
    """Compress ./tmp.tiff into a lossy, web-optimised ./tmp.jp2 via kdu_compress."""
    cmd = [
        "kdu_compress", "-i", "tmp.tiff", "-o", "tmp.jp2",
        "-rate", "0.5", "Clayers=1", "Clevels=7",
        "Cprecincts={256,256},{256,256},{256,256},{128,128},{128,128},{64,64},{64,64},{32,32},{16,16}",
        "Corder=RPCL", "ORGgen_plt=yes", "ORGtparts=R",
        "Cblk={32,32}", "Cuse_sop=yes",
    ]
    subprocess.call(cmd)
def make_jp2_lossless():
    """Compress ./tmp.tiff into a lossless ./tmp_lossless.jp2 via kdu_compress."""
    cmd = [
        "kdu_compress", "-i", "tmp.tiff", "-o", "tmp_lossless.jp2",
        "-rate", "-,0.5", "Clayers=2", "Creversible=yes", "Clevels=8",
        "Cprecincts={256,256},{256,256},{128,128}",
        "Corder=RPCL", "ORGgen_plt=yes", "ORGtparts=R", "Cblk={32,32}",
    ]
    subprocess.call(cmd)
def make_tn():
    """Make an 85x110 centred thumbnail ./tmp.jpg from ./tmp.tiff."""
    # would like 85x110^ instead of 85x110!, but need imagemagick upgrade first ( >= 6.3.8-2)
    cmd = ["convert", "tmp.tiff", "-thumbnail", "85x110!",
           "-gravity", "center", "-extent", "85x110", "tmp.jpg"]
    subprocess.call(cmd)
def make_ocr(tmpdir):
    """Run the ABBYY CLI over <tmpdir>/tmp.tiff, writing searchable PDF,
    XML (with character attributes) and UTF-8 text beside it.

    Chdirs into the ABBYY install dir and does NOT chdir back; process()
    restores the original cwd itself.
    """
    os.chdir(abby_home)
    tiff_in = tmpdir + "/tmp.tiff"
    subprocess.call([
        "./CLI", "-ics", "-if", tiff_in,
        "-f", "PDF", "-pem", "ImageOnText", "-pfpf", "Automatic",
        "-pfq", "90", "-pfpr", "150", "-of", tmpdir + "/tmp.pdf",
        "-f", "XML", "-xaca", "-of", tmpdir + "/tmp.xml",
        "-f", "Text", "-tel", "-tpb", "-tet", "UTF8", "-of", tmpdir + "/tmp.txt",
    ])
def attach_datastream(obj, tmpdir, filename, dsid, dslabel, mime_type):
    """Attach <tmpdir>/<filename> to the Fedora object as managed
    datastream `dsid`, unless that datastream already exists."""
    if dsid in obj:
        sysout('datastream %s already exists' % (dsid))
        return
    stream = open('%s/%s' % (tmpdir, filename), 'r')
    obj.addDataStream(dsid, dslabel, controlGroup=unicode('M'), mimeType=unicode(mime_type))
    obj[dsid].setContent(stream)
    stream.close()
def process(obj, dsid):
    # Derivative pipeline: when the 'tiff' datastream of a Fedora object is
    # the trigger, regenerate thumbnail, JP2 (lossy + lossless) and OCR
    # outputs (PDF/XML/text) in a temp dir and attach each as a datastream.
    # Any other dsid is ignored. Returns the (possibly updated) object.
    if dsid == 'tiff':
        cwd = os.getcwd()
        tmpdir = tempfile.mkdtemp()
        os.chdir(tmpdir)
        # fetch tiff
        # NOTE(review): opened in text mode 'w' -- fine on Python 2/Unix,
        # but a TIFF is binary; confirm 'wb' is not needed here.
        f = open(tmpdir + '/tmp.tiff', 'w')
#        f.write(obj['tiff'].getContent().read())
#        f.close()
        content = obj['tiff'].getContent()
        # Stream the (possibly huge) TIFF in chunks instead of loading it
        # all into memory; flush+fsync so the conversion subprocesses see a
        # fully written file.
        for chunk in read_in_chunks(content):
            f.write(chunk)
            f.flush()
            os.fsync(f.fileno())
        f.close()
        # do conversions (all operate on tmp.tiff in the cwd; make_ocr
        # chdirs away, hence it takes the absolute tmpdir)
        make_tn()
        make_jp2()
        make_jp2_lossless()
        make_ocr(tmpdir)
        # attach to fedora object
        attach_datastream(obj, tmpdir, 'tmp.jpg', 'tn', 'thumbnail image', 'image/jpeg')
        attach_datastream(obj, tmpdir, 'tmp.jp2', 'jp2', 'jp2 image', 'image/jp2')
        attach_datastream(obj, tmpdir, 'tmp_lossless.jp2', 'jp2lossless', 'jp2 image (lossless)', 'image/jp2')
        attach_datastream(obj, tmpdir, 'tmp.xml', 'xml', 'ocr xml', 'text/xml')
        attach_datastream(obj, tmpdir, 'tmp.txt', 'text', 'ocr text', 'text/plain')
        attach_datastream(obj, tmpdir, 'tmp.pdf', 'pdf', 'pdf', 'application/pdf')
        # Clean up the scratch dir and restore the caller's cwd.
        rmtree(tmpdir, ignore_errors = True)
        os.chdir(cwd)
    else:
        sysout('islandoradm: ignoring dsid: %s' % (dsid))
    return obj
|
UTF-8
|
Python
| false | false | 3,646 |
py
| 6 |
islandoradm.py
| 4 | 0.575699 | 0.532913 | 0 | 94 | 37.787234 | 290 |
eldojk/Workspace
| 2,903,397,918,501 |
a2073f92abdb8a0812eb6b522ca36d1b308d9957
|
a9dfc35814bde9f387bb78db2e8566c08e38b635
|
/WS/G4G/Problems/arrays/check_duplicates_at_k_distance.py
|
231fa0c474c7fb2ff3f8e47c9d46bc51d65338cb
|
[] |
no_license
|
https://github.com/eldojk/Workspace
|
b6c87f7ab74c4a3bb8585fdfa36a24a731f280f8
|
224626665a2b4c0cf701731f4e4dc96c93a26266
|
refs/heads/master
| 2021-01-19T13:33:09.378172 | 2017-11-14T17:53:09 | 2017-11-14T17:53:09 | 82,396,876 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
amzn
http://www.geeksforgeeks.org/check-given-array-contains-duplicate-elements-within-k-distance/
"""
def check_duplicates(array, k):
    """Return True iff `array` holds two equal elements at most `k`
    positions apart (index distance <= k), else False.

    Sliding-window set: keeps exactly the last k elements seen, giving O(1)
    membership tests and O(k) memory. (The original kept a dict entry for
    every element ever seen and shadowed the builtin name `dict`.)
    """
    window = set()
    for i, value in enumerate(array):
        if value in window:
            return True
        window.add(value)
        if i >= k:
            # Element at i - k falls out of the window for index i + 1.
            window.discard(array[i - k])
    return False
if __name__ == '__main__':
    # Python 2 print statements (this module predates Python 3).
    # Expected output: False, True, False, True.
    print check_duplicates([1, 2, 3, 4, 1, 2, 3, 4], 3)
    print check_duplicates([1, 2, 3, 1, 4, 5], 3)
    print check_duplicates([1, 2, 3, 4, 5], 3)
    print check_duplicates([1, 2, 3, 4, 4], 3)
|
UTF-8
|
Python
| false | false | 602 |
py
| 586 |
check_duplicates_at_k_distance.py
| 583 | 0.534884 | 0.486711 | 0 | 27 | 21.296296 | 93 |
PetrCala/Hearthstone_Archmage
| 4,492,535,828,703 |
dfa9374bf7e92e106b3fb9885f60a6eed27fc5a2
|
f3c16983880fc5f8b3824e81ebc9686f176340fd
|
/pyscripts/DataExtractor.py
|
47976c175cf13adeb75c8a719075f9c836f82a67
|
[] |
no_license
|
https://github.com/PetrCala/Hearthstone_Archmage
|
32697a75e4fc0e7efd060c937df82ee06e99c532
|
621c6ec7ac0be99078f1320738bce4918beacf71
|
refs/heads/main
| 2023-07-11T06:40:32.128171 | 2021-08-11T14:55:30 | 2021-08-11T14:55:30 | 377,523,117 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#External browser Selenium
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
#Other useful packages
import sys
from datetime import date
import pandas as pd
import numpy as np
import re
import warnings
import os
#Silence the deprecation warning when minimizing the external drivers
# (presumably Selenium 4's executable_path deprecation -- TODO confirm).
# NOTE: this suppresses ALL DeprecationWarnings process-wide.
warnings.filterwarnings('ignore', category=DeprecationWarning)
class DataExtractor:
'''Extract data from the hsreplay.net website for either some or all archetypes in the game.
'''
def __init__(self, driver_path = None, deck_folder = None):
'''
The constructor for DataExtractor class.
'''
#Defining file paths
self.base_path = re.search(f'(.+)Hearthstone_Archmage', os.getcwd()).group(1)\
+ 'Hearthstone_Archmage'
script_path = self.base_path + '\\pyscripts'
if script_path not in sys.path:
sys.path.insert(0, script_path)
if driver_path == None:
self.driver_path = f'{self.base_path}\\tools\\chromedriver'
else:
self.driver_path = driver_path
if deck_folder == None:
self.deck_folder = f'{self.base_path}\\data'
else:
self.deck_folder = deck_folder
    def open_driver(self):
        '''Open an empty driver with the specified driver path.

        :returns:
            - None: An open empty driver.
        '''
        # NOTE(review): passing the driver path positionally is deprecated
        # in Selenium 4 -- presumably why DeprecationWarning is filtered at
        # module import; confirm the installed Selenium version.
        self.driver = webdriver.Chrome(self.driver_path)
        return None
    def open_website(self, link = f'https://hsreplay.net/decks'):
        '''Insert a link and open a website using said link.

        :args:
            - link (str): The link to open the website on. Set to f'https://hsreplay.net/decks' by default.

        :usage:
            self.open_website(f'https://hsreplay.net/decks')

        :returns:
            - None: An open website using a specified link.

        :raises:
            - Exception: when the privacy/consent element ('css-flk0bs')
              does not appear within 10 seconds.
        '''
        self.open_driver()
        self.driver.get(link)
        self.driver.maximize_window()
        try:
            # Wait up to 10 s for the privacy dialog element and click it
            # away so the page content becomes interactable.
            WebDriverWait(self.driver, 10).until(lambda x: x.find_element_by_class_name('css-flk0bs'))
            self.driver.find_element_by_class_name('css-flk0bs').click()
        except TimeoutException:
            raise Exception('The privacy window has not shown up; try running the script again')
        print('Website successfully opened')
        return None
def get_card_info(self):
'''Analyze the mulligan guide page of a deck and store this information in a data frame.
:assumptions:
- An already opened driver with a window containing the mulligan guide information.
:usage:
(self.open_website(specify_link_here)) -> self.get_card_info()
:returns:
- df (pd.DataFrame): A data frame containing data about the cards from a given deck.
'''
url = self.driver.current_url
name_of_class = self.driver.find_element_by_xpath('//*[@id="deck-container"]/div/aside/ul/li[1]/a').text
try:
name_of_deck = self.driver.find_element_by_xpath('//*[@id="deck-container"]/div/aside/ul/li[2]/span/a').text
except:
name_of_deck = 'Other'
code = re.search('decks/(.+?)/', url).group(1)
date_of_deck = date.today()
#Generating the card names data
card_names = self.driver.find_elements_by_class_name('table-row-header')
cards = []
for c in card_names:
info = c.text
txt = info.rsplit('\n')
if len(txt) == 3:
mana_cost = int(txt[0])
card_name = txt[2]
card_count = int(txt[1].replace('★', '1'))
row = [name_of_class, name_of_deck, code, date_of_deck, mana_cost, card_name, card_count]
cards.append(row)
elif len(txt) == 2:
mana_cost = int(txt[0])
card_name = txt[1]
card_count = 1
row = [name_of_class, name_of_deck, code, date_of_deck, mana_cost, card_name, card_count]
cards.append(row)
else:
raise Exception('Error - the scraper is not reading the card information properly')
#Generating the card details data
data = self.driver.find_elements_by_class_name('table-cell')
further_info = []
for d in range(int(len(data)/6)):
try:
mull_wr = data[0+6*d].text.replace('▼', '').replace('▲', '')
per_kept = data[1+6*d].text
drawn_wr = data[2+6*d].text.replace('▼', '').replace('▲', '')
played_wr = data[3+6*d].text.replace('▼', '').replace('▲', '')
turns_held = float(data[4+6*d].text)
turns_played = float(data[5+6*d].text)
row = [mull_wr, per_kept, drawn_wr, played_wr, turns_held, turns_played]
except ValueError:
print('Some cards in this deck contain missing data')
row = []
further_info.append(row)
#Concatenating the two data frames together
df_card = pd.DataFrame(cards, columns = ['Class', 'Deck Name', 'Deck Code', 'Date',
'Mana Cost', 'Card Name', 'Card Count'])
df_further = pd.DataFrame(further_info, columns = ['Mulligan WR', 'Kept', 'Drawn WR',
'Played WR', 'Turns Held', 'Turn Played'])
df = pd.concat([df_card, df_further], axis = 1)
return df
def get_overview(self):
'''Analyze the overview page of a deck and store this information in a data frame.
:assumptions:
- An already opened driver with a window containing the overview information.
:usage:
(self.open_website(specify_link_here)) -> self.get_overview()
:returns:
- df (pd.DataFrame): A data frame containing an overview a given deck. (e.g., deck code, win rates, game sample size)
'''
data = self.driver.find_elements_by_xpath("//tr/td[2]")
url = self.driver.current_url
name_of_class = self.driver.find_element_by_xpath('//*[@id="deck-container"]/div/aside/ul/li[1]/a').text
try:
name_of_deck = self.driver.find_element_by_xpath('//*[@id="deck-container"]/div/aside/ul/li[2]/span/a').text
except:
name_of_deck = 'Other'
code = re.search('decks/(.+?)/', url).group(1)
date_of_deck = date.today()
overview = [name_of_class, name_of_deck, code, date_of_deck]
for d in data:
text = d.text.replace('▼', '').replace('▲', '')
overview.append(text)
#Add sample size manually
sample_size = int(self.driver.find_element_by_xpath("//*[@id='deck-container']/div/aside/section/ul/li[1]/span").text.replace(' games', '').replace(',',''))
overview.append(sample_size)
overview = [overview]
df = pd.DataFrame(overview, columns = ['Class', 'Deck Name', 'Deck Code', 'Date',
'Match Duration', 'Turns', 'Turn Duration', 'Overall Winrate',
'vs. Demon Hunter', 'vs. Druid', 'vs. Hunter',
'vs. Mage', 'vs. Paladin', 'vs. Priest', 'vs. Rogue',
'vs. Shaman', 'vs. Warlock', 'vs. Warrior', 'Sample Size'])
return df
def get_archetype_data(self, class_name, arch_name):
'''Specify the name for the archetype and return the data from the hsreplay website for the given archetype.
:args:
- class_name (str): Name of the class.
- arch_name (str): Name of the archetype.
:usage:
self.driver.get_archetype_data(class_name = 'Rogue', arch_name = 'Miracle Rogue')
- The method is case sensitive. An wrongly formatted input returns error.
:returns:
- data_frames (pandas.DataFrame): A data frame containing data for the given archetype.
'''
#Pre-processing and identifying the data
class_name = class_name.title()
arch_name = arch_name.title()
class_codes = {'Demon Hunter' : 1, 'Druid' : 2, 'Hunter' : 3, 'Mage' : 4, 'Paladin' : 5,
'Priest' : 6, 'Rogue' : 7 , 'Shaman' : 8, 'Warlock' : 9, 'Warrior' : 10}
class_index = class_codes.get(class_name)
if class_index == None:
raise Exception('The class name is not correctly specified (e.g. Demon Hunter, Warlock, etc.)')
else:
pass
#The actual process
self.open_website()
#Open the page for the specified archetype
u = WebDriverWait(self.driver, 8)
u.until(EC.presence_of_element_located((By.CLASS_NAME,"deck-tile")))
xpath_class = f'//*[@id="player-class-filter"]/div/div[1]/span[{class_index}]/div/img'
x = self.driver.find_element_by_xpath(xpath_class)
x.click()
xpath_arch = f'//*[@id="player-class-filter"]/div/div[2]/div/ul/li/span[text() = "{arch_name}"]'
y = self.driver.find_element_by_xpath(xpath_arch)
y.click()
deck_amount = len(self.driver.find_elements_by_xpath('//*[@id="decks-container"]/main/div[3]/section/ul/li/a'))
#Generate the card info for each of the decks of a given archetype
data_frames = []
overviews_df = pd.DataFrame()
for d in range(deck_amount):
u = WebDriverWait(self.driver, 8)
u.until(EC.presence_of_element_located((By.CLASS_NAME,"deck-tile")))
index = d + 2
xpath_deck = f'//*[@id="decks-container"]/main/div[3]/section/ul/li[{index}]/a'
l = self.driver.find_element_by_xpath(xpath_deck)
l.click()
try:
u.until(EC.presence_of_element_located((By.CLASS_NAME,"sort-header__title")))
card_info = self.get_card_info()
data_frames.append(card_info)
except:
print('This deck is missing card data')
pass
#Switch to overview
overview_button = self.driver.find_element_by_id('tab-overview')
overview_button.click()
try:
u.until(EC.presence_of_element_located((By.CLASS_NAME,"winrate-cell")))
overview = self.get_overview()
overviews_df = overviews_df.append(overview)
except:
print('This deck is missing overview data')
pass
deck_position = d + 1
print(f'Extracted data for {deck_position}/{deck_amount} decks of archetype {arch_name}')
self.driver.back()
data_frames.insert(0, overviews_df)
self.driver.quit()
return data_frames
def archetype_to_excel(self, class_name, arch_name):
'''Specify the class name, archetype name and folder path
and return an excel file with all informations about said archetype in said folder
:args:
- class_name (str): Name of the class.
- arch_name (str): Name of the archetype.
:usage:
self.archetype_to_excel(class_name = 'Rogue', archetype = 'Miracle Rogue',
'path' = )
'''
class_name = class_name.title()
arch_name = arch_name.title()
today = date.today().strftime("%m-%d")
path_partial = f'{self.deck_folder}\\{today}'
#Assert the existence of a folder into which to add the data
if not os.path.exists(path_partial):
os.makedirs(path_partial)
print(f'Creating a folder {today} where the data will be stored')
#Get the archetype data
df = self.get_archetype_data(class_name, arch_name)
#Get the number of data frames to write into excel
sheet_n = len(df)
#Write these data frames into excel
path = f'{self.deck_folder}\\{today}\\{class_name} - {arch_name} {today}.xlsx'
with pd.ExcelWriter(path) as writer:
for i in range(sheet_n):
if i == 0:
df[i].to_excel(writer, sheet_name = 'Overview', index = False)
else:
index = i - 1
temp = df[0].reset_index()
deck_code = temp.loc[index, 'Deck Code']
df[i].to_excel(writer, sheet_name = f'{deck_code}', index = False)
print('All done')
return df
def get_all_data(self, classes_skip = 0):
'''Return all the data from the hsreplay website as several data frames.
The data is chronologically collected in the order:
Demon Hunter, Druid, Hunter, Mage, Paladin, Priest, Rogue, Shaman, Warlock, Warrior.
:args:
- classes_skip (int): Define how many classes to skip when collecting the data.
'''
today = date.today().strftime("%m-%d")
path_partial = f'{self.deck_folder}\\{today}'
#Assert the existence of a folder into which to add the data
if not os.path.exists(path_partial):
os.makedirs(path_partial)
print(f'Creating a folder {today} where the data will be stored')
self.open_website()
#Get the classes as a list of the html elements
u = WebDriverWait(self.driver, 8)
u.until(EC.presence_of_element_located((By.CLASS_NAME,"deck-tile")))
classes_len = len(self.driver.find_elements_by_xpath('//*[@id="player-class-filter"]/div/div[1]/span/div/img'))
for c in range(classes_len):
index = c + classes_skip + 1
xpath_class = f'//*[@id="player-class-filter"]/div/div[1]/span[{index}]/div/img'
c = self.driver.find_element_by_xpath(xpath_class)
class_name = c.get_attribute('alt').title()
c.click() #Go to the website of the class
archetype_length = len(self.driver.find_elements_by_xpath('//*[@id="player-class-filter"]/div/div[2]/div/ul/li/span'))
for a in range(archetype_length):
index = a + 1
xpath_arch = f'//*[@id="player-class-filter"]/div/div[2]/div/ul/li[{index}]/span'
k = self.driver.find_element_by_xpath(xpath_arch)
k.click()
data_frames = []
arch_name = k.text.title()
url = self.driver.current_url
arch_code = re.search('archetypes=(.+)', url).group(1)
overviews_df = pd.DataFrame()
deck_amount = len(self.driver.find_elements_by_xpath('//*[@id="decks-container"]/main/div[3]/section/ul/li/a'))
#Generate the card info for each of the decks of a given archetype
for d in range(deck_amount):
u = WebDriverWait(self.driver, 8)
u.until(EC.presence_of_element_located((By.CLASS_NAME,"deck-tile")))
index = d + 2
xpath_deck = f'//*[@id="decks-container"]/main/div[3]/section/ul/li[{index}]/a'
l = self.driver.find_element_by_xpath(xpath_deck)
l.click()
try:
u.until(EC.presence_of_element_located((By.CLASS_NAME,"sort-header__title")))
card_info = self.get_card_info()
data_frames.append(card_info)
except:
print('This deck is missing card data')
pass
#Switch to overview
overview_button = self.driver.find_element_by_id('tab-overview')
overview_button.click()
try:
u.until(EC.presence_of_element_located((By.CLASS_NAME,"winrate-cell")))
overview = self.get_overview()
overviews_df = overviews_df.append(overview)
except:
print('This deck is missing overview data')
pass
deck_position = d + 1
print(f'Extracted data for {deck_position}/{deck_amount} decks of archetype {arch_name}')
self.driver.back()
u = WebDriverWait(self.driver, 8)
u.until(EC.presence_of_element_located((By.CLASS_NAME,"deck-tile")))
k = self.driver.find_element_by_xpath(xpath_arch)
k.click()
#Add the overview data frame to the beginning of the list
data_frames.insert(0, overviews_df)
#Get the number of data frames to write into excel
sheet_n = len(data_frames)
#Write these data frames into excel
path = f'{path_partial}\\{class_name} - {arch_name} {today}.xlsx'
with pd.ExcelWriter(path) as writer:
for i in range(sheet_n):
if i == 0:
data_frames[i].to_excel(writer, sheet_name = 'Overview', index = False)
else:
index = i - 1
temp = data_frames[0].reset_index()
deck_code = temp.loc[index, 'Deck Code']
data_frames[i].to_excel(writer, sheet_name = f'{deck_code}', index = False)
self.driver.quit()
print('All done')
return data_frames
if __name__ == '__main__':
    # Smoke-test entry point: launch a browser session on hsreplay.net/decks.
    extractor = DataExtractor()
    extractor.open_website()
|
UTF-8
|
Python
| false | false | 18,503 |
py
| 27 |
DataExtractor.py
| 9 | 0.529132 | 0.524858 | 0 | 452 | 39.89823 | 164 |
gwgundersen/codebook
| 16,338,055,602,200 |
d933a447a92b9a9cc2850c37fdfb9bef04864274
|
25757d40e1b2640dbed688d6cc803e3d9a5341be
|
/codebook/endpoints/index.py
|
fd52e02293ed43739d7187dad3edaf852d33911b
|
[
"MIT"
] |
permissive
|
https://github.com/gwgundersen/codebook
|
99d0b2c1fa65e81222e78a29d2bdcd6045b178b1
|
ff6d808c4c50de80f9a3c3f9d6898b9b3fdeae74
|
refs/heads/master
| 2021-01-20T04:47:55.469912 | 2017-04-30T22:41:19 | 2017-04-30T22:41:19 | 89,736,423 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""Return JSON representations of codebook data."""
from flask import Blueprint, jsonify, request
import pickle
# Blueprint exposing the codebook lookup API under the /api prefix.
index_blueprint = Blueprint('index',
                            __name__,
                            url_prefix='/api')

# `file()` is a Python-2-only builtin (removed in Python 3); `open()` behaves
# identically here and works on both versions. The context manager also
# guarantees the handle is closed after the module-load-time unpickle.
with open('codebook/db.pck', 'rb') as _db_file:
    db = pickle.load(_db_file)
@index_blueprint.route('/codebook/', methods=['GET'])
def get_all_codebook_data():
    """Return every codebook entry, optionally filtered via the ``q``
    query-string parameter (substring match on descriptions).
    """
    if 'q' in request.args:
        payload = _search_description_by_query(request.args.get('q'))
    else:
        payload = [_prepare_data(code, data) for code, data in db.items()]
    return jsonify(payload)
@index_blueprint.route('/codebook/<string:code>', methods=['GET'])
def get_specific_codebook_data(code):
    """Return the codebook entry for *code*, or a JSON error payload when
    the code is unknown.
    """
    if code not in db:
        return jsonify({
            'status': 'error',
            'message': 'Invalid codename.'
        })
    return jsonify(_prepare_data(code, db[code]))
def _prepare_data(code, data):
    """Shape one raw codebook record into its public API representation."""
    # Start from the whitelisted metadata fields, then attach the two
    # always-present keys (no key collisions are possible between them).
    prepared = _add_metadata(data)
    prepared['code'] = code
    prepared['description'] = data['description']
    return prepared
def _add_metadata(data):
"""Builds question metadata based on specified fields.
"""
keys = ['type', 'label', 'range', 'units', 'unique values', 'missing',
'source file']
results = {}
for key, val in data.items():
if key in keys:
results[key] = val
return results
def _search_description_by_query(q):
    """Return prepared entries whose description contains the substring *q*."""
    return [_prepare_data(code, data)
            for code, data in db.items()
            if q in data['description']]
|
UTF-8
|
Python
| false | false | 1,817 |
py
| 19 |
index.py
| 5 | 0.583379 | 0.583379 | 0 | 71 | 24.605634 | 74 |
alejogonza/holbertonschool-higher_level_programming
| 9,680,856,329,045 |
0059474ee5ce6b774597d21533fd2d3771224c5d
|
0c47c4fe166c2d457c608aa9339ec4aac5f19bb3
|
/0x04-python-more_data_structures/12-roman_to_int.py
|
efa88951ced6f0b103cedd23bff39ffaf1fba99d
|
[] |
no_license
|
https://github.com/alejogonza/holbertonschool-higher_level_programming
|
dbc6c933be158e330aff7f035a107af62b3dd61c
|
7a7ab877a8eb220e3238ede9110353069ddfe3c7
|
refs/heads/master
| 2020-07-22T21:11:59.823541 | 2020-02-13T21:42:19 | 2020-02-13T21:42:19 | 207,329,112 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python3
def roman_to_int(roman_string):
    """Convert a Roman numeral string to its integer value.

    Returns 0 when the argument is not a string, is empty, or contains a
    character that is not a Roman numeral.

    Bug fixed: the previous version only validated the character being
    added, then indexed ``rom_str[roman_string[i-2]]`` unchecked, so inputs
    such as "QX" raised KeyError instead of returning 0.
    """
    if not isinstance(roman_string, str):
        return 0
    values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    total = 0
    prev = 0
    # Scan right to left: a symbol smaller than the one to its right is
    # subtractive (e.g. the I in IV), otherwise it is added.
    for ch in reversed(roman_string):
        if ch not in values:
            return 0
        val = values[ch]
        if val < prev:
            total -= val
        else:
            total += val
        prev = val
    return total
|
UTF-8
|
Python
| false | false | 600 |
py
| 72 |
12-roman_to_int.py
| 61 | 0.49 | 0.438333 | 0 | 15 | 39 | 79 |
rkaramc/celery-dedupe
| 6,949,257,088,648 |
2df7aa75a166786e05db11380290b1778cf5d170
|
1384f98033dbd5020f53506b524263322c7a7e30
|
/setup.py
|
a50f9c6d22aa82e75ea957b63a771a45d06e0df8
|
[
"MIT"
] |
permissive
|
https://github.com/rkaramc/celery-dedupe
|
a626de0c2d0b74b2639382e4e19f450e664f58b5
|
d31c8ec3e40b58c7b58fb1adb1472e46db1bbafd
|
refs/heads/master
| 2020-07-28T08:50:19.912255 | 2016-11-10T22:00:37 | 2016-11-10T22:00:37 | 73,416,331 | 0 | 0 | null | true | 2016-11-10T20:19:54 | 2016-11-10T19:57:16 | 2016-11-10T19:57:17 | 2016-11-10T20:19:53 | 196 | 0 | 0 | 0 |
Python
| null | null |
#!/usr/bin/env python
from setuptools import setup, find_packages

# Use the README as the package's long description on PyPI.
with open('README.md') as f:
    readme = f.read()

# Packaging metadata for the celery-dedupe distribution; executed at
# install/build time by setuptools.
setup(
    name='celery-dedupe',
    version='0.0.1',
    description='Deduplication of Celery tasks',
    author='Joe Alcorn',
    author_email='joealcorn123@gmail.com',
    url='https://github.com/joealcorn/celery-dedupe',
    packages=find_packages(),
    package_data={
        'celery_dedupe': ['README.md'],
    },
    long_description=readme,
    license='MIT',
    classifiers=(
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
    ),
)
|
UTF-8
|
Python
| false | false | 740 |
py
| 13 |
setup.py
| 9 | 0.614865 | 0.604054 | 0 | 29 | 24.517241 | 53 |
cindy820219/python-realbook
| 5,454,608,501,748 |
a36bcb422b4b36df6f0b382b3bd4fd81efbcc979
|
b89bc3608d525d6b3719e0d14269f8d28b285196
|
/realbook/measure.py
|
96631df1deec1f9941f6262d92367fecd674010b
|
[] |
no_license
|
https://github.com/cindy820219/python-realbook
|
4764b53b2a200112d3a9d82b3476cbea5661c850
|
b99daace9f05095c64c23337c03ab986f48bd2f3
|
refs/heads/master
| 2021-01-10T13:41:57.842336 | 2010-05-20T13:47:57 | 2010-05-20T13:47:57 | 42,981,886 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (C) 2010 Vittorio Palmisano <vpalmisano at gmail dot com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import math
from chord import Chord
from symbol import Symbol
def make_key_signatures():
    """Build the accidental table for every key signature.

    Returns a dict mapping key name -> {'maj': [...], 'min': [...]}, where
    each accidental is encoded as note+octave+accidental (e.g. 'F5#').
    Each minor key shares the accidental *list object* of its relative
    major key.
    """
    # Accidentals of every major key, in circle-of-fifths order per key.
    major_accidentals = {
        'C': [],
        'Cb': ['B4b', 'E5b', 'A4b', 'D5b', 'G4b', 'C5b', 'F4b'],
        'C#': ['F5#', 'C5#', 'G5#', 'D5#', 'A4#', 'E5#', 'B4#'],
        'D': ['F5#', 'C5#'],
        'Db': ['B4b', 'E5b', 'A4b', 'D5b', 'G4b'],
        'E': ['F5#', 'C5#', 'G5#', 'D5#'],
        'Eb': ['B4b', 'E5b', 'A4b'],
        'F': ['B4b'],
        'F#': ['F5#', 'C5#', 'G5#', 'D5#', 'A4#', 'E5#'],
        'G': ['F5#'],
        'Gb': ['B4b', 'E5b', 'A4b', 'D5b', 'G4b', 'C5b'],
        'G#': ['F5#', 'C5#', 'G5#', 'D5#', 'A4#'],
        'A': ['F5#', 'C5#', 'G5#'],
        'Ab': ['B4b', 'E5b', 'A4b', 'D5b'],
        'B': ['F5#', 'C5#', 'G5#', 'D5#', 'A4#'],
        'Bb': ['B4b', 'E5b'],
    }
    # Each minor key borrows the signature of its relative major.
    relative_major = {
        'C': 'Eb', 'Cb': 'Bb', 'C#': 'E', 'D': 'F', 'Db': 'E', 'E': 'G',
        'Eb': 'Gb', 'F': 'Ab', 'F#': 'A', 'G': 'Bb', 'Gb': 'A', 'G#': 'B',
        'A': 'C', 'Ab': 'B', 'B': 'D', 'Bb': 'Db',
    }
    key_signatures = {}
    for key, accidentals in major_accidentals.items():
        key_signatures[key] = {'maj': accidentals}
    for minor_key, major_key in relative_major.items():
        # Deliberately alias (not copy) the relative major's list.
        key_signatures[minor_key]['min'] = key_signatures[major_key]['maj']
    return key_signatures
class Measure:
    '''One measure (bar) of a staff.

    Holds the measure's chords and symbols and draws every visual element
    (staff lines, clef, barlines, time/key signatures, section markers,
    repeat endings) with cairo through ``self.staff.score.cr``.

    Drawing happens in two passes: a *simulate* pass that only accumulates
    layout metrics (``top_height`` etc.) and a real pass that paints.
    NOTE(review): uses ``xrange`` and ``u''`` literals — Python 2 code.
    '''
    def __init__(self, staff, index=0, time=(), key_signature=(),
            start_barline='single', stop_barline='single',
            ending='', section='', empty=False):
        self.staff = staff
        self.index = index          # position of this measure within its staff
        self.time = time            # (numerator, denominator) or () for none
        self.start_barline = start_barline  # 'single' | 'double' | 'repeat'
        self.stop_barline = stop_barline    # 'single' | 'double' | 'repeat' | 'final'
        self.ending = ending        # repeat-ending label ('1.', 'empty', ...)
        self.section = section      # section letter/name, see section_table
        self.empty = empty          # True: draw nothing at all
        self.chords = []
        self.symbols = []
        self.key_signature = key_signature  # (key, 'maj'|'min') or ()
        self.key_signatures = make_key_signatures()
        # drawing properties
        self.reset_drawing()

    def __repr__(self):
        return '<Measure %d>' %(self.index)

    def add_chord(self, index, chord='', **kw):
        '''Create a Chord at slot *index* and register it on this measure.'''
        c = Chord(self, index, chord, **kw)
        self.chords.append(c)
        return c

    def add_symbol(self, index, symbol='', **kw):
        '''Create a Symbol at slot *index* and register it on this measure.'''
        s = Symbol(self, index, symbol, **kw)
        self.symbols.append(s)
        return s

    def add_chords(self, chords, **kw):
        '''Add one Chord per entry of *chords*, slotted sequentially.'''
        added = []
        for i in xrange(len(chords)):
            c = Chord(self, i, chords[i], **kw)
            self.chords.append(c)
            added.append(c)
        return added

    def num_chords(self):
        '''Count the non-alternate chords of the measure.'''
        n = 0
        for chord in self.chords:
            if not chord.alternate:
                n += 1
        return n

    def total_height(self):
        '''Staff height plus the extra space claimed above and below it.'''
        return self.height + self.top_height + self.bottom_height

    def reset_drawing(self):
        '''Clear all layout metrics accumulated by a previous draw() pass.'''
        self.padding_left = 0
        self.width = 0
        self.height = 0
        self.top_height = 0      # extra vertical space needed above the staff
        self.bottom_height = 0   # extra vertical space needed below the staff
        self.chords_left = 0
        self.chords_padding_left = 0
        # (left, width, height) triples of everything drawn above the staff.
        self.top_heights = []

    def draw(self, width, simulate=False):
        '''Draw the whole measure at the given width.

        With simulate=True nothing is painted; only the layout metrics
        (top_height, bottom_height, top_heights) are accumulated.
        '''
        self.reset_drawing()
        self.width = width
        self.simulate = simulate
        self.height = self.staff.staff_lines_pos[-1]-self.staff.staff_lines_pos[0]
        cr = self.staff.score.cr
        self.padding_left = self.staff.score.padding_left+self.index*self.width
        #
        if self.empty:
            return
        self.draw_lines()
        # draw start measure: the clef only on the very first measure
        if self.index == 0 and self.staff.index == 0:
            self.draw_clef()
        if self.start_barline:
            self.draw_start_barline()
        if self.stop_barline:
            self.draw_stop_barline()
        if self.time:
            self.draw_time_signature()
        if self.key_signature:
            self.draw_key_signature()
        if self.section:
            self.draw_section()
        # draw chords
        for chord in self.chords:
            chord.draw(simulate=self.simulate)
            self.top_height = max(self.top_height, chord.height)
            self.top_heights.append((chord.left, chord.width, chord.height))
        # draw symbols
        for symbol in self.symbols:
            symbol.draw(simulate=self.simulate)
            self.top_height = max(self.top_height, symbol.height)
            self.top_heights.append((symbol.left, symbol.width, symbol.height))
        if self.ending:
            self.draw_ending()

    def get_measure_height(self, position):
        '''Return the recorded height above the staff at x *position* (0 if
        nothing was drawn there).'''
        for left, width, height in self.top_heights:
            if left <= position and (left+width) >= position:
                return height
        return 0

    def draw_lines(self):
        '''Paint the five horizontal staff lines across this measure.'''
        if self.simulate:
            return
        cr = self.staff.score.cr
        cr.set_source_rgb(0, 0, 0)
        cr.set_line_width(0.5)
        for i in xrange(5):
            left = self.staff.score.padding_left + self.index*self.width
            right = self.staff.score.padding_left + (self.index+1)*self.width
            cr.move_to(left, self.staff.top+i*self.staff.lines_distance)
            cr.line_to(right, self.staff.top+i*self.staff.lines_distance)
            cr.stroke()

    def draw_clef(self):
        '''Draw the G clef ('&' glyph in the Jazz font) and reserve space.'''
        cr = self.staff.score.cr
        cr.set_font_face(self.staff.score.face_jazz)
        cr.set_font_size(30)
        # 'V' is only measured to get representative glyph metrics.
        xbear, ybear, fwidth, fheight, xadv, yadv = cr.text_extents('V')
        self.padding_left += 2
        cr.move_to(self.padding_left, self.staff.staff_lines_pos[3])
        if not self.simulate:
            cr.show_text('&')
        # update dist
        top_dist = self.staff.staff_lines_pos[3]-self.staff.staff_lines_pos[0]
        height = -ybear - top_dist
        self.top_height = max(self.top_height, height)
        self.top_heights.append((self.padding_left, fwidth+2, height))
        self.padding_left += fwidth + 2
        bottom_dist = self.staff.staff_lines_pos[-1]-self.staff.staff_lines_pos[3]
        self.bottom_height = max(self.bottom_height, fheight + ybear - bottom_dist)

    def draw_start_barline(self):
        '''Dispatch on start_barline kind ('single'/'double'/'repeat').'''
        if self.start_barline == 'single':
            # The very first measure of the score has no opening barline.
            if self.staff.index == 0 and self.index == 0:
                return
            self.draw_measure_start()
        elif self.start_barline == 'double':
            self.draw_measure_start_double()
        elif self.start_barline == 'repeat':
            self.draw_start_repeat()

    def draw_stop_barline(self):
        '''Dispatch on stop_barline kind ('single'/'double'/'repeat'/'final').'''
        if self.stop_barline == 'single':
            self.draw_measure_stop()
        elif self.stop_barline == 'double':
            self.draw_measure_stop_double()
        elif self.stop_barline == 'repeat':
            self.draw_stop_repeat()
        elif self.stop_barline == 'final':
            self.draw_measure_stop_final()

    def draw_measure_start(self):
        '''Single opening barline; always advances padding_left.'''
        if not self.simulate:
            cr = self.staff.score.cr
            cr.set_source_rgb(0, 0, 0)
            cr.set_line_width(1.0)
            cr.move_to(self.padding_left, self.staff.staff_lines_pos[0])
            cr.line_to(self.padding_left, self.staff.staff_lines_pos[-1])
            cr.stroke()
        self.padding_left += 2

    def draw_measure_start_double(self):
        '''Double opening barline (two thin lines 3px apart).'''
        cr = self.staff.score.cr
        if not self.simulate:
            cr.set_source_rgb(0, 0, 0)
            cr.set_line_width(1.0)
            cr.move_to(self.padding_left, self.staff.staff_lines_pos[0])
            cr.line_to(self.padding_left, self.staff.staff_lines_pos[-1])
            cr.move_to(self.padding_left+3, self.staff.staff_lines_pos[0])
            cr.line_to(self.padding_left+3, self.staff.staff_lines_pos[-1])
            cr.stroke()
        self.padding_left += 5

    def draw_measure_stop(self):
        '''Single closing barline at the measure's right edge.'''
        if not self.simulate:
            cr = self.staff.score.cr
            cr.set_source_rgb(0, 0, 0)
            cr.set_line_width(1.0)
            left = self.staff.score.padding_left+(self.index+1)*self.width
            cr.move_to(left, self.staff.staff_lines_pos[0])
            cr.line_to(left, self.staff.staff_lines_pos[-1])
            cr.stroke()

    def draw_measure_stop_double(self):
        '''Double closing barline.'''
        if not self.simulate:
            cr = self.staff.score.cr
            cr.set_source_rgb(0, 0, 0)
            cr.set_line_width(1.0)
            left = self.staff.score.padding_left+(self.index+1)*self.width
            cr.move_to(left-3, self.staff.staff_lines_pos[0])
            cr.line_to(left-3, self.staff.staff_lines_pos[-1])
            cr.move_to(left, self.staff.staff_lines_pos[0])
            cr.line_to(left, self.staff.staff_lines_pos[-1])
            cr.stroke()

    def draw_measure_stop_final(self):
        '''Final barline: thin line followed by a thick closing line.'''
        if not self.simulate:
            cr = self.staff.score.cr
            cr.set_source_rgb(0, 0, 0)
            cr.set_line_width(1.0)
            left = self.staff.score.padding_left+(self.index+1)*self.width
            cr.move_to(left-4, self.staff.staff_lines_pos[0])
            cr.line_to(left-4, self.staff.staff_lines_pos[-1])
            cr.stroke()
            cr.set_line_width(3.0)
            cr.move_to(left, self.staff.staff_lines_pos[0])
            cr.line_to(left, self.staff.staff_lines_pos[-1])
            cr.stroke()

    def draw_start_repeat(self):
        '''Opening repeat sign (Jazz-font glyph) at the measure's left.'''
        cr = self.staff.score.cr
        cr.set_font_face(self.staff.score.face_jazz)
        cr.set_source_rgb(0, 0, 0)
        cr.set_font_size(32)
        cr.move_to(self.padding_left-2, self.staff.staff_lines_pos[2])
        text = u'Ú'
        if not self.simulate:
            cr.show_text(text)
        xbear, ybear, fwidth, fheight, xadv, yadv = cr.text_extents(text)
        self.padding_left += fwidth + 2

    def draw_stop_repeat(self):
        '''Closing repeat sign: the same glyph drawn rotated 180 degrees.'''
        cr = self.staff.score.cr
        cr.set_font_face(self.staff.score.face_jazz)
        cr.set_source_rgb(0, 0, 0)
        cr.set_font_size(32)
        left = self.staff.score.padding_left+(self.index+1)*self.width
        cr.move_to(left+2, self.staff.staff_lines_pos[2])
        text = u'Ú'
        cr.rotate(math.pi)
        if not self.simulate:
            cr.show_text(text)
        # Restore the context orientation after painting.
        cr.rotate(-math.pi)

    # vertical gaps around the repeat-ending bracket
    ending_padding_bottom = 10
    ending_padding_top = 0

    def draw_ending(self):
        '''Draw the repeat-ending bracket and its label above the measure.

        An ending of 'empty' draws the bracket without a label.
        '''
        cr = self.staff.score.cr
        cr.set_source_rgb(0, 0, 0)
        cr.set_line_width(1.0)
        left = self.staff.score.padding_left+self.index*self.width
        if self.ending:
            cr.set_font_face(self.staff.score.face_jazz)
            cr.set_font_size(22)
            xbear, ybear, fwidth, fheight, xadv, yadv = cr.text_extents(self.ending)
        else:
            fheight = 0
        top = self.staff.staff_lines_pos[0] - self.top_height - \
            self.ending_padding_top - fheight - 10
        cr.move_to(left, self.staff.staff_lines_pos[0]-self.ending_padding_bottom)
        if not self.simulate:
            # Bracket: vertical stroke up, then horizontal over 90% of the bar.
            cr.line_to(left, top)
            cr.line_to(left+self.width*0.9, top)
            cr.stroke()
        if self.ending != 'empty':
            cr.move_to(left+2, top+fheight-4)
            if not self.simulate:
                cr.show_text(self.ending)
        self.top_height += self.ending_padding_top + fheight + 10

    # Maps section names to the JazzText font glyphs that render them.
    section_table = {
        'A': u'Ø', 'B': u'Ù', 'C': u'Ú', 'D': u'Û', 'E': u'Ü', 'F': u'Ý',
        'G': u'Þ', 'H': u'ß', 'I': u'à', 'J': u'á', 'K': u'â', 'L': u'ã',
        'M': u'ä', 'N': u'å', 'O': u'æ', 'P': u'ç', 'Q': u'è', 'R': u'é',
        'S': u'ê', 'T': u'ë', 'U': u'ì', 'V': u'í', 'W': u'î', 'X': u'ï',
        'Y': u'ð', 'Z': u'ñ', ' ': u'ò',
        'intro': u'Intro', 'verse': u'Verse',
    }
    section_padding_bottom = 4

    def draw_section(self):
        '''Draw the section marker above the staff and shift the chord row
        to the right of it.'''
        cr = self.staff.score.cr
        cr.set_font_face(self.staff.score.face_jazztext)
        cr.set_source_rgb(0, 0, 0)
        cr.set_font_size(25)
        text = self.section_table.get(self.section)
        xbear, ybear, fwidth, fheight, xadv, yadv = cr.text_extents(text)
        left = self.staff.score.padding_left+self.index*self.width
        top = self.staff.staff_lines_pos[0] - self.top_height - (fheight+ybear) - \
            self.section_padding_bottom
        cr.move_to(left, top)
        if not self.simulate:
            cr.show_text(text)
        self.top_height += fheight + self.section_padding_bottom
        self.chords_left = left + fwidth + 8
        self.chords_padding_left = fwidth + 8
        self.top_heights.append((left, fwidth, fheight + self.section_padding_bottom))

    def draw_time_signature(self):
        '''Draw the time signature, centering the narrower of the two
        numbers on the wider one.'''
        cr = self.staff.score.cr
        cr.set_font_face(self.staff.score.face_jazztext)
        cr.set_source_rgb(0, 0, 0)
        cr.set_font_size(22)
        self.padding_left += 2
        #
        num = str(self.time[0])
        den = str(self.time[1])
        xbear, ybear, num_width, fheight, xadv, yadv = cr.text_extents(num)
        xbear, ybear, den_width, fheight, xadv, yadv = cr.text_extents(den)
        if num_width > den_width:
            num_padding = 0
            den_padding = (num_width-den_width)*0.5
        else:
            den_padding = 0
            num_padding = (den_width-num_width)*0.5
        # draw num
        cr.move_to(self.padding_left+num_padding, self.staff.staff_lines_pos[2])
        if not self.simulate:
            cr.show_text(num)
        # draw den
        cr.move_to(self.padding_left+den_padding, self.staff.staff_lines_pos[-1])
        if not self.simulate:
            cr.show_text(den)
        # update padding left
        self.padding_left += max(num_width, den_width) + 2

    def get_note_y(self, note):
        '''Map a note name (G5 down to F4) to its y coordinate on the staff;
        notes between lines sit half a line-distance off a line position.'''
        d = self.staff.lines_distance
        return {
            'G5': self.staff.staff_lines_pos[0]-d/2,
            'F5': self.staff.staff_lines_pos[0],
            'E5': self.staff.staff_lines_pos[0]+d/2.,
            'D5': self.staff.staff_lines_pos[1],
            'C5': self.staff.staff_lines_pos[1]+d/2.,
            'B4': self.staff.staff_lines_pos[2],
            'A4': self.staff.staff_lines_pos[2]+d/2.,
            'G4': self.staff.staff_lines_pos[3],
            'F4': self.staff.staff_lines_pos[3]+d/2,
        }[note]

    def draw_key_signature(self):
        '''Draw the accidentals of self.key_signature ((key, mode)) one by
        one, 6px apart, and account for the space they occupy.'''
        cr = self.staff.score.cr
        cr.set_font_face(self.staff.score.face_jazz)
        cr.set_source_rgb(0, 0, 0)
        cr.set_font_size(25)
        key, mode = self.key_signature
        top_height = 0
        left = self.padding_left
        for note in self.key_signatures[key][mode]:
            # note is e.g. 'F5#': position 'F5', accidental glyph '#'.
            top = self.get_note_y(note[:2])
            cr.move_to(self.padding_left, top)
            xbear, ybear, width, fheight, xadv, yadv = cr.text_extents(note[2])
            top_height = max(top_height, self.staff.staff_lines_pos[0] - top - ybear)
            if not self.simulate:
                cr.show_text(note[2])
            self.padding_left += 6
        self.padding_left += 6
        self.top_height = max(self.top_height, top_height)
        self.top_heights.append((left, self.padding_left, top_height))
|
UTF-8
|
Python
| false | false | 16,646 |
py
| 6 |
measure.py
| 6 | 0.556779 | 0.540711 | 0 | 412 | 39.332524 | 86 |
vzhng/python_practice
| 18,021,682,805,873 |
e154618816eb20295ba04506ae07f44b8f664e33
|
d1f35be125b2ac85f0cb123c461e00dd6f21cdd6
|
/python_learn1/wordcount1.py
|
9fc9a65b6c81bb53e2534ed1f769d71ba1bde813
|
[] |
no_license
|
https://github.com/vzhng/python_practice
|
5ceeaf1441622f227ac691b76f3621817a268e5c
|
eb454c8833038e20250af85751fe124bfc539575
|
refs/heads/main
| 2023-06-01T13:13:15.955964 | 2021-06-11T21:15:06 | 2021-06-11T21:15:06 | 376,143,210 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#s1 = "nba cxy word history type cxy nba history word word type type history"
# Read one whitespace-separated line of words and print per-word frequencies.
s1 = input("Please input words:")
words = s1.split()
# Renamed from `dict`, which shadowed the builtin of the same name.
counts = {}
for word in words:
    print(word)  # echo each token as it is processed (original behavior)
    # dict.get collapses the membership test + increment into one line.
    counts[word] = counts.get(word, 0) + 1
print(counts)
|
UTF-8
|
Python
| false | false | 317 |
py
| 51 |
wordcount1.py
| 47 | 0.536278 | 0.514196 | 0 | 15 | 19 | 77 |
ddank0/Python-ex
| 10,642,928,961,836 |
59554e5c3ba66c247cc77e415251d89bb95fb658
|
4438a397db52f1dad60edc7f583d2dad103f217a
|
/lista 1.26.py
|
69c10678d31ca51af9ebf07d32ce803cb0f56a3d
|
[] |
no_license
|
https://github.com/ddank0/Python-ex
|
07c20ed2f609fad700f0801d7d174626285a3bb2
|
ab0ed0d7228d19695e7e320a973a928cddbe9055
|
refs/heads/master
| 2021-07-01T18:17:05.729400 | 2021-02-22T01:30:05 | 2021-02-22T01:30:05 | 224,468,844 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Read positive side lengths until a non-positive value, then decide whether
# they can form a polygon (longest side shorter than the sum of the rest).
n = float(input("Digite um numero:"))
maior_n = 0
soma = 0
cont = 0
while n > 0:
    cont += 1
    soma += n
    maior_n = max(maior_n, n)
    n = float(input("Digite um numero:"))
# Sum of all sides except the longest one.
x = soma - maior_n
if cont < 3:
    print('Não é possivel formar o poligono')
elif maior_n < x:
    print('É possivel formar o poligono')
else:
    print('Não é possivel formar o poligono')
|
UTF-8
|
Python
| false | false | 393 |
py
| 119 |
lista 1.26.py
| 114 | 0.582474 | 0.56701 | 0 | 19 | 19.421053 | 45 |
kalachand/codes
| 12,249,246,751,930 |
074e08f9d39e37f8cb8c4fd461a64825c02b248a
|
26551769200eafa5bdd72ea5a51f87e61dbd8d6d
|
/codechef/julylong2k14/lastsgarden.py
|
930671191f760b59a80466d1ccb31cc962504c9d
|
[] |
no_license
|
https://github.com/kalachand/codes
|
f894945a2cdc4c7868fd1f24c3b7727f32cf5ba1
|
ed45d7ffe380e4e5d52f95e9542a108e4ceeceb7
|
refs/heads/master
| 2021-01-15T12:44:29.552598 | 2015-11-03T21:02:36 | 2015-11-03T21:02:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def gcd(a,b):
    # Euclid's algorithm (recursive).
    if(b==0):
        return a
    return gcd(b,a%b)
# Sieve the odd numbers up to maxit; isprime[x] == 0 means x is prime.
# 0, 1 and 2 are flagged as non-prime, but 2 is pushed onto primeit anyway.
maxit=100009
primeit=[]
isprime={}
for i in range(0,maxit+1):
    isprime[i]=0
isprime[0]=1
isprime[1]=1
isprime[2]=1
primeit.append(2)
for i in range(4,maxit,2):
    isprime[i]=1
sqrtit=maxit**0.5
sqrtit=int(sqrtit)+1
for i in range(3,sqrtit,2):
    if(isprime[i]==0):
        for j in range(i*i,maxit,2*i):
            isprime[j]=1
        primeit.append(i)
# Python 2 I/O: input() evaluates the line, raw_input() reads it as text.
# For each test case: read a permutation, measure the length of every cycle,
# then print the LCM of the distinct cycle lengths (reduced mod 1e9+7).
t=input()
for i in range(0,t):
    n=input()
    primetrack=[]
    calcit=[]   # one entry per cycle: the cycle's length
    newit=[]    # calcit with duplicates removed
    for j in range(0,100001):
        primetrack.append(0)
    arr=[]
    dictit={}   # visited flags, keyed by element value
    xx=raw_input().split()
    arr.append(0)  # dummy entry so arr is 1-indexed like the input
    dictit[0]=0
    for j in xx:
        val=int(j)
        arr.append(val)
        dictit[val]=0
    # Walk each unvisited cycle (y = arr[y] until it returns to the start)
    # and record its length.
    for j in range(1,n+1):
        if dictit[arr[j]]==0:
            dictit[arr[j]]=1
            x=arr[j]
            y=arr[x]
            cnt=1
            while(y!=x):
                cnt+=1
                dictit[y]=1
                y=arr[y]
            calcit.append(cnt)
    calcit.sort()
    sizeit=len(calcit)
    # Deduplicate the sorted cycle lengths into newit.
    for j in range(0,sizeit):
        if(j==sizeit-1 or calcit[j]!=calcit[j+1]):
            newit.append(calcit[j])
    sizeit=len(newit)
    if(sizeit==1):
        print newit[0]
    else:
        # Fold the list into an LCM via lcm(a,b) = a*b/gcd(a,b), reducing
        # mod 1e9+7 after each step.
        # NOTE(review): taking gcd of an already mod-reduced value is not a
        # sound modular LCM in general -- verify against the problem limits.
        calc=newit[0]
        modit=1000000007
        for i in range(1,sizeit):
            calc1=gcd(calc,newit[i])
            calc2=(calc*newit[i])/calc1
            calc=calc2
            calc%=modit
        print calc
|
UTF-8
|
Python
| false | false | 1,181 |
py
| 401 |
lastsgarden.py
| 378 | 0.624894 | 0.566469 | 0 | 67 | 16.626866 | 44 |
syslabcomarchive/gfb.policy
| 6,287,832,133,030 |
58d635aadc15ba8a753f46e6d42b728833ba2469
|
071cf102c9ccb8c833cadb782029aaa4f553bda8
|
/gfb/policy/order.py
|
c0b2b0306e7ef312847be3c4ade1633b9eb59e49
|
[] |
no_license
|
https://github.com/syslabcomarchive/gfb.policy
|
f8975cc8b44966b03b9d38928446d4f8419ae40e
|
32d11b8a408f2f43580bc012b8460950725d881a
|
refs/heads/master
| 2023-02-25T04:37:55.385359 | 2016-11-04T16:55:58 | 2016-11-04T16:55:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from plone.folder.default import DefaultOrdering
class PrependOrdering(DefaultOrdering):
    """prepend new added content
    copied from collective.folderorder
    """
    def notifyAdded(self, id):
        """
        Inform the ordering implementation that an item was added
        """
        # Insert the new id at the FRONT of the order (DefaultOrdering
        # appends at the end), then rebuild the id -> position index.
        order = self._order(True)
        pos = self._pos(True)
        order.insert(0, id)
        pos.clear()
        for n, id in enumerate(order):
            pos[id] = n
def set_prepend(object, event):
    # Event subscriber: switch the new folder to the "prepend" ordering above.
    object.setOrdering("prepend")
|
UTF-8
|
Python
| false | false | 536 |
py
| 48 |
order.py
| 27 | 0.615672 | 0.613806 | 0 | 22 | 23.363636 | 65 |
zebengberg/wasatch
| 10,625,749,128,761 |
ba2bc7ea1947b697cb6ebb3861f427ad73b2b338
|
abca6158aafd16a5be7fd81a9743eac09c99dd10
|
/python/pygame_examples/snake.py
|
bd6398be18ae264a2b13f1cbf74541fc95427461
|
[] |
no_license
|
https://github.com/zebengberg/wasatch
|
0a6bed336220772e216ca4e01930d1efd392a4c6
|
423b679644901f5bec0272175ab582de0ae61997
|
refs/heads/master
| 2023-03-31T12:12:51.295511 | 2021-04-14T20:18:34 | 2021-04-14T20:18:34 | 240,130,163 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# run with pythonw on mac
import random
import pygame
# Initialise pygame and create the 400x400 window shared by the Snake class
# and the game loop below.
pygame.init()
screen = pygame.display.set_mode([400, 400])
pygame.display.set_caption('Hello World')
class Snake:
    """A circle the player steers around the shared pygame ``screen``,
    eating randomly placed food to grow.

    Relies on the module-level ``screen`` surface and ``random``.
    """

    def __init__(self):
        # Start in the centre of the window, heading right.
        self.x = screen.get_width() // 2
        self.y = screen.get_height() // 2
        self.r = 10
        self.direction = 'RIGHT'
        self.color = (0, 0, 255)
        self.food_position = self._random_food_position()
        self.food_color = (255, 127, 0)

    @staticmethod
    def _random_food_position():
        # Pick a food spot at least 10 px from every edge.  Bug fix: the y
        # coordinate previously used get_width(); harmless on this square
        # 400x400 window but wrong for any other aspect ratio.
        return (random.randint(10, screen.get_width() - 10),
                random.randint(10, screen.get_height() - 10))

    def update_position(self):
        """Advance one pixel along the current heading (no-op when None)."""
        if self.direction == 'RIGHT':
            self.x += 1
        elif self.direction == 'LEFT':
            self.x -= 1
        elif self.direction == 'UP':
            self.y -= 1
        elif self.direction == 'DOWN':
            self.y += 1

    def draw(self):
        """Render the food first so the snake is drawn on top of it."""
        pygame.draw.circle(screen, self.food_color, self.food_position, 10)
        pygame.draw.circle(screen, self.color, (self.x, self.y), self.r)

    def update_direction(self, key):
        """Turn on an arrow key, but only by 90 degrees (no instant reversal)."""
        key_codes = {pygame.K_UP: 'UP',
                     pygame.K_DOWN: 'DOWN',
                     pygame.K_RIGHT: 'RIGHT',
                     pygame.K_LEFT: 'LEFT'}
        if key in key_codes:
            target_direction = key_codes[key]
            direction_set = {target_direction, self.direction}
            # Perpendicular pairs only; {UP, DOWN} / {LEFT, RIGHT} reversals
            # (and repeats of the current direction) are rejected.
            valid_directions = [{'UP', 'RIGHT'},
                                {'UP', 'LEFT'},
                                {'DOWN', 'RIGHT'},
                                {'DOWN', 'LEFT'}]
            if direction_set in valid_directions:
                self.direction = target_direction

    def hit_wall(self):
        """Freeze the snake (direction = None) when it touches any edge."""
        w, h = screen.get_width(), screen.get_height()
        if (self.x - self.r < 0 or self.x + self.r > w or
                self.y - self.r < 0 or self.y + self.r > h):
            self.direction = None

    def is_near_food(self):
        """Return True when the centre is within r + 20 px of the food."""
        dx = self.x - self.food_position[0]
        dy = self.y - self.food_position[1]
        return dx ** 2 + dy ** 2 <= (self.r + 20) ** 2

    def eat(self):
        """Respawn the food somewhere else and grow the snake."""
        self.food_position = self._random_food_position()
        self.r += 2
# Main game loop: handle events, advance the snake one pixel per frame,
# detect walls and food, redraw, repeat until the window is closed.
s = Snake()
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif event.type == pygame.KEYDOWN:
            # update the direction
            s.update_direction(event.key)
            # restart the game (re-running __init__ resets position/size/food)
            if event.key == pygame.K_r:
                s.__init__()
    screen.fill((255, 255, 255))
    s.update_position()
    s.hit_wall()
    if s.is_near_food():
        s.eat()
    s.draw()
    # updates pygame display
    pygame.display.flip()
pygame.quit()
|
UTF-8
|
Python
| false | false | 2,541 |
py
| 26 |
snake.py
| 19 | 0.558835 | 0.534042 | 0 | 92 | 26.619565 | 71 |
ustcrding/ai-study
| 17,824,114,282,737 |
a83736b506345f9062c6451b11907d2089bca5e8
|
9d5531e60a7b2866952fb0e30af7e89c6739fc08
|
/作业/第一次作业/代码/ljw/文本分类/cluster.py
|
de607a7ad56c9548eb3a67ecadf753bb35f15aad
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/ustcrding/ai-study
|
8e99db453fae6ad9f7687a3aba77b155957445cc
|
d09bf8e1ae15e8b4c2d5eeb81a733087495475ca
|
refs/heads/master
| 2020-04-30T09:15:03.927375 | 2019-03-23T12:57:02 | 2019-03-23T12:57:02 | 176,741,730 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import jieba
from sklearn.cluster import KMeans
import numpy as np
import os
def read_from_file(file_name):
    """Return the entire contents of *file_name* decoded as UTF-8."""
    with open(file_name, encoding="utf-8") as handle:
        return handle.read()
def stop_words(stop_word_file):
    """Tokenise the stop-word file with jieba and return the tokens as a set."""
    raw_text = read_from_file(stop_word_file)
    return set(jieba.cut(raw_text))
def del_stop_words(words, stop_words_set):
    """Segment *words* (an un-tokenised document) with jieba and drop every
    token found in *stop_words_set*; return the surviving tokens as a list.
    """
    return [token for token in jieba.cut(words) if token not in stop_words_set]
def get_all_vector(file_path, stop_words_set):
    """Build a TF-IDF matrix for the newline-separated documents in *file_path*.

    Returns (posts, tfidf): the raw document strings and an
    (n_docs, n_terms) numpy array.
    """
    # Close the file handle (the original leaked it).
    with open(file_path, encoding="utf-8") as fp:
        posts = fp.read().split("\n")
    docs = []
    word_set = set()
    for post in posts:
        doc = del_stop_words(post, stop_words_set)
        docs.append(doc)
        word_set |= set(doc)
    word_set = list(word_set)
    # Raw term-count matrix: one row per document, one column per term.
    docs_vsm = []
    for doc in docs:
        docs_vsm.append([doc.count(word) * 1.0 for word in word_set])
    docs_matrix = np.array(docs_vsm)
    # Document frequency per term; every term occurs in at least one document
    # (word_set was built from the documents), so no division by zero.
    column_sum = [float(len(np.nonzero(docs_matrix[:, i])[0])) for i in range(docs_matrix.shape[1])]
    column_sum = np.array(column_sum)
    column_sum = docs_matrix.shape[0] / column_sum
    idf = np.log(column_sum)
    idf = np.diag(idf)
    # Normalise each row to term frequencies.  Bug fix: the original loop
    # rebound its loop variable (`doc_v = doc_v / ...`), so the normalisation
    # was silently discarded; normalise the whole matrix instead, leaving
    # all-zero rows untouched.
    row_sums = docs_matrix.sum(axis=1)
    row_sums[row_sums == 0] = 1.0
    docs_matrix = docs_matrix / row_sums[:, np.newaxis]
    tfidf = np.dot(docs_matrix, idf)
    return posts, tfidf
if __name__ == "__main__":
    # NOTE(review): this rebinds the module-level name `stop_words` from the
    # function to its result; it works here but shadows the function afterwards.
    stop_words = stop_words("./stopwords.txt")
    names, tfidf_mat = get_all_vector("./意见反馈.txt", stop_words)
    # Cluster the TF-IDF rows into 10 groups with k-means.
    km = KMeans(n_clusters=10)
    km.fit(tfidf_mat)
    clusters = km.labels_.tolist()
    # Group the original document strings by their assigned cluster label.
    str_clusters = {}
    for i in range(len(clusters)):
        if str_clusters.get(clusters[i]) is None:
            str_clusters.setdefault(clusters[i], [])
        str_clusters.get(clusters[i]).append(names[i])
|
UTF-8
|
Python
| false | false | 2,503 |
py
| 14 |
cluster.py
| 8 | 0.591872 | 0.584522 | 0 | 83 | 26.86747 | 100 |
Gvex95/BlackJackMindFuck
| 13,511,967,118,706 |
29c0b13b1fc493a920bcf4bf567b7e9737204b81
|
e088dbd98fcfecc0c95bc7d1693e252d86fc4385
|
/BlackJack/BlackJack.py
|
f34a91b11589a02e8ba9a7502db736b0c4559e52
|
[] |
no_license
|
https://github.com/Gvex95/BlackJackMindFuck
|
872713c9202ae318826b159385d7683588aeb699
|
84a26a4292ec6eaa0967ecd5ef06d5519401a62f
|
refs/heads/master
| 2021-01-25T07:55:07.619589 | 2017-06-07T23:09:21 | 2017-06-07T23:09:21 | 93,686,163 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
import random
# Memo table for IzracunajMaxPara; the driver at the bottom resets every
# entry to -1 ("not computed") before the search starts.
izracunato = [None] * 52
# A 52-card deck as blackjack values: aces stored as 1 (upgraded to 11 when
# they fit), face cards as 10.
spil = [1,2,3,4,5,6,7,8,9,10,10,10,10,
        1,2,3,4,5,6,7,8,9,10,10,10,10,
        1,2,3,4,5,6,7,8,9,10,10,10,10,
        1,2,3,4,5,6,7,8,9,10,10,10,10,]
def IzracunajMaxPara(i):
    """Best total winnings obtainable from card i to the end of the deck.

    Memoised in the module-level `izracunato` table (-1 = not yet computed).
    """
    if izracunato[i] != -1:  # already solved this suffix of the deck
        return izracunato[i]
    best = 0  # we may always stop playing and win nothing more
    # Try ending the current round after cards i..h-1 (at least 4 cards).
    for h in range(i + 4, 52):
        outcome = IshodRunde(i, h)
        if outcome == -4:  # this split of the deck is not a legal round
            continue
        best = max(best, outcome + IzracunajMaxPara(h))
    izracunato[i] = best
    return best
def IshodRunde(i,h):
    # Outcome of one round played with cards spil[i:h]: 1 = player wins,
    # 0 = push, -1 = player loses, -4 = this deal is not a legal round.
    igrac = 0    # player's hand total
    delilac = 0  # dealer's hand total
    # If an odd number of cards is dealt (e.g. cards 7..12 -> pairs 7..11
    # plus one), deal the pairs first and handle the last card separately.
    # This loop deals the alternating cards both player and dealer take.
    while(i<(h-(h-i)%2)):
        if (spil[i] == 1 and igrac <=10):
            igrac+=11
        else:
            if (igrac + spil[i]<=21):
                igrac+=spil[i]
        if(spil[i+1] == 1 and delilac<=10):
            delilac+=11
        else:
            if (delilac<17):
                delilac+=spil[i+1]
            else:
                # with 17 or more the dealer only draws when he cannot bust
                if(delilac + spil[i+1] <= 21):
                    delilac += spil[i+1]
        i+=2 # the player draws first, then the dealer, alternating
    # resolve a possible final odd card after the paired deals
    if ((h-i)%2 != 0):
        if (spil[h-1] == 1 and igrac<=10):
            igrac+=11
        else:
            if(igrac + spil[h-1]<=21):
                igrac += spil[h-1]
            # the player would bust by drawing, so the card goes to the dealer
            else:
                if (spil[h-1] == 1 and delilac <=10):
                    delilac+=11
                else:
                    if(delilac>igrac):
                        return -4
                    else:
                        delilac+= spil[h-1]
                        if (delilac<17):
                            return -4
    # score the finished hands
    if (igrac>21):
        return -1
    if (delilac>21):
        return 1
    if (igrac>delilac):
        return 1
    if (igrac == delilac):
        return 0
    if (igrac < delilac):
        return -1
# Shuffle the deck several times, print it, reset the memo table, and print
# the maximum winnings achievable across the whole deck.
for i in range(0,10):
    random.shuffle(spil)
print("SPIL: ",spil)
for i in range(0,52):
    izracunato[i] = -1
print("Maksimalno para za ceo dek karata: ")
print(IzracunajMaxPara(0), "$")
|
UTF-8
|
Python
| false | false | 2,485 |
py
| 1 |
BlackJack.py
| 1 | 0.495775 | 0.433803 | 0 | 84 | 28.52381 | 141 |
bx0709/Price_Comparator
| 19,095,424,624,976 |
0e4f967c04b770b4f784a98b975c5734fa3a3c4d
|
dfaa566aea6dcd5d8c94802154a309e102869385
|
/book_comp.py
|
19818be642081612cd68a76b96faa4f3452d7222
|
[] |
no_license
|
https://github.com/bx0709/Price_Comparator
|
cd5cc4dfeeadd85f37ec25397f217c60a609dd00
|
dafe3ee6adedd11bfea8af9d494a9a6565ff0d61
|
refs/heads/master
| 2022-11-26T06:40:50.919598 | 2020-08-08T20:50:39 | 2020-08-08T20:50:39 | 286,116,818 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from tkinter import *
import time
from tkinter import ttk
from tkinter.ttk import *
import local_price
import ib_store
import compare_prices
import sqlite3
from tkinter import Tk
# The fifteen titles shown on the selection buttons; index i corresponds to
# book id i + 1 in the `books` table seeded by Login_page.Database.
book_name = ['18 Years JEE Main Physics Chapterwise Solutions',
             'Daily Practice Problems of PCM for JEE Main/Advanced 1 Edition',
             'Errorless Chemistry for JEE MAIN 2020 by NTA ',
             'Organic Chemistry 8th Edition by Leroy G. Wade',
             ' Organic Chemistry 2nd Edition by Clayden, Greeves, Warren',
             'JEE MAIN EXPLORER',
             ' Principles of Physics by Walker, Halliday, Resnick',
             ' Mathematics MCQ', 'Differential Calculus ,Author : S.K. Goyal',
             ' Skill in Mathematics - Algebra for JEE Main', 'NEW PATTERN JEE PROBLEMS PHYSICS FOR JEE MAIN',
             ' Problems in Physical Chemistry for JEE',
             'Concise Inorganic Chemistry: Fifth Edition by J.D. Lee',
             'Fundamentals of Mathematics for JEE Main/Advanced - Integral Calculus',
             'Chapterwise Solutions of Physics for JEE Main 2002-2017']

# Cached best price per book (0 = not fetched yet); index = book id - 1.
updated = [0] * len(book_name)
class Login_page():
    """Login / registration window; accounts live in a local SQLite file."""

    # ----------------------------CONNECT TO DATABASE---------------------------#
    def Database(self):
        """Open accounts.db and (re)create the schema.

        Ensures the members table (with a default admin/admin account),
        reseeds the books catalogue, and creates the per-user cart table.
        Leaves the module-level `conn`/`cursor` open for the caller.
        """
        global conn, cursor
        conn = sqlite3.connect("accounts.db")
        cursor = conn.cursor()
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS `members` (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, username TEXT, password TEXT)")
        cursor.execute("SELECT * FROM `members` WHERE `username` = 'admin' AND `password` = 'admin'")
        if cursor.fetchone() is None:
            cursor.execute("INSERT INTO `members` (username, password) VALUES('admin', 'admin')")
            conn.commit()
        # model_numb = ['8174505172', '8174504893', '86858']
        # Drop and reseed the catalogue on every call.  Bug fix: `IF EXISTS`
        # keeps the very first run from crashing when `books` does not exist.
        cursor.execute("DROP TABLE IF EXISTS `books`")
        conn.commit()
        final_model_no = ['9789389310788', '9789353501488', '9788193766095', '9789332578586', '9780198728719',
                          '9789388899796', '9788126552566', '9788177098471', '9789384934064', '9789313191889',
                          '9789313191353', '9789384934873', '9788126515547', '9789332570276', '9789386650788']
        book_name = ['18 Years JEE Main Physics Chapterwise Solutions',
                     'Daily Practice Problems of PCM for JEE Main/Advanced 1 Edition',
                     'Errorless Chemistry for JEE MAIN 2020 by NTA ',
                     'Organic Chemistry 8th Edition by Leroy G. Wade',
                     ' Organic Chemistry 2nd Edition by Clayden, Greeves, Warren',
                     'JEE MAIN EXPLORER',
                     ' Principles of Physics by Walker, Halliday, Resnick',
                     ' Mathematics MCQ', 'Differential Calculus ,Author : S.K. Goyal',
                     ' Skill in Mathematics - Algebra for JEE Main', 'NEW PATTERN JEE PROBLEMS PHYSICS FOR JEE MAIN',
                     ' Problems in Physical Chemistry for JEE',
                     'Concise Inorganic Chemistry: Fifth Edition by J.D. Lee',
                     'Fundamentals of Mathematics for JEE Main/Advanced - Integral Calculus',
                     'Chapterwise Solutions of Physics for JEE Main 2002-2017']
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS `books` (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, model_no TEXT, name TEXT)")
        cursor.execute("SELECT * FROM `books` WHERE `id` = 1 AND `model_no` = '9789389310788'")
        if cursor.fetchone() is None:
            for i in range(len(final_model_no)):
                cursor.execute("INSERT INTO `books` (model_no, name) VALUES (?,?)",
                               (final_model_no[i], book_name[i]))
                conn.commit()
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS `book_record` (sno INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, user_id INTEGER, book_id INTEGER, count INTEGER )")
        conn.commit()

    # -------------------------------FRONTEND-------------------------------------#
    def __init__(self, master):
        """Build the login window: title, username/password form and buttons."""
        self.master = master
        self.style = ttk.Style()
        self.master.title("Book Comparator-Login")
        self.master.geometry('1000x1000')
        self.style.configure('TFrame', background='Lightskyblue2')
        self.frame = ttk.Frame(self.master, style='TFrame')
        self.frame.pack()
        self.style.configure('W.TButton', font=('verdana', 10, 'bold'),
                             foreground='black', background='blue2')
        self.style.configure('TLabel', background='Lightskyblue2')
        self.topframe = Frame(self.frame, width=10000, height=3000, style='new.TFrame')
        self.topframe.grid(row=5, column=0, pady=20)
        self.bottomframe = Frame(self.frame, width=1000, height=5000, style='new.TFrame')
        self.bottomframe.grid(row=20, column=0,)
        self.login_btn = ttk.Button(self.bottomframe, text="Login", style="W.TButton", command=self.Login)
        self.login_btn.grid(row=3, column=2)
        self.login_btn.bind('<Return>', self.Login)
        self.register_btn = ttk.Button(self.bottomframe, text="Register", style="W.TButton", command=self.Register)
        self.register_btn.grid(row=3, column=3, padx=15)
        self.register_btn.bind('<Return>', self.Register)
        self.title = ttk.Label(self.topframe, text="Book Comparator!", font=('Cambria', 30, 'bold'), style='TLabel')
        self.title.grid()
        self.headline = ttk.Label(self.topframe, text="Hello user! Register if you are a new user or login if you already have an account.", font=('Cambria', 15), style='TLabel')
        self.headline.grid(row=1, column=0, pady=20)
        # -------------------------------LOGIN FORM-------------------------------------#
        self.caption = Label(self.topframe, text="", font=('arial', 10))
        self.username = StringVar()
        self.password = StringVar()
        self.user_text = ttk.Label(self.topframe, text="Username:", font=('arial', 18), style='TLabel')
        self.user_text.grid(row=2, column=0, pady=5, sticky=W)
        self.user_value = Entry(self.topframe, textvariable=self.username)
        self.user_value.grid(row=2, column=0)
        self.pwd_text = ttk.Label(self.topframe, text="Password:", font=('arial', 18), style='TLabel')
        self.pwd_text.grid(row=3, column=0, sticky=W)
        self.pwd_value = Entry(self.topframe, textvariable=self.password, show="*")
        self.pwd_value.grid(row=3, column=0)
        # Status line used for validation / error feedback.
        self.lbl_text = ttk.Label(self.bottomframe, style='TLabel')
        self.lbl_text.grid(row=5, column=2, pady=10)

    #-------------------------------------------------#
    def new_page(self):
        """Open the main comparison window in a new Toplevel."""
        self.NewPage = Toplevel(self.master)
        self.project = main_page(self.NewPage)
        self.project.config(bg='Lightskyblue2')

    # -------------------------------LOGIN FUNCTION-------------------------------------#
    def Login(self, event=None):
        """Validate the entered credentials; open the main window on success."""
        self.Database()
        if self.username.get() == "" or self.password.get() == "":
            self.lbl_text.config(text="Please complete the required field!")
        else:
            # Single parameterised lookup (the original ran the query twice).
            cursor.execute("SELECT * FROM `members` WHERE `username` = ? AND `password` = ?",
                           (self.username.get(), self.password.get()))
            data = cursor.fetchone()
            if data is not None:
                self.username.set("")
                self.password.set("")
                self.lbl_text.config(text="")
                self.user_id = data[0]
                main_page.get_id(main_page, self.user_id)  # sends id of the logged in user
                self.new_page()
            else:
                self.lbl_text.config(text="Invalid username or password")
                self.username.set("")
                self.password.set("")
            cursor.close()
            conn.close()

    # -------------------------------REGISTER FUNCTION-------------------------------------#
    def Register(self, event=None):
        """Create a new account unless the username is already taken."""
        self.Database()
        if self.username.get() == "" or self.password.get() == "":
            self.lbl_text.config(text="Please complete the required field!")
        else:
            cursor.execute("SELECT * FROM `members` WHERE `username` = ? AND `password` = ?",
                           (self.username.get(), self.password.get()))
            if cursor.fetchone() is not None:
                self.username.set("")
                self.password.set("")
                self.lbl_text.config(text="")
                self.lbl_text.config(text="Account with this username already exists.")
            else:
                cursor.execute("INSERT INTO `members` (username, password) VALUES (?,?)",
                               (self.username.get(), self.password.get()))
                conn.commit()
                self.lbl_text.config(text="Account created successfully!")
                self.username.set("")
                self.password.set("")
            cursor.close()
            conn.close()
# -------------------------------NEW WINDOW(MAIN)-------------------------------------#
class main_page():
    """Main comparison window: book buttons, price display and shopping cart."""

    def get_id(self, user_id):
        # Remember which member is logged in (set by Login_page on login;
        # note the caller passes the class itself as `self`).
        self.user_id = user_id
        return self.user_id

    def get_book_id(self, recieved):
        # Remember which book button was pressed last.
        self.book_id = recieved
        return self.book_id

    # -------------------------------FETCH PRICES-------------------------------------#
    def project(id, list):
        """Fetch local/Amazon/Flipkart prices for book `id` and show them in `list`.

        NOTE(review): defined without `self` and invoked through the class,
        so it behaves as a plain function.
        """
        Login_page.Database(Login_page)
        sql = "SELECT * FROM `books` WHERE id=(?)"
        cursor.execute(sql, (id,))
        model = cursor.fetchone()
        model_number = model[1]
        list.delete(0, END)
        price = local_price.get_local_price(model_number)
        list.insert(0, "Book price in offline stores : " + price)
        on_price1, on_price2 = ib_store.get_online_price(model_number)
        list.insert(1, "Book price on Amazon : " + on_price1)
        list.insert(2, "Book price on Flipkart : " + on_price2)
        # Cache the cheapest of the three prices for later cart totals.
        updated[id-1] = min(compare_prices.price_ib(on_price1), compare_prices.price_ib(on_price2), compare_prices.price_local(price))
        print(updated)

    # -------------------------------SHOW SECOND WINDOW-------------------------------------#
    def __init__(self, master1):
        """Build the comparison window: price list, cart area and book buttons."""
        self.master1 = master1
        self.style = ttk.Style()
        self.master1.title("Book Comparator")
        self.width = root.winfo_screenwidth()
        self.height = root.winfo_screenheight()
        self.master1.geometry("%dx%d" % (self.width, self.height))
        self.frame = Frame(self.master1)
        self.frame.pack()
        self.style.configure('TFrame', background='Lightskyblue2')
        Mainframe = ttk.Frame(self.master1, style='TFrame')
        Mainframe.pack()
        self.style.configure('TLabel', background='Lightskyblue2')
        self.title = ttk.Label(Mainframe, text="Book Comparator!", font=('Cambria', 30, 'bold'), style='TLabel')
        self.title.pack()
        self.sub_title = ttk.Label(Mainframe, text="Your Prices are shown here:", font=('Cambria', 15), style='TLabel')
        self.sub_title.place(x=10, y=66)
        Dataframe = Frame(Mainframe, width=900, height=100)
        Dataframe.pack(side=LEFT)
        ARframe = Frame(Mainframe, width=500, height=100)
        ARframe.pack(side=BOTTOM)
        self.ARframe_2 = ARframe
        self.Dataframe_2 = Dataframe  # new frame created so as to display the "add to cart" content
        self.sub_title = ttk.Label(self.Dataframe_2, text="Amount to be paid:", font=('Cambria', 15), style='TLabel')
        self.sub_title.place(x=10, y=210)
        self.show_price = Listbox(Dataframe, width=20, bd=10, relief='groove', fg='Gray')
        self.show_price.grid(row=1, column=0, padx=8, pady=20)
        Buttonframe = Frame(Mainframe, width=600, height=950)
        Buttonframe.pack(side=RIGHT)
        self.Buttonframe = Buttonframe
        list = Listbox(Dataframe, width=50)
        list.grid(row=0, column=0, sticky=N, pady=30)
        self.display_cost = 0
        self.style.configure('W.TButton', font=('verdana', 9), foreground='black', background='blue2')
        self.style.configure('TButton', font=('Times New Roman', 12, 'bold'), foreground='black', background='navy', relief='flat')
        # created 15 buttons each with different functionality by assigning different id's to them.
        for buttons in range(1, 16):
            book1 = ttk.Button(Buttonframe, text=book_name[buttons - 1], style='W.TButton',
                               command=lambda buttons=buttons: main_page.get_book_id(main_page, buttons))
            book1.grid(row=buttons+1, column=0, sticky=W, pady=5)
            showprice = ttk.Button(Buttonframe, text="SHOW PRICE", style='TButton',
                                   command=lambda buttons=buttons: main_page.project(self.book_id, list))
            showprice.grid(row=0, column=0, pady=10)
        add_to_cart_button = ttk.Button(Buttonframe, text="ADD TO CART", style='TButton', command=self.add_to_cart)
        add_to_cart_button.grid(row=18, column=0, pady=7)

    # -------------------------------ADD TO CART-------------------------------------#
    def add_book(self, book, cart_list):
        """Add one copy of `book` to the user's cart and update the total."""
        Login_page.Database(Login_page)
        cursor.execute("SELECT * FROM `book_record` WHERE `user_id` = ? AND `book_id` = ?", (self.user_id, book))
        if cursor.fetchone() is not None:
            cursor.execute("UPDATE `book_record` SET count=count+1 WHERE `user_id` = ? AND `book_id` = ?",
                           (self.user_id, book))
            conn.commit()
        else:
            cursor.execute("INSERT INTO `book_record` (user_id, book_id, count) VALUES (?,?,?)", (self.user_id, book, 1))
            conn.commit()
        sql = "SELECT model_no,name FROM `books` WHERE id=(?)"
        cursor.execute(sql, (book,))
        model = cursor.fetchone()
        cart_list.insert(END, "Added " + str(model[1]))
        # Use the cached best price when available; otherwise fetch and cache it.
        if updated[book-1] == 0:
            cost, avail = compare_prices.compare(model[0])
            updated[book - 1] = cost
        else:
            cost = updated[book-1]
        self.display_cost += cost
        self.show_price.delete(0, END)
        self.show_price.insert(END, self.display_cost)
        print(self.display_cost)

    # -------------------------------REMOVE FROM CART-------------------------------------#
    def remove_book(self, book, cart_list):
        """Remove one copy of `book` from the cart, then redraw the cart list."""
        Login_page.Database(Login_page)
        sql = "SELECT model_no FROM `books` WHERE id=(?)"
        cursor.execute(sql, (book,))
        model = cursor.fetchone()
        cursor.execute("SELECT count FROM `book_record` WHERE `user_id` = ? AND `book_id` = ?", (self.user_id, book))
        rec = cursor.fetchone()
        if rec is not None:
            if updated[book-1] == 0:
                cost, avail = compare_prices.compare(model[0])
                updated[book - 1] = cost
            else:
                cost = updated[book-1]
            self.display_cost -= cost
            # Last copy -> delete the row; otherwise just decrement the count.
            if rec[0] == 1:
                cursor.execute("DELETE FROM `book_record` WHERE `user_id` = ? AND `book_id` = ?", (self.user_id, book))
                conn.commit()
            else:
                cursor.execute("UPDATE `book_record` SET count=count-1 WHERE `user_id` = ? AND `book_id` = ?",
                               (self.user_id, book))
                conn.commit()
        else:
            pass
        # Rebuild the visible cart from the database.
        cart_list.delete(0, END)
        self.show_price.delete(0, END)
        self.show_price.insert(END, self.display_cost)
        record_sql = "SELECT * FROM `book_record` WHERE user_id=(?)"
        cursor.execute(record_sql, (self.user_id,))
        result = cursor.fetchall()
        for r in result:
            sql_1 = "SELECT model_no,name FROM `books` WHERE id=(?)"
            cursor.execute(sql_1, (r[2],))
            book_display = cursor.fetchone()
            b = book_display[1]
            string = str(b) + " Quantity :" + str(r[3])
            cart_list.insert(END, string)

    # -------------------------------DISPLAY CURRENT CART-------------------------------------#
    def add_to_cart(self):
        """Show the cart pane: current contents, Add/Remove buttons and total."""
        cart_list = Listbox(self.Dataframe_2, width=75)
        scroll = Scrollbar(self.Dataframe_2, command=cart_list.yview, orient=VERTICAL)
        cart_list.configure(yscrollcommand=scroll.set)
        cart_list.grid(row=3, column=0, padx=8)
        scroll.grid(row=3, column=3, sticky=N + S)
        self.sub_title = ttk.Label(self.Dataframe_2, text="Your Cart:", font=('Cambria', 15), style='TLabel')
        self.sub_title.place(x=10, y=410)
        add_1 = ttk.Button(self.Buttonframe, text="Add", command=lambda: main_page.add_book(self, self.book_id, cart_list))
        add_1.grid(row=6, column=2, sticky='W')
        remove_1 = ttk.Button(self.Buttonframe, text="Remove",
                              command=lambda: main_page.remove_book(self, self.book_id, cart_list))
        remove_1.grid(row=6, column=3, sticky='E')
        Login_page.Database(Login_page)
        #self.display_cost = 0
        record_sql = "SELECT * FROM `book_record` WHERE user_id=(?)"
        cursor.execute(record_sql, (self.user_id,))
        result = cursor.fetchall()
        for r in result:
            sql_1 = "SELECT model_no,name FROM `books` WHERE id=(?)"
            cursor.execute(sql_1, (r[2],))
            book_display = cursor.fetchone()
            modelno = book_display[0]
            quantity = r[3]
            string = str(book_display[1]) + " Quantity :" + str(quantity)
            cart_list.insert(END, string)
            if updated[r[2]-1] == 0:
                cost, avail = compare_prices.compare(modelno)
                updated[r[2] - 1] = cost
            else:
                cost = updated[r[2]-1]
                # Bug fix: `avail` was unbound on this cached-price path and
                # raised NameError; a cached price implies the book was
                # available when it was first fetched.
                avail = 1
            if (avail == 0):
                pass
            else:
                self.display_cost += quantity * (cost)
        self.show_price.delete(0, END)
        self.show_price.insert(END, self.display_cost)
        print(self.display_cost)

    def config(self, bg):
        # Keep the background consistent with the login window.
        self.master1.configure(bg='Lightskyblue2')
# Build the login window, then play an image slide-show on a canvas before
# entering the Tk main loop.
root=Tk()
project = Login_page(root)
imagelist = ['images/book1.gif','images/book2.gif','images/book3.gif','images/book4.gif','images/book5.gif','images/book6.gif']
photo = PhotoImage(file=imagelist[0])
width = photo.width()
height = photo.height()
canvas = Canvas(width=width, height=height)
canvas.pack()
# Keep references to the PhotoImages so Tk does not garbage-collect them.
giflist = []
for imagefile in imagelist:
    photo = PhotoImage(file=imagefile)
    giflist.append(photo)
# NOTE(review): 1000 passes x 6 images x 1 s sleep blocks the UI for roughly
# 100 minutes before mainloop() ever runs -- confirm this loop is intended.
for k in range(0, 1000):
    for gif in giflist:
        canvas.delete(ALL)
        canvas.create_image(width/2.0, height/2.0, image=gif)
        canvas.update()
        time.sleep(1)
#root.configure(bg='Lightskyblue2')
root.mainloop()
|
UTF-8
|
Python
| false | false | 19,018 |
py
| 5 |
book_comp.py
| 4 | 0.562888 | 0.535177 | 0 | 394 | 47.271574 | 174 |
genomoncology/related
| 12,360,915,894,899 |
cef3726bf446d02eff10eb1d2ba8d85742c16405
|
a21ccc8ced1b57b351e3772e3939714e330de2d5
|
/tests/ex08_self_reference/models.py
|
34564014a623eea4f5b534a8000e7835f40c0e03
|
[
"MIT"
] |
permissive
|
https://github.com/genomoncology/related
|
f7d4310fd5776441afd045a96f07f9dad9797a6c
|
2f3db6b07f5515792370d97f790cd46ec0882a9d
|
refs/heads/master
| 2023-01-12T08:46:16.068921 | 2022-08-05T13:34:47 | 2022-08-05T13:34:47 | 92,290,249 | 208 | 18 |
MIT
| false | 2022-12-27T17:03:18 | 2017-05-24T12:31:44 | 2022-12-19T22:03:05 | 2022-12-27T17:03:16 | 328 | 190 | 15 | 21 |
Python
| false | false |
import related
# Dotted import path of Node itself; passing it as a string lets the fields
# below reference the class before it is fully defined.
node_cls = "ex08_self_reference.models.Node"
@related.mutable
class Node(object):
    """A named node that can reference other Nodes three ways: a single
    child, an ordered sequence, and a mapping keyed by each child's name."""
    name = related.StringField()
    node_child = related.ChildField(node_cls, required=False)
    node_list = related.SequenceField(node_cls, required=False)
    node_map = related.MappingField(node_cls, "name", required=False)
|
UTF-8
|
Python
| false | false | 329 |
py
| 42 |
models.py
| 31 | 0.735562 | 0.729483 | 0 | 11 | 28.909091 | 69 |
fedspendingtransparency/data-act-broker-backend
| 12,524,124,641,488 |
7033d7005c88eaf5a027556d78cd02e8c6f17831
|
d0081f81996635e913b1f267a4586eb0bfd3dcd5
|
/tests/unit/dataactvalidator/test_b7_object_class_program_activity_1.py
|
70b9591f77b0130db935e1bb7b512cb1f882e414
|
[
"CC0-1.0"
] |
permissive
|
https://github.com/fedspendingtransparency/data-act-broker-backend
|
71c10a6c7c284c8fa6556ccc0efce798870b059b
|
b12c73976fd7eb5728eda90e56e053759c733c35
|
refs/heads/master
| 2023-09-01T07:41:35.449877 | 2023-08-29T20:14:45 | 2023-08-29T20:14:45 | 57,313,310 | 55 | 36 |
CC0-1.0
| false | 2023-09-13T16:40:58 | 2016-04-28T15:39:36 | 2023-04-06T18:18:03 | 2023-09-13T16:40:56 | 94,931 | 44 | 30 | 5 |
Python
| false | false |
from tests.unit.dataactcore.factories.staging import ObjectClassProgramActivityFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
# Name of the SQL validation rule under test.
_FILE = 'b7_object_class_program_activity_1'
def test_column_headers(database):
    """The rule's query must expose at least these output columns."""
    expected_subset = {'row_number', 'gross_outlays_delivered_or_fyb', 'ussgl490800_authority_outl_fyb', 'difference',
                       'uniqueid_TAS', 'uniqueid_DisasterEmergencyFundCode', 'uniqueid_ProgramActivityCode',
                       'uniqueid_ProgramActivityName', 'uniqueid_ObjectClass',
                       'uniqueid_ByDirectReimbursableFundingSource'}
    actual = set(query_columns(_FILE, database))
    assert (actual & expected_subset) == expected_subset
def test_success(database):
    """ Test Object Class Program Activity gross_outlays_delivered_or_fyb equals ussgl490800_authority_outl_fyb """
    # Equal amounts -> the rule should report zero validation errors.
    op = ObjectClassProgramActivityFactory(gross_outlays_delivered_or_fyb=1, ussgl490800_authority_outl_fyb=1)
    assert number_of_errors(_FILE, database, models=[op]) == 0
def test_failure(database):
    """ Test Object Class Program Activity gross_outlays_delivered_or_fyb doesn't equal
        ussgl490800_authority_outl_fyb
    """
    # Mismatched amounts -> the rule should report exactly one error row.
    op = ObjectClassProgramActivityFactory(gross_outlays_delivered_or_fyb=1, ussgl490800_authority_outl_fyb=0)
    assert number_of_errors(_FILE, database, models=[op]) == 1
|
UTF-8
|
Python
| false | false | 1,394 |
py
| 931 |
test_b7_object_class_program_activity_1.py
| 790 | 0.725968 | 0.698709 | 0 | 32 | 42.5625 | 118 |
jinwei2016211483/jinwei
| 4,191,888,121,023 |
9dac69432aea4225318a66e15063b3f1dec1df33
|
b5a7dada98e842ba08063a5eb2573879c0d416a5
|
/untitled0.py
|
d6da41a9e336bbe6d759201d9e70f473fcb77cc7
|
[] |
no_license
|
https://github.com/jinwei2016211483/jinwei
|
1c03d86ee462c6653a576bf5ed0cd7b72c04d8b5
|
cbc47b6474c7ee280f7f60baa8416a2b50cba3fb
|
refs/heads/master
| 2023-08-14T01:58:18.036150 | 2021-09-22T07:56:16 | 2021-09-22T07:56:16 | 410,473,975 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 22 15:10:07 2021
@author: Dell
"""
import tensorflow as tf
|
UTF-8
|
Python
| false | false | 108 |
py
| 1 |
untitled0.py
| 1 | 0.62037 | 0.5 | 0 | 8 | 12.5 | 35 |
cheneyuwu/TD3fD-through-Shaping-using-Generative-Models
| 163,208,790,172 |
69c33375393a9c74cf7ff377a6bc3ad3be8a838b
|
9488b24f3b44577529de25b71919c01de3f161f9
|
/Package/td3fd/td3fd/ddpg/torch/ddpg.py
|
c718033ddebd192bf173bc21403fc11ee4d6d25c
|
[
"MIT"
] |
permissive
|
https://github.com/cheneyuwu/TD3fD-through-Shaping-using-Generative-Models
|
0858b6227a5b77e123b38257ea157903bd8c469b
|
f23bf3f219fa20cb7d14370acdf22c17f49f9b7d
|
refs/heads/master
| 2020-04-10T14:14:53.357667 | 2019-11-12T21:26:16 | 2019-11-12T21:26:16 | 161,072,134 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pickle
import numpy as np
import torch
from td3fd.memory import RingReplayBuffer, UniformReplayBuffer
from td3fd.ddpg.torch.model import Actor, Critic
# from td3fd.actor_critic import ActorCritic
# from td3fd.demo_shaping import EnsGANDemoShaping, EnsNFDemoShaping
from td3fd.ddpg.torch.normalizer import Normalizer
class DDPG(object):
    def __init__(
        self,
        input_dims,
        use_td3,
        layer_sizes,
        polyak,
        buffer_size,
        batch_size,
        q_lr,
        pi_lr,
        norm_eps,
        norm_clip,
        max_u,
        action_l2,
        clip_obs,
        scope,
        eps_length,
        fix_T,
        clip_pos_returns,
        clip_return,
        sample_demo_buffer,
        batch_size_demo,
        use_demo_reward,
        num_demo,
        demo_strategy,
        bc_params,
        shaping_params,
        gamma,
        info,
        num_epochs,
        num_cycles,
        num_batches,
    ):
        """
        Implementation of DDPG that is used in combination with Hindsight Experience Replay (HER). Added functionality
        to use demonstrations for training to Overcome exploration problem.

        Args:
            # Environment I/O and Config
            max_u              (float)        - maximum action magnitude, i.e. actions are in [-max_u, max_u]
            T                  (int)          - the time horizon for rollouts
            fix_T              (bool)         - every episode has fixed length
            clip_obs           (float)        - clip observations before normalization to be in [-clip_obs, clip_obs]
            clip_pos_returns   (boolean)      - whether or not positive returns should be clipped (i.e. clip to 0)
            clip_return        (float)        - clip returns to be in [-clip_return, clip_return]
            # Normalizer
            norm_eps           (float)        - a small value used in the normalizer to avoid numerical instabilities
            norm_clip          (float)        - normalized inputs are clipped to be in [-norm_clip, norm_clip]
            # NN Configuration
            scope              (str)          - the scope used for the TensorFlow graph
            input_dims         (dict of ints) - dimensions for the observation (o), the goal (g), and the actions (u)
            layer_sizes        (list of ints) - number of units in each hidden layers
            initializer_type   (str)          - initializer of the weight for both policy and critic
            reuse              (boolean)      - whether or not the networks should be reused
            # Replay Buffer
            buffer_size        (int)          - number of transitions that are stored in the replay buffer
            # Dual Network Set
            polyak             (float)        - coefficient for Polyak-averaging of the target network
            # Training
            batch_size         (int)          - batch size for training
            Q_lr               (float)        - learning rate for the Q (critic) network
            pi_lr              (float)        - learning rate for the pi (actor) network
            action_l2          (float)        - coefficient for L2 penalty on the actions
            gamma              (float)        - gamma used for Q learning updates
            # Use demonstration to shape critic or actor
            sample_demo_buffer (int)          - whether or not to sample from demonstration buffer
            batch_size_demo    (int)          - number of samples to be used from the demonstrations buffer, per mpi thread
            use_demo_reward    (int)          - whether or not to assume that demonstration dataset has rewards
            num_demo           (int)          - number of episodes in to be used in the demonstration buffer
            demo_strategy      (str)          - whether or not to use demonstration with different strategies
            bc_params          (dict)
            shaping_params     (dict)
        """
        # Store initial args passed into the function (kept for re-construction
        # of the policy when pickling; see __getstate__/__setstate__).
        self.init_args = locals()

        # Parameters
        self.num_epochs = num_epochs
        self.num_cycles = num_cycles
        self.num_batches = num_batches
        self.input_dims = input_dims
        self.use_td3 = use_td3
        self.layer_sizes = layer_sizes
        # self.initializer_type = initializer_type
        self.polyak = polyak
        self.buffer_size = buffer_size
        self.batch_size = batch_size
        self.q_lr = q_lr
        self.pi_lr = pi_lr
        self.norm_eps = norm_eps
        self.norm_clip = norm_clip
        self.max_u = max_u
        self.action_l2 = action_l2
        # self.clip_obs = clip_obs
        self.eps_length = eps_length
        self.fix_T = fix_T
        # self.clip_pos_returns = clip_pos_returns
        # self.clip_return = clip_return
        self.sample_demo_buffer = sample_demo_buffer
        self.batch_size_demo = batch_size_demo
        self.use_demo_reward = use_demo_reward
        self.num_demo = num_demo
        self.demo_strategy = demo_strategy
        assert self.demo_strategy in ["none", "bc", "gan", "nf"]
        self.bc_params = bc_params
        self.shaping_params = shaping_params
        self.gamma = gamma
        self.info = info

        # Prepare parameters: per-modality dimensionalities.
        self.dimo = self.input_dims["o"]
        self.dimg = self.input_dims["g"]
        self.dimu = self.input_dims["u"]

        self._create_memory()
        self._create_network()

    def get_actions(self, o, g, compute_q=False):
        """Return actions (scaled to [-max_u, max_u]) for the given observation
        and goal. If compute_q is True, also return the shaping potential p and
        the critic's Q estimate (as numpy arrays).
        """
        o = torch.from_numpy(o)
        g = torch.from_numpy(g)
        o, g = self._normalize_state(o, g)
        u = self.main_actor(o=o, g=g)
        if compute_q:
            q = self.main_critic(o=o, g=g, u=u)
            if self.demo_shaping:
                p = torch.Tensor((0.0,))  # TODO: evaluate the demo-shaping potential here
            else:
                p = q
        # the actor outputs normalized actions; scale back to environment units
        u = u * self.max_u
        if compute_q:
            return [u.data.numpy(), p.data.numpy(), q.data.numpy()]
        else:
            return u.data.numpy()

    def init_demo_buffer(self, demo_file, update_stats=True):
        """Initialize the demonstration buffer from a data file.

        Also updates the demonstration normalizer statistics, and optionally
        the main normalizer statistics.
        """
        # load the demonstration data from data file
        episode_batch = self.demo_buffer.load_from_file(data_file=demo_file, num_demo=self.num_demo)
        self._update_demo_stats(episode_batch)
        if update_stats:
            self._update_stats(episode_batch)

    def store_episode(self, episode_batch, update_stats=True):
        """
        episode_batch: array of batch_size x (T or T+1) x dim_key ('o' and 'ag' is of size T+1, others are of size T)
        """
        self.replay_buffer.store_episode(episode_batch)
        if update_stats:
            self._update_stats(episode_batch)

    def sample_batch(self):
        """Sample a training batch, optionally mixing in demonstration data."""
        # use demonstration buffer to sample as well if demo flag is set TRUE
        if self.sample_demo_buffer:
            transitions = {}
            transition_rollout = self.replay_buffer.sample(self.batch_size)
            transition_demo = self.demo_buffer.sample(self.batch_size_demo)
            assert transition_rollout.keys() == transition_demo.keys()
            for k in transition_rollout.keys():
                transitions[k] = np.concatenate((transition_rollout[k], transition_demo[k]))
        else:
            transitions = self.replay_buffer.sample(self.batch_size)  # otherwise only sample from primary buffer
        return transitions

    def save_policy(self, path):
        """Pickles the current policy for later inspection.
        """
        with open(path, "wb") as f:
            pickle.dump(self, f)

    def save_replay_buffer(self, path):
        pass

    def load_replay_buffer(self, path):
        pass

    def save_weights(self, path):
        pass

    def load_weights(self, path):
        pass

    def train_shaping(self):
        pass

    def train(self):
        """Run one DDPG/TD3 update step on a sampled batch.

        Returns:
            (actor_loss, critic_loss) as numpy scalars.
        """
        batch = self.sample_batch()

        r_tc = torch.from_numpy(batch["r"])
        o_tc, g_tc = self._normalize_state(torch.from_numpy(batch["o"]), torch.from_numpy(batch["g"]))
        o_2_tc, g_2_tc = self._normalize_state(torch.from_numpy(batch["o_2"]), torch.from_numpy(batch["g_2"]))
        u_tc = torch.from_numpy(batch["u"]) / self.max_u

        # Build the TD target without tracking gradients. This (a) keeps
        # gradients from flowing into the target networks, (b) avoids the
        # "backward through the graph a second time" error when the same
        # target is reused for both TD3 twin critics, and (c) avoids the
        # in-place aliasing bug of the previous `target_tc = r_tc; target_tc += ...`
        # which silently mutated the sampled reward tensor.
        with torch.no_grad():
            u_2_tc = self.target_actor(o=o_2_tc, g=g_2_tc)
            if self.use_td3:
                # clipped double-Q: take the minimum of the twin target critics
                target_q_tc = torch.min(
                    self.target_critic(o=o_2_tc, g=g_2_tc, u=u_2_tc),
                    self.target_critic_twin(o=o_2_tc, g=g_2_tc, u=u_2_tc),
                )
            else:
                target_q_tc = self.target_critic(o=o_2_tc, g=g_2_tc, u=u_2_tc)
            target_tc = r_tc + self.gamma * target_q_tc

        # Critic update
        critic_loss = self.q_criterion(target_tc, self.main_critic(o=o_tc, g=g_tc, u=u_tc))
        self.critic_adam.zero_grad()
        critic_loss.backward()
        self.critic_adam.step()
        if self.use_td3:
            critic_twin_loss = self.q_criterion(target_tc, self.main_critic_twin(o=o_tc, g=g_tc, u=u_tc))
            self.critic_twin_adam.zero_grad()
            critic_twin_loss.backward()
            self.critic_twin_adam.step()

        # Actor update
        pi_tc = self.main_actor(o=o_tc, g=g_tc)
        actor_loss = -torch.mean(self.main_critic(o=o_tc, g=g_tc, u=pi_tc))
        # L2 penalty on the (normalized) actions; use mean of squares — a
        # signed mean would reward large negative actions instead of small ones.
        actor_loss += self.action_l2 * torch.mean(pi_tc ** 2)
        self.actor_adam.zero_grad()
        actor_loss.backward()
        self.actor_adam.step()

        return actor_loss.data.numpy(), critic_loss.data.numpy()

    def initialize_target_net(self):
        """Hard-copy the main network weights into the target networks.

        NOTE: the previous implementation used map() over the parameter pairs;
        map() is lazy in Python 3, so the copies never actually executed and
        the target networks were never initialized. Explicit loops fix this.
        """
        for target_p, main_p in zip(self.target_actor.parameters(), self.main_actor.parameters()):
            target_p.data.copy_(main_p.data)
        for target_p, main_p in zip(self.target_critic.parameters(), self.main_critic.parameters()):
            target_p.data.copy_(main_p.data)
        if self.use_td3:
            for target_p, main_p in zip(self.target_critic_twin.parameters(), self.main_critic_twin.parameters()):
                target_p.data.copy_(main_p.data)

    def update_target_net(self):
        """Polyak-average the main network weights into the target networks.

        NOTE: as in initialize_target_net, lazy map() previously made this a
        no-op, so the target networks would never track the main networks.
        """
        for target_p, main_p in zip(self.target_actor.parameters(), self.main_actor.parameters()):
            target_p.data.copy_(self.polyak * target_p.data + (1.0 - self.polyak) * main_p.data)
        for target_p, main_p in zip(self.target_critic.parameters(), self.main_critic.parameters()):
            target_p.data.copy_(self.polyak * target_p.data + (1.0 - self.polyak) * main_p.data)
        if self.use_td3:
            for target_p, main_p in zip(self.target_critic_twin.parameters(), self.main_critic_twin.parameters()):
                target_p.data.copy_(self.polyak * target_p.data + (1.0 - self.polyak) * main_p.data)

    def logs(self, prefix=""):
        """Return a list of (name, value) pairs of normalizer statistics."""
        logs = []
        logs.append((prefix + "stats_o/mean", self.o_stats.mean_tc.numpy()))
        logs.append((prefix + "stats_o/std", self.o_stats.std_tc.numpy()))
        if self.dimg != 0:
            logs.append((prefix + "stats_g/mean", self.g_stats.mean_tc.numpy()))
            logs.append((prefix + "stats_g/std", self.g_stats.std_tc.numpy()))
        return logs

    def _create_memory(self):
        """Create the replay buffer(s); layout depends on fixed-T episodes."""
        # buffer shape
        buffer_shapes = {}
        if self.fix_T:
            # episodic storage: 'o' and 'ag' carry one extra (terminal) step
            buffer_shapes["o"] = (self.eps_length + 1, self.dimo)
            buffer_shapes["u"] = (self.eps_length, self.dimu)
            buffer_shapes["r"] = (self.eps_length, 1)
            if self.dimg != 0:  # for multigoal environment - or states that do not change over episodes.
                buffer_shapes["ag"] = (self.eps_length + 1, self.dimg)
                buffer_shapes["g"] = (self.eps_length, self.dimg)
            for key, val in self.input_dims.items():
                if key.startswith("info"):
                    buffer_shapes[key] = (self.eps_length, *(tuple([val]) if val > 0 else tuple()))
        else:
            # transition-level storage: next-step keys ('o_2', ...) are explicit
            buffer_shapes["o"] = (self.dimo,)
            buffer_shapes["o_2"] = (self.dimo,)
            buffer_shapes["u"] = (self.dimu,)
            buffer_shapes["r"] = (1,)
            if self.dimg != 0:  # for multigoal environment - or states that do not change over episodes.
                buffer_shapes["ag"] = (self.dimg,)
                buffer_shapes["g"] = (self.dimg,)
                buffer_shapes["ag_2"] = (self.dimg,)
                buffer_shapes["g_2"] = (self.dimg,)
            for key, val in self.input_dims.items():
                if key.startswith("info"):
                    buffer_shapes[key] = tuple([val]) if val > 0 else tuple()
            # need the "done" signal for restarting from training
            buffer_shapes["done"] = (1,)

        # initialize replay buffer(s)
        if self.fix_T:
            self.replay_buffer = UniformReplayBuffer(buffer_shapes, self.buffer_size, self.eps_length)
            if self.demo_strategy != "none" or self.sample_demo_buffer:
                self.demo_buffer = UniformReplayBuffer(buffer_shapes, self.buffer_size, self.eps_length)
        else:
            self.replay_buffer = RingReplayBuffer(buffer_shapes, self.buffer_size)
            if self.demo_strategy != "none" or self.sample_demo_buffer:
                self.demo_buffer = RingReplayBuffer(buffer_shapes, self.buffer_size)

    def _create_network(self):
        """Build normalizers, actor/critic networks, optimizers and targets."""
        # Normalizer for goal and observation.
        self.o_stats = Normalizer(self.dimo, self.norm_eps, self.norm_clip)
        self.g_stats = Normalizer(self.dimg, self.norm_eps, self.norm_clip)
        # Separate normalizer statistics for demonstration data; these are
        # updated by _update_demo_stats and previously were never created,
        # which made init_demo_buffer raise an AttributeError.
        self.demo_o_stats = Normalizer(self.dimo, self.norm_eps, self.norm_clip)
        self.demo_g_stats = Normalizer(self.dimg, self.norm_eps, self.norm_clip)
        # Models
        self.main_actor = Actor(
            dimo=self.dimo, dimg=self.dimg, dimu=self.dimu, layer_sizes=self.layer_sizes, noise=False
        )
        self.target_actor = Actor(
            dimo=self.dimo, dimg=self.dimg, dimu=self.dimu, layer_sizes=self.layer_sizes, noise=self.use_td3
        )
        self.actor_adam = torch.optim.Adam(self.main_actor.parameters(), lr=self.pi_lr)
        self.main_critic = Critic(dimo=self.dimo, dimg=self.dimg, dimu=self.dimu, layer_sizes=self.layer_sizes)
        self.target_critic = Critic(dimo=self.dimo, dimg=self.dimg, dimu=self.dimu, layer_sizes=self.layer_sizes)
        self.critic_adam = torch.optim.Adam(self.main_critic.parameters(), lr=self.q_lr)
        if self.use_td3:
            self.main_critic_twin = Critic(
                dimo=self.dimo, dimg=self.dimg, dimu=self.dimu, layer_sizes=self.layer_sizes
            )
            self.target_critic_twin = Critic(
                dimo=self.dimo, dimg=self.dimg, dimu=self.dimu, layer_sizes=self.layer_sizes
            )
            self.critic_twin_adam = torch.optim.Adam(self.main_critic_twin.parameters(), lr=self.q_lr)
        self.demo_shaping = None  # TODO: instantiate demo shaping (gan/nf) from shaping_params
        self.q_criterion = torch.nn.MSELoss()
        self.initialize_target_net()

    def _normalize_state(self, o, g):
        """Normalize observation (and goal, for multigoal envs) tensors."""
        o = self.o_stats.normalize(o)
        # for multigoal environments, we have goal as another states
        if self.dimg != 0:
            g = self.g_stats.normalize(g)
        return o, g

    def _update_stats(self, episode_batch):
        # add transitions to normalizer
        if self.fix_T:
            episode_batch["o_2"] = episode_batch["o"][:, 1:, :]
            if self.dimg != 0:
                # NOTE(review): 'ag_2'/'g_2' are full copies rather than
                # shifted-by-one slices like 'o_2' — presumably handled inside
                # sample_transitions; confirm against the buffer implementation.
                episode_batch["ag_2"] = episode_batch["ag"][:, :, :]
                episode_batch["g_2"] = episode_batch["g"][:, :, :]
            num_normalizing_transitions = episode_batch["u"].shape[0] * episode_batch["u"].shape[1]
            transitions = self.replay_buffer.sample_transitions(episode_batch, num_normalizing_transitions)
        else:
            transitions = episode_batch.copy()

        self.o_stats.update(torch.from_numpy(transitions["o"]))
        if self.dimg != 0:
            self.g_stats.update(torch.from_numpy(transitions["g"]))

    def _update_demo_stats(self, episode_batch):
        # add transitions to normalizer (demonstration statistics)
        if self.fix_T:
            episode_batch["o_2"] = episode_batch["o"][:, 1:, :]
            if self.dimg != 0:
                episode_batch["ag_2"] = episode_batch["ag"][:, :, :]
                episode_batch["g_2"] = episode_batch["g"][:, :, :]
            num_normalizing_transitions = episode_batch["u"].shape[0] * episode_batch["u"].shape[1]
            transitions = self.demo_buffer.sample_transitions(episode_batch, num_normalizing_transitions)
        else:
            transitions = episode_batch.copy()

        self.demo_o_stats.update(torch.from_numpy(transitions["o"]))
        if self.dimg != 0:
            self.demo_g_stats.update(torch.from_numpy(transitions["g"]))

    def __getstate__(self):
        pass
        # """
        # Our policies can be loaded from pkl, but after unpickling you cannot continue training.
        # """
        # state = {k: v for k, v in self.init_args.items() if not k == "self"}
        # state["tf"] = self.sess.run([x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)])
        # return state

    def __setstate__(self, state):
        pass
        # kwargs = state["kwargs"]
        # del state["kwargs"]
        # self.__init__(**state, **kwargs)
        # vars = [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)]
        # assert len(vars) == len(state["tf"])
        # node = [tf.assign(var, val) for var, val in zip(vars, state["tf"])]
        # self.sess.run(node)
|
UTF-8
|
Python
| false | false | 17,003 |
py
| 124 |
ddpg.py
| 74 | 0.570311 | 0.565724 | 0 | 391 | 42.485934 | 123 |
le1nux/pandattack
| 16,183,436,776,373 |
a15e45e1be823da050f1205bb9942b53192dcb4c
|
5bc231e45984a2bfb1d93c548e0d8cb395fa9d05
|
/tests/dataset_loaders/test_dataset.py
|
54471ee8d38db4eeb4f304a9183efb55c0f471c2
|
[
"MIT"
] |
permissive
|
https://github.com/le1nux/pandattack
|
9232075c160bca11876e314e6b3f8c4bd2f2ce98
|
c3671fc11b74019fb20b7507e09f7b8425f9bb81
|
refs/heads/master
| 2022-12-24T21:17:05.442606 | 2019-10-08T14:28:32 | 2019-10-08T14:28:32 | 207,798,327 | 2 | 1 |
MIT
| false | 2022-12-15T18:35:06 | 2019-09-11T11:42:53 | 2021-05-13T13:29:59 | 2019-10-08T14:28:43 | 335 | 1 | 1 | 1 |
Python
| false | false |
#!/usr/bin/env python3
import pytest
import tempfile
class TestDataset:
    """Test class whose tests each run inside a fresh temporary directory."""

    @pytest.fixture(autouse=True)
    def setup_and_teardown(self):
        """Create a scratch directory before each test and always remove it.

        The previous version called ``cleanup()`` after the ``yield``; if the
        test body raised, the exception was thrown back into the generator at
        the ``yield`` point and the temporary directory leaked. Using the
        context manager guarantees removal on both success and failure.
        """
        with tempfile.TemporaryDirectory():
            yield
|
UTF-8
|
Python
| false | false | 230 |
py
| 28 |
test_dataset.py
| 26 | 0.673913 | 0.669565 | 0 | 11 | 19.909091 | 47 |
Reyoth/initiationPython
| 15,839,839,429,658 |
a0df200b73e445df460230cc32bfb28574a0ed2d
|
0102ccf6c44c225868813bbcc9b37e22092e67cf
|
/06_exercices.py
|
9d8589d7d48ac3dd0f37bff161754b9ee449e5ed
|
[] |
no_license
|
https://github.com/Reyoth/initiationPython
|
c1e72ec4669c22a4ff5c06c8fa978b2af1625af0
|
d0cc3473be0b9f286e89921f9ff72a34c1eedcb0
|
refs/heads/master
| 2022-03-26T07:35:00.040124 | 2022-02-28T10:51:42 | 2022-02-28T10:51:42 | 150,640,858 | 0 | 10 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# ZCasino roulette game — simplified rules (OpenClassrooms exercise).
#
# The player bets an amount on a number between 0 and 49 (50 slots in all).
# Even numbers are black, odd numbers are red. After the wheel stops:
#   * exact number hit  -> the player gains 3x the stake;
#   * same colour only  (same parity as the winning number)
#                       -> the player gains half the stake, rounded UP with
#                          math.ceil so the bankroll stays an integer;
#   * otherwise         -> the stake is lost.
# The currency is the dollar rather than the euro to avoid encoding issues in
# the Windows console.

import os
from random import randrange
from math import ceil


def lire_nombre_mise():
    """Prompt until the user enters a wheel number in [0, 49]; return it."""
    choix = -1
    while choix < 0 or choix > 49:
        saisie = input("Tapez le nombre sur lequel vous voulez miser (entre 0 et 49) : ")
        try:
            choix = int(saisie)
        except ValueError:
            print("Vous n'avez pas saisi de nombre")
            choix = -1
            continue
        if choix < 0:
            print("Ce nombre est négatif")
        if choix > 49:
            print("Ce nombre est supérieur à 49")
    return choix


def lire_montant_mise(bankroll):
    """Prompt until the user enters a stake in (0, bankroll]; return it."""
    montant = 0
    while montant <= 0 or montant > bankroll:
        saisie = input("Tapez le montant de votre mise : ")
        try:
            montant = int(saisie)
        except ValueError:
            print("Vous n'avez pas saisi de nombre")
            montant = -1
            continue
        if montant <= 0:
            print("La mise saisie est négative ou nulle.")
        if montant > bankroll:
            print("Vous ne pouvez miser autant, vous n'avez que", bankroll, "$")
    return montant


argent = 1000            # starting bankroll, in dollars
continuer_partie = True  # main-loop flag: True while the player keeps playing

print("Vous vous installez à la table de roulette avec", argent, "$.")

while continuer_partie:
    # Ask for the number to bet on, then the stake.
    nombre_mise = lire_nombre_mise()
    mise = lire_montant_mise(argent)

    # Spin the wheel.
    numero_gagnant = randrange(50)
    print("La roulette tourne... ... et s'arrête sur le numéro", numero_gagnant)

    # Settle the bet.
    if numero_gagnant == nombre_mise:
        print("Félicitations ! Vous obtenez", mise * 3, "$ !")
        argent += mise * 3
    elif numero_gagnant % 2 == nombre_mise % 2:  # same colour (same parity)
        mise = ceil(mise * 0.5)
        print("Vous avez misé sur la bonne couleur. Vous obtenez", mise, "$")
        argent += mise
    else:
        print("Désolé l'ami, c'est pas pour cette fois. Vous perdez votre mise.")
        argent -= mise

    # Stop when the player is broke, or when they choose to leave.
    if argent <= 0:
        print("Vous êtes ruiné ! C'est la fin de la partie.")
        continuer_partie = False
    else:
        print("Vous avez à présent", argent, "$")
        quitter = input("Souhaitez-vous quitter le casino (o/n) ? ")
        if quitter == "o" or quitter == "O":
            print("Vous quittez le casino avec vos gains.")
            continuer_partie = False

# Pause before the console window closes (Windows).
os.system("pause")
|
UTF-8
|
Python
| false | false | 5,528 |
py
| 18 |
06_exercices.py
| 18 | 0.655255 | 0.644763 | 0 | 195 | 26.866667 | 113 |
davidbrownell/Common_Environment
| 4,011,499,481,987 |
ea96589fcc6b9fab622d8e1ec5a662b7e09ef257
|
8ac43a158d42db9b9b4098415cb8119f9e8d092c
|
/Libraries/Python/CommonEnvironment/v1.0/CommonEnvironment/TypeInfo/SchemaConverters/UnitTests/SimpleSchemaConverter_UnitTest.py
|
af0809e260f4ff5f0f243374434e62044399009d
|
[
"BSL-1.0"
] |
permissive
|
https://github.com/davidbrownell/Common_Environment
|
834ae4fd86284115dc2f0c2a85434ebc6bffeb89
|
4015872aeac8d5da30a6aa7940e1035a6aa6a75d
|
refs/heads/master
| 2021-01-23T04:29:17.336612 | 2018-04-15T18:58:41 | 2018-04-15T18:58:41 | 88,362,950 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# ----------------------------------------------------------------------
# |
# | SimpleSchemaConverter_UnitTest.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2016-09-06 17:31:15
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2016-18.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
import os
import sys
import unittest
from CommonEnvironment import Package
# ----------------------------------------------------------------------
# Resolve this script's on-disk location. When the interpreter is not a
# regular "python" executable (e.g. a frozen build), sys.executable itself is
# the script.
_script_fullpath = os.path.abspath(__file__) if "python" in sys.executable.lower() else sys.executable
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# Temporarily rewrite __package__ so the explicit relative imports below work
# even when this file is executed directly — presumably Package.NameInfo
# synthesizes/restores the package name; confirm against CommonEnvironment.Package.
with Package.NameInfo(__package__) as ni:
    __package__ = ni.created
    from ..SimpleSchemaConverter import *
    from ...FundamentalTypes import *
    __package__ = ni.original
# ----------------------------------------------------------------------
class UnitTest(unittest.TestCase):
    """Tests for the SimpleSchema converter: each TypeInfo instance must render
    to the expected SimpleSchema element string.

    NOTE(review): 'SimpleSchemaConveter' (missing the second 'r') is used
    throughout — presumably this matches the name exported by the
    SimpleSchemaConverter module via its star-import; confirm before renaming.
    """

    # ----------------------------------------------------------------------
    def test_Name(self):
        """Named elements and explicit arity overrides (string or Arity)."""
        self.assertEqual(SimpleSchemaConveter.Convert(BoolTypeInfo(), name="Foo"), "<Foo boolean>")
        self.assertEqual(SimpleSchemaConveter.Convert(BoolTypeInfo(arity='?'), name="Foo"), "<Foo boolean ?>")
        self.assertEqual(SimpleSchemaConveter.Convert(BoolTypeInfo(arity='?'), name="Foo", arity_override='*'), "<Foo boolean *>")
        self.assertEqual(SimpleSchemaConveter.Convert(BoolTypeInfo(arity='?'), name="Foo", arity_override=Arity.FromString('+')), "<Foo boolean +>")

    # ----------------------------------------------------------------------
    def test_Collection(self):
        """Arity suffixes: fixed counts render as {N}/{M,N}; arity 1 is omitted."""
        self.assertEqual(SimpleSchemaConveter.Convert(BoolTypeInfo()), "<boolean>")
        self.assertEqual(SimpleSchemaConveter.Convert(BoolTypeInfo(arity="(2)")), "<boolean {2}>")
        self.assertEqual(SimpleSchemaConveter.Convert(BoolTypeInfo(arity="(2,10)")), "<boolean {2,10}>")
        self.assertEqual(SimpleSchemaConveter.Convert(BoolTypeInfo(arity="?")), "<boolean ?>")
        self.assertEqual(SimpleSchemaConveter.Convert(BoolTypeInfo(arity="1")), "<boolean>")
        self.assertEqual(SimpleSchemaConveter.Convert(BoolTypeInfo(arity="*")), "<boolean *>")
        self.assertEqual(SimpleSchemaConveter.Convert(BoolTypeInfo(arity="+")), "<boolean +>")

    # ----------------------------------------------------------------------
    def test_SimpleItems(self):
        """Fundamental types that render with no attributes."""
        self.assertEqual(SimpleSchemaConveter.Convert(BoolTypeInfo()), "<boolean>")
        self.assertEqual(SimpleSchemaConveter.Convert(DateTimeTypeInfo()), "<datetime>")
        self.assertEqual(SimpleSchemaConveter.Convert(DateTypeInfo()), "<date>")
        self.assertEqual(SimpleSchemaConveter.Convert(DurationTypeInfo()), "<duration>")
        self.assertEqual(SimpleSchemaConveter.Convert(GuidTypeInfo()), "<guid>")
        self.assertEqual(SimpleSchemaConveter.Convert(TimeTypeInfo()), "<time>")

    # ----------------------------------------------------------------------
    def test_FilenameDirectory(self):
        """Filesystem types: must_exist and the file/directory/either kind."""
        self.assertEqual(SimpleSchemaConveter.Convert(DirectoryTypeInfo()), '<filename must_exist="True" type="directory">')
        self.assertEqual(SimpleSchemaConveter.Convert(FilenameTypeInfo()), '<filename must_exist="True" type="file">')
        self.assertEqual(SimpleSchemaConveter.Convert(FilenameTypeInfo(ensure_exists=False, match_any=True)), '<filename must_exist="False" type="either">')

    # ----------------------------------------------------------------------
    def test_Enum(self):
        """Enums: value list, plus optional friendly_values in parallel."""
        self.assertEqual(SimpleSchemaConveter.Convert(EnumTypeInfo([ "one", "two", "three", ])), '<enum values=[ "one", "two", "three" ]>')
        self.assertEqual(SimpleSchemaConveter.Convert(EnumTypeInfo([ "one", "two", "three", ], friendly_values=[ "1", "2", "3", ])), '<enum values=[ "one", "two", "three" ] friendly_values=[ "1", "2", "3" ]>')

    # ----------------------------------------------------------------------
    def test_Float(self):
        """Floats: optional min/max attributes rendered only when provided."""
        self.assertEqual(SimpleSchemaConveter.Convert(FloatTypeInfo()), '<number>')
        self.assertEqual(SimpleSchemaConveter.Convert(FloatTypeInfo(min=2.0)), '<number min="2.0">')
        self.assertEqual(SimpleSchemaConveter.Convert(FloatTypeInfo(max=10.5)), '<number max="10.5">')
        self.assertEqual(SimpleSchemaConveter.Convert(FloatTypeInfo(min=2.0, max=10.5)), '<number min="2.0" max="10.5">')

    # ----------------------------------------------------------------------
    def test_Int(self):
        """Ints: min/max attributes; bytes implies the signed integer range."""
        self.assertEqual(SimpleSchemaConveter.Convert(IntTypeInfo()), '<integer>')
        self.assertEqual(SimpleSchemaConveter.Convert(IntTypeInfo(min=2)), '<integer min="2">')
        self.assertEqual(SimpleSchemaConveter.Convert(IntTypeInfo(max=10)), '<integer max="10">')
        self.assertEqual(SimpleSchemaConveter.Convert(IntTypeInfo(min=2, max=10)), '<integer min="2" max="10">')
        self.assertEqual(SimpleSchemaConveter.Convert(IntTypeInfo(bytes=4)), '<integer min="-2147483648" max="2147483647" bytes="4">')

    # ----------------------------------------------------------------------
    def test_String(self):
        """Strings: min_length defaults to 1; validation_expression supported."""
        self.assertEqual(SimpleSchemaConveter.Convert(StringTypeInfo()), '<string min_length="1">')
        self.assertEqual(SimpleSchemaConveter.Convert(StringTypeInfo(min_length=2)), '<string min_length="2">')
        self.assertEqual(SimpleSchemaConveter.Convert(StringTypeInfo(max_length=10)), '<string min_length="1" max_length="10">')
        self.assertEqual(SimpleSchemaConveter.Convert(StringTypeInfo(min_length=2, max_length=10)), '<string min_length="2" max_length="10">')
        self.assertEqual(SimpleSchemaConveter.Convert(StringTypeInfo(validation_expression="foo")), '<string validation_expression="foo">')
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Run the suite verbosely and exit with unittest's status code; a Ctrl-C
    # simply terminates without a traceback.
    try:
        sys.exit(unittest.main(verbosity=2))
    except KeyboardInterrupt:
        pass
|
UTF-8
|
Python
| false | false | 6,374 |
py
| 222 |
SimpleSchemaConverter_UnitTest.py
| 196 | 0.559931 | 0.542673 | 0 | 100 | 61.74 | 209 |
madskillz254/watchlist-tutorial
| 16,106,127,402,549 |
30355eefb5b70748d2673da7d7eb17874711dbd0
|
50a72c754521700825bb0fd2e7a38af200435525
|
/config.py
|
0eb0c29c08645643fdd2f554bb8540a83996e1dc
|
[] |
no_license
|
https://github.com/madskillz254/watchlist-tutorial
|
4160945c24c79e686560de81b417a376db1df7f3
|
977f001031c9b32ef9248a4ce13b3143bd801e34
|
refs/heads/master
| 2020-05-07T09:30:05.783420 | 2019-04-17T09:40:31 | 2019-04-17T09:40:31 | 180,379,966 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
class Config:
    '''
    General configuration parent class.

    Holds settings shared by every environment; secrets (the TMDB API key,
    the Flask SECRET_KEY and the mail credentials) are read from environment
    variables so they never live in source control.
    '''
    # Flask-SimpleMDE (markdown editor) configuration.
    SIMPLEMDE_JS_IIFE = True
    SIMPLEMDE_USE_CDN = True

    # TMDB endpoint template, filled in with a movie id and the API key.
    MOVIE_API_BASE_URL ='https://api.themoviedb.org/3/movie/{}?api_key={}'
    MOVIE_API_KEY = os.environ.get('MOVIE_API_KEY')
    SECRET_KEY = os.environ.get('SECRET_KEY')

    # Uploaded photos are stored on disk inside the application; only their
    # paths are stored in the database (storing files in the DB is avoided).
    UPLOADED_PHOTOS_DEST ='app/static/photos'

    # Email configuration (Flask-Mail is used to send emails to users).
    MAIL_SERVER = 'smtp.googlemail.com'  # Gmail SMTP server
    MAIL_PORT = 587
    MAIL_USE_TLS = True  # secure outgoing mail with transport layer security
    MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
    MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
class ProdConfig(Config):
    '''Production configuration: database URI is taken from the environment.'''
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class TestConfig(Config):
    '''Test configuration using a dedicated `watchlist_test` database.

    The test database mirrors the schema of the main watchlist database,
    e.g. created with:
        CREATE DATABASE watchlist_test WITH TEMPLATE watchlist
    '''
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://mugambi254:pitchdb@localhost/watchlist_test' # Here we create a new database watchlist_test. We use WITH TEMPLATE to copy the schema of the watchlist database so both databases can be identical. ie CREATE DATABASE watchlist_test WITH TEMPLATE watchlist
class DevConfig(Config):
    '''Development configuration: local database and debug mode enabled.'''
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://mugambi254:pitchdb@localhost/watchlist' # local development database (credentials embedded in the URI)
    DEBUG = True
    # enables debug mode in our app
# Mapping used to select a configuration class by environment name
# (e.g. in the application factory).
config_options = {
'development':DevConfig,
'production':ProdConfig,
'test':TestConfig
}
|
UTF-8
|
Python
| false | false | 1,730 |
py
| 12 |
config.py
| 11 | 0.704046 | 0.69711 | 0 | 43 | 39.255814 | 314 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.