Dataset columns:

| column | dtype |
| --- | --- |
| repo_name | stringlengths 7 to 111 |
| __id__ | int64 16.6k to 19,705B |
| blob_id | stringlengths 40 to 40 |
| directory_id | stringlengths 40 to 40 |
| path | stringlengths 5 to 151 |
| content_id | stringlengths 40 to 40 |
| detected_licenses | list |
| license_type | stringclasses 2 values |
| repo_url | stringlengths 26 to 130 |
| snapshot_id | stringlengths 40 to 40 |
| revision_id | stringlengths 40 to 40 |
| branch_name | stringlengths 4 to 42 |
| visit_date | timestamp[ns] |
| revision_date | timestamp[ns] |
| committer_date | timestamp[ns] |
| github_id | int64 14.6k to 687M ⌀ |
| star_events_count | int64 0 to 209k |
| fork_events_count | int64 0 to 110k |
| gha_license_id | stringclasses 12 values |
| gha_fork | bool 2 classes |
| gha_event_created_at | timestamp[ns] |
| gha_created_at | timestamp[ns] |
| gha_updated_at | timestamp[ns] |
| gha_pushed_at | timestamp[ns] |
| gha_size | int64 0 to 10.2M ⌀ |
| gha_stargazers_count | int32 0 to 178k ⌀ |
| gha_forks_count | int32 0 to 88.9k ⌀ |
| gha_open_issues_count | int32 0 to 2.72k ⌀ |
| gha_language | stringlengths 1 to 16 ⌀ |
| gha_archived | bool 1 class |
| gha_disabled | bool 1 class |
| content | stringlengths 10 to 2.95M |
| src_encoding | stringclasses 5 values |
| language | stringclasses 1 value |
| is_vendor | bool 2 classes |
| is_generated | bool 2 classes |
| length_bytes | int64 10 to 2.95M |
| extension | stringclasses 19 values |
| num_repo_files | int64 1 to 202k |
| filename | stringlengths 4 to 112 |
| num_lang_files | int64 1 to 202k |
| alphanum_fraction | float64 0.26 to 0.89 |
| alpha_fraction | float64 0.2 to 0.89 |
| hex_fraction | float64 0 to 0.09 |
| num_lines | int32 1 to 93.6k |
| avg_line_length | float64 4.57 to 103 |
| max_line_length | int64 7 to 931 |
edufanelli/simAD | 3,702,261,848,033 | fb4d2003fd73ae664b68addd9863fc5cd25162ed | be0c6c5818916bf275dacc0a875198a42ed83d71 | /eventos.py | a47652b82314acfcf72ad6486a731320890ff48f | []
| no_license | https://github.com/edufanelli/simAD | 386be26fbd23386e9c8f188a512193fe850552a2 | 74ffac6c1b6563d80f526586da78b694a3102850 | refs/heads/master | 2021-01-10T12:31:22.071681 | 2016-03-12T19:28:58 | 2016-03-12T19:28:58 | 53,749,422 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import numpy
#from main import MAX_EVENTOS
###
def geraListaEventos(MAX_EVENTOS, taxaChegada1, tipoSaida1, taxaSaida1, taxaChegada2=None, tipoSaida2=None, taxaSaida2=None):
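    """Generate a list of event labels for the requested queueing scenario.

    MAX_EVENTOS    -- number of events to draw
    taxaChegada1/2 -- arrival rates for customer types 1 and 2
    tipoSaida1/2   -- departure type: "exponencial", "deterministica" or "uniforme"
    taxaSaida1/2   -- departure rate (or a (low, high) pair in the uniform case)
    Returns a list of event labels such as "chegada", "chegada1" or "saida2".
    """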
prox_evento = ""
lista_eventos = []
    ###1 Exponential Departure (Scenario 1)###
    if tipoSaida1 == "exponencial" and tipoSaida2 is None:
        for k in range(MAX_EVENTOS):
            tempo_chegada = numpy.random.exponential(1/taxaChegada1) #Draw an arrival time
            tempo_saida = numpy.random.exponential(1/taxaSaida1) #Draw a departure time
            #Check which time is smaller and set whether the next event is an arrival or a departure
            if tempo_chegada < tempo_saida:
                prox_evento = "chegada"
            elif tempo_chegada > tempo_saida:
                prox_evento = "saida"
            lista_eventos.append(prox_evento)
    ###2 Exponential Departures (Scenario 2)###
    if tipoSaida1 == "exponencial" and tipoSaida2 == "exponencial":
        fila_tipo1 = 0 ###NOTE: includes the server
        fila_tipo2 = 0 ###NOTE: includes the server
        #lista_eventos.append("debug")
        for k in range(MAX_EVENTOS):
            #print fila_tipo1+fila_tipo2, lista_eventos[-1]
            if fila_tipo1 == 0 and fila_tipo2 == 0: #If no one is in the queue, only arrivals can occur
                #print "ninguem na fila no evento " + str(k)
                tempo_chegada1 = numpy.random.exponential(1/(taxaChegada1))
                tempo_chegada2 = numpy.random.exponential(1/(taxaChegada2))
                if tempo_chegada1 < tempo_chegada2:
                    prox_evento = "chegada1"
                    fila_tipo1+=1
                else:
                    prox_evento = "chegada2"
                    fila_tipo2+=1
            else:#If someone is in the queue
                if fila_tipo1 == 0 and fila_tipo2 != 0:#Only type 2 in the queue, so only arrivals and type-2 departures can occur
                    tempo_chegada1 = numpy.random.exponential(1/(taxaChegada1))
                    tempo_chegada2 = numpy.random.exponential(1/(taxaChegada2))
                    tempo_saida2 = numpy.random.exponential(1/(taxaSaida2))
                    menor_tempo = min(tempo_chegada1, tempo_chegada2, tempo_saida2)
                    if menor_tempo == tempo_chegada1:#a type-1 arrival occurred
                        prox_evento = "chegada1"
                        fila_tipo1+=1
                    elif menor_tempo == tempo_chegada2:#a type-2 arrival occurred
                        prox_evento = "chegada2"
                        fila_tipo2+=1
                    elif menor_tempo == tempo_saida2:#a type-2 departure occurred
                        prox_evento = "saida2"
                        fila_tipo2-=1
                elif fila_tipo1 != 0 and fila_tipo2 == 0:#Only arrivals and type-1 departures can occur
                    tempo_chegada1 = numpy.random.exponential(1/(taxaChegada1))
                    tempo_chegada2 = numpy.random.exponential(1/(taxaChegada2))
                    tempo_saida1 = numpy.random.exponential(1/(taxaSaida1))
                    menor_tempo = min(tempo_chegada1, tempo_chegada2, tempo_saida1)
                    if menor_tempo == tempo_chegada1:#a type-1 arrival occurred
                        prox_evento = "chegada1"
                        fila_tipo1+=1
                    elif menor_tempo == tempo_chegada2:#a type-2 arrival occurred
                        prox_evento = "chegada2"
                        fila_tipo2+=1
                    elif menor_tempo == tempo_saida1:#a type-1 departure occurred
                        prox_evento = "saida1"
                        fila_tipo1-=1
                else: #Arrivals and departures of any type can occur
                    tempo_chegada1 = numpy.random.exponential(1/(taxaChegada1))
                    tempo_chegada2 = numpy.random.exponential(1/(taxaChegada2))
                    tempo_saida1 = numpy.random.exponential(1/(taxaSaida1))
                    tempo_saida2 = numpy.random.exponential(1/(taxaSaida2))
                    menor_tempo = min(tempo_chegada1, tempo_chegada2, tempo_saida1, tempo_saida2)
                    if menor_tempo == tempo_chegada1:#a type-1 arrival occurred
                        prox_evento = "chegada1"
                        fila_tipo1+=1
                    elif menor_tempo == tempo_chegada2:#a type-2 arrival occurred
                        prox_evento = "chegada2"
                        fila_tipo2+=1
                    elif menor_tempo == tempo_saida1:#a type-1 departure occurred
                        prox_evento = "saida1"
                        fila_tipo1-=1
                    elif menor_tempo == tempo_saida2:#a type-2 departure occurred
                        prox_evento = "saida2"
                        fila_tipo2-=1
            lista_eventos.append(prox_evento)
    ###2 Deterministic Departures (Scenario 3)###
    if tipoSaida1 == "deterministica" and tipoSaida2 == "deterministica":
        fila_tipo1 = 0 ###NOTE: includes the server
        fila_tipo2 = 0 ###NOTE: includes the server
        #lista_eventos.append("debug")
        for k in range(MAX_EVENTOS):
            #print fila_tipo1+fila_tipo2, lista_eventos[-1]
            if fila_tipo1 == 0 and fila_tipo2 == 0: #If no one is in the queue, only arrivals can occur
                #print "ninguem na fila no evento " + str(k)
                tempo_chegada1 = numpy.random.exponential(1/(taxaChegada1))
                tempo_chegada2 = numpy.random.exponential(1/(taxaChegada2))
                if tempo_chegada1 < tempo_chegada2:
                    prox_evento = "chegada1"
                    fila_tipo1+=1
                else:
                    prox_evento = "chegada2"
                    fila_tipo2+=1
            else:#If someone is in the queue
                if fila_tipo1 == 0 and fila_tipo2 != 0:#Only type 2 in the queue, so only arrivals and type-2 departures can occur
                    tempo_chegada1 = numpy.random.exponential(1/(taxaChegada1))
                    tempo_chegada2 = numpy.random.exponential(1/(taxaChegada2))
                    tempo_saida2 = 1/taxaSaida2
                    menor_tempo = min(tempo_chegada1, tempo_chegada2, tempo_saida2)
                    if menor_tempo == tempo_chegada1:#a type-1 arrival occurred
                        prox_evento = "chegada1"
                        fila_tipo1+=1
                    elif menor_tempo == tempo_chegada2:#a type-2 arrival occurred
                        prox_evento = "chegada2"
                        fila_tipo2+=1
                    elif menor_tempo == tempo_saida2:#a type-2 departure occurred
                        prox_evento = "saida2"
                        fila_tipo2-=1
                elif fila_tipo1 != 0 and fila_tipo2 == 0:#Only arrivals and type-1 departures can occur
                    tempo_chegada1 = numpy.random.exponential(1/(taxaChegada1))
                    tempo_chegada2 = numpy.random.exponential(1/(taxaChegada2))
                    tempo_saida1 = 1/taxaSaida1
                    menor_tempo = min(tempo_chegada1, tempo_chegada2, tempo_saida1)
                    if menor_tempo == tempo_chegada1:#a type-1 arrival occurred
                        prox_evento = "chegada1"
                        fila_tipo1+=1
                    elif menor_tempo == tempo_chegada2:#a type-2 arrival occurred
                        prox_evento = "chegada2"
                        fila_tipo2+=1
                    elif menor_tempo == tempo_saida1:#a type-1 departure occurred
                        prox_evento = "saida1"
                        fila_tipo1-=1
                else: #Arrivals and departures of any type can occur
                    tempo_chegada1 = numpy.random.exponential(1/(taxaChegada1))
                    tempo_chegada2 = numpy.random.exponential(1/(taxaChegada2))
                    tempo_saida1 = 1/taxaSaida1
                    tempo_saida2 = 1/taxaSaida2
                    menor_tempo = min(tempo_chegada1, tempo_chegada2, tempo_saida1, tempo_saida2)
                    if menor_tempo == tempo_chegada1:#a type-1 arrival occurred
                        prox_evento = "chegada1"
                        fila_tipo1+=1
                    elif menor_tempo == tempo_chegada2:#a type-2 arrival occurred
                        prox_evento = "chegada2"
                        fila_tipo2+=1
                    elif menor_tempo == tempo_saida1:#a type-1 departure occurred
                        prox_evento = "saida1"
                        fila_tipo1-=1
                    elif menor_tempo == tempo_saida2:#a type-2 departure occurred
                        prox_evento = "saida2"
                        fila_tipo2-=1
            lista_eventos.append(prox_evento)
    ###2 Uniform Departures (Scenario 4)###
    if tipoSaida1 == "uniforme" and tipoSaida2 == "uniforme":
        fila_tipo1 = 0 ###NOTE: includes the server
        fila_tipo2 = 0 ###NOTE: includes the server
        for k in range(MAX_EVENTOS):
            #print fila_tipo1+fila_tipo2, lista_eventos[-1]
            if fila_tipo1 == 0 and fila_tipo2 == 0: #If no one is in the queue, only arrivals can occur
                #print "ninguem na fila no evento " + str(k)
                tempo_chegada1 = numpy.random.exponential(1/(taxaChegada1))
                tempo_chegada2 = numpy.random.exponential(1/(taxaChegada2))
                if tempo_chegada1 < tempo_chegada2:
                    prox_evento = "chegada1"
                    fila_tipo1+=1
                else:
                    prox_evento = "chegada2"
                    fila_tipo2+=1
            else:#If someone is in the queue
                if fila_tipo1 == 0 and fila_tipo2 != 0:#Only type 2 in the queue, so only arrivals and type-2 departures can occur
                    tempo_chegada1 = numpy.random.exponential(1/(taxaChegada1))
                    tempo_chegada2 = numpy.random.exponential(1/(taxaChegada2))
                    tempo_saida2 = numpy.random.uniform(taxaSaida2[0], taxaSaida2[1])
                    menor_tempo = min(tempo_chegada1, tempo_chegada2, tempo_saida2)
                    if menor_tempo == tempo_chegada1:#a type-1 arrival occurred
                        prox_evento = "chegada1"
                        fila_tipo1+=1
                    elif menor_tempo == tempo_chegada2:#a type-2 arrival occurred
                        prox_evento = "chegada2"
                        fila_tipo2+=1
                    elif menor_tempo == tempo_saida2:#a type-2 departure occurred
                        prox_evento = "saida2"
                        fila_tipo2-=1
                elif fila_tipo1 != 0 and fila_tipo2 == 0:#Only arrivals and type-1 departures can occur
                    tempo_chegada1 = numpy.random.exponential(1/(taxaChegada1))
                    tempo_chegada2 = numpy.random.exponential(1/(taxaChegada2))
                    tempo_saida1 = numpy.random.uniform(taxaSaida1[0], taxaSaida1[1])
                    menor_tempo = min(tempo_chegada1, tempo_chegada2, tempo_saida1)
                    if menor_tempo == tempo_chegada1:#a type-1 arrival occurred
                        prox_evento = "chegada1"
                        fila_tipo1+=1
                    elif menor_tempo == tempo_chegada2:#a type-2 arrival occurred
                        prox_evento = "chegada2"
                        fila_tipo2+=1
                    elif menor_tempo == tempo_saida1:#a type-1 departure occurred
                        prox_evento = "saida1"
                        fila_tipo1-=1
                else: #Arrivals and departures of any type can occur
                    tempo_chegada1 = numpy.random.exponential(1/(taxaChegada1))
                    tempo_chegada2 = numpy.random.exponential(1/(taxaChegada2))
                    tempo_saida1 = numpy.random.uniform(taxaSaida1[0], taxaSaida1[1])
                    tempo_saida2 = numpy.random.uniform(taxaSaida2[0], taxaSaida2[1])
                    menor_tempo = min(tempo_chegada1, tempo_chegada2, tempo_saida1, tempo_saida2)
                    if menor_tempo == tempo_chegada1:#a type-1 arrival occurred
                        prox_evento = "chegada1"
                        fila_tipo1+=1
                    elif menor_tempo == tempo_chegada2:#a type-2 arrival occurred
                        prox_evento = "chegada2"
                        fila_tipo2+=1
                    elif menor_tempo == tempo_saida1:#a type-1 departure occurred
                        prox_evento = "saida1"
                        fila_tipo1-=1
                    elif menor_tempo == tempo_saida2:#a type-2 departure occurred
                        prox_evento = "saida2"
                        fila_tipo2-=1
            lista_eventos.append(prox_evento)
'''
for k in range(MAX_EVENTOS):
tempo_chegada = numpy.random.exponential(1/(taxaChegada1+taxaChegada2))
tempo_saida1 = numpy.random.uniform(taxaSaida1[0], taxaSaida1[1])
tempo_saida2 = numpy.random.uniform(taxaSaida2[0], taxaSaida2[1])
#print tempo_chegada, tempo_saida1, tempo_saida2
if tempo_chegada < min(tempo_saida1, tempo_saida2):
prox_evento = "chegada"
elif tempo_chegada > min(tempo_saida1, tempo_saida2):
if tempo_saida1 == min(tempo_saida1, tempo_saida2):
prox_evento = "saida1"
else:
prox_evento = "saida2"
lista_eventos.append(prox_evento)
'''
#print lista_eventos
return lista_eventos
###
| UTF-8 | Python | false | false | 13,953 | py | 4 | eventos.py | 3 | 0.50766 | 0.476228 | 0 | 261 | 52.268199 | 125 |
Slugskickass/NDR_detection | 3,453,153,711,327 | b7214508440616b7844e17f40a9adee6c0c7d345 | cae558765de2455baa45333b16227f3f34b26a72 | /Detect.py | 68a71aca01cdad826e62f1c1bf405d368671320f | []
| no_license | https://github.com/Slugskickass/NDR_detection | 8858b6bde5441de49890b3a57e99e5f594d2361c | 8ef4c38cd2d1cec73fbf394257e75a36bfdf52b9 | refs/heads/master | 2023-01-24T00:57:36.043703 | 2020-12-04T10:17:05 | 2020-12-04T10:17:05 | 318,478,582 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from PIL import Image
import numpy as np
from scipy import stats
import ruptures as rpt
import utilities as util
import matplotlib.pyplot as plt
def loadtiffs(file_name):
"""
This function returns an array of images in numpy format
:param file_name:
:return: an array containing an image
"""
img = Image.open(file_name)
print('The Image is', img.size, 'Pixels.')
print('With', img.n_frames, 'frames.')
imgArray = np.zeros((img.size[1], img.size[0], img.n_frames), np.int16)
for I in range(img.n_frames):
img.seek(I)
imgArray[:, :, I] = np.asarray(img)
img.close()
return(imgArray)
def collect_non_event(data, per):
"""
:param data: The data as a 3D stack, this is the NDR data
    :param per: The percentile of the data to be filtered
    :return: This returns a list containing all the voxels which fall below the given percentile of the data,
basically all the lines with no event in them
"""
final_frame = data[:, :, 299] - data[:, :, 0]
perce = np.percentile(final_frame.flatten(), per)
positions = np.where(final_frame < perce)
X = positions[0]
Y = positions[1]
empty = []
for item in range(len(X)):
empty.append(data[X[item], Y[item], :])
return (empty)
def fit_line(line_data):
'''
    My fitting program because I don't like elliot's
    :param line_data: A list of pixel values, intended to take data from the above function (collect_non_event)
:return: The slopes of all the data
'''
line_length = len(line_data)
# Place to store the results
hold_all = np.zeros(line_length)
# build an array to supply as the X values for fitting
x = np.linspace(0, len(line_data[0])-1, len(line_data[0]))
#start_time = time.clock()
for I in range(line_length):
y = line_data[I]
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
hold_all[I] = slope
return hold_all
def change_point(signal, sigma=5):
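    """Detect change points in `signal` with the PELT algorithm from `ruptures`
    (model 'l2', penalty log(n) * sigma**2), then fit a straight line to each segment.

    Returns (slopes, intercepts, breakpoints), where `breakpoints` starts with 0.
    """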
n = len(signal)
model = 'l2'
algorithm = rpt.Pelt(model=model, min_size=10).fit((signal))
breakpoints = algorithm.predict(pen=np.log(n) * sigma ** 2)
data = np.split(signal, breakpoints)
breakpoints.insert(0, 0)
def fit(p):
y, start = p
x = util.make_lin_fit_x(start=start, length=len(y))
y = util.make_lin_fit_y(y)
return util.lin_fit(x, y)
intercepts, slopes, = zip(*map(fit, zip(data, breakpoints[:-1])))
return list(slopes), list(intercepts), breakpoints
def ck_filter(data, width):
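    """Smooth `data` by replacing each interior point with the mean of either the preceding or the
    following window of length `width`, chosen by comparing both against the mean of a window
    centred on the point. The name suggests a Chung-Kennedy style edge-preserving filter, but that
    reading is only a guess from the function name.
    """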
filtered_data = []
for I in range(width, len(data)-width):
before = data[I-width:I]
after = data[I:width+I]
        step = int(width / 2)  # the builtin int replaces the deprecated np.int alias; behaviour is unchanged
current = data[I-step:I+step]
if (np.mean(current) - np.mean(before)) < (np.mean(current) - np.mean(after)):
filtered_data.append(np.mean(before))
else:
filtered_data.append(np.mean(after))
return filtered_data
def get_gradients(y_data, breakpoints):
slopes = []
intercepts =[]
breakpoints = np.append(breakpoints, len(y_data))
x_data = np.linspace(0, len(y_data)-1, len(y_data))
for I in range(len(breakpoints)-1):
slope, intercept, r_value, p_value, std_err = stats.linregress(x_data[breakpoints[I]:breakpoints[I+1]], y_data[breakpoints[I]:breakpoints[I+1]])
slopes.append(slope)
intercepts.append(intercept)
return slopes, intercepts
def plot_CP(data, breakpoints, slopes, intercepts):
x_data = np.linspace(0, len(data) - 1, len(data))
for I in range(len(breakpoints) - 1):
x_data_s = x_data[breakpoints[I]:breakpoints[I + 1]]
y_data = x_data_s * slopes[I] + intercepts[I]
plt.plot(x_data_s, y_data)
    return 0
| UTF-8 | Python | false | false | 3,788 | py | 3 | Detect.py | 3 | 0.627244 | 0.618268 | 0 | 116 | 31.663793 | 152
krisekenes/semiRestfulPets | 12,154,757,458,619 | 336ef13391d034a0b5587f2e4241bae1a312b555 | 517bee7b3153e303bb275284ae018f48f5450ad0 | /apps/pets/views.py | b9b7e9634f5bdb6a55406535eb6814bc352d6e11 | []
| no_license | https://github.com/krisekenes/semiRestfulPets | 5144dda7ac4233157f29d3b1e65d76f98bead4b9 | d689972f8fc84b901ec4e3943a9839f723b4555b | refs/heads/master | 2018-12-31T14:10:14.728623 | 2016-09-23T19:04:53 | 2016-09-23T19:04:53 | 69,044,912 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render, redirect
from .models import Pets
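# Semi-RESTful CRUD views for the Pets model: list, new/create, show, edit/update and destroy.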
def index(request):
context = {
'pets': Pets.objects.all()
}
return render(request, 'pets/index.html', context)
def new(request):
return render(request, 'pets/new.html')
def create(request):
Pets.objects.create(name=request.POST['name'], description=request.POST['description'], breed=request.POST['breed'])
return redirect('/pets')
def show(request, id):
context = {
'pet': Pets.objects.get(id=id)
}
return render(request, 'pets/show.html', context)
def edit(request, id):
context = {
'pet': Pets.objects.get(id=id)
}
return render(request, 'pets/edit.html', context)
def update(request, id):
pet = Pets.objects.get(id=id)
pet.name = request.POST['name']
pet.description = request.POST['description']
pet.breed = request.POST['breed']
pet.save()
route = '/pets/show/'+id
return redirect(route)
def destroy(request, id):
Pets.objects.get(id=id).delete()
return redirect('/pets')
| UTF-8 | Python | false | false | 1,065 | py | 4 | views.py | 2 | 0.649765 | 0.649765 | 0 | 40 | 25.625 | 120 |
kg0r0/atcoder | 14,731,737,861,791 | 7faa6975cc3dc900fcd6faae08f7830aee26412b | 42d0298db18fd6bae5628527cd1a37097d0c6106 | /abc/146/c.py | e6c078296ad7f7d5efe1ca92853fb36d22e09b87 | [
"MIT"
]
| permissive | https://github.com/kg0r0/atcoder | c242f8e8d57510a259cde816019b7f3c4f73e089 | 4deb31157235085a6512631fa8522b00df6c88e9 | refs/heads/master | 2020-06-23T16:50:57.508932 | 2020-06-22T14:48:41 | 2020-06-22T14:48:41 | 198,686,649 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | a,b,x=map(int,input().split())
l=0
r=10**9+1
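# Binary search: find the largest c (up to 10**9) with total cost a*c + b*len(str(c)) <= x; prints 0 if none fits.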
while r-l>1:
c=(r+l)//2
v=a*c+b*len(str(c))
if v<=x:
l=c
else:
r=c
print(l)
| UTF-8 | Python | false | false | 137 | py | 403 | c.py | 366 | 0.489051 | 0.437956 | 0 | 11 | 11.454545 | 30 |
SD2E/python-datacatalog | 19,258,633,384,449 | 8bdb0fd5a87babcf009a2b878ffbf26e332e158b | 089f1d32f83e380499c263be654595671a17fdfb | /datacatalog/formats/ginkgo/convert.py | 83cd20a60431566c79603c5d32732f96e8a64e60 | [
"Python-2.0",
"BSD-3-Clause"
]
| permissive | https://github.com/SD2E/python-datacatalog | 4930f7e1de33d035a42f319383c7ee7faee5d370 | 51ab366639505fb6e8a14cd6b446de37080cd20d | refs/heads/master | 2021-12-06T23:38:57.288649 | 2021-08-31T16:17:28 | 2021-08-31T16:17:28 | 149,484,835 | 0 | 1 | NOASSERTION | false | 2019-10-21T15:20:22 | 2018-09-19T17:06:26 | 2019-08-16T12:46:14 | 2019-10-21T15:13:40 | 70,753 | 0 | 1 | 0 | Python | false | false | import sys
from ..converter import Converter, ConversionError, ValidationError
class Ginkgo(Converter):
    """Convert Ginkgo samples.json to sample-set schema"""
VERSION = '0.0.2'
FILENAME = 'ginkgo_samples'
def convert(self, input_fp, output_fp=None, verbose=True, config={}, enforce_validation=True):
"""Do the conversion by running a method in runner.py"""
from .runner import convert_ginkgo
passed_config = config if config != {} else self.options
return convert_ginkgo(self.targetschema, self.encoding, input_fp,
verbose=verbose,
config=passed_config,
output_file=output_fp,
enforce_validation=enforce_validation)
| UTF-8 | Python | false | false | 792 | py | 840 | convert.py | 388 | 0.612374 | 0.608586 | 0 | 17 | 45.588235 | 98 |
Neptuos/TIL | 6,262,062,332,547 | 473332932e9bebc22c689387b865bd4b6a368044 | 5eed713b9c49bb7cae36b8d5d391b9a01a4446ee | /Python/periodic_table.py | 66209e97fee199a073e7c0a99ce0c164f2707d2f | []
| no_license | https://github.com/Neptuos/TIL | 8842b54ffdf2626901355555a1a671130e8a013d | d964f5a85c3840b56cd900cb8116907220de0232 | refs/heads/master | 2023-06-07T10:57:00.093072 | 2023-06-05T19:02:18 | 2023-06-05T19:02:18 | 256,842,243 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Just enter the correct name of element and know all about it
#for example if you want to know about hydrogen just enter hydrogen as input.
#this periodic data contain only data till atomic number 118
try:
import pandas as pd
a = input().capitalize()
a = " ".join(a.split())
periodicdata = pd.read_csv("https://gist.githubusercontent.com/GoodmanSciences/c2dd862cd38f21b0ad36b8f96b4bf1ee/raw/1d92663004489a5b6926e944c1b3d9ec5c40900e/Periodic%2520Table%2520of%2520Elements.csv",index_col="Element")
print(periodicdata.loc[a,:])
except KeyError:
print("Ops ! No such element found. \nEnter the correct element. TRY AGAIN !!")
| UTF-8 | Python | false | false | 648 | py | 257 | periodic_table.py | 194 | 0.759259 | 0.666667 | 0 | 11 | 57.909091 | 225 |
karthikpappu/pyc_source | 3,032,246,928,260 | 382982edb3a38085142f1146fc23087c6036fafa | 91fa095f423a3bf47eba7178a355aab3ca22cf7f | /pycfiles/egi-0.9.0-py2.7/socket_wrapper.py | e78f5e3ebf3d26c95a37c998327bec1d8e57c8d1 | []
| no_license | https://github.com/karthikpappu/pyc_source | 0ff4d03e6d7f88c1aca7263cc294d3fa17145c9f | 739e7e73180f2c3da5fd25bd1304a3fecfff8d6e | refs/heads/master | 2023-02-04T11:27:19.098827 | 2020-12-27T04:51:17 | 2020-12-27T04:51:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04)
# [GCC 8.4.0]
# Embedded file name: build/bdist.macosx-10.6-intel/egg/egi/socket_wrapper.py
# Compiled at: 2016-09-08 05:35:24
import socket
class Socket:
""" wrap the socket() class """
def connect(self, str_address, port_no):
""" connect to the given host at the specified port ) """
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((str_address, port_no))
self._connection = self._socket.makefile('rwb', 0)
def disconnect(self):
""" close the connection """
self._connection.close()
self._socket.close()
del self._connection
del self._socket
def write(self, data):
""" write to the socket -- the socket must be opened """
self._connection.write(data)
def read(self, size=-1):
""" read from the socket; warning -- it blocks on reading! """
if size < 0:
return self._connection.read()
else:
            return self._connection.read(size)
| UTF-8 | Python | false | false | 1,147 | py | 114,545 | socket_wrapper.py | 111,506 | 0.61116 | 0.568439 | 0 | 34 | 32.764706 | 77
anon-legion/Noobseries_utilities | 3,272,765,081,742 | 7270f579dc9b307adaaf60793377e762183e583d | 72479b0f78a923dc9a7be75891a8ff17872084ae | /wifi_stored_password/wifi_stored_password.py | f27ab00db4a652746ecbce5f3e04d45c9af80faa | []
| no_license | https://github.com/anon-legion/Noobseries_utilities | 4e163bc50e933539e3429b88a966840af07754e5 | db524eb26071dcd98b81227b737f20b98c391a73 | refs/heads/main | 2023-08-21T18:40:19.387976 | 2021-10-15T05:37:00 | 2021-10-15T05:37:00 | 329,648,678 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 23 14:58:23 2020
@author: =GV=
"""
from datetime import datetime as dt
import subprocess as sp
import webbrowser as wb
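# Lists the Wi-Fi profiles stored by Windows (via `netsh wlan show profiles`) together with their
# saved passwords (`key=clear`), writes them to a dated text file and opens it. Windows-only.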
try:
wifis = [(ssid.split(':')[1][1:-1], password.strip().split(':')[1][1:]) for ssid in sp.check_output(['netsh','wlan', 'show', 'profiles']).decode('utf-8').split('\n') if 'All User Profile' in ssid for password in sp.check_output(['netsh', 'wlan', 'show', 'profile', ssid.split(':')[1][1:-1], 'key=clear']).decode('utf-8').split('\n') if 'Key Content' in password]
except IndexError:
ssids = [line.split(':')[1][1:-1] for line in sp.check_output(['netsh', 'wlan', 'show', 'profiles']).decode('utf-8').split('\n') if 'All User Profile' in line]
wifis = []
for ssid in ssids:
try:
pw = [line.strip().split(':')[1][1:] for line in sp.check_output(['netsh', 'wlan', 'show', 'profile', ssid, 'key=clear']).decode('utf-8').split('\n') if 'Key Content' in line]
wifis.append((ssid, pw[0]))
except IndexError:
wifis.append((ssid, ''))
date_tag = dt.strftime(dt.date(dt.now()), '%Y%m%d')
with open(f'WIFI_stored_{date_tag}.txt', 'w') as f:
f.write('disclaimer: I am not responsible for how this program is used\n')
for wifi in wifis:
f.write(f'\nSSID: \t{wifi[0]}\n')
f.write(f'PW: \t{wifi[1]}\n')
f.write("\nEPSTEIN DIDN'T KILL HIMSELF!!!")
f.write('\n\n\nthanks!! \nby: =GV=')
wb.open(f'WIFI_stored_{date_tag}.txt')
| UTF-8 | Python | false | false | 1,514 | py | 3 | wifi_stored_password.py | 1 | 0.579921 | 0.558124 | 0 | 32 | 45.3125 | 366 |
Yuvv/LeetCode | 3,186,865,772,959 | 1d366b5c98880a84da1759296c74c9ca757be9c2 | 422f49ef76dcc5a0ef5f380cc69b81c56bfc9116 | /1301-1400/1331-rank-transform-of-an-array.py | 6a2f59c223f5b229ecd1c600d68405aac2e139db | []
| no_license | https://github.com/Yuvv/LeetCode | ddac7a722a4c665a2b7f6da196fc3c16458b7d80 | c1297d9f3696ced0e8174ae675f348552fe6561a | refs/heads/master | 2023-08-19T02:58:28.471587 | 2023-08-13T15:58:05 | 2023-08-13T15:58:05 | 136,820,599 | 0 | 1 | null | false | 2020-06-26T04:25:17 | 2018-06-10T15:25:59 | 2020-06-26T04:19:22 | 2020-06-26T04:19:20 | 174 | 0 | 1 | 0 | Java | false | false | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : 1331-rank-transform-of-an-array.py
# @Date : 2020/01/26
# @Author : Yuvv <yuvv_th@outlook.com>
from typing import List
class Solution:
def arrayRankTransform(self, arr: List[int]) -> List[int]:
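        """Rank transform: each value is replaced by 1 + its index among the sorted distinct values."""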
numbers = {}
for i, it in enumerate(sorted(set(arr))):
numbers[it] = i + 1
return [numbers[it] for it in arr]
if __name__ == "__main__":
s = Solution()
# [4,1,2,3] expected
print(s.arrayRankTransform([40, 10, 20, 30]))
# [5,3,4,2,8,6,7,1,1,3] expected
print(s.arrayRankTransform([37, 12, 28, 9, 100, 56, 80, 5, 5, 12]))
| UTF-8 | Python | false | false | 642 | py | 959 | 1331-rank-transform-of-an-array.py | 951 | 0.570093 | 0.484424 | 0 | 25 | 24.68 | 71 |
MaesterPycoder/Python_Programming_Language | 2,594,160,274,492 | 5fb44591566400d0a7ab051079a5f46b469cde7c | 87667097e487b51cb1338b3694834b41f219a24b | /python_codes/pro1.py | 8603e67fe2fe5e7b4be19069466634e77fa44b55 | []
| no_license | https://github.com/MaesterPycoder/Python_Programming_Language | 2c8e50589470f04f223926339f24a00959c52882 | 122f6d1e665a7540684672a9f4fd468be9872eb5 | refs/heads/main | 2022-12-27T07:36:42.537711 | 2020-10-13T17:32:32 | 2020-10-13T17:32:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | d=[]
s=input("Enter your array:")
j=0
for i in range(len(s)):
f=0
for j in range(i+1,len(s)):
if (s[i]==s[j]):
f+=1
if (f==0):
d.append(s[i])
print(",".join(d))  # assumed intent: print the kept characters comma-separated; the original line "print(d.(','))" is not valid Python
| UTF-8 | Python | false | false | 227 | py | 179 | pro1.py | 169 | 0.365639 | 0.343612 | 0 | 11 | 16.909091 | 31 |
sharonLuo/LeetCode_py | 8,246,337,254,217 | 35f268ffe0213c53b1ed021851f98e5189e68ef5 | 9dbc9b51736f23e72ecefead6585564b3ddfcb7e | /balanced-binary-tree.py | 5a7861f0b6e50b9a3f607ed514bae17fc42e7195 | []
| no_license | https://github.com/sharonLuo/LeetCode_py | 4784a89c037d322f3f8f3ccd87a85615168e8902 | b903cbed6026535cad62c02719c8939eaf82ac6f | refs/heads/master | 2021-01-10T04:15:17.174285 | 2016-03-14T21:50:24 | 2016-03-14T21:50:24 | 45,428,147 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Given a binary tree, determine if it is height-balanced.
For this problem, a height-balanced binary tree is defined as a binary tree in which the depth of the two subtrees of every node never differ by more than 1.
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
#### simple and clever
import math  # needed for math.fabs used below

class Solution:
# @param root, a tree node
# @return a boolean
def isBalanced(self, root):
return self.getBalanceHeight(root) != -1
# @param root, a tree node
# @return a int, if the root is balanced return height, or return -1
def getBalanceHeight(self, root):
if root is None:
return 0;
leftHeight = self.getBalanceHeight(root.left)
rightHeight = self.getBalanceHeight(root.right)
# if left (or right) child tree is not balanced, return -1 directly to stop recursion
if leftHeight < 0 or rightHeight < 0 or math.fabs(leftHeight - rightHeight) >1:
return -1
return max(leftHeight, rightHeight) + 1
################
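# Alternative solution: direct recursion. `depth` is recomputed for every node, so this version is
# O(n^2) in the worst case, whereas the helper above returns -1 early and runs in O(n).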
class Solution(object):
def isBalanced(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
if root == None:
return True
if abs(self.depth(root.left)-self.depth(root.right)) > 1:
return False
return self.isBalanced(root.left) and self.isBalanced(root.right)
def depth(self, root):
if root == None:
return 0
if root.left == None and root.right == None:
return 1
if root.left == None:
return 1 + self.depth(root.right)
if root.right == None:
return 1 + self.depth(root.left)
return 1 + max(self.depth(root.left), self.depth(root.right))
| UTF-8 | Python | false | false | 2,005 | py | 93 | balanced-binary-tree.py | 91 | 0.600499 | 0.592519 | 0 | 66 | 29.30303 | 157 |
voidblue/network_programming | 5,935,644,833,300 | c8394025419739bf1267d19401d3f7d3d153d954 | 124e073d15b69128dd7e56b636b07a12150e3013 | /beforemidtest/Http_method.py | ba4230b3127a46a21b8c2e851de7fbd0a5bc9018 | []
| no_license | https://github.com/voidblue/network_programming | 62a130c6fa8e892181a583af3fdfa0763498a03a | f123ece24a80a8a4c6717f5a81c273e5b60e0e36 | refs/heads/master | 2021-01-18T07:29:42.212889 | 2017-04-28T06:18:35 | 2017-04-28T06:18:35 | 84,290,184 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from urllib.request import Request, urlopen
from urllib.parse import urlencode,urljoin
data_dict = {'q':'Python'}
data = 'search?' + urlencode(data_dict) #convert the dict into a 'key=value' query string
print(data)
url = urljoin('http://search.daum.net/',data)
req = Request(url)
print(req.full_url) #print the request URL
print(req.get_header('Content-Type'))
res = urlopen(req)
print(res.getheader('Content-Type'))
print(res.read())
| UTF-8 | Python | false | false | 455 | py | 29 | Http_method.py | 28 | 0.705596 | 0.705596 | 0 | 14 | 28.428571 | 66
qiurongsong/Tourism_NLP | 11,647,951,352,776 | d1c58ef28b34bf100359aca60e5847de455b64bc | 8fe7bd5058defeb0656a1478a6713638258b3c9d | /data_admin.py | c4069545d5c2a8986ed85fb92c58a799d9e43926 | []
| no_license | https://github.com/qiurongsong/Tourism_NLP | b435bba5f3a250f4028d146b856026ce4bd8a0ad | 193af0d4be5e81ea36b20ec1e646a9482412b452 | refs/heads/master | 2022-12-12T17:15:37.268088 | 2020-08-27T15:06:08 | 2020-08-27T15:06:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author : Gary
import pymysql
# ******************************************Database operations***************************************************#
class Database:
    # Database initialization. port must be a numeric int; keep it this way for now (later it could be opened up to query multiple databases/servers by accepting these as parameters)
def __init__(self, ):
        self.host = '127.0.0.1'  # host name, defaults to localhost
        self.port = 3306  # port, defaults to 3306
        self.user = 'root'  # user name, defaults to root
        self.password = 'iloveyou0604.'  # password
        self.db = 'tourism'  # database name
        self.charset = 'utf8mb4'  # database charset, default utf-8; 7.31 xts: changed to utf8mb4 to support storing emoji
    # **********************************Functions for connecting to the database and executing statements*********************************************# 2.12 update: supports bulk insert and fetching a single row
def database(self, sql, fetch='all', data_list=None):
if data_list is None:
data_list = 'one'
o_type = sql.split(' ')[0].lower()
conn = pymysql.connect(host=self.host, port=self.port, user=self.user, password=self.password, db=self.db,
                               charset=self.charset)  # connect to the database
        cur = conn.cursor()  # cursor used to access and manipulate data in the database (like a pointer)
        if o_type == 'select':
            cur.execute(sql)  # execute the statement
            if fetch == 'all':
                result = cur.fetchall()  # fetch all matching rows
            else:
                result = cur.fetchone()  # fetch a single matching row
            cur.close()  # close the cursor
            conn.close()  # close the database connection
return result
elif o_type == 'insert' or o_type == 'update' or o_type == 'delete':
if data_list == 'one':
try:
cur.execute(sql)
                    conn.commit()  # commit the transaction
                    # print("{} ok".format(type))
                except Exception as e:  # roll back if an error occurs
                    print(e)
                    conn.rollback()  # roll back the transaction
            else:  # insert several rows at once
                try:
                    cur.executemany(sql, data_list)  # each item in the list must be a tuple
                    conn.commit()  # commit the transaction
                    # print("{} ok".format(type))
                except Exception as e:  # roll back if an error occurs
                    print(e)
                    conn.rollback()  # roll back the transaction
            cur.close()  # close the cursor
            conn.close()  # close the database connection
    # ****************Select section******************************
    # select all of the Ctrip review comments
def select_data(self):
sql = 'select autoId,content from ctrip_comments'
data = self.database(sql)
return data
    # ****************Insert section******************************
def insert_sentiments(self, data_list):
sql = 'insert into sentiment(autoId,content,TencentSentiment,TencentConfidence) values (%s,%s,%s,%s)'
self.database(sql, data_list=data_list)
def update_sentiments(self,Sentiment,Confidence,autoId):
sql='update sentiment set BaiduSentiment="{}" , BaiduConfidence="{}" where autoId={}'.format(Sentiment,Confidence,autoId)
# print(sql)
self.database(sql)
if __name__ == '__main__':
Data_admin = Database()
output = Data_admin.select_data()
# print(output)
| UTF-8 | Python | false | false | 3,531 | py | 14 | data_admin.py | 5 | 0.480226 | 0.469924 | 0 | 75 | 39.12 | 129 |
ladnir/ocr | 13,417,477,859,255 | a216fe015805b797a198e125b67c44893534efd2 | 423ec2a2d7c993415e2da563cf7d5de057bbf62c | /examples/parallel_mergesort/parse_pthread_speedup.py | 498079e126fdbc89a7b2427ca18d6e154ce24f6f | [
"BSD-2-Clause"
]
| permissive | https://github.com/ladnir/ocr | 1752d6f36c2cae7c9cba089a55ad6e998879cf02 | a7a9ec87a64e5e7ad6125c5cdd30d26d6a25bfb0 | refs/heads/master | 2016-09-16T11:20:38.839639 | 2014-05-16T06:33:26 | 2014-05-16T06:33:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/env python
import sys, csv, string
if len(sys.argv) != 3:
print "Usage: parse_pthread_speedup.py <input csv file> \
<output csv file>"
exit()
inFile = open(sys.argv[1], "rb") #Open the input file
inReader = csv.reader(inFile) #Grind it into CSV format
inputRow = inReader.next() #Grab the first line in the file \
#Which we'll discard, since it's just the header.
outFile = open(sys.argv[2], "wb")
outWriter = csv.writer(outFile)
outWriter.writerow(["array_size", "num_cores", "speedup"])
sequential = 0 # single-core time for the current array size; set again whenever a num_cores == 1 row is read
for row in inReader:
if int(row[1]) == 1: #This is the single-core time for this
#size
sequential = float(row[2])
print sequential
speedup = sequential/float(row[2])
outWriter.writerow([row[0], row[1], speedup])
| UTF-8 | Python | false | false | 815 | py | 245 | parse_pthread_speedup.py | 26 | 0.656442 | 0.644172 | 0 | 29 | 27.068966 | 64 |
rwflick/djangoXNFLDemo | 14,267,881,377,983 | 4b60de257d8022fde5fa7e8038e5934d45aed488 | 20d403fc88418bc6decc75c31a86bd5f6474446c | /nfl/models.py | 5da1343273d9cd046db13f854ab5be118a58bea8 | [
"MIT"
]
| permissive | https://github.com/rwflick/djangoXNFLDemo | 314f698c3a714b4bb011717fb982d55b04e53259 | 825072b25b9b33eba9687d7ec358d59c7706a16f | refs/heads/master | 2022-12-15T01:56:54.387187 | 2020-09-13T23:37:48 | 2020-09-13T23:37:48 | 295,256,920 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
# Create your models here.
class Team(models.Model):
CONFERENCES = [
('AFC', 'American Football Conference'),
('NFC', 'National Football Conference')
]
DIVISIONS = [
('N', 'North'),
('S', 'South'),
('E', 'East'),
('W', 'West')
]
location = models.CharField(max_length=50)
nickname = models.CharField(max_length=50)
conference = models.CharField(max_length=50, choices=CONFERENCES, null=True)
division = models.CharField(max_length=50, choices=DIVISIONS, null=True)
def __str__(self):
return "{} {}".format(self.location, self.nickname)
class Player(models.Model):
POSITION = [
('QB', 'Quarterback'),
('RB', 'Running Back'),
('FB', 'Fullback'),
('WR', 'Wide Receiver'),
('TE', 'Tight End'),
('C', 'Center'),
('OT', 'Offensive Tackle'),
('OG', 'Offensive Guard'),
('DE', 'Defensive End'),
('DT', 'Defensive Tackle'),
('LB', 'Line Backer'),
('DB', 'Defensive Back'),
('CB', 'Cornerback'),
('S', 'Safety'),
('K', 'Kicker'),
('P', 'Punter'),
('LS', 'Long Snapper'),
('KR', 'Kick Returner'),
('PR', 'Punt Returner')
]
first_name = models.CharField(max_length=50, null=True)
last_name = models.CharField(max_length=50, null=True)
position = models.CharField(max_length=25, null=True, choices=POSITION)
team = models.ForeignKey(Team, on_delete=models.PROTECT)
def __str__(self):
return '{}, {}'.format(self.last_name, self.first_name)
| UTF-8 | Python | false | false | 1,645 | py | 15 | models.py | 11 | 0.543465 | 0.534954 | 0 | 56 | 28.375 | 80 |
ml4ai/BioContext_annotator | 2,430,951,532,374 | 0b1fb2c8d8262d096204545d883db1a03082acb4 | f4af064c16f3defc90b2666f7e5783d5821006c7 | /server/app/exceptions.py | 53d211783ca0190a1c307ea20dc42c0225f87c28 | []
| no_license | https://github.com/ml4ai/BioContext_annotator | 1144daad8d3012658663534c9587d7a05f84dff5 | 4ddc8a87ccdd91df60e033c7e3b806e5f1f97486 | refs/heads/master | 2021-07-26T00:56:51.645698 | 2021-06-25T20:15:58 | 2021-06-25T20:15:58 | 149,360,468 | 0 | 0 | null | false | 2021-06-25T20:17:01 | 2018-09-18T22:39:04 | 2021-06-25T20:16:09 | 2021-06-25T20:15:59 | 7,729 | 0 | 0 | 0 | Python | false | false | """
Exceptions
"""
class RestartInterrupt(Exception):
"""
Server Restart
"""
def __init__(self):
self.value = "RestartInterrupt"
def __str__(self):
return repr(self.value)
class ShutdownInterrupt(Exception):
"""
Server Shutdown
"""
def __init__(self):
self.value = "ShutdownInterrupt"
def __str__(self):
return repr(self.value)
class CustomError(Exception):
"""
For sending custom messages out of server processes via exceptions
(amongst other things)
"""
def __init__(self, message, pre='', post=''):
self.pre = pre
self.message = message
self.post = post
def __str__(self):
return ''.join([self.pre, self.message, self.post])
| UTF-8 | Python | false | false | 768 | py | 394 | exceptions.py | 30 | 0.572917 | 0.572917 | 0 | 42 | 17.285714 | 70 |
stephanie-wang/ray | 8,358,006,407,021 | 1eaf2448c0a94967785d2104612410daf20ec0de | 613d9b4259e63e6b5ee4f85131c93389569e3f17 | /python/ray/autoscaler/kubernetes/node_provider.py | a65f7e82350f2417a8e23cbf9ef9e26429232fa0 | [
"Apache-2.0",
"MIT"
]
| permissive | https://github.com/stephanie-wang/ray | 097986a02eb517eeeddddc3c636eb4c380f1c714 | 62bb26d5d04343e339d014f302a9fbacbd4482d7 | refs/heads/master | 2023-08-31T08:07:38.864072 | 2020-02-06T17:49:40 | 2020-02-06T17:49:40 | 77,302,388 | 4 | 3 | Apache-2.0 | true | 2022-05-24T22:22:31 | 2016-12-24T22:37:23 | 2021-04-14T14:37:25 | 2022-05-24T22:22:30 | 234,120 | 2 | 0 | 3 | Python | false | false | import logging
from ray.autoscaler.kubernetes import core_api, log_prefix
from ray.autoscaler.node_provider import NodeProvider
from ray.autoscaler.tags import TAG_RAY_CLUSTER_NAME
logger = logging.getLogger(__name__)
def to_label_selector(tags):
label_selector = ""
for k, v in tags.items():
if label_selector != "":
label_selector += ","
label_selector += "{}={}".format(k, v)
return label_selector
class KubernetesNodeProvider(NodeProvider):
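    """NodeProvider that runs Ray cluster nodes as pods in a Kubernetes namespace."""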
def __init__(self, provider_config, cluster_name):
NodeProvider.__init__(self, provider_config, cluster_name)
self.cluster_name = cluster_name
self.namespace = provider_config["namespace"]
def non_terminated_nodes(self, tag_filters):
# Match pods that are in the 'Pending' or 'Running' phase.
# Unfortunately there is no OR operator in field selectors, so we
# have to match on NOT any of the other phases.
field_selector = ",".join([
"status.phase!=Failed",
"status.phase!=Unknown",
"status.phase!=Succeeded",
"status.phase!=Terminating",
])
tag_filters[TAG_RAY_CLUSTER_NAME] = self.cluster_name
label_selector = to_label_selector(tag_filters)
pod_list = core_api().list_namespaced_pod(
self.namespace,
field_selector=field_selector,
label_selector=label_selector)
return [pod.metadata.name for pod in pod_list.items]
def is_running(self, node_id):
pod = core_api().read_namespaced_pod_status(node_id, self.namespace)
return pod.status.phase == "Running"
def is_terminated(self, node_id):
pod = core_api().read_namespaced_pod_status(node_id, self.namespace)
return pod.status.phase not in ["Running", "Pending"]
def node_tags(self, node_id):
pod = core_api().read_namespaced_pod_status(node_id, self.namespace)
return pod.metadata.labels
def external_ip(self, node_id):
raise NotImplementedError("Must use internal IPs with Kubernetes.")
def internal_ip(self, node_id):
pod = core_api().read_namespaced_pod_status(node_id, self.namespace)
return pod.status.pod_ip
def set_node_tags(self, node_id, tags):
body = {"metadata": {"labels": tags}}
core_api().patch_namespaced_pod(node_id, self.namespace, body)
def create_node(self, node_config, tags, count):
pod_spec = node_config.copy()
tags[TAG_RAY_CLUSTER_NAME] = self.cluster_name
pod_spec["metadata"]["namespace"] = self.namespace
pod_spec["metadata"]["labels"] = tags
logger.info(log_prefix + "calling create_namespaced_pod "
"(count={}).".format(count))
for _ in range(count):
core_api().create_namespaced_pod(self.namespace, pod_spec)
def terminate_node(self, node_id):
core_api().delete_namespaced_pod(node_id, self.namespace)
def terminate_nodes(self, node_ids):
for node_id in node_ids:
self.terminate_node(node_id)
| UTF-8 | Python | false | false | 3,097 | py | 677 | node_provider.py | 505 | 0.631902 | 0.631902 | 0 | 83 | 36.313253 | 76 |
madelinemarshall/MeraxesCodes | 15,857,019,289,373 | fdccc54940ae7605bdd2f76de44d70b8b6cf4b9e | f52ae11610277969b07d7010b4e82272b4d6dc23 | /Paper2Plots/BHGrowthMechanism_modelComparison_T125.py | 1321371347b1c0d6fa0d57ba2e4c57bd5fcc9652 | []
| no_license | https://github.com/madelinemarshall/MeraxesCodes | 35f03c9672858142638b511920e69c3ed538972c | 7baf2390dae9d8127460c6392c63b3c17f1b8350 | refs/heads/master | 2022-11-28T18:04:47.221839 | 2020-07-29T04:18:08 | 2020-07-29T04:18:08 | 265,444,740 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from dragons import meraxes
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
import pandas as pd
#Sets plot defaults
matplotlib.rcParams['font.size'] = (9)
matplotlib.rcParams['figure.figsize'] = (7.2,2.5)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
colors = ['#e41a1c','#377eb8','#4daf4a','#984ea3',\
'#ff7f00','#a65628','#f781bf','#999999']*4
def load_data(filename,snapshot):
gals=meraxes.io.read_gals(data_folder+filename,\
snapshot=snapshot,\
h=cosmo['h'],quiet=True)
gals=gals[(gals["GhostFlag"]==0)]#remove ghosts
return gals
def plot_BH_frac(gals,axes,linestyle='-',lw=2.5):
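  # Bins galaxies by black hole mass and plots, per bin, the log of the fraction of total BH mass
  # grown through each channel (seed condition, radio mode, BH-BH coalescence, merger-driven and
  # instability-driven quasar mode). Bins containing 5 or fewer black holes are not plotted.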
MaxMass=10.5
MinMass=6
BinWidth=0.25
nBins=int(np.ceil((MaxMass-MinMass)/BinWidth))
BinStart=np.zeros(nBins)
TotMinBin=np.zeros(nBins)
TotNinBin=np.zeros(nBins)
ID_MinBin=np.zeros(nBins)
MD_MinBin=np.zeros(nBins)
Coalescence_MinBin=np.zeros(nBins)
Radio_MinBin=np.zeros(nBins)
BH=gals['BlackHoleMass']*1e10
ID_BH=gals['BlackHoleMass_ID']*1e10
MD_BH=gals['BlackHoleMass_MD']*1e10
Coalescence_BH=gals['BlackHoleMass_Coalescence']*1e10
Radio_BH=gals['BlackHoleMass_Radio']*1e10
for ii in range(0,nBins):
BinStart[ii]=MinMass+ii*BinWidth
BinEnd=MinMass+(ii+1)*BinWidth
TotMinBin[ii]=np.nansum(BH[(np.log10(BH)>=BinStart[ii])&(np.log10(BH)<BinEnd)])
TotNinBin[ii]=np.size(BH[(np.log10(BH)>=BinStart[ii])&(np.log10(BH)<BinEnd)])
ID_MinBin[ii]=np.nansum(ID_BH[(np.log10(BH)>=BinStart[ii])&(np.log10(BH)<BinEnd)])
MD_MinBin[ii]=np.nansum(MD_BH[(np.log10(BH)>=BinStart[ii])&(np.log10(BH)<BinEnd)])
Coalescence_MinBin[ii]=np.nansum(Coalescence_BH[(np.log10(BH)>=BinStart[ii])&(np.log10(BH)<BinEnd)])
Radio_MinBin[ii]=np.nansum(Radio_BH[(np.log10(BH)>=BinStart[ii])&(np.log10(BH)<BinEnd)])
ID_frac=ID_MinBin/TotMinBin
MD_frac=MD_MinBin/TotMinBin
Coalescence_frac=Coalescence_MinBin/TotMinBin
Radio_frac=Radio_MinBin/TotMinBin
Rest=1-(ID_MinBin+MD_MinBin+Coalescence_MinBin+Radio_MinBin)/TotMinBin
Rest_MinBin=TotMinBin-(ID_MinBin+MD_MinBin+Coalescence_MinBin+Radio_MinBin)
axes.plot(BinStart[TotNinBin>5]+0.5*BinWidth,np.log10(Rest[TotNinBin>5]),label='Seed Condition',linewidth=lw,linestyle=linestyle,color=colors[0])
axes.plot(BinStart[TotNinBin>5]+0.5*BinWidth,np.log10(Radio_frac[TotNinBin>5]),label='Radio-Mode',linewidth=lw,linestyle=linestyle,color=colors[1])
axes.plot(BinStart[TotNinBin>5]+0.5*BinWidth,np.log10(Coalescence_frac[TotNinBin>5]),label='BH-BH Coalescence',linewidth=lw,linestyle=linestyle,color=colors[2])
axes.plot(BinStart[TotNinBin>5]+0.5*BinWidth,np.log10(MD_frac[TotNinBin>5]),label='Merger-Driven\nQuasar-Mode',linewidth=lw,linestyle=linestyle,color=colors[3])
axes.plot(BinStart[TotNinBin>5]+0.5*BinWidth,np.log10(ID_frac[TotNinBin>5]),label='Instability-Driven\nQuasar-Mode',linewidth=lw,linestyle=linestyle,color=colors[4])
axes.set_xlabel(r'$\log(M_{\mathrm{BH}}/M_\odot)$')
axes.set_xlim(MinMass-0.15,MaxMass-0.05)
axes.set_ylim(-5.3,0.1)
if __name__=="__main__":
redshift={63:7,78:6,100:5,116:4,134:3,158:2,194:0.95,213:0.55,250:0}
cosmo = {'omega_M_0' : 0.308,
'omega_lambda_0' : 0.692, 'omega_b_0' : 0.04839912,
'omega_b_0' : 0.04839912,
'omega_n_0' : 0.0,
'N_nu' : 0,
'h' : 0.678,
'n' : 0.968,
'sigma_8' : 0.815
}
data_folder='/home/mmarshal/data_dragons/'
filenames=['paper2/output/meraxes.hdf5']+['paper2_kc_models/output_0p0'+str(f)+'/meraxes.hdf5' for f in [1,3,9]]
filenames_T125=['paper2_T125/output/meraxes.hdf5']+['paper2_kc_models_T125/output_0p0'+str(f)+'/meraxes.hdf5' for f in [1,3,9]]
kc=[0.005,0.01,0.03,0.09]
props=['BlackHoleMass','BlackHoleMass_ID','BlackHoleMass_MD','GhostFlag','BlackHoleMass_Coalescence','BlackHoleMass_Radio','BulgeStellarMass','StellarMass']
fig,axes=plt.subplots(1,5,gridspec_kw = {'wspace':0,'hspace':0.3},sharey=True,sharex=True)
#ii=-1
#snapshot=158
#for filename in filenames:
# ii+=1
# gals=load_data(filename,snapshot)
# plot_BH_frac(gals,axes[0,ii])
# axes[0,ii].set_title(r'$k_c={}$'.format(kc[ii]))
ii=-1
snapshot=250
for filename in filenames_T125:
ii+=1
gals=load_data(filename,snapshot)
plot_BH_frac(gals,axes[ii],linestyle='--',lw=1.5)
axes[0].set_ylim([-4.5,0.2])
axes[0].set_xlim([6.5,10.8])
axes[4].axis('off')
#axes[1,4].axis('off')
lgd=axes[3].legend(fontsize='small',loc=(1.01,0.5))
axes[0].set_ylabel(r'$\log\left(M_{\rm{BH,~ growth~ mode}}/M_{\rm{BH,~ total}}\right)$')
#axes[1,0].set_ylabel(r'$\log\left(M_{\rm{BH,~ growth~ mode}}/M_{\rm{BH,~ total}}\right)$')
#plt.tight_layout()
plt.subplots_adjust(bottom=0.2,left=0.08,right=0.95)
#plt.savefig('/home/mmarshal/results/plots/Paper2/BHGrowthModes.pdf', format='pdf',bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig('/home/mmarshal/results/plots/Paper2/BHGrowthModes_modelComparison_T125.pdf',format='pdf')
plt.show()
| UTF-8 | Python | false | false | 4,990 | py | 265 | BHGrowthMechanism_modelComparison_T125.py | 143 | 0.689178 | 0.625852 | 0 | 119 | 40.932773 | 167 |
kstensland/Mood_Checker | 9,491,877,751,554 | 42b428a199e41f0aa4b3e0b002b10582e3e88acb | 1cb219988e97c841b88250f86bd7686f6c9fdd80 | /makeDB.py | 5c5872a1ddb3e58bbb4393c19cda208b7c08f4a9 | []
| no_license | https://github.com/kstensland/Mood_Checker | e2f96c2f19fb603180e9b33f18e8c75069cea9e6 | 88bdcecb7019bfb71a38853aaa11100979a2a745 | refs/heads/master | 2021-01-21T22:26:54.812241 | 2014-02-24T18:29:31 | 2014-02-24T18:29:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import MySQLdb as mysql
con = mysql.connect(user="root", passwd="a1b2c3d4", host='localhost')
db = con.cursor()
def recreateDB():
db.execute('DROP DATABASE IF EXISTS mood_prover')
db.execute('CREATE DATABASE mood_prover')
db.execute('USE mood_prover')
db.execute('CREATE TABLE user('+
'id INT NOT NULL auto_increment,'+
'user_name varchar(30) NOT NULL,'+
'PRIMARY KEY(id))')
db.execute('CREATE TABLE tweet('+
'id INT NOT NULL auto_increment,'+
'twitterID varchar(24) NOT NULL,'+
'userID INT NOT NULL,'+
'text varchar(140) NOT NULL,'+
'creation_time DATETIME NOT NULL,'+
'valence FLOAT,'+
'is_music_tweet BOOLEAN,'+
'PRIMARY KEY(id),'+
'FOREIGN KEY (userID) REFERENCES user(id))')
db.execute('CREATE TABLE Song('+
'id INT NOT NULL auto_increment,'+
'echonestID varchar(24) NOT NULL,'+
'name varchar(140) NOT NULL,'+
'artist varchar(140) NOT NULL,'+
'valence FLOAT NOT NULL,'+
#'hyperlink varchar(60),'+
'PRIMARY KEY(id))')
db.execute('CREATE TABLE TweetToSong('+
'id INT NOT NULL auto_increment,'+
'songID INT NOT NULL,'+
'tweetID INT NOT NULL,'+
'PRIMARY KEY(id),'+
'FOREIGN KEY (songId) references Song(id),'+
'FOREIGN KEY (tweetID) REFERENCES tweet(id))')
print "DONE"
def main():
created = recreateDB()
"""
if input("ENTER 1 IF YOU ARE SURE YOU WANT TO RECREATE THE DATABSE: ") == 1:
print "Gonna create the database"
created = recreateDB()
else:
print "No database created"
"""
main()
| UTF-8 | Python | false | false | 1,875 | py | 223 | makeDB.py | 6 | 0.523733 | 0.511467 | 0 | 58 | 31.327586 | 80 |
dim-stef/django | 14,826,227,132,395 | ff0b127f137a943ab050c961f9f6199d88fa5fb8 | 1694c38a9666c7c3c3b8d2edba500cf30821e86a | /groups/signals.py | 22e1adcee02b6e61338ee47572ac30a83ed2862c | []
| no_license | https://github.com/dim-stef/django | 207936bd400ea64e4a4711755d58435690fe8623 | 953bb03ce84bacb6bad1c5df61977d7fcc78d111 | refs/heads/master | 2019-07-13T23:28:23.421690 | 2018-11-17T16:24:22 | 2018-11-17T16:24:22 | 124,302,240 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db.models.signals import m2m_changed
from django.core.exceptions import ValidationError
from groups.models import Group
'''def parent_changed(instance, action, **kwargs):
if action == "post_add":
if instance in instance.parents.all():
raise ValidationError({"parents": "Cannot branch group to self"})
m2m_changed.connect(parent_changed, sender=Group.parents.through, dispatch_uid="groups.signals.parent_changed")'''
| UTF-8 | Python | false | false | 456 | py | 77 | signals.py | 67 | 0.736842 | 0.732456 | 0 | 12 | 37 | 114 |
Raju379/Tweet-classifier | 16,157,666,975,561 | 973cbfbd7455cf10ec8852bcb6bd847336925d43 | f8515234cfacc7ad5667bfd67567a5c781cca84a | /app.py | b848222d010a0308932b25d9dca2b916be88e1e7 | []
| no_license | https://github.com/Raju379/Tweet-classifier | ea46a9df7476e9a2a174cc690271a5f38c95b9c3 | 64c6f17c26a804825398816f84857d752bc204f7 | refs/heads/main | 2023-04-16T05:43:59.619000 | 2021-04-27T08:58:10 | 2021-04-27T08:58:10 | 362,024,237 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import streamlit as st
import pandas as pd
import numpy as np
import pickle
import re
import string
import nltk
#nltk.download('punkt')
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
def clean_tweet(tweet): # for cleaning the sentence
tweet = re.sub(r'http\S+', '', tweet) # remove http links
tweet = re.sub(r'bit.ly/\S+', '', tweet) # rempve bitly links
tweet = tweet.strip('[link]') # remove [links]
my_punctuation = '!"$%&\'()*+,-./:;<=>?[\\]\\\\\^_`{|}~•@#'
tweet = re.sub('(RT\s@[A-Za-z0-9-_]+[A-Za-z0-9-_]+)', '', tweet) # remove retweet
tweet = re.sub('(@[A-Za-z0-9-_]+[A-Za-z0-9-_]+)', '', tweet)
tweet = tweet.lower() # lower case
tweet = re.sub('['+my_punctuation + ']+', ' ', tweet) # strip punctuation
tweet = re.sub('([0-9]+)', '', tweet) # remove numbers
tweet = re.sub('amp', '', tweet) # remove amp
tweet = re.sub('\s+', ' ', tweet) #remove double spacing
return tweet
def main():
st.sidebar.title("About")
st.sidebar.info("Blue Ocean Project")
st.title("Disaster tweets classifier")
choice = st.selectbox("Classification type: ",
['Source', 'Type', 'Informativeness'])
user_text = st.text_input("Enter the tweet", max_chars=280)
#button = st.button("Predict")
if st.button("Predict"):
# removing punctuation
#user_text = re.sub('[%s]' % re.escape(string.punctuation), '', user_text)
user_text = clean_tweet(user_text)
# Customizing stop words list
stop_words = stopwords.words('english')
newStopWords = ['ur','u','nd'] # new stop word
remove_stopword = ['not','no','nor',"don","aren","couldn","didn","hadn","hasn","haven","isn","mustn","mightn","needn","shouldn",
"wasn","wouldn","won"] # stop words that we don't want
stop_words.extend(newStopWords) # add new stop word
stop_words = [OldStopWords for OldStopWords in stop_words if OldStopWords not in remove_stopword] # remove some stop words
# tokenizing
tokens = nltk.word_tokenize(user_text)
# removing stop words
stopwords_removed = [token.lower() for token in tokens if not token.lower() in set(stop_words)]
# taking root word
lemmatizer = WordNetLemmatizer()
lemmatized_output = []
for word in stopwords_removed:
lemmatized_output.append(lemmatizer.lemmatize(word))
lemmatized_output = ' '.join(lemmatized_output)
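        # Each pickled file appears to hold a fitted scikit-learn pipeline: every step except the last
        # transforms the cleaned text and the final step is the classifier (an assumption based on the
        # model[:-1].transform / model[-1].predict calls below).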
if choice == 'Source':
source_model = pickle.load(open('pickle/source.pkl', 'rb'))
out_put1 = source_model[:-1].transform([lemmatized_output])
result = source_model[-1].predict(out_put1)[0]
st.success(result)
if choice == 'Type':
source_model = pickle.load(open('pickle/type.pkl', 'rb'))
out_put1 = source_model[:-1].transform([lemmatized_output])
result = source_model[-1].predict(out_put1)[0]
st.success(result)
if choice == 'Informativeness':
source_model = pickle.load(open('pickle/Informativeness.pkl', 'rb'))
out_put1 = source_model[:-1].transform([lemmatized_output])
result = source_model[-1].predict(out_put1)[0]
st.success(result)
if __name__ =='__main__':
main()
| UTF-8 | Python | false | false | 3,480 | py | 2 | app.py | 1 | 0.57188 | 0.56383 | 0 | 83 | 39.831325 | 136 |
imanursar/sains-komputasi | 7,043,746,397,438 | 58b896dec8812bba0b7d07021c1c2547d27cf944 | 46aac4e9d35279fbddd18149df8a7d288e944b77 | /tugas Algoritma dan Rancangan Perangkat Lunak/find_root.py | f1e2e4b5541871f6a53cfefe66c506678e9bff8d | []
| no_license | https://github.com/imanursar/sains-komputasi | 73b09145f47f625e4648f4d5eb25b57ee9cd6ee3 | 2367a4fc7b5fc3afbaa0647ac2d6b1fe49cdf255 | refs/heads/master | 2020-04-03T20:01:00.563254 | 2018-10-31T11:11:00 | 2018-10-31T11:17:20 | 155,544,147 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
from tkinter import Menu
# import tooltip_oop as tt
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2TkAgg)
from matplotlib.figure import Figure
from skimage import color
from scipy.signal import convolve2d as conv2
from datetime import datetime
class OOP():
def __init__(self, win, xy1, xy2):
self.win = win
#tab ===================================================================
nb = ttk.Notebook(win, width=1350, height=700)
nb.pack()
tab = ttk.Frame(nb)
nb.add(tab, text='main program')
#add a window title ====================================================
self.win.title("Find root")
#subtab title ==========================================================
self.frame_tab1_1 = ttk.LabelFrame(tab, text="Input")
self.frame_tab1_1.grid(column=0, row=0, sticky=tk.N)
self.frame_tab1_2 = ttk.LabelFrame(tab, text="origin image")
self.frame_tab1_2.grid(column=1, row=0, sticky=tk.W)
self.frame_tab1_3 = ttk.LabelFrame(tab, text="edited image")
self.frame_tab1_3.grid(column=2, row=0, sticky=tk.W)
self.frame_tab1_4 = ttk.LabelFrame(tab, text="display origin image")
self.frame_tab1_4.grid(column=1, row=1, sticky=tk.W, columnspan=10)
self.frame_tab1_5 = ttk.LabelFrame(tab, text="display edited image")
self.frame_tab1_5.grid(column=2, row=1, sticky=tk.W, columnspan=10)
#gaussian filter
#adding block and label
self.frame_tab1_a1 = ttk.LabelFrame(self.frame_tab1_1, text="gaussian filter")
self.frame_tab1_a1.grid(column=0, row=3, sticky='WE')
self.Label_1 = ttk.Label(self.frame_tab1_a1)
self.Label_1.grid(column=0, row=2, sticky=tk.W)
#adding dropdown box
ttk.Label(self.frame_tab1_a1, text="The size gauss filter:").grid(column=0, row=0, padx=15)
# self.int_gauss = tk.IntVar()
# self.gauss = ttk.Combobox(self.frame_tab1_a1, width=12, textvariable=self.int_gauss)
self.gauss = ttk.Combobox(self.frame_tab1_a1, width=12)
self.gauss['values'] = (1,2,3,4,5,6,7,8,9,10)
self.gauss.grid(column=0, row=1)
self.gauss.current(6) #default value
#adding dropdown box
ttk.Label(self.frame_tab1_a1, text="sigma:").grid(column=1, row=0)
self.sigma = ttk.Combobox(self.frame_tab1_a1, width=12)
self.sigma['values'] = (1,2,3,4,5,6,7,8,9,10)
self.sigma.grid(column=1, row=1, padx=15)
self.sigma.current(2) #default value
# action button
self.action_1 = ttk.Button(self.frame_tab1_a1, text="gauss Filter", command=self._gauss)
self.action_1.grid(column=2,row=1)
#inside origin image ===================================================
#display image
fig_1 = plt.figure(figsize=(5,5))
plt.axis('off')
# im_1 = plt.imshow(self.image_1)
fig_1.tight_layout()
fig_1.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
self.canvas_1 = FigureCanvasTkAgg(fig_1, master=self.frame_tab1_2)
self.canvas_1.draw()
self.canvas_1._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.toolbar = NavigationToolbar2TkAgg(self.canvas_1,self.frame_tab1_4)
self.toolbar.update()
self.canvas_1._tkcanvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=4)
#inside edited image ===================================================
#display image
self.fig_2 = plt.figure(figsize=(5,5))
plt.axis('off')
# self.im_2 = plt.imshow(self.image_2)
self.fig_2.tight_layout()
self.fig_2.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
self.canvas_2 = FigureCanvasTkAgg(self.fig_2, master=self.frame_tab1_3)
self.canvas_2.draw()
self.canvas_2._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.toolbar = NavigationToolbar2TkAgg(self.canvas_2,self.frame_tab1_5)
self.toolbar.update()
self.canvas_2._tkcanvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=4)
#adding menu bar =======================================================
menu = Menu(win, tearoff=0)
win.config(menu=menu)
#add menu Item
fileMenu = Menu(menu, tearoff=0)
menu.add_cascade(label="File", menu=fileMenu)
fileMenu.add_command(label="New")
fileMenu.add_separator()
fileMenu.add_command(label='Exit', command=win.destroy)
        helpMenu = Menu(menu, tearoff=0) #tearoff=0 removes the dashed tear-off entry at the top of the menu
menu.add_cascade(label="help", menu=helpMenu)
helpMenu.add_command(label="About")
#Button callback ===========================================================
def _gauss(self):
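        # Callback for the "gauss Filter" button: grayscale the source image, convolve it with a
        # Gaussian kernel from self._gauss_filter (not defined in this file), redraw the edited-image
        # canvas, and log the chosen parameters (assumes self.image_1, self.scr1 and self.scr2 exist).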
self.image_g = color.rgb2gray(self.image_1)
self.filter_g = self._gauss_filter((int(self.gauss.get()),int(self.gauss.get())),int(self.sigma.get()))
self.image_gf = conv2(self.image_g, self.filter_g,mode='same')
self.im_2 = plt.imshow(self.image_gf)
self.canvas_2.draw()
self.Label_1.configure(foreground='blue')
self.Label_1.configure(text="gauss= " + self.gauss.get() + " | sigma= " + self.sigma.get())
print("gauss= " + self.gauss.get() + " | sigma= " + self.sigma.get())
self.scr1.insert(tk.INSERT, datetime.now().strftime('%H:%M:%S') + " >> " + "Gaussian filter with parameter: " + '\n')
self.scr1.insert(tk.INSERT, "\t " + "The size of gauss filter = " + self.gauss.get() + " | sigma = " + self.sigma.get() + '\n')
self.scr2.insert(tk.INSERT, datetime.now().strftime('%H:%M:%S') + " >> " + "Gaussian filter with parameter: " + '\n')
self.scr2.insert(tk.INSERT, "\t " + "The size of gauss filter = " + self.gauss.get() + " | sigma = " + self.sigma.get() + '\n')
    def _findroot(self):
        pass  # placeholder: root-finding callback not implemented yet
    def _newton_raphson(self):
        pass  # placeholder: Newton-Raphson iteration not implemented yet
root = tk.Tk()
# root.geometry('1366x786')
# root.resizable(width=False, height=False)
# app=FullScreenApp(root)
root.iconbitmap(r'D:\software\python\conda\Anaconda3\DLLs\pyc.ico')
# root.overrideredirect(True)
# root.overrideredirect(False)
root.attributes('-fullscreen',True)
# root.state('zoomed')
# root.bind('<Escape>',lambda e: root.destroy())
main_win = OOP(root,0,1400)
root.mainloop()
| UTF-8 | Python | false | false | 6,579 | py | 35 | find_root.py | 28 | 0.593251 | 0.563155 | 0 | 163 | 39.361963 | 138 |
colinkelleher/CorkCity_ShortestPath | 2,920,577,772,044 | 3b7574b4e0451fd340e873ac1e3292ee9b2cb0b6 | 0eff8ca11f23305a06f389f9c45ee809a839fc20 | /Element.py | ad46ff38aca1e87a6332e7d028f9d3b25e9a1229 | []
| no_license | https://github.com/colinkelleher/CorkCity_ShortestPath | 7b9422e39e09165c13a41d8caf6283d8949ca924 | 723b97043d1c7cac765db8b7e45addca92aa8c6d | refs/heads/master | 2022-04-27T18:37:39.418610 | 2020-04-30T11:17:38 | 2020-04-30T11:17:38 | 215,151,552 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # *********************************************************
# CLASS ELEMENT
# COLIN KELLEHER 117303363
# ASSIGNMENT 2 - CS2516
# *********************************************************
class Element:
# *********************************************************
# __init__
# Creating an element of a key, value and index
# *********************************************************
def __init__(self, k, v, i):
self._key = k # initialising key
self._value = v # initialising value
self._index = i # initialising index
# *********************************************************
# __lt__
# Equality operation - less than
# *********************************************************
def __lt__(self, other):
return self._key < other._key
# *********************************************************
# __str__
# Return a string representation of the index
# *********************************************************
def __str__(self):
return "Value %i,Index %i,Key %i" % (self._value, self._index, self._key)
# *********************************************************
# GETTERS & SETTERS FOR KEY, VALUE & INDEX
# *********************************************************
# GETTER & SETTER FOR KEY
def getKey(self):
return self._key
def setKey(self, newkey):
self._key = newkey
# GETTER & SETTER FOR VALUE
def getValue(self):
return self._value
def setValue(self, newvalue):
self._value = newvalue
# GETTER & SETTER FOR INDEX
def getIndex(self):
return self._index
def setIndex(self, newindex):
self._index = newindex
# *********************************************************
# _WIPE
# Reset the key, value, and index
# *********************************************************
def _wipe(self):
self._key = None
self._value = None
self._index = None
| UTF-8 | Python | false | false | 1,926 | py | 7 | Element.py | 6 | 0.371236 | 0.363967 | 0 | 65 | 28.630769 | 81 |
yw778/COMS4705_nlp | 8,254,927,178,889 | 2ea450c950f88c70db352ae1625471b71d4f2338 | 197d1e555430b8524b2f8ef62539a65005d53f44 | /hw1_tagging/4_2.py | c037d1ff67db4e756d039dfdbf5085726996426d | []
| no_license | https://github.com/yw778/COMS4705_nlp | b721aa5c20d7620d826cd39887b6f16699f51efb | b66dedf26af7e746454e6becfa01a314c5bf664e | refs/heads/master | 2021-03-27T19:57:36.796081 | 2018-05-26T22:21:16 | 2018-05-26T22:21:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/python
import os
from collections import defaultdict
from math import log
def get_emission_prob(input_file = "ner_rare.counts"):
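    # Parse the HMM count file and return log emission probabilities log P(word | tag)
    # as a nested dict emission[word][tag], defaulting to -inf for unseen pairs.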
lines = []
count = defaultdict(int)
with open(input_file,'r') as input:
for line in input.readlines():
if line.strip():
lines.append(line.split())
for line in lines:
if line[1] == "WORDTAG":
tag = line[2]
count[tag] += float(line[0])
emission = defaultdict(lambda:defaultdict(lambda: float("-inf")))
for line in lines:
if line[1] == "WORDTAG":
word = " ".join(line[3:])
tag = line[2]
emission[word][tag] = log(float(line[0])) - log(count[tag])
return emission
def naive_tagger(output_file = "4_2.txt"):
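    # Baseline tagger: assign every word its single most likely tag under the emission model alone;
    # words never seen in training fall back to the _RARE_ pseudo-word.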
emission = get_emission_prob()
tag_dict = defaultdict(tuple)
for word in emission:
max_prob = -float("inf")
max_tag = ""
for tag in emission[word]:
if emission[word][tag] > max_prob:
max_prob = emission[word][tag]
max_tag = tag
tag_dict[word] = (max_tag, max_prob)
cache = []
rare = '_RARE_'
with open("ner_dev.dat", 'r') as input:
for line in input.readlines():
word = line.strip()
if not word:
cache.append('')
elif word in tag_dict:
cache.append('{} {} {}'.format(word, tag_dict[word][0], tag_dict[word][1]))
else:
cache.append('{} {} {}'.format(word, tag_dict[rare][0], tag_dict[rare][1]))
with open(output_file,'w') as output:
output.write("\n".join(cache))
output.write("\n")
if __name__ == "__main__":
os.system("python count_freqs.py ner_train_rare.dat > ner_rare.counts")
naive_tagger()
| UTF-8 | Python | false | false | 1,822 | py | 9 | 4_2.py | 7 | 0.532382 | 0.525247 | 0 | 60 | 29.366667 | 91 |
yamaton/codeforces | 5,815,385,733,462 | aeb8baa2bb340d41bdc8964cf523d4239b16fa0a | bb151500b0fc5bb9ef1b1a9e5bba98e485b4b34d | /problemSet/600C_Make_Palindrome.py | 855783e09dc68c5e6d5a334621dd66747e864556 | []
| no_license | https://github.com/yamaton/codeforces | 47b98b23da0a3a8237d9021b0122eaa498d98628 | e0675fd010df852c94eadffdf8b801eeea7ad81b | refs/heads/master | 2021-01-10T01:22:02.338425 | 2018-11-28T02:45:04 | 2018-11-28T03:21:45 | 45,873,825 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Codeforces Educational Round 2
Problem 600 C. Make Palindrome
@author yamaton
@date 2015-11-30
"""
import collections
def solve(s: str) -> [str]:
n = len(s)
chars = collections.Counter(s)
base = [c for c in chars if chars[c] >= 2 for _ in range(chars[c] // 2)]
rest = sorted(c for c in chars if chars[c] % 2 == 1)
keep = len(rest) // 2
selected = sorted(base + rest[:keep])
    # Only an odd-length string has a middle character, and rest is guaranteed non-empty in that case,
    # so index rest only when it is needed (otherwise this raises IndexError when every count is even).
    middle_elem = [] if n % 2 == 0 else [rest[keep]]
return selected + middle_elem + selected[::-1]
def main():
s = input().strip()
result = solve(s)
print(''.join(result))
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 658 | py | 222 | 600C_Make_Palindrome.py | 168 | 0.579027 | 0.548632 | 0 | 32 | 19.5625 | 76 |
hxca/58CrackVerification | 19,018,115,233,989 | d44bfadce26eeb325b7169afe381a934028ff71a | 8c42b99d2dc7017e1c1e492749627afb13cc4511 | /crack_verification_58.py | 3e34940e06356989cb6913e37b46f32e5068d408 | []
| no_license | https://github.com/hxca/58CrackVerification | d1e1a17691823a557cd4cef308f15db2a3ec784b | ee900f30f9614d1b4bfe5899e04cf1cd8fa99f64 | refs/heads/master | 2020-12-05T19:36:23.583442 | 2020-05-01T05:54:40 | 2020-05-01T05:54:40 | 232,226,626 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 -*-
import json
import math
import random
import re
import time
from binascii import b2a_hex, a2b_hex
import cv2
import numpy as np
import requests
from Crypto.Cipher import AES
class CrackVerification(object):
def __init__(self):
self.__bg_img = 'bg_img.jpg'
self.__puzzle_img = 'puzzle_img.jpg'
self.__serial_id = None
self.__code = None
self.__sign = None
self.__namespace = None
self.__url_parm = None
self.__referer_url = None
self.__session_id = None
self.__token = None
self.__response_id = None
self.__puzzle_img_url = None
self.__bg_img_url = None
def __request_from_server(self, url, data=None, timeout=30, method=0):
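        # Thin HTTP helper with fixed browser-like headers and cookies:
        # method=0 sends a GET (data as query params), method=1 sends a POST (data as form body).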
headers = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,'
'application/signed-exchange;v=b3',
'accept-encoding': 'gzip, deflate, br',
'cookie': '58home=gz; f=n; id58=e87rZl4OEAGsjqmoBoEIAg==; '
'commontopbar_new_city_info=3%7C%E5%B9%BF%E5%B7%9E%7Cgz; city=gz; '
'commontopbar_ipcity=gz%7C%E5%B9%BF%E5%B7%9E%7C0; '
'58tj_uuid=f1ea0783-9973-4fd1-8e59-9fc0ceaa0e94; new_session=1; new_uv=1; utm_source=; spm=; '
'init_refer=; als=0; xxzl_cid=1751ee096fa144e38be6491509a7980b; '
'xzuid=16febb1a-018b-4959-ba5e-426afc75a3c1; wmda_uuid=81c7378e93d18d368b0b54f811479606; '
'wmda_new_uuid=1; wmda_session_id_11187958619315=1577979915690-d730d94a-7fea-5205; '
'wmda_visited_projects=%3B11187958619315',
'referer': self.__referer_url,
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/78.0.3904.97 Safari/537.36'
}
if method == 0:
r = requests.get(url, headers=headers,
params=data, timeout=timeout)
elif method == 1:
r = requests.post(url, headers=headers, data=data, timeout=timeout)
else:
r = None
return r
def __aes_encrypt(self, key, text):
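        # AES-CBC encryption with the key reused as IV; pads the plaintext to a 16-byte
        # multiple (PKCS#7-style) and returns the ciphertext as a hex string.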
key = bytes(key, encoding="utf8")
text = bytes(text, encoding="utf8")
length = 16 - (len(text) % 16)
text += bytes([length]) * length
crypto = AES.new(key, AES.MODE_CBC, key)
data = b2a_hex(crypto.encrypt(text))
return str(data, encoding='utf8')
def __aes_decrypt(self, key, data):
key = bytes(key, encoding="utf8")
crypto = AES.new(key, AES.MODE_CBC, key)
text = crypto.decrypt(a2b_hex(data))
print(text)
return str(text, encoding='utf8')
# def __get_track(self, y):
#
# i = 8
# avg = (y / 30) + 1
# x_list = [i]
# while True:
# i += avg
# x_list.append(i)
# if i >= y:
# break
# x_list.append(y + 6)
# x_list.append(y + 7)
# x_list.append(y + 8)
# x_list.append(y + 8)
#
# time_list = [
# 1, 304, 329, 346, 361, 378, 487, 496, 512, 529, 547, 562, 579, 712, 729, 746, 763, 778, 886, 895, 913, 929,
# 946,
# 1039, 1471, 1479, 1497, 1513, 2071, 2195, 2250, 2283, 3215, 3520, 3803, 4135, 4515, 4896, 5263, 5322,
# 5499, 5597
# ]
#
# y_list = [19] * 17 + [21] * 25
#
# track = ""
# for i in range(len(x_list)):
# track += str(x_list[i]) + ',' + str(y_list[i]) + "," + str(time_list[i]) + "|"
# return track
    def __generate_slide_trace(self, distance):
        """
        Generate a human-like drag trajectory for the slider captcha
        :param distance: gap distance the slider has to travel, in pixels
        :return:
        """
start_x = random.randint(10, 40)
start_y = random.randint(10, 20)
back = random.randint(2, 6)
distance += back
        # initial velocity
        v = 0
        # displacement/trajectory list; one element per simulated time step
        tracks_list = []
        # current displacement
current = 0
while current < distance:
            # a smaller acceleration means a smaller displacement per step, i.e. more and finer trajectory points
            a = random.randint(10000, 12000)  # accelerate
            # initial velocity of this step
            v0 = v
t = random.randint(9, 18)
s = v0 * t / 1000 + 0.5 * a * ((t / 1000) ** 2)
            # current position
            current += s
            # the velocity reached now becomes the initial velocity of the next step
            v = v0 + a * t / 1000
            # append to the trajectory list
if current < distance:
tracks_list.append(round(current))
        # finish slowly: fill in the remaining pixels one at a time
if round(current) < distance:
for i in range(round(current) + 1, distance + 1):
tracks_list.append(i)
else:
for i in range(tracks_list[-1] + 1, distance + 1):
tracks_list.append(i)
        # slide back to compensate for the overshoot added to distance above
for _ in range(back):
current -= 1
tracks_list.append(round(current))
tracks_list.append(round(current) - 1)
if tracks_list[-1] != distance - back:
tracks_list.append(distance - back)
        # generate the timestamp list
timestamp_list = []
timestamp = random.randint(20, 60)
for i in range(len(tracks_list)):
if i >= len(tracks_list) - 6:
t = random.randint(80, 180)
else:
t = random.randint(11, 18)
timestamp += t
timestamp_list.append(timestamp)
i += 1
y_list = []
zy = 0
for j in range(len(tracks_list)):
y = random.choice(
[0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0,
-1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, -1, 0, 0])
zy += y
y_list.append(zy)
j += 1
trace = [{'p': f'{start_x},{start_y}', 't': random.choice([0, 1])}]
for index, x in enumerate(tracks_list):
trace.append({
'p': ','.join([str(x + start_x), str(y_list[index] + start_y)]),
't': timestamp_list[index]
})
trace.append({
'p': f'{tracks_list[-1] + start_x},{y_list[-1] + start_y}',
't': timestamp_list[-1] + random.randint(100, 300)
})
return trace
    def __get_track(self, distance):
        """
        Serialize the generated trajectory into the "x,y,t|x,y,t|..." string the server expects
        :param distance: gap distance in pixels
        :return:
        """
trace = self.__generate_slide_trace(distance)
def merge(s): return ','.join([str(s['p']), str(s['t'])])
new_trace = '|'.join([merge(i) for i in trace]) + '|'
return new_trace
def __get_distance(self):
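        # Download the background and puzzle images, locate the gap with OpenCV template matching,
        # and return the matched column scaled down (divided by 1.6, rounded) as the slide distance.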
content = self.__request_from_server(url=self.__bg_img_url).content
with open(self.__bg_img, 'wb') as f:
f.write(content)
content = self.__request_from_server(url=self.__puzzle_img_url).content
with open(self.__puzzle_img, 'wb') as f:
f.write(content)
target = cv2.imread(self.__bg_img, 1)
template = cv2.imread(self.__puzzle_img, 0)
target = cv2.cvtColor(target, cv2.COLOR_BGR2GRAY)
target = abs(255 - target)
result = cv2.matchTemplate(target, template, cv2.TM_CCOEFF_NORMED)
x, y = np.unravel_index(result.argmax(), result.shape)
distance = math.floor(y / 1.6 + 0.5)
return distance
def __get_parm_from_url(self, url):
result = re.findall(r'serialId=(.*?)&', url)
serial_id = result[0]
self.__serial_id = serial_id
result = re.findall(r'code=(.*?)&', url)
code = result[0]
self.__code = code
result = re.findall(r'sign=(.*?)&', url)
sign = result[0]
self.__sign = sign
result = re.findall(r'namespace=(.*?)&', url)
namespace = result[0]
self.__namespace = namespace
result = re.findall(r'url=(.*?)$', url)
url_parm = result[0]
self.__url_parm = url_parm
self.__referer_url = url
def __get_session_id(self):
# get session id
url = 'https://callback.58.com/antibot/codev2/getsession.do?{}'.format(
int(time.time() * 1000))
data = {
"serialId": self.__serial_id,
"code": self.__code,
"sign": self.__sign,
"url": self.__url_parm
}
json_obj = self.__request_from_server(
url=url, data=data, method=1).json()
if json_obj['code'] != 1:
self.__session_id = json_obj['data']['sessionId']
else:
print(json_obj)
def __get_token(self):
url = 'https://cdata.58.com/fpToken'
r = self.__request_from_server(url)
result = re.findall(r'null\((.*?)\)', r.text)
json_obj = json.loads(result[0])
self.__token = json_obj['token']
def __get_response_id_img_url(self):
url = 'https://verifycode.58.com/captcha/getV3'
parm = {
"showType": "win",
"sessionId": self.__session_id,
"_": int(time.time() * 1000)
}
json_obj = self.__request_from_server(url=url, data=parm).json()
self.__response_id = json_obj['data']['responseId']
self.__puzzle_img_url = "https://verifycode.58.com{}".format(
json_obj['data']['puzzleImgUrl'])
self.__bg_img_url = "https://verifycode.58.com{}".format(
json_obj['data']['bgImgUrl'])
def __verify_code(self, distance, track):
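        # AES-encrypt the answer payload {x, track, p, finger} with the first 16 characters of
        # responseId as the key; on success the server returns a source-image URL and a success token.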
key = self.__response_id[0:16]
text = '{{"x":"{}","track":"{}","p":"0,0","finger":"{}"}}'.format(
distance, track, self.__token)
data = self.__aes_encrypt(key, text)
# verify
url = 'https://verifycode.58.com/captcha/checkV3'
parm = {
"responseId": self.__response_id,
"sessionId": self.__session_id,
"data": data,
"_": str(int(time.time() * 1000))
}
json_obj = self.__request_from_server(url=url, data=parm).json()
if json_obj['message'] == "校验成功":
source_img = "https://verifycode.58.com{}".format(
json_obj['data']['sourceimg'])
success_token = json_obj['data']['successToken']
return source_img, success_token
else:
return []
def __check_code(self, source_img_success_token):
url = 'https://callback.58.com/antibot/checkcode.do'
data = {
"namespace": self.__namespace,
"sessionId": self.__session_id,
"url": self.__url_parm,
"successToken": source_img_success_token[1],
"serialId": self.__serial_id
}
r = self.__request_from_server(url=url, data=data, method=1)
print(r.text)
r = self.__request_from_server(url=source_img_success_token[0])
print(r.cookies)
def verify(self, url):
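        # Full flow: parse URL params -> session id -> fingerprint token -> captcha images ->
        # gap distance and trajectory -> verify the answer -> confirm via the callback endpoint.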
# get parm
self.__get_parm_from_url(url)
# get session id
self.__get_session_id()
if self.__session_id is None:
print("校检已通过")
return True
# get token
self.__get_token()
# get image url, response id
self.__get_response_id_img_url()
# get distance and track
distance = self.__get_distance()
track = self.__get_track(distance)
# verify code
source_img_success_token = self.__verify_code(distance, track)
# check code
if source_img_success_token:
self.__check_code(source_img_success_token)
print("校检成功")
return True
else:
print("校检失败")
return False
def main():
# url = 'https://callback.58.com/antibot/verifycode?serialId' \
# '=b258b952f085ac8eee4e07837a86dc2c_61f4852a64894c9c914c290e9db30edc&code=22&sign' \
# '=39ef095fa5175996adad8c0eb1636f1c&namespace=chuzulistphp&url=gz.58.com%2Fchuzu%2Fpn78%2F%3FPGTID' \
# '%3D0d3090a7-0000-36ce-c765-5f1d3e3dd81b%26ClickID%3D2 '
url = 'https://callback.58.com/antibot/verifycode?serialId' \
'=62cbf64a21ab4d309e722680c623a4e4_31ec33353c0648b7a5c97b1f2fb35976&code=22&sign' \
'=e6760461fc971994a2f3809ff536fbe9&namespace=anjuke_zufang_pc&url=.zu.anjuke.com%2Ffangyuan' \
'%2F1206610285371393%3Fibsauction%3D1%26shangquan_id%3D1846 '
crack_verification = CrackVerification()
crack_verification.verify(url)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 12,868 | py | 3 | crack_verification_58.py | 1 | 0.514538 | 0.453607 | 0 | 373 | 32.747989 | 121 |
jae-yong-2/IP_HW | 14,809,047,273,389 | 0b470a9d5386c986e0c25e143a2571909a28b3d1 | 8145dbfe0771b2ce5e3bf32f4d7e3332c6a99697 | /my_JPEG_DCT.py | 318a0dd5caae665c4bbfe9a4e78ee042fc71ce34 | []
| no_license | https://github.com/jae-yong-2/IP_HW | f703a79cf48c2e31d2c08a2b1040d95cecdc8c4e | f9d929b584ece08010f0ffa976c1279bb7a9c67c | refs/heads/master | 2022-11-09T12:16:03.276159 | 2020-06-30T10:13:16 | 2020-06-30T10:13:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import numpy as np
def my_normalize(src):
dst = src.copy()
if np.min(dst) != np.max(dst):
dst = dst - np.min(dst)
dst = dst / np.max(dst) * 255
return dst.astype(np.uint8)
def my_DCT(src, n=8):
    ###############################
    # TODO                        #
    # complete my_DCT             #
    # src : input image           #
    # n : block size              #
    ###############################
(h, w) = src.shape
dct_img = (src.copy()).astype(np.float)
dst = np.zeros((h, w))
mask = np.zeros((n, n), dtype=np.float)
C_left = 0
C_right = 0
    for row_num in range(h // n):
        for col_num in range(w // n):
            for block_row in range(n):
                for block_col in range(n):
                    # DCT normalization factors depend on the frequency indices
                    # (block_row, block_col), not on which image block is being processed
                    if block_row == 0:
                        C_left = np.sqrt(1 / n)
                    else:
                        C_left = np.sqrt(2 / n)
                    if block_col == 0:
                        C_right = np.sqrt(1 / n)
                    else:
                        C_right = np.sqrt(2 / n)
                    for row in range(n):
                        for col in range(n):
                            mask[row][col] = np.cos((2 * row + 1) * np.pi * block_row / (2 * n)) * np.cos(
                                (2 * col + 1) * np.pi * block_col / (2 * n))
                    # coefficient of frequencies (block_row, block_col) for the current block
                    dst[row_num*n + block_row][col_num*n + block_col] = C_left*C_right*np.sum(
                        dct_img[row_num*n: (row_num+1)*n, col_num*n: (col_num+1)*n] * mask)
return my_normalize(dst)
if __name__ == '__main__':
src = cv2.imread('Lena.png', cv2.IMREAD_GRAYSCALE)
dst = my_DCT(src, 8)
dst = my_normalize(dst)
cv2.imshow('my DCT', dst)
cv2.waitKey()
cv2.destroyAllWindows()
| UTF-8 | Python | false | false | 1,886 | py | 9 | my_JPEG_DCT.py | 9 | 0.39373 | 0.37779 | 0 | 60 | 29.366667 | 106 |
avcjeewantha/Online-Karate-Event-Management-System | 2,310,692,422,231 | 06aa1ceddad1bd5e788516530e3decad84eae724 | 0e6acfaa78a66cd77b5cee1d37d2d4aa633203ae | /mysite/EventManagementSystem/models.py | 1db6aa1b62046141d5cbd9e99a15dbbbe2ad281f | []
| no_license | https://github.com/avcjeewantha/Online-Karate-Event-Management-System | 8f46e54c833979d6f9ab8f973d1246f4881f5bf4 | 39b7a20feeede9b221fd3b82b1399a902010bc2b | refs/heads/master | 2020-03-28T06:19:26.996262 | 2018-12-11T17:27:48 | 2018-12-11T17:27:48 | 147,827,922 | 1 | 0 | null | true | 2018-09-07T18:02:56 | 2018-09-07T13:35:26 | 2018-09-07T14:59:48 | 2018-09-07T18:02:56 | 498 | 0 | 0 | 0 | HTML | false | null | from __future__ import unicode_literals
from django.contrib.auth.models import AbstractUser
from django.core.validators import MinValueValidator
from django.db import models
class User(AbstractUser):
# New field needs to be added to the User table.
# This gives a select box to the user to choose from. First parameter is the one stored in the database.
USER_TYPE_CHOICES = (
('SL', 'SLKF'),
('AS', 'association'),
('DI', 'district'),
('PR', 'province'),
('AD', 'admin')
)
userType = models.CharField(max_length=2, choices=USER_TYPE_CHOICES, default='AD')
def __unicode__(self):
return self.username
class Slkf(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
# memberName = models.CharField(max_length=100)
# position in the SLKF
position = models.CharField(max_length=50, blank=False, verbose_name="Position at SLKF")
telephone = models.CharField(max_length=12, blank=False)
def __unicode__(self):
return self.user.username
# user object will return according to its __unicode__ method (username)
class Association(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
# Association does not need a unique ID because it can have username from User model.
# associationID = models.CharField(max_length=100, blank=False, unique=True)
associationName = models.CharField(max_length=100, blank=False, verbose_name="Association Name")
address = models.CharField(max_length=1000, blank=False)
telephone = models.CharField(max_length=12, blank=False)
chiefInstructorName = models.CharField(max_length=100, blank=False, verbose_name="Chief Instructor Name")
def __unicode__(self):
return self.user.username
class Province(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
# provinceName = models.CharField(max_length=50, blank=False)
# Province Name is not needed - this is already in User table as the username.
provinceSecretaryName = models.CharField(max_length=100, blank=False, verbose_name="Province Secretary Name")
telephone = models.CharField(max_length=12, blank=False)
def __unicode__(self):
return self.user.username
class District(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
province = models.ForeignKey(Province, on_delete=models.CASCADE)
# districtName = models.CharField(max_length=50, blank=False)
# District Name is not needed - this is already in User table as the username.
districtSecretaryName = models.CharField(max_length=100, blank=False, verbose_name="District Secretary Name")
telephone = models.CharField(max_length=12, blank=False)
def __unicode__(self):
return self.user.username
class Event(models.Model):
BOOL_CHOICES = ((True, 'Kumite'), (False, 'Kata'))
eventID = models.SmallIntegerField(primary_key=True, verbose_name="Event Number", validators=[MinValueValidator(1)])
eventName = models.CharField(max_length=100, blank=False, verbose_name="Event Name")
kumite = models.BooleanField(choices=BOOL_CHOICES, default=True, verbose_name="Event Category")
def __unicode__(self):
# return self.eventID
# Cannot return an int. It must be a String or buffer - to show in html.
return str(self.eventID)
class Player(models.Model):
# Auto generated ID is the primary key.
playerName = models.CharField(max_length=100, blank=False, verbose_name="Player Name")
telephone = models.CharField(max_length=12, blank=False, verbose_name="Telephone")
association = models.ForeignKey(Association, on_delete=models.CASCADE, verbose_name="Association Name")
district = models.ForeignKey(District, on_delete=models.CASCADE)
# players should be submitted to the system for each event he participates.
event = models.ForeignKey(Event, on_delete=models.CASCADE)
def __unicode__(self):
return unicode(self.id)
class Coach(models.Model):
coachID = models.CharField(primary_key=True, max_length=10, blank=False, verbose_name="Registration Number")
coachName = models.CharField(max_length=100, blank=False, verbose_name="Name of the Coach")
association = models.ForeignKey(Association, on_delete=models.CASCADE, verbose_name="Association Name")
telephone = models.CharField(max_length=12, blank=False)
def __unicode__(self):
return unicode(self.coachID)
# State table for Open/close registration
class State(models.Model):
stateID = models.SmallIntegerField(primary_key=True)
isOpen = models.BooleanField(default=True, help_text="Designates whether registrations is open",
verbose_name='active')
def __unicode__(self):
return bool(self.isOpen)
| UTF-8 | Python | false | false | 4,940 | py | 56 | models.py | 28 | 0.709109 | 0.698381 | 0 | 124 | 38.83871 | 120 |
Snufkin0866/pyliquidpnl | 15,917,148,827,026 | 379190f9053fea905d39ef3a57f56102d6e63c56 | d83f45ecff6b408dc23b46d5f4b2092dfed37958 | /calc_INITIAL_CAPITAL.py | 5f28800a4b676bb0585e826f0a84544367fc04be | [
"MIT"
]
| permissive | https://github.com/Snufkin0866/pyliquidpnl | 40826c2944fb3c2c519f5f19445dc8c3ca73cdde | 0350cf1063acff4c6b6dedcbc51df7c6de9a4e46 | refs/heads/master | 2020-05-09T22:38:34.647089 | 2019-04-28T04:50:50 | 2019-04-28T04:50:50 | 181,477,781 | 0 | 9 | MIT | false | 2019-04-26T01:23:55 | 2019-04-15T12:01:48 | 2019-04-26T01:23:23 | 2019-04-26T01:23:55 | 34 | 0 | 4 | 0 | Python | false | false | # -*- coding: utf-8 -*-
import pyliquid
import config
key = config.KEY
secret = config.SECRET
api = pyliquid.API(key, secret)
FUNDING_CURRENCIES = config.FUNDING_CURRENCIES
def get_rates():
product_id = {'BTC':5, 'ETH':29, 'XRP':83, 'QASH':50, 'BCH':41}
rate = {}
rate['JPY'] = 1
rate['USD'] = 112
for f in FUNDING_CURRENCIES:
if FUNDING_CURRENCIES[f]:
if f != 'JPY' and f != 'USD':
rate[f] = float(api.get_a_product(id=product_id[f])['last_traded_price'])
return rate
def calc_INITIAL_CAPITAL():
INITIAL_CAPITAL = 0
rate = get_rates()
balance = api.get_all_acountbalance()
for b in balance:
if b['currency'] in rate.keys():
INITIAL_CAPITAL += float(b['balance']) * rate[b['currency']]
print(f'INITIAL_CAPITAL : {int(INITIAL_CAPITAL)}')
if __name__ == '__main__':
calc_INITIAL_CAPITAL()
| UTF-8 | Python | false | false | 942 | py | 8 | calc_INITIAL_CAPITAL.py | 4 | 0.561571 | 0.545648 | 0 | 33 | 26.424242 | 93 |
btravers/ABR | 12,601,434,053,742 | 3d8165bd98e8db555257dee32ef0418f262bab2c | c879d57414cfb2f68490b33b5f8a90357bc7bcd3 | /abr/document/urls.py | 3c8cbee26a147ced9d2a404daba1990a2df9478e | []
| no_license | https://github.com/btravers/ABR | c1bf683c8b35f4fee5fc96f920d818d9df214126 | 3555bc2c4a96e09bd85b61d6b2c29cc8edae5723 | refs/heads/master | 2016-08-07T04:28:50.666336 | 2015-08-20T20:04:17 | 2015-08-20T20:04:17 | 35,184,591 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import url
from . import views
from .views import DocumentCreateView, DocumentDeleteView, DocumentListView
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^new/$', DocumentCreateView.as_view(), name='upload-new'),
url(r'^delete/(?P<pk>\d+)$', DocumentDeleteView.as_view(), name='upload-delete'),
url(r'^view/$', DocumentListView.as_view(), name='upload-view'),
] | UTF-8 | Python | false | false | 415 | py | 25 | urls.py | 11 | 0.686747 | 0.686747 | 0 | 11 | 36.818182 | 85 |
matematik1903/Django_Framework_-_Business_Employer_DB | 1,460,288,896,927 | d0b9241554c0b3700010faace2a8a5f231ccd737 | 036138a37565306f6074643a56eb4c26b275a02d | /landing/migrations/0003_auto_20171105_2035.py | b9ea5f5f0984d926825c31cec16687932b9af0f9 | []
| no_license | https://github.com/matematik1903/Django_Framework_-_Business_Employer_DB | 9df078f20d1fd3fadd795978c2cdb17124d66934 | 25c07af2cbbf9a8f137a94e71f33d065e86fb3d0 | refs/heads/master | 2021-05-05T13:58:18.422758 | 2018-01-22T06:19:29 | 2018-01-22T06:19:29 | 118,414,120 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-05 18:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('landing', '0002_auto_20171105_2026'),
]
operations = [
migrations.RenameModel(
old_name='user_job',
new_name='users_job',
),
]
| UTF-8 | Python | false | false | 400 | py | 35 | 0003_auto_20171105_2035.py | 20 | 0.59 | 0.5075 | 0 | 19 | 20.052632 | 48 |
yaswanth1199/zero.c | 12,738,873,013,388 | 7f81cc244972a59c587b882d5ac8c6953669d7c3 | c6537d77b62e7c7762b1460a12421e8bbcd221e6 | /sdkfs.py | 2f79790fe6a46257dc0dda5034ffa6ba8149c3f5 | []
| no_license | https://github.com/yaswanth1199/zero.c | 9b9e3913b177009c6adf4fc8c97db907cf40f095 | 44c56800ed6cbd47ab9a0d23f080ff750a4446e1 | refs/heads/master | 2020-06-05T17:00:00.397858 | 2019-07-16T09:25:41 | 2019-07-16T09:25:41 | 192,490,403 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | a=int(input())
x= range(1,11)
if a in x:
print('yes')
else:
print('no')
| UTF-8 | Python | false | false | 81 | py | 38 | sdkfs.py | 7 | 0.530864 | 0.493827 | 0 | 6 | 12.5 | 16 |
SamuelDGeorge/JupyterCloudInstructorNotebook | 11,914,239,293,906 | d835ff19ac072ce3c86fe29e325bb46c2955a423 | 1fb18ae0d1270652096e760f7f85ddce5d562af9 | /DataTools/__init__.py | 39cd9cc2c38a9a8534a0da4833853ba2ad2a9117 | []
| no_license | https://github.com/SamuelDGeorge/JupyterCloudInstructorNotebook | 3876202be93d0e6d1a6eb03caa4a7851814e1beb | a27e3c613db99d1dad04531ed1c60f72eb8792ca | refs/heads/main | 2023-03-24T02:02:47.519554 | 2021-03-14T20:30:15 | 2021-03-14T20:30:15 | 347,694,028 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .DataProcess import DataProcess | UTF-8 | Python | false | false | 36 | py | 4 | __init__.py | 3 | 0.888889 | 0.888889 | 0 | 1 | 36 | 36 |
tainona/lesson01 | 16,363,825,411,180 | 36d60ba455027fdeb488fe9f26ad090372bbe46d | 0cc5f9a36a901d463f59af8fddc8890cfa13e7fa | /4-4/main.py | fada0ff15a3dbdecd79205e968e1be8928876ab5 | []
| no_license | https://github.com/tainona/lesson01 | 07e91e64bbd678b51d6b9c8ce6066c2c5c82ef81 | 47852b2a1a3b2dbed76771df4a5130485d984c36 | refs/heads/main | 2023-04-16T02:56:57.515577 | 2021-04-30T15:18:26 | 2021-04-30T15:18:26 | 360,157,124 | 0 | 0 | null | true | 2021-04-30T15:18:26 | 2021-04-21T12:13:06 | 2021-04-30T15:14:06 | 2021-04-30T15:18:26 | 22 | 0 | 0 | 0 | Python | false | false | import random
a = []
for i in range(10):
a.append(random.randint(0,9))
print(*a)
| UTF-8 | Python | false | false | 85 | py | 22 | main.py | 21 | 0.635294 | 0.588235 | 0 | 5 | 16 | 33 |
GuiSeSanz/myScripts | 8,409,546,002,694 | ae4d500fff5895074bc98ad7497c12bbf68c52a4 | 9fd31e1fcb941124840a80ce3a9d9440afad8b5a | /Rename_prot.py | 9bd31f36112ba9c57a812fae8a225971ce029f9c | [
"Apache-2.0"
]
| permissive | https://github.com/GuiSeSanz/myScripts | d4011c67b50f07b5fcdf613cd5ae13ec0a20ea75 | 21ae4940fceeaba1549ab6c9cd5f2e2bee22b970 | refs/heads/master | 2021-01-19T05:07:45.726085 | 2018-06-04T07:16:04 | 2018-06-04T07:16:04 | 64,406,323 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 18 10:39:09 2016
@author: guillermo
"""
import sys
def LeerProt(archivoprot):
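    # Read the "protein sequence of predicted genes" section of the prediction output
    # and return its whitespace-separated tokens (FASTA headers and sequence chunks).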
infile = open(archivoprot, "r")
lista = []
sequence = False
for line in infile:
if (line.startswith("# protein sequence of predicted genes")):
sequence = True
continue
if (sequence):
line=line.replace('\t', ' ' )
lista.extend(line[:-1].split())
infile.close()
return lista
def ReescribirProt(lista, cabecera):
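    # Renumber every FASTA header as ">{cabecera}_prot_{ID}" and write the result to "{cabecera}.faa".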
ID = 0
for i in xrange(len(lista)):
if '>' in lista[i]:
ID += 1
lista[i] = '>' + str(cabecera) + '_prot_' + str(ID)
archivo = cabecera + '.faa'
outfile = open(archivo, "w")
for i in xrange(len(lista)):
outfile.write(lista[i]+'\n')
print archivo, " succesfully created!!"
outfile.close()
return
def LeerGff3(archivogff3):
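    # Split a GFF3 file into its header lines (through "##sequence-region") and the remaining feature lines.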
infile = open(archivogff3, "r")
header =[]
listacoord = []
coord = False
for line in infile:
if(coord == False):
header.append(line)
if (line.startswith("##sequence-region")):
coord = True
continue
if(coord):
#line=line.replace('\t', ' ' )
listacoord.append(line)
infile.close()
return (header, listacoord)
def Reescribirgff3(cabecera, header, listacoord):
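    # Rewrite the first GFF column so every feature of gene N is labelled "{cabecera}_prot_N"
    # and write the result to "{cabecera}bis.gff".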
archivo = cabecera + 'bis.gff'
outfile = open(archivo, "w")
for i in xrange(len(header)):
outfile.write(header[i])
ID=0
for i in xrange(len(listacoord)):
if ("\tgene" in listacoord[i]):
ID +=1
listacoord[i]=listacoord[i].split("\t")
listacoord[i][0] = cabecera+'_prot_'+str(ID)
else:
listacoord[i]=listacoord[i].split("\t")
listacoord[i][0] = cabecera+'_prot_'+str(ID)
listacoord[i]= ("\t").join(listacoord[i])
outfile.write(listacoord[i])
print archivo, " succesfully created!!"
outfile.close()
return
if __name__ == "__main__":
#archivoprot='Salidach3prot'
archivogff3='A.thaliana_chr3.gff'
cabecera='A.thaliana_ch3'
#archivo = sys.argv[1]
#archivogff3 = sys.argv[2]
#cabecera = sys.argv[3]
#lista = LeerProt(archivoprot)
#ReescribirProt(lista, cabecera)
header, listacoord = LeerGff3(archivogff3)
Reescribirgff3(cabecera, header, listacoord)
| UTF-8 | Python | false | false | 2,582 | py | 48 | Rename_prot.py | 35 | 0.530209 | 0.516654 | 0 | 94 | 26.414894 | 70 |
jiang43605/BJGuaHaoSpider | 10,926,396,826,318 | ebb8a4705b3309305ede598e52d46c3c1ab6cb08 | af7689c3d43ae49f9aa9d3bfa614c2a6f3da8eef | /departmentSpider.py | 02478725ee0db510d2863673fe3f327fa1663ade | []
| no_license | https://github.com/jiang43605/BJGuaHaoSpider | 8083af9114c79ae3c188adccb997732ab4fe16e3 | 6ae96e7e05cceb2327147bd791e542f99013459f | refs/heads/master | 2021-01-09T06:50:19.599912 | 2016-04-19T08:15:25 | 2016-04-19T08:15:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: UTF-8 -*-
import re
import urllib.request
import urllib.error
import urllib.parse
import timeSpider
import Validator
# Class that fetches a hospital's department information
class DepartmentInfo:
    # constructor
    def __init__(self, baseUrl):
        # base URL
        self.baseUrl = baseUrl
        # list of department records
self.departments = []
    # return the list of departments
    def getDepartments(self, pageCode):
        # regex object that extracts each department's link and name
        pattern = re.compile('<a class="kfyuks_islogin" href="(.*?)">(.*?)</a>', re.S)
        items = re.findall(pattern, pageCode)
        # iterate over the matches
for item in items:
print(item[0], item[1])
if Validator.compileurl(self.baseUrl + item[0]):
ids = self.parseUrl(item[0])
self.departments.append([item[0], item[1], ids[0], ids[1]])
else:
result = self.combineUrlComponent(item[0])
self.departments.append([result[0], item[1], result[1], result[2]])
return self.departments
    # load a page and return its decoded HTML
    def loadPage(self, subPath):
        url = self.baseUrl + subPath
        # request object
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
return response.read().decode('utf-8')
    # assemble the appointment url from the javascript:encodeUrl(...) arguments
def combineUrlComponent(self, item):
subItem = item[len('javascript:encodeUrl('):len(item)-2].split(',')
subPath = '/dpt/appoints/%s,%s.htm?departmentName=%s&deptSpec=%s' % (subItem[0].strip('\''),
subItem[1].strip('\''),
urllib.parse.quote(subItem[2].strip('\'').encode('utf-8')),
urllib.parse.quote(subItem[3].strip('\'').encode('utf-8')))
return [subPath, subItem[0].strip('\''), subItem[1].strip('\'')]
def parseUrl(self, subPath):
        # parse the hospital id and department id from a url, e.g. /dpt/appoint/3-200000002.htm
print(subPath)
pattern = re.compile('.*/(.*?)-(.*?).htm', re.S)
result = re.search(pattern, subPath)
if not result:
print('解析url获取医院id和科室id失败')
return None
self.hospitalId = result.group(1).strip()
self.departmentId = result.group(2).strip()
print('医院ID:%s\t科室ID:%s' % (self.hospitalId, self.departmentId))
return [self.hospitalId, self.departmentId]
    # entry point
    def start(self, subPath):
        pageCode = self.loadPage(subPath)
        # scrape the department information
        self.getDepartments(pageCode)
        # walk the department list and scrape appointment-slot info for each department
ts = timeSpider.timeSpider(self.baseUrl)
for department in self.departments:
# print(department)
ts.start(department[0], department[2], department[3])
if __name__ == '__main__':
baseUrl = 'http://www.bjguahao.gov.cn'
di = DepartmentInfo(baseUrl)
di.start('/hp/appoint/91.htm')
| UTF-8 | Python | false | false | 3,138 | py | 3 | departmentSpider.py | 3 | 0.538881 | 0.524898 | 0 | 89 | 31.94382 | 136 |
072jiajia/AIHW3 | 3,204,045,638,177 | c17f8d8dd0b1eee17afeaad20229e152921a7f22 | 7905318a26026f7a46632ba6459929f617312e27 | /HW3/Tools/neighbor.py | 3083bffd778e94272a09372baf1144ee0c12429b | []
| no_license | https://github.com/072jiajia/AIHW3 | 5f58c2eae6b263049b05498c522bbe781253d9d9 | c02138132df371ee65438eae7bcf5a962b02e66e | refs/heads/master | 2022-07-18T20:25:57.740545 | 2020-05-21T10:35:22 | 2020-05-21T10:35:22 | 265,820,017 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def neighbor(size, pos):
'''get neighboring blocks'''
(x, y) = pos
ret = []
for (dx, dy) in neighbor.neighbor_list:
if (x + dx < 0 or x + dx >= size[0] or
y + dy < 0 or y + dy >= size[1]):
continue
ret.append((x + dx, y + dy))
return ret
neighbor.neighbor_list = [(-1, -1), (-1, 0), (-1, 1), (0, -1),
(0, 1), (1, -1), (1, 0), (1, 1)]
| UTF-8 | Python | false | false | 437 | py | 9 | neighbor.py | 9 | 0.400458 | 0.354691 | 0 | 14 | 28.785714 | 62 |
0lidaxiang/fu2DoList | 19,602,230,773,849 | d2fe944f833bf186f89068d6ff6c68c3f24e4cf5 | 6dc445aa82de153f589ad09a2fcd04f064407289 | /myCalendar/migrations/0001_initial.py | 088a7c2a639ecc1723b7a9a3412e5b2e5e058ba0 | [
"MIT"
]
| permissive | https://github.com/0lidaxiang/fu2DoList | beba86edb3f8b4712648fe525c2df90f51ffbe68 | 40b845cfac5c71cdacadca18a2609169e25ea4ac | refs/heads/master | 2023-01-19T04:18:41.430480 | 2020-11-27T09:39:59 | 2020-11-27T09:39:59 | 125,611,544 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.0.5 on 2018-07-03 05:35
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('projectId', models.CharField(max_length=100)),
('task', models.CharField(max_length=100)),
('ownerId', models.CharField(max_length=100)),
('add_time', models.DateTimeField(auto_now_add=True)),
('end_time', models.DateTimeField(auto_now_add=True)),
],
),
]
| UTF-8 | Python | false | false | 716 | py | 14 | 0001_initial.py | 12 | 0.551676 | 0.518156 | 0 | 25 | 27.64 | 76 |
VyUng0711/competitive-programming-solutions | 18,597,208,427,723 | 32cf75f41022aea7218b7f828aca9b16e4bd0838 | a63e490f79716bf1271ee56063ce6552e2aa5c6e | /leetcode/sort_colors.py | 807264077632dc54fb18c7640d1ade94e09b1dc0 | []
| no_license | https://github.com/VyUng0711/competitive-programming-solutions | 07166b9bf0eb2615e9fbf8c836a7f1ea0861bb5c | d88b522681569ac2c2bfec57d77ed1cc9e2079a5 | refs/heads/master | 2020-03-14T03:53:15.357812 | 2019-02-11T06:05:29 | 2019-02-11T06:05:29 | 131,429,375 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def sort_colors(nums):
end_of_0 = 0
end_of_2 = len(nums) - 1
i = 0
    # use <= so the element sitting at end_of_2 is also examined before the loop stops
    while i <= end_of_2:
if nums[i] == 0:
nums[end_of_0], nums[i] = nums[i], nums[end_of_0]
end_of_0 += 1
elif nums[i] == 2:
nums[end_of_2], nums[i] = nums[i], nums[end_of_2]
end_of_2 -= 1
i -= 1
i += 1
print(nums)
def sort_colors1(nums):
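    # Counting sort: count each color, then rewrite nums with the keys in ascending order.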
dict_count = {}
for i in nums:
dict_count[i] = dict_count.get(i,0) + 1
index = 0
    for key, value in sorted(dict_count.items()):
for j in range(value):
nums[index] = key
index += 1
sort_colors([2,0,2,1,1,0]) | UTF-8 | Python | false | false | 584 | py | 342 | sort_colors.py | 322 | 0.517123 | 0.467466 | 0 | 26 | 21.5 | 55 |
XinyanWang/ZhihuAnalyse | 2,164,663,536,094 | 260118106236ccaacc1af09d800fde2d65016a31 | ff348930faa51f9ad32a103fda9a39749c9d280b | /UsersSpider.py | 1eecd7b8ab0980e246178ecf82805bde6d77ab45 | []
| no_license | https://github.com/XinyanWang/ZhihuAnalyse | 5677b12727ee84bedb2c23008e5a3e645daeb590 | d45466adf21ffd34afe1f93bef22d4eeb496f5b8 | refs/heads/master | 2021-01-01T05:34:13.128710 | 2015-06-01T09:39:17 | 2015-06-01T09:39:17 | 31,769,101 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 -*-
import re,time,pickle,requests
from BeautifulSoup import BeautifulSoup
from multiprocessing.dummy import Pool as ThreadPool
from lxml import etree
import zhihu_tools as tools
import UserInfos
def bridge(tp):
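    # tp = (logged-in session, profile homepage url); fetch one user's info,
    # retrying up to two more times on chunked-encoding errors.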
print 'working...'
    # retry up to three times; ChunkedEncodingError is defined in requests.exceptions
    try:
        t=UserInfos.User(tp[0],tp[1])
    except requests.exceptions.ChunkedEncodingError,e:
        try:
            t=UserInfos.User(tp[0],tp[1])
        except requests.exceptions.ChunkedEncodingError,e:
            try:
                t=UserInfos.User(tp[0],tp[1])
            except requests.exceptions.ChunkedEncodingError,e:
                t=None
                print 'ChunkedEncodingError'
print 'done'
return t
def logintools(tp):
return tools.newlogin(tp[0],tp[1])
def setsessions():
infos=[
('wangxy940930@gmail.com','WXY940930'),
# ('2824935672@qq.com','WXY940930'),
('k64_cc@163.com','WXY940930'),
('chh_cc@yeah.net','WXY940930'),
('owenclan@163.com','WXY940930'),
('jiangjiaqingmiao@163.com','WXY940930'),
('jiangjiaqingmiao@126.com','WXY940930'),
('zhihubot1@163.com','WXY940930'),
('zhihubot2@163.com','WXY940930'),
('zhihubot3@163.com','WXY940930')
# ,('zhihubot4@163.com','WXY940930')
# ,('zhihubot5@163.com','WXY940930')
# ,('zhihubot6@163.com','WXY940930')
# ,('zhihubot7@163.com','WXY940930')
# ,('zhihubot8@163.com','WXY940930')
# ,('zhihubot9@163.com','WXY940930')
# ,('zhihubot10@163.com','WXY940930')
# ,('zhihubota@163.com','WXY940930')
]
pool=ThreadPool(9)
sessions=pool.map(logintools,infos)
pool.close()
pool.join()
print 'Log in success'
return sessions
def spider(filename,sessions,start=None,end=None):
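    # Crawl homepages[start:end] with a thread pool of logged-in workers and pickle the scraped infos to disk.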
f=open(filename)
homepages=pickle.load(f)[start:end]
f.close()
    # homepages: the list of profile URLs read from the pickle file
print 'Task Start'
workers=[]
for i in range(len(homepages)/9+1):
workers+=sessions
print len(homepages),len(workers)
pool=ThreadPool(7)
results=pool.map(bridge,zip(workers,homepages))
pool.close()
pool.join()
print 'saving...'
f=open('124173_'+str(start)+'-'+str(end)+'.data','w')
for i in results:
pickle.dump(i.infos,f)
f.close()
print 'Task End'
print time.localtime()
#print 'We have two functions. First, you should use setsessions() to get workers for your spider. Then you can use spider() to download infos from Internet.'
#sessions=setsessions()
#i=16000
#while True:
# spider('p1_124173_num.data',sessions,start=i,end=i+2000)
# i+=2000
# if i>124173:
# spider('p1_124173_num.data',sessions,start=i-2000)
# break
| UTF-8 | Python | false | false | 2,690 | py | 3 | UsersSpider.py | 2 | 0.609492 | 0.520553 | 0 | 111 | 23.063063 | 158 |
BlaCkinkGJ/Safety-Helmet-Embedded-Device | 7,198,365,204,332 | ac2328afbd3fbb0008e08ffd4095882c95fbaffa | 22628cdf787b56ca4705d3d38dfda5619a1a07ef | /Admin/res/signUp.py | 5c45ee7e7f9cc8b821f041cc64e6fe2c2bea965b | [
"MIT"
]
| permissive | https://github.com/BlaCkinkGJ/Safety-Helmet-Embedded-Device | e9a57a40f2a9b23ad58ec275d7b38267c3b4e287 | 0134f1e4698ef34caee2d5a8cd875c51507b3527 | refs/heads/master | 2020-03-22T15:29:12.353031 | 2018-09-11T09:17:43 | 2018-09-11T09:17:43 | 140,256,718 | 1 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'signUp.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import pipeline as pipe
import re
import hashlib
from res import signIn
from PyQt5.QtWidgets import QMessageBox
class Ui_Form(object):
def setupUi(self, Form):
pipe.db.changeCollection(pipe.info['loginDB'])
Form.setObjectName("Form")
Form.resize(288, 297)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
Form.setSizePolicy(sizePolicy)
self.groupBox = QtWidgets.QGroupBox(Form)
self.groupBox.setGeometry(QtCore.QRect(10, 10, 271, 281))
font = QtGui.QFont()
font.setFamily("나눔스퀘어 Bold")
self.groupBox.setFont(font)
self.groupBox.setObjectName("groupBox")
self.Email = QtWidgets.QLabel(self.groupBox)
self.Email.setGeometry(QtCore.QRect(30, 60, 81, 16))
font = QtGui.QFont()
font.setFamily("나눔스퀘어 Bold")
font.setPointSize(10)
self.Email.setFont(font)
self.Email.setObjectName("Email")
self.EmailText = QtWidgets.QLineEdit(self.groupBox)
self.EmailText.setGeometry(QtCore.QRect(140, 60, 113, 21))
self.EmailText.setObjectName("EmailText")
self.UsernameText = QtWidgets.QLineEdit(self.groupBox)
self.UsernameText.setGeometry(QtCore.QRect(140, 30, 113, 21))
self.UsernameText.setObjectName("UsernameText")
self.userName = QtWidgets.QLabel(self.groupBox)
self.userName.setGeometry(QtCore.QRect(30, 30, 91, 16))
font = QtGui.QFont()
font.setFamily("나눔스퀘어 Bold")
font.setPointSize(10)
self.userName.setFont(font)
self.userName.setObjectName("userName")
self.FirstNameText = QtWidgets.QLineEdit(self.groupBox)
self.FirstNameText.setGeometry(QtCore.QRect(140, 90, 113, 21))
self.FirstNameText.setObjectName("FirstNameText")
self.firstName = QtWidgets.QLabel(self.groupBox)
self.firstName.setGeometry(QtCore.QRect(30, 90, 81, 16))
font = QtGui.QFont()
font.setFamily("나눔스퀘어 Bold")
font.setPointSize(10)
self.firstName.setFont(font)
self.firstName.setObjectName("firstName")
self.LastNameText = QtWidgets.QLineEdit(self.groupBox)
self.LastNameText.setGeometry(QtCore.QRect(140, 120, 113, 21))
self.LastNameText.setObjectName("LastNameText")
self.lastName = QtWidgets.QLabel(self.groupBox)
self.lastName.setGeometry(QtCore.QRect(30, 120, 81, 16))
font = QtGui.QFont()
font.setFamily("나눔스퀘어 Bold")
font.setPointSize(10)
self.lastName.setFont(font)
self.lastName.setObjectName("lastName")
self.PasswordText = QtWidgets.QLineEdit(self.groupBox)
self.PasswordText.setGeometry(QtCore.QRect(140, 150, 113, 21))
self.PasswordText.setEchoMode(QtWidgets.QLineEdit.Password)
self.PasswordText.setObjectName("PasswordText")
self.password = QtWidgets.QLabel(self.groupBox)
self.password.setGeometry(QtCore.QRect(30, 150, 81, 16))
font = QtGui.QFont()
font.setFamily("나눔스퀘어 Bold")
font.setPointSize(10)
self.password.setFont(font)
self.password.setObjectName("password")
self.PasswordchkText = QtWidgets.QLineEdit(self.groupBox)
self.PasswordchkText.setGeometry(QtCore.QRect(140, 180, 113, 21))
self.PasswordchkText.setObjectName("PasswordchkText")
self.PasswordchkText.setEchoMode(QtWidgets.QLineEdit.Password)
self.passwordCHK = QtWidgets.QLabel(self.groupBox)
self.passwordCHK.setGeometry(QtCore.QRect(30, 180, 111, 16))
font = QtGui.QFont()
font.setFamily("나눔스퀘어 Bold")
font.setPointSize(10)
self.passwordCHK.setFont(font)
self.passwordCHK.setObjectName("passwordCHK")
self.ChkLabel = QtWidgets.QLabel(self.groupBox)
self.ChkLabel.setGeometry(QtCore.QRect(30, 200, 121, 31))
font = QtGui.QFont()
font.setFamily("나눔스퀘어 Bold")
font.setPointSize(7)
self.ChkLabel.setFont(font)
self.ChkLabel.setObjectName("ChkLabel")
self.submit = QtWidgets.QPushButton(self.groupBox)
self.submit.setGeometry(QtCore.QRect(100, 240, 71, 31))
font = QtGui.QFont()
font.setFamily("맑은 고딕")
font.setPointSize(10)
self.submit.setFont(font)
self.submit.setObjectName("submit")
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
Form.setTabOrder(self.UsernameText, self.EmailText)
Form.setTabOrder(self.EmailText, self.FirstNameText)
Form.setTabOrder(self.FirstNameText, self.LastNameText)
Form.setTabOrder(self.LastNameText, self.PasswordText)
Form.setTabOrder(self.PasswordText, self.PasswordchkText)
Form.setTabOrder(self.PasswordchkText, self.submit)
self.submit.clicked.connect(self.submitButton)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "작업자 통합 관리 시스템"))
self.groupBox.setTitle(_translate("Form", "Registration"))
self.Email.setText(_translate("Form", "E-mail"))
self.userName.setText(_translate("Form", "Username"))
self.firstName.setText(_translate("Form", "First Name"))
self.lastName.setText(_translate("Form", "Last Name"))
self.password.setText(_translate("Form", "Password"))
self.passwordCHK.setText(_translate("Form", "Password"))
self.ChkLabel.setText(_translate("Form", "( Check again )"))
self.submit.setText(_translate("Form", "Submit"))
def msgbox(self, title, content):
return QMessageBox.question(pipe.window, title, content, QMessageBox.Yes, QMessageBox.Yes)
def errorCheck(self, data):
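        # Validate the sign-up form: e-mail format, matching passwords, no empty fields,
        # and no existing account with the same e-mail or username; returns True on any error.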
emailRegex = '[^@]+@[^@]+\.[^@]+'
if re.match(emailRegex, data['email']) is None:
self.msgbox('이메일 형 불일치', '이메일 형식이 불일치합니다.')
return True
if data['password'] != self.PasswordchkText.text():
self.msgbox('비밀번호 불일치', '비밀번호가 불일치합니다.')
return True
for value in data.values():
if len(value) == 0:
self.msgbox('입력 오류', '빈 칸을 가진 필드가 존재합니다.')
return True
isExist = (pipe.db.collection.find_one({'email': data['email']}) is not None) \
or (pipe.db.collection.find_one({'username': data['username']}) is not None)
if isExist:
self.msgbox('계정 오류', '이미 있는 계정입니다.')
return True
return False
def pushDatabase(self, data):
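        # Store the account with a SHA-256-hashed password and show a confirmation dialog.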
data['password'] = hashlib.sha256(data['password'].encode()).hexdigest()
pipe.db.collection.insert(data)
return self.msgbox('등록 성공', '계정이 정상적으로 데이터베이스에 등록되었습니다.')
def submitButton(self):
data = {}
data['username'] = self.UsernameText.text()
data['email'] = self.EmailText.text()
data['firstName'] = self.FirstNameText.text()
data['lastName'] = self.LastNameText.text()
data['password'] = self.PasswordText.text()
if self.errorCheck(data) : return
if self.pushDatabase(data) == QMessageBox.Yes:
temp = pipe.window
pipe.window = QtWidgets.QWidget()
pipe.ui = signIn.Ui_Form()
pipe.ui.setupUi(pipe.window)
pipe.window.show()
temp.close()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Form = QtWidgets.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
| UTF-8 | Python | false | false | 8,288 | py | 26 | signUp.py | 16 | 0.647736 | 0.625344 | 0 | 194 | 40.201031 | 100 |
ersaijun/python_liaoxuefeng | 10,720,238,382,916 | 3c60fab6eb4d962af7442e9bac08b6935571e230 | b82afdc116e71a5f7c77fe46554a89cdd61598af | /6class.py | db66dceab035c6539f9b33555124bfd2d78f85bb | []
| no_license | https://github.com/ersaijun/python_liaoxuefeng | 600689e0154fc0f211d5071f8b9cb7befcf17caf | 7624a24cf06097a33f643e0245ad8425d4c862c7 | refs/heads/master | 2020-04-25T03:09:06.487807 | 2019-02-25T10:38:13 | 2019-02-25T10:38:13 | 172,465,798 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding : utf-8 -*-
# @Time : 2019/1/30 9:52
# @Author : Kaspar
# @File :6class.py
# In Python, if an instance variable name starts with a double underscore (__),
# it becomes a private variable: accessible from inside the class, but not from outside.
class Student(object):
def __init__(self, name, score):
        self.__name = name  # private
self.__score = score
def print_score(self):
print('%s: %s' % (self.__name, self.__score))
bart = Student('Bart Simpson', 98)
bart.name = "kaspar"
bart.age = 20
# print(bart.__name)  # error: __name is private and cannot be accessed from outside
print(bart._Student__name)  # works through name mangling, but not recommended
bart.print_score()
print(bart.name )
print(dir("ABC")) | UTF-8 | Python | false | false | 669 | py | 11 | 6class.py | 10 | 0.590018 | 0.561497 | 0 | 24 | 22.416667 | 53 |
sgdlavoie/python-coding-challenges | 6,399,501,297,072 | 74de9498d6703bf8f85c8af269054e5fe652b42f | fc99e21a006400b8b47d6ecee1d3ac346f0b364f | /websites_programming_challenges/euler/problem017.py | 6b5d811c6c9cc9baaf4ad0cdf888cdadb79cb99f | []
| no_license | https://github.com/sgdlavoie/python-coding-challenges | 0624528e3d4470ece3c521db037615d4afd20708 | 8f533d325a989a2439258cf18539d4d207d756e3 | refs/heads/master | 2018-10-15T03:26:33.723015 | 2018-10-14T14:54:08 | 2018-10-14T14:54:08 | 117,031,573 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # What is the sum of the digits of the number 2^1000?
number_exponent = str(2 ** 1000)
list_number_exponent = []
for char in number_exponent:
list_number_exponent.append(int(char))
print(sum(list_number_exponent))
| UTF-8 | Python | false | false | 221 | py | 165 | problem017.py | 164 | 0.719457 | 0.674208 | 0 | 9 | 23.555556 | 53 |
SamElliott4/LOST | 18,305,150,649,339 | 73e9382c494396edc1da3c5fd7ce67477fa4eaf7 | b9af5ab6f6c8f1fd10f5d6ce80236661baea3dec | /import/import_data.py | 5c30889c88f5105c0a784f8356e00bfff075a462 | []
| no_license | https://github.com/SamElliott4/LOST | 280d22edad275259f71293b50686dd1ad24399b3 | 61c4d302fb90ea7218c3810bb0e04a4e42fdc8c3 | refs/heads/master | 2021-01-11T23:35:58.423879 | 2017-03-22T05:00:17 | 2017-03-22T05:00:17 | 78,607,018 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # vim: background=dark
import csv
import sys
import psycopg2
dbname = sys.argv[1]
path = sys.argv[2]
db = psycopg2.connect(dbname=dbname, host='/tmp/', port='5432')
cur = db.cursor()
def main():
global db, cur, path
# import users and facilties first, no internal dependencies
# users
with open(path + '/users.csv', 'r') as csvfile:
users = []
reader = csv.DictReader(csvfile)
for r in reader:
users.append([r['username'], r['password'], r['role'], r['active']])
for u in users:
cur.execute("SELECT role_pk FROM roles WHERE role ILIKE %s;", (u[2],))
role_key = cur.fetchone()[0]
cur.execute("""INSERT INTO users (user_id, username, password, role_fk, active)
VALUES (%s, %s, %s, %s, %s);""", (u[0].lower(), u[0], u[1], role_key, u[3]))
db.commit()
# facilities
with open(path + '/facilities.csv', 'r') as csvfile:
facilities = []
reader = csv.DictReader(csvfile)
for r in reader:
facilities.append([r['fcode'], r['common_name']])
for f in facilities:
# try block, log failed entries
cur.execute("INSERT INTO facilities (f_code, common_name) VALUES (%s, %s);", (f[0], f[1]))
db.commit()
# import assets
with open(path + '/assets.csv', 'r') as csvfile:
assets = []
reader = csv.DictReader(csvfile)
for r in reader:
assets.append([r['asset_tag'], r['description'], r['facility'], r['acquired'], r['disposed']])
for a in assets:
# new asset in asset table
cur.execute("INSERT INTO assets (asset_tag, description) VALUES (%s, %s);", (a[0], a[1]))
db.commit()
# new entry in asset_at table
cur.execute("SELECT asset_pk FROM assets WHERE asset_tag=%s;", (a[0],))
asset_key = cur.fetchone()[0]
cur.execute("SELECT facility_pk FROM facilities WHERE f_code=%s;", (a[2],))
fac_key = cur.fetchone()[0]
cur.execute("INSERT INTO asset_at (asset_fk, facility_fk, intake_dt, expunge_dt) VALUES (%s, %s, %s, %s);",
(asset_key, fac_key, a[3], a[4] if a[4] != 'NULL' else None))
# asset status is unknown until after transfer import
db.commit()
# import tranfers
with open(path + '/transfers.csv', 'r') as csvfile:
transfers = []
reader = csv.DictReader(csvfile)
for r in reader:
cur.execute("SELECT * FROM transfer_requests;")
req_num = len(cur.fetchall())+1
cur.execute("SELECT asset_pk FROM assets WHERE asset_tag=%s;", (r['asset_tag'],))
asset_key = cur.fetchone()[0]
cur.execute("SELECT facility_pk FROM facilities WHERE f_code=%s;", (r['source'],))
src_key = cur.fetchone()[0]
cur.execute("SELECT facility_pk FROM facilities WHERE f_code=%s;", (r['destination'],))
dest_key = cur.fetchone()[0]
req_id = r['source'] + str(req_num)
if r['approve_by'] == 'NULL':
t_status = 0
approve_by = None
approve_dt = None
else:
t_status = 1
approve_by = r['approve_by']
approve_dt = r['approve_dt']
cur.execute("""INSERT INTO transfer_requests (request_id, asset_fk, requester, request_dt, src, dest, approver, approve_dt, status)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);""",
(req_id, asset_key, r['request_by'], r['request_dt'], src_key, dest_key, approve_by, approve_dt, t_status))
if t_status == 1:
load_dt = r['load_dt']
if load_dt == 'NULL':
load_dt = None
unload_dt = r['unload_dt']
if unload_dt == 'NULL':
unload_dt = None
cur.execute("""INSERT INTO asset_moving (request_id, asset_fk, src, dest, load_dt, unload_dt)
VALUES (%s, %s, %s, %s, %s, %s);""", (req_id, asset_key, src_key, dest_key, load_dt, unload_dt))
db.commit()
# fill out asset_at based on asset_moving table
cur.execute("SELECT asset_pk FROM assets;")
assets = cur.fetchall()
for a in assets:
cur.execute("SELECT dest, load_dt, unload_dt FROM asset_moving WHERE asset_fk=%s;", (a[0],))
res = cur.fetchall()
rem = []
for i in res:
            if i[1] is None:
rem.append(i)
for i in rem:
res.remove(i)
asc_res = sorted(res, key=lambda res: res[1], reverse=False)
for b in asc_res:
current_loc = latest_asset_loc(a[0])
cur.execute("UPDATE asset_at SET expunge_dt=%s WHERE intake_dt=%s;", (b[1], current_loc[2]))
a_status = 2
if b[2]:
cur.execute("INSERT INTO asset_at (asset_fk, facility_fk, intake_dt) VALUES (%s, %s, %s);", (a[0], b[0], b[2]))
a_status = 1
# update asset_status
cur.execute("UPDATE assets SET status=%s WHERE asset_pk=%s;", (a_status, a[0]))
db.commit()
#check for disposed assets:
cur.execute("SELECT asset_pk FROM assets;")
assets = cur.fetchall()
for a in assets:
latest = latest_asset_loc(a[0])
if latest[3]:
cur.execute("SELECT * FROM asset_at a JOIN asset_moving m ON expunge_dt=load_dt WHERE a.asset_fk=%s AND a.expunge_dt=%s;", (a[0], latest[3]))
if len(cur.fetchall()) == 0:
cur.execute("UPDATE assets SET status=0 WHERE asset_pk=%s;", (a[0],))
else:
cur.execute("UPDATE assets SET status=1 WHERE asset_pk=%s;", (a[0],))
db.commit()
def latest_asset_loc(asset_key):
cur.execute("SELECT asset_fk, facility_fk, intake_dt, expunge_dt FROM asset_at WHERE asset_fk=%s;", (asset_key,))
res = cur.fetchall()
return sorted(res, key=lambda res: res[2], reverse=True)[0]
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 6,151 | py | 30 | import_data.py | 11 | 0.528369 | 0.519265 | 0 | 143 | 41.993007 | 153 |
primarinovic/python | 3,289,944,991,392 | cac052f2f4c24656901536b225fb2ec5d9efeceb | ff4adaa003e35007057fd6d36a1ce004a610cffc | /18 seno, cosseno e tangente.py | ed98e5a70c6896a802b07e334950e5fec04ee676 | []
| no_license | https://github.com/primarinovic/python | 8ec1981565c4edcd6bb01b8303da1785ca3a0792 | b4a980479e0c5186dc63eaca95bda1a969053935 | refs/heads/master | 2022-12-14T06:30:06.852018 | 2020-09-26T15:06:51 | 2020-09-26T15:06:51 | 282,308,240 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from math import sin, cos, tan, radians
angulo = float(input('Informe o ângulo: '))
print(f'O seno de {angulo}° é {sin(radians(angulo)):.2f}. \nO cosseno de {angulo}° é {cos(radians(angulo)):.2f}.')
print(f'A tangente de {angulo}° é {tan(radians(angulo)):.2f}.')
| UTF-8 | Python | false | false | 272 | py | 45 | 18 seno, cosseno e tangente.py | 44 | 0.664151 | 0.65283 | 0 | 6 | 43.166667 | 114 |
nobleblackk/InterviewBit | 6,519,760,391,864 | b7889eda32f7113d5c34d289308d79a5a049bc6e | e962954e5fb86615e30449fe7e437d8c8d4010c9 | /Arrays/Maximum_Absolute_Difference.py | 4bbdf5f26eecc547a7d463c5bf0d19185a631965 | []
| no_license | https://github.com/nobleblackk/InterviewBit | d41f4c9c1601f2a8de07c26c40b5ec9a9c7004a4 | 78485d70585bf9b58445cc8fd559647bb6c8aa68 | refs/heads/master | 2020-09-04T05:14:52.346876 | 2019-11-24T15:12:39 | 2019-11-24T15:12:39 | 219,665,598 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
# @param A : list of integers
# @return an integer
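    # Brute force: evaluate |A[i] - A[j]| + |i - j| for every pair of indices
    # and keep the maximum (O(n^2) time).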
def count(self,A,i,j):
Sum=abs(A[i]-A[j])+abs(i-j)
return Sum
def maxArr(self, A):
res=0
for i in range(len(A)):
for j in range(1,len(A)):
temp=self.count(A,i,j)
if temp>res:
res=temp
return res
| UTF-8 | Python | false | false | 408 | py | 11 | Maximum_Absolute_Difference.py | 10 | 0.433824 | 0.428922 | 0 | 14 | 26.857143 | 39 |
jketts/QuantumGraphState | 3,152,506,021,665 | c978796ecb8d1dab718167a440d7b46f1c522f95 | af4477bcc8160afab0c1ac8477311c6e7946dbb6 | /convert.py | 378a92aa9c71e069a5c11bef852e789627bab0b7 | []
| no_license | https://github.com/jketts/QuantumGraphState | 0c6586dcfc7d2f2b6c97cb3d390b2b73a6901685 | 5bff321e450d3db6fc94d620acdf7900afe1b3ce | refs/heads/master | 2021-01-18T13:32:26.006097 | 2015-09-16T06:23:17 | 2015-09-16T06:23:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!usr/bin/python
import itertools
import numpy as np
def EdgeListtoAdjacency(edgeList,dim):
#generating adjacency matrix from ground up
Matrix = np.zeros((dim,dim),dtype=int)
for edge in edgeList:
Matrix[edge[0]][edge[1]]=1
Matrix[edge[1]][edge[0]]=1
return Matrix
def AdjacencytoEdgeList(Matrix):
#matrix must be an array
(n,n)=Matrix.shape
edgeList=[[x,y] for x,y in itertools.combinations(range(n),2)
if Matrix[x][y]]
return edgeList
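# Illustrative round trip (hypothetical input, not part of the original module):
#   M = EdgeListtoAdjacency([[0, 1], [1, 2]], 3)   # 3x3 numpy adjacency matrix
#   AdjacencytoEdgeList(M)                         # -> [[0, 1], [1, 2]]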
| UTF-8 | Python | false | false | 502 | py | 8 | convert.py | 4 | 0.659363 | 0.645418 | 0 | 19 | 25.368421 | 65 |
nickvandewiele/RMG-database | 18,313,740,582,530 | 704df1658be6b8a7889dc1ec8565e37904eeac42 | 104a0ec7cfb5d4bf948f22b47edb59122a886363 | /input/kinetics/libraries/Sulfur/DMDS.py | fb061e4303ff8bc85be61c161ef286df152b55d8 | []
| no_license | https://github.com/nickvandewiele/RMG-database | 3afbe88df46a5641c6abbaf032bf4a0b6b9aae73 | dc3cbc7048501d730062426a65d87ea452e8705f | refs/heads/master | 2020-12-25T08:19:49.436773 | 2014-08-04T21:37:26 | 2014-08-04T21:37:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# encoding: utf-8
name = "Sulfur/DMDS"
shortDesc = u""
longDesc = u"""
"""
entry(
index = 1,
reactant1 =
"""
C2H5SJ1
1 C 0 0 {2,S} {3,S} {4,S} {5,S}
2 C 1 0 {1,S} {6,S} {7,S}
3 S 0 2 {1,S} {8,S}
4 H 0 0 {1,S}
5 H 0 0 {1,S}
6 H 0 0 {2,S}
7 H 0 0 {2,S}
8 H 0 0 {3,S}
""",
product1 =
"""
C2H5SJ2
1 C 0 0 {2,S} {4,S} {5,S} {6,S}
2 C 0 0 {1,S} {3,S} {7,S} {8,S}
3 S 1 2 {2,S}
4 H 0 0 {1,S}
5 H 0 0 {1,S}
6 H 0 0 {1,S}
7 H 0 0 {2,S}
8 H 0 0 {2,S}
""",
degeneracy = 1,
kinetics = Arrhenius(
A = (85.5, 's^-1'),
n = 3.04,
Ea = (11.62, 'kcal/mol'),
T0 = (1, 'K'),
comment = 'Reaction and kinetics from Sulfur\\DMDS.\nsmall molecule oxidation library, reaction file, version 2, JS, August 6, 2003\noriginally from Leeds methane oxidation mechanism v1.5\nhttp://www.chem.leeds.ac.uk/Combustion/Combustion.html\nfix bug for O2 + HCO = HO2 + CO 1.52E13 0.00 -7.09, change E into positive, change A into 5.12E13 according to NIST\nOntbinding DMDS',
),
shortDesc = u"""""",
longDesc =
u"""
small molecule oxidation library, reaction file, version 2, JS, August 6, 2003
originally from Leeds methane oxidation mechanism v1.5
http://www.chem.leeds.ac.uk/Combustion/Combustion.html
fix bug for O2 + HCO = HO2 + CO 1.52E13 0.00 -7.09, change E into positive, change A into 5.12E13 according to NIST
Ontbinding DMDS
""",
)
entry(
index = 2,
reactant1 =
"""
C2H4
1 C 0 0 {2,D} {3,S} {4,S}
2 C 0 0 {1,D} {5,S} {6,S}
3 H 0 0 {1,S}
4 H 0 0 {1,S}
5 H 0 0 {2,S}
6 H 0 0 {2,S}
""",
reactant2 =
"""
SH
1 S 1 2 {2,S}
2 H 0 0 {1,S}
""",
product1 =
"""
C2H5SJ2
1 C 0 0 {2,S} {4,S} {5,S} {6,S}
2 C 0 0 {1,S} {3,S} {7,S} {8,S}
3 S 1 2 {2,S}
4 H 0 0 {1,S}
5 H 0 0 {1,S}
6 H 0 0 {1,S}
7 H 0 0 {2,S}
8 H 0 0 {2,S}
""",
degeneracy = 1,
kinetics = Arrhenius(
A = (9960, 'cm^3/(mol*s)'),
n = 2.7,
Ea = (-0.8, 'kcal/mol'),
T0 = (1, 'K'),
comment = 'Reaction and kinetics from Sulfur\\DMDS.',
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 3,
reactant1 =
"""
SJJ
1 S 2S 2
""",
reactant2 =
"""
SJJ
1 S 2S 2
""",
product1 =
"""
S2
1 S 1 2 {2,S}
2 S 1 2 {1,S}
""",
degeneracy = 1,
kinetics = Arrhenius(
A = (160000000000.0, 'cm^3/(mol*s)'),
n = 1.3,
Ea = (-0.88, 'kcal/mol'),
T0 = (1, 'K'),
comment = 'Reaction and kinetics from Sulfur\\DMDS.',
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
| UTF-8 | Python | false | false | 2,531 | py | 18 | DMDS.py | 18 | 0.519953 | 0.406164 | 0 | 134 | 17.880597 | 387 |
buaahsh/AnomalyClassifier | 13,168,369,746,154 | bf81b1da4757d089804a5d66056ffdda78500c24 | 728096b0983bbc5102bd89eae22d8be3552574e2 | /DBSCAN4AP/Model/Core/StandardScaler.py | c0cb1f176c7a8190742911e3de37f8609c233ad5 | []
| no_license | https://github.com/buaahsh/AnomalyClassifier | e0377375329aa960a2b74ee268da47ba914c3a2f | 428b0521ce8aa810b59061c658166b625287d7a3 | refs/heads/master | 2021-01-10T12:55:12.384184 | 2017-03-03T01:51:50 | 2017-03-03T01:51:50 | 46,794,905 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
standard scaler
The standard scaler is a preprocessing module that standardizes raw data:
it removes the per-feature mean and scales features to unit variance, and it
supports incremental fitting (partial_fit) as well as sparse input.
"""
# Author: Shaohan Huang <buaahsh@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis)
from sklearn.utils.validation import check_is_fitted, FLOAT_DTYPES
def _handle_zeros_in_scale(scale, copy=True):
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
class StandardScaler(BaseEstimator, TransformerMixin):
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y=None, copy=None):
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X | UTF-8 | Python | false | false | 4,131 | py | 86 | StandardScaler.py | 47 | 0.498184 | 0.494069 | 0 | 125 | 32.056 | 80 |
arossbrian/my_short_scripts | 9,277,129,374,823 | 1b81cee68a487b5e50df3678ae38088ee66a5382 | e0cbea0cb68f0ba5dba837dbe60067eb88e3d151 | /factors.py | 120c668ef52ae290eca71ba2f68c33e1e14fec23 | []
| no_license | https://github.com/arossbrian/my_short_scripts | 74fb689ac25feaffb14437496902ee1a0dcc5b60 | a34923f7ecbf027d8a0704400fcfb3e71ed662fd | refs/heads/master | 2023-05-24T16:45:51.050321 | 2023-05-11T16:20:30 | 2023-05-11T16:20:30 | 195,057,250 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #has a function that divide evenly an integer
def factors():
    number = int(input("Please enter an integer to find its factors : "))
    print("The number entered is", number)
    for candidate in range(0, number):
        if candidate % 2 == 0:
            print(candidate, "is divisible by 2")
        else:
            print(candidate, "is not divisible by 2")
    return number
factors()
| UTF-8 | Python | false | false | 437 | py | 76 | factors.py | 66 | 0.578947 | 0.562929 | 0 | 12 | 34.25 | 68 |
xxxwarrior/Basic-Design-Patterns-Python | 3,255,585,230,770 | 28e3862697dc263e9ee2515a0f9159709df4f953 | 5d57c2a7d66ec3605e86d0ac52ee55d0e736bee8 | /Observer/observer.py | 1eac3ba842b3572d99be651f51d69104ccbe03db | []
| no_license | https://github.com/xxxwarrior/Basic-Design-Patterns-Python | 1183fb5caea8fd9981dc0b7dc73154fd2a180beb | 882edf988e958e3605edcfb607410ffdbc539d3d | refs/heads/main | 2023-01-07T14:45:04.765236 | 2020-11-07T21:50:55 | 2020-11-07T21:50:55 | 310,418,401 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Observer is used to define "one-to-many" relations between the objects,
so when the state of one (observable) object changes, all its dependents (observers) are notified,
usually by calling one of their methods.
"""
from typing import Any
class Observer:
def update(self, observable: 'Observable') -> None:
print(f"The subject {observable.name} is {observable.state}")
class ConcreteObserver(Observer):
pass
class Observable:
def __init__(self, *args, **kwargs) -> None:
self.observers = []
def registerObserver(self, observer: Observer) -> None:
if observer not in self.observers:
self.observers.append(observer)
def removeObserver(self, observer: Observer) -> None:
if observer in self.observers:
self.observers.remove(observer)
def notifyObservers(self) -> None:
for observer in self.observers:
observer.update(self)
class ConcreteSubject(Observable):
def __init__(self, name: str) -> None:
super().__init__()
self.name = name
def changeState(self, state: Any) -> None:
self.state = state
self.notifyObservers()
if __name__ == "__main__":
subject = ConcreteSubject("Test")
observer = ConcreteObserver()
subject.registerObserver(observer)
subject.changeState("Testing")
| UTF-8 | Python | false | false | 1,407 | py | 47 | observer.py | 23 | 0.628287 | 0.628287 | 0 | 53 | 24.509434 | 99 |
don-luigi/asc | 16,243,566,318,426 | e75f22738ba281c0e093b748ccc468d4051b3359 | a0d81d56f064b37bcb1e8c137b10127349880c70 | /lab03 - concurenta/lab03.py | 21f9e4cb8b9ee5c18cf65c33f2dfba8fb470c9ce | []
| no_license | https://github.com/don-luigi/asc | 1dae31c643e9d0e37cae8f5013d13c935430022a | ce634276f9f4fa3b53cb87f7d79bb89d476b9662 | refs/heads/master | 2022-08-01T15:56:16.825438 | 2020-05-28T15:13:38 | 2020-05-28T15:13:38 | 267,662,828 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from threading import *
numThreads = 4
# The original notes assigned the shared barrier state to attributes of a bare
# `self` at module level; grouping that state in a simple namespace keeps the
# sketch runnable while preserving its layout.
from types import SimpleNamespace
state = SimpleNamespace()
state.numThreads = numThreads
state.countThreads = numThreads
state.countLock = Lock()
state.countLock2 = Lock()
state.threadsSem = Semaphore(0)
state.threadsSem2 = Semaphore(numThreads)
state.countThreads2 = numThreads
# Reusable two-phase barrier: every thread calls barrier(state); the second
# semaphore keeps fast threads from re-entering before everyone has left.
def barrier(self):
    self.threadsSem2.acquire()
    self.countLock.acquire()
    self.countThreads -= 1
    if self.countThreads == 0:
        for i in range(self.numThreads):
            self.threadsSem.release()
        self.countThreads = self.numThreads
    self.countLock.release()
    self.threadsSem.acquire()
    self.countLock2.acquire()
    # the decrement below translates into several machine instructions:
    # mov eax, [countThreads2]
    # sub eax, 1
    # mov [countThreads2], eax
    # so a context switch can happen in the middle of it -- that is why the
    # counter is protected by a lock
    self.countThreads2 -= 1
    if self.countThreads2 == 0:
        for i in range(self.numThreads):
            self.threadsSem2.release()
        self.countThreads2 = self.numThreads
self.countLock2.release() | UTF-8 | Python | false | false | 940 | py | 25 | lab03.py | 15 | 0.738298 | 0.718085 | 0 | 36 | 25.138889 | 96 |
Senuch-Uzair-Tariq/Workspace | 14,568,529,079,968 | 1921fb3abab0ea542280e3cf95211f516d5cf3a4 | d63387eedaeca0f86177ea61eb856679b86b3017 | /Python Mark Lutz- Volume 1/Python52.py | d74dc67633eef6aec5d5d2f6bec32ccb1110e038 | []
| no_license | https://github.com/Senuch-Uzair-Tariq/Workspace | c5923cb0dc135672a819f162845ea2c90abcc795 | 2f7b35c207397f07a781c31d70a57f64e5fe59a4 | refs/heads/master | 2021-06-27T16:22:46.438618 | 2017-09-17T15:33:51 | 2017-09-17T15:33:51 | 103,842,193 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | l1,l2,l3,l4=[1,2,3,4,5],[5,4,3,2,1],{1,2,3,4,5},set([1,2,3,4]);
print(l1);
print(l2);
print(l3)
print(l4)
s={'spam'}
print(s) | UTF-8 | Python | false | false | 125 | py | 262 | Python52.py | 257 | 0.552 | 0.336 | 0 | 7 | 17 | 63 |
BrandonCline/wheeler | 6,270,652,286,672 | 463153e9f10507d67e166d1e9408bc99ef4cb410 | 1cc1703c4d0c7d61c682272d89f9a0184e96ac5a | /tests/caviar/math_tests.py | 195227fb3a25fd5ba4f093e7b6242e1f614cc7a5 | []
| no_license | https://github.com/BrandonCline/wheeler | a254a743854395a2aa0964a7c2c3f3c253ca7cf1 | f23f7deddb3feb6ba90e46b5c670f6f20a26e91a | refs/heads/master | 2021-01-22T19:36:37.369334 | 2013-09-14T20:31:42 | 2013-09-14T20:31:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
from category import *
from common import *
from interpreter.tools import parse
class TestMath(unittest.TestCase):
def test_simple_addition(self):
expression = '1 + 2'
result = parse(expression, ROOT).evaluate()
self.assertTrue(result.has('3'), "Expected 3 in the result but I see: %s" % result.contents )
def test_simple_subtraction(self):
expression = '8 - 5'
# self.fail("Subtraction doesn't work yet")
result = parse(expression, ROOT).evaluate()
self.assertTrue(result.has('3'), "Expected 3 in the result but I see: %s" % result.contents )
def test_addition_of_sames(self):
pass
# expression = '1 + 1'
#
# root = Category("*")
#
# adder = root.create("+")
#
# expression_category = parse(expression, root)
#
# result = expression_category.evaluate()
#
# self.assertTrue(result.has('2'), "This language sucks at math! :) Expected 2." )
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 957 | py | 26 | math_tests.py | 21 | 0.659352 | 0.646813 | 0 | 43 | 21.255814 | 95 |
tarunkant/webhacking.kr | 2,465,311,250,441 | 158e2728011f5b60b6c96859a87839a8da2ef547 | 83e44cdfe8fada6a8e53df830e851a3f9f577886 | /c_21.py | 9f2f648779db4bdb9629a1151183f7adb997de68 | []
| no_license | https://github.com/tarunkant/webhacking.kr | f675073c73a1fbd6a86ecf990f905f4e6731d9fb | e1d7858703aeffcfabb56415f7e6173edc4e650b | refs/heads/master | 2021-06-13T10:26:57.062502 | 2017-03-29T12:46:08 | 2017-03-29T12:46:08 | 82,685,224 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from time import time
import requests
cookie = {'PHPSESSID': 'c8b31fd8eb78951438782461b686983a'}
url = "http://webhacking.kr/challenge/bonus/bonus-1/index.php?no=1 "
dbname= ""
#Password is
for i in range(1, 20):
for j in range(40, 125):
query = url + " or if(ascii(substr(pw," + str(i) + ",1))=" + str(j) + ",sleep(10),null)-- -"
start = time()
req = requests.get(query, cookies=cookie)
end = time()
diff = end - start
if (diff >= 10):
dbname += chr(j)
break
print "Password is : " + dbname
| UTF-8 | Python | false | false | 582 | py | 5 | c_21.py | 4 | 0.556701 | 0.489691 | 0 | 22 | 25.090909 | 100 |
GabrielSalvadorCardoso/restful_ide | 2,911,987,870,031 | 9a7a836e6660520e37abc28e55651e6c7efd82f8 | 7089434d753c769a81598096d9d63ce8766a450d | /bcim/temp.py | 5eb9d81fb5487d3a01946ff5e92fb877f4e4d1b3 | []
| no_license | https://github.com/GabrielSalvadorCardoso/restful_ide | 86404da80af27431d3cc2a54d394c04c0a578b30 | 4aa065b19f5461f7fac3031ed93b3f8f72ef0d18 | refs/heads/master | 2022-05-22T09:38:47.288791 | 2019-12-06T19:42:42 | 2019-12-06T19:42:42 | 201,837,422 | 0 | 0 | null | false | 2022-04-22T22:08:57 | 2019-08-12T01:40:13 | 2019-12-06T19:42:45 | 2022-04-22T22:08:55 | 165 | 0 | 0 | 2 | Python | false | false | 'edif-pub-militar': reverse('bcim:EdifPubMilitar_list', args=args, kwargs=kwargs, request=request),
'adm-edif-pub-militar-p': reverse('bcim:AdmEdifPubMilitarP_list', args=args, kwargs=kwargs, request=request),
'posto-fiscal': reverse('bcim:PostoFiscal_list', args=args, kwargs=kwargs, request=request),
'edif-agropec-ext-vegetal-pesca': reverse('bcim:EdifAgropecExtVegetalPesca_list', args=args, kwargs=kwargs, request=request),
'ext-mineral-a': reverse('bcim:ExtMineralA_list', args=args, kwargs=kwargs, request=request),
'ext-mineral-p': reverse('bcim:ExtMineralP_list', args=args, kwargs=kwargs, request=request),
'est-gerad-energia-eletrica': reverse('bcim:EstGeradEnergiaEletrica_list', args=args, kwargs=kwargs, request=request),
'hidreletrica': reverse('bcim:Hidreletrica_list', args=args, kwargs=kwargs, request=request),
'termeletrica': reverse('bcim:Termeletrica_list', args=args, kwargs=kwargs, request=request),
'banco-areia': reverse('bcim:BancoAreia_list', args=args, kwargs=kwargs, request=request),
'barragem-l': reverse('bcim:BarragemL_list', args=args, kwargs=kwargs, request=request),
'barragem-p': reverse('bcim:BarragemP_list', args=args, kwargs=kwargs, request=request),
'corredeira-l': reverse('bcim:CorredeiraL_list', args=args, kwargs=kwargs, request=request),
'corredeira-p': reverse('bcim:CorredeiraP_list', args=args, kwargs=kwargs, request=request),
'ilha': reverse('bcim:Ilha_list', args=args, kwargs=kwargs, request=request),
'massa-dagua': reverse('bcim:MassaDagua_list', args=args, kwargs=kwargs, request=request),
'queda-dagua': reverse('bcim:QuedaDagua_list', args=args, kwargs=kwargs, request=request),
'recife': reverse('bcim:Recife_list', args=args, kwargs=kwargs, request=request),
'rocha-em-agua': reverse('bcim:RochaEmAgua_list', args=args, kwargs=kwargs, request=request),
'sumidouro-vertedouro': reverse('bcim:SumidouroVertedouro_list', args=args, kwargs=kwargs, request=request),
'terreno-sujeito-inundacao': reverse('bcim:TerrenoSujeitoInundacao_list', args=args, kwargs=kwargs, request=request),
'trecho-drenagem': reverse('bcim:TrechoDrenagem_list', args=args, kwargs=kwargs, request=request),
'trecho-massa-dagua': reverse('bcim:TrechoMassaDagua_list', args=args, kwargs=kwargs, request=request),
'municipio': reverse('bcim:Municipio_list', args=args, kwargs=kwargs, request=request),
'outros-limites-oficiais': reverse('bcim:OutrosLimitesOficiais_list', args=args, kwargs=kwargs, request=request),
'pais': reverse('bcim:Pais_list', args=args, kwargs=kwargs, request=request),
'terra-indigena-a': reverse('bcim:TerraIndigenaA_list', args=args, kwargs=kwargs, request=request),
'terra-indigena-p': reverse('bcim:TerraIndigenaP_list', args=args, kwargs=kwargs, request=request),
'unidade-conservacao-nao-snuc': reverse('bcim:UnidadeConservacaoNaoSnuc_list', args=args, kwargs=kwargs, request=request),
'unidade-federacao': reverse('bcim:UnidadeFederacao_list', args=args, kwargs=kwargs, request=request),
'unidade-protecao-integral': reverse('bcim:UnidadeProtecaoIntegral_list', args=args, kwargs=kwargs, request=request),
'unidade-uso-sustentavel': reverse('bcim:UnidadeUsoSustentavel_list', args=args, kwargs=kwargs, request=request),
'aglomerado-rural-isolado': reverse('bcim:AglomeradoRuralIsolado_list', args=args, kwargs=kwargs, request=request),
'aldeia-indigena': reverse('bcim:AldeiaIndigena_list', args=args, kwargs=kwargs, request=request),
'area-edificada': reverse('bcim:AreaEdificada_list', args=args, kwargs=kwargs, request=request),
'capital': reverse('bcim:Capital_list', args=args, kwargs=kwargs, request=request),
'cidade': reverse('bcim:Cidade_list', args=args, kwargs=kwargs, request=request),
'vila': reverse('bcim:Vila_list', args=args, kwargs=kwargs, request=request),
'curva-batimetrica': reverse('bcim:CurvaBatimetrica_list', args=args, kwargs=kwargs, request=request),
'curva-nivel': reverse('bcim:CurvaNivel_list', args=args, kwargs=kwargs, request=request),
'duna': reverse('bcim:Duna_list', args=args, kwargs=kwargs, request=request),
'elemento-fisiografico-natural-l': reverse('bcim:ElementoFisiograficoNaturalL_list', args=args, kwargs=kwargs, request=request),
'elemento-fisiografico-natural-p': reverse('bcim:ElementoFisiograficoNaturalP_list', args=args, kwargs=kwargs, request=request),
'pico': reverse('bcim:Pico_list', args=args, kwargs=kwargs, request=request),
'ponto-cotado-altimetrico': reverse('bcim:PontoCotadoAltimetrico_list', args=args, kwargs=kwargs, request=request),
'ponto-cotado-batimetrico': reverse('bcim:PontoCotadoBatimetrico_list', args=args, kwargs=kwargs, request=request),
'eclusa': reverse('bcim:Eclusa_list', args=args, kwargs=kwargs, request=request),
'edif-constr-aeroportuaria': reverse('bcim:EdifConstrAeroportuaria_list', args=args, kwargs=kwargs, request=request),
'edif-const-portuaria': reverse('bcim:EdifConstPortuaria_list', args=args, kwargs=kwargs, request=request),
'edif-metro-ferroviaria': reverse('bcim:EdifMetroFerroviaria_list', args=args, kwargs=kwargs, request=request),
'pista-ponto-pouso': reverse('bcim:PistaPontoPouso_list', args=args, kwargs=kwargs, request=request),
'ponte': reverse('bcim:Ponte_list', args=args, kwargs=kwargs, request=request),
'sinalizacao': reverse('bcim:Sinalizacao_list', args=args, kwargs=kwargs, request=request),
'travessia-l': reverse('bcim:TravessiaL_list', args=args, kwargs=kwargs, request=request),
'travessia-p': reverse('bcim:TravessiaP_list', args=args, kwargs=kwargs, request=request),
'trecho-duto': reverse('bcim:TrechoDuto_list', args=args, kwargs=kwargs, request=request),
'trecho-ferroviario': reverse('bcim:TrechoFerroviario_list', args=args, kwargs=kwargs, request=request),
'trecho-hidroviario': reverse('bcim:TrechoHidroviario_list', args=args, kwargs=kwargs, request=request),
'trecho-rodoviario': reverse('bcim:TrechoRodoviario_list', args=args, kwargs=kwargs, request=request),
'tunel': reverse('bcim:Tunel_list', args=args, kwargs=kwargs, request=request),
'brejo-pantano': reverse('bcim:BrejoPantano_list', args=args, kwargs=kwargs, request=request),
'mangue': reverse('bcim:Mangue_list', args=args, kwargs=kwargs, request=request),
'veg-restinga': reverse('bcim:VegRestinga_list', args=args, kwargs=kwargs, request=request),
| UTF-8 | Python | false | false | 6,209 | py | 14 | temp.py | 9 | 0.774199 | 0.774199 | 0 | 63 | 97.555556 | 128 |
aminehadbi/binance-alerts | 4,226,247,850,802 | 129089bc267409571038787316fdc07be13ded7b | 68b9ea8391efa466d21906b6db63fd6fb40bf07b | /server.py | 8207d07607dc001729f9049f1a601c9752ef7721 | []
| no_license | https://github.com/aminehadbi/binance-alerts | f7fb1ecbe8fdd624c1c2724e2b50ef8f2878baf2 | c3189800fd447ea8cdf3c9fc5963025491e877c6 | refs/heads/main | 2023-04-28T23:35:22.828483 | 2021-05-12T15:54:38 | 2021-05-12T15:54:38 | 356,705,386 | 7 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.server import SimpleXMLRPCRequestHandler
import requests
# Restrict to a particular path.
class RequestHandler(SimpleXMLRPCRequestHandler):
rpc_paths = ('/RPC2',)
# Create server
with SimpleXMLRPCServer(('0.0.0.0', 8000),
requestHandler=RequestHandler,
logRequests=False) as server:
def check_price(symbol):
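        # Depth-weighted average price over the top 10 levels of the futures
        # order book: sum(price * qty) / sum(qty), for bids and asks separately.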
try:
url = 'https://fapi.binance.com/fapi/v1/depth?symbol='+symbol+'&limit=10'
#print(url)
data = requests.get(url)
data = data.json()
ob_bid = []
ob_ask = []
total_bids = 0
total_asks = 0
for i in data['bids']:
ob_bid.append(float(i[0])*float(i[1]))
total_bids += float(i[1])
for i in data['asks']:
ob_ask.append(float(i[0])*float(i[1]))
total_asks += float(i[1])
best_ask = sum(ob_ask)/total_asks
best_bid = sum(ob_bid)/total_bids
print(symbol,best_bid, best_ask)
return best_bid, best_ask
except Exception as E:
print(E)
return 0, 0
def check_ob_imbalance(symbol):
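        # Order-book imbalance over the top 100 levels: total quoted ask value
        # divided by total quoted bid value (> 1 means more ask-side depth).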
try:
url = 'https://fapi.binance.com/fapi/v1/depth?symbol='+symbol+'&limit=100'
#print(url)
data = requests.get(url)
data = data.json()
ob_bid = []
ob_ask = []
for i in data['bids']:
ob_bid.append(float(i[0])*float(i[1]))
for i in data['asks']:
ob_ask.append(float(i[0])*float(i[1]))
ob_imbalance = sum(ob_ask)/sum(ob_bid)
if ob_imbalance > 1:
print(symbol, ob_imbalance)
return ob_imbalance
except Exception as E:
print(E)
return symbol
server.register_function(check_price, 'check_price')
server.register_function(check_ob_imbalance, 'check_ob_imbalance')
# Run the server's main loop
server.serve_forever()
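# Hypothetical client-side usage (not part of this script):
#   import xmlrpc.client
#   proxy = xmlrpc.client.ServerProxy("http://localhost:8000/RPC2")
#   print(proxy.check_price("BTCUSDT"))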
| UTF-8 | Python | false | false | 2,082 | py | 9 | server.py | 7 | 0.534582 | 0.519693 | 0 | 67 | 30.074627 | 85 |
realnumber666/LeetCodeEveryday | 14,001,593,397,314 | 505b97ea3cef1132deabc978063bda930833935d | 6289684e00446f49e35acd14b9b6c78a172160d9 | /21.merge-two-sorted-lists.py | cfabbca490f183f838c294f8df17541f0ae001fa | []
| no_license | https://github.com/realnumber666/LeetCodeEveryday | 08eab16a80ec09cf2a8f7b301eff4d464f62fe17 | 9fa6778e48808db5ceab00833a60ac712acbed7e | refs/heads/master | 2023-07-23T06:22:43.815413 | 2023-07-17T10:35:31 | 2023-07-17T10:35:31 | 168,924,686 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
# @lc app=leetcode id=21 lang=python3
#
# [21] Merge Two Sorted Lists
#
# https://leetcode.com/problems/merge-two-sorted-lists/description/
#
# algorithms
# Easy (45.31%)
# Total Accepted: 513.1K
# Total Submissions: 1.1M
# Testcase Example: '[1,2,4]\n[1,3,4]'
#
# Merge two sorted linked lists and return it as a new list. The new list
# should be made by splicing together the nodes of the first two lists.
#
# Example:
#
# Input: 1->2->4, 1->3->4
# Output: 1->1->2->3->4->4
#
#
#
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
        '''
        Two pointers start at the heads of l1 and l2.
        Create a new ListNode for the merged list.
        Compare the values under the two pointers, link the smaller one onto
        the new list, and advance that pointer.
        Appending to the new list means: assign val, create the next node,
        and move the write pointer forward.
        Stop once both pointers have reached None.
        '''
head = ListNode(0)
ptr = head
ptr1 = l1
ptr2 = l2
if(ptr1 == None or ptr2 == None):
return ptr1 if ptr2 == None else ptr2
while(ptr1 or ptr2):
if(ptr1 == None):
ptr.val = ptr2.val
ptr.next = ptr2.next if ptr2.next else None
return head
if(ptr2 == None):
ptr.val = ptr1.val
ptr.next = ptr1.next if ptr1.next else None
return head
if(ptr1.val <= ptr2.val):
value = ptr1.val
ptr1 = ptr1.next
else:
value = ptr2.val
ptr2 = ptr2.next
print(value, ptr1, ptr2)
ptr.val = value
ptr.next = ListNode(0)
ptr = ptr.next
return head
'''
1. With linked lists, always check whether next actually holds another node,
   so you know exactly when the merge loop should stop.
'''
| UTF-8 | Python | false | false | 1,998 | py | 29 | 21.merge-two-sorted-lists.py | 26 | 0.52051 | 0.482262 | 0 | 67 | 25.791045 | 73 |
alainrk/pretix | 13,340,168,437,814 | cc633f374bd47aed63c4262f6ef49d61ada6af9f | 2660859a9e1a73da695a42d73b75863e02185dce | /src/pretix/plugins/stripe/views.py | eb74521d88bc6b59834aca87d6c55664322d291b | [
"Apache-2.0",
"BSD-3-Clause"
]
| permissive | https://github.com/alainrk/pretix | d4931a5528cfd42b1a9d9fb1b1df02aeee507171 | 867a8132aa1ed73dd9513efae5b3c46b5bbae140 | refs/heads/master | 2021-01-18T19:49:27.366758 | 2017-04-01T13:23:11 | 2017-04-01T13:23:11 | 86,915,380 | 1 | 0 | null | true | 2017-04-01T13:31:04 | 2017-04-01T13:31:04 | 2017-04-01T08:00:33 | 2017-04-01T13:23:32 | 10,595 | 0 | 0 | 0 | null | null | null | import json
import logging
import stripe
from django.contrib import messages
from django.db import transaction
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from pretix.base.models import Order, Quota, RequiredAction
from pretix.base.services.orders import mark_order_paid, mark_order_refunded
from pretix.control.permissions import event_permission_required
from pretix.plugins.stripe.payment import Stripe
from pretix.presale.utils import event_view
logger = logging.getLogger('pretix.plugins.stripe')
@csrf_exempt
@require_POST
@event_view(require_live=False)
def webhook(request, *args, **kwargs):
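    # Stripe webhook events are unauthenticated, so the referenced charge is
    # looked up again via the Stripe API before the matching pretix order is
    # marked as paid, or flagged for a refund/overpayment that needs manual
    # review.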
event_json = json.loads(request.body.decode('utf-8'))
# We do not check for the event type as we are not interested in the event it self,
# we just use it as a trigger to look the charge up to be absolutely sure.
# Another reason for this is that stripe events are not authenticated, so they could
# come from anywhere.
if event_json['data']['object']['object'] == "charge":
charge_id = event_json['data']['object']['id']
elif event_json['data']['object']['object'] == "dispute":
charge_id = event_json['data']['object']['charge']
else:
return HttpResponse("Not interested in this data type", status=200)
prov = Stripe(request.event)
prov._init_api()
try:
charge = stripe.Charge.retrieve(charge_id)
except stripe.error.StripeError:
logger.exception('Stripe error on webhook. Event data: %s' % str(event_json))
return HttpResponse('Charge not found', status=500)
metadata = charge['metadata']
if 'event' not in metadata:
return HttpResponse('Event not given in charge metadata', status=200)
if int(metadata['event']) != request.event.pk:
return HttpResponse('Not interested in this event', status=200)
try:
order = request.event.orders.get(id=metadata['order'], payment_provider='stripe')
except Order.DoesNotExist:
return HttpResponse('Order not found', status=200)
order.log_action('pretix.plugins.stripe.event', data=event_json)
is_refund = charge['refunds']['total_count'] or charge['dispute']
if order.status == Order.STATUS_PAID and is_refund:
RequiredAction.objects.create(
event=request.event, action_type='pretix.plugins.stripe.refund', data=json.dumps({
'order': order.code,
'charge': charge_id
})
)
elif order.status in (Order.STATUS_PENDING, Order.STATUS_EXPIRED) and charge['status'] == 'succeeded' and not is_refund:
try:
mark_order_paid(order, user=None)
except Quota.QuotaExceededException:
if not RequiredAction.objects.filter(event=request.event, action_type='pretix.plugins.stripe.overpaid',
data__icontains=order.code).exists():
RequiredAction.objects.create(
event=request.event, action_type='pretix.plugins.stripe.overpaid', data=json.dumps({
'order': order.code,
'charge': charge.id
})
)
return HttpResponse(status=200)
@event_permission_required('can_view_orders')
@require_POST
def refund(request, **kwargs):
with transaction.atomic():
action = get_object_or_404(RequiredAction, event=request.event, pk=kwargs.get('id'),
action_type='pretix.plugins.stripe.refund', done=False)
data = json.loads(action.data)
action.done = True
action.user = request.user
action.save()
order = get_object_or_404(Order, event=request.event, code=data['order'])
if order.status != Order.STATUS_PAID:
messages.error(request, _('The order cannot be marked as refunded as it is not marked as paid!'))
else:
mark_order_refunded(order, user=request.user)
messages.success(
request, _('The order has been marked as refunded and the issue has been marked as resolved!')
)
return redirect(reverse('control:event.order', kwargs={
'organizer': request.event.organizer.slug,
'event': request.event.slug,
'code': data['order']
}))
| UTF-8 | Python | false | false | 4,533 | py | 148 | views.py | 78 | 0.655416 | 0.649239 | 0 | 110 | 40.209091 | 124 |
bressanmarcos/pade-plus | 16,724,602,666,138 | 48765eb39fd9a971fa184e5c7817cd57d50a808e | 619840d0d50450c298665187ba9167301b9c9faa | /examples/concurrent_request.py | 5100c2bdedce80108b15e6ac5d3e4557ded8e762 | [
"MIT"
]
| permissive | https://github.com/bressanmarcos/pade-plus | fe28b1df693c5bbc01ce16710fdd4104d38114fe | b879a3c543f6c291a8779879efdc8119ce8ed0d5 | refs/heads/master | 2023-02-26T10:17:20.527373 | 2021-02-01T02:13:46 | 2021-02-01T02:13:46 | 318,867,663 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
from random import randint, random
from pade.acl.aid import AID
from pade.acl.messages import ACLMessage
from pade.behaviours.highlevel import *
from pade.behaviours.highlevel import FipaRequestProtocol
from pade.plus.agent import ImprovedAgent
from pade.misc.utility import display_message, start_loop
from pade.misc.utility import call_later
class Client(ImprovedAgent):
def __init__(self, aid, servers):
super().__init__(aid)
self.req = FipaRequestProtocol(self)
self.servers = servers
def on_start(self):
super().on_start()
self.make_request()
def one_request(self, receiver):
message = ACLMessage()
message.add_receiver(receiver)
while True:
try:
response = yield from self.req.send_request(message)
except FipaMessageHandler as h:
response = h.message
except FipaProtocolComplete:
break
return response
@AgentSession.session
def make_request(self):
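        # Start one FIPA-request conversation per server and wait for both to
        # finish concurrently before printing the final responses.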
r1, r2 = yield from AgentSession.gather(
*(self.one_request(s) for s in self.servers)
)
display_message(self.aid.name,
f"Final response 1: [{r1.performative}] {r1.content}")
display_message(self.aid.name,
f"Final response 2: [{r2.performative}] {r2.content}")
class Server(ImprovedAgent):
def __init__(self, aid):
super().__init__(aid)
self.req = FipaRequestProtocol(self, False)
self.req.set_request_handler(self.on_request)
def on_request(self, message: ACLMessage):
display_message(self.aid.name, "Received a message")
reply = message.create_reply()
reply.set_content(f'Take this! {random()}')
delay = 10 * random()
display_message(self.aid.name, "Starting job")
print('Job delay:', delay)
if random() < 0.5:
call_later(delay, self.req.send_inform, reply)
else:
call_later(delay, self.req.send_failure, reply)
if __name__ == "__main__":
agents = list()
server1 = Server(AID(f'server1@localhost:{randint(2000, 65000)}'))
agents.append(server1)
server2 = Server(AID(f'server2@localhost:{randint(2000, 65000)}'))
agents.append(server2)
client = Client(AID(f'client@localhost:{randint(2000, 65000)}')
, servers = [server1.aid, server2.aid])
agents.append(client)
start_loop(agents)
| UTF-8 | Python | false | false | 2,493 | py | 15 | concurrent_request.py | 14 | 0.620939 | 0.602086 | 0 | 80 | 30.1625 | 78 |
Leon109/IDCMS-Web | 7,370,163,896,699 | 5ec65c3afb9c53683d5a4ad5d32f2363ddac0c40 | 5251be65c0e7297be4f9f27626827e2181e24622 | /web/app/ticket/notice/views.py | 5fe8d885a07bbb3c3461d4133debbdf88b75807f | [
"Apache-2.0"
]
| permissive | https://github.com/Leon109/IDCMS-Web | 0c119d00f44367f4e3af992d62c54504817b1e16 | b15c9ef447f89d08f836123eb87e8f1e0e550428 | refs/heads/master | 2017-11-02T13:46:34.055635 | 2016-09-07T09:07:53 | 2016-09-07T09:07:53 | 42,795,127 | 3 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | #coding=utf-8
from .forms import NoticeForm
from ..same import *
from app.models import Notice
check_field = ['title', 'body']
endpoint = '.notice_view'
@ticket.route('/ticket/notice/view', methods=['GET', 'POST'])
@login_required
def notice_view():
task_info = ticket_status()
page = request.args.get('page', 1, type=int)
pagination = Notice.query.order_by(Notice.datetime.desc()).paginate(
page, 100, True)
items = pagination.items
return render_template('ticket/notice_view.html', task_info=task_info, sidebar=sidebar,
endpoint=endpoint, pagination=pagination, items=items)
@ticket.route('/ticket/notice/add', methods=['GET', 'POST'])
@login_required
@permission_validation(Permission.ALTER)
def notice_add():
notice_form = NoticeForm()
task_info = ticket_status()
if notice_form.validate_on_submit():
notice = Notice(
title=notice_form.title.data,
body=notice_form.body.data,
user = current_user.alias
)
add_sql = edit(notice, record=False)
add_sql.run('add')
flash(u'公告添加成功')
return redirect(url_for('.notice_view'))
else:
for key in check_field :
if notice_form.errors.get(key, None):
flash(notice_form.errors[key][0])
break
return render_template('ticket/notice_add.html', task_info=task_info,
sidebar=sidebar, notice_form=notice_form)
@ticket.route('/ticket/notice/view/<int:id>', methods=['GET'])
@login_required
def notice_view_id(id):
notice = Notice.query.get_or_404(id)
task_info = ticket_status()
return render_template('ticket/notice_view_id.html', task_info=task_info,
sidebar=sidebar, notice=notice)
| UTF-8 | Python | false | false | 1,825 | py | 86 | views.py | 66 | 0.622173 | 0.617209 | 0 | 53 | 33.207547 | 92 |
boraxpr/bitesofpy | 9,268,539,474,426 | 835c410c1ea013c7d6c0447a64c6dd7e08bf0448 | b24302829278afbc8d95fa6e70aa11fd9ff983b6 | /127/ordinal.py | 6f1a8b117dea4bf42078069f140c31e98b383681 | []
| no_license | https://github.com/boraxpr/bitesofpy | 1e01351021153dfe8a69b958593e90c6438f84b0 | ff0176e029ddbc6469ecf79ea9fc1c3ff284c2e5 | refs/heads/master | 2023-05-10T22:27:01.093996 | 2023-05-07T11:59:52 | 2023-05-07T11:59:52 | 215,345,303 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def get_ordinal_suffix(number):
"""Receives a number int and returns it appended with its ordinal suffix,
so 1 -> 1st, 2 -> 2nd, 4 -> 4th, 11 -> 11th, etc.
Rules:
https://en.wikipedia.org/wiki/Ordinal_indicator#English
- st is used with numbers ending in 1 (e.g. 1st, pronounced first)
- nd is used with numbers ending in 2 (e.g. 92nd, pronounced ninety-second)
- rd is used with numbers ending in 3 (e.g. 33rd, pronounced thirty-third)
- As an exception to the above rules, all the "teen" numbers ending with
11, 12 or 13 use -th (e.g. 11th, pronounced eleventh, 112th,
pronounced one hundred [and] twelfth)
- th is used for all other numbers (e.g. 9th, pronounced ninth).
"""
suffixes = {0: "th", 1: "st", 2: "nd", 3: "rd", 4:"th", 5:"th"
, 6:"th", 7:"th", 8:"th", 9:"th", 11: "th", 12: "th", 13:"th"}
#For teen numbers exceptions 11,12,13
if number == 11 or number == 12 or number == 13:
return str(number) + suffixes.get(number)
#For 111,1111,11111...
elif number % 100 == 11:
return str(number) + suffixes.get(number % 100)
#For all other numbers
elif number % 10 in suffixes.keys():
return str(number) + suffixes.get(number % 10)
| UTF-8 | Python | false | false | 1,283 | py | 75 | ordinal.py | 73 | 0.603274 | 0.539361 | 0 | 25 | 50.28 | 82 |
profitware/restful-image-rotator | 5,592,047,448,504 | 48fed2046c07cb9cbda4ddd1a7e2702af4dab270 | dbf128fe8ac2bfd891b0c680902b315b18a69e58 | /rotator/api/v1/__init__.py | fe87e00964556f748bc40791ac35e7651e0a6331 | [
"MIT"
]
| permissive | https://github.com/profitware/restful-image-rotator | ffd1914c59e8a9ec7b7121906614fc4c260473f4 | d396b60e905a9d96ef5dd5d77df741b64094c660 | refs/heads/master | 2020-05-19T08:46:22.942916 | 2015-02-10T14:38:46 | 2015-02-10T14:38:46 | 30,429,682 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'Sergey Sobko'
from json import dumps
from twisted.web.resource import Resource
from rotator.api import log_me
from rotator.api.v1.common import check_content_type, has_api_method, generate_link, \
API_VERSION, RELS
class RootResource(Resource):
# pylint: disable=too-few-public-methods
def getChild(self, name, request):
log_me('getChild', name, request)
assert check_content_type(request)
if name == '':
return self
return Resource.getChild(self, name, request)
def render_GET(self, request):
# pylint: disable=invalid-name
api_version = API_VERSION
links_list = list()
request.setHeader('content-type', 'application/json')
for resource_name, child in self.children.iteritems():
for method, rel_name in RELS.iteritems():
if has_api_method(child, method):
links_list.append(
generate_link(request, resource_name, rel_name, method)
)
return_dict = {
'version': api_version,
'links': links_list
}
log_me(return_dict)
request.setResponseCode(200)
return dumps(return_dict)
class V1Resource(RootResource):
# pylint: disable=too-few-public-methods
pass
| UTF-8 | Python | false | false | 1,372 | py | 17 | __init__.py | 14 | 0.599854 | 0.595481 | 0 | 58 | 22.655172 | 86 |
MMaltez/CLItools | 6,451,040,916,468 | 66553232d8cae4545053c359039e3dc2338acbdb | 4b9bed2f2d618a74cc1d0fe3c055d743093fd175 | /spaces2tab.py | aa0f984c1f6dd586c26760841fd6ba84265c5707 | []
| no_license | https://github.com/MMaltez/CLItools | e05d14b510c774dea8967db3803ac334a1de2f66 | 9c6af2019a2910301f8401a736e47d66df276f2a | refs/heads/master | 2020-04-21T02:49:03.041256 | 2019-11-04T11:53:52 | 2019-11-04T11:53:52 | 169,266,232 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convert leading spaces to tabs.
@author Miguel Maltez Jose
@created 20180525
"""
import sys
def spaceCount(line):
"""Returns number of leading spaces."""
i = 0
while line[i] == " ":
i += 1
return i
def processFile(infile, outfile, indentation=4):
    for line in infile:
        ## space count
        sc = spaceCount(line)
        if sc % indentation == 0:
            ## tab count
            tc = sc // indentation
            line = line.replace(" "*indentation, "\t", tc)
        outfile.write(line)
def main():
import argparse
parser = argparse.ArgumentParser(description="Converts spaces to tabs.")
parser.add_argument("infile"
, help="input file, defaults to stdin."
, type=argparse.FileType('r')
, default=sys.stdin
)
parser.add_argument("outfile"
, help="output file, defaults to stdout."
, nargs='?'
, type=argparse.FileType('w')
, default=sys.stdout
)
parser.add_argument("-s", "--spaces"
, help="number of spaces for each tab, defaults to 4"
, default=4
)
args = parser.parse_args()
processFile(args.infile, args.outfile, args.spaces)
if "__main__" == __name__:
main()
| UTF-8 | Python | false | false | 1,117 | py | 10 | spaces2tab.py | 8 | 0.655327 | 0.641898 | 0 | 50 | 21.34 | 73 |
LizaPersonal/personal_exercises | 4,501,125,735,818 | ad2e21d4c35932b4de9e978ff3d240de3a01f85f | c6dc8b682aea706b18b05952f791e01989db3669 | /LearnHardWay/ex48/tests/parser_tests.py | a91091c85cd33a844b266b0549e58d6a7f2ac156 | []
| no_license | https://github.com/LizaPersonal/personal_exercises | aeb9ceb2593a6d5ee1a8e9f7c0862ce638acd29b | 649dc0c116861995fbf58b4736a0c66fd75d648c | refs/heads/master | 2021-04-03T02:17:51.850676 | 2018-07-31T21:10:59 | 2018-07-31T21:10:59 | 125,123,778 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from nose.tools import *
from ..ex48 import parser
def test_peek():
assert_equal(parser.peek([('direction', 'north')]), 'direction')
result = parser.peek([('noun', 'ball'),
('verb', 'throw')])
assert_equal(result, 'noun')
def test_match():
assert_equal(parser.match([('direction', 'north')], 'direction'), ('direction', 'north'))
result = parser.match([('noun', 'ball'),
('verb', 'throw')], 'noun')
assert_equal(result, ('noun', 'ball'))
assert_equal(parser.match([('stop', 'the')], 'noun'), None)
def test_skip():
pass
def test_parse_verb():
assert_equal(parser.parse_verb([('verb', 'jump')]), ('verb', 'jump'))
result = parser.parse_verb([('stop', 'the'),
('verb', 'throw')])
assert_equal(result, ('verb', 'throw'))
# assert_raises(parser.ParserError("Expected a verb next."), parser.parse_verb([('noun', 'cat')]))
def test_parse_object():
assert_equal(parser.parse_object([('noun', 'building')]), ('noun', 'building'))
assert_equal(parser.parse_object([('direction', 'west')]), ('direction', 'west'))
result = parser.parse_object([('stop', 'the'),
('noun', 'chair')])
assert_equal(result, ('noun', 'chair'))
def test_parse_subject():
assert_equal(parser.parse_subject([('noun', 'hair')]), ('noun', 'hair'))
assert_equal(parser.parse_subject([('verb', 'run')]), ('noun', 'player'))
result = parser.parse_subject([('stop', 'the'),
('noun', 'tree')])
assert_equal(result, ('noun', 'tree'))
def test_parse_sentence():
result = parser.parse_sentence([('stop', 'the'),
('noun', 'boy'),
('verb', 'ran'),
('stop', 'to'),
('noun', 'town')])
assert_equal(result.subject, 'boy')
assert_equal(result.verb, 'ran')
assert_equal(result.object, 'town')
result = parser.parse_sentence([('verb', 'walks'),
('direction', 'left')])
assert_equal(result.subject, 'player')
assert_equal(result.verb, 'walks')
assert_equal(result.object, 'left')
def test_errors():
pass | UTF-8 | Python | false | false | 2,310 | py | 80 | parser_tests.py | 68 | 0.515584 | 0.514719 | 0 | 65 | 34.553846 | 102 |
nicolassnider/python_39 | 5,798,205,880,354 | 12af0f49f5090bf0cfb4be5a65668642c2f81f57 | 94a20c93910120d673d2a0f3dd7d648912d0df53 | /seccion16/47 - ej1.py | 314243a98f7a3bf39baa5c56ffd4401e42456138 | []
| no_license | https://github.com/nicolassnider/python_39 | 548a8af706e163ec34bf8cea606c5171a4a736b0 | 2edf11be473a842fa426d47aacbe80fd370d8f10 | refs/heads/master | 2023-02-05T23:11:20.636687 | 2020-12-18T02:58:32 | 2020-12-18T02:58:32 | 316,996,398 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def superposicion(lista1, lista2):
"""
docstring
"""
for el in lista1:
for el2 in lista2:
return True
pass
return False
l1 = [1,2,3,4,5]
l2 = [10,9,8,7,6]
print (superposicion(l1,l2))
| UTF-8 | Python | false | false | 233 | py | 65 | 47 - ej1.py | 56 | 0.540773 | 0.454936 | 0 | 15 | 14.533333 | 34 |
Smirl/baroness | 609,885,369,983 | 343852212262224b71720e8be6fd10c077279ad1 | 1bc94e2249e01a5992fdb8ff6f9d95da58eb37b4 | /tests/check_version.py | 6d711a7ec08b409e9bc2a536205dda516e952f87 | []
| no_license | https://github.com/Smirl/baroness | 34ce0762d34dead02db73f69d2a88d45a5ccb4dc | 3286ecbb835d0f9dcbcba83a37b182bc8b59e7d0 | refs/heads/master | 2021-09-24T21:54:45.081672 | 2018-10-15T09:40:49 | 2018-10-15T09:40:49 | 112,948,604 | 3 | 0 | null | false | 2018-10-15T09:40:50 | 2017-12-03T17:33:32 | 2018-10-15T09:32:32 | 2018-10-15T09:40:49 | 23 | 1 | 0 | 0 | Python | false | null | """Test that version in version.txt if unique when we deploy."""
from collections import namedtuple
from functools import total_ordering
import os
import subprocess
import re
SEMVER_REGEX = re.compile('^(\d+)\.(\d+)\.(\d+)$')
@total_ordering
class Version(namedtuple('Version', 'major minor patch')):
"""A namedtuple with semver ordering."""
def __eq__(self, other):
"""Compare the parts."""
return (
(self.major == other.major) and
(self.minor == other.minor) and
(self.patch == other.patch)
)
def __gt__(self, other):
"""Compare versions by their parts."""
return (
(self.major > other.major) or
(self.major == other.major and self.minor > other.minor) or
(self.major == other.major and self.minor == other.minor and self.patch > other.patch)
)
def __str__(self):
"""The original string hopefully."""
return '.'.join([self.major, self.minor, self.patch])
def main():
"""Get the latest git tag and compare to version.txt."""
path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'baroness', 'version.txt')
with open(path) as f:
current_version = Version(*f.read().strip().split('.'))
    tags = subprocess.check_output(['git', 'tag']).decode().splitlines()
latest_version = next(iter(sorted(
(Version(*SEMVER_REGEX.match(tag).groups()) for tag in tags if SEMVER_REGEX.match(tag)),
reverse=True
)))
assert str(current_version) not in tags, '{} is in {}'.format(current_version, tags)
assert current_version > latest_version, '{} not greater than {}'.format(current_version, latest_version)
print('{} is a valid version'.format(current_version))
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 1,808 | py | 14 | check_version.py | 8 | 0.605088 | 0.605088 | 0 | 58 | 30.172414 | 109 |
woohams/Python | 7,112,465,860,402 | 4d51270476e3a0027df81e88c0de8e499becae2c | 8f72a48dbc8e066f2c8aa72a04d913c254b12781 | /01_start.py | ae9e63eafcca672edecb70010492d77e015c511a | []
| no_license | https://github.com/woohams/Python | 2a0ee367cfdc609ee3ce72a0275a14921795d923 | af72dea3e0e5747dab056d0ff5bbe9456d22cfed | refs/heads/master | 2020-06-18T16:06:19.216659 | 2019-10-05T16:33:43 | 2019-10-05T16:33:43 | 196,359,124 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #주석
'''
indentation: 4 spaces / 2 spaces / 1 tab
REPL : Read Eval Print Loop
Interactive shell : an interactive, conversational programming environment
'''
# variable = value
a = 100
b = 200
# printing output
print(a)
print(b)
print(a+b)
# printing characters (strings)
print('a')
print("a")
print('a'+"b")
# + is not possible here because the types differ (str + int raises a TypeError)
print("a = " + a)
| UTF-8 | Python | false | false | 373 | py | 34 | 01_start.py | 33 | 0.477816 | 0.447099 | 0 | 26 | 9.115385 | 41 |
byuccl/spydrnet | 6,528,350,335,336 | f3c254225790a049327d24330dd7b2f4aa2ff20f | b19c00e9af1a6971e0a38220d8936afb0c0d312e | /spydrnet/composers/verilog/tests/test_composer_unit.py | 2c8082078e07ce4ee122d5ab421ecc7687608d1f | [
"BSD-3-Clause"
]
| permissive | https://github.com/byuccl/spydrnet | 94519f4f6731552890b155af54f4e06df78dd7ed | ea580b6b9f81821f6ed9a5e14929ccb21498e5b4 | refs/heads/master | 2023-06-01T13:26:29.410556 | 2023-04-18T17:12:23 | 2023-04-18T17:12:23 | 210,388,987 | 66 | 16 | BSD-3-Clause | false | 2023-09-14T18:25:04 | 2019-09-23T15:25:09 | 2023-09-11T04:51:53 | 2023-09-14T18:25:03 | 47,821 | 65 | 17 | 44 | Python | false | false | #Copyright 2021
#Author Dallin Skouson
#see the license for details
#
#Tests the Verilog composer's functions and output
from collections import OrderedDict
import unittest
from unittest.case import expectedFailure
import spydrnet as sdn
from spydrnet.composers.verilog.composer import Composer
class TestVerilogComposerUnit(unittest.TestCase):
class TestFile:
        '''Represents a file (it exposes the write function the composer uses);
        it can be used as a drop-in replacement for the composer's file and
        saves everything that is written to a string for later comparison.'''
def __init__(self):
self.written = ""
def write(self, text):
self.written += text
def clear(self):
self.written = ""
def compare(self, text, should_match = True):
self.written = self.written.lstrip()
if (text == self.written) == should_match:
return True
else:
print("The composer wrote:")
print('"' + self.written + '"')
print("This was compared to:")
print('"' + text + '"')
if not should_match:
print("and these are not supposed to match")
else:
print("and these should have matched")
print("\n")
return False
def initialize_tests(self):
composer = Composer()
composer.file = self.TestFile()
return composer
def initialize_netlist(self):
netlist = sdn.Netlist()
netlist.name = "test_netlist"
return netlist
def initialize_library(self):
netlist = self.initialize_netlist()
library = netlist.create_library()
library.name = "test_library"
return library
def initialize_definition(self):
library = self.initialize_library()
definition = library.create_definition()
definition.name = "test_definition"
return definition
def initialize_instance_parameters(self, instance):
instance["VERILOG.Parameters"] = OrderedDict()
instance["VERILOG.Parameters"]["key"] = "value"
instance["VERILOG.Parameters"]["key2"] = "value2"
expected1 = ".key(value)"
expected2 = ".key2(value2)"
return expected1, expected2
def initialize_instance_port_connections(self, instance, definition):
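        # Build a reference definition with single-bit, multi-bit, offset,
        # partial and concatenated ports, wire them all to the instance, and
        # return each port together with the exact connection string the
        # composer is expected to emit for it.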
ref_def = definition.library.create_definition()
instance.reference = ref_def
ref_def.name = "reference_definition"
single_bit_port = ref_def.create_port()
single_bit_port.create_pin()
single_bit_port.is_downto = True
single_bit_port.name = "single_bit_port"
single_bit_cable = definition.create_cable()
single_bit_cable.name = "single_bit_cable"
single_bit_cable.is_downto = True
single_bit_cable.create_wire()
multi_bit_port = ref_def.create_port()
multi_bit_port.is_downto = True
multi_bit_port.create_pins(4)
multi_bit_port.name = "multi_bit_port"
multi_bit_port_offset = ref_def.create_port()
multi_bit_port_offset.lower_index = 4
multi_bit_port_offset.is_downto = True
multi_bit_port_offset.create_pins(4)
multi_bit_port_offset.name = "multi_bit_port_offset"
partial_port = ref_def.create_port()
partial_port.create_pins(2)
partial_port.is_downto = True
partial_port.name = "partial_port"
multi_bit_cable = definition.create_cable()
multi_bit_cable.create_wires(4)
multi_bit_cable.name = "multi_bit_cable"
multi_bit_cable.is_downto = True
concatenated_port = ref_def.create_port()
concatenated_port.create_pins(4)
concatenated_port.name = "concatenated_port"
ccs = []
for i in range(4):
cable = definition.create_cable()
cable.create_wire()
cable.is_downto = True
cable.name = "cc_" + str(i)
ccs.append(cable)
single_bit_cable.wires[0].connect_pin(instance.pins[single_bit_port.pins[0]])
for i in range(4):
multi_bit_cable.wires[i].connect_pin(instance.pins[multi_bit_port.pins[i]])
multi_bit_cable.wires[i].connect_pin(instance.pins[multi_bit_port_offset.pins[i]])
ccs[i].wires[0].connect_pin(instance.pins[concatenated_port.pins[i]])
for i in range(2):
multi_bit_cable.wires[i].connect_pin(instance.pins[partial_port.pins[i]])
single_bit_expected = "." + single_bit_port.name + "(" + single_bit_cable.name + ")"
multi_bit_expected = "." + multi_bit_port.name + "(" + multi_bit_cable.name + "[" + str(len(multi_bit_cable.wires) - 1 + multi_bit_cable.lower_index) + ":" + \
str(multi_bit_cable.lower_index) + "]"")"
offset_expected = "." + multi_bit_port_offset.name + "(" + multi_bit_cable.name + "[" + str(len(multi_bit_cable.wires) - 1 + multi_bit_cable.lower_index) + ":" + \
str(multi_bit_cable.lower_index) + "]"")"
partial_expected = "." + partial_port.name + "(" + multi_bit_cable.name + "[1:0])"
concatenated_expected = "." + concatenated_port.name + "({" + ccs[3].name + ', ' + ccs[2].name + ', ' + ccs[1].name + ', ' + ccs[0].name + "})"
return single_bit_port, single_bit_expected, \
multi_bit_port, multi_bit_expected, \
multi_bit_port_offset, offset_expected, \
partial_port, partial_expected,\
concatenated_port, concatenated_expected\
def test_write_header(self):
composer = self.initialize_tests()
netlist = sdn.Netlist()
netlist.name = "Netlist_name"
composer._write_header(netlist)
assert composer.file.compare("//Generated from netlist by SpyDrNet\n//netlist name: Netlist_name\n")
def test_write_brackets_single_bit(self):
#def _write_brackets(self, bundle, low_index, high_index):
composer = self.initialize_tests()
port = sdn.Port()
cable = sdn.Cable()
cable_name = "my_cable"
port_name = "my_port"
port.name = port_name
cable.name = cable_name
port.create_pin()
cable.create_wire()
composer._write_brackets(port, None, None)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets(port, 0, None)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets(port, None, 0)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets(port, 0, 0)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets(cable, None, None)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets(cable, 0, None)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets(cable, None, 0)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets(cable, 0, 0)
assert composer.file.compare("")
composer.file.clear()
#none of these should write because they are all single bit.
def test_write_brackets_single_bit_offset(self):
#def _write_brackets(self, bundle, low_index, high_index):
composer = self.initialize_tests()
port = sdn.Port()
cable = sdn.Cable()
cable_name = "my_cable"
port_name = "my_port"
port.name = port_name
cable.name = cable_name
port.create_pin()
cable.create_wire()
port.lower_index = 4
cable.lower_index = 4
composer._write_brackets(port, None, None)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets(port, 4, None)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets(port, None, 4)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets(port, 4, 4)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets(cable, None, None)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets(cable, 4, None)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets(cable, None, 4)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets(cable, 4, 4)
assert composer.file.compare("")
composer.file.clear()
#none of these should write because they are all single bit.
def test_write_brackets_multi_bit(self):
composer = self.initialize_tests()
port = sdn.Port()
cable = sdn.Cable()
cable_name = "my_cable"
port_name = "my_port"
port.name = port_name
cable.name = cable_name
port.create_pins(4) #input [3:0] my_input;
port.is_downto = True
cable.create_wires(4) #wire [3:0] my_wire;
cable.is_downto = True
composer._write_brackets(port, None, None)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets(port, 1, None)
assert composer.file.compare("[1]")
composer.file.clear()
composer._write_brackets(port, None, 2)
assert composer.file.compare("[2]")
composer.file.clear()
composer._write_brackets(port, 2, 2)
assert composer.file.compare("[2]")
composer.file.clear()
composer._write_brackets(port, 0, 3)
assert composer.file.compare("[3:0]")
composer.file.clear()
composer._write_brackets(port, 1, 2)
assert composer.file.compare("[2:1]")
composer.file.clear()
composer._write_brackets(cable, None, None)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets(cable, 1, None)
assert composer.file.compare("[1]")
composer.file.clear()
composer._write_brackets(cable, None, 2)
assert composer.file.compare("[2]")
composer.file.clear()
composer._write_brackets(cable, 2, 2)
assert composer.file.compare("[2]")
composer.file.clear()
composer._write_brackets(cable, 0, 3)
assert composer.file.compare("[3:0]")
composer.file.clear()
composer._write_brackets(cable, 1, 2)
assert composer.file.compare("[2:1]")
composer.file.clear()
def test_write_brackets_multi_bit_offset(self):
composer = self.initialize_tests()
port = sdn.Port()
cable = sdn.Cable()
cable_name = "my_cable"
port_name = "my_port"
port.name = port_name
cable.name = cable_name
port.create_pins(4) #input [3:0] my_input;
port.is_downto = True
port.lower_index = 4
cable.create_wires(4) #wire [3:0] my_wire;
cable.is_downto = True
cable.lower_index = 4
composer._write_brackets(port, None, None)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets(port, 5, None)
assert composer.file.compare("[5]")
composer.file.clear()
composer._write_brackets(port, None, 6)
assert composer.file.compare("[6]")
composer.file.clear()
composer._write_brackets(port, 6, 6)
assert composer.file.compare("[6]")
composer.file.clear()
composer._write_brackets(port, 4, 7)
assert composer.file.compare("[7:4]")
composer.file.clear()
composer._write_brackets(port, 5, 6)
assert composer.file.compare("[6:5]")
composer.file.clear()
composer._write_brackets(cable, None, None)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets(cable, 5, None)
assert composer.file.compare("[5]")
composer.file.clear()
composer._write_brackets(cable, None, 6)
assert composer.file.compare("[6]")
composer.file.clear()
composer._write_brackets(cable, 6, 6)
assert composer.file.compare("[6]")
composer.file.clear()
composer._write_brackets(cable, 4, 7)
assert composer.file.compare("[7:4]")
composer.file.clear()
composer._write_brackets(cable, 5, 6)
assert composer.file.compare("[6:5]")
composer.file.clear()
def test_write_brackets_fail(self):
pass #we should add some tests to test out of bounds on the brackets.
def test_write_brackets_defining(self):
composer = self.initialize_tests()
def initialize_bundle(bundle, offset, width):
if isinstance(bundle, sdn.Port):
bundle.create_pins(width)
else: #it's a cable
bundle.create_wires(width)
bundle.is_downto = True
bundle.lower_index = offset
return bundle
b1 = initialize_bundle(sdn.Port(), 0, 1)
b2 = initialize_bundle(sdn.Cable(), 4, 1)
b3 = initialize_bundle(sdn.Port(), 0, 4)
b4 = initialize_bundle(sdn.Cable(), 4, 4)
composer._write_brackets_defining(b1)
assert composer.file.compare("")
composer.file.clear()
composer._write_brackets_defining(b2)
assert composer.file.compare("[4:4]")
composer.file.clear()
composer._write_brackets_defining(b3)
assert composer.file.compare("[3:0]")
composer.file.clear()
composer._write_brackets_defining(b4)
assert composer.file.compare("[7:4]")
composer.file.clear()
def test_write_name(self):
composer = self.initialize_tests()
o = sdn.Cable() #Type of this shouldn't really matter
valid_names = ["basic_name", "\\escaped ", "\\fads#@%!$!@#%$[0:4320] "]
for n in valid_names:
o.name = n
composer._write_name(o)
assert composer.file.compare(n)
composer.file.clear()
@unittest.expectedFailure
def test_write_none_name(self):
composer = self.initialize_tests()
o = sdn.Cable()
composer._write_name(o)
def test_write_invalid_name(self):
composer = self.initialize_tests()
o = sdn.Cable()
o.name = "\\escaped_no_space"
composer._write_name(o)
def test_write_instance_port(self):
composer = self.initialize_tests()
definition = self.initialize_definition()
instance = definition.create_child()
instance.name = "ports_test"
single_bit_port, single_bit_expected, \
multi_bit_port, multi_bit_expected, \
multi_bit_port_offset, offset_expected, \
partial_port, partial_expected, \
concatenated_port, concatenated_expected\
= self.initialize_instance_port_connections(instance, definition)
composer._write_instance_port(instance, single_bit_port)
assert composer.file.compare(single_bit_expected)
composer.file.clear()
composer._write_instance_port(instance, multi_bit_port)
assert composer.file.compare(multi_bit_expected)
composer.file.clear()
composer._write_instance_port(instance, multi_bit_port_offset)
assert composer.file.compare(offset_expected)
composer.file.clear()
composer._write_instance_port(instance, partial_port)
assert composer.file.compare(partial_expected)
composer.file.clear()
composer._write_instance_port(instance, concatenated_port)
assert composer.file.compare(concatenated_expected)
composer.file.clear()
composer._write_instance_ports(instance)
expected = "(\n"
first = True
expected_strs = [single_bit_expected, multi_bit_expected, offset_expected, partial_expected, concatenated_expected]
for i in expected_strs:
if not first:
expected += ",\n"
expected += " "
expected += i
first = False
expected += "\n );"
assert composer.file.compare(expected)
def test_write_instance_parameters(self):
composer = self.initialize_tests()
definition = self.initialize_definition()
instance = definition.create_child()
instance.name = "ports_test"
ref_def = definition.library.create_definition()
instance.reference = ref_def
expected1, expected2 =self.initialize_instance_parameters(instance)
#instance["VERILOG.Parameters"]["no_value"] = None #always has value?
composer._write_instance_parameter("key", "value")
assert composer.file.compare(expected1)
composer.file.clear()
composer._write_instance_parameter("key2", "value2")
assert composer.file.compare(expected2)
composer.file.clear()
# composer._write_instance_parameter("no_value", None)
# expected3 = ".key()"
# assert composer.file.compare(expected2)
# composer.file.clear()
composer._write_instance_parameters(instance)
expected = "#(\n " + expected1 + ",\n " + expected2 + "\n )\n"
assert composer.file.compare(expected)
def test_write_full_instance(self):
composer = self.initialize_tests()
definition = self.initialize_definition()
instance = definition.create_child()
instance.name = "instance_test"
expected1, expected2 = self.initialize_instance_parameters(instance)
parameters_expected = "#(\n " + expected1 + ",\n " + expected2 + "\n )\n"
single_bit_port, single_bit_expected, \
multi_bit_port, multi_bit_expected, \
multi_bit_port_offset, offset_expected, \
partial_port, partial_expected, \
concatenated_port, concatenated_expected\
= self.initialize_instance_port_connections(instance, definition)
port_expected = "\n (\n"
first = True
expected_strs = [single_bit_expected, multi_bit_expected, offset_expected, partial_expected, concatenated_expected]
for i in expected_strs:
if not first:
port_expected += ",\n"
port_expected += " "
port_expected += i
first = False
port_expected += "\n );"
composer._write_module_body_instance(instance)
expected = instance.reference.name + " " + parameters_expected + " " + instance.name + port_expected + "\n"
assert composer.file.compare(expected)
def test_write_module_header(self):
composer = self.initialize_tests()
definition = self.initialize_definition()
definition["VERILOG.Parameters"] = OrderedDict()
definition["VERILOG.Parameters"]["key"] = "value"
definition["VERILOG.Parameters"]["no_default"] = None
port = definition.create_port()
port.name = "my_port"
port.create_pin()
port = definition.create_port()
port.name = "my_port2"
port.create_pin()
port.direction = sdn.Port.Direction.IN
composer._write_module_header(definition)
expected = "module " + definition.name + "\n#(\n parameter key = value,\n parameter no_default\n)(\n my_port,\n my_port2\n);\n\n"
assert composer.file.compare(expected)
def test_write_module_ports_header_and_body_alias(self):
composer = self.initialize_tests()
definition = self.initialize_definition()
port_alias = definition.create_port()
port_alias.name = "aliased"
port_alias.create_pins(2)
port_alias.direction = sdn.Port.Direction.IN
c1 = definition.create_cable("c1")
c2 = definition.create_cable("c2")
c1.create_wire()
c2.create_wire()
c1.wires[0].connect_pin(port_alias.pins[0])
c2.wires[0].connect_pin(port_alias.pins[1])
composer._write_module_header_port(port_alias)
assert composer.file.compare("." + port_alias.name + "({"+ c2.name + ", " + c1.name +"})")
composer.file.clear()
composer._write_module_body_port(port_alias)
assert composer.file.compare("input " + c1.name + ";\n " + "input " + c2.name + ";\n")
composer.file.clear()
def test_write_module_ports_header_and_body_multi(self):
composer = self.initialize_tests()
definition = self.initialize_definition()
port_multi = definition.create_port("multi_bit")
port_multi.name = "multi_bit"
port_multi.create_pins(4)
port_multi.direction = sdn.Port.Direction.OUT
cable_multi = definition.create_cable("multi_bit")
cable_multi.create_wires(4)
for i in range(4):
cable_multi.wires[i].connect_pin(port_multi.pins[i])
composer._write_module_header_port(port_multi)
assert composer.file.compare(port_multi.name)
composer.file.clear()
composer._write_module_body_port(port_multi)
assert composer.file.compare("output [3:0]" + port_multi.name + ";\n")
composer.file.clear()
def test_write_module_ports_header_and_body_disconnect(self):
composer = self.initialize_tests()
definition = self.initialize_definition()
port_disconnect = definition.create_port("disconnected")
port_disconnect.direction = sdn.Port.Direction.INOUT
port_disconnect.create_pin()
composer._write_module_header_port(port_disconnect)
assert composer.file.compare(port_disconnect.name)
composer.file.clear()
composer._write_module_body_port(port_disconnect)
assert composer.file.compare("inout " + port_disconnect.name + ';\n')
composer.file.clear()
def test_write_module_body_cables(self):
composer = self.initialize_tests()
definition = self.initialize_definition()
cable = definition.create_cable(name = "test_cable", is_downto = True)
cable.create_wires(4)
composer._write_module_body_cable(cable)
assert composer.file.compare("wire [3:0]" + cable.name + ";\n")
def test_assignment_single_bit(self):
pass
def test_assignment_multi_bit(self):
pass
| UTF-8 | Python | false | false | 22,662 | py | 198 | test_composer_unit.py | 149 | 0.6003 | 0.591828 | 0 | 632 | 34.856013 | 171 |
lmb633/leetcode | 8,297,876,834,201 | cbefdd38853ed4d731579671cad6f52d45894b04 | c0fad90611a6e943277c3d79eeb48ccd5f0d0a88 | /300lengthOfLIS.py | 619c2d00f89b68d69730096a3f6b594710c323aa | []
| no_license | https://github.com/lmb633/leetcode | e2da31984af07b9e16787f4d57f82dab2dcb551a | d91568d245dd8fb66f46ff73737cbad974f490a6 | refs/heads/master | 2021-07-19T16:07:40.864854 | 2021-02-24T10:57:40 | 2021-02-24T10:57:40 | 243,146,182 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution(object):
def lengthOfLIS(self, nums):
length = len(nums)
if length == 0:
return 0
result = [1 for i in range(length)]
for i in range(length):
for j in range(i):
if nums[i] > nums[j]:
if result[j] > result[i] - 1:
result[i] = result[j] + 1
return max(result)
class Solution2(object):
def lengthOfLIS(self, nums):
if not nums:
return 0
max_len = 1
stack = []
stack.append(nums[0])
for num in nums:
if stack[-1] < num:
stack.append(num)
if len(stack) > max_len:
max_len = len(stack)
elif stack[-1] > num:
idx = self.find(stack, 0, len(stack) - 1, num)
# print(stack,idx,num)
if stack[idx] > num:
stack[idx] = num
elif stack[idx] < num:
stack[idx + 1] = num
# print(stack)
return max_len
def find(self, nums, low, high, target):
if low >= high:
return low
mid = (low + high) / 2
if nums[mid] == target:
return mid
elif nums[mid] > target:
return self.find(nums, low, mid - 1, target)
else:
return self.find(nums, mid + 1, high, target)
| UTF-8 | Python | false | false | 1,480 | py | 138 | 300lengthOfLIS.py | 138 | 0.426351 | 0.414865 | 0 | 46 | 30.173913 | 62 |
francocruces/todolst-bdd | 5,042,291,633,586 | 19be062b5ef9a6966b06fcd071116a8e5994c031 | 9f5f2a3a507a23ac5be4e4e5411d0b987b30a334 | /todolist/models.py | 09621505bb64bfbaa7918c805aa69a3e0f385e40 | []
| no_license | https://github.com/francocruces/todolst-bdd | df0adbe25cae2f03f904613dc3acb6e9eaba3428 | b9ed6aaf94e7f25d033b515f9459b2af9494968d | refs/heads/master | 2020-03-09T21:16:03.458769 | 2018-04-27T13:53:20 | 2018-04-27T13:53:20 | 129,004,959 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import unicode_literals
from django.db import models
class Task(models.Model):
name = models.CharField(max_length=256)
| UTF-8 | Python | false | false | 145 | py | 9 | models.py | 3 | 0.731034 | 0.710345 | 0 | 6 | 22.5 | 43 |
opendr-eu/opendr | 1,778,116,490,453 | c160a7734be71d10297247cc3e670151ed2c6152 | 5f69a6549b8d5e417553d910622e6855b2ae679b | /projects/python/perception/pose_estimation/lightweight_open_pose/demos/wave_detection_demo.py | c7febd532f66a3ae362f8ad73d880dab9ca66411 | [
"Apache-2.0"
]
| permissive | https://github.com/opendr-eu/opendr | 822219f709613d77c5eb62c5d02808d344239835 | b3d6ce670cdf63469fc5766630eb295d67b3d788 | refs/heads/master | 2023-08-31T07:02:36.375231 | 2023-08-29T06:39:51 | 2023-08-29T06:39:51 | 293,755,225 | 535 | 82 | Apache-2.0 | false | 2023-09-13T16:53:34 | 2020-09-08T08:55:04 | 2023-09-11T17:57:18 | 2023-09-13T16:53:33 | 2,450,511 | 539 | 86 | 22 | Python | false | false | # Copyright 2020-2023 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import cv2
import time
from opendr.perception.pose_estimation import LightweightOpenPoseLearner
from opendr.perception.pose_estimation import draw, get_bbox
from numpy import std
def wave_detector(frame_list_):
"""
The wave detector works by first detecting the left or right wrist keypoints,
then checking if the keypoints are higher than the neck keypoint, which empirically produces
a more natural wave gesture, and finally calculating the standard deviation of these keypoints
on the x-axis over the last frames. If this deviation is higher than a threshold it assumes
that the hand is making a waving gesture.
:param frame_list_: A list where each element is the list of poses for a frame
:type frame_list_: list
:return: A dict where each key is a pose ID and the value is whether a wave gesture is
detected, 1 for waving, 0 for not waving, -1 can't detect gesture
:rtype: dict
"""
pose_waves_ = {} # pose_id: waving (waving = 1, not waving = 0, can't detect = -1)
# Loop through pose ids in last frame to check for waving in each one
for pose_id_ in frame_list_[-1].keys():
pose_waves_[pose_id_] = 0
# Get average position of wrists, get list of wrists positions on x-axis
r_wri_avg_pos = [0, 0]
l_wri_avg_pos = [0, 0]
r_wri_x_positions = []
l_wri_x_positions = []
for frame in frame_list_:
try:
if frame[pose_id_]["r_wri"][0] != -1:
r_wri_avg_pos += frame[pose_id_]["r_wri"]
r_wri_x_positions.append(frame[pose_id_]["r_wri"][0])
if frame[pose_id_]["l_wri"][0] != -1:
l_wri_avg_pos += frame[pose_id_]["l_wri"]
l_wri_x_positions.append(frame[pose_id_]["l_wri"][0])
except KeyError: # Couldn't find this pose_id_ in previous frames
pose_waves_[pose_id_] = -1
continue
r_wri_avg_pos = [r_wri_avg_pos[0] / len(frame_list_), r_wri_avg_pos[1] / len(frame_list_)]
l_wri_avg_pos = [l_wri_avg_pos[0] / len(frame_list_), l_wri_avg_pos[1] / len(frame_list_)]
r_wri_x_positions = [r_wri_x_positions[i] - r_wri_avg_pos[0] for i in range(len(r_wri_x_positions))]
l_wri_x_positions = [l_wri_x_positions[i] - l_wri_avg_pos[0] for i in range(len(l_wri_x_positions))]
pose_ = None # NOQA
if len(frame_list_) > 0:
pose_ = frame_list_[-1][pose_id_]
else:
pose_waves_[pose_id_] = -1
continue
r_wri_height, l_wri_height = r_wri_avg_pos[1], l_wri_avg_pos[1]
nose_height, neck_height = pose_["nose"][1], pose_["neck"][1]
if nose_height == -1 or neck_height == -1:
# Can't detect upper pose_ (neck-nose), can't assume waving
pose_waves_[pose_id_] = -1
continue
if r_wri_height == 0 and l_wri_height == 0:
# Can't detect wrists, can't assume waving
pose_waves_[pose_id_] = -1
continue
# Calculate the standard deviation threshold based on the distance between neck and nose to get proportions
# The farther away the pose is the smaller the threshold, as the standard deviation would be smaller due to
# the smaller pose
distance = neck_height - nose_height
std_threshold = 5 + ((distance - 50) / (200 - 50))*10
# Check for wrist movement over multiple frames
# Wrist movement is determined from wrist x position standard deviation
r_wrist_movement_detected = False
l_wrist_movement_detected = False
r_wri_x_pos_std, l_wri_x_pos_std = 0, 0 # NOQA
if r_wri_height < neck_height:
if len(r_wri_x_positions) > len(frame_list_) / 2:
r_wri_x_pos_std = std(r_wri_x_positions)
if r_wri_x_pos_std > std_threshold:
r_wrist_movement_detected = True
if l_wri_height < neck_height:
if len(l_wri_x_positions) > len(frame_list_) / 2:
l_wri_x_pos_std = std(l_wri_x_positions)
if l_wri_x_pos_std > std_threshold:
l_wrist_movement_detected = True
if r_wrist_movement_detected:
pose_waves_[pose_id_] = 1
elif l_wrist_movement_detected:
pose_waves_[pose_id_] = 1
return pose_waves_
class VideoReader(object):
def __init__(self, file_name):
self.file_name = file_name
try: # OpenCV needs int to read from webcam
self.file_name = int(file_name)
except ValueError:
pass
def __iter__(self):
self.cap = cv2.VideoCapture(self.file_name)
if not self.cap.isOpened():
raise IOError('Video {} cannot be opened'.format(self.file_name))
return self
def __next__(self):
was_read, img_ = self.cap.read()
if not was_read:
raise StopIteration
return img_
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--onnx", help="Use ONNX", default=False, action="store_true")
parser.add_argument("--device", help="Device to use (cpu, cuda)", type=str, default="cuda")
parser.add_argument("--accelerate", help="Enables acceleration flags (e.g., stride)", default=False,
action="store_true")
args = parser.parse_args()
onnx, device, accelerate = args.onnx, args.device, args.accelerate
if accelerate:
stride = True
stages = 0
half_precision = True
else:
stride = False
stages = 2
half_precision = False
pose_estimator = LightweightOpenPoseLearner(device=device, num_refinement_stages=stages,
mobilenet_use_stride=stride, half_precision=half_precision)
pose_estimator.download(path=".", verbose=True)
pose_estimator.load("openpose_default")
if onnx:
pose_estimator.optimize()
# Use the first camera available on the system
image_provider = VideoReader(0)
fps = 0
try:
counter = 0
frame_list = []
for img in image_provider:
start_time = time.perf_counter()
# Perform inference
poses = pose_estimator.infer(img)
# convert to dict with pose id as key for convenience
poses = {k: v for k, v in zip([poses[i].id for i in range(len(poses))], poses)}
pose_waves = {}
if len(poses) > 0:
frame_list.append(poses)
# Keep poses of last fps/2 frames (half a second of poses)
if fps != 0:
if len(frame_list) > int(fps/2):
frame_list = frame_list[1:]
else:
if len(frame_list) > 15:
frame_list = frame_list[1:]
pose_waves = wave_detector(frame_list)
end_time = time.perf_counter()
fps = 1.0 / (end_time - start_time)
for pose_id, pose in poses.items():
draw(img, pose)
x, y, w, h = get_bbox(pose)
if pose_waves[pose_id] == 1:
x, y, w, h = get_bbox(pose)
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.putText(img, "Waving", (x + 5, y + 20), cv2.FONT_HERSHEY_SIMPLEX,
0.75, (0, 255, 0), 2, cv2.LINE_AA)
if pose_waves[pose_id] == 0:
x, y, w, h = get_bbox(pose)
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.putText(img, "Not waving", (x + 5, y + 20), cv2.FONT_HERSHEY_SIMPLEX,
0.75, (0, 0, 255), 2, cv2.LINE_AA)
if pose_waves[pose_id] == -1:
x, y, w, h = get_bbox(pose)
cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 255), 2)
cv2.putText(img, "Can't detect waving", (x + 5, y + 20), cv2.FONT_HERSHEY_SIMPLEX,
0.75, (255, 255, 255), 2, cv2.LINE_AA)
# Wait a few frames for FPS to stabilize
if counter > 5:
cv2.putText(img, "FPS: %.2f" % (fps,), (50, 50), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 255, 255), 2, cv2.LINE_AA)
cv2.imshow('Result', img)
cv2.waitKey(1)
counter += 1
except KeyboardInterrupt as e:
print(e)
print("Average inference fps: ", fps)
| UTF-8 | Python | false | false | 9,241 | py | 1,235 | wave_detection_demo.py | 821 | 0.558922 | 0.539011 | 0 | 217 | 41.585253 | 115 |
JoeBarcus/flask_api | 2,551,210,592,756 | 548a783efef389db30db9ae95064c69bb1533e0d | 83faa30de7906563453529db8085632293d9b946 | /flask-restful/venv/lib/python3.7/heapq.py | 23f252edf8c0714f770ce6263e54bafbfb72071f | []
| no_license | https://github.com/JoeBarcus/flask_api | 4aac140659ec1532b6aadb3a5d161fadba05092d | 2d3d110d8217fb04447a98a18fec6b3f5afaba2f | refs/heads/master | 2020-05-26T18:04:52.842158 | 2019-05-24T01:29:31 | 2019-05-24T01:29:31 | 188,328,999 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | /home/joe/anaconda3/lib/python3.7/heapq.py | UTF-8 | Python | false | false | 42 | py | 46 | heapq.py | 44 | 0.809524 | 0.738095 | 0 | 1 | 42 | 42 |
gadia-aayush/sample | 10,093,173,167,334 | a1d7829491c95b519998cd6836801290c828ea80 | aa3b7c6a81a323d2e17a1be7cb7ce90a20d6099a | /cproject/get_address/apps.py | 52669702c1b80e14bca69b5dcc7a10ec50311b69 | []
| no_license | https://github.com/gadia-aayush/sample | fdf00a4a890af6e4380b133cc64d7df89c1defff | 145b83206f9fb0972d19bef9229da0c1bf0aede0 | refs/heads/master | 2022-12-22T16:54:50.228277 | 2020-08-18T20:26:05 | 2020-08-18T20:26:05 | 288,516,734 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.apps import AppConfig
class GetAddressConfig(AppConfig):
name = 'get_address'
| UTF-8 | Python | false | false | 96 | py | 82 | apps.py | 54 | 0.760417 | 0.760417 | 0 | 5 | 18.2 | 34 |
djohnson67/sPython3rd | 11,699,490,917,685 | 9cf1af42c51a4af9ae5f149de184fb9f9af0f6ce | c7a6cab7f62a55ca3afbca629103e2d28d822f26 | /math/io/file_write.py | 2a557be59b3b6e6513d2d52ee18654636223e289 | []
| no_license | https://github.com/djohnson67/sPython3rd | e7610c967aa5164e0ed2d2f414dba7e0ef6c1cd3 | dae305182fbd84bb58761a3fcd8e564afe88779d | refs/heads/master | 2020-05-15T09:20:54.878008 | 2019-05-05T16:45:45 | 2019-05-05T16:45:45 | 182,175,315 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #writes 3 lines of data to a file
def main():
#open file
outfile = open('philosophers.txt','w')
#write three names
outfile.write('John Locke\n')
outfile.write('David Hume\n')
outfile.write('Edmend Burke\n')
#close file
outfile.close()
#cal main
main() | UTF-8 | Python | false | false | 300 | py | 43 | file_write.py | 42 | 0.606667 | 0.603333 | 0 | 15 | 18.133333 | 42 |
k-pom/turbo-adventure | 3,032,246,958,830 | 3f9f872c979dcad980243cd1e87701309129dbcf | 92962659c2c183fab4b0fb41906a2038767bbd81 | /lib/python2.7/site-packages/motor/util.py | d97f7ad62096947c56377954080ed2f6f8f96bdd | []
| no_license | https://github.com/k-pom/turbo-adventure | 67c37454094558879c50f26f236c5debd4b3e5ad | 06021f1ad07b5ca5c024d2b41b8f2b414af9388c | refs/heads/master | 2021-01-01T15:24:54.415683 | 2013-07-19T02:47:49 | 2013-07-19T02:47:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright 2013 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A version of PyMongo's thread_util for Motor."""
import weakref
try:
from time import monotonic as _time
except ImportError:
from time import time as _time
import greenlet
class MotorGreenletIdent(object):
def __init__(self):
self._refs = {}
def watching(self):
"""Is the current thread or greenlet being watched for death?"""
return self.get() in self._refs
def unwatch(self, tid):
self._refs.pop(tid, None)
def get(self):
"""An id for this greenlet"""
return id(greenlet.getcurrent())
def watch(self, callback):
"""Run callback when this greenlet dies.
callback takes one meaningless argument.
"""
current = greenlet.getcurrent()
tid = self.get()
if hasattr(current, 'link'):
# This is a Gevent Greenlet (capital G), which inherits from
# greenlet and provides a 'link' method to detect when the
# Greenlet exits.
current.link(callback)
self._refs[tid] = None
else:
# This is a non-Gevent greenlet (small g), or it's the main
# greenlet.
self._refs[tid] = weakref.ref(current, callback)
class MotorGreenletCounter(object):
"""A greenlet-local counter.
"""
def __init__(self):
self.ident = MotorGreenletIdent()
self._counters = {}
def inc(self):
# Copy these references so on_thread_died needn't close over self
ident = self.ident
_counters = self._counters
tid = ident.get()
_counters.setdefault(tid, 0)
_counters[tid] += 1
if not ident.watching():
# Before the tid is possibly reused, remove it from _counters
def on_thread_died(ref):
ident.unwatch(tid)
_counters.pop(tid, None)
ident.watch(on_thread_died)
return _counters[tid]
def dec(self):
tid = self.ident.get()
if self._counters.get(tid, 0) > 0:
self._counters[tid] -= 1
return self._counters[tid]
else:
return 0
def get(self):
return self._counters.get(self.ident.get(), 0)
class ExceededMaxWaiters(Exception):
pass
| UTF-8 | Python | false | false | 2,846 | py | 38 | util.py | 28 | 0.611384 | 0.605411 | 0 | 101 | 27.168317 | 74 |
CatdBD/assorted_PhD_code | 1,975,684,978,801 | 47ec15cbbbb36dc910e2d52912422ebb3e750196 | f3986effabae6251136b72a1f98c209a08ab02af | /TG_cf_Moster.py | 2f844cf12d534a8a0e2f1869e4f9a5ab1ea6f8a2 | []
| no_license | https://github.com/CatdBD/assorted_PhD_code | 261cef54fda090e1ee56f2e1467c170e862db693 | fdebaf554b97513a66f53465184134f42f9d4193 | refs/heads/master | 2019-11-19T20:49:40.086186 | 2017-02-03T11:34:37 | 2017-02-03T11:34:37 | 80,823,717 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib.pyplot as plt
def import_digitised(fname):
import numpy as np
f = open(fname,"rb")
c = 0; x = []; y = []
print 'importing', fname,':'
print '======================='
for line in f:
c+=1
if c>6:
xy = line.rstrip(',\n').split(',')
x.append(xy[0])
y.append(xy[1])
print x[c-7],y[c-7]
print '======================='
print
f.close()
return np.array(x).astype('f'), np.array(y).astype('f')
H_0 = 70.4 #km/s/Mpc (Lambda-CDM cosmology)
h = H_0/100.
TG_data_solidline_V_circ, TG_data_solidline_M_bar = import_digitised("./digitised/Fig11TG_digitised_solidline.csv")
TG_data_dotdashedline_V_circ, TG_data_dotdashedline_M_bar = import_digitised("./digitised/Fig11TG_digitised_dotdashedline.csv")
TG_solidline_m = TG_data_solidline_M_bar + np.log10(h**(-2.))
TG_dotdashedline_m = TG_data_dotdashedline_M_bar + np.log10(h**(-2.))
Moster_data_solidline_M_halo, Moster_data_solidline_m = import_digitised("./digitised/Fig4Moster_digitised.csv")
V_circ_Moster = ( 2.8*(10.**(-2.)) ) * ( ( (10.**Moster_data_solidline_M_halo)*h )**0.316 )
import matplotlib as mpl
f = 40
import time
mpl.rc('text', usetex = True )
mpl.rc('font', family = 'serif', size = f)
mpl.rc('axes', labelsize = f )
mpl.rc('figure', dpi = 800, figsize = (8*3,6*3) )
plt.clf()
fig = plt.figure(1)
ax = fig.add_subplot(111)
ax.set_title( 'Comparison of circular halo velocity-baryonic mass relations in Moster and T-G\n' + str( time.ctime() ), fontsize = f-10 )
ax.set_xlabel('$V_\mathrm{circ}$',fontsize=f)
ax.set_ylabel('$M_\mathrm{halo}/M_\odot$',fontsize=f)
ax.yaxis.labelpad = 20
ax.xaxis.labelpad = 30
ax.set_xscale('log')
ax.scatter(TG_data_solidline_V_circ,TG_solidline_m, 60, label = 'T-G Figure 11', marker='s', color="k")
ax.plot(TG_data_solidline_V_circ,TG_solidline_m, '-', color="k")
ax.scatter(TG_data_dotdashedline_V_circ,TG_dotdashedline_m, 60, label = 'T-G Figure 11 (neglecting halo concentration)', marker='s', color="b")
ax.plot(TG_data_dotdashedline_V_circ,TG_dotdashedline_m, '-', color="b")
ax.scatter(V_circ_Moster,Moster_data_solidline_m, 60, label = 'Moster Figure 4', marker='o', color="r")
ax.plot(V_circ_Moster,Moster_data_solidline_m, '-', color="r")
logrange = [40,60,80,100,150,200,300,400,600,800,1000]
lograngestr = ['40 ', '60 ','80 ','100 ', '150 ', '200 ','300 ', '400 ','600 ','800 ','1000 ']
ax.axis( [ 40,1000, 7,12 ] )
ax.xaxis.set_ticks( logrange )
ax.yaxis.set_ticks(np.arange(7.,12.,0.5))
ax.set_xticklabels( lograngestr, rotation=0, fontsize = f-10)
ax.set_yticklabels( ['']+[ '%0.2f' % (i) for i in ( np.arange(7.,12,0.5) ) ][1:], rotation=0, fontsize = f-10)
ax.grid(True, which = 'both')
ax.legend(fancybox = True, prop = {'size':f-20}, loc = (0.02, 0.88))
fig.savefig('TG_cf_Moster.png')
| UTF-8 | Python | false | false | 2,778 | py | 6 | TG_cf_Moster.py | 5 | 0.645428 | 0.586033 | 0 | 70 | 38.657143 | 143 |
manish33scss/comp_vision | 3,367,254,360,776 | 6c447f7859309c6bd487b16658aa5796422a897f | 39872eea40405c5434480670d089aa7ae226603c | /convertimage2videocv.py | 076aac85ba6028c4bf57fceda2da748922720b49 | []
| no_license | https://github.com/manish33scss/comp_vision | 663c11801870f5aa7da546d2d62be52066dcb568 | fffd2736df7aa319a07f31ed533bf7cfb26a426d | refs/heads/master | 2023-02-03T18:31:02.307378 | 2023-01-28T10:23:17 | 2023-01-28T10:23:17 | 187,157,861 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import numpy as np
import glob
import os
img_array = []
for filename in os.scandir(r"D:\Work\Data\steps"):
img = cv2.imread(filename.path)
height, width, layers = img.shape
size = (width,height)
img_array.append(img)
out = cv2.VideoWriter('try1.avi',cv2.VideoWriter_fourcc(*'DIVX'), 24, size)
for i in range(len(img_array)):
out.write(img_array[i])
out.release()
| UTF-8 | Python | false | false | 402 | py | 6 | convertimage2videocv.py | 5 | 0.674129 | 0.656716 | 0 | 18 | 21.333333 | 75 |
Rossonero/qlin | 12,927,851,601,329 | e3567a402e678cb972d6a0cbe1ea4fc59ddf0355 | 4d2a007fb3810d0b1de676d0193eb24cb0134180 | /parser/UrlTransID.py | 3e75f9766dd36a913b77ce1f300132ecaadcdd11 | []
| no_license | https://github.com/Rossonero/qlin | 241a822402beb4b05fe69f024978148a5758a765 | 02cc017daf3e540cfd9b5bfe7e021c176d5e90e1 | refs/heads/master | 2021-01-18T00:26:45.747983 | 2012-02-13T01:43:12 | 2012-02-13T01:43:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import UrlSorter
class UrlTransID:
def __init__(self,url_ph):
'''
将urllist转化为docid
通过根据hash排序的方法
'''
self.urls=[]
f=open(url_ph)
lines=f.readlines()
f.close()
for l in lines:
self.urls.append(l.split())
self.trans_ph=url_ph
def sort(self):
'''
开始进行排序 使用cython进行优化
'''
sort=UrlSorter.UrlSorter(self.urls)
sort.quicksort(0,len(self.urls)-1)
def save(self,ph):
'''
将排序结果进行保存
'''
print 'begin to save the sorted url list'
strr=''
for i in self.urls:
strr+= i[0]+' '+i[1]+'\n'
f=open(ph,'w')
f.write(strr)
f.close()
def show(self):
for i in self.urls:
print hash(i[1])
class UrlTransDir:
'''
将 doc 重新民明
'''
def __init__(self,urlph):
'''
init
'''
self.urls=[]
f=open(urlph)
lines=f.readlines()
f.close()
for doc in lines:
self.urls.append(doc.split())
def renameDoc(self,ph):
'''
将文件rename
'''
print 'begin to rename doc'
for i,li in enumerate(self.urls):
docname = li[0]
print 'the doc is',
print docname
print 'the ph is',ph+'/'+docname
try:
os.rename(ph + '/' + docname,ph + '/'+str(i))
except:
print 'no such file'
if __name__=='__main__':
sort=UrlTransID('../store/urltest.txt')
sort.show()
sort.sort()
sort.save('../store/sorted_url.txt')
sort.show()
| UTF-8 | Python | false | false | 1,927 | py | 133 | UrlTransID.py | 41 | 0.450353 | 0.446012 | 0 | 107 | 15.607477 | 61 |
Eddie6382/python_practice | 8,718,783,610,973 | 0eaadbaa9bbbde24b101415c15da0aaf1d0af8e3 | 8ae87ae1120a4c30e43009eb393da939bc2cd1ec | /physics_hw/project/magnus_omega.py | bed33e4971cd3b4a8457d325568a90cd15748637 | []
| no_license | https://github.com/Eddie6382/python_practice | cb7e4d1b649f856e4c7e481ba51f7d8ff3e066b9 | a2b3b145a22f97bf0efdffafccd13e0328432823 | refs/heads/master | 2020-12-27T12:57:14.728715 | 2020-04-01T06:22:58 | 2020-04-01T06:22:58 | 237,911,201 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from vpython import *
g = vec(0, -9.8 ,0) # g = 9.8 m/s^2
size = 0.215 /2 # ball radius = 0.25 m
weight = 0.45 #ball weight
Cd = 0.47 #drag coefficient
Cm = 0.40 #magnus coefficient
Cw = 0.08 #rotational friction coefficient
rpm = 240 #initial rpm
density = 1.293
viscosity = 1.81E-5
v = vec(18, 9, 0)
omega = rpm/60*2*pi*norm(vec(0,1,0)) #initial omega
SP = size*mag(omega)/mag(v) #dimension less angular velocity
class soccer():
def __init__(self, size, pos, v, rpm ,omega):
self.o = sphere(radius = size, make_trail = True, trail_radius = 0.05, trail_type="points",
pos=pos, interval=10, texture='Soccer-ball-Texture1.jpg')
self.o.v = v
self.omega = omega
self.radius = size
self.stop = False
self.rpm = rpm
self.slab = label(pos= pos + vec(0,2,0), box = True)
def drag_torque(Cw, density, omega, radius):
return -Cw * density/2 * (radius**5) * mag(omega) * omega
def drag_force(Cd, density, velocity, radius):
return -1/2 * density * Cd * (pi*radius**2) * mag(velocity) * velocity
def magnus_force(Cm, radius, omega, velocity):
return 4/3*pi*(radius**3) * (Cm*density*cross(omega,velocity))
scene = canvas(width=1000, height=500, center =vec(0,0,0), background=vec(0.5,0.5,0)) # open a window
floor = box(length=40, height=0.01, width=30, color=color.blue) # the floor
balls = []
for i in range(-2,6):
rpm = 0 + 250*i
ball = soccer(size, vec(-20, size ,0), v, rpm, rpm/60*2*pi*norm(vec(0,0,1)))
balls.append(ball)
stops = [False]*10
scene.center = balls[4].o.pos + vec(0,2,0)
dt = 0.001
while True:
rate(200)
for ball in balls:
if ball.o.pos.y >= ball.radius:
ball.o.pos += ball.o.v*dt
ball.o.v += ( drag_force(Cd, density, ball.o.v, ball.radius)/weight + magnus_force(Cm, ball.radius, ball.omega, ball.o.v)/weight + g )*dt
I = 2/3*weight*ball.radius**2
ball.omega += drag_torque(Cw, density, ball.omega, ball.radius)/I *dt
ball.o.rotate(angle=mag(ball.omega)*dt, axis=ball.omega, origin=ball.o.pos)
ball.slab.pos = ball.o.pos + vec(0,1,0)
ball.slab.text = str('w:%3.0frpm'%ball.rpm)
else:
ball.stop = True
scene.center = balls[4].o.pos + vec(0,2,0)
for ball in balls:
if ball.stop == False:
break
else:
break
for ball in balls:
origin = vec(-20, 0, 0)
print("ball's rpm: %d, shift: %2.3f"%( ball.rpm, mag(ball.o.pos - balls[3].o.pos) ))
| UTF-8 | Python | false | false | 2,761 | py | 37 | magnus_omega.py | 31 | 0.544006 | 0.495473 | 0 | 70 | 37.328571 | 149 |
liuqinglong110/H-SRDC | 2,113,123,911,934 | 593ed7466f3ebd71f3ecbeac1482569d00f4bdf4 | b76baa268efa234b00899f2a33d49b4437551fed | /opts.py | 3a051343bf453c522bc8f8272f54c909e8f2d6d9 | [
"MIT"
]
| permissive | https://github.com/liuqinglong110/H-SRDC | 661c42ea883d9fed3e3a75693cd74a1a345f2382 | 6595ac3a743ed85821091e1a07ca5742649d12f3 | refs/heads/main | 2023-07-14T14:17:37.865873 | 2021-08-24T13:33:20 | 2021-08-24T13:33:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
def opts():
parser = argparse.ArgumentParser(description='H-SRDC', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# datasets
parser.add_argument('--data_path_source', type=str, default='./data/datasets/Office31/', help='root of source training set')
parser.add_argument('--data_path_target', type=str, default='./data/datasets/Office31/', help='root of target training set')
parser.add_argument('--data_path_target_t', type=str, default='./data/datasets/Office31/', help='root of target test set')
parser.add_argument('--src', type=str, default='amazon', help='source training set')
parser.add_argument('--tar', type=str, default='webcam_half', help='target training set')
parser.add_argument('--tar_t', type=str, default='webcam_half2', help='target test set')
parser.add_argument('--num_classes', type=int, default=31, help='class number')
# general optimization options
parser.add_argument('--epochs', type=int, default=200, help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--no_da', action='store_true', help='whether to not use data augmentation')
parser.add_argument('--lr', type=float, default=1e-2, help='learning rate')
parser.add_argument('--lrplan', type=str, default='dao', help='learning rate decay plan of dao or exp or step for attention module')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay (l2 penalty)')
parser.add_argument('--nesterov', action='store_true', help='whether to use nesterov SGD')
parser.add_argument('--schedule', type=int, nargs='+', default=[80, 120], help='decrease learning rate at these epochs for step decay')
parser.add_argument('--gamma', type=float, default=0.1, help='lr is multiplied by gamma on schedule')
parser.add_argument('--eps', type=float, default=1e-8, help='a small value to prevent underflow')
# specific optimization options
parser.add_argument('--cluster_iter', type=int, default=5, help='number of iterations of k-means')
parser.add_argument('--cluster_method', type=str, default='kmeans', help='cluster method to choose [kmeans|spherical_kmeans]')
parser.add_argument('--beta', type=float, default=1.0, help='weight of auxiliary target distribution or assigned cluster labels')
parser.add_argument('--initial_cluster', type=int, default=0, help='target or source class centroids for initialization of k-means')
parser.add_argument('--src_cen_first', action='store_true', help='whether to use source centroids as target cluster centers at the first epoch')
parser.add_argument('--src_cls', action='store_true', help='whether to classify source instances when clustering target instances')
parser.add_argument('--src_soft_label', action='store_true', help='whether to use convex combination of true label vector and predicted label vector as training guide')
parser.add_argument('--learn_embed', action='store_true', help='whether to apply embedding clustering')
parser.add_argument('--embed_softmax', action='store_true', help='whether to use softmax to normalize soft cluster assignments for embedding clustering')
parser.add_argument('--scale', type=float, default=1.0, help='maximum value of lambda (default: 1.0)')
parser.add_argument('--div', type=str, default='kl', help='measure of divergence between one target instance and its perturbed counterpart')
parser.add_argument('--gray_tar_agree', action='store_true', help='whether to enforce consistency between RGB and gray images on the target domain')
parser.add_argument('--aug_tar_agree', action='store_true', help='whether to enforce consistency between RGB and augmented images on the target domain')
parser.add_argument('--sigma', type=float, default=0.1, help='standard deviation of Gaussian for data augmentation operation of blurring')
## source sample selection
parser.add_argument('--src_soft_select', action='store_true', help='whether to softly select source instances')
parser.add_argument('--record_weight_rank', action='store_true', help='whether to record weight rank of source samples')
parser.add_argument('--src_mix_weight', action='store_true', help='whether to mix 1 and soft weight')
# checkpoints
parser.add_argument('--log', type=str, default='./checkpoints/office31', help='log folder')
parser.add_argument('--resume', type=str, default=None, help='checkpoint path to resume (default: None)')
# architecture
parser.add_argument('--arch', type=str, default='resnet50', help='model name')
parser.add_argument('--num_neurons', type=int, default=128, help='number of neurons of fc1 of new model')
parser.add_argument('--pretrained', action='store_true', help='whether using pretrained model')
parser.add_argument('--only_decoder', action='store_true', help='whether to only use decoder for set transformer')
parser.add_argument('--num_head', type=int, default=4, help='head number of multi-head attention module')
# i/o
parser.add_argument('--workers', type=int, default=4, help='number of data loading workers (default: 4)')
parser.add_argument('--start_epoch', type=int, default=0, help='manual epoch number (useful on restarts)')
parser.add_argument('--stop_epoch', type=int, default=200, help='stop epoch (default: 200)')
parser.add_argument('--print_freq', type=int, default=10, help='print frequency (default: 10)')
args = parser.parse_args()
args.pretrained = True
args.src_cls = True
args.src_cen_first = True
args.learn_embed = True
args.embed_softmax = True
if args.tar.find('amazon') != -1 and args.src.find('webcam') != -1:
args.beta = 0.5
args.initial_cluster = 1
args.log = args.log + '_' + args.src + '2' + args.tar + '_bs' + str(args.batch_size) + '_' + args.arch + '_lr' + str(args.lr) + '_' + args.cluster_method
return args
| UTF-8 | Python | false | false | 6,090 | py | 30 | opts.py | 7 | 0.706076 | 0.694745 | 0 | 72 | 83.583333 | 172 |
dushuai0/RegressionTest | 2,516,850,850,711 | 1fb9b948c1e36c34524f76e42ebd8ef65df05051 | b713546464131c4bd1aecbf3897446121ade96f7 | /src/utils/browser.py | e46232f6f6e9ea9187ccdc232c8864a2a6875e46 | []
| no_license | https://github.com/dushuai0/RegressionTest | 54e2a7f89eb0720f8b6663b1f7edd40dff76a0fa | e40ef04e9e3380a3ac566516f026831a1c07882a | refs/heads/master | 2020-07-22T17:40:03.416651 | 2019-09-09T09:54:30 | 2019-09-09T09:54:30 | 207,277,966 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
from selenium import webdriver
from src.utils.configurations import Settings, DRIVER_PATH
class UnSupportBrowserTypeError(Exception):
pass
class Browser:
def __init__(self):
self.browsertype = str(Settings.get_value_by_key('BROWSER_TYPE'))
if self.browsertype.lower() == 'chrome':
self.driver = webdriver.Chrome(executable_path=os.path.join(DRIVER_PATH, 'chromedriver.exe'))
elif self.browsertype.lower() == 'firefox':
self.driver = webdriver.Firefox(executable_path=os.path.join(DRIVER_PATH, 'geckodriver.exe'))
elif self.browsertype.lower() == 'ie':
self.driver = webdriver.Ie(executable_path=os.path.join(DRIVER_PATH, 'IEDriverServer.exe'))
else:
self.driver = None
raise UnSupportBrowserTypeError(
"Only Chrome, Firefox and IE are supported, plese check browser-type configuration!")
def get(self, url):
self.driver.get(url)
def maximize_window(self):
self.driver.maximize_window()
def delete_cookies(self):
self.driver.delete_all_cookies()
def close(self):
self.driver.close()
def quit(self):
self.driver.quit()
| UTF-8 | Python | false | false | 1,222 | py | 28 | browser.py | 17 | 0.649755 | 0.649755 | 0 | 37 | 31.972973 | 105 |
npvoid/OnlineDoubleOracle | 6,176,163,014,763 | 98902a286b547ab2565cbefd0862d9434d6da4bd | 8c20a31d9afc40eac30e1bd2366fcf695a492483 | /games/kuhn_poker.py | d66a04bce04a4aa662b50ad12379522f9a1c4619 | []
| no_license | https://github.com/npvoid/OnlineDoubleOracle | ef1c600d9fb32ff6b28272b0138c08e66c2b1b91 | be94f316f7672951aff31d4baea3211bf7dd5288 | refs/heads/master | 2023-04-09T01:12:19.952646 | 2021-04-23T16:04:15 | 2021-04-23T16:04:15 | 360,935,047 | 11 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
An implementation of Kuhn Poker based on following posts
https://ai.plainenglish.io/building-a-poker-ai-part-6-beating-kuhn-poker-with-cfr-using-python-1b4172a6ab2d
"""
import numpy as np
from typing import List, Dict
import random
import itertools
KUHN_INFOSTATES = ['KPB', 'K', 'QPB', 'Q', 'JPB', 'J', # turn for player 1
'KB', 'KP', 'QB', 'QP', 'JB', 'JP'] # turn for player 2
KUHN_ACTIONS = ['P', 'B']
KUHN_CARDS = ['J', 'Q', 'K']
def infostate2vector(infostate):
# infosate = card + history just the sum of two strings
assert infostate in KUHN_INFOSTATES, infostate + " not in infostates"
idx = KUHN_INFOSTATES.index(infostate)
vec = np.zeros((len(KUHN_INFOSTATES),), dtype=np.float32)
vec[idx] = 1.
return vec
class KuhnPoker:
def __init__(self):
self.done = False
self.current_player = 0
self.current_cards = None
self.history = None
self.infostates = KUHN_INFOSTATES
self.actions = KUHN_ACTIONS
self.cards = KUHN_CARDS
@staticmethod
def is_terminal(history: str) -> bool:
return history in ['BP', 'BB', 'PP', 'PBB', 'PBP']
@staticmethod
def get_payoff(history: str, cards: List[str]) -> int:
"""ATTENTION: this gets payoff for 'active' player in terminal history"""
if history in ['BP', 'PBP']:
return +1
else: # PP or BB or PBB
payoff = 2 if 'B' in history else 1
active_player = len(history) % 2
player_card = cards[active_player]
opponent_card = cards[(active_player + 1) % 2]
if player_card == 'K' or opponent_card == 'J':
return payoff
else:
return -payoff
def step(self, a):
self.history += self.actions[a] # Update history
self.current_player = 1 - self.current_player # Next player
if self.is_terminal(self.history):
self.done = True
r = ((-1)**self.current_player)*self.get_payoff(self.history, self.current_cards) # Return reward for player 0
next_obs = [0., 0.]
else:
r = 0.
next_obs = [infostate2vector(self.current_cards[self.current_player]+self.history),
self.current_cards[self.current_player]+self.history]
info = None
return next_obs, r, self.done, info
def reset(self, set_cards=None):
self.current_player = 0
self.done = False
self.current_cards = random.sample(self.cards, 2) if set_cards is None else set_cards
self.history = ''
return [infostate2vector(self.current_cards[0]+self.history), self.current_cards[0]+self.history]
def calc_ev(p1_strat, p2_strat, cards, history, active_player):
""" Returns value for player 2!! (p2_strat) """
if KuhnPoker.is_terminal(history):
return -KuhnPoker.get_payoff(history, cards)
my_card = cards[active_player]
next_player = (active_player + 1) % 2
if active_player == 0:
strat = p1_strat[my_card + history]
else:
strat = p2_strat[my_card + history]
return -np.dot(strat, [calc_ev(p1_strat, p2_strat, cards, history + a, next_player) for a in KUHN_ACTIONS])
def calc_best_response(agg_hagent, br_strat_map, br_player, cards, history, active_player, prob):
"""
after chance node, so only decision nodes and terminal nodes left in game tree
"""
if KuhnPoker.is_terminal(history):
return -KuhnPoker.get_payoff(history, cards)
key = cards[active_player] + history
next_player = (active_player + 1) % 2
if active_player == br_player:
vals = [calc_best_response(agg_hagent, br_strat_map, br_player, cards, history + action,
next_player, prob) for action in KUHN_ACTIONS]
best_response_value = max(vals)
if key not in br_strat_map:
br_strat_map[key] = np.array([0.0, 0.0])
br_strat_map[key] = br_strat_map[key] + prob * np.array(vals, dtype=np.float64)
return -best_response_value
else:
strategy = agg_hagent[key]
action_values = [calc_best_response(agg_hagent, br_strat_map, br_player, cards,
history + action, next_player, prob * strategy[idx])
for idx, action in enumerate(KUHN_ACTIONS)]
return -np.dot(strategy, action_values)
def get_exploitability(agg_hagent):
exploitability = 0
br_hagent = {}
for cards in itertools.permutations(KUHN_CARDS):
calc_best_response(agg_hagent, br_hagent, 0, cards, '', 0, 1.0)
calc_best_response(agg_hagent, br_hagent, 1, cards, '', 0, 1.0)
for k,v in br_hagent.items():
v[:] = np.where(v == np.max(v), 1, 0)
if np.sum(v)>1.:
idxs = np.nonzero(v)
v[:] = np.zeros_like(v)
v[np.random.choice(idxs[0])] = 1.
for cards in itertools.permutations(KUHN_CARDS):
ev_1 = calc_ev(agg_hagent, br_hagent, cards, '', 0)
ev_2 = calc_ev(br_hagent, agg_hagent, cards, '', 0)
exploitability += 1 / 6 * (ev_1 - ev_2)
return exploitability, br_hagent
def calc_ev_wrapper(p1_strat, p2_strat, cards):
vp2 = calc_ev(p1_strat, p2_strat, cards, '', 0)
return -vp2, vp2
if __name__=='__main__':
env = KuhnPoker()
a_list = [0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1] * 6
# ['P', 'P', 'P', 'B', 'P', 'P', 'B', 'B', 'B', 'P', 'B', 'B'] * 6
for cards in itertools.permutations(KUHN_CARDS):
cards = cards[:-1]
for i in range(5):
obs = env.reset(set_cards=cards)
while 1:
a = a_list.pop(0)
next_obs, r, d, _ = env.step(a)
if d:
print(cards[0]+cards[1]+env.history, r)
break
print('-----------')
| UTF-8 | Python | false | false | 5,915 | py | 12 | kuhn_poker.py | 11 | 0.569907 | 0.552663 | 0 | 157 | 36.452229 | 123 |
HeerKirov/sdust-online-judge | 10,084,583,214,666 | 300fd350c81ecef34ebc20bbbe574741fb71fb67 | 6c80a888f9cc3dda6522fb4af46436232fce0791 | /Judge/virtual-judge/conf.py | a46f6ebfaa66202eb6e925d150353f47c8de4d82 | [
"LicenseRef-scancode-sata"
]
| permissive | https://github.com/HeerKirov/sdust-online-judge | e17032fa2911b39a6fa2e72cdd03f5af7ecf9c6f | e34bc34b1749caa2e3d1eb467cb40a82ce246a1b | refs/heads/master | 2021-01-21T10:13:43.101911 | 2018-04-17T12:00:36 | 2018-04-17T12:00:36 | 92,946,531 | 11 | 4 | null | true | 2017-08-29T13:17:44 | 2017-05-31T13:09:15 | 2017-08-02T13:51:32 | 2017-08-29T13:17:44 | 2,272 | 0 | 2 | 0 | Python | null | null | # == SDUSTOJ 通信相关设置 ==============================================================================================
# SDUSTOJ数据库的参数
pg_db = {
'user': 'heer',
'password': '1234',
'host': 'localhost',
'port': '5432',
'database': 'sdustoj_server'
}
# 用于监听SDUSTOJ消息的Redis的参数
redis_db = {
'host': 'localhost',
'port': '6379',
'password': '1234',
'db': 0
}
# 接受SDUSTOJ命令的队列
queue = 'virtualjudge'
# 订阅SDUSTOJ哪些编程环境的提交消息
subscribe = [
'hdu-c', 'hdu-cpp', 'hdu-java', 'hdu-gcc', 'hdu-g++', 'hdu-cs',
'poj-g++', 'poj-gcc', 'poj-java', 'poj-cpp', 'poj-c',
]
# 该评测机在SDUSTOJ中的ID
judger_id = 1
# 本地数据库参数
local_pg_db = {
'user': 'heer',
'password': '1234',
'host': 'localhost',
'port': '5432',
'database': 'virtual_judge'
}
# 本地进行通信的队列参数
local_redis_db = {
'host': 'localhost',
'port': '6379',
'password': '1234',
'db': 1
}
# 本地进行消息通信的队列
local_queue = {
'submission-analyse': 'sa'
}
# == 爬虫相关 ====================================================================================
# 保存所有可用oj的登陆账户信息
oj_user_info = {
'hdu': ('User', 'pw'),
'poj': ('User', 'pw'),
}
oj_env_message = {
'hdu-c': ('hdu', '3'),
'hdu-cpp': ('hdu', '2'),
'hdu-java': ('hdu', '5'),
'hdu-gcc': ('hdu', '1'),
'hdu-g++': ('hdu', '0'),
'hdu-cs': ('hdu', '6'),
'poj-g++': ('poj', '0'),
'poj-gcc': ('poj', '1'),
'poj-java': ('poj', '2'),
'poj-cpp': ('poj', '4'),
'poj-c': ('poj', '5')
}
| UTF-8 | Python | false | false | 1,678 | py | 203 | conf.py | 79 | 0.429435 | 0.398522 | 0 | 77 | 18.324675 | 114 |
eddiejessup/chemopore | 4,776,003,674,325 | a7b65f3214dfa5ca224509ec694f77a8918ddd67 | 80ab5697e687b04b4473dc5eeb57275bf2b0d1b2 | /make_mesh.py | 850aa93caefea2b03672e49b46e92ed325afdabc | []
| no_license | https://github.com/eddiejessup/chemopore | c93a785a13bb4735ce5effdaff06137dc317f817 | d9c9d6d028288976015bfdf92a97775464f2713f | refs/heads/master | 2021-01-10T20:28:51.647085 | 2015-07-17T17:12:13 | 2015-07-17T17:12:13 | 24,348,844 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import fipy
import numpy as np
# All arguments on the left hand side are indices to the various constructions.
gmsh_text_box = '''
// Define the square that acts as the system boundary.
dx = %(dx)g;
Lx = %(Lx)g;
Ly = %(Ly)g;
// Define each corner of the square
// Arguments are (x, y, z, dx); dx is the desired cell size near that point.
Point(1) = {Lx / 2, Ly / 2, 0, dx};
Point(2) = {-Lx / 2, Ly / 2, 0, dx};
Point(3) = {-Lx / 2, -Ly / 2, 0, dx};
Point(4) = {Lx / 2, -Ly / 2, 0, dx};
// Line is a straight line between points.
// Arguments are indices of points as defined above.
Line(1) = {1, 4};
Line(2) = {4, 3};
Line(3) = {3, 2};
Line(4) = {2, 1};
// Loop is a closed loop of lines.
// Arguments are indices of lines as defined above.
Line Loop(1) = {1, 2, 3, 4};
'''
gmsh_text_radius = '''
R = %(R)g;
'''
gmsh_text_circle = '''
// Define a circle that acts as an obstacle
// Circle center coordinates
x = %(x)g;
y = %(y)g;
// The integer to start indexing objects from.
i = %(i)d;
// Define the center and compass points of the circle.
Point(i) = {x, y, 0, dx};
Point(i + 1) = {x - R, y, 0, dx};
Point(i + 2) = {x, y + R, 0, dx};
Point(i + 3) = {x + R, y, 0, dx};
Point(i + 4) = {x, y - R, 0, dx};
// Circle is confusingly actually an arc line between points.
// Arguments are indices of: starting point; center of curvature; end point.
Circle(i) = {i + 1, i, i + 2};
Circle(i + 1) = {i + 2, i, i + 3};
Circle(i + 2) = {i + 3, i, i + 4};
Circle(i + 3) = {i + 4, i, i + 1};
Line Loop(i) = {i, i + 1, i + 2, i + 3};
'''
gmsh_text_surface = '''
// The first argument is the outer loop boundary.
// The remainder are holes in it.
Plane Surface(1) = {%(args)s};
'''
def make_porous_mesh(r, R, dx, L):
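    # Build a rectangular Lx-by-Ly Gmsh domain with a circular hole of radius R punched
    # out at each centre in r; dx sets the target cell size passed to Gmsh.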
gmsh_text = gmsh_text_box % {'dx': dx[0], 'Lx': L[0], 'Ly': L[1]}
loop_indexes = [1]
if r is not None and len(r) and R:
gmsh_text += gmsh_text_radius % {'R': R}
for i in range(len(r)):
index = 5 * (i + 1)
gmsh_text += gmsh_text_circle % {'x': r[i][0], 'y': r[i][1],
'i': index}
loop_indexes += [index]
surface_args = ', '.join([str(index) for index in loop_indexes])
gmsh_text += gmsh_text_surface % {'args': surface_args}
return fipy.Gmsh2D(gmsh_text)
if __name__ == '__main__':
r = np.array([[0.0, 0.1], [0.3, 0.3]])
R = 0.1
dx = np.array([0.02, 0.02])
L = np.array([1.0, 1.0])
m = make_porous_mesh(r, R, dx, L)
phi = fipy.CellVariable(m)
v = fipy.Viewer(vars=phi, xmin=-L[0] / 2.0, xmax=L[0] / 2.0)
v.plotMesh()
raw_input()
| UTF-8 | Python | false | false | 2,606 | py | 10 | make_mesh.py | 10 | 0.545664 | 0.510361 | 0 | 95 | 26.431579 | 79 |
Rosie-Hasan/Keeping | 12,146,167,556,584 | f14db8df1e2126a9eb5e6ca65d46371c77e4fc0e | e4ce93e2910cf1e93cb5a82495a7095000a99547 | /Making_arrays.py | 1c6a17c016409ef64667ca285d22110841f15ace | []
| no_license | https://github.com/Rosie-Hasan/Keeping | 2d02dcb2ed6d5fa983dea4703fad1b84e40a8600 | beadf8bb106562898a59fea2a4f11fd32ea95abf | refs/heads/main | 2023-07-13T08:28:21.137451 | 2021-08-20T12:18:03 | 2021-08-20T12:18:03 | 398,018,082 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from numpy import save
HLT_dict = np.load('HLT_dict.npy', allow_pickle='TRUE').item()
LRT_dict = np.load('LRT_dict.npy', allow_pickle='TRUE').item()
Offline_dict = np.load('Offline_dict.npy', allow_pickle='TRUE').item()
Truth_dict = np.load('Truth_dict.npy', allow_pickle='TRUE').item()
ROI_dict = np.load('ROI_dict.npy', allow_pickle='TRUE').item()
ROIkey = ROI_dict.keys()
def ROI(r,name1, a, name2,b):
compareto_dict = {}
compare_dict = {}
eta_min = ROI_dict[r][0]
eta_max = ROI_dict[r][1]
#print(eta_min,eta_max)
for i in b:
if name2[i][5] == r and name2[i][0] > eta_min and name2[i][0] < eta_max:
compareto_dict[i] = name2[i]
for j in a:
if name1[j][5] == r:
compare_dict[j] = name1[j]
#print(compareto_dict)
return compareto_dict, compare_dict
def match(a,d):
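    # two tracks count as matched when both |delta eta| and |delta phi| are below 0.1,
    # using the per-ROI compare_dict / compareto_dict globals set up in the main loop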
match = False
delta_eta = abs(compare_dict[a][0] - compareto_dict[d][0])
if delta_eta < 0.1:
match = True
else:
return False
delta_phi = abs(compare_dict[a][1] - compareto_dict[d][1])
if delta_phi < 0.1:
match = True
else:
return False
return match
####### comparing tracks
HLTkey = HLT_dict.keys()
LRTkey = LRT_dict.keys()
Offkey = Offline_dict.keys()
Truthkey = Truth_dict.keys()
total_matches = 0
total_failed = 0
first = [HLTkey, LRTkey, HLTkey, LRTkey]
second = [Offkey, Offkey, Truthkey, Truthkey]
third = [HLT_dict, LRT_dict, HLT_dict, LRT_dict]
fourth = [Offline_dict, Offline_dict, Truth_dict, Truth_dict]
label = ['HLTvsOff', 'LRTvsOff', 'HLTvsTruth', 'LRTvsTruth']
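# each index i defines one comparison: the key set / dict being tested (first[i] / third[i])
# against the reference key set / dict (second[i] / fourth[i]), named by label[i]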
#####track.eta, track.phi, track.z0, track.pT, track.d0]
for k,l,m,n,o in zip(first, second, third, fourth, label):
failed_eta = []
failed_phi = []
failed_z0 = []
failed_pT = []
failed_d0 = []
eta = []
phi = []
z0 = []
pT = []
d0 = []
total_eta = []
total_phi = []
total_z0 = []
total_pT = []
total_d0 = []
delta_pT = []
total_matches = 0
total_failed = 0
matched = False
start = datetime.now().time()
for r in ROIkey:
#for i,j in zip(k,l):
#print(r)
compareto_dict, compare_dict = ROI(r,m,k,n,l)
#print(j,compareto_dict,i,compare_dict)
#print('comparing', len(compare_dict.keys()), 'compare to', len(compareto_dict.keys()))
for a in compare_dict.keys():
for d in compareto_dict.keys():
if match(a,d):
#total_matches = total_matches + 1
#print('match')
matched = True
break
else:
#total_failed = total_failed + 1
matched = False
#print('failed')
if matched == True:
total_matches = total_matches + 1
delta_pT.append(m[a][3]-n[d][3])
eta.append(m[a][0])
phi.append(m[a][1])
z0.append(m[a][2])
pT.append(m[a][3])
d0.append(m[a][4])
else:
total_failed = total_failed + 1
failed_eta.append(m[a][0])
failed_phi.append(m[a][1])
failed_z0.append(m[a][2])
failed_pT.append(m[a][3])
failed_d0.append(m[a][4])
#print('per event','matched=',total_matches, 'failed', total_failed)
end = datetime.now().time()
print('start=', start, 'end=', end)
total_eta = eta + failed_eta
total_phi = phi + failed_phi
total_z0 = z0 + failed_z0
total_pT = pT + failed_pT
total_d0 = d0 + failed_d0
##### saving arrays
save('eta_{}.npy'.format(o),eta)
save('failed_eta_{}.npy'.format(o),failed_eta)
save('total_eta_{}.npy'.format(o),total_eta)
save('phi_{}.npy'.format(o),phi)
save('failed_phi_{}.npy'.format(o),failed_phi)
save('total_phi_{}.npy'.format(o),total_phi)
save('z0_{}.npy'.format(o),z0)
save('failed_z0_{}.npy'.format(o),failed_z0)
save('total_z0_{}.npy'.format(o),total_z0)
save('pT_{}.npy'.format(o),pT)
save('failed_pT_{}.npy'.format(o),failed_pT)
save('total_pT_{}.npy'.format(o),total_pT)
save('d0_{}.npy'.format(o),d0)
save('failed_d0_{}.npy'.format(o),failed_d0)
save('total_d0_{}.npy'.format(o),total_d0)
save('delta_pT_{}'.format(o),delta_pT)
def match2(a,b):
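    # same eta/phi matching as match(), but with tighter 0.01 windows for the Truth vs Offline comparison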
match = False
delta_eta = abs(Truth_dict[a][0] - Offline_dict[b][0])
if delta_eta < 0.01:
match = True
else:
return False
delta_phi = abs(Truth_dict[a][1] - Offline_dict[b][1])
if delta_phi < 0.01:
match = True
else:
return False
#same_event = name1[stra][5] - name2[strb][5]
#if same_event == 0:
# match = True
#else:
# return False
return match
failed_eta = []
failed_phi = []
failed_z0 = []
failed_pT = []
failed_d0 = []
eta = []
phi = []
z0 = []
pT = []
d0 = []
total_eta = []
total_phi = []
total_z0 = []
total_pT = []
total_d0 = []
delta_pT = []
total_matches = 0
total_failed = 0
matched = False
start = datetime.now().time()
for x in Offkey:
for y in Truthkey:
if match2(y,x):
total_matches = total_matches + 1
matched = True
break
else:
total_failed = total_failed + 1
matched = False
if matched == True:
delta_pT.append(Offline_dict[x][3]-Truth_dict[y][3])
eta.append(Offline_dict[x][0])
phi.append(Offline_dict[x][1])
z0.append(Offline_dict[x][2])
pT.append(Offline_dict[x][3])
d0.append(Offline_dict[x][4])
else:
failed_eta.append(Offline_dict[x][0])
failed_phi.append(Offline_dict[x][1])
failed_z0.append(Offline_dict[x][2])
failed_pT.append(Offline_dict[x][3])
failed_d0.append(Offline_dict[x][4])
#
end = datetime.now().time()
#
print('start=', start, 'end=', end)
total_eta = eta + failed_eta
total_phi = phi + failed_phi
total_z0 = z0 + failed_z0
total_pT = pT + failed_pT
total_d0 = d0 + failed_d0
##### saving arrays
save('eta_OffvsTruth.npy',eta)
save('failed_eta_OffvsTruth.npy',failed_eta)
save('total_eta_OffvsTruth.npy',total_eta)
save('phi_OffvsTruth.npy',phi)
save('failed_phi_OffvsTruth.npy',failed_phi)
save('total_phi_OffvsTruth.npy',total_phi)
save('z0_OffvsTruth.npy',z0)
save('failed_z0_OffvsTruth.npy',failed_z0)
save('total_z0_OffvsTruth.npy',total_z0)
save('pT_OffvsTruth.npy',pT)
save('failed_pT_OffvsTruth.npy',failed_pT)
save('total_pT_OffvsTruth.npy',total_pT)
save('d0_OffvsTruth.npy',d0)
save('failed_d0_OffvsTruth.npy',failed_d0)
save('total_d0_OffvsTruth.npy',total_d0)
save('delta_pT_OffvsTruth',delta_pT)
print('arrays all done')
| UTF-8 | Python | false | false | 7,249 | py | 9 | Making_arrays.py | 9 | 0.532349 | 0.514002 | 0 | 296 | 22.47973 | 89 |
maya-shankar/flask-blog | 1,357,209,682,759 | 1ab32aba39c17378233389e6a09bf02c0eb328df | e0b49bcc851c2620a14b8df7a5cc3ca8b7922209 | /app.py | d31d24fae5b4e35fce6520f7324305d1805d06db | []
| no_license | https://github.com/maya-shankar/flask-blog | 955f59f5f3c236625371bc949d5ebcce47dc03ca | ec0f564b9b812d356d3d598b3cdfa3a685653579 | refs/heads/main | 2022-12-27T11:42:23.355306 | 2020-10-17T22:32:24 | 2020-10-17T22:32:24 | 304,897,103 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sqlite3
from flask import Flask, render_template, request, url_for, flash, redirect
import db
app = Flask(__name__)
app.config['SECRET_KEY'] = 'rGfS1AGyCIkOQN5inOgYNtfhAtYbQ5psY9D6g4d7CcZeHwFcUcLHPw=='
@app.route('/')
def index():
posts = db.get_posts()
return render_template('index.html', posts=posts)
@app.route('/post/<int:post_id>', methods=('GET', 'PUT', 'DELETE'))
def post(post_id):
post = db.get_post(post_id)
if request.method == 'GET':
return render_template('post.html', post=post)
elif request.method == 'PUT':
title = request.form['title']
content = request.form['content']
if not (title and content):
flash('Title and content are required!')
else:
            db.edit_post(post_id, title, content)
            post = db.get_post(post_id)  # re-fetch so the updated title/content are rendered
        return render_template('post.html', post=post)
elif request.method == 'DELETE':
post = db.get_post(post_id)
db.delete_post(post_id)
flash(f"Post {post['title']} was successfully deleted")
return redirect(url_for('index'))
@app.route('/post', methods=('POST',))
def create():
if request.method == 'POST':
title = request.form['title']
content = request.form['content']
if not (title and content):
flash('Title and content are required!')
else:
db.put_post(title, content)
return redirect(url_for('index'))
return render_template('create.html')
@app.route('/about')
def about():
return render_template('about.html')
| UTF-8 | Python | false | false | 1,553 | py | 4 | app.py | 2 | 0.611719 | 0.606568 | 0 | 56 | 26.732143 | 85 |
anurag3it/Happy_Hacking | 18,957,985,673,385 | fd56ac1d2f0b30e489747ef78bda8865be1a2cbf | 57e8e2ec13350bb55120ba51c1e73a3b3b5766f9 | /is_armstrong.py | d49e87c8ebf997f96a67cfb1d096ca3b2a513a77 | [
"MIT"
]
| permissive | https://github.com/anurag3it/Happy_Hacking | 7121e970faa9b974da6daee0b6eff4354260b25c | 2981305e43633b0432f9d64cab39e6967431cf72 | refs/heads/master | 2020-04-03T07:47:40.472579 | 2018-10-28T20:43:04 | 2018-10-28T20:43:04 | 155,113,435 | 0 | 0 | null | true | 2018-10-28T20:39:02 | 2018-10-28T20:39:02 | 2018-10-28T16:42:39 | 2018-10-28T20:01:30 | 17,640 | 0 | 0 | 0 | null | false | null |
def is_armstrong(n):
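	# sums the cubes of the digits, so this check is only meaningful for three-digit candidates such as 153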
m=0
t=n
while n>0:
r=(n%10)
m=m+(r*r*r)
n=n/10
print m
	if m==t:
		print "Yes, it is an Armstrong number"
	else:
		print "No, it is not an Armstrong number"
is_armstrong(153)
| UTF-8 | Python | false | false | 187 | py | 1 | is_armstrong.py | 1 | 0.59893 | 0.550802 | 0 | 14 | 12.285714 | 33 |
igorosha53/specialist | 18,880,676,240,198 | d232df096835ae4e1df89c96c2ed1bb6b8494b7e | 7a1bfb6ae572e53c3be0413acc9e8e9d96eb035a | /Lesson_2/For/for_index_list.py | 2499372333394ac4d435d4fc0abe11b7cefb34e3 | []
| no_license | https://github.com/igorosha53/specialist | c59f6ff6eb27716c0ad49231bce7898c69c6cae6 | 99621ac88b84ec12fb76cf5fbf103c28932319a2 | refs/heads/master | 2020-06-03T21:57:37.770206 | 2019-07-08T08:48:31 | 2019-07-08T08:48:31 | 191,747,654 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | s = [71, 35, 14, 65, 8, 5, 71] # не підходить, бо якщо будуть однакові числа...то й індекс продублюється
# for i in s:
# print(i, s.index(i))
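# the commented-out version above is not suitable: list.index() always returns the first
# match, so duplicate values (the two 71s here) would report the same index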
n = len(s)
for i in range(n):
print(i, s[i])
s[i] = s[i] + 5
print()
print(s)
| UTF-8 | Python | false | false | 292 | py | 115 | for_index_list.py | 115 | 0.564103 | 0.508547 | 0 | 10 | 22.4 | 105 |
cash2one/xai | 15,831,249,473,430 | 7269151da1d050c59e115c703ac2b171656fc7bb | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_compotes.py | df70d07860ba7dd413f3bd7da4baa3b8f46338f4 | [
"MIT"
]
| permissive | https://github.com/cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from xai.brain.wordbase.nouns._compote import _COMPOTE
# class header
class _COMPOTES(_COMPOTE, ):
def __init__(self,):
_COMPOTE.__init__(self)
self.name = "COMPOTES"
self.specie = 'nouns'
self.basic = "compote"
self.jsondata = {}
| UTF-8 | Python | false | false | 245 | py | 37,275 | _compotes.py | 37,266 | 0.657143 | 0.657143 | 0 | 10 | 23.3 | 54 |
alexander89116/python_workshop | 18,451,179,520,861 | 7597135af1de89785e36ce1584830986d0671e90 | a72128c02966874730bd88ab6bd90416d0259fff | /contest3/C.py | 375da142e82f241b3ab44d64bdb401ec583968bf | []
| no_license | https://github.com/alexander89116/python_workshop | e3fabdd4571c8dccb5458f750e8be1f6b559de09 | fa3a2c8d9334df97f1a238dd62e1469eb335dd09 | refs/heads/master | 2020-04-27T17:41:44.229051 | 2019-09-15T13:38:48 | 2019-09-15T13:38:48 | 174,533,029 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | m = {}
s = input()
n = int(input())
i = 0
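# slide a window of length n across s, counting how many times each substring occurs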
while n + i <= len(s):
s1 = s[i:n + i]
if s1 in m:
m[s1] = m[s1] + 1
else:
m[s1] = 1
i += 1
a = list()
for k in m.keys():
if m[k] >= 2:
a.append(k)
print(sorted(a))
| UTF-8 | Python | false | false | 251 | py | 28 | C.py | 25 | 0.398406 | 0.358566 | 0 | 16 | 14.6875 | 25 |
seoljeongwoo/learn | 4,526,895,542,856 | 8dd714460f9f9e524a986a63a8662c5a8025de48 | cf5f24e5a32f8cafe90d4253d727b1c0457da6a4 | /algorithm/boj_1506.py | 7d09c23b11244e584125fff194e4eafa4af54873 | []
| no_license | https://github.com/seoljeongwoo/learn | 537659ca942875f6846646c2e21e1e9f2e5b811e | 5b423e475c8f2bc47cb6dee09b8961d83ab08568 | refs/heads/main | 2023-05-04T18:07:27.592058 | 2021-05-05T17:32:50 | 2021-05-05T17:32:50 | 324,725,000 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
input = sys.stdin.readline
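# Kosaraju's algorithm: dfs() pushes vertices in finishing order, rev_dfs() collects one
# strongly connected component from the reversed graph, and the answer sums the cheapest
# cost inside each component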
def dfs(c):
v[c] = True
for n in e[c]:
if v[n]==True: continue
dfs(n)
st.append(c)
return
def rev_dfs(c):
v[c] = True
ret = [c]
for n in rev_e[c]:
if v[n] == True : continue
ret += rev_dfs(n)
return ret
n = int(input())
cost = list(map(int,input().split()))
edge = [list(map(int,list(input().rstrip('\n')))) for _ in range(n)]
e = [ [] for _ in range(n+1)]
rev_e = [ [] for _ in range(n+1)]
for i,row in enumerate(edge):
for j,col in enumerate(row):
if col == 1: e[i].append(j); rev_e[j].append(i)
v = [False] * n
st = []
for i in range(n):
if v[i] == False: dfs(i)
v = [False] * n
ret = 0
while st:
t = st.pop()
if v[t] == True: continue
scc_lst = rev_dfs(t)
min_cost = int(1e6)
for data in scc_lst:
min_cost = min(min_cost, cost[data])
ret += min_cost
print(ret)
| UTF-8 | Python | false | false | 917 | py | 410 | boj_1506.py | 369 | 0.526718 | 0.520174 | 0 | 40 | 21.9 | 68 |
TalentFlow/Brandee-backend | 5,514,738,021,586 | 39264b16b3f363eb1f4682f4990462b5fafca4fc | dbaad69b1752a0a0a0268c53e6e6f2a1f172ace1 | /products/migrations/0001_initial.py | 5a1f6954d82ecdd6e7180a7638a2f6985b8d40b3 | []
| no_license | https://github.com/TalentFlow/Brandee-backend | 10a27f2bd9e479f36f9f144594675da63fcaef5c | d87297dbfd4407775db8bafa145b3ed2a965ae24 | refs/heads/master | 2023-02-04T15:08:59.051229 | 2020-12-24T13:02:32 | 2020-12-24T13:02:32 | 324,155,082 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.2 on 2020-12-18 11:14
import cloudinary.models
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=10)),
('last_name', models.CharField(max_length=10)),
('company', models.CharField(blank=True, max_length=50, null=True)),
('address1', models.CharField(max_length=100)),
('address2', models.CharField(blank=True, max_length=100, null=True)),
('city', models.CharField(max_length=20)),
('country', django_countries.fields.CountryField(max_length=2)),
('postal', models.CharField(max_length=10)),
('phone', models.CharField(max_length=20)),
('default', models.BooleanField(default=False)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='HomePhotos',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img', cloudinary.models.CloudinaryField(max_length=255)),
('img_catagory', models.CharField(choices=[('SHOES', 'Shoes'), ('BAGS', 'Bags'), ('BOUTIQUE', 'Boutique'), ('MATERNITY_STORE', 'Maternity-Store'), ('GLASSES', 'Glasses'), ('MAKE_UP', 'Make-up'), ('JEWELLERY', 'Jewellery'), ('FITNESS_ACCESSORIES', 'Fitness-Accessories'), ('FITNESS_APPARELS', 'Fitness-Apparel'), ('UNDERGARMENTS', 'Under-garments')], default=None, max_length=30)),
('Date', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('information', models.TextField()),
('catagory', models.CharField(choices=[('SHOES', 'Shoes'), ('BAGS', 'Bags'), ('BOUTIQUE', 'Boutique'), ('MATERNITY_STORE', 'Maternity-Store'), ('GLASSES', 'Glasses'), ('MAKE_UP', 'Make-up'), ('JEWELLERY', 'Jewellery'), ('FITNESS_ACCESSORIES', 'Fitness-Accessories'), ('FITNESS_APPARELS', 'Fitness-Apparel'), ('UNDERGARMENTS', 'Under-garments')], default='SHOES', max_length=30)),
('color', models.CharField(blank=True, max_length=20, null=True)),
('shoes_catagory', models.CharField(blank=True, choices=[('SNEAKERS', 'Sneakers'), ('RUNNING_SHOES', 'Running-Shoes'), ('PEEP_TOE_BOOTIES', 'Peep-Toe-Booties'), ('SANDLES_AND_SLIDES', 'Sandles-And-Slides'), ('WORKOUT_SHOES', 'Workout-Shoes'), ('HIKING_AND_OUTDOOR', 'Hiking-And-Outdoor'), ('GOLF', 'Golf'), ('TENNIS', 'Tennis'), ('VOLLEYBALL', 'VolleyBall'), ('SOCCER', 'Soccer'), ('BASKETBALL', 'BasketBall'), ('SKATEBOARDING', 'SkateBoarding')], default=None, max_length=30, null=True)),
('bags_catagory', models.CharField(blank=True, choices=[('PURSES', 'Purses'), ('CLUTCHES', 'Clutches'), ('SHOULDER_BAGS', 'Shoulder-Bags'), ('RUCKSACKS', 'Rucksacks'), ('CROSS_BODY_BAGS', 'Cross-Body-Bags'), ('BEACH_BAGS', 'Beach-Bags'), ('BUM_BAGS', 'Bum-Bags'), ('SHOPPER_BAGS', 'Shopper-Bags'), ('TOTE_BAGS', 'Tote-Bags'), ('TRAVEL_BAGS', 'Travel-Bags')], default=None, max_length=30, null=True)),
('boutique_catagory', models.CharField(blank=True, choices=[('MAXI', 'Maxi-Dress'), ('BABYDOLL_DRESS', 'Babydoll-Dress'), ('LITTLE_BLACK_DRESS', 'Little-Black-Dress'), ('FLORAL_DRESS', 'Floral-Dress'), ('CASUAL_DRESS', 'Casual-Dress'), ('MIDI_DRESS', 'Midi-Dress'), ('WRAP_DRESS', 'Wrap-Dress'), ('SHIFT_DRESS', 'Shift-Dress'), ('SOLID_DRESS', 'Solid-Dress'), ('T_SHIRT_DRESS', 'T-Shirt-Dress'), ('LACE_DRESS', 'Lace-Dress'), ('COCKTAIL_DRESS', 'Cocktail-Dress')], default=None, max_length=30, null=True)),
('jewellery_catagory', models.CharField(blank=True, choices=[('RINGS', 'Rings'), ('EARRINGS', 'Earrings'), ('NECKLACES', 'Necklaces'), ('BRACELETS', 'Bracelets')], default=None, max_length=30, null=True)),
('fitness_apparel_catagory', models.CharField(blank=True, choices=[('EXERCISE_JACKETS', 'Exersice-Jackets'), ('EXERCISE_TOPS', 'Exersice-Tops'), ('SPORTS_BRA', 'Sports-Bra'), ('SWIM_SUITS', 'Swim-Suits'), ('WORKOUT_SHORTS_PAINTS', 'Workout-Shorts-Paints'), ('YOGA_APPAREL', 'Yoga-Apparel')], default=None, max_length=30, null=True)),
('maternity_store_catagory', models.CharField(blank=True, choices=[('MATERNITY_BRA', 'Maternity-Bra'), ('BABY_SHOWER_DRESS', 'Baby-Shower-Dress'), ('MATERNITY_BELTS', 'Maternity-Belts'), ('MATERNITY_DRESS', 'Maternity-Dress'), ('MATERNITY_PAINTS', 'Maternity-Paints')], default=None, max_length=30, null=True)),
('under_garments_catagory', models.CharField(blank=True, choices=[('BRAS', 'Bras'), ('UNDERWEAR', 'Underwear'), ('PANTIES', 'Panties'), ('LINGERIE', 'Lingerie'), ('SLEEP', 'Sleep'), ('LOUNGE', 'Lounge')], default=None, max_length=30, null=True)),
('glasses_catagory', models.CharField(blank=True, choices=[('COMPUTER_GLASSES', 'Computer Glasses'), ('SUNGLASSES', 'Sunglasses'), ('MUZZUCCHELLI_COLLECTION', 'Muzzucchelli Collection'), ('METAL_COLLECTION', 'Metal Collection')], default=None, max_length=30, null=True)),
('price_new', models.FloatField(default=0)),
('price_old', models.FloatField(default=0)),
('is_featured', models.BooleanField(default=False)),
('is_new', models.BooleanField(default=False)),
('on_sale', models.BooleanField(default=False)),
('in_stock', models.IntegerField(default=1)),
('slug', models.SlugField(blank=True, unique=True)),
('Date', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ['-Date'],
},
),
migrations.CreateModel(
name='Sizes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('size', models.CharField(default=None, max_length=5)),
],
),
migrations.CreateModel(
name='Wishlist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.product')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-Date'],
},
),
migrations.CreateModel(
name='ProductPhotos',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img', cloudinary.models.CloudinaryField(max_length=255, verbose_name='image')),
('is_featured', models.BooleanField(default=False)),
('Date', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.product')),
],
options={
'ordering': ['-Date'],
},
),
migrations.AddField(
model_name='product',
name='sizes',
field=models.ManyToManyField(blank=True, to='products.Sizes'),
),
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('stripe_charge_id', models.CharField(max_length=50)),
('amount', models.FloatField()),
('date', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='OrderItems',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ordered', models.BooleanField(default=False)),
('quantity', models.IntegerField(default=1)),
('size', models.CharField(blank=True, max_length=10, null=True)),
('products', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='products.product')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ordered', models.BooleanField(default=False)),
('date', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('items', models.ManyToManyField(to='products.OrderItems')),
('payment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='products.payment')),
('shipping_address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='products.address')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| UTF-8 | Python | false | false | 10,527 | py | 16 | 0001_initial.py | 14 | 0.592476 | 0.585257 | 0 | 148 | 70.128378 | 522 |
ShenQianwithC/HistomicsTK-pv | 8,211,977,514,934 | aad27b0507ed149c78adc8ddba207a4fe6bc78ed | 09363052f9f3836b70d4a7bc55a506334ad1f1d9 | /packages/girder_worker/girder_worker/plugins/types/format/table/objectlist_to_rows.py | 6e19c2c4e174295ab3dd9818f2f66fbd70dd4f75 | [
"Apache-2.0"
]
| permissive | https://github.com/ShenQianwithC/HistomicsTK-pv | d7ca6ec08951d90eda5faa93d61d6d0333ee9938 | 4ad7e72a7ebdabbdfc879254fad04ce7ca47e320 | refs/heads/master | 2020-04-26T12:53:49.273444 | 2019-04-24T01:53:01 | 2019-04-24T01:53:01 | 173,435,036 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import collections
# Attempt to keep column ordering if objects happen to have ordered keys
field_map = collections.OrderedDict()
rows = []
def subkeys(path, obj, row):
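    # flatten nested dicts into dot-separated column names, e.g. {'a': {'b': 1}} becomes field 'a.b'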
if isinstance(obj, dict):
for k in obj:
if isinstance(k, (str, unicode)):
subkeys(path + [k], obj[k], row)
elif len(path) > 0:
field = '.'.join(path)
field_map[field] = True
row[field] = obj
for obj in input:
row = {}
subkeys([], obj, row)
rows.append(row)
fields = [key for key in field_map]
output = {'fields': fields, 'rows': rows}
| UTF-8 | Python | false | false | 591 | py | 166 | objectlist_to_rows.py | 108 | 0.585448 | 0.583756 | 0 | 26 | 21.730769 | 72 |
JPinSPACE/AdventOfCode | 12,713,103,236,566 | ddb9194b34037b0bfe463e4eb813c854158e496e | 09b65aa15e3a85d57f91defff1f28fedec75b48c | /day15/02_cookies_are_not_meals/solution.py | 01f233472894da7ccc9ef895f84644f6149260d7 | [
"MIT"
]
| permissive | https://github.com/JPinSPACE/AdventOfCode | be9b121e5f0e4e3fb85b089751373add961aa838 | af9264633995ff5e756b71b0134f26e44167b881 | refs/heads/master | 2021-01-10T16:27:48.227774 | 2015-12-23T05:56:49 | 2015-12-23T05:56:49 | 47,206,219 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | """ Solution to the second puzzle of Day 15 on adventofcode.com
"""
import os
def evaluate_cookie(parts, part_names, ratios):
""" I evaluate cookies by eating them.
"""
cookie_score = 1
attrs = sorted(parts[part_names[0]].keys())
for attr in attrs:
attr_score = 0
for pos, ratio in enumerate(ratios):
attr_score += ratio * parts[part_names[pos]][attr]
if attr_score <= 0:
return 0
if attr == 'calories':
if attr_score != 500:
return 0
else:
cookie_score *= attr_score
return cookie_score
def main():
""" These are some seriously delicious cookies
"""
basedir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(basedir, 'input')
parts = {}
part_names = []
with open(file_path, 'r') as input_file:
for line in [ raw_line.strip() for raw_line in input_file ]:
pieces = line.split(' ')
part_names.append(pieces[0][:-1])
parts[pieces[0][:-1]] = {
'capacity': int(pieces[2][:-1]),
'durability': int(pieces[4][:-1]),
'flavor': int(pieces[6][:-1]),
'texture': int(pieces[8][:-1]),
'calories': int(pieces[10])
}
best_score = 0
best_ratio = []
for first in range(101)[1:]:
for second in range(101 - first)[1:]:
for third in range(101 - (first + second))[1:]:
fourth = 100 - (first + second + third)
score = evaluate_cookie(parts, part_names, [first,
second,
third,
fourth])
if score > best_score:
best_score = score
best_ratio = [first, second, third, fourth]
print best_score, best_ratio
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 2,147 | py | 33 | solution.py | 31 | 0.446204 | 0.427108 | 0 | 69 | 30.115942 | 68 |
milenmihaylov/SoftUni | 2,379,411,883,706 | 04e04f89c3cb64860af115770c0b7381a3510c9e | 568f5af0bab92edfb5e357407d1e675a25d63109 | /ProgrammingBasics/01.FirstStepsInCoding/06.charity_campaign.py | 8d3c32268512f23ee5a50db125e6cd68765e5846 | []
| no_license | https://github.com/milenmihaylov/SoftUni | 32c002d6d9e9bfe2a5171f8eaeb00c067131215c | 6b4a631e92c810ebfb9d3c04c37762588c1ab5c0 | refs/heads/master | 2023-08-07T09:07:32.478221 | 2021-08-11T07:20:58 | 2021-08-11T07:20:58 | 408,756,145 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | total_days = int(input())
number_of_bakers = int(input())
cakes_per_baker = int(input())
waffles_per_baker = int(input())
pancakes_per_baker = int(input())
cake_price = 45
waffle_price = 5.8
pancake_price = 3.2
total_cakes = total_days * number_of_bakers * cakes_per_baker
total_waffles = total_days * number_of_bakers * waffles_per_baker
total_pancakes = total_days * number_of_bakers * pancakes_per_baker
total_sum = total_cakes * cake_price + total_waffles * waffle_price + total_pancakes * pancake_price
expenses = total_sum / 8
charity_sum = total_sum - expenses
print(charity_sum)
| UTF-8 | Python | false | false | 592 | py | 68 | 06.charity_campaign.py | 68 | 0.719595 | 0.70777 | 0 | 20 | 28.6 | 100 |
raman162/lexchains | 10,788,957,895,800 | 59871ed993ddb2ed3f1b72fd80a108839bdbeceb | 1052971cd268f2f769169c83cdf87fbcb9a626a1 | /parse.py | 3104eb4724d6eea314b67b2590aa1a730f1084d2 | []
| no_license | https://github.com/raman162/lexchains | 06c3aabdc62263846c3ffa0581b37f252ea2407a | 98b35fb0036e790f9ca4c772b8aa69a4304da26c | refs/heads/master | 2021-01-10T05:00:54.087904 | 2016-01-29T01:47:45 | 2016-01-29T01:47:45 | 50,628,513 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import nltk
from nltk.corpus import wordnet as wn
nltk.data.path.append('./nltk_data/')
#checks to see if a word has any noun senses; if so we keep it, if not we ignore it
def senseEmp(word):
global unknownWords
global usedWords
try :
senses=wn.synsets(word, pos='n')
#print 'Working on word', word
# print(senses)
except UnicodeDecodeError:
senses=[]
if not senses:
unknownWords.append(word)
# print 'There are no senses here'
return senses
else:
i=0
while i < len(senses) :
if senses[0].lemmas()[0].name() != senses[i].lemmas()[0].name() :
senses.remove(senses[i])
else :
i+=1
usedWords.append(word)
#print senses
return senses
#checks to see if one word is equal to another or related to it by hyponymy, synonymy, hypernymy or antonymy
#word1 is a synset already in a chain, while word2 is the list of candidate synsets for the new word
def wordRelate(word1, word2):
global match
global missedSenses
join1=False
antonyms=[]
match=None
missedSenses=[] #This list is to keep track of the missed senses that occur for each word throughout the document
for i in range(0, len(word2)):
# print word2[i], word1
#checks to see if the words are identical
if word2[i]==word1:
join1=True
match=word2[i]
break
#checks to see if the words have a hypernym relationship
for k in range(0, len(word2[i].hypernyms())):
if word2[i].hypernyms()[k]==word1:
join1=True
match=word2[i]
break
        #checks to see if the words have a hyponym relationship
for k in range(0, len(word2[i].hyponyms())):
if word2[i].hyponyms()[k]==word1:
join1=True
match=word2[i]
break
#checks to see if the words have a relationship with synonyms
for k in range(0, len(word2[i].lemmas())):
if word2[i].lemmas()[k].antonyms() :
for m in range(0, len(word2[i].lemmas()[k].antonyms())):
#This is to get the list of antonyms
antonyms.append(word2[i].lemmas()[k].antonyms()[m])
if word2[i].lemmas()[k]==word1:
match=word2[i]
join1=True
break
if join1: break
#checks to see if equal to any of the antonyms
for k in range(0, len(antonyms)):
#print antonyms[k].name()
#print word1[l].lemmas()[0].name()
for m in range(0, len(word1.lemmas())):
if antonyms[k].name()==word1.lemmas()[m].name() :
join1=True
match=word2[i]
break
if join1 : break
if join1 : break
missedSenses.append(i)
# print missedSenses
return join1
#eliminating the helper chains (which did not help)
def elimHelper():
global allSenses
for i in range (0, len(allSenses)):
k=0
while k < len(allSenses[i]) :
if (usedWords.count(allSenses[i][k].lemmas()[0].name()) == 0 ):
allSenses[i].remove(allSenses[i][k])
else:
k+=1
while [] in allSenses : allSenses.remove([])
#adds the different senses of a word to the appropriate chains
def add2chains(word):
global allSenses
global match
global missedSenses
join=False
# print len(allSenses)
#If it is the first word we make chains for all the senses of that word
if len(allSenses) == 0 :
for i in range(0, len(word)):
chain=[]
chain.append(word[i])
allSenses.append(chain)
    #If it is not the first word, it checks through each of the chains and adds the appropriate sense of the word to the appropriate chain
else:
# print 'senses chain list greater than 3'
for i in range(0, len(allSenses)):
chain=allSenses[i]
for k in range(0, len(chain)):
if wordRelate(chain[k], word) :
allSenses[i].append(match)
join=True
break
        #For all the senses that were missed for a word, we make a new chain appropriate for them
if missedSenses :
# print missedSenses
for i in range(0, len(missedSenses)):
chain=[]
chain.append(word[missedSenses[i]])
allSenses.append(chain)
##prints all of the chains made out
def printChains():
global allSenses
global usedWords
for i in range(0, len(allSenses)):
chain=allSenses[i]
print ' '
print 'Chain', i ,':',
for k in range(0, len(chain)):
numReps=usedWords.count(chain[k].lemmas()[0].name())
print chain[k].lemmas()[0].name(), '(', numReps, ')',
##Chain Eliminater!
def eliminateChains():
global allSenses
global usedWords
global elimChains
loc=getMaxChainLoc()
max=getMaxChainVal()
x=0
##Eliminates repeats from the largest chain continuously
    #Note that each chain is only processed once; the indices of chains already handled are stored in elimChains
while max != 1 :
#loops through all different words for that chain and eliminates repeats
while x < len(allSenses[loc]):
chain=allSenses[loc]
repeatsCount=allSenses[loc].count(allSenses[loc][x])
wordSen=allSenses[loc][x]
wordnoSen=chain[x].lemmas()[0].name()
if repeatsCount > 1 :
while repeatsCount != 1 :
allSenses[loc].remove(wordSen)
repeatsCount-=1
#eliminates words from other chains that may have a repeat
for m in range(0,len(allSenses)) :
if m!=loc :
otherchain=allSenses[m]
n=0
while n < len(otherchain):
if wordnoSen==otherchain[n].lemmas()[0].name() :
allSenses[m].remove(otherchain[n])
else :
n+=1
x+=1
elimChains.append(loc)
loc=getMaxChainLoc()
max=getChainLen(loc)
#print 'len is ', max
#print 'loc is ', loc
#Goes through entire list to eliminate repeats of single sized chains
for i in range(0, len(allSenses)):
chain=allSenses[i]
k=0
while k < len(chain) :
wordnoSen=chain[k].lemmas()[0].name()
repeatsCount=chain.count(chain[k])
if repeatsCount > 1 :
while repeatsCount != 1 :
allSenses[i].remove(chain[k])
repeatsCount-=1
# print wordnoSen
for m in range(i+1,len(allSenses)) :
otherchain=allSenses[m]
n=0
while n < len(otherchain):
if wordnoSen==otherchain[n].lemmas()[0].name() :
allSenses[m].remove(otherchain[n])
else :
n+=1
k+=1
##Bye Bye Empty Chains!
while [] in allSenses : allSenses.remove([])
##returns the location of the maximum Chain length from allSenes
def getMaxChainLoc():
global elimChains
global allSenses
max=0
loc=0
#x print elimChains
for i in range(0, len(allSenses)):
if len(allSenses[i]) > max and i not in elimChains:
max=len(allSenses[i])
loc=i
return loc
#returns chain length
def getChainLen(loc):
global allSenses
return len(allSenses[loc])
#returns the length of the longest chain
def getMaxChainVal():
global allSenses
max=0
for i in range(0, len(allSenses)):
if len(allSenses[i]) > max : max=len(allSenses[i])
return max
if __name__=='__main__':
global allSenses #manages all the different sense chains
global unknownWords #Keeps track of words not used in the chaining
global usedWords #Keeps track of all the words that we actually used during the chaining
global elimChains ##This keeps track of the largest chain
elimChains=[]
#Requesting input for the file name
file_name=raw_input('Please enter the name of the file or type in quit: ')
file_list=[]
if file_name == "quit":
sys.exit()
else:
try:
fin=open(file_name)
except:
print 'The file requested was not found'
file_list=fin.read().split(' ')
#print(file_list)
usedWords=[]
unknownWords=[]
allSenses=[]
#print ('The first word and last word respectively is:',file_list[0], file_list[len(file_list)-1])
    #loops through all the words of the document and adds them to the appropriate chain
for i in range(0,len(file_list)):
word=senseEmp(file_list[i])
if word :
add2chains(word)
#printChains()
    elimHelper() #Helps eliminate irrelevant chains
eliminateChains() #Eliminates the repeated words from the different chains
printChains() #prints out all of the chains
#print(wn.synsets(file_list[4]))
#print(len(wn.synsets(file_list[4])))
| UTF-8 | Python | false | false | 9,447 | py | 1 | parse.py | 1 | 0.559013 | 0.548322 | 0 | 281 | 32.601423 | 136 |
oldsyang/snake | 16,398,185,153,863 | 4b174eeee4fd4b6d1a4233178f0e4ea4acacef50 | c834519b1edaf8ba6a3a1031ffb370ecc7c9c991 | /utils/var.py | e950d7c0fdf6df92a5910902b94987064aa83a3a | []
| no_license | https://github.com/oldsyang/snake | 4f5e7435046862c416352ba347fb10be2b156135 | 026817ac87805f895981af5167d5b51fbc2132e4 | refs/heads/master | 2023-06-23T14:13:28.991980 | 2021-07-29T11:09:14 | 2021-07-29T11:09:14 | 390,696,711 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import contextvars
from typing import Any
aio_databases = contextvars.ContextVar('databases')
redis_var: contextvars.ContextVar[Any] = contextvars.ContextVar('redis')
| UTF-8 | Python | false | false | 168 | py | 20 | var.py | 18 | 0.809524 | 0.809524 | 0 | 5 | 32.6 | 72 |
SteveStrop/Estate-Agent-Apps-0.0 | 7,971,459,329,859 | d9e6f428ab81850d14aa83b72a347b08469953d4 | e04d47b516a8c73c24a8514c29178c4d976ddbdc | /ka_lib.py | 66a3802da71f945eaf88e4ef17f6f87d7b6fd94b | []
| no_license | https://github.com/SteveStrop/Estate-Agent-Apps-0.0 | 3b0707e76b5c1d95e6d450bb798a0053aa483661 | 1570be950bbcfeb666c755581df5760c9e174d78 | refs/heads/master | 2020-03-23T03:06:34.589779 | 2018-07-15T09:05:25 | 2018-07-15T09:05:25 | 141,010,087 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from selenium import webdriver
import ast
from agent_lib import logon
def rgba_to_hex(rgba):
r, g, b, alpha = ast.literal_eval(rgba.strip("rgba"))
hex_value = '#%02x%02x%02x' % (r, g, b)
return hex_value
# find the correct link and click it
def click_hip_link(jobs_list, job_hip_number, links):
"""Searches through a list of jobs to find matching ref_num then opens the page"""
for job in jobs_list: # this all looks ugly
if job.hip == job_hip_number:
for link in links:
if job.href in link.get_attribute("href"):
link.click()
print("You have opened {} on {} at {}".format(job.hip, job.date, job.time))
return True
return False
# create class to store the Job details
class KAJob:
def __init__(self, href, hip, address, postcode, agent, appointment, floorplan, photos):
self.href = href
self.hip = hip
self.address = address
self.postcode = postcode
self.agent = agent
self.floorplan = floorplan
self.photos = photos
self.date = appointment[0:10] if (len(appointment) - 1) else "TBA" # self.appointment.date? is it better?
self.time = appointment[-5:] if (len(appointment) - 1) else "TBA"
def open_jobs_list():
# define constants
floorplan_bg_color = '#9fd69f' # colour used to show floor plan required
photo_bg_color = '#9fd69f' # colour used to show photos required
# create a new Chrome session
chrome_driver_path = "C:\Python36\selenium\webdriver\chrome\chromedriver.exe"
browser = webdriver.Chrome(chrome_driver_path)
logon(
username="xxxxxxxxxxxxxxxx",
password='xxxxxxxxxxxxxxxx',
login_pg="xxxxxxxxxxxxxxxxxxx",
landing_pg="xxxxxxxxxxxxxxxxxxxxxxxxxxx",
username_field="xxxxxxxxxxxxxxxxxxxx",
password_field="xxxxxxxxxxxxxxxxxxxx",
login_btn="xxxxxxxxxxxxxxxxxxxxxxx",
browser=browser
)
# read the List of Current Jobs table into Job class
jobs_data = browser.find_elements_by_css_selector('table.staffgridview td')
links = browser.find_elements_by_tag_name("a")
table_cols = 18
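    # the <td> cells come back as one flat list; each job occupies table_cols consecutive
    # cells, so field k of row r sits at index table_cols * r + k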
row_count = int(len(jobs_data) / table_cols)
jobs = [] # list of jobs, one for each row in the table
for row_index in range(row_count):
jobs.append(KAJob(
href='Select$' + str(row_index), # indexes 'Your Current List of Jobs'-table row, containing 'Open' link
hip=jobs_data[(table_cols * row_index) + 6].text,
address=jobs_data[(table_cols * row_index) + 7].text,
postcode=jobs_data[(table_cols * row_index) + 8].text,
agent=jobs_data[(table_cols * row_index) + 9].text,
floorplan=rgba_to_hex(jobs_data[(table_cols * row_index) + 12].value_of_css_property(
'background-color')) == floorplan_bg_color, # if cell is green floorplan required
photos=rgba_to_hex(
jobs_data[(table_cols * row_index) + 13].value_of_css_property('background-color')) == photo_bg_color,
appointment=jobs_data[(table_cols * row_index) + 17].text
))
# get a list of all links on the page
# choose a HIP to open the corresponding jobs details
click_hip_link(jobs_list=jobs,
job_hip_number=input("Choose a HIP to open"),
links=links)
# add error handling here ^
input("press enter to exit")
browser.quit()
if __name__ == "__main__":
open_jobs_list()
| UTF-8 | Python | false | false | 3,561 | py | 8 | ka_lib.py | 7 | 0.619208 | 0.610222 | 0 | 92 | 37.706522 | 118 |
chejeanx/euler | 11,252,814,334,393 | 31d645cbe541bc2ac6d23555845f13b8574d8761 | cbf6fb2a0d93ca13c84b418d33a43c0ef33e866b | /92.py | 2e5eb6c947ffd56b4e75ac6fc47e01b7d43569fe | []
| no_license | https://github.com/chejeanx/euler | 1fddcc964b7b48c24d8c656d5a515668726f10eb | f9e10897b84a05fa5e9ca8049131191e6bd0735f | refs/heads/master | 2021-01-20T15:07:21.243637 | 2017-06-22T15:19:00 | 2017-06-22T15:19:00 | 90,722,860 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Square digit chains
# https://projecteuler.net/problem=92
# Returns terminating number in square digit chain for startN
def squareDigitChain(startN, currentN, chainOutcomes):
if currentN == 1 or currentN == 89:
chainOutcomes[startN] = currentN
return currentN
if currentN in chainOutcomes:
return chainOutcomes[currentN]
answer = 0
for char in str(currentN):
answer += int(char) ** 2
return squareDigitChain(startN, answer, chainOutcomes)
# Returns number of numbers below maxNum that terminate their square digit chains at 89
def squareDigit89s(maxNum):
num89s = 0
for num in range(1, maxNum):
if num % 10000 == 0: print(num)
if squareDigitChain(num, num, {}) == 89:
num89s += 1
return num89s
print(squareDigit89s(10000000)) | UTF-8 | Python | false | false | 754 | py | 43 | 92.py | 43 | 0.742706 | 0.692308 | 0 | 25 | 29.2 | 87 |