repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
maksim-prost/plan_for_day | 14,989,435,889,856 | 7385dbc66505c4a7a42638e4f71f25ca7635b992 | f832007907c8f18d9d9bbc702fad6880823e0d95 | /config.py | cf011518d1e0e16521a29f3338cfe1acc9e2f47e | []
| no_license | https://github.com/maksim-prost/plan_for_day | 2b1f18d76a67150964ba4dc06965544d8dbf9295 | 2d745d47cc5477d6b8399493fa23ddf48e2f8176 | refs/heads/main | 2023-07-19T09:19:19.897443 | 2021-09-08T11:08:35 | 2021-09-08T11:08:35 | 402,810,528 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import datetime
# Components of the planning-period start date; combined into BEGIN_DAY below.
begin_day = 1
begin_month = 9
begin_year = 2021
# First day covered by the generated day plans.
BEGIN_DAY = datetime.datetime( begin_year,begin_month,begin_day)
# Default "working out" description (Russian: "Review of duties, work with
# documentation, study of preliminary-planning documentation").
TEMPLATE_WORKING_OUT = "Повторение обязанностей, работа с документацией, изучение документацией предварительного планирования."
# One record per watch (shift): nominative post title, genitive form used in
# generated documents, officer's initials/surname, rank title, watch number.
list_dict_wath = [
    {
        'post' : "Помощник начальника караула",
        'post_usage' : "помощника начальника 1 караула",
        'name' : "А.В. Чикуров" ,
        'title' : "старшина внутренней службы",
        'number': 1,
    },
    {
        'post' : "Командир отделения",
        'post_usage' : "командира отделения 2 караула",
        'name' : "В.В. Быков" ,
        'title' : '',
        'number': 2,
    },
    {
        'post' : "Помощник начальника караула",
        'post_usage' : "помощника начальника 3 караула",
        'name' : "М.В. Корнев" ,
        'title' : "старший сержант внутренней службы",
        'number': 3,
    },
    {
        'post' : "Помощник начальника караула",
        'post_usage' : "помощника начальника 4 караула",
        'name' : "А.И. Савченко" ,
        'title' : "старший прапорщик внутренней службы",
        'number': 4,
    },
]
| UTF-8 | Python | false | false | 1,573 | py | 3 | config.py | 3 | 0.560411 | 0.548415 | 0 | 42 | 26.47619 | 127 |
derickbenites/CS50-Course | 13,769,665,190,502 | 6af753ecc3cbf207b295fdae9e1111d9750d1583 | fa2ff2fbf70bebddffe206fe2a0663953cd22cfe | /pset6 - Python 🐍🧡/cash/cash.py | 693ddee7cf118968146cf54b5ffe14beeb254c24 | []
| no_license | https://github.com/derickbenites/CS50-Course | e51aaf8c1fedb37587c61540739fc9e970288322 | 7ec989aca875c34200a0e97eb479a3d241e95568 | refs/heads/master | 2023-03-17T18:14:53.707017 | 2020-12-23T20:46:00 | 2020-12-23T20:46:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from cs50 import get_float
# CS50 "cash": print the minimum number of coins (quarters, dimes, nickels,
# pennies) needed to make the change the user types in dollars.
cash = get_float("cash: ")
while True:
    if cash >= 0:
        # round() instead of the original int(): plain truncation turned
        # e.g. 4.2 * 100 == 419.999... into 419 cents and produced a coin
        # count one too low; rounding yields the intended 420.
        cents = round(cash * 100)
        c25 = int(cents / 25)                                 # quarters
        c10 = int((cents - (c25 * 25)) / 10)                  # dimes
        c5 = int((cents - (c25 * 25) - (c10 * 10)) / 5)       # nickels
        c1 = int(cents - (c25 * 25) - (c10 * 10) - (c5 * 5))  # pennies
        print(c25 + c10 + c5 + c1)
        break
    else:
        # Negative input: prompt again until a valid amount is entered.
        cash = get_float("cash: ")
| UTF-8 | Python | false | false | 408 | py | 31 | cash.py | 6 | 0.45098 | 0.340686 | 0 | 18 | 21.611111 | 60 |
Hoheckell/djang2crud | 3,367,254,400,418 | acfa1e3a954cd1cabc98c9001fa5a457e24f8f62 | 69ee4238213b252f52b8e8e908829a0caf88d629 | /crud/forms.py | 74e400afe2b22c81d7e2a38d3475b2e123d2c1e5 | []
| no_license | https://github.com/Hoheckell/djang2crud | bd3c29aa78c707f1e09bdd336bb41a3d03c1932a | 7214287a31c212fa2d704a71898b57044e6f1719 | refs/heads/master | 2022-12-13T13:38:00.371805 | 2022-12-05T21:53:45 | 2022-12-05T21:53:45 | 134,580,708 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms
from crud.models import User
class UserForm(forms.Form):
    """Registration form: name, e-mail and a password typed twice.

    NOTE(review): there is no clean() comparing 'password' and 'confirm',
    so the match check presumably happens in the view -- confirm.
    """
    nome = forms.CharField(label='Nome', max_length=100,widget=forms.TextInput(attrs={'class': 'form-control'}))
    email = forms.EmailField(label='Email',max_length=100,widget=forms.TextInput(attrs={'class': 'form-control'}))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control'}))
    confirm = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control'}))
class UpdUserForm(forms.ModelForm):
    """Profile-update form bound to the User model (name and e-mail only)."""
    class Meta:
        model = User
        fields = ['nome','email']
    # nome = forms.CharField(label='Nome', max_length=100,widget=forms.TextInput(attrs={'class': 'form-control'}))
    #email = forms.EmailField(label='Email',max_length=100,widget=forms.TextInput(attrs={'class': 'form-control'}))
| UTF-8 | Python | false | false | 831 | py | 6 | forms.py | 3 | 0.708785 | 0.694344 | 0 | 15 | 54.4 | 115 |
dartrevan/ChemTextMining | 4,475,355,944,569 | 21dcb23210491b2e47d82180d10ae6ccb64ebd97 | c40bffd0b96cee19fba7eee4dc7cadcd9b978f97 | /evaluation/eval.py | c252dfb06bb957950dafff706a37181ed3ffb494 | []
| no_license | https://github.com/dartrevan/ChemTextMining | 926bfd3adf07ca6aa59a07ef281881208813332c | 87756d1b088a0a366be43c9dbb1bd9528cdbf4b5 | refs/heads/master | 2022-12-11T11:37:36.497988 | 2019-10-25T08:59:54 | 2019-10-25T08:59:54 | 80,611,797 | 25 | 4 | null | false | 2022-11-21T20:45:05 | 2017-02-01T10:57:22 | 2022-05-18T11:01:16 | 2022-11-21T20:45:01 | 1,666,528 | 22 | 3 | 15 | Python | false | false | # coding=utf-8
import sys
import optparse
import codecs
import json
from lxml import etree
def get_terms(item, gold, task, type):
    """Collect distinct term spans ("start_end" strings) from one review.

    item : review dict with keys "id", "text" and either "entities"
           (gold markup) or "entities_pred" (system output); each is a
           dict of term dicts carrying "start"/"end"/"text"/"entity".
    gold : True reads the gold entities, False reads the predictions.
    task : "entity" keeps only terms whose "entity" field is "Disease".
    type : unused here; kept so the signature matches get_units().

    Returns (review_id, distinct_term_count, list_of_span_strings).
    """
    units = []
    review_id = int(item.get("id"))
    # Pick the entity dict for the requested side.  The original called
    # .values() twice on the gold branch, which raised AttributeError for
    # every gold item; both branches now fetch the raw dict and share one
    # missing-key guard.
    if gold:
        terms = item.get("entities")
    else:
        terms = item.get("entities_pred")
    if terms is None:
        return review_id, 0, units
    terms = terms.values()
    terms_count = 0
    term_set = []  # spans seen so far, so repeated terms count only once
    for json_term in terms:
        if task == "entity":  # task switch: only Disease entities count
            if json_term.get("entity") != "Disease":
                continue
        term_identifier = str(json_term.get("start")) + "_" + str(json_term.get("end"))
        if term_identifier in term_set:
            continue
        term_set.append(term_identifier)
        terms_count += 1
        position_from = int(json_term.get("start"))
        position_to = int(json_term.get("end"))
        units.append(str(position_from) + '_' + str(position_to))
    return review_id, terms_count, units
def get_units(item, gold, task, type):
    """Split every term of one review into word-level span units.

    Mirrors get_terms(), but instead of one "start_end" span per term it
    emits one span per whitespace-separated token inside each term.
    Returns (review_id, term_count, unit_spans).
    """
    review_id = int(item["id"])
    content = item["text"]
    source = item.get("entities", {}) if gold else item.get("entities_pred", {})
    seen = []          # span identifiers already processed (dedup)
    unit_spans = []
    n_terms = 0
    for entry in source.values():
        # For gold markup under the "entity" task, only Disease terms count.
        if task == "entity" and gold and entry.get("entity") != "Disease":
            continue
        ident = "%s_%s" % (entry.get("start"), entry.get("end"))
        if ident in seen:
            continue
        seen.append(ident)
        n_terms += 1
        begin = int(entry.get("start"))
        finish = int(entry.get("end"))
        span_text = content[begin:finish]
        # Walk the term token by token; +1 skips the separating space.
        cursor = begin
        for token in span_text.split(' '):
            stop = cursor + len(token)
            unit_spans.append(str(cursor) + '_' + str(stop))
            cursor = stop + 1
    return review_id, n_terms, unit_spans
def getAllDocs(f):
    """Read file *f* (UTF-8, one JSON document per line) into a list of dicts."""
    with codecs.open(f, encoding="utf-8") as fin:
        return [json.loads(line) for line in fin.readlines()]
def computeEvalNumbers(alg_type, itemlistGS, itemlistTest, task, type):
    """Print per-review and aggregate precision/recall/F1 (Python 2).

    alg_type     : "weak" scores word-level units (get_units); any other
                   value scores whole terms (get_terms).
    itemlistGS   : gold-standard review dicts.
    itemlistTest : review dicts carrying predicted entities.
    task, type   : forwarded to get_units/get_terms.

    Prints one TSV row per matched review, then a collection-level (micro)
    summary row and a per-review macro-average row.
    """
    print "type\tid\tcorrect_unit_count\textracted_unit_coun\tmatch_count\tp\tr\tf"
    # Map review id -> (term count, gold spans) for gold reviews with terms.
    idx2units = {}
    for itm in itemlistGS:
        if alg_type == "weak":
            idx, terms_count, units = get_units(itm, True, task, type)
        else:
            idx, terms_count, units = get_terms(itm, True, task, type)
        if terms_count>0:
            idx2units[idx] = (terms_count, units)
    total_p, total_r, total_f = .0, .0, .0
    processed = []
    match_collection = []
    correct_collection = []
    extracted_collection = []
    for itm in itemlistTest:
        if alg_type == "weak":
            idx, terms_count, units = get_units(itm, False, task, type)
        else:
            idx, terms_count, units = get_terms(itm, False, task, type)
        if idx in idx2units and not idx in processed: #it's not processed test review
            processed.append(idx)
            correct = idx2units[idx][1]
            correct_collection += correct
            # Work on a copy so matched spans can be consumed (multiset
            # matching) without touching the stored gold list.
            correct4del = [i for i in correct]
            extracted = units
            extracted_collection += extracted
            match = []
            for i in extracted:
                if i in correct4del:
                    match.append(i)
                    correct4del.remove(i)
            match_collection += match
            try:
                r = float(len(match))/len(correct)
                p = float(len(match))/len(extracted) if len(extracted) != 0 else 0
                if p == 0 and r == 0:
                    f = 0
                else:
                    f = (2*p*r)/(p+r)
                total_p += p
                total_r += r
                total_f += f
                print "%d\t%d\t%d\t%d\t%.3f\t%.3f\t%.3f" % (idx, len(correct),len(extracted), len(match), p, r, f)
            except:
                print "Unexpected error:", sys.exc_info()[0]
                continue
    n = len(idx2units.keys())
    # Micro-averaged scores over all spans in the collection.
    # NOTE(review): collection_p is matches/correct and collection_r is
    # matches/extracted, i.e. the labels look swapped relative to the
    # per-review p/r computed above -- confirm before relying on them.
    collection_p = float(len(match_collection))/len(correct_collection)
    collection_r = float(len(match_collection))/len(extracted_collection) if len(extracted_collection) != 0 else 0
    collection_f = 0 if collection_p == 0 and collection_r == 0 else (2*collection_p*collection_r)/(collection_p+collection_r)
    print "%s\t%f\t%f\t%f" % (type,collection_p, collection_r, collection_f)
    print "%s\t%f\t%f\t%f" % (type,total_p/n, total_r/n, total_f/n)
def main(argv=None):
    """Parse the command-line options and run the evaluation.

    -g  gold standard file (JSON lines)
    -t  test file with predicted entities (JSON lines)
    -a  task: "type" or "entity"
    -w  evaluation mode: "weak" (word-level units) or strong (whole terms)
    """
    # parse the input
    parser = optparse.OptionParser()
    parser.add_option('-g') #gold standard file
    parser.add_option('-t') #test file with predicted entities
    parser.add_option('-a') #"type" or "entity"
    parser.add_option('-w') #weak or strong evaluation
    options, args = parser.parse_args()
    gold_file_name = options.g
    test_file_name = options.t
    task = options.a
    alg_type = options.w
    # process file with gold markup
    itemlistGS = getAllDocs(gold_file_name)
    itemlistTest = getAllDocs(test_file_name)
    # Single placeholder "type" label; it only affects the summary rows.
    types=[u'none']
    for type in types:
        computeEvalNumbers(alg_type, itemlistGS, itemlistTest, task,type)
if __name__ == "__main__":
    # Run with argv minus the program name; exit() then ends the interpreter.
    main(sys.argv[1:])
    exit()
| UTF-8 | Python | false | false | 6,109 | py | 51 | eval.py | 13 | 0.548535 | 0.542315 | 0 | 172 | 33.517442 | 128 |
Deathik/moneycalc | 1,262,720,420,979 | 44f818a92fb444586094691adf1c04e1bd64e68a | 1053e4eed739ea12557104de36b95c1b813c3d76 | /moneycalc/settings.py | 40594fb83223dd74394bb2c582e5c96589bac091 | []
| no_license | https://github.com/Deathik/moneycalc | 6afbc3a67d41cf8a1dc7b3e2f02eefd59138f253 | 7821b55d8b97cbda98943c76b30d720b90293e21 | refs/heads/master | 2021-01-11T23:54:14.065469 | 2018-10-05T12:35:53 | 2018-10-05T12:35:53 | 78,640,579 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Django settings for moneycalc project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
#import raven
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment before deploying.
SECRET_KEY = 'z@vccq+2j3*bb3o21(rnw7b+9p76$73zc-)+9rg8(!duqyk!)='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# '*' accepts any Host header -- acceptable for development only.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'social_django',
    'rest_framework',
    'calculator.apps.CalculatorConfig',
    'register.apps.RegisterConfig',
    'blog.apps.BlogConfig',
    'feedback.apps.FeedbackConfig',
    #'raven.contrib.django.raven_compat',
    #'debug_toolbar',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    #'debug_toolbar.middleware.DebugToolbarMiddleware',
]
ROOT_URLCONF = 'moneycalc.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # Extra processors required by python-social-auth.
                'social_django.context_processors.backends',
                'social_django.context_processors.login_redirect',
            ],
        },
    },
]
WSGI_APPLICATION = 'moneycalc.wsgi.application'
# Google OAuth2 login first, then Django's regular username/password auth.
AUTHENTICATION_BACKENDS = (
    'social_core.backends.google.GoogleOAuth2',
    'django.contrib.auth.backends.ModelBackend',
)
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Kiev'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATIC_URL = '/static/'
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# python-social-auth Google OAuth2 credentials.
# NOTE(review): secrets committed to source control -- move to env vars.
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '301803085570-1ta3vfer2fhabnmmte87laptfqgc0j4b.apps.googleusercontent.com'
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = '4XLF8iQa1jfMpLJLG4EHhRaM'
# Sentry config
# RAVEN_CONFIG = {
#     'dsn': 'https://f1a0318f75c74fd6b1ba7a084bbb895d:c3178c02d23342ac8ebc8480f8109255@sentry.io/166994',
#     'release': raven.fetch_git_sha(os.path.dirname(os.pardir)),
# }
#INTERNAL_IPS = ['127.0.0.1']
blancamm/k_pygame | 3,470,333,605,241 | b3cb2d2f586d06576b26c4c5ee6d2eb0ea955e88 | ca191c56f9b58793b3ce4bcb876cfee7d4ccea1a | /arkanoid/escenes.py | d450783f369a43b7af61886f4423089fc9c7ccff | []
| no_license | https://github.com/blancamm/k_pygame | 998b2ff1617472a8431996626e268aff176bd57d | 2e630b640d2b19ca0cbad8c3ba9c6f747d4ee7be | refs/heads/master | 2023-04-25T13:49:48.658440 | 2021-05-11T20:08:16 | 2021-05-11T20:08:16 | 361,891,993 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from arkanoid import ANCHO, ALTO, levels, FPS
from arkanoid.entities import Marcador, Bola, Raqueta, Ladrillo, CuentaVidas
from random import randint, choice
import sys
import pygame as pg
from enum import Enum
class Escene():
    """Base class for a game scene.

    Owns the display surface, a sprite group with everything that gets
    drawn, and the frame clock.  Subclasses override bucle_principal()
    (the scene's main loop) and reset().
    """
    def __init__(self, pantalla):
        self.pantalla = pantalla            # pygame display surface
        self.TodoGroup= pg.sprite.Group()   # every sprite drawn each frame
        self.reloj = pg.time.Clock()        # caps the frame rate via tick()
    def rest(self):
        # NOTE(review): probably a typo for reset() -- subclasses define
        # reset(), and nothing visible in this file calls rest().
        pass
    def bucle_principal(self):
        # Scene main loop; implemented by subclasses.
        pass
    def manejo_eventos(self):
        # Quit on window close or the 'q' key.
        for evento in pg.event.get():
            if evento.type == pg.QUIT or \
                evento.type ==pg.KEYDOWN and evento.key == pg.K_q:
                pg.quit()
                sys.exit()
class Game(Escene):
    """Playable Arkanoid scene: paddle, ball, brick layouts, score, lives."""
    def __init__(self, pantalla):
        super().__init__(pantalla)
        #self.pantalla = pantalla
        self.fondo = pg.image.load('./imagenes/background.png')
        # Separate groups so ball collisions can be tested per category.
        self.grupoJugador = pg.sprite.Group()
        self.grupoLadrillos = pg.sprite.Group()
        #self.TodoGroup = pg.sprite.Group ()
        self.TodoGroup.add(self.grupoLadrillos)
        self.cuentaPuntos = Marcador(10, 10, fontsize=50)
        self.cuentaVidas = CuentaVidas(790, 10, 'topright', 50)
        self.TodoGroup.add(self.cuentaPuntos, self.cuentaVidas) # several sprites may be added at once
        self.bola= Bola (randint(0, ANCHO), randint(0, ALTO))
        self.TodoGroup.add(self.bola)
        self.raqueta = Raqueta(x = ANCHO//2, y=ALTO-30)
        self.grupoJugador.add(self.raqueta)
        self.TodoGroup.add(self.grupoJugador)
        self.reset()
    def reset(self): # restore lives/score/level and rebuild the first layout
        self.vidas = 3
        self.puntuacion = 0
        self.level = 0
        self.TodoGroup.remove(self.grupoLadrillos)
        self.grupoLadrillos.empty()
        self.disponer_ladrillos(levels[self.level])
        self.TodoGroup.add(self.grupoLadrillos)
        # Re-add the HUD so it is drawn on top of the new bricks.
        self.TodoGroup.remove(self.cuentaPuntos, self.cuentaVidas)
        self.TodoGroup.add(self.cuentaPuntos, self.cuentaVidas)
    def disponer_ladrillos(self, level):
        # Build the brick wall from the level's strings: 'X' = normal brick,
        # 'D' = hard brick; any other character leaves an empty cell.
        for fila, cadena in enumerate(level):
            contador = 0
            for contador, caracter in enumerate(cadena):
                x = 5 + (100 * contador)
                y = 5 + (40 * fila)
                if caracter in'XD':
                    ladrillo = Ladrillo(x,y, caracter == 'D') # 'D' == 'D' is True -> hard brick
                    self.grupoLadrillos.add(ladrillo)
                contador += 1
    def bucle_principal(self):
        # Plain local: only meaningful inside this loop, so not an attribute.
        game_Over = False
        while not game_Over and self.vidas > 0:
            dt = self.reloj.tick(FPS)
            self.manejo_eventos()
            #self.disponer_ladrillos()
            self.cuentaPuntos.text = self.puntuacion
            self.cuentaVidas.text =self.vidas
            self.bola.prueba_colision(self.grupoJugador)
            tocados = self.bola.prueba_colision(self.grupoLadrillos)
            for ladrillo in tocados:
                self.puntuacion +=5
                if ladrillo.desaparece():
                    self.grupoLadrillos.remove(ladrillo)
                    self.TodoGroup.remove(ladrillo)
            # All bricks cleared: advance to the next level layout.
            if len(self.grupoLadrillos) == 0:
                self.level += 1
                self.disponer_ladrillos(levels[self.level])
                self.TodoGroup.add(self.grupoLadrillos)
            self.TodoGroup.update(dt)
            if self.bola.estado == Bola.Estado.muerta:
                self.vidas -= 1
            self.pantalla.blit(self.fondo, (0,0))
            self.TodoGroup.draw(self.pantalla)
            pg.display.flip()
# The title (start) screen.
class Portada(Escene):
    """Title scene: shows a prompt until the player presses space."""
    def __init__(self, pantalla):
        super().__init__(pantalla)
        self.instrucciones = Marcador(ANCHO//2, ALTO//2, 'center', 50, (255, 255,0))
        self.instrucciones.text = 'Pulsa espacio para jugar'
        self.TodoGroup.add(self.instrucciones)
    def reset(self):
        # Nothing to reset on the title screen.
        pass
    def bucle_principal(self):
        # Run until space is pressed, then return control to the caller.
        game_Over = False
        while not game_Over:
            dt = self.reloj.tick (FPS)
            self.manejo_eventos()
            teclas_pulsadas = pg.key.get_pressed()
            if teclas_pulsadas[pg.K_SPACE]:
                game_Over = True
            self.TodoGroup.update(dt)
            self.pantalla.fill((0,0,0))
            self.TodoGroup.draw(self.pantalla)
            pg.display.flip()
| UTF-8 | Python | false | false | 4,735 | py | 11 | escenes.py | 10 | 0.580904 | 0.570131 | 0 | 150 | 30.506667 | 156 |
picografix/learning101 | 12,936,441,533,523 | 8f236095ec558c6d6ff25d7a03247ab8284b3a06 | b4a08bfb058df822e4fd7c2fdb838dcd783e0af9 | /Task3/chapter2_tutorials/src/circle.py | e46bbd33b209b1406c6fec921966302721c229b3 | []
| no_license | https://github.com/picografix/learning101 | f059b2f01bb89be80eba486e78ab03ba99be2d93 | d0c964b437dd781a823b07bbd69d21ae97781e5d | refs/heads/master | 2022-12-27T09:24:18.726550 | 2020-04-18T10:14:39 | 2020-04-18T10:14:39 | 258,752,775 | 1 | 1 | null | true | 2020-10-16T14:45:24 | 2020-04-25T10:54:17 | 2020-04-25T10:54:19 | 2020-10-16T14:45:21 | 39,116 | 0 | 1 | 1 | null | false | false | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
def move():
    """Drive the turtlesim turtle along a circle (Python 2 / ROS).

    Prompts for 'r' (then a radius) or 'e' (exit).  Publishes a constant
    linear speed with angular speed = speed/radius until one circumference
    has (approximately) been covered.  Returns the raw user choice so the
    caller can decide whether to loop again.
    """
    # Starts a new node
    rospy.init_node('robot_cleaner', anonymous=True)
    velocity_publisher = rospy.Publisher('/turtle1/cmd_vel', Twist, queue_size=10)
    vel_msg = Twist()
    # Read the user's choice (Python 2: raw_input returns a string).
    print("Let's make circle by robot enter r for radius and e for exit")
    isRadius= raw_input()
    speed=1
    if(isRadius=="r"):
        print("enter radius for circle")
        radius = input()
        vel_msg.linear.x = abs(speed)
    elif(isRadius=="e"):
        exit()
    # NOTE(review): any other input leaves 'radius' undefined, so the
    # division below raises NameError -- confirm intended behaviour.
    # Move along x only; rotate at speed/radius to trace the circle.
    vel_msg.linear.y = 0
    vel_msg.linear.z = 0
    vel_msg.angular.x = 0
    vel_msg.angular.y = 0
    vel_msg.angular.z = speed/radius
    while not rospy.is_shutdown():
        # Time reference for the distance estimate below.
        t0 = rospy.Time.now().to_sec()
        current_distance = 0
        # Keep publishing until one circumference (2*pi*r) is covered.
        while(current_distance < (2*3.14*radius)):
            velocity_publisher.publish(vel_msg)
            t1=rospy.Time.now().to_sec()
            # Distance travelled = speed * elapsed time.
            current_distance= speed*(t1-t0)
        # NOTE(review): this zeroes linear.z/angular.x but leaves linear.x
        # and angular.z unchanged, so the turtle keeps circling -- the
        # "stop" looks incomplete; confirm.
        vel_msg.linear.z = 0
        vel_msg.angular.x=0
        velocity_publisher.publish(vel_msg)
    return isRadius
if __name__ == '__main__':
    try:
        # Keep offering circles until the user chooses 'e' (exit).
        t1="r"
        while (t1!="e"):
            t1=move()
    except rospy.ROSInterruptException: pass
| UTF-8 | Python | false | false | 1,678 | py | 22 | circle.py | 20 | 0.620381 | 0.607271 | 0 | 57 | 28.438596 | 82 |
BaskerShu/typeidea | 8,959,301,789,263 | d374aa9b8deb6f352f28d986c1dfb0a820355ace | 2408b2c4f15abfc2b2180a7670b0d5a272415b2e | /setup.py | 07f81dd0eda0de993ecf0616bbbecf63c5bfe462 | [
"MIT"
]
| permissive | https://github.com/BaskerShu/typeidea | 3cae3ea65a1755a7761fd46364d0b7235391e768 | 3b09cd334510738721ec1c73a0713e9e9351456c | refs/heads/master | 2021-01-22T04:05:17.712814 | 2018-02-26T13:58:08 | 2018-02-26T14:05:17 | 102,263,492 | 0 | 0 | null | false | 2017-10-13T11:26:04 | 2017-09-03T12:36:37 | 2017-09-03T13:37:11 | 2017-10-13T11:26:04 | 28 | 0 | 0 | 0 | Python | null | null | from setuptools import find_packages, setup
# Packages live under the typeidea/ subdirectory (see package_dir below).
packages = find_packages('typeidea')
print(packages)
setup(
    name='typeidea',
    version='1.0.4',
    url='https://www.ysz.com/',
    author='Lion',
    author_email='yangshuzhi@outlook.com',
    packages=packages,
    # Map the package root so 'import blog' etc. resolve inside typeidea/.
    package_dir={
        '': 'typeidea',
    },
    include_package_data=True,
    scripts=['typeidea/manage.py'],
    # Pinned runtime dependencies.
    # NOTE(review): 'gunicorn==19.7.1' appears twice in this list.
    install_requires=[
        'django==1.11.8',
        'xadmin==0.6.1',
        'python-decouple==3.1',
        'django-autocomplete-light==3.2.10',
        'django-ckeditor==5.4.0',
        'django-debug-toolbar==1.9.1',
        'django-rest-framework==0.1.0',
        'django-widget-tweaks==1.4.1',
        'django-simple-captcha==0.5.6',
        'django-markdownx==2.0.22',
        'Markdown==2.6.11',
        'mysqlclient==1.3.12',
        'Pillow==5.0.0',
        'coreapi==2.3.3',
        'gunicorn==19.7.1',
        'django-redis==4.8.0',
        'hiredis==0.2.0',
        'redis==2.10.6',
        'gunicorn==19.7.1',
        'raven==6.5.0',
    ],
)
| UTF-8 | Python | false | false | 1,040 | py | 55 | setup.py | 34 | 0.530769 | 0.463462 | 0 | 39 | 25.666667 | 44 |
skoc/tumor-bud-detection | 3,710,851,785,737 | d0c06b80b0f3f88302a482ec7ade67cfa6e1fa13 | 685c8afe181d430491daa8f1214df64054b07431 | /utils/visualizations.py | 77b3d92a65f9da32bd6d47d414010fd1dedd9537 | []
| no_license | https://github.com/skoc/tumor-bud-detection | 48daf03041ccef4a4eecc33bcd4a11cbe8363e6a | 860635f6226dc033cbe7c39c4e297a999c1fab7a | refs/heads/main | 2023-07-29T15:06:15.922354 | 2021-09-15T15:05:20 | 2021-09-15T15:05:20 | 359,135,564 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from utils.utils import *
def get_iou(gt, pr, n_classes, EPS=1e-12):
    """Per-class intersection-over-union between two label maps.

    gt, pr    : integer label arrays of identical shape (truth/prediction).
    n_classes : number of classes; ids 0..n_classes-1 are scored.
    EPS       : keeps the division finite when a class is absent from both.

    Returns a float array of length n_classes with one IoU per class.
    """
    scores = np.zeros(n_classes)
    for label in range(n_classes):
        in_gt = gt == label
        in_pr = pr == label
        overlap = np.sum(in_gt * in_pr)
        combined = np.sum(np.maximum(in_gt, in_pr))
        scores[label] = float(overlap) / (combined + EPS)
    return scores
def my_iou(res1, res2):
    """Binary intersection-over-union of two masks (0 when both are empty).

    Any truthy pixel counts as foreground, so the inputs may be boolean
    arrays or 0/255 uint8 mask images of the same shape.
    """
    overlap = np.count_nonzero(np.logical_and(res1, res2))
    combined = np.count_nonzero(np.logical_or(res1, res2))
    if combined == 0:
        return 0
    return overlap / combined
def get_tpfpfn(mask_img, pred_img, thold_area, thold_iou):
    """Count TP/FP/FN detections for one mask/prediction image pair.

    Every external contour of pred_img whose bounding box exceeds
    thold_area is scored by IoU against the same crop of mask_img; scores
    >= thold_iou count as true positives, the rest as false positives.

    NOTE(review): the ground-truth loop is commented out below, so false
    negatives are derived from the same prediction-based scores and are
    therefore always equal to the false positives -- confirm this is
    intended before trusting recall values built on it.

    Returns the tuple (count_tp, count_fp, count_fn).
    """
    conts_ground, _ = cv2.findContours(pred_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    lst_score = []
    for j, cont in enumerate(conts_ground):
        x,y,w,h = cv2.boundingRect(cont)
        if w*h > thold_area:
            score = my_iou(pred_img[y:y+h, x:x+w], mask_img[y:y+h, x:x+w])
            lst_score.append(score)
    count_fp = len(np.array(lst_score)[np.array(lst_score) < thold_iou])
    # conts_ground, _ = cv2.findContours(mask_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # lst_score = []
    # for j, cont in enumerate(conts_ground):
    #     x,y,w,h = cv2.boundingRect(cont)
    #     if w*h > thold_area:
    #         print(pred_img[y:y+h, x:x+w].shape)
    #         score = my_iou(mask_img[y:y+h, x:x+w], pred_img[y:y+h, x:x+w])
    #         lst_score.append(score)
    count_tp = len(np.array(lst_score)[np.array(lst_score) >= thold_iou])
    count_fn = len(np.array(lst_score)[np.array(lst_score) < thold_iou])
    return (count_tp, count_fp, count_fn)
def confusion_matrix(masks, preds, thold_area = 100, thold_iou = 0.5):
    """Aggregate detection metrics over paired mask/prediction images.

    masks, preds : iterables of binary images (ground truth / prediction),
                   consumed pairwise via zip().
    thold_area   : minimum bounding-box area for a contour to be scored.
    thold_iou    : IoU threshold at/above which a detection is a TP.

    Returns (f1, precision, recall).  The totals are guarded so that an
    empty input or zero matches yields 0.0 scores instead of raising
    ZeroDivisionError as the original did.
    """
    count_tp, count_fp, count_fn = 0, 0, 0
    for mask_img, pred_img in zip(masks, preds):
        # Per-image (tp, fp, fn) from the contour-level IoU matching.
        tp, fp, fn = get_tpfpfn(mask_img, pred_img, thold_area=thold_area, thold_iou=thold_iou)
        count_tp += tp
        count_fp += fp
        count_fn += fn
    precision = count_tp / (count_tp + count_fp) if (count_tp + count_fp) else 0.0
    recall = count_tp / (count_tp + count_fn) if (count_tp + count_fn) else 0.0
    f1 = 2 * (precision * recall) / (precision + recall) if (precision + recall) else 0.0
    return (f1, precision, recall)
def write_iou_per_bud(img_write_path, img_ground_path, img_pred_path, thold_area, dir_write='data/', size_img=512):
    """Annotate an image with the IoU score of every ground-truth bud.

    For each external contour of the ground-truth mask larger than
    thold_area, the IoU between the ground-truth and prediction crops is
    computed and drawn next to the bud on the image loaded from
    img_write_path.  The annotated image is saved to
    <dir_write>/iou-ann/iou-<prediction filename> and also returned.

    Returns (scores, annotated_image); scores is the list of IoU values in
    contour order (a list despite the 'dict_locs' name).
    """
    dict_locs = list()
    # Load the display image plus the two masks at the working resolution.
    img_write = read_image(img_write_path, img_size=size_img)
    img_ground = read_image(img_ground_path, img_size=size_img, mask=True)
    img_pred = read_image(img_pred_path, img_size=size_img, mask=True)
    conts_ground, hierachy = cv2.findContours(img_ground, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for j, cont in enumerate(conts_ground):
        x,y,w,h = cv2.boundingRect(cont)
        if w*h > thold_area:
            # Crop both masks to the bud's bounding box.
            bud_crop_ground = img_ground[y:y+h, x:x+w]
            bud_crop_pred = img_pred[y:y+h, x:x+w]
            # score_iou = get_iou(bud_crop_ground, bud_crop_pred, n_classes=1)[0]
            score_iou = my_iou(bud_crop_ground, bud_crop_pred)
            font = cv2.FONT_HERSHEY_SIMPLEX
            fontScale = 0.5
            # (0, 0, 255) is red in OpenCV's BGR channel order.
            color = (0, 0, 255)
            thickness = 1
            # Draw the rounded score just left of the bounding box.
            img_write = cv2.putText(img_write, ""+str(round(score_iou,2)), (x-w,y), font,
                        fontScale, color, thickness, cv2.LINE_AA)
            dict_locs.append(score_iou)
    # Write IoU Score Annotated Image
    write_loc = mkdir_if_not_exist(os.path.join(dir_write, 'iou-ann'))
    cv2.imwrite(os.path.join(write_loc, 'iou-'+ img_pred_path.split('/')[-1]), img_write)
    return dict_locs, img_write
def generate_visuals(dir_img, dir_pred, img_count=1, clean=True, thold_iou=0.5, img_size=512, dir_write='outputs/', thold_area=0):
    """Save 2x2 comparison figures (annotation / mask / overlap / prediction).

    Expects <dir_img>/img, <dir_img>/ann ('ann-<name>.jpg'),
    <dir_img>/mask ('bw-<name>.png') and <dir_pred>/pred-<file>.
    One figure per input image is written to <dir_write>visualization.

    NOTE(review): img_count is effectively ignored -- the random selection
    is immediately overwritten with the full file list below.  Relies on
    plt/cv2/np and the helpers star-imported from utils at module level.
    """
    # list all files in dir
    files = [f for f in os.listdir(os.path.join(dir_img, 'img'))]
    # select 0.1 of the files randomly
    random_files = np.random.choice(files, img_count)
    random_files = files  # overrides the random choice: process every file
    # Write Generated Visualization
    dir_write += 'visualization'
    mkdir_if_not_exist(dir_pred)
    mkdir_if_not_exist(dir_write)
    for i, file in enumerate(random_files):
        sample_img = os.path.join(dir_img, 'img', file)
        # Strip the leading tag and the extension: 'x-name.png' -> 'name'.
        file_name = ('-'.join(file.split('-')[1:])).split('.')[0]
        # Sample Paths
        sample_ann = os.path.join(dir_img, 'ann', 'ann-'+file_name+'.jpg')
        sample_pred = os.path.join(dir_pred, 'pred-'+file)
        sample_mask = os.path.join(dir_img, 'mask', 'bw-'+file_name+'.png')
        if not os.path.exists(sample_ann): continue
        fig, ax = plt.subplots(2, 2, figsize=(72, 72))
        # ax = axes.flatten()
        # Read Images
        orj_img = cv2.imread(sample_img,cv2.IMREAD_COLOR)
        ann_img = cv2.imread(sample_ann, cv2.IMREAD_COLOR)
        mask_img = cv2.imread(sample_mask, cv2.IMREAD_GRAYSCALE)
        pred_img = cv2.imread(sample_pred, cv2.IMREAD_GRAYSCALE)
        # Build the annotation/prediction overlap image on disk, then
        # annotate it with per-bud IoU scores.
        overlap_img, path_overlap = mapper_image(img_ann=read_image(sample_ann, img_size=img_size), img_pred=read_image(sample_pred, img_size=img_size, mask=True),\
                                    fname="overlap-"+file, thold_area=thold_area, output_dir='.', clean=clean)
        # IoU Scores
        _, overlap_img = write_iou_per_bud(path_overlap, sample_mask, sample_pred, thold_area)
        # iou_scores = write_iou_per_bud(overlap_img, read_image(sample_mask, img_size=img_size, mask=True), read_image(sample_pred, img_size=img_size, mask=True), thold_area=100)
        # print(f"Score: {sum([i > thold_iou for i in iou_scores])/len(iou_scores)}")
        # (count_tp, count_fp, count_fn)
        # tuple_score = get_tpfpfn(mask_img, pred_img, thold_area=100, thold_iou=0.5)
        # select only masked area below
        # masked = input_img.copy()
        # masked[mask_img == 0 ] = 0
        # BGR to RGB
        ann_img = cv2.cvtColor(ann_img, cv2.COLOR_BGR2RGB)
        overlap_img = cv2.cvtColor(overlap_img, cv2.COLOR_BGR2RGB)
        if os.path.exists(sample_ann):
            ax[0, 0].imshow(ann_img)
            ax[0, 0].set_axis_off()
            ax[0 ,0].set_title("Ann Image", fontsize=60)
            ax[1, 0].imshow(mask_img, cmap="gray")
            ax[1, 0].set_axis_off()
            ax[1, 0].set_title("Mask", fontsize=60)
            ax[0, 1].imshow(overlap_img)
            ax[0, 1].set_axis_off()
            ax[0, 1].set_title("Overlap Image", fontsize=60)
            ax[1, 1].imshow(pred_img, cmap="gray")
            ax[1, 1].set_axis_off()
            ax[1, 1].set_title("Predicted", fontsize=60)
        plt.savefig(os.path.join(dir_write, 'visual-generated-'+file))
pahaz/exploit_farm | 17,403,207,513,116 | 47e0b654d415ad1ca9386d3a99ce64006da78524 | b250a7de577648c363b08c23e7375496e930d73d | /spl_example2_never_exits.py | 3a3ce73ff5cb833de2f50cc4c85aee3fc3b08f76 | []
| no_license | https://github.com/pahaz/exploit_farm | 73fb8f42d56b77ec6db39934994f3502a3d7f8e2 | 4d539a5d6564a3a9281b51322e31b323a473f538 | refs/heads/master | 2023-08-26T20:00:24.434872 | 2012-06-16T07:17:18 | 2012-06-16T07:17:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python2
import random
import time
import os
import sys
# force line buffering for stdout so each flag is flushed immediately
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 1)
letters="1234567890abcdef"
# Emit a fresh random 32-char hex "flag" once per second, forever
# (Python 2 script -- note the print statement below).
while True:
    flag=""
    for i in range(32):
        flag+=random.choice(letters)
    print flag
    time.sleep(1)
| UTF-8 | Python | false | false | 329 | py | 3 | spl_example2_never_exits.py | 3 | 0.629179 | 0.583587 | 0 | 18 | 17.277778 | 51 |
nedwilson/general_lib | 15,470,472,200,430 | 8e0d686c9598eb3d1ddea91c068916d118a73769 | 0e8baaecdb57ee7fb11ddb4013cc2b20c0ecfceb | /nuke/hiero/Python/Startup/SpreadsheetExportCSVMenu.py | e888a68a9e4871276d6c46d7bc3836b1add511f4 | []
| no_license | https://github.com/nedwilson/general_lib | 907361fb0f9c836a90b8f22cbf985eac9da94789 | 3d3578e7cff3f747d9e41fd37620aca51ea24132 | refs/heads/master | 2022-02-13T13:27:38.514514 | 2019-07-17T02:09:26 | 2019-07-17T02:09:26 | 112,660,433 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # This Example shows how to register a custom menu to the Spreadsheet View and exports the contents of the Spreadsheet as a CSV file.
# Usage: Right-click on a row of the Spreadsheet, select Export > Spreadsheet to .CSV
# Note: The 'Action' column is not currently hooked up and it does not currently take 'Drop Frame' into account.
from hiero.core.events import registerInterest
from hiero.core import ApplicationSettings
from hiero.core import Sequence
from hiero.core import TrackItem
from hiero.core import Timecode
from PySide2.QtWidgets import QAction
from PySide2.QtWidgets import QMenu
from PySide2.QtGui import QDesktopServices
from PySide2.QtWidgets import QFileDialog
from PySide2.QtCore import QUrl
import os, csv
#### Shot Methods ####
def getStatus(trackItem):
status = 'OK'
if not trackItem.isMediaPresent():
status = 'OFF'
return status
def timecodePrefCheck():
# We need to check the user Preference for 'Timecode > EDL-Style Spreadsheet Timecodes'
return int(hiero.core.ApplicationSettings().boolValue('useVideoEDLTimecodes'))
def getReelName(trackItem):
reelName = ""
M = trackItem.metadata()
if M.hasKey('foundry.edl.sourceReel'):
reelName = M.value('foundry.edl.sourceReel')
return reelName
def getSrcIn(trackItem):
fps = trackItem.parent().parent().framerate()
clip = trackItem.source()
clipstartTimeCode = clip.timecodeStart()
srcIn = Timecode.timeToString(clipstartTimeCode+trackItem.sourceIn(), fps, Timecode.kDisplayTimecode)
return srcIn
def getSrcOut(trackItem):
fps = trackItem.parent().parent().framerate()
clip = trackItem.source()
clipstartTimeCode = clip.timecodeStart()
srcOut = Timecode.timeToString(clipstartTimeCode+trackItem.sourceOut()+timecodePrefCheck(), fps, Timecode.kDisplayTimecode)
return srcOut
def getDstIn(trackItem):
seq = trackItem.parent().parent()
tStart = seq.timecodeStart()
fps = seq.framerate()
dstIn = Timecode.timeToString(tStart+trackItem.timelineIn(),fps,Timecode.kDisplayTimecode)
return dstIn
def getDstOut(trackItem):
seq = trackItem.parent().parent()
tStart = seq.timecodeStart()
fps = seq.framerate()
dstOut = Timecode.timeToString(tStart+trackItem.timelineOut()+timecodePrefCheck(), fps, Timecode.kDisplayTimecode)
return dstOut
# Get a Nuke Read node style file path
def getNukeStyleFilePath(trackItem):
fi = trackItem.source().mediaSource().fileinfos()[0]
filename = fi.filename()
first = fi.startFrame()
last = fi.endFrame()
if trackItem.source().mediaSource().singleFile():
return filename
else:
return "%s %i-%i" % (filename,first,last)
#### The guts!.. Writes a CSV file from a Sequence Object ####
def writeCSVFromSequence(seq):
csvSavePath = os.path.join(os.getenv('HOME'),'Desktop',seq.name()+'.csv')
savePath,filter = QFileDialog.getSaveFileName(None,caption="Save CSV As...",dir = csvSavePath, filter = "*.csv")
print 'Saving To: ' + str(savePath)
if len(savePath)==0:
return
# The Header row for the CSV.. note that 'Action' is not currently supported.
csvHeader = ['Event', 'Status', 'Shot Name', 'Reel', 'Track', 'Speed', 'Src In', 'Src Out','Src Duration', 'Dst In', 'Dst Out', 'Dst Duration', 'Clip', 'Clip Media']
# Get a CSV writer object
csvWriter = csv.writer(open(savePath, 'w'), delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
# Write the Header row to the CSV file
csvWriter.writerow(csvHeader)
# Get all Tracks in the Sequence...
vTracks = seq.videoTracks()
aTracks = seq.audioTracks()
tracks = vTracks+aTracks
rowindex = 1
if len(tracks)>0:
for v in tracks:
for item in v:
if isinstance(item,TrackItem):
M = item.metadata()
if M.hasKey('foundry.edl.editNumber'):
event = M.value('foundry.edl.editNumber')
else:
event = rowindex
rowindex+=1
table_data = [str(event),
str(getStatus(item)),
str(item.name()),
str(getReelName(item)),
str(item.parent().name()),
"%.1f" % (100.0*float(item.playbackSpeed())),
str(getSrcIn(item)),
str(getSrcOut(item)),
str(item.sourceOut()-item.sourceIn()+1),
str(getDstIn(item)),
str(getDstOut(item)),
str(item.duration()),
str(item.source().name()),
str(getNukeStyleFilePath(item))]
csvWriter.writerow(table_data)
# Conveniently show the CSV file in the native file browser...
QDesktopServices.openUrl(QUrl('file:///%s' % (os.path.dirname(savePath))))
#### Adds Export... > Spreadsheet to CSV file in the Spreadsheet Menu ####
class SpreadsheetExportCSVMenu:
def __init__(self):
# hiero.core.events.registerInterest("kShowContextMenu/kSpreadsheet", self.eventHandler)
registerInterest("kShowContextMenu/kSpreadsheet", self.eventHandler)
self._exportAllSpreadsheetToCSV = self.createMenuAction("Spreadsheet To CSV", self.exportCSV)
self._exportCSVMenu = QMenu('Export...')
def createMenuAction(self, title, method):
action = QAction(title,None)
action.triggered.connect( method )
return action
def eventHandler(self, event):
self.selection = event.sender.selection()
enabled = True
if (self.selection is None) or (len(self.selection)==0):
self.selection = ()
enabled = False
self._exportAllSpreadsheetToCSV.setEnabled(enabled)
self._exportCSVMenu.setEnabled(enabled)
# Insert the custom Menu, divided by a separator
event.menu.addSeparator()
event.menu.addMenu(self._exportCSVMenu)
# Insert the action to the Export CSV menu
self._exportCSVMenu.addAction(self._exportAllSpreadsheetToCSV)
# Call the Method above to write the Sequence to a CSV file..
def exportCSV( self ):
print 'exporting CSV...'
# Ignore transitions from the selection
self.selection = [item for item in self.selection if isinstance(item, TrackItem)]
seq = self.selection[0].parent().parent()
print 'seq is', seq
if isinstance(seq,Sequence):
writeCSVFromSequence(seq)
else:
print 'Unable to Export Sequence'
#### Add the Menu... ####
csvActions = SpreadsheetExportCSVMenu() | UTF-8 | Python | false | false | 6,373 | py | 73 | SpreadsheetExportCSVMenu.py | 67 | 0.678487 | 0.675663 | 0 | 176 | 35.215909 | 168 |
holderdeord/hdo-transcript-search | 2,121,713,875,294 | 6253eabdb4b6dae89ebee95f52d21d6e7a6e7c50 | ba793fb0b4b7bc56d53489bd19201c2099443ef2 | /indexer/lib/hdo-transcript-indexer/extract_entities.py | df50d5fc53f0fc9717f17fbbc05dd92cf8382cb3 | []
| permissive | https://github.com/holderdeord/hdo-transcript-search | 27d38bdaabe2ef01db96c974d49fa2f2bbd6ec30 | 6fe920007ac07bf1adf009af8146e94cbd515d4a | refs/heads/master | 2022-12-17T16:29:45.236641 | 2020-12-28T11:45:38 | 2020-12-28T11:45:38 | 30,436,208 | 34 | 7 | BSD-3-Clause | false | 2022-12-07T08:24:01 | 2015-02-06T22:36:00 | 2022-05-25T10:49:47 | 2022-12-07T08:24:00 | 1,106 | 31 | 23 | 35 | JavaScript | false | false | from polyglot.text import Text
import json
import sys
txt = Text(sys.stdin.read())
txt.language = "no"
result = []
types = {
'I-PER': "person",
'I-ORG': "organisation",
'I-LOC': "location"
}
for chunk in txt.entities:
result.append({
"type": types[chunk.tag],
"text": str.join(' ', chunk),
"words": list(chunk)
})
print json.dumps(result)
| UTF-8 | Python | false | false | 366 | py | 77 | extract_entities.py | 48 | 0.617486 | 0.617486 | 0 | 23 | 14.913043 | 33 |
arosemena/ml-learning | 4,724,464,028,160 | c4ebbcdc8726650d677919121d786d5e896137e1 | 1081920d757188c517d520d379f41f8a68f71fbd | /main.py | faeffe11c77215a079501592bdf8ee1532b1aa5d | []
| no_license | https://github.com/arosemena/ml-learning | c0882a27364a490e8c186b1cb400d41fa6636c1b | 8ce6f5a78152844ca2db288e7fb0012f5b3c33e6 | refs/heads/master | 2023-01-13T21:22:59.609467 | 2020-11-09T17:42:51 | 2020-11-09T17:42:51 | 311,418,159 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/local/bin/python3
from regression.linear import cost
import csv
#
with open('test_data/fish_stats.csv') as file:
reader = csv.reader(file)
for row in reader:
print(', '.join(row))
| UTF-8 | Python | false | false | 205 | py | 3 | main.py | 1 | 0.663415 | 0.658537 | 0 | 10 | 19.5 | 46 |
KartikKapur/Small-Projects | 11,948,599,037,676 | f8aa2366cdcf50c67ae6f1db29901449a2809f26 | fabcb3fe7e641ba433c82f0ec4258b94b622ef5e | /Euler/One_Ten.py | 79a849f139b7091b4bfe20b811cfd3310a64b361 | []
| no_license | https://github.com/KartikKapur/Small-Projects | 7d9bd7259c6d809a59f0c146b815901eb93fed57 | 91a9aa98f68960a3c5128ffab508c879d721c0c8 | refs/heads/master | 2017-12-04T16:36:42.269729 | 2017-01-01T10:10:45 | 2017-01-01T10:10:45 | 69,705,069 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #PROBLEM 1
# If we list all the natural numbers below 10 that are multiples of 3 or 5,
# we get 3, 5, 6 and 9.
# The sum of these multiples is 23.
# Find the sum of all the multiples of 3 or 5 below n
def three_and_five(n):
n-=1
if n >0:
if n% 3 == 0 or n% 5==0:
return n+three_and_five(n)
else:
return three_and_five(n)
else:
return 0
#PROBLEM 2
# Each new term in the Fibonacci sequence is generated by adding the previous two terms.
#By starting with 1 and 2, the first 10 terms will be:
# By considering the terms in the Fibonacci sequence whose values do not exceed a set upperbound,
# find the sum of the even-valued term
def even_fib(upper_bound):
first,recent, total = 1,2,0
while first <upper_bound:
if first %2 ==0:
total+=first
first,recent =recent, first+recent
return total
#PROBLEM 3
# The prime factors of 13195 are 5, 7, 13 and 29.
# What is the largest prime factor of the number 600851475143 ?
def prime(n):
count =2
while count**2 < n:
while n % count == 0:
n = n / count
count = count + 1
return n
#PROBLEM 4
# Find the largest palindrome made from the product of N DIGIT number.
def palindrome():
n = 0
for a in range(10**(n*2), 10**(n), -1):
for b in range(a, 10**(n), -1):
x = a * b
if x > n:
s = str(a * b)
if s == s[::-1]:
n = a * b
return n
#PROBLEM 5
# What is the smallest positive number that is evenly divisible by
#all of the numbers from a lower_bound to an upper_bound?
def LCM (lower, upper):
return lower*upper/ GCD(lower,upper)
def GCD(lower,upper):
while(True):
if(lower%upper != 0):
temp = upper
upper = lower%upper
lower = temp
else:
return upper
def Small_mult(lower, upper):
num_list = list(range(lower,upper))
LastLCM = 1
for i in num_list:
LastLCM = LCM(LastLCM,i)
return (LastLCM)
#PROBLEM 6
#Find the difference between the sum of
# the squares of the first one hundred natural numbers and the square of the sum.
def sq_sq(x):
y=x
square_sum=0
sum_sq=0
while x>0:
square_sum+=x**2
x-=1
while y>0:
sum_sq+=y
y-=1
sum_sq=sum_sq**2
return abs(square_sum-sum_sq)
#PROBLEM 7
# What is the nth prime number?
def is_prime(number):
if number < 2:
return False
if number % 2 == 0:
return False
else:
for i in range(3, number):
if not number % i:
return False
return True
def nth_prime(n):
start = 1
number =2
while start<n:
number+=1
if is_prime(number):
start+=1
return number
print(nth_prime(10001))
| UTF-8 | Python | false | false | 2,848 | py | 7 | One_Ten.py | 6 | 0.576896 | 0.543188 | 0 | 118 | 23.101695 | 97 |
openvax/mhctools | 2,456,721,340,848 | 33ba3788360ca0399ef68d00e67917edf0e78c94 | 7fbf21ac567cac7ef447aa17b27188ba32c812c7 | /mhctools/mhcflurry.py | 31ebf0ebbb2f93276eba18e0dba664414553ca10 | [
"Apache-2.0"
]
| permissive | https://github.com/openvax/mhctools | 11267f241b3a9cd5b7b5a2df69207aaee9c6ad1e | 441a650da0f669e67e881adc96db4248f384a0e7 | refs/heads/master | 2023-08-08T18:52:33.516173 | 2023-08-01T19:03:09 | 2023-08-01T19:03:09 | 31,274,759 | 60 | 13 | Apache-2.0 | false | 2023-08-01T19:03:10 | 2015-02-24T18:23:59 | 2023-07-21T08:46:59 | 2023-08-01T19:03:09 | 462 | 74 | 16 | 16 | Python | false | false | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import logging
from numpy import nan
from .base_predictor import BasePredictor
from .binding_prediction import BindingPrediction
from .binding_prediction_collection import BindingPredictionCollection
from .unsupported_allele import UnsupportedAllele
logger = logging.getLogger(__name__)
class MHCflurry(BasePredictor):
"""
Wrapper around MHCflurry. Users will need to download MHCflurry models
first.
See https://github.com/hammerlab/mhcflurry
"""
def __init__(
self,
alleles,
default_peptide_lengths=[9],
predictor=None,
models_path=None):
"""
Parameters
-----------
alleles : list of str
default_peptide_lengths : list of int
predictor : mhcflurry.Class1AffinityPredictor (optional)
MHCflurry predictor to use
models_path : string
Models dir to use if predictor argument is None
"""
# moving import here since the mhcflurry package imports
# Keras and its backend (either Theano or TF) which end up
# slowing down responsive for any CLI application using MHCtools
from mhcflurry import Class1AffinityPredictor
BasePredictor.__init__(
self,
alleles=alleles,
default_peptide_lengths=default_peptide_lengths,
min_peptide_length=8,
max_peptide_length=15)
if predictor:
self.predictor = predictor
elif models_path:
logging.info("Loading MHCflurry models from %s" % models_path)
self.predictor = Class1AffinityPredictor.load(models_path)
else:
self.predictor = Class1AffinityPredictor.load()
# relying on BasePredictor and MHCflurry to both normalize
# allele names the same way using mhcnames
for allele in self.alleles:
if allele not in self.predictor.supported_alleles:
raise UnsupportedAllele(allele)
def predict_peptides(self, peptides):
"""
Predict MHC affinity for peptides.
"""
# importing locally to avoid slowing down CLI applications which
# don't use MHCflurry
from mhcflurry.encodable_sequences import EncodableSequences
binding_predictions = []
encodable_sequences = EncodableSequences.create(peptides)
for allele in self.alleles:
predictions_df = self.predictor.predict_to_dataframe(
encodable_sequences, allele=allele)
for (_, row) in predictions_df.iterrows():
binding_prediction = BindingPrediction(
allele=allele,
peptide=row.peptide,
affinity=row.prediction,
percentile_rank=(
row.prediction_percentile
if 'prediction_percentile' in row else nan),
prediction_method_name="mhcflurry")
binding_predictions.append(binding_prediction)
return BindingPredictionCollection(binding_predictions)
| UTF-8 | Python | false | false | 3,784 | py | 45 | mhcflurry.py | 41 | 0.646142 | 0.641913 | 0 | 103 | 35.737864 | 74 |
ohtacaesar/hobby-electronics | 2,388,001,851,647 | 70298c536b0c44942cbe608357e6b15ac31824c5 | aaf075acd366db5348cdd4373f6889222fc73bfd | /setup.py | 4f1a4fb103e34025b359da27a56550a2f898d343 | []
| no_license | https://github.com/ohtacaesar/hobby-electronics | cc8eb344727dd04bcff04575b8a7180df1923d98 | 2fa54e014c7d2fd9045c1cb27db87d6812e40227 | refs/heads/master | 2019-05-23T11:08:17.460233 | 2017-05-22T05:13:16 | 2017-05-22T05:13:16 | 47,062,899 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from setuptools import setup
# Packaging metadata for the hobby-electronics project.
setup(
    name='hobby-electronics',
    version='0.0.1',
    # Runtime dependencies: SPI bus access and timezone handling.
    install_requires=[
        'spidev',
        'pytz',
    ],
    packages=['hobbye'],
    # Installs a `tempd` console command that runs the temperature logger.
    entry_points={
        'console_scripts': [
            'tempd = hobbye.temperature:main',
        ]
    }
)
| UTF-8 | Python | false | false | 290 | py | 12 | setup.py | 2 | 0.517241 | 0.506897 | 0 | 16 | 17.125 | 46 |
malisal/streampie | 16,355,235,489,627 | c71c1ab55a27359c23e1b40ab1144b8e7f38cc15 | 1d1e1ff4614e085ad5187fbc6aa68a003a5f1ac2 | /examples/factor_pool.py | 89140849e5084cb9fb4b5657d00d148eca9bfbd8 | [
"MIT"
]
| permissive | https://github.com/malisal/streampie | 9e83cf9ba51a7c241935d88f3821ed256de20133 | 7b1b24b01b234cd093a350f04ffaf1a4974282ad | refs/heads/master | 2021-01-10T07:51:44.333259 | 2016-04-20T17:11:07 | 2016-04-20T17:11:07 | 53,448,331 | 5 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from streampie import *
ints = [2498834631017, 14536621517459, 6528633441793, 1941760544137, 7311548077279,
8567757849149, 5012823744127, 806981130983, 15687248010773, 7750678781801,
2703878052163, 3581512537619, 12656415588017, 468180585877, 19268446801283,
5719647740869, 11493581481859, 366611086739]
def factor(n):
result = set()
for i in range(1, int(n ** 0.5) + 1):
div, mod = divmod(n, i)
if mod == 0:
result |= {i, div}
return sorted(list(result))[:-1]
def do_work(wid, items):
    """Worker body for the process pool: lazily yield factor() of each item.

    *wid* is the worker id supplied by streampie; it is not used here.
    """
    for number in items:
        yield factor(number)
print ints >> ProcessPool(do_work, poolsize=8) >> list
| UTF-8 | Python | false | false | 644 | py | 9 | factor_pool.py | 5 | 0.666149 | 0.28882 | 0 | 20 | 31.15 | 84 |
llalma/IFB104_Assignment2 | 14,388,140,450,333 | 01dc09a47f156ba5f96c0cec82a8114e0ea53f3b | 5f8c2f6baec8ba0801819ed69aa3f42c444bc79f | /news_archivist.py | 7921234416550a63e2b50c4a0fbb9fdbd7af3d0f | []
| no_license | https://github.com/llalma/IFB104_Assignment2 | 0bcd31844854a095b4277a72245179ddba469e3c | 172fc9b133a7e2f2e70870a63aa1ca05ada68d22 | refs/heads/master | 2020-04-28T10:31:09.654702 | 2019-03-12T12:18:07 | 2019-03-12T12:18:07 | 175,204,043 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#-----Statement of Authorship----------------------------------------#
#
# This is an individual assessment item. By submitting this
# code I agree that it represents my own work. I am aware of
# the University rule that a student must not act in a manner
# which constitutes academic dishonesty as stated and explained
# in QUT's Manual of Policies and Procedures, Section C/5.3
# "Academic Integrity" and Section E/2.1 "Student Code of Conduct".
#
# Student no: N9960392
# Student name: Liam Hulsman-Benson
#
# NB: Files submitted without a completed copy of this statement
# will not be marked. Submitted files will be subjected to
# software plagiarism analysis using the MoSS system
# (http://theory.stanford.edu/~aiken/moss/).
#
#--------------------------------------------------------------------#
#-----Task Description-----------------------------------------------#
#
# News Archivist
#
# In this task you will combine your knowledge of HTMl/XML mark-up
# languages with your skills in Python scripting, pattern matching
# and Graphical User Interface development to produce a useful
# application for maintaining and displaying archived news or
# current affairs stories on a topic of your own choice. See the
# instruction sheet accompanying this file for full details.
#
#--------------------------------------------------------------------#
#-----Imported Functions---------------------------------------------#
#
# Below are various import statements that were used in our sample
# solution. You should be able to complete this assignment using
# these functions only.
# Import the function for opening a web document given its URL.
from urllib.request import urlopen
# Import the function for finding all occurrences of a pattern
# defined via a regular expression, as well as the "multiline"
# and "dotall" flags.
from re import findall, MULTILINE, DOTALL
# A function for opening an HTML document in your operating
# system's default web browser. We have called the function
# "webopen" so that it isn't confused with the "open" function6
# for writing/reading local text files.
from webbrowser import open as webopen
# An operating system-specific function for getting the current
# working directory/folder. Use this function to create the
# full path name to your HTML document.
from os import getcwd
# An operating system-specific function for 'normalising' a
# path to a file to the path-naming conventions used on this
# computer. Apply this function to the full name of your
# HTML document so that your program will work on any
# operating system.
from os.path import normpath
# Import the standard Tkinter GUI functions.
from tkinter import *
# Import the SQLite functions.
from sqlite3 import *
# Import the date and time function.
from datetime import datetime
#
#--------------------------------------------------------------------#
#-----Student's Solution---------------------------------------------#
#
# Put your solution at the end of this file.
#
# Name of the folder containing your archived web documents. When
# you submit your solution you must include the web archive along with
# this Python program. The archive must contain one week's worth of
# downloaded HTML/XML documents. It must NOT include any other files,
# especially image files.
################ PUT YOUR SOLUTION HERE #################
# Html for the top of the document e.g. title of source to archivist name
html_top = """<!DOCTYPE html>
<html>
<head>
<title>***Date***</title>
</head>
<body style="background-color:#EEF313;">
<center>
<h1>ABC Sports NEWS Archive</h1>
<p></p>
<h2>***Date***</h2>
<img border="2" src = https://vignette.wikia.nocookie.net/logopedia/images/9/9a/AbcSports.jpg/revision/latest?cb=20140306150555 alt = 'ABC Sports Logo' height = "200" width = "600">
<p style="font-size:20px">NEWS source:<a href="http://www.abc.net.au/news/feed/45924/rss.xml">http://www.abc.net.au/news/feed/45924/rss.xml</p></a>
<p style="font-size:20px">Archivist: Liam Hulsman-Benson</p>
"""
# Html for each story, a new template is added for each story in the list
html_template = """<!DOCTYPE html>
<center><a href="***Link***"><h2>***Story_num*** ***TITLE***</h2></a></center>
<table style = "border-bottom: 1px solid black">
<tr>
<th width = 5% style="font-size:12px"><img border="2" src = ***Image*** alt = '***Alt_Image***' height = "400" width = "600"></th>
<th ><p style="font-size:20px">***Paragraph***</p></th>
</tr>
<tr>
<th width = 8%></th>
<th width = 8% ><p style="font-size:12px">Written: ***TIME***</p></th>
</table>
</body>
</html>
"""
##### Functions #####
def Event_logger_toggle():
    """Record a toggle of the 'Event Logger' checkbox in event_log.db.

    Reads the checkbox state from the module-level IntVar ``state`` and
    inserts a matching row into the Event_Log table of the event_log.db
    file that lives next to this script.

    Improvements over the original: the redundant ``state.set()`` calls
    (which re-assigned the variable to its current value) are removed,
    the duplicated connect/insert/commit/close sequence is written once,
    and the insert uses a parameterised query.
    """
    general_path = normpath(getcwd())
    if state.get() == 1:
        description = 'Event logging switched on'
    elif state.get() == 0:
        description = 'Event logging switched off'
    else:
        # A Checkbutton IntVar is only ever 0 or 1; be defensive anyway.
        return
    # NOTE(review): the literal backslash path separator only works on
    # Windows — consider os.path.join for portability.
    conn = connect(general_path + '\event_log.db')
    conn.execute("INSERT INTO Event_Log(Description) VALUES(?)", (description,))
    conn.commit()
    conn.close()
def view_html():
    """Open the generated Extracted.html in the default web browser and,
    when event logging is enabled, record the action in event_log.db."""
    base_dir = normpath(getcwd())
    webopen(base_dir + '\Extracted.html')

    ### Part B: optional event logging ###
    if state.get() == 1:
        conn = connect(base_dir + '\event_log.db')
        conn.execute("INSERT INTO Event_Log(Description) VALUES('Extracted news displayed in web browser')")
        conn.commit()
        conn.close()
def get_latest():
    """Download and archive the latest news, then add today's date to the
    GUI listbox (only if it is not already listed).

    Runs downloader.py via exec(); that script is trusted local code, but
    exec-of-file is fragile — a proper import or subprocess would be safer.

    Bug fix: the original called ``days.insert`` unconditionally, so
    clicking the button twice on the same day duplicated the last listbox
    entry.  The insert now happens only when the date is new.
    """
    global stories_per_date
    general_path = normpath(getcwd())
    # Run the downloader script, which saves today's feed into the archive.
    exec(open(general_path + '\downloader.py').read())
    today = str(datetime.now())[0:10]
    if today not in stories_per_date:
        stories_per_date = stories_per_date + [today]
        days.insert(END, stories_per_date[-1])

    ### Part B: optional event logging ###
    if state.get() == 1:
        conn = connect(general_path + '\event_log.db')
        sql = "INSERT INTO Event_Log(Description) VALUES('The latest news downloaded and archived')"
        conn.execute(sql)
        conn.commit()
        conn.close()
def generate_html(titles, paragraphs, dates, images, links):
    """Fill the HTML templates with the extracted stories and write the
    result to Extracted.html in the current directory.

    All five parameters are parallel lists with one entry per story:
    titles/paragraphs/links are strings, dates are date strings whose
    first 16 characters are displayed, and each images entry is a
    one-element list holding the image URL (or a placeholder message).

    Improvements over the original: the output file is written via a
    ``with`` block and explicitly as UTF-8 (feed text is UTF-8, so the
    old encoding-less open() could raise UnicodeEncodeError on platforms
    whose default encoding is narrower), and the manual story counter is
    replaced by enumerate().
    """
    html_code = html_top + html_template
    for idx, title in enumerate(titles):
        html_code = html_code.replace('***TITLE***', title)
        html_code = html_code.replace('***Story_num***', str(idx + 1) + ".")
        # Both ***Date*** placeholders live in html_top; use story 0's date.
        html_code = html_code.replace('***Date***', dates[0][0:16])
        html_code = html_code.replace('***Paragraph***', paragraphs[idx])
        html_code = html_code.replace('***TIME***', dates[idx])
        html_code = html_code.replace('***Image***', images[idx][0])
        html_code = html_code.replace('***Alt_Image***', "Image: " + images[idx][0][33:56] + " could not be reached")
        html_code = html_code.replace('***Link***', links[idx])
        # Append a fresh (placeholder-bearing) template for every story
        # except the last one.
        if idx != len(titles) - 1:
            html_code = html_code + html_template

    # Write the HTML code to a file.
    with open('Extracted.html', 'w', encoding='utf-8') as html_file:
        html_file.write(html_code)
def extract_method(): # Extract the required data from the archived HTML file
    """Parse the archived RSS/HTML file for the date selected in the GUI,
    extract title/image/paragraph/date/link for each story and hand the
    first ten stories to generate_html()."""
    # Make the 'Display extracted' button clickable now that output exists.
    display['state'] = 'normal'
    # Find date that was selected in the GUI listbox.
    selected = days.curselection()
    date = days.get(selected)

    ##### Separate each story #####
    # First pass: record the line numbers of every <item>/</item> pair;
    # each pair delimits one story in the archived feed.
    html = open('InternetArchive/'+date+'.html', 'r', encoding = 'UTF-8')
    linenum = 1
    story_start_pos = []
    story_end_pos = []
    for line in html:
        if line.find('<item>') != -1:
            story_start_pos = story_start_pos + [linenum]
        elif line.find('</item>') != -1:
            story_end_pos = story_end_pos + [linenum]
        linenum = linenum + 1
    linenum = 1
    story = ''
    stories = []
    # Second pass: collect the text of each story.
    # NOTE(review): the archive file is re-opened and re-read once per
    # story; reading it into a list once would avoid the repeated passes.
    for All in range(len(story_start_pos)):
        linenum = 1
        story = ''
        html = open('InternetArchive/'+date+'.html', 'r', encoding = 'UTF-8') # re-open to restart iteration from line 1
        for line in html:
            if story_start_pos[All] <= linenum <= story_end_pos[All]:
                story = story + line
            linenum = linenum + 1
        stories = stories + [story]

    ##### Title #####
    titles = []
    title_regex = '<title>.*\</title>'
    for All in range(len(stories)):
        title = findall(title_regex, stories[All])
        # NOTE: at this point `title` is still the findall list (length 1),
        # so len(title)-9 == -8, which strips the trailing '</title>' (8
        # chars) while 7 strips the leading '<title>'.  Accidental but
        # correct; same pattern is used for the other fields below.
        title = title[0][7:len(title)-9] # Start at 7 as <title> is 7 long, similar for the -9
        titles = titles + [title]

    ##### Image #####
    # Story images end in 627.jpg or 627.png; fall back to a placeholder
    # string when neither pattern matches.
    images = []
    image_regex = 'http.*627.jpg'
    image_regex2 = 'http.*627.png'
    for All in range(len(stories)):
        image = findall(image_regex, stories[All])
        if image == []:
            image = findall(image_regex2, stories[All])
        if image == []:
            image = ['No Image assosiated with story']
        images = images + [image]

    ##### Paragraphs #####
    # Slice [3:-4] strips '<p>' and '</p>' (see Title note).
    paragraphs = []
    paragraph_regex = '<p>.*</p>'
    for All in range(len(stories)):
        paragraph = findall(paragraph_regex, stories[All])
        paragraph = paragraph[0][3:len(paragraph)-5]
        paragraphs = paragraphs + [paragraph]

    ##### Date #####
    # Slice [9:-10] strips '<pubDate>' and '</pubDate>' (see Title note).
    dates = []
    date_regex = '<pubDate>.*?</pubDate>'
    for All in range(len(stories)):
        date = findall(date_regex, stories[All])
        date = date[0][9:len(date)-11]
        dates = dates + [date]

    ##### Link #####
    # The regex captures up to the next '<'; slice [6:-1] strips '<link>'
    # and that trailing '<' (see Title note).
    links = []
    link_regex = '<link>.*<'
    for All in range(len(stories)):
        link = findall(link_regex, stories[All])
        link = link[0][6:len(link)-2]
        links = links + [link]

    ##### Print to Html doc #####
    # Only the first ten stories are written to the page.
    generate_html(titles[0:10],paragraphs[0:10],dates[0:10],images[0:10],links[0:10])
    # Show the extracted date on the GUI.
    Extracted_date.set("The date extracted is: "+dates[0][0:16])

    ### Part B: optional event logging ###
    path = getcwd()
    general_path =normpath(path)
    if state.get() == 1:
        conn = connect(general_path+'\event_log.db')
        sql = "INSERT INTO Event_Log(Description) VALUES('NEWS from " +dates[0][5:16]+ " extracted from archive')"
        conn.execute(sql)
        conn.commit()
        conn.close()
##### GUI #####
# Create the main application window.
window = Tk()
# Give the window a title.
window.title('ABC Sports NEWS Archive')
# Label text showing which archive date was last extracted.
Extracted_date = StringVar()
# Dates for which archived feed files exist in InternetArchive/.
stories_per_date = ['2017-10-18','2017-10-19','2017-10-20','2017-10-21','2017-10-22','2017-10-23','2017-10-24']
# Labels
Title = Label(window,text = "ABC Sports Archive",fg="blue",font=("Helvetica", 20))
Date_extaracted = Label(window,textvariable = Extracted_date,fg="black",font=("Helvetica", 15))
# Buttons
extract = Button(window,text = 'Extract news from highlighted day', command = extract_method,height=2,width=20,fg = "blue",bg="yellow",font=("Helvetica", 16),wraplength=200)
display = Button(window,text = 'Display extracted', command = view_html,height=2,width=20,fg = "blue",bg="yellow",font=("Helvetica", 16),wraplength=200, state=DISABLED) # Disabled until something has been extracted
# NOTE(review): this assignment shadows the get_latest() function with the
# Button widget; it still works because `command = get_latest` was bound
# before the rebinding, but renaming the widget would be clearer.
get_latest = Button(window,text = 'Get latest', command = get_latest,height=2,width=20,fg = "blue",bg="yellow",font=("Helvetica", 16),wraplength=200)
# Listbox of archived dates
days = Listbox(window,height=8,width=20,font=("Helvetica", 15))
# Checkbox toggling event logging (state: 1 = on, 0 = off)
state = IntVar()
Event_logger = Checkbutton(window, text = "Event Logger", command = Event_logger_toggle, variable = state)
# Place widgets in the window grid.
Title.grid(row = 0, column = 1, columnspan = 2)
Date_extaracted.grid(row=1, column = 1,columnspan=2)
extract.grid(row = 2, column = 2)
display.grid(row = 3, column = 2)
get_latest.grid(row = 4, column = 2)
days.grid(row = 2, column = 1,rowspan=3)
Event_logger.grid(row = 5, column = 2)
# Populate the listbox with the selectable dates.
for All in range(len(stories_per_date)):
    days.insert(END,stories_per_date[All])
# Pre-select the most recent date.
days.selection_set( first = END )
# Start the Tk event loop (blocks until the window is closed).
window.mainloop()
| UTF-8 | Python | false | false | 13,288 | py | 3 | news_archivist.py | 1 | 0.590321 | 0.570375 | 0 | 359 | 34.963788 | 234 |
ztkIsAlreadyTaken/rain | 6,622,839,617,417 | caec0efaad61c9fec42427c46931b71700a2e249 | 49b607d6ea398f1b25e3ae8f28ae158dc49d7ea2 | /rain/redis/cluster.py | 9e9f20b2337c6c4a775adb4d551c28284f4f86dd | [
"Unlicense"
]
| permissive | https://github.com/ztkIsAlreadyTaken/rain | 3bb4c2d5dab80eb7a1543d77e90ffb2d452b49ea | 774e52d9d38c87d94105f77c2dfc70d99416b694 | refs/heads/master | 2020-03-18T04:04:55.409881 | 2019-12-01T09:51:05 | 2019-12-01T09:51:05 | 134,268,466 | 17 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import asyncio
from typing import List
from rain.redis.client import Redis
from rain.redis.others import crc16
# noinspection PyMissingConstructor
class RedisCluster(Redis):
    """Client-side Redis cluster: every command is routed to one member
    node, chosen by CRC16-hashing the key (Redis' own slot function)."""

    def __init__(self, *nodes: dict):
        self.nodes: List[Redis] = [Redis(**conf) for conf in nodes]
        self.size = len(self.nodes)
        self._command_funcs = {}

    async def _ainit(self):
        # Finish the asynchronous part of every node's initialisation.
        for node in self.nodes:
            await node._ainit()

    def _command_wrapper(self, command):
        # Build — and memoise — a routing coroutine for *command*.
        if command not in self._command_funcs:
            async def execute(key, *args, **kwargs):
                node = self.nodes[crc16(key.do_encode()) % self.size]
                return await getattr(node, command)(key, *args, **kwargs)

            execute.__qualname__ = 'RedisCluster.{}'.format(command)
            self._command_funcs[command] = execute
        return self._command_funcs[command]

    def __getattribute__(self, item):  # Redis client proxy
        # Only the cluster's own plumbing is looked up normally; any
        # other attribute name is treated as a Redis command and proxied
        # to the node that owns the key.
        if item in {
            'nodes', 'size', '_enter',
            '_command_wrapper', '_command_funcs',
            '_ainit', 'new'
        }:
            return super().__getattribute__(item)
        return self._command_wrapper(item)

    @classmethod
    async def new(cls, *nodes: dict) -> 'RedisCluster':
        instance = cls(*nodes)
        await instance._ainit()
        return instance
def create_redis_cluster(*redis_conf: dict, loop=None) -> RedisCluster:
    """Synchronously build a RedisCluster from node configs, driving the
    async construction on *loop* (or the default event loop)."""
    event_loop = loop if loop else asyncio.get_event_loop()
    return event_loop.run_until_complete(RedisCluster.new(*redis_conf))
| UTF-8 | Python | false | false | 1,358 | py | 61 | cluster.py | 58 | 0.684831 | 0.681885 | 0 | 52 | 25.115385 | 71 |
abhinav-lv/Projects-Archive | 3,496,103,385,909 | 274a8b8aa39802b4e68dd82cb3976179bd355a26 | fa4c75393474723c2cf30048215d0d1eb923067a | /Machine Learning/14 OpenCV/ObjectTracking/main.py | e84d02be23c6f0fabf2758a7a1bdf1f01b288256 | []
| no_license | https://github.com/abhinav-lv/Projects-Archive | 9395bceb2ccd48904c1ae67154b263df4d28d5cc | 563c0d3cc2bd48db0974787952c4f2094e957767 | refs/heads/main | 2023-09-06T04:00:36.277052 | 2021-11-10T18:09:55 | 2021-11-10T18:09:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
from tracker import *
tracker = EuclideanDistTracker()
cap = cv2.VideoCapture("highway.mp4")
# Object detection from stable camera
object_detector = cv2.createBackgroundSubtractorMOG2(
history=100, varThreshold=80)
while True:
ret, frame = cap.read()
frame = cv2.resize(frame, (720, 480))
# Reign of interest
height, width, _ = frame.shape
roi = frame[240:480, 280:450] # height, width
# 1. Object detection
mask = object_detector.apply(roi)
_, mask = cv2.threshold(mask, 252, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(
mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
detections = []
for cnt in contours:
# Calculate area and remove small elements
area = cv2.contourArea(cnt)
if area > 100:
# cv2.drawContours(roi, [cnt], -1, (0, 255, 0), 1)
x, y, w, h = cv2.boundingRect(cnt)
cv2.rectangle(roi, (x, y), (x+w, y+h), (0, 255, 0), 2)
detections.append([x, y, w, h])
# 2. Object tracking
boxes_id = tracker.update(detections)
for box_id in boxes_id:
x, y, w, h, id = box_id
cv2.putText(roi, str(id), (x, y-15),
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 1)
cv2.rectangle(roi, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow('Camera', frame)
cv2.imshow('Mask', mask)
cv2.imshow('ROI', roi)
if cv2.waitKey(30) == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| UTF-8 | Python | false | false | 1,505 | py | 17 | main.py | 3 | 0.58804 | 0.528904 | 0 | 53 | 27.396226 | 66 |
staplezz/datathon2020 | 18,734,647,360,607 | 42922654252f675e33475d4beb8ab1f31c4ad1fc | f5884f559ec162f576715bc4afa63810251a897e | /paso1/parserXML.py | c4f0b47739dfecc1d2542040bd58503032546f50 | [
"MIT"
]
| permissive | https://github.com/staplezz/datathon2020 | 1c04ce19ee59192f27ca1aaf5c2dcef200b4f002 | 476bdc85c3152cf1ed2aa1769ed0e4624eb5ca44 | refs/heads/master | 2022-03-29T14:03:19.634760 | 2020-01-25T16:17:02 | 2020-01-25T16:17:02 | 236,052,337 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from xml.etree import ElementTree
import os
import csv
'''
Parte 1 del proyecto de Datathon.
- Parser del xml dado de los autores hacia un archivo csv.
- Se incluyen los datos de los autores que tengan solo la etiqueta
- id_article y autor.
Además se extraen sólo las publicaciones con etiqueta "article" e
"inproceeding"
'''
#Leemos el archivo xml.
tree = ElementTree.parse("dblp_1990_2000_clean_utf8.xml")
#Creamos el archivo csv en donde almacenaremos
#los datos necesarios.
csv_authorships = open("authorships.csv", "w", newline='', encoding='utf-8')
csvwriter = csv.writer(csv_authorships)
#Creamos y escribimos los nombres de las columnas de nuestros datos.
col_names = ['id_article', 'author']
csvwriter.writerow(col_names)
#Obtenemos la raíz del árbol de xml.
root = tree.getroot()
#Iteramos sobre los datos del xml.
#Artículos
for article in root.findall('article'):
articleyear = int(article.find('year').text)
if articleyear >= 1990 and articleyear <= 2000:
for autor in article.iter('author'):
event_data = []
key = article.get('key')
key = key.split("/")[-1]
event_data.append(key)
event_data.append(autor.text)
csvwriter.writerow(event_data)
else:
continue
#Inproceedings
for article in root.findall('inproceedings'):
articleyear = int(article.find('year').text)
if articleyear >= 1990 and articleyear <= 2000:
for autor in article.iter('author'):
event_data = []
key = article.get('key')
key = key.split("/")[-1]
event_data.append(key)
event_data.append(autor.text)
csvwriter.writerow(event_data)
else:
continue
csv_authorships.close()
| UTF-8 | Python | false | false | 1,618 | py | 11 | parserXML.py | 6 | 0.719157 | 0.701178 | 0 | 60 | 25.883333 | 76 |
wecode-bootcamp-korea/22-Westagram-backend | 4,174,708,244,467 | 55ff2ef72477fa97cec958f19394f6f907b72aca | ecdb53a7a51f45c98a6a4447695048b7b730c51a | /students/sinjae/users/urls.py | ab5e9436b094f594f6bf136fecea4ff2b7a9ac83 | []
| no_license | https://github.com/wecode-bootcamp-korea/22-Westagram-backend | dd9e6df23c425c3cf1b9678ccf866638077eb95b | 843c8c2f52c6f74b725cbad1c4936dcaee630d94 | refs/heads/main | 2023-06-19T06:41:15.787859 | 2021-06-30T08:15:39 | 2021-06-30T08:15:39 | 379,212,367 | 0 | 0 | null | false | 2021-06-30T08:15:40 | 2021-06-22T09:24:40 | 2021-06-30T07:34:06 | 2021-06-30T08:15:40 | 399 | 0 | 1 | 10 | Python | false | false | from django.urls import path
from users.views import SignUp
urlpatterns = [
path("/signup", SignUp.as_view()),
path("/signin", SignUp.as_view()),
]
| UTF-8 | Python | false | false | 157 | py | 46 | urls.py | 41 | 0.66879 | 0.66879 | 0 | 7 | 21.428571 | 38 |
MaxPoon/Leetcode | 16,149,077,055,324 | 0dd51d6ee4ef9a937bcf41596125b45d1b30e15d | f0bbca88acab9f75a534c8b228f04abac33735f3 | /python/279.PerfectSquares_2.py | e128678f308588a560bd3ecefe382c56f2a51e91 | []
| no_license | https://github.com/MaxPoon/Leetcode | e4327a60d581f715a7c818b8e8e8aa472ed776c1 | 15f012927dc34b5d751af6633caa5e8882d26ff7 | refs/heads/master | 2020-09-17T05:33:13.877346 | 2019-05-09T04:34:54 | 2019-05-09T04:34:54 | 67,481,937 | 15 | 8 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution(object):
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
if int(n**0.5)**2 == n: return 1
while n%4==0: n/=4
if n%8 == 7: return 4
for a in range(int(n**0.5),0,-1):
b = int((n-a*a)**0.5)
if a*a+b*b==n:
return 2
return 3 | UTF-8 | Python | false | false | 273 | py | 267 | 279.PerfectSquares_2.py | 265 | 0.516484 | 0.450549 | 0 | 14 | 18.571429 | 35 |
SindyPin/Python_Exercises | 3,547,643,029,961 | 628e21ffa75df19a0166bbc950f3536f12b89882 | 07734c4b43bb25a0ea0733e8dd35baa64cd33fcb | /Triangles.py | b3176b261332e52cb303c02a508ecb89dd614332 | []
| no_license | https://github.com/SindyPin/Python_Exercises | 0f99de4fbd79d4bcf0856aad0aeb45fe6acf4fd3 | 0080225f668bc62dc4fa620a90cc790ca031d6c4 | refs/heads/main | 2022-12-26T21:03:12.706001 | 2020-10-15T17:13:12 | 2020-10-15T17:13:12 | 304,394,918 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | sideA = float(input('Enter the first side value of the triangle: '))
sideB = float(input('Enter the second side value of the triangle: '))
sideC = float(input('Enter the third side value of the triangle: '))
if (sideA < sideB + sideC and sideB < sideA + sideC and sideC < sideA + sideB ) and (sideA == sideB == sideC):
print('The figure is an equilateral triangle (all sides are equal)')
elif (sideA < sideB + sideC and sideB < sideA + sideC and sideC < sideA + sideB) and (sideA != sideB != sideC):
print('The figure is an scalene triangle (all sides are different)')
elif (sideA < sideB + sideC and sideB < sideA + sideC and sideC < sideA + sideB) and ((sideA == sideB) or (sideA == sideC) or (sideB == sideC)):
print('The figure is an isosceles triangle (only two sides are equal)')
else:
print('The figure is not a triangle')
| UTF-8 | Python | false | false | 847 | py | 82 | Triangles.py | 81 | 0.68477 | 0.68477 | 0 | 12 | 69.583333 | 144 |
CallMeEricChiu/tensorflow2.0 | 755,914,268,868 | b5659627559fcb089ed6d6b306480bce48650d7b | 7902ebeeca1f603034ed3e1336a73db5a9d4c312 | /dropout.py | d8e028f1ca6a534c81773323e0da29115880cc36 | []
| no_license | https://github.com/CallMeEricChiu/tensorflow2.0 | 647c5058af4a91ca5c7b1bc5bc6745f2eb9b67a3 | 9abec41787ccf035613f6a793d220ff28138e138 | refs/heads/master | 2020-09-28T18:40:23.675583 | 2019-12-15T12:05:57 | 2019-12-15T12:05:57 | 226,837,215 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 -*-
import tensorflow as tf
from tensorflow.keras import datasets, layers, optimizers, Sequential
network = Sequential(
[
layers.Dense(256,'relu'),
layers.Dropout(0.5), # 0.5 rate to drop
layers.Dense(128,'relu'),
layers.Dropout(0.5),
layers.Dense(64,'relu'),
layers.Dense(32,'relu'),
layers.Dense(10)
]
)
for step,(x,y) in enumerate(db):
with tf.GradientTape() as tape:
x = tf.reshape(x,[-1,28*28])
out = network(x,training = False)
# test 使用dropout必须指定training参数
out = network(x,training=False)
| UTF-8 | Python | false | false | 571 | py | 18 | dropout.py | 17 | 0.663063 | 0.61982 | 0 | 23 | 22.608696 | 69 |
dkyos/ai-architect | 6,270,652,254,541 | 6df1f1176571abe655acece3b154b4072ad3c543 | 897f3ae26b3d4b5b0b164db559fc40c61b64b9ef | /ai_architect/models/wd2vec.py | 401de87c6fd1170f919fcf0c0643c8beb11ca31f | []
| no_license | https://github.com/dkyos/ai-architect | 0b0dfa11df31774539ff05cddeba133a6a8bea22 | 1842cb5b409fd690a008ea787e0ce1b367abbc8b | refs/heads/master | 2020-05-23T12:05:33.683740 | 2019-05-20T14:38:52 | 2019-05-20T14:38:52 | 186,750,714 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import json
import logging
import sys
from gensim.models import FastText, Word2Vec, KeyedVectors
from gensim.models.word2vec import LineSentence
from gensim import utils
import nltk
from nltk.corpus import conll2000
from six import iteritems
logger = logging.getLogger(__name__)
# pylint: disable-msg=too-many-instance-attributes
class WD2vec:
"""
Initialize the wd2vec model, train it, save it and load it.
"""
# pylint: disable-msg=too-many-arguments
# pylint: disable-msg=too-many-locals
# pylint: disable-msg=too-many-branches
def __init__( # noqa: C901
self,
corpus,
corpus_format='txt',
word_embedding_type='word2vec',
sg=0,
size=100,
window=10,
alpha=0.025,
min_alpha=0.0001,
min_count=5,
sample=1e-5,
workers=20,
hs=0,
negative=25,
cbow_mean=1,
iterations=15,
min_n=1,
max_n=6,
prune_non_np=True):
"""
Initialize wd2vec model and train it.
Args:
corpus (str): path to the corpus.
corpus_format (str {json,txt,conll2000}): format of the input marked corpus; txt and json
formats are supported. For json format, the file should contain an iterable of
sentences. Each sentence is a list of terms (unicode strings) that will be used for
training.
mark_char (char): special character that marks NP's suffix.
word_embedding_type (str {word2vec,fasttext}): word embedding model type; word2vec and
fasttext are supported.
wd2vec_model_file (str): path to the file where the trained wd2vec model has to be
stored.
word_embedding_type is fasttext and word_ngrams is 1, binary should be set to True.
sg (int {0,1}): model training hyperparameter, skip-gram. Defines the training
algorithm. If 1, CBOW is used,otherwise, skip-gram is employed.
size (int): model training hyperparameter, size of the feature vectors.
window (int): model training hyperparameter, maximum distance between the current and
predicted word within a sentence.
alpha (float): model training hyperparameter. The initial learning rate.
min_alpha (float): model training hyperparameter. Learning rate will linearly drop to
`min_alpha` as training progresses.
min_count (int): model training hyperparameter, ignore all words with total frequency
lower than this.
sample (float): model training hyperparameter, threshold for configuring which
higher-frequency words are randomly downsampled, useful range is (0, 1e-5)
workers (int): model training hyperparameter, number of worker threads.
hs (int {0,1}): model training hyperparameter, hierarchical softmax. If set to 1,
hierarchical softmax will be used for model training. If set to 0, and `negative` is non-
zero, negative sampling will be used.
negative (int): model training hyperparameter, negative sampling. If > 0, negative
sampling will be used, the int for negative specifies how many "noise words" should be
drawn (usually between 5-20). If set to 0, no negative sampling is used.
cbow_mean (int {0,1}): model training hyperparameter. If 0, use the sum of the context
word vectors. If 1, use the mean, only applies when cbow is used.
iterations (int): model training hyperparameter, number of iterations.
min_n (int): fasttext training hyperparameter. Min length of char ngrams to be used
for training word representations.
max_n (int): fasttext training hyperparameter. Max length of char ngrams to be used for
training word representations. Set `max_n` to be lesser than `min_n` to avoid char
ngrams being used.
vectors with subword (ngrams) information. If 0, this is equivalent to word2vec training.
prune_non_np (bool): indicates whether to prune non-NP's after training process.
"""
self.word_embedding_type = word_embedding_type
self.sg = sg
self.size = size
self.window = window
self.alpha = alpha
self.min_alpha = min_alpha
self.min_count = min_count
self.sample = sample
self.workers = workers
self.hs = hs
self.negative = negative
self.cbow_mean = cbow_mean
self.iter = iterations
self.min_n = min_n
self.max_n = max_n
self.prune_non_np = prune_non_np
if corpus_format == 'txt':
self._sentences = LineSentence(corpus)
print(self._sentences)
elif corpus_format == 'json':
with open(corpus) as json_data:
self._sentences = json.load(json_data)
else:
logger.error('invalid corpus format: %s', corpus_format)
sys.exit(0)
logger.info('training wd2vec model')
self._train()
def _train(self):
"""
Train the wd2vec model.
"""
if self.word_embedding_type == 'word2vec':
self.model = Word2Vec(
self._sentences,
sg=self.sg,
size=self.size,
window=self.window,
alpha=self.alpha,
min_alpha=self.min_alpha,
min_count=self.min_count,
sample=self.sample,
workers=self.workers,
hs=self.hs,
negative=self.negative,
cbow_mean=self.cbow_mean,
iter=self.iter)
elif self.word_embedding_type == 'fasttext':
self.model = FastText(
self._sentences,
sg=self.sg,
size=self.size,
window=self.window,
alpha=self.alpha,
min_alpha=self.min_alpha,
min_count=self.min_count,
sample=self.sample,
workers=self.workers,
hs=self.hs,
negative=self.negative,
cbow_mean=self.cbow_mean,
iter=self.iter,
min_n=self.min_n,
max_n=self.max_n)
else:
logger.error('invalid word embedding type: %s', self.word_embedding_type)
sys.exit(0)
def save(self, wd2vec_model_file='wd2vec.model', word2vec_format=True):
"""
Save the wd2vec model.
Args:
wd2vec_model_file (str): the file containing the wd2vec model to load
word2vec_format(bool): boolean indicating whether to save the model in original
word2vec format.
"""
if self.word_embedding_type == 'fasttext':
self.model.save(wd2vec_model_file)
else:
self.model.save(wd2vec_model_file)
@classmethod
def load(self, wd2vec_model_file, word2vec_format=True):
"""
Load the wd2vec model.
Args:
wd2vec_model_file (str): the file containing the wd2vec model to load
word2vec_format(bool): boolean indicating whether the model to load has been stored in
original word2vec format.
Returns:
wd2vec model to load
"""
self.model = Word2Vec.load(wd2vec_model_file)
return self.model
| UTF-8 | Python | false | false | 8,294 | py | 16 | wd2vec.py | 10 | 0.591994 | 0.578852 | 0 | 206 | 39.257282 | 99 |
bdgarcia/lpcoding_hsh | 12,051,678,271,946 | 49e9ac8c1d7b31a170b7c94b55250ba5eb135d92 | b1700806777706a0b43be41d0a60ebc9a7149d4b | /crearSubastas.py | d2965b085fa3a896473a4137410fceebcb063c8a | []
| no_license | https://github.com/bdgarcia/lpcoding_hsh | 62fffc2945ef50a3e9b6230144b73d1bbb6d9a58 | 1d99f850ee64f8855345ee1e3382080f4e3fcea1 | refs/heads/master | 2020-04-30T09:34:48.084356 | 2019-05-20T03:54:31 | 2019-05-20T03:54:31 | 176,752,160 | 0 | 0 | null | false | 2019-07-14T19:33:46 | 2019-03-20T14:30:01 | 2019-05-20T03:54:33 | 2019-07-14T19:33:46 | 2,981 | 0 | 0 | 0 | JavaScript | false | false | from modelos.models import (Subasta, Alquila, Residencia)
from application.cerrar_subasta import cerrarSubasta
import datetime
def cerrarSubastasDeLaSemana():
subastas = list(Subasta.objects.filter(semana_alquila=lunesEn6Meses))
print("Cerrando subastas de esta semana")
for subasta in subastas:
cerrarSubasta(subasta)
print(len(subastas), "subasta/s cerradas")
def evaluarSubasta(res):
print("evaluando: ", res.codigo, res.nombre)
# 0=lunes, 2=miercoles
if 0 <= datetime.date.today().weekday() <= 2:
if not Subasta.objects.filter(semana_alquila=lunesEn6Meses, codigo_residencia=res).exists() and not Alquila.objects.filter(fecha=lunesEn6Meses, codigo_residencia=res).exists():
print("Creando: ", res.codigo)
s = Subasta()
s.codigo_residencia = res
s.monto_actual = res.monto_minimo_subasta
s.monto_inicial = res.monto_minimo_subasta
s.fecha_inicio = lunesActual
s.fecha_fin = miercolesActual
s.semana_alquila = lunesEn6Meses
s.save()
today = datetime.date.today()
lunesActual = (today - datetime.timedelta(days=today.weekday()))
miercolesActual = (today - datetime.timedelta(days=today.weekday()-2))
lunesEn6Meses = (lunesActual + datetime.timedelta(6*365/12))
residencias = Residencia.objects.filter(borrado_logico=False)
for res in residencias:
evaluarSubasta(res)
if 3 <= datetime.date.today().weekday():
cerrarSubastasDeLaSemana()
| UTF-8 | Python | false | false | 1,503 | py | 22 | crearSubastas.py | 12 | 0.695276 | 0.683965 | 0 | 39 | 37.538462 | 184 |
mofei952/leetcode_python | 7,361,573,975,861 | fbb580f0ecc6fdc495fdc7093b61dcb34a9fbb52 | 9e2a209455beb93c1c0dc02c8b96eaa22780e7f3 | /array/059 Spiral Matrix II.py | f171a3026c01dba8d224ddbbd38be1c87c93cb32 | []
| no_license | https://github.com/mofei952/leetcode_python | 164f3e617b4a39348069c7db00571eaef9960152 | 7f8145f0c7ffdf18c557f01d221087b10443156e | refs/heads/master | 2022-08-21T08:26:22.397593 | 2022-07-17T13:13:05 | 2022-07-17T13:13:05 | 174,816,021 | 0 | 0 | null | false | 2021-08-06T15:27:20 | 2019-03-10T11:46:00 | 2021-08-06T15:26:44 | 2021-08-06T15:26:41 | 216 | 0 | 0 | 0 | Python | false | false | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : mofei
# @Time : 2019/12/1 12:14
# @File : 059 Spiral Matrix II.py
# @Software: PyCharm
"""
Given a positive integer n, generate a square matrix filled with elements from 1 to n^2 in spiral order.
Example:
Input: 3
Output:
[
[ 1, 2, 3 ],
[ 8, 9, 4 ],
[ 7, 6, 5 ]
]
"""
from typing import List
class Solution:
def generateMatrix(self, n: int) -> List[List[int]]:
if not n:
return []
dirs = [(0, 1), (1, 0), (0, -1), (-1, 0)]
dir_index = 0
x, y = 0, -1
num = 1
matrix = [[0] * n for i in range(n)]
for i in range(n * n):
dx, dy = dirs[dir_index]
x, y = x + dx, y + dy
matrix[x][y] = num
num += 1
xx, yy = x + dx, y + dy
if xx < 0 or xx >= n or yy < 0 or yy >= n or matrix[xx][yy]:
dir_index = (dir_index + 1) % 4
return matrix
def generateMatrix2(self, n: int) -> List[List[int]]:
if not n:
return []
matrix = [[0] * n for i in range(n)]
row_start, row_end = 0, n - 1
col_start, col_end = 0, n - 1
num = 1
while row_start <= row_end and col_start <= col_end:
for i in range(col_start, col_end + 1):
matrix[row_start][i] = num
num += 1
row_start += 1
for i in range(row_start, row_end + 1):
matrix[i][col_end] = num
num += 1
col_end -= 1
if row_start <= row_end:
for i in range(col_end, col_start - 1, -1):
matrix[row_end][i] = num
num += 1
row_end -= 1
if col_start <= col_end:
for i in range(row_end, row_start - 1, -1):
matrix[i][col_start] = num
num += 1
col_start += 1
return matrix
if __name__ == '__main__':
result = Solution().generateMatrix(10)
for line in result:
print([format(i, '3d') for i in line])
result = Solution().generateMatrix2(10)
for line in result:
print([format(i, '3d') for i in line])
| UTF-8 | Python | false | false | 2,249 | py | 156 | 059 Spiral Matrix II.py | 156 | 0.449978 | 0.417519 | 0 | 91 | 23.714286 | 104 |
engegreg/RefinanceML | 15,040,975,507,422 | ef2d9d060370611ecb32cd73f323d35c37e10b4c | 79d155bf2930c9e0b8e2107020fa77756669ad63 | /webapp.py | f574426f5cd17ee903c0ae3f1de04bcda0c204ed | []
| no_license | https://github.com/engegreg/RefinanceML | 64ff8eaeb79892b3047c977ce1b844cf9e833752 | 115b3f0b9c56fb09f7900d1db6cd8a023f5b3004 | refs/heads/main | 2023-07-01T10:22:04.187191 | 2021-08-10T05:48:24 | 2021-08-10T05:48:24 | 390,729,598 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import pandas as pd
import streamlit as st
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, \
recall_score, classification_report, roc_auc_score
df=pd.read_csv('loan_data_cleaned.csv')
st.title("Refinance Classification Model")
st.write("""
This is a Machine Learning model that determines whether someone is eligible for refinancing a loan.
Adjust the slider to the left to observer the model's behavior with additional depth.
""")
#Create interactive slider
get_n_estimators = st.sidebar.slider("Select depth", 1,25)
#Creating variables
target = 'refi_possible'
y=df[target]
X=df.drop(columns=target)
#TTS
X_train, X_val, y_train, y_val = train_test_split(X,y,
test_size=0.2, random_state = 1)
st.write(df.head())
#This function trains the model, returns the metrics of said model, and directly connects to the slider on the web app to make it interactive.
def adjust_depth(slider):
params = dict()
n_estimators = get_n_estimators
params['n_estimators'] = n_estimators
model_xg = XGBClassifier(use_label_encoder=False, n_estimators=params['n_estimators'])
model_xg.fit(X_train, y_train)
st.write("Training accuracy: ", model_xg.score(X_train, y_train))
st.write("Validation accuracy: ", model_xg.score(X_val, y_val))
st.write(classification_report(y_val,model_xg.predict(X_val),target_names = ['Approved','Declined']))
return params
adjust_depth(get_n_estimators)
| UTF-8 | Python | false | false | 1,578 | py | 4 | webapp.py | 1 | 0.726236 | 0.7218 | 0 | 58 | 26.206897 | 142 |
RomainL972/MachineMots | 1,391,569,444,834 | fa3f2006543687edae2ebc894a3f0a5b904e6bbf | 333d09084090cba0f86ff9d1fdd1d77faf2108c1 | /mots_trigrammes_generate.py | f84ad19a9dbab0967dcac4032cb3297bd522617a | []
| no_license | https://github.com/RomainL972/MachineMots | d4879fa6972632a03be7deac0e6c1d486a0ce889 | a7c724b512db4b42f9e08c27e8193cca42f36591 | refs/heads/main | 2023-08-25T02:08:35.083452 | 2021-10-09T14:02:17 | 2021-10-09T14:02:17 | 415,325,441 | 0 | 0 | null | true | 2021-10-09T14:02:38 | 2021-10-09T14:02:37 | 2021-10-02T10:45:48 | 2021-04-24T14:35:06 | 994 | 0 | 0 | 0 | null | false | false | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Generates word from trigramm transition matrix stored in a binary file
Checks whether the word already exists
"""
import numpy as np
from numpy.random import choice
import codecs
# Build a dictionnary to check whether word already exists
filepath = "liste.de.mots.francais.frgut.txt"
dico = []
with codecs.open(filepath, "r", "utf-8") as lines:
for l in lines:
dico.append(l[:-1])
# Load the trigram count matrixand normalize it
count = np.fromfile("count.bin",dtype="int32").reshape(256,256,256)
s=count.sum(axis=2)
st=np.tile(s.T,(256,1,1)).T
p=count.astype('float')/st
p[np.isnan(p)]=0
# Build words
outfile = "output.txt"
f = codecs.open(outfile,"w","utf-8")
# How many for each target size
K = 100
for TGT in range(4,11):
total = 0
while total<100:
i=0
j=0
res = u''
while not j==10:
k=choice(range(256),1,p=p[i,j,:])[0]
res = res + chr(k)
i=j
j=k
if len(res) == 1+TGT:
if res[:-1] in dico:
x=res[:-1]+"*"
else:
x=res[:-1]
total += 1
print(x)
f.write(x+"\n")
f.close()
| UTF-8 | Python | false | false | 1,232 | py | 4 | mots_trigrammes_generate.py | 2 | 0.563312 | 0.525162 | 0 | 52 | 22.692308 | 70 |
vincentndo/CS_168_-_Introduction_to_the_Internet-Architecture_and_Protocols | 14,147,622,289,106 | d313d3edd74203b3546079f4e3959318a547bfd8 | 74cc2c066c42ab3dac830273d4917d6b6e25d28a | /projects/proj1_chat/client.py | de81eba7d2f9d45e906816a61687bd95b452d64e | []
| no_license | https://github.com/vincentndo/CS_168_-_Introduction_to_the_Internet-Architecture_and_Protocols | f4fbb83dc1d01e0c77dc648e3ee06a8a242f7a7f | 23a876b27108111a7f88dabc7106ee8a33c9f11d | refs/heads/master | 2021-08-31T00:33:04.111430 | 2017-12-20T00:55:55 | 2017-12-20T00:55:55 | 105,010,790 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from MySocket import *
def process_string(client_socket, string):
string_list = string.split(" ")
if string_list[0] == "/join":
if len(string_list) < 2:
error_msg = utils.SERVER_JOIN_REQUIRES_ARGUMENT + '\n'
sys.stdout.write(error_msg)
else:
client_socket.send(pad_string(string))
elif string_list[0] == "/create":
if len(string_list) < 2:
error_msg = utils.SERVER_CREATE_REQUIRES_ARGUMENT + '\n'
sys.stdout.write(error_msg)
else:
client_socket.send(pad_string(string))
elif string_list[0] == "/list":
client_socket.send(pad_string(string))
elif string.startswith("/"):
error_msg = utils.SERVER_INVALID_CONTROL_MESSAGE.format(string) + '\n'
sys.stdout.write(error_msg)
else:
client_socket.send(pad_string(string))
if __name__ == "__main__":
if len(sys.argv) != 4:
print("Usage: python client.py <name> <host> <port>\n" +
"python client.py Vincent localhost 12345")
sys.exit(1)
name = sys.argv[1]
host = sys.argv[2]
port = int(sys.argv[3])
client_socket = MySocket(host, port, name, socket_list=[sys.stdin])
client_socket.socket_list.append(client_socket)
try:
client_socket.connect()
client_socket.send(pad_string(client_socket.name))
except:
error_msg = utils.CLIENT_CANNOT_CONNECT.format(client_socket.get_host(), client_socket.get_port()) + '\n'
sys.stdout.write(error_msg)
sys.exit(1)
sys.stdout.write(utils.CLIENT_MESSAGE_PREFIX)
sys.stdout.flush()
while 1:
ready_to_read, ready_to_write, in_error = select.select(client_socket.socket_list, [], [], 0)
for socket in ready_to_read:
if socket == client_socket:
sys.stdout.write(utils.CLIENT_WIPE_ME)
msg = client_socket.recv(utils.MESSAGE_LENGTH)
if msg:
if msg.strip() == "":
msg = '\r'
else:
msg = '\r' + msg.strip() + '\n'
sys.stdout.write(msg)
sys.stdout.write(utils.CLIENT_MESSAGE_PREFIX)
sys.stdout.flush()
else:
error_msg = '\r' + utils.CLIENT_SERVER_DISCONNECTED.format(client_socket.get_host(), client_socket.get_port()) + '\n'
sys.stdout.write(error_msg)
sys.exit(1)
else:
string = sys.stdin.readline().strip()
process_string(client_socket, string)
sys.stdout.write(utils.CLIENT_MESSAGE_PREFIX)
sys.stdout.flush()
client_socket.close()
| UTF-8 | Python | false | false | 2,797 | py | 9 | client.py | 8 | 0.540937 | 0.534144 | 0 | 99 | 27.252525 | 137 |
Tsingzao/MyLeetCode | 1,254,130,498,565 | ac96b58beb4726c18e18327980cb428704cb866c | 12ffde97db08a1d66fcee416a3aaf4607c153c50 | /Power of Three.py | 519505e90c153c7aafd07aa5b3a66b9f0ab1af4e | []
| no_license | https://github.com/Tsingzao/MyLeetCode | dc44ae5685bd6c8a62240394e8ae8dbfcec24301 | 421ea32d9fbe8d85479be6afa34881d1aa218ce3 | refs/heads/master | 2020-03-07T17:41:35.615720 | 2018-04-28T07:23:00 | 2018-04-28T07:23:00 | 127,617,953 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 12 09:13:50 2018
@author: Tsingzao
"""
class Solution(object):
def isPowerOfThree(self, n):
"""
:type n: int
:rtype: bool
"""
if n <= 0:
return False
while n == round(n) and n != 1:
n = n / 3.
if n == 1:
return True
else:
return False | UTF-8 | Python | false | false | 419 | py | 147 | Power of Three.py | 146 | 0.412888 | 0.372315 | 0 | 21 | 18.047619 | 39 |
BenJoey/felev4 | 14,139,032,367,261 | 03e7a706dfa0d5738935f1398916b84fcccda396 | 648faac19a60118ff1021be69ed4ce08d95356cf | /Script Nyelvek/bead3.py | b261535dbae6fac91ed5872eb59c10903428640f | []
| no_license | https://github.com/BenJoey/felev4 | 29324954dd2691be1b27b96a86741edd791a65db | 389240edf4a81f532f9a2a09ef68c658e3cb4d42 | refs/heads/master | 2020-03-08T19:35:35.623191 | 2018-06-23T10:28:29 | 2018-06-23T10:28:29 | 128,358,030 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import glob, os, io
files = []
def CreateFunc(str):
vissza=[]
tmp=str[:str.find('{')]
arg=str[str.find('{'):]
if "CIKLUS" in tmp:
vissza.append("for"+tmp.lstrip("CIKLUS").rstrip()+" :")
else:
vissza.append("if"+tmp.lstrip("ELAGAZAS").rstrip()+" :")
val = arg.split(";;")
tmp = []
for i in val:
if ("CIKLUS" in i or "ELAGAZAS" in i):
tmp+=CreateFunc(i)
else:
tmp.append(i.replace('}','').replace('{',''))
tmp = [' '*4 + szo for szo in tmp]
vissza += tmp
return vissza
for file in glob.glob("*.prog"):
files.append(file)
for i in files:
inp = io.open(i,"r",encoding="utf-8")
fname=i.rstrip(".prog")+".py"
ki = io.open(fname,"w",encoding="utf-8")
for line in inp:
if line[0].isupper():
temp = CreateFunc(line)
else:
temp=line.split(";;")
ki.write("\n".join(temp)) | UTF-8 | Python | false | false | 932 | py | 46 | bead3.py | 34 | 0.51073 | 0.506438 | 0 | 36 | 24.916667 | 64 |
yzspku/BiCNN | 9,792,525,450,760 | 624895427f16519bc21aa625a220f6b8c7581a1e | 08e6d7b7a390fc28a3397a9df137b0fa8728a294 | /main.py | 9fa2b274050d9ba15d7d8c94139279d476bbdc12 | []
| no_license | https://github.com/yzspku/BiCNN | a6438e22450d2d1b3de5a510760d69f121134491 | 97c74dfef675a82db0a6f9600d90aaf6164ef845 | refs/heads/master | 2020-04-07T02:35:21.984955 | 2018-11-17T12:24:54 | 2018-11-17T12:24:54 | 157,981,918 | 10 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cub_200_2011 as dataset
import helper, utils
import model_global as glb, model_object as obj
import time
import torch
import torch.multiprocessing as mp
train_batch_size = 32
test_batch_size = 32
random_seed = 96
validation_size = 0.1
predict_weights = [0.2, 0.8]
use_multiple_gpu = False # run global model on 1 gpu and object-level model on another
pre_models = [None,None]
def print_summary_log(logger, trn_acc_glb, val_acc_glb, tst_acc_glb, trn_acc_obj, val_acc_obj, tst_acc_obj):
logger.info('')
logger.info('global-level model: ' + str(glb.model_name))
logger.info('pretrained: ' + str(glb.use_pretrained_params))
logger.info('fine tune all layers: ' + str(glb.fine_tune_all_layers))
logger.info('epochs: ' + str(glb.num_epochs))
logger.info('batch size: ' + str(train_batch_size))
logger.info('learning rate: ' + str(glb.learning_rate))
logger.info('prediction accuracy: %.4f%%, %.4f%%, %.4f%%' % (trn_acc_glb, val_acc_glb, tst_acc_glb))
logger.info('')
logger.info('object-level model: ' + str(obj.model_name))
logger.info('pretrained: ' + str(obj.use_pretrained_params))
logger.info('fine tune all layers: ' + str(obj.fine_tune_all_layers))
logger.info('epochs: ' + str(obj.num_epochs))
logger.info('batch size: ' + str(train_batch_size))
logger.info('learning rate: ' + str(obj.learning_rate))
logger.info('prediction accuracy: %.4f%%, %.4f%%, %.4f%%' % (trn_acc_obj, val_acc_obj, tst_acc_obj))
def evaluate(logger, models, train_loaders, validation_loaders, test_loaders):
logger.info('')
logger.info('evaluating model on multiple sets combining both global-level and object-level models\' predictions')
logger.info('predict weights: ' + str(predict_weights[0]) + ', ' + str(predict_weights[1]))
begin_time = time.time()
helper.evaluate(
logger=logger,
models=models,
data_loaders=train_loaders,
set_name='train set',
predict_weights=predict_weights
)
helper.evaluate(
logger=logger,
models=models,
data_loaders=validation_loaders,
set_name='validation set',
predict_weights=predict_weights
)
helper.evaluate(
logger=logger,
models=models,
data_loaders=test_loaders,
set_name='test set',
predict_weights=predict_weights
)
logger.info('evaluation has been done! total time: %.4fs' % (time.time() - begin_time))
def get_model_with_saved_parameters(model_path_glb, model_path_obj):
model_glb = helper.get_model_by_name(glb.model_name, pretrained=False)
helper.replace_model_fc(glb.model_name, model_glb)
model_glb.load_state_dict(torch.load(model_path_glb))
model_glb = model_glb.cuda()
model_obj = helper.get_model_by_name(obj.model_name, pretrained=False)
helper.replace_model_fc(obj.model_name, model_obj)
model_obj.load_state_dict(torch.load(model_path_obj))
model_obj = model_obj.cuda()
return model_glb, model_obj
def run_on_single_gpu(logger, data_loaders_glb, data_loaders_obj,
train_loaders, valid_loaders, test_loaders, pre_models, fine_tune_all_layers, num_epochs):
# if you want to change hyper-parameters like number of epochs or learning rate for each level's training,
# please go to corresponding module file
_, trn_acc_glb, val_acc_glb, tst_acc_glb, model_path_glb = glb.get_trained_model_global(
logger=logger, data_loaders=data_loaders_glb, train_batch_size=train_batch_size,
save_model=True, pre_model=pre_models[0], fine_tune_all_layers=fine_tune_all_layers, num_epochs=num_epochs)
_, trn_acc_obj, val_acc_obj, tst_acc_obj, model_path_obj = obj.get_trained_model_object(
logger=logger, data_loaders=data_loaders_obj, train_batch_size=train_batch_size,
save_model=True, pre_model=pre_models[1], fine_tune_all_layers=fine_tune_all_layers, num_epochs=num_epochs)
print_summary_log(logger, trn_acc_glb, val_acc_glb, tst_acc_glb, trn_acc_obj, val_acc_obj, tst_acc_obj)
model_glb, model_obj = get_model_with_saved_parameters(model_path_glb, model_path_obj)
evaluate(
logger=logger,
models=[model_glb, model_obj],
train_loaders=train_loaders,
validation_loaders=valid_loaders,
test_loaders=test_loaders
)
return model_glb, model_obj
def target_model_global(q_glb, data_loaders_glb, pre_model, fine_tune_all_layers, num_epochs):
    """Subprocess entry point: train the global-level model on CUDA device 0.

    Results are handed back to the parent through q_glb in FIFO order:
    train accuracy, validation accuracy, test accuracy, checkpoint path.
    run_on_multiple_gpus() reads them back in exactly this order.
    """
    logger_glb = glb.get_logger(train_batch_size, add_console_log_prefix=True)
    logger_glb.info('target model global starts')
    _, trn_acc_glb, val_acc_glb, tst_acc_glb, model_path_glb = glb.get_trained_model_global(
        logger=logger_glb, data_loaders=data_loaders_glb, train_batch_size=train_batch_size,
        cuda_device_idx=0, save_model=True, pre_model=pre_model, fine_tune_all_layers=fine_tune_all_layers,
        num_epochs=num_epochs )
    q_glb.put(trn_acc_glb)
    q_glb.put(val_acc_glb)
    q_glb.put(tst_acc_glb)
    q_glb.put(model_path_glb)
    logger_glb.info('target model global stops')
def target_model_object(q_obj, data_loaders_obj, pre_model, fine_tune_all_layers, num_epochs):
    """Subprocess entry point: train the object-level model on CUDA device 1.

    Mirrors target_model_global(); results go back through q_obj in FIFO
    order: train accuracy, validation accuracy, test accuracy, checkpoint path.
    """
    logger_obj = obj.get_logger(train_batch_size, add_console_log_prefix=True)
    logger_obj.info('target model object starts')
    _, trn_acc_obj, val_acc_obj, tst_acc_obj, model_path_obj = obj.get_trained_model_object(
        logger=logger_obj, data_loaders=data_loaders_obj, train_batch_size=train_batch_size,
        cuda_device_idx=1, save_model=True, pre_model=pre_model,fine_tune_all_layers=fine_tune_all_layers,
        num_epochs=num_epochs )
    q_obj.put(trn_acc_obj)
    q_obj.put(val_acc_obj)
    q_obj.put(tst_acc_obj)
    q_obj.put(model_path_obj)
    logger_obj.info('target model object stops')
def run_on_multiple_gpus(logger, data_loaders_glb, data_loaders_obj,
        train_loaders, valid_loaders, test_loaders, pre_models, fine_tune_all_layers, num_epochs):
    """Train the global and object models in parallel, one subprocess per GPU.

    Spawns one process per level (GPU 0 = global, GPU 1 = object), waits for
    both, then drains each queue in the same FIFO order the children put:
    train acc, validation acc, test acc, checkpoint path. Finally reloads the
    saved weights, runs the joint evaluation, and returns both models.
    """
    q_glb = mp.Queue() # store models and accuracies
    q_obj = mp.Queue()
    process_glb = mp.Process(target=target_model_global,
                             args=(q_glb, data_loaders_glb, pre_models[0], fine_tune_all_layers, num_epochs,))
    process_obj = mp.Process(target=target_model_object,
                             args=(q_obj, data_loaders_obj, pre_models[1], fine_tune_all_layers, num_epochs,))
    process_glb.start()
    process_obj.start()
    process_glb.join() # join current process(main process), then current process will stop until process_glb finishes
    process_obj.join()
    trn_acc_glb = q_glb.get() # FIFO
    val_acc_glb = q_glb.get()
    tst_acc_glb = q_glb.get()
    model_path_glb = q_glb.get()
    trn_acc_obj = q_obj.get()
    val_acc_obj = q_obj.get()
    tst_acc_obj = q_obj.get()
    model_path_obj = q_obj.get()
    print_summary_log(logger, trn_acc_glb, val_acc_glb, tst_acc_glb, trn_acc_obj, val_acc_obj, tst_acc_obj)
    model_glb, model_obj = get_model_with_saved_parameters(model_path_glb, model_path_obj)
    evaluate(
        logger=logger,
        models=[model_glb, model_obj],
        train_loaders=train_loaders,
        validation_loaders=valid_loaders,
        test_loaders=test_loaders
    )
    return model_glb, model_obj
if __name__ == "__main__":
    # Two-phase training pipeline: phase 1 trains only the replaced FC layers
    # from scratch (160 epochs); phase 2 fine-tunes all layers (120 epochs)
    # starting from the phase-1 models.
    log_file_name_prefix = 'combined'
    logger = utils.get_logger(log_file_name_prefix)
    logger.info('start loading dataset')
    begin_time = time.time()
    # Global-level loaders: full images, no bounding boxes.
    train_loader_glb, valid_loader_glb = dataset.get_train_validation_data_loader(
        resize_size=224, # apply random crop for train set
        batch_size=train_batch_size,
        random_seed=random_seed,
        augment=True,
        validation_size=validation_size,
        object_boxes_dict=None,
        show_sample=False
    )
    test_loader_glb = dataset.get_test_data_loader(
        resize_size=224, # no any crop
        batch_size=test_batch_size,
        object_boxes_dict=None
    )
    # Object-level loaders: images are cropped to annotated bounding boxes.
    bounding_boxes = utils.get_annotated_bounding_boxes()
    train_loader_obj, valid_loader_obj = dataset.get_train_validation_data_loader(
        resize_size=(224, 224), # for object level model, we don't need cropping any more!
        batch_size=train_batch_size,
        random_seed=random_seed,
        augment=True,
        validation_size=validation_size,
        object_boxes_dict=bounding_boxes,
        show_sample=False
    )
    test_loader_obj = dataset.get_test_data_loader(
        resize_size=224,
        batch_size=test_batch_size,
        object_boxes_dict=bounding_boxes
    )
    logger.info('loading dataset costs %.4fs' % (time.time() - begin_time))
    data_loaders_glb = [train_loader_glb, valid_loader_glb, test_loader_glb]
    data_loaders_obj = [train_loader_obj, valid_loader_obj, test_loader_obj]
    train_loaders = [train_loader_glb, train_loader_obj]
    valid_loaders = [valid_loader_glb, valid_loader_obj]
    test_loaders = [test_loader_glb, test_loader_obj]
    pre_models = [None, None]
    # test: it seems ResNet is better for global model and DenseNet better for object-level model
    glb.model_name = 'resnet152'
    obj.model_name = 'densenet161'
    # Phase 1: train only the new FC head.
    fine_tune_all_layers=False
    glb.use_multiple_gpu=False
    obj.use_multiple_gpu=False
    num_epochs = 160
    # NOTE(review): use_multiple_gpu here is a module-level flag presumably
    # defined earlier in this file (distinct from glb/obj.use_multiple_gpu).
    if not use_multiple_gpu:
        pre_models[0], pre_models[1] = run_on_single_gpu(logger, data_loaders_glb, data_loaders_obj,
            train_loaders, valid_loaders, test_loaders, pre_models, fine_tune_all_layers, num_epochs)
    else:
        mp.set_start_method('spawn') # CUDA requires this
        pre_models[0], pre_models[1] = run_on_multiple_gpus(logger, data_loaders_glb, data_loaders_obj,
            train_loaders, valid_loaders, test_loaders, pre_models, fine_tune_all_layers, num_epochs)
    # Phase 2: fine-tune every layer, starting from the phase-1 models.
    fine_tune_all_layers = True
    num_epochs = 120
    if not use_multiple_gpu:
        run_on_single_gpu(logger, data_loaders_glb, data_loaders_obj, train_loaders, valid_loaders, test_loaders, pre_models, fine_tune_all_layers, num_epochs)
    else:
        run_on_multiple_gpus(logger, data_loaders_glb, data_loaders_obj, train_loaders, valid_loaders, test_loaders, pre_models, fine_tune_all_layers, num_epochs)
| UTF-8 | Python | false | false | 9,830 | py | 11 | main.py | 10 | 0.693591 | 0.686775 | 0 | 235 | 40.821277 | 158 |
Bejjox/BejjoLib | 12,670,153,575,084 | 6540f6d4e29fad489981ce0d556106263e305dcd | c56c053fac30f6af4a92becd4e6f4f1722967732 | /statc/static.py | c79e7fc43e6ececc6f5cde2d05c33b0747b5f2aa | [
"MIT"
]
| permissive | https://github.com/Bejjox/BejjoLib | cee2f7c8a9f76f620c855818461246d50fb936b9 | ba0e595d40b49a025ef37ca3122c7b25d1a7ecca | refs/heads/master | 2022-11-26T01:29:00.810556 | 2020-08-04T18:20:39 | 2020-08-04T18:24:20 | 279,634,369 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | class statistika(object):
"""docstring for statistika"""
def __init__(self, arg):
super(statistika, self).__init__()
self.arg = arg
import numpy as np
# Show np.sum's attributes, then sum the tuple (3, 4, 5) -> prints 12.
print(dir(np.sum))
print(np.sum((3,4,5)))
| UTF-8 | Python | false | false | 217 | py | 12 | static.py | 10 | 0.603687 | 0.589862 | 0 | 8 | 26.125 | 42 |
ericfaurot/udon | 8,967,891,728,992 | ce97a70d3ddcef937e53e641126f22bc2fe1bc2b | 697136106fdb41256a0b3f75222880859ff2aa1b | /setup.py | 17f31d111cf66d41e4715a768b52f5e486f072a1 | [
"ISC"
]
| permissive | https://github.com/ericfaurot/udon | bcf673cf8585a63077995ba8b17f5d8dd970c1f6 | 27c682d77a7236b73255d39505cff0f5b8e7d88f | refs/heads/master | 2021-06-21T17:11:59.502287 | 2020-11-13T15:25:20 | 2020-11-13T15:25:20 | 151,249,431 | 1 | 3 | ISC | false | 2020-11-26T21:01:01 | 2018-10-02T12:18:24 | 2020-11-13T15:25:33 | 2020-11-13T15:25:31 | 79 | 1 | 2 | 2 | Python | false | false | from distutils.core import setup
# Distribution metadata for the "udon" package (distutils-based build).
setup(name="udon",
      description="Simple helpers for python apps",
      version="0.17-dev",
      author="Eric Faurot",
      author_email="eric@faurot.net",
      url="https://github.com/ericfaurot/udon",
      classifiers = [
          "Programming Language :: Python",
          "Programming Language :: Python :: 3",
          "Development Status :: 3 - Alpha",
          "Intended Audience :: Developers",
          "License :: OSI Approved :: ISC License (ISCL)",
          "Operating System :: OS Independent",
          "Topic :: Software Development :: Libraries :: Python Modules",
      ],
      packages=[ "udon", "udon.tests" ])
| UTF-8 | Python | false | false | 681 | py | 27 | setup.py | 26 | 0.587372 | 0.580029 | 0 | 18 | 36.833333 | 73 |
dgpb/jobarts | 8,993,661,567,181 | 09db7d4ad1ee73e787af372cc4a56b90abd3b164 | b154ec1337c4b685062587498bfb8dec355a00b5 | /jobs/migrations/0006_auto_20201115_1336.py | 5de3184ccfff15968c0146c2ad0c3f92ce5830bb | []
| no_license | https://github.com/dgpb/jobarts | 759be69ac3a28f1e841024721115763dfe9d4231 | 07358e1a7907afc539eadf2052599b13edc7472a | refs/heads/main | 2023-05-12T04:15:25.848814 | 2021-06-02T14:33:19 | 2021-06-02T14:33:19 | 311,393,523 | 0 | 1 | null | false | 2021-06-02T01:55:11 | 2020-11-09T16:10:17 | 2021-06-02T01:18:54 | 2021-06-02T01:55:11 | 842 | 0 | 1 | 1 | JavaScript | false | false | # Generated by Django 3.1.2 on 2020-11-15 18:36
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: depends on the previous jobs migration and alters
    # Job.publication_date to models.DateTimeField().
    dependencies = [
        ('jobs', '0005_auto_20201113_2314'),
    ]
    operations = [
        migrations.AlterField(
            model_name='job',
            name='publication_date',
            field=models.DateTimeField(),
        ),
    ]
| UTF-8 | Python | false | false | 381 | py | 38 | 0006_auto_20201115_1336.py | 16 | 0.582677 | 0.501312 | 0 | 18 | 20.166667 | 47 |
castodius/adventofcode | 17,952,963,321,118 | f1d37394bb4c29f096487237b0f636da98e99a62 | a435c5d6b5e45550ad953bb88d3a98fbbabbab8a | /2018/3/easy.py | 20d72d532a83946d3d4c89ae968a2a42bcf1207c | []
| no_license | https://github.com/castodius/adventofcode | adc6aa275fab2c17dc2eb85a223171ef01acb03e | b5e3308da8a3f142b86f423ca70e81f1e99d390b | refs/heads/master | 2022-12-26T09:32:51.458765 | 2022-12-18T13:59:18 | 2022-12-18T13:59:18 | 161,826,301 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
side = 1200
# m[x][y] counts how many claims cover square inch (x, y) of the fabric.
m = [[0 for i in range(side)] for j in range(side)]
count = 0
for line in sys.stdin:
    # Each stdin line is split on spaces; field 2 holds "x,y:" and
    # field 3 holds "WxH".
    inp = line.strip().split(' ')
    x, y = map(int, inp[2].strip(':').split(','))
    sx, sy = map(int, inp[3].split('x'))
    for cx in range(x, x + sx):
        for cy in range(y, y + sy):
            m[cx][cy] += 1
            # Count each square inch exactly once, the moment it reaches
            # an overlap of 2.
            if m[cx][cy] == 2:
                count += 1
# print(count) works in both Python 2 and 3; the original bare
# "print count" statement is a SyntaxError under Python 3.
print(count)
| UTF-8 | Python | false | false | 359 | py | 117 | easy.py | 114 | 0.537604 | 0.506964 | 0 | 18 | 18.944444 | 51 |
misrashashank/Competitive-Problems | 10,746,008,218,838 | 5360e1ffd617d80094dce776e5d012f45ead17ff | 1165e63f222dc7a139cb3005e205f27e44ed0826 | /implement_power_function.py | eaedf0b1a17fb0a6bc7199ed1f2f54fd378915d9 | []
| no_license | https://github.com/misrashashank/Competitive-Problems | ac6c80c6a7d8365004452552a06801ac002ef809 | bcd5517fe834062c9e941300ad07a37efdff9c75 | refs/heads/master | 2021-07-19T09:42:41.221405 | 2020-05-16T22:04:46 | 2020-05-16T22:04:46 | 162,201,028 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Implement pow(x, n), which calculates x raised to the power n (x^n).
Example 1:
Input: 2.00000, 10
Output: 1024.00000
Example 2:
Input: 2.10000, 3
Output: 9.26100
Example 3:
Input: 2.00000, -2
Output: 0.25000
Explanation: 2^-2 = 1/(2^2) = 1/4 = 0.25
Note:
-100.0 < x < 100.0
n is a 32-bit signed integer, within the range [−2^31, 2^31 − 1]
'''
class Solution:
    def myPow(self, x, n):
        """Compute x**n using fast (binary) exponentiation.

        Runs in O(log n) multiplications instead of the naive O(n) loop
        (the old commented-out "Method 1" has been removed). A negative
        exponent is handled by inverting the base once up front.
        """
        if n < 0:
            x = 1 / x
        return self.fast_power(x, abs(n))

    def fast_power(self, ele, power):
        """Return ele**power for power >= 0 by recursive squaring."""
        if power == 0:
            return 1.0
        # Solve for power // 2, then square the partial result.
        part = self.fast_power(ele, power // 2)
        # Even power: part * part covers it; an odd power leaves one
        # extra factor of ele (lost by the floor division).
        if power % 2 == 0:
            return part * part
        return part * part * ele
| UTF-8 | Python | false | false | 1,959 | py | 114 | implement_power_function.py | 114 | 0.472634 | 0.401535 | 0 | 85 | 22 | 77 |
zaoyuaner/Learning-materials | 1,795,296,344,027 | fc4160b5dfcad03bef4b8e8858ba39fe2ac135db | 07917881310fc81d85a2cbdf27c9b3c4fa03c694 | /python1901/1_python基础/day08/9.栈.py | 026aaab051eca256ee8021e2f7ba8946ab54c5af | []
| no_license | https://github.com/zaoyuaner/Learning-materials | 9bc9a127d1c6478fb6cebbb6371b1fd85427c574 | 1f468a6f63158758f7cbfe7b5df17f51e3205f04 | refs/heads/master | 2020-05-18T11:38:45.771271 | 2019-05-20T09:07:44 | 2019-05-20T09:07:44 | 184,384,050 | 2 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
#-*- coding: utf-8 -*-
# author:zhangjiao
'''
栈特点:先进后出
在python中,我们使用列表来模仿的栈结构
'''
# LIFO demo: push "1".."5", then pop everything -- items come back "5".."1".
mystack = []
mystack.append("1")
mystack.append("2")
mystack.append("3")
mystack.append("4")
mystack.append("5")
print(mystack)
print(mystack.pop())
print(mystack.pop())
print(mystack.pop())
print(mystack.pop())
print(mystack.pop())
'''
需求:使用栈来进行遍历目录
1.根目录进栈
2.根目录出栈
3.列举根目录下面所有的文件
4.判断文件是否为目录
5.当该文件为目录的时候进栈
6.判断栈是否为空
7,不为空继续出栈
8.若为空,循环结束
'''
import os
def getalldir(path):
	"""Non-recursive directory walk of *path* using an explicit stack.

	Prints every entry: directories ("目录") are pushed for later
	expansion, plain files ("文件") are just printed.
	"""
	# stack of directories still to expand; push the root first
	stack = []
	# push
	stack.append(path)
	while stack:
		# pop the next directory and list it
		path = stack.pop()
		fileList = os.listdir(path)
		for filename in fileList:
			abspath = os.path.join(path,filename)
			if os.path.isdir(abspath):
				print("目录",filename)
				stack.append(abspath)
			else:
				print("文件",filename)
# NOTE: hard-coded Windows path -- adjust before running elsewhere.
path = r"E:\python\python1901"
getalldir(path)
soasme/riotpy | 14,379,550,524,492 | 5939193d5337906d2f4787c91a24c4559b0abc43 | 5a4784ae6326eec57b40d76b9dfa7d175770dfaa | /riot/tags/utils.py | dd85ad6269fd5dbf2b73d5cf7411d6de46a1d2f8 | [
"MIT"
]
| permissive | https://github.com/soasme/riotpy | 1f6ab8fc6d138d23f0e7682bd75a670373e575c8 | 41b61bb2e1d0d6e60c62f514d90b77bd7c80204b | refs/heads/master | 2021-01-10T01:01:11.567560 | 2015-09-05T11:54:40 | 2015-09-05T11:54:40 | 40,594,039 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from functools import wraps
from urwid import AttrMap
from pyquery import PyQuery
from ..ui import IfWidget
def convert_string_to_node(string):
    """Parse an HTML/XML string into a PyQuery document node."""
    return PyQuery(string)
def detect_class(f):
    """Decorator: wrap the widget built by *f* in an urwid AttrMap for every
    name found in the node's ``class`` attribute (space-separated)."""
    @wraps(f)
    def _detect_class(*args, **kwargs):
        # The node may arrive positionally or as the ``node`` keyword.
        # The old kwargs.get('node', args[0]) evaluated args[0] eagerly and
        # raised IndexError on keyword-only calls; only index args when the
        # keyword is absent.
        pq = kwargs['node'] if 'node' in kwargs else args[0]
        class_name = pq.attr['class'] or ''
        class_names = class_name.split(' ')
        node = f(*args, **kwargs)
        for class_name in class_names:
            if class_name:
                node = AttrMap(node, class_name)
        return node
    return _detect_class
def detect_if(f):
    """Decorator: wrap the widget built by *f* in an IfWidget when the node
    carries an ``if`` attribute; state is True only for the literal 'True'."""
    @wraps(f)
    def _detect_if(*args, **kwargs):
        # Same keyword/positional handling fix as detect_class: the old
        # kwargs.get('node', args[0]) raised IndexError on keyword-only
        # calls. The inner function is also renamed from the copy-pasted
        # _detect_class to _detect_if.
        pq = kwargs['node'] if 'node' in kwargs else args[0]
        if_ = pq.attr['if'] or ''
        state = if_ == 'True'
        node = f(*args, **kwargs)
        if if_:
            node = IfWidget(node, state=state)
        return node
    return _detect_if
| UTF-8 | Python | false | false | 917 | py | 48 | utils.py | 44 | 0.562704 | 0.559433 | 0 | 35 | 25.2 | 48 |
seasa2016/Data_Science_2018Spring | 4,020,089,433,657 | 4121c5570c928543f988796554a1120ec1c842dd | dfc275b867e0117f4615041432246643e89deabb | /hw6/check_clique.py | a36da5db5801c495b257cf59fb895b8181210c0e | []
| no_license | https://github.com/seasa2016/Data_Science_2018Spring | 3e477b666cae15e91a84262ff00f329a1b9db8c3 | 9877460872d228f7ee1584b531228d0c9b49201f | refs/heads/master | 2020-03-06T17:30:56.655137 | 2019-04-04T03:47:23 | 2019-04-04T03:47:23 | 126,991,315 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import networkx as nx
if len(sys.argv) != 3:
    print("usage: python check_clique.py [graph] [nodes]")
    # Exit on bad usage; previously execution fell through and the argv
    # indexing below raised IndexError instead of showing a clean message.
    sys.exit(1)

# Positional arguments: the edge-list file and the candidate node list.
graph_data = sys.argv[1]
node_data = sys.argv[2]
# Build an undirected graph from the edge list, then verify that every pair
# of the candidate nodes is connected (i.e. the nodes form a clique).
with open(node_data,'r') as f, open(graph_data,'r') as g:
    UG=nx.Graph()
    nodes = [ line.strip() for line in f.readlines() if line!="\n"]
    for line in g.readlines():
        # Each graph line: "<node1> <node2>"
        n=line.strip().split(' ')
        if not UG.has_edge(n[0],n[1]):
            UG.add_edge(n[0],n[1])
    is_clique=True
    for n1 in nodes:
        # Number of candidate nodes NOT adjacent to n1; any miss breaks the clique.
        check_edge=sum([not UG.has_edge(n1,n2) for n2 in nodes if n1!=n2])
        if check_edge != 0:
            print("This is \'not\' a clique !")
            is_clique=False
            break
    if is_clique:
        print("This is a clique !")
| UTF-8 | Python | false | false | 677 | py | 10 | check_clique.py | 8 | 0.608567 | 0.587888 | 0 | 22 | 28.590909 | 68 |
OmarMeriwani/ElDardeer | 6,047,313,980,013 | 248abad2020c11bc28afccc75c13bf4648cac168 | 6d4de3e5976349252374d6248a0c941eb417cfe2 | /boom.py | ae4a799d8050af485745534e168371b0f04865da | []
| no_license | https://github.com/OmarMeriwani/ElDardeer | 56fd7f2228e54d406f7ead30f09998e3188bc21c | 7e03749ef67ff7725de434c3da0bb1954c054491 | refs/heads/master | 2022-11-25T04:37:48.776098 | 2020-07-28T22:21:02 | 2020-07-28T22:21:02 | 283,340,439 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
# Cart positions keyed by board location index 0-3 -- presumably metres;
# TODO confirm units against the data-collection setup.
locations = [ 0.225, 0.075, -0.075,0.225]
#* time_stamp location state  time_stamp
df =pd.read_csv('Data 28_7/Linda 01/boardStates/data.log',header=None, delimiter=' ').values.tolist()
points = []
# Keep only rows whose state column (i[3]) is 1: [timestamp, location, [], 0, 0].
for i in df:
    if i[3] == 1:
        points.append([i[1],locations[int(i[2])],[],0,0])
df2 =pd.read_csv('Data 28_7/Linda 01/robotCartHotPointDumper/data.log',header=None, delimiter=' ').values.tolist()
cart = []
df3 = pd.DataFrame(columns=['time','mean','error','original'])
seq = 0
# For each consecutive pair of board events, average the cart readings
# (column 8) whose timestamps fall in that interval.
for i in range(0, len(points) - 1):
    cartM = [j[8] for j in df2 if j[1] >= points[i][0] and j[1] < points[i+ 1][0]]
    # NOTE(review): hard-coded special case for index 31 (takes everything
    # after the last point) -- looks dataset-specific; confirm.
    if i == 31:
        cartM = [j[8] for j in df2 if j[1] >= points[31][0]]
    error = [abs(points[i][1]) - abs(m) for m in cartM]
    error = sum(error) / len(error)
    cartmAVG = sum(cartM) / len(cartM)
    print([points[i][0],cartmAVG, error, points[i][1]])
    df3.loc[seq] = [points[i][0],cartmAVG, error, points[i][1]]
    seq += 1
ax = sns.lineplot(x="time", y="mean",markers=True, dashes=False, data=df3)
plt.show()
#* time_stamp u v max mean std x y z 1,8
SamB2653/ml_course | 8,684,423,918,398 | beedb37f69e64b3687f72409fb051cbe4f267c3f | c88c6e0c277f846a1e964643bdebbfbce86500a2 | /ml_course/prerequisites/numpy_basic.py | 630b7ebb1e86ededf136c0d2bf3067b162cf3083 | []
| no_license | https://github.com/SamB2653/ml_course | 48d95d1deaf06daead68f4b85d6250515d4b60fb | 2416cacaf1ec4ac46fb08ce4632e0e486ea6cee7 | refs/heads/master | 2023-02-25T01:43:39.479966 | 2021-01-31T16:07:13 | 2021-01-31T16:07:13 | 331,679,077 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
'''
Following the NumPy UltraQuick Tutorial but with some minor changes and additions. Source:
https://colab.research.google.com/github/google/eng-edu/blob/master/ml/cc/exercises/numpy_ultraquick_tutorial.ipynb
'''
# --- Array creation ---
# 1D array (8 element vector)
one_dimensional_array = np.array([1.2, 2.4, 3.5, 4.7, 6.1, 7.2, 8.3, 9.5])
print("1D Array:\n %s" % one_dimensional_array)
# 2D array (3x2 matrix)
two_dimensional_array = np.array([[6, 5], [11, 7], [4, 8]])
print("2D Array (3x2):\n %s" % two_dimensional_array)
# 3D array (2x2x2 matrix) with all zeroes (np.ones for all ones)
all_zeroes = np.zeros([2, 2, 2])
print("3D Array (2x2x2) - all zeroes:\n %s\n" % all_zeroes)
# Sequence of integers, lower bound (5) included but not the upper bound (12).
sequence_of_integers = np.arange(5, 12)
print("Sequence of integers: %s" % sequence_of_integers)
# --- Random numbers and broadcasting ---
# Populate an array with random numbers, generates a 6 element vector with random integers between 50 and 100
random_integers_between_50_and_100 = np.random.randint(low=50, high=101, size=6)
print("Random Integers (50-100, 6 elements): %s" % random_integers_between_50_and_100)
# Random float values between 0.0 and 1.0 (5 elements)
random_floats_between_0_and_1 = np.random.random([5])
print("Random floats between 0.0 and 1.0 (original): %s" % random_floats_between_0_and_1)
# Using broadcasting add 2.0 to the value of every item in the vector created in random_floats_between_0_and_1
random_floats_between_2_and_3 = random_floats_between_0_and_1 + 2.0
print("Random floats between 2.0 and 3.0 (+2 operation): %s" % random_floats_between_2_and_3)
# Using broadcasting multiply by 3 to the value of every item in the vector created in random_floats_between_0_and_1
random_floats_between_6_and_9 = random_floats_between_2_and_3 * 3
print("Random floats between 6.0 and 9.0 (*3 operation): %s" % random_floats_between_6_and_9)
# Task 1: Create a Linear Data set
feature = np.arange(6, 21) # Assign a sequence of integers from 6 to 20 (inclusive) to a NumPy array named feature
print("\nTask 1:\nSeries of integers between 6 and 20: %s" % feature)
label = (feature * 3) + 4 # Assign 15 values to a NumPy array named label such that: label = (3)(feature) + 4
print("Applying (3)(feature) + 4 operation: %s" % label)
# Task 2: Add Some Noise to the Data set
# noise has shape (15,) to match label element-wise, values in [-2.0, 2.0).
noise = (np.random.random([15]) * 4) - 2 # Create a noise array having the same dimension as label, no broadcasting
print("\nTask 2:\nAdd Some Noise to the Data set (noise matrix):\n %s" % noise)
label = label + noise # modify each value assigned to label by adding a different random float value between -2 and +2
print("Applying noise to label:\n %s" % label)
| UTF-8 | Python | false | false | 2,695 | py | 44 | numpy_basic.py | 12 | 0.715028 | 0.659369 | 0 | 50 | 52.9 | 119 |
KartikKannapur/Algorithms | 4,312,147,191,204 | 5423e39c9608c68efe73a388eb8eafbc3b40e93f | 7410903c6cd5ef35c592af00c934fb21c369cbf2 | /00_Code/02_HackerRank/01_Math_FindthePoint.py.py | fe62a00f4bcb82e1aaacea10aae7c95b6a84e1a7 | [
"MIT"
]
| permissive | https://github.com/KartikKannapur/Algorithms | f4e4726170599db0622d18e8c06a382e9bce9e77 | 66e3c8112826aeffb78bd74d02be1a8d1e478de8 | refs/heads/master | 2020-12-25T18:32:41.086518 | 2020-10-19T02:59:47 | 2020-10-19T02:59:47 | 93,961,043 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # #Read Input
# Read the number of test cases.
N = int(input())
# Each test case is one line: "px py qx qy".
for counter in range(N):
    val_input = input()
    px, py, qx, qy = [int(ele) for ele in val_input.split(" ")]
    # Reflection of P through Q: R = 2Q - P, so Q is the midpoint of PR.
    print(((2*qx) - px), ((2*qy) - py))
| UTF-8 | Python | false | false | 262 | py | 304 | 01_Math_FindthePoint.py.py | 296 | 0.549618 | 0.541985 | 0 | 10 | 24.3 | 63 |
olorin-grey/computer_science | 14,920,716,410,879 | 9f989b18f955fe1d93dd2a847c99fa674558c5dd | f41686b692dd16c228bef108d63203ae276ad5f8 | /math_solutions/mult_x_add_y.py | 04a6e4eec7dc4083d7f435cb573b8f8fc01b79d8 | []
| no_license | https://github.com/olorin-grey/computer_science | 3a36f94eb412d211d24e7d3025fcf20f757ce564 | 934c7786b105649516115f9494e4d737715ac9dd | refs/heads/master | 2020-07-04T08:52:51.669590 | 2020-01-14T03:03:33 | 2020-01-14T03:03:33 | 202,229,843 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def mult_x_add_y(number, x, y):
print(number*x + y)
# Demo call: prints 5*2 + 3 = 13.
mult_x_add_y(5, 2, 3)
# 13
| UTF-8 | Python | false | false | 85 | py | 16 | mult_x_add_y.py | 15 | 0.552941 | 0.494118 | 0 | 6 | 13.166667 | 31 |
Gary-Joan/Proyecto_AYD2_fase4 | 8,839,042,706,900 | e2550ab958508793ba4ba3280b5659cb2e9783ab | 7147be7be4579405af86f0a82e0035b3c18f66aa | /Contrato/forms.py | db8699496d5e9a3e0bbf7f6974623233473f78ec | []
| no_license | https://github.com/Gary-Joan/Proyecto_AYD2_fase4 | 0fff677d2d42c127e36887db0d6f50f9f196b664 | bcb8d31ee17a0cb603561b1b9ed7fa30995d5494 | refs/heads/master | 2023-08-18T17:44:50.044743 | 2021-04-04T22:44:55 | 2021-04-04T22:44:55 | 259,494,228 | 0 | 0 | null | false | 2021-09-22T18:56:23 | 2020-04-28T01:00:21 | 2021-04-04T22:44:58 | 2021-09-22T18:56:21 | 2,726 | 0 | 0 | 1 | JavaScript | false | false | from django import forms
from .models import Contrato
class ContratoForm(forms.ModelForm):
    """ModelForm for creating and editing Contrato records."""
    class Meta:
        # Bind the form to the Contrato model; the exposed fields follow.
        model = Contrato
fields = ('Cliente', 'Gerente','Menu','Montaje','Restaurante','Salon') | UTF-8 | Python | false | false | 213 | py | 46 | forms.py | 40 | 0.680751 | 0.680751 | 0 | 9 | 22.777778 | 78 |
alvas-education-foundation/Krishna_Katira | 15,736,760,192,931 | 7fadb1684ce5f238d5ff1ca7c9a14ea7d84634c4 | 721cf86f26a44ea333b904c6bedf407315347e0f | /coding_solutions/22-06-20.py | eae1a5911d9667be36d673ec20c23730239065da | []
| no_license | https://github.com/alvas-education-foundation/Krishna_Katira | f76a00bad22c63597d040844720983943b1c97e0 | 8ff391f8a4b5082deb50b03460be2939fa611f4c | refs/heads/master | 2022-11-20T16:05:47.203175 | 2020-07-21T10:09:36 | 2020-07-21T10:09:36 | 265,737,615 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class A(object):
    def __init__(self):
        # Two demo instance attributes; __dict__ will show {'A': 1, 'B': 2}.
        self.A=1
        self.B=2
# Instantiate A so the next line can dump its attribute dictionary.
obj=A()
print(obj.__dict__) | UTF-8 | Python | false | false | 115 | py | 19 | 22-06-20.py | 19 | 0.443478 | 0.426087 | 0 | 6 | 18.333333 | 26 |
kevinyangff/Python-programming-exercises | 12,618,613,954,514 | c199232dc253c81316c8d39693772ed045bd3ddc | 7a244d033c1070b82075f85472ff74d108da26e8 | /level0/question42.py | adf712900b04cd7f417e1a48bd350359183b1887 | [
"MIT"
]
| permissive | https://github.com/kevinyangff/Python-programming-exercises | 0c82536dc7ca98ee534fbae9392dda704af61ee4 | 87546906d817263ae7ddbd0276f0bb36e0d63c41 | refs/heads/master | 2021-09-03T08:19:27.888182 | 2018-01-07T13:15:17 | 2018-01-07T13:15:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | tupleOriginal = (1,2,3,4,5,6,7,8,9,10)
# Keep only the even values, materialised as a tuple.
t = tuple(x for x in tupleOriginal if x % 2 == 0)
print(t)
Ntare-Katarebe/ICS3U-Unit6-03-Python-Array_Min | 6,854,767,854,788 | 9bedcd53cfbacc112de81d100a63b13436b49194 | 4f7990c345192f87ef76cfb72bc1c214fdc5540f | /array_min.py | 59cfa9a1f7f48029299fe0e91f476089960a276f | []
| no_license | https://github.com/Ntare-Katarebe/ICS3U-Unit6-03-Python-Array_Min | 24346f16f20e4cfcc8b04aa53794bb12dccee606 | f4cf6538b7319ac8c2a0a8964bfb0b7c649e3ba1 | refs/heads/main | 2023-05-13T01:49:03.800249 | 2021-06-04T16:21:06 | 2021-06-04T16:21:06 | 373,896,578 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# Created by: Ntare-Katarebe
# Created on: June 2021
# This program uses an array(list) to find the smallest number
import math
import random
# Module-level list that main() fills with the random draws.
my_numbers = []
def min_number(my_numbers):
    """Return the smallest element of *my_numbers*.

    Raises IndexError for an empty list (same as the original seed access).
    """
    # Seed with the first element, then keep the lowest seen so far.
    # The local is named `smallest` so the builtin min() is not shadowed.
    smallest = my_numbers[0]
    for loop_counter in my_numbers:
        if loop_counter < smallest:
            smallest = loop_counter
    return smallest
def main():
    # Draw 10 random ints in [1, 100], echo each, then report the smallest.
    # NOTE(review): appends to the module-level my_numbers list, so calling
    # main() twice would keep the earlier draws as well.
    for loop_counter in range(0, 10):
        a_single_number = random.randint(1, 100)
        my_numbers.append(a_single_number)
        print("The random is: {0}".
              format(my_numbers[loop_counter]), end="\n")
    print("")
    print("The smallest number is {}".format(min_number(my_numbers)))
    print("\nDone.")
if __name__ == "__main__":
    main()
| UTF-8 | Python | false | false | 784 | py | 2 | array_min.py | 1 | 0.603316 | 0.585459 | 0 | 40 | 18.6 | 69 |
louyanqi/leo | 8,839,042,695,562 | 393cefb80c22d618f89a6cb0c1a9cd1aee43d7bf | fd27dcde79c47b20699b43288d6bb2fb7e3ee417 | /controllers/manage/views.py | 988a3106d9cc75ed23b86686956fc083aeb8fc8e | []
| no_license | https://github.com/louyanqi/leo | 4be31cfe34e3aecdf89fe3696e9ecbbe426d4814 | 706d603ab08df8f3e50e9dc9428dab334540dc75 | refs/heads/master | 2021-09-09T03:35:10.405606 | 2018-03-13T14:59:24 | 2018-03-13T14:59:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import render_template, Blueprint, redirect, url_for, request
from models import db, User
from forms import RegisterForm, LoginForm
from flask_login import login_user, logout_user, login_required
# Blueprint for the auth/management routes; templates under templates/control_template.
control_blueprint = Blueprint('manage', __name__, template_folder='templates/control_template')
@control_blueprint.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form; on valid submit, log the user in and redirect home."""
    form = LoginForm()
    if form.validate_on_submit():
        # NOTE(review): no password check is visible here -- unless LoginForm
        # validates the password itself, any submitted password logs the user
        # in. Confirm and fix before shipping.
        # NOTE(review): .one() raises NoResultFound for an unknown username
        # (server error); .one_or_none() plus a form error would be safer.
        user = User.query.filter_by(username=form.username.data).one()
        login_user(user, remember=form.remember.data)
        return redirect(url_for('blog.home'))
    else:
        # Debug output of validation errors; consider logging instead.
        print(form.username.errors)
    return render_template('control_template/login.html', form=form)
@control_blueprint.route('/logout', methods=['GET', 'POST'])
@login_required
def logout():
    """Log the current user out and redirect to the blog home page.

    Decorator order fixed: the route decorator must be outermost. With
    @login_required above @route, Flask registered the raw (unwrapped)
    function as the view, so the login guard never actually ran.
    """
    logout_user()
    return redirect(url_for('blog.home'))
@control_blueprint.route('/register', methods=['GET', 'POST'])
def register():
    """Show the registration form; create the user and redirect to login on success."""
    form = RegisterForm()
    if request.method == 'POST':
        if form.validate_on_submit():
            # NOTE(review): prints a plaintext password to stdout -- remove.
            print(form.username.data, form.password.data)
            # NOTE(review): no duplicate-username handling; a commit on an
            # existing username will raise from the database layer.
            new_user = User(username=form.username.data, password=User.set_password(form.password.data))
            db.session.add(new_user)
            db.session.commit()
            return redirect(url_for('manage.login'))
    return render_template('control_template/register.html', form=form)
ANh0r/LeetCode-Daily | 11,922,829,260,907 | a24189a1e2caa57a127681e368c6f327f9d4ad6a | 5da6b63331783b2728bc6b908c75c17864d1abe4 | /3.8 minCut.py | a7213b5e1e99fac5a645fa01a4833822e9c78885 | []
| no_license | https://github.com/ANh0r/LeetCode-Daily | 1c34baca46263da67e5a2b1b184dd441b90ef583 | a9ad5b5bc912a4ce5613000fbc47905510cde5ea | refs/heads/master | 2023-08-24T20:38:40.885292 | 2021-09-13T04:17:09 | 2021-09-13T04:17:09 | 283,934,963 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def minCut(self, s: str) -> int:
n = len(s)
g = [[True] * n for _ in range(n)]
for i in reversed(range(n)):
for j in range(i + 1, n):
g[i][j] = (s[i] == s[j]) and g[i + 1][j - 1]
f = [0] * n
for i in range(n):
if not g[0][i]:
f[i] = min(f[j] + 1 for j in range(i) if g[j + 1][i])
return f[-1] | UTF-8 | Python | false | false | 416 | py | 314 | 3.8 minCut.py | 313 | 0.387019 | 0.367788 | 0 | 12 | 33.75 | 69 |
r-valitov/Malicious-network-activity-detection-system | 5,102,421,153,757 | 9f30710ec3b3b999664476851e7d275371abc341 | 4771d1843c5f011f49dfdfb87ccc30294d6753fb | /DetectionRetrainSystem.py | 15fbed204173d6f2baf402e7bb51556ed2ca0775 | []
| no_license | https://github.com/r-valitov/Malicious-network-activity-detection-system | c8bae085fa22753b27cabb8f02be41fb5e2dd8a8 | 3e911a18f111dff08278c394809a3710c5d900da | refs/heads/master | 2022-10-03T18:57:14.211871 | 2020-06-05T13:36:59 | 2020-06-05T13:36:59 | 262,602,852 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pyshark
from DetectionSystem import DetectionSystem
from Trainer import Trainer
from enums.Behavior import Behavior
from enums.Kind import Kind
from enums.Mode import Mode
from history.notes.TCPHistoryNote import TCPHistoryNote
from history.notes.UDPHistoryNote import UDPHistoryNote
class DetectionRetrainSystem:
    """Live-traffic detector that keeps retraining its model while running.

    Wraps a Trainer (teaching mode, hybrid) and a DetectionSystem that are
    both initialised from the same saved model checkpoint.
    """
    def __init__(self, hidden_size, model_path, interface="Ethernet"):
        # interface: capture interface name passed to pyshark and the detector.
        # NOTE(review): action_num is assigned but never read in this class.
        self.interface = interface
        self.action_num = 2
        self.trainer = Trainer(hidden_size, Behavior.TEACH, Mode.HYBRID)
        self.detector = DetectionSystem(hidden_size, interface)
        self.trainer.load_model(model_path)
        self.detector.load_model(model_path)
    def retrain(self, note):
        # Only learn from traffic while detector suspicion is low (<= 0.1),
        # passing the current suspicion along to the trainer.
        if self.detector.suspicion <= 0.1:
            self.trainer.retrain(note, 0.5, self.detector.suspicion)
    def run(self):
        """Capture TCP/UDP packets indefinitely and classify each one.

        action == 0 increments the attack counter; action == 1 triggers a
        retraining attempt. The detector's firewall state is reset even if
        the capture loop raises.
        """
        try:
            attack_counter = 0
            packet_counter = 0
            capture = pyshark.LiveCapture(interface=self.interface, display_filter="tcp or udp")
            for packet in capture.sniff_continuously():
                packet_counter += 1
                protocol = str(packet.transport_layer).lower()
                note = None
                if protocol == 'udp':
                    note = UDPHistoryNote(packet, kind=Kind.ALL)
                if protocol == 'tcp':
                    note = TCPHistoryNote(packet, kind=Kind.ALL)
                # NOTE(review): the display filter should guarantee tcp/udp,
                # but if neither branch matched, note stays None and
                # note.message below raises AttributeError -- confirm.
                self.detector.model.set_protocol(protocol)
                action = self.detector.select_action(note.message)
                if action == 0:
                    attack_counter += 1
                self.detector.analyse(note, action, attack_counter, packet_counter)
                if action == 1:
                    self.retrain(note)
        finally:
            self.detector.reset_firewall()
| UTF-8 | Python | false | false | 1,822 | py | 30 | DetectionRetrainSystem.py | 27 | 0.613063 | 0.607025 | 0 | 48 | 36.958333 | 96 |
harvey1327/Hack_Tools | 850,403,545,528 | 37e11906f20a764a8346328c133b18fb35f762a0 | f3710ce9160b81d4320e664599857ba2182d14ea | /hack.py | 71e48b90c3984f788aac16cda2dfad1cab3ef158 | []
| no_license | https://github.com/harvey1327/Hack_Tools | 69aed73efab511b705d744139a3b865d8d5e503a | a3213a2738bc21b69f01f0c205ad2de3484115ce | refs/heads/master | 2021-06-30T22:17:59.084209 | 2017-09-14T22:04:54 | 2017-09-14T22:04:54 | 103,585,621 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import requests
# Try to add `new_user_id` to every group, authenticating with each known
# user token in turn; stop trying tokens for a group once one succeeds (201).
token_array = [{'user_id': 2, 'token': '7Dsk5VR6xD_goeKcxFps', 'user_name': 'balh'},
               {'user_id': 3, 'token': 'EmSLzXwfKddc2y28pcxW', 'user_name': 'test1'},
               {'user_id': 4, 'token': 'yBETvpQLH7UkAmXbdGo-', 'user_name': 'user4'}]
group_array = [{'group_id': 1, 'group_name': 'group1'},
               {'group_id': 2, 'group_name': 'group2'},
               {'group_id': 3, 'group_name': 'group3'},
               {'group_id': 4, 'group_name': 'testgroup'},
               {'group_id': 5, 'group_name': 'group4'}]
new_user_id = 2
domain = 'http://127.0.0.1:10080'

for group in group_array:
    group_name = group['group_name']
    for credentials in token_array:
        # Skip the token belonging to the user we are trying to add.
        if credentials['user_id'] == new_user_id:
            continue
        url = '%s/api/v3/groups/%s/members' % (domain, group_name)
        payload = {'id': group_name, 'user_id': new_user_id, 'access_level': '30'}
        headers = {'PRIVATE-TOKEN': credentials['token']}
        r = requests.post(url, data=payload, headers=headers)
        output = 'StatusCode-%s, GroupName-%s, AuthUser-%s, Text-%s' % (r.status_code, group_name, credentials['user_name'], r.text)
        print(output)
        # 201 Created: member added — no need to try further tokens.
        if r.status_code == 201:
            break
darksidergod/Crawler-Of-Lianjia | 9,629,316,701,081 | f1506d70e06626b5091c26fe2b879f67b692ec10 | 7581ab40041144fea6bf55bc68f4483c35476361 | /crawler/crawler_domain.py | 6029d43af26d4abe37872708e23324eb9428a52a | []
| no_license | https://github.com/darksidergod/Crawler-Of-Lianjia | 9979733654fc55c03eb1129123bba3c6591a5a3a | a5139da27e74f3fb6bf0ebf4dacd0823f2c7307d | refs/heads/master | 2020-08-22T13:16:21.030546 | 2019-10-20T17:53:25 | 2019-10-20T17:53:25 | 216,402,901 | 0 | 0 | null | true | 2019-10-20T17:50:19 | 2019-10-20T17:50:19 | 2019-01-31T10:01:08 | 2016-09-10T11:11:11 | 5,668 | 0 | 0 | 0 | null | false | false | #!/usr/bin/env python
import requests
def get_request(url):
    """Issue a GET to https://<url>; return the response, or None when the
    host cannot be reached (connection error)."""
    try:
        return requests.get("https://" + url)
    except requests.exceptions.ConnectionError:
        return None
# Brute-force subdomain discovery: probe <word>.<target_url> for every word
# in the wordlist file and report the ones that answer.
target_url = "<target_url>"
with open("<name of domains to guess", 'r') as wordlist_file:
    for line in wordlist_file:
        candidate = line.strip()
        subdomain = candidate + "." + target_url
        if get_request(subdomain):
            print("[+] Subdomain found." + " It is " + subdomain)
| UTF-8 | Python | false | false | 568 | py | 13 | crawler_domain.py | 8 | 0.573944 | 0.573944 | 0 | 24 | 22.666667 | 64 |
kohnakagawa/ghidra_scripts | 18,399,639,913,255 | e9719d70a502f4d14615cc027470fb0db17360cf | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/plugin/assembler/AssemblySelector.pyi | 56a54088f1573362bff2a763f54d769285c26493 | [
"MIT"
]
| permissive | https://github.com/kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import ghidra.app.plugin.assembler.sleigh.sem
import java.lang
import java.util
class AssemblySelector(object):
"""
Provides a mechanism for pruning and selecting binary assembled instructions from the results
of parsing textual assembly instructions. There are two opportunities: After parsing, but before
semantic resolution, and after resolution. In the first opportunity, filtering is optional ---
the user may discard any or all parse trees. The second is required, since only one instruction
may be placed at the desired address --- the user must select one instruction among the many
results, and if a mask is present, decide on a value for the omitted bits.
Extensions of this class are also suitable for collecting diagnostic information about attempted
assemblies. For example, an implementation may employ the syntax errors in order to produce
code completion suggestions in a GUI.
"""
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def filterParse(self, parse: java.util.Collection) -> java.util.Collection:
"""
Filter a collection of parse trees.
Generally, the assembly resolver considers every possible parsing of an assembly
instruction. If, for some reason, the user wishes to ignore certain trees (perhaps for
efficiency, or perhaps because a certain form of instruction is desired), entire parse
trees may be pruned here.
It's possible that no trees pass the filter. In this case, this method ought to throw an
{@link AssemblySyntaxException}. Another option is to pass the erroneous result on for semantic
analysis, in which case, the error is simply copied into an erroneous semantic result.
Depending on preferences, this may simplify the overall filtering and error-handling logic.
By default, no filtering is applied. If all the trees produce syntax errors, an exception is
thrown.
@param parse the collection of parse results (errors and trees).
@return the filtered collection, optionally in-place.
@throws AssemblySyntaxException if the selector wishes to forward one or more syntax errors
"""
...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def select(self, rr: ghidra.app.plugin.assembler.sleigh.sem.AssemblyResolutionResults, ctx: ghidra.app.plugin.assembler.sleigh.sem.AssemblyPatternBlock) -> ghidra.app.plugin.assembler.sleigh.sem.AssemblyResolvedConstructor:
"""
Select an instruction from the possible results.
Must select precisely one resolved constructor from the results given back by the assembly
resolver. Precisely one. That means the mask of the returned result must consist of all 1s.
Also, if no selection is suitable, an exception must be thrown.
By default, this method selects the shortest instruction that is compatible with the given
context and takes 0 for bits that fall outside the mask. If all possible resolutions produce
errors, an exception is thrown.
@param rr the collection of resolved constructors
@param ctx the applicable context.
@return a single resolved constructor with a full instruction mask.
@throws AssemblySemanticException
"""
...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
| UTF-8 | Python | false | false | 3,704 | pyi | 3,297 | AssemblySelector.pyi | 3,294 | 0.698164 | 0.696544 | 0 | 87 | 41.574713 | 227 |
Syyan7979/Kattis-Solutions-python- | 1,614,907,748,174 | ad07a729d18e8354c5dcfef06fdb404134a11fb6 | a7f80e213b521cac38c45b91c697d88233d4e67e | /Python Kattis/Kattis130(missingnumbers).py | 042a1b0f4f285fcd1c7a1c608088a91708675b1e | []
| no_license | https://github.com/Syyan7979/Kattis-Solutions-python- | d96c2d880999cf50a206a15a89c2a9f3da29a623 | 8a2363faa4efd8c0418464b7d558db718ea5eb83 | refs/heads/master | 2022-05-28T06:56:36.624530 | 2020-05-02T04:39:45 | 2020-05-02T04:39:45 | 260,613,426 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def out(someList):
counter = True
for i in range(1, someList[-1]+1):
if i not in someList:
print(i)
counter = False
if counter:
print("good job")
# Number of integers that follow on stdin.
n = int(input())
# Read the n values and report which numbers in 1..last are missing.
out([int(input()) for i in range(n)])
SimonXu666j/pycode_learning | 11,149,735,110,305 | 38b32545e41a62322dfe20962f5dfdf403341166 | 60700f1fa67110cd68a337186f0da6e2319329a9 | /实用编程技巧/文件IO操作相关话题/test01.py | 283b33d71422392316a1948ebb352195e1c318d5 | []
| no_license | https://github.com/SimonXu666j/pycode_learning | 24660b5c9b90e0ef5da43dd99bd62caebbbe7b95 | 3104a497dc2032512d36d0671d319de9fe8eeedd | refs/heads/master | 2021-01-24T17:31:39.342918 | 2018-12-03T14:30:50 | 2018-12-03T14:30:50 | 123,224,426 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #-*-coding:utf-8-*-
# How to read and write text files?
'''
String semantics changed between Python 2.x and Python 3.x:
str -> bytes
unicode -> str
Python 2.x: encode unicode before writing to a file, and decode the binary
string after reading it back.
Python 3.x: open() takes 't' for text mode and `encoding` picks the codec.
'''
print(u"****python2.x中******")
s=u'你好'
print(s)
# encode turns the unicode string into a (byte) str
print(s.encode('utf-8'))
print(s.encode('gbk'))
# decode turns the byte str back into unicode
print(s.encode('utf-8').decode('utf-8'))
# f=open('py2.txt','w')
# s=u'你好'
# f.write(s.encode('gbk'))
# f.close()
# f=open('py2.txt','r')
# t=f.read()
# print(t)
HyuncheolOh/snopes | 111,669,184,547 | 79062b89833edbacf3dc0ae687d92e279b9b8e45 | f1056661b869efe5f8080042ecc0eb8e05853e13 | /characterization/draw_tools/box_plot.py | c37fe969965cd1076b2755fa000b9d930db09161 | []
| no_license | https://github.com/HyuncheolOh/snopes | 16e631dc390e36c56c10b48fc0a62ba6a6e1f44b | 5ab76dc99f8ab1024cf36a82de99fba7d9c7310f | refs/heads/master | 2020-03-11T14:05:07.326024 | 2018-06-20T06:13:16 | 2018-06-20T06:13:16 | 130,043,528 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
class BoxPlot:
    """Small helper around matplotlib for drawing several box plots on one
    figure, laid out as a subplot_num x subplot_num grid.

    Typical use: construct, then call set_data()/set_title() once per subplot,
    and finally save_image(). (An earlier single-plot constructor that took the
    data directly was removed.)
    """
    def __init__(self, subplot_num):
        # 1-based index of the next grid cell that set_data() will fill.
        self.fig_num = 1
        self.fig = plt.figure(figsize=(20,20))
        # Square grid: subplot_num rows x subplot_num columns.
        self.subplot_x = subplot_num
        self.subplot_y = subplot_num
    def set_data(self, data, label):
        # Draw a box plot (outliers hidden) in the next free grid cell.
        # NOTE(review): `label` is accepted but never used.
        self.ax = self.fig.add_subplot(self.subplot_x, self.subplot_y, self.fig_num)
        self.ax.boxplot(data, showfliers = False)
        self.fig_num += 1
    def set_data_with_position(self, data, lebel, grid):
        # Draw a box plot at an explicit subplot/GridSpec position; unlike
        # set_data() this variant shows outliers.
        # NOTE(review): `lebel` (sic, typo for "label") is unused.
        self.ax = self.fig.add_subplot(grid)
        self.ax.boxplot(data, showfliers = True)
    def set_title(self, title):
        # Applies to the most recently created axes.
        self.ax.title.set_text(title)
    def set_xticks(self, x_ticks):
        # One tick per label on the current pyplot axes.
        plt.xticks(np.arange(len(x_ticks)), x_ticks)
    def set_label(self, x, y):
        # NOTE(review): labels are stored but never applied to any axes here.
        self.x_label = x
        self.y_label = y
    def set_ylim(self, value):
        # Clamp the y-axis of the most recent axes to [0, value].
        self.ax.set_ylim(0, value)
    def save_image(self, path):
        plt.savefig(path, bbox_inches='tight')
| UTF-8 | Python | false | false | 1,147 | py | 87 | box_plot.py | 52 | 0.599826 | 0.593723 | 0 | 41 | 26.95122 | 84 |
setsunaki/setsu | 10,256,381,933,176 | 0ab67c49b4e17c471622458ece384d20539b71d2 | 1c1bc6d6a221bd5e8346ce3acdae7fa6487434cd | /test.py | ad4798e0ac626d07ebbda9b9634b477316eb7182 | []
| no_license | https://github.com/setsunaki/setsu | 364c1ba83285b0a2fe1ce8a24753e8ace14f3d31 | b5d9db968bc277684ce0ef2bf97474bcfb8863ff | refs/heads/master | 2020-08-01T06:33:21.558279 | 2020-04-21T01:36:56 | 2020-04-21T01:36:56 | 210,900,296 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #comentarios
'''varias lineas
de comentarios wacho'''
#CALCULADORA
from os import system
def Pausa():
    """Wait for a keypress, then clear the console (Windows 'cls')."""
    print("Apreta algo loko")
    input()
    system("cls")
def Sumar(a, b):
    """Print the sum of a and b."""
    # BUG FIX: this previously computed `a - b` (a subtraction) while
    # labelling the result as the sum.
    print(f"Tu suma hermano {a + b}")
def Restar(a, b):
    """Print the difference a - b."""
    diferencia = a - b
    # Two spaces match print()'s default separator in the original call.
    print(f"Tu resta hermano  {diferencia}")
def Multiplicar(a, b):
    """Print the product a * b."""
    producto = a * b
    # Two spaces match print()'s default separator in the original call.
    print(f"Tu multiplicacion hermano  {producto}")
def Dividir(a, b):
    """Print the quotient a / b (true division; b must be nonzero)."""
    cociente = a / b
    # Two spaces match print()'s default separator in the original call.
    print(f"Tu divicion hermano  {cociente}")
def Menu():
    """Print the operation menu (option numbers match the dispatch loop below)."""
    print("""SELECCIONE OPERACION
    1.Suma
    2.Restar
    3.Multiplicar
    4.Dividir
    9.Salir""")
# Interactive calculator loop: read two operands, show the menu, dispatch on
# the chosen option, and pause/clear the screen before the next round.
while True:
    num1=float(input("Ingrese un numero : "))
    num2=float(input("Ingrese otro numero: "))
    Menu()
    op= int(input("Ingrese su opcion: "))
    if op ==1:
        Sumar(num1,num2)
    elif op==2:
        Restar(num1,num2)
    elif op==3:
        Multiplicar(num1,num2)
    elif op==4:
        Dividir(num1,num2)
    elif op==9:
        # Farewell message; wait for a key before leaving the loop.
        print("Terminamos por hoy wacho")
        input()
        break
    else:
        print("Opcion no valida")
    Pausa()
chenpengpython/youdaoTranslation | 15,144,054,713,112 | 862e3ac48ab05cf451e1dddd0338c10301b43e1e | e004073354db14f1f16fe2df74e4a4ede4b01f2b | /demo.py | d4aac005f930817cb0d21b7d78410f825ef65d84 | []
| no_license | https://github.com/chenpengpython/youdaoTranslation | f34d4e5e24d547ae4343262230f535ef13a6664a | 269d844ef1e6f6c07438057e28317fa30d9e34ba | refs/heads/master | 2020-09-15T02:06:52.875742 | 2019-07-07T14:36:57 | 2019-07-07T14:36:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2019/7/7 22:02
# @Author : xuzhihai0723
# @Email : 18829040039@163.com
# @File : youdao.py
# @Software: PyCharm
import hashlib
import requests
import time
import execjs
def md5_encrypt(text):
    """Return the hex MD5 digest of *text* (UTF-8 encoded)."""
    return hashlib.md5(text.encode('utf-8')).hexdigest()
def get_salt(timestamp):
    """Return timestamp plus a random digit in 0..9.

    This reproduces the site's JS `timestamp + parseInt(10 * Math.random(), 10)`
    directly in Python, removing the previous dependency on execjs (which
    required an external JavaScript runtime) while producing values from the
    exact same distribution.
    """
    import random  # local import keeps this function self-contained
    return timestamp + random.randrange(10)
def run():
    """Prompt for a phrase, sign a translate request the way fanyi.youdao.com's
    JS client does, POST it, and print the JSON response."""
    api = 'http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'
    keyword = input('你要翻译啥>> \n')
    headers = {
        'Cookie': 'OUTFOX_SEARCH_USER_ID=-1867145421@10.108.160.17; JSESSIONID=aaa4aprbywi3fhv_DTmVw; OUTFOX_SEARCH_USER_ID_NCOO=1231020454.0353444; ___rl__test__cookies=1562508084293',
        'Referer': 'http://fanyi.youdao.com/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'
    }
    # Millisecond timestamp; the site's request calls this `ts`.
    timestamp = int(time.time() * 1000)
    salt = get_salt(timestamp)
    data = {
        'i': keyword,
        'from': 'AUTO',
        'to': 'AUTO',
        'smartresult': 'dict',
        'client': 'fanyideskweb',
        'salt': str(salt),
        # sign = md5("fanyideskweb" + query + salt + secret), mirroring the
        # site's obfuscated JS; the trailing constant is the shared secret.
        'sign': md5_encrypt("fanyideskweb" + keyword + str(salt) + "@6f#X3=cCuncYssPsuRUE"),
        'ts': str(timestamp),
        # bv = md5 of the User-Agent with its first 8 chars ("Mozilla/") dropped.
        'bv': md5_encrypt(headers['User-Agent'][8:]),
        'doctype': 'json',
        'version': '2.1',
        'keyfrom': 'fanyi.web',
        'action': 'FY_BY_REALTlME'
    }
    res = requests.post(api, data=data, headers=headers)
    print(res.json())
if __name__ == '__main__':
    run()
| UTF-8 | Python | false | false | 1,764 | py | 1 | demo.py | 1 | 0.587799 | 0.513683 | 0 | 60 | 28.2 | 185 |
jyotipatel/deep-learning | 910,533,112,503 | 71a0d23541fbc5d8bea261b8e9a8510eb02ff5b4 | c53663ca2a103887cc382ef338586d25807429e2 | /Convolutional_Neural_Networks/cnn_practise.py | b59673ab062141439019461ae96370a950da96d9 | []
| no_license | https://github.com/jyotipatel/deep-learning | 134dce38def08e03b16a9dcb17553f98144b80f7 | 59f35d0e26c3dd43e9df263bb225655b433fe0cb | refs/heads/master | 2020-03-23T04:41:13.799056 | 2018-07-16T07:09:31 | 2018-07-16T07:09:31 | 141,099,208 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 23 10:45:57 2018
@author: jyoti
"""
# Part 1 :Building the CNN
#importing the lib
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
#Initializing the CNN
# Binary image classifier: Conv -> MaxPool -> Flatten -> Dense(256) -> Dense(1).
classifier = Sequential()
# Step 1 - Convolution: 32 filters of 3x3 over 128x128 RGB inputs
classifier.add(Conv2D(32, (3, 3), input_shape= (128,128,3), activation = 'relu'))
# Step 2 - Pooling: halves spatial resolution
classifier.add(MaxPooling2D(pool_size = (2,2)))
# step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection
# NOTE(review): `output_dim` is the Keras 1 argument name; Keras 2 renamed
# it to `units` (output_dim still worked with a deprecation warning).
classifier.add(Dense(output_dim = 256, activation = 'relu'))
classifier.add(Dense(output_dim = 1, activation = 'sigmoid'))
# Step 5 - Compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Part 2 : Fitting CNN to the images
from keras.preprocessing.image import ImageDataGenerator
# Training-time augmentation: rescale to [0,1], random shear/zoom/flip.
train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
# Test data only gets the rescale (no augmentation).
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(
        'dataset/training_set',
        target_size=(128,128),
        batch_size=32,
        class_mode='binary')
test_set = test_datagen.flow_from_directory(
        'dataset/test_set',
        target_size=(128, 128),
        batch_size=32,
        class_mode='binary')
# NOTE(review): in Keras 2, steps_per_epoch counts *batches*, so 8000 steps
# at batch_size 32 means 256k images per epoch — verify this is intended.
classifier.fit_generator(training_set,
                         steps_per_epoch=8000,
                         epochs=25,
                         validation_data=test_set,
                         validation_steps=2000)
kevinbarry7/python_test | 17,506,286,730,138 | a1ee92ced1f0c9b833ec65033e75b9903db37d3a | 45e4c7672e282d011947289d2f8d5b3a84c7f8a8 | /552/executing-functions/exercise4/game.py | c6ebaec0622671ee8df6b4967bd7088f4e7044c7 | []
| no_license | https://github.com/kevinbarry7/python_test | 3e660b534b17efd2aedff69f775c80a5b74eea41 | 57f31fd2b5429c86028706d667db192c23a6b107 | refs/heads/master | 2020-06-29T22:15:48.690049 | 2020-06-14T17:52:30 | 2020-06-14T17:52:30 | 200,638,583 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
A module providing a simple dice game.
The purpose of this module is to show off a slightly longer function that
could benefit from being broken up into smaller functions.
Author: Kevin Barry
Date: March 28, 2020
"""
import random
def roll_off(handicap1,handicap2):
    """
    Returns true if player 1 wins the roll-off contest with player 2; otherwise
    returns false.

    Each player rolls two dice (via rollem) and adds their handicap; player 1
    wins on a strictly higher total. Both scores are printed before returning.

    Parameter handicap1: The handicap of player 1
    Precondition: handicap1 is a number

    Parameter handicap2: The handicap of player 2
    Precondition: handicap2 is a number
    """
    # Same dice-roll order as before so seeded RNG sequences are unchanged.
    score1 = rollem(1, 6) + handicap1
    score2 = rollem(1, 6) + handicap2
    print(f'Player 1 got {score1}; Player 2 got {score2}.')
    return score1 > score2
"""
Returns the sum of two random numbers.
The numbers generated are between first and last (inclusive).
Example: rollem(1,6) can return any value between 2 and 12.
Parameter first: The lowest possible number
Precondition: first is an integer
Parameter last: The greatest possible number
Precondition: last is an integer, last >= first
"""
num1 = random.randint(first,last)
num2 = random.randint(first,last)
thesum = num1+num2
return thesum
| UTF-8 | Python | false | false | 1,591 | py | 58 | game.py | 58 | 0.683847 | 0.656191 | 0 | 58 | 26.431034 | 93 |
HangJie720/ATM | 13,073,880,469,496 | 3151317ae02278909cbab18efe63527d2f37315f | 27e7cd08334f4bef873f4c67b5632e9f1c052ca9 | /atm/enter_data.py | fb0d6eb76987aa53746d232529c48eccf97282b4 | [
"MIT"
]
| permissive | https://github.com/HangJie720/ATM | 2f9734c88427784fe08ff588c5c736310453c782 | 9dfe5d9c883efc37d285c852fcc92a02c6cdd945 | refs/heads/master | 2020-03-24T20:28:36.780510 | 2018-05-16T22:20:15 | 2018-05-16T22:20:15 | 142,980,079 | 1 | 0 | MIT | true | 2018-07-31T07:42:21 | 2018-07-31T07:42:21 | 2018-07-28T06:08:01 | 2018-06-05T20:54:47 | 2,856 | 0 | 0 | 0 | null | false | null | from __future__ import absolute_import, division, unicode_literals
import logging
import os
from builtins import map
from datetime import datetime, timedelta
from past.utils import old_div
from .config import *
from .constants import *
from .database import Database
from .encoder import MetaData
from .method import Method
from .utilities import download_data
# load the library-wide logger
logger = logging.getLogger('atm')
def create_dataset(db, run_config, aws_config=None):
    """
    Create a dataset and add it to the ModelHub database.

    db: initialized Database object
    run_config: RunConfig object describing the dataset to create
    aws_config: optional. AWS credentials for downloading data from S3.
    """
    # Pull the data down to the local filesystem so metadata can be extracted.
    train_local, test_local = download_data(run_config.train_path,
                                            run_config.test_path, aws_config)
    # The dataset name is the training file's basename, minus its suffixes.
    base = os.path.basename(train_local)
    name = base.replace("_train.csv", "").replace(".csv", "")
    # Process the data into the form ATM needs and save it to disk.
    meta = MetaData(run_config.class_column, train_local, test_local)
    # Record the dataset (and its extracted statistics) in the database.
    return db.create_dataset(name=name,
                             description=run_config.data_description,
                             train_path=run_config.train_path,
                             test_path=run_config.test_path,
                             class_column=run_config.class_column,
                             n_examples=meta.n_examples,
                             k_classes=meta.k_classes,
                             d_features=meta.d_features,
                             majority=meta.majority,
                             size_kb=old_div(meta.size, 1000))
def create_datarun(db, dataset, run_config):
    """
    Given a config, creates a set of dataruns for the config and enters them into
    the database. Returns the created datarun.

    db: initialized Database object
    dataset: Dataset SQLAlchemy ORM object
    run_config: RunConfig object describing the datarun to create
    """
    # describe the datarun by its tuner and selector
    run_description = '__'.join([run_config.tuner, run_config.selector])

    # set the deadline, if applicable
    deadline = run_config.deadline
    if deadline:
        deadline = datetime.strptime(deadline, TIME_FMT)
        # this overrides the otherwise configured budget_type
        # TODO: why not walltime and classifiers budget simultaneously?
        run_config.budget_type = 'walltime'
    elif run_config.budget_type == 'walltime':
        # BUG FIX: this previously referenced an undefined name `budget`,
        # raising NameError whenever budget_type was 'walltime' and no
        # explicit deadline was configured.
        deadline = datetime.now() + timedelta(minutes=run_config.budget)

    # the judgment metric column is derived from the configured score target
    target = run_config.score_target + '_judgment_metric'
    datarun = db.create_datarun(dataset_id=dataset.id,
                                description=run_description,
                                tuner=run_config.tuner,
                                selector=run_config.selector,
                                gridding=run_config.gridding,
                                priority=run_config.priority,
                                budget_type=run_config.budget_type,
                                budget=run_config.budget,
                                deadline=deadline,
                                metric=run_config.metric,
                                score_target=target,
                                k_window=run_config.k_window,
                                r_minimum=run_config.r_minimum)
    return datarun
def enter_data(sql_config, run_config, aws_config=None,
               run_per_partition=False):
    """
    Generate a datarun, including a dataset if necessary.

    sql_config: Object with all attributes necessary to initialize a Database.
    run_config: all attributes necessary to initialize a Datarun, including
        Dataset info if the dataset has not already been created.
    aws_config: all attributes necessary to connect to an S3 bucket.
    run_per_partition: when True, create one datarun per hyperpartition
        (useful for debugging) instead of a single shared datarun.

    Returns: ID of the generated datarun, or the list of IDs when
        run_per_partition is True.
    """
    # connect to the database
    db = Database(sql_config.dialect, sql_config.database, sql_config.username,
                  sql_config.password, sql_config.host, sql_config.port,
                  sql_config.query)
    # if the user has provided a dataset id, use that. Otherwise, create a new
    # dataset based on the arguments we were passed.
    if run_config.dataset_id is None:
        dataset = create_dataset(db, run_config, aws_config=aws_config)
        run_config.dataset_id = dataset.id
    else:
        dataset = db.get_dataset(run_config.dataset_id)
    # enumerate all combinations of categorical variables for each method
    method_parts = {}
    for m in run_config.methods:
        # enumerate all combinations of categorical variables for this method
        method = Method(m)
        method_parts[m] = method.get_hyperpartitions()
        logger.info('method %s has %d hyperpartitions' %
                    (m, len(method_parts[m])))
    # create hyperpartitions and datarun(s)
    run_ids = []
    if not run_per_partition:
        # one shared datarun for every hyperpartition
        logger.debug('saving datarun...')
        datarun = create_datarun(db, dataset, run_config)
    # (typo "hyperpartions" kept: it is a runtime log string)
    logger.debug('saving hyperpartions...')
    for method, parts in list(method_parts.items()):
        for part in parts:
            # if necessary, create a new datarun for each hyperpartition.
            # This setting is useful for debugging.
            if run_per_partition:
                datarun = create_datarun(db, dataset, run_config)
                run_ids.append(datarun.id)
            # create a new hyperpartition in the database
            db.create_hyperpartition(datarun_id=datarun.id,
                                     method=method,
                                     tunables=part.tunables,
                                     constants=part.constants,
                                     categoricals=part.categoricals,
                                     status=PartitionStatus.INCOMPLETE)
    logger.info('Data entry complete. Summary:')
    logger.info('\tDataset ID: %d' % dataset.id)
    logger.info('\tTraining data: %s' % dataset.train_path)
    logger.info('\tTest data: %s' % (dataset.test_path or 'None'))
    if run_per_partition:
        logger.info('\tDatarun IDs: %s' % ', '.join(map(str, run_ids)))
    else:
        logger.info('\tDatarun ID: %d' % datarun.id)
    logger.info('\tHyperpartition selection strategy: %s' % datarun.selector)
    logger.info('\tParameter tuning strategy: %s' % datarun.tuner)
    logger.info('\tBudget: %d (%s)' % (datarun.budget, datarun.budget_type))
    # run_ids is empty (falsy) in the single-datarun case, so this returns
    # the single datarun's id then, and the list of ids otherwise.
    return run_ids or datarun.id
| UTF-8 | Python | false | false | 6,783 | py | 60 | enter_data.py | 22 | 0.59885 | 0.597966 | 0 | 162 | 40.87037 | 81 |
YashKhant2412/MovieInfoBot | 7,224,135,012,480 | 33c5b21d8639c7b61f6ee0b64dd570f15445751f | 77dd316933398a5a88b7a57fedd583dee9e4be03 | /Code/conn.py | 548dc0e1fef4a3111f004a792be2fb4b9d4746cd | []
| no_license | https://github.com/YashKhant2412/MovieInfoBot | d29a3765f4db764ca35f5d4f18ce01487ec48ca9 | 77215eaeb6206241898fd527bc626757eab2c704 | refs/heads/main | 2023-06-16T15:33:27.729558 | 2021-07-10T07:21:01 | 2021-07-10T07:21:01 | 384,633,245 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sqlite3
from openpyxl import load_workbook
# Load rows from the workbook into the Movies table, then dump the table.
conn = sqlite3.connect('Movie.db')
# NOTE(review): load_workbook reads .xlsx files; a file literally named
# 'Movie.csv' will not parse unless it is actually an xlsx — confirm the path.
wb = load_workbook('Movie.csv')
ws = wb['Movie']
conn.execute("create table if not exists Movies (movie text, seat text, price int)")
for i in range(1, 10):
    # BUG FIX: rows were previously inserted into a non-existent table
    # 'food_items' while the table created and queried here is 'Movies'.
    # Also switched to parameterized SQL instead of string formatting,
    # which breaks on quotes in the data and is injection-prone.
    conn.execute("insert into Movies (movie, seat, price) values (?, ?, ?)",
                 (ws.cell(i, 1).value, ws.cell(i, 2).value, ws.cell(i, 3).value))
conn.commit()
content = conn.execute("select * from Movies")
for i in content:
    print(i)
| UTF-8 | Python | false | false | 522 | py | 8 | conn.py | 3 | 0.67433 | 0.653257 | 0 | 18 | 28 | 156 |
lvhaiyang/api_test_framework | 12,816,182,456,213 | 0cd622d02000e733793d2f885894ed7f36f10b6f | 52d42ac4d3307fba9aa36ab55b9a63012d168de9 | /test_framework/test_api/project_demo/mode_demo/case_demo.py | 5e4c4cdd3b9c0800d105edbd059b4eaecab8d71b | []
| no_license | https://github.com/lvhaiyang/api_test_framework | b0ce446babea7648f155fba0e4f6cd42e9b0db66 | 867507f3f59abc183e85e9e513cd03dfd4b7d772 | refs/heads/master | 2021-04-27T13:57:53.615223 | 2018-07-30T09:24:36 | 2018-07-30T09:24:36 | 122,449,192 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!encoding=utf-8
from __future__ import unicode_literals
import traceback
from lib.api_template import *
from lib.mysql import *
from config import *
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
CASE_INFO = ''
# 接口部分
class ApiTest(ApiTemplate):
    """API-case template: derives the endpoint URL from this file's name and
    provides hooks to patch headers, request data and expected data before the
    base class sends the request.

    NOTE(review): relies on names (os, ApiTemplate attributes) provided by the
    wildcard imports at the top of this module.
    """
    def get_api_info(self):
        # save_key is the file name without extension, e.g. 'case_demo';
        # the URL swaps underscores for slashes: '/case/demo'.
        self.save_key = os.path.basename(__file__).split('.')[0]
        self.api_url = '/{}'.format(self.save_key.replace('_', '/'))
        if self.api_url_suffix is not None:
            self.api_url = '{0}.{1}'.format(self.api_url, self.api_url_suffix)
    def change_headers(self):
        headers = self.headers
        params_data = self.save_json_data
        # >>>>>>>>
        # step 1: first place that may need editing per test case.
        # To modify fields in the request headers, follow this format:
        # cmds = ["data['authorization'] = replace_data['login']['token']",
        #         "data['authorization'] = replace_data['login']['token']"]
        cmds = []
        self.change_data(u'headers', headers, params_data, cmds)
    def change_send_data(self):
        data = self.send_data
        params_data = self.save_json_data
        # >>>>>>>>
        # step 2: second place that may need editing per test case.
        # To modify fields in the request payload, follow this format:
        # cmds = ["data['data'] = replace_data['termi_mallHome_getAreaList']['data']",
        #         "data['data'] = replace_data['termi_mallHome_getAreaList']['data']"]
        cmds = []
        self.change_data(u'send_data', data, params_data, cmds)
    def change_expect_data(self):
        data = self.expect_data
        params_data = self.save_json_data
        # >>>>>>>>
        # step 3: third place that may need editing per test case.
        # To modify fields in the expected response, follow this format:
        # cmds = ["data['data'] = replace_data['termi_mallHome_getAreaList']['data']",
        #         "data['data'] = replace_data['termi_mallHome_getAreaList']['data']"]
        cmds = []
        self.change_data(u'expect_data', data, params_data, cmds)
# 数据库部分
class DatabaseCheck(DatabaseCheckTemplate):
    '''
    First clean up the database environment: delete data left over from the
    previous run, then insert the rows this case needs.
    After the API under test has run, verify what the database stored,
    updated, or deleted.
    '''
    MYSQL = None
    # >>>>>>>>
    # step 1: first place that may need editing per test case.
    # Database name as configured in the config module, e.g.:
    # name = 'ShangChengConfig'
    name = ''
    def set_up(self):
        '''
        Initialize the database fixtures for this case.
        :return:
        '''
        # NOTE(review): this local is unused as written.
        MYSQL = self.MYSQL
        # >>>>>>>>
        # step 2: second place that may need editing per test case.
        # Clean up leftover rows, e.g.:
        # delete_sqls = ["DELETE FROM m_index_like_details WHERE scene_id = '999'",
        #                "DELETE FROM m_index_like_details WHERE scene_id = '888'"]
        delete_sqls = []
        self.execute_sqls(delete_sqls=delete_sqls)
        # >>>>>>>>
        # step 3: third place that may need editing per test case.
        # Insert the test data, e.g.:
        # insert_sqls = ["INSERT INTO `m_index_like_details` (`scene_id`, `scene_title`, `like_user`, `like_time`, `update_time`, `like_state`, `user_device_num`, `spare_field3`) VALUES ('888', NULL, '62160899', '2017-12-28 11:37:04', NULL, '1', NULL, NULL)",
        #                "INSERT INTO `m_index_like_details` (`scene_id`, `scene_title`, `like_user`, `like_time`, `update_time`, `like_state`, `user_device_num`, `spare_field3`) VALUES ('999', NULL, '62160899', '2017-12-28 11:37:04', NULL, '1', NULL, NULL)"]
        insert_sqls = []
        self.execute_sqls(insert_sqls=insert_sqls)
    def verify_mode(self):
        # NOTE(review): this local is unused as written.
        MYSQL = self.MYSQL
        # >>>>>>>>
        # step 4: fourth place that may need editing (SELECT statements and the
        # field values they are expected to return).
        # Each query should return one value; expect_strings lists, in order,
        # the value each query must produce, e.g.:
        # select_sqls = ["SELECT scene_id FROM m_index_like_details where scene_id = '999'",
        #                "SELECT scene_id FROM m_index_like_details where scene_id = '999'"]
        # expect_strings = ['999', '999']
        select_sqls = []
        expect_strings = []
        values = self.execute_sqls(select_sqls=select_sqls)
        result = self.expect_mode(values, expect_strings)
        return result
| UTF-8 | Python | false | false | 4,508 | py | 22 | case_demo.py | 16 | 0.574665 | 0.552781 | 0 | 115 | 32.756522 | 259 |
Razorro/Leetcode | 10,660,108,831,740 | c3626ea1efb1c930337e261be165d048d842d15a | a4e17629d83a738403c6426aa4b9495c21227a9c | /72. Edit Distance.py | 456c4fc9a647d15bd6ace177c229f67ca7ec5abc | []
| no_license | https://github.com/Razorro/Leetcode | dffacc1a9a51740727f94bfaf272b7f4d9abf229 | f18805698100fda4e06f63c06fe6295cf899d4b2 | refs/heads/master | 2021-06-16T23:17:09.825548 | 2021-02-25T11:25:57 | 2021-02-25T11:25:57 | 166,940,171 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Given two words word1 and word2, find the minimum number of operations required to convert word1 to word2.
You have the following 3 operations permitted on a word:
Insert a character
Delete a character
Replace a character
Example 1:
Input: word1 = "horse", word2 = "ros"
Output: 3
Explanation:
horse -> rorse (replace 'h' with 'r')
rorse -> rose (remove 'r')
rose -> ros (remove 'e')
Example 2:
Input: word1 = "intention", word2 = "execution"
Output: 5
Explanation:
intention -> inention (remove 't')
inention -> enention (replace 'i' with 'e')
enention -> exention (replace 'n' with 'x')
exention -> exection (replace 'n' with 'c')
exection -> execution (insert 'u')
A good question! I thought it was similiar with the dynamic programming example in CLRS,
it turns out the skeleton may has a little similiarity, but the core idea can't be extracted
with the situation of this question.
It was called Levenshtein distance.
Mathematically, the Levenshtein distance between two strings {\displaystyle a,b} a,b (of length {\displaystyle |a|} |a| and {\displaystyle |b|} |b| respectively) is given by {\displaystyle \operatorname
{lev} _{a,b}(|a|,|b|)} \operatorname{lev}_{a,b}(|a|,|b|) where
| --- max(i, j) if min(i, j) = 0
lev(i, j) = | min --- lev(i-1, j) + 1
| --- lev(i, j-1) + 1
| --- lev(i-1, j-1) + 1
Computing the Levenshtein distance is based on the observation that if we reserve a matrix to hold the Levenshtein distances
between all prefixes of the first string and all prefixes of the second, then we can compute the values in the matrix in
a dynamic programming fashion, and thus find the distance between the two full strings as the last value computed.
This algorithm, an example of bottom-up dynamic programming, is discussed, with variants, in the 1974 article The
String-to-string correction problem by Robert A. Wagner and Michael J. Fischer.[4]
"""
class Solution:
    """Edit-distance (Levenshtein) utilities for LeetCode 72.

    ``standardAnswer`` is the classic O(m*n) Wagner-Fischer dynamic
    program; ``findBiggestCommon`` recovers the matched index pairs of a
    longest common subsequence (kept for the experiment in ``__main__``).
    """

    def minDistance(self, word1: 'str', word2: 'str') -> 'int':
        """Return the minimum number of insert/delete/replace operations
        needed to turn ``word1`` into ``word2``.

        BUG FIX: the original computed an LCS and implicitly returned
        None.  Edit distance with substitutions is not derivable from the
        LCS alone, so delegate to the DP solution and return its result.
        """
        return self.standardAnswer(word1, word2)

    def findBiggestCommon(self, source, target):
        """Return (target_index, source_index) pairs of one longest common
        subsequence of ``source`` and ``target``.

        Uses a rolling one-row LCS table plus a per-cell direction matrix
        ('=' match / 'l' left / 'u' up) for backtracking.
        """
        path = [0] * len(source)   # previous DP row
        directions = []            # backtracking directions, one row per target char
        for i in range(len(target)):
            current = [0] * len(source)
            d = []
            for j in range(len(source)):
                if target[i] == source[j]:
                    # Match: extend the diagonal predecessor.
                    current[j] = path[j - 1] + 1 if j - 1 >= 0 else 1
                    d.append('=')
                else:
                    left = current[j - 1] if j - 1 >= 0 else 0
                    d.append('l' if left > path[j] else 'u')
                    current[j] = max(left, path[j])
            path = current
            directions.append(d)

        # Walk the direction matrix back from the bottom-right corner,
        # collecting the cells where characters matched.
        x_y = []
        row, col = len(target) - 1, len(source) - 1
        while row >= 0 and col >= 0:
            if directions[row][col] == '=':
                x_y.append((row, col))
                row -= 1
                col -= 1
            elif directions[row][col] == 'u':
                row -= 1
            else:
                col -= 1
        return x_y

    def standardAnswer(self, word1, word2):
        """Bottom-up Levenshtein distance (Wagner-Fischer).

        det[i][j] holds the distance between word1[:i] and word2[:j].
        """
        m = len(word1) + 1
        n = len(word2) + 1
        det = [[0 for _ in range(n)] for _ in range(m)]
        for i in range(m):
            det[i][0] = i  # delete all of word1[:i]
        for i in range(n):
            det[0][i] = i  # insert all of word2[:i]
        for i in range(1, m):
            for j in range(1, n):
                # BUG FIX: the conditional must apply only to the 0/1
                # substitution cost.  The original expression parsed as
                # `(det[i-1][j-1] + 0) if equal else 1`, which dropped the
                # diagonal term whenever the characters differed.
                sub = 0 if word1[i - 1] == word2[j - 1] else 1
                det[i][j] = min(det[i][j - 1] + 1,        # insert
                                det[i - 1][j] + 1,        # delete
                                det[i - 1][j - 1] + sub)  # substitute / keep
        return det[m - 1][n - 1]
if __name__ == '__main__':
    # Ad-hoc experiment: try to derive an operation count from the LCS
    # match points.  NOTE(review): edit distance with substitutions is not
    # a simple function of the LCS, so `trans` is only a heuristic.
    s = Solution()
    distance = s.findBiggestCommon('horse', 'ros')
    distance = sorted(distance, key=lambda e: e[1])  # order matches by source index
    c = 0      # next unmatched source position
    trans = 0  # accumulated operation estimate
    for left, right in distance:
        # |right-left| aligns the match; left-c accounts for skipped chars.
        trans += abs(right - left) + left-c
        c = left + 1
print(s.findBiggestCommon('horse', 'ros')) | UTF-8 | Python | false | false | 4,021 | py | 68 | 72. Edit Distance.py | 67 | 0.553842 | 0.534444 | 0 | 111 | 35.234234 | 202 |
KD2199/Expense_management_with_drf | 5,738,076,346,161 | 726923c0f419d231ad7d63cddfc21348bf9a83ef | cde63ec17ec8c692a64ea5a5ebfa0322fccb010f | /expense/forms.py | 6f1b4956a807f08acd3698a4c4b6ddf1dde01cac | []
| no_license | https://github.com/KD2199/Expense_management_with_drf | e41355279f65810dc3ec445f0e0825e73b4c085b | e5281ff1a38647749e8252620f63dcc3a840903c | refs/heads/master | 2023-08-25T07:53:52.424586 | 2021-10-24T06:12:06 | 2021-10-24T06:12:06 | 420,596,787 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django import forms
from django.contrib.auth import get_user_model
from .models import Expenses, Categories, ExpenseMonths
from templatetags.get_month_name import get_month
from django.utils.translation import gettext_lazy as _
from decimal import Decimal
from django.http import HttpResponse
import csv
User = get_user_model()
class UserExpenseForm(forms.ModelForm):
    """ModelForm for recording one income/expense entry.

    NOTE: the owning User is passed through the ``instance`` kwarg and
    popped before ModelForm sees it, so callers must do
    ``UserExpenseForm(..., instance=<user>)`` -- not an Expenses instance.
    """
    def __init__(self, *args, **kwargs):
        self.user = kwargs.pop('instance')  # the owning User, not a model instance
        super(UserExpenseForm, self).__init__(*args, **kwargs)
        # Limit category choices to categories this user created.
        self.fields['expense_category'].queryset = (self.user.user_categories.all())

    class Meta:
        model = Expenses
        fields = ('type', 'expense_category', 'amount', 'date')
        widgets = {'type': forms.Select(attrs={'onchange': "check()"}),
                   'expense_category': forms.Select(attrs={'class': 'regDropDown', 'placeholder': _(' Select Category')}),
                   'amount': forms.NumberInput(attrs={'class': 'form-group', 'placeholder': _(' Enter Amount')}),
                   'date': forms.DateInput(attrs={'class': 'form-group', 'type': 'date'}),
                   }

    def clean_amount(self):
        # Reject negative amounts; direction is carried by 'type'.
        price = self.cleaned_data['amount']
        if price < 0:
            raise forms.ValidationError("Amount can't be negative")
        return price

    def save(self, commit=True):
        """Save the expense and update the matching ExpenseMonths rows:
        income entries raise that month's income, anything else reduces
        its remaining limit.

        NOTE(review): the ``commit`` flag is ignored -- the instance and
        the month rows are always written to the database.
        """
        instance = super(UserExpenseForm, self).save(commit=False)
        instance.user = self.user
        # Month name derived from the entry date (YYYY-MM-DD -> month part).
        name = str(self.cleaned_data.get("date"))
        name = get_month(name.split('-')[1])
        data = self.user.expense_months.filter(name=name)
        for value in data:
            if instance.type == 'income':
                value.income += Decimal(instance.amount)
            else:
                value.limit -= Decimal(instance.amount)
            value.save()
        instance.save()
        return instance
class NewCategoryForm(forms.Form):
    """Form that lets a user add a new expense category.

    The owning User is smuggled in through the ``instance`` kwarg and
    popped before the base Form sees it.
    """

    new_category = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-group', 'placeholder': _(' Enter Category Name')}))

    def __init__(self, *args, **kwargs):
        self.user = kwargs.pop('instance')
        super(NewCategoryForm, self).__init__(*args, **kwargs)

    def save(self):
        """Create the category for this user unless one with the same
        name already exists.  Returns True on creation, False otherwise."""
        name = self.cleaned_data.get('new_category')
        already_exists = self.user.user_categories.filter(name=name).exists()
        if already_exists:
            return False
        Categories.objects.create(user=self.user, name=name)
        return True
class AddMonthForm(forms.ModelForm):
    """ModelForm that opens a new budgeting month for a user.

    The owning User arrives via the ``instance`` kwarg (popped before the
    ModelForm machinery runs).
    """

    def __init__(self, *args, **kwargs):
        self.user = kwargs.pop('instance')
        super(AddMonthForm, self).__init__(*args, **kwargs)

    class Meta:
        model = ExpenseMonths
        fields = ('name', 'income', 'limit')
        widgets = {
            'name': forms.NumberInput(attrs={'class': 'form-group', 'type': 'month'}),
            'limit': forms.NumberInput(attrs={'class': 'form-group', 'placeholder': _(' Set Your This Month Limit')}),
            'income': forms.NumberInput(attrs={'class': 'form-group', 'placeholder': _(' Enter Your This Month Income')}),
        }

    def clean_income(self):
        """Disallow negative income values."""
        value = self.cleaned_data['income']
        if value < 0:
            raise forms.ValidationError("Income can't be negative")
        return value

    def clean_limit(self):
        """Disallow negative spending limits."""
        value = self.cleaned_data['limit']
        if value < 0:
            raise forms.ValidationError("Limit can't be negative")
        return value

    def clean(self):
        """Convert the posted 'YYYY-MM' value into a month name."""
        raw = self.cleaned_data.get("name")
        if raw:
            self.cleaned_data["name"] = get_month(raw.split('-')[1])
        return self.cleaned_data

    def save(self, commit=True):
        """Attach the user, derive the 10%-of-limit trigger threshold and
        persist the month.  The ``commit`` flag is accepted for interface
        compatibility; the instance is always saved."""
        instance = super(AddMonthForm, self).save(commit=False)
        instance.user = self.user
        instance.trigger = (instance.limit * 10) / 100
        instance.save()
        return instance
class GenerateReport(forms.Form):
    """Form that exports a user's expenses in a date range as a CSV
    attachment.  The owning User is passed via the ``instance`` kwarg."""

    start_date = forms.DateTimeField(widget=forms.NumberInput(attrs={'type': 'date'}))
    end_date = forms.DateTimeField(widget=forms.NumberInput(attrs={'type': 'date'}))

    def __init__(self, *args, **kwargs):
        self.user = kwargs.pop('instance')  # the owning User
        super(GenerateReport, self).__init__(*args, **kwargs)

    def save(self):
        """Build and return an HttpResponse carrying the CSV report, or
        None when no entries fall in the range.

        NOTE(review): the filter uses date__gt / date__lte, i.e. the start
        date itself is EXCLUDED while the end date is included -- confirm
        this asymmetry is intended.
        """
        start_date = self.cleaned_data.get('start_date')
        end_date = self.cleaned_data.get('end_date')
        user = self.user
        data = user.user_expenses.filter(date__gt=start_date, date__lte=end_date).order_by('date')
        if data:
            response = HttpResponse(content_type='text/csv')
            file_name = f"{user.get_full_name()}_{start_date.strftime('%d-%m-%Y')}_{end_date.strftime('%d-%m-%Y')}_expenses.csv"
            response['Content-Disposition'] = f'attachment; filename={file_name}'
            header = ['User', 'Type', 'Category', 'Amount', 'Date']
            total_income = 0
            total_expense = 0
            writer = csv.writer(response)
            writer.writerow(header)
            for field in data:
                # Entries with a category count as expenses; entries
                # without one are treated as income.
                if field.expense_category:
                    total_expense += float(field.amount)
                    row = [user.get_full_name(), field.type, field.expense_category.name, float(field.amount),
                           field.date.strftime('%d-%m-%Y')]
                    writer.writerow(row)
                else:
                    total_income += float(field.amount)
                    row = [user.get_full_name(), field.type, '-', float(field.amount), field.date.strftime('%d-%m-%Y')]
                    writer.writerow(row)
            # Blank separator row, then the two totals.
            writer.writerow([])
            income = ['', '', '', f'Total Income: {total_income}', '']
            expenses = ['', '', '', f'Total Expense: {total_expense}', '']
            writer.writerow(income)
            writer.writerow(expenses)
            return response
        return None
| UTF-8 | Python | false | false | 6,010 | py | 47 | forms.py | 25 | 0.579368 | 0.577371 | 0 | 157 | 37.267516 | 131 |
aldld/auction-algos | 9,070,970,965,956 | d03727f38279658ef390be5a73204d1933aca2b5 | 5b819a087c004d6718ece4830a1d6ff4240dc64e | /auction/auction.py | 42935033fcb0a9bac101cf7c7e9512a729e1d9e0 | [
"MIT"
]
| permissive | https://github.com/aldld/auction-algos | ecefdd583865f00e65b1d813712a9b0eeafbd793 | ac6c59d4dfaee353d5d2af137874448989a06e14 | refs/heads/master | 2021-01-11T11:51:48.376567 | 2017-04-29T02:09:50 | 2017-04-29T02:09:50 | 76,750,493 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Implementations of various auction algorithms for min-cost flows."""
# TODO: If needed, make this quick-and-dirty implementation less inefficient.
def min_cost_flow(eps, graph, get_iter_nodes=None, select_edge=None,
                  get_delta=None, visualizer=None, max_iter=None,
                  debug=False):
    """A general template for implementations of the generic algorithm for
    min-cost network flows. This function is left as general as possible, so
    that its behaviour may be specified at runtime by passing as input functions
    that implement parts of the generic algorithm that are left unspecified.

    Parameters:
        eps: epsilon used for the nodes' push lists and for price rises.
        graph: exposes .nodes and .edges; nodes expose .surplus, .price,
            .label and .push_lists(eps); edges expose .flow, .cost,
            .capacity (None = uncapacitated), .min_flow, .source, .target.
        get_iter_nodes: callable(pos_surplus) -> set of nodes to process
            in one iteration (e.g. ``any_node``).
        select_edge: callable(pos, neg) -> edge or None; defaults to
            ``any_edge``.
        get_delta: callable(node, edge) -> signed flow change for a push.
        visualizer: optional object with .draw_graph(graph); used only
            when ``debug`` is True.
        max_iter: optional cap on outer iterations (None = unbounded).
        debug: draw the graph and wait for ENTER between steps.

    Returns:
        True, always -- feasibility is assumed (see TODO below).
    """
    if select_edge is None:
        select_edge = any_edge
    if visualizer is None:
        visualize = lambda *args: None # Do-nothing visualization.
    else:
        visualize = lambda graph: visualizer.draw_graph(graph)

    # TODO: Detect infeasibility. For now, assume the problem is feasible.
    # TODO: Make checking for stopping condition more efficient.
    pos_surplus = set(node for node in graph.nodes if node.surplus > 0)

    iteration = 0
    if debug:
        visualize(graph)
        input("Iteration {}: Press ENTER to continue.".format(iteration))
    while (max_iter is None or iteration < max_iter) and pos_surplus:
        # Perform delta-pushes.
        # Get all nodes where we attempt to perform delta-pushes.
        iter_nodes = get_iter_nodes(pos_surplus)
        for node in iter_nodes:
            # pos/neg: admissible outgoing/incoming edges for pushes.
            pos, neg = node.push_lists(eps)
            edge = select_edge(pos, neg)
            while edge is not None and node.surplus > 0:
                # Perform a delta-push.
                delta = get_delta(node, edge)
                if debug:
                    visualize(graph)
                    input("Performing {} push at node {} on {}. ".format(delta, node.label, edge))
                edge.flow += delta
                # A push changes the admissible lists; recompute before
                # picking the next candidate edge.
                pos, neg = node.push_lists(eps)
                edge = select_edge(pos, neg)

        # Perform price rises. TODO: Should we use the same iter_nodes?
        # Here, we use I = iter_nodes. TODO: Support this operation without
        # iterating over the entire graph.
        # Slack of non-saturated edges leaving iter_nodes:
        out_vals = [edge.target.price + edge.cost + eps - edge.source.price
                    for edge in graph.edges
                    if edge.source in iter_nodes
                    and edge.target not in iter_nodes
                    and (edge.capacity is None or edge.flow < edge.capacity)]
        # Slack of edges entering iter_nodes that still carry reducible flow:
        in_vals = [edge.source.price - edge.cost + eps - edge.target.price
                   for edge in graph.edges
                   if edge.source not in iter_nodes
                   and edge.target in iter_nodes
                   and edge.flow > edge.min_flow]
        price_rise_vals = out_vals + in_vals

        # Raise all prices in iter_nodes by the smallest slack (0 if none).
        gamma = 0
        if price_rise_vals:
            gamma = min(price_rise_vals)
        for node in iter_nodes:
            node.price += gamma

        # Recompute nodes with positive surplus. TODO: Make this less naive.
        pos_surplus = set(node for node in graph.nodes if node.surplus > 0)

        iteration += 1
        if debug:
            visualize(graph)
            input("Iteration {}: Press ENTER to continue.".format(iteration))

    return True
def any_node(pos_surplus):
    """Return a one-element set holding an arbitrary positive-surplus node.

    BUG FIX: the original used ``pos_surplus.pop()``, which removed the
    element from the caller's set (flagged by its own TODO).  Peek without
    mutating instead.
    """
    return {next(iter(pos_surplus))}
def any_edge(pos, neg):
    """Pick an arbitrary edge, preferring the positive-push list over the
    negative one; return None when both are empty."""
    for candidates in (pos, neg):
        if candidates:
            return set(candidates).pop()
    return None
def er_delta(node, edge):
    """Flow change for an epsilon-relaxation push at ``node`` along ``edge``:
    positive for an outgoing edge, negative for an incoming one, each
    capped by the node's surplus.

    NOTE(review): the outgoing branch caps the push by
    ``edge.cost - edge.flow`` while the rest of this module bounds flow by
    ``edge.capacity`` -- this looks like it should be
    ``edge.capacity - edge.flow``; confirm before relying on it.
    """
    if edge.source == node:
        return min(node.surplus, edge.cost - edge.flow)
    elif edge.target == node:
        return -min(node.surplus, edge.flow - edge.min_flow)
    # Edge not incident to node.  NOTE(review): a bare `raise` outside an
    # except block surfaces as RuntimeError("No active exception to
    # re-raise"), which is confusing to callers.
    raise
def er_min_cost_flow(eps, graph, visualizer=None, max_iter=None,
                     debug=False):
    """Epsilon-relaxation variant: the generic method specialised with the
    "any node / any edge" selection rules and the ER push size."""
    return min_cost_flow(
        eps,
        graph,
        get_iter_nodes=any_node,
        select_edge=any_edge,
        get_delta=er_delta,
        visualizer=visualizer,
        max_iter=max_iter,
        debug=debug,
    )
| UTF-8 | Python | false | false | 3,917 | py | 8 | auction.py | 5 | 0.602757 | 0.601225 | 0 | 102 | 37.401961 | 98 |
tapan17102001/ResponseApi | 15,917,148,838,678 | 1485d2743bbf7882dd46528199a953556b1606df | 656ab0f7a3ee3ccfd444ea4072daa38cc4082a56 | /recomand.py | a273b5d4912963346d0353c2ea29eca45ec936d0 | []
| no_license | https://github.com/tapan17102001/ResponseApi | d6ce1d09b910c015807e66a453e1f71e5a9fd3cf | 7624886d7ebfaf43ce14854b69b9628f5229df4f | refs/heads/master | 2022-07-15T09:33:34.851410 | 2020-05-13T11:53:35 | 2020-05-13T11:53:35 | 263,614,579 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import json
def recomand(word):
    """Query the TasteDive "similar" API and return up to five movie
    titles related to ``word``."""
    response = requests.get(
        "https://tastedive.com/api/similar",
        params={"q": word, "type": "movies", "limit": "5"},
    )
    payload = response.json()
    titles = []
    for entry in payload['Similar']['Results']:
        titles.append(entry['Name'])
    return titles
# Simple CLI driver: prompt for a title and print the recommendations.
print("Enter movie Name : ")
word = input()
print(recomand(word))
| UTF-8 | Python | false | false | 525 | py | 2 | recomand.py | 2 | 0.579048 | 0.577143 | 0 | 22 | 21.863636 | 58 |
trolen/advent-of-code | 9,552,007,314,005 | 0bf55739130e919a47471ce662cffcee84644677 | e7fcc1d64cd95805918ab1b5786bf81a92f973ef | /2019/day01/test_day01.py | dc880fed55875c9cd34fe2b279a915de6ff70387 | []
| no_license | https://github.com/trolen/advent-of-code | 8145c1e36fea04e53d4b7a885efcc2da71fbfe57 | 0a4e022a6a810d86e044a15036a2f5778f0d38af | refs/heads/master | 2023-02-26T13:11:58.341006 | 2023-02-20T23:22:27 | 2023-02-20T23:22:27 | 54,579,550 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/env python3
import unittest
import day01
class TestDay01(unittest.TestCase):
    """Checks day01's fuel calculations against the worked examples from
    the Advent of Code 2019 day 1 puzzle statement."""

    def test_part1(self):
        # fuel_required: fuel for a single module mass (part-one examples).
        self.assertEqual(2, day01.fuel_required(12))
        self.assertEqual(2, day01.fuel_required(14))
        self.assertEqual(654, day01.fuel_required(1969))
        self.assertEqual(33583, day01.fuel_required(100756))

    def test_part2(self):
        # fuel_required2: expected values from the part-two examples
        # (fuel needed for the fuel itself is included).
        self.assertEqual(2, day01.fuel_required2(14))
        self.assertEqual(966, day01.fuel_required2(1969))
        self.assertEqual(50346, day01.fuel_required2(100756))
if __name__ == '__main__':
unittest.main() | UTF-8 | Python | false | false | 587 | py | 297 | test_day01.py | 296 | 0.67121 | 0.553663 | 0 | 21 | 27 | 61 |
Ajay-Kumar-py/Hello_world | 2,439,541,451,796 | 55f0f32a30c064f52bb0a35966646a8e6f85aaef | 2d84601219060cdf052b0aa7c187d68360f3ba7a | /tset.py | 85b7db42cd2445c6a8f39a8c52ec89fd5a994189 | []
| no_license | https://github.com/Ajay-Kumar-py/Hello_world | 649a1b76ede8edcf2fe7a7b2a60d36253bec131b | 55323c4289c4fe70b018ceb8e48d55a7aeccdde5 | refs/heads/main | 2023-06-04T02:36:49.385236 | 2021-06-19T13:13:13 | 2021-06-19T13:13:13 | 376,546,446 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | print("hello world")
print("kiop") | UTF-8 | Python | false | false | 34 | py | 2 | tset.py | 2 | 0.705882 | 0.705882 | 0 | 2 | 16.5 | 20 |
Tejaswini8898/30days_python | 8,263,517,127,514 | 9880ceacf7f7f980b14e91356f90e42686a6d5e5 | 1674cb301a2bf6cb2a4ad120ce97dc1a81e1b336 | /Day3.py | 99a72886cbe13b184055cde5e39b5346b444895c | []
| no_license | https://github.com/Tejaswini8898/30days_python | 5b586976ba4743b4ad36150009785b74972c8823 | 52f5142efe2a7eef6bd7927be49f2c2f2a4c8d39 | refs/heads/main | 2023-06-22T21:53:33.647796 | 2021-07-20T05:20:38 | 2021-07-20T05:20:38 | 382,576,953 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | val = int(input("enter a number: "))
if val / 10 == 0 and val / 20 == 0:
print ('yes')
else:
print('false') | UTF-8 | Python | false | false | 115 | py | 11 | Day3.py | 11 | 0.53913 | 0.486957 | 0 | 5 | 22.2 | 36 |
karthikpappu/pyc_source | 17,179,874,529 | 1b30c857461be3b1ee4723db898ffcb108451239 | 91fa095f423a3bf47eba7178a355aab3ca22cf7f | /pypi_install_script/OmiseGO-0.1.tar/setup.py | 48a94d762542b1acb1bc082abacf9ccd029d97bb | []
| no_license | https://github.com/karthikpappu/pyc_source | 0ff4d03e6d7f88c1aca7263cc294d3fa17145c9f | 739e7e73180f2c3da5fd25bd1304a3fecfff8d6e | refs/heads/master | 2023-02-04T11:27:19.098827 | 2020-12-27T04:51:17 | 2020-12-27T04:51:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from setuptools import setup
import sys
setup(name='OmiseGO',
version=0.1) | UTF-8 | Python | false | false | 77 | py | 114,545 | setup.py | 111,506 | 0.766234 | 0.74026 | 0 | 4 | 18.5 | 29 |
CiscoTestAutomation/genieparser | 15,685,220,606,817 | d261db339b57fa5df99567633611f22b5c523c72 | 5ef6c8d47864f471e26b9902d61f8c687e941f05 | /src/genie/libs/parser/iosxe/tests/ShowLisp/cli/equal/golden_output2_expected.py | 4c3418b5285e399b788ab234395040b3c779e598 | [
"Apache-2.0"
]
| permissive | https://github.com/CiscoTestAutomation/genieparser | 169c196558f1c1a0f0d10650876096f993224917 | b531eff760b2e44cd69d7a2716db6f866907c239 | refs/heads/master | 2023-09-03T08:56:18.831340 | 2023-08-29T22:32:02 | 2023-08-29T22:32:02 | 131,621,824 | 247 | 409 | Apache-2.0 | false | 2023-08-29T22:32:04 | 2018-04-30T16:51:50 | 2023-08-14T18:19:33 | 2023-08-29T22:32:03 | 74,102 | 231 | 346 | 100 | Python | false | false | expected_output = {
'lisp_id': {
0: {
'domain_id': 0,
'multihoming_id': 0,
'locator_table': 'default',
'locator_default_set': 'N/A',
'eid_instance_count': '7',
'capability': ['Publish-Subscribe Instance-ID', 'Domain-Info', 'Route-Tag', 'SGT', 'Default-originate', 'Service-registration', 'Extranet-policy-propagation', 'Default-ETR Route-metric', 'Unknown vendor type skip', 'RAR-notify'],
'tcp_path_mtu_discovery': False
}
}
} | UTF-8 | Python | false | false | 547 | py | 9,975 | golden_output2_expected.py | 4,563 | 0.531993 | 0.52468 | 0 | 13 | 41.153846 | 241 |
ouseful-backup/ergast-python | 11,046,655,885,964 | 5b41302eb8a2175e2567bebf43dc4855e2638fc6 | b93843eab98ec09ae20141335c30b3b77da405e8 | /ergast/request.py | fbe79f6c87d940fe7e6d7eb629b655e2e662930b | [
"MIT"
]
| permissive | https://github.com/ouseful-backup/ergast-python | 708f6eabaac4982dafc85af0890d5ee52a1a75ec | e1d27b1c21f5e24c14729f05579e8c39646f8cda | refs/heads/master | 2021-06-21T13:04:48.335599 | 2017-07-10T02:23:44 | 2017-07-10T02:23:44 | 99,053,467 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Request for describing requests to the ergast service
"""
from cerberus import Validator
from six.moves import UserDict
# Cerberus schema describing a valid Ergast API request description.
req_schema = {
    'protocol': {'type': 'string', 'allowed': ['http', 'https'], 'required': True},
    'host': {'type': 'string', 'allowed': ['www.ergast.com'], 'required': True},
    'series': {'type': 'string', 'allowed': ['f1'], 'required': True},
    'resource': {
        'type': 'string',
        # BUG FIX: a missing comma after 'driverStandings' made Python
        # concatenate it with 'constructorStandings' into one bogus entry
        # ('driverStandingsconstructorStandings'), so neither standings
        # resource ever validated.
        'allowed': ['seasons', 'circuits', 'races', 'constructors',
                    'drivers', 'qualifying', 'results', 'driverStandings',
                    'constructorStandings', 'status', 'laps', 'pitstops']
    },
    # 'id' only makes sense when a resource is given.
    'id': {
        'type': ['string', 'integer'],
        'coerce': str,
        'dependencies': 'resource'
    },
    'season': {'type': ['string', 'integer'], 'coerce': str},
    'round': {'type': ['string', 'integer'], 'coerce': str},
    # Filter criteria: mapping of criterion name -> value.
    'criteria': {
        'type': 'dict',
        'keyschema': {
            'type': 'string',
            'allowed': ['circuits', 'constructors', 'drivers',
                        'driverStandings', 'constructorStandings', 'grid',
                        'results', 'status', 'fastest', 'laps']
        },
        'valueschema': {'type': ['string', 'integer'], 'coerce': str}
    },
    # Paging controls, coerced to strings for URL building.
    'limit': {'type': ['string', 'integer'], 'coerce': str},
    'offset': {'type': ['string', 'integer'], 'coerce': str}
}
# Module-level cerberus validator shared by every Request instance.
req_validator = Validator(req_schema)
class Request(UserDict):
    """Dictionary describing a single Ergast API query.

    ergast.client reads these settings to build URLs and execute calls.
    Construction validates the merged settings against the module's
    cerberus schema and raises RuntimeError on failure.
    """

    def __init__(self, *args, **kwargs):
        merged = {
            'protocol': 'http',
            'host': 'www.ergast.com',
            'series': 'f1',
            'limit': '1000',
            'offset': '0',
            'resource': 'races',
        }
        merged.update(kwargs)
        if not req_validator.validate(merged):
            raise RuntimeError(req_validator.errors)
        # Store the validator's normalized (coerced) document.
        super().__init__(*args, **req_validator.document)
| UTF-8 | Python | false | false | 1,970 | py | 9 | request.py | 7 | 0.534518 | 0.530964 | 0 | 55 | 34.818182 | 88 |
blanu/InternetMeetupTime | 11,605,001,647,589 | 4817d042b2bef27f46c4c80bceec7b55ea817a0e | 40ba8a41adc2e1dfe37d44627faf6eae421b8bfa | /python/clock.py | 8310120966ebbf687076bffe444073b9dfea233b | []
| no_license | https://github.com/blanu/InternetMeetupTime | d085cc6ae355318d9cd647ba9432b8d51ae8b609 | e169ccfddea96888b9436737c22cc465b6151da7 | refs/heads/master | 2020-03-29T21:45:09.140296 | 2011-08-02T03:38:03 | 2011-08-02T03:38:03 | 2,115,040 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import time
if __name__=='__main__':
    import curses
    win=curses.initscr()
    win.clear()
    try:
        while True:
            # Seconds elapsed since midnight UTC (epoch seconds mod one day).
            sec=int(time.time())
            esec=sec%86400
            # Zero-pad to 5 digits (max value 86399) and split 1/2/2.
            s=str(esec)
            while len(s)<5:
                s='0'+s
            h=s[:1]
            m=s[1:3]
            s=s[3:]
            # Alternative colon-separated rendering, kept for reference:
            # s='~'+h+':'+m+':'+s
            s='~'+h+m+'.'+s
            # Draw the clock string at a fixed screen position and park
            # the cursor at the origin before refreshing.
            win.addnstr(11, 35, s, len(s))
            win.move(0,0)
            win.refresh()
            time.sleep(1)
    except:
        # NOTE(review): bare except restores the terminal on any exit
        # (including Ctrl-C) but also silently swallows real errors.
        curses.endwin()
| UTF-8 | Python | false | false | 422 | py | 5 | clock.py | 2 | 0.490521 | 0.447867 | 0 | 28 | 14.035714 | 34 |
lab-csx-ufmg/webmedia2018 | 17,729,625,029,588 | a3a167794024559bf3b7b08100468d9e18c90b2a | 57d64ba460b622ad056d006b1d7f67057a774677 | /crawling-github/GitHubAPI/GitHubBase.py | 1d7d84611b45cdb15514c14b32bb5317e07b2852 | [
"MIT"
]
| permissive | https://github.com/lab-csx-ufmg/webmedia2018 | 0e158706f93d83f187597593e1856e5c331c8a96 | b0b2585096a8ba594bad330635c0ee7213795e07 | refs/heads/master | 2020-03-29T04:16:29.239542 | 2019-04-28T00:19:09 | 2019-04-28T00:19:09 | 149,523,561 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import requests
import json
from GitHubAPI.credentials import GIT_USER, GIT_TOKEN
'''
GitHubBase: class that is base for github requests
'''
class GitHubBase():
    """Base class for GitHub REST API clients.

    Credentials are resolved in priority order: explicit constructor
    arguments, then the GIT_USER / GIT_TOKEN environment variables, then
    the values bundled in GitHubAPI.credentials.
    """

    BASE_URL = "https://api.github.com"

    def __init__(self, user=None, token=None):
        self.user = user or ('GIT_USER' in os.environ and os.environ['GIT_USER']) or GIT_USER
        self.token = token or ('GIT_TOKEN' in os.environ and os.environ['GIT_TOKEN']) or GIT_TOKEN

    def request_api(self, url, data=None):
        """GET ``url`` with User-Agent/Authorization headers and return
        the decoded JSON body, or {} when the response is not JSON.

        ``data`` is accepted for backward compatibility but unused.
        BUG FIX: its default was a mutable ``{}`` (classic Python
        pitfall); it is now None.
        """
        headers = {}
        if self.user:
            headers = {'User-Agent': self.user}
        if self.token:
            headers["Authorization"] = "token {token}".format(token=self.token)
        r = requests.get(url, headers=headers)
        # BUG FIX: guard against a missing Content-Type header, which made
        # the original `'application/json' in None` raise TypeError.
        content_type = r.headers.get('content-type') or ''
        if 'application/json' in content_type:
            return json.loads(r.text or r.content)
        return {}
| UTF-8 | Python | false | false | 871 | py | 45 | GitHubBase.py | 25 | 0.623421 | 0.623421 | 0 | 27 | 31.259259 | 98 |
osilkin98/PyBry | 11,278,584,148,424 | 011f6a4213e2e156ae06ed124d570f7699aeb37b | a1cbc58a8f57d424fdedd4706f41362cc6944463 | /pybry/__init__.py | 878024ab69f27846b3b55ffe2d3fc64fc136e295 | [
"MIT"
]
| permissive | https://github.com/osilkin98/PyBry | 697b9d612d3c37f6065a047b21b3854b88692f9d | af86805a8077916f72f3fe980943d4cd741e61f0 | refs/heads/master | 2018-12-19T01:22:21.321819 | 2018-10-18T02:47:09 | 2018-10-18T02:47:09 | 148,851,950 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .lbryd_api import LbrydApi
from .lbrycrd_api import LbrycrdApi
from .LBRYException import LBRYException
# Package version exposed as pybry.__version__.
__version__ = '1.6.4'
| UTF-8 | Python | false | false | 133 | py | 1 | __init__.py | 1 | 0.766917 | 0.744361 | 0 | 6 | 21.166667 | 40 |
eggied97/python2018 | 6,949,257,092,818 | a0fb1f10b82c5bacb025cddddad8e3fe5469542a | 471001240fd536b871eb35caa4d3319cf9a25057 | /graph-walkthrough.py | a52654ceee08bf40cdd2d5f315796e2c9fadee7f | []
| no_license | https://github.com/eggied97/python2018 | ebefb31ae22004cb4e3a38f5f2568a88cbfe2c80 | 1cfe71908a94752ab910f5d367ad811799c1fc53 | refs/heads/master | 2021-04-30T02:39:10.301779 | 2018-02-14T12:59:17 | 2018-02-14T12:59:17 | 121,505,133 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Slide 1
# from graph import *
#
# G = Graph(False, 4) # create instance G of class Graph with directed = False (so it is an undirected graph),
# # 4 vertices, and label the vertices with integers (0 to 3)
# print(G) # V=[0, 2, 3, 1] (list of vertex labels)
# # E=[] (empty list = no edges)
# print(G.directed) # False
# print(G.simple) # False (default)
# print(G.edges) # empty set of instances of the Edge class
# print(G.vertices) # list of 4 instances of the Vertex class
# Slide 2
# G.vertices is a set, so we make it a (sorted) list in order to be able to index into it
# gv = sorted(list(G.vertices), key=lambda v: v.label)
# u = gv[0] # "first" vertex of G
# print(type(u))
# print(u) # label of the first vertex
# v = gv[1] # "second" vertex of G
# print(v) # label of the second vertex
# Slide 3
# w = Vertex(G) # create instance w of class Vertex that is "related to" (but not part of!) graph G,
# # which means that it gets the next default label (in this case 4)
# xx = Vertex(G, 'X') # create another vertex, but with explicit label 'X'
# print(w) # label of vertex w
# print(xx) # label of vertex xx
# print(G) # G has not changed yet
# G.add_vertex(w)
# G.add_vertex(xx)
# print(G) # now G has two more vertices (without edges)
# Slide 4
# e = Edge(u, v) # create instance e of class Edge between vertices u and v without a weight (default);
# # note that, again, the edge is "related to" the graph, but not yet part of it!
# ex = Edge(u, xx, 8)
# print(e.weight)
# print(ex.weight)
# print(e) # labels of tail and head vertices
# print(ex)
# G.add_edge(e) # add edge e to graph G
# G += ex # add edge ex to graph G (be careful: there is no operator + for graphs, only +=)
# print(G) # now G finally has some edges
# Slide 5
# print(G.is_adjacent(u, v)) # should be True
# ued = u.incidence # list of edges incident with u
# print(type(ued[0]))
# print(ued[0])
# print(ued[1])
# print(u.degree)
# Slide 6
# print(e.tail) # label of u
# print(e.tail == u)
# print(e.head) # label of v
# print(e.other_end(u)) # also label of v
# print(w in G.vertices)
# print(e in G.edges)
# print(v in u.neighbours) # should be True
# print(e.incident(w)) # should be False
| UTF-8 | Python | false | false | 2,481 | py | 5 | graph-walkthrough.py | 5 | 0.582426 | 0.573559 | 0 | 61 | 39.672131 | 111 |
osess/performance_management | 51,539,623,237 | 274d80a66579af0179caf2d2ed1652c2730d266a | f2e68031902a32330151ff09e5fe3fab577d38fe | /apps/companydepartment/forms.py | 04892c2d10f0572f2cf0d218107be9304357bab1 | []
| no_license | https://github.com/osess/performance_management | 1279c9bcf82fd45df6e225d2d880d776d6f86f4c | 02a1174ba5462b295799aab934b56418ed6630e5 | refs/heads/master | 2016-09-13T19:43:02.095213 | 2016-05-26T00:56:21 | 2016-05-26T00:56:21 | 59,707,683 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding:utf-8
from django import forms
from django.utils.translation import ugettext_lazy as _
from companydepartment.models import *
from question_answer.models import KHPlan
# 2015-03-16
# Create a department (创建部门)
class DepartmentAddModelForm(forms.ModelForm):
    """Department creation form; the excluded bookkeeping/relationship
    fields are not editable through this form."""
    class Meta:
        model = Department
        exclude= ('belong_to','khplan','dpid','status','display_name','department_report_to')
class CompanyAddModelForm(forms.ModelForm):
    """Company creation form.

    NOTE(review): this class is re-declared later in this module (with
    extra hidden tree fields); the later definition wins at import time,
    making this one dead code.
    """
    class Meta:
        model = Company
        exclude = ('users','admin','super_user','is_active')
class HRDepartmentAddForm(forms.Form):
    """Plain (non-model) HR form for creating a department.

    NOTE(review): lft/rght/tree_id/level mirror MPTT-style internal tree
    columns and are posted as the constant 1 -- presumably recomputed on
    save; confirm in the consuming view.
    """
    name = forms.CharField(label=_(u"Department Name"), max_length=125)
    belong_to = forms.ModelChoiceField(label=_(u"Belong To Company"), queryset=Company.objects.all())
    parent = forms.ModelChoiceField(label=_(u"Parent"), queryset=Department.objects.all(), required=False)
    description = forms.CharField(label=_(u"Description"), max_length=250, required=False)
    address = forms.CharField(label=_(u"Address"), max_length=125, required=False)
    city = forms.CharField(label=_(u"City"), max_length=50, required=False)
    state = forms.CharField(label=_(u"State"), max_length=10, required=False)
    zip_code = forms.CharField(label=_(u"ZipCode"), max_length=7, required=False)
    importance = forms.IntegerField(label=_(u"Importance"),initial=1)
    khplan = forms.ModelChoiceField(label=_(u'khplan'),queryset=KHPlan.objects.all(),required=False)
    # Hidden tree bookkeeping fields.
    lft = forms.IntegerField(widget=forms.HiddenInput(), initial=1)
    rght = forms.IntegerField(widget=forms.HiddenInput(), initial=1)
    tree_id = forms.IntegerField(widget=forms.HiddenInput(), initial=1)
    level = forms.IntegerField(widget=forms.HiddenInput(), initial=1)
class HRDepartmentAddModelForm(forms.ModelForm):
    """ModelForm over Department; __init__ re-wraps each field label with
    ugettext_lazy so labels are translated at render time.

    NOTE(review): Meta declares neither ``fields`` nor ``exclude``;
    Django 1.8+ rejects such ModelForms.
    """
    class Meta:
        model = Department

    def __init__(self, *args, **kwargs):
        super(HRDepartmentAddModelForm, self).__init__(*args, **kwargs)
        for key in self.fields:
            self.fields[key].label = _(self.fields[key].label)
class HRCompanyAddModelForm(forms.ModelForm):
    """HR company creation form with hidden tree bookkeeping fields;
    labels are wrapped with ugettext_lazy for lazy translation."""
    class Meta:
        model = Company
        exclude = ('users','admin','super_user')

    def __init__(self, *args, **kwargs):
        super(HRCompanyAddModelForm, self).__init__(*args, **kwargs)
        for key in self.fields:
            self.fields[key].label = _(self.fields[key].label)

    # Hidden tree bookkeeping fields, posted as the constant 1.
    lft = forms.IntegerField(widget=forms.HiddenInput(), initial=1)
    rght = forms.IntegerField(widget=forms.HiddenInput(), initial=1)
    tree_id = forms.IntegerField(widget=forms.HiddenInput(), initial=1)
    level = forms.IntegerField(widget=forms.HiddenInput(), initial=1)
    parent = forms.ModelChoiceField(label=_(u"Parent"), queryset=Company.objects.all(), required=False)
class CompanyAddModelForm(forms.ModelForm):
    """Company creation form with hidden tree bookkeeping fields.

    NOTE(review): this re-declaration shadows the simpler
    CompanyAddModelForm defined earlier in this module.
    """
    class Meta:
        model = Company
        exclude = ('users','admin','super_user','is_active')

    def __init__(self,*args, **kwargs):
        super(CompanyAddModelForm,self).__init__(*args,**kwargs)
        # Wrap labels with ugettext_lazy for lazy translation.
        for key in self.fields:
            self.fields[key].label = _(self.fields[key].label)

    # Hidden tree bookkeeping fields, posted as the constant 1.
    lft = forms.IntegerField(widget=forms.HiddenInput(), initial=1)
    rght = forms.IntegerField(widget=forms.HiddenInput(), initial=1)
    tree_id = forms.IntegerField(widget=forms.HiddenInput(), initial=1)
    level = forms.IntegerField(widget=forms.HiddenInput(), initial=1)
    parent = forms.ModelChoiceField(label=_(u"Parent"), queryset=Company.objects.all(), required=False)
class CompanyModifyModelForm(forms.ModelForm):
    """Edit form for an existing Company; hides ownership fields and
    lazily translates every field label."""

    class Meta:
        model = Company
        exclude = ('users','admin')

    def __init__(self, *args, **kwargs):
        super(CompanyModifyModelForm, self).__init__(*args, **kwargs)
        for field in self.fields.values():
            field.label = _(field.label)
class DepartmentModifyModelForm(forms.ModelForm):
    """Edit form for an existing Department; only the reporting link is
    withheld from editing."""
    class Meta:
        model = Department
        exclude = ('department_report_to',)
# 2015-03-12
class KHPlanAddForm(forms.ModelForm):
    """Creation form for a KHPlan; creator/status/expiration are set
    elsewhere, not by the user."""
    class Meta:
        model = KHPlan
        exclude = ('expiration_date','creator','status')
class KHPlanModifyForm(forms.ModelForm):
    """Edit form for an existing KHPlan."""
    class Meta:
        model = KHPlan
exclude = ('creator','expiration_date') | UTF-8 | Python | false | false | 4,258 | py | 244 | forms.py | 91 | 0.676706 | 0.666353 | 0 | 106 | 39.103774 | 106 |
hillerliao/dtwt | 3,977,139,760,012 | 38924ae1502e9914b2f5b2ac6577bfc5a9661134 | f737ceed342e5d605603381608a3a783dbbae1a8 | /recipe/forms.py | 705ab9619ce244dbe702db76bf8f347099460e83 | [
"MIT"
]
| permissive | https://github.com/hillerliao/dtwt | 34500b49d80cc79e491a645bb5f0611ee9ffae43 | 0e29e37d17a2e5a704885df87019ff0cdf006b8b | refs/heads/master | 2022-01-03T09:40:20.199817 | 2016-04-01T12:59:04 | 2016-04-01T12:59:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms
from .models import Recipe
from channel.models import Channel
class RecipeForm(forms.ModelForm):
    """Form for creating/editing a Recipe (trigger + action pair).

    When editing an existing recipe, the two extra channel dropdowns are
    pre-selected from the saved trigger's and action's channels.
    """
    def __init__(self, *args, **kwargs):
        super(RecipeForm, self).__init__(*args, **kwargs)
        if self.instance.id:
            # Editing: seed the channel selectors from the saved relations.
            self.fields['trigger_channel'].initial = self.instance.trigger.channel
            self.fields['action_channel'].initial = self.instance.action.channel

    # UI-only selectors; not part of the Recipe model fields below.
    trigger_channel = forms.ModelChoiceField(queryset=Channel.objects.all())
    action_channel = forms.ModelChoiceField(queryset=Channel.objects.all())

    class Meta:
        model = Recipe
        fields = ('trigger', 'action')
| UTF-8 | Python | false | false | 650 | py | 26 | forms.py | 17 | 0.68 | 0.68 | 0 | 18 | 35.055556 | 82 |
palermog/Google_Drive_Student_Assignment_Management | 10,436,770,576,067 | a26cbabe9be4a3633a0cca0a07bc24be19d3ad99 | 42fa1eccb9c7cd4047d081ae8c1fd50e3867f6f7 | /RemovePermissions.py | a06fd8ad477043a9e65f0348287e82a8bb84fb0e | []
| no_license | https://github.com/palermog/Google_Drive_Student_Assignment_Management | 385bebbb19143980fce2a6e513301ab702495046 | 37c8264d64b45bb4343534ab93ae60e733cc9be4 | refs/heads/master | 2021-01-19T11:34:38.145294 | 2017-02-17T04:28:48 | 2017-02-17T04:28:48 | 82,254,604 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import gdata.docs
import gdata.docs.service
import gdata.spreadsheet.service
import re, os
import gdata.docs.client
import gdata.acl.data
import pprint
# ---------------------------------------------------------------------------
# Revoke students' view permissions on per-student Google Docs assignments.
# NOTE(review): this is a Python 2 script (print statement below) and a
# TEMPLATE: the DATA placeholders have empty right-hand sides and are a
# SyntaxError until filled in.  `professor` is referenced in the ACL loop
# but its assignment is commented out, so it must be defined before running.
# ---------------------------------------------------------------------------
# DATA
username=
password=
students_data= # Google Sheets format : "timestamp / firstname / lastname / email"
classcode=
assignment=
#professor=


# Connect to Google-Spreadsheet
gd_client = gdata.spreadsheet.service.SpreadsheetsService()
gd_client.email = username
gd_client.password = password
gd_client.source = 'OpenSource-CreateDocs-v1'
gd_client.ProgrammaticLogin()

# Connect to Google-DocList
# NOTE(review): the first DocsClient (with ssl/debug settings) is
# immediately discarded by the re-assignment below.
client = gdata.docs.client.DocsClient(source='OpenSource-CreateDocs-v1')
client.ssl = True
client.http_client.debug = False
client = gdata.docs.client.DocsClient(source='OpenSource-CreateDocs-v1')
client.ClientLogin(username, password, client.source);

# Locate the student roster spreadsheet by exact title and open its
# first worksheet.
q = gdata.spreadsheet.service.DocumentQuery()
q['title'] = students_data
q['title-exact'] = 'true'
feed = gd_client.GetSpreadsheetsFeed(query=q)
spreadsheet_id = feed.entry[0].id.text.rsplit('/',1)[1]
feed = gd_client.GetWorksheetsFeed(spreadsheet_id)
worksheet_id = feed.entry[0].id.text.rsplit('/',1)[1]

rows = gd_client.GetListFeed(spreadsheet_id, worksheet_id).entry

for row in rows:
    # One roster row per student.
    firstname=row.custom['firstname'].text
    lastname=row.custom['lastname'].text
    email=row.custom['email'].text
    # Each student's document is titled "<assignment> - <lastname>".
    title_doc=assignment+' - '+lastname
    feeduri='/feeds/default/private/full?title='+title_doc+'&title-exact=true&max-results=5'
    feed2 = client.GetResources(uri=feeduri)
    if not feed2.entry:
        # NOTE(review): only warns -- execution falls through and
        # feed2.entry[0] below raises IndexError for a missing document.
        print 'No document of that title.\n'
    doc_id=feed2.entry[0]
    aclfeed=client.GetAcl(doc_id)
    # Strip every ACL entry except the professor's and the script owner's.
    for aclentry in aclfeed.entry:
        if aclentry.scope.value==professor or aclentry.scope.value==username:
            continue
        client.DeleteAclEntry(aclentry)
        print(aclentry.scope.value + ' no longer has permission to view ' + title_doc)

#    pp=pprint.PrettyPrinter(indent=4)
#    pp.pprint(aclfeed.ToString())

# REMOVE FIRST GRANTED PERMISSIONS FROM THE DOCUMENTS (OLD)
#    acl_entry = client.GetAclPermissions(doc_id.resource_id.text).entry[1]
#    client.Delete(acl_entry.GetEditLink().href, force=True)
| UTF-8 | Python | false | false | 2,090 | py | 4 | RemovePermissions.py | 3 | 0.762679 | 0.755024 | 0 | 65 | 31.153846 | 89 |
delair-ai/python-delairstack | 1,022,202,222,955 | cdeef2e8a6eda03fd93e8acc68b25aa4f72a2130 | 7ba75e8d477d0e5463b7c7763992e8bad947639f | /delairstack/core/utils/srs.py | e431f24632540ece38b8cf08e094f693985470fb | [
"MIT"
]
| permissive | https://github.com/delair-ai/python-delairstack | 1fd2f9082a0b156deb63c0f89ca61b6bac0419e8 | 4fb67ef8d1a95d4af9a4fab3168bc43c85cce420 | refs/heads/master | 2020-12-27T20:56:26.469637 | 2020-12-22T14:18:32 | 2020-12-22T14:18:32 | 238,050,263 | 9 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pkg_resources import resource_string
from delairstack.core.utils.typing import AnyStr
def __name2wkt(name: AnyStr, *, wkts_path: AnyStr) -> AnyStr:
"""Convert a vert SRS name to WKT format.
Args:
name: Name of vertical SRS to convert.
wkts_path: Path to the WKTs directory.
Raises:
ValueError: When ``name`` doesn't match any known WKT file.
"""
name = name.lower()
res_name = '{}/{}.wkt'.format(wkts_path, name)
try:
wkt = resource_string(__name__, res_name).decode('utf-8')
flatten_wkt = ''.join([line.strip() for line in wkt.splitlines()])
return flatten_wkt
except FileNotFoundError:
raise ValueError('Unknown SRS name: {}'.format(name))
def expand_vertcrs_to_wkt(desc: AnyStr) -> AnyStr:
"""Expand a vertical SRS description to WKT format.
The supported SRS names are ``"arbitrary"``, ``"arbitrary_feet"``,
``"arbitrary_feet_us"``, ``"egm96"``, ``"egm96_feet"``,
``"egm96_feet_us"``, ``"egm2008"``, ``"egm2008_feet"``,
``"egm2008_feet_us"``, ``"wgs84"``, ``"wgs84_feet"`` or
``"wgs84_feet_us"``.
Args:
desc: A WKT or one of the supported SRS names.
Returns:
If ``desc`` is equal to one of the supported SRS names, the
corresponding WKT is returned. Otherwise ``desc`` is returned.
"""
try:
return __name2wkt(desc, wkts_path='vertcrs')
except ValueError:
pass
return desc
| UTF-8 | Python | false | false | 1,470 | py | 85 | srs.py | 64 | 0.606803 | 0.588435 | 0 | 49 | 29 | 74 |
AntoineMkr/PythonNLPBitcoin | 12,661,563,619,171 | 0749b564b21a7f6c6c79c285a48a39b6836ccffa | c51bb8b64994d99560f35e4d36a6bd48b9f0de95 | /top100Cypto/getTweetsTop100.py | 1e7534cdb2aa353497381b9384d970f7bcc0f07b | []
| no_license | https://github.com/AntoineMkr/PythonNLPBitcoin | 548864ad430ee1cd764296a5098f5e31a67d39f5 | 3a0d4098222e4170261050beb5c02a8d89c9c10a | refs/heads/main | 2023-02-23T17:41:16.150936 | 2021-01-25T17:52:29 | 2021-01-25T17:52:29 | 326,165,780 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import csv
import json
import re
import sys
from preprocessingTweets import format_syntax, format_semantic, remove_noise
import tweepy
from getTwitterHandles import getTwitterHandles
with open('./twitter_credentials.json') as cred_data:
info = json.load(cred_data)
consumer_key = info['CONSUMER_KEY']
consumer_secret = info['CONSUMER_SECRET']
access_key = info['ACCESS_KEY']
access_secret = info['ACCESS_SECRET']
def get_all_tweets(screen_name):
#Twitter only allows access to a users most recent 3240 tweets with this method
print("Getting tweets from @" + str(screen_name))
#authorize twitter, initialize tweepy
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
#initialize a list to hold all the tweepy Tweets
alltweets = []
bitcointweets = []
#make initial request for most recent tweets (200 is the maximum allowed count)
new_tweets = api.user_timeline(screen_name = screen_name,count=200)
for tweet in new_tweets:
if(('bitcoin' in tweet.text) | ('btc' in tweet.text)):
tweet.text = " ".join(remove_noise(tweet.text))
tweet.text = format_syntax(tweet.text)
bitcointweets.append(tweet)
#save most recent tweets
alltweets.extend(new_tweets)
#save the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
#keep grabbing tweets until there are no tweets left to grab
while len(new_tweets) > 0:
print(f"getting tweets before {oldest}")
#all subsiquent requests use the max_id param to prevent duplicates
new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)
for tweet in new_tweets:
if(('bitcoin' in tweet.text) | ('btc' in tweet.text)):
tweet.text = " ".join(remove_noise(tweet.text))
tweet.text = format_syntax(tweet.text)
bitcointweets.append(tweet)
#save most recent tweets
alltweets.extend(new_tweets)
#update the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
print(f"...{len(alltweets)} tweets downloaded so far")
#transform the tweepy tweets into a 2D array that will populate the csv
outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in bitcointweets]
#write the csv
with open('./tweets2/'+screen_name + '_tweets.csv', 'w', encoding = 'utf8') as f:
writer = csv.writer(f)
writer.writerow(["id","created_at","text"])
writer.writerows(outtweets)
pass
if __name__ == "__main__":
handles = getTwitterHandles()
for handle in handles:
try:
get_all_tweets(str(handle))
except:
pass | UTF-8 | Python | false | false | 2,566 | py | 13 | getTweetsTop100.py | 5 | 0.714341 | 0.706157 | 0 | 84 | 29.559524 | 86 |
ermin-sakic/common-open-research-emulator-CORE | 4,380,866,667,414 | 2aadab4e66fbcde1fe9d126cc410283234704d60 | aedecd542cd751915376d1cac758c05a164cb56f | /coreemu-read-only/daemon/core/services/xorp.py | 062f4901672680089b6e1318525f62a4e851f71b | [
"BSD-2-Clause"
]
| permissive | https://github.com/ermin-sakic/common-open-research-emulator-CORE | 4ee6223b339e0de3f9d807381b705969ce3a56f2 | 9c246b0ae0e9182dcf61acc4faee41841d5cbd51 | refs/heads/master | 2021-01-19T11:43:14.949496 | 2015-06-24T20:29:30 | 2015-06-24T20:29:30 | 38,001,753 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
# CORE
# Copyright (c)2011-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
xorp.py: defines routing services provided by the XORP routing suite.
'''
import os
from core.service import CoreService, addservice
from core.misc.ipaddr import IPv4Prefix
from core.constants import *
class XorpRtrmgr(CoreService):
''' XORP router manager service builds a config.boot file based on other
enabled XORP services, and launches necessary daemons upon startup.
'''
_name = "xorp_rtrmgr"
_group = "XORP"
_depends = ()
_dirs = ("/etc/xorp",)
_configs = ("/etc/xorp/config.boot",)
_startindex = 35
_startup = ("xorp_rtrmgr -d -b %s -l /var/log/%s.log -P /var/run/%s.pid" % (_configs[0], _name, _name),)
_shutdown = ("killall xorp_rtrmgr", )
_validate = ("pidof xorp_rtrmgr", )
@classmethod
def generateconfig(cls, node, filename, services):
''' Returns config.boot configuration file text. Other services that
depend on this will have generatexorpconfig() hooks that are
invoked here. Filename currently ignored.
'''
cfg = "interfaces {\n"
for ifc in node.netifs():
cfg += " interface %s {\n" % ifc.name
cfg += "\tvif %s {\n" % ifc.name
cfg += "".join(map(cls.addrstr, ifc.addrlist))
cfg += cls.lladdrstr(ifc)
cfg += "\t}\n"
cfg += " }\n"
cfg += "}\n\n"
for s in services:
try:
s._depends.index(cls._name)
cfg += s.generatexorpconfig(node)
except ValueError:
pass
return cfg
@staticmethod
def addrstr(x):
''' helper for mapping IP addresses to XORP config statements
'''
try:
(addr, plen) = x.split("/")
except Exception:
raise ValueError, "invalid address"
cfg = "\t address %s {\n" % addr
cfg += "\t\tprefix-length: %s\n" % plen
cfg +="\t }\n"
return cfg
@staticmethod
def lladdrstr(ifc):
''' helper for adding link-local address entries (required by OSPFv3)
'''
cfg = "\t address %s {\n" % ifc.hwaddr.tolinklocal()
cfg += "\t\tprefix-length: 64\n"
cfg += "\t }\n"
return cfg
addservice(XorpRtrmgr)
class XorpService(CoreService):
''' Parent class for XORP services. Defines properties and methods
common to XORP's routing daemons.
'''
_name = "XorpDaemon"
_group = "XORP"
_depends = ("xorp_rtrmgr", )
_dirs = ()
_configs = ()
_startindex = 40
_startup = ()
_shutdown = ()
_meta = "The config file for this service can be found in the xorp_rtrmgr service."
@staticmethod
def fea(forwarding):
''' Helper to add a forwarding engine entry to the config file.
'''
cfg = "fea {\n"
cfg += " %s {\n" % forwarding
cfg += "\tdisable:false\n"
cfg += " }\n"
cfg += "}\n"
return cfg
@staticmethod
def mfea(forwarding, ifcs):
''' Helper to add a multicast forwarding engine entry to the config file.
'''
names = []
for ifc in ifcs:
if hasattr(ifc, 'control') and ifc.control == True:
continue
names.append(ifc.name)
names.append("register_vif")
cfg = "plumbing {\n"
cfg += " %s {\n" % forwarding
for name in names:
cfg += "\tinterface %s {\n" % name
cfg += "\t vif %s {\n" % name
cfg += "\t\tdisable: false\n"
cfg += "\t }\n"
cfg += "\t}\n"
cfg += " }\n"
cfg += "}\n"
return cfg
@staticmethod
def policyexportconnected():
''' Helper to add a policy statement for exporting connected routes.
'''
cfg = "policy {\n"
cfg += " policy-statement export-connected {\n"
cfg += "\tterm 100 {\n"
cfg += "\t from {\n"
cfg += "\t\tprotocol: \"connected\"\n"
cfg += "\t }\n"
cfg += "\t}\n"
cfg += " }\n"
cfg += "}\n"
return cfg
@staticmethod
def routerid(node):
''' Helper to return the first IPv4 address of a node as its router ID.
'''
for ifc in node.netifs():
if hasattr(ifc, 'control') and ifc.control == True:
continue
for a in ifc.addrlist:
if a.find(".") >= 0:
return a.split('/')[0]
#raise ValueError, "no IPv4 address found for router ID"
return "0.0.0.0"
@classmethod
def generateconfig(cls, node, filename, services):
return ""
@classmethod
def generatexorpconfig(cls, node):
return ""
class XorpOspfv2(XorpService):
''' The OSPFv2 service provides IPv4 routing for wired networks. It does
not build its own configuration file but has hooks for adding to the
unified XORP configuration file.
'''
_name = "XORP_OSPFv2"
@classmethod
def generatexorpconfig(cls, node):
cfg = cls.fea("unicast-forwarding4")
rtrid = cls.routerid(node)
cfg += "\nprotocols {\n"
cfg += " ospf4 {\n"
cfg += "\trouter-id: %s\n" % rtrid
cfg += "\tarea 0.0.0.0 {\n"
for ifc in node.netifs():
if hasattr(ifc, 'control') and ifc.control == True:
continue
cfg += "\t interface %s {\n" % ifc.name
cfg += "\t\tvif %s {\n" % ifc.name
for a in ifc.addrlist:
if a.find(".") < 0:
continue
addr = a.split("/")[0]
cfg += "\t\t address %s {\n" % addr
cfg += "\t\t }\n"
cfg += "\t\t}\n"
cfg += "\t }\n"
cfg += "\t}\n"
cfg += " }\n"
cfg += "}\n"
return cfg
addservice(XorpOspfv2)
class XorpOspfv3(XorpService):
''' The OSPFv3 service provides IPv6 routing. It does
not build its own configuration file but has hooks for adding to the
unified XORP configuration file.
'''
_name = "XORP_OSPFv3"
@classmethod
def generatexorpconfig(cls, node):
cfg = cls.fea("unicast-forwarding6")
rtrid = cls.routerid(node)
cfg += "\nprotocols {\n"
cfg += " ospf6 0 { /* Instance ID 0 */\n"
cfg += "\trouter-id: %s\n" % rtrid
cfg += "\tarea 0.0.0.0 {\n"
for ifc in node.netifs():
if hasattr(ifc, 'control') and ifc.control == True:
continue
cfg += "\t interface %s {\n" % ifc.name
cfg += "\t\tvif %s {\n" % ifc.name
cfg += "\t\t}\n"
cfg += "\t }\n"
cfg += "\t}\n"
cfg += " }\n"
cfg += "}\n"
return cfg
addservice(XorpOspfv3)
class XorpBgp(XorpService):
''' IPv4 inter-domain routing. AS numbers and peers must be customized.
'''
_name = "XORP_BGP"
_custom_needed = True
@classmethod
def generatexorpconfig(cls, node):
cfg = "/* This is a sample config that should be customized with\n"
cfg += " appropriate AS numbers and peers */\n"
cfg += cls.fea("unicast-forwarding4")
cfg += cls.policyexportconnected()
rtrid = cls.routerid(node)
cfg += "\nprotocols {\n"
cfg += " bgp {\n"
cfg += "\tbgp-id: %s\n" % rtrid
cfg += "\tlocal-as: 65001 /* change this */\n"
cfg += "\texport: \"export-connected\"\n"
cfg += "\tpeer 10.0.1.1 { /* change this */\n"
cfg += "\t local-ip: 10.0.1.1\n"
cfg += "\t as: 65002\n"
cfg += "\t next-hop: 10.0.0.2\n"
cfg += "\t}\n"
cfg += " }\n"
cfg += "}\n"
return cfg
addservice(XorpBgp)
class XorpRip(XorpService):
''' RIP IPv4 unicast routing.
'''
_name = "XORP_RIP"
@classmethod
def generatexorpconfig(cls, node):
cfg = cls.fea("unicast-forwarding4")
cfg += cls.policyexportconnected()
cfg += "\nprotocols {\n"
cfg += " rip {\n"
cfg += "\texport: \"export-connected\"\n"
for ifc in node.netifs():
if hasattr(ifc, 'control') and ifc.control == True:
continue
cfg += "\tinterface %s {\n" % ifc.name
cfg += "\t vif %s {\n" % ifc.name
for a in ifc.addrlist:
if a.find(".") < 0:
continue
addr = a.split("/")[0]
cfg += "\t\taddress %s {\n" % addr
cfg += "\t\t disable: false\n"
cfg += "\t\t}\n"
cfg += "\t }\n"
cfg += "\t}\n"
cfg += " }\n"
cfg += "}\n"
return cfg
addservice(XorpRip)
class XorpRipng(XorpService):
''' RIP NG IPv6 unicast routing.
'''
_name = "XORP_RIPNG"
@classmethod
def generatexorpconfig(cls, node):
cfg = cls.fea("unicast-forwarding6")
cfg += cls.policyexportconnected()
cfg += "\nprotocols {\n"
cfg += " ripng {\n"
cfg += "\texport: \"export-connected\"\n"
for ifc in node.netifs():
if hasattr(ifc, 'control') and ifc.control == True:
continue
cfg += "\tinterface %s {\n" % ifc.name
cfg += "\t vif %s {\n" % ifc.name
# for a in ifc.addrlist:
# if a.find(":") < 0:
# continue
# addr = a.split("/")[0]
# cfg += "\t\taddress %s {\n" % addr
# cfg += "\t\t disable: false\n"
# cfg += "\t\t}\n"
cfg += "\t\taddress %s {\n" % ifc.hwaddr.tolinklocal()
cfg += "\t\t disable: false\n"
cfg += "\t\t}\n"
cfg += "\t }\n"
cfg += "\t}\n"
cfg += " }\n"
cfg += "}\n"
return cfg
addservice(XorpRipng)
class XorpPimSm4(XorpService):
''' PIM Sparse Mode IPv4 multicast routing.
'''
_name = "XORP_PIMSM4"
@classmethod
def generatexorpconfig(cls, node):
cfg = cls.mfea("mfea4", node.netifs())
cfg += "\nprotocols {\n"
cfg += " igmp {\n"
names = []
for ifc in node.netifs():
if hasattr(ifc, 'control') and ifc.control == True:
continue
names.append(ifc.name)
cfg += "\tinterface %s {\n" % ifc.name
cfg += "\t vif %s {\n" % ifc.name
cfg += "\t\tdisable: false\n"
cfg += "\t }\n"
cfg += "\t}\n"
cfg += " }\n"
cfg += "}\n"
cfg += "\nprotocols {\n"
cfg += " pimsm4 {\n"
names.append("register_vif")
for name in names:
cfg += "\tinterface %s {\n" % name
cfg += "\t vif %s {\n" % name
cfg += "\t\tdr-priority: 1\n"
cfg += "\t }\n"
cfg += "\t}\n"
cfg += "\tbootstrap {\n"
cfg += "\t cand-bsr {\n"
cfg += "\t\tscope-zone 224.0.0.0/4 {\n"
cfg += "\t\t cand-bsr-by-vif-name: \"%s\"\n" % names[0]
cfg += "\t\t}\n"
cfg += "\t }\n"
cfg += "\t cand-rp {\n"
cfg += "\t\tgroup-prefix 224.0.0.0/4 {\n"
cfg += "\t\t cand-rp-by-vif-name: \"%s\"\n" % names[0]
cfg += "\t\t}\n"
cfg += "\t }\n"
cfg += "\t}\n"
cfg += " }\n"
cfg += "}\n"
cfg += "\nprotocols {\n"
cfg += " fib2mrib {\n"
cfg += "\tdisable: false\n"
cfg += " }\n"
cfg += "}\n"
return cfg
addservice(XorpPimSm4)
class XorpPimSm6(XorpService):
''' PIM Sparse Mode IPv6 multicast routing.
'''
_name = "XORP_PIMSM6"
@classmethod
def generatexorpconfig(cls, node):
cfg = cls.mfea("mfea6", node.netifs())
cfg += "\nprotocols {\n"
cfg += " mld {\n"
names = []
for ifc in node.netifs():
if hasattr(ifc, 'control') and ifc.control == True:
continue
names.append(ifc.name)
cfg += "\tinterface %s {\n" % ifc.name
cfg += "\t vif %s {\n" % ifc.name
cfg += "\t\tdisable: false\n"
cfg += "\t }\n"
cfg += "\t}\n"
cfg += " }\n"
cfg += "}\n"
cfg += "\nprotocols {\n"
cfg += " pimsm6 {\n"
names.append("register_vif")
for name in names:
cfg += "\tinterface %s {\n" % name
cfg += "\t vif %s {\n" % name
cfg += "\t\tdr-priority: 1\n"
cfg += "\t }\n"
cfg += "\t}\n"
cfg += "\tbootstrap {\n"
cfg += "\t cand-bsr {\n"
cfg += "\t\tscope-zone ff00::/8 {\n"
cfg += "\t\t cand-bsr-by-vif-name: \"%s\"\n" % names[0]
cfg += "\t\t}\n"
cfg += "\t }\n"
cfg += "\t cand-rp {\n"
cfg += "\t\tgroup-prefix ff00::/8 {\n"
cfg += "\t\t cand-rp-by-vif-name: \"%s\"\n" % names[0]
cfg += "\t\t}\n"
cfg += "\t }\n"
cfg += "\t}\n"
cfg += " }\n"
cfg += "}\n"
cfg += "\nprotocols {\n"
cfg += " fib2mrib {\n"
cfg += "\tdisable: false\n"
cfg += " }\n"
cfg += "}\n"
return cfg
addservice(XorpPimSm6)
class XorpOlsr(XorpService):
''' OLSR IPv4 unicast MANET routing.
'''
_name = "XORP_OLSR"
@classmethod
def generatexorpconfig(cls, node):
cfg = cls.fea("unicast-forwarding4")
rtrid = cls.routerid(node)
cfg += "\nprotocols {\n"
cfg += " olsr4 {\n"
cfg += "\tmain-address: %s\n" % rtrid
for ifc in node.netifs():
if hasattr(ifc, 'control') and ifc.control == True:
continue
cfg += "\tinterface %s {\n" % ifc.name
cfg += "\t vif %s {\n" % ifc.name
for a in ifc.addrlist:
if a.find(".") < 0:
continue
addr = a.split("/")[0]
cfg += "\t\taddress %s {\n" % addr
cfg += "\t\t}\n"
cfg += "\t }\n"
cfg += "\t}\n"
cfg += " }\n"
cfg += "}\n"
return cfg
addservice(XorpOlsr)
| UTF-8 | Python | false | false | 14,777 | py | 187 | xorp.py | 93 | 0.462272 | 0.453204 | 0 | 472 | 30.307203 | 108 |
shellshock1953/python | 13,039,520,731,596 | 74c1a9bdcae71505a51b18b77191944e5f7d8630 | 8beba9cf2a57630ee84cf1fe4dff00bd5c4f33e9 | /bin/Vidjener.py | 3d42699d8dd522670c37c73985f405cd9064b0ed | []
| no_license | https://github.com/shellshock1953/python | e58b284363f1d017ac4cffbff1d48ded3e2204a7 | 82132a7f202b8996850f70dbc32b6064e599f721 | refs/heads/master | 2021-01-20T19:06:13.622451 | 2018-11-19T07:15:49 | 2018-11-19T07:15:49 | 64,968,228 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import string
alphabet = []
crypt = []
text_file = open('text.file').read().lower()
key_int = []
# key = raw_input('Enter KEY:')
key = 'c'
for letter in string.printable.lower(): # making alphabet
alphabet.append(letter)
for char in key: # convert char key into int key
if char in alphabet:
key_int.append(alphabet.index(char))
# print 'int key is %s' % key_int
i = 0
for letter in text_file: # input_text[char + key]
if letter not in alphabet:
continue
crypt.append(alphabet[(alphabet.index(letter) + key_int[i % len(key)]) % len(alphabet)])
i += 1
# crypted_file = open('crypt_file.txt', 'w')
# crypted_file.writelines(map(str, crypt))
# crypted_file.close()
print map(list, crypt) | UTF-8 | Python | false | false | 724 | py | 27 | Vidjener.py | 24 | 0.651934 | 0.649171 | 0 | 25 | 28 | 92 |
sinojelly/python-scripts | 1,941,325,261,012 | 40aed9abb8f53dcbadb5ef896478c9e99301934e | 1d1fda62eeef26b03a494214a31a7806637ef4bf | /pyblogpost/3.x/BlogPost.py | df9843f5b23d94740003cf8638a83e97fe236038 | []
| no_license | https://github.com/sinojelly/python-scripts | 323343cf73ab6370e2e8cac00371f1f10a214fa2 | ed1bf74d7a714ba533d6dbe13719fef3c3ed91da | refs/heads/master | 2016-09-05T15:40:19.743728 | 2011-09-09T13:14:01 | 2011-09-09T13:14:01 | 2,355,346 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import xmlrpc.client
import sys
import os
import uuid
import pyblog
import HtmlProc
import BlogConfig
import BlogData
import Utility as u
import MIME
import UserException
__author__ = "Chen Guodong"
__copyright__ = "Copyright 2010, Guodong Workshop."
__credits__=["Wei Shijun", "Bao Fanfan"]
__license__ = "New BSD License"
__version__ = "1.0.3"
__maintainer__ = "Chen Guodong(sinojelly)"
__email__ = "sinojelly@gmail.com"
__status__ = "Production"
__welcome__ = "Welcome to use BatchPublishBlog tool."
guid_file = u.tool_dir(True) + 'lastpost_guid.ini' # assign guid/uuid on the first post
config_file = u.tool_dir(True) + 'blogconfig.xml'
data_file = u.tool_dir(True) + 'blogdata.xml'
log_file = u.tool_dir(True) + 'runlog.txt'
# this python file's path
mime_config = MIME.MIME(u.tool_dir(True) + 'MIME.xml')
class Logger:
def write(self, s):
f = open(log_file, "a")
f.write(s)
f.close()
mylogger = Logger()
def get_mime_type(suffix):
return mime_config.get_mime_type(suffix)
class BlogDataX(BlogData.BlogData):
'''encapsulation complex data structure'''
def __init__(self, guid, data_file):
BlogData.BlogData.__init__(self, data_file)
self.medias = BlogData.BlogData.get_media_list(self, guid)
self.blogs = BlogData.BlogData.get_blogs(self, guid)
def get_blog_hash(self, blog_name):
try:
blog = self.blogs[blog_name] #if dict has no key names blog_name, it is None? no, it's KeyErr exception
except:
return None
return blog['file_hash']
def get_media_hash(self, media_name):
try:
media = self.medias[media_name]
except:
return None
if media:
return media['file_hash']
def get_postid(self, blog_name):
try:
blog = self.blogs[blog_name] #if dict has no key names blog_name, it is None? no, it's KeyErr exception
except:
return None
return blog['postid']
pass
class WordPressX(pyblog.WordPress):
def __init__(self, server):
self.serverparam = server
pyblog.WordPress.__init__(self, server['posturl'], server['username'], server['password'], server['encoding'])
def new_post(self, title, body, categories):
return pyblog.WordPress.new_post(self, self.get_content(title, body, categories), blogid='1')
def update_post(self, postid, title, body, categories):
pyblog.WordPress.edit_post(self, postid, self.get_content(title, body, categories))
def upload_media(self, media):
file = open(media, "rb")
content = xmlrpc.client.Binary(file.read()) #base 64 encoding binary
file.close()
media_obj = {'name':media, 'type':get_mime_type(u.get_file_suffix(media)), 'bits':content}
try:
return u.try_exec(xmlrpc.client.ProtocolError, 2, 5, self.upload_media_func, media_obj) # retry 2 times, delay 5 seconds to retry
##return pyblog.WordPress.new_media_object(self, media_obj)
except UserException.TryTimeOutException as ex:
u.print_t(ex)
url = {'url': media}
return url # upload media fail, return local path
def get_content(self, title, body, categories): # categories is strings seperated by ';', split to array of string
content = {"description":body, "title":title}
if self.serverparam['vcategories'] == 'true':
active_categories = self.get_active_categories(categories)
else:
active_categories = u.split_to_list(categories, ';', '')
if active_categories:
content["categories"] = active_categories
return content
def upload_media_func(self, param_tuple):
return pyblog.WordPress.new_media_object(self, param_tuple[0], blogid='1')
def get_active_categories(self, categories):
exist = []
try:
elems = pyblog.WordPress.get_categories(self)
for elem in elems:
exist.append(elem['categoryName'])
except:
u.print_t('Get exist categories fail!')
return u.get_intersection(exist, u.split_to_list(categories, ';', '')) # jiao ji
pass
class MetaWeblogX(pyblog.MetaWeblog):
pass
class BlogPost:
server_class = {}
server_class = {
'wordpress':WordPressX,
'mediaweblog':MetaWeblogX,
}
def __init__(self, html_file, html_file_guid, categories, config_file, data_file):
## try:
self.servers_blog = {}
self.html_proc = HtmlProc.HtmlProc(html_file)
self.file_name = html_file
self.file_guid = html_file_guid
self.config = BlogConfig.BlogConfig(config_file)
self.data = BlogDataX(html_file_guid, data_file)
self.new_medias = self.html_proc.get_media_files()
self.html_title = self.html_proc.get_html_title()
self.html_body = self.html_proc.get_html_body()
self.categories = categories
fileserver = self.config.get_fileserver()
blogs = self.config.get_blogs()
self.servers = [fileserver[0]]
for blog in blogs :
self.servers.append(blog)
## except:
## u.print_t("Error occur in BlogPost.__init__.")
pass
def should_post_media(self, file):
'''Wither the blog should to post/update this article/media.
If article not modified, but media modified, the media should post.
Returns:
0 -- not post
1 -- new post
'''
new_hash = self.html_proc.get_media_hash(file)
old_hash = self.data.get_media_hash(file) #time compare
if not old_hash : # have not post, it's None
return '1'
if new_hash != old_hash :
return '1'
return '0'
def should_post_blog(self, blog_name):
'''Wither the blog should to post/update this article/media.
If article not modified, but media modified, the media should post.
Returns:
0 -- not post
1 -- new post
2 -- update post
'''
new_hash = self.html_proc.get_html_hash()
old_hash = self.data.get_blog_hash(blog_name) #hash compare
if not old_hash : # have not post, it's None
return '1'
if new_hash != old_hash :
return '2'
return '0'
def get_categories(self, server):
categories = ""
if server['categories']:
categories += server['categories'] + ";"
if self.categories:
categories += self.categories
return categories
def new_blog(self, blog, server):
html_body = self.post_medias(blog, server)
u.print_t('Post content...')
postid = blog.new_post(self.html_title, html_body, self.get_categories(server))
self.data.add_blog(self.file_guid, server['name'], postid, self.html_proc.get_html_hash())
pass
def update_blog(self, blog, server):
html_body = self.post_medias(blog, server)
u.print_t('Post content...')
blog.update_post(self.data.get_postid(server['name']), self.html_title, html_body, self.get_categories(server))
self.data.update_blog(self.file_guid, server['name'], self.html_proc.get_html_hash())
pass
def post(self):
u.print_t("Begin to post %s..." % self.html_title)
for server in self.servers:
try:
self.post_blog(server)
except pyblog.BlogError as fault:
u.print_t('Process failure! BlogError: %s\n' % fault)
continue
## else :
## u.print_t('Process failure! Unknown error.\n')
## continue
u.print_t('Process successfull!\n')
self.data.write_file()
pass
def connect(self, server):
try:
return self.servers_blog[server['name']] # hold server connection. first time, should raise a KeyErr exception
except:
u.print_t("Connect to server %s..." % server['name'])
self.servers_blog[server['name']] = self.server_class[server['system']](server)
return self.servers_blog[server['name']]
def post_blog(self, server):
if server['postblog'] != 'true':
u.print_t("No post on %s because of manual closing." %(server['name']))
return
flag = self.should_post_blog(server['name'])
if flag == '0':
u.print_t("No post on %s because of no modify." %(server['name']))
return
blog = self.connect(server)
if flag == '1':
u.print_t("Begin new post on %s..." %(server['name']))
self.new_blog(blog, server)
elif flag == '2':
u.print_t("Begin update post on %s..." %(server['name']))
self.update_blog(blog, server)
pass
def post_medias(self, blog, server):
flag = server['media']
if flag == '0' :
return self.html_body
if flag == '1' :
return self.upload_medias(blog, self.html_body)
self.html_body = self.upload_medias(blog, self.html_body)
return self.html_body
def upload_medias(self, blog, html_body):
for media in self.new_medias:
if self.should_post_media(media) == '1':
url = self.upload_media(blog, media)
html_body = self.html_proc.update_html_body(html_body, media, url)
self.data.update_media(self.file_guid, media, url, self.html_proc.get_media_hash(media)) # update one media info to data file.
else :
html_body = self.html_proc.update_html_body(html_body, media, self.data.get_media_list(self.file_guid)[media]['remote_path']) # update html_body's media path
return html_body
def upload_media(self, blog, media):
u.print_t("Upload media file: %s..." % media)
return blog.upload_media(media)['url']
def usage():
help = '''Usage:\r\n python.exe blogpost.py categories html_file file_guid html_file2 file_guid2 ...\r\n''';
print(help)
def post_one_file(index, html_file, guid, categories):
if html_file[-3:] == 'ziw':
html_file = u.ziw2html(html_file)
else: # enter html's dir
os.chdir(os.path.dirname(html_file))
if guid == '0':
guid = "%s" % uuid.uuid1() # convert to string
f = open(guid_file, "a")
f.write("GUID"+str(index)+"=" + guid + "\r\n")
f.close()
mypost = BlogPost(html_file, guid, categories, config_file, data_file)
mypost.post()
mylogger.write("Post file: \nTitle: " + mypost.html_title + "\nGUID : " + mypost.file_guid +"\n")
def main():
print(__welcome__)
print("Version : " + __version__)
print("Author : " + __author__ )
print("Email : " + __email__)
u.debug.print(sys.argv)
argnum = len(sys.argv)
if argnum < 4 or argnum %2 != 0:
usage()
return -1
if not os.path.isfile(config_file):
printf('Config file(%s) not exist!' % config_file)
return -1
f = open(guid_file, "w")
f.write("[Common]\r\n")
f.close()
for index in range(int((len(sys.argv) - 2) / 2)): #argv[0] = 'BlogPost.py'
post_one_file(index, sys.argv[index * 2 + 2], sys.argv[index * 2 + 3], sys.argv[1]) #argv[1] = categories, seperated by ;
return 0
def start():
try:
main()
## except xmlrpc.client.ProtocolError as ex:
## u.print_t(ex)
## except:
## u.print_t("Unknown exception!")
finally:
print("Please check file("+log_file+") for more infomation.")
#stdout_ = sys.stdout # backup reference to the old stdout.
sys.stdout = mylogger
sys.stderr = mylogger
print("Batch publish blog finished at "+u.get_modify_time() +"\n")
os.system("pause")
# call start() without __name__ == '__main__', so can use cxfreeze to make scripts to a exe.
start()
##if __name__ == "__main__":
##
## try:
## main()
#### except xmlrpc.client.ProtocolError as ex:
#### u.print_t(ex)
#### except:
#### u.print_t("Unknown exception!")
## finally:
## print("Please check file("+log_file+") for more infomation.")
##
## #stdout_ = sys.stdout # backup reference to the old stdout.
## sys.stdout = mylogger
## sys.stderr = mylogger
##
## print("Batch publish blog finished at "+u.get_modify_time() +"\n")
## os.system("pause")
#for testing
##posturl='https://storage.msn.com/storageservice/MetaWeblog.rpc'
##username="sinojellycn"
##password="123456"
##
##blog = pyblog.WordPress(posturl, username, password)
##content = {"description":'Test description6', "title":'Test article6'}#, "categories": u.split_to_list('Python;Test', ';', '')}
##blog.new_post(content, blogid = "1")
##print(blog.list_methods2())
##print(blog.method_signature('metaWeblog.newPost'))
##print(blog.get_categories("1"))
#system test
##server_params = [
#### {"url":"http://www.cnblogs.com/sinojelly/services/metablogapi.aspx", "username":'sinojelly', "password":'87345465', "encoding":'utf-8'},
#### {"url":"http://blog.sinojelly.dreamhosters.com/xmlrpc.php", "username":'admin', "password":'87345465', "encoding":'utf-8'}, #wordpress
#### {"url":"http://sinojelly.20x.cc/xmlrpc.php", "username":'admin', "password":'B78b9z24', "encoding":'utf-8'},
#### {"url":"http://blog.csdn.net/sinojelly/services/MetaBlogApi.aspx", "username":'sinojelly',"password":'87345465', "encoding":'utf-8'}, # 2010.3.21 test OK!
#### {"url":"http://sinojelly.dreamhosters.com/xmlrpc.php", "username":'admin',"password":'87345465', "encoding":'utf-8'}, #drupal. 2010.3.21 test failure. Blog API module is not configured to support the 1 content type, or you don't have sufficient permissions to post this type of content.
#### {"url":"http://blog.vsharing.com/RPC.ashx", "username":'sinojelly',"password":'87345465', "encoding":'utf-8'}, #2010.3.21 test failure. NullReferenceException
#### {"url":"http://sinojelly.blog.51cto.com/xmlrpc.php", "username":'sinojelly', "password":'87345465', "encoding":'gb2312'}, # 2010.3.20 test failure. but WizPlugin post successful!
##
##]
##i = 0
##while i< len(server_params):
## print("Begin test post:")
## blog = pyblog.WordPress(server_params[i]['url'], server_params[i]['username'], server_params[i]['password'], server_params[i]['encoding'])
## content = {"description":'test', "title":'test_title', "categories": u.split_to_list('Python;Test;Code;2050', ';', '')}
## blog.new_post(content, blogid = "1")
## print("Finish:"+str(i))
## i = i + 1
| UTF-8 | Python | false | false | 15,086 | py | 25 | BlogPost.py | 15 | 0.581798 | 0.570595 | 0 | 389 | 36.781491 | 294 |
LoopsUTD/ImageProcessing | 19,645,180,413,574 | c13b997de8767a824aa82cf93bf5895044846dff | a9be07d397c882dc9cd6485037824365af680d6c | /UI/watershed.py | 803eeaea04c02ad15c993a662acda9cff7b29e54 | []
| no_license | https://github.com/LoopsUTD/ImageProcessing | 1eb37fbae3aea3b6ef4fe909fcf0abc6d9ed6221 | 769ab14d31ab7b635b84f8699ae305e2f85625b6 | refs/heads/master | 2021-09-07T09:10:04.657967 | 2018-02-20T21:11:53 | 2018-02-20T21:11:53 | 105,183,779 | 0 | 0 | null | false | 2018-02-19T16:55:14 | 2017-09-28T18:19:03 | 2017-10-25T17:34:05 | 2018-02-19T16:55:14 | 67,139 | 0 | 0 | 1 | Matlab | false | null | def main():
    # Function-local imports: OpenCV/numpy/matplotlib are only loaded when
    # main() actually runs.
    import cv2
    import numpy as np
    from matplotlib import pyplot as plt
    name = "croppedNoLens.jpg"
    img = cv2.imread(name)
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Otsu's method chooses the binarisation threshold automatically
    # (the 0 threshold argument is ignored in THRESH_OTSU mode).
    ret, thresh = cv2.threshold(gray_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    ##Do kernel convolution noise reduction
    # noise removal: morphological closing fills small holes in the mask
    kernel = np.ones((5,5),np.uint8)
    opening = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel, iterations = 3)
    #sure_bg = cv2.dilate(opening,kernel,iterations=2)
    # Find external contours in the cleaned-up mask.
    # NOTE(review): the [-2] index keeps this working across OpenCV versions
    # whose findContours return a different tuple shape — confirm it matches
    # the installed OpenCV.
    cnts = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    print("[INFO] {} unique contours found".format(len(cnts)))
    for (i,c) in enumerate(cnts):
        # Fill each contour with gray (thickness -2 means filled).
        ((x,y), _) = cv2.minEnclosingCircle(c)
        #cv2.putText(opening, "#[]".format(i + 1), (int(x) - 10, int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0,0,255), 2)
        cv2.drawContours(opening, [c], -1, (128,128,128), -2)
    #cv2.imshow("Image", opening)
    #cv2.waitKey(0)
    plt.imshow(opening, 'gray')
    plt.show()


if __name__ == "__main__":
main() | UTF-8 | Python | false | false | 1,073 | py | 33 | watershed.py | 3 | 0.679404 | 0.630009 | 0 | 37 | 28.027027 | 112 |
3jackdaws/market-bot | 3,444,563,775,477 | 6204bbc443319aabbef74d97821a31096e1eeb6a | 19317a5fb5fae3a784de8f52aa2eec20cf929344 | /marketbot/modules/base/ticker.py | 552c408150412c24d87457e4e0a8ea79a2df4222 | []
| no_license | https://github.com/3jackdaws/market-bot | 4b4a4afc28c107efdc80163165346d9df65b710a | 2677e9fe9c870ab62d650a8fdf8a8fc9e82cf62f | refs/heads/master | 2020-09-09T21:28:34.283392 | 2020-02-11T16:05:00 | 2020-02-11T16:05:00 | 221,576,041 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from marketbot import Bot, Message, util
from discord import Embed, Webhook, TextChannel
from . import api
import re
import logging
import asyncio
from time import time
logger = logging.getLogger("BASE.TICKER")

# Matches a ticker wrapped in square brackets, e.g. "[AAPL]" (group 1 = symbol).
TICKER_PATTERN = r'\[([a-zA-Z]+)\]'
# Ticker optionally followed by one method call, e.g. "[AAPL].change(7 d)".
# Group 1 = ticker, group 2 = ".method(args)", group 3 = the raw args text.
EXPRESSION_PATTERN = TICKER_PATTERN + r'([.][a-z]+\(([^_()]*)\))?'
def get_embed(title, url=None, fields=(), color=6750105):
    """Build a Discord Embed with the given title, link and fields.

    Args:
        title: the embed title text.
        url: optional URL the title links to.
        fields: iterable of (name, value) or (name, value, inline) tuples.
            The default is an immutable empty tuple — the previous mutable
            ``[]`` default would be shared between all calls.
        color: embed colour as an int (default 6750105 == 0x66FF99).

    Returns:
        The populated discord Embed.
    """
    e = Embed(title=title, url=url)
    e.color = color
    for field in fields:
        # inline defaults to True when the tuple has no third element.
        e.add_field(name=field[0], value=field[1], inline=field[2] if len(field) > 2 else True)
    return e
class Stock:
    """Resolves ticker expressions like ``[AAPL]`` or ``[AAPL].change(7 d)``
    into Discord embeds describing the stock."""

    # Compiled form of the module-level EXPRESSION_PATTERN.
    regex = re.compile(EXPRESSION_PATTERN)
    rh_stock_url = "https://robinhood.com/stocks/"

    def __init__(self, ticker):
        # Symbols are normalised to upper case for the API and URLs.
        self.ticker = ticker.upper()

    async def default(self):
        """Build the default embed: asset name, price, daily change, rating."""
        asset, quote, rating = await asyncio.gather(
            api.get_asset(self.ticker),
            api.get_quote(self.ticker),
            api.get_rating(self.ticker)
        )
        return get_embed(
            asset['name'],
            self.rh_stock_url + self.ticker,
            fields=[
                ("Price", quote['price']),
                ("Change (today)", quote['change%']),
                # get_rating may return a falsy value when no rating exists.
                ("Rating", rating['rating'] if rating else "N/A"),
            ]
        )

    async def change(self, timespan="7 d"):
        """Build an embed showing the price change over *timespan*.

        *timespan* is "<count> <unit>" where unit is "m" (minutes) or "d"
        (days); anything malformed falls back to "7 d".  Returns an error
        string instead of an embed when the count is out of range.
        """
        matches = re.match(r"([0-9]+) ?(m|d)", timespan)
        if matches:
            limit = matches.group(1) or "7"
            timespan = matches.group(2) or "d"
        else:
            limit = "7"
            timespan = "d"
        try:
            timespan = {
                "m": "minute",
                "d": "day",
            }[timespan]
        except KeyError:  # was a bare except: — only a missing key can occur
            timespan = "day"
        if int(limit) >= 1000:
            return "Timespan limit must be less than 1000."
        print(limit, timespan)  # debug trace
        change, asset = await asyncio.gather(
            api.get_change(self.ticker, limit, timespan),
            api.get_asset(self.ticker)
        )
        return get_embed(
            asset['name'],
            self.rh_stock_url + self.ticker,
            fields=[
                ("Price", change['current']),
                (f"Change ({change['period']})", f"{change['change%']}%"),
            ])
async def manage_stock_object(bot:Bot, message:Message):
    """Scan a Discord message for a ticker expression and reply with an embed.

    SECURITY NOTE(review): the matched expression is executed with eval()
    below.  EXPRESSION_PATTERN limits it to "[TICKER].method(args)" with a
    restricted argument charset, but eval on chat input is still risky —
    consider dispatching to an explicit whitelist of Stock methods instead.
    """
    expr = Stock.regex.search(message.content)
    if expr:
        expression = expr.group(0)
        logger.info(f'[{message.author.name}]: "{message.content}" - Expression: {expression}')
        ticker = expr.group(1)     # the symbol inside [...]
        arguments = expr.group(3)  # raw argument text of the method call, if any
        stock = Stock(ticker)
        # Rewrite "[AAPL].change(7 d)" into "s.change('7 d')" so it can be
        # evaluated with the Stock instance bound to the name "s".
        stock_expr = expression.replace(f"[{ticker}]", "s", 1)
        if arguments:
            stock_expr = stock_expr.replace(arguments, f"'{arguments}'")
        try:
            start = time()
            # logger.info("Evaluating " + stock_expr)
            output = eval(stock_expr, {'s':stock})
            if asyncio.iscoroutine(output):
                output = await output
            if isinstance(output, Stock):
                # A bare "[TICKER]" evaluates to the Stock itself; show its
                # default summary embed.
                output = await output.default()
            end = time()
            logger.info(f"{expression} executed in {round((end - start) * 1000, 2)}ms")
            channel = bot.get_channel(message.channel.id) # type: TextChannel
            if isinstance(output, Embed):
                await util.opt_webhook_send_embed(channel, output)
            elif isinstance(output, str):
                await channel.send(output)
        except Exception as e:
            # Boundary handler: never let a bad expression crash the bot.
            logger.error("MSO Error: " + str(e))
| UTF-8 | Python | false | false | 3,743 | py | 12 | ticker.py | 9 | 0.516965 | 0.507614 | 0 | 126 | 28.68254 | 95 |
dawidjk/Interview-Practice | 695,784,702,429 | 49f0cf1b2e2a7fe9bfe977c7e8a0bc6f16095ad4 | 88f705139e8a2b0402579a943f6270f6042c61ba | /datatypes/test_string_builder.py | 5bc69117aaba599030241477224123e7129f385c | []
| no_license | https://github.com/dawidjk/Interview-Practice | df3275be3746ba74c9cdbd572946fc384ca69271 | c9d4f6e88617ebd9c75f30cd0a5c8e54081035a1 | refs/heads/master | 2020-06-13T19:26:16.658566 | 2019-07-03T23:15:28 | 2019-07-03T23:15:28 | 194,765,831 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
from string_builder import StringBuilder
class TestStringBuilder(unittest.TestCase):
    """Checks that StringBuilder accumulates appended pieces in order."""

    def test(self):
        builder = StringBuilder()
        for piece in ("I ", "like ", "pi!"):
            builder.append(piece)
        self.assertEqual(builder.toString(), "I like pi!")
if __name__ == '__main__':
unittest.main() | UTF-8 | Python | false | false | 353 | py | 12 | test_string_builder.py | 12 | 0.719547 | 0.719547 | 0 | 14 | 24.285714 | 59 |
TyrnanBatch/Messanger-App | 9,491,877,746,235 | 16c34db402f0a4f5ebeab0881aea09ecd331cd61 | 1c7ffa6dd70591d6add81a422a1f4106e2350173 | /main.py | f6a9c257b1280fc2b330fde0f139736e62848521 | []
| no_license | https://github.com/TyrnanBatch/Messanger-App | 4cf68f8327af81eb373f4ddc262851ac7ec1378d | 246a2da968a48c727d72cee0ac0f7a7e63adb50a | refs/heads/main | 2023-08-22T20:36:34.459370 | 2021-09-21T17:18:16 | 2021-09-21T17:18:16 | 408,507,343 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tkinter as tk
from tkinter import ttk
from tkinter.messagebox import showinfo
import sys
# Create the single top-level Tk window for the app.
root = tk.Tk()

# Basic window settings.
# NOTE(review): geometry('200x250') is later overridden by the centering
# geometry() call below, and configure(width=1600, height=900) sets yet
# another size — confirm which window size is actually intended.
root.geometry('200x250')
root.configure(width=1600, height=900)
root.title('Messenger App')
root.configure(bg='light blue') # '#5e5a5a'

# Center the window: offset its top-left corner so the *requested* widget
# size sits in the middle of the screen.
winWidth = root.winfo_reqwidth()
winHeight = root.winfo_reqheight()
posRight = int(root.winfo_screenwidth() / 2 - winWidth / 2)
posDown = int(root.winfo_screenheight() / 2 - winHeight / 2)
root.geometry("+{}+{}".format(posRight, posDown))
class Contacts():
    """Namespace for contact-related actions."""

    @staticmethod
    def Open(Name):
        """Open the conversation view for the selected contact.

        Declared @staticmethod because it takes no ``self`` and is invoked
        on the class (``Contacts.Open(record)`` in MessageMenu).
        """
        print(Name)  # TODO: show last message + send box for this contact
                     # (maybe look up an id attached to Name?)
def exit():
    # Terminate the whole process; wired to the Exit button.
    # NOTE(review): this shadows the builtin exit() within this module.
    sys.exit()
def settings():
    # Placeholder for the settings screen (only logs for now).
    print("Settings")
def MainMenu():
    """Clear the window and build the main-menu screen."""
    global root
    # Remove whatever screen is currently showing.
    for child in root.winfo_children():
        child.destroy()
    root.title('Messenger App - Main Menu')

    def spacer():
        # Blank label used as vertical padding between widgets.
        tk.Label(root, text="", bg='light blue').pack()

    spacer()
    tk.Label(root, text="Messenger App", bg='light blue').pack()
    spacer()
    tk.Button(root, text="Open Messenger", command=MessageMenu).pack()
    spacer()
    tk.Button(root, text="Settings", command=settings).pack()
    spacer()
    tk.Button(root, text="Exit", command=exit).pack()
def MessageMenu():
    """Clear the window and build the messenger screen (contact list + entry)."""
    global root
    for widget in root.winfo_children():
        widget.destroy()
    root.title('Messenger App - Messenger')
    root.geometry('500x500')
    # NOTE(review): this button has no text or command yet — presumably it
    # will submit message_entry's contents; confirm intended behaviour.
    message_submit_button = tk.Button(root)
    message_submit_button.grid(row=5, column=3)
    message_entry = tk.Entry(root)
    message_entry.grid(row=5, column=2)
    # NOTE(review): ('#1') is just the string '#1', not a 1-tuple — it works
    # here, but ('#1',) was probably meant.
    columns = ('#1')
    tree = ttk.Treeview(root, columns=columns, show='headings')
    # define headings
    tree.heading('#1', text='Contacts:')
    # hard-coded sample data; replace with real contacts later
    contacts = ['Bob','Jake','Tin_man']
    # adding data to the treeview
    for contact in contacts:
        tree.insert('', tk.END, values=contact)
    # open the conversation for whichever row(s) the user selects
    def item_selected(event):
        for selected_item in tree.selection():
            # tree.item() returns a dict describing the row
            item = tree.item(selected_item)
            # the row's values list (the contact name)
            record = item['values']
            Contacts.Open(record)
    tree.bind('<<TreeviewSelect>>', item_selected)
    tree.grid(row=1, column=0, sticky='nsew')
    # add a scrollbar tied to the treeview
    scrollbar = ttk.Scrollbar(root, orient=tk.VERTICAL, command=tree.yview)
    tree.configure(yscroll=scrollbar.set)
    scrollbar.grid(row=1, column=1, sticky='ns')
# Main Loop
if __name__ == "__main__":
    # Build the main menu first, then hand control to Tk's event loop.
    MainMenu()
    root.mainloop()
| UTF-8 | Python | false | false | 2,995 | py | 1 | main.py | 1 | 0.643406 | 0.631386 | 0 | 124 | 23.145161 | 144 |
pronay11/Hospital_Management_System | 9,431,748,203,039 | 10f7ee611dee3eff280fd25415a0eef67c7be0e9 | a6ed25e7155ec4b50f38681a03ff9bf702d8deb5 | /hospitalapp/migrations/0020_auto_20191202_0940.py | 9dd0ece1f0eda3897048715e9f391e47939a97c8 | []
| no_license | https://github.com/pronay11/Hospital_Management_System | 93fe448dd89b3ab15c064e8b20fa47e0b2ddcd86 | e7d2afff9b194ef9a0f2d9ca1cca9e7bee2ec9f5 | refs/heads/master | 2020-11-25T11:30:57.763491 | 2019-12-17T15:06:32 | 2019-12-17T15:06:32 | 228,637,957 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.2.5 on 2019-12-02 03:40
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames the Doctor model's "img" field
    # to "picture" (see 0019 for the previous state).

    dependencies = [
        ('hospitalapp', '0019_auto_20191202_0923'),
    ]

    operations = [
        migrations.RenameField(
            model_name='doctor',
            old_name='img',
            new_name='picture',
        ),
    ]
| UTF-8 | Python | false | false | 365 | py | 31 | 0020_auto_20191202_0940.py | 20 | 0.569863 | 0.484932 | 0 | 18 | 19.277778 | 51 |
dabeiT3T/Data-Structures-Answer | 6,923,487,295,445 | 54e333f24ffc6806924579c8939a1022811de45f | 5486aa5c60ded82fa9851e19cda8229c1c55ce84 | /Chapter 8 Queues/Quiz 2/arrayqueue.py | 11bc4a67dd9dd85d5a6503dcc35766869a079844 | []
| no_license | https://github.com/dabeiT3T/Data-Structures-Answer | 1bea264b94e3233962fd8cdfe17af266cc757814 | d298cbfd22f998505b9e548e507a3ab9a3be47c0 | refs/heads/master | 2020-04-09T11:13:08.101875 | 2019-02-27T07:13:10 | 2019-02-27T07:13:10 | 160,300,167 | 5 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
'''
Author: dabei
My answer to Chapter 8 Quiz 2.
'''
import sys
from arrays import Array
from abstractqueue import AbstractQueue
class ArrayQueue(AbstractQueue):
    '''Represents an array-based queue (circular buffer).

    _head and _rear are indices into _array; items wrap around the end of
    the backing array, which grows and shrinks with the logical size.
    (The original docstring said "linked queue", which this is not.)
    '''

    # default array's capacity
    CAPACITY = 10

    def __init__(self, items = None):
        # clear() must run first so the backing array exists before
        # AbstractQueue.__init__ adds any initial items.
        self.clear()
        AbstractQueue.__init__(self, items)

    def clear(self):
        '''Reset to an empty queue backed by a fresh default-capacity array.'''
        self._array = Array(ArrayQueue.CAPACITY)
        self._head = 0
        # For an empty queue, head and rear coincide.
        self._rear = self._head
        self._size = 0

    def peek(self):
        '''Return the item at the queue's front.'''
        if self.isEmpty():
            raise KeyError('The queue is empty')
        return self._array[self._head]

    def add(self, item):
        '''Add the item to the queue's rear.'''
        # grow the backing array first if it is full
        self._grow()
        if self.isEmpty():
            # _head == _rear: place the first item where both point
            self._array[self._rear] = item
        else:
            # _rear += 1 and _array[_rear] = item;
            # the modulo wraps _rear back to index 0 when it would
            # run past the end of _array
            self._rear = (self._rear+1) % len(self._array)
            self._array[self._rear] = item
        self._size += 1

    def pop(self):
        '''Pop and return the item at the queue's front.'''
        if self.isEmpty():
            raise KeyError('The queue is empty')
        # get the head item
        item = self._array[self._head]
        if self._head != self._rear:
            # advance _head towards _rear (with wrap-around); when they are
            # equal the queue becomes empty and the indices stay put
            self._head = (self._head+1) % len(self._array)
        self._size -= 1
        # shrink the backing array if it is mostly unused
        self._shrink()
        return item

    def _copyToNewArray(self, newArray):
        # Copy items in queue order, then rebase the indices on the new,
        # unwrapped array.
        for index, item in enumerate(self):
            newArray[index] = item
        # reset _head & _rear
        self._head = 0
        self._rear = len(self) - 1
        self._array = newArray

    def _grow(self):
        '''Double the array's length when the queue has filled it.'''
        if len(self) == len(self._array):
            # create a double-size array
            newArray = Array(len(self._array)*2)
            # copy
            self._copyToNewArray(newArray)

    def _shrink(self):
        '''
        Halve the array's length
        when the logical size is a quarter of its length or less,
        but never below the initial capacity.
        '''
        if len(self) <= len(self._array)//4 and len(self._array) >= ArrayQueue.CAPACITY*2:
            # create a half-size array
            newArray = Array(len(self._array)//2)
            self._copyToNewArray(newArray)

    def __iter__(self):
        # Yield items from _head towards _rear, wrapping around the array.
        for i in range(len(self)):
            index = (self._head + i) % len(self._array)
            yield self._array[index]
if __name__ == '__main__':
    # Smoke test / demo of the queue operations.
    queue = ArrayQueue()
    print('Length:', len(queue))
    print('Empty:', queue.isEmpty())
    print('Add a, b, c')
    queue.add('a')
    queue.add('b')
    queue.add('c')
    print('Queue:', *queue)
    print('Length:', len(queue))
    print('Empty:', queue.isEmpty())
    print('Peek:', queue.peek())
    print('Pop:', queue.pop())
    print('Pop:', queue.pop())
    print('Pop:', queue.pop())
    print('Empty:', queue.isEmpty())
    # peek/pop on an empty queue must raise KeyError
    print('Call peek() will raise an error.')
    try:
        queue.peek()
    except KeyError as e:
        print('KeyError:', e, file=sys.stderr)
    print('Call pop() will raise an error.')
    try:
        queue.pop()
    except KeyError as e:
        print('KeyError:', e, file=sys.stderr)
    print('Add d')
    queue.add('d')
    newQueue = ArrayQueue(range(5))
    print('New queue:', newQueue)
    # '+' presumably comes from AbstractQueue — confirm its semantics there
    print('Add to the new queue:', queue+newQueue)
    print('Add a number will raise an error.')
    try:
        # NOTE(review): 'sum' shadows the builtin; harmless here but worth
        # renaming.
        sum = queue + 4
    except TypeError as e:
        print('TypeError:', e, file=sys.stderr)
| UTF-8 | Python | false | false | 3,793 | py | 46 | arrayqueue.py | 33 | 0.538624 | 0.533087 | 0 | 132 | 27.734848 | 90 |
eldojk/Workspace | 10,917,806,888,732 | 324e0aacabbc5d5f51390b07562888073a5aec5b | a9dfc35814bde9f387bb78db2e8566c08e38b635 | /WS/G4G/Problems/dp/__init__.py | 765f31619e9b09723edb05086a92179248057a57 | []
| no_license | https://github.com/eldojk/Workspace | b6c87f7ab74c4a3bb8585fdfa36a24a731f280f8 | 224626665a2b4c0cf701731f4e4dc96c93a26266 | refs/heads/master | 2021-01-19T13:33:09.378172 | 2017-11-14T17:53:09 | 2017-11-14T17:53:09 | 82,396,876 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
https://people.cs.clemson.edu/~bcdean/dp_practice/
""" | UTF-8 | Python | false | false | 58 | py | 586 | __init__.py | 583 | 0.672414 | 0.672414 | 0 | 3 | 18.666667 | 50 |
HayeonLee/ProjCA | 9,294,309,276,028 | 683202c3d176e042fb60752e0f924bb22fa4caea | e35057a23886ede5b1509d569f03a2ecfd7041a0 | /ours/preprocess/count_max_token.py | 24493bc4a843c13b17b8ac043d768b23d8b0ca30 | []
| no_license | https://github.com/HayeonLee/ProjCA | faad0b1e28fc0abdde5ca94ae3e60b06c4ce812f | 17e662340ddd3ec6390eaf97fe15b407f13032a9 | refs/heads/master | 2020-03-22T15:41:53.558744 | 2018-08-26T14:23:12 | 2018-08-26T14:23:12 | 140,270,566 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import json
import matplotlib
matplotlib.use('agg')
from pycocotools.coco import COCO
import nltk
# Load the COCO 2017 train and val caption annotation files.
jf = json.load(open('/DATA/cvpr19/coco/annotations/captions_train2017.json'))
jf_val = json.load(open('/DATA/cvpr19/coco/annotations/captions_val2017.json'))
# cnt tracks the maximum caption length (in NLTK word tokens) seen so far.
cnt = 0
for i in range(len(jf['annotations'])):
    # NOTE(review): str(...).decode('utf-8') only works on Python 2 —
    # Python 3's str has no decode(); confirm the intended interpreter.
    tokens = nltk.tokenize.word_tokenize(str(jf['annotations'][i]['caption']).lower().decode('utf-8'))
    cnt = max(cnt, len(tokens))
for i in range(len(jf_val['annotations'])):
    tokens = nltk.tokenize.word_tokenize(str(jf_val['annotations'][i]['caption']).lower().decode('utf-8'))
    cnt = max(cnt, len(tokens))
print(cnt) | UTF-8 | Python | false | false | 665 | py | 10 | count_max_token.py | 9 | 0.685714 | 0.663158 | 0 | 20 | 31.35 | 106 |
ArisChristoforidis/NAS-Timeseries | 2,559,800,521,080 | 8a323032df1c47bd25e785adad8b9d3e27e9a829 | a98698cddd1bc4beb7644d7122b0a28555903814 | /enums.py | 44052ad9d1e2f49820481a403a1ae24393360950 | []
| no_license | https://github.com/ArisChristoforidis/NAS-Timeseries | e256d6069d39822934067fbe7c7c3283d298b7cf | 3306a67f6d95af1576b85ab373a537367a7b4d31 | refs/heads/master | 2023-04-20T15:11:51.157478 | 2021-06-29T08:57:04 | 2021-06-29T08:57:04 | 316,300,630 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from enum import Enum
class ConnectMode(Enum):
    # NOTE(review): the trailing comma makes IN's value the tuple (0,), not
    # the int 0 — a classic Enum pitfall; confirm before relying on .value.
    IN = 0,
    OUT = 1
class ModuleType(Enum):
    # NOTE(review): trailing comma → value is the tuple (0,), not 0;
    # likely unintended (see ConnectMode.IN for the same pattern).
    NEURAL_LAYER = 0,
    ABSTRACT_MODULE = 1
class SaveMode(Enum):
    # NOTE(review): trailing comma → value is the tuple (0,), not 0;
    # likely unintended (see ConnectMode.IN for the same pattern).
    PICKLE = 0,
    CONSOLE_LOG = 1
| UTF-8 | Python | false | false | 209 | py | 33 | enums.py | 32 | 0.602871 | 0.574163 | 0 | 14 | 13.5 | 24 |
Angel888/suanfa | 6,287,832,166,414 | fecb9a8faa4f3eec5a0aa1ce44b3129533d97351 | 87fe6ec76a16f1fb4712df0b6a497f0df75bd5d1 | /biao-shi-shu-zhi-de-zi-fu-chuan-lcof.py | 20d25438bc573dbe5b09ae8e4453cc30f175af8e | []
| no_license | https://github.com/Angel888/suanfa | 48d4f6f425d0075111517e7ea635a74b79df97a6 | 4a27fdd976268bf4daf8eee447efd754f1e0bb02 | refs/heads/master | 2023-05-09T06:29:44.649587 | 2021-05-29T00:45:33 | 2021-05-29T00:45:33 | 371,849,036 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # https://leetcode-cn.com/problems/biao-shi-shu-zhi-de-zi-fu-chuan-lcof/
# todo 有限状态自动机是什么鬼
class Solution:
    """LCOF 20 / LeetCode 65: decide whether a string represents a number.

    Accepted forms (after stripping surrounding spaces):
        [sign] digits [ '.' [digits] ] [ ('e'|'E') [sign] digits ]
        [sign] '.' digits               [ ('e'|'E') [sign] digits ]
    """

    def isNumber(self, s: str) -> bool:
        """Return True if *s* is a valid decimal/scientific numeric string."""
        s = s.strip()
        n = len(s)

        def digits(i):
            # Consume a run of ASCII digits; report whether any were seen.
            start = i
            while i < n and '0' <= s[i] <= '9':
                i += 1
            return i, i > start

        def signed(i):
            # Optional sign followed by digits.
            if i < n and s[i] in '+-':
                i += 1
            return digits(i)

        i, is_numeric = signed(0)
        if i < n and s[i] == '.':
            i, frac = digits(i + 1)
            # Valid if digits appear on either side of the dot.
            is_numeric = is_numeric or frac
        if is_numeric and i < n and s[i] in 'eE':
            # The exponent must itself be a (signed) integer.
            i, is_numeric = signed(i + 1)
        # Everything must be consumed for the whole string to be a number.
        return is_numeric and i == n
misbahulard/stix_api | 10,007,273,821,925 | e14249ed42431ae633c9c2197b8ce3f5041b91ca | 24c472e5a722b571feee1a5fc8345d9358595065 | /app/model/actor_analytics.py | 877196ec6b90a461c5e9b66fae377ae6bf5cebfc | []
| no_license | https://github.com/misbahulard/stix_api | b6270aa7f9ca05b6eb71363a7180a5aab661a758 | 348f72553b954b7b4802b660085b0317f77097e6 | refs/heads/master | 2022-05-29T22:08:06.035795 | 2019-07-19T09:53:01 | 2019-07-19T09:53:01 | 155,293,943 | 0 | 0 | null | false | 2022-05-25T02:07:03 | 2018-10-29T23:16:59 | 2019-07-19T09:53:14 | 2022-05-25T02:07:03 | 10,607 | 0 | 0 | 3 | Python | false | false | from app import mongo
from app.utils import jsonify_stix
class ActorAnalytics(object):
    """ ActorAnalytics model for access actor_analytics collection in mongodb """

    # Maximum number of rows returned by the count queries.
    TOP_N = 10

    def __init__(self):
        self.collection = mongo.db.actor_analytics

    def _top_counts(self, group_id):
        """Aggregate number_observed sums grouped by *group_id*, sorted
        descending, and return at most TOP_N rows via jsonify_stix.

        Shared by both public methods below, which previously duplicated
        this pipeline and hand-rolled the truncation loop.
        """
        pipeline = [
            {
                "$group": {
                    "_id": group_id,
                    "count": {"$sum": "$number_observed"}
                },
            },
            {
                "$sort": {"count": -1}
            }
        ]
        event_list = list(self.collection.aggregate(pipeline))
        # The list is already sorted, so slicing keeps the top entries.
        return jsonify_stix(event_list[:self.TOP_N])

    def get_actor_count(self):
        """Top actors, keyed by (ip, country), by total number_observed."""
        return self._top_counts({"ip": "$ip", "country": "$country"})

    def get_actor_country_count(self):
        """Top countries by total number_observed."""
        return self._top_counts("$country")
| UTF-8 | Python | false | false | 2,122 | py | 15 | actor_analytics.py | 13 | 0.391612 | 0.386899 | 0 | 62 | 33.145161 | 81 |
greyhatchet/7P1A-Game | 4,715,874,097,568 | dd9b308b44359e0a9d37d82784c2cf31003611e7 | 3f926ee5300b659f3f36200178c9952f0f9199f1 | /test_scorereader.py | b88286103e28411fb94cd0839002607d9c437ce8 | []
| no_license | https://github.com/greyhatchet/7P1A-Game | 940efb71cad0f61068b1fd27fb5b95fd33fcc6c1 | a0c567116e8f5654f68ef18284505948f9496a27 | refs/heads/master | 2020-03-30T20:39:51.230256 | 2018-10-22T04:33:50 | 2018-10-22T04:33:50 | 151,598,111 | 1 | 0 | null | false | 2018-10-11T18:11:51 | 2018-10-04T15:56:45 | 2018-10-11T18:10:23 | 2018-10-11T18:11:50 | 2,916 | 1 | 0 | 0 | Python | false | null | import unittest
from scorereader import readScores
class ScoreReaderTestCase(unittest.TestCase):
    # Tests for scorereader.py

    def test_readScores(self):
        # A valid scores file parses to a truthy result.
        self.assertTrue(readScores('scoresfile.txt'))
        # Missing files and non-string arguments must be rejected;
        # assertFalse is the idiomatic form of assertTrue(not ...).
        self.assertFalse(readScores('notarealfilename.txt'))
        self.assertFalse(readScores(4))
if __name__ == '__main__':
unittest.main() | UTF-8 | Python | false | false | 383 | py | 20 | test_scorereader.py | 14 | 0.678851 | 0.67624 | 0 | 14 | 25.5 | 63 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.