repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
noname2048/ao-arduino-iot | 635,655,170,480 | e0219d64a7b62a7ae0286ed28d41c2c97f2a2016 | 7bed0f643bbb1d0511daea3aafcf7597ecc5282b | /src/local_db/schemas.py | cd9e8a204b264bdcc8a487027de94e8740c2bb04 | []
| no_license | https://github.com/noname2048/ao-arduino-iot | 87a0386462f052ea4536536e102ec6958512c994 | 5e92a0ecd228a6fc3a94842c7deea1433bda5bca | refs/heads/main | 2023-07-13T08:22:22.380136 | 2021-08-18T11:34:14 | 2021-08-18T11:34:14 | 393,054,869 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import List, Optional
from pydantic import BaseModel
import datetime
class SensorValueBase(BaseModel):
    """Shared fields for a single sensor reading."""

    # Owning device and the moment the sample was taken.
    device_id: int
    time: datetime.datetime
    # Measured environment values.
    temperature: float
    humidity: float
class SensorValueCreate(SensorValueBase):
    """Request schema for creating a sensor reading; adds no extra fields."""
class SensorValue(SensorValueBase):
    """Sensor reading as returned from the database (includes primary key)."""

    id: int

    class Config:
        # Allow construction directly from ORM model instances.
        orm_mode = True
class DeviceBase(BaseModel):
    """Shared fields for a device record."""

    modelname: str
    pincode: str
    # Timestamps are optional — presumably filled in server-side.
    created_at: Optional[datetime.datetime]
    updated_at: Optional[datetime.datetime]
class DeviceCreate(DeviceBase):
    """Request schema for creating a device; adds no extra fields."""
class Device(DeviceBase):
    """Device as read from the database, with its related sensor readings."""

    id: int
    sensorvalue_set: List[SensorValue]
    # NOTE(review): unlike SensorValue, no Config.orm_mode is declared here —
    # confirm this schema is never built via from_orm().
| UTF-8 | Python | false | false | 632 | py | 25 | schemas.py | 19 | 0.719937 | 0.719937 | 0 | 39 | 15.205128 | 43 |
ZhouYue321/machine_learning_AndrewNg | 8,864,812,514,778 | 50c3541f1f68c6f6602f69b9eda1d71717c1e314 | 7cef3483694d86be62f2fbc03d72d12798900a81 | /EX7_K_mean_and_principal_component/pca_project_data.py | e012da91b90bb23c225e6ba0718c000f7f550ae1 | []
| no_license | https://github.com/ZhouYue321/machine_learning_AndrewNg | 57c19498f1bd64a574f808089146a1884be2f922 | 3f59414ffb20d2a04ff8b7b1d757686b0892f742 | refs/heads/master | 2022-01-11T20:11:43.406077 | 2019-05-01T08:27:47 | 2019-05-01T08:27:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
# -*- coding:utf-8 -*-
def project_data(x, u, k):
    """Project data ``x`` onto the first ``k`` principal components.

    ``u`` is the eigenvector matrix from PCA; the result is ``x @ u[:, :k]``
    with shape (num_samples, k).
    """
    basis = u[:, :k]
    return x @ basis
| UTF-8 | Python | false | false | 105 | py | 59 | pca_project_data.py | 58 | 0.485714 | 0.466667 | 0 | 7 | 14 | 26 |
kirill-kundik/CinemaChallengeBackend | 15,401,752,747,131 | f7242ad77f64a5012e45928ec779375605f18b9d | ff896734043212b2da6f69471435a2dcda31612e | /migrations/versions/6fa53ebdac2e_.py | 5e1e3961f818192e2dfd1a4b661ba9fcb910aae0 | [
"MIT"
]
| permissive | https://github.com/kirill-kundik/CinemaChallengeBackend | 2d77ff2776a1ad9d109738cddbb9ecebcfaebc54 | aea4ac801a9a5c907f36f07b67df162b4bd85044 | refs/heads/master | 2023-01-09T23:08:04.630520 | 2020-11-21T08:42:01 | 2020-11-21T08:42:01 | 314,540,042 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """added obtained model for achievements
Revision ID: 6fa53ebdac2e
Revises: 249eca898a31
Create Date: 2020-11-20 12:51:18.146518
"""
# revision identifiers, used by Alembic.
revision = '6fa53ebdac2e'
down_revision = '249eca898a31'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``obtained`` association table (user <-> achievement)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'obtained',
        sa.Column('user_id', sa.String(), nullable=False),
        sa.Column('achievement_id', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(['achievement_id'], ['achievement.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['user.oid'], ),
        sa.PrimaryKeyConstraint('user_id', 'achievement_id'),
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``obtained`` table, reversing upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('obtained')
    # ### end Alembic commands ###
| UTF-8 | Python | false | false | 952 | py | 43 | 6fa53ebdac2e_.py | 40 | 0.67542 | 0.629202 | 0 | 33 | 27.848485 | 70 |
Terry1504/Django-Project | 12,094,627,908,066 | 3503483ae2bc65433505c9f6b71e91250f8976c6 | 346d8b9634d663fbf73126452d65bc3919d2dbb6 | /djangoSite/backend/migrations/0038_auto_20210316_1553.py | e48cdcac1381d94eb70e4e52a55da746ddb2781d | []
| no_license | https://github.com/Terry1504/Django-Project | cfd9fca4797aaad0eeaebc653d0f8900b6daee69 | 362cf13cdf13cdd882772d512b9a58a00b089920 | refs/heads/main | 2023-04-12T05:29:28.038490 | 2021-04-28T15:07:00 | 2021-04-28T15:07:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.4 on 2021-03-16 07:53
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated migration: adds the UserOperator audit model and a
    # reply_time timestamp field on the existing WebReply model.

    dependencies = [
        ('backend', '0037_auto_20210313_2322'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserOperator',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                # NOTE(review): default='None' stores the literal string
                # "None", not SQL NULL — confirm that is intended.
                ('user_account', models.CharField(default='None', max_length=20)),
                ('operate_type', models.CharField(default='None', max_length=20)),
                ('face_object_type', models.CharField(default='None', max_length=20)),
                ('face_object_id', models.CharField(default='None', max_length=50)),
                ('face_object_msg', models.CharField(default='None', max_length=100)),
                # Defaults to the row-creation time.
                ('operate_time', models.DateTimeField(default=django.utils.timezone.now)),
            ],
        ),
        migrations.AddField(
            model_name='webreply',
            name='reply_time',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
| UTF-8 | Python | false | false | 1,148 | py | 174 | 0038_auto_20210316_1553.py | 83 | 0.583624 | 0.547038 | 0 | 31 | 36.032258 | 90 |
nigelsmall/py2neo | 19,593,640,837,489 | c4e3d391cd76463f0fa7759ac485a22a4b319611 | e8f6c339b4b97415406d238a78575c7b9b612a58 | /demo/moviegraph/server.py | 1d1a3adda4b1397e35de8f9d77bc3cb16b647ff2 | [
"Apache-2.0"
]
| permissive | https://github.com/nigelsmall/py2neo | a6963dd665a31aff33d6fab301053aa7a72ac98a | 422287bcb8ba4706ea5637c3178b4a75bca23579 | refs/heads/v3 | 2021-04-12T05:20:19.967005 | 2016-12-15T23:24:20 | 2016-12-15T23:24:20 | 1,990,631 | 349 | 75 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from bottle import get, post, redirect, request, run, static_file, template, TEMPLATE_PATH
from calendar import month_name
from datetime import date
from os import getenv
from os.path import dirname, join as path_join
from py2neo import Graph, watch
from demo.moviegraph.model import Movie, Person, Comment
# Resolve template/static paths relative to this file so the app can be
# launched from any working directory.
home = dirname(__file__)
static = path_join(home, "static")
TEMPLATE_PATH.insert(0, path_join(home, "views"))

# Set up a link to the local graph database.
# The password is read from the NEO4J_PASSWORD environment variable.
graph = Graph(password=getenv("NEO4J_PASSWORD"))
# Echo Bolt protocol traffic to the console for debugging.
watch("neo4j.bolt")
@get('/css/<filename:re:.*\.css>')
def get_css(filename):
    """Serve a stylesheet from the static directory."""
    return static_file(filename, mimetype="text/css", root=static)
@get('/images/<filename:re:.*\.png>')
def get_image(filename):
    """Serve a PNG image from the static directory."""
    return static_file(filename, mimetype="image/png", root=static)
@get("/")
def get_index():
""" Index page.
"""
return template("index")
@get("/person/")
def get_person_list():
""" List of all people.
"""
return template("person_list", people=Person.select(graph).order_by("_.name"))
@get("/person/<name>")
def get_person(name):
""" Page with details for a specific person.
"""
person = Person.select(graph, name).first()
movies = [(movie.title, "Actor") for movie in person.acted_in] + \
[(movie.title, "Director") for movie in person.directed]
return template("person", person=person, movies=movies)
@get("/movie/")
def get_movie_list():
""" List of all movies.
"""
return template("movie_list", movies=Movie.select(graph).order_by("_.title"))
@get("/movie/<title>")
def get_movie(title):
""" Page with details for a specific movie.
"""
return template("movie", movie=Movie.select(graph, title).first())
@post("/movie/comment")
def post_movie_comment():
""" Capture comment and redirect to movie page.
"""
today = date.today()
comment_date = "%d %s %d" % (today.day, month_name[today.month], today.year)
comment = Comment(comment_date, request.forms["name"], request.forms["text"])
title = request.forms["title"]
movie = Movie.select(graph, title).first()
comment.subject.add(movie)
graph.create(comment)
redirect("/movie/%s" % title)
if __name__ == "__main__":
run(host="localhost", port=8080, reloader=True)
| UTF-8 | Python | false | false | 2,334 | py | 95 | server.py | 73 | 0.655527 | 0.651671 | 0 | 90 | 24.933333 | 90 |
Eslamhathout/restuarant_reservation_api | 16,681,652,979,262 | 9517df60a90f8d459df084aa7c8bb56fb4bdfdd7 | 396d51798e80acf9b861ffb9baedd54fdb3d56ef | /app/reservation/permissions.py | d532a12ae39a53b7b5ef405166bb35d7cb4fed9f | [
"MIT"
]
| permissive | https://github.com/Eslamhathout/restuarant_reservation_api | fc118f648cceeaf95a8340356ba651080c9df1d5 | 67292e95eed13b5bee423a443180230b9de4c036 | refs/heads/main | 2023-07-24T19:04:12.332675 | 2021-09-07T23:03:50 | 2021-09-07T23:03:50 | 402,718,599 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from rest_framework import permissions
class IsAdmin(permissions.BasePermission):
    """Permission class that only allows users whose role is 'Admin'."""

    message = 'This action can only be made by admin users.'

    def has_permission(self, request, view):
        # Access is granted solely on the requester's role attribute.
        return request.user.role == 'Admin'
| UTF-8 | Python | false | false | 268 | py | 17 | permissions.py | 15 | 0.701493 | 0.701493 | 0 | 9 | 28.777778 | 60 |
techandsociety/techandsocietycorpus | 15,710,990,392,464 | 1fbcaf42cc81384852a39d252a2abbda296e4b23 | 544f5b8f6d65ddb49eab9a14c69c5f54097a556a | /db/reset.py | ad024ce0f619ffce970dd1f35df7d2e6f770cb34 | []
| no_license | https://github.com/techandsociety/techandsocietycorpus | e1ebe76bf65bf55aaa739733bb08fb346ed9d803 | ecaf96978275f85abdfdca4ca1feaed445dd4771 | refs/heads/master | 2020-06-29T13:08:13.639244 | 2019-08-28T21:13:42 | 2019-08-28T21:13:42 | 200,543,652 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from db.objects import Recommendation
from sqlalchemy import Column, Integer
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import sqlalchemy
import sqlalchemy.types
# Connection string for the local techwatch database.
# NOTE(review): credentials are hard-coded — acceptable for a local dev reset
# script, but confirm this is never deployed.
url = 'postgresql://tech:tech@127.0.0.1/techwatch'
engine = create_engine(url)
Session = sessionmaker(bind=engine)
session = Session()
# Destructive full reset: drop and recreate every table registered on the
# Recommendation metadata.
Recommendation.metadata.drop_all(engine)
Recommendation.metadata.create_all(engine)
# Release the session and the connection pool.
session.close()
engine.dispose()
| UTF-8 | Python | false | false | 508 | py | 65 | reset.py | 49 | 0.818898 | 0.807087 | 0 | 19 | 25.736842 | 55 |
jennifertakagi/quarentena-dados-python | 6,133,213,341,003 | 03673f351a11fdaf79211a419b367fd8c856d79a | cc7c9438a91a2c315ac2c17c968a82d42cdc2fa8 | /movie_lens_dataset/quarentenadados_aula01e02_jennifer.py | be47600834fe045aa5bec4fdf73ada5509f73b87 | [
"MIT"
]
| permissive | https://github.com/jennifertakagi/quarentena-dados-python | ad5259d3cdc07c4a817e95ec5bcb58891c99f91c | d5e32a2c2066ad6a58d628d59a852404c5e1e1f0 | refs/heads/master | 2023-04-17T07:24:30.398752 | 2021-04-30T15:54:56 | 2021-04-30T15:54:56 | 258,198,708 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""QuarentenaDados - aula02 - Jennifer
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1t7zyaWjxxsoInx2Br79qk0HdiNkQFNgE
#Aula 01
"""
import pandas as pd

# Movie catalogue: one row per film, genres as a pipe-separated string.
filmes = pd.read_csv("https://raw.githubusercontent.com/alura-cursos/introducao-a-data-science/master/aula0/ml-latest-small/movies.csv")
filmes.columns = ["filmeId", "titulo", "generos"]
filmes.head()

# Ratings: one row per (user, movie) rating event.
avaliacoes = pd.read_csv("https://github.com/alura-cursos/introducao-a-data-science/blob/master/aula0/ml-latest-small/ratings.csv?raw=true")
avaliacoes.columns = ["usuarioId", "filmeId", "nota", "momento"]
avaliacoes

# Mean rating per movie, joined back onto the catalogue on filmeId.
notas_medias_por_filme = avaliacoes.groupby("filmeId")["nota"].mean()
notas_medias_por_filme.head()
filmes_com_media = filmes.join(notas_medias_por_filme, on="filmeId")
filmes_com_media.head()
"""## Desafio 1 do [Paulo Silveira](https://twitter.com/paulo_caelum)
O Paulo fez uma análise rápida e disse que tem 18 filmes sem avaliações, será que ele acertou?
Determine quantos filmes não tem avaliações e quais são esses filmes.
"""
filmes_sem_nota = filmes_com_media["nota"].isnull()
filmes_com_media[filmes_sem_nota]
"""## Desafio 2 do [Guilherme Silveira](https://twitter.com/guilhermecaelum)
Mudar o nome da coluna nota do dataframe **filmes_com_media** para nota_média após o join.
"""
filmes_com_media.rename(columns={"nota": "nota_media"}, inplace=True)
"""## Desafio 3 do [Guilherme Silveira](https://twitter.com/guilhermecaelum)
Colocar o número de avaliações por filme, isto é, não só a média mas o TOTAL de votos por filme.
"""
numero_avaliacoes_filmes = avaliacoes.groupby("filmeId")["nota"].count()
filmes_com_media = filmes_com_media.join(numero_avaliacoes_filmes, on="filmeId")
filmes_com_media.rename(columns={"nota": "numero_avaliacoes"}, inplace=True)
"""## Desafio 4 do [Thiago Gonçalves](https://twitter.com/tgcsantos)
Arredondar as médias (coluna de nota média) para duas casas decimais.
"""
filmes_com_media["nota_media"] = filmes_com_media["nota_media"].round(2)
"""## Desafio 5 do [Allan Spadini](https://twitter.com/allanspadini)
Descobrir os generos dos filmes (quais são eles, únicos). (esse aqui o bicho pega)
"""
generos_unicos = filmes_com_media["generos"].str.get_dummies("|")
generos_unicos.columns.to_list()
"""## Desafio 6 da [Thais André](https://twitter.com/thais_tandre)
Contar o número de aparições de cada genero.
"""
aparicoes_generos = filmes_com_media["generos"].str.get_dummies().sum().sort_values(ascending=False)
"""## Desafio 7 do [Guilherme Silveira](https://twitter.com/guilhermecaelum)
Plotar o gráfico de aparições de cada genero. Pode ser um gráfico de tipo igual a barra.
"""
aparicoes_generos.plot(
kind='bar',
title='Filmes por categoria',
figsize=(8,8))
"""# Aula 02
##Desafio 1 do [Guilherme Silveira](https://twitter.com/guilhermecaelum)
Rotacionar os ticks (os nomes dos generos) do gráfico de barras verdes (o último), de forma a deixar as legendas mais legíveis.
"""
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style("whitegrid")
plt.figure(figsize=(16,8))
sns.barplot(x=aparicoes_generos.index,
y=aparicoes_generos.values,
palette=sns.color_palette("BuGn_r", n_colors=len(aparicoes_generos) + 4))
plt.xticks(rotation=45)
plt.show()
"""## Desafio 2 do [Paulo Silveira](https://twitter.com/paulo_caelum)
Encontar vários filmes com médias próximas e distribuições diferentes, use a função **plot_filmes(n)** para plotar.
"""
filmes_com_media.sort_values("nota_media", ascending=False)[800:950]
def plot_filmes(id_filme):
    """Histogram the ratings of one movie and return their summary stats.

    Reads the module-level ``avaliacoes`` DataFrame.
    """
    notas = avaliacoes.query(f"filmeId=={id_filme}")["nota"]
    notas.plot(kind="hist")
    return notas.describe()
plot_filmes(858) #Godfather, The (1972)
plot_filmes(1196) #Star Wars: Episode V - The Empire Strikes Back
"""## Desafio 3 do [Paulo Silveira](https://twitter.com/paulo_caelum)
Criar o boxplot dos 10 filmes com mais votos (não é com maior média, é com mais votos!). Não apenas plot mas também analise e tente tirar conclusões.
"""
dez_filmes_mais_avaliacoes = filmes_com_media.sort_values("numero_avaliacoes", ascending=False).head(10)
dez_filmes_mais_avaliacoes_ids = dez_filmes_mais_avaliacoes["filmeId"].to_list()
dez_filmes_mais_avaliacoes_titulos = dez_filmes_mais_avaliacoes["titulo"].to_list()
plt.figure(figsize=(18, 8))
ax = sns.boxplot(x="filmeId", y="nota", data=avaliacoes.query(f"filmeId in {dez_filmes_mais_avaliacoes_ids}"))
ax.set_xticklabels(dez_filmes_mais_avaliacoes_titulos, fontsize=12)
ax.set_xlabel("Filme", fontsize=16)
ax.set_ylabel("Nota", fontsize=16)
plt.xticks(rotation=45)
plt.show()
"""## Desafio 4 do [Guilherme Silveira](https://twitter.com/guilhermecaelum)
Configurar a visualização do boxplot gerado pelo seaborn (último boxplot plotado na aula). Configurar o tamanho e colocar o nome dos filmes nos ticks.
"""
plt.figure(figsize=(12, 6))
ax = sns.boxplot(data = avaliacoes.query("filmeId in [1,2,919,46578]"), x="filmeId", y="nota")
ax.set_xticklabels(filmes.query("filmeId in [1,2,919,46578]")["titulo"])
ax.set_xlabel("Filme", fontsize=16)
ax.set_ylabel("Nota", fontsize=16)
plt.show()
"""## Desafio 5 do [Allan Spadini](https://twitter.com/allanspadini)
Calcular moda, média e mediana dos filmes. Explore filmes com notas mais próximas de 0.5, 3 e 5.
"""
avaliacoes.head()
def explorar_filmes(id_filme):
    """Print the title, mode, mean and median of one movie's ratings.

    Reads the module-level ``filmes`` and ``avaliacoes`` DataFrames.
    """
    registro = filmes.query(f"filmeId == {id_filme}")
    notas = avaliacoes.query(f"filmeId == {id_filme}")["nota"]
    print(f'Filme: {registro.iloc[0, 1]}')
    print(f'Moda: {notas.mode().values}')
    print(f'Média: {notas.mean()}')
    print(f'Mediana: {notas.median()}')
    print('------------')
explorar_filmes(8387)
explorar_filmes(89386)
explorar_filmes(3774)
explorar_filmes(2041)
explorar_filmes(7541)
explorar_filmes(160080)
explorar_filmes(177593)
explorar_filmes(1178)
explorar_filmes(4334)
"""## Desafio 6 da [Thais André](https://twitter.com/thais_tandre)
Plotar o boxplot e o histograma um do lado do outro (na mesma figura ou em figuras distintas, mas um do lado do outro).
"""
pulp_fiction = avaliacoes.query('filmeId == 296')['nota']
f, axes = plt.subplots(1, 2, figsize=(16, 6))
pulp_fiction.plot(kind='hist', ax=axes[0])
pulp_fiction.plot(kind='box', ax=axes[1])
plt.show()
"""## Desafio 7 do [Thiago Gonçalves](https://twitter.com/tgcsantos)
Criar um gráfico de notas médias por ano (média geral considerando todos os filmes lançados naquele ano).
"""
filmes_media_ano = filmes_com_media.copy()
filmes_media_ano["ano"] = filmes_media_ano["titulo"].str.extract("\((\d{4})\)")
filmes_media_ano.head()
notas_medias_ano = filmes_media_ano.groupby("ano")["nota_media"].mean()
plt.figure(figsize=(16,8))
notas_medias_ano.plot()
| UTF-8 | Python | false | false | 6,833 | py | 4 | quarentenadados_aula01e02_jennifer.py | 3 | 0.722723 | 0.699099 | 0 | 209 | 31.406699 | 150 |
regzhuce/sdbootstrap | 18,056,042,517,330 | 3bc45d8643001dc08e1855133d011a4565da8f41 | 49f59a85199c4df6f934f2b1b3fb145afa106d47 | /sdbootstrap/bootstrap/outer.py | 6948557b405ef5e75769e1572bcb9e70cf5898e0 | []
| no_license | https://github.com/regzhuce/sdbootstrap | 419737bc3c75cd58313d05865abf1acf6a9e59c4 | d24408f3a47695645d5e36343082b983d37af4df | refs/heads/master | 2023-04-12T22:53:43.618317 | 2017-06-16T16:18:33 | 2017-06-16T16:18:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sdbootstrap.bootstrap import Bootstrap
from sdbootstrap.config import Config
import sdbootstrap.updater
import numpy as np
class OuterBootstrap(Bootstrap):
    """Bootstrap variant that folds streamed (key, value, weight) records
    into per-key online estimates held in plain dicts."""

    def __init__(self):
        # Stateless: everything is passed into update_bootstrap().
        pass

    def update_bootstrap(self,
                         theta_boot,
                         w_boot,
                         infile=None,
                         separator=None,
                         online_update=None,
                         conf=None):
        """Consume delimited lines of the form ``update_id key value weight``
        and fold each record into theta_boot/w_boot via ``online_update``.
        Records with zero weight are skipped (after key initialisation)."""
        for raw in infile:
            fields = raw.split(separator)
            # fields[0] is an update id; not needed for the aggregation.
            key = str(fields[1])
            value = float(fields[2])
            weight = float(fields[3])
            if key not in theta_boot:
                # First sighting of this key: start both accumulators at zero.
                theta_boot[key] = 0.0
                w_boot[key] = 0.0
            if weight == 0:
                continue
            theta_boot[key], w_boot[key] = online_update(
                theta_boot[key], w_boot[key], value, weight, conf=conf)

    def main(self):
        """Wire config, the configured updater class and the input stream
        together, then print the resulting bootstrap estimates."""
        conf = Config()
        updater = getattr(sdbootstrap.updater, conf.online_update)()
        theta_boot = {}
        w_boot = {}
        self.update_bootstrap(theta_boot, w_boot,
                              infile=conf.infile,
                              separator=conf.separator,
                              online_update=updater.online_update,
                              conf=conf)
        self.print_bootstrap(theta_boot, w_boot, separator=conf.separator)
| UTF-8 | Python | false | false | 1,430 | py | 32 | outer.py | 25 | 0.486713 | 0.48042 | 0 | 42 | 33.047619 | 90 |
Adil-Iqbal/Coding | 18,253,611,024,975 | 7df1881b6fb9e4d6e2f4df82b574714383591142 | 7563fec2847dab97f11c3d11c56ae3dd81ad037f | /Ethoslab Fansite/Scripts/populate_episode_json_data.py | 8bfd1fd328d821f000984aad7e5147ad2f7beee1 | []
| no_license | https://github.com/Adil-Iqbal/Coding | 0120e48145ccf05175150cf6473226f7be04ddad | c32e2451609de4c32d68f428474253d1f396d776 | refs/heads/master | 2018-11-30T08:04:00.247898 | 2018-09-11T15:32:26 | 2018-09-11T15:32:26 | 119,769,550 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import json
from pprint import pprint
import traceback
"""This script is used to populate a JSON file with formatted Episode data."""
def get_episode_num(string):
    """Return the episode number as an integer.

    Assumes the title embeds a 3-digit episode number immediately before the
    first colon, e.g. "... Episode 123: Title". Returns None if no colon.
    """
    idx = string.find(":")
    if idx == -1:
        return None
    return int(string[idx - 3:idx])
def to_seconds(string):
    """Convert a YouTube ISO-8601 duration ("PT#H#M#S") into total seconds.

    On a malformed field (e.g. two unit letters in a row) the traceback is
    printed and the original, unparsed string is returned unchanged.
    """
    original = string
    total = 0
    digits = ""
    for ch in string[2:]:  # skip the leading "PT"
        if ch.isdigit():
            digits += ch
            continue
        # Unit letter terminates the current number: H=hours, M=minutes,
        # anything else (S) counts as seconds.
        if ch == "H":
            factor = 3600
        elif ch == "M":
            factor = 60
        else:
            factor = 1
        try:
            total += int(digits) * factor
        except:
            traceback.print_exc()
            return original
        digits = ""
    return total
url = "https://www.googleapis.com/youtube/v3/videos"
api_key = "AIzaSyBoVWZevLKCgLn_v-KNyT7gt3fsr_JdA4M"
#
# with open("episode_ids_list.txt", "r") as id_list:
# for i in range(303):
# parameters = {"part": "snippet,contentDetails",
# "id": id_list.readline(),
# "key": api_key,
# }
# result = requests.request(method="get", url=url, params=parameters)
# j_result = json.loads(result.text)
# try:
# episode_dict = {
# "id": 0,
# "type": "episode",
# "youtube_id": j_result['items'][0]['id'],
# "title": j_result['items'][0]['snippet']['title'],
# "episode": get_episode_num(j_result['items'][0]['snippet']['title']),
# "description": j_result['items'][0]['snippet']['description'],
# "published_at": j_result['items'][0]['snippet']['publishedAt'],
# "duration": to_seconds(j_result['items'][0]['contentDetails']['duration']),
# "media": j_result['items'][0]['snippet']['thumbnails'],
# "associated_clips": [],
# "curation": {
# "curated_time": 0,
# "percentage": 0.0
# }
# }
# episode_dict["id"] = episode_dict["episode"] - 105
# with open("episodes.json") as j_data:
# d_data = json.load(j_data)
# d_data.append(episode_dict)
# with open("episodes.json", "w") as j_data:
# json.dump(d_data, j_data)
# except:
# traceback.print_exc()
# pprint(j_result['items'][0])
# break
# else:
# print("Done!")
# *** Code used to find common errors in insertions to all_episodes list.
# def scan_episodes_file_for_common_errors():
# with open("episodes.json", "r") as j_data:
# print("Scanning 'episodes.json' for common issues...")
# d_data = json.load(j_data)
# prev_ep = -1
# for i, episode in enumerate(d_data):
# if type(episode["duration"]) == str:
# print(
# "Duration string (" + episode["duration"] + ") detected at... Index: " + str(i) + ", Episode ID:" + str(
# episode["id"]))
# if prev_ep > episode["id"]:
# print("Possibly repeated episode insertion at... Index: " + str(i) + ", Episode ID:" + str(episode["id"]))
# elif not ((episode["id"] - prev_ep) == 1):
# print("Possible skip of length " + str(episode["id"] - prev_ep) + " detected at... Index: " + str(
# i) + ", Episode ID:" + str(episode["id"]))
# else:
# prev_ep = episode["id"]
# else:
# print("Scan concluded.")
#
# scan_episodes_file_for_common_errors()
from random import randint

# Demo: bubble-sort 80 random integers in the range [1, 30].
number_list = [randint(1, 30) for i in range(80)]
print(number_list)

# Classic bubble sort. After pass k the largest remaining element has
# bubbled to position len-k, so the inner sweep can stop k elements early.
for k in range(1, len(number_list)):
    for i in range(0, len(number_list)-k):
        if number_list[i] > number_list[i+1]:
            number_list[i], number_list[i + 1] = number_list[i + 1], number_list[i]
print(number_list)
leeshlay/IoT_Parking | 3,917,010,178,046 | 24f9843bd90619e836f8183beeecd75a228c35a6 | 2b82623c8dc3877c4f9a414a040257aeb3e8ca04 | /fields/Field1/home/lib/GPIO_Intel.py | 4c104ff480dd5336e85376333a192351306ee2bc | []
| no_license | https://github.com/leeshlay/IoT_Parking | d5ce46d3233405202f17229b69fa3faf1ccc11f5 | 40c2aa814b1913eaea28cbd42dabc9b6331ef933 | refs/heads/master | 2021-01-21T13:48:50.170226 | 2016-04-27T20:01:26 | 2016-04-27T20:01:26 | 55,305,344 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import time, sys, os
# Mapping from Arduino-style shield pin labels to the board's Linux sysfs
# GPIO numbers (used to build /sys/class/gpio/gpioNN paths).
IOPins = {
    "IO2": 32,
    "IO3": 18,
    "IO4": 28,
    "IO5": 17,
    "IO6": 24,
    "IO7": 27,
    "IO8": 26,
    "IO9": 19,
    "IO10": 16,
    "IO11": 25,
    "IO12": 38,
    "IO13": 39,
    "A0": 37,
    "A1": 36,
    "A2": 23,
    "A3": 22,
    "A4": 29,
    # NOTE(review): "A5" maps to the same sysfs GPIO as "A4" (29); this looks
    # like a copy/paste slip — confirm the intended number for A5.
    "A5": 29
}
# Tracks which pins have been exported/configured via Intel.setup(); guards
# Intel.output()/Intel.input() against use of unconfigured pins.
PinsEnabled = {
    "IO2": False,
    "IO3": False,
    "IO4": False,
    "IO5": False,
    "IO6": False,
    "IO7": False,
    "IO8": False,
    "IO9": False,
    "IO10": False,
    "IO11": False,
    "IO12": False,
    "IO13": False,
    "A0": False,
    "A1": False,
    "A2": False,
    "A3": False,
    "A4": False,
    "A5": False,
}
class Intel:
    """sysfs GPIO helper for Intel Arduino-compatible boards.

    Digital pins are exported and driven through /sys/class/gpio; analog
    pins ("A0".."A5") are sampled through the IIO raw-voltage files.
    Print statements use the parenthesized form so the module also runs on
    Python 3 (single-argument output is identical on Python 2).
    """

    def __init__(self):
        print("Initial Setup.")

    def cmd(self, value, file):
        """Write str(value) into the given sysfs control file."""
        with open(file, 'w') as File:
            File.write(str(value))

    def setup(self, pin, dir='out'):
        """Export *pin* and set its direction; returns 1.

        Analog pins are forced to output-low (mirrors the original code);
        other pins get the requested direction ('out'/'in').
        """
        actpin = IOPins[pin]
        try:
            self.cmd(actpin, '/sys/class/gpio/export')
        except IOError:
            # Already exported by a previous run — not fatal.
            print("Pin already exported")
        if pin[:1] == "A":
            self.cmd("out", '/sys/class/gpio/gpio{}/direction'.format(actpin))
            self.cmd("0", '/sys/class/gpio/gpio{}/value'.format(actpin))
        else:
            self.cmd(dir, '/sys/class/gpio/gpio{}/direction'.format(actpin))
        PinsEnabled[pin] = True
        return 1

    def output(self, pin, value='1'):
        """Drive a configured pin to *value*; returns 1, or 0 if not set up."""
        actpin = IOPins[pin]
        if not PinsEnabled[pin]:
            print("{} has not been set to output.".format(pin))
            return 0
        # Strong drive mode before writing the level.
        self.pullup(pin)
        self.cmd(value, '/sys/class/gpio/gpio{}/value'.format(actpin))
        return 1

    def input(self, pin):
        """Read a pin: raw ADC counts for "A*" pins, else the GPIO level.

        Returns None for a digital pin that was never set up (unchanged
        from the original behaviour).
        """
        if pin[:1] == "A":
            path = '/sys/bus/iio/devices/iio:device0/in_voltage{}_raw'.format(pin[1:])
            with open(path, 'r') as File:
                return File.readline()[:-1]
        actpin = IOPins[pin]
        if PinsEnabled[pin]:
            self.pullup(pin, 'pullup')
            with open('/sys/class/gpio/gpio{}/value'.format(actpin), 'r') as File:
                return File.readline()[:-1]

    def pullup(self, pin, drive='strong'):
        """Set the pin drive mode (e.g. 'strong', 'pullup'); returns 1."""
        actpin = IOPins[pin]
        self.cmd(drive, '/sys/class/gpio/gpio{}/drive'.format(actpin))
        return 1

    def cleanup(self):
        """Drive every pin configured via setup() back low.

        Bug fix: the original iterated an undefined name ``Pins`` and mixed
        ``Pin``/``pin`` casing, raising NameError whenever it was called;
        its "PWM" branch was also dead since IOPins has no PWM entries.
        """
        for pin, enabled in PinsEnabled.items():
            if enabled:
                self.cmd('0', '/sys/class/gpio/gpio{}/value'.format(IOPins[pin]))
| UTF-8 | Python | false | false | 2,348 | py | 10 | GPIO_Intel.py | 5 | 0.594974 | 0.553663 | 0 | 113 | 19.752212 | 95 |
PrimeTime416/PythonPractice | 1,486,058,691,147 | e26d346f0fc0d9b041500ff9da98bb4daaca4ef8 | 2433bf64424a6cf55d7ab46b050686eb7e7a0351 | /random1.py | d5d7eed6564ff983edad050f6448b8ff05aa541f | []
| no_license | https://github.com/PrimeTime416/PythonPractice | 8aebfb8e42f996fb5fdb799b61d776b19818b1e1 | e48d8bf6e00e3c3cfc266061701e2f187dff78f7 | refs/heads/master | 2021-09-08T00:20:18.904001 | 2018-03-04T03:56:32 | 2018-03-04T03:56:32 | 115,659,165 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/user/bin/env python3
# Atalyah's program
import random
def main():
    """Number-guessing game: prompt until the player guesses the secret.

    Bug fix: the original loop never terminated — even after a correct
    guess it kept prompting forever. A ``break`` now ends the game.
    """
    print("Guess a number between 1 and 10")
    random_number = random.randint(1, 10)
    while True:
        # int() raises ValueError on non-numeric input (unchanged behaviour).
        user_guess = int(input("your Guess: "))
        print(user_guess)
        if user_guess == random_number:
            print("you are correct")
            break
if __name__ == "__main__":
    # Guard so importing this module doesn't immediately start the game.
    main()
| UTF-8 | Python | false | false | 505 | py | 10 | random1.py | 9 | 0.586139 | 0.562376 | 0 | 25 | 19.2 | 46 |
couragesuper/couragesuper-ds | 2,164,663,518,126 | 9698561eafbef2633b5e437cd8e021c52eced122 | a0c3b37c73b3437a831229e4e884ba7a5e0e9b9e | /JLRSupport/MiniCorvus/JLR_UDS.py | bd8f7bd83318ad2c2353ce6817c7a4ca0451f7e8 | []
| no_license | https://github.com/couragesuper/couragesuper-ds | 12f1e19f57b0c0ab5b3b6df0dadf2a1b78d2bf94 | 26a79de2eb2ece47a01c5e698b27c582e29b3d63 | refs/heads/master | 2023-01-04T01:22:30.956934 | 2023-01-02T13:36:20 | 2023-01-02T13:36:20 | 143,246,282 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # for XML
from xml.etree import ElementTree as ET
# socket
import socket
import threading
import time
# for timestamp
from datetime import datetime
# Process List
# 1. UDP broadcasting with VEH message (OK)
# 2. UDP broadcasting with Routine message (OK)
# 3. UDP Reading DID is failed ( error of payload )
# Goal : Direct Connection.
# timeout of sending Vehicle identification
# timeout of routing activation
# timeout of reading did
# error reading specified did
# Configuration : XML
xmlName = "Config.xml"
#Precondition : ba is valuable Array
def BaToVal( ba ) :
value = 0
if( type( ba ) == int ) : return True , ba
if( len( ba ) > 4 ) :
return False, ba
for elem in ba :
value = (value << 8) | elem
return True, value
def ShowTs():
    """Print the current local timestamp (debug helper)."""
    print(datetime.now())
if False :
config = LoadConfigXML( xmlName )
print( config )
class UDS_Packet_Parser :
    """Loads DOIP/UDS packet field layouts from an XML rule file.

    The parsed rules end up in ``self.dictParseRule`` shaped as
    {packet_name: {field_name: {"len": int, "offset": int}, ...}, ...},
    where the synthetic key "DOIPHDR" holds the common DOIP header fields.
    """

    def __init__( self , isDebug ):
        print( "[UDS_Packet_Parser] Init .. ")
        # NOTE(review): the isDebug argument is ignored — debug output is
        # always forced on here.
        self.isDebug = True
        self.dictParseRule = {}
        self.debugTag = "UDS_Packet_Parser"

    def log(self, subTag, level, msg):
        # Uniform log format: [class][method][level] message
        print( "[{}][{}][{}] {}".format( self.debugTag, subTag, level, msg ) )

    # RULE XML
    # L1 : DOIP or UDS (Protocols)
    # L2 : Packets
    # L3 : Fields
    def LoadRuleXML( self , xmlfile):
        """Parse *xmlfile* into self.dictParseRule.

        Returns True on success, False when the XML has no root element.
        """
        # Debugging Data
        subTag = "LoadRuleXML"
        self.log(subTag, "I" , "[Parsing] Rule the XML")
        # parsing xml
        xmltree = ET.parse(xmlfile)
        xmlroot = xmltree.getroot()
        if (xmlroot == None):
            self.log( subTag , "E" , "LoadRuleXML .. Root")
            return False
        #L1
        for node_protocol in xmlroot:
            tag = node_protocol.tag
            self.log(subTag, "I", "\tnode L1 : {}".format(tag) )
            if( tag == "DOIP" ) :
                # DOIP header: fields are laid out sequentially from offset 0.
                offset = 0
                for node_l1 in node_protocol :
                    arrayHdrInfo = {}
                    if( node_l1.attrib["name"] == "Header" ) :
                        self.log(subTag, "I", "\t\tnode L1:{}-> attr:{}".format(node_l1 , node_l1.attrib))
                        #L2
                        for node_l2 in node_l1 :
                            self.log(subTag, "I", "\t\t\tnode L2:{} -> attr:{}".format(node_l2, node_l2.attrib))
                            name = node_l2.attrib["name"]
                            # NOTE(review): 'len' shadows the builtin within
                            # this loop body.
                            len = node_l2.attrib["len"]
                            arrayHdrInfo[ name ] = { "len" : int(len), "offset": offset }
                            offset = int(offset) + int(len)
                        self.dictParseRule["DOIPHDR"] = arrayHdrInfo
            elif( tag == "UDS") :
                for node_l1 in node_protocol:
                    # Each UDS packet layout starts from a copy of the DOIP
                    # header fields, so the DOIP section must be parsed first.
                    arrayUDSInfo = self.dictParseRule["DOIPHDR"].copy()
                    self.log(subTag, "I", "\t\tnode L1:{} -> attr:{}".format(node_l1 , node_l1.attrib))
                    packet_name = node_l1.attrib["name"]
                    packet_type = node_l1.attrib["doiptype"]
                    arrayUDSInfo["doiptype"] = packet_type
                    # Payload fields start right after the 8-byte DOIP header.
                    offset = 8
                    for node_l2 in node_l1 :
                        if( node_l2.tag != "DOIPHEADER" ) :
                            self.log(subTag, "I", "\t\t\tnode L2:{} -> attr:{}".format(node_l2, node_l2.attrib))
                            name = node_l2.attrib["name"]
                            len = node_l2.attrib["len"]
                            arrayUDSInfo[name] = { "len" : int(len), "offset": offset }
                            offset = int(offset) + int (len)
                    self.dictParseRule[packet_name] = arrayUDSInfo
            else:
                self.log( subTag , "E" , " Invalid Node ")
        if( self.isDebug == True ) :
            self.log(subTag, "I", "[Display Nodes] ")
            for k , v in self.dictParseRule.items() :
                self.log(subTag, "D" , "\t{} = {} ".format( k ,v ) )
        return True

    def getRuleData(self):
        """Return the parsed rule dictionary."""
        return self.dictParseRule
# Disabled module self-test: parse the rule XML and dump the rule table.
if False :
    # NOTE(review): 'self.isDebug' is undefined at module level — this block
    # would raise NameError if the guard were flipped to True.
    parser = UDS_Packet_Parser( self.isDebug )
    parser.LoadRuleXML("PacketParseRule.xml" )
    print( parser.getRuleData() )
    exit(0)
class JLR_UDS :
    """DoIP/UDS client for a JLR PIVI head unit.

    Workflow:
      1. LoadConfigXML() reads the local interface IP and port.
      2. foundPIVI() broadcasts DoIP vehicle-identification requests over UDP
         until the PIVI answers, then opens a TCP connection to it.
      3. runLoopUDS() performs routing activation, then cyclically reads the
         DIDs in arrDIDPartNumbers over TCP.
    """

    def __init__(self):
        #1. Information
        self.debug = 1 # flag for function of some debug
        self.isDebug = True
        self.status = 0 # status flagging
        self.packet_len = 4096 # unit of transmission (max recv size)
        #2. JLR_UDS
        self.debugTag = "JLR_UDS"
        #3. XML Config
        self.config = {}
        # FIX: loopUDS_Recv() sleeps on self.udsDelay, which was never
        # initialised and raised AttributeError at runtime; use the same
        # 20 ms pacing as delayUDS().
        self.udsDelay = 0.02
        #4. Status Flag
        self.resetFlag()
        self.InitPackets()

    def log(self, subTag, level, msg):
        """Print one tagged console line: [JLR_UDS][subTag][level] msg."""
        print( "[{}][{}][{}] {}".format( self.debugTag, subTag, level, msg ) )

    def resetFlag(self) :
        # Clear the discovery flag; set again once the PIVI answers.
        self.isFoundPIVI = False

    def InitPackets(self):
        """Load the packet parse rules and pre-build the fixed request frames."""
        subTag = "InitPackets"
        parser = UDS_Packet_Parser(self.isDebug)
        if( True == parser.LoadRuleXML("PacketParseRule.xml" ) ) :
            self.dict_udsRule = parser.getRuleData().copy()
        self.log( subTag, "I", "[Lists of packet data]" )
        # Canned DoIP frames: vehicle identification + routing activation.
        self.UDSPackets = {}
        self.UDSPackets[ "VEH_REQ" ] = b'\x02\xfd\x00\x01\x00\x00\x00\x00'
        self.UDSPackets[ "ROUTINE_REQ"] = b'\x02\xfd\x00\x05\x00\x00\x00\x07\x0e\x80\x00\x00\x00\x00\x00'
        self.log(subTag, "I", "[Lists of doip types]")
        # DoIP payload-type codes used by this client.
        self.DOIPType = {}
        self.DOIPType["VEH_REQ"] = 1
        self.DOIPType["VEH_RES"] = 4
        self.DOIPType["ROU_REQ"] = 5
        self.DOIPType["ROU_RES"] = 6
        # 2-byte DIDs polled cyclically after routing activation.
        self.arrDIDPartNumbers = []
        self.arrDIDPartNumbers.append(b'\xf1\x11')
        self.arrDIDPartNumbers.append(b'\xf1\x12')
        self.arrDIDPartNumbers.append(b'\xf1\x13')
        self.arrDIDPartNumbers.append(b'\xf1\x88')
        self.arrDIDPartNumbers.append(b'\xf1\x90')
        self.arrDIDPartNumbers.append(b'\xf1\xA0')
        self.arrDIDPartNumbers.append(b'\xf1\xBE')
        self.arrDIDPartNumbers.append(b'\x41\xAE')
        self.arrDIDPartNumbers.append(b'\x48\x84')

    def LoadConfigXML( self , xml_file ):
        """Read IP_ETH and PORT from the config XML into self.config.

        Returns False when the XML has no root element; otherwise None.
        """
        xmltree = ET.parse( xml_file )
        xmlroot = xmltree.getroot()
        if (xmlroot == None):
            print("[JLR_UDS][Error] LoadConfigXML .. Root ")
            return False
        for node in xmlroot:
            print( node.attrib )
            self.config["IP_ETH"] = node.attrib['IP_ETH']
            #self.config["IP"] = node.attrib['IP']
            self.config["PORT"] = int( node.attrib['PORT'] )
            #self.targetAddr = ( self.config["IP"], self.config["PORT"] )
        if( self.debug and 1 ) :
            print( "[JLR_UDS] LoadConfigXML .. Ret:{} ".format( self.config ) )

    def delayUDS(self) :
        # Fixed 20 ms pacing between UDS frames.
        time.sleep( 0.02 )

    # parsing byte through BaToVal ( 2 param )
    # msg is string (need to convert byte array)
    def parseUDSPacket(self , addr , msg):
        """Parse a raw DoIP frame into a {field: value} dict via dict_udsRule.

        Returns an empty dict when the frame is shorter than the 8-byte DoIP
        header or when no parse rule matches its payload type.
        """
        subTag = "parseUDSPacket"
        ba_pkt = bytearray( msg )
        data = {}
        if( len(ba_pkt) >= 8 ) :
            # DoIP payload type is a 2-byte field at offset 2.
            Ret, doipType = BaToVal(ba_pkt[2: 2 + 2])
            if( Ret == True ) :
                self.log( subTag, "I" , "DoipType = {}".format( doipType ) )
            else :
                self.log(subTag, "E", "DoipType")
            RetFound , rule = self.foundRule( doipType )
            if( RetFound == True ) :
                # Slice each rule field out of the packet by offset/length.
                for rule_k, rule_v in rule.items() :
                    if( rule_k != "doiptype" ) :
                        off = int(rule_v["offset"])
                        length = int(rule_v["len"])
                        data[ rule_k ] = BaToVal( ba_pkt[off:off+length] )[1]
            else:
                # FIX: referenced undefined name 'doiptype' (NameError).
                self.log(subTag , "E" , "Found Rule Error -> Type : {}".format( doipType ) )
        else :
            # FIX: referenced undefined name 'packet' (NameError).
            self.log(subTag , "E" , "Packet Error(short packet) -> len : {}".format( len( ba_pkt ) ) )
        self.log(subTag, "I", "\tParsed Data = {}".format( data ) )
        return data

    def foundRule(self ,doiptype):
        """Return (True, rule) for the rule matching the DoIP type, else (False, None)."""
        for key, rule in self.dict_udsRule.items() :
            if( key != "DOIPHDR" ) :
                type = int( rule["doiptype"] )
                if( doiptype == type ) : return True , rule
        return False , None

    # found PIVI and Connect PIVI
    def foundPIVI(self):
        """Discover the PIVI: broadcast VEH requests and wait for its answer."""
        # 1.unset flag whether PIVI is found
        # 2.Create UDP Socket to broadcast to found PIVI
        self.createUDPSocket()
        # 3.Create Sending Thread and Receiving Thread
        self.threadFoundPIVI_Send = threading.Thread( target = self.loopFoundPIVI_Send )
        self.threadFoundPIVI_Recv = threading.Thread( target = self.loopFoundPIVI_Recv )
        # 4 run Threads
        self.threadFoundPIVI_Send.start()
        self.threadFoundPIVI_Recv.start()

    # network
    # UDP socket : broad cast VEH message to all network .
    # pivi answers VEH response.
    def createUDPSocket(self):
        self.sockUDP = socket.socket( socket.AF_INET , socket.SOCK_DGRAM , socket.IPPROTO_UDP )
        self.sockUDP.setsockopt( socket.SOL_SOCKET , socket.SO_BROADCAST, 1 )
        self.sockUDP.bind( ( self.config["IP_ETH"], self.config["PORT"]) )

    # Protocol DOIP
    # make UDP socket
    # send VEH req : -> "VEH_REQ" (Doip 1)
    # recv VEH res : -> "VEH_RES" (Doip 4) -> set isFoundPIVI to True
    # close UDP socket
    def loopFoundPIVI_Send(self):
        """Broadcast vehicle-identification requests until the PIVI answers."""
        # send vehicle identification message until pivi is answered.
        while self.isFoundPIVI == False :
            self.sockUDP.sendto( self.UDSPackets[ "VEH_REQ" ], ("255.255.255.255", self.config["PORT"]) )
            self.delayUDS()

    def loopFoundPIVI_Recv(self):
        """Wait for the PIVI's VEH response, then hand over to the TCP UDS loop."""
        while self.isFoundPIVI == False :
            ShowTs()
            msg, addr = self.sockUDP.recvfrom(self.packet_len)
            if( addr[0] == self.config["IP_ETH"] ) :
                # Our own broadcast echoed back on the same interface.
                print(" [ignore] packet from self ")
            else :
                data = self.parseUDSPacket( addr , msg )
                # From PIVI: VEH response carrying logical address 0x14b4.
                if( ( data['type'] == self.DOIPType["VEH_RES"] ) and ( data['LogAddr'] == 0x14b4 ) ) :
                    self.PIVI_Addr = addr
                    #self.SavePIVIAddress();
                    self.PIVI_VEH = data
                    self.sockUDP.close()
                    self.delayUDS()
                    self.isFoundPIVI = True
                else :
                    print(">> I don't know this message")
        self.CreateTCPSock()
        self.runLoopUDS()

    def CreateTCPSock(self):
        # Create TCP socket to the discovered PIVI address.
        self.sockTCP = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sockTCP.connect(self.PIVI_Addr)

    # Protocol UDS
    # send Routing Acitivation : -> "ROU_REQ" (Doip 5)
    # recv Routing Response : -> "ROU_RES" (Doip 6)
    def runLoopUDS(self):
        """Start the TCP send/receive threads (routing activation + DID polling)."""
        print("runLoopUDS")
        self.isRecvRoutineActivation = False
        self.isUDSLoopRunning = True
        self.threadTCP_Send = threading.Thread(target=self.loopUDS_Send)
        self.threadTCP_Send.start()
        self.threadTCPRecv = threading.Thread(target=self.loopUDS_Recv)
        self.threadTCPRecv.start()

    def loopUDS_Send(self):
        """Send routing activation until acknowledged, then poll DIDs cyclically."""
        subTag = "loopUDS_Send"
        while self.isRecvRoutineActivation == False:
            ShowTs()
            self.log( subTag , "I" , " [Send] Routing Acitivation Reqeust" )
            self.sockTCP.sendto( self.UDSPackets["ROUTINE_REQ"], self.PIVI_Addr)
            self.delayUDS()
        cnt = 0
        # This is task after Activation: cycle through the DID list forever.
        while self.isUDSLoopRunning :
            ShowTs()
            if cnt < len(self.arrDIDPartNumbers):
                self.udsReadingDID(self.arrDIDPartNumbers[ cnt ])
                cnt = cnt + 1
            elif cnt == len(self.arrDIDPartNumbers):
                cnt = 0

    def udsReadingDID( self, did ):
        """Send a ReadDataByIdentifier (UDS 0x22) request for one 2-byte DID."""
        print( "sendReadingDID : did{} :type:{}".format(did , type(did)))
        if( type(did) == bytes ) :
            # DoIP diag-message header + SA 0x0e80 / TA 0x14b4 + service 0x22.
            msg = b'\x02\xfd\x80\x01\x00\x00\x00\x07\x0e\x80\x14\xb4\x22'
            msg = msg + did
            print( "message:{}".format( msg ) )
            self.sockTCP.sendto( msg, self.PIVI_Addr )
            self.delayUDS()
        else :
            print( "sendReadingDID is failed with illegal param:{}".format( did ) )

    def loopUDS_Recv(self):
        """Receive the routing-activation response, then DID responses forever."""
        subTag = "loopUDS_Recv"
        while self.isRecvRoutineActivation == False:
            msg = self.sockTCP.recv(self.packet_len)
            ShowTs()
            self.log( subTag , "I" , " Data is received=len:{} data:{}".format(len(msg) , msg) )
            data = self.parseUDSPacket(0, msg)
            # Routing activation is confirmed by ROU_RES with response code 0x10.
            if ((data['type'] == self.DOIPType["ROU_RES"]) and (data['SourceAddr'] == 0x14b4) and (data['ResponseCode'] == 0x10)):
                ShowTs()
                print(">> >> Routing Activation ")
                self.isRecvRoutineActivation = True
            else:
                print("----")
            time.sleep(self.udsDelay)
        while True:
            rcvdata = self.sockTCP.recv(self.packet_len)
            print("[loopUDS_Recv] data={}".format(rcvdata))
            # TODO(review): parseUDSDID() is not defined anywhere in this
            # class — this raises AttributeError when a DID response arrives.
            self.parseUDSDID(rcvdata)
            time.sleep(self.udsDelay)
# Module Test: discover the PIVI on the local network and start the UDS loop.
if True :
    print("JLR_UDS ...")
    jlruds = JLR_UDS()
    # NOTE(review): 'xmlName' must be defined earlier in this file — it is
    # not visible in this section.
    jlruds.LoadConfigXML( xmlName )
    jlruds.foundPIVI()
| UTF-8 | Python | false | false | 13,705 | py | 344 | JLR_UDS.py | 134 | 0.530828 | 0.516819 | 0.00394 | 357 | 37.37535 | 130 |
fym0121/zhaopin | 11,209,864,686,420 | 0011373140d85ba60674994b3b213c5f959951b6 | 2ff6642c19e1bfd7f5edc76720e9603b22c1a7e4 | /jihuo.py | c1fb87c72b6b113ddc47e46e5fd04d8515504f5e | []
| no_license | https://github.com/fym0121/zhaopin | 0eb21c3fa3f7f2aac55c4016e7a48c42b7bce895 | 5a757e816e2a3ab98d12f3fea5ad39c9af7111e3 | refs/heads/master | 2018-09-10T04:24:10.894613 | 2018-06-13T08:30:07 | 2018-06-13T08:30:07 | 130,575,734 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import wx
import hashlib
class AuthDialog(wx.Dialog):
    """Activation dialog: derives an activation code from a machine id.

    The activation code is the first six hex characters of
    md5(machine_id + 'vso').
    """

    def __init__(
        self, parent, id=-1, title='招聘激活', size=wx.DefaultSize, pos=wx.DefaultPosition,
        style=wx.DEFAULT_DIALOG_STYLE, name='Authentication'
    ):
        wx.Dialog.__init__(self)
        self.Create(parent, id, title, pos, size, style, name)
        sizer = wx.BoxSizer(wx.VERTICAL)
        # Row 1: machine-id input plus the "active" button.
        box = wx.BoxSizer(wx.HORIZONTAL)
        label = wx.StaticText(self, -1, "Your id:")
        self.shibie_tc = wx.TextCtrl(
            self, -1, "", size=(80, -1))
        self.jihuo_btn = wx.Button(self, label="active")
        self.jihuo_btn.Bind(wx.EVT_BUTTON, self.on_active)
        box.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
        box.Add(self.shibie_tc, 1, wx.ALIGN_CENTRE | wx.ALL, 5)
        box.Add(self.jihuo_btn, 1, wx.ALIGN_CENTRE | wx.ALL, 5)
        sizer.Add(box, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
        # Row 2: the computed activation code.
        box = wx.BoxSizer(wx.HORIZONTAL)
        label = wx.StaticText(self, -1, "activation:")
        box.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
        self.jihuo_tc = wx.TextCtrl(
            self, -1, "", size=(80, -1))
        box.Add(self.jihuo_tc, 1, wx.ALIGN_CENTRE | wx.ALL, 5)
        sizer.Add(box, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
        self.SetSizer(sizer)
        sizer.Fit(self)

    def jiami(self, shibie):
        """Return the activation code for machine id *shibie*."""
        shibie = shibie + 'vso'
        jihuo = hashlib.md5(shibie.encode('utf-8')).hexdigest()[:6]
        return jihuo

    def on_active(self, evt):
        """Button handler: fill the activation field from the entered id."""
        # FIX: was a copy-paste of jiami()'s body; delegate to it instead.
        shibie = self.shibie_tc.GetValue()
        self.jihuo_tc.SetValue(self.jiami(shibie))

    def on_cancel(self, evt):
        """Destroy the dialog and terminate the process."""
        self.Destroy()
        exit()
# Script entry: create the wx app and show the dialog centred on screen.
# NOTE(review): there is no `if __name__ == '__main__'` guard and
# app.MainLoop() is never started — presumably ShowModal() runs its own
# modal loop; verify.
app = wx.App()
dlg = AuthDialog(None)
dlg.CenterOnScreen()
dlg.ShowModal() | UTF-8 | Python | false | false | 1,864 | py | 14 | jihuo.py | 9 | 0.578664 | 0.561961 | 0 | 61 | 29.442623 | 91 |
hyuhyu2001/Lesson1_Testing | 420,906,796,887 | 1bb6ce448a6c68e066f307c7a4c3622714576f91 | ce71e7e9d7efabd24d9dd1ee67faf098317900f0 | /src/dataList/Singly_linked_list.py | 0a4cbe6ab982cc7cb8e0a14f0671879d7e5e3eb1 | []
| no_license | https://github.com/hyuhyu2001/Lesson1_Testing | 73a2b389201656f5d60e4e81f087e1cef682fe76 | b1fd6850d880886af9d57bdbcc80b9190a05e295 | refs/heads/master | 2021-01-10T10:12:44.397598 | 2017-09-25T06:39:47 | 2017-09-25T06:39:47 | 54,043,016 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/user/bin/env python
#encoding:utf-8
'''
Singly linked list exercise.

Given a singly linked list dataList whose nodes each hold a variable-size
buf of string data — e.g. for String "123456789abcdefg" the list is
[12]->[34567]->[8]->[9ab]->[cdefg] — implement a search that, given the
list and a substring, returns the node containing the substring's first
character and that character's index within the node (0-based).
E.g. searching "bcd" in the list above returns node [9ab] and index 2.
Example data: String = '123456789abcdefg',
dataList = ['12','234567','8','9ab','cdefg'], dict = {} (an empty dict).
'''
class Node(object): # node type: holds the payload plus a link to the next node
    def __init__(self,data):
        self.data = data
        self.next = None  # successor node; None marks the tail
    # NOTE(review): this and the following methods appear to belong to a
    # linked-list class whose `class` line is missing from this chunk.
    def __init__(self): # initialise an empty list
        self.head =None
def __len__(self): #获取链表长度
pre = self.head
length = 0
while pre:
length += 1
pre = pre.next
return length
def append(self, data): #追加节点
node = Node(data)
if self.head is None:
self.head = node
else:
pre = self.head
while pre.next:
pre = pre.next
pre.next = node
    def insert(self, index, data):# insert a node at position `index` (negative indices allowed)
        node = Node(data)
        # Out-of-range check; abs(index + 1) covers both signs.
        if abs(index + 1) > len(self):
            return False
        # Normalise a negative index to its positive equivalent.
        index = index if index >= 0 else len(self) + index + 1
        if index == 0:
            # Insert at the head.
            node.next = self.head
            self.head = node
        else:
            # NOTE(review): relies on a get() method not visible in this chunk.
            pre = self.get(index - 1)
            if pre:
                next = pre.next  # NOTE: shadows the builtin 'next'
                pre.next = node
                node.next = next
            else:
                return False
        return node
    def delete(self, index):# delete the node at position `index` (negative indices allowed)
        # Bounds check on the magnitude of the index.
        f = index if index > 0 else abs(index + 1)
        if len(self) <= f:
            return False
        pre = self.head
        # Normalise a negative index to its positive equivalent.
        index = index if index >= 0 else len(self) + index
        prep = None  # predecessor of the node being removed
        while index:
            prep = pre
            pre = pre.next
            index -= 1
        if not prep:
            # Removing the head node.
            self.head = pre.next
        else:
            prep.next = pre.next
        return pre.data
    def __reversed__(self): # reverse the list in place (recursively)
        def reverse(pre_node, node):
            # Detach the old head so it becomes the new tail.
            if pre_node is self.head:
                pre_node.next = None
            if node:
                next_node = node.next
                node.next = pre_node  # re-point this node at its predecessor
                return reverse(node, next_node)
            else:
                self.head = pre_node  # last node becomes the new head
        return reverse(self.head, self.head.next)
def clear(self): #清空链表
self.head = None | UTF-8 | Python | false | false | 2,591 | py | 28 | Singly_linked_list.py | 27 | 0.541833 | 0.518814 | 0 | 92 | 22.576087 | 74 |
jef771/competitive_programming_practice | 5,007,931,876,238 | 570d978a1f17d6e45e3a0b643178127a11e210e3 | a5349d4fe26ff631e9804e05692a466b5db77929 | /code_forces/1328A/a.py | b0b4a965b47c7af71477a8e416614c0338cfc14a | []
| no_license | https://github.com/jef771/competitive_programming_practice | 76c7e553c97c80309a4a279ac4b651f1c70d0500 | 65a0d41350fe535c5d20c51d7c1b452c58483530 | refs/heads/main | 2023-04-20T20:38:52.400418 | 2021-05-06T00:40:45 | 2021-05-06T00:40:45 | 290,401,748 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
def get_div(a, b):
    """Return the distance from a up to the next multiple of b (0 if b | a)."""
    remainder = a % b
    if remainder == 0:
        return 0
    return abs(remainder - b)
def main():
    """Read t test cases of 'a b' from stdin and print each answer."""
    case_count = int(sys.stdin.readline())
    for _ in range(case_count):
        a, b = map(int, sys.stdin.readline().split())
        sys.stdout.write(f'{get_div(a, b)}\n')
if __name__ == '__main__':
main() | UTF-8 | Python | false | false | 315 | py | 200 | a.py | 154 | 0.488889 | 0.48254 | 0 | 19 | 15.631579 | 53 |
testasham/cnv2vcf | 16,466,904,653,831 | 0bb08b3c238e1d1645d229d52543bc1b15e56587 | 674cdad2bfc9829d7db0d151d118f79bdb260d9b | /exomeDepth/exomedepth_to_vcf.py | 877143dce8ab9dc774609ad612b1c3ad71828e2c | []
| no_license | https://github.com/testasham/cnv2vcf | bbb75e520fc31eaee85cc90328c5013474520866 | 55ef4c8939a6d5ec484b720bb4b03257200d5fc4 | refs/heads/master | 2023-06-27T08:52:55.541792 | 2020-06-07T11:51:38 | 2020-06-07T11:51:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
import csv
import sys
from pysam import FastaFile
# Per-sample VCF FORMAT layout: depth, Bayes factor, read ratio.
FORMAT = 'DP:BF:RR'
# 0-based index of the INFO column in a VCF record.
INFO_INDEX = 7
# ExomeDepth CNV call type -> VCF symbolic-allele shorthand.
CNV_TYPE_TO_SHORT = {
    'deletion': 'DEL',
    'duplication': 'DUP'
}
def parse_args():
    """Build and parse the converter's command-line arguments."""
    parser = argparse.ArgumentParser()
    arg_specs = [
        (('-i', '--input-path'),
         dict(dest='input_path', required=True, help='Path for input file.')),
        (('-o', '--output-path'),
         dict(dest='output_path', required=True, help='Path for output file.')),
        (('-r', '--genome-ref'),
         dict(dest='genome_ref', required=False,
              default='/opt/data/ref/Human/Hg19/genome_ref/hg19.fa',
              help='Path to genome reference.')),
    ]
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    return parser.parse_args()
def get_vcf_headers(sample_name, genome_ref):
    """Return the VCF header lines: meta lines, contigs, and the column header."""
    cmdline = " ".join(sys.argv)
    header_lines = ['##fileformat=VCFv4.1', '##source=exomeDepthVCFConverter,version=0.1.0',
                    f'##ConverterCMDLine={cmdline}']
    # One ##contig line per reference sequence.
    for name, length in zip(genome_ref.references, genome_ref.lengths):
        header_lines.append(f"##contig=<ID={name},length={length}>")
    header_lines.append(f"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t{sample_name}")
    return header_lines
def extract_sample_name(bed_file_path):
    """Return the 'sample' column of the first data row of the ExomeDepth TSV."""
    with open(bed_file_path) as bed_file:
        first_row = next(csv.DictReader(bed_file, delimiter='\t'))
    return first_row['sample']
def main(args):
    # Orchestrate the conversion: stream ExomeDepth calls from the input TSV
    # and write one VCF record per CNV call after the headers.
    sample_name = extract_sample_name(args.input_path)
    with open(args.input_path) as cnv_input, FastaFile(args.genome_ref) as genome_ref,\
            open(args.output_path, 'w') as vcf_output:
        # Reference contigs may be named 'chr1' or '1'; remember which style.
        is_full_chrom_name = genome_ref.references[0].startswith('chr')
        cnv_reader = csv.DictReader(cnv_input, delimiter='\t')
        vcf_output.write('\n'.join(get_vcf_headers(sample_name, genome_ref)) + '\n')
        for cnv_line in cnv_reader:
            vcf_line = get_vcf_line(cnv_line, genome_ref, is_full_chrom_name)
            vcf_output.write(vcf_line + '\n')
def info_dict_to_string(info_dict):
    """Serialise an INFO dict as 'k=v;...' (sorted; spaces become underscores)."""
    pairs = sorted("%s=%s" % item for item in info_dict.items())
    return ';'.join(pairs).replace(" ", "_")
def get_sv_type(cnv_line):
    """Map the ExomeDepth 'type' field to its VCF symbolic form (DEL/DUP)."""
    return CNV_TYPE_TO_SHORT[cnv_line['type']]
def get_cnv_info(cnv_line):
    """Build the VCF INFO dict for one ExomeDepth CNV call.

    SVLEN is computed as end - start (int); the remaining values are passed
    through from the input row unchanged.
    """
    start_position = cnv_line["start"]
    end_position = cnv_line["end"]
    return {
        'SVLEN': int(end_position) - int(start_position),
        'END': end_position,
        'SVTYPE': get_sv_type(cnv_line),
        'num_calls': cnv_line["num.calls"],
        'reads_expected': cnv_line["reads.expected"],
        'start_p': cnv_line["start.p"],
        'end_p': cnv_line["end.p"],
        'num_exons': cnv_line["nexons"],
    }
def get_sample_data(cnv_line):
    """Format the per-sample VCF column as 'DP:BF:RR'."""
    return '{}:{}:{}'.format(
        cnv_line['reads.observed'], cnv_line['BF'], cnv_line['reads.ratio'])
def get_vcf_line(cnv_line, genome_ref, is_full_chrom_name):
    # Assemble one tab-separated VCF record for a CNV call.
    chrom = get_chrom(cnv_line)
    start_position = cnv_line["start"]
    # Normalise the contig name to the reference's naming style ('chrN' vs
    # 'N') for the FASTA lookup only; the emitted CHROM keeps the input form.
    chrom_for_ref = chrom
    if is_full_chrom_name and not chrom.startswith('chr'):
        chrom_for_ref = f'chr{chrom_for_ref}'
    if not is_full_chrom_name and chrom.startswith('chr'):
        chrom_for_ref = chrom_for_ref[3:]
    # REF allele: the single reference base at the start position.
    ref_allele = genome_ref.fetch(region='{chr}:{pos}:{pos}'.format(chr=chrom_for_ref, pos=start_position))
    alt = get_alt(cnv_line)
    info = get_cnv_info(cnv_line)
    sample_data = get_sample_data(cnv_line)
    vcf_fields = [chrom, start_position, '.', ref_allele.upper(), alt, '.', 'PASS', info, FORMAT, sample_data]
    # Replace the dict placed at INFO_INDEX with its serialised form.
    vcf_fields[INFO_INDEX] = info_dict_to_string(info)
    return "\t".join(vcf_fields)
def get_chrom(cnv_line):
    """Return the chromosome/contig name of the CNV call."""
    chromosome = cnv_line["chromosome"]
    return chromosome
def get_alt(cnv_line):
    """Return the VCF symbolic ALT allele, e.g. '<DEL>'."""
    return '<{}>'.format(get_sv_type(cnv_line))
# Script entry point: parse CLI arguments and run the conversion.
if __name__ == '__main__':
    run_args = parse_args()
    main(run_args)
| UTF-8 | Python | false | false | 4,070 | py | 5 | exomedepth_to_vcf.py | 2 | 0.624079 | 0.62113 | 0 | 121 | 32.636364 | 112 |
inowas/data_portal | 15,444,702,413,256 | 063d6be8d95715db417658536cac547acb494501 | e73db10a4930859cf3de1147080fb82e2d392328 | /src/tools/api_urls.py | 3dc0f48198d76a22d2746f4883dafa05e1d1e96b | []
| no_license | https://github.com/inowas/data_portal | 6befd906301c941ff7c4f449e239cee41357ac4d | 81453330a0e1fb2d2d7230e774b4b5c61cfbd209 | refs/heads/master | 2021-01-13T03:02:47.362173 | 2018-04-19T08:27:32 | 2018-04-19T08:27:32 | 77,037,011 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import url, include
from rest_framework.urlpatterns import format_suffix_patterns
from tools import api_views
# URL routes: DRF browsable-API auth endpoints plus the weather-generator view.
urlpatterns = [
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^weather-generator-v-1/$', api_views.WeatherGeneratorView.as_view(), name='weather_generator'),
]
urlpatterns = format_suffix_patterns(urlpatterns) | UTF-8 | Python | false | false | 392 | py | 75 | api_urls.py | 27 | 0.757653 | 0.755102 | 0 | 11 | 34.727273 | 105 |
Mill6159/PISA_Server | 13,700,945,721,188 | e375804860eb139a8b8084a38ee77d02796856e7 | 4347901c0e0c54f66a1a89e58f18e5570eb848e0 | /Py_files/Pisa_Parser.py | 5185c7407e43225866849f538d50a15c5a048e6f | []
| no_license | https://github.com/Mill6159/PISA_Server | ee3b44506f6e25e2fc0ae10057add080e6b820c5 | 31714c319d22be2e56d2dfe1ffdccee6c823a3c7 | refs/heads/main | 2022-12-30T20:00:07.993960 | 2020-10-15T13:46:28 | 2020-10-15T13:46:28 | 304,076,415 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Can we parse the interface output?
import numpy as np
import io
# Parse the PISA interface output: read the whitespace-separated table and
# build "<col0><col1>" labels from the first two rows.
filename='PISA_test.txt'

# dtype=None lets genfromtxt infer a per-column dtype (structured rows).
# FIX: removed the large blocks of commented-out loadtxt / manual-parse
# experiments that previously surrounded this code.
data=np.genfromtxt(filename,dtype=None)
print(data[1])

# Concatenate columns 0 and 1 for the first two rows only (prototype limit).
n=0
atom_No=[]
for i in data:
    atom_No.append(str(i[0]) + str(i[1]))
    n+=1
    if n==2:
        break
print(atom_No)
| UTF-8 | Python | false | false | 1,140 | py | 6 | Pisa_Parser.py | 4 | 0.564035 | 0.522807 | 0 | 61 | 17.639344 | 202 |
aeberspaecher/PythonForScientists | 231,928,273,471 | 586af86eacb666c5519ae593572bc5938fe57cfa | c80c7f325ff37aa77f1a6e6a5a00f374424b7347 | /Code/HelloWorld.py | 5325624199b27fa06797c50633fa304f8611d639 | []
| no_license | https://github.com/aeberspaecher/PythonForScientists | 95cbba1f8ad27ca76bddde0cdc0c61e9ac45d614 | 85a92a44029c17a9ff75c70c3330e721bcc8cb8a | refs/heads/master | 2020-06-02T04:51:09.942333 | 2014-01-26T19:31:32 | 2014-01-26T19:31:32 | 2,454,269 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
from math import sin, pi
def sincSquare(x):
    """Return sinc(x)^2, where sinc(x) = sin(pi*x)/(pi*x) and sinc(0) = 1.

    FIX: the original used the Python 2-only '<>' operator, which is a
    syntax error on Python 3; replaced with '!='.
    """
    if(x != 0.0):
        return (sin(pi*x)/(pi*x))**2
    else:
        # Removable singularity: lim_{x->0} sinc(x) = 1.
        return 1.0
# CLI usage: python HelloWorld.py <x>  — prints sinc(x)^2.
if(__name__ == '__main__'):
    x = sys.argv[1]
    y = sincSquare(float(x))
    print("sinc(%s)^2 = %s"%(x, y))
| UTF-8 | Python | false | false | 289 | py | 41 | HelloWorld.py | 20 | 0.474048 | 0.446367 | 0 | 17 | 16 | 36 |
ArcoSeal/eve-indy-tools | 644,245,096,675 | 5443e0ddd443d60245e835ad6a39fb609a5532c3 | e0c90a52a8e3bf036e19b7aaf06275fe51ef5390 | /marketstuff.py | 596782cfc8c46e98c469354dc7a1dbf5872dae74 | []
| no_license | https://github.com/ArcoSeal/eve-indy-tools | d3336f7b6f60bbc17f42f8a8918d829a01db7d5c | 0e1fc6ae027b4bd33bed7cf78feb1920851b51b9 | refs/heads/master | 2020-04-24T08:46:40.467439 | 2019-02-21T09:27:55 | 2019-02-21T09:27:55 | 171,841,299 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def initmarketDB():
conn = sqlite3.connect(presets.marketDB)
c = conn.cursor()
c.execute('''DROP TABLE IF EXISTS MarketItems''')
c.execute('''CREATE TABLE MarketItems
(EntryID INTEGER PRIMARY KEY,
ItemID INT,
systemID INT,
MeanPrice REAL,
MedianPrice REAL,
StdPrice REAL,
PercentilePrice REAL,
nOrders INT,
MeanRegionalVolume REAL,
StdRegionalVolume REAL
)''')
conn.commit()
conn.close()
inittradetable()
def inittradetable():
    # Drop and recreate the Trades table (candidate station-to-station trades).
    conn = sqlite3.connect(presets.marketDB)
    c = conn.cursor()
    c.execute('''DROP TABLE IF EXISTS Trades''')
    c.execute('''CREATE TABLE Trades
    (EntryID INTEGER PRIMARY KEY,
    ItemID INT,
    BuysystemID INT,
    BuyMedianPrice REAL,
    BuyMeanRegionalVolume REAL,
    SellsystemID INT,
    SellMedianPrice REAL,
    SellMeanRegionalVolume REAL,
    nSellOrders INT
    )''')
    conn.commit()
    conn.close()
def pullitemstatstomarketDB(items, systems, order_type, cache_limit=200, resume=False):
    # Pull market statistics for every (item, system) pair into MarketItems.
    # cache_limit: maximum number of DB entries to be held in memory before dumping to disk
    # NOTE(review): 'verbose' is a module-level flag defined elsewhere in this file.
    # make sure iteration works (if there's only one arg)
    if isinstance(items, str): items = [items]
    if isinstance(systems, str): systems = [systems]
    # Normalise names to ids; one region pull serves all systems inside it.
    items = tuple(auxdatatools.getitemid(item) if isinstance(item, str) else item for item in items)
    systems = tuple(auxdatatools.getsystemID(system) if isinstance(system, str) else system for system in systems)
    systems_regions = tuple(auxdatatools.getsystemregion(system) for system in systems)
    regionsToPull = set(systems_regions)
    # we assume that the DB is being populated in the same order it was previously - this should be true, as the order is determined by the order of
    # items in presets.auxdataDB and systems in presets.auxdataDB, which will remain constant unless the data is updated (and the new data is different). We could
    # actually check each entry, but this is slow
    if resume and os.path.isfile(presets.marketDB):
        if verbose: print('Attempting to resume from last complete item request...', end='')
        entry_items = sqlitetools.getallitemsfromdbcol(presets.marketDB, 'MarketItems', 'typeID')
        if len(entry_items) == 0:
            if verbose: print('\nCannot resume, re-initialising market DB')
            resume = False
            initmarketDB()
        else:
            item_was_inprogress = entry_items[-1] # the item that was in progress last time the DB was written
            items_done = set(entry_items)
            items_done.remove(item_was_inprogress) # these items were definitely done
            # delete all entries for the in progress item
            conn = sqlite3.connect(presets.marketDB)
            c = conn.cursor()
            c.execute('''DELETE FROM MarketItems WHERE ItemID=?''', (item_was_inprogress, ))
            conn.commit()
            conn.close()
            # remove completed items from list of requested items
            items = list(items)
            for item in items_done:
                if item in items: items.remove(item)
            items = tuple(items)
            if verbose: print('ready to go.')
    else:
        if verbose: print('(Re-) initialising market DB')
        initmarketDB() # otherwise (re) initialise the market DB
    counter, total_combos = 1, len(items) * len(systems)
    entries = []
    if verbose: print_str = ''
    for item in items:
        for region in regionsToPull:
            # One API pull per (item, region), reused for every system in it.
            regionOrders = evemarket.getorders(item, region, order_type)
            avgRegionStats = evemarket.getavgregionstats(item, region, avg_period=7)
            for ii, system in enumerate(systems):
                if systems_regions[ii] == region:
                    counter += 1
                    if verbose:
                        # Overwrite the previous progress line in place.
                        if len(print_str) > 0: print('\r' + ' '*len(print_str), end='\r')
                        print_str = 'Pulling data for item/system pair %s of %s... (%s, %s)' % (counter, total_combos, auxdatatools.getitemName(item), auxdatatools.getsystemName(system))
                        print(print_str, end=('\n' if counter == total_combos else ''))
                        sys.stdout.flush()
                    entry = (item, system) + evemarket.getitemstats(item, system, order_type, orders=regionOrders, get_region_stats=False) + avgRegionStats
                    entries.append(entry)
                    # dump data to DB on disk every so often to prevent memory overflow
                    if len(entries) > cache_limit:
                        addtomarketDB(entries, 'MarketItems')
                        entries = []
    # Flush whatever remains in the cache.
    addtomarketDB(entries, 'MarketItems')
    print('done.')
def addtomarketDB(entries, table):
    """Bulk-insert rows into presets.marketDB.

    entries: sequence of value tuples matching the target table's columns.
    table: 'MarketItems' or 'Trades'.
    Raises ValueError for an unknown table name.  An empty *entries* batch
    is a no-op (no DB connection is opened).
    """
    if not entries:  # ignore empty entries
        return
    conn = sqlite3.connect(presets.marketDB)
    c = conn.cursor()
    try:
        if table == 'MarketItems':
            c.executemany('''INSERT INTO MarketItems(ItemID, systemID, MeanPrice, MedianPrice, StdPrice, nOrders, MeanRegionalVolume, StdRegionalVolume)
                          VALUES(?,?,?,?,?,?,?,?)''', entries)
        elif table == 'Trades':
            c.executemany('''INSERT INTO Trades(ItemID, BuysystemID, BuyMedianPrice, BuyMeanRegionalVolume, SellsystemID, SellMedianPrice, SellMeanRegionalVolume, nSellOrders)
                          VALUES(?,?,?,?,?,?,?,?)''', entries)
        else:
            # FIX: was a bare `raise Exception()` with no message.
            raise ValueError('unknown table: {}'.format(table))
    finally:
        # Commit/close even on failure (preserves the original behaviour).
        conn.commit()
        conn.close()
def iswhregion(region):
    """Return True when *region* names a wormhole region ('X-XNNNNN' form).

    Accepts either a region id (int, resolved via auxdatatools) or a name.
    A wormhole region name has '-' at index 1 and only digits from index 3
    onward (e.g. 'B-R00008').
    """
    if isinstance(region, int):
        region = auxdatatools.getregionName(region)
    # FIX: guard against short names (the original indexed region[1]
    # unconditionally and could raise IndexError); also dropped the
    # always-true isinstance(region[0:3], str) check and the unused
    # first3/last5 locals.
    return len(region) > 3 and region[1] == '-' and region[3:].isdigit()
def initWHregions():
    # Cache the tuple of wormhole-region names in the module global wh_regions.
    global wh_regions
    all_regions = sqlitetools.getallitemsfromdbcol(presets.auxdataDB, 'Regions', 'regionName')
    wh_regions = tuple(region for region in all_regions if iswhregion(region))
def initempireregions():
    # Cache empire (non-null, non-wormhole, non-Jove) region names in the
    # module global empire_regions.
    global empire_regions
    if 'wh_regions' not in globals(): initWHregions()
    non_empire_regions = presets.null_regions + wh_regions + presets.jove_regions
    conn = sqlite3.connect(presets.auxdataDB)
    c = conn.cursor()
    # Parameterised NOT IN over the excluded region names.
    sql_cmd = '''SELECT regionName FROM Regions WHERE regionName not in %s''' % sqlitetools.sql_placeholder_of_length(len(non_empire_regions))
    empire_regions = tuple(ii[0] for ii in c.execute(sql_cmd, non_empire_regions).fetchall())
    conn.close()
def getallHSsystems():
    # Return systemIDs of all high-security (>= 0.5) systems in empire regions.
    if 'empire_regions' not in globals(): initempireregions()
    conn = sqlite3.connect(presets.auxdataDB)
    c = conn.cursor()
    sql_cmd = '''SELECT systemID FROM Systems NATURAL JOIN Regions
    WHERE regionName in %s
    AND Security >= 0.5''' % sqlitetools.sql_placeholder_of_length(len(empire_regions))
    hs_systems = tuple(ii[0] for ii in c.execute(sql_cmd, empire_regions).fetchall())
    conn.close()
    return hs_systems
def findtrades(itemIDs, margin_threshold_pct, margin_threshold_abs, min_volume_abs, max_competition, cache_limit=200):
    # Scan MarketItems per item for buy-low/sell-high system pairs and record
    # candidates in the Trades table.
    #   margin_threshold_pct : minimum relative margin, in percent
    #   margin_threshold_abs : minimum absolute per-unit margin
    #   min_volume_abs       : minimum mean regional volume at the sell end
    #   max_competition      : maximum number of competing sell orders
    inittradetable()
    if isinstance(itemIDs, int): itemIDs = [itemIDs]
    margin_threshold = margin_threshold_pct / 100 # convert pct to decimal
    for item in itemIDs:
        entries = sqlitetools.getxbyyfromdb(presets.marketDB, 'MarketItems', ('solarSystemID', 'MedianPrice', 'MeanRegionalVolume', 'nOrders'), 'typeID', item)
        entries = [entries[ii] for ii, entry in enumerate(entries) if entry[1] != None] # remove data where price is None
        entries = [entries[ii] for ii, entry in enumerate(entries) if entry[2] != None] # remove data where voluem data was insufficient
        if not entries: continue # skip where we have no entries
        try:
            # Highest-price system = the sell side of the candidate trade.
            max_entry = max(entries, key = itemgetter(1))
        except:
            print(entries)
            raise
        if max_entry[2] < min_volume_abs: continue # absolute sell volume threshold
        if max_entry[3] > max_competition: continue # sell competition threshold
        # Highest buy price that still clears the relative margin threshold.
        minprice_for_margin = max_entry[1] / (margin_threshold + 1)
        trades = []
        for entry in entries:
            if entry[1] <= minprice_for_margin:
                this_margin = (max_entry[1] - entry[1]) / entry[1]
                this_margin_abs = this_margin * entry[1]
                if this_margin_abs >= margin_threshold_abs:
                    trades.append( (item, entry[0], entry[1], entry[2], max_entry[0], max_entry[1], max_entry[2], max_entry[3]) )
                    try:
                        if verbose: print('Profitable trade found: %s, %s -> %s. Margin: %s%%' % ( auxdatatools.getitemName(item), auxdatatools.getsystemName(entry[0]), auxdatatools.getsystemName(max_entry[0]), round(this_margin * 100, 1) ) )
                    except NoMatchError:
                        print('Warning: Item ID %s in Market DB but not main Item DB. Maybe Item DB has been updated recently?' % item)
            # NOTE(review): 'trades' is not emptied after this flush, so rows
            # flushed here appear to be inserted again by the final
            # addtomarketDB call below — possible duplicate inserts; confirm
            # against the original indentation.
            if len(trades) > cache_limit: addtomarketDB(trades, 'Trades')
    addtomarketDB(trades, 'Trades')
    if verbose: print('Total possible trades found: %s' % sqlitetools.gettablelen(presets.marketDB, 'Trades'))
| UTF-8 | Python | false | false | 9,792 | py | 11 | marketstuff.py | 9 | 0.604167 | 0.598141 | 0 | 232 | 41.206897 | 242 |
DanailYordanov/EasyCarUpload | 9,156,870,292,005 | 046d74247a68dc93aaa8481347dc57e1520d0614 | 0bc8be178edbc154aeb026fff6d4bdf4c714dbcc | /project/main/models.py | 57744364a1dadf97d641dab5ff64ed37ca8b9988 | []
| no_license | https://github.com/DanailYordanov/EasyCarUpload | 6b697524fa200f3c3ecfa17978b0df58265ab099 | e50338ac4e7768a0d6926123dc956c7746f2cb55 | refs/heads/master | 2023-09-04T19:40:43.658165 | 2021-11-04T16:25:49 | 2021-11-04T16:25:49 | 386,700,314 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from django.conf import settings
import django.utils.timezone as timezone
from .scripts.cars_bg import CarsBgClass
# Each *_CHOICES list feeds a CharField's `choices`; the first entry is an
# empty "Избери" ("choose") placeholder used by the forms.

# Transmission types (automatic, manual).
TRANSMISSION_TYPE_CHOICES = [
    ('', 'Избери'),
    ('Автоматични', 'Автоматични'),
    ('Ръчни', 'Ръчни')
]

# Body styles (sedan, hatchback, estate, coupe, cabrio, SUV, pickup, van).
CATEGORY_CHOICES = [
    ('', 'Избери'),
    ('Седан', 'Седан'),
    ('Хечбек', 'Хечбек'),
    ('Комби', 'Комби'),
    ('Купе', 'Купе'),
    ('Кабрио', 'Кабрио'),
    ('Джип', 'Джип'),
    ('Пикап', 'Пикап'),
    ('Ван', 'Ван')
]

# Fuel types (petrol, diesel).
FUEL_TYPE_CHOICES = [
    ('', 'Избери'),
    ('Бензин', 'Бензин'),
    ('Дизел', 'Дизел')
]

# Registration years; populated at import time by create_year_choices().
YEAR_CHOICES = [
    ('', 'Избери'),
]

# Months, January through December.
MONTH_CHOICES = [
    ('', 'Избери'),
    ('Януари', 'Януари'),
    ('Февруари', 'Февруари'),
    ('Март', 'Март'),
    ('Април', 'Април'),
    ('Май', 'Май'),
    ('Юни', 'Юни'),
    ('Юли', 'Юли'),
    ('Август', 'Август'),
    ('Септември', 'Септември'),
    ('Октомври', 'Октомври'),
    ('Ноември', 'Ноември'),
    ('Декември', 'Декември')
]

# Door-count options.
DOORS_TYPE_CHOICES = [
    ('', 'Избери'),
    ('2/3', '2/3'),
    ('4/5', '4/5')
]

# Exterior colours.
COLOR_CHOICES = [
    ('', 'Избери'),
    ('Бежов', 'Бежов'),
    ('Бордо', 'Бордо'),
    ('Бял', 'Бял'),
    ('Виолетов', 'Виолетов'),
    ('Жълт', 'Жълт'),
    ('Зелен', 'Зелен'),
    ('Кафяв', 'Кафяв'),
    ('Оранжев', 'Оранжев'),
    ('Сив', 'Сив'),
    ('Сребърен', 'Сребърен'),
    ('Червен', 'Червен'),
    ('Черен', 'Черен'),
    ('Лилав', 'Лилав'),
    ('Розов', 'Розов'),
    ('Светло зелен', 'Светло зелен'),
    ('Светло син', 'Светло син'),
    ('Тъмно зелен', 'Тъмно зелен'),
    ('Тъмно сив', 'Тъмно сив'),
    ('Тъмно син', 'Тъмно син')
]

# EU emission standards.
EURO_STANDART_CHOICES = [
    ('', 'Избери'),
    ('EURO 1', 'EURO 1'),
    ('EURO 2', 'EURO 2'),
    ('EURO 3', 'EURO 3'),
    ('EURO 4', 'EURO 4'),
    ('EURO 5', 'EURO 5'),
    ('EURO 6', 'EURO 6')
]
def create_year_choices(from_year, to_year):
    """Append (str(year), str(year)) pairs to YEAR_CHOICES for [from_year, to_year)."""
    YEAR_CHOICES.extend((str(year), str(year)) for year in range(from_year, to_year))
# Populate YEAR_CHOICES at import time: 1990 through the current year inclusive.
current_year = timezone.now().year
create_year_choices(1990, current_year + 1)
class Brand(models.Model):
brand = models.CharField(max_length=50)
def __str__(self):
return self.brand
class Model(models.Model):
brand = models.ForeignKey(Brand, on_delete=models.CASCADE)
model = models.CharField(max_length=50)
def __str__(self):
return f'{self.brand} - {self.model}'
class Car(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
category = models.CharField(
choices=CATEGORY_CHOICES, max_length=10)
brand = models.ForeignKey(Brand, on_delete=models.SET_NULL, null=True)
model = models.ForeignKey(Model, on_delete=models.SET_NULL, null=True)
modification = models.CharField(max_length=100)
engine_type = models.CharField(max_length=20)
price = models.IntegerField()
transmission_type = models.CharField(
choices=TRANSMISSION_TYPE_CHOICES, max_length=20)
fuel_type = models.CharField(
choices=FUEL_TYPE_CHOICES, max_length=10)
doors_type = models.CharField(choices=DOORS_TYPE_CHOICES, max_length=5)
power = models.IntegerField()
displacement = models.IntegerField()
year = models.CharField(choices=YEAR_CHOICES, max_length=4)
month = models.CharField(choices=MONTH_CHOICES, max_length=15)
run = models.IntegerField()
color = models.CharField(choices=COLOR_CHOICES, max_length=20)
euro_standart = models.CharField(
choices=EURO_STANDART_CHOICES, max_length=10)
description = models.CharField(max_length=400)
def __str__(self):
return f'{self.brand} {self.model} - {self.price}лв.'
def get_image_paths(self):
images = self.image_set.all()
image_paths = []
for image in images:
image_paths.append(image.image.path)
return image_paths
class Image(models.Model):
car = models.ForeignKey(Car, on_delete=models.CASCADE)
image = models.ImageField(upload_to='car_pics')
def __str__(self):
return f'{self.car.brand} - {self.car.model} - {self.car.modification} - {self.car.price}'
class OfferSite(models.Model):
name = models.CharField(max_length=50)
site_url = models.URLField(max_length=100)
offer_url = models.URLField(max_length=100)
delete_url = models.URLField(max_length=100)
create_url = models.URLField(max_length=100)
def __str__(self):
return self.name
class Offer(models.Model):
offer_id = models.CharField(max_length=50)
site = models.ForeignKey(OfferSite, on_delete=models.SET_NULL, null=True)
car = models.ForeignKey(Car, on_delete=models.CASCADE)
def __str__(self):
return f'{self.car.brand} - {self.car.model} - {self.car.modification} - {self.car.price}'
| UTF-8 | Python | false | false | 5,345 | py | 21 | models.py | 18 | 0.597701 | 0.583699 | 0 | 178 | 25.882022 | 98 |
jcrabtr/Hardwire | 13,013,750,907,553 | 60154c628dfa26758ee96b9e228ffe9350e91221 | 03603bd426cb3d1b2243a7c70fc46cbd9705f30f | /hardwire/server.py | 8aad6bfd3922b9c239abcac0e755053595c98f4a | [
"MIT"
]
| permissive | https://github.com/jcrabtr/Hardwire | ef0d7b09694256654e37753eae4dec0f410aec23 | 09fbc4fae96f7b576c26aa27eb2cbcdc799a5b74 | refs/heads/master | 2016-09-06T09:02:58.660144 | 2014-04-01T23:01:22 | 2014-04-01T23:01:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
hardwire.server
Copyright (C) 2014 Joseph P. Crabtree
Hardwire._site_init based on wsgi_echo example
https://github.com/tavendo/AutobahnPython/blob/master/examples/twisted/websocket/echo_wsgi/server.py
Copyright (C) 2012-2013 Tavendo GmbH
This module implements the Hardwire Web Interface Server.
"""
import sys, os, uuid, json
from twisted.python import log
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.wsgi import WSGIResource
from twisted.web.static import File
try:
from autobahn.twisted.resource import WebSocketResource, \
WSGIRootResource, \
HTTPChannelHixie76Aware
except ImportError:
# Version < 0.7.0 of Autobahn did not have the 'twisted' object
from autobahn.resource import WebSocketResource, \
WSGIRootResource, \
HTTPChannelHixie76Aware
try:
from autobahn.wamp1.protocol import exportRpc, \
exportPub, \
exportSub, \
WampServerFactory, \
WampServerProtocol
except ImportError:
# Version < 0.8.0 of Autobahn did not support multiple WAMP versions
from autobahn.wamp import exportRpc, \
exportPub, \
exportSub, \
WampServerFactory, \
WampServerProtocol
import settings
from hardwire.signal import Signal
from threading import Thread
class SignalServerProtocol(WampServerProtocol):
"""
The SignalServerProtocol is an extension of the Autobahn WAMPServerProtocol
that handles the PubSub for signal distribution as well as RPC for managing
the signal server and calling methods to store and load data on the server.
"""
@exportRpc
def register(self, name):
"""Registers a new pubsub signal with the server."""
signal = Signal(name, self.peerstr)
print "Adding signal named " + signal.name + " owned by " + signal.owner
return signal.uri
def onSessionOpen(self):
self.registerForRpc(self, "/signalManager/")
# Register Custom Pub Handler for Signals and signal list
self.signal_handler = SignalHandler(self.factory.signals,\
self.factory.publish_signal,\
self.factory.broadcast_signals)
self.registerHandlerForPubSub(self.signal_handler, "/signals/")
self.registerHandlerForPubSub(self.signal_handler, "/signals")
class SignalHandler(object):
"""
SignalHandler contains the custom publish and subscribe methods for the
signals on the server.
"""
def __init__(self, signals, publish, broadcast):
self.signals = signals
self.publish = publish
self.broadcast = broadcast
@exportSub("", True)
def subscribe(self, topicUriPrefix, topicUriSuffix):
# Publish the signal or the signal list, whichever is being subscribe to
if topicUriSuffix:
reactor.callLater(0, self.publish, topicUriSuffix)
else:
reactor.callLater(0, self.broadcast)
return True
@exportPub("", True)
def signal_pub_handler(self, topicUriPrefix, topicUriSuffix, event):
"""
Updates the signal object's value when a new value is published to its
uri.
"""
name = event[u'name'].encode("ascii", "ignore")
value = event[u'value']
self.signals[name].value = value
return False
class SignalServerFactory(WampServerFactory):
"""
The SignalServerFactory is an extension of the Autobahn WampServerFactory.
Like the WampServerFactory, it creates instances of the specific server
protocol. In addition, it stores and manages persistent signal and client
state and implements methods for managing signals through client
connections.
"""
protocol = SignalServerProtocol
def __init__(self, url, debug):
WampServerFactory.__init__(self, url, debugWamp = debug)
Signal.register = self.register
Signal.unregister = self.unregister
Signal.publish = self.publish_signal_public
self.signals = {}
def broadcast_signals(self):
"""Brodcasts the signal list kept on the server factory."""
signalUris = {}
for name in self.signals:
signalUris[str(self.signals[name].uri)] = name
self.dispatch("/signals", json.dumps(signalUris))
def register(self, new_signal):
self.signals[new_signal.name] = new_signal
self.broadcast_signals()
def unregister(self, deleted_signal):
self.signals.pop(deleted_signal.name, None)
self.broadcast_signals()
def publish_signal(self, name):
signal = self.signals[name]
signalDict = {}
signalDict["name"] = name
signalDict["value"] = signal.value
self.dispatch(signal.uri, json.dumps(signalDict))
def publish_signal_public(self, name):
reactor.callFromThread(self.publish_signal, name)
class Hardwire(object):
_instance = None
_reactor_thread = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Hardwire, cls).__new__(
cls, *args, **kwargs)
return cls._instance
def __init__(self, app=None, port=None, debug=False):
if port is None:
self._port = settings.PORT
else:
self._port = port
import __main__
self._user_dir = os.path.dirname(os.path.abspath(__main__.__file__))
self._hw_dir = os.path.dirname(os.path.abspath(__file__))
self._app = app
self._factory_init(debug)
self._site_init(debug)
def _factory_init(self, debug):
url = "ws://%s:%d" % (settings.INTERFACE, self._port)
self.factory = SignalServerFactory(url, debug)
self.factory.protocol = SignalServerProtocol
self.factory.setProtocolOptions(allowHixie76 = True)
self.factory.startFactory()
def _site_init(self, debug):
# Twisted Web resource for our WAMP factory
ws_resource = WebSocketResource(self.factory)
# Write hardwire settings to JS file
with open(os.path.join(self._hw_dir, 'static', 'js', 'hw-settings.js'), 'w+') as f:
f.write('var hw_settings = {port: %d}' % self._port)
# Twisted Web resource for static assets
hw_static_resource = File(os.path.join(self._hw_dir, 'static'))
# Create root resource from either the user's WSGI app, the user's
# index.html, or the Hardwire default index.html
if self._app:
print "Using user-supplied WSGI app..."
wsgi_resource = WSGIResource(reactor, reactor.getThreadPool(), self._app)
child_resources = {'hw_static': hw_static_resource, \
'static': static_resource, \
settings.WSURI_SUFFIX: ws_resource}
root_resource = WSGIRootResource(wsgi_resource, child_resources)
else:
user_index_path = os.path.join(self._user_dir, 'index.html')
if os.path.isfile(user_index_path):
print "Using user-supplied index.html..."
index_path = self._user_dir
else:
print "Using Hardwire default index.html..."
index_path = os.path.join(self._hw_dir, 'templates')
root_resource = File(index_path)
root_resource.putChild("hw_static", hw_static_resource)
root_resource.putChild(settings.WSURI_SUFFIX, ws_resource)
if debug:
log.startLogging(sys.stdout)
site = Site(root_resource)
site.protocol = HTTPChannelHixie76Aware # needed if Hixie76 is supported
reactor.listenTCP(self._port, site)
def run(self):
# Run the Twisted reactor in a thread. This is not the "right" way to
# use Twisted, but we're trading server efficiency for server-side code
# simplicity.
self._reactor_thread = Thread(target=reactor.run, args=(False,))
try:
self._reactor_thread.start()
except (KeyboardInterrupt, SystemExit):
self.stop()
def stop(self):
reactor.callFromThread(reactor.stop)
self._reactor_thread.join()
| UTF-8 | Python | false | false | 8,841 | py | 19 | server.py | 10 | 0.595634 | 0.592128 | 0 | 228 | 36.767544 | 100 |
zhengshiguang/nltk | 16,320,875,754,676 | 95ccf9b4a8af5c85fa7ca63662afa3f869fcb3b8 | b4739c7c82ad1018bf844b07de2949899ad1a5fc | /nltk_test.py | 4bac7f691eacee4b62c406e95dabff9dfa8786f4 | []
| no_license | https://github.com/zhengshiguang/nltk | 56f775643d829fdf56c1de0b4b2090f458b486ba | 12cce4c083cb6394bfa34632aa3bddb55fa7135f | refs/heads/master | 2020-03-22T12:33:54.110305 | 2018-07-07T09:12:29 | 2018-07-07T09:12:29 | 140,047,736 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import nltk
from nltk.book import *
print(nltk.__file__)
print(text1) | UTF-8 | Python | false | false | 70 | py | 2 | nltk_test.py | 1 | 0.728571 | 0.714286 | 0 | 5 | 13.2 | 23 |
EvelynGiordano/Alphanumeric-Cipher-Crack | 15,848,429,331,872 | 973f2ad2753098be872303831ca868b0e4803645 | 536ae380b598ec838607cea3d087facc0892ad02 | /hack.py | 89709ce662cd15ba51dc270a84be3d26bf53b405 | []
| no_license | https://github.com/EvelynGiordano/Alphanumeric-Cipher-Crack | efb475316833e85f952d8e2b1213c19a28de8421 | 9bd2678fc728669b0db4cdd78928523aa92243cb | refs/heads/main | 2023-05-11T04:21:39.387568 | 2021-06-03T18:20:33 | 2021-06-03T18:20:33 | 373,602,636 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import Counter
import re
import sys
alphabet = {
"A": None, "B": None,
"C": None, "D": None,
"E": None, "F": None,
"G": None, "H": None,
"I": None, "J": None,
"K": None, "L": None,
"M": None, "N": None,
"O": None, "P": None,
"Q": None, "R": None,
"S": None, "T": None,
"U": None, "V": None,
"W": None, "X": None,
"Y": None, "Z": None,
}
process = [
"the",
"to",
"who",
"that",
"are", # ape, ale, age, ace, awe, axe (all unlikely)
"and",
"for",
"been",
"but",
"know",
"put",
"have",
"would",
"could",
"should",
"and",
"was",
"why"
]
found = []
# region Norvig's Spell Checker
# The following is Peter Norvig's Bayesian Spellchecker written in 2007
# Uses Bayesian modeling to get the probability that a mistyped word is a candidate word
# Candidate words are words that are one- to two- letter edits away from the mistyped words
# The candidate word with the highest probability of being the mistyped word is returned
# https://norvig.com/spell-correct.html
def words(text): return re.findall(r'\w+', text.lower())
WORDS = Counter(words(open('big.txt').read()))
def P(word, N=sum(WORDS.values())):
"Probability of `word`."
return WORDS[word] / N
def correction(word):
"Most probable spelling correction for word."
return max(candidates(word), key=P)
def candidates(word):
"Generate possible spelling corrections for word."
return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])
def known(words):
"The subset of `words` that appear in the dictionary of WORDS."
return set(w for w in words if w in WORDS)
def edits1(word):
"All edits that are one edit away from `word`."
letters = 'abcdefghijklmnopqrstuvwxyz'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edits2(word):
"All edits that are two edits away from `word`."
return (e2 for e1 in edits1(word) for e2 in edits1(e1))
# endregion
def most_common_words(t):
with open(t) as fin:
counter = Counter(fin.read().strip().split())
return counter.most_common()
def most_common_letters(t):
with open(t, encoding="utf8") as f:
c = Counter()
for line in f:
alphanumeric_filter = filter(str.isalnum, line)
alphanumeric_string = "".join(alphanumeric_filter)
c += Counter(str(alphanumeric_string).upper())
return c.most_common()
def get_pattern(s):
i = 0
pattern = []
letters = {}
for c in s:
if c in letters:
pattern.append(letters[c])
else:
pattern.append(i)
letters.update({c: i})
i = i+1
return pattern
def replace_text(t, c, oc):
with open(t, 'r', encoding="utf8") as file:
data = file.read()
data = data.replace(oc, c)
with open("replaced", 'w', encoding="utf8") as file:
file.write(data)
def equal_lower_cases(word, encrypted_word):
i = 0
if(len(word) != len(encrypted_word)):
return False
for x in encrypted_word:
if str(x).isupper():
i = i+1
continue
elif x == word[i]:
i = i+1
continue
else:
return False
return True
def replace_uppercase(word, encrypted_word):
i = 0
for x in encrypted_word:
if str(x).isupper():
# replace character in 'replaced.txt'
replace_text("replaced", word[i], x)
# set character in the dictionary
alphabet[str(word[i]).upper()] = x
found.insert(0, str(word[i]).upper())
i=i+1
else:
i=i+1
# returns True if there are too many uppercase letters in the given word
# returns False if there is only one uppercase letter in the given word, and also when dealing with "the" (base case)
def num_encrypted_letters(x, w):
if len(x) == 1:
return False
elif w == "the":
return False
else:
encrypted_letters = 0
for i in x:
if str(i).isupper():
encrypted_letters = encrypted_letters + 1
if encrypted_letters > 1:
return True
else:
return False
def percentage_key():
total = 0
i = 0
key = "AZERTYUIOPQSDFGHJKLMWXCVBN"
wanted_keys = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'] # The keys you want
final = dict((k, alphabet[k]) for k in wanted_keys if k in alphabet)
for val in final:
if final[val] == key[i]:
total = total + 1
i = i + 1
return print("PERCENTAGE OF KEY DECRYPTED: ", total/26 * 100, "%")
def contains_upper(t):
with open(t, encoding="utf8") as f:
for line in f:
if any(letter.isupper() for letter in line):
return True
return False
def run_process(list):
for w in list:
common_encrypted_words = most_common_words("replaced")
for e in common_encrypted_words:
e = e[0]
if num_encrypted_letters(e, w) or not equal_lower_cases(w,e):
continue
else:
if get_pattern(w) == get_pattern(e):
replace_uppercase(w, e)
break
else:
continue
def run_spell_checker(list_of_words):
done = False
if contains_upper("replaced"):
for i in list_of_words:
i = i[0]
c = correction(i)
if str(i).islower() or i == correction(i) or len(i) < 3 or not equal_lower_cases(correction(i), i) or num_encrypted_letters(i, ""):
continue
else:
j = 0
for x in i:
if str(x).isupper() and not already_done(str(c[j]).upper()):
replace_uppercase(c, i)
done = True
break
else:
j = j+1
continue
if done:
run_spell_checker(most_common_words("replaced"))
break
def already_done(key):
res = False
if alphabet.get(key):
res = True
return res
# test key: AZERTYUIOPQSDFGHJKLMWXCVBN
def hack_cipher(t):
common_encrypted_letters = most_common_letters(t)
alphabet["E"] = common_encrypted_letters[0][0]
replace_text(t, 'e', alphabet["E"])
run_process(process)
run_spell_checker(most_common_words("replaced"))
unfound = 0
for x in alphabet.values():
if x == None:
unfound = unfound + 1
if unfound == 1:
a = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
for x in alphabet:
if alphabet[x] != None:
a = a.replace(alphabet[x], "")
else:
k = x
alphabet[k] = a
with open("key.txt", "w", encoding="utf8") as final:
for x in alphabet.values():
if x == None:
final.write("_")
else:
final.write(x)
with open("key.txt", "r", encoding="utf8") as final:
print(final.read())
def main():
file = sys.argv[1]
with open(file, 'r', encoding="utf8") as inp:
y = inp.read().upper()
with open("capitalized", 'w', encoding="utf8") as out:
out.write(y)
with open("capitalized", "r", encoding="utf8") as f, open("replaced", "w", encoding="utf8") as n:
x = f.read()
result = re.sub("[^A-Za-z\s]", "", x, 0, re.IGNORECASE | re.MULTILINE)
n.write(result)
hack_cipher("replaced")
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 8,451 | py | 1 | hack.py | 1 | 0.513194 | 0.505502 | 0 | 298 | 27.35906 | 144 |
Aswadhardi/WejapaInternships | 5,549,097,788,566 | e353ea5baa45540d83d5bb95ae9fc4c41ee8a372 | 80a45d385d306e26410ec742023e80850528c1a5 | /Wave3/for_loop_range.py | 12f74b9e5ccb221696be61aae26a29ef6a62ccd8 | []
| no_license | https://github.com/Aswadhardi/WejapaInternships | e87abcac76b35bf8cd49ca0abc51293c53bba6fd | 5dfafa1c58702a77f5ffc993b276c9167858202a | refs/heads/master | 2022-12-04T22:41:38.215442 | 2020-08-24T15:36:39 | 2020-08-24T15:36:39 | 282,212,775 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Write a for loop using range() to print out multiples of 5 up to 30 inclusive
for num in range(5, 31, 5):
print(num)
| UTF-8 | Python | false | false | 124 | py | 17 | for_loop_range.py | 17 | 0.685484 | 0.629032 | 0 | 4 | 30 | 79 |
Raymond-Xue/IR-final-project | 10,720,238,384,062 | f3e5229863402b556580981e5af1fe15ccbc699b | 5e3508395bef4627274d2ab43259e806bc0868c7 | /flaskapp/service/translate_service.py | a89f265badba136b0d3c71e92920efe14e907188 | []
| no_license | https://github.com/Raymond-Xue/IR-final-project | 0a7e4c84e88e54b871d116514a00164f116dd311 | fa41863523f3e9b9e1335d329c37d8d0b68331d1 | refs/heads/master | 2022-07-24T09:28:13.659195 | 2020-05-12T15:41:57 | 2020-05-12T15:41:57 | 264,462,212 | 1 | 0 | null | true | 2020-05-16T15:04:21 | 2020-05-16T15:04:20 | 2020-05-12T15:42:05 | 2020-05-12T15:42:02 | 403,740 | 0 | 0 | 0 | null | false | false | """
This module implements query translation funcitonality.
Author: Junda Li
"""
from googletrans import Translator
import jieba
class TranslateService:
def __init__(self, ser_url='translate.google.com'):
self.translator = Translator(service_urls=[ser_url])
def translate(self,txt):
"Translate raw txt to English"
output = self.translator.translate(txt)
return output.text.lower()
def tokenizer_chn(self,query):
"Tokenize Chinese querty to Chinese query"
result = list(jieba.cut_for_search(query))
return result
def ctoks2Eng(self, chntoks):
"covert chinese tokens to english token"
result = []
for wd in chntoks:
result.append(self.translator.translate(wd).text.lower())
return result
def translateTokens(self,txt):
"translate chinese raw text into enlgish tokens for searching purpose"
chntoks = self.tokenizer_chn(txt)
chntoks = self.ctoks2Eng(chntoks)
translated = self.ctoks2Eng(chntoks)
return translated
| UTF-8 | Python | false | false | 1,215 | py | 31 | translate_service.py | 16 | 0.590123 | 0.587654 | 0 | 38 | 28.342105 | 78 |
ThibaultHuyet/sensehat-container | 8,967,891,729,175 | e58c216b64f1145f8375d0fb02549a45bfde80f6 | f69b3e99177908cd5c54662377ed348872beb28d | /sensehat.py | 81c41d8dbe53362d88503e03f4910ac014645e4e | []
| no_license | https://github.com/ThibaultHuyet/sensehat-container | e186b935829f9d62367f535399a2be7891d1f839 | 4bba63ae1cea392b6775e036a780264a409ea31e | refs/heads/master | 2020-05-27T21:12:45.498846 | 2017-03-02T14:06:32 | 2017-03-02T14:06:32 | 83,615,028 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask, jsonify
import datetime
from sense_hat import SenseHat
sense = SenseHat()
app = Flask(__name__)
@app.route('/API/temp', methods=['GET'])
def temp():
temp = sense.get_temperature()
temp = round(temp, 1)
now = datetime.datetime.now()
Temp = [
{
'Temperature' : temp,
'Year' : now.year,
'Month' : now.month,
'Day' : now.day,
'Hour' : now.hour,
'Minute' : now.minute,
'seconds' : now.second
}
]
return jsonify({'Temp' : Temp})
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
| UTF-8 | Python | false | false | 666 | py | 3 | sensehat.py | 1 | 0.496997 | 0.489489 | 0 | 31 | 20.483871 | 40 |
gugajung/guppe | 11,106,785,468,540 | 13df777373bf6ae3eb97a9df79b4b3d1f3f5e08a | 844c7f8fb8d6bfab912583c71b93695167c59764 | /fixação/Seção07-P1/11-20/Sec07aEx20.py | 13fd2f586966c0c490d80940211c8469bd790b2f | [
"Apache-2.0"
]
| permissive | https://github.com/gugajung/guppe | 2be10656cd9aa33be6afb8e86f20df82662bcc59 | a0ee7b85e8687e8fb8243fbb509119a94bc6460f | refs/heads/main | 2023-05-28T08:08:24.963356 | 2021-06-07T16:56:11 | 2021-06-07T16:56:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | lista = []
totalValores = 10
listaNova = []
for i in range(totalValores):
numero = -1
while numero < 0 or numero > 50:
numero = int(input(f"Entre com o numero {i + 1} :-> "))
lista.append(numero)
if numero % 2 != 0:
listaNova.append(numero)
print("=================================================")
print(lista)
print(listaNova)
print("=================================================")
for k in range(0, totalValores - 1):
if k < len(listaNova):
msg = str(listaNova[k])
else:
msg = "-"
String = str(k) + " -> " + str(lista[k]) + " | " + msg
print(String)
| UTF-8 | Python | false | false | 630 | py | 397 | Sec07aEx20.py | 374 | 0.460317 | 0.442857 | 0 | 28 | 21.5 | 64 |
Jeong-Kyu/A_study | 2,388,001,854,935 | 78d35a0bf35475ef6f5f3a05a5cdc13d60242896 | 01932366dd322ec3459db9dd85a2fd8d22a82fcb | /keras/keras37_overfit.py | 85a5e0e98dfd2b0a1bc4c08b299a505259d6193a | []
| no_license | https://github.com/Jeong-Kyu/A_study | 653f5fd695109639badfa9e99fd5643d2e9ff1ac | 6866c88fcc25841ceae2cd278dcb5ad5654c2a69 | refs/heads/master | 2023-06-11T02:44:20.574147 | 2021-07-05T08:59:43 | 2021-07-05T08:59:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 과적합 Overfit 방지
# 1. 훈련 데이터 up
# 2. 피쳐 수 down
# 3. regularization
# 4. dropout(딥러닝 해당)
#5? 앙상블? 통상 2~5% 향상이 있다고 하는 구설 | UTF-8 | Python | false | false | 185 | py | 201 | keras37_overfit.py | 199 | 0.621849 | 0.563025 | 0 | 7 | 16.142857 | 30 |
brukhabtu/stitching | 16,097,537,441,554 | ff46d3eb7378e1656f14fa257a28c61954437e2e | 0d282e9b0e841df2b35a7286bb5673631a363ee1 | /stitching/exceptions.py | 7ac6f34def3d171072ca85ff0b89b13b232ed219 | []
| no_license | https://github.com/brukhabtu/stitching | 812bc23ce2ba5b2ce0a94048744713ddae0a4671 | 13855595e5b4b980727e97ef7470857e47af5f3b | refs/heads/master | 2021-01-13T09:08:00.738396 | 2016-07-28T17:31:20 | 2016-07-28T17:31:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class InvalidBrowserException(Exception):
def __init__(self, browser_name):
msg = '{} is not supported by the RegressionTestCase class.'.format(
browser_name)
super(InvalidBrowserException, self).__init__(msg)
class AssertScreenshotException(AssertionError):
def __init__(self, distance):
msg = ('The new screenshot did not match the baseline (by a distance ' +
'of {})').format(distance)
super(AssertScreenshotException, self).__init__(msg)
class MissingBaselineScreenshotException(Exception):
def __init__(self):
msg = ('Missing baseline screenshots. Please run tests with the ' +
'make_baseline_screenshots config directive set to true')
super(MissingBaselineScreenshotException, self).__init__(msg)
| UTF-8 | Python | false | false | 809 | py | 5 | exceptions.py | 4 | 0.665019 | 0.665019 | 0 | 19 | 41.578947 | 80 |
Megha-Komarraju/Data-Science-Salary-Estimator | 5,961,414,635,629 | 048f354fae6ba7ef2c8a028590f6ab9aaf7bfad7 | 9082e7b9f5660b0a94677fd4ab35b177de61a602 | /data_collection.py | c4873b4e450c662624e911a28b289d67f5000f8d | []
| no_license | https://github.com/Megha-Komarraju/Data-Science-Salary-Estimator | 3e5f086237cefc604f675a8f15be3a9fb097b774 | 28c17cf085e6276489df682d2b8624218f3cdceb | refs/heads/master | 2023-02-14T14:40:52.800754 | 2021-01-09T17:05:36 | 2021-01-09T17:05:36 | 327,656,224 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 7 10:33:41 2021
@author: saime
"""
import glassdoor_scraper as gs
import pandas as pd
path="C:/Users/saime/Documents/Kaggle/survey_Project-2020/chromedriver"
df=gs.get_jobs('data scientist',1000, False,path,15)
df | UTF-8 | Python | false | false | 278 | py | 1 | data_collection.py | 1 | 0.679856 | 0.600719 | 0 | 14 | 18 | 71 |
andrewcooke/rxpy | 1,408,749,320,360 | e86345eb0d830efc3e3db9e95c5e8fd22ba07657 | cbd2f187fb60939c49a00f154570f53d4bb19910 | /rxpy/src/rxpy/parser/support.py | 241325bd1e08a2f8fadf0302c89a0ee97e197fee | []
| no_license | https://github.com/andrewcooke/rxpy | 3c4443f3ccba479d936f0e49d7d009a64dfc89b3 | e7f330dc8c5fa49392a1a018ceda6312270e9a93 | refs/heads/master | 2021-01-10T13:46:37.129155 | 2011-06-03T23:29:58 | 2011-06-03T23:29:58 | 52,740,676 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# The contents of this file are subject to the Mozilla Public License
# (MPL) Version 1.1 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License
# at http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and
# limitations under the License.
#
# The Original Code is RXPY (http://www.acooke.org/rxpy)
# The Initial Developer of the Original Code is Andrew Cooke.
# Portions created by the Initial Developer are Copyright (C) 2010
# Andrew Cooke (andrew@acooke.org). All Rights Reserved.
#
# Alternatively, the contents of this file may be used under the terms
# of the LGPL license (the GNU Lesser General Public License,
# http://www.gnu.org/licenses/lgpl.html), in which case the provisions
# of the LGPL License are applicable instead of those above.
#
# If you wish to allow use of your version of this file only under the
# terms of the LGPL License and not to allow others to use your version
# of this file under the MPL, indicate your decision by deleting the
# provisions above and replace them with the notice and other provisions
# required by the LGPL License. If you do not delete the provisions
# above, a recipient may use your version of this file under either the
# MPL or the LGPL License.
'''
Support classes for parsing.
'''
from string import digits, ascii_letters
from rxpy.alphabet.ascii import Ascii
from rxpy.alphabet.unicode import Unicode
from rxpy.graph.post import resolve_group_names, post_process
from rxpy.parser.error import SimpleGroupException
from rxpy.lib import _FLAGS, RxpyException, refuse_flags
OCTAL = '01234567'
ALPHANUMERIC = digits + ascii_letters
class ParserState(object):
'''
Encapsulate state needed by the parser. This includes information
about flags (which may change during processing and is related to
alphabets) and groups.
'''
(I, M, S, U, X, A, _L, _C, _E, _U, _G, IGNORECASE, MULTILINE, DOTALL, UNICODE, VERBOSE, ASCII, _LOOP_UNROLL, _CHARS, _EMPTY, _UNSAFE, _GROUPS) = _FLAGS
def __init__(self, flags=0, alphabet=None, hint_alphabet=None,
require=0, refuse=0):
'''
`flags` - initial flags set by user (bits as int)
`alphabet` - optional alphabet (if given, checked against flags; if not
given inferred from flags and hint)
`hint_alphabet` - used to help auto-detect ASCII and Unicode in 2.6
`require` - fkags required by the alphabet
`refuse` - flags refused by the alphabet
'''
self.__new_flags = 0
self.__initial_alphabet = alphabet
self.__hint_alphabet = hint_alphabet
self.__require = require
self.__refuse = refuse
flags = flags | require
# default, if nothing specified, is unicode
if alphabet is None and not (flags & (ParserState.ASCII | ParserState.UNICODE)):
alphabet = hint_alphabet if hint_alphabet else Unicode()
# else, if alphabet given, set flag
elif alphabet:
if isinstance(alphabet, Ascii): flags |= ParserState.ASCII
elif isinstance(alphabet, Unicode): flags |= ParserState.UNICODE
elif flags & (ParserState.ASCII | ParserState.UNICODE):
raise RxpyException('The alphabet is inconsistent with the parser flags')
# if alphabet missing, set from flag
else:
if flags & ParserState.ASCII: alphabet = Ascii()
if flags & ParserState.UNICODE: alphabet = Unicode()
# check contradictions
if (flags & ParserState.ASCII) and (flags & ParserState.UNICODE):
raise RxpyException('Cannot specify Unicode and ASCII together')
refuse_flags(flags & refuse)
self.__alphabet = alphabet
self.__flags = flags
self.groups = GroupState()
self.__comment = False # used to track comments with extended syntax
self.__unwind_credit = 10
def deep_eq(self, other):
'''
Used only for testing.
'''
def eq(a, b):
return a == b == None or (a and b and type(a) == type(b))
return self.__new_flags == other.__new_flags and \
eq(self.__initial_alphabet, other.__initial_alphabet) and \
eq(self.__hint_alphabet, other.__hint_alphabet) and \
self.__require == other.__require and \
self.__refuse == other.__refuse and \
eq(self.__alphabet, other.__alphabet) and \
self.__flags == other.__flags and \
self.groups == other.groups and \
self.__comment == other.__comment and \
self.__unwind_credit == other.__unwind_credit
@property
def has_new_flags(self):
'''
Have flags change during parsing (possible when flags are embedded in
the regular expression)?
'''
return bool(self.__new_flags & ~self.__flags)
def clone_with_new_flags(self):
'''
This discards group information because the expression will be parsed
again with new flags.
'''
return ParserState(alphabet=self.__initial_alphabet,
flags=self.__flags | self.__new_flags,
hint_alphabet=self.__hint_alphabet,
require=self.__require, refuse=self.__refuse)
def next_group_index(self, name=None):
'''
Get the index number for the next group, possibly associating it with
a name.
'''
return self.groups.new_index(name, self.flags & self._GROUPS)
def index_for_name_or_count(self, name):
'''
Given a group name or index (as text), return the group index (as int).
First, we parse as an integer, then we try as a name.
'''
return self.groups.index_for_name_or_count(name)
def new_flag(self, flag):
'''
Add a new flag (called by the parser for embedded flags).
'''
self.__new_flags |= flag
def significant(self, character):
'''
Returns false if character should be ignored (extended syntax).
'''
if self.__flags & self.VERBOSE:
if character == '#':
self.__comment = True
return False
elif self.__comment:
self.__comment = character != '\n'
return False
elif self.__alphabet.space(character):
return False
else:
return True
else:
return True
def unwind(self, count):
'''
Allow limited unwinding of loops. This is to limit unwinding in case
of nested repeats. Unfortunately, because the parser is L to R, it
will be applied to the outer loop (although this is not for direct
speed as much as letting the simple engine work, so that may not be
a serious issue).
'''
if count <= self.__unwind_credit:
self.__unwind_credit -= count
return True
else:
return False
@property
def alphabet(self):
'''
The alphabet to be used.
'''
return self.__alphabet
@property
def flags(self):
'''
Current flags (this does not change as new flags are added; instead
the entire expression must be reparsed if `has_new_flags` is True.
'''
return self.__flags
class GroupState(object):
def __init__(self):
self.__name_to_index = {}
self.__index_to_name = {}
def index_for_name_or_count(self, name):
'''
Given a group name or index (as text), return the group index (as int).
First, we parse as an integer, then we try as a name.
'''
try:
index = int(name)
if index not in self.__index_to_name:
raise RxpyException('Unknown index ' + str(name))
else:
return index
except ValueError:
if name not in self.__name_to_index:
raise RxpyException('Unknown name ' + str(name))
else:
return self.__name_to_index[name]
def new_index(self, name=None, extended=False):
def next_index():
index = 1
while index in self.__index_to_name:
index += 1
return index
if extended:
# allow aliasing and numbers as names
if not name:
name = str(next_index())
index = None
try:
index = self.index_for_name_or_count(name)
except RxpyException:
try:
index = int(name)
except ValueError:
index = next_index()
else:
return index
else:
# names are not numbers and cannot repeat
index = next_index()
if name:
try:
int(name)
raise SimpleGroupException('Invalid group name ' + name)
except ValueError:
if name in self.__name_to_index:
raise SimpleGroupException('Repeated group name ' + name)
else:
name = str(index)
self.__index_to_name[index] = name
self.__name_to_index[name] = index
return index
def __eq__(self, other):
return isinstance(other, GroupState) and \
self.__index_to_name == other.__index_to_name
@property
def count(self):
return len(self.__index_to_name)
@property
def names(self):
'''
Map from group names to index. Warning - for efficiency, exposed raw.
'''
return self.__name_to_index
@property
def indices(self):
'''
Map from group index to name. Warning - for efficiency, exposed raw.
'''
return self.__index_to_name
class Builder(object):
'''
Base class for states in the parser (called Builder rather than State
to avoid confusion with the parser state).
The parser can be though of as a state machine, implemented via a separate
loop (`parse()`) that repeatedly calls `.append_character()` on the current
state, using whatever is returned as the next state.
The graph is assembled within the states, which either assemble locally
or extend the state in a "parent" state (so states may reference parent
states, but the evaluation process remains just a single level deep).
It is also possible for one state to delegate to the append_character
method of another state (escapes are handled in this way, for example).
After all characters have been parsed, `None` is used as a final character
to flush any state that is waiting for lookahead.
'''
def __init__(self, state):
self._state = state
def append_character(self, character, escaped=False):
'''
Accept the given character, returning a new builder. A value of
None is passed at the end of the input, allowing cleanup.
If escaped is true then the value is always treated as a literal.
'''
def parse(text, state, class_, mutable_flags=True):
'''
Parse the text using the given builder.
If the expression sets flags then it is parsed again. If it changes flags
on the second parse then an error is raised.
'''
try:
graph = class_(state).parse(text)
except RxpyException:
# suppress error if we will parse again
if not (mutable_flags and state.has_new_flags):
raise
if mutable_flags and state.has_new_flags:
state = state.clone_with_new_flags()
graph = class_(state).parse(text)
graph = post_process(graph, resolve_group_names(state))
if state.has_new_flags:
raise RxpyException('Inconsistent flags')
return (state, graph)
| UTF-8 | Python | false | false | 12,938 | py | 53 | support.py | 47 | 0.566239 | 0.564384 | 0 | 341 | 36.932551 | 155 |
AceMouty/Python | 15,779,709,893,859 | d088f50d2390dfadf27d0ed8043406d654f28666 | 7b10ee96c21659773d9d90f6105d1517ab4339dd | /2.Variables_and_Strings/variables.py | dbf894a70d20981aabd9a28977202b78afffef5c | []
| no_license | https://github.com/AceMouty/Python | a2a62f2f47da8c3bb8c3cf4371918816ccc89f50 | 3d85e37afdd44d80b70879fe2d11e9c5ca00fbae | refs/heads/master | 2022-07-31T17:55:32.802313 | 2020-05-20T04:07:46 | 2020-05-20T04:07:46 | 264,051,147 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def main():
# variables MUST start with a letter or underscore
# the rest of a varibale name can only have a
# letter, number or underscore
# variables are ways for us to store data that
myFavNumber = 7
fName = "John"
lName = "Doe"
print(myFavNumber)
print(fName + " " + lName)
# we can assign multiple variables at a time
we, are, together = 2, 4, 6
print(we + are + together)
print()
num_of_cats = 99
print("we currently have " + str(num_of_cats) + " cats")
num_of_cats = num_of_cats - 1
print("But..we got rid of a cat so now we have " + str(num_of_cats))
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 669 | py | 27 | variables.py | 27 | 0.599402 | 0.588939 | 0 | 24 | 26.875 | 72 |
python-monero/monero-python | 5,506,148,098,439 | d60f94aace2094a619424aa8d02c25ed65211934 | 2f26da08004b66e4ff1aa12b41c70ab80890101f | /utils/integraddr.py | 7a047b783a10bd42798f50b2eeea27d125e6ef8b | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/python-monero/monero-python | 2c810aafebb63a933d946eab735afbfbf8c6bc80 | b0023953de9f96ad98e75f1b5c45e20867f69c02 | refs/heads/master | 2020-04-22T15:09:39.758569 | 2019-11-27T09:27:27 | 2019-11-27T09:27:27 | 170,468,602 | 2 | 2 | BSD-3-Clause | true | 2019-11-27T09:27:29 | 2019-02-13T08:19:56 | 2019-11-06T12:18:02 | 2019-11-27T09:27:29 | 558 | 2 | 1 | 2 | Python | false | false | #!/usr/bin/python
import sys
from monero.address import address
from monero.numbers import PaymentID
USAGE = "{0} <address> <payment_id>"
try:
addr = address(sys.argv[1])
pid = PaymentID(sys.argv[2])
except IndexError:
print(USAGE.format(*sys.argv), file=sys.stderr)
sys.exit(-1)
print(addr.with_payment_id(pid))
| UTF-8 | Python | false | false | 332 | py | 77 | integraddr.py | 45 | 0.698795 | 0.686747 | 0 | 15 | 21.133333 | 51 |
PacoMP/ProgramPractice_PacoMP | 18,854,906,433,939 | 3a25e7dbff05cc02ca346226164e904aadd51f82 | 1bd922798cf22a4ea20b19ac01faa2a23c9fcce0 | /Python/Verato/p2.py | 8d2cc58f1eab0017f0cd9370eee177caf6e921ce | []
| no_license | https://github.com/PacoMP/ProgramPractice_PacoMP | 085a959e1c78384a79e273b1c655c73d78552f38 | 5502546dee972b61348f016f1571923c9ebe10e7 | refs/heads/master | 2020-04-22T02:27:37.925522 | 2019-05-15T23:20:31 | 2019-05-15T23:20:31 | 170,049,943 | 0 | 0 | null | false | 2019-02-26T14:56:31 | 2019-02-11T01:42:46 | 2019-02-13T17:53:26 | 2019-02-26T14:56:31 | 25 | 0 | 0 | 0 | Python | false | null | import sys
list = []
list = sys.stdin.readline().split(',')
for word in list:
word.replace('\n','')
list.sort(key=len)
for word in list:
if word.len() == word[0].len():
print(word,end = ',')
for word in list:
if word.len() == word[-1].len():
print(word,end = ',')
| UTF-8 | Python | false | false | 296 | py | 33 | p2.py | 25 | 0.543919 | 0.537162 | 0 | 16 | 17.5 | 38 |
python4humanities/book | 12,704,513,305,970 | c68c79af37c5e1ad85a69f557762da87a01ab5c7 | 58c2d41e9493dda441a70cc3b8b79b75df54faf7 | /정리하기 스크립트/1.6 출력창에 출력하기/01.py | 039c8dea94c78ff9c886a2ccf7f071ee8b805641 | []
| no_license | https://github.com/python4humanities/book | 027c736237e6ea54e968574f3f6f5593291f4a37 | ae9c20fa50a7199c2ed5046feae5839d37ea1217 | refs/heads/master | 2022-11-05T16:06:51.129821 | 2020-06-28T15:07:49 | 2020-06-28T15:07:49 | 250,149,625 | 5 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | print(10)
print(20)
print(30)
| UTF-8 | Python | false | false | 30 | py | 127 | 01.py | 125 | 0.7 | 0.5 | 0 | 3 | 9 | 9 |
codl/markov_done_quick | 17,351,667,897,470 | 8fbd5bd4c0763f8eeb1320942bf6f3e72f0f5a55 | ad1aa74302b106090aa5537e3b6ffd973f208124 | /model.py | 74c1d7defd2270f1f36f4626077b0e53c2acfc64 | []
| no_license | https://github.com/codl/markov_done_quick | 1d52d10f76a6ea12df6d8cbe699fd96a9ac028ea | cdb821c25bf7a8d94b2d31692e20a112a21a6ed7 | refs/heads/master | 2022-12-14T03:32:11.031890 | 2019-06-24T08:33:00 | 2019-06-29T00:32:21 | 138,527,743 | 6 | 0 | null | false | 2022-12-08T02:10:01 | 2018-06-25T01:06:33 | 2019-06-29T02:35:27 | 2022-12-08T02:09:59 | 16,424 | 4 | 0 | 3 | Python | false | false | import unicodedata
import pickle
import logging
import random
logger = logging.getLogger(__name__)
class TrigramMarkovChain(object):
def __init__(self):
# trigrams format:
# key is (word_1, word_2)
# value is a dict with key = word_3 and value = frequency
# None in either of these word values means past the ends of the string
# so word_1 = None and word_2 = None gives us words that begin
# a string
# and word_3 = None means the string ends there
# word_3 may start with a space. if word_3 does not start with a
# space then it should be appended to the preceding string without
# a space
self.trigrams = dict()
@classmethod
def tokenize(cls, string):
tokens = list()
buf = ""
SYMBOL = 1
ALPHANUM = 2
current_token = None
for char in string:
category = unicodedata.category(char)
if char == " ":
char_type = None
elif category[0] in "LN":
char_type = ALPHANUM
else:
char_type = SYMBOL
if char_type == current_token:
buf += char
else:
if current_token:
tokens.append(buf)
buf = ''
current_token = char_type
buf += char
if buf.strip() != '':
tokens.append(buf)
return tuple(tokens)
def ingest(self, string):
tokens = self.tokenize(string)
prev_tokens = (None, None)
tokens += (None,)
for token in tokens:
if prev_tokens not in self.trigrams:
self.trigrams[prev_tokens] = dict()
if token not in self.trigrams[prev_tokens]:
self.trigrams[prev_tokens][token] = 0
self.trigrams[prev_tokens][token] += 1
if token is not None:
prev_tokens = (prev_tokens[1], token.strip().lower())
def load(self, path):
with open(path, 'rb') as f:
self.trigrams = pickle.load(f)
def save(self, path):
with open(path, 'wb') as f:
pickle.dump(self.trigrams, f)
@classmethod
def from_file(cls, path):
instance = cls()
instance.load(path)
return instance
def next_token(self, token, token_2):
candidates = self.trigrams.get((token, token_2), None)
if not candidates:
logger.warning("Couldn't find trigram for tokens ('%s', '%s')", token, token_2)
return None
pool = list()
for key in candidates.keys():
for _ in range(candidates[key]):
pool.append(key)
return random.choice(pool)
def make_phrase(self):
string = ""
prev_tokens = (None, None)
while True:
token = self.next_token(*prev_tokens)
if token is None:
return string
string += token
prev_tokens = (prev_tokens[1], token.strip().lower())
| UTF-8 | Python | false | false | 3,086 | py | 7 | model.py | 4 | 0.52722 | 0.521387 | 0 | 104 | 28.673077 | 91 |
Psojed/40K-ancient-list | 1,529,008,406,877 | 85540955d0014e41945558b44b1788ed2baf995b | 0470327d1d96464cadc7a04fd5e02e3d1fae3e24 | /parse_enchants.py | a98d9160ae84e6208e8e6f281424b627d5463d19 | []
| no_license | https://github.com/Psojed/40K-ancient-list | 71e73db672681b3db2ded6c0b6dffa3c5c903a23 | 93b80e1c824beed853e5d93612a8453ee65b0dc3 | refs/heads/master | 2023-06-21T02:04:50.841391 | 2021-07-14T18:14:29 | 2021-07-14T18:14:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
from enum import Enum
from fsm import FSM
# Exclude psalms, etc.
VALID_QUALITY=['primary','secondary','relic','morality']
class Enchant():
def __init__(self, name):
self.name = name # str
self.desc = None # str
self.desc_repl = None # str
self.slots = [] # [str, ...]
self.quality = None # str
self.groups = [] # [str, ...]
self.range = None # (low, high)
def __str__(self):
s = '<Enchant '+self.name+' '+str((self.desc,self.desc_repl,self.slots,self.quality,self.groups,self.range))+'>'
return s
_S = Enum('_S', 'TOP ITEMS ENCHANT, VALUES')
# Complex FSM actions
def capture_item(M,D):
D['items'][M[0]] = M[1].split(',')
def create_enchant(M,D):
D['temp'] = Enchant(M[0])
def translate_quality(M,D):
qual = M[0]
T = { 'godlike':'relic' }
if qual in T:
qual = T[qual]
D['temp'].quality = qual
def commit_enchant(M,D):
if D['temp'].quality in VALID_QUALITY:
D['enchants'].append(D['temp'])
return _S.TOP
def set_range(M,D):
D['temp'].range = float(M[0]), float(M[1])
def set_item(M,D):
if M[0]=='':
D['temp'].slots = []
else:
D['temp'].slots = M[0].split(',')
machine = {
_S.TOP: [
(r'Templates', lambda: _S.ITEMS),
(r'Enchantment', lambda: _S.ENCHANT),
(r'', None),
],
_S.ITEMS: [
(r'{', None),
(r'(.*)=(.*)', capture_item),
(r'}', lambda: _S.TOP),
],
_S.ENCHANT: [
(r'{', None),
(r'Name=(.*)', create_enchant),
(r'NameID=(.*)', lambda M,D: setattr(D['temp'],'desc',str.lower(M[0]))),
(r'Property=(.*)', lambda M,D: setattr(D['temp'],'desc_repl',M[0])),
(r'ArtifactTypes=(.*)', set_item),
(r'EnchantQuality=(.*)', translate_quality),
(r'Groups=(.*)', lambda M,D: setattr(D['temp'],'groups',M[0].split(','))),
(r'Values$', lambda: _S.VALUES),
(r'}', commit_enchant),
(r'.*', None),
],
_S.VALUES: [
(r'100=(.*),(.*)', set_range),
(r'(.*)=(.*),(.*)', None), # FIXME: item level interpolation is NYI (@100)
(r'}', lambda: _S.ENCHANT),
(r'.*', None),
]
}
def parse_enchants(file):
fsm = FSM(_S, _S.TOP, [_S.TOP], machine)
fsm.reset()
fsm.data = {'items':{}, 'enchants':[], 'temp': None}
#fsm.tracing(True)
while True:
rawline = file.readline()
if rawline=='':
fsm.terminate()
return fsm.data['items'], fsm.data['enchants']
line = rawline.strip()
fsm(line)
raise Exception('Unknown error: file parsing failed.')
if __name__=='__main__':
with open('enchantments.cfg') as f:
items,enchants = parse_enchants(f)
print(str(len(items))+' items counted')
print(str(len(enchants))+' items counted')
print(enchants[37])
print(enchants[79])
#print([_.name for _ in enchants].index('critical_hit_chance_major'))
| UTF-8 | Python | false | false | 2,772 | py | 8 | parse_enchants.py | 4 | 0.556277 | 0.548341 | 0 | 102 | 26.176471 | 116 |
PaddlePaddle/PaddleTest | 5,291,399,735,558 | f58034d805f100eab6f217b93dec6a19c6228338 | bb150497a05203a718fb3630941231be9e3b6a32 | /models/PaddleDetection/module_test/test_module.py | 2853021236dd63edee4c8f2bb3335c095278e29f | []
| no_license | https://github.com/PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | false | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | 2023-08-22T09:37:52 | 2023-09-13T11:13:34 | 114,862 | 40 | 96 | 12 | Python | false | false | """
test functions for all modules
"""
import os
import sys
import wget
import paddle
class Test(object):
"""
test functions for all modules
"""
def __init__(self, cfg):
paddle.seed(33)
self.net = cfg.net
self.data = cfg.data
self.label = cfg.label
self.module_name = cfg.module_name
# some module has more than one input data
if hasattr(cfg, "input"):
self.input = cfg.input
self.type = ""
# some module has more than one predict result
self.predicts_module = [
"SOLOv2Head",
"SimpleConvHead",
"S2ANetHead",
"RetinaHead",
"PPYOLOERHead",
"PPYOLOEHead",
"PPYOLOEContrastHead",
"PicoHeadV2",
"PicoHead",
"PETRHead",
"OTAVFLHead",
"OTAHead",
"LDGFLHead",
"GFLHead",
"FCOSRHead",
"FCOSHead",
"FaceHead",
"DETRHead",
"DeformableDETRHead",
]
def backward_test(self):
"""
backward test for modules
"""
opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=self.net.parameters())
final_loss = 0
for epoch in range(5):
if (
self.module_name == "SparseRCNNHead"
or self.module_name == "FaceHead"
or self.module_name == "CenterTrackHead"
or self.module_name == "CenterNetHead"
or self.module_name == "RoIAlign"
):
predicts = self.net(self.data, **self.input)
elif self.module_name == "DETRHead" or self.module_name == "DeformableDETRHead":
predicts = self.net(self.input, self.data)
else:
predicts = self.net(self.data)
if self.module_name in self.predicts_module:
predicts = predicts[1]
elif self.module_name == "CenterTrackHead":
predicts = predicts["bboxes"]
elif self.module_name == "CenterNetHead":
predicts = predicts["heatmap"]
loss_sum = 0
length = len(predicts)
for i in range(0, length):
loss = paddle.nn.functional.square_error_cost(predicts[i], self.label[i])
avg_loss = paddle.mean(loss)
loss_sum += avg_loss
print("loss_sum:{}".format(loss_sum))
final_loss = loss_sum
loss_sum.backward()
opt.step()
opt.clear_grad()
print("final_loss:{}".format(final_loss))
self.type = "backward"
# paddle.save(final_loss, "{}_{}.pdparams".format(self.module_name, self.type))
self.check_result(final_loss)
return final_loss
def forward_test(self):
"""
forward test for modules
"""
if (
self.module_name == "SparseRCNNHead"
or self.module_name == "FaceHead"
or self.module_name == "CenterTrackHead"
or self.module_name == "CenterNetHead"
or self.module_name == "MaskHead"
or self.module_name == "PETRHead"
or self.module_name == "RoIAlign"
):
predicts = self.net(self.data, **self.input)
elif self.module_name == "DETRHead" or self.module_name == "DeformableDETRHead":
predicts = self.net(self.input, self.data)
else:
predicts = self.net(self.data)
if self.module_name in self.predicts_module:
predicts = predicts[1]
elif self.module_name == "CenterTrackHead":
predicts = predicts["bboxes"]
elif self.module_name == "CenterNetHead":
predicts = predicts["heatmap"]
print("predicts:{}".format(predicts))
self.type = "forward"
# paddle.save(predicts, "{}_{}.pdparams".format(self.module_name, self.type))
self.check_result(predicts)
return predicts
def compare(self, result, standard):
"""
compare between test result and standard result.
"""
compare_equal = True
if isinstance(result, list):
tensor_num = len(result)
for i in range(0, tensor_num):
allclose_tensor = paddle.allclose(result[i], standard[i], rtol=1e-05, atol=1e-08)
allclose_bool = bool(allclose_tensor.numpy())
compare_equal = compare_equal and allclose_bool
else:
allclose_tensor = paddle.allclose(result, standard, rtol=1e-05, atol=1e-08)
allclose_bool = bool(allclose_tensor.numpy())
compare_equal = compare_equal and allclose_bool
return compare_equal
def check_result(self, result):
"""
check result
"""
if not os.path.exists("standard_result"):
wget.download("https://paddle-qa.bj.bcebos.com/PaddleDetection/standard_result.zip")
os.system("unzip -q standard_result.zip")
standard_value = paddle.load("./standard_result/{}_{}.pdparams".format(self.module_name, self.type))
print("standard_value:{}".format(standard_value))
compare_res = self.compare(result, standard_value)
if compare_res:
print("{}_{} test success!".format(self.module_name, self.type))
else:
print("{}_{} test failed!".format(self.module_name, self.type))
| UTF-8 | Python | false | false | 5,505 | py | 2,348 | test_module.py | 1,564 | 0.541689 | 0.536785 | 0 | 150 | 35.7 | 108 |
csc/csc-agility-python-shell | 15,470,472,202,301 | ae65661e955e99233f8f5b759089f332b0d5332e | f13e2d2d812ca2e7b1dccc784de6c3c074707bd0 | /core/restclient/generator/v3_0/agilitymodel/base/InputVariableList.py | c7fd5f04364d5063f85ee11c5cfba041f35deb63 | []
| no_license | https://github.com/csc/csc-agility-python-shell | 92181ad26835b2924f3ce1a860eb76826595d398 | 3892fa4f9c1de14dbb8abb35d68a6c70ab0d6acc | refs/heads/master | 2020-03-01T10:43:41.390340 | 2016-02-17T22:02:11 | 2016-02-17T22:02:11 | 14,373,625 | 3 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | from AgilityModelBase import AgilityModelBase
class InputVariableListBase(AgilityModelBase):
'''
classdocs
'''
def __init__(self, inputvariable=[]):
AgilityModelBase.__init__(self)
self._attrSpecs = getattr(self, '_attrSpecs', {})
self._attrSpecs.update({'inputVariable': {'maxOccurs': 'unbounded', 'type': 'InputVariable', 'name': 'inputvariable', 'minOccurs': '0', 'native': False}})
self.inputvariable = inputvariable
| UTF-8 | Python | false | false | 471 | py | 598 | InputVariableList.py | 581 | 0.656051 | 0.653928 | 0 | 11 | 41.727273 | 162 |
karthikpappu/pyc_source | 4,114,578,690,058 | 20ad532b41675a3f1b70d8f2e230b019032f1011 | 91fa095f423a3bf47eba7178a355aab3ca22cf7f | /pycfiles/salve-2.4.2-py2.7/file.py | 0790424faaad3650d995903f8558a4028ffc85fd | []
| no_license | https://github.com/karthikpappu/pyc_source | 0ff4d03e6d7f88c1aca7263cc294d3fa17145c9f | 739e7e73180f2c3da5fd25bd1304a3fecfff8d6e | refs/heads/master | 2023-02-04T11:27:19.098827 | 2020-12-27T04:51:17 | 2020-12-27T04:51:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04)
# [GCC 8.4.0]
# Embedded file name: build/bdist.linux-armv7l/egg/salve/block/file.py
# Compiled at: 2015-11-06 23:45:35
import os, salve
from salve.action import ActionList, backup, copy, create, modify
from salve.api import Block
from .base import CoreBlock
class FileBlock(CoreBlock):
"""
A file block describes an action performed on a file.
This includes creation, deletion, and string append.
"""
def __init__(self, file_context):
"""
File Block constructor
Args:
@file_context
The FileContext for this block.
"""
CoreBlock.__init__(self, Block.types.FILE, file_context)
for attr in ['target', 'source']:
self.path_attrs.add(attr)
for attr in ['target']:
self.min_attrs.add(attr)
self.primary_attr = 'target'
def compile(self):
"""
Uses the FileBlock to produce an action.
The type of action produced depends on the value of the block's
'action' attribute.
If it is a create action, this boils down to an invocation of
'touch -a'. If it is a copy action, this is a file copy preceded
by an attempt to back up the file being overwritten.
"""
salve.logger.info(('{0}: Converting FileBlock to FileAction').format(str(self.file_context)))
self.ensure_has_attrs('action')
def ensure_abspath_attrs(*args):
"""
A helper method that wraps ensure_has_attrs()
It additionally ensures that the attribute values are
absolute paths.
Args:
@args
A variable length argument list of attribute identifiers
subject to inspection.
"""
self.ensure_has_attrs(*args)
for arg in args:
assert os.path.isabs(self[arg])
def add_action(file_act, new, prepend=False):
"""
A helper method to merge actions into ALs when it is unknown
if the original action is an AL. Returns the merged action,
and makes no guarantees about preserving the originals.
Args:
@file_act
The original action to be extended with @new
@new
The action being appended or prepended to @file_act
KWArgs:
@prepend
When True, prepend @new to @file_act. When False, append
instead.
"""
if file_act is None:
return
else:
if not isinstance(file_act, ActionList):
file_act = ActionList([file_act], self.file_context)
if prepend:
file_act.prepend(new)
else:
file_act.append(new)
return file_act
triggers_backup = ('copy', )
file_action = None
if self['action'] == 'copy':
ensure_abspath_attrs('source', 'target')
file_action = copy.FileCopyAction(self['source'], self['target'], self.file_context)
elif self['action'] == 'create':
ensure_abspath_attrs('target')
file_action = create.FileCreateAction(self['target'], self.file_context)
else:
raise self.mk_except('Unsupported FileBlock action.')
if 'mode' in self:
chmod = modify.FileChmodAction(self['target'], self['mode'], self.file_context)
file_action = add_action(file_action, chmod)
if 'user' in self and 'group' in self:
chown = modify.FileChownAction(self['target'], self['user'], self['group'], self.file_context)
file_action = add_action(file_action, chown)
if self['action'] in triggers_backup:
backup_action = backup.FileBackupAction(self['target'], self.file_context)
file_action = add_action(file_action, backup_action, prepend=True)
return file_action | UTF-8 | Python | false | false | 4,148 | py | 114,545 | file.py | 111,506 | 0.572565 | 0.561716 | 0 | 109 | 37.06422 | 106 |
malva28/Cat-Jump-3D | 17,085,379,936,966 | a30411f4dbd37ddf97f59b41c355cadb148d8add | dbd3a307e11539780c16c0702e11594abad4e0c2 | /codigo/models/staticViewObjects.py | eff0e33a6925ec63634e4e7830657079fc405b96 | [
"MIT"
]
| permissive | https://github.com/malva28/Cat-Jump-3D | 6ceb90ecd2bccf5e7d9d59a6e1fb75b87a644de7 | 1a7270d4a72c27749a223b57438265825da1333f | refs/heads/main | 2023-02-13T08:29:45.889033 | 2021-01-18T03:25:46 | 2021-01-18T03:25:46 | 330,538,340 | 0 | 0 | null | false | 2021-01-18T03:25:47 | 2021-01-18T02:54:14 | 2021-01-18T02:59:53 | 2021-01-18T03:25:47 | 0 | 0 | 0 | 0 | Python | false | false | """
autor: Valentina Garrido
"""
import scene_graph_3D as sg
import easy_shaders as es
import basic_shapes as bs
import transformations as tr
import numpy as np
import basic_shapes_extended as bs_ext
import lighting_shaders as ls
from models.block import Block
import scene_graph_3D as sg
from models.gameCameras import StaticCamera
from models.movingQuad import MCListener
from textureSphere import generateSemiSphereTextureNormals
from sprites3D import create_3D_sprite_single_color
from OpenGL.GL import GL_REPEAT, GL_NEAREST
class StaticViewObject:
    """Base class for HUD-style objects rendered through a fixed camera.

    Subclasses are expected to assign a scene-graph root to ``self.model``;
    until they do, ``draw`` is a no-op.
    """

    def __init__(self):
        # Scene-graph root of this object; populated by subclasses.
        self.model = None
        # Camera whose view matrix never changes between frames.
        self.static_cam = StaticCamera()

    def draw(self, projection, pipeline=None):
        """Render the model (if any) with the static camera's view matrix."""
        if not self.model:
            return
        view = self.static_cam.get_view()
        sg.drawSceneGraphNode(self.model, projection, view, pipeline=pipeline)
class GameOverCG(StaticViewObject):
    """End-of-game card: a textured picture plus an animated 3D text line.

    Concrete subclasses pick the actual picture/text via ``set_picture`` /
    ``set_text`` and override ``animation`` to animate the letters.
    """

    def __init__(self):
        super().__init__()
        # Picture quad, scaled to keep the 150x200 texture aspect ratio.
        picture = sg.SceneGraphNode("GameOverPic")
        picture.transform = tr.matmul([tr.translate(0, 0.5, 0),
                                       tr.scale(1.8, 1.8 * 200 / 150, 1)])

        # Caption quad wrapped in rotation and translation nodes so it can
        # be spun and placed independently of the picture.
        caption = sg.SceneGraphNode("GameOverText")
        caption.transform = tr.scale(1.8, 1.8 * 15 / 100, 1)

        caption_rot = sg.SceneGraphNode("GameOverTextRot")
        caption_rot.children += [caption]

        caption_tr = sg.SceneGraphNode("GameOverTextTR")
        caption_tr.transform = tr.translate(0, 2, 0)
        caption_tr.children += [caption_rot]

        root = sg.SceneGraphNode("GameOver")
        root.children += [picture, caption_tr]

        self.pause = 1.0
        self.model = root

    def soft_rotate(self, t, min_angle=-np.pi / 4, max_angle=np.pi / 4):
        """Map time *t* to an angle oscillating between the two bounds."""
        span = max_angle - min_angle
        return min_angle + span * np.sin(t)

    def update(self, t, dt):
        """Advance the card by one frame (delegates to ``animation``)."""
        self.animation(t, dt)

    def animation(self, t, dt):
        """Hook for subclasses; the base card is static."""

    def set_picture(self, filename):
        """Load *filename* onto the picture quad and place it on screen."""
        shape = bs_ext.create4VertexTextureNormal(
            filename,
            [-0.5, 0., -0.5], [0.5, 0., -0.5],
            [0.5, -0.5, 0.5], [-0.5, 0, 0.5],
            nx=1, ny=1)
        gpu_pic = es.toGPUShape(shape, GL_REPEAT, GL_NEAREST)
        picture = sg.findNode(self.model, "GameOverPic")
        sg.translate(picture, 0, 1.6, 0)
        sg.scale(picture, 1, 1, 2.5)
        picture.children += [gpu_pic]

    def set_text(self, phrase):
        """Build a ``Text3D`` for *phrase* and attach it under the card."""
        self.text_3D = Text3D(phrase)
        self.model.children += [self.text_3D.model]
class WinCG(GameOverCG):
    """Victory card: rainbow "you win" caption with pulsing, spinning letters."""

    def __init__(self):
        super().__init__()
        self.set_picture("textures/gameWonPic.png")
        self.set_text("you win")
        # One colour per character of "you win" (index 3 is the space,
        # which Text3D.read_letters skips).
        rainbow = [{'r': 1., 'g': 0., 'b': 0.}, {'r': 1., 'g': 1., 'b': 0.},
                   {'r': 0., 'g': 1., 'b': 0.}, {'r': 0., 'g': 0., 'b': 0.},
                   {'r': 0., 'g': 1., 'b': 1.}, {'r': 0., 'g': 0., 'b': 1.},
                   {'r': 1., 'g': 0., 'b': 1.}]
        self.text_3D.read_letters(rainbow)
        self.reset_timers()
        # Seconds between consecutive letters joining the spin wave.
        self.rotation_delay = 0.2
        self.rotations_left = np.zeros(len(self.text_3D.letters), dtype=float)
        sg.uniformScale(self.text_3D.model, 0.6)
        sg.translate(self.text_3D.model, 0., 2., 1)

    def animation(self, t, dt):
        """Pulse every letter's scale and, periodically, spin them in a wave."""
        phase_step = 2 * np.pi / len(self.text_3D)
        for i, letter_tr in enumerate(self.text_3D.letters):
            offset = t + phase_step * i
            # Fast pulse: scale oscillates between 0.8 and 1.3.
            letter_tr.transform = tr.uniformScale(
                0.8 + 0.5 * np.fabs(np.sin(7 * offset)))
        self.timer_for_next_rotation -= dt
        if self.timer_for_next_rotation <= 0.:
            if not self.rotating:
                # Arm a full turn for every letter.
                self.rotations_left = np.ones(len(self.text_3D.letters),
                                              dtype=float) * 2 * np.pi
                self.rotating = True
            self.rotate_each_letter(dt, self.rotations_left)

    def rotate_each_letter(self, dt, rotations_left):
        """Advance the staggered spin; reset the timers once all letters finish.

        Letters join the rotation one at a time, ``rotation_delay`` seconds
        apart, which produces the wave effect.
        """
        if np.all(rotations_left <= 0.):
            self.reset_timers()
            return
        letter_count = len(self.text_3D.letters)
        started = int(np.floor(self.time_rotating / self.rotation_delay))
        for i in range(min(started, letter_count)):
            rotations_left[i] = self.rotate_letter(
                dt, self.text_3D.letters[i], rotations_left[i])
        self.time_rotating += dt

    def reset_timers(self):
        """Schedule the next spin in five seconds and clear the spin state."""
        self.timer_for_next_rotation = 5.
        self.time_rotating = 0.
        self.rotating = False

    def rotate_letter(self, dt, letter_node, rotation_left=2 * np.pi):
        """Rotate one letter around Z by up to ``dt * pi/2`` radians.

        Returns the remaining angle; a non-positive value means this
        letter has completed its turn.
        """
        if rotation_left > 0.:
            step = dt * (np.pi / 2)
            inner = sg.findNode(letter_node, "letter")
            # Clamp the final step so the letter never overshoots the turn.
            sg.rotationZ(inner,
                         rotation_left if rotation_left - step < 0. else step)
            rotation_left -= step
        return rotation_left
class LoseCG(GameOverCG):
    """Defeat card: purple "you lost" caption with wobbling, spinning letters."""

    def __init__(self):
        super().__init__()
        self.set_picture("textures/gameOverPic.png")
        self.set_text("you lost")
        # Single colour reused for every letter; letters are 0.5 deep.
        self.text_3D.read_letters([{'r': 0.5, 'g': 0., 'b': 1.}], 0.5)
        self.reset_timers()
        # Seconds between consecutive letters joining the spin wave.
        self.rotation_delay = 0.2
        self.rotations_left = np.zeros(len(self.text_3D.letters), dtype=float)
        sg.uniformScale(self.text_3D.model, 0.6)
        sg.translate(self.text_3D.model, 0., 2., 1)

    def animation(self, t, dt):
        """Gently wobble each letter's scale; periodically spin them in a wave."""
        phase_step = 2 * np.pi / len(self.text_3D)
        for i, letter_tr in enumerate(self.text_3D.letters):
            offset = t + phase_step * i
            # Slow wobble: scale oscillates between 0.8 and 1.2.
            letter_tr.transform = tr.uniformScale(1 + 0.2 * np.sin(offset))
        self.timer_for_next_rotation -= dt
        if self.timer_for_next_rotation <= 0.:
            if not self.rotating:
                # Arm a full turn for every letter.
                self.rotations_left = np.ones(len(self.text_3D.letters),
                                              dtype=float) * 2 * np.pi
                self.rotating = True
            self.rotate_each_letter(dt, self.rotations_left)

    def rotate_each_letter(self, dt, rotations_left):
        """Advance the staggered spin; reset the timers once all letters finish."""
        if np.all(rotations_left <= 0.):
            self.reset_timers()
            return
        letter_count = len(self.text_3D.letters)
        started = int(np.floor(self.time_rotating / self.rotation_delay))
        for i in range(min(started, letter_count)):
            rotations_left[i] = self.rotate_letter(
                dt, self.text_3D.letters[i], rotations_left[i])
        self.time_rotating += dt

    def reset_timers(self):
        """Schedule the next spin in five seconds and clear the spin state."""
        self.timer_for_next_rotation = 5.
        self.time_rotating = 0.
        self.rotating = False

    def rotate_letter(self, dt, letter_node, rotation_left=2 * np.pi):
        """Rotate one letter around X by up to ``dt * pi/2`` radians.

        Returns the remaining angle; a non-positive value means this
        letter has completed its turn.
        """
        if rotation_left > 0.:
            step = dt * (np.pi / 2)
            inner = sg.findNode(letter_node, "letter")
            # Clamp the final step so the letter never overshoots the turn.
            sg.rotationX(inner,
                         rotation_left if rotation_left - step < 0. else step)
            rotation_left -= step
        return rotation_left
class Text3D(StaticViewObject):
    """A line of 3D letter sprites laid out horizontally across [-1, 1]."""

    def __init__(self, text_phrase, shader=None):
        super().__init__()
        self.phrase = text_phrase
        # Per-letter "letterTR" nodes, in phrase order (spaces excluded).
        self.letters = []
        self.model = sg.SceneGraphNode("phrase")
        self.len_phrase = len(text_phrase)
        self.set_shader(shader)

    def __len__(self):
        return self.len_phrase

    def set_shader(self, shader=None):
        """Attach *shader* to the root node (None keeps the default)."""
        self.model.shader = shader

    def read_letters(self, colors, letter_depth=0.5):
        """Create one sprite per non-space character of the phrase.

        ``colors`` holds either a single colour dict reused for every
        letter, or one dict per character (spaces included but skipped).
        """
        base_file = "textures/letters/letter_{}.png"
        # Slot width so the whole phrase spans x in [-1, 1].
        dsize = 2 / len(self.phrase)
        for i, letter in enumerate(self.phrase):
            if letter == " ":
                continue  # leave an empty slot where the space goes
            color = colors[0] if len(colors) == 1 else colors[i]
            shape = create_3D_sprite_single_color(base_file.format(letter),
                                                  color, letter_depth)
            gpu_letter = es.toGPUShape(shape)
            letter_node = sg.SceneGraphNode("letter")
            letter_node.children = [gpu_letter]
            sg.uniformScale(letter_node, dsize)
            sg.rotationY(letter_node, np.pi / 2)
            letter_pos = sg.SceneGraphNode("letterPos")
            letter_pos.children = [letter_node]
            sg.translate(letter_pos, dsize * (i + 0.5) - 1, 0., 0.)
            letter_tr = sg.SceneGraphNode("letterTR")
            letter_tr.children = [letter_pos]
            self.model.children += [letter_tr]
            self.letters.append(letter_tr)
        # Mirror in X so the text reads correctly from the camera.
        sg.scale(self.model, -1, 1, 1)
class LifeGauge(StaticViewObject, MCListener):
def __init__(self, ini_pos_x, ini_pos_z):
super().__init__()
self.pos_x = ini_pos_x
self.pos_z = ini_pos_z
self.name = "LifeGauge"
self.current_life_sprite = 0
self.sprites = []
lifeGauge = sg.SceneGraphNode(self.name)
sg.rotationX(lifeGauge, np.pi/2)
sg.uniformScale(lifeGauge, 0.2)
lifeGaugeTR = sg.SceneGraphNode(self.name+"TR")
sg.translate(lifeGaugeTR, self.pos_x, 1.8, self.pos_z)
lifeGaugeTR.children += [lifeGauge]
self.model = lifeGaugeTR
self.default_sprite()
def load_sprites(self):
filenames = ["textures/crying.png",
"textures/slight_frown.png",
"textures/slight_smile.png",
"textures/smiley.png"]
self.sprites = [es.toGPUShape(generateSemiSphereTextureNormals(img, 20, 20), GL_REPEAT, GL_NEAREST)
for img in filenames]
def set_sprite(self, gpu_sprite):
lifeGauge = sg.findNode(self.model, self.name)
lifeGauge.children = [gpu_sprite]
def default_sprite(self):
self.current_life_sprite = 3
self.load_sprites()
self.change_life_display(self.current_life_sprite)
def change_life_display(self, ind):
if ind >= 0 and ind < len(self.sprites):
self.current_life_sprite = ind
sprite = self.sprites[self.current_life_sprite]
self.set_sprite(sprite)
def on_life_reduce(self, mc):
self.change_life_display(self.current_life_sprite-1)
def on_lose(self, mc):
self.change_life_display(0)
def on_win(self, mc):
self.change_life_display(3)
| UTF-8 | Python | false | false | 11,726 | py | 26 | staticViewObjects.py | 24 | 0.567627 | 0.550913 | 0 | 333 | 34.207207 | 131 |
HalimaBouzidi/CNN-people-detect | 12,850,542,186,364 | 8120847a8c11b135380befc006bef4d0df698859 | 2104614627b91d62740853d9acf68cb871ca94a8 | /train_person_classification.py | 5105d0541d0d6a3a5a1d4c79eaecf0be7fbbab78 | []
| no_license | https://github.com/HalimaBouzidi/CNN-people-detect | 31aeec53b043f14f2c3262833eff8bef770a49a2 | dcb7e4e4a32ca39f4cb3096d0f011b366cd8e496 | refs/heads/master | 2020-08-08T07:54:37.943847 | 2018-02-13T09:48:42 | 2018-02-13T09:48:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # import IN THIS ORDER - otherwise cv2 gets loaded after tensorflow,
# and tensorflow loads an incompatible internal version of libpng
# https://github.com/tensorflow/tensorflow/issues/1924
import cv2
import numpy as np
import tensorflow as tf
from Datasets.Dataset import batcher
from Datasets.tud import load_tud
from Datasets.inria import load_inria
from Datasets.zurich import load_zurich
import Model
class PersonModel(Model.BooleanModel):
def train(self, person_iter):
batch_size = 50
for batch_no, batch in enumerate(batcher(person_iter, batch_size=100)):
train_accuracy = self.accuracy.eval(feed_dict={
self.x:batch[0], self.y_: batch[1], self.keep_prob: 1.0})
if batch_no % 5 == 0:
print("Step %i, training accuracy %g"%(batch_no, train_accuracy))
# r = y_conv.eval(feed_dict={self.x: batch[0], keep_prob: 1.0})
# print('Guess: ', np.round(r.flatten()))
# print('Actual:', np.round(batch[1].flatten()))
self.train_step.run(feed_dict={self.x: batch[0], self.y_: batch[1], self.keep_prob: 0.5})
if __name__ == '__main__':
combined_dataset = load_tud('/mnt/data/Datasets/pedestrians/tud/tud-pedestrians') + \
load_tud('/mnt/data/Datasets/pedestrians/tud/tud-campus-sequence') + \
load_tud('/mnt/data/Datasets/pedestrians/tud/tud-crossing-sequence') + \
load_tud('/mnt/data/Datasets/pedestrians/tud/TUD-Brussels') + \
load_tud('/mnt/data/Datasets/pedestrians/tud/train-210') + \
load_tud('/mnt/data/Datasets/pedestrians/tud/train-400') + \
load_tud('/mnt/data/Datasets/pedestrians/tud/TUD-MotionPairs/positive') + \
load_tud('/mnt/data/Datasets/pedestrians/tud/TUD-MotionPairs/negative') + \
load_inria('/mnt/data/Datasets/pedestrians/INRIA/INRIAPerson') + \
load_zurich('/mnt/data/Datasets/pedestrians/zurich')
combined_dataset.train.generate_negative_examples()
combined_dataset.test.generate_negative_examples()
combined_dataset.shuffle()
combined_dataset.balance()
train_pos = combined_dataset.train.num_positive_examples
train_neg = combined_dataset.train.num_negative_examples
print(len(combined_dataset.train), 'training examples ({},{}).'.format(train_pos, train_neg))
print(len(combined_dataset.test), 'testing examples ({},{}).'.format(combined_dataset.test.num_positive_examples, combined_dataset.test.num_negative_examples))
nn_im_w = 64
nn_im_h = 160
with tf.Session() as sess:
model = PersonModel(sess)
model.build_graph(nn_im_w, nn_im_h)
print("Training...")
model.train(combined_dataset.train.iter_people())
model.save('saved_model/')
| UTF-8 | Python | false | false | 2,782 | py | 11 | train_person_classification.py | 9 | 0.659238 | 0.646298 | 0 | 61 | 44.606557 | 163 |
datanooblol/ggwp | 5,712,306,518,341 | ab68136805de69e274db452f5593eb4678198971 | fb8710c45e12c7b7f982eafe13e5acf7d212a2fd | /ggwp/EzUtils/__init__.py | 055d59419ed8122f012c04e14838b8c7a18b0353 | []
| no_license | https://github.com/datanooblol/ggwp | c59cd07be3f5595657450380bb8ed9d522110b4f | 249cbeb2988bfc8b448876fe1b207b35367d8667 | refs/heads/main | 2023-05-04T15:59:36.598932 | 2021-05-26T02:53:29 | 2021-05-26T02:53:29 | 363,434,757 | 1 | 0 | null | false | 2021-05-26T02:53:30 | 2021-05-01T14:49:51 | 2021-05-21T19:00:51 | 2021-05-26T02:53:29 | 104 | 1 | 0 | 0 | Python | false | false | from ggwp.EzUtils.EzUtils import * | UTF-8 | Python | false | false | 34 | py | 26 | __init__.py | 23 | 0.823529 | 0.823529 | 0 | 1 | 34 | 34 |
LeishenKOBE/2020-every-month-plan | 8,718,783,652,440 | cc552fd38d15bcc929723e8358087b652b5ecb80 | 35f40b2e33565ffbabc790aca7d8706d535f84ae | /learn-path/LearningPythonSpider/requests/1.py | ed5b63af5fb318c6a5bc863ad3fdbfeb420c4163 | [
"MIT"
]
| permissive | https://github.com/LeishenKOBE/2020-every-month-plan | 1a9b890f85bbd5fcc04d1f12925699989f5ead24 | 3c4ba7e16af6ee5761b34663ad4800e18d350875 | refs/heads/master | 2020-12-03T06:38:05.486935 | 2020-12-02T17:10:08 | 2020-12-02T17:10:08 | 231,230,857 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
# params = {
# 'name':"geekdigging",
# 'age': '18'
# }
# headers = {
# }
# r = requests.get('https://httpbin.org/get',params=params)
# print(r.json())
# r = requests.get("https://www.baidu.com/img/bd_logo1.png?where=super")
# with open('baidu.png','wb') as f:
# f.write(r.content)
import requests
# r = requests.get("https://www.csdn.net")
# print(type(r.cookies), r.cookies)
# for key, value in r.cookies.items():
# print(key + '=' + value)
# headers = {
# 'cookie':'_zap=f1554d31-1519-45df-95ef-49b5d7da834c; d_c0="AFCnL10XWQ-PTlAtjG3TiOg63rTwifQNKDM=|1556456851"; _xsrf=3byY5lStlpBCh0YENUGlU5hgSDa9HnOm; __utma=51854390.1541155630.1556550259.1561007912.1562421959.8; __utmv=51854390.100-1|2=registration_date=20130814=1^3=entry_date=20130814=1; z_c0=Mi4xWFU0VEFBQUFBQUFBVUtjdlhSZFpEeGNBQUFCaEFsVk5ncTZ5WGdCelNMX3ZZdEdGUjFic0piUzZGRkoyc1N2NmZB|1573216386|75a075eb65400bb0c8b4201362d000f76902e683; q_c1=438a92f109b44e7884f82aab1ebf8ada|1582033519000|1556889269000; _ga=GA1.2.1541155630.1556550259; _gid=GA1.2.1431849190.1582948255; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1582948254,1582960222,1582960446,1582977677; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1582983449; tshl=; tst=h; KLBRSID=9d75f80756f65c61b0a50d80b4ca9b13|1582983493|1582983444',
# 'host':'www.zhihu.com',
# 'use-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
# }
# r = requests.get("https://www.zhihu.com/hot",headers=headers)
# print(r.text)import requests
# headers = {
# 'cookie': '_zap=7c875737-af7a-4d55-b265-4e3726f8bd30; _xsrf=MU9NN2kHxdMZBVlENJkgnAarY6lFlPmu; d_c0="ALCiqBcc8Q-PTryJU9ro0XH9RqT4NIEHsMU=|1566658638"; UM_distinctid=16d16b54075bed-05edc85e15710b-5373e62-1fa400-16d16b54076e3d; tst=r; q_c1=1a9d0d0f293f4880806c995d7453718f|1573961075000|1566816770000; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1574492254,1574954599,1575721552,1575721901; tgw_l7_route=f2979fdd289e2265b2f12e4f4a478330; CNZZDATA1272960301=1829573289-1568039631-%7C1575793922; capsion_ticket="2|1:0|10:1575798464|14:capsion_ticket|44:M2FlYTAzMDdkYjIzNDQzZWJhMDcyZGQyZTZiYzA1NmU=|46043c1e4e6d9c381eb18f5dd8e5ca0ddbf6da90cddf10a6845d5d8c589e7754"; z_c0="2|1:0|10:1575798467|4:z_c0|92:Mi4xLXNyV0FnQUFBQUFBc0tLb0Z4enhEeVlBQUFCZ0FsVk53eFRhWGdBSlc3WFo1Vk5RUThBMHMtanZIQ2tYcGFXV2pn|02268679f394bd32662a43630236c2fd97e439151b0132995db7322736857ab6"; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1575798469',
# 'host': 'www.zhihu.com',
# 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
# }
# r = requests.get('https://www.zhihu.com', headers = headers)
# print(r.text)
# cookies = '_zap=7c875737-af7a-4d55-b265-4e3726f8bd30; _xsrf=MU9NN2kHxdMZBVlENJkgnAarY6lFlPmu; d_c0="ALCiqBcc8Q-PTryJU9ro0XH9RqT4NIEHsMU=|1566658638"; UM_distinctid=16d16b54075bed-05edc85e15710b-5373e62-1fa400-16d16b54076e3d; tst=r; q_c1=1a9d0d0f293f4880806c995d7453718f|1573961075000|1566816770000; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1574492254,1574954599,1575721552,1575721901; tgw_l7_route=f2979fdd289e2265b2f12e4f4a478330; CNZZDATA1272960301=1829573289-1568039631-%7C1575793922; capsion_ticket="2|1:0|10:1575798464|14:capsion_ticket|44:M2FlYTAzMDdkYjIzNDQzZWJhMDcyZGQyZTZiYzA1NmU=|46043c1e4e6d9c381eb18f5dd8e5ca0ddbf6da90cddf10a6845d5d8c589e7754"; z_c0="2|1:0|10:1575798467|4:z_c0|92:Mi4xLXNyV0FnQUFBQUFBc0tLb0Z4enhEeVlBQUFCZ0FsVk53eFRhWGdBSlc3WFo1Vk5RUThBMHMtanZIQ2tYcGFXV2pn|02268679f394bd32662a43630236c2fd97e439151b0132995db7322736857ab6"; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1575798469'
# jar = requests.cookies.RequestsCookieJar()
# for cookie in cookies.split(';'):
# key,val = cookie.split('=',1)
# jar.set(key,val)
# headers_request = {
# 'host' : "www.zhihu.com",
# 'user-agent':"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36"
# }
# r = requests.get('https://www.zhihu.com',cookie = jar,headers = headers_request)
# print(r.text)
import requests
from bs4 import BeautifulSoup
response = requests.get('https://www.geekdigging.com/')
soup = BeautifulSoup(response.content, "html5lib")
print(soup.prettify()) | UTF-8 | Python | false | false | 4,270 | py | 202 | 1.py | 120 | 0.787354 | 0.480328 | 0 | 64 | 65.734375 | 911 |
Shan876/info3180-project1 | 4,999,341,962,868 | 3bfa078b30b5b0951b9a174ab9f5d76921c5a6e3 | ca2e32c3296aacc2c819ac018f25a51b0364bd33 | /app/models.py | 54a1608f882646e4cb2f20a3a13da90f34193758 | []
| no_license | https://github.com/Shan876/info3180-project1 | 76e63dd744aef44fca02a1a9b55e9e248a57726d | ae1af1099712eeecd97f7494884c0b71e9a1d5dc | refs/heads/master | 2023-03-29T07:49:36.732329 | 2021-03-24T05:04:27 | 2021-03-24T05:04:27 | 350,935,730 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from . import db
from sqlalchemy import Integer, Enum
import enum
class PropertyEnum(enum.Enum):
apartment = 'apartment'
house = 'house'
class PropertyModel(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100))
no_bedrooms = db.Column(db.Integer)
description = db.Column(db.String(500))
no_bathrooms = db.Column(db.Integer)
location = db.Column(db.String(100))
price = db.Column(db.Numeric(10, 2))
property_photo = db.Column(db.String(255))
property_type = db.Column(Enum(PropertyEnum))
def __init__(self, title, description, price, location, no_bedrooms, no_bathrooms, property_type, property_photo):
super().__init__()
self.title = title
self.description = description
self.price = price
self.location = location
self.no_bedrooms = no_bedrooms
self.no_bathrooms = no_bathrooms
self.property_type = property_type
self.property_photo = property_photo
| UTF-8 | Python | false | false | 1,013 | py | 7 | models.py | 4 | 0.661402 | 0.646594 | 0 | 32 | 30.65625 | 118 |
sahahn/ABCD_ML | 12,627,203,885,232 | 9dd3b5fc254cde0dfd40993002526cc5be03f051 | c56dfb37dbbb8ef55321aef5e7b74b091eeaa92b | /ABCD_ML/helpers/ML_Helpers.py | 2518ae07cc40d05fe8274ad0b31a32c849b846e7 | [
"MIT"
]
| permissive | https://github.com/sahahn/ABCD_ML | 77c6bd3848b6098ff080b86e4e170d1a0cede590 | f0b6477038e02cb8e63ae27f4ac319da345089ba | refs/heads/master | 2020-06-07T05:51:16.034912 | 2020-05-11T19:09:15 | 2020-05-11T19:09:15 | 192,940,180 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
ML_Helpers.py
====================================
File with various ML helper functions for ABCD_ML.
These are non-class functions that are used in _ML.py and Scoring.py
"""
import numpy as np
import inspect
from .Default_Params import get_base_params, proc_params, show
from copy import deepcopy
import nevergrad as ng
from ..main.Input_Tools import is_special, Select
from nevergrad.parametrization.core import Constant
def compute_macro_micro(scores, n_repeats, n_splits, weights=None):
'''Compute and return scores, as computed froma repeated k-fold.
Parameters
----------
scores : list or array-like
Should contain all of the scores
and have a length of `n_repeats` * `n_splits`
n_repeats : int
The number of repeats
n_splits : int
The number of splits per repeat
Returns
----------
float
The mean macro score
float
The standard deviation of the macro score
float
The standard deviation of the micro score
'''
r_scores = np.reshape(np.array(scores), (n_repeats, n_splits))
if weights is None:
macro_scores = np.mean(r_scores, axis=1)
else:
r_weights = np.reshape(np.array(weights), (n_repeats, n_splits))
macro_scores = np.average(r_scores, weights=r_weights, axis=1)
return (np.mean(macro_scores), np.std(macro_scores),
np.std(scores))
def is_array_like(in_val):
if hasattr(in_val, '__len__') and (not isinstance(in_val, str)) and \
(not isinstance(in_val, dict)) and (not hasattr(in_val, 'fit')) and \
(not hasattr(in_val, 'transform')):
return True
else:
return False
def conv_to_list(in_val, amt=1):
if in_val is None:
return None
if not is_array_like(in_val) or is_special(in_val):
in_val = [in_val for i in range(amt)]
return in_val
def proc_input(in_vals):
'''Performs common preproc on a list of str's or
a single str.'''
if isinstance(in_vals, list):
for i in range(len(in_vals)):
in_vals[i] = proc_str_input(in_vals[i])
else:
in_vals = proc_str_input(in_vals)
return in_vals
def proc_str_input(in_str):
'''Perform common preprocs on a str.
Speicifcally this function is is used to process user str input,
as referencing a model, metric, or scaler.'''
if not isinstance(in_str, str):
return in_str
in_str = in_str.replace('_', ' ')
in_str = in_str.lower()
in_str = in_str.rstrip()
chunk_replace_dict = {' regressor': '',
' regresure': '',
' classifier': '',
' classifer': '',
' classification': ''}
for chunk in chunk_replace_dict:
in_str = in_str.replace(chunk, chunk_replace_dict[chunk])
# This is a dict of of values to replace, if the str ends with that value
endwith_replace_dict = {' score': '',
' loss': '',
' corrcoef': '',
' ap': ' average precision',
' jac': ' jaccard',
' iou': ' jaccard',
' intersection over union': ' jaccard',
' logistic': '',
}
for chunk in endwith_replace_dict:
if in_str.endswith(chunk):
in_str = in_str.replace(chunk, endwith_replace_dict[chunk])
startwith_replace_dict = {'rf ': 'random forest ',
'lgbm ': 'light gbm ',
'lightgbm ': 'light gbm ',
'svc ': 'svm ',
'svr ': 'svm ',
'neg ': '',
}
for chunk in startwith_replace_dict:
if in_str.startswith(chunk):
in_str = in_str.replace(chunk, startwith_replace_dict[chunk])
# This is a dict where if the input is exactly one
# of the keys, the value will be replaced.
replace_dict = {'acc': 'accuracy',
'bas': 'balanced accuracy',
'ap': 'average precision',
'jac': 'jaccard',
'iou': 'jaccard',
'intersection over union': 'jaccard',
'mse': 'mean squared error',
'ev': 'explained variance',
'mae': 'mean absolute error',
'msle': 'mean squared log error',
'med ae': 'median absolute error',
'rf': 'random forest',
'lgbm': 'light gbm',
'svc': 'svm',
'svr': 'svm',
}
if in_str in replace_dict:
in_str = replace_dict[in_str]
return in_str
def user_passed_param_check(params, obj_str, search_type):
if isinstance(params, dict):
if search_type is None:
return deepcopy(params), {}
else:
return {}, deepcopy(proc_params(params, prepend=obj_str))
return {}, {}
def proc_extra_params(obj, extra_params, non_search_params, params=None):
if extra_params is None or len(extra_params) == 0:
return non_search_params, params
try:
init_params = get_possible_init_params(obj)
except AttributeError:
return non_search_params, params
# If any of the passed keys in extra_params are valid params to
# the base classes init, add them to non_search_params
for key in extra_params:
if key in init_params:
non_search_params[key] = deepcopy(extra_params[key])
# Also, if params, and overlapping key from extra_params and init
# Remove that param distribution as extra_params takes precendence.
if params is not None and key in params:
del params[key]
return non_search_params, params
def get_obj_and_params(obj_str, OBJS, extra_params, params, search_type):
# First get the object, and process the base params!
try:
obj, param_names = OBJS[obj_str]
except KeyError:
print('Requested:', obj_str, 'does not exist!')
# If params is a str, change it to the relevant index
if isinstance(params, str):
try:
params = param_names.index(params)
except ValueError:
print('str', params, 'passed, but not found as an option for',
obj_str)
print('Setting to default base params setting instead!')
params = 0
# If passed param ind is a dict, assume that user passed
if isinstance(params, dict):
base_params = params.copy()
# If not a dict passed, grab the param name, then params
else:
# Get the actual params
try:
param_name = param_names[params]
except IndexError:
print('Invalid param ind', params, 'passed for', obj_str)
print('There are only', len(param_names), 'valid param options.')
print('Setting to default base params setting instead!')
param_name = param_names[0]
base_params = get_base_params(param_name)
# Process rest of params by search type, and w.r.t to extra params
non_search_params, params =\
process_params_by_type(obj, obj_str, base_params, extra_params, search_type)
return obj, non_search_params, params
def process_params_by_type(obj, obj_str, base_params, extra_params, search_type):
# Special case if search type None
if search_type is None:
if base_params == 0:
return {}, {}
elif not isinstance(base_params, dict):
raise RuntimeError('params passed with custom obj must be either 0, for None, or a dict')
params = base_params.copy()
non_search_params = {}
# First, grab any params from the params passed which are not Nevergrad distributions
# These, regardless of search_type of None, should still be passed to class init.
for p in params:
try:
module = params[p].__module__
if 'nevergrad' not in module:
non_search_params[p] = params[p]
except AttributeError:
non_search_params[p] = params[p]
# process extra params
non_search_params, _ =\
proc_extra_params(obj, extra_params, non_search_params, params=None)
return non_search_params, {}
# Otherwise, prepend obj_str to all keys in base params
params = proc_params(base_params, prepend=obj_str)
# process extra params
non_search_params, params =\
proc_extra_params(obj, extra_params, {}, params=params)
return non_search_params, params
def get_possible_init_params(model):
'''Helper function to grab the names of valid arguments to
classes init
Parameters
----------
model : object
The object to inspect
Returns
----------
All valid parameters to the model
'''
pos_params = dict(inspect.getmembers(model.__init__.__code__))
return pos_params['co_varnames']
def get_possible_fit_params(model):
'''Helper function to grab the names of valid arguments to
classes fit method
Parameters
----------
model : object w/ fit method
The model object to inspect
Returns
----------
All valid parameters to the model
'''
pos_params = dict(inspect.getmembers(model.fit.__code__))
return pos_params['co_varnames']
def get_avaliable_by_type(AVALIABLE):
avaliable_by_type = {}
for pt in AVALIABLE:
avaliable_by_type[pt] = set()
for select in AVALIABLE[pt]:
avaliable_by_type[pt].add(AVALIABLE[pt][select])
avaliable_by_type[pt] = list(avaliable_by_type[pt])
avaliable_by_type[pt].sort()
return avaliable_by_type
def get_objects_by_type(problem_type, AVALIABLE=None, OBJS=None):
avaliable_by_type = get_avaliable_by_type(AVALIABLE)
objs = []
for obj_str in avaliable_by_type[problem_type]:
if 'basic ensemble' not in obj_str:
obj = OBJS[obj_str][0]
obj_params = OBJS[obj_str][1]
objs.append((obj_str, obj, obj_params))
return objs
def get_objects(OBJS):
objs = []
for obj_str in OBJS:
obj = OBJS[obj_str][0]
obj_params = OBJS[obj_str][1]
objs.append((obj_str, obj, obj_params))
return objs
def proc_problem_type(problem_type, avaliable_by_type):
if problem_type is not None:
problem_types = [problem_type]
else:
problem_types = list(avaliable_by_type)
return problem_types
def show_objects(problem_type=None, obj=None,
show_params_options=True, show_object=False,
show_all_possible_params=False, AVALIABLE=None, OBJS=None):
if obj is not None:
objs = conv_to_list(obj)
for obj in objs:
show_obj(obj, show_params_options, show_object,
show_all_possible_params, OBJS)
return
if AVALIABLE is not None:
avaliable_by_type = get_avaliable_by_type(AVALIABLE)
problem_types = proc_problem_type(problem_type, avaliable_by_type)
for pt in problem_types:
show_type(pt, avaliable_by_type,
show_params_options,
show_object,
show_all_possible_params, OBJS)
else:
for obj in OBJS:
show_obj(obj, show_params_options, show_object,
show_all_possible_params, OBJS)
def show_type(problem_type, avaliable_by_type, show_params_options,
show_object, show_all_possible_params, OBJS):
print('Avaliable for Problem Type:', problem_type)
print('----------------------------------------')
print()
print()
for obj_str in avaliable_by_type[problem_type]:
if 'basic ensemble' in obj_str:
print('- - - - - - - - - - - - - - - - - - - - ')
print('("basic ensemble")')
print('- - - - - - - - - - - - - - - - - - - - ')
print()
elif 'user passed' not in obj_str:
show_obj(obj_str, show_params_options, show_object,
show_all_possible_params, OBJS)
def show_obj(obj_str, show_params_options, show_object,
show_all_possible_params, OBJS):
print('- - - - - - - - - - - - - - - - - - - - ')
OBJ = OBJS[obj_str]
print(OBJ[0].__name__, end='')
print(' ("', obj_str, '")', sep='')
print('- - - - - - - - - - - - - - - - - - - - ')
print()
if show_object:
print('Object: ', OBJ[0])
print()
if show_params_options:
show_param_options(OBJ[1])
if show_all_possible_params:
possible_params = get_possible_init_params(OBJ[0])
print('All Possible Params:', possible_params)
print()
def show_param_options(param_options):
print('Param Indices')
print('-------------')
for ind in range(len(param_options)):
print()
print(ind, ":", sep='')
print()
print('"', param_options[ind], '"', sep='')
show(param_options[ind])
print()
print('-------------')
def f_array(in_array):
return np.array(in_array).astype(float)
def find_ind(X, base_X_mask, X_r, r_ind, mask=True):
r_dtype = X_r.dtype
o_dtype = X.dtype
if r_dtype != o_dtype and mask:
ind = np.where(np.all(X[:, base_X_mask].astype(r_dtype) == X_r[r_ind],
axis=1))
elif r_dtype != o_dtype:
ind = np.where(np.all(X.astype(r_dtype) == X_r[r_ind], axis=1))
elif mask:
ind = np.where(np.all(X[:, base_X_mask] == X_r[r_ind], axis=1))
else:
ind = np.where(np.all(X == X_r[r_ind], axis=1))
try:
return ind[0][0]
except IndexError:
return None
def replace_with_in_params(params, original, replace):
new_params = {}
for key in params:
new_params[key.replace(original, replace)] = params[key]
return new_params
def type_check(ud):
'''Check if a nevergrad dist'''
def_dist = [ng.p.Log, ng.p.Scalar, ng.p.Choice, ng.p.TransitionChoice]
for dd in def_dist:
if isinstance(ud, dd):
return True
types_to_check = [int, float, list, tuple, str, bool, dict, set, Constant]
for ttc in types_to_check:
if isinstance(ud, ttc):
return False
return True
def proc_mapping(indx, mapping):
if len(mapping) > 0 and len(indx) > 0:
# If should proc list...
if is_array_like(indx[0]):
return [proc_mapping(i, mapping) for i in indx]
else:
new_indx = set()
for i in indx:
new = mapping[i]
# If mapping points to a list of values
if isinstance(new, list):
for n in new:
new_indx.add(n)
else:
new_indx.add(new)
# Sort, then return
new_indx = sorted(list(new_indx))
return new_indx
else:
return indx
def update_mapping(mapping, new_mapping):
# Go through the mapping and update each key with the new mapping
for key in mapping:
val = mapping[key]
if isinstance(val, list):
new_vals = []
for v in val:
if v in new_mapping:
new_val = new_mapping[v]
if isinstance(new_val, list):
new_vals += new_val
else:
new_vals.append(new_val)
else:
new_vals.append(v)
mapping[key] = sorted(list(set(new_vals)))
# Assume int if not list
else:
if val in new_mapping:
mapping[key] = new_mapping[val]
def wrap_pipeline_objs(wrapper, objs, inds, random_state,
n_jobs, **params):
# If passed wrapper n_jobs, and != 1, set base obj jobs to 1
if 'wrapper_n_jobs' in params:
if params['wrapper_n_jobs'] != 1:
n_jobs = 1
wrapped_objs = []
for chunk, ind in zip(objs, inds):
name, obj = chunk
# Try to set attributes
try:
obj.n_jobs = n_jobs
except AttributeError:
pass
try:
obj.random_state = random_state
except AttributeError:
pass
wrapped_obj = wrapper(obj, ind, **params)
wrapped_objs.append((name, wrapped_obj))
return wrapped_objs
def check_for_duplicate_names(objs_and_params):
'''Checks for duplicate names within an objs_and_params type obj'''
names = [c[0] for c in objs_and_params]
# If any repeats
if len(names) != len(set(names)):
new_objs_and_params = []
for obj in objs_and_params:
name = obj[0]
if name in names:
cnt = 0
used = [c[0] for c in new_objs_and_params]
while name + str(cnt) in used:
cnt += 1
# Need to change name within params also
base_obj = obj[1][0]
base_obj_params = obj[1][1]
new_obj_params = {}
for param_name in base_obj_params:
p_split = param_name.split('__')
new_param_name = p_split[0] + str(cnt)
new_param_name += '__' + '__'.join(p_split[1:])
new_obj_params[new_param_name] =\
base_obj_params[param_name]
new_objs_and_params.append((name + str(cnt),
(base_obj, new_obj_params)))
else:
new_objs_and_params.append(obj)
return new_objs_and_params
return objs_and_params
def proc_type_dep_str(in_strs, avaliable, problem_type):
'''Helper function to perform str correction on
underlying proble type dependent input, e.g., for
metric or ensemble_types, and to update extra params
and check to make sure input is valid ect...'''
as_arr = True
if not is_array_like(in_strs):
as_arr = False
in_strs = [in_strs]
if not check_avaliable(in_strs, avaliable, problem_type):
in_strs = proc_input(in_strs)
if not check_avaliable(in_strs, avaliable, problem_type):
raise RuntimeError(in_strs, 'are not avaliable for '
'this problem type')
avaliable_by_type = get_a_by_type(avaliable, in_strs, problem_type)
final_strs = [avaliable_by_type[in_str] for in_str in in_strs]
if as_arr:
return final_strs
return final_strs[0]
def check_avaliable(in_strs, avaliable, problem_type):
avaliable_by_type = get_a_by_type(avaliable, in_strs, problem_type)
check = np.array([m in avaliable_by_type for
m in in_strs]).all()
return check
def get_a_by_type(avaliable, in_strs, problem_type):
avaliable_by_type = avaliable[problem_type]
for s in in_strs:
if 'user passed' in s:
avaliable_by_type[s] = s
return avaliable_by_type
def param_len_check(names, params, _print=print):
if isinstance(params, dict) and len(names) == 1:
return params
try:
if len(params) > len(names):
_print('Warning! More params passed than objs')
_print('Extra params have been truncated.')
return params[:len(names)]
# If non list params here
except TypeError:
return [0 for i in range(len(names))]
while len(names) != len(params):
params.append(0)
return params
def replace_model_name(base_estimator_params):
new = {}
for key in base_estimator_params:
value = base_estimator_params[key]
split_key = key.split('__')
split_key[0] = 'estimator'
new_key = '__'.join(split_key)
new[new_key] = value
return new
def get_avaliable_run_name(name, model, scores):
if name is None or name == 'default':
if isinstance(model, Select):
name = 'select'
elif isinstance(model, list):
name = 'special'
elif isinstance(model.obj, str):
name = model.obj
else:
name = 'user passed'
if name in scores:
n = 0
while name + str(n) in scores:
n += 1
name = name + str(n)
return name
def get_reverse_mapping(mapping):
reverse_mapping = {}
for m in mapping:
key = mapping[m]
if isinstance(key, list):
for k in key:
reverse_mapping[k] = m
else:
reverse_mapping[key] = m
return reverse_mapping
| UTF-8 | Python | false | false | 21,189 | py | 77 | ML_Helpers.py | 48 | 0.540847 | 0.538629 | 0 | 772 | 26.446891 | 101 |
birdmw/Go-AI | 15,547,781,641,360 | 1f53bffefe41f4dad208f366af3ff53dee6770a6 | 3857e48b2ede13a951fa33312cbf3a5f3232a920 | /board_manager.py | 6e0f0b0fb758e059980988dd7be9c23ccbe3061f | []
| no_license | https://github.com/birdmw/Go-AI | 3fcb264e6cb3d9d8373ab2d69e41107c01192584 | 18823c9b253e47d67884ed9b49c385d46f32355f | refs/heads/master | 2016-09-05T21:00:45.291160 | 2015-10-03T00:13:40 | 2015-10-03T00:13:40 | 42,617,339 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Author: Matthew Bird <birdmw@gmail.com> (main author)
#
#
"""An interactive board game environment for playing Go and playing against the computer"""
### USE: W, A, S, D to manipulate cursor
### USE: C to have the computer play a move (models must be loaded)
### USE: space-bar to play a move
import data_manager
import model_manager
import numpy as np
import argparse
import sys
from go import Board, BoardError, View, clear, getch
def main(moves_to_play = 3):
    """Run the interactive terminal Go game.

    Parses command-line arguments, loads the popularity data and the
    per-move prediction models, then enters a keyboard-driven loop:
    W/A/S/D move the cursor, space plays a stone at the cursor, 'c'
    plays the model's predicted move, 'u'/'r' undo/redo, ESC quits.

    moves_to_play -- how many move-indexed models to load for the
                     computer player (forwarded to model_manager).
    """
    # Get arguments
    parser = argparse.ArgumentParser(description='Starts a game of go in the terminal.')
    parser.add_argument('-s', '--size', type=int, default=19, help='size of board')
    args = parser.parse_args()

    if args.size < 7 or args.size > 19:
        sys.stdout.write('Board size must be between 7 and 19!\n')
        sys.exit(0)

    # Initialize board and view
    board = Board(args.size)
    view = View(board)
    err = None

    # Game state shared with the nested action callbacks below; kept at
    # module scope so the callbacks can reassign it via ``global``.
    global move_count, prev_move_count, dm, mm, npboard, pred
    pred = (1, 1)  # last model prediction, shown in the status line
    dm = data_manager.data_manager()
    dm.load_popularity_boards()
    mm = model_manager.model_manager()
    mm.load_many_models(1, moves_to_play)
    move_count = 0
    prev_move_count = move_count

    # actions
    def goboard_to_npboard(goboard):
        """Flatten a go.Board grid into the length-361 vector the models use.

        White stones become 1.0, black stones 0.0 and empty points 0.5.
        Each row is mirrored and the grid transposed so the orientation
        matches what the models were trained on.  NOTE(review): the
        dimensions are hard-coded to 19x19 even though --size accepts
        smaller boards; a smaller board fills only part of the vector --
        confirm the models are 19x19 only.
        """
        goboard_array = []
        for i in range(19):
            goboard_array.append([.5] * 19)
        i, j = 0, 0
        for row in goboard:
            if j > 18:
                j = 0
            for col in row:
                if i > 18:
                    i = 0
                if col._type == 'white':
                    goboard_array[i][j] = 1.0
                elif col._type == 'black':
                    goboard_array[i][j] = 0.0
                else:
                    goboard_array[i][j] = 0.5
                i += 1
            j += 1
        # Mirror every row, then transpose, to match the model frame.
        for i in range(len(goboard_array)):
            goboard_array[i] = goboard_array[i][::-1]
        goboard_array = np.array(goboard_array).T
        return np.concatenate(goboard_array)

    def cpu_play():
        """Play the model's top predicted move for the current position.

        A no-op before the first move: ``npboard`` only exists once a
        move has been played and the board vector computed.
        """
        global pred
        if move_count > 0:
            predictions = mm.guess_list(npboard, move_count, dm)
            x, y = predictions[0]
            pred = predictions[0]
            # Predictions are 0-based with a flipped x axis; Board.move
            # expects 1-based coordinates.
            move = (y + 1, 18 - x + 1)
            board.move(move[0], move[1])
            view.redraw()

    def move():
        """Play a stone for the current turn at the cursor position."""
        board.move(*view.cursor)
        view.redraw()

    def undo():
        """Undo the last move."""
        board.undo()
        view.redraw()

    def redo():
        """Redo an undone move."""
        board.redo()
        view.redraw()

    def quit_game():
        """Exit the game."""
        # Renamed from ``exit`` so the builtin is not shadowed.
        sys.exit(0)

    # Action keymap
    KEYS = {
        'w': view.cursor_up,
        's': view.cursor_down,
        'a': view.cursor_left,
        'd': view.cursor_right,
        ' ': move,
        'u': undo,
        'r': redo,
        'c': cpu_play,
        '\x1b': quit_game,
    }

    # Main loop
    while True:
        clear()
        sys.stdout.write('{0}\n'.format(view))
        # Same output as the old ``print "move #:", move_count`` but
        # consistent with the sys.stdout.write style used everywhere else.
        sys.stdout.write('move #: {0}\n'.format(move_count))
        sys.stdout.write('Black: {black} <===> White: {white}\n'.format(**board.score))
        sys.stdout.write('{0}\'s prediction '.format(pred))
        sys.stdout.write('{0}\'s '.format(mm.most_popular_moves))
        sys.stdout.write('{0}\'s move... '.format(board.turn))

        if err:
            sys.stdout.write('\n' + err + '\n')
            err = None

        # Get action key
        c = getch()
        change_flag = 0
        try:
            # Execute selected action
            KEYS[c]()
            prev_move_count = move_count
            if c == ' ' or c == 'r' or c == 'c':
                move_count += 1
                change_flag = 1
            elif c == 'u':
                move_count = max(0, move_count - 1)
                change_flag = 1
        except BoardError as be:
            # Illegal move (occupied point, suicide, etc.): the board did
            # not change, so roll back any counter bump and skip the
            # board-vector resync instead of forcing a needless one.
            if change_flag == 1:
                move_count = prev_move_count
                change_flag = 0
            err = be.message
        except KeyError:
            # Action not found, do nothing
            pass

        if change_flag == 1:
            # A move was played/undone/redone: refresh the flattened
            # board vector the prediction models consume.
            npboard = goboard_to_npboard(board._state.board)
# Script entry point: launch the interactive game with default settings.
if __name__ == '__main__':
    main()
| UTF-8 | Python | false | false | 4,839 | py | 5 | board_manager.py | 4 | 0.50279 | 0.489977 | 0.000207 | 174 | 26.810345 | 91 |
Mytsu/Simplex-Python | 9,620,726,770,500 | dc4b980ae67d39e5baea633e9712a0c620ddc1dc | 96a4f105497c3d25d8c6b2f787638d28df2bcf34 | /src/simplex.py | 5c266c8a3821831443da613ca3823f126db75074 | []
| no_license | https://github.com/Mytsu/Simplex-Python | c7924e9a8ddd22ba3f86f2eed206d4157cc77921 | 176c7c20057f45ccb109690ff492068a8427d234 | refs/heads/master | 2020-04-04T19:16:20.166410 | 2018-12-07T12:17:06 | 2018-12-07T12:17:06 | 156,199,139 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from numbers import Number
from typing import List, Tuple, Union
from matrix import Matrix
class Simplex:
    """Primal simplex solver over a custom ``Matrix`` type.

    Solves min c.x subject to A.x = b, x >= 0, assuming the constraint
    matrix already contains an identity-like set of columns usable as the
    initial basis (e.g. slack variables).
    """
    # Reduced costs are rounded to this many digits to absorb float noise.
    N_DIGITS = 5
    def __init__(self,
                 A: Union[List[List[Number]], Matrix],
                 b: Union[List[Number], Matrix],
                 c: Union[List[Number], Matrix]):
        # Accept either raw nested lists or ready-made Matrix instances.
        if isinstance(A, Matrix):
            self.coefficients = A
        else:
            self.coefficients = Matrix(data=A)
        if isinstance(b, Matrix):
            self.resources = b
        else:
            self.resources = Matrix(len(b), 1, b)
        if isinstance(c, Matrix):
            self.cost = c
        else:
            self.cost = Matrix(1, len(c), c)
        self.variables = self.coefficients.columns
        self.restrictions = self.coefficients.rows
        # One basis variable per constraint row; filled by initial_bfs().
        self.base_variables = [None for _ in range(self.restrictions)]
        self.multiple_solutions = False
        # Declared (not assigned) attributes populated during run().
        self.solution: Matrix
        self.second_solution: Matrix
        self.base: Matrix
        self.base_inv: Matrix
        self.reduced_cost: List[Number]
        self.direction: List[Number]
    def run(self):
        """Iterate pivots until optimality, unboundedness or a second optimum."""
        self.initial_bfs()
        self.update_base()
        enter = self.next_to_enter_base()
        while enter is not None:
            self.direction = self.get_direction(enter)
            # All-nonnegative direction means the problem is unbounded.
            if all(x >= 0 for x in self.direction):
                break
            self.new_bfs(self.direction, enter)
            if self.multiple_solutions:
                break
            self.update_base()
            enter = self.next_to_enter_base()
    def initial_bfs(self):
        """Pick identity-like columns as the starting basic feasible solution."""
        for col in range(self.variables):
            if self.can_be_base(col) and col not in self.base_variables:
                i = list(self.coefficients.column(col)).index(1)
                self.base_variables[i] = col
        assert all(x is not None for x in self.base_variables)
        # Basic variables take their resource value, all others start at 0.
        self.solution = Matrix(
            data=[[0 if i not in self.base_variables
                   else self.resources[self.base_variables.index(i), 0]
                   for i in range(self.variables)]])
    def update_base(self) -> Matrix:
        """Rebuild the basis matrix B and its inverse from base_variables."""
        data = [list(self.coefficients.column(i)) for i in self.base_variables]
        self.base = Matrix(len(data), len(data[0]), data)
        self.base_inv = self.base.inverse()
    def can_be_base(self, i: int) -> bool:
        """Checks if column can be part of base."""
        return all(j in (0, 1) for j in self.coefficients.column(i)) and \
            list(self.coefficients.column(i)).count(1) == 1
    def next_to_enter_base(self) -> List:
        """Get the index of the column that will enter to the base."""
        # Reduced cost: c - c_B . B^-1 . A, rounded to suppress float noise.
        reduced_cost = round(self.cost - self.base_cost.transposed().product(
            self.base_inv.product(self.coefficients)), Simplex.N_DIGITS)
        self.reduced_cost = reduced_cost
        entering = next((i for i, k in enumerate(reduced_cost) if k < 0), None)
        if entering is not None:
            return entering
        # No strictly negative cost: a zero-cost non-basic column signals
        # an alternative optimal solution.
        entering, cost = next(
            ((i, k) for i, k in enumerate(reduced_cost)
             if k <= 0 and i not in self.base_variables), (None, None))
        if cost == 0:
            self.multiple_solutions = True
            self.second_solution = self.solution.copy()
        return entering
    @property
    def base_cost(self) -> Matrix:
        """Cost coefficients of the current basic variables (c_B)."""
        data = [self.cost[0, i] for i in self.base_variables]
        return Matrix(data=[data])
    def get_direction(self, entering: int) -> Matrix:
        """Feasible direction moving the entering variable up by one unit."""
        base_dir = -self.base_inv.product(
            Matrix(data=[list(self.coefficients.column(entering))]))
        direction = [1 if i == entering else 0 if i not in self.base_variables
                     else base_dir[self.base_variables.index(i), 0]
                     for i in range(self.variables)]
        return Matrix(data=[direction])
    def get_leaving_variable(self, direction: Matrix) -> Tuple[Number, int]:
        """Min-ratio test: returns (step length, index of the leaving variable)."""
        return min([(abs(x/d), i) for i, (x, d) in
                    enumerate(zip(self.solution, direction)) if d < 0])
    def new_bfs(self, direction: Matrix, enter: int):
        """Move to the adjacent basic feasible solution and swap the basis."""
        step, leave = self.get_leaving_variable(direction)
        leave_index = self.base_variables.index(leave)
        self.solution = direction * step + self.solution
        self.base_variables[leave_index] = enter
| UTF-8 | Python | false | false | 4,326 | py | 8 | simplex.py | 6 | 0.576976 | 0.572353 | 0 | 118 | 35.661017 | 79 |
juhipawar/Data_Warehouse_Assignment_2 | 5,351,529,280,806 | 8d35ae8c538fc2b09468a969a995388a9616ad8d | a83ea31b7b8c7517d92baa6b0ad9a489899659ae | /Sentimental_Analysis.py | 2ae8852a0db330019da07ba3c32df3297987b69a | []
| no_license | https://github.com/juhipawar/Data_Warehouse_Assignment_2 | 0edad43b41ecbc96e36b41e625217381c7fc6cb8 | 7af1945f8e89dd13e4f03357c036abb66fe3bb7e | refs/heads/master | 2020-03-19T12:45:28.656706 | 2018-06-08T02:35:58 | 2018-06-08T02:35:58 | 136,538,235 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer # Adpeted from https://github.com/cjhutto/vaderSentiment
import csv
import numpy as np
import pandas as pd
# Score each cleaned tweet (text in CSV column 3) with VADER and write
# tweet, dominant sentiment class and its score to File_sentimental.csv.
inputFile = open('Clean_file.csv')
fileObject = csv.reader(inputFile)
sentimentIntensityAnalyzer = SentimentIntensityAnalyzer()
scores = []      # strength of the dominant sentiment component per tweet
sentiments = []  # dominant component index: 0=positive, 1=negative, 2=neutral
tweets = []      # raw tweet text
for row in fileObject:
    readTweets = sentimentIntensityAnalyzer.polarity_scores(row[3])
    positive = readTweets['pos']
    negative = readTweets['neg']
    neutral = readTweets['neu']
    npArray = np.array([positive, negative, neutral])
    # argmax/max pick the strongest component.  The dead ``switch_demo``
    # helper (with its misspelled, unused ``swticher`` dict) that the
    # original re-defined on every iteration has been removed.
    sentiments.append(np.argmax(npArray))
    scores.append(np.max(npArray))
    tweets.append(row[3])
inputFile.close()
# Build the output table in one step so the declared columns always match
# the filled ones.  The original declared 'tweets'/'sentiments'/'scores'
# but filled 'tweet'/'sentiment'/'sentiment_score', and assigning columns
# of length N to a 0-row frame raises on current pandas.
dataFrame = pd.DataFrame({'tweet': tweets,
                          'sentiment': sentiments,
                          'sentiment_score': scores},
                         columns=['tweet', 'sentiment', 'sentiment_score'])
dataFrame.to_csv('File_sentimental.csv', index=False)
| UTF-8 | Python | false | false | 1,083 | py | 4 | Sentimental_Analysis.py | 3 | 0.704524 | 0.699908 | 0 | 33 | 31.818182 | 125 |
jonathanSeang/BlackJackPython | 5,703,716,585,009 | 5c752d8289255e238df8fc73f8dc0e91dc051459 | 9e734f874ddda92d6439c0f217d7e3e61df850df | /main.py | 5dc6cff07c5cc7f8de5b66615fb1993140c38211 | []
| no_license | https://github.com/jonathanSeang/BlackJackPython | 3504488bd19104f858f9dba45eb2e0d9db83ca29 | e60b23e76103c1e3f3d3a5bf699cd3bbcd0e9401 | refs/heads/main | 2023-05-21T02:14:45.433522 | 2021-06-13T23:20:35 | 2021-06-13T23:20:35 | 376,063,061 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # BlackJack Project
# Will have a dealer and human player
# Uses a normal deck of cards
# Player has limited amount of cash
# Player can HIT or STAY to get to 21
# Maybe add INSURANCE, SPLIT, or DOUBLE DOWN
# If player under 21, deal hits until they beat player or dealer busts
from Deck import Deck
from Hand import Hand
# Chips the player starts the session with.
STARTING_NUMBER_OF_CHIPS = 100
# Cards dealt to each hand at the start of a round.
CARDS_DEALT_AT_START = 2
# Cards dealt per individual Hit.
CARDS_DEALT_PER_HIT = 1
# prints current chips available and asks for amount to be bet
# prints current chips available and asks for amount to be bet
def ask_for_bet(chips_available):
    """Prompt until the player enters a valid bet and return it.

    A valid bet is a positive integer no larger than ``chips_available``.
    The original accepted zero and negative bets and used a bare
    ``except``; validation is now explicit and only ValueError is caught.
    """
    print("\n\n\nYou currently have " + str(chips_available) + " chips available")
    while (True):
        try:
            bet_amount = int(input("Please input the amount you would like to bet: "))
            if bet_amount <= 0 or bet_amount > chips_available:
                raise ValueError("bet out of range")
        except ValueError:
            print("Value is invalid")
        else:
            break
    return bet_amount
def ask_for_hit(player_hand):
    """Show the hand's value and ask the player to Hit or Stand.

    Returns True for 'Hit', False for 'Stand'; re-prompts on anything else.
    """
    print("\nCurrent hand value is " + str(player_hand.possible_values))
    responses = {'Hit': True, 'Stand': False}
    while True:
        choice = input("Would you like to HIT? (Type 'Hit' to Hit and 'Stand' to Stand) ")
        if choice in responses:
            return responses[choice]
# Whenever new card is added, throw this exception so we can restart function
class ContinueDueToNewCard(Exception):
    """Control-flow signal raised when a card is dealt, so dealer_action
    can restart its scan of the hand's possible values."""
    pass
# Dealer will always Hit until dealer's value meets or exceeds 17
def dealer_action(deck, dealer_hand):
    """Play the dealer's turn: hit until every possible value is >= 17.

    Each time a card is added the hand's possible_values change, so the
    scan is aborted with ContinueDueToNewCard and restarted from scratch.
    Prints the final dealer hand value (and a bust message if applicable).
    """
    # For every possible value, check if any of them are in range(17, 21)
    while True: # Loop used to restart possible_values
        try:
            for value in dealer_hand.possible_values:
                while value < 17:
                    new_card = deck.deal_n_cards(CARDS_DEALT_PER_HIT)
                    dealer_hand.add_cards(new_card)
                    raise ContinueDueToNewCard # Break out while loop whenever new card is added
            break
        except ContinueDueToNewCard:
            continue
    if dealer_hand.check_if_hand_busts():
        print("Dealer hand busts!")
    print("\nDealer hand value is " + str(dealer_hand.possible_values))
def find_if_player_won(dealer_hand, player_hand):
    """Return True when the player's best non-busting total beats the dealer's.

    A hand's score is its first possible value that does not exceed 21;
    a fully busted hand scores 0.  Ties go to the dealer.
    """
    def best_total(hand):
        # First value <= 21 wins; no such value means the hand busted.
        for total in hand.possible_values:
            if total <= 21:
                return total
        return 0

    return best_total(player_hand) > best_total(dealer_hand)
def ask_to_continue():
    """Ask whether to play another round; True for 'Continue', False for 'Quit'."""
    answers = {'Continue': True, 'Quit': False}
    while True:
        reply = input("Would you like to continue playing? (Type 'Continue' to Continue and 'Quit' to Quit ")
        if reply in answers:
            return answers[reply]
if __name__ == '__main__':
    # Session loop: one iteration per round, with a fresh shuffled deck.
    chips_available = STARTING_NUMBER_OF_CHIPS
    game_in_play = True
    while game_in_play:
        # instantiate deck
        deck = Deck()
        deck.shuffle()
        # ask for bet
        chips_bet = ask_for_bet(chips_available)
        # deal initial cards
        dealer_hand = Hand('Dealer', deck.deal_n_cards(CARDS_DEALT_AT_START))
        player_hand = Hand('Player', deck.deal_n_cards(CARDS_DEALT_AT_START))
        # show appropriate cards
        print("Dealer reveals his first card is: " + str(dealer_hand.cards[0]))
        print(player_hand)
        hand_in_play = True
        while hand_in_play:
            # ask if want to hit or stay
            hand_in_play = ask_for_hit(player_hand)
            if hand_in_play: # deal player a card if hit
                new_cards = deck.deal_n_cards(CARDS_DEALT_PER_HIT)
                print("Dealer has given you " + ', '.join(map(str, new_cards)))
                player_hand.add_cards(new_cards)
                # check if bust else ask to hit
                if player_hand.check_if_hand_busts():
                    print("Your hand busts!")
                    hand_in_play = False
        # if player stands, dealer action
        # NOTE(review): the dealer also plays after the player busts; the
        # outcome is still correct because a busted player scores 0 in
        # find_if_player_won, but the dealer's extra draws are cosmetic.
        dealer_action(deck, dealer_hand)
        print(dealer_hand)
        # determine winner and adjust chips
        if find_if_player_won(dealer_hand, player_hand):
            chips_available += chips_bet
            print(f"\nCongratulations! You won {chips_bet} with a new total of {chips_available}")
        else:
            chips_available -= chips_bet
            print(f"\nSorry. You lost {chips_bet} with a new total of {chips_available}")
        # ask player to play again
        game_in_play = ask_to_continue()
    print(f"\n\nYou finished with a total of {chips_available}")
| UTF-8 | Python | false | false | 4,711 | py | 3 | main.py | 3 | 0.601571 | 0.596476 | 0 | 153 | 29.79085 | 116 |
jaspering21/Management | 16,166,256,931,122 | b3c6e7359fec03a9e962b6ae2eb24bd0d629d700 | 1c28b1c98d8ec92d2635934f19decbfa6d15c9ed | /Department/form.py | c1502f144a63f83a7733e57a3d19e85f6a1682fa | []
| no_license | https://github.com/jaspering21/Management | aed01c56ba161de4d340fb02f152752629565fa2 | 8b9b58f2fc7dee2166c1d8838c4bc4d052a4c46c | refs/heads/master | 2020-04-23T05:19:43.907150 | 2019-02-15T22:33:37 | 2019-02-15T22:33:37 | 170,936,879 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms
from Department.models import EMPLOYEE
from Department.models import DEPARTMENT
class HomeForm(forms.ModelForm):
    """Form exposing a single optional free-text field named ``post``."""
    # Optional text input; not required on submit.
    post = forms.CharField(required=False)
    class Meta:
        # NOTE(review): Django's ModelForm expects a single ``model``
        # attribute; ``model1``/``model2`` are not recognized by the
        # ModelForm machinery, so this Meta effectively declares no model.
        # Confirm which model this form is actually meant to edit.
        model1 = EMPLOYEE
        model2 = DEPARTMENT
        fields = ('post', )
| UTF-8 | Python | false | false | 282 | py | 9 | form.py | 4 | 0.702128 | 0.695035 | 0 | 11 | 24.545455 | 42 |
PyCN/eca | 249,108,155,731 | 0fbe12a869b0a5f4e35748de7becabda2c7ff19d | 7c7a7bb54341d88fec69a91b5f28a8e496ba0324 | /utils.py | 6455cadc67f0b39e49fc75bf8f8020506e25e7dd | [
"MIT"
]
| permissive | https://github.com/PyCN/eca | 0b2d63bec342be45387fc5ac6f4bccb1fc23becf | 1427e4c57379ccdb1118160627ab987ecf63c49e | refs/heads/master | 2021-05-27T04:07:22.431481 | 2014-06-09T03:23:14 | 2014-06-09T04:36:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cPickle
import gzip
import os
import theano.tensor as T
import numpy as np
# Rectifier (ReLU): elementwise max(0, x) built from Theano's where().
rect = lambda x: T.where(x < 0., 0., x)
def rearrange_for_plot(w):
    """
    Helper for tiling 1-dimensional square vectors into an array of images
    Expecting w in form (n_pixels, n_images); n_pixels must be a perfect
    square (grayscale) or 3x a perfect square (RGB).  Returns the tiled
    mosaic with pixel values scaled to 0..1, or w unchanged when the
    pixel count is not square.  Unlike the original, the caller's array
    is not mutated (the old in-place += and /= wrote through a view).
    """
    assert w.ndim == 2
    w = np.swapaxes(w, 0, 1)
    n_images = w.shape[0]
    # Target grid aspect ratio.  Written as float division: the original
    # ``4/3`` truncated to 1 under Python 2 integer division.
    ratio = 4.0 / 3.0
    n_pixels = w.shape[1]
    if np.sqrt(n_pixels) % 1.0 == 0:
        chans = 1
        image_dim = int(np.sqrt(n_pixels))
    elif np.sqrt(n_pixels / 3.) % 1.0 == 0:
        chans = 3
        image_dim = int(np.sqrt(n_pixels / 3.))
    else:
        print('Chosen weights probably not representing a square image')
        return w
    # Grid width; plain int() replaces np.int, removed in NumPy >= 1.24.
    l = int(np.sqrt(n_images * ratio) + 0.5)
    full_rows = n_images // l  # floor division: the row count must be an int
    last_row = n_images % l
    # Scale pixels to interval 0..1 without touching the caller's data.
    w = w + np.abs(np.min(w))
    w = w / np.max(w)
    # One-pixel white separator border on the top/left of every tile.
    w_ = np.ones((n_images, image_dim + 1, image_dim + 1, chans))
    w_[:, 1:, 1:, :] = w.reshape(n_images, image_dim, image_dim, chans)
    if chans == 1:
        # Remove color channel if this is grayscale image
        w_ = w_.reshape(w_.shape[:-1])
    rows = np.vstack([np.hstack(w_[l * i:l * (i + 1), :, :]) for i in
                      range(full_rows)])
    if last_row:
        # Pad the final partial row with white tiles to full grid width.
        r = np.hstack(w_[l * full_rows:, :, :])
        ones = np.ones((r.shape[0], (l - last_row) * (image_dim + 1), chans))
        if chans == 1:
            ones = ones[:, :, 0]
        r = np.hstack((r, ones))
        rows = np.vstack((rows, r))
    return rows
def axis_and_show(axis):
    """Resolve a plotting target.

    Returns (axis, show_it, ylim_setter).  With an explicit axis the
    caller keeps control of show(); with None, pyplot is imported lazily
    (or everything stays None when matplotlib is unavailable).
    """
    if axis is not None:
        return axis, False, axis.set_ylim
    try:
        import matplotlib.pyplot as axis
    except ImportError:
        pass
    ylim_setter = axis.ylim if axis else None
    return axis, True, ylim_setter
def imshowtiled(im, axis=None):
    """Tile the weight matrix with rearrange_for_plot and draw it.

    Uses a grayscale colormap for 2-D results and plain imshow for RGB;
    returns the imshow handle (or the tiled array when neither applies).
    """
    axis, show_it, _ = axis_and_show(axis)
    if axis is None:
        return
    tiled = rearrange_for_plot(im)
    result = tiled
    if tiled.ndim == 3:
        result = axis.imshow(tiled, interpolation='nearest')
    elif tiled.ndim == 2:
        import matplotlib.cm as cm
        result = axis.imshow(tiled, cmap=cm.Greys_r, interpolation='nearest')
    if show_it:
        axis.show()
    return result
def plot_Xdist(signal, axis=None):
    """Plot the empirical value distribution of the signal's first 5 rows."""
    axis, show_it, _ = axis_and_show(axis)
    if axis is None:
        return
    values = signal.val()
    n_bins = values.shape[1] / 10.
    for row in values[:5]:
        density, edges = np.histogram(row, bins=n_bins, density=True)
        # convert bin edges to centers
        centers = edges[:-1] + (edges[1] - edges[0]) / 2
        axis.plot(centers, density)
    if show_it:
        axis.show()
def plot_qXphi(signal, n=int(1e5), axis=None):
    """Plot per-unit signal energy E{X^2}, phi column norms and Q's diagonal."""
    axis, show_it, lim = axis_and_show(axis)
    if axis is None:
        return
    energy = np.mean(np.square(signal.val()), axis=1)
    phi_norms = np.linalg.norm(signal.layer.phi[0].get_value(), axis=0)
    q_diag = T.diagonal(signal.layer.Q).eval()
    curves = [axis.plot(series[:n], marker)[0]
              for series, marker in ((energy, 's-'),
                                     (phi_norms, '*-'),
                                     (q_diag, 'x-'))]
    axis.legend(curves, ['E{X^2}', '|phi|', 'q_i'])
    lim([0.0, 5])
    if show_it:
        axis.show()
def plot_svds(*args, **kwargs):
    """Plot the normalized singular-value spectrum of each argument.

    Each argument may be a plain matrix or an object exposing .val()
    (and optionally .name).  Keyword args: ``axis`` (plot target) and
    ``n`` (max number of singular values to show).
    """
    axis = kwargs.get('axis')
    axis, show_it, _ = axis_and_show(axis)
    if axis is None:
        return
    n = kwargs.get('n', int(1e9))
    plots = []
    names = []

    def svd(x):
        # Singular values scaled by 1/sqrt(sample count).
        return np.linalg.svd(x, compute_uv=False) / np.sqrt(x.shape[1])

    for s in args:
        # Fall back to the raw object when it has no .val()/.name; the
        # original bare ``except:`` also swallowed KeyboardInterrupt etc.
        try:
            val = s.val()
        except Exception:
            val = s
        plots.append(axis.plot(svd(val)[:n])[-1])
        try:
            name = s.name
        except Exception:
            name = '?'
        names.append('svd(' + name + ')')
    axis.legend(plots, names)
    if show_it:
        axis.show()
def visualize(weights):
    """Display a weight matrix, or animate a list of them.

    A list with more than one element is shown as an ArtistAnimation;
    anything else is tiled and shown as a single image.  Silently does
    nothing when matplotlib is not installed.
    """
    try:
        import matplotlib.pyplot as plt
        import matplotlib.cm as cm
    except ImportError:
        return
    if type(weights) is list and len(weights) > 1:
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        # Bug fix: the original iterated over the undefined name
        # ``weight_seq`` (NameError); the sequence to animate is ``weights``.
        ims = [[imshowtiled(w, axis=ax)] for w in weights]
        import matplotlib.animation as animation
        # Keep a reference so the animation is not garbage-collected
        # before plt.show() returns.
        ani = animation.ArtistAnimation(fig, ims, interval=100, repeat_delay=1000)
        #writer = animation.writers['ffmpeg'](fps=20,bitrate=1000)
        #ani.save('demo.mp4',writer=writer,dpi=100)
        plt.show()
    else:
        weights = weights[0] if type(weights) is list else weights
        imshowtiled(weights)
class Dataset(object):
    """
    Class for handling training, validation, and test data

    Subclasses implement load() to fill ``self.data`` with
    {'trn'|'val'|'tst': [samples, labels]} and set ``self.data_shape``.
    NOTE: Python 2 code (print statement, xrange); several parameters
    shadow the builtin ``type``.
    """
    class Data(object):
        # Lightweight container for one minibatch: samples are (k, n).
        def __init__(self, samples, labels, type):
            self.samples = samples
            self.labels = labels
            self.type = type
            self.k = samples.shape[1]
            self.n = samples.shape[0]
        def accuracy(self, est, print_it=False):
            """Classification accuracy of ``est`` against the stored labels."""
            # If estimate is stacked, extract the labels
            if est.shape[0] == self.samples.shape[0]:
                est = est[-self.labels.shape[0]:, :]
            # One-hot labels/estimates are reduced to class indices first.
            true = self.labels if self.labels.ndim == 1 else np.argmax(self.labels, axis=0)
            est = est if est.ndim == 1 else np.argmax(est, axis=0)
            acc = float(np.bincount(est == true, minlength=2)[1]) / len(est)
            if print_it:
                print "Accuracy %s: %6.2f %%" % (self.type, 100. * acc)
            return acc
    def __init__(self, batch_size=500, testset_size=10000, normalize=True,
                 as_one_hot=False, stacked=False):
        self.batch_size = batch_size
        self.testset_size = testset_size
        self.as_one_hot = as_one_hot
        self.stacked = stacked
        assert not stacked or as_one_hot, 'stacking requires one hot'
        self.load()
        if normalize:
            # Per-feature standardization, in place, over every split.
            for x, y in self.data.values():
                x -= np.mean(x, axis=0, keepdims=True)
                x /= np.maximum(np.std(x, axis=0, keepdims=True), 1e-10)
    def size(self, type):
        """Return ((input_dim, batch), (label_dim, batch)) for a split."""
        assert type in self.data.keys(), 'type has to be in %s' % str(self.data.keys())
        u_dim, y_dim = self.dims(type)
        samples = self.samples(type)
        return ((u_dim, samples), (y_dim, samples))
    def samples(self, type):
        # Actual size of the first minibatch (may be < batch_size).
        return self.data[type][0][:self.batch_size].shape[0]
    def dims(self, type):
        """Input and label dimensionality, accounting for one-hot/stacking."""
        y_dim = 10 if self.as_one_hot else 1
        u_dim = self.data[type][0].shape[1]
        u_dim += y_dim if self.stacked else 0
        return (u_dim, y_dim)
    def get(self, type, i=None):
        """
        Returns a tuple (u, y) of i'th minibatch expanded into a one-hot coded vectors if necessary.
        E.g. 5 -> [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]
        """
        assert type in self.data.keys(), 'type has to be in %s' % str(self.data.keys())
        (u, y) = self.data[type]
        i = 0 if i is None else i
        start = i * self.batch_size
        end = min(u.shape[0], (i + 1) * self.batch_size)
        u = u[start:end].T
        if not self.as_one_hot:
            return Dataset.Data(u, y[start:end].T, type)
        # Convert into one_hot presentation 2 -> [0, 0, 1, 0, ...]
        y_ = np.array(np.zeros(10))
        y_[y[start]] = 1.
        for i in range(start + 1, end):
            new = np.zeros(10)
            new[y[i]] = 1.
            y_ = np.vstack((y_, new))
        y_ = np.float32(y_.T)
        if self.stacked:
            # Training inputs carry the labels; val/test get NaN labels.
            if type == 'trn':
                u = np.vstack([u, y_])
            else:
                u = np.vstack([u, np.float32(np.nan * y_)])
        return MnistDataset.Data(u, y_, type)
    def get_patches(self, w=8, m=10000, normalize_contrast=False):
        """Sample m random w-by-w patches from training images; returns (w*w*chans, m)."""
        patches = []
        # Fixed seed so the patch sample is reproducible across runs.
        rng = np.random.RandomState(seed=0)
        pix = self.data['trn'][0]
        pix = pix.reshape((pix.shape[0],) + self.data_shape)
        width, height, chans = self.data_shape
        for i in xrange(m):
            x, y = rng.randint(width - w), rng.randint(height - w)
            j = rng.randint(len(pix))
            patches += [pix[j, x:x+w, y:y+w, :chans].reshape(w * w * chans)]
        patches = np.array(patches)
        if normalize_contrast:
            # Per-patch mean/variance normalization.
            patches -= np.mean(patches, axis=1, keepdims=True)
            patches /= np.maximum(np.std(patches, axis=1, keepdims=True), 1e-10)
        return patches.T
class MnistDataset(Dataset):
    """MNIST handwritten-digit dataset loaded from a local pickle file."""
    def load(self):
        """Fill self.data / self.data_shape from 'mnist.pkl.gz' in the cwd."""
        # Download e.g. from http://deeplearning.net/data/mnist/mnist.pkl.gz
        filename = 'mnist.pkl.gz'
        if not os.path.exists(filename):
            raise Exception("Dataset not found, please run:\n wget http://deeplearning.net/data/mnist/mnist.pkl.gz")
        self.data_shape = (28, 28, 1)
        data = cPickle.load(gzip.open(filename))
        trn, val, tst = data[0], data[1], data[2]
        limit = self.testset_size

        def as_pair(split, cap=None):
            # Cast one (samples, labels) split, optionally truncated to cap.
            samples, labels = split[0], split[1]
            if cap is not None:
                samples, labels = samples[:cap], labels[:cap]
            return [np.float32(samples), np.int32(labels)]

        self.data = {
            'trn': as_pair(trn),
            'val': as_pair(val, limit),
            'tst': as_pair(tst, limit),
        }
class Cifar10Dataset(Dataset):
    """CIFAR-10 colour-image dataset fetched through skdata."""
    def load(self):
        """Fill self.data / self.data_shape from the skdata CIFAR10 wrapper."""
        from skdata.cifar10.dataset import CIFAR10
        dataset = CIFAR10()
        len(dataset.meta)  # force skdata to fetch/build its metadata
        pixels = np.float32(dataset._pixels / 255.)
        self.data_shape = pixels.shape[1:]
        assert self.data_shape == (32, 32, 3)
        pixels = pixels.reshape(60000, np.prod(self.data_shape))
        labels = dataset._labels
        assert self.testset_size <= 10000
        t = self.testset_size

        def split(a, b):
            # One (samples, labels) slice of the flattened dataset.
            return [pixels[a:b], labels[a:b]]

        self.data = {
            'trn': split(0, 40000),
            'val': split(40000, 40000 + t),
            'tst': split(50000, 50000 + t),
        }
def free_mem():
    """Return free GPU memory in MiB as reported by Theano's CUDA backend."""
    from theano.sandbox.cuda import cuda_ndarray
    free_bytes = cuda_ndarray.cuda_ndarray.mem_info()[0]
    return free_bytes / 1024 / 1024
| UTF-8 | Python | false | false | 9,852 | py | 15 | utils.py | 13 | 0.539687 | 0.511165 | 0 | 300 | 31.84 | 117 |
dh-trier/topicmodeling | 10,746,008,178,943 | 8de14e2f7d3c748477986795ffe51e055579416b | 7626d21e6d5008a9e3f8a8097da365eca68377f9 | /scripts/preprocessing.py | 7b8c559f5bc3c2048fa2f6075f2de6d6ebb6289e | [
"Unlicense"
]
| permissive | https://github.com/dh-trier/topicmodeling | 15e56075a138baf76235eda6ccf257a117c21423 | bb431346205cfa4d604349ffb0f7dc901d6d8af2 | refs/heads/master | 2022-12-21T08:44:15.941169 | 2022-12-15T07:44:43 | 2022-12-15T07:44:43 | 197,348,288 | 7 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
"""
Topic Modeling with gensim: Preprocessing.
Provides preprocessing for the input text files.
Adds linguistic annotation using TextBlob.
Uses this information to filter the tokens in the documents.
Works for English, French and German only!
For other languages, you need a different annotation tool.
See: https://textblob.readthedocs.io/en/dev/index.html
"""
# == Imports ==
import os
import glob
from os.path import join
from os.path import basename
from textblob import TextBlob as tb
import helpers
import re
# Directory this script lives in; used to resolve the stoplists/ folder
# and (by callers) the datasets/ folder.
workdir = join(os.path.realpath(os.path.dirname(__file__)))
# == Functions ==
def load_text(textfile):
    """
    Loads a single plain text file.
    Provides the content as a string.
    """
    with open(textfile, "r", encoding="utf8") as infile:
        return infile.read()
def load_stoplist(lang):
    """
    Loads a language-specific list of stopwords from the stoplists folder.
    Returns a list of stopwords, or an empty list (with a warning) when no
    stoplist file exists for the given language code.
    """
    stoplistfile = join(workdir, "stoplists", lang + ".txt")
    try:
        with open(stoplistfile, "r", encoding="utf8") as infile:
            stoplist = infile.read().split("\n")
        return stoplist
    except OSError:
        # Only a missing/unreadable file is an expected failure; the
        # original bare ``except`` also hid programming errors.
        print("Warning. No stoplist for the indicated language has been found.")
        print("Please consider adding a stoplist for the language code to the stoplist folder.")
        return []
def prepare_text(text, lang, stoplist):
    """
    Tokenize, POS-tag and filter a single text for topic modeling.

    For English, French and German the appropriate TextBlob tagger is used
    and only nouns, verbs and adjectives are kept; tokens are lower-cased,
    must be longer than one character and must not be in ``stoplist``.
    For any other language code a simple, language-agnostic regex
    tokenizer is used instead (minimum token length three).
    Returns the text as a list of lower-cased tokens.
    """
    # Penn-style tags retained for tagged languages: nouns, adjectives, verbs.
    poslist = ["NN", "NNS", "JJ", "JJR", "VB", "VBZ", "VBG", "VBN"]
    if lang == "en":
        blob = tb(text)
    elif lang == "fr":
        from textblob_fr import PatternTagger, PatternAnalyzer
        blob = tb(text, pos_tagger=PatternTagger(), analyzer=PatternAnalyzer())
    elif lang == "de":
        from textblob_de import TextBlobDE as tbd
        blob = tbd(text)
    else:
        # Language-agnostic fallback: no POS filtering, only a length and
        # stoplist filter (minimum length 3 here, 2 on the tagged path).
        print("Sorry, the language code you supplied does not refer to a supported language (en, de, fr).")
        print("The preprocessing step falls back to a very simple, language-agnostic procedure now.")
        print("Please consider adding a stoplist for your language code.")
        tokens = re.split(r"\W+", text)
        prepared = [item.lower() for item in tokens]
        return [item for item in prepared if len(item) > 2 and item not in stoplist]
    # Shared tagged-language path (was duplicated verbatim per language).
    prepared = [item[0].lower() for item in blob.tags if item[1] in poslist]
    return [item for item in prepared if len(item) > 1 and item not in stoplist]
# == Coordinating function ==
def main(workdir, dataset, identifier, lang):
    """Preprocess every txt file of a dataset and pickle the token lists.

    Reads workdir/datasets/<dataset>/txt/*.txt, filters each text with
    prepare_text() and saves the list of token lists via helpers.save_pickle
    under the given run identifier.
    """
    print("\n== preprocessing ==")
    alltextids = []
    allprepared = []
    stoplist = load_stoplist(lang)
    textpath = join(workdir, "datasets", dataset, "txt", "*.txt")
    # File count is only used for the progress display below.
    numfiles = len([entry for entry in os.listdir(join(workdir, "datasets", dataset, "txt")) if os.path.isfile(os.path.join(workdir, "datasets", dataset, "txt", entry))])
    progress = 0
    for textfile in sorted(glob.glob(textpath)):
        # Text id = file name without extension (collected but not saved here).
        textid = basename(textfile).split(".")[0]
        alltextids.append(textid)
        text = load_text(textfile)
        prepared = prepare_text(text, lang, stoplist)
        allprepared.append(prepared)
        progress +=1
        print(progress, "/", numfiles, end="\r")
    helpers.save_pickle(allprepared, workdir, identifier, "allprepared.pickle")
    print("files processed:", len(allprepared))
    print("==", helpers.get_time(), "done preprocessing", "==")
| UTF-8 | Python | false | false | 4,490 | py | 13,670 | preprocessing.py | 12 | 0.639644 | 0.635189 | 0 | 118 | 36.771186 | 170 |
UNGGUY/BXXT | 4,776,003,654,809 | 02d5bf33d7f352bf88abdf3d8f6660f880949513 | ae9d039124a115076db5fd6496c530e0ea7b4d3b | /customer/admin.py | 9a8fa3700f02bce0a32416055cece22ef71d2905 | []
| no_license | https://github.com/UNGGUY/BXXT | 8ebbb1c5f524ed9c89bb513f0d4add5012b9d186 | 86f739dade0f3ad33a394d9c01b7aa3620f7bb5f | refs/heads/main | 2023-01-31T02:33:49.349134 | 2020-12-14T09:20:00 | 2020-12-14T09:20:00 | 313,051,665 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
# Register your models here.
from django.contrib import admin
import xadmin
from django.contrib.auth.models import Group
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.template.response import TemplateResponse
from xadmin.util import model_ngettext
from import_export import resources
from django.shortcuts import render, redirect
from customer.models import Manager, Apply, Audit, Hospital, UserType, User, Record, Detail
from xadmin import views
from xadmin.plugins.actions import BaseActionView
from django.utils.translation import gettext_lazy as _
from django.db.models import Q
from xadmin.layout import Fieldset, Row
from customer.resources import *
class MyDelete(BaseActionView):
    """Soft-delete action: flags selected rows instead of removing them."""
    # Three attributes are required by xadmin for a custom action.
    action_name = "my_delete"  #: unique identifier of this Action; use a specific name
    description = '删除所选的 %(verbose_name_plural)s'
    #: description shown in the Action menu; ``%(verbose_name_plural)s`` is replaced by the model name.
    model_perm = 'change'  #: permission required to run this Action
    def do_action(self, queryset):
        # queryset contains the rows selected in the change list
        for obj in queryset:
            # per-object operation: mark as deleted and persist
            obj.isDelete = True
            obj.save()
class UserAdmin(object):
    """xadmin configuration for User records (soft delete via MyDelete)."""
    list_display = ['uid', 'uname', 'utype', 'sex', 'age', 'tel', 'money', 'address', 'isDelete']
    ordering = ["isDelete"]
    search_fields = ['uid', 'uname', 'tel']
    list_filter = ['utype', 'sex', 'address', 'isDelete']
    list_per_page = 20
    model_icon = 'fa fa-users'
    list_editable = ['uname', 'utype', 'age', 'tel', 'address']
    show_bookmarks = False
    # Edit form: account fields, then personal details.
    form_layout = (
        Fieldset('账户信息',
                 Row('uid', 'password', 'isDelete'),
                 ),
        Fieldset('个人信息',
                 Row('uname', 'sex'),
                 Row('age', 'tel'),
                 Row('address'),
                 Row('utype'),
                 Row('money'),
                 )
    )
    list_export = ''
    import_export_args = {'import_resource_class': UserResource, 'export_resource_class': UserResource}
    # bulk import/export configuration (above)
    # overridden delete: soft delete only
    actions = [MyDelete]
    # hide the built-in hard delete
    @staticmethod
    def has_delete_permission(request=None):
        # Disable hard delete in the admin UI
        return False
class ManagerAdmin(object):
    """xadmin configuration for Manager records.

    Non-superusers only see auditor/type-3 managers, may only create
    auditors (type '2'), and get a link from the account column to the
    manager's audits.
    """
    list_display = ['mid_detail', 'mname', 'type', 'count', 'work_rate', 'isDelete']
    ordering = ['isDelete']
    search_fields = ['mid', 'mname']
    list_filter = ['type', 'isDelete']
    model_icon = 'fa fa-user'
    show_bookmarks = False
    def work_rate(self, instance):
        # Audit accuracy = right / count; '-' when nothing was audited yet.
        if instance.count == 0:
            return '-'
        else:
            return instance.right / instance.count
    # Raw HTML column header providing an asc/desc sort dropdown
    # (rendered because allow_tags is set below).
    work_rate.short_description = '<div class="dropdown pull-left">' \
                                  '<a class="dropdown-toggle md-opjjpmhoiojifppkkcdabiobhakljdgm_doc" ' \
                                  'data-toggle="dropdown" href="#">' \
                                  '正确率' \
                                  '</a>' \
                                  '<ul class="dropdown-menu" role="menu">' \
                                  '<li><a href="?o=count.right.isDelete" ' \
                                  'class="active md-opjjpmhoiojifppkkcdabiobhakljdgm_doc">' \
                                  '<i class="fa fa-caret-up"></i> 正序</a></li>' \
                                  '<li><a href="?o=-count.-right.isDelete" ' \
                                  'class="active md-opjjpmhoiojifppkkcdabiobhakljdgm_doc">' \
                                  '<i class="fa fa-caret-down"></i> 倒序</a></li>' \
                                  '</ul></div>'
    work_rate.is_column = True
    work_rate.allow_tags = True
    list_editable = ['mname']
    def mid_detail(self, obj):
        # Superusers see the plain id; 'manager'-group users get a link to
        # the audits filtered by this manager.
        if self.request.user.is_superuser:
            return '%s' % obj.mid
        else:
            if Group.objects.get(user=self.request.user).name == 'manager' :
                return '<a href="%s">%s</a>' % ('/xadmin/customer/audit/?_rel_mid__id__exact=' + str(obj.id), obj.mid)
            else:
                return '%s' % obj.mid
    mid_detail.allow_tags = True
    # HTML sort-dropdown header for the account column (same pattern as above).
    mid_detail.short_description = '<div class="dropdown pull-left">' \
                                   '<a class="dropdown-toggle md-opjjpmhoiojifppkkcdabiobhakljdgm_doc" ' \
                                   'data-toggle="dropdown" href="#">' \
                                   '账户' \
                                   '</a>' \
                                   '<ul class="dropdown-menu" role="menu">' \
                                   '<li><a href="?o=mid.isDelete" ' \
                                   'class="active md-opjjpmhoiojifppkkcdabiobhakljdgm_doc">' \
                                   '<i class="fa fa-caret-up"></i> 正序</a></li>' \
                                   '<li><a href="?o=-mid.isDelete" ' \
                                   'class="active md-opjjpmhoiojifppkkcdabiobhakljdgm_doc">' \
                                   '<i class="fa fa-caret-down"></i> 倒序</a></li>' \
                                   '</ul></div>'
    form_layout = (
        Fieldset('账户信息',
                 Row('mid', 'pw', 'isDelete'),
                 ),
        Fieldset('个人信息',
                 Row('mname', 'type'),
                 Row('count', 'right'),
                 )
    )
    # bulk import/export configuration
    list_export = ''
    import_export_args = {'import_resource_class': ManagerResource, 'export_resource_class': ManagerResource}
    # overridden delete: soft delete only
    actions = [MyDelete]
    def save_models(self):
        # Users in the 'manager' group may only create auditors: force the
        # type to '2' and warn them when they tried something else.
        obj = self.new_obj
        qs = Group.objects.get(user=self.request.user)
        if qs.name == "manager" and obj.type != '2':
            # self.new_obj.type = None
            obj.type = "2"
            self.message_user('由于您只有创建审核人的权限,故自动将身份修改为审核人', 'warning')
        obj.save()
    # hide the built-in hard delete
    @staticmethod
    def has_delete_permission(request=None):
        # Disable hard delete in the admin UI
        return False
    def queryset(self):
        qs = super(ManagerAdmin, self).queryset()
        if self.request.user.is_superuser:  # superusers can see all rows
            return qs
        else:
            # Everyone else only sees managers of type '2' or '3'.
            rs = qs.filter(Q(type='2') | Q(type='3'))
            return rs
class HospitalAdmin(object):
    """xadmin configuration for Hospital records (soft delete via MyDelete)."""
    list_display = ['hid', 'hname', 'isDelete']
    ordering = ['isDelete']
    model_icon = 'fa fa-hospital-o'
    search_fields = ['hid', 'hname']
    # readonly_fields = "isDelete"
    show_bookmarks = False
    list_filter = ["isDelete"]
    # overridden delete: soft delete only
    actions = [MyDelete]
    list_export = ''
    import_export_args = {'import_resource_class': HospitalResource, 'export_resource_class': HospitalResource}
    # hide the built-in hard delete
    @staticmethod
    def has_delete_permission(request=None):
        # Disable hard delete in the admin UI
        return False
class UserTypeAdmin(object):
    """xadmin options for UserType: inline-editable limits and ratios."""
    model_icon = 'fa fa-user-md'
    show_bookmarks = False

    # Changelist configuration; limit/ratio/change are editable in place.
    list_display = ['utype', 'limit', 'ratio', 'change']
    list_editable = ['limit', 'ratio', 'change']

    # Edit-form layout: type on its own row, numeric settings below.
    form_layout = (
        Row('utype'),
        Row('limit', 'ratio', 'change'),
    )

    # Bulk import/export wiring for the xadmin import-export plugin.
    list_export = ''
    import_export_args = {'import_resource_class': UserTypeResource,
                          'export_resource_class': UserTypeResource}

    @staticmethod
    def has_delete_permission(request=None):
        # Hide xadmin's native hard-delete.
        return False
# Add/override behaviour for the Apply model.
class ApplyAdmin(object):
    """xadmin options for Apply: status editing, filtering and soft-delete."""
    model_icon = 'fa fa-tasks'
    show_bookmarks = False

    # Changelist configuration; the status column is editable in place.
    list_display = ['aid', 'uid', 'astatus', 'atime', 'isDelete']
    ordering = ['isDelete']
    list_editable = ['astatus']
    search_fields = ['aid', 'uid__uname']
    list_filter = ['astatus', 'atime', "isDelete"]

    # Bulk import/export wiring for the xadmin import-export plugin.
    list_export = ''
    import_export_args = {'import_resource_class': ApplyResource,
                          'export_resource_class': ApplyResource}

    # Replace the built-in delete with the custom soft-delete action.
    actions = [MyDelete]

    @staticmethod
    def has_delete_permission(request=None):
        # Hide xadmin's native hard-delete; MyDelete handles removals.
        return False
class RecordAdmin(object):
    """xadmin options for reimbursement Record rows."""
    model_icon = 'fa fa-tag'
    show_bookmarks = False

    # Changelist configuration; the reimbursed amount is editable in place.
    list_display = ['aid', 'rid', 'rtime', 'money', 'money_bx', 'msg']
    list_editable = ['money_bx']
    search_fields = ['aid__aid', 'rid']

    # Edit-form layout: identifiers, then amounts, then the free-text note.
    form_layout = (
        Row('aid', 'rid'),
        Row('money', 'money_bx'),
        Row('msg'),
    )

    # Bulk import/export wiring for the xadmin import-export plugin.
    list_export = ''
    import_export_args = {'import_resource_class': RecordResource,
                          'export_resource_class': RecordResource}

    @staticmethod
    def has_delete_permission(request=None):
        # Hide xadmin's native hard-delete.
        return False
class DetailAdmin(object):
    """xadmin options for Detail rows (individual reimbursement items)."""
    model_icon = 'fa fa-tags'
    show_bookmarks = False

    # Changelist configuration; amount and status are editable in place.
    list_display = ['did', 'rid', 'dtime', 'type', 'money', 'money_bx', 'hname', 'sname', 'dstatus', 'msg', 'image_data']
    list_editable = ['money_bx', "dstatus"]
    search_fields = ['rid__rid', 'did', 'sname', 'hname']
    list_filter = ['type', 'dstatus']

    # Edit-form layout: ids, timing/type, hospital/staff, amounts, attachment,
    # note and finally the review status.
    form_layout = (
        Row('rid', 'did'),
        Row('dtime', 'type'),
        Row('hname', 'sname'),
        Row('money', 'money_bx'),
        Row('folder'),
        Row('msg'),
        Row('dstatus'),
    )

    # Bulk import/export wiring for the xadmin import-export plugin.
    list_export = ''
    import_export_args = {'import_resource_class': DetailResource,
                          'export_resource_class': DetailResource}

    @staticmethod
    def has_delete_permission(request=None):
        # Hide xadmin's native hard-delete.
        return False
class AuditAdmin(object):
    """xadmin options for Audit rows.

    Each audit row links back to the Apply record it reviews via the
    ``aid_detail`` computed column, rendered as an HTML anchor into the
    Apply changelist filtered on that application id.
    """
    list_display = ['auid', 'aid_detail', 'austatus', 'mid', 'autime']
    model_icon = 'fa fa-columns'
    list_editable = ['austatus']
    show_bookmarks = False
    search_fields = ['auid', "aid__aid"]
    list_filter = ['austatus', "autime"]

    def aid_detail(self, obj):
        # Link the application id to the Apply changelist pre-filtered on it.
        return '<a href="%s">%s</a>' % ('/xadmin/customer/apply/?_q_=' + str(obj.aid), obj.aid)
    # allow_tags tells xadmin to render the returned HTML instead of escaping it.
    aid_detail.allow_tags = True
    # Fix: assign the column header directly. The original bound it through a
    # class attribute named `str`, shadowing the builtin in the class namespace
    # and leaking a useless `AuditAdmin.str` attribute.
    aid_detail.short_description = "申请编号"

    # Bulk import/export wiring for the xadmin import-export plugin.
    list_export = ''
    import_export_args = {'import_resource_class': AuditResource, 'export_resource_class': AuditResource}

    @staticmethod
    def has_delete_permission(request=None):
        # Hide xadmin's native hard-delete.
        return False
class GlobalSetting(object):
    """Global xadmin branding: one product name for title, header and footer."""
    site_title = site_header = site_footer = 'BXXT'
# Wire the global settings view and every model into the xadmin site.
# Registration order matches the original hand-written calls.
_REGISTRATIONS = (
    (views.CommAdminView, GlobalSetting),
    (User, UserAdmin),
    (Manager, ManagerAdmin),
    (Hospital, HospitalAdmin),
    (UserType, UserTypeAdmin),
    (Audit, AuditAdmin),
    (Apply, ApplyAdmin),
    (Record, RecordAdmin),
    (Detail, DetailAdmin),
)
for _model, _options in _REGISTRATIONS:
    xadmin.site.register(_model, _options)
chrisglencross/advent-of-code | 3,015,067,052,564 | 29d2154f08c5706e718f3e624136927e19e48ee2 | ec931947aa3e06ce565637e7ee1cb707f56375a2 | /aoc2018/day22/day22.py | 5064f8e38e649d092828c7cfdca041c2c74e0e25 | []
| no_license | https://github.com/chrisglencross/advent-of-code | 5f16ed7e2265d27ce15f502ce2a1c2f11fc99fc0 | 21623d4aa01a9e20285a0233c50f8f56c4099af5 | refs/heads/master | 2023-01-24T22:01:30.829679 | 2023-01-12T23:03:03 | 2023-01-12T23:03:03 | 224,833,969 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # depth=510
# target=(10, 10)
depth = 11991
target = (6, 797)
# We really want an expandable grid...
# Set a fixed maximum
max_x = target[0] + 200
max_y = target[1] + 500
def empty_grid():
rows = []
for y in range(0, max_y + 1):
rows.append([None] * (max_x + 1))
return rows
erosion_levels = empty_grid()
risk_levels = empty_grid()
def get_geologic_index(coords):
if coords == (0, 0):
return 0
if coords == target:
return 0
x = coords[0]
y = coords[1]
if y == 0:
return x * 16807
if x == 0:
return y * 48271
return erosion_levels[y][x - 1] * erosion_levels[y - 1][x]
def get_risk_level(coords):
geologic_index = get_geologic_index(coords)
erosion_level = (geologic_index + depth) % 20183
x = coords[0]
y = coords[1]
erosion_levels[y][x] = erosion_level
risk_level = erosion_level % 3
risk_levels[y][x] = risk_level
return risk_level
# Part 1
# total_risk_level = 0
# for y in range(0, target[1] + 1):
# for x in range(0, target[0] + 1):
# total_risk_level = total_risk_level + get_risk_level((x, y))
# print(total_risk_level)
# Part 2
# Precalculate risk levels
for y in range(0, max_y + 1):
for x in range(0, max_x + 1):
get_risk_level((x, y))
def get_allowable_equip(coords):
current_risk_level = risk_levels[coords[1]][coords[0]]
if current_risk_level == 0: # Rocky
return ["climbing", "torch"]
if current_risk_level == 1: # Wet
return ["climbing", "neither"]
if current_risk_level == 2:
return ["torch", "neither"]
raise Exception("Unknown risk level: " + str(current_risk_level))
class Move:
def __init__(self, name, move, equip, cost):
self.name = name;
self.move = move
self.equip = equip
self.cost = cost
all_moves = [
Move("equip neither", None, "neither", 7),
Move("equip torch", None, "torch", 7),
Move("equip climbing", None, "climbing", 7),
Move("up", (0, -1), None, 1),
Move("left", (-1, 0), None, 1),
Move("right", (1, 0), None, 1),
Move("down", (0, 1), None, 1),
]
class State:
def __init__(self, coords, equip):
self.coords = coords
self.equip = equip
# Returns a tuple of move + next state
def next_states(self):
result = []
allowable_equip = get_allowable_equip(self.coords)
for move in all_moves:
if move.equip is not None and move.equip in allowable_equip:
result.append((move, State(self.coords, move.equip)))
elif move.move is not None:
new_coords = (self.coords[0] + move.move[0], self.coords[1] + move.move[1])
if 0 <= new_coords[0] < max_x and 0 <= new_coords[1] < max_y:
allowable_equip = get_allowable_equip(new_coords)
if self.equip in allowable_equip:
result.append((move, State(new_coords, self.equip)))
return result
def __str__(self):
return f"{self.coords}:{self.equip}"
state_scores = dict()
initial_state = State((0, 0), "torch")
state_scores[str(initial_state)] = 0
dirty_states = {initial_state}
best_target_score = None
while dirty_states:
new_dirty_states = set()
for state in dirty_states:
print(f"Scanning from {state}")
current_score = state_scores[str(state)]
for move, next_state in state.next_states():
new_score = current_score + move.cost
# No point carrying on if we've already got to target with a better score
if best_target_score is None or new_score < best_target_score:
best_score = state_scores.get(str(next_state))
# Reached this point with a new best score
if best_score is None or best_score > new_score:
state_scores[str(next_state)] = new_score
new_dirty_states.add(next_state)
# print(new_score)
if next_state.coords == target and next_state.equip == "torch":
best_target_score = new_score
print(new_score)
dirty_states = new_dirty_states
print(best_target_score)
| UTF-8 | Python | false | false | 4,257 | py | 252 | day22.py | 237 | 0.569415 | 0.546159 | 0 | 146 | 28.157534 | 91 |
omoskovko/TestTask | 14,147,622,292,151 | 8c4024f3459355f77c060aa409fe852f0d6de6db | 8bb511210b6799ab986ef6fc8b1c15694069e3a0 | /common/http_receiver.py | b6131aff3531ca3b49f12d4288447726ee2bafa2 | []
| no_license | https://github.com/omoskovko/TestTask | 15d6856f7bcbadbf1047e6127f6f476ffea1a78e | f64152bd1d66b1fcc422deb65d122e6cb9e11c1c | refs/heads/master | 2021-07-14T19:27:39.508318 | 2017-10-20T17:02:03 | 2017-10-20T17:02:03 | 107,704,441 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from urllib import request
from urllib import parse
from . import utils as common_utils
class SmartRedirectHandler(request.HTTPRedirectHandler):
i = 0
def http_error_301(self, req, fp, code, msg, headers):
result = request.HTTPRedirectHandler.http_error_301(
self, req, fp, code, msg, headers)
result.status = code
self.i += 1
return result
http_error_302 = http_error_301
http_error_303 = http_error_301
http_error_307 = http_error_301
class Receive(object):
def __init__(self, vURL, headers={}):
self.headers = headers
url = str(vURL).replace(" ", "")
if len(url) == 0:
raise(Exception("URL can't be empty"))
parsed_url = parse.urlparse(url)
if not parsed_url.hostname:
parsed_url = parse.urlparse("http://"+url)
self.url = parsed_url.geturl()
self.sHandler = SmartRedirectHandler()
def find_connectable_ip(self):
parsed_url = parse.urlparse(self.url)
ip = common_utils.find_connectable_ip(parsed_url.hostname,
port=parsed_url.port)
if not ip:
raise(Exception("Can't find connectable IP for host %s" % (parsed_url.hostname)))
return ip
def setdefaulttimeout(self, timeout):
request.socket.setdefaulttimeout(timeout)
def urlopen(self):
res = {'status': 'OK'}
req = request.Request(self.url, headers=self.headers)
opener = request.build_opener(self.sHandler)
try:
res['object'] = opener.open(req)
except Exception as url_err:
res['object'] = None
res['status'] = "Can't open URL=%s because of error: %s" % (self.url, str(url_err))
return res
| UTF-8 | Python | false | false | 1,864 | py | 4 | http_receiver.py | 2 | 0.570279 | 0.555794 | 0 | 57 | 31.701754 | 96 |
geomeza/simulation | 3,813,930,982,743 | 3109d388c0ad24b192dc2dd14414bf2fdeca225a | 319b7d15da4e3eeb087c4ca8a7783f74c422c7b3 | /src/biological_neural_network.py | c0c876c96a6871bc025f5d683859629c4619b293 | []
| no_license | https://github.com/geomeza/simulation | bb0bc60b094d4a8e7e1b0cf0977adac803d97b3b | 0c0ef394201ed9787ff76128e2f2ab8ff1397878 | refs/heads/master | 2023-01-29T09:35:03.910639 | 2020-12-09T06:31:13 | 2020-12-09T06:31:13 | 319,864,330 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from biological_neuron import BiologicalNeuron
from euler_estimator import EulerEstimator
import matplotlib.pyplot as plt
class BiologicalNeuralNetwork:
def __init__(self, neurons, synapses):
self.neurons = neurons
self.synapses = synapses
def get_derivatives(self):
derivatives = []
for i, n in enumerate(self.neurons):
senders_to_synapse = [x for x,y in self.synapses if y == i]
derivatives.append(lambda t, x, i=i, n=n, s = senders_to_synapse: n.dv_dt(t, x[i*4:(i+1)*4]) + sum([x[p*4] for p in s if x[p*4] > 50])/n.c)
derivatives.append(lambda t, x, i=i, n=n: n.dn_dt(t, x[i*4:(i+1)*4]))
derivatives.append(lambda t, x, i=i, n=n: n.dm_dt(t, x[i*4:(i+1)*4]))
derivatives.append(lambda t, x, i=i, n=n: n.dh_dt(t, x[i*4:(i+1)*4]))
return derivatives
def get_starting_point(self):
point = [0, []]
for neuron in self.neurons:
point[1].append(neuron.v_0)
point[1].append(neuron.n_0)
point[1].append(neuron.m_0)
point[1].append(neuron.h_0)
return point
| UTF-8 | Python | false | false | 1,147 | py | 2 | biological_neural_network.py | 2 | 0.575414 | 0.553618 | 0 | 30 | 37.233333 | 151 |
prayswear/ICN | 16,114,717,301,961 | dcd54a3d31d59913e453216df88c18294321f08c | 5b96b6bf5ae357deab937ea66bda5f0f8d6b4df3 | /packet/test3.py | d8862b5a770817dcadff8c488d4d33a2dd2eab0c | []
| no_license | https://github.com/prayswear/ICN | 1154ea6d25e4ce0878f2055ebf2f82a9644e97d4 | 760caae64e8a79c8c31ca60af3e6a8095578da8b | refs/heads/master | 2021-08-18T23:35:25.843931 | 2017-11-24T06:44:55 | 2017-11-24T06:44:55 | 103,603,049 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import socket
import binascii
from packet import *
import hashlib
import time
a='c0a80264'
b=socket.inet_ntoa(binascii.a2b_hex(a))
print(b)
print(time.time())
| UTF-8 | Python | false | false | 160 | py | 33 | test3.py | 31 | 0.76875 | 0.725 | 0 | 10 | 15 | 39 |
dyuanjia/CTF-Pwn | 15,023,795,621,732 | 99f3a8e94af42a61ad33527798066ef474c524ff | 1c95023e63bc4d53f9df9ad10573660ec7c476fa | /how2orw/orw.py | 1e3481ddc1f2d944e05fad9374e0502e874b1968 | []
| no_license | https://github.com/dyuanjia/CTF-Pwn | af4f90b780289d55e3d5930668551a05e885fdab | 0f655b4f38293a4b2bfc375b9d0e8d1e591981d7 | refs/heads/master | 2020-12-21T02:31:54.061899 | 2020-01-27T12:48:33 | 2020-01-27T12:48:33 | 236,279,121 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
from pwn import *
context.arch = 'amd64'
binary = remote('edu-ctf.csie.org', 10171)
shellcode = asm('''
mov rax, 0x67616c662f77
push rax
mov rax, 0x726f2f656d6f682f
push rax
mov rdi, rsp
// move pointer to /home/orw/flag to rdi, little endian
xor rsi, rsi
xor rdx, rdx
mov rax, 2
syscall
// open will return a fd at rax
mov rdi, rax
// set the top of the stack as the start of buffer
mov rsi, rsp
mov rdx, 0x50
mov rax, 0
syscall
// fd=1 means stdout
mov rdi, 1
mov rax, 1
syscall
''')
'''
shellcode = asm(
shellcraft.pushstr( "/home/orw/flag" ) +
shellcraft.open('rsp', 0, 0) +
shellcraft.read('rax', 'rsp', 0x30) +
shellcraft.write(1, 'rsp', 0x30)
)
'''
binary.sendafter('>', shellcode)
binary.sendlineafter(':)', b'a' * 0x18 + p64(0x6010a0))
binary.interactive()
| UTF-8 | Python | false | false | 977 | py | 44 | orw.py | 19 | 0.555783 | 0.495394 | 0 | 44 | 21.181818 | 63 |
llienofdoom/nv_pdfmerge | 6,614,249,641,216 | 24ff9a12dd3594713a8d3af88616f3388236c237 | 9a6383dd3303d99cd495a1bb7da953a3d95322a0 | /nv_pdfmerge.py | 9a7ecc7208b93ce678663c6ba65b23c487c79f8e | []
| no_license | https://github.com/llienofdoom/nv_pdfmerge | 7f7f0254ce0be15092df8dc6a542687d4ef161d2 | 7610568e50cd19c68850e7585adf39dfee25ae34 | refs/heads/master | 2023-05-30T18:08:33.035761 | 2021-06-10T11:58:01 | 2021-06-10T11:58:01 | 372,633,965 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import os
import glob
from PyPDF2 import PdfFileReader, PdfFileWriter
input_folder = sys.argv[1]
if input_folder == None:
print('Please drag a folder onto the app, like a boss. Not what you just did. Like a lame-o...')
sys.exit()
else:
print('Working in %s' % input_folder)
print('Collecting initial file names to merge...')
pdf_files_to_merge = []
try:
pdf_files_to_merge.append(glob.glob( os.path.join(input_folder, '*Index*') )[0])
pdf_files_to_merge.append(glob.glob( os.path.join(input_folder, '*Executive*') )[0])
pdf_files_to_merge.append(glob.glob( os.path.join(input_folder, '*Balance*') )[0])
pdf_files_to_merge.append(glob.glob( os.path.join(input_folder, '*Profit*') )[0])
pdf_files_to_merge.append(glob.glob( os.path.join(input_folder, '*Cash*') )[0])
pdf_files_to_merge.append(glob.glob( os.path.join(input_folder, '*Receivables*') )[0])
pdf_files_to_merge.append(glob.glob( os.path.join(input_folder, '*Payables*') )[0])
except:
print('Files needed for merge NOT found in selected folder. Please be better at what you do...')
sys.exit()
print('Done! Carying on...')
print('Merging Files...')
pdf_writer = PdfFileWriter()
for pdf_file in pdf_files_to_merge:
pdf_reader = PdfFileReader(pdf_file)
for page in range(pdf_reader.getNumPages()):
pdf_writer.addPage(pdf_reader.getPage(page))
print('Done!')
print('Generating Output. Hang on Tight!')
output = os.path.join(input_folder, 'Financial Report VentureWeb International Limited MONTH YEAR.pdf')
with open(output, 'wb') as out:
pdf_writer.write(out)
print('Done! Go have a margarita! ')
| UTF-8 | Python | false | false | 1,767 | py | 4 | nv_pdfmerge.py | 2 | 0.639502 | 0.634409 | 0 | 41 | 42.097561 | 107 |
Aparna-B/JudgingNorms | 12,618,613,919,658 | 22f4d8f26093957e173bc89e9353d8c59cbf7356 | 786c9b92d482b942ac9257a045feee688e2633b7 | /lib/compute_kruskal_wilcoxon.py | 1059cbf5afed7080e3988ea2471f44d185a5ac8e | [
"MIT"
]
| permissive | https://github.com/Aparna-B/JudgingNorms | 6e6cd4c0c7d62bdce1bc5dd2cb7db3b16ef77b52 | 558640989d6d83f60ea393e7795585c79391bd96 | refs/heads/main | 2023-04-15T11:03:45.012036 | 2023-04-01T20:42:26 | 2023-04-01T20:42:26 | 620,075,917 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
import pandas as pd
import numpy as np
from scipy.stats import kruskal, wilcoxon
def compute_mean_sig(dataset, condition1,
condition2, category,
verbose=False):
"""Get proportion of images significantly different between conditions,
wrt contention_def group.
Parameters:
dataset: str, csv file path to dataset (e.g., for the Clothing dataset).
condition1: str, either descriptive, normative, or context
condition2: str, either descriptive, normative, or context
category: int, 0: OR of labels, 1/2/3: factual features.
Returns:
4 floats, results of Kruskal Wallis H-test and Wilcoxon signed rank test.
"""
# reading in the dataset
df_group1 = pd.read_csv(dataset.format(condition1))
df_group1 = df_group1.groupby('imgname').mean().reset_index()
df_group2 = pd.read_csv(dataset.format(condition2))
df_group2 = df_group2.groupby('imgname').mean().reset_index()
df_group1 = df_group1.sort_values('imgname')
df_group2 = df_group2.sort_values('imgname')
assert (df_group1.imgname == df_group2.imgname).all()
# Statistical test to test if mean label is significantly different (unpaired).
kruskal_stats, kruskal_p = kruskal(
df_group1['{}{}'.format(condition1, category)].values,
df_group2['{}{}'.format(condition2, category)].values)
# Statistical test to test if mean label is significantly different.
# (paired, one-sided).
wilcoxon_stats, wilcoxon_p = wilcoxon(
df_group1['{}{}'.format(condition1, category)].values,
df_group2['{}{}'.format(condition2, category)].values,
alternative='greater')
return (kruskal_stats, kruskal_p,
wilcoxon_stats, wilcoxon_p)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Compute significant differences between labels.")
parser.add_argument(
"-g1",
"--group1",
help="first labelling condition in stat test",
action="store",
type=str,
default='descriptive',
required=False)
parser.add_argument(
"-g2",
"--group2",
help="second labelling condition in stat test",
action="store",
type=str,
default='normative',
required=False)
parser.add_argument(
"-c",
"--attribute_category",
help="category to compare across conditions (0/1/2/3,"
"where 0 refers to OR of labels)",
action="store",
type=int,
default=0,
required=False)
args = parser.parse_args()
dataset_results = []
datasets = ['data_dir/dress/{}_labels.csv',
'data_dir/meal/{}_labels.csv',
'data_dir/pet/{}_labels.csv',
'data_dir/toxicity/{}_labels.csv']
dataset_name = []
for dataset in datasets:
result = compute_mean_sig(dataset, args.group1,
args.group2, args.attribute_category,
verbose=False)
dataset_results.append(result)
print(dataset)
dataset_name.append(dataset)
dataset_results = np.array(dataset_results)
df_result = pd.DataFrame(
{
'Kruskal stat': dataset_results[:, 0],
'Kruskal pval': dataset_results[:, 1],
'Wilcoxon stat': dataset_results[:, 2],
'Wilcoxon pval': dataset_results[:, 3],
'dataset': dataset_name
}
)
df_result.to_csv('data_dir/{}_{}_stat_tests.csv'.format(
args.group1,
args.group2
), index=False)
| UTF-8 | Python | false | false | 3,663 | py | 39 | compute_kruskal_wilcoxon.py | 21 | 0.596779 | 0.583402 | 0 | 108 | 32.907407 | 83 |
rashiraffi/Tutorials | 18,013,092,852,140 | cf02cd8b438125c3b20a98fece3728c95bf896fb | ce5beb5e0e7abcfd8a5e90b8b4b5d5fb7cc1fd8a | /PYTHON/OOP/helloworld.py | ccefc02260e5be9edc81b6a5c0191490edba4c47 | []
| no_license | https://github.com/rashiraffi/Tutorials | f8b92dfe8388c4f617e277e3635c1c322708768c | 6fc6fb7a72e6774b7b785b2e37b5bcd853e31869 | refs/heads/master | 2021-06-09T05:45:01.558160 | 2021-05-29T16:16:04 | 2021-05-29T16:16:04 | 181,651,412 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Class creation
class MyClass:
s="Hello World..!"
#Object Creation
s1=MyClass
print(s1.s) | UTF-8 | Python | false | false | 93 | py | 111 | helloworld.py | 81 | 0.72043 | 0.698925 | 0 | 6 | 14.666667 | 22 |
bryan1188/DNHS-EVS | 13,632,226,204,485 | aade51625e76e42b2d0a894ad603a736e02c8baf | f4730681e6fd02ec9ab40b0b82d32955e44c0dc4 | /E_Voting/election/management/helpers/declare_winner.py | 39366a5fdf8a10792d6cb0ae6a6237dacba794ee | []
| no_license | https://github.com/bryan1188/DNHS-EVS | eb7dc1578be261bbe4fbafcd35c8238fc7f48e31 | 8449c552ba2c0d113b5f7dca2722a41897d11e56 | refs/heads/master | 2022-12-15T05:23:48.783753 | 2019-06-24T06:56:21 | 2019-06-24T06:56:21 | 168,844,378 | 0 | 0 | null | false | 2022-12-08T05:13:56 | 2019-02-02T15:13:47 | 2019-06-24T06:56:38 | 2022-12-08T05:13:55 | 83,432 | 0 | 0 | 3 | JavaScript | false | false | from registration.models import Vote
from reporting.models import WinnerCandidateDenormalized
from operator import itemgetter
from reporting.management.helpers.bulk_create_helper import BulkCreateManager
def declare_winners(election):
'''
For a given election, declare the winners based on the votes
Detect if there is a tie, System admin will manually delete the winner
after the 'toss-coin'
Steps:
Algo #1
1. Get positions of the election
2. Iterate positions
3. Iterate candidate for every position and get the votes
4. Create list of dictionaries for candidate and vote_count
5. sort the list by vote_count using reverse=True
https://stackoverflow.com/questions/72899/how-do-i-sort-a-list-of-dictionaries-by-a-value-of-the-dictionary
6. get the winners depending on number of slots
7. Check for a tie
'''
winner_candidate_bulk_mgr = BulkCreateManager(chunk_size=100)
# 1
positions = election.positions.all()
# 2
for position in positions:
# 3
candidate_list = list()
for candidate in position.candidates.filter(election=election):
candidate_dictionary = dict()
candidate_dictionary['candidate'] = candidate
candidate_dictionary['vote_count'] = candidate.votes.count()
candidate_dictionary['tie_flag'] = False
# 4
candidate_list.append(candidate_dictionary)
# 5
candidate_list = sorted(
candidate_list,
key=itemgetter('vote_count'),
reverse=True
)
# 6
number_of_slots = position.number_of_slots
winner_list = candidate_list[:number_of_slots]
tie_flag = False
# 7
tie_pointer = number_of_slots #to prevent out of range error
while len(candidate_list) > tie_pointer \
and candidate_list[tie_pointer]['vote_count'] == winner_list[-1]['vote_count']:
winner_list.append(candidate_list[tie_pointer])
tie_pointer += 1
tie_flag = True
if tie_flag:
# check from the end of winner_list
# get the vote_count
tie_vote_count = winner_list[-1]['vote_count']
#check all in the winner_list which have the same number of tie_vote_count
#then flagged as tie True
for winner in winner_list:
if winner['vote_count'] == tie_vote_count:
winner['tie_flag'] = True
#8 Push to the database using bulk_manager
for winner in winner_list:
winner_candidate_bulk_mgr.add(WinnerCandidateDenormalized(
candidate_name = winner['candidate'].student.__str__(),
candidate_sex = winner['candidate'].student.sex.__str__(),
candidate_age = winner['candidate'].student.age,
candidate_mother_tongue = winner['candidate'].student.mother_tongue.mother_tongue,
candidate_ethnic_group = winner['candidate'].student.ethnic_group.ethnic_group,
candidate_religion = winner['candidate'].student.religion.religion,
candidate_address_barangay = winner['candidate'].student.address_barangay.address_barangay,
candidate_address_municipality = winner['candidate'].student.address_municipality.address_municipality,
candidate_address_province = winner['candidate'].student.address_province.address_province,
candidate_class_grade_level = winner['candidate'].candidate_class.grade_level,
candidate_class_section = winner['candidate'].candidate_class.section,
candidate_party = winner['candidate'].party.name,
candidate_position = winner['candidate'].position.title,
candidate_position_number_of_slots = winner['candidate'].position.number_of_slots,
candidate_position_priority = winner['candidate'].position.priority,
number_of_votes = winner['vote_count'],
election_id = election.id,
election_name = election.name,
election_school_year = election.school_year,
election_day_from = election.election_day_from,
election_day_to = election.election_day_to,
tie = winner['tie_flag']
)
)
winner_candidate_bulk_mgr.done()
| UTF-8 | Python | false | false | 4,786 | py | 161 | declare_winner.py | 112 | 0.579816 | 0.574175 | 0 | 99 | 47.343434 | 127 |
jsong336/live-there | 584,115,594,311 | c7950b0837a9470c344a6ca2749ccb833d289179 | 51b0f4503aa608c716bf6cbc275411993bc93c5c | /_scripts/CSVConverters/__main__.py | bb9ce2834e1d7729b7540398922603a7f67235e2 | []
| no_license | https://github.com/jsong336/live-there | 11a3b584977d533bebf57d5895f6d90b490c1024 | d3f0ee0dc30a6595875cfef50fe13bd95a0dbeef | refs/heads/master | 2023-03-02T20:51:37.012348 | 2020-09-08T22:49:37 | 2020-09-08T22:49:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """ Entry point of conversion module """
from UniversityCSVConvert import UniversityConverter, UniversityUpdater, AverageUtilityFeeConverter
import os
import sys
CSV_RAW_PATH = os.path.join(os.path.dirname(__file__), 'csv', 'raw')
if __name__ == '__main__':
args = sys.argv
if len(args) > 1:
if args[1] == '--university':
# Execute university csv conversion
# Retrieve parameters
host = '35.225.74.52' #input('Enter Host: ')
user = 'root' #input('Enter user: ')
pw = 'livethere2020' #input('Enter password: ')
# Configure path
uni_csv_path = os.path.join(CSV_RAW_PATH, 'university.csv')
print(f'Converting CSV @ {uni_csv_path} to SQL')
converter = UniversityConverter(host, user, pw, uni_csv_path)
commit = input('To commit type y [y/n] ')
if commit == 'y':
# Commit new changes
converter.commit()
print('DB is updated.')
# # Retrieve parameters
# host = '35.225.74.52' #input('Enter Host: ')
# user = 'root' #input('Enter user: ')
# pw = 'livethere2020' #input('Enter password: ')
# uni_csv_path = os.path.join(CSV_RAW_PATH, 'university.csv')
# # updater = UniversityUpdater(host, user, pw, uni_csv_path)
# # updater.commit()
# adder = AverageUtilityFeeConverter(host, user, pw, uni_csv_path)
# adder.commit()
| UTF-8 | Python | false | false | 1,455 | py | 83 | __main__.py | 50 | 0.57732 | 0.558076 | 0 | 39 | 36.179487 | 99 |
djdhiraj/Machine-Learning | 9,216,999,850,500 | f2d28d4622dee393982cd9b4695bd317991a9a8a | ac79c8082d0def1a7f7ff42b9241c8bbfcf6b468 | /Python/Advance_sqlalchemy.py | f4efe755cfe54e8d257f2a857675cf7e7704608c | []
| no_license | https://github.com/djdhiraj/Machine-Learning | b0a272b4968750131a826868a2f03fd74eea9cf5 | ac6bba19e745a0d994ff9f118afbc4ff8dcda8ac | refs/heads/master | 2020-03-26T20:24:38.211017 | 2019-08-28T10:10:07 | 2019-08-28T10:10:07 | 145,322,260 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String, union, union_all, except_, intersect
engine = create_engine('sqlite:///college.db', echo = True)
meta = MetaData()
conn = engine.connect()
addresses = Table(
'addresses', meta,
Column('id', Integer, primary_key = True),
Column('st_id', Integer),
Column('postal_add', String),
Column('email_add', String)
)
u = union(addresses.select().where(addresses.c.email_add.like('%@gmail.com')), addresses.select().where(addresses.c.email_add.like('%@yahoo.com')))
result = conn.execute(u)
result.fetchall()
u = union_all(addresses.select().where(addresses.c.email_add.like('%@gmail.com')), addresses.select().where(addresses.c.email_add.like('%@yahoo.com')))
result = conn.execute(u)
result.fetchall()
u = except_(addresses.select().where(addresses.c.email_add.like('%@gmail.com')), addresses.select().where(addresses.c.postal_add.like('%Pune')))
result = conn.execute(u)
result.fetchall()
u = intersect(addresses.select().where(addresses.c.email_add.like('%@yahoo.com')), addresses.select().where(addresses.c.postal_add.like('%Delhi')))
result = conn.execute(u)
result.fetchall()
| UTF-8 | Python | false | false | 1,172 | py | 23 | Advance_sqlalchemy.py | 13 | 0.709044 | 0.709044 | 0 | 25 | 45.88 | 151 |
durrantmm/bhattlab | 5,927,054,893,474 | 38aa755f40ebc0f3da7fe06524ca6899356a8c28 | ca442ba67cf621e6aad397806bae0fc11f5020a3 | /mytools/scripts/iscounter_consolidate_rank.py | 161ed61814b034f61f614af38399446bffac43ca | []
| no_license | https://github.com/durrantmm/bhattlab | 1424157a3ba5deaf005bc0b2f3d69da24b793c32 | d6d86dda754462bdce9de2c74681a3119dbb8da8 | refs/heads/master | 2021-01-11T03:41:21.118770 | 2016-12-06T01:10:23 | 2016-12-06T01:10:23 | 71,401,317 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
import sys
from collections import defaultdict
def main(args):
header = ''
results_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(tuple)))
all_taxa = set()
with open(args['results']) as infile:
header = infile.readline().strip().split()
for line in infile:
line = {header[i]:line.strip().split()[i] for i in range(len(header))}
results_dict[line['Date']][line['Taxon']][line['InsertionSequence']] = (int(line['InitialReadCount']),
int(line['NumAlignedReads']))
all_taxa.add(line['Taxon'])
taxon_dict = get_taxon_nodes(args['nodes'])
consolidation_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: [0,0])))
for date in results_dict:
sub_taxa = defaultdict(set)
for taxon in results_dict[date]:
cur_taxon = taxon_dict[taxon][1]
children = set([taxon])
while cur_taxon != '1':
if taxon_dict[cur_taxon][0] == 'species' or taxon_dict[cur_taxon][0] == 'genus':
for child in children:
sub_taxa[cur_taxon].add(child)
cur_taxon = taxon_dict[cur_taxon][1]
children.add(cur_taxon)
for taxon in sub_taxa:
sub_taxa[taxon] = sub_taxa[taxon]-set([taxon])
for taxon in results_dict[date]:
if taxon in sub_taxa.keys():
children = sub_taxa[taxon]
for child in children:
if child in results_dict[date]:
for IS in results_dict[date][child]:
consolidation_dict[date][taxon][IS][0] += results_dict[date][child][IS][0]
consolidation_dict[date][taxon][IS][1] += results_dict[date][child][IS][1]
for IS in results_dict[date][taxon]:
consolidation_dict[date][taxon][IS][0] += results_dict[date][taxon][IS][0]
consolidation_dict[date][taxon][IS][1] += results_dict[date][taxon][IS][1]
for date in consolidation_dict:
for taxon in consolidation_dict[date]:
for IS in consolidation_dict[date][taxon]:
total_reads = consolidation_dict[date][taxon][IS][0]
num_aligned_reads = consolidation_dict[date][taxon][IS][1]
print "\t".join([date, taxon+"-CONS", IS, str(total_reads), str(num_aligned_reads),
str(num_aligned_reads/float(total_reads))])
def get_taxon_nodes(nodes_locations, logger=None):
assert type(nodes_locations) is list, "The nodes location must be a list of file locations."
taxon_nodes_dict = {}
for location in nodes_locations:
with open(location) as nodes_in:
for line in nodes_in:
line = line.strip().split("|")
id = line[0].strip()
parent_id = (line[2].strip(), line[1].strip())
taxon_nodes_dict[id] = parent_id
return taxon_nodes_dict
if __name__ == "__main__":
# setup the option parser
parser = argparse.ArgumentParser(description='Quickly get the taxon id for a given')
parser.add_argument('results', help='FILL THIS OUT')
parser.add_argument('--nodes', required=False,
default=["/srv/gsfs0/projects/bhatt/mdurrant/my_code/bhattlab/mytools/iscounter2/data/TaxonomyDatabase/nodes.dmp",
"/srv/gsfs0/projects/bhatt/mdurrant/my_code/bhattlab/mytools/iscounter2/data/TaxonomyDatabase/merged.dmp"],
help='FILL THIS OUT')
args = parser.parse_args()
args = vars(args)
main(args) | UTF-8 | Python | false | false | 3,769 | py | 34 | iscounter_consolidate_rank.py | 34 | 0.561953 | 0.555585 | 0 | 84 | 43.880952 | 139 |
ddjddd/Algorithm-Study | 730,144,464,912 | 634b1fa76919c0d7df138894b596e536651d2377 | d0e2aa848f35efe2aae72e649ea9267d2443472c | /Backjoon_Onlie_Judge/2000/2052.py | 42db4b83774b7dff6eb8a9c97f357b487417f9b4 | []
| no_license | https://github.com/ddjddd/Algorithm-Study | 376f72849506841be0351dfdfe9b1ca95d197956 | 6ec72ae4de9d52592b76af6e4e8246c9fdeb946f | refs/heads/master | 2023-04-14T10:05:09.303548 | 2023-04-06T14:16:50 | 2023-04-06T14:16:50 | 121,474,738 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
input = int(sys.stdin.readline())
ret = str(5**input)
ret = '0.'+ret.zfill(input)
print(ret)
| UTF-8 | Python | false | false | 105 | py | 481 | 2052.py | 479 | 0.666667 | 0.647619 | 0 | 6 | 16.5 | 33 |
crosal-research/research | 7,980,049,249,355 | d63ed304cb6d0d599c993bbc4238c78b02c23d04 | 1f34184151e75b6e29a95f7f47bdf89d020991ac | /data/scripts/yahoo.py | 89d13602a9c2e21b6e89cf2bb67084f51001e32a | []
| no_license | https://github.com/crosal-research/research | ab9cc34a7d5fa56ba26b7580e1e9d4a59873981a | 8644500492e89c1c987633719706673b8129cf78 | refs/heads/master | 2020-04-12T06:33:33.227696 | 2016-10-19T02:09:59 | 2016-10-19T02:09:59 | 62,669,319 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ######################################################################
# script to retrieve data from yahoo finance
# initial data: 16/07/2016
######################################################################
import pandas as pd
__all__ = ["fetch_yahoo"]
def fetch_yahoo(tickers, date_ini, date_final):
'''
retrieves data from yahoo finance. Return data frame with date and closing adj price
inputs:
- tickers: [string] - yahoo' tickers
- date_ini: string - initial date (format - ex: 2016/07/01)
- date_final: string - final date (format - same as above)
output:
- pandas data frame
'''
url = _build_url(tickers[0], date_ini, date_final)
df = pd.read_csv(url, index_col=0, usecols=[0, 6], skiprows=[0],
names = ["Date", tickers[0]]).sort_index(ascending=True)
for t in tickers[1:]:
url = _build_url(t, date_ini, date_final)
df = pd.merge(df,
pd.read_csv(url, index_col=0, usecols=[0,6], skiprows=[0],
names = ["Date", t]).sort_index(ascending=True),
left_index=True, right_index=True, how="outer")
df.columns = tickers
return df
def _build_url(ticker, date_ini, date_final):
'''
function to form url.
input:
- ticker: string - asset ticker
- date_ini: initial date - format: m/d/Y
- date_final: final date - format: m/d/Y
output:
- string - valid yahoo's url
'''
d_ini = date_ini.split("/")
d_final = date_final.split("/")
dates = "a={}&b={}&c={}&d={}&e={}&f={}&g=d&i" \
"gnore=.csv".format(str(int(d_ini[0])-1), str(int(d_ini[1])), d_ini[2],
str(int(d_final[0])-1), str(int(d_final[1])), d_final[2])
return "http://chart.finance.yahoo.com/table.csv?s={}&{}".format(ticker, dates)
| UTF-8 | Python | false | false | 1,852 | py | 95 | yahoo.py | 91 | 0.518898 | 0.5 | 0 | 47 | 38.404255 | 89 |
kawazrepos/Kawaz | 1,915,555,437,208 | a1fecb279676342d692a10f22bceaa0dc5a40516 | c5266222639aae5054ebf409f345446514965f91 | /src/Kawaz/bugwaz/urls.py | d433d414bf41fbb268e1d5889fce306b56d15f6a | []
| no_license | https://github.com/kawazrepos/Kawaz | 1a2ac76ccb453fa060aaac31494bc6d25188255b | 13b86253d8b027530f32fc714d1e3738191a433e | refs/heads/master | 2016-08-06T00:33:53.361492 | 2014-05-04T19:32:59 | 2014-05-04T19:32:59 | 2,407,362 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
#
# Author: alisue
# Date: 2010/10/25
#
from django.conf.urls.defaults import *
import views
components_patterns = patterns('',
url(r'^$', views.component_list, name='bugwaz-component-list'),
url(r'^(?P<object_id>\d+)/$', views.component_detail, name='bugwaz-component-detail'),
url(r'^create/$', views.create_component, name='bugwaz-component-create'),
url(r'^(?P<object_id>\d+)/update/$', views.update_component, name='bugwaz-component-update'),
url(r'^(?P<object_id>\d+)/delete/$', views.delete_component, name='bugwaz-component-delete'),
)
versions_patterns = patterns('',
url(r'^$', views.version_list, name='bugwaz-version-list'),
url(r'^(?P<object_id>\d+)/$', views.version_detail, name='bugwaz-version-detail'),
url(r'^create/$', views.create_version, name='bugwaz-version-create'),
url(r'^(?P<object_id>\d+)/update/$', views.update_version, name='bugwaz-version-update'),
url(r'^(?P<object_id>\d+)/delete/$', views.delete_version, name='bugwaz-version-delete'),
)
reports_patterns = patterns('',
url(r'^$', views.report_list, name='bugwaz-report-list'),
url(r'^(?P<object_id>\d+)/$', views.report_detail, name='bugwaz-report-detail'),
url(r'^create/$', views.create_report, name='bugwaz-report-create'),
url(r'^(?P<object_id>\d+)/update/$', views.update_report, name='bugwaz-report-update'),
url(r'^(?P<object_id>\d+)/update_status/$', views.update_report_status, name='bugwaz-report-update-status'),
url(r'^(?P<object_id>\d+)/delete/$', views.delete_report, name='bugwaz-report-delete'),
url(r'^(?P<object_id>\d+)/charge/$', views.charge_report, name='bugwaz-report-charge'),
url(r'^(?P<object_id>\d+)/discharge/$', views.discharge_report, name='bugwaz-report-discharge'),
)
extra_patterns = patterns('',
(r'^components/', include(components_patterns)),
(r'^versions/', include(versions_patterns)),
(r'^reports/', include(reports_patterns)),
)
urlpatterns = patterns('',
url(r'^$', views.product_list, name='bugwaz-product-list'),
url(r'^(?P<object_id>\d+)/$', views.product_detail, name='bugwaz-product-detail'),
url(r'^create/$', views.create_product, name='bugwaz-product-create'),
url(r'^(?P<object_id>\d+)/update/$', views.update_product, name='bugwaz-product-update'),
url(r'^(?P<object_id>\d+)/delete/$', views.delete_product, name='bugwaz-product-delete'),
(r'^(?P<product>\d+)/', include(extra_patterns)),
) | UTF-8 | Python | false | false | 2,998 | py | 627 | urls.py | 365 | 0.536358 | 0.533356 | 0 | 46 | 64.195652 | 112 |
adammpoland/homebase | 15,384,572,879,877 | 7b125a32b66e02657936fa8acc7473e2f2948e85 | 85c2e7405b305b20afb5a7b2716fe34a2c5f884e | /public/uploads/bigredIII.py | 0557ebf16adc788a567f47ef39fae622e5313fcd | []
| no_license | https://github.com/adammpoland/homebase | dd246d738f52d8babeef8287ef02cfd243efaa23 | 2975e31effa683f604d909143aa1de213c5f931d | refs/heads/master | 2022-12-13T16:43:59.603895 | 2019-06-21T17:53:47 | 2019-06-21T17:53:47 | 124,105,871 | 0 | 0 | null | false | 2022-12-10T19:18:38 | 2018-03-06T16:25:35 | 2019-06-21T17:53:49 | 2022-12-10T19:18:38 | 44,635 | 0 | 0 | 5 | HTML | false | false | from __future__ import division
import time
import RPi.GPIO as GPIO
from time import sleep
import Adafruit_PCA9685
pwm = Adafruit_PCA9685.PCA9685()
#servo_min 150 and servo_max = 600
servo_min = 150 # Min pulse length out of 4096
servo_max = 600 # Max pulse length out of 4096
straight_up = 390
def set_servo_pulse(channel, pulse):
pulse_length = 1000000 # 1,000,000 us per second
pulse_length //= 60 # 60 Hz
print('{0}us per period'.format(pulse_length))
pulse_length //= 4096 # 12 bits of resolution
print('{0}us per bit'.format(pulse_length))
pulse *= 1000
pulse //= pulse_length
pwm.set_pwm(channel, 0, pulse)
# Set frequency to 60hz, good for servos.
pwm.set_pwm_freq(60)
IRTrackingPinR = 18
IRTrackingPinC = 15
IRTrackingPinL = 14
front = 24
IRTrackingPinBall = 23
def setup():
GPIO.setmode(GPIO.BCM)
GPIO.setup(IRTrackingPinR, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def setup1():
GPIO.setmode(GPIO.BCM)
GPIO.setup(IRTrackingPinC, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def setup2():
GPIO.setmode(GPIO.BCM)
GPIO.setup(IRTrackingPinL, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def setup5():
GPIO.setmode(GPIO.BCM)
GPIO.setup(IRTrackingPinBall, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def setup3():
GPIO.setmode(GPIO.BCM)
GPIO.setup(front, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def setup4():
GPIO.setmode(GPIO.BCM)
GPIO.setup(21, GPIO.OUT)
#/////////////////////////////////////////////////////////////
def forward():
pwm.set_pwm(0,0,servo_min)
pwm.set_pwm(1,0,servo_max)
sleep(1)
def turnRight():
pwm.set_pwm(1,0,servo_max)
pwm.set_pwm(0,0,servo_max)
sleep(0.57)
def turnLeft():
pwm.set_pwm(0,0,servo_min)
pwm.set_pwm(1,0,servo_min)
sleep(0.50)
def destroy():
pwm.set_pwm(0,0, straight_up+1)
pwm.set_pwm(1,0, straight_up)
sleep(2)
GPIO.output(21, GPIO.LOW)
GPIO.cleanup()
#GPIO.output(21, GPIO.LOW)
def shoot():
#shooter code goes here
def option1():
shoot()
forward()
turnRight()
forward()
setup()
setup1()
setup2()
setup3()
setup4()
setup5()
try:
option1()
except KeyboardInterrupt:
destroy() | UTF-8 | Python | false | false | 2,275 | py | 5 | bigredIII.py | 2 | 0.615385 | 0.561319 | 0 | 106 | 19.481132 | 65 |
AkaiTsuki/machine-learning-feature-analysis | 16,312,285,798,311 | 89899a486dc53df465e68f8e03d0bfb81a7b2cbc | 7c8e0733b9eb138fc3a8f37a53281864afb1cc2b | /nulearn/linear_model.py | 7acb12bc485665e2d53169de4798d5ad8bf7020f | []
| no_license | https://github.com/AkaiTsuki/machine-learning-feature-analysis | 5bef501196c217624a4dbc805be305e74761440e | 220da2f446c64a603b87e4317dc038febbb86773 | refs/heads/master | 2020-04-05T23:25:35.829383 | 2014-11-12T21:35:30 | 2014-11-12T21:35:30 | 26,200,641 | 3 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'jiachiliu'
from numpy.linalg import inv
import numpy as np
from validation import mse
import sys
class LinearRegression(object):
"""docstring for LinearRegression"""
def __init__(self):
self.weights = None
def fit(self, train, target):
self.weights = inv(train.T.dot(train)).dot(train.T).dot(target)
return self
def predict(self, test):
return test.dot(self.weights)
class GradientDescendingRegression(LinearRegression):
def __init__(self):
LinearRegression.__init__(self)
def fit(self, train, target, alpha=0.00001, max_loop=1500):
m, n = train.shape
self.weights = np.ones(n)
for k in range(max_loop):
predict = self.predict(train)
error = predict - target
self.weights -= alpha * train.T.dot(error)
return self
class StochasticGradientDescendingRegression(LinearRegression):
def __init__(self):
LinearRegression.__init__(self)
def fit(self, train, target, alpha=0.0001, max_loop=130, converge=0.001):
m, n = train.shape
self.weights = np.zeros(n)
for k in range(max_loop):
prev_error = mse(self.predict(train), target)
self.print_progress(k, prev_error)
for t in range(m):
data_point = train[t]
error = self.predict(data_point) - target[t]
self.weights -= alpha * error * data_point
print self.weights
if abs(prev_error - mse(self.predict(train), target)) <= converge:
break
return self
@staticmethod
def print_progress(k, cost):
print "Iteration: %s, error: %s" % (k + 1, cost)
class LogisticGradientDescendingRegression(StochasticGradientDescendingRegression):
def __init__(self):
LinearRegression.__init__(self)
def fit(self, train, target, alpha=0.0001, max_loop=130, converge=0.001):
m, n = train.shape
self.weights = np.zeros(n)
for k in range(max_loop):
prev_error = mse(self.predict(train), target)
self.print_progress(k, prev_error)
for t in xrange(m):
data_point = train[t]
predict = self.predict(data_point)
error = predict - target[t]
self.weights -= alpha * error * predict * (1.0 - predict) * data_point
if abs(prev_error - mse(self.predict(train), target)) <= converge:
break
return self
@staticmethod
def sigmoid(vals):
return 1.0 / (1 + np.exp(-vals))
def predict(self, test):
return self.sigmoid(test.dot(self.weights))
@staticmethod
def convert_to_binary(vals, threshold=0.5):
return map(lambda v: 1 if v >= threshold else 0, vals)
class BatchLogisticRegression(LogisticGradientDescendingRegression):
def fit(self, train, target, alpha=0.0001, max_loop=1300, converge=0.0001, beta=10):
m, n = train.shape
self.weights = np.ones(n)
for k in range(max_loop):
prev_error = mse(self.predict(train), target)
self.print_progress(k, prev_error)
predict = self.predict(train)
error = predict - target
self.weights -= alpha * (train.T.dot(error) + beta * self.weights)
return self
class RidgedLogisticRegression(LogisticGradientDescendingRegression):
def __init__(self):
LogisticGradientDescendingRegression.__init__(self)
def fit(self, train, target, alpha=0.0001, max_loop=1000, converge=0.0001, beta=0.001):
m, n = train.shape
self.weights = np.zeros(n)
for k in range(max_loop):
prev_error = mse(self.predict(train), target)
self.print_progress(k, prev_error)
for t in xrange(m):
data_point = train[t]
predict = self.predict(data_point)
error = predict - target[t]
self.weights -= alpha * ((error * data_point) + beta * self.weights)
if abs(prev_error - mse(self.predict(train), target)) <= converge:
break
return self
class Perceptron:
def __init__(self):
self.weights = None
def predict(self, test):
return test.dot(self.weights)
def fit(self, train, target):
m, n = train.shape
self.weights = np.zeros(n)
train, target = self.flip(train, target)
k = 0
while not self.all_positive(train):
count = 0
for features, label in zip(train, target):
if self.predict(features) <= 0:
count += 1
self.weights += features
print 'Iteration %d, total mistakes: %d' % (k + 1, count)
k += 1
print 'Iteration %d, total mistakes: %d' % (k + 1, self.total_error(self.predict(train)))
@staticmethod
def total_error(predict):
count = 0
for p in predict:
if p <= 0:
count += 1
return count
def all_positive(self, train):
for features in train:
if self.predict(features) <= 0:
return False
return True
@staticmethod
def flip(train, target):
new_train = []
new_target = []
for features, label in zip(train, target):
if label == -1:
new_train.append(-features)
new_target.append(-label)
else:
new_train.append(features)
new_target.append(label)
return np.array(new_train), np.array(new_target) | UTF-8 | Python | false | false | 5,666 | py | 18 | linear_model.py | 17 | 0.567596 | 0.551712 | 0 | 174 | 31.568966 | 97 |
Einstein-NTE/einstein | 4,698,694,260,111 | 931d070e03bba04e31a8f0a6ba41ed327172622e | 92ae11d4f1c880ce77199184081d476717ffe3b1 | /GUI/DateTime.py | cae425d702d6fc068fb2472b2e2f83ca8ebdaf56 | []
| no_license | https://github.com/Einstein-NTE/einstein | 1af627038a071819a0d95b2f636c0a0f3f0355b7 | a8a4c963c710dfade3dbfae5bb5041c6bb2ff0eb | refs/heads/master | 2016-09-15T15:24:14.776508 | 2010-08-26T13:24:51 | 2010-08-26T13:24:51 | 370,138 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | """ Python part of the low-level DateTime[Delta] type implementation.
Copyright (c) 1998-2001, Marc-Andre Lemburg; mailto:mal@lemburg.com
Copyright (c) 2000-2007, eGenix.com Software GmbH; mailto:info@egenix.com
See the documentation for further information on copyrights,
or contact the author. All Rights Reserved.
"""
# Import C extension module
from mxDateTime import *
from mxDateTime import __version__
# Singletons
# Commonly used fixed time deltas, built once at import time
oneSecond = DateTimeDelta(0,0,0,1)
oneMinute = DateTimeDelta(0,0,1)
oneHour = DateTimeDelta(0,1)
oneDay = DateTimeDelta(1)
oneWeek = DateTimeDelta(7)
# Package epoch: absdate=1, abstime=0 -- presumably 0001-01-01 00:00:00
# in the proleptic Gregorian calendar (defined by the C extension)
Epoch = DateTimeFromAbsDateTime(1,0)
# Shortcuts for pickle; for backward compatibility only (they are now
# defined in __init__.py to further reduce the pickles length)
def _DT(absdate,abstime):
    # Unpickling helper: rebuild a DateTime from (absdate, abstime).
    return DateTimeFromAbsDateTime(absdate,abstime)
def _DTD(seconds):
    # Unpickling helper: rebuild a DateTimeDelta from a seconds value.
    return DateTimeDeltaFromSeconds(seconds)
# Module init
class modinit:
    # The class body is abused as a throw-away namespace: executing it
    # imports the modules below and, via the 'global' declaration, binds
    # them to module-level aliases (_time, _string, _math, _types).
    # The class object itself is deleted immediately afterwards.
    global _time,_string,_math,_types
    import time,string,math,types
    _time = time
    _string = string
    _math = math
    _types = types
del modinit
### Helpers
def _isstring(arg,
              isinstance=isinstance, types=_types):

    """ Return 1 if arg is a plain or (if supported) Unicode string,
        0 otherwise.
    """
    if isinstance(arg, types.StringType):
        return 1
    # Python builds without Unicode support lack types.UnicodeType;
    # getattr() keeps this working there as well.
    unicode_type = getattr(types, 'UnicodeType', None)
    if unicode_type is not None and isinstance(arg, unicode_type):
        return 1
    return 0
### Compatibility APIs
# Aliases and functions to make 'from mx.DateTime import *' work much
# like 'from time import *'
def localtime(ticks=None,
              # Locals:
              time=_time.time,float=float,localtime=_time.localtime,
              round=round,int=int,DateTime=DateTime,floor=_math.floor):

    """localtime(ticks=None)

       Construct a DateTime instance using local time from ticks.  If
       ticks are not given, it defaults to the current time.  The
       result is similar to time.localtime(). Fractions of a second
       are rounded to the nearest micro-second.
    """
    if ticks is None:
        ticks = time()
    else:
        ticks = float(ticks)
    # Round to micro-seconds, then split into the integral part (fed
    # to time.localtime) and the fraction (re-added to the seconds).
    ticks = round(ticks, 6)
    fticks = floor(ticks)
    Y,M,D,h,m,s = localtime(fticks)[:6]
    s = s + (ticks - fticks)
    return DateTime(Y,M,D,h,m,s)
def gmtime(ticks=None,
           # Locals:
           time=_time.time,float=float,gmtime=_time.gmtime,
           round=round,int=int,DateTime=DateTime,floor=_math.floor):

    """gmtime(ticks=None)

       Construct a DateTime instance using UTC time from ticks.  If
       ticks are not given, it defaults to the current time.  The
       result is similar to time.gmtime(). Fractions of a second are
       rounded to the nearest micro-second.
    """
    if ticks is None:
        ticks = time()
    else:
        ticks = float(ticks)
    # Round to micro-seconds, then split into the integral part and
    # the fraction.
    ticks = round(ticks, 6)
    fticks = floor(ticks)
    # Use the floored value for the broken-down time so that the
    # integral part and the re-added fraction (ticks - fticks, always
    # in [0, 1)) stay consistent even for negative (pre-epoch) ticks;
    # this also matches what localtime() above does.
    Y,M,D,h,m,s = gmtime(fticks)[:6]
    s = s + (ticks - fticks)
    return DateTime(Y,M,D,h,m,s)
# NOTE: Python 2 only -- tuple parameter unpacking in the signature
# mirrors the 9-tuple accepted by time.mktime().
def mktime((year,month,day,hour,minute,second,dow,doy,dst),
           # Locals:
           DateTime=DateTime):

    """mktime((year,month,day,hour,minute,second,dow,doy,dst))

       Same as the DateTime() constructor accept that the interface
       used is compatible to the similar time.mktime() API.

       Note that the tuple elements dow, doy and dst are not used in
       any way.
    """
    return DateTime(year,month,day,hour,minute,second)
def ctime(datetime):

    """ctime(datetime)

       Returns a string representation of the given DateTime instance
       using the current locale's default settings.
    """
    # '%c' is the locale's preferred date/time representation.
    return datetime.strftime('%c')
def today(hour=0,minute=0,second=0.0,
          # Locals:
          localtime=_time.localtime,time=_time.time,DateTime=DateTime):

    """today(hour=0,minute=0,second=0.0)

       Returns a DateTime instance for today (in local time) at the
       given time (defaults to midnight).
    """
    # Take only the date part from the current local time.
    Y,M,D = localtime(time())[:3]
    return DateTime(Y,M,D,hour,minute,second)
def TimeDelta(hours=0.0,minutes=0.0,seconds=0.0,
              # Locals:
              DateTimeDelta=DateTimeDelta):

    """TimeDelta(hours=0.0,minutes=0.0,seconds=0.0)

       Returns a DateTimeDelta-object reflecting the given time
       delta. Seconds can be given as float to indicate fractions.
    """
    # Same as DateTimeDelta() with the days part fixed to 0.
    return DateTimeDelta(0,hours,minutes,seconds)
def gm2local(datetime):

    """ gm2local(datetime)

        Convert a DateTime instance holding UTC time to a DateTime
        instance using local time.
    """
    # .gmticks() interprets the stored values as UTC; localtime()
    # then rebuilds them in the local timezone.
    return localtime(datetime.gmticks())
def local2gm(datetime):

    """ local2gm(datetime)

        Convert a DateTime instance holding local time to a DateTime
        instance using UTC time.
    """
    # .ticks() interprets the stored values as local time; gmtime()
    # then rebuilds them as UTC.
    return gmtime(datetime.ticks())
# Alias (utc is provided by the mxDateTime C extension)
gmt = utc

# Default value for DateTimeFromTJD's tjd_myriad parameter, fixed at
# package import time.
current_myriad = localtime().tjd_myriad
def DateTimeFromTJD(tjd,tjd_myriad=current_myriad):

    """ DateTimeFromTJD(tjd[,myriad])

        Return a DateTime instance for the given Truncated Julian Day.
        myriad defaults to the TJD myriad current at package import
        time.

        Note that this version of Truncated Julian Day number does
        real truncation of important information. It's use is
        discouraged and unsupported.
    """
    # Undo the truncation (myriad * 10000 days) and shift the JDN
    # origin (-4712-01-01 julian) to the package's absolute day scale.
    return DateTimeFromAbsDays(tjd + tjd_myriad * 10000.0 - 1721425.0)
def DateTimeFromJDN(jdn):

    """ DateTimeFromJDN(jdn)

        Return a DateTime instance for the given Julian Day Number.

        References:
        -----------
        Gregorian 2000-01-01 12:00:00 corresponds to JDN 2451545.0.
        Gregorian 1858-11-17 00:00:00.00 corresponds to JDN 2400000.5; MJD 0.0.
        Julian -4712-01-01 12:00:00.00 corresponds to JDN 0.0.
        Gregorian -4713-11-24 12:00:00.00 corresponds to JDN 0.0.
    """
    # 1721425.5 shifts the JDN origin onto the package's absolute day
    # scale (the .5 accounts for JDN days starting at noon).
    return DateTimeFromAbsDays(jdn - 1721425.5)
def DateTimeFromMJD(mjd):

    """ DateTimeFromMJD(mjd)

        Return a DateTime instance for the given Modified Julian Day
        (MJD). The MJD is calculated the same way as the JDN except
        that 1858-11-17 00:00:00.00 is taken as origin of the scale.
    """
    # 678575 absolute days correspond to the MJD origin 1858-11-17.
    return DateTimeFromAbsDays(mjd + 678575.0)
def DateTimeFrom(*args, **kws):

    """ DateTimeFrom(*args, **kws)

        Generic DateTime instance constructor. Can handle parsing
        strings, numbers and keywords.

        Dispatch:
        - one string argument: parsed via the Parser submodule
        - one DateTime argument: returned unchanged
        - one number: interpreted as a ticks value (local time)
        - two string arguments: joined and parsed as 'date time'
        - multiple arguments: passed through to DateTime()
        - keywords only: defaults taken from today at midnight

        XXX Add support for Unicode.
    """
    if len(args) == 1:
        # Single argument
        arg = args[0]
        argtype = type(arg)
        if _isstring(arg):
            import Parser
            return apply(Parser.DateTimeFromString, args, kws)
        elif argtype is DateTimeType:
            return arg
        elif argtype is DateTimeDeltaType:
            raise TypeError,'cannot convert DateTimeDelta to DateTime'
        else:
            # Assume a ticks value; try float first, fall back to int
            try:
                value = float(arg)
            except (TypeError, ValueError):
                value = int(arg)
            assert not kws
            return DateTimeFromTicks(value)

    elif len(args) > 1:
        # More than one argument
        if len(args) == 2 and _isstring(args[0]) and _isstring(args[1]):
            # interpret as date and time string
            import Parser
            return apply(Parser.DateTimeFromString,
                         (args[0] + ' ' + args[1],),
                         kws)
        # Assume the arguments are the same as for DateTime()
        return apply(DateTime, args, kws)

    elif len(kws) > 0:
        # Keyword arguments; add defaults... today at 0:00:00
        hour = kws.get('hour',0)
        minute = kws.get('minute',0)
        second = kws.get('second',0)
        today = now()
        day = kws.get('day',today.day)
        month = kws.get('month',today.month)
        year = kws.get('year',today.year)
        return DateTime(year,month,day,hour,minute,second)

    else:
        raise TypeError,'cannot convert arguments to DateTime'
def DateTimeDeltaFrom(*args, **kws):

    """ DateTimeDeltaFrom(*args, **kws)

        Generic DateTimeDelta instance constructor. Can handle parsing
        strings, numbers and keywords.

        Dispatch mirrors DateTimeFrom(): one string is parsed, one
        DateTimeDelta is passed through, one number is taken as
        seconds, multiple arguments go to DateTimeDelta(), and
        keywords default to a zero delta.

        XXX Add support for Unicode.
    """
    if len(args) == 1:
        # Single argument
        arg = args[0]
        if _isstring(arg):
            import Parser
            return apply(Parser.DateTimeDeltaFromString, args, kws)
        elif type(arg) is DateTimeDeltaType:
            return arg
        elif type(arg) is DateTimeType:
            raise TypeError,'cannot convert DateTime to DateTimeDelta'
        else:
            # NOTE(review): only TypeError is caught here, while the
            # sibling DateTimeFrom() also catches ValueError -- looks
            # like an inconsistency; verify before changing.
            try:
                value = float(arg)
            except TypeError:
                value = int(arg)
            assert not kws
            return DateTimeDeltaFromSeconds(value)

    elif len(args) > 1:
        # Assume the arguments are the same as for DateTimeDelta()
        return apply(DateTimeDelta, args, kws)

    elif len(kws) > 0:
        # Keyword arguments; default: 00:00:00:00.00
        hours = kws.get('hours',0)
        minutes = kws.get('minutes',0)
        seconds = kws.get('seconds',0.0)
        days = kws.get('days',0)
        return DateTimeDelta(days,hours,minutes,seconds)

    else:
        raise TypeError,'cannot convert arguments to DateTimeDelta'
def TimeDeltaFrom(*args, **kws):

    """ TimeDeltaFrom(*args, **kws)

        Generic TimeDelta instance constructor. Can handle parsing
        strings, numbers and keywords.

        XXX Add support for Unicode.
    """
    if len(args) > 1:
        # Assume the arguments are the same as for TimeDelta(): without
        # days part !
        return apply(DateTimeDelta, (0,)+args, kws)
    else:
        # Otherwise treat the arguments just like for DateTimeDelta
        # instances.
        return apply(DateTimeDeltaFrom, args, kws)
def DateFromTicks(ticks,
                  # Locals:
                  DateTime=DateTime,localtime=_time.localtime):

    """ DateFromTicks(ticks)

        Constructs a DateTime instance pointing to the local time date
        at 00:00:00.00 (midnight) indicated by the given ticks value.
        The time part is ignored.
    """
    # Only the (year, month, day) slice is used; hour/minute/second
    # default to 0 in the DateTime() constructor.
    return apply(DateTime, localtime(ticks)[:3])
def TimestampFromTicks(ticks,
                       # Locals:
                       DateTime=DateTime,localtime=_time.localtime):

    """ TimestampFromTicks(ticks)

        Constructs a DateTime instance pointing to the local date and
        time indicated by the given ticks value.
    """
    # Full (year, month, day, hour, minute, second) slice.
    return apply(DateTime, localtime(ticks)[:6])
def TimeFromTicks(ticks,
                  # Locals:
                  DateTimeDelta=DateTimeDelta,localtime=_time.localtime):

    """ TimeFromTicks(ticks)

        Constructs a DateTimeDelta instance pointing to the local time
        indicated by the given ticks value. The date part is ignored.
    """
    # Days part fixed to 0; only (hour, minute, second) are used.
    return apply(DateTimeDelta, (0,) + localtime(ticks)[3:6])
# Aliases, including the DB-API 2.0 style constructor names
# (Date, Time, Timestamp, *FromTicks)
utctime = gmtime
utc2local = gm2local
local2utc = local2gm
DateTimeFromTicks = localtime
Date = DateTime
Time = TimeDelta
Timestamp = DateTime
DateFrom = DateTimeFrom # XXX should only parse the date part !
TimeFrom = TimeDeltaFrom
TimestampFrom = DateTimeFrom
GregorianDateTime = DateTime
GregorianDate = Date
JulianDate = JulianDateTime
### For backward compatibility (these are depreciated):
def gmticks(datetime):

    """gmticks(datetime)

       [DEPRECIATED: use the .gmticks() method]

       Returns a ticks value based on the values stored in
       datetime under the assumption that they are given in UTC,
       rather than local time.
    """
    # Thin wrapper kept for backward compatibility only.
    return datetime.gmticks()

# Alias
utcticks = gmticks
def tz_offset(datetime,
              # Locals:
              oneSecond=oneSecond):

    """tz_offset(datetime)

       [DEPRECIATED: use the .gmtoffset() method]

       Returns a DateTimeDelta instance representing the UTC
       offset for datetime assuming that the stored values refer
       to local time. If you subtract this value from datetime,
       you'll get UTC time.
    """
    # The oneSecond default is unused -- leftover from an older
    # implementation; kept to preserve the signature.
    return datetime.gmtoffset()
### Constants (only English; see Locale.py for other languages)

# Weekdays (0 = Monday ... 6 = Sunday)
Monday =        0
Tuesday =       1
Wednesday =     2
Thursday =      3
Friday =        4
Saturday =      5
Sunday =        6
# as mapping (works in both directions: name -> number, number -> name)
Weekday = {'Saturday': 5, 6: 'Sunday', 'Sunday': 6, 'Thursday': 3,
           'Wednesday': 2, 'Friday': 4, 'Tuesday': 1, 'Monday': 0,
           5: 'Saturday', 4: 'Friday', 3: 'Thursday', 2: 'Wednesday',
           1: 'Tuesday', 0: 'Monday'}

# Months (1 = January ... 12 = December)
January =       1
February =      2
March =         3
April =         4
May =           5
June =          6
July =          7
August =        8
September =     9
October =       10
November =      11
December =      12
# as mapping (bidirectional; None/0 map to each other as "no month")
Month = {2: 'February', 3: 'March', None: 0, 'July': 7, 11: 'November',
         'December': 12, 'June': 6, 'January': 1, 'September': 9, 'August':
         8, 'March': 3, 'November': 11, 'April': 4, 12: 'December', 'May':
         5, 10: 'October', 9: 'September', 8: 'August', 7: 'July', 6:
         'June', 5: 'May', 4: 'April', 'October': 10, 'February': 2, 1:
         'January', 0: None}

# Limits (see also the range checks in mxDateTime.c)
MaxDateTime = DateTime(5867440,12,31)
MinDateTime = DateTime(-5851455,1,1)
MaxDateTimeDelta = DateTimeDeltaFromSeconds(2147483647 * 86400.0)
MinDateTimeDelta = -MaxDateTimeDelta
###
class RelativeDateTime:
"""RelativeDateTime(years=0,months=0,days=0,
hours=0,minutes=0,seconds=0,
year=0,month=0,day=0,
hour=None,minute=None,second=None,
weekday=None,weeks=None)
Returns a RelativeDateTime instance for the specified relative
time. The constructor handles keywords, so you'll only have to
give those parameters which should be changed when you add the
relative to an absolute DateTime instance.
Adding RelativeDateTime instances is supported with the
following rules: deltas will be added together, right side
absolute values override left side ones.
Adding RelativeDateTime instances to DateTime instances will
return DateTime instances with the appropriate calculations
applied, e.g. to get a DateTime instance for the first of next
month, you'd call now() + RelativeDateTime(months=+1,day=1).
"""
years = 0
months = 0
days = 0
year = None
month = 0
day = 0
hours = 0
minutes = 0
seconds = 0
hour = None
minute = None
second = None
weekday = None
# cached hash value
_hash = None
# For Zope security:
__roles__ = None
__allow_access_to_unprotected_subobjects__ = 1
def __init__(self,
years=0,months=0,days=0,
hours=0,minutes=0,seconds=0,
year=None,month=None,day=None,
hour=None,minute=None,second=None,
weekday=None,weeks=0):
self.years = years
self.months = months
self.days = days + weeks*7
self.year = year
self.month = month
self.day = day
self.hours = hours
self.minutes = minutes
self.seconds = seconds
self.hour = hour
self.minute = minute
self.second = second
if weekday is not None:
# Make sure we've got a 2-tuple
assert len(weekday) == 2
self.weekday = weekday
def __add__(self,other,
# Locals:
isinstance=isinstance):
if isinstance(other,RelativeDateTime):
# RelativeDateTime (self) + RelativeDateTime (other)
r = RelativeDateTime()
# date deltas
r.years = self.years + other.years
r.months = self.months + other.months
r.days = self.days + other.days
# absolute entries of other override those in self, if given
r.year = other.year or self.year
r.month = other.month or self.month
r.day = other.day or self.day
r.weekday = other.weekday or self.weekday
# time deltas
r.hours = self.hours + other.hours
r.minutes = self.minutes + other.minutes
r.seconds = self.seconds + other.seconds
# absolute entries of other override those in self, if given
r.hour = other.hour or self.hour
r.minute = other.minute or self.minute
r.second = other.second or self.second
return r
else:
raise TypeError,"can't add the two types"
def __radd__(self,other,
# Locals:
isinstance=isinstance,DateTimeType=DateTimeType,
DateTime=DateTime,DateTimeDelta=DateTimeDelta):
if isinstance(other,DateTimeType):
# DateTime (other) + RelativeDateTime (self)
# date
if self.year is None:
year = other.year + self.years
else:
year = self.year + self.years
if self.month is None:
month = other.month + self.months
else:
month = self.month + self.months
if self.day is None:
day = other.day
else:
day = self.day
if day < 0:
# fix negative day values
month = month + 1
day = day + 1
day = day + self.days
# time
if self.hour is None:
hour = other.hour + self.hours
else:
hour = self.hour + self.hours
if self.minute is None:
minute = other.minute + self.minutes
else:
minute = self.minute + self.minutes
if self.second is None:
second = other.second + self.seconds
else:
second = self.second + self.seconds
# Refit into proper ranges:
if month < 1 or month > 12:
month = month - 1
yeardelta, monthdelta = divmod(month, 12)
year = year + yeardelta
month = monthdelta + 1
# Make sure we have integers
year = int(year)
month = int(month)
day = int(day)
if self.weekday is None:
return DateTime(year, month, 1) + \
DateTimeDelta(day-1,hour,minute,second)
# Adjust to the correct weekday
day_of_week,index = self.weekday
d = DateTime(year, month, 1) + \
DateTimeDelta(day-1,hour,minute,second)
if index == 0:
# 0 index: next weekday if no match
return d + (day_of_week - d.day_of_week)
elif index > 0:
# positive index (1 == first weekday of month)
first = d - (d.day - 1)
diff = day_of_week - first.day_of_week
if diff >= 0:
return first + (diff + (index-1) * 7)
else:
return first + (diff + index * 7)
else:
# negative index (-1 == last weekday of month)
last = d + (d.days_in_month - d.day)
diff = day_of_week - last.day_of_week
if diff <= 0:
return last + (diff + (index+1) * 7)
else:
return last + (diff + index * 7)
else:
raise TypeError,"can't add the two types"
def __sub__(self,other):
if isinstance(other,RelativeDateTime):
# RelativeDateTime (self) - RelativeDateTime (other)
r = RelativeDateTime()
# date deltas
r.years = self.years - other.years
r.months = self.months - other.months
r.days = self.days - other.days
# absolute entries of other override those in self, if given
r.year = other.year or self.year
r.month = other.month or self.month
r.day = other.day or self.day
r.weekday = other.weekday or self.weekday
# time deltas
r.hours = self.hours - other.hours
r.minutes = self.minutes - other.minutes
r.seconds = self.seconds - other.seconds
# absolute entries of other override those in self, if given
r.hour = other.hour or self.hour
r.minute = other.minute or self.minute
r.second = other.second or self.second
return r
else:
raise TypeError,"can't subtract the two types"
def __rsub__(self,other,
# Locals:
isinstance=isinstance,DateTimeType=DateTimeType):
if isinstance(other,DateTimeType):
# DateTime (other) - RelativeDateTime (self)
return other + self.__neg__()
else:
raise TypeError,"can't subtract the two types"
def __neg__(self):
# - RelativeDateTime(self)
r = RelativeDateTime()
# negate date deltas
r.years = - self.years
r.months = - self.months
r.days = - self.days
# absolute entries don't change
r.year = self.year
r.month = self.month
r.day = self.day
r.weekday = self.weekday
# negate time deltas
r.hours = - self.hours
r.minutes = - self.minutes
r.seconds = - self.seconds
# absolute entries don't change
r.hour = self.hour
r.minute = self.minute
r.second = self.second
return r
def __nonzero__(self):
# RelativeDateTime instances are considered false in case
# they do not define any alterations
if (self.year is None and
self.years == 0 and
self.month is None and
self.months == 0 and
self.day is None and
self.weekday is None and
self.days == 0 and
self.hour is None and
self.hours == 0 and
self.minute is None and
self.minutes == 0 and
self.second is None and
self.seconds == 0):
return 0
else:
return 1
def __mul__(self,other):
# RelativeDateTime (self) * Number (other)
factor = float(other)
r = RelativeDateTime()
# date deltas
r.years = factor * self.years
r.months = factor * self.months
r.days = factor * self.days
# time deltas
r.hours = factor * self.hours
r.minutes = factor * self.minutes
r.seconds = factor * self.seconds
return r
__rmul__ = __mul__
def __div__(self,other):
# RelativeDateTime (self) / Number (other)
return self.__mul__(1/float(other))
def __eq__(self, other):
if isinstance(self, RelativeDateTime) and \
isinstance(other, RelativeDateTime):
# RelativeDateTime (self) == RelativeDateTime (other)
if (self.years == other.years and
self.months == other.months and
self.days == other.days and
self.year == other.year and
self.day == other.day and
self.hours == other.hours and
self.minutes == other.minutes and
self.seconds == other.seconds and
self.hour == other.hour and
self.minute == other.minute and
self.second == other.second and
self.weekday == other.weekday):
return 1
else:
return 0
else:
raise TypeError,"can't compare the two types"
def __hash__(self):
if self._hash is not None:
return self._hash
x = 1234
for value in (self.years, self.months, self.days,
self.year, self.day,
self.hours, self.minutes, self.seconds,
self.hour, self.minute, self.second,
self.weekday):
if value is None:
x = 135051820 ^ x
else:
x = hash(value) ^ x
self._hash = x
return x
def __str__(self,
join=_string.join):
l = []
append = l.append
# Format date part
if self.year is not None:
append('%04i-' % self.year)
elif self.years:
append('(%0+5i)-' % self.years)
else:
append('YYYY-')
if self.month is not None:
append('%02i-' % self.month)
elif self.months:
append('(%0+3i)-' % self.months)
else:
append('MM-')
if self.day is not None:
append('%02i' % self.day)
elif self.days:
append('(%0+3i)' % self.days)
else:
append('DD')
if self.weekday:
append(' %s:%i' % (Weekday[self.weekday[0]][:3],self.weekday[1]))
append(' ')
# Normalize relative time values to avoid fractions
hours = self.hours
minutes = self.minutes
seconds = self.seconds
hours_fraction = hours - int(hours)
minutes = minutes + hours_fraction * 60.0
minutes_fraction = minutes - int(minutes)
seconds = seconds + minutes_fraction * 6.0
seconds_fraction = seconds - int(seconds)
if 0:
# Normalize to standard time ranges
if seconds > 60.0:
extra_minutes, seconds = divmod(seconds, 60.0)
minutes = minutes + extra_minutes
elif seconds < -60.0:
extra_minutes, seconds = divmod(seconds, -60.0)
minutes = minutes - extra_minutes
if minutes >= 60.0:
extra_hours, minutes = divmod(minutes, 60.0)
hours = hours + extra_hours
elif minutes <= -60.0:
extra_hours, minutes = divmod(minutes, -60.0)
hours = hours - extra_hours
# Format time part
if self.hour is not None:
append('%02i:' % self.hour)
elif hours:
append('(%0+3i):' % hours)
else:
append('HH:')
if self.minute is not None:
append('%02i:' % self.minute)
elif minutes:
append('(%0+3i):' % minutes)
else:
append('MM:')
if self.second is not None:
append('%02i' % self.second)
elif seconds:
append('(%0+3i)' % seconds)
else:
append('SS')
return join(l,'')
def __repr__(self):
return "<%s instance for '%s' at 0x%x>" % (
self.__class__.__name__,
self.__str__(),
id(self))
# Alias
RelativeDate = RelativeDateTime
def RelativeDateTimeFrom(*args, **kws):
""" RelativeDateTimeFrom(*args, **kws)
Generic RelativeDateTime instance constructor. Can handle
parsing strings and keywords.
"""
if len(args) == 1:
# Single argument
arg = args[0]
if _isstring(arg):
import Parser
return apply(Parser.RelativeDateTimeFromString, args, kws)
elif isinstance(arg, RelativeDateTime):
return arg
else:
raise TypeError,\
'cannot convert argument to RelativeDateTime'
else:
return apply(RelativeDateTime,args,kws)
def RelativeDateTimeDiff(date1,date2,
floor=_math.floor,int=int,divmod=divmod,
RelativeDateTime=RelativeDateTime):
""" RelativeDateTimeDiff(date1,date2)
Returns a RelativeDateTime instance representing the difference
between date1 and date2 in relative terms.
The following should hold:
date2 + RelativeDateDiff(date1,date2) == date1
for all dates date1 and date2.
Note that due to the algorithm used by this function, not the
whole range of DateTime instances is supported; there could
also be a loss of precision.
XXX There are still some problems left (thanks to Carel
Fellinger for pointing these out):
29 1 1901 -> 1 3 1901 = 1 month
29 1 1901 -> 1 3 1900 = -10 month and -28 days, but
29 1 1901 -> 28 2 1900 = -11 month and -1 day
and even worse:
>>> print RelativeDateDiff(Date(1900,3,1),Date(1901,2,1))
YYYY-(-11)-DD HH:MM:SS
with:
>>> print Date(1901,1,29) + RelativeDateTime(months=-11)
1900-03-01 00:00:00.00
>>> print Date(1901,2,1) + RelativeDateTime(months=-11)
1900-03-01 00:00:00.00
"""
diff = date1 - date2
if diff.days == 0:
return RelativeDateTime()
date1months = date1.year * 12 + (date1.month - 1)
date2months = date2.year * 12 + (date2.month - 1)
#print 'months',date1months,date2months
# Calculate the months difference
diffmonths = date1months - date2months
#print 'diffmonths',diffmonths
if diff.days > 0:
years,months = divmod(diffmonths,12)
else:
years,months = divmod(diffmonths,-12)
years = -years
date3 = date2 + RelativeDateTime(years=years,months=months)
diff3 = date1 - date3
days = date1.absdays - date3.absdays
#print 'date3',date3,'diff3',diff3,'days',days
# Correction to ensure that all relative parts have the same sign
while days * diff.days < 0:
if diff.days > 0:
diffmonths = diffmonths - 1
years,months = divmod(diffmonths,12)
else:
diffmonths = diffmonths + 1
years,months = divmod(diffmonths,-12)
years = -years
#print 'diffmonths',diffmonths
date3 = date2 + RelativeDateTime(years=years,months=months)
diff3 = date1 - date3
days = date1.absdays - date3.absdays
#print 'date3',date3,'diff3',diff3,'days',days
# Drop the fraction part of days
if days > 0:
days = int(floor(days))
else:
days = int(-floor(-days))
return RelativeDateTime(years=years,
months=months,
days=days,
hours=diff3.hour,
minutes=diff3.minute,
seconds=diff3.second)
# Aliases
RelativeDateDiff = RelativeDateTimeDiff
Age = RelativeDateTimeDiff
###
_current_year = now().year
_current_century, _current_year_in_century = divmod(_current_year, 100)
_current_century = _current_century * 100
def add_century(year,
current_year=_current_year,
current_century=_current_century):
""" Sliding window approach to the Y2K problem: adds a suitable
century to the given year and returns it as integer.
The window used depends on the current year (at import time).
If adding the current century to the given year gives a year
within the range current_year-70...current_year+30 [both
inclusive], then the current century is added. Otherwise the
century (current + 1 or - 1) producing the least difference is
chosen.
"""
if year > 99:
# Take it as-is
return year
year = year + current_century
diff = year - current_year
if diff >= -70 and diff <= 30:
return year
elif diff < -70:
return year + 100
else:
return year - 100
# Reference formulas for JDN taken from the Calendar FAQ:
def gregorian_jdn(year,month,day):
# XXX These require proper integer division.
a = (14-month)/12
y = year+4800-a
m = month + 12*a - 3
return day + (306*m+5)/10 + y*365 + y/4 - y/100 + y/400 - 32045
def julian_jdn(year,month,day):
# XXX These require proper integer division.
a = (14-month)/12
y = year+4800-a
m = month + 12*a - 3
return day + (306*m+5)/10 + y*365 + y/4 - 32083
| UTF-8 | Python | false | false | 32,116 | py | 221 | DateTime.py | 143 | 0.571148 | 0.547546 | 0 | 1,054 | 29.470588 | 79 |
luxaflow/ChatRoomMVC | 6,451,040,929,311 | 191b400fdcbacd6d8a04bcce667cd19685f0ad44 | 8fcfb38ebe5d3251f73ace213cfd073e0e047274 | /libaries/View.py | 7ebfeec3a4524baf22699cba2da77e801ff21ad7 | []
| no_license | https://github.com/luxaflow/ChatRoomMVC | aea7498eba670ad26cc3d7f23a275c90256ad026 | 4c6885c8a61f2d42defcf77a5f2d6d7a9a2c7526 | refs/heads/master | 2022-04-07T18:13:38.061060 | 2020-03-05T12:38:16 | 2020-03-05T12:38:16 | 245,158,694 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tkinter as tk
class View(tk.Tk):
"""
Betreft het Window waarin alle Views geladen kunnen worden
Deze klasse kan niet overerft worden omdat hiermee het Window meerderee instanties gaat maken.
Daarna is het niet meer mogelijk te weten in welk window de applicatie draait
"""
def __init__(self):
tk.Tk.__init__(self)
self.title('ChatRoom')
self.resizable(False, False)
self.geometry('500x300')
self.__frame = None
"""
Hiermee worden nieuwe view in het window geladen
Tevens worden oude Views gestopt
"""
def load_frame(self, frame, controller):
new_frame = frame(self, controller)
if self.__frame is not None:
self.__frame.pack_forget()
self.__frame.destroy()
self.__frame = new_frame
self.__frame.pack()
controller.set_current_view(self.__frame)
| UTF-8 | Python | false | false | 904 | py | 20 | View.py | 19 | 0.623894 | 0.617257 | 0 | 29 | 30.172414 | 98 |
davidcostaw/medicar | 4,707,284,169,358 | 1965455df7f97f6774968ffd32c200a6f97578fd | 9958b5b3ca39360c2b57ccb58d5478fa2ba7fa78 | /backend/agendas/filters.py | 06513a1794855c005d5881db032bda79a1d017d2 | []
| no_license | https://github.com/davidcostaw/medicar | 54067a03f3e1fd216f64d92ed6ec07230b73f269 | e1b1ef2f114c268cf5d987b41af89a0807cc4ebf | refs/heads/master | 2023-04-24T01:37:42.388026 | 2021-05-14T13:46:35 | 2021-05-14T13:46:35 | 367,164,283 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import django_filters
from .models import Agenda
from backend.medicos.models import Medico
from backend.especialidades.models import Especialidade
class AgendaFilter(django_filters.FilterSet):
medico = django_filters.filters.ModelMultipleChoiceFilter(
queryset=Medico.objects.all()
)
medico__especialidade = django_filters.filters.ModelMultipleChoiceFilter(
queryset=Especialidade.objects.all()
)
data_inicio = django_filters.DateFilter(field_name='dia', lookup_expr='gte')
data_final = django_filters.DateFilter(field_name='dia', lookup_expr='lte')
class Meta:
model = Agenda
fields = [
'medico', 'medico__especialidade', 'data_inicio', 'data_final'
]
| UTF-8 | Python | false | false | 739 | py | 35 | filters.py | 34 | 0.705007 | 0.705007 | 0 | 24 | 29.791667 | 80 |
ebertx/thinking-python | 18,519,899,013,779 | 9abd2de2e03e56e299d93c88012dcf42102d8cc6 | 766896c67dc8099523a1895b884c020645a0c081 | /9-5.py | 539371d75d52b40a1ddb2235662c066fc33db11f | []
| no_license | https://github.com/ebertx/thinking-python | cf9daa02bc5106f872ec8c4f083d6a2d32fcb778 | bb08f58806e26a14d3420454f7b7271a8de85c28 | refs/heads/master | 2016-09-06T05:30:23.659158 | 2015-02-27T07:51:10 | 2015-02-27T07:51:10 | 29,249,177 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def uses_all(word, letters):
for l in letters.lower():
is_letter_in_word = False
for w in word.lower():
if l == w:
is_letter_in_word = True
if not is_letter_in_word:
return False
return True
letters = raw_input('input allowed letters >> ')
fin = open('words.txt')
allowed_words = 0;
for line in fin:
if uses_all(line.strip(), letters):
allowed_words += 1
print line.strip()
print allowed_words | UTF-8 | Python | false | false | 489 | py | 26 | 9-5.py | 25 | 0.568507 | 0.564417 | 0 | 21 | 22.333333 | 48 |
athletejuan/TIL | 18,373,870,094,664 | 20c8ef7a08bb07ccf822e571204bddec31d4345a | 6fa0d5d3b61fbce01fad5a7dd50258c09298ee00 | /Algorithm/BOJ/10833.py | 375e183cd7c63333c2a0688abe2a5c105057904f | []
| no_license | https://github.com/athletejuan/TIL | c8e6bd9f7e2c6f999dbac759adcdb6b2959de384 | 16b854928af2f27d91ba140ebc1aec0007e5eb04 | refs/heads/master | 2023-02-19T13:59:06.495110 | 2022-03-23T15:08:04 | 2022-03-23T15:08:04 | 188,750,527 | 1 | 0 | null | false | 2023-02-15T22:54:50 | 2019-05-27T01:27:09 | 2021-11-09T23:46:36 | 2023-02-15T22:54:48 | 11,469 | 0 | 0 | 4 | Python | false | false | N = int(input())
remain = 0
for _ in range(N):
S,A = map(int, input().split())
remain += A%S
print(remain) | UTF-8 | Python | false | false | 115 | py | 1,322 | 10833.py | 876 | 0.556522 | 0.547826 | 0 | 7 | 15.571429 | 35 |
nqxing/PythonProject | 4,724,464,046,650 | aa1946d6969008136b9f8d5e49cd341900a1b9e3 | e94c53d4d776eafa461e459af6559f8b0461cd5e | /selenium/baidu-jietu.py | fb5758e886906de327f48a4928d748f71c3e4a74 | []
| no_license | https://github.com/nqxing/PythonProject | f66859862fc85366f21a21f76a54227309c77bbf | d1ca59872e290d34ea9586e04cbce43c3e78f438 | refs/heads/master | 2022-05-23T05:37:19.514264 | 2022-05-20T07:25:03 | 2022-05-20T07:25:03 | 306,828,334 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from selenium import webdriver
import time
driver = webdriver.Chrome()
driver.get('http://www.baidu.com')
time.sleep(3)
driver.get_screenshot_as_file('b1.png')
driver.quit()
| UTF-8 | Python | false | false | 174 | py | 427 | baidu-jietu.py | 381 | 0.758621 | 0.747126 | 0 | 7 | 23.857143 | 39 |
Anais5/pendu | 6,305,012,022,462 | 3fbd228f04d366c3e716e2d3749cd40b80ce5a0c | da4398ae1da8086a97ef25e74123daaa0c49d4a8 | /projet_pendu_romain&anais.py | dccc5715ade9dc87565c297370b5e144df29b00a | []
| no_license | https://github.com/Anais5/pendu | 802069cbe4a574c151174f4f6953ceb34f55e596 | 416de024a6d1eac19f61b407c497f81fe4f6f964 | refs/heads/main | 2023-01-22T06:39:28.002880 | 2020-11-25T16:17:17 | 2020-11-25T16:17:17 | 315,989,527 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
ERREURS_POSSIBLES = 7 #ne pas changer ca créerai des erreurs
SYMOBOLE_MYSTERE = "?"
ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
LONGUEUR_MINIMALE = 4
def ascii(erreurs : int):
"""Cette fonction retourne un bonhomme pendu suivant le
nombre d'erreurs."""
bonhomme = ("|", "O", "|", "/", "\ ", "/", "\ ")
dessin = " _______ \n |/ 0 \n | 1 \n | 324 \n | 5 6 \n | \n/|\ "
for chiffre in range(erreurs):
for i in range(len(dessin)):
if dessin[i] == str(chiffre):
dessin = dessin.replace(dessin[i], bonhomme[chiffre])
for j in range(len(dessin)):
if dessin[j] in ("0", "1", "2", "3", "4", "5", "6"):
dessin = dessin.replace(dessin[j], " ")
return dessin
def charge_dictionnaire(dico : str):
"""Cette fonction charge les mots ligne par ligne d'un document
texte puis retourne une liste de ces mots."""
liste_mots = []
with open(dico, "r") as f:
for mot in f.readlines():
mot = mot.strip().upper()
if len(mot) >= LONGUEUR_MINIMALE: #on vérifie que la longueur du mot soit supérieure ou égale à la longueur minimale
liste_mots.append(mot)
return liste_mots
DICO_MOTS = charge_dictionnaire("dico_fr.txt")
def genere_motif(secret : str, propositions : list):
"""Cette fonction génère un motif à partir du mot secret donné et des
propositions de lettres qui ont été faites par l'utilisateur.
Si la lettre est dans le mot secret et dans les propositions
alors elle est affichée, si ce n'est pas le cas elle est remplacée
par un symbole mystère
(qui est défini par la variable globale : SYMBOLE_MYSTERE)."""
for i in range(len(propositions)):
propositions[i] = propositions[i].upper()
motif = ""
for lettre in secret:
if lettre not in propositions: #si la proposition n'est pas dans secret, elle sera remplacée par le symbole mystère
motif += SYMOBOLE_MYSTERE
elif lettre in propositions:
motif += lettre #si la lettre est bien dans le mot elle est ajoutée dans le motif
return motif
def filtrer(motif : str, mot : str, deja_donnees : list):
"""Cette fonction cherche si oui ou non le motif pourrait être
compatible avec un mot en fonction des propositions de lettres
déjà faites. Elle renvoie un booléen et les lettres qui seraient
possible de proposer."""
reste_lettre = set()
if len(motif) != len(mot):
return (False, reste_lettre)
rep = True
longueur_motif = len(motif) - 1
for i in range(0, longueur_motif):
if motif[i] == "?":
if mot[i] in deja_donnees:
rep = False
elif motif[i] != mot[i]:
rep = False
if rep == True:
for j in range(0, longueur_motif):
if motif[j] == SYMOBOLE_MYSTERE:
reste_lettre.add(mot[j])
return (True, reste_lettre)
else:
return (False, reste_lettre)
def trouver_mots_restants(motif : str, deja_donnees : list, liste_mots_restants : list):
"""D'après une liste de mots et une liste de lettres déjà données,
cette fonction retourne un tuple qui contient le motif, les mots
qui pourraient être la bonne réponse en fonction du motif et un
dictionnaire des lettres qui pourraient être proposées."""
dico_lettres = {l: 0 for l in ALPHABET if l not in deja_donnees}
mots_possibles = []
for mot in liste_mots_restants:
ok, lettres_restantes = filtrer(motif, mot, deja_donnees)
if ok == True:
mots_possibles.append(mot)
for l in dico_lettres:
if l not in deja_donnees:
if l in mots_possibles:
dico_lettres[l] += 1
return motif, mots_possibles, dico_lettres
def partie_avancee():
"""Cette fonction permet de jouer au pendu, elle a besoin des
autres fonctions pour pouvoir s'éxecuter correctement et elle
ne renvoie rien."""
limite = ERREURS_POSSIBLES
propositions_vraies = []
propositions_fausses = []
prop = []
mot = random.choice(DICO_MOTS)
rep = genere_motif(mot, propositions_vraies)
dessin = ascii(7 - limite)
print(f"{dessin}\nMot : {rep}; lettre(s) déjà essayée(s) : {propositions_fausses}; essai(s) restant(s) : "
f"{limite} \nMot(s) possible(s) : {trouver_mots_restants(rep, prop, DICO_MOTS)[1]}")
while mot != rep: #s'éxecutera en boucle tant que le mot n'est pas deviné
if limite == 0: #si la limite est atteinte
print(f"Tu as perdu, le mot a trouver était : {mot} !") #on affiche le mot qu'il fallait trouver
return #on arrête la boucle
lettre = input("Choisir une lettre: ") #on demande au joueur de proposer une lettre
lettre = lettre.upper()
if len(lettre) > 1 or type(lettre) != str or lettre not in ALPHABET:
print("Vous devez utiliser une lettre sans accent !") #si le joueur ne propose pas une lettre on lui revoie une erreur
if lettre in prop:
print(f"Vous avez déjà essayé la lettre {lettre} !") #si la lettre a déjà été proposée on l'indique au joueur
elif lettre in ALPHABET:
prop.append(lettre)
if lettre in mot:
propositions_vraies.append(lettre)
rep = genere_motif(mot, propositions_vraies) #génère un motif du mot a chercher avec les propositions vraies
else:
limite -= 1
propositions_fausses.append(lettre)
rep = genere_motif(mot, propositions_vraies)
print(f"Cette lettre n'est pas dans le mot ! Il vous reste {limite} essais.")
dessin = ascii(7 - limite)
print(f"{dessin}\nMot : {rep}; lettre(s) déjà essayée(s) : {propositions_fausses}; essai(s) restant(s) : "
f"{limite} \nMot(s) possible(s) : {trouver_mots_restants(rep, prop, DICO_MOTS)[1]}")
print("Tu as gagné ! Bravo !")
partie_avancee()
| UTF-8 | Python | false | false | 6,204 | py | 2 | projet_pendu_romain&anais.py | 1 | 0.605819 | 0.601268 | 0 | 131 | 44.961832 | 131 |
daniel-reich/ubiquitous-fiesta | 15,187,004,398,497 | 7b70ed4d752c7258b294814dad4deee7c3383727 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /ruPm4LX6fEvvLJk9j_12.py | da9227043c7134d67a21c4391b7daf7748661358 | []
| no_license | https://github.com/daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def to_base(num, base):
digits = []
while num > 0:
digits.append(num % base)
num //= base
return digits
def esthetic(num):
res = []
for b in range(2, 11):
digits = to_base(num, b)
if all(abs(i - j) == 1 for i, j in zip(digits, digits[1:])):
res.append(b)
return res or "Anti-Esthetic"
| UTF-8 | Python | false | false | 325 | py | 38,088 | ruPm4LX6fEvvLJk9j_12.py | 38,088 | 0.566563 | 0.547988 | 0 | 14 | 21.928571 | 64 |
edesai/openstack_automation | 12,567,074,310,586 | 7f57b5a39a29dd45dbe83dd33b1ed437d3bc7309 | 4c27a9f71b41cecb914e705a5b1722348625bb0c | /common/MySqlDbTables.py | 1baa6327e7959030adcd2a22187c69b6cfd9546d | []
| no_license | https://github.com/edesai/openstack_automation | d471812f28f7f181c57d884bcd204a0e829c6383 | cc57b0c3a3a25d87b96c6c18e2219a6f507bc24c | refs/heads/master | 2021-01-17T16:39:54.372110 | 2017-02-09T05:55:54 | 2017-02-09T05:55:54 | 69,001,703 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Created on Dec 12, 2016
@author: edesai
'''
class MySqlDbTables(object):
INSTANCES_INSTANCE_NAME = 1
INSTANCES_INSTANCE_IP = 6
INSTANCES_HOST_NAME = 10
INSTANCES_VDP_VLAN = 11
AGENTS_HOST_NAME = 0
AGENTS_AGENT_INFO = 3
| UTF-8 | Python | false | false | 250 | py | 29 | MySqlDbTables.py | 28 | 0.656 | 0.6 | 0 | 14 | 16.857143 | 31 |
joeypy/Django-E-commerce | 14,559,939,137,110 | 33db5c1d218572ec3882a362e8102762b60a7c74 | fd8e014160c060f4556b16570c0e44000ba5d2b2 | /store/admin.py | 8009514498b1ffdd79889604addda4b1f015e836 | []
| no_license | https://github.com/joeypy/Django-E-commerce | 66e1497bf635a1c0d099d1d3db2c804d898d38c4 | bcb6319c48628712174c63bc1b273789fbd2f90f | refs/heads/main | 2023-08-11T10:46:02.990070 | 2021-10-03T17:44:01 | 2021-10-03T17:44:01 | 403,164,471 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from django.utils.html import format_html
from import_export.admin import ImportExportActionModelAdmin
from store.models import Product, Variation
@admin.register(Product)
class ProductAdmin(ImportExportActionModelAdmin):
list_display = (
"id",
"product_name",
"price",
"stock",
"category",
"modified_at",
"is_available",
"image_tag",
)
list_display_links = (
"id",
"product_name",
)
ordering = ("id",)
readonly_fields = ("slug",)
def image_tag(self, obj):
if obj.images:
return format_html(
f'<img src="{obj.images.url}" style="width: 50px; height:50px;" />'
)
else:
return "No Image"
image_tag.allow_tags = True
image_tag.short_description = "Product Img"
@admin.register(Variation)
class VariationAdmin(ImportExportActionModelAdmin):
list_display = (
"id",
"product",
"variation_category",
"variation_value",
"is_active",
"created_at",
)
list_editable = ("is_active",)
list_filter = ("product", "variation_category", "variation_value",)
| UTF-8 | Python | false | false | 1,229 | py | 6 | admin.py | 6 | 0.580146 | 0.576892 | 0 | 50 | 23.56 | 83 |
PiErr0r/aoc | 18,150,531,810,903 | 74283b59f1d63a7c2eb47b550842ab1f3fe54fe3 | e995b7b23c45207b29cb64680bb08be5e95a34d2 | /2017/15.py | d9e989d67f12d496ca2b1ec57baa0346d32c0d1f | []
| no_license | https://github.com/PiErr0r/aoc | 3592546a931b85785ec7249f693ded6ea9e04263 | aafc2333f3c79c6a969e074d4f3942e76c17e5f6 | refs/heads/master | 2023-01-09T15:15:33.060563 | 2022-12-25T10:26:36 | 2022-12-25T10:26:36 | 250,568,271 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import math, copy, re
GEN_A = 634
GEN_B = 301
FACTOR_A = 16807
FACTOR_B = 48271
MUL_A = 4
MUL_B = 8
DIV = 2147483647
PAIR_NUM_1 = 40000000
PAIR_NUM_2 = 5000000
def part_1():
curr_a = GEN_A
curr_b = GEN_B
L = 16
cnt = 0
for i in range(PAIR_NUM_1):
curr_a = (curr_a * FACTOR_A) % DIV
curr_b = (curr_b * FACTOR_B) % DIV
bin_a = bin(curr_a)[2:][-16:]
bin_b = bin(curr_b)[2:][-16:]
if len(bin_a) < L:
bin_a = (L - len(bin_a)) * '0' + bin_a
if len(bin_b) < L:
bin_b = (L - len(bin_b)) * '0' + bin_b
if bin_a == bin_b: cnt += 1
print(cnt)
print('END OF PART1')
return
def part_2():
curr_a = GEN_A
curr_b = GEN_B
L = 16
cnt = 0
for i in range(PAIR_NUM_2):
while curr_a % MUL_A != 0:
curr_a = (curr_a * FACTOR_A) % DIV
while curr_b % MUL_B != 0:
curr_b = (curr_b * FACTOR_B) % DIV
bin_a = bin(curr_a)[2:][-16:]
bin_b = bin(curr_b)[2:][-16:]
if len(bin_a) < L:
bin_a = (L - len(bin_a)) * '0' + bin_a
if len(bin_b) < L:
bin_b = (L - len(bin_b)) * '0' + bin_b
if bin_a == bin_b: cnt += 1
if i % 100000 == 0:
print(i, cnt)
curr_a = (curr_a * FACTOR_A) % DIV
curr_b = (curr_b * FACTOR_B) % DIV
print(cnt)
print('END OF PART2')
return
if __name__ == '__main__':
# with open('15_input') as f:
# data = f.read()
# data = data.split('\n')
# data = list(map(int, data.split()))
# part_1()
part_2()
| UTF-8 | Python | false | false | 1,376 | py | 218 | 15.py | 212 | 0.522529 | 0.458576 | 0 | 73 | 17.821918 | 41 |
holzschu/Carnets | 10,539,849,784,144 | 142e797aeff8ae3a02f087ba2e13328a987b4b1a | 93713f46f16f1e29b725f263da164fed24ebf8a8 | /Library/lib/python3.7/site-packages/sympy/integrals/rubi/rules/piecewise_linear.py | d3ac12cc92b07c734659a5615ccc18307e654cf1 | [
"BSD-3-Clause"
]
| permissive | https://github.com/holzschu/Carnets | b83d15136d25db640cea023abb5c280b26a9620e | 1ad7ec05fb1e3676ac879585296c513c3ee50ef9 | refs/heads/master | 2023-02-20T12:05:14.980685 | 2023-02-13T15:59:23 | 2023-02-13T15:59:23 | 167,671,526 | 541 | 36 | BSD-3-Clause | false | 2022-11-29T03:08:22 | 2019-01-26T09:26:46 | 2022-11-29T02:39:24 | 2022-02-16T10:31:09 | 69,752 | 461 | 32 | 191 | Python | false | false | """
This code is automatically generated. Never edit it manually.
For details of generating the code see `rubi_parsing_guide.md` in `parsetools`.
"""
from sympy.external import import_module
matchpy = import_module("matchpy")
if matchpy:
from matchpy import Pattern, ReplacementRule, CustomConstraint, is_match
from sympy.integrals.rubi.utility_function import (
Int, Sum, Set, With, Module, Scan, MapAnd, FalseQ,
ZeroQ, NegativeQ, NonzeroQ, FreeQ, NFreeQ, List, Log, PositiveQ,
PositiveIntegerQ, NegativeIntegerQ, IntegerQ, IntegersQ,
ComplexNumberQ, PureComplexNumberQ, RealNumericQ, PositiveOrZeroQ,
NegativeOrZeroQ, FractionOrNegativeQ, NegQ, Equal, Unequal, IntPart,
FracPart, RationalQ, ProductQ, SumQ, NonsumQ, Subst, First, Rest,
SqrtNumberQ, SqrtNumberSumQ, LinearQ, Sqrt, ArcCosh, Coefficient,
Denominator, Hypergeometric2F1, Not, Simplify, FractionalPart,
IntegerPart, AppellF1, EllipticPi, EllipticE, EllipticF, ArcTan,
ArcCot, ArcCoth, ArcTanh, ArcSin, ArcSinh, ArcCos, ArcCsc, ArcSec,
ArcCsch, ArcSech, Sinh, Tanh, Cosh, Sech, Csch, Coth, LessEqual, Less,
Greater, GreaterEqual, FractionQ, IntLinearcQ, Expand, IndependentQ,
PowerQ, IntegerPowerQ, PositiveIntegerPowerQ, FractionalPowerQ, AtomQ,
ExpQ, LogQ, Head, MemberQ, TrigQ, SinQ, CosQ, TanQ, CotQ, SecQ, CscQ,
Sin, Cos, Tan, Cot, Sec, Csc, HyperbolicQ, SinhQ, CoshQ, TanhQ, CothQ,
SechQ, CschQ, InverseTrigQ, SinCosQ, SinhCoshQ, LeafCount, Numerator,
NumberQ, NumericQ, Length, ListQ, Im, Re, InverseHyperbolicQ,
InverseFunctionQ, TrigHyperbolicFreeQ, InverseFunctionFreeQ, RealQ,
EqQ, FractionalPowerFreeQ, ComplexFreeQ, PolynomialQ, FactorSquareFree,
PowerOfLinearQ, Exponent, QuadraticQ, LinearPairQ, BinomialParts,
TrinomialParts, PolyQ, EvenQ, OddQ, PerfectSquareQ, NiceSqrtAuxQ,
NiceSqrtQ, Together, PosAux, PosQ, CoefficientList, ReplaceAll,
ExpandLinearProduct, GCD, ContentFactor, NumericFactor,
NonnumericFactors, MakeAssocList, GensymSubst, KernelSubst,
ExpandExpression, Apart, SmartApart, MatchQ,
PolynomialQuotientRemainder, FreeFactors, NonfreeFactors,
RemoveContentAux, RemoveContent, FreeTerms, NonfreeTerms,
ExpandAlgebraicFunction, CollectReciprocals, ExpandCleanup,
AlgebraicFunctionQ, Coeff, LeadTerm, RemainingTerms, LeadFactor,
RemainingFactors, LeadBase, LeadDegree, Numer, Denom, hypergeom, Expon,
MergeMonomials, PolynomialDivide, BinomialQ, TrinomialQ,
GeneralizedBinomialQ, GeneralizedTrinomialQ, FactorSquareFreeList,
PerfectPowerTest, SquareFreeFactorTest, RationalFunctionQ,
RationalFunctionFactors, NonrationalFunctionFactors, Reverse,
RationalFunctionExponents, RationalFunctionExpand, ExpandIntegrand,
SimplerQ, SimplerSqrtQ, SumSimplerQ, BinomialDegree, TrinomialDegree,
CancelCommonFactors, SimplerIntegrandQ, GeneralizedBinomialDegree,
GeneralizedBinomialParts, GeneralizedTrinomialDegree,
GeneralizedTrinomialParts, MonomialQ, MonomialSumQ,
MinimumMonomialExponent, MonomialExponent, LinearMatchQ,
PowerOfLinearMatchQ, QuadraticMatchQ, CubicMatchQ, BinomialMatchQ,
TrinomialMatchQ, GeneralizedBinomialMatchQ, GeneralizedTrinomialMatchQ,
QuotientOfLinearsMatchQ, PolynomialTermQ, PolynomialTerms,
NonpolynomialTerms, PseudoBinomialParts, NormalizePseudoBinomial,
PseudoBinomialPairQ, PseudoBinomialQ, PolynomialGCD, PolyGCD,
AlgebraicFunctionFactors, NonalgebraicFunctionFactors,
QuotientOfLinearsP, QuotientOfLinearsParts, QuotientOfLinearsQ,
Flatten, Sort, AbsurdNumberQ, AbsurdNumberFactors,
NonabsurdNumberFactors, SumSimplerAuxQ, Prepend, Drop,
CombineExponents, FactorInteger, FactorAbsurdNumber,
SubstForInverseFunction, SubstForFractionalPower,
SubstForFractionalPowerOfQuotientOfLinears,
FractionalPowerOfQuotientOfLinears, SubstForFractionalPowerQ,
SubstForFractionalPowerAuxQ, FractionalPowerOfSquareQ,
FractionalPowerSubexpressionQ, Apply, FactorNumericGcd,
MergeableFactorQ, MergeFactor, MergeFactors, TrigSimplifyQ,
TrigSimplify, TrigSimplifyRecur, Order, FactorOrder, Smallest,
OrderedQ, MinimumDegree, PositiveFactors, Sign, NonpositiveFactors,
PolynomialInAuxQ, PolynomialInQ, ExponentInAux, ExponentIn,
PolynomialInSubstAux, PolynomialInSubst, Distrib, DistributeDegree,
FunctionOfPower, DivideDegreesOfFactors, MonomialFactor, FullSimplify,
FunctionOfLinearSubst, FunctionOfLinear, NormalizeIntegrand,
NormalizeIntegrandAux, NormalizeIntegrandFactor,
NormalizeIntegrandFactorBase, NormalizeTogether,
NormalizeLeadTermSigns, AbsorbMinusSign, NormalizeSumFactors,
SignOfFactor, NormalizePowerOfLinear, SimplifyIntegrand, SimplifyTerm,
TogetherSimplify, SmartSimplify, SubstForExpn, ExpandToSum, UnifySum,
UnifyTerms, UnifyTerm, CalculusQ, FunctionOfInverseLinear,
PureFunctionOfSinhQ, PureFunctionOfTanhQ, PureFunctionOfCoshQ,
IntegerQuotientQ, OddQuotientQ, EvenQuotientQ, FindTrigFactor,
FunctionOfSinhQ, FunctionOfCoshQ, OddHyperbolicPowerQ, FunctionOfTanhQ,
FunctionOfTanhWeight, FunctionOfHyperbolicQ, SmartNumerator,
SmartDenominator, SubstForAux, ActivateTrig, ExpandTrig, TrigExpand,
SubstForTrig, SubstForHyperbolic, InertTrigFreeQ, LCM,
SubstForFractionalPowerOfLinear, FractionalPowerOfLinear,
InverseFunctionOfLinear, InertTrigQ, InertReciprocalQ, DeactivateTrig,
FixInertTrigFunction, DeactivateTrigAux, PowerOfInertTrigSumQ,
PiecewiseLinearQ, KnownTrigIntegrandQ, KnownSineIntegrandQ,
KnownTangentIntegrandQ, KnownCotangentIntegrandQ,
KnownSecantIntegrandQ, TryPureTanSubst, TryTanhSubst, TryPureTanhSubst,
AbsurdNumberGCD, AbsurdNumberGCDList, ExpandTrigExpand,
ExpandTrigReduce, ExpandTrigReduceAux, NormalizeTrig, TrigToExp,
ExpandTrigToExp, TrigReduce, FunctionOfTrig, AlgebraicTrigFunctionQ,
FunctionOfHyperbolic, FunctionOfQ, FunctionOfExpnQ, PureFunctionOfSinQ,
PureFunctionOfCosQ, PureFunctionOfTanQ, PureFunctionOfCotQ,
FunctionOfCosQ, FunctionOfSinQ, OddTrigPowerQ, FunctionOfTanQ,
FunctionOfTanWeight, FunctionOfTrigQ, FunctionOfDensePolynomialsQ,
FunctionOfLog, PowerVariableExpn, PowerVariableDegree,
PowerVariableSubst, EulerIntegrandQ, FunctionOfSquareRootOfQuadratic,
SquareRootOfQuadraticSubst, Divides, EasyDQ, ProductOfLinearPowersQ,
Rt, NthRoot, AtomBaseQ, SumBaseQ, NegSumBaseQ, AllNegTermQ,
SomeNegTermQ, TrigSquareQ, RtAux, TrigSquare, IntSum, IntTerm, Map2,
ConstantFactor, SameQ, ReplacePart, CommonFactors,
MostMainFactorPosition, FunctionOfExponentialQ, FunctionOfExponential,
FunctionOfExponentialFunction, FunctionOfExponentialFunctionAux,
FunctionOfExponentialTest, FunctionOfExponentialTestAux, stdev,
rubi_test, If, IntQuadraticQ, IntBinomialQ, RectifyTangent,
RectifyCotangent, Inequality, Condition, Simp, SimpHelp, SplitProduct,
SplitSum, SubstFor, SubstForAux, FresnelS, FresnelC, Erfc, Erfi, Gamma,
FunctionOfTrigOfLinearQ, ElementaryFunctionQ, Complex, UnsameQ,
_SimpFixFactor, SimpFixFactor, _FixSimplify, FixSimplify,
_SimplifyAntiderivativeSum, SimplifyAntiderivativeSum,
_SimplifyAntiderivative, SimplifyAntiderivative, _TrigSimplifyAux,
TrigSimplifyAux, Cancel, Part, PolyLog, D, Dist, Sum_doit, PolynomialQuotient, Floor,
PolynomialRemainder, Factor, PolyLog, CosIntegral, SinIntegral, LogIntegral, SinhIntegral,
CoshIntegral, Rule, Erf, PolyGamma, ExpIntegralEi, ExpIntegralE, LogGamma , UtilityOperator, Factorial,
Zeta, ProductLog, DerivativeDivides, HypergeometricPFQ, IntHide, OneQ, Null, rubi_exp as exp, rubi_log as log, Discriminant,
Negative, Quotient
)
from sympy import (Integral, S, sqrt, And, Or, Integer, Float, Mod, I, Abs, simplify, Mul,
Add, Pow, sign, EulerGamma)
from sympy.integrals.rubi.symbol import WC
from sympy.core.symbol import symbols, Symbol
from sympy.functions import (sin, cos, tan, cot, csc, sec, sqrt, erf)
from sympy.functions.elementary.hyperbolic import (acosh, asinh, atanh, acoth, acsch, asech, cosh, sinh, tanh, coth, sech, csch)
from sympy.functions.elementary.trigonometric import (atan, acsc, asin, acot, acos, asec, atan2)
from sympy import pi as Pi
A_, B_, C_, F_, G_, H_, a_, b_, c_, d_, e_, f_, g_, h_, i_, j_, k_, l_, m_, n_, p_, q_, r_, t_, u_, v_, s_, w_, x_, y_, z_ = [WC(i) for i in 'ABCFGHabcdefghijklmnpqrtuvswxyz']
a1_, a2_, b1_, b2_, c1_, c2_, d1_, d2_, n1_, n2_, e1_, e2_, f1_, f2_, g1_, g2_, n1_, n2_, n3_, Pq_, Pm_, Px_, Qm_, Qr_, Qx_, jn_, mn_, non2_, RFx_, RGx_ = [WC(i) for i in ['a1', 'a2', 'b1', 'b2', 'c1', 'c2', 'd1', 'd2', 'n1', 'n2', 'e1', 'e2', 'f1', 'f2', 'g1', 'g2', 'n1', 'n2', 'n3', 'Pq', 'Pm', 'Px', 'Qm', 'Qr', 'Qx', 'jn', 'mn', 'non2', 'RFx', 'RGx']]
i, ii, Pqq, Q, R, r, C, k, u = symbols('i ii Pqq Q R r C k u')
_UseGamma = False
ShowSteps = False
StepCounter = None
def piecewise_linear():
from sympy.integrals.rubi.constraints import cons1092, cons19, cons1093, cons89, cons90, cons1094, cons91, cons25, cons74, cons68, cons4, cons1095, cons216, cons685, cons102, cons103, cons1096, cons1097, cons33, cons96, cons358, cons1098, cons21, cons1099, cons2, cons3
pattern1885 = Pattern(Integral(u_**WC('m', S(1)), x_), cons19, cons1092)
rule1885 = ReplacementRule(pattern1885, With1885)
pattern1886 = Pattern(Integral(v_/u_, x_), cons1093, CustomConstraint(With1886))
rule1886 = ReplacementRule(pattern1886, replacement1886)
pattern1887 = Pattern(Integral(v_**n_/u_, x_), cons1093, cons89, cons90, cons1094, CustomConstraint(With1887))
rule1887 = ReplacementRule(pattern1887, replacement1887)
pattern1888 = Pattern(Integral(S(1)/(u_*v_), x_), cons1093, CustomConstraint(With1888))
rule1888 = ReplacementRule(pattern1888, replacement1888)
pattern1889 = Pattern(Integral(S(1)/(u_*sqrt(v_)), x_), cons1093, CustomConstraint(With1889))
rule1889 = ReplacementRule(pattern1889, replacement1889)
pattern1890 = Pattern(Integral(S(1)/(u_*sqrt(v_)), x_), cons1093, CustomConstraint(With1890))
rule1890 = ReplacementRule(pattern1890, replacement1890)
pattern1891 = Pattern(Integral(v_**n_/u_, x_), cons1093, cons89, cons91, CustomConstraint(With1891))
rule1891 = ReplacementRule(pattern1891, replacement1891)
pattern1892 = Pattern(Integral(v_**n_/u_, x_), cons1093, cons25, CustomConstraint(With1892))
rule1892 = ReplacementRule(pattern1892, replacement1892)
pattern1893 = Pattern(Integral(S(1)/(sqrt(u_)*sqrt(v_)), x_), cons1093, CustomConstraint(With1893))
rule1893 = ReplacementRule(pattern1893, replacement1893)
pattern1894 = Pattern(Integral(S(1)/(sqrt(u_)*sqrt(v_)), x_), cons1093, CustomConstraint(With1894))
rule1894 = ReplacementRule(pattern1894, replacement1894)
pattern1895 = Pattern(Integral(u_**m_*v_**n_, x_), cons19, cons4, cons1093, cons74, cons68, CustomConstraint(With1895))
rule1895 = ReplacementRule(pattern1895, replacement1895)
pattern1896 = Pattern(Integral(u_**m_*v_**WC('n', S(1)), x_), cons19, cons4, cons1093, cons68, cons1095, CustomConstraint(With1896))
rule1896 = ReplacementRule(pattern1896, replacement1896)
pattern1897 = Pattern(Integral(u_**m_*v_**WC('n', S(1)), x_), cons1093, cons216, cons89, cons90, cons685, cons102, cons103, CustomConstraint(With1897))
rule1897 = ReplacementRule(pattern1897, replacement1897)
pattern1898 = Pattern(Integral(u_**m_*v_**n_, x_), cons1093, cons685, cons1096, cons1097, CustomConstraint(With1898))
rule1898 = ReplacementRule(pattern1898, replacement1898)
pattern1899 = Pattern(Integral(u_**m_*v_**n_, x_), cons1093, cons216, cons33, cons96, CustomConstraint(With1899))
rule1899 = ReplacementRule(pattern1899, replacement1899)
pattern1900 = Pattern(Integral(u_**m_*v_**n_, x_), cons1093, cons358, cons1098, CustomConstraint(With1900))
rule1900 = ReplacementRule(pattern1900, replacement1900)
pattern1901 = Pattern(Integral(u_**m_*v_**n_, x_), cons1093, cons21, cons25, CustomConstraint(With1901))
rule1901 = ReplacementRule(pattern1901, replacement1901)
pattern1902 = Pattern(Integral(u_**WC('n', S(1))*log(x_*WC('b', S(1)) + WC('a', S(0))), x_), cons2, cons3, cons1092, cons1099, cons89, cons90)
rule1902 = ReplacementRule(pattern1902, With1902)
pattern1903 = Pattern(Integral(u_**WC('n', S(1))*(x_*WC('b', S(1)) + WC('a', S(0)))**WC('m', S(1))*log(x_*WC('b', S(1)) + WC('a', S(0))), x_), cons2, cons3, cons19, cons1092, cons1099, cons89, cons90, cons68)
rule1903 = ReplacementRule(pattern1903, With1903)
return [rule1885, rule1886, rule1887, rule1888, rule1889, rule1890, rule1891, rule1892, rule1893, rule1894, rule1895, rule1896, rule1897, rule1898, rule1899, rule1900, rule1901, rule1902, rule1903, ]
def With1885(m, u, x):
c = D(u, x)
return Dist(S(1)/c, Subst(Int(x**m, x), x, u), x)
def With1886(u, v, x):
if isinstance(x, (int, Integer, float, Float)):
return False
a = D(u, x)
b = D(v, x)
if NonzeroQ(-a*v + b*u):
return True
return False
def replacement1886(u, v, x):
a = D(u, x)
b = D(v, x)
return -Dist((-a*v + b*u)/a, Int(S(1)/u, x), x) + Simp(b*x/a, x)
def With1887(n, u, v, x):
if isinstance(x, (int, Integer, float, Float)):
return False
a = D(u, x)
b = D(v, x)
if NonzeroQ(-a*v + b*u):
return True
return False
def replacement1887(n, u, v, x):
a = D(u, x)
b = D(v, x)
return -Dist((-a*v + b*u)/a, Int(v**(n + S(-1))/u, x), x) + Simp(v**n/(a*n), x)
def With1888(u, v, x):
if isinstance(x, (int, Integer, float, Float)):
return False
a = D(u, x)
b = D(v, x)
if NonzeroQ(-a*v + b*u):
return True
return False
def replacement1888(u, v, x):
a = D(u, x)
b = D(v, x)
return -Dist(a/(-a*v + b*u), Int(S(1)/u, x), x) + Dist(b/(-a*v + b*u), Int(S(1)/v, x), x)
def With1889(u, v, x):
if isinstance(x, (int, Integer, float, Float)):
return False
a = D(u, x)
b = D(v, x)
if And(NonzeroQ(-a*v + b*u), PosQ((-a*v + b*u)/a)):
return True
return False
def replacement1889(u, v, x):
a = D(u, x)
b = D(v, x)
return Simp(S(2)*ArcTan(sqrt(v)/Rt((-a*v + b*u)/a, S(2)))/(a*Rt((-a*v + b*u)/a, S(2))), x)
def With1890(u, v, x):
if isinstance(x, (int, Integer, float, Float)):
return False
a = D(u, x)
b = D(v, x)
if And(NonzeroQ(-a*v + b*u), NegQ((-a*v + b*u)/a)):
return True
return False
def replacement1890(u, v, x):
a = D(u, x)
b = D(v, x)
return Simp(-S(2)*atanh(sqrt(v)/Rt(-(-a*v + b*u)/a, S(2)))/(a*Rt(-(-a*v + b*u)/a, S(2))), x)
def With1891(n, u, v, x):
if isinstance(x, (int, Integer, float, Float)):
return False
a = D(u, x)
b = D(v, x)
if NonzeroQ(-a*v + b*u):
return True
return False
def replacement1891(n, u, v, x):
a = D(u, x)
b = D(v, x)
return -Dist(a/(-a*v + b*u), Int(v**(n + S(1))/u, x), x) + Simp(v**(n + S(1))/((n + S(1))*(-a*v + b*u)), x)
def With1892(n, u, v, x):
if isinstance(x, (int, Integer, float, Float)):
return False
a = D(u, x)
b = D(v, x)
if NonzeroQ(-a*v + b*u):
return True
return False
def replacement1892(n, u, v, x):
a = D(u, x)
b = D(v, x)
return Simp(v**(n + S(1))*Hypergeometric2F1(S(1), n + S(1), n + S(2), -a*v/(-a*v + b*u))/((n + S(1))*(-a*v + b*u)), x)
def With1893(u, v, x):
if isinstance(x, (int, Integer, float, Float)):
return False
a = D(u, x)
b = D(v, x)
if And(NonzeroQ(-a*v + b*u), PosQ(a*b)):
return True
return False
def replacement1893(u, v, x):
a = D(u, x)
b = D(v, x)
return Simp(S(2)*atanh(sqrt(u)*Rt(a*b, S(2))/(a*sqrt(v)))/Rt(a*b, S(2)), x)
def With1894(u, v, x):
if isinstance(x, (int, Integer, float, Float)):
return False
a = D(u, x)
b = D(v, x)
if And(NonzeroQ(-a*v + b*u), NegQ(a*b)):
return True
return False
def replacement1894(u, v, x):
a = D(u, x)
b = D(v, x)
return Simp(S(2)*ArcTan(sqrt(u)*Rt(-a*b, S(2))/(a*sqrt(v)))/Rt(-a*b, S(2)), x)
def With1895(m, n, u, v, x):
if isinstance(x, (int, Integer, float, Float)):
return False
a = D(u, x)
b = D(v, x)
if NonzeroQ(-a*v + b*u):
return True
return False
def replacement1895(m, n, u, v, x):
a = D(u, x)
b = D(v, x)
return -Simp(u**(m + S(1))*v**(n + S(1))/((m + S(1))*(-a*v + b*u)), x)
def With1896(m, n, u, v, x):
if isinstance(x, (int, Integer, float, Float)):
return False
a = D(u, x)
b = D(v, x)
if NonzeroQ(-a*v + b*u):
return True
return False
def replacement1896(m, n, u, v, x):
a = D(u, x)
b = D(v, x)
return -Dist(b*n/(a*(m + S(1))), Int(u**(m + S(1))*v**(n + S(-1)), x), x) + Simp(u**(m + S(1))*v**n/(a*(m + S(1))), x)
def With1897(m, n, u, v, x):
if isinstance(x, (int, Integer, float, Float)):
return False
a = D(u, x)
b = D(v, x)
if NonzeroQ(-a*v + b*u):
return True
return False
def replacement1897(m, n, u, v, x):
a = D(u, x)
b = D(v, x)
return -Dist(n*(-a*v + b*u)/(a*(m + n + S(1))), Int(u**m*v**(n + S(-1)), x), x) + Simp(u**(m + S(1))*v**n/(a*(m + n + S(1))), x)
def With1898(m, n, u, v, x):
if isinstance(x, (int, Integer, float, Float)):
return False
a = D(u, x)
b = D(v, x)
if NonzeroQ(-a*v + b*u):
return True
return False
def replacement1898(m, n, u, v, x):
a = D(u, x)
b = D(v, x)
return -Dist(n*(-a*v + b*u)/(a*(m + n + S(1))), Int(u**m*v**(n + S(-1)), x), x) + Simp(u**(m + S(1))*v**n/(a*(m + n + S(1))), x)
def With1899(m, n, u, v, x):
if isinstance(x, (int, Integer, float, Float)):
return False
a = D(u, x)
b = D(v, x)
if NonzeroQ(-a*v + b*u):
return True
return False
def replacement1899(m, n, u, v, x):
a = D(u, x)
b = D(v, x)
return Dist(b*(m + n + S(2))/((m + S(1))*(-a*v + b*u)), Int(u**(m + S(1))*v**n, x), x) - Simp(u**(m + S(1))*v**(n + S(1))/((m + S(1))*(-a*v + b*u)), x)
def With1900(m, n, u, v, x):
if isinstance(x, (int, Integer, float, Float)):
return False
a = D(u, x)
b = D(v, x)
if NonzeroQ(-a*v + b*u):
return True
return False
def replacement1900(m, n, u, v, x):
a = D(u, x)
b = D(v, x)
return Dist(b*(m + n + S(2))/((m + S(1))*(-a*v + b*u)), Int(u**(m + S(1))*v**n, x), x) - Simp(u**(m + S(1))*v**(n + S(1))/((m + S(1))*(-a*v + b*u)), x)
def With1901(m, n, u, v, x):
if isinstance(x, (int, Integer, float, Float)):
return False
a = D(u, x)
b = D(v, x)
if NonzeroQ(-a*v + b*u):
return True
return False
def replacement1901(m, n, u, v, x):
a = D(u, x)
b = D(v, x)
return Simp(u**m*v**(n + S(1))*(b*u/(-a*v + b*u))**(-m)*Hypergeometric2F1(-m, n + S(1), n + S(2), -a*v/(-a*v + b*u))/(b*(n + S(1))), x)
def With1902(a, b, n, u, x):
c = D(u, x)
return -Dist(c*n/b, Int(u**(n + S(-1))*(a + b*x)*log(a + b*x), x), x) - Int(u**n, x) + Simp(u**n*(a + b*x)*log(a + b*x)/b, x)
def With1903(a, b, m, n, u, x):
c = D(u, x)
return -Dist(c*n/(b*(m + S(1))), Int(u**(n + S(-1))*(a + b*x)**(m + S(1))*log(a + b*x), x), x) - Dist(S(1)/(m + S(1)), Int(u**n*(a + b*x)**m, x), x) + Simp(u**n*(a + b*x)**(m + S(1))*log(a + b*x)/(b*(m + S(1))), x)
| UTF-8 | Python | false | false | 19,898 | py | 1,896 | piecewise_linear.py | 1,479 | 0.639964 | 0.591617 | 0 | 488 | 39.77459 | 360 |
ptrstn/certifier | 4,707,284,204,940 | 2660e0d7f014ea18b83735768bcebfb27e4cdfdb | 8af9a98f15c17367b9b1efd96626c66baa6348d4 | /analysis/views.py | 6db67846d38c823f582587c94f6320a206311ce2 | []
| no_license | https://github.com/ptrstn/certifier | c782310a65a1fe2a10a1d5ad42cb886abd12e66b | da20e317391b16f49f05615324bb9f72e4226395 | refs/heads/master | 2022-11-13T08:46:26.026756 | 2020-06-22T17:24:37 | 2020-06-22T17:24:37 | 172,786,402 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import authentication, permissions
from users.models import User
from django.shortcuts import render
from oms.utils import retrieve_run
from oms.models import OmsRun
from .analyse import run_principal_component_analysis, run_tsne, run_umap, load_data
from .models import ChartDataModel
import json
from django.utils.safestring import mark_safe
from pandas.compat import StringIO
import pandas as pd
from .jobs.chart_data_load_job import update_data
class ChartData(APIView):
authentication_classes = []
permission_classes = []
def get(self, request, format=None):
"""
Return a list of all users.
"""
data = {
"hello": "world",
}
return Response(data)
def generate_chart_data(data, run_number, reco, chart):
chart_data = []
good_runs = {}
bad_runs = {}
this_run = {}
this_data = {}
good_data = data[data["bad_reason"]=="GOOD"]
bad_data = data[data["bad_reason"]!="GOOD"]
good_runs["x"] = list(good_data[chart+"1"])
good_runs["y"] = list(good_data[chart+"2"])
good_runs["text"] = list(good_data["run_number"])
good_runs["mode"] = "markers"
good_runs["type"] = "scatter"
good_runs["name"] = "Good"
good_runs["marker"] = {"size": 8}
bad_runs["x"] = list(bad_data[chart+"1"])
bad_runs["y"] = list(bad_data[chart+"2"])
bad_runs["text"] = list(bad_data["run_number"])
bad_runs["mode"] = "markers"
bad_runs["type"] = "scatter"
bad_runs["name"] = "Bad"
bad_runs["marker"] = {"size": 8}
chart_data.append(good_runs)
chart_data.append(bad_runs)
try:
this_data = data[data["run_number"]==run_number][data["reco"]==reco]
this_run["x"] = list(this_data[chart+"1"])
this_run["y"] = list(this_data[chart+"2"])
this_run["text"] = list(this_data["run_number"])
this_run["mode"] = "markers"
this_run["type"] = "scatter"
this_run["name"] = "Current Run ("+str(run_number)+")"
this_run["marker"] = {"size": 12}
chart_data.append(this_run)
except KeyError:
pass
return chart_data
def analyse(request, run_number, reco):
try:
run = OmsRun.objects.get(run_number=run_number)
except OmsRun.DoesNotExist:
run = retrieve_run(run_number)
try:
chart_data_instance = ChartDataModel.objects.get()
except ChartDataModel.DoesNotExist:
update_data()
chart_data_instance = ChartDataModel.objects.get()
pca_data = pd.read_csv(StringIO(chart_data_instance.pca_data))
t_sne_data = pd.read_csv(StringIO(chart_data_instance.t_sne_data))
umap_data = pd.read_csv(StringIO(chart_data_instance.umap_data))
pca_chart_data = generate_chart_data(pca_data, run_number, reco, "pca")
t_sne_chart_data = generate_chart_data(t_sne_data, run_number, reco, "tsne")
umap_chart_data = generate_chart_data(umap_data, run_number, reco, "umap")
print(type(pca_data))
context = { "run_number": run_number,
"reco": reco, "run": run,
"pca_data": mark_safe(json.dumps(pca_chart_data)),
"t_sne_data": mark_safe(json.dumps(t_sne_chart_data)),
"umap_data": mark_safe(json.dumps(umap_chart_data))
}
return render(request, "analysis/analyse.html", context)
| UTF-8 | Python | false | false | 3,435 | py | 68 | views.py | 59 | 0.622416 | 0.619505 | 0 | 104 | 32.028846 | 84 |
vaguely-right/Baseball | 19,593,640,824,601 | 1681ab059da8c66bd880a1b6d32463e498d9dc33 | 12f5a42183290a18f906146d7941861baf04b9cb | /Retrosheet/Old/6-retrosheetlogistic.py | 2a7290190c677db3dbdb1c06d43513b0b2f5eaed | []
| no_license | https://github.com/vaguely-right/Baseball | ac92e0a63ad9b0fbd07ceaf401e252bb7f53aa0f | 8b06fb779157d2c8951ccc2fa6d3f164c27490c7 | refs/heads/master | 2021-06-30T21:29:16.431884 | 2021-01-03T01:31:02 | 2021-01-03T01:31:02 | 206,458,040 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
from tqdm import tqdm
import numpy.linalg as la
import seaborn as sns
from sklearn.linear_model import LogisticRegression
import seaborn as sns
pd.set_option('display.width',150)
pd.set_option('display.max_columns',16)
#%%
# Read the constants
fg = pd.read_csv('fgconstants.csv')
fg.set_index('Season',inplace=True)
def get_events(year):
if type(year)==int:
year = str(year)
#Define the files
gmfile = 'Data\\'+year+'games.txt'
evfile = 'Data\\'+year+'events.txt'
idfile = 'retroID.csv'
#Read the data
gm = pd.read_csv(gmfile)
ev = pd.read_csv(evfile)
pid = pd.read_csv(idfile,index_col=False)
pid['Name'] = pid.First+' '+pid.Last
#Get the gamesite from the game dataframe
ev = ev.merge(gm[['gameid','gamesite']],how='left',on='gameid')
#Get just the end of batter events
ev = ev[ev.battereventflag=='T']
#Create a dictinary for the eventtype codes
eventdict = {0 : 'UNKNOWN',
1 : 'NOBAT',
2 : 'BIPOUT',
3 : 'K',
4 : 'NOBAT',
5 : 'NOBAT',
6 : 'NOBAT',
7 : 'NOBAT',
8 : 'NOBAT',
9 : 'NOBAT',
10 : 'NOBAT',
11 : 'NOBAT',
12 : 'NOBAT',
13 : 'NOBAT',
14 : 'BB',
15 : 'OTHER',
16 : 'BB',
17 : 'OTHER',
18 : 'OTHER',
19 : 'BIPOUT',
20 : 'SNGL',
21 : 'XBH',
22 : 'XBH',
23 : 'HR',
24 : 'NOBAT'}
eventdf = pd.DataFrame.from_dict(eventdict,orient='index')
eventdf.columns=['event']
#Assign event abbreviations to every event
ev = ev.merge(eventdf,how='left',left_on='eventtype',right_index=True)
#Specify sacrifice hit and fly events
ev.event[ev.shflag=='T'] = 'OTHER'
ev.event[ev.sfflag=='T'] = 'BIPOUT'
ev['timesthrough'] = ev.groupby(['gameid','pitcher']).cumcount()//9
ev.timesthrough[ev.timesthrough>2] = 2
ev['pitbathand'] = ev.pitcherhand+ev.batterhand
return ev
def pivot_events(year,split,minpa=0):
if split not in ['batter','pitcher','gamesite','batterhand','pitcherhand','timesthrough','pitbathand']:
print('Invalid split index')
print('Currently supported: batter, pitcher, gamesite, batterhand, pitcherhand, pitbathand, timesthrough')
return
ev = get_events(year)
# New in this version: drop OTHER events
ev = ev[ev.event!='OTHER']
ptable = pd.pivot_table(ev[[split,'event']],index=[split],columns=['event'],aggfunc=len,fill_value=0,margins=True)
ptable = ptable[:-1]
ptable = ptable.rename(columns={'All':'PA'})
# ptable = ptable[['PA','SNGL','XBH','HR','BB','K','BIPOUT','OTHER']]
ptable = ptable[['PA','SNGL','XBH','HR','BB','K','BIPOUT']]
ptable.SNGL = ptable.SNGL/ptable.PA
ptable.XBH = ptable.XBH/ptable.PA
ptable.HR = ptable.HR/ptable.PA
ptable.BB = ptable.BB/ptable.PA
ptable.K = ptable.K/ptable.PA
ptable.BIPOUT = ptable.BIPOUT/ptable.PA
# ptable.OTHER = ptable.OTHER/ptable.PA
# ptable['AVG'] = (ptable.SNGL+ptable.XBH+ptable.HR)/(ptable.SNGL+ptable.XBH+ptable.HR+ptable.K+ptable.BIPOUT)
# ptable['OBP'] = (ptable.SNGL+ptable.XBH+ptable.HR+ptable.BB)/(ptable.SNGL+ptable.XBH+ptable.HR+ptable.K+ptable.BIPOUT+ptable.BB)
# ptable['WOBA'] = (ptable.SNGL*0.89+ptable.XBH*1.31+ptable.HR*2.10+ptable.BB*0.70)/(1-ptable.OTHER)
# ptable['FIP'] = (ptable.HR*13+ptable.BB*3-ptable.K*2)/(ptable.K+ptable.BIPOUT)*3+3.05
ptable['AVG'] = (ptable.SNGL+ptable.XBH+ptable.HR)/(1-ptable.BB)
ptable['OBP'] = ptable.SNGL+ptable.XBH+ptable.HR+ptable.BB
c = fg.loc[year]
ptable['WOBA'] = ptable.SNGL*c.w1B + ptable.XBH*(c.w2B*0.9+c.w3B*0.1) + ptable.HR*c.wHR + ptable.BB*(c.wBB*0.9+c.wHBP*0.1)
ptable['FIP'] = (ptable.HR*13+ptable.BB*3-ptable.K*2)/(ptable.K+ptable.BIPOUT)*3+c.cFIP
return ptable
#%%
# Get the events for a specified year
year = 2013
cols = ['SNGL','XBH','HR','BB','K','BIPOUT']
splits = ['batter','pitcher','gamesite','timesthrough','pitbathand']
ev = get_events(year)
ev = ev[ev.event!='OTHER']
ev = ev[['batter','pitcher','gamesite','timesthrough','pitbathand','event']]
ev['ind'] = 1.0
# Calculate the mean probabilities, ratios, and logratios
pbar = ev.event.value_counts(normalize=True).to_frame().transpose()
pbar = pbar[['SNGL','XBH','HR','BB','K','BIPOUT']]
rbar = pbar / (1-pbar)
logrbar = np.log(rbar)
# Pivot to get the indicators
xbatter = ev.pivot(columns='batter',values='ind').fillna(0)
xpitcher = ev.pivot(columns='pitcher',values='ind').fillna(0)
xgamesite = ev.pivot(columns='gamesite',values='ind').fillna(0)
xtimesthrough = ev.pivot(columns='timesthrough',values='ind').fillna(0)
xpitbathand = ev.pivot(columns='pitbathand',values='ind').fillna(0)
# Concatenate the indicators for the array
xbatter.columns = pd.MultiIndex.from_product([['batter'],xbatter.columns])
xpitcher.columns = pd.MultiIndex.from_product([['pitcher'],xpitcher.columns])
xgamesite.columns = pd.MultiIndex.from_product([['gamesite'],xgamesite.columns])
xtimesthrough.columns = pd.MultiIndex.from_product([['timesthrough'],xtimesthrough.columns])
xpitbathand.columns = pd.MultiIndex.from_product([['pitbathand'],xpitbathand.columns])
x = pd.concat([xbatter,xpitcher,xgamesite,xtimesthrough,xpitbathand],axis=1)
x.columns.names=['split','ID']
#x['intercept','intercept'] = 1.0
#%% Try categorical logistic regression with just two predictors
reg = LogisticRegression()
X = x[['batter','pitcher']].to_numpy()
Y = ev[['event']]
reg.fit(X,Y)
#%% Get the means in logit, probit, and probability space
bbar = pd.DataFrame(reg.intercept_).transpose()
bbar.columns = reg.classes_.transpose()
bbar.index = ['mean']
rbar = np.exp(bbar)
pbar = rbar/(1+rbar)
pbar['SUM'] = pbar.sum(axis=1)
pbar
#%% ALTERNATE: Go the other way with it, start with the "true" pbar
pbar = ev.event.value_counts(normalize=True).to_frame().transpose()[['BB','BIPOUT','HR','K','SNGL','XBH']]
rbar = pbar / (1-pbar)
bbar = np.log(rbar)
#%% Transform the estimates
bhat = pd.DataFrame(reg.coef_.transpose())
bhat.index = x[['batter','pitcher']].columns
bhat.columns = reg.classes_
rhat = np.exp(np.add(bhat,bbar))
phat = rhat/(1+rhat)
phat['SUM'] = phat.sum(axis=1)
#%%
s='batter'
o='HR'
sns.scatterplot(x=p[o].loc[s],y=phat[o].loc[s],hue=p.loc[s].PA)
p[o].loc[s].hist(weights=p.loc[s].PA)
phat[o].loc[s].hist(weights=p.loc[s].PA)
#%%
#%%
#%%
#%% Now try categorical logistic regression for all five predictors
#reg = LogisticRegression()
#reg = LogisticRegression(class_weight=truepbar.transpose().event.to_dict())
#reg = LogisticRegression(class_weight='balanced')
reg = LogisticRegression(class_weight=truepbar.transpose().event.apply(lambda x: np.sqrt(x/(1-x))).to_dict())
X = x.to_numpy()
Y = ev[['event']]
reg.fit(X,Y)
#%% Get the means in logit, probit, and probability space
bbar = pd.DataFrame(reg.intercept_).transpose()
bbar.columns = reg.classes_.transpose()
bbar.index = ['mean']
rbar = np.exp(bbar)
pbar = rbar/(1+rbar)
#pbar['SUM'] = pbar.sum(axis=1)
#pbar
#%% ALTERNATE: Go the other way with it, start with the "true" pbar
truepbar = ev.event.value_counts(normalize=True).to_frame().transpose()[['BB','BIPOUT','HR','K','SNGL','XBH']]
rbar = truepbar / (1-truepbar)
bbar = np.log(rbar)
#%% Transform the estimates
bhat = pd.DataFrame(reg.coef_.transpose())
bhat.index = x.columns
bhat.columns = reg.classes_
rhat = np.exp(np.add(bhat,bbar))
phat = rhat/(1+rhat)
#phat['SUM'] = phat.sum(axis=1)
#SUM is way off now, normalize
#phat = np.divide(phat,np.sum(phat,axis=1).to_frame())
#phat.sum(axis=1)
#%%
phat.groupby('split').mean()
pbar
truepbar
sns.scatterplot(truepbar.transpose().event.to_numpy(),np.divide(pbar,truepbar).transpose()['mean'].to_numpy())
phat['SUM'] = phat.sum(axis=1)
#%%
# Get some base values to compare to
pbatter = pivot_events(year,'batter')
pbatter = pbatter[cols+['PA']]
#pbatter.columns = pd.MultiIndex.from_product([['batter'],pbatter.columns])
#pbatter.index = pd.MultiIndex.from_product([['bat'],pbatter.index])
ppitcher = pivot_events(year,'pitcher')
ppitcher = ppitcher[cols+['PA']]
#ppitcher.columns = pd.MultiIndex.from_product([['pitcher'],ppitcher.columns])
pgamesite = pivot_events(year,'gamesite')
pgamesite = pgamesite[cols+['PA']]
#pgamesite.columns = pd.MultiIndex.from_product([['gamesite'],pgamesite.columns])
ptimesthrough = pivot_events(year,'timesthrough')
ptimesthrough = ptimesthrough[cols+['PA']]
ppitbathand = pivot_events(year,'pitbathand')
ppitbathand = ppitbathand[cols+['PA']]
p = pd.concat([pbatter,ppitcher,pgamesite,ptimesthrough,ppitbathand],axis=0)
p.index = x.columns
#%%
s='pitbathand'
o='K'
sns.scatterplot(x=p[o].loc[s],y=phat[o].loc[s],hue=p.loc[s].PA)
sns.scatterplot(x=p[cols].loc[s],y=phat[cols].loc[s])
p[o].loc[s].hist(weights=p.loc[s].PA)
phat[o].loc[s].hist(weights=p.loc[s].PA)
plong = p.melt(ignore_index=False).reset_index()
phatlong = phat.melt(ignore_index=False).rename(columns={'variable':'event'}).reset_index()
comp = plong.merge(phatlong,how='inner',on=['split','ID','event']).merge(p.PA,left_on=['split','ID'],right_index=True)
sns.scatterplot(x='value_x',y='value_y',data=comp,hue='PA')
phat
| UTF-8 | Python | false | false | 9,423 | py | 66 | 6-retrosheetlogistic.py | 12 | 0.646503 | 0.633875 | 0 | 292 | 31.232877 | 133 |
berserg2010/family_and_history_backend | 14,181,982,017,011 | c9c5d54f8aa8a3159e8e623351b62d41494d3406 | d8e287db32e57e25e0cdebb3c03b07409e8cf66f | /backend/person_app/events/birth/tests/queries.py | 384e62f782c3d3b3e2063faf401671ff9d5495c7 | [
"Apache-2.0"
]
| permissive | https://github.com/berserg2010/family_and_history_backend | 4adeb449b0c307b4d2853f0101ca67ee4deb07f3 | 08fd5901e6e0c9cbd75a72e46d69ac53c737786a | refs/heads/master | 2020-05-18T19:41:56.060827 | 2019-05-03T18:21:20 | 2019-05-03T18:21:20 | 184,614,338 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # BIRTH
ALL_BIRTH = '''
query AllBirth($idPerson: ID){
allBirth(idPerson: $idPerson){
id
}
}
'''
BIRTH = '''
query Birth($id: ID!){
birth(id: $id){
id
}
}
'''
SAVE_BIRTH = '''
mutation SaveBirth(
$data: BirthInput
){
saveBirth(
data: $data
){
status
formErrors
birth{
id
}
}
}
'''
DELETE_BIRTH = '''
mutation DeleteBirth($id: ID!){
deleteBirth(id: $id){
status
id
}
}
'''
SEARCH_BIRTH = '''
query SearchBirth($searchTerm: String){
searchBirth(searchTerm: $searchTerm){
id
surname
}
}
'''
LIKE_BIRTH = '''
mutation LikeBirth($id: ID!, $email: String!){
likeBirth(
id: $id,
email: $email,
){
birth{
likes
}
}
}
'''
| UTF-8 | Python | false | false | 822 | py | 32 | queries.py | 31 | 0.469586 | 0.469586 | 0 | 63 | 12.047619 | 46 |
wmarenga/Python | 18,820,546,714,355 | cf18bf05190d30cd2d0abd062a20dc4ee48a4033 | 825799d25e40efc1af92c7252cb443be3ac8bed8 | /Python_para_Iniciantes_Direto_ao_que_interessa/Aulas/Aula28_Filtro_numpy.py | 18685b8f5c7f9f54fef497fb134d2d566d077993 | [
"MIT"
]
| permissive | https://github.com/wmarenga/Python | 398f456c44260e52e1309102d99a144ac29d0838 | f7e8fb960733d744268a2c136c6034e1a1205ecf | refs/heads/main | 2023-03-16T07:09:59.769021 | 2023-02-13T21:28:07 | 2023-02-13T21:28:07 | 297,141,069 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
"Lendo a planilha do Excel pelo pandas/Numpy"
caminho_arquivo ='D:\\23) Programação\\Cursos\\Python\\4) Python para Iniciantes (Direto ao que interessa)\\Aula 28 (Filtros_Numpy)\\caso_estudo.xlsx'
pessoas = np.array(pd.read_excel(caminho_arquivo, sheet_name='pessoas'))
print(pessoas)
"Mostrar todas as linhas da 2 coluna que e igual a 'Caio Pereira'"
print(pessoas[pessoas[:,1]=='Caio Pereira'])
"Mostrar todas as linhas da 3 coluna que e igual a 'M'"
print(pessoas[pessoas[:,2]=='M'])
"## Mostrar apenas a coluna 3 ## com todas as linhas da 3 coluna que e igual a 'M'"
print(pessoas[pessoas[:,2]=='M',3])
"## Aplicar uma soma na coluna 3 ## com todas as linhas da 3 coluna que e igual a 'M'"
print(np.sum(pessoas[pessoas[:,2]=='M',3]))
"## Podemos tb aplicar minimo, maximo e mediana coluna 3, como no pandas ## com todas as linhas da 3 coluna que e igual a 'M'"
print(np.min(pessoas[pessoas[:,2]=='M',3]))
print(np.max(pessoas[pessoas[:,2]=='M',3]))
print(np.mean(pessoas[pessoas[:,2]=='M',3])) | UTF-8 | Python | false | false | 1,044 | py | 1,192 | Aula28_Filtro_numpy.py | 1,118 | 0.697697 | 0.673704 | 0 | 24 | 42.458333 | 150 |
brandonivey/mixtapes | 3,882,650,469,127 | 039c34a389d8a10ab16c35d90ce478d0fa01ade4 | 87cd994ad889b8700520b37eef6172827aeb9338 | /mixtapes/server.py | fff3e8cd74a2d9b99f51a967932d2182dbacff51 | []
| no_license | https://github.com/brandonivey/mixtapes | a319ba70cdd26bc5056fc717b712cf25a08000b8 | 1ab8fdcb410efb15c122628184a6dc9d34f3dbf7 | refs/heads/master | 2020-05-30T14:41:27.735582 | 2014-04-25T13:53:07 | 2014-04-25T13:53:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import sys
class Log:
"""
Allows log files to replace sys.stderr or sys.stdout
If multipule file discriptors are passed to __init__, it will write
to each one
"""
def __init__(self, *fds):
self.fds = fds
def write(self, data):
for fd in self.fds:
fd.write(data)
fd.flush()
def close(self):
for fd in self.fds:
try:
fd.close()
except AttributeError:
pass
# This odd structure is me trying to get imported modules to write errors
# correctly when logging is enable
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Processes an approved\
mixtape ZIP file and uploads it to S3')
parser.add_argument('-d', '--keep-dirs', action="store_true", default=False,
help="Keep the temporary directories instead of deleteing")
parser.add_argument('-k', '--keep-orig', action="store_true",
default=False, help='Keep original ZIP')
parser.add_argument('-o', '--output',help="If this is set, program will\
output to given file instead of to STDOUT")
parser.add_argument("--save-rest", action="store_false", default=False,
help="Don't wipe the directory of non-ZIP files")
# Makes a command line interface with arguments
args = vars(parser.parse_args())
if args["output"]:
# if there is an output file passed
output = open(args["output"], "a")
log = Log(sys.stdout, output)
sys.stdout = log
sys.stderr = log
# all arguments are passed to process_zip, and it will not accept "output"
# process.process_mixtape will try to find args, we need to give it
del args["output"]
from twisted.internet import reactor, protocol
from twisted.internet.defer import DeferredQueue, DeferredSemaphore
from twisted.internet.threads import deferToThread
from process import debug
import process
import os
import json
# A note about python syntax
#
# This script makes frequant use of * and ** in functions funcs.
# func(*[1, 2, 3]) is the exact same thing as func(1, 2, 3)
# func(*{'foo': 1, 'bar': 2, 'baz': 3}) is the exact same thing as:
# func(foo=1, bar=2, baz=3)
class Processor():
"""
Whenever mixtapeReceived is called, deferToThread is scheduled to be run as
soon as a "slot" for being run is available. There is currently 1 slot
deferToThread runs process_mixtape in another thread, and releases the
slot when its that process is done
"""
def __init__(self):
self.sem = DeferredSemaphore(1) #do one thing at a time
def mixtapeReceived(self, mixtape):
debug("Adding %s to be processed" % mixtape)
self.sem.run(deferToThread, process.process_mixtape, *mixtape)
# DeferredSemaphore.run will not .release() until the defer returned by
# deferToThread fires
class AddToQueue(protocol.Protocol):
"""
Whenever someone connects, an instance of this protocol is made that
describes how to interact with them
"""
processor = Processor()
def __init__(self):
self.info = ""
def connectionMade(self):
debug("Connection made")
def dataReceived(self, data):
"""
This method is called whenever the client sends data
We are trying to get a number enclosed in square braces, because that's
easy to parse using JSON (JavaScript Object Notation)
"""
debug("Data received: %s" % data)
self.info += data
if self.info.endswith("]"):
try:
# Parses the recieved information
info = json.loads(self.info)
# Verify that it's exactly what we want
if type(info[0]) is not int:
raise Exception("ID %s is not int" % type(info[0]))
if len(info) is not 1:
raise Exception("%s args, expected exactly 1" % len(info))
self.processor.mixtapeReceived(info)
self.transport.write("OK")
except (ValueError, IndexError, Exception) as e:
# In the case of JSON not being able to parse, in the case of
# info[0] not making sense, or in the case of my own errors
self.transport.write(e.message)
debug("Error!" + str(e))
finally:
debug("Ending connection")
self.transport.loseConnection()
def verify_mixtape_counter():
"""
Ensures that a mixtape.counter file exists at the path of the script
See process.Connection.__enter__ for better documentation of this stuff
"""
pathbase = os.path.dirname(__file__)
pathbase = pathbase if pathbase else '.'
try:
f = open(os.path.join(pathbase, "mixtapes.counter"), 'r')
int(f.read())
except (OSError, IOError, Exception):
f = open(os.path.join(pathbase, "mixtapes.counter"), 'w')
f.write("0")
f.close()
def main():
"""This runs the the above on port 8000"""
factory = protocol.ServerFactory()
factory.protocol = AddToQueue
reactor.listenTCP(8000,factory)
verify_mixtape_counter()
process.reactor = reactor
reactor.run()
# Don't try to understand this.
if __name__ == "__main__":
# this next part will run main(), and always close output if it exists but
# not raise an error if it does not
process.args = args
try:
main()
finally:
try:
output.close()
except NameError:
pass
| UTF-8 | Python | false | false | 5,623 | py | 7 | server.py | 3 | 0.615863 | 0.610706 | 0 | 169 | 32.272189 | 80 |
Leonid3/neoart | 9,251,359,558,567 | 5972a6c2e32de0ae5cbcb6193678193b5a1e9ffc | c8412e6475de59e5596566d740c83b4d87c8c0b9 | /python/neoart/__init__.py | f334f432264a1749bf73ea97eaa056468e54e2c0 | []
| no_license | https://github.com/Leonid3/neoart | 1c5eac4e974b57523781ac7a663a92ac0e1575d9 | 91ec8447d89c849d8f7c8ee116eb365e3380f2d0 | refs/heads/master | 2020-12-24T14:27:33.984313 | 2013-01-18T13:39:11 | 2013-01-18T13:39:11 | 39,614,472 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | __all__ = ['Neoart', 'DefaultNumericArgs', 'CircularGeometry',
'CheaseGeometry']
from arguments import DefaultNumericArgs
from neoart import Neoart
from geometry import CircularGeometry, CheaseGeometry
| UTF-8 | Python | false | false | 214 | py | 125 | __init__.py | 14 | 0.766355 | 0.766355 | 0 | 6 | 34.666667 | 62 |
shinyu880509/stock | 12,962,211,310,388 | 12318763783125ac2f63a8281e203cae44ffe9fd | d9764f6d2e496f8c7eed781dc5cb21d0d66f6e6f | /getID.py | 10ecf3a55b0439e84a239a6e5edaffc86beca16b | []
| no_license | https://github.com/shinyu880509/stock | 4f61b68c8582f2e133bf0663e4fe041339dbe276 | e6256f1e4ec9a539f08bc33b16911fb3c060f2fa | refs/heads/master | 2023-02-08T12:54:47.694036 | 2021-01-04T07:47:11 | 2021-01-04T07:47:11 | 245,349,243 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sqlite3
#新聞用
def getNameData():
stockID = ['5203', '6112', '6183', '6214']
stockName = ['"訊連"' ,'"聚碩"' ,'"關貿"' ,'"精誠"']
return stockID,stockName
def getName(id):
stockID = ['2427', '2453', '2468', '2471', '2480', '3029', '3130', '4994', '5203', '6112', '6183', '6214']
stockName = ['2427三商電','2453凌群' ,'2468華經' ,'2471資通' ,'2480敦陽' ,'3029零壹' ,'3130一零四' ,'4994傳奇' ,'5203訊連' ,'6112聚碩' ,'6183關貿' ,'6214精誠']
for i in range(len(stockID)):
if id == stockID[i]:
return stockName[i]
def check(idd):
stockID = ['2427', '2453', '2468', '2471', '2480', '3029', '3130', '4994', '5203', '6112', '6183', '6214']
c = 1
for i in range(len(stockID)):
if idd == stockID[i]:
c = 0
return c
def checkTec(ty):
typeID = ['rsi', 'kd', 'macd', 'bias']
c = ['rsi', 0]
for i in range(len(typeID)):
if ty == typeID[i]:
c[0] = typeID[i]
c[1] = i
return c
def checkCha(ty):
typeID = ['30days', 'today']
c = ['30days', 0]
for i in range(len(typeID)):
if ty == typeID[i]:
c[0] = typeID[i]
c[1] = i
return c
def checkFin(ty):
typeName = ['個股月營收', '成長性分析', '股利政策', '獲利能力分析(季)', '資本形成-股東權益(季)', '現金分析(年)', '償還能力分析(季)', '經營能力分析(年)']
typeID = ['SymScore', 'GrowthAnly', 'Divpolicy', 'ProfitByQr', 'Capstreq', 'Cashratio', 'SolvencyBySym', 'Oper']
c = ['SymScore', 0, '個股月營收']
for i in range(len(typeID)):
if ty == typeID[i]:
c[0] = typeID[i]
c[1] = i
c[2] = typeName[i]
return c
def checkPre(ty):
typeID = ['pre', 'today']
c = ['pre', 0]
for i in range(len(typeID)):
if ty == typeID[i]:
c[0] = typeID[i]
c[1] = i
return c
#帳號管理
def getAcc():
conn = sqlite3.connect('stock.db')
c =conn.cursor()
c.execute("select * from account")
re = []
for rows in c.fetchall():
re.append(rows)
return re
def deleteAcc(uid):
err = 0
conn = sqlite3.connect('stock.db')
c =conn.cursor()
c.execute("select * from account")
for rows in c.fetchall():
if uid == rows[0]:
err = 1
if err == 1:
with sqlite3.connect("stock.db") as con:
cur = con.cursor()
cur.execute("delete from account where username = '" + uid + "'")
con.commit()
with sqlite3.connect("stock.db") as con:
cur = con.cursor()
cur.execute("delete from indexStock where username = '" + uid + "'")
con.commit()
return
def alterAcc(uid, n, ty):
if ty == '0':
err = 0
conn = sqlite3.connect('stock.db')
c =conn.cursor()
c.execute("select * from account")
for rows in c.fetchall():
if n == rows[0]:
return 0
if err != 1:
with sqlite3.connect("stock.db") as con:
cur = con.cursor()
cur.execute("update account set username = '" + n + "' where username = '" + uid + "'")
con.commit()
elif ty == '1':
with sqlite3.connect("stock.db") as con:
cur = con.cursor()
cur.execute("update account set email = '" + n + "' where username = '" + uid + "'")
con.commit()
elif ty == '2':
with sqlite3.connect("stock.db") as con:
cur = con.cursor()
cur.execute("update account set password = '" + n + "' where username = '" + uid + "'")
con.commit()
return 1 | UTF-8 | Python | false | false | 3,753 | py | 784 | getID.py | 13 | 0.494246 | 0.438114 | 0 | 116 | 29.724138 | 137 |
Max-coder1/Auto-Thermo-Converter | 9,680,856,325,353 | d7cfba2cd973e10383b9841059288ec0bbe03fdd | 915545c8e060a154ce069643b4d994216cfee1d4 | /THERMO-CONVERTER/tempconv.py | d1750ea65f62f65da2f9035b6af31b6939995f12 | []
| no_license | https://github.com/Max-coder1/Auto-Thermo-Converter | 944232744f00137809b9f1cb9c0cbfcc7962390d | 62f4fb1f14eb3611e11426d741bb7a7faed12777 | refs/heads/master | 2022-04-27T22:03:33.294937 | 2020-04-20T02:49:06 | 2020-04-20T02:49:06 | 257,145,972 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # TEMPERATURE CONVERTER
import tkinter as tk
from tkinter import *
import random
# declare global_Variables
temp_c = None
temp_f = None
#create the main window
root = tk.Tk()
root.title("AUTO-THERMO_CONVERTER")
# create main container
frame = tk.Frame(root,bg="yellow")
# Layout the main contaner, specify that we want it to grow with the window size
frame.pack(fill = tk.BOTH,expand=True)
# allow middle cell of the grid to grow when the window grows
frame.columnconfigure(1,weight=1)
frame.rowconfigure(1,weight =1)
# Variables for holding temperature data
temp_c = tk.DoubleVar()
temp_f = tk.DoubleVar()
#__________________________________________________________________________________________________________________________________
# This function is called whenever the button is pressed
def convert():
global temp_c
global temp_f
# convert celsuis to fahrenhiet and update (through textvariable)
try:
val = temp_c.get()
temp_f.set((val*9.0/5)+32)
except:
pass
#____________________________________________________________________________________________________________________________________
# This secoond function is also called
# Canvas
C = tk.Canvas(frame,bg="yellow", height=30,width=180)
oval = C.create_oval(0,0,40,40,fill = "pink")
oval2 = C.create_oval(0,0,200,200,fill="red")
oval3 = C.create_oval(0,0,150,150,fill="white")
C.grid(row=4,column=0)
# create widgets
label_input = tk.Label(frame,width = 15, text = "Enter a Value ",bg = "pink", font=("times",17))
entry_celsuis = tk.Entry(frame, width = 15, textvariable = temp_c)
label_unitc = tk.Label(frame,width = 5, text =" 'C",fg="blue",font=("times",20))
label_equal = tk.Label(frame, width = 15, text = "Equivalent To ",bg = "pink",font=("times",17))
label_fahrenheit = tk.Label(frame,width= 10,textvariable=temp_f,font=("times",20))
label_unitf = tk.Label(frame,width = 5, text = " 'F",fg="blue",font=("times",20))
button_convert = tk.Button(frame,height = 2,text= "CONVERT",command=(convert),bg="red")
developer = tk.Label(frame,text = "Developed By DERA",fg = "Blue",font=("times",12))
info = tk.Label(frame,text="Stay Safe from COVID-19",fg = "pink",bg="orange",font=("times",14,"italic"))
# label for displaying warning msg about the covid 19 pandemic
# Initiallising
#_________________________________________________
# Scaling widget
def start():
def sel():
myinfos = ["The normal body\n temperature range is\n typically stated as\n[36.5 - 37.5'C]or[97.7-99.5'F]",
"Ensure you take serious \n precautions and preventive\nmeasures against COVID-19",
"The [ WHO ] is constantly\n strengthening measures\nto ensure that the\nPandemic is contolled",
"Normal human body\ntemperature(normothermia,\neuthemia) is the typical\n temperature found in human",
"Maintain at least 1 & half\nmetres distance between\nyou and a person who\nis coughing or sneezing",
"People coughing or sneezing\n persistently should keep a\nsocial distance but not mix up\nwith a crowd",
"if you notice symptoms\n related to covid-19 please call\nemergency hotlines:\n+2348023169485,\n+2348033565529",
"whatever you do will seem\ninsignificant but it is\nvery important you do it\n.(Mahtma Gandhi)\n... STAY SAFE ...",
]
selection = random.choice(myinfos)
label.config(text = selection)
var = DoubleVar()
scale = (Scale(frame,variable = var,orient = HORIZONTAL,bg="orange"))
scale.grid(row=0,column=3)
scale.set(40)
scale.focus_set()
#scale.current(60)
button = Button(frame,width = 20,fg="blue",bg="red",text="Virtual Scale info's",command=sel)
button.grid(row=1,column=3)
label = Label(frame,width=22,height=5,bg="light grey")
label.grid(row=2,column=3)
start()
#___________________
# lay out widgets
label_input.grid(row=0,column = 0,padx=5,pady=5,sticky=tk.E)
entry_celsuis.grid(row = 0, column=1, padx=5, pady=5)
label_unitc.grid(row = 0, column=2, padx=5, pady=5, sticky=tk.W)
label_equal.grid(row = 1, column=0, padx=5, pady=5, sticky=tk.E)
label_fahrenheit.grid(row = 1, column=1, padx=5, pady=5)
label_unitf.grid(row = 1, column=2, padx=5, pady=5, sticky=tk.W)
button_convert.grid(row = 2, column=1,columnspan=1, padx=5, pady=5, sticky=tk.E)
developer.grid(row = 4,column = 0,padx=5, pady=5, sticky=tk.E)
info.grid(row = 2,column = 0,padx=5, pady=5, sticky=tk.E)
# Place the cursor
entry_celsuis.focus()
# Run forever
root.mainloop()
| UTF-8 | Python | false | false | 4,689 | py | 2 | tempconv.py | 1 | 0.612284 | 0.577309 | 0 | 108 | 41.416667 | 135 |
Hamirall/lengow-test | 11,227,044,554,945 | f8678d4012465d05e3cb3fd3d348b1c2035872ac | e5e2df3410232a71feda5b246cda51254ecfb31d | /lengow/orders/urls.py | 91de7c7a2bf5ff9a8912c0d2daa71a05bf12c1e5 | []
| no_license | https://github.com/Hamirall/lengow-test | 72c890b2fe7b82667de1789f659d41255c5f6194 | 4cde5dc652dca5c2c58c0b2a15b237b7c146fe8a | refs/heads/master | 2023-01-05T00:00:04.202807 | 2020-11-02T16:03:40 | 2020-11-02T16:03:40 | 309,199,578 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from . import views
from orders.views import OrderList
from orders.views import OrderDetail
from orders.views import OrderUpdate
from orders.views import CreateOrder
urlpatterns = [
path('list/', OrderList.as_view(), name='order-list'),
path('create', CreateOrder.as_view(), name='create-order'),
path('update/<slug:slug>/', OrderUpdate.as_view(success_url="/orders/list"), name='order-update'),
path('<slug:slug>/', OrderDetail.as_view(), name='order-detail'),
] | UTF-8 | Python | false | false | 509 | py | 11 | urls.py | 6 | 0.722986 | 0.722986 | 0 | 13 | 38.230769 | 102 |
neurodebian/snapshot-neuro.debian.net | 13,125,420,090,730 | b445ce7bbf5dae0cd5f29f7a4358f0dd5fb4bd06 | 85416680d980e773a369d0ee36d7db3128e08e98 | /misc/dump-tools/add-new-dumps-to-git | f7583555adab1fea78010df7ae9125c6f048052e | []
| no_license | https://github.com/neurodebian/snapshot-neuro.debian.net | e819c6e38bfa4b4ef795cc75d3f5649a0a11572e | 7848c5df1b3b853e81fa232923e782acfe9c47bd | refs/heads/master | 2021-10-25T15:59:58.603232 | 2017-09-23T18:12:58 | 2017-09-23T18:12:58 | 991,805 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# Copyright (c) 2010 Peter Palfrader
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Imports all mirrorruns found in a git repository for dumps into the database.
# Just imports metadata, does not get any actualy content (i.e. does not
# populate the farm).
import sys
import errno
import yaml
import optparse
import os
import tempfile
import shutil
import subprocess
import fcntl
import time
thisscriptdir = os.path.abspath(os.path.dirname(sys.argv[0]))
sys.path.append(os.path.join(thisscriptdir, '../../lib'))
from dbhelper import DBHelper
block_size = 4096
def read_fd_to_file(i, path):
o = open(path, "w")
while True:
buf = i.read(block_size)
if not buf: break
o.write(buf)
o.close()
i.close()
def get_lock(fn, wait=5*60):
f = open(fn, "w")
sl = 0.1
ends = time.time() + wait
while True:
success = False
try:
fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
return f
except IOError:
pass
if time.time() >= ends:
return None
sl = min(sl*2, 10, ends - time.time())
time.sleep(sl)
return f
parser = optparse.OptionParser()
parser.set_usage("%prog --config=<conffile>")
parser.add_option("-c", "--config", dest="conffile", metavar="CONFFILE",
help="Config file location.")
parser.add_option("-v", "--verbose", action="store_true",
help="Be verbose.")
parser.add_option("-s", "--snapshot", action="store", metavar="PATH",
help="Path to the snapshot script.")
parser.add_option("-a", "--adder", action="store", metavar="PATH",
help="Path to the add-dump-to-git script.")
parser.add_option("-e", "--extracter", action="store", metavar="PATH",
help="Path to the extract-dumps script.")
parser.add_option("-b", "--backing", dest="backing_git", metavar="GITDIR",
default = 'backing-git',
help="Location of backing git working copy.")
(options, args) = parser.parse_args()
if options.conffile is None:
parser.print_help()
sys.exit(1)
if options.snapshot is None:
options.snapshot = os.path.join(thisscriptdir, '../../snapshot')
if options.extracter is None:
options.extracter = os.path.join(thisscriptdir, 'extract-dumps')
if options.adder is None:
options.adder = os.path.join(thisscriptdir, 'add-dump-to-git')
config = yaml.safe_load(open(options.conffile).read())
db = DBHelper(config['db']['connectstring'])
if not os.path.exists(options.snapshot) or not os.access(options.snapshot, os.X_OK):
sys.stderr.write("%s does not exist or is not executable\n"%(options.snapshot))
sys.exit(1)
if not os.path.exists(options.extracter) or not os.access(options.extracter, os.X_OK):
sys.stderr.write("%s does not exist or is not executable\n"%(options.extracter))
sys.exit(1)
if not os.path.exists(options.adder) or not os.access(options.adder, os.X_OK):
sys.stderr.write("%s does not exist or is not executable\n"%(options.adder))
sys.exit(1)
options.extracter = os.path.abspath(options.extracter)
extractcall = [options.extracter]
extractcall += ['--backing', options.backing_git]
options.adder = os.path.abspath(options.adder)
addcall = [options.adder]
addcall += ['--backing', options.backing_git]
if options.verbose: addcall += ['--verbose']
if options.verbose: optional_quiet = []
else: optional_quiet = ['--quiet']
tmpdir = None
lock = None
counter = 1
try:
tmpdir = tempfile.mkdtemp(prefix='snapshot.add-new-dumps-to-git.')
lockfilename = os.path.join(options.backing_git, '.lock-add-new-dumps-to-git')
lockfile = get_lock(lockfilename)
if lockfile is None:
sys.stderr.write("Could not acquire lock.\n")
sys.exit(1)
have_uuids = set()
p = subprocess.Popen(extractcall+['list'], stdout=subprocess.PIPE)
for line in p.stdout:
line = line.rstrip()
uuid, objectspec = line.split(None, 1)
have_uuids.add(uuid)
rows = db.query('SELECT mirrorrun_id, mirrorrun_uuid FROM mirrorrun ORDER BY run')
for row in rows:
if row['mirrorrun_uuid'] in have_uuids:
continue
if options.verbose: print "Doing %d (%s)."%(row['mirrorrun_id'], row['mirrorrun_uuid'])
fn = os.path.join(tmpdir, "%d"%(row['mirrorrun_id']))
# dump mirrorrun to fn
c = [options.snapshot, '-c', options.conffile, '--mirrorrun', '%d'%(row['mirrorrun_id'])]
if options.verbose: c.append('--verbose')
c.append('dump')
if options.verbose: print " [%s]"%(" ".join(c))
p = subprocess.Popen(c, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
p.stdin.close()
read_fd_to_file(p.stdout, fn)
p.wait()
# import into git
args = []
counter += 1
if counter % 50 != 0:
args.append('--no-gc')
args.append(fn)
subprocess.check_call(addcall+args)
os.unlink(fn)
db.close()
os.chdir(options.backing_git)
if options.verbose: print "# git gc --auto"; sys.stdout.flush()
subprocess.check_call(['git', 'gc', '--auto'] + optional_quiet)
lockfile.close()
finally:
if not tmpdir is None:
shutil.rmtree(tmpdir)
# vim:set et:
# vim:set ts=4:
# vim:set shiftwidth=4:
| UTF-8 | Python | false | false | 6,258 | 73 | add-new-dumps-to-git | 42 | 0.665548 | 0.660754 | 0 | 184 | 33.01087 | 97 |
|
Melih-Celik/file_explorer | 18,537,078,875,474 | 338f7711bcf57d7702bd133971761ac93516ad54 | 3cfd1f99fbeb754f0602eed05f1ed34d38c7711a | /Turkish/File_Explorer.py | c49f85d10f18ec5ae5ecd5ca01bf28145843cb6c | [
"MIT"
]
| permissive | https://github.com/Melih-Celik/file_explorer | 8c1f119b74d5e9223b1a69f28f06b69bc11abee9 | 9d7afb9bc32bd167fb19e891adf751bae804d6e4 | refs/heads/master | 2021-09-06T17:25:48.211104 | 2018-02-09T01:05:07 | 2018-02-09T01:05:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
from time import sleep
from shutil import rmtree
def info():
print("\n\t\tİşlem Rehberi\n1.Yeni dosya oluşturmak için -d\n2.Dosya içeriğini ekrana yazdırmak için -r(Beta)\n3.Yeni Klasör oluşturmak için -k\
\n4.Klasör silmek için -ks\t!!!Klasör kalıcı olarak silinir!!!\n5.Dosya silmek için -ds \t!!!Dosya kalıcı olarak silinir!!!\n6.Çıkmak için 'cik' yazınız...\n")
def create_file(cwd):
dosya_adi=input("Oluşuturacağınız dosyanın adını uzantısıyla beraber yazınız : ")
if dosya_adi =='cik':
print("İşlem iptal edildi.Çıkılıyor.Lütfen bekleyin...")
sleep(1.5)
else:
yeni_dosya=open("%s/%s"%(cwd,dosya_adi),"w")
while True:
yaz=input("Dosyanıza içerik yazmak ister misiniz : [E/H] ")
if yaz == "E" or yaz== "e":
print("\n Çıkmak için 'cik' yazın\nMetninizi giriniz : \n")
while True:
icerik=input()
if icerik=="cik":
print("Başarıyla kaydedildi.Çıkılıyor.Lütfen bekleyin...")
sleep(1.5)
break
else:
yeni_dosya.write(icerik+"\n")
continue
yeni_dosya.close()
break
elif yaz == "H" or yaz== "h":
pass
else:
print("Lütfen Evet için E Hayır için H yazınız !!!")
continue
print("\n"*100)
def read_file(cwd):
print("\n"*100)
files_in_dir(cwd)
file_read=int(input("\n(Dosyada değişikilik yapılamaz)\nOkunacak dosyayı seçiniz : "))
if str(file_read)=="cik":
print("İşlem iptal edildi.Çıkılıyor.Lütfen bekleyin...")
sleep(1.5)
else:
file_to_read=open("%s"%(os.listdir(cwd)[file_read-1]),"r")
for a in file_to_read: print(a)
while True:
cikis=input("\n\nCikmak icin 'cik' yazın : ")
if cikis=="cik":
break
else:
print("Yanlış giriş yaptınız...\n")
continue
def remove_file(cwd):
print("\n"*100)
files_in_dir(cwd)
file_del=input("Silinecek dosyayı seçiniz : ")
if file_del=="cik":
print("İşlem iptal edildi.Çıkılıyor.Lütfen bekleyin...")
sleep(1.5)
else:
try:
while True:
secim=input("Dosyayı kalıcı olarak silmek istediğinize emin misiniz : [E/H] ")
if secim.upper() =="E":
file_del=int(file_del)
os.remove("%s/%s"%(cwd,os.listdir(cwd)[file_del-1]))
print("İşlem başarılı.Lütfen bekleyin...")
sleep(1)
break
elif secim.upper() =="H":
break
else:
print("Lütfen Evet için E Hayır için H yazınız !!!")
continue
except PermissionError:
print("Klasör silme erişiminiz yok! (Programı yönetici olarak açmayı deneyin)\nLütfen Bekleyin...")
sleep(3)
def create_dir(cwd):
crt_dir=input("Oluşturmak istediğiniz klasörün adını giriniz : ")
if crt_dir=="cik":
print("İşlem iptal edildi.Çıkılıyor.Lütfen bekleyin...")
sleep(1.5)
else:
os.mkdir("%s/%s"%(cwd,crt_dir))
os.chdir("%s/%s"%(cwd,crt_dir))
print("İşlem başarılı.Lütfen bekleyin...")
sleep(1)
def remove_dir(cwd):
print("\n"*100)
files_in_dir(cwd)
dir_del=input("Silinecek klasörü seçiniz : ")
if dir_del=="cik":
print("İşlem iptal edildi.Çıkılıyor.Lütfen bekleyin...")
sleep(1.5)
else:
try:
while True:
secim=input("Klasörü ve içindekileri kalıcı olarak silmek istediğinize emin misiniz : [E/H]")
if secim.upper()=="E":
dir_del=int(dir_del)
rmtree("%s/%s"%(cwd,os.listdir(cwd)[dir_del-1]))
print("İşlem başarılı.Lütfen bekleyin...")
sleep(1)
break
elif secim.upper() =="H":
break
else:
print("Lütfen Evet için E Hayır için H yazınız !!!")
continue
except PermissionError:
print("Dizin silme erişiminiz yok! (Programı yönetici olarak açmayı deneyin)\nLütfen Bekleyin...")
sleep(3)
def files_in_dir(cwd):
try:
print("-1.Geri gel")
n=1
for a in os.listdir(cwd):
print(n,a)
n=n+1
except PermissionError:
print("Bu dizine giriş izniniz yok! (Programı yönetici olarak açmayı deneyin)\nGeri gelin.")
def pick_dir(cwd):
while True:
try:
new_way=input("Bir işlem yapınız : ")
if new_way == "-1":
os.chdir(os.pardir)
break
elif new_way.isdigit():
new_way=int(new_way)
os.chdir("%s/%s"%(cwd,os.listdir(cwd)[new_way-1]))
break
elif new_way == "-i":
info()
elif new_way == "-d":
create_file(cwd)
break
elif new_way == "-r":
read_file(cwd)
break
elif new_way == "-k":
create_dir(cwd)
break
elif new_way == "-ks":
remove_dir(cwd)
break
elif new_way == "-ds":
remove_file(cwd)
break
elif new_way == "cik":
exit()
else:
print("Geçersiz bir işlem yaptınız!")
except (FileNotFoundError,TypeError,ValueError,IndexError):
print("\nYanlış bir işlem yaptınız !")
except NotADirectoryError:
print("Bu bir dizin değil!!!")
while True:
print("\nSeçenekleri görmek için -i yazınız...\n")
cwd=os.getcwd()
print("Bulunduğunuz dizin : \n%s\n"%(cwd))
files_in_dir(cwd)
pick_dir(cwd)
print("\n"*100)
continue
| UTF-8 | Python | false | false | 6,485 | py | 4 | File_Explorer.py | 2 | 0.486899 | 0.479593 | 0 | 169 | 35.260355 | 159 |
562794175/at_demo | 11,501,922,464,292 | 0f2742d32defbe9ca09ec0c329d3afa0c89fdbe6 | 6ed7523aea6ce8f650604c963d5c04c3c99ff52c | /DRLHappyDemo/HappyDemoUtils.py | b53bc62018ae13ad99a567d30f821cd75830d540 | []
| no_license | https://github.com/562794175/at_demo | 96b576c96a3cdac6e80f0651e5d74b117cdfb957 | e237dc86ecc5ace721e12998ada02ae9a2e373e1 | refs/heads/master | 2021-06-02T22:31:02.064066 | 2020-03-07T13:38:37 | 2020-03-07T13:38:37 | 135,537,132 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
def load():
IMAGES, HITMASKS = {}, {}
return IMAGES, HITMASKS
def getHitmask(image):
"""returns a hitmask using an image's alpha."""
mask = []
for x in range(image.get_width()):
mask.append([])
for y in range(image.get_height()):
mask[x].append(bool(image.get_at((x,y))[3]))
return mask
| UTF-8 | Python | false | false | 353 | py | 552 | HappyDemoUtils.py | 308 | 0.572238 | 0.569405 | 0 | 15 | 22.533333 | 56 |
rmanzoni/cmg-cmssw | 18,116,172,087,716 | 65fc08c38d1e4183774a943175dcb655a8554614 | e9998430626270ba20dc17eb807899def97209a1 | /CMGTools/TTHAnalysis/python/samples/samples_13TeV_PHYS14.py | 23fbd1b7b0dd99cdb1abd0128c9495256a7a16dd | []
| no_license | https://github.com/rmanzoni/cmg-cmssw | 7cdec3f0b7cbb789ab5b105259a1b58a78e7defb | 71e96154765289ddcb5b64470e08dc339e38f072 | refs/heads/CMGTools-from-CMSSW_7_4_2 | 2020-12-25T23:53:29.190645 | 2015-05-22T15:56:14 | 2015-05-22T15:56:14 | 13,803,005 | 0 | 1 | null | true | 2015-08-20T15:51:30 | 2013-10-23T13:08:32 | 2015-04-15T15:34:01 | 2015-08-20T15:42:08 | 790,017 | 0 | 0 | 0 | C++ | null | null | import PhysicsTools.HeppyCore.framework.config as cfg
import os
################## Triggers
from CMGTools.TTHAnalysis.samples.triggers_13TeV_PHYS14 import *
#####COMPONENT CREATOR
from CMGTools.TTHAnalysis.samples.ComponentCreator import ComponentCreator
# Single factory instance used to declare every MC sample component in this file.
kreator = ComponentCreator()
## CENTRALLY PRODUCED MINIAODs V2 (from global DBS, in T2_CH_CAF)
################## PU40 bx25ns (not default, so samples have a _PU40bx25 postfix) ##################
GGHZZ4L_PU40bx25 = kreator.makeMCComponent("GGHZZ4L_PU40bx25", "/GluGluToHToZZTo4L_M-125_13TeV-powheg-pythia6/Phys14DR-PU40bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 43.92*2.76E-04)
# NOTE(review): no cross-section argument is passed here, unlike the other components — confirm it is set elsewhere or intentionally left unset.
DYJetsMuMuM50_PtZ180_PU40bx25 = kreator.makeMCComponent("DYJetsMuMuM50_PtZ180_PU40bx25", "/DYJetsToMuMu_PtZ-180_M-50_13TeV-madgraph/Phys14DR-PU40bx25_tsg_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root")
TT_PU40bx25 = kreator.makeMCComponent("TT_PU40bx25", "/TT_Tune4C_13TeV-pythia8-tauola/Phys14DR-PU40bx25_tsg_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",809.1)
TTH_PU40bx25 = kreator.makeMCComponent("TTH_PU40bx25", "/TTbarH_M-125_13TeV_amcatnlo-pythia8-tauola/Phys14DR-PU40bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",0.5085)
mcSamplesPHYS14_PU40bx25 = [TT_PU40bx25,TTH_PU40bx25,DYJetsMuMuM50_PtZ180_PU40bx25,GGHZZ4L_PU40bx25]
################## PU4 bx25ns (no default of phys14, so no _4bx50 postfix) ##############
# inclusive samples only for the low PU scenario
TT_PU4bx50 = kreator.makeMCComponent("TT_PU4bx50", "/TTJets_MSDecaysCKM_central_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU4bx50_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",809.1)
WJetsToLNu_PU4bx50 = kreator.makeMCComponent("WJetsToLNu_PU4bx50","/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU4bx50_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 20508.9)
DYJetsToLL_M50_PU4bx50 = kreator.makeMCComponent("DYJetsToLL_M50_PU4bx50", "/DYJetsToLL_M-50_13TeV-madgraph-pythia8/Phys14DR-PU4bx50_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 2008.*3)
mcSamplesPHYS14_PU4bx50 = [TT_PU4bx50,WJetsToLNu_PU4bx50,DYJetsToLL_M50_PU4bx50]
################## PU20 bx25ns (default of phys14, so no postfix) ##############
#### Background samples
## Cross sections from McM (LO Madgraph)
# QCD multijet samples, binned in generator-level HT (cross sections in pb).
QCD_HT_100To250 = kreator.makeMCComponent("QCD_HT_100To250", "/QCD_HT-100To250_13TeV-madgraph/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 28730000)
QCD_HT_250To500 = kreator.makeMCComponent("QCD_HT_250To500", "/QCD_HT_250To500_13TeV-madgraph/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 670500)
QCD_HT_250To500_ext1 = kreator.makeMCComponent("QCD_HT_250To500_ext1", "/QCD_HT_250To500_13TeV-madgraph/Phys14DR-PU20bx25_PHYS14_25_V1_ext1-v2/MINIAODSIM", "CMS", ".*root", 670500)
QCD_HT_500To1000 = kreator.makeMCComponent("QCD_HT_500To1000", "/QCD_HT-500To1000_13TeV-madgraph/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 26740)
QCD_HT_500To1000_ext1 = kreator.makeMCComponent("QCD_HT_500To1000_ext1", "/QCD_HT-500To1000_13TeV-madgraph/Phys14DR-PU20bx25_PHYS14_25_V1_ext1-v1/MINIAODSIM", "CMS", ".*root", 26740)
QCD_HT_1000ToInf_ext1 = kreator.makeMCComponent("QCD_HT_1000ToInf_ext1", "/QCD_HT_1000ToInf_13TeV-madgraph/Phys14DR-PU20bx25_PHYS14_25_V1_ext1-v1/MINIAODSIM", "CMS", ".*root", 769.7)
QCD_HT_1000ToInf = kreator.makeMCComponent("QCD_HT_1000ToInf", "/QCD_HT_1000ToInf_13TeV-madgraph/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 769.7)
QCDHT = [
QCD_HT_100To250,
QCD_HT_250To500,
QCD_HT_500To1000,
QCD_HT_1000ToInf,
QCD_HT_250To500_ext1,
QCD_HT_500To1000_ext1,
QCD_HT_1000ToInf_ext1
]
# QCD multijet samples, binned in generator-level pT-hat (cross sections in pb).
QCD_Pt15to30 = kreator.makeMCComponent("QCD_Pt15to30","/QCD_Pt-15to30_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_trkalmb_castor_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 2237000000)
QCD_Pt30to50 = kreator.makeMCComponent("QCD_Pt30to50","/QCD_Pt-30to50_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_trkalmb_castor_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 161500000)
QCD_Pt50to80 = kreator.makeMCComponent("QCD_Pt50to80","/QCD_Pt-50to80_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_trkalmb_castor_PHYS14_25_V1-v2/MINIAODSIM", "CMS", ".*root", 22110000)
QCD_Pt80to120 = kreator.makeMCComponent("QCD_Pt80to120","/QCD_Pt-80to120_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_trkalmb_castor_PHYS14_25_V1-v2/MINIAODSIM", "CMS", ".*root", 3000114.3)
QCD_Pt120to170 = kreator.makeMCComponent("QCD_Pt120to170","/QCD_Pt-120to170_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_trkalmb_castor_PHYS14_25_V1-v2/MINIAODSIM", "CMS", ".*root", 493200)
QCD_Pt170to300 = kreator.makeMCComponent("QCD_Pt170to300","/QCD_Pt-170to300_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_trkalmb_castor_PHYS14_25_V1-v2/MINIAODSIM", "CMS", ".*root", 120300)
QCD_Pt300to470 = kreator.makeMCComponent("QCD_Pt300to470","/QCD_Pt-300to470_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_trkalmb_castor_PHYS14_25_V1-v2/MINIAODSIM", "CMS", ".*root", 7475)
QCD_Pt470to600 = kreator.makeMCComponent("QCD_Pt470to600","/QCD_Pt-470to600_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_trkalmb_castor_PHYS14_25_V1-v2/MINIAODSIM", "CMS", ".*root", 587.1)
QCD_Pt600to800 = kreator.makeMCComponent("QCD_Pt600to800","/QCD_Pt-600to800_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_trkalmb_castor_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 167)
QCD_Pt800to1000 = kreator.makeMCComponent("QCD_Pt800to1000","/QCD_Pt-800to1000_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_trkalmb_castor_PHYS14_25_V1-v2/MINIAODSIM", "CMS", ".*root", 28.25)
QCD_Pt1000to1400 = kreator.makeMCComponent("QCD_Pt1000to1400","/QCD_Pt-1000to1400_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_trkalmb_castor_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 8.195)
QCD_Pt1400to1800 = kreator.makeMCComponent("QCD_Pt1400to1800","/QCD_Pt-1400to1800_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_trkalmb_castor_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 0.7346)
# NOTE(review): the extra True argument appears only for the three highest-pT bins —
# presumably a ComponentCreator flag (e.g. remote/AAA file access); confirm in ComponentCreator.
QCD_Pt1800to2400 = kreator.makeMCComponent("QCD_Pt1800to2400","/QCD_Pt-1800to2400_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_trkalmb_PHYS14_25_V1-v2/MINIAODSIM", "CMS", ".*root", 0.102, True)
QCD_Pt2400to3200 = kreator.makeMCComponent("QCD_Pt2400to3200","/QCD_Pt-2400to3200_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_trkalmb_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 0.00644, True)
QCD_Pt3200 = kreator.makeMCComponent("QCD_Pt3200","/QCD_Pt-3200_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_trkalmb_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 0.000163, True)
QCDPt = [
QCD_Pt15to30,
QCD_Pt30to50,
QCD_Pt50to80,
QCD_Pt80to120,
QCD_Pt120to170,
QCD_Pt170to300,
QCD_Pt300to470,
QCD_Pt470to600,
QCD_Pt600to800,
QCD_Pt800to1000,
QCD_Pt1000to1400,
QCD_Pt1400to1800,
QCD_Pt1800to2400,
QCD_Pt2400to3200,
QCD_Pt3200
]
# Muon-enriched QCD (cross sections and filter efficiency from McM)
# Effective cross section = generator cross section x enrichment-filter efficiency.
QCD_Mu15 = kreator.makeMCComponent("QCD_Mu15", "/QCD_Pt-20toInf_MuEnrichedPt15_PionKaonDecay_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_PHYS14_25_V1-v3/MINIAODSIM", "CMS", ".*root", 866.6e6*0.00044);
QCD_Pt30to50_Mu5 = kreator.makeMCComponent("QCD_Pt30to50_Mu5", "/QCD_Pt-30to50_MuEnrichedPt5_PionKaonDecay_Tune4C_13TeV_pythia8/Phys14DR-AVE20BX25_tsg_PHYS14_25_V3-v2/MINIAODSIM", "CMS", ".*root", 164400000*0.0122);
QCD_Pt50to80_Mu5 = kreator.makeMCComponent("QCD_Pt50to80_Mu5", "/QCD_Pt-50to80_MuEnrichedPt5_PionKaonDecay_Tune4C_13TeV_pythia8/Phys14DR-AVE20BX25_tsg_PHYS14_25_V3-v1/MINIAODSIM", "CMS", ".*root", 21930000*0.0218);
QCD_Pt80to120_Mu5 = kreator.makeMCComponent("QCD_Pt80to120_Mu5", "/QCD_Pt-80to120_MuEnrichedPt5_PionKaonDecay_Tune4C_13TeV_pythia8/Phys14DR-AVE20BX25_tsg_PHYS14_25_V3-v1/MINIAODSIM", "CMS", ".*root", 3000000*0.0395);
QCD_Mu5 = [ QCD_Pt30to50_Mu5, QCD_Pt50to80_Mu5, QCD_Pt80to120_Mu5 ]
# Electron-enriched QCD (cross sections and filter efficiency from McM)
QCD_Pt10to20_EMEnriched = kreator.makeMCComponent("QCD_Pt10to20_EMEnriched", "/QCD_Pt-10to20_EMEnriched_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_castor_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 8838e6*0.143);
QCD_Pt20to30_EMEnriched = kreator.makeMCComponent("QCD_Pt20to30_EMEnriched", "/QCD_Pt-20to30_EMEnriched_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_castor_PHYS14_25_V1-v2/MINIAODSIM", "CMS", ".*root", 6773e5*0.007);
QCD_Pt30to80_EMEnriched = kreator.makeMCComponent("QCD_Pt30to80_EMEnriched", "/QCD_Pt-30to80_EMEnriched_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_castor_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 1859e5*0.056);
QCD_Pt80to170_EMEnriched = kreator.makeMCComponent("QCD_Pt80to170_EMEnriched", "/QCD_Pt-80to170_EMEnriched_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_castor_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 3259e3*0.158);
QCDPtEMEnriched = [
QCD_Pt10to20_EMEnriched,
QCD_Pt20to30_EMEnriched,
QCD_Pt30to80_EMEnriched,
QCD_Pt80to170_EMEnriched
]
# QCD with b/c hadrons decaying to electrons (cross sections x filter efficiency).
QCD_Pt20to30_bcToE = kreator.makeMCComponent("QCD_Pt20to30_bcToE", "/QCD_Pt_20to30_bcToE_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_PHYS14_25_V1-v2/MINIAODSIM", "CMS", ".*root", 6759e5*0.00075);
QCD_Pt30to80_bcToE = kreator.makeMCComponent("QCD_Pt30to80_bcToE", "/QCD_Pt_30to80_bcToE_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 1859e5*0.00272);
QCD_Pt80to170_bcToE = kreator.makeMCComponent("QCD_Pt80to170_bcToE", "/QCD_Pt_80to170_bcToE_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_PHYS14_25_V1-v2/MINIAODSIM", "CMS", ".*root", 3495e3*0.01225);
QCD_Pt170toInf_bcToE = kreator.makeMCComponent("QCD_Pt170toInf_bcToE", "/QCD_Pt_170toInf_bcToE_Tune4C_13TeV_pythia8/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 1285e2*0.0406);
QCDPtbcToE = [
QCD_Pt20to30_bcToE,
QCD_Pt30to80_bcToE,
QCD_Pt80to170_bcToE,
QCD_Pt170toInf_bcToE
]
# W inclusive (cross section from FEWZ, StandardModelCrossSectionsat13TeV)
WJetsToLNu = kreator.makeMCComponent("WJetsToLNu","/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 20508.9)
# cross sections for WJets taken from McM LO times inclusive k-factor from FEWZ(20508.9 pb x3)/McM(50100.0)
WJetsToLNu_HT100to200 = kreator.makeMCComponent("WJetsToLNu_HT100to200", "/WJetsToLNu_HT-100to200_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",1817.0*1.23)
WJetsToLNu_HT200to400 = kreator.makeMCComponent("WJetsToLNu_HT200to400", "/WJetsToLNu_HT-200to400_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",471.6*1.23)
WJetsToLNu_HT400to600 = kreator.makeMCComponent("WJetsToLNu_HT400to600", "/WJetsToLNu_HT-400to600_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",55.61*1.23)
WJetsToLNu_HT600toInf = kreator.makeMCComponent("WJetsToLNu_HT600toInf", "/WJetsToLNu_HT-600toInf_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",18.81*1.23)
WJetsToLNuHT = [
WJetsToLNu_HT100to200,
WJetsToLNu_HT200to400,
WJetsToLNu_HT400to600,
WJetsToLNu_HT600toInf,
]
# DY inclusive (cross section from FEWZ, StandardModelCrossSectionsat13TeV)
DYJetsToLL_M50 = kreator.makeMCComponent("DYJetsToLL_M50", "/DYJetsToLL_M-50_13TeV-madgraph-pythia8/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 2008.*3)
# DY HT bins: cross sections for DYJets taken from McM LO times inclusive k-factor from FEWZ(2008 pb x3)/McM(4746)
DYJetsToLL_M50_HT100to200 = kreator.makeMCComponent("DYJetsToLL_M50_HT100to200", "/DYJetsToLL_M-50_HT-100to200_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",194.3*1.27)
DYJetsToLL_M50_HT200to400 = kreator.makeMCComponent("DYJetsToLL_M50_HT200to400", "/DYJetsToLL_M-50_HT-200to400_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",52.24*1.27)
DYJetsToLL_M50_HT400to600 = kreator.makeMCComponent("DYJetsToLL_M50_HT400to600", "/DYJetsToLL_M-50_HT-400to600_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",6.546*1.27)
DYJetsToLL_M50_HT600toInf = kreator.makeMCComponent("DYJetsToLL_M50_HT600toInf", "/DYJetsToLL_M-50_HT-600toInf_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",2.179*1.27)
DYJetsM50HT = [
DYJetsToLL_M50_HT100to200,
DYJetsToLL_M50_HT200to400,
DYJetsToLL_M50_HT400to600,
DYJetsToLL_M50_HT600toInf,
]
# NOTE(review): no cross-section argument is passed here, unlike the other components — confirm it is set elsewhere.
DYJetsMuMuM50_PtZ180 = kreator.makeMCComponent("DYJetsMuMuM50_PtZ180", "/DYJetsToMuMu_PtZ-180_M-50_13TeV-madgraph/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v3/MINIAODSIM", "CMS", ".*root")
# Photon + jets, binned in HT (cross sections in pb).
GJets_HT100to200 = kreator.makeMCComponent("GJets_HT100to200", "/GJets_HT-100to200_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",1534)
GJets_HT200to400 = kreator.makeMCComponent("GJets_HT200to400", "/GJets_HT-200to400_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",489.9)
GJets_HT400to600 = kreator.makeMCComponent("GJets_HT400to600", "/GJets_HT-400to600_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",62.05)
GJets_HT600toInf = kreator.makeMCComponent("GJets_HT600toInf", "/GJets_HT-600toInf_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",20.87)
GJetsHT = [
GJets_HT100to200,
GJets_HT200to400,
GJets_HT400to600,
GJets_HT600toInf,
]
# Z -> invisible + jets, binned in HT (LO cross section times k-factor 1.27).
ZJetsToNuNu_HT100to200 = kreator.makeMCComponent("ZJetsToNuNu_HT100to200", "/ZJetsToNuNu_HT-100to200_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",372.6*1.27)
ZJetsToNuNu_HT200to400 = kreator.makeMCComponent("ZJetsToNuNu_HT200to400", "/ZJetsToNuNu_HT-200to400_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",100.8*1.27)
ZJetsToNuNu_HT400to600 = kreator.makeMCComponent("ZJetsToNuNu_HT400to600", "/ZJetsToNuNu_HT-400to600_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v2/MINIAODSIM", "CMS", ".*root",11.99*1.27)
ZJetsToNuNu_HT600toInf = kreator.makeMCComponent("ZJetsToNuNu_HT600toInf", "/ZJetsToNuNu_HT-600toInf_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",4.113*1.27)
ZJetsToNuNuHT = [
ZJetsToNuNu_HT100to200,
ZJetsToNuNu_HT200to400,
ZJetsToNuNu_HT400to600,
ZJetsToNuNu_HT600toInf,
]
# Single top cross sections: https://twiki.cern.ch/twiki/bin/viewauth/CMS/SingleTopSigma
# Leptonic channels: inclusive cross section x BR(W->lnu) = 0.108 per lepton flavour, 3 flavours.
TToLeptons_tch = kreator.makeMCComponent("TToLeptons_tch", "/TToLeptons_t-channel-CSA14_Tune4C_13TeV-aMCatNLO-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 136.05*0.108*3)
TToLeptons_sch = kreator.makeMCComponent("TToLeptons_sch", "/TToLeptons_s-channel-CSA14_Tune4C_13TeV-aMCatNLO-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 7.20*0.108*3)
TBarToLeptons_tch = kreator.makeMCComponent("TBarToLeptons_tch", "/TBarToLeptons_t-channel_Tune4C_CSA14_13TeV-aMCatNLO-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 80.97*0.108*3)
TBarToLeptons_sch = kreator.makeMCComponent("TBarToLeptons_sch", "/TBarToLeptons_s-channel-CSA14_Tune4C_13TeV-aMCatNLO-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 4.16*0.108*3)
TBar_tWch = kreator.makeMCComponent("TBar_tWch", "/Tbar_tW-channel-DR_Tune4C_13TeV-CSA14-powheg-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",35.6)
T_tWch = kreator.makeMCComponent("T_tWch", "/T_tW-channel-DR_Tune4C_13TeV-CSA14-powheg-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",35.6)
SingleTop = [
TToLeptons_tch, TToLeptons_sch, TBarToLeptons_tch, TBarToLeptons_sch, TBar_tWch, T_tWch
]
# TTbar cross section: MCFM with dynamic scale, StandardModelCrossSectionsat13TeV
TTJets = kreator.makeMCComponent("TTJets", "/TTJets_MSDecaysCKM_central_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",809.1)
# TTV cross sections are from 13 TeV MG5_aMC@NLO v2.2.1, NNPDF 2.3nlo, fixed scale = mtop + 0.5*mv
TTWJets = kreator.makeMCComponent("TTWJets", "/TTWJets_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",0.6647)
TTZJets = kreator.makeMCComponent("TTZJets", "/TTZJets_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",0.8565)
# TTH cross section from LHC Higgs XS WG: https://twiki.cern.ch/twiki/bin/view/LHCPhysics/CERNYellowReportPageAt1314TeV?rev=15
TTH = kreator.makeMCComponent("TTH", "/TTbarH_M-125_13TeV_amcatnlo-pythia8-tauola/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v2/MINIAODSIM", "CMS", ".*root",0.5085)
# cross section from StandardModelCrossSectionsat13TeV (NLO MCFM, mll > 12) times BR=(3*0.108)*(3*0.0337)
WZJetsTo3LNu = kreator.makeMCComponent("WZJetsTo3LNu", "/WZJetsTo3LNu_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",2.165)
# cross section from StandardModelCrossSectionsat13TeV (NLO MCFM, mll > 12) times BR=(3*0.0337)**2
ZZTo4L = kreator.makeMCComponent("ZZTo4L","/ZZTo4L_Tune4C_13TeV-powheg-pythia8/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 31.8*(3*0.03366**2))
# GGH cross section from LHC Higgs XS WG: https://twiki.cern.ch/twiki/bin/view/LHCPhysics/CERNYellowReportPageAt1314TeV?rev=15
GGHZZ4L = kreator.makeMCComponent("GGHZZ4L", "/GluGluToHToZZTo4L_M-125_13TeV-powheg-pythia6/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 43.92*2.76E-04)
GGHTT = kreator.makeMCComponent("GGHTT", "/GluGluToHToTauTau_M-125_13TeV-powheg-pythia6/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root", 43.92*0.0632)
VBFTT = kreator.makeMCComponent("VBFTT", "/VBF_HToTauTau_M-125_13TeV-powheg-pythia6/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v2/MINIAODSIM", "CMS", ".*root", 3.748*0.0632)
#### Signal samples
# cross sections from LHC SUSY Cross Section Working Group https://twiki.cern.ch/twiki/bin/view/LHCPhysics/SUSYCrossSections
# SMS simplified-model scans; sample names encode the sparticle masses in GeV.
SMS_T2tt_2J_mStop850_mLSP100 = kreator.makeMCComponent("SMS_T2tt_2J_mStop850_mLSP100", "/SMS-T2tt_2J_mStop-850_mLSP-100_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",0.0189612)
SMS_T2tt_2J_mStop650_mLSP325 = kreator.makeMCComponent("SMS_T2tt_2J_mStop650_mLSP325", "/SMS-T2tt_2J_mStop-650_mLSP-325_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",0.107045)
SMS_T2tt_2J_mStop500_mLSP325 = kreator.makeMCComponent("SMS_T2tt_2J_mStop500_mLSP325", "/SMS-T2tt_2J_mStop-500_mLSP-325_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",0.51848)
SMS_T2tt_2J_mStop425_mLSP325 = kreator.makeMCComponent("SMS_T2tt_2J_mStop425_mLSP325", "/SMS-T2tt_2J_mStop-425_mLSP-325_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",1.31169)
SMS_T2qq_2J_mStop600_mLSP550 = kreator.makeMCComponent("SMS_T2qq_2J_mStop600_mLSP550", "/SMS-T2qq_2J_mStop-600_mLSP-550_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",1.76645)
SMS_T2qq_2J_mStop1200_mLSP100 = kreator.makeMCComponent("SMS_T2qq_2J_mStop1200_mLSP100", "/SMS-T2qq_2J_mStop-1200_mLSP-100_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",0.0162846)
SMS_T2bb_2J_mStop900_mLSP100 = kreator.makeMCComponent("SMS_T2bb_2J_mStop900_mLSP100", "/SMS-T2bb_2J_mStop-900_mLSP-100_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",0.0128895)
SMS_T2bb_2J_mStop600_mLSP580 = kreator.makeMCComponent("SMS_T2bb_2J_mStop600_mLSP580", "/SMS-T2bb_2J_mStop-600_mLSP-580_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",0.174599)
SMS_T1tttt_2J_mGl1500_mLSP100 = kreator.makeMCComponent("SMS_T1tttt_2J_mGl1500_mLSP100", "/SMS-T1tttt_2J_mGl-1500_mLSP-100_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",0.0141903)
SMS_T1tttt_2J_mGl1200_mLSP800 = kreator.makeMCComponent("SMS_T1tttt_2J_mGl1200_mLSP800", "/SMS-T1tttt_2J_mGl-1200_mLSP-800_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",0.0856418)
SMS_T1qqqq_2J_mGl1400_mLSP100 = kreator.makeMCComponent("SMS_T1qqqq_2J_mGl1400_mLSP100", "/SMS-T1qqqq_2J_mGl-1400_mLSP-100_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",0.0252977)
SMS_T1qqqq_2J_mGl1000_mLSP800 = kreator.makeMCComponent("SMS_T1qqqq_2J_mGl1000_mLSP800", "/SMS-T1qqqq_2J_mGl-1000_mLSP-800_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",0.325388)
SMS_T1bbbb_2J_mGl1500_mLSP100 = kreator.makeMCComponent("SMS_T1bbbb_2J_mGl1500_mLSP100", "/SMS-T1bbbb_2J_mGl-1500_mLSP-100_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",0.0141903)
SMS_T1bbbb_2J_mGl1000_mLSP900 = kreator.makeMCComponent("SMS_T1bbbb_2J_mGl1000_mLSP900", "/SMS-T1bbbb_2J_mGl-1000_mLSP-900_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_tsg_PHYS14_25_V1-v1/MINIAODSIM", "CMS", ".*root",0.325388)
SusySignalSamples = [
SMS_T2tt_2J_mStop850_mLSP100,
SMS_T2tt_2J_mStop650_mLSP325,
SMS_T2tt_2J_mStop500_mLSP325,
SMS_T2tt_2J_mStop425_mLSP325,
SMS_T2qq_2J_mStop600_mLSP550,
SMS_T2qq_2J_mStop1200_mLSP100,
SMS_T2bb_2J_mStop900_mLSP100,
SMS_T2bb_2J_mStop600_mLSP580,
SMS_T1tttt_2J_mGl1500_mLSP100,
SMS_T1tttt_2J_mGl1200_mLSP800,
SMS_T1qqqq_2J_mGl1400_mLSP100,
SMS_T1qqqq_2J_mGl1000_mLSP800,
SMS_T1bbbb_2J_mGl1500_mLSP100,
SMS_T1bbbb_2J_mGl1000_mLSP900,
]
# Aggregate list of all centrally produced PU20bx25 MC components defined above.
mcSamplesPHYS14_PU20bx25 = QCDHT + QCDPt + [QCD_Mu15] + QCD_Mu5 + QCDPtEMEnriched + QCDPtbcToE + [WJetsToLNu] + WJetsToLNuHT + [DYJetsToLL_M50, DYJetsMuMuM50_PtZ180] + DYJetsM50HT + GJetsHT + ZJetsToNuNuHT + SingleTop + [ TTJets, TTWJets, TTZJets, TTH, WZJetsTo3LNu, ZZTo4L, GGHZZ4L, GGHTT, VBFTT] + SusySignalSamples
## PRIVATE SAMPLES
# Privately produced re-miniAODs ("fixPhoton") read directly from EOS paths via
# makeMCComponentFromEOS(name, sample_subdir, path_pattern, file_regex, xsec).
GJets_HT100to200_fixPhoton = kreator.makeMCComponentFromEOS('GJets_HT100to200', '/GJets_HT-100to200_Tune4C_13TeV-madgraph-tauola/miniAOD_fixPhoton_7_2_3/150204_164703/0000/', '/store/user/mmasciov/PHYS14_fixPhoton/%s',".*root", 1534)
GJets_HT200to400_fixPhoton = kreator.makeMCComponentFromEOS('GJets_HT200to400', '/GJets_HT-200to400_Tune4C_13TeV-madgraph-tauola/miniAOD_fixPhoton_7_2_3/150204_164621/0000/', '/store/user/mmasciov/PHYS14_fixPhoton/%s',".*root", 489.9)
GJets_HT400to600_fixPhoton = kreator.makeMCComponentFromEOS('GJets_HT400to600', '/GJets_HT-400to600_Tune4C_13TeV-madgraph-tauola/miniAOD_fixPhoton_7_2_3/150204_164547/0000/', '/store/user/mmasciov/PHYS14_fixPhoton/%s',".*root", 62.05)
GJets_HT600toInf_fixPhoton = kreator.makeMCComponentFromEOS('GJets_HT600toInf', '/GJets_HT-600toInf_Tune4C_13TeV-madgraph-tauola/miniAOD_fixPhoton_7_2_3/150204_122016/0000/', '/store/user/mmasciov/PHYS14_fixPhoton/%s',".*root", 20.87)
GJets_fixPhoton = [
GJets_HT100to200_fixPhoton,
GJets_HT200to400_fixPhoton,
GJets_HT400to600_fixPhoton,
GJets_HT600toInf_fixPhoton,
]
QCD_HT_100To250_fixPhoton = kreator.makeMCComponentFromEOS("QCD_HT_100To250", '/QCD_HT-100To250_13TeV-madgraph/miniAOD_fixPhoton_reco/150206_145121/0000/', '/store/group/phys_susy/mmasciov/PHYS14_fixPhoton_reco/%s', ".*root", 28730000)
QCD_HT_250To500_fixPhoton = kreator.makeMCComponentFromEOS("QCD_HT_250To500", '/QCD_HT_250To500_13TeV-madgraph/miniAOD_fixPhoton_reco/150206_145040/0000/', '/store/group/phys_susy/mmasciov/PHYS14_fixPhoton_reco/%s', ".*root", 670500)
QCD_HT_250To500_ext1_fixPhoton = kreator.makeMCComponentFromEOS("QCD_HT_250To500_ext1", '/QCD_HT_250To500_13TeV-madgraph/miniAOD_fixPhoton_reco/150206_144831/0000/', '/store/group/phys_susy/mmasciov/PHYS14_fixPhoton_reco/%s', ".*root", 670500)
QCD_HT_500To1000_fixPhoton = kreator.makeMCComponentFromEOS("QCD_HT_500To1000", '/QCD_HT-500To1000_13TeV-madgraph/miniAOD_fixPhoton_reco/150206_144759/0000/', '/store/group/phys_susy/mmasciov/PHYS14_fixPhoton_reco/%s', ".*root", 26740)
QCD_HT_500To1000_ext1_fixPhoton = kreator.makeMCComponentFromEOS("QCD_HT_500To1000_ext1", '/QCD_HT-500To1000_13TeV-madgraph/miniAOD_fixPhoton_reco/150206_144615/0000/', '/store/group/phys_susy/mmasciov/PHYS14_fixPhoton_reco/%s', ".*root", 26740)
QCD_HT_1000ToInf_fixPhoton = kreator.makeMCComponentFromEOS("QCD_HT_1000ToInf", '/QCD_HT_1000ToInf_13TeV-madgraph/miniAOD_fixPhoton_7_2_3/150204_172505/0000/', '/store/user/mmasciov/PHYS14_fixPhoton/%s', ".*root", 769.7)
QCD_HT_1000ToInf_ext1_fixPhoton = kreator.makeMCComponentFromEOS("QCD_HT_1000ToInf_ext1", '/QCD_HT_1000ToInf_13TeV-madgraph/miniAOD_fixPhoton_7_2_3/150204_172427/0000/', '/store/user/mmasciov/PHYS14_fixPhoton/%s', ".*root", 769.7)
QCDHT_fixPhoton = [
QCD_HT_100To250_fixPhoton,
QCD_HT_250To500_fixPhoton,
QCD_HT_500To1000_fixPhoton,
QCD_HT_1000ToInf_fixPhoton,
QCD_HT_250To500_ext1_fixPhoton,
QCD_HT_500To1000_ext1_fixPhoton,
QCD_HT_1000ToInf_ext1_fixPhoton,
]
QCD_Pt170to300_fixPhoton = kreator.makeMCComponentFromEOS("QCD_Pt170to300" , "QCD_Pt-170to300_Tune4C_13TeV_pythia8/miniAOD_fixPhoton_QCDPt/150228_154438/0000/" , "/store/group/phys_susy/mmasciov/PHYS14_fixPhoton_QCDPt/%s", ".*root", 120300)
QCD_Pt300to470_fixPhoton = kreator.makeMCComponentFromEOS("QCD_Pt300to470" , "QCD_Pt-300to470_Tune4C_13TeV_pythia8/miniAOD_fixPhoton_QCDPt/150228_154529/0000/" , "/store/group/phys_susy/mmasciov/PHYS14_fixPhoton_QCDPt/%s", ".*root", 7475)
QCD_Pt470to600_fixPhoton = kreator.makeMCComponentFromEOS("QCD_Pt470to600" , "QCD_Pt-470to600_Tune4C_13TeV_pythia8/miniAOD_fixPhoton_QCDPt/150228_154700/0000/" , "/store/group/phys_susy/mmasciov/PHYS14_fixPhoton_QCDPt/%s", ".*root", 587.1)
QCD_Pt600to800_fixPhoton = kreator.makeMCComponentFromEOS("QCD_Pt600to800" , "QCD_Pt-600to800_Tune4C_13TeV_pythia8/miniAOD_fixPhoton_QCDPt/150228_154904/0000/" , "/store/group/phys_susy/mmasciov/PHYS14_fixPhoton_QCDPt/%s", ".*root", 167)
QCD_Pt800to1000_fixPhoton = kreator.makeMCComponentFromEOS("QCD_Pt800to1000" , "QCD_Pt-800to1000_Tune4C_13TeV_pythia8/miniAOD_fixPhoton_QCDPt/150228_155003/0000/" , "/store/group/phys_susy/mmasciov/PHYS14_fixPhoton_QCDPt/%s", ".*root", 28.25)
QCD_Pt1000to1400_fixPhoton = kreator.makeMCComponentFromEOS("QCD_Pt1000to1400", "QCD_Pt-1000to1400_Tune4C_13TeV_pythia8/miniAOD_fixPhoton_QCDPt/150228_154248/0000/", "/store/group/phys_susy/mmasciov/PHYS14_fixPhoton_QCDPt/%s", ".*root", 8.195)
QCD_Pt1400to1800_fixPhoton = kreator.makeMCComponentFromEOS("QCD_Pt1400to1800", "QCD_Pt-1400to1800_Tune4C_13TeV_pythia8/miniAOD_fixPhoton_QCDPt/150228_154344/0000/", "/store/group/phys_susy/mmasciov/PHYS14_fixPhoton_QCDPt/%s", ".*root", 0.7346)
QCD_Pt1800to2400_fixPhoton = kreator.makeMCComponentFromEOS("QCD_Pt1800to2400", "QCD_Pt-1800to2400_Tune4C_13TeV_pythia8/miniAOD_fixPhoton_QCDPt/150301_002302/0000/", "/store/group/phys_susy/mmasciov/PHYS14_fixPhoton_QCDPt/%s", ".*root", 0.102)
QCD_Pt2400to3200_fixPhoton = kreator.makeMCComponentFromEOS("QCD_Pt2400to3200", "QCD_Pt-2400to3200_Tune4C_13TeV_pythia8/miniAOD_fixPhoton_QCDPt/150301_002547/0000/", "/store/group/phys_susy/mmasciov/PHYS14_fixPhoton_QCDPt/%s", ".*root", 0.00644)
QCD_Pt3200_fixPhoton = kreator.makeMCComponentFromEOS("QCD_Pt3200" , "QCD_Pt-3200_Tune4C_13TeV_pythia8/miniAOD_fixPhoton_QCDPt/150301_002653/0000/" , "/store/group/phys_susy/mmasciov/PHYS14_fixPhoton_QCDPt/%s", ".*root", 0.000163)
QCDPt_fixPhoton = [
QCD_Pt170to300_fixPhoton,
QCD_Pt300to470_fixPhoton,
QCD_Pt470to600_fixPhoton,
QCD_Pt600to800_fixPhoton,
QCD_Pt800to1000_fixPhoton,
QCD_Pt1000to1400_fixPhoton,
QCD_Pt1400to1800_fixPhoton,
QCD_Pt1800to2400_fixPhoton,
QCD_Pt2400to3200_fixPhoton,
QCD_Pt3200_fixPhoton
]
# Privately produced SUSY signal points (degenerate/compressed spectra) read from EOS.
T5ttttDeg_mGo1000_mStop300_mCh285_mChi280 = kreator.makeMCComponentFromEOS('T5ttttDeg_mGo1000_mStop300_mCh285_mChi280', '/T5ttttDeg_mGo1000_mStop300_mCh285_mChi280_23bodydec_v2/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.325388)
T5ttttDeg_mGo1300_mStop300_mCh285_mChi280 = kreator.makeMCComponentFromEOS('T5ttttDeg_mGo1300_mStop300_mCh285_mChi280', '/T5ttttDeg_mGo1300_mStop300_mCh285_mChi280_23bodydec_v2/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.0460525)
T5ttttDeg_mGo1000_mStop300_mChi280 = kreator.makeMCComponentFromEOS('T5ttttDeg_mGo1000_mStop300_mChi280', '/T5ttttDeg_mGo1000_mStop300_mChi280_4bodydec_v2/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.325388)
T5ttttDeg_mGo1300_mStop300_mChi280 = kreator.makeMCComponentFromEOS('T5ttttDeg_mGo1300_mStop300_mChi280', '/T5ttttDeg_mGo1300_mStop300_mChi280_4bodydec_v2/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.0460525)
T5ttttDeg_mGo1000_mStop300_mCh285_mChi280_dil = kreator.makeMCComponentFromEOS('T5ttttDeg_mGo1000_mStop300_mCh285_mChi280_dil', '/T5ttttDeg_mGo1000_mStop300_mCh285_mChi280_23bodydec_dilepfilterPt8p5_v2/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.325388)
T5ttttDeg_mGo1300_mStop300_mCh285_mChi280_dil = kreator.makeMCComponentFromEOS('T5ttttDeg_mGo1300_mStop300_mCh285_mChi280_dil', '/T5ttttDeg_mGo1300_mStop300_mCh285_mChi280_23bodydec_dilepfilterPt8p5_v2/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.0460525)
T5ttttDeg = [ T5ttttDeg_mGo1000_mStop300_mCh285_mChi280, T5ttttDeg_mGo1300_mStop300_mCh285_mChi280, T5ttttDeg_mGo1000_mStop300_mChi280, T5ttttDeg_mGo1300_mStop300_mChi280, T5ttttDeg_mGo1000_mStop300_mCh285_mChi280_dil, T5ttttDeg_mGo1300_mStop300_mCh285_mChi280_dil ]
T1ttbbWW_mGo1000_mCh725_mChi715 = kreator.makeMCComponentFromEOS('T1ttbbWW_mGo1000_mCh725_mChi715', '/T1ttbbWW_2J_mGo1000_mCh725_mChi715_3bodydec_v2/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.325388)
T1ttbbWW_mGo1000_mCh725_mChi720 = kreator.makeMCComponentFromEOS('T1ttbbWW_mGo1000_mCh725_mChi720', '/T1ttbbWW_2J_mGo1000_mCh725_mChi720_3bodydec_v2/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.325388)
T1ttbbWW_mGo1300_mCh300_mChi290 = kreator.makeMCComponentFromEOS('T1ttbbWW_mGo1300_mCh300_mChi290', '/T1ttbbWW_2J_mGo1300_mCh300_mChi290_3bodydec_v2/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.0460525)
T1ttbbWW_mGo1300_mCh300_mChi295 = kreator.makeMCComponentFromEOS('T1ttbbWW_mGo1300_mCh300_mChi295', '/T1ttbbWW_2J_mGo1300_mCh300_mChi295_3bodydec_v2/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.0460525)
T1ttbbWW = [ T1ttbbWW_mGo1000_mCh725_mChi715, T1ttbbWW_mGo1000_mCh725_mChi720, T1ttbbWW_mGo1300_mCh300_mChi290, T1ttbbWW_mGo1300_mCh300_mChi295 ]
T1ttbb_mGo1500_mChi100 = kreator.makeMCComponentFromEOS('T1ttbb_mGo1500_mChi100', '/T1ttbb_2J_mGo1500_mChi100_3bodydec_asymmDecOnly/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.0141903)
T1ttbb = [ T1ttbb_mGo1500_mChi100 ]
T6ttWW_mSbot600_mCh425_mChi50 = kreator.makeMCComponentFromEOS('T6ttWW_mSbot600_mCh425_mChi50', '/T6ttWW_600_425_50_v2/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.174599)
T6ttWW_mSbot650_mCh150_mChi50 = kreator.makeMCComponentFromEOS('T6ttWW_mSbot650_mCh150_mChi50', '/T6ttWW_650_150_50_v2/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.107045)
T6ttWW = [ T6ttWW_mSbot600_mCh425_mChi50, T6ttWW_mSbot650_mCh150_mChi50 ]
# The SqGltttt point below is deliberately disabled; its list is kept empty so callers still import it.
#SqGltttt_mGo1300_mSq1300_mChi100 = kreator.makeMCComponentFromEOS('SqGltttt_mGo1300_mSq1300_mChi100', '/13TeV_SqGltttt_Gl_1300_Sq_1300_LSP_100/', '/store/cmst3/group/susy/alobanov/MC/PHYS14/PU20_25ns/%s', '.*root')
SqGltttt = [ ] #SqGltttt_mGo1300_mSq1300_mChi100 ]
T1tttt_mGo1300_mChi100 = kreator.makeMCComponentFromEOS('T1tttt_mGo1300_mChi100', '/SMS_T1tttt_2J_mGl1300_mLSP100/', '/store/cmst3/group/susy/alobanov/MC/PHYS14/PU20_25ns/%s', '.*root', 0.0460525)
T1tttt_mGo800_mChi450 = kreator.makeMCComponentFromEOS('T1tttt_mGo800_mChi450', '/SMS_T1tttt_2J_mGl800_mLSP450/', '/store/cmst3/group/susy/alobanov/MC/PHYS14/PU20_25ns/%s', '.*root', 1.4891)
T1tttt_priv = [ T1tttt_mGo1300_mChi100, T1tttt_mGo800_mChi450 ]
# Dilepton-filtered points scale the cross section by BR(W->lv)^2 ~ (0.333)^2.
T5qqqqWWDeg_mGo1400_mCh315_mChi300 = kreator.makeMCComponentFromEOS('T5qqqqWWDeg_mGo1400_mCh315_mChi300', '/SMS_T5qqqqWW_mGl1400_mChi315_mLSP300/', '/store/cmst3/group/susy/alobanov/MC/PHYS14/PU20_25ns/%s', '.*root', 0.0252977)
T5qqqqWWDeg_mGo1000_mCh310_mChi300 = kreator.makeMCComponentFromEOS('T5qqqqWWDeg_mGo1000_mCh310_mChi300', '/T5qqqqWWDeg_mGo1000_mCh310_mChi300/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.325388)
T5qqqqWWDeg_mGo1000_mCh310_mChi300_dilep= kreator.makeMCComponentFromEOS('T5qqqqWWDeg_mGo1000_mCh310_mChi300_dilep', '/T5qqqqWWDeg_mGo1000_mCh310_mChi300_dilep/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.325388*(0.333)*(0.333))
T5qqqqWWDeg_mGo1000_mCh315_mChi300 = kreator.makeMCComponentFromEOS('T5qqqqWWDeg_mGo1000_mCh315_mChi300', '/T5qqqqWWDeg_mGo1000_mCh315_mChi300/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.325388)
T5qqqqWWDeg_mGo1000_mCh315_mChi300_dilep = kreator.makeMCComponentFromEOS('T5qqqqWWDeg_mGo1000_mCh315_mChi300_dilep', '/T5qqqqWWDeg_mGo1000_mCh315_mChi300_dilep/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.325388*(0.333)*(0.333))
T5qqqqWWDeg_mGo1000_mCh325_mChi300 = kreator.makeMCComponentFromEOS('T5qqqqWWDeg_mGo1000_mCh325_mChi300', '/T5qqqqWWDeg_mGo1000_mCh325_mChi300/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.325388)
T5qqqqWWDeg_mGo1000_mCh325_mChi300_dilep = kreator.makeMCComponentFromEOS('T5qqqqWWDeg_mGo1000_mCh325_mChi300_dilep', '/T5qqqqWWDeg_mGo1000_mCh325_mChi300_dilep/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.325388*(0.324)*(0.324))
T5qqqqWWDeg_mGo800_mCh305_mChi300 = kreator.makeMCComponentFromEOS('T5qqqqWWDeg_mGo800_mCh305_mChi300', '/T5qqqqWWDeg_mGo800_mCh305_mChi300/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 1.4891)
T5qqqqWWDeg_mGo800_mCh305_mChi300_dilep = kreator.makeMCComponentFromEOS('T5qqqqWWDeg_mGo800_mCh305_mChi300_dilep', '/T5qqqqWWDeg_mGo800_mCh305_mChi300_dilep/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 1.4891*(0.342)*(0.342))
T5qqqqWWDeg = [
T5qqqqWWDeg_mGo1400_mCh315_mChi300,
T5qqqqWWDeg_mGo1000_mCh310_mChi300, T5qqqqWWDeg_mGo1000_mCh315_mChi300, T5qqqqWWDeg_mGo1000_mCh325_mChi300, T5qqqqWWDeg_mGo800_mCh305_mChi300,
T5qqqqWWDeg_mGo1000_mCh310_mChi300_dilep, T5qqqqWWDeg_mGo1000_mCh315_mChi300_dilep, T5qqqqWWDeg_mGo1000_mCh325_mChi300_dilep, T5qqqqWWDeg_mGo800_mCh305_mChi300_dilep
]
T5qqqqWW_mGo1500_mCh800_mChi100 = kreator.makeMCComponentFromEOS('T5qqqqWW_mGo1500_mCh800_mChi100', '/SMS_T5qqqqWW_Gl1500_Chi800_LSP100/', '/store/cmst3/group/susy/alobanov/MC/PHYS14/PU20_25ns/%s', '.*root', 0.0141903)
T5qqqqWW_mGo1200_mCh1000_mChi800 = kreator.makeMCComponentFromEOS('T5qqqqWW_mGo1200_mCh1000_mChi800', '/SMS_T5qqqqWW_Gl1200_Chi1000_LSP800/', '/store/cmst3/group/susy/alobanov/MC/PHYS14/PU20_25ns/%s', '.*root', 0.0856418)
T5qqqqWW_mGo1000_mCh800_mChi700 = kreator.makeMCComponentFromEOS('T5qqqqWW_mGo1000_mCh800_mChi700', '/T5qqqqWW_mGo1000_mCh800_mChi700/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.325388)
T5qqqqWW_mGo1000_mCh800_mChi700_dilep= kreator.makeMCComponentFromEOS('T5qqqqWW_mGo1000_mCh800_mChi700_dilep', '/T5qqqqWW_mGo1000_mCh800_mChi700_dilep/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.325388*(3*0.108)*(3*0.108))
T5qqqqWW_mGo1200_mCh1000_mChi800_cmg = kreator.makeMCComponentFromEOS('T5qqqqWW_mGo1200_mCh1000_mChi800_cmg', '/T5qqqqWW_mGo1200_mCh1000_mChi800/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.0856418)
T5qqqqWW_mGo1200_mCh1000_mChi800_dilep= kreator.makeMCComponentFromEOS('T5qqqqWW_mGo1200_mCh1000_mChi800_dilep', '/T5qqqqWW_mGo1200_mCh1000_mChi800_dilep/', '/store/cmst3/group/susy/gpetrucc/13TeV/Phys14DR/MINIAODSIM/%s',".*root", 0.0856418*(3*0.108)*(3*0.108))
T5qqqqWW = [
T5qqqqWW_mGo1500_mCh800_mChi100, T5qqqqWW_mGo1200_mCh1000_mChi800,
T5qqqqWW_mGo1000_mCh800_mChi700, T5qqqqWW_mGo1200_mCh1000_mChi800_cmg,
T5qqqqWW_mGo1000_mCh800_mChi700_dilep, T5qqqqWW_mGo1200_mCh1000_mChi800_dilep
]
# note: cross section for q~ q~ from https://twiki.cern.ch/twiki/bin/view/LHCPhysics/SUSYCrossSections13TeVsquarkantisquark (i.e. gluinos and stops decoupled)
T6qqWW_mSq950_mCh325_mChi300 = kreator.makeMCComponentFromEOS('T6qqWW_mSq950_mCh325_mChi300', '/SMS_T6qqWW_mSq950_mChi325_mLSP300/', '/store/cmst3/group/susy/alobanov/MC/PHYS14/PU20_25ns/%s', '.*root', 0.0898112)
T6qqWW = [ T6qqWW_mSq950_mCh325_mChi300 ]
# Aggregate of all privately produced signal samples defined above, plus the
# GJets_fixPhoton / QCDHT_fixPhoton sets defined earlier in this module.
mcSamplesPriv = T5ttttDeg + T1ttbbWW + T1ttbb + T6ttWW + SqGltttt + T1tttt_priv + T5qqqqWW + T5qqqqWWDeg + T6qqWW + GJets_fixPhoton + QCDHT_fixPhoton
# Every MC sample known to this module: the three official PHYS14 pile-up
# scenarios plus the private productions above.
mcSamples = mcSamplesPHYS14_PU20bx25 + mcSamplesPHYS14_PU40bx25 + mcSamplesPHYS14_PU4bx50 + mcSamplesPriv
#-----------DATA---------------
#dataDir = os.environ['CMSSW_BASE']+"/src/CMGTools/TTHAnalysis/data"
dataDir = "$CMSSW_BASE/src/CMGTools/TTHAnalysis/data" # use environmental variable, useful for instance to run on CRAB
#lumi: 12.21+7.27+0.134 = 19.62 /fb @ 8TeV
# Certified good-luminosity-section JSON used to filter real data events.
# NOTE(review): the name shadows the stdlib "json" module if it is imported
# elsewhere in this file — confirm no clash.
json=dataDir+'/json/Cert_Run2012ABCD_22Jan2013ReReco.json'
# Single-muon 2012 run-D data component read from EOS.
# intLumi is set to 1 — presumably a placeholder to be scaled by the caller;
# TODO confirm against the analysis configuration.
SingleMu = cfg.DataComponent(
    name = 'SingleMu',
    files = kreator.getFilesFromEOS('SingleMu',
        '/SingleMu/Run2012D-15Apr2014-v1/AOD/02e0a1be-c9c7-11e3-bfe2-0024e83ef644/MINIAOD/CMSSW_7_0_9_patch2_GR_70_V2_AN1',
        '/eos/cms/store/cmst3/user/cmgtools/CMG/%s'),
    intLumi = 1,
    triggers = [],
    json = json
    )
# Data samples grouped by trigger stream; only the single-muon stream is
# populated at the moment.
dataSamplesMu=[]
dataSamplesE=[]
dataSamplesMuE=[]
dataSamples1Mu=[SingleMu]
dataSamplesAll = dataSamplesMu+dataSamplesE+dataSamplesMuE+dataSamples1Mu
from CMGTools.TTHAnalysis.setup.Efficiencies import *
# Define job splitting and common per-component processing flags.
# MC components: flag as simulation and attach pile-up reweighting profiles.
for comp in mcSamples:
    comp.isMC = True
    comp.isData = False
    comp.splitFactor = 250 # if comp.name in [ "WJets", "DY3JetsM50", "DY4JetsM50","W1Jets","W2Jets","W3Jets","W4Jets","TTJetsHad" ] else 100
    comp.puFileMC=dataDir+"/puProfile_Summer12_53X.root"
    comp.puFileData=dataDir+"/puProfile_Data12.root"
    comp.efficiency = eff2012
# Data components: flagged as real data and split into more jobs.
for comp in dataSamplesAll:
    comp.splitFactor = 1000
    comp.isMC = False
    comp.isData = True
# Running this module directly with "test" on the command line runs
# testSamples over all MC components (presumably a sanity/accessibility
# check of the file lists — confirm in ComponentCreator).
if __name__ == "__main__":
    import sys
    if "test" in sys.argv:
        from CMGTools.TTHAnalysis.samples.ComponentCreator import testSamples
        testSamples(mcSamples)
| UTF-8 | Python | false | false | 38,258 | py | 10 | samples_13TeV_PHYS14.py | 8 | 0.770924 | 0.589289 | 0 | 419 | 90.307876 | 318 |
rsagun/CMPS-146-Game-AI | 5,454,608,477,068 | fd889ed87b41e539fb42656844b962c541adb0f9 | c6edae277d207ded481bb7dc8ecc469f24d9a127 | /final/Animal.py | d1ffcbb43d908dc564fc922177f60b82b1c8a198 | []
| no_license | https://github.com/rsagun/CMPS-146-Game-AI | 128486ef8fbfa55fb7f1c5207fb17f6d80b4de2f | c595d117ae919e5e118e60179e350b335e94516b | refs/heads/master | 2020-03-22T12:04:51.050481 | 2018-07-06T18:27:26 | 2018-07-06T18:27:26 | 140,015,626 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from random import choice, random, randint, choices
from math import ceil
import os
from time import time
import multiprocessing.pool as mpool
from functools import partial
from Environment import Environment
# No matter what environment is we hav esimilar creatures.
# Not weighting environment when taking in to account the sp
possible_traits = {
'temperature': [['fur', random()], ['scales', random()], ['hide', random()]],
'size': [random() for i in range(5)],
'diet': ['herbivore', 'carnivore', 'omnivore'],
'aggression': [random() for i in range(5)],
'strength': [random() for i in range(5)],
'speed': [random() for i in range(5)],
'fat': [random() for i in range(5)],
'reproduction': ['mammalian', 'reptilian', 'avian', 'insect'],
'toes': [randint(0, 10)],
'arms': [randint(0, 4)],
'legs': [choice([0, 2, 3, 4])],
'shoulders': [choice([2, 4])],
'chest': ['scrawny', 'meaty', 'pointed'],
'neck': ['short', 'long', 'thick', 'flexible'],
'skin type': ['dry', 'moist', 'webby', 'hairy', 'flabby', 'swole']
}
class Animal:
def __init__(self, traits={}):
if not traits:
self.traits = {key: choice(list(value)) for key, value in possible_traits.items()}
self.traits['size'] += (self.traits['toes']*.005 + self.traits['arms']*.025 + self.traits['legs']*.025)/1.25
self.traits['speed'] -= .2*self.traits['size']
else:
self.traits = traits
'''self.traits['size'] = random()
self.traits['speed'] = random()
self.traits['strength'] = random()'''
self._fitness = 0
def __repr__(self):
return '\n'.join([str(key) + ": " + str(value) for key, value in self.traits.items()])
def calculate_fitness(self, environment):
# Stores the fitness created by the functions
coefficients = dict(
temperature=self.temperature_fitness(environment),
food_density=self.food_fitness(environment),
limb=self.limb_fitness(environment),
combative=self.combative_fitness(environment),
athletic=self.athletic_fitness(environment),
disaster=self.disaster_fitness(environment),
pollutive=self.pollutive_fitness(environment)
)
# Weight extremes with respective factors
factors = self.calculate_balance(environment)
self._fitness = sum(map(lambda m: coefficients[m] * factors[m], coefficients))
return self
def calculate_balance(self, environment):
# Dict keys are same as coe and values are 0 - 1 that are multiplied on to the fitness
# Something that could go wrong is everyting is weighted the same amount or too similar
# Temperature/Food/Limb/Combative/Athletic/Disaster/Pollutive
# Weights higher for more extreme temperatures (really hot or really cold)
temp = environment.traits['temperature']
temp_factor = ((1 - (abs(.5 - temp)) - .5) / .5)
# Weights higher for more extreme amounts of resources (a lot or little)
# Should it be only in cases of little to no food?
p_d = environment.traits['plant density']
a_d = environment.traits['prey density']
if self.traits['diet'] == 'carnivore':
food_factor = 1 - a_d
elif self.traits['diet'] == 'herbivore':
food_factor = 1 - p_d
else:
# Might have to switch a bit
food_factor = ((1 - (abs(.25 - ((p_d + a_d) / 2))) - .25) / .75)
# Weight for limb/skin type
fert = environment.traits['fertility']
nat_d = environment.traits['natural disaster']
sun_expose = environment.traits['sun exposure']
precip = environment.traits['precipitation']
wind = environment.traits['wind']
limb_factor = (fert + nat_d + sun_expose + precip + wind) / 5
# Weight for combative
hostility = environment.traits['hostility']
combative_factor = 1 - hostility
# Weight for athletic
geovar = environment.traits['geovariance']
athletic_factor = 1 - geovar
# Weight for disaster
n_d = environment.traits['natural disaster']
disaster_factor = 1 - n_d
# Weight for pollutive
pollu = environment.traits['pollution']
pollutive_factor = 1 - pollu
# Weights for limbs
factors = dict(
temperature=temp_factor,
food_density=food_factor,
limb=limb_factor,
combative=combative_factor,
athletic=athletic_factor,
disaster=disaster_factor,
pollutive=pollutive_factor
)
return factors
def temperature_fitness(self, environment):
temp = self.traits['temperature']
env_temp = environment.traits['temperature']
if temp[0] == 'fur':
fitness = abs(temp[1] - env_temp) + (-.15 if env_temp > .5 else .15)
fitness = (fitness + .15) / 1.3
elif temp[0] == 'scales':
fitness = ((0.7 * env_temp) + (0.3 * temp[1]))
else:
fitness = ((1 - (abs(.5 - env_temp)) - .5)) + .5 * (temp[1])
if env_temp > .5:
fitness = 1 - fitness
return fitness
def food_fitness(self, environment):
p_d = environment.traits['plant density']
a_d = environment.traits['prey density']
size = self.traits['size']
diet = self.traits['diet']
speed = self.traits['speed']
aggression = self.traits['aggression']
# bigger size ----> more food required
# carnivore ----> more animal density
# herbivore ---> more plant density
# omnivore ---> mix of both
# if prey > .6 theres a variance of kinds of prey
# if big and fast a higher prey density is better
# if big with low prey density not as fit as someone with small and prey density
# lot of prey bigger is better / small of prey smaller is better
# faster as carnivore is better
# herbivore use aggression to run into fitness to differentiate them between each other
if diet == 'carnivore':
# higher speed higher aggression lower size
fitness = (a_d + speed + aggression - size) / 2
elif diet == 'herbivore':
fitness = ((p_d - size) + 1) / 2
elif diet == 'omnivore':
fitness = ((.5 * (a_d + speed + aggression) + .5 * p_d) - size) / 2
return fitness
def limb_fitness(self, environment):
# how feasible to work in environment.
# Only works off of skin type does not include limbs as of now.
# ('none', 'dry', 'moist', 'webby', 'hairy', 'flabby', 'swole')
# Abiotic Factors of the environment
fert = environment.traits['fertility']
nat_d = environment.traits['natural disaster']
sun_expose = environment.traits['sun exposure']
precip = environment.traits['precipitation']
wind = environment.traits['wind']
# Species Factors that affect the fitness
skin_type = self.traits['skin type']
# Natural disaster same in all skin type?
# Moist - mid fertil for living in dirt, high precip for body immersion, low ND makes sense,
# small medium sun do not want to boil or steam, small wind do not want to dry out skin
# If want more diversity change the natural disaster number.
if skin_type == 'moist':
# Check the different abiotic facors
fitness = (((1 - abs(.7 - fert)) - 0.3) / .7) + (1 - abs(0 - nat_d)) + \
(1 - (abs(.4 - sun_expose) - .4) / .6) + (1 - abs(1 - precip)) + (
(1 - (abs(.25 - wind)) - .25) / .75)
# Webby - fertility does not necessairly affect, low ND, low average does not like extreme weather,
# low average recip not extreme, low average wind
elif skin_type == 'webby':
# Check the different abiotic facors
fitness = ((1 - (abs(.65 - fert)) - .35) / .65) + (1 - abs(0 - nat_d)) + \
((1 - (abs(.25 - sun_expose)) - .25) / .75) + ((1 - (abs(.4 - precip)) - .4) / .6) + (
(1 - (abs(.15 - wind)) - .15) / .85)
# Hairy - fert average, low precip, low ND, low average sun exposure sweating no energy, high average wind
elif skin_type == 'hairy':
# Check the different abiotic facors
fitness = ((1 - (abs(.5 - fert)) - .5) / .5) + (1 - abs(0 - nat_d)) + \
((1 - (abs(.35 - sun_expose)) - .35) / .65) + ((1 - (abs(.15 - precip)) - .15) / .85) + (
(1 - (abs(.75 - wind)) - .25) / .75)
# Flabby - fert above average eat grass, low ND, high average sun Exposure,
# low precip too wet dirt, below average wind
elif skin_type == 'flabby':
# Check the different abiotic facors
fitness = ((1 - (abs(.75 - fert)) - .25) / .75) + (1 - abs(0 - nat_d)) + \
((1 - (abs(.75 - sun_expose)) - .25) / .75) + ((1 - (abs(.15 - precip)) - .15) / .85) + (
(1 - (abs(.25 - wind)) - .25) / .75)
# Swole - fert high grow stuff and pick, low ND, above average sun exposure, precip low affect grip on things,
# low wind does not want to fight against it (wind resistance)
elif skin_type == 'swole':
# Check the different abiotic facors
fitness = ((1 - (abs(.9 - fert)) - .1) / .9) + (1 - abs(0 - nat_d)) + \
((1 - (abs(.60 - sun_expose)) - .4) / .6) + ((1 - (abs(.15 - precip)) - .15) / .85) + (
(1 - (abs(.20 - wind)) - .2) / .8)
# Dry - Low average fertility, low ND makes sense, high average sun exposure like to sun bathe, low precip does not like wet skin,
# moderately high wind cause like to dry skin in wind
else:
# Check the different abiotic facors
fitness = ((1 - (abs(.2 - fert)) - .2) / .8) + (1 - abs(0 - nat_d)) + \
((1 - (abs(.85 - sun_expose)) - .15) / .85) + ((1 - (abs(.15 - precip)) - .15) / .85) + (
(1 - (abs(.75 - wind)) - .25) / .75)
return fitness / 5
def combative_fitness(self, environment):
fitness_list = []
# How likely creature is to survive in environment
# Natural disasters weather geovariance speed strength agility size pollution
agility = self.traits['speed']
strength = self.traits['strength']
size = self.traits['size']
aggro = self.traits['aggression']
geovar = environment.traits['geovariance']
n_d = environment.traits['natural disaster']
pollu = environment.traits['pollution']
hostility = environment.traits['hostility']
env_list = [(geovar, 'geovariance'), (n_d, 'natural disaster'), (pollu, 'pollution'),
(hostility, 'hostility')]
'''
# all factors scale from 0-1, 0 being worst case for the creature, 1 being best
# creatures general athleticism in the environment, normalized
# stronger and faster creatures do better in environments which require a lot of
# strenuous movement
athletics_factor = ((agility + strength - geovar) + 1) / 3'''
'''
# creatures general ability to get away from natural disasters
# faster and stronger creatures get away from disasters, bigger ones can have more trouble doing so
disaster_factor = ((agility + strength - size - n_d) + 2) / 4'''
# creatures general ability to deal with other creatures
# since size(and maybe strength in the future) is partially influenced by limbs, limbs add to
# creatures fighting capability
fitness = ((agility - (1 - size) + aggro + strength - hostility) + 1) / 5
'''
# creatures reaction to environmental pollution(foreign substances)
# stronger creatures can fare better against pollutants, but bigger ones suffer
pollutive_factor = ((strength - size - pollu) + 2) / 3
if self.traits['diet'] is 'carnivore':
pollutive_factor *= .5
if self.traits['diet'] is 'omnivore':
pollutive_factor *= .75'''
# weight hostility fitness based on the prevalence of the environment. I.E a creature that survives well in a
# polluted environment is better than a combative creature in that same environment
# sorted list of environmental variables in order to weight which is the most important survivability
'''env_list.sort()
# print(env_list)
# creates list in order of the most important fitness to least
for i in env_list:
if i[1] is 'geovariance':
fitness_list.append(athletics_factor)
if i[1] is 'natural disaster':
fitness_list.append(disaster_factor)
if i[1] is 'hostility':
fitness_list.append(combative_factor)
else:
fitness_list.append(pollutive_factor)
# most important fitness weighted the most
fitness = fitness_list[0] * .4 + fitness_list[1] * .3 + fitness_list[1] * .2 + fitness_list[1] * .1
# fitness = (geovarathletics_factor + n_ddisaster_factor + hostilitycombative_factor + pollu*pollutive_factor)/4
#print("Athleticism: ", athletics_factor, "/ Disaster Survival: ", disaster_factor, "/ Prowess: ",
#combative_factor, "/ Fragility: ", pollutive_factor)'''
return fitness
def athletic_fitness(self, environment):
agility = self.traits['speed']
strength = self.traits['strength']
geovar = environment.traits['geovariance']
# all factors scale from 0-1, 0 being worst case for the creature, 1 being best
# creatures general athleticism in the environment, normalized
# stronger and faster creatures do better in environments which require a lot of
# strenuous movement
fitness = ((agility + strength - geovar) + 1) / 3
return fitness
def disaster_fitness(self, environment):
agility = self.traits['speed']
strength = self.traits['strength']
size = self.traits['size']
# creatures general ability to get away from natural disasters
# faster and stronger creatures get away from disasters, bigger ones can have more trouble doing so
n_d = environment.traits['natural disaster']
fitness = ((agility + strength - size - n_d) + 2) / 4
return fitness
def pollutive_fitness(self, environment):
strength = self.traits['strength']
size = self.traits['size']
pollu = environment.traits['pollution']
# creatures reaction to environmental pollution(foreign substances)
fitness = ((strength - size - pollu) + 2) / 3
if self.traits['diet'] is 'carnivore':
fitness *= .5
if self.traits['diet'] is 'omnivore':
fitness *= .75
return fitness
def fitness(self, environment):
if self._fitness == 0:
self.calculate_fitness(environment)
return self._fitness
def mutate(self, genome):
#for the mutation, pick one random trait on the animal to change
trait_to_mutate = choice(list(genome))
if trait_to_mutate in ('speed', 'fat', 'aggression', 'strength', 'size'):
genome[trait_to_mutate] += random() * .01 - .005
genome[trait_to_mutate] = max(min(genome[trait_to_mutate], .995), .005)
else:
genome[trait_to_mutate] = choice(possible_traits[trait_to_mutate])
if trait_to_mutate in 'temperature':
genome[trait_to_mutate][1] += random()*.01 - .005
genome[trait_to_mutate][1] = max(min(genome[trait_to_mutate][1], .995), .005)
return genome
#mutate percent can be adjusted
def generate_children(self, other, mutate_percent=.005):
new_genome_1 = {}
new_genome_2 = {}
iter = 0
for key, value in sorted(self.traits.items()):
#if key in ['speed', 'size', 'strength']:
#continue
if iter % 2 == 0:
new_genome_1[key] = value
else:
new_genome_2[key] = value
iter += 1
iter = 0
for key, value in sorted(other.traits.items()):
#if key in ['speed', 'size', 'strength']:
#continue
if iter % 2 == 0:
new_genome_2[key] = value
else:
new_genome_1[key] = value
iter += 1
if random() < mutate_percent:
new_genome_1 = self.mutate(new_genome_1)
if random() < mutate_percent:
new_genome_2 = self.mutate(new_genome_2)
return Animal(new_genome_1), Animal(new_genome_2)
def generate_successors(population, environment, percent=10):
    """Breed the next generation from ``population``.

    The top ``len(population) // percent`` individuals carry over unchanged
    (elitism); the rest of the new generation is made of children bred from
    fitness-weighted parent pairs.

    Fixed: the distinct-parent retry loop could spin forever when a single
    individual held essentially all of the selection weight; after a bounded
    number of weighted retries we now fall back to a uniform pick of any
    other individual.
    """
    results = []
    # Elitism: keep the best individuals as-is.
    elitist_pops = sorted(population, key=lambda a: a.fitness(environment), reverse=True)[:len(population) // percent]
    results.extend(elitist_pops)
    population_size = len(population)
    num_parent_parings = population_size // 2 - len(elitist_pops) // 2
    min_element = min(population, key=lambda p: p.fitness(environment)).fitness(environment)
    max_element = max(population, key=lambda p: p.fitness(environment)).fitness(environment)
    if min_element == max_element:
        # Degenerate population: no fitness signal to select on.
        return population
    # Min-max normalised selection weights in [0, 1].
    population_weights = [(p.fitness(environment) - min_element) / (max_element - min_element) for p in population]
    for _ in range(num_parent_parings):
        p1, p2 = choices(population, weights=population_weights, k=2)
        attempts = 0
        while p1 == p2:
            if attempts >= 100:
                # Weighted draws keep colliding; pick any distinct mate
                # uniformly instead (non-empty because min != max above).
                p2 = choice([cand for cand in population if cand != p1])
                break
            p1, p2 = choices(population, weights=population_weights, k=2)
            attempts += 1
        results.extend(p1.generate_children(p2))
    return results
def genetic_algorithm(environment):
    """Evolve a population of Animals against ``environment``.

    Runs for up to 500 generations (or until ctrl-c), writing the initial
    individual to animals/first.txt and the best individual of each
    generation (plus the environment) to animals/last.txt, and returns the
    final population.  Fitness evaluation is fanned out over all CPU cores.
    """
    # arbitrary population cap
    pop_limit = 1000
    batches = os.cpu_count()
    batch_size = int(ceil(pop_limit / batches))
    # Fixed: ensure the log directory exists — previously the open() below
    # raised FileNotFoundError when ./animals was missing.
    os.makedirs("animals", exist_ok=True)
    with mpool.Pool(processes=os.cpu_count()) as pool:
        init_time = time()
        adam = Animal()
        with open("animals/first.txt", 'w') as f:
            f.write(str(adam))
        # Seed the population with jittered copies of adam: numeric traits
        # are nudged by up to +/-0.2 and clamped to [0, 1].
        population = [adam]
        for _ in range(pop_limit - 1):
            population.append(Animal({key: min(max(value + ((random() * .4 - .2)), 0), 1) if key in ('fat', 'speed', 'aggression', 'size', 'strength') else value for key, value in adam.traits.items()}))
        e = [environment for _ in range(pop_limit)]
        # calculate_fitness returns self, so starmap yields the updated animals.
        population = pool.starmap(Animal.calculate_fitness, zip(population, e), batch_size)
        init_done = time()
        print("Created and calculated initial population statistics in:", init_done - init_time, "seconds")
        generation = 0
        start = time()
        now = start
        print("Use ctrl-c to terminate this loop manually.")
        try:
            while 1:
                now = time()
                if generation > 0:
                    best = max(population, key=lambda a: a.fitness(environment))
                    print("Generation:", str(generation))
                    print("Max fitness:", str(best.fitness(environment)))
                    print("Average generation time:", (now - start) / generation)
                    print("Net time:", now - start)
                    with open("animals/last.txt", 'w') as f:
                        f.write(str(best))
                        f.write('\n\n' + str(environment))
                generation += 1
                if generation > 500:
                    break
                gen_time = time()
                next_pop = generate_successors(population, environment)
                gen_done = time()
                print("Generated successors in:", gen_done - gen_time, "seconds")
                next_pop = pool.starmap(Animal.calculate_fitness, zip(next_pop, e), batch_size)
                pop_done = time()
                print("Calculated fitnesses in:", pop_done - gen_done, "seconds")
                population = next_pop
        except KeyboardInterrupt:
            pass
    return population
def main():
    """Entry point: evolve animals against one freshly generated environment."""
    environment = Environment()
    genetic_algorithm(environment)


if __name__ == '__main__':
    main()
| UTF-8 | Python | false | false | 21,093 | py | 8 | Animal.py | 4 | 0.570995 | 0.553833 | 0 | 506 | 40.685771 | 240 |
wingskh/mmskeleton | 8,349,416,431,622 | ed23953bd1170663cc59723ea625e1ba7888ad34 | bb4c8f2ca1cbde8fc05f54643705f3a13871c785 | /mmskeleton/models/loss/__init__.py | 1689757031bbd2c71ffc11609182ae6f4f84ebac | [
"Apache-2.0"
]
| permissive | https://github.com/wingskh/mmskeleton | 999c7ee18a0a084c7aef7e88021ac6f63faaabdc | 0d179386e63d6d34ee374163d7b5aff95ef78cec | refs/heads/master | 2022-12-14T23:54:39.427020 | 2020-09-12T12:17:45 | 2020-09-12T12:17:45 | 294,425,242 | 1 | 0 | Apache-2.0 | true | 2020-09-10T13:56:10 | 2020-09-10T13:56:09 | 2020-09-10T13:56:06 | 2020-08-07T02:49:40 | 95,581 | 0 | 0 | 0 | null | false | false | from .JointsMSELoss import JointsMSELoss
from .JointsOHKMMSELoss import JointsOHKMMSELoss | UTF-8 | Python | false | false | 90 | py | 69 | __init__.py | 43 | 0.888889 | 0.888889 | 0 | 2 | 44 | 48 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.