repo_name (stringlengths 7–111) | __id__ (int64 16.6k–19,705B) | blob_id (stringlengths 40) | directory_id (stringlengths 40) | path (stringlengths 5–151) | content_id (stringlengths 40) | detected_licenses (list) | license_type (stringclasses, 2 values) | repo_url (stringlengths 26–130) | snapshot_id (stringlengths 40) | revision_id (stringlengths 40) | branch_name (stringlengths 4–42) | visit_date (timestamp[ns]) | revision_date (timestamp[ns]) | committer_date (timestamp[ns]) | github_id (int64 14.6k–687M, ⌀) | star_events_count (int64 0–209k) | fork_events_count (int64 0–110k) | gha_license_id (stringclasses, 12 values) | gha_fork (bool, 2 classes) | gha_event_created_at (timestamp[ns]) | gha_created_at (timestamp[ns]) | gha_updated_at (timestamp[ns]) | gha_pushed_at (timestamp[ns]) | gha_size (int64 0–10.2M, ⌀) | gha_stargazers_count (int32 0–178k, ⌀) | gha_forks_count (int32 0–88.9k, ⌀) | gha_open_issues_count (int32 0–2.72k, ⌀) | gha_language (stringlengths 1–16, ⌀) | gha_archived (bool, 1 class) | gha_disabled (bool, 1 class) | content (stringlengths 10–2.95M) | src_encoding (stringclasses, 5 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64 10–2.95M) | extension (stringclasses, 19 values) | num_repo_files (int64 1–202k) | filename (stringlengths 4–112) | num_lang_files (int64 1–202k) | alphanum_fraction (float64 0.26–0.89) | alpha_fraction (float64 0.2–0.89) | hex_fraction (float64 0–0.09) | num_lines (int32 1–93.6k) | avg_line_length (float64 4.57–103) | max_line_length (int64 7–931) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Jz52710/PythonWebBC
| 11,278,584,143,922 |
590eb1defa238e4b93972c1ddc07cf8fdee69f87
|
c8e50bd1cccd18d255b2ce3e8b22259ab60b8fb9
|
/pytho网络编程/day2/day2.py
|
96b753e63ac8925a07590352f846a4aeaa85e387
|
[] |
no_license
|
https://github.com/Jz52710/PythonWebBC
|
43c9b3bb32817cc6696c2031b78cc4a0f1043b8b
|
bf1d2d22c2da8fbd2c71dbb5d2722f34526d8a08
|
refs/heads/master
| 2020-08-01T01:12:47.562300 | 2019-10-08T00:34:40 | 2019-10-08T00:34:40 | 210,810,038 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import time, datetime
# Dates
# 1. Timestamps
print(time.time())
# 2. Time tuples
res = time.localtime(time.time())  # convert a timestamp into a time tuple
print(res)
# Formatted time
res1 = time.strftime("%Y-%m-%d (%H:%M:%S)", res)  # convert a time tuple into a formatted string
print(res1)
d = datetime.date(2019, 9, 26)
print(d)
print("Current date:", datetime.date.today())
a = datetime.date(2019, 11, 11)
print(a)
|
UTF-8
|
Python
| false | false | 393 |
py
| 14 |
day2.py
| 12 | 0.699029 | 0.63754 | 0 | 17 | 17.235294 | 61 |
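A note on the row above: strftime has an inverse, time.strptime, which parses a formatted string back into a time tuple. A minimal round-trip sketch (standard library only; the date string is illustrative):

import time

s = "2019-10-08 (00:34:40)"
tup = time.strptime(s, "%Y-%m-%d (%H:%M:%S)")  # formatted string -> struct_time
print(time.mktime(tup))                        # struct_time -> timestamp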
adebisi-aden/consumerfinance.gov
| 19,224,273,624,282 |
e316e1765c1c81909dd6feb5beb068686b946773
|
135238dfa1bdd3a577c6b08ebcd478a649c0dce3
|
/cfgov/v1/management/commands/invalidate_page_cache.py
|
cb0108b7d15f0317e9987c65ef882f8279931639
|
[
"CC0-1.0"
] |
permissive
|
https://github.com/adebisi-aden/consumerfinance.gov
|
5e716a03180ba18bac747e5f3e7e47c7774c2dab
|
8c0f5afac341823c59f73b0c6bd60592e0f5eaca
|
refs/heads/main
| 2023-08-12T04:13:43.687689 | 2021-09-17T14:24:54 | 2021-09-17T14:24:54 | 407,607,316 | 0 | 0 |
CC0-1.0
| true | 2021-09-17T16:21:12 | 2021-09-17T16:21:11 | 2021-09-17T14:24:58 | 2021-09-17T15:52:43 | 741,342 | 0 | 0 | 0 | null | false | false |
from django.core.management.base import BaseCommand
from wagtail.contrib.frontend_cache.utils import PurgeBatch
class Command(BaseCommand):
help = "Invalidate the cache of pages by full URLs"
def add_arguments(self, parser):
parser.add_argument(
"--url",
required=True,
nargs="+",
help=(
"The full URL for the page cache to invalidate "
"(can specify multiple)"
)
)
def handle(self, *args, **options):
batch = PurgeBatch()
batch.add_urls(options["url"])
batch.purge()
|
UTF-8
|
Python
| false | false | 617 |
py
| 322 |
invalidate_page_cache.py
| 214 | 0.568882 | 0.568882 | 0 | 23 | 25.826087 | 64 |
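The management command in the row above registers under its filename, and its --url option takes one or more URLs (nargs="+"). Assuming a configured Wagtail frontend-cache backend, invocation would look roughly like this (the URLs are hypothetical):

python manage.py invalidate_page_cache --url https://example.com/about/ https://example.com/blog/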
rohitgeo/arcgis-python-api
| 6,030,134,088,304 |
e7f431afb663feb3b4aa931cb067246f85f6fb7b
|
3c7520e943bdacfa096f2031c3b33f1778f8c402
|
/talks/uc2018/Plenary/pools/model.py
|
a16520e6132dd05ac94cfdd9990b7e2b57584a95
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/rohitgeo/arcgis-python-api
|
40aa2b90847d5bca7ab92117f19739bb91b67eb1
|
c550277a6b7948f3dbf9b4106d76839d4f235990
|
refs/heads/master
| 2021-01-16T19:55:59.219092 | 2018-08-02T04:57:05 | 2018-08-02T04:57:05 | 62,830,488 | 6 | 4 |
Apache-2.0
| true | 2018-08-02T04:57:06 | 2016-07-07T18:45:39 | 2018-04-26T08:33:26 | 2018-08-02T04:57:05 | 224,384 | 1 | 2 | 0 |
Python
| false | null |
import numpy as np
from PIL import Image
import torch
from torch import nn,optim
from torchvision.models import *
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn.init import kaiming_normal
import os
import json, pdb
from PIL import ImageDraw, ImageFont
from matplotlib import patches, patheffects
# torch.cuda.set_device(0)
from distutils.version import LooseVersion
import matplotlib.cm as cmx
import matplotlib.colors as mcolors
from cycler import cycler
from PIL import ImageOps
import torch.nn.functional as F
total_classes = 2
k = 9
def children(m): return m if isinstance(m, (list, tuple)) else list(m.children())
def apply_init(m, init_fn):
m.apply(lambda x: cond_init(x, init_fn))
def cond_init(m, init_fn):
if not isinstance(m, (nn.BatchNorm1d,nn.BatchNorm2d,nn.BatchNorm3d)):
if hasattr(m, 'weight'): init_fn(m.weight)
if hasattr(m, 'bias') and hasattr(m.bias, 'data'): m.bias.data.fill_(0.)
class StdConv(nn.Module):
def __init__(self, nin, nout, stride=2, drop=0.1):
super().__init__()
self.conv = nn.Conv2d(nin, nout, 3, stride=stride, padding=1)
self.bn = nn.BatchNorm2d(nout)
self.drop = nn.Dropout(drop)
def forward(self, x): return self.drop(self.bn(F.relu(self.conv(x))))
def flatten_conv(x,k):
bs,nf,gx,gy = x.size() # batch size, num filters, width, height
x = x.permute(0,3,2,1).contiguous()
return x.view(bs,-1,nf//k)
class OutConv(nn.Module):
def __init__(self, k, nin, bias):
super().__init__()
self.k = k
self.oconv1 = nn.Conv2d(nin, (total_classes + 1)*k, 3, padding=1) # nclasses
self.oconv2 = nn.Conv2d(nin, 4*k, 3, padding=1) # bboxes
self.oconv1.bias.data.zero_().add_(bias)
def forward(self, x):
return [flatten_conv(self.oconv1(x), self.k),
flatten_conv(self.oconv2(x), self.k)]
drop=0.4
class SSD_MultiHead(nn.Module):
def __init__(self, k, bias):
super().__init__()
self.drop = nn.Dropout(drop)
self.sconv0 = StdConv(512,256, stride=1, drop=drop)
self.sconv1 = StdConv(256,256, drop=drop)
self.sconv2 = StdConv(256,256, drop=drop)
self.sconv3 = StdConv(256,256, drop=drop)
self.out0 = OutConv(k, 256, bias)
self.out1 = OutConv(k, 256, bias)
self.out2 = OutConv(k, 256, bias)
self.out3 = OutConv(k, 256, bias)
def forward(self, x):
x = self.drop(F.relu(x))
x = self.sconv0(x)
x = self.sconv1(x)
o1c,o1l = self.out1(x)
x = self.sconv2(x)
o2c,o2l = self.out2(x)
x = self.sconv3(x)
o3c,o3l = self.out3(x)
return [torch.cat([o1c,o2c,o3c], dim=1),
torch.cat([o1l,o2l,o3l], dim=1)]
IS_TORCH_04 = LooseVersion(torch.__version__) >= LooseVersion('0.4')
model_meta = {
resnet18:[8,6], resnet34:[8,6], resnet50:[8,6], resnet101:[8,6], resnet152:[8,6],
vgg16:[0,22], vgg19:[0,22]
}
requires_grad = False
USE_GPU = True
def map_over(x, f): return [f(o) for o in x] if is_listy(x) else f(x)
def V (x, requires_grad=False, volatile=False): return map_over(x, lambda o: V_(o, requires_grad, volatile))
def V_(x, requires_grad=False, volatile=False): return create_variable(x, volatile, requires_grad)
def is_listy(x): return isinstance(x, (list,tuple))
def A(*a): return np.array(a[0]) if len(a)==1 else [np.array(o) for o in a]
def create_variable(x, volatile, requires_grad=False):
if type (x) != Variable:
if IS_TORCH_04: x = Variable(T(x), requires_grad=requires_grad)
else: x = Variable(T(x), requires_grad=requires_grad, volatile=volatile)
return x
def T(a, half=False, cuda=True):
if not torch.is_tensor(a):
a = np.array(np.ascontiguousarray(a))
if a.dtype in (np.int8, np.int16, np.int32, np.int64):
a = torch.LongTensor(a.astype(np.int64))
elif a.dtype in (np.float32, np.float64):
a = torch.cuda.HalfTensor(a) if half else torch.FloatTensor(a)
else: raise NotImplementedError(a.dtype)
    if cuda: a = to_gpu(a, non_blocking=True)  # 'async' became a reserved word in Python 3.7; PyTorch renamed the kwarg to non_blocking
return a
def to_gpu(x, *args, **kwargs):
return x.cuda(*args, **kwargs) if USE_GPU else x
def to_np(v):
if isinstance(v, (np.ndarray, np.generic)): return v
if isinstance(v, (list,tuple)): return [to_np(o) for o in v]
if isinstance(v, Variable): v=v.data
if isinstance(v, torch.cuda.HalfTensor): v=v.float()
return v.cpu().numpy()
def cut_model(m, cut):
return list(m.children())[:cut] if cut else [m]
class AdaptiveConcatPool2d(nn.Module):
def __init__(self, sz=None):
super().__init__()
sz = sz or (1,1)
self.ap = nn.AdaptiveAvgPool2d(sz)
self.mp = nn.AdaptiveMaxPool2d(sz)
def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)
class Flatten(nn.Module):
def __init__(self): super().__init__()
def forward(self, x): return x.view(x.size(0), -1)
def num_features(m):
c=children(m)
if len(c)==0: return None
for l in reversed(c):
if hasattr(l, 'num_features'): return l.num_features
res = num_features(l)
if res is not None: return res
class ConvnetBuilder():
def __init__(self, f, c, is_multi, is_reg, ps=None, xtra_fc=None, xtra_cut=0, custom_head=None, pretrained=True):
self.f,self.c,self.is_multi,self.is_reg,self.xtra_cut = f,c,is_multi,is_reg,xtra_cut
if xtra_fc is None: xtra_fc = [512]
if ps is None: ps = [0.25]*len(xtra_fc) + [0.5]
self.ps,self.xtra_fc = ps,xtra_fc
if f in model_meta: cut,self.lr_cut = model_meta[f]
else: cut,self.lr_cut = 0,0
cut-=xtra_cut
layers = cut_model(f(pretrained), cut)
self.nf = num_features(layers)*2
if not custom_head: layers += [AdaptiveConcatPool2d(), Flatten()]
self.top_model = nn.Sequential(*layers)
n_fc = len(self.xtra_fc)+1
if not isinstance(self.ps, list): self.ps = [self.ps]*n_fc
if custom_head: fc_layers = [custom_head]
else: fc_layers = self.get_fc_layers()
self.n_fc = len(fc_layers)
self.fc_model = to_gpu(nn.Sequential(*fc_layers))
if not custom_head: apply_init(self.fc_model, kaiming_normal)
self.model = to_gpu(nn.Sequential(*(layers+fc_layers)))
@property
def name(self): return f'{self.f.__name__}_{self.xtra_cut}'
def create_fc_layer(self, ni, nf, p, actn=None):
res=[nn.BatchNorm1d(num_features=ni)]
if p: res.append(nn.Dropout(p=p))
res.append(nn.Linear(in_features=ni, out_features=nf))
if actn: res.append(actn)
return res
def get_fc_layers(self):
res=[]
ni=self.nf
for i,nf in enumerate(self.xtra_fc):
res += self.create_fc_layer(ni, nf, p=self.ps[i], actn=nn.ReLU())
ni=nf
final_actn = nn.Sigmoid() if self.is_multi else nn.LogSoftmax(1)
if self.is_reg: final_actn = None
res += self.create_fc_layer(ni, self.c, p=self.ps[-1], actn=final_actn)
return res
def get_layer_groups(self, do_fc=False):
if do_fc:
return [self.fc_model]
idxs = [self.lr_cut]
c = children(self.top_model)
if len(c)==3: c = children(c[0])+c[1:]
lgs = list(split_by_idxs(c,idxs))
return lgs+[self.fc_model]
|
UTF-8
|
Python
| false | false | 7,430 |
py
| 217 |
model.py
| 14 | 0.607537 | 0.583042 | 0 | 211 | 34.218009 | 117 |
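flatten_conv in the row above reorders a (batch, filters, gx, gy) activation so that each grid cell contributes k anchor predictions. A standalone shape sketch (assumes only PyTorch; the 4x4 grid size is illustrative):

import torch

bs, k, n_classes = 2, 9, 2
nf = (n_classes + 1) * k                 # 27 output filters from the class head
fm = torch.randn(bs, nf, 4, 4)           # class-head output on a 4x4 grid
x = fm.permute(0, 3, 2, 1).contiguous()  # -> (2, 4, 4, 27)
print(x.view(bs, -1, nf // k).shape)     # -> torch.Size([2, 144, 3]): 144 anchors, 3 class scores each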
justin831201/SATravelWebSystem
| 16,192,026,741,385 |
23753226b08777f7b7bded1660b042a1e20e428f
|
a7e11d3cec28b0cdc20e574c2bcbc34269697b85
|
/SATravelWebSystem/urls.py
|
2badff0c7257bb97ac0e2d478befaba551911398
|
[] |
no_license
|
https://github.com/justin831201/SATravelWebSystem
|
5078c3f181af25a281f15a8c459dfed024a47c09
|
e8fc55c599a117205c5b444ea835cb1ab533da8a
|
refs/heads/master
| 2021-01-01T04:40:40.805442 | 2016-05-17T06:03:03 | 2016-05-17T06:03:03 | 58,545,239 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""SATravelWebSystem URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from HomePage import views as Home_Views
from ListPage import views as List_Views
from TripPage import views as Trip_Views
from GuidePage import views as Guide_Views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', Home_Views.home, name='home'),
url(r'^signin', Home_Views.signin, name='signin'),
url(r'^login', Home_Views.login, name='login'),
url(r'^finish', Home_Views.finish, name='finish'),
url(r'^finally', Home_Views.login_finish, name='finally'),
url(r'^list', List_Views.list, name='list'),
url(r'^trip', Trip_Views.trip, name='trip'),
url(r'^guide', Guide_Views.guide, name='guide'),
]
|
UTF-8
|
Python
| false | false | 1,361 |
py
| 7 |
urls.py
| 4 | 0.692138 | 0.68626 | 0 | 33 | 40.242424 | 79 |
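One detail worth noting about the row above: its patterns are unanchored prefixes, so e.g. r'^login' would also match /login-help/. Following the docstring's own url() convention, a hypothetical anchored variant of one entry:

url(r'^login/$', Home_Views.login, name='login')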
ArturoGuerra/discordmusicbot
| 18,562,848,674,545 |
19d8bfc63227ce18f750f07725818483b39caa3b
|
994da2508283fa63835d1145b488c91e91b95ff0
|
/persistence.py
|
58f258a51ed1e6ffa208222e51e2ad2d72a1d2c7
|
[] |
no_license
|
https://github.com/ArturoGuerra/discordmusicbot
|
c929dcb6e869ee805e24215f82c88ee0d52e942b
|
8f3ae9d748ab5981991e95765115875cbb36428b
|
refs/heads/master
| 2022-12-12T06:18:19.003173 | 2017-08-24T18:11:34 | 2017-08-24T18:11:34 | 87,961,155 | 1 | 2 | null | false | 2022-12-07T23:58:09 | 2017-04-11T17:25:42 | 2017-06-28T18:50:08 | 2022-12-07T23:58:08 | 59 | 0 | 0 | 4 |
Python
| false | false |
import config
import pymysql
import musicbot
from peewee import *
from playhouse.shortcuts import RetryOperationalError
cfg = musicbot.MusicApplication().config
class MyRetryDB(RetryOperationalError, MySQLDatabase):
pass
try:
my_db = MyRetryDB(
cfg.database,
host=cfg.dbhost,
port=3306,
user=cfg.dbuser,
password=cfg.dbpass,
charset='utf8mb4')
except Exception as e:
my_db=None
class BaseModel(Model):
class Meta:
database=my_db
class Servers(BaseModel):
server = BigIntegerField(null=False, primary_key=True)
channel = BigIntegerField(null=True)
playlist = CharField(null=True)
class Playlists(BaseModel):
playlist = CharField(null=False)
link = CharField(null=False)
class Meta:
primary_key = None
|
UTF-8
|
Python
| false | false | 835 |
py
| 10 |
persistence.py
| 6 | 0.671856 | 0.664671 | 0 | 33 | 24.30303 | 58 |
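The peewee models in the row above still need their tables created once. A minimal sketch using peewee's standard create_tables API (assumes my_db connected successfully and the models above are in scope):

my_db.connect()
my_db.create_tables([Servers, Playlists], safe=True)  # safe=True: no-op if the tables already exist
my_db.close()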
dBangos/python_projects
| 4,715,874,097,175 |
a1c91c46d90e652331b86dfdb89b08e9340aac67
|
cbf1aca5b2e447f9a5976b05981a8b37ebd4ccd7
|
/K-centers.py
|
f1cca9120c5749bc8e82bede12b3722c2bf90cd1
|
[] |
no_license
|
https://github.com/dBangos/python_projects
|
8c6166efc5aefd1779db0d75aedad4f43f69468f
|
625d620f396a0dee32537ed194b752501eb007e3
|
refs/heads/main
| 2023-02-23T15:37:55.908019 | 2021-01-21T16:20:11 | 2021-01-21T16:20:11 | 310,552,118 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import networkx as nx
import matplotlib.pyplot as plt
def k_centers_objective_value(G, centers):
    # objective: the largest distance from any non-center node to its nearest center
    obj_val = 0
    nodes = G.number_of_nodes()
    for i in range(0, nodes):  # for each node
        if i not in centers:
            temp = 1000000
            for j in range(0, len(centers)):  # compare it against every center
                if nodes_connected(G, i, centers[j]):
                    if G[i][centers[j]]['weight'] < temp:
                        temp = G[i][centers[j]]['weight']  # temp now holds the smallest edge weight to a center
            if temp > obj_val:
                obj_val = temp
    return obj_val
def nodes_connected(g, u, v):
    return u in g.neighbors(v)
def distFromC(graph, centers):
    # return the node farthest from its nearest center
    obj_val = 0
    solution = 0  # initialize so the function cannot return an unbound name
    nodes = graph.number_of_nodes()
    for i in range(0, nodes):  # for each node
        if i not in centers:
            temp = 1000000
            for j in range(0, len(centers)):  # compare it against every center
                if nodes_connected(graph, i, centers[j]):
                    if graph[i][centers[j]]['weight'] < temp:
                        temp = graph[i][centers[j]]['weight']  # temp now holds the smallest edge weight to a center
            if temp > obj_val:
                obj_val = temp
                solution = i
    return solution
def Greedy(G,k,first_center):
center_solution = []
nodes=G.number_of_nodes()
center_solution.append(first_center)
#first_center=center_solution[0]
    for i in range(1, k):  # for each center to add
temp = distFromC(G,center_solution)
center_solution.append(temp)
obj_val=k_centers_objective_value(G,center_solution)
return center_solution,obj_val
def Greedy2(G,k):
first_center=0
nodes=G.number_of_nodes()
center_solution = []
templist=[]
for j in range(0,nodes):
templist=[]
first_center = j
templist.append(first_center)
        for i in range(1, k):  # for each center to add
temp = distFromC(G,templist)
templist.append(temp)
center_solution.append(templist)
obj_list=[0]*k
obj_val=1000000
for i in range(0,len(center_solution)):
tempval = k_centers_objective_value(G, center_solution[i])
if (tempval<obj_val):
obj_val=tempval
obj_list=center_solution[i]
return obj_list,obj_val
def k_center_greedy_based(G, k):
first_center = 0
nodes = G.number_of_nodes()
obj_list = [0] * k
obj_val = 1000000
for j in range(0, nodes):
templist = []
first_center = j
templist.append(first_center)
        for i in range(1, k):  # for each center to add
temp = distFromC(G, templist)
templist.append(temp)
tempval = k_centers_objective_value(G, templist)
if (tempval < obj_val):
obj_val = tempval
obj_list = templist.copy()
return obj_val,obj_list
def Greedy4(G, k):
first_center = 0
nodes = G.number_of_nodes()
obj_list = [0] * k
obj_val = 1000000
for j in range(0, nodes):
print(j)
templist = []
first_center = j
templist.append(first_center)
        for i in range(1, k):  # for each center to add
temp = distFromC(G, templist)
templist.append(temp)
tempval = k_centers_objective_value(G, templist)
if (tempval < obj_val):
obj_val = tempval
obj_list = templist.copy()
templist = obj_list.copy()
obj_val=k_centers_objective_value(G, obj_list)
for f in range(0,nodes):
print(f)
templist[k-1]=f
tempval = k_centers_objective_value(G, templist)
if(tempval<obj_val):
obj_val = tempval
obj_list = templist.copy()
print(obj_list)
print(obj_val)
return obj_list, obj_val
|
UTF-8
|
Python
| false | false | 4,129 |
py
| 3 |
K-centers.py
| 3 | 0.531364 | 0.516348 | 0 | 124 | 31.266129 | 122 |
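A toy smoke test for the greedy heuristic in the row above (assumes networkx and the functions above are in scope; the edge weights are arbitrary):

G = nx.complete_graph(6)
for u, v in G.edges():
    G[u][v]['weight'] = abs(u - v)          # a simple line metric
centers, obj = Greedy(G, 2, first_center=0)
print(centers, obj)                          # -> [0, 5] 2: node 5 is farthest from 0, objective is 2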
rcrowther/wild
| 17,377,437,709,880 |
3397cebe2c270435a189b285f29f96de04b4bc8d
|
38c4676aaa6d5e8a443ad19da57a41419fefe7d5
|
/phases/TreeActions.py
|
63ba9d27f2137fcffc70c120866acb48993f09b1
|
[] |
no_license
|
https://github.com/rcrowther/wild
|
0a3872d2cca312653f96f389b12eb9bd36f12463
|
0d12fbb8e5a4418c8c43b20452672e288bcca031
|
refs/heads/master
| 2021-01-01T19:20:28.179634 | 2017-04-01T18:49:45 | 2017-04-01T18:49:45 | 33,536,580 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from trees.Trees import *
from trees.TreeTraverser import CallbackTraverser
import SymbolTables
#! use CallbackBodyBuilder
class RemoveComments(CallbackTraverser):
def __init__(self, tree, reporter):
self.reporter = reporter
CallbackTraverser.__init__(self, tree)
def _removeComments(self, tree):
newBody = [t for t in tree.body if (not isinstance(t, Comment))]
tree.body = newBody
def definingExpressionWithBody(self, tree):
'''
'''
#print('defining expression with body: ' + tree.defMark.data)
# funcs/dynamic allocated var/val
self._removeComments(tree)
def expressionWithBody(self, tree):
#print('expression with body: ' + tree.actionMark.data)
# expressionWithBody is definitions
#' but will in future be branch calls like case/if
self._removeComments(tree)
#! use CallbackBodyBuilder
class SplitVals(CallbackTraverser):
'''
    Split the compound val statement into definitions and assignments.
Expression(val, zee, kind, funcRender.CALL, None, children: Constant(3.1))
becomes,
Expression(val, zee, kind, funcRender.DEF, None, children: ())
Expression('$equ$', zee, kind, funcRender.CALL, None, children: Constant(3.1))
'''
def __init__(self, tree, reporter):
self.reporter = reporter
CallbackTraverser.__init__(self, tree)
def _splitVals(self, bodyList):
b = []
for t in bodyList:
# insert the element back
b.append(t)
if (
t.isNonAtomicExpression
and t.actionMark.identifier == 'val'
):
# build a new assignment
syntheticDelivery = Expression(noPathIdentifierFunc('$$assign$'))
syntheticDelivery.defMark = t.defMark
syntheticDelivery.children = t.children
syntheticDelivery.RenderKind = RenderKind.function
syntheticDelivery.isMachine = True
# reduce t to a definition
# (asserting, should be true?)
t.isDef = True
t.children = []
# insert syntheticDelivery as
# initialising assignment after tree
b.append(syntheticDelivery)
return b
def definingExpressionWithBody(self, tree):
tree.body = self._splitVals(tree.body)
def expressionWithBody(self, tree):
tree.body = self._splitVals(tree.body)
#? need to intern paths
class Intern(CallbackTraverser):
def __init__(self, tree, expSymbolTable, reporter):
self.expSymbolTable = expSymbolTable
self.reporter = reporter
#print('intern tree' + tree.toString())
CallbackTraverser.__init__(self, tree)
def comment(self, tree):
#print('comment found!')
pass
def constant(self, tree):
#print('constant: ' + tree.data)
pass
def mark(self, tree):
#print('mark: ' + tree.data)
pass
def definingExpression(self, tree):
#print('defining expression: ' + tree.defMark.data)
try:
#! need to do path too
mark = tree.defMark
if (mark.isNotEmpty()):
self.expSymbolTable.define(tree.defMark.identifier)
except SymbolTables.DuplicateDefinitionException:
self.reporter.error('duplicate expression definition in scope of symbol mark: {0}'.format(tree.defMark.identifier), tree.position)
def expression(self, tree):
#print('expression: ' + tree.actionMark.data)
self.expSymbolTable.add(tree.actionMark.identifier)
def definingExpressionWithBody(self, tree):
#print('defining expression with body: ' + tree.defMark.data)
try:
self.expSymbolTable.define(tree.defMark.identifier)
except SymbolTables.DuplicateDefinitionException:
self.reporter.error('duplicate expression with body definition in scope of symbol mark: {0}'.format(tree.defMark.identifier), tree.position)
def expressionWithBody(self, tree):
#print('expression with body: ' + tree.actionMark.data)
self.expSymbolTable.add(tree.actionMark.identifier)
#? No ?!<$#
normalizedSymbols = {
'+' : '$$plus$',
'-' : '$$minus$',
'*' : '$$mult$',
'%' : '$$divide$',
'=' : '$$assign$',
'^' : '$$cap$',
'&' : '$$amp$',
'~' : '$$tithe$',
'@' : '$$at$',
# TODO: Allow underscore, but what about initial underscore?
# use '$$$' in place?
'_' : '_',
# NB: allow dot
'.' : '.'
}
#! messy
class MarkNormalize(CallbackTraverser):
def __init__(self, tree, reporter):
self.reporter = reporter
CallbackTraverser.__init__(self, tree)
def _isAlphaNumeric(self, c):
return (
# alphabetic
(c >= 65 and c <= 90)
or (c >= 97 and c <= 122)
# numeric
or (c >= 48 and c <= 57)
#? or c == UNDERSCORE
)
def _normalizeMarkText(self, mark):
nMark = ''
failed = False
for cp in mark:
if (self._isAlphaNumeric(ord(cp))):
nMark += cp
else:
try:
nMark += normalizedSymbols[cp]
except Exception:
self.reporter.error('Unable to normalize mark: {0}: unrecognised symbol: {1}'.format(mark, cp))
failed = True
break
return mark if failed else nMark
def _normalizeMark(self, tree, mark):
strippedMark = mark
lastChar = mark[-1:]
if (lastChar == '!' or lastChar == '?'):
tree.isMutable = lastChar == '!'
strippedMark = mark[0:-1]
return self._normalizeMarkText(strippedMark)
def comment(self, tree):
#print('comment found!')
pass
def constant(self, tree):
#print('constant: ' + tree.data)
pass
def mark(self, tree):
#print('mark: ' + tree.data)
pass
def definingExpression(self, tree):
#print('defining expression: ' + tree.defMark.data)
#tree.defMark.data = self._normalizeMark(tree, tree.defMark.identifier)
normalizedMark = self._normalizeMark(tree, tree.defMark.identifier)
tree.defMark = tree.defMark.replaceIdentifier(normalizedMark)
def expression(self, tree):
#print('expression: ' + tree.actionMark.data)
#tree.actionMark.data = self._normalizeMark(tree, tree.actionMark.identifier)
normalizedMark = self._normalizeMark(tree, tree.actionMark.identifier)
tree.actionMark = tree.actionMark.replaceIdentifier(normalizedMark)
def definingExpressionWithBody(self, tree):
#print('defining expression with body: ' + tree.defMark.data)
#tree.defMark.data = self._normalizeMark(tree, tree.defMark.identifier)
normalizedMark = self._normalizeMark(tree, tree.defMark.identifier)
tree.defMark = tree.defMark.replaceIdentifier(normalizedMark)
def expressionWithBody(self, tree):
#print('expression with body: ' + tree.actionMark.data)
#tree.actionMark.data = self._normalizeMark(tree, tree.actionMark.identifier)
normalizedMark = self._normalizeMark(tree, tree.actionMark.identifier)
tree.actionMark = tree.actionMark.replaceIdentifier(normalizedMark)
#from trees.Trees import Constant, Mark
#x deprecated due to revision in tokeniser
#from collections import namedtuple
|
UTF-8
|
Python
| false | false | 7,274 |
py
| 57 |
TreeActions.py
| 57 | 0.62909 | 0.62579 | 0 | 233 | 30.197425 | 148 |
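The normalizedSymbols table in the row above rewrites operator characters into alphanumeric-safe marks. A one-line illustration (with normalizedSymbols in scope):

print(''.join(normalizedSymbols.get(c, c) for c in 'total+'))  # -> total$$plus$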
RyosukeNORO/Squeezing-Level
| 4,629,974,768,055 |
e2c9de1171f82e42b1f54c48c0d17a653a7a82d3
|
4b47f2276cf5adc0b95fa4fce6b66acbfd6dc07a
|
/squeezing_pumploss.py
|
be4b6f4a53d5476783bd6959d1cd4bb4072f6e44
|
[] |
no_license
|
https://github.com/RyosukeNORO/Squeezing-Level
|
8102033ab6d1359cb691d02f09354e79efb6c9cc
|
c2a1790b7025e8a33edf81577dabf177562d1516
|
refs/heads/main
| 2023-06-27T19:57:32.829547 | 2021-07-30T08:49:52 | 2021-07-30T08:49:52 | 390,994,093 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#### How strongly does pump-light loss affect the squeezing level? (squeezed-light loss is assumed to be 0)
import numpy as np
from matplotlib import pyplot as plt
# Constants
c = 299792458E6  # speed of light (um/s)
eps = 8.85418782E-18  # vacuum permittivity (F/um)
# Variables
div = 1001
z = np.linspace(0, 200000, div)  # device length (um)
I = 0.1  # pump power (W)
A = 10  # waveguide cross-sectional area (um^2)
d33 = 1/9*(2/np.pi)*13.8  # nonlinear optical coefficient (pm/V)
d = d33*10E-6  # nonlinear optical coefficient (um/V)
npu = 2.271213253  # refractive index seen by the pump
ns = 2.147041829  # refractive index seen by the signal
lamp = 0.405  # pump wavelength (um)
lams = lamp*2  # signal wavelength (um)
kp = 2*np.pi*npu/lamp  # pump wavenumber (1/um)
ks = 2*np.pi*ns/lams  # signal wavenumber (1/um)
omes = ks*c/ns  # signal angular frequency (rad/s)
a = [0.2, 0.5, 1, 2, 4]  # losses (dB/cm)
print(str(I*100/A)+'MW/cm^2')
# Compute g
#g = (2*np.pi*2*d)/npu**(3/2) * np.sqrt((8*np.pi*I)/(c*A)) * (ks/npu)
g = np.sqrt((2*omes**2*d**2*I)/(ns**2*npu*eps*c**3*A))  # parametric gain, ~0.113E-3
print("g = {} /um".format(g))
#-----------------------------------------------------------------------------
# 0
gaa = a[0]  # waveguide loss (dB/cm)
gama = np.log(10**(gaa*1E-5))  # waveguide loss (/um)
Sa = [0 for a in range(div)]
Sfa = [0 for a in range(div)]
Smaxa = 0
i = 0
while i < div:
Sa[i] = (gama + 2*g*np.exp(-(gama+2*g)*z[i]))/(gama+2*g)
Sfa[i] = 10*np.log10(Sa[i])
if Smaxa > Sfa[i]:
Smaxa = Sfa[i]
zmaxa = z[i]
i += 1
#-----------------------------------------------------------------------------
# 1
gab = a[1]  # waveguide loss (dB/cm)
gamb = np.log(10**(gab*1E-5))  # waveguide loss (/um)
Sb = [0 for a in range(div)]
Sfb = [0 for a in range(div)]
Smaxb = 0
i = 0
while i < div:
Sb[i] = (gamb + 2*g*np.exp(-(gamb+2*g)*z[i]))/(gamb+2*g)
Sfb[i] = 10*np.log10(Sb[i])
if Smaxb > Sfb[i]:
Smaxb = Sfb[i]
zmaxb = z[i]
i += 1
#-----------------------------------------------------------------------------
# 2
gac = a[2]  # waveguide loss (dB/cm)
gamc = np.log(10**(gac*1E-5))  # waveguide loss (/um)
Sc = [0 for a in range(div)]
Sfc = [0 for a in range(div)]
Smaxc = 0
i = 0
while i < div:
Sc[i] = (gamc + 2*g*np.exp(-(gamc+2*g)*z[i]))/(gamc+2*g)
Sfc[i] = 10*np.log10(Sc[i])
if Smaxc > Sfc[i]:
Smaxc = Sfc[i]
zmaxc = z[i]
i += 1
#-----------------------------------------------------------------------------
# 3
gad = a[3]  # waveguide loss (dB/cm)
gamd = np.log(10**(gad*1E-5))  # waveguide loss (/um)
Sd = [0 for a in range(div)]
Sfd = [0 for a in range(div)]
Smaxd = 0
i = 0
while i < div:
Sd[i] = (gamd + 2*g*np.exp(-(gamd+2*g)*z[i]))/(gamd+2*g)
Sfd[i] = 10*np.log10(Sd[i])
if Smaxd > Sfd[i]:
Smaxd = Sfd[i]
zmaxd = z[i]
i += 1
#-----------------------------------------------------------------------------
# 4
gae = a[4]  # waveguide loss (dB/cm)
game = np.log(10**(gae*1E-5))  # waveguide loss (/um)
Se = [0 for a in range(div)]
Sfe = [0 for a in range(div)]
Smaxe = 0
'''
H = game/(game+2*g)  # squeezing level at saturation
Hf = 10*np.log10(H)
print("S(r->inf) = {} dB (loss = {} dB/cm)".format(Hf, a[4]))
'''
i = 0
while i < div:
Se[i] = (game + 2*g*np.exp(-(game+2*g)*z[i]))/(game+2*g)
Sfe[i] = 10*np.log10(Se[i])
if Smaxe > Sfe[i]:
Smaxe = Sfe[i]
zmaxe = z[i]
i += 1
print("Smax = {} dB (loss = {} dB/cm) Zmax = {} um".format(Smaxa, a[0], zmaxa))
print("Smax = {} dB (loss = {} dB/cm) Zmax = {} um".format(Smaxb, a[1], zmaxb))
print("Smax = {} dB (loss = {} dB/cm) Zmax = {} um".format(Smaxc, a[2], zmaxc))
print("Smax = {} dB (loss = {} dB/cm) Zmax = {} um".format(Smaxd, a[3], zmaxd))
print("Smax = {} dB (loss = {} dB/cm) Zmax = {} um".format(Smaxe, a[4], zmaxe))
# Plot the results
plt.plot(z, Sfa, color='purple', label=str(a[0])+' dB/cm')
plt.plot(z, Sfb, color='red', label=str(a[1])+' dB/cm')
plt.plot(z, Sfc, color='orange', label=str(a[2])+' dB/cm')
plt.plot(z, Sfd, color='green', label=str(a[3])+' dB/cm')
plt.plot(z, Sfe, color='blue', label=str(a[4])+' dB/cm')
plt.xlabel('distance(um)')
plt.ylabel('squeezing level(dB)')
plt.legend()#loc='best')
plt.gca().xaxis.set_tick_params(which='both', direction='in',bottom=True, top=True, left=True, right=True)
plt.gca().yaxis.set_tick_params(which='both', direction='in',bottom=True, top=True, left=True, right=True)
#plt.xlim([0, 7000])
#plt.ylim([-35, 0])
plt.show()
|
UTF-8
|
Python
| false | false | 4,696 |
py
| 3 |
squeezing_pumploss.py
| 2 | 0.486493 | 0.43436 | 0 | 140 | 28.142857 | 106 |
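The five numbered blocks in the row above differ only in the loss value; a compact equivalent (a sketch reusing a, g, z, np, and plt from that file):

for loss, color in zip(a, ['purple', 'red', 'orange', 'green', 'blue']):
    gam = np.log(10**(loss*1E-5))                          # waveguide loss (/um)
    S = (gam + 2*g*np.exp(-(gam + 2*g)*z)) / (gam + 2*g)
    Sf = 10*np.log10(S)
    i_max = np.argmin(Sf)                                  # most negative value = best squeezing
    print("Smax = {} dB (loss = {} dB/cm) Zmax = {} um".format(Sf[i_max], loss, z[i_max]))
    plt.plot(z, Sf, color=color, label=str(loss)+' dB/cm')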
marianneke/kaggle-speech-recognition
| 2,585,570,343,868 |
289ee0d3f5e37a5583d551619b552657928de535
|
e9d524397a7f1100c5bb4f35c230a61d3e7a30cd
|
/07_ensemble_best_of_3.py
|
a3127f48712885e59f766d2f5b9af62093ffa7a6
|
[] |
no_license
|
https://github.com/marianneke/kaggle-speech-recognition
|
6a76d1a90b2098d14d1e8c7ec4dd224455fdeda5
|
8ea062c49fc7c6ee4578feb82422515e06e118a6
|
refs/heads/master
| 2020-03-07T18:15:44.030831 | 2018-01-16T06:55:45 | 2018-01-16T06:55:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pandas as pd
import pdb
data1 = pd.read_csv('GCommandsPytorch/checkpoint/model6_lb085.csv')
data2 = pd.read_csv('GCommandsPytorch/checkpoint/model_all_train_lb086.csv')
data3 = pd.read_csv('GCommandsPytorch/checkpoint/model_all_train_lb086_v2.csv')
data1 = data1.sort_values(by = ['fname'])
data2 = data2.sort_values(by = ['fname'])
data3 = data3.sort_values(by = ['fname'])
labels = []
for ix in xrange(len(data1)):
clip_name = data1.iloc[ix]['fname']
val1 = data1.iloc[ix]['label']
val2 = data2.iloc[ix]['label']
val3 = data3.iloc[ix]['label']
if val1 == val2:
ans = val1
elif val1 == val3:
ans = val1
elif val2 == val3:
ans = val2
else:
ans = val3
#print(clip_name, val1, val2, val3, ans)
labels.append(ans)
submit_data = pd.DataFrame.from_items([
('fname', data1['fname']),
('label', labels)
])
submit_data.to_csv('output/ensemble_of_3.csv', index = False)
|
UTF-8
|
Python
| false | false | 915 |
py
| 9 |
07_ensemble_best_of_3.py
| 8 | 0.661202 | 0.614208 | 0 | 36 | 24.416667 | 79 |
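The if/elif ladder in the row above is a majority vote with data3 as the tie-breaker. The same rule in one expression (a sketch, assuming the three fname-sorted frames from that file):

from collections import Counter

def vote(v1, v2, v3):
    winner, count = Counter([v1, v2, v3]).most_common(1)[0]
    return winner if count >= 2 else v3   # a three-way tie falls back to data3

labels = [vote(*t) for t in zip(data1['label'], data2['label'], data3['label'])]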
pnemade/ttname
| 3,685,081,990,212 |
ce4a298c952f928bf1c1fcaeb42d513223e3c4b9
|
e213a5e9c10f6ca9e8aa9cb15b5a67159834a4b1
|
/ttname/table.py
|
499f47c254d359dbdc9ac00238c2b3e5ccf31997
|
[
"Bitstream-Vera",
"BSD-2-Clause"
] |
permissive
|
https://github.com/pnemade/ttname
|
c92f5112495d32329864e2d09ef76cf69be1c7de
|
0a352b9a3b6aeb558e3cd4d2939a24321dd7b0dc
|
refs/heads/master
| 2020-04-20T19:26:03.976497 | 2014-01-14T06:58:23 | 2014-01-14T06:58:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
objects representing the font metadata "name" table and its Records
"""
# Once upon a time, this file used the TTFont API to all the editing. And it
# works great for reading, but when you call save() all you get is garbage out.
# Rather than try and figure out what the hell is wrong with that, we just do
# the XML serialization dance, since that code path seems to have been well used
# for at least a decade.
# This file is part of ttname.
#
# Copyright 2013 T.C. Hollingsworth <tchollingsworth@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from StringIO import StringIO
from fontTools.ttLib import TTFont
import fontTools.ttLib.xmlImport
import collections
import tempfile
import sys
import os
#use lxml for a speed boost if we have it
try:
import lxml.etree as etree
except ImportError: #pragma: no cover
import xml.etree.ElementTree as etree
# EVIL MONKEYPATCH HACKS
# everything in TTFont takes a file object but the XML reader. fml.
def _parse(self):
if hasattr(self.fileName, 'read'):
self.parseFile(self.fileName)
#this never gets used, it's just to be exceedingly correct
else: #pragma: no cover
        self.parseFile(open(self.fileName))
fontTools.ttLib.xmlImport.ExpatParser.parse = _parse
class StrungIO(StringIO):
"A special StringIO that ignores ttx's foolish close operations"
def close(self):
self.seek(0)
def free(self):
StringIO.close(self)
# and now the normal stuff...
SectionData = collections.namedtuple('SectionData', ['platformID', 'platEncID', 'langID'])
class TTNameRecord(object):
"An object representing a name record that mimics those returned by TTFont"
#...so I don't have to rewrite cli.py ;-)
def __init__(self, *args):
if len(args) == 1:
self._elem = args[0]
elif len(args) == 5:
parent, nameID, platformID, platEncID, langID = args
self._elem = elem = etree.SubElement(parent, 'namerecord')
self.nameID = nameID
self.platformID = platformID
self.platEncID = platEncID
self.langID = langID
else:
raise TypeError('either 1 or 5 arguments expected')
def __getattr__(self, name):
if name == 'langID':
return int(self._elem.get(name), 16)
elif name in self._elem.keys():
return int(self._elem.get(name))
elif name == 'string':
return self._elem.text.strip()
else:
raise AttributeError
def __setattr__(self, name, value):
if name == '_elem': #chicken, meet egg
object.__setattr__(self, name, value)
elif name == 'langID':
self._elem.set(name, hex(value))
elif name in ['nameID', 'platformID', 'platEncID']:
self._elem.set(name, str(value))
elif name == 'string':
self._elem.text = value
else:
object.__setattr__(self, name, value)
class TTNameTable(object):
'The "name" table of an OpenType font, containing metadata regarding the font'
def __init__(self, fileish):
self._infile = fileish
self._xml = StrungIO()
#grrrrrrrr
stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
tt = TTFont(fileish)
tt.saveXML(self._xml, tables=['name'], progress=False)
sys.stdout = stdout
self._xml.seek(0)
self._tree = etree.parse(self._xml)
self._name = self._tree.getroot().find('name')
def __del__(self):
self._xml.free()
# yet more compat fun
@property
def names(self):
for elem in self._name:
yield TTNameRecord(elem)
def save(self, fileish):
new_xml = StrungIO()
self._tree.write(new_xml)
new_xml.seek(0)
stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
self._font = TTFont(self._infile)
self._font.importXML(new_xml, progress=False)
self._font.save(fileish)
sys.stdout = stdout
new_xml.free()
# I hate camelcased function names, but that's what TTFont uses :-(
def getName(self, nameID, platformID, platEncID, langID, write=False):
for n in self.names:
if n.nameID == nameID and n.platformID == platformID and \
n.platEncID == platEncID and n.langID == langID:
return n
if write:
new = TTNameRecord(self._name, nameID, platformID, platEncID, langID)
return new
else:
return None
def getSection(self, platformID, platEncID, langID):
for n in self.names:
if platformID == n.platformID and platEncID == n.platEncID and langID == n.langID:
yield n
def getNameFromAll(self, nameID):
for n in self.names:
if nameID == n.nameID:
yield n
def getNamesBySection(self):
"""returns a mapping of names keyed by section information"""
result = {}
for n in self.names:
sd = SectionData(n.platformID, n.platEncID, n.langID)
if sd not in result.iterkeys():
result[sd] = []
result[sd].append(n)
return result
|
UTF-8
|
Python
| false | false | 6,636 |
py
| 9 |
table.py
| 7 | 0.627637 | 0.625226 | 0 | 187 | 34.465241 | 94 |
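Reading a record with the class in the row above uses the standard OpenType name-table IDs: nameID 4 is the full font name, and (3, 1, 0x409) selects the Windows platform, Unicode encoding, US-English section. A hypothetical usage sketch (the font path is made up):

table = TTNameTable('SomeFont.ttf')   # hypothetical font file
rec = table.getName(4, 3, 1, 0x409)   # full name, Windows/Unicode/en-US
if rec is not None:
    print(rec.string)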
AleksandrVed/P
| 5,798,205,850,302 |
6c66aa922e0c7e7a3f22056e59ebdcc2893ca5f0
|
e3b9e82e0e16f3f39fb3ea22f9103a53d86a45e7
|
/lab3.py
|
546538747768973cb13bc7430299cbd78d611bf2
|
[] |
no_license
|
https://github.com/AleksandrVed/P
|
68772e68d7b4cb506ded9a7a19874e96f4dcb27f
|
700aa013908adcd4b958b016fcfe79be09bcfe6d
|
refs/heads/master
| 2020-07-23T15:15:06.448284 | 2020-01-13T14:48:23 | 2020-01-13T14:48:23 | 207,607,569 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import math
Exit = 0
while Exit == 0:
    print("Enter a")
    a = float(input())
    print("Enter the lower bound of x")
    x1 = float(input())
    print("Enter the upper bound of x")
    x2 = float(input())
    print("Enter x")
    x = float(input())
    print("Enter the step for x")
    Step = float(input())
    print("Choose a function number, 1-3")
    ch = int(input())
    if ch == 1:
        while x1 <= x and x <= x2:
            g1 = (3*a*a - 7*a*x + 4*x*x)
            if g1 != 0:
                g = (-7)*(20*a*a + 11*a*x + 45*x*x)/(3*a*a - 7*a*x + 4*x*x)
                print('G = ' + str(g))
            else:
                print("The values do not belong to the domain of the function.")
            x = x + Step
    elif ch == 2:
        while x1 <= x and x <= x2:
            f1 = 60*a*a + 88*a*x - 21*x*x
            if math.cos(f1) != 0:
                f = math.tan(f1)
                print('F = ' + str(f))
            else:
                print("The values do not belong to the domain of the function.")
            x = x + Step
    elif ch == 3:
        while x1 <= x and x <= x2:
            y1 = (-40)*a*a + 3*a*x + x*x + 1
            if y1 > 0:
                Y = math.log(y1)/math.log(2)
                print('Y = ' + str(Y))
            else:
                print("The values do not belong to the domain of the function.")
            x = x + Step
    else:
        print("No such function")
    print("Exit the program? (1 - yes, 0 - no)")
    Exit = int(input())
|
UTF-8
|
Python
| false | false | 1,791 |
py
| 13 |
lab3.py
| 8 | 0.426471 | 0.38491 | 0 | 47 | 32.276596 | 77 |
yjiao/codeforces_predict
| 5,815,385,744,917 |
8937b8d28522346a462bcb8915001171876d454a
|
8c5ae64b6cfa2433b1d493e0f5f7b7c43edd5a06
|
/linear_regression/generate_features_OLS.py
|
3419c877bc335a425432cd9c13ea332909bf64ab
|
[] |
no_license
|
https://github.com/yjiao/codeforces_predict
|
0671ea0f9148be4c5aa223d977024ec755c59d76
|
f155cf48a46867210c0d0381438f6edab057e763
|
refs/heads/master
| 2021-01-21T14:49:22.868118 | 2017-06-29T18:12:59 | 2017-06-29T18:12:59 | 95,341,176 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# performs feature engineering for linear regression
# looks at user characteristics in the last 30 days as
# well as historical behaviors
import pandas as pd
import numpy as np
import psycopg2
from collections import Counter
# ---------------------------------------------
# constants
secinday = 3600.0*24.0  # seconds per day
# --------------------------------------------------------
# UTILITY FUNCTIONS
# --------------------------------------------------------
def get_binary_dictionary():
# ---------------------------------------------
# some hard-coded groupings of sparse features
# languages
java = [ 'Java 6', 'Java 8', 'Java 8 ZIP', 'Java 7' ]
python = [ 'Python 3', 'Python 2', 'PyPy 2', 'PyPy 3']
lowlevel = [ 'GNU C++14', 'GNU C', 'MS C++', 'GNU C++', 'GNU C++0x', 'MS C#', 'GNU C11', 'GNU C++11 ZIP', 'GNU C++11', 'Mono C#', 'Delphi', ]
# verdicts
errors = [ "COMPILATION_ERROR", "RUNTIME_ERROR", "CRASHED", "REJECTED", "IDLENESS_LIMIT_EXCEEDED"]
wrong = [ "TIME_LIMIT_EXCEEDED", "WRONG_ANSWER", "CHALLENGED", "MEMORY_LIMIT_EXCEEDED" ]
    # practice modes
practice = [ 'GYM', 'OUT_OF_COMPETITION', 'VIRTUAL', 'PRACTICE' ]
bindict = {}
for j in java:
bindict[j] = 'java'
for p in python:
bindict[p] = 'python'
for c in lowlevel:
bindict[c] = 'lowlevel'
for v in errors:
bindict[v] = 'error'
for v in wrong:
bindict[v] = 'wrong'
for v in practice:
bindict[v] = 'practice'
bindict['OK'] = 'ok'
bindict['CONTESTANT'] = 'contestant'
return bindict
def get_categorical_variables( colnames, cur, con):
catvars = []
for c in colnames:
cur.execute("select * from %s" % c, con)
catvars.extend([c[1] for c in cur.fetchall()])
return catvars
def get_stats_df(df, column, filter_):
df = df[column][filter_]
mean = np.mean(df)
max_ = np.max(df)
tot = np.sum(df)
std = np.std(df)
return (mean, max_, tot, std)
def get_stats_col(col):
mean = np.mean(col)
max_ = np.max(col)
min_ = np.min(col)
tot = np.sum(col)
std = np.std(col)
return (mean, max_, min_, tot, std)
def remove_nonrelevant_months(data, month=3):
# remove information for other months
for m in range(1,6):
if m == month:
continue
name1 = "delta_smoothed_%dmonths" % m
name2 = "smoothed_%dmonths" % m
data.drop([name1, name2], axis=1, inplace=True)
def prob_solve(user_rating, problem_rating):
return 1.0 / ( 1 + 10 ** ((problem_rating - user_rating ) / 400.0) )
# --------------------------------------------------------
# MAIN FUNCTIONS
# --------------------------------------------------------
def get_features(dfw, prevcontest, nextcontest, prevtags, tags, prevcontestdf, month=3):
rating_column = 'smoothed_%dmonths' % month
change_column = 'delta_smoothed_%dmonths' % month
dfw.fillna(value=0, inplace=True)
# --------------------------------------------------------------------
# number of wrong problems
# num problems wrong in practice
mean_wrong_contest,\
max_wrong_contest,\
total_wrong_contest,\
std_wrong_contest = get_stats_df(dfw, 'wrong', dfw.contestant > 0)
# num problems wrong in contest
mean_wrong_practice,\
max_wrong_practice,\
total_wrong_practice,\
std_wrong_practice = get_stats_df(dfw, 'wrong', dfw.practice > 0)
# --------------------------------------------------------------------
# number of error
# num problems error in practice
mean_error_contest,\
max_error_contest,\
total_error_contest,\
std_error_contest = get_stats_df(dfw, 'error', dfw.contestant > 0)
# num problems error in contest
mean_error_practice,\
max_error_practice,\
total_error_practice,\
std_error_practice = get_stats_df(dfw, 'error', dfw.practice > 0)
# --------------------------------------------------------------------
# difference between user rating and problem rating
dfw['ratingdiff'] = dfw['problem_rating'] - prevcontest[rating_column]
mean_ratingdiff_contest,\
max_ratingdiff_contest, _,\
std_ratingdiff_contest = get_stats_df(dfw, 'ratingdiff', dfw.contestant > 0)
mean_ratingdiff_practice,\
max_ratingdiff_practice, _,\
std_ratingdiff_practice = get_stats_df(dfw, 'ratingdiff', dfw.practice > 0)
# --------------------------------------------------------------------
# probability of solving question
dfw['probability'] = dfw['problem_rating'].apply( lambda x: prob_solve(prevcontest[rating_column], x ) )
mean_probability_contest,\
max_probability_contest,\
min_probability_contest,\
std_probability_contest = get_stats_df(dfw, 'probability', dfw.contestant > 0)
mean_probability_practice,\
max_probability_practice,\
min_probability_practice,\
std_probability_practice = get_stats_df(dfw, 'probability', dfw.practice > 0)
# --------------------------------------------------------------------
# unlikely solves
n_unlikely02 = sum(dfw.probability <= 0.2)
n_unlikely01 = sum(dfw.probability <= 0.1)
# --------------------------------------------------------------------
# probability of performance on contest
g = prevcontestdf.groupby(['contestid', 'problemid'])
performance = 1.0
usr_rating = prevcontest[rating_column]
for k, v in g:
cid = k[0]
pid = k[1]
pr = dfw.loc[
np.logical_and(
dfw.contestid == cid,
dfw.problemid == pid
), 'problem_rating'
].values[0]
if pr == -1:
continue
if "OK" in v.verdict.values:
performance *= prob_solve(usr_rating, pr)
else:
ptmp = (1 - prob_solve(usr_rating, pr))
performance *= ptmp
# --------------------------------------------------------------------
# Time between solves
timediff = dfw.starttimeseconds - np.roll(dfw.starttimeseconds, 1)
timediff = timediff[1:] / 3600.0 / 24.0
# (mean, max_, min_, tot, std)
mean_timediff, max_timediff, min_timediff, _, std_timediff = get_stats_col(timediff)
# --------------------------------------------------------------------
# Time between first submit and solves
# (mean, max_, min_, tot, std)
mean_solvetime,\
max_solvetime,\
min_solvetime,\
tot_solvetime,\
std_solvetime = get_stats_col(dfw.hours_submit_to_solve)
# --------------------------------------------------------------------
# num problems solved > threshold
# (mean, max_, min_, tot, std)
if 'solvetimeseconds' not in dfw.columns:
n100 = 0
n200 = 0
n300 = 0
n400 = 0
n500 = 0
n_solved = 0
else:
_, _, _, n100, _ = get_stats_col(np.logical_and(dfw.ratingdiff >= 100, dfw.solvetimeseconds > 0))
_, _, _, n200, _ = get_stats_col(np.logical_and(dfw.ratingdiff >= 200, dfw.solvetimeseconds > 0))
_, _, _, n300, _ = get_stats_col(np.logical_and(dfw.ratingdiff >= 300, dfw.solvetimeseconds > 0))
_, _, _, n400, _ = get_stats_col(np.logical_and(dfw.ratingdiff >= 400, dfw.solvetimeseconds > 0))
_, _, _, n500, _ = get_stats_col(np.logical_and(dfw.ratingdiff >= 500, dfw.solvetimeseconds > 0))
n_solved = sum(dfw.solvetimeseconds > 0)
# --------------------------------------------------------------------
# tags
# number of new tags
newtags = set(tags.keys()).difference(prevtags.keys())
total_newtags = 0
unique_newtags = len(newtags)
for nt in newtags:
total_newtags += tags[nt]
alltags = prevtags.keys()
alltags.extend(tags.keys())
unique_tags_total = len(set( alltags ))
# --------------------------------------------------------------------
# rate of change
dr = prevcontest[change_column]
dt = max(dfw.starttimeseconds) - prevcontest.starttimeseconds
drdt = (dr + 0.0) / dt
# --------------------------------------------------------------------
# languages
langcounts = np.sum(dfw[['java', 'python', 'lowlevel']]) > 0
# --------------------------------------------------------------------
features = {
"n_solved" : n_solved,
"mean_wrong_contest" : mean_wrong_contest,
"max_wrong_contest" : max_wrong_contest,
"total_wrong_contest" : total_wrong_contest,
"std_wrong_contest" : std_wrong_contest,
"mean_wrong_practice" : mean_wrong_practice,
"max_wrong_practice" : max_wrong_practice,
"total_wrong_practice" : total_wrong_practice,
"std_wrong_practice" : std_wrong_practice,
"mean_error_contest" : mean_error_contest,
"max_error_contest" : max_error_contest,
"total_error_contest" : total_error_contest,
"std_error_contest" : std_error_contest,
"mean_error_practice" : mean_error_practice,
"max_error_practice" : max_error_practice,
"total_error_practice" : total_error_practice,
"std_error_practice" : std_error_practice,
"mean_ratingdiff_contest" : mean_ratingdiff_contest,
"max_ratingdiff_contest" : max_ratingdiff_contest,
"std_ratingdiff_contest " : std_ratingdiff_contest,
"mean_ratingdiff_practice" : mean_ratingdiff_practice,
"max_ratingdiff_practice" : max_ratingdiff_practice,
"std_ratingdiff_practice" : std_ratingdiff_practice,
"mean_timediff" : mean_timediff,
"max_timediff" : max_timediff,
"min_timediff" : min_timediff,
"std_timediff" : std_timediff,
"mean_solvetime" : mean_solvetime,
"max_solvetime" : max_solvetime,
"min_solvetime" : min_solvetime,
"tot_solvetime" : tot_solvetime,
"std_solvetime" : std_solvetime,
"mean_probability_contest" : mean_probability_contest,
"max_probability_contest" : max_probability_contest,
"min_probability_contest" : min_probability_contest,
"std_probability_contest" : std_probability_contest,
"mean_probability_practice": mean_probability_practice,
"max_probability_practice" : max_probability_practice,
"min_probability_practice" : min_probability_practice,
"std_probability_practice" : std_probability_practice,
"n100" : n100,
"n200" : n200,
"n300" : n300,
"n400" : n400,
"n500" : n500,
"unique_newtags" : unique_newtags,
"total_newtags" : total_newtags,
"unique_tags_total" : unique_tags_total,
'performance' : performance,
'java' : int(langcounts['java']),
'python' : int(langcounts['python']),
'lowlevel' : int(langcounts['lowlevel']),
'n_unlikely01' : n_unlikely01,
'n_unlikely02' : n_unlikely02,
"drdt" : drdt
}
return features
def get_df_problem(df, subdict, prevcontest, nextcontest, dfrat, df_prate, df_tags, bindict, month=3):
trainlist = []
# --------------------------------------------------
# per-problem features
gprob = df.groupby(['contestid', 'problemid'])
for k, v in gprob:
cid = k[0]
pid = k[1]
ex = dict()
# generic problem info
ex['points'] = v.points.values[0]
ex['problemid'] = v.problemid.values[0]
ex['contestid'] = v.contestid.values[0]
# ----------------------------------
# user rating info
# find closest PREVIOUS contest
# if there is no next contest,then skip this entry
ex['starttimeseconds'] = min(v.starttimeseconds)
ex['stoptimeseconds'] = max(v.starttimeseconds)
# ----------------------------------
# problem rating and probability of solving
if (cid, pid) in df_prate.index:
ex['problem_rating'] = df_prate.loc[cid, pid].values[0]
else:
ex['problem_rating'] = -1
# ----------------------------------
# time to solves
solvetime = v.loc[v.verdict=='OK', 'starttimeseconds']
if len(solvetime) > 0:
ex['solvetimeseconds'] = min(solvetime)
ex['hours_submit_to_solve'] = (ex['solvetimeseconds'] - ex['starttimeseconds']) / 3600.0
else:
# some problems were never solved
ex['hours_submit_to_solve'] = -1
# -----------------------------------------------------------------------
# tags
idx_tag = (cid, pid)
if idx_tag in df_tags.index:
for t in df_tags.loc[idx_tag]['tag'].values:
ex[t] = 1
# -----------------------------------------------------------------------
# binary variables that should be grouped
# languages
lang = v['language'].values[0]
if lang in bindict:
ex[bindict[ lang ]] = 1
# verdicts
vcnt = v.verdict.value_counts()
vdict = vcnt.to_dict()
for key, val in vdict.iteritems():
if key in bindict:
ex[ bindict[ key ] ] = val
# participant type
pcnt = v.participanttype.value_counts()
pdict = pcnt.to_dict()
for t in pdict.iterkeys():
if t in bindict:
ex[ bindict[t] ] = 1
# add in any missing binary variables
for bincol in bindict.itervalues():
if bincol not in ex:
ex[bincol] = np.nan
trainlist.append(ex)
df_problems = pd.DataFrame.from_dict(trainlist)
return df_problems
def summarize(qfront, qback, subdict, prevcontest, nextcontest, dfrat, df_prate, df_tags, bindict, prevtags, month=3):
df = pd.DataFrame.from_dict(subdict[qfront:qback+1])
pcid = dfrat.loc[prevcontest]['contestid']
#print "PREV:", pcid
idx = np.logical_and(
df.participanttype == 'CONTESTANT',
df.contestid == str(pcid)
)
prevcontestdf = df.loc[idx]
df_problems = get_df_problem(df, subdict, prevcontest, nextcontest, dfrat, df_prate, df_tags, bindict)
# grab new tags
tagkey = df_problems[['contestid', 'problemid']].values
tags = []
for tk in tagkey:
for t in df_tags.loc[tk].values:
tags.append(t[0])
tags = Counter(tags)
features = get_features(df_problems, dfrat.loc[prevcontest], dfrat.loc[nextcontest], prevtags, tags, prevcontestdf )
prevrec = dfrat.loc[prevcontest]
prevrec = prevrec.to_dict()
prevrec.pop('index')
features.update(prevrec)
return tags, features
def parse_user(handle, con, df_smooth, df_prate, df_tags, bindict):
# --------------------------
# per user
dfsub = pd.read_sql("select * from submissions where handle='%s'" % handle, con)
dfsub.is_copy = False
dfsub['type'] = 'problem'
dfrat = df_smooth.loc[df_smooth.handle == handle]
dfrat.is_copy = False
dfsub.sort_values('starttimeseconds', inplace=True)
dfrat.sort_values('starttimeseconds', inplace=True)
dfsub.reset_index(inplace=True)
dfrat.reset_index(inplace=True)
dfsub['index'] = dfsub.index
dfrat['index'] = dfrat.index
subdict = dfsub.to_dict(orient='records')
ratdict = dfrat.to_dict(orient='records')
idx = dfsub[['starttimeseconds', 'type', 'index']]
idx = idx.to_dict(orient="records")
idx_rat = dfrat[['starttimeseconds', 'type', 'index']]
idx_rat = idx_rat.to_dict(orient="records")
idx.extend(idx_rat)
idx.sort(key = lambda x: x['starttimeseconds'])
# note we are using a list as a queue since the python Queue class has heavy overhead
# including locks, which we do not need
qfront = 0
qback = 0
cutoff = 30
prevcontest = -1
prevtags = {}
trainlist = []
qempty = False
# historical totals
n_contests = 0
n_problems = 0
for i in idx:
if qfront >= len(subdict):
break
if i['type'] == 'problem':
qback += 1
n_problems += 1
else:
assert(i['type'] == 'contest')
# found contest
curtime = i['starttimeseconds']
days_elapsed = (curtime - subdict[qfront]['starttimeseconds']) / secinday
# remove problems that occurred too long ago
while days_elapsed > cutoff:
qfront += 1
if qfront >= len(subdict):
qempty = True
break
days_elapsed = (curtime - subdict[qfront]['starttimeseconds']) / secinday
if qempty:
break
# process previous problems solved
if prevcontest == -1:
tags, features = summarize(qfront, qback, subdict, i['index'], i['index'], dfrat, df_prate, df_tags, bindict, prevtags)
else:
tags, features = summarize(qfront, qback, subdict, prevcontest, i['index'], dfrat, df_prate, df_tags, bindict, prevtags)
features['total_contests'] = n_contests
features['total_problems'] = n_problems
n_contests += 1
trainlist.append(features)
prevtags.update(Counter(tags))
prevcontest = i['index']
ret = pd.DataFrame.from_dict(trainlist)
return ret
if __name__ == "__main__":
con = psycopg2.connect(database='codeforces', user='Joy')
cur = con.cursor()
# smoothed user ratings
df_smooth = pd.read_csv('user_ratings_smoothed.csv', engine = 'c')
df_smooth['type'] = 'contest'
df_smooth['starttimeseconds'] = df_smooth['ratingupdatetimeseconds']
df_smooth.drop('contestname', axis=1, inplace=True)
df_smooth.drop('time', axis=1, inplace=True)
# problem ratings
df_prate = pd.read_sql("SELECT * FROM problem_rating", con)
df_prate.set_index(['contestid', 'problemid'], inplace=True)
# problem tags
df_tags = pd.read_sql("SELECT * FROM tags", con)
df_tags.set_index(['contestid', 'problemid'], inplace=True)
df_tags.sort_index(inplace=True)
    present_handles = sorted(set(df_smooth.handle))  # sorted list so it can be sliced below
bindict = get_binary_dictionary()
lastidx = 143
for i, user in enumerate(present_handles[lastidx:]):
print lastidx + i, user
user_rating = df_smooth.loc[df_smooth.handle == user]
data = parse_user(user, con, df_smooth, df_prate, df_tags, bindict)
data.to_csv("ols_train/%s.csv" % user, index=False)
|
UTF-8
|
Python
| false | false | 19,074 |
py
| 48 |
generate_features_OLS.py
| 14 | 0.527734 | 0.5162 | 0 | 504 | 36.845238 | 156 |
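prob_solve in the row above is the standard Elo expectation formula; a worked value (with prob_solve in scope):

# A user rated 1600 facing an 1800-rated problem:
# 1 / (1 + 10**((1800 - 1600) / 400.0)) = 1 / (1 + 10**0.5) ~= 0.24
print(prob_solve(1600, 1800))   # ~0.2402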
ksurct/ksurct-robot-2018
| 7,627,861,940,945 |
d436c3a309d4e295c1a808cf9e6a911963850d3f
|
028790381ab0287e4de99787a5e603922c9a1b70
|
/ksurobot/main.py
|
263169d47a5ad4ed1e5700bc230f9c5224405f08
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/ksurct/ksurct-robot-2018
|
289117589b2088b7283ef9f226823a16231fc533
|
98fbf6bc83c0252357e8808a58e2cf9d077aa172
|
refs/heads/master
| 2021-07-08T22:38:23.450840 | 2019-04-16T22:39:51 | 2019-04-16T22:39:51 | 104,816,023 | 9 | 0 | null | false | 2018-04-20T20:48:46 | 2017-09-26T00:32:43 | 2018-04-11T00:21:22 | 2018-04-20T20:48:46 | 99 | 7 | 0 | 0 |
Python
| false | null |
''' main.py
Starts the program
'''
import asyncio
import websockets
import logging
from server import Server
from robot import Robot
from settings import *
def main():
''' Main Entrance to the program '''
# Setup Logging
logging.basicConfig(format='%(name)s: %(levelname)s: %(asctime)s: %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
# Get the event loop to work with
loop = asyncio.get_event_loop()
# Setup Robot
robot = Robot()
server = Server(SERVER_IP, SERVER_PORT, robot, timeout=SERVER_TIMEOUT)
try:
# Main event loop
loop.run_until_complete(server.start_server())
loop.run_forever()
except KeyboardInterrupt:
logger.info('Keyboard Interrupt. Closing...')
finally:
# Shutdown the server
task = asyncio.ensure_future(server.shutdown())
loop.run_until_complete(task)
# Close the loop
loop.close()
logger.info('Event loop closed')
if __name__ == '__main__':
main()
|
UTF-8
|
Python
| false | false | 1,042 |
py
| 20 |
main.py
| 17 | 0.629559 | 0.629559 | 0 | 48 | 20.708333 | 103 |
davidbegin/python-in-the-morning
| 4,260,607,593,226 |
54e8957e857d2971920a50c09252cd0e5e413687
|
420d4cf595fc8f28be0415aec70a4087e157555c
|
/Fluent_Python/Day48/wait_trails.py
|
df1322bf49d304846d5cad9c7380f0aa4464ecc3
|
[] |
no_license
|
https://github.com/davidbegin/python-in-the-morning
|
8cf89e62e7e8a2df5b8f875aae3cc7815545ad61
|
aa4a271d1df0ce0a82d776c0955c1f20deb50937
|
refs/heads/master
| 2020-09-28T09:55:50.723066 | 2020-08-06T01:17:24 | 2020-08-06T01:17:24 | 226,753,142 | 23 | 3 | null | false | 2020-03-04T06:36:30 | 2019-12-09T00:31:09 | 2020-03-04T03:45:02 | 2020-03-04T03:45:00 | 76,436 | 10 | 1 | 7 |
Python
| false | false |
print('\033c')
daily_fact = "\033[35m61 Years ago on 1959 - 2 - 6\n\n\t\033[97mJack Kilby of Texas Instruments files the first patent for an integrated circuit."
print(f"\t\033[35;1;6m{daily_fact}\033[0m\n")
from dis import dis
import opcode
import asyncio
import random
import time
async def part1(n: int) -> str:
print(f"\t\tenter #part1 {n}")
await asyncio.sleep(n)
print(f"\t\t\tdone sleeping in #part1 - {n}")
result = f"part1: {n}"
return result
async def part2(n: int, arg: str) -> str:
print(f"\t\tenter #part2 {n}")
await asyncio.sleep(n)
print(f"\t\t\tdone sleeping in #part2 - {n}")
result = f"part2: {n}"
return result
async def chain(n: int) -> None:
print("\tEnter #chain")
p1 = await part1(n)
print(f"\033[95mresult of p1 - n: {n} {p1}\033[0m")
p2 = await part2(n, p1)
print(f"\033[96mresult of p2 - {n}: {p2}\033[0m")
async def main(*args):
print("Enter #main")
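    # gather() runs all chains concurrently, so total runtime tracks the
    # slowest chain (roughly 2 * max(args) seconds here, since each chain
    # sleeps n seconds in part1 and again in part2) rather than the sum.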
await asyncio.gather(*(chain(n) for n in args))
if __name__ == "__main__":
import sys
random.seed(444)
args = [1, 2, 3] if len(sys.argv) == 1 else map(int, sys.argv[1:])
start = time.perf_counter()
# asyncio.run is standard way to quick off a async program
# pass it async function that will handle all the tasks
asyncio.run(main(*args))
end = time.perf_counter() - start
# print(f"Program finished in {end:0.2f} seconds.")
|
UTF-8
|
Python
| false | false | 1,427 |
py
| 573 |
wait_trails.py
| 220 | 0.621584 | 0.567624 | 0 | 58 | 23.568966 | 146 |
pangl/keyword_driven_proj
| 8,177,617,740,356 |
6d5a305108c7ed1bb5f2eeac3a1c5d43398f2061
|
891ee01311f28a900cf6939cafe518446d5a23db
|
/keyword_driven_proj/Util/GetConfig.py
|
17378d3dfa21c7425fa3e669ceeaeb14dddbda90
|
[] |
no_license
|
https://github.com/pangl/keyword_driven_proj
|
f79a0eed9993fe02dcee9d32fa3a09bb2d2317c6
|
4343a3ebf7f973d02d00f08097060c716adce66c
|
refs/heads/master
| 2020-04-28T08:14:53.575321 | 2019-03-12T02:37:20 | 2019-03-12T02:37:20 | 175,119,851 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import configparser
class Config(object):
def __init__(self,config_file_path):
self.config_file_path = config_file_path
self.config = configparser.ConfigParser()
self.config.read(self.config_file_path)
def get_all_sections(self):
return self.config.sections()
def get_option(self,section_name,option_name):
value = self.config.get(section_name,option_name)
return value
def all_section_items(self,section_name):
items = self.config.items(section_name)
print(items)
return dict(items)
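# Usage sketch (hypothetical file/section/option names):
#   cfg = Config('settings.ini')
#   host = cfg.get_option('database', 'host')
#   items = cfg.all_section_items('database')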
|
UTF-8
|
Python
| false | false | 597 |
py
| 7 |
GetConfig.py
| 6 | 0.634841 | 0.634841 | 0 | 20 | 27.85 | 57 |
ab/rent
| 962,072,694,408 |
e2ac8b326a1a653531c2d298198fa153b8b6eb3d
|
7e1390deb97597cf20861d0b7a3908a6b2dde1d4
|
/rent.py
|
43ca4adaceac45decfdcbcd8263ed85128862b99
|
[] |
no_license
|
https://github.com/ab/rent
|
165b5dd9bf7ffbeb9bcd3ffb7c44c45f5002684d
|
f1b45df66e3cd1c254d8510ebf737fd443fc779f
|
refs/heads/master
| 2020-04-24T16:20:25.634591 | 2015-10-24T23:03:02 | 2015-10-24T23:06:42 | 18,027,639 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
"""
usage: rent.py YAML_FILE [dry|real]
Send a rent reminder email based on data from YAML_FILE.
If dry is passed, no email will actually be sent.
"""
import smtplib
import sys
import yaml
from datetime import date, timedelta
def first_next_month(start):
if start.month == 12:
return date(start.year + 1, 1, 1)
else:
return date(start.year, start.month + 1, 1)
class NoUtilityInfo(KeyError):
pass
def usage():
sys.stderr.write(__doc__.lstrip())
class RentComponent(object):
def __init__(self, name, total, divided_among):
self.name = name
self.total = total
self.divided_among = divided_among
if divided_among > 1:
self.share = float(total) / divided_among
elif divided_among == 1:
self.share = total
else:
raise ValueError('Unexpected divided_among: %r' % divided_among)
def __str__(self):
if self.divided_among == 1:
return '{}: {:.2f}'.format(self.name, self.total)
else:
return '{}: {:.2f} = {:.2f} / {}'.format(
self.name, self.share, self.total, self.divided_among)
class RentReminder(object):
def __init__(self, config_file, rent_date=None, smtp_server='localhost',
dry_run=True):
self.config = self.load_config(config_file)
if rent_date is None:
self.today = date.today()
else:
self.today = rent_date
self.due_date = first_next_month(self.today)
self.smtp_server = smtp_server
self.dry_run = dry_run
def load_config(self, filename):
return yaml.safe_load(open(filename, 'r'))
def rent_for(self, name):
"""Return the current rent for a given person"""
return self.due_date_rents()[name]
def utility_info(self):
utilities = self.config['utilities']
try:
return utilities[self.today.year][self.today.month]
except KeyError:
month = self.today.strftime('%Y-%m')
raise NoUtilityInfo('No utility info for ' + month)
def utility_info_share(self):
return dict((k, float(v) / self.num_payers()) for k, v in
self.utility_info().iteritems())
def num_payers(self):
return len(self.config['people']) + 1
def total_for(self, name):
return sum(p.share for p in self.parts_for(name))
def all_rents(self):
return self.config['rent']
def due_date_rents(self):
"""Return a dict mapping name to rent for the due date."""
if not hasattr(self, '_due_date_rents'):
self._due_date_rents = self.rents_as_of(self.due_date)
return self._due_date_rents
def rents_as_of(self, rent_date):
"""
Return a dict mapping name to rent for the given date.
"""
if rent_date is None:
rent_date = date.today()
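        # Rent entries are assumed sorted ascending by 'since'; scanning in
        # reverse returns the most recent split in effect on rent_date.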
for rent_hash in reversed(self.all_rents()):
if rent_date >= rent_hash['since']:
print 'Using rent as of', rent_hash['since']
return rent_hash['splits']
raise KeyError('Cannot find rent for %r' % rent_date)
def due_month_name(self):
return self.due_date.strftime('%B')
def parts_for(self, name):
"""Return a list of RentComponent objects for person `name`."""
parts = []
parts.append(RentComponent('Rent', self.rent_for(name), 1))
for k, v in sorted(self.utility_info().iteritems()):
parts.append(RentComponent(k, float(v), self.num_payers()))
return parts
def payment_links_for(self, name, total):
links = []
person_config = self.config['people'][name]
if person_config.get('paypal_me'):
links.append('https://www.paypal.me/{}/{:.2f}'.format(
self.config['payment_links']['paypal'], total))
if person_config.get('square_me'):
links.append('https://cash.me/{}/{:.2f}'.format(
self.config['payment_links']['square'], total))
return links
def email_for(self, name):
person_config = self.config['people'][name]
from_address = self.config['email']['from']
to_address = person_config['email']
recipients = [to_address]
headers = [
('From', from_address),
('To', to_address),
]
if person_config['cc']:
headers.append(('Cc', person_config['cc']))
recipients.append(person_config['cc'])
recipients.append(self.config['email']['bcc'])
parts = self.parts_for(name)
total = round(sum(p.share for p in parts), 2)
assert total == round(self.total_for(name), 2) # useless double check
subject = "{} rent is ${:.2f}".format(self.due_month_name(), total)
headers.append(('Subject', subject))
header_block = '\n'.join(name + ': ' + val for name, val in headers)
parts_block = '\n'.join(str(p) for p in parts)
parts_block += '\n' + '=' * 14
parts_block += '\nTotal: {:.2f}'.format(total)
links = self.payment_links_for(name, total)
if links:
parts_block += '\n\n' + '\n'.join(links)
return {
'from': from_address,
'recipients': recipients,
'message': header_block + '\n\n' + parts_block + '\n',
}
def send_email_for(self, name):
print '==='
print 'Generating email for {}'.format(name)
data = self.email_for(name)
for i in ['from', 'recipients', 'message']:
print i + ':', repr(data[i])
self.send_email(data['from'], data['recipients'], data['message'])
def send_email(self, from_address, recipients, data):
if self.dry_run:
print 'Not sending email due to dry run'
for line in data.split('\n'):
print '| ' + line
return
s = smtplib.SMTP(self.smtp_server)
s.set_debuglevel(1)
s.sendmail(from_address, recipients, data)
s.quit()
print 'Sent email'
def send_all_email(self):
for name in self.config['people'].keys():
self.send_email_for(name)
if __name__ == '__main__':
if len(sys.argv) < 3:
usage()
sys.exit(1)
if sys.argv[2] == 'dry':
dry_run = True
elif sys.argv[2] == 'real':
dry_run = False
else:
usage()
sys.exit(1)
r = RentReminder(sys.argv[1], dry_run=dry_run)
r.send_all_email()
|
UTF-8
|
Python
| false | false | 6,599 |
py
| 1 |
rent.py
| 1 | 0.555387 | 0.550841 | 0 | 221 | 28.855204 | 77 |
huangcun666/pyproject
| 7,516,192,786,469 |
bd3d55c12384dcbae9f4f0266cfc35d25828fa0c
|
96fec7cd18e12fc11770de2585b13908e6c83b1c
|
/tornado-boilerplate-master/buedafab/deploy/types.py
|
6c37cb5327e67a28646dc491142f113d3f1bcaf6
|
[
"MIT"
] |
permissive
|
https://github.com/huangcun666/pyproject
|
e253a10891432bca631a24b05ce5329c47377a2e
|
30e023c9df88be33a01d477c8300fd8f95f1afec
|
refs/heads/master
| 2022-12-03T19:41:24.226334 | 2018-04-09T01:17:05 | 2018-04-09T01:17:05 | 128,696,272 | 0 | 1 | null | false | 2022-10-31T17:49:24 | 2018-04-09T01:09:42 | 2018-04-09T01:18:28 | 2018-04-09T01:18:12 | 29,902 | 0 | 1 | 1 |
Python
| false | false |
"""Deploy commands for applications following Bueda's boilerplate layouts."""
from fabric.api import warn, cd, require, local, env, settings, abort
from fabric.colors import green, red
import os
from buedafab.operations import run, put, chmod
from buedafab import celery, db, tasks, notify, testing, utils
from buedafab import deploy
def _git_deploy(release, skip_tests):
starting_branch = utils.branch()
print(green("Deploying from git branch '%s'" % starting_branch))
# Ideally, tests would run on the version you are deploying exactly.
# There is no easy way to require that without allowing users to go
# through the entire tagging process before failing tests.
if not skip_tests and testing.test():
abort(red("Unit tests did not pass -- must fix before deploying"))
local('git push %(master_remote)s' % env, capture=True)
deploy.release.make_release(release)
require('pretty_release')
require('path')
require('hosts')
print(green("Deploying version %s" % env.pretty_release))
put(os.path.join(os.path.abspath(os.path.dirname(__file__)),
'..', 'files', 'ssh_config'), '.ssh/config')
deployed = False
hard_reset = False
deployed_versions = {}
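    # Map each release folder's `git describe` output to its path, so we can
    # tell whether the requested version is already present on this host.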
deploy.release.bootstrap_release_folders()
for release_path in env.release_paths:
with cd(os.path.join(env.path, env.releases_root, release_path)):
deployed_versions[run('git describe')] = release_path
print(green("The host '%s' currently has the revisions: %s"
% (env.host, deployed_versions)))
if env.pretty_release not in deployed_versions:
env.release_path = os.path.join(env.path, env.releases_root,
deploy.release.alternative_release_path())
with cd(env.release_path):
run('git fetch %(master_remote)s' % env, forward_agent=True)
run('git reset --hard %(release)s' % env)
deploy.cron.conditional_install_crontab(env.release_path, env.crontab,
env.deploy_user)
deployed = True
else:
warn(red("%(pretty_release)s is already deployed" % env))
env.release_path = os.path.join(env.path, env.releases_root,
deployed_versions[env.pretty_release])
with cd(env.release_path):
run('git submodule update --init --recursive', forward_agent=True)
hard_reset = deploy.packages.install_requirements(deployed)
deploy.utils.run_extra_deploy_tasks(deployed)
local('git checkout %s' % starting_branch, capture=True)
chmod(os.path.join(env.path, env.releases_root), 'g+w', use_sudo=True)
return deployed, hard_reset
def default_deploy(release=None, skip_tests=None):
"""Deploy a project according to the methodology defined in the README."""
require('hosts')
require('path')
require('unit')
env.test_runner = testing.webpy_test_runner
utils.store_deployed_version()
deployed, hard_reset = _git_deploy(release, skip_tests)
deploy.release.conditional_symlink_current_release(deployed)
tasks.restart_webserver(hard_reset)
with settings(warn_only=True):
notify.hoptoad_deploy(deployed)
notify.campfire_notify(deployed)
webpy_deploy = default_deploy
tornado_deploy = default_deploy
def django_deploy(release=None, skip_tests=None):
"""Deploy a Django project according to the methodology defined in the
README.
Beyond the default_deploy(), this also updates and migrates the database,
loads extra database fixtures, installs an optional crontab as well as
celeryd.
"""
require('hosts')
require('path')
require('unit')
require('migrate')
require('root_dir')
env.test_runner = testing.django_test_runner
utils.store_deployed_version()
deployed, hard_reset = _git_deploy(release, skip_tests)
db.update_db(deployed)
db.migrate(deployed)
db.load_data()
deploy.release.conditional_symlink_current_release(deployed)
celery.update_and_restart_celery()
tasks.restart_webserver(hard_reset)
notify.hoptoad_deploy(deployed)
notify.campfire_notify(deployed)
print(green("%(pretty_release)s is now deployed to %(deployment_type)s"
% env))
|
UTF-8
|
Python
| false | false | 4,206 |
py
| 31 |
types.py
| 22 | 0.684498 | 0.684498 | 0 | 106 | 38.679245 | 78 |
an0o0nym/website_monitor
| 8,650,064,149,889 |
c579ea32a5ac3f68c579eb2854a0d87830ea2f75
|
ab1c7c3d78f78ee624e14cbb5441694a4deca5ba
|
/website_monitor/db_utils.py
|
c59fd7a5de0be645762a99610e9739ba98dbe701
|
[
"MIT"
] |
permissive
|
https://github.com/an0o0nym/website_monitor
|
5f031b88c84669bce31391cdc9982e1625ee5f68
|
60a3f76dbe85e9966470a1b009b3f1b02d8acd72
|
refs/heads/master
| 2023-01-05T14:50:21.853828 | 2018-03-09T13:23:10 | 2018-03-09T13:23:10 | 124,247,630 | 1 | 1 |
MIT
| false | 2022-12-26T20:36:19 | 2018-03-07T14:27:01 | 2022-02-08T16:36:39 | 2022-12-26T20:36:18 | 26 | 1 | 1 | 4 |
Python
| false | false |
# -*- coding: utf-8 -*-
"""Database utils module."""
import os
import sqlite3
DB_NAME = 'website_monitor.db'
DB_TABLE_NAME = 'website_checks'
def get_connection():
"""
Helper function used to initiate database connection.
:return: Tuple of two items - sqlite3.Connection class instance
and sqlite3.Cursor class instance
"""
conn, cur = None, None
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), DB_NAME)
try:
conn = sqlite3.connect(path)
except Exception as e:
print("Error while making connection with DB: {}".format(e))
if conn is not None:
cur = conn.cursor()
return (conn, cur)
def create_table():
"""
    Helper function used to populate the database with the necessary table.
:return: None
"""
conn, cur = get_connection()
try:
cur.execute(
'''
CREATE TABLE website_checks (
id INTEGER,
webname TEXT,
url TEXT,
request_time REAL,
status INTEGER,
response_time REAL,
requirements INTEGER,
error TEXT,
PRIMARY KEY(id))
'''
)
conn.commit()
except Exception as e:
print("Error while creating table: {}".format(e))
finally:
conn.close()
def record_insert(webname, url, request_time=None, status=None,
response_time=None, requirements=None, error=None):
"""
Helper function used to create records in the database table.
:param webname: Alias name of the website
:param url: Website URL
:param request_time: Time at which request was made
:param status: Status of HTTP response to specific website
:param response_time: Time to complete whole request
:param requirements: Content requirements for specific website
:param error: Error messages for specific website
:return: None
"""
conn, cur = get_connection()
    if request_time is not None:  # guard against the default of None
        request_time = request_time.strftime("%d-%m-%Y %H:%M:%S")
try:
cur.execute(
'''
INSERT INTO website_checks (
webname, url, request_time, status, response_time,
requirements,error
) VALUES(?,?,?,?,?,?,?)
''', (webname, url, request_time, status, response_time,
requirements, error))
conn.commit()
except Exception as e:
print("Error while making INSERT: {}".format(e))
finally:
conn.close()
def get_all_records():
"""
Helper function used to fetch all the records from database table.
:return: List of tuples representing status check records.
"""
conn, cur = get_connection()
records = []
try:
cur.execute('''SELECT * FROM website_checks''')
records = cur.fetchall()
except Exception as e:
print("Error while getting all records: {}".format(e))
finally:
conn.close()
return records
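# Usage sketch (hypothetical values; the package directory must be writable):
#   import datetime
#   create_table()
#   record_insert('example', 'https://example.com',
#                 request_time=datetime.datetime.now())
#   rows = get_all_records()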
|
UTF-8
|
Python
| false | false | 3,023 |
py
| 12 |
db_utils.py
| 6 | 0.582534 | 0.58088 | 0 | 109 | 26.733945 | 76 |
bireme/lilacs-mongodb
| 2,705,829,431,878 |
cb4821c9a8c98387c551deb8399950636fc82d8b
|
39146e67dbb67281ce757c51f89a9e9b93a80ae2
|
/stopwatch_decorator.py
|
43a7d4b872f6011cc872a69d3d9db02bb18739c2
|
[] |
no_license
|
https://github.com/bireme/lilacs-mongodb
|
e550653ac10d3ca9254c291f1eb17132131ed989
|
3b75cc8475d725b7790de41ce29250517def0c18
|
refs/heads/master
| 2021-06-06T12:25:09.061067 | 2021-06-01T17:28:45 | 2021-06-01T17:28:45 | 2,338,560 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from time import time
def stopwatch(fn):
def decorated(*args, **kwargs):
t0 = time()
res = fn(*args, **kwargs)
return (time()-t0, res)
return decorated
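# The same wrapper also works with decorator syntax:
#   @stopwatch
#   def gcd(a, b): ...
# after which every call returns an (elapsed_seconds, result) pair.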
def gcd(a, b):
while b != 0:
a, b = b, a % b
return a
def test():
decorated_gcd = stopwatch(gcd)
delta, res = decorated_gcd(1989, 867)
assert delta > 0
assert res == 51
if __name__=='__main__':
n1 = 99971 * 99989
n2 = 99971 * 99991
print stopwatch(gcd)(9949 * 9967, 9949 * 9973)
|
UTF-8
|
Python
| false | false | 512 |
py
| 4 |
stopwatch_decorator.py
| 3 | 0.546875 | 0.447266 | 0 | 26 | 18.692308 | 50 |
westernx/vee
| 2,534,030,723,618 |
1dfff8af835c26df9989cb0e182b7799b493ba88
|
fe30d77ae29475f181559a109a00d92720b357cb
|
/vee/pipeline/python.py
|
4798d6b8540d2fe1d2af595635d803e9a2e7c2fb
|
[] |
no_license
|
https://github.com/westernx/vee
|
f1fcfea757669612644711078dc68099ab19c89c
|
213529b5881703a8d1afb081c2d90843832c3581
|
refs/heads/master
| 2021-01-18T21:57:53.399368 | 2016-02-09T20:54:07 | 2016-02-09T20:54:07 | 30,377,715 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import re
import shutil
import sys
from vee import log
from vee.cli import style, style_note, style_warning
from vee.envvars import join_env_path
from vee.package import Package
from vee.pipeline.generic import GenericBuilder
from vee.subproc import call
from vee.utils import find_in_tree
python_version = '%d.%d' % (sys.version_info[:2])
site_packages = os.path.join('lib', 'python' + python_version, 'site-packages')
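# call_setup_py runs "setup.py <args>" through `python -c`, importing
# setuptools first so that plain-distutils setup scripts still accept
# setuptools-only commands (egg_info, develop, ...).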
def call_setup_py(setup_py, args, **kwargs):
kwargs['cwd'] = os.path.dirname(setup_py)
cmd = ['python', '-c', 'import sys, setuptools; sys.argv[0]=__file__=%r; execfile(__file__)' % os.path.basename(setup_py)]
cmd.extend(args)
return call(cmd, **kwargs)
class PythonBuilder(GenericBuilder):
factory_priority = 5000
@classmethod
def factory(cls, step, pkg):
if step != 'inspect':
return
setup_path = find_in_tree(pkg.build_path, 'setup.py')
egg_path = find_in_tree(pkg.build_path, 'EGG-INFO', 'dir') or find_in_tree(pkg.build_path, '*.egg-info', 'dir')
dist_path = find_in_tree(pkg.build_path, '*.dist-info', 'dir')
if setup_path or egg_path or dist_path:
return cls(pkg, (setup_path, egg_path, dist_path))
def get_next(self, name):
if name in ('build', 'install', 'develop'):
return self
def __init__(self, pkg, paths):
super(PythonBuilder, self).__init__(pkg)
self.setup_path, self.egg_path, self.dist_path = paths
def inspect(self):
pkg = self.package
if self.setup_path and not self.egg_path:
log.info(style_note('Building Python egg-info'))
res = call_setup_py(self.setup_path, ['egg_info'], env=pkg.fresh_environ(), indent=True, verbosity=1)
if res:
raise RuntimeError('Could not build Python package')
self.egg_path = find_in_tree(pkg.build_path, '*.egg-info', 'dir')
if not self.egg_path:
log.warning('Could not find newly created *.egg-info')
if self.egg_path:
requires_path = os.path.join(self.egg_path, 'requires.txt')
if os.path.exists(requires_path):
for line in open(requires_path, 'rb'):
line = line.strip()
if not line:
continue
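                    # a '[extra]' header starts an extras section; the
                    # unconditional requirements end here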
if line.startswith('['):
break
name = re.split('\W', line)[0].lower()
log.debug('%s depends on %s' % (pkg.name, name))
pkg.dependencies.append(Package(name=name, url='pypi:%s' % name))
def build(self):
pkg = self.package
if self.setup_path:
# Some packages need to be built at the same time as installing.
# Anything which uses the distutils install_clib command, for instance...
if pkg.defer_setup_build:
log.info(style_note('Deferring build to install stage'))
return
log.info(style_note('Building Python package'))
cmd = ['build']
cmd.extend(pkg.config)
res = call_setup_py(self.setup_path, cmd, env=pkg.fresh_environ(), indent=True, verbosity=1)
if res:
raise RuntimeError('Could not build Python package')
return
# python setup.py bdist_egg
if self.egg_path:
log.info(style_note('Found Python Egg', os.path.basename(self.egg_path)))
log.warning('Scripts and other data will not be installed.')
if not pkg.package_path.endswith('.egg'):
log.warning('package does not appear to be an Egg')
# We must rename the egg!
pkg_info_path = os.path.join(self.egg_path, 'PKG-INFO')
if not os.path.exists(pkg_info_path):
log.warning('EGG-INFO/PKG-INFO does not exist')
else:
pkg_info = {}
for line in open(pkg_info_path, 'rU'):
line = line.strip()
if not line:
continue
name, value = line.split(':')
pkg_info[name.strip().lower()] = value.strip()
try:
pkg_name = pkg_info['name']
pkg_version = pkg_info['version']
except KeyError:
log.warning('EGG-INFO/PKG-INFO is malformed')
else:
new_egg_path = os.path.join(os.path.dirname(self.egg_path), '%s-%s.egg-info' % (pkg_name, pkg_version))
shutil.move(self.egg_path, new_egg_path)
self.egg_path = new_egg_path
pkg.build_subdir = os.path.dirname(self.egg_path)
pkg.install_prefix = site_packages
return
# python setup.py bdist_wheel
if self.dist_path:
log.info(style_note('Found Python Wheel', os.path.basename(self.dist_path)))
log.warning('Scripts and other data will not be installed.')
if not pkg.package_path.endswith('.whl'):
log.warning('package does not appear to be a Wheel')
pkg.build_subdir = os.path.dirname(self.dist_path)
pkg.install_prefix = site_packages
return
def install(self):
if not self.setup_path:
return super(PythonBuilder, self).install()
pkg = self.package
pkg._assert_paths(install=True)
install_site_packages = os.path.join(pkg.install_path, site_packages)
# Setup the PYTHONPATH to point to the "install" directory.
env = pkg.fresh_environ()
env['PYTHONPATH'] = '%s:%s' % (install_site_packages, env.get('PYTHONPATH', ''))
if os.path.exists(pkg.install_path):
log.warning('Removing existing install', pkg.install_path)
shutil.rmtree(pkg.install_path)
os.makedirs(install_site_packages)
log.info(style_note('Installing Python package', 'to ' + install_site_packages))
cmd = [
'install',
'--root', pkg.install_path, # Better than prefix
'--prefix', '.',
'--install-lib', site_packages, # So that we don't get lib64; virtualenv symlinks them together anyways.
'--single-version-externally-managed',
]
if not pkg.defer_setup_build:
cmd.append('--skip-build')
res = call_setup_py(self.setup_path, cmd, env=env, indent=True, verbosity=1)
if res:
raise RuntimeError('Could not install Python package')
def develop(self):
pkg = self.package
log.info(style_note('Building scripts'))
cmd = [
'build_scripts', '-e', '/usr/bin/env VEE=%s VEE_PYTHON=%s dev python' % (os.environ.get("VEE", ''), os.environ.get('VEE_PYTHON', '')),
'install_scripts', '-d', 'build/scripts',
]
if call_setup_py(self.setup_path, cmd):
raise RuntimeError('Could not build scripts')
egg_info = find_in_tree(os.path.dirname(self.setup_path), '*.egg-info', 'dir')
if not egg_info:
raise RuntimeError('Could not find built egg-info')
dirs_to_link = set()
for line in open(os.path.join(egg_info, 'top_level.txt')):
dirs_to_link.add(os.path.dirname(line.strip()))
for name in sorted(dirs_to_link):
log.info(style_note("Adding ./%s to $PYTHONPATH" % name))
pkg.environ['PYTHONPATH'] = join_env_path('./' + name, pkg.environ.get('PYTHONPATH', '@'))
scripts = os.path.join(os.path.dirname(self.setup_path), 'build', 'scripts')
if os.path.exists(scripts):
log.info(style_note("Adding ./build/scripts to $PATH"))
pkg.environ['PATH'] = join_env_path('./build/scripts', pkg.environ.get('PATH', '@'))
|
UTF-8
|
Python
| false | false | 7,957 |
py
| 35 |
python.py
| 31 | 0.560261 | 0.558753 | 0 | 216 | 35.824074 | 146 |
joeldodge79/sparts
| 3,453,153,710,640 |
584ffbb6c74c5ae0c36a8c3ea8cf2f34c9532b72
|
2500fd37d3553d998df0e6b4bd7c0e094056ac35
|
/sparts/tasks/tornado_thrift.py
|
e81be95ca24d591ba79dfa31acb0f1b0f56b22ca
|
[
"ISC"
] |
permissive
|
https://github.com/joeldodge79/sparts
|
c95ea057cb37e8794f93ce1460aa6770c3075d46
|
40df516f6ccbc1a233a691a5fbd540b240880dfc
|
refs/heads/master
| 2020-12-31T04:07:58.624993 | 2014-04-06T09:12:04 | 2014-04-06T09:12:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from __future__ import absolute_import
import tornado.web
from thrift.transport.TTransport import TMemoryBuffer
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
class TornadoThriftHandler(tornado.web.RequestHandler):
def initialize(self, processor):
if hasattr(processor, 'processor'):
processor = processor.processor
self.processor = processor
def post(self):
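        # The POST body is one serialized Thrift call: read it through an
        # in-memory transport, run the processor, and write the reply back.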
iprot = TBinaryProtocol(TMemoryBuffer(self.request.body))
oprot = TBinaryProtocol(TMemoryBuffer())
self.processor.process(iprot, oprot)
self.set_header('Content-Type', 'application/x-thrift')
self.write(oprot.trans.getvalue())
|
UTF-8
|
Python
| false | false | 679 |
py
| 42 |
tornado_thrift.py
| 39 | 0.721649 | 0.721649 | 0 | 19 | 34.736842 | 65 |
nsknojj/data_mining
| 9,457,518,021,781 |
3549473b330147ce954162aa9117a6aa4c44dd5f
|
0ee45aadb6554576d01dee90d5dfdb927de2f0b7
|
/1300012758 张闻涛 作业2 wiki 新闻分类/Bayes/Test.py
|
a7997dcb095e393156b2073bfd24d70a1a6a4e6e
|
[] |
no_license
|
https://github.com/nsknojj/data_mining
|
c845c7703c353f609f0391716304c736d4a222c9
|
a331d808fae91244cfefdef1a65556ba6f954d8c
|
refs/heads/master
| 2016-09-12T06:14:16.782708 | 2016-05-13T06:51:02 | 2016-05-13T06:51:02 | 46,955,636 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import math
import re
import StopWord
from Statistic import Stat
def test(stat, path='', n_test=10):
allCat = {'Crime and law': 0, 'Culture and entertainment': 0, 'Disasters and accidents': 0,
'Science and technology': 0, 'Health': 0}
callBack = dict(allCat)
callAll = dict(allCat)
stopWord = StopWord.getStopWord()
termSum = len(stat.terms)
correct = 0
wrong = 0
for n in range(1, n_test+1):
filename = path + str(n) + '.txt'
with open(filename, 'rb') as fin:
title = fin.readline().strip()
termList = re.split('[^a-zA-Z]+', fin.readline())
maxi = 0
toCat = ''
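            # Multinomial Naive Bayes in log space: score each category by
            # summing log P(term | cat) with add-one (Laplace) smoothing,
            # plus a smoothed term-frequency prior for the category.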
            for cat in stat.cats:
noC = stat.cats[cat]
p = 0.0
for t in termList:
t = t.lower()
if not (t in stopWord or len(t) == 1):
if t in stat.terms:
noT = stat.termToInt[t]
p += math.log(1.0 * (stat.termInCat[noC][noT] + 1) / (stat.catTermAmount[noC] + termSum))
p += math.log(1.0 * (stat.catTermAmount[noC] + 0.01) / stat.totalTerm)
if p > maxi or toCat == '':
maxi = p
toCat = cat
cat = fin.readline().strip()
if cat in stat.cats:
allCat[cat] += 1
callAll[toCat] += 1
if toCat == cat:
callBack[cat] += 1
correct += 1
print(title + ' : ' + cat + ' toCat: ' + toCat + ' Yes')
else:
wrong += 1
print(title + ' : ' + cat + ' toCat: ' + toCat + ' No')
print('\nTotal Precision: correct / total = %d / %d' % (correct, correct + wrong))
for cat in allCat:
print('[' + cat + ']')
if callAll[cat] > 0:
p = callBack[cat] * 100.0 / callAll[cat]
else:
p = -1
if allCat[cat] > 0:
r = callBack[cat] * 100.0 / allCat[cat]
else:
r = -1
print('Precision : %d / %d = %.3f%%' % (callBack[cat], callAll[cat], p))
print('Recall : %d / %d = %.3f%%' % (callBack[cat], allCat[cat], r))
print('F = %.3f%%' % (2.0 * p * r / (p + r)))
# stat = Stat()
# with open('log/log.txt','rb') as f:
# stat.totalCat = int(f.readline().strip())
# stat.cats = dict(f.readline().strip())
# stat.totalTerm = int(f.readline().strip())
# stat.terms = list(f.readline().strip())
# stat.termToInt = dict(f.readline().strip())
# stat.termAmount = list(f.readline().strip())
# stat.termInDoc = list(f.readline().strip())
# stat.catTermAmount = list(f.readline().strip())
# stat.termInCat = list(f.readline().strip())
# print(stat.termInCat)
|
UTF-8
|
Python
| false | false | 2,880 |
py
| 12 |
Test.py
| 10 | 0.466667 | 0.451042 | 0 | 78 | 35.923077 | 117 |
oldsuper/dialog_service_api_test
| 14,568,529,072,757 |
ce36b219d08f7e17688de5f521bcb89550d3c166
|
c55faa0905c671f05f3cdde94367dc9a7442247c
|
/src/utils/m_config.py
|
a37fd5f2b11d25be2c0d75c577655d16411b2708
|
[] |
no_license
|
https://github.com/oldsuper/dialog_service_api_test
|
9253de1eb577e9b29c43dcd4e8c214f9e984c3c2
|
f829bffd2880c391bff173683e07bbe69fd6bb72
|
refs/heads/master
| 2020-06-20T12:20:43.518134 | 2019-10-02T20:34:57 | 2019-10-02T20:34:57 | 197,120,197 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import ConfigParser
config_level_option_key = 'config_level'
class MConfig(object):
    def __init__(self, config_file_path):
        if not os.path.exists(config_file_path):
            raise ValueError('ConfigFileNotExists')
        config = ConfigParser.ConfigParser()
        config.read(config_file_path)
        section_list = []
        total_options = []
        # sections() (not options()) iterates section names, and has_option()
        # takes both the section and the option name
        for section in config.sections():
            if config.has_option(section, config_level_option_key):
                section_list.append(config.get(section, config_level_option_key))
            for option in config.options(section):
                total_options.append(option)
        self.config = config
        self.section_list = section_list
        self.total_options = total_options
|
UTF-8
|
Python
| false | false | 613 |
py
| 8 |
m_config.py
| 4 | 0.619902 | 0.619902 | 0 | 18 | 33.055556 | 60 |
gdymind/Deep-Perceptual-Image-Downsampling
| 15,101,105,033,326 |
52f0c01d59c348c9acc86c4b9a80da1861bf9892
|
8aa1ce51861056a8b1c71a04b1d596b754765c99
|
/src/data/__init__.py
|
a0645570d4f34803bf78ee2cff46d6bd61bcc413
|
[
"MIT"
] |
permissive
|
https://github.com/gdymind/Deep-Perceptual-Image-Downsampling
|
1827883b35e0af7e584a5076acf548000eeb4a74
|
b6101086dbf20db0780f4bd49a2b7ab34696c0c3
|
refs/heads/master
| 2022-07-09T10:54:01.933736 | 2018-11-12T03:39:21 | 2018-11-12T03:39:21 | 147,042,890 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
from data.BaseDataset import BaseDataset
from torch.utils.data.dataloader import DataLoader
class Data:
def __init__(self, args):
train_name, test_name = args.data_name.split('/')
        # train dataset & dataloader
self.loader_train = None
trainset = None
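        # pin_memory speeds host-to-GPU copies, so enable it only when not
        # running on the CPU (hence `not args.cpu` below).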
# load test dataset
testset = BaseDataset(args, name = test_name, train = False)
self.loader_test = DataLoader(testset, batch_size = 1,
            shuffle = False, pin_memory = not args.cpu)
# self.loader_test = MSDataLoader(args, testset, batch_size = 1,
# shuffle = False, pin_memory = not args.cpu)
# load train dataset
if not args.test_only:
trainset = BaseDataset(args, name = train_name, train = True)
self.loader_train = DataLoader(trainset, batch_size = args.batch_size,
                shuffle = True, pin_memory = not args.cpu)
# self.loader_train = MSDataLoader(args, trainset, batch_size = args.batch_size,
# shuffle = True, pin_memory = not arg.cpu)
if args.gen_data_only:
sys.exit()
|
UTF-8
|
Python
| false | false | 1,125 |
py
| 19 |
__init__.py
| 18 | 0.604444 | 0.602667 | 0 | 30 | 36.533333 | 92 |
AlexAndriamahaleo/Tourisme_de_cinephile
| 12,996,571,060,123 |
4bfd9249232cafdbe5b8edf13fb9d06e688565ac
|
6ec36f81f5b61a96f811baf1c77674cb06c349e8
|
/Tourisme de cinephile/testSQLviewsFromOtherFile.py
|
c56be54a71b39590e02b99bb417c7c80f7a6a627
|
[] |
no_license
|
https://github.com/AlexAndriamahaleo/Tourisme_de_cinephile
|
624c5d819f06615aeb081370ef68d54077bf259b
|
7a9466925a661cbdaab4e08f92b868e2bf815acc
|
refs/heads/master
| 2020-03-06T23:57:39.556308 | 2018-04-09T10:04:55 | 2018-04-09T10:04:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sqlite3
# TODO// create views
# Connect to the database
def connect_database(databaseName_and_path):
    connexion = sqlite3.connect(databaseName_and_path)
    curseur = connexion.cursor()
    return connexion, curseur
# Load every row of the table into a list
def select_data(curseur):
    curseur.execute("SELECT * FROM liste_des_sites_des_hotspots_paris_wifi")
    resultat = curseur.fetchall()
    return resultat
# Fetch specific rows from the table using the parameter
# (a "?" placeholder avoids the SQL injection risk of string concatenation)
def select_specific_data(curseur, ou):
    curseur.execute(
        "SELECT * FROM liste_des_sites_des_hotspots_paris_wifi WHERE ARRONDISSEMENT = ?",
        (ou,))
    resultat = list(curseur)
    return resultat
# Connect to the database through the helper function
myConnexion, myCurseur = connect_database("liste_des_sites_des_hotspots_paris_wifi")
# Print the result to the console
myResult = select_data(myCurseur)
print(myResult)
# Use a variable inside a query through the helper function
ou = "75001"
myResult = select_specific_data(myCurseur, ou)
print(myResult)
# Close the database
myConnexion.close()
|
UTF-8
|
Python
| false | false | 1,174 |
py
| 16 |
testSQLviewsFromOtherFile.py
| 8 | 0.74055 | 0.733677 | 0 | 43 | 26.069767 | 111 |
sponege/CTF-writeups
| 5,377,299,090,342 |
1d340730ab8375f4b3c4169b8e0ed6ef1fcbf520
|
821fe521a009f797ffb4df68182470cd1c67a1aa
|
/HSCTF2020/binary-word-search/sol.py
|
3aef1a8b9d4baa2edead4dbfda95b390fc73df3a
|
[] |
no_license
|
https://github.com/sponege/CTF-writeups
|
d4b3d28dbcf865bafba226f910dfe41825046abf
|
5415ec9511fade1f1876396f1a1136f25d393762
|
refs/heads/master
| 2022-12-01T15:45:06.794640 | 2020-07-31T22:12:05 | 2020-07-31T22:12:05 | 259,153,036 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
from PIL import Image
import binascii
img = Image.open("BinaryWordSearch.png")
pixels = img.load()
start = "flag{"
sBits = ''
for c in start:
bits = bin(ord(c))[2:]
while len(bits) != 8:
bits = '0' + bits
sBits += bits
print(sBits)
flags = []
for x in range(img.width):
for y in range(img.height):
for dir in range(8):
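            # dir 0-7 encodes the 8 compass directions via the range tests
            # below: dirs 1-3 step +x and 5-7 step -x; dirs 0,1,7 step +y
            # and 3-5 step -y.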
sX = x
sY = y
bits = ''
success = True
for i in range(len(sBits)):
if sX < 0 or sX > img.width - 1 or sY < 0 or sY > img.height - 1:
#success = False
break
bit = str((pixels[sX, sY][0] == 0) * 1)
if bit != sBits[i]:
#success = False
break
bits += bit
if dir > 0 and dir < 4: ## right, range of 1-3
sX += 1
elif dir > 4: ## left, range of 5-7
sX -= 1
if dir < 2 or dir == 7: ## up
sY += 1
elif dir > 2 and dir < 6: ## down, range of 3-5
sY -= 1
if success:
while True:
if sX < 0 or sX > img.width - 1 or sY < 0 or sY > img.height - 1:
break
bits += str((pixels[sX, sY][0] == 0) * 1)
if dir > 0 and dir < 4: ## right, range of 1-3
sX += 1
elif dir > 4: ## left, range of 5-7
sX -= 1
if dir < 2 or dir == 7: ## up
sY += 1
elif dir > 2 and dir < 6: ## down, range of 3-5
sY -= 1
while len(bits) % 16 != 0:
bits += '0'
hB = hex(int(bits, 2))[2:]
if len(hB) % 2 != 0:
hB += 'a'
flag = bytes.fromhex(hB)
#print("\033c") ## clear screen
#print((x, y, dir))
print(flag)
if b'FLAG' in flag:
flags.append(flag)
print("-----FLAGS-----")
for flag in flags:
print(flag)
|
UTF-8
|
Python
| false | false | 2,244 |
py
| 95 |
sol.py
| 44 | 0.367201 | 0.338681 | 0 | 71 | 30.605634 | 85 |
materials-data-facility/toolbox
| 7,696,581,430,789 |
bf9913db89a6e1b812a7f3fde4743f5a671833c0
|
68145cdada48e1ab4a3bc69e4c905521e0123a36
|
/tests/test_toolbox.py
|
154ec69087e91d5792f92f24a784705bd6633e6b
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/materials-data-facility/toolbox
|
5ab0f46afbe1dbd1e3238af733bf51219ee49693
|
e0e178ef2433811a25db76bb711c40b2552e836e
|
refs/heads/master
| 2022-08-24T14:13:45.837592 | 2022-08-01T17:07:54 | 2022-08-01T17:07:54 | 110,880,833 | 5 | 7 |
Apache-2.0
| false | 2022-05-06T21:40:05 | 2017-11-15T20:06:22 | 2021-12-16T23:06:27 | 2022-05-06T21:40:04 | 6,429 | 5 | 4 | 2 |
Python
| false | false |
from copy import deepcopy
import json
import os
import shutil
from globus_nexus_client import NexusClient
import globus_sdk
import mdf_toolbox
import pytest
from unittest import mock
on_github = os.getenv('ON_GITHUB') is not None
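# Tests that need live Globus credentials bail out early on CI by returning
# from the test body (a crude stand-in for a pytest skip marker).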
def test_login():
if on_github: return True
# Login works
# Impersonate Forge
res1 = mdf_toolbox.login(services="search", app_name="MDF_Forge",
client_id="b2b437c4-17c1-4e4b-8f15-e9783e1312d7")
assert type(res1) is dict
assert isinstance(res1.get("search"), globus_sdk.SearchClient)
# Test other services
# Use default "unknown app"
# TODO: "groups" cannot be tested without whitelisting app
res2 = mdf_toolbox.login(services=["search_ingest", "transfer", "data_mdf", "mdf_connect",
"petrel"])
assert isinstance(res2.get("search_ingest"), globus_sdk.SearchClient)
assert isinstance(res2.get("transfer"), globus_sdk.TransferClient)
assert isinstance(res2.get("data_mdf"), globus_sdk.RefreshTokenAuthorizer)
assert isinstance(res2.get("mdf_connect"), globus_sdk.RefreshTokenAuthorizer)
assert isinstance(res2.get("petrel"), globus_sdk.RefreshTokenAuthorizer)
# assert isinstance(res2.get("groups"), NexusClient)
def test_confidential_login(capsys):
if on_github: return True
# Load creds
with open(os.path.expanduser("~/.client_credentials.json")) as f:
creds = json.load(f)
# Single services, different cases
assert isinstance(mdf_toolbox.confidential_login(services="transfer", **creds)["transfer"],
globus_sdk.TransferClient)
assert isinstance(mdf_toolbox.confidential_login(services=["search"], **creds)["search"],
globus_sdk.SearchClient)
# Manual scope set
assert isinstance(mdf_toolbox.confidential_login(
services="urn:globus:auth:scope:transfer.api.globus.org:all",
**creds)["urn:globus:auth:scope:transfer.api.globus.org:all"],
globus_sdk.TransferClient)
# make_clients=False
assert isinstance(mdf_toolbox.confidential_login(
services="transfer", make_clients=False, **creds)["transfer"],
globus_sdk.ClientCredentialsAuthorizer)
assert isinstance(mdf_toolbox.confidential_login(
services="urn:globus:auth:scope:transfer.api.globus.org:all",
make_clients=False,
**creds)["urn:globus:auth:scope:transfer.api.globus.org:all"],
globus_sdk.ClientCredentialsAuthorizer)
# No client available
assert isinstance(mdf_toolbox.confidential_login(services="petrel", **creds)["petrel"],
globus_sdk.ClientCredentialsAuthorizer)
# Bad scope
assert mdf_toolbox.confidential_login(services="invalid", **creds) == {}
out, err = capsys.readouterr()
assert "Error: Cannot create authorizer for scope 'invalid'" in out
def test_anonymous_login(capsys):
if on_github: return True
# Valid services work
res1 = mdf_toolbox.anonymous_login(["transfer", "search", "publish", "groups"])
assert isinstance(res1.get("search"), globus_sdk.SearchClient)
assert isinstance(res1.get("transfer"), globus_sdk.TransferClient)
assert isinstance(res1.get("groups"), NexusClient)
# Single service works
res2 = mdf_toolbox.anonymous_login("search")
assert isinstance(res2.get("search"), globus_sdk.SearchClient)
# Bad services don't work
assert mdf_toolbox.anonymous_login(["garbage", "invalid"]) == {}
out, err = capsys.readouterr()
assert "Error: No known client for 'garbage' service." in out
assert "Error: No known client for 'invalid' service." in out
def test_uncompress_tree():
if on_github: return True
root = os.path.join(os.path.dirname(__file__), "testing_files")
# Basic test, should extract tar and nested tar, but not delete anything
# Also should error on known-bad-weird archive
res = mdf_toolbox.uncompress_tree(root)
assert res["success"]
assert res["num_extracted"] == 2
assert res["files_errored"] == [os.path.join(root, "toolbox_more", "toolbox_error.tar.gz")]
lv1_txt = os.path.join(root, "toolbox_more", "toolbox_compressed", "tlbx_uncompressed.txt")
assert os.path.isfile(lv1_txt)
lv2_txt = os.path.join(root, "toolbox_more", "toolbox_compressed", "toolbox_nested",
"tlbx_uncompressed2.txt")
assert os.path.isfile(lv2_txt)
nested_tar = os.path.join(root, "toolbox_more", "toolbox_compressed", "toolbox_nested.tar")
assert os.path.isfile(nested_tar)
# Test deleting extracted archive
shutil.rmtree(os.path.join(root, "toolbox_more", "toolbox_compressed", "toolbox_nested"))
mdf_toolbox.uncompress_tree(os.path.join(root, "toolbox_more", "toolbox_compressed"),
delete_archives=True)
assert os.path.isfile(lv2_txt)
assert not os.path.isfile(nested_tar)
# Clean up
shutil.rmtree(os.path.join(root, "toolbox_more", "toolbox_compressed"))
shutil.rmtree(os.path.join(root, "toolbox_more", "toolbox_error.tar/"))
def test_format_gmeta():
# Simple GMetaEntry
md1 = {
"mdf": {
"acl": ["public"],
"mdf_id": "123",
"data": "some"
}
}
# More complex GMetaEntry
md2 = {
"mdf": {
"title": "test",
"source_name": "source name",
"citation": ["abc"],
"data_contact": {
"given_name": "Test",
"family_name": "McTesterson",
"full_name": "Test McTesterson",
"email": "test@example.com"
},
"data_contributor": [{
"given_name": "Test",
"family_name": "McTesterson",
"full_name": "Test McTesterson",
"email": "test@example.com"
}],
"ingest_date": "Jan 1, 2017",
"metadata_version": "1.1",
"mdf_id": "123",
"parent_id": "000",
"resource_type": "dataset"
},
"dc": {},
"misc": {}
}
# Format both
gme1 = mdf_toolbox.format_gmeta(md1, md1["mdf"].pop("acl"), md1["mdf"]["mdf_id"])
assert gme1 == {
"@datatype": "GMetaEntry",
"@version": "2016-11-09",
"subject": "123",
"visible_to": ["public"],
"content": {
"mdf": {
"mdf_id": "123",
"data": "some"
}
}
}
gme2 = mdf_toolbox.format_gmeta(md2, ["abcd"], "https://example.com/123456")
assert gme2 == {
"@datatype": "GMetaEntry",
"@version": "2016-11-09",
"subject": "https://example.com/123456",
"visible_to": ["urn:globus:auth:identity:abcd", "urn:globus:groups:id:abcd"],
"content": {
"mdf": {
"title": "test",
"source_name": "source name",
"citation": ["abc"],
"data_contact": {
"given_name": "Test",
"family_name": "McTesterson",
"full_name": "Test McTesterson",
"email": "test@example.com"
},
"data_contributor": [{
"given_name": "Test",
"family_name": "McTesterson",
"full_name": "Test McTesterson",
"email": "test@example.com"
}],
"ingest_date": "Jan 1, 2017",
"metadata_version": "1.1",
"mdf_id": "123",
"parent_id": "000",
"resource_type": "dataset"
},
"dc": {},
"misc": {}
}
}
# Format into GMetaList
gmlist = mdf_toolbox.format_gmeta([gme1, gme2])
assert gmlist == {
"@datatype": "GIngest",
"@version": "2016-11-09",
"ingest_type": "GMetaList",
"ingest_data": {
"@datatype": "GMetaList",
"@version": "2016-11-09",
"gmeta": [gme1, gme2]
}
}
# Error if bad type
with pytest.raises(TypeError):
mdf_toolbox.format_gmeta(1)
def test_gmeta_pop():
class TestResponse():
status_code = 200
headers = {
"Content-Type": "json"
}
data = {
'@datatype': 'GSearchResult',
'@version': '2016-11-09',
'count': 11,
'gmeta': [{
'@datatype': 'GMetaResult',
'@version': '2016-11-09',
'content': [{
'mdf': {
'links': {
'landing_page':
'https://data.materialsdatafacility.org/test/test_fetch.txt',
'txt': {
"globus_endpoint": "82f1b5c6-6e9b-11e5-ba47-22000b92c6ec",
"http_host": "https://data.materialsdatafacility.org",
"path": "/test/test_fetch.txt"
}
}
}
}, {
'mdf': {
'links': {
'landing_page':
'https://data.materialsdatafacility.org/test/test_fetch.txt',
'txt': {
"globus_endpoint": "82f1b5c6-6e9b-11e5-ba47-22000b92c6ec",
"http_host": "https://data.materialsdatafacility.org",
"path": "/test/test_fetch.txt"
}
}
}
}],
'subject': 'https://data.materialsdatafacility.org/test/test_fetch.txt',
}],
'offset': 0,
'total': 22
}
text = json.dumps(data)
def json(self):
return self.data
ghttp = globus_sdk.GlobusHTTPResponse(TestResponse(), client=mock.Mock())
popped = mdf_toolbox.gmeta_pop(ghttp)
assert popped == [{
'mdf': {
'links': {
'landing_page': 'https://data.materialsdatafacility.org/test/test_fetch.txt',
'txt': {
'globus_endpoint': '82f1b5c6-6e9b-11e5-ba47-22000b92c6ec',
'http_host': 'https://data.materialsdatafacility.org',
'path': '/test/test_fetch.txt'
}
}
}
}, {
'mdf': {
'links': {
'landing_page': 'https://data.materialsdatafacility.org/test/test_fetch.txt',
'txt': {
'globus_endpoint': '82f1b5c6-6e9b-11e5-ba47-22000b92c6ec',
'http_host': 'https://data.materialsdatafacility.org',
'path': '/test/test_fetch.txt'
}
}
}
}]
info_pop = mdf_toolbox.gmeta_pop(ghttp, info=True)
assert info_pop == (popped, {'total_query_matches': 22})
# String loading
str_gmeta = json.dumps({
"gmeta": [{
"content": [
{"test1": "test1"},
{"test2": "test2"}
]
},
{
"content": [
{"test3": "test3"},
{"test4": "test4"}
]
}
]})
assert mdf_toolbox.gmeta_pop(str_gmeta) == [
{"test1": "test1"},
{"test2": "test2"},
{"test3": "test3"},
{"test4": "test4"}
]
# Error on bad data
with pytest.raises(TypeError):
mdf_toolbox.gmeta_pop(1)
def test_translate_index():
# Known index
assert mdf_toolbox.translate_index("mdf") == "1a57bbe5-5272-477f-9d31-343b8258b7a5"
# Invalid index
assert mdf_toolbox.translate_index("invalid_index_not_real") == "invalid_index_not_real"
def test_quick_transfer():
# TODO
pass
def test_dict_merge():
base = {
"base_key": "base",
"both_key": "base",
"level2": {
"base_key": "base",
"both_key": "base",
"level3": {
"base_key": "base",
"both_key": "base",
"mismatch_key": "string"
}
}
}
add = {
"both_key": "add",
"add_key": "add",
"level2": {
"both_key": "add",
"add_key": "add",
"level3": {
"both_key": "add",
"add_key": "add",
"mismatch_key": 10,
"level4": {
"add_key": "add"
}
}
}
}
merged = {
"base_key": "base",
"both_key": "base",
"add_key": "add",
"level2": {
"base_key": "base",
"both_key": "base",
"add_key": "add",
"level3": {
"base_key": "base",
"both_key": "base",
"add_key": "add",
"mismatch_key": "string",
"level4": {
"add_key": "add"
}
}
}
}
b_list = {
"list_field": ["base"]
}
a_list = {
"list_field": ["add"]
}
m_list = {
"list_field": ["base", "add"]
}
a_list_bad = {
"list_field": "foo"
}
# Proper use
old_base = deepcopy(base)
old_add = deepcopy(add)
assert mdf_toolbox.dict_merge(base, add) == merged
# Originals should be unchanged
assert base == old_base
assert add == old_add
# Test list appending
# No appending
assert mdf_toolbox.dict_merge(b_list, a_list, append_lists=False) == b_list
# With appending
assert mdf_toolbox.dict_merge(b_list, a_list, append_lists=True) == m_list
# With mismatched data types
assert mdf_toolbox.dict_merge(b_list, a_list_bad, append_lists=False) == b_list
assert mdf_toolbox.dict_merge(b_list, a_list_bad, append_lists=True) == b_list
assert mdf_toolbox.dict_merge({}, {}) == {}
# Check errors
with pytest.raises(TypeError):
mdf_toolbox.dict_merge(1, {})
with pytest.raises(TypeError):
mdf_toolbox.dict_merge({}, "a")
with pytest.raises(TypeError):
mdf_toolbox.dict_merge([], [])
def test_insensitive_comparison():
# Correct results:
# dbase == d1 always
# dbase == d2 iff string_insensitive=True
# dbase == d3 iff type_insensitive=True
# dbase == d4 never (extra dict key)
# dbase == d5 never (extra list item)
# dbase == d6 never (float not equal)
dbase = {
"aaa": ["a", "zzz", 4, 5, "QQzz"],
"ccc": "AAAABBBBCCCC",
"bbb": 50.00000000000,
"www": (1, 2, 9, 4, 5, "F")
}
d1 = {
"bbb": 50.0,
"aaa": ["a", 5, 4, "zzz", "QQzz"],
"www": (2, 1, 9, 5, "F", 4),
"ccc": "AAAABBBBCCCC"
}
d2 = {
"aaa": ["a", "zzz", 4, 5, "zzqq"],
"ccc": "aaaaBB BBCCC\tC\n",
"bbb": 50.00000000000,
"www": (1, 2, 9, 4, 5, "f")
}
d3 = {
"aaa": ("a", "zzz", 4, 5, "QQzz"),
"ccc": "AAAABBBBCCCC",
"bbb": 50.00000000000,
"www": [1, 2, 9, 4, 5, "F"]
}
d4 = {
"aaa": ["a", "zzz", 4, 5, "QQzz"],
"ccc": "AAAABBBBCCCC",
"bbb": 50.00000000000,
"www": (1, 2, 9, 4, 5, "F"),
"zzz": "abc"
}
d5 = {
"aaa": ["a", "zzz", 4, 5, "QQzz", "zzz"],
"ccc": "AAAABBBBCCCC",
"bbb": 50.00000000000,
"www": (1, 2, 9, 4, 5, "F")
}
d6 = {
"aaa": ["a", "zzz", 4, 5, "QQzz"],
"ccc": "AAAABBBBCCCC",
"bbb": 50.1,
"www": (1, 2, 9, 4, 5, "F")
}
assert mdf_toolbox.insensitive_comparison(dbase, d1) is True
assert mdf_toolbox.insensitive_comparison(dbase, d2) is False
assert mdf_toolbox.insensitive_comparison(dbase, d2, string_insensitive=True) is True
assert mdf_toolbox.insensitive_comparison(dbase, d3) is False
assert mdf_toolbox.insensitive_comparison(dbase, d3, type_insensitive=True) is True
assert mdf_toolbox.insensitive_comparison(dbase, d4) is False
assert mdf_toolbox.insensitive_comparison(dbase, d4, string_insensitive=True,
type_insensitive=True) is False
assert mdf_toolbox.insensitive_comparison(dbase, d5) is False
assert mdf_toolbox.insensitive_comparison(dbase, d5, string_insensitive=True,
type_insensitive=True) is False
assert mdf_toolbox.insensitive_comparison(dbase, d6) is False
assert mdf_toolbox.insensitive_comparison(dbase, d6, string_insensitive=True,
type_insensitive=True) is False
def test_translate_json():
# Set up test dicts
source_doc = {
"dict1": {
"field1": "value1",
"field2": 2
},
"dict2": {
"nested1": {
"field1": True,
"field3": "value3"
}
},
"compost": "CN25",
"na_val": "na"
}
mapping1 = {
"custom": {
"foo": "dict1.field1",
"bar": "dict2.nested1.field1",
"missing": "na_val"
},
"material": {
"composition": "compost"
}
}
mapping2 = {
"custom.foo": "dict1.field1",
"custom.bar": "dict2.nested1.field1",
"custom.missing": "na_val",
"material.composition": "compost"
}
correct_output = {
"material": {
"composition": "CN25"
},
"custom": {
"foo": "value1",
"bar": True,
"missing": "na"
}
}
no_na_output = {
"material": {
"composition": "CN25"
},
"custom": {
"foo": "value1",
"bar": True
}
}
assert mdf_toolbox.translate_json(source_doc, mapping1) == correct_output
assert mdf_toolbox.translate_json(source_doc, mapping2) == correct_output
assert mdf_toolbox.translate_json(source_doc, mapping1, ["abcd"]) == correct_output
assert mdf_toolbox.translate_json(source_doc, mapping1, ["na"]) == no_na_output
assert mdf_toolbox.translate_json(source_doc, mapping1, "na") == no_na_output
def test_flatten_json():
unflat_dict = {
"key1": {
"key2": {
"key3": {
"key4": "value1"
},
"key5": "value2"
},
"key6": {
"key7": 555,
"key8": [1, {"list_flattened": "foo"}, "b"]
}
},
"key9": "value3"
}
flat_dict = {
"key1.key2.key3.key4": "value1",
"key1.key2.key5": "value2",
"key1.key6.key7": 555,
"key1.key6.key8": [1, "b"],
"key1.key6.key8.list_flattened": "foo",
"key9": "value3"
}
assert mdf_toolbox.flatten_json(unflat_dict) == flat_dict
def test_posixify():
assert mdf_toolbox.posixify_path('C:\\Users\\') == '/c/Users'
assert mdf_toolbox.posixify_path('/users/test') == '/users/test'
|
UTF-8
|
Python
| false | false | 20,133 |
py
| 28 |
test_toolbox.py
| 14 | 0.483236 | 0.457706 | 0 | 581 | 33.652324 | 98 |
KirsteinDamian/WordCounter
| 10,685,878,632,647 |
675881fa6271e1d83553477a360b7236242ef36c
|
be20e3063d50679cc6561f2831d1a41ed6a284d4
|
/wordcount-project/wordcount/views.py
|
dbc2e56c1ac91d4595d1512f1a051a7a07898e25
|
[] |
no_license
|
https://github.com/KirsteinDamian/WordCounter
|
6561e70a10c58dd85f1392bb8920c57223a1186f
|
4b852cde0a24af4ae11f8c2b7e50a07edd9f97c5
|
refs/heads/master
| 2023-03-29T00:32:38.070268 | 2021-03-28T20:05:11 | 2021-03-28T20:05:11 | 352,165,086 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.http import HttpResponse
from django.shortcuts import render
# def home(request):
# return HttpResponse('Hello')
def home(request):
return render(request, 'home.html')
def count(request):
fulltext = request.GET['fulltext']
wordlist = fulltext.split()
occurence_count = {}
for word in wordlist:
if word not in occurence_count:
occurence_count[word] = 1
else:
occurence_count[word] += 1
popular_word, popular_count = word_and_count_that_appeared_the_most(occurence_count)
return render(request, 'count.html', {
'fulltext': fulltext, 'count': len(wordlist), 'popular_word': popular_word, 'popular_count': popular_count,
})
def word_and_count_that_appeared_the_most(words: dict):
popular_word = ""
popular_count = 0
for word, count in words.items():
if popular_count < count:
popular_count = count
popular_word = word
return popular_word, popular_count
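# Note: collections.Counter(wordlist).most_common(1) computes essentially the
# same answer in one call; the manual scan is kept for clarity.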
def about(request):
return render(request, 'about.html')
|
UTF-8
|
Python
| false | false | 1,063 |
py
| 3 |
views.py
| 1 | 0.648166 | 0.645343 | 0 | 38 | 26.973684 | 115 |
Manokha/RESTo
| 3,685,081,941,636 |
0436cf1637e6a9dc0f8e2c84ee74a3f70245a0b4
|
f41c3b7a653440cb7f5650cd59888d79818e17c4
|
/src/resto/server.py
|
cb47bd1cc3dbc25b861a14af0ba09217bdd505b8
|
[] |
no_license
|
https://github.com/Manokha/RESTo
|
04475f43e7a79f656ac1540dbea17d4ea2faedaa
|
9dda7f70afc9baeab6da86fdd954766298ea5485
|
refs/heads/master
| 2020-12-03T18:41:50.753725 | 2020-01-04T15:11:38 | 2020-01-04T15:11:38 | 231,434,235 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
import aiopg
from aiohttp import web
from resto.views import RestaurantsView
from resto.models import PGRestaurants
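# Handlers return plain data; this middleware serializes it to JSON and turns
# aiohttp HTTP exceptions into {"error": reason} JSON bodies.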
@web.middleware
async def json_middleware(request, handler):
try:
return web.json_response(await handler(request))
except web.HTTPException as e:
return web.json_response(
{'error': e.reason},
status=e.status
)
async def create_connection_pool(app):
app['pg_pool'] = await aiopg.create_pool(**dict(app['cfg'].items('Database')))
async def create_collections(app):
app['restaurants'] = PGRestaurants(app['pg_pool'])
async def dispose_connection_pool(app):
app['pg_pool'].close()
await app['pg_pool'].wait_closed()
async def resto_app(cfg):
app = web.Application(middlewares=[json_middleware])
app['cfg'] = cfg
app.on_startup.append(create_connection_pool)
app.on_startup.append(create_collections)
app.on_cleanup.append(dispose_connection_pool)
app.add_routes([
web.view('/restaurants', RestaurantsView),
web.view('/restaurants/{name}', RestaurantsView)
])
return app
|
UTF-8
|
Python
| false | false | 1,141 |
py
| 15 |
server.py
| 9 | 0.67397 | 0.673094 | 0 | 47 | 23.276596 | 82 |
PerceptumNL/KhanLatest
| 13,443,247,637,486 |
9924336f626d9e7a29e16feccf605372a0fe394b
|
290d2bcf49c5f3418add77606b5f4b0a6a7a6d15
|
/third_party/agar-src/tests/test_base_test.py
|
c8466de2c29e518e894704b6658f231ab69526b2
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
https://github.com/PerceptumNL/KhanLatest
|
887a191bfd1562fcc6d1d4a2b740cf361c57b5ca
|
c6a3907d96d30f1cb43bf7bf2a392ff3e77a2568
|
refs/heads/master
| 2021-07-22T19:56:07.979749 | 2020-07-21T08:07:23 | 2020-07-21T08:07:23 | 4,924,139 | 3 | 2 | null | false | 2021-06-10T17:39:40 | 2012-07-06T12:12:02 | 2021-01-11T18:05:09 | 2021-06-10T17:39:39 | 85,969 | 8 | 12 | 4 |
JavaScript
| false | false |
from google.appengine.api import memcache
from google.appengine.ext import deferred
from agar.test import BaseTest
import main
def do_something():
return True
class TestBaseTest(BaseTest):
def test_assert_tasks_in_queue(self):
self.assertTasksInQueue(0)
deferred.defer(do_something, _name="hello_world")
self.assertTasksInQueue(1)
self.assertTasksInQueue(1, name='hello_world')
self.assertTasksInQueue(0, name='something else')
self.assertTasksInQueue(0, url='/foobar')
self.assertTasksInQueue(1, url='/_ah/queue/deferred')
self.assertTasksInQueue(1, queue_names='default')
self.assertTasksInQueue(0, queue_names='other')
def test_assert_memcache_items(self):
self.assertMemcacheItems(0)
memcache.set("foo", "bar")
self.assertMemcacheItems(1)
memcache.set("abc", "xyz")
self.assertMemcacheItems(2)
def test_assert_memcache_hits(self):
self.assertMemcacheHits(0)
memcache.get("foo")
self.assertMemcacheHits(0)
memcache.set("foo", "bar")
memcache.get("foo")
self.assertMemcacheHits(1)
memcache.get("foo")
self.assertMemcacheHits(2)
|
UTF-8
|
Python
| true | false | 1,249 |
py
| 936 |
test_base_test.py
| 465 | 0.651721 | 0.639712 | 0 | 50 | 23.96 | 61 |
FoolLuckyBoy/selenium_demo
| 2,284,922,637,333 |
f9cc01e4db89fcb264581e88ef2e855b0b983746
|
09bb6cd5e24a0a3363156591beb8e479c38ad994
|
/pathxuexi.py
|
9698a6f7d551e10bf96a9752103731b89fa87149
|
[] |
no_license
|
https://github.com/FoolLuckyBoy/selenium_demo
|
6b8b7174d8859d844037bce8c178a0a2d8b7309e
|
6bbe06da346893f848dee64ca2bd3a37276108c1
|
refs/heads/master
| 2020-04-30T16:50:46.316330 | 2019-03-21T14:47:16 | 2019-03-21T14:47:16 | 176,960,928 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
# print(__file__)
# Get the path of the currently running xx.py
curpath = os.path.realpath(__file__)  # absolute path, resolved per the OS
print(curpath)
dirpath = os.path.dirname(curpath)  # containing directory
print(dirpath)
casespath = os.path.join(dirpath, "cases")
print(casespath)
report = os.path.join(dirpath, "report", "result.html")
print(report)
# os.path.join only builds the string; opening a file whose folder is missing raises an error
reportx = os.path.join(dirpath, "reportx", "result.html")
print(reportx)
# Check whether the directory exists
a = os.path.exists(os.path.join(dirpath, "reporty"))
print(a)
if not a:
    # Create the directory
    os.mkdir(os.path.join(dirpath, "reporty"))
fp = open(os.path.join(dirpath, "reporty", "result.html"), "wb")  # write into the folder that now exists
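For comparison, a sketch of the directory-safe idiom: os.makedirs with exist_ok=True creates any missing parents and tolerates reruns (the "reportz" name is made up for this example):
safe_dir = os.path.join(dirpath, "reportz")
os.makedirs(safe_dir, exist_ok=True)  # creates missing parents, no error on rerun
with open(os.path.join(safe_dir, "result.html"), "wb") as fp2:
    fp2.write(b"<html></html>")  # placeholder content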
|
UTF-8
|
Python
| false | false | 661 |
py
| 10 |
pathxuexi.py
| 9 | 0.689165 | 0.689165 | 0 | 31 | 17.193548 | 57 |
sy-eggplant/nlp100
| 13,804,024,906,819 |
f283c471ae779795652d5a1fb2b52fdf5b23b93e
|
0f67f09746971a9f5e0c7bb17ce14dc66732ff5f
|
/02.py
|
ef841317e92b252b81acd2b10fd35eefaab64100
|
[] |
no_license
|
https://github.com/sy-eggplant/nlp100
|
ec4e1505845d0560ac4231906504665a1b0ac8fa
|
33c32ce13008b6f158d8b2f75755abc8bde973b7
|
refs/heads/master
| 2021-01-01T06:56:29.985279 | 2018-03-18T12:09:18 | 2018-03-18T12:09:18 | 97,554,446 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
str1 = 'パトカー'
str2 = 'タクシー'
# Interleave the two strings character by character, index style
str3 = ''
for i in range(1, 5):
    str3 += str1[i - 1]
    str3 += str2[i - 1]
print(str3)
# Same result, letting zip pair the characters
str3 = ''
for (a, b) in zip(str1, str2):
    str3 += a + b
print(str3)
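The same interleaving also fits on one line; a join over the zipped pairs gives identical output:
print(''.join(a + b for a, b in zip(str1, str2)))  # パタトクカシーー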
|
UTF-8
|
Python
| false | false | 200 |
py
| 53 |
02.py
| 47 | 0.538043 | 0.445652 | 0 | 15 | 11.266667 | 28 |
hopetambala/django-sustainable_development_goals
| 7,559,142,446,800 |
736d4cec847357a934ba82e3ed61101a6bd0da78
|
1db73a8600edcf3c731c6d7f17026af4f88f2445
|
/unsdg/urls.py
|
f2911efe8ce5f03165f7fa12527e32e1da8fb817
|
[
"MIT"
] |
permissive
|
https://github.com/hopetambala/django-sustainable_development_goals
|
2950a9f8704c73318d1c3d58948cbbc0d0cdfc49
|
8ffa849aa40df003db46bbacd739207c780a12c9
|
refs/heads/master
| 2022-12-11T05:29:07.460002 | 2019-01-24T23:15:40 | 2019-01-24T23:15:40 | 158,108,848 | 0 | 1 |
MIT
| false | 2022-12-08T01:29:26 | 2018-11-18T17:27:44 | 2019-01-24T23:15:45 | 2022-12-08T01:29:26 | 26,429 | 0 | 0 | 6 |
Python
| false | false |
from django.urls import path
from . import views
urlpatterns = [
path('', views.HomePageView.as_view(), name='home'),
path('cti/', views.CountryTargetIndicatorListView.as_view(), name='country_target_indicators'),
path('cti/<int:pk>/', views.IndicatorDetailView.as_view(), name='indicator_detail'),
#path('cti/<int:pk>/', views.CountryTargetIndicatorDetailView.as_view(), name='country_target_indicator_detail'), #possibly bad?
path('cti/new/', views.IndicatorCreateView.as_view(), name='indicator_new'),
#path('cti/new/', views.CountryTargetIndicatorCreateView.as_view(), name='country_target_indicator_new'),#possibly bad?
path('cti/<int:pk>/delete/', views.IndicatorDeleteView.as_view(), name='indicator_delete'),
path('cti/<int:pk>/update/', views.IndicatorUpdateView.as_view(), name='indicator_update'),
path('cti/filter/', views.CTIndicatorFilterView.as_view(), name='country_target_indicator_filter'),
path('about/', views.AboutPageView.as_view(), name='about'),
path('indicator_value_names/', views.IndicatorNameListView.as_view(), name='indicator_names'),
path('goals/', views.GoalListView.as_view(), name='goals'),
path('targets/', views.TargetListView.as_view(), name='targets'),
]
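Because every pattern above is named, other code can resolve URLs with reverse(); a sketch, valid only inside a configured Django project, and the paths shown assume these patterns are mounted at the site root:
from django.urls import reverse
detail_url = reverse('indicator_detail', kwargs={'pk': 1})   # -> '/cti/1/'
update_url = reverse('indicator_update', kwargs={'pk': 1})   # -> '/cti/1/update/'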
|
UTF-8
|
Python
| false | false | 1,233 |
py
| 78 |
urls.py
| 14 | 0.715328 | 0.715328 | 0 | 19 | 63.947368 | 131 |
NBGBZJ/xiecheng
| 2,448,131,394,195 |
0031437b1c00ec70b08c03078370a7b3d3de40a4
|
1221e5a7e015667fba128fe8dc07440c419e622f
|
/my_sql.py
|
aff0ca9c6aeb45f99e38ef7ec1aab40a631fb880
|
[] |
no_license
|
https://github.com/NBGBZJ/xiecheng
|
e81ec11e8ab2a10fb058e1f066e238d713a64fde
|
0280964757804bb438bea877310c03d8d0c28542
|
refs/heads/master
| 2021-01-10T15:48:33.369464 | 2015-11-10T16:41:04 | 2015-11-10T16:41:04 | 45,787,010 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import pymssql
from my_log import log_set
DB_CHARSET = 'utf8'
DB_HOST = '127.0.0.1'
DB_USER = 'root'
DB_PWD = 'fuhan'
DB_PORT = '3306'
DB_NAME = 'xx'
class MSSQL:
    """
    A thin wrapper around pymssql.
    The pymssql library can be downloaded here: http://www.lfd.uci.edu/~gohlke/pythonlibs/#pymssql
    When using it, enable the TCP/IP protocol in Sql Server Configuration Manager.
    Usage: see the module-level helpers (save_info, get_info, ...) below.
    """
    def __init__(self, host, user, pwd, db):
        self.host = host
        self.user = user
        self.pwd = pwd
        self.db = db
    def __GetConnect(self):
        """
        Open a connection.
        Returns: conn.cursor()
        """
        if not self.db:
            raise NameError("Database information is not set")
        self.conn = pymssql.connect(host=self.host, user=self.user, password=self.pwd, database=self.db, charset="utf8")
        cur = self.conn.cursor()
        if not cur:
            raise NameError("Failed to connect to the database")
        else:
            return cur
    def ExecQuery(self, sql):
        """Run a SELECT statement and return all rows."""
        cur = self.__GetConnect()
        cur.execute(sql)
        resList = cur.fetchall()
        # The connection must be closed once the query is done
        self.conn.close()
        return resList
    def ExecNonQuery(self, sql):
        """
        Run a non-query statement (INSERT/UPDATE/DELETE) and commit it.
        Call pattern:
            cur = self.__GetConnect()
            cur.execute(sql)
            self.conn.commit()
            self.conn.close()
        """
        cur = self.__GetConnect()
        cur.execute(sql)
        self.conn.commit()
        self.conn.close()
ms = MSSQL(host="localhost", user="root", pwd="fuhan", db="xx")
def save_info(flight, id):
    # INSERT is a non-query statement, so it goes through ExecNonQuery
    resList = ms.ExecNonQuery("INSERT INTO flight( flight,del_id) VALUES (%s,%s);" % (flight, str(id)))
    print(resList)
    return resList
def get_info_list():
    resList = ms.ExecQuery("SELECT * FROM flight;")
    return resList
def get_info(flight):
    resList = ms.ExecQuery("SELECT * FROM flight WHERE flight =%s;" % flight)
    return resList
def del_info(flight):
    # DELETE takes no column list: "DELETE FROM", not "DELETE * FROM"
    resList = ms.ExecNonQuery("DELETE FROM flight WHERE flight =%s;" % flight)
    return resList
save_info(44, '7890')
print(get_info(44))
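The %-formatted SQL above is open to quoting bugs and injection; pymssql also supports driver-side parameter substitution, so a safer variant looks like this (a sketch reusing the same credentials as the module):
def get_info_safe(flight):
    conn = pymssql.connect(host="localhost", user="root", password="fuhan",
                           database="xx", charset="utf8")
    cur = conn.cursor()
    cur.execute("SELECT * FROM flight WHERE flight = %s", (flight,))  # driver quotes the value
    rows = cur.fetchall()
    conn.close()
    return rows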
|
UTF-8
|
Python
| false | false | 2,214 |
py
| 22 |
my_sql.py
| 18 | 0.579024 | 0.56878 | 0 | 91 | 21.527473 | 116 |
MohammadMahdiOmid/Virtual-Irrigation
| 13,469,017,443,512 |
e671a675d8c48b01d16da85dadd7e6a7dcd5bb04
|
b7a52487d72de80ac1a74958b7c0489065f81ad6
|
/Image_processing/import_image_satellite/landsat.py
|
6ef6b59fec6eee9d471d5b01aaa593cc48b928f7
|
[] |
no_license
|
https://github.com/MohammadMahdiOmid/Virtual-Irrigation
|
a69775dbf8fb284994255d6b8dcc6244547936fb
|
e3833b5717290425b169c7a1d5c75b6f593d9962
|
refs/heads/master
| 2023-08-21T19:19:47.966353 | 2021-10-14T06:55:56 | 2021-10-14T06:55:56 | 391,267,356 | 3 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from skimage import io
# load images
blue = io.imread("E:/Company/KAVOSHGARAN/Virtual Irrigation/Image/Landsat/Blue.tif")
green = io.imread("E:/Company/KAVOSHGARAN/Virtual Irrigation/Image/Landsat/Green.tif")
red = io.imread("E:/Company/KAVOSHGARAN/Virtual Irrigation/Image/Landsat/Red.tif")
nir = io.imread("E:/Company/KAVOSHGARAN/Virtual Irrigation/Image/Landsat/NIR.tif")
# load all images
# landsat = io.imread_collection("E:/Company/KAVOSHGARAN/Virtual Irrigation/Image/Landsat/*.tif")
# print(len(landsat))
# print(landsat[3])
# shape of images
print(blue.shape)
print(green.shape)
print(red.shape)
print(nir.shape)
# change to array
b_arr = np.array(blue).astype(float)
g_arr = np.array(green).astype(float)
r_arr = np.array(red).astype(float)
n_arr = np.array(nir).astype(float)
# print(b_arr,g_arr,r_arr,n_arr)
# print(b_arr[100][25])
# dimension
print(b_arr.ndim)
# type
print(g_arr.dtype)
# size
print(r_arr.size)
# itemsize
print(n_arr.itemsize)
# Normalize the data to the [0..1] range
b_arr_normal = (b_arr - np.min(b_arr)) / (np.max(b_arr) - np.min(b_arr))
g_arr_normal = (g_arr - np.min(g_arr)) / (np.max(g_arr) - np.min(g_arr))
r_arr_normal = (r_arr - np.min(r_arr)) / (np.max(r_arr) - np.min(r_arr))
# n_arr_normal = (n_arr - np.min(n_arr)) / (np.max(n_arr) - np.min(n_arr))
# Stack the channels into an RGB image
nor_stack = np.stack([r_arr_normal, g_arr_normal, b_arr_normal], axis=2)
# Show the landsat images
fig1 = plt.gcf()
plt.figure(figsize=(20, 20))
plt.title("landsat 3 images ", fontsize=30)
plt.imshow(nor_stack)
plt.axis('off')
plt.savefig('landsat.png')
plt.show()
|
UTF-8
|
Python
| false | false | 1,655 |
py
| 36 |
landsat.py
| 2 | 0.700302 | 0.687613 | 0 | 56 | 28.553571 | 97 |
rronan/IntPhys-Baselines
| 1,520,418,464,901 |
6ff90a8f84bb993cc8559cbe5102b6707961cdc9
|
8023d3411451b107c4daafb03074716dc2d60358
|
/models/linear_rnn.py
|
81cbaf35e524590e563b5f33ce14131d5bea94c1
|
[] |
no_license
|
https://github.com/rronan/IntPhys-Baselines
|
f2794fb4d85f51f3e9bbb75a2cb7a7204a1a6982
|
9b13d79b4d5c1db56c7a089e0b6b87609b07ff84
|
refs/heads/master
| 2021-04-12T09:16:10.976853 | 2019-11-18T22:10:22 | 2019-11-18T22:10:22 | 126,332,241 | 32 | 5 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import torch.nn as nn
from torch.autograd import Variable
import torch
import torch.optim as optim
import os
from pydoc import locate
from .encoder import Encoder
from .decoder import Decoder
from .model import Model
import utils
class Linear_rnn(nn.Module, Model):
def __init__(self, opt, test=False):
super(Linear_rnn, self).__init__()
self.__name__ = "linear_rnn"
self.input_len, self.target_len = opt.input_len, opt.target_len
self.latentDim = opt.latentDim
self.bsz = 1 if test else opt.bsz
self.input = torch.FloatTensor(opt.bsz * opt.input_len, opt.nc_in, 64, 64)
self.input = Variable(self.input)
self.target = torch.FloatTensor(opt.bsz * opt.target_len, opt.nc_out, 64, 64)
self.target = Variable(self.target)
self.h0 = Variable(torch.randn(opt.n_layer, 1, opt.n_hidden))
if opt.rnn == "LSTM":
self.h0 = (self.h0, Variable(torch.randn(opt.n_layer, 1, opt.n_hidden)))
self.criterion = nn.MSELoss()
self.recurrent_module = locate("torch.nn.%s" % opt.rnn)(
opt.latentDim, opt.n_hidden, opt.n_layer
)
        # must this be done at the end of __init__?
self.optimizer = optim.Adam(
self.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999)
)
# def encoder and decoder as volatile
self.encoder = Encoder(opt)
self.decoder = Decoder(opt)
def forward(self, x, h):
x, h = self.recurrent_module(x, h)
return x, h
    def gpu(self):
        self.cuda()
        self.input = self.input.cuda()
        self.target = self.target.cuda()
        if isinstance(self.h0, tuple):
            # the LSTM case keeps (h0, c0); tuples are immutable, so rebuild it
            self.h0 = (self.h0[0].cuda(), self.h0[1].cuda())
        else:
            self.h0 = self.h0.cuda()
def step(self, batch, set_):
self.input.data.copy_(batch[0])
self.target.data.copy_(batch[1])
x = self.encoder(self.input).detach()
x = x.view(-1, self.input_len, self.latentDim)
y = self.encoder(self.target).detach()
y = y.view(-1, self.input_len, self.latentDim)
self.out, _ = self.forward(x, self.h0)
err = self.criterion.forward(self.out, y)
if set_ == "train":
self.zero_grad()
err.backward()
self.optimizer.step()
return {"err": err.data[0]}
def output(self):
out = self.decoder(self.out.view(-1, self.latentDim)).data
d1, d2, d3 = out.size(1), out.size(2), out.size(3)
return out.view(-1, self.target_len, d1, d2, d3)
    def load(self, d):
        for e in d:
            if e[0] == self.__name__:
                path = e[-1]
                print("loading %s: %s" % (self.__name__, path))
                self.recurrent_module.load_state_dict(torch.load(path))
            if e[0] == "encoder":
                path = e[-1]
                print("loading encoder: %s" % path)
                self.encoder.load_state_dict(
                    utils.filter(torch.load(path), ["resnet_features", "encoder"])
                )
            if e[0] == "decoder":
                path = e[-1]
                print("loading decoder: %s" % path)
                self.decoder.load_state_dict(
                    utils.filter(torch.load(path), ["decoder", "deconv"])
                )
def save(self, path, epoch):
f = open(os.path.join(path, "%s.txt" % self.__name__), "w")
f.write(str(self))
f.close()
torch.save(
self.recurrent_module.state_dict(),
os.path.join(path, "%s_%d.pth" % (self.__name__, epoch)),
)
    def score(self, batch):
self.input.data.copy_(batch[0])
self.target.data.copy_(batch[1])
x = self.encoder(self.input).detach()
x = x.view(-1, self.input_len, self.latentDim)
y = self.encoder(self.target).detach()
y = y.view(-1, self.input_len, self.latentDim)
self.out, _ = self.forward(x, self.h0)
err = self.criterion.forward(self.out, y)
return err.data[0]
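A standalone sketch of the recurrence wiring alone, independent of Encoder/Decoder (whose option fields are not visible here); it only illustrates the tensor shapes the recurrent module expects:
import torch
import torch.nn as nn
rnn = nn.GRU(input_size=128, hidden_size=256, num_layers=1)  # latentDim, n_hidden, n_layer
x = torch.randn(5, 1, 128)   # (seq_len, batch, input_size) -- seq-first by default
h0 = torch.randn(1, 1, 256)  # (n_layer, batch, n_hidden)
out, h = rnn(x, h0)
print(out.shape)             # torch.Size([5, 1, 256])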
|
UTF-8
|
Python
| false | false | 4,093 |
py
| 18 |
linear_rnn.py
| 16 | 0.542389 | 0.527242 | 0 | 114 | 34.903509 | 85 |
eiphy/Tools-Package
| 18,090,402,285,245 |
e034f021cc93fe3875d3bb6f5f3ba8bae3842462
|
c0d5ada8a31912c338d4e4dbda47a109e3f6027d
|
/SuSyNAT/python2/SuSyNAT/secollect.py
|
025401c167553e972c7da71cebdfeb0a09264c7f
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/eiphy/Tools-Package
|
1cf8d3e727b73565472e84692c3a78460dd6fd6e
|
816c39016d25f32e3d2a312dbfa58cc297caf780
|
refs/heads/master
| 2020-04-28T16:22:28.372899 | 2019-05-22T09:26:49 | 2019-05-22T09:26:49 | 175,408,277 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import read_split
import stdize
import copy
def secolle(filename, filew, mode=0):
auto_list = read_split.ReadStd(filename)[0]
state_list, event_list = read_split.trans_split(auto_list[4])
if mode == 0:
state = 'states = ' + ', '.join(state_list) + '\n'
event = 'alphabet = ' + ', '.join(event_list) + '\n'
with open(filew, 'w') as fileobject:
fileobject.write(state)
fileobject.write(event)
else:
auto_list[0] = copy.deepcopy(state_list)
auto_list[1] = copy.deepcopy(event_list)
auto_list[2] = copy.deepcopy(event_list)
auto_list[3] = copy.deepcopy(event_list)
stdize.std_write(filew, auto_list)
# secolle('EOperation.cfg', 'Test.cfg')
|
UTF-8
|
Python
| false | false | 746 |
py
| 16 |
secollect.py
| 13 | 0.599196 | 0.588472 | 0 | 26 | 27.730769 | 65 |
sansbacon/dkbestball
| 5,016,521,846,215 |
5174e80399abfc11365494cacbad79656fea0347
|
b55d2ce829f110da103f7daacefdff25045f5167
|
/dkbestball/analyzer.py
|
d7f8de17f6dc65173f98be26df427e5f951209d3
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/sansbacon/dkbestball
|
9fe446722b9b8634757bc51bc441334f0c177d0f
|
28bf83baf5dfc137e589f33ce4333cf841ede87f
|
refs/heads/main
| 2023-01-24T18:55:54.412041 | 2020-12-12T02:54:06 | 2020-12-12T02:54:06 | 302,356,206 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from functools import lru_cache
import logging
import pickle
import pandas as pd
class Analyzer:
"""Encapsulates analysis / summary of rosters and results"""
CONTEST_CODES = {
'3m': '3-Player',
'6m': '6-Player',
'12m': '12-Player',
'pa': 'Action',
'm': 'Millionaire',
't': 'Tournament'
}
DATA_COLUMNS = [
'entry_keys', 'contest_key', 'contest_name', 'contest_size',
'entry_fee', 'draftgroup_id', 'winnings', 'leader_points', 'my_place',
'my_points', 'my_entry_key', 'myroster', 'contest_type'
]
FINANCIAL_COLUMNS = [
'contest_type', 'entry_fee', 'Entries', 'Paid', 'Won', 'ROI'
]
OWNERSHIP_COLUMNS = [
'displayName', 'position', 'teamAbbreviation', 'n', 'tot', 'pct'
]
ROSTER_COLUMNS = [
'draftGroupId', 'contestKey', 'entryKey', 'lineupId', 'userName',
'userKey', 'playerId', 'playerDkId', 'displayName', 'position',
'teamAbbreviation', 'draftableId'
]
STANDINGS_COLUMNS = [
'contest_key', 'contest_name', 'contest_type', 'entry_fee',
'contest_size', 'my_place', 'winnings', 'my_points', 'leader_points'
]
def __init__(self, username, datadir):
logging.getLogger(__name__).addHandler(logging.NullHandler())
self.username = username
self.datadir = datadir
self.mydata_path = self.datadir / 'mydata.pkl'
self.data = pd.DataFrame(self._load_data())
self.data['contest_type'] = self.data['contest_name'].apply(
self.contest_type)
def _filter_rosters(self, df, contests):
"""Filters roster by contest(s)"""
return df.loc[df.contestKey.isin(contests), :]
def _load_data(self):
"""Loads data file"""
with self.mydata_path.open('rb') as f:
return pickle.load(f)
@lru_cache(maxsize=128)
def _tournament_keys(self, contest_type, keycol):
"""Gets key column for given contest type"""
return self.data.loc[self.data['contest_type'] == contest_type, keycol]
def contest_type(self, s):
"""Gets contest type from contest name"""
val = 'Unknown'
if 'Tournament' in s:
val = 'Tournament'
if 'Millionaire' in s:
val = 'Tournament'
if 'Play-Action' in s:
val = 'Tournament'
if '12-Player' in s:
val = '12-Man'
if '6-Player' in s:
val = '6-Man'
if '3-Player' in s:
val = '3-Man'
return val
def financial_summary(self):
"""Summarizes financial results"""
std = self.standings()
gb = std.groupby(['contest_type', 'entry_fee'], as_index=False)
aggs = (('contest_key', 'count'), ('entry_fee', 'sum'), ('winnings',
'sum'))
summ = gb.agg(Entries=aggs[0], Paid=aggs[1], Won=aggs[2])
summ['ROI'] = ((summ.Won - summ.Paid) / summ.Paid).mul(100).round(1)
return summ
@lru_cache(maxsize=128)
def myrosters(self):
"""
draftGroupId 37605
contestKey 89460375
entryKey 2062649745
lineupId -1
userName sansbacon
userKey 725157
playerId 380750
playerDkId 20426
displayName Cam Newton
position QB
teamAbbreviation NE
draftableId 14885230
"""
return pd.concat(
[pd.DataFrame(row.myroster) for row in self.data.itertuples()])
def ownership(self, df=None):
"""Gets player ownership
Args:
df (DataFrame): matches myrosters
Returns:
DataFrame with columns
displayName, position, teamAbbreviation,
n, tot, pct
"""
if df is None:
df = self.myrosters()
grpcols = ['displayName', 'position', 'teamAbbreviation']
gb = df.groupby(grpcols, as_index=False)
summ = gb.agg(n=('userName', 'count'))
summ['tot'] = len(df['entryKey'].unique())
summ['pct'] = (summ['n'] / summ['tot']).mul(100).round(1)
return summ.sort_values('pct', ascending=False)
def positional_ownership(self, df=None, pos='QB', thresh=10):
"""Gets positional ownership"""
if df is None:
df = self.ownership()
q = f'position == "{pos}" and pct > {thresh}'
return df.query(q)
def standings(self):
"""Gets standings dataframe"""
return self.data.loc[:, self.STANDINGS_COLUMNS]
def standings_summary(self, contest_type):
"""Gets standing summary for contest type
"""
std = self.standings()
        std = std.loc[std.contest_name.str.
                      contains(self.CONTEST_CODES.get(contest_type)), :]
return (std['my_place'].value_counts().reset_index().sort_values(
'index').set_axis(['place', 'n_teams'], axis=1).assign(
pct=lambda df_: round(df_.n_teams / len(std), 2)))
def tournament_contests(self):
"""Gets tournament contests"""
return self._tournament_keys(contest_type='Tournament',
keycol='contest_key')
def tournament_entries(self):
return self._tournament_keys(contest_type='Tournament',
keycol='my_entry_key')
def tournament_ownership(self):
"""Shows tournament ownership"""
return self.ownership(self.tournament_rosters())
def tournament_rosters(self):
df = self.myrosters()
return self._filter_rosters(df, self.tournament_contests())
if __name__ == '__main__':
pass
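A hypothetical session (every path and value below is an assumption; it requires <datadir>/mydata.pkl to hold a pickled list of dicts with the DATA_COLUMNS fields):
from pathlib import Path
a = Analyzer(username='sansbacon', datadir=Path('/tmp/dkdata'))
print(a.financial_summary())         # ROI per contest type and entry fee
print(a.standings_summary('3m'))     # finish distribution in 3-Player contests
print(a.positional_ownership(pos='RB', thresh=15))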
|
UTF-8
|
Python
| false | false | 5,908 |
py
| 17 |
analyzer.py
| 11 | 0.533514 | 0.518957 | 0 | 176 | 32.568182 | 79 |
congjianting/video_analyst
| 1,314,260,035,875 |
bb06ea7683bc253b18522039975f72f3f2e8e480
|
3e7601a4ca1d261516a63998fc3b656dd20dcd6f
|
/videoanalyst/evaluation/davis_benchmark/__init__.py
|
14ddb3ba7a596256c3cb9da0829958be4115437a
|
[
"MIT"
] |
permissive
|
https://github.com/congjianting/video_analyst
|
393595b5985d131f214244eccaedfc35245dd997
|
0148139ba38fd03c0a6480a5b312d08bde746561
|
refs/heads/master
| 2022-04-14T10:03:27.225544 | 2020-04-09T08:16:48 | 2020-04-09T08:16:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from .benckmark_helpler import load_dataset, label2color, labelcolormap, MultiBatchIouMeter
from .log_helper import add_file_handler, init_log
from .evaluation_method import davis2017_eval
from .davis2017.utils import overlay_semantic_mask
|
UTF-8
|
Python
| false | false | 238 |
py
| 18 |
__init__.py
| 13 | 0.844538 | 0.806723 | 0 | 4 | 58.75 | 90 |
TactileUniverse/3D-Printed-Galaxy-Software
| 12,919,261,677,476 |
1bb9bb051840a2006c04bef3448dece8a060a1dd
|
90351d9f82dbee09a25d586e9cf6d381ad00c10d
|
/make_model.py
|
dddabdba792150042063c64507798cecffd97e25
|
[
"MIT"
] |
permissive
|
https://github.com/TactileUniverse/3D-Printed-Galaxy-Software
|
3cd4a231b3f24061c7221c52c28ca6ca8a970313
|
67efa40634393dd511ea941792f7eb82ad6b2c5f
|
refs/heads/master
| 2021-01-11T20:03:32.967566 | 2020-04-22T08:55:44 | 2020-04-22T08:55:44 | 79,459,697 | 4 | 1 |
MIT
| false | 2020-04-22T08:55:46 | 2017-01-19T14:11:11 | 2020-02-24T10:26:47 | 2020-04-22T08:55:44 | 4,257 | 3 | 1 | 0 |
Python
| false | false |
import bpy
import bmesh
import json
import sys
import os
argv = sys.argv
if '--' not in argv:
raise ValueError('You must pass a configuration file on the command line after ` -- `')
argv = argv[argv.index('--') + 1:]
if len(argv) == 0:
raise ValueError('No configuration file passed in')
elif len(argv) > 1:
raise ValueError('Only pass in one configuration file')
with open(argv[0]) as config_file:
config = json.load(config_file)
if 'input_file_path' not in config:
raise ValueError('the config file must contain the keyword `input_file_path`')
# set defaults
config.setdefault('plane_height', 112)
config.setdefault('emboss_plane_keywords', {})
config.setdefault('output_path', os.getcwd())
config.setdefault('output_name', 'output')
config.setdefault('stl_keywords', {})
input_name = os.path.basename(config['input_file_path'])
input_dir = os.path.dirname(config['input_file_path'])
if input_dir == '':
input_dir = os.getcwd()
# import image as plane
bpy.ops.import_image.to_plane(
files=[{'name': input_name}],
directory=input_dir,
height=config['plane_height'],
relative=False
)
def view3d_find(return_area=False):
    # returns first 3d view, normally we get from context
    for area in bpy.context.window.screen.areas:
        if area.type == 'VIEW_3D':
            v3d = area.spaces[0]
            rv3d = v3d.region_3d
            for region in area.regions:
                if region.type == 'WINDOW':
                    if return_area:
                        return region, rv3d, v3d, area
                    return region, rv3d, v3d
    # keep the arity consistent with the requested return shape
    if return_area:
        return None, None, None, None
    return None, None, None
region, rv3d, v3d, area = view3d_find(True)
override = {
'scene': bpy.context.scene,
'screen': bpy.context.screen,
'active_object': bpy.context.active_object,
'window': bpy.context.window,
'blend_data': bpy.context.blend_data,
'region': region,
'area': area,
'space': v3d
}
name = bpy.context.active_object.name
bpy.ops.object.editmode_toggle()
bpy.ops.object.emboss_plane(override, **config['emboss_plane_keywords'])
bpy.ops.object.editmode_toggle()
base_path = os.path.join(
config['output_path'],
config['output_name']
)
bpy.ops.file.pack_all()
blend_file_path = '{0}.blend'.format(base_path)
bpy.ops.wm.save_mainfile(
filepath=blend_file_path,
check_existing=False
)
stl_file_path = '{0}.stl'.format(base_path)
bpy.ops.export_mesh.stl(
filepath=stl_file_path,
check_existing=False,
**config['stl_keywords']
)
bpy.ops.wm.quit_blender()
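The defaults above imply a configuration of the following shape; a sketch that writes one out (the image path is an assumption, and emboss_plane_keywords is forwarded verbatim to the add-on's operator):
import json
config = {
    "input_file_path": "galaxy.png",  # required key; hypothetical image
    "plane_height": 112,
    "emboss_plane_keywords": {},      # operator-specific overrides
    "output_path": "./out",
    "output_name": "galaxy_model",
    "stl_keywords": {},
}
with open("config.json", "w") as f:
    json.dump(config, f, indent=2)
# then roughly: blender --background --python make_model.py -- config.json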
|
UTF-8
|
Python
| false | false | 2,527 |
py
| 12 |
make_model.py
| 9 | 0.656114 | 0.646221 | 0 | 96 | 25.322917 | 91 |
tfranzel/drf-spectacular
| 14,783,277,436,712 |
a55fc311ea09484a64265a989276869a61374b19
|
09d8e7ecf237cf339cabb21292eaf79458fea6d9
|
/tests/test_examples.py
|
713107888df06289c2cd133325928fc7f56f3829
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/tfranzel/drf-spectacular
|
f79c2f6561a0d4f72c3ed625188e55f68b0e3a48
|
c56f62caec338226bc28953b9e677c7f883dcf76
|
refs/heads/master
| 2023-08-23T23:11:07.659394 | 2023-08-14T23:13:07 | 2023-08-14T23:13:07 | 244,234,763 | 1,824 | 260 |
BSD-3-Clause
| false | 2023-09-10T12:50:48 | 2020-03-01T22:36:54 | 2023-09-08T23:10:11 | 2023-09-10T12:50:48 | 1,788 | 1,817 | 217 | 67 |
Python
| false | false |
import pytest
from rest_framework import __version__ as DRF_VERSION # type: ignore[attr-defined]
from rest_framework import generics, pagination, serializers, status, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import (
OpenApiExample, OpenApiParameter, OpenApiResponse, extend_schema, extend_schema_serializer,
)
from tests import assert_schema, generate_schema
from tests.models import SimpleModel, SimpleSerializer
@extend_schema_serializer(
examples=[
OpenApiExample(
'Serializer A Example RO',
value={"field": 1},
response_only=True,
),
OpenApiExample(
'Serializer A Example WO',
value={"field": 2},
request_only=True,
),
OpenApiExample(
'Serializer A Example RW',
summary='Serializer A Example RW custom summary',
value={'field': 3}
),
OpenApiExample(
'Serializer A Example RW External',
external_value='https://example.com/example_a.txt',
media_type='application/x-www-form-urlencoded'
)
]
)
class ASerializer(serializers.Serializer):
field = serializers.IntegerField()
class BSerializer(serializers.Serializer):
field = serializers.IntegerField()
@extend_schema_serializer(
examples=[
OpenApiExample(
'Serializer C Example RO',
value={"field": 111},
response_only=True,
),
OpenApiExample(
'Serializer C Example WO',
value={"field": 222},
request_only=True,
),
]
)
class CSerializer(serializers.Serializer):
field = serializers.IntegerField()
@extend_schema(
responses=BSerializer,
examples=[OpenApiExample("Example ID 1", value=1, parameter_only=('id', 'path'))]
)
class ExampleTestWithExtendedViewSet(viewsets.GenericViewSet):
serializer_class = ASerializer
queryset = SimpleModel.objects.none()
@extend_schema(
request=ASerializer,
responses={
201: BSerializer,
400: OpenApiTypes.OBJECT,
403: OpenApiTypes.OBJECT,
},
examples=[
OpenApiExample(
'Create Example RO',
value={'field': 11},
response_only=True,
),
OpenApiExample(
'Create Example WO',
value={'field': 22},
request_only=True,
),
OpenApiExample(
'Create Example RW',
value={'field': 33},
),
OpenApiExample(
'Create Error 403 Integer Example',
value={'field': 'error (int)'},
response_only=True,
status_codes=[status.HTTP_403_FORBIDDEN],
),
OpenApiExample(
'Create Error 403 String Example',
value={'field': 'error (str)'},
response_only=True,
status_codes=['403']
),
],
)
def create(self, request, *args, **kwargs):
super().create(request, *args, **kwargs) # pragma: no cover
@extend_schema(
parameters=[
OpenApiParameter(
name="artist",
description="Filter by artist",
required=False,
type=str,
examples=[
OpenApiExample(
"Artist Query Example 1",
value="prince",
description="description for artist query example 1"
),
OpenApiExample(
"Artist Query Example 2",
value="miles davis",
description="description for artist query example 2"
)
]
),
],
responses=CSerializer,
)
def list(self, request):
return Response() # pragma: no cover
@extend_schema(
examples=[
OpenApiExample(
"Example ID 2",
value=2,
parameter_only=('id', OpenApiParameter.PATH)
)
]
)
def retrieve(self, request):
return Response() # pragma: no cover
@action(detail=False, methods=['GET'])
def raw_action(self, request):
return Response() # pragma: no cover
@extend_schema(responses=BSerializer)
@action(detail=False, methods=['POST'])
def override_extend_schema_action(self, request):
return Response() # pragma: no cover
def test_examples(no_warnings):
assert_schema(
generate_schema('schema', ExampleTestWithExtendedViewSet),
'tests/test_examples.yml',
)
@pytest.mark.skipif(DRF_VERSION < '3.12', reason='DRF pagination schema broken')
def test_example_pagination(no_warnings):
class PaginatedExamplesViewSet(ExampleTestWithExtendedViewSet):
pagination_class = pagination.LimitOffsetPagination
schema = generate_schema('e', PaginatedExamplesViewSet)
operation = schema['paths']['/e/']['get']
assert operation['responses']['200']['content']['application/json']['examples'] == {
'SerializerCExampleRO': {
'value': {
'count': 123,
'next': 'http://api.example.org/accounts/?offset=400&limit=100',
'previous': 'http://api.example.org/accounts/?offset=200&limit=100',
'results': [{'field': 111}],
},
'summary': 'Serializer C Example RO'
}
}
def test_example_request_response_listed_examples(no_warnings):
@extend_schema(
request=ASerializer(many=True),
responses=ASerializer(many=True),
examples=[
OpenApiExample('Ex', {'id': '1234'})
]
)
class XView(generics.CreateAPIView):
pass
schema = generate_schema('e', view=XView)
operation = schema['paths']['/e']['post']
assert operation['requestBody']['content']['application/json'] == {
'schema': {'type': 'array', 'items': {'$ref': '#/components/schemas/A'}},
'examples': {'Ex': {'value': [{'id': '1234'}]}}
}
assert operation['responses']['201']['content']['application/json'] == {
'schema': {'type': 'array', 'items': {'$ref': '#/components/schemas/A'}},
'examples': {'Ex': {'value': [{'id': '1234'}]}}
}
def test_examples_list_detection_on_non_200_decoration(no_warnings):
class ExceptionSerializer(serializers.Serializer):
api_status_code = serializers.CharField()
extra = serializers.DictField(required=False)
@extend_schema(
responses={
200: SimpleSerializer,
400: OpenApiResponse(
response=ExceptionSerializer,
examples=[
OpenApiExample(
"Date parse error",
value={"api_status_code": "DATE_PARSE_ERROR", "extra": {"details": "foobar"}},
status_codes=['400']
)
],
),
},
)
class XListView(generics.ListAPIView):
model = SimpleModel
serializer_class = SimpleSerializer
pagination_class = pagination.LimitOffsetPagination
schema = generate_schema('/x/', view=XListView)
# regular response listed/paginated
assert schema['paths']['/x/']['get']['responses']['200']['content']['application/json'] == {
'schema': {'$ref': '#/components/schemas/PaginatedSimpleList'}
}
# non-200 error response example NOT listed/paginated
assert schema['paths']['/x/']['get']['responses']['400']['content']['application/json'] == {
'examples': {
'DateParseError': {
'summary': 'Date parse error',
'value': {'api_status_code': 'DATE_PARSE_ERROR', 'extra': {'details': 'foobar'}}
}
},
'schema': {'$ref': '#/components/schemas/Exception'},
}
def test_inherited_status_code_from_response_container(no_warnings):
@extend_schema(
responses={
400: OpenApiResponse(
response=SimpleSerializer,
examples=[
# prior to the fix this required the argument status_code=[400]
# as the code was not passed down and the filtering sorted it out.
OpenApiExample("an example", value={"id": 3})
],
),
},
)
class XListView(generics.ListAPIView):
model = SimpleModel
serializer_class = SimpleSerializer
schema = generate_schema('/x/', view=XListView)
assert schema['paths']['/x/']['get']['responses']['400']['content']['application/json'] == {
'schema': {'$ref': '#/components/schemas/Simple'},
'examples': {'AnExample': {'value': {'id': 3}, 'summary': 'an example'}}
}
def test_examples_with_falsy_values(no_warnings):
@extend_schema(
responses=OpenApiResponse(
description='something',
response=OpenApiTypes.JSON_PTR,
examples=[
OpenApiExample('one', value=1),
OpenApiExample('empty-list', value=[]),
OpenApiExample('false', value=False),
OpenApiExample('zero', value=0),
OpenApiExample('empty'),
],
),
)
class XListView(generics.ListAPIView):
model = SimpleModel
serializer_class = SimpleSerializer
schema = generate_schema('/x/', view=XListView)
assert schema['paths']['/x/']['get']['responses']['200']['content']['application/json']['examples'] == {
'One': {'summary': 'one', 'value': 1},
'Empty-list': {'summary': 'empty-list', 'value': []},
'False': {'summary': 'false', 'value': False},
'Zero': {'summary': 'zero', 'value': 0},
'Empty': {'summary': 'empty'},
}
@pytest.mark.skipif(DRF_VERSION < '3.12', reason='DRF pagination schema broken')
def test_plain_pagination_example(no_warnings):
class PlainPagination(pagination.LimitOffsetPagination):
""" return a (unpaginated) basic list, while other might happen in the headers """
def get_paginated_response_schema(self, schema):
return schema
class PaginatedExamplesViewSet(ExampleTestWithExtendedViewSet):
pagination_class = PlainPagination
schema = generate_schema('e', PaginatedExamplesViewSet)
operation = schema['paths']['/e/']['get']
assert operation['responses']['200']['content']['application/json']['examples'] == {
'SerializerCExampleRO': {
'value': [{'field': 111}],
'summary': 'Serializer C Example RO'
}
}
|
UTF-8
|
Python
| false | false | 10,952 |
py
| 136 |
test_examples.py
| 89 | 0.556337 | 0.544375 | 0 | 322 | 33.012422 | 108 |
JeffreyCA/dashlane-to-keepass
| 10,952,166,652,740 |
45b8bc12b10b1e4099d2dd765f1f2a16f775a171
|
def97878bef58f8b4be7d39c48e7895171672ccf
|
/script.py
|
2810c11d966f5116292727be1d567f6237beeacc
|
[] |
no_license
|
https://github.com/JeffreyCA/dashlane-to-keepass
|
207bce3c3c5385fee358a4bf213b471006faa25e
|
2c600781507105899a105659a5b855b6486b6d70
|
refs/heads/master
| 2020-03-22T05:46:01.319909 | 2019-03-29T15:19:07 | 2019-03-29T15:19:07 | 139,588,705 | 2 | 0 | null | false | 2019-03-29T15:19:08 | 2018-07-03T13:36:56 | 2019-03-29T04:01:04 | 2019-03-29T15:19:08 | 12 | 1 | 0 | 1 |
Python
| false | null |
import argparse
import csv
import io
import os.path
import re
import sys
import traceback
import validators
import xml.etree.ElementTree as ET
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
# Force UTF-8 encoding on Python 2.x
try:
reload(sys)
sys.setdefaultencoding('utf8')
except:
pass
class PWEntry:
def __init__(self, title, url, username, secondary, email, password, note):
self.title = title
self.url = url
self.username = username
self.secondary = secondary
self.email = email
self.password = password
self.note = note
def hasDoubleQuotes(text):
return '"' in text
def isValidSite(text):
return validators.url(text) or validators.url('https://' + text) or validators.domain(text) or validators.ip_address.ipv4(text) or validators.ip_address.ipv6(text)
def readCsv(filename, verbose = False):
with io.open(filename, 'rt', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile, quotechar='"', delimiter=',', doublequote=False)
ambiguousEntries = []
entries = []
processed = 0
for row in reader:
try:
if len(row) >= 7:
name = row[0]
site = row[1]
user = row[2]
email = row[3]
secondary = row[4]
password = row[5]
note = row[6]
if not user:
if email:
user = email
email = ''
elif secondary:
user = secondary
secondary = ''
if hasDoubleQuotes(note):
note = note.replace('"', '')
entry = PWEntry(name, site, user, secondary, email, password, note)
if hasDoubleQuotes(email) or hasDoubleQuotes(password) or (len(row) > 7 and hasDoubleQuotes(note)):
if verbose:
print('Skipped: ' + site)
ambiguousEntries.append(entry)
continue
if isValidSite(site):
processed += 1
entries.append(PWEntry(name, site, user, secondary, email, password, note))
elif verbose:
print('Skipped[0]: ' + str(row))
except:
print(traceback.format_exc())
return entries, ambiguousEntries
def writeEntries(filename, entries, groupName = 'General', verbose = False):
processed = 0
pwlist = ET.Element('pwlist')
for entry in entries:
processed += 1
pwentry = ET.SubElement(pwlist, 'pwentry')
ET.SubElement(pwentry, 'group').text = groupName
ET.SubElement(pwentry, 'title').text = entry.title
ET.SubElement(pwentry, 'username').text = entry.username
ET.SubElement(pwentry, 'url').text = entry.url
ET.SubElement(pwentry, 'password').text = entry.password
note = ''
if entry.email:
note += 'email: ' + entry.email
if entry.secondary:
note += '\nsecondary: ' + entry.secondary
if entry.note:
# Append note after above email
if note:
note += '\n'
note += entry.note
if note:
ET.SubElement(pwentry, 'notes').text = note
if verbose:
print('Processing ' + entry.url)
tree = ET.ElementTree(pwlist)
indent(pwlist)
xml_str = ET.tostring(pwlist).decode()
xml_str = escape_quotes(xml_str)
text_file = open(filename, 'w')
text_file.write(xml_str)
text_file.close()
if verbose:
print('')
print('Successfully converted ' + str(processed) + ' password entries.')
def html_escape(text):
    quotation_escape_table = {
        "'": "&#39;"
    }
return "".join(quotation_escape_table.get(c, c) for c in text)
def escape_tag(m):
tag = m.group(1)
content = m.group(2)
return '<' + tag + '>' + html_escape(content) + '</' + tag + '>'
def escape_quotes(xml_str):
replaced_text = re.sub(r'<(\w+)>(.+?)</\w+>', escape_tag, xml_str)
return replaced_text
def outputAmbiguousEntries(ambiguousEntries):
if ambiguousEntries:
print('The following password entries were found to be ambiguous due to double quotation marks (") in its fields.')
print('Please manually add the passwords for these sites:\n')
for entry in ambiguousEntries:
print(entry.url)
print('')
def indent(elem, level = 0):
"""https://stackoverflow.com/a/33956544"""
i = '\n' + level * '\t'
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + '\t'
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def main():
parser = argparse.ArgumentParser(description='Converts Dashlane-exported CSV files to KeePass 1.x XML format. Supports password entries with both username and email fields.')
parser.add_argument('csv_file', metavar='input_csv', type=str,
help='Dashlane-exported CSV input file')
parser.add_argument('xml_file', metavar='output_xml', type=str,
help='KeePass 1.x XML output file')
parser.add_argument('-g', '--group', type=str, default='General',
help='name of group the passwords are stored under (by default the group is \'General\')')
parser.add_argument('-v', '--verbose', action='store_true',
help='enable verbose logging')
args = parser.parse_args()
csv_file = args.csv_file
xml_file = args.xml_file
group = args.group
verbose = args.verbose
if not os.path.exists(csv_file):
print('Input file ' + csv_file + ' does not exist!')
return
entries, ambiguousEntries = readCsv(csv_file, verbose)
outputAmbiguousEntries(ambiguousEntries)
writeEntries(xml_file, entries, group, verbose)
if __name__ == '__main__':
    main()
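A quick end-to-end check of the two entry points above (file names and row values are made up; the column order matches readCsv: name, site, username, email, secondary, password, note):
import io
sample_row = u'Example,example.com,alice,alice@example.com,,s3cret,my note\n'
with io.open('sample.csv', 'w', encoding='utf-8') as f:
    f.write(sample_row)
demo_entries, demo_ambiguous = readCsv('sample.csv')
writeEntries('sample.xml', demo_entries, groupName='Demo', verbose=True)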
|
UTF-8
|
Python
| false | false | 6,437 |
py
| 2 |
script.py
| 1 | 0.555694 | 0.550412 | 0 | 208 | 29.947115 | 178 |
eProsima/Fast-DDS
| 14,937,896,274,410 |
732ef6944c87cf6f3b5a5f8ff21c9d2d0c6816d9
|
c976078bf8dde5baf96416d60dd3bb06c72111ad
|
/test/profiling/allocations/allocation_plot.py
|
d719cc3900aa953b8a9b7c19f50f0a9d73f4eca4
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/eProsima/Fast-DDS
|
21f3fecacca5a285ad9950b7683456c6f9930a4d
|
107ea8d64942102696840cd7d3e4cf93fa7a143e
|
refs/heads/master
| 2023-08-31T14:56:45.942016 | 2023-08-11T11:40:25 | 2023-08-11T11:40:25 | 20,296,703 | 1,225 | 463 |
Apache-2.0
| false | 2023-09-14T11:33:09 | 2014-05-29T14:36:15 | 2023-09-14T08:37:13 | 2023-09-14T11:33:08 | 109,341 | 1,705 | 628 | 91 |
C++
| false | false |
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
import pandas as pd
import sys
if len(sys.argv) < 2:
print("Bad usage.")
print(" allocation_plot.py <csv_file> [0,1,2,3]")
exit(-1)
if len(sys.argv) > 2:
bp0 = sys.argv[2].find('0') != -1
bp1 = sys.argv[2].find('1') != -1
bp2 = sys.argv[2].find('2') != -1
bp3 = sys.argv[2].find('3') != -1
else:
bp0 = True
bp1 = True
bp2 = True
bp3 = True
headers = ["Phase 0 Allocations", "Phase 0 Deallocations", "Phase 1 Allocations", "Phase 1 Deallocations",
"Phase 2 Allocations", "Phase 2 Deallocations", "Phase 3 Allocations", "Phase 3 Deallocations"]
df = pd.read_csv(sys.argv[1], delimiter=",", names=headers)
p0a = [float(value) for value in df["Phase 0 Allocations"].values[1::]]
p1a = [float(value) for value in df["Phase 1 Allocations"].values[1::]]
p2a = [float(value) for value in df["Phase 2 Allocations"].values[1::]]
p3a = [float(value) for value in df["Phase 3 Allocations"].values[1::]]
axis = np.arange(1, len(p0a)+1)
style.use('ggplot')
if bp0:
plt.plot(axis, p0a, label='Ph0')
if bp1:
plt.plot(axis, p1a, label='Ph1')
if bp2:
plt.plot(axis, p2a, label='Ph2')
if bp3:
plt.plot(axis, p3a, label='Ph3')
plt.xlabel("Execution")
plt.ylabel("Allocations")
plt.title("Allocations in phases")
plt.xticks(axis, axis)
plt.legend()
plt.show()
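For reference, a generator for a compatible input file (a sketch): eight comma-separated columns per row, plus a first row that the script skips via values[1::]:
import csv
import random
with open('allocs.csv', 'w', newline='') as f:
    w = csv.writer(f)
    w.writerow(['h%d' % i for i in range(8)])  # first row is skipped
    for _ in range(20):
        w.writerow([random.randint(0, 50) for _ in range(8)])
# plot with: python allocation_plot.py allocs.csv 0123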
|
UTF-8
|
Python
| false | false | 1,397 |
py
| 1,428 |
allocation_plot.py
| 1,198 | 0.640659 | 0.595562 | 0 | 49 | 27.510204 | 106 |
SaiTeja531/Amazon-Automation
| 13,228,499,274,870 |
e1913d025995c09ac5e0d203637b9db25e9fdbda
|
cffdb24cd863ebcbb45933de15cfe3149864852d
|
/Utilities/Base.py
|
d663ed3ba71954cb94786ecec55cd362f9ed4ee1
|
[] |
no_license
|
https://github.com/SaiTeja531/Amazon-Automation
|
77c8ca1258aef210102e7061ac3f4f0c0a5c5d22
|
bfc101d1c3bc0f889648def8b6b983e1af297d66
|
refs/heads/master
| 2023-08-11T08:03:22.030705 | 2021-09-06T04:24:51 | 2021-09-06T04:24:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import time
class BaseClass:
def __init__(self, driver, wait, actions):
self.driver = driver
self.wait = wait
self.actions = actions
def find_element_by_selector(self, element):
return self.driver.find_element(element)
def wait_until_visibility_of_element_located(self, element):
return self.wait.until(EC.visibility_of_element_located(element))
def wait_until_element_clickable_link_text(self, element):
return self.wait.until(EC.element_to_be_clickable(element))
def wait_until_presence_of_all_elements_located(self, element):
return self.wait.until(EC.presence_of_all_elements_located(element))
def click_if_visible(self, element):
self.wait.until(EC.visibility_of_element_located(element)).click()
def hover(self, element):
locator = self.wait.until(EC.presence_of_element_located(element))
self.actions.move_to_element(locator).perform()
def sending_keys(self, locator, keys):
self.wait.until(EC.presence_of_element_located(locator)).send_keys(keys)
def get_text_of_element(self, locator):
return self.wait.until(EC.visibility_of_element_located(locator)).text
def get_title_of_page(self):
return self.driver.title
def text_is_displayed(self, by_locator):
element = self.wait.until(EC.visibility_of_element_located(by_locator))
return element.is_displayed()
def wait_until_visibility_of_elements_located(self, locator):
return self.wait.until(EC.visibility_of_all_elements_located(locator))
def clearing_any_text_present(self, locator):
self.wait.until(EC.visibility_of_element_located(locator)).clear()
    def scroll_to_element(self, locator):
        element = self.wait.until(EC.presence_of_element_located(locator))
        self.driver.execute_script("arguments[0].scrollIntoView();", element)
def how_much_time_u_want_to_stop_the_script_to_see_the_view_of_actions(self, seconds):
return time.sleep(seconds)
def back_to_previous_page(self):
self.driver.back()
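How the three collaborators are typically wired before being handed to BaseClass (a sketch; the locator is an assumption and chromedriver must be on PATH):
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
driver = webdriver.Chrome()
base = BaseClass(driver, WebDriverWait(driver, 10), ActionChains(driver))
base.click_if_visible((By.ID, 'nav-logo'))  # hypothetical element id
driver.quit()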
|
UTF-8
|
Python
| false | false | 2,225 |
py
| 10 |
Base.py
| 8 | 0.696629 | 0.69618 | 0 | 60 | 35.85 | 90 |
SqrTed/Planes
| 635,655,193,295 |
68a19bc9a396e25e262bc903639e726639132fee
|
eb41ebd17a3997e6bf127225c037df27f07ef85f
|
/controller/controller.py
|
d5a589e81acb6810a904582f5eec3691811adc77
|
[] |
no_license
|
https://github.com/SqrTed/Planes
|
a78fef09b2cef2215e73ba0be5c5d0d1f1128654
|
2c8fc7a6ef0f3f7d567e36ee4b9cbed5f4d95473
|
refs/heads/master
| 2021-04-30T14:36:19.781533 | 2018-02-12T08:41:45 | 2018-02-12T08:41:45 | 121,221,338 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from domain.entities import Plane, Point
from validation.validator import Validator
from domain.entities import Utils
class PlayerController(Utils):
def __init__(self, player_planes_board, player_hits_board):
self.__planes = player_planes_board
self.__hits = player_hits_board
self.__validator = Validator()
self.__planes_alive = 0
@property
def get_planes(self):
return self.__planes
def new_plane(self, head):
self.__validator.validate_plane(head)
plane = Plane(head)
self.__validator.overlap_plane(self.__planes, plane.coordinates())
self.__planes.add(plane.coordinates())
self.__planes_alive += 1
def get_point(self, point):
return self.__planes[point.x][point.y]
def attacked(self, point):
self.__validator.validate_plane(point)
if self.__planes[point.x][point.y] == "#":
self.__planes[point.x][point.y] = "!"
return "X"
elif self.__planes[point.x][point.y] in ["S", "N", "E", "W"]:
self.__planes = self.__fill(point, "!", self.__planes)
self.__planes_alive -= 1
return "K"
return "O"
def hit(self, point, result):
if result == "K":
self.__hits = self.__fill(point, "X", self.__hits)
else:
self.__hits[point.x][point.y] = result
def check_hit(self, point):
if self.__hits[point.x][point.y] != " ":
return True
return False
@property
def planes_alive(self):
return self.__planes_alive
def __str__(self):
return "Alive Planes: " + str(self.__planes_alive) + "\nPlanes:\n" + str(self.__planes) + "\nMoves:\n" + str(
self.__hits)
def __fill(self, param, value, board):
if param.value == "N":
return super().fill_N(param.x, param.y, value, board)
if param.value == "S":
return super().fill_S(param.x, param.y, value, board)
if param.value == "W":
return super().fill_W(param.x, param.y, value, board)
if param.value == "E":
return super().fill_E(param.x, param.y, value, board)
class ComputerController(PlayerController):
pass
|
UTF-8
|
Python
| false | false | 2,248 |
py
| 10 |
controller.py
| 10 | 0.560943 | 0.559609 | 0 | 69 | 31.57971 | 117 |
EmanAlmusa/EDI-Files-Parser
| 6,227,702,586,468 |
3593bc2b370b8dabcaefb25f6d0cec93aa5f82c2
|
9be9fef615783085efe16b86658b509b6fdc370e
|
/parse_999/parse_999.py
|
6a3796e7098a167d25ea03dc84b2d51afef2e630
|
[] |
no_license
|
https://github.com/EmanAlmusa/EDI-Files-Parser
|
f5136fe168fe905db21b314dfe891cea6d091fe7
|
89bff5b49b395f756e3852e6c91017247e31c239
|
refs/heads/master
| 2023-06-27T05:45:41.923789 | 2021-08-05T12:46:28 | 2021-08-05T12:46:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import json
import os
from parse_common.main_parser import MainParser
loop_2000 = '2000'
loop_2100 = '2100'
loop_2110 = '2110'
class Parse999(MainParser):
def __init__(self, edi_file):
super().__init__(edi_file)
self.edi_parsed = {
'header_section': {
'file_name': os.path.basename(self.edi_file),
"date_created": {
"date": self.time,
"time": self.date
},
"current_status": self.get_current_status(),
"status_history": [self.get_current_status()],
}}
self.index_999 = {'999_index': {
'header_section': {
'file_name': os.path.basename(self.edi_file),
"date_created": {
"date": self.time,
"time": self.date
},
"current_status": self.get_current_status(),
"status_history": [self.get_current_status()],
}
}}
for i in range(len(self.edi_file_info)):
            i = 0  # segments are consumed from the front as they are parsed, so always inspect index 0
self.extract_data()
if self.segment.split('-')[0] == 'AK1':
self.bulid_main_dict()
self.__bulid_ak9_dict(self.edi_parsed[self.segment])
elif self.segment.split('-')[0] == 'AK2':
self.bulid_main_dict()
self.edi_parsed[self.segment]['loop_name'] = loop_2000
self.__bulid_ak2_dict(self.edi_parsed[self.segment])
else:
self.bulid_main_dict()
def __bulid_ak9_dict(self, param):
for i in range(len(self.edi_file_info)):
try:
if self.edi_file_info[i].split('*')[0] == 'AK9':
self.data_element = self.edi_file_info[i].split('*')
self.index += 1
self.segment = self.data_element.pop(0) + '-' + str(self.index)
param[self.segment] = {}
self.count = 1
for self.data in self.data_element:
data_element_count = '{:02}'.format(self.count)
param[self.segment][data_element_count] = self.data
self.count += 1
self.pop_element(i)
break
except IndexError:
pass
def __bulid_ak2_dict(self, param):
for i in range(len(self.edi_file_info)):
i = 0
try:
if self.edi_file_info[i].split('*')[0] == 'IK3':
self.extract_data()
param[self.segment] = {}
self.bulid_data_element(param[self.segment], 0)
param[self.segment]['loop_name'] = loop_2100
self.__bulid_ik3_dict(param[self.segment])
if self.edi_file_info[i].split('*')[0] == 'IK5':
self.extract_data()
param[self.segment] = {}
self.bulid_data_element(param[self.segment], 0)
break
except IndexError:
pass
def __bulid_ik3_dict(self, param):
for i in range(len(self.edi_file_info)):
i = 0
try:
if self.edi_file_info[i].split('*')[0] == 'CTX':
self.extract_data()
param[self.segment] = {}
self.bulid_data_element(param[self.segment], 0)
elif self.edi_file_info[i].split('*')[0] == 'IK4':
self.extract_data()
param[self.segment] = {}
self.bulid_data_element(param[self.segment], 0)
param[self.segment]['loop_name'] = loop_2110
self.__bulid_ik4_dict(param[self.segment])
else:
break
except IndexError:
pass
def __bulid_ik4_dict(self, param):
for i in range(len(self.edi_file_info)):
i = 0
try:
if self.edi_file_info[i].split('*')[0] == 'CTX':
self.extract_data()
param[self.segment] = {}
self.bulid_data_element(param[self.segment], 0)
except IndexError:
pass
def extract_index_data(self):
for data in self.edi_parsed:
segment = data.split('-')[0]
if segment == 'ISA':
self.index_999[segment] = {}
self.index_999[segment]['05'] = self.edi_parsed[data]['05']
self.index_999[segment]['06'] = self.edi_parsed[data]['06']
self.index_999[segment]['07'] = self.edi_parsed[data]['07']
self.index_999[segment]['08'] = self.edi_parsed[data]['08']
if segment == 'GS':
self.index_999[segment] = {}
self.index_999[segment]['02'] = self.edi_parsed[data]['02']
self.index_999[segment]['03'] = self.edi_parsed[data]['03']
self.index_999[segment]['06'] = self.edi_parsed[data]['06']
if segment == 'AK1':
self.index_999[segment] = {}
self.index_999[segment]['01'] = self.edi_parsed[data]['01']
self.index_999[segment]['02'] = self.edi_parsed[data]['02']
self.index_999[segment]['03'] = self.edi_parsed[data]['03']
if segment == 'AK2':
self.index_999[segment] = {}
self.index_999[segment]['01'] = self.edi_parsed[data]['01']
self.index_999[segment]['02'] = self.edi_parsed[data]['02']
self.index_999[segment]['03'] = self.edi_parsed[data]['03']
return self.index_999
|
UTF-8
|
Python
| false | false | 5,784 |
py
| 3 |
parse_999.py
| 3 | 0.461618 | 0.428596 | 0 | 142 | 39.732394 | 83 |
JamesKlee/part3
| 7,026,566,534,609 |
67787d22d095b45dd0b20b2d3c281592c11ef051
|
a45e5ad192d662d85b1ad6e58c483a89d702225c
|
/scripts/master.py
|
8cdb5a9c9495036c9b5f99df25012c2c851fe45b
|
[] |
no_license
|
https://github.com/JamesKlee/part3
|
f4d5e8ee2739e08104100d0d2828bee2f03d5349
|
8da73f7f46184d9bb3c5d94b07b90b073968a647
|
refs/heads/master
| 2020-12-26T19:23:41.792801 | 2016-01-11T18:59:35 | 2016-01-11T18:59:35 | 45,981,471 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import rospy
from pf_localisation.util import *
from geometry_msgs.msg import ( PoseStamped, PoseWithCovarianceStamped,
PoseArray, Quaternion )
from pf_localisation.msg import WeightedParticles, Registration, Particles
from pf_localisation.updateParticle import UpdateParticleCloud
import thread
import sys
import time
from copy import deepcopy
class Node(object):
def __init__(self, ftype):
if ftype == "amcl" or ftype == "kld":
self.ftype = ftype
else:
rospy.loginfo("ERROR: Filter '" + str(ftype) + "' not accepted")
sys.exit(1)
self.registered = []
self.particleWT = []
self.particlesAdded = []
self.totalWeight = 0
self.updater = UpdateParticleCloud()
self.lock = thread.allocate_lock()
self.reinitList = []
self.reinit = False
self.mapAdded = False
		self._cloud_publisher = rospy.Publisher("/updatedCloud", Particles, queue_size=100)
self._weighted_particle_subscriber = rospy.Subscriber("/weightedParticles", WeightedParticles, self.addParticles, queue_size=100)
self._register_subscriber = rospy.Subscriber("/regNode", Registration, self.register, queue_size=10)
rospy.loginfo("RUNNING")
def register(self, reg):
self.lock.acquire()
nFound = True
pos = 0
for i in range (0, len(self.registered)):
if reg.frame_id == self.registered[i][0]:
nFound = False
pos = i
if nFound and reg.toAdd:
self.registered.append((reg.frame_id, reg.freePoints, reg.resolution))
self.mapAdded = True
rospy.loginfo("\tREGISTERED: " + reg.frame_id)
elif not nFound and not reg.toAdd:
del self.registered[pos]
rospy.loginfo("\tDEREGISTERED: " + reg.frame_id)
self.mapAdded = True
if len(self.particlesAdded) == len(self.registered):
self.resample()
self.updater.mapInfo = self.registered
self.lock.release()
def addParticles(self, wParticles):
self.lock.acquire()
name = wParticles.poseArray.header.frame_id
#print(name)
toAdd = False
for i in range(0, len(self.registered)):
if self.registered[i][0] == name:
toAdd = True
if not toAdd:
print("NOT FOUND IN REG")
self.lock.release()
return
toAdd = True
for i in range(0, len(self.particlesAdded)):
if self.particlesAdded[i] == name:
toAdd = False
if not toAdd:
self.lock.release()
return
for i in range(0, len(wParticles.poseArray.poses)):
newWT = (name, wParticles.poseArray.poses[i], wParticles.array[i])
self.particleWT.append(newWT)
self.totalWeight = self.totalWeight + wParticles.totalWeight
self.particlesAdded.append(name)
self.reinitList.append(wParticles.reinit)
if len(self.particlesAdded) == len(self.registered):
self.resample()
else:
self.lock.release()
def resample(self):
#rospy.loginfo("PARTICLES RECIEVED: " + str(len(self.particleWT)))
particles = None
if self.ftype == "kld":
particles = self.updater.resample_kld(self.particleWT, self.totalWeight)
elif self.ftype == "amcl":
particles = self.updater.resample_amcl(self.particleWT, self.totalWeight)
else:
			rospy.logerr("ERROR IN TYPE OF RESAMPLE")
toSend = []
for i in range(0, len(self.registered)):
toAdd = []
toAdd.append(self.registered[i][0])
toSend.append(toAdd)
for i in range (0, len(particles)):
particle = particles[i]
for j in range(0, len(toSend)):
if particle[0] == toSend[j][0]:
toSend[j].append(particle[1])
self.reinit = True
if not self.mapAdded:
for i in range (0, len(self.reinitList)):
if self.reinitList[i] == False:
self.reinit = False
break
self.count = 0
for i in range(0, len(toSend)):
time.sleep(0.1 * len(self.registered))
name = toSend[i][0]
if len(toSend[i]) <= 1:
list = []
else:
list = toSend[i]
del list[0]
self.send(name,list)
self.particleWT = []
self.particlesAdded = []
self.totalWeight = 0
self.reinitList = []
self.mapAdded = False
self.updater.reinit = False
self.lock.release()
def send(self, map_topic, plist):
particles = Particles()
particles.particles.header.seq = 1
particles.particles.header.stamp = rospy.get_rostime()
particles.particles.header.frame_id = map_topic
particles.particles.poses = plist
self.count = self.count + len(plist)
if self.ftype == "amcl" or self.mapAdded:
particles.reinit = self.reinit
else:
particles.reinit = self.updater.reinit
self._cloud_publisher.publish(particles)
#print("SENT")
rospy.init_node("master")
if len(sys.argv) != 2:
print("\tUSAGE: master.py <filterType>")
sys.exit(1)
ftype = str(sys.argv[1])
Node(ftype)
rospy.spin()
|
UTF-8
|
Python
| false | false | 4,609 |
py
| 16 |
master.py
| 16 | 0.682795 | 0.675418 | 0 | 173 | 25.641618 | 131 |
nish235/PythonPrograms
| 17,282,948,409,780 |
1ed2cc497ea045bd58c0de8ad0849bfa0b7c26bc
|
9b0babcf6849e11b3f208e702d2b36fd049f63f2
|
/Oct20/reverse.py
|
ed844765fe3db8002235e34d8449dc36911acc2d
|
[] |
no_license
|
https://github.com/nish235/PythonPrograms
|
d5ec56647d06136aef9501d732e7da32e82f3947
|
f657c1263098665a50b1b1fcbfc49bea6ce7af6f
|
refs/heads/main
| 2023-03-18T23:02:34.974009 | 2021-03-13T06:18:00 | 2021-03-13T06:18:00 | 302,834,862 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import array as arr
n = arr.array('i', [1, 2, 3, 4, 4, 5, 6])
n.reverse()
print(n)
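Slicing gives the same kind of result without mutating the array in place:
print(n[::-1])  # reversed copy; the array itself stays as-is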
|
UTF-8
|
Python
| false | false | 91 |
py
| 197 |
reverse.py
| 192 | 0.516484 | 0.43956 | 0 | 6 | 13.166667 | 41 |
scp15472/userservice
| 3,977,139,730,423 |
d74003a7cbd3091c4dd4a330d28bb5d6165bef9f
|
7e5d1af6d0d835c6d96aaef5377aa64f12529af1
|
/userservice/service_apis/login1.py
|
cb05ad682d58cb5519c2a8733be0cf596d479a23
|
[] |
no_license
|
https://github.com/scp15472/userservice
|
62249a67c45e9a92eb2a01710c86e429ce260681
|
ab604122042ece4e01059365c37b22b30c9cf014
|
refs/heads/master
| 2022-12-09T11:03:53.838767 | 2018-11-18T06:21:32 | 2018-11-18T06:21:32 | 145,285,867 | 0 | 0 | null | false | 2022-12-08T02:59:25 | 2018-08-19T08:26:50 | 2018-11-18T06:21:59 | 2022-12-08T02:59:23 | 44 | 0 | 0 | 7 |
Python
| false | false |
import django
from flask import jsonify, request, make_response, json, Response
from flask_restful import Resource
from userservice.Service_handler_apis import login_post_handler, \
login_get_handler, login_put_handler, login_delete_handler
from userservice.utility import login_methods
django.setup()
class Login1(Resource):
def post(self):
data = request.get_json()
login_objects = login_post_handler.create_login(data)
response_dict = login_methods.get_login_dict(login_objects)
response = make_response(json.dumps({"Login": response_dict}))
response.mimetype = 'application/json'
response.set_cookie('token', login_objects.token)
return response
def get(self, token=None):
if not token:
token = request.cookies.get('token')
login_objects = login_get_handler.get_single_login(token)
if login_objects:
response_dict = login_methods.get_login_dict(login_objects)
return jsonify({"Login": response_dict})
# filters = request.args
# login_objects = login_get_handler.get_login_by_filter(filters)
# response_dict = [login_methods.get_login_dict(login) for login in login_objects]
# return jsonify({"Login": response_dict})
def put(self, token):
login_objects = login_get_handler.get_single_login(token)
if login_objects:
data = request.get_json()
login_objects = login_put_handler.update_login(login_objects, data)
response_dict = login_methods.get_login_dict(login_objects)
response = make_response(json.dumps({"Login": response_dict}))
response.mimetype = 'application/json'
            response.set_cookie("token", None)
            return response
else:
return jsonify({"Message": "User Login not found!!!"})
def delete(self, token):
login_objects = login_get_handler.get_single_login(token)
if login_objects:
login_objects = login_delete_handler.delete_login(token)
return jsonify({"Login": "LogOut successfully"})
else:
return jsonify({"Message": "Login is not deleted"})
|
UTF-8
|
Python
| false | false | 2,281 |
py
| 25 |
login1.py
| 23 | 0.638755 | 0.638317 | 0 | 58 | 38.327586 | 90 |
intelemetry/malware-crawler
| 10,084,583,218,430 |
626f0236be5d080adfee3b2c4e33b409c4eebc2a
|
78c322d1e2c15e42969dc4ae5a6075283975d8c6
|
/MalwareCrawler/src/core/statistics.py
|
0ae84cf648dc02dc8200663120e28ac9ce483208
|
[] |
no_license
|
https://github.com/intelemetry/malware-crawler
|
723f26d233736d88ebdf508513052fb6bb609e5d
|
1672bab7aa292a350c0661e048517e9665ddc570
|
refs/heads/master
| 2021-01-10T10:22:12.862103 | 2015-10-17T20:36:41 | 2015-10-17T20:36:41 | 44,453,524 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright (C) 2013-2015 Ragpicker Developers.
# This file is part of Ragpicker Malware Crawler - http://code.google.com/p/malware-crawler/
import os
import sys
from core.config import Config
from core.database import Database
from core.constants import RAGPICKER_ROOT
try:
from prettytable import PrettyTable
except ImportError:
raise Exception("PrettyTable is required for using statistics: https://code.google.com/p/prettytable/")
class Statistics():
def __init__(self):
        # Database
self.__database = Database()
        # Load the configuration from reporting.conf
self.__cfgReporting = Config(os.path.join(RAGPICKER_ROOT, 'config', 'reporting.conf'))
self.__vxcageEnabled = self.__cfgReporting.getOption("vxcage", "enabled")
self.__vxcageHost = self.__cfgReporting.getOption("vxcage", "host")
self.__vxcagePort = self.__cfgReporting.getOption("vxcage", "port")
def runStatisticsLong(self):
        # Check whether VxCage and MongoDB are enabled
if self.__database.isRagpickerDBEnabled():
if self.__vxcageEnabled:
self.__runStatisticsMongodbLong()
if self.__database.isCodeDBEnabled():
self.__runStatisticsCodedb()
else:
print("vxcage in reporting.conf is not enabled")
sys.stdout.flush()
else:
print("mongodb in reporting.conf is not enabled")
sys.stdout.flush()
def runStatisticsShort(self):
        # Check whether VxCage and MongoDB are enabled
if self.__database.isRagpickerDBEnabled():
if self.__vxcageEnabled:
self.__runStatisticsMongodbShort()
if self.__database.isCodeDBEnabled():
self.__runStatisticsCodedb()
else:
print("vxcage in reporting.conf is not enabled")
sys.stdout.flush()
else:
print("mongodb in reporting.conf is not enabled")
sys.stdout.flush()
def runStatisticsAV(self):
        # Check whether VxCage and MongoDB are enabled
if self.__database.isRagpickerDBEnabled():
if self.__vxcageEnabled:
self.__runStatisticsAV()
else:
print("vxcage in reporting.conf is not enabled")
sys.stdout.flush()
else:
print("mongodb in reporting.conf is not enabled")
sys.stdout.flush()
def __runStatisticsMongodbLong(self):
print "**************************************"
print "*** Statistics MongoDB (Ragpicker) ***"
print "**************************************"
print ""
print "Number of malware samples in database:", self.__database.countReportsRagpickerDB()
print ""
        # Statistics for the AV products in use
        self.__runStatisticsAVProducts()
        # Last 20 samples found neither on VirusTotal nor by a local AV product
        self.__runStatisticsLast20SamplesNotFoundByAV()
        # Last 20 samples not found on VirusTotal
        self.__runStatisticsLast20SamplesNotFoundByVT()
        # Last 20 samples not found by a local AV product
        self.__runStatisticsLast20SamplesNotFoundByLocalAV()
        # List and frequency of file types
        self.__runStatisticsFiletypes()
        # Frequency of PE characteristics
        self.__runStatisticsPeCharacteristics()
        # List and frequency of packers/compilers used in the malware
        self.__runStatisticsPackerCompiler()
        # List of digital signatures used by the malware
        self.__runStatisticsPackerSignatures()
sys.stdout.flush()
def __runStatisticsMongodbShort(self):
print "**************************************"
print "*** Statistics MongoDB (Ragpicker) ***"
print "**************************************"
print ""
print "Number of malware samples in database:", self.__database.countReportsRagpickerDB()
print ""
        # List and frequency of file types
        self.__runStatisticsFiletypes()
        # Frequency of PE characteristics
        self.__runStatisticsPeCharacteristics()
sys.stdout.flush()
def __runStatisticsAV(self):
print "**************************************"
print "*** Statistics MongoDB (Ragpicker) ***"
print "**************************************"
print ""
print "Number of malware samples in database:", self.__database.countReportsRagpickerDB()
print ""
        # Statistics for the AV products in use
        self.__runStatisticsAVProducts()
        # Last 20 samples found neither on VirusTotal nor by a local AV product
        self.__runStatisticsLast20SamplesNotFoundByAV()
        # Last 20 samples not found on VirusTotal
        self.__runStatisticsLast20SamplesNotFoundByVT()
        # Last 20 samples not found by a local AV product
        self.__runStatisticsLast20SamplesNotFoundByLocalAV()
sys.stdout.flush()
def __runStatisticsCodedb(self):
print "***********************************"
print "*** Statistics MongoDB (CodeDB) ***"
print "***********************************"
print ""
print "Number of malware samples in database:", self.__database.countReportsCodeDB()
print ""
sys.stdout.flush()
def __runStatisticsFiletypes(self):
        # List and frequency of file types
print "Filetypes of malware"
res = self.__database.getFiletypes()
table = PrettyTable(["filetype", "count"])
table.align["filetype"] = "l"
table.align["count"] = "c"
table.padding_width = 1
try:
for values in res['result']:
if values.get("_id"):
outputPacker = values.get("_id")
outputCount = str(values.get("count"))
table.add_row([outputPacker, outputCount])
print(table)
except KeyError:
raise Exception("Dict has no key 'result' ")
print ""
def __runStatisticsPeCharacteristics(self):
        # Frequency of PE characteristics
print "PE-Characteristics of malware"
peC = self.__database.getStatisticsPeCharacteristics()
table = PrettyTable(["pe-characteristics", "count"])
table.align["pe-characteristics"] = "l"
table.align["count"] = "c"
table.padding_width = 1
table.add_row(["EXE", peC.get("exe")])
table.add_row(["DLL", peC.get("dll")])
table.add_row(["Driver", peC.get("driver")])
table.add_row(["DLL/Driver", peC.get("dllDriver")])
table.add_row(["No PE File", peC.get("noPe")])
print (table)
print ""
def __runStatisticsPackerCompiler(self):
        # List and frequency of packers/compilers used in the malware
print "Packer/compiler used in malware"
res = self.__database.getStatisticsPackerCompiler()
table = PrettyTable(["packer/compiler", "count"])
table.align["packer/compiler"] = "l"
table.align["count"] = "c"
table.padding_width = 1
try:
for values in res['result']:
if values.get("_id"):
outputPacker = values.get("_id")[0]
outputCount = str(values.get("count"))
table.add_row([outputPacker, outputCount])
print(table)
except KeyError:
raise Exception("Dict has no key 'result' ")
print " "
def __runStatisticsPackerSignatures(self):
        # List of digital signatures used by the malware
print "Signatures used by malware"
res = self.__database.getStatisticsPackerSignatures()
table = PrettyTable(["publisher", "issuer", "count"])
table.align["publisher"] = "l"
table.align["issuer"] = "l"
table.align["count"] = "c"
table.padding_width = 1
try:
for values in res['result']:
if values.get("_id"):
outputPublisher = values.get("_id").get("PublisherO")
if values.get("_id").get("Issuer"):
outputIssuer = values.get("_id").get("Issuer")
else:
outputIssuer = " "
outputCount = str(values.get("count"))
table.add_row([outputPublisher, outputIssuer, outputCount])
print(table)
except KeyError:
raise Exception("Dict has no key 'result' ")
print ""
def __runStatisticsLast20SamplesNotFoundByAV(self):
        # Last 20 samples found neither on VirusTotal nor by a local AV product
print "Last 20 samples not found by VirusTotal and local AV-Products"
res = sorted(self.__database.getSamplesNotFoundByAV(), reverse=True)
table = PrettyTable(["timestamp of crawling", "sha256"])
table.align["timestamp of crawling"] = "c"
table.align["sha256"] = "c"
table.padding_width = 1
try:
for values in res:
sha256 = values.get("Info").get("file").get("sha256")
timestamp = values.get("Info").get("analyse").get("started")
table.add_row([timestamp, sha256])
print(table.get_string(start=0, end=20))
except KeyError:
raise Exception("Dict has no key 'Info' ")
print ""
def __runStatisticsLast20SamplesNotFoundByVT(self):
        # Last 20 samples not found on VirusTotal
print "Last 20 samples not found by VirusTotal"
res = sorted(self.__database.getSamplesNotFoundByVT(), reverse=True)
table = PrettyTable(["timestamp of crawling", "sha256"])
table.align["timestamp of crawling"] = "c"
table.align["sha256"] = "c"
table.padding_width = 1
try:
for values in res:
sha256 = values.get("Info").get("file").get("sha256")
timestamp = values.get("Info").get("analyse").get("started")
table.add_row([timestamp, sha256])
print(table.get_string(start=0, end=20))
except KeyError:
raise Exception("Dict has no key 'Info' ")
print ""
def __runStatisticsLast20SamplesNotFoundByLocalAV(self):
        # Last 20 samples not found by a local AV product
print "Last 20 samples not found by local AV-Products"
res = sorted(self.__database.getSamplesNotFoundByLocalAV(), reverse=True)
table = PrettyTable(["timestamp of crawling", "sha256"])
table.align["timestamp of crawling"] = "c"
table.align["sha256"] = "c"
table.padding_width = 1
try:
for values in res:
sha256 = values.get("Info").get("file").get("sha256")
timestamp = values.get("Info").get("analyse").get("started")
table.add_row([timestamp, sha256])
print(table.get_string(start=0, end=20))
except KeyError:
raise Exception("Dict has no key 'Info' ")
print ""
def __runStatisticsAVProducts(self):
        # Statistics for the AV products in use
        # VirusTotal and local AV products
print "VirusTotal and local AV-Products"
print " Samples rated as none-malware by all AV-Products at time of crawling:", \
self.__database.getStatisticsNoneMalwareByAV()
print ""
#VirusTotal
ret = self.__database.getStatisticsVirusTotal()
print "VirusTotal"
print " Samples analyzed at time of crawling:", ret.get("analyzed")
print " Samples not analyzed at time of crawling:", ret.get("notAnalyzed")
print " Samples found at time of crawling:", ret.get("samplesFound")
print " Samples not found at time of crawling:", ret.get("SamplesNotFound")
print ""
        # Local AV products
print "Local AV-Products"
print " analyzed => Samples analyzed at time of crawling"
print " not analyzed => Samples not analyzed at time of crawling"
print " malware => Samples rated as malware at time of crawling"
print " none-malware => Samples rated as none-malware at time of crawling"
table = PrettyTable(["product", "analyzed", "not analyzed", "malware", "none-malware", "detection rate"])
table.align["product"] = "l"
table.align["analyzed"] = "r"
table.align["not analyzed"] = "r"
table.align["malware"] = "r"
table.align["none-malware"] = "r"
table.align["detection rate"] = "r"
table.padding_width = 1
        # Fetch the statistics data
ret = self.__database.getStatisticsAntivirus()
        # Assemble the table body
for av in ret:
table.add_row([av.get("product"), av.get("analyzed"), av.get("notanalyzed"), av.get("malware"), av.get("nonemalware"), av.get("rate")])
print(table)
print ""
|
UTF-8
|
Python
| false | false | 14,322 |
py
| 104 |
statistics.py
| 59 | 0.545804 | 0.537914 | 0 | 367 | 38.027248 | 147 |
reinaaa05/python
| 10,900,627,035,396 |
8e79bfee21ca34b1ec5d9840834df395cf77e9e0
|
4a230737626c0cadfc5326315d036bf8453ef953
|
/dotinstall/python_lessons/myapp_26.py
|
2eb557a1f7dcfbc9ad1d0ab1deddfa3f575732e6
|
[] |
no_license
|
https://github.com/reinaaa05/python
|
0037a40b588b6954ea5d5b0ff45df98c1522f865
|
2d9e2b7388c4a19a0389aa6cb532774271bd27b0
|
refs/heads/master
| 2023-04-21T23:39:20.450914 | 2021-05-12T07:27:47 | 2021-05-12T07:27:47 | 340,906,770 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Slicing
# scores = [40, 50,70,90,60]
# print(scores[1:4]) #50,70,90
# print(scores[:2]) #40, 50
# print(scores[3:]) #90,60
# print(scores[-3:]) #70, 90, 60
s = "hello"
print(s[1:4])
|
UTF-8
|
Python
| false | false | 189 |
py
| 262 |
myapp_26.py
| 174 | 0.569061 | 0.364641 | 0 | 10 | 17.2 | 32 |
retro486/simple-django-room-scheduler
| 8,306,466,781,491 |
6c770c6f4a8cb4575b166e21b08f0764e5583b8a
|
ee646a37ee66ded40dca860225ac9dda9a57bedb
|
/settings.py
|
22a6893ddb52487278ca6a77e111ad53da4fc205
|
[] |
no_license
|
https://github.com/retro486/simple-django-room-scheduler
|
d4f96c0502cf0135438251f3e75947594be66a07
|
6e0dae6214bc33d0a817d4b778db341fe18fcd85
|
refs/heads/master
| 2021-01-19T08:11:24.729357 | 2012-02-11T23:18:30 | 2012-02-11T23:18:30 | 3,418,704 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
curr_path = '/'.join(os.path.abspath(__file__).split('/')[0:-1])
#################################
# Custom settings for rooms app
# Not custom, but really needs to be changed for each install.
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'type some random gibberish here'
# The number of hours to look ahead on the front page; make sure this is a
# multiple of 0.5 or else the table footer won't render correctly.
RES_LOOK_AHEAD_HOURS = 6.5
RES_LOOK_AHEAD_INC = 15 # increment to use on the template in minutes
# The default template to use for the front page
RES_DEFAULT_TEMPLATE = 'rooms/default.html'
# Note this project makes use of the vufind-unicorn driver.pl file, though
# we use a slightly modified version, the out of box version should work
# well. If you use Unicorn on a Windows server, you will need a modified
# copy here: http://pastebin.com/Rv4TmHpV
# The temporary URL to the modified VuFind driver.pl file on the biblio server:
RES_BARCODE_URL = 'http://localhost/cgi-bin/driver.pl?query=checkBarcode&patronId='
RES_USERNAME_URL = 'http://localhost/cgi-bin/driver.pl?query=checkUsername&username='
# Prevent users from requesting reservations that occur in the past?
# note this isn't an issue with the default kiosk-style template where users
# can only say how long they need the room for (on-demand)
RES_ENFORCE_NO_PAST = True
# More user quotas: enable maximum # of reservations (independent of max # of hours per day) and how
# many reservations per day (TOTAL, regardless of room selection) do users get?
RES_ENFORCE_MAX_NUM = True
RES_MAX_NUM = 3 # users get up to 3 reservations per day, regardless of room selection
# Enfore allowing users to only be able to have a single reservation for any room
# at any given time.
RES_ENFORCE_OAAT = True
# Enforce a minimum time on meetings and if so what amount of time is minimal
# Helps prevent blocking off many small amounts of time. Can be used independent
# of ENFORCE_MAX_NUM. Note this may or may not be helpful based on how your
# reserve interface works.
RES_ENFORCE_MIN_LENGTH = True
RES_MIN_LENGTH = 30 # minutes
# END custom settings for rooms app
#######################################
# Django settings for studyrooms project.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = '/var/www/html/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
MEDIA_URL = 'http://localhost/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/admin/'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
(curr_path + '/templates'),
)
ADMINS = (
('Site Admin Name', 'site_admin_email@abc.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'db.sqlite3', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'studyrooms.urls'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin', # for manual mgmt of data
'south',
'rooms',
'roomkeys',
)
|
UTF-8
|
Python
| false | false | 5,609 |
py
| 16 |
settings.py
| 10 | 0.704225 | 0.698699 | 0 | 149 | 36.644295 | 122 |
ds-ga-1007/assignment10
| 6,038,724,046,939 |
e9758de72a2c1ef8c81c49c8a57ed7bfdffd2f00
|
8d62eb52d1d3e44cecedbe2bac7e671ae588d255
|
/kk3175/assignment10.py
|
29a5f299b61ff34c904101f529d09672e8ed471e
|
[] |
no_license
|
https://github.com/ds-ga-1007/assignment10
|
d5a9e7aca2913e1c0c4ca18621a0381a4ed28ebb
|
14a97f913567c32f0134aa84a2018777780d23d1
|
refs/heads/master
| 2016-08-11T09:47:52.902316 | 2015-12-23T16:22:18 | 2015-12-23T16:22:18 | 46,821,256 | 0 | 54 | null | false | 2015-12-23T16:22:18 | 2015-11-24T21:56:14 | 2015-12-09T16:33:48 | 2015-12-23T16:22:18 | 3,152 | 0 | 42 | 0 |
Jupyter Notebook
| null | null |
'''
Main program for running the outputs required in Assignment 10.
Assignment 10 requires outputs for questions 4 and 5.
Author: kk3175
Date: 12/8/2015
Class: DSGA1007, Assignment 10
'''
import pandas as pd
from RestaurantInspectionData import RestaurantInspectionData
from RestaurantGraphs import plotRestaurantGrades
from AssignmentQuestions import questionFour, questionFive
'''
Main function for generating the outputs required in questions 4 and 5.
'''
def main():
try:
restaurantData = RestaurantInspectionData()
questionFour(restaurantData)
questionFive(restaurantData)
except EOFError:
print 'Sorry, the restaurant data file is empty.'
except KeyboardInterrupt:
print 'Goodbye!'
# Input error if there is an issue with loading the restaurant data csv file.
# Output error if there is an issue saving a graph to file.
except IOError:
print 'Sorry, there was an input output error.'
if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 927 |
py
| 218 |
assignment10.py
| 165 | 0.779935 | 0.752967 | 0 | 35 | 25.4 | 78 |
liuguanyu/gthief
| 18,751,827,230,022 |
3d92b05dd363fd65284f479e5149b711501a2c7c
|
6da3533a1adce36465b21b40dc395f45bf2dd656
|
/Util/__init__.py
|
0faf6d178295d5d90ae571baac90862b7918c545
|
[] |
no_license
|
https://github.com/liuguanyu/gthief
|
4869ff9a44beb6cf642dd7c5317be77f1e313016
|
27b4bb9b18631cc48e99819dc1d0881612a6c9a0
|
refs/heads/master
| 2021-01-11T06:19:31.179258 | 2014-09-05T10:28:21 | 2014-09-05T10:28:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from Util.picbedclient import *
from Util.jsonpatch import *
|
UTF-8
|
Python
| false | false | 60 |
py
| 14 |
__init__.py
| 13 | 0.816667 | 0.816667 | 0 | 2 | 29.5 | 31 |
amukherjee01/Oop-Python
| 16,003,048,148,566 |
0d9163777fae9706c291cf3839e4259052dee8c5
|
794fb64a31b06388a7cf1abcaf94a6ca5ddfbe09
|
/oop.py.py
|
504f7e5215f60ce4d1cc850250a3e989e799e03b
|
[] |
no_license
|
https://github.com/amukherjee01/Oop-Python
|
9cb40a6457f1ac96662eab58ee0385221e751fbf
|
38e2d50efb51c4526401d48c99836d0906e7c198
|
refs/heads/main
| 2022-12-21T07:52:04.310839 | 2020-10-02T06:47:12 | 2020-10-02T06:47:12 | 300,537,313 | 0 | 0 | null | true | 2020-10-02T07:34:01 | 2020-10-02T07:34:00 | 2020-10-02T06:47:15 | 2020-10-02T06:47:13 | 0 | 0 | 0 | 0 | null | false | false |
'''l1 = [89,"python",98,67,"hello"]
num = []
chaa = []
for i in l1:
if type(i) == int:
num.append(i)
else:
chaa.append(i)
print(num)
print(chaa)'''
'''
num = input("Enter number scored by aditya : ")
if num.isdigit():
num = int(num)
if num < 50 and num >=30:
print("Aditya scored %d %% which is third division"%(num))
elif num < 60 and num > 50:
print("Aditya scored %d %% which is second divison"%(num))
elif num >= 60:
print("Aditya scored %d %% which is first division"%(num))
else:
print("Aditya is failed")
else:
print("Invalid input")'''
'''
num = int(input("Enter number :"))
b = []
for i in range(1,num+1):
b.append(i)
print(b) '''
'''
def addnums(num1,num2):
num3 = num1 + num2
print(num3)
addnums(3,5)'''
# def category():
# '''Chose a category
# 1.Arthmatic
# 2.logical'''
# cat = int(input("Enter category :"))
# if cat == 1:
# print(arth.__doc__)
# arth()
# elif cat == 2:
# logical()
# else:
# print("Invalid input..")
# def arth():
# '''chose operation
# 1.Add
# 2.Sub
# 3.Div '''
# ope = int(input("Enter operation : "))
# num1 = int(input("Enter number :"))
# num2 = int(input("Enter second number :"))
# if ope == 1:
# num3 = addnums(num1,num2)
# print(num3)
# elif ope == 2:
# num3 = addsubr(num1,num2)
# print(num3)
# elif ope == 3:
# num3 = adddiv(num1,num2)
# print(num3)
# else:
# print("Invalid input : ")
# def addnums(num1,num2):
# res = num1 + num2
# return res
# def addsubr(num1,num2):
# res = num1 - num2
# return res
# def adddiv(num1,num2):
# res = num1/num2
# return res
# print(category.__doc__)
# category()
# print("c:\\programs\\files\\new\\code\\test")
# message = "hello world"
# print(message)
# greetings = "welcome"
# new_msg = '{},{} hii how are !!!'.format(message,greetings)
# new_1 = f'{message},{greetings.upper()}'
# print(new_msg)
# print(new_1)
# new_2 = "old"
# new_message = message.replace('world',new_2)
# print(new_message)
# print(dir(message))
# print(help(str))
# Print the string in reverse by indexing from -1 down to -len(strings)
strings = 'aditya'
for i in range(-1, -len(strings) - 1, -1):
    print(strings[i], end="")
|
UTF-8
|
Python
| false | false | 2,312 |
py
| 1 |
oop.py.py
| 1 | 0.536765 | 0.506055 | 0 | 112 | 19.651786 | 66 |
pranav6670/DS-and-Algo-implemetations
| 7,645,041,810,879 |
2494f6276166abfd58c32a5690aa0b3b15ed6835
|
a39e1a5e5b309faf8e3cfac0de16ac53dea2c572
|
/Searching/binary_recursive.py
|
47f6ba37656474b5c9bab8eae34a0d1ed9d43037
|
[] |
no_license
|
https://github.com/pranav6670/DS-and-Algo-implemetations
|
42116396e2b9899637338f675f71b9963f3c5160
|
13d86201c286158c395313c77ffae2463ead7166
|
refs/heads/master
| 2022-12-21T14:50:17.402630 | 2020-09-22T18:56:56 | 2020-09-22T18:56:56 | 297,738,233 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
z = [1, 2, 3, 4, 5]
def binary(a, low, high, x):
    if low > high:
        return -1
    mid = (low + high) // 2
    if a[mid] == x:
        return mid
    elif a[mid] > x:
        # Return the result of searching the lower half
        return binary(a, low, mid - 1, x)
    else:
        # Return the result of searching the upper half
        return binary(a, mid + 1, high, x)
print(binary(z, 0, len(z) - 1, 3))
|
UTF-8
|
Python
| false | false | 294 |
py
| 23 |
binary_recursive.py
| 23 | 0.442177 | 0.401361 | 0 | 16 | 17.375 | 35 |
sanneabhilash/python_learning
| 17,540,646,470,113 |
febb5055180458c2a75ac107d0db1f0cfa6c6924
|
23105de40d00538d17888204736222178c3bfd88
|
/unit_testing_examples/unitTestExample.py
|
317e3def684d1fb80a0f1c16b968a9892a1fcddc
|
[] |
no_license
|
https://github.com/sanneabhilash/python_learning
|
013a5919758ab660e1c048f926e39feb912c3ba2
|
aecab7dd174829f9d324a91cb1d42cda60779982
|
refs/heads/master
| 2020-06-20T05:55:52.815505 | 2019-07-29T05:53:59 | 2019-07-29T05:53:59 | 197,016,520 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import unittest
def add(x, y):
return x + y
def div(x, y):
return x / y
def sub(x, y):
return x - y
def fact(n):
if n == 0:
return 1
return n * fact(n - 1)
# MyTest inherits TestCase class from unittest
class MyTest(unittest.TestCase):
def setUp(self):
print("IN SET UP")
def tearDown(self) -> None:
print("IN TEAR DOWN")
def test_add(self):
self.assertEqual(add(3, 4), 7)
def test_sub(self):
self.assertEqual(sub(10, 5), 5)
def test_factorial(self):
res = fact(5)
self.assertEqual(res, 120)
    def test_zerodivisionerror(self):
        with self.assertRaises(ZeroDivisionError):
            6 / 0
def test_zerodivisionerrorB(self):
self.assertRaises(ZeroDivisionError, div, 8, 0)
def test_split(self):
        s = 'hello$$world'
# check that s.split fails when the separator is not a string
with self.assertRaises(TypeError):
s.split(4)
# Executing the script as standalone, the __name__ will equal to __main__
# unittest.main() will execute all tests methods that you wrote
if __name__ == '__main__':
unittest.main()
|
UTF-8
|
Python
| false | false | 1,174 |
py
| 88 |
unitTestExample.py
| 84 | 0.602215 | 0.586031 | 0 | 59 | 18.898305 | 73 |
derymukti/pentools2
| 5,772,436,066,760 |
7780ecaa62e8c25bfaa6065f7319cdf08bbef002
|
87df2eea3e2983af38484e09cdf6e074c064f77c
|
/system/middleware.py
|
54a8b8fbb7671bf9e3789e46752806ad5576e4bc
|
[] |
no_license
|
https://github.com/derymukti/pentools2
|
321118da54eea912e68604149759d2a2b07ed482
|
a7d68c6c2ab05a504fda903b4acd78ed02b5ae35
|
refs/heads/master
| 2020-03-26T16:21:39.479376 | 2018-08-17T15:41:11 | 2018-08-17T15:41:11 | 145,097,022 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from functions_lib import *
from flask import request
from functools import wraps
from jose import jwt
from system import secret
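# Decorator that protects a route: the request must carry a JWT in the
# "authorization" header whose decoded payload has status == "admin".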
def login_require(f):
@wraps(f)
def decorated_function(*args, **kwargs):
        try:
            token = request.headers['authorization']
            res = jwt.decode(token, secret, algorithms=['HS256'])
            if res['status'] != "admin":
                return res['status']
            return f(*args, **kwargs)
        except Exception:
            # Missing header or invalid/expired token
            return response(code=403, message="Forbidden")
return decorated_function
|
UTF-8
|
Python
| false | false | 488 |
py
| 12 |
middleware.py
| 9 | 0.709016 | 0.696721 | 0 | 19 | 24.631579 | 54 |
YanZiQinKevin/openCV
| 9,869,834,854,289 |
a207942194cbc86671ebb90e5713a477423d3ac0
|
63cff89f6ada157be7be7888755992f5d581dec9
|
/opencv_1.py
|
5676b53a37755e588951965121470251a1883b68
|
[] |
no_license
|
https://github.com/YanZiQinKevin/openCV
|
9ad210a68a9c2b2c9e624ac4f9f47721b83770a7
|
b38a61bca50102371eb76856b9103e326bea4c02
|
refs/heads/master
| 2021-05-16T13:49:18.599506 | 2018-03-18T21:45:39 | 2018-03-18T21:45:39 | 117,765,416 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#coding: utf-8
import cv2
def main():
#reading and displaying the image
imgpath="/Users/yanziqin/openCV/image/4.1.01.tiff"
    # imread: 1 = color, 0 = grayscale
img = cv2.imread(imgpath,0)
    # Blur check: variance of the Laplacian (low values suggest a blurry image)
imageVar = cv2.Laplacian(img,cv2.CV_64F).var()
print (imageVar)
imgpath1="/Users/yanziqin/openCV/car.jpg"
    # imread: 1 = color, 0 = grayscale
    img1 = cv2.imread(imgpath1,0)
    # Laplacian response (absolute-scaled and displayed as an edge map below)
imageVar1 = cv2.Laplacian(img1,cv2.CV_16S,ksize=3)
imageVar1 = cv2.convertScaleAbs(imageVar1)
cv2.imshow('lap',imageVar1)
print (imageVar1)
#Image = set of numbers
print(img)
#[ 42 46 52] = B G R
#N dimensional arrays = composite data type
print(img.dtype)
print(img.shape)
print(img.size)
print(img.ndim)
cv2.namedWindow('lne',cv2.WINDOW_AUTOSIZE)
cv2.imshow('lne',img1)
#copy image
#outpath="/Users/yanziqin/openCV/output/4.1.01.tiff"
#cv2.imwrite(outpath,img)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ =="__main__":
main()
|
UTF-8
|
Python
| false | false | 963 |
py
| 11 |
opencv_1.py
| 8 | 0.700762 | 0.644178 | 0 | 48 | 18.166667 | 53 |
luca16s/INF1025
| 14,250,701,537,543 |
1f3fb2f9c449ca06c422ce734b7293ae6202b25b
|
ebef4a843005b257a0403e20977cab573d84d090
|
/Exercicios/Lista1/011.py
|
99d1d5ca0e53a7837409f20ae3f517d450d95bad
|
[] |
no_license
|
https://github.com/luca16s/INF1025
|
c0ce2271c33939ca74bb42ef7e132fe7b39632c9
|
adece192edd50ae35c1cb3171c41bfb86da12f4d
|
refs/heads/main
| 2023-01-22T17:35:54.827658 | 2020-12-08T00:56:28 | 2020-12-08T00:56:28 | 291,729,506 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# 11) A small but horrible alien is on top of the Eiffel Tower (which is 324 meters
# tall) and threatens to destroy the city of Paris! An MIB agent is at ground level,
# 54 meters from the Eiffel Tower, aiming a laser gun at the alien. At what angle,
# in degrees, should the agent fire the laser?
import math
altura = 324
comprimento = 54
angulo = math.atan2(altura, comprimento)
print(math.degrees(angulo))
|
UTF-8
|
Python
| false | false | 432 |
py
| 111 |
011.py
| 101 | 0.763033 | 0.732227 | 0 | 11 | 37.454545 | 105 |
EliotGeller/Matplotlib
| 3,435,973,887,040 |
c34e1e21bd206e22f14e96cd04a538601c055e2d
|
fb6fe919f0a7294946a5ac543a02ddc77c5369c0
|
/StackPlots.py
|
aeaab846d56e9dba88ada76356a757bfce1e1bce
|
[] |
no_license
|
https://github.com/EliotGeller/Matplotlib
|
f396773259d336e61db36c441501a38a924ed129
|
774ff1c26c66b11adf250dd5ef7244490f6e34ac
|
refs/heads/master
| 2020-03-16T23:02:06.684481 | 2018-05-21T01:26:39 | 2018-05-21T01:26:39 | 133,062,068 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from random import randint
group1 = []
Index = []
group1err = []
ident = ["Value 1","Value 2","Value 3","Value 4","Value 5"]
group2 = []
group2err = []
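# Fill each series with 50 random values and random error magnitudes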
for i in range(50):
group1.append(float(randint(30,40)))
group2.append(float(randint(30,40)))
group1err.append(float(randint(1,5)))
group2err.append(float(randint(1,5)))
Index.append(i)
plt.stackplot(Index, group1, group2, group1err, group2err,
labels = ["Data 1", "Data 2", "Data 1 err", "Data 2 err"],
colors = ["red", "blue", "green", "pink"],
baseline = 'weighted_wiggle')
plt.legend(loc = 'lower right')
plt.show()
|
UTF-8
|
Python
| false | false | 718 |
py
| 17 |
StackPlots.py
| 16 | 0.608635 | 0.557103 | 0 | 27 | 25.62963 | 72 |
fczj/python-S14
| 2,087,354,119,846 |
0bb86372c636359ded74a53afaccb33e6950419f
|
e5b4d9e3240b3302db47d770260dd1bcff93d5ef
|
/RabbitMQ/rabbitmq/rpc-cmd-server.py
|
0d7146c49ba5a1ba76bdf704737b6cf4d63828f4
|
[
"MIT"
] |
permissive
|
https://github.com/fczj/python-S14
|
a28f255b13c15f17d632cf920bca53ed5035752a
|
53a1350dae7c797134463c8c42edae44bb5427cb
|
refs/heads/master
| 2021-01-02T09:23:48.195642 | 2017-10-30T12:35:57 | 2017-10-30T12:35:57 | 99,205,729 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 17-8-30 11:53 PM
# @Author : xiongzhibiao
# @Email : 158349411@qqcom
# @File : rpc-cmd-server.py
# @Software: PyCharm
import pika
import os
connection = pika.BlockingConnection(pika.ConnectionParameters(
host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='run_cmd',
exchange_type='direct')
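# Run a shell command and return its output; note the server executes whatever
# command string the client sends (no sanitization).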
def run_cmd(cmd):
result = os.popen(cmd)
return result.read()
def on_request(ch, method, props, body):
response = run_cmd(body.decode())
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id = \
props.correlation_id),
body=str(response))
ch.basic_ack(delivery_tag = method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(on_request, queue='rpc_queue')
print (" [x] Awaiting RPC requests")
channel.start_consuming()
|
UTF-8
|
Python
| false | false | 1,055 |
py
| 114 |
rpc-cmd-server.py
| 80 | 0.613701 | 0.594672 | 0 | 35 | 29.057143 | 75 |
vinayjaibheem/CSEE5590_python_deep_learning
| 13,829,794,728,192 |
7821c35b9f9abf9ebc1f1283e3440c98c3172387
|
1c1be1a9fcfb65b319d63d93ebdff5561fb2518d
|
/ICP 01/Source/ICP1/reverse.py
|
7b8148e5deed002730511e80a873c07886c26cab
|
[] |
no_license
|
https://github.com/vinayjaibheem/CSEE5590_python_deep_learning
|
8b4497c34568b17091f5314f39d8be4d9f6ef910
|
1e3333aeecc36f18c6fcc98139f2b6106d885d20
|
refs/heads/master
| 2021-07-18T04:05:15.113457 | 2018-12-07T23:33:42 | 2018-12-07T23:33:42 | 146,037,638 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
first_name = input("\n Enter first name \n")
last_name = input("\n Enter last name \n")
concated_string = first_name + last_name
print("The concatenated string is: ")
print(concated_string)
print("The reversed string is: ")
reverse_string = concated_string[::-1]
print(reverse_string)
|
UTF-8
|
Python
| false | false | 288 |
py
| 54 |
reverse.py
| 36 | 0.715278 | 0.711806 | 0 | 10 | 27.9 | 45 |
KDF5000/LeetCode
| 5,471,788,371,789 |
dc6e10c1c8b12d42cf7b3776c46d63be77c28567
|
dac093e586dba971ef58e3db530424fd5d460825
|
/RemoveDumplicates.py
|
1a940af15be8ec94a6f76fd9404f717801ae5ee2
|
[] |
no_license
|
https://github.com/KDF5000/LeetCode
|
e57f3dc05f02b2865458db544eb8098c75c7e852
|
a62167ae88f93949133c3e61372ce08141b18023
|
refs/heads/master
| 2021-01-10T16:41:05.631022 | 2015-11-20T15:36:16 | 2015-11-20T15:36:16 | 44,290,829 | 1 | 4 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding: utf-8
__author__ = 'devin'
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head is None:
return head
p = head
current_val = head.val
while p is not None:
            # Duplicate value: unlink the next node
if p.next is not None and p.next.val == current_val:
p.next = p.next.next
            else:  # No node removed; move forward
if p.next is not None:
current_val = p.next.val
p = p.next
return head
if __name__ == '__main__':
head = None
p = None
for i in [1,1,2,2]:
if head is None:
head = ListNode(i)
p = head
else:
p.next = ListNode(i)
p = p.next
p = head
nodes = []
while p is not None:
nodes.append(p.val)
p = p.next
print nodes
s = Solution()
head = s.deleteDuplicates(head)
p = head
nodes = []
while p is not None:
nodes.append(p.val)
p = p.next
print nodes
|
UTF-8
|
Python
| false | false | 1,271 |
py
| 21 |
RemoveDumplicates.py
| 21 | 0.472245 | 0.468222 | 0 | 55 | 21.545455 | 64 |
SowmicaML/Compiler-directives-Program
| 8,924,942,056,499 |
07b8db134182ee3eee59c7a39135270a1e56c23e
|
fdd8dc00efec38663542ad394c8f54ffae06ebff
|
/lexical.py
|
8c59336eaba5aaac14ee7ee0e1fc8df41ab70747
|
[] |
no_license
|
https://github.com/SowmicaML/Compiler-directives-Program
|
02d544aa90e5189ca90020a7b2c9481dd433a449
|
8dc54a201b6139deb5e1262eaa30e5097426fa03
|
refs/heads/master
| 2020-11-26T14:59:34.438885 | 2019-12-19T18:51:31 | 2019-12-19T18:51:31 | 229,113,514 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
""" This is a program to perform lexical analysis for a c program in python
C program to be stored in a file InputProg.c """
#include <stdio.h>
#int main()
#{
# int a , b , c;
# b= 5 ;
# c= 6 ;
# a=b+c;
# printf("%d",a);
# return 0;
#}
#opening file
f = open('InputProg.c','r')
#defining the tokens
operator = ['=','+','-','/','*','++','--']
optname=['Assignment Operator','Additon Operator','Substraction Operator','Division Operator','Multiplication Operator','increment Operator', 'Decrement Operator']
header = {'.h': 'header file'}
header_keys = header.keys()
headerfile = {'<stdio.h>':'Standard Input Output Header','<string.h>':'String Manipulation Library'}
macros = {r'#\w+' : 'macro'}
macros_keys = macros.keys()
datatype = ['int','float','double','char','long','short']
size=['2 or 4',4,8,1,8,2]
keyword=['return','for','while','do','if','else','elseif','switch','case','break','continue']
builtin_functions = {'printf':'output operation'}
operators = ['_','`','~','!','@','#','$','%','^','&','*','(',')','|',':',';','{','}','[',']','?',',']
# Flag
dataFlag = False
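# dataFlag is True right after a datatype token, so the tokens that follow
# on the same declaration line are treated as identifiers.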
#function to determine built in functions
def builtinfunc(t):
for fun in builtin_functions:
if fun in token:
print(fun,":Built in function-",builtin_functions[fun])
#function to determine special operators
def spclop(t):
for el in operators:
if el in token:
print(el,"special operator")
#assignment operators
def op(t):
for asi in operator:
if asi in token:
print(asi,optname[operator.index(asi)])
i = f.read()#reading from the file
count = 0
program = i.split('\n')
for line in program:
count = count+1
print( "Line #",count,"\n",line,"\n")
tokens = line.split(' ')
tokens=[el for el in tokens if el!='']
print ("Tokens are",tokens)
print("\n")
for token in tokens:
        # Macro keys are regex patterns, so match them instead of testing membership
        for pattern in macros_keys:
            if re.match(pattern, token):
                print("Macro is: ", macros[pattern])
if '.h' in token:
print( "Header File is: ",token, headerfile[token])
if '()' in token:
print ("Function named", token)
if dataFlag == True and ('()' not in token) and (token not in operator) and (token not in operators) and (token not in builtin_functions ) :
print( token," :Identifier")
if token in datatype:
print (token," :Data Type"," Size=:" ,size[datatype.index(token)],"bytes")
dataFlag = True
builtinfunc(token)
spclop(token)
op(token)
if token in keyword:
print(token,":keyword")
if token.isnumeric():
print (token,":constant")
dataFlag = False
print ("________________________")
f.close()
|
UTF-8
|
Python
| false | false | 2,810 |
py
| 2 |
lexical.py
| 1 | 0.554804 | 0.550534 | 0 | 103 | 26.281553 | 163 |
olszewskip/MDFS_playground
| 1,898,375,554,280 |
7a0c91434514d70ffdddbfe0681e12be6063baae
|
8245e898fbd7bb655706de0a0c35078934ceee12
|
/python/scheduler/tiles_generator.py
|
9315324704e1d724b8d1f2d2903e9651a99f65c2
|
[] |
no_license
|
https://github.com/olszewskip/MDFS_playground
|
86e889bb08be0139ace42916c794eacfc657b74d
|
1cf7df6d8c60c2a4da7a9286a469bdb8855abb46
|
refs/heads/master
| 2020-04-08T21:01:22.839688 | 2019-05-15T14:29:06 | 2019-05-15T14:29:06 | 159,725,454 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from itertools import chain, starmap, cycle, compress
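# Generates k-dimensional tile index tuples with strictly increasing components
# and deals them out round-robin across MPI ranks, so each rank gets a disjoint share.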
class TilesGenerator():
def __init__(self, k, M, size, rank):
self.k = k # dimension of the contingency matrix
self.M = M # number of tiles across one dimension
        self.size = size  # MPI world size
        self.rank = rank  # MPI rank of this process
self._final_gen = self._checkered_tiles_gen()
def __iter__(self):
return self
    def __next__(self):
        return next(self._final_gen)
def _tiles_gen(self, sd=1):
def embedd(*indeces):
for i in range(indeces[-1] + sd, self.M):
yield (*indeces, i)
points = ((idx,) for idx in range(self.M))
for _ in range(self.k - 1):
points = chain.from_iterable(starmap(embedd, points))
return points
def _rank_selector(self):
rank = self.rank
size = self.size
return cycle( (idx == rank for idx in range(size)) )
def _checkered_tiles_gen(self):
return compress(self._tiles_gen(), self._rank_selector())
|
UTF-8
|
Python
| false | false | 1,143 |
py
| 119 |
tiles_generator.py
| 67 | 0.549431 | 0.546807 | 0 | 39 | 28.307692 | 65 |
l5d1l5/python-urbanPlanning
| 6,743,098,679,451 |
e0527f61a55f8e7f80eedab9f94cb23fefebb568
|
b7b122b7b9c35ec2f191021c3396f4790f023ed3
|
/23_生活圈_02_通过计算曲线拐点找到特征层级/data_generator.py
|
2141b71e4f8da8e7b482e8c620c9f2b67defe951
|
[
"MIT"
] |
permissive
|
https://github.com/l5d1l5/python-urbanPlanning
|
e7fe413441aea7df94765062fe14d331a6e0006a
|
f22e5b561a55ac2263aa96e8e9aff155b0aae1fd
|
refs/heads/master
| 2020-06-22T09:54:54.895918 | 2019-06-18T02:22:14 | 2019-06-18T02:22:14 | 197,694,260 | 0 | 2 |
MIT
| true | 2019-07-19T03:19:41 | 2019-07-19T03:19:40 | 2019-07-17T02:51:51 | 2019-06-18T02:22:15 | 513,243 | 0 | 0 | 0 | null | false | false |
import numpy as np
class DataGenerator(object):
"""Generate data to work with kneedle."""
    def __init__(self):
pass
@staticmethod
def noisy_gaussian(mu=50, sigma=10, N=100):
"""Recreate NoisyGaussian from the orignial kneedle paper.
:param mu: The mean value to build a normal distribution around
:type mu: int
:param sigma: The standard deviation of the distribution.
:type sigma: int
:param N: The number of samples to draw from to build the normal distribution.
:type N: int
:returns: tuple(x, y)
:rtypes: (array, array)
"""
z = np.random.normal(loc=mu, scale=sigma, size=N)
x = np.sort(z)
y = np.array(range(N)) / float(N)
return x, y
@staticmethod
def figure2():
"""Recreate the values in figure 2 from the original kneedle paper.
:returns: tuple(x, y)
:rtypes: (array, array)
"""
with np.errstate(divide='ignore'):
x = np.linspace(0.0, 1, 10)
return x, np.true_divide(-1, x + 0.1) + 5
@staticmethod
def decreasing():
"""Test function for decreasing data.
:returns: tuple(x, y)
:rtypes: (array, array)
"""
x = [1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000]
y = [2314, 802, 519, 417, 358, 318, 302, 284, 280]
return x, y
|
UTF-8
|
Python
| false | false | 1,409 |
py
| 211 |
data_generator.py
| 64 | 0.562811 | 0.504613 | 0 | 45 | 30.311111 | 86 |
marek5050/Sparkie
| 13,134,010,006,264 |
fdc9e8975480a3e23edd3961e5b7ae4d7d7b767b
|
06719484b00557f729ea7f994525b7a614073611
|
/Streaming/kmeans_reducer.py
|
8c59a0773e0d436ec292eb4b126dda8753ce700f
|
[] |
no_license
|
https://github.com/marek5050/Sparkie
|
c5945ee8d8b82d1b8787fa7ea12245f5f155dc16
|
f764cea0707afe4f2aa67ce5056a6204400eca4c
|
refs/heads/master
| 2016-09-11T02:56:19.882248 | 2015-06-25T18:30:10 | 2015-06-25T18:30:10 | 31,774,324 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import sys
base = 0
count = 0
priorCluster = -1
def mean(num):
    return str(num / count)
for line in sys.stdin:
    cord = line.split("\t")
    currentCluster = cord[0]
    point = [int(s) for s in cord[1].split(" ")]
    if currentCluster != priorCluster:
        if count > 0:
            # Emit the mean of the cluster that just ended
            print " ".join(map(mean, base))
        priorCluster = currentCluster
        base = 0
        count = 0
    count += 1
    if base == 0:
        base = point
    else:
        # Accumulate coordinate-wise sums; the index must advance with each coordinate
        for idx, coord in enumerate(point):
            base[idx] += coord
# Emit the final cluster once the input is exhausted
if count > 0:
    print " ".join(map(mean, base))
|
UTF-8
|
Python
| false | false | 673 |
py
| 35 |
kmeans_reducer.py
| 24 | 0.573551 | 0.555721 | 0 | 40 | 15.675 | 66 |
Owncreed93/intro-django
| 19,146,964,246,180 |
9d5c9244059c916e4601f112a65b6e90faacfc00
|
dae99c679399b4bf9faa6a8c7fca89ed17989642
|
/educational_site/courses/models.py
|
cc8412f24da8e39aed2a49d10d506c72ac7268ac
|
[] |
no_license
|
https://github.com/Owncreed93/intro-django
|
da5a5d7bf9c2d133801d35a6c681bb1d24e5c00a
|
e5218bd2ceb2761b7c053b9948b126c5c5b87780
|
refs/heads/master
| 2022-04-22T14:14:41.615215 | 2020-04-25T08:11:17 | 2020-04-25T08:11:17 | 257,488,596 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
class Course(models.Model):
created_at = models.DateTimeField(
auto_now_add = True
)
name = models.CharField(max_length = 180, default = None)
description = models.TextField(default = None)
def __str__(self):
return self.name
|
UTF-8
|
Python
| false | false | 296 |
py
| 5 |
models.py
| 4 | 0.64527 | 0.635135 | 0 | 13 | 21.769231 | 62 |
LiamPa/betfairscrape
| 19,207,093,761,142 |
e73089f06679ab72062d1e281ba6af993e584ee1
|
c2d0d48c232dc00b32d19aec9c2187697bb2181e
|
/betfairscrape/parse/racecard.py
|
9c85d69db2406d9406f66317e4568451dde53fef
|
[] |
no_license
|
https://github.com/LiamPa/betfairscrape
|
eb6c00bb44346dd92cae289d31395937983da617
|
9db535b611a81c0fb4bd6381506ed2b7ae3ed732
|
refs/heads/master
| 2016-08-12T18:35:17.174958 | 2016-05-07T10:58:43 | 2016-05-07T10:58:43 | 51,641,727 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from ..utils import strp_betfair_time
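# Plain-data wrappers that map the fields of a Betfair race card response
# onto Python object attributes.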
class RaceCard:
def __init__(self, race_card):
self.raw_response = race_card
self.in_play_hints = [InPlayHints(hint) for hint in race_card.get('inPlayHints')]
self.minimum_weight = race_card.get('minimumWeight')
self.prize = race_card.get('prize')
self.race = Race(race_card.get('race'))
class InPlayHints:
def __init__(self, in_play_hints):
self.hint_name = in_play_hints.get('hintName')
self.hint_value = in_play_hints.get('hintValue')
class Race:
def __init__(self, race):
self.betfair_meeting_id = race.get('betfairMeetingId')
self.distance = race.get('distance')
self.eligibility = race.get('eligibility')
self.is_result_available = race.get('isResultAvailable')
self.meeting_going = race.get('meetingGoing')
self.meeting_id = race.get('meetingId')
self.number_of_runners = race.get('numberOfRunners')
self.race_class = race.get('raceClass')
self.race_id = race.get('raceId')
self.race_title = race.get('raceTitle')
self.result_status = race.get('resultStatus')
self.start_date = strp_betfair_time(race.get('startDate'))
self.race_type = Type(race.get('raceType'))
self.race_classification = RaceClassification(race.get('raceClassification'))
self.markets = [Market(market) for market in race.get('markets')]
self.going = Type(race.get('going'))
self.course = Course(race.get('course'))
class Course:
def __init__(self, course):
self.country = course.get('country')
self.country_code = course.get('countryCode')
self.course_id = course.get('courseId')
self.course_type = course.get('courseType')
self.name = course.get('name')
self.surface_type = course.get('surfaceType')
self.timeform_course_code = course.get('timeformCourseCode')
self.timezone = course.get('timezone')
class Type:
def __init__(self, data):
self.abbr = data.get('abbr')
self.full = data.get('full')
self.key = data.get('key')
class Market:
def __init__(self, market):
self.market_id = market.get('marketId')
self.market_type = market.get('marketType')
self.number_of_winners = market.get('numberOfWinners')
class RaceClassification:
def __init__(self, race_classification):
self.classification = race_classification.get('classification')
self.classification_abbr = race_classification.get('classificationAbbr')
self.code = race_classification.get('code')
self.display_name = race_classification.get('displayName')
self.display_name_abbr = race_classification.get('displayNameAbbr')
|
UTF-8
|
Python
| false | false | 2,788 |
py
| 7 |
racecard.py
| 6 | 0.638809 | 0.638809 | 0 | 79 | 34.291139 | 89 |
leejunbo/manage
| 4,398,046,517,907 |
018bfb8e3715026ebc5bad2b91fa738949fd6822
|
994e38c2663bb57e7e614328c98a932e03437cb6
|
/register.py
|
a39152a10403cd8e641ae5d468b3716f259fabb6
|
[] |
no_license
|
https://github.com/leejunbo/manage
|
a99b42e7ed5605c677c779a722a1e73ed1b692ac
|
b16b14d391edbce0294632856052751ba5e07586
|
refs/heads/master
| 2020-03-13T20:14:45.672674 | 2018-04-27T09:03:06 | 2018-04-27T09:03:06 | 131,269,722 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import manage
from flask import request
@manage.app.route('/register')
def register():
    db = manage.get_db()
    cursor = db.cursor()
    sql = "INSERT INTO register (account, psw, repsw, phone, email) VALUES (%s, %s, %s, %s, %s)"
    # Parameter names are assumed to match the submitted form/query fields
    values = tuple(request.values.get(k) for k in ('account', 'psw', 'repsw', 'phone', 'email'))
    cursor.execute(sql, values)
    db.commit()
    return manage.render_template("register.html")
|
UTF-8
|
Python
| false | false | 278 |
py
| 23 |
register.py
| 5 | 0.658273 | 0.658273 | 0 | 10 | 26.8 | 89 |
MASk1999/PorsheForLife
| 14,851,996,916,407 |
183e5ca17a0870bed7de4efe6e166f0c07e09a72
|
c0083bc593015b6786e18b32ca4cd304b72ffb06
|
/porsche/taycan/urls.py
|
6d31b88c9736a3f2df4b0e44db2824660754f587
|
[] |
no_license
|
https://github.com/MASk1999/PorsheForLife
|
e3967ba7a88a886f186c93fe09b58d398038a805
|
5815309a60fb24396d04558cf9327f141131ef2c
|
refs/heads/master
| 2022-05-28T13:07:46.347764 | 2020-05-02T14:51:33 | 2020-05-02T14:51:33 | 260,621,643 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='taycan-home'),
path('nextpage/', views.nextpage, name='taycan-next'),
path('cr/', views.CR, name='taycan-CR'),
path('or/', views.OR, name='taycan-OR'),
]
|
UTF-8
|
Python
| false | false | 263 |
py
| 12 |
urls.py
| 7 | 0.631179 | 0.631179 | 0 | 9 | 28.222222 | 58 |
albert-zhao/py_code
| 13,675,175,889,263 |
163e1b407ec03229f701b5c1ad8642ebf6deee32
|
6365c4ff836c16103f39bf394c306eff1c08351a
|
/crawler/crawler_link1.py
|
e448bba0efe4ecc260cba5540639e0de9a67fac5
|
[] |
no_license
|
https://github.com/albert-zhao/py_code
|
8f7d9d52b68cc409fc9161fbad980c083b297548
|
267e9898c04392ff2352baaefe575f32fe6ca0e5
|
refs/heads/master
| 2020-03-23T12:55:26.907635 | 2019-12-25T07:20:46 | 2019-12-25T07:20:46 | 141,590,466 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/python
import re
import urllib2
import urlparse
def link_crawler(seed_url, link_regex):
"""Crawl from the given seed URL following links matched by link_regex
"""
crawl_queue = [seed_url]
    seen = set(crawl_queue)  # track which URLs have already been seen
while crawl_queue:
url = crawl_queue.pop()
html = download(url)
for link in get_links(html):
# check if link matches expected regex
if re.match(link_regex, link):
# form absolute link
link = urlparse.urljoin(seed_url, link)
# print 'link', link
# check if have already seen this link
if link not in seen:
seen.add(link)
crawl_queue.append(link)
def get_links(html):
"""Return a list of links from html
"""
if html is None:
return []
# a regular expression to extract all links from the webpage
webpage_regex = re.compile('<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
# list of all links from the webpage
return webpage_regex.findall(html)
def download(url, numretries=2):
print 'Downloading:', url
try:
html = urllib2.urlopen(url).read()
except urllib2.URLError as e:
print 'Download error : ', e.reason
html = None
if numretries > 0:
if hasattr(e, 'code') and 500 <= e.code < 600:
                # recursively retry 5xx HTTP errors
return download(url, numretries - 1)
return html
if __name__ == '__main__':
link_crawler('http://example.webscraping.com', '/places/default/(index|view)')
|
UTF-8
|
Python
| false | false | 1,668 |
py
| 50 |
crawler_link1.py
| 45 | 0.576739 | 0.569544 | 0 | 54 | 29.907407 | 82 |
satyaambati5/hyderabadiEvents
| 13,915,694,069,649 |
ca8bb33c8b69f44594f279bb7fc499853cd6050a
|
b6fb85d8700032e11f82a9343c32a3140fc56839
|
/hyddavaths/admin.py
|
7bef490e301af45a537827a920eddddcdeae14a7
|
[] |
no_license
|
https://github.com/satyaambati5/hyderabadiEvents
|
8fdc11560696f54ff071afff04157c758195ca70
|
2a7ab2766583cd4fcbad6e23c8d443bdf9a8b881
|
refs/heads/master
| 2023-04-24T18:54:53.825808 | 2021-05-19T20:32:33 | 2021-05-19T20:32:33 | 368,993,882 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from .models import scroler,gallery_photo
# Register your models here.
admin.site.register(scroler)
admin.site.register(gallery_photo)
|
UTF-8
|
Python
| false | false | 168 |
py
| 10 |
admin.py
| 6 | 0.821429 | 0.821429 | 0 | 6 | 27.166667 | 41 |
yzlee/genpac
| 5,557,687,686,412 |
40856edacac4d34d6b514cffa13ac211d5413992
|
66ce1760e66b673b0f7a32f9302a0161a8c81459
|
/genpac/__init__.py
|
cf18116d463e767d2f6e64193157f22589fbdb5b
|
[
"MIT"
] |
permissive
|
https://github.com/yzlee/genpac
|
bf5a5103c3bd308427415926163e7b053b27d73b
|
d766d2e643a826cb3049d3219bcdc59c2571053b
|
refs/heads/master
| 2021-04-18T18:46:34.637542 | 2018-03-22T06:38:39 | 2018-03-22T06:38:39 | 126,289,961 | 0 | 0 | null | true | 2018-03-22T06:28:31 | 2018-03-22T06:28:31 | 2018-03-21T08:02:40 | 2017-11-10T07:42:31 | 443 | 0 | 0 | 0 | null | false | null |
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, absolute_import,
division, print_function)
__version__ = '2.0.1'
__author__ = 'JinnLynn <eatfishlin@gmail.com>'
__license__ = 'The MIT License'
__copyright__ = 'Copyright 2013-2017 JinnLynn'
__all__ = ['GenPAC', 'Namespace', 'Generator', 'Config',
'FmtBase', 'FmtPAC', 'FmtDnsmasq', 'FmtWingy',
'GenPACDeprecationWarning', 'run', 'formater', 'parse_rules']
from .core import GenPAC, Generator, Namespace, formater, parse_rules
from .config import Config
from .formats import FmtBase, FmtPAC, FmtDnsmasq, FmtWingy
from .deprecated import GenPACDeprecationWarning, install_showwarning
install_showwarning()
def run():
gp = GenPAC()
gp.run()
|
UTF-8
|
Python
| false | false | 765 |
py
| 4 |
__init__.py
| 3 | 0.661438 | 0.645752 | 0 | 24 | 30.875 | 72 |
robertl9/CS4300_Flask_template
| 2,911,987,839,478 |
90477b0eb096a3c1f4acaae3d27477ed07e79613
|
7724b56c2eff8f1a788b903cc0e47df4dcac3dbd
|
/precomputation_scripts/units.py
|
0285aa8ac8cb35b72a3c63507220f620c5561d71
|
[] |
no_license
|
https://github.com/robertl9/CS4300_Flask_template
|
c0a1cb9aaf3d8ac53bfc351b13fd963cffcc7ace
|
cd3368c763d19ae4028167a1eaedbecbdcb57596
|
refs/heads/master
| 2021-04-09T13:16:03.974048 | 2018-05-01T02:59:29 | 2019-04-11T01:15:11 | 125,662,039 | 0 | 0 | null | true | 2018-03-18T21:26:36 | 2018-03-17T19:23:57 | 2018-03-17T19:23:59 | 2018-03-18T21:26:36 | 105 | 0 | 0 | 0 |
Python
| false | null |
smallest = ["pinch", "pinches", "gram", "mL", "grams", "g", "ml", "gr", "gm", "dash", "Dash", "dashes", "Gram"]
spoon = ["teaspoons", "tablespoon", "teaspoon", "tablespoons", "tsp", "Tb", "tbsp", "Tablespoon",
"Teaspoon", "T", "t", "Tbsp", "Tbs", "Tablespoons", "tsps", "Tsp","Teaspoons", "tbsps", "TB", "Tbsps"]
cup = ["cup", "cups", "c", "C", "Cup", "Cups"]
pint = ["pint", "pints", "pt"]
small = ["ounces", "oz", "ounce", "fl. oz.", "ozs", "fluid ounces", "Oz"]
bigger = ["pounds", "lb", "pound", "lbs", "kg", "Pound", "Pounds", "quarts", "quart", "l", "liters", "L", "liter"]
length = ["9-inch", "7-inch", "3-inch", "6-inch", "2-inch", "4-inch", "10-inch", "8 inch", "inches", "8-inch","inch"]
biggest = ["gallon"]
units_lst = [(smallest, 0.1), (spoon, 1), (cup, 1), (pint, 4), (small, 0.2), (bigger, 8), (length, 0.5), (biggest, 16)]
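# Look up the relative weight for a unit string; unrecognized units default to 1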
def unit_weights(unit):
for (lst, weight) in units_lst:
if unit in lst:
return weight
return 1
|
UTF-8
|
Python
| false | false | 948 |
py
| 15 |
units.py
| 8 | 0.542194 | 0.517932 | 0 | 18 | 51.722222 | 119 |
julianlucasvb/100DaysOfCode
| 17,824,114,305,501 |
1bc62cac7fe58ec97a6c4b09b6c9ff6f56d6f51c
|
97914ca57fdd03f5785ee64e5268a0c9e67dc86c
|
/day 2/desafio005.py
|
45d06b850888d8a4eb7b05e1604c658dc1504ebe
|
[] |
no_license
|
https://github.com/julianlucasvb/100DaysOfCode
|
54822084fa6eb638b83321d8a09354a4349742c3
|
9799aa30882db1e7ddff65b8074874004f1c413a
|
refs/heads/master
| 2023-01-21T11:06:12.318445 | 2020-11-27T03:46:34 | 2020-11-27T03:46:34 | 315,495,472 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
num = int(input('Enter a number: '))
antecessor = num - 1
sucessor = num + 1
print('The predecessor of the number entered is {0} and the successor is {1}.'.format(antecessor, sucessor))
|
UTF-8
|
Python
| false | false | 176 |
py
| 7 |
desafio005.py
| 7 | 0.695402 | 0.672414 | 0 | 4 | 42.5 | 94 |
afcarl/SpatialMoney
| 7,662,221,670,106 |
3c7106e46a8ad441a898e8405d813737b9980a69
|
48c4bb95c2d49ca9dca1e6356e61784d6f36a01d
|
/__old__/merge_db.py
|
8aefd4a4b5a3d27bd24b5890418332a1e4d99967
|
[] |
no_license
|
https://github.com/afcarl/SpatialMoney
|
2a5e5caf91ed7b0925317d4ec949d9ca4752f416
|
6c9339ae556c842f228cd1cce65842b5b346f060
|
refs/heads/master
| 2020-03-16T16:11:36.835280 | 2018-03-19T20:31:31 | 2018-03-19T20:31:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from sqlite3 import connect
from datetime import date
from os import listdir, remove
from tqdm import tqdm
class DatabaseManager(object):
@classmethod
def run(cls):
data_folder = "/Users/M-E4-ANIOCHE/Desktop/SpatialEconomy-data"
new_db_name = "data_{}".format(str(date.today()).replace("-", "_"))
new_db_folder = "/Users/M-E4-ANIOCHE/Desktop"
db_path = "{}/{}.db".format(new_db_folder, new_db_name)
# remove("{}/{}.db".format(new_db_folder, new_db_name))
connection = connect(db_path)
cursor = connection.cursor()
# Get the list of the databases containing data
list_db_name = [i for i in listdir(data_folder) if i[-3:] == ".db"]
# Create tables
for i in tqdm(list_db_name):
old_db_path = "{}/{}".format(data_folder, i)
query = \
"ATTACH DATABASE '{}' As old;".format(old_db_path)
cursor.execute(query)
# cursor.execute("BEGIN")
query = "SELECT name FROM old.sqlite_master WHERE type='table'"
cursor.execute(query)
            table_names = [row[0] for row in cursor.fetchall()]
if "sqlite_sequence" in table_names:
table_names.remove("sqlite_sequence")
for j in table_names:
if j.startswith("parameters"):
# Create parameters table in new db
query = \
"CREATE TABLE IF NOT EXISTS `parameters` (`ID` INTEGER PRIMARY KEY, " \
"`eco_idx` INTEGER, `vision_area` INTEGER, " \
"`movement_area` INTEGER, `stride` INTEGER, " \
"`width` INTEGER, `height` INTEGER , " \
"`x0` INTEGER, `x1` INTEGER, `x2` INTEGER, " \
"`alpha` REAL, `tau` REAL, `t_max` INTEGER);"
cursor.execute(query)
else:
query = "CREATE TABLE IF NOT EXISTS `exchanges_{}` (ID INTEGER PRIMARY KEY , " \
"`exchange_type` TEXT, " \
"`t` INTEGER, `x0` REAL, `x1` REAL, `x2` REAL);"\
.format(j.split("_")[2])
cursor.execute(query)
# cursor.execute("COMMIT")
query = \
"DETACH old"
cursor.execute(query)
# Fill tables
idx_parameter = 0
for i in tqdm(list_db_name):
old_db_path = "{}/{}".format(data_folder, i)
query = \
"ATTACH DATABASE '{}' As old;".format(old_db_path)
cursor.execute(query)
query = "SELECT name FROM old.sqlite_master WHERE type='table'"
cursor.execute(query)
            table_names = [row[0] for row in cursor.fetchall()]
if "sqlite_sequence" in table_names:
table_names.remove("sqlite_sequence")
for j in table_names:
query = "SELECT * FROM old.'{}';".format(j)
cursor.execute(query)
dat = cursor.fetchall()
if j.startswith("parameters"):
eco_idx = int(j.split("parameters_")[1])
for d in dat:
query = \
"INSERT INTO `parameters` (`ID`, `eco_idx`, `vision_area`, " \
"`movement_area`, `stride`, `width`, " \
"`height`, `x0`, `x1`, `x2`, " \
"`alpha`, `tau`, `t_max`" \
") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);"
cursor.execute(query, (idx_parameter, eco_idx) + d[1:])
idx_parameter += 1
else:
eco_idx = j.split("_")[2]
exchange_type = j.split("_")[0]
query = "SELECT MAX(id) FROM exchanges_{}".format(eco_idx)
cursor.execute(query)
output = cursor.fetchone()
if output[0] is None:
idx = 0
else:
idx = output[0] + 1
for k, d in enumerate(dat):
query = "INSERT INTO `exchanges_{}` (`ID`, `exchange_type`, `t`, `x0`, `x1`, `x2`) " \
"VALUES (?, ?, ?, ?, ?, ?);".format(eco_idx)
cursor.execute(query, (idx+k, exchange_type) + d)
query = \
"DETACH old"
cursor.execute(query)
connection.commit()
connection.close()
def main():
d = DatabaseManager()
d.run()
if __name__ == "__main__":
main()
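# Usage note (illustrative): the source and target folders are hard-coded in
# run() above; adjust data_folder/new_db_folder before running this script.
# It merges every *.db file in data_folder into one dated database via
# SQLite ATTACH/DETACH, renumbering row ids while copying.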
|
UTF-8
|
Python
| false | false | 4,801 |
py
| 40 |
merge_db.py
| 34 | 0.449698 | 0.443658 | 0 | 150 | 31.013333 | 110 |
faisalarkan21/TensorFlow-Examples
| 12,721,693,132,616 |
5f820f8f5ad3adcd4f03bcfcf41bba28d0b9bbbc
|
e4c69632b1b5507d637254d6f5238980e17ff810
|
/examples/testing/tensor_dashboard_example.py
|
f5a608a15a8e0dc0d2dc8714223d9356e7b28cc4
|
[
"MIT"
] |
permissive
|
https://github.com/faisalarkan21/TensorFlow-Examples
|
23d1a43e58709c808332b62ea08f8a8c6d9359ca
|
b2ed71ca904dce8cb22a71d379872370638a9221
|
refs/heads/master
| 2020-04-10T22:51:41.533393 | 2018-03-08T05:37:53 | 2018-03-08T05:37:53 | 124,303,219 | 0 | 0 |
MIT
| true | 2018-03-07T22:40:08 | 2018-03-07T22:40:08 | 2018-03-07T20:59:18 | 2018-03-07T15:17:48 | 5,667 | 0 | 0 | 0 | null | false | null |
import tensorflow as tf
C_1 = tf.constant(5.0)
C_2 = tf.constant(1.0)
C_3 = tf.constant(2.0)
golden_ratio = (tf.sqrt(C_1) + C_2)/C_3
with tf.Session() as sess:
writer = tf.summary.FileWriter('logs', sess.graph)
    print(sess.run(golden_ratio))
writer.close()
# then run,
# tensorboard --logdir=logs
# logs is folder, with events file in it.
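# Note: the above relies on the TF1.x graph/Session API; under TensorFlow 2
# it would need tf.compat.v1 (and tf.compat.v1.disable_eager_execution()).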
|
UTF-8
|
Python
| false | false | 352 |
py
| 2 |
tensor_dashboard_example.py
| 2 | 0.664773 | 0.630682 | 0 | 16 | 21.0625 | 54 |
dojo-natal/dojo-natal
| 19,524,921,344,534 |
4e6bddc364cdf71619a96fa43210e43cdfda70b0
|
f909d50732e807488e348bd46e3faec732b1dd6d
|
/2020/20200129_prefixa_lisp/prefixa_lisp.py
|
f74dfcc8d89932e43ac0722c31036323527b882b
|
[] |
no_license
|
https://github.com/dojo-natal/dojo-natal
|
3489f82067908b7a96c6f6cfc5b1efbdaca35669
|
05a96aff80c4d04421a62eb348c9c479e02f3c7e
|
refs/heads/master
| 2023-08-07T16:16:17.393455 | 2023-07-27T01:11:15 | 2023-07-27T01:11:15 | 190,207,999 | 6 | 3 | null | false | 2019-07-13T21:22:35 | 2019-06-04T13:33:09 | 2019-07-10T10:24:38 | 2019-07-10T10:24:36 | 10 | 4 | 2 | 0 |
Python
| false | false |
def prefixa_lisp(expr):
if expr == "(+ (+ 2 3) 4)":
return 9
op, n1, n2 = expr[1:-1].split(" ")
if op == "+":
return float(n1) + float(n2)
if op == "-":
return float(n1) - float(n2)
if op == "/":
return float(n1) / float(n2)
if op == "*":
return float(n1) * float(n2)
def construct(expr):
# "(+ (+ (2) (3)) (4
char = expr[0]
if char in ("-", "+", "/", "*"):
tmp = Node(char)
opens, closes = 0, 0
chunk_left, chunk_right = "", ""
position = 0
for ch in expr:
position += 1
if ch == "(":
opens += 1
if ch == ")":
closes += 1
chunk_left += ch
if closes == opens:
break
tmp.left = construct(chunk_left)
tmp.right = construct(expr[position:])
return tmp
# if ch in ("-", "+", "*", "/"):
# tmp = Node(ch)
class Node(object):
def __init__(self, symbol):
self.symbol = symbol
self.left = None
self.right = None
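# Illustrative checks (not part of the original kata): flat expressions are
# evaluated directly, while the nested case is still the hard-coded TDD step.
if __name__ == "__main__":
    assert prefixa_lisp("(+ 2 3)") == 5.0
    assert prefixa_lisp("(* 4 2)") == 8.0
    assert prefixa_lisp("(+ (+ 2 3) 4)") == 9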
|
UTF-8
|
Python
| false | false | 1,117 |
py
| 86 |
prefixa_lisp.py
| 79 | 0.408236 | 0.38496 | 0 | 49 | 21.632653 | 46 |
gtzago/aprendizado-maquina
| 3,762,391,392,784 |
79b3bc2dd09946ef0c0fa3c00bf80b7746d14557
|
e51142facf2882bb44ed94737064ce9aed4b142d
|
/classif_regres/regression_tree.py
|
d86befc86bd6aa6286326a921e5381b6afb05d9b
|
[] |
no_license
|
https://github.com/gtzago/aprendizado-maquina
|
44325bcaa6e980d0e28e967731424d7f8e4b5b00
|
319d3c1f9d96cf1a80754c905cefcecb7ae24ce5
|
refs/heads/master
| 2021-01-10T11:03:32.622708 | 2016-02-24T16:39:53 | 2016-02-24T16:39:53 | 52,455,681 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import utils
import networkx as nx
class RegressionTree(object):
def __init__(self, nclasses):
self.root = None
self.children = None
self.att = None
self.n = 0
self.m = 0
self.nclasses = nclasses
    def train(self, x, y, SDRMIN=0.05, NMIN=0, xvalues=None):
        self.x = np.array(x)
        self.y = np.reshape(y, (-1, 1))
        self.n = self.y.size
        self.m = self.x.shape[1]
        if xvalues is None:  # avoid sharing a mutable default dict between calls
            xvalues = {}
        if len(xvalues) == 0:
            for att in range(0, self.m):
                xvalues[att] = np.union1d(self.x[:, att], self.x[:, att])
if self.n < NMIN or self.n <= 1 or np.std(y) == 0.0:
self.root = np.mean(self.y)
else:
sdr = np.zeros(self.m)
for att in range(0, self.m):
                # nominal attributes only
sdr[att] = self.sdr(self.x, self.y, att)
att = np.argmax(sdr)
values = xvalues[att]
sdr = sdr[att]
if sdr < SDRMIN:
                self.root = np.mean(self.y)  # predict the leaf mean
else:
self.root = None
self.children = {}
for v in values:
self.children[v] = RegressionTree(self.nclasses)
# true if attribute is discrete (string)
                if isinstance(self.x[0, att], str):  # Python 3: nominal attributes are str
self.att = att
x = self.x
y = self.y
del self.x
del self.y
for v in values:
ind = np.where(x[:, att] == v)
if len(ind[0]) != 0:
ind = ind[0]
self.children[v].train(
x[ind, :], y[ind, 0], SDRMIN, NMIN, xvalues)
else:
                            # unseen value: predict the leaf mean
self.children[v].root = np.mean(y)
else:
                    # TODO: continuous attributes
pass
    def gerar_grafo(self, g=None, pos=None, idnode=0, x=0, y=0):
        if g is None:  # avoid mutable default arguments
            g = nx.Graph()
        if pos is None:
            pos = {}
        filhos = self.children.keys()
deltax = np.linspace(-np.power(4.0, 10 + y),
np.power(4.0, 10 + y), len(filhos))
j = 1
pos[idnode] = [x, y]
for thr in filhos:
if self.children[thr].root is None:
                # connect parent and child
                g.add_edge(idnode, 10 * idnode + j)
                # label the edge with the split value (networkx >= 2.x API)
                g.edges[idnode, 10 * idnode + j]['value'] = thr
g, pos = self.children[thr].gerar_grafo(
g, pos, 10 * idnode + j, x + deltax[j - 1], y - 1)
j = j + 1
else:
                # connect parent and child
                g.add_edge(idnode, 10 * idnode + j)
                # label the edge with the split value
                g.edges[idnode, 10 * idnode + j]['value'] = thr
                # label the leaf node with its prediction
                g.nodes[10 * idnode + j]['name'] = str(self.children[thr].root)
pos[10 * idnode + j] = [x + deltax[j - 1], y - 1]
j = j + 1
        # label the node with its split attribute
        g.nodes[idnode]['name'] = str(self.att)
return g, pos
def estimate(self, x):
self.x = np.array(x)
n = self.x.shape[0]
self.yhat = np.array([])
for i in range(0, n):
self.yhat = np.append(self.yhat, self.estimate_one(self.x[i, :]))
return self.yhat
    def estimate_one(self, x):
        if self.root is None:
            for v in self.children.keys():
                if x[self.att] == v:
                    return self.children[v].estimate_one(x)
            return None  # attribute value never seen during training
        else:
            return self.root
    def sdr(self, x, y, att):
        '''
        Computes the standard deviation reduction (SDR) of splitting on att.
        Inputs:
            x: matrix of examples.
            y: vector of classes/outputs.
            att: attribute to be tested.
        '''
values = np.union1d(x[:, att], x[:, att])
# true if attribute is discrete (string)
        if isinstance(x[0, att], str):  # Python 3
sdr = np.std(y)
for v in values:
sdr = sdr - \
utils.simple_probability(
x, att, np.equal, v) * np.std(y[x[:, att] == v])
else:
sdr = 0
return sdr
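# Illustrative usage (assumes utils.simple_probability is available and the
# attributes are nominal, as required by train()):
# x = np.array([["sunny", "hot"], ["rainy", "mild"], ["sunny", "mild"]])
# y = np.array([1.0, 2.0, 1.5])
# tree = RegressionTree(nclasses=1)
# tree.train(x, y, SDRMIN=0.0)
# print(tree.estimate(x))  # one prediction per row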
|
UTF-8
|
Python
| false | false | 4,617 |
py
| 30 |
regression_tree.py
| 28 | 0.440113 | 0.42755 | 0 | 137 | 32.70073 | 78 |
MelonFaceDOOM/cannabis_scraping
| 15,977,278,342,326 |
e192bc691b254c8e1512292559090c5cdb38e097
|
9ca5486c0bf9d4a4a7dc26605a79eadb76ee9c7b
|
/analysis/price_by_quantity.py
|
869fa424de5862045b3aa09d360c4436aa4a9f47
|
[] |
no_license
|
https://github.com/MelonFaceDOOM/cannabis_scraping
|
e6f41e3741c483699085b85035b0eb3adf5252b1
|
e1aef9554346aa00102e8e8059b336d7ff95183c
|
refs/heads/master
| 2020-09-05T10:19:59.199381 | 2020-03-09T20:15:22 | 2020-03-09T20:15:22 | 220,071,588 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from scraping.build_dataframe import merge_all_parsed
from cleaning.categorization import extract_flower
from cleaning.clean_weight_price import price_tuple_to_grams
from cleaning.flower_cleaning import clean_name, specific_replacements, standardize_og_kush
import matplotlib.pyplot as plt
def main():
df = merge_all_parsed()
df = extract_flower(df)
df['strain'] = df['name'].apply(clean_name)
df['strain'] = df['strain'].apply(specific_replacements)
df['strain'] = df['strain'].apply(standardize_og_kush)
df['gram_prices'] = df['prices'].apply(price_tuple_to_grams)
legal_sites = ["ocs"]
df_legal = df[df['site'].isin(legal_sites)]
    df_illegal = df.loc[list(set(df.index) - set(df_legal.index))]  # .loc needs a list, not a set
# # get list of unique weight values
# weights = df_flower['gram_prices'].apply(lambda x: [p[0] for p in x]).to_list()
# sorted(list({item for row in weights for item in row }))
df_legal = calc_price_per_gram(df_legal)
df_illegal = calc_price_per_gram(df_illegal)
ppg5_legal = df_legal['ppg_5'].mean()
ppg15_legal = df_legal['ppg_15'].mean()
ppg28_legal = df_legal['ppg_28'].mean()
ppgbulk_legal = df_legal['ppg_bulk'].mean()
ppg5_illegal = df_illegal['ppg_5'].mean()
ppg15_illegal = df_illegal['ppg_15'].mean()
ppg28_illegal = df_illegal['ppg_28'].mean()
ppgbulk_illegal = df_illegal['ppg_bulk'].mean()
# TODO: check the weight values in each price category
# (i.e. is ppg_28 purely 28g, or are there some between 14 and 28g)
txt_output = []
txt_output.append(f'Average price 5g or less (legal - illegal): {ppg5_legal:0.2f} - {ppg5_illegal:0.2f}')
txt_output.append(f'Average price for 5-15g (legal - illegal): {ppg15_legal:0.2f} - {ppg15_illegal:0.2f}')
txt_output.append(f'Average price for 15-28g (legal - illegal): {ppg28_legal:0.2f} - {ppg28_illegal:0.2f}')
txt_output.append(f'Average price for >28g (legal - illegal): {ppgbulk_legal:0.2f} - {ppgbulk_illegal:0.2f}')
# cheapest listings
# df_legal.sort_values(by=['ppg_3.5']
count_5_i = df_illegal['ppg_5'].count()
count_15_i = df_illegal['ppg_15'].count()
count_28_i = df_illegal['ppg_28'].count()
count_bulk_i = df_illegal['ppg_bulk'].count()
count_5_l = df_legal['ppg_5'].count()
count_15_l = df_legal['ppg_15'].count()
count_28_l = df_legal['ppg_28'].count()
count_bulk_l = df_legal['ppg_bulk'].count()
txt_output.append(f'there were {count_5_i} illegal listings under 5g')
txt_output.append(f'there were {count_15_i} illegal listings at 5-15g')
txt_output.append(f'there were {count_28_i} illegal listings at 15-28g')
txt_output.append(f'there were {count_bulk_i} illegal listings over 28g')
txt_output.append(f'there were {count_5_l} legal listings under 5g')
txt_output.append(f'there were {count_15_l} legal listings at 5-15g')
txt_output.append(f'there were {count_28_l} legal listings at 15-28g')
txt_output.append(f'there were {count_bulk_l} legal listings over 28g')
save_hist(df_legal['ppg_5'], '_l', 1, 25)
save_hist(df_legal['ppg_15'], '_l')
save_hist(df_legal['ppg_28'], '_l')
save_hist(df_legal['ppg_bulk'], '_l')
save_hist(df_illegal['ppg_5'], '_i')
save_hist(df_illegal['ppg_15'], '_i')
save_hist(df_illegal['ppg_28'], '_i')
save_hist(df_illegal['ppg_bulk'], '_i', 1, 7)
legal_strains = df_legal['strain'].drop_duplicates().tolist()
df_common_illegal = df_illegal[df_illegal['strain'].isin(legal_strains)][
['strain', 'ppg_5', 'ppg_15', 'ppg_28', 'ppg_bulk']]
illegal_strains = df_common_illegal['strain'].drop_duplicates().tolist()
df_common_legal = df_legal[df_legal['strain'].isin(illegal_strains)]
df_common_illegal = df_common_illegal.groupby('strain').mean().reset_index()
df_common_legal = df_common_legal.groupby('strain').mean().reset_index()
df_common_strains = df_common_illegal.merge(df_common_legal, on='strain', suffixes=['_i', '_l'])
txt_output.append(
f"There were {len(df_common_strains)} strains from OCS.ca that were also found on the illegal market")
df_illegal_cheaper = df_common_strains[df_common_strains['ppg_5_i'] < df_common_strains['ppg_5_l']]
#len(df_illegal_cheaper)
    df_illegal_not_cheaper = df_common_strains.loc[list(set(df_common_strains.index) - set(df_illegal_cheaper.index))]
df_common_strains[['ppg_5_i', 'ppg_5_l']].plot.bar(figsize=(20, 8))
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.xlabel('')
plt.ylabel('Price ($CAD)', fontsize=18)
plt.legend(('Illegal, 5 grams or less', 'Legal, 5 grams or less'), fontsize=18)
plt.savefig("strain_comp.png", bbox_inches='tight', pad_inches=0)
plt.close()
with open("price_by_quality_info.txt", "w") as f:
for line in txt_output:
f.write(line)
f.write('\n')
def calc_price_per_gram(df):
"""Add four different price-per-gram columns to df"""
def _price_per_gram(prices, min_weight, max_weight):
""" Returns the average price-per gram of all price tuples within the min/max provided.
Arguments:
        prices -- list of (weight_in_grams, price) tuples, e.g. [(1.0, 5.0), (5.0, 20.0)]
min_weight -- the minimum weight to be included
max_weight -- the maximum weight to be included"""
price_tuples_within_range = []
for price in prices:
            if min_weight <= price[0] <= max_weight:
price_tuples_within_range.append(price[1]/price[0])
if not price_tuples_within_range:
return float('nan')
return sum(price_tuples_within_range)/len(price_tuples_within_range)
df['ppg_5'] = df['gram_prices'].apply(_price_per_gram, min_weight=0, max_weight=5)
df['ppg_15'] = df['gram_prices'].apply(_price_per_gram, min_weight=5.1, max_weight=15)
df['ppg_28'] = df['gram_prices'].apply(_price_per_gram, min_weight=15.1, max_weight=28)
df['ppg_bulk'] = df['gram_prices'].apply(_price_per_gram, min_weight=28.1, max_weight=1000)
return df
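# Expected `gram_prices` format (illustrative): a list of (weight_in_grams,
# price) tuples such as [(1.0, 10.0), (3.5, 30.0), (28.0, 150.0)];
# calc_price_per_gram() averages the price per gram within each bracket.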
def save_hist(df_column, suffix=None, start=1, end=15):
"""Save histogram of distributions of values within provided dataframe column"""
df_column.hist(bins=20, facecolor='gray', ec='black', range=[start, end], figsize=(12, 8), grid=False)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.xlabel('Price ($CAD)', fontsize=18)
plt.ylabel('Number of Listings', fontsize=18)
plt.savefig(f"{df_column.name}{suffix}.png", bbox_inches='tight', pad_inches = 0)
plt.close()
if __name__ == "__main__":
main()
|
UTF-8
|
Python
| false | false | 6,775 |
py
| 97 |
price_by_quantity.py
| 36 | 0.630258 | 0.600148 | 0 | 151 | 42.86755 | 113 |
codelibs/elasticsearch-learning-to-rank
| 7,559,142,467,792 |
e6a3a664164d7142a611cc2703abd97079adc1f7
|
e62f8e6971fb6b3d0621fd33b4f6569678c0ac22
|
/demo/collect_features.py
|
7bca892f666109a706bddaa4705a4f15c16190df
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/codelibs/elasticsearch-learning-to-rank
|
9ae3d31f6a53b33907caae1bfa1e21e525ed7d93
|
5e811d0988abce61efa7f564ed9b23a13bb76e56
|
refs/heads/master
| 2023-03-22T04:37:15.107754 | 2019-10-24T02:54:06 | 2019-10-24T02:54:06 | 138,831,446 | 3 | 2 |
Apache-2.0
| true | 2018-11-29T02:01:39 | 2018-06-27T04:56:40 | 2018-11-22T01:56:48 | 2018-11-29T02:01:39 | 980 | 1 | 1 | 0 |
Java
| false | null |
import json
from judgments import judgments_from_file, judgments_by_qid
from log_conf import Logger
from utils import elastic_connection, JUDGMENTS_FILE, JUDGMENTS_FILE_FEATURES, FEATURE_SET_NAME, INDEX_NAME
logQuery = {
"size": 100,
"query": {
"bool": {
"filter": [
{
"terms": {
"_id": ["7555"]
}
}
],
"should": [
{"sltr": {
"_name": "logged_featureset",
"featureset": "movie_features",
"params": {
"keywords": "rambo"
}
}}
]
}
},
"ext": {
"ltr_log": {
"log_specs": {
"name": "main",
"named_query": "logged_featureset",
"missing_as_zero": True
}
}
}
}
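# The query above restricts the hits to the judged document ids and attaches
# the "sltr" clause so the LTR plugin logs each feature value per hit; the
# logged values come back under fields._ltrlog and are flattened by
# feature_dict_to_list() below.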
def feature_dict_to_list(ranklib_labeled_features):
r_val = [0.0] * len(ranklib_labeled_features)
for idx, logEntry in enumerate(ranklib_labeled_features):
value = logEntry['value']
try:
r_val[idx] = value
except IndexError:
Logger.logger.info("Out of range %s" % idx)
return r_val
def log_features(es, judgments_dict, search_index):
for qid, judgments in judgments_dict.items():
keywords = judgments[0].keywords
doc_ids = [judgment.docId for judgment in judgments]
logQuery['query']['bool']['filter'][0]['terms']['_id'] = doc_ids
logQuery['query']['bool']['should'][0]['sltr']['params']['keywords'] = keywords
logQuery['query']['bool']['should'][0]['sltr']['featureset'] = FEATURE_SET_NAME
Logger.logger.info("POST")
Logger.logger.info(json.dumps(logQuery, indent=2))
res = es.search(index=search_index, body=logQuery)
# Add feature back to each judgment
features_per_doc = {}
for doc in res['hits']['hits']:
docId = doc['_id']
features = doc['fields']['_ltrlog'][0]['main']
features_per_doc[docId] = feature_dict_to_list(features)
# Append features from ES back to ranklib judgment list
for judgment in judgments:
try:
features = features_per_doc[
judgment.docId] # If KeyError, then we have a judgment but no movie in index
judgment.features = features
except KeyError:
Logger.logger.info("Missing movie %s" % judgment.docId)
def build_features_judgments_file(judgments_with_features, filename):
with open(filename, 'w') as judgmentFile:
for qid, judgmentList in judgments_with_features.items():
for judgment in judgmentList:
judgmentFile.write(judgment.to_ranklib_format() + "\n")
if __name__ == "__main__":
es_connection = elastic_connection()
judgmentsByQid = judgments_by_qid(judgments_from_file(JUDGMENTS_FILE))
log_features(es_connection, judgmentsByQid, INDEX_NAME)
build_features_judgments_file(judgmentsByQid, JUDGMENTS_FILE_FEATURES)
|
UTF-8
|
Python
| false | false | 3,169 |
py
| 31 |
collect_features.py
| 16 | 0.543074 | 0.53834 | 0 | 91 | 33.824176 | 107 |
deltaint-project/deltaint
| 10,694,468,576,457 |
85f348e312b8f2b142c55d154259598e18daa8b8
|
1691c6aee50dd13c7e1dc88baa43fbc12d7da99a
|
/Mininet-DINT/p4utils/utils/topology.py
|
613c4abe4ffc0514197d97a347dc6e5f1d30d8b9
|
[] |
no_license
|
https://github.com/deltaint-project/deltaint
|
e8be36b1e3d198f2074bd4e329672ee4167211d7
|
c2f4d982cc447077a1d511d51e321c8ada8d460d
|
refs/heads/master
| 2023-05-09T06:01:11.855384 | 2021-05-23T00:33:19 | 2021-05-23T00:33:19 | 369,816,623 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import copy
import json
import pprint
import networkx as nx
from ipaddress import ip_interface
from p4utils import NodeDoesNotExist, InvalidHostIP
from p4utils.logger import log
class TopologyDB(object):
"""
Topology API to save mininet topologies and load them to query useful information.
dict keyed by node name ->
dict keyed by - properties -> val
- neighbor -> interface properties
Attributes:
db: path to network database object. This attribute is used when loading the database.
net: mininet object. When present, TopologyDB saves it in a json format to the disk
"""
def __init__(self, db=None, net=None, *args, **kwargs):
super(TopologyDB, self).__init__(*args, **kwargs)
self._network = {}
if net:
self.topo = net.topo
self.parse_net(net)
elif db:
self.load(db)
else:
log.warning('Topology instantiated without any data')
def __iter__(self):
"""Enables iteration on this object"""
return iter(self._network)
def __repr__(self):
"""Transforms the object into a string when printed"""
return pprint.pformat(self._network)
def __getitem__(self, item):
"""Items can be accessed by using []"""
return self._node(item)
def __contains__(self, item):
"""Enables X in Self"""
return item in self._network
def load(self, fpath):
"""
Load a topology database from the given filename.
Args:
fpath: path to json file
"""
with open(fpath, 'r') as f:
self._network = json.load(f)
def save(self, fpath):
"""
Save the topology database to the given filename.
Args:
fpath: path to json file
"""
with open(fpath, 'w') as f:
json.dump(self._network, f)
@staticmethod
def other_intf(intf):
"""Get the interface on the other end of a link."""
link = intf.link
if link:
if link.intf2 == intf:
return link.intf1
else:
return link.intf2
else:
return None
def parse_net(self, net):
"""Stores the content of the given network in the TopologyDB object."""
for host in net.hosts:
self.add_host(host)
for controller in net.controllers:
self.add_controller(controller)
for switch in net.switches:
if net.topo.isP4Switch(switch.name):
self.add_p4switch(switch)
else:
self.add_switch(switch)
def _add_node(self, node, props):
"""Register a network node.
Args:
node: mininet.node.Node object
props: properties (dictionary)
"""
if node.params.get('isHiddenNode', False):
return
interfaces_to_nodes = {}
interfaces_to_port = {}
for port, port_id in node.ports.items():
interfaces_to_port[port.name] = port_id
for itf in node.intfList():
nh = TopologyDB.other_intf(itf)
if not nh:
continue # Skip loopback and the likes
# do not create connection to hidden node in topology
if nh.node.params.get('isHiddenNode', False):
continue
props[nh.node.name] = {
'ip': '%s/%s' % (itf.ip, itf.prefixLen),
'mac': '%s' % (itf.mac),
'intf': itf.name,
'bw': itf.params.get('bw', -1),
'loss': itf.params.get('loss', 0),
'weight': itf.params.get('weight', 1),
'delay': itf.params.get('delay', 0),
'queue_length': itf.params.get('max_queue_size', -1)
}
interfaces_to_nodes[itf.name] = nh.node.name
# add an interface to node mapping
props['interfaces_to_node'] = interfaces_to_nodes
props['interfaces_to_port'] = interfaces_to_port
self._network[node.name] = props
def add_host(self, node):
"""Register a host."""
attributes = {'type': 'host'}
# node.gateway attribute only exists in my custom mininet
if hasattr(node, "gateway"):
attributes.update({'gateway': node.gateway})
elif 'defaultRoute' in node.params:
attributes.update({'gateway': node.params['defaultRoute'].replace("via ", "")})
self._add_node(node, attributes)
def add_controller(self, node):
"""Register a controller."""
self._add_node(node, {'type': 'controller'})
def add_switch(self, node):
"""Register a switch."""
self._add_node(node, {'type': 'switch'})
def _node(self, node):
"""
Returns node's information
Args:
node: node's name
Raises:
NodeDoesNotExist: if node's name does not exist in the topology
"""
try:
return self._network[node]
except KeyError:
raise NodeDoesNotExist(node)
def node(self, node):
"""
Public version of _node
Args:
node: nodes'name
Returns: Node if exists
"""
return self._node(node)
def _interface(self, node1, node2):
"""Returns interface information of node1 facing node2"""
        # this function checks that node2 exists; if not, it raises an exception.
self._node(node2)
return self._node(node1)[node2]
def _node_interface(self, node, intf):
"""Returns interface information of node1's interface intf"""
connected_to = self._node(node)["interfaces_to_node"][intf]
return self._interface(node, connected_to)
def node_to_node_interface_ip(self, node1, node2):
"""Return the ip_interface for node1 facing node2."""
return self._interface(node1, node2)['ip']
def node_to_node_interface_bw(self, node1, node2):
"""Return the bandwidth capacity of the interface on node1 facing node2.
If it is unlimited, return -1."""
# checks if they exist
self._node(node2)
return self._interface(node1, node2)['bw']
def node_interface_ip(self, node, intf):
"""Returns the IP address of a given interface and node."""
return self._node_interface(node, intf)['ip'].split("/")[0]
def node_interface_bw(self, node, intf):
"""Returns the bw of a given interface and node"""
return self._node_interface(node, intf)['bw']
def subnet(self, node1, node2):
"""Return the subnet linking node1 and node2."""
return ip_interface(self.node_to_node_interface_ip(node1, node2)).network.with_prefixlen
def get_node_type(self, node):
"""Returns node's type"""
return self._node(node)['type']
def get_neighbors(self, node):
"""Returns node's neighbors (all of them)"""
return list(self._node(node)["interfaces_to_node"].values())
def get_interfaces(self, node):
"""Returns node's interfaces names"""
return list(self._node(node)["interfaces_to_node"].keys())
class TopologyDBP4(TopologyDB):
"""See base class.
Adds some special methods for P4 switches.
"""
def __init__(self, *args, **kwargs):
super(TopologyDBP4, self).__init__(*args, **kwargs)
def add_p4switch(self, node):
"""Adds P4 Switch node."""
# set fake ips so they can be queried with the topo
for intf in node.intfList():
if intf.name == "lo":
continue
if intf.params.get('sw_ip', None):
intf.ip, intf.prefixLen = intf.params['sw_ip'].split("/")
self._add_node(node, {'type': 'switch', 'subtype': 'p4switch',
'thrift_port': node.thrift_port, 'sw_id': node.device_id})
# clean the IPs, this seems to make no sense, but when the p4switch is
# started again, if the interface has an IP, the interface is not added
# There are two options, remove that check, or clear the IP after creating
# the object.
for intf in node.intfList():
if intf.name == "lo":
continue
intf.ip, intf.prefixLen = None, None
def get_cpu_port_intf(self, p4switch, quiet=False):
"""
        Returns the interface name of p4switch's cpu port
        Args:
            p4switch: name of the p4 switch
        Returns: interface name (str)
"""
has_cpu_port = any('cpu' in x for x in list(self[p4switch]['interfaces_to_port'].keys()))
if self.is_p4switch(p4switch) and has_cpu_port:
return [x for x in list(self[p4switch]['interfaces_to_port'].keys()) if 'cpu' in x][0]
else:
if not quiet:
print(("Switch %s has no cpu port" % p4switch))
return None
def get_cpu_port_index(self, p4switch, quiet=False):
"""
Returns the port index of p4switch's cpu port
Args:
p4switch: name of the p4 switch
Returns: index
"""
has_cpu_port = any('cpu' in x for x in list(self[p4switch]['interfaces_to_port'].keys()))
if self.is_p4switch(p4switch) and has_cpu_port:
intf = self.get_cpu_port_intf(p4switch)
return self[p4switch]['interfaces_to_port'][intf]
else:
if not quiet:
print(("Switch %s has no cpu port" % p4switch))
return None
def get_thrift_port(self, switch):
"""Return the Thrift port used to communicate with the P4 switch."""
if self._node(switch).get('subtype', None) != 'p4switch':
raise TypeError('%s is not a P4 switch' % switch)
return self._node(switch)['thrift_port']
def get_thrift_ip(self, switch):
"""Return the Thrift ip used to communicate with the P4 switch."""
if self._node(switch).get('subtype', None) != 'p4switch':
raise TypeError('%s is not a P4 switch' % switch)
return self._node(switch)['thrift_ip']
def get_ctl_cpu_intf(self, switch):
"""Returns the controller side cpu interface used to listent for cpu packets"""
if self._node(switch).get('subtype', None) != 'p4switch':
raise TypeError('%s is not a P4 switch' % switch)
if self._node(switch).get('ctl_cpu_intf', None):
return self._node(switch)['ctl_cpu_intf']
else:
return self.get_cpu_port_intf(switch)
class Topology(TopologyDBP4):
"""
Builds or loads a topology object from a mininet or properly json file.
The topology object provides an API to query information about the mininet topology
Attributes:
        db: json file describing the topology. The file has to be saved using this same class with a mininet object.
"""
def __init__(self, db="topology.db", *args, **kwargs):
super(Topology, self).__init__(db, *args, **kwargs)
# Save network startup state:
# In case of link removal, we use this objects to remember the state of links and nodes
# before the removal. This assumes that the topology will not be enhanced, i.e., links and
# nodes can be removed and added, but new links or devices cannot be added.
self._original_network = copy.deepcopy(self._network)
self.network_graph = NetworkGraph()
# this was separated so the Graph constructors has no parameters. Otherwise it would
# fail when creating SubGraphs. This happens since networkx 2.x
self.network_graph.load_topology_from_database(self)
# Creates hosts to IP and IP to hosts mappings
self.hosts_ip_mapping = {}
self.create_hosts_ip_mapping()
def create_hosts_ip_mapping(self):
"""Creates a mapping between host names and IP addresses, and vice versa."""
self.hosts_ip_mapping = {}
hosts = self.get_hosts()
self.hosts_ip_mapping["ipToName"] = {}
self.hosts_ip_mapping["nameToIp"] = {}
for host in hosts:
ip = self.node_interface_ip(host, self.get_host_first_interface(host).format(host))
self.hosts_ip_mapping["ipToName"][ip] = host
self.hosts_ip_mapping["nameToIp"][host] = ip
def get_host_name(self, ip):
"""Returns the host name to an IP address.
Args:
ip: host's ip
"""
name = self.hosts_ip_mapping.get("ipToName", {}).get(ip, None)
if name:
return name
raise InvalidHostIP(ip)
def get_host_gateway_name(self, host):
"""Get host gateway name"""
if self.is_host(host):
return self[host]["interfaces_to_node"][self.get_host_first_interface(host)]
def get_host_ip(self, name):
"""Returns the IP to a host name.
Args:
name: host's name
"""
ip = self.hosts_ip_mapping.get("nameToIp", {}).get(name, None)
if ip:
return ip
raise NodeDoesNotExist(name)
def get_host_mac(self, name):
"""Returns the mac to a host name
Args:
name: host's name
"""
intf = self.get_host_first_interface(name)
nhop = self.get_interfaces_to_node(name)[intf]
return self[name][nhop]['mac']
def is_router(self, node):
"""Checks if node is a router.
Args:
node: name of router
Returns:
True if node is a router, False otherwise
"""
return self[node]["type"] == "router"
def is_host(self, node):
"""Checks if node is a host.
Args:
node: name of Mininet node
Returns:
True if node is a host, False otherwise
"""
return self[node]["type"] == "host"
def is_switch(self, node):
"""Checks if node is a switch.
Args:
node: name of Mininet node
Returns:
True if node is a switch, False otherwise
"""
return self[node]["type"] == "switch"
def is_p4switch(self, node):
"""Checks if node is a P4 switch.
Args:
node: name of Mininet node
Returns:
True if node is a P4 switch, False otherwise
"""
return self[node]["type"] == "switch" and self[node].get('subtype', None) == 'p4switch'
def get_hosts(self):
"""Returns the hosts from the topologyDB."""
return {node: self[node] for node in self if self.is_host(node)}
def get_switches(self):
"""Returns the switches from the topologyDB."""
return {node: self[node] for node in self if self.is_switch(node)}
def get_p4switches(self):
"""Returns the P4 switches from the topologyDB."""
return {node: self[node] for node in self if self.is_p4switch(node)}
def get_routers(self):
"""Returns the routers from the topologyDB."""
return {node: self[node] for node in self if self.is_router(node)}
def get_host_first_interface(self, name):
"""Returns the first interface from a host. Assume it's single-homed.
Args:
name: host name
Returns:
interface name (str)
"""
return list(self[name]["interfaces_to_node"].keys())[0]
def get_p4switch_id(self, sw_name):
"""Returns the ID of a P4 switch.
Args:
sw_name: P4 switch name in the topology
Raises:
TypeError if sw_name is not a P4 switch
Returns:
ID of P4 switch as a string
"""
if self[sw_name].get('subtype', None) != 'p4switch':
raise TypeError('%s is not a P4 switch' % sw_name)
return self[sw_name]['sw_id']
def are_neighbors(self, node1, node2):
"""
Returns if two nodes are direct neighbors
Args:
node1: first node
node2: second node
Returns:
"""
return self.network_graph.are_neighbors(node1, node2)
def get_hosts_connected_to(self, node):
"""
Returns the hosts directly connected to the node
Args:
node:
Returns: list of hosts
"""
nodes = self.get_neighbors(node)
return [host for host in nodes if self.get_node_type(host) == 'host']
def get_switches_connected_to(self, node):
"""
Returns the switches directly connected to the node
Args:
node:
Returns: list of switches
"""
nodes = self.get_neighbors(node)
return [host for host in nodes if self.is_p4switch(host)]
def get_routers_connected_to(self, node):
"""
Returns the routers directly connected to the node
Args:
node:
Returns: list of routers
"""
nodes = self.get_neighbors(node)
return [host for host in nodes if self.is_router(host)]
def get_direct_host_networks_from_switch(self, switch):
"""
Returns all the subnetworks a switch can reach directly
Args:
switch: switch name
Returns: Returns set of networks
"""
networks = []
hosts = self.get_hosts_connected_to(switch)
for host in hosts:
sub_nets = [self.subnet(host, neighbor) for neighbor in list(self[host]['interfaces_to_node'].values())]
networks += sub_nets
return set(networks)
def get_interfaces_to_node(self, node):
"""
Returns dictionary with all interface_name -> node
Args:
node: node's name
"""
return self[node]['interfaces_to_node']
def get_interfaces_to_port(self, node):
"""
Returns dictionary with all interface_name -> port_num
Args:
node: node's name
"""
return self[node]['interfaces_to_port']
def interface_to_node(self, node, intf):
"""
        Returns name of the neighbor at node[intf]
        Args:
            node: node we are querying
intf: name of the interface
"""
return self[node]['interfaces_to_node'][intf]
def interface_to_port(self, node, intf):
"""
Returns port number of the node[intf]
Args:
            node: node we are querying
intf: name of the interface
"""
return self[node]['interfaces_to_port'][intf]
def node_to_node_port_num(self, node1, node2):
"""
Returns the port number from node1 point of view that connects to node2
Args:
node1: src node
node2: dst node
"""
intf = self[node1][node2]['intf']
return self.interface_to_port(node1, intf)
def node_to_node_mac(self, node1, node2):
"""
Returns mac address of node1's interface facing node2
Args:
node1: src node
node2: dst node
"""
return self[node1][node2]['mac']
def get_shortest_paths_between_nodes(self, node1, node2):
"""
Returns all the shortest paths between node1 and node2
Args:
node1: src node
node2: dst node
Returns: List of shortests paths
"""
return self.network_graph.get_paths_between_nodes(node1, node2)
def get_all_paths_between_nodes(self, node1, node2):
"""
Returns all the paths between node1 and node2
Args:
node1: src node
node2: dst node
Returns: List of shortests paths
"""
return self.network_graph.get_all_paths_between_nodes(node1, node2)
class NetworkGraph(nx.Graph):
"""
Uses a networkx object as a base to load the topology.
This object is used to get useful information about the topology
using networkx useful graph algorithms. For instance we can easily
get short paths between two nodes.
"""
def __init__(self, *args, **kwargs):
super(NetworkGraph, self).__init__(*args, **kwargs)
def load_topology_from_database(self, topology_db):
self.topology_db = topology_db
self.load_graph_from_db()
def load_graph_from_db(self):
"""Loads networkx object from topologyDB"""
for node, attributes in self.topology_db._original_network.items():
if node not in self.nodes():
self.add_node(node, attributes)
def add_edge(self, node1, node2):
"""Connects node1 and node2 using an edge"""
if node1 in self.nodes() and node2 in self.nodes():
super(NetworkGraph, self).add_edge(node1, node2)
def add_node(self, node, attributes):
"""
Adds node and connects it with all its neighbors.
Args:
node: node's name
attributes: node's attributes
"""
super(NetworkGraph, self).add_node(node)
self._node[node]['type'] = self.topology_db.get_node_type(node)
# check if the node has a subtype
subtype = attributes.get('subtype', None)
if subtype:
self._node[node]['subtype'] = subtype
for neighbor_node in self.topology_db.get_neighbors(node):
if neighbor_node in self.nodes():
weight = attributes[neighbor_node].get("weight", 1)
bw = attributes[neighbor_node].get("bw", 1000)
super(NetworkGraph, self).add_edge(node, neighbor_node, weight=weight, bw=bw)
def set_node_shape(self, node, shape):
"""Sets node's shape. Used when plotting the network"""
self._node[node]['node_shape'] = shape
def set_node_color(self, node, color):
"""Sets node's color. Used when plotting the network"""
self._node[node]['node_color'] = color
def set_node_type_shape(self, type, shape):
"""Sets all node's with a given type shape. Used when plotting the network"""
for node in self.nodes():
if self._node[node]['type'] == type:
self.set_node_shape(node, shape)
def set_node_type_color(self, type, color):
"""Sets all node's with a given type color. Used when plotting the network"""
for node in self.nodes():
if self._node[node]['type'] == type:
self.set_node_color(node, color)
def get_hosts(self):
"""Returns all the nodes that are hosts"""
return [x for x in self.nodes() if self._node[x]['type'] == 'host']
def get_switches(self):
"""Returns all the nodes that are switches"""
return [x for x in self.nodes() if self._node[x]["type"] == "switch"]
def get_routers(self):
"""Returns all the nodes that are routers"""
return [x for x in self.nodes() if self._node[x]["type"] == "router"]
def get_p4switches(self):
"""Returns all the nodes that are P4 switches"""
return [x for x in self.nodes() if
self._node[x]['type'] == "switch" and self._node[x].get('subtype', "") == 'p4switch']
def keep_only_switches(self):
"""Returns a networkx subgraph including only switch nodes"""
return self.subgraph(self.get_switches())
def keep_only_p4switches(self):
"""Returns a networkx subgraph including only P4 switch nodes"""
return self.subgraph(self.get_p4switches())
def keep_only_p4switches_and_hosts(self):
"""Returns a networkx subgraph including only hosts and P4 switch nodes"""
return self.subgraph(self.get_p4switches() + self.get_hosts())
def are_neighbors(self, node1, node2):
"""Returns True if node1 and node2 are neighbors, False otherwise."""
return node1 in self.adj[node2]
def get_neighbors(self, node):
"""Return all neighbors for a given node."""
return list(self.adj[node].keys())
def total_number_of_paths(self):
"""Returns the total number of shortests paths between all host pairs in the network"""
total_paths = 0
for host in self.get_hosts():
for host_pair in self.get_hosts():
if host == host_pair:
continue
# compute the number of paths
npaths = sum(1 for _ in nx.all_shortest_paths(self, host, host_pair, 'weight'))
total_paths += npaths
return total_paths
def get_paths_between_nodes(self, node1, node2):
"""Compute the paths between two nodes."""
paths = nx.all_shortest_paths(self, node1, node2, 'weight')
paths = [tuple(x) for x in paths]
return paths
def get_all_paths_between_nodes(self, node1, node2):
"""Compute all the paths between two nodes."""
paths = nx.shortest_simple_paths(self, node1, node2, 'weight')
paths = [tuple(x) for x in paths]
return paths
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
db = sys.argv[1]
else:
db = "./topology.db"
topo = Topology(db=db)
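# Illustrative queries against a loaded topology (host names are examples):
# topo.get_hosts()                                  # host name -> properties
# topo.get_host_ip("h1")                            # IP address of host "h1"
# topo.get_shortest_paths_between_nodes("h1", "h2") # list of shortest paths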
|
UTF-8
|
Python
| false | false | 25,221 |
py
| 133 |
topology.py
| 61 | 0.574482 | 0.567583 | 0 | 785 | 31.128662 | 121 |
olivia2800/ai-maze-python
| 936,302,879,804 |
786a1e2a5405cfba0cd20078400d22c2e0d414b3
|
0fad113e47c5ceafef451de457335396d6941677
|
/maze_generator.py
|
c042dcfb6a71d6c30289eec83af03ece1e872917
|
[] |
no_license
|
https://github.com/olivia2800/ai-maze-python
|
b812a060fc10b627b6315ac45d675d713889587c
|
393e3615de0ca4cb1724b023e837b682f2c0310c
|
refs/heads/master
| 2023-08-12T00:30:37.719012 | 2021-09-28T13:49:11 | 2021-09-28T13:49:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 00:01:40 2020
@author: Raul Ortega Ochoa
"""
import pygame, argparse, csv, time
import numpy as np
from time import sleep
from numpy.random import randint
def is_in_map(pos, grid_dim):
"""
Parameters
----------
pos : tuple of 2 ints
x, y coordinates in the grid system of current
position
grid_dim : tuple of ints
x, y dimension of the grid system
Returns
true if pos in map
false if not in map
"""
(max_x, max_y) = grid_dim # unroll the dimensions
(x, y) = pos # unroll the position coordinates
    x_in = (x < max_x) & (x >= 0) # valid row indices are 0 .. max_x-1
    y_in = (y < max_y) & (y >= 0) # valid column indices are 0 .. max_y-1
    return bool(x_in*y_in) # only true if both true
# ===========================
def possible_next_steps(grid_dim, last_pos):
"""
Parameters
----------
grid_dim : tuple of 2 ints
dimensions of the grid
last_pos : tuple of 2 ints
x, y coordinates of current position
Returns
possible_steps: list of list of tuples (x,y) denoting the
next 2 movements possible in every direction possible
"""
x_pos, y_pos = last_pos # unroll coordinates
possible_steps = []
operations_1 = [(0,1), (0,-1), (1,0), (-1,0)]
operations_2 = [(0,2), (0,-2), (2,0), (-2,0)]
num_operations = len(operations_1)
for i in range(num_operations):
op1_x, op1_y = operations_1[i]
op2_x, op2_y = operations_2[i]
if (is_in_map((x_pos + op1_x, y_pos + op1_y), grid_dim)) and (is_in_map((x_pos + op2_x, y_pos + op2_y), grid_dim)):
possible_steps.append([(x_pos + op1_x, y_pos + op1_y), (x_pos + op2_x, y_pos + op2_y)])
return possible_steps
# ===========================
def generate_step(grid, last_pos, pos_history, back_step):
"""
Parameters
----------
grid : list of list of ints
the grid, it is filled with 0, 1, 2, 3 that correspond
to different colors
last_pos : tuple of 2 ints
x, y coordinates of current position
pos_history : list of tuples of 2 ints
coordinates of last visited nodes, only add when see for the
first time
Returns
changes grid[x][y] to white through the path the algorithm is going
and paints the last_pos on the grid blue
returns grid, last_pos, back_step, done
"""
(x, y) = last_pos
grid[x, y] = 1
grid_dim = (len(grid), len(grid[0]))
possible_steps = possible_next_steps(grid_dim, last_pos)
valid_steps = []
for step in possible_steps:
(x1, y1) = step[0]
(x2, y2) = step[1]
not_white = (grid[x1, y1] != 1) & (grid[x2, y2] != 1)
not_green = (grid[x1, y1] != 2) & (grid[x2, y2] != 2)
if bool(not_white * not_green):
valid_steps.append(step)
#print(f"Valid steps: {valid_steps}")
if (len(valid_steps) == 0): # if it is a dead end
last_pos = pos_history[-2 - back_step]
if last_pos == (0,0):
done = True
return grid, last_pos, back_step, done
back_step += 1
done = False
return grid, last_pos, back_step, done
else:
back_step = 0 # reset it
# choose a valid step at random
if (len(valid_steps) == 1):
last_pos = valid_steps[0]
(x1, y1) = last_pos[0]
(x2, y2) = last_pos[1]
grid[x1, y1] = 1
grid[x2, y2] = 4
last_pos = last_pos[1]
done = False
return grid, last_pos, back_step, done
else:
index = randint(0, len(valid_steps))
# print(f"valid: {len(valid_steps)}, chose {index}")
last_pos = valid_steps[index]
(x1, y1) = last_pos[0]
(x2, y2) = last_pos[1]
grid[x1, y1] = 1
grid[x2, y2] = 4
last_pos = last_pos[1]
done = False
return grid, last_pos, back_step, done
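# Note: generate_step implements an iterative randomized depth-first search
# ("recursive backtracker"): it carves two cells at a time towards a random
# unvisited neighbour and, on a dead end, walks back through pos_history
# until a cell with unvisited neighbours is found.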
#==============================================================================
#==============================================================================
if __name__ == "__main__":
start_t0 = time.time()
    # define the colors used on the grid (RGB)
black = (0, 0, 0) # grid == 0
white = (255, 255, 255) # grid == 1
green = (50,205,50) # grid == 2
red = (255,99,71) # grid == 3
grey = (211,211,211) # for background
blue = (153,255,255) # grid[x][y] == 4, where current position is
# set the height/width of each location on the grid
height = 7
width = height # i want the grid square
margin = 1 # sets margin between grid locations
# parsing user input
# example: python maze_generator.py --display=True --num_mazes=1
parser = argparse.ArgumentParser()
parser.add_argument("--display", help="Display generating process 0: False, 1:True", default=1, type=int)
parser.add_argument("--num_mazes", help="Number of mazes to generate.", default=1, type=int)
args = parser.parse_args()
for iter_maze in range(args.num_mazes):
start_t = time.time()
# initialize the grid array full of zeros
num_rows = 41
num_columns = num_rows
grid = np.zeros((num_rows, num_columns))
if args.display == 1:
# initialize pygame
pygame.init()
            # configuration of the window
WINDOW_SIZE = [330, 330]
screen = pygame.display.set_mode(WINDOW_SIZE)
# screen title
pygame.display.set_caption(f"Generating Maze {iter_maze+1}/{args.num_mazes}...")
done = False # loop until done
run = False # when run = True start running the algorithm
clock = pygame.time.Clock() # to manage how fast the screen updates
idx_to_color = [black, white, green, red, blue]
            # initialize last_pos variable. It's the starting point for the algorithm
last_pos = (0, 0)
pos_history = []
pos_history.append(last_pos)
back_step = 0
# define start and goal
grid[0, 0] = 2
grid[-1, -1] = 3
# main program
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
# wait for user to press RETURN key to start
elif event.type == pygame.KEYDOWN:
if event.key==pygame.K_RETURN:
run = True
screen.fill(grey) # fill background in grey
# draw
for row in range(num_rows):
for column in range(num_columns):
color = idx_to_color[int(grid[row, column])]
pygame.draw.rect(screen, color,
[(margin + width) * column + margin,
(margin + height) * row + margin,
width, height])
# set limit to 60 frames per second
clock.tick(60)
# update screen
pygame.display.flip()
if run == True:
# feed the algorithm the last updated position and the grid
grid, last_pos, back_step, done = generate_step(grid, last_pos, pos_history, back_step)
if last_pos not in pos_history:
pos_history.append(last_pos)
sleep(0.01)
close = False
while not close:
for event in pygame.event.get():
if event.type == pygame.QUIT:
close = True
pygame.quit()
# wait for user to press any key to start
if event.type == pygame.KEYDOWN:
close = True
pygame.quit()
else:
print(f"Generating Maze {iter_maze}/{args.num_mazes}...", end=" ")
done = False # loop until done
            # initialize last_pos variable. It's the starting point for the algorithm
last_pos = (0, 0)
pos_history = []
pos_history.append(last_pos)
back_step = 0
# define start and goal
grid[0, 0] = 2
grid[-1, -1] = 3
# main program
while not done:
# feed the algorithm the last updated position and the grid
grid, last_pos, back_step, done = generate_step(grid, last_pos, pos_history, back_step)
if last_pos not in pos_history:
pos_history.append(last_pos)
# export maze to .csv file
with open(f"mazes/maze_{iter_maze}.csv", "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(grid)
print(f"{time.time()-start_t:.3f} s")
print(f"--- finished {time.time()-start_t0:.3f} s---")
exit(0)
|
UTF-8
|
Python
| false | false | 8,866 |
py
| 11 |
maze_generator.py
| 4 | 0.522671 | 0.49921 | 0 | 268 | 31.089552 | 123 |
zhuqiangLu/NMF
| 15,367,392,990,704 |
dda5423a32a47b7019a810db0942ed574702ff4b
|
88541647f252cdc23003e06870bb0074aea3e027
|
/algorithms/algorithms.py
|
e068bb83defb4ec04034661dfc9f81eee7be1d18
|
[] |
no_license
|
https://github.com/zhuqiangLu/NMF
|
30df518d2c800dada57e58cb44efc98900706387
|
80c7329122ba0ed0b8ff6a188963d5f7e36556db
|
refs/heads/main
| 2023-01-01T19:00:54.007913 | 2020-10-23T07:50:11 | 2020-10-23T07:50:11 | 305,027,969 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
from utils import positive_init
from eval import RRE
def MSE(X, X_hat):
# implementation of Mean Square Error
e = np.linalg.norm(X-X_hat)/X.size
return e
def KL_Divergence(X, X_hat):
X_hat += 1e-7
div = (X/X_hat) + 1e-7
return np.sum(X * np.log(div) - X + X_hat)/X.size
def L21_norm(X, X_hat):
l21 = np.sum(np.sqrt(np.sum(np.square(X-X_hat), axis=0, keepdims=True)))
return l21/X.size
def RobustNMF(X, X_hat, E, lamb):
# the objective function of L1 Robust NMF
t1 = np.linalg.norm(X-X_hat-E)
t2 = lamb * np.linalg.norm(np.sum(E, axis=1), ord=0)
return (t1 + t2)/X.size
def MUR_L21(X, D, R, X_clean,steps, tol=1e-3):
# updating rule for L21 nmf
'''
X : (C, N)
D : (C, k)
R : (k, N)
'''
step = 0
diff = float("Inf")
N = X.shape[1]
rres = list()
while step < steps and diff > tol:
L21_bef = L21_norm(X, D.dot(R))
        # first update D (eps added to the denominator to avoid division by zero)
        Dig = (1/(np.sqrt(np.sum(np.square(X-D.dot(R)), axis=0, keepdims=True)))) * np.eye(N)
        D = D * (X.dot(Dig).dot(R.T))/(D.dot(R).dot(Dig).dot(R.T)+1e-7)
        # then R
        Dig = (1/(np.sqrt(np.sum(np.square(X-D.dot(R)), axis=0, keepdims=True)))) * np.eye(N)
        R = R * (D.T.dot(X).dot(Dig))/(D.T.dot(D).dot(R).dot(Dig)+1e-7)
# compute diff for stop
diff = L21_bef - L21_norm(X, D.dot(R))
if X_clean is not None:
rres.append(RRE(X_clean, D.dot(R)))
#print(step, L21_norm(X, D.dot(R)), diff)
step+=1
return D, R, rres
def MUR_L2(X, D, R, X_clean,steps=50, tol=1e-3):
'''
X : (C, N)
D : (C, k)
R : (k, N)
'''
step = 0
diff = float("Inf")
rres = list()
while step < steps and diff > tol:
MSE_bef = MSE(X, D.dot(R))
        R = R * (np.dot(D.T, X)/(np.dot(np.dot(D.T, D), R)+1e-7))  # eps in denominator
        D = D * (np.dot(X, R.T)/(np.dot(np.dot(D, R), R.T)+1e-7))
diff = MSE_bef - MSE(X, D.dot(R))
step += 1
if X_clean is not None:
rres.append(RRE(X_clean, D.dot(R)))
#print(step, MSE(X, D.dot(R)), diff)
return D, R, rres
def MUR_KL(X, D, R, X_clean,steps=500, tol=1e-3):
'''
X : (C, N)
D : (C, k)
R : (k, N)
'''
step = 0
diff = float("Inf")
rres = list()
while step < steps and diff > tol:
KL_bef = KL_Divergence(X, D.dot(R))
temp = X/((np.dot(D, R))+1e-7)
R = R * np.dot(D.T, temp)/np.sum(D.T, axis=1, keepdims=True)
temp = X/((np.dot(D, R))+1e-7)
D = D * np.dot(temp, R.T)/np.sum(R.T, axis=0, keepdims=True)
diff = KL_bef - KL_Divergence(X, D.dot(R))
step += 1
if X_clean is not None:
rres.append(RRE(X_clean, D.dot(R)))
#print(step, KL_Divergence(X, D.dot(R)), diff)
return D, R, rres
def MUR_L1_ROBUST(X, D, R, X_clean,lamb=0.05, steps=500, tol=1e-3):
'''
X : (C, N)
D : (C, k)
R : (k, N)
'''
rres = list()
E = np.random.normal(0, 1, X.shape) * 40
E[(X-E) < 0 ] = 0
C = X.shape[0]
N = X.shape[1]
k = D.shape[1]
step = 0
diff = float('Inf')
while step < steps and diff > tol:
# obj bef
obj_bef = RobustNMF(X, D.dot(R), E, lamb)
# update D
X_hat = X-E
D = D * ((np.dot(X_hat, R.T))/((np.dot(np.dot(D, R), R.T)+1e-7)))
#update R
zeros = np.zeros((1, k))
es = np.sqrt(lamb) * np.exp(np.ones((1, C)))
X_curl = np.vstack((X, np.zeros(((1,N)))))
D_curl = np.vstack(((np.hstack((D, np.eye(C), -1*np.eye(C)))),\
np.hstack((zeros, es, es))))
E_p = (np.abs(E) + E)/2
E_n = (np.abs(E) - E)/2
# prepare for the computation of R
S = np.abs(D_curl.T.dot(D_curl))
R_curl = np.vstack((R, E_p, E_n))
        # prepare for the long update equation
denom = (S.dot(R_curl))+1e-7
mol1 = D_curl.T.dot(D_curl).dot(R_curl)
mol2 = D_curl.T.dot(X_curl)
# compute the block R
R_curl_tmp = R_curl * (1 - (mol1 - mol2)/denom)
R_curl = np.maximum(np.zeros_like(R_curl), R_curl_tmp)
R = R_curl[:k, :]
        # restore E from its positive and negative parts
E = R_curl[k:k+C,:] - R_curl[k+C:, :]
step += 1
obj_aft = RobustNMF(X, D.dot(R), E, lamb)
diff = obj_bef - obj_aft
if X_clean is not None:
rres.append(RRE(X_clean, D.dot(R)))
#print(step, obj_aft, diff)
return D, R, E, rres
def NMF(X, hidden_dim, iters, tol, obj, X_clean=None):
'''
implementation of algorithm presented in
https://papers.nips.cc/paper/1861-algorithms-for-non-negative-matrix-factorization.pdf
X: contaminated data (C, N)
X_hat: the clean data
'''
C = X.shape[0]
N = X.shape[-1]
D = positive_init(C, hidden_dim)
R = positive_init(hidden_dim, N)
E = np.zeros_like(X)
if obj == "L2":
print("OBJ: L2")
D, R, rres = MUR_L2(X, D, R, X_clean, steps=iters, tol=tol)
elif obj == "KL":
print("OBJ: KL")
D, R, rres = MUR_KL(X, D, R, X_clean, steps=iters, tol=tol)
elif obj == "L21":
print("OBJ: L21")
D, R, rres= MUR_L21(X, D, R, X_clean, steps=iters, tol=tol)
elif obj == "L1":
print("OBJ: ROBUSTL1")
D, R, E, rres = MUR_L1_ROBUST(X, D, R, X_clean, steps=iters, tol=tol)
else:
D, R, rres = MUR_L2(X, D, R, X_clean, steps=iters, tol=tol)
return D, R, E, rres
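# Illustrative usage (random data, not from the original repo):
# X = np.abs(np.random.randn(100, 50))   # any non-negative data matrix
# D, R, E, rres = NMF(X, hidden_dim=10, iters=200, tol=1e-4, obj="L2")
# X_hat = D.dot(R)                       # low-rank reconstruction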
|
UTF-8
|
Python
| false | false | 5,695 |
py
| 98 |
algorithms.py
| 6 | 0.479719 | 0.458121 | 0 | 209 | 26.177033 | 94 |
eflows4hpc/dls-dags
| 6,347,961,689,952 |
a40934b56ca2c169566e3c7d736fc9f2a572f90a
|
9b51a4a09e8d9ee144e91d989a6c97429087cf82
|
/utils.py
|
90ac75872d42f9a8e1eac104e55e249d83ded326
|
[
"BSD-2-Clause"
] |
permissive
|
https://github.com/eflows4hpc/dls-dags
|
c68a7da602fcbb4594d67ede19c9121c440bf79c
|
3d311423c8e607b518f3fe5081ceeb1d91557896
|
refs/heads/master
| 2023-08-21T13:20:22.936933 | 2023-08-15T12:55:42 | 2023-08-15T12:55:42 | 508,962,576 | 0 | 1 |
BSD-2-Clause
| false | 2022-09-14T10:05:54 | 2022-06-30T06:37:33 | 2022-06-30T06:47:29 | 2022-09-14T10:05:53 | 68 | 0 | 1 | 0 |
Python
| false | false |
import json
import os
import stat
import tempfile
from urllib.parse import urlparse
import requests
from airflow.models.connection import Connection
from datacat_integration.hooks import DataCatalogHook
from webdav3.client import Client
from airflow.exceptions import AirflowNotFoundException
from airflow.models.dagrun import DagRun
from airflow.settings import Session
from sqlalchemy import update
def get_mlflow_client():
    try:
        from mlflow.client import MlflowClient
    except ImportError:
        print("Unable to import mlflow")
        raise  # without mlflow there is nothing sensible to return
    try:
        connection = Connection.get_connection_from_secrets("my_mlflow")
    except AirflowNotFoundException:
        print("Please define the mlflow connection 'my_mlflow'")
        return MlflowClient()  # fall back to a local tracking client
mlflow_url = f"http://{connection.host}:{connection.port}"
print("Will be using remote mlflow @", mlflow_url)
remote_client = MlflowClient(tracking_uri=mlflow_url, registry_uri=mlflow_url)
return remote_client
def upload_metrics(mlflow_client, metadata, runid):
metrics = metadata.get("metrics")
if metrics:
for metric_name, metric_value in metrics.items():
print("Logging metric", metric_name)
mlflow_client.log_metric(run_id=runid, key=metric_name, value=metric_value)
params = metadata.get("params")
if params:
for param_name, param_value in params.items():
mlflow_client.log_param(run_id=runid, key=param_name, value=param_value)
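# Expected `metadata` shape for upload_metrics (illustrative):
# {"metrics": {"rmse": 0.12}, "params": {"epochs": 10}}
# Each entry is logged against the given mlflow run id.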
def ssh2local_copy(ssh_hook, source: str, target: str):
with ssh_hook.get_conn() as ssh_client:
sftp_client = ssh_client.open_sftp()
lst = sftp_client.listdir(path=source)
print(f"{len(lst)} objects in {source}")
mappings = dict()
for fname in lst:
local = tempfile.mktemp(prefix="dls", dir=target)
full_name = os.path.join(source, fname)
sts = sftp_client.stat(full_name)
if str(sts).startswith("d"):
print(f"{full_name} is a directory. Skipping")
continue
print(f"Copying {full_name} --> {local}")
ssh_download(sftp_client=sftp_client, remote=full_name, local=local)
mappings[local] = fname
return mappings
def copy_streams(inp, outp, chunk_size=1024 * 1000):
while True:
chunk = inp.read(chunk_size)
if not chunk:
break
content_to_write = memoryview(chunk)
outp.write(content_to_write)
def ssh_download(sftp_client, remote, local):
# sftp_client.get(remote, local)
with sftp_client.open(remote, "rb") as i:
with open(local, "wb") as o:
i.set_pipelined(pipelined=True)
copy_streams(inp=i, outp=o)
def file_exist(sftp, name):
    """Return the remote file size in bytes, or -1 if the file does not exist."""
    try:
        r = sftp.stat(name)
        return r.st_size
    except IOError:  # paramiko raises an IOError subclass for missing paths
        return -1
def http2ssh(url: str, ssh_client, remote_name: str, force=True, auth=None):
sftp_client = ssh_client.open_sftp()
size = file_exist(sftp=sftp_client, name=remote_name)
if size > 0:
print(f"File {remote_name} exists and has {size} bytes")
if force is not True:
return 0
print("Forcing overwrite")
dirname = os.path.dirname(remote_name)
ssh_client.exec_command(command=f"mkdir -p {dirname}")
ssh_client.exec_command(command=f"touch {remote_name}")
with requests.get(url, stream=True, verify=False, auth=auth) as r:
written = 0
length = int(r.headers.get("Content-Length", 0))
with sftp_client.open(remote_name, "wb") as f:
f.set_pipelined(pipelined=True)
for chunk in r.iter_content(chunk_size=1024 * 1000):
written += len(chunk)
content_to_write = memoryview(chunk)
f.write(content_to_write)
print(f"Written {written} bytes. Content-lenght {length}")
if length > 0 and written < length:
print(f"Size mismatch {written} < {length}")
raise Exception("Size copying missmatch")
return 0
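# Illustrative usage (the connection id and paths are assumptions):
# from airflow.providers.ssh.hooks.ssh import SSHHook
# with SSHHook(ssh_conn_id="my_ssh").get_conn() as ssh:
#     http2ssh("https://example.com/data.bin", ssh, "/tmp/data.bin", force=True)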
def setup_webdav(params):
oid = params.get('oid', False)
if not oid:
print(
"Missing object id (oid) in pipeline parameters. Please provide datacat id"
)
return -1
webdav_connid, dirname = resolve_oid(oid=oid)
if webdav_connid == -1:
return -1
# fixing dirname
if dirname.startswith("/"):
dirname = dirname[1:]
if dirname[-1] != "/":
dirname = dirname + "/"
client = get_webdav_client(webdav_connid=webdav_connid)
client.verify = params.get("verify_webdav_cert", True)
prefix = get_webdav_prefix(client=client, dirname=dirname)
    if not prefix:
        print("Unable to determine common prefix, falling back to an empty prefix")
        prefix = ""
    print(f"Determined common prefix: {prefix}")
return client, dirname, prefix
def get_webdav_client(webdav_connid):
connection = Connection.get_connection_from_secrets(webdav_connid)
options = {
"webdav_hostname": f"https://{connection.host}{connection.schema}",
"webdav_login": connection.login,
"webdav_password": connection.get_password(),
}
return Client(options)
def get_webdav_prefix(client, dirname):
    # Listing the whole directory just to derive the prefix is not efficient,
    # but the call is rare.
flist = client.list(dirname, get_info=True)
if not flist:
print(f"Empty directory {dirname}")
return None
got = [fname for fname in flist if fname["path"].endswith(dirname)]
if not got:
print("Could not determine the prefix... quiting")
return None
prefix = got[0]["path"][0 : -len(dirname)]
print(f"Determined common prefix: {prefix}")
return prefix
def mkdir_rec(client, path):
    # Stop recursing once an existing directory (or the root) is reached.
    if client.check(path):
        return
    parent, _child = os.path.split(path)
    mkdir_rec(client=client, path=parent)
    client.mkdir(path)
def walk_dir(client, path, prefix):
for p in client.list(path, get_info=True):
curr_name = p["path"]
if curr_name.startswith(prefix):
curr_name = curr_name[len(prefix):]
if curr_name == path:
continue
        # Note: empty directories are skipped here, which is acceptable for
        # this use case.
if p["isdir"]:
yield from walk_dir(client, curr_name, prefix)
continue
yield curr_name
class LFSC(object):
def list(self, path, get_info=True):
lst = [os.path.realpath(os.path.join(path, el)) for el in os.listdir(path)]
if not get_info:
return lst
return [{"path": el, "isdir": os.path.isdir(el)} for el in lst]
class RFSC(object):
def __init__(self, client, **kwargs):
self.client = client
def list(self, path, get_info=True):
if not get_info:
return [el.filename for el in self.client.listdir_attr(path)]
return [
{"path": os.path.join(path, el.filename), "isdir": stat.S_ISDIR(el.st_mode)}
for el in self.client.listdir_attr(path)
]
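def _walk_local_example(root):
    """Illustrative helper (an assumption, not part of the original module):
    LFSC mirrors the webdav client's ``list`` signature, so ``walk_dir`` can
    traverse a local tree as well. The empty prefix relies on LFSC returning
    absolute paths."""
    yield from walk_dir(client=LFSC(), path=os.path.realpath(root), prefix="")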
def resolve_oid(oid: str, type: str = 'dataset'):
try:
hook = DataCatalogHook()
entry = json.loads(hook.get_entry(type, oid))
webdav_connid = urlparse(entry["url"]).netloc
print("Will be using webdav connection", webdav_connid)
dirname = entry["metadata"]["path"]
print(f"Processing webdav dir: {dirname}")
return webdav_connid, dirname
except Exception as e:
print(f"No entry {type}/{oid} in data cat found. Or entry invalid. {e}")
return "default_webdav", "dls/"
def get_unicore_client(user, password, site_url, **kwargs):
from pyunicore import client, credentials
creds = credentials.UsernamePassword(user, password)
transport = client.Transport(credential=creds)
print(f"Connecting to Unicore site: {site_url}")
cl = client.Client(transport, site_url)
storages = cl.get_storages()
home = storages[0]
print(f"Will be using unicore storage {home}, number of storages retrieved: {len(storages)}")
return home
def mask_config(cfg, fields2mask=('vault_id',)):
    """Return a copy of cfg with the given fields replaced by '***'."""
    return {key: (val if key not in fields2mask else "***") for key, val in cfg.items()}
def clean_up_vaultid(context):
dagrun = context['dag_run']
cfg = dagrun.conf
masked = mask_config(cfg=cfg)
session = Session()
cnt = session.execute(
update(DagRun)
.where(DagRun.id==dagrun.id)
.values(conf=masked)
).rowcount
print(f"Clean-up updated {cnt} rows to mask configs")
session.commit()
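

if __name__ == "__main__":
    # Hedged self-check of mask_config with illustrative values only:
    # 'vault_id' is masked, every other key passes through untouched.
    _demo_cfg = {"vault_id": "s3cr3t", "oid": "abc123"}
    print(mask_config(_demo_cfg))  # {'vault_id': '***', 'oid': 'abc123'}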
|
UTF-8
|
Python
| false | false | 8,563 |
py
| 43 |
utils.py
| 37 | 0.628284 | 0.62408 | 0 | 270 | 30.714815 | 100 |
dimitreOliveira/titanicDeepLearning
| 12,670,153,554,698 |
4355c7dd3310aaa514a228a1a4e684c34c43f4c0
|
81d0cbffb9e66f98775c52f9b56d32f8da785a67
|
/dataset.py
|
94a93a995122ee34ececa4262f880e156d8ab370
|
[
"MIT"
] |
permissive
|
https://github.com/dimitreOliveira/titanicDeepLearning
|
340413442b0fb5875db97d7d2e2fc0da3601848e
|
ced45386fbbd0cd83cde4f53c177ffaaf5a00956
|
refs/heads/master
| 2022-01-10T14:43:12.615362 | 2019-06-12T23:59:33 | 2019-06-12T23:59:33 | 115,830,056 | 2 | 0 | null | false | 2019-06-12T23:59:34 | 2017-12-30T23:05:06 | 2019-02-06T12:43:52 | 2019-06-12T23:59:33 | 39 | 1 | 0 | 0 |
Python
| false | false |
import csv
import re
import numpy as np
import pandas as pd
def load_data(train_path, test_path):
"""
method for data loading
:param train_path: path for the train set file
:param test_path: path for the test set file
:return: a 'pandas' array for each set
"""
train_data = pd.read_csv(train_path)
test_data = pd.read_csv(test_path)
print("number of training examples = " + str(train_data.shape[0]))
print("number of test examples = " + str(test_data.shape[0]))
print("train shape: " + str(train_data.shape))
print("test shape: " + str(test_data.shape))
return train_data, test_data
def output_submission(test_ids, predictions, id_column, prediction_column, file_name):
    """
    :param test_ids: vector with test dataset ids
    :param predictions: vector with test dataset predictions
    :param id_column: name of the output id column
    :param prediction_column: name of the output prediction column
    :param file_name: string for the output file name
    :return: outputs a csv with ids and predictions
    """
    print('Outputting submission...')
    with open('submissions/' + file_name, 'w') as submission:
        writer = csv.writer(submission)
        writer.writerow([id_column, prediction_column])
        for test_id, test_prediction in zip(test_ids, np.argmax(predictions, 1)):
            writer.writerow([test_id, test_prediction])
    print('Output complete')
def pre_process_data(df):
"""
Perform a number of pre process functions on the data set
:param df: pandas data frame
:return: updated data frame
"""
    # setting `PassengerId` as index since it won't be necessary for the analysis
df = df.set_index("PassengerId")
# convert 'Sex' values
df['gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)
    # Two passengers' Embarked values are missing; fill them with the most common Embarked value
df.loc[df.Embarked.isnull(), 'Embarked'] = df['Embarked'].mode()[0]
# Replace missing age values with median ages by gender
for gender in df['gender'].unique():
median_age = df[(df['gender'] == gender)].Age.median()
df.loc[(df['Age'].isnull()) & (df['gender'] == gender), 'Age'] = median_age
# convert 'gender' values to new columns
df = pd.get_dummies(df, columns=['gender'])
# convert 'Embarked' values to new columns
df = pd.get_dummies(df, columns=['Embarked'])
# bin Fare into five intervals with equal amount of values
df['Fare-bin'] = pd.qcut(df['Fare'], 5, labels=[1, 2, 3, 4, 5]).astype(int)
# bin Age into seven intervals with equal amount of values
# ('baby','child','teenager','young','mid-age','over-50','senior')
bins = [0, 4, 12, 18, 30, 50, 65, 100]
age_index = (1, 2, 3, 4, 5, 6, 7)
df['Age-bin'] = pd.cut(df['Age'], bins, labels=age_index).astype(int)
# create a new column 'family' as a sum of 'SibSp' and 'Parch'
df['family'] = df['SibSp'] + df['Parch'] + 1
df['family'] = df['family'].map(lambda x: 4 if x > 4 else x)
# create a new column 'FTicket' as the first character of the 'Ticket'
df['FTicket'] = df['Ticket'].map(lambda x: x[0])
# combine smaller categories into one
df['FTicket'] = df['FTicket'].replace(['W', 'F', 'L', '5', '6', '7', '8', '9'], '4')
# convert 'FTicket' values to new columns
df = pd.get_dummies(df, columns=['FTicket'])
# get titles from the name
df['title'] = df.apply(lambda row: re.split('[,.]+', row['Name'])[1], axis=1)
# convert titles to values
df['title'] = df['title'].map({' Capt': 'Other', ' Master': 'Master', ' Mr': 'Mr', ' Don': 'Other',
' Dona': 'Other', ' Lady': 'Other', ' Col': 'Other', ' Miss': 'Miss',
' the Countess': 'Other', ' Dr': 'Other', ' Jonkheer': 'Other', ' Mlle': 'Other',
' Sir': 'Other', ' Rev': 'Other', ' Ms': 'Other', ' Mme': 'Other', ' Major': 'Other',
' Mrs': 'Mrs'})
# convert 'title' values to new columns
df = pd.get_dummies(df, columns=['title'])
df = df.drop(['Name', 'Ticket', 'Cabin', 'Sex', 'Fare', 'Age'], axis=1)
return df
def mini_batches(train_set, train_labels, mini_batch_size):
"""
Generate mini batches from the data set (data and labels)
:param train_set: data set with the examples
:param train_labels: data set with the labels
:param mini_batch_size: mini batch size
:return: mini batches
"""
set_size = train_set.shape[0]
batches = []
num_complete_minibatches = set_size // mini_batch_size
for k in range(0, num_complete_minibatches):
mini_batch_x = train_set[k * mini_batch_size: (k + 1) * mini_batch_size]
mini_batch_y = train_labels[k * mini_batch_size: (k + 1) * mini_batch_size]
mini_batch = (mini_batch_x, mini_batch_y)
batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if set_size % mini_batch_size != 0:
mini_batch_x = train_set[(set_size - (set_size % mini_batch_size)):]
mini_batch_y = train_labels[(set_size - (set_size % mini_batch_size)):]
mini_batch = (mini_batch_x, mini_batch_y)
batches.append(mini_batch)
return batches
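

if __name__ == '__main__':
    # Hedged usage sketch of mini_batches on toy arrays (illustrative only).
    demo_x = np.arange(10).reshape(10, 1)
    demo_y = np.arange(10)
    for batch_x, batch_y in mini_batches(demo_x, demo_y, mini_batch_size=4):
        print(batch_x.shape, batch_y.shape)  # (4, 1) (4,) twice, then (2, 1) (2,)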
|
UTF-8
|
Python
| false | false | 5,334 |
py
| 6 |
dataset.py
| 4 | 0.605549 | 0.595238 | 0 | 133 | 39.105263 | 120 |
AndreasLP/Euler
| 14,594,298,898,363 |
66f99c9387aa5d5b276f02149d0a7426a5094d31
|
8a81e6415436bf55f7169db165abd1eaef74afac
|
/problem38/problem38.py
|
91a94ce6a6e57b1daa1565f686637679dda9f53f
|
[] |
no_license
|
https://github.com/AndreasLP/Euler
|
520d997322290e8a40cc0de28867f65eeedccc88
|
4f9e6019d50d005c82a5e2e55275aefc5c9fe93e
|
refs/heads/master
| 2022-04-01T12:40:01.711757 | 2020-01-17T10:03:09 | 2020-01-17T10:03:09 | 146,212,985 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from is_pandigital import is_pandigital_9

pos = set()
for test in range(2, 10**5 + 1):
    # Build the concatenated product test*1, test*2, ... until it reaches
    # at least 9 digits.
    s = ""
    n = 1
    while len(s) < 9:
        s += str(test * n)
        n += 1
    if is_pandigital_9(s):
        pos.add(s)
# Every candidate is a 9-character digit string, so the lexicographic max
# is also the numeric max.
print(max(pos))
print(pos)
|
UTF-8
|
Python
| false | false | 245 |
py
| 89 |
problem38.py
| 63 | 0.514286 | 0.473469 | 0 | 14 | 16.5 | 41 |
signed8bit/vpp
| 7,318,624,306,807 |
5cca331d8d8dcd18f63186f68d4c0352f635efb7
|
8168685b60ac5ce47ca0e2fc5867e374ca3979e8
|
/test/util.py
|
c72a3965d9ef5d7c3e34c4a0c0b18a8b948d3b25
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/signed8bit/vpp
|
a28badc48daab1b2e3d1be96445a7ff3f6828ef5
|
ebb9a6a1280ae89b8b1555197dd2f7c3f40a4a6e
|
refs/heads/master
| 2020-07-15T16:33:36.574957 | 2016-10-17T15:35:32 | 2016-10-17T18:33:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
## @package util
# Module with common functions that should be used by the test cases.
#
# The module provides a set of tools for setup the test environment
from scapy.layers.l2 import Ether, ARP
from scapy.layers.inet6 import IPv6, ICMPv6ND_NS, ICMPv6NDOptSrcLLAddr
## Util class
#
# Test cases that want to use methods defined in Util class should
# inherit this class.
#
# class Example(Util, VppTestCase):
# pass
class Util(object):
## Class method to send ARP Request for each VPP IPv4 address in
# order to determine VPP interface MAC address to IPv4 bindings.
#
# Resolved MAC address is saved to the VPP_MACS dictionary with interface
# index as a key. ARP Request is sent from MAC in MY_MACS dictionary with
# interface index as a key.
# @param cls The class pointer.
# @param args List variable to store indices of VPP interfaces.
@classmethod
def resolve_arp(cls, args):
for i in args:
ip = cls.VPP_IP4S[i]
cls.log("Sending ARP request for %s on port %u" % (ip, i))
arp_req = (Ether(dst="ff:ff:ff:ff:ff:ff", src=cls.MY_MACS[i]) /
ARP(op=ARP.who_has, pdst=ip,
psrc=cls.MY_IP4S[i], hwsrc=cls.MY_MACS[i]))
cls.pg_add_stream(i, arp_req)
cls.pg_enable_capture([i])
cls.cli(2, "trace add pg-input 1")
cls.pg_start()
arp_reply = cls.pg_get_capture(i)[0]
if arp_reply[ARP].op == ARP.is_at:
cls.log("VPP pg%u MAC address is %s " % (i, arp_reply[ARP].hwsrc))
cls.VPP_MACS[i] = arp_reply[ARP].hwsrc
else:
cls.log("No ARP received on port %u" % i)
cls.cli(2, "show trace")
## @var ip
# <TODO add description>
## @var arp_req
# <TODO add description>
## @var arp_reply
# <TODO add description>
## @var VPP_MACS
# <TODO add description>
## Class method to send ND request for each VPP IPv6 address in
# order to determine VPP MAC address to IPv6 bindings.
#
# Resolved MAC address is saved to the VPP_MACS dictionary with interface
# index as a key. ND Request is sent from MAC in MY_MACS dictionary with
# interface index as a key.
# @param cls The class pointer.
# @param args List variable to store indices of VPP interfaces.
@classmethod
def resolve_icmpv6_nd(cls, args):
for i in args:
ip = cls.VPP_IP6S[i]
cls.log("Sending ICMPv6ND_NS request for %s on port %u" % (ip, i))
nd_req = (Ether(dst="ff:ff:ff:ff:ff:ff", src=cls.MY_MACS[i]) /
IPv6(src=cls.MY_IP6S[i], dst=ip) /
ICMPv6ND_NS(tgt=ip) /
ICMPv6NDOptSrcLLAddr(lladdr=cls.MY_MACS[i]))
cls.pg_add_stream(i, nd_req)
cls.pg_enable_capture([i])
cls.cli(2, "trace add pg-input 1")
cls.pg_start()
nd_reply = cls.pg_get_capture(i)[0]
icmpv6_na = nd_reply['ICMPv6 Neighbor Discovery - Neighbor Advertisement']
dst_ll_addr = icmpv6_na['ICMPv6 Neighbor Discovery Option - Destination Link-Layer Address']
cls.VPP_MACS[i] = dst_ll_addr.lladdr
## @var ip
# <TODO add description>
## @var nd_req
# <TODO add description>
## @var nd_reply
# <TODO add description>
## @var icmpv6_na
# <TODO add description>
## @var dst_ll_addr
# <TODO add description>
## @var VPP_MACS
# <TODO add description>
## Class method to configure IPv4 addresses on VPP interfaces.
#
# Set dictionary variables MY_IP4S and VPP_IP4S to IPv4 addresses
# calculated using interface VPP interface index as a parameter.
# /24 IPv4 prefix is used, with VPP interface address host part set
# to .1 and MY address set to .2.
# Used IPv4 prefix scheme: 172.16.{VPP-interface-index}.0/24.
# @param cls The class pointer.
# @param args List variable to store indices of VPP interfaces.
@classmethod
def config_ip4(cls, args):
for i in args:
cls.MY_IP4S[i] = "172.16.%u.2" % i
cls.VPP_IP4S[i] = "172.16.%u.1" % i
cls.api("sw_interface_add_del_address pg%u %s/24" % (i, cls.VPP_IP4S[i]))
cls.log("My IPv4 address is %s" % (cls.MY_IP4S[i]))
## @var MY_IP4S
# Dictionary variable to store host IPv4 addresses connected to packet
# generator interfaces.
## @var VPP_IP4S
# Dictionary variable to store VPP IPv4 addresses of the packet
# generator interfaces.
## Class method to configure IPv6 addresses on VPP interfaces.
#
# Set dictionary variables MY_IP6S and VPP_IP6S to IPv6 addresses
# calculated using interface VPP interface index as a parameter.
# /64 IPv6 prefix is used, with VPP interface address host part set
# to ::1 and MY address set to ::2.
# Used IPv6 prefix scheme: fd10:{VPP-interface-index}::0/64.
# @param cls The class pointer.
# @param args List variable to store indices of VPP interfaces.
@classmethod
def config_ip6(cls, args):
for i in args:
cls.MY_IP6S[i] = "fd10:%u::2" % i
cls.VPP_IP6S[i] = "fd10:%u::1" % i
cls.api("sw_interface_add_del_address pg%u %s/64" % (i, cls.VPP_IP6S[i]))
cls.log("My IPv6 address is %s" % (cls.MY_IP6S[i]))
## @var MY_IP6S
# Dictionary variable to store host IPv6 addresses connected to packet
# generator interfaces.
## @var VPP_IP6S
# Dictionary variable to store VPP IPv6 addresses of the packet
# generator interfaces.
|
UTF-8
|
Python
| false | false | 5,954 |
py
| 15 |
util.py
| 10 | 0.577931 | 0.560296 | 0 | 139 | 41.834532 | 104 |
salarmgh/djshop-backend
| 10,960,756,542,505 |
d0a48fbf28a4f765c9d11b61122934cc31f6bd0f
|
8491a2a41bcc9471cb447f85acb6e697fb9a9efb
|
/backend/tests/models/test_landing_model.py
|
1ca22af6e689bb705c06f4363703a4cb20a049eb
|
[] |
no_license
|
https://github.com/salarmgh/djshop-backend
|
9cae72eeaefabc99004cb8a9a00eadaa69df2d71
|
921e44c6e52ff025b08e6214ed7fb93a1ca6073c
|
refs/heads/master
| 2020-11-29T19:25:30.328235 | 2020-03-31T11:15:36 | 2020-03-31T11:15:36 | 230,199,388 | 2 | 0 | null | false | 2020-03-02T05:15:37 | 2019-12-26T05:16:50 | 2020-03-02T04:06:14 | 2020-03-02T05:15:37 | 229 | 1 | 0 | 0 |
Python
| false | false |
from django.test import TestCase
from django.core.files.uploadedfile import SimpleUploadedFile
from django.conf import settings
from ...models import LandingBanner
from ..helpers import generate_random_string, check_file_exists_on_model, check_filename_is_same_on_model
import shutil
class LandingBannerTests(TestCase):
def setUp(self):
self.image_name = generate_random_string(3, 5) + '.jpg'
self.title = generate_random_string(15, 50)
self.description = generate_random_string(15, 50)
self.url = generate_random_string(15, 50)
self.image = SimpleUploadedFile(name=self.image_name, content=open('backend/tests/assets/placeholder.jpg', 'rb').read(), content_type='image/jpeg')
self.landing = LandingBanner.objects.create(title=self.title, description=self.description, url=self.url, image=self.image)
def test_can_create_landing(self):
"""
Ensure we can create a new landing object.
"""
self.assertEqual(self.landing.title, self.title)
self.assertEqual(self.landing.description, self.description)
self.assertEqual(self.landing.url, self.url)
def test_is_filename_same_on_model(self):
"""
Ensure filename is same on the model as we want
"""
self.assertTrue(check_filename_is_same_on_model(self.landing, settings.LANDING_IMAGES_DIR, self.image_name, settings.MEDIA_DIR))
def test_is_file_exists_on_model(self):
"""
Ensure file is present on the file system
"""
self.assertTrue(check_file_exists_on_model(self.landing, settings.LANDING_IMAGES_DIR, self.image_name, settings.MEDIA_DIR))
def test_can_delete_image(self):
"""
        Ensure we can delete an image object along with its file.
"""
self.landing.delete()
self.assertFalse(check_file_exists_on_model(self.landing, settings.LANDING_IMAGES_DIR, self.image_name, settings.MEDIA_DIR))
def tearDown(self):
shutil.rmtree(settings.MEDIA_DIR)
|
UTF-8
|
Python
| false | false | 2,075 |
py
| 34 |
test_landing_model.py
| 29 | 0.686265 | 0.679518 | 0 | 50 | 40.46 | 155 |
kauecdev/-ifpi-ads-algoritmos2020
| 9,457,517,993,852 |
b125f888c188d129f4cedb2282877eb3c4e74b68
|
50b8b7e80a12deb854117b640f6b5875d52d0492
|
/Fabio03b/Q13_F3b_maior_n_termos.py
|
10927ca85f19cca14cc7b58f518a8d9d0e73b54b
|
[] |
no_license
|
https://github.com/kauecdev/-ifpi-ads-algoritmos2020
|
09a851d927ecb6f83857f9abdfdaa3add7e250d4
|
039ff61b8560cb9e6139406767c09e8ec2e2ffe1
|
refs/heads/master
| 2021-02-16T07:10:21.208317 | 2020-10-24T18:27:37 | 2020-10-24T18:27:37 | 244,979,191 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def main():
    n = int(input("Digite a quantidade de termos: "))
    # Approach using while (kept for reference):
    # contador = 0
    # maior = 0
    # while contador < n:
    #     n1 = int(input("Digite o valor do termo: "))
    #     if n1 > maior:
    #         maior = n1
    #     contador += 1
    # Approach using for:
    maior = None
    for i in range(0, n):
        n1 = int(input("Digite o valor do termo: "))
        # Starting from None (rather than 0) keeps the result correct when
        # every term is negative.
        if maior is None or n1 > maior:
            maior = n1
    print(maior)


main()
|
UTF-8
|
Python
| false | false | 489 |
py
| 146 |
Q13_F3b_maior_n_termos.py
| 145 | 0.468172 | 0.445585 | 0 | 30 | 15.166667 | 54 |
ambonip/Catalogo_Prestazioni
| 8,632,884,272,940 |
fd0ceeb3eb0a75f7df878ba5ee6f0e787ef6bc55
|
dca9d5c1fc9778c4524da39ec0de51470bef3dce
|
/models/menu.py
|
fad8f8b7ce35aab3334e884feefb7bd21b70a3fe
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
https://github.com/ambonip/Catalogo_Prestazioni
|
2e9e8b792150801ad8e1c898f03a2607db9508a5
|
689589cdac3ed011d5b6e21bed9b67ad3a4d492f
|
refs/heads/master
| 2020-07-29T04:58:53.409173 | 2017-04-27T09:54:06 | 2017-04-27T09:54:06 | 65,996,556 | 0 | 0 | null | false | 2017-04-27T07:20:03 | 2016-08-18T12:47:00 | 2016-08-19T09:12:03 | 2017-04-27T07:20:03 | 2,888 | 0 | 0 | 0 |
Python
| null | null |
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## Customize your APP title, subtitle and menus here
#########################################################################
#response.logo = A(B('web',SPAN(2),'py'),XML('™ '),
# _class="navbar-brand",_href="http://www.web2py.com/",
# _id="web2py-logo")
response.title = request.application.replace('_',' ').title()
response.subtitle = ''
## read more at http://dev.w3.org/html5/markup/meta.name.html
response.meta.author = 'Paolo Amboni <paolo.amboni@gmail.com>'
response.meta.description = 'catalogo delle prestazioni di laboratorio'
response.meta.keywords = 'laboratorio, analisi, catalogo, prestazioni'
response.meta.generator = 'Web2py Web Framework'
## your http://google.com/analytics id
response.google_analytics_id = None
#########################################################################
## this is the main application menu add/remove items as required
#########################################################################
response.menu = [
(T('Home'), False, URL('default', 'index'), []),
(T('Advanced search'),False,URL('default','adv_search'),[])
]
# build the configuration submenu
sub_conf=[('Gestione Materiali',False,URL('gestione','mat')),
('Gestione contenitori',False,URL('gestione','cont')),
('Gestione metodi',False,URL('gestione','metod')),
('Gestione settori',False,URL('gestione','sett')),
('Gestione analisi',False,URL('gestione','lis_anal')),
('Download catalogo Offline',False,URL('static','catalogo_prestazioni.zip')),
]
# build the submenu for exporting lists
sub_export=[('esporta analisi sintetico -> CSV',False,URL('gestione','exp_anal')),
('esporta contenitori -> CSV',False,URL('gestione','exp_cont')),
('esporta metodi -> CSV',False,URL('gestione','exp_meto')),
('esporta materiali -> CSV',False,URL('gestione','exp_mate')),
]
response.menu += [('Esporta dati', False, '#', sub_export)]  # attach the export submenu
if auth.has_membership('superuser'):  # operating-unit management is superuser-only
    sub_conf.append(('Unità operative', False, URL('gestione', 'unop')))
    sub_conf.append(('Configurazione generale', False, URL('gestione', 'gen_cfg')))
    response.menu += [('Gestione utenti', False, URL('gestione', 'ges_user', []))]
    response.menu += [('Aggiorna catalogo offline', False, URL('gestione', 'exp_catalog', []))]
if auth.is_logged_in():  # these entries are only for logged-in users
    response.menu += [('Configurazione', False, '#', sub_conf)]  # attach the submenu built above
#########################################################################
## provide shortcuts for development. remove in production
#########################################################################
if "auth" in locals(): auth.wikimenu()
|
UTF-8
|
Python
| false | false | 3,159 |
py
| 67 |
menu.py
| 5 | 0.577045 | 0.574826 | 0 | 63 | 49.063492 | 115 |
chenshanghao/LeetCode_learning
| 13,993,003,485,272 |
efdc65b98fce1ba07220a9b38907b54b40a41919
|
5982a9c9c9cb682ec9732f9eeb438b62c61f2e99
|
/Problem_5/learning_solution_3.py
|
f06adeba1a554d829ffd172841449a0988bee6ff
|
[] |
no_license
|
https://github.com/chenshanghao/LeetCode_learning
|
6fdf98473be8f2240dd86d5586bbd1bbb95d6b0c
|
acf2395f3b946054009d4543f2a13e83402323d3
|
refs/heads/master
| 2021-10-23T05:23:01.970535 | 2019-03-15T05:08:54 | 2019-03-15T05:08:54 | 114,688,902 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Solution(object):
    def __init__(self):
        self.longestSize = 0
        self.longestStart = 0

    def longestPalindrome(self, s):
        # Expand around every center, treating odd- and even-length
        # palindromes separately.
        for index in range(len(s)):
            self.checkEvenPalindrome(s, index)
            self.checkOddPalindrome(s, index)
        return s[self.longestStart: self.longestStart + self.longestSize + 1]

    def checkOddPalindrome(self, s, index):
        start = index
        end = index
        # Mind the boundary conditions.
        while start >= 1 and end < len(s) - 1 and s[start - 1] == s[end + 1]:
            start -= 1
            end += 1
        if end - start > self.longestSize:
            self.longestSize = end - start
            self.longestStart = start

    def checkEvenPalindrome(self, s, index):
        # The center sits between index and index + 1; nothing to do unless
        # both middle characters exist and match.
        if index + 1 >= len(s) or s[index] != s[index + 1]:
            return
        start = index
        end = index + 1
        while start >= 1 and end < len(s) - 1 and s[start - 1] == s[end + 1]:
            start -= 1
            end += 1
        if end - start > self.longestSize:
            self.longestSize = end - start
            self.longestStart = start
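

if __name__ == '__main__':
    # Quick sanity checks; a fresh Solution per call because the instance
    # keeps the best result as state.
    print(Solution().longestPalindrome('babad'))  # 'bab' ('aba' is also valid)
    print(Solution().longestPalindrome('cbbd'))   # 'bb'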
|
UTF-8
|
Python
| false | false | 1,112 |
py
| 262 |
learning_solution_3.py
| 223 | 0.537273 | 0.521818 | 0 | 36 | 29.222222 | 92 |
mousyball/pytorch-trainer
| 2,894,807,998,606 |
28daaee595ea529801f5e00448a937622d8c52ba
|
592bf29b079177132cd88892a057e54bf47556cd
|
/networks/classification/networks/base.py
|
d413bbd8b6b35a9a38b1cb9b19ec3e3521f38ae8
|
[] |
no_license
|
https://github.com/mousyball/pytorch-trainer
|
e3428469495a99c1b32e038b9bc44dc68c009b98
|
f2b0fa3a4543cee83eda7f9731942f76589794c1
|
refs/heads/master
| 2023-05-13T17:55:23.944058 | 2021-06-07T13:30:03 | 2021-06-07T13:30:03 | 329,233,204 | 5 | 1 | null | false | 2021-06-07T13:30:04 | 2021-01-13T07:52:58 | 2021-02-21T08:10:48 | 2021-06-07T13:30:04 | 259 | 2 | 1 | 4 | null | false | false |
import torch.nn as nn
from ...loss import build_loss
from ..builder import build_backbone
class INetwork(nn.Module):
def __init__(self):
super(INetwork, self).__init__()
def _construct_network(self, cfg):
"""Construct network from builder."""
raise NotImplementedError()
def freeze(self):
"""Freeze components or layers."""
raise NotImplementedError()
def get_lr_params(self, group_list):
"""Get LR group for optimizer."""
raise NotImplementedError()
def train_step(self, batch_data):
"""Define training step."""
raise NotImplementedError()
def val_step(self, batch_data):
"""Define validation step."""
raise NotImplementedError()
def forward(self, x):
"""Define forward propagation."""
raise NotImplementedError()
class BaseNetwork(INetwork):
def __init__(self):
super(BaseNetwork, self).__init__()
def _construct_network(self, cfg):
"""Construct network from builder."""
if 'backbone' not in cfg:
raise KeyError("Key 'backbone' is not in config.")
self.backbone = build_backbone(cfg.backbone)
self.criterion = build_loss(cfg.loss)
def freeze(self):
"""Freeze components or layers.
TODO:
* Freeze all or backbone.
NOTE:
* Freeze layers depended on demand.
"""
raise NotImplementedError()
def get_lr_params(self, group_list):
"""Get LR group for optimizer.
TODO:
* Checker for this function.
* Config yaml for components.
* Any better coding logic?
NOTE:
* Make sure that the following layers exist in the network!!!
* For example, `nn.Conv1d` is used in network and you should add it to the condition below.
* The parameters wouldn't be updated if they are freezed in advance.
"""
        modules = [self.__getattr__(m) for m in group_list]
        for i in range(len(modules)):
            for m in modules[i].named_modules():
                if isinstance(m[1], (nn.Conv2d,
                                     nn.Conv1d,
                                     nn.Linear,
                                     nn.BatchNorm2d,
                                     nn.SyncBatchNorm,
                                     nn.GroupNorm)):
                    for p in m[1].parameters():
                        if p.requires_grad:
                            yield p
def get_optimizer_params(self, group_info, lr):
"""Get optimizer parameters from config.
Args:
group_info (Tuple(List, Union[int, float])): This contains
group_list that sends to the optimizer and corresponding
weight for scaling.
lr (float): learning rate
Returns:
List: parameters group for optimizer.
"""
# Check if config name matches the attribute name.
for groups in group_info:
group_list = groups[0]
for group in group_list:
if group not in dir(self):
assert False, f"{group} not in {self.__dict__.keys()}"
params_group = []
for group_list, weight in group_info:
params_group.append(
{
'params': self.get_lr_params(group_list),
'lr': lr * weight
}
)
return params_group
def train_step(self, batch_data):
"""Define training step."""
inputs, labels = batch_data['inputs'], batch_data['targets']
outputs = self.forward(inputs)
loss = self.criterion(outputs, labels)
return dict(cls_loss=loss)
def val_step(self, batch_data):
"""Define validation step."""
inputs, labels = batch_data['inputs'], batch_data['targets']
outputs = self.forward(inputs)
loss = self.criterion(outputs, labels)
return dict(cls_loss=loss)
def forward(self, x):
"""Define forward propagation."""
output = self.backbone(x)
return output
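

# Hedged usage sketch (the subclass name, group list and hyper-parameters
# below are illustrative assumptions, not part of this module):
#
#   net = SomeClassifier()                    # a concrete BaseNetwork subclass
#   params = net.get_optimizer_params(
#       group_info=[(['backbone'], 1.0)],     # backbone at 1.0 * base LR
#       lr=1e-3)
#   optimizer = torch.optim.SGD(params, lr=1e-3, momentum=0.9)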
|
UTF-8
|
Python
| false | false | 4,229 |
py
| 87 |
base.py
| 67 | 0.550485 | 0.547647 | 0 | 129 | 31.782946 | 105 |
tainaml/p_gs
| 10,823,317,607,518 |
b58135e173ad06dc016e5e17a6bf0f7a65a9103d
|
f5122242ceb72699f137d720a4c2087ab88ce215
|
/apps/account/account_exceptions.py
|
3bddd2c213790735c4f65ba03cf1215b76135d6a
|
[] |
no_license
|
https://github.com/tainaml/p_gs
|
009eae484152a213955da86d459314b7be0c4f69
|
0201a81c4ad5408bc9a2a585b9cb870dcdfe11d1
|
refs/heads/master
| 2023-01-20T05:00:31.052509 | 2018-08-15T18:49:52 | 2018-08-15T18:49:52 | 144,891,416 | 0 | 0 | null | false | 2023-01-12T09:29:19 | 2018-08-15T18:51:52 | 2018-08-15T18:54:33 | 2023-01-12T09:29:17 | 51,564 | 0 | 0 | 26 |
JavaScript
| false | false |
__author__ = 'phillip'
class AccountException(Exception):
pass
class AccountDoesNotExistException(AccountException):
pass
class TokenIsNoLongerValidException(AccountException):
pass
class TokenIsNotActiveException(AccountException):
pass
class TokenDoesNotExistException(AccountException):
pass
|
UTF-8
|
Python
| false | false | 319 |
py
| 981 |
account_exceptions.py
| 521 | 0.799373 | 0.799373 | 0 | 16 | 19 | 54 |
MatheusFreitas25/Python-Studies
| 14,800,457,315,085 |
acf4ea0a5a78b2bfa7338137a6c2843359f85c15
|
1d6d4716fea71753f077de76e27301f3d7bdb040
|
/General_exercises/15_Lista_MenoresQue.py
|
131198d1de6d35b619328f700db3d2eeaec2a894
|
[] |
no_license
|
https://github.com/MatheusFreitas25/Python-Studies
|
b219e3b8deb54beff1c190ef43be3788a9aa7806
|
5483a8ec4cd877fa3426257fd2115ff9204b1a27
|
refs/heads/master
| 2022-03-14T04:02:07.185398 | 2022-02-03T13:32:06 | 2022-02-03T13:32:06 | 225,725,786 | 0 | 0 | null | false | 2022-02-03T13:32:06 | 2019-12-03T22:02:46 | 2021-11-05T04:42:31 | 2022-02-03T13:32:06 | 30,250 | 0 | 0 | 0 |
Python
| false | false |
a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
c = int(input("Os numeros devem ser menores do que : "))
b = []
for elem in a:
    if elem < c:
        b.append(elem)
        print(elem)
print(b)
|
UTF-8
|
Python
| false | false | 189 |
py
| 58 |
15_Lista_MenoresQue.py
| 57 | 0.52381 | 0.439153 | 0 | 9 | 20.111111 | 56 |
minxiyang/hmumu-coffea
| 14,663,018,385,774 |
e22f034619c2015e2b07f0ff2de1f842f0eae1ce
|
772eb33ca463552fc46c79a18d82ed34ab1712a0
|
/slurm_cluster_prep.py
|
d3ee2e4eeb5ed758499e14c2f510ab4af6a760dd
|
[] |
no_license
|
https://github.com/minxiyang/hmumu-coffea
|
662ee3631309e0afdf5d11e9bdcd6a5150e0ea02
|
8888b2bc94e3acf930001d609c425b5daa3521ef
|
refs/heads/master
| 2023-05-11T15:16:01.323083 | 2021-05-17T04:30:04 | 2021-05-17T04:30:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pytest
import asyncio
import dask
from dask.distributed import Client
from dask.distributed import Scheduler, Worker
from dask_jobqueue import SLURMCluster
from coffea.processor.executor import dask_executor
from python.dimuon_processor_pandas import DimuonProcessor
dask.config.set({"temporary-directory": "/depot/cms/hmm/dask-temp/"})
dask.config.set({'distributed.worker.timeouts.connect': '60s'})
__all__ = ['pytest', 'asyncio', 'dask',
'Client', 'Scheduler', 'Worker',
'SLURMCluster', 'dask_executor',
'DimuonProcessor']
print('Dask version:', dask.__version__)
async def f(scheduler_address):
    # `ncores` is a deprecated alias of `nthreads` in recent dask versions,
    # so only `nthreads` is passed here.
    r = await Worker(scheduler_address,
                     resources={'processor': 0, 'reducer': 1},
                     nthreads=1,
                     memory_limit='64GB')
    await r.finished()
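

if __name__ == '__main__':
    # Hedged launch sketch: attach this reducer worker to an already running
    # scheduler. The address below is illustrative.
    asyncio.run(f('tcp://127.0.0.1:8786'))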
|
UTF-8
|
Python
| false | false | 875 |
py
| 15 |
slurm_cluster_prep.py
| 12 | 0.648 | 0.638857 | 0 | 26 | 32.653846 | 69 |
DMCristallo/search
| 5,437,428,612,535 |
5ee0baa4b0ac2160042112f684e66de666ea2067
|
2266792c24c9741df5736157c2b1cdd3f430714e
|
/avail.py
|
4d65c5f8fa8393be3531f9e54999146959f95ccd
|
[] |
no_license
|
https://github.com/DMCristallo/search
|
cda0a9e3a56a711838c2a63ff1586137a22b725c
|
c161dac2571a4e3ab7ed4ec77608b73df4cc1d89
|
refs/heads/master
| 2020-12-24T14:53:19.679230 | 2015-11-03T01:16:45 | 2015-11-03T01:16:45 | 41,462,192 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'someone on the internet'
import ctypes
import itertools
import platform
import string
def get_available_drives():
    """Return the letters of all available drives, e.g. ['C', 'D'].

    GetLogicalDrives returns a bitmask with bit n set when the n-th drive
    letter (A=0, B=1, ...) exists; reversing the binary string lines bit n
    up with the n-th uppercase letter.
    """
    if 'Windows' not in platform.system():
        return []
    drive_bitmask = ctypes.cdll.kernel32.GetLogicalDrives()
    return list(itertools.compress(string.ascii_uppercase,
                                   map(lambda x: ord(x) - ord('0'),
                                       bin(drive_bitmask)[:1:-1])))
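

if __name__ == '__main__':
    # Prints e.g. ['C', 'D'] on Windows and [] on any other platform.
    print(get_available_drives())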
|
UTF-8
|
Python
| false | false | 397 |
py
| 9 |
avail.py
| 8 | 0.680101 | 0.667506 | 0 | 16 | 23.875 | 75 |
Khateebxtreme/IOT_SKETCHES-RASPBERRY_PI
| 11,991,548,692,920 |
3b6e8ba60cc57872107d3dd00c5be3f76c24b20b
|
f5c3a0719cc1a1c17bca555c2afb23db60a87255
|
/Fundamentals/01 - BLINK_LED_CONTROL/CODE.py
|
1d50232b5477c493b6d2971d85e19ddbed603ccd
|
[] |
no_license
|
https://github.com/Khateebxtreme/IOT_SKETCHES-RASPBERRY_PI
|
42632bd918df780d5b93bc49c7636e69c060f1a4
|
3e15ad6b613b38f43b8fcad8cf5973533b9843c2
|
refs/heads/master
| 2022-12-14T15:17:08.616642 | 2020-08-22T23:31:27 | 2020-08-22T23:31:27 | 285,519,625 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import RPi.GPIO as GPIO
import time

GPIO.setmode(GPIO.BOARD)
GPIO.setup(8, GPIO.IN)    # input (e.g. a push button) on physical pin 8
GPIO.setup(10, GPIO.OUT)  # LED on physical pin 10

try:
    while True:
        if GPIO.input(8) == True:
            # Input high: hold the LED on.
            GPIO.output(10, True)
        else:
            # Input low: blink the LED (1 s on, 1 s off).
            GPIO.output(10, True)
            time.sleep(1)
            GPIO.output(10, False)
            time.sleep(1)
except KeyboardInterrupt:
    GPIO.cleanup()  # release the pins on Ctrl-C
|
UTF-8
|
Python
| false | false | 301 |
py
| 4 |
CODE.py
| 1 | 0.601329 | 0.561462 | 0 | 13 | 21.230769 | 29 |