repo_name (stringlengths 7-111) | __id__ (int64 16.6k-19,705B) | blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 5-151) | content_id (stringlengths 40-40) | detected_licenses (list) | license_type (stringclasses 2 values) | repo_url (stringlengths 26-130) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringlengths 4-42) | visit_date (timestamp[ns]) | revision_date (timestamp[ns]) | committer_date (timestamp[ns]) | github_id (int64 14.6k-687M ⌀) | star_events_count (int64 0-209k) | fork_events_count (int64 0-110k) | gha_license_id (stringclasses 12 values) | gha_fork (bool 2 classes) | gha_event_created_at (timestamp[ns]) | gha_created_at (timestamp[ns]) | gha_updated_at (timestamp[ns]) | gha_pushed_at (timestamp[ns]) | gha_size (int64 0-10.2M ⌀) | gha_stargazers_count (int32 0-178k ⌀) | gha_forks_count (int32 0-88.9k ⌀) | gha_open_issues_count (int32 0-2.72k ⌀) | gha_language (stringlengths 1-16 ⌀) | gha_archived (bool 1 class) | gha_disabled (bool 1 class) | content (stringlengths 10-2.95M) | src_encoding (stringclasses 5 values) | language (stringclasses 1 value) | is_vendor (bool 2 classes) | is_generated (bool 2 classes) | length_bytes (int64 10-2.95M) | extension (stringclasses 19 values) | num_repo_files (int64 1-202k) | filename (stringlengths 4-112) | num_lang_files (int64 1-202k) | alphanum_fraction (float64 0.26-0.89) | alpha_fraction (float64 0.2-0.89) | hex_fraction (float64 0-0.09) | num_lines (int32 1-93.6k) | avg_line_length (float64 4.57-103) | max_line_length (int64 7-931)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
veritesomwa/web_browser | 12,335,146,122,275 | 9a4566ac7a6a4830725a825b911a21c29b563548 | 90336d79e35cd6b9d5f1169ac2c4e6e2514fc16b | /main.py | a88b3e68e4c0a13ad7331e2d4da4ddf78ffaa235 | [] | no_license | https://github.com/veritesomwa/web_browser | 680db9992f52030f91c0896e962ae37001b73528 | 859fc89a963f9d58cc07daa208e6b35669e406b2 | refs/heads/master | 2023-02-23T13:53:56.218092 | 2021-01-30T17:59:57 | 2021-01-30T17:59:57 | 334,477,297 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from PyQt5.QtWidgets import (QApplication, QFrame, QWidget,
QVBoxLayout, QHBoxLayout, QStackedLayout,
QPushButton, QLineEdit, QTabBar, QLabel)
from PyQt5.QtCore import *
from PyQt5.QtGui import QIcon, QImage, QFont
import sys, os
from PyQt5.QtWebEngineWidgets import QWebEngineView
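# Address bar that selects its entire contents on click, as mainstream browsers do.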
class AddressBar(QLineEdit):
def __init__(self):
super().__init__()
def mousePressEvent(self, e):
self.selectAll()
class App(QFrame):
def __init__(self):
super().__init__()
self.AppSettings()
self.CreateApplication()
def AppSettings(self):
self.setWindowTitle('Web Browser')
self.setMinimumSize(840, 620)
def CreateApplication(self):
# Main Layout
self.layout = QVBoxLayout()
self.layout.setSpacing(0)
self.layout.setContentsMargins(0,0,0,0)
# Variables
self.tab_count = 0
self.tabs = []
self.tabbar = QTabBar(tabsClosable=True, movable=True)
self.tabbar.tabCloseRequested.connect(self.CloseTab)
self.tabbar.tabBarClicked.connect(self.SwitchTab)
self.tabbar.setElideMode(Qt.ElideLeft)
self.tabbar.setExpanding(False)
# ToolBar
self.ToolBar = QWidget()
self.toolbar_layout = QHBoxLayout()
# Tools
self.btnAddTab = QPushButton('+')
self.btnAddTab.clicked.connect(self.AddTab)
self.address_bar = AddressBar()
self.address_bar.returnPressed.connect(self.BrowseTo)
self.btn_back = QPushButton('<')
self.btn_back.clicked.connect(self.Back)
self.btn_forward = QPushButton('>')
self.btn_forward.clicked.connect(self.Forward)
self.btn_refresh = QPushButton('F5')
self.btn_refresh.clicked.connect(self.Refresh)
# Add Tools to ToolBar layout
self.toolbar_layout.addWidget(self.btn_back)
self.toolbar_layout.addWidget(self.btn_forward)
self.toolbar_layout.addWidget(self.btn_refresh)
self.toolbar_layout.addWidget(self.address_bar)
self.toolbar_layout.addWidget(self.btnAddTab)
# Container
self.container = QWidget()
self.container_layout = QStackedLayout()
self.container.setLayout(self.container_layout)
# addLayout to toolbar
self.ToolBar.setLayout(self.toolbar_layout)
# Adding Widgets to Main Layout
self.layout.addWidget(self.tabbar)
self.layout.addWidget(self.ToolBar)
self.layout.addWidget(self.container)
self.AddTab()
self.setLayout(self.layout)
self.show()
def CloseTab(self, i):
self.tabbar.removeTab(i)
def AddTab(self):
if len(self.tabs):
self.tab_count += 1
i = self.tab_count
self.tabs.append(QWidget())
self.tabs[i].layout = QHBoxLayout()
self.tabs[i].setObjectName('tab'+str(i))
self.tabs[i].layout.setContentsMargins(0,0,0,0)
self.tabs[i].content = QWebEngineView()
self.tabs[i].content.titleChanged.connect(lambda: self.setTabTitle(i))
self.tabs[i].content.iconChanged.connect(lambda: self.setTabIcon(i))
self.tabs[i].content.urlChanged.connect(lambda: self.setAddressBar(i))
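        # Each handler is a lambda that captures this tab's index i, so title, icon, and URL updates reach the right tab.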
self.tabs[i].content.load(QUrl.fromUserInput('http://www.google.com'))
self.tabs[i].layout.addWidget(self.tabs[i].content)
self.container_layout.addWidget(self.tabs[i])
self.tabs[i].setLayout(self.tabs[i].layout)
self.tabbar.addTab('New Tab')
self.tabbar.setTabData(i, 'tab'+str(i))
self.tabbar.setCurrentIndex(i)
self.container_layout.setCurrentWidget(self.tabs[i])
self.address_bar.selectAll()
self.address_bar.setFocus()
def SwitchTab(self, i):
if self.tabs[i]:
tabName = self.tabbar.tabData(i)
tabObj = self.findChild(QWidget, tabName)
self.container_layout.setCurrentWidget(tabObj)
url = tabObj.content.url().toString()
self.address_bar.setText(url)
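    # URL heuristic: text without a dot becomes a Google search (except 'localhost'); anything else gets an http:// prefix if needed.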
def BrowseTo(self):
text = self.address_bar.text()
url = ""
if 'http' not in text:
if '.' not in text:
if 'localhost' in text:
url = 'http://'+text
else:
url = 'http://google.com/search?q='+text
else:
url = 'http://'+text
else:
url = text
i = self.tabbar.currentIndex()
self.object = self.findChild(QWidget, self.tabbar.tabData(i))
self.object.content.load(QUrl.fromUserInput(url))
def setTabTitle(self, i):
tabName = self.tabbar.tabData(i)
TabObj = self.findChild(QWidget, tabName)
self.tabbar.setTabText(i, TabObj.content.title())
def setAddressBar(self, i):
tabName = self.tabbar.tabData(i)
url = self.findChild(QWidget, tabName).content.url().toString()
self.address_bar.setText(url)
def setTabIcon(self, i):
tabName = self.tabbar.tabData(i)
icon = self.findChild(QWidget, tabName).content.icon()
self.tabbar.setTabIcon(i, icon)
def Back(self):
i = self.tabbar.currentIndex()
self.tabs[i].content.back()
def Forward(self):
i = self.tabbar.currentIndex()
self.tabs[i].content.forward()
def Refresh(self):
i = self.tabbar.currentIndex()
self.tabs[i].content.reload()
if __name__ == "__main__":
app = QApplication(sys.argv)
with open('material.css') as style:
app.setStyleSheet(style.read())
window = App()
sys.exit(app.exec_())
| UTF-8 | Python | false | false | 5,794 | py | 3 | main.py | 1 | 0.605109 | 0.601312 | 0 | 198 | 28.267677 | 78 |
AnupKandalkar/ssdb_django_angular_heroku | 5,643,587,069,377 | 12cb9f67e8c51b1927963ba56d20e53678397c88 | 82b194b063eadfb57d3e3e83b41279c122a33b53 | /movies/serializers.py | bc96149241f04df7e3c2f139ca49351bf4174a25 | [] | no_license | https://github.com/AnupKandalkar/ssdb_django_angular_heroku | 8c02a3a8751ffaf5957895bf4a27add2fe7d004a | 91619f128728d42f15e26dd0c57ad36fab1fd79c | refs/heads/master | 2021-01-21T21:40:16.190289 | 2019-01-16T17:36:08 | 2019-01-16T17:36:08 | 50,094,286 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from rest_framework import serializers
from movies.models import MoviesData, MoviesGenre
class MoviesDataSerializer(serializers.ModelSerializer):
class Meta:
model = MoviesData
class MoviesGenreSerializer(serializers.ModelSerializer):
class Meta:
model = MoviesGenre
| UTF-8 | Python | false | false | 301 | py | 22 | serializers.py | 11 | 0.760797 | 0.760797 | 0 | 11 | 25.727273 | 57 |
JeanMarc-Moly/mugimugi_client_api | 12,506,944,809,218 | a046654a358040ffe93ebe5a04b13378cdbb3c93 | 8f05deac11701551448e701108b0e3ba7925d845 | /mugimugi_client_api/search_item/author.py | c2f34077f8efe6eab3c5b1964f7e28918998fb01 | [] | no_license | https://github.com/JeanMarc-Moly/mugimugi_client_api | 0e7d4df3e0bd36586fe86f4b48a045131fdb3809 | d566ce5e462108858d2b4960bbd968b8e77e2540 | refs/heads/main | 2023-06-23T16:31:00.227665 | 2021-07-21T21:30:57 | 2021-07-21T21:30:57 | 376,898,617 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from dataclasses import dataclass
from typing import ClassVar
from mugimugi_client_api_entity import Author
from mugimugi_client_api_entity import SearchAuthor as Root
from mugimugi_client_api_entity.enum import ItemType
from mugimugi_client_api_entity.enum.element_prefix import ElementPrefix
from .abstract import SearchItem
@dataclass
class SearchAuthor(SearchItem):
ROOT: ClassVar[type] = Root
TYPE: ClassVar[ItemType] = ItemType.AUTHOR
PREFIX: ClassVar[ElementPrefix] = Author.PREFIX
| UTF-8 | Python | false | false | 505 | py | 59 | author.py | 50 | 0.809901 | 0.809901 | 0 | 16 | 30.5625 | 72 |
young2141/PS_Codes | 7,662,221,679,318 | b7dc6eb06d6bf674ec774d994a86750e043664f1 | a4ee3873ccd4b09a26b9febff9cd1a678dd90cc2 | /solved/swea4530.py | 0e86a9e243bda6bc2416297050c80f5597fbf826 | [] | no_license | https://github.com/young2141/PS_Codes | d37d97d9b92931d27cefcef052a7f3f897ef8e1c | 856fe7646d133cfb7e107b05ffe8d03ab8901e2d | refs/heads/master | 2023-02-25T19:10:41.890785 | 2023-02-14T04:16:36 | 2023-02-14T04:16:36 | 191,506,351 | 0 | 0 | null | false | 2019-06-14T04:06:42 | 2019-06-12T05:53:16 | 2019-06-12T07:46:21 | 2019-06-14T04:06:42 | 21 | 0 | 0 | 0 | C++ | false | false |
# sw expert academy 4530 lv4
def calc(num):
ret = 0
l = len(str(num))
n = str(num)
s = 0
for i in range(l-1, -1, -1):
val = int(n[i])
        if val > 4:
val -= 1
ret += val * pow(9,s)
s += 1
return ret - 1
tc = int(input())
for tc in range(1, tc+1):
start, end = map(int,input().split())
    if start < 0 and end > 0:
answer = calc(abs(start)) + calc(abs(end)) + 1
else:
answer = calc(abs(end)) - calc(abs(start))
print('#{} {}'.format(tc, abs(answer)))
'''
4
1 1
5 10
-3 -5
-2 -1
'''
| UTF-8 | Python | false | false | 509 | py | 71 | swea4530.py | 65 | 0.528487 | 0.469548 | 0 | 33 | 14.363636 | 48 |
patogc1910/Aprendiendo-Python | 8,083,128,452,497 | d641e7281c99716bc6d4dca78df2920e13175dcc | e8c7a31821c018dad0db63883e2ea3dacddb37d4 | /Conversiones.py | 72409795b3962bae486cf48d9747e09e7ee3e12b | [] | no_license | https://github.com/patogc1910/Aprendiendo-Python | e466467c368b15720fea6682ad8d0ee1e2cf5319 | da542ac738c0aee40d9bb157efcd0a7a781461f0 | refs/heads/master | 2020-07-25T16:49:28.706303 | 2019-09-21T04:48:17 | 2019-09-21T04:48:17 | 208,360,714 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
Valor = "1234"
print(type(Valor)) #Gracias al operador type nos ayuda que tipo de valor es entero,decimal etcc...
Valor = int(1234)
print(type(Valor))
Valor = "1234"
print(len(str(Valor)))
Entrada = "Numero Que Utilizamos {}"
print(Entrada.format(Valor))
#Jose Patricio Gomez Calderon #1852897
#Fecha:14/09/2019
| UTF-8 | Python | false | false | 329 | py | 11 | Conversiones.py | 10 | 0.699088 | 0.617021 | 0 | 16 | 18.9375 | 98 |
srnoglu1/python-study | 4,157,528,390,824 | b8b972697211f87d31db97b5a73c8c150cb7e24d | 53a11da69af60f6f49927d0f4871cf7767390812 | /list-comprehension.py | bac23cee29b2413170a5dbd829d84094b6c8d7fb | [] | no_license | https://github.com/srnoglu1/python-study | 4770e588bcc726a4721747f4209ae17659af986b | 5734c44994f5280cd58b51151724399b9b510171 | refs/heads/master | 2023-03-22T03:11:34.463481 | 2021-03-08T11:48:17 | 2021-03-08T11:48:17 | 337,135,206 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
list1 =[10,8,30,12,4,56]
numbers = []
for i in list1:
i *= 2
numbers.append(i)
print(i)
name = ["Ahmet","Mehmet","Gonca","Başak","Elif"]
result = [c.upper() for c in name]
result = [str(number) for number in list1]
result = [i.lower() for i in name]
print(result)
| UTF-8 | Python | false | false | 280 | py | 24 | list-comprehension.py | 24 | 0.609319 | 0.55914 | 0 | 14 | 18.928571 | 48 |
TeamMacLean/model_scripts | 7,713,761,293,638 | c7fe0cd26a87b25c689c2fb8e4aeece8870cf099 | 13fd447d20c703e4c72d6da2e9d98e2e0fc04a0b | /plotting/clusterV.py | cd23940330818944e500db160889381ab9e823a6 | [] | no_license | https://github.com/TeamMacLean/model_scripts | f06f1be8459ca41ab002efccf329d48a46e953f4 | 83312d35de65f54c052ca4a18186703fe9b0d38e | refs/heads/master | 2021-01-22T19:31:34.479684 | 2016-02-25T16:49:45 | 2016-02-25T16:49:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#changing notebook file to have a single python script that can run on the cluster
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import argparse ###*
import os ###*
###*#%matplotlib inline
###*pwd
import pickle
###*import numpy as np
###*import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
###*change the path to go to the right directory
###*pth='/usr/users/TSL_20/minottoa/new/' ###*'../'
parser=argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,epilog=("""
""")) ###*
parser.add_argument("p",help="path to the right directory") ###*
parser.add_argument("r", type=int, help="number of runs")###*
parser.add_argument("j", type=int, help="number of jumps")###*
args=parser.parse_args() ###*
pth=args.p ###*
xi=range(1,(args.j+1))###*
folder=''###*'minottoa/'#PAPER/'
subf=''###*'new/'#PLL2/'
#subf2='RUNSL2/'
runsx='RUN0/'
nx='n0/'
###*xi=range(1,21)
dbina=500
dbinb=50
#plt.style.use('fivethirtyeight')
plt.style.use('bmh')
###*print plt.style.available
###*xi=range(1,11)
###*print xi
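# First pass: stitch the per-jump pickles into continuous time series of lengths (ltj) and unit counts (nhj).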
ltj={}
nhj={}
xtj=[]
ytj=[]
ztj=[]
n=0
for j in xi:
fin=pth+folder+subf+runsx+nx+'pts'+str(j)+'plotdata.p'
#print fin
f=open(fin,"rb")
A=pickle.load(f)
f.close()
lj=A[1]
nj=A[2]
tj=[]
#tnj=[]
for i in range(len(nj)):
tj.append(i+n*len(nj))
xtj.append(n*len(nj))
ytj.append(nj[0])
ztj.append(lj[0])
n+=1
ltj[j]=[tj,lj]
nhj[j]=[tj,nj]
#raw_input()
fig, axesa = plt.subplots(1,figsize=(10, 8))
for i in ltj.keys():
axesa.plot(ltj[i][0],ltj[i][1],".-", markersize=2, linewidth=1.1)
markerline, stemlines, baseline = axesa.stem(xtj, ztj, linefmt='--', markerfmt='.', basefmt='')
#plt.setp(markerline, 'markerfacecolor', 'b')
#plt.setp(stemlines, 'linewidth', 1)
fig, axesb = plt.subplots(1,figsize=(10, 8))
for i in nhj.keys():
axesb.plot(nhj[i][0],nhj[i][1],".-", markersize=2, linewidth=1.1)
#axesb.axvline(nhj[i][0][0],0.0,nhj[i][1][0], color='k', linestyle='--')
#axesb.stem(xtj,ytj,linefmt='--', markerfmt='bo', basefmt='r-')
markerline, stemlines, baseline = axesb.stem(xtj, ytj, linefmt='--', markerfmt='.', basefmt='')
###*print xi
ltj={}
ltja={}
ltjb={}
nhj={}
nhja={}
nhjb={}
xtj=[]
ytj=[]
ztj=[]
n=0
for j in xi:
fin=pth+folder+subf+runsx+nx+'pts'+str(j)+'plotdata.p'
#print fin
f=open(fin,"rb")
A=pickle.load(f)
###*print A
f.close()
lj=A[1]
lja=A[10]
ljb=A[11]
    print(len(lja), len(ljb))
nj=A[2]
nja=A[3]
njb=A[4]
tj=[]
#tnj=[]
for i in range(len(nj)):
tj.append(i+n*len(nj))
xtj.append(n*len(nj))
ytj.append(nj[0])
ztj.append(lj[0])
n+=1
ltj[j]=[tj,lj]
ltja[j]=[tj,lja]
ltjb[j]=[tj,ljb]
nhj[j]=[tj,nj]
nhja[j]=[tj,nja]
nhjb[j]=[tj,njb]
#raw_input()
fig, axesa = plt.subplots(1,figsize=(10, 10))
xtjl=[xtj[1]+i for i in xtj]
###
##fig.suptitle('RXLR effectors length distribution P. Infestans', fontsize=40)
axesa.set_ylabel("$Length$ $(bp)$", fontsize=40)
axesa.set_xlabel("$Time$ $(Evolutionary$ $events)$",fontsize=40)
axesa.xaxis.set_tick_params(labelsize=20)
axesa.yaxis.set_tick_params(labelsize=20)
axesa.yaxis.set_major_formatter(mtick.FormatStrFormatter('%1.e'))
###
ms=0.1
kn=0
for i in ltj.keys():
if kn==0:
axesa.plot(ltj[i][0],ltj[i][1],marker=".",color='black',alpha=0.85,markersize=0.1,label="total length")
axesa.plot(ltja[i][0],ltja[i][1],marker=".",color='red',alpha=0.85,markersize=0.1,label="Eff length")
axesa.plot(ltjb[i][0],ltjb[i][1],marker=".",color='blue',alpha=0.85,markersize=0.1,label="T.E. lenght")
kn=1
else:
axesa.plot(ltj[i][0],ltj[i][1],marker=".",color='black',alpha=0.85,markersize=0.1)
axesa.plot(ltja[i][0],ltja[i][1],marker=".",color='red',alpha=0.85,markersize=0.1)
axesa.plot(ltjb[i][0],ltjb[i][1],marker=".",color='blue',alpha=0.85,markersize=0.1)
markerline, stemlines, baseline = axesa.stem(xtj, ztj, linefmt='--', markerfmt='.', basefmt='')
plt.setp(baseline, 'color','b', 'linewidth', 0)
axesa.legend(loc='best', fancybox=True, framealpha=0.5)
#axesa.stem(xtjl,ltja,linefmt='--', markerfmt='bo', basefmt='r-')
#axesa.stem(xtjl,ltjb,linefmt='--', markerfmt='o', basefmt='r-')
####################################################################################
####################################################################################
####################################################################################
###*uncommented stuff to save pic
fout=folder+subf###*+subf2
namepth=pth+fout+"typrunlength"#.svg" ###*add pth
print(namepth)
fig.patch.set_alpha(0.5)
#fig.savefig(namepth+'.eps',format='eps' ,dpi=1200, bbox_inches='tight')
###*comment line below because of runtimeerror arise (it should be saving the same image in different format, shouldn't it?)
fig.savefig(namepth+'.png',format='png' ,dpi=1200, bbox_inches='tight')
###*commented below cause jpg is not supported?????
###*fig.savefig(namepth+'.jpg',formar='jpg' ,dpi=1200, bbox_inches='tight')
####################################################################################
####################################################################################
####################################################################################
fig, axesb = plt.subplots(1,figsize=(10, 10))
axesb.set_ylabel("$Number$ $of$ $units$", fontsize=40)
axesb.set_xlabel("$Time$ $(Evolutionary$ $events)$",fontsize=40)
axesb.xaxis.set_tick_params(labelsize=20)
axesb.yaxis.set_tick_params(labelsize=20)
axesb.yaxis.set_major_formatter(mtick.FormatStrFormatter('%1.e'))
###
ms=1
kn=0
for i in nhj.keys():
if kn==0:
axesb.plot(nhj[i][0],nhj[i][1],marker='.',color='black',alpha=0.85,markersize=1.0,label="total number")
axesb.plot(nhj[i][0],nhja[i][1],marker='.',color='red',alpha=0.85,markersize=1.0,label="Eff number")
axesb.plot(nhj[i][0],nhjb[i][1],marker='.',color='blue',alpha=0.85,markersize=1.0,label="T.E. number")
kn=1
else:
axesb.plot(nhj[i][0],nhj[i][1],marker='.', color='black',alpha=0.85,markersize=1.0)
axesb.plot(nhj[i][0],nhja[i][1],marker='.',color='red',alpha=0.85,markersize=1.0)
axesb.plot(nhj[i][0],nhjb[i][1],marker='.',color='blue',alpha=0.85,markersize=1.0)
markerlinex, stemlinesx, baselinex = axesb.stem(xtj, ytj, linefmt='--', markerfmt='.', basefmt='')
plt.setp(baselinex, 'color','b', 'linewidth', 0)
axesb.legend(loc='best', fancybox=True, framealpha=0.5)
###################################################################
###################################################################
###*uncomment lines to make image be saved
#fout=folder+subf+subf2
namepth=pth+fout+"typrunnumbers"#.svg" ###*add pth
print(namepth)
#fig.set_size_inches(13.5,10.5)
fig.patch.set_alpha(0.5)
#fig.savefig(namepth,dpi=100, bbox_inches='tight')
#fig.savefig(namepth+'.eps',format='eps',dpi=1200, bbox_inches='tight')
fig.savefig(namepth+'.png',format='png' ,dpi=1200, bbox_inches='tight')
#fig.savefig(namepth+'.svg',format='svg', dpi=200, bbox_inches='tight')
###*commented below cause jpg is not supported
###*fig.savefig(namepth+'.jpg',format='jpg',dpi=1200, bbox_inches='tight')
fig, axesb = plt.subplots(1,figsize=(10, 10))
axesb.set_ylabel("$Number$ $of$ $TE's$", fontsize=40)
axesb.set_xlabel("$Number$ $of$ $EG's$",fontsize=40)
axesb.xaxis.set_tick_params(labelsize=20)
axesb.xaxis.set_major_formatter(mtick.FormatStrFormatter('%1.e'))
axesb.yaxis.set_tick_params(labelsize=20)
axesb.yaxis.set_major_formatter(mtick.FormatStrFormatter('%1.e'))
###
##########################################
for i in nhj.keys():
axesb.plot(nhjb[i][1],nhja[i][1],".",markersize=1.0)
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
axins = zoomed_inset_axes(axesb, 3.0, loc=4)
for i in nhj.keys():
axins.plot(nhjb[i][1],nhja[i][1],"o",markersize=4.0)
x1, x2, y1, y2 = 350, 470, 900, 1100 #specify the limits
axins.set_xlim(x1, x2) # apply the x-limits
axins.set_ylim(y1, y2) # apply the y-limits
plt.yticks(visible=False)
plt.xticks(visible=False)
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
mark_inset(axesb, axins, loc1=1, loc2=2, fc="none", ec="0.0")
##############################################
#ax_inset=fig.add_axes([0.35,0.17,0.65,0.5])
#ax_inset.xaxis.set_tick_params(labelsize=11)
#ax_inset.yaxis.set_tick_params(labelsize=11)
#for i in nhj.keys():
# ax_inset.plot(nhjb[i][1],nhja[i][1],"o",markersize=4.0)
#ax_inset.set_ylim(900,1100)
#ax_inset.set_xlim(350,480)
######################################
#axesb.axvline(nhj[i][0][0],0.0,nhj[i][1][0], color='k', linestyle='--')
#axesb.stem(xtj,ytj,linefmt='--', markerfmt='bo', basefmt='r-')
###############################################################
###############################################################
###############################################################
###*uncomment lines to make images be saved
fout=folder+subf###*+subf2
namepth=pth+fout+"randomwalk"#.eps"###*add pth
##print namepth
#fig.set_size_inches(13.5,10.5)
fig.patch.set_alpha(0.5)
#fig.savefig(namepth+'.eps', dpi=1200, bbox_inches='tight')
fig.savefig(namepth+'.png', dpi=1200, bbox_inches='tight')
##fig.savefig(namepth+'.svgz', dpi=600, bbox_inches='tight')
###*commented below cau8se jpg is not supported
###*fig.savefig(namepth+'.jpg', dpi=1200, bbox_inches='tight')
##fig.savefig(namepth+'.pdf', dpi=1200, bbox_inches='tight')
plt.gcf().canvas.get_supported_filetypes()
nX=['n0/','n1/','n2/','n3/','n4/','n5/','n6/','n7/','n8/'] #Parameters C ###*uncommented this line
###*nX=['n0/']
rnX=[]
import numpy as np
for mu in range(args.r): ###*(50): ###*changed the number of runs
###*if mu!=18 and mu!=49: ###*add line to avoid exctiontions
if all(os.listdir(pth+'RUN'+str(mu)+'/'+x)==[] for x in nX): ###*
pass ###*
else: ###*
rnX.append('RUN'+str(mu)+'/') #RUNS R0,R1,...,
###*xi=range(1,11) ###*in theory we don't have any jumps here is ti fine if i leave it as it is? #jumps
#############################################################
n=0
M=0
t=[]
NAV={}
NAVa={}
NAVb={}
LAV={}
LAVa={}
LAVb={}
STDN={}
STDNa={}
STDNb={}
STDL={}
LSTDa={}
LSTDb={}
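# Ensemble statistics: for each parameter set in nX, average lengths and counts over all runs in rnX, tracking standard deviations.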
for pc in nX: #n0, n1, etc
AVN={}
AVNa={}
AVNb={}
AVL={}
AVLa={}
AVLb={}
for rj in rnX: #R0, R1, R2, ...
#n=0
L=[]
N=[]
Na=[]
Nb=[]
La=[]
Lb=[]
for ji in xi: #j1, j2, j2,j3
fin=pth+folder+subf+ rj + pc +'pts'+str(ji)+'plotdata.p'
#print fin
f=open(fin,"rb")
A=pickle.load(f)
f.close()
lz=A[1]
nz=A[2]
nza=A[3]
nzb=A[4]
lza=A[10]
lzb=A[11]
for k in lz:
if M==0:
t.append(n)
n+=1.0
L.append(k)
for k in nz:
N.append(k)
for k in nza:
Na.append(k)
for k in nzb:
Nb.append(k)
for k in lza:
La.append(k)
for k in lzb:
Lb.append(k)
M=1
AVN[rj]=N
AVNa[rj]=Na
AVNb[rj]=Nb
AVL[rj]=L
AVLa[rj]=La
AVLb[rj]=Lb
avl=[]
stdl=[]
avn=[]
stdn=[]
avna=[]
stdna=[]
avnb=[]
stdnb=[]
avla=[]
stdnla=[]
avlb=[]
stdnlb=[]
print(AVN.keys())
rj=0
for tr in t:
avx=[]
avx2=[]
avxa=[]
avxb=[]
avx2a=[]
avx2b=[]
for mu in AVN.keys():
avx.append(AVN[mu][rj])
avxa.append(AVNa[mu][rj])
avxb.append(AVNb[mu][rj])
avx2.append(AVL[mu][rj])
avx2a.append(AVLa[mu][rj])
avx2b.append(AVLb[mu][rj])
avl.append(np.mean(avx2))
stdl.append(np.std(avx2))
avn.append(np.mean(avx))
stdn.append(np.std(avx))
avna.append(np.mean(avxa))
stdna.append(np.std(avxa))
avnb.append(np.mean(avxb))
stdnb.append(np.std(avxb))
avla.append(np.mean(avx2a))
stdnla.append(np.std(avx2a))
avlb.append(np.mean(avx2b))
stdnlb.append(np.std(avx2b))
rj+=1
NAV[pc]=avn
NAVa[pc]=avna
NAVb[pc]=avnb
LAV[pc]=avl
LAVa[pc]=avla
LAVb[pc]=avlb
STDN[pc]=stdn
STDL[pc]=stdl
STDNa[pc]=stdna
STDNb[pc]=stdnb
LSTDa[pc]=stdnla
LSTDb[pc]=stdnlb
print(len(t))
#raw_input()
#############################################################
fig1, axesa = plt.subplots(1,figsize=(10, 8))
axesa.set_ylabel("$< Lengths >_{Ens}$", fontsize=40)
axesa.set_xlabel("$Time$ $(Evolutionary$ $events)$",fontsize=40)
axesa.xaxis.set_tick_params(labelsize=20)
axesa.xaxis.set_major_formatter(mtick.FormatStrFormatter('%2.e'))
axesa.yaxis.set_tick_params(labelsize=20)
axesa.yaxis.set_major_formatter(mtick.FormatStrFormatter('%2.e'))
fig2, axesb = plt.subplots(1,figsize=(10, 10))
axesb.set_ylabel("$<Number$ $of$ $units>_{Ens}$", fontsize=40)
axesb.set_xlabel("$Time$ $(Evolutionary$ $events)$",fontsize=40)
axesb.xaxis.set_tick_params(labelsize=20)
axesb.xaxis.set_major_formatter(mtick.FormatStrFormatter('%1.e'))
axesb.yaxis.set_tick_params(labelsize=20)
axesb.yaxis.set_major_formatter(mtick.FormatStrFormatter('%1.e'))
ltn=0
for ck in NAV.keys():
ytjb=[]
ytjba=[]
ytjbb=[]
ytja=[]
for i in xtj:
ytjb.append(NAV[ck][i])
ytjba.append(NAV[ck][i])
ytjbb.append(NAV[ck][i])
ytja.append(LAV[ck][i])
if ltn==0:
axesb.plot(t,NAV[ck],color="black",label="Average Num of Units")
#ltn=1
else:
axesb.plot(t,NAV[ck],color="black")
yrrminus=[]
yrrplus=[]
l=0
for sj in NAV[ck]:
yrrminus.append(sj-STDN[ck][l])
yrrplus.append(sj+STDN[ck][l])
l+=1
axesb.fill_between(t, yrrminus, yrrplus,
alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848')
#axesb.stem(xtj,ytjb,linefmt='--', markerfmt='bo', basefmt='r-')
if ltn==0:
axesb.plot(t,NAVa[ck],color="red",label="Average Num of E. Units")
#ltn=1
else:
axesb.plot(t,NAVa[ck],color="red")
#axesb.plot(t,NAVa[ck])
yrrminusa=[]
yrrplusa=[]
l=0
for sj in NAVa[ck]:
yrrminusa.append(sj-STDNa[ck][l])
yrrplusa.append(sj+STDNa[ck][l])
l+=1
axesb.fill_between(t, yrrminusa, yrrplusa,
alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848')
#axesb.stem(xtj,ytjb,linefmt='--', markerfmt='bo', basefmt='r-')
if ltn==0:
axesb.plot(t,NAVb[ck],color="blue",label="Average Num of TE's")
#ltn=1
else:
axesb.plot(t,NAVb[ck],color="blue")
#axesb.plot(t,NAVb[ck])
yrrminusb=[]
yrrplusb=[]
l=0
for sj in NAVb[ck]:
yrrminusb.append(sj-STDNb[ck][l])
yrrplusb.append(sj+STDNb[ck][l])
l+=1
axesb.fill_between(t, yrrminusb, yrrplusb,
alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848')
axesb.stem(xtj,ytjb,linefmt='--', markerfmt='bo', basefmt='r-')
###########################
yrrminus=[]
yrrplus=[]
l=0
for sj in LAV[ck]:
yrrminus.append(sj-STDL[ck][l])
yrrplus.append(sj+STDL[ck][l])
l+=1
axesa.fill_between(t, yrrminus, yrrplus,
alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848')
#axesa.stem(xtj,ytjb,linefmt='--', markerfmt='bo', basefmt='r-')
if ltn==0:
axesa.plot(t,LAV[ck],color="black",label="Average Length")
#ltn=1
else:
axesa.plot(t,LAV[ck],color="black")
#axesa.plot(t,LAV[ck])
#axesa.stem(xtj,ytja,linefmt='--', markerfmt='bo', basefmt='r-')
#axesa.fill_between(t, yrrminusb, yrrplusb,
#alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848')
#axesa.stem(xtj,ytjb,linefmt='--', markerfmt='bo', basefmt='r-')
if ltn==0:
axesa.plot(t,LAVa[ck],color="red",label="Average EGs Length")
#ltn=1
else:
axesa.plot(t,LAVa[ck],color="red")
#axesa.plot(t,LAVa[ck])
lyrrminusa=[]
lyrrplusa=[]
l=0
for sj in LAVa[ck]:
lyrrminusa.append(sj-LSTDa[ck][l])
lyrrplusa.append(sj+LSTDa[ck][l])
l+=1
axesa.fill_between(t, lyrrminusa, lyrrplusa, alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848')
#axesa.stem(xtj,ytjb,linefmt='--', markerfmt='bo', basefmt='r-')
if ltn==0:
axesa.plot(t,LAVb[ck],color="blue",label="Average TEs Length")
ltn=1
else:
axesa.plot(t,LAVb[ck],color="blue")
#axesa.plot(t,LAVb[ck])
lyrrminusb=[]
lyrrplusb=[]
l=0
for sj in LAVb[ck]:
lyrrminusb.append(sj-LSTDb[ck][l])
lyrrplusb.append(sj+LSTDb[ck][l])
l+=1
axesa.fill_between(t, lyrrminusb, lyrrplusb,
alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848')
axesa.stem(xtj,ytja,linefmt='--', markerfmt='bo', basefmt='r-')
###############################################################
axesb.legend(loc='best', fancybox=True, framealpha=0.5)
fout=pth+folder+subf#+subf2
namepth=fout+"averagesl"#.eps"
##print namepth
##fig.set_size_inches(13.5,10.5)
fig1.patch.set_alpha(0.5)
#fig1.savefig(namepth+'.eps', dpi=1200, bbox_inches='tight')
fig1.savefig(namepth+'.png', dpi=1200, bbox_inches='tight')
##fig.savefig(namepth+'.svgz', dpi=600, bbox_inches='tight')
###*commented below cause jpg is not supported
###*fig1.savefig(namepth+'.jpg', dpi=1200, bbox_inches='tight')
##fig.savefig(namepth+'.pdf', dpi=1200, bbox_inches='tight')
##############################################################
axesa.legend(loc='best', fancybox=True, framealpha=0.5)
namepth=fout+"averagesn"#.eps"
##print namepth
##fig.set_size_inches(13.5,10.5)
fig2.patch.set_alpha(0.5)
#fig2.savefig(namepth+'.eps', dpi=1200, bbox_inches='tight')
fig2.savefig(namepth+'.png', dpi=1200, bbox_inches='tight')
##fig.savefig(namepth+'.svgz', dpi=600, bbox_inches='tight')
###*commented below cause jpg is not supported
###*fig2.savefig(namepth+'.jpg', dpi=1200, bbox_inches='tight')
##fig.savefig(namepth+'.pdf', dpi=1200, bbox_inches='tight')
for ck in LAV.keys():
fig, axesa = plt.subplots(1,figsize=(10, 8))
axesa.plot(t,LAV[ck])
axesa.stem(xtj,ytja,linefmt='--', markerfmt='bo', basefmt='r-')
#########################
yrrminus=[]
yrrplus=[]
l=0
for sj in LAV[ck]:
yrrminus.append(sj-STDL[ck][l])
yrrplus.append(sj+STDL[ck][l])
l+=1
axesa.fill_between(t, yrrminus, yrrplus,
alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848')
#axesa.stem(xtj,ytjb,linefmt='--', markerfmt='bo', basefmt='r-')
axesa.plot(t,LAVa[ck])
lyrrminusa=[]
lyrrplusa=[]
l=0
for sj in LAVa[ck]:
lyrrminusa.append(sj-LSTDa[ck][l])
lyrrplusa.append(sj+LSTDa[ck][l])
l+=1
axesa.fill_between(t, lyrrminusa, lyrrplusa, alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848')
#axesa.stem(xtj,ytjb,linefmt='--', markerfmt='bo', basefmt='r-')
axesa.plot(t,LAVb[ck])
lyrrminusb=[]
lyrrplusb=[]
l=0
for sj in LAVb[ck]:
lyrrminusb.append(sj-LSTDb[ck][l])
lyrrplusb.append(sj+LSTDb[ck][l])
l+=1
axesa.fill_between(t, lyrrminusb, lyrrplusb,
alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848')
axesa.stem(xtj,ytjb,linefmt='--', markerfmt='bo', basefmt='r-')
print(nX)  # pars
print(rnX)  # runs
print(xi)  # jumps
print(LAV.keys())
print(folder, subf)  # subf2
#raw_input()
#for rn in rnX:
ALLSERIES={}
kt=0
n=0
#clr=iter(cm.rainbow(np.linspace(0,1,len(xi))))
tj=[]
for prt in nX:
fig, axesa = plt.subplots(1,figsize=(10, 8))
fig, axesb = plt.subplots(1,figsize=(10, 8))
#####################
#ytjb=[]
#ytja=[]
#for i in xtj:
# ytjb.append(NAV[ck][i])
# ytja.append(LAV[ck][i])
axesb.plot(t,NAV[prt],'black')
yerrminus=[]
yerrplus=[]
l=0
for sj in NAV[prt]:
yerrminus.append(sj-STDN[prt][l])
yerrplus.append(sj+STDN[prt][l])
l+=1
axesb.fill_between(t, yerrminus, yerrplus,
alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848')
#axesb.stem(xtj,ytjb,linefmt='--', markerfmt='bo', basefmt='r-')
yerrminusb=[]
yerrplusb=[]
l=0
for sj in LAV[prt]:
yerrminusb.append(sj-STDL[prt][l])
yerrplusb.append(sj+STDL[prt][l])
l+=1
    axesa.plot(t, LAV[prt], 'black')
#axesa.stem(xtj,ytja,linefmt='--', markerfmt='bo', basefmt='r-')
axesa.fill_between(t, yerrminusb, yerrplusb,
alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848')
#axesa.stem(xtj,ytjb,linefmt='--', markerfmt='bo', basefmt='r-')
##################
for rn in rnX:
nz=[]
lz=[]
#tj=[]
for jp in xi:
file= pth+folder+subf+rn+prt+'pts'+str(jp)+'plotdata.p'
#print file
f=open(file,"rb")
A=pickle.load(f)
f.close()
lz.extend(A[1])
nz.extend(A[2])
if kt==0:
for i in range(len(A[1])):
tj.append(i+n*len(A[1]))
n+=1
#raw_input()
kt=1
        print(len(tj), len(lz), len(nz))
#c=next(clr)
#fig, axesa = plt.subplots(1,figsize=(10, 8))
axesa.plot(tj,lz)#,c=c)
axesb.plot(tj,nz)#,c=c)
#changed saving order to avoid memoryerror stop the program if it raised at CDATAV dumping
par=[args.p,args.r,args.j] ###*added
name2=pth+"CDATAV_par.p" ###*added
pickle.dump(par,open(name2,"wb"), protocol=2) ###*
Data=[t,LAV,NAV,STDN,STDL]
name=pth+"CDATAVcomp.p" ###*as above
pickle.dump(Data,open(name,"wb"))
Data=[t,LAV,NAV,STDN,STDL,NAVa,NAVb,STDNa,STDNb,LAVa,LAVb,LSTDa,LSTDb]
name=pth+"CDATAV.p" ###*changed to have only one code in the main dir (added pth+)
pickle.dump(Data,open(name,"wb"),protocol=2)
| UTF-8 | Python | false | false | 22,087 | py | 37 | clusterV.py | 15 | 0.560149 | 0.529225 | 0 | 781 | 27.28041 | 147 |
WeltonKing/Lockbox | 6,339,371,736,095 | 3fdb9c5634b342a89b7230d49852fbd0bf7143dd | e9f1d1dc96f04a712ae98e8bd56ba4573c4f9846 | /classes.py | f83d46beb1b1353f5f99b22f036edf702c75fbb9 | [] | no_license | https://github.com/WeltonKing/Lockbox | 3804daa4508c3e50519b82fabe804debdd8552c0 | fb5fb04d029d3133ae4bced8b9fcaca7d8fa6978 | refs/heads/master | 2022-12-23T17:47:48.311232 | 2020-10-01T16:26:04 | 2020-10-01T16:26:04 | 292,721,698 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
project: Lockbox
file: classes.py
authors: david rademacher & welton king v
desc: contains class definitions
'''
from enum import Enum
# states of the program
class states(Enum):
START_UP = 0
MAIN_MENU = 1
DISPLAY = 2
# account messages
class msgs(Enum):
PASS_INCORRECT = 0
NAME_MISSING = 1
NAME_TAKEN = 2
# user account object
class user:
def __init__(self, name='', password=''):
self._name = name
self._pass = password
def is_empty(self):
return self._name == '' and self._pass == ''
def __repr__(self):
        return f'{{user: {self._name}}}'
def __str__(self):
return self._name
| UTF-8 | Python | false | false | 663 | py | 7 | classes.py | 6 | 0.598793 | 0.589744 | 0 | 35 | 17.942857 | 52 |
fruffy/iroko | 19,224,273,644,067 | 2390df932c5987605f8c2cc0df0b0f759dea46e4 | 22e1c8f460a75e2f4ce77fb3fd9bba04113fc77b | /DataCenterGym/DataCenterGym/__init__.py | 3f8765ed86692b50107b06ca8d674ab51c814066 | [] | no_license | https://github.com/fruffy/iroko | c69b9273c23c64a749921c11d37ba07cc4d11157 | 5641e99a69c803d202737ca81f710c14e6e9f35c | refs/heads/master | 2018-10-15T08:52:46.401525 | 2018-10-14T20:06:27 | 2018-10-14T20:06:27 | 105,965,046 | 0 | 1 | null | false | 2018-10-27T07:17:04 | 2017-10-06T03:47:57 | 2018-10-27T02:58:41 | 2018-10-27T07:17:04 | 109,365 | 0 | 1 | 1 | Python | false | null |
from gym.envs.registration import register
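# Register the custom environment so gym.make('DataCenter-v0') can find it.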
register(
id='DataCenter-v0',
entry_point='DataCenterGym.envs:DCEnv'
)
| UTF-8 | Python | false | false | 139 | py | 40 | __init__.py | 36 | 0.654676 | 0.647482 | 0 | 6 | 22.166667 | 46 |
notnews/archive_news_cc | 11,940,009,097,200 | af4aaa3316dfd0a86e9eaf6edd8aea517bb48bb2 | f44f4f7831de79d51158733ce2aa6a1d8dae9378 | /scripts/scrape_archive_org.py | efbef121f0ab951c2d5b4eac1f5fb40b22d4af4a | ["MIT"] | permissive | https://github.com/notnews/archive_news_cc | a6f6db716026b750921d8c6d971831a708030d5c | 469979c3dbdcf901eca87c8ebba4f328b73bcdd2 | refs/heads/master | 2023-06-26T21:24:20.128614 | 2023-06-11T23:13:56 | 2023-06-11T23:14:20 | 112,410,624 | 42 | 5 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import optparse
#import csv
import pandas as pd
import gzip
import time
#import xml.parsers.expat
import requests
#from bs4 import BeautifulSoup
import concurrent.futures
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(message)s',
handlers=[logging.FileHandler("logs/scrape_archive_org.log"),
logging.StreamHandler()])
__version__ = 'r5 (2022/10/28)'
META_DIR = 'data/meta/'
HTML_DIR = 'data/html/'
MAX_WORKERS = int(os.environ.get("MAX_WORKERS", 3))
def parse_command_line(argv):
"""Command line options parser for the script
"""
usage = "Usage: %prog [options] <CSV input file>"
parser = optparse.OptionParser(usage=usage)
parser.add_option("--meta", action="store",
type="string", dest="meta", default=META_DIR,
help="Meta files directory (default: '{:s}')".format(META_DIR))
parser.add_option("--html", action="store",
type="string", dest="html", default=HTML_DIR,
help="HTML files directory (default: '{:s}')".format(HTML_DIR))
parser.add_option("-s", "--skip", action="store",
type="int", dest="skip", default=0,
help="Skip rows (default: 0)")
parser.add_option("-c", "--compress", action="store_true",
dest="compress", default=False,
help="Compress downloaded files (default: No)")
return parser.parse_args(argv)
def download_file(options, url, local_filename):
# NOTE the stream=True parameter
logging.info("Downloading...[{:s}]".format(url))
r = requests.get(url, stream=True)
if options.compress:
f = gzip.open(local_filename, 'wb')
else:
f = open(local_filename, 'wb')
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
f.close()
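# Fetch the meta XML and details HTML for one identifier, retrying up to five times with a 120 s back-off.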
def handle_download(_id, retry=0):
try:
_id = _id[0]
file_name = os.path.join(options.meta, _id + "_meta.xml")
if options.compress:
file_name += ".gz"
if not os.path.isfile(file_name):
rq = requests.get('http://archive.org/download/' + _id)
if rq.status_code == 200:
if not rq.url.endswith('/'):
rq.url = rq.url + '/'
url = rq.url + _id + "_meta.xml"
if not os.path.isfile(file_name):
download_file(options, url, file_name)
url = 'http://archive.org/details/' + _id
file_name = os.path.join(options.html, _id + ".html")
if options.compress:
file_name += ".gz"
if not os.path.isfile(file_name):
download_file(options, url, file_name)
    except Exception:
if retry > 4:
logging.error(f'id: {_id}. Stopped retrying')
else:
retry += 1
wait_time = 120
logging.warning(f'id: {_id}. Waiting {wait_time} secs and retrying... ')
time.sleep(wait_time)
handle_download(_id, retry=retry)
def parallel_download(identifiers):
with concurrent.futures.ThreadPoolExecutor() as executor:
for r in executor.map(handle_download, identifiers):
if r:
logging.warning(r)
if __name__ == "__main__":
logging.info("{:s} - {:s}\n".format(os.path.basename(sys.argv[0]), __version__))
logging.info(f'Max workers set to {MAX_WORKERS}')
(options, args) = parse_command_line(sys.argv)
if len(args) < 2:
logging.info("Usage: {:s} [options] <CSV input file>".format(os.path.basename(sys.argv[0])))
sys.exit(-1)
if not os.path.exists(options.meta):
os.mkdir(options.meta)
if not os.path.exists(options.html):
os.mkdir(options.html)
# CSV to list
df = pd.read_csv(args[1])
identifiers = [list(row) for row in df.values]
# Consider skip option
if options.skip:
identifiers = identifiers[options.skip:]
# Download
if os.environ.get("ARCHIVE_TEST"):
# Testing
for id_ in identifiers:
handle_download(id_)
else:
# Multithread
total = len(identifiers)
logging.info(f'{total} total identifiers to process...')
downloaded = len(os.listdir(options.html))
logging.info(f'{downloaded} total identifiers downloaded...')
while downloaded < total:
try:
parallel_download(identifiers)
except Exception as e:
logging.warning(f'Restarting: {e}')
time.sleep(120)
logging.info("All done")
| UTF-8 | Python | false | false | 4,853 | py | 57 | scrape_archive_org.py | 3 | 0.562127 | 0.554708 | 0 | 159 | 29.522013 | 100 |
dmytrov/gaussianprocess | 11,321,533,829,951 | 26b11950c5ec47ed92f86883656006d98d5df426 | 7a3c194356437db110671ad163f03df1344a5e87 | /code/validation/generate/learn_dynamic_mp.py | b912f7d006722a06054ace8e506f3f14db4e9b3f | ["MIT"] | permissive | https://github.com/dmytrov/gaussianprocess | 6b181afdb989415ec929197cdb20efee571ebbcc | 7044bd2d66f44e10656fee17e94fdee0c24c70bb | refs/heads/master | 2022-12-08T22:23:11.850861 | 2020-09-14T19:37:33 | 2020-09-14T19:37:33 | 295,513,701 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import time
import numpy as np
import dmp.discretedmp as ddmp
from validation.common import *
import validation.generate.common as vgc
class DMPLearner(vgc.ModelLerner):
def __init__(self, dirname, param, args):
super(DMPLearner, self).__init__(dirname, param, args)
self.load_data()
self.generate_BVHes()
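    # Fit a discrete DMP to the training trials, then roll it out for the length of the validation sequence.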
def train_model(self, training, validation):
dmp = ddmp.DiscreteDMP(npsi=self.param["npsi"])
dmp.learn(training)
dmp.reset()
dmp.run(validation.shape[0])
predicted = dmp.y_path.copy()
predicted[np.isnan(predicted)] = 0.0
errors = compute_errors(observed=validation, predicted=predicted)
#print(predicted)
#print(np.sum((validation-predicted)**2)
#plt.plot(validation, color="blue")
#plt.plot(predicted, color="orange")
#plt.plot(validation-predicted, color="red")
#plt.show()
return dmp, predicted, errors
def generate_BVHes(self):
self.training_y = [t.copy() for t in self.trial.training_data()]
for i in range(len(self.training_y)):
validation = self.training_y[i]
t0 = time.time()
dmp, predicted, errors = self.train_model(self.training_y, validation)
t1 = time.time()
errors = compute_errors(observed=validation,
predicted=predicted)
errors["timing"] = t1 - t0
errors["settings"] = self.param
# Make a BVH
if self.partitioner is not None:
self.partitioner.set_all_parts_data(predicted)
self.partitioner.motiondata.write_BVH(
os.path.join(self.dirname, "final({}).bvh".format(i)))
# Write the errors
efn = os.path.join(self.dirname, "errors({}).pkl".format(i))
with open(efn, "wb") as filehandle:
pickle.dump(errors, filehandle)
efn = os.path.join(self.dirname, "errors.pkl")
with open(efn, "wb") as filehandle:
pickle.dump(errors, filehandle)
| UTF-8 | Python | false | false | 2,136 | py | 114 | learn_dynamic_mp.py | 108 | 0.580993 | 0.577247 | 0 | 63 | 32.857143 | 82 |
osterburg/ML_models | 9,938,554,359,972 | 07640eb62c10d296e8b69024e4cc72c0597d7e8d | 252a31d1064c83f103f330fa123b40675afd924c | /UNet/main.py | b6ba9c2de6d152cf3e5b6f059f0c8959ff39d83c | ["MIT"] | permissive | https://github.com/osterburg/ML_models | f483043d05bf6b6ab4ca0737b7c0c567360338d6 | d09d2603092f3274592b88af30572fbdd9f771ed | refs/heads/master | 2020-06-19T20:28:15.182345 | 2019-07-14T13:14:25 | 2019-07-14T13:14:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import argparse
import model
def get_parser():
"""
Set hyper parameters for training UNet.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--epoch', type=int, default=100)
parser.add_argument('-lr', '--learning_rate', type=float, default=0.0001)
parser.add_argument('-tr', '--train_rate', type=float, default=0.8,
help='ratio of training data')
parser.add_argument('-b', '--batch_size', type=int, default=20)
parser.add_argument('-l2', '--l2', type=float, default=0.05,
help='L2 regularization')
return parser
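# Parse the CLI hyper-parameters and hand them to the UNet trainer.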
if __name__ == '__main__':
parser = get_parser().parse_args()
unet = model.UNet(classes=2)
unet.train(parser)
| UTF-8 | Python | false | false | 711 | py | 6 | main.py | 4 | 0.624473 | 0.59775 | 0 | 23 | 29.956522 | 75 |
nan0te/Python-Algorithm-And-DataStructure | 12,738,873,000,367 | d617bf4391c07ca54e76b70420b2c51b6d841693 | 72f908b64b74b3a861a64e78f4250d35bd57186d | /POO punto 1/Figura.py | a1b3a64851840eaea34521af35bd3cbc73cb16b8 | ["MIT"] | permissive | https://github.com/nan0te/Python-Algorithm-And-DataStructure | 9060811b39ba9c647b6fa3a84567624e02d7fee6 | 7b7802b56d397c38f230f5efb687cedc6cc263f3 | refs/heads/master | 2023-04-22T21:28:53.381880 | 2021-04-27T06:05:59 | 2021-04-27T06:05:59 | 290,072,444 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Figura:
def __init__(self, nombre, color):
self.name = nombre
self.color = color
def mostrar(self):
pass
| UTF-8 | Python | false | false | 149 | py | 33 | Figura.py | 31 | 0.530201 | 0.530201 | 0 | 7 | 19.571429 | 38 |
jcreinhold/intensity-normalization | 15,899,968,969,876 | 95d8ba0a75b8f9f5680d88e2c56c786c46afe528 | a2a7506038b69541523f98f2f98613d731ad2acf | /intensity_normalization/cli/preprocess.py | 493fcb78deffddae3e8bb6487dd997dcc5d1e83d | ["Apache-2.0"] | permissive | https://github.com/jcreinhold/intensity-normalization | 163e3ed26ea80acef1513e448b7fef78a2c6ddf2 | ce53748bb6b6721acad031d3b98a2e8f9921e4c6 | refs/heads/master | 2023-06-10T03:25:17.287607 | 2023-05-31T21:03:26 | 2023-05-31T21:03:26 | 136,844,850 | 283 | 62 | NOASSERTION | false | 2022-02-25T22:59:54 | 2018-06-10T21:05:58 | 2022-02-17T14:54:50 | 2022-02-25T22:59:54 | 1,771 | 214 | 43 | 9 | Python | false | false |
"""CLI for MR image preprocessor
Author: Jacob Reinhold <jcreinhold@gmail.com>
Created on: 13 Oct 2021
"""
__all__ = ["preprocess_main", "preprocess_parser"]
from intensity_normalization.util.preprocess import Preprocessor
# main functions and parsers for CLI
preprocess_parser = Preprocessor.parser()
preprocess_main = Preprocessor.main(preprocess_parser)
| UTF-8 | Python | false | false | 360 | py | 57 | preprocess.py | 34 | 0.777778 | 0.761111 | 0 | 12 | 30 | 64 |
sloww/checklist | 8,332,236,577,737 | 02361a0a7d94d8ad760ab620f253417b8e8ab28d | b2d8fcfcf3604e255f0dee55d04944316680efd4 | /admin.py | 65230e912f99ccb8bf1c7ec98501568e02b3e96d | [] | no_license | https://github.com/sloww/checklist | 19923e61506d3aad36c91df011aabbc850d229ef | ea1d1d470c4eaef7fec59cb9258f066e0d90357e | refs/heads/master | 2021-05-06T22:51:02.585903 | 2017-12-04T13:37:46 | 2017-12-04T13:37:46 | 112,867,836 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from .models import Company, User, Template, Equipment ,Record, CheckPoint,Team, RecordPoint
from v1.admin import admin_site
from searchadmin.admin import SelectModelAdmin
# class DataMasterAdmin(SelectModelAdmin):
#     search_fields = ('title', 'remark', 'distributor',)
#     list_display = ('master_code', 'title', 'remark', 'distributor',)
#     fields = ('master_code', 'title_show', 'title', 'img_show',
#               'img_url', 'scan_show', 'describe', 'describe_show', 'tel', 'company', 'master_uuid',
#               'remark', 'distributor', 'redirect_url', 'redirect_on',
#               'feedback_show',
#               )
class EquipmentAdmin(SelectModelAdmin):
readonly_fields = ('url',)
admin_site.register(Company)
admin_site.register(User)
admin_site.register(Template)
admin_site.register(Equipment,EquipmentAdmin)
admin_site.register(Record)
admin_site.register(RecordPoint)
admin_site.register(CheckPoint)
admin_site.register(Team)
# Register your models here.
| UTF-8 | Python | false | false | 1,017 | py | 5 | admin.py | 4 | 0.709931 | 0.695182 | 0 | 32 | 30.78125 | 94 |
PhillipNordwall/gpggen | 11,802,570,147,412 | 8f305c7890c877193f6a0e7ee46c9e7afd083b75 | b28809e411d5646c1472140fee20087330fb10b2 | /gpggen/__init__.py | 58e140e36bae8e45c99cc13315ed6db1df53f2d9 | ["MIT"] | permissive | https://github.com/PhillipNordwall/gpggen | fd2ee2ac0cd482620984e40e622ba95af96b41da | 98806b3b75bd955dc9591f0c630f90aebc614d49 | refs/heads/master | 2020-12-24T10:32:39.252326 | 2017-10-29T05:41:29 | 2017-10-29T05:41:29 | 73,152,969 | 0 | 0 | null | false | 2017-10-29T05:41:30 | 2016-11-08T05:39:16 | 2016-11-08T05:49:55 | 2017-10-29T05:41:30 | 7 | 0 | 0 | 0 | Python | false | null |
"""gpggen provides the capability of calling out to gpg to generate keys, and
check for a matching hex "word"
"""
import os
import re
import signal
import subprocess
import sys
OUTDIR = b"out"
WORDS = (b'BAD', b'DAD',
         b'DEAD', b'BEEF', b'FACE', b'1337', b'1234',
         rb'([BDF])00\1', b'FB5D', b'2600', b'BE[AE]D',
         b'F00D', b'CAFE', b'DEAF', b'BABE', b'C0DE',
         b'[01]{5,}', b'ABCDEF',
         b'0FF1CE', b'C0FFEE', b'BADDAD', rb'(.)\1{4,}',
         b'ABACABA')
RWORDS = re.compile(b'|'.join(WORDS))
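# The vanity patterns are OR-ed into a single compiled regex so each generated key name is scanned in one pass.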
def newname(gpgout):
"""newname takes output from gpg --gen-key and generates a new name based
in the form of 'id-fingerprint-subid'
Args:
gpgout: The output from gpg --gen-key
Returns:
string of the form 'id-fingerprint-subid'
Raises:
None
"""
exp = b'^[^/]*/([^ ]*).*\n.*Key fingerprint = (.*)\n[^/]*/([^ ]*).*'
reg = re.compile(exp)
return b'-'.join(b''.join(i.split(b' ')) for i in reg.findall(gpgout)[0])
def createkey():
"""createkey generates a public and private set of gpg keys.
It calls gpg to create a key and parses it's output to return a string in
the form of 'id-fingerprint-subid'. There is also the side effect of
having leaving two files in the CWD. X.pub and X.sec.
Args:
None
Returns:
string of the form 'id-fingerprint-subid'
Raises:
None
Side Effect:
Two files in the CWD of the form X.sec and X.pub. These are the
public and private keys generated by the gpg command
"""
subprocess.run("gpg --batch --gen-key gpginp".split(" "),
stderr=subprocess.DEVNULL)
proc = subprocess.run("gpg --with-fingerprint X.pub".split(" "),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return newname(proc.stdout)
def keeper(string):
"""keeper returns a true when a substring of s is in the WORDS list.
Args:
string : The string to check against.
Returns:
True if there is a substring in 'WORDS' that is in s, else false.
Raises:
None
"""
    words = RWORDS.findall(string)
if words:
return True
return False
def sigint(sig, frame):
"""sigint handler for SIGINT to do cleanup.
Args:
sig The signal number being called with.
frame: The current stack frame.
Returns:
None
Side Effect:
Removal of the temporary outputs, X.pub, and X.sec.
"""
os.remove('X.pub')
os.remove('X.sec')
sys.exit(0)
def main(args):
"""Program entry point.
This takes the arguments and performs the encryption.
Args:
args: The dictionary of arguments parsed by docopt
Returns:
None
Raises:
None
"""
signal.signal(signal.SIGINT, sigint)
while True:
name = createkey()
print('.', end='', flush=True)
if keeper(name):
print(name)
            os.rename(b'X.pub', OUTDIR + b'/' + name + b'.pub')
            os.rename(b'X.sec', OUTDIR + b'/' + name + b'.sec')
| UTF-8 | Python | false | false | 3,109 | py | 4 | __init__.py | 2 | 0.581216 | 0.571888 | 0 | 109 | 27.522936 | 77 |
dataduce/sparkle_api | 2,216,203,125,529 | 1a4cf52c3497bdfc90f93ecf56e452f31c9b5b09 | f4192f2e42989fdd6d03b9f7357cb06046df131c | /src/quizzing/models.py | 603e72fcbe0da1f9b44c768bb418543a43889789 | ["MIT"] | permissive | https://github.com/dataduce/sparkle_api | f333c584bfedc3b3aa10326758149000a8c5231c | bba3edf67ff5defec9f812513c4ee62572ecca8e | refs/heads/master | 2022-03-23T08:57:18.575913 | 2019-12-08T22:19:49 | 2019-12-08T22:19:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
import uuid, secrets
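# secrets.token_urlsafe(24) turns 24 random bytes into a 32-character URL-safe token, used below as FundedQuiz's primary key.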
def create_secret_id():
return secrets.token_urlsafe(24)
class Quiz(models.Model):
name = models.CharField(max_length=255)
description = models.TextField()
def get_questions(self):
        return self.questions.all()
def __str__(self):
return self.name
class Question(models.Model):
quiz = models.ForeignKey(Quiz, related_name='questions', on_delete=models.CASCADE)
youtube_url = models.CharField(max_length=255)
question_text = models.TextField()
order = models.IntegerField()
def get_answers(self):
        return self.answers.all()
def __str__(self):
return self.question_text
class Answer(models.Model):
question = models.ForeignKey(Question, related_name='answers', on_delete=models.CASCADE)
content = models.CharField(max_length=1000,
blank=False)
correct = models.BooleanField(blank=False,
default=False)
def __str__(self):
return self.content
class FundedQuiz(models.Model):
"""
Record which quizzes are funded.
"""
id = models.CharField(primary_key=True, default=create_secret_id, editable=False, max_length=255)
quiz = models.ForeignKey(Quiz, on_delete=models.PROTECT)
complete = models.BooleanField(default=False)
percent_correct = models.DecimalField(max_digits=3, decimal_places=0, default=0)
amount = models.IntegerField(default=0)
redeemed = models.BooleanField(default=False)
opennode_withdraw_id = models.CharField(max_length=255, null=True, blank=True)
opennode_charge_id = models.CharField(max_length=255, null=True, blank=True)
lightning_gift_order_id = models.CharField(max_length=255, null=True, blank=True) # leaving as option to use lightning node
| UTF-8 | Python | false | false | 1,848 | py | 7 | models.py | 5 | 0.679654 | 0.664502 | 0 | 60 | 29.783333 | 127 |
leetcode-notes/daily-algorithms-practice | 2,963,527,470,166 | 33ee789086ae8b78a9738322dfe6dd217393fcc0 | 487c45df5fcbe7fdf6df5a348f6fe163bbb22033 | /hackercup/recursive.py | 33fe9818f3c33264b194f1aee20d5467d69c3185 | ["Unlicense"] | permissive | https://github.com/leetcode-notes/daily-algorithms-practice | dba03ac1c55262f6bae7d5aa4dac590c3c067e75 | 2a03499ed0b403d79f6c8451c9a839991b23e188 | refs/heads/master | 2023-06-18T14:14:58.770797 | 2021-07-12T05:27:32 | 2021-07-12T05:27:32 | 264,057,786 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
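# Reachability matrix: mark the diagonal, then propagate 'Y' forward and backward along each row, gated by the inbound/outbound flags.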
def solve(n, inbound, outbound):
p = [["N"]*n for _ in range(n)]
for i in range(n):
p[i][i] = 'Y'
i = 0
while i < n:
for row in p:
for j in range(i+1, n):
if row[j-1] == 'N':
break
if row[j-1] == 'Y' and inbound[j] == 'Y' and outbound[j-1] == 'Y':
row[j] = 'Y'
i += 1
i = 0
while i < n:
for row in p:
for j in range(i-1, -1, -1):
if row[j+1] == 'N':
break
if row[j+1] == 'Y' and outbound[j+1] == 'Y' and inbound[j] == 'Y':
row[j] = 'Y'
i += 1
return p
# n = int(input())
# i = 0
# while i < n:
# a = int(input())
# ib = input()
# ob = input()
# ans = solve(a, ib, ob)
# print("Case #{}".format(i+1))
# for line in ans:
# print("".join(c for c in line))
# i += 1
g = open('output3.txt', 'w')
with open('sample.txt', 'r') as f:
num_cases = int(f.readline())
for i in range(num_cases):
a = int(f.readline())
        ip = f.readline()
op = f.readline()
res = solve(a, ip, op)
g.write("Case #{}:".format(i+1)+"\n")
for line in res:
g.write("".join(c for c in line)+"\n")
f.close()
g.close()
'''
N = 10
ib = "NYYYNNYYYY"
ob = "YYNYYNYYNY"
solve(10, ib, ob)
'''
# def solve(n):
# if n == 1:
# return 3
# if n == 2:
# return 1
# if n >= 3:
# return 2*solve(n-1)-solve(n-2)
# print(solve(4))
| UTF-8 | Python | false | false | 1,567 | py | 208 | recursive.py | 202 | 0.403957 | 0.383535 | 0 | 67 | 22.38806 | 82 |
LeoLozano10/Tema-2_Estructura_De_Datos | 13,649,406,097,967 | ce70dd31fc3b3668a5225f2f62ac069576cc2c17 | 6adac488e9eefaf69733ff8a3cc1e3a9835277ad | /Monedas/CambiodeMonedas.py | df1abe2f55724799feb5443d1901415a37db6ec7 | [] | no_license | https://github.com/LeoLozano10/Tema-2_Estructura_De_Datos | 141acc4fe60fe36e6d1862dab8d9c4836989468f | c01c4dd5a59738d1e85de655f099011c4c8111b9 | refs/heads/master | 2020-03-31T14:02:58.859146 | 2018-10-09T15:24:38 | 2018-10-09T15:24:38 | 152,277,713 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
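# Greedy change-making: take as many of each denomination as possible, from largest (200) to smallest (1).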
dinero = int(input("Cantidad:"))
uno = dinero // 200
restouno = dinero % 200
dos = restouno // 100
restodos = restouno % 100
tres = restodos // 50
restotres = restodos % 50
cuatro = restotres // 20
restocuatro = restotres % 20
cinco = restocuatro // 10
restocinco = restocuatro % 10
seis = restocinco // 5
restoseis = restocinco % 5
siete = restoseis // 2
restosiete = restoseis % 2
ocho = restosiete // 1  # restosiete is already 0 or 1, so it is the count of 1-peso coins
# base case
if dinero == 0:
    print("0")
else:
    if uno >= 1: print(uno, "200-peso bill(s)")
    if dos >= 1: print(dos, "100-peso bill(s)")
    if tres >= 1: print(tres, "50-peso bill(s)")
    if cuatro >= 1: print(cuatro, "20-peso coin(s)")
    if cinco >= 1: print(cinco, "10-peso coin(s)")
    if seis >= 1: print(seis, "5-peso coin(s)")
    if siete >= 1: print(siete, "2-peso coin(s)")
    if ocho >= 1: print(ocho, "1-peso coin(s)")
| UTF-8 | Python | false | false | 885 | py | 3 | CambiodeMonedas.py | 2 | 0.654237 | 0.59209 | 0 | 31 | 27.580645 | 56 |
smherold4/ucsd-metabolomics-phenotype-data | 17,231,408,814,243 | 36374de6186d26cc6cb973344cfbb165b9229e0e | 292f7157a243ce66972af13494f93625024b4fbd | /requirements-linux.py | 7e50a3774629e208883c204d2c8319f52e295272 | [] | no_license | https://github.com/smherold4/ucsd-metabolomics-phenotype-data | d76383583790547f013d12618313fb48b55de427 | 86d1397218162dd9c6bd784c1ab81bf8d1564f67 | refs/heads/master | 2022-07-07T05:46:05.748602 | 2019-11-27T16:39:53 | 2019-11-27T16:39:53 | 197,568,397 | 1 | 0 | null | false | 2021-06-01T23:58:35 | 2019-07-18T10:47:12 | 2020-03-21T19:12:22 | 2021-06-01T23:58:33 | 311 | 1 | 0 | 5 | Python | false | false |
astroid==1.6.6
backports.functools-lru-cache==1.5
certifi==2019.6.16
configparser==3.7.4
defusedxml==0.5.0
elasticsearch==6.4.0
enum34==1.1.6
futures==3.3.0
isort==4.3.21
lazy-object-proxy==1.4.1
mccabe==0.6.1
netifaces==0.10.4
numpy==1.16.4
pandas==0.24.2
pandasticsearch==0.5.3
pipenv==2018.11.26
psycopg2-binary==2.8.3
pycurl==7.19.5.1
pylint==1.9.5
python-dateutil==2.8.0
python-dotenv==0.10.3
pytz==2019.1
requests==2.22.0
singledispatch==3.4.0.3
six==1.12.0
SQLAlchemy==1.3.5
typing==3.7.4
urllib3==1.25.3
virtualenv==16.6.2
virtualenv-clone==0.5.3
wrapt==1.11.2
WSDiscovery==0.2
| UTF-8 | Python | false | false | 586 | py | 42 | requirements-linux.py | 33 | 0.713311 | 0.505119 | 0 | 32 | 17.3125 | 34 |
Qboi123/Qplay-Bubbles-Alpha | 18,614,388,284,960 | 0bd1bb525b5bc013781fcc4ce67f7111c0a6052b | 2c62eced6b0364d6546514c727382c37619651f6 | /string2.py | 9fc665f58f9753969253b8535127e9e3f1e0cf68 | [] | no_license | https://github.com/Qboi123/Qplay-Bubbles-Alpha | 77c222cf82132c6d3c1a9005c8a6fd86b4ac629c | 4c3ffe1f6400a545236ca0bfd386c6f7613dfb86 | refs/heads/master | 2022-11-22T00:54:26.296218 | 2022-11-15T16:20:03 | 2022-11-15T16:20:03 | 230,899,952 | 0 | 0 | null | false | 2022-11-15T16:20:04 | 2019-12-30T11:00:02 | 2022-10-27T21:39:32 | 2022-11-15T16:20:03 | 135,267 | 0 | 0 | 0 | Python | false | false |
join = str.join
split = str.split
splitLines = str.splitlines
encode = str.encode
index = str.index
count = str.count
replace = str.replace
isDecimal = str.isdecimal
isDigit = str.isdigit
isNumeric = str.isnumeric
isAlpha = str.isalpha
isAlphabetic = str.isalpha
isAlNum = str.isalnum
isAlphaNumeric = str.isalnum
isASCII = str.isascii
isId = str.isidentifier
isIdentifier = str.isidentifier
isPrintable = str.isprintable
isSpace = str.isspace
isWhitespace = str.isspace
isTitle = str.istitle
isUpper = str.isupper
isLower = str.islower
isUppercase = str.isupper
isLowercase = str.islower
upper = str.upper
lower = str.lower
title = str.title
capitalize = str.capitalize
caseFold = str.casefold
center = str.center
endsWith = str.endswith
startsWith = str.startswith
contains = str.__contains__
lJust = str.ljust
lStrip = str.lstrip
leftJust = str.ljust
leftStrip = str.lstrip
add = str.__add__
length = str.__len__
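# Hedged usage sketch (added for illustration, not part of the original file).
# The aliases above are unbound str methods, so the target string is passed as
# the first positional argument:
if __name__ == "__main__":
    assert upper("hello") == "HELLO"
    assert contains("hello", "ell")
    assert length("hello") == 5
    assert join(", ", ["a", "b"]) == "a, b"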
|
UTF-8
|
Python
| false | false | 956 |
py
| 56 |
string2.py
| 36 | 0.736402 | 0.736402 | 0 | 40 | 21.9 | 31 |
Parallel-in-Time/pySDC
| 9,766,755,645,552 |
7bc348ed2a6e54aadf3c89c0910b9a31f6e5ce4f
|
7fdac5209f86de756b9a8123a0911b70738eceeb
|
/pySDC/projects/deprecated/node_failure/postproc_hard_faults_test.py
|
3fbfe745676dad9e2a45fa84d4194b777c3d0609
|
[
"BSD-2-Clause"
] |
permissive
|
https://github.com/Parallel-in-Time/pySDC
|
edc66e399f6066effc5aaa376883e88e06b5332b
|
1a51834bedffd4472e344bed28f4d766614b1537
|
refs/heads/master
| 2023-08-30T23:17:56.017934 | 2023-08-30T05:42:00 | 2023-08-30T05:42:00 | 26,165,004 | 30 | 31 |
BSD-2-Clause
| false | 2023-09-14T06:40:13 | 2014-11-04T10:56:53 | 2023-09-09T12:35:16 | 2023-09-14T06:40:11 | 350,235 | 27 | 30 | 19 |
Jupyter Notebook
| false | false |
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
# import os
def create_plots(setup, cwd=''):
"""
Function to create heatmaps for faults at different steps and iterations
Args:
setup (str): name of the setup (heat or advection)
cwd: current working directory
"""
axis_font = {'fontname': 'Arial', 'size': '8', 'family': 'serif'}
fs = 8
fields = [
(setup + '_results_hf_SPREAD.npz', 'SPREAD'),
(setup + '_results_hf_SPREAD_PREDICT.npz', 'SPREAD_PREDICT'),
(setup + '_results_hf_INTERP.npz', 'INTERP'),
(setup + '_results_hf_INTERP_PREDICT.npz', 'INTERP_PREDICT'),
]
vmin = 99
vmax = 0
for file, _ in fields:
infile = np.load(cwd + 'data/' + file)
data = infile['iter_count'].T
data = data - data[0, 0]
vmin = min(vmin, data.min())
vmax = max(vmax, data.max())
for file, strategy in fields:
infile = np.load(cwd + 'data/' + file)
data = infile['iter_count'].T
data = data - data[0, 0]
ft_iter = infile['ft_iter']
ft_step = infile['ft_step']
rcParams['figure.figsize'] = 3.0, 2.5
fig, ax = plt.subplots()
cmap = plt.get_cmap('Reds', vmax - vmin + 1)
pcol = plt.pcolor(data, cmap=cmap, vmin=vmin, vmax=vmax)
pcol.set_edgecolor('face')
plt.axis([ft_step[0], ft_step[-1] + 1, ft_iter[0] - 1, ft_iter[-1]])
ticks = np.arange(int(vmin) + 1, int(vmax) + 2, 2)
tickpos = np.linspace(ticks[0] + 0.5, ticks[-1] - 0.5, len(ticks))
cax = plt.colorbar(pcol, ticks=tickpos, format='%2i')
plt.tick_params(axis='both', which='major', labelsize=fs)
cax.set_ticklabels(ticks)
cax.set_label(r'$K_\mathrm{add}$', **axis_font)
cax.ax.tick_params(labelsize=fs)
ax.set_xlabel('affected step', labelpad=1, **axis_font)
ax.set_ylabel(r'affected iteration ($K_\mathrm{fault}$)', labelpad=1, **axis_font)
ax.set_xticks(np.arange(len(ft_step)) + 0.5, minor=False)
ax.set_xticklabels(ft_step, minor=False)
ax.set_yticks(np.arange(len(ft_iter)) + 0.5, minor=False)
ax.set_yticklabels(ft_iter, minor=False)
# Set every second label to invisible
for label in ax.xaxis.get_ticklabels()[::2]:
label.set_visible(False)
ax.tick_params(pad=2)
# plt.tight_layout()
# fname = setup+'_iteration_counts_hf_'+strategy+'.png'
fname = 'data/' + setup + '_iteration_counts_hf_' + strategy + '.png'
plt.savefig(fname, transparent=True, bbox_inches='tight')
# plt.savefig(fname, bbox_inches='tight')
# os.system('pdfcrop ' + fname + ' ' + fname)
plt.close('all')
if __name__ == "__main__":
create_plots(setup='HEAT')
create_plots(setup='ADVECTION')
|
UTF-8
|
Python
| false | false | 2,887 |
py
| 617 |
postproc_hard_faults_test.py
| 475 | 0.568756 | 0.555594 | 0 | 96 | 29.072917 | 90 |
sal-uva/data-journalisme
| 16,638,703,314,512 |
9a9c968ba4fdde73902679caa3e5383f2f8e357e
|
c748c150c09ff5fde9e52490302f13169770ead8
|
/racisme-in-nederland/scrapeTweedeKamer.py
|
401d41ef127d73df73c77bbb0d9cf0db0266ec25
|
[] |
no_license
|
https://github.com/sal-uva/data-journalisme
|
29403d0f1d7d43987e278b765af8e5b1d938a15d
|
a5b740a555aa3b1cea1b1029ad0374696e648444
|
refs/heads/master
| 2020-07-30T02:42:37.962247 | 2019-10-08T12:18:45 | 2019-10-08T12:18:45 | 210,059,642 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sqlite3
import pandas as pd
import io
import os
import urllib.request, json
import untangle
import time
import ssl
import xmltodict
import pickle as p
from TweedeKamerToCsv import TweedeKamerToCsv
from xml.parsers.expat import ExpatError
from bs4 import BeautifulSoup
user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
def scrapeTweedeKamer():
#getHandelingen()
li_xml = [file for file in os.listdir('data/politiek/handelingen/xml/')]
li_metadata = [file for file in os.listdir('data/politiek/handelingen/metadata/')]
li_input = [file for file in li_xml if 'metadata-' + file not in li_metadata]
print(len(li_input))
getMetaData(li_input)
#li_input = [file for file in os.listdir('data/politiek/handelingen/xml/')]
#print(li_input)
#li_input = [file for file in li_input if file.startswith('h-tk-2015') or file.startswith('h-tk-2016') or file.startswith('h-tk-2017') or file.startswith('h-tk-2018')]
#getMetaData(li_input)
#getHandelingen()
#TweedeKamerToCsv()
# li_input = [file for file in os.listdir('data/politiek/kamervragen/') if file.endswith('.p')]
# print(li_input)
# getMetaData(li_input)
def getHandelingen(year = 1996, vol = 95):
"""
Collects all the Tweede Kamer handelingen from officielebekendmakingen.nl
year: the starting year of the loop
    vol: the starting volume number in the respective year
"""
years_left = True
docs_left = True
first_fail = False
doc = 1
# Starting dossier number
dossier_no = vol
dossiers = True
# loop through all pages listing 30 documents
while years_left:
# docname = 'h-tk-' + str(year) + str(year + 1) + '-' + str(vol) + '-' + str(doc) + '.p'
# if os.path.isfile('data/politiek/handelingen/xml/' + docname) and (vol != 1 and doc != 1):
# print('Document ' + docname + ' already exists')
# else:
# Document numbers are different before 2011, as they rely on page numbers
if year < 2011:
# For loop through the dossier numbers for the year
dossier_url = 'https://zoek.officielebekendmakingen.nl/handelingen/TK/' + str(year) + '-' + str(year + 1) + '/' + str(dossier_no)
print('Requesting:', dossier_url)
request = urllib.request.Request(dossier_url, headers=headers)
# Bypass SSL - not a problem since we're only requesting one site
gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# check if the page exists
try:
response = urllib.request.urlopen(request, context=gcontext)
# if there's a HTTP error
except ConnectionResetError as e:
print('HTTP error when requesting dossier page')
print('Reason:', e)
print('Sleeping for a minute')
time.sleep(60)
pass
except urllib.error.HTTPError as httperror:
print('HTTP error when requesting dossier page')
print('Reason:', httperror.code)
pass
else:
# Get a list of the links for the documents
html = response.read()
soup = BeautifulSoup(html, features="lxml")
anchors = soup.find('div', {'class': 'lijst'})
# Check if it's not a 404 HTML page
if not anchors:
# Workaround for empty doc in 1997 (https://zoek.officielebekendmakingen.nl/handelingen/TK/1997-1998/11)
if (year == 1997 and dossier_no == 11):
dossier_no = dossier_no + 1
else:
dossier_no = 1
year = year + 1
print('No dossiers found for this year. Continuing to ' + str(year))
# If the dossier exists and has documents, get the xml urls and save them as pickled dictonaries
else:
anchors = anchors.find_all('a')
li_documents = []
for a in anchors:
doc_name = a['href'].split('?')[0]
print(doc_name)
if 'h-tk' not in doc_name:
doc_name = 'h-' + doc_name.split('h-')[1]
else:
doc_name = 'h-tk' + doc_name.split('h-tk')[1]
#print(doc_name)
li_documents.append(doc_name)
print(li_documents)
for document_name in li_documents:
url = 'https://zoek.officielebekendmakingen.nl/' + document_name + '.xml'
print(url)
# check if the page exists
try:
response = urllib.request.urlopen(url, context=gcontext)
# if there's a HTTP error
except ConnectionResetError as e:
print('HTTP error when requesting thread')
print('Reason:', e)
print('Sleeping for a minute')
time.sleep(60)
pass
except urllib.error.HTTPError as httperror:
print('HTTP error when requesting thread')
print('Reason:', httperror.code)
pass
else:
file = response.read()
if not os.path.exists('data/politiek/handelingen/xml/'):
os.makedirs('data/politiek/handelingen/xml/')
di_handelingen = xmltodict.parse(file)
#print(di_handelingen)
print('data/politiek/handelingen/xml/' + document_name + '.p')
p.dump(di_handelingen, open('data/politiek/handelingen/xml/' + document_name + '.p', 'wb'))
dossier_no = dossier_no + 1
print('Dossier no:', dossier_no)
else:
# Handelingen have a volume (vol) and document number (doc),
# so we'll have to loop through both
document_name = 'h-tk-' + str(year) + str(year + 1) + '-' + str(vol) + '-' + str(doc)
url = 'https://zoek.officielebekendmakingen.nl/' + document_name + '.xml'
print('Requesting:')
print(url)
request = urllib.request.Request(url, headers=headers)
# Bypass SSL - not a problem since we're only requesting one site
gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# check if the page exists
try:
response = urllib.request.urlopen(request, context=gcontext)
# if there's a HTTP error
except ConnectionResetError as e:
print('HTTP error when requesting thread')
print('Reason:', e)
print('Sleeping for a minute')
time.sleep(60)
pass
except urllib.error.HTTPError as httperror:
print('HTTP error when requesting thread')
print('Reason:', httperror.code)
pass
else:
info = response.info()
print(info.get_content_subtype()) # -> html
# Check if the response is a 404 handle page or a XML file
if info.get_content_subtype() != 'xml':
print('No doc left this volume')
# If fetching already failed after increasing the volume number, proceed to the next year instead
if first_fail:
year = year + 1
else:
vol = vol + 1
doc = 1
first_fail = True
else:
file = response.read()
if not os.path.exists('data/politiek/handelingen/xml/'):
os.makedirs('data/politiek/handelingen/xml/')
try:
di_handelingen = xmltodict.parse(file)
p.dump(di_handelingen, open('data/politiek/handelingen/xml/' + document_name + '.p', 'wb'))
print(di_handelingen)
except ExpatError as e:
print('Couldn\'t parse')
else:
print('Continuing')
first_fail = False
doc = doc + 1
# End if the year to check is 2019
if year == 2019:
years_left = False
def getKamerVragen(year = 1995, counter = 0):
"""
Collects all the Tweede Kamer kamervragen from officielebekendmakingen.nl
year: the starting year of the loop
    counter: the starting document in the respective year
"""
documents_left = True
while documents_left:
document_name = 'ah-tk-' + str(year) + str(year + 1) + '-' + str(counter)
url = 'https://zoek.officielebekendmakingen.nl/' + document_name + '.xml'
print('Requesting:')
print(url)
request = urllib.request.Request(url, headers=headers)
# Bypass SSL - not a problem since we're only requesting one site
gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# check if the page exists
try:
response = urllib.request.urlopen(request, context=gcontext)
# if there's a HTTP error
except ConnectionResetError as e:
print('HTTP error when requesting thread')
print('Reason:', e)
print('Sleeping for a minute')
time.sleep(60)
pass
except urllib.error.HTTPError as httperror:
print('HTTP error when requesting thread')
print('Reason:', httperror.code)
pass
else:
info = response.info()
print(info.get_content_subtype()) # -> html
# Check if the response is a 404 handle page or a XML file
if info.get_content_subtype() != 'xml':
print('No file left this year')
year = year + 1
counter = 0
else:
file = response.read()
if not os.path.exists('data/politiek/kamervragen/'):
os.makedirs('data/politiek/kamervragen/')
p.dump(file, open('data/politiek/kamervragen/' + document_name + '.p', 'wb'))
try:
di_kamervragen = xmltodict.parse(file)
print(di_kamervragen)
except ExpatError as e:
print('No file left', e)
year = year + 1
counter = 0
else:
print('continue')
#time.sleep(4)
counter = counter + 1
# End if the year to check is 2019
if year == 2019:
documents_left = False
def getMetaData(li_input):
"""
Gets the metadata from Officiële Bekendmakingen documents
Input: a list of document names
"""
for doc in li_input:
if ('metadata-' + doc) not in os.listdir('data/politiek/handelingen/metadata/'):
url = 'https://zoek.officielebekendmakingen.nl/' + doc[:-2] + '/metadata.xml'
print('Requesting:')
print(url)
request = urllib.request.Request(url, headers=headers)
# Bypass SSL - not a problem since we're only requesting one site
gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# check if the page exists
try:
response = urllib.request.urlopen(request, context=gcontext)
# if there's a HTTP error
except ConnectionResetError as e:
print('HTTP error when requesting thread')
print('Reason:', e)
print('Sleeping for a minute')
time.sleep(60)
pass
except urllib.error.HTTPError as httperror:
print('HTTP error when requesting thread')
print('Reason:', httperror.code)
pass
else:
info = response.info()
print(info.get_content_subtype()) # -> html
# Check if the response is a 404 handle page or a XML file
if info.get_content_subtype() != 'xml':
print('Error... xml file not found.')
quit()
else:
file = response.read()
                    if not os.path.exists('data/politiek/handelingen/metadata/'):
                        os.makedirs('data/politiek/handelingen/metadata/')
p.dump(file, open('data/politiek/handelingen/metadata/metadata-' + doc, 'wb'))
di_kamervragen = xmltodict.parse(file)
print(di_kamervragen)
if __name__ == '__main__':
scrapeTweedeKamer()
|
UTF-8
|
Python
| false | false | 10,822 |
py
| 37 |
scrapeTweedeKamer.py
| 24 | 0.652435 | 0.636078 | 0 | 369 | 28.327913 | 168 |
kics/pysample
| 8,684,423,913,272 |
9e59c729e129285d27abbf8f1744f5b74b24a021
|
af71382848705c8ba1a6514adcd03539b420f0ff
|
/3rd_party/solrpy/time_sort.py
|
21f2950c6858a4a3cdfb8b28661d10e85aa7fee9
|
[] |
no_license
|
https://github.com/kics/pysample
|
7269dc5c9cc537650b6a864be2d751fc3eeb248c
|
687ce6e5cdfc39df770363f01c5242bea51bc7ff
|
refs/heads/master
| 2016-08-06T09:37:25.918037 | 2014-05-15T23:14:02 | 2014-05-15T23:14:02 | 14,380,759 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
#coding:utf8
# Created: 2013-11-06
#
import timeit
setup='''
import solr as solrpy
solr = solrpy.Solr('http://localhost:8983/solr/shinsho_jawiki')
res = solr.select(q="*:*",qt='tvrh',tv='true',tv_tf='true',rows=1,fields=[u'text',u'id'])
word_list = [(key,tvs) for key,tvs in res.termVectors.items() if key != u'uniqueKeyFieldName'][0][1][u'text'].keys()
words = map(lambda x:x.encode('utf8'),word_list[:10])
'''
stmt1='''
for word in words:
res = solr.select(q=word,fq=u'int_id:[* TO -1]',rows=10000,fields=[u'int_id'],sort=[u'int_id'],omitHeader='true',score=False)
'''
stmt2='''
for word in words:
res = solr.select(q=word,fq=u'int_id:[* TO -1]',rows=10000,fields=[u'int_id'],omitHeader='true',score=False)
'''
print timeit.timeit(stmt1,setup,number=1)
print timeit.timeit(stmt2,setup,number=1)
|
UTF-8
|
Python
| true | false | 830 |
py
| 345 |
time_sort.py
| 309 | 0.66506 | 0.620482 | 0 | 27 | 29.703704 | 129 |
YPCrumble/python-amazon-paapi
| 7,808,250,578,960 |
4332fb89bed05f24c407f03c17fd998d3afdcedb
|
8a064480709e9144f67cb5aec799b4a13719b3ec
|
/amazon_paapi/models/variations_result.py
|
f22cfcd5ecf7acd1c968965356a5d7ce232275db
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
https://github.com/YPCrumble/python-amazon-paapi
|
352312601ebf2a16efe79d270303d600e92a6627
|
4aa9c7597123580eed24bf1ec9bacee0fedeb8a0
|
refs/heads/master
| 2022-11-03T01:32:29.324826 | 2022-09-22T18:38:57 | 2022-09-22T18:38:57 | 246,726,325 | 0 | 0 |
MIT
| true | 2020-03-12T02:33:49 | 2020-03-12T02:33:48 | 2020-03-11T23:48:57 | 2020-03-11T21:53:42 | 180 | 0 | 0 | 0 | null | false | false |
from typing import List
from .item_result import Item
from ..sdk.models import VariationsResult, VariationSummary
class ApiPrice:
amount: float
currency: str
display_amount: str
class ApiVariationDimension:
display_name: str
name: str
values: List[str]
class ApiVariationPrice:
highest_price: ApiPrice
lowest_price: ApiPrice
class ApiVariationSummary(VariationSummary):
page_count: int
price: ApiVariationPrice
variation_count: int
variation_dimensions: List[ApiVariationDimension]
class VariationsResult(VariationsResult):
items: List[Item]
variation_summary: ApiVariationSummary
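# Hedged usage sketch (added for illustration; the attribute names follow the
# type annotations above, while the client call itself is an assumption):
#   result = api.get_variations(asin)  # hypothetical client call
#   print(result.variation_summary.price.lowest_price.display_amount)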
|
UTF-8
|
Python
| false | false | 645 |
py
| 62 |
variations_result.py
| 51 | 0.755039 | 0.755039 | 0 | 30 | 20.5 | 59 |
lotharhill/LRecruitMatcher
| 4,011,499,492,445 |
26caa022c35aff69059b2a703f9206e60b5fc302
|
709da7521a12998757908b1216f6249e7c7eaefd
|
/recruit/matcher/migrations/0008_interview.py
|
5b5aa8c0201c39f3e42e0de3f2eda8ab196fccc7
|
[] |
no_license
|
https://github.com/lotharhill/LRecruitMatcher
|
126922d065b5326dd8796ad2e6eda7285d30e8dd
|
164cf129d2b6b7ebb426c417a1e5fe6e132b6d0e
|
refs/heads/master
| 2021-01-23T06:55:40.721393 | 2017-01-31T05:13:23 | 2017-01-31T05:13:23 | 80,494,694 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-03 03:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('matcher', '0007_position_description'),
]
operations = [
migrations.CreateModel(
name='Interview',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='match')),
('created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('scheduled_time', models.DateTimeField(blank=True)),
('candidate_feedback', models.TextField(blank=True, max_length=1000)),
('client_feedback', models.TextField(blank=True, max_length=1000)),
('interview_with', models.ManyToManyField(to='matcher.Person')),
('match', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='matcher.Match')),
],
options={
'ordering': ('-created',),
},
),
]
|
UTF-8
|
Python
| false | false | 1,366 |
py
| 31 |
0008_interview.py
| 14 | 0.597365 | 0.576135 | 0 | 34 | 39.176471 | 119 |
kylehennig/wec2019
| 9,259,949,506,580 |
ac3e9a00db1d7203d48851d021dcdcd24185edab
|
8be07459b70e0edeb84d1173c75d4f3748eabd7a
|
/bot/bot_client.py
|
53038296f0228b016c15e702780f847f85c71492
|
[] |
no_license
|
https://github.com/kylehennig/wec2019
|
112fd64eab2fba5df20e14b6c8c4437db47eeb68
|
7472f7e37b7e70678bf180941a2983c640814fe3
|
refs/heads/master
| 2020-04-17T08:31:37.802035 | 2019-01-24T16:06:19 | 2019-01-24T16:06:19 | 166,415,965 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
import tornado.ioloop
from bot.bot import make_move
from bot.web_socket_client import WebSocketClient
from server.board import Board
from server.message import Message
class BotClient(WebSocketClient):
"""Handles websockets for bots"""
def __init__(self, url, size):
super().__init__(url)
message = Message("JOIN", {"size": size})
self.write_message(message.json())
self.last_message = "JOIN"
def on_message(self, message):
"""Handles messages from server"""
message = Message.decode(message)
if self.last_message == "JOIN":
message = Message("BOARD", "")
self.write_message(message.json())
self.last_message = "BOARD"
elif self.last_message == "BOARD":
board = message.body["board"]
board = Board.from_json(board)
x, y = make_move(board)
message = Message("MOVE", {"x": x, "y": y})
self.write_message(message.json())
self.last_message = "MOVE"
elif self.last_message == "MOVE":
message = Message("BOARD", "")
self.write_message(message.json())
self.last_message = "BOARD"
elif message.header == "ERROR":
print("ERROR")
print(message.body)
sys.exit(1)
else:
print("Unrecognized message header: {}".format(message.header))
def main():
url = "ws://localhost:8888/player"
if len(sys.argv) == 1:
BotClient(url, 100)
elif len(sys.argv) == 2:
size = int(sys.argv[1])
BotClient(url, size)
else:
print("Usage: python3 run_bot.py <size>")
tornado.ioloop.IOLoop.current().start()
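if __name__ == "__main__":
    # Added guard (an assumption: the usage string suggests run_bot.py is the
    # entry point, but this lets the module run standalone as well).
    main()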
|
UTF-8
|
Python
| false | false | 1,735 |
py
| 14 |
bot_client.py
| 12 | 0.5683 | 0.561383 | 0 | 57 | 29.438596 | 75 |
garystafford/aws-iot-analytics-demo
| 10,608,569,240,840 |
bcb34b257ba244c0751b1608de2dc22c52a886d6
|
601fc8d8405334555d6fec832f9b7b7e6673ab8c
|
/device_scripts/MQ.py
|
8c66faa1078dd07d2e04711baa809c960879e984
|
[
"MIT"
] |
permissive
|
https://github.com/garystafford/aws-iot-analytics-demo
|
96f825642dd4c5cd638613adb0277baae22ea1fe
|
5aaa4f9456764a7f1e7ebe964670fb430ea36154
|
refs/heads/master
| 2022-12-04T13:57:05.232535 | 2020-08-27T20:33:14 | 2020-08-27T20:33:14 | 279,300,653 | 13 | 10 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import math
import time
from MCP3008 import MCP3008
# References:
# https://tutorials-raspberrypi.com/configure-and-read-out-the-raspberry-pi-gas-sensor-mq-x/
# http://sandboxelectronics.com/?p=165
class MQ:
######################### Hardware Related Macros #########################
MQ_PIN = 0 # define which analog input channel you are going to use (MCP3008)
RL_VALUE = 5 # define the load resistance on the board, in kilo ohms
    RO_CLEAN_AIR_FACTOR = 9.83  # RO_CLEAN_AIR_FACTOR = (Sensor resistance in clean air)/RO,
# which is derived from the chart in datasheet
######################### Software Related Macros #########################
    CALIBRATION_SAMPLE_TIMES = 50  # define how many samples you are going to take in the calibration phase
    CALIBRATION_SAMPLE_INTERVAL = 500  # define the time interval (in milliseconds) between samples in the
    # calibration phase
    READ_SAMPLE_INTERVAL = 50  # define the time interval (in milliseconds) between samples in
READ_SAMPLE_TIMES = 5 # define how many samples you are going to take in normal operation
# normal operation
######################### Application Related Macros ######################
GAS_LPG = 0
GAS_CO = 1
GAS_SMOKE = 2
def __init__(self, Ro=10, analogPin=0):
self.Ro = Ro
self.MQ_PIN = analogPin
self.adc = MCP3008()
self.LPGCurve = [2.3, 0.21, -0.47] # two points are taken from the curve.
# with these two points, a line is formed which is "approximately equivalent"
# to the original curve.
# data format:{ x, y, slope}; point1: (lg200, 0.21), point2: (lg10000, -0.59)
self.COCurve = [2.3, 0.72, -0.34] # two points are taken from the curve.
# with these two points, a line is formed which is "approximately equivalent"
# to the original curve.
# data format:[ x, y, slope]; point1: (lg200, 0.72), point2: (lg10000, 0.15)
self.SmokeCurve = [2.3, 0.53, -0.44] # two points are taken from the curve.
# with these two points, a line is formed which is "approximately equivalent"
# to the original curve.
# data format:[ x, y, slope]; point1: (lg200, 0.53), point2: (lg10000, -0.22)
print("Calibrating...")
self.Ro = self.MQCalibration(self.MQ_PIN)
print("Calibration is done...\n")
print("Ro=%f kohm" % self.Ro)
def MQPercentage(self):
val = {}
read = self.MQRead(self.MQ_PIN)
val["GAS_LPG"] = self.MQGetGasPercentage(read / self.Ro, self.GAS_LPG)
val["CO"] = self.MQGetGasPercentage(read / self.Ro, self.GAS_CO)
val["SMOKE"] = self.MQGetGasPercentage(read / self.Ro, self.GAS_SMOKE)
return val
######################### MQResistanceCalculation #########################
# Input: raw_adc - raw value read from adc, which represents the voltage
# Output: the calculated sensor resistance
# Remarks: The sensor and the load resistor forms a voltage divider. Given the voltage
# across the load resistor and its resistance, the resistance of the sensor
# could be derived.
############################################################################
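    # Added derivation sketch (an assumption spelled out from the remarks
    # above, not in the original): with Vout = Vcc * RL / (Rs + RL) and a
    # 10-bit ADC reading raw_adc = 1023 * Vout / Vcc, solving for Rs gives
    #   Rs = RL * (Vcc / Vout - 1) = RL * (1023 - raw_adc) / raw_adc
    # which is exactly the expression returned below.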
def MQResistanceCalculation(self, raw_adc):
        return float(self.RL_VALUE * (1023.0 - raw_adc) / float(raw_adc))
######################### MQCalibration ####################################
# Input: mq_pin - analog channel
# Output: Ro of the sensor
    # Remarks: This function assumes that the sensor is in clean air. It uses
    #          MQResistanceCalculation to calculate the sensor resistance in clean air
# and then divides it with RO_CLEAN_AIR_FACTOR. RO_CLEAN_AIR_FACTOR is about
# 10, which differs slightly between different sensors.
############################################################################
def MQCalibration(self, mq_pin):
val = 0.0
        for i in range(self.CALIBRATION_SAMPLE_TIMES):  # take multiple samples
val += self.MQResistanceCalculation(self.adc.read(mq_pin))
time.sleep(self.CALIBRATION_SAMPLE_INTERVAL / 1000.0)
        val = val / self.CALIBRATION_SAMPLE_TIMES  # calculate the average value
val = val / self.RO_CLEAN_AIR_FACTOR # divided by RO_CLEAN_AIR_FACTOR yields the Ro
# according to the chart in the datasheet
        return val
######################### MQRead ##########################################
# Input: mq_pin - analog channel
# Output: Rs of the sensor
    # Remarks: This function uses MQResistanceCalculation to calculate the sensor resistance (Rs).
    #          The Rs changes as the sensor is in different concentrations of the target
# gas. The sample times and the time interval between samples could be configured
# by changing the definition of the macros.
############################################################################
def MQRead(self, mq_pin):
rs = 0.0
for i in range(self.READ_SAMPLE_TIMES):
rs += self.MQResistanceCalculation(self.adc.read(mq_pin))
time.sleep(self.READ_SAMPLE_INTERVAL / 1000.0)
rs = rs / self.READ_SAMPLE_TIMES
return rs
######################### MQGetGasPercentage ##############################
# Input: rs_ro_ratio - Rs divided by Ro
# gas_id - target gas type
# Output: ppm of the target gas
# Remarks: This function passes different curves to the MQGetPercentage function which
# calculates the ppm (parts per million) of the target gas.
############################################################################
def MQGetGasPercentage(self, rs_ro_ratio, gas_id):
if (gas_id == self.GAS_LPG):
return self.MQGetPercentage(rs_ro_ratio, self.LPGCurve)
elif (gas_id == self.GAS_CO):
return self.MQGetPercentage(rs_ro_ratio, self.COCurve)
elif (gas_id == self.GAS_SMOKE):
return self.MQGetPercentage(rs_ro_ratio, self.SmokeCurve)
return 0
######################### MQGetPercentage #################################
# Input: rs_ro_ratio - Rs divided by Ro
# pcurve - pointer to the curve of the target gas
# Output: ppm of the target gas
    # Remarks: By using the slope and a point of the line, the x (logarithmic value of ppm)
    #          of the line can be derived if y (rs_ro_ratio) is provided. As it is a
# logarithmic coordinate, power of 10 is used to convert the result to non-logarithmic
# value.
############################################################################
def MQGetPercentage(self, rs_ro_ratio, pcurve):
return (math.pow(10, (((math.log(rs_ro_ratio) - pcurve[1]) / pcurve[2]) + pcurve[0])))
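if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not part of the original
    # file); assumes an MQ sensor wired to MCP3008 channel 0, as the class
    # defaults imply.
    mq = MQ(analogPin=0)
    while True:
        perc = mq.MQPercentage()
        print("LPG: %g ppm, CO: %g ppm, Smoke: %g ppm"
              % (perc["GAS_LPG"], perc["CO"], perc["SMOKE"]))
        time.sleep(1)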
|
UTF-8
|
Python
| false | false | 6,923 |
py
| 12 |
MQ.py
| 5 | 0.565217 | 0.545139 | 0 | 138 | 49.15942 | 108 |
cipriancus/CloudComputing
| 13,752,485,312,296 |
dccc7c0c960abe12eaf16441f36cfb2f0a52bdef
|
95b6aaac1e3f01c54fa8bf6a54ccdd83cfc79dd2
|
/Laboratorul1/main.py
|
7062cfcee8f0c34b18dd787f0472086859185abd
|
[] |
no_license
|
https://github.com/cipriancus/CloudComputing
|
2fbc6e4de74914c51dd09d78b23986680bf0b682
|
3e85b84c2d76d986f3081eb6e33b7e400d0a8f05
|
refs/heads/master
| 2022-03-07T08:56:26.803068 | 2022-02-21T15:04:34 | 2022-02-21T15:04:34 | 83,019,212 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import urllib
import json
from urllib import request
city_dict = {'Iasi': '675810', 'Suceava': '665849', 'Vaslui': '663118', 'Covasna': '680428'}
city_name = input('Name of City')
AppKEY = "99bc216d5e4421eeb06fbbb0e974427d"
urlManagement = 'http://api.openweathermap.org/data/2.5/forecast/city?id=' + city_dict[city_name] + '&APPID=' + AppKEY
print(urlManagement)
try:
response = urllib.request.urlopen(urlManagement).read()
print(response)
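    # Hedged follow-up sketch (added): the unused `json` import above suggests
    # the payload was meant to be parsed, e.g.:
    data = json.loads(response)
    print(data['city']['name'])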
except Exception as e:
print("Error -> ", e)
|
UTF-8
|
Python
| false | false | 484 |
py
| 10 |
main.py
| 7 | 0.731405 | 0.63843 | 0 | 17 | 27.470588 | 112 |
tmjnow/python-for-everybody-2
| 3,418,793,968,295 |
6d871b4634764b59ce16963da7054ddad6142ea2
|
30023deca629b86add099dfe12d8eedd5ea26223
|
/3-web-data/ch11/11_0.py
|
f017d4405721c670a07a245c025db49af398eb5b
|
[] |
no_license
|
https://github.com/tmjnow/python-for-everybody-2
|
6302d95a693eb347283769d662dd28bb1161fc45
|
2485e1513b2e7a717c15d2e1d53bee7d630dc3d1
|
refs/heads/master
| 2021-06-06T14:05:32.147585 | 2016-05-18T13:10:39 | 2016-05-18T13:10:39 | 105,756,476 | 0 | 1 | null | true | 2017-10-04T10:33:54 | 2017-10-04T10:33:54 | 2016-05-13T03:44:31 | 2016-05-18T13:11:11 | 690 | 0 | 0 | 0 | null | null | null |
# Regular Expressions
'''
Quick Guide
^ Matches the beginning of a line
$ Matches the end of the line
. Matches any character
\s Matches whitespace
\S Matches any non-whitespace character
* Repeats a character zero or more times
*? Repeats a character zero or more times (non-greedy)
+ Repeats a character one or more times
+? Repeats a character one or more times (non-greedy)
[aeiou] Matches a single character in the listed set
[^XYZ] Matches a single character not in the listed set
[a-z0-9] The set of characters can include a range
( Indicates where string extraction is to start
) Indicates where string extraction is to end
'''
# Programming language for matching and parsing strings.
# Very powerful and quite cryptic
import re
# using find()
hand = open('mbox-short.txt')
for line in hand:
line = line.rstrip()
if line.find('From:') >= 0:
print line
print '\n\n'
# using re.search()
hand = open('mbox-short.txt')
for line in hand:
line = line.rstrip()
if re.search('From:', line):
print line
print '\n\n'
hand = open('mbox-short.txt')
for line in hand:
line = line.rstrip()
if re.search('^From:', line): # equivalent to line.startswith('From:')
print line
# wild-card characters
#
# ^ match the start of the line
# X capital X
# . match any character
# * any character zero or more times
# : colon
#
# ^X.*:
hand = open('mbox-short.txt')
for line in hand:
line = line.rstrip()
if re.search('^X.*:', line):
print line
print '\n\n'
# fine-tuning match
#
# ^ match the start of the line
# X capital X
# - hyphen
# \S any non-whitespace character
# + one or more times
# : colon
#
# ^X-\S+:
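# (illustrative loop added to mirror the earlier examples; 'mbox-short.txt'
# is the same file used in the sections above)
hand = open('mbox-short.txt')
for line in hand:
    line = line.rstrip()
    if re.search('^X-\S+:', line):
        print line
print '\n\n'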
# re.findall()
x = 'My 2 favorite numbers are 19 and 42'
y = re.findall('[0-9]+', x)
print y
y = re.findall('[AEIOU]+', x)
print y
print '\n\n'
# Greedy and Non-Greedy Matching
x = 'From: Using the : character'
y = re.findall('^F.+:', x) # Greedy - take the larger - 'From: Using the :'
print y
y = re.findall('^F.+?:', x) # Non-Greedy - take the smaller - 'From:'
print y
print '\n\n'
# Sample line from mbox-short.txt
x = 'From stephen.marquard@uct.ac.za Sat Jan 5 09:14:16 2008'
# Get email addresses
y = re.findall('\S+@\S+', x)
print y
print '\n\n'
# Get email addresses only from lines starting with 'From:'
y = re.findall('^From (\S+@\S+)', x)
print y
print '\n\n'
# Get the domain name
lin = 'From stephen.marquard@uct.ac.za Sat Jan 5 09:14:16 2008'
y = re.findall('^From .*@([^ ]*)', lin)
print y
print '\n\n'
# Spam Confidence
hand = open('mbox-short.txt')
numlist = list()
for line in hand:
line = line.rstrip()
stuff = re.findall('^X-DSPAM-Confidence: ([0-9.]+)', line)
if len(stuff) != 1: continue
num = float(stuff[0])
numlist.append(num)
print 'Maximum:', max(numlist)
print '\n\n'
# Escape Character
x = 'We just received $10.00 for cookies.'
y = re.findall('\$[0-9.]+', x)
print y
print '\n\n'
|
UTF-8
|
Python
| false | false | 3,035 |
py
| 45 |
11_0.py
| 42 | 0.627348 | 0.613509 | 0 | 125 | 23.152 | 76 |
jason-gill00/LibraryManagementSystem
| 10,608,569,242,454 |
b70120d58eff0a478fd9e9c351a7f37a6ea89bb5
|
08ac6dcb06689c49421c4c0edd9bdc84d9ecdcc6
|
/client/CreateAccount.py
|
c17bed35c70885cd3f7ce3acae8090e7d25b34c6
|
[] |
no_license
|
https://github.com/jason-gill00/LibraryManagementSystem
|
4f0041d4e6b9c620a7c5b3ed5d85e2877e872012
|
488510b180a95cdafd5a7a2bed357ae4275e7b41
|
refs/heads/main
| 2023-05-31T13:18:13.140862 | 2021-06-25T19:18:56 | 2021-06-25T19:18:56 | 380,326,179 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from tkinter import *
from tkinter import ttk
from PIL import ImageTk, Image
from tkinter import messagebox
import cx_Oracle
class CreateAccount:
def __init__(self, root):
self.root = root
self.root.title("Create an Account")
self.root.geometry("1350x700+0+0")
self.root.config(bg="white")
#======== BG image ===============
path_to_file = "E:/School/Python/Tkinter/CPSPROJECT/home-libraries-0516-AD-KOTU05-01.jpg"
self.bg=ImageTk.PhotoImage(Image.open(path_to_file))
self.bg_image = Label(self.root, image=self.bg).place(x=0, y=0, relwidth=1, relheight=1)
        #========== Left Image ===========
path_to_left = "E:/School/Python/Tkinter/CPSPROJECT/bluewood.jpg"
self.left_pic=ImageTk.PhotoImage(Image.open(path_to_left))
left = Label(self.root, image=self.left_pic).place(x=135, y=100, width=400, height=500)
        #========== Register Frame =======
frame1 = Frame(self.root, bg="white")
frame1.place(x=535, y=100, width=700, height=500)
title = Label(frame1, text = "REGISTER HERE", font=("times new roman", 20, "bold"), bg="white", fg="green").place(x=50, y=30)
sign_in_btn = Button(self.root, text="Sign In", fg="white", bg="#0B547C", font=("Arial", 15, "bold"), command = self.sign_in).place(x=200, y=500, width=250, height=40)
first_name_lbl = Label(frame1, text="First Name", font=("times new roman", 15, "bold"), bg="white", fg="gray").place(x=50, y=100)
self.first_name = Entry(frame1, font=("times new roman", 15), bg = "lightgray")
self.first_name.place(x=50, y=130, width=250)
last_name_lbl = Label(frame1, text="Last Name", font=("times new roman", 15, "bold"), bg="white", fg="gray").place(x=370, y=100)
self.last_name = Entry(frame1, font=("times new roman", 15), bg = "lightgray")
self.last_name.place(x=370, y=130, width=250)
street_lbl = Label(frame1, text="Street Name", font=("times new roman", 15, "bold"), bg="white", fg="gray").place(x=50, y=170)
self.street = Entry(frame1, font=("times new roman", 15), bg = "lightgray")
self.street.place(x=50, y=200, width=250)
city_lbl = Label(frame1, text="City", font=("times new roman", 15, "bold"), bg="white", fg="gray").place(x=370, y=170)
self.city = Entry(frame1, font=("times new roman", 15), bg = "lightgray")
self.city.place(x=370, y=200, width=250)
province_lbl = Label(frame1, text="Province", font=("times new roman", 15, "bold"), bg="white", fg="gray").place(x=50, y=230)
self.province = Entry(frame1, font=("times new roman", 15), bg = "lightgray")
self.province.place(x=50, y=260, width=250)
country_lbl = Label(frame1, text="Country", font=("times new roman", 15, "bold"), bg="white", fg="gray").place(x=370, y=230)
self.country = Entry(frame1, font=("times new roman", 15), bg = "lightgray")
self.country.place(x=370, y=260, width=250)
phonenumber_lbl = Label(frame1, text="Phone Number", font=("times new roman", 15, "bold"), bg="white", fg="gray").place(x=50, y=290)
self.phonenumber = Entry(frame1, font=("times new roman", 15), bg = "lightgray")
self.phonenumber.place(x=50, y=320, width=250)
birthdate_lbl = Label(frame1, text="Birthdate", font=("times new roman", 15, "bold"), bg="white", fg="gray").place(x=370, y=290)
self.birthdate = Entry(frame1, font=("times new roman", 15), bg = "lightgray")
self.birthdate.place(x=370, y=320, width=250)
email_lbl = Label(frame1, text="Email", font=("times new roman", 15, "bold"), bg="white", fg="gray").place(x=50, y=350)
self.email = Entry(frame1, font=("times new roman", 15), bg = "lightgray")
self.email.place(x=50, y=380, width=250)
password_lbl = Label(frame1, text="Password", font=("times new roman", 15, "bold"), bg="white", fg="gray").place(x=370, y=350)
self.password = Entry(frame1, font=("times new roman", 15), bg = "lightgray")
self.password.place(x=370, y=380, width=250)
create_account_btn= Button(frame1, text="Create Account",fg="white", bg="#0B547C", font=("Arial", 15, "bold"), command = self.create_account).place(x=200, y=440, width = 250, height=40)
def create_account(self):
print(self.first_name.get())
insert_query = """
INSERT INTO PERSON
VALUES('{}','{}',{},'{}','{}','{}','{}','{}','{}')
""".format(self.birthdate.get(), self.email.get(), self.phonenumber.get(), self.first_name.get(), self.last_name.get(), self.province.get(), self.city.get(), self.country.get(), self.street.get())
print(insert_query)
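        # Hedged alternative sketch (added; not the original approach):
        # cx_Oracle supports bind variables, which avoid SQL injection from
        # raw string formatting, e.g.:
        #   cursor.execute("INSERT INTO PERSON VALUES(:1,:2,:3,:4,:5,:6,:7,:8,:9)",
        #                  (self.birthdate.get(), self.email.get(), ...))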
dsnStr = cx_Oracle.makedsn("oracle.scs.ryerson.ca", "1521", "orcl")
db = cx_Oracle.connect(user="j65gill", password="09260785", dsn=dsnStr)
cursor = db.cursor()
cursor.execute(insert_query)
        db.commit()
        # fetchall() is not valid after an INSERT (there is no result set),
        # so simply close the cursor once the transaction is committed.
        cursor.close()
# INSERT ALL
# INTO person
# VALUES('01-FEB-1987','sean12@gmail.com',647123456,'sean','beasley','M1W1J5','Ontario','Toronto','Canada',123,'Jane St.')
# dsn_str = cx_Oracle.makedsn("oracle.scs.ryerson.ca", "1521", "orcl")
# db = cx_Oracle.connect(user="j65gill", password="09260785", dsn=dsnStr)
def sign_in(self):
self.root.destroy()
import library
root = Tk()
obj = CreateAccount(root)
root.mainloop()
|
UTF-8
|
Python
| false | false | 5,561 |
py
| 10 |
CreateAccount.py
| 9 | 0.607804 | 0.545046 | 0 | 118 | 46.127119 | 204 |
altlinuxteam/domain-test
| 7,164,005,457,537 |
b273fad824a3573822b30a9837f1639a5b7bf960
|
3c0236baa0457a1efded1ee1d8cac29f04fdecb0
|
/util/diag/diag.py
|
d20d1c935fcfa3d5c7683d93681eb959538fa960
|
[] |
no_license
|
https://github.com/altlinuxteam/domain-test
|
7f09439a6e21a96c0bf35ebf588fac730a95b1fa
|
129f0437f84d74dff72fb74b95df198092d2006b
|
refs/heads/master
| 2021-01-08T00:46:14.141024 | 2020-05-10T08:59:48 | 2020-05-10T08:59:48 | 241,865,245 | 0 | 0 | null | false | 2020-04-13T10:01:39 | 2020-02-20T11:17:43 | 2020-03-03T10:55:46 | 2020-04-13T10:01:38 | 37 | 0 | 0 | 0 |
Python
| false | false |
#
# samba-itest - utility to diagnose domain controller problems
#
# Copyright (C) 2019-2020 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .config import read_config
from .import_wrapper import try_import
from .report.report import Report
class Diag:
def __init__(self, diag_config, report):
self.config = read_config(diag_config)
self.type = 'controller'
if 'type' in self.config:
self.type = self.config['type']
self.report = Report(report, self.type)
def run(self):
self.check_packages()
self.check_files()
self.check_systemd()
self.check_samba()
self.check_ports()
self.report.dump()
def check_packages(self):
mod_rpm = try_import('rpm')
if mod_rpm:
from diag.checker.rpm import (
is_rpm_installed,
rpm_files_correct
)
if 'packages' in self.config:
for pkg in self.config['packages']:
if is_rpm_installed(pkg):
self.report.add('package', pkg, rpm_files_correct(pkg))
def check_files(self):
from diag.checker.files import file_present
if 'files_present' in self.config:
for fpath in self.config['files_present']:
self.report.add('file', fpath, file_present(fpath))
def check_systemd(self):
mod_dbus = try_import('dbus')
if mod_dbus:
import diag.checker.systemd
def check_samba(self):
mod_samba = try_import('samba')
if mod_samba:
import diag.checker.samba
def check_ports(self):
from diag.checker.network import is_port_opened
if 'localhost_ports_opened' in self.config:
for port_struct in self.config['localhost_ports_opened']:
descr = port_struct['description']
port = port_struct['port']
if is_port_opened(port):
self.report.add('port', '{} port ({}) is opened'.format(descr, port), 'true')
print('{} port ({}) is opened'.format(descr, port))
else:
self.report.add('port', '{} port ({}) is opened'.format(descr, port), 'false')
print('{} port ({}) is closed'.format(descr, port))
|
UTF-8
|
Python
| false | false | 2,942 |
py
| 32 |
diag.py
| 6 | 0.600612 | 0.597553 | 0 | 83 | 34.433735 | 98 |
wangjcStrive/FlaskDemo
| 16,226,386,463,248 |
26a8678ded14f90df7d52f9734d0b379c9e15a54
|
48f8a13f5c4fbddaa21729d31692b03b9018b106
|
/watchlist/DataBase/DailyTaskDB.py
|
f6045ff7ff1da1dcd0ccadcf79bc22b759fd2d64
|
[] |
no_license
|
https://github.com/wangjcStrive/FlaskDemo
|
082dd0974d1926f8cb74b7d558d0fc552ed4bf86
|
ff8ca0dfe34081a3c946c283d51ae1462b46b8bc
|
refs/heads/master
| 2022-12-01T18:08:53.643289 | 2020-08-17T03:03:31 | 2020-08-17T03:03:31 | 280,406,761 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pyodbc
conn = pyodbc.connect(r'DRIVER={ODBC Driver 17 for SQL Server};SERVER=JICWANG1-E7470\MYSQL;DATABASE=DailyTask;UID=sa;PWD=LMwjc7922!')  # raw string avoids the invalid \M escape warning
cursor = conn.cursor()
|
UTF-8
|
Python
| false | false | 172 |
py
| 8 |
DailyTaskDB.py
| 5 | 0.773256 | 0.709302 | 0 | 4 | 41.75 | 132 |
Aasthaengg/IBMdataset
| 14,482,629,761,626 |
383c91c589d612341b65e7bc394dab9da8101a53
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03393/s309528444.py
|
b7a857f0558b7ba41983e68539a43c68b3e8f418
|
[] |
no_license
|
https://github.com/Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from string import ascii_lowercase
def main():
word = input()
if len(word) < 26:
count = {}
for a in ascii_lowercase:
count[a] = 0
for w in word:
count[w] += 1
count_l = list(count.items())
count_l.sort(key=lambda x: x[0])
add = ""
for i in range(26):
if count_l[i][1] == 0:
add = count_l[i][0]
break
print(word + add)
elif word == ascii_lowercase[::-1]:
print(-1)
else:
is_printed = False
while len(word) > 0 and not is_printed:
for c in ascii_lowercase[ascii_lowercase.index(word[-1]) + 1:]:
if c not in word[:-1]:
print(word[:-1] + c)
is_printed = True
break
else:
word = word[:-1]
if __name__ == '__main__':
main()
|
UTF-8
|
Python
| false | false | 916 |
py
| 202,060 |
s309528444.py
| 202,055 | 0.427948 | 0.408297 | 0 | 35 | 25.142857 | 75 |
nadsi96/Alba_Manager
| 17,257,178,601,782 |
1223d79c941658814a990b3fa1837a843292a5a5
|
64fe153ae1adba2be340c7148c45ed5ee63bc2be
|
/settings_frame/manage_wage/manage_payroll.py
|
7271969e6e06faa63ad7e20883ef813aa1f555d6
|
[] |
no_license
|
https://github.com/nadsi96/Alba_Manager
|
c7b2a71890041cee803ea8d2f3a0847c50329b77
|
befde731582924e8e600b2f46fd80e0b9b97d83f
|
refs/heads/master
| 2023-08-14T18:34:58.581780 | 2021-10-17T08:26:40 | 2021-10-17T08:26:40 | 329,536,738 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from tkinter import *
from UI import btn_menu
from settings_frame.manage_wage.payroll_book import payroll_book_query
from settings_frame.manage_wage.analysis import analysis_wage
class Manage_Payroll(btn_menu.Btn_Menu):
def __init__(self, parent):
super().__init__(parent, "Manage_Payroll")
self.open_command_list = [self.open_payroll_book_query, self.open_analysis]
self.set_Window()
def set_Window(self):
self.set_Buttons()
def set_Buttons(self):
self.btn_list = []
self.btn_list.append(super().new_Button(text="급여 장부 조회"))
self.btn_list.append(super().new_Button(text="뭐할까.."))
for idx, btn in enumerate(self.btn_list):
super().btn_pack(btn)
btn.config(command=self.open_command_list[idx])
def open_analysis(self):
print("open_analysis")
analysis_wage.Analysis_Wage(self.window)
return
def open_payroll_book_query(self):
print("open_payroll_book_query")
_pbq = payroll_book_query.Payroll_Book_Query(self.window)
return
|
UTF-8
|
Python
| false | false | 1,143 |
py
| 52 |
manage_payroll.py
| 51 | 0.620444 | 0.620444 | 0 | 34 | 32.117647 | 83 |
KSARATH22/project
| 10,428,180,622,575 |
dfe89448ae440e3869c167413113b25fcf144c9b
|
501a3a1628493eceabe76ad5e9910d87d04c3fee
|
/hospital2project/hospital2app/migrations/0001_initial.py
|
1ce342c861a641ea243b9833dc4da188e43e7817
|
[] |
no_license
|
https://github.com/KSARATH22/project
|
6d2ff886c4fe6a1bc49c962e0b2556fc7fc26001
|
4cdce50f21e8282bb20053b35e2ed874267b0541
|
refs/heads/master
| 2020-05-15T22:38:13.891842 | 2019-04-21T12:34:11 | 2019-04-21T12:34:11 | 182,530,821 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Generated by Django 2.2 on 2019-04-21 12:23
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ContactData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('email', models.EmailField(max_length=50)),
('mobile', models.BigIntegerField()),
],
),
migrations.CreateModel(
name='Doctors',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=300)),
('last_name', models.CharField(max_length=300)),
('email', models.EmailField(max_length=200)),
('mobile', models.BigIntegerField()),
('addres', models.CharField(max_length=400)),
],
),
migrations.CreateModel(
name='FeedbackData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('rating', models.IntegerField()),
('time', models.DateField()),
('feedback', models.CharField(max_length=500)),
],
),
migrations.CreateModel(
name='Messages',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300)),
('datetime', models.DateField()),
('message', models.CharField(max_length=200)),
('rating', models.IntegerField()),
],
),
migrations.CreateModel(
name='Nurses',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=300)),
('last_name', models.CharField(max_length=300)),
('email', models.EmailField(max_length=200)),
('mobile', models.BigIntegerField()),
('addres', models.CharField(max_length=300)),
],
),
migrations.CreateModel(
name='Patients',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=300)),
('last_name', models.CharField(max_length=300)),
('pat_age', models.IntegerField()),
('pat_gender', models.CharField(max_length=200)),
('mobile', models.BigIntegerField()),
('addres', models.CharField(max_length=300)),
],
),
migrations.CreateModel(
name='Reports',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('case', models.CharField(max_length=200)),
('lab_attendant', models.CharField(max_length=300)),
('description', models.CharField(max_length=200)),
],
),
]
|
UTF-8
|
Python
| false | false | 3,634 |
py
| 10 |
0001_initial.py
| 6 | 0.521464 | 0.50055 | 0 | 87 | 40.770115 | 114 |
chepe4pi/rest-app-example
| 13,460,427,530,516 |
21c05154db1c3eb4e03baffdc3a33a0d39ccdea1
|
95a3237c467e2ba63390cdcf7b0c0e0fc488ee05
|
/interview/urls.py
|
65b6468e70fc3cfef64b9fc6cc322fac2b3ba19d
|
[] |
no_license
|
https://github.com/chepe4pi/rest-app-example
|
672484b1f8acda232202a6bab29912179dc499f1
|
2825d9ab117f7237ed38b47d8cfa3f99cb261fc2
|
refs/heads/master
| 2021-07-15T01:51:17.894159 | 2017-10-17T08:15:32 | 2017-10-17T08:15:32 | 107,237,066 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import url
from django.contrib import admin
from int_order.views import OrderViewSet
from int_user_info.views import ScoresViewSet
from django.views.generic import TemplateView
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
urlpatterns_order = [
    url(r'^api/orders/(?P<pk>\d+)/$', OrderViewSet.as_view({'patch': 'partial_update', 'get': 'retrieve'})),
    url(r'^api/orders/$', OrderViewSet.as_view({'get': 'list'}))
]
urlpatterns_user_info = [
    url(r'^api/scores/$', ScoresViewSet.as_view({'get': 'list'}))
]
urlpatterns_index = [
url(r'^$', TemplateView.as_view(template_name='index.html')),
]
urlpatterns += urlpatterns_order
urlpatterns += urlpatterns_index
urlpatterns += urlpatterns_user_info
|
UTF-8
|
Python
| false | false | 742 |
py
| 15 |
urls.py
| 13 | 0.698113 | 0.698113 | 0 | 26 | 27.538462 | 107 |
simbha/GAE-appswell
| 19,189,913,915,192 |
3e31d5fe0502ab01d3cc0c96bd4d3eb1521dd916
|
bb3df8c53dd2c954a2f536dcdf31963732a2a175
|
/appspot/project/controllers/demo_controller.py
|
5a32f1a11d5ec0abe5a6b94d93a8a8fec8ae6605
|
[
"MIT"
] |
permissive
|
https://github.com/simbha/GAE-appswell
|
a9081d55818e825a2d4d4ff731cab72478e5ba1c
|
3090df26624a31c0ad4a5a80cbba80a864a8565b
|
refs/heads/master
| 2020-05-19T17:10:55.889402 | 2013-10-04T04:01:53 | 2013-10-04T04:01:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Demo Controller
NOTES
self.Html is a helper class for generating output
"""
#
# IMPORTS
#
# Python Standard Library
import sys, os, logging, inspect
from random import randint, sample
from pprint import pformat
from decimal import Decimal
from os.path import dirname, join as osjoin
from datetime import datetime
from cgi import escape
# App Engine Imports
from google.appengine.api import users
from google.appengine.ext.webapp.util import login_required
# Appswell
from config import core as c
from framework.lib.base_controller import BaseController
from framework.lib.gatekeeper import Gatekeeper
from framework.lib import multicache as memcache
#from framework.vendor.appengine_utilities import sessions
from framework.vendor.recaptcha.client import captcha
from project.models.simple_form import AppswellSimpleForm, AppswellSimpleModelForm
from project.models.simple_log import AppswellSimpleLog, AppswellSimpleLogModelForm
from project.vendor.klenwell import demo
#
# MODULE ATTRIBUTES
#
#
# CONTROLLER CLASS
#
class DemoController(BaseController):
name = 'DemoController'
layout = 'default'
auto_render = True
# helper objects
Gatekeeper = Gatekeeper()
def home(self):
self.t['head_content'] += self.Html.css_link('/css/home.css')
self.template_type = 'django'
self.render('home', 'home')
def index(self):
self.t['head_content'] += self.Html.css_link('/css/demo.css')
self.set('header', 'demo index')
self.set('menu', self.Gatekeeper.get_controller_menu(self))
self.set('data', 'pick an action from the menu at right' )
self.template_type = 'django'
#self.render('index') <- auto-renders
def changelog(self):
self.redirect('/presents/changelog')
def framework(self):
"""
Dump information related to framework or environment
"""
subaction = self.params and self.params[0] or 'index'
subheader = 'details'
subcontent = ''
pref = '<pre>%s</pre>'
# subactions
if subaction == 'version':
version = 'available only in development server'
if c.IS_DEV_SERVER:
from google.appengine.tools import appcfg
version = appcfg.GetVersionObject()
subheader = "google app engine version (<tt>appcfg.GetVersionObject()</tt>)"
subcontent = pref % (pformat(version))
elif subaction == 'routemap':
subheader = 'ROUTE_MAP from config'
subcontent = pref % (pformat(c.ROUTE_MAP))
elif subaction == 'environment':
environ_data = {}
try:
for k in os.environ:
environ_data[k] = os.environ[k]
except Exception, e:
logging.error(e)
environ_data = { 'error': e }
subheader = 'os.environ'
subcontent = pref % (pformat(environ_data))
elif subaction == 'controller':
subheader = 'appswell controller object __dict__'
subcontent = pref % (pformat(self._get_controller_dict()))
elif subaction == 'config':
subheader = 'from config import core as c'
k1 = "c.IS_DEV_SERVER"
k2 = "c.SDK_VERSION"
k3 = "c.DEMO"
k4 = "c.acme.user"
k5 = "c.acme.password"
data = {
k1 : c.IS_DEV_SERVER,
k2 : c.SDK_VERSION,
k3 : c.DEMO,
k4 : c.acme.user,
k5 : c.acme.password
}
subcontent = '<pre>%s</pre>' % (pformat(data))
else: # index
subheader = "framework index"
subcontent = "choose an option above"
submenu = """
<ul>
<li><a href="/demo/framework">index</a></li>
<li><a href="/demo/framework/version">version</a></li>
<li><a href="/demo/framework/environment">environment</a></li>
<li><a href="/demo/framework/config">config</a></li>
<li><a href="/demo/framework/routemap">routemap</a></li>
<li><a href="/demo/framework/controller">controller object</a></li>
</ul>
"""
contentf = """
<h3>framework information</h3>
<p>select an option below to display information on framework</p>
%s
<br />
<h4>%s</h4>
%s
"""
# output
content = contentf % (submenu, subheader, subcontent)
self.set('head_content', self.Html.css_link('/css/demo.css'))
self.set('menu', self.Gatekeeper.get_controller_menu(self))
self.set('headline', 'framework')
self.set('content', content)
self.render('content')
def templates(self):
"""
Demo mako and django templates
"""
subaction = self.params and self.params[0] or 'index'
subaction2 = len(self.params) > 1 and self.params[1] or None
subheader = 'details'
subcontent = ''
pref = '<pre>%s</pre>'
# subactions
if subaction == 'mako':
self.template_type = 'mako'
if subaction2 == 'auto':
explanation = "This examples uses the mako template, as \
integrated within the appswell framework."
self.set('menu', self.Gatekeeper.get_controller_menu(self))
self.set('head_content', self.Html.css_link('/css/demo.css'))
self.set('headline', 'mako auto-rendering test')
self.set('explanation', explanation)
# these are all equivalent
#return self.render('test')
#return self.render('test', 'layouts')
return self.render('test', '/layouts')
else: # mako template
app_root = os.path.dirname(os.path.dirname(__file__))
from framework.vendor.mako.template import Template
from framework.vendor.mako.lookup import TemplateLookup
view_dict = {
'__flash__' : '',
'head_content' : self.Html.css_link('/css/demo.css'),
'menu' : self.Gatekeeper.get_controller_menu(self),
'headline' : 'mako test',
'explanation' : "This is an example that makes explicit \
use of the mako template, outside any special \
integration done by the appswell framework."
}
# set layout to empty and auto_render to false so that dispatch
# will not attempt to automagically render path and we can
# print output below
self.layout = None
self.auto_render = False
# manually set and render mako template
view_path = osjoin(app_root, 'views/demo/test.mako')
layout_path = osjoin(app_root, 'views/layouts')
MakoLookup = TemplateLookup( directories=[layout_path] )
MakoTpl = Template(filename=view_path, lookup=MakoLookup)
output = MakoTpl.render(**view_dict)
return self.write(output)
elif subaction == 'django':
content = """
            <p>this example uses the default django templating</p>
<a href="/demo/templates">return to templates</a>
"""
self.set('header', 'django example')
self.set('subheader', '')
self.set('menu', self.Gatekeeper.get_controller_menu(self))
self.set('data', content)
self.set('head_content', self.Html.css_link('/css/demo.css'))
self.template_type = 'django'
return self.render('index')
else:
subheader = "templates index"
subcontent = "choose an option above"
submenu = """
<ul>
<li><a href="/demo/templates">index</a></li>
<li><a href="/demo/templates/mako/template">mako template</a></li>
<li><a href="/demo/templates/mako/auto">mako auto</a></li>
<li><a href="/demo/templates/django">django</a></li>
</ul>
"""
contentf = """
<h3>templating samples</h3>
<p>select an option below to demo a template</p>
%s
<br />
<h4>%s</h4>
%s
"""
# output
content = contentf % (submenu, subheader, subcontent)
self.set('head_content', self.Html.css_link('/css/demo.css'))
self.set('menu', self.Gatekeeper.get_controller_menu(self))
self.set('headline', 'templates')
self.set('content', content)
self.render('content')
def ajax(self):
"""makes an ajax request in the template to the services backend"""
self.set('menu', self.Gatekeeper.get_controller_menu(self))
self.render('ajax')
def atom(self):
subaction = self.params and self.params[0] or 'index'
subheader = 'details'
subcontent = ''
pref = '<pre>%s</pre>'
# subactions
if subaction == 'builder':
            return self._atom_builder()
else:
import cgi
import framework.vendor.google_api as google_api
from gdata.service import GDataService
client = GDataService()
feed_url = 'http://code.google.com/feeds/p/appswell/hgchanges/basic'
feed = client.Get(feed_url, converter=None)
#logging.info(dir(feed))
data = {
'feed' : feed.__dict__,
'first entry' : feed.entry[0].__dict__
}
subheader = 'atom consumer'
subcontent_t = """
<h4>sample feed data</h4>
<table class="data">
<tr>
<td class="label">feed.GetSelfLink().href</td>
<td class="value">%s</td>
</tr>
<tr>
<td class="label">feed.title.text</td>
<td class="value">%s</td>
</tr>
<tr>
<td class="label">feed.entry[0].title.text</td>
<td class="value">%s</td>
</tr>
<tr>
<td class="label">feed.entry[0].content.text</td>
<td class="value">%s</td>
</tr>
<tr>
<td class="label">feed.entry[0].updated.text</td>
<td class="value">%s</td>
</tr>
</table>
<br />
<h4>feed object</h4>
<pre>%s</pre>
"""
subcontent = subcontent_t % ( feed.GetSelfLink().href,
feed.title.text,
self._ascii(feed.entry[0].title.text),
self._ascii(feed.entry[0].content.text),
feed.entry[0].updated.text,
cgi.escape(pformat(data, indent=2)) )
submenu = """
<ul>
<li><a href="/demo/atom/consumer">consumer</a></li>
<li><a href="/demo/atom/builder">builder</a></li>
</ul>
"""
contentf = """
<h5>
powered by
<a href="http://code.google.com/p/gdata-python-client/">google data library</a>
</h5>
<p>select an option below</p>
%s
<br />
<h3>%s</h3>
%s
"""
# output
content = contentf % (submenu, subheader, subcontent)
self.set('head_content', self.Html.css_link('/css/demo.css'))
self.set('menu', self.Gatekeeper.get_controller_menu(self))
self.set('headline', 'atom')
self.set('content', content)
self.render('content')
def _atom_builder(self):
import framework.vendor.google_api
import atom
feedauthor = atom.Author(name = atom.Name(text='klenwell@gmail.com'))
feedtitle = atom.Title(text = "Sample Atom Feed")
feedlink = atom.Link(href = "http://appswell.appspot.com/demo/atom_builder")
feedid = atom.Id(text="urn:uuid:aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")
#time = datetime.datetime.now().isoformat()
feedupdated = atom.Updated("2010-01-27T12:00:00Z")
entries = []
e_title = atom.Title(text="A Sample Atom Feed")
e_link = atom.Link(href= "/demo/atom")
e_id = atom.Id(text="urn:uuid:aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeef")
e_updated = atom.Updated("2010-01-27T12:00:00Z")
e_summary = atom.Summary(text="A sample feed entry. Click title to return to demo atom menu.")
entries.append( atom.Entry(title=e_title, link=e_link, atom_id=e_id, summary=e_summary))
feed = atom.Feed(entry=entries, title=feedtitle, link=feedlink, atom_id=feedid, updated=feedupdated)
self.set_header("Content-Type", "application/atom+xml")
self.write(str(feed))
def hello_world(self):
self.t['head_content'] += self.Html.css_link('/css/demo.css')
self.set('header', 'hello world')
self.set('menu', self.Gatekeeper.get_controller_menu(self))
self.set('data', 'a simple hello world example: also available as <a href="/hello">/hello</a>')
self.template_type = 'django'
self.render('index', 'default')
def __sessions(self):
"""
TODO(klenwell): re-enable when session replaced
Session module part of gaeutilities package:
http://code.google.com/p/gaeutilities/
"""
Session = sessions.Session()
if self.has_param('stop', 1):
Session.delete_item('test_start')
# session not yet started
if not 'test_start' in Session:
Session['test_start'] = datetime.now()
session_age = 'new session started at %s' % (Session['test_start'])
# session exists
else:
session_age = datetime.now() - Session['test_start']
hm = 'session age is %s' % (session_age)
Data = {
'session age' : str(session_age),
'stop session' : self.Html.link('/demo/sessions/stop', 'click here'),
'source' : self.Html.link(
'http://code.google.com/p/gaeutilities/source/browse/trunk/appengine_utilities/sessions.py',
'click here', True),
'session object': Session.__dict__
}
self.t['head_content'] += self.Html.css_link('/css/demo.css')
self.set('header', 'session demo')
self.set('subheader', '<h5>powered by %s</h5><br />' % \
( self.Html.link('http://code.google.com/p/gaeutilities/', \
'gaeutilities', True) ))
self.set('menu', self.Gatekeeper.get_controller_menu(self))
self.set('data', '<pre>%s</pre>' % pformat(Data))
self.template_type = 'django'
self.render('index', 'default')
def sitemap(self):
"""TO DO: dynamic generation"""
url_list = """\
http://appswell.appspot.com/
http://appswell.appspot.com/demo
"""
# prepare output
self.layout = None
self.auto_render = False
self.template_type = 'output'
self.set_header("Content-Type", "text/plain")
#self.output = str(feed)
self.write(str(url_list))
def vendor_test(self):
from project.vendor.klenwell.demo import VendorDemo
# test
VendorTest = VendorDemo()
Data = {
'VendorTest.is_loaded' : VendorTest.is_loaded == True and 'success' or 'failure',
'VendorTest.test()' : VendorTest.test() == 'success' and 'success' or 'failure'
}
# output
self.t['head_content'] += self.Html.css_link('/css/demo.css')
self.set('header', 'testing vendor import')
self.set('subheader', 'test results')
self.set('menu', self.Gatekeeper.get_controller_menu(self))
self.set('data', '<pre>%s</pre>' % pformat(Data))
self.template_type = 'django'
self.render('index', 'default')
def model(self):
"""
Display last 10 SimpleLog records
"""
num_records = 10
RecentLogs = AppswellSimpleLog.gql('ORDER BY created DESC LIMIT %s' % \
(num_records))
self.t['head_content'] += self.Html.css_link('/css/demo.css')
self.set('menu', self.Gatekeeper.get_controller_menu(self))
self.set('RecentLogs', RecentLogs)
self.set('num_records', num_records)
self.render('model')
#@login_required
# NOTE: this decorator does not work with the appswell dispatcher
def __email(self):
"""
TODO(klenwell): re-enable when session replaced
Allow user to email self. Also demonstrates login requirement.
The login_required decorator above is superfluous as the login
requirement is also set in app.yaml for this action.
google docs:
http://code.google.com/appengine/docs/python/config/appconfig.html#Requiring_Login_or_Administrator_Status
http://code.google.com/appengine/docs/python/users/userclass.html
http://code.google.com/appengine/docs/python/mail/sendingmail.html
http://code.google.com/appengine/docs/python/tools/devserver.html#Using_Mail
"""
email_message_t = """
This message was sent from %s/demo/email.
It was sent as a demonstration of the Google App Engine
email function. The user was required to sign in to his
        or her Gmail account to send this message.
If you did not request this message, we apologize for any
inconvenience. If you believe our service is being abused,
please feel free to contact Tom at klenwell@gmail.com.
"""
feedback = ''
show_form = False
email_message = email_message_t % (os.environ.get('SERVER_NAME'))
Session = sessions.Session()
user = users.get_current_user()
if not user:
self.flash('you must log in with your Google account')
self.redirect('/')
# request email: send
if self.request.POST.get('send_email'):
from google.appengine.api import mail
to_email = user.email()
to_name = user.nickname()
SimpleLog = AppswellSimpleLog()
try:
mail.send_mail(
sender='Appswell Email Demo <klenwell@gmail.com>',
to='%s <%s>' % (to_name, to_email),
subject='Appswell Email Demo',
body=email_message )
feedback = 'Your message has been queued for delivery. Check your Google Account email.'
Session['sent_email'] = True
log_msg = 'sent test email to %s' % (to_name)
SimpleLog.log('email', log_msg, 'system')
except Exception, e:
feedback = 'there was a problem sending the email: %s' % (str(e))
error_msg = 'unable to send test email: %s' % (str(e))
SimpleLog.log('email', error_msg, 'error')
logging.error(error_msg)
# limit possible abuse
elif Session.get('sent_email'):
feedback = 'an email has been sent to your address %s' % (user.email())
# else show form
else:
show_form = True
# output
self.set('head_content', self.Html.css_link('/css/demo.css'))
self.set('menu', self.Gatekeeper.get_controller_menu(self))
self.set('feedback', feedback)
self.set('email_message', email_message)
self.set('user_email', user.email())
self.set('show_form', show_form)
self.render('email')
def multicache(self):
"""
google docs:
http://code.google.com/appengine/docs/python/memcache/functions.html
"""
import config.cache
cache_key = 'memcache_demo'
CacheConfig = config.cache.Memcache.get(cache_key, 'default')
cache_len = CacheConfig.get('duration', 60)
        display_t = '<span class="%s">%s</span>: %s'
cache = memcache.get(cache_key)
if cache is not None:
display = display_t % ('hit', 'cache found', cache)
else:
cache = 'cache saved <b>%s</b> (will be saved for %s seconds)' % \
( datetime.now().strftime('%Y-%m-%d %H:%M:%S'), cache_len )
memcache.set(cache_key, cache, cache_len)
display = display_t % ('miss', 'cache not found', \
'saving new cache (reload page to see content)')
# prepare content
content_t = """
<p>
Multicache is a simple wrapper for the Google App Engine's Memcache library that
enables it to store items larger than the 1 MB limit.
</p>
<p>
For additional details on usage, see the
<a href="http://code.google.com/p/appswell/source/browse/appspot/lib/multicache.py?spec=svn116329ce59bd52af14388fedf2cdac7015d67fbe&name=v1s11-branch&r=116329ce59bd52af14388fedf2cdac7015d67fbe">multicache</a>
and
<a href="http://code.google.com/p/appswell/source/browse/appspot/test/unit/test_multicache.py?spec=svn116329ce59bd52af14388fedf2cdac7015d67fbe&name=v1s11-branch&r=116329ce59bd52af14388fedf2cdac7015d67fbe">unit test</a>
source code.
</p>
<div class="cache_demo">
<p>%s</p>
<small>current time: %s</small>
</div>
"""
content = content_t % (display, datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
# prepare output
self.set('head_content', self.Html.css_link('/css/demo.css'))
self.set('menu', self.Gatekeeper.get_controller_menu(self))
self.set('headline', 'multicache')
self.set('content', content)
self.render('content')
def recaptcha(self):
"""
Post recaptcha challenge to SimpleLog
ref: http://daily.profeth.de/2008/04/using-recaptcha-with-google-app-engine.html
"""
# recaptcha html
recaptcha_html = captcha.displayhtml(
public_key = c.RECAPTCHA_PUBLIC_KEY,
use_ssl = False,
error = None)
if self.request_type == 'POST':
#SimpleModelForm = AppswellSimpleModelForm(self.Request.POST)
captcha_response = captcha.submit(
self.request.POST.get("recaptcha_challenge_field", None),
self.request.POST.get("recaptcha_response_field", None),
c.RECAPTCHA_PRIVATE_KEY,
self.request.remote_addr )
if not captcha_response.is_valid:
recaptcha_html = captcha.displayhtml(
public_key = c.RECAPTCHA_PUBLIC_KEY,
use_ssl = False,
error = captcha_response.error_code)
self.flash('recaptcha failed: %s' % (captcha_response.error_code))
#elif SimpleModelForm.is_valid():
elif captcha_response.is_valid:
SimpleLog = AppswellSimpleLog()
SimpleLog.log('demo',
'recaptcha: %s' % (self.request.POST['recaptcha_response_field']),
'system')
logging.info('successful recaptcha: %s' % self.request.POST['recaptcha_response_field'])
self.flash('recaptcha successful: Simplelog update')
return self.redirect('/demo/model')
# render mako view
self.template_type = 'mako'
self.set('recaptcha_html', recaptcha_html)
self.set('menu', self.Gatekeeper.get_controller_menu(self))
self.set('head_content', self.Html.css_link('/css/demo.css'))
self.render('recaptcha')
def twitter(self):
"""
Basic twitter API examples using tweepy
see http://joshthecoder.github.com/tweepy/docs/api.html#timeline-methods
"""
warning = ''
UserTimeline = []
lib_home = 'https://github.com/joshthecoder/tweepy'
# set up twitter object
from framework.vendor import tweepy
# set screen name
screen_name = self.params and self.params[0] or c.TWITTER_USER
# dump option
dump = (screen_name == 'dump')
if dump:
screen_name = c.TWITTER_USER
        # get recent posts
        public_tweets = None
        try:
            public_tweets = tweepy.api.user_timeline(screen_name)
        except Exception, e:
            logging.error('Twitter API error: %s' % str(e))
# prepare output
if public_tweets and dump:
tweet_object = public_tweets[0]
subheader = 'most recent status object dump for %s' % (screen_name)
subcontent = '<pre>%s</pre>' % (pformat(tweet_object.__getstate__()))
elif public_tweets:
lif = '<li><tt>[%s]</tt> %s</li>'
TweetList = []
for tweet in public_tweets:
TweetList.append(lif % (str(tweet.created_at), tweet.text))
subheader = 'user timeline for <a href="%s">%s</a>' % \
('http://twitter.com/klenwell', screen_name)
subcontent = '<ul>%s</ul>' % ('\n'.join(TweetList))
else:
subheader = "unable to retrieve timeline for %s" % (screen_name)
subcontent = 'try <a href="/demo/twitter/klenwell">klenwell</a>'
# output
contentf = """
<h5>
powered by <a href="%s">tweepy</a>
</h5>
<br />
<h4>%s</h4>
%s
"""
content = contentf % (lib_home, subheader, subcontent)
self.set('head_content', self.Html.css_link('/css/demo.css'))
self.set('menu', self.Gatekeeper.get_controller_menu(self))
self.set('headline', 'twitter')
self.set('content', content)
self.render('content')
def simple_form(self):
"""
A simple form demonstrating the interaction of controller, model,
modelform (see simple_form.py in models), and template.
Notice the use of as_table method by modelform object SimpleModelForm
"""
# form submitted
if self.request_type == 'POST':
SimpleModelForm = AppswellSimpleModelForm(self.request.POST)
if SimpleModelForm.is_valid(): # this step technically redundant
if SimpleModelForm.save():
self.flash('link <b>%s</b> saved : thank you!' % \
(SimpleModelForm.cleaned_data.get('url', 'n/a')))
SimpleModelForm = AppswellSimpleModelForm() # resets form
else:
self.flash('there was a problem with the form : see below')
# new form (no submission)
else:
SimpleModelForm = AppswellSimpleModelForm()
# prepare view
DataView = {
'controller': self._get_controller_dict(),
'POST' : self.request.POST.__dict__,
#'self.request' : self.request,
"self.Request.POST.get('url')" : self.request.POST.get('url'),
'is_valid' : SimpleModelForm.is_valid(),
}
self.t['head_content'] += self.Html.css_link('/css/demo.css')
self.set('menu', self.Gatekeeper.get_controller_menu(self))
self.set('data', pformat(DataView))
self.set('simple_form', SimpleModelForm.as_table())
self.set('datastore', pformat(AppswellSimpleForm.find_last_n(5)))
self.template_type = 'django'
self.render('simple_form', 'default')
def testing(self):
# prepare content
source_link = "%s%s" % (
"http://code.google.com/p/appswell/source/browse",
"/appspot/project/test/unit/template.py?name=appswell-core")
content = """
<p>The Appswell package includes a testing harness. It is not available from the
        appspot server but can be found locally on the dev_appserver at
<a href="/test">/test</a>.</p>
<p>A sample test can be found in the source code. It is available online here:
<a href="%s">unit test template</a></p>
<br />
<h4>screenshot</h4>
<img src="/img/appswell_tests_screenshot.png" title="unit test screenshot"
alt="unit test screenshot" style="padding:8px;" />
""" % (source_link)
# prepare output
self.set('head_content', self.Html.css_link('/css/demo.css'))
self.set('menu', self.Gatekeeper.get_controller_menu(self))
self.set('headline', 'testing')
self.set('content', content)
self.render('content')
def _get_controller_dict(self):
"""
Return controller's dict after stripping any sensitive information
"""
# collect request attrs
request_attrs = []
for a in dir(self.request):
try:
if a.startswith('_'):
continue
request_attr =(a, escape(str(type(getattr(self.request,a)))))
request_attrs.append(request_attr)
# need to catch some deprecation warnings added in 1.7.5
except DeprecationWarning:
pass
co = self.__dict__.copy()
# set view parameters
co['config'] = {}
co['Request'] = escape(str(type(self.request)))
co['Request attrs'] = request_attrs
co['self.request.POST'] = self.request.POST
co['self.request.GET'] = self.request.GET
co['self.has_param("controller")'] = self.has_param("controller")
co['self.get_param(1)'] = self.get_param(1)
return co
def _ascii(self, text):
return unicode(text, errors='ignore').replace("\0", "") \
.encode('ascii', 'ignore')
|
UTF-8
|
Python
| false | false | 29,198 |
py
| 62 |
demo_controller.py
| 33 | 0.571272 | 0.564593 | 0 | 803 | 35.361146 | 218 |
sergiolucero/ppp
| 13,056,700,599,211 |
920e328189c96ea81622394e12a1574dacf67175
|
5e6871ff97cef787317d9f4b4c47cef83f5b4a27
|
/SCRIPTS/blade.py
|
a08e5a4aa0d4e88a1f5c269690e8160a0b0f2daa
|
[] |
no_license
|
https://github.com/sergiolucero/ppp
|
c47c3f9be93307f5f414712755570180a7e33adc
|
ff32448cd77cc31e2936294c8f5bf9ff731dbce1
|
refs/heads/master
| 2023-04-05T21:31:12.246550 | 2021-05-06T09:40:35 | 2021-05-06T09:40:35 | 339,729,969 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from lenin import *
for level in ['R','C','L','M']:
k=Klass(f'{level}:Presi')
print(' LEVEL:', level, 'N=', len(k.data))
k.train()
|
UTF-8
|
Python
| false | false | 147 |
py
| 52 |
blade.py
| 23 | 0.517007 | 0.517007 | 0 | 6 | 23.5 | 49 |
emresvm/191216011_yusufemre_sevim_odev1
| 7,456,063,262,510 |
b4ab86d9c86e3658e19c6d24f85babd8353d6b03
|
45513f4dce00589832336be61d2a4a50e67f337e
|
/191216011_yusufemre_sevim_odev1_kod.py.py
|
5ae7d6a374a09548db8e98abb20bbadbfd1888c1
|
[] |
no_license
|
https://github.com/emresvm/191216011_yusufemre_sevim_odev1
|
4be817624251d2bdbd338c1dbb04a42f0b262413
|
e8f189bea29429643888bafe89d10fe4285a28b4
|
refs/heads/master
| 2020-09-09T01:13:36.907149 | 2019-11-12T20:03:36 | 2019-11-12T20:03:36 | 221,299,152 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
başlık = ("***Hesap makinesi***")
print(başlık)
işlemler ="""
1 -Toplama İşlemi \t +
2 -Çıkarma İşlemi \t -
3 -Çarpma İşlemi \t *
4 -Bölme İşlemi \t /
"""
print(işlemler)
while True:
soru = input("Lütfen yapmak istediğiniz işlem numarasını giriniz (Uygulamadan çıkmak için x'e basınız ..)\t :")
if soru == "x":
print("Uygulamadan çıkılıyor ... ")
break
elif soru =="1":
s1=input("Birinci sayıyı giriniz :\t")
s2=input("İkinci sayıyı giriniz :\t")
try:
s_1=int(s1)
s_2=int(s2)
print( s_1 + s_2 )
except ValueError:
print("Lütfen sadece sayı giriniz ! ")
elif soru == "2":
c1 = input("Birinci sayıyı giriniz :\t")
c2 = input("İkinci sayıyı giriniz :\t")
try:
c_1=int(c1)
c_2=int(c2)
print( c_1 - c_2 )
except ValueError:
print("Lütfen sadece sayı giriniz ! ")
elif soru == "3":
b1 = input("Birinci sayıyı giriniz :\t")
b2 = input("İkinci sayıyı giriniz :\t")
try:
b_1=int(b1)
b_2=int(b2)
print(b_1 * b_2)
except ValueError:
print("Lütfen sayı giriniz : ")
elif soru == "4":
d1 = input("Birinci sayıyı giriniz :\t")
d2 = input("İkinci sayıyı giriniz :\t")
try:
d_1=int(d1)
d_2=int(d2)
print(d_1 / d_2)
except ValueError:
print("Lütfen sayı giriniz : ")
except ZeroDivisionError:
print("Bölme işlemlerinde sıfır kullanılmaz !")
|
UTF-8
|
Python
| false | false | 1,692 |
py
| 1 |
191216011_yusufemre_sevim_odev1_kod.py.py
| 1 | 0.506454 | 0.481868 | 0 | 57 | 27.54386 | 115 |
oliabhi/Machine-Learning
| 7,533,372,648,079 |
7576cd7ab5691eccef6c7c0c8ab49e2da006af74
|
841791c9a0afbe56a93e5e5f89ab770f3f4e3901
|
/Boosting/XGBRegressor.py
|
d59ff475a6f16987df3867a760cfa9cb4218ede5
|
[] |
no_license
|
https://github.com/oliabhi/Machine-Learning
|
75f9956ed3060193f51275454bdeead43f607032
|
49d018c7f1d7f78a3089e59793c42a337fc448d3
|
refs/heads/main
| 2023-07-12T06:05:15.753097 | 2021-08-15T03:36:37 | 2021-08-15T03:36:37 | 388,693,379 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pandas as pd
import numpy as np
df = pd.read_csv("G:/Statistics (Python)/Cases/Real Estate/Housing.csv")
dum_df = pd.get_dummies(df.iloc[:,1:11], drop_first=True)
from sklearn.model_selection import train_test_split
X = dum_df
y = df.iloc[:,0]
# Create training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3,
random_state=2018)
from xgboost import XGBRegressor
clf = XGBRegressor()
clf.fit(X_train,y_train)
y_pred = clf.predict(X_test)
#def mean_absolute_percentage_error(y_true, y_pred):
# y_true, y_pred = np.array(y_true), np.array(y_pred)
# return np.mean(np.abs((y_true - y_pred) / y_true))
#
#print(mean_absolute_percentage_error(y_test,y_pred))
from sklearn.metrics import mean_squared_error,mean_absolute_error,r2_score
print(mean_squared_error(y_test, y_pred))
print(mean_absolute_error(y_test, y_pred))
print(r2_score(y_test, y_pred))
################ Tuning XGBoost ##################################
lr_range = [0.001, 0.01, 0.1, 0.2,0.25, 0.3]
n_est_range = [10,20,30,50,100]
md_range = [2,4,6,8,10]
parameters = dict(learning_rate=lr_range,
n_estimators=n_est_range,
max_depth=md_range)
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
kfold = KFold(n_splits=5, random_state=42,shuffle=True)
clf = XGBRegressor(random_state=1211)
cv = GridSearchCV(clf, param_grid=parameters,
cv=kfold,scoring='r2')
cv.fit(X,y)
print(cv.best_params_)
print(cv.best_score_)
|
UTF-8
|
Python
| false | false | 1,647 |
py
| 48 |
XGBRegressor.py
| 43 | 0.625379 | 0.5932 | 0 | 54 | 28.5 | 75 |
evaldobratti/played_with_me
| 7,395,933,693,447 |
c83812feb132f101226065770bf28674dbf4205a
|
e251626c54cbd44f3b087b639d038303cb3a165f
|
/web_app/tasks.py
|
57f81106fdd79eff92953ca5bf6253f84eaccac4
|
[] |
no_license
|
https://github.com/evaldobratti/played_with_me
|
635dbd2333d8a7f08d7eeed4ebb1018af597d650
|
d29e73c611354ab1ccf53b68f4ad5bda35d80ee9
|
refs/heads/master
| 2020-05-29T20:15:38.030603 | 2015-06-08T00:04:48 | 2015-06-08T00:04:48 | 34,880,714 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from models import *
from huey.djhuey import crontab, periodic_task, db_task
import logging
log = logging.getLogger('pwm_logger')
@db_task()
def download_games(account_id):
log.info("requisitando download de " + str(account_id))
last_match_id = None
while True:
try:
log.info("acc: {} last match: {}".format(account_id, last_match_id or 'started'))
matches = get_until_success(lambda: dota_api.get_match_history(account_id,
start_at_match_id=last_match_id))
log.info("acc: {} results remaining: {}".format(account_id, matches.results_remaining))
if matches.results_remaining <= 0:
log.info("acc: {} finished parsing".format(account_id))
return
log.info("acc {} parse matches: {}".format(account_id, [m.match_id for m in matches.matches]))
for match in matches.matches:
with transaction.atomic():
get_details_match(match.match_id)
last_match_id = match.match_id
log.info("acc: {} parsed: {}".format(account_id, last_match_id))
except Exception, e:
log.exception(e)
|
UTF-8
|
Python
| false | false | 1,256 |
py
| 21 |
tasks.py
| 11 | 0.562102 | 0.561306 | 0 | 30 | 40.866667 | 108 |
pspratling/Speech-Recognition
| 6,116,033,430,712 |
ae2b6ecfaf6ee67c27bc1781f10318c9cfc7e6ab
|
83636925813a056eef13969101d06f822093e489
|
/speech_recognition/data/get_silence_data.py
|
a7595f7c8ec3498007b9ee005b6742a7c4bb7691
|
[
"MIT"
] |
permissive
|
https://github.com/pspratling/Speech-Recognition
|
dffec30394dcb26d940b9a6563fda231e82f60b2
|
9712a2e6c6e1574d79fbbaa99b37eb3131848019
|
refs/heads/master
| 2020-03-26T00:38:02.750577 | 2018-10-14T20:59:55 | 2018-10-14T20:59:55 | 144,329,114 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import numpy as np
import librosa
#all clips are sampled with samplerate=16000
SAMPLERATE = 16000
def extract_clip(filename, duration, offset):
"""
Extracts 1 second clip from a given file
filename: background noise filename to extract from
duration: length of the background noise audio clip
offset: where in the clip to start extracting from
returns mfcc
"""
freq, _ = librosa.load(filename, offset=offset, duration=1, sr=None)
mfcc = librosa.feature.mfcc(freq, sr=SAMPLERATE)
return mfcc
def get_silence_samples(filename, num_samples):
"""
Extracts 1 second clips randomly from a given background noise file
filename: background noise filename to extract from
num_samples: desired number of 1 second clips to extract
returns array of extracted samples in mfcc form
"""
duration = librosa.get_duration(filename=filename)
offsets = np.random.uniform(0, duration-1, size=num_samples)
mfcc_list = []
for i in range(num_samples):
mfcc = extract_clip(filename, duration, offsets[i])
mfcc_list.append(mfcc)
return np.array(mfcc_list)
silence_filenames = [file for file in os.listdir('../../data/_background_noise_') if file[-4:]=='.wav']
#looping through background noise clips and saving MFCCs for use in models
for fold, num_samples in zip(['train', 'validation', 'test'], [550, 75, 75]):
print('Extracting silence clips for {}...'.format(fold))
mfcc_list = np.array([]).reshape(0, 20 , 32)
for filename in silence_filenames:
mfcc = get_silence_samples('../../data/_background_noise_/{}'.format(filename),
num_samples=num_samples)
mfcc_list = np.vstack([mfcc_list, mfcc])
os.mkdir('../../data/{}/silence'.format(fold))
target_mfcc_path = '../../data/{}/silence/mfcc.npy'.format(fold)
np.save(target_mfcc_path, mfcc_list)
print('Done!')
|
UTF-8
|
Python
| false | false | 1,965 |
py
| 9 |
get_silence_data.py
| 4 | 0.659542 | 0.644784 | 0 | 56 | 34.107143 | 103 |
lambdamax/storage
| 2,697,239,466,348 |
8cb2224769f72a3763592fca6d7c83338d8230ee
|
b103e401fbc0c7eb35257cb555c1e16831ac481c
|
/migrations/versions/b041e63868e3_init03.py
|
a9abdded36f9e118d95409bf47daa941812fc805
|
[] |
no_license
|
https://github.com/lambdamax/storage
|
581f4899735f169e4bd07aa7499737b3988b2229
|
2bfa0f48e9e56715dc4387b4c971e1d61a68b2b6
|
refs/heads/master
| 2023-09-01T09:31:11.804470 | 2017-12-01T15:23:46 | 2017-12-01T15:23:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""'init03'
Revision ID: b041e63868e3
Revises: bf8c66e884b3
Create Date: 2017-11-09 13:56:23.089000
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b041e63868e3'
down_revision = 'bf8c66e884b3'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('st_history', sa.Column('register_date', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('st_history', 'register_date')
# ### end Alembic commands ###
|
UTF-8
|
Python
| false | false | 670 |
py
| 10 |
b041e63868e3_init03.py
| 6 | 0.683582 | 0.602985 | 0 | 28 | 22.928571 | 89 |
Arkansas-Department-of-Transportation/sirGISTools
| 10,058,813,457,063 |
009b42164b9f48d4ce52b879800b31cc7ac9f45b
|
48e7ff73974770d57c8e7429d0a709cea78833ab
|
/RoadInv_ARNOLD_reconcilation.py
|
312e93b3210c25e06ec3c02ad3a16a15ef530e37
|
[] |
no_license
|
https://github.com/Arkansas-Department-of-Transportation/sirGISTools
|
bdc426ecb8fce56e569fd6e742d1209edf581ddf
|
e59fc00dd812e65dfa71ed5c8af4d4b20cde38bb
|
refs/heads/master
| 2020-12-04T04:22:16.627174 | 2020-01-03T15:19:06 | 2020-01-03T15:19:06 | 231,609,844 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import arcpy
import csv
#input layers
#Definition query down to the county in both the road inventory and the ARNOLD dataset; handle only one county at a time
#definition query will be handled on the table layer level
roadInventoryLayer = ""
ARNOLDLayer = ""
outpath = "C:\\scratch\\test.csv"
#lengths are scaled to integers to avoid floating point rounding errors when summing
multiplicationFactor = 10000
roadIdsList = []
#Get all of the road inventory segments for a single section for that county - scale and add together
roadInvDict = {}
with arcpy.da.SearchCursor(roadInventoryLayer, ['AH_roadid', "RoadLength"]) as roadInvCur:
    for segment in roadInvCur:
        selectedRoadID = segment[0]
        selectedRoadLength = segment[1]
        if selectedRoadID in roadInvDict:
            roadInvDict[selectedRoadID] = int(selectedRoadLength * multiplicationFactor) + roadInvDict[selectedRoadID]
        else:
            roadInvDict[selectedRoadID] = int(selectedRoadLength * multiplicationFactor)
            roadIdsList.append(selectedRoadID)
#Get all ARNOLD segments for a single section for that county - scale and add together
arnoldDict = {}
with arcpy.da.SearchCursor(ARNOLDLayer, ['AH_RoadID', "AH_Length"]) as arnoldCur:
    for segment in arnoldCur:
        selectedRoadID = segment[0]
        selectedRoadLength = segment[1]
        if selectedRoadID in arnoldDict:
            arnoldDict[selectedRoadID] = int(selectedRoadLength * multiplicationFactor) + arnoldDict[selectedRoadID]
        else:
            arnoldDict[selectedRoadID] = int(selectedRoadLength * multiplicationFactor)
            roadIdsList.append(selectedRoadID)
roadIdsList = list(set(roadIdsList))
#Generate an output table with the following fields: AH_RoadID, ARNOLD Milage, RoadInventory Milage, Difference, Error
outputList = [["AH_RoadID", "ARNOLD_Milage", "RoadInv_Milage", "Milage_Diff", "Error"]]
for roadID in roadIdsList:
    #Merge the two dictionaries according to road ID; divide the milage by the scale factor to recover miles
    milesDiff = 0
    if roadID in arnoldDict:
        arnoldMiles = float(arnoldDict[roadID]) / multiplicationFactor
    else:
        arnoldMiles = -1
        error = "Missing from ARNOLD"
    if roadID in roadInvDict:
        roadInvMiles = float(roadInvDict[roadID]) / multiplicationFactor
    else:
        roadInvMiles = -1
        error = "Missing from Road Inventory"
    #output table contains the following error codes
    # - Missing from Road Inventory
    # - Missing from ARNOLD
    # - Road Inventory and ARNOLD milage do not match
    if roadID in arnoldDict and roadID in roadInvDict:
        milesDiff = float(arnoldDict[roadID] - roadInvDict[roadID]) / multiplicationFactor
        if milesDiff == 0:
            error = "No Error"
        else:
            error = "Milages Differ"
    outputRow = [roadID, arnoldMiles, roadInvMiles, milesDiff, error]
    outputList.append(outputRow)
print "write CSV file"
#open the csv file for writing
opener = open(outpath, "wb")
csvWriter = csv.writer(opener)
for row in outputList:
    csvWriter.writerow(row)
#close out the csv file
del csvWriter
opener.close()
|
UTF-8
|
Python
| false | false | 3,316 |
py
| 5 |
RoadInv_ARNOLD_reconcilation.py
| 2 | 0.695115 | 0.687877 | 0 | 90 | 35.844444 | 163 |
srgsoroka/PT1
| 9,929,964,409,659 |
bd7ff0386aba9a94ddc386fae1ba5c39018394ca
|
863c1f53e5001466acad95a6f66dacfd3af9ad91
|
/2days/hello19.py
|
b694411d849671acd80112f6bcec11e8016fa3e5
|
[] |
no_license
|
https://github.com/srgsoroka/PT1
|
ab8f0f166e8b9505c2977cf736dde34b46bb9076
|
9987e1e8df011b1a14eca57b4e7269f9106b41d8
|
refs/heads/master
| 2020-07-23T10:20:39.474604 | 2019-09-23T14:23:38 | 2019-09-23T14:23:38 | 207,526,317 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def f(x):
return 2 * x
print(f(2))
double = f
print(double(3))
def execute(f, x):
return f(x)
print(execute(double, 23))
def f():
def g(x):
return 2 * x
return g
double = f()
print(double(44))
print(f()(44))
|
UTF-8
|
Python
| false | false | 254 |
py
| 34 |
hello19.py
| 34 | 0.53937 | 0.5 | 0 | 30 | 7.433333 | 26 |
ekg/beats
| 5,660,766,912,743 |
cb52d67d791a07c54134fe6d6eda737f4d4244f7
|
a22c2b25ae78467dfae3642b3133028d4b8736b3
|
/code/xgood.py
|
3e4f1e824b49b8fd74a41bc631876183941a36d6
|
[] |
no_license
|
https://github.com/ekg/beats
|
93462080f266bb03ba7127c550ccc96cbe25fd60
|
d32d3ffb983e6a313093cf64628c8d540119d1ce
|
refs/heads/master
| 2021-06-19T12:34:26.696391 | 2019-10-05T22:23:03 | 2019-10-05T22:23:03 | 137,067,949 | 6 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
Scale.default.set('minorPentatonic')
tuning='ET12'
Root.default.set(-2)
d1 >> play("x-o- ").every(3, "stutter", cycle=5)
kk >> keys((P[0:4],2+P[0:4],4+P[0:4]),amp=0.8,oct=3,dur=P[3,1,3,1,1])
|
UTF-8
|
Python
| false | false | 194 |
py
| 64 |
xgood.py
| 63 | 0.603093 | 0.489691 | 0 | 6 | 31.166667 | 69 |
LeoAndo/work
| 10,041,633,547,450 |
a9b7d3b12d1710c5f918121be7f58cf7e0580329
|
a4b73e20fd36e1f0943240319fea2ad161c0cf0c
|
/src/training/python/withdraw.py
|
9e1d6b4770af0e3fbfe23be493ef99a0a98f5305
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/LeoAndo/work
|
cbc19bab3b1f5d742a6b958cb28adbca308cd50c
|
23b15e68c91d28872129a43ad8e53ffd93321a7f
|
refs/heads/master
| 2020-04-30T15:47:56.944325 | 2020-04-30T12:03:48 | 2020-04-30T12:03:48 | 176,930,277 | 2 | 0 |
Apache-2.0
| false | 2020-12-21T10:12:29 | 2019-03-21T11:22:32 | 2020-12-21T09:28:09 | 2020-12-21T10:12:29 | 224,042 | 2 | 0 | 57 |
Kotlin
| false | false |
print('==============================')
print('threading test')
# multithreading demo
import threading
lock = threading.Lock() # create a lock object
balance = 0 # variable holding the account balance
def withdraw_with_confirm(money):
    global balance # declared global so the balance variable can be rebound
    lock.acquire() # the locked section starts here
    try:
        if balance >= money: # is the balance sufficient?
            if input('y/n?: ') == 'y': # confirm via command-line input
                balance -= money # update the balance
                return True
            return False
        else:
            raise ValueError()
    finally:
        lock.release() # release the lock
balance = 100 # set the balance to 100 yen
class LazyThread(threading.Thread):
    def run(self):
        import time
        time.sleep(1) # wait 1 second
        try:
            withdraw_with_confirm(90) # withdraw 90 yen
except ValueError:
print('lazy thread: NG %s' % balance)
else:
print('lazy thread: OK %s' % balance)
lazy_thread = LazyThread()
lazy_thread.start()
try:
withdraw_with_confirm(20)
except ValueError:
print('current thread: NG %s' % balance)
else:
print('current thread: OK %s' % balance)
|
UTF-8
|
Python
| false | false | 1,277 |
py
| 261 |
withdraw.py
| 82 | 0.569585 | 0.55576 | 0 | 44 | 23.613636 | 50 |
mbway/intelligent-robotics
| 919,123,037,726 |
a388a54ab7d804fc8af3000812b7b5689516f4de
|
36b972c3b14d4bdffb1e31ff8dc23615a4d3a589
|
/exercise1/src/tests/odometry.py
|
4dc1dfe16cef1bb7f5091cc6e0fd2425e93e9b19
|
[] |
no_license
|
https://github.com/mbway/intelligent-robotics
|
8699fada7f64c9eb8b59e82e7586bbe062049b89
|
fc2aaef934e95023bdfe2bc40619f53bc6b21caa
|
refs/heads/master
| 2018-12-20T14:46:35.728227 | 2017-03-27T22:02:43 | 2017-03-27T22:02:43 | 69,690,930 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import rospy
import roslib
# Messages
from nav_msgs.msg import Odometry
#from geometry_msgs.msg import Point, Quaternion
def odometry_callback(msg):
print msg.pose.pose
print msg.twist.twist
if __name__ == '__main__':
rospy.init_node('odometry', anonymous=True)
rospy.Subscriber('odom', Odometry, odometry_callback)
rospy.spin()
|
UTF-8
|
Python
| false | false | 379 |
py
| 2,987 |
odometry.py
| 85 | 0.701847 | 0.701847 | 0 | 20 | 17.9 | 57 |
matt-bowen/SHIVIR
| 19,387,482,382,272 |
1f1b5cb9867fa25fb8290db089d35279111d7745
|
51e252c95d7351eef0508e2a461acf3222c5d30f
|
/galfitcreate.py
|
fa79f827ba8545b603e17e53ab98751adbcf97ee
|
[] |
no_license
|
https://github.com/matt-bowen/SHIVIR
|
dd19a690b594374795df56f35b6ab054b1ed90fe
|
bf97c3cc58297c926c38f35d71b70a91cc71cebb
|
refs/heads/master
| 2020-04-07T05:18:36.557897 | 2018-03-20T17:49:07 | 2018-03-20T17:49:07 | 124,184,041 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 13 12:14:06 2018
@author: matt
"""
import re
import sys
def processDecomp(galaxy):
decomp = open("/home/matt/Thesis/Files From Kevin/decompositionValues.txt",'r')
for line in decomp:
if re.match(galaxy+"((?!plus|minus).*)", line):
decompVals = line
decompVals = decompVals.split(" ")
for i, j in enumerate(decompVals):
if j[0].isdigit():
decompVals[i] = float(j)
decompDict = {'galaxy': decompVals[0], 'mu_e1': decompVals[1],
'r_e1': decompVals[4], 'n1': decompVals[7],
'mu_e2': decompVals[10], 'r_e2': decompVals[13],
'mu_e3': decompVals[16], 'r_e3': decompVals[19],
'n3': decompVals[22]}
return decompDict
def processProf(galaxy):
path = "/home/matt/Thesis/Working Sources/SHIVIR/"+galaxy+"/"+galaxy+"-I-Image_mag_cut3.prof"
for line in open(path, 'r'):
line = line.split(" ")
newline=[]
for i in line:
if i != "":
newline.append(i)
newline[-1].replace("\n",'')
try:
newline = [float(i) for i in newline]
except ValueError:
pass
try:
if int(newline[0]) == 5: #take PA at ~5 arcsec for bulge
PA5 = newline[4]
ellip5 = newline[3]
except (IndexError, ValueError):
pass
profDict = {'radius': newline[0], 'ellip': newline[3],
'PAFinal': newline[4], 'PA5': PA5,
'xpos': newline[8], 'ypos': newline[9],
'ellip5': ellip5}
return profDict
def main(input1):
galaxy = input1
buildHalo = True
inclMask = True
#change this to sersic
expString = "# Object number: 3\n0) sersic\n1) $SERSIC2POS 1 1\n3) $SERSIC2MAG 1\n4) $SERSIC2RE 1\n5) $SERSIC2IND 1\n9) 1 1\n10) 0 1\nZ) 0\n\n# Object number: 4"
noExpString = "# Object number:3"
decompDict = processDecomp(galaxy)
profDict = processProf(galaxy)
#print("decomp:", decompDict, "\n")
#print("profile:", profDict)
center = str(int(profDict['xpos'])) + ' ' + str(int(profDict['ypos']))
conbox = "400 400"
addMoreRegion = 75
addFactor = 2
fitregion = str(int(profDict['xpos']-profDict['radius'])-addFactor*addMoreRegion) + ' ' +\
str(int(profDict['xpos']+profDict['radius'])+addFactor*addMoreRegion) + ' ' +\
str(int(profDict['ypos']-profDict['radius'])-addFactor*addMoreRegion) + ' ' +\
str(int(profDict['ypos']+profDict['radius'])+addFactor*addMoreRegion)
with open("GALFITTEMPLATE", 'r') as file :
filedata = file.read()
    if inclMask:
        filedata = filedata.replace("$PIXELMASK", galaxy+"-I-Flag-Circ.fits")
    else:
        filedata = filedata.replace("$PIXELMASK", "none")
filedata = filedata.replace("$INPUTFILE", galaxy+"-I-Image.fits")
filedata = filedata.replace("$OUTPUTFILE", galaxy+"-Output.fits")
filedata = filedata.replace("$SIGMAFILE", galaxy+"-I-Sigma.fits")
filedata = filedata.replace("$FITREGION", fitregion)
filedata = filedata.replace("$CONBOX", conbox)
filedata = filedata.replace("$SERSIC1POS", center)
filedata = filedata.replace("$SERSIC1MAG", str(decompDict['mu_e1']))
filedata = filedata.replace("$SERSIC1RE", str((decompDict['r_e1']/0.187)))
filedata = filedata.replace("$SERSIC1IND", str(decompDict['n1']))
filedata = filedata.replace("$SERSIC1PA", str(-profDict['PA5']))
filedata = filedata.replace("$SERSIC1ELLIP", str(-profDict['ellip5']+1))
#add axis ratio at 5 arcsec
filedata = filedata.replace("$EXPD1POS", center)
filedata = filedata.replace("$EXPD1MAG", str(decompDict['mu_e2']))
filedata = filedata.replace("$EXPD1RE", str((decompDict['r_e2']/0.187)))
filedata = filedata.replace("$EXPD1PA", str(-profDict['PAFinal']))
filedata = filedata.replace("$EXPD1ELLIP", str(-profDict['ellip']+1))
#add axis ratio at end
if buildHalo: #change this to sersic
expString = expString.replace("$SERSIC2POS", center)
expString = expString.replace("$SERSIC2MAG", str(decompDict['mu_e3']))
expString = expString.replace("$SERSIC2RE", str(decompDict['r_e3']/0.187))
expString = expString.replace("$SERSIC2IND", str(decompDict['n3']))
filedata = filedata.replace("$EXPBOOL", expString)
else:
filedata = filedata.replace("$EXPBOOL", noExpString)
filedata = filedata.replace("$SKYEST", "1")
with open(galaxy+".galfit", 'w') as file:
file.write(filedata)
#main(sys.argv[1])
main("VCC2050")
|
UTF-8
|
Python
| false | false | 4,910 |
py
| 12 |
galfitcreate.py
| 8 | 0.580041 | 0.552138 | 0 | 127 | 37.653543 | 165 |
DRAGON-NOOB/Maxx-ShowMore-Bot
| 5,600,637,374,548 |
3e964cbfbe50104efaefb4e1107f3a3fe5afdea4
|
719c2001a8dcafb2911539753de5561be2191a1e
|
/const/CONFIG.py
|
4ddc2c00db96d7f843a60bef1f6e45569443bb45
|
[] |
no_license
|
https://github.com/DRAGON-NOOB/Maxx-ShowMore-Bot
|
a5d6dee4045ad34cca7fa57c2dde5618a4c2a449
|
0b4ac6fc65ae786507824b0607abee567f0022fc
|
refs/heads/main
| 2023-01-03T12:23:42.942285 | 2020-11-01T18:13:39 | 2020-11-01T18:13:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
BOT_TOKEN = 'Your Bot Token 🌝'
API_ID = 12345 # placeholder API ID - replace with your own
API_HASH = 'create_new_one'
FULL_POSTS_CHNLD_ID = -100111
FULL_POSTS_CHNLD_ID_no_100 = 111 # This is where all posts will be saved
FULL_CHNL_INVTE_LNK = 'https://t.me/joinchat/NOTaVALIDlink'
DEV_ID = 0000
|
UTF-8
|
Python
| false | false | 263 |
py
| 3 |
CONFIG.py
| 2 | 0.703846 | 0.642308 | 0 | 9 | 27.888889 | 73 |
jota-info/TCC-IFC-Licence-Plate-Recognition
| 7,876,970,070,505 |
35444ed121ad943c3dd66d5d4ee808eb7e9f0e83
|
77944edfeb5360433b0ccdd48eef238ddd4f2e3b
|
/Códigos TCC/Testes OpenCV/recortar imagem.py
|
100234e9d86f4d471cedf82432e1c55be6ad1f1e
|
[] |
no_license
|
https://github.com/jota-info/TCC-IFC-Licence-Plate-Recognition
|
7add5b6a5ebb4472cdb2a7aace6997df52f04926
|
227ab466562795c18afaf7e0a67b7011aa604003
|
refs/heads/master
| 2015-09-24T22:33:55.362436 | 2015-07-09T12:51:24 | 2015-07-09T12:51:24 | 38,817,358 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#~ cv2.SetImageROI(frame_gray, box2);
#~ cvResize(frame_gray, licenca, CV_INTER_LINEAR);
#~ cvResetImageROI(frame_gray);
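#~ A cv2 equivalent of the commented-out crop-and-resize above - a minimal sketch,
#~ assuming box2 is an (x, y, w, h) rectangle and dsize is a hypothetical target size:
#~ x, y, w, h = box2
#~ licenca = cv2.resize(frame_gray[y:y+h, x:x+w], dsize, interpolation=cv2.INTER_LINEAR)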
|
UTF-8
|
Python
| false | false | 121 |
py
| 31 |
recortar imagem.py
| 16 | 0.727273 | 0.710744 | 0 | 3 | 39.333333 | 50 |
amrrs/SO_Answers
| 14,448,269,994,741 |
be0fef2af92c7ed055294337f430603f871eccfb
|
ecce417cb478c1f0a7a04e04439a6b9dc4139dd1
|
/filter_words_with_digits.py
|
769333282176594a29cb33a5dd201a50deb9562c
|
[] |
no_license
|
https://github.com/amrrs/SO_Answers
|
f07e0236bb63832dbf74900e1c25753cd0a00fd0
|
f3d684cc269873e7703d45d4e6c062190334600b
|
refs/heads/master
| 2021-06-27T19:59:53.050669 | 2019-07-04T18:24:22 | 2019-07-04T18:24:22 | 105,772,423 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
c = "Snap-on Power M1302A5 Imperial,IMPRL 0.062IN"
only_a = []
for word in c.split():
#print(word)
if not bool(re.search('\d',word)):
#print(word)
only_a.append(word)
' '.join(only_a)
any([word.isdigit() for word in c.split()])
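# With this input, ' '.join(only_a) yields 'Snap-on Power Imperial,IMPRL'; the
# any(...) check is False, since no whitespace-separated token is purely digits.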
|
UTF-8
|
Python
| false | false | 253 |
py
| 42 |
filter_words_with_digits.py
| 39 | 0.596838 | 0.561265 | 0 | 10 | 24.3 | 50 |
neptunedays9/text-analysis-py
| 8,950,711,852,282 |
16432fda90beffb7c479aba781c95f7e208b9645
|
d581a0509b59a32e471fe82838247ab4545f0dd1
|
/character_filter.py
|
a75be701e46bd2ad2952b4711ecb48a91383930f
|
[] |
no_license
|
https://github.com/neptunedays9/text-analysis-py
|
f25d3bb3b6908da6faeed0a50c61b781500d75db
|
d4a8b9cfd702d5016f3bd9f6379d97ecaedfd2ae
|
refs/heads/master
| 2021-04-16T14:39:41.844912 | 2020-04-26T12:05:14 | 2020-04-26T12:05:14 | 249,363,208 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import re
def remove_unwanted_characters(text_array):
# print(text_array)
text_array_new = []
for t in text_array:
if len(t) > 1:
t = re.sub('[^A-Za-z0-9]+', '', t)
text_array_new.append(t)
return text_array_new
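# Example (sketch): remove_unwanted_characters(['hello!', 'a', 'wor#ld'])
# drops the single-character token and returns ['hello', 'world'].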
|
UTF-8
|
Python
| false | false | 263 |
py
| 12 |
character_filter.py
| 11 | 0.539924 | 0.528517 | 0 | 13 | 19.307692 | 46 |
westonsankey/DSBA6100-PatentProject
| 16,020,228,045,254 |
4202a703d5bf42f3ba7b03f602818798995bdf81
|
37664e6eda5cdfe386420bf2dcacf209f9851713
|
/class_entity.py
|
73603679d2cc8a12bf6e1382ae1d636ed5c0f908
|
[] |
no_license
|
https://github.com/westonsankey/DSBA6100-PatentProject
|
16593a3d80ebf3d20b0adcfb5ecb2e6b53c25196
|
ab222911a7e7eecc4c4d0137847e949ec1b81505
|
refs/heads/master
| 2018-01-11T21:02:39.579564 | 2015-11-08T15:35:20 | 2015-11-08T15:35:20 | 44,880,352 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from collections import defaultdict
from alchemyapi import AlchemyAPI
import json
alchemyapi = AlchemyAPI()
CLASSES_TO_PROCESS = ['SEMICONDUCTOR DEVICE MANUFACTURING',
'ACTIVE SOLID-STATE DEVICES (E.G., TRANSISTORS, SOLID-STATE DIODES)',
'MULTIPLEX COMMUNICATIONS',
'COMPUTER GRAPHICS PROCESSING AND SELECTIVE VISUAL DISPLAY SYSTEMS',
'ELECTRIC LAMP AND DISCHARGE DEVICES',
'ELECTRICAL COMPUTERS AND DIGITAL PROCESSING SYSTEMS',
'TELECOMMUNICATIONS',
'LIQUID CRYSTAL CELLS, ELEMENTS AND SYSTEMS',
'CHEMISTRY',
'ELECTRICITY',
'DATA PROCESSING',
'ERROR DETECTION/CORRECTION AND FAULT DETECTION/RECOVERY',
'ELECTROPHOTOGRAPHY',
'OPTICAL',
'MOTION VIDEO SIGNAL PROCESSING FOR RECORDING OR REPRODUCING',
'EQUIPMENT FOR PRODUCTION, DISTRIBUTION, OR TRANSFORMATION OF ENERGY',
'ELECTRICAL COMPUTERS AND DIGITAL DATA PROCESSING SYSTEMS',
'BATTERIES',
'MOTOR VEHICLES',
'TRANSPORTATION']
def get_alchemy_api_entities(patent_class, abstract_doc):
response = alchemyapi.entities('text', abstract_doc, {'sentiment': 0})
json_response_text = json.dumps(response, indent=4)
filename = '%s_entity.json' % (patent_class)
write_file = open(filename, 'w')
write_file.write(json_response_text)
write_file.close()
def create_class_abstract_dict():
read_file = open('patent_data_with_class_desc.csv', 'r')
class_abstracts = defaultdict(str)
for line in read_file:
fields = line.split('|')
patent_class = fields[4]
abstract = fields[8].strip()
if patent_class != '\\N' and abstract != 'NA':
if patent_class in CLASSES_TO_PROCESS:
class_abstracts[patent_class] += abstract
    read_file.close()
    return class_abstracts
def main():
class_abstracts = create_class_abstract_dict()
for patent_class in CLASSES_TO_PROCESS:
get_alchemy_api_entities(patent_class, class_abstracts[patent_class])
if __name__ == '__main__':
main()
|
UTF-8
|
Python
| false | false | 1,960 |
py
| 5 |
class_entity.py
| 4 | 0.691327 | 0.689286 | 0 | 60 | 31.683333 | 77 |
tschamm/boschshc-hass
| 19,292,993,100,853 |
42ed49179b94157cbca9c12ae44183e87249ca20
|
2c794efaf2a462c15943d356d50255a61c79d94f
|
/custom_components/bosch_shc/number.py
|
6285d053bbfebc8fac8f1082b2596897ad1cd3d3
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/tschamm/boschshc-hass
|
796e07c447e05f1b75791ca3659d18d3d39045db
|
eb080ab1e95af473471288b483a5f02dd6874cc3
|
refs/heads/master
| 2023-05-15T06:01:23.359786 | 2023-05-05T18:38:51 | 2023-05-05T18:38:51 | 232,424,544 | 91 | 27 |
NOASSERTION
| false | 2023-08-16T08:19:52 | 2020-01-07T21:53:58 | 2023-08-11T11:58:12 | 2023-08-15T21:02:08 | 718 | 87 | 20 | 19 |
Python
| false | false |
"""Platform for switch integration."""
from __future__ import annotations
from boschshcpy import SHCThermostat, SHCSession
from boschshcpy.device import SHCDevice
from homeassistant.components.number import (
NumberDeviceClass,
NumberEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DATA_SESSION, DOMAIN
from .entity import SHCEntity
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the SHC switch platform."""
entities: list[NumberEntity] = []
session: SHCSession = hass.data[DOMAIN][config_entry.entry_id][DATA_SESSION]
for number in (
session.device_helper.thermostats + session.device_helper.roomthermostats
):
entities.append(
SHCNumber(
device=number,
parent_id=session.information.unique_id,
entry_id=config_entry.entry_id,
attr_name="Offset",
)
)
if entities:
async_add_entities(entities)
class SHCNumber(SHCEntity, NumberEntity):
"""Representation of a SHC number."""
_attr_device_class = NumberDeviceClass.TEMPERATURE
_attr_entity_category = EntityCategory.DIAGNOSTIC
_attr_native_unit_of_measurement = TEMP_CELSIUS
def __init__(
self,
device: SHCDevice,
parent_id: str,
entry_id: str,
attr_name: str | None = None,
) -> None:
"""Initialize a SHC number."""
super().__init__(device, parent_id, entry_id)
self._attr_name = (
f"{device.name}" if attr_name is None else f"{device.name} {attr_name}"
)
self._attr_unique_id = (
f"{device.root_device_id}_{device.id}"
if attr_name is None
else f"{device.root_device_id}_{device.id}_{attr_name.lower()}"
)
self._device: SHCThermostat = device
def set_native_value(self, value: float) -> None:
"""Update the current value."""
self._device.offset = value
@property
def native_value(self) -> float:
"""Return the value of the number."""
return self._device.offset
@property
def native_step(self) -> float:
"""Return the step of the number."""
return self._device.step_size
@property
def native_min_value(self) -> float:
"""Return the min value of the number."""
return self._device.min_offset
@property
def native_max_value(self) -> float:
"""Return the max value of the number."""
return self._device.max_offset
|
UTF-8
|
Python
| false | false | 2,866 |
py
| 17 |
number.py
| 12 | 0.638172 | 0.638172 | 0 | 94 | 29.489362 | 83 |
zxycode-2020/python_base
| 1,958,505,122,112 |
c11c9c6a10d5d7d11bab87939fd7a3b4c02a3ecb
|
25b6465a0e060087dc432d0abe795fbf85cb873e
|
/day14/tk/22、绝对布局.py
|
812125931759cf51238c605c66873273c4420e0c
|
[] |
no_license
|
https://github.com/zxycode-2020/python_base
|
ee03dd1a7ef2e98c77082e489c1a5df862ed68bd
|
ee6a2b6a0a3a0560c7bfd5bcbf93022e945916cd
|
refs/heads/master
| 2022-04-20T23:41:04.059228 | 2020-04-18T10:05:02 | 2020-04-18T10:05:02 | 256,716,268 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import tkinter
win = tkinter.Tk()
win.title("sunck")
win.geometry("400x400+200+20")
label1=tkinter.Label(win,text="good",bg="blue")
label2=tkinter.Label(win,text="nice",bg="red")
label3=tkinter.Label(win,text="cool",bg="pink")
#绝对布局,窗口的变化对位置没有影响
label1.place(x=10, y=10)
label2.place(x=50, y=50)
label3.place(x=100, y=100)
win.mainloop()
|
UTF-8
|
Python
| false | false | 388 |
py
| 190 |
22、绝对布局.py
| 167 | 0.688202 | 0.601124 | 0 | 16 | 20.375 | 47 |
clulab/releases
| 5,832,565,627,107 |
25c70a2c8d8ba909b10caf5bf3be18af77a2a347
|
135d096f158ac67e4ca2de670cf63281d363c15e
|
/lrec2022-odinsynth/python/rl_utils.py
|
323ea3f9c6360c5917170b8643be79b59c2243fa
|
[] |
no_license
|
https://github.com/clulab/releases
|
93ba25af87bbf65feb7141b46befa981df4f608c
|
60e0c3389724460b5b32ba35c89d8838da4d51c9
|
refs/heads/master
| 2023-07-27T10:05:00.494539 | 2023-07-11T14:09:21 | 2023-07-11T14:09:21 | 70,280,616 | 29 | 18 | null | false | 2023-07-11T14:09:23 | 2016-10-07T20:28:41 | 2023-07-10T10:52:02 | 2023-07-11T14:09:22 | 483,540 | 29 | 12 | 8 |
TeX
| false | false |
"""
Most of the code is from https://github.com/Curt-Park/rainbow-is-all-you-need
There were some modifications to make it suitable for our use case
"""
import collections
from queryast import AstNode
import numpy as np
import random
import torch
# -*- coding: utf-8 -*-
"""Segment tree for Prioritized Replay Buffer."""
import operator
from typing import Callable, Dict, Iterator, List
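# Batch containers returned by the buffers below; they are not defined elsewhere in
# this file, so minimal namedtuple sketches are assumed here to keep it self-contained.
ExperienceReplayBatch = collections.namedtuple(
    'ExperienceReplayBatch', ['obs', 'acts', 'rews', 'next_obs', 'done'])
PrioritizedExperienceReplayBatch = collections.namedtuple(
    'PrioritizedExperienceReplayBatch',
    ['obs', 'acts', 'rews', 'next_obs', 'done', 'weights', 'indices'])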
class ProblemSpecification:
def __init__(self, sentences: List[List[str]], specs: List[dict], vocabulary: dict) -> None:
self.sentences = sentences
self.specs = specs
self.vocabulary = {x:sorted(list(set(vocabulary[x]))) for x in vocabulary}
def __str__(self) -> str:
return f'ProblemSpecification(sentences: {self.sentences}, specs: {self.specs}, vocabulary: {self.vocabulary})'
def construct_query(self, query: str):
return {
'query': query,
'sentences': self.sentences,
'specs': self.specs,
}
def hash(self):
hashes = []
for spec in self.specs:
sentence = self.sentences[spec['sentId']]
start = spec['start']
end = spec['end']
string_to_hash = ' '.join(sentence) + f' {start} {end}'
hashes.append(hash(string_to_hash))
return sum(hashes)
class OdinsynthEnvStep:
def __init__(self, query: AstNode, problem_specification: ProblemSpecification):
self.query = query
self.problem_specification = problem_specification
def __str__(self):
return f"{self.query.pattern()} - {self.problem_specification}"
def hash(self):
return hash(self.query.pattern()) + self.problem_specification.hash()
class SegmentTree:
""" Create SegmentTree.
Taken from OpenAI baselines github repository:
https://github.com/openai/baselines/blob/master/baselines/common/segment_tree.py
Attributes:
capacity (int)
tree (list)
operation (function)
"""
def __init__(self, capacity: int, operation: Callable, init_value: float):
"""Initialization.
Args:
capacity (int)
operation (function)
init_value (float)
"""
assert (
capacity > 0 and capacity & (capacity - 1) == 0
), "capacity must be positive and a power of 2."
self.capacity = capacity
self.tree = [init_value for _ in range(2 * capacity)]
self.operation = operation
def _operate_helper(
self, start: int, end: int, node: int, node_start: int, node_end: int
) -> float:
"""Returns result of operation in segment."""
if start == node_start and end == node_end:
return self.tree[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._operate_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._operate_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self.operation(
self._operate_helper(start, mid, 2 * node, node_start, mid),
self._operate_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end),
)
def operate(self, start: int = 0, end: int = 0) -> float:
"""Returns result of applying `self.operation`."""
if end <= 0:
end += self.capacity
end -= 1
return self._operate_helper(start, end, 1, 0, self.capacity - 1)
def __setitem__(self, idx: int, val: float):
"""Set value in tree."""
idx += self.capacity
self.tree[idx] = val
idx //= 2
while idx >= 1:
self.tree[idx] = self.operation(self.tree[2 * idx], self.tree[2 * idx + 1])
idx //= 2
def __getitem__(self, idx: int) -> float:
"""Get real value in leaf node of tree."""
assert 0 <= idx < self.capacity
return self.tree[self.capacity + idx]
class SumSegmentTree(SegmentTree):
""" Create SumSegmentTree.
Taken from OpenAI baselines github repository:
https://github.com/openai/baselines/blob/master/baselines/common/segment_tree.py
"""
def __init__(self, capacity: int):
"""Initialization.
Args:
capacity (int)
"""
super(SumSegmentTree, self).__init__(
capacity=capacity, operation=operator.add, init_value=0.0
)
def sum(self, start: int = 0, end: int = 0) -> float:
"""Returns arr[start] + ... + arr[end]."""
return super(SumSegmentTree, self).operate(start, end)
def retrieve(self, upperbound: float) -> int:
"""Find the highest index `i` about upper bound in the tree"""
# TODO: Check assert case and fix bug
assert 0 <= upperbound <= self.sum() + 1e-5, "upperbound: {}".format(upperbound)
idx = 1
while idx < self.capacity: # while non-leaf
left = 2 * idx
right = left + 1
if self.tree[left] > upperbound:
idx = 2 * idx
else:
upperbound -= self.tree[left]
idx = right
return idx - self.capacity
class MinSegmentTree(SegmentTree):
""" Create SegmentTree.
Taken from OpenAI baselines github repository:
https://github.com/openai/baselines/blob/master/baselines/common/segment_tree.py
"""
def __init__(self, capacity: int):
"""Initialization.
Args:
capacity (int)
"""
super(MinSegmentTree, self).__init__(
capacity=capacity, operation=min, init_value=float("inf")
)
def min(self, start: int = 0, end: int = 0) -> float:
"""Returns min(arr[start], ..., arr[end])."""
return super(MinSegmentTree, self).operate(start, end)
class ReplayBuffer:
"""
A simple replay buffer.
Consists of a list instead of a numpy/tensor because
our type of data is not numeric and we do the encoding
in the model
"""
def __init__(self, size: int, metadata: dict = {}):
self.obs_buf = [None] * size
self.next_obs_buf = [None] * size
self.acts_buf = [None] * size
self.rews_buf = [None] * size
self.done_buf = [None] * size
self.max_size = size
        self.ptr, self.size = 0, 0
def store(
self,
obs,
act,
rew,
next_obs,
done,
):
self.obs_buf[self.ptr] = obs
self.acts_buf[self.ptr] = act
self.rews_buf[self.ptr] = rew
self.next_obs_buf[self.ptr] = next_obs
self.done_buf[self.ptr] = done
self.ptr = (self.ptr + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
"""
Update the internals of this replay buffer according to its internal update policy
There is no update policy in this case. This type of replay buffer is a FIFO type with
a max capacity
"""
def update(self, data: dict):
return
"""
Sample from the underlying lists
Uses a metadata parameter to make the signature uniform among
other replay buffers which need additional parameters
"""
def sample(self, batch_size, metadata = {}) -> Dict[str, np.ndarray]:
idxs = np.random.choice(self.size, size=batch_size, replace=False)
s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = [], [], [], [], []
for idx in idxs:
s_lst.append(self.obs_buf[idx])
a_lst.append(self.acts_buf[idx])
r_lst.append(self.rews_buf[idx])
s_prime_lst.append(self.next_obs_buf[idx])
done_mask_lst.append(self.done_buf[idx])
return ExperienceReplayBatch(
s_lst,
a_lst,
r_lst,
s_prime_lst,
done_mask_lst,
)
def __len__(self) -> int:
return self.size
class PrioritizedReplayBuffer(ReplayBuffer):
"""Prioritized Replay buffer.
Attributes:
max_priority (float): max priority
tree_ptr (int): next index of tree
alpha (float): alpha parameter for prioritized replay buffer
sum_tree (SumSegmentTree): sum tree for prior
min_tree (MinSegmentTree): min tree for min prior to get max weight
"""
def __init__(
self,
size: int,
        # NOTE: 'beta' is read in __init__, so it must be present in the
        # defaults; 0.4 is a commonly used PER starting value (an assumption here).
        metadata: dict = {'alpha': 0.6, 'beta': 0.4, 'total_timesteps': 10000},
):
"""Initialization."""
assert metadata['alpha'] >= 0
super(PrioritizedReplayBuffer, self).__init__(size)
self.max_priority, self.tree_ptr = 1.0, 0
self.metadata = metadata
self.alpha = metadata['alpha']
self.beta = metadata['beta']
self.total_timesteps = metadata['total_timesteps']
# capacity must be positive and a power of 2.
tree_capacity = 1
while tree_capacity < self.max_size:
tree_capacity *= 2
self.sum_tree = SumSegmentTree(tree_capacity)
self.min_tree = MinSegmentTree(tree_capacity)
self.global_timestep = 0
def store(
self,
obs: OdinsynthEnvStep,
act: int,
rew: float,
next_obs: OdinsynthEnvStep,
done: bool
):
"""Store experience and priority."""
super().store(obs, act, rew, next_obs, done)
self.sum_tree[self.tree_ptr] = self.max_priority ** self.alpha
self.min_tree[self.tree_ptr] = self.max_priority ** self.alpha
self.tree_ptr = (self.tree_ptr + 1) % self.max_size
"""
Sample from the underlying lists
Uses a metadata parameter to access the beta parameter used
for sampling
"""
def sample(self, batch_size) -> Dict[str, np.ndarray]:
self.global_timestep += 1
"""Sample a batch of experiences."""
assert len(self) >= batch_size
fraction = min(self.global_timestep / self.total_timesteps, 1.0)
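        # anneal beta toward 1.0 over training; note that self.beta is
        # overwritten, so the annealing compounds across successive calls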
self.beta = self.beta + fraction * (1.0 - self.beta)
assert self.beta > 0
indices = self._sample_proportional(batch_size)
obs = [self.obs_buf[i] for i in indices]
next_obs = [self.next_obs_buf[i] for i in indices]
acts = [self.acts_buf[i] for i in indices]
rews = [self.rews_buf[i] for i in indices]
done = [self.done_buf[i] for i in indices]
weights = [self._calculate_weight(i, self.beta) for i in indices]
return PrioritizedExperienceReplayBatch(obs, acts, rews, next_obs, done, weights, indices)
def update_priorities(self, indices: List[int], priorities: np.ndarray):
"""Update priorities of sampled transitions."""
assert len(indices) == len(priorities)
for idx, priority in zip(indices, priorities):
assert priority > 0
assert 0 <= idx < len(self)
self.sum_tree[idx] = priority ** self.alpha
self.min_tree[idx] = priority ** self.alpha
self.max_priority = max(self.max_priority, priority)
"""
Update the internals of this replay buffer according to its internal update policy
The update policy in this case is to assign a priority (weight) to each example
Update the priorities accordingly
"""
def update(self, data: dict):
new_priorities = data['loss_for_prior'] + data['prior_eps']
self.update_priorities(data['indices'], new_priorities)
def _sample_proportional(self, batch_size) -> List[int]:
"""Sample indices based on proportions."""
indices = []
p_total = self.sum_tree.sum(0, len(self) - 1)
segment = p_total / batch_size
for i in range(batch_size):
a = segment * i
b = segment * (i + 1)
upperbound = random.uniform(a, b)
idx = self.sum_tree.retrieve(upperbound)
indices.append(idx)
return indices
def _calculate_weight(self, idx: int, beta: float):
"""Calculate the weight of the experience at idx."""
# get max weight
p_min = self.min_tree.min() / self.sum_tree.sum()
max_weight = (p_min * len(self)) ** (-beta)
# calculate weights
p_sample = self.sum_tree[idx] / self.sum_tree.sum()
weight = (p_sample * len(self)) ** (-beta)
weight = weight / max_weight
return weight
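# In formula form, with N = len(self) and P(i) the normalized priority,
# the importance-sampling weight computed by _calculate_weight above is
#     w_i = (N * P(i)) ** (-beta) / max_j (N * P(j)) ** (-beta)
# e.g. with stored priorities [0.1, 0.2, 0.3, 0.4] (alpha already applied),
# N = 4 and beta = 0.4: P(3) = 0.4, so w_3 = 1.6**-0.4 / 0.4**-0.4 ≈ 0.57.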
ExperienceReplayBatch = collections.namedtuple('ExperienceReplayBatch', ['obs', 'acts', 'rews', 'next_obs', 'done'])
PrioritizedExperienceReplayBatch = collections.namedtuple('PrioritizedExperienceReplayBatch', ['obs', 'acts', 'rews', 'next_obs', 'done', 'weights', 'indices'])
# NOTE: batching is handled here. That is, iterating over this dataset
# yields a full batch (with the batch_size specified when creating the
# object). The class inherits from IterableDataset because the underlying
# buffer (@param buffer) can grow in size during training.
class ReplayBufferDataset(torch.utils.data.IterableDataset):
def __init__(self, buffer, batch_size, total_timesteps) -> None:
super(ReplayBufferDataset).__init__()
self.batch_size = batch_size
self.buffer = buffer
self.total_timesteps = total_timesteps
def __iter__(self) -> Iterator:
while True:
sample = self.buffer.sample(self.batch_size)
yield sample
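# Illustrative sketch (assumption, not original code): because the dataset
# already yields full batches, pair it with a DataLoader using batch_size=None
# to disable PyTorch's automatic collation. Uncomment to run:
# dataset = ReplayBufferDataset(buffer, batch_size=32, total_timesteps=10000)
# loader = torch.utils.data.DataLoader(dataset, batch_size=None)
# batch = next(iter(loader))  # an ExperienceReplayBatch (or prioritized variant)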
# A pytorch dataset over a list
class ListDataset(torch.utils.data.Dataset):
def __init__(self, data: List) -> None:
super(ListDataset).__init__()
self.data = data
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
import gym
class ListBasedGymEnv(gym.Wrapper):
def __init__(self, env=None):
super().__init__(env)
def step(self, action):
step = self.env.step(action)
return (step[0].tolist(),) + step[1:]
def reset(self):
res = self.env.reset()
return res.tolist()
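# Illustrative sketch (assumption): wrapping a classic-gym env whose
# observations are numpy arrays, so step()/reset() return plain lists instead.
# env = ListBasedGymEnv(gym.make('CartPole-v1'))
# obs = env.reset()                    # a list, not an ndarray
# obs, rew, done, info = env.step(0)   # old 4-tuple gym API assumed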
# per = PrioritizedReplayBuffer(300, 1000, 3, 0.6)
# per.store(['a', 'b'], 0, 0.1, ['a', 'c'], False)
# per.store(['a', 'b'], 1, 0.2, ['a', 'd'], False)
# per.store(['b', 'a'], 0, 0.3, ['b', 'd'], False)
# per.store(['b', 'a'], 1, 0.3, ['b', 'e'], False)
# per.store(['a', 'a'], 0, 0.3, ['b', 'b'], False)
# per.store(['a', 'a'], 1, 0.3, ['a', 'c'], False)
# x = per.sample_batch()
|
UTF-8
|
Python
| false | false | 14,557 |
py
| 1,590 |
rl_utils.py
| 249 | 0.574638 | 0.567425 | 0 | 455 | 30.989011 | 161 |
adamgreenhall/openreviewquarterly
| 19,129,784,349,566 |
4dd05c568820cb146f192f7a5e64c897e76d07fa
|
29c6264b6a0ecdf301ebc6bf580520cf3f81a481
|
/builder/getgoogledoc.py
|
10dbd4f503504807d63f13882a805ab734733f6f
|
[
"MIT"
] |
permissive
|
https://github.com/adamgreenhall/openreviewquarterly
|
1f022be164b6d94a807872cc9286dbfe87796114
|
8d04fa129a43ed80b1351b1fb1467b8243d1dcdf
|
refs/heads/master
| 2020-04-05T23:41:53.697130 | 2015-09-02T18:54:41 | 2015-09-02T18:54:41 | 5,611,451 | 4 | 0 | null | false | 2014-07-16T16:27:10 | 2012-08-30T06:43:19 | 2014-06-04T09:27:16 | 2014-07-16T16:27:10 | 13,445 | 2 | 0 | 2 |
Ruby
| null | null |
#http://code.google.com/apis/documents/docs/1.0/developers_guide_python.html#DownloadingWPDocuments
import getpass,logging
import sys,os
#sys.path.insert(0,'../../code/python/gdata-current/gdata-python-client/src/')
#sys.path.append('~/Documents/code/python/')
from gdocs3 import client as gclient
#from gdata import spreadsheet.service.SpreadsheetsService as gdata_spreadsheetService
import gdata.spreadsheet.service
email_default = 'adam.greenhall@gmail.com'
app_name='adam-orq-builder'
def docs_authenticate(email=email_default,pw=None,get_spreadsheets=False):
def spreadsheet_authenticate(email=email_default,pw=None):
gs_client = gdata.spreadsheet.service.SpreadsheetsService()
gs_client.ssl = True
gs_client.ClientLogin(email,pw,source=app_name)
return gs_client
gd = gclient.DocsClient(source=app_name)
if pw is None:
logging.info('logging in to gDocs as: {}'.format(email))
pw=getpass.getpass()
gd.ClientLogin(email, pw, gd.source)
gd.ssl = True # Force all API requests through HTTPS
gd.http_client.debug = False # Set to True for debugging HTTP requests
if get_spreadsheets: gs=spreadsheet_authenticate(email,pw)
else: gs=None
return dict(document=gd,spreadsheet=gs)
def find_file(clients,title,kind='document'):
def retry_without_exact(uri):
uri=uri.replace('title-exact=true','')
return client.GetDocList(uri),uri
if kind not in ['document','spreadsheet','pdf']: kind=None
#query='?title={}&title-exact=true&max-results=5'.format(title.replace(' ','&'))
query='?title={}&max-results=5'.format(title.replace(' ','&'))
if kind is None: uri='/feeds/default/private/full'+query
else: uri='/feeds/default/private/full/-/{}'.format(kind)+query
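    # e.g. title='orq1 pieces', kind='spreadsheet' yields
    # '/feeds/default/private/full/-/spreadsheet?title=orq1&pieces&max-results=5'
    # (note: replacing spaces with '&' turns later words into separate URI params)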
feed = clients['document'].GetDocList(uri)
try: return feed.entry[0]
except IndexError:
#feed,uri=retry_without_exact(uri)
#try: return feed.entry[0]
#except IndexError:
msg='Document {title} not found in gDocs. Search was for {search}'.format(title=title,search=uri)
raise IOError(msg)
def find_folder(clients,foldername):
feed = clients['document'].GetDocList(uri='/feeds/default/private/full/-/folder')
for entry in feed.entry:
if entry.title.text.lower() == foldername.lower(): return entry
else: raise IOError('folder "{}" not found'.format(foldername))
def download_folder(clients,folder,localdirname='',kind='document',exportas='html'):
outfilenames=[]
folderfeed = clients['document'].GetDocList(uri=folder.content.src)
for doc in folderfeed.entry:
if doc.GetDocumentType() == kind:
filename=download_file(clients,doc,dirname=localdirname,exportas=exportas)
outfilenames.append(filename)
return outfilenames
def download_file(clients,doc,dirname='',exportas='html'):
filename = os.path.join(dirname,doc.title.text+'.'+exportas)
logging.info('Downloading document to {f}...'.format(f=filename))
docs_token=clients['document'].auth_token
if doc.GetDocumentType()=='spreadsheet':
logging.debug('authorizing spreadsheet download')
clients['document'].auth_token = gdata.gauth.ClientLoginToken(clients['spreadsheet'].GetClientLoginToken())
try: clients['document'].Export(doc, filename)
    except Exception:
        msg = 'couldn\'t export "{name}" ({nativekind}) as {kind}'.format(name=doc.title.text,nativekind=doc.GetDocumentType(),kind=exportas)
logging.error(msg)
raise
clients['document'].auth_token=docs_token #reset to the documents authorization
return filename
def download(title='biographies',
foldername=None,
exportas='html',
kind='document',
dirname='.',
email=email_default,
clients=None):
#login
if clients is None: clients=docs_authenticate(email=email,pw=None,get_spreadsheets=True)
if foldername is None:
#find doc
doc=find_file(clients,title,kind)
#export
filename = download_file(clients, doc, dirname=dirname,exportas=exportas)
return filename
else:
#find folder
folder=find_folder(clients,foldername)
#export
filenames=download_folder(clients,folder,localdirname=dirname,kind=kind,exportas='html')
return filenames
def test():
logging.basicConfig( level=logging.DEBUG, format='%(levelname)s: %(message)s')
#filename=download(title='biographies',exportas='html',kind='document',dirname='info')
filename=download(title='orq1 pieces',exportas='csv',kind='spreadsheet',dirname='info')
#filename=download(foldername='ORQ4 interview',exportas='html',kind='document',dirname='info/orq4 interview')
if __name__ == '__main__': test()
|
UTF-8
|
Python
| false | false | 4,836 |
py
| 83 |
getgoogledoc.py
| 7 | 0.682589 | 0.680314 | 0 | 119 | 39.647059 | 141 |
EmilyXiong/AIFirseSem
| 429,496,738,485 |
2da830d794f78da0c134cb77b5180b826725ff63
|
088c0e9cc556f97a7ab61a2c2d5d6aa15e554748
|
/AIClass/PythonLabs/src/Lab02/ex22.py
|
481349fa422b21b52fa256517a1db082dae2d00d
|
[] |
no_license
|
https://github.com/EmilyXiong/AIFirseSem
|
f5ae560fe7aaca55e721759dfe20f51ae8dd3fbc
|
5b8e1fe74a90f41a3fb3f607a873439945957170
|
refs/heads/master
| 2021-04-30T10:31:28.692649 | 2018-02-13T03:33:59 | 2018-02-13T03:33:59 | 121,334,677 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
strInput = sys.argv[1].split()
for word in strInput:
print (word[::-1])
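# Example: `python ex22.py "abc def"` prints "cba" then "fed",
# since word[::-1] reverses each whitespace-separated word.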
|
UTF-8
|
Python
| false | false | 87 |
py
| 63 |
ex22.py
| 60 | 0.655172 | 0.632184 | 0 | 5 | 16.6 | 30 |
goibibo/pyconhack-mashup
| 2,362,232,058,116 |
eb24f5b2f9424fbe49dfecbac237dbb8680e88aa
|
cdad769f99f8eb68a4ae0bdcef0d3f2904f424d9
|
/goibibo/airportlist.py
|
683ad5cb8213f4efe79e8e8e23786dee71973923
|
[] |
no_license
|
https://github.com/goibibo/pyconhack-mashup
|
76b49df7569eb11570099c4bb76325e97f78f85d
|
798fbe3575d839053c160f52623fc262f7f4b52b
|
refs/heads/master
| 2016-08-01T00:53:48.634836 | 2013-11-13T19:32:26 | 2013-11-13T19:32:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
Agartala, IXA
Agatti Island, AGX
Agra, AGR
Ahmedabad, AMD
Aizawl, AJL
Akola, AKD
Allahabad, IXD
Amritsar, ATQ
Aurangabad, IXU
Bagdogra, IXB
Bangalore, BLR
Belgaum, IXG
Bellary, BEP
Bhopal, BHO
Bhubaneshwar, BBI
Bhuj, BHJ
Bikaner, BKB
Bilaspur, PAB
Calicut, CCJ
Car Nicobar, CBD
Chandigarh, IXC
Coimbatore, CJB
Cooch Behar, COH
Daman, NMB
Dehradun, DED
Delhi, DEL
Dharamshala, DHM
Dibrugarh, DIB
Dimapur, DMU
Diu, DIU
Gaya, GAY
Goa, GOI
Gorakhpur, GOP
Guwahati, GAU
Gwalior, GWL
Hubli, HBX
Hyderabad, HYD
Imphal, IMF
Indore, IDR
Jabalpur, JLR
Jagdalpur, JGB
Jaipur, JAI
Jaisalmer, JSA
Jammu, IXJ
Jamshedpur, IXW
Jodhpur, JDH
Jorhat, JRH
Kailashahar, IXH
Kanpur, KNU
Khajuraho, HJR
Kochi, COK
Kolhapur, KLH
Kolkata, CCU
Kota, KTU
Kulu, KUU
Latur, LTU
Leh, IXL
Lilabari, IXI
Lucknow, LKO
Ludhiana, LUH
Madras, MAA
Madurai, IXM
Malda, LDA
Mangalore, IXE
Mumbai, BOM
Mysore, MYQ
Nagpur, NAG
Nanded, NDC
Nasik, ISK
Neyveli, NVY
Pantnagar, PGH
Pathankot, IXP
Patna, PAT
Port Blair, IXZ
Pune, PNQ
Puttaparthi, PUT
Raipur, RPR
Rajahmundry, RJA
Rajkot, RAJ
Rajouri, RJI
Ranchi, IXR
Rewa, REW
Rourkela, RRK
Salem, SXV
Satna, TNI
Shillong, SHL
Shimla, SLV
Sholapur, SSE
Silchar, IXS
Srinagar, SXR
Surat, STV
Tezpur, TEZ
Tezu, TEI
Tirupati, TIR
Trichy, TRZ
Trivandrum, TRV
Tuticorin, TCR
Udaipur, UDR
Vadodara, BDQ
Varanasi, VNS
Vijayawada, VGA
Visakhapatnam, VTZ
Warrangal, WGC
|
UTF-8
|
Python
| false | false | 1,366 |
py
| 11 |
airportlist.py
| 8 | 0.770864 | 0.770864 | 0 | 103 | 12.262136 | 18 |
dvalenza/flask-app-structure2.0
| 4,123,168,605,371 |
be99b3df2a65f52195162bfdcfd28a477fc7c379
|
c1c75b6e163ff5612b5a6133ed59bc5d5b84e923
|
/myapp/users/forms.py
|
3ed339ecdfc3d458f8bec6cd6ec3d1e89e7d4133
|
[] |
no_license
|
https://github.com/dvalenza/flask-app-structure2.0
|
6870cd6c22a80d67200f09c2b94c001a1e61e49c
|
8017ce6f84b183a67105ac828e1ba70eb959bc42
|
refs/heads/master
| 2020-12-29T01:11:58.093832 | 2014-12-07T22:24:20 | 2014-12-07T22:24:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask.ext.wtf import Form
from wtforms import TextField, PasswordField, SubmitField
from wtforms.validators import Required, Length
class LoginForm(Form):
email = TextField('Email', validators=[Required()])
password = PasswordField('Password', validators=[Required(), Length(min=5)])
submit = SubmitField('Submit')
class CreateAccForm(Form):
username = TextField('Username', validators=[Required()])
password = PasswordField('Password', validators=[Required(), Length(min=5)])
email = TextField('Email', validators=[Required()])
submit = SubmitField("Create account")
|
UTF-8
|
Python
| false | false | 604 |
py
| 5 |
forms.py
| 3 | 0.728477 | 0.725166 | 0 | 15 | 39.266667 | 80 |
pony999/sentdex
| 7,756,710,951,889 |
f32aa50291edc3dfa665bc50e50843767afc3b19
|
f4a60738d7e3155049a314f9fae99a94960f7d75
|
/Matplotlib/T11-HandlingUnixTime.py
|
0b42858945f22ecc1be173be2b4394244e3a6764
|
[] |
no_license
|
https://github.com/pony999/sentdex
|
5a8f9163cb353c45b1f362c7fa56cebe0651b8d2
|
434c387caf8c65fdf78483b54e84b3a379d20c97
|
refs/heads/master
| 2021-04-15T04:53:44.333599 | 2018-06-05T22:22:21 | 2018-06-05T22:22:21 | 126,474,607 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
""" Source:
https://www.youtube.com/watch?v=aRQxMYoCOuI&index=11&list=PLQVvvaa0QuDfefDfXb9Yf0la1fPDKluPF
Tutorial 11:
Handling Unix Time
"""
import datetime as dt
import time
import numpy as np
example = time.time()
print(example) # print current unix time
print(dt.datetime.fromtimestamp(example)) # print current time
dateconv = np.vectorize(dt.datetime.fromtimestamp) # convert unix time
date = dateconv(example)
print(date)
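# Because dateconv is vectorized, it also handles whole arrays of timestamps, e.g.:
# stamps = np.array([example, example + 3600])
# print(dateconv(stamps))  # two datetimes, exactly one hour apart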
|
UTF-8
|
Python
| false | false | 506 |
py
| 35 |
T11-HandlingUnixTime.py
| 30 | 0.667984 | 0.652174 | 0 | 20 | 24.3 | 96 |
FrancineU/Flask_Project3
| 10,488,310,161,474 |
a5d127db3325706d48a26d0be1444bf226e8e592
|
e42a1fd45e1d634d6392a616190414c4ab5c2bbb
|
/tests/test_pitch.py
|
3367c928ae8f3e95a3e762457aae80c763397e60
|
[] |
no_license
|
https://github.com/FrancineU/Flask_Project3
|
ae9a711bd2e00b238911d87e501b39e6ce3fbf63
|
004ba315f660f3be2bc790f2e6c880eb82af4ff3
|
refs/heads/master
| 2023-04-04T07:34:34.524377 | 2021-03-31T19:48:14 | 2021-03-31T19:48:14 | 353,472,017 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import unittest
from ..models import Pitch
from .. import db
class PitchTest(unittest.TestCase):
'''
A class to test CRUD operation on Pitch Model
'''
    def setUp(self):  # must be spelled 'setUp' for unittest to call it
self.new_pitch = Pitch(project_name = "Hello World", pitch_description = "blablablabla blaalbal")
    def test_create_pitch(self):  # must start with 'test_' for unittest discovery
db.session.add(self.new_pitch)
db.session.commit()
saved_pitch = Pitch.query.filter_by(project_name = "Hello World").first()
self.assertEqual("Hello World", saved_pitch.project_name)
|
UTF-8
|
Python
| false | false | 550 |
py
| 9 |
test_pitch.py
| 6 | 0.656364 | 0.656364 | 0 | 17 | 30.823529 | 105 |
miguelangelgordillo/ejemplo1
| 11,639,361,412,722 |
f1fb426e54eb07f0da03d106b94d466d8160d066
|
f2266a75409bb1cd5321497015c501c8f54d0461
|
/ejemplo1.py
|
8963578eb338b675ce757d3fcbce58d644b2c848
|
[] |
no_license
|
https://github.com/miguelangelgordillo/ejemplo1
|
46365084fd71d95bf8988ed78219568934e7d6a7
|
e460b5cf6feac7ecc1707eadee5d5f84fc9d55bb
|
refs/heads/master
| 2021-01-10T06:31:02.906098 | 2015-10-02T07:24:03 | 2015-10-02T07:24:03 | 43,540,315 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'miguel'
palabras=["abc","xd","crm","zl","xb"]
listax=[]
listas=[]
for letra in palabras:
if letra[0] =="x":
listax.append(letra)
else:
listas.append(letra)
lista_final= sorted(listax) + sorted(listas)
print lista_final
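# expected output: ['xb', 'xd', 'abc', 'crm', 'zl']
# (words starting with 'x' come first, then the rest, each group sorted alphabetically)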
|
UTF-8
|
Python
| false | false | 258 |
py
| 2 |
ejemplo1.py
| 2 | 0.612403 | 0.608527 | 0 | 13 | 18.923077 | 44 |
tushar-rishav/Apeksha
| 3,444,563,798,800 |
1f2bf16d31016bff13a2d03767282b54042b1bda
|
b62d10a3efbc50db39307afcd1d93f9b447c14dc
|
/app/routes.py
|
2a0c5aa497fdcd68e03ffa6cf51d500c1c7fac9c
|
[] |
no_license
|
https://github.com/tushar-rishav/Apeksha
|
9e64ddb11c361c1ef32c394faa9cbcc06c94f331
|
e4f04fc15026c149e51a22103547f3854703fd91
|
refs/heads/master
| 2021-01-10T03:01:55.372007 | 2017-10-24T17:35:16 | 2017-10-25T19:58:28 | 49,715,267 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from app import App
from flask import render_template, request, flash, session, url_for, redirect
from forms import SigninForm
import datetime as dt
from models import db, Respon
class Date(object):
end_time = dt.datetime(2017, 10, 25, 17, 32, 0, 0)
def __init__(self):
pass
track = Date()
subj_questions = {
"Physics" : [0,30],
"Chemistry": [30,60],
"Mathematics":[60,90]
}
def end():
global track
return dt.datetime.now() > track.end_time
def update_response_str(db_resp, form_resp, subject):
db_resp = list(db_resp)
    start, end = subj_questions.get(subject, [0, 0])
form_resp = dict(form_resp)
for i in xrange(start,end):
form_resp[str(i+1)] = form_resp.get(str(i+1),[u'X'])[0]
for i in xrange(0,30):
# try:
# if form_resp[str(start+i+1)] != 'X':
db_resp[i] = form_resp[str(start+i+1)]
# except Exception as e:
# print(e)
return ''.join(db_resp)
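# Worked example: subject='Chemistry' maps to questions 31-60 (start=30).
# A form containing only {'31': ['A']} is first normalized so every key
# '31'..'60' exists ('X' where unanswered), then copied into positions
# 0..29 of the 30-character DB string, giving 'A' + 'X' * 29.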
@App.route('/')
def home():
return render_template('home.html', title="home", msg=None, en=False)
@App.route('/about')
def about():
return render_template('about.html', title="about")
@App.route('/contact', methods=['GET', 'POST'])
def contact():
pass
@App.route('/next')
@App.route('/next/<subj>')
def next(subj=None):
if 'reg' in session:
import json
global track
track = Date()
#print "END ",end()
if not end():
response = Respon.query.filter_by(reg_id=session['reg']).first()
if not response:
db.session.add(Respon(session['reg'], "X"*30, "X"*30, "X"*30))
db.session.commit()
response = Respon.query.filter_by(reg_id=session['reg']).first()
temp = {
"Physics":response.Physics,
"Chemistry":response.Chemistry,
"Mathematics":response.Mathematics
}
return render_template('next.html', question=range(1, 91), options=['A', 'B', 'C', 'D'],
title=subj if subj else None, start=subj_questions.get(subj,[0,0])[0],
last=subj_questions.get(subj,[0,0])[1], attempts = temp.get(subj,"X"*30) if subj else "X"*30 )
else:
return render_template('home.html', title="Contest ended", msg="Contest ended", en=True)
else:
return redirect(url_for('signin'))
@App.route('/next/<subj>/save',methods=['POST'])
def save(**args):
resp_str = ""
if 'reg' not in session:
return redirect(url_for('signin'))
if request.method == 'POST':
subj = str(request.form['subject'])
response = Respon.query.filter_by(reg_id=session['reg']).first()
if not response:
db.session.add(Respon(session['reg'], "X"*30, "X"*30, "X"*30))
db.session.commit()
response = Respon.query.filter_by(reg_id=session['reg']).first()
if subj == "Physics":
resp_str = str(response.Physics)
resp_str = update_response_str(resp_str, request.form, subj)
response.Physics = resp_str
elif subj == "Chemistry":
resp_str = str(response.Chemistry)
resp_str = update_response_str(resp_str, request.form, subj)
response.Chemistry = resp_str
else:
resp_str = str(response.Mathematics)
resp_str = update_response_str(resp_str, request.form, subj)
response.Mathematics = resp_str
db.session.commit()
return render_template('next.html', question=range(1, 91), options=['A', 'B', 'C', 'D'],
title=subj if subj else None, start=subj_questions.get(subj,[0,0])[0],
last=subj_questions.get(subj,[0,0])[1], attempts = resp_str)
@App.route('/signin', methods=['GET', 'POST'])
def signin():
if end():
return render_template('home.html', title="Contest ended", msg="Contest ended", en=True)
form = SigninForm(request.form)
print "FORM", form.__dict__
print "SESSION", session.__dict__
if 'reg' in session:
return redirect(url_for('next'))
if request.method == 'POST':
if form.validate_form() == False:
return render_template('signin.html', form=form, title="signin")
else:
session['reg'] = form.reg.data
return redirect(url_for('next'))
elif request.method == 'GET':
return render_template('signin.html', form=form, title="signin")
@App.route('/signout')
def signout():
if 'reg' not in session:
return redirect(url_for('signin'))
session.pop('reg', None)
return redirect(url_for('home'))
|
UTF-8
|
Python
| false | false | 4,790 |
py
| 11 |
routes.py
| 4 | 0.564301 | 0.549061 | 0 | 146 | 31.808219 | 130 |
1234567890boo/ywviktor
| 11,613,591,616,095 |
606a81a812a38ea0c6f53e889eb71c56d7c23e9c
|
32aa592fc3b7376b8fb36c0ac2245e6571fb7bdd
|
/maze_game/classes/empty.py
|
d1df86dab7fde1a7c9e6440b3160d50fdebcefd1
|
[] |
no_license
|
https://github.com/1234567890boo/ywviktor
|
00063a1c58b392cb4230791a9cffced6d2864889
|
12b18887243e9b64fb08db4ad440c7144bdf8cbb
|
refs/heads/master
| 2022-05-14T12:43:43.422329 | 2022-04-30T04:24:05 | 2022-04-30T04:24:05 | 57,740,866 | 0 | 0 | null | false | 2020-06-29T00:22:12 | 2016-05-01T18:48:27 | 2020-06-29T00:21:34 | 2020-06-29T00:21:32 | 1,360 | 0 | 0 | 0 |
Python
| false | false |
from classes.gridobj import *
class Empty(GridObj):
def kind(self):
return "Empty"
def draw(self,screen,x,y,width,height):
pass
def canMoveInto(self, obj):
return True
|
UTF-8
|
Python
| false | false | 205 |
py
| 171 |
empty.py
| 130 | 0.62439 | 0.62439 | 0 | 9 | 21.777778 | 43 |
BenaroyaResearch/bripipetools
| 2,946,347,612,714 |
f836ea5c17830864292919fc514a7f2968df47ec
|
efb09be1bacad1be6b12d539433299725c11a21d
|
/tests/test_submission.py
|
9b739ce0f2f25fb0b6744cfe77880b92a98acd04
|
[
"MIT"
] |
permissive
|
https://github.com/BenaroyaResearch/bripipetools
|
92dad1bd39b4c709d9deb7727b91c9e20b9fd0fc
|
5ee23751b0f28a17baaf0428fe18af8c50b341db
|
refs/heads/master
| 2023-02-17T15:34:43.777561 | 2023-02-13T20:11:49 | 2023-02-13T20:11:49 | 48,504,657 | 3 | 0 |
MIT
| false | 2023-02-13T20:11:51 | 2015-12-23T18:13:54 | 2022-03-29T21:24:37 | 2023-02-13T20:11:49 | 91,852 | 1 | 0 | 17 |
HTML
| false | false |
import logging
import os
import re
import datetime
import mock
import mongomock
import pytest
from bripipetools import annotation
from bripipetools import submission
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
@pytest.fixture(scope='function')
def mock_db():
    # GIVEN a mocked version of the TG3 Mongo database
    logger.info(("[setup] mock database, connect "
                 "to mock Mongo database"))
    yield mongomock.MongoClient().db
    logger.debug(("[teardown] mock database, disconnect "
                  "from mock Mongo database"))
@pytest.fixture(scope='function')
def mock_params():
yield [{'tag': 'SampleName', 'type': 'sample', 'name': 'SampleName'},
{'tag': 'fastq_in', 'type': 'input', 'name': 'from_endpoint'},
{'tag': 'fastq_in', 'type': 'input', 'name': 'from_path4'},
{'tag': 'fastq_in', 'type': 'input', 'name': 'from_path5'},
{'tag': 'fastq_in', 'type': 'input', 'name': 'from_path6'},
{'tag': 'fastq_in', 'type': 'input', 'name': 'from_path7'},
{'tag': 'fastq_in', 'type': 'input', 'name': 'from_path1'},
{'tag': 'fastq_in', 'type': 'input', 'name': 'from_path2'},
{'tag': 'fastq_in', 'type': 'input', 'name': 'from_path3'},
{'tag': 'fastq_in', 'type': 'input', 'name': 'from_path8'},
{'tag': 'annotation_gtf', 'type': 'annotation', 'name': 'gtfFile'},
{'tag': 'annotation_adapters', 'type': 'annotation', 'name': 'adapterFile'},
{'tag': 'annotation_ribosomal_intervals', 'type': 'annotation', 'name': 'ribointsFile'},
{'tag': 'annotation_refflat', 'type': 'annotation', 'name': 'refflatFile'},
{'tag': 'tophat_alignments_bam_out', 'type': 'output', 'name': 'to_endpoint'},
{'tag': 'tophat_alignments_bam_out', 'type': 'output', 'name': 'to_path'}]
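# Note: each parameter dict mimics one column of a Globus Galaxy workflow
# batch template (tag/type/name). The deliberately shuffled from_path4..from_path8,
# from_path1..from_path3 ordering is what test_get_lane_order below asserts on.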
class TestBatchParameterizer:
"""
"""
def test_get_lane_order(self, mock_params):
# mock_filename = '161231_P00-00_C00000XX_optimized_workflow_1.txt'
# mock_file = mock_template(mock_filename, tmpdir)
mock_paths = ['lib1111-11111111', 'lib2222-22222222']
parameterizer = submission.BatchParameterizer(
sample_paths=mock_paths,
parameters=mock_params,
endpoint='',
target_dir=''
)
logger.debug("{}".format(parameterizer.parameters))
assert (parameterizer._get_lane_order()
== ['4', '5', '6', '7', '1', '2', '3', '8'])
def test_get_lane_fastq_when_lane_exists(self, mock_params, tmpdir):
mock_path = tmpdir.mkdir('lib1111-11111111')
mock_lane = '2'
mock_fastqpath = mock_path.ensure(
'sample-name_S001_L00{}_R1_001.fastq.gz'.format(mock_lane)
)
parameterizer = submission.BatchParameterizer(
sample_paths=[],
parameters=mock_params,
endpoint='',
target_dir=''
)
test_fastqpath = parameterizer._get_lane_fastq(str(mock_path), mock_lane)
assert (test_fastqpath == mock_fastqpath)
def test_get_lane_fastq_when_lane_missing(self, mock_params, tmpdir):
mock_path = tmpdir.mkdir('lib1111-11111111')
mock_lane = '2'
mock_path.ensure('sample-name_S001_L001_R1_001.fastq.gz')
mock_fastqpath = mock_path.join('empty_L00{}_R1.fastq.gz'
.format(mock_lane))
parameterizer = submission.BatchParameterizer(
sample_paths=[],
parameters=mock_params,
endpoint='',
target_dir=''
)
test_fastqpath = parameterizer._get_lane_fastq(str(mock_path), mock_lane)
assert (test_fastqpath == mock_fastqpath)
def test_build_reference_path(self, mock_params):
parameterizer = submission.BatchParameterizer(
sample_paths=[],
parameters=mock_params,
endpoint='',
target_dir=''
)
mock_param = {'tag': 'annotation_gtf',
'type': 'annotation',
'name': 'gtfFile'}
test_path = parameterizer._build_reference_path(mock_param)
mock_path = 'library::annotation::GRCh38/Homo_sapiens.GRCh38.77.gtf'
assert (test_path == mock_path)
def test_set_option_value(self, mock_params):
parameterizer = submission.BatchParameterizer(
sample_paths=[],
parameters=mock_params,
endpoint='',
target_dir='',
build='GRCh38',
)
mock_param = {'tag': 'option_tophat',
'type': 'option',
'name': 'index'}
test_value = parameterizer._set_option_value(mock_param)
mock_value = 'GRCh38'
assert (test_value == mock_value)
def test_set_option_value_invalid_build(self, mock_params):
parameterizer = submission.BatchParameterizer(
sample_paths=[],
parameters=mock_params,
endpoint='',
target_dir='',
build='GRCh38'
)
mock_param = {'tag': 'option_bowtie2',
'type': 'option',
'name': 'index'}
with pytest.raises(KeyError):
parameterizer._set_option_value(mock_param)
def test_set_option_value_not_stranded(self, mock_params):
parameterizer = submission.BatchParameterizer(
sample_paths=[],
parameters=mock_params,
endpoint='',
target_dir='',
build='GRCh38',
)
mock_param = {'tag': 'option_tophat',
'type': 'option',
'name': 'library_type'}
test_value = parameterizer._set_option_value(mock_param)
mock_value = 'fr-unstranded'
assert (test_value == mock_value)
def test_set_option_value_stranded(self, mock_params):
parameterizer = submission.BatchParameterizer(
sample_paths=[],
parameters=mock_params,
endpoint='',
target_dir='',
build='GRCh38',
stranded=True
)
mock_param = {'tag': 'option_tophat',
'type': 'option',
'name': 'library_type'}
test_value = parameterizer._set_option_value(mock_param)
mock_value = 'fr-firststrand'
assert (test_value == mock_value)
def test_prep_output_dir(self, mock_params, tmpdir):
parameterizer = submission.BatchParameterizer(
sample_paths=[],
parameters=mock_params,
endpoint='',
target_dir=str(tmpdir)
)
mock_outtype = 'counts'
mock_path = tmpdir.join(mock_outtype)
test_path = parameterizer._prep_output_dir(mock_outtype)
assert (test_path == mock_path)
assert (os.path.exists(test_path))
assert (os.path.isdir(test_path))
def test_build_output_path(self, mock_params, tmpdir):
mock_targetdir = tmpdir.mkdir('pipeline')
parameterizer = submission.BatchParameterizer(
sample_paths=[],
parameters=mock_params,
endpoint='',
target_dir=str(mock_targetdir)
)
mock_sample = 'lib1111_C00000XX'
mock_param = {'tag': 'tophat_alignments_bam_out',
'type': 'output',
'name': 'to_path'}
test_path = parameterizer._build_output_path(mock_sample, mock_param)
mock_path = os.path.join('/mnt/bioinformatics/pipeline/alignments',
'lib1111_C00000XX_tophat_alignments.bam')
assert (test_path == mock_path)
def test_build_sample_parameters(self, mock_params, tmpdir):
mock_runid = '161231_INSTID_0001_AC00000XX'
mock_path = (tmpdir
.mkdir('bioinformatics')
.mkdir('pipeline')
.mkdir(mock_runid)
.mkdir('lib1111-11111111'))
mock_fastqpath = mock_path.ensure(
'sample-name_S001_L001_R1_001.fastq.gz'
)
mock_fastqpath = re.sub(str(tmpdir), '/mnt', str(mock_fastqpath))
mock_emptypaths = {
lane: re.sub(
str(tmpdir), '/mnt', str(mock_path.join(
'empty_L00{}_R1.fastq.gz'.format(lane))
)
)
for lane in range(2, 9)
}
mock_endpoint = 'benaroyaresearch#BRIGridFTP'
mock_targetdir = (tmpdir.join('pipeline').join(mock_runid))
parameterizer = submission.BatchParameterizer(
sample_paths=[str(mock_path)],
parameters=mock_params,
endpoint=mock_endpoint,
target_dir=str(mock_targetdir)
)
test_values = parameterizer._build_sample_parameters(str(mock_path))
mock_refpaths = [
'library::annotation::GRCh38/Homo_sapiens.GRCh38.77.gtf',
'library::annotation::adapters/smarter_adapter_seqs_3p_5p.fasta',
('library::annotation::GRCh38/Homo_sapiens.GRCh38.77'
'.ribosomalIntervalsWheader_reorder.txt'),
'library::annotation::GRCh38/Homo_sapiens.GRCh38.77.refflat.txt',
]
mock_samplename = 'lib1111_C00000XX'
mock_outpath = os.path.join(
'/mnt/bioinformatics/pipeline/', mock_runid,
'alignments', 'lib1111_C00000XX_tophat_alignments.bam'
)
mock_values = ([mock_samplename,
mock_endpoint, mock_emptypaths[4], mock_emptypaths[5],
mock_emptypaths[6], mock_emptypaths[7],
mock_fastqpath, mock_emptypaths[2], mock_emptypaths[3],
mock_emptypaths[8]]
+ mock_refpaths
+ [mock_endpoint, mock_outpath])
assert (test_values == mock_values)
def test_parameterize(self, mock_params, tmpdir):
tmpdir = tmpdir.mkdir('bioinformatics').mkdir('pipeline')
mock_paths = [str(tmpdir.mkdir('lib1111-11111111')),
str(tmpdir.mkdir('lib2222-22222222'))]
parameterizer = submission.BatchParameterizer(
sample_paths=mock_paths,
parameters=mock_params,
endpoint='',
target_dir=str(tmpdir)
)
parameterizer.parameterize()
test_params = parameterizer.samples
assert (len(test_params) == len(mock_paths))
assert (test_params[0][0]
== {'tag': 'SampleName',
'type': 'sample',
'name': 'SampleName',
'value': 'lib1111'})
#@pytest.fixture(scope='function')
def mock_template(filename, tmpdir):
# GIVEN a simplified workflow batch content with protypical contents
mock_params = ['SampleName',
('fastq_in##Param::2942::'
'globus_get_data_flowcell_text::from_endpoint'),
('fastq_in##Param::2942::'
'globus_get_data_flowcell_text::from_path4'),
('fastq_in##Param::2942::'
'globus_get_data_flowcell_text::from_path5'),
('fastq_in##Param::2942::'
'globus_get_data_flowcell_text::from_path6'),
('fastq_in##Param::2942::'
'globus_get_data_flowcell_text::from_path7'),
('fastq_in##Param::2942::'
'globus_get_data_flowcell_text::from_path1'),
('fastq_in##Param::2942::'
'globus_get_data_flowcell_text::from_path2'),
('fastq_in##Param::2942::'
'globus_get_data_flowcell_text::from_path3'),
('fastq_in##Param::2942::'
'globus_get_data_flowcell_text::from_path8'),
'annotation_gtf##SourceType::SourceName::gtfFile',
'annotation_adapters##SourceType::SourceName::adapterFile',
('annotation_ribosomal_intervals##SourceType::'
'SourceName::ribointsFile'),
'annotation_refflat##SourceType::SourceName::refflatFile',
('tophat_alignments_bam_out##Param::2946::'
'globus_send_data::to_endpoint'),
('tophat_alignments_bam_out##Param::2946::'
'globus_send_data::to_path')]
mock_contents = ['###METADATA\n',
'#############\n',
'Workflow Name\toptimized_workflow_1\n',
'Workflow id\tba1f5a6a3d5eec2c\n',
'Project Name\t<Your_project_name>\n',
'###TABLE DATA\n',
'#############\n',
'{}\t'.format('\t'.join(mock_params))]
mock_file = tmpdir.join(filename)
mock_file.write(''.join(mock_contents))
return str(mock_file)
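# The template above is a minimal stand-in for a workflow batch file as the
# tests expect it: a '###METADATA' header block followed by a '###TABLE DATA'
# section whose single header row lists one '##'-qualified parameter per column.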
class TestBatchCreator:
"""
"""
def test_build_batch_name_with_defaults(self, tmpdir):
mock_filename = 'optimized_workflow_1.txt'
mock_file = mock_template(mock_filename, tmpdir)
creator = submission.BatchCreator(
paths=[],
workflow_template=mock_file,
endpoint='',
base_dir=str(tmpdir)
)
test_name = creator._build_batch_name()
mock_date = datetime.date.today().strftime("%y%m%d")
mock_name = '{}__optimized_workflow_1_GRCh38.77_unstrand'.format(
mock_date
)
assert (test_name == mock_name)
def test_build_batch_name_with_tags(self, tmpdir):
mock_filename = 'optimized_workflow_1.txt'
mock_file = mock_template(mock_filename, tmpdir)
mock_grouptag = 'C00000XX'
mock_subgrouptags = ['P1-1', 'P99-99']
creator = submission.BatchCreator(
paths=[],
workflow_template=mock_file,
endpoint='',
base_dir=str(tmpdir),
group_tag=mock_grouptag,
subgroup_tags=mock_subgrouptags
)
test_name = creator._build_batch_name()
mock_date = datetime.date.today().strftime("%y%m%d")
mock_name = '{}_{}_{}_optimized_workflow_1_GRCh38.77_unstrand'.format(
mock_date, mock_grouptag, '_'.join(mock_subgrouptags)
)
assert (test_name == mock_name)
def test_get_sample_paths_for_folders(self, tmpdir):
mock_filename = 'optimized_workflow_1.txt'
mock_file = mock_template(mock_filename, tmpdir)
mock_folders = ['P1-1-11111111', 'P99-99-99999999']
mock_samples = {0: ['lib1111-11111111', 'lib2222-22222222'],
1: ['lib3333-33333333', 'lib4444-44444444']}
mock_paths = []
for idx, f in enumerate(mock_folders):
folderpath = tmpdir.mkdir(f)
mock_paths.append(str(folderpath))
for s in mock_samples[idx]:
folderpath.mkdir(s)
creator = submission.BatchCreator(
paths=mock_paths,
workflow_template=mock_file,
endpoint='',
base_dir=str(tmpdir),
group_tag='',
subgroup_tags=''
)
creator._check_input_type()
assert creator.inputs_are_folders
def test_get_sample_paths_for_samples(self, tmpdir):
mock_filename = 'optimized_workflow_1.txt'
mock_file = mock_template(mock_filename, tmpdir)
mock_folders = ['P1-1-11111111', 'P99-99-99999999']
mock_samples = {0: ['lib1111-11111111', 'lib2222-22222222'],
1: ['lib3333-33333333', 'lib4444-44444444']}
mock_paths = []
for idx, f in enumerate(mock_folders):
folderpath = tmpdir.mkdir(f)
for s in mock_samples[idx]:
samplepath = folderpath.mkdir(s)
mock_paths.append(str(samplepath))
samplepath.ensure('sample-name_S001_L001_R1_001.fastq.gz')
samplepath.ensure('sample-name_S001_L002_R1_001.fastq.gz')
creator = submission.BatchCreator(
paths=mock_paths,
workflow_template=mock_file,
endpoint='',
base_dir=str(tmpdir),
group_tag='',
subgroup_tags=''
)
creator._check_input_type()
assert not creator.inputs_are_folders
def test_get_sample_paths_for_empty_samples(self, tmpdir):
mock_filename = 'optimized_workflow_1.txt'
mock_file = mock_template(mock_filename, tmpdir)
mock_folders = ['P1-1-11111111', 'P99-99-99999999']
mock_samples = {0: ['lib1111-11111111', 'lib2222-22222222'],
1: ['lib3333-33333333', 'lib4444-44444444']}
mock_paths = []
for idx, f in enumerate(mock_folders):
folderpath = tmpdir.mkdir(f)
for s in mock_samples[idx]:
samplepath = folderpath.mkdir(s)
mock_paths.append(str(samplepath))
creator = submission.BatchCreator(
paths=mock_paths,
workflow_template=mock_file,
endpoint='',
base_dir=str(tmpdir),
group_tag='',
subgroup_tags=''
)
with pytest.raises(IndexError):
creator._check_input_type()
def test_prep_target_dir_for_folder(self, tmpdir):
mock_filename = 'optimized_workflow_1.txt'
mock_file = mock_template(mock_filename, tmpdir)
mock_folders = ['P1-1-11111111', 'P99-99-99999999']
mock_paths = []
for idx, f in enumerate(mock_folders):
folderpath = tmpdir.mkdir(f)
mock_paths.append(str(folderpath))
creator = submission.BatchCreator(
paths=mock_paths,
workflow_template=mock_file,
endpoint='',
base_dir=str(tmpdir),
group_tag='',
subgroup_tags=''
)
test_path = creator._prep_target_dir(mock_paths[0])
mock_date = datetime.date.today().strftime("%y%m%d")
mock_path = tmpdir.join(
'Project_P1-1Processed_globus_{}'.format(mock_date)
)
assert (test_path == mock_path)
assert os.path.isdir(str(mock_path))
def test_prep_target_dir_for_samples(self, tmpdir):
mock_filename = 'optimized_workflow_1.txt'
mock_file = mock_template(mock_filename, tmpdir)
mock_folders = ['P1-1-11111111', 'P99-99-99999999']
mock_samples = {0: ['lib1111-11111111', 'lib2222-22222222'],
1: ['lib3333-33333333', 'lib4444-44444444']}
mock_paths = []
for idx, f in enumerate(mock_folders):
folderpath = tmpdir.mkdir(f)
for s in mock_samples[idx]:
samplepath = folderpath.mkdir(s)
mock_paths.append(str(samplepath))
creator = submission.BatchCreator(
paths=mock_paths,
workflow_template=mock_file,
endpoint='',
base_dir=str(tmpdir),
group_tag='Mock',
subgroup_tags=''
)
test_path = creator._prep_target_dir()
mock_date = datetime.date.today().strftime("%y%m%d")
mock_path = tmpdir.join(
'Project_MockProcessed_globus_{}'.format(mock_date)
)
assert (test_path == mock_path)
assert os.path.isdir(str(mock_path))
def test_get_sample_paths_with_defaults(self, tmpdir):
mock_filename = 'optimized_workflow_1.txt'
mock_file = mock_template(mock_filename, tmpdir)
mock_folders = ['P1-1-11111111', 'P99-99-99999999']
mock_samples = {0: ['lib1111-11111111', 'lib2222-22222222'],
1: ['lib3333-33333333', 'lib4444-44444444']}
mock_paths = []
mock_samplepaths = {}
for idx, f in enumerate(mock_folders):
folderpath = tmpdir.mkdir(f)
mock_paths.append(str(folderpath))
for s in mock_samples[idx]:
samplepath = folderpath.mkdir(s)
mock_samplepaths.setdefault(idx, []).append(str(samplepath))
samplepath.ensure('sample-name_S001_L001_R1_001.fastq.gz')
samplepath.ensure('sample-name_S001_L002_R1_001.fastq.gz')
creator = submission.BatchCreator(
paths=mock_paths,
workflow_template=mock_file,
endpoint='',
base_dir=str(tmpdir),
group_tag='',
subgroup_tags=''
)
test_samplepaths = creator._get_sample_paths(mock_paths[0])
assert (test_samplepaths == mock_samplepaths[0])
def test_get_sample_paths_with_num_samples_opt(self, tmpdir):
mock_filename = 'optimized_workflow_1.txt'
mock_file = mock_template(mock_filename, tmpdir)
mock_folders = ['P1-1-11111111', 'P99-99-99999999']
mock_samples = {0: ['lib1111-11111111', 'lib2222-22222222'],
1: ['lib3333-33333333', 'lib4444-44444444']}
mock_paths = []
mock_samplepaths = {}
for idx, f in enumerate(mock_folders):
folderpath = tmpdir.mkdir(f)
mock_paths.append(str(folderpath))
for s in mock_samples[idx]:
samplepath = folderpath.mkdir(s)
mock_samplepaths.setdefault(idx, []).append(str(samplepath))
samplepath.ensure('sample-name_S001_L001_R1_001.fastq.gz')
samplepath.ensure('sample-name_S001_L002_R1_001.fastq.gz')
creator = submission.BatchCreator(
paths=mock_paths,
workflow_template=mock_file,
endpoint='',
base_dir=str(tmpdir),
group_tag='',
subgroup_tags='',
num_samples=1,
)
test_samplepaths = creator._get_sample_paths(mock_paths[0])
assert (test_samplepaths == mock_samplepaths[0][0:1])
def test_get_sample_paths_with_sort_opt(self, tmpdir):
mock_filename = 'optimized_workflow_1.txt'
mock_file = mock_template(mock_filename, tmpdir)
mock_folders = ['P1-1-11111111', 'P99-99-99999999']
mock_samples = {0: ['lib1111-11111111', 'lib2222-22222222'],
1: ['lib3333-33333333', 'lib4444-44444444']}
mock_paths = []
mock_samplepaths = {}
for idx, f in enumerate(mock_folders):
folderpath = tmpdir.mkdir(f)
mock_paths.append(str(folderpath))
for s in mock_samples[idx]:
samplepath = folderpath.mkdir(s)
mock_samplepaths.setdefault(idx, []).append(str(samplepath))
samplepath.ensure('sample-name_S001_L001_R1_001.fastq.gz')
filepath = samplepath.ensure(
'sample-name_S001_L002_R1_001.fastq.gz'
)
if s == mock_samples[idx][0]:
filepath.write('mock contents\n')
creator = submission.BatchCreator(
paths=mock_paths,
workflow_template=mock_file,
endpoint='',
base_dir=str(tmpdir),
group_tag='',
subgroup_tags='',
sort=True,
num_samples=1
)
test_samplepaths = creator._get_sample_paths(mock_paths[0])
assert (test_samplepaths == mock_samplepaths[0][1:])
def test_get_input_params_for_folders(self, tmpdir):
mock_filename = 'optimized_workflow_1.txt'
mock_file = mock_template(mock_filename, tmpdir)
mock_folders = ['P1-1-11111111', 'P99-99-99999999']
mock_samples = {0: ['lib1111-11111111', 'lib2222-22222222'],
1: ['lib3333-33333333', 'lib4444-44444444']}
mock_paths = []
for idx, f in enumerate(mock_folders):
folderpath = tmpdir.mkdir(f)
mock_paths.append(str(folderpath))
for s in mock_samples[idx]:
samplepath = folderpath.mkdir(s)
samplepath.ensure('sample-name_S001_L001_R1_001.fastq.gz')
samplepath.ensure('sample-name_S001_L002_R1_001.fastq.gz')
creator = submission.BatchCreator(
paths=mock_paths,
workflow_template=mock_file,
endpoint='',
base_dir=str(tmpdir),
group_tag='',
subgroup_tags=''
)
test_params = creator._get_input_params()
assert (len(test_params) == 4)
assert (test_params[0][0]['value'] == 'lib1111')
def test_get_input_params_for_samples(self, tmpdir):
mock_filename = 'optimized_workflow_1.txt'
mock_file = mock_template(mock_filename, tmpdir)
mock_folders = ['P1-1-11111111', 'P99-99-99999999']
mock_samples = {0: ['lib1111-11111111', 'lib2222-22222222'],
1: ['lib3333-33333333', 'lib4444-44444444']}
mock_paths = []
for idx, f in enumerate(mock_folders):
folderpath = tmpdir.mkdir(f)
for s in mock_samples[idx]:
samplepath = folderpath.mkdir(s)
mock_paths.append(str(samplepath))
samplepath.ensure('sample-name_S001_L001_R1_001.fastq.gz')
samplepath.ensure('sample-name_S001_L002_R1_001.fastq.gz')
creator = submission.BatchCreator(
paths=mock_paths,
workflow_template=mock_file,
endpoint='',
base_dir=str(tmpdir),
group_tag='',
subgroup_tags=''
)
test_params = creator._get_input_params()
assert (len(test_params) == 4)
assert (test_params[0][0]['value'] == 'lib1111')
def test_create_batch(self, tmpdir):
mock_filename = 'optimized_workflow_1.txt'
mock_file = mock_template(mock_filename, tmpdir)
mock_folders = ['P1-1-11111111', 'P99-99-99999999']
mock_samples = {0: ['lib1111-11111111', 'lib2222-22222222'],
1: ['lib3333-33333333', 'lib4444-44444444']}
mock_paths = []
for idx, f in enumerate(mock_folders):
folderpath = tmpdir.mkdir(f)
mock_paths.append(str(folderpath))
for s in mock_samples[idx]:
samplepath = folderpath.mkdir(s)
samplepath.ensure('sample-name_S001_L001_R1_001.fastq.gz')
samplepath.ensure('sample-name_S001_L002_R1_001.fastq.gz')
mock_endpoint = 'benaroyaresearch#BRIGridFTP'
creator = submission.BatchCreator(
paths=mock_paths,
workflow_template=mock_file,
endpoint=mock_endpoint,
base_dir=str(tmpdir),
group_tag='',
subgroup_tags=''
)
test_path = creator.create_batch()
with open(test_path) as f:
test_contents = f.readlines()
logger.debug("{}".format(test_contents))
assert (len([l for l in test_contents
if re.search('^lib', l)])
== 4)
class TestFlowcellSubmissionBuilder:
"""
"""
def test_init_annotator(self, mock_db):
mock_runid = '161231_INSTID_0001_AC00000XX'
mock_path = '/mnt/bioinformatics/pipeline/Illumina/{}'.format(mock_runid)
mock_endpoint = 'benaroyaresearch#BRIGridFTP'
builder = submission.FlowcellSubmissionBuilder(
path=mock_path,
endpoint=mock_endpoint,
db=mock_db
)
builder._init_annotator()
assert (type(builder.annotator) == annotation.FlowcellRunAnnotator)
def test_get_workflow_options_for_all_workflows(self, mock_db, tmpdir):
mock_runid = '161231_INSTID_0001_AC00000XX'
mock_path = '/mnt/bioinformatics/pipeline/Illumina/{}'.format(mock_runid)
mock_endpoint = 'benaroyaresearch#BRIGridFTP'
mock_workflowdir = tmpdir.mkdir('galaxy_workflows')
mock_workflows = ['workflow1.txt', 'optimized_workflow1.txt']
mock_workflowopts = [str(mock_workflowdir.ensure(w))
for w in mock_workflows]
mock_workflowopts.sort()
builder = submission.FlowcellSubmissionBuilder(
path=mock_path,
endpoint=mock_endpoint,
db=mock_db,
workflow_dir=str(mock_workflowdir)
)
test_workflowopts = builder.get_workflow_options(optimized_only=False)
assert (test_workflowopts == mock_workflowopts)
def test_get_workflow_options_for_optimized_workflows(self, mock_db,
tmpdir):
mock_runid = '161231_INSTID_0001_AC00000XX'
mock_path = '/mnt/bioinformatics/pipeline/Illumina/{}'.format(mock_runid)
mock_endpoint = 'benaroyaresearch#BRIGridFTP'
mock_workflowdir = tmpdir.mkdir('galaxy_workflows')
mock_workflows = ['workflow1.txt', 'optimized_workflow1.txt']
mock_workflowopts = [str(mock_workflowdir.ensure(w))
for w in mock_workflows
if re.search('optimized', w)]
mock_workflowopts.sort()
builder = submission.FlowcellSubmissionBuilder(
path=mock_path,
endpoint=mock_endpoint,
db=mock_db,
workflow_dir=str(mock_workflowdir)
)
test_workflowopts = builder.get_workflow_options()
assert (test_workflowopts == mock_workflowopts)
def test_get_projects(self, mock_db, tmpdir):
# GIVEN a flowcell run ID and an arbitrary root directory,
# under which a folder exists at 'bioinformatics/pipeline/Illumina/<run_id>',
# and that folder contains a subfolder named 'Unaligned'
mock_runid = '161231_INSTID_0001_AC00000XX'
mock_endpoint = 'benaroyaresearch#BRIGridFTP'
# AND the unaligned folder includes multiple project folders
mock_projects = ['P1-1-11111111', 'P99-99-99999999']
mock_path = (tmpdir
.mkdir('bioinformatics')
.mkdir('pipeline')
.mkdir('Illumina')
.mkdir(mock_runid))
mock_unaligndir = mock_path.mkdir('Unaligned')
mock_paths = [mock_unaligndir.mkdir(p) for p in mock_projects]
builder = submission.FlowcellSubmissionBuilder(
path=str(mock_path),
endpoint=mock_endpoint,
db=mock_db
)
builder._get_project_paths()
assert (builder.project_paths == mock_paths)
def test_assign_workflows(self, mock_db, tmpdir):
# GIVEN a flowcell run ID and an arbitrary root directory,
# under which a folder exists at 'bioinformatics/pipeline/Illumina/<run_id>',
mock_runid = '161231_INSTID_0001_AC00000XX'
mock_endpoint = 'benaroyaresearch#BRIGridFTP'
mock_path = (tmpdir
.mkdir('bioinformatics')
.mkdir('pipeline')
.mkdir('Illumina')
.mkdir(mock_runid))
# AND that folder contains a subfolder named 'Unaligned'
mock_unaligndir = mock_path.mkdir('Unaligned')
# AND the unaligned folder includes multiple project folders
mock_projects = ['P1-1-11111111', 'P99-99-99999999']
mock_paths = [str(mock_unaligndir.mkdir(p)) for p in mock_projects]
# WHEN a flowcell submission is built using the available workflows
mock_workflowdir = mock_path.mkdir('galaxy_workflows')
mock_workflows = ['workflow1.txt', 'optimized_workflow1.txt']
mock_workflowopts = [str(mock_workflowdir.ensure(w))
for w in mock_workflows
if re.search('optimized', w)]
mock_workflowopts.sort()
mock_buildopts = ['GRCh38.77', 'NCBIM37.67', 'mm10']
builder = submission.FlowcellSubmissionBuilder(
path=str(mock_path),
endpoint=mock_endpoint,
db=mock_db,
workflow_dir=str(mock_workflowdir)
)
with mock.patch('builtins.input',
side_effect=iter(["0", "0", "0", "", ""])):
builder._assign_workflows()
# THEN the workflow builder will generate workflow batch files
# and store the paths to these files in a batch map
mock_batchkey = (mock_workflowopts[0], mock_buildopts[0], False)
assert (builder.batch_map == {mock_batchkey: [mock_paths[0]]})
def test_get_batch_tags(self, mock_db, tmpdir):
# GIVEN a flowcell run ID and an arbitrary root directory,
# under which a folder exists at 'bioinformatics/pipeline/Illumina/<run_id>',
# and that folder contains a subfolder named 'Unaligned'
mock_runid = '161231_INSTID_0001_AC00000XX'
mock_endpoint = 'benaroyaresearch#BRIGridFTP'
# AND the unaligned folder includes multiple project folders
mock_projects = ['P1-1-11111111', 'P99-99-99999999']
mock_path = (tmpdir
.mkdir('bioinformatics')
.mkdir('pipeline')
.mkdir('Illumina')
.mkdir(mock_runid))
mock_unaligndir = mock_path.mkdir('Unaligned')
mock_paths = [str(mock_unaligndir.mkdir(p)) for p in mock_projects]
builder = submission.FlowcellSubmissionBuilder(
path=str(mock_path),
endpoint=mock_endpoint,
db=mock_db
)
test_grouptag, test_subgrouptags = builder._get_batch_tags(mock_paths)
assert (test_grouptag == 'C00000XX')
assert (test_subgrouptags == ['P1-1', 'P99-99'])
def test_run(self, mock_db, tmpdir):
# GIVEN a flowcell run ID and an arbitrary root directory,
# under which a folder exists at 'bioinformatics/pipeline/Illumina/<run_id>',
# and that folder contains a subfolder named 'Unaligned'
mock_runid = '161231_INSTID_0001_AC00000XX'
mock_endpoint = 'benaroyaresearch#BRIGridFTP'
mock_path = (tmpdir
.mkdir('bioinformatics')
.mkdir('pipeline')
.mkdir('Illumina')
.mkdir(mock_runid))
mock_workflowdir = mock_path.mkdir('galaxy_workflows')
mock_workflows = ['workflow1.txt', 'optimized_workflow1.txt']
mock_workflowopts = [mock_template(w, mock_workflowdir)
for w in mock_workflows
if re.search('optimized', w)]
mock_workflowopts.sort()
mock_buildopts = ['GRCh38.77', 'NCBIM37.67', 'mm10']
# AND the unaligned folder includes multiple project folders
mock_projects = ['P1-1-11111111', 'P99-99-99999999']
mock_unaligndir = mock_path.mkdir('Unaligned')
mock_samples = {0: ['lib1111-11111111', 'lib2222-22222222'],
1: ['lib3333-33333333', 'lib4444-44444444']}
mock_paths = []
for idx, f in enumerate(mock_projects):
folderpath = mock_unaligndir.mkdir(f)
mock_paths.append(str(folderpath))
for s in mock_samples[idx]:
samplepath = folderpath.mkdir(s)
samplepath.ensure('sample-name_S001_L001_R1_001.fastq.gz')
samplepath.ensure('sample-name_S001_L002_R1_001.fastq.gz')
builder = submission.FlowcellSubmissionBuilder(
path=str(mock_path),
endpoint=mock_endpoint,
db=mock_db,
workflow_dir=str(mock_workflowdir)
)
builder.batch_map = {
(mock_workflowopts[0], mock_buildopts[0], False): [mock_paths[0]]
}
test_paths = builder.run()
with open(test_paths[0]) as f:
test_contents = f.readlines()
mock_date = datetime.date.today().strftime("%y%m%d")
mock_paths = [os.path.join(
str(mock_path),
'globus_batch_submission',
'{}_C00000XX_P1-1_optimized_workflow1_GRCh38.77_unstrand.txt'.format(
mock_date
)
)]
assert (test_paths == mock_paths)
assert (len([l for l in test_contents
if re.search('^lib', l)])
== 2)
class TestSampleSubmissionBuilder:
"""
"""
def test_read_paths(self, tmpdir):
mock_workflowdir = tmpdir.mkdir('galaxy_workflows')
mock_workflows = ['workflow1.txt', 'optimized_workflow1.txt']
mock_workflowopts = [str(mock_workflowdir.ensure(w))
for w in mock_workflows]
mock_workflowopts.sort()
# mock_filename = 'optimized_workflow_1.txt'
# mock_file = mock_template(mock_filename, tmpdir)
mock_samples = ['lib1111-11111111', 'lib2222-22222222']
mock_paths = []
for s in mock_samples:
samplepath = tmpdir.mkdir(s)
mock_paths.append(str(samplepath))
samplepath.ensure('sample-name_S001_L001_R1_001.fastq.gz')
samplepath.ensure('sample-name_S001_L002_R1_001.fastq.gz')
mock_manifest = tmpdir.join('manifest.txt')
mock_manifest.write('\n'.join(mock_paths))
builder = submission.SampleSubmissionBuilder(
manifest=str(mock_manifest),
out_dir=str(tmpdir),
endpoint='',
workflow_dir=str(mock_workflowdir)
)
builder._read_paths()
assert (builder.paths == mock_paths)
def test_get_workflow_options_for_all_workflows(self, tmpdir):
mock_workflowdir = tmpdir.mkdir('galaxy_workflows')
mock_workflows = ['workflow1.txt', 'optimized_workflow1.txt']
mock_workflowopts = [str(mock_workflowdir.ensure(w))
for w in mock_workflows]
mock_workflowopts.sort()
builder = submission.SampleSubmissionBuilder(
manifest='',
out_dir=str(tmpdir),
endpoint='',
workflow_dir=str(mock_workflowdir)
)
test_workflowopts = builder.get_workflow_options(optimized_only=False)
assert (test_workflowopts == mock_workflowopts)
def test_assign_workflow(self, tmpdir):
mock_workflowdir = tmpdir.mkdir('galaxy_workflows')
mock_workflows = ['workflow1.txt', 'optimized_workflow1.txt']
mock_workflowopts = [str(mock_workflowdir.ensure(w))
for w in mock_workflows]
mock_workflowopts.sort()
mock_buildopts = ['GRCh38.77', 'NCBIM37.67', 'mm10']
mock_samples = ['lib1111-11111111', 'lib2222-22222222']
mock_paths = []
for s in mock_samples:
samplepath = tmpdir.mkdir(s)
mock_paths.append(str(samplepath))
samplepath.ensure('sample-name_S001_L001_R1_001.fastq.gz')
samplepath.ensure('sample-name_S001_L002_R1_001.fastq.gz')
builder = submission.SampleSubmissionBuilder(
manifest='',
out_dir=str(tmpdir),
endpoint='',
workflow_dir=str(mock_workflowdir)
)
builder.paths = mock_paths
with mock.patch('builtins.input',
side_effect=iter(["0", "0"])):
builder._assign_workflow()
mock_batchkey = (mock_workflowopts[0], mock_buildopts[0])
assert (builder.batch_map == {mock_batchkey: mock_paths})
def test_run(self, tmpdir):
mock_workflowdir = tmpdir.mkdir('galaxy_workflows')
mock_workflows = ['workflow1.txt', 'optimized_workflow1.txt']
mock_workflowopts = [mock_template(w, mock_workflowdir)
for w in mock_workflows
if re.search('optimized', w)]
mock_workflowopts.sort()
mock_buildopts = ['GRCh38.77', 'NCBIM37.67', 'mm10']
mock_samples = ['lib1111-11111111', 'lib2222-22222222']
mock_paths = []
for s in mock_samples:
samplepath = tmpdir.mkdir(s)
mock_paths.append(str(samplepath))
samplepath.ensure('sample-name_S001_L001_R1_001.fastq.gz')
samplepath.ensure('sample-name_S001_L002_R1_001.fastq.gz')
builder = submission.SampleSubmissionBuilder(
manifest='',
out_dir=str(tmpdir),
endpoint='',
workflow_dir=str(mock_workflowdir)
)
mock_batchkey = (mock_workflowopts[0], mock_buildopts[0])
builder.batch_map = {mock_batchkey: mock_paths}
test_batchpaths = builder.run()
with open(test_batchpaths[0]) as f:
test_contents = f.readlines()
logger.debug("{}".format(test_contents))
mock_date = datetime.date.today().strftime("%y%m%d")
mock_batchpaths = [os.path.join(
str(tmpdir),
'{}__optimized_workflow1_GRCh38.77_unstrand.txt'.format(mock_date)
)]
assert (test_batchpaths == mock_batchpaths)
assert (len([l for l in test_contents
if re.search('^lib', l)])
== 2)
|
UTF-8
|
Python
| false | false | 41,188 |
py
| 297 |
test_submission.py
| 93 | 0.558755 | 0.516 | 0 | 1,099 | 36.477707 | 99 |
andreofner/torchsim
| 7,121,055,788,571 |
ebc99ebddc2f2ecec3b46040b9a4d3ec9e621248
|
5c00b0626b4ec2bc428e565c97b4afc355198cc4
|
/torchsim/core/nodes/simple_bouncing_ball_node.py
|
7ed3e3458819917f274d9ce0bb6e09bfd658e672
|
[
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
https://github.com/andreofner/torchsim
|
8cff778a324d4f7dc040f11a12d0dc8cd66375b7
|
81d72b82ec96948c26d292d709f18c9c77a17ba4
|
refs/heads/master
| 2021-10-24T05:25:33.740521 | 2019-03-22T10:20:00 | 2019-03-22T10:20:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import copy
import logging
from dataclasses import dataclass, field
from enum import Enum
import numpy as np
import torch
from torchsim.core.graph.node_base import EmptyInputs
from torchsim.core.graph.slot_container import MemoryBlocks
from torchsim.core.graph.unit import Unit
from torchsim.core.graph.worker_node_base import WorkerNodeBase
from torchsim.core.memory.tensor_creator import TensorCreator
from torchsim.core.models.expert_params import ParamsBase
from torchsim.gui.observables import disable_on_runtime
from torchsim.gui.observer_system import ObserverPropertiesItem
from torchsim.gui.validators import *
logger = logging.getLogger(__name__)
class SimpleBall:
"""Simulates the ball physics."""
_sx: int
_sy: int
_ball_radius: int
_pos: np.array
_direction: np.array
def __init__(self, sx: int, sy: int, dir_x=1, dir_y=1, ball_radius=0) -> None:
self._sx = sx
self._sy = sy
self._ball_radius = ball_radius
if self._ball_radius * 2 + 1 > self._sx or self._ball_radius * 2 + 1 > self._sy:
print('error: the ball has to fit in the bitmap (ball_radius * 2 +1 <= sx; sy)')
# in case the ball just fits the width/height, disable movement in the corresponding direction
if self._sx == self._ball_radius * 2 + 1:
dir_x = 0
if self._sy == self._ball_radius * 2 + 1:
dir_y = 0
self._pos = self._random_position()
self._direction = np.array([dir_y, dir_x]) # dimensions are in this order because of rendering
def _random_position(self):
return np.array(
[np.random.randint(0+self._ball_radius, self._sy-self._ball_radius),
np.random.randint(0+self._ball_radius, self._sx-self._ball_radius)])
def next_frame(self):
"""Simulates the ball movement, resolves bounces, direction."""
pos = self._pos + self._direction
self._pos[0], self._direction[0] = self._bounce(self._direction[0], pos[0], self._sy) # y
self._pos[1], self._direction[1] = self._bounce(self._direction[1], pos[1], self._sx) # x
def _bounce(self, direction: int, value: int, dim_size: int) -> [int, int]:
if value < self._ball_radius:
new_direction = -direction
return (self._ball_radius + 1), new_direction
if value >= dim_size - self._ball_radius:
new_direction = -direction
return (dim_size - self._ball_radius - 2), new_direction
return value, direction
def get_pos(self):
return self._pos
class BallShapes(Enum):
DISC = 0
CIRCLE = 1
SQUARE = 2
EMPTY_SQUARE = 3
TRIANGLE = 4
EMPTY_TRIANGLE = 5
def __str__(self):
return self._name_
__repr__ = __str__
class BallRenderer:
"""Renders the ball of a given shape on a given position."""
_ball_radius: int
_ball_shape: BallShapes
def __init__(self, ball_radius: int, ball_shape: BallShapes):
self._ball_radius = ball_radius
self._ball_shape = ball_shape
self.shape_indices = {}
def render_ball_to(self, pos: np.array, bitmap: torch.Tensor):
if self._ball_shape not in self.shape_indices:
if self._ball_shape == BallShapes.CIRCLE:
indices = self._render_circle_ball_to(self._ball_radius)
elif self._ball_shape == BallShapes.DISC:
indices = self._render_disc_ball_to(self._ball_radius)
elif self._ball_shape == BallShapes.SQUARE:
indices = self._render_square_ball_to(self._ball_radius)
elif self._ball_shape == BallShapes.EMPTY_SQUARE:
indices = self._render_empty_square_ball_to(self._ball_radius)
elif self._ball_shape == BallShapes.EMPTY_TRIANGLE:
indices = self._render_empty_triangle_ball_to(self._ball_radius)
elif self._ball_shape == BallShapes.TRIANGLE:
indices = self._render_triangle_ball_to(self._ball_radius)
else:
raise ValueError("Unknown shape.")
self.shape_indices[self._ball_shape] = np.array(list(zip(*indices)))
indices = self.shape_indices[self._ball_shape] + np.expand_dims(pos, 1)
        # keep only the pixels that fall inside the bitmap (the shape may poke past an edge)
        indices = indices[:, (0 < indices[0, :]) * (indices[0, :] < bitmap.shape[0]) *
                             (0 < indices[1, :]) * (indices[1, :] < bitmap.shape[1])]
        # index rows and columns separately; a single 2xN index array would only select along dim 0
        bitmap[indices[0], indices[1]] = 1
@staticmethod
def _render_square_ball_to(radius):
for y in range(-radius, radius + 1):
for x in range(-radius, radius + 1):
yield y, x
@staticmethod
def _render_empty_square_ball_to(radius):
for y in range(-radius, radius + 1):
for x in range(-radius, radius + 1):
if BallRenderer._is_point_on_boundary(np.array([0, 0]), np.array([y, x]), radius):
yield y, x
@staticmethod
def _render_empty_triangle_ball_to(radius):
for y in range(-radius, radius + 1):
for x in range(-radius, radius + 1):
if BallRenderer.is_point_on_triangle(np.array([0, 0]), np.array([y, x]), radius):
yield y, x
@staticmethod
def _render_triangle_ball_to(radius):
for y in range(-radius, radius + 1):
for x in range(-radius, radius + 1):
if BallRenderer.is_point_inside_triangle(np.array([0, 0]), np.array([y, x]), radius):
yield y, x
@staticmethod
def _render_disc_ball_to(radius):
for y in range(-radius, radius + 1):
for x in range(-radius, radius + 1):
if BallRenderer._euclidean_dist(np.array([0, 0]), np.array([y, x])) <= radius+0.3:
yield y, x
@staticmethod
def _render_circle_ball_to(radius):
for y in range(-radius, radius + 1):
for x in range(-radius, radius + 1):
if radius-0.49 \
<= BallRenderer._euclidean_dist(np.array([0, 0]), np.array([y, x])) \
<= radius+0.49:
yield y, x
@staticmethod
def _is_point_on_boundary(a: np.array, b: np.array, radius: int):
dx = abs(a[1] - b[1])
dy = abs(a[0] - b[0])
return dx == radius or dy == radius
@staticmethod
def _euclidean_dist(a: np.array, b: np.array):
dy = a[0] - b[0]
dx = a[1] - b[1]
return np.math.sqrt(dy * dy + dx * dx)
@staticmethod
def is_point_on_triangle(a: np.array, b: np.array, radius):
dy = a[0] - b[0]
dx = a[1] - b[1]
if dy == radius // 2:
return True
if abs(dx) - dy == np.ceil(radius / 2):
            return True
        return False
@staticmethod
def is_point_inside_triangle(a: np.array, b: np.array, radius):
dy = a[0] - b[0]
dx = a[1] - b[1]
if abs(dx) - dy <= radius // 2 and dy <= radius // 2:
            return True
        return False
@dataclass
class SimpleBouncingBallNodeParams(ParamsBase):
sx: int = 27
sy: int = 40
dir_x: int = 1
dir_y: int = 1
ball_radius: int = 1
ball_shapes: List[BallShapes] = field(default_factory=list)
noise_amplitude: float = 0.2
switch_next_shape_after: int = 0
random_position_direction_switch_after: int = 0
class SimpleBouncingBallUnit(Unit):
"""A world containing 1-pixel ball which can move in 8 direction and bounces from the walls."""
_params: SimpleBouncingBallNodeParams
_noise_amplitude: float
_bitmap: torch.Tensor
_ball: SimpleBall
_ball_renderer: BallRenderer
def __init__(self, creator: TensorCreator, params: SimpleBouncingBallNodeParams):
super().__init__(creator.device)
self._params = copy.copy(params)
if len(self._params.ball_shapes) == 0:
self._params.ball_shapes = [shape for shape in BallShapes]
self._step_shape_switch_counter = 0
self._step_direction_switch_counter = 0
self._shape_counter = 0
self._creator = creator
self._ball = SimpleBall(self._params.sx,
self._params.sy,
self._params.dir_x,
self._params.dir_y,
self._params.ball_radius)
self._ball_renderer = BallRenderer(ball_radius=self._params.ball_radius,
ball_shape=self._params.ball_shapes[0])
# size_y, size_x, 1 color channel
self._bitmap = self._creator.zeros((self._params.sy, self._params.sx, 1),
dtype=self._float_dtype,
device=self._device)
self._label = self._creator.zeros(len(self._params.ball_shapes),
dtype=self._float_dtype,
device=self._device)
self._label[0] = 1
def step(self):
background = self._creator.zeros_like(self._bitmap)
background = background.uniform_() * self._params.noise_amplitude
self._switch_shape()
self._switch_position_and_direction()
self._ball.next_frame()
self._ball_renderer.render_ball_to(self._ball.get_pos(), background)
self._bitmap.copy_(background)
def _switch_shape(self):
if self._params.switch_next_shape_after > 0:
self._step_shape_switch_counter += 1
if self._step_shape_switch_counter % self._params.switch_next_shape_after == 0:
self._shape_counter += 1
self._ball_shape = self._params.ball_shapes[self._shape_counter % len(self._params.ball_shapes)]
self._ball_renderer = BallRenderer(ball_radius=self._params.ball_radius,
ball_shape=self._ball_shape)
self._label.zero_()
self._label[self._shape_counter % len(self._params.ball_shapes)] = 1
def _switch_position_and_direction(self):
if self._params.random_position_direction_switch_after > 0:
self._step_direction_switch_counter += 1
if self._step_direction_switch_counter % self._params.random_position_direction_switch_after == 0:
rand_dir_x = rand_dir_y = 0
# repeat until we have a nonzero speed in both directions
while rand_dir_x == 0 or rand_dir_y == 0:
rand_dir_x, rand_dir_y = np.random.randint(-2, 3), np.random.randint(-2, 3)
self._ball = SimpleBall(self._params.sx, self._params.sy, rand_dir_x, rand_dir_y, self._params.ball_radius)
class SimpleBouncingBallOutputs(MemoryBlocks):
def __init__(self, owner):
super().__init__(owner)
self.bitmap = self.create("Bitmap")
self.label_one_hot = self.create("Label one hot")
def prepare_slots(self, unit: SimpleBouncingBallUnit):
self.bitmap.tensor = unit._bitmap
self.label_one_hot.tensor = unit._label
class SimpleBouncingBallNode(WorkerNodeBase[EmptyInputs, SimpleBouncingBallOutputs]):
"""Simple world containing one moving object at a time and noise.
Size, direction of movement, size of object, switch period between two shapes
and amplitude of noise can be specified.
"""
outputs: SimpleBouncingBallOutputs
def __init__(self, params: SimpleBouncingBallNodeParams, name="SimpleBouncingBallWorld"):
super().__init__(name=name, outputs=SimpleBouncingBallOutputs(self))
self._params = params.clone()
def _create_unit(self, creator: TensorCreator):
self._creator = creator
return SimpleBouncingBallUnit(creator, self._params)
@property
    def ball_shapes(self) -> List[int]:
return [item.value for item in self._params.ball_shapes]
@ball_shapes.setter
def ball_shapes(self, value: List[int]):
parsed_value = [BallShapes(item) for item in value]
if len(parsed_value) == 0:
            raise FailedValidationException("Value must not be empty")
self._params.ball_shapes = parsed_value
@property
def ball_radius(self) -> int:
return self._params.ball_radius
@ball_radius.setter
def ball_radius(self, value: int):
validate_positive_int(value)
if int(value) * 2 + 1 > self._params.sx or int(value) * 2 + 1 > self._params.sy:
raise FailedValidationException('The ball (of size 2*ball_diameter+1) has to fit inside the [sx, sy] dimensions')
self._params.ball_radius = value
@property
def switch_next_shape_after(self) -> int:
return self._params.switch_next_shape_after
@switch_next_shape_after.setter
def switch_next_shape_after(self, value: int):
validate_positive_int(value)
self._params.switch_next_shape_after = value
@property
def sx(self) -> int:
return self._params.sx
@sx.setter
def sx(self, value: int):
validate_positive_int(value)
if int(value) < self._params.ball_radius * 2 + 1:
raise FailedValidationException("The ball (of size 2*ball_radius+1) has to fit inside [sx, sy] dimensions")
self._params.sx = value
@property
def sy(self) -> int:
return self._params.sy
@sy.setter
def sy(self, value: int):
validate_positive_int(value)
if int(value) < self._params.ball_radius * 2 + 1:
raise FailedValidationException("The ball (of size 2*ball_radius+1) has to fit inside [sx, sy] dimensions")
self._params.sy = value
@property
def noise_amplitude(self) -> float:
return self._params.noise_amplitude
@noise_amplitude.setter
def noise_amplitude(self, value: float):
validate_positive_with_zero_float(value)
self._params.noise_amplitude = value
@property
def dir_x(self) -> int:
return self._params.dir_x
@dir_x.setter
def dir_x(self, value: int):
if value not in [0, 1, 2]:
raise FailedValidationException("Invalid direction, allowed values are [0,1,2] (1 is no movement)")
self._params.dir_x = value
@property
def dir_y(self) -> int:
return self._params.dir_y
@dir_y.setter
def dir_y(self, value: int):
if value not in [0, 1, 2]:
raise FailedValidationException("Invalid direction, allowed values are [0,1,2] (1 is no movement)")
self._params.dir_y = value
@property
def random_position_direction_switch_after(self) -> int:
return self._params.random_position_direction_switch_after
@random_position_direction_switch_after.setter
def random_position_direction_switch_after(self, value: int):
validate_positive_with_zero_int(value)
self._params.random_position_direction_switch_after = value
def get_properties(self) -> List[ObserverPropertiesItem]:
"""Define which properties can be changed from GUI and how."""
return [
self._prop_builder.auto('Random reset', type(self).random_position_direction_switch_after, edit_strategy=disable_on_runtime),
self._prop_builder.auto('Sx', type(self).sx, edit_strategy=disable_on_runtime),
self._prop_builder.auto('Sy', type(self).sy, edit_strategy=disable_on_runtime),
self._prop_builder.auto('Dir_x', type(self).dir_x, edit_strategy=disable_on_runtime),
self._prop_builder.auto('Dir_y', type(self).dir_y, edit_strategy=disable_on_runtime),
self._prop_builder.auto('Ball radius', type(self).ball_radius, edit_strategy=disable_on_runtime),
self._prop_builder.auto('Ball shapes', type(self).ball_shapes, edit_strategy=disable_on_runtime),
self._prop_builder.auto('Noise amplitude', type(self).noise_amplitude, edit_strategy=disable_on_runtime),
self._prop_builder.auto('Switch next shape after', type(self).switch_next_shape_after, edit_strategy=disable_on_runtime)
]
def _step(self):
self._unit.step()
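# Rough usage sketch (names as defined above; the node is meant to run inside a
# torchsim graph, which allocates its tensors through a TensorCreator):
# params = SimpleBouncingBallNodeParams(sx=32, sy=32, ball_radius=2,
#                                       ball_shapes=[BallShapes.DISC])
# node = SimpleBouncingBallNode(params)
# Each simulation step then renders the ball into node.outputs.bitmap and
# writes the one-hot shape label into node.outputs.label_one_hot.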
|
UTF-8
|
Python
| false | false | 16,088 |
py
| 632 |
simple_bouncing_ball_node.py
| 592 | 0.598023 | 0.58814 | 0 | 424 | 36.943396 | 137 |
TGITS/programming-workouts
| 3,161,095,934,942 |
175b7a3ea2591ba9fb48c72eb2fb5da2103a54fa
|
9aca898ee664a0188d0e27d33dfe37da9b50bcc9
|
/erri/python/lesson_30/dice.py
|
fd69a49b98105d930e2e524bb265aaf44eb57ed0
|
[
"MIT"
] |
permissive
|
https://github.com/TGITS/programming-workouts
|
2ffc9fe4d1d472063e1af95115198553265581c9
|
1ceeab4fc84cf5502d7f78b35bc421d719fe8484
|
refs/heads/master
| 2023-07-25T18:31:51.028052 | 2023-07-10T12:47:20 | 2023-07-10T12:47:20 | 80,997,078 | 0 | 0 |
MIT
| false | 2023-09-06T20:28:49 | 2017-02-05T14:09:57 | 2021-11-26T11:30:09 | 2023-09-06T20:28:48 | 16,081 | 0 | 1 | 20 |
HTML
| false | false |
from random import randint
class Dice:
def __init__(self, faces):
self.faces = faces
def roll(self):
return randint(1, self.faces)
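# Example (sketch): rolling a standard six-sided die.
# d6 = Dice(6)
# print(d6.roll())  # some integer between 1 and 6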
|
UTF-8
|
Python
| false | false | 159 |
py
| 1,094 |
dice.py
| 450 | 0.603774 | 0.597484 | 0 | 10 | 14.9 | 37 |
QSuHack/Dataqset
| 16,947,940,950,583 |
30ce30ea645141ddc4eeb970f6b66709a2931e35
|
45fd789f2588659f4eb6fecc10f3b6d321823dfe
|
/x64/Release/aes.py
|
0a6c4b1e2970b8501f81346408ce550e10ec9417
|
[
"MIT"
] |
permissive
|
https://github.com/QSuHack/Dataqset
|
4b765d68ebbfba8291bd537b56e86ce393b714fa
|
19be1d14e9293eb28e2d9c7e72cff557531bb4e1
|
refs/heads/master
| 2022-10-29T10:06:20.395420 | 2019-04-23T23:22:57 | 2019-04-23T23:22:57 | 176,785,640 | 2 | 1 |
MIT
| false | 2022-10-25T19:20:55 | 2019-03-20T17:37:19 | 2019-05-15T16:05:00 | 2019-04-23T23:22:57 | 68,580 | 1 | 1 | 1 |
Python
| false | false |
import pyaes
import os
import hashlib
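# AES-CTR file helpers: szyfruj() encrypts a file in place, deszyfruj() decrypts it.
# The key is derived as the first 32 bytes of SHA-256(passphrase).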
def szyfruj(nazwa_pliku_do_zaszyfrowania, klucz):
file_in = open(nazwa_pliku_do_zaszyfrowania, 'rb')
klucz = hashlib.sha256(klucz).digest()[:32]
file_out = open('tmp.txt', 'wb+')
mode = pyaes.AESModeOfOperationCTR(klucz)
pyaes.encrypt_stream(mode, file_in, file_out)
file_out.close()
file_in.close()
path = os.getcwd()
if os.path.isfile(path + "\\" + nazwa_pliku_do_zaszyfrowania):
os.remove(path + "\\" + nazwa_pliku_do_zaszyfrowania)
os.rename(path + "\\tmp.txt", path + "\\" + nazwa_pliku_do_zaszyfrowania)
def deszyfruj(nazwa_pliku_do_deszyfracji, klucz):
file_in = open(nazwa_pliku_do_deszyfracji, "rb")
file_out = open('tmp.txt', 'wb+')
klucz = hashlib.sha256(klucz).digest()[:32]
mode = pyaes.AESModeOfOperationCTR(klucz)
pyaes.decrypt_stream(mode, file_in, file_out)
file_out.close()
file_in.close()
path = os.getcwd()
if os.path.isfile(path + "\\" + nazwa_pliku_do_deszyfracji):
os.remove(path + "\\" + nazwa_pliku_do_deszyfracji)
os.rename(path + "\\tmp.txt", path + "\\" + nazwa_pliku_do_deszyfracji)
#
# key = b"my_very_long_and_difficult_password"
# szyfruj("dane.txt", key)
# input()
# deszyfruj("dane.txt", key)
|
UTF-8
|
Python
| false | false | 1,274 |
py
| 16 |
aes.py
| 10 | 0.641287 | 0.633438 | 0 | 37 | 33.432432 | 81 |
Clinical-Genomics/cg
| 1,005,022,378,232 |
48fd303e6c7e2952ad347ec1206cb0c27bc75d61
|
88994e2e840a70ec702cee09e1a13813aa6f800c
|
/alembic/versions/fab30255b84f_move_synopsis_to_case.py
|
1116d1666d881a909493a4a3341166ecd4c0a82e
|
[] |
no_license
|
https://github.com/Clinical-Genomics/cg
|
1e9eb0852f742d555a48e8696914ebe177f7d436
|
d2ec6d25b577dd6938bbf92317aeff1d6b3c5b08
|
refs/heads/master
| 2023-09-01T02:04:04.229120 | 2023-08-31T13:50:31 | 2023-08-31T13:50:31 | 82,567,026 | 19 | 8 | null | false | 2023-09-14T15:24:13 | 2017-02-20T14:29:43 | 2023-09-04T08:52:50 | 2023-09-14T15:24:11 | 19,839 | 5 | 2 | 149 |
Python
| false | false |
"""move_synopsis_to_case
Revision ID: fab30255b84f
Revises: 432379a1adfa
Create Date: 2021-02-17 17:43:47.102289
"""
from typing import List
from alembic import op
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
# revision identifiers, used by Alembic.
revision = "fab30255b84f"
down_revision = "432379a1adfa"
branch_labels = None
depends_on = None
class Family(Base):
__tablename__ = "family"
id = sa.Column(sa.types.Integer, primary_key=True)
internal_id = sa.Column(sa.types.String(32), unique=True, nullable=False)
name = sa.Column(sa.types.String(128), nullable=False)
_cohorts = sa.Column(sa.types.Text)
_synopsis = sa.Column(sa.types.Text)
@property
def cohorts(self) -> List[str]:
"""Return a list of cohorts."""
return self._cohorts.split(",") if self._cohorts else []
@cohorts.setter
def cohorts(self, cohort_list: List[str]):
self._cohorts = ",".join(cohort_list) if cohort_list else None
@property
def synopsis(self) -> List[str]:
"""Return a list of synopsis."""
return self._synopsis.split(",") if self._synopsis else []
@synopsis.setter
def synopsis(self, synopsis_list: List[str]):
self._synopsis = ",".join(synopsis_list) if synopsis_list else None
class FamilySample(Base):
__tablename__ = "family_sample"
__table_args__ = (sa.UniqueConstraint("family_id", "sample_id", name="_family_sample_uc"),)
id = sa.Column(sa.types.Integer, primary_key=True)
family_id = sa.Column(sa.ForeignKey("family.id", ondelete="CASCADE"), nullable=False)
sample_id = sa.Column(sa.ForeignKey("sample.id", ondelete="CASCADE"), nullable=False)
mother_id = sa.Column(sa.ForeignKey("sample.id"))
father_id = sa.Column(sa.ForeignKey("sample.id"))
family = orm.relationship("Family", backref="links")
sample = orm.relationship("Sample", foreign_keys=[sample_id], backref="links")
mother = orm.relationship("Sample", foreign_keys=[mother_id], backref="mother_links")
father = orm.relationship("Sample", foreign_keys=[father_id], backref="father_links")
class Sample(Base):
__tablename__ = "sample"
_cohorts = sa.Column(sa.types.Text)
id = sa.Column(sa.types.Integer, primary_key=True)
internal_id = sa.Column(sa.types.String(32), nullable=False, unique=True)
name = sa.Column(sa.types.String(128), nullable=False)
_synopsis = sa.Column(sa.types.Text)
@property
def cohorts(self) -> List[str]:
"""Return a list of cohorts."""
return self._cohorts.split(",") if self._cohorts else []
@cohorts.setter
def cohorts(self, cohort_list: List[str]):
self._cohorts = ",".join(cohort_list) if cohort_list else None
@property
def synopsis(self) -> List[str]:
"""Return a list of synopsis."""
return self._synopsis.split(",") if self._synopsis else []
@synopsis.setter
def synopsis(self, synopsis_list: List[str]):
self._synopsis = ",".join(synopsis_list) if synopsis_list else None
def upgrade():
bind = op.get_bind()
session = orm.Session(bind=bind)
op.add_column("family", sa.Column("_synopsis", sa.TEXT))
op.add_column("family", sa.Column("_cohorts", sa.TEXT))
# copy data from sample._synopsis to family._synopsis
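    # (if several samples in one family carry a synopsis, the last one
    # processed wins, since each assignment overwrites the previous value)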
for sample in session.query(Sample).filter(Sample._synopsis.isnot(None)):
for link in sample.links:
link.family._synopsis = sample._synopsis
for sample in session.query(Sample).filter(Sample._cohorts.isnot(None)):
for link in sample.links:
link.family._cohorts = sample._cohorts
session.commit()
op.drop_column("sample", "_synopsis")
op.drop_column("sample", "_cohorts")
def downgrade():
bind = op.get_bind()
session = orm.Session(bind=bind)
op.add_column("sample", sa.Column("_synopsis", sa.TEXT))
op.add_column("sample", sa.Column("_cohorts", sa.TEXT))
# copy data from family._synopsis to sample._synopsis
for family in session.query(Family).filter(Family._synopsis.isnot(None)):
for link in family.links:
link.sample._synopsis = family._synopsis
for family in session.query(Family).filter(Family._cohorts.isnot(None)):
for link in family.links:
link.sample._cohorts = family._cohorts
session.commit()
op.drop_column("family", "_synopsis")
op.drop_column("family", "_cohorts")
|
UTF-8
|
Python
| false | false | 4,507 |
py
| 859 |
fab30255b84f_move_synopsis_to_case.py
| 761 | 0.66075 | 0.647881 | 0 | 134 | 32.634328 | 95 |
MRSNOO/nguyennhatquang-fundamental-c4t4
| 12,824,772,380,739 |
bb366866dac6e2a570cb6d5809340ea3c9f9c370
|
2a712fa9047892ccb033e93db0cdd9084b0fc086
|
/Fundamentals/Session01/Session01class/hello_world.py
|
560c9f7a19cd309629e90ded80f73350a652db3d
|
[] |
no_license
|
https://github.com/MRSNOO/nguyennhatquang-fundamental-c4t4
|
f20fbe145b82e16b9aff9bfb70572cd4c003297d
|
feb199d9de1a0edb79f132631584eb8e13464369
|
refs/heads/master
| 2020-03-21T08:00:48.399362 | 2019-03-17T11:55:14 | 2019-03-17T11:55:14 | 138,313,946 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
print("Hello")
# name= input("what's your name?")
# print("Hi",name)
|
UTF-8
|
Python
| false | false | 71 |
py
| 200 |
hello_world.py
| 95 | 0.605634 | 0.605634 | 0 | 4 | 16.5 | 34 |
microsoft/knowledge-extraction-recipes-forms
| 1,769,526,565,383 |
5c2cb1e7f169c06cefe773e0313cab7c519871a2
|
ea890e0db720028312e4587477aabe15893e5154
|
/Analysis/Form_Layout_Clustering/src/common.py
|
ecb5f2f153a9021e4f9a47366c4041ce2bf77984
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.1-or-later",
"Apache-2.0"
] |
permissive
|
https://github.com/microsoft/knowledge-extraction-recipes-forms
|
18a7fcc0f6c13e76985d6e291dbc3b093477fc9d
|
edded8e8076322684336cc3d90f75859987100cc
|
refs/heads/master
| 2023-08-22T13:28:05.257078 | 2023-07-18T08:14:33 | 2023-07-18T08:14:33 | 259,083,417 | 145 | 47 |
MIT
| false | 2023-08-02T02:23:03 | 2020-04-26T16:49:19 | 2023-07-25T00:00:07 | 2023-08-02T02:23:02 | 85,242 | 194 | 45 | 14 |
Jupyter Notebook
| false | false |
from PIL import Image
from PIL.PngImagePlugin import PngImageFile
from PIL.PpmImagePlugin import PpmImageFile
def get_image(image, clr_mode='L'):
"""Wrapper to retrieve and convert (color mode) PIL.Image object.
Args:
image (str or PIL.Image):
Expects PIL.Image object or string path to image file
clr_mode (str, optional):
Color mode: 'RGB' or 'L' for grayscale. Defaults to 'L'.
Returns:
PIL.Image:
Image object
"""
assert(clr_mode in ['L', 'RGB'])
image_type = type(image)
pil_im = None
if image_type is str:
pil_im = Image.open(image).convert(clr_mode)
elif image_type in [PngImageFile, Image.Image, Image, PpmImageFile]:
pil_im = image.convert(clr_mode)
else:
raise ValueError("Unsupported type of input image argument")
return pil_im
def resize_with_aspect_ratio(img, max_size=2800):
"""Helper function to resize image against the longer edge
Args:
img (PIL.Image):
Image object to be resized
max_size (int, optional):
Max size of the longer edge in pixels.
Defaults to 2800.
Returns:
PIL.Image:
Resized image object
"""
w, h = img.size
aspect_ratio = min(max_size/w, max_size/h)
resized_img = img.resize(
(int(w * aspect_ratio), int(h * aspect_ratio))
)
return resized_img
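# Example usage (sketch; 'form.png' is a hypothetical input file):
# img = get_image('form.png', clr_mode='L')
# img = resize_with_aspect_ratio(img, max_size=2800)
# img.save('form_resized.png')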
|
UTF-8
|
Python
| false | false | 1,434 |
py
| 325 |
common.py
| 126 | 0.611576 | 0.605997 | 0 | 52 | 26.576923 | 72 |
samyk/TwistedProxy
| 14,010,183,354,797 |
cb1be9509a6731f964c4a94ee1f45fcf3d787e59
|
09d983c80632bd863ddcf3440ca7594d7bdc99a5
|
/TCP/Crypto.py
|
922b9ab00eb4e675438a66875511708f4f4daa41
|
[] |
no_license
|
https://github.com/samyk/TwistedProxy
|
32bd3425e42bc224628993c024b2128ea1929995
|
49b26b918721c0d26f6ec1b67da3ad5b8cad31c5
|
refs/heads/master
| 2020-04-07T06:03:05.287773 | 2018-11-22T06:28:57 | 2018-11-22T06:28:57 | 158,120,574 | 21 | 4 | null | true | 2018-11-18T19:45:59 | 2018-11-18T19:45:59 | 2018-11-10T22:07:07 | 2018-10-20T08:58:34 | 159 | 0 | 0 | 0 | null | false | null |
# -*- coding: utf-8 -*-
import os
from TCP._tweetnacl import (
crypto_box_afternm,
crypto_box_beforenm,
crypto_scalarmult_base,
crypto_box_open_afternm
)
from TCP.Nonce import Nonce
class Crypto:
def __init__(self, server_key):
self.session_key = None
self.server_key = bytes.fromhex(server_key)
self.client_sk = bytes.fromhex('85980ab6075cc197ab8de0faba3c699682b459979365435144482f5ebae82145')
self.client_pk = crypto_scalarmult_base(self.client_sk)
self.nonce = None
self.rnonce = None
self.snonce = None
self.s = None
self.k = None
def encrypt_client_packet(self, packet_id, payload):
if packet_id == 10100:
return payload
elif packet_id == 10101:
payload = self.session_key + bytes(self.snonce) + payload
encrypted = crypto_box_afternm(payload, bytes(self.nonce), self.s)
return self.client_pk + encrypted
elif self.snonce is None:
return payload
else:
return crypto_box_afternm(payload, bytes(self.snonce), self.k)
def decrypt_client_packet(self, packet_id, payload):
if packet_id == 10100:
return payload
elif packet_id == 10101:
if payload[:32] != self.client_pk:
                print('[*] It looks like Frida did not attach properly to your device, since the client pk does not match the static one!')
os._exit(0)
payload = payload[32:] # skip the pk since we already know it
self.nonce = Nonce(clientKey=self.client_pk, serverKey=self.server_key)
self.s = crypto_box_beforenm(self.server_key, self.client_sk)
decrypted = crypto_box_open_afternm(payload, bytes(self.nonce), self.s)
self.snonce = Nonce(decrypted[24:48])
return decrypted[48:]
elif self.snonce is None:
return payload
else:
self.snonce.increment()
return crypto_box_open_afternm(payload, bytes(self.snonce), self.k)
def encrypt_server_packet(self, packet_id, payload):
if packet_id == 20100 or (packet_id == 20103 and not self.session_key):
return payload
elif packet_id in (20103, 24662):
nonce = Nonce(self.snonce, self.client_pk, self.server_key)
payload = bytes(self.rnonce) + self.k + payload
encrypted = crypto_box_afternm(payload, bytes(nonce), self.s)
return encrypted
else:
return crypto_box_afternm(payload, bytes(self.rnonce), self.k)
def decrypt_server_packet(self, packet_id, payload):
if packet_id == 20100:
self.session_key = payload[-24:]
return payload
elif packet_id == 20103 and not self.session_key:
return payload
elif packet_id in (20103, 24662):
nonce = Nonce(self.snonce, self.client_pk, self.server_key)
decrypted = crypto_box_open_afternm(payload, bytes(nonce), self.s)
self.rnonce = Nonce(decrypted[:24])
self.k = decrypted[24:56]
return decrypted[56:]
else:
self.rnonce.increment()
return crypto_box_open_afternm(payload, bytes(self.rnonce), self.k)
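# Note on nonce handling above: after the key exchange the shared key `k` is
# used for every packet; the per-direction nonces (snonce for client->server,
# rnonce for server->client) are incremented once per packet on decrypt, and
# the matching re-encrypt reuses that value, which keeps this proxy in
# lockstep with both the real client and the real server.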
|
UTF-8
|
Python
| false | false | 3,444 |
py
| 22 |
Crypto.py
| 19 | 0.577526 | 0.54065 | 0 | 101 | 33.09901 | 139 |
alshobaki2/shobakiPython
| 4,827,543,247,747 |
19503106ccb405ab0d9f41d6ab8b506dd399fac1
|
2d331d264815aef2690e63193c3213730c806dcf
|
/extractTableNames2018.py
|
c274cf141ac0b983512829331bc4925f5a8d9719
|
[] |
no_license
|
https://github.com/alshobaki2/shobakiPython
|
5f8cf35848fb55af32e03a95e4d390ed62fda152
|
93974bb1e93b7633770cdd3bcc705216f9088dc7
|
refs/heads/master
| 2020-04-16T12:47:27.956052 | 2019-01-14T06:42:16 | 2019-01-14T06:42:16 | 165,596,251 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pyodbc
import pandas
import re
import os
#TableName = 'XXPOS_ADJ_DETAILS'
cnxn = pyodbc.connect("Driver={SQL Server Native Client 11.0};"
"Server=192.168.40.50,17001;"
"Database=UMDW;"
"UID=ETL_USER;"
"PWD=pass@word1;"
)
cursor = cnxn.cursor()
sql = '''
SELECT OBJECT_NAME(a.object_id) sp_name, (a.definition) sp_definition FROM sys.sql_modules a WHERE
OBJECTPROPERTY(object_id, 'IsProcedure') = 1
'''
data = pandas.read_sql(sql, cnxn)
data["sp_definition"] = data["sp_definition"].str.replace('[', '',regex=False)
data["sp_definition"] = data["sp_definition"].str.replace(']', '',regex=False)
data["sp_definition"] = data["sp_definition"].str.replace(')', ') ',regex=False)
data["sp_definition"] = data["sp_definition"].str.upper()
data['index1'] = data.index
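# pull the token that follows each FROM/INTO/UPDATE/JOIN/TABLE/APPLY keyword
# as a candidate table name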
df=data["sp_definition"].str.extractall(r'(from|into|update|join|table|APPLY)[\s|(]*(\S*)', re.DOTALL | re.IGNORECASE)
df = df.reset_index()
df
|
UTF-8
|
Python
| false | false | 1,072 |
py
| 12 |
extractTableNames2018.py
| 11 | 0.612873 | 0.593284 | 0 | 32 | 32.53125 | 120 |
hyesungoh/dino_history
| 15,556,371,558,762 |
4e848c42163937b233c3d1b813a9330c3d0374b1
|
95a1f698b1d8b7c2578d5306481f506751b0452e
|
/dino_history/user/migrations/0010_auto_20201017_1724.py
|
1c74232e75dbe42dc211dedbb9e61534c4d8aab0
|
[] |
no_license
|
https://github.com/hyesungoh/dino_history
|
45130bf8aa984282c90fa2b241401d0c038968e6
|
e5f50a8b83ff445c627302af2e6ca893ef8a4af2
|
refs/heads/master
| 2023-01-07T14:43:28.643160 | 2020-10-26T12:48:01 | 2020-10-26T12:48:01 | 284,934,718 | 0 | 0 | null | false | 2020-08-24T12:10:33 | 2020-08-04T09:26:13 | 2020-08-20T18:26:26 | 2020-08-24T12:10:32 | 15,343 | 0 | 0 | 0 |
Python
| false | false |
# Generated by Django 2.2.4 on 2020-10-17 17:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0009_student_exp'),
]
operations = [
migrations.AlterField(
model_name='student',
name='exp',
field=models.IntegerField(default=0),
),
]
|
UTF-8
|
Python
| false | false | 373 |
py
| 39 |
0010_auto_20201017_1724.py
| 22 | 0.576408 | 0.522788 | 0 | 18 | 19.722222 | 49 |
Mirrorkirby/313E-Spring-2014-Grading-Scripts
| 2,396,591,798,249 |
f429b4bfcd08ed349effc3175a538c2babd27982
|
22ea43c168d6bc35848ae1179d3829ee671af35c
|
/assignment_12/assignment_12.py
|
55ffaa86991fd43a0b1ac5a244d1e39a29ac3914
|
[] |
no_license
|
https://github.com/Mirrorkirby/313E-Spring-2014-Grading-Scripts
|
9652fe801aa0d89aff570b66fd6910b50281dbf6
|
89d1fd09f44b2af2662556e28797bfebd39af16a
|
refs/heads/master
| 2021-01-02T22:39:51.278623 | 2014-05-07T17:53:02 | 2014-05-07T17:53:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
from os.path import exists
from datetime import datetime, timedelta
from functools import *
import math
import subprocess
import sys
import re
import difflib
import time
pipes = {'stdout':subprocess.PIPE, 'stdin':subprocess.PIPE, 'stderr':subprocess.PIPE}
outputFilename = 'assignment_12.txt'
outputFile = open(outputFilename, 'a')
filename = "TestSparseMatrix.py"
dateString = "4-05-2014 23:59:59"
def main():
editor = ''
if input('Want to use Sublime instead of cat? (N for No) ') != 'N':
editor = '~/sublime_text'
elif input('Want to use Vim instead of cat? (N for No) ') != 'N':
editor = 'vim'
out = subprocess.getoutput('ls ./')
CSIDS = out.split("\n")
if len(sys.argv) == 3:
outputFile.write('CSID\tGrade\tComments\n')
lowerBound = sys.argv[1]
        upperBound = sys.argv[2] + '~'
myList = []
count = 0
for item in CSIDS:
if lowerBound <= item <= upperBound:
if "." not in item :
myList.append(item)
for csid in myList :
count += 1
os.system('clear')
print('======================')
print(csid + " " + str(count) + " out of " + str(len(myList)))
print('======================')
assign12(csid, True,editor)
#singleton mode
else:
csid = sys.argv[1]
os.system('clear')
print('======================')
print(csid)
print('======================')
assign12(csid, False,editor)
outputFile.close()
def assign12(csid, writeToFile,editor):
fileToGrade = ""
late = 0
grade = 70
style = 30
wrongFileName = False
header = True
comments = []
os.chdir(csid)
if writeToFile: outputFile.write(csid + "\t")
files = os.listdir('.')
#filename checking
for f in files :
splitted = subprocess.getoutput('ls -l ' + f).split()
if f == filename :
fileToGrade = filename
late = isLate(splitted)
break
elif f == filename.lower() :
fileToGrade = filename.lower()
late = isLate(splitted)
wrongFileName = True
break
#really odd filename
if fileToGrade == "" :
print(subprocess.getoutput('ls -l'))
fileToGrade = input("Which file should I grade? ")
if fileToGrade == "" :
if writeToFile:
outputFile.write("0\tno file\n")
os.chdir("..")
return
else :
        splitted = subprocess.getoutput('ls -l ' + fileToGrade.replace(' ', '\\ ')).split()
late = isLate(splitted)
wrongFileName = True
#grading time!
'''
70 + 10
15 - Sparse add()
15 - Sparse mult()
10 - Sparse getRow()
10 - Sparse getCol()
20 - Sparse str()
10 - formatting
10 - including 0's
setElement broken = -10
+10 - Sparse str() extra credit
'''
grade = 70
#Test these squares
formatting = True
if not (fileToGrade == '' and late != -1):
#grab output
output = []
for run in range(1,4):
try:
print("\n=====File %d===== (Ours then Theirs)"%run)
testFile = 'matrix%d.txt'%run
outFile = 'out%d.txt'%run
correctFile = '../correct%d.txt'%run
os.system('cp ../'+ testFile +' matrix.txt')
output.append(subprocess.getoutput('python3 ' + fileToGrade + '> ' + outFile).splitlines())
#modifying output
f = open(outFile,'r')
lines = f.readlines()
f.close()
f = open(outFile,'w')
lines = [x.rstrip() for x in lines]
lines = list(filter(None,lines))
f.write('\n'.join(lines))
f.close()
os.system('diff -yB ' + correctFile +' '+outFile)
print('\n'+'='*35)
except KeyboardInterrupt:
print(' passed on run',run)
off = input('points off: ')
while off != '':
grade -= int(off)
print('Current Grade: %d'%grade)
comment = input('comments? ')
if comment != '':
comments.append(comment + ' (-%s)'%off)
off = input('points off: ')
print()
if grade >= 70:
print("<('.')^ Perfection ^('.')>")
print("Grade: %d/70"%grade)
else:
print("Grade: %d/70"%grade)
#checking for header and style
input("Hit Enter to cat first 20 lines (header)")
print(subprocess.getoutput('head -20 ' + fileToGrade))
headerInput = input("Header(y/n, hit enter for y): ")
if headerInput == 'y' or headerInput == '':
header = True
else:
header = False
input("Hit Enter to cat whole file (style/comments)")
if editor != '':
os.system(editor +' '+ fileToGrade)
else:
print(subprocess.getoutput('cat ' + fileToGrade))
style = input("Style/Other (Out of 30, hit enter for 30): ")
gen_comments = input("General Comments?: ").strip()
    gen_comments = gen_comments if len(gen_comments) != 0 else "style"
if not style.isdigit():
style = 30
else :
style = int(style)
if (gen_comments != "style" or style != 30):
gen_comments += " (%+d)" % (style - 30)
comments.append("%s" % gen_comments)
#writing grade time!
if late == -1:
if writeToFile: outputFile.write('0\t More than 7 days late')
print('Late more than 7 days!')
else :
if late == 3:
comments.append("3-7 days late (-30)")
grade -= 30
elif late == 2:
comments.append("2 days late (-20)")
grade -= 20
elif late == 1:
comments.append("1 day late (-10)")
grade -= 10
if wrongFileName or not header:
grade -= 5
if wrongFileName and header:
comments.append("wrong filename (-5)")
elif header and not wrongFileName:
comments.append("malformed header (-5)")
else:
comments.append("wrong filename and malformed header (-5)")
if writeToFile: outputFile.write(str(grade+style) + "\t" + ', '.join(comments))
if writeToFile: outputFile.write('\n')
os.chdir("..")
#returns the number of days late an assignment is
def isLate(splitted):
dueDate = datetime.strptime(dateString,"%m-%d-%Y %H:%M:%S")
lateOne = dueDate + timedelta(days=1)
lateTwo = lateOne + timedelta(days=1)
lateSev = dueDate + timedelta(days=7)
turninDate = datetime.strptime(splitted[5] + " " +(("0" + splitted[6]) if len(splitted[6]) == 1 else splitted[6])+ " " + splitted[7] +" 2014", "%b %d %H:%M %Y")
if turninDate <= dueDate:
return 0
elif turninDate <= lateOne:
return 1
elif turninDate <= lateTwo:
return 2
elif turninDate <= lateSev:
return 3
else :
return -1
main()
|
UTF-8
|
Python
| false | false | 6,386 |
py
| 35 |
assignment_12.py
| 17 | 0.581741 | 0.561228 | 0 | 228 | 27.008772 | 162 |
verilyw/SR_Framework
| 10,350,871,220,653 |
b057117b2119902e9738270aee075475718fada9
|
f3a84ece8b0e95afb913cbb3b6312bb18677978a
|
/sr_framework/article_helper/article_utils.py
|
212323d5e39f2fa1db94c6b438334bf2e1c26587
|
[] |
no_license
|
https://github.com/verilyw/SR_Framework
|
c7350b26554bdda0e63da96db4bb08f86b35f2c2
|
46e1c256e79c4865ef47321a18a474736f625c2e
|
refs/heads/master
| 2023-06-21T17:42:13.533958 | 2021-08-06T14:00:01 | 2021-08-06T14:00:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import cv2
import imageio
import os
import os.path as osp
import sys
sys.path.append('../')
from utils import calc_metrics
from .show import *
from tqdm import tqdm, trange
def relation():
t1 = [1, 2, 3, 4]
t2 = [2, 3, 4, 5]
t3 = [3, 4, 5, 6]
c4 = [4, 5, 6, 7]
show_relation(t1, t2, t3, c4, save_path='relation.png')
def feature_map(tensor):
numpy_array = tensor.squeeze().cpu().numpy()
arr = np.mean(numpy_array, axis=0)
show_feature_map(arr, save_path='feature_map.png')
# calculate 1-D spectral densities
def Frequency_analysis(image):
# Fourier Transform
fft_res = np.abs(np.fft.fft2(image))
# Center
fft_res = np.fft.fftshift(fft_res)
h, w = fft_res.shape
fft_res = np.pad(fft_res, pad_width=((0, (h+1)%2), (0, (w+1)%2)), mode='constant', constant_values=0)
h, w = fft_res.shape
if h > w:
pad = h - w
fft_res = np.pad(fft_res, pad_width=((0, 0), (pad//2, pad//2)), mode='constant', constant_values=0)
elif w > h:
pad = w - h
fft_res = np.pad(fft_res, pad_width=((pad//2, pad//2), (0, 0)), mode='constant', constant_values=0)
h, w = fft_res.shape
    if h != w:
        raise ValueError('padded FFT magnitude must be square')
max_range = h // 2
cy, cx = h//2, w//2
x, y = [], []
for r in range(max_range):
x.append(r)
f = 0.0
if r == 0:
f = fft_res[cy, cx]
else:
f += sum(fft_res[cy-r, cx-r:cx+r])
f += sum(fft_res[cy-r:cy+r, cx+r])
f += sum(fft_res[cy+r, cx+r:cx-r:-1])
f += sum(fft_res[cy+r:cy-r:-1, cx-r])
y.append(np.log(1+f))
# Normalize frequency to [0, 1]
max_freq = np.max(x)
x = x / max_freq
show_feature_map(x, y, save_path='freq.png')
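# Example (sketch; 'img.png' is a hypothetical grayscale image):
# Frequency_analysis(imageio.imread('img.png', pilmode='L'))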
def get_direction(x, y, w, h):
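    # Clamp a 513-pixel (+/-256) window centered on (x, y) to the image bounds;
    # returns the inclusive left/right/top/bottom coordinates of the crop.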
left, right, top, bottom = 0, 0, 0, 0
half_w, half_h = w // 2, h // 2
if x < half_w:
if x >= 256:
left = x - 256
right = x + 256
else:
left = 0
right = 512
else:
if (w-x-1) >= 256:
right = x + 256
left = x - 256
else:
right = w - 1
left = w - 513
if y < half_h:
if y >= 256:
top = y - 256
bottom = y + 256
else:
top = 0
bottom = 512
else:
if (h-y-1) >= 256:
bottom = y+256
top = y-256
else:
bottom = h - 1
top = h - 513
return left, right, top, bottom
def generate_best(dir_list, border=4):
img_list_dict = dict()
# get sorted img names in corresponding model
for dir_name in dir_list:
img_list_dict[dir_name] = sorted(os.listdir(dir_name))
length = len(dir_list)
for i in range(0, 100): # for every Urban img
# load hr img
hr_basename = img_list_dict[dir_list[0]][i]
hr_path = osp.join(dir_list[0], hr_basename)
hr_img = imageio.imread(hr_path, pilmode='RGB')
h, w = hr_img.shape[:-1]
h_step, w_step = h // 20, w // 20
img_psnrs, img_ssims = [], []
# get metrics of different models for this img
for k in range(length-1):
basename = img_list_dict[dir_list[k+1]][i]
path = osp.join(dir_list[k+1], basename)
img = imageio.imread(path, pilmode='RGB')
if dir_list[k+1] == 'IDN':
img = cv2.copyMakeBorder(img, 4, 4, 4, 4, cv2.BORDER_REPLICATE)
psnr, ssim = calc_metrics(hr_img, img, crop_border=border, test_Y=True)
img_psnrs.append(psnr)
img_ssims.append(ssim)
str_img_psnrs = ['{:.2f}'.format(x) for x in img_psnrs]
print('full img[{:03d}] | {}'.format((i+1), str_img_psnrs))
# whether best is ours
if np.argmax(np.array(img_psnrs)) < length-2 or np.argmax(np.array(img_ssims)) < length-2:
continue
# fixed stride for different location, get 64*64*3 patch
for y in range(0, h-64, h_step):
for x in range(0, w-64, w_step):
imgs, psnrs, ssims = [], [], []
# plot rectangle on hr img
hr_img1 = hr_img.copy()
cv2.rectangle(hr_img1, (x, y), (x+63, y+63), (255, 0, 0), 2)
left, right, top, bottom = get_direction(x+32, y+32, w, h)
imgs.append(hr_img1[top:bottom+1, left:right+1, :]) # 513 * 513 * 3
hr_patch = hr_img[y:y+64, x:x+64, :]
imgs.append(hr_patch)
# for different model, get corresponding patch
for k in range(length-1):
basename = img_list_dict[dir_list[k+1]][i]
path = osp.join(dir_list[k+1], basename)
img = imageio.imread(path, pilmode='RGB')
if dir_list[k+1] == 'IDN':
img = cv2.copyMakeBorder(img, 4, 4, 4, 4, cv2.BORDER_REPLICATE)
img_patch = img[y:y+64, x:x+64, :]
imgs.append(img_patch)
# calculate psnr and ssim
psnr, ssim = calc_metrics(hr_patch, img_patch)
psnrs.append(psnr)
ssims.append(ssim)
str_psnrs = ['{:.2f}'.format(psnr) for psnr in psnrs]
print('[{:03d}] | ({}/{}, {}/{}) | {}'.format((i+1), y, h, x, w, str_psnrs))
if np.argmax(np.array(psnrs)) == length-2 and np.argmax(np.array(ssims)) == length-2:
print('Saving...')
plot_compare(imgs, img_psnrs, img_ssims, i+1, '{}_{}'.format(y, x), dir_list)
if __name__ == '__main__':
dir_list = ['HR', 'BICUBIC', 'FSRCNN', 'VDSR', 'DRRN', 'LapSRN', 'IDN', 'CARN', 'IMDN', 'XXX']
scale = 4
generate_best(dir_list, border=scale)
|
UTF-8
|
Python
| false | false | 5,919 |
py
| 44 |
article_utils.py
| 34 | 0.488596 | 0.453455 | 0 | 181 | 31.701657 | 107 |
melissaarliss/python-course
| 16,509,854,301,062 |
52ed31548e782714afcb5b6615138f3c4239be16
|
258c6738568d0e64c03ab100d7a70066ac29b817
|
/hw/hw-5/problem2.py
|
9e290d0b8bf6a425bc78699faf770a725c28cead
|
[] |
no_license
|
https://github.com/melissaarliss/python-course
|
61f078296364d2efbf8d6dd6934cc139af6cea67
|
265bc290bf404cc2af5e93193d9048dd98b300d2
|
refs/heads/master
| 2020-08-01T19:34:19.075288 | 2019-11-25T17:34:54 | 2019-11-25T17:34:54 | 211,093,530 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
employees = [
{
"name": "Ron Swanson",
"age": 55,
"department": "Management",
"phone": "555-1234",
"salary": "100,000"
},
{
"name": "Leslie Knope",
"age": 45,
"department": "Middle Management",
"phone": "555-4321",
"salary": "90,000"
},
{
"name": "Andy Dwyer",
"age": 30,
"department": "Shoe Shining",
"phone": "555-1122",
"salary": "50,000"
},
{
"name": "April Ludgate",
"age": 25,
"department": "Administration",
"phone": "555-3345",
"salary": "60,000"
}
]
for employee in employees:
    name = employee["name"]
    department = employee["department"]
    phone = employee["phone"]
    print(f"{name} in {department} can be reached at {phone}.")
|
UTF-8
|
Python
| false | false | 701 |
py
| 59 |
problem2.py
| 48 | 0.574893 | 0.493581 | 0 | 36 | 18.388889 | 60 |
Melody15/PAT
| 6,330,781,819,244 |
04dca24486d3cbd2476c0d4ab013f0f19308578c
|
201595579b6f72391c52d762e0f9ec25200c8681
|
/PAT (Basic-Level)/Python/1032.py
|
ab7271e7bbb400f8dd8a6f44d13062d3cb97d06e
|
[] |
no_license
|
https://github.com/Melody15/PAT
|
d794bc928c175d3c5fe00c700d9085d4bc0e8cb6
|
2e5283c9e2dc5046287b4d91e12c37402de14c02
|
refs/heads/master
| 2021-01-22T02:58:02.201256 | 2017-12-23T14:22:36 | 2017-12-23T14:22:36 | 102,257,767 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
N = int(input())
infoData = {}
for i in range(N):
tempData = input().split()
if tempData[0] in infoData:
infoData[tempData[0]] += int(tempData[1])
else:
infoData[tempData[0]] = int(tempData[1])
result = sorted(infoData.items(), key = lambda item:item[1], reverse = True)
print(result[0][0], result[0][1])
#testpoint 3 overtime
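# A single max() pass avoids the full O(n log n) sort and may help with the
# over-time testpoint (sketch):
# best = max(infoData.items(), key=lambda item: item[1])
# print(best[0], best[1])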
|
UTF-8
|
Python
| false | false | 404 |
py
| 143 |
1032.py
| 56 | 0.608911 | 0.576733 | 0 | 18 | 21.444444 | 77 |
pd1714git/masterThesis
| 7,756,710,978,151 |
7a0e8508544aa7e0f12f8c0da09359a2bbe9ab25
|
badc7317d7257b6885c78442e13ef1d336fefefb
|
/cds-eda/cds_eda/__init__.py
|
2546f0d0e69c3332fd9e87da16a5476435a281f5
|
[] |
no_license
|
https://github.com/pd1714git/masterThesis
|
71beb929cecff39013dd2318371a86d1c4d9b9b1
|
1a1714bd31ffd7f217e6836a06f306cfc5c11efc
|
refs/heads/main
| 2023-03-08T13:49:23.297557 | 2021-02-24T09:08:40 | 2021-02-24T09:08:40 | 340,701,280 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__version__ = '0.1.0'
from .cleaning import cleaning
from .eda import eda
|
UTF-8
|
Python
| false | false | 73 |
py
| 8 |
__init__.py
| 3 | 0.712329 | 0.671233 | 0 | 3 | 23.666667 | 30 |
metamarcdw/pycoinnet2
| 9,423,158,292,361 |
8bd2fa38961fd48f1e382750a1820537b96b424f
|
88f0dd8c471151d1c4752a46f7dc8c1e8cdc6d49
|
/pycoinnet/helpers/networks.py
|
9c5dfb815aa5ff49f7aba3f6696f447d7243becd
|
[
"MIT"
] |
permissive
|
https://github.com/metamarcdw/pycoinnet2
|
0222b6c55ac18af712c5083197133b3abb62200f
|
4f03ae92a3a224f16532d304490f9d5aaf97c79b
|
refs/heads/master
| 2017-04-04T10:55:34.963061 | 2015-06-22T01:24:40 | 2015-06-22T01:24:40 | 37,493,086 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
from __future__ import absolute_import
import binascii
MAINNET = dict(
MAGIC_HEADER = binascii.unhexlify(u'F9BEB4D9'),
DNS_BOOTSTRAP = [
u"seed.bitcoin.sipa.be", u"dnsseed.bitcoin.dashjr.org"
u"bitseed.xf2.org", u"dnsseed.bluematt.me",
],
DEFAULT_PORT = 8333,
)
TESTNET = dict(
MAGIC_HEADER = binascii.unhexlify(u'0B110907'),
DNS_BOOTSTRAP = [
u"bitcoin.petertodd.org", u"testnet-seed.bitcoin.petertodd.org",
u"bluematt.me", u"testnet-seed.bluematt.me"
],
DEFAULT_PORT = 18333,
)
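# Example (sketch): resolve bootstrap peers for a given network.
# import socket
# params = MAINNET
# for seed in params["DNS_BOOTSTRAP"]:
#     try:
#         infos = socket.getaddrinfo(seed, params["DEFAULT_PORT"])
#     except socket.gaierror:
#         continue
#     peers = sorted({info[4][0] for info in infos})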
|
UTF-8
|
Python
| false | false | 569 |
py
| 9 |
networks.py
| 8 | 0.646749 | 0.611599 | 0 | 22 | 24.863636 | 72 |
alexa984/python101
| 5,153,960,776,855 |
9c6309108698e807c607fbc7247d0d53c07ea85d
|
ac215179b41d28be712159241b101c6a28e62d59
|
/week2/birthday_ranges.py
|
f282099b46c946e28eb9a52de638d4aad6d9566a
|
[] |
no_license
|
https://github.com/alexa984/python101
|
13ea1644342cc12d885a2bae6d326b5198cb2bba
|
fb5a48c29f75f7cb96966073f18fb3190aa0b929
|
refs/heads/master
| 2020-05-04T00:12:48.452752 | 2019-06-05T13:01:31 | 2019-06-05T13:01:31 | 178,879,767 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def birthday_ranges(birthdays, ranges):
result = []
for my_range in ranges:
result.append(len(list(x for x in birthdays if my_range[0] <= x <= my_range[1])))
return result
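# Example (sketch):
# birthday_ranges([5, 10, 15], [(0, 6), (5, 15)])  ->  [1, 3]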
|
UTF-8
|
Python
| false | false | 192 |
py
| 128 |
birthday_ranges.py
| 120 | 0.635417 | 0.625 | 0 | 5 | 37.4 | 89 |
tapomay/libgenetic
| 8,985,071,608,567 |
fbeee46d6563e9bf05bec0c494daa2e256a30e9c
|
19001f24a510c1ceedd71ebad0e55b49fcfe635c
|
/src_python/bio_ga/randomforest/rf.py
|
262a250f5832726822d63f4ccb12d9f626b7b640
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/tapomay/libgenetic
|
5cd83d3acaed6137bef68a36cf16509ea84c572b
|
4045d1ac056251d95c101d34c9c7549d151a3e7e
|
refs/heads/master
| 2020-05-26T01:56:29.460387 | 2017-11-12T20:43:37 | 2017-11-12T20:43:37 | 84,984,244 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
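# Train on the train split only, then calibrate its probabilities on the
# held-out validation split.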
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
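# Report both losses; sigmoid calibration is expected to lower the log-loss.
print("Log-loss (uncalibrated): %.3f" % score)
print("Log-loss (sigmoid-calibrated): %.3f" % sig_score)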
|
UTF-8
|
Python
| false | false | 1,067 |
py
| 40 |
rf.py
| 30 | 0.73477 | 0.693533 | 0 | 31 | 33.451613 | 75 |
mberkdemir/macro-software
| 8,684,423,885,254 |
fe4a8470e443f7fd508e36b6bad5805324166c3a
|
a8519d8742a73828505560ba158890ccc95acdbb
|
/main.py
|
f0320e56057612d7d1685666591a6e0b61e709a3
|
[] |
no_license
|
https://github.com/mberkdemir/macro-software
|
b8317620211ed3b31fcef30c98d24ef2207746c7
|
5f258655d0e8dd726000225ac841bbc93d842bc0
|
refs/heads/master
| 2023-03-03T20:48:18.254393 | 2021-02-08T12:23:25 | 2021-02-08T12:23:25 | 337,040,625 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QInputDialog, QMessageBox
import sys
from ControllerProgram import Ui_Main
from pycode import Macro
from threading import Thread
class App(QtWidgets.QWidget):
def __init__(self):
super(App, self).__init__()
self.ui = Ui_Main()
self.ui.setupUi(self)
self.ui.btn_AddMouseEvent.clicked.connect(self.add_mouse_event)
self.ui.btn_save.clicked.connect(self.save_macro)
self.ui.btn_delete.clicked.connect(self.clear_macro)
self.ui.btn_preview_macro.clicked.connect(self.preview_macro)
self.ui.slider_power.valueChanged.connect(self.power_changed)
self.ui.listWidget_your_macros.itemSelectionChanged.connect(self.item_activated)
self.macro = Macro()
self.temp_pattern = []
self.t1 = Thread(target=lambda: self.macro.mainloop())
self.startup()
def add_mouse_event(self):
temp = self.ui.spinBox_x.text() + ',' + self.ui.spinBox_y.text()
temp_delay = self.ui.doubleSpinBox_delay_mouse.text()
self.add_temp_pattern([self.ui.spinBox_x.value(),
self.ui.spinBox_y.value(),
self.ui.doubleSpinBox_delay_mouse.value()])
self.ui.listWidget.addItem("➳ mouse event " + temp + "\n● delay " + temp_delay)
self.ui.spinBox_x.setValue(0)
self.ui.spinBox_y.setValue(0)
self.ui.doubleSpinBox_delay_mouse.setValue(0)
def set_temp_pattern(self, value):
self.temp_pattern = value
def add_temp_pattern(self, _pattern):
self.temp_pattern.append(_pattern)
def get_temp_pattern(self):
return self.temp_pattern
def preview_macro(self):
self.macro.set_preview_macro(self.get_temp_pattern())
def save_macro(self):
text, ok = QInputDialog.getText(self, 'Save Macro - Bisquit', 'Give a name to your macro.')
if ok and text:
            if self.get_temp_pattern():
self.macro.create_macro_pattern(text, self.get_temp_pattern())
self.ui.listWidget_your_macros.clear()
self.list_macros()
self.clear_macro()
else:
msg = QMessageBox()
msg.setWindowTitle('Error - Bisquit')
msg.setText('There is no macro to be saved.')
msg.setStandardButtons(QMessageBox.Ok)
msg.setDefaultButton(QMessageBox.Ignore)
msg.exec_()
else:
msg = QMessageBox()
msg.setWindowTitle('Error - Bisquit')
msg.setText('Please give a name to your macro.')
msg.setStandardButtons(QMessageBox.Ok)
msg.setDefaultButton(QMessageBox.Ignore)
msg.exec_()
def clear_macro(self):
self.ui.listWidget.clear()
self.set_temp_pattern([])
def power_changed(self):
self.macro.set_macro_power(self.ui.slider_power.value())
def startup(self):
self.t1.start()
self.t1.join(1)
self.list_macros()
def list_macros(self):
try:
temp = self.macro.get_macro_patterns()
for name in temp["macros"]:
self.ui.listWidget_your_macros.addItem(name.get("name"))
except:
self.list_macros()
def item_activated(self):
self.macro.set_macro_index(self.ui.listWidget_your_macros.currentRow())
def closeEvent(self, event):
self.macro.set_program_working(False)
del self.t1
def main():
app = QtWidgets.QApplication(sys.argv)
app.setStyle('Fusion')
win = App()
win.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
UTF-8
|
Python
| false | false | 3,739 |
py
| 6 |
main.py
| 3 | 0.6 | 0.597323 | 0 | 113 | 32.053097 | 99 |
arkpku/boolean_formulas
| 13,864,154,459,060 |
2f5a78cd996bcbd3bb1c55a76a058ee492e5d3e9
|
30c666ea8f75a2d02f701403e9eff3f8a95b5349
|
/boolean_formula.py
|
051b92e4b8951cf3d29c6d552637f3958996fcbf
|
[] |
no_license
|
https://github.com/arkpku/boolean_formulas
|
16905ea8e0d87dc23a3ae2d69d50dd144e81c16b
|
7aa73a2e0cc2f8d636caea930a918558310b8a9e
|
refs/heads/master
| 2020-12-10T03:26:34.811225 | 2017-07-01T14:38:47 | 2017-07-01T14:38:47 | 95,573,741 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import random
# generate sequences representing trees. O = operator; P = propositional letters
def enumerate_sequence(n_proposition, initial_seg):
nO = initial_seg.count('O')
nP = initial_seg.count('P')
if nO >= n_proposition or nP >= n_proposition + 1:
return []
if nP == nO + 1:
if nP == n_proposition:
return [initial_seg]
else:
return []
else:
return enumerate_sequence(n_proposition, initial_seg + 'P') + enumerate_sequence(n_proposition, initial_seg + 'O')
def generate_random_sequence(n_propositions):
if n_propositions == 1:
return 'P'
seq = 'O'
while(len(seq) < 2*n_propositions - 2):
if seq.count('P') < seq.count('O') and seq.count('O') < n_propositions - 1:
seq += random.choice(['P','O'])
elif seq.count('P') == seq.count('O'):
seq += 'O'
elif seq.count('O') == n_propositions - 1:
seq += 'P'
return seq + 'P'
# get a sub tree sequence from a legal sequence as is generated above
def get_subtree_seq(sequence):
length = 0
while sequence[:length].count('P') != sequence[:length].count('O') + 1:
length += 1
# if length > len(sequence) + 1:
# return -1,-1
return sequence[:length], length
def build_tree(sequence):
root = {}
root['operator'] = sequence[0]
root['children'] = []
if sequence[0] == 'O':
lseq, llength = get_subtree_seq(sequence[1:])
rseq, rlength = get_subtree_seq(sequence[1+llength:])
lsubtree = build_tree(lseq)
lsubtree['parent'] = root
root['children'].append(lsubtree)
rsubtree = build_tree(rseq)
rsubtree['parent'] = root
root['children'].append(rsubtree)
return root
def random_specify_tree(root, list_of_operators = ['and', 'or', 'imply', 'equiv','Nand','Nor', 'Nimply', 'Nequiv']):
if root['operator'] == 'P':
root['operator'] = random.choice([True,False])
else:
root['operator'] = random.choice(list_of_operators)
for child in root['children']:
random_specify_tree(child, list_of_operators)
return root
def evaluate_tree(root):
if root['operator'] in [True,False]:
return root['operator']
elif root['operator'] == 'and':
return evaluate_tree(root['children'][0]) and evaluate_tree(root['children'][1])
elif root['operator'] == 'or':
return evaluate_tree(root['children'][0]) or evaluate_tree(root['children'][1])
elif root['operator'] == 'imply':
return (not evaluate_tree(root['children'][0])) or evaluate_tree(root['children'][1])
elif root['operator'] == 'equiv':
return evaluate_tree(root['children'][0]) == evaluate_tree(root['children'][1])
elif root['operator'] == 'Nand':
return not(evaluate_tree(root['children'][0]) and evaluate_tree(root['children'][1]))
elif root['operator'] == 'Nor':
return not(evaluate_tree(root['children'][0]) or evaluate_tree(root['children'][1]))
elif root['operator'] == 'Nimply':
return not((not evaluate_tree(root['children'][0])) or evaluate_tree(root['children'][1]))
elif root['operator'] == 'Nequiv':
return not(evaluate_tree(root['children'][0]) == evaluate_tree(root['children'][1]))
elif root['operator'] == 'not':
return not evaluate_tree(root['children'][0])
# expand the negations
def tree_trim(root, list_of_neg_operators = ['Nand','Nor','Nimply','Nequiv']):
if root['operator'] in [True, False]:
return
if root['operator'] in list_of_neg_operators:
if root['operator'] == 'Nand':
n_root = root.copy()
n_root['operator'] = 'and'
n_root['parent'] = root
root['operator'] = 'not'
root['children'] = [n_root]
elif root['operator'] == 'Nor':
n_root = root.copy()
n_root['operator'] = 'or'
n_root['parent'] = root
root['operator'] = 'not'
root['children'] = [n_root]
elif root['operator'] == 'Nimply':
n_root = root.copy()
n_root['operator'] = 'imply'
n_root['parent'] = root
root['operator'] = 'not'
root['children'] = [n_root]
elif root['operator'] == 'Nequiv':
n_root = root.copy()
n_root['operator'] = 'equiv'
n_root['parent'] = root
root['operator'] = 'not'
root['children'] = [n_root]
for child in root['children']:
tree_trim(child)
# returns a list
def tree_to_pre_order(root):
if root['operator'] in [True, False]:
if root ['operator']:
return ['True']
else:
return['False']
else:
r = [root['operator']]
for child in root['children']:
r += tree_to_pre_order(child)
return r
def tree_to_post_order(root):
if root['operator'] in [True, False]:
if root ['operator']:
return ['True']
else:
return['False']
else:
r = []
for child in root['children']:
r += tree_to_post_order(child)
return r + [root['operator']]
def tree_to_normal_order(root):
if root['operator'] in [True, False]:
if root ['operator']:
return ['True']
else:
return['False']
else:
if root['operator'] == 'not':
return ['not'] + tree_to_normal_order(root['children'][0])
else:
return ['('] + tree_to_normal_order(root['children'][0]) + [root['operator']] + tree_to_normal_order(root['children'][1]) + [')']
# 'True' should be a boolean value not a string
def pre_order_to_tree(sequence, index, list_of_operators = ['and', 'or', 'imply', 'equiv','Nand','Nor', 'Nimply', 'Nequiv']):
root = {}
root['operator'] = sequence[index]
root['children'] = []
if sequence[index] in [True,False, 'True', 'False']:
return root, index + 1
else:
lt, il = pre_order_to_tree(sequence, index + 1)
lt['parent'] = root
root['children'].append(lt)
rt, ir = pre_order_to_tree(sequence, il)
        rt['parent'] = root
root['children'].append(rt)
return root, ir
def depth_of_tree(root):
if len(root['children']) == 0:
return 1
else:
return 1 + max(list(map(lambda x: depth_of_tree(x),root['children'])))
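# Minimal usage sketch (not from the original file; it relies only on the
# functions above): hand-build the formula (True and False), evaluate it,
# and round-trip it through the pre-order encoding with boolean leaves.
if __name__ == '__main__':
    leaf_t = {'operator': True, 'children': []}
    leaf_f = {'operator': False, 'children': []}
    conj = {'operator': 'and', 'children': [leaf_t, leaf_f]}
    print(evaluate_tree(conj))           # False
    print(tree_to_normal_order(conj))    # ['(', 'True', 'and', 'False', ')']
    rebuilt, _ = pre_order_to_tree(['and', True, False], 0)
    print(evaluate_tree(rebuilt))        # False
    print(depth_of_tree(conj))           # 2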
|
UTF-8
|
Python
| false | false | 6,444 |
py
| 3 |
boolean_formula.py
| 3 | 0.552917 | 0.5464 | 0 | 174 | 36.04023 | 141 |
Mahelita/braindevel
| 17,910,013,649,699 |
9dc2fe41749591f6c9e0e83ba9f3a5ee158ccdb9
|
9481772b5eefeaae71d8d2d668dda611735ab5a7
|
/braindecode/veganlasagne/update_modifiers.py
|
4100f7b649c76c206079d3c681871b1014215d3d
|
[] |
no_license
|
https://github.com/Mahelita/braindevel
|
f8cd77803e0860764dee5822dd00fc8a2c8c3a6c
|
21f58aa74fdd2a3b03830c950b7ab14d44979045
|
refs/heads/master
| 2020-03-23T15:03:13.140736 | 2017-09-11T14:42:06 | 2017-09-11T14:42:06 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import lasagne
from copy import deepcopy, copy
import numpy as np
import logging
from _collections import deque
log = logging.getLogger(__name__)
class MaxNormConstraint():
def __init__(self, layer_names_to_norms=None, default_norm=None):
assert (layer_names_to_norms is not None) or (default_norm is not None)
self.layer_names_to_norms = layer_names_to_norms
self.default_norm = default_norm
def modify(self, updates, final_layer):
all_layers = lasagne.layers.get_all_layers(final_layer)
if self.layer_names_to_norms is None:
layer_names_to_norms = dict()
else:
layer_names_to_norms = deepcopy(self.layer_names_to_norms)
normed_layer_names = set()
for layer in all_layers:
# Either in dict or there is default norm, or nothing
if layer.name in layer_names_to_norms:
norm = layer_names_to_norms[layer.name]
normed_layer_names.add(layer.name)
log.info("Constraining {:s} to norm {:.2f}".format(
layer.name, norm))
updates[layer.W] = lasagne.updates.norm_constraint(
updates[layer.W], max_norm=norm)
elif hasattr(layer, 'W') and self.default_norm is not None:
log.info("Constraining {:s} to norm {:.2f}".format(
layer.name, self.default_norm))
updates[layer.W] = lasagne.updates.norm_constraint(
updates[layer.W], max_norm=self.default_norm)
        assert np.array_equal(sorted(layer_names_to_norms.keys()),
            sorted(normed_layer_names)), ("Every layer named in "
            "layer_names_to_norms must exist in the network; missing layers:",
            np.setdiff1d(layer_names_to_norms.keys(), normed_layer_names))
return updates
class MaxNormConstraintAll2():
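    """Like MaxNormConstraint, but constrains every weighted layer that is
    not listed explicitly to a default max norm of 2.0."""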
def __init__(self, layer_names_to_norms):
self.layer_names_to_norms = layer_names_to_norms
def modify(self, updates, final_layer):
layer_names_to_norms = deepcopy(self.layer_names_to_norms)
all_layers = lasagne.layers.get_all_layers(final_layer)
normed_layer_names = set()
for layer in all_layers:
if layer.name in layer_names_to_norms:
norm = layer_names_to_norms[layer.name]
normed_layer_names.add(layer.name)
log.info("Constraining {:s} to norm {:.2f}".format(
layer.name, norm))
updates[layer.W] = lasagne.updates.norm_constraint(
updates[layer.W], max_norm=norm)
elif hasattr(layer, 'W'):
norm = 2.0
log.info("Constraining {:s} to norm {:.2f}".format(
layer.name, norm))
updates[layer.W] = lasagne.updates.norm_constraint(
updates[layer.W], max_norm=norm)
        assert np.array_equal(sorted(layer_names_to_norms.keys()),
            sorted(normed_layer_names)), ("Every layer named in "
            "layer_names_to_norms must exist in the network; missing layers:",
            np.setdiff1d(layer_names_to_norms.keys(), normed_layer_names))
return updates
class MaxNormConstraintWithDefaults(object):
""" Uses max norm constraint of 2.0 on all intermediate layers
and constraint of 0.5 on final layers (= layers that are not followed
by any more layers with parameters)."""
def __init__(self, layer_names_to_norms):
self.layer_names_to_norms = layer_names_to_norms
def modify(self, updates, final_layer):
_, layers_to_succs = (
layer_to_predecessors_and_successors(final_layer))
layer_names_to_norms = deepcopy(self.layer_names_to_norms)
all_layers = lasagne.layers.get_all_layers(final_layer)
normed_layer_names = set()
for layer in all_layers:
if layer.name in layer_names_to_norms:
norm = layer_names_to_norms[layer.name]
normed_layer_names.add(layer.name)
log.info("Constraining {:s} to norm {:.2f}".format(
layer.name, norm))
updates[layer.W] = lasagne.updates.norm_constraint(
updates[layer.W], max_norm=norm)
elif hasattr(layer, 'W'):
# check if any successors also have weights...
successors = layers_to_succs[layer]
successors_have_weights = np.any([hasattr(l_succ, 'W')
for l_succ in successors])
if successors_have_weights:
norm = 2.0
else:
norm = 0.5
log.info("Constraining {:s} to norm {:.2f}".format(
layer.name, norm))
updates[layer.W] = lasagne.updates.norm_constraint(
updates[layer.W], max_norm=norm)
        assert np.array_equal(sorted(layer_names_to_norms.keys()),
            sorted(normed_layer_names)), ("Every layer named in "
            "layer_names_to_norms must exist in the network; missing layers:",
            np.setdiff1d(layer_names_to_norms.keys(), normed_layer_names))
return updates
def layer_to_predecessors_and_successors(final_layer):
""" Dicts with predecessor and successor layers per layer."""
layer_to_pred = {}
    layer_to_succ = {}
layer_to_succ[final_layer] = []
queue = deque([final_layer])
seen = set()
while queue:
# Peek at the leftmost node in the queue.
layer = queue.popleft()
if layer is None:
# Some node had an input_layer set to `None`. Just ignore it.
pass
elif layer not in seen:
# We haven't seen this node yet, update predecessors and successors
# for it and its input layers
seen.add(layer)
if hasattr(layer, 'input_layers'):
# need copy, else later input layers itself will be modified
layer_to_pred[layer] = copy(layer.input_layers)
for predecessor in layer.input_layers:
layer_to_succ[predecessor] = layer_to_succ[layer] + [layer]
for successor in layer_to_succ[layer]:
layer_to_pred[successor] += copy(layer.input_layers)
queue.extendleft(reversed(copy(layer.input_layers)))
elif hasattr(layer, 'input_layer'):
layer_to_pred[layer] = [layer.input_layer]
layer_to_succ[layer.input_layer] = layer_to_succ[layer] + [layer]
for offspring in layer_to_succ[layer]:
layer_to_pred[offspring] += [layer.input_layer]
queue.appendleft(layer.input_layer)
else:
# We've been here before: Either we've finished all its incomings,
# or we've detected a cycle.
pass
return layer_to_pred, layer_to_succ
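# Typical wiring of a modifier (a sketch under assumed names; `loss` and
# `final_layer` come from the surrounding training script, not this module):
#
#   params = lasagne.layers.get_all_params(final_layer, trainable=True)
#   updates = lasagne.updates.adam(loss, params)
#   updates = MaxNormConstraint(default_norm=2.0).modify(updates, final_layer)
#
# `modify` logs each constrained layer by name, so layers should be given
# names when the network is built.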
|
UTF-8
|
Python
| false | false | 7,083 |
py
| 343 |
update_modifiers.py
| 145 | 0.568121 | 0.565297 | 0 | 160 | 43.1875 | 81 |
weiliangxie/HD_map_updating
| 7,078,106,107,640 |
ffb4eee2b0a15219cf39ddf54ac7bece3c685c7b
|
fdee5fbda72d98df7f400f810cf4c3714ad471e8
|
/triangulation/projector.py
|
da5e01a6c50493372207c128fe8c589df83335d5
|
[] |
no_license
|
https://github.com/weiliangxie/HD_map_updating
|
069061f05be916754580047021dea32c1651eafd
|
f2a298084a6ab93b3a9074645fb8d6b30b05d214
|
refs/heads/master
| 2023-03-16T17:43:49.481898 | 2021-01-30T04:06:29 | 2021-01-30T04:06:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import cv2
import numpy as np
from scipy.spatial.transform import Rotation
class ColmapProjector:
"""Project traffic sign 2d position to colmap 3d position
"""
def __init__(self, intrinsics, ref_img, ref_pose, ref_bbox, src_img, src_pose):
"""
args:
intrinsics: camera intrinsics. 3x3 numpy array
ref_img: reference image. src_img is relative to ref_img.
value range in [0,1]
ref_pose: camera pose of reference image obtained from Colmap
3x4 numpy array [R|t]
ref_bbox: bounding box of interested region. 1x4 numpy array,
[x1, y1, x2, y2] from top-left to bottom-right
src_img: source image
src_pose: camera pose of source image obtained from Colmap.
3x4 numpy array [R|t]
"""
super().__init__()
self.intrinsics = intrinsics
self.ref_img = ref_img
self.ref_pose = ref_pose
self.ref_bbox = ref_bbox
self.src_img = src_img
self.src_pose = src_pose
# use large number of orb feature points to ensure
# there are some points inside bbox (a bad idea)
self.orb = cv2.ORB_create(2000)
self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
def _is_inside(self, kp, bbox):
"""determine if a cv2 keypoint is inside a bbox
"""
x, y = kp.pt[0], kp.pt[1]
return bbox[0] <= x <= bbox[2] and bbox[1] <= y <= bbox[3]
def extract_features(self):
"""extract features from ref_img and imgs
"""
kps, des = self.orb.detectAndCompute(self.ref_img, None)
kp_ref, des_ref = [], []
for kp, de in zip(kps, des):
if self._is_inside(kp, self.ref_bbox):
kp_ref.append(kp)
des_ref.append(de)
kp_src, des_src = self.orb.detectAndCompute(self.src_img, None)
self.kp_ref = kp_ref
self.des_ref = np.array(des_ref)
self.kp_src = kp_src
self.des_src = np.array(des_src)
def match_features(self):
"""match features between ref image and src image
"""
matches = self.matcher.match(self.des_ref,self.des_src)
matches = sorted(matches, key = lambda x:x.distance)
self.matches = matches
def project(self):
"""project 2d traffic sign feature points to 3d colmap point
Returns:
3d traffic sign colmap point
"""
ref_proj = self.intrinsics @ self.ref_pose
src_proj = self.intrinsics @ self.src_pose
ref_points2d = []
src_points2d = []
# here heuristically use top 10 matches as traffic sign representative
for match in self.matches[:10]:
ref_points2d.append([self.kp_ref[match.queryIdx].pt[0], self.kp_ref[match.queryIdx].pt[1]])
src_points2d.append([self.kp_src[match.trainIdx].pt[0], self.kp_src[match.trainIdx].pt[1]])
ref_points2d = np.array(ref_points2d).T
src_points2d = np.array(src_points2d).T
points3d = cv2.triangulatePoints(ref_proj, src_proj, ref_points2d, src_points2d)
points3d /= points3d[3,:]
return np.mean(points3d, axis=1)[:3]
if __name__ == "__main__":
ref_img = cv2.imread("data/ref_img.jpg")
src_img = cv2.imread("data/src_img.jpg")
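    # Version caveat (not in the original file): Rotation.as_dcm() was
    # deprecated in SciPy 1.4 in favor of as_matrix() and removed in 1.6,
    # so newer SciPy installs need as_matrix() below.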
ref_R = Rotation.from_quat([-0.00507002, 0.00227284, -0.00148047, 0.999983]).as_dcm().astype(np.float32)
ref_t = np.array([0.111909, 0.00141431, 0.781914], dtype=np.float32).reshape((3,1))
src_R = Rotation.from_quat([-0.00493567, 0.00275127, -7.77088e-05, 0.999984]).as_dcm().astype(np.float32)
src_t = np.array([0.0901664, -0.00513701, -1.16926], dtype=np.float32).reshape((3,1))
ref_pose = np.hstack([ref_R, ref_t])
src_pose = np.hstack([src_R, src_t])
ref_bbox = np.array([1492, 268, 1633, 426])
intrinsics = np.array([
[1406.620, 0, 960],
[0, 1406.620, 600],
[0, 0, 1]
], dtype=np.float32)
cv2.rectangle(ref_img, tuple(ref_bbox[:2]), tuple(ref_bbox[2:]), (0,204,0), 2)
cv2.imshow("bbox", ref_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
projector = ColmapProjector(intrinsics, ref_img, ref_pose, ref_bbox,src_img, src_pose)
projector.extract_features()
projector.match_features()
points3d = projector.project()
print("Traffic sign at {} in Colmap".format(points3d))
|
UTF-8
|
Python
| false | false | 4,469 |
py
| 28 |
projector.py
| 12 | 0.590289 | 0.535914 | 0 | 119 | 36.563025 | 109 |
pydevhari/Django-API-Project
| 17,523,466,579,455 |
f58cd4da569ccf16db2c01b9e62e841bba26a625
|
3e724c6c40eb1b7bedbd6d4256716f4f4b14a283
|
/myapp/migrations/0004_auto_20191210_1605.py
|
6b6669e119eeb96be8959519c5c428bc1fd165c7
|
[] |
no_license
|
https://github.com/pydevhari/Django-API-Project
|
6c5b1f3cdc75aeb8c80549be94c5a9a5b426f98b
|
96c2a1ca7e28c3c9c65fa6a9f4ea4ac45237cacc
|
refs/heads/master
| 2020-10-01T01:46:05.326962 | 2019-12-12T15:16:56 | 2019-12-12T15:16:56 | 227,425,234 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Generated by Django 2.2.7 on 2019-12-10 10:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myapp', '0003_auto_20191210_1521'),
]
operations = [
migrations.AddField(
model_name='marks',
name='result',
field=models.CharField(default='', max_length=5),
),
migrations.AddField(
model_name='marks',
name='total',
field=models.IntegerField(default=10),
),
]
|
UTF-8
|
Python
| false | false | 541 |
py
| 14 |
0004_auto_20191210_1605.py
| 6 | 0.554529 | 0.491682 | 0 | 23 | 22.521739 | 61 |
flavioarchilli/offline
| 13,305,808,722,946 |
96c7c6ced008bb24bb8236365ab7f29842273cec
|
14213264dd57a209ad291070e6d560765ef2da11
|
/tests/test_app.py
|
75b2d5a27f8934376435d8267c18f9f5ecb7bfc1
|
[
"MIT"
] |
permissive
|
https://github.com/flavioarchilli/offline
|
c5467c897532df96f1be88812bb35711227d9474
|
9ddfbd4b8e2e535e74bfca6b7b3d98aed22a8aaf
|
refs/heads/master
| 2020-02-26T16:35:51.743777 | 2015-10-02T17:43:07 | 2015-10-02T17:43:07 | 37,140,713 | 2 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import unittest2 as unittest
import monitoring_app
class TestPages(unittest.TestCase):
def setUp(self):
self.app = monitoring_app.create_app()
self.client = self.app.test_client()
def test_app_should_instantiate_with_tasks_resolver(self):
"""App should instantiate with the correct job resolvers."""
assert len(self.app.job_resolvers()) == 1
assert self.app.job_resolvers()[0] == monitoring_app.job_resolvers.tasks_resolver
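# The tests can be run from the repository root with, for example:
#   python -m unittest discover tests
# (an assumed invocation; the project may provide its own runner).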
|
UTF-8
|
Python
| false | false | 475 |
py
| 60 |
test_app.py
| 36 | 0.692632 | 0.686316 | 0 | 13 | 35.538462 | 89 |
937229974/tensorflow
| 4,973,572,141,371 |
6b7a3402935848b723d166f8446e67869b1ecd87
|
5d9767564c4db884412487e304da0f9234106cf5
|
/predict.py
|
2ac35c2fce57b52453c13843381128afbf551294
|
[] |
no_license
|
https://github.com/937229974/tensorflow
|
a682c530e30ba4a8b24e01f154ad5f742ea7eb76
|
d7b90001a218bd46755332162e0ec877df5e80b1
|
refs/heads/master
| 2020-03-28T18:14:23.727196 | 2018-10-13T06:02:45 | 2018-10-13T06:02:45 | 148,863,917 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from train import crack_captcha_cnn
from train import MAX_CAPTCHA,CHAR_SET_LEN,keep_prob,X
from train import vec2text,convert2gray
from PIL import Image
import numpy as np
import tensorflow as tf
import glob,random
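# Restore the trained CNN from its checkpoint, pick a random captcha image
# from disk, and compare the predicted text with the ground truth encoded in
# the file name ("<prefix>_<text>.png").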
def predict_result(captcha_image):
output = crack_captcha_cnn()
saver = tf.train.Saver()
# tf.reset_default_graph()
with tf.Session() as sess:
saver.restore(sess, "F:/CNN_2/model_3/crack_capcha.model-2000")
predict = tf.argmax(tf.reshape(output, [-1, MAX_CAPTCHA, CHAR_SET_LEN]), 2)
text_list = sess.run(predict, feed_dict={X: [captcha_image], keep_prob: 1})
# text_list = sess.run(predict, feed_dict={X: [captcha_image]})
text = text_list[0].tolist()
vector = np.zeros(MAX_CAPTCHA * CHAR_SET_LEN)
i = 0
for n in text:
vector[i * CHAR_SET_LEN + n] = 1
i += 1
return vec2text(vector)
def get_name_and_image():
list1 = glob.glob(r"C:\Users\Administrator\Desktop\text\text\*.png")
num =random.randint(0,len(list1)-1)
path = list1[num]
print(path)
p, image_text = path.split('_')
    captcha_text, ext = image_text.split('.')
captcha_image = Image.open(path)
captcha_image = np.array(captcha_image)
image = convert2gray(captcha_image)
image = image.flatten() / 255
return captcha_text, image
if __name__ =="__main__":
text , image =get_name_and_image()
predict_text = predict_result(image)
print("正确: {} 预测: {}".format(text, predict_text))
|
UTF-8
|
Python
| false | false | 1,545 |
py
| 5 |
predict.py
| 3 | 0.627846 | 0.611581 | 0 | 55 | 26.927273 | 83 |
Zichen-Yan/DRL-Repo-Pytorch-
| 18,004,502,939,505 |
590ecaabc409a8df3df62fc9f165bbcf4662f430
|
f94d6d5092aa211ba3d65cc256aee2b2f216f349
|
/model-free/PPG/model.py
|
c5a833c8964aebeb8f00557d6e73702239caf5c7
|
[] |
no_license
|
https://github.com/Zichen-Yan/DRL-Repo-Pytorch-
|
98a880e00f8e48903e37ad0fd285ff831a110edf
|
d55592d5fe7e73b941c6be4b1c389ce84901a4f3
|
refs/heads/main
| 2023-07-13T11:36:57.200822 | 2021-08-21T02:08:25 | 2021-08-21T02:08:25 | 392,948,319 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import torch
from torch import nn
class Actor(nn.Module):
def __init__(self, state_dim, hidden_dim, num_actions):
super().__init__()
self.net = nn.Sequential(
nn.Linear(state_dim, hidden_dim),
nn.Tanh(),
nn.Linear(hidden_dim, hidden_dim),
nn.Tanh(),
nn.Linear(hidden_dim, hidden_dim),
nn.Tanh()
)
self.action_head = nn.Sequential(
nn.Linear(hidden_dim, num_actions),
nn.Softmax(dim=-1)
)
self.value_head = nn.Linear(hidden_dim, 1)
self.apply(init_)
def forward(self, x):
hidden = self.net(x)
return self.action_head(hidden), self.value_head(hidden)
class Critic(nn.Module):
def __init__(self, state_dim, hidden_dim):
super().__init__()
self.net = nn.Sequential(
nn.Linear(state_dim, hidden_dim),
nn.Tanh(),
nn.Linear(hidden_dim, hidden_dim),
nn.Tanh(),
nn.Linear(hidden_dim, 1),
)
self.apply(init_)
def forward(self, x):
return self.net(x)
def init_(m):
if isinstance(m, nn.Linear):
gain = torch.nn.init.calculate_gain('tanh')
torch.nn.init.orthogonal_(m.weight, gain)
if m.bias is not None:
torch.nn.init.zeros_(m.bias)
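# Minimal smoke test (assumed dimensions; not part of the original training
# code): push a batch of 8 four-dimensional states through both networks.
if __name__ == '__main__':
    actor = Actor(state_dim=4, hidden_dim=64, num_actions=2)
    critic = Critic(state_dim=4, hidden_dim=64)
    states = torch.randn(8, 4)
    probs, aux_value = actor(states)
    print(probs.shape, aux_value.shape, critic(states).shape)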
|
UTF-8
|
Python
| false | false | 1,355 |
py
| 58 |
model.py
| 55 | 0.530627 | 0.528413 | 0 | 51 | 25.568627 | 64 |
The-lYNCAN/BlogingSite
| 9,216,999,865,716 |
fbef1bc335d4062072853a6f829357e9b85ec87b
|
f97552a2f04266d91bbd61e5b51a664a9227abc1
|
/login/views.py
|
a7f3b16cab9167ee6f733fb2900db3adb69ef984
|
[] |
no_license
|
https://github.com/The-lYNCAN/BlogingSite
|
58e45a064d0099302c0d9f66f927b039cb6131c7
|
7f0345cae710ffc43c277dcd302c0f90db2c5a09
|
refs/heads/main
| 2022-12-27T04:28:04.880657 | 2020-10-03T14:57:16 | 2020-10-03T14:57:16 | 300,898,093 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import TemplateView, DetailView, FormView, CreateView, ListView, UpdateView
from django.urls import reverse
from django.views.generic import View
from django.views.generic.edit import FormMixin
from login.forms import signup_form, login_form
from django.http import HttpResponseRedirect
from django.contrib.auth import login, logout, authenticate
from login import models
from login.forms import commenting
from django.contrib.auth.views import login_required
# Create your views here.
class Index_view(TemplateView):
template_name = 'index.html'
def post(self, request):
if self.request.method == "POST":
print("this time its a post")
print(self.request.user)
return HttpResponseRedirect(reverse('login:signup'))
class signup_view(FormView):
template_name = 'signup.html'
form_class = signup_form
success_url = '/login/index'
def form_valid(self, form):
form.save()
return HttpResponseRedirect(reverse('login:index'))
class login_view(FormView):
form_class = login_form
template_name = 'login.html'
success_url = '/login/index'
def form_valid(self, form):
name = self.request.POST.get('username')
password = self.request.POST.get("password")
print(self.request.POST)
print(name)
details = authenticate(self.request, username=name, password=password)
print(name)
if details:
login(self.request, details)
return HttpResponseRedirect(reverse('login:index'))
else:
return HttpResponse("wrong username")
class Blog_creation(CreateView):
model = models.blog_model
fields = [
'title',
'content',
]
template_name = 'blog_creation.html'
def get_success_url(self):
return reverse('login:index')
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.user = str(self.request.user)
self.object.save()
return super(Blog_creation, self).form_valid(form)
class listing_blogs(ListView):
template_name = 'blogs.html'
model = models.blog_model
class complete_blog(FormMixin, DetailView):
from login.models import blog_model
template_name = 'detail.html'
model = models.blog_model
form_class = commenting
def get_context_data(self, **kwargs):
data = super(complete_blog, self).get_context_data(**kwargs)
data['comments'] = models.comments.objects.all()
data['form'] = self.get_form()
return data
def get_success_url(self):
return reverse('login:blog_complete', kwargs={'pk':self.get_object().pk})
def post(self, request, pk):
print('its a post')
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.user = str(self.request.user)
print(str(self.get_object().title))
self.object.blog = str(self.get_object().title)
self.object.save()
return super(complete_blog, self).form_valid(form)
#def form_valid(self, form):
# print("its a valid form")
class update_blog(UpdateView):
model = models.blog_model
template_name = 'update.html'
fields = ('title', 'content')
template_name_suffix = 'update'
def get_success_url(self):
return reverse('login:blog_complete', kwargs={'pk':self.object.pk})
class loggout_view(View):
def get(self, request):
logout(request)
return HttpResponseRedirect(reverse('login:logins'))
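# For reference, the URL names reversed above imply a urls.py along these
# lines (a sketch with assumed route strings; the real patterns live in the
# app's own urls.py):
#
#   app_name = 'login'
#   urlpatterns = [
#       path('index', Index_view.as_view(), name='index'),
#       path('signup', signup_view.as_view(), name='signup'),
#       path('login', login_view.as_view(), name='logins'),
#       path('blog/<int:pk>/', complete_blog.as_view(), name='blog_complete'),
#   ]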
|
UTF-8
|
Python
| false | false | 3,775 |
py
| 7 |
views.py
| 5 | 0.658543 | 0.658543 | 0 | 111 | 33.018018 | 101 |
nbaek/EconOfPrivacy
| 8,890,582,320,853 |
bc24bd1544781cbc62cb20486482f1b1ba32b237
|
e4100ee2d7d4956e8e27d05821ed879532a54076
|
/BufferStockEGM.py
|
4ee29030db33a8f4d13ff9483efcf20cd9a62951
|
[] |
no_license
|
https://github.com/nbaek/EconOfPrivacy
|
4562b7bbdff98a030a078ad8beb52b5e0a5a9d15
|
d0776afb585e152c592fce0ae2563cf004b054e7
|
refs/heads/master
| 2021-08-22T18:30:27.586110 | 2017-11-30T23:40:56 | 2017-11-30T23:40:56 | 105,625,775 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import importlib
import BFClass
importlib.reload(BFClass)
from BFClass import model
import funs
importlib.reload(funs)
import FigModule
importlib.reload(FigModule)
from FigModule import solver
#%% With retirement
par = model()
par.T = 90-20
par.simlifecycle = 1
par.RT = par.T - 20 # Retirement age
par.sim_mini = 1.5
# Income profile
par.L[0:par.RT] = np.linspace(1,1/(par.G),par.RT)
par.L[par.RT] = 0.90
par.L[par.RT:] = par.L[par.RT:]/par.G
# Create grids for state variables
par.grid = par.create_grids(par)
#%%
solution = []
beta_n = []
params = []
for b in [-10.0, 0.0]:
par.beta_n = b
beta_n.append(b)
solution.append(solver(par))
params.append(par)
sim = []
for i in range(len(solution)):
sim.append(FigModule.Simulation(params[i], solution[i]))
#%% Colors
col = ["#006282", "#E6216C", "#89F5FF"]
case = []
for i in range(len(solution)):
if beta_n[i] < 0:
case.append('Public')
else:
case.append('Private')
#%% Figure with lags
lags = [1, 2, 3, 30,50]
fig, ax = plt.subplots(1)
for j in range(len(lags)):
l = lags[j]
t = par.T - l
if j == 0:
for i in range(len(solution)):
ax.plot(solution[i]['m'][t], solution[i]['c'][t], label = case[i], color = col[abs(1-i)])
else:
for i in range(len(solution)):
ax.plot(solution[i]['m'][t], solution[i]['c'][t], color = col[i])
ax.set_xlim(0, 10)
ax.set_ylim(0, 5)
ax.set_xlabel("Cash-in-hand")
ax.set_ylabel("Consumption")
ax.grid()
ax.legend()
plt.show()
#%% Create population means and generate the figures
means = FigModule.createMeans(sim, solution)
FigModule.lifecycleFigures(means, case, col)
FigModule.MPC_figure(solution, case, col)
FigModule.consumptionFigure(means, case, col)
FigModule.savingsFigure(means, case, col)
FigModule.singlePersonPlot(par, sim, case, col, public = 1)
FigModule.singlePersonPlot(par, sim, case, col, public = 0)
|
UTF-8
|
Python
| false | false | 1,986 |
py
| 6 |
BufferStockEGM.py
| 5 | 0.653575 | 0.626385 | 0 | 79 | 24.151899 | 101 |
calvinfeng/low-rank-factorization
| 4,088,808,889,492 |
1e12d6d07bed944d9b48297f3543bb0a22912028
|
71be3ba418974e2815976c2fc9421b3b37c8438c
|
/incremental_svd/user.py
|
f55ec9976c35bd769928b023fe75815f80458d1c
|
[] |
no_license
|
https://github.com/calvinfeng/low-rank-factorization
|
1061e5dc0ed86619c47fe0a3beb40a03ecda9aa4
|
4402e79ac8adfb40d3917067746eee17d96e65e1
|
refs/heads/master
| 2021-04-27T16:42:17.974634 | 2018-03-05T04:30:26 | 2018-03-05T04:30:26 | 122,307,930 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Project: Recommender System
# Author(s): Calvin Feng
from random import random, sample
from pdb import set_trace as debugger
from math import sqrt
class User:
def __init__(self, user_id, movie_ratings, preference_length, is_test_user=False):
self.id = user_id
self.preference_length = preference_length
self.theta = self.random_init(preference_length)
if is_test_user:
self.set_ratings(movie_ratings, 2)
else:
self.set_ratings(movie_ratings, 0)
self._baseline_rating = None
def random_init(self, size):
# Give User a bias term, which is 1
preference_vector = [1]
while len(preference_vector) < size:
preference_vector.append(random())
return preference_vector
def set_ratings(self, movie_ratings, num_of_hidden_ratings):
hidden_ratings = dict()
if len(movie_ratings) >= num_of_hidden_ratings:
            # list() is required in Python 3, where random.sample rejects dicts
            random_keys = sample(list(movie_ratings), num_of_hidden_ratings)
for i in range(0, num_of_hidden_ratings):
key = random_keys[i]
hidden_ratings[key] = movie_ratings.pop(key)
self.movie_ratings = movie_ratings
self.hidden_ratings = hidden_ratings
@property
def avg_rating(self):
if self._baseline_rating is None and len(self.movie_ratings) != 0:
avg = 0
for movie_id in self.movie_ratings:
avg += float(self.movie_ratings[movie_id])
self._baseline_rating = avg / len(self.movie_ratings)
return self._baseline_rating
def sim(self, other_user):
# Using Pearson correlation coefficient
user_correlation = 0
this_user_variance, other_user_variance = 0, 0
movies_seen_by_both = []
for movie_id in self.movie_ratings:
if other_user.movie_ratings.get(movie_id):
movies_seen_by_both.append(movie_id)
if len(movies_seen_by_both) >= 20:
for movie_id in movies_seen_by_both:
this_rating, other_rating = float(self.movie_ratings[movie_id]), float(other_user.movie_ratings[movie_id])
user_correlation += (this_rating - self.avg_rating)*(other_rating - other_user.avg_rating)
this_user_variance += (this_rating - self.avg_rating)**2
other_user_variance += (other_rating - other_user.avg_rating)**2
if this_user_variance == 0 or other_user_variance == 0:
# If one of the variances is zero, it's an undefined correlation
return 0
else:
return user_correlation/(sqrt(this_user_variance)*sqrt(other_user_variance))
else:
# Statistically insignificant thus I return 0 for similarity
return 0
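# Minimal usage sketch (hypothetical ratings; real data loading lives in the
# surrounding project). Pearson similarity requires at least 20 co-rated
# movies, and identical constant ratings have zero variance, so this prints 0.
if __name__ == '__main__':
    ratings = {str(movie_id): '4.0' for movie_id in range(25)}
    u1 = User('u1', dict(ratings), preference_length=3)
    u2 = User('u2', dict(ratings), preference_length=3)
    print(u1.sim(u2))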
|
UTF-8
|
Python
| false | false | 2,824 |
py
| 11 |
user.py
| 8 | 0.60517 | 0.598442 | 0 | 72 | 38.222222 | 122 |
Yuhjiang/LeetCode_Jyh
| 11,184,094,852,917 |
7261fcb5dc7613820ea89e2da839f65867686cb1
|
e8e40045ffa81bcb86a3d75748f333ca5f1ed5e8
|
/problems/midium/63-unique-paths-ii.py
|
03907a9ecf231d8d7fcbb3b2236e53eba120ad66
|
[] |
no_license
|
https://github.com/Yuhjiang/LeetCode_Jyh
|
00ba0e7b6cc3d5075ecd6e9009268056fbb3a70c
|
a60d8b5adce3d1149757a7b164b52da0244aebd8
|
refs/heads/master
| 2021-07-13T03:26:59.259705 | 2020-08-26T07:27:39 | 2020-08-26T07:27:39 | 193,892,319 | 1 | 0 | null | false | 2019-08-13T13:36:27 | 2019-06-26T11:35:32 | 2019-08-07T14:48:04 | 2019-08-13T13:36:27 | 33 | 0 | 0 | 0 |
Python
| false | false |
from typing import List
class Solution:
def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:
if not obstacleGrid:
return 0
if not obstacleGrid[0]:
return 1
if obstacleGrid[0][0] == 1:
return 0
row, col = len(obstacleGrid), len(obstacleGrid[0])
dp = [[0 for _ in range(col)] for _ in range(row)]
dp[0][0] = 1 if obstacleGrid[0][0] != 1 else 0
        def dfs(i, j):
            if obstacleGrid[i][j] == 1:
                return 0
            if i == 0 and j == 0:
                return dp[0][0]
            # memoize: without this check, cells reachable from both
            # neighbours accumulate into dp repeatedly and paths are
            # double-counted
            if dp[i][j]:
                return dp[i][j]
            if i > 0:
                dp[i][j] = dp[i][j] + dfs(i-1, j)
            if j > 0:
                dp[i][j] = dp[i][j] + dfs(i, j-1)
            return dp[i][j]
return dfs(row-1, col-1)
class NewSolution:
def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:
row, col = len(obstacleGrid), len(obstacleGrid[0])
dp = [0 for _ in range(col)]
dp[0] = 1 if obstacleGrid[0][0] != 1 else 0
for i in range(row):
for j in range(col):
if obstacleGrid[i][j] == 1:
dp[j] = 0
continue
if j - 1 >= 0 and obstacleGrid[i][j-1] == 0:
dp[j] += dp[j-1]
return dp[col-1]
if __name__ == '__main__':
print(NewSolution().uniquePathsWithObstacles([
[0,0,0],
[0,1,0],
[0,0,0]
]))
# print(Solution().uniquePathsWithObstacles([
# [1], [0]
# ]))
|
UTF-8
|
Python
| false | false | 1,480 |
py
| 159 |
63-unique-paths-ii.py
| 158 | 0.473649 | 0.439189 | 0 | 56 | 25.446429 | 77 |
Neo945/LetsTweet
| 1,133,871,366,349 |
1b686c9d15b3032848951436db22d037c9a0c810
|
53e1a8879b49c4cd6c7a49e3310e4e8f11c618e7
|
/letsTweet/rest_api/dev.py
|
c98496a1bf89307eb5dc3282d0a667962c6f7b25
|
[
"MIT"
] |
permissive
|
https://github.com/Neo945/LetsTweet
|
c741f7cbe7bbc8015234b80c0f4869ebeebcaf9d
|
b131536e2af9137075e68ac59ec8d5adb41d2279
|
refs/heads/master
| 2023-03-24T21:35:32.042842 | 2021-03-17T14:24:34 | 2021-03-17T14:24:34 | 339,714,319 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from rest_framework import authentication
from django.contrib.auth import get_user_model
User = get_user_model()
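# Development-only shim: every request is authenticated as a random existing
# user, so per-user endpoints can be exercised without logging in. Do not
# enable this class outside local development.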
class DevAuthentication(authentication.BasicAuthentication):
def authenticate(self, request):
qs = User.objects.all()
user = qs.order_by("?").first()
return (user, None)
|
UTF-8
|
Python
| false | false | 314 |
py
| 20 |
dev.py
| 16 | 0.703822 | 0.703822 | 0 | 10 | 30.5 | 60 |
Varunaditya/pythonMultiprocessing
| 9,328,669,004,589 |
44d500c4413213c9c28498eaea0dc81e3b45ac96
|
808d8edd98998e6f360181f16c7c1fcebe0dbc47
|
/sharingData_serverProcess.py
|
707007f14645d14b1e9735c36f4366cde02207cc
|
[] |
no_license
|
https://github.com/Varunaditya/pythonMultiprocessing
|
0e13a3002765cb31bcb48681f12d5596657fe598
|
1a0d15acd756d9d16fdced9d98e4b8d14b5353d3
|
refs/heads/master
| 2020-03-28T00:11:40.907691 | 2018-09-04T22:41:18 | 2018-09-04T22:41:18 | 147,383,879 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# A program that uses a server-process Manager to share data among processes.
# A Manager supports many more data types than synchronized shared variables
# and arrays, but is slower.
# Author: Varunaditya Jadwal
import multiprocessing
def printingRecords(records):
for record in records:
print('Name: {0}\tScore: {1}'.format(record[0], record[1]))
def insertRecord(record, records):
records.append(record)
print('The new record was added successfully!')
if __name__ == '__main__':
with multiprocessing.Manager() as manager:
records = manager.list([('A', 1), ('B', 2), ('C', 3)])
recordToBeInserted = ('D', 4)
p1 = multiprocessing.Process(target = insertRecord, args = (recordToBeInserted, records))
p2 = multiprocessing.Process(target = printingRecords, args = (records,))
p1.start()
p1.join()
p2.start()
p2.join()
|
UTF-8
|
Python
| false | false | 878 |
py
| 4 |
sharingData_serverProcess.py
| 4 | 0.715262 | 0.699317 | 0 | 25 | 34.16 | 111 |
shawnluo/SVU_Projects
| 19,181,323,947,936 |
7c56eb6143704349a4a238d9025bb969dd896c0b
|
39e7d6ab503da85cede33484f342ca3318a764fd
|
/CS200/CS200_Project1/InsPic.py
|
95f7f58174ca0997ab3d3ee6211ab23f84d4e62e
|
[] |
no_license
|
https://github.com/shawnluo/SVU_Projects
|
653645ffbcf5837fb3606652799cb33b47d64848
|
6c91d51750bc52c74a91ba202ea4d2a006903b5d
|
refs/heads/master
| 2021-01-09T09:39:02.056019 | 2016-07-14T04:22:17 | 2016-07-14T04:22:17 | 62,114,269 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
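# Scan the working directory for .jpg files (assumed to be numbered
# 1.jpg .. N.jpg, per the commented-out rename below) and write an
# Index.html that embeds one <img> tag per image.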
def InsPic():
path = os.getcwd()
fileList = os.listdir(path)
i = 1
for file in fileList:
ext = os.path.splitext(file)[1]
if ext.strip() == '.jpg':
# os.rename(file, str(i) + r'.jpg')
i = i + 1
f = open(r'Index.html', 'w+')
f.write('<?xml version = "1.0" encoding = "UTF-8"?>\n')
    for n in range(1, i):
        fileName = str(n) + r'.jpg'
        image = r'<img src="' + fileName + r'">'
        f.write(image)
f.close()
InsPic()
|
UTF-8
|
Python
| false | false | 520 |
py
| 5 |
InsPic.py
| 4 | 0.475 | 0.461538 | 0 | 28 | 17.571429 | 59 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.