repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
williamratcliff/tripleaxisproject | 4,990,752,025,398 | 717753383165d28d28fc347cc4c8c9040d9b32cc | 450448e0ddb786fd13cfe9f6df5aa47573769fdc | /tripleaxisproject/eclipse/src/vice/gridpanel.py | 7f02cd988dfc1a8fe892f60f2c9b3996a5b6c3fa | [] | no_license | https://github.com/williamratcliff/tripleaxisproject | 70bbd9ab5f7f1d2f30ced18b0887e51a1e3551e8 | 8649730ccc03e7d172ad41db776e2df9b463f3d6 | refs/heads/master | 2021-01-19T20:18:25.875294 | 2018-09-12T20:43:46 | 2018-09-12T20:43:46 | 32,125,247 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import wx
import wx.grid as gridlib
import sys,os
from polarization import classify_files2 as classify_files
from utilities import readncnr3 as readncnr
import numpy as N
import wx.aui
import demjson
keyMap = {
wx.WXK_BACK : "WXK_BACK",
wx.WXK_TAB : "WXK_TAB",
wx.WXK_RETURN : "WXK_RETURN",
wx.WXK_ESCAPE : "WXK_ESCAPE",
wx.WXK_SPACE : "WXK_SPACE",
wx.WXK_DELETE : "WXK_DELETE",
wx.WXK_START : "WXK_START",
wx.WXK_LBUTTON : "WXK_LBUTTON",
wx.WXK_RBUTTON : "WXK_RBUTTON",
wx.WXK_CANCEL : "WXK_CANCEL",
wx.WXK_MBUTTON : "WXK_MBUTTON",
wx.WXK_CLEAR : "WXK_CLEAR",
wx.WXK_SHIFT : "WXK_SHIFT",
wx.WXK_ALT : "WXK_ALT",
wx.WXK_CONTROL : "WXK_CONTROL",
wx.WXK_MENU : "WXK_MENU",
wx.WXK_PAUSE : "WXK_PAUSE",
wx.WXK_CAPITAL : "WXK_CAPITAL",
wx.WXK_PRIOR : "WXK_PRIOR",
wx.WXK_NEXT : "WXK_NEXT",
wx.WXK_END : "WXK_END",
wx.WXK_HOME : "WXK_HOME",
wx.WXK_LEFT : "WXK_LEFT",
wx.WXK_UP : "WXK_UP",
wx.WXK_RIGHT : "WXK_RIGHT",
wx.WXK_DOWN : "WXK_DOWN",
wx.WXK_SELECT : "WXK_SELECT",
wx.WXK_PRINT : "WXK_PRINT",
wx.WXK_EXECUTE : "WXK_EXECUTE",
wx.WXK_SNAPSHOT : "WXK_SNAPSHOT",
wx.WXK_INSERT : "WXK_INSERT",
wx.WXK_HELP : "WXK_HELP",
wx.WXK_NUMPAD0 : "WXK_NUMPAD0",
wx.WXK_NUMPAD1 : "WXK_NUMPAD1",
wx.WXK_NUMPAD2 : "WXK_NUMPAD2",
wx.WXK_NUMPAD3 : "WXK_NUMPAD3",
wx.WXK_NUMPAD4 : "WXK_NUMPAD4",
wx.WXK_NUMPAD5 : "WXK_NUMPAD5",
wx.WXK_NUMPAD6 : "WXK_NUMPAD6",
wx.WXK_NUMPAD7 : "WXK_NUMPAD7",
wx.WXK_NUMPAD8 : "WXK_NUMPAD8",
wx.WXK_NUMPAD9 : "WXK_NUMPAD9",
wx.WXK_MULTIPLY : "WXK_MULTIPLY",
wx.WXK_ADD : "WXK_ADD",
wx.WXK_SEPARATOR : "WXK_SEPARATOR",
wx.WXK_SUBTRACT : "WXK_SUBTRACT",
wx.WXK_DECIMAL : "WXK_DECIMAL",
wx.WXK_DIVIDE : "WXK_DIVIDE",
wx.WXK_F1 : "WXK_F1",
wx.WXK_F2 : "WXK_F2",
wx.WXK_F3 : "WXK_F3",
wx.WXK_F4 : "WXK_F4",
wx.WXK_F5 : "WXK_F5",
wx.WXK_F6 : "WXK_F6",
wx.WXK_F7 : "WXK_F7",
wx.WXK_F8 : "WXK_F8",
wx.WXK_F9 : "WXK_F9",
wx.WXK_F10 : "WXK_F10",
wx.WXK_F11 : "WXK_F11",
wx.WXK_F12 : "WXK_F12",
wx.WXK_F13 : "WXK_F13",
wx.WXK_F14 : "WXK_F14",
wx.WXK_F15 : "WXK_F15",
wx.WXK_F16 : "WXK_F16",
wx.WXK_F17 : "WXK_F17",
wx.WXK_F18 : "WXK_F18",
wx.WXK_F19 : "WXK_F19",
wx.WXK_F20 : "WXK_F20",
wx.WXK_F21 : "WXK_F21",
wx.WXK_F22 : "WXK_F22",
wx.WXK_F23 : "WXK_F23",
wx.WXK_F24 : "WXK_F24",
wx.WXK_NUMLOCK : "WXK_NUMLOCK",
wx.WXK_SCROLL : "WXK_SCROLL",
wx.WXK_PAGEUP : "WXK_PAGEUP",
wx.WXK_PAGEDOWN : "WXK_PAGEDOWN",
wx.WXK_NUMPAD_SPACE : "WXK_NUMPAD_SPACE",
wx.WXK_NUMPAD_TAB : "WXK_NUMPAD_TAB",
wx.WXK_NUMPAD_ENTER : "WXK_NUMPAD_ENTER",
wx.WXK_NUMPAD_F1 : "WXK_NUMPAD_F1",
wx.WXK_NUMPAD_F2 : "WXK_NUMPAD_F2",
wx.WXK_NUMPAD_F3 : "WXK_NUMPAD_F3",
wx.WXK_NUMPAD_F4 : "WXK_NUMPAD_F4",
wx.WXK_NUMPAD_HOME : "WXK_NUMPAD_HOME",
wx.WXK_NUMPAD_LEFT : "WXK_NUMPAD_LEFT",
wx.WXK_NUMPAD_UP : "WXK_NUMPAD_UP",
wx.WXK_NUMPAD_RIGHT : "WXK_NUMPAD_RIGHT",
wx.WXK_NUMPAD_DOWN : "WXK_NUMPAD_DOWN",
wx.WXK_NUMPAD_PRIOR : "WXK_NUMPAD_PRIOR",
wx.WXK_NUMPAD_PAGEUP : "WXK_NUMPAD_PAGEUP",
wx.WXK_NUMPAD_NEXT : "WXK_NUMPAD_NEXT",
wx.WXK_NUMPAD_PAGEDOWN : "WXK_NUMPAD_PAGEDOWN",
wx.WXK_NUMPAD_END : "WXK_NUMPAD_END",
wx.WXK_NUMPAD_BEGIN : "WXK_NUMPAD_BEGIN",
wx.WXK_NUMPAD_INSERT : "WXK_NUMPAD_INSERT",
wx.WXK_NUMPAD_DELETE : "WXK_NUMPAD_DELETE",
wx.WXK_NUMPAD_EQUAL : "WXK_NUMPAD_EQUAL",
wx.WXK_NUMPAD_MULTIPLY : "WXK_NUMPAD_MULTIPLY",
wx.WXK_NUMPAD_ADD : "WXK_NUMPAD_ADD",
wx.WXK_NUMPAD_SEPARATOR : "WXK_NUMPAD_SEPARATOR",
wx.WXK_NUMPAD_SUBTRACT : "WXK_NUMPAD_SUBTRACT",
wx.WXK_NUMPAD_DECIMAL : "WXK_NUMPAD_DECIMAL",
wx.WXK_NUMPAD_DIVIDE : "WXK_NUMPAD_DIVIDE"
}
class MyApp(wx.App):
    """Minimal wx application wrapper used by the __main__ demo below.

    Bug fix: the original forwarded only three positional arguments to
    wx.App.__init__, so `clearSigInt` landed in the `useBestVisual` slot
    and the caller's `useBestVisual`/`clearSigInt` choices were mangled.
    All four options are now forwarded in the correct positions.
    """
    def __init__(self, redirect=False, filename=None, useBestVisual=False, clearSigInt=True):
        wx.App.__init__(self, redirect, filename, useBestVisual, clearSigInt)

    def OnInit(self):
        # No startup work needed; returning True lets the app continue.
        return True
#---------------------------------------------------------------------------
class CustomDataTable(gridlib.PyGridTableBase):
    """Grid data model: a list-of-lists of strings with one column per label.

    Implements the wx.grid PyGridTableBase protocol so a gridlib.Grid can
    display/edit the rows. All cells are typed as strings.
    """
    def __init__(self, colLabels):
        gridlib.PyGridTableBase.__init__(self)
        self.colLabels = colLabels
        # rowLabels is kept but unused; GetRowLabelValue returns str(row).
        self.rowLabels = ['0']
        # Every column uses the plain string editor/renderer.
        self.dataTypes = []
        for i in range(len(colLabels)):
            self.dataTypes.append(gridlib.GRID_VALUE_STRING)
        # Start with a single, all-empty row.
        self.data = []
        emptydata = []
        for i in range(len(colLabels)):
            emptydata.append('')
        self.data.append(emptydata)
        print 'data', self.data
        return

    # --------------------------------------------------
    # required methods for the wxPyGridTableBase interface
    def GetNumberRows(self):
        return len(self.data)

    def GetNumberCols(self):
        return len(self.colLabels)

    def IsEmptyCell(self, row, col):
        # Out-of-range cells count as empty.
        try:
            return not self.data[row][col]
        except IndexError:
            return True

    def GetValue(self, row, col):
        # Out-of-range reads return the empty string rather than raising.
        try:
            return self.data[row][col]
        except IndexError:
            return ''

    def SetValue(self, row, col, value):
        try:
            self.data[row][col] = value
        except IndexError:
            # Row doesn't exist yet: append one and retry the assignment.
            # NOTE(review): this appends only ONE row, so writing more than
            # one row past the end would still raise IndexError -- confirm
            # callers only ever grow the table a row at a time.
            self.AppendRow()
            self.data[row][col] = value
        return

    def AppendRow(self):
        """Append one empty row and notify the attached grid view."""
        self.data.append([''] * self.GetNumberCols())
        msg = gridlib.GridTableMessage(self,  # The table
                                       gridlib.GRIDTABLE_NOTIFY_ROWS_APPENDED,  # what we did to it
                                       1  # how many
                                       )
        self.GetView().ProcessTableMessage(msg)

    # --------------------------------------------------
    # Some optional methods

    # Called when the grid needs to display labels
    def GetColLabelValue(self, col):
        return self.colLabels[col]

    # Row labels are simply the row index.
    def GetRowLabelValue(self, row):
        return str(row)

    # Called to determine the kind of editor/renderer to use by default.
    def GetTypeName(self, row, col):
        return self.dataTypes[col]

    # Type-safety hooks for the editor/renderer: compare against the base
    # type name (the part before any ':' parameter suffix).
    def CanGetValueAs(self, row, col, typeName):
        colType = self.dataTypes[col].split(':')[0]
        if typeName == colType:
            return True
        else:
            return False

    def CanSetValueAs(self, row, col, typeName):
        return self.CanGetValueAs(row, col, typeName)

    def DeleteRows(self, pos=0, numRows=1):
        """Delete numRows rows starting at pos; returns True on success.

        NOTE(review): only numRows is range-checked; a pos beyond the end
        silently deletes nothing (Python slice semantics) yet still
        reports success -- confirm that is acceptable to callers.
        """
        if numRows >= 0 and numRows <= self.GetNumberRows():
            del self.data[pos:pos + numRows]
            msg = gridlib.GridTableMessage(self,  # The table
                                           gridlib.GRIDTABLE_NOTIFY_ROWS_DELETED,  # what we did to it
                                           pos, numRows  # how many
                                           )
            self.GetView().ProcessTableMessage(msg)
            self.UpdateValues()
            return True
        else:
            return False

    def UpdateValues(self):
        """Update all displayed values"""
        msg = gridlib.GridTableMessage(self, gridlib.GRIDTABLE_REQUEST_VIEW_GET_VALUES)
        self.GetView().ProcessTableMessage(msg)
#---------------------------------------------------------------------------
class CustTableGrid(gridlib.Grid):
    """Read-only grid over a CustomDataTable.

    Left-clicking column 0 toggles an 'x' selection mark; double-clicking a
    column label sorts the rows by that column (toggling to descending when
    already ascending).
    """
    # NOTE(review): the mutable default list below is shared across calls;
    # it is never mutated here, but consider the None-default idiom.
    def __init__(self, parent, data, colLabels=['h', 'k', 'l', 'e', 'a3', 'a4']):
        gridlib.Grid.__init__(self, parent, -1)
        self.colLabels = colLabels
        table = CustomDataTable(colLabels)
        # The second parameter means that the grid is to take ownership of the
        # table and will destroy it when done. Otherwise you would need to keep
        # a reference to it and call it's Destroy method later.
        self.SetTable(table, True)
        self.SetMargins(0, 0)
        self.AutoSize()
        gridlib.Grid.EnableEditing(self, False)
        attr = gridlib.GridCellAttr()
        attr.SetReadOnly(True)
        self.SetColAttr(0, attr)
        # Mark every column read-only (a fresh attr per column).
        for col in range(0, len(colLabels)):
            attr = gridlib.GridCellAttr()
            attr.SetReadOnly(True)
            self.SetColAttr(col, attr)
        gridlib.EVT_GRID_CELL_LEFT_CLICK(self, self.OnLeftClick)
        gridlib.EVT_GRID_LABEL_LEFT_DCLICK(self, self.onLeftDClickRowCell)

    def OnLeftClick(self, evt):
        """Toggle the 'x' selection marker in column 0 of the clicked row."""
        print 'LeftClick'
        col = evt.GetCol()
        row = evt.GetRow()
        table = self.GetTable()
        if col <= 0 and row >= 0:
            currval = table.GetValue(row, 0)
            if currval == '':
                table.SetValue(row, 0, 'x')
            else:
                table.SetValue(row, 0, '')
        gridlib.Grid.ForceRefresh(self)

    def OnCellChange(self, evt):
        # Re-open the cell editor after an edit so a single click suffices.
        if self.CanEnableCellControl():
            self.EnableCellEditControl()
        gridlib.Grid.ForceRefresh(self)
        evt.Skip()

    def onLeftDClickRowCell(self, evt):
        """Sort all rows by the double-clicked column.

        Sorts ascending (string comparison via cmp; Python 2 only); if the
        rows were already in ascending order, reverses to descending.
        """
        col = evt.GetCol()
        table = self.GetTable()
        data = N.array(table.data)
        # Pair each row index with its key value, then sort by value.
        col_to_sort = [(i, s) for i, s in enumerate(data[:, col])]
        col_to_sort.sort(lambda x, y: cmp(x[1], y[1]))
        g_col = [i for (i, s) in col_to_sort]
        # col is -1 when the corner label is clicked; skip sorting then.
        if col >= 0:
            # Already ascending (permutation strictly increasing) -> reverse.
            if (N.diff(g_col) > 0).all():
                g_col = g_col[::-1]
            # Apply the row permutation column by column.
            for i in range(data.shape[1]):
                data[:, i] = data[g_col, i]
            table.data = data.tolist()
        gridlib.Grid.AutoSize(self)
        gridlib.Grid.ForceRefresh(self)
class GridPanel(wx.Panel):
    """Panel holding an AUI notebook with one grid page per ICE category.

    A page is added only for categories whose first value is a dict
    (presumably the categories that describe tabular columns -- verify
    against the test.json schema).
    """
    def __init__(self, parent, data, ice_categories, log=sys.stdout):
        self.log = log
        wx.Panel.__init__(self, parent, -1)
        self.nb = wx.aui.AuiNotebook(self)
        for i in range(len(ice_categories)):
            print ice_categories[i].keys()
            # Each category dict has a single key: the category name.
            value = ice_categories[i][ice_categories[i].keys()[0]]
            print 'value', value
            print 'mytype', type(value[0])
            if type(value[0]) == type({}):
                page = CustTableGrid(self, data)
                self.nb.AddPage(page, ice_categories[i].keys()[0])
        sizer = wx.BoxSizer()
        sizer.Add(self.nb, 1, wx.EXPAND)
        self.SetSizer(sizer)
        sizer.FitInside(self.nb)
class mydict(dict):
    """A dict whose `+` operator adds values key-by-key.

    `a + b` returns a new mydict with a's keys, each mapped to
    `a[key] + b[key]` (raises KeyError if `other` is missing a key).

    Bug fix: the original `__init__(self, *kwargs)` was a misnamed
    positional-varargs signature, so keyword construction such as
    `mydict(name='john')` raised TypeError. Both positional and keyword
    arguments are now forwarded to dict, matching the builtin.
    """
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)

    def __add__(self, other):
        result = mydict()
        for key in self.keys():
            result[key] = self[key] + other[key]
        return result
def get_config(myfilestr):
    """Read the JSON config file at `myfilestr` and return its 'menu' entry.

    Uses demjson (lenient JSON parser) for decoding.

    Fix: the original opened the file without a `with` block, leaking the
    file handle if read() raised; the handle is now always closed.
    """
    with open(myfilestr) as myfile:
        jsonstr = myfile.read()
    json_obj = demjson.decode(jsonstr)
    return json_obj['menu']
if __name__ == '__main__':
    # Demo driver: load one BT7 data file, read the grid categories from
    # test.json, and show them in a GridPanel.
    # NOTE(review): both directories below are hardcoded absolute Windows
    # paths; this script only runs on the original author's machine.
    mydirectory = r'c:\polcorrecter\data'
    myfilebase = 'fieldscansplusminusreset53630.bt7'
    myfilestr = os.path.join(mydirectory, myfilebase)
    print myfilestr
    mydatareader = readncnr.datareader()
    mydata = mydatareader.readbuffer(myfilestr)
    print mydata.data.keys()
    mydirectory = r'C:\mytripleaxisproject\trunk\eclipse\src\vice'
    jsonfile = 'test.json'
    myfilestr = os.path.join(mydirectory, jsonfile)
    ice_categories = get_config(myfilestr)
    app = MyApp()
    frame = wx.Frame(None, -1, 'Grid Catalog', size=(240, 200))
    panel = GridPanel(frame, mydata, ice_categories)
    frame.Show()
    app.MainLoop()
| UTF-8 | Python | false | false | 15,822 | py | 365 | gridpanel.py | 165 | 0.551321 | 0.537037 | 0 | 425 | 35.218824 | 94 |
mouhcineToumi/pymaps | 7,550,552,513,633 | b2390f8a8895721018e0dfcc5cd7b42442e1e3c7 | 49e96a1b663a061b035935b5ca0ba926d049e823 | /main.py | cde998d7a680853868a6a81db149bd545e7cd42a | [] | no_license | https://github.com/mouhcineToumi/pymaps | 102ffbc6b1f5998322abaf8cc2a37d305389c679 | 6e455d17009cc2c016c27183578164d2e35eb978 | refs/heads/master | 2020-12-10T01:33:57.125167 | 2020-01-12T23:04:40 | 2020-01-12T23:04:40 | 233,471,728 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
from flask import Flask, render_template, request
from flask_googlemaps import GoogleMaps
from flask_googlemaps import Map, icons
app = Flask(__name__, template_folder="templates")
# NOTE(review): the Google Maps API key is hardcoded and committed to
# source; consider loading it from an environment variable instead.
API_KEY = "AIzaSyAeWu0zu9VchrXlJzumD3B2CPN2vLYyiPM"
# you can set key as config
app.config['GOOGLEMAPS_KEY'] = API_KEY
# you can also pass key here
GoogleMaps(
    app,
    key=API_KEY
)
@app.route('/')
def main():
    """Render the landing page with a full-viewport Google map and three demo markers."""
    # Inline CSS that stretches the map across the whole viewport,
    # layered behind/above the page via z-index.
    map_style = (
        "height:100%;"
        "width:100%;"
        "top:0;"
        "left:0;"
        "position:absolute;"
        "z-index:200;"
    )
    # Three hardcoded demo markers around Palo Alto.
    demo_markers = [
        {
            'icon': '//maps.google.com/mapfiles/ms/icons/green-dot.png',
            'lat': 37.4419,
            'lng': -122.1419,
            'infobox': "Hello I am GREEN!",
        },
        {
            'icon': '//maps.google.com/mapfiles/ms/icons/blue-dot.png',
            'lat': 37.4300,
            'lng': -122.1400,
            'infobox': "Hello I am BLUE!",
        },
        {
            'icon': icons.dots.yellow,
            'lat': 37.4500,
            'lng': -122.1350,
            'infobox': "Hello I am Yellow!",
        },
    ]
    fullmap = Map(
        identifier="fullmap",
        varname="fullmap",
        style=map_style,
        lat=37.4419,
        lng=-122.1419,
        markers=demo_markers,
    )
    return render_template(
        'main.html',
        fullmap=fullmap,
        GOOGLEMAPS_KEY=API_KEY
    )
if __name__ == "__main__":
    # Development server only; the reloader restarts on code changes.
    app.run(debug=True, use_reloader=True)
| UTF-8 | Python | false | false | 1,625 | py | 1 | main.py | 1 | 0.461538 | 0.419077 | 0 | 65 | 23 | 76 |
Daumis102/RangoAdvisor | 13,065,290,521,385 | 2068058c8dbdac42df9b9cead3f9cb5fb72835c7 | a9ee675b43d1c9b322f9de7547da56e8a6458606 | /django_advisor/advisor/tests.py | 1c13ff3a97d9b2eed0d3f513d5a68e434c149a69 | [] | no_license | https://github.com/Daumis102/RangoAdvisor | 9b18eb68cf58d3aaddfa0ccc3a2f7b10014d3351 | 8028761362cca9de5e0b650d809b20275b91170e | refs/heads/master | 2021-04-29T14:00:10.395256 | 2018-03-28T15:50:36 | 2018-03-28T15:50:36 | 121,764,515 | 1 | 1 | null | false | 2018-03-13T17:43:00 | 2018-02-16T15:10:51 | 2018-02-23T09:15:44 | 2018-03-13T17:43:00 | 7,184 | 1 | 0 | 0 | Python | false | null | from datetime import date
from django.test import TestCase, Client, RequestFactory
from django.contrib.auth import SESSION_KEY
from advisor.views import *
from .models import *
# helper methods for the setUp functions to reduce the same boilerplate
def get_init_dir():
    """Return the project root: the directory two levels above this file."""
    this_file = os.path.abspath(__file__)
    grandparent = os.path.dirname(os.path.dirname(this_file))
    return os.path.normpath(os.path.join(grandparent, ''))
def get_user_info():
    """Credentials used to create the shared test User."""
    return dict(username='anguel', password='anguel', email='anguel@gmail.com')
def get_loc_info(i):
    """Field values for the shared test Location, marked visited by user id *i*."""
    info = {
        'name': 'Caledonian Uni',
        'city': 'Glasgow',
        'coordinates': '233,233',
    }
    # visited_by is stored as a comma-separated string of user ids.
    info['visited_by'] = ','.join(str(uid) for uid in [i])
    return info
def get_rev_info(u, l):
    """Field values for a five-star Review posted by user *u* on location *l*."""
    review_body = ('The Cloud Gat, or Bean as the Chicagoans call it, is truly a '
                   'sight to behold. Quite an impressive piece of engineering.')
    return {
        'title': 'What a gorgeous view',
        'publish_date': date.today(),
        'content': review_body,
        'rating': 5,
        'posted_by': UserProfile.objects.get(user=u),
        'location_id': l,
    }
def get_prof_info(u):
    """Field values for a UserProfile of *u*, using the bundled default avatar."""
    avatar_path = os.path.join(get_init_dir(), 'static', 'images', 'default_avatar.png')
    return {
        'user': u,
        'avatar': File(open(avatar_path, 'rb'), 'rb'),
    }
def get_pic_info(u, l):
    """Field values for a Picture of location *l* uploaded by user *u*."""
    picture_path = os.path.join(
        get_init_dir(), 'media', 'initial_population', 'bean', 'bean1.jpg')
    return {
        'upload_date': date.today(),
        'picture': File(open(picture_path, 'rb'), 'rb'),
        'location_id': l,
        'uploaded_by': UserProfile.objects.get(user=u),
    }
class LocationTests(TestCase):
    """Unit tests for the Location model's helper methods."""

    def setUp(self):
        # One user/profile/location/review/picture shared by every test.
        self.init_dir = get_init_dir()
        self.user = User.objects.create_user(**get_user_info())
        self.profile = UserProfile.objects.create(**get_prof_info(self.user))
        self.loc = Location.objects.create(**get_loc_info(self.user.id))
        self.rev = Review.objects.create(**get_rev_info(self.user, self.loc))
        self.pic = Picture.objects.create(**get_pic_info(self.user, self.loc))

    def test_get_lat(self):
        # Latitude is parsed from the 'lat,lng' coordinates string.
        self.assertEquals(self.loc.get_lat(), 233)

    def test_get_lng(self):
        self.assertEqual(self.loc.get_lng(), 233)

    def test_to_str(self):
        self.assertEqual(str(self.loc), self.loc.name)

    def test_get_picture(self):
        self.assertEqual(self.loc.get_picture(),
                         self.pic)  # this works cause the picture is mapped to this location in the setup

    def test_get_rating(self):
        # The single review created in setUp has rating 5.
        self.assertEqual(self.loc.get_rating(), 5)

    def test_num_reviews(self):
        self.assertEqual(self.loc.num_reviews(), 1)

    def test_num_visited(self):
        self.assertEqual(self.loc.num_visited_by(), 1)

    def test_visited_by(self):
        self.assertIn(str(self.user.id), self.loc.visited_by_list())
class UserTests(TestCase):
    """Unit tests for the UserProfile model."""

    def setUp(self):
        self.init_dir = get_init_dir()
        self.user = User.objects.create_user(**get_user_info())
        self.profile = UserProfile.objects.create(**get_prof_info(self.user))

    def test_to_str(self):
        # A profile renders as its owner's username.
        self.assertEqual(str(self.profile), self.user.username)
class ReviewTests(TestCase):
    """Unit tests for the Review model."""

    def setUp(self):
        self.init_dir = get_init_dir()
        self.user = User.objects.create_user(**get_user_info())
        self.profile = UserProfile.objects.create(**get_prof_info(self.user))
        self.loc = Location.objects.create(**get_loc_info(self.user.id))
        self.rev = Review.objects.create(**get_rev_info(self.user, self.loc))

    def test_to_str(self):
        # NOTE(review): this asserts on the Location's str(), not the
        # Review's (self.rev) -- looks like a copy-paste from
        # LocationTests; confirm which model it was meant to cover.
        self.assertEqual(str(self.loc), self.loc.name)
class PictureTests(TestCase):
    """Unit tests for the Picture model."""

    def setUp(self):
        self.init_dir = get_init_dir()
        self.user = User.objects.create_user(**get_user_info())
        self.profile = UserProfile.objects.create(**get_prof_info(self.user))
        self.loc = Location.objects.create(**get_loc_info(self.user.id))
        self.pic = Picture.objects.create(**get_pic_info(self.user, self.loc))

    def test_to_str(self):
        # A picture renders as "<location name> <picture id>".
        self.assertEqual(str(self.pic), (self.pic.location_id.name + " " + str(self.pic.id)))
class ViewsTest(TestCase):
    """Integration tests for the advisor views: pages, auth, uploads.

    Uses the Django test Client for full request/response cycles and a
    RequestFactory (with user/profile attached manually) for calling view
    functions directly.
    """

    def setUp(self):
        # One user/profile/location/review/picture shared by every test.
        self.init_dir = get_init_dir()
        self.user = User.objects.create_user(**get_user_info())
        self.profile = UserProfile.objects.create(**get_prof_info(self.user))
        self.loc = Location.objects.create(**get_loc_info(self.user.id))
        self.rev = Review.objects.create(**get_rev_info(self.user, self.loc))
        self.pic = Picture.objects.create(**get_pic_info(self.user, self.loc))
        self.client = Client()
        self.factory = RequestFactory()

    def test_index_page(self):
        resp = self.client.get('/advisor/')
        self.assertEqual(resp.status_code, 200)
        self.assertContains(resp, 'Caledonian Uni')
        # test both routes
        resp = self.client.get('/')
        self.assertEqual(resp.status_code, 200)
        self.assertContains(resp, 'Caledonian Uni')

    def test_contacts_page(self):
        resp = self.client.get('/advisor/contacts/')
        self.assertEqual(resp.status_code, 200)
        self.assertContains(resp, 'Daumantas')

    def test_about_page(self):
        # NOTE(review): this requests the *contacts* URL, apparently a
        # copy-paste from test_contacts_page -- confirm the intended
        # about-page URL and update.
        resp = self.client.get('/advisor/contacts/')
        self.assertEqual(resp.status_code, 200)
        self.assertContains(resp, 'DjangoAdvisor')

    def test_location_page(self):
        loc = Location.objects.get(name='Caledonian Uni', city='Glasgow', coordinates='233,233')
        resp = self.client.get('/advisor/location/{}/'.format(loc.slug))
        self.assertEqual(resp.status_code, 200)
        self.assertContains(resp, 'Caledonian Uni')

    def test_add_location_page_logged_in(self):
        req = self.factory.get('/advisor/add_location/')
        req.user = self.user
        req.profile = self.profile
        resp = add_location(req)
        self.assertEqual(resp.status_code, 200)
        self.assertContains(resp, 'Fill out the details to add a new location')  # logged in and can add details

    def test_add_location_page_logged_out(self):
        resp = self.client.get('/advisor/add_location/')
        self.assertEqual(resp.status_code, 302)  # redirect if anon wants to add location
        # NOTE(review): assertIn(a, b) checks a appears *in* b; here the
        # redirect URL is checked as a substring of the literal, not the
        # other way round. It passes only because they are equal.
        self.assertIn(resp.url, '/accounts/login/?next=/advisor/add_location/')

    def test_profile_page_logged_in(self):
        req = self.factory.get('/advisor/profile')
        req.user = self.user
        req.profile = self.profile
        resp = profile(req)
        self.assertEqual(resp.status_code, 200)
        # NOTE(review): the format string has no {} placeholder, so
        # .format(...) is a no-op and this only checks for the literal
        # 'Username: ' -- probably meant 'Username: {}'.format(...).
        self.assertContains(resp,
                            'Username: '.format(self.user.username))

    def test_profile_page_logged_out(self):
        resp = self.client.get('/advisor/profile')
        # 301 here is the APPEND_SLASH redirect to /advisor/profile/,
        # not an auth redirect -- presumably; confirm against urls.py.
        self.assertEqual(resp.status_code, 301)
        self.assertIn(resp.url, '/advisor/profile/')

    def test_log_in(self):
        resp = self.client.post('/advisor/login/', {'username': ['anguel'], 'password': ['anguel']})
        self.assertEqual(resp.status_code, 200)
        self.client.login(username='anguel', password='anguel')
        self.assertTrue(SESSION_KEY in self.client.session)  # now logged in

    def test_log_out(self):
        self.client.login(username='anguel', password='anguel')
        self.assertTrue(SESSION_KEY in self.client.session)
        resp = self.client.get('/advisor/logout/')
        self.assertEqual(resp.status_code, 302)  # redirected to index after logout
        self.assertIn(resp.url, '/advisor/index/')

    def test_loc_pic_upload(self):
        req = self.factory.post('/advisor/photo/upload/', {'photo': File(
            open(os.path.join(self.init_dir, 'media', 'initial_population', 'bean', 'bean1.jpg'), 'rb'), 'rb'),
            'location': '{}'.format(self.loc.slug)})
        req.user = self.user
        req.profile = self.profile
        self.assertEqual(len(Picture.objects.all()), 1)  # originally has 1 pic
        resp = upload_location_photo(req)
        self.assertEqual(len(Picture.objects.all()), 2)  # now has 2 pics
        self.assertEqual(resp.status_code, 200)

    def test_del_acc(self):
        self.client.login(username='anguel', password='anguel')  # show there is user currently in db
        self.assertTrue(SESSION_KEY in self.client.session)
        req = self.factory.post('/advisor/profile/deleteaccount/')
        req.user = self.user
        req.profile = self.profile
        resp = delete_account(req)
        self.client.logout()
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(SESSION_KEY not in self.client.session)  # no logged in user
        self.assertEqual(len(User.objects.all()), 0)  # show no users in db

    def test_toggle_visited(self):
        req = self.factory.post('/advisor/location/toggle-visited', {'location_id': self.loc.id})
        req.user = self.user
        req.profile = self.profile
        resp = toggle_visited(req)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(self.loc.visited_by_list()), 1)
class TestSlugify(TestCase):
    """Checks that two locations with the same name get distinct slugs."""

    def setUp(self):
        self.init_dir = get_init_dir()
        self.user = User.objects.create_user(**get_user_info())
        self.profile = UserProfile.objects.create(**get_prof_info(self.user))
        self.loc = Location.objects.create(**get_loc_info(self.user.id))
        self.rev = Review.objects.create(**get_rev_info(self.user, self.loc))
        self.pic = Picture.objects.create(**get_pic_info(self.user, self.loc))
        self.client = Client()
        self.factory = RequestFactory()

    def test_two_same_loc(self):
        # Same name/city as self.loc, different coordinates: the slug must
        # be de-duplicated rather than clash.
        new_loc = Location.objects.create(name='Caledonian Uni', city='Glasgow', coordinates='235,235',
                                          visited_by=','.join(map(str, [self.user.id])))
        new_loc.save()
        self.assertNotEquals(self.loc.slug, new_loc.slug)
class FormsTest(TestCase):
    """Integration tests for the advisor form-handling views."""

    def setUp(self):
        # One user/profile/location/review/picture shared by every test.
        self.init_dir = get_init_dir()
        self.user = User.objects.create_user(**get_user_info())
        self.profile = UserProfile.objects.create(**get_prof_info(self.user))
        self.loc = Location.objects.create(**get_loc_info(self.user.id))
        self.rev = Review.objects.create(**get_rev_info(self.user, self.loc))
        self.pic = Picture.objects.create(**get_pic_info(self.user, self.loc))
        self.client = Client()
        self.factory = RequestFactory()

    def test_register_form(self):
        self.assertEqual(len(User.objects.all()), 1)  # first show there is only 1 user, created during setup
        resp = self.client.post('/advisor/register/',
                                {'username': ['testUser'], 'password': ['test'], 'passwordConfirm': ['test'],
                                 'currentUrl': ['/advisor/index/']})
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(User.objects.all()), 2)  # now show there is a new one
        self.assertJSONEqual(str(resp.content, encoding='utf8'),
                             {'statusCode': 0, 'currentUrl': '/advisor/index/'})

    def test_signin_form(self):
        resp = self.client.post('/advisor/login/', {'username': ['anguel'], 'password': ['anguel']})
        self.assertEqual(resp.status_code, 200)
        self.client.login(username='anguel', password='anguel')
        self.assertTrue(SESSION_KEY in self.client.session)

    def test_add_location_form(self):
        req = self.factory.post('/advisor/add_location/',
                                {'location_name': ['Loch Lomond'], 'city': ['Balloch'], 'coords': ['56.1366,-4.7398'],
                                 'review-title': ['Superb'], 'location_image': [File(
                                    open(
                                        os.path.join(self.init_dir, 'media', 'initial_population', 'bean', 'bean1.jpg'),
                                        'rb'), 'rb')], 'input-rating': 4, 'review-content': 'Very nice place'})
        req.user = self.user
        req.profile = self.profile
        self.assertEqual(len(Location.objects.all()), 1)  # show before adding new place that there is only one in db
        resp = add_location(req)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(Location.objects.all()), 2)  # now there is the new place

    def test_changepassword_form(self):
        req = self.factory.post('/advisor/changepw/',
                                {'changePWPassword': ['newPassword']})
        req.user = self.user
        req.profile = self.profile
        resp = change_pw(req)
        self.assertEqual(resp.status_code, 200)
        self.assertJSONEqual(str(resp.content, encoding='utf8'),
                             {'statusCode': 0})

    def test_changeavatar_pseudo_form(self):
        req = self.factory.post('/advisor/change_pp/',
                                {'newAvatar': [File(
                                    open(
                                        os.path.join(self.init_dir, 'media', 'initial_population', 'bean', 'bean1.jpg'),
                                        'rb'), 'rb')]})
        original_avatar = self.profile.avatar.chunks(2)  # first 2 chunks of original avatar
        req.user = self.user
        req.profile = self.profile
        resp = change_pp(req)
        self.assertEqual(resp.status_code, 200)
        self.assertJSONEqual(str(resp.content, encoding='utf8'),
                             {'statusCode': 0})
        self.assertNotEqual(self.profile.avatar.chunks(2),
                            original_avatar)  # first 2 chunks of new avatar should not be equal to the first 2 of the original

    def test_write_review_form(self):
        req = self.factory.post('/advisor/write_review/',
                                {'reviewTitle': 'Alright', 'input-rating': 3, 'reviewContent': 'Place is ok',
                                 'slug': self.loc.slug})
        req.user = self.user
        req.profile = self.profile
        self.assertEqual(len(Review.objects.all()), 1)  # originally start with 1 review
        resp = write_review(req)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(Review.objects.all()), 2)  # new review added so now 2
| UTF-8 | Python | false | false | 14,145 | py | 29 | tests.py | 12 | 0.611453 | 0.602192 | 0 | 311 | 44.482315 | 145 |
biomaks/telegram-wpg-transit-bot | 8,881,992,377,335 | 5635e553345f7553a19558683d6364aa85335e42 | a512a8146a0d1b2d6a974a937faf436f81ed0166 | /app/sqs_launcher.py | 97a159b32f8501ba473dfefe67f7c0fe973ed0a8 | [] | no_license | https://github.com/biomaks/telegram-wpg-transit-bot | 5e222fd3eb45e3a76da765893aebe6aab39dc539 | 6d09d162b2ad7ae0c2892f24802cbb36023615cc | refs/heads/master | 2020-06-04T03:51:18.251669 | 2019-08-04T01:43:27 | 2019-08-04T01:43:27 | 191,862,848 | 0 | 0 | null | false | 2019-06-16T17:26:03 | 2019-06-14T02:25:21 | 2019-06-16T02:30:50 | 2019-06-16T17:26:03 | 21 | 0 | 0 | 0 | Python | false | false | from sqs_launcher import SqsLauncher
from .config import aws_sqs_queue

# Module-level singleton: one SqsLauncher bound to the queue named in the
# app config, imported and shared by the rest of the application.
launcher = SqsLauncher(aws_sqs_queue)
| UTF-8 | Python | false | false | 111 | py | 24 | sqs_launcher.py | 15 | 0.801802 | 0.801802 | 0 | 5 | 21.2 | 37 |
UchihaPan/BlogAppDjango | 12,592,844,138,251 | c36a8e933fef6971f2d443b182c650fe3752c565 | 1f869947c95985f53ce018c2e7c4d9d233d21f79 | /blog_app/models.py | 6964ad7c8c40f4f61bf5aa779611f92838cb74e7 | [] | no_license | https://github.com/UchihaPan/BlogAppDjango | a01512b9990c7a19ee6186e38e8aac935e883997 | 25bb341f823a6d34d40cbd0f2f15af88362f60d9 | refs/heads/main | 2023-07-07T07:18:00.315753 | 2021-08-06T10:21:52 | 2021-08-06T10:21:52 | 392,680,166 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from PIL import Image
from django.shortcuts import reverse
class Post(models.Model):
    """A blog post written by a registered user."""

    author = models.ForeignKey(User, on_delete=models.CASCADE)  # posts are deleted with their author
    title = models.CharField(max_length=255)
    content = models.CharField(max_length=1000)
    # default is the callable itself, so it is evaluated at save time.
    date_posted = models.DateTimeField(default=timezone.now)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        # Canonical URL for this post via the named 'post-detail' route.
        return reverse('post-detail', args=[str(self.pk)])
class Profile(models.Model):
    """One-to-one extension of User that stores the profile picture."""

    user = models.OneToOneField(User, on_delete=models.CASCADE)
    image = models.ImageField(default='default.jpg', upload_to='profile_images')

    def __str__(self):
        return self.user.username

    def save(self, force_insert=False, force_update=False, using=None,
             update_fields=None):
        """Save the profile, then shrink oversized avatars to fit 300x300.

        Bug fix: the original called ``super().save()`` with no arguments,
        silently discarding ``force_insert``/``force_update``/``using``/
        ``update_fields`` passed by callers; they are now forwarded.
        """
        super().save(force_insert=force_insert, force_update=force_update,
                     using=using, update_fields=update_fields)

        # Downscale the stored file in place so avatars never exceed 300px
        # in either dimension (thumbnail preserves aspect ratio).
        img = Image.open(self.image.path)
        if img.height > 300 or img.width > 300:
            output_size = (300, 300)
            img.thumbnail(output_size)
            img.save(self.image.path)
| UTF-8 | Python | false | false | 1,119 | py | 4 | models.py | 3 | 0.672029 | 0.655049 | 0 | 34 | 31.911765 | 80 |
caracaljump/pythonExercize | 3,564,822,884,349 | 6e41b82ad8b912081c79f6f02857143beeb248c5 | 20c6e5ca74cd105a8b7108f437583463722197c3 | /Challenge3.py | bffd5838df348e49e8ef56c38594617690e49801 | [] | no_license | https://github.com/caracaljump/pythonExercize | 56676a9e60590eeb86d8b923647b3f1a98a35648 | ca4802fd3557cbbc8944ef4d8145fc90e96a555b | refs/heads/main | 2023-04-24T04:36:15.992567 | 2021-05-11T03:30:31 | 2021-05-11T03:30:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 1
a = 1
b = 2
c = 3
print(a, b, c)
# 2
x = 9
if x < 10:
print ("This number is less than 10")
elif x >= 10:
print ("This number is or more than 10")
# 3
y = 13
if y <= 10:
print ("message1")
elif y > 10 and y <= 25:
print ("message2")
else:
print ("message3")
# 4
d = 45
e = 8
d % e
# 5
d / e
# 6
age = 21
if age >= 20:
print("you may drink alcohol!")
| UTF-8 | Python | false | false | 395 | py | 15 | Challenge3.py | 14 | 0.501266 | 0.410127 | 0 | 45 | 7.755556 | 44 |
1571min/introducing-python-study | 11,793,980,241,464 | 32f01e28c8119faabd4e5f1c50bec51e35fff441 | f2453ee9850a9152027b66a3c7977fbe49153919 | /chapter1/python61.py | 9bc62b98e956370668b175db62d13790154c470a | [] | no_license | https://github.com/1571min/introducing-python-study | 5fe8570e64151cc6229cc4f42ce9c91943189f21 | cbbc66d66a5d4b4bd2a10077d47b37be0842bb88 | refs/heads/master | 2020-07-09T11:53:43.126994 | 2019-08-23T09:45:43 | 2019-08-23T09:45:43 | 203,962,613 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | year_list=[19940420]
for i in range(0,4):
year_list.append(year_list[i]+10000)
print(year_list)
thigs=["mozzarella",'cinderella','samonella']
thigs[0]=thigs[0].upper()
thigs[2]=thigs[2][::-1].capitalize()
print(thigs)
e2f={'dog':'chien','cat':'char','walrus':'morse'}
print(e2f['walrus'])
f2e={}
for name,value in e2f.items():
temp={value:name}
f2e.update(temp)
print(f2e)
print(list(f2e.keys()))
print(list(f2e.values()))
life={'animals':{'dog':'chien','cat':'char','walrus':'morse'},'plants':{},'other':{}}
print(life['animals']['cat'])
| UTF-8 | Python | false | false | 557 | py | 15 | python61.py | 15 | 0.640934 | 0.590664 | 0 | 22 | 24.181818 | 85 |
divbasson/web-dev-course-2014 | 18,236,431,150,174 | f3029c0664750db49c4b9c647f2b835f08615a10 | c9f1b54035e25451fc7433266e2e50c1f0c165bf | /code/web_server/web_server_2_1.py | 208219f29a5478b1dee8a6ede98368af1d9c7dce | [] | no_license | https://github.com/divbasson/web-dev-course-2014 | 42a3fcb0779135b902e99cb0c9833b4e7ea83b23 | f8ead7584d210d17dec0b19375027d2d036d9170 | HEAD | 2018-08-29T05:55:49.665521 | 2014-10-03T16:12:05 | 2014-10-03T16:12:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return app.send_static_file("hello_world.html")
app.debug = True
app.run()
| UTF-8 | Python | false | false | 157 | py | 60 | web_server_2_1.py | 32 | 0.656051 | 0.656051 | 0 | 10 | 14.7 | 51 |
BinSyed/Python_Basics | 2,370,821,971,924 | 46d3f4826a90b8312264deabcc1905b5788206ac | ae449abf457c8874fff0e176c59727e42f711598 | /Add Two Numbers.py | e5ef379880eee7ad3357ba5664e4dfc3fae101a1 | [] | no_license | https://github.com/BinSyed/Python_Basics | 6b6ecfae6879a631d03ef653114be1313b1c72bb | e0bf1728c18b980a36f0ae2187ef72d0b8113b27 | refs/heads/master | 2023-04-08T16:24:28.282125 | 2021-04-20T06:58:47 | 2021-04-20T06:58:47 | 292,492,039 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Example 1: Add Two Numbers
# This program adds two numbers
num1 = 1.5
num2 = 6.3
# Add two numbers
sum = num1 + num2
# Display the sum
print('The sum of {0} and {1} is {2}'.format(num1, num2, sum))
#Output
The sum of 1.5 and 6.3 is 7.8
#The program below calculates the sum of two numbers entered by the user..
#Example 2: Add Two Numbers With User Input
# Store input numbers
num1 = input('Enter first number: ')
num2 = input('Enter second number: ')
# Add two numbers
sum = float(num1) + float(num2)
# Display the sum
print('The sum of {0} and {1} is {2}'.format(num1, num2, sum))
#Output
Enter first number: 1.5
Enter second number: 6.3
The sum of 1.5 and 6.3 is 7.8
| UTF-8 | Python | false | false | 684 | py | 4 | Add Two Numbers.py | 3 | 0.681287 | 0.622807 | 0 | 35 | 18.542857 | 74 |
RLBot/RLBotPack | 8,418,135,913,666 | 0902913385d42a2c3e8e3f0fa4355424b80dcdf1 | 3cf1535bd23bfbfe078464eb6ba9043e30b2d67d | /RLBotPack/DisasterBot/skeleton/skeleton_agent.py | d3c8c91161e8c1d20e8150a22b6553b7aa65bee4 | [
"MIT"
] | permissive | https://github.com/RLBot/RLBotPack | 11bab3be9dea24521853a5ba3f0ba5716c9922d2 | d4756871449a6e587186e4f5d8830fc73a85c33a | refs/heads/master | 2023-07-09T08:37:05.350458 | 2023-05-17T21:51:07 | 2023-05-17T21:51:07 | 188,609,141 | 27 | 132 | MIT | false | 2023-09-12T00:02:43 | 2019-05-25T20:24:44 | 2023-09-10T12:11:26 | 2023-09-12T00:02:43 | 214,439 | 24 | 45 | 7 | Python | false | false | import time
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
from skeleton.util.structure.game_data import GameData
class SkeletonAgent(BaseAgent):
    """BaseAgent subclass that adapts raw rlbot framework data.

    Each GameTickPacket is converted into the internal GameData structure
    and enriched with derived info; control decisions are delegated to
    get_controls(), which inheriting classes override.
    """

    def __init__(self, name: str = "skeleton", team: int = 0, index: int = 0):
        super().__init__(name, team, index)
        self.game_data = GameData(self.name, self.team, self.index)
        self.controls = SimpleControllerState()

    def initialize_agent(self):
        """Runs once the game has fully loaded, before get_output().

        Subclasses that override this must call super().
        """
        self.game_data.read_field_info(self.get_field_info())

    def get_output(self, game_tick_packet: GameTickPacket) -> SimpleControllerState:
        """Framework entry point; override get_controls() instead of this."""
        tick_start = time.time()
        self.pre_process(game_tick_packet)
        self.controls = self.get_controls()
        self.feedback()
        elapsed = time.time() - tick_start
        # One game tick lasts 1/120 s; warn whenever we overran the budget.
        if elapsed > 1 / 120:
            pct = elapsed * 120 * 100
            self.logger.warn(f"Slow to execute on tick {self.game_data.counter}: {pct:.3f}%")
        return self.controls

    def pre_process(self, game_tick_packet: GameTickPacket):
        """First step of get_output(): refresh all cached game state."""
        self.game_data.read_game_tick_packet(game_tick_packet)
        self.game_data.read_ball_prediction_struct(self.get_ball_prediction_struct())
        self.game_data.update_extra_game_data()

    def feedback(self):
        """Last step of get_output(): report the chosen controls back."""
        self.game_data.feedback(self.controls)

    def get_controls(self) -> SimpleControllerState:
        """Hook for subclasses; the skeleton just returns the last controls."""
        return self.controls
| UTF-8 | Python | false | false | 2,110 | py | 605 | skeleton_agent.py | 446 | 0.683412 | 0.677251 | 0 | 56 | 36.678571 | 112 |
zeel-dev/zebuker-cookiecutter | 17,892,833,775,450 | ae2f59443911ac77e8f1d8f2f2451323f32cf8ab | bac3b9e7289a391f19bf83926f1f17e775cb0eb5 | /{{cookiecutter.service_name}}/{{cookiecutter.service_slug}}/schemas.py | 497b14452dfa29906a0b0faa661efbca15094cbc | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | https://github.com/zeel-dev/zebuker-cookiecutter | ae1ef8b9e8aa30f3e5792b3a3c2a83adf316fbb4 | 31d25321b6dca8e3207f60da907e5a7996860308 | refs/heads/master | 2020-05-24T03:02:06.995187 | 2019-06-06T20:34:13 | 2019-06-06T20:34:13 | 187,064,132 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Schemas linked to the database models"""
import re
from marshmallow import post_dump
from marshmallow_sqlalchemy import ModelSchema
class BaseSchema(ModelSchema):
    """ModelSchema variant that serializes datetimes without a UTC offset.

    After dumping, any ISO8601/RFC3339 string value has its trailing
    six-character "+HH:MM"/"-HH:MM" offset removed. Strict mode is
    enabled by default.
    """
    def __init__(self, strict=True, **kwargs):
        super(BaseSchema, self).__init__(strict=strict, **kwargs)
    @post_dump
    def strip_timezone_offset(self, data):
        """Strip the timezone offset from ISO8601/RFC3339 string values."""
        offset_pattern = r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d{6})?[+-]\d\d:\d\d$'
        for key, value in data.items():
            if value and isinstance(value, str) and re.match(offset_pattern, value):
                # Drop the 6-char "+HH:MM"/"-HH:MM" suffix.
                data[key] = value[:-6]
        return data
nicolacavallini/explore-garmin-data | 6,700,148,984,640 | ac5751e2265b6ee6242d71b9c704880be415aa57 | 3693ffd6458b2035cd136b0801782f3997300a90 | /tools.py | e996ce82e698982e489e063825827ad535fb6278 | [] | no_license | https://github.com/nicolacavallini/explore-garmin-data | dd1f8c4fe779362284bbc4763916f457949b9351 | fc93440f76b51bb737e3405677c5b27e5f9ec639 | refs/heads/master | 2021-06-20T09:57:44.562525 | 2019-08-09T12:55:27 | 2019-08-09T12:55:27 | 114,737,944 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
from fitparse import FitFile
import numpy as np
from scipy.special import binom as binomial
import scipy.sparse as spr
def non_negative(v):
    """Clamp negative entries of array ``v`` to zero, in place, and return it."""
    v[v < 0] = 0
    return v
def forward_convolution(data, conv_amplitude, conv_function):
    """Apply the forward convolution operator to a 1-D array.

    Builds the banded operator via get_forward_convolution_matrix and
    multiplies it with ``data``.
    """
    assert data.ndim == 1, "data should be one dimensional arrays"
    operator_matrix = get_forward_convolution_matrix(
        data.shape[0], conv_amplitude, conv_function)
    return operator_matrix.dot(data)
def get_forward_convolution_matrix(matrix_size, conv_amplitude, conv_function):
    """Build a square banded operator of size ``matrix_size``.

    Super-diagonal ``k`` (for k in [0, conv_amplitude)) is filled with the
    constant conv_function(k); the result is the sum of these
    single-diagonal sparse matrices.
    """
    shape = (matrix_size, matrix_size)
    result = spr.dia_matrix(shape, dtype=np.float64)
    for offset in range(conv_amplitude):
        band = conv_function(offset) * np.ones((matrix_size,), dtype=np.float64)
        result = result + spr.dia_matrix((band, offset), shape=shape)
    return result
def bernstein_polynomial(n, i):
    """Return the i-th Bernstein basis polynomial of degree ``n``.

    The returned callable evaluates C(n, i) * t**i * (1 - t)**(n - i).

    Raises:
        AssertionError: if ``i`` is not in the inclusive range [0, n].
    """
    # Fixed message: range(n + 1) accepts i == n, i.e. the range is [0, n],
    # not the half-open [0, n) the old message claimed.
    assert i in range(n + 1), 'i out of range: [0,n]'
    return lambda t: binomial(n, i) * t**i * (1. - t)**(n - i)
def normalize_function(v):
    """Return a callable mapping values onto [0, 1] using v's min/max range."""
    lo, hi = np.amin(v), np.amax(v)
    return lambda x: (x - lo) / (hi - lo)
def normalize(v):
    """Return a copy of ``v`` linearly rescaled so its min is 0 and max is 1."""
    shifted = v - np.amin(v)
    return shifted / np.amax(shifted)
def least_squares(x, y, space):
    """Fit ``y`` over normalized ``x`` with the basis functions in ``space``.

    Row i of the design matrix A is space[i] evaluated on x normalized to
    [0, 1]; the normal equations (A A^T) c = A y are solved and the
    coefficient column vector c is returned.
    """
    xn = normalize(x)
    seed = np.zeros((0, xn.shape[0]))
    design = np.vstack([seed] + [f(xn) for f in space])
    rhs = design.dot(np.reshape(y, (y.shape[0], 1)))
    gram = design.dot(design.T)
    return np.linalg.solve(gram, rhs)
def get_bernsetin_order_one():
    """Degree-1 Bernstein basis: [1 - x, x]."""
    b0 = lambda x: 1 - x
    b1 = lambda x: x
    return [b0, b1]
def get_bernsetin_derivs_order_one():
    """Derivatives of the degree-1 Bernstein basis: the constants [-1, 1]."""
    return [lambda _x: -1., lambda _x: 1.]
def get_bernsetin_order_two():
    """Degree-2 Bernstein basis: [(1-x)^2, 2x(1-x), x^2]."""
    return [
        lambda x: (1 - x) * (1 - x),
        lambda x: 2 * x * (1 - x),
        lambda x: x * x,
    ]
def get_bernsetin_derivs_order_two():
    """Derivatives of the degree-2 Bernstein basis."""
    return [
        lambda x: -2 * (1 - x),
        lambda x: 2 * (1 - 2 * x),
        lambda x: 2 * x,
    ]
def get_function(coeffs, space):
    """Return f(x) = sum_i coeffs[i] * space[i](x) as a single callable.

    Works for scalar samples and for numpy arrays (element-wise).
    """
    def evaluate(sample):
        # Zero accumulator matching the sample's shape (scalar -> plain 0).
        acc = np.zeros(sample.shape) if type(sample) is np.ndarray else 0
        for basis, weight in zip(space, coeffs.flatten()):
            acc += weight * basis(sample)
        return acc
    return evaluate
def interpolate_and_get_derivative(x, y):
    """Linear least-squares fit of (x, y); return the fit's derivative."""
    fit_coeffs = least_squares(x, y, get_bernsetin_order_one())
    return get_function(fit_coeffs, get_bernsetin_derivs_order_one())
def linear_regression(x, y):
    """Return the absolute slope of the least-squares line through (x, y).

    The fit runs on x normalized to [0, 1], so the coefficient difference
    is rescaled by the original x range.
    """
    coeffs = least_squares(x, y, get_bernsetin_order_one())
    x_range = np.amax(x) - np.amin(x)
    return np.abs(coeffs[0] - coeffs[1]) / x_range
| UTF-8 | Python | false | false | 2,663 | py | 9 | tools.py | 8 | 0.600451 | 0.588434 | 0 | 109 | 23.431193 | 82 |
KonstantinRupasov/SysAdminLib_01 | 17,248,588,671,734 | 10bbd68fd2a648d55e6b0fc999a8fc198e108bf0 | 71dd60034fe3dd6b946e8aedb86eb17d7422e611 | /src/lib/common/bootstrap.py | db1e75f4e1c3d4c0f6960c045b8d2cb99293c5be | [] | no_license | https://github.com/KonstantinRupasov/SysAdminLib_01 | 9ddeb65818caa79d276a53819f5bbb9194ebaaf2 | 3004ba129e99ddbffe1f7c820b824e2efbad6e90 | refs/heads/master | 2023-08-08T05:02:27.590154 | 2017-12-08T21:42:02 | 2017-12-08T21:42:02 | 111,958,896 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
import re
from .logger import *
# Compare current version of Python interpreter against minimal required
if sys.version_info < (3, 4, 3):
raise Exception(
"Incompatible version of Python interpreter. "
"Minimum required version: 3.4.3"
)
import threading
import datetime
import platform
import subprocess as sp
import shlex
## Custom sys.excepthook: log and hard-exit on Ctrl-C, defer otherwise.
#
# @param exctype Exception type
# @param value Exception value
# @param traceback Exception traceback
def my_except_hook(exctype, value, traceback):
    # Anything that is not a KeyboardInterrupt goes to the default hook.
    if exctype is not KeyboardInterrupt:
        sys.__excepthook__(exctype, value, traceback)
        return
    # Ctrl-C: log the interrupt and terminate immediately (skips cleanup).
    global_logger.error(
        "Keyboard interrupt", pid=os.getpid(),
        test_mode=gv.TEST_MODE,
        err_code=-1
    )
    os._exit(1)


sys.excepthook = my_except_hook
## Detect encoding of default shell.
#
# @return String, which represent encoding. If decoding cannot be detected,
#  "raw_unicode_escape" returns.
def detect_console_encoding():
    fallback = "raw_unicode_escape"
    system = platform.system()
    try:
        if system == "Windows":
            # "chcp" prints the active code page; grab the trailing token.
            popen = sp.Popen(
                "chcp",
                shell=True,
                stdout=sp.PIPE,
                stderr=sp.PIPE)
            stdout, _ = popen.communicate(timeout=2)
            encoding = re.search(b" ([a-zA-Z0-9_\\-]+)", stdout).groups()[0]\
                .decode(gv.ENCODING)
        elif system == "Linux":
            encoding = "utf-8"
        else:
            encoding = fallback
        # Probe the codec: raises LookupError if the name is unknown.
        "trying encode string".encode(encoding)
        return encoding
    except Exception:
        return fallback
from .logger import *
from .errors import *
from ..utils.cvt import str_to_bool
## Set global lib::common::global_vars::ENCODING variable.
gv.ENCODING = detect_console_encoding()
## Default function for second argument in parse_cmd_args.
def set_debug_values(args):
    """Copy recognized debug flags from ``args`` onto the gv module.

    "collapse-traceback" becomes gv.COLLAPSE_TRACEBACK, and so on;
    unrecognized keys are ignored.
    """
    recognized = ("debug", "collapse-traceback", "print-begin",
                  "print-uuid", "print-function", "escape-strings")
    for key, value in args.items():
        if key.lower() not in recognized:
            continue
        try:
            setattr(gv, key.upper().replace("-", "_"), value)
        except ValueError:
            raise AutomationLibraryError("ARGS_ERROR",
                                         "This arg should be True or False",
                                         key=key, current_value=value)
## Parse input command line arguments and return tuple with
#  (positional_args, named_args).
# @param s Input list or string. If None, args are taken from sys.argv (the
#  full argv, including the program name -- NOTE(review): the original doc
#  claimed sys.argv[2:], which the code never did). If string, then args
#  splitted via shlex.
# @return Tuple with (positional_args, named_args), where positional_args
#  is list of args and named_args is dict.
def parse_cmd_args(s=None):
    args = ([], {})
    # Normalize the input to a list of token strings.
    if s is None:
        s = sys.argv
    elif isinstance(s, list):
        pass
    elif isinstance(s, str):
        s = shlex.split(s)
    else:
        raise TypeError("s should be None, str or list")
    for arg in s:
        key = None
        value = "True"
        # split arg on (keys string, value)
        splitted_arg = re.search("--([^=\n]+)(?:=(.*))?", arg)
        # if split successful, then set first captured group as key and
        # if second group is not None, set it as value ("True" string otherwise)
        if splitted_arg is not None:
            key = splitted_arg.groups()[0]
            if splitted_arg.groups()[1] is not None:
                value = splitted_arg.groups()[1]
        # if split fails, assume that we have positional argument and set value
        # to arg, when keeping key as None.
        else:
            value = arg
        # Coerce the raw string: first try int, then bool; otherwise keep str.
        # Fix: the bare "except:" clauses were narrowed so SystemExit /
        # KeyboardInterrupt and genuine bugs are no longer silently swallowed.
        try:
            value = int(value)
        except (TypeError, ValueError):
            pass
        try:
            value = str_to_bool(value)
        except Exception:
            pass
        if key is None:
            args[0].append(value)
        else:
            args[1][key] = value
    return args
def get_pid_filename(pid):
    """Path of the PID file for process ``pid`` under gv.PID_PATH."""
    name = "AutomationLibrary_{}.pid".format(pid)
    return os.path.join(gv.PID_PATH, name)
def get_log_filename(script_name, pid, log_folder, timestamp=None):
    """Build "<gv.PID_PATH>/<log_folder>/<script>_<timestamp>_<pid>.log".

    ``timestamp`` defaults to the current local time as YYMMDD_HHMMSS.
    """
    if timestamp is None:
        timestamp = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
    filename = "{}_{}_{}.log".format(script_name, timestamp, pid)
    return os.path.join(gv.PID_PATH, log_folder, filename)
## Execute scenario.
#
# @param func Function, which represent scenario.
# @param script_name Name of script, needed for building log file name.
# @param script_args Positional args to `func`.
# @param script_kwargs Named args to `func`.
def main(func, script_name, script_args=(), script_kwargs={}):
    # NOTE(review): script_kwargs={} is a shared mutable default; harmless as
    # long as nothing mutates it, but a None sentinel would be safer.

    ## Wrapper for scenario, which do the necessary preparations, like creating
    #  PID file, logs, etc.
    #
    # @brief This function shouldn't be called directly,
    #  only from lib::common::bootstrap::main()
    #
    # @param func Function, which represent scenario.
    # @param script_name Name of script, needed for building log file name.
    # @param op_uuid UUID of global operation.
    # @param script_args Positional args to `func`.
    # @param script_kwargs Named args to `func`.
    def scenario_executor(func, script_name, op_uuid, script_args=(),
                          script_kwargs={}):
        # setting up pid file
        pid = os.getpid()
        pid_filename = get_pid_filename(pid)
        pid_file = open(pid_filename, "w")
        pid_file.write(str(pid))
        pid_file.close()
        # setting up logger
        log_folder = 'script_logs'
        if not os.path.exists(log_folder):
            os.makedirs(log_folder)
        global_logger.add_file_handler(
            get_log_filename(script_name, pid, log_folder)
        )
        global_logger.add_stream_handler(sys.stdout)
        # execute function, measure time and exit
        res = func(*script_args, **script_kwargs)
        _time = global_logger.finish_operation(op_uuid)
        # _time is a timedelta; convert to whole milliseconds for the log.
        global_logger.info(
            message="Scenario execution finished",
            duration=int(_time.microseconds * 10**-3 + _time.seconds * 10**3),
            scenario_name=script_name,
            code=res
        )
        try:
            os.remove(pid_filename)
        except:
            global_logger.warning(message="Couldn't remove pid file",
                                  pid_filename=pid_filename)
        # set return code and exit
        # os._exit terminates the whole process from this worker thread.
        os._exit(res)

    op_uuid = global_logger.start_operation()
    # The scenario runs on a worker thread so this thread can enforce the
    # configured time limit via join(timeout) below.
    thread = threading.Thread(target=scenario_executor, args=(func,
                                                              script_name,
                                                              op_uuid,
                                                              script_args,
                                                              script_kwargs))
    thread.start()
    # wait for setting up configuration
    # (the scenario populates gv.CONFIG; poll up to ~3 seconds)
    import time
    config_read_timeout = 30
    i = 0
    while "time-limit" not in gv.CONFIG and i < config_read_timeout:
        time.sleep(0.1)
        i += 1
    # handle situation when reading configure is failed
    if i >= config_read_timeout:
        err = AutomationLibraryError("TIMEOUT_ERROR")
        global_logger.error(
            str(err), state="error"
        )
        # NOTE: this rebinds the local name `time` from the module to a
        # timedelta, shadowing the earlier `import time`.
        time = global_logger.finish_operation(op_uuid)
        global_logger.info(message="Scenario execution finished",
                           duration=int(
                               time.microseconds * 10**-3 + time.seconds * 10**3
                           ),
                           scenario_name=script_name,
                           code=err.num_code)
        os.remove(os.path.join(gv.PID_PATH,
                               "AutomationLibrary_{}.pid".format(os.getpid())))
        os._exit(err.num_code)
    # setting time-limit
    thread.join(gv.CONFIG["time-limit"])
    # if time-limit is expired, kill script
    if thread.is_alive():
        err = AutomationLibraryError("TIMEOUT_ERROR")
        global_logger.error(
            str(err), state="error"
        )
        # Same `time` shadowing as in the config-timeout branch above.
        time = global_logger.finish_operation(op_uuid)
        global_logger.info(message="Scenario execution finished",
                           duration=int(
                               time.microseconds * 10**-3 + time.seconds * 10**3
                           ),
                           scenario_name=script_name,
                           code=err.num_code)
        os.remove(os.path.join(gv.PID_PATH,
                               "AutomationLibrary_{}.pid".format(os.getpid())))
        os._exit(err.num_code)
| UTF-8 | Python | false | false | 8,942 | py | 61 | bootstrap.py | 46 | 0.560725 | 0.555692 | 0 | 252 | 34.484127 | 80 |
catatonicChimp/smscWeb | 7,988,639,193,970 | 2ba2f1720b75a807baa88d27972b418c3503bd0d | 492238843e63d8612f2653b5066d894cbba12693 | /src/club/migrations/0003_clubmember.py | 6f98de198cebba3c4a3a604b4c9742627d817194 | [] | no_license | https://github.com/catatonicChimp/smscWeb | f442c6fc3e9e306e43a7711bfcc0fa17fffdd2c4 | fcd07652d16975a5189fb3511894b976665a990c | refs/heads/master | 2016-09-13T21:23:42.487861 | 2016-05-09T08:38:00 | 2016-05-09T08:41:35 | 58,194,410 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-29 10:23
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
# Auto-generated Django migration (1.9.5); edit with care -- the schema it
# describes must stay in sync with the migration history.
class Migration(migrations.Migration):
    dependencies = [
        ('auth', '0007_alter_validators_add_error_messages'),
        ('club', '0002_memberaddress_user'),
    ]
    operations = [
        migrations.CreateModel(
            name='ClubMember',
            # The user FK doubles as the primary key (one profile per user).
            fields=[
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
                ('slug', models.UUIDField(default=uuid.uuid4, editable=False)),
                ('picture', models.ImageField(blank=True, null=True, upload_to='profile_pics/%Y-%m-%d/', verbose_name='Profile picture')),
                ('bio', models.CharField(blank=True, max_length=200, null=True, verbose_name='Short Bio')),
                ('plates', models.BooleanField(default=False, verbose_name='Has Club Plates with SMSC')),
                ('number', models.CharField(blank=True, max_length=2, null=True, unique=True, verbose_name='Club Membership Number')),
                ('join_date', models.DateField(blank=True, null=True, verbose_name='Club Join Date')),
                ('boardMember', models.BooleanField(default=False, verbose_name='Club Board Member')),
                ('role', models.CharField(blank=True, max_length=30, null=True, verbose_name='Role')),
            ],
        ),
    ]
| UTF-8 | Python | false | false | 1,578 | py | 34 | 0003_clubmember.py | 19 | 0.631179 | 0.611534 | 0 | 33 | 46.818182 | 156 |
genetica/pylator | 16,767,552,364,762 | dd4dc1a818166760a12ecdc94cd19552f4f71357 | 033f47af4edf835f77ca49d1baa6661b7ebfc202 | /testScripts/multi5.py | 7e2616829f72b25a123abed85c2340001f635aa3 | [] | no_license | https://github.com/genetica/pylator | 889434e903c55e32676b0b843f9ebd7a7f36b78a | 4ce3ed895186400107d1e49e9a8c9928dd0c457a | refs/heads/master | 2021-01-02T22:32:36.998021 | 2017-11-30T08:35:49 | 2017-11-30T08:35:49 | 99,333,451 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import ctypes
import logging
import multiprocessing as mp
import time
from contextlib import closing
import numpy as np
# Module-load timestamp; the __main__ block later rebinds t1/t2 from main().
t1 = time.time()
# Shorthands for the multiprocessing logger's info/critical methods.
info = mp.get_logger().info
crit = mp.get_logger().critical
#logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
#logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
import weakref
def main():
    """Benchmark driver: share one RawArray with two child processes.

    exec1 fills the lower half and exec2 the upper half; barriers at
    events[1]/events[2] bracket the timed region. Returns (t1, t2), the
    wall-clock start/end of the parallel phase.
    """
    logger = mp.log_to_stderr()
    logger.setLevel(logging.CRITICAL)
    #logger.setLevel(logging.FATAL)
    lst = []
    # create shared array
    N, M = int(1e7), 11
    base_ptr = mp.RawArray(ctypes.c_int64, N)
    # NOTE(review): np.frombuffer defaults to float64 while the RawArray holds
    # c_int64, so the bytes are reinterpreted -- probably wants dtype=np.int64.
    arr = np.frombuffer(base_ptr)
    arr[:] = np.array(np.random.uniform(size=N)*1000, np.int64)
    #arr_orig = arr.copy()
    # print("info")
    # print(repr(arr))
    # print(repr(base_ptr))
    # print(id(arr))
    # print(id(base_ptr))
    # print("done")
    # events[0]: run flag (Event); events[1]/events[2]: 3-party barriers
    # (parent + two children) marking start and end of the timed region.
    lst_event = []
    lst_event.append(mp.Event())
    lst_event.append(mp.Barrier(3))
    lst_event.append(mp.Barrier(3))
    #for i in range(5):
    #    lst_event.append(mp.Event())
    lst_event[0].set()
    ptr_lst = {}
    #mgr = mp.Manager()
    #ptr_lst = mgr.dict()
    #ptr = mgr.list()
    #mgr.register("array", base_ptr)
    #ptr.append(base_ptr)
    ptr_lst["array"] = base_ptr
    ptr_lst["events"] = lst_event
    #ptr_lst = []
    #ptr_lst.append(base_ptr)
    #ptr_lst.append(lst_event)
    p1 = mp.Process(target=exec1, args=(ptr_lst,))
    p2 = mp.Process(target=exec2, args=(ptr_lst,))
    p1.start()
    p2.start()
    info("Starting")
    lst_event[1].wait()
    #lst_event[3].wait()
    #lst_event[4].wait()
    t1 = time.time()
    info("All Started")
    #lst_event[1].wait()
    #lst_event[2].wait()
    lst_event[2].wait()
    t2 = time.time()
    lst_event[0].clear()
    #info(arr)
    #arr = np.frombuffer(ptr_lst[0])
    #arr = np.frombuffer(ptr_lst["array"])
    #print(weakref.ref(arr))
    #crit(arr[1])
    #crit(arr[arr.size//2-1])
    #crit(arr[arr.size//2])
    #crit(arr[arr.size//2+1])
    #crit(arr[arr.size-1])
    return t1, t2
def exec1(ptr_lst):
    """Child process: write into the lower half of the shared array.

    Waits on barrier events[1] with the parent and sibling, times the
    fill loop, logs the elapsed seconds via crit(), then meets the
    others at barrier events[2].
    """
    #base_ptr = ptr_lst[0]
    #flags = ptr_lst[1]
    base_ptr = ptr_lst["array"]
    flags = ptr_lst["events"]
    #flags[3].set()
    flags[1].wait()
    #info('Exec1 Started')
    count = 0
    # NOTE(review): frombuffer defaults to float64 over a c_int64 buffer --
    # the view reinterprets the bytes; see main().
    data = np.frombuffer(base_ptr)
    #t1 = 0
    t1 = time.time()
    #crit(id(base_ptr))
    #crit(id(data))
    while count <= data.size//2:
        count += 1
        if count <= data.size//2 - 1:
            #info('P1 %d', count)
            data[count] = count#(count * np.sqrt(count )) ** (2.0/3.0)
    t2 = time.time()
    crit(t2 - t1)
    # while flags[0].is_set():
    #     if count <= data.size//2 - 2:
    #         count += 1
    #         if t1 == 0:
    #             t1 = time.time()
    #         #info('P1 %d', count)
    #         data[count] = count#(count * np.sqrt(count )) ** (2.0/3.0)
    #     else:
    #         if not flags[1].is_set():
    #             t2 = time.time()
    #             crit(t2-t1)
    flags[2].wait()
    #flags[1].set()
    #flags[0].wait()
    #count -= 1
    #info(data)
def exec2(ptr_lst):
    """Child process: write into the upper half of the shared array.

    Mirror image of exec1, offset by data.size//2; synchronizes on the
    same two barriers and logs its own elapsed time via crit().
    """
    #base_ptr = ptr_lst[0]
    #flags = ptr_lst[1]
    base_ptr = ptr_lst["array"]
    flags = ptr_lst["events"]
    #flags[4].set()
    flags[1].wait()
    #info('Exec2 Started')
    count = 0
    # NOTE(review): frombuffer defaults to float64 over a c_int64 buffer --
    # the view reinterprets the bytes; see main().
    data = np.frombuffer(base_ptr)
    #crit(id(base_ptr))
    #crit(id(data))
    # t1 = 0 is dead: it is immediately overwritten on the next line.
    t1 = 0
    t1 =time.time()
    while count <= data.size//2:
        count += 1
        if count <= data.size//2 - 1:
            #info('P1 %d', count)
            data[count + data.size//2] = count#(count * np.sqrt(count )) ** (2.0/ 3.0 )
    t2 = time.time()
    crit(t2 - t1)
    # while flags[0].is_set():
    #     if count <= data.size//2 - 2:
    #         count += 1
    #         if t1 == 0:
    #             t1 = time.time()
    #         #info('P2 %d', count)
    #         data[count + data.size//2] = count#(count * np.sqrt(count )) ** (2.0/ 3.0 )
    #     else:
    #         if not flags[2].is_set():
    #             t2 = time.time()
    #             crit("{}".format(t2 - t1))
    #             #crit(flags[2].is_set())
    flags[2].wait()
    #flags[2].set()
    #flags[0].wait()
    #count -= 1
    #info(data)
if __name__ == '__main__':
    #mp.freeze_support()
    # Run the benchmark once; main() returns (start, end) wall-clock times
    # of the parallel phase, so the print shows the elapsed seconds.
    for i in range(1):
        t1, t2 = main()
        #t2 = time.time()
        print(t2 - t1)
ushakovao/Py | 4,458,176,055,592 | 5c9f2ee1d4afbf61f4d31cbb3f134cbbba6e2c78 | 66308035c754d1b7e29eec08b716995c17f21566 | /Signals/tp8/C_zone.py | a2b91a66ef94eb1307b057e91c3f1c137b6fe62d | [] | no_license | https://github.com/ushakovao/Py | 5d603dd3a1ebff31c44a154c65c22961e75c7d60 | bdb06deff3ddbb5e1d5833178a14be87a3241e40 | refs/heads/master | 2020-05-27T03:42:45.340168 | 2019-05-24T18:34:34 | 2019-05-24T18:34:34 | 188,469,509 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import imageio
from PIL import Image
from pylab import *
import matplotlib.pyplot as plt
"""
Un algorithme de détection de composante connexe dans une image.
Comprenez-le. Faites le tourner pas à pas, sur un papier, avec un dessin.
"""
def add_neighbours_in_stack(image: np.ndarray, threshold: int, stack: list, i: int, j: int, inComponent: np.ndarray) -> None:
    """Mark (i, j) as visited and push its unvisited bright 4-neighbours.

    A neighbour is pushed when it lies inside the image, its intensity is
    strictly above ``threshold``, and it has not been claimed yet. Pushed
    pixels are flagged in ``inComponent`` immediately so each pixel is
    stacked at most once (without that check the search would revisit
    pixels and do redundant work).
    """
    rows, cols = image.shape[0], image.shape[1]
    inComponent[i, j] = True
    for pixel in ((i + 1, j), (i - 1, j), (i, j - 1), (i, j + 1)):
        k, l = pixel
        if not (0 <= k < rows and 0 <= l < cols):
            continue
        if image[k, l] > threshold and not inComponent[k, l]:
            inComponent[k, l] = True
            stack.append(pixel)
def connected_component(image: np.ndarray, seuil: int, i0: int, j0: int, inComponent: np.ndarray) -> None:
    """Flood-fill from (i0, j0), marking reached pixels in ``inComponent``.

    Iterative depth-first search: pop() removes and returns the most
    recently pushed pixel, and add_neighbours_in_stack pushes its
    eligible neighbours (those brighter than ``seuil``).
    """
    pending = [(i0, j0)]
    while pending:
        i, j = pending.pop()
        add_neighbours_in_stack(image, seuil, pending, i, j, inComponent)
def step0():
    """Demo: flood-fill from the image centre and display the component."""
    img = imageio.imread("../assets/img/1-2.jpg")
    # `im` is only used by the disabled contour snippet below; the call is
    # kept because it also verifies that 's.jpg' exists and loads.
    im = array(Image.open('s.jpg').convert('L'))
    # Use the red channel only.
    image = img[:,:,0]
    print(image.shape)
    # NOTE(review): np.bool was removed in NumPy 1.24 -- use plain `bool`.
    inComponent = np.empty(image.shape, dtype=np.bool)
    inComponent[:, :] = False
    # Seed the search at the centre pixel with threshold 10.
    connected_component(image, 10, image.shape[0] // 2, image.shape[1] // 2,inComponent)
    plt.imshow(inComponent,cmap='gray',vmin=0,vmax=1)
    plt.show()
    """
    plt.subplot(1, 2, 1)
    plt.imshow(image)
    plt.subplot(1,2,2)
    contour(im, levels=[245], colors='black', origin='image')
    plt.show()"""
if __name__=='__main__':
    # Run the demo: load the image, flood-fill from its centre, display.
    step0()
"""
BONUS :
Cette algorithme de détection de zone s'écrirait naturellement de manière récursive
Ré-écrivez-le en récursif.
L'emploie d'une pile (stack) permet d'éviter la récursivité.
Plus précisément, on crée nous même une pile
que l'ordinateur ferait naturellement dans une procédure récursive. L'avantage de faire nous même notre
pile, c'est qu'on maîtrise mieux les choses ; par exemple on pourrait controler la hauteur de la pile,
et utiliser le disque dure pour stocker des données quand la pile devient trop haute.
Maintenant, vous devrez comprendre le sens de : "stack-overflow"
"""
| UTF-8 | Python | false | false | 2,359 | py | 60 | C_zone.py | 56 | 0.662955 | 0.647966 | 0 | 75 | 30.12 | 117 |
Dwtexe/Python-Docs | 3,075,196,594,696 | 53a53985477a79fd4319c77b8057efc7aa8b7271 | 31fbaba0f6865538d76bc31b63bee65cfc27be1f | /Hello world/python_egitimi/combinations.py | f4c92b21d5c0ad73da9b60b36236a18a99ef3191 | [] | no_license | https://github.com/Dwtexe/Python-Docs | 3b59071978136170fb2db1a3785b15e859fd0d8d | e76fd5b0121836278ae8a039445f1047f027c33d | refs/heads/master | 2021-05-21T17:19:48.969151 | 2020-10-18T09:00:16 | 2020-10-18T09:00:16 | 252,732,861 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import itertools
import operator
# The combinations function: every unordered pair of distinct fruits.
meyve = ["elma", "armut", "karpuz", "portakal"]
kombin = itertools.combinations(meyve, 2)
# Alternative: itertools.combinations_with_replacement(meyve, 2) would also
# yield pairs of the same fruit.
for pair in kombin:
    print(pair)
| UTF-8 | Python | false | false | 246 | py | 105 | combinations.py | 102 | 0.726531 | 0.718367 | 0 | 13 | 17.769231 | 59 |
Jeff654/my_machine_learning | 3,058,016,761,002 | 9362c2e85b76124efaa3dc5b3ea83e97de611069 | cd3391d7009ce1975541b6377f3e7dca5c4a8c6c | /NLP_exercise/基础算法/base_algorithm/sklearn/SGD/plot_iris.py | f4947bf2033249367231682731c3ce53e9598bf6 | [] | no_license | https://github.com/Jeff654/my_machine_learning | ee7399fb514e36859a406330820dac9b15a3e348 | cfca78f2d351caa58092ce5e6b955347aba6637e | refs/heads/master | 2022-11-29T21:50:05.018262 | 2019-11-26T12:04:34 | 2019-11-26T12:04:34 | 224,181,536 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Decision-tree decision surfaces on every pair of the four iris features.
n_classes = 3
plot_colors = 'bry'      # one colour per class: blue, red, yellow
plot_step = 0.02         # mesh resolution for the decision surface
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]):
        # only take the two corresponding features
        x = iris.data[:, pair]
        y = iris.target
        # shuffle (fixed seed so every subplot shuffles identically)
        idx = np.arange(x.shape[0])
        np.random.seed(13)
        np.random.shuffle(idx)
        x = x[idx]
        y = y[idx]
        # standardize
        mean = x.mean(axis = 0)
        std = x.std(axis = 0)
        x = (x - mean) / std
        clf = DecisionTreeClassifier().fit(x, y)
        # One subplot per feature pair in a 2x3 grid.
        plt.subplot(2, 3, pairidx + 1)
        x_min, x_max = x[:, 0].min() - 1, x[:, 0].max() + 1
        y_min, y_max = x[:, 1].min() - 1, x[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step))
        # Predict over the mesh and paint the class regions.
        z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
        z = z.reshape(xx.shape)
        cs = plt.contourf(xx, yy, z, cmap = plt.cm.Paired)
        plt.xlabel(iris.feature_names[pair[0]])
        plt.ylabel(iris.feature_names[pair[1]])
        plt.axis("tight")
        # Overlay the training points, coloured by true class.
        for i, color in zip(range(n_classes), plot_colors):
                idx = np.where(y == i)
                plt.scatter(x[idx, 0], x[idx, 1], c = color, label = iris.target_names[i], cmap = plt.cm.Paired)
        plt.axis("tight")
plt.suptitle("decision surface of a decision tree using paired features")
plt.legend(loc = "best")
plt.show()
| UTF-8 | Python | false | false | 1,414 | py | 145 | plot_iris.py | 131 | 0.628713 | 0.602546 | 0 | 54 | 25.148148 | 98 |
amiller/glxcontext | 13,434,657,725,534 | 0dd5dd0ca7935a3ffa3571ff3d046b70a9751ab5 | d9f11335a07f294fe0878debb7a5fc180bc3bd46 | /setup.py | 110862f3690928a300ae839e7f28e9906af4a548 | [] | no_license | https://github.com/amiller/glxcontext | acfd90968218d8532b5617037fec9c398e37e8bb | 9a51f59b34ecb8c11e4ddd810c1bc7d43afd4235 | refs/heads/master | 2020-06-02T15:18:47.772333 | 2011-07-12T03:57:14 | 2011-07-12T03:57:14 | 2,034,003 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
ext_modules=[Extension("glxcontext",
["glxcontext/_glxcontext.c",
"glxcontext/glxcontext.pyx"],
libraries=['X11','GL'])]
setup(name='glxcontext',
version='0.01',
packages=['glxcontext'],
cmdclass={'build_ext': build_ext},
ext_modules=ext_modules)
| UTF-8 | Python | false | false | 456 | py | 5 | setup.py | 3 | 0.605263 | 0.594298 | 0 | 14 | 31.571429 | 52 |
ola-ct/projecteuler | 18,700,287,611,289 | f7eaad66e5fed90f595c12179c3952d6e5b66d38 | 9f2a53f049f08af5bf598e5a77de104568b2ac2b | /3/3a.py | 436e69f65b2736516dd6975f695d5a5c04e65b7e | [] | no_license | https://github.com/ola-ct/projecteuler | a99423c4e40792211aa4db26c5f06c636b31daf3 | 0698da44952cd5fe88498cbabc705ab06cc09747 | refs/heads/master | 2022-08-14T06:55:30.843277 | 2020-05-21T08:00:55 | 2020-05-21T08:00:55 | 265,789,155 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# http://projecteuler.net/index.php?section=problems&id=3
# Copyright (c) 2010 Oliver Lau <oliver@ersatzworld.net>
from math import sqrt
def primes(n):
sieve = range(++n)
sieve[:2] = [0, 0]
for i in xrange(2, int(sqrt(n))+1):
if sieve[i]:
for j in xrange(i**2, n, i):
sieve[j] = 0
return [ p for p in sieve if p ]
def factors(n):
return [ p for p in primes(n) if n % p == 0 ]
v = 600851475143
print max(factors(v))
| UTF-8 | Python | false | false | 523 | py | 24 | 3a.py | 23 | 0.571702 | 0.521989 | 0 | 23 | 21.73913 | 57 |
cgregurich/PythonFoodApp2 | 10,161,892,645,017 | 7a37de18f20c34841aeb3936b5149879259227fc | 8f970d551fb64f5c882c2830dedce8b1374cd34f | /fooditemdao.py | 7c3232cab5ca1ddcab40d0cba5b3dacfc52bb9ef | [] | no_license | https://github.com/cgregurich/PythonFoodApp2 | 8977c4dac29478220058431a10601356662111b6 | be75a67eef4e633d98a06eb8ea3d93738d14a1f0 | refs/heads/master | 2023-05-06T08:33:17.936894 | 2021-05-26T02:11:34 | 2021-05-26T02:11:34 | 274,915,567 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from fooditem import FoodItem
import sqlite3
class FoodItemDAO:
def __init__(self, db_name='fooditem.db'):
"""Creates the database of param db_name (if no name is supplied, defaults to
fooditem.db). Creates table fooditems if it doesn't yet exist"""
self.conn = sqlite3.connect(db_name)
self.c = self.conn.cursor()
self.c.execute("""CREATE TABLE IF NOT EXISTS fooditems (
name text, ss integer, unit text, cal integer, carb integer,
fat integer, protein integer, fiber integer, sugar integer
)""")
self.conn.commit()
def clear_database(self):
with self.conn:
self.c.execute("""DELETE FROM fooditems""")
def insert_food(self, food):
"""Inserts the FoodItem param food into the db"""
with self.conn:
self.c.execute("""INSERT INTO fooditems VALUES(
:name, :ss, :unit, :cal, :carb, :fat, :protein, :fiber, :sugar
)""", food.info)
def delete_food(self, name):
"""Deletes the food(s) that matches the param name from the db"""
with self.conn:
self.c.execute("""DELETE FROM fooditems WHERE name=?""", (name,))
return self.c.rowcount
def update_food(self, updated_food, old_name):
"""Updates the food in the db that has param old_name with all of the info
from param FoodItem updated_food. Name can be updated"""
with self.conn:
if old_name != updated_food.name:
self.c.execute("""UPDATE fooditems SET name=? WHERE name=?""", (updated_food.name, old_name))
self.c.execute("""UPDATE fooditems SET name=:name, ss=:ss, unit=:unit, cal=:cal,
carb=:carb, fat=:fat, protein=:protein, fiber=:fiber,
sugar=:sugar WHERE name=:name""", updated_food.info)
return self.c.rowcount
def retrieve_food(self, name):
"""Returns a FoodItem from the db that has the param name
(names should be unique, and this func assumes such)"""
with self.conn:
self.c.execute("""SELECT * FROM fooditems WHERE name=?""", (name,))
fetched_info = self.c.fetchone()
if fetched_info is None:
return None
food = FoodItem()
food.set_info_from_tuple(fetched_info)
return food
def retrieve_all_foods(self):
"""Return all FoodItems from the db as a list of FoodItems"""
with self.conn:
self.c.execute("""SELECT * FROM fooditems""")
info_tup_list = self.c.fetchall()
if not info_tup_list:
return None
foods_list = self._convert_to_food_items_list(info_tup_list)
return foods_list
def _convert_to_food_items_list(self, info_tup_list):
"""Receives param of a list of tuples which are info for a FoodItem.
Returns a list of FoodItems with the info supplised by the param"""
if not info_tup_list:
return None
foods_list = []
for info_tup in info_tup_list:
if len(info_tup) != 9:
raise ValueError("Element of list is not tuple of 9 elements.")
food = FoodItem()
food.set_info_from_tuple(info_tup)
foods_list.append(food)
return foods_list
def sort_foods(self, sort_type, order):
"""Sorts the FoodItems in the db and returns a FoodItems list of those sorted items
Param sort_type is a string of the column name
Param order is either 'ASC' or 'DESC'"""
crits = ('name', 'ss', 'unit', 'cal', 'carb', 'fat', 'protein', 'fiber', 'sugar')
if sort_type not in crits:
raise ValueError('Invalid arg for criteria')
if order != 'ASC' and order != 'DESC':
raise ValueError('Invalid arg for order')
with self.conn:
self.c.execute(f"SELECT * FROM fooditems ORDER BY {sort_type} {order}")
sorted_list = self.c.fetchall()
sorted_list = self._convert_to_food_items_list(sorted_list)
return sorted_list
def retrieve_all_food_names(self):
"""Returns a list of all FoodItems' names from fooditems table in DB"""
with self.conn:
self.c.execute("SELECT name FROM fooditems")
names = self.c.fetchall()
# return list of str instead of list of tuples
names_list = [name[0] for name in names]
return names_list | UTF-8 | Python | false | false | 4,527 | py | 32 | fooditemdao.py | 28 | 0.584714 | 0.583609 | 0 | 109 | 40.541284 | 109 |
AnVales/Pandas | 6,536,940,249,414 | c53f561d70d4b90077006977442d82a8c34d6445 | fbeb32b84ee5b117b296ad490bf0db9aeeb47780 | /pandas_df.py | e3ae847cde37cb9674fa56cd31ede10bf015b61d | [] | no_license | https://github.com/AnVales/Pandas | 2938b5128bd68cd84546532168c2fe3f58aff31a | 28d18cc1cca19fbd9563f3168ccf7b1bc45d9eff | refs/heads/main | 2023-03-25T07:39:06.835422 | 2021-03-20T17:58:22 | 2021-03-20T17:58:22 | 348,117,622 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # PANDAS: DATAFRAME #
# INFO: https://www.geeksforgeeks.org/python-pandas-dataframe/ #
import pandas as pd
import numpy as np
# HASH TO DATAFRAME #
# All the narray must be of the same length
hash_1 = {'col1': [1, 2], 'col2': [3, 4]}
df_1 = pd.DataFrame(data=hash_1)
# LIST TO DATAFRAME #
lst_2 = ['I', 'am', 'learning', 'pandas',
'and', 'it', 'is', 'cool']
df_2 = pd.DataFrame(lst_2)
# COLUMN SELECTION #
data_3 = {'Name':['Jai', 'Princi', 'Gaurav', 'Anuj'],
'Age':[27, 24, 22, 32],
'Address': ['Delhi', 'Kanpur', 'Allahabad', 'Kannauj'],
'Qualification': ['Msc', 'MA', 'MCA', 'Phd']}
df_3 = pd.DataFrame(data_3)
print(df_3[['Name', 'Qualification']])
# MAKE DATA FRAME FROM CSV FILE #
df_4 = pd.read_csv("nba.csv", index_col= "Name")
first = df_4.loc["Avery Bradley"]
second = df_4.loc["R.J. Hunter"]
# Df_4 type: pandas.core.frame.DataFrame
print(type(df_4))
# first type: pandas.core.series.Series
print(type(first))
first_1 = df_4["Age"]
print(first_1)
first_2 = df_4.loc["Avery Bradley"]["Age"]
print(first_2)
print(first, "\n\n\n", second)
# SELECT A SINGLE COLUMN
row_1 = df_4.iloc[3]
print(row_1)
# WORKING WITH MISSING DATA #
hash_2 = {'First Score': [100, 90, np.nan, 95],
'Second Score': [30, 45, 56, np.nan],
'Third Score':[np.nan, 40, 80, 98]}
df_5 = pd.DataFrame(hash_2)
print(df_5.isnull())
print(df_5.notnull())
df_6 = df_5.fillna(0)
print(df_6)
# we drop rows with at least one Nan value (Null value)
print(df_5.dropna())
# ITERATE OVER ROWS AND COLUMNS #
hash_3 = {'name':["aparna", "pankaj", "sudhir", "Geeku"],
'degree': ["MBA", "BCA", "M.Tech", "MBA"],
'score':[90, 40, 80, 98]}
df_7 = pd.DataFrame(hash_3)
for i, j in df_7.iterrows():
print(i, j)
print()
columns = list(df_7)
print(columns)
for i in columns:
# print the third element of the column
print (df_7[i][2]) | UTF-8 | Python | false | false | 1,928 | py | 2 | pandas_df.py | 1 | 0.593361 | 0.550311 | 0 | 74 | 24.081081 | 64 |
ivanmrosa/frangoFramework | 9,517,647,542,161 | 5656ff2c5683047dce680069656536343e318dfb | 32a265c0c3970d17a04443fa0674e04183267b6f | /frango.py | 2db1cb10d7ed37212d9f553a3a25c38a3b0a4c6c | [] | no_license | https://github.com/ivanmrosa/frangoFramework | d73f6e324749684fd8fc84ab7964e258f9ee330f | 37c09388f48569a67fa8e56a87f6e0bb1832328b | refs/heads/master | 2022-07-25T17:43:28.075318 | 2022-06-30T22:23:35 | 2022-06-30T22:23:35 | 119,444,458 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
#coding utf-8
import os
import sys
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer, SimpleHTTPRequestHandler
import json
from time import mktime
import time
from datetime import datetime
import shutil
import re
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
from selenium.common.exceptions import NoSuchElementException
try:
import msvcrt
has_msvcrt = True
except:
has_msvcrt = False
#host = '127.0.0.1'
host = '0.0.0.0'
port = 8081
server_address = (host, port)
base_url = 'http://' + host + ':' + str(port)
#base_dirjs = os.path.dirname(os.path.abspath(__file__)) + '/js'
base_dir = os.getcwd() #os.path.dirname(os.path.abspath(__file__))
base_dirjs = os.path.join(base_dir, 'js')
base_dircss = os.path.join(base_dir, 'css')
regex_debug_script = r'<!--begin script debugger-->.*<!--end script debugger-->'
HTML_REUSABLE_TEMPLATE = \
'''
<div id="[{([[[COMPONENT_NAME]]]) id }]" class="[[[COMPONENT_NAME]]]">
Component: [[[COMPONENT_NAME]]]
</div>
'''
HTML_SIMPLE_TEMPLATE = HTML_REUSABLE_TEMPLATE
REGISTER_COMPONENT_JS = \
'''
app.components.push(function () {
frango.component('[[[COMPONENT_NAME]]]').
setPathLocalTemplate('components/[[[CONTAINER_NAME]]][[[COMPONENT_NAME]]]/template/[[[COMPONENT_NAME]]].html').
objectGetData([[[COMPONENT_NAME]]]Component).
controller([[[COMPONENT_NAME]]]Component.controller).
register()
});
'''
CONTROLLER_COMPONENT_JS = \
'''
function [[[COMPONENT_NAME]]]ControllerClass() {
};
function [[[COMPONENT_NAME]]]ViewClass(instanceId, [[[COMPONENT_NAME]]]Controller) {
var self = this;
self.componentSelector = '#' + instanceId;
var htmlComponent = frango.find(self.componentSelector);
};
[[[COMPONENT_NAME]]]Component = frango.getComponentObject('[[[COMPONENT_NAME]]]', [[[COMPONENT_NAME]]]ViewClass,
[[[COMPONENT_NAME]]]ControllerClass);
'''
CONTROLLER_COMPONENT_REUSABLE_JS = CONTROLLER_COMPONENT_JS
COMPONENT_CSS_TEMPLATE = \
'''
/*always use the component class to select specific style roles - for general roles use app.css*/
/*
.[[[COMPONENT_NAME]]] .some_element {
}
*/
'''
ROUTES_FILE_TEMPLATE = \
'''
frango.app.configureRote(function () {
/*url : component-name*/
frango.app.routes =
[[[JSON-URL:COMPONENTS]]]
});
'''
ROUTE_COMPONENT_JSON_TEMPLANTE = \
'''
{
"url": "",
"componentName": "[[[COMPONENT_NAME]]]"
}
'''
class testHTTPServer_RequestHandler(SimpleHTTPRequestHandler):
def get_MIME(self, key):
mime = {
".aac": "audio/aac",
".abw": "application/x-abiword",
".arc": "application/octet-stream",
".avi": "video/x-msvideo",
".azw": "application/vnd.amazon.ebook",
".bin": "application/octet-stream",
".bz": "application/x-bzip",
".bz2": "application/x-bzip2",
".csh": "application/x-csh",
".css": "text/css",
".csv": "text/csv",
".doc": "application/msword",
".eot": "application/vnd.ms-fontobject",
".epub": "application/epub+zip",
".gif": "image/gif",
".htm": "",
".html": "text/html",
".ico": "image/x-icon",
".ics": "text/calendar",
".jar": "application/java-archive",
".jpeg": "",
".jpg": "image/jpeg",
".js": "application/javascript",
".json": "application/json",
".mid": "",
".midi": "audio/midi",
".mpeg": "video/mpeg",
".mpkg": "application/vnd.apple.installer+xml",
".odp": "application/vnd.oasis.opendocument.presentation",
".ods": "application/vnd.oasis.opendocument.spreadsheet",
".odt": "application/vnd.oasis.opendocument.text",
".oga": "audio/ogg",
".ogv": "video/ogg",
".ogx": "application/ogg",
".otf": "font/otf",
".png": "image/png",
".pdf": "application/pdf",
".ppt": "application/vnd.ms-powerpoint",
".rar": "application/x-rar-compressed",
".rtf": "application/rtf",
".sh": "application/x-sh",
".svg": "image/svg+xml",
".swf": "application/x-shockwave-flash",
".tar": "application/x-tar",
".tif": "",
".tiff": "image/tiff",
".ts": "application/typescript",
".ttf": "font/ttf",
".vsd": "application/vnd.visio",
".wav": "audio/x-wav",
".weba": "audio/webm",
".webm": "video/webm",
".webp": "image/webp",
".woff": "font/woff",
".woff2": "font/woff2",
".xhtml": "application/xhtml+xml",
".xls": "",
".xlsx": "application/vnd.ms-excel",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "",
".xml": "application/xml",
".xul": "application/vnd.mozilla.xul+xml",
".zip": "application/zip",
".3gp": "video/3gpp",
"audio/3gpp": "",
".3g2": "video/3gpp2",
"audio/3gpp2": "",
".7z": "application/x-7z-compressed",
".map" : "magnus-internal/imagemap"
}
if not key in mime:
result = ''
else:
result = mime[key]
return result
def do_OPTIONS(self):
self.send_response(200)
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header("Access-Control-Allow-Headers", '*')
self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS, PUT, DELETE')
self.end_headers()
return
def do_GET(self):
# Send response status code
getting_template = self.headers["GETTEMPLATE"]
path = 'app.html' if self.path == '/' else self.path[1:]
#building a single page application
pathSplited = path.split('/')
if pathSplited[0] == 'frango-framework-build-app':
path = 'app.html'
path = path.split('?')[0]
sendReply = False
mimetype = self.get_MIME(os.path.splitext(path)[1])
sendReply = len(mimetype) > 0
if sendReply == True:
# Send headers
self.send_response(200)
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header("Access-Control-Allow-Headers", '*')
self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS, PUT, DELETE')
self.send_header('Content-type', mimetype)
self.end_headers()
try:
f = open(path, 'rb')
self.wfile.write(bytes(f.read()))
return
except IOError:
self.send_error(404, 'File Not Found: %s' % path)
return
def get_dependency_js_file(mode='r'):
return open(os.path.join(base_dirjs, 'js-dependency.json'), mode)
def get_dependency_css_file(mode='r'):
return open(os.path.join(base_dirjs, 'css-dependency.json'), mode)
def get_js_dependency_files():
with get_dependency_js_file() as fconfig:
files = json.loads(fconfig.read())
return files["general-js"] + files["components-js"]
def put_js_in_page(debugging = False):
script = ""
app_html = ""
if debugging:
files = get_js_dependency_files()
else:
files = ['js/js-centralizer.js']
for path in files:
script += ' <script type="text/javascript" src="{0}"></script>\n'.format(path.replace('\\', '/'))
script = '<!--begin script debugger-->\n' + script + ' <!--end script debugger-->'
with open(os.path.join(base_dir, 'app.html'), 'r+') as f:
app_html = f.read()
app_html = re.sub(regex_debug_script, script, app_html, 0, re.DOTALL)
f.seek(0)
f.truncate()
f.seek(0)
f.write(app_html)
def get_css_dependency_files():
with get_dependency_css_file() as fconfig:
files = json.loads(fconfig.read())
return files["general-css"] + files["components-css"]
def create_centralizer_js():
centralizerjs = os.path.join(base_dirjs, 'js-centralizer.js')
files = get_js_dependency_files()
fcentralize = open(centralizerjs, 'w')
try:
for fi in files:
with open( os.path.join( base_dir, fi), 'r') as fjs:
fcentralize.write('\n')
fcentralize.write(fjs.read())
fcentralize.close()
except Exception as e:
fcentralize.close()
raise Exception('compile failed.. ' + str(e))
def create_centralizer_css():
centralizerjs = os.path.join(base_dircss, 'css-centralizer.css')
files = get_css_dependency_files()
fcentralize = open(centralizerjs, 'w')
try:
for fi in files:
with open( os.path.join(base_dir, fi), 'r') as fjs:
fcentralize.write('\n')
fcentralize.write(fjs.read())
fcentralize.close()
except Exception as e:
fcentralize.close()
raise Exception('compile failed.. ' + str(e))
def put_routes_together():
try:
#component_path = os.path.join(base_dir, 'components')
with open(os.path.join(base_dirjs, 'components-config.json'), 'r') as fcomponent:
component_config = json.loads(fcomponent.read())
route_app_file = open(os.path.join(
base_dir, 'js', 'app-routes.js'), 'w')
route_app_file.seek(0)
new_routes = {}
for component_name in component_config:
relative_path = component_config[component_name]
component_folder = base_dir + relative_path
if os.path.isdir(component_folder):
component_rote_json = os.path.join(
component_folder, component_name + '-route.json')
with open(component_rote_json) as routef:
routes = json.loads(routef.read())
if routes["url"] and routes["componentName"]:
new_routes.update({routes["url"]: routes["componentName"]})
route_app_file.write(ROUTES_FILE_TEMPLATE.replace(
'[[[JSON-URL:COMPONENTS]]]', json.dumps(new_routes, indent = 4) ))
route_app_file.close()
except Exception as e:
route_app_file.close()
raise Exception(e)
def file_modified(path):
file_mod_time = os.stat(path).st_mtime
last_time = (time.time() - file_mod_time)
return last_time < 3
def check_modifications(thread_server):
compiling = False
while thread_server.is_alive():
time.sleep(3)
files = get_js_dependency_files()
for fi in files:
if file_modified(base_dir + '/' + fi):
compile(debugging=True)
break
files = get_css_dependency_files()
for fi in files:
if file_modified(base_dir + '/' + fi):
compile(debugging=True)
break
def create_register_component(path, component_name, container):
container_and_slash = container
if container:
container_and_slash += "/"
else:
container_and_slash = ""
with open(os.path.join(path, component_name + '-register.js'), 'w') as f:
f.write(REGISTER_COMPONENT_JS.replace(
"[[[COMPONENT_NAME]]]", component_name).replace('[[[CONTAINER_NAME]]]', container_and_slash) )
def create_controller_component(path, component_name, reusable):
if reusable:
template = CONTROLLER_COMPONENT_REUSABLE_JS
else:
template = CONTROLLER_COMPONENT_JS
with open(os.path.join(path, component_name + '.js'), 'w') as f:
f.write(template.replace(
"[[[COMPONENT_NAME]]]", component_name))
def create_template_html_file(path, component_name, reusable):
if reusable:
template = HTML_REUSABLE_TEMPLATE.replace("[[[COMPONENT_NAME]]]", component_name)
else:
template = HTML_SIMPLE_TEMPLATE.replace("[[[COMPONENT_NAME]]]", component_name)
f = open(os.path.join(path, component_name + '.html'), 'w')
f.write(template)
f.close()
def create_template_css_file(path, component_name):
template = COMPONENT_CSS_TEMPLATE.replace("[[[COMPONENT_NAME]]]", component_name)
f = open(os.path.join(path, component_name + '.css'), 'w')
f.write(template)
f.close()
def get_relative_path(path):
relative = path.replace(base_dir, '').replace('\\', '/')
if relative[0:1] in ['/', '\\']:
return relative[1:]
else:
return relative
def insert_js_dependency(path, component_name):
relative_path = get_relative_path(path) #path.replace(base_dir, '')
with get_dependency_js_file(mode='r+') as f:
config = json.loads(f.read())
config["components-js"].append(os.path.join(relative_path, component_name + '-register.js'))
config["components-js"].append(os.path.join(relative_path, component_name + '.js'))
f.seek(0)
f.write(json.dumps(config, indent=4))
f.truncate()
def insert_css_dependency(path, component_name):
relative_path = get_relative_path(path)#path.replace(base_dir, '')
with get_dependency_css_file(mode='r+') as f:
config = json.loads(f.read())
config["components-css"].append(os.path.join(relative_path, component_name + '.css'))
f.seek(0)
f.write(json.dumps(config, indent=4))
f.truncate()
def delete_component_config(component_name):
fcomponent = open(os.path.join(base_dirjs, 'components-config.json'), 'r+')
component_config = json.loads(fcomponent.read())
del component_config[component_name]
fcomponent.seek(0)
fcomponent.write(json.dumps(component_config, indent=4))
fcomponent.truncate()
fcomponent.close()
def delete_js_dependency(path, component_name):
relative_path = get_relative_path(path)#path.replace(base_dir, '')
with get_dependency_js_file(mode='r+') as f:
config = json.loads(f.read())
try:
config["components-js"].remove(os.path.join(relative_path, component_name + '-register.js'))
config["components-js"].remove(os.path.join(relative_path, component_name + '.js'))
except ValueError:
pass
f.seek(0)
f.write(json.dumps(config, indent=4))
f.truncate()
def delete_css_dependency(path, component_name):
relative_path = get_relative_path(path)#path.replace(base_dir, '')
with get_dependency_css_file(mode='r+') as f:
config = json.loads(f.read())
try:
config["components-css"].remove(os.path.join(relative_path, component_name + '.css'))
except ValueError:
pass
f.seek(0)
f.write(json.dumps(config, indent=4))
f.truncate()
def create_component_route(path, component_name):
try:
f = open(os.path.join(path, component_name + '-route.json'), 'w')
f.write(ROUTE_COMPONENT_JSON_TEMPLANTE.replace('[[[COMPONENT_NAME]]]', component_name))
f.close()
except Exception:
f.close()
raise
def validade_component_name(component_name):
regex = '[/\\~"\'^-]'
m = re.findall(regex, component_name, re.DOTALL)
if m:
raise Exception('You cannot use ' + regex + ' keys in component name')
#findall(regex, text, re.DOTALL)
def create_container(container_name):
os.makedirs(os.path.join(base_dir, 'components', container_name))
def create_component(component_name, reusable):
validade_component_name(component_name)
container_name = input('Type the container name this component belongs to or leave blank... ')
container_name = container_name.strip()
if container_name:
if os.path.isdir(os.path.join(base_dir, 'components', container_name)):
component_directory = os.path.join(base_dir, 'components', container_name, component_name)
else:
raise Exception('Invalid container. Type a existent container name')
else:
component_directory = os.path.join(base_dir, 'components', component_name)
cjsdir = os.path.join(component_directory, 'js')
cssdir = os.path.join(component_directory, 'css')
ctemplatedir = os.path.join(component_directory, 'template')
fcomponent = open(os.path.join(base_dirjs, 'components-config.json'), 'r+')
component_config = json.loads(fcomponent.read())
if component_name in component_config:
raise Exception('Component alredy exists')
# root
os.makedirs(component_directory)
# js
os.makedirs(cjsdir)
#css
os.makedirs(cssdir)
# template
os.makedirs(ctemplatedir)
# register-component js file
create_register_component(cjsdir, component_name, container_name)
# controller js file
create_controller_component(cjsdir, component_name, reusable)
# template-component html file
create_template_html_file(ctemplatedir, component_name, reusable)
# template-component css file
create_template_css_file(cssdir, component_name)
# insert js on js-dependency
insert_js_dependency(cjsdir, component_name)
# insert cs on css-dependency
insert_css_dependency(cssdir, component_name)
#insert file component-route.json
create_component_route(component_directory, component_name)
component_config.update({component_name: component_directory.replace(base_dir, '')})
fcomponent.seek(0)
fcomponent.write(json.dumps(component_config, indent = 4))
fcomponent.close()
def delete_component(component_name):
container_name = input('Type the container name this component belongs to or leave blank... ')
container_name = container_name.strip()
if container_name:
if os.path.isdir(os.path.join(base_dir, 'components', container_name)):
component_directory = os.path.join(base_dir, 'components', container_name, component_name)
else:
raise Exception('Invalid container. Type a existent container name')
else:
component_directory = os.path.join(base_dir, 'components', component_name)
print('Do you realy want to remove the component ' + component_directory + '? ')
response = input('Please enter y/n ')
if not response in ['y', 'n']:
raise Exception('Argument not recognized')
if response == 'y':
cjsdir = os.path.join(component_directory, 'js')
cssdir = os.path.join(component_directory, 'css')
delete_js_dependency(cjsdir, component_name)
delete_css_dependency(cssdir, component_name)
delete_component_config(component_name)
shutil.rmtree(component_directory)
def compile(debugging = False):
print('compiling...')
create_centralizer_js()
put_js_in_page(debugging)
create_centralizer_css()
put_routes_together()
print('compiled...')
def kbfunc():
if not has_msvcrt:
return 0
x = msvcrt.kbhit()
if x:
ret = ord(msvcrt.getch())
else:
ret = 0
return ret
def chek_key_press(server_class):
if not has_msvcrt:
return
while True:
try:
time.sleep(1)
x = kbfunc()
if x == 4: # Ctrl-D
raise KeyboardInterrupt('Execution interrupted by user.')
except KeyboardInterrupt as identifier:
server_class.shutdown()
server_class.socket.close()
#server_th
#server_sd_th,
#check_fl_th
break
class HTTPServerBreak(HTTPServer):
def service_actions(self):
x = kbfunc()
if x == 4: # Ctrl-D
raise KeyboardInterrupt('Execution interrupted by user.')
def serve(debugging = False):
compile(debugging)
print('starting server...')
httpd = HTTPServerBreak(server_address, testHTTPServer_RequestHandler)
print('running server at ' + base_url + '...')
print('To stop, press CTRL + D on windows or CTRL + C on UNIX based systems')
# Server settings
# Choose port 8080, for port 80, which is normally used for a http server, you need root access
# server_address = ('127.0.0.1', 8081)
#th_check_press = threading.Thread(target=chek_key_press, args=(httpd,))
#th_check_press.start()
httpd.serve_forever()
raise Exception('Server closed by user.')
def run_serve(debugging = False):
th_server = threading.Thread(target=serve, args=(debugging,))
th_check_file = threading.Thread(
target=check_modifications, args=(th_server,))
th_server.start()
if debugging:
th_check_file.start()
def build():
bin_dir = ""
if os.path.exists(os.path.join(base_dir, 'config.json')):
with open(os.path.join(base_dir, 'config.json'), 'r') as cf:
confi = json.loads(cf.read())
bin_dir = confi["build-directory"]
if bin_dir == "":
bin_dir = input('Please type the directory for generate the files.') #os.path.join(base_dir, 'bin')
if(os.path.exists(bin_dir)):
remove = input('The directory already exists. It will be deleted. Do you confirm? yes/no .')
if remove == 'yes':
shutil.rmtree(bin_dir)
else:
raise Exception('Operation canceled by user.')
run_serve()
os.makedirs(bin_dir)
compile()
shutil.copytree(os.path.join(base_dir,'css'), os.path.join(bin_dir,'css'))
shutil.copytree(os.path.join(base_dir,'extra-frameworks'),os.path.join(bin_dir,'extra-frameworks'))
shutil.copytree(os.path.join(base_dir,'img'),os.path.join(bin_dir,'img'))
shutil.copytree(os.path.join(base_dir,'js'),os.path.join(bin_dir,'js'))
shutil.copytree(os.path.join(base_dir,'frango'),os.path.join(bin_dir,'frango'))
#shutil.copy(os.path.join(base_dir, 'swregister.js'), bin_dir)
#shutil.copy(os.path.join(base_dir, 'service-worker.js'), bin_dir)
#shutil.copy(os.path.join(base_dir, 'manifest.json'), bin_dir)
browser = webdriver.Chrome(executable_path=os.path.join(base_dir, "chromedriver"))
print('loading application..')
browser.get('http://localhost:8081/frango-framework-build-app')
element = None
while not element:
try:
element = browser.find_element_by_class_name("frango-built")
except NoSuchElementException as e:
pass
#time.sleep(20)
html = browser.page_source
html = '<!doctype HTML>' + html
with open(os.path.join(bin_dir, 'index.html'), 'w') as f:
f.write(html)
browser.quit()
print('Build completed. Look at ' + bin_dir)
def create_project():
project_name = input('Please, type a name for the project:')
template_dir = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'template')
shutil.copytree(template_dir, os.path.join(base_dir, project_name))
print('Project created on ' + template_dir, os.path.join(base_dir, project_name) )
def run():
if len(sys.argv) > 1:
command = sys.argv[1]
else:
command = 'serve'
if command == 'compile' or command == 'cp':
compile()
elif command == 'serve' or command == 'sv':
run_serve(debugging=True)
elif command == 'createcomponent' or command == 'cc':
component_name = sys.argv[2]
reusable = False
if len(sys.argv) > 3:
third_arg = sys.argv[3]
if third_arg == 'reusable' or third_arg == 'rs' :
reusable = True
create_component(component_name, reusable)
elif command == 'removecomponent' or command == 'rc' :
delete_component(sys.argv[2])
elif command == 'build' or command == 'bl':
build()
elif command == 'createproject' or command == 'cp':
create_project()
elif command == 'createcontainer' or command == 'ccn':
container_name = sys.argv[2]
create_container(container_name)
else:
raise Exception('Nothing defined for ' + command + ' command')
if __name__ == '__main__':
run()
| UTF-8 | Python | false | false | 25,026 | py | 12 | frango.py | 6 | 0.584552 | 0.580197 | 0 | 725 | 33.518621 | 119 |
rubenvermaas/Workflow | 14,181,982,035,768 | ffcf1bdb526166f7785a15784b419e77833c7276 | 7bdd88bb4fcf57fd1ba83b9954aeed8073454bec | /GCperc.py | a57e446c7d4e8ca5d5c9320f2347096ab9dec5fa | [] | no_license | https://github.com/rubenvermaas/Workflow | 9fbbdd45cab6e880069b5f7a16076dcbeeecf85b | 857ec06bcb374c00749ae9fef2568b6455bbefb6 | refs/heads/master | 2021-10-10T13:11:02.710727 | 2019-01-11T10:24:58 | 2019-01-11T10:24:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from Bio import SeqUtils as SQ
seq = open(snakemake.input[0])
output = open(snakemake.output[0], "w")
header = ""
strSeq = ""
for line in seq:
if ">" in line:
header = line
output.write(line + "\n")
if line == "\n" and strSeq != "":
GCperc = SQ.GC(strSeq)
output.write(str(GCperc))
strSeq = ""
if ">" not in line:
strSeq += line
| UTF-8 | Python | false | false | 401 | py | 26 | GCperc.py | 19 | 0.526185 | 0.521197 | 0 | 18 | 20.777778 | 39 |
mansur007/bookassistant | 10,642,928,991,563 | 02ef0ede125d55f72dcf9e4531a47d65f3bd7ab0 | 7d39be409f732d296aabcd75430130bc624bdf9f | /gui.py | 115720e8797bf6e77cdea986ad564541cc69821e | [] | no_license | https://github.com/mansur007/bookassistant | 1c089b66b3e8f2507337e4fe7302cdfde02bf02b | 2ad1c10e2eb09a6568a1bb93904c4ccaa852776b | refs/heads/master | 2022-03-10T18:34:34.412252 | 2019-07-15T11:48:24 | 2019-07-15T11:48:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os, string
from tkinter import *
import threading
import text_processor
import text2speech
class GUI(threading.Thread):
    """Tkinter front-end for the book assistant.

    Wires a playlist (PL), a speech transcriber (T) and a dictionary /
    translator (D) into playback controls, a dialogue log and a rolling
    transcription view.  Subclasses threading.Thread; the Tk main loop is
    expected to be driven by the caller (see the commented-out mainloop()
    at the end of __init__).
    """
    def __init__(self, PL, T, D, TranscriptDuration):
        """Build all widgets, bind the button handlers and schedule the
        first transcript refresh.

        PL: playlist object (playback control and transcript access)
        T:  transcriber (audio files and microphone)
        D:  dictionary object (translate / define)
        TranscriptDuration: seconds of recent audio used for transcription
        """
        super().__init__()
        self.PL = PL # playlist
        self.T = T # transcriber
        self.D = D # dictionary
        # Two TTS voices: English for definitions, Russian for translations.
        self.dictionaryVoice = text2speech.SpeechSynthesizer(lang_code='en-US', name='en-US-Wavenet-B')
        self.translatorVoice = text2speech.SpeechSynthesizer(lang_code='ru-RU', name='ru-RU-Wavenet-C')
        self.button_borderwidth=3
        self.TranscriptDuration = TranscriptDuration
        self.root = Tk()
        self.track_list = Listbox(self.root, selectmode=SINGLE) # visual representation of playlist
        self.track_list.config(font=("Times New Roman", 10), borderwidth=3, width=35)
        self.track_list.grid(row=0, rowspan=2)
        for item in PL.entry_list:
            self.track_list.insert(END, item.audio_path)
        self.track_list.selection_set(0)
        self.track_list.activate(0)
        self.icon_buttons = Frame(self.root) # will hold buttons with icons
        self.icon_buttons.grid(row=0, column=1)
        self.text_buttons = Frame(self.root)
        self.text_buttons.grid(row=1, column=1)
        # Transport buttons: prev / play / pause / stop / next, each with a
        # downsampled PNG icon from the icons/ directory.
        self.prev_button = Button(self.icon_buttons, text='Prev')
        self.prev_icon = PhotoImage(master=self.root,
                                    file='icons/iconfinder_back_126585.png')
        self.prev_icon = self.prev_icon.subsample(3, 3)
        self.prev_button.config(image=self.prev_icon, borderwidth=self.button_borderwidth)
        self.prev_button.grid(row=0, column=1)
        self.play_button = Button(self.icon_buttons)
        self.play_icon = PhotoImage(master=self.root,
                                    file='icons/iconfinder_icon-play_211876.png')
        self.play_icon = self.play_icon.subsample(6,6)
        self.play_button.config(image=self.play_icon, borderwidth=self.button_borderwidth)
        self.play_button.grid(row=0, column=2)
        self.pause_button = Button(self.icon_buttons)
        self.pause_icon = PhotoImage(master=self.root,
                                     file='icons/iconfinder_media-pause_216309.png')
        self.pause_icon = self.pause_icon.subsample(6, 6)
        self.pause_button.config(image=self.pause_icon, borderwidth=self.button_borderwidth)
        self.pause_button.grid(row=0, column=3)
        self.stop_button = Button(self.icon_buttons)
        self.stop_icon = PhotoImage(master=self.root,
                                    file='icons/iconfinder_media-stop_216325.png')
        self.stop_icon = self.stop_icon.subsample(6, 6)
        self.stop_button.config(image=self.stop_icon, borderwidth=self.button_borderwidth)
        self.stop_button.grid(row=0, column=4)
        self.next_button = Button(self.icon_buttons)
        self.next_icon = PhotoImage(master=self.root,
                                    file='icons/iconfinder_forward_126569.png')
        self.next_icon = self.next_icon.subsample(6, 6)
        self.next_button.config(image=self.next_icon, borderwidth=self.button_borderwidth)
        self.next_button.grid(row=0, column=5)
        self.show_recent_words_button = Button(self.text_buttons, text='Show Recent Words')
        self.show_recent_words_button.config(font=("Times New Roman", 10), borderwidth=self.button_borderwidth)
        self.show_recent_words_button.grid(row=0, column=0)
        self.speak_button = Button(self.text_buttons, text='Voice Command')
        self.speak_button.config(font=("Times New Roman", 10), borderwidth=self.button_borderwidth)
        self.speak_button.grid(row=0, column=1)
        # Wire the left-mouse-button click of each button to its handler.
        self.play_button.bind("<Button-1>", self.play_track)
        self.stop_button.bind("<Button-1>", self.stop_track)
        self.next_button.bind("<Button-1>", self.next_track)
        self.prev_button.bind("<Button-1>", self.prev_track)
        self.pause_button.bind("<Button-1>", self.pause_track)
        self.speak_button.bind("<Button-1>", self.parse_voice)
        self.show_recent_words_button.bind("<Button-1>", self.show_recent_words)
        # dialogue_box logs user commands and assistant replies;
        # transcription_box shows the rolling transcript of the audio.
        self.dialogue_box = Text(self.root, wrap=WORD, height=8, width=64)
        self.dialogue_box.configure(font=("Times New Roman", 14), borderwidth=4)
        self.dialogue_box.grid(row=3, columnspan=2)
        self.dialogue_box_scrollbar = Scrollbar(self.root, orient="vertical", command=self.dialogue_box.yview)
        self.dialogue_box.configure(yscrollcommand=self.dialogue_box_scrollbar.set)
        self.dialogue_box_scrollbar.grid(row=3, column=3, sticky='ns')
        self.transcription_box = Text(self.root, wrap=WORD, height=18, width=64)
        self.transcription_box.configure(font=("Times New Roman", 14), borderwidth=4)
        self.transcription_box.grid(row=4, columnspan=2)
        self.transcription_scrollbar = Scrollbar(self.root, orient="vertical", command=self.transcription_box.yview)
        self.transcription_box.configure(yscrollcommand=self.transcription_scrollbar.set)
        self.transcription_scrollbar.grid(row=4, column=3, sticky='ns')
        # making sure that a script for the first utterance shows up:
        self.cur_interval_start = -0.001
        self.cur_interval_end = 0
        self.root.after(50, self.update_script)
        # self.root.mainloop()
    def play_track(self, event=None):
        """Start (or resume) playback; bound to the play button and also
        called programmatically from parse_voice (hence event=None)."""
        is_unpausing = self.PL.play()
        if is_unpausing is True:
            # NOTE(review): skip_update is set here but never read anywhere
            # else in this class — presumably meant to suppress the next
            # update_script tick after un-pausing; confirm.
            self.skip_update = True
    def stop_track(self, event):
        """Stop playback."""
        self.PL.stop()
    def prev_track(self, event):
        """Jump to the previous track and sync the listbox selection."""
        self.PL.goto_prev()
        self.track_list.selection_clear(0, END)
        self.track_list.selection_set(self.PL.curr_index)
        self.track_list.activate(self.PL.curr_index)
    def next_track(self, event):
        """Jump to the next track and sync the listbox selection."""
        self.PL.goto_next()
        self.track_list.selection_clear(0, END)
        self.track_list.selection_set(self.PL.curr_index)
        self.track_list.activate(self.PL.curr_index)
    def pause_track(self, event=None):
        """Pause playback."""
        self.PL.pause()
    def transcribe_recent(self, event):
        """Transcribe the last TranscriptDuration seconds of the current
        track and append the text to the dialogue box."""
        offset = max(0, self.PL.current_time() - self.TranscriptDuration)
        transcription = self.T.transcribe_audio(self.PL.get_cur_track_path(), self.TranscriptDuration, offset)
        # NOTE(review): 0.2 as a Text index is unusual — tkinter reads
        # floats as "line.char", and an index before "1.0" clamps to the
        # widget start, so this effectively prepends; confirm intent.
        self.dialogue_box.insert(0.2, transcription + "\n\n")
    def parse_voice(self, event=None):
        """Listen on the microphone until an intelligible utterance is
        heard, then dispatch the parsed command: play, translate the
        longest word of the phrase, or define it (with punctuation
        stripped and context looked up from the playlist)."""
        while True:
            self.dialogue_box.insert(0.2, "Speech Recognizer: Listening ...\n\n")
            transcription = self.T.transcribe_mic()
            if transcription!='inaudible':
                break
            else:
                self.dialogue_box.insert(0.2, "Speech Recognizer: I didn't get it, please try again\n\n")
        self.dialogue_box.insert(0.2, "User: {}\n\n".format(transcription))
        parsed_command = text_processor.parse_command(transcription)
        if parsed_command['func'] == 'play':
            self.play_track()
        elif parsed_command['func'] != 'unknown' and parsed_command['phrase'] == 'it':
            self.dialogue_box.insert(0.2, "assistant could not comprehend the target phrase\n\n")
        elif parsed_command['func'] == 'translate':
            recently_played_words = self.PL.get_recent_words()
            # Heuristic: the longest word in the phrase is the target word.
            target_word = None
            max_len = 0
            for w in parsed_command['phrase']:
                if len(w) > max_len:
                    target_word = w
                    max_len = len(w)
            target_word = text_processor.find_most_similar_word(target_word, recently_played_words)
            translation_dict = self.D.translate(target_word, 'ru')
            translation = translation_dict['translatedText']
            self.dialogue_box.insert(0.2, "translation of {}: {}\n\n".
                                     format(target_word, translation))
            self.translatorVoice.speak(translation)
        elif parsed_command['func'] == 'define':
            recently_played_words = self.PL.get_recent_words()
            phrase = parsed_command['phrase']
            # Same longest-word heuristic as the 'translate' branch.
            target_word = None
            max_len = 0
            for w in phrase:
                if len(w) > max_len:
                    target_word = w
                    max_len = len(w)
            target_word = text_processor.find_most_similar_word(target_word, recently_played_words)
            # remove punctuation from target_word:
            exclude = set(string.punctuation)
            target_word = ''.join(ch for ch in target_word if ch not in exclude)
            context_utterance = self.PL.get_word_context(target_word)['text']
            # self.dialogue_box.insert(0.2, "context: {}\n".format(context_utterance))
            relevant_definition, pos = self.D.define(target_word, context_utterance)
            self.dialogue_box.insert(0.2, "definition of {} as a {}: {}\n\n".
                                     format(target_word, pos, relevant_definition[0]))
            self.dictionaryVoice.speak(relevant_definition[0])
        print("parsed command: {}\n".format(parsed_command))
    # shows the most recent words from provided transcript
    def show_recent_words(self, event):
        """Dump the most recently played words into the dialogue box."""
        recent_words = self.PL.get_recent_words()
        self.dialogue_box.insert(0.2, ' '.join(recent_words) + "\n\n")
        self.transcription_box.see("end")
    def get_pos(self, event):
        """Print the current playback position (debugging helper)."""
        pos = self.PL.current_time()
        print(pos)
    def go_to(self, event):
        """Seek to the time typed into target_time_entry.

        NOTE(review): self.target_time_entry is never created in __init__,
        so this handler would raise AttributeError if bound — confirm.
        """
        target_time = float(self.target_time_entry.get())
        self.PL.go_to(target_time)
    def update_script(self):
        """Poll the playback position every 150 ms and, when it leaves the
        current utterance's [start, end] interval, append the next
        utterance's text to the transcription box."""
        t = max(self.PL.current_time(), 0)
        if t > self.cur_interval_end or t < self.cur_interval_start:
            # print("t: {}, cur_interval_start: {}, cur_interval_end: {}".
            #       format(t, cur_interval_start, cur_interval_end))
            # sys.stdout.flush()
            utterance = self.PL.get_utterance(t)
            self.cur_interval_start = utterance['start_time']
            self.cur_interval_end = utterance['end_time']
            self.transcription_box.insert('end', '{}\n\n'.format(utterance['text']))
            self.transcription_box.see('end')
        self.root.after(150, self.update_script)
tomd/act-workers | 9,620,726,749,086 | 390bc99b5f4de635c4ba3c434d1521569737abdb | 58dcd5c1de611954ad5328c1038a5177619c982c | /act/workers/libs/worker.py | 4e63ab053011f4ab0c7299dedd5ea3410d792e55 | [
"ISC"
] | permissive | https://github.com/tomd/act-workers | cac9edf4c840cfb0206244db143bccf093ad844b | ef42eaf26b14197a6bd1ac9ae12c4d39acc740c1 | refs/heads/master | 2020-05-25T04:04:55.844858 | 2020-03-11T12:54:44 | 2020-03-11T12:54:44 | 187,619,947 | 0 | 0 | ISC | true | 2019-05-20T10:33:15 | 2019-05-20T10:33:15 | 2019-05-20T08:51:31 | 2019-05-20T08:51:29 | 980 | 0 | 0 | 0 | null | false | false | """Common worker library"""
import argparse
import inspect
import os
import smtplib
import socket
import sys
from email.mime.text import MIMEText
from logging import error, warning
from typing import Any, Optional, Text, Dict
import caep
import requests
import urllib3
import act.api
# Identifier used by caep for the per-worker config directory/section lookup.
CONFIG_ID = "actworkers"
# Default ini-file name searched for in /etc and the user's config directory.
CONFIG_NAME = "actworkers.ini"
class UnknownResult(Exception):
    """Raised when an API request returns an unexpected (non-200) result."""
    def __init__(self, *args: Any) -> None:
        # Idiomatic cooperative super() call (was Exception.__init__(self, *args)).
        super().__init__(*args)
class NoResult(Exception):
    """Raised when an API request returns no data."""
    def __init__(self, *args: Any) -> None:
        # Idiomatic cooperative super() call (was Exception.__init__(self, *args)).
        super().__init__(*args)
class UnknownFormat(Exception):
    """Raised when input is in an unknown/unsupported parsing format."""
    def __init__(self, *args: Any) -> None:
        # Idiomatic cooperative super() call (was Exception.__init__(self, *args)).
        super().__init__(*args)
def parseargs(description: str) -> argparse.ArgumentParser:
    """Build the common argument parser shared by all ACT workers.

    description: short worker description shown in --help (the calling
    worker's name is appended automatically).

    Returns the configured argparse.ArgumentParser.
    """
    parser = argparse.ArgumentParser(
        allow_abbrev=False,
        description="{} ({})".format(description, worker_name()), epilog="""
 --config INI_FILE Override default locations of ini file
Arguments can be specified in ini-files, environment variables and
as command line arguments, and will be parsed in that order.
By default, workers will look for an ini file in /etc/{1}
and ~/.config/{0}/{1} (or in $XDG_CONFIG_DIR if
specified).
Each worker will read the configuration from the "DEFAULT" section in the
ini file, and in its own section (in that order).
It is also possible to use environment variables for configuration.
Workers will look for environment variables for all arguments with
the argument name in uppercase and "-" replaced with "_".
E.g. set the CERT_FILE environment variable to configure the
--cert-file option.
""".format(CONFIG_ID, CONFIG_NAME), formatter_class=argparse.RawDescriptionHelpFormatter)
    # BUGFIX (user-facing help strings): "confiuration" -> "configuration",
    # "it's" -> "its" (epilog above) and "Cerfiticate" -> "Certificate" below.
    parser.add_argument('--http-timeout', dest='http_timeout', type=int,
                        default=120, help="Timeout")
    parser.add_argument('--proxy-string', dest='proxy_string', help="Proxy to use for external queries")
    parser.add_argument('--proxy-platform', dest='proxy_platform', action="store_true", help="Use proxy-string towards the ACT platform")
    parser.add_argument('--cert-file', dest='cert_file', help="Certificate to add if you are behind a SSL/TLS interception proxy.")
    parser.add_argument('--user-id', dest='user_id',
                        help="User ID")
    parser.add_argument('--act-baseurl', dest='act_baseurl',
                        help='ACT API URI')
    parser.add_argument("--logfile", dest="logfile",
                        help="Log to file (default = stdout)")
    parser.add_argument("--loglevel", dest="loglevel", default="info",
                        help="Loglevel (default = info)")
    parser.add_argument("--output-format", dest="output_format", choices=["str", "json"], default="json",
                        help="Output format for fact (default=json)")
    parser.add_argument('--http-user', dest='http_user', help="ACT HTTP Basic Auth user")
    parser.add_argument('--http-password', dest='http_password', help="ACT HTTP Basic Auth password")
    parser.add_argument('--disabled', dest='disabled', action="store_true", help="Worker is disabled (exit immediately)")
    parser.add_argument('--origin-name', dest='origin_name', help="Origin name. This name must be defined in the platform")
    parser.add_argument('--origin-id', dest='origin_id', help="Origin id. This must be the UUID of the origin in the platform")
    return parser
def __mod_name(stack: inspect.FrameInfo) -> Text:
    """Return the module name for a stack frame ("_" is replaced by "-").

    Uses the frame's file base name with the extension stripped; underscores
    are normalized to hyphens to match the worker naming convention.
    """
    mod = inspect.getmodule(stack[0])
    # BUGFIX: os.path.splitext only strips the trailing extension; the old
    # .replace(".py", "") also mangled file names containing ".py" elsewhere.
    base = os.path.splitext(os.path.basename(mod.__file__))[0]
    return base.replace("_", "-")
def worker_name() -> Text:
    """Return the first external module that called this function, directly or indirectly."""
    # Resolve each frame's module name only once (the original called
    # __mod_name twice per frame: once in the filter, once for the value).
    names = [__mod_name(frame) for frame in inspect.stack()]
    modules = [name for name in names if name]
    # First module on the stack that differs from the innermost one
    # (modules[0] is the module this function lives in).
    return [name for name in modules if name != modules[0]][0]
def handle_args(parser: argparse.ArgumentParser) -> argparse.Namespace:
    """Thin wrapper around caep.handle_args that supplies this worker's
    config id, config file name and section name."""
    return caep.handle_args(parser, CONFIG_ID, CONFIG_NAME, worker_name())
def init_act(args: argparse.Namespace) -> act.api.Act:
    """Initialize and return an ACT API client from parsed arguments.

    If the worker is disabled, logs a warning and exits with status 0
    (the check is deliberately placed after Act() so logging is set up).
    """
    requests_kwargs: Dict[Text, Any] = {}
    if args.http_user:
        requests_kwargs["auth"] = (args.http_user, args.http_password)
    if args.proxy_string and args.proxy_platform:
        proxy = args.proxy_string
        requests_kwargs["proxies"] = {"http": proxy, "https": proxy}
    if args.cert_file:
        requests_kwargs["verify"] = args.cert_file
    api = act.api.Act(
        args.act_baseurl,
        args.user_id,
        args.loglevel,
        args.logfile,
        worker_name(),
        requests_common_kwargs=requests_kwargs,
        origin_name=args.origin_name,
        origin_id=args.origin_id)
    # Checked here — after Act() — so that logging is already configured.
    if args.disabled:
        warning("Worker is disabled")
        sys.exit(0)
    return api
def fatal(message: Text, exit_code: int = 1) -> None:
    """Write *message* to stderr and the error log, then exit with *exit_code*."""
    text = message.strip()
    sys.stderr.write(text + "\n")
    error(text)
    sys.exit(exit_code)
def fetch_json(url: str, proxy_string: Optional[str], timeout: int = 60, verify_https: bool = False) -> Any:
    """Fetch a remote URL and return the decoded JSON body.

    url: URL to fetch
    proxy_string: optional proxy on the format host:port (used for http + https)
    timeout: timeout in seconds for the query (default 60)
    verify_https: verify TLS certificates (default False; warnings suppressed)

    Raises UnknownResult on non-200 responses and propagates timeouts.
    """
    proxies = {
        'http': proxy_string,
        'https': proxy_string
    }
    options = {
        "verify": verify_https,
        "timeout": timeout,
        "proxies": proxies,
        "params": {}
    }
    if not verify_https:
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    try:
        req = requests.get(url, **options)
    except (urllib3.exceptions.ReadTimeoutError,
            requests.exceptions.ReadTimeout,
            socket.timeout) as err:
        # BUGFIX: the original logged ``req.url`` here, but ``req`` is never
        # bound when requests.get() raises (UnboundLocalError), and execution
        # then fell through to use ``req`` below.  Log the requested URL
        # instead and re-raise the timeout to the caller.
        error("Timeout ({0.__class__.__name__}), query: {1}".format(err, url))
        raise
    if req.status_code != 200:
        errmsg = "status_code: {0.status_code}: {0.content}"
        raise UnknownResult(errmsg.format(req))
    return req.json()
def sendmail(smtphost: str, sender: str, recipient: str, subject: str, body: str) -> None:
    """Send a plain-text UTF-8 email to *recipient* via *smtphost*."""
    msg = MIMEText(body, "plain", "utf-8")
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = recipient
    # Context manager quits/closes the connection even if sendmail() fails;
    # the original leaked the socket on error.
    with smtplib.SMTP(smtphost) as server:
        server.sendmail(sender, [recipient], msg.as_string())
| UTF-8 | Python | false | false | 6,985 | py | 17 | worker.py | 13 | 0.64209 | 0.637795 | 0 | 198 | 34.277778 | 137 |
fukenist/learn_hard_way | 12,068,858,111,831 | edca980c34a422c4df70e49c6d92749a9f126d11 | 49c964e7691b9ec37f4067e13be5e643a5782ca9 | /test9.py | e6d28d8b8329d5aa9faae73f7a809bf7ca25593a | [] | no_license | https://github.com/fukenist/learn_hard_way | 0ed09d4bf43f6460561bfd92cabd2359f5a67a07 | 548037277cf2bc77fd8fd9b177bf60234073c90f | refs/heads/master | 2023-03-02T01:04:06.200549 | 2021-02-09T20:26:52 | 2021-02-09T20:26:52 | 332,833,155 | 0 | 0 | null | false | 2021-01-25T19:11:04 | 2021-01-25T17:49:05 | 2021-01-25T17:52:53 | 2021-01-25T19:11:03 | 3 | 0 | 0 | 0 | Python | false | false | from sys import exit
def gold_room():
    """Ask how long the player could stay; non-numbers and answers of 50
    or more end in death, smaller answers end the game politely."""
    print("This room is full of shit. How long could you be there?")
    answer = input('> ')
    # Guard clauses: dead() terminates the program, so control never
    # continues past a failed check.
    if not answer.isnumeric():
        dead("Man, write a number")
    how_long = int(answer)
    if how_long >= 50:
        dead("You sick bastard!")
    print("Do you understand how stinky you will be after?")
    exit(0)
def hroom_room():
    """Dark room: meditate exactly once to gain inner sight, then open the
    door to reach the gold room.  Sleeping, meditating twice, or any other
    input kills the player."""
    for line in ("There is not light in this room",
                 "And you hear some noises",
                 "You need to find the exit",
                 "How would you use your inner vision?"):
        print(line)
    inner_eye = False
    while True:
        choice = input('> ')
        if choice == "sleep":
            dead("It was so cold there that you couldn't")
        elif choice == 'meditate':
            # Second meditation kills; the first one opens the inner eye.
            if inner_eye:
                dead("You could not concentrate and failed")
            print("You could out of your body and see the exit")
            inner_eye = True
        elif choice == "open door" and inner_eye:
            gold_room()
        else:
            dead("WTF")
def cthulhu_room():
    """Cthulhu encounter: any input containing 'flee' restarts the game,
    'head' ends it, anything else repeats the encounter."""
    # Iterative form of the original's tail recursion on bad input.
    while True:
        print("Here you see the great evil Cthulhu.")
        print("He, it, whatever stares at you and you go insane.")
        print("Do you flee for your life or eat your head?")
        choice = input('> ')
        if 'flee' in choice:
            start()
            return
        if "head" in choice:
            dead("Well that was tasty")
def dead(why):
    """Print *why* followed by "Good job!" and terminate the program."""
    print("{} Good job!".format(why))
    raise SystemExit(0)
def start():
    """Entry room: 'left' leads to the dark room, 'right' to Cthulhu,
    anything else ends the game."""
    print("Do you flee for your life or eat your head?")
    print("There is a door to your right and left.")
    print("Which one do you take?")
    choice = input('> ')
    # Dispatch table instead of the original if/elif chain.
    rooms = {'left': hroom_room, 'right': cthulhu_room}
    if choice in rooms:
        rooms[choice]()
    else:
        dead("You stumble around until rising")
# Kick off the game when the script is run.
start()
| UTF-8 | Python | false | false | 1,639 | py | 12 | test9.py | 11 | 0.649176 | 0.646736 | 0 | 82 | 18.97561 | 65 |
simmoncn/ml-class-python | 16,982,300,694,450 | a54d976b1f607f94b00c855fbd656b4699f2228f | 6af5cf28013378bc08ec2566a0657e0929be3056 | /solutions/ex2/ex2_sklearn.py | 8b0aee18c3d272f29b9f1633b6f5a2979f25b615 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | https://github.com/simmoncn/ml-class-python | 854b4ad1d4645235237803c65f089dba5a63b42b | a2ed01668196c5eef2e40edd0ede89e87fedf9cb | refs/heads/master | 2020-12-28T23:46:22.034025 | 2016-03-05T04:55:05 | 2016-03-05T04:55:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from ex2 import *
## Machine Learning Online Class - Exercise 2: Logistic Regression with sci-kit learn
# Instructions
# ------------
#
# This file contains code that helps you get started on the
# linear exercise. You will need to complete a short section of code to perform logistic regression with scikit-learn library
if __name__ == "__main__":
    # NOTE(review): this script mixes Python 2 and 3 idioms — raw_input()
    # below exists only on Python 2 (NameError on Python 3); confirm the
    # intended interpreter before running.
    plt.close('all')
    plt.ion() # interactive mode
    # ==================== Part 1: Plotting ====================
    print('Plotting data with + indicating (y = 1) examples and o indicating (y = 0) examples.')
    # Each row of the data file: exam 1 score, exam 2 score, admitted flag.
    data_file = '../../data/ex2/ex2data1.txt'
    data = np.loadtxt(data_file, delimiter=',')
    X = data[:,0:2]
    y = data[:,2]
    m = data.shape[0] # number of training examples
    y = y.reshape((-1,1)) # create column matrix
    # Note: You have to complete the code in function plotData
    plotData(X,y, ['Admitted', 'Not admitted'])
    plt.xlabel('Exam 1 score')
    plt.ylabel('Exam 2 score')
    plt.legend()
    raw_input('Program paused. Press enter to continue')
    # =================== Part 2: Logistic regression ===================
    # Note that C (inverse of regularization term) is specified to a larger value, rather than default 1.0
    # This means a weak regularization is used
    logistic = linear_model.LogisticRegression(C=1e5, max_iter=400)
    # ============= YOUR CODE HERE =============
    # Instructions: Use linear_model.LogisticRegression to run logistic regression
    logistic.fit(X, y)
    # ===========================================
    print('Number of iterations used: %f' % logistic.n_iter_)
    print('Coefficient found by linear_model: ');
    print(logistic.coef_)
    print('Intercept found by linear_model: ');
    print(logistic.intercept_)
    # Decision boundary: coef0*x1 + coef1*x2 + intercept = 0, solved for x2.
    # NOTE(review): plot_x spans X[:,1] (the second feature) — presumably it
    # was meant to span exam 1 scores (X[:,0]); confirm.
    plot_x = np.array([np.min(X[:,1]), np.max(X[:,1])])
    plot_y = (-1 / logistic.coef_[0, 1]) * (logistic.coef_[0, 0] * plot_x + logistic.intercept_)
    plt.plot(plot_x, plot_y, label='Decision Boundary')
    plt.legend()
    raw_input('Program paused. Press enter to continue')
    # ============= Part 3: Predict and Accuracies =============
    prob = logistic.predict_proba([[45, 85]])[0, 1]
    print('For a student with scores 45 and 85, we predict an admission probability of %f' % prob)
    print('Train Accuracy: %f' % logistic.score(X,y))
| UTF-8 | Python | false | false | 2,425 | py | 30 | ex2_sklearn.py | 28 | 0.612371 | 0.595052 | 0 | 69 | 34.144928 | 126 |
ohannes/pythonScripts | 12,403,865,554,169 | f0b47fc6cae52eedf1be154bacb83141c98d1de0 | 8c7a187ebfe858ff3f840602585d166b29fce576 | /movie.py | 8a8e6adaa7c6ddc6a226ff7d3c25334dc74f6d9c | [] | no_license | https://github.com/ohannes/pythonScripts | b756faa2e6d5314cb04c7afc0ca07f69027f59b2 | 5249b2735d8b2a9a2c6ad8a1ae625cb47f50d0b5 | refs/heads/master | 2020-04-06T04:20:29.565042 | 2015-07-19T17:40:39 | 2015-07-19T17:40:39 | 34,119,366 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time, os
# Seconds to wait between launches: 10 minutes.
SLEEP_PER_LOOP = 10 * 60
# Forever: sleep, then launch the interpreter at the hard-coded Windows path.
# NOTE(review): os.system blocks until the spawned process exits, so launches
# are serialized rather than overlapping — presumably intentional; confirm.
while True:
    time.sleep(SLEEP_PER_LOOP)
    os.system("c:\Python27\python.exe")
| UTF-8 | Python | false | false | 133 | py | 91 | movie.py | 79 | 0.631579 | 0.586466 | 0 | 7 | 17 | 39 |
eklyagina/RussianCorpusExtractor | 16,896,401,382,665 | 6c612edc164c80a7caf9be81e038167b0f1324e8 | 6d98058e58be5945877b7c38d0f86c04100f66b8 | /RussianCorpusExtractor.py | f0a8326a92fc1d6d527d77d43369be0ea4714b35 | [] | no_license | https://github.com/eklyagina/RussianCorpusExtractor | d683ac23d0e0b9429c5bcb8085047250d5797720 | 101b9a14df0441cb34b5e52be8977dc7e93264b0 | refs/heads/main | 2023-06-04T22:56:52.126648 | 2021-06-18T21:24:42 | 2021-06-18T21:24:42 | 378,105,080 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding: utf-8
# RussianCorpusExtractor
#
# This is a program that extracts all the examples (from every search-results page) of a Russian National Corpus query.
# As input, give the link to the FIRST results page (it will not work with any other page), the number of pages, and the name of the output file.
# As output, you will get a file with three columns: the text of the examples, the title and author information, and the date.
# made by Evgenia Klyagina, https://github.com/eklyagina
# Function RussianCorpusExtractor
# In[73]:
import re
import urllib.request
import pandas as pd
from tqdm import tqdm
import pprint
def RussianCorpusExctractor(number_of_pages, output_file_name, url):
    """Scrape example sentences from a Russian National Corpus search.

    number_of_pages: how many result pages to fetch (pages 0..number_of_pages-1)
    output_file_name: name of the output CSV file, without the ".csv" suffix
    url: address of the FIRST page of the search results

    Writes <output_file_name>.csv with columns "texts", "info" and "dates".
    """
    texts = []
    dates = []
    info = []
    # search all the pages
    # BUGFIX: the original iterated tqdm(rang(number_of_pages-1)) — ``rang``
    # is an undefined name (NameError) — and the -1 fetched one page fewer
    # than the documented page count.
    for page in tqdm(range(number_of_pages)):
        final_examples = []
        info_dates = []
        # open the link (the corpus paginates via the "p" query parameter)
        req = urllib.request.Request(url + "&p=" + str(page))
        with urllib.request.urlopen(req) as response:
            html = response.read().decode('utf-8')
        # extract the raw example blocks between the two HTML marker comments
        regEx = re.compile('<!-- trim_up.html end -->.*?<!-- homonymy.html start -->', flags=re.DOTALL)
        examples = regEx.findall(html)
        # strip HTML tags and collapse whitespace runs into single spaces
        regTag = re.compile('<.*?>', re.DOTALL)
        for ex in examples:
            clean_ex = regTag.sub("", ex)
            clean_ex = clean_ex.replace("\n", "")
            clean_ex = re.sub(r"\s+", " ", clean_ex)
            final_examples.append(clean_ex)
        # split each example into the text and the "[author ... year]" part
        for ex in final_examples:
            final_ex = ex.split("[")
            text = final_ex[0]
            # phrases like "на том [берегу]" must not be mistaken for
            # metadata: only fragments containing a 4-digit year qualify
            for phrase in final_ex[1:]:
                res = re.search(r"\d\d\d\d", phrase)
                if res:
                    info_dates.append(phrase)  # processed below
                else:
                    text = final_ex[0] + "[" + phrase
            texts.append(text)
        # extract the date (or date range) from each metadata fragment
        for i in range(len(info_dates)):
            res = re.search(r"\d\d\d\d-\d\d\d\d", info_dates[i])
            # date plus the unwelcome punctuation around it
            pattern = r",?\s+\(?\d\d\d\d-\d\d\d\d\)?]?"
            if not res:
                res = re.search(r"\d\d\d\d", info_dates[i])
                pattern = r",?\s+\(?\d\d\d\d\)?]?"
            dates.append(res.group())
            info.append(re.sub(pattern, "", info_dates[i]))  # remove found date(s)
    # make a table and dump it (utf-8-sig so spreadsheet apps detect encoding)
    data = pd.DataFrame({
        "texts": texts,
        "info": info,
        "dates": dates,
    })
    print(data)
    return data.to_csv(output_file_name + ".csv", encoding='utf-8-sig')
# In[75]:
#Check whether the function works
# In[76]:
# NOTE(review): this demo call runs at import time and performs network I/O
# (fetches result pages for the query "прыг"); consider guarding it with
# ``if __name__ == "__main__":``.
RussianCorpusExctractor(3, "pryg", 'https://processing.ruscorpora.ru/search.xml?env=alpha&api=1.0&mycorp=&mysent=&mysize=&mysentsize=&dpp=&spp=&spd=&mydocsize=&mode=main&lang=ru&sort=i_grtagging&nodia=1&text=lexform&req=%D0%BF%D1%80%D1%8B%D0%B3')
| UTF-8 | Python | false | false | 3,236 | py | 2 | RussianCorpusExtractor.py | 1 | 0.579535 | 0.571783 | 0 | 86 | 36.488372 | 246 |
def get_seconds_to_alarm(alarm_str):
    """Return seconds from now until the next occurrence of ``alarm_str``.

    alarm_str: wall-clock alarm time as "HH:MM" (24-hour clock).
    Returns a float in [0, 86400): today's alarm if still ahead, otherwise
    the same time tomorrow.
    """
    from datetime import datetime
    # Getting hour & min
    alarm_hr, alarm_mt = alarm_str.split(':')
    # Single clock read; the alarm is derived from the same instant (the
    # original called datetime.now() twice).
    now = datetime.now()
    # BUGFIX: also zero out microseconds — the original kept them, skewing
    # the result by up to a second.
    alarm = now.replace(hour=int(alarm_hr), minute=int(alarm_mt),
                        second=0, microsecond=0)
    seconds_to_next_alarm = alarm.timestamp() - now.timestamp()
    # If alarm was passed, calculate alarm for next day
    if seconds_to_next_alarm < 0:
        seconds_to_next_alarm += 24 * 60 * 60  # Adding 1 day: 24 hours x 60 minutes/hr x 60 seconds/min
    return seconds_to_next_alarm
| UTF-8 | Python | false | false | 644 | py | 13 | helpers.py | 10 | 0.661491 | 0.638199 | 0 | 20 | 31.2 | 104 |
Marco2018/leetcode | 11,252,814,362,921 | 00ca833878e4b20c193ccdf244b700690cc0b00a | 036c69a0c27cfdc0b0d9d6169caf027635b66325 | /dp/leetcode650.py | 90851539e9536b66fa544839ffec92730786f328 | [] | no_license | https://github.com/Marco2018/leetcode | 19b6b2c19452babf0198f8705e0dd1f31c321017 | c7dc709a7a9b83ef85fbc2d0aad7a8829f1035d1 | refs/heads/master | 2020-03-23T17:34:09.637831 | 2020-01-18T11:00:36 | 2020-01-18T11:00:36 | 141,865,308 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
class Solution:
def minSteps(self, n):
if n==1:
return 0
factor=2
while n%factor!=0:
factor+=1
if factor==n:
return n
else:
return int(self.minSteps(int(n/factor))+factor)
solution=Solution()
num=18
print(solution.minSteps(num))
| UTF-8 | Python | false | false | 350 | py | 553 | leetcode650.py | 541 | 0.511429 | 0.491429 | 0 | 16 | 19.875 | 59 |
MrHamdulay/csc3-capstone | 7,361,573,973,699 | b72f740136f53a54e9e5894f264c9e68ac6b1a53 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_2/srxluc001/question3.py | 64a2189e9954adf679bdd50140244aea10eaaf4a | [] | no_license | https://github.com/MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #program to calculate the are of a circle
#SRXLUC001
#Lucy Sure
#Assignment 2 question 3
import math
print("Approximation of pi:",round(math.pi,3))
import math
x=eval(input("Enter the radius:\n"))
Area=float(math.pi)*x*x
print("Area:",round(Area,3))
| UTF-8 | Python | false | false | 274 | py | 12,294 | question3.py | 12,244 | 0.675182 | 0.649635 | 0 | 13 | 18.461538 | 46 |
gabsw/campus-monitoring | 18,330,920,437,048 | 46da6585e680252085c78d271cd8146332778c66 | 1e28d53f9cf8d2872effed59c9659a0d79eecc61 | /docker/simulated_collector/simulate_data.py | fb3bc4e7a578e208b0e2fc9860dd9be6701b71cc | [] | no_license | https://github.com/gabsw/campus-monitoring | 57cc39a16c04b9ef264e9aeb5e826ac5ee486908 | b4f33c9d182a4212a55a9c1ac4a47f7ec3a7a63d | refs/heads/master | 2020-09-05T19:51:29.152783 | 2019-12-19T20:30:18 | 2019-12-19T20:30:18 | 220,197,805 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import os
import time
import json
from datetime import datetime
import random
import pika
def random_jitter(value, minimum, maximum, step=0.1):
    """Return *value* plus a uniform random offset in [-step, +step],
    clamped to the inclusive range [minimum, maximum]."""
    offset = (random.random() * 2 - 1) * step
    return min(max(value + offset, minimum), maximum)
def get_rabbit_conn_and_channel(host, username, password, queue='task_queue'):
    """Open a blocking RabbitMQ connection on port 5672 and return
    (connection, channel), with *queue* declared durable."""
    parameters = pika.ConnectionParameters(
        host, 5672, '/', pika.PlainCredentials(username, password))
    conn = pika.BlockingConnection(parameters)
    ch = conn.channel()
    ch.queue_declare(queue=queue, durable=True)
    return conn, ch
def close_rabbit_connection(connection_):
    """Close a previously opened RabbitMQ connection."""
    connection_.close()
def send_message(channel_, message, queue='task_queue'):
    """JSON-encode *message* and publish it to *queue* as a persistent message."""
    payload = json.dumps(message)
    persistent = pika.BasicProperties(delivery_mode=2)  # 2 = persistent delivery
    channel_.basic_publish(exchange='', routing_key=queue, body=payload,
                           properties=persistent)
def get_env_var(var_name):
    """Return the value of environment variable *var_name*
    (raises KeyError if it is not set)."""
    return os.environ[var_name]
# --- Module-level simulation loop ---
# Sensor ids come from the SENSOR_IDS env var (comma-separated integers);
# every sensor starts from the same baseline reading.
sensor_ids_str = get_env_var('SENSOR_IDS')
sensors = [{'Sensor_id': int(x), 'Temperature': 20.0, 'Humidity': 50.0, 'CO2': 600.0} for x in sensor_ids_str.split(',')]
# RabbitMQ connection settings, also taken from the environment.
rabbit_host = get_env_var('RABBIT_HOST')
rabbit_username = get_env_var('RABBIT_USERNAME')
rabbit_password = get_env_var('RABBIT_PASSWORD')
# waiting for rabbit container to go up
time.sleep(30)
connection, channel = get_rabbit_conn_and_channel(rabbit_host, rabbit_username, rabbit_password)
# Bounds used to clamp the simulated random-walk readings.
min_temp, max_temp = -10, 30
min_humidity, max_humidity = 0, 100
min_co2, max_co2 = 0, 1200
try:
    stop = False
    while not stop:
        for sensor in sensors:
            temperature = sensor['Temperature']
            humidity = sensor['Humidity']
            co2 = sensor['CO2']
            # Random-walk each reading within its bounds; temperature moves
            # slowly (step 0.05) and CO2 quickly (step 1.0).
            sensor['Temperature'] = random_jitter(temperature, min_temp, max_temp, step=0.05)
            sensor['Humidity'] = random_jitter(humidity, min_humidity, max_humidity)
            sensor['CO2'] = random_jitter(co2, min_co2, max_co2, step=1.0)
            sensor['Date'] = str(datetime.now())
            # just let the script die if there is no connection, it will just be restarted
            if not connection.is_open:
                stop = True
                print('Connection is closed. Terminating collector.')
                break
            send_message(channel, sensor)
        # Offset between data
        time.sleep(60)
except KeyboardInterrupt:
    pass
| UTF-8 | Python | false | false | 2,826 | py | 74 | simulate_data.py | 54 | 0.618896 | 0.601557 | 0 | 86 | 30.860465 | 121 |
# Shared module-level state for the Rich Presence client.
# rpclient: the RPC client instance once connected — presumably set elsewhere;
# confirm against the rest of the package.
rpclient = None
# timer: the app currently being timed and its start moment.
# (plain dict literal; the original wrapped it in a redundant dict(...) call)
timer = {
    'app': None,
    'start': 0
}
active_presence = None
# notified: plain False (the original used the redundant bool(False)).
notified = False
class client:
    """Placeholder type; carries no state or behavior in this chunk."""
    pass
| UTF-8 | Python | false | false | 140 | py | 8 | client.py | 4 | 0.6 | 0.592857 | 0 | 14 | 9 | 22 |
lauramoon/ProjectEuler | 266,287,980,506 | e09407879ee7a293cd16df796b646ffceba9a754 | 18d46606858f406814db2caf16ae3d7d36d3f2fd | /prob0017.py | 9758cc3cb7314b4fba07eda25d88ae82a9bc46c5 | [] | no_license | https://github.com/lauramoon/ProjectEuler | 9336b3785d13af2600d6c3079675443c35aaf028 | 8010aa9fa5b66029b5551c10030e39dcc1436308 | refs/heads/master | 2023-02-20T06:48:53.958299 | 2021-01-06T03:02:10 | 2021-01-06T03:02:10 | 287,099,123 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """ Project Euler Problem 17. https://projecteuler.net/problem=17"""
# Project Euler 17: total letters used writing 1..1000 in English words.
# Occurrence counts:
#   - each of 1-9: 90 times as a final digit + 100 times before "hundred"
#   - each of 10-19: 10 times
#   - each tens word 20,30,...,90: 10 * 10 times
#   - "and": 9 * 99 times; "hundred": 900 times; "one thousand": once
# Letter totals are derived from the word lengths instead of hand-added sums.
_units = ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
_teens = ["ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
          "sixteen", "seventeen", "eighteen", "nineteen"]
_tens = ["twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]

one_to_nine = sum(len(word) for word in _units)
ten_to_nineteen = sum(len(word) for word in _teens)
twenty_to_ninety = sum(len(word) for word in _tens)
the_and = len("and")
hundred = len("hundred")
one_thousand = len("onethousand")

total = (190 * one_to_nine + 10 * ten_to_nineteen + 100 * twenty_to_ninety
         + 9 * 99 * the_and + 900 * hundred + one_thousand)

# Intermediate totals for the 1-99 block and the hundreds.
one_to_ninety_nine = one_to_nine + ten_to_nineteen + 10 * twenty_to_ninety + 8 * one_to_nine
plain_hundreds = one_to_nine + 9 * hundred
fancy_hundreds = plain_hundreds * 99 + 3 * 99 * 9 + one_to_ninety_nine * 9
| UTF-8 | Python | false | false | 849 | py | 36 | prob0017.py | 35 | 0.558304 | 0.451119 | 0 | 23 | 34.913043 | 112 |
nagyist/sentry | 781,684,075,522 | 1030588372b0429cf2fee026be3956dc42eba5c1 | 35dbd536a17d7127a1dd1c70a2903ea0a94a84c2 | /tests/sentry/api/endpoints/test_group_hashes_split.py | ff88d297ebc931e1b057ac0c745464f8a1f72819 | [
"Apache-2.0",
"BUSL-1.1"
] | permissive | https://github.com/nagyist/sentry | efb3ef642bd0431990ca08c8296217dabf86a3bf | d9dd4f382f96b5c4576b64cbf015db651556c18b | refs/heads/master | 2023-09-04T02:55:37.223029 | 2023-01-09T15:09:44 | 2023-01-09T15:09:44 | 48,165,782 | 0 | 0 | BSD-3-Clause | true | 2022-12-16T19:13:54 | 2015-12-17T09:42:42 | 2015-12-17T09:42:48 | 2015-12-17T00:25:38 | 46,690 | 0 | 0 | 1 | Python | false | false | import pytest
from sentry.models import GroupHash
from sentry.testutils.helpers import Feature
# Module-level marker: every test in this file is currently skipped as flaky.
pytestmark = pytest.mark.skip(reason="too flaky")
@pytest.fixture(autouse=True)
def hierarchical_grouping_features():
    """Enable the grouping-tree-ui feature flag for every test in this module."""
    with Feature({"organizations:grouping-tree-ui": True}):
        yield
@pytest.fixture(autouse=True)
def auto_login(settings, client, default_user):
    """Log the test client in as the default user before every test."""
    assert client.login(username=default_user.username, password="admin")
@pytest.fixture
def store_stacktrace(default_project, factories):
    """Return a helper that stores a ZeroDivisionError event whose stacktrace
    consists of the given function names, using the mobile:2021-02-12
    (hierarchical) grouping config."""
    default_project.update_option("sentry:grouping_config", "mobile:2021-02-12")

    def inner(functions):
        # One synthetic frame per function name, innermost last.
        event = {
            "exception": {
                "values": [
                    {
                        "type": "ZeroDivisionError",
                        "stacktrace": {"frames": [{"function": f} for f in functions]},
                    }
                ]
            }
        }

        return factories.store_event(data=event, project_id=default_project.id)

    return inner
@pytest.mark.django_db
@pytest.mark.snuba
def test_basic(client, default_project, store_stacktrace, reset_snuba):
    """Round-trip of the hashes/split API: two events sharing a root hash are
    merged; PUT splits the group at that hash so new events land in separate
    groups; DELETE reverts the split and events merge again."""

    def _check_merged(seq):
        # Two stacktraces differing only mid-stack group together under the
        # shared root hash dc6e...de7 ("baz").
        event1 = store_stacktrace(["foo", "bar", "baz"])
        event2 = store_stacktrace(["foo", "bam", "baz"])
        assert event1.group_id == event2.group_id

        url = f"/api/0/issues/{event1.group_id}/hashes/split/"
        response = client.get(url, format="json")
        assert response.status_code == 200

        assert response.data == [
            {
                "childId": "3d433234e3f52665a03e87b46e423534",
                "childLabel": "bam | ...",
                "eventCount": seq,
                "id": "dc6e6375dcdf74132537129e6a182de7",
                "label": "baz",
                "latestEvent": response.data[0]["latestEvent"],
                "parentId": None,
            },
            {
                "childId": "ce6d941a9829057608a96725c201e636",
                "childLabel": "bar | ...",
                "eventCount": seq,
                "id": "dc6e6375dcdf74132537129e6a182de7",
                "label": "baz",
                "latestEvent": response.data[1]["latestEvent"],
                "parentId": None,
            },
        ]

        return event1.group_id

    group_id = _check_merged(1)

    # Split the group at the shared root hash.
    response = client.put(
        f"/api/0/issues/{group_id}/hashes/split/?id=dc6e6375dcdf74132537129e6a182de7",
    )
    assert response.status_code == 200

    # After the split, identical stacktraces now produce distinct groups.
    event1 = store_stacktrace(["foo", "bar", "baz"])
    event2 = store_stacktrace(["foo", "bam", "baz"])
    assert event1.group_id != event2.group_id
    assert event1.group_id != group_id

    response = client.get(f"/api/0/issues/{event1.group_id}/hashes/split/", format="json")
    assert response.status_code == 200
    assert response.data == [
        {
            "childId": "eeb8cfaa8b792f5dc0abbd3bd30f5e39",
            "childLabel": "<entire stacktrace>",
            "eventCount": 1,
            "id": "ce6d941a9829057608a96725c201e636",
            "label": "bar | ...",
            "latestEvent": response.data[0]["latestEvent"],
            "parentId": "dc6e6375dcdf74132537129e6a182de7",
            "parentLabel": "baz",
        }
    ]

    response = client.get(f"/api/0/issues/{event2.group_id}/hashes/split/", format="json")
    assert response.status_code == 200
    assert response.data == [
        {
            "childId": "e81cbeccec98c88097a40dd44ff20479",
            "childLabel": "<entire stacktrace>",
            "eventCount": 1,
            "id": "3d433234e3f52665a03e87b46e423534",
            "label": "bam | ...",
            "latestEvent": response.data[0]["latestEvent"],
            "parentId": "dc6e6375dcdf74132537129e6a182de7",
            "parentLabel": "baz",
        }
    ]

    # Unsplit both new groups again.
    response = client.delete(
        f"/api/0/issues/{event1.group_id}/hashes/split/?id=ce6d941a9829057608a96725c201e636",
    )
    assert response.status_code == 200

    response = client.delete(
        f"/api/0/issues/{event2.group_id}/hashes/split/?id=3d433234e3f52665a03e87b46e423534",
    )
    assert response.status_code == 200

    # TODO: Once we start moving events, the old group should probably no
    # longer exist.
    assert _check_merged(2) == group_id
@pytest.mark.django_db
@pytest.mark.snuba
def test_split_everything(client, default_project, store_stacktrace, reset_snuba):
    """
    We have two events in one group, one has a stacktrace that is a suffix of
    the other. This presents an edgecase where it is legitimate to split up the
    *last hash* of an event as that just happens to not be the last hash of
    some other event. In that case we need to ignore the split for events that
    don't have a next hash to group by.
    """

    # ["foo"] is a suffix of ["bar", "foo"]; both share the root hash.
    event = store_stacktrace(["foo"])
    event2 = store_stacktrace(["bar", "foo"])
    assert event2.group_id == event.group_id
    assert event.data["hierarchical_hashes"] == ["bab925683e73afdb4dc4047397a7b36b"]

    url = f"/api/0/issues/{event.group_id}/hashes/split/"
    response = client.get(url, format="json")
    assert response.status_code == 200
    assert response.data == [
        {
            "childId": None,
            "parentId": None,
            "eventCount": 1,
            "id": "bab925683e73afdb4dc4047397a7b36b",
            "label": "<entire stacktrace>",
            "latestEvent": response.data[0]["latestEvent"],
        },
        {
            "childId": "aa1c4037371150958f9ea22adb110bbc",
            "parentId": None,
            "eventCount": 1,
            "id": "bab925683e73afdb4dc4047397a7b36b",
            "label": "foo",
            "childLabel": "<entire stacktrace>",
            "latestEvent": response.data[1]["latestEvent"],
        },
    ]

    # Split at the root hash shared by both stacktraces.
    response = client.put(
        f"/api/0/issues/{event.group_id}/hashes/split/?id=bab925683e73afdb4dc4047397a7b36b",
    )
    assert response.status_code == 200

    # The suffix-only event has no deeper hash to fall back to, so it stays in
    # the original group; the longer stacktrace moves to a new group.
    event3 = store_stacktrace(["foo"])
    assert event3.group_id == event.group_id

    event4 = store_stacktrace(["bar", "foo"])
    assert event4.group_id not in (event.group_id, event2.group_id, event3.group_id)
@pytest.mark.django_db
@pytest.mark.snuba
def test_no_hash_twice(client, default_project, store_stacktrace, reset_snuba):
    """
    Regression test for a bug where we accidentally created too large arrays
    for groupby in underlying Snuba query, leading to duplicated nodes in the
    tree
    """
    # Two events in the same group via the shared root hash; the breakdown
    # must return a single node with eventCount == 2, not duplicates.
    event1 = store_stacktrace(["foo", "bar", "baz"])
    event2 = store_stacktrace(["boo", "bar", "baz"])
    assert event2.group_id == event1.group_id

    url = f"/api/0/issues/{event1.group_id}/hashes/split/"
    response = client.get(url, format="json")
    assert response.status_code == 200

    assert response.data == [
        {
            "childId": "ce6d941a9829057608a96725c201e636",
            "parentId": None,
            "childLabel": "bar | ...",
            "eventCount": 2,
            "id": "dc6e6375dcdf74132537129e6a182de7",
            "label": "baz",
            "latestEvent": response.data[0]["latestEvent"],
        }
    ]
@pytest.mark.django_db
@pytest.mark.snuba
def test_materialized_hashes_missing(client, default_project, store_stacktrace, reset_snuba):
    """
    Test that we are able to show grouping breakdown if hashes are materialized
    improperly. This can happen if there's a fallback grouping strategy, or due
    to yet-to-be-discovered bugs in merge/unmerge. In those cases we pretend we
    are at the outer level.

    Also test if splitting up the group to the next level works.
    """

    event1 = store_stacktrace(["foo", "bar", "baz"])
    event2 = store_stacktrace(["boo", "bam", "baz"])
    assert event2.group_id == event1.group_id

    # Simulate improper materialization by deleting the group's hashes.
    GroupHash.objects.filter(group_id=event1.group_id).delete()

    url = f"/api/0/issues/{event1.group_id}/hashes/split/"
    response = client.get(url, format="json")
    assert response.status_code == 200

    # Breakdown still works, treating the group as if at the outermost level.
    assert response.data == [
        {
            "childId": "3d433234e3f52665a03e87b46e423534",
            "parentId": None,
            "childLabel": "bam | ...",
            "eventCount": 1,
            "id": "dc6e6375dcdf74132537129e6a182de7",
            "label": "baz",
            "latestEvent": response.data[0]["latestEvent"],
        },
        {
            "childId": "ce6d941a9829057608a96725c201e636",
            "parentId": None,
            "childLabel": "bar | ...",
            "eventCount": 1,
            "id": "dc6e6375dcdf74132537129e6a182de7",
            "label": "baz",
            "latestEvent": response.data[1]["latestEvent"],
        },
    ]

    response = client.put(
        f"/api/0/issues/{event1.group_id}/hashes/split/?id=dc6e6375dcdf74132537129e6a182de7",
    )
    assert response.status_code == 200

    # There should only be one grouphash associated with this group now, the
    # split one.
    gh = GroupHash.objects.get(group_id=event1.group_id)
    assert gh.hash == "dc6e6375dcdf74132537129e6a182de7"
    assert gh.state == GroupHash.State.SPLIT
| UTF-8 | Python | false | false | 9,047 | py | 6,211 | test_group_hashes_split.py | 2,725 | 0.597657 | 0.52382 | 0 | 265 | 33.139623 | 93 |
sevas/csxj-crawler | 3,238,405,376,947 | 083f017f93b8f50716c819575f458d2ce51864a1 | 5e9e705cc78fb53e57af72b44ab0f064176a56b6 | /tests/datasources/csxj_test_tools.py | 219b9415e7a991243c76f60730444ab0a09c2685 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | https://github.com/sevas/csxj-crawler | 14e0791e7c6263c77c6584eeb42576105ef290a1 | 339fbe4b308f7dfbfca7869ec1da2bb0cfa791ca | refs/heads/master | 2021-01-18T20:28:47.797531 | 2013-12-07T09:06:52 | 2013-12-07T09:06:52 | 1,657,167 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
from nose.tools import eq_, ok_, nottest
import itertools as it
from csxj.common.tagging import TaggedURL
@nottest
def to_frozensets(taggedURLs):
    """Normalize (url, title, tags) triples into a set of hashable triples.

    The mutable tag collection in each triple is frozen so that two link
    lists can be compared as sets, independent of ordering.
    """
    frozen = set()
    for url, title, tags in taggedURLs:
        frozen.add((url, title, frozenset(tags)))
    return frozen
@nottest
def format_as_two_columns(expected, extracted):
    """Render two sorted TaggedURL lists side by side as an ASCII table.

    Used to produce readable failure messages; missing entries on either
    side are padded with a NONE placeholder.
    (Python 2 code: uses itertools.izip_longest and u"" literals.)
    """
    expected, extracted = sorted(expected), sorted(extracted)

    def make_pairs():
        # Top border, header row, separator.
        yield u".{0:100}-{0:100}.".format("-" * 100)
        yield u"|{0:^100}|{1:^100}|".format("EXPECTED", "EXTRACTED")
        yield u"|{0:100}|{0:100}|".format("-" * 100)
        # One row per pair of URLs, truncated to 100 characters each.
        for left, right in it.izip_longest(expected, extracted, fillvalue=TaggedURL(u"NONE", u"NONE", set())):
            yield u"|{0:100}|{1:100}|".format(left.URL[:100], right.URL[:100])
        # Bottom border.
        yield u"'{0:100}-{0:100}'".format("-" * 100)

    return u"\n".join(make_pairs())
@nottest
def assert_taggedURLs_equals(expected_links, extracted_links):
    """ Helper function to assess the equality of two lists of TaggedURL
    Provides somewhat helpful feedback to know what is actually different

    It does not compare the 'title' fields because character encoding
    issues make me cry. A lot.
    """
    # First check the counts, with a two-column table in the failure message.
    expected_count, extracted_count = len(expected_links), len(extracted_links)
    eq_(expected_count, extracted_count, msg=u"Expected {0} links. Extracted {1}\n{2}".format(expected_count, extracted_count, format_as_two_columns(expected_links, extracted_links)))

    # Compare as order-insensitive sets first; only on mismatch walk the
    # sorted pairs to report the first differing URL or tag set.
    if to_frozensets(expected_links) != to_frozensets(extracted_links):
        for expected, extracted in zip(sorted(expected_links), sorted(extracted_links)):
            eq_(expected[0], extracted[0], msg=u'URLs are not the same: \n\t{0} \n\t{1}'.format(expected[0], extracted[0]))
            #eq_(expected[1], extracted[1], msg='titles are not the same')
            # Tags only need to be a subset of what was extracted.
            ok_(expected[2].issubset(extracted[2]), msg=u'[{0}]({1}): tags are not the same: \n\tExpected: {2} \n\tGot: {3}'.format(expected[1], expected[0], expected[2], extracted[2]))
    else:
        ok_(True)
@nottest
def assert_content_equals(expected_content, extracted_content):
    """Assert that two extracted-content lists have the same paragraph count.

    Note: only the counts are compared; the element-wise comparison below is
    deliberately disabled.
    """
    n_expected = len(expected_content)
    n_extracted = len(extracted_content)
    eq_(n_expected, n_extracted, msg=u"Expected {0} paragraphs. Extracted {1}".format(n_expected, n_extracted))
    #eq_(expected_content, extracted_content)
| UTF-8 | Python | false | false | 2,366 | py | 145 | csxj_test_tools.py | 68 | 0.667794 | 0.634404 | 0 | 52 | 44.5 | 190 |
hamidzr/fog-alert | 13,958,643,715,547 | dfb54d71f559fbfec0ab9640a5a61f7475149e57 | 4619fbca79da41ca70208aa457eebd1e7e4a0b44 | /server/webServer.py | 8306b50a5263b6c2ad3a5101e9ceb1ef14b3d5e4 | [] | no_license | https://github.com/hamidzr/fog-alert | 9dfb564b2d78b25bddc16a95bece792bb8bea2fb | e0c4d3c95bc1e1fcc7150d5caafaa63b9fcdaaec | refs/heads/master | 2020-04-07T22:20:27.291183 | 2018-12-13T21:54:20 | 2018-12-13T21:54:20 | 158,765,952 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import request, Flask
from detectors.colorMaskDetector import has_intruder
from communicate import trigger_response
import configparser
import os
# Load runtime configuration from config.ini (a missing file is silently
# ignored by ConfigParser.read, leaving the parser empty).
# Fix: the previous `config.sections()` call discarded its result — removed.
config = configparser.ConfigParser()
config.read('config.ini')

# Directory where uploaded files are stored before analysis.
UPLOAD_DIR='./uploads'
def sanitize_name(uploaded_file):
    """Return a safe filename for an uploaded file.

    Strips any directory components from the client-supplied filename so a
    crafted name such as ``../../etc/passwd`` cannot escape UPLOAD_DIR
    (resolves the previous "TODO SEC actually sanitize": the raw filename
    was returned unchanged, allowing path traversal in the save path).
    """
    return os.path.basename(uploaded_file.filename)
# WSGI application instance used by all route decorators below.
app = Flask(__name__)
@app.route('/test', methods=['POST'])
def test_post():
    """Debug endpoint: log the incoming POST request and acknowledge it."""
    print('got a post request', request)
    return "got"
@app.route('/upload', methods=['POST'])
def upload():
    """Accept a file upload, save it, and run intruder detection on it.

    Expects a multipart form with a "file" field. Returns the string form
    of the detection result ("True"/"False"), or an error message when the
    field is missing. Triggers the alert response when a threat is found.
    """
    print(request.files)
    if 'file' not in request.files:
        return '"file" field does not exist.'
    posted_file = request.files['file']
    file_name = sanitize_name(posted_file)
    save_path = UPLOAD_DIR + '/' + file_name
    posted_file.save(save_path)
    # Run the color-mask detector on the saved image.
    detected = has_intruder(save_path)
    # TODO trigger w/o blocking or trigger response elsewhere
    print('threat:', detected)
    if detected: trigger_response()
    return str(detected)
@app.route('/')
def hello():
    """Health-check / landing endpoint."""
    return "Hello World!"
if __name__ == '__main__':
    # configparser stores all values as strings: cast PORT to int (a string
    # port would otherwise be passed through), and parse DEBUG as a boolean —
    # previously the raw string 'False' would have been truthy.
    app.run(port=int(config['DEFAULT'].get('PORT', 5000)),
            debug=config['DEFAULT'].getboolean('DEBUG', fallback=True),
            host='0.0.0.0')
| UTF-8 | Python | false | false | 1,223 | py | 20 | webServer.py | 10 | 0.690924 | 0.684383 | 0 | 49 | 23.959184 | 59 |
tkkuehn/aoc2018 | 11,836,929,872,224 | 93c01d2bd87342365e079bba9e226787bed22bfb | df2a50fd87c2b127f3a2a30ee2aeab94a64bdb6c | /day18/day18_2.py | 21544d2ba5d0f6600896420cf43a5f547f6f6db4 | [] | no_license | https://github.com/tkkuehn/aoc2018 | bd460f1557e998b04fb13bc95709b9cc4523d020 | 022ccc443e3f230bb32bbae62a996f22e7d87d67 | refs/heads/master | 2020-04-09T09:10:29.440999 | 2019-07-30T17:58:13 | 2019-07-30T17:58:13 | 160,223,286 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
# AoC 2018 day 18, part 2: fixed 50x50 lumber area.
x_len = 50
y_len = 50

# Read the puzzle input, one row of '.', '|', '#' characters per line.
contents = []
with open('./resources/input.txt', 'r') as f:
    contents = f.read().splitlines()

# Grid stored as {(x, y): acre_char}.
lumber_area = {}
for y in range(y_len):
    for x in range(x_len):
        lumber_area[(x, y)] = contents[y][x]
def read_area(area_tuple):
    """Rebuild a {(x, y): char} grid dict from its nested-tuple snapshot."""
    grid = {}
    for row in range(y_len):
        for col in range(x_len):
            grid[(col, row)] = area_tuple[row][col]
    return grid
def print_area(area):
    """Dump the grid to stdout, one row per line, in reading order."""
    for row in range(y_len):
        print(''.join(area[(col, row)] for col in range(x_len)))
def area_tuple(area):
    """Snapshot the mutable grid dict as a hashable tuple of row tuples."""
    return tuple(
        tuple(area[(col, row)] for col in range(x_len))
        for row in range(y_len)
    )
def neighbours(acre, current_area):
    """Yield the 8 Moore-neighbourhood coordinates around *acre*.

    NOTE(review): *current_area* is never used here; the parameter is kept
    only for call-site compatibility.
    """
    cx, cy = acre
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if dx == 0 and dy == 0:
                continue
            yield (cx + dx, cy + dy)
# Simulate the cellular automaton, caching state transitions to detect the
# cycle, then fast-forward to minute 1,000,000,000.
# NOTE(review): minutes_complete is assigned but never used.
minutes_complete = 0
state_cache = {}
cycle_start = None
repeats = 0
cycle_minute = 0
cycle_length = 0
current_area = lumber_area.copy()
next_area = {}
for minute in range(1000000000):
    print(minute)
    area = area_tuple(current_area)
    if area in state_cache:
        # Already-seen state: we are inside the cycle. Record where it starts
        # and, on the second visit to the start state, its length.
        if repeats == 0:
            cycle_start = area
            cycle_minute = minute
            print(cycle_minute)
        elif area == cycle_start:
            cycle_length = repeats - 1
            print(cycle_length)
            break
        repeats += 1
        current_area = read_area(state_cache[area])
        continue
    # Unseen state: compute the next generation acre by acre.
    for x in range(x_len):
        for y in range(y_len):
            acre = current_area[(x, y)]
            num_trees = 0
            num_yards = 0
            num_open = 0
            # Tally the 8 neighbours (off-grid coordinates are skipped).
            for neighbour in neighbours((x, y), current_area):
                if neighbour not in current_area:
                    continue
                neighbour_type = current_area[neighbour]
                if neighbour_type == '|':
                    num_trees += 1
                elif neighbour_type == '#':
                    num_yards += 1
                elif neighbour_type == '.':
                    num_open += 1
            # AoC day 18 transition rules: open -> trees with 3+ tree
            # neighbours; trees -> lumberyard with 3+ yard neighbours;
            # a lumberyard survives only next to 1+ yard and 1+ trees.
            if acre == '.':
                if num_trees >= 3:
                    next_area[(x, y)] = '|'
                else:
                    next_area[(x, y)] = '.'
            elif acre == '|':
                if num_yards >= 3:
                    next_area[(x, y)] = '#'
                else:
                    next_area[(x, y)] = '|'
            elif acre == '#':
                if (num_yards >= 1) and (num_trees >= 1):
                    next_area[(x, y)] = '#'
                else:
                    next_area[(x, y)] = '.'
    # Memoize this transition for the cycle-detection fast path above.
    state_cache[area_tuple(current_area)] = area_tuple(next_area)
    current_area = next_area.copy()
    next_area = {}
# Jump ahead by whole cycles, then replay the remainder via the cache.
cycle_num = round((1000000000 - cycle_minute) / cycle_length)
minute = (cycle_num + 1) * cycle_length
minutes_left = 1000000000 - minute
# NOTE(review): current_state is computed but never used; the replay below
# iterates current_area instead — confirm this is intentional.
current_state = read_area(cycle_start)
for i in range(minutes_left):
    area = area_tuple(current_area)
    current_area = read_area(state_cache[area])
# Final resource value: wooded acres * lumberyard acres.
# NOTE(review): num_yards is NOT reset here (it keeps its value from the
# simulation loop) and num_acres is initialized but never incremented —
# this looks like a bug; presumably `num_acres = 0` should be
# `num_yards = 0`. Also `type` shadows the builtin.
num_trees = 0
num_acres = 0
for x in range(x_len):
    for y in range(y_len):
        type = current_area[(x, y)]
        if type == '|':
            num_trees += 1
        elif type == '#':
            num_yards += 1
print_area(current_area)
print(num_trees * num_yards)
| UTF-8 | Python | false | false | 3,525 | py | 37 | day18_2.py | 35 | 0.489645 | 0.470638 | 0 | 127 | 26.755906 | 65 |
blaisep/ol-data-pipelines | 12,515,534,703,258 | 9fdf6d9b6dc9e5823ecfd052df105b2f5e3edf94 | a25638b09d633399031710e1690eeb5d1a3aa9cd | /src/ol_data_pipelines/lib/dagster_types.py | aa60f48186b5c69c6e9f7e71d7326ba74817ea95 | [
"BSD-3-Clause"
] | permissive | https://github.com/blaisep/ol-data-pipelines | e3648a076ae4b634270d65edb8d2a29bb72c9040 | 26766947d39c38f3eaf5dc3ad7a3a67295bdcda5 | refs/heads/main | 2023-09-02T09:35:31.137252 | 2021-07-14T15:23:04 | 2021-07-14T15:24:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pathlib import PosixPath
from dagster import PythonObjectDagsterType, usable_as_dagster_type
from google.cloud.bigquery.dataset import DatasetListItem
@usable_as_dagster_type
class DagsterPath(PosixPath):
    """POSIX filesystem path registered as a usable Dagster type."""
    pass  # noqa: WPS420, WPS604
# Dagster type wrapping BigQuery's DatasetListItem for solid inputs/outputs.
DatasetDagsterType = PythonObjectDagsterType(DatasetListItem, name="DatasetDagsterType")
| UTF-8 | Python | false | false | 337 | py | 42 | dagster_types.py | 30 | 0.824926 | 0.807122 | 0 | 12 | 27.083333 | 88 |
josealbm/PracticasPython | 8,839,042,716,145 | 55db9f6c2266bf7f877edb5a7f5bf7a6816c0ebe | fe78b00e01003800b8ec4ad864e7dde75da40a08 | /Práctica 7/P7E3.py | 843177b8311209255bd4c6f5c9a5451853ff6536 | [] | no_license | https://github.com/josealbm/PracticasPython | c327a72067f7dcb1f07f8ddbf4cb5d320d90a984 | 80cf92b84133953ea558b9c39897d1c466cf2f45 | refs/heads/master | 2020-04-27T13:53:54.758631 | 2019-03-07T17:13:52 | 2019-03-07T17:13:52 | 174,388,096 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Práctica 7 -Ej 3 - José Alberto Martín Marí
#Write a program that reads a sentence, passes it as a parameter to a
#procedure, and that procedure must display the sentence with one
#character on each line
def partir(p):
    """Print the string *p* one character per line.

    Fix: the previous body printed ``i + ""`` — a pointless string
    concatenation that added nothing and would fail for non-string items.
    """
    for ch in p:
        print(ch)
# Read a sentence from the user and print it one character per line.
frase=input("Escribe una frase: ")
partir(frase)
| UTF-8 | Python | false | false | 319 | py | 60 | P7E3.py | 60 | 0.662379 | 0.655949 | 0 | 12 | 24.916667 | 64 |
drbjim/target-bigquery | 8,409,545,987,741 | f40b6fab47203167d83f212053a7f14ab2a28f17 | 3883f1f25f637655b3942d916ce6c69c4afde10e | /tests/unittestcore.py | 59d70e9aa3d3fd60cb071b6a636588f9f26f0d66 | [
"BSD-3-Clause"
] | permissive | https://github.com/drbjim/target-bigquery | 2037bf6ca420233dc707b2011802bf722ccbcd32 | ef621c6101996d8cb48d83b5fed1b2e7bfb0d11c | refs/heads/master | 2023-07-15T02:45:32.641692 | 2021-08-05T00:09:19 | 2021-08-05T00:09:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import os
import sys
import unittest
from google.cloud import bigquery
class BaseUnitTest(unittest.TestCase):
    """Shared test-case base: wires env vars to sandbox config fixtures and
    cleans up the temp state file and BigQuery dataset around each test."""

    def setUp(self):
        # Point the target at a throwaway state file and remove any leftover.
        os.environ["TARGET_BIGQUERY_STATE_FILE"] = "state.json.tmp"
        self.delete_temp_state()
        # Service-account credentials and the various config fixtures all
        # live in the repo's sandbox/ directory.
        os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = os.path.normpath(
            os.path.join(os.path.dirname(__file__), "..", "sandbox", "sa.json"))
        os.environ["TARGET_CONFIG"] = os.path.join(
            os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'sandbox'),
            'target-config.json')
        os.environ["TARGET_CONFIG_CACHE"] = os.path.join(
            os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'sandbox'),
            'target_config_cache.json')
        os.environ["TARGET_CONFIG_CACHE_APPEND"] = os.path.join(
            os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'sandbox'),
            'target_config_cache_append.json')
        os.environ["TARGET_CONFIG_CONTAINS_TARGET_TABLES_CONFIG"] = os.path.join(
            os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'sandbox'),
            'target_config_contains_target_tables_config.json')
        os.environ["MALFORMED_TARGET_CONFIG"] = os.path.join(
            os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'sandbox'),
            'malformed_target_config.json')
        #TODO: make naming convention of target config files consistent "_" vs "-". Use "_" as it's easier to copy with a click
        # I think we would just need to rename target-config.json to target_config.json (also update it in README)
        # Populated lazily by set_cli_args when a config is supplied.
        self.client = None
        self.project_id = None
        self.dataset_id = None

    def tearDown(self):
        # Remove the temp state file and drop the test dataset.
        self.delete_temp_state()
        self.delete_dataset()

    def set_cli_args(self, ds_delete=True, *args, **kwargs):
        """Fake sys.argv for the target CLI; kwargs become --key value pairs.

        A "stdin" kwarg redirects sys.stdin to the given file instead. When a
        "config" kwarg is present (and ds_delete is true), the BigQuery client
        is initialized from it and the target dataset is pre-deleted.
        """
        arg = [arg for arg in args]
        for k, v in kwargs.items():
            if k == "stdin":
                sys.stdin = open(v, "r")
                continue
            arg.append("--{}".format(k))
            arg.append("{}".format(v))
        sys.argv[1:] = arg

        if "config" in kwargs and ds_delete:
            c = None
            with open(kwargs["config"], "r") as f:
                c = json.load(f)
            self.project_id = c["project_id"]
            self.dataset_id = c["dataset_id"]
            self.client = bigquery.Client(project=self.project_id)
            self.delete_dataset()

    def delete_temp_state(self):
        """Best-effort removal of the temp state file (missing file is fine)."""
        try:
            os.remove(os.environ["TARGET_BIGQUERY_STATE_FILE"])
        except:
            pass

    def delete_dataset(self):
        """Best-effort drop of the test dataset and all its tables."""
        try:
            self.client.delete_dataset(
                dataset=self.dataset_id,
                delete_contents=True
            )
        except:
            pass

    def get_state(self):
        """Return the emitted state messages, one parsed JSON object per line."""
        state = []
        with open(os.environ["TARGET_BIGQUERY_STATE_FILE"], "rb") as f:
            for line in f:
                state.append(json.loads(line))
        return state
| UTF-8 | Python | false | false | 3,116 | py | 15 | unittestcore.py | 9 | 0.568999 | 0.568678 | 0 | 91 | 33.241758 | 127 |
danielgunn/0478 | 3,341,484,589,550 | c10e4a8fc8ee3d366e963563673b699bd99e942d | fd0e1d55489ee4cb5d506fdc431b6f74072faa7e | /Sample_Games/circleSquareTriangle.py | 3785590959c85554d2b417b959061c7706473809 | [] | no_license | https://github.com/danielgunn/0478 | 27ef14155e2d3e3761c814fb5b55757113111a59 | 9666ab21219888fcb534a4c6c2f916a744e5b8a0 | refs/heads/master | 2020-05-24T05:33:08.837536 | 2020-03-25T03:16:30 | 2020-03-25T03:16:30 | 187,118,451 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
This is a sample game to demonstrate pygame to my students.
It isn't a fun game, but demonstrates some key functions
of pygame library
There are three shapes on the screen with three different motion dynamics
author: Daniel Gunn
background image from https://opengameart.org/content/background-5
completion sound from https://opengameart.org/content/completion-sound
"""
import pygame
from pygame.math import Vector2
from random import randint
# Window is a 460x460 square.
width = height = 460
# starting positions of triangle, circle and square are randomized
triangle_pos = Vector2(randint(10,width - 50), randint(10,height - 50))
circle_pos = Vector2(randint(10,width - 50), randint(10,height - 50))
square_pos = Vector2(randint(10,width - 50), randint(10,height - 50))
# background image
background = pygame.image.load("background.jpg")
background_rect = background.get_rect()
score = 0
# Pre-render the (unrotated) triangle once; rotations are derived from it
# each frame to avoid cumulative distortion.
triangle_original_surface = pygame.Surface((40, 40), pygame.SRCALPHA)
pygame.draw.polygon(triangle_original_surface, (210, 180, 0), [[0, 40], [20, 0], [40, 40]])
triangle = triangle_original_surface
pygame.init()
font = pygame.font.Font("freesansbold.ttf", 18)
# NOTE(review): 'gmae.wav' looks like a typo'd asset name — confirm the file
# on disk really is named this way.
pygame.mixer.music.load('gmae.wav')
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption('Circle Square Triangle')
running = True
clock = pygame.time.Clock()
circle_velocity = Vector2(-2.0, 1.2)
triangle_angle = 1
circle_color = 0
# Main loop: square is keyboard-driven, circle bounces off walls, triangle
# spins in place; touching the triangle scores, touching the circle resets.
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        # square motion
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                square_pos.x -= 10
            elif event.key == pygame.K_RIGHT:
                square_pos.x += 10
            elif event.key == pygame.K_UP:
                square_pos.y -= 10
            elif event.key == pygame.K_DOWN:
                square_pos.y += 10
    # circles motion
    if (circle_pos.x + circle_velocity.x < 0) or (circle_pos.x + circle_velocity.x > (width - 40)):
        circle_velocity.x = -circle_velocity.x
    if (circle_pos.y + circle_velocity.y < 0) or (circle_pos.y + circle_velocity.y > (height - 40)):
        circle_velocity.y = -circle_velocity.y
    circle_pos = circle_pos + circle_velocity
    screen.blit(background, background_rect)
    # square rect and circle rect
    square_rect = pygame.Rect(square_pos.x, square_pos.y, 40, 40)
    circle_rect = pygame.Rect(circle_pos.x, circle_pos.y, 40, 40)
    # if square and circle are touching
    if square_rect.colliderect(circle_rect):
        score = 0
        screen.fill((255,0,0))
        clock.tick(10) # slow motion death screen
    # rotating triangle 1 degree at a time
    triangle_angle = (triangle_angle + 1) % 360
    triangle = pygame.transform.rotate(triangle_original_surface, triangle_angle)
    triangle_rect = triangle.get_rect()
    triangle_rect.center = triangle_pos
    # if triangle and square are touching
    if triangle_rect.colliderect(square_rect):
        score += 1
        pygame.mixer.music.play(0)
        triangle_pos = Vector2(randint(10,width - 50), randint(10,height - 50))
    # keep changing the red shade of the circle
    circle_color = (circle_color + 1) % 255
    pygame.draw.rect(screen, (0, 128, 255), square_rect)
    pygame.draw.ellipse(screen, (circle_color, 128, 128), circle_rect)
    screen.blit(triangle, triangle_rect)
    text = font.render("Score " + str(score), True, (0, 128, 0))
    screen.blit(text,(10,10))
    pygame.display.flip()
    # Cap the frame rate at 60 FPS.
    clock.tick(60)
pygame.quit()
philipphock/BA_socketjs_proxy | 18,846,316,513,096 | a23566789bc544391b1967ff5b815ee47de44be5 | c679b53e3f39e84b85c1aa6bb949c8cbdc00f6c9 | /WebSocketProxy/src/lib/SockServer/SockServer.py | 79addc7fea6c7b081adefeceaa7befab229d2b70 | [] | no_license | https://github.com/philipphock/BA_socketjs_proxy | 81138f4faafce5747ab9101445fc04d45a9bed4d | 9b698ca1010088a9525006516b3295a0e2c165c8 | refs/heads/master | 2020-04-28T03:57:28.816205 | 2012-04-17T11:34:46 | 2012-04-17T11:34:46 | 3,597,233 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Created on Aug 7, 2011
@author: phil
'''
import select
from SockServer.utils.basic.BasicSockServer import BasicSockServer
from SockServer.utils.basic.BasicTaskManager import BasicTaskManager
import socket as socket_p
import socket
class SockServer(BasicSockServer):
    ''' A non threaded SocketServer derived from the BasicSockServer
        This implementation uses a Select method and a modified TaskManager to demultiplex the Sockets
        Use this Server for a low resource implementation with limited connections
    '''
    def __init__(self,SocketMode, ConnectionHandler, max_con=-1):
        # Always pairs with the NonThreadedTaskManager defined below;
        # max_con=-1 means no connection limit (see BasicTaskManager).
        BasicSockServer.__init__(self, SocketMode, NonThreadedTaskManager, ConnectionHandler, max_con)
#        t=RTimer(10,self.status)
#        t.start()
#    def status(self,t):
#        print(len(self._taskManager.clients))
    def start(self):
        """Run the select() loop: block on the listening socket plus all
        connected clients and hand each readable socket to accept()."""
        self._taskManager.init()
        try:
            while self._loop:
                read,_,_ = select.select([self.socket]+self._taskManager.clients,[],[])
                try:
                    self.accept(read)
                except socket_p.error:
                    # Ignore transient socket errors and keep serving.
                    pass
        finally:
            # Always release the listening socket on exit.
            self._close()

    def accept(self,con):
        """Forward every readable socket to the task manager's demux()."""
        for r in con:
            self._taskManager.demux(r)
class NonThreadedTaskManager(BasicTaskManager):
    ''' A special modified TaskManager with demultiplexing functions
        used by the non threaded SockServer
    '''
    def __init__(self,ConnectionHandler,server,max_con=-1,autokick=True):
        # autokick: when at capacity, disconnect the oldest client to make
        # room for a new one instead of refusing the connection.
        BasicTaskManager.__init__(self,ConnectionHandler, server, max_con)
        self._clients=[]        # connected client sockets, oldest first
        self._handlers={}       # maps (socket,) tuple -> handler instance
        self._socket=server.socket
        self._dieHandler=[]
        self._autokick=autokick

    @property
    def clients(self):
        # Exposed so SockServer.start() can select() on all client sockets.
        return self._clients

    def demux(self,r):
        ''' Each Connection could be an accept from the ServerSocket or
            a connection with an already connected socket
            This method decides which one and delegates to the correct handler
        '''
        if(r == self._socket):#new connection
            curCon=len(self._clients)
            con = self._socket.accept()
            if curCon==self.getCapacity():#full
                if self._autokick:
                    # Drop the most recently appended client to free a slot.
                    # NOTE(review): list.pop() removes the NEWEST client, not
                    # the oldest — confirm this is the intended kick policy.
                    self._handlers[(self.clients.pop(),)].doDisconnect()
                    self._clients.append(con[0])
                    h=self._handle((con[0],))
                else:
                    con[0].close()
            else:
                self._clients.append(con[0])
                h=self._handle((con[0],))
        else:#input
            h=self._handle((r,))
        # Forget sockets whose handler closed during handling.
        if h.isClosed():
            try:
                self._clients.remove(r)
            except ValueError:
                pass

    def _handle(self,connection):
        """Lazily create (and configure) a handler for the connection, run
        its handle() if still open, and return it."""
        if connection not in self._handlers:
            h = self._connectionHandler.__new__(self._connectionHandler)
            h.__init__()
            self._handlers[connection] = h
            h.config(socket=self._socket,con=connection,server=self._server,taskmanager=self)
            h.setup()
        h=self._handlers[connection]
        if not h.isClosed():
            h.handle()
        return h

    def die(self):
        """Shut down and close every connected client socket."""
        for c in self._clients:
            c.shutdown(socket.SHUT_RD)
            c.close()

    def config(self,**args):
        # Runtime reconfiguration; only the autokick flag is supported.
        self._autokick=args['autokick']
| UTF-8 | Python | false | false | 3,663 | py | 30 | SockServer.py | 29 | 0.537811 | 0.533989 | 0 | 124 | 28.25 | 102 |
eregnier/semver-git-hook | 10,290,741,645,638 | 93e5a00bbcf929704de070a511257e5734d35c19 | b7554fb15a4cad86502344f575d069b3c610717c | /semver_git_hook/__init__.py | d1d5de8339c53c6dc269d384b10c3cd387cf8e40 | [
"MIT"
] | permissive | https://github.com/eregnier/semver-git-hook | e7f6a495e37a2a19a8db26e23effca8fd5e5aa81 | 364e110e389a7ddc797be104e0e67f5706b626a6 | refs/heads/master | 2022-12-10T12:25:22.210948 | 2021-04-15T08:00:59 | 2021-04-15T08:00:59 | 192,092,785 | 3 | 0 | MIT | false | 2022-12-08T07:45:06 | 2019-06-15T15:15:01 | 2021-04-15T08:01:03 | 2022-12-08T07:45:05 | 66 | 3 | 0 | 2 | Python | false | false | import os
import stat
import sys
import subprocess
from pick import pick
from termcolor import colored
def main():
    """CLI entry point: dispatch on --init / --update, else print usage.

    --init takes precedence when both flags are given. Fixes user-facing
    typos in the help text ("versionning" -> "versioning", "inteactive" ->
    "interactive").
    """
    if "--init" in sys.argv:
        init()
    elif "--update" in sys.argv:
        hook()
    else:
        print(
            f"""
This tool helps managing project versioning by handling semver update in git pre commit hooks
Help:
 {colored('--init', 'yellow')} Install the hook in the current .git folder
 {colored('--update', 'yellow')} run the interactive semver file update
"""
        )
def init():
    """Install the semver update command into .git/hooks/pre-commit.

    Exits with status 1 when not run from a git repository root; appends to
    an existing hook script (skipping if already installed) and marks the
    hook file executable.
    """
    if not os.path.isdir(".git/hooks"):
        print(f'\t{colored("✗", "red")} This is not a git folder, cannot init hook.')
        sys.exit(1)
    else:
        hook_path = ".git/hooks/pre-commit"
        # `exec < /dev/tty` reattaches stdin so the interactive picker works
        # inside the hook.
        commands = "\nexec < /dev/tty\nsemver-git-hook --update\n"
        # Append when a pre-commit script already exists, otherwise create it.
        mode = "a" if os.path.isfile(hook_path) else "w"
        if mode == 'a':
            with open(hook_path) as f:
                if commands in f.read():
                    print(
                        f'\t{colored("ⓘ", "cyan")} Hook already installed, nothing to do.'
                    )
                    sys.exit(0)
        with open(hook_path, mode) as f:
            f.write(commands)
        # Ensure the hook file is executable by its owner.
        handler = os.stat(hook_path)
        os.chmod(hook_path, handler.st_mode | stat.S_IEXEC)
        print(f'\t{colored("✓", "green")} Hook installed')
def hook():
path = f'{os.environ.get("SEMVER_HOOK_PATH_PREFIX", "")}.version'
title = "Is this commit change patch, minor or major ?"
options = ["no-change", "patch", "minor", "major"]
option, index = pick(options, title)
if os.path.isfile(path):
with open(".version") as f:
major, minor, patch = map(int, f.read().strip().split("."))
else:
major, minor, patch = 0, 0, 0
version = f"{major}.{minor}.{patch}"
print(f'Current version is {colored(version, "cyan")}')
if option == "major":
major += 1
minor = 0
patch = 0
if option == "minor":
minor += 1
patch = 0
if option == "patch":
patch += 1
version = f"{major}.{minor}.{patch}"
print(f'New {colored(option, "green")} version is now {colored(version, "cyan")}')
with open(path, "w") as f:
f.write(version)
subprocess.run(['git', 'add', path])
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 2,353 | py | 5 | __init__.py | 2 | 0.544099 | 0.539412 | 0 | 76 | 29.881579 | 94 |
ardentTech/ardent_api | 2,156,073,597,199 | c9f034c7d0e78f5544dbb603f70a68508845e973 | fb883bccd0885b49fd62d9492386a66978e45cf8 | /api/contact/signals.py | 28ab2856ec0135dad9bf38d3faf505a732a9c1bf | [] | no_license | https://github.com/ardentTech/ardent_api | 29573baf741fa7397715751148b0930c3a4115f6 | 3b6de18ce419b719de32b1abb61957d93e313f5c | refs/heads/master | 2020-03-22T23:03:16.548914 | 2018-08-29T21:57:10 | 2018-08-29T21:57:10 | 64,079,771 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.core.mail import mail_admins
from django.db.models.signals import post_save
from django.dispatch import receiver
@receiver(post_save, sender="contact.ContactMessage")
def send_contactmessage_email(sender, instance, created, **kwargs):
if created:
body = """
Name: {}
Email: {}
Body: {}
""".format(instance.name, instance.email, instance.body)
mail_admins("New Contact Message", body, fail_silently=False)
| UTF-8 | Python | false | false | 487 | py | 34 | signals.py | 32 | 0.652977 | 0.652977 | 0 | 14 | 33.785714 | 69 |
showerhhh/leetcode_python | 9,062,381,027,750 | 9034e5edab07e4a450410ba81541c97f14ac5ee2 | b71200b4693a709b79d15ee5659a246173bef02d | /t557.py | 525e73a8bd5fa4bbfc0ed3243a5bc2d4bd6ee93d | [
"MIT"
] | permissive | https://github.com/showerhhh/leetcode_python | 658e9f951867780ad0cb41976ac1bf6464a9044a | ea26e756dd10befbc22d99c258acd8198b215630 | refs/heads/master | 2023-04-09T05:16:23.707274 | 2021-04-13T03:22:30 | 2021-04-13T03:22:30 | 357,031,850 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def reverseWords(self, s: str) -> str:
def reverse(s):
n = len(s)
for i in range(int(n / 2)):
temp = s[i]
s[i] = s[n - i - 1]
s[n - i - 1] = temp
words = s.split(' ')
for i in range(len(words)):
words[i] = words[i][::-1]
return ' '.join(words)
if __name__ == '__main__':
solution = Solution()
print(solution.reverseWords("Let's take LeetCode contest"))
| UTF-8 | Python | false | false | 500 | py | 99 | t557.py | 98 | 0.446 | 0.438 | 0 | 18 | 26.777778 | 63 |
JiangWeixian/OpencvLearning | 7,791,070,676,901 | 3bb415e7130a728864ea00ec2441760794aba6e9 | 76e2da503cee9266bb8d02f9c89cb28b00213c8e | /test_glob.py | e074a49b66437fbceb895a63ed753009f1c39a04 | [] | no_license | https://github.com/JiangWeixian/OpencvLearning | 6bfd7397545c9a179e0a92c0279bbbbabd1616f9 | 6f98fa51f2169618168f786e182462b9efd81386 | refs/heads/master | 2020-03-03T23:37:38.711107 | 2017-09-27T08:24:20 | 2017-09-27T08:24:20 | 95,267,587 | 0 | 1 | null | false | 2017-07-21T02:05:58 | 2017-06-24T01:15:44 | 2017-07-19T07:12:43 | 2017-07-21T02:05:58 | 4,249 | 0 | 1 | 0 | Python | null | null | import glob.glob as glob
glob | UTF-8 | Python | false | false | 30 | py | 33 | test_glob.py | 30 | 0.8 | 0.8 | 0 | 3 | 9.333333 | 24 |
GustavoHennig/python-hpOneView | 7,258,494,751,359 | 826457415256c60eb9aeb203962779ff558e660c | 0d328b00d48924cae0b5552d573b27c9cdc96507 | /examples/scripts/add-enclosure.py | 006b9c76c512bf6eff4e12d290604d03d8655f06 | [
"MIT"
] | permissive | https://github.com/GustavoHennig/python-hpOneView | 28d485bec924d6d1a2e98f79aa94b65d7446a2fc | 6a6039416e7f4f05f55f5c515a609e8fa3b3ee96 | refs/heads/master | 2021-01-18T04:55:37.245443 | 2017-02-20T13:22:40 | 2017-02-20T13:22:40 | 57,912,480 | 1 | 0 | null | true | 2016-05-19T14:50:17 | 2016-05-02T18:45:10 | 2016-05-09T20:46:01 | 2016-05-19T14:50:16 | 1,144 | 1 | 0 | 1 | Python | null | null | #!/usr/bin/env python
###
# (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if PY2:
if PYTHON_VERSION < (2, 7, 9):
raise Exception('Must use Python 2.7.9 or later')
elif PYTHON_VERSION < (3, 4):
raise Exception('Must use Python 3.4 or later')
import hpOneView as hpov
from pprint import pprint
def acceptEULA(con):
# See if we need to accept the EULA before we try to log in
con.get_eula_status()
try:
if con.get_eula_status() is True:
print('EULA display needed')
con.set_eula('no')
except Exception as e:
print('EXCEPTION:')
print(e)
def login(con, credential):
# Login with givin credentials
try:
con.login(credential)
except:
print('Login failed')
def import_enclosure(srv, sts, eg, ip, usr, pas, lic, baseline, force, forcefw,
monitor):
if not monitor:
# Locate the enclosure group
egroup = None
egroups = srv.get_enclosure_groups()
for group in egroups:
if group['name'] == eg:
egroup = group
break
if not egroup:
print('ERROR: Importing Enclosure')
print('Enclosure Group: "%s" has not been defined' % eg)
print('')
sys.exit()
print('Adding Enclosure')
# Find the first Firmware Baseline
uri = ''
if baseline:
spps = sts.get_spps()
for spp in spps:
if spp['isoFileName'] == baseline:
uri = spp['uri']
if not uri:
print('ERROR: Locating Firmeware Baseline SPP')
print('Baseline: "%s" can not be located' % baseline)
print('')
sys.exit()
if not uri:
add_enclosure = hpov.common.make_enclosure_dict(ip, usr, pas,
egroup['uri'],
licenseIntent=lic,
force=force,
forcefw=forcefw)
else:
add_enclosure = hpov.common.make_enclosure_dict(ip, usr, pas,
egroup['uri'],
licenseIntent=lic,
firmwareBaseLineUri=uri,
force=force,
forcefw=forcefw)
else:
add_enclosure = hpov.common.make_monitored_enclosure_dict(ip, usr, pas)
enclosure = srv.add_enclosure(add_enclosure)
if 'enclosureType' in enclosure:
print('Type: ', enclosure['enclosureType'])
print('Name: ', enclosure['name'])
print('Rack: ', enclosure['rackName'])
print('Serial Number: ', enclosure['serialNumber'])
else:
pprint(enclosure)
def main():
parser = argparse.ArgumentParser(add_help=True,
formatter_class=argparse.RawTextHelpFormatter,
description='''
This example script will import an enclosure into HPE OneView as a
managed device. The Onboard Administrator needs to have IP Address
configured for each module, and a valid Administrator account with a
password.
Usage: ''')
parser.add_argument('-a', dest='host', required=True,
help='''
HPE OneView Appliance hostname or IP address''')
parser.add_argument('-u', dest='user', required=False,
default='Administrator',
help='''
HPE OneView Username''')
parser.add_argument('-p', dest='passwd', required=True,
help='''
HPE OneView Password''')
parser.add_argument('-c', dest='cert', required=False,
help='''
Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
parser.add_argument('-y', dest='proxy', required=False,
help='''
Proxy (host:port format''')
parser.add_argument('-j', dest='domain', required=False,
default='Local',
help='''
HPE OneView Authorized Login Domain''')
parser.add_argument('-eu', dest='encusr', required=True,
help='''
Administrative username for the c7000 enclosure OA''')
parser.add_argument('-ep', dest='encpass', required=True,
help='''
Administrative password for the c7000 enclosure OA''')
parser.add_argument('-oa', dest='enc', required=True,
help='''
IP address of the c7000 to import into HPE OneView''')
parser.add_argument('-s', dest='spp', required=False,
help='''
SPP Baseline file name. e.g. SPP2013090_2013_0830_30.iso''')
parser.add_argument('-l', dest='license', required=False,
choices=['OneView', 'OneViewNoiLO'],
default='OneView',
help='''
Specifies whether the intent is to apply either OneView or
OneView w/o iLO licenses to the servers in the enclosure
being imported.
Accepted values are:
- OneView
- OneViewNoiLO ''')
parser.add_argument('-f', dest='force', action='store_true',
required=False,
help='''
When attempting to add an Enclosure to the appliance, the appliance will
validate the target Enclosure is not already claimed. If it is, this
parameter is used when the Enclosure has been claimed by another appliance
to bypass the confirmation prompt, and force add the import of the
Enclosure ''')
parser.add_argument('-fw', dest='forcefw', action='store_true',
required=False,
help='''
Force the installation of the provided Firmware Baseline. ''')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-eg', dest='egroup',
help='''
Enclosure Group to add the enclosure to''')
group.add_argument('-m', dest='monitor', action='store_true',
help='''
Import the enclosure as a Monitored enclosure. ''')
args = parser.parse_args()
credential = {'authLoginDomain': args.domain.upper(), 'userName': args.user, 'password': args.passwd}
con = hpov.connection(args.host)
srv = hpov.servers(con)
sts = hpov.settings(con)
if args.proxy:
con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
if args.cert:
con.set_trusted_ssl_bundle(args.cert)
login(con, credential)
acceptEULA(con)
import_enclosure(srv, sts, args.egroup, args.enc, args.encusr,
args.encpass, args.license, args.spp, args.force,
args.forcefw, args.monitor)
if __name__ == '__main__':
import sys
import argparse
sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| UTF-8 | Python | false | false | 8,713 | py | 41 | add-enclosure.py | 37 | 0.568576 | 0.561689 | 0 | 217 | 39.152074 | 105 |
Caocoporation/rt_chat_app | 2,465,311,269,006 | 06530f0d8080f41839a7e7fe178ca8ef0597cf5d | 89946e1167ce74a2fc04ad889eb416c4aa270d2e | /chat_app/rt_chat_app/custom_auth.py | a338dd5a04c930ec71400ba1470c27160791f65d | [] | no_license | https://github.com/Caocoporation/rt_chat_app | bca03f20fe355ada3d3f43702a6b8bfc7a55a866 | 9e8d0bbe18ff7ca989e50a6fcbccb3e4f877ffdb | refs/heads/main | 2023-06-02T17:13:20.988594 | 2021-06-25T15:28:53 | 2021-06-25T15:28:53 | 362,325,601 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import asyncio
from channels.db import database_sync_to_async
from django.contrib.auth import get_user_model
User = get_user_model()
@database_sync_to_async
def get_user (user_id):
try:
return User.objects.get(id=user_id)
except User.DoesNotExist:
return None
class QueryAuthMiddleware:
"""
Custom middleware (insecure) that takes user IDs from the query string.
"""
def __init__(self, inner):
# Store the ASGI application we were passed
self.inner = inner
def __call__(self, scope):
return QueryAuthMiddlewareInstance(scope, self)
class QueryAuthMiddlewareInstance:
"""
Inner class that is instantiated once per scope.
"""
def __init__(self, scope, middleware):
self.middleware = middleware
self.scope = dict(scope)
self.inner = self.middleware.inner
async def __call__(self, receive, send):
# Look up user from query string (you should also do things like
# checking if it is a valid user ID, or if scope["user"] is already
# populated).
self.scope['user'] = await get_user(int(self.scope['url_route']['kwargs']['user_id']))
# Instantiate our inner application
inner = self.inner(self.scope)
return await inner(receive, send) | UTF-8 | Python | false | false | 1,307 | py | 88 | custom_auth.py | 82 | 0.649579 | 0.649579 | 0 | 47 | 26.829787 | 94 |
allenai/allennlp | 12,463,995,099,890 | 9f6294cd4508f76d3feb6e3d55ecc32ecddb0df0 | 234c46d1249c9209f268417a19018afc12e378b4 | /allennlp/modules/seq2seq_encoders/pytorch_seq2seq_wrapper.py | d904cdbd85860dbac2dd7d6a389dfdd9598c12f1 | [
"Apache-2.0"
] | permissive | https://github.com/allenai/allennlp | 1f4bcddcb6f5ce60c7ef03a9a3cd6a38bdb987cf | 80fb6061e568cb9d6ab5d45b661e86eb61b92c82 | refs/heads/main | 2023-07-07T11:43:33.781690 | 2022-11-22T00:42:46 | 2022-11-22T00:42:46 | 91,356,408 | 12,257 | 2,712 | Apache-2.0 | false | 2022-11-22T00:42:47 | 2017-05-15T15:52:41 | 2022-11-21T21:47:06 | 2022-11-22T00:42:46 | 74,742 | 11,300 | 2,231 | 96 | Python | false | false | import torch
from torch.nn.utils.rnn import pad_packed_sequence
from allennlp.common.checks import ConfigurationError
from allennlp.modules.augmented_lstm import AugmentedLstm
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
from allennlp.modules.stacked_alternating_lstm import StackedAlternatingLstm
from allennlp.modules.stacked_bidirectional_lstm import StackedBidirectionalLstm
class PytorchSeq2SeqWrapper(Seq2SeqEncoder):
"""
Pytorch's RNNs have two outputs: the hidden state for every time step, and the hidden state at
the last time step for every layer. We just want the first one as a single output. This
wrapper pulls out that output, and adds a `get_output_dim` method, which is useful if you
want to, e.g., define a linear + softmax layer on top of this to get some distribution over a
set of labels. The linear layer needs to know its input dimension before it is called, and you
can get that from `get_output_dim`.
In order to be wrapped with this wrapper, a class must have the following members:
- `self.input_size: int`
- `self.hidden_size: int`
- `def forward(inputs: PackedSequence, hidden_state: torch.Tensor) ->
Tuple[PackedSequence, torch.Tensor]`.
- `self.bidirectional: bool` (optional)
This is what pytorch's RNN's look like - just make sure your class looks like those, and it
should work.
Note that we *require* you to pass a binary mask of shape (batch_size, sequence_length)
when you call this module, to avoid subtle bugs around masking. If you already have a
`PackedSequence` you can pass `None` as the second parameter.
We support stateful RNNs where the final state from each batch is used as the initial
state for the subsequent batch by passing `stateful=True` to the constructor.
"""
def __init__(self, module: torch.nn.Module, stateful: bool = False) -> None:
super().__init__(stateful)
self._module = module
try:
if not self._module.batch_first:
raise ConfigurationError("Our encoder semantics assumes batch is always first!")
except AttributeError:
pass
try:
self._is_bidirectional = self._module.bidirectional
except AttributeError:
self._is_bidirectional = False
if self._is_bidirectional:
self._num_directions = 2
else:
self._num_directions = 1
def get_input_dim(self) -> int:
return self._module.input_size
def get_output_dim(self) -> int:
return self._module.hidden_size * self._num_directions
def is_bidirectional(self) -> bool:
return self._is_bidirectional
def forward(
self, inputs: torch.Tensor, mask: torch.BoolTensor, hidden_state: torch.Tensor = None
) -> torch.Tensor:
if self.stateful and mask is None:
raise ValueError("Always pass a mask with stateful RNNs.")
if self.stateful and hidden_state is not None:
raise ValueError("Stateful RNNs provide their own initial hidden_state.")
if mask is None:
return self._module(inputs, hidden_state)[0]
batch_size, total_sequence_length = mask.size()
packed_sequence_output, final_states, restoration_indices = self.sort_and_run_forward(
self._module, inputs, mask, hidden_state
)
unpacked_sequence_tensor, _ = pad_packed_sequence(packed_sequence_output, batch_first=True)
num_valid = unpacked_sequence_tensor.size(0)
# Some RNNs (GRUs) only return one state as a Tensor. Others (LSTMs) return two.
# If one state, use a single element list to handle in a consistent manner below.
if not isinstance(final_states, (list, tuple)) and self.stateful:
final_states = [final_states]
# Add back invalid rows.
if num_valid < batch_size:
_, length, output_dim = unpacked_sequence_tensor.size()
zeros = unpacked_sequence_tensor.new_zeros(batch_size - num_valid, length, output_dim)
unpacked_sequence_tensor = torch.cat([unpacked_sequence_tensor, zeros], 0)
# The states also need to have invalid rows added back.
if self.stateful:
new_states = []
for state in final_states:
num_layers, _, state_dim = state.size()
zeros = state.new_zeros(num_layers, batch_size - num_valid, state_dim)
new_states.append(torch.cat([state, zeros], 1))
final_states = new_states
# It's possible to need to pass sequences which are padded to longer than the
# max length of the sequence to a Seq2SeqEncoder. However, packing and unpacking
# the sequences mean that the returned tensor won't include these dimensions, because
# the RNN did not need to process them. We add them back on in the form of zeros here.
sequence_length_difference = total_sequence_length - unpacked_sequence_tensor.size(1)
if sequence_length_difference > 0:
zeros = unpacked_sequence_tensor.new_zeros(
batch_size, sequence_length_difference, unpacked_sequence_tensor.size(-1)
)
unpacked_sequence_tensor = torch.cat([unpacked_sequence_tensor, zeros], 1)
if self.stateful:
self._update_states(final_states, restoration_indices)
# Restore the original indices and return the sequence.
return unpacked_sequence_tensor.index_select(0, restoration_indices)
@Seq2SeqEncoder.register("gru")
class GruSeq2SeqEncoder(PytorchSeq2SeqWrapper):
"""
Registered as a `Seq2SeqEncoder` with name "gru".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
stateful: bool = False,
):
module = torch.nn.GRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module=module, stateful=stateful)
@Seq2SeqEncoder.register("lstm")
class LstmSeq2SeqEncoder(PytorchSeq2SeqWrapper):
"""
Registered as a `Seq2SeqEncoder` with name "lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
stateful: bool = False,
):
module = torch.nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module=module, stateful=stateful)
@Seq2SeqEncoder.register("rnn")
class RnnSeq2SeqEncoder(PytorchSeq2SeqWrapper):
"""
Registered as a `Seq2SeqEncoder` with name "rnn".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
nonlinearity: str = "tanh",
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
stateful: bool = False,
):
module = torch.nn.RNN(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
nonlinearity=nonlinearity,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module=module, stateful=stateful)
@Seq2SeqEncoder.register("augmented_lstm")
class AugmentedLstmSeq2SeqEncoder(PytorchSeq2SeqWrapper):
"""
Registered as a `Seq2SeqEncoder` with name "augmented_lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
go_forward: bool = True,
recurrent_dropout_probability: float = 0.0,
use_highway: bool = True,
use_input_projection_bias: bool = True,
stateful: bool = False,
) -> None:
module = AugmentedLstm(
input_size=input_size,
hidden_size=hidden_size,
go_forward=go_forward,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=use_input_projection_bias,
)
super().__init__(module=module, stateful=stateful)
@Seq2SeqEncoder.register("alternating_lstm")
class StackedAlternatingLstmSeq2SeqEncoder(PytorchSeq2SeqWrapper):
"""
Registered as a `Seq2SeqEncoder` with name "alternating_lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int,
recurrent_dropout_probability: float = 0.0,
use_highway: bool = True,
use_input_projection_bias: bool = True,
stateful: bool = False,
) -> None:
module = StackedAlternatingLstm(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=use_input_projection_bias,
)
super().__init__(module=module, stateful=stateful)
@Seq2SeqEncoder.register("stacked_bidirectional_lstm")
class StackedBidirectionalLstmSeq2SeqEncoder(PytorchSeq2SeqWrapper):
"""
Registered as a `Seq2SeqEncoder` with name "stacked_bidirectional_lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int,
recurrent_dropout_probability: float = 0.0,
layer_dropout_probability: float = 0.0,
use_highway: bool = True,
stateful: bool = False,
) -> None:
module = StackedBidirectionalLstm(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
recurrent_dropout_probability=recurrent_dropout_probability,
layer_dropout_probability=layer_dropout_probability,
use_highway=use_highway,
)
super().__init__(module=module, stateful=stateful)
| UTF-8 | Python | false | false | 10,542 | py | 611 | pytorch_seq2seq_wrapper.py | 550 | 0.624739 | 0.619237 | 0 | 289 | 35.477509 | 99 |
AmitMozitronics/IPPT_WebApp | 4,140,348,509,272 | 27267fd12f9f722e9a0a5424ebcd38d74114930f | 7a5a6376b006fe66dd901f9ca420d71c9b3bee45 | /frontend/summaryview_v2/size_wise_output_v2.py | 7e0f60077f01b71000555a71b06a4d7c1fc1f51b | [] | no_license | https://github.com/AmitMozitronics/IPPT_WebApp | d40272f5ae66917e67e18088e10bd164a59a51a6 | 7f53fe14b78a4a0f2b8d5e1bf40553d17c118e48 | refs/heads/main | 2023-07-10T08:43:15.656318 | 2021-08-06T09:53:16 | 2021-08-06T09:53:16 | 393,331,796 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_protect
import datetime, pytz
from datetime import timedelta
from django.utils import timezone as tuto
from django.db.models import Sum, Avg, Count
from django.db.models.functions import TruncDay
import xlwt
from api.models import *
from pytz import timezone
def summary_2_view(request):
startdate = request.GET.get('startdate')
enddate = request.GET.get('enddate')
enddate_searched = enddate
machine = Machine.objects.filter(user=request.user)
machine_list = []
startdate_re=startdate
for i in machine:
machine_list.append(i.toDic()["machine_id"])
#----Souren<24/04/2021>---------------------------->
machine_details=Machine.objects.get(machine_id=machine_list[0])
start_time = machine_details.shift1_start_time
machine_total_shift = machine_details.machine_total_shift
if machine_total_shift==2:
end_time = machine_details.shift2_end_time
else:
end_time = machine_details.shift3_end_time
shift1_time_duration = machine_details.shift1_time_duration
shift2_time_duration = machine_details.shift2_time_duration
if machine_total_shift==3:
shift3_time_duration = machine_details.shift3_time_duration
else:
shift3_time_duration = 0
if(startdate == None and enddate == None):
timezone_localtime = tuto.localtime(tuto.now())
enddate = startdate = timezone_localtime.strftime('%Y-%m-%d')
start_time = start_time
end_time = end_time
if (shift1_time_duration+shift2_time_duration+shift3_time_duration==24.00):
shift_flag = True
enddate = (datetime.datetime.strptime(enddate, '%Y-%m-%d')+timedelta(days=1)).strftime('%Y-%m-%d')
local_zone = MachineTimeZone.objects.get(machine_id=machine_list[0]).machine_timezone
endtimestamp = int(timezone(local_zone).localize(datetime.datetime.strptime(str(enddate) + ' ' + str(end_time), "%Y-%m-%d %H:%M:%S")).timestamp())
starttimestamp = int(timezone(local_zone).localize(datetime.datetime.strptime(str(startdate) + ' ' + str(start_time), "%Y-%m-%d %H:%M:%S")).timestamp())
sizewiseoutput = PipeDataProcessed.objects.annotate(group_day=TruncDay('site_time')).filter(timestamp__gte = starttimestamp, timestamp__lte = endtimestamp, machine_id__in = machine_list).values('group_day', 'basic_metarial', 'standard_type_classification', 'pressure_type_specification', 'outer_diameter', 'outer_diameter_unit', 'length', 'length_unit', 'shift').annotate(Sum('weight'), Avg('weight'), Count('weight'))
sizewiseoutput.query.clear_ordering(force_empty = True)
sizewiseoutput=sizewiseoutput.order_by('group_day')
previous_day=datetime.datetime.strptime(startdate, '%Y-%m-%d')
for i in sizewiseoutput:
i['weight__sum'] /= 1000
i['weight__avg'] /= 1000
i['next_day']=i['group_day']
machine_total_shift=str(machine_total_shift)
return 'summary_2.html', {"machine": machine, "summary_name": "MIS2", "startdate": startdate_re, "enddate": enddate, "sizewiseoutput": sizewiseoutput, 'starttimestamp': starttimestamp, "enddate_searched":enddate_searched, 'machine_total_shift': machine_total_shift, 'sizewiseoutput':sizewiseoutput}
| UTF-8 | Python | false | false | 3,493 | py | 77 | size_wise_output_v2.py | 55 | 0.704838 | 0.692814 | 0 | 68 | 50.367647 | 422 |
Ayushshah2023/Ayushshah2023 | 6,562,710,062,679 | a2b442d9a3955af778d385a98c74d4f669ff90e1 | 0bc3619bf436f0bfa9ab656f4c748652cc80fa3b | /script1.py | 7fa9c3be5023785c5296a011c57b21aa563b5c5f | [] | no_license | https://github.com/Ayushshah2023/Ayushshah2023 | 02e6b5ea47ec650074162e248464b029fe05d498 | afe9c3b869be76ac8360840936c4782239897e21 | refs/heads/master | 2021-05-22T13:11:32.002149 | 2020-04-04T07:59:48 | 2020-04-04T07:59:48 | 252,941,447 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
A program that stors this book information
title,Author,Year,ISBn<Price,Ratings,Publications,Genre
User can view all records
Search any entry
Add any entry
Update an entry
Delete
CLose
add a rating to any book
Scroll bar
"""
import data_py
from tkinter import *
from PIL import ImageTk,Image
#window=Tk()
#canvas=Canvas(window,width=300,height=160)
#canvas.grid(row=12,column=3)
#image1 =ImageTk.PhotoImage(Image.open("layout.png"))
#canvas.create_image(anchor=NW,image=image1)
#canvas.grid(row=0,column=0)
#x=Label(image=image1)
#x.grid(row=0,column=0)
# = Label(top, image=filename)
#background_label.place(x=0, y=0, relwidth=1, relheight=1)
def get_selected_row(event):
try:
global selected_tuple
index1=list1.curselection()
print(index1)
index = int(''.join(map(str,index1)))
print(index)
selected_tuple=list1.get(index)
e1.delete(0,END)
e1.insert(END,selected_tuple[1])
e2.delete(0,END)
e2.insert(END,selected_tuple[2])
e3.delete(0,END)
e3.insert(END,selected_tuple[3])
e4.delete(0,END)
e4.insert(END,selected_tuple[4])
e5.delete(0,END)
e5.insert(END,selected_tuple[5])
e6.delete(0,END)
e6.insert(END,selected_tuple[6])
e7.delete(0,END)
e7.insert(END,selected_tuple[7])
e8.delete(0,END)
e8.insert(END,selected_tuple[8])
except IndexError:
pass
def view_command():
list1.delete(0,END)
for row in data_py.view():
list1.insert(END,row)
def search_command():
list1.delete(0,END)
for row in data_py.search(title_text.get(),author_text.get(),genre_text.get(),price_text.get(),ISBN_text.get(),Publn_text.get(),ratings_text.get(),Year_text.get()):
list1.insert(END,row)
def add_command():
data_py.insert(title_text.get(),author_text.get(),genre_text.get(),price_text.get(),ISBN_text.get(),Publn_text.get(),ratings_text.get(),Year_text.get())
list1.delete(0,END)
list1.insert(END,(title_text.get(),author_text.get(),genre_text.get(),price_text.get(),ISBN_text.get(),Publn_text.get(),ratings_text.get(),Year_text.get()))
def delete_command():
data_py.delete(selected_tuple[0])
def update_command():
data_py.update(selected_tuple[0],title_text.get(),author_text.get(),genre_text.get(),price_text.get(),ISBN_text.get(),Publn_text.get(),ratings_text.get(),Year_text.get())
window=Tk()
l1=Label(window,text="Welcome to my BookStore")
l1.grid(row=0, column=0, columnspan=4, rowspan=2,sticky=W+E+N+S, padx=5, pady=5)
l2=Label(window,text="Title")
l2.grid(row=2, column=0,columnspan=2, padx=5, pady=5)
l3=Label(window,text="Author")
l3.grid(row=2, column=3,columnspan=2, padx=5, pady=5)
l4=Label(window,text="Genre")
l4.grid(row=3, column=0,columnspan=2, padx=5, pady=5)
l5=Label(window,text="Price")
l5.grid(row=3, column=3,columnspan=2, padx=5, pady=5)
l6=Label(window,text="ISBN Number")
l6.grid(row=4, column=0,columnspan=2, padx=5, pady=5)
l7=Label(window,text="Publications")
l7.grid(row=4, column=3,columnspan=2, padx=5, pady=5)
l8=Label(window,text="Ratings(out of 5")
l8.grid(row=5, column=0,columnspan=2, padx=5, pady=5)
l9=Label(window,text="Year of Publn")
l9.grid(row=5, column=3,columnspan=2, padx=5, pady=5)
title_text=StringVar()
e1=Entry(window,textvariable=title_text)
e1.grid(row=2, column=2, padx=5, pady=5)
author_text=StringVar()
e2=Entry(window,textvariable=author_text)
e2.grid(row=2, column=5, padx=5, pady=5)
genre_text=StringVar()
e3=Entry(window,textvariable=genre_text)
e3.grid(row=3, column=2, padx=5, pady=5)
price_text=StringVar()
e4=Entry(window,textvariable=price_text)
e4.grid(row=3, column=5, padx=5, pady=5)
ISBN_text=StringVar()
e5=Entry(window,textvariable=ISBN_text)
e5.grid(row=4, column=2, padx=5, pady=5)
Publn_text=StringVar()
e6=Entry(window,textvariable=Publn_text)
e6.grid(row=4, column=5, padx=5, pady=5)
ratings_text=StringVar()
e7=Entry(window,textvariable=ratings_text)
e7.grid(row=5, column=2, padx=5, pady=5)
Year_text=StringVar()
e8=Entry(window,textvariable=Year_text)
e8.grid(row=5, column=5, padx=5, pady=5)
list1=Listbox(window,height=6,width=100)
list1.grid(row=6,column=0, rowspan=5 ,columnspan=5, padx=5, pady=5)
sb1=Scrollbar(window)
sb1.grid(row=6,column=5,rowspan=6)
list1.configure(yscrollcommand=sb1.set)
sb1.configure(command=list1.yview)
list1.bind('<<ListboxSelect>>',get_selected_row)
b1=Button(window,text="View all", width=12,command=view_command,padx=5)
b1.grid(row=11,column=0)
b2=Button(window,text="Search entry", width=12,command=search_command,padx=5)
b2.grid(row=11,column=1)
b3=Button(window,text="Add entry", width=12,command=add_command,padx=5)
b3.grid(row=11,column=2)
b4=Button(window,text="Update selected", width=12,command=update_command,padx=5)
b4.grid(row=11,column=3)
b5=Button(window,text="Delete selected", width=12,command=delete_command,padx=5)
b5.grid(row=11,column=4)
b6=Button(window,text="Close", width=12,command=window.destroy,padx=5)
b6.grid(row=11,column=5)
#canvas.pack()
window.mainloop()
| UTF-8 | Python | false | false | 5,237 | py | 3 | script1.py | 2 | 0.679969 | 0.632041 | 0 | 169 | 28.988166 | 174 |
johnhsq/thequants | 11,209,864,643,842 | c9bee51dcd7ae88c9c5a06a84e653060c8be2fef | e9eaeda8ed7699d97e8b17411a03444346299567 | /03.calculate.returns/calculate.returns.py | f044a2a950205e7f9d3b8155c1c300a1df5bb699 | [] | no_license | https://github.com/johnhsq/thequants | 8097da41ba435d5c9a12f71c56455152857c61b8 | 1e45cf94183c16abb56203ec4f508f470c3aa612 | refs/heads/master | 2021-06-23T17:59:42.565388 | 2021-04-23T16:32:25 | 2021-04-23T16:32:25 | 225,282,437 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
close = pd.DataFrame(
{
'ABC': [1, 5, 3, 6, 2],
'EFG': [12, 51, 43, 56, 22],
'XYZ': [35, 36, 36, 36, 37],},
pd.date_range('10/01/2018', periods=5, freq='D'))
print(close)
daily_ret=(close-close.shift(1))/close.shift(1)
print(daily_ret) | UTF-8 | Python | false | false | 288 | py | 23 | calculate.returns.py | 20 | 0.541667 | 0.416667 | 0 | 11 | 25.272727 | 53 |
AyoubBelhadji/CSSPy | 2,164,663,554,771 | 47bd20c43a16e51c09d5057e9cb2c78a75fcc683 | 624f6d4cbed3aa7b3af789d7d44643a092b38818 | /CSSPy/projection_dpp_sampler.py | b92e8c2192ec59c7911a85d3b93b43c1bd03c628 | [
"MIT"
] | permissive | https://github.com/AyoubBelhadji/CSSPy | 8b0f2b0794e22b23d16585696ff56de4cd8836cc | df66e48c7b39f4cc193b9793aa1813000b2c8b80 | refs/heads/master | 2021-06-23T12:22:11.433469 | 2019-08-20T09:29:28 | 2019-08-20T09:29:28 | 148,138,792 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import scipy.io
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from copy import deepcopy
import scipy.io
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import random, linalg, dot, diag, all, allclose
import timeit
from scipy.sparse.linalg import svds
class projection_DPP_Sampler:
def __init__(self, A, k, Q,N):
""" Create projection DPP Sampler for the matrix :math:`A` using the marginal kernel :math:`Q^TQ`.
:param A:
Matrix :math:`A`.
:type A:
array_type
:param Q:
Matrix containig the k right singular vectors of :math:`A`.
:type Q:
array_type
:param k:
The order of low rank apparoximation.
:type k:
int
:param N:
The dimension of subsampling (the number of columns) of A.
:type N:
int
"""
self.A = A
self.Q = Q
self.N = N
self.Q_temp = deepcopy(Q)
self.k = k
self.sampling_list = []
self.column_selected_temp = np.zeros(k)
self.sampling_round = 0
self.lvs_array = self.Estimate_Leverage_Scores()
def OneRound(self):
sampled_indices = np.random.choice(self.N, 1, replace=True, p=list(self.lvs_array))
column_selected = self.Q_temp[:,sampled_indices[0]]
self.column_selected_temp = 1/np.linalg.norm(column_selected)*column_selected
self.Project_On_The_Vector_Orthogonal()
self.sampling_round = self.sampling_round +1
return sampled_indices[0]
def MultiRounds(self):
self.Q_temp = deepcopy(self.Q)
self.sampling_list = []
self.column_selected_temp = np.zeros(self.k)
self.sampling_round = 0
for t in range(self.k):
self.lvs_array = self.Estimate_Leverage_Scores()
sampled_indices_ = self.OneRound()
self.sampling_list.append(sampled_indices_)
return self.A[:,self.sampling_list]
def Estimate_Leverage_Scores(self):
return 1/(self.k-self.sampling_round)*np.diag(np.dot(self.Q_temp.T,self.Q_temp))
def Project_On_The_Vector_Orthogonal(self):
projection_matrix = np.eye(self.k-self.sampling_round) - np.outer(np.transpose(self.column_selected_temp),self.column_selected_temp)
lambda_, W = np.linalg.eigh(projection_matrix)
self.Q_temp = self.Q_temp - np.outer(self.column_selected_temp,np.dot(self.column_selected_temp,self.Q_temp))
self.Q_temp = np.dot(W.T,self.Q_temp)
self.Q_temp = np.delete(self.Q_temp, 0, axis=0)
| UTF-8 | Python | false | false | 2,643 | py | 43 | projection_dpp_sampler.py | 26 | 0.614832 | 0.611048 | 0 | 68 | 37.838235 | 140 |
JorgitoR/Boletin-de-Notas-Django---Python | 6,090,263,636,960 | a06754d6a9917f051d0c9d1acbbfacbfbae16db6 | d531f4be5fd023b3bda0983bfd206ce2527f4153 | /Dashboard/migrations/0004_usuario_activo.py | 1c595eb7fedf0cd94275314ac0b74ecc3d936ca6 | [] | no_license | https://github.com/JorgitoR/Boletin-de-Notas-Django---Python | 6a98a183e551aa0d6f66e3821b787d9ae8bba0fe | e192c9c293a2a55f1f92cf68329de691d389905b | refs/heads/main | 2023-05-03T16:34:09.863682 | 2021-05-25T17:26:54 | 2021-05-25T17:26:54 | 366,082,026 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.2.2 on 2021-05-13 14:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Dashboard', '0003_auto_20210513_0748'),
]
operations = [
migrations.AddField(
model_name='usuario',
name='activo',
field=models.BooleanField(default=True),
),
]
| UTF-8 | Python | false | false | 389 | py | 29 | 0004_usuario_activo.py | 15 | 0.59126 | 0.511568 | 0 | 18 | 20.611111 | 52 |
iniyannatarajan/pyimager-olegs-mods | 15,126,874,854,454 | 39a439a47cc2f84b6c7df681b25b93fc73d3c7f4 | ead9b4e788bab8542a39215358e32c0df8a9533c | /LCU/checkhardware/lib/test_lib.py | 3e8fd47d0627ee2306bcebfa61a396e3f040066f | [] | no_license | https://github.com/iniyannatarajan/pyimager-olegs-mods | 34968740d47d8e38d65ff4005d215e6f87ea2a80 | 6218fdbed57c8c442238372cda408a69e91b2bb7 | refs/heads/master | 2022-11-12T21:50:18.539394 | 2015-08-18T09:30:25 | 2015-08-18T09:30:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# test lib
from general_lib import *
from lofar_lib import *
from search_lib import *
import os
import numpy as np
import logging
test_version = '1013e'
logger = None
def init_test_lib():
    """Bind this module's global ``logger`` to the root logger.

    Must be called once by the importing script after it has configured
    logging; all classes in this module log through the shared global.
    """
    global logger
    root_logger = logging.getLogger()
    logger = root_logger
    logger.debug("init logger test_lib")
#HBASubband = dict( DE601C=155, DE602C=155, DE603C=284, DE604C=474, DE605C=479, FR606C=155, SE607C=287, UK608C=155 )
#DefaultLBASubband = 301
#DefaultHBASubband = 155
# get and return recorded data in various ways
class cRCUdata:
    """Recorder/holder for RCU subband statistics.

    ``ssData`` has shape (n_rcus, n_frames, 512) and holds spectra in dB
    (10*log10 of the raw rspctl statistics; raw values below ``minvalue``
    are masked).  Even RCU numbers carry X polarity, odd numbers Y.
    """
    global logger
    def __init__(self, n_rcus, minvalue=1):
        # n_rcus   : number of RCUs to record/hold data for
        # minvalue : raw statistics below this value are masked out
        self.n_rcus = n_rcus
        # number of recorded spectra (seconds) per RCU, set by readFile()
        self.frames = 0
        self.minvalue = minvalue
        # dummy data until the first recording has been read
        self.ssData = np.ones((n_rcus, 1, 512), np.float64)
        # strongest usable test signal found by searchTestSignal()
        self.testSignal_X = -1.0
        self.testSubband_X = 0
        self.testSignal_Y = -1.0
        self.testSubband_Y = 0

    def record(self, rec_time=2, read=True):
        """Record ``rec_time`` seconds of statistics for all RCUs via rspctl.

        Old data files are removed first; when ``read`` is True the new
        files are loaded into ``self.ssData`` immediately.
        """
        removeAllDataFiles()
        logger.info("Wait %d seconds while recording data" %(rec_time))
        rspctl('--statistics --duration=%d --integration=1 --directory=%s --select=0:%d' %(rec_time, dataDir(), self.n_rcus-1), wait=0.0)
        if read:
            self.readFiles()

    def readFile(self, full_filename):
        """Read one rspctl statistics file, return its data as (frames, 512)."""
        data = np.fromfile(full_filename, dtype=np.float64)
        n_samples = len(data)
        if (n_samples % 512) > 0:
            logger.warn("data error: number of samples (%d) not multiple of 512 in '%f'" %(n_samples, full_filename))
        # integer division (Python 2): whole 512-sample spectra in the file
        self.frames = n_samples / 512
        data = data.reshape(self.frames,512)
        return (data)

    def readFiles(self):
        """Read all recorded data files into ``ssData``, masked, in dB."""
        files_in_dir = sorted(os.listdir(dataDir()))
        ssdata = np.array([self.readFile(os.path.join(dataDir(),file_name)) for file_name in files_in_dir])
        # mask zero values and convert to dBm
        self.ssData = np.log10(np.ma.masked_less(ssdata, self.minvalue)) * 10.0
        # subband 0 (DC) is never usable, mask it out
        self.ssData[:,:,0] = np.ma.masked

    def getSubbands(self, rcu):
        """Return the time-averaged spectrum (512 values) of one RCU."""
        return (self.ssData[int(rcu),:,:].mean(axis=0))

    def getSubbandX(self):
        """Return the time-averaged level of every X RCU on the X test subband.

        BUGFIX: this previously indexed with ``testSubband_Y``, so X data
        was taken at the Y test subband whenever the two differed.
        """
        return (self.ssData[0::2,:,self.testSubband_X].mean(axis=1))

    def getSubbandY(self):
        """Return the time-averaged level of every Y RCU on the Y test subband."""
        return (self.ssData[1::2,:,self.testSubband_Y].mean(axis=1))

    def getAll(self):
        """Return the full data cube for all RCUs."""
        return (self.ssData[:,:,:])

    def getAllX(self):
        """Return the data cube for the X (even) RCUs only."""
        return (self.ssData[0::2,:,:])

    def getAllY(self):
        """Return the data cube for the Y (odd) RCUs only."""
        return (self.ssData[1::2,:,:])

    def getMedianRcu(self, rcu):
        """Return the median of one RCU's time-averaged spectrum."""
        return(np.ma.median(self.ssData[int(rcu),:,:].mean(axis=0)))

    def searchTestSignal(self, subband=-1, minsignal=75.0, maxsignal=100.0):
        """Find a strong test subband for X and Y polarity.

        If ``subband`` is given and its station-median level exceeds
        ``minsignal`` on both polarities it is used directly; otherwise
        the strongest subband within (minsignal, maxsignal) is searched
        per polarity.  Results go to testSignal_X/Y and testSubband_X/Y.
        """
        # ss = median for all band over all rcu's
        # forget subband 0
        ssX = np.ma.median(self.ssData[::2,:,:].mean(axis=1),axis=0)
        ssY = np.ma.median(self.ssData[1::2,:,:].mean(axis=1),axis=0)
        if subband != -1:
            if ssX[subband] > minsignal and ssY[subband] > minsignal:
                self.testSignal_X = ssX[subband]
                self.testSubband_X = subband
                self.testSignal_Y = ssY[subband]
                self.testSubband_Y = subband
                return
            else:
                logger.debug("Test signal on subband %d not strong enough X=%3.1fdB Y=%3.1fdB" %(subband, ssX[subband], ssY[subband]))
        # no subband given or not in requested range, look for better
        for i in range(1,ssX.shape[0],1):
            if ssX[i] > minsignal and ssX[i] < maxsignal and ssX[i] > self.testSignal_X:
                self.testSignal_X = ssX[i]
                self.testSubband_X = i
            if ssY[i] > minsignal and ssY[i] < maxsignal and ssY[i] > self.testSignal_Y:
                self.testSignal_Y = ssY[i]
                self.testSubband_Y = i
        return
#### end of cRCUdata class ####
# class for checking TBB boards using tbbctl
class cTBB:
    """Checker for the station's Transient Buffer Boards (TBB) via tbbctl."""
    global logger
    def __init__(self, db):
        # db: station-test database object with board info and result fields
        self.db = db
        # number of TBB boards on this station
        self.nr = self.db.nr_tbb
        # set to False as soon as the TBBDriver turns out to be unreachable
        self.driverstate = True
        #tbbctl('--free')
    # check software versions of driver, tbbctl and TP/MP firmware
    def checkVersions(self, driverV, tbbctlV, tpV, mpV ):
        """Verify TBBDriver, tbbctl, TP and MP firmware versions.

        driverV/tbbctlV/tpV/mpV are the expected version strings; any
        mismatch is logged and the found version is stored in the database.
        """
        logger.info("=== TBB Version check ===")
        answer = tbbctl('--version')
        # check if Driver is available
        if answer.find('TBBDriver is NOT responding') > 0:
            logger.warn("No TBBDriver")
            self.driverstate = False
            self.db.tbbdriver_version = 0
        else:
            infolines = answer.splitlines()
            # NOTE(review): slice offsets 4:6 and 9:-1 assume the fixed line
            # layout of 'tbbctl --version' output — verify against tbbctl
            info = infolines[4:6] + infolines[9:-1]
            #print info
            if info[0].split()[-1] != driverV:
                logger.warn("Not right Driver version")
            # found versions are stored even when they match the expected ones
            self.db.tbbdriver_version = info[0].split()[-1]
            if info[1].split()[-1] != tbbctlV:
                logger.warn("Not right tbbctl version")
            self.db.tbbctl_version = info[1].split()[-1]
            # check if image_nr > 0 for all boards
            if str(info).count('V') != (self.nr * 4):
                logger.warn("WARNING, Not all boards in working image")
            for tbb in self.db.tbb:
                # per-board line: fields 3 and 4 hold 'TP <version>' / 'MP <version>'
                # — presumably; confirm against tbbctl output format
                board_info = info[2+tbb.nr].strip().split(' ')
                #print board_info
                if board_info[3].split()[1] != tpV:
                    logger.warn("Board %d Not right TP version" %(tbb.nr))
                tbb.tp_version = board_info[3].split()[1]
                if board_info[4].split()[1] != mpV:
                    logger.warn("Board %d Not right MP version" %(tbb.nr))
                tbb.mp_version = board_info[4].split()[1]
        logger.info("=== Done TBB Version check ===")
        # mark the version test as done in the database
        self.db.addTestDone('TV')
        return
    # Check memory address and data lines
    def checkMemory(self):
        """Run the DDR memory test (address and data lines) on every board."""
        logger.info("=== TBB Memory check ===")
        tbbctl('--free')
        for tbb in self.db.tbb:
            answer = tbbctl('--testddr=%d' %(tbb.nr))
            # the verdict is in the last three lines of the tbbctl output
            info = answer.splitlines()[-3:]
            ok = True
            if info[0].strip() != 'All Addresslines OK':
                logger.warn("Board %d Addresline error" %(tbb.nr))
                ok = False
            if info[1].strip() != 'All Datalines OK':
                logger.warn("Board %d Datalines error" %(tbb.nr))
                ok = False
            if not ok:
                tbb.memory_ok = 0
                # keep the full tbbctl output in the log for diagnosis
                logger.info(answer)
        logger.info("=== Done TBB Memory check ===")
        self.db.addTestDone('TM')
        return
#### end of cTBB class ####
# class for checking RSP boards using rspctl
class cRSP:
    """Checker for the station's RSP boards via rspctl."""
    global logger
    def __init__(self, db):
        # db: station-test database object with board info and result fields
        self.db = db
        # number of RSP boards on this station
        self.nr = self.db.nr_rsp

    # check software versions of driver, tbbctl and TP/MP firmware
    def checkVersions(self, bpV, apV ):
        """Verify BP and AP firmware versions of every RSP board.

        bpV/apV are the expected version strings.  Returns True when all
        boards run the expected images, False otherwise (and None when
        the RSPDriver is down before the test starts).
        """
        logger.info("=== RSP Version check ===")
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down, skip test")
            return
        # BUGFIX: initialize before the branch; the 'No Response' path used
        # to reach 'return (images_ok)' with the name unbound (NameError)
        images_ok = False
        answer = rspctl('--version')
        # check if Driver is available
        if answer.find('No Response') > 0:
            logger.warn("No RSPDriver")
            self.db.rspdriver_version = 0
        else:
            infolines = answer.splitlines()
            info = infolines
            images_ok = True
            # check if image_nr > 0 for all boards
            if str(info).count('0.0') != 0:
                logger.warn("WARNING, Not all boards in working image")
                images_ok = False
            for rsp in self.db.rsp:
                # per-board line: 'comma'-separated fields, field 1/2 hold
                # the BP/AP versions — presumably; confirm with rspctl output
                board_info = info[rsp.nr].split(',')
                if board_info[1].split()[3] != bpV:
                    logger.warn("Board %d Not right BP version" %(rsp.nr))
                    rsp.bp_version = board_info[1].split()[3]
                    images_ok = False
                if board_info[2].split()[3] != apV:
                    logger.warn("Board %d Not right AP version" %(rsp.nr))
                    rsp.ap_version = board_info[2].split()[3]
                    images_ok = False
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down while testing, skip result")
            return (False)
        logger.info("=== Done RSP Version check ===")
        self.db.addTestDone('RV')
        return (images_ok)
#### end of cRSP class ####
# class for testing LBA antennas
class cLBA:
    """Test driver for the LBA antennas of a station.

    Runs oscillation, noise, spurious-signal and RF-signal checks on the
    selected LBA antennas and records the results in the station-test
    database (self.db / self.lba).
    """
    global logger
    # mode='lba_low' or 'lba_high'
    def __init__(self, db, lba):
        # db : station-test database, lba : LBA antenna-set object
        self.db = db
        self.lba = lba
        # two RCUs (X and Y) per antenna
        self.rcudata = cRCUdata(self.lba.nr_antennas*2)
        # Average normal value = 150.000.000 (81.76 dBm) -3dB +3dB
        # LOW/HIGH LIMIT is used for calculating mean value
        self.lowLimit = -3.0 #dB
        self.highLimit = 3.0 #dB
        # MEAN LIMIT is used to check if mean of all antennas is ok
        self.meanLimit = 66.0 #dB

    def turnOffAnt(self, ant_nr):
        """Switch off both RCUs of antenna ``ant_nr`` and mark them off."""
        ant = self.lba.ant[ant_nr]
        ant.x.rcu_off = 1
        ant.y.rcu_off = 1
        logger.info("turned off antenna %d RCU(%d,%d)" %(ant.nr_pvss, ant.x.rcu, ant.y.rcu))
        rspctl("--rcumode=0 --select=%d,%d" %(ant.x.rcu, ant.y.rcu), wait=2.0)
        rspctl("--rcuenable=0 --select=%d,%d" %(ant.x.rcu, ant.y.rcu), wait=2.0)
        return

    # check for oscillating tiles and turn off RCU
    # stop one RCU each run
    def checkOscillation(self, mode):
        """Find oscillating LBA RCUs; turn off the worst one per recording
        and repeat until a recording is clean."""
        logger.info("=== Start %s oscillation test ===" %(self.lba.label))
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down, skip test")
            return
        # switch to the requested rcumode only when not already active
        if self.db.rcumode != mode:
            self.db.rcumode = mode
            swapXY(state=0)
            turnoffRCUs()
            turnonRCUs(mode=mode, rcus=self.lba.selectList())
            self.lba.resetRcuState()
        clean = False
        while not clean:
            clean = True
            self.rcudata.record(rec_time=5)
            # result is a sorted list on maxvalue
            result = search_oscillation(self.rcudata.getAll(), delta=4.0, start_sb=120, stop_sb=400)
            if len(result) > 1:
                clean = False
                # entry 0 is skipped (see search_oscillation); take the worst of the rest
                rcu, peaks_sum, n_peaks, rcu_low = sorted(result[1:], reverse=True)[0] #result[1]
                ant = rcu / 2
                ant_polarity = rcu % 2
                logger.info("RCU %d LBA %d Oscillation sum=%3.1f peaks=%d low=%3.1fdB" %\
                           (rcu, self.lba.ant[ant].nr_pvss, peaks_sum, n_peaks, rcu_low))
                self.turnOffAnt(ant)
                if ant_polarity == 0:
                    self.lba.ant[ant].x.osc = 1
                else:
                    self.lba.ant[ant].y.osc = 1
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down while testing, skip result")
            return
        self.lba.oscillation_check_done = 1
        self.db.addTestDone('O%d' %(mode))
        logger.info("=== Done %s oscillation test ===" %(self.lba.label))
        return

    def checkNoise(self, mode, record_time, low_deviation, high_deviation, max_diff):
        """Record ``record_time`` seconds and flag low-noise, high-noise
        and jitter per antenna polarity, keeping the worst value seen."""
        logger.info("=== Start %s noise test ===" %(self.lba.label))
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down, skip test")
            return
        if self.db.rcumode != mode:
            self.db.rcumode = mode
            swapXY(state=0)
            turnoffRCUs()
            turnonRCUs(mode=mode, rcus=self.lba.selectList())
            self.lba.resetRcuState()
        # only logs which antennas are off; skipping happens in the loops below
        for ant in self.lba.ant:
            if ant.x.rcu_off or ant.y.rcu_off:
                logger.info("skip low-noise test for antenna %d, RCUs turned off" %(ant.nr))
        self.rcudata.record(rec_time=record_time)
        # result is a sorted list on maxvalue
        low_noise, high_noise, jitter = search_noise(self.rcudata.getAll(), low_deviation, high_deviation, max_diff)
        for n in low_noise:
            rcu, val, bad_secs, ref, diff = n
            ant = rcu / 2
            if self.lba.ant[ant].x.rcu_off or self.lba.ant[ant].y.rcu_off:
                continue
            #self.turnOffAnt(ant)
            logger.info("RCU %d Ant %d Low-Noise value=%3.1f bad=%d(%d) limit=%3.1f diff=%3.3f" %\
                       (rcu, self.lba.ant[ant].nr_pvss, val, bad_secs, self.rcudata.frames, ref, diff))
            if rcu%2 == 0:
                antenna = self.lba.ant[ant].x
            else:
                antenna = self.lba.ant[ant].y
            antenna.low_seconds += self.rcudata.frames
            antenna.low_bad_seconds += bad_secs
            # BUGFIX: compared against .x.low_val for both polarities before,
            # so the Y polarity tracked the X threshold
            if val < antenna.low_val:
                antenna.low_noise = 1
                antenna.low_val = val
                antenna.low_ref = ref
                antenna.low_diff = diff
        for n in high_noise:
            rcu, val, bad_secs, ref, diff = n
            ant = rcu / 2
            #self.turnOffAnt(ant)
            logger.info("RCU %d Ant %d High-Noise value=%3.1f bad=%d(%d) ref=%3.1f diff=%3.1f" %\
                       (rcu, self.lba.ant[ant].nr_pvss, val, bad_secs, self.rcudata.frames, ref, diff))
            if rcu%2 == 0:
                antenna = self.lba.ant[ant].x
            else:
                antenna = self.lba.ant[ant].y
            antenna.high_seconds += self.rcudata.frames
            antenna.high_bad_seconds += bad_secs
            # BUGFIX: same polarity mix-up as in the low-noise loop above
            if val > antenna.high_val:
                antenna.high_noise = 1
                antenna.high_val = val
                antenna.high_ref = ref
                antenna.high_diff = diff
        for n in jitter:
            rcu, val, ref, bad_secs = n
            ant = rcu / 2
            logger.info("RCU %d Ant %d Jitter, fluctuation=%3.1fdB normal=%3.1fdB" %(rcu, self.lba.ant[ant].nr_pvss, val, ref))
            if rcu%2 == 0:
                antenna = self.lba.ant[ant].x
            else:
                antenna = self.lba.ant[ant].y
            antenna.jitter_seconds += self.rcudata.frames
            antenna.jitter_bad_seconds += bad_secs
            if val > antenna.jitter_val:
                antenna.jitter = 1
                antenna.jitter_val = val
                antenna.jitter_ref = ref
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down while testing, skip result")
            return
        self.lba.noise_check_done = 1
        self.db.addTestDone('NS%d=%d' %(mode, record_time))
        logger.info("=== Done %s noise test ===" %(self.lba.label))
        return

    def checkSpurious(self, mode):
        """Search all antennas for spurious signals (unexpected peaks)."""
        logger.info("=== Start %s spurious test ===" %(self.lba.label))
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down, skip test")
            return
        if self.db.rcumode != mode:
            self.db.rcumode = mode
            swapXY(state=0)
            turnoffRCUs()
            turnonRCUs(mode=mode, rcus=self.lba.selectList())
            self.lba.resetRcuState()
        self.rcudata.record(rec_time=2)
        # result is a sorted list on maxvalue
        result = search_spurious(self.rcudata.getAll(), delta=3.0)
        for rcu in result:
            ant = rcu / 2
            ant_polarity = rcu % 2
            #self.turnOffAnt(ant)
            logger.info("RCU %d Ant %d pol %d Spurious" %(rcu, self.lba.ant[ant].nr_pvss, ant_polarity))
            if ant_polarity == 0:
                self.lba.ant[ant].x.spurious = 1
            else:
                self.lba.ant[ant].y.spurious = 1
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down while testing, skip result")
            return
        self.lba.spurious_check_done = 1
        self.db.addTestDone('SP%d' %(mode))
        logger.info("=== Done %s spurious test ===" %(self.lba.label))
        return

    def checkSignal(self, mode, subband, min_signal, low_deviation, high_deviation):
        """RF signal test: compare every antenna's level on a strong test
        subband against the station median; also detect 'down' antennas
        whose band top is shifted."""
        logger.info("=== Start %s RF test ===" %(self.lba.label))
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down, skip test")
            return
        if self.db.rcumode != mode:
            self.db.rcumode = mode
            # in modes 1/2 the X and Y inputs are swapped on the hardware
            if mode < 3:
                swapXY(state=1)
            else:
                swapXY(state=0)
            turnoffRCUs()
            turnonRCUs(mode=mode, rcus=self.lba.selectList())
            self.lba.resetRcuState()
        self.rcudata.record(rec_time=5)
        self.rcudata.searchTestSignal(subband=subband, minsignal=min_signal, maxsignal=90.0)
        logger.info("For X used test subband=%d (%3.1f dB) in mode %d" %\
                   (self.rcudata.testSubband_X, self.rcudata.testSignal_X, mode))
        logger.info("For Y used test subband=%d (%3.1f dB) in mode %d" %\
                   (self.rcudata.testSubband_Y, self.rcudata.testSignal_Y, mode))
        if self.rcudata.testSubband_X == 0 or self.rcudata.testSubband_Y == 0:
            logger.warn("LBA mode %d, No test signal found" %(mode))
            return
        ssdataX = self.rcudata.getSubbandX()
        ssdataY = self.rcudata.getSubbandY()
        #if np.ma.count(ssdataX) == 0 or np.ma.count(ssdataY) == 0:
            # all zeros (missing settings!!)
        #    return
        # use only values between lowLimit and highLimit for average calculations
        dataInBandX = np.ma.masked_outside(ssdataX, (self.rcudata.testSignal_X + self.lowLimit), (self.rcudata.testSignal_X + self.highLimit))
        medianValX = np.ma.median(dataInBandX)
        dataInBandY = np.ma.masked_outside(ssdataY, (self.rcudata.testSignal_Y + self.lowLimit), (self.rcudata.testSignal_Y + self.highLimit))
        medianValY = np.ma.median(dataInBandY)
        logger.info("used medianValX=%f" %(medianValX))
        logger.info("used medianValY=%f" %(medianValY))
        if medianValX < self.meanLimit or medianValY < self.meanLimit:
            self.lba.avg_2_low = 1
        self.lba.avg_x = medianValX
        self.lba.avg_y = medianValY
        self.lba.test_signal_x = medianValX
        self.lba.test_signal_y = medianValY
        self.lba.test_subband_x = self.rcudata.testSubband_X
        self.lba.test_subband_y = self.rcudata.testSubband_Y
        for ant in self.lba.ant:
            ant.x.test_signal = ssdataX[ant.nr]
            ant.y.test_signal = ssdataY[ant.nr]
            loginfo = False
            if ssdataX[ant.nr] < (medianValX + low_deviation):
                ant.x.too_low = 1
                # (near) zero signal points to an RCU problem
                if ssdataX[ant.nr] < 2.0:
                    ant.x.rcu_error = 1
                loginfo = True
            if ssdataX[ant.nr] > (medianValX + high_deviation):
                ant.x.too_high = 1
                loginfo = True
            if ssdataY[ant.nr] < (medianValY + low_deviation):
                ant.y.too_low = 1
                if ssdataY[ant.nr] < 2.0:
                    ant.y.rcu_error = 1
                loginfo = True
            if ssdataY[ant.nr] > (medianValY + high_deviation):
                ant.y.too_high = 1
                loginfo = True
            if loginfo:
                logger.info("%s %2d RCU %3d/%3d X=%5.1fdB Y=%5.1fdB" %(self.lba.label, ant.nr_pvss, ant.x.rcu, ant.y.rcu, ssdataX[ant.nr], ssdataY[ant.nr]))
        # mark lba as down if top of band is lower than normal and top is shifted more than 10 subbands to left or right
        down, shifted = searchDown(self.rcudata.getAll(), subband)
        for i in down:
            ant, max_x_sb, max_y_sb, mean_max_sb = i
            max_x_offset = max_x_sb - mean_max_sb
            max_y_offset = max_y_sb - mean_max_sb
            self.lba.ant[ant].x.offset = max_x_offset
            self.lba.ant[ant].y.offset = max_y_offset
            self.lba.ant[ant].down = 1
            logger.info("%s %2d RCU %3d/%3d Down, offset-x=%d offset-y=%d" %\
                       (self.lba.label, self.lba.ant[ant].nr_pvss, self.lba.ant[ant].x.rcu, self.lba.ant[ant].y.rcu, max_x_offset, max_y_offset))
        for i in shifted:
            rcu, max_sb, mean_max_sb = i
            ant = rcu / 2
            logger.info("%s %2d RCU %3d shifted top on sb=%d, normal=sb%d" %(self.lba.label, self.lba.ant[ant].nr_pvss, rcu, max_sb, mean_max_sb))
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down while testing, skip result")
            return
        self.lba.signal_check_done = 1
        self.db.addTestDone('S%d' %(mode))
        logger.info("=== Done %s RF test ===" %(self.lba.label))
        return
#### end of cLBA class ####
# class for testing HBA antennas
class cHBA:
global logger
def __init__(self, db, hba):
self.db = db
self.hba = hba
self.rcudata = cRCUdata(hba.nr_tiles*2)
self.rcumode = 0
    def turnOnTiles(self):
        # placeholder: turning tiles back on is not implemented here
        pass
def turnOffTile(self, tile_nr):
tile = self.hba.tile[tile_nr]
tile.x.rcu_off = 1
tile.y.rcu_off = 1
logger.info("turned off tile %d RCU(%d,%d)" %(tile.nr, tile.x.rcu, tile.y.rcu))
rspctl("--rcumode=0 --select=%d,%d" %(tile.x.rcu, tile.y.rcu), wait=2.0)
return
def turnOffBadTiles(self):
for tile in self.hba.tile:
if tile.x.rcu_off and tile.y.rcu_off:
continue
no_modem = 0
modem_error = 0
for elem in tile.element:
if elem.no_modem:
no_modem += 1
if elem.modem_error:
modem_error += 1
if tile.x.osc or tile.y.osc or (no_modem >= 8) or (modem_error >= 8):
self.turnOffTile(tile.nr)
return
    def checkModem(self, mode):
        """Check the communication modem of every HBA element.

        Sends two known delay control words (128 and 253) to all elements
        and compares the read-back values from 'rspctl --realdelays'.
        Elements answering '???' get no_modem incremented, any other
        mismatch increments modem_error.
        """
        logger.info("=== Start HBA modem test ===")
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down, skip test")
            return
        # switch to the requested rcumode only when not already active
        if self.db.rcumode != mode:
            self.db.rcumode = mode
            swapXY(state=0)
            turnoffRCUs()
            turnonRCUs(mode=mode, rcus=self.hba.selectList())
            self.hba.resetRcuState()
        time.sleep(2.0)
        # control words for all 16 elements, comma separated, no trailing comma
        ctrlstr1 = ('128,'* 16)[:-1]
        ctrlstr2 = ('253,'* 16)[:-1]
        for ctrl in (ctrlstr1, ctrlstr2):
            rsp_hba_delay(delay=ctrl, rcus=self.hba.selectList())
            data = rspctl('--realdelays', wait=0.0).splitlines()
            ctrllist = ctrl.split(',')
            for line in data:
                if line[:3] == 'HBA':
                    # RCU number is between the square brackets
                    rcu = int(line[line.find('[')+1:line.find(']')])
                    hba_nr = rcu / 2
                    ant_polarity = rcu % 2
                    # read-back control words follow the '=' sign
                    realctrllist = line[line.find('=')+1:].strip().split()
                    for elem in self.hba.tile[hba_nr].element:
                        if ctrllist[elem.nr-1] != realctrllist[elem.nr-1]:
                            logger.info("Modemtest Tile=%d RCU=%d Element=%d ctrlword=%s response=%s" %\
                                       (hba_nr, rcu, elem.nr, ctrllist[elem.nr-1], realctrllist[elem.nr-1]))
                            # three '?' characters means no answer from the modem at all
                            if realctrllist[elem.nr-1].count('?') == 3:
                                elem.no_modem += 1
                            else:
                                elem.modem_error += 1
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down while testing, skip result")
            return
        self.hba.modem_check_done = 1
        self.db.addTestDone('M')
        logger.info("=== Done HBA modem test ===")
        return
# check for summator noise and turn off RCU
    def checkSummatorNoise(self, mode):
        """Detect tiles with summator noise and turn them off.

        Records 15 seconds with all elements at control word 253 and runs
        search_summator_noise() on the X and Y data separately; flagged
        tiles get summator_noise set and are switched off.
        """
        logger.info("=== Start HBA tile based summator-noise test ===")
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down, skip test")
            return
        # switch to the requested rcumode only when not already active
        if self.db.rcumode != mode:
            self.db.rcumode = mode
            swapXY(state=0)
            turnoffRCUs()
            turnonRCUs(mode=mode, rcus=self.hba.selectList())
            self.hba.resetRcuState()
        # all 16 elements on, control word 253
        delay_str = ('253,'* 16)[:-1]
        rsp_hba_delay(delay=delay_str, rcus=self.hba.selectList())
        self.rcudata.record(rec_time=15)
        logger.debug("- test X -")
        sum_noise = search_summator_noise(self.rcudata.getAllX())
        for n in sum_noise:
            tile, cnt, n_peaks = n
            logger.info("RCU %d Tile %d Summator-Noise cnt=%3.1f peaks=%3.1f" %(self.hba.tile[tile].x.rcu, tile, cnt, n_peaks))
            self.hba.tile[tile].x.summator_noise = 1
            self.turnOffTile(tile)
        logger.debug("- test Y -")
        sum_noise = search_summator_noise(self.rcudata.getAllY())
        for n in sum_noise:
            tile, cnt, n_peaks = n
            logger.info("RCU %d Tile %d Summator-Noise cnt=%3.1f peaks=%3.1f" %(self.hba.tile[tile].y.rcu, tile, cnt, n_peaks))
            self.hba.tile[tile].y.summator_noise = 1
            self.turnOffTile(tile)
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down while testing, skip result")
            return
        self.hba.summatornoise_check_done = 1
        self.db.addTestDone('SN')
        logger.info("=== Done HBA tile based summator-noise test ===")
        return
# check for oscillating tiles and turn off RCU
# stop one RCU each run
    def checkOscillation(self, mode):
        """Find oscillating tiles; turn off the worst one per recording
        and repeat until a recording shows no oscillation anymore."""
        logger.info("=== Start HBA tile based oscillation test ===")
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down, skip test")
            return
        # switch to the requested rcumode only when not already active
        if self.db.rcumode != mode:
            self.db.rcumode = mode
            swapXY(state=0)
            turnoffRCUs()
            turnonRCUs(mode=mode, rcus=self.hba.selectList())
            self.hba.resetRcuState()
        # all 16 elements on, control word 253
        delay_str = ('253,'* 16)[:-1]
        rsp_hba_delay(delay=delay_str, rcus=self.hba.selectList())
        clean = False
        while not clean:
            clean = True
            self.rcudata.record(rec_time=8)
            # result is a sorted list on maxvalue
            result = search_oscillation(self.rcudata.getAll(), delta=6.0, start_sb=0, stop_sb=511) # start_sb=45, stop_sb=350
            if len(result) > 1:
                clean = False
                # entry 0 is skipped (see search_oscillation); take the worst of the rest
                rcu, max_sum, n_peaks, rcu_low = sorted(result[1:], reverse=True)[0] #result[1]
                tile = rcu / 2
                tile_polarity = rcu % 2
                logger.info("RCU %d Tile %d Oscillation sum=%3.1f peaks=%d low=%3.1f" %\
                           (rcu, tile, max_sum, n_peaks, rcu_low))
                self.turnOffTile(tile)
                if tile_polarity == 0:
                    self.hba.tile[tile].x.osc = 1
                else:
                    self.hba.tile[tile].y.osc = 1
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down while testing, skip result")
            return
        self.hba.oscillation_check_done = 1
        self.db.addTestDone('O%d' %(mode))
        logger.info("=== Done HBA tile based oscillation test ===")
        return
    def checkNoise(self, mode, record_time, low_deviation, high_deviation, max_diff):
        """Record ``record_time`` seconds and flag low-noise, high-noise
        and jitter per tile polarity, keeping the worst value seen.

        low_deviation/high_deviation/max_diff are passed on to
        search_noise(); tiles with both RCUs off are skipped in the
        low-noise bookkeeping.
        """
        logger.info("=== Start HBA tile based noise test ===")
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down, skip test")
            return
        # switch to the requested rcumode only when not already active
        if self.db.rcumode != mode:
            self.db.rcumode = mode
            swapXY(state=0)
            turnoffRCUs()
            turnonRCUs(mode=mode, rcus=self.hba.selectList())
            self.hba.resetRcuState()
        # only logs which tiles are off; skipping happens in the loops below
        for tile in self.hba.tile:
            if tile.x.rcu_off or tile.y.rcu_off:
                logger.info("skip low-noise test for tile %d, RCUs turned off" %(tile.nr))
        # all 16 elements on, control word 253
        delay_str = ('253,'* 16)[:-1]
        rsp_hba_delay(delay=delay_str, rcus=self.hba.selectList())
        self.rcudata.record(rec_time=record_time)
        # result is a sorted list on maxvalue
        low_noise, high_noise, jitter = search_noise(self.rcudata.getAll(), low_deviation, high_deviation, max_diff)
        for n in low_noise:
            rcu, val, bad_secs, ref, diff = n
            tile = rcu / 2
            if self.hba.tile[tile].x.rcu_off or self.hba.tile[tile].y.rcu_off:
                continue
            logger.info("RCU %d Tile %d Low-Noise value=%3.1f bad=%d(%d) limit=%3.1f diff=%3.3f" %\
                       (rcu, tile, val, bad_secs, self.rcudata.frames, ref, diff))
            # even RCU = X polarity, odd RCU = Y polarity
            if rcu%2 == 0:
                tile_polarity = self.hba.tile[tile].x
            else:
                tile_polarity = self.hba.tile[tile].y
            tile_polarity.low_seconds += self.rcudata.frames
            tile_polarity.low_bad_seconds += bad_secs
            # keep the worst (lowest) value seen so far
            if val < tile_polarity.low_val:
                tile_polarity.low_noise = 1
                tile_polarity.low_val = val
                tile_polarity.low_ref = ref
                tile_polarity.low_diff = diff
        for n in high_noise:
            rcu, val, bad_secs, ref, diff = n
            tile = rcu / 2
            logger.info("RCU %d Tile %d High-Noise value=%3.1f bad=%d(%d) limit=%3.1f diff=%3.1f" %\
                       (rcu, tile, val, bad_secs, self.rcudata.frames, ref, diff))
            if rcu%2 == 0:
                tile_polarity = self.hba.tile[tile].x
            else:
                tile_polarity = self.hba.tile[tile].y
            tile_polarity.high_seconds += self.rcudata.frames
            tile_polarity.high_bad_seconds += bad_secs
            # keep the worst (highest) value seen so far
            if val > tile_polarity.high_val:
                tile_polarity.high_noise = 1
                tile_polarity.high_val = val
                tile_polarity.high_ref = ref
                tile_polarity.high_diff = diff
        for n in jitter:
            rcu, val, ref, bad_secs = n
            tile = rcu / 2
            logger.info("RCU %d Tile %d Jitter, fluctuation=%3.1fdB normal=%3.1fdB" %(rcu, tile, val, ref))
            if rcu%2 == 0:
                tile_polarity = self.hba.tile[tile].x
            else:
                tile_polarity = self.hba.tile[tile].y
            tile_polarity.jitter_seconds += self.rcudata.frames
            tile_polarity.jitter_bad_seconds += bad_secs
            if val > tile_polarity.jitter_val:
                tile_polarity.jitter = 1
                tile_polarity.jitter_val = val
                tile_polarity.jitter_ref = ref
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down while testing, skip result")
            return
        self.hba.noise_check_done = 1
        self.db.addTestDone('NS%d=%d' %(mode, record_time))
        logger.info("=== Done HBA tile based noise test ===")
        return
    def checkSpurious(self, mode):
        """Search all tiles for spurious signals (unexpected spectral peaks)."""
        logger.info("=== Start HBA tile based spurious test ===")
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down, skip test")
            return
        # switch to the requested rcumode only when not already active
        if self.db.rcumode != mode:
            self.db.rcumode = mode
            swapXY(state=0)
            turnoffRCUs()
            turnonRCUs(mode=mode, rcus=self.hba.selectList())
            self.hba.resetRcuState()
        # all 16 elements on, control word 253
        delay_str = ('253,'* 16)[:-1]
        rsp_hba_delay(delay=delay_str, rcus=self.hba.selectList())
        self.rcudata.record(rec_time=2)
        # result is a sorted list on maxvalue
        result = search_spurious(self.rcudata.getAll(), delta=3.0)
        for rcu in result:
            # even RCU = X polarity, odd RCU = Y polarity
            tile = rcu / 2
            tile_polarity = rcu % 2
            logger.info("RCU %d Tile %d pol %d Spurious" %(rcu, tile, tile_polarity))
            if tile_polarity == 0:
                self.hba.tile[tile].x.spurious = 1
            else:
                self.hba.tile[tile].y.spurious = 1
        if not checkActiveRSPDriver():
            logger.warn("RSPDriver down while testing, skip result")
            return
        self.hba.spurious_check_done = 1
        self.db.addTestDone('SP%d' %(mode))
        logger.info("=== Done HBA spurious test ===")
        return
def checkSignal(self, mode, subband, min_signal, low_deviation, high_deviation):
logger.info("=== Start HBA tile based RF test ===")
if not checkActiveRSPDriver():
logger.warn("RSPDriver down, skip test")
return
if self.db.rcumode != mode:
self.db.rcumode = mode
swapXY(state=0)
turnoffRCUs()
turnonRCUs(mode=mode, rcus=self.hba.selectList())
self.hba.resetRcuState()
# check twice
# 128 ...
# 253 ...
for tile in self.hba.tile:
if tile.x.rcu_off or tile.y.rcu_off:
logger.info("skip signal test for tile %d, RCUs turned off" %(tile.nr))
for ctrl in ('128,', '253,'):
if ctrl == '128,': ctrl_nr = 0
elif ctrl == '253,': ctrl_nr = 1
logger.info("HBA signal test, ctrl word %s" %(ctrl[:-1]))
delay_str = (ctrl*16)[:-1]
rsp_hba_delay(delay=delay_str, rcus=self.hba.selectList())
self.rcudata.record(rec_time=2)
self.rcudata.searchTestSignal(subband=subband, minsignal=min_signal, maxsignal=150.0)
logger.info("HBA, X used test subband=%d avg_signal=%3.1f" %(self.rcudata.testSubband_X, self.rcudata.testSignal_X))
logger.info("HBA, Y used test subband=%d avg_signal=%3.1f" %(self.rcudata.testSubband_Y, self.rcudata.testSignal_Y))
if (self.rcudata.testSubband_X != 0) and (self.rcudata.testSubband_Y != 0):
ssdataX = self.rcudata.getSubbandX()
ssdataY = self.rcudata.getSubbandY()
avgX = self.rcudata.testSignal_X
avgY = self.rcudata.testSignal_Y
minX = ssdataX.min()
minY = ssdataY.min()
# if all elements in range
#if minX < (avgX + self.min_dB) and minY < (avgY + self.min_dB):
# continue
logger.debug("X data: min=%5.3f max=%5.3f avg=%5.3f" %(minX, ssdataX.max(), avgX))
logger.debug("Y data: min=%5.3f max=%5.3f avg=%5.3f" %(minY, ssdataY.max(), avgY))
if self.rcudata.testSubband_X == 0 or self.rcudata.testSubband_X == 0:
logger.warn("HBA, No valid test signal")
for tile in self.hba.tile:
tile.x.ref_signal[ctrl_nr] = 0
tile.y.ref_signal[ctrl_nr] = 0
self.hba.signal_check_done = 1
self.db.addTestDone('S%d' %(mode))
return
for tile in self.hba.tile:
if tile.x.rcu_off or tile.y.rcu_off:
continue
logger.debug("HBA Tile=%d : X=%3.1fdB Y=%3.1fdB" %\
(tile.nr, ssdataX[tile.nr], ssdataY[tile.nr]))
tile.x.ref_signal[ctrl_nr] = avgX
tile.y.ref_signal[ctrl_nr] = avgY
tile.x.test_subband[ctrl_nr] = self.rcudata.testSubband_X
tile.y.test_subband[ctrl_nr] = self.rcudata.testSubband_Y
tile.x.test_signal[ctrl_nr] = ssdataX[tile.nr]
tile.y.test_signal[ctrl_nr] = ssdataY[tile.nr]
loginfo = False
if ssdataX[tile.nr] < (avgX + low_deviation):
if ssdataX[tile.nr] < 2.0:
tile.x.no_signal = 1
elif ssdataX[tile.nr] > 55.0 and ssdataX[tile.nr] < 65.0:
tile.no_power = 1
else:
tile.x.too_low = 1
loginfo = True
if ssdataX[tile.nr] > (avgX + high_deviation):
tile.x.too_high = 1
loginfo = True
if ssdataY[tile.nr] < (avgY + low_deviation):
if ssdataY[tile.nr] < 2.0:
tile.y.no_signal = 1
elif ssdataY[tile.nr] > 55.0 and ssdataY[tile.nr] < 65.0:
tile.no_power = 1
else:
tile.y.too_low = 1
loginfo = True
if ssdataY[tile.nr] > (avgY + high_deviation):
tile.y.too_high = 1
loginfo = True
if loginfo:
logger.info("HBA Tile=%d Error: X=%3.1fdB Y=%3.1fdB" %\
(tile.nr, ssdataX[tile.nr], ssdataY[tile.nr]))
if not checkActiveRSPDriver():
logger.warn("RSPDriver down while testing, skip result")
return
self.hba.signal_check_done = 1
self.db.addTestDone('S%d' %(mode))
logger.info("=== Done HBA signal test ===")
return
# Next tests are element based
#
# 8bit control word
#
# bit-7 RF on/off 1 = on
# bit-6 delay 1 = 8 ns
# bit-5 delay 1 = 4 ns
# bit-4 delay 1 = 2 ns
# bit-3 delay 1 = 1 ns
# bit-2 delay 1 = 0.5 ns
# bit-1 LNA on/off 1 = off
# bit-0 LED on/off 1 = on
#
# control = 0 (signal - 30 db)
# control = 2 (signal - 40 db)
#
def checkElements(self, mode, record_time, subband,
noise_low_deviation, noise_high_deviation, noise_max_diff,
rf_min_signal, rf_low_deviation, rf_high_deviation,
skip_signal_test=False):
logger.info("=== Start HBA element based tests ===")
if not checkActiveRSPDriver():
logger.warn("RSPDriver down, skip test")
return
self.db.rcumode = mode
swapXY(state=0)
turnoffRCUs()
turnonRCUs(mode=mode, rcus=self.hba.selectList())
self.hba.resetRcuState()
n_rcus_off = 0
for ctrl in ('128', '253'):
if ctrl == '128': ctrl_nr = 0
elif ctrl == '253': ctrl_nr = 1
for elem in range(self.hba.tile[0].nr_elements):
logger.info("check elements %d, ctrlword=%s" %(elem+1, ctrl))
if n_rcus_off > 0:
rsp_rcu_mode(mode=mode, rcus=self.hba.selectList())
n_rcus_off = 0
for tile in self.hba.tile:
if tile.element[elem].no_modem or tile.element[elem].modem_error:
self.turnOffTile(tile.nr)
n_rcus_off += 1
logger.info("skip tile %d, modem error" %(tile.nr))
delay_str = ('2,'*elem + ctrl + ',' + '2,'*15)[:33]
rsp_hba_delay(delay=delay_str, rcus=self.hba.selectList())
clean = False
while not clean:
clean = True
self.rcudata.record(rec_time=record_time)
clean, n_off = self.checkOscillationElements(elem)
n_rcus_off += n_off
if n_off > 0: continue
n_off = self.checkSpuriousElements(elem)
n_rcus_off += n_off
if n_off > 0: continue
self.checkNoiseElements(elem, noise_low_deviation, noise_high_deviation, noise_max_diff)
if not skip_signal_test:
self.checkSignalElements(elem, ctrl_nr, subband, rf_min_signal, rf_low_deviation, rf_high_deviation)
else:
logger.info("skip signal test for mode %d" %(mode))
if not checkActiveRSPDriver():
logger.warn("RSPDriver down while testing, skip result")
return
self.hba.element_check_done = 1
self.db.addTestDone('EHBA')
logger.info("=== Done HBA element tests ===")
return
# Do a complete element test testing only the signal
def checkElementsSignal(self, mode, subband, rf_min_signal, rf_low_deviation, rf_high_deviation):
logger.info("=== Start HBA element based signal test in mode %d ===" %(mode))
if not checkActiveRSPDriver():
logger.warn("RSPDriver down, skip test")
return
self.db.rcumode = mode
swapXY(state=0)
turnoffRCUs()
turnonRCUs(mode=mode, rcus=self.hba.selectList())
self.hba.resetRcuState()
n_rcus_off = 0
for ctrl in ('128', '253'):
if ctrl == '128': ctrl_nr = 0
elif ctrl == '253': ctrl_nr = 1
for elem in range(self.hba.tile[0].nr_elements):
logger.info("check elements %d, ctrlword=%s" %(elem+1, ctrl))
if n_rcus_off > 0:
rsp_rcu_mode(mode=mode, rcus=self.hba.selectList())
n_rcus_off = 0
for tile in self.hba.tile:
if tile.element[elem].no_modem or tile.element[elem].modem_error:
self.turnOffTile(tile.nr)
n_rcus_off += 1
logger.info("skip tile %d, modem error" %(tile.nr))
delay_str = ('2,'*elem + ctrl + ',' + '2,'*15)[:33]
rsp_hba_delay(delay=delay_str, rcus=self.hba.selectList())
self.rcudata.record(rec_time=2)
self.checkSignalElements(elem, ctrl_nr, subband, rf_min_signal, rf_low_deviation, rf_high_deviation)
if not checkActiveRSPDriver():
logger.warn("RSPDriver down while testing, skip result")
return
self.hba.element_check_done = 1
self.db.addTestDone('ES%d' %(mode))
logger.info("=== Done HBA element tests ===")
return
# check for oscillating tiles and turn off RCU
# stop one RCU each run
# elem counts from 0..15 (for user output use 1..16)
def checkOscillationElements(self, elem):
logger.info("--- oscillation test --")
if not checkActiveRSPDriver():
logger.warn("RSPDriver down, skip test")
return
clean = True
n_rcus_off = 0
# result is a sorted list on maxvalue
result = search_oscillation(self.rcudata.getAll(), delta=3.0, start_sb=0, stop_sb=511)
if len(result) > 1:
clean = False
rcu, peaks_sum, n_peaks, rcu_low = sorted(result[1:], reverse=True)[0] #result[1]
tile = rcu / 2
if self.hba.tile[tile].element[elem].no_modem or self.hba.tile[tile].element[elem].modem_error:
return(True, 0)
tile_polarity = rcu % 2
logger.info("RCU %d Tile %d Element %d Oscillation sum=%3.1f peaks=%d, low=%3.1f" %\
(rcu, tile, elem+1, peaks_sum, n_peaks, rcu_low))
self.turnOffTile(tile)
n_rcus_off += 1
if tile_polarity == 0:
self.hba.tile[tile].element[elem].x.osc = 1
else:
self.hba.tile[tile].element[elem].y.osc = 1
return (clean, n_rcus_off)
def checkSpuriousElements(self, elem):
logger.info("--- spurious test ---")
if not checkActiveRSPDriver():
logger.warn("RSPDriver down, skip test")
return
n_rcus_off = 0
# result is a sorted list on maxvalue
result = search_spurious(self.rcudata.getAll(), delta=3.0)
for rcu in result:
tile = rcu / 2
tile_polarity = rcu % 2
logger.info("RCU %d Tile %d Element %d pol %d Spurious" %(rcu, tile, elem+1, tile_polarity))
self.turnOffTile(tile)
n_rcus_off += 1
if tile_polarity == 0:
self.hba.tile[tile].element[elem].x.spurious = 1
else:
self.hba.tile[tile].element[elem].y.spurious = 1
return (n_rcus_off)
def checkNoiseElements(self, elem, low_deviation, high_deviation, max_diff):
logger.info("--- noise test ---")
if not checkActiveRSPDriver():
logger.warn("RSPDriver down, skip test")
return
# result is a sorted list on maxvalue
low_noise, high_noise, jitter = search_noise(self.rcudata.getAll(), low_deviation, high_deviation, max_diff)
for n in low_noise:
rcu, val, bad_secs, ref, diff = n
tile = rcu / 2
logger.info("RCU %d Tile %d Element %d Low-Noise value=%3.1f bad=%d(%d) limit=%3.1f diff=%3.3f" %\
(rcu, tile, elem+1, val, bad_secs, self.rcudata.frames, ref, diff))
if rcu%2 == 0:
elem_polarity = self.hba.tile[tile].element[elem].x
else:
elem_polarity = self.hba.tile[tile].element[elem].y
elem_polarity.low_seconds += self.rcudata.frames
elem_polarity.low_bad_seconds += bad_secs
if val < elem_polarity.low_val:
elem_polarity.low_noise = 1
elem_polarity.low_val = val
elem_polarity.low_ref = ref
elem_polarity.low_diff = diff
for n in high_noise:
rcu, val, bad_secs, ref, diff = n
tile = rcu / 2
logger.info("RCU %d Tile %d Element %d High-Noise value=%3.1f bad=%d(%d) ref=%3.1f diff=%3.1f" %\
(rcu, tile, elem+1, val, bad_secs, self.rcudata.frames, ref, diff))
if rcu%2 == 0:
elem_polarity = self.hba.tile[tile].element[elem].x
else:
elem_polarity = self.hba.tile[tile].element[elem].y
elem_polarity.high_seconds += self.rcudata.frames
elem_polarity.high_bad_seconds += bad_secs
if val > elem_polarity.high_val:
elem_polarity.high_noise = 1
elem_polarity.high_val = val
elem_polarity.high_ref = ref
elem_polarity.high_diff = diff
for n in jitter:
rcu, val, ref, bad_secs = n
tile = rcu / 2
logger.info("RCU %d Tile %d Element %d Jitter, fluctuation=%3.1fdB normal=%3.1fdB" %\
(rcu, tile, elem+1, val, ref))
if rcu%2 == 0:
elem_polarity = self.hba.tile[tile].element[elem].x
else:
elem_polarity = self.hba.tile[tile].element[elem].y
elem_polarity.jitter_seconds += self.rcudata.frames
elem_polarity.jitter_bad_seconds += bad_secs
if val > elem_polarity.jitter_val:
elem_polarity.jitter = 1
elem_polarity.jitter_val = val
elem_polarity.jitter_ref = ref
return
def checkSignalElements(self, elem, ctrl_nr, subband, min_signal, low_deviation, high_deviation):
logger.info("--- RF test ---")
if not checkActiveRSPDriver():
logger.warn("RSPDriver down, skip test")
return
self.rcudata.searchTestSignal(subband=subband, minsignal=min_signal, maxsignal=120.0)
logger.info("HBA, X used test subband=%d avg_signal=%3.1f" %(self.rcudata.testSubband_X, self.rcudata.testSignal_X))
logger.info("HBA, Y used test subband=%d avg_signal=%3.1f" %(self.rcudata.testSubband_Y, self.rcudata.testSignal_Y))
ssdataX = self.rcudata.getSubbandX()
ssdataY = self.rcudata.getSubbandY()
avgX = self.rcudata.testSignal_X
avgY = self.rcudata.testSignal_Y
minX = ssdataX.min()
minY = ssdataY.min()
logger.debug("X data: min=%5.3f max=%5.3f avg=%5.3f" %(minX, ssdataX.max(), avgX))
logger.debug("Y data: min=%5.3f max=%5.3f avg=%5.3f" %(minY, ssdataY.max(), avgY))
for tile in self.hba.tile:
if tile.x.rcu_off or tile.y.rcu_off:
logger.info("skip signal test for tile %d, RCUs are turned off" %(tile.nr))
if self.rcudata.testSubband_X == 0 or self.rcudata.testSubband_X == 0:
logger.warn("HBA, No valid test signal")
for tile in self.hba.tile:
tile.element[elem].x.ref_signal[ctrl_nr] = 0
tile.element[elem].y.ref_signal[ctrl_nr] = 0
return
for tile in self.hba.tile:
if tile.x.rcu_off or tile.y.rcu_off:
continue
tile.element[elem].x.ref_signal[ctrl_nr] = avgX
tile.element[elem].y.ref_signal[ctrl_nr] = avgY
tile.element[elem].x.test_subband[ctrl_nr] = self.rcudata.testSubband_X
tile.element[elem].y.test_subband[ctrl_nr] = self.rcudata.testSubband_Y
tile.element[elem].x.test_signal[ctrl_nr] = ssdataX[tile.nr]
tile.element[elem].y.test_signal[ctrl_nr] = ssdataY[tile.nr]
#logger.debug("HBA Tile=%d Element=%d: X=%3.1fdB Y=%3.1fdB" %\
# (tile.nr, elem+1, ssdataX[tile.nr], ssdataY[tile.nr]))
loginfo = False
if ssdataX[tile.nr] < (avgX + low_deviation):
if ssdataX[tile.nr] < 2.0:
tile.element[elem].x.no_signal = 1
elif ssdataX[tile.nr] > 55.0 and ssdataX[tile.nr] < 65.0:
tile.element[elem].no_power = 1
else:
tile.element[elem].x.too_low = 1
loginfo = True
if ssdataX[tile.nr] > (avgX + high_deviation):
tile.element[elem].x.too_high = 1
loginfo = True
if ssdataY[tile.nr] < (avgY + low_deviation):
if ssdataY[tile.nr] < 2.0:
tile.element[elem].y.no_signal = 1
elif ssdataY[tile.nr] > 55.0 and ssdataY[tile.nr] < 65.0:
tile.element[elem].no_power = 1
else:
tile.element[elem].y.too_low = 1
loginfo = True
if ssdataY[tile.nr] > (avgY + high_deviation):
tile.element[elem].y.too_high = 1
loginfo = True
if loginfo:
logger.info("HBA Tile=%d Element=%d Error: X=%3.1fdB Y=%3.1fdB" %\
(tile.nr, elem+1, ssdataX[tile.nr], ssdataY[tile.nr]))
return
#### end of cHBA class ####
| UTF-8 | Python | false | false | 52,397 | py | 2,052 | test_lib.py | 1,505 | 0.506155 | 0.491421 | 0 | 1,259 | 40.597299 | 160 |
jonasht/cursoIntesivoDePython | 2,731,599,221,141 | 3e78b3f4c3f51ce1c29441f9696c32c39a25b3c6 | 0930b6c994225d44818887716ce4e8771af86b81 | /exercisesDosCapitulos/07-entradaDeUsuario-E-lacosWhile/7.7-infinito.py | 5b90b62f9ffe2aef16c94dbb5734315bf2893b4e | [] | no_license | https://github.com/jonasht/cursoIntesivoDePython | 44d81b08f1652c4fa7a6d14a0e3f62ee8e06944c | fb5f5c9884fb1a6062a7c4e7676e5cc3b13c0ebb | refs/heads/master | 2023-05-23T20:44:34.372825 | 2021-06-19T12:13:46 | 2021-06-19T12:13:46 | 293,325,804 | 4 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
print('atençao, este codigo está em loop infinite')
print('precione ctrl + c para parar')
n = 1
while 1:
n = n + n
print(n)
if len(str(n)) >= 1000: break # apagar isso para ser infinito
| UTF-8 | Python | false | false | 202 | py | 243 | 7.7-infinito.py | 232 | 0.635 | 0.605 | 0 | 8 | 23.875 | 65 |
sathappan1989/Pythonlearning | 8,632,884,294,463 | a11613b286204790aadb4e66f4b29ef755d718bf | 50995b2d2d8528684079088e75faf55300494d6b | /Ordered.py | 2ba51c03dc5e31b8606e324a66bb11d7a9f3aee8 | [] | no_license | https://github.com/sathappan1989/Pythonlearning | f3c2c9f8e864cda1d69d47b220d23325fd915320 | 0e6fdaf39fcd3046139702547f594e5a9854cc9b | refs/heads/master | 2020-11-26T17:04:28.645106 | 2019-12-19T23:07:41 | 2019-12-19T23:07:41 | 229,149,812 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Ordered Working List
#Print the list in alphabetical order.
#Print the list in its original order.
#Print the list in reverse alphabetical order.
#Print the list in its original order.
#Print the list in the reverse order from what it started.
#Print the list in its original order
#Start with the list you created in Working List.
workinglists=['ABCD','DCSX','XSED','BAXD','DSSQ']
#You are going to print out the list in a number of different orders.
#You are going to print out the list in a number of different orders.
#Print a message each time telling us what order we should see the list in.
workinglists.sort()
for wl in workinglists:
print('sort order \t :' + wl)
workinglists.sort(reverse=True)
for wl1 in workinglists:
print('reverse sort order \t :' + wl1)
#Print the list in its original order.
ch=['chennai','delhi','kkdi','usa','abc']
for wl2 in sorted(ch):
print(wl2)
for wl2 in sorted(ch, reverse=True):
print(wl2)
print(ch)
#Permanently sort the list in alphabetical order, and then print it out.
#Permanently sort the list in reverse alphabetical order, and then print it out.
ch.reverse()
print(ch)
ch.reverse(False)
print(ch) | UTF-8 | Python | false | false | 1,176 | py | 46 | Ordered.py | 44 | 0.735544 | 0.730442 | 0 | 44 | 25.75 | 80 |
jerryeml/func_app | 2,070,174,261,097 | 7c56d8fd6871dd89f987549e1fbe6aaaadfa46f3 | dddd607f859d72ec97936dd6e3b8398a070d7844 | /func_testing_azure_cli/__init__.py | 19934765818f8af0540b490652a1d2f65bf01833 | [] | no_license | https://github.com/jerryeml/func_app | c30e3fb2324fff642959b333facf151c0d2b1ebb | 0746119bc46f9301ca07574213ca7d8974179239 | refs/heads/master | 2023-06-03T14:33:32.061386 | 2021-06-29T08:23:30 | 2021-06-29T08:23:30 | 352,038,798 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
import azure.functions as func
from azure.cli.core import get_default_cli
def az_cli(args):
cli = get_default_cli()
cli.invoke(args)
if cli.result.result:
return cli.result.result
elif cli.result.error:
raise cli.result.error
return True
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
name = req.params.get('name')
if not name:
try:
req_body = req.get_json()
except ValueError:
pass
else:
name = req_body.get('name')
if name:
return func.HttpResponse(f"Hello, {name}. This HTTP triggered function executed successfully.")
else:
# This will raise error because signal only works in main thread
# command = ['artifacts', 'universal', 'download', '--organization', 'https://dev.azure.com/infinite-wars/',
# '--feed', 'infinite-wars', '--name', 'data-l2-testing-case-v1.0.1776', '--version', '1.0.0',
# '--path', '.']
# a = az_cli(command)
# logging.info(f"a return {a}")
command = ['lab', 'get', '-g', 'rg-testing-env-lab', '--name', 'dtl-aladdin-test']
test = az_cli(command)
logging.info(f"test return {test}")
logging.info("123456")
return func.HttpResponse(
"This HTTP triggered function executed successfully. Pass a name in the query string or in the request body for a personalized response.",
status_code=200
)
| UTF-8 | Python | false | false | 1,591 | py | 2 | __init__.py | 2 | 0.591452 | 0.57951 | 0 | 53 | 29.018868 | 151 |
sipims/collective.xmpp.core | 10,986,526,356,073 | 8e460ffd986162878947b8f832d0bbdf090b73e1 | 9f0731c88a227a9101ee77eaae0a973b9a47e579 | /collective/xmpp/core/browser/userinfo.py | 35116d588d31b2d91a732ae8738342f266724d5f | [] | no_license | https://github.com/sipims/collective.xmpp.core | 4d709fafb5a14985c804b0c166f408cdd1a54b23 | 43a9213c422f2bed35184d17407d56a210e1a649 | refs/heads/master | 2020-12-25T05:02:23.610208 | 2014-04-29T14:57:23 | 2014-04-29T14:57:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
from twisted.words.protocols.jabber.jid import JID
from AccessControl import Unauthorized
from Products.Five.browser import BrowserView
from Products.CMFCore.utils import getToolByName
from collective.xmpp.core.utils.users import unescapeNode
class XMPPUserInfo(BrowserView):
def __call__(self, user_id):
pm = getToolByName(self.context, 'portal_membership')
if pm.isAnonymousUser():
raise Unauthorized
user_id = unescapeNode(user_id)
info = pm.getMemberInfo(user_id)
if info is None:
return None
fullname = info.get('fullname') or user_id
portrait_url = pm.getPersonalPortrait(user_id).absolute_url()
portal_url = getToolByName(self.context, 'portal_url')
user_profile_url = '%s/author/%s' % (portal_url(), user_id)
response = self.request.response
response.setHeader('content-type', 'application/json')
response.setBody(json.dumps({'fullname': fullname,
'portrait_url': portrait_url,
'user_profile_url': user_profile_url }))
return response
class XMPPUserDetails(BrowserView):
def __init__(self, context, request):
super(BrowserView, self).__init__(context, request)
self.jid = request.get('jid')
self.user_id = JID(self.jid).user
self.bare_jid = JID(self.jid).userhost()
self.pm = getToolByName(context, 'portal_membership')
info = self.pm.getMemberInfo(self.user_id)
if info:
self._fullname = info.get('fullname') or self.user_id
self._portrait_url = self.pm.getPersonalPortrait(self.user_id).absolute_url()
else:
self._fullname = ''
self._portrait_url = ''
@property
def fullname(self):
return self._fullname
@property
def portrait_url(self):
return self._portrait_url
| UTF-8 | Python | false | false | 1,950 | py | 38 | userinfo.py | 21 | 0.623077 | 0.623077 | 0 | 55 | 34.454545 | 89 |
Gustavo-RC/Predictive_Analytics | 13,434,657,719,017 | 4136937f4549628574cfa29d99071fec1ec63856 | 1e097a92c9537af1a5a572579798e7f13f78023c | /Induction.py | 6e91bd4f336c54722bff7fe64f7cb30780635fae | [] | no_license | https://github.com/Gustavo-RC/Predictive_Analytics | 22e598c29baa6a8075eac9de5dc3c411d1ba14aa | c68d4cba9bf59c4ec50e41f7155cf9f888a1df47 | refs/heads/master | 2020-09-16T18:24:56.833557 | 2019-11-26T03:46:47 | 2019-11-26T03:46:47 | 223,852,270 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Bibliotecas necessarias para ler o conjunto de dados de treinamento
#Realizar calculos numericos em array
import numpy as np
#Analise de dados
import pandas as pd
#Efetuar a regressão logistica
from sklearn.externals import joblib
from sklearn.linear_model import LogisticRegression
#Gerar a matriz de confusao e o relatório de classificacao
from sklearn.metrics import confusion_matrix, classification_report
#Dividir dados
from sklearn.model_selection import train_test_split
#Codificar os dados de string para regressao
from sklearn.preprocessing import LabelEncoder # Need to encode string data for regression
#Leitura do arquivo após pré tratamento dos dados em Excel
banco = pd.read_csv("bank-full.csv")
#Tratamento dos dados
#Substitua todos os valores desconhecidos por NaN
banco = banco.replace('unknown', np.NaN)
#Apagar todas as linhas com NaN
banco = banco.dropna(axis=0)
#Imprime dimensoes
print("Dimensoes:", '\n', banco.shape, '\n')
#Imprime as caracteristicas
print("Características:", '\n', banco.columns.values)
#Cabecalho
banco.head()
#Simplificando o conjunto de dados
#Agrupa educacoes basicas
banco.loc[banco['education'] == 'basic.4y', 'education'] = 'basic'
banco.loc[banco['education'] == 'basic.6y', 'education'] = 'basic'
banco.loc[banco['education'] == 'basic.9y', 'education'] = 'basic'
#Agrupa trabalhos de colarinho branco
banco.loc[banco['job'] == 'admin.', 'job'] = 'white-collar'
banco.loc[banco['job'] == 'management', 'job'] = 'white-collar'
banco.loc[banco['job'] == 'entrepreneur', 'job'] = 'white-collar'
banco.loc[banco['job'] == 'technician', 'job'] = 'white-collar'
# Agrupa trabalhos de colarinho azul e servicos
banco.loc[banco['job'] == 'services', 'job'] = 'blue-collar'
banco.loc[banco['job'] == 'housemaid', 'job'] = 'blue-collar'
banco.loc[banco['job'] == 'services', 'job'] = 'blue-collar'
#Exploracao dos dados
banco.describe()
#Histograma dos valores y
pd.DataFrame.hist(banco, column='y', bins=10)
cont = 0
for i in banco['y']:
if i == 1:
cont += 1
#Proporcao de valor 1
print("Proporcao de adesao:", (cont / len(banco['y'])) * 100, "%")
#Proporcao de valor 0
print("Proporcao de nao adesao:", (1 - cont / len(banco['y'])) * 100, "%")
#Assinaturas por mes
meses = {}
for i, j in zip(banco['month'], banco['y']):
if i not in meses:
meses[i] = j
else:
meses[i] += j
meses = pd.Series(meses)
meses.plot.bar(grid=True)
print("Meses", " Adesao", '\n', meses.sort_values())
#Assinaturas por emprego
empregos = {}
for i, j in zip(banco['job'], banco['y']):
if i not in empregos:
empregos[i] = j
else:
empregos[i] += j
empregos = pd.Series(empregos)
empregos.plot.bar(grid=True)
print("Empregos", " Adesao", '\n', empregos.sort_values())
#Assinaturas por idade
idades = {}
for i, j in zip(banco['age'], banco['y']):
if i not in idades:
idades[i] = j
else:
idades[i] += j
idades = pd.Series(idades)
idades.plot()
print("Idades", " Adesao", '\n', idades.sort_values())
#Assinaturas por educacao
educacao = {}
for i, j in zip(banco['education'], banco['y']):
if i not in educacao:
educacao[i] = j
else:
educacao[i] += j
educacao = pd.Series(educacao)
educacao.plot.bar(grid=True)
print("Educacao", " Adesao", '\n', educacao.sort_values())
#Ajuste do modelo de regressão logistica
#Necessidade de transformar dados de string em valores numericos antes de aplicar o modelo
#Seleciona todas as colunas de dados nao numericos
transformado = banco.select_dtypes(exclude=['number'])
#Transforma valores de string em valores numericos
transformado = transformado.apply(LabelEncoder().fit_transform)
#Associa as colunas recém-codificadas ao restante do quadro
transformado = transformado.join(banco.select_dtypes(include=['number']))
#Dados divididos, 30% para teste
x_treino, x_teste = train_test_split(transformado, test_size = 0.3)
#Isola os valores y para treinamento e teste
y_treino, y_teste = x_treino['y'], x_teste['y']
#Apaga os valores y dos dados de treinamento de entrada
x_treino, x_teste = x_treino.drop(['y'], axis=1), x_teste.drop(['y'], axis=1)
#Ajusta um modelo aos dados de treinamento
modelo = LogisticRegression(solver='lbfgs', multi_class='auto').fit(x_treino, y_treino)
#Nome do modelo em disco
filename = 'modelo_finalizado.sav'
#Salva o modelo em disco
joblib.dump(modelo, filename)
#Previsao dos valores
previsao = modelo.predict(x_teste)
cont = 0
#Compara os valores previstos com os valores reais e conta os erros
for i, j in zip(previsao, y_teste):
if i == j:
cont += 1
#Taxa de sucesso
print("Acuracia:", (cont / len(y_teste)) * 100, "%")
#Matriz de confusao e relatorio de classificacao
print("Matriz de Confusao:", '\n', confusion_matrix(y_teste, previsao))
print("Relatorio de Classificacao:", '\n', classification_report(y_teste, previsao, target_names=['0', '1'])) | UTF-8 | Python | false | false | 4,915 | py | 4 | Induction.py | 2 | 0.696414 | 0.690098 | 0 | 156 | 30.467949 | 109 |
esrefeska/senecawoodworking | 17,119,739,666,188 | 383ff81666f176f8a26376a099dac106438f1f79 | efac4a1fa863ec72489a0c027adc6ac66f85404b | /shopify_ept/models/common_log_book_ept.py | 4b011959b538f7c4ad205f448fb1f454f52755a1 | [] | no_license | https://github.com/esrefeska/senecawoodworking | 8cc7e45ca96f3a8569b4bcbccafbc0b21cc964a8 | b14bb7275067e304ad497afc88cd9af877ccbd9a | refs/heads/main | 2023-07-16T08:20:13.679791 | 2021-08-27T21:22:07 | 2021-08-27T21:22:07 | 379,676,039 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# See LICENSE file for full copyright and licensing details.
import logging
from datetime import datetime, timedelta
from odoo import models, fields
_logger = logging.getLogger(__name__)
class CommonLogBookEpt(models.Model):
"""Inherit the common log book here to handel the log book in the connector"""
_inherit = "common.log.book.ept"
shopify_instance_id = fields.Many2one("shopify.instance.ept", "Instance")
def create_crash_queue_schedule_activity(self, queue_id, model_id, note):
"""
This method is used to create a schedule activity for the queue crash.
Base on the Shopify configuration when any queue crash will create a schedule activity.
:param queue_id: Record of the queue(customer,product and order)
:param model_id: Record of model(customer,product and order)
:param note: Message
@author: Nilesh Parmar
@author: Maulik Barad as created common method for all queues on dated 17-Feb-2020.
Date: 07 February 2020.
Task Id : 160579
"""
mail_activity_obj = self.env['mail.activity']
activity_type_id = queue_id and queue_id.shopify_instance_id.shopify_activity_type_id.id
date_deadline = datetime.strftime(
datetime.now() + timedelta(days=int(queue_id.shopify_instance_id.shopify_date_deadline)), "%Y-%m-%d")
if queue_id:
for user_id in queue_id.shopify_instance_id.shopify_user_ids:
mail_activity = mail_activity_obj.search(
[('res_model_id', '=', model_id), ('user_id', '=', user_id.id), ('res_id', '=', queue_id.id),
('activity_type_id', '=', activity_type_id)])
if not mail_activity:
vals = self.prepare_vals_for_schedule_activity(activity_type_id, note, queue_id, user_id, model_id,
date_deadline)
try:
mail_activity_obj.create(vals)
except:
_logger.info("Unable to create schedule activity, Please give proper "
"access right of this user :%s ", user_id.name)
return True
def prepare_vals_for_schedule_activity(self, activity_type_id, note, queue_id, user_id, model_id, date_deadline):
""" This method used to prepare a vals for the schedule activity.
:param activity_type_id: Record of the activity type(email,call,meeting, to do)
:param user_id: Record of user(whom to assign schedule activity)
:param date_deadline: date of schedule activity dead line.
@return: values
@author: Haresh Mori @Emipro Technologies Pvt. Ltd on date 14 October 2020 .
Task_id: 167537
"""
values = {'activity_type_id': activity_type_id,
'note': note,
'res_id': queue_id.id,
'user_id': user_id.id or self._uid,
'res_model_id': model_id,
'date_deadline': date_deadline
}
return values
def shopify_create_common_log_book(self, process_type, instance, model_id):
""" This method used to create a log book record.
:param type: Generally, the type value is 'import' or 'export'.
:model_id: record of model.
@return: log_book_id
@author: Haresh Mori @Emipro Technologies Pvt. Ltd on date 16 October 2020 .
"""
log_book_id = self.create({"type": process_type,
"module": "shopify_ept",
"shopify_instance_id": instance.id if instance else False,
"model_id": model_id,
"active": True})
return log_book_id
class CommonLogLineEpt(models.Model):
_inherit = "common.log.lines.ept"
shopify_product_data_queue_line_id = fields.Many2one("shopify.product.data.queue.line.ept",
"Shopify Product Queue Line")
shopify_order_data_queue_line_id = fields.Many2one("shopify.order.data.queue.line.ept",
"Shopify Order Queue Line")
shopify_customer_data_queue_line_id = fields.Many2one("shopify.customer.data.queue.line.ept",
"Shopify Customer Queue Line")
def shopify_create_product_log_line(self, message, model_id, queue_line_id, log_book_id, sku=""):
"""
This method used to create a log line for product mismatch logs.
@return: log_line
@author: Haresh Mori @Emipro Technologies Pvt. Ltd on date 22/10/2019.
@change: Maulik Barad on Date 02-Sep-2020.
"""
vals = self.shopify_prepare_log_line_vals(message, model_id, queue_line_id, log_book_id)
vals.update({
'shopify_product_data_queue_line_id': queue_line_id.id if queue_line_id else False,
"default_code": sku
})
log_line = self.create(vals)
return log_line
def shopify_create_order_log_line(self, message, model_id, queue_line_id, log_book_id, order_ref=""):
"""This method used to create a log line for order mismatch logs.
@param : self, message, model_id, queue_line_id
@return: log_line
@author: Haresh Mori @Emipro Technologies Pvt. Ltd on date 11/11/2019.
"""
if order_ref:
domain = [("message", "=", message), ("model_id", "=", model_id), ("order_ref", "=", order_ref)]
log_line = self.search(domain)
if log_line:
log_line.update({"write_date": datetime.now(), "log_book_id": log_book_id.id if log_book_id else False,
"shopify_order_data_queue_line_id": queue_line_id and queue_line_id.id or False})
return log_line
vals = self.shopify_prepare_log_line_vals(message, model_id, queue_line_id, log_book_id)
vals.update({'shopify_order_data_queue_line_id': queue_line_id and queue_line_id.id or False,
"order_ref": order_ref})
log_line = self.create(vals)
return log_line
def shopify_create_customer_log_line(self, message, model_id, queue_line_id, log_book_id):
"""This method used to create a log line for customer mismatch logs.
"""
vals = self.shopify_prepare_log_line_vals(message, model_id, queue_line_id, log_book_id)
vals.update({
'shopify_customer_data_queue_line_id': queue_line_id and queue_line_id.id or False,
})
log_line = self.create(vals)
return log_line
def shopify_prepare_log_line_vals(self, message, model_id, res_id, log_book_id):
""" Prepare vals for the log line.
:param message: Error/log message
:param model_id: Record of model
:param res_id: Res Id(Here we can set process record id).
:param log_book_id: Record of log book.
@return: vals
@author: Haresh Mori @Emipro Technologies Pvt. Ltd on date 14 October 2020 .
Task_id: 167537
"""
vals = {'message': message,
'model_id': model_id,
'res_id': res_id.id if res_id else False,
'log_book_id': log_book_id.id if log_book_id else False,
}
return vals
| UTF-8 | Python | false | false | 7,570 | py | 1 | common_log_book_ept.py | 1 | 0.572523 | 0.562616 | 0 | 155 | 47.83871 | 119 |
crki76/Nessus-automation-scan | 240,518,176,843 | 9755d9ea1af4f57b3c7370cd49fa4fefbf8c15eb | 17f296850af4c07d59696970ce92c80be644a6dc | /nessus_scan.py | 21a90655fcae8a922f6e2f2cc7526768b101a4f0 | [] | no_license | https://github.com/crki76/Nessus-automation-scan | 0f5713daee8683cd74ce70a4e4ccef2962490230 | a796734140ba96ecd21e0d86115aeb9dc42b791d | refs/heads/master | 2020-12-31T07:33:21.169392 | 2016-05-12T11:49:25 | 2016-05-12T11:49:25 | 58,630,276 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import requests
import json
import time
import csv
import socket
requests.packages.urllib3.disable_warnings()
def get_list_templates_url(url):
templ_url = 'https://%s/editor/policy/templates' % (url)
return templ_url
def login_url(url):
log_url = 'https://%s/session' % (url)
return log_url
def get_policy(token, policies, test_policy):
try:
data = connect_policy(token, policies)
output = json.loads(data)
for entry in output['templates']:
if entry.get('title', None) == test_policy:
return entry['uuid']
except:
raise Exception ("Problem with the output in get_policy(). It does not contains UUID")
def check_if_policy_was_found(policy):
try:
if policy == None:
raise
except:
raise Exception ("The policy does not exist")
def validate_it(IP):
try:
socket.inet_aton(IP)
except:
socket.error
raise ValueError("Invalid IP format!")
def create_scan_name(name):
second_part = current_time_date()
return (name+"_"+second_part)
def add_new_scan(token, uuid, name, description, target, url_path):
try:
headers = my_header(token)
path = "<nessus ip/host name>%s" % (url_path)
payload = {'uuid': uuid,
'settings': {
'name': name,
'description': description,
'emails': "<receiver email address>",
'text_targets': target}}
payload = json.dumps(payload)
session = requests.session()
r = requests.post(path, payload, headers=headers, verify=False)
output = json.loads(r.content)
object = output['scan']
for i in object:
if i == "id":
return object['id']
except:
raise Exception
def connect_policy(token, command):
headers = my_header(token)
path = "<nessus ip/host name>%s" % (command)
session = requests.session()
r = requests.get(path, headers=headers, verify=False)
return r.content
def get_session_token(url, user, passwd):
try:
url_login = login_url(url)
payload = {"username":user,"password":passwd}
session = requests.session()
r = requests.post(url_login, data=payload, verify=False)
if r.status_code == 200:
jsonres = r.json()
token = jsonres.get('token')
return token
elif r.status_code == 401:
raise
else:
print "Test failed for incorrect response in get_session_token"
raise
except:
raise Exception ("Exception in get_session_token - invalid CREDENTIALS")
def launch_my_test(token, id):
try:
headers = my_header(token)
path = "<nessus ip/host name>/scans/%s/launch" % (id)
session = requests.session()
r = requests.post(path, headers=headers, verify=False)
output = json.loads(r.content)
return output['scan_uuid']
except:
raise Exception("Exception in launch_my_test. Problem with token or with ID or with the path")
def current_time_date():
"""
I want to add a date and time to the name of the scan to be more straight forward for a user on Nessus UI
"""
cas = time.strftime("[time %H-%M-%S]")
datum = time.strftime("[date %d-%m-%Y]")
return cas+"_"+datum
def my_header(token):
headers = {'X-Cookie': 'token={0}'.format(token),
'content-type': 'application/json'}
return headers
def access_file(path_of_the_file):
with open(path_of_the_file) as csvfile:
reader = csv.DictReader(csvfile)
issues = []
for row in reader:
if row.get('Risk',"None") != "None":
issues.append(row)
return issues
def display_issues(collection, known_issues):
issues_formated=[]
issue_for_testing = []
print "************************************************************************************************************"
print "*************************************** FOUND RISK ISSUES **************************************************"
print "************************************************************************************************************"
for row in collection:
name = row['Name']
risk = row['Risk']
host = row['Host']
protocol = row['Port']
issues_formated.append("[Risk] ---> "+risk+" [Issue name] ---> "+name+" [Host] ---> "+host+" [Protocol] ---> "+ protocol)
issue_for_testing.append(name)
print '\n'.join(issues_formated)
result = analyzing(issue_for_testing,known_issues)
if result:
print result
print "test fail"
else:
print "Test PASSED no new issues found"
def analyzing(issues_for, collection_of_known_issues):
issues_not_matched = []
for row in issues_for:
if row not in collection_of_known_issues:
issues_not_matched.append(row)
def get_scan_status(token, scan_uuid):
try:
headers = my_header(token)
path = "<nessus ip/host name>/scans/"
session = requests.session()
r = requests.get(path, headers=headers, verify=False)
output = json.loads(r.content)
for entry in output:
if entry == "scans":
for entry in output['scans']:
if entry['uuid'] == scan_uuid:
return (entry['status'])
except:
raise Exception ("Exception in get_scan_status - problem with finding the correct scan")
def check_if_scan_is_over(status):
if status != 'completed':
time.sleep(10)
date_for_log = current_time_date()
log = date_for_log + " Scan in process. The current status is %s" % (status)
print (log)
return "still working"
else:
return "finished"
def is_still_working(over, token, uuid):
while over != "finished":
current_test_status = get_scan_status(token,uuid)
over = check_if_scan_is_over(current_test_status)
def export_scan(token, scan_uuid, format):
try:
scan_id = get_scan_id(token, scan_uuid)
headers = my_header(token)
path = "<nessus ip/host name>/scans/%s" % (scan_id)
session = requests.session()
r = requests.get(path, headers=headers, verify=False)
output = json.loads(r.content)
for entry in output:
if entry == "history":
for entry in output['history']:
history_id = (entry['history_id'])
payload = {'history_id':history_id,'format': format}
path = "<nessus ip/host name>/scans/%s/export" % (scan_id)
session = requests.session()
payload = json.dumps(payload)
r = requests.post(path, payload, headers=headers, verify=False)
output = json.loads(r.content)
return output['file']
except:
raise Exception ("Exception raised - problem with export_scan - scan_id or payload are incorrect")
def get_scan_id(token, scan_uuid):
try:
headers = my_header(token)
path = "<nessus ip/host name>/scans/"
session = requests.session()
r = requests.get(path, headers=headers, verify=False)
output = json.loads(r.content)
for entry in output:
if entry == "scans":
for entry in output['scans']:
if (entry['uuid']) == scan_uuid:
return (entry['id'])
except:
raise Exception ("Exception in get_scan_id(). No such as dict - incorrect uuid")
def export_status(token, file_id, scan_id):
try:
headers = my_header(token)
path = "<nessus ip/host name>/scans/%s/export/%s/status" % (scan_id,file_id)
session = requests.session()
r = requests.get(path, headers=headers, verify=False)
output = json.loads(r.content)
return output['status']
except:
raise Exception
def download_scan_result(token, file_id, name, format, scan_id):
try:
local_environment = os.name
if local_environment is 'nt':
cur_dir = os.path.dirname(os.path.abspath(__file__))
else:
cur_dir = os.path.abspath(os.path.curdir)
filename = os.path.join(cur_dir, "%s_CSV.%s" % (name, format))
headers = my_header(token)
path = "<nessus ip/host name>/scans/%s/export/%s/download" % (scan_id,file_id)
session = requests.session()
r = requests.get(path, headers=headers, verify=False)
x = r.content
with open(filename, 'w+') as f:
f.write(x)
f.close()
return filename
except:
raise Exception ("Not such file or directory")
def delete_single_scan(token, scan_id):
try:
headers = my_header(token)
path = "<nessus ip/host name>/scans/%s" % (scan_id)
session = requests.session()
r = requests.delete(path, headers=headers, verify=False)
if r.status_code == 200:
double_check = check_if_scan_still_exists(token, scan_id)
if double_check == True:
print "Scan was deleted"
else:
print "Scan was NOT deleted"
except:
raise Exception ("Scan does not exist")
def check_if_scan_still_exists(token, scan_id):
headers = my_header(token)
path = "<nessus ip/host name>/scans/%s" % (scan_id)
session = requests.session()
r = requests.get(path, headers=headers, verify=False)
if r.status_code == 200:
return False
else:
return True
def delete_file_from_dir(name):
""" I want to delete the scan file from the directory at the end of the testing
It is implemented for Linux at the moment
"""
try:
cur_file_dir = os.path.abspath(os.path.curdir)+"/%s" % (name)
os.remove(cur_file_dir)
except:
raise Exception ("Not such as file or directories")
def check_if_dir_contains_any_scanning_file():
try:
cur_file_dir = os.path.abspath(os.path.curdir)
list_of_dir = os.listdir(cur_file_dir)
if list_of_dir:
for i in list_of_dir:
if "auto_scan_Nessus" in i:
delete_file_from_dir(i)
print "Deleted old scanning result %s" % (i)
print "There is no any previous scan in this directory"
except:
raise Exception ("no such a file or dir")
| UTF-8 | Python | false | false | 10,619 | py | 3 | nessus_scan.py | 2 | 0.558339 | 0.556832 | 0 | 316 | 32.601266 | 133 |
MahmoudHeshamBackup/videofiles | 6,871,947,693,203 | 7149120818a4ee38ec5f4ec2b1aeb6c42460fd46 | fe5cf20bd47b2c9763ed7dda1c641e6ca4155ce7 | /rendering/pytracer/core/test/PtPluginTest.py | 44628c41475ddaccde34767b5eb16290a9e7a8d8 | [] | no_license | https://github.com/MahmoudHeshamBackup/videofiles | aba5b8d2f9ac9786f9f1933f73ac8eda09ffc952 | 89aac3203cc4c08cbff4d085bcc11d1cf86b186d | refs/heads/master | 2021-05-26T13:07:43.587928 | 2012-11-16T17:45:20 | 2012-11-16T17:45:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import unittest
from pytracer.core.PtPlugin import PtPlugin
from pytracer.core import PtParam
class PtPluginTest(unittest.TestCase):
def test_addPtParam1(self):
val = 5.0
param = PtParam.PtParamFloat(value=val)
node = PtPlugin()
node.addParam(param)
self.assertEqual(node.numParams,1)
def test_addPtParam2(self):
node = PtPlugin()
#try to add an int
node.addParamInt("intPtParam",value=5)
self.assertEqual(node.numParams,1)
#try to add a float
node.addParamFloat("floatPtParam",value=5.0)
self.assertEqual(node.numParams,2)
if __name__ == '__main__':
print "Testing %s"%os.path.basename(__file__)[:-7]
unittest.main()
| UTF-8 | Python | false | false | 758 | py | 40 | PtPluginTest.py | 33 | 0.631926 | 0.617414 | 0 | 28 | 26.071429 | 54 |
galenguyer/infod | 14,955,076,130,884 | c1e0847734dd0c76d8f59ca30ffc77f8db2c57ca | 7a26e35e820ff5c0fe5802422a772a72663ced9d | /api/app.py | e2b1bd3eccbfd781ce58151d925a18b57436205f | [
"MIT"
] | permissive | https://github.com/galenguyer/infod | 12f46c734253323505147a2db071918cd6afa3ed | ea533d6dc909d7781d561e8b285d4c08915d5f54 | refs/heads/main | 2023-06-01T14:42:30.012605 | 2021-06-15T17:17:00 | 2021-06-15T17:17:00 | 376,989,265 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from infod import app
| UTF-8 | Python | false | false | 22 | py | 6 | app.py | 3 | 0.818182 | 0.818182 | 0 | 1 | 21 | 21 |
overuns/codejam_practice | 4,655,744,595,845 | 8f6ed2d6c2aeb33a23ed96e05009ae5d13def4b1 | 9fadf5644539acd53fc26cb74e5851e6d39de7a5 | /round_a_apac_test_2016/C.py | 977a92b1104c15c4960392425995a385cab1d500 | [] | no_license | https://github.com/overuns/codejam_practice | 8810d4194e4624244c2fca715056180da59e0552 | f238917584a3b9743aeb55ea7f0a558676d26e03 | refs/heads/master | 2018-01-10T15:49:18.584537 | 2016-04-10T11:30:35 | 2016-04-10T11:30:35 | 54,823,390 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
def main():
t = int(input())
for case in range(1, t + 1):
k = int(input())
print('Case #{}: {}'.format(case, k))
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 203 | py | 6 | C.py | 6 | 0.423645 | 0.408867 | 0 | 12 | 15.916667 | 45 |
laxte/coursework | 8,443,905,714,037 | 220d88717bc53dd9adfafc15ac1045e51b1167b9 | 4c459d5ebe66eb6fd0609a6081de60c5f7571396 | /sdn/ex3/module3-assignment1/CustomTopo.py | b706834e4168cabdd0bcc5872768ae278c126767 | [] | no_license | https://github.com/laxte/coursework | d95b39d438dd11678cffed5e9f51daf09842c9c3 | 564f02f14a5c83c3fefc00bfc65931cff77d2c80 | refs/heads/master | 2021-01-10T09:18:45.394047 | 2016-12-26T19:03:57 | 2016-12-26T19:03:57 | 48,134,750 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
'''
Coursera:
- Software Defined Networking (SDN) course
-- Module 3 Programming Assignment
Professor: Nick Feamster
Teaching Assistant: Muhammad Shahbaz
'''
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.link import TCLink
from mininet.log import setLogLevel
from mininet.util import irange,dumpNodeConnections
class CustomTopo(Topo):
"Simple Data Center Topology"
"linkopts - (1:core, 2:aggregation, 3: edge) parameters"
"fanout - number of child switch per parent switch"
def __init__(self, linkopts1, linkopts2, linkopts3, fanout=2, **opts):
# Initialize topology and default options
Topo.__init__(self, **opts)
# Add your logic here ...
self.fanout = fanout
switchCount = 1
hostCount = 1
core = self.addSwitch('s%s' % switchCount)
switchCount += 1
for i in irange(1, fanout):
aggregation = self.addSwitch('s%s' % switchCount)
switchCount += 1
self.addLink(core, aggregation, **linkopts1)
# linkopts1 = {'bw':50, 'delay':'5ms'}
for j in irange(1, fanout):
edge = self.addSwitch('s%s' % switchCount)
switchCount += 1
self.addLink(aggregation, edge, **linkopts2)
for k in irange(1, fanout):
host = self.addHost('h%s' % hostCount)
self.addLink(edge, host, **linkopts3)
hostCount += 1
def perfTest():
link1 = dict(bw=10, delay='5ms')
link2 = dict(bw=10, delay='10ms')
link3 = dict(bw=10, delay='15ms')
topo = CustomTopo(linkopts1 = link1, linkopts2 = link2, linkopts3 = link3, fanout=2)
net = Mininet(topo=topo, link=TCLink) #,host=CPULimitedHost,
net.start()
print "Dumping host connections"
dumpNodeConnections(net.hosts)
print "Testing network connectivity"
net.pingAll()
#print "Testing bandwidth between h1 and h4"
#h1, h4 = net.get('h1', 'h4')
#net.iperf((h1, h4))
net.stop()
if __name__ == '__main__':
# Tell mininet to print useful information
setLogLevel('info')
perfTest()
topos = { 'custom': ( lambda: CustomTopo() ) }
| UTF-8 | Python | false | false | 2,254 | py | 17 | CustomTopo.py | 15 | 0.605146 | 0.581633 | 0 | 71 | 30.746479 | 89 |
quantumInfection/blinkist | 7,516,192,778,751 | 210f83a8cc379100aa4b6008e5d63a8d5838a3ab | 424479451509a9f833b0c1e8942e9041e6950453 | /recommendation/__init__.py | 9f7453a474f855aa51444ed3ee5929081de954ac | [] | no_license | https://github.com/quantumInfection/blinkist | 1117804ad00106f0d0e192d2b048caa9e71c4c94 | 2c6270c030e619d14d9ab1df9152f4d07ff9b205 | refs/heads/master | 2023-05-11T19:08:53.172894 | 2019-06-10T17:28:14 | 2019-06-10T17:28:14 | 190,996,388 | 0 | 0 | null | false | 2023-05-01T20:57:47 | 2019-06-09T11:08:43 | 2019-06-10T17:28:20 | 2023-05-01T20:57:46 | 8 | 0 | 0 | 1 | Python | false | false | """This package contains all the rest api code for recommendations"""
from recommendation.api import rec_app
| UTF-8 | Python | false | false | 110 | py | 7 | __init__.py | 6 | 0.790909 | 0.790909 | 0 | 3 | 35.666667 | 69 |
ryys1122/pbs_accounting | 12,987,981,135,353 | 4428c683ec35b7211afa014622f9eddd19a75136 | 1a27450146f1a6fa8634e5ead6919684cd419133 | /other-scripts/totalcoredays.py | 8a9dc91d1b20d4a02748db469218b0fc3092de69 | [
"MIT"
] | permissive | https://github.com/ryys1122/pbs_accounting | c063500704ab38f526d291cbdd5317217a10bffb | 5dd01223b5c5beb870127bf04d17325d613c2284 | refs/heads/master | 2021-01-22T07:13:40.541851 | 2015-06-02T16:52:18 | 2015-06-02T16:52:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/software/tools/python-2.6.7/bin/python
import jobstats
import sys
import numpy as np
def main():
"""
Main routine: Process total core days used by all jobs
"""
if len(sys.argv) < 2:
sys.exit('Usage: ' + sys.argv[0] \
+ ' [Accounting files]')
else:
joblist = sys.argv[1:]
jobs = jobstats.alljobs(joblist)
coredays = 0.0
for job in jobs:
coredays += job.cores*job.walltime
coredays /= 24.0*3600.0
print( '%.1f' %coredays)
return
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 580 | py | 19 | totalcoredays.py | 18 | 0.539655 | 0.510345 | 0 | 29 | 18.344828 | 58 |
GuilhermeAureliano/programacao-1 | 5,892,695,155,615 | d37ca9066718280d9dd9d8843bcd4a4881c4e92b | 5dd815624418911e33d75b05400cd10af349a3d2 | /unidade1-2/nota_final.py | 1d882dd220e0ba60d85ef928218d418e5dfb64f3 | [] | no_license | https://github.com/GuilhermeAureliano/programacao-1 | e970275d58c024bab83022abb45caadd8578d1da | c9f2255956b0ab03afc38cd3efb0de531864cd3e | refs/heads/master | 2020-07-28T10:21:59.988617 | 2019-11-20T22:07:33 | 2019-11-20T22:07:33 | 209,392,092 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Universidade Federal de Campina Grande - UFCG
# Programação - 1
# Guilherme Aureliano
# Nota na Final
print('== Estágio 1 ==')
peso1 = float(input('Peso? '))
nota1 = float(input('Nota? '))
print('== Estágio 2 ==')
peso2 = float(input('Peso? '))
nota2 = float(input('Nota? '))
print('== Estágio 3 ==')
peso3 = float(input('Peso? '))
nota3 = float(input('Nota? '))
print('== Resultados ==')
media_parcial = (nota1 * peso1 + nota2 * peso2 + nota3 * peso3) / (peso1 + peso2 + peso3)
print('Média parcial: {:.1f}'.format(media_parcial))
desejo_5 = (5 - (media_parcial * 0.6)) / 0.4
desejo_7 = (7 - (media_parcial * 0.6)) / 0.4
print('Nota na final, pra média 5.0 = {:.1f}'.format(desejo_5))
print('Nota na final, pra média 7.0 = {:.1f}'.format(desejo_7))
| UTF-8 | Python | false | false | 798 | py | 161 | nota_final.py | 159 | 0.596203 | 0.54557 | 0 | 25 | 29.32 | 89 |
mgarcoder8z/oh-so-gitty | 11,055,245,847,318 | 27448d3204f5507a38ee4f60b3adf7117855dbf2 | fe5c72c7ea7b90a6fb4b77a9b679167c078f92d6 | /Python/grade2.py | a7fba086c37227348bd306395071f45eaf08bf9b | [] | no_license | https://github.com/mgarcoder8z/oh-so-gitty | f50b224e77bfd4ff1a003f7586409917701817d8 | 8227f7cc865f497b71fdc824fe8b0485d51e71b9 | refs/heads/master | 2021-05-24T04:05:19.036919 | 2020-11-02T19:32:09 | 2020-11-02T19:32:09 | 59,146,144 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | score=0
for i in xrange(1,11):
score=int(raw_input("Enter a score between 0 and 100: "))
if score >= 90 and score <= 100:
print(str(score) + "," + "Your grade is an A")
elif score >= 80 and score <= 89:
print(str(score) + "," + "Your grade is an B")
elif score >= 70 and score <= 79:
print(str(score) + "," + "Your grade is an C")
elif score >= 60 and score <= 69:
print(str(score) + "," + "Your grade is an D")
elif score >= 50 and score <= 59:
print(str(score) + "," + "Your grade is an F")
| UTF-8 | Python | false | false | 513 | py | 192 | grade2.py | 74 | 0.586745 | 0.530214 | 0 | 14 | 35.642857 | 58 |
ITSupportClamC/security_data | 13,176,959,698,625 | 2134a90751f7b58fee81bb1e5f09134571bfb216 | 8767ca26c4d46505c6e9f114c397e02280e7fdae | /services/security_attribute_services.py | 9b69c737efa6c48a9b4f3582ba5225d26aa5708d | [] | no_license | https://github.com/ITSupportClamC/security_data | aa20cfde4ef62c4b1264819b83f37e3dbd1e4ffc | 8d1b69509a3b8bd037f9390929ea0f1b5d93fc12 | refs/heads/master | 2023-05-05T14:38:24.096099 | 2021-05-28T09:29:35 | 2021-05-28T09:29:35 | 363,796,223 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
#
import logging
from security_data.models.security_attribute import SecurityAttribute
from sqlalchemy.orm import sessionmaker
from security_data.utils.error_handling import (SecurityAttributeAlreadyExistError,
SecurityAttributeNotExistError)
class SecurityAttributeServices:
def __init__(self, db):
self.logger = logging.getLogger(__name__)
self.db = db
def delete_all(self):
try:
session = sessionmaker(bind=self.db)()
session.query(SecurityAttribute).delete()
session.commit()
except Exception as e:
self.logger.error("Failed to delete all records in SecurityAttribute")
self.logger.error(e)
raise
finally:
session.close()
def create(self, security_attribute_info):
try:
session = sessionmaker(bind=self.db)()
has_record = bool(session.query(SecurityAttribute).filter_by(\
security_id_type=security_attribute_info['security_id_type'], \
security_id=security_attribute_info['security_id']).first()
)
if has_record:
message = "Record (" + security_attribute_info['security_id_type'] + "," + \
security_attribute_info['security_id'] + ") already exists"
self.logger.warn(message)
raise SecurityAttributeAlreadyExistError(message)
else:
security_attribute = SecurityAttribute(**security_attribute_info)
session.add(security_attribute)
session.commit()
self.logger.info("Record (" + security_attribute_info['security_id_type'] + "," + \
security_attribute_info['security_id'] + ") added successfully")
except SecurityAttributeAlreadyExistError:
#-- avoid SecurityAttributeAlreadyExistError being captured by Exception
raise
except Exception as e:
self.logger.error("Failed to add SecurityAttribute")
self.logger.error(e)
raise
finally:
session.close()
def update(self, security_attribute_info):
try:
session = sessionmaker(bind=self.db)()
security_attribute_to_update = session.query(SecurityAttribute).filter_by( \
security_id_type=security_attribute_info['security_id_type'], \
security_id=security_attribute_info['security_id']).first()
#-- throw error if security_base not exists
if not bool(security_attribute_to_update):
message = "Record (" + security_attribute_info['security_id_type'] + "," + \
security_attribute_info['security_id'] + ") not found"
self.logger.warn(message)
raise SecurityAttributeNotExistError(message)
#-- update transaction by updating the status to cancel
for key, value in security_attribute_info.items():
setattr(security_attribute_to_update, key, value)
session.commit()
self.logger.info("Record (" + security_attribute_info['security_id_type'] + "," + \
security_attribute_info['security_id'] + ") updated successfully")
except SecurityAttributeNotExistError:
#-- avoid SecurityAttributeNotExistError being captured by Exception
raise
except Exception as e:
self.logger.error("Failed to update SecurityAttribute")
self.logger.error(e)
raise
finally:
session.close()
def query(self, params):
try:
session = sessionmaker(bind=self.db)()
security_attributes = session.query(
SecurityAttribute.security_id_type.label("security_id_type"), \
SecurityAttribute.security_id.label("security_id"), \
SecurityAttribute.gics_sector.label("gics_sector"), \
SecurityAttribute.gics_industry_group.label("gics_industry_group"), \
SecurityAttribute.industry_sector.label("industry_sector"), \
SecurityAttribute.industry_group.label("industry_group"), \
SecurityAttribute.bics_sector_level_1.label("bics_sector_level_1"), \
SecurityAttribute.bics_industry_group_level_2.label("bics_industry_group_level_2"), \
SecurityAttribute.bics_industry_name_level_3.label("bics_industry_name_level_3"), \
SecurityAttribute.bics_sub_industry_name_level_4.label("bics_sub_industry_name_level_4"), \
SecurityAttribute.parent_symbol.label("parent_symbol"), \
SecurityAttribute.parent_symbol_chinese_name.label("parent_symbol_chinese_name"), \
SecurityAttribute.parent_symbol_industry_group.label("parent_symbol_industry_group"), \
SecurityAttribute.cast_parent_company_name.label("cast_parent_company_name"), \
SecurityAttribute.country_of_risk.label("country_of_risk"), \
SecurityAttribute.country_of_issuance.label("country_of_issuance"), \
SecurityAttribute.sfc_region.label("sfc_region"), \
SecurityAttribute.s_p_issuer_rating.label("s_p_issuer_rating"), \
SecurityAttribute.moody_s_issuer_rating.label("moody_s_issuer_rating"), \
SecurityAttribute.fitch_s_issuer_rating.label("fitch_s_issuer_rating"), \
SecurityAttribute.bond_or_equity_ticker.label("bond_or_equity_ticker"), \
SecurityAttribute.s_p_rating.label("s_p_rating"), \
SecurityAttribute.moody_s_rating.label("moody_s_rating"), \
SecurityAttribute.fitch_rating.label("fitch_rating"), \
SecurityAttribute.payment_rank.label("payment_rank"), \
SecurityAttribute.payment_rank_mbs.label("payment_rank_mbs"), \
SecurityAttribute.bond_classification.label("bond_classification"), \
SecurityAttribute.local_government_lgfv.label("local_government_lgfv"), \
SecurityAttribute.first_year_default_probability.label("first_year_default_probability"), \
SecurityAttribute.contingent_capital.label("contingent_capital"), \
SecurityAttribute.co_co_bond_trigger.label("co_co_bond_trigger"), \
SecurityAttribute.capit_type_conti_conv_tri_lvl.label("capit_type_conti_conv_tri_lvl"), \
SecurityAttribute.tier_1_common_equity_ratio.label("tier_1_common_equity_ratio"), \
SecurityAttribute.bail_in_capital_indicator.label("bail_in_capital_indicator"), \
SecurityAttribute.tlac_mrel_designation.label("tlac_mrel_designation"), \
SecurityAttribute.classif_on_chi_state_owned_enterp.label("classif_on_chi_state_owned_enterp"), \
SecurityAttribute.private_placement_indicator.label("private_placement_indicator"), \
SecurityAttribute.trading_volume_90_days.label("trading_volume_90_days")) \
.filter(SecurityAttribute.security_id_type == params['security_id_type'],
SecurityAttribute.security_id == params['security_id']) \
.order_by(SecurityAttribute.created_at)
#-- return as list of dictionary
def model2dict(row):
d = {}
for column in row.keys():
if column == "first_year_default_probability" or \
column == "tier_1_common_equity_ratio" or \
column == "trading_volume_90_days":
d[column] = float(getattr(row, column))
else:
d[column] = str(getattr(row, column))
return d
security_attribute_d = [model2dict(t) for t in security_attributes]
#self.logger.error("Print the list of dictionary output:")
#self.logger.debug(security_attribute_d)
return security_attribute_d
except Exception as e:
self.logger.error("Error message:")
self.logger.error(e)
raise
finally:
session.close() | UTF-8 | Python | false | false | 7,110 | py | 24 | security_attribute_services.py | 19 | 0.706751 | 0.703938 | 0 | 148 | 46.054054 | 102 |
ebennequin/meta-domain-shift | 4,509,715,664,198 | 0adec9170e6d68bb5bbc7e7b14cb215814bffb5c | e3b3747e3ccc7029c8fd99d9ae884482e6531ed2 | /configs/model_config.py | e8ec5f050849c9e02be43b67836736355eadcced | [] | no_license | https://github.com/ebennequin/meta-domain-shift | 87acef3f1c46dacf205044db1e5a987b8b93e8f7 | 4c7ca01afcb6df9113aaa255fa60c75ce3ccd507 | refs/heads/master | 2023-07-01T13:32:02.036322 | 2021-08-05T12:59:11 | 2021-08-05T12:59:11 | 320,583,661 | 11 | 1 | null | false | 2021-08-05T08:01:39 | 2020-12-11T13:36:08 | 2021-08-04T13:59:28 | 2021-08-05T08:01:38 | 6,302 | 3 | 0 | 1 | Python | false | false | from functools import partial
from src.modules.batch_norm import *
BATCHNORM = ConventionalBatchNorm
from src.modules.backbones import *
from src.modules import *
from src.methods import *
# Parameters of the model (method and feature extractor)
BACKBONE = Conv4
TRANSPORTATION_MODULE = OptimalTransport(
regularization=0.05,
learn_regularization=False,
max_iter=1000,
stopping_criterion=1e-4,
)
MODEL = partial(
ProtoNet,
transportation=TRANSPORTATION_MODULE,
)
| UTF-8 | Python | false | false | 494 | py | 64 | model_config.py | 47 | 0.757085 | 0.736842 | 0 | 25 | 18.76 | 56 |
emmettcowan/EC_Final_Year_Proejct | 14,886,356,668,204 | e5126f8d5031c5497550185a524cf52c6db0d074 | 215b867a43fe162580ad754f86f5833648dd27ae | /urlDetection.py | 1511b264236cfcc2a0a6cf84efc0ec635e0c79e0 | [] | no_license | https://github.com/emmettcowan/EC_Final_Year_Proejct | 7d73e8bb60c59f19405dcb324a115a585a1d518a | e32ba9b6c75a3817e9595768eeb056da9d67305a | refs/heads/master | 2023-04-22T02:03:27.724262 | 2021-05-17T14:32:25 | 2021-05-17T14:32:25 | 299,883,549 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #########################################################
# --- ***MONITOR*** --- #
# * * #
# --- *** --- By Emmett Cowan --- *** --- #
# * * #
# --- V 1 --- #
#########################################################
# This python script returns the url from chrome when it is the slected forground window
import uiautomation as auto # url info
import win32gui # active window info
import time
class Url():
active_window = ""
window = ""
def chromeUrl(self): # get url from chrome
window = win32gui.GetForegroundWindow() # Refrence :
chromeControl = auto.ControlFromHandle(window) # https://stackoverflow.com/questions/59595763/get-active-chrome-url-in-python
chromeWindow = chromeControl.EditControl() # "Get Active Chrome URL in Python"
try:
return '' + chromeWindow.GetValuePattern().Value
except:
return ''
| UTF-8 | Python | false | false | 1,176 | py | 34 | urlDetection.py | 5 | 0.410714 | 0.39966 | 0 | 30 | 38.166667 | 143 |
abhardwaj1998/Faculty-Webpage-Generator | 4,483,945,867,798 | 56610724c8a33331f97e6dbc83bb5a48004bc416 | 5cd7632cc442fae1870dc3d250a00da153c8b241 | /userprofile/admin.py | 1f1a385408eb9799be24a240041b011106f3da64 | [] | no_license | https://github.com/abhardwaj1998/Faculty-Webpage-Generator | 9da497ab810b21071609512fc7f352853798ebe3 | 4a7500ccfa6152ae4aac2301d8cc2d18c6fc4d40 | refs/heads/master | 2020-03-23T21:31:59.876836 | 2018-01-28T04:36:56 | 2018-01-28T04:36:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
# Register your models here.
from .models import *
admin.site.register(About_us)
admin.site.register(Teaching)
admin.site.register(Education)
admin.site.register(Experience)
admin.site.register(Publications)
admin.site.register(Projecting)
admin.site.register(Awards) | UTF-8 | Python | false | false | 302 | py | 35 | admin.py | 18 | 0.817881 | 0.817881 | 0 | 12 | 24.25 | 33 |
INFORMSJoC/2021.0326 | 15,023,795,618,290 | 867b67d69d9b8ea7196b712ee1acb91d9f612987 | 21429319fcea4e6590374c22daf748528234dce9 | /src/modelGoogleCP.py | 33799811cefa885c1a8894cbb76d78eab9d429c4 | [
"MIT"
] | permissive | https://github.com/INFORMSJoC/2021.0326 | d5bf6493f4467fa1112e1507d6b2c29dc8221df2 | 15b70e43d230fc7581bdaa1cb37139b5adaf842b | refs/heads/master | 2023-04-17T04:55:20.228845 | 2023-03-23T12:38:55 | 2023-03-23T12:38:55 | 568,541,984 | 6 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from ortools.sat.python import cp_model
import collections
# https://developers.google.com/optimization/reference/python/sat/python/cp_model#bestobjectivebound
#https://developers.google.com/optimization/reference/python/linear_solver/pywraplp#bestbound
###### main ########
def CPmodel_generation(instance,mdl,problemType):
if problemType == 'Flowshop':
pass
mdl = flowshopmodel(instance,mdl)
if problemType == 'Non-Flowshop': #done
mdl = Nonflowshopmodel(instance,mdl)
if problemType == 'Hybridflowshop': #done
mdl = Hybridflowshopmodel(instance,mdl)
if problemType == 'Distributedflowshop':
pass
mdl = Distributedflowshopmodel(instance,mdl)
if problemType == 'Nowaitflowshop':
mdl = Nowaitflowshopmodel(instance,mdl) #done
if problemType == 'Setupflowshop':
pass
mdl = Setupflowshopmodel(instance,mdl)
if problemType == 'Tardinessflowshop':
pass
mdl = Tardinessflowshopmodel(instance,mdl)
if problemType == 'TCTflowshop':
pass
mdl = TCTflowshopmodel(instance,mdl)
if problemType == 'Jobshop': #done
mdl = jobshopmodel(instance,mdl)
if problemType == 'Flexiblejobshop': #done
mdl = Flexiblejobshopmodel(instance,mdl)
if problemType == 'Openshop': #done
mdl = openshopmodel(instance,mdl)
if problemType == 'Parallelmachine': #done
mdl = prallelmachinemodel(instance,mdl)
return mdl
def Flexiblejobshopmodel(instance,mdl):
horizon = 100000
task_type = collections.namedtuple('task_type', 'start end is_present interval')
all_tasks = {}
machine_to_intervals = collections.defaultdict(list)
job_operation_to_intervals = [collections.defaultdict(list) for j in range(instance.n)]
for j in range(instance.n):
for k in range(instance.o[j]):
for i in range(instance.g):
if instance.p[j][k][i]== 0:
all_tasks[j, k, i] = []
else:
suffix = '_%i_%i_%i' % (j, k, i)
start_var = mdl.NewIntVar(0, horizon, 'start' + suffix)
end_var = mdl.NewIntVar(0, horizon, 'end' + suffix)
is_present_var = mdl.NewBoolVar('is_present'+ suffix)
interval_var = mdl.NewOptionalIntervalVar(start_var, instance.p[j][k][i], end_var, is_present_var, 'interval' + suffix)
all_tasks[j, k, i] = task_type(start=start_var, end=end_var, is_present =is_present_var, interval=interval_var)
machine_to_intervals[i].append(interval_var)
job_operation_to_intervals[j][k].append(interval_var)
for i in range(instance.g):
mdl.AddNoOverlap(machine_to_intervals[i])
for j in range(instance.n):
for k in range(instance.o[j]):
mdl.Add( sum([all_tasks[j, k, i].is_present for i in range(instance.g) if instance.p[j][k][i]>0]) == 1)
for j in range(instance.n):
for k in range(instance.o[j]-1):
for i in range(instance.g):
for i1 in range(instance.g):
if instance.p[j][k+1][i] > 0 and instance.p[j][k][i1]>0:
mdl.Add(all_tasks[j, k + 1, i].start >= all_tasks[j, k, i1].end )
# Makespan objective.
C_max = mdl.NewIntVar(0, horizon, 'C_max')
mdl.AddMaxEquality(C_max, [all_tasks[j, instance.o[j]-1, i].end for j in range(instance.n) for i in range(instance.g) if instance.p[j][instance.o[j]-1][i]>0])
mdl.Minimize(C_max)
return mdl
def Hybridflowshopmodel(instance,mdl):
    """Populate *mdl* (a CP-SAT model) with a hybrid flow-shop problem.

    Jobs pass through instance.g stages in the same order; stage i offers
    instance.m[i] parallel machines and instance.p[j][i] is the stage-i
    processing time of job j (identical on every machine of that stage).
    Exactly one machine is selected per (job, stage) and the makespan is
    minimised.  Returns the same *mdl* object.
    """
    # Fixed scheduling horizon; assumed large enough for any feasible
    # schedule of the intended instances -- TODO confirm against real data.
    horizon = 100000
    task_type = collections.namedtuple('task_type', 'start end is_present interval')
    all_tasks = {}
    # machine_to_intervals[stage][machine] -> intervals placed on it.
    machine_to_intervals = [ [] for i in range(instance.g)]
    for i in range(instance.g):
        machine_to_intervals[i] = [[] for k in range(instance.m[i])]
    #machine_to_intervals = collections.defaultdict(list)
    job_operation_to_intervals = [collections.defaultdict(list) for j in range(instance.n)]
    for j in range(instance.n):
        for i in range(instance.g):
            for k in range(instance.m[i]):
                suffix = '_%i_%i_%i' % (j, i, k)
                start_var = mdl.NewIntVar(0, horizon, 'start' + suffix)
                end_var = mdl.NewIntVar(0, horizon, 'end' + suffix)
                is_present_var = mdl.NewBoolVar('is_present'+ suffix)
                # Optional interval: enforced only when machine k of stage i
                # is the one selected for job j.
                interval_var = mdl.NewOptionalIntervalVar(start_var, instance.p[j][i], end_var, is_present_var, 'interval' + suffix)
                all_tasks[j, i, k] = task_type(start=start_var, end=end_var, is_present =is_present_var, interval=interval_var)
                machine_to_intervals[i][k].append(interval_var)
                job_operation_to_intervals[j][i].append(interval_var)
    # Each physical machine runs at most one selected interval at a time.
    for i in range(instance.g):
        for k in range(instance.m[i]):
            mdl.AddNoOverlap(machine_to_intervals[i][k])
    # Exactly one machine is chosen per (job, stage).
    for j in range(instance.n):
        for i in range(instance.g):
            mdl.Add( sum([all_tasks[j, i, k].is_present for k in range(instance.m[i])]) == 1)
    # Stage precedence for every pair of candidate machines.  NOTE(review):
    # unconditional (not guarded by is_present); relies on absent intervals'
    # start/end variables being otherwise free -- verify.
    for j in range(instance.n):
        for i in range(instance.g-1):
            for k in range(instance.m[i+1]):
                for k1 in range(instance.m[i]):
                    mdl.Add(all_tasks[j, i + 1, k].start >= all_tasks[j, i, k1].end )
    # Makespan objective.
    C_max = mdl.NewIntVar(0, horizon, 'C_max')
    mdl.AddMaxEquality(C_max, [all_tasks[j, instance.g-1, k].end for j in range(instance.n) for k in range(instance.m[instance.g-1])])
    mdl.Minimize(C_max)
    return mdl
def openshopmodel(instance,mdl):
    """Populate *mdl* (a CP-SAT model) with an open-shop scheduling problem.

    Every job visits every machine exactly once, in any order.  Intervals on
    the same machine must not overlap, and intervals of the same job must not
    overlap either.  The makespan is minimised.  Returns the same *mdl*.
    """
    # A trivially safe horizon: all operations processed back to back.
    horizon = sum([sum(instance.p[j]) for j in range(instance.n)])
    task_type = collections.namedtuple('task_type', 'start end interval')
    all_tasks = {}
    machine_to_intervals = collections.defaultdict(list)
    job_to_intervals = collections.defaultdict(list)
    for job in range(instance.n):
        for mach in range(instance.g):
            suffix = '_%i_%i' % (job, mach)
            start_var = mdl.NewIntVar(0, horizon, 'start' + suffix)
            end_var = mdl.NewIntVar(0, horizon, 'end' + suffix)
            interval_var = mdl.NewIntervalVar(start_var, instance.p[job][mach], end_var, 'interval' + suffix)
            all_tasks[job, mach] = task_type(start=start_var, end=end_var, interval=interval_var)
            machine_to_intervals[mach].append(interval_var)
            job_to_intervals[job].append(interval_var)
    # One operation at a time on each machine ...
    for mach in range(instance.g):
        mdl.AddNoOverlap(machine_to_intervals[mach])
    # ... and one operation at a time within each job.
    for job in range(instance.n):
        mdl.AddNoOverlap(job_to_intervals[job])
    # Makespan: latest end time over all operations.
    C_max = mdl.NewIntVar(0, horizon, 'C_max')
    mdl.AddMaxEquality(C_max, [all_tasks[job, mach].end for job in range(instance.n) for mach in range(instance.g)])
    mdl.Minimize(C_max)
    return mdl
def prallelmachinemodel(instance,mdl):
    """Populate *mdl* (a CP-SAT model) with a parallel-machine assignment problem.

    Y[j][i] is 1 iff job j is assigned to machine i; every job is assigned to
    exactly one machine and the makespan (largest machine load) is minimised.
    Returns the same *mdl* object.
    """
    Y = [[mdl.NewBoolVar(f'Y[{j}][{i}]') for i in range(instance.g)] for j in range(instance.n)]
    # Upper bound for the makespan: every job at its slowest machine.
    upper_bound = sum([max(instance.p[j]) for j in range(instance.n)])
    C_max = mdl.NewIntVar(0, upper_bound, 'C_max')
    for j in range(instance.n):
        mdl.AddExactlyOne(Y[j][i] for i in range(instance.g))
    # Machine load = total processing time of the jobs assigned to it.
    loads = [sum([instance.p[j][i] * Y[j][i] for j in range(instance.n)]) for i in range(instance.g)]
    mdl.AddMaxEquality(C_max, loads)
    mdl.Minimize(C_max)
    return mdl
def Nowaitflowshopmodel(instance,mdl):
    """Populate *mdl* (a CP-SAT model) with a no-wait flow-shop problem.

    All jobs visit machines 0..g-1 in the same order, and consecutive
    operations of a job are glued together: each one starts exactly when the
    previous ends.  The makespan is minimised.  Returns the same *mdl*.
    """
    # Safe horizon: every operation processed sequentially.
    horizon = sum([sum(instance.p[j]) for j in range(instance.n)])
    task_type = collections.namedtuple('task_type', 'start end interval')
    all_tasks = {}
    machine_to_intervals = collections.defaultdict(list)
    for job in range(instance.n):
        for stage in range(instance.g):
            suffix = '_%i_%i' % (job, stage)
            start_var = mdl.NewIntVar(0, horizon, 'start' + suffix)
            end_var = mdl.NewIntVar(0, horizon, 'end' + suffix)
            interval_var = mdl.NewIntervalVar(start_var, instance.p[job][stage], end_var, 'interval' + suffix)
            all_tasks[job, stage] = task_type(start=start_var, end=end_var, interval=interval_var)
            machine_to_intervals[stage].append(interval_var)
    # One job at a time on each machine.
    for stage in range(instance.g):
        mdl.AddNoOverlap(machine_to_intervals[stage])
    # No-wait coupling between consecutive stages of the same job.
    for job in range(instance.n):
        for stage in range(instance.g - 1):
            mdl.Add(all_tasks[job, stage + 1].start == all_tasks[job, stage].end)
    # Makespan: latest completion of the final stage.
    C_max = mdl.NewIntVar(0, horizon, 'C_max')
    mdl.AddMaxEquality(C_max, [all_tasks[job, instance.g - 1].end for job in range(instance.n)])
    mdl.Minimize(C_max)
    return mdl
def Nonflowshopmodel(instance,mdl):
    """Populate *mdl* (a CP-SAT model) with a (permutation-free) flow-shop problem.

    All jobs visit machines 0..g-1 in the same order; unlike the no-wait
    variant, a job may idle between machines (operation i merely starts no
    earlier than operation i-1 ends).  The makespan is minimised.
    Returns the same *mdl* object.
    """
    # Safe horizon: every operation processed sequentially.
    horizon = sum([sum(instance.p[j]) for j in range(instance.n)])
    task_type = collections.namedtuple('task_type', 'start end interval')
    machine_to_intervals = collections.defaultdict(list)
    all_tasks = {}
    for j in range(instance.n):
        for i in range(instance.g):
            name = '_%i_%i' % (j, i)
            s = mdl.NewIntVar(0, horizon, 'start' + name)
            e = mdl.NewIntVar(0, horizon, 'end' + name)
            iv = mdl.NewIntervalVar(s, instance.p[j][i], e, 'interval' + name)
            all_tasks[j, i] = task_type(start=s, end=e, interval=iv)
            machine_to_intervals[i].append(iv)
    # One job at a time on each machine.
    for i in range(instance.g):
        mdl.AddNoOverlap(machine_to_intervals[i])
    # In-job precedence: operation i cannot start before operation i-1 ends.
    for j in range(instance.n):
        for i in range(1, instance.g):
            mdl.Add(all_tasks[j, i].start >= all_tasks[j, i - 1].end)
    # Makespan: latest completion of the last machine.
    C_max = mdl.NewIntVar(0, horizon, 'C_max')
    last_ends = [all_tasks[j, instance.g - 1].end for j in range(instance.n)]
    mdl.AddMaxEquality(C_max, last_ends)
    mdl.Minimize(C_max)
    return mdl
def jobshopmodel(instance,mdl):
    """Populate *mdl* (a CP-SAT model) with the classic job-shop problem.

    instance.r gives, per job, the (1-based) machine visited at each step and
    instance.p the matching processing time.  Machines handle one operation at
    a time, operations of a job follow their routing order, and the makespan
    is minimised.  Returns the same *mdl* object.
    """
    # Reshape the instance into (machine, duration) pairs per job; routing
    # entries are 1-based while the model uses 0-based machine indices.
    jobs_data = [[(instance.r[j][i] - 1, instance.p[j][i]) for i in range(instance.g)]
                 for j in range(instance.n)]
    horizon = sum(duration for job in jobs_data for (_machine, duration) in job)
    task_type = collections.namedtuple('task_type', 'start end interval')
    all_tasks = {}
    machine_to_intervals = collections.defaultdict(list)
    for job_id, job in enumerate(jobs_data):
        for task_id, (machine, duration) in enumerate(job):
            suffix = '_%i_%i' % (job_id, task_id)
            start_var = mdl.NewIntVar(0, horizon, 'start' + suffix)
            end_var = mdl.NewIntVar(0, horizon, 'end' + suffix)
            interval_var = mdl.NewIntervalVar(start_var, duration, end_var, 'interval' + suffix)
            all_tasks[job_id, task_id] = task_type(start=start_var, end=end_var, interval=interval_var)
            machine_to_intervals[machine].append(interval_var)
    # One operation at a time per machine.
    for i in range(instance.g):
        mdl.AddNoOverlap(machine_to_intervals[i])
    # Routing precedence inside each job.
    for j in range(instance.n):
        for i in range(instance.g - 1):
            mdl.Add(all_tasks[j, i + 1].start >= all_tasks[j, i].end)
    # Makespan: latest completion of each job's final operation.
    C_max = mdl.NewIntVar(0, horizon, 'C_max')
    mdl.AddMaxEquality(C_max, [all_tasks[j, len(job) - 1].end for j, job in enumerate(jobs_data)])
    mdl.Minimize(C_max)
    return mdl
| UTF-8 | Python | false | false | 11,241 | py | 4,712 | modelGoogleCP.py | 10 | 0.603772 | 0.597901 | 0 | 271 | 40.479705 | 162 |
vdikan/aiida_siesta_plugin | 18,227,841,216,940 | 00b7d693c7e8d106202fe8f10c08bad460c13c61 | bbcb4e757466f04abde79e4e1e0e981b630fc9f9 | /aiida_siesta/utils/generator_absclass.py | 23d717baa43066c59ba8be4b7b67e6b9f658af78 | [
"MIT"
] | permissive | https://github.com/vdikan/aiida_siesta_plugin | 1e9366ee94f5ad93fb8828da25582364b36b8f8e | 932192d7a2242112523a645d1dbe37d65df05ac6 | refs/heads/master | 2022-09-11T06:52:35.083362 | 2022-03-08T08:14:59 | 2022-03-08T08:14:59 | 92,830,999 | 0 | 0 | NOASSERTION | true | 2019-05-13T11:46:16 | 2017-05-30T12:33:48 | 2019-03-25T17:47:42 | 2019-05-13T11:45:27 | 745 | 0 | 0 | 0 | Python | false | false | import warnings
from aiida_siesta.utils.warn import AiidaSiestaDeprecationWarning
from .protocols_system.generator_absclass import InputGenerator
message = ( #pylint: disable=invalid-name
'This module has been deprecated and will be removed in `v2.0.0`. `InputsGenerator` is now `InputGenerator` ' +
'in `aiida_siesta.utils.protocols_sysyem.generator_absclass`.'
)
warnings.warn(message, AiidaSiestaDeprecationWarning)
class InputsGenerator(InputGenerator): #pylint: disable=abstract-method
"""
Mirror of new class to keep back compatibility. Since abstract class, can't be instanciated.
"""
| UTF-8 | Python | false | false | 618 | py | 143 | generator_absclass.py | 84 | 0.771845 | 0.76699 | 0 | 16 | 37.625 | 115 |
ericye16/WEC-programming | 7,232,724,933,750 | 480d5078d832dcb1def496164cb8725b683d1300 | b9b1b26d1922f95e3129994985de7e578c427375 | /elastic.py | e20d76675958b4e226ae92f9138b719873e9c51c | [] | no_license | https://github.com/ericye16/WEC-programming | 1ed24b765d43a556fb2a401c24c4b1d48bdc4ca6 | b00375628d4a4ce2983cf4e43d68cac5e57872f3 | refs/heads/master | 2021-01-01T05:22:11.855770 | 2014-11-08T03:26:41 | 2014-11-08T03:26:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from file_read import read_map
# The four cardinal grid moves as (dy, dx) offsets: right, left, down, up.
transitions = (
    (0, 1),
    (0, -1),
    (1, 0),
    (-1, 0)
    )
def between(x, min, max):
    """Return True when x lies in the half-open interval [min, max).

    NOTE: the parameter names shadow the builtins min/max; they are kept
    unchanged for compatibility with existing keyword callers.
    """
    return min <= x < max
def is_street(ch):
    """Return True when the map cell is walkable: a house 'H' or open space ' '."""
    return ch in ('H', ' ')
def make_transition(y, x, transition):
    """Apply a (dy, dx) offset to the cell (y, x) and return the new cell."""
    dy, dx = transition
    return y + dy, x + dx
# def construct_graph(mapp):
# g = Graph()
# for y in range(len(mapp)):
# for x in range(len(mapp[y])):
# if isStreet(mapp[y][x]):
# g.add_node((y, x))
# if (y, x) not in g.edges:
# for transition in transitions:
# yNew = y + transition[0]
# xNew = x + transition[1]
# if (between(yNew, 0, len(mapp))
# and between(xNew, 0, len(mapp[y]))):
# if isStreet(mapp[yNew][xNew]):
# g.add_edge((y, x), (yNew, xNew), 1)
# return g
def dist(y1, x1, y2, x2):
    """Manhattan (taxicab) distance between grid cells (y1, x1) and (y2, x2)."""
    return abs(y2 - y1) + abs(x2 - x1)
def shortest_path(mapp, y1, x1, y2, x2):
    '''Greedily walk from (y1, x1) towards (y2, x2) on the map.

    Repeatedly steps to the unvisited street neighbour with the smallest
    Manhattan distance to the target, so the result is a best-effort path,
    not a guaranteed shortest one.  Returns a tuple (path length, list of
    (y, x) cells including both endpoints), or None when the walk dead-ends
    before reaching the target.

    NOTE(review): the endpoints themselves are never checked with
    is_street(); callers are assumed to pass street coordinates.
    '''
    yCurrent, xCurrent = y1, x1
    path = []
    visited = set()
    while yCurrent != y2 or xCurrent != x2:
        # Score each of the four moves; infinity marks off-map, non-street
        # or already-visited cells.
        dists = [float('inf')] * 4
        for i, transition in enumerate(transitions):
            yNew, xNew = make_transition(yCurrent, xCurrent, transition)
            if between(yNew, 0, len(mapp)) and between(xNew, 0, len(mapp[0])):
                if (yNew, xNew) not in visited and is_street(mapp[yNew][xNew]):
                    dists[i] = dist(yNew, xNew, y2, x2)
        best = min(dists)
        if best == float('inf'):
            # Dead end: every neighbour is blocked.  (Bug fix: the previous
            # version still picked transitions[0] here, stepped through a
            # wall/visited cell and could falsely report success.)
            return None
        bestTransition = transitions[dists.index(best)]
        visited.add((yCurrent, xCurrent))
        path.append((yCurrent, xCurrent))
        yCurrent, xCurrent = make_transition(yCurrent, xCurrent, bestTransition)
    path.append((yCurrent, xCurrent))
    return len(path), path
def shortest_path_nice(mapp, loca, locb):
    """Wrap shortest_path() for tuple endpoints.

    loca and locb are (y, x) tuples.  Returns a list of step dicts of the
    form {'action': 'drive', 'y': ..., 'x': ...}, or None when no path was
    found.
    """
    result = shortest_path(mapp, loca[0], loca[1], locb[0], locb[1])
    if result is None:
        return None
    steps = []
    for (step_y, step_x) in result[1]:
        steps.append({'action': 'drive',
                      'y': step_y,
                      'x': step_x})
    return steps
if __name__ == "__main__":
    # Demo entry point (note: Python 2 print-statement syntax): load the map
    # and drive between two hard-coded coordinates.
    m = read_map()
    print shortest_path_nice(m, (5, 1), (6, 6))
| UTF-8 | Python | false | false | 2,889 | py | 1 | elastic.py | 1 | 0.498789 | 0.476982 | 0 | 103 | 27.048544 | 79 |
bartlomiejurczyk/python_training | 5,643,587,050,553 | f889420fcc3bf15072263606641fdbdadd6b0c1a | 1b30049304a5537665121b4eac270a111a9f7de8 | /test/test_phones.py | 716f0e285781b4274ba3a0f8a47a6c56058591d4 | [
"Apache-2.0"
] | permissive | https://github.com/bartlomiejurczyk/python_training | fd5341f39d6b7f1b6c0c39deee8091c81ca406d8 | e1d6396d25058c1acaa81508e82394606b795f24 | refs/heads/master | 2021-05-18T04:12:29.888309 | 2020-05-13T19:09:35 | 2020-05-13T19:09:35 | 251,100,968 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
def test_phones_on_home_page(app):
contact_from_home_page = app.contact.get_contact_list()[0]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_homepage(contact_from_edit_page)
def test_phones_on_contact_view_page(app):
contact_from_view_page = app.contact.get_contact_from_view_page(0)
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_view_page.telephone_home == contact_from_edit_page.telephone_home
assert contact_from_view_page.telephone_mobile == contact_from_edit_page.telephone_mobile
assert contact_from_view_page.telephone_work == contact_from_edit_page.telephone_work
assert contact_from_view_page.secondary_home == contact_from_edit_page.secondary_home
def clear(s):
return re.sub("[() -]", "", s)
def merge_phones_like_on_homepage(contact):
return "\n".join(
filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None, [contact.telephone_home, contact.telephone_mobile, contact.telephone_work, contact.secondary_home]))))
| UTF-8 | Python | false | false | 1,205 | py | 15 | test_phones.py | 13 | 0.706224 | 0.702905 | 0 | 27 | 43.62963 | 153 |
WUVsUQkP/data-structures-and-algorithms-in-python-solutions | 11,201,274,714,694 | 2ba07bd480ef37ad489b4c5f7947b78e5650ef18 | 34533cf7a8e9084907a86ba87a13bedf9cd7dd58 | /solutions/exercises/P/ch1/ex36/word_count | 8c5666d9166570ee7379c29396396fa17bd012c8 | [
"MIT"
] | permissive | https://github.com/WUVsUQkP/data-structures-and-algorithms-in-python-solutions | 5efba44d6e49503351a3b1a90cb73f578a908168 | dc0364159f2ad4e51f3f7434e6de9a1742ccb9dc | refs/heads/master | 2022-01-26T10:11:55.072287 | 2018-09-18T03:34:34 | 2018-09-18T03:34:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# ch1_ex36_sc1
import re
def word_count(whitespace_deliminated_list):
    """Return a dict mapping each lowercased word to its occurrence count.

    Words are the non-empty tokens obtained by splitting the input string on
    runs of whitespace; counting is case-insensitive.
    """
    counts = {}
    for token in re.split(r"\s+", whitespace_deliminated_list):
        if token:
            word = token.lower()
            counts[word] = counts.get(word, 0) + 1
    return counts
if __name__ == "__main__":
    # Interactive demo: count the words typed on stdin and print the tallies.
    print(word_count(input("Please enter a list of words separated by whitespace: ")))
| UTF-8 | Python | false | false | 429 | 87 | word_count | 63 | 0.638695 | 0.620047 | 0 | 15 | 27.6 | 113 |
|
beauvilerobed/data-structure-algorithm-practice | 7,224,135,012,756 | 5f685783fe5cbbee9abb7291d17e28531f7e4c51 | 0698b9339855ceb6f43223ec559c089b8f4efb45 | /merge_arrays/tests.py | 2d725afb2eb1f332cbf464737251403bce468688 | [] | no_license | https://github.com/beauvilerobed/data-structure-algorithm-practice | a90041eed6a5cadc28fbadc5b082db158d884fd3 | ecaa600782419297090f7878f2a70b28a0338a12 | refs/heads/main | 2023-01-20T12:41:20.849994 | 2022-01-26T04:51:17 | 2022-01-26T04:51:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
import random
from merge_array import merge_array, merge_array_naive
class MergeArray(unittest.TestCase):
    """Tests for merge_array: hand-checked answers plus a randomised
    comparison against the naive reference implementation."""

    def test_small(self):
        # Hand-verified merges of tiny sorted inputs, including duplicates.
        test_cases = [
            ([1], [2], [1, 2]),
            ([1], [1], [1, 1]),
            ([1, 2, 3, 4], [5, 6, 7], [1, 2, 3, 4, 5, 6, 7]),
            ([1, 2, 7, 7, 10], [3, 5, 7], [1, 2, 3, 5, 7, 7, 7, 10])
        ]
        for nums1, nums2, answer in test_cases:
            self.assertEqual(merge_array(nums1, nums2), answer)

    def test_large(self):
        # Large inputs dominated by repeated values.
        test_cases = [
            ([1] * 100, [0] * 1000, [0] * 1000 + [1] * 100),
            ([1] * 10 ** 5, [7], [1] * 10 ** 5 + [7])
        ]
        for nums1, nums2, answer in test_cases:
            self.assertEqual(merge_array(nums1, nums2), answer)

    def test_stress(self):
        # Randomised cross-check against the naive implementation.
        # Bug fix: previously 10,000 fixtures were generated but only the
        # first 1,000 were ever consumed by the pairing loop below.
        test_cases = []
        for _ in range(1000):
            temp = [random.randint(1, 1000)
                    for _ in range(random.randint(50, 100))]
            temp.sort()
            test_cases.append(temp)
        for i in range(0, 1000, 2):
            nums1 = test_cases[i]
            nums2 = test_cases[i + 1]
            self.assertEqual(merge_array(nums1, nums2),
                             merge_array_naive(nums1, nums2))
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| UTF-8 | Python | false | false | 1,369 | py | 197 | tests.py | 181 | 0.477721 | 0.401753 | 0 | 43 | 30.837209 | 68 |
bengxy/FastNeuralStyle | 5,583,457,512,003 | 248642a5e4ff205f151ae77c75bf44228b4f1f32 | 21f2ad563c2cb6e5f878601ec35a5ddaa3ff8423 | /net.py | c2bd47464db2c5160f106f1f60ecaac3822c3794 | [
"MIT"
] | permissive | https://github.com/bengxy/FastNeuralStyle | 3fa852486432cc9dccd626436a37b2d91cea1e93 | 6d973e63a5e2ef249c716c688175c1d1365e2e0d | refs/heads/master | 2022-02-07T14:08:21.032419 | 2022-01-24T04:39:40 | 2022-01-24T04:39:40 | 82,258,763 | 94 | 19 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
class ResBlock(nn.Module):
    """Residual block: two 3x3 conv+BN stages with an identity skip.

    Output is x + BN(conv2(relu(BN(conv1(x))))); channel count and spatial
    size are preserved.
    """

    def __init__(self, channel):
        super(ResBlock, self).__init__()
        # Layer creation order is kept stable so parameter initialisation
        # (and any saved state_dict) matches the previous definition.
        self.conv1 = nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(channel)
        self.conv2 = nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(channel)

    def forward(self, x):
        out = self.conv1(x)
        out = self.bn1(out)
        out = F.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        # Identity skip connection.
        return x + out
class Vgg16Part(nn.Module):
    """Truncated VGG-16 convolutional stack used as a feature extractor.

    forward() returns the activations [relu1_2, relu2_2, relu3_3, relu4_3].
    Attribute names mirror the canonical VGG-16 layer naming so pretrained
    weights can be loaded by name -- do not rename them.
    """
    def __init__(self):
        super(Vgg16Part, self).__init__()
        # Block 1: 3 -> 64 channels.
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
        # Block 2: 64 -> 128 channels.
        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        # Block 3: 128 -> 256 channels.
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        # Block 4: 256 -> 512 channels.
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        # Block 5 is declared (e.g. for checkpoint compatibility) but is
        # never used in forward().
        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
    def forward(self, X):
        """Run blocks 1-4, collecting the tap-point activations."""
        h = F.relu(self.conv1_1(X))
        h = F.relu(self.conv1_2(h))
        relu1_2 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)
        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        relu2_2 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)
        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        relu3_3 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)
        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        relu4_3 = h
        return [relu1_2,relu2_2,relu3_3,relu4_3]
class StylePart(nn.Module):
    """Feed-forward image transformation network (fast neural style).

    Encoder (9x9 conv, then two stride-2 downsampling convs to 128 channels),
    five ResBlock residual blocks, then a mirrored decoder (two stride-2
    transposed convs and a final 9x9 conv back to 3 channels).  The output is
    the raw conv result -- no final activation is applied.
    """
    def __init__(self):
        super(StylePart, self).__init__()
        # Encoder: 3 -> 32 -> 64 -> 128 channels, spatial size divided by 4.
        self.conv1 = nn.Conv2d(3, 32, kernel_size=9, stride=1, padding=4)
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        # Transformation core: five residual blocks at 128 channels.
        self.res1 = ResBlock(128)
        self.res2 = ResBlock(128)
        self.res3 = ResBlock(128)
        self.res4 = ResBlock(128)
        self.res5 = ResBlock(128)
        # Decoder: upsample back to the input resolution and 3 channels.
        self.deconv1 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1)
        self.bn4 = nn.BatchNorm2d(64)
        self.deconv2 = nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1)
        self.bn5 = nn.BatchNorm2d(32)
        self.deconv3 = nn.Conv2d(32, 3, kernel_size=9, stride=1, padding=4)
    def forward(self, X):
        """Encode, transform through the residual blocks, and decode."""
        h = F.relu(self.bn1(self.conv1(X)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = F.relu(self.bn3(self.conv3(h)))
        h = self.res1(h)
        h = self.res2(h)
        h = self.res3(h)
        h = self.res4(h)
        h = self.res5(h)
        h = F.relu(self.bn4(self.deconv1(h)))
        h = F.relu(self.bn5(self.deconv2(h)))
        y = self.deconv3(h)
        return y
aokitashipro/newbook_django | 7,928,509,655,696 | 680af914bf59fe769739636dcb9d4116ceb2a520 | 44d089a98ee628fda2826a75127a4370bae4f641 | /newbook/models/Plan.py | 9c564ddc2db7aa9b5f5eb033918caa4fafd8d1a6 | [] | no_license | https://github.com/aokitashipro/newbook_django | d8684d96ce055ee0852563e79e958347194f97d2 | 1d36592c4a909caee616bc3bab6530b26cdee8f6 | refs/heads/master | 2020-04-01T23:31:10.787990 | 2018-10-25T03:51:24 | 2018-10-25T03:51:24 | 153,761,637 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import uuid
from django.db import models
from django.utils import timezone
from newbook.models import Hotel, Room
class Plan(models.Model):
class Meta:
db_table = 'plan'
unique_together = (('hotel_id', 'plan_id'))
id = models.UUIDField(primary_key=True, default=uuid.uuid4, unique=True, editable=False)
hotel_id = models.ForeignKey('Hotel', on_delete=models.CASCADE)
plan_id = models.CharField(max_length=10)
plan_name = models.CharField(max_length=10)
def __str__(self):
return self.plan_id
| UTF-8 | Python | false | false | 545 | py | 27 | Plan.py | 21 | 0.684404 | 0.675229 | 0 | 19 | 27.684211 | 92 |
SevenJiao/GetMoreQrCode | 10,033,043,649,028 | 050803ae6e6dd33d1dafd87b520542e090e9c8ec | 25984457a81329041579241074bc6d70337869da | /run.py | 175944486268c321c58804dd9877aadce97be93b | [] | no_license | https://github.com/SevenJiao/GetMoreQrCode | 5e7c190312e25f39be9716076df7d1f6aa30cb90 | 6c7278e12cf5684a6072956e44ad8d48b7e1b286 | refs/heads/master | 2020-09-12T06:13:54.571063 | 2019-11-26T03:35:32 | 2019-11-26T03:35:32 | 222,337,596 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import sys
import tkinter as tk
from tkinter import filedialog
import numpy as np
import pandas as pd
import qrcode
'''
Batch-generate QR codes: read values from an Excel column and save one QR
code PNG per value, driven by a small Tkinter GUI.
'''
# Shared QR encoder, reused for every value via add_data()/make()/clear().
qr = qrcode.QRCode(
    error_correction=qrcode.constants.ERROR_CORRECT_H,
    box_size=10,
    border=4
)
class run:
    """Tkinter application that batch-generates QR codes from an Excel column.

    The user picks an .xlsx/.xls file, enters the sheet name, the data column
    name and an output folder; one PNG per cell value is written under the
    script's directory.
    """
    def __init__(self, window):
        """Build the widgets on *window* and enter the Tk main loop."""
        self.window = window
        self.label_path = tk.StringVar()
        # Script directory, normalised to Windows-style backslashes.
        self.curr_path = sys.path[0].replace('/', "\\")
        self.label_path.set('当前目录:'+self.curr_path)
        self.sheet_name = tk.StringVar()
        self.data_col = tk.StringVar()
        self.res_dir = tk.StringVar()
        self.label_res = tk.StringVar()
        self.gui()
        self.window.mainloop()
    def choose_file(self):
        """Open a file dialog and display the chosen Excel file's path."""
        file_name = filedialog.askopenfile(title='选择文件',
                                           filetypes=[("excel文件", "*.xlsx"),
                                                      ('excel2003文件', '*.xls')],
                                           initialdir='g:/')
        # NOTE(review): if the dialog is cancelled askopenfile returns None
        # and .name would raise -- confirm intended behaviour.
        self.label_path.set('当前选择:'+file_name.name.replace('/', "\\"))
    def check_dir(self, dir):
        """Ensure the output directory exists under the script directory
        and return its absolute path."""
        pathdir = self.curr_path+'\\'+dir
        isexit = os.path.exists(pathdir)
        if not isexit:
            os.makedirs(pathdir)
        return pathdir
    def insert_start(self, var):
        """Prepend *var* to the result text box."""
        self.t4.insert(1.0, var)
    def start(self):
        """Read the configured Excel column and write one QR PNG per value."""
        sheet_name = self.sheet_name.get()
        data_col = self.data_col.get()
        res_dir = self.res_dir.get()
        path_dir = self.check_dir(res_dir)
        # Strip the 5-character display prefix ('当前目录:' / '当前选择:')
        # to recover the raw file path.
        labelpath = self.label_path.get()[5::]
        df = pd.read_excel(labelpath, sheet_name=sheet_name)
        # Flatten the selected column into a 1-D array of values.
        file = df.loc[:, [data_col]].values.ravel()
        i = 0
        for readline in file:
            qr.add_data(readline)
            qr.make(fit=True)
            img = qr.make_image()
            filename = str(readline)+'.png'
            readline = ''
            img.save(path_dir+'\\'+filename)
            # Reset the shared encoder for the next value.
            qr.clear()
            i = i+1
            res = '\ndone No. '+str(i)
            self.insert_start(res)
            print('done No. '+str(i))
            # Keep the GUI responsive while looping.
            self.window.update()
        res_total = 'sucess '+str(i)+'!'
        self.insert_start(res_total)
        print(res_total)
        self.window.update()
    def gui(self):
        """Create and place all widgets."""
        # Label showing the current directory / selected file path.
        self.l0 = tk.Label(self.window, textvariable=self.label_path,
                           fg='black', height=2, wraplength=400,
                           justify='left', font=('Arial', 12))
        self.l0.place(x=20, y=0, anchor='nw')
        # Button to pick the Excel file.
        self.btnChoose = tk.Button(self.window, text='选择excel文件', font=('Arial', 12),
                                   width=10, height=1, command=self.choose_file)
        self.btnChoose.place(x=480, y=5, anchor='nw')
        # Entry fields: sheet name, data column name, output folder name.
        self.l1 = tk.Label(self.window, text='请输入Sheet名:',
                           fg='black', height=2, font=('Arial', 12))
        self.l1.place(x=130, y=50, anchor='nw')
        self.e1 = tk.Entry(self.window, textvariable=self.sheet_name,
                           show=None, font=('Arial', 16))
        self.e1.place(x=260, y=60, anchor='nw')
        self.l2 = tk.Label(self.window, text='请输入数据列名:',
                           fg='black', height=2, font=('Arial', 12))
        self.l2.place(x=130, y=100, anchor='nw')
        self.e2 = tk.Entry(self.window, textvariable=self.data_col,
                           show=None, font=('Arial', 16))
        self.e2.place(x=260, y=110, anchor='nw')
        self.l3 = tk.Label(self.window, text='输出文件夹名称:',
                           fg='black', height=2, font=('Arial', 12))
        self.l3.place(x=130, y=150, anchor='nw')
        self.e3 = tk.Entry(self.window, textvariable=self.res_dir,
                           show=None, font=('Arial', 16))
        self.e3.place(x=260, y=160, anchor='nw')
        # Start button.
        self.btnstart = tk.Button(self.window, text='开始生成', font=('Arial', 18),
                                  width=10, height=1, command=self.start)
        self.btnstart.place(x=240, y=200)
        # Progress / result display area.
        self.t4 = tk.Text(self.window, width=600)
        self.t4.place(x=0, y=300)
if __name__ == '__main__':
    # Build the Tk root window and hand control over to the application.
    w = tk.Tk()
    w.title('批量生成二维码')
    w.geometry('600x600')
    r = run(w)
| UTF-8 | Python | false | false | 4,377 | py | 2 | run.py | 1 | 0.511661 | 0.483157 | 0 | 121 | 34.082645 | 85 |
MaoXianXin/codeLab | 16,166,256,910,394 | 7104ccea05ce30f83caeeb2f559e0ae1feda09f8 | f871abc2349feea99ae4a25d267cb01989485287 | /cifar100.py | 5ab135081d00f7df2a3f42a733a2d9d37af9fe43 | [] | no_license | https://github.com/MaoXianXin/codeLab | 02e089ac5142585832fc819b8f65d94ce3817d2a | d1402bbf145b2c28ebac0681308362cb9bd89402 | refs/heads/master | 2020-06-07T17:50:14.029513 | 2019-06-21T09:20:17 | 2019-06-21T09:20:17 | 193,066,777 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # USAGE
# python train_auto_keras.py
# import the necessary packages
from sklearn.metrics import classification_report
from tensorflow.keras.datasets import cifar100
import autokeras as ak
import os
def main():
# initialize the output directory
OUTPUT_PATH = "output"
# initialize the list of trianing times that we'll allow
# Auto-Keras to train for
TRAINING_TIMES = [
60 * 60, # 1 hour
60 * 60 * 2, # 2 hours
60 * 60 * 4, # 4 hours
60 * 60 * 8, # 8 hours
60 * 60 * 12, # 12 hours
60 * 60 * 24, # 24 hours
]
# load the training and testing data, then scale it into the
# range [0, 1]
print("[INFO] loading CIFAR-100 data...")
((trainX, trainY), (testX, testY)) = cifar100.load_data()
trainX = trainX.astype("float") / 255.0
testX = testX.astype("float") / 255.0
# initialize the label names for the CIFAR-10 dataset
labelNames = [
'apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle',
'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel',
'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock',
'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur',
'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster',
'house', 'kangaroo', 'keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion',
'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain', 'mouse',
'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear',
'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine',
'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose',
'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake',
'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table',
'tank', 'telephone', 'television', 'tiger', 'tractor', 'train', 'trout',
'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf', 'woman',
'worm'
]
# loop over the number of seconds to allow the current Auto-Keras
# model to train for
for seconds in TRAINING_TIMES:
# train our Auto-Keras model
print("[INFO] training model for {} seconds max...".format(
seconds))
model = ak.ImageClassifier(verbose=True)
model.fit(trainX, trainY, time_limit=seconds)
model.final_fit(trainX, trainY, testX, testY, retrain=True)
# evaluate the Auto-Keras model
score = model.evaluate(testX, testY)
predictions = model.predict(testX)
report = classification_report(testY, predictions,
target_names=labelNames)
# write the report to disk
if not os.path.exists(OUTPUT_PATH):
os.mkdir(OUTPUT_PATH)
p = os.path.join(OUTPUT_PATH, "{}.txt".format(seconds))
f = open(p, "w")
f.write(report)
f.write("\nscore: {}".format(score))
f.close()
# if this is the main thread of execution then start the process (our
# code must be wrapped like this to avoid threading issues with
# TensorFlow)
if __name__ == "__main__":
main() | UTF-8 | Python | false | false | 2,916 | py | 3 | cifar100.py | 3 | 0.65192 | 0.631344 | 0 | 80 | 35.4625 | 78 |
Gladysgong/scrapySet | 10,213,432,242,960 | a60a0bcf7e378098c92941f8bf041aa54cbea112 | 595b44fe0aa172bcee13cd0d88f1eb80c3e0d96d | /fao/fao/spiders/fao_countries_spider.py | d9f0f9396ebdfcfcafb496e0ebea2602a1806cdf | [] | no_license | https://github.com/Gladysgong/scrapySet | b19d45d325ec81f23e3646c0aed5cadf7b79943c | b676fa539ea1fa9c38ba309e2f96bf7321000b1e | refs/heads/master | 2021-05-26T08:01:47.645139 | 2019-12-26T12:21:59 | 2019-12-26T12:21:59 | 127,993,429 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 -*-
import scrapy
from bs4 import BeautifulSoup
from ..items import FaoCountriesItem
import time
class FaoCountriesSpier(scrapy.Spider):
    """Scrapy spider scraping FAO geographic/economic/special country groups.

    Yields FaoCountriesItem rows with three levels: the top-level group
    heading (first), the sub-group heading (second) and the country name
    (third).
    """
    name = "faocountries"
    start_urls = ["http://www.fao.org/countryprofiles/geographic-and-economic-groups/en/"]
    # def start_requests(self):
    #     yield scrapy.Request(url=self.start_url, callback=self.parse)
    def parse(self, response):
        # (Original note, translated) The page was inspected via a
        # selenium-driven Chrome to obtain the post-load source.
        # NOTE(review): debug print of the full page body below.
        print("1:", response.body)
        a = response.xpath('//div[@id="groups-list"]/div/h3/text()').extract()  # the three outermost group headings
        b = response.xpath('//div[@id="groups-list"]/div/div/h4/text()').extract()  # middle-level headings (unused below)
        c = response.xpath('//div[@id="groups-list"]/div/div/ul/li/a/text()').extract()  # innermost entries (unused below)
        x = response.body
        soup = BeautifulSoup(x, 'lxml')
        secondtitles = soup.select("div.divgroup h4")
        # NOTE(review): a single item instance is mutated and re-yielded for
        # every row; verify downstream pipelines tolerate this.
        item = FaoCountriesItem()
        # NOTE(review): the three branches below differ only in the a-index
        # and the rel prefix ('geo'/'eco'/'spe'); they could be folded into
        # one loop.  The first branch also lacks the "third" debug print the
        # other two have.
        for each in a:
            item["first"] = each
            print("first:", each)
            if each == a[0]:
                # print("第一个")
                for t in secondtitles:
                    if 'geo' in t.get("rel"):
                        ul_id = "ul_" + t.get("rel")
                        # print(ul_id)
                        item["second"] = t.string
                        print("second:" + item["second"])
                        content = soup.select("ul#" + ul_id + " li a")
                        for i in content:
                            item['third'] = i.string
                            yield item
            if each == a[1]:
                # print("第2个")
                for t in secondtitles:
                    if 'eco' in t.get("rel"):
                        ul_id = "ul_" + t.get("rel")
                        # print(ul_id)
                        item["second"] = t.string
                        print("second:" + item["second"])
                        content = soup.select("ul#" + ul_id + " li a")
                        for i in content:
                            item['third'] = i.string
                            print("third:", i.string)
                            yield item
            if each == a[2]:
                # print("第3个")
                for t in secondtitles:
                    if 'spe' in t.get("rel"):
                        ul_id = "ul_" + t.get("rel")
                        # print(ul_id)
                        item["second"] = t.string
                        print("this is secondtitle:" + item["second"])
                        content = soup.select("ul#" + ul_id + " li a")
                        for i in content:
                            item['third'] = i.string
                            print("third:", i.string)
                            yield item
Richard-Cod/atelieriia | 16,363,825,415,274 | afda4cad86fdbd57ef14c612d5a00a6a37b83750 | 34071b177568de0c0979f13777473f19fb7609fd | /atelieriia/urls.py | 077d5e50eddc8a31dd80b4a969feef4aa987f44c | [] | no_license | https://github.com/Richard-Cod/atelieriia | 0e1630fac3686c26f9fb5efef81ecf5af329877f | 5686cb8c1a1c09bf62950dd222c8bcd0ae0947bd | refs/heads/master | 2020-07-22T05:16:25.871998 | 2019-11-23T10:32:24 | 2019-11-23T10:32:24 | 207,084,292 | 0 | 0 | null | false | 2019-12-05T00:20:35 | 2019-09-08T08:36:55 | 2019-11-23T11:10:37 | 2019-12-05T00:20:34 | 10,614 | 0 | 0 | 2 | Python | false | false | """atelieriia URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from . import views
from django.contrib.auth import views as auth_view
from django.conf.urls.static import static
from . import settings
urlpatterns = [
    # Admin site
    path('admin/', admin.site.urls),
    # Core atelieriia pages
    path('',views.accueil,name="accueil"),
    path('messages',views.messages,name="messages"),
    path('about',views.about,name="about"),
    # Registration / authentication views
    path('accounts/login/',auth_view.LoginView.as_view(),name='login'),
    # NOTE(review): kwargs={'next_page': '/'} is passed to the resolved view
    # as a URL kwarg; LogoutView is normally configured with
    # as_view(next_page='/') instead -- verify the redirect works.
    path('accounts/logout/',auth_view.LogoutView.as_view(),name='logout',kwargs={'next_page': '/'}),
    path('signup',views.signup,name='signup'),
    # Account management
    path('profile',views.profile,name="profile"),
    # Blog application
    path('blog/',include('blog.urls')),
    # Test view
    path('test',views.test,name="test"),
    # Questions application
    path('q/',include('questions.urls')),
    # Courses application
    path('cours/',include('cours.urls')),
    # Projects application
    path('projets/',include('projets.urls')),
] + static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
| UTF-8 | Python | false | false | 1,940 | py | 49 | urls.py | 20 | 0.678351 | 0.674227 | 0 | 71 | 26.323944 | 100 |
VRDate/nao-recorder | 17,918,603,571,538 | 853c7d2bbf1618c728314d0fecdb04ded6d88605 | b04b911600f7c8c317a5a045988013071bc068b4 | /src/main/python/recorder/main.py | 5cccc3c1f4dbba2ea4dd657dd15c07774aed4dda | [] | no_license | https://github.com/VRDate/nao-recorder | fdc9160796167850da8c32c7d906470446e7c58d | c7fafd2e9720537862c4b29fa65fafec94533534 | refs/heads/master | 2021-01-17T04:57:37.444961 | 2013-07-27T09:50:53 | 2013-07-27T09:50:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Created on 7 Jul 2013
@author: davesnowdon
'''
import kivy
kivy.require('1.7.1')
from kivy.app import App
from kivy.extras.highlight import KivyLexer
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.spinner import Spinner, SpinnerOption
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.codeinput import CodeInput
from kivy.uix.textinput import TextInput
from kivy.uix.popup import Popup
from kivy.properties import ListProperty
from kivy.core.window import Window
from kivy.lang import Builder
from pygments import lexers
from pygame import font as fonts
import codecs, os
import logging
from naoutil import memory
from core import Robot, get_translator
# Minimum confidence (0..1) a speech-recognition hit must reach to be accepted.
# NOTE(review): not referenced anywhere else in this file.
WORD_RECOGNITION_MIN_CONFIDENCE = 0.6
# Module-level logger for the recorder application.
main_logger = logging.getLogger("recorder.main")
class Fnt_SpinnerOption(SpinnerOption):
    """Option widget class for the font-chooser spinner (presumably styled in
    the .kv file -- no behaviour is added here)."""
    pass
class LoadDialog(Popup):
    """File-open popup.  Picking a file sets ``choosen_file`` (a Kivy
    property the app binds to) and puts the file's basename in the
    window title."""
    def load(self, path, selection):
        # Reset-then-assign: presumably forces the bound observer to fire
        # even when the same file is selected twice in a row -- TODO confirm.
        self.choosen_file = [None, ]
        self.choosen_file = selection
        # ``selection`` is a list of paths; show the basename of the first.
        Window.title = selection[0][selection[0].rfind(os.sep) + 1:]
        self.dismiss()
    def cancel(self):
        self.dismiss()
class SaveDialog(Popup):
    """File-save popup: writes ``self.text`` (assigned by the app before
    opening the dialog) to the chosen path as UTF-8."""
    def save(self, path, selection):
        # NOTE(review): ``selection`` is used as a single path string here
        # (see rfind below), unlike LoadDialog where it is a list -- verify
        # the .kv wiring really passes a string.
        _file = codecs.open(selection, 'w', encoding='utf8')
        _file.write(self.text)
        Window.title = selection[selection.rfind(os.sep) + 1:]
        _file.close()
        self.dismiss()
    def cancel(self):
        self.dismiss()
class ConnectionDialog(Popup):
    """Host/port entry popup (fields ``f_hostname`` / ``f_port`` are read in
    NaoRecorderApp.do_connect; layout presumably defined in the .kv file)."""
    pass
class NaoJoints(BoxLayout):
    """Panel showing the robot's joints (layout presumably in the .kv file)."""
    pass
class NaoRecorderApp(App):
    """Kivy application for scripting and recording NAO robot motions.

    Builds a window with a menu bar (file actions, motor on/off, run
    script, add keyframe, posture spinner), a Python code editor plus a
    read-only status pane, and a joints panel.  The app itself is passed
    to ``Robot`` as both status and code display, so it must provide
    ``add_status`` and ``append_code``/``set_code``/``get_code``.
    """

    # Path of the currently opened script; files[0] is None when no file
    # is open.  Updated via the LoadDialog binding in _file_menu_selected.
    files = ListProperty([None, ])

    def build(self):
        """Construct and return the root widget tree."""
        self.robot = Robot(status_display=self, code_display=self)
        # building Kivy Interface
        b = BoxLayout(orientation='vertical')
        menu = BoxLayout(
            size_hint_y=None,
            height='30pt')
        # Font chooser for the code editor.
        fnt_name = Spinner(
            text='DroidSansMono',
            option_cls=Fnt_SpinnerOption,
            values=sorted(map(str, fonts.get_fonts())))
        fnt_name.bind(text=self._update_font)
        # file menu
        mnu_file = Spinner(
            text='File',
            values=('Connect', 'Open', 'SaveAs', 'Save', 'Close'))
        mnu_file.bind(text=self._file_menu_selected)
        # motors on/off
        btn_motors_on = Button(text='Motors On')
        btn_motors_on.bind(on_press=self._on_motors_on)
        btn_motors_off = Button(text='Motors Off')
        btn_motors_off.bind(on_press=self._on_motors_off)
        # run script
        btn_run_script = Button(text='Run Script')
        btn_run_script.bind(on_press=self._on_run_script)
        # add keyframe
        btn_add_keyframe = Button(text='Add Keyframe')
        btn_add_keyframe.bind(on_press=self._on_add_keyframe)
        # root actions menu
        robot_actions = Spinner(
            text='Action',
            values=sorted(self.robot.postures()))
        robot_actions.bind(text=self.on_action)
        # add to menu
        menu.add_widget(mnu_file)
        menu.add_widget(btn_add_keyframe)
        menu.add_widget(btn_motors_on)
        menu.add_widget(btn_motors_off)
        menu.add_widget(btn_run_script)
        menu.add_widget(robot_actions)
        b.add_widget(menu)
        # Main area: code editor + status on the left, joints panel on the right.
        m = BoxLayout()
        code_status = BoxLayout(orientation='vertical', size_hint=(0.6, 1))
        # code input
        self.codeinput = CodeInput(
            lexer=lexers.PythonLexer(),
            font_name='data/fonts/DroidSansMono.ttf', font_size=12,
            text="nao.say('hi')")
        code_status.add_widget(self.codeinput)
        # status window
        self.status = TextInput(text="", readonly=True, multiline=True, size_hint=(1.0, 0.25))
        code_status.add_widget(self.status)
        m.add_widget(code_status)
        self.joints_ui = NaoJoints(size_hint=(0.4, 1))
        m.add_widget(self.joints_ui)
        b.add_widget(m)
        return b

    def on_start(self):
        """Kivy hook: ask for the robot's address as soon as the app starts."""
        self.show_connection_dialog(None)

    def on_stop(self):
        """Kivy hook: drop the robot connection when the app closes."""
        self.robot.disconnect()

    def get_code(self):
        """Return the full text of the code editor."""
        return self.codeinput.text

    def set_code(self, code):
        """Replace the code editor's content."""
        self.codeinput.text = code

    def append_code(self, code):
        """Append *code* to the editor on a new (CRLF) line."""
        self.set_code("{}\r\n{}".format(self.get_code(), code))

    def add_status(self, text):
        """Append a line to the read-only status pane."""
        self.status.text = self.status.text + "\n" + text

    def show_connection_dialog(self, b):
        """Open the host/port dialog; do_connect runs when it is dismissed."""
        p = ConnectionDialog()
        p.bind(on_dismiss=self.do_connect)
        p.open()

    def do_connect(self, popup):
        """Try to connect to the robot; on failure re-open the dialog."""
        hostname = popup.f_hostname.text
        portnumber = int(popup.f_port.text)
        # NOTE: Python 2 print statement -- this module is Python 2 only.
        print "connect to = {}:{}".format(hostname, portnumber)
        main_logger.info("Connecting to robot at {host}:{port}".format(host=hostname, port=portnumber))
        if self.robot.connect(hostname, portnumber):
            self.add_status("Connected to robot at {host}:{port}".format(host=hostname, port=portnumber))
        else:
            self.add_status("Error connecting to robot at {host}:{port}".format(host=hostname, port=portnumber))
            self.show_connection_dialog(None)

    def _update_size(self, instance, size):
        """Change the editor's font size.

        NOTE(review): never bound to any widget in build(), so this is
        currently dead code.
        """
        self.codeinput.font_size = float(size)

    def _update_font(self, instance, fnt_name):
        """Apply the font chosen in the font spinner to the editor."""
        instance.font_name = self.codeinput.font_name = \
            fonts.match_font(fnt_name)

    def _file_menu_selected(self, instance, value):
        """Dispatch a File-menu selection, then reset the spinner label."""
        if value == 'File':
            return
        instance.text = 'File'
        if value == 'Connect':
            self.show_connection_dialog(None)
        elif value == 'Open':
            if not hasattr(self, 'load_dialog'):
                self.load_dialog = LoadDialog()
            self.load_dialog.open()
            # choosen_file changes propagate into self.files (see on_files).
            self.load_dialog.bind(choosen_file=self.setter('files'))
        elif value == 'SaveAs':
            if not hasattr(self, 'saveas_dialog'):
                self.saveas_dialog = SaveDialog()
            self.saveas_dialog.text = self.codeinput.text
            self.saveas_dialog.open()
        elif value == 'Save':
            if self.files[0]:
                _file = codecs.open(self.files[0], 'w', encoding='utf8')
                _file.write(self.codeinput.text)
                _file.close()
        elif value == 'Close':
            if self.files[0]:
                self.codeinput.text = ''
                Window.title = 'untitled'

    def on_files(self, instance, values):
        """Kivy property observer: load the newly selected file into the editor."""
        if not values[0]:
            return
        _file = codecs.open(values[0], 'r', encoding='utf8')
        self.codeinput.text = _file.read()
        _file.close()

    def on_action(self, instance, l):
        """Run the posture selected in the Action spinner.

        NOTE(review): ``self.connection`` and ``self.standard_positions``
        are not defined anywhere in this file, so selecting an action
        presumably raises AttributeError; this likely should go through
        ``self.robot`` instead -- TODO confirm against core.Robot.
        """
        if self.connection:
            try:
                self.standard_positions[l]()
            except KeyError as e:
                print e

    def _on_motors_off(self, instance):
        """Menu handler: stiffness off."""
        self.robot.motors_off()

    def _on_motors_on(self, instance):
        """Menu handler: stiffness on."""
        self.robot.motors_on()

    def _on_run_script(self, instance):
        """Menu handler: send the editor's content to the robot for execution."""
        if self.robot.is_connected():
            # TODO: run only selected code
            # code = self.codeinput.selection_text
            # if not code or len(code) == 0:
            code = self.codeinput.text
            self.robot.run_script(code)

    def _on_add_keyframe(self, dummy1=None, dummy2=None, dummy=None):
        """Append the robot's current pose as a keyframe to the editor.

        The dummy parameters absorb the differing argument lists of the
        Kivy callbacks this may be bound to -- presumably; TODO confirm.
        """
        code = self.robot.keyframe()
        if code:
            self.append_code(code)
if __name__ == '__main__':
    # Start the Kivy application (blocks until the window closes).
    NaoRecorderApp().run()
| UTF-8 | Python | false | false | 7,409 | py | 8 | main.py | 6 | 0.599001 | 0.593602 | 0 | 252 | 28.400794 | 112 |
zyong/GameServer | 12,240,656,806,410 | a99db4c080aece824f94efc3a3401d5404d7672d | 1c2711d8cfaf5da9340ce53bb0dd8348cf60f1a8 | /logic/perform/npcs/pf5127.py | c39b1c3005e89e3232c09649fd1e27fc06474d27 | [] | no_license | https://github.com/zyong/GameServer | 2bf362162f1988dcc95f52539f90a3a77dea1e41 | 9f47473b4467db37d970fdff40b4a860ccf57887 | refs/heads/master | 2017-12-22T01:02:19.852477 | 2016-11-26T04:07:43 | 2016-11-26T04:07:43 | 76,151,313 | 1 | 0 | null | true | 2016-12-11T03:13:19 | 2016-12-11T03:13:19 | 2016-11-26T03:31:02 | 2016-11-26T04:08:46 | 5,958 | 0 | 0 | 0 | null | null | null | # -*- coding: utf-8 -*-
from perform.defines import *
from perform.object import Perform as CustomPerform
#导表开始
class Perform(CustomPerform):
id = 5127
name = "唤灵"
configInfo = {
"符能":15,
}
#导表结束
#非第一回合进入战场时,回复主人15点的符能
def onSetup(self, w):
self.addFunc(w, "onSummoned", self.onSummoned)
def onSummoned(self, att, vic):
fuWen = self.transCode(self.configInfo["符能"], vic, att)
att.addFuWen(fuWen)
| UTF-8 | Python | false | false | 496 | py | 1,580 | pf5127.py | 1,295 | 0.669767 | 0.648837 | 0 | 20 | 19.75 | 57 |
522408323/pythondemo | 11,673,721,156,190 | 06ca6a47270b81b5da751afa1481b2942d749f0a | 1f1e6ba44e998d0766405ffb23fa1729ef1a3a15 | /Test.py | 44111f3c31676208cedb857651a1d83e6b29d5e6 | [] | no_license | https://github.com/522408323/pythondemo | a209f0218aa765269adf829a4c4fb6a197cdd682 | 6c7df1d7eed67f183f809ba8163a9b0dddb092a7 | refs/heads/main | 2023-05-06T09:06:47.766310 | 2021-06-02T10:00:16 | 2021-06-02T10:00:16 | 352,954,849 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
if __name__ == '__main__':
# print("111111")
# print(5+10)
# print("hello" + " world")
# print("love" * 3)
# print("love\n" * 3)
# 单行注释: # ,多行注释: ctrl + / 或者 3个单引号 或者 3个双引号 ,多行注释貌似需要先空tab行,不然后面代码报错
'''
temp =========
'''
# 使用变量前需要先赋值
#print(a)
# 变量名:可以是字符,数字,下划线,不能以数字开头
# 变量名区分大小写
# 转义字符:\ 或者 r'字符串' 比如r"c:\sss" ,注意字符串不能有\结尾,比如 c:\ss\
# str = 'C:\nss'
# print(str)
# str = 'C:\\nss'
# print(str)
# str = r'C:\nss'
# print(str)
# 跨越多行字符串,用三重引号,单引号或双引号都行
# str = """
# 我是一条鱼,
# 游荡在小溪里,
# 哗啦啦,
# 哗啦啦
# """
# print(str)
#条件语句 if else
# temp = input("猜一下数字:")
# guess = int(temp)
#if guess == 8:
# print("猜对了")
#else:
# print("猜错了")
# while
# a = 1
# while a < 3:
# print(a)
# a = a + 1
# and 条件中使用
# a = 1
# if a > 1 and a < 3:
# print("2")
# else:
# print("非2")
'''
while(条件):
代码块
else:
代码块
'''
num= int(input("输入一个整数:"))
count = num //2
while count > 1:
print(count)
if num % count == 0:
print("%d最大约数是%d" % (num,count))
break
count -=1
else:
print("%d是素数!" %num) | UTF-8 | Python | false | false | 1,510 | py | 15 | Test.py | 14 | 0.478456 | 0.455117 | 0 | 74 | 14.054054 | 69 |
jesuszarate/CTCI | 2,121,713,857,937 | 39025130e2ed656ec363d200a46dda00d6fd971e | 4d439fac4eeb1a3bd21f1a2d8c95edc736a0a5a5 | /LeetCode/copy_list_with_random_pointer_138.py | e519b7acd6f2ec609325c71045cd61c46432e890 | [] | no_license | https://github.com/jesuszarate/CTCI | 4962d6a5025343a01c26f5c5e64a4ce42cb541f8 | 8d1c11cc64570a2374798bd41818ad590bc6d661 | refs/heads/master | 2020-05-05T09:11:52.290405 | 2019-11-17T05:06:10 | 2019-11-17T05:06:10 | 179,894,197 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
A linked list is given such that each node contains an additional random pointer which could point to any node in the list or null.
Return a deep copy of the list.
Example 1:
1
. \
2 . 2
Input:
{"$id":"1","next":{"$id":"2","next":null,"random":{"$ref":"2"},"val":2},"random":{"$ref":"2"},"val":1}
Explanation:
Node 1's value is 1, both of its next and random pointer points to Node 2.
Node 2's value is 2, its next pointer points to null and its random pointer points to itself.
Note:
You must return the copy of the given head as a reference to the cloned list.
'''
"""
# Definition for a Node.
"""
class Node:
def __init__(self, val, next, random):
self.val = val
self.next = next
self.random = random
class Solution:
def copyRandomList(self, head):
visited = dict()
return self.deepCopy(head, visited)
def deepCopy(self, node, visited):
if not node:
return None
new_node = Node(node.val, None, None)
visited[node] = new_node
if node.next:
new_node.next = self.deepCopy(node.next, visited)
if node.random:
if node not in visited:
new_node.random = self.deepCopy(node.random, visited)
else:
new_node.random = visited[node.random]
return new_node
| UTF-8 | Python | false | false | 1,365 | py | 50 | copy_list_with_random_pointer_138.py | 49 | 0.600733 | 0.589744 | 0 | 59 | 22.084746 | 131 |
BiagioDipalma/NAOSoftware | 7,069,516,171,910 | 85df72da05dc860e1df5c79861e3a30923b106ae | 9e4dc53577c5319668fda425dfac0994d179e9f2 | /ServerSide/vision_faceDetection.py | 8a5eaf0a1d417c0213fa19f9e08e6e29f28d0129 | [] | no_license | https://github.com/BiagioDipalma/NAOSoftware | b1fff791c03a6b1029ff238f8b67de7d66d02972 | 0cd84455e2bd668122f72a03ef78bff5d1729531 | refs/heads/master | 2020-03-23T20:10:16.788570 | 2018-07-23T14:33:07 | 2018-07-23T14:33:07 | 142,025,868 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os.path
import sys
import json
import random
import string
import webbrowser
import socket
import speech_recognition as sr
import playsound
import time
import time
from naoqi import ALProxy
# Connection details for the NAO robot (NAOqi broker).
IP = "192.168.1.102" # Replace here with your NaoQi's IP address.
PORT = 9559
#access token for dialogflow
# SECURITY NOTE(review): the Dialogflow token is hard-coded in source;
# consider loading it from the environment instead.
CLIENT_ACCESS_TOKEN = 'a4c37b43822649869ac3deca4d7f82a7'
#try to import apiai
# Fall back to the parent directory if the apiai package is bundled there.
try:
    import apiai
except ImportError:
    sys.path.append(
        os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)
    )
    import apiai
# Create a proxy to ALFaceDetection
# (Python 2 "except Exception, e" syntax -- this module is Python 2 only.)
try:
    faceProxy = ALProxy("ALFaceDetection", IP, PORT)
    print ('faceDetectionProxy created')
except Exception, e:
    print "Error when creating face detection proxy:"
    print str(e)
    exit(1)
# Subscribe to the ALFaceDetection proxy
# This means that the module will write in ALMemory with
# the given period below
period = 500
faceProxy.subscribe("Test_Face", period, 0.0 )
# ALMemory variable where the ALFacedetection modules
# outputs its results
memValue = "FaceDetected"
# Create a proxy to ALMemory
try:
    memoryProxy = ALProxy("ALMemory", IP, PORT)
    print('memoryCreated')
except Exception, e:
    print "Error when creating memory proxy:"
    print str(e)
    exit(1)
# Text-to-speech proxy used throughout the module.
tts = ALProxy("ALTextToSpeech", IP, PORT)
print('tts created')
#this function plays the sound for speech reco
def mPlaySound(path):
    """Play the audio file at *path* through NAO's local audio player.

    Bug fix: the original ignored its *path* argument and always played
    the hard-coded 'resources/audio.m4v'; the parameter is now honoured.
    """
    aup = ALProxy("ALAudioPlayer", "127.0.0.1", 54710)
    aup.playFile(path)
#this function opens an audio stream, using google's api for decoding user's words
def listen():
    """Record one utterance from the microphone and return the text
    recognised by Google's Web Speech API.

    Side effect: NAO speaks the recognised text back.  Implicitly returns
    None when recognition fails.
    """
    r = sr.Recognizer()
    print('listening..')
    with sr.Microphone() as source:
        #playsound('resources/audio.m4v')
        audio = r.listen(source)
    try:
        print("recognizing..")
        result = r.recognize_google(audio)
        tts.say('you said: '+result)
        return result
    except:
        # NOTE(review): bare except swallows everything (even
        # KeyboardInterrupt); narrowing to sr.UnknownValueError /
        # sr.RequestError would be safer.
        print('error')
        pass
#this function sends to NAO the results obtained from dialogflow
#is it necessary?
def socketSend(data):
    """Run a one-shot TCP echo server on 127.0.0.1:5007.

    NOTE(review): despite its name, this never transmits *data* -- the
    parameter is immediately overwritten by conn.recv() below, and the
    call blocks in accept() until a client connects.  It is only
    referenced from commented-out code.
    """
    #name of local node
    HOST = '127.0.0.1'
    #port used for comunication
    PORT = 5007
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((HOST, PORT))
    s.listen(1)
    conn, addr = s.accept()
    print 'Connected by', addr
    while 1:
        data = conn.recv(1024)
        if not data: break
        conn.send(data)
    conn.close()
#this function builds request to dialogflow, uses the words decoded from the listen() function
#to build the string and makes the request
def buildRequest(request, mString):
    """Set request.query and return it.

    An empty mString means no Dialogflow context is active yet, so a
    fresh query is read interactively; otherwise mString already holds
    the complete follow-up query for the running context.
    """
    if mString == "":
        # No active context: ask the user for a brand-new query.
        query_text = raw_input('input_text:')
        #print("input_text:")
        #query_text = listen()
    else:
        # A context is active: mString is the full follow-up request.
        query_text = mString
    request.query = query_text
    return request.query
#this function build the request id using random numbers
def buildSessionID():
    """Return a pseudo session id: 'Session_ID:' followed by six random digits."""
    random_digits = [random.choice(string.digits) for _ in range(6)]
    return 'Session_ID:' + ''.join(random_digits)
def main(mString):
    """Send one query to Dialogflow and dispatch on the matched intent.

    mString == "" starts a fresh query (read interactively inside
    buildRequest); a non-empty mString is a follow-up that completes an
    intent still missing parameters.

    NOTE(review): this function and startFaceReco() call each other
    recursively instead of looping, so a long conversation keeps growing
    the call stack.
    """
    #new ai object
    ai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)
    #define a new request
    request = ai.text_request()
    request.lang = 'en' # optional, default value equal 'en'
    request.session_id = buildSessionID()
    request.query = buildRequest(request, mString)
    result = json.loads(request.getresponse().read())
    print (u"%s" % result['result']['fulfillment']['speech'])
    #socketSend(u"%s" % result['result']['fulfillment']['speech'])
    tts.say(str(u"%s" % result['result']['fulfillment']['speech']))
    #if the request is smalltalk, the context will became empty
    if "smalltalk" in result['result']['action']:
        mString = ""
        startFaceReco()
    #searchRestaurant intent is incomplete -> i need to complete it
    elif (result['result']['actionIncomplete']) == True and (result['result']['metadata']['intentName']) == "searchRestaurant":
        priceKind = raw_input('you:')
        #priceKind = listen()
        tts.say('What do you want to eat?')
        foodKind = raw_input('What do you want to eat?\nyou:')
        #foodKind = listen()
        # 'Restarants_Bars1' is the (misspelled) Dialogflow parameter name.
        intent = result['result']['contexts'][2]['parameters']['Restarants_Bars1']
        mString = intent + ' ' + priceKind + ' '+ foodKind
        main(mString)
    #searchRestaurant intent is complete -> the url of tripadvisor is opened
    elif(result['result']['actionIncomplete']) == False and (result['result']['metadata']['intentName']) == "searchRestaurant":
        url = result['result']['fulfillment']['data']
        webbrowser.open(url)
        mString = ""
        main(mString)
    #events intent
    elif result['result']['metadata']['intentName'] == "findEvents":
        url = result['result']['fulfillment']['data']
        print 'opening...',url
        webbrowser.open(url)
        mString = ""
        startFaceReco()
    # findMonuments intent incomplete -> ask which monument
    elif (result['result']['actionIncomplete']) == True and (result['result']['metadata']['intentName']) == "findMonuments":
        intent = result['result']['contexts'][0]['parameters']['monumentsIntentDetected']
        monumentName = raw_input('input_text: ')
        #monumentName = listen()
        mString = intent + ' '+monumentName
        main(mString)
    # findMonuments intent complete -> open photo + description of the monument
    elif (result['result']['actionIncomplete']) == False and (result['result']['metadata']['intentName']) == "findMonuments":
        url = result['result']['fulfillment']['data']
        print 'opening...',url
        webbrowser.open(url)
        mString = ""
        startFaceReco()
    # Intent fully handled: reset mString and go back to face detection.
    else:
        mString = ""
        startFaceReco()
startFaceReco()
def startFaceReco():
    """Poll ALMemory (up to 20 times, 3s apart) for a detected face; on
    detection greet the user and hand control to main() for the dialogue.

    After the loop the "Test_Face" subscription is removed, so this is
    effectively a single session.
    """
    # A simple loop that reads the memValue and checks whether faces are detected.
    for i in range(0, 20):
        time.sleep(3)
        val = memoryProxy.getData(memValue)
        print ""
        print "*****"
        print ""
        # Check whether we got a valid output.
        if(val and isinstance(val, list) and len(val) >= 2):
            # We detected faces !
            # For each face, we can read its shape info and ID.
            # First Field = TimeStamp.
            timeStamp = val[0]
            # Second Field = array of face_Info's.
            faceInfoArray = val[1]
            try:
                # Browse the faceInfoArray to get info on each detected face.
                # (len-1: presumably the last element holds recognition info
                # rather than a face -- TODO confirm against the NAOqi docs.)
                for j in range( len(faceInfoArray)-1 ):
                    faceInfo = faceInfoArray[j]
                    # First Field = Shape info.
                    faceShapeInfo = faceInfo[0]
                    # Second Field = Extra info (empty for now).
                    faceExtraInfo = faceInfo[1]
                    print('face detected')
                    #tts.say("hi there!I'm NAO Mola, i can give you info about restaurants, events, monuments and public services. So, Ask me something!")
                    tts.say("hi, there!")
                    mString = ""
                    main(mString)
                    #print " alpha %.3f - beta %.3f" % (faceShapeInfo[1], faceShapeInfo[2])
                    #print " width %.3f - height %.3f" % (faceShapeInfo[3], faceShapeInfo[4])
            except Exception, e:
                print "faces detected, but it seems getData is invalid. ALValue ="
                print val
                print "Error msg %s" % (str(e))
        else:
            print "No face detected"
    # Unsubscribe the module.
    faceProxy.unsubscribe("Test_Face")
    print "Test terminated successfully."
if __name__ == '__main__':
    # Entry point: start polling for faces; dialogue begins on detection.
    startFaceReco()
pasandrei/MIRPR-pedestrian-and-vehicle-detection-SSDLite | 10,333,691,315,856 | 0021a327ebac0a9d42ed9bdb994330462c599cb3 | 3f768dac808469f4df5474dfeb2a3d8803957769 | /train/optimizer_handler.py | 5079fe72895e2dedb1eefaf998a635942e2eb1de | [
"MIT"
] | permissive | https://github.com/pasandrei/MIRPR-pedestrian-and-vehicle-detection-SSDLite | 4c7c466c2991d371b6f9d02a6e8127525cf74ad8 | 1e4f248426bfdb6cf8e2da9411438292752f1729 | refs/heads/master | 2021-08-18T20:30:59.306719 | 2020-08-22T18:17:31 | 2020-08-22T18:17:31 | 216,416,388 | 1 | 0 | MIT | false | 2020-05-18T20:17:42 | 2019-10-20T19:33:48 | 2020-05-18T14:06:14 | 2020-05-18T14:06:12 | 367,253 | 1 | 1 | 1 | Python | false | false | import torch.optim as optim
"""
Various optimizer setups
"""
def layer_specific_adam(model, params):
    """Adam with a reduced learning rate on the backbone.

    The backbone group trains at ``learning_rate * decay_rate`` while the
    head groups (loc, conf, additional_blocks) use the base learning rate.
    """
    print("AMS grad is false")
    backbone_lr = params.learning_rate * params.decay_rate
    param_groups = [
        {'params': model.backbone.parameters(), 'lr': backbone_lr},
        {'params': model.loc.parameters()},
        {'params': model.conf.parameters()},
        {'params': model.additional_blocks.parameters()},
    ]
    return optim.Adam(param_groups, lr=params.learning_rate,
                      weight_decay=params.weight_decay, amsgrad=False)
def layer_specific_sgd(model, params):
    """SGD (momentum 0.9) with a reduced learning rate on the backbone.

    Mirrors layer_specific_adam: the backbone group runs at
    ``learning_rate * decay_rate``, the head groups at the base rate.
    """
    backbone_lr = params.learning_rate * params.decay_rate
    param_groups = [
        {'params': model.backbone.parameters(), 'lr': backbone_lr},
        {'params': model.loc.parameters()},
        {'params': model.conf.parameters()},
        {'params': model.additional_blocks.parameters()},
    ]
    return optim.SGD(param_groups, lr=params.learning_rate,
                     weight_decay=params.weight_decay, momentum=0.9)
def plain_adam(model, params):
    """Uniform Adam over every parameter of *model*."""
    return optim.Adam(
        model.parameters(),
        lr=params.learning_rate,
        weight_decay=params.weight_decay,
    )
def plain_sgd(model, params):
    """Uniform SGD (momentum 0.9) over every parameter of *model*."""
    return optim.SGD(
        model.parameters(),
        lr=params.learning_rate,
        weight_decay=params.weight_decay,
        momentum=0.9,
    )
| UTF-8 | Python | false | false | 1,171 | py | 46 | optimizer_handler.py | 34 | 0.667805 | 0.664389 | 0 | 33 | 34.484848 | 100 |
kimrla/Testcode | 1,005,022,388,042 | 0d647bc83c153e849c3d1d1113c0a91fbaa12945 | 4306a8943d6ac0da40726a0f3c394f4ab2688bdf | /py.day1/date.py | 3e9fdf7e98eccbeaaad862e9765e339798710862 | [] | no_license | https://github.com/kimrla/Testcode | 011d3fc1e64c4488bb6fafcda81c261fec345a74 | 7a18e64eadef8c7ecbe669513034cfd2ab608774 | refs/heads/master | 2020-04-29T11:56:06.445938 | 2019-03-31T10:08:18 | 2019-03-31T10:08:18 | 176,118,931 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | print("请输入年月日并用空格隔开(如2019 3 26)")
# Read a date typed as "YYYY M D" on one line.
year, month, day = map(int, input().split())
status = 0 # leap-year flag: 0 = common year, 1 = leap year
date = 0  # running day-of-year total
# Gregorian leap rule: century years must be divisible by 400, others by 4.
if year % 100 == 0:
    if year % 400 == 0:
        status = 1
else:
    if year % 4 == 0:
        status = 1
# a[m] holds the day count of month m (a[0] is padding).
a = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
if status == 1:
    a[2] = 29
# NOTE(review): an invalid date only prints a warning; execution continues
# and still prints a (meaningless) result below.
if month < 1 or month > 12 or day < 1 or day > a[month]:
    print("请输入正确日期")
for i in a[:month]:
    date = date + i # accumulate the days of all full months before `month`
date = date + day # add the day within the current month
'''if status == 1 and month > 2:
    print("今天是今年的第", date + 1, "天")
else:
'''
print("今天是今年的第", date, "天")
| UTF-8 | Python | false | false | 760 | py | 8 | date.py | 8 | 0.552013 | 0.45302 | 0 | 23 | 24.913043 | 56 |
ljwzz/PythonExercise | 4,406,636,447,134 | 40f7d16ebb52dc869ef05e296bbc0037cef8f1e8 | 1419437f67f12644abaf019e71744d299880fbb7 | /LeetCode/283.moveZero.py | 51661fa54691f5ba2818d7c92d857de2ae436b1d | [
"MIT"
def removeElement(nums, val):
    """Remove every occurrence of ``val`` from ``nums`` in place and
    return the new logical length (LeetCode 27 contract).

    Improvement over the original: the original deleted elements one by
    one inside a while loop (O(n^2) due to list shifting, plus a
    redundant empty-list break); this single-pass write-pointer version
    is O(n) and leaves ``nums`` truncated to the kept elements.

    :type nums: List[int]
    :type val: int
    :rtype: int
    """
    write = 0
    for x in nums:
        if x != val:
            nums[write] = x
            write += 1
    # Truncate so len(nums) matches the logical length, as before.
    del nums[write:]
    return write
# Ad-hoc smoke test: removing the only (matching) element prints 0.
a = [1]
b = 1
print(removeElement(a, b))
| UTF-8 | Python | false | false | 363 | py | 85 | 283.moveZero.py | 79 | 0.473829 | 0.460055 | 0 | 21 | 16.285714 | 31 |
mikeage/get_sidequest_urls | 3,161,095,949,266 | 7b0cba1b1a6c7751ebeb682b2cb0b4936a0972f1 | ddade3c171cd1f072e13d76b74fa4af6d0623dc4 | /get_sidequest_url.py | 2c5c0f49c21a6a97f9c0e69f77768b299b6d2a02 | [] | no_license | https://github.com/mikeage/get_sidequest_urls | 633def2962ab2daa17c26382b7e2f66fa1e21bf5 | 3381656d773f01ae28dd9c3c79d2805412feae2e | refs/heads/main | 2023-02-08T19:00:02.108127 | 2021-01-04T12:23:21 | 2021-01-04T12:28:55 | 326,675,179 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
import requests
# Base URL of the SideQuest REST API.
SIDEQUEST_API_URL = "https://api.sidequestvr.com"
def get_app_id(app):
    """Return the numeric SideQuest app id from a bare id or an app URL.

    Accepts either "12345" or an app-page URL such as
    "https://sidequestvr.com/app/12345/some-slug" (the id is the 5th
    slash-separated component).

    Raises ValueError when no numeric id can be extracted -- the original
    used ``assert`` (stripped under ``python -O``) and could raise a bare
    IndexError on short URLs.
    """
    if app.isdigit():
        return app
    parts = app.split("/")
    if len(parts) <= 4 or not parts[4].isdigit():
        raise ValueError("cannot extract a numeric app id from %r" % app)
    return parts[4]
def get_token(app_id):
    """POST to /generate-install and return the one-time install key for *app_id*.

    The headers mimic a browser CORS request so the endpoint accepts us.
    On failure the response body is printed and AssertionError is raised.
    """
    url = "%s/generate-install" % SIDEQUEST_API_URL
    payload = {"msg": {"apps_id": app_id}}
    headers = {
        "Origin": "https://sidequestvr.com",
        "Sec-Fetch-Site": "same-site",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Dest": "empty",
        "Referer": "https://sidequestvr.com/",
        "User-Agent": "Please-make-URLs-Available-Again-For-Offline",
    }
    r = requests.post(url, json=payload, headers=headers)
    if not r.ok:
        print("%s" % r.text)
    assert r.ok
    return r.json()["data"]["key"]
def get_game_url(key):
    """Exchange an install key for download links and print them.

    Prints the link_url of every apk/obb/mod file attached to the first
    app in the response; returns nothing.  On failure the response body
    is printed and AssertionError is raised.
    """
    url = "%s/install-from-key" % SIDEQUEST_API_URL
    headers = {
        "Origin": "https://sidequestvr.com",
        "User-Agent": "Please-make-URLs-Available-Again-For-Offline",
    }
    payload = {"token": key}
    r = requests.post(url, headers=headers, json=payload)
    if not r.ok:
        print("%s" % r.text)
    assert r.ok
    for dl_url in r.json()["data"]["apps"][0]["urls"]:
        if dl_url["extn"] in ("apk", "obb", "mod"):
            print("%s" % dl_url["link_url"])
def main(args):
    """Resolve and print download URLs for every app given on the command line."""
    for app_ref in args.app:
        install_key = get_token(get_app_id(app_ref))
        get_game_url(install_key)
if __name__ == "__main__":
    # CLI: one or more SideQuest app ids or app-page URLs.
    parser = argparse.ArgumentParser()
    parser.add_argument("app", nargs="+")
    main(parser.parse_args())
| UTF-8 | Python | false | false | 1,565 | py | 2 | get_sidequest_url.py | 1 | 0.563578 | 0.5623 | 0 | 59 | 25.525424 | 69 |
wellcomecollection/data-science | 13,632,226,221,581 | b22f8ceb30ef27f4b74782e55b0c8fdf2dfe8527 | fc536757e799e4e63a618f54be00c814bf0f6c8d | /apis/devise_search/app/api.py | e8a53c1b097e15cf6db4f4fcc1ede5a377dbda82 | [
"MIT"
] | permissive | https://github.com/wellcomecollection/data-science | 94384593827bbeca3e6a6fb6cdd92a1f256ad256 | f5d158de6d4d652e7264093c64420288ecb6a85b | refs/heads/main | 2023-08-28T11:46:36.724074 | 2023-08-21T15:12:59 | 2023-08-21T15:12:59 | 217,507,441 | 7 | 1 | MIT | false | 2023-09-14T08:57:58 | 2019-10-25T10:11:57 | 2023-08-25T07:10:59 | 2023-09-14T08:57:57 | 2,749 | 5 | 1 | 1 | Jupyter Notebook | false | false | import os
import pickle
import nmslib
import numpy as np
from fastapi import FastAPI
from .utils import embed, id_to_url
from .aws import get_object_from_s3, download_object_from_s3
# Load model data (don't fetch from s3 if developing locally)
if 'DEVELOPMENT' in os.environ:
    base_path = os.path.expanduser('~/datasets/devise_search/')
    image_ids = np.load(
        os.path.join(base_path, 'image_ids.npy'),
        allow_pickle=True
    )
    search_index = nmslib.init(method='hnsw', space='cosinesimil')
    search_index.loadIndex(os.path.join(base_path, 'search_index.hnsw'))
else:
    download_object_from_s3('devise_search/search_index.hnsw')
    # Bug fix: this branch previously bound the data to `ids` and
    # `feature_index`, while the endpoint below reads `image_ids` and
    # `search_index` -- so production start-up ended in a NameError on the
    # first query.  Both branches now bind the same names.
    image_ids = np.load(
        get_object_from_s3('devise_search/image_ids.npy'),
        allow_pickle=True
    )
    search_index = nmslib.init(method='hnsw', space='cosinesimil')
    search_index.loadIndex('search_index.hnsw')
# FastAPI application object; all routes below are mounted under /devise_search.
app = FastAPI(
    title='Visual-Semantic Image Search',
    description='Search Wellcome Collection\'s images based on their visual content, without making use of captions. Based on DeViSE: A Deep Visual-Semantic Embedding Model (https://papers.nips.cc/paper/5204-devise-a-deep-visual-semantic-embedding-model.pdf)',
    docs_url='/devise_search/docs',
    redoc_url='/devise_search/redoc'
)
@app.get('/devise_search')
def devise_search(query_text: str = 'An old wooden boat', n: int = 10):
    """Embed *query_text* and return the *n* nearest images by cosine similarity.

    Relies on the module-level globals ``search_index`` (nmslib HNSW index)
    and ``image_ids`` being loaded at import time.
    """
    query_embedding = embed(query_text)
    # knnQuery returns (indexes, distances); distances are discarded.
    neighbour_indexes, _ = search_index.knnQuery(query_embedding, n)
    neighbour_ids = [image_ids[index] for index in neighbour_indexes]
    # `id` shadows the builtin here; harmless but worth renaming.
    neighbour_urls = [id_to_url(id) for id in neighbour_ids]
    return {
        'query_text': query_text,
        'neighbour_ids': neighbour_ids,
        'neighbour_urls': neighbour_urls
    }
@app.get('/devise_search/health_check')
def health_check():
    """Liveness probe: always reports the service as healthy."""
    return {"status": "healthy"}
| UTF-8 | Python | false | false | 1,853 | py | 404 | api.py | 154 | 0.692391 | 0.686454 | 0 | 55 | 32.690909 | 260 |
Mondanzo/pythongameservermanager | 4,982,162,082,768 | 602603a1eadf6d432e87afd70f72d22f0d49f3f9 | 13b40df2624f4fb163ba91815cf3abd357f46826 | /src/webmanagerapi.py | 0520dce83afe0aa8d00ad5ac2fdb5acb241f0db9 | [] | no_license | https://github.com/Mondanzo/pythongameservermanager | 02c565571f7c696adfa9e85d16776653d72037b3 | c63c3e88e6383bc3d13467dd6919baf6b19fe55c | refs/heads/master | 2023-03-14T15:43:50.783560 | 2021-03-06T02:09:30 | 2021-03-06T02:09:30 | 344,984,331 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import Optional
import libtmux as tmux
from uuid import uuid4, UUID
from server.Wrappers import BaseGameServer
from typing import Dict
class GameServerManager:
    """Registry of game servers, all managed through one shared tmux server."""

    def __init__(self) -> None:
        self.tmux = tmux.Server()
        # Bug fix: the original assigned the typing alias
        # ``Dict[UUID, BaseGameServer]`` itself instead of an actual empty
        # dict, which broke every ``.get(...)`` / ``.values()`` call below.
        self.servers: Dict[UUID, BaseGameServer] = {}

    def create_game_server(self, gsw: BaseGameServer, name: str):
        """Instantiate, register and return a new server of wrapper type *gsw*."""
        server = gsw(self.tmux, name)
        # Register the new server so the lookup helpers can find it (the
        # original created the server but never stored it anywhere).
        self.servers[server.server_uuid] = server
        return server

    def load_game_server(self, server):
        """Restore a previously created server.  Not implemented yet."""
        pass

    def get_gameserver_by_uuid(self, uuid: UUID or str) -> Optional[BaseGameServer]:
        """Return the server registered under *uuid*, or None.

        NOTE(review): the annotation ``UUID or str`` evaluates to just
        ``UUID``; kept as-is for interface compatibility.
        """
        return self.servers.get(uuid, None)

    def get_gameservers_by_types(self, type: BaseGameServer) -> Optional[BaseGameServer]:
        """Return the FIRST registered server that is an instance of *type*
        (despite the plural name), or None."""
        for server in self.servers.values():
            if isinstance(server, type):
                return server
        return None

    def get_gameserver_by_name(self, name) -> Optional[BaseGameServer]:
        """Return the first server whose ``server_name`` equals *name*, or None."""
        for server in self.servers.values():
            if server.server_name == name:
                return server
        return None
| UTF-8 | Python | false | false | 933 | py | 7 | webmanagerapi.py | 5 | 0.733119 | 0.732047 | 0 | 34 | 26.441176 | 86 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.