repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
โ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
โ | gha_stargazers_count
int32 0
178k
โ | gha_forks_count
int32 0
88.9k
โ | gha_open_issues_count
int32 0
2.72k
โ | gha_language
stringlengths 1
16
โ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
pzar97/DFS-2
| 1,537,598,337,340 |
03eeb217ac3bc42fc63944d01c204c4e970b190b
|
d92beb95439f63ba1f142459fb01c7e034c4a00d
|
/decodeString.py
|
cc8b3bc0391f9cd8e5c41a514fb4921b3fe5d5a5
|
[] |
no_license
|
https://github.com/pzar97/DFS-2
|
678abe4513bc8a0aaaa0f5ff7d53fe500c87f443
|
aea23a72162b204e1ef7d2b92c51ff32742c2038
|
refs/heads/master
| 2023-07-15T17:49:45.279359 | 2021-08-29T21:20:01 | 2021-08-29T21:20:01 | 401,143,419 | 0 | 0 | null | true | 2021-08-29T21:12:37 | 2021-08-29T21:12:37 | 2021-07-08T19:46:39 | 2021-08-29T07:08:15 | 3 | 0 | 0 | 0 | null | false | false |
class Solution:
    def decodeString(self, s: str) -> str:
        """Decode a run-length-encoded string, e.g. "3[a2[c]]" -> "accaccacc".

        Grammar: k[encoded] means `encoded` repeated k times; patterns nest.

        TC: O(n * maxK) for the output construction
        SC: O(no. of digits + no. of strings in the 's' string)

        Fixes over the original: `is None` instead of `== None`, and the
        dead `temp`/`popped` bookkeeping (including the no-op
        `currStr += temp`, since temp was always "" there) is removed.
        """
        # 1. base case: empty or missing input decodes to itself
        if s is None or len(s) == 0:
            return s
        # 2. scan left to right, stacking state at each '['
        stackInt = []   # repeat counts pending for open brackets
        stackStr = []   # string built before each open bracket
        currStr = ""    # string being built at the current nesting level
        currNum = 0     # repeat count being parsed digit by digit
        for ch in s:
            if ch.isdigit():
                # accumulate multi-digit counts, e.g. "12[" -> 12
                currNum = currNum * 10 + int(ch)
            elif ch == "[":
                # push current state and start a fresh nesting level
                stackInt.append(currNum)
                stackStr.append(currStr)
                currNum = 0
                currStr = ""
            elif ch == "]":
                # close the level: repeat what was built and append it
                # to the string from the enclosing level
                repeat = stackInt.pop()
                currStr = stackStr.pop() + currStr * repeat
            else:
                # plain letter
                currStr += ch
        # 3. Result
        return currStr
|
UTF-8
|
Python
| false | false | 1,419 |
py
| 2 |
decodeString.py
| 2 | 0.414376 | 0.408034 | 0 | 53 | 25.773585 | 64 |
jumphone/SPRINT
| 4,406,636,486,324 |
75915eb78fe5143c6769c15885b5db897048c12f
|
5403ab621f25f1ce7c9f8e9578b156abf148ba5e
|
/sprint/tools_zf/get_depth_old.py
|
d4edc4cfea6332ca885b5e4827048c9afaf4fe40
|
[
"MIT"
] |
permissive
|
https://github.com/jumphone/SPRINT
|
75015dcf6dc6677aef65101b68b16e3eb8deafdd
|
d5ff879eaade1563dda4bfd44ab7199a25c6e719
|
refs/heads/master
| 2023-06-23T07:00:40.581118 | 2023-06-12T08:42:46 | 2023-06-12T08:42:46 | 72,709,622 | 46 | 14 | null | false | 2018-10-24T13:20:03 | 2016-11-03T04:45:03 | 2018-09-26T12:31:50 | 2018-10-24T13:20:03 | 6,378 | 10 | 6 | 3 |
Python
| false | null |
def get_depth(zz_in_dir=0,bed_in_dir=0,bed_out_dir=0):
fread=open(zz_in_dir)# './zz_folder/all.zz')
fsnv=open(bed_in_dir) #'../bed_folder/try_new.bed') # Hap_SRR521447.bed')
fo=open(bed_out_dir,'w')#'./tmp/readspersite_new.zer','w')
class Read:
def __init__(self,read):
self.snv=read.split('\t')[4].split(';')
self.inter=read.split('\t')[3].split(';')
self.direct=read.split('\t')[1]
def locisin(self,loc):
isin=0
for inter in self.inter:
inter=inter.split(':')
if int(loc)<=int(inter[1]) and int(loc)>=int(inter[0]):
isin =1
break
if isin ==0:
return 0
elif isin ==1:
return 1
def snvisin(self,snv):
if snv in self.snv:
return 1
else:
return 0
def getmin(self):
return int(self.inter[0].split(':')[0])
def getmax(self):
return int(self.inter[-1].split(':')[1])
reads={}
for line in fread:
seq=line.split('\t')
try:
reads[seq[0]].append(Read(line[0:-1]))
except Exception,e :
print seq[0]+' begin'
reads[seq[0]]=[Read(line[0:-1])]
top=0
chrr=''
for line in fsnv:
seq=line[0:-1].split('\t')
deep=0
altdeep=0
snv=seq[3]+':'+seq[2]
if seq[0] != chrr:
top=0
chrr=seq[0]
if top < len(reads[seq[0]]):
while seq[0]==chrr and top < len(reads[seq[0]]) and reads[seq[0]][top].getmax() < int(seq[2]):
top=top+1
point=top
while seq[0]==chrr and point < len(reads[seq[0]]) and reads[seq[0]][point].getmin() <= int(seq[2]):
if reads[seq[0]][point].locisin(seq[2]) ==1:
deep=deep+1
if reads[seq[0]][point].snvisin(snv)==1:
altdeep=altdeep+1
point=point+1
fo.write(line[0:-1]+'\t'+str(altdeep)+':'+str(deep)+'\n')
fread.close()
fsnv.close()
fo.close()
|
UTF-8
|
Python
| false | false | 1,726 |
py
| 55 |
get_depth_old.py
| 51 | 0.577057 | 0.540556 | 0 | 71 | 23.28169 | 102 |
stymphalide/teaching
| 16,320,875,769,435 |
568cd0f25264c9fb374806fa8a0ab18826410dea
|
ff1b0b0f66c1c0c3b02b8b466956a8a55545e7cf
|
/nachhilfe/while_mit_randint.py
|
2811c75b04eb3d25010342ef6d8a84964055a43d
|
[] |
no_license
|
https://github.com/stymphalide/teaching
|
4b4ac57f9d150fc35689f600fc90f3036fa950c8
|
ba8f83d9fe90bb6db6098b9db8bbfac2934f32ab
|
refs/heads/master
| 2021-05-09T11:09:32.296533 | 2018-03-04T15:04:02 | 2018-03-04T15:04:02 | 118,984,791 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import random
# random.seed(0)  # uncomment for a reproducible sequence

# Draw 30 random numbers in the roulette range 0..36 (inclusive).
liste = [random.randint(0, 36) for _ in range(30)]
print(liste)
|
UTF-8
|
Python
| false | false | 119 |
py
| 19 |
while_mit_randint.py
| 11 | 0.647059 | 0.579832 | 0 | 9 | 12.333333 | 36 |
Samechanchan/TokyoGasINetHackathon-1
| 16,149,077,034,171 |
a1832620f141a8e5c5e5b0e553bde5d5ff97ea80
|
aa691f4fce334eb4296e29b806c6296779cafa47
|
/DangerMap/urls.py
|
a27f9bf67d8f1672300ecdbf56f61d6e33690974
|
[] |
no_license
|
https://github.com/Samechanchan/TokyoGasINetHackathon-1
|
428f2521be23d658738623bbcfbdb11fa1deef7a
|
ec293d37683707637acb5d1eb16dbc9af0bdb153
|
refs/heads/master
| 2023-01-20T12:11:15.400267 | 2020-11-22T01:04:25 | 2020-11-22T01:04:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.urls import path
from . import views

# URL routes for this app: the landing page and the registration endpoint.
# The `name` values allow reverse lookup via {% url %} / reverse().
urlpatterns = [
    path('', views.index, name='index'),
    path('regist', views.regist, name='regist')
]
|
UTF-8
|
Python
| false | false | 156 |
py
| 7 |
urls.py
| 6 | 0.666667 | 0.666667 | 0 | 7 | 21.428571 | 47 |
banjarajanardan/algorithmicToolboxUniversityOfSanDiego
| 5,274,219,852,885 |
3872746240df240c3a3696bd4e7213246471bc04
|
46931a382db32299c905cf42cb536c5bfdf63848
|
/week3_greedy_algorithms/4_maximum_advertisement_revenue/dot_product.py
|
ed5cd5c990603fb486dd2745dbabc42a4bd592a7
|
[] |
no_license
|
https://github.com/banjarajanardan/algorithmicToolboxUniversityOfSanDiego
|
c188dfba04a632993bdf26694cd36941c27f61a3
|
b06bec5b6f4d1e3a0f56f919df83f11d0a8e80da
|
refs/heads/master
| 2020-12-19T12:12:36.895275 | 2020-02-12T04:38:09 | 2020-02-12T04:38:09 | 235,730,532 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#Uses python3
import sys
def max_dot_product(a, b):
    """Return the maximum total revenue: sum of products pairing the
    largest elements of `a` with the largest elements of `b`.

    Equivalent to the original repeated max()+remove() pairing, but
    O(n log n) instead of O(n^2), and it no longer mutates its
    arguments (the original destroyed both input lists in place).
    """
    # Sorting both descending pairs the i-th largest values together.
    return sum(x * y for x, y in zip(sorted(a, reverse=True),
                                     sorted(b, reverse=True)))
if __name__ == '__main__':
    # Input protocol: n on the first line, then the two integer vectors.
    n = int(input())
    first_row = [int(tok) for tok in input().split()]
    second_row = [int(tok) for tok in input().split()]
    print(max_dot_product(first_row, second_row))
|
UTF-8
|
Python
| false | false | 403 |
py
| 13 |
dot_product.py
| 12 | 0.498759 | 0.493797 | 0 | 20 | 18.9 | 33 |
MichaelDarku/Python-for-crypto
| 10,488,310,141,852 |
01e4c46113ac1e9505fbd17d703035daac57fd9c
|
bd86fe0cf5cb27017de1efa7072896968fd7f7dd
|
/bitoin price ticker.py
|
d9d76dec4562ac68a15a119d69e26372f52cac33
|
[] |
no_license
|
https://github.com/MichaelDarku/Python-for-crypto
|
0087fb40f1a3c397f1511e3b288ed21a5fd39118
|
3257c67f389be26cc401be94bb9f31944a76d924
|
refs/heads/master
| 2023-07-24T10:16:01.568039 | 2021-09-06T07:08:18 | 2021-09-06T07:08:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 13 03:49:53 2018
@author: jacques
"""
# Fetch the current Bitcoin price from the CoinDesk "current price" API
# and print it together with the API's last-updated timestamp.
import time
import requests
# NOTE(review): `time` is imported but unused in the visible code.
r = requests.get('https://api.coindesk.com/v1/bpi/currentprice.json')
j=(' Cuz Jacques says so and he controls the internet')
# The JSON payload exposes the USD rate under bpi.USD.rate and the
# update time under time.updated.
print(' The current price of Bitcoin is: $' + r.json()['bpi']['USD']['rate'])
print(j)
print (r.json()['time']['updated'])
|
UTF-8
|
Python
| false | false | 409 |
py
| 8 |
bitoin price ticker.py
| 8 | 0.586797 | 0.552567 | 0 | 14 | 27.285714 | 93 |
uktrade/lite-api
| 1,211,180,783,964 |
516600015c3d4c75075d7163e7bc33f4680e1c60
|
2509936d814fb6cdd283c2549c518c8dfad9450c
|
/api/licences/migrations/0002_licence_decisions.py
|
dae16bb75df5b3651ed411d9ba857be37a98f534
|
[
"MIT"
] |
permissive
|
https://github.com/uktrade/lite-api
|
19f829119fa96de3f4862eb233845508b0fef7eb
|
b35792fc981220285ed9a7b3659aba460f1b207a
|
refs/heads/dev
| 2023-08-25T10:11:17.594001 | 2023-08-24T14:24:43 | 2023-08-24T14:24:43 | 172,914,199 | 4 | 3 |
MIT
| false | 2023-09-14T17:36:47 | 2019-02-27T12:46:22 | 2023-03-02T15:55:05 | 2023-09-14T17:36:47 | 16,182 | 3 | 5 | 5 |
Python
| false | false |
# Generated by Django 2.2.11 on 2020-04-01 13:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the many-to-many `decisions` field to the Licence model."""
    # Both apps' initial migrations must exist before linking the tables.
    dependencies = [
        ("decisions", "0001_initial"),
        ("licences", "0001_initial"),
    ]
    operations = [
        migrations.AddField(
            model_name="licence",
            name="decisions",
            # Reverse accessor on Decision is `licence`.
            field=models.ManyToManyField(related_name="licence", to="decisions.Decision"),
        ),
    ]
|
UTF-8
|
Python
| false | false | 458 |
py
| 1,096 |
0002_licence_decisions.py
| 1,038 | 0.59607 | 0.543668 | 0 | 19 | 23.105263 | 90 |
prosecurity/Pyllywood
| 2,216,203,133,162 |
9ce18f92ed4aaf5eeaf512935943b3cd86ee2854
|
052cc19e31a4e445358892842c14ccbed97cf796
|
/libs/attack_creds_first.py
|
3eb9582a53cf200b420b50ed4e1f1571c2d1ad7e
|
[] |
no_license
|
https://github.com/prosecurity/Pyllywood
|
affb28abd93d10d90b72573e50b97791c7fd92ef
|
b217d7a835d92659d7d6998a0e1991ff3860956e
|
refs/heads/master
| 2022-08-22T03:52:56.662150 | 2020-05-20T21:19:24 | 2020-05-20T21:19:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import socket, os, json
from . import attack_routes as ar
from . import auth_helper as ah
from colorama import init
from termcolor import colored
# colorama: enable ANSI colour codes on Windows terminals.
init()
def attackRoutesWithCreds(target, username, password, port, authmethod):
    """Probe candidate RTSP routes on target:port with known-good credentials.

    Sends an RTSP DESCRIBE per candidate route (from ar.routeBuilder) and
    collects routes answering "RTSP/1.0 200". Bails out after 20 hits on
    one target, since mass-200 responses suggest the server accepts any
    path. Prints results; returns nothing.

    NOTE(review): the bare `except: pass` at the bottom swallows every
    error (including KeyboardInterrupt), and sockets `s` are never
    closed -- consider narrowing the except and using a context manager.
    """
    try:
        foundStreams = [] # List of found streams for current target:port
        warnMass200 = dict() # Dict used as a counter
        warnMass200[target] = 0 # that will increase itself by 1 for each stream found
        for route in ar.routeBuilder(username, password):
            if warnMass200[target] > 20: # If there are more than 20 found streams for the current target
                print(colored('[-] Ending detection for {} due to mass 200 response codes.'.format(target), 'red'))
                print(colored('[+] Try using: rtsp://{}:{}@{}:{}/'.format(username, password, target, port), 'green'))
                print(colored("-" * 40, 'cyan'))
                break # Break - something's wrong (the target responds with status 200 too many times)
            recBuffer = "" # Used later with DIGEST auth
            seq = 1 # Starting RTSP sequence number
            # One fresh TCP connection per route.
            s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
            s.settimeout(10)
            s.connect((target,port))
            if authmethod == "digest":
                # The next line might be wrong - we are trying without user/pass combo for now
                describeURL = "rtsp://{}:{}/{}".format(target, port, route)
                s.send(ah.genDESCRIBE(describeURL,seq,ah.configJson["clUagent"], ""))
                recBuffer = s.recv(ah.configJson["bufLen"]).decode() # Buffer used in the next request, if DIGEST
                seq += 1 # Increasing sequence number - for the next request
            else:
                # If not DIGEST, we append user:pass@ to the descibe URL
                describeURL = "rtsp://{}:{}@{}:{}/{}".format(username, password, target, port, route)
            # Describing the URL
            s.send(ah.genDESCRIBE(describeURL,seq,ah.configJson["clUagent"], ah.authBuilder(authmethod, recBuffer, username, password, "/{}".format(route))))
            finalBuffer = s.recv(ah.configJson["bufLen"]).decode() # Getting the final response
            seq += 1 # Increasing sequence (everytime after a socket send)
            if "RTSP/1.0 200" in finalBuffer: # If we get 200, we found a route accessible with current user/pass
                warnMass200[target] += 1 # Increase the warning counter of the current target
                foundStreams.append("rtsp://{}:{}@{}:{}/{}".format(username, password, target, port, route))
        # When route-cycle is finished, if our warning counter is "normal"
        if warnMass200[target] <= 20 and warnMass200[target] > 0:
            for stream in foundStreams: # Print found streams from the list
                print(colored("[+] Found Stream: {}".format(stream), 'green'))
    except:
        pass
def attackCredentials(target, port, authmethod):
    """Brute-force RTSP credentials against target:port, then probe routes.

    Tries every username/password combination from resources/creds.json
    against a deliberately invalid URI; a "RTSP/1.0 404" reply means the
    server accepted the credentials (it got past auth to path lookup),
    at which point attackRoutesWithCreds() is invoked. Returns nothing.

    NOTE(review): the creds path uses a Windows separator ('resources\\creds.json')
    -- this will not resolve on POSIX systems; confirm the target platform.
    NOTE(review): the bare `except: pass` hides all failures and the socket
    `s` is never closed.
    """
    try:
        recBuffer = "" # Used in DIGEST, otherwise it is ignored by the auth-helper lib
        finalBuffer = "" # Used to check the response status codes
        seq = 1 # Starting sequence before each new connection
        # Single connection reused for every credential attempt.
        s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        s.settimeout(10)
        s.connect((target,port))
        # We start with an invalid URI, to avoid hitting continous 200's
        describeURL = "rtsp://{}:{}{}".format(target, port, "/sdekfjvhejkhrv")
        # Loading the credentials
        with open(os.path.join(os.path.dirname(__file__), 'resources\\creds.json'), 'r') as f:
            userpasslist = json.load(f)
        # Combining credentials in a loop
        for username in userpasslist['usernames']:
            for password in userpasslist['passwords']:
                if authmethod == "digest": # If auth method is DIGEST
                    # We have to DESCRIBE blindly first, to get nonce and realm
                    s.send(ah.genDESCRIBE(describeURL,seq,ah.configJson["clUagent"], ""))
                    recBuffer = s.recv(ah.configJson["bufLen"]).decode()
                    seq += 1 # Increasing the sequence
                # Trying to describe the route (recBuffer will be ignored, if it's a BASIC auth target)
                s.send(ah.genDESCRIBE(describeURL,seq,ah.configJson["clUagent"], ah.authBuilder(authmethod, recBuffer, username, password, "/sdekfjvhejkhrv")))
                finalBuffer = s.recv(ah.configJson["bufLen"]).decode()
                seq += 1 # Increasing seq again
                if "RTSP/1.0 404" in finalBuffer: # If we get a 404, we found its credentials
                    print(colored("[+] Attacking routes at {}:{} with valid username '{}' and password '{}' ({})...".format(target, port, username, password, authmethod), 'cyan'))
                    attackRoutesWithCreds(target, username, password, port, authmethod)
    except:
        pass
|
UTF-8
|
Python
| false | false | 4,946 |
py
| 7 |
attack_creds_first.py
| 6 | 0.609584 | 0.596442 | 0 | 90 | 53.966667 | 179 |
przemekmaniecki/terminarz_floty
| 4,380,866,651,369 |
d545848ba2bee9a8896a06cc92c5af16e1a15ec8
|
ca72e9bee45e4c18072a9fc42f90fe1a7de501dc
|
/flota/Pojazdy/views.py
|
9e634d9f7f3a1d6e22a090eecd47d440c558198e
|
[] |
no_license
|
https://github.com/przemekmaniecki/terminarz_floty
|
4dc1051878b5e95c957f420ed01c83895f37e6ab
|
491b8c942b53b81a8bbe976b7f5c0f831e8dd4a7
|
refs/heads/master
| 2022-08-16T19:41:51.100639 | 2020-05-24T09:19:14 | 2020-05-24T09:19:14 | 266,500,466 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import ValidationError
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.views import View
from Pojazdy.forms import AddVehicleForm, BT_Form, TACHO_Form, ADR_Form, EURO_Form, FRC_Form, UDT_Form, TDT_Form, SearchForm
from Pojazdy.forms import BridgeForm, UK_Form, BridgeDateForm
from .models import PojazdyModel, BT, tacho, ADR, NormaCzystosciSpalin, FRC, UDT, TDT, UKO
from django.db.models import Q
from datetime import date, datetime
# Create your views here.
class AddVehicleView(LoginRequiredMixin, View):
    """Render the add-vehicle form (GET) and create a PojazdyModel row (POST)."""
    def get(self, request):
        form = AddVehicleForm()
        ctx = {
            'form': form,
        }
        return render(request, 'add_vehicle.html', ctx)
    def post(self, request):
        form = AddVehicleForm(request.POST)
        if form.is_valid():
            # Success message shown back on the same form page.
            info = 'Dodano pojazd do bazy danych'
            new_vehicle = PojazdyModel.objects.create(**form.cleaned_data)
            return render(request, 'add_vehicle.html', {'form':form, 'info':info})
        else:
            info = 'Niepoprawne dane !'
            return render(request, 'add_vehicle.html', {'form': form, 'info': info})
class ShowVehicleView(LoginRequiredMixin, View):
    """List vehicles: select=0 all, 1 trucks/tractors, 2 (semi-)trailers.

    NOTE(review): a `select` outside 0..2 falls through and returns None,
    which Django reports as a server error -- confirm URLconf constrains it.
    """
    def get(self, request, select):
        if select == 0:
            pojazdy = PojazdyModel.objects.all()
            note = f"(wszystkie pojazdy). Pojazdรณw w bazie {len(pojazdy)}"
            return render(request, 'show_all.html', {'pojazdy': pojazdy, 'note': note})
        elif select == 1:
            # Exclude rows whose `rodzaj` contains "epa" (trailer types).
            pojazdy = PojazdyModel.objects.exclude(rodzaj__icontains="epa")
            note = f"(tylko samochody ciฤลผarowe i ciฤ
gniki siodลowe).Pojazdรณw w bazie {len(pojazdy)}"
            return render(request, 'show_all.html', {'pojazdy': pojazdy, 'note': note})
        elif select == 2:
            pojazdy = PojazdyModel.objects.filter(rodzaj__icontains="epa")
            note = f"(tylko przyczepy i naczepy).Pojazdรณw w bazie {len(pojazdy)}"
            return render(request, 'show_all.html', {'pojazdy': pojazdy, 'note': note})
class DeleteVehicleView(LoginRequiredMixin, View):
    """Delete the vehicle with the given id, then return to the full list.

    NOTE(review): .get() raises DoesNotExist for an unknown id; deletion on
    GET is also not CSRF-protected -- consider POST.
    """
    def get(self, request, id):
        vehicle = PojazdyModel.objects.get(id=id)
        vehicle.delete()
        return redirect('/wykaz_pojazdow/0/')
class SearchVehicleView(LoginRequiredMixin, View):
    """Free-text search over type, make, model, VIN and plate number."""
    def get(self, request):
        form = SearchForm(request.GET)
        # Validation result deliberately ignored; empty text matches all.
        form.is_valid()
        text = form.cleaned_data.get('text', '')
        result = PojazdyModel.objects.filter(
            Q(rodzaj__icontains=text)|
            Q(marka__icontains=text)|
            Q(model__icontains=text)|
            Q(VIN__icontains=text)|
            Q(nr_rej__icontains=text)
        )
        note = f"wyszukano {len(result)}"
        ctx ={
            'form': form,
            'pojazdy': result,
            'note': note,
        }
        return render(request, 'show_all.html', ctx)
class EditVehicleView(LoginRequiredMixin, View):
    """Edit the vehicle identified by its registration number `nr`.

    NOTE(review): on invalid POST data the form errors are silently
    dropped and the user is redirected anyway -- confirm intended.
    """
    def get(self, request, nr):
        p = PojazdyModel.objects.get(nr_rej=nr)
        form = AddVehicleForm(instance=p)
        ctx = {'form': form}
        return render(request, 'edit_vehicle.html', ctx)
    def post(self,request, nr):
        p = PojazdyModel.objects.get(nr_rej=nr)
        form = AddVehicleForm(request.POST, instance=p)
        if form.is_valid():
            form.save()
        return redirect("/wykaz_pojazdow/0/")
class BridgeEditView(LoginRequiredMixin, View):
    """Ask for a vehicle ID, then jump to that vehicle's edit page.

    NOTE(review): an invalid form on POST falls through and returns None
    (server error) -- confirm intended.
    """
    def get(self, request):
        form = BridgeForm()
        return render(request, 'bridge_edit.html', {'form': form})
    def post(self, request):
        form = BridgeForm(request.POST)
        if form.is_valid():
            object = PojazdyModel.objects.filter(id=form.cleaned_data['nr'])
            if len(object) > 0:
                return redirect(f'/edit/{object[0].nr_rej}')
            else:
                info = 'Brak pojazdu o takim ID'
                return render(request, 'bridge_edit.html', {'form': form, 'info': info})
class BridgeDelView(LoginRequiredMixin, View):
    """Ask for a vehicle ID, then jump to that vehicle's delete URL.

    NOTE(review): an invalid form on POST falls through and returns None
    (server error) -- confirm intended.
    """
    def get(self, request):
        form = BridgeForm()
        return render(request, 'bridge_del.html', {'form': form})
    def post(self, request):
        form = BridgeForm(request.POST)
        if form.is_valid():
            object = PojazdyModel.objects.filter(id=form.cleaned_data['nr'])
            if object.exists():
                return redirect(f'/delete/{object[0].id}')
            else:
                info = 'Brak pojazdu o takim ID'
                return render(request, 'bridge_del.html', {'form': form, 'info': info})
class VehicleDetailsView(LoginRequiredMixin, View):
    """Show the detail page of one vehicle; `today` lets the template
    highlight expired deadlines."""
    def get(self, request, id):
        # filter() (not get()) so the template receives a queryset.
        unit = PojazdyModel.objects.filter(id=id)
        today = date.today()
        return render(request, 'detail.html', {'unit': unit,'today': today})
class AddBtView(LoginRequiredMixin, View):
    """Create or update the technical-inspection (BT) record of a vehicle.

    NOTE(review): in post() the trailing commas make each assignment store
    a 1-tuple (e.g. object.instytucja = (value,)) -- almost certainly a
    bug; verify the stored values. The nazwa literal also appears
    corrupted/split by encoding damage in this copy of the source.
    """
    def get(self, request, id):
        unit = PojazdyModel.objects.get(id=id)
        # Pre-fill the form if a BT record already exists for this vehicle.
        if BT.objects.filter(pojazd=unit).exists():
            bt_unit=BT.objects.get(pojazd=unit)
            form = BT_Form(instance=bt_unit)
        else:
            form = BT_Form()
        ctx = {'unit': unit, 'form': form}
        return render(request, 'add_BT.html', ctx)
    def post(self,request, id):
        unit = PojazdyModel.objects.get(id=id)
        form = BT_Form(request.POST)
        object, created = BT.objects.get_or_create(pojazd=unit)
        if form.is_valid():
            object.nazwa="Przeglฤ
d techniczy pojazdu",
            object.instytucja=form.cleaned_data['instytucja'],
            object.wymagane=form.cleaned_data['wymagane'],
            object.data_konc=form.cleaned_data['data_konc'],
            object.pojazd=unit
            object.save()
            return redirect(f'/details/{id}')
        else:
            return redirect('wrong/')
class AddTachoView(LoginRequiredMixin, View):
    """Create or update the tachograph-inspection record of a vehicle.

    NOTE(review): same trailing-comma 1-tuple assignments as AddBtView --
    likely a bug; the nazwa literal is also encoding-damaged in this copy.
    """
    def get(self, request, id):
        unit = PojazdyModel.objects.get(id=id)
        # Pre-fill the form if a tacho record already exists.
        if tacho.objects.filter(pojazd=unit).exists():
            bt_unit = tacho.objects.get(pojazd=unit)
            form = TACHO_Form(instance=bt_unit)
        else:
            form = TACHO_Form()
        ctx = {'unit': unit, 'form': form}
        return render(request, 'add_tacho.html', ctx)
    def post(self,request, id):
        unit = PojazdyModel.objects.get(id=id)
        form = TACHO_Form(request.POST)
        object, created = tacho.objects.get_or_create(pojazd=unit)
        if form.is_valid():
            object.nazwa="Przeglฤ
d urzฤ
dzenia rejestrujฤ
cego",
            object.instytucja=form.cleaned_data['instytucja'],
            object.wymagane=form.cleaned_data['wymagane'],
            object.data_konc=form.cleaned_data['data_konc'],
            object.pojazd=unit
            object.save()
            return redirect(f'/details/{id}')
        else:
            return redirect('wrong/')
class AddAdrVehView(LoginRequiredMixin, View):
    """Create or update the ADR (dangerous goods) record of a vehicle.

    NOTE(review): get() reads from the ADR model but post() writes via
    UKO.objects.get_or_create -- this looks like a copy-paste bug (should
    presumably be ADR); also the trailing commas assign 1-tuples.
    """
    def get(self, request, id):
        unit = PojazdyModel.objects.get(id=id)
        if ADR.objects.filter(pojazd=unit).exists():
            bt_unit = ADR.objects.get(pojazd=unit)
            form = ADR_Form(instance=bt_unit)
        else:
            form = ADR_Form()
        ctx = {'unit': unit, 'form': form}
        return render(request, 'addadr.html', ctx)
    def post(self,request, id):
        unit = PojazdyModel.objects.get(id=id)
        form = ADR_Form(request.POST)
        object, created = UKO.objects.get_or_create(pojazd=unit)
        if form.is_valid():
            object.nazwa="Dopuszczenie do przewodu ADR",
            object.instytucja=form.cleaned_data['instytucja'],
            object.wymagane=form.cleaned_data['wymagane'],
            object.data_konc=form.cleaned_data['data_konc'],
            object.pojazd=unit
            object.save()
            return redirect(f'/details/{id}')
        else:
            return redirect('wrong/')
class AddUdtView(LoginRequiredMixin, View):
    """Create or update the UDT (lift/crane certification) record.

    NOTE(review): get() reads from UDT but post() writes via
    TDT.objects.get_or_create -- looks like a copy-paste bug (should
    presumably be UDT); trailing commas again assign 1-tuples.
    """
    def get(self, request, id):
        unit = PojazdyModel.objects.get(id=id)
        if UDT.objects.filter(pojazd=unit).exists():
            bt_unit = UDT.objects.get(pojazd=unit)
            form = UDT_Form(instance=bt_unit)
        else:
            form = UDT_Form()
        ctx = {'unit': unit, 'form': form}
        return render(request, 'addudt.html', ctx)
    def post(self,request, id):
        unit = PojazdyModel.objects.get(id=id)
        form = UDT_Form(request.POST)
        object, created = TDT.objects.get_or_create(pojazd=unit)
        if form.is_valid():
            object.nazwa="Badanie dopuszczenia windy hydraulicznej lub HDS",
            object.instytucja=form.cleaned_data['instytucja'],
            object.wymagane=form.cleaned_data['wymagane'],
            object.data_konc=form.cleaned_data['data_konc'],
            object.pojazd=unit
            object.save()
            return redirect(f'/details/{id}')
        else:
            return redirect('wrong/')
class AddTdtView(LoginRequiredMixin, View):
    """Create or update the TDT record of a vehicle.

    NOTE(review): post() builds a TDT_Form and immediately overwrites it
    with UK_Form(request.POST) -- so the TDT form data is never validated;
    likely a bug. get() also renders 'addudt.html' (UDT template). The
    trailing commas assign 1-tuples, and the nazwa literal is
    encoding-damaged in this copy.
    """
    def get(self, request, id):
        unit = PojazdyModel.objects.get(id=id)
        if TDT.objects.filter(pojazd=unit).exists():
            bt_unit = TDT.objects.get(pojazd=unit)
            form = TDT_Form(instance=bt_unit)
        else:
            form = TDT_Form()
        ctx = {'unit': unit, 'form': form}
        return render(request, 'addudt.html', ctx)
    def post(self,request, id):
        unit = PojazdyModel.objects.get(id=id)
        form = TDT_Form(request.POST)
        object, created = TDT.objects.get_or_create(pojazd=unit)
        form = UK_Form(request.POST)
        if form.is_valid():
            object.nazwa="Badania elementรณw podlegajฤ
cych pod TDT",
            object.instytucja=form.cleaned_data['instytucja'],
            object.wymagane=form.cleaned_data['wymagane'],
            object.data_konc=form.cleaned_data['data_konc'],
            object.pojazd=unit
            object.save()
            return redirect(f'/details/{id}')
        else:
            return redirect('wrong/')
class AddEuroView(LoginRequiredMixin, View):
    """Create or update the exhaust-emission standard (EURO) record."""
    def get(self, request, id):
        unit = PojazdyModel.objects.get(id=id)
        if NormaCzystosciSpalin.objects.filter(pojazd=unit).exists():
            bt_unit = NormaCzystosciSpalin.objects.get(pojazd=unit)
            form = EURO_Form(instance=bt_unit)
        else:
            form = EURO_Form()
        ctx = {'unit': unit, 'form': form}
        return render(request, 'addeuro.html', ctx)
    def post(self,request, id):
        unit = PojazdyModel.objects.get(id=id)
        form = EURO_Form(request.POST)
        if form.is_valid():
            # Update the existing record or create a fresh one.
            if NormaCzystosciSpalin.objects.filter(pojazd=unit).exists():
                bt_unit = NormaCzystosciSpalin.objects.get(pojazd=unit)
                bt_unit.norma=form.cleaned_data['norma']
                bt_unit.wymagane=form.cleaned_data['wymagane']
                bt_unit.save()
            else:
                bt_unit = NormaCzystosciSpalin(**form.cleaned_data)
                bt_unit.pojazd = unit
                bt_unit.save()
            return redirect(f'/details/{id}')
        else:
            # NOTE(review): `info` is set but unused on this branch.
            info = 'Niepoprawne dane !'
            return redirect('wrong/')
class AddUkView(LoginRequiredMixin, View):
    """Create or update the insurance (UKO) record of a vehicle.

    NOTE(review): every field except `instytucja` is assigned with a
    trailing comma, i.e. stored as a 1-tuple -- likely a bug; verify.
    """
    def get(self, request, id):
        unit = PojazdyModel.objects.get(id=id)
        form = UK_Form()
        # Pre-fill when an insurance record already exists.
        if UKO.objects.filter(pojazd=unit).exists():
            bt_unit = UKO.objects.get(pojazd=unit)
            form = UK_Form(instance=bt_unit)
        ctx = {'unit': unit, 'form': form}
        return render(request, 'adduk.html', ctx)
    def post(self,request, id):
        unit = PojazdyModel.objects.get(id=id)
        object, created = UKO.objects.get_or_create(pojazd=unit)
        form = UK_Form(request.POST)
        if form.is_valid():
            object.instytucja=form.cleaned_data['instytucja']
            object.data_konc=form.cleaned_data['data_konc'],
            object.OC=form.cleaned_data['OC'],
            object.AC=form.cleaned_data['AC'],
            object.NNW=form.cleaned_data['NNW'],
            object.nr_polisy=form.cleaned_data['nr_polisy'],
            object.pojazd=unit
            object.save()
            return redirect(f'/details/{id}')
        else:
            return redirect('wrong/')
class AddFrcView(LoginRequiredMixin, View):
    """Create or update the FRC (refrigerated transport) certificate record.

    NOTE(review): same trailing-comma 1-tuple assignments as the other
    Add*View classes -- likely a bug; verify stored values.
    """
    def get(self, request, id):
        unit = PojazdyModel.objects.get(id=id)
        if FRC.objects.filter(pojazd=unit).exists():
            bt_unit = FRC.objects.get(pojazd=unit)
            form = FRC_Form(instance=bt_unit)
        else:
            form = FRC_Form()
        ctx = {'unit': unit, 'form': form}
        return render(request, 'addfrc.html', ctx)
    def post(self,request, id):
        unit = PojazdyModel.objects.get(id=id)
        form = FRC_Form(request.POST)
        object, created = FRC.objects.get_or_create(pojazd=unit)
        if form.is_valid():
            object.nazwa="Certyfikat FRC",
            object.instytucja=form.cleaned_data['instytucja'],
            object.wymagane=form.cleaned_data['wymagane'],
            object.data_konc=form.cleaned_data['data_konc'],
            object.pojazd=unit
            object.save()
            return redirect(f'/details/{id}')
        else:
            return redirect('wrong/')
class BookShowView(LoginRequiredMixin, View):
    """Ask for a vehicle ID, then jump to that vehicle's detail page."""
    def get(self, request):
        form = BridgeForm()
        return render(request, 'bridge_book.html', {'form': form})
    def post(self, request):
        form = BridgeForm(request.POST)
        if form.is_valid():
            if PojazdyModel.objects.filter(id=form.cleaned_data['nr']).exists():
                object = PojazdyModel.objects.get(id=form.cleaned_data['nr'])
                return redirect(f'/details/{object.id}')
            else:
                info = 'Brak pojazdu o takim ID'
                return render(request, 'bridge_book.html', {'form': form, 'info': info})
        else:
            return redirect('wrong/')
class BridgeDateView(LoginRequiredMixin, View):
    """Ask for a deadline date, then jump to the expiring-vehicles list."""
    def get(self, request):
        form = BridgeDateForm()
        return render(request, 'dedline_bridge.html', {'form': form})
    def post(self, request):
        form = BridgeDateForm(request.POST)
        if form.is_valid():
            # date2 is interpolated into the URL; DedlineVehicleView parses
            # it back with "%Y-%m-%d".
            date2 = form.cleaned_data['date2']
            return redirect(f'/dedlinevehicle/{date2}')
        else:
            info = "Nieprawidลowe dane"
            return render(request, 'dedline_bridge.html', {'form': form, 'info': info})
class DedlineVehicleView(LoginRequiredMixin, View):
    """List vehicles with any certificate/insurance deadline on or before
    the given date (URL segment formatted "%Y-%m-%d").

    NOTE(review): przegladfrc uses __lt while the other six filters use
    __lte -- the inconsistency looks unintentional; confirm.
    """
    def get(self, request, date_string):
        dedline = datetime.strptime(date_string, "%Y-%m-%d")
        # Reduce to a plain date for comparison with DateField columns.
        dedline = datetime.date(dedline)
        pojazdy = PojazdyModel.objects.filter(
            Q(przegladtech__data_konc__lte=dedline)|
            Q(przegladtacho__data_konc__lte=dedline)|
            Q(przegladadr__data_konc__lte=dedline)|
            Q(przegladtdt__data_konc__lte=dedline)|
            Q(przegladudt__data_konc__lte=dedline)|
            Q(komunikacyjne__data_konc__lte=dedline)|
            Q(przegladfrc__data_konc__lt=dedline)
        )
        note = f"(Pojazdy z koลcem terminu). Liczba pojazdรณw ze zbliลผajฤ
cym siฤ terminem: {len(pojazdy)}"
        return render(request, 'show_all.html', {'pojazdy': pojazdy, 'note': note})
class HelpView(LoginRequiredMixin, View):
    """Render the static help page."""
    def get(self, request):
        return render(request, 'help.html', {})
class AboutView(LoginRequiredMixin, View):
    """Render the static about page."""
    def get(self, request):
        return render(request, 'about.html', {})
class WrongDataView(LoginRequiredMixin, View):
    """Render the generic invalid-data error page ('wrong/' redirect target)."""
    def get(self, request):
        return render(request, 'bad_data.html', {})
|
UTF-8
|
Python
| false | false | 15,835 |
py
| 43 |
views.py
| 26 | 0.594208 | 0.593513 | 0 | 373 | 41.402145 | 124 |
tanvitiwari17/opencv
| 1,073,741,871,328 |
e365143167812ba1a9338b61275b200d057eb7fb
|
9ed44196a423f4d85cb6d3ce264cc2565b2f7b4e
|
/Hand_Gesture_image.py
|
13454184e370e6705a6a78f63452e00df98aab20
|
[] |
no_license
|
https://github.com/tanvitiwari17/opencv
|
48bc7455aa7a2b8f7479a183a4edbdb537375266
|
86baade5211e461ba47e4f8167550383cbeb9e1c
|
refs/heads/master
| 2023-03-06T04:04:07.574858 | 2021-02-17T11:13:00 | 2021-02-17T11:13:00 | 301,128,225 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 15 18:54:12 2020
@author: tanvi
"""
# convex hull :
#technique to find out the pixel of outer edges of any abject and connects them
import cv2
import numpy as np
# NOTE(review): numpy is imported but unused in the visible code.
# Load 'cap.jpg' in grayscale (flag 0).
hand = cv2.imread('cap.jpg',0)
# Binarize: pixels above 70 become 255, the rest 0.
ret, the = cv2.threshold(hand,70,255,cv2.THRESH_BINARY)
# to find connected pixels
contours,_= cv2.findContours(the.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
# returns contour and hierarchy, we only require contour here.
# Compute one convex hull per detected contour.
hull = [cv2.convexHull(c) for c in contours]
# Draw all hulls (index -1) onto the grayscale image.
# NOTE(review): drawing mutates `hand`, which is also shown as "Original".
final = cv2.drawContours(hand,hull,-1,(255,0,0))
cv2.imshow("Original",hand)
cv2.imshow('Thresh', the)
cv2.imshow("Convex Hull",final)
# Block until a key is pressed, then close all display windows.
cv2.waitKey(0)
cv2.destroyAllWindows()
|
UTF-8
|
Python
| false | false | 695 |
py
| 27 |
Hand_Gesture_image.py
| 27 | 0.716547 | 0.658993 | 0 | 30 | 22.166667 | 80 |
Kotaimen/awscfncli
| 11,914,239,320,990 |
c99ac935604a333fdf78d37f92f41164c5e4977e
|
ce8fe099312b3f70145c3b983f686353ce7ad803
|
/tests/unit/runner/test_bobo3_profile.py
|
7a4859bc53bbe4a3b3dcad56a45d683fde79184d
|
[
"MIT"
] |
permissive
|
https://github.com/Kotaimen/awscfncli
|
53876e5c85a59e443663907b9bfe12060a4969fa
|
91b5eb6d73a90691d0a9e0634b8cf1054209c64f
|
refs/heads/develop
| 2023-01-09T03:59:22.675633 | 2022-07-09T12:01:27 | 2022-07-09T12:01:27 | 77,896,914 | 62 | 17 |
MIT
| false | 2022-12-27T16:56:34 | 2017-01-03T08:11:09 | 2022-10-24T21:18:55 | 2022-12-27T16:56:33 | 715 | 55 | 13 | 27 |
Python
| false | false |
from awscfncli2.runner import Boto3Profile
class TestStackSelector(object):
    """Unit tests for Boto3Profile.update()."""
    def test_update(self):
        # Two profiles with the same name but different regions.
        s1 = Boto3Profile('foo','bar')
        s2 = Boto3Profile('foo', 'baz')
        assert s1.region_name == 'bar'
        s1.update(s2)
        # NOTE(review): nothing is asserted after update(); consider
        # asserting the expected post-update region_name.
|
UTF-8
|
Python
| false | false | 246 |
py
| 124 |
test_bobo3_profile.py
| 71 | 0.626016 | 0.589431 | 0 | 10 | 23.6 | 42 |
dagege1993/codeformyself
| 5,196,910,475,926 |
350398060e3d3b946563ed3a2c7e81e4fa92ad94
|
1787eb394653a963377f977980125e0d05a2ddbb
|
/flashtripdemo/competitor/api/competitor/routers.py
|
08553b76925cfb73e93e7a33b9d560d5790876f3
|
[] |
no_license
|
https://github.com/dagege1993/codeformyself
|
51283169b819309e63c135ebca501f87d37a1651
|
0b32c6e1b224f543d23e284104d311f2a5c41475
|
refs/heads/master
| 2021-10-25T01:23:59.036132 | 2019-06-06T06:23:37 | 2019-06-06T06:23:37 | 146,266,239 | 7 | 8 | null | false | 2021-10-06T23:16:41 | 2018-08-27T08:03:11 | 2021-07-14T12:44:14 | 2021-10-06T23:16:40 | 40,736 | 7 | 9 | 14 |
HTML
| false | false |
from api.types.handler import HealthCheck
from api.competitor.v1 import AvaiCompetitor, PrepCompetitor
# Route table consumed by the web application: (URL pattern, handler class)
# pairs.  The bare "/" route serves as a health-check/liveness endpoint.
routers = [
    (r"/", HealthCheck),
    (r"/api/v1/competitor/availability", AvaiCompetitor),
    (r"/api/v1/competitor/preparation", PrepCompetitor),
]
|
UTF-8
|
Python
| false | false | 257 |
py
| 715 |
routers.py
| 617 | 0.743191 | 0.731518 | 0 | 8 | 31.25 | 60 |
foreloner87/Guvi-player
| 6,992,206,766,698 |
3295e72669fbe91b7d1806c0a57e5189c19b48c2
|
b0cce56edd7f08649ec0dc2fe744cdc15e3ddfef
|
/set4-31.py
|
bfaadc22697f203a29db259f3eaa217d3ba70748
|
[] |
no_license
|
https://github.com/foreloner87/Guvi-player
|
406c99fd451310ecc5ad8102652fd0e15a94bc0c
|
4d665ab7f817dbac7a90beaf4609ec2df873e0da
|
refs/heads/master
| 2020-03-28T15:53:06.564284 | 2019-03-12T03:43:33 | 2019-03-12T03:43:33 | 148,634,766 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def is_balanced(text):
    """Return True if every ')' in *text* closes an earlier '('.

    Non-parenthesis characters are ignored, matching the original script.
    """
    depth = 0
    for ch in text:
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
            # BUGFIX: the original only compared the final count to zero,
            # so an input like ")(" was reported as balanced.  A close
            # bracket with no open partner can never be matched.
            if depth < 0:
                return False
    return depth == 0


if __name__ == "__main__":
    # Guard keeps the module importable (e.g. by tests) without blocking
    # on stdin; run as a script it behaves as before.
    print("yes" if is_balanced(input()) else "no")
|
UTF-8
|
Python
| false | false | 135 |
py
| 10 |
set4-31.py
| 9 | 0.355556 | 0.325926 | 0 | 11 | 11.272727 | 16 |
TarzanQll/dataanalysis
| 14,783,277,482,400 |
b85727c1672ff42274a8cda5c0f5da6774b3237d
|
08bbe31eff30a121ec55f156881af52d9a3eaa96
|
/dmlib/src/main/python/nlp/cluster/LDA/GensimLDA.py
|
d08646dd8cbf0854639a50f1f82938b7c0477ff7
|
[] |
no_license
|
https://github.com/TarzanQll/dataanalysis
|
aeea3945727817d3431f872903121d7af8b2524f
|
5ac975a828ef222cb3875114dcee63f0db5376e6
|
refs/heads/master
| 2021-06-01T11:51:05.012191 | 2020-12-07T10:06:14 | 2020-12-07T10:06:14 | 103,211,255 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Train a 2-topic LDA model over Chinese news articles with gensim.
# Pipeline: read each file, keep only CJK characters, segment with jieba,
# drop stop words, then build a dictionary/bag-of-words corpus.
import gensim
import os
import jieba
import re
filepath = "/home/nyh/work/workspace/dataanalysis/dmlib/data/news/"
text = []
fp = open('/home/nyh/work/workspace/dataanalysis/dmlib/data/nlp/stopWords.txt', 'r')
StopWordsList = [line.strip() for line in fp]
for filename in os.listdir(filepath):
    file = open(filepath + filename)
    line = file.readline()
    # NOTE(review): this first line is appended WITHOUT stop-word
    # filtering, unlike the loop below -- confirm intended.
    text.append(' '.join(jieba.cut(''.join(re.findall(u'[\u4e00-\u9fff]+', line)))))
    while line:
        line = file.readline()
        # Keep only CJK characters, segment, and drop stop words.
        text.append(' '.join(word for word in jieba.cut(''.join(re.findall(u'[\u4e00-\u9fff]+', line))) if word not in StopWordsList))
    file.close()
words = []
for doc in text:
    tdoc = doc.split(" ")
    # Skip documents with fewer than two tokens.
    if len(tdoc) < 2:
        pass
    else:
        words.append(tdoc)
# obtain: (word_id:word)
word_count_dict = gensim.corpora.Dictionary(words)
# print(words)
# Build the dictionary (word -> id).
# NOTE(review): identical to word_count_dict above, and the name shadows
# the builtin `dict` -- consider consolidating.
dict = gensim.corpora.Dictionary(words)
# Convert each tokenised document into an (id, count) bag-of-words vector.
corpus = [dict.doc2bow(text) for text in words]
# corpus = corpus = [[(0, 1.0), (1, 1.0), (2, 1.0)],
#           [(2, 1.0), (3, 1.0), (4, 1.0), (5, 1.0), (6, 1.0), (8, 1.0)],
#           [(1, 1.0), (3, 1.0), (4, 1.0), (7, 1.0)],
#           [(0, 1.0), (4, 2.0), (7, 1.0)],
#           [(3, 1.0), (5, 1.0), (6, 1.0)],
#           [(9, 1.0)],
#           [(9, 1.0), (10, 1.0)],
#           [(9, 1.0), (10, 1.0), (11, 1.0)],
#           [(8, 1.0), (10, 1.0), (11, 1.0)]]
# lda_model = gensim.models.LdaModel(corpus, num_topics=2, id2word=word_count_dict, passes=5)
lda_model = gensim.models.LdaModel(corpus, num_topics=2)
# Print the discovered topics.
lda_model.print_topics(2)
|
UTF-8
|
Python
| false | false | 1,730 |
py
| 100 |
GensimLDA.py
| 76 | 0.540767 | 0.478417 | 0 | 50 | 32.38 | 134 |
pemj/crawlblr
| 8,735,963,505,288 |
e39db6d8c57498256319cefca39ee83e523a7e01
|
f1e81c4c4076afc6a023f388024b0e8ba6c4e0d4
|
/crawlblr/crawltech.py
|
4f602bf791cabd4b1d04c75400d0564543312174
|
[] |
no_license
|
https://github.com/pemj/crawlblr
|
5a3e46cf524804a6e379c541803bde1e8f4eff6f
|
6165780ed97a4613029d1bde69ab55416ade1da7
|
refs/heads/master
| 2021-07-08T06:07:11.140753 | 2014-07-22T07:28:57 | 2014-07-22T07:28:57 | 17,350,827 | 4 | 0 | null | false | 2021-06-01T21:57:57 | 2014-03-03T00:40:27 | 2017-10-07T01:14:52 | 2021-06-01T21:57:54 | 2,094 | 6 | 0 | 1 |
Python
| false | false |
from urllib import request, error
import socket
from queue import Empty
import os
import json
# for some user
# opens a URL, uses JSON decoding to turn it into a dictionary, checks
# for validity. Retries a few times if it breaks
# Parameters:
# url: type: string, contents: URL representing a tumblr API request
# segment: type: string, contents: the code segment identifier
def openSafely(url, segment, f):
    """Fetch *url* and JSON-decode the tumblr API response.

    *segment* names the calling code section for messages written to the
    open log file *f*.  Returns the decoded response dict on success, or
    False on any transport or API-level failure, so callers can test the
    result with a plain truthiness check.
    """
    fChecker = True
    for i in range(2):
        try:
            page = request.urlopen(url)
        except error.HTTPError:
            f.write("404, "+segment+": " + url + "\n")
            break
        except error.URLError:
            f.write("URL error, "+segment+"\n")
            break
        except socket.error:
            f.write("socket error, "+segment+"\n")
            break
        else:
            fChecker = False
            break
    if fChecker:
        f.write("unknown error in " + segment + "\n")
        return False
    page = page.read().decode('utf-8')
    page = json.loads(page)
    if(page['meta']['msg'] != "OK"):
        f.write("Bad " + segment + " page, error code " + page['meta']['msg'])
        # BUGFIX: previously returned the string "nope", which is truthy
        # and defeated the callers' `if not page:` error checks; return
        # False like the transport-error path above.
        return False
    return page
def stubCrawl(userDeck, usersSeen, dataQ, end, debug):
    """Test stub: flood *dataQ* with a fixed user tuple until it holds ~1M rows.

    Only *dataQ* is used; the remaining parameters mirror crawlUser's
    signature so this stub is a drop-in replacement for it.
    """
    dummy_row = ("dduane", "01", 0, 0)
    while dataQ.qsize() < 1000000:
        dataQ.put(dummy_row)
def crawlUser(userDeck, usersSeen, dataQ, end, debug):
    """Crawl a single tumblr user taken from *userDeck*.

    Pulls the blog's info, its posts (with reblog provenance) and its
    public likes through the tumblr v2 API, pushing result tuples onto
    *dataQ* for the database writer.  Blogs discovered via reblogs are
    queued back onto *userDeck* and counted in *usersSeen*.
    """
    # Tumblr's v2 API pages results; this is the per-request page size
    # used to advance the offset.  NOTE(review): confirm against the
    # current API documentation (default limit is 20).
    PAGE_SIZE = 20

    try:
        username = userDeck.get(True, 5)
    except Empty:
        return "nope\n"

    if hasattr(os, 'getppid'):  # only available on Unix
        print('parent process:', os.getppid())
    pid = os.getpid()
    print('process id:', pid)
    f = open(('database/crawlers/logfile_'+str(pid)), 'w')
    f.write("begin crawler" + str(pid) + "\n")

    apikey = "IkJtqSbg6Nd3OBnUdaGl9YWE3ocupygJcnPebHRou8eFbd4RUv"
    blogString = "http://api.tumblr.com/v2/blog/" + username + ".tumblr.com/info?api_key=" + apikey
    postString = "http://api.tumblr.com/v2/blog/" + username + ".tumblr.com/posts?api_key=" + apikey
    noteString = "http://api.tumblr.com/v2/blog/" + username + ".tumblr.com/likes?api_key=" + apikey

    ##################################USER section######################
    # Fetch the blog's metadata.
    info = openSafely(blogString, "user info", f)
    if not info:
        return "info error"

    username = info['response']['blog']['title']
    updated = info['response']['blog']['updated']
    postCount = info['response']['blog']['posts']
    # Likes are only visible when the user shares them publicly.
    if info['response']['blog']['share_likes']:
        likeCount = info['response']['blog']['likes']
    else:
        likeCount = -1

    userArgs = (username, updated, postCount, likeCount)
    if debug:
        f.write("[DEBUG] user written: "+str(userArgs) + "\n")
    # Off to the database.
    dataQ.put(userArgs)

    #################################POSTS section######################
    offset = 0
    previousPosts = set()
    recentPosts = set()
    while offset < postCount:
        if debug:
            f.write("[DEBUG] user:" + username+", post offset: "+(str(offset))+"\n")
        # BUGFIX: the query string had been HTML-entity-mangled
        # ("&not" + "es_info"); restore "&notes_info=True".
        posts = openSafely(postString +
                           "&notes_info=True&reblog_info&offset=" +
                           (str(offset)), "posts section", f)
        # BUGFIX: the original never advanced `offset`, so this loop (and
        # the likes loop below) spun forever; also bail out instead of
        # retrying the same page endlessly on failure.
        if not posts:
            break
        batch = posts['response']['posts']
        if not batch:
            break
        for post in batch:
            postNumber = post['id']
            if postNumber in previousPosts:
                continue
            recentPosts.add(postNumber)
            postType = post['type']
            postDate = post['timestamp']
            noteCount = post['note_count']
            #########Reblogged Section###################
            if 'reblogged_from_name' in post.keys():
                identity = post['reblogged_from_name']
                noteArgs = (username, identity, postNumber, "reblog")
                dataQ.put(noteArgs)
                # Queue newly seen blogs for crawling; count sightings.
                if (identity not in usersSeen):
                    usersSeen[identity] = 0
                    userDeck.put(identity)
                usersSeen[identity] += 1
                continue
            postArgs = (username, postNumber, postType, postDate, noteCount)
            if debug:
                f.write("[DEBUG] post written: "+str(postArgs) + "\n")
            dataQ.put(postArgs)
        offset += PAGE_SIZE
        previousPosts = recentPosts
        recentPosts = set()

    #################################LIKES section######################
    offset = 0
    while offset < likeCount:
        if debug:
            f.write("[DEBUG] user:" + username+", note offset: " + (str(offset))+"\n")
        # BUGFIX: the original referenced an undefined `notestring` and
        # passed too few arguments to openSafely.
        notes = openSafely(noteString + "&offset=" + str(offset),
                           "likes section", f)
        if not notes:
            break
        liked = notes['response']['liked_posts']
        if not liked:
            break
        for note in liked:
            postNumber = note['id']
            noteType = "like"
            rebloggedFrom = note['blog_name']
            noteArgs = (username, rebloggedFrom, postNumber, noteType)
            if debug:
                f.write("[DEBUG] like written: "+str(noteArgs) + "\n")
            dataQ.put(noteArgs)
        offset += PAGE_SIZE
|
UTF-8
|
Python
| false | false | 5,476 |
py
| 12 |
crawltech.py
| 8 | 0.542549 | 0.536888 | 0 | 153 | 34.705882 | 100 |
betty29/code-1
| 18,116,172,074,787 |
b53ff2166642196a679b8541a1becadc062a5408
|
50008b3b7fb7e14f793e92f5b27bf302112a3cb4
|
/recipes/Python/440694_Determine_size_console_window/recipe-440694.py
|
cf1781ca82f9c7a9dd7d29c3250b2feffeef807e
|
[
"Python-2.0",
"MIT"
] |
permissive
|
https://github.com/betty29/code-1
|
db56807e19ac9cfe711b41d475a322c168cfdca6
|
d097ca0ad6a6aee2180d32dce6a3322621f655fd
|
refs/heads/master
| 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 |
MIT
| false | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | 2021-02-24T12:00:22 | 2021-02-24T15:39:59 | 8,183 | 0 | 0 | 0 |
Python
| false | false |
# Determine the size of the current Windows console window via the Win32
# GetConsoleScreenBufferInfo API.  (Python 2 syntax; Windows-only.)
from ctypes import windll, create_string_buffer

# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
# CONSOLE_SCREEN_BUFFER_INFO is 22 bytes: 11 packed 16-bit fields.
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)

if res:
    import struct
    (bufx, bufy, curx, cury, wattr,
     left, top, right, bottom, maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
    # Window size = visible region bounds, inclusive.
    sizex = right - left + 1
    sizey = bottom - top + 1
else:
    sizex, sizey = 80, 25 # can't determine actual size - return default values

print sizex, sizey
|
UTF-8
|
Python
| false | false | 557 |
py
| 8,810 |
recipe-440694.py
| 3,493 | 0.694794 | 0.658887 | 0 | 20 | 26.85 | 83 |
linhthi/ranking-explorer
| 17,008,070,505,719 |
8b6afb9bf47d1bd6673188e581e8c7b037bb2ef2
|
07c322357a7f056f25a9ea822e4bc5d732762ee3
|
/config/config.py
|
8d5d6bad077edead2903d73bad4eea2f16072440
|
[] |
no_license
|
https://github.com/linhthi/ranking-explorer
|
b4252742698d48b62c9e205e200215ebce797b3c
|
f6ae74b2b07c5ef0bec865da1ea74a63238f294e
|
refs/heads/master
| 2022-12-06T19:50:21.529340 | 2020-08-26T01:18:10 | 2020-08-26T01:18:10 | 289,407,463 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import yaml
# Absolute path of the directory containing this config module.
basedir = os.path.abspath(os.path.dirname(__file__))
# Load MySQL connection settings from a YAML file.
# NOTE(review): keys such as 'mysql.database' are read as literal flat
# keys, not nested mappings -- confirm the YAML layout matches.
with open('./config/test.yaml') as f:
    data = yaml.load(f, Loader=yaml.FullLoader)
MYSQL_DATABASE = data.get('mysql.database')
MYSQL_HOST = data.get('mysql.host')
MYSQL_USER = data.get('mysql.username')
MYSQL_PASSWORD = data.get('mysql.password')
MYSQL_PORT = data.get('mysql.port')
# Former MySQL-backed configuration, kept for reference:
# class Config(object):
#     SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://{0}:{1}@{2}:{3}/{4}'.format(
#         MYSQL_USER, MYSQL_PASSWORD, MYSQL_HOST, MYSQL_PORT, MYSQL_DATABASE
#     )
#     SQLALCHEMY_TRACK_MODIFICATIONS = False
class Config(object):
    """Flask-SQLAlchemy settings: local SQLite file, change tracking off."""

    SQLALCHEMY_DATABASE_URI = 'sqlite:///ranking.db'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
|
UTF-8
|
Python
| false | false | 714 |
py
| 29 |
config.py
| 19 | 0.687675 | 0.680672 | 0 | 22 | 31.5 | 77 |
KammerBernd/krauler
| 8,332,236,576,786 |
6e86b063226eda01fb95b525d0929afcf51e8ea7
|
965873b4748d7fbe75fbeb7731a8912cdde66aeb
|
/krauler.py
|
639fc08da81246d30fd8a043ea5706262d56cc36
|
[] |
no_license
|
https://github.com/KammerBernd/krauler
|
c46894c46d5ed81f10c779de89422a1d12305636
|
a6e0c1d9185a782c0dfd856de41141cae9ef9496
|
refs/heads/master
| 2021-01-15T11:43:28.030462 | 2014-08-16T17:45:16 | 2014-08-16T18:07:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import gevent
from gevent import monkey
monkey.patch_all() # get HTTP working
monkey.patch_thread()
from gevent.queue import Queue
import argparse
import requests
import re
import os
import sys
class Krauler:
    """Downloads every image in a krautchan/4chan thread with gevent workers."""

    def __init__(self, threadcount=4, chunksize=1024, chan='krautchan'):
        """Set up the task queue and per-chan URL templates.

        threadcount -- number of greenlet workers spawned by run()
        chunksize   -- streaming download block size in bytes
        chan        -- 'krautchan' or '4chan'
        """
        self.threadcount = threadcount
        self.threads = []
        self.chunksize = chunksize
        self.tasks = Queue()
        self.dir = ''
        self.fetchedFiles = 0
        self.totalFiles = 0
        self.chan = chan
        # Per chan: [thread-page URL template, file URL template,
        #            regex extracting (dir/board, filename) pairs].
        self.urls = {'krautchan':['http://krautchan.net/%s/thread-%s.html',
                                  'http://krautchan.net/download/%s/%s',
                                  r'href="/download/(?P<file>.*?)/(?P<name>.*?)"'],
                     '4chan':['http://boards.4chan.org/%s/thread/%s',
                              'http://i.4cdn.org/%s/%s',
                              r'File: <a href="//i.4cdn.org/(?P<board>.*?)/(?P<file>.*?)"']}
        self.regex = re.compile(self.urls[chan][2])

    def fetch(self, pid):
        """Worker: pop tasks off the queue and stream each file to disk."""
        while not self.tasks.empty():
            task = self.tasks.get_nowait()
            url = self.urls[self.chan][1] % (task[0], task[1])
            res = requests.get(url, stream=True)
            if not res.ok:
                # BUGFIX: a failed response used to be written to disk
                # anyway; skip the file instead.
                print('[-] Failed getting File :/')
                continue
            with open(self.dir+task[1], 'wb') as img:
                # BUGFIX: honour the configured chunksize (was a
                # hard-coded 1024 regardless of the constructor argument).
                for block in res.iter_content(self.chunksize):
                    if not block:
                        break
                    img.write(block)
            self.fetchedFiles += 1
            # Redraw the progress bar in place.
            sys.stdout.write('\r')
            sys.stdout.write('[.] %3i/%3i [%-25s] %25s...' % (self.fetchedFiles, self.totalFiles, int((float(self.fetchedFiles)/self.totalFiles)*25)*'=', task[1][:25]))
            sys.stdout.flush()
        return

    def parse(self, board, thread):
        """Fetch the thread page and return the regex matches (file tuples)."""
        url = self.urls[self.chan][0] % (board, thread)
        print('[ ] Fetching "%s"' % url)
        data = requests.get(url)
        matches = self.regex.findall(data.text)
        return matches

    def run(self, board, thread):
        """Crawl one thread: parse it, then download every file found."""
        self.dir = '%s-%s/' % (board, thread)
        if not os.path.exists(self.dir):
            os.makedirs(self.dir)
        matches = self.parse(board, thread)
        self.totalFiles = len(matches)
        print('[+] Found %i Files' % self.totalFiles)
        for m in matches:
            self.tasks.put_nowait(m)
        for i in range(self.threadcount):
            self.threads.append(gevent.spawn(self.fetch, i))
        gevent.joinall(self.threads)
        print('\n[+] Done :3')
if __name__ == '__main__':
    # Command-line entry point: board, thread id and chan flavour are all
    # required flags.
    arg = argparse.ArgumentParser()
    arg.add_argument('-b', required=True, dest='board', action='store', help='Board name e.g. "s","b",...')
    arg.add_argument('-t', required=True, dest='thread', action='store', help='Thread ID')
    arg.add_argument('-c', required=True, dest='chan', action='store', help='Chan mode "krautchan"/"4chan" ')
    args = vars(arg.parse_args())
    # Build the crawler for the chosen chan and download the whole thread.
    k = Krauler(chan=args['chan'])
    k.run(args['board'], args['thread'])
|
UTF-8
|
Python
| false | false | 3,116 |
py
| 2 |
krauler.py
| 1 | 0.528562 | 0.51733 | 0 | 103 | 29.242718 | 172 |
OidaTiftla/CarND-Vehicle-Detection
| 15,229,954,062,995 |
32ea9b4bad208b41276c7ebdb92290e02ba5f28c
|
c3217e75b660540278d3dcd43902c9552b04779c
|
/create_non_vehicle_data.py
|
1479d5b489380f1ad78730091e2936334157428d
|
[
"MIT"
] |
permissive
|
https://github.com/OidaTiftla/CarND-Vehicle-Detection
|
a6fe96c3652e9ac43177e0198e3e5aef89aee563
|
f1e84156afa4374390a734d6e57f7bd1ff32f64f
|
refs/heads/master
| 2020-03-19T10:34:34.805234 | 2018-06-11T15:57:37 | 2018-06-11T15:57:37 | 136,383,639 | 0 | 0 | null | true | 2018-06-06T20:39:08 | 2018-06-06T20:39:07 | 2018-06-04T05:36:21 | 2018-02-10T00:13:35 | 29,568 | 0 | 0 | 0 | null | false | null |
# Cut random 64x64 "non-vehicle" training patches out of project-video
# frames, to augment the negative class of a vehicle classifier.
import numpy as np
import cv2
import time
import matplotlib.pyplot as plt
import random
import os
import glob
import helper
import matplotlib.image as mpimg
filenames = glob.glob('test_images/project_video_*.jpg')
# Output patch size expected by the classifier.
classify_img_size = (64, 64)
# Fraction of the frame to sample from: ((x_min, y_min), (x_max, y_max)).
# NOTE(review): y_min 0.35 presumably skips the sky -- confirm.
neg_range_float = ((0, 0.35), (0.5, 1))
# (square crop size in pixels, samples of that size per image).
neg_samples = [
    (64, 16),
    (96, 12),
    (128, 4),
    (160, 2),
]
i = 0
for fname in filenames:
    ext = os.path.splitext(fname)[-1]
    if ext in ['.jpg', '.png']:
        img = helper.read_img(fname)
        width = img.shape[1]
        height = img.shape[0]
        # Convert the fractional sampling window to pixel coordinates.
        neg_range = (
            (int(neg_range_float[0][0] * width), int(neg_range_float[0][1] * height)),
            (int(neg_range_float[1][0] * width), int(neg_range_float[1][1] * height)))
        for neg_sample_size, samples_per_size_per_img in neg_samples:
            for r in range(samples_per_size_per_img):
                # Random top-left corner keeping the crop inside the window.
                offset_x = random.randint(neg_range[0][0], neg_range[1][0] - neg_sample_size)
                offset_y = random.randint(neg_range[0][1], neg_range[1][1] - neg_sample_size)
                neg_sample = img[offset_y:offset_y + neg_sample_size,offset_x:offset_x + neg_sample_size]
                # Rescale larger crops down to the classifier input size.
                if neg_sample_size != classify_img_size[0] or neg_sample_size != classify_img_size[1]:
                    neg_sample = cv2.resize(neg_sample, classify_img_size)
                helper.write_img(neg_sample, 'neg_augmentation_img/project_video_{}.jpg'.format(i))
                i += 1
    else:
        print("Unknown file extension:", fname)
print('Augmented', i, 'images')
|
UTF-8
|
Python
| false | false | 1,566 |
py
| 10 |
create_non_vehicle_data.py
| 9 | 0.588761 | 0.555556 | 0 | 44 | 34.590909 | 105 |
zshimanchik/battlecity
| 6,846,177,908,982 |
16a087a492ec71b21c9de5cd347811a614e607e1
|
10b26c4b5756eddf0b9fa962a193f4bc2f27969d
|
/battlecityplayer/tactics/hunt.py
|
3546b7c79680238c83a72731f960ce63fdf1b13d
|
[] |
no_license
|
https://github.com/zshimanchik/battlecity
|
b43b16a36f86e31e9726aeac678c0547602f22c4
|
5fa74de38824cd8d20a0c17ec2d2324e83c5babb
|
refs/heads/master
| 2020-08-21T23:11:03.941571 | 2019-10-19T20:44:10 | 2019-10-19T20:44:10 | 216,268,633 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from collections import deque
from models import Direction
from tactics import Tactics
from constants import *
class Hunt(Tactics):
    """Tactic that BFS-pathfinds towards the nearest enemy tank."""

    def update(self, player):
        """Recompute usability/action: move one step along the path to the
        closest enemy, but only when that enemy is more than 3 steps away."""
        self.usability = 0
        self.action = ''
        board = player.board
        path = self.find_path_to_enemy(board)
        if path and len(path) > 3:
            print(path)
            # path[0] is our own position; path[1] is the first step.
            delta = path[1] - player.board.me.pos
            print('delta: ', delta)
            print('dir: ', delta.give_direction())
            self.action = delta.give_direction().value
            self.usability = 0.5

    def find_path_to_enemy(self, board):
        """Breadth-first search from our position to the nearest enemy.

        Returns a deque of positions (self -> enemy) or None when no
        enemy is reachable.  Ties are broken by Direction iteration order.
        """
        dist_mtrx = [[0] * board.n for _ in range(board.n)]  # 0 - means unvisited
        queue = deque()
        queue.appendleft(board.me.pos)
        dist_mtrx[board.me.pos.x][board.me.pos.y] = 1
        while queue:
            cur_pos = queue.pop()
            cur_distance = dist_mtrx[cur_pos.x][cur_pos.y]
            for dir in Direction:
                next_pos = cur_pos + dir.get_delta()
                if 0 <= next_pos.x < board.n and 0 <= next_pos.y < board.n:
                    ch = board.char(next_pos.x, next_pos.y)
                    if ch in ENEMIES:
                        # Enemy found: record its distance and backtrack.
                        dist_mtrx[next_pos.x][next_pos.y] = cur_distance + 1
                        return self._restore_path(next_pos, dist_mtrx)
                    if ch not in BARRIERS and dist_mtrx[next_pos.x][next_pos.y] == 0:
                        dist_mtrx[next_pos.x][next_pos.y] = cur_distance + 1
                        queue.appendleft(next_pos)

    def _restore_path(self, cur_pos, dist_mtrx):
        """Walk the distance matrix backwards from *cur_pos* (the enemy)
        to the start cell (distance 1), returning the forward path."""
        path = deque()
        path.appendleft(cur_pos)
        cur_dist = dist_mtrx[cur_pos.x][cur_pos.y]
        while cur_dist > 1:
            for dir in Direction:
                next_pos = cur_pos + dir.get_delta()
                if 0 <= next_pos.x < len(dist_mtrx) and 0 <= next_pos.y < len(dist_mtrx):
                    # Step to any neighbour exactly one closer to the start.
                    if dist_mtrx[next_pos.x][next_pos.y] == cur_dist - 1:
                        path.appendleft(next_pos)
                        cur_pos = next_pos
                        cur_dist -= 1
                        break
        return path
|
UTF-8
|
Python
| false | false | 2,172 |
py
| 20 |
hunt.py
| 17 | 0.508748 | 0.50046 | 0 | 54 | 39.222222 | 89 |
blackhen/Python
| 10,934,986,769,426 |
edd575b3c7a8114bdcf9708a341c197aff755b82
|
ca7a03fd57f02a832fbb08c170b1bd08f389bc3e
|
/HW_Dealer.py
|
28c55d9affe1dbb19142b3ca87223d9412352548
|
[] |
no_license
|
https://github.com/blackhen/Python
|
5ac428f17e7e16ae25abac2a81cbcb2b896ea741
|
0edf3fbbd37b0088c0a0d63fb31c9d81287c7bf1
|
refs/heads/master
| 2020-05-16T21:01:54.944888 | 2015-02-26T09:01:49 | 2015-02-26T09:01:49 | 31,358,400 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''HW_Dealer: price items with weight-based surcharges (Python 2 script).'''
def dealer(number_order):
    '''total price of order'''
    # Python 2: input() evaluates the typed line, so these are numbers.
    # Per order: read a "special" and a "normal" weight threshold, then a
    # count of items, each described by (weight, price).
    for _ in range(number_order):
        weight_spe = input()
        weight_nor = input()
        for _ in range(input()):
            weight = input()
            piece = input()
            if weight >= weight_spe:
                ans = piece+(piece*0.10)  # +10% at/above the special weight
            elif weight >= weight_nor:
                ans = piece+(piece*0.20)  # +20% at/above the normal weight
            else:
                ans = piece
        # Round up to the next whole unit unless already whole.
        # NOTE(review): this print sits outside the inner loop, so only the
        # LAST item of each order is reported -- confirm intended.
        print int(ans) if ans % 1 ==0 else int(ans) + 1

dealer(input())
|
UTF-8
|
Python
| false | false | 535 |
py
| 154 |
HW_Dealer.py
| 151 | 0.46729 | 0.450467 | 0 | 17 | 30.470588 | 59 |
keysona/qq
| 6,476,810,724,440 |
cef4026203b58f28e4c68955a751c49c40247dee
|
1999bf6b07051534dbec753e45dc4c3c8d340c6d
|
/miniqq/admin.py
|
aba50b5192dd64d12c77004c07557babd420c0fc
|
[] |
no_license
|
https://github.com/keysona/qq
|
898e8f3a55f09cfe64be7b2391dabfe0ee672547
|
a3dca71dafddb37d14aecac53448cf7292b8a153
|
refs/heads/master
| 2016-08-10T11:11:28.403040 | 2015-12-26T16:24:27 | 2015-12-26T16:24:27 | 48,616,365 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: key
# @Date: 2015-12-18 12:35:48
# @Last Modified by: key
# @Last Modified time: 2015-12-27 00:23:47
from django.contrib import admin
from .models import User,Friend
# Register your models here.
# Expose the User and Friend models in Django's admin interface.
admin.site.register(User)
admin.site.register(Friend)
|
UTF-8
|
Python
| false | false | 311 |
py
| 7 |
admin.py
| 7 | 0.70418 | 0.610932 | 0 | 12 | 24.916667 | 42 |
IanCallaghan/script.service.superepg
| 18,932,215,850,549 |
051930a41a7dfdf84956db7dbf3bc2c954629c67
|
1797e8e837696ca3f18564ee6a8f17346d88f470
|
/resources/lib/superepg/models/iDisplayShow.py
|
7a841a3989d69c6f4f502a5279bc30fd8285b3f9
|
[] |
no_license
|
https://github.com/IanCallaghan/script.service.superepg
|
a12ea4a45f872649014162584b34b8814d9b8215
|
b813417566fc0515c5b52def15ce44679e797081
|
refs/heads/master
| 2020-06-02T03:55:58.091854 | 2014-07-28T20:23:15 | 2014-07-28T20:23:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on 17 Jun 2014
@author: Ian
'''
class IDisplayShow(object):
    """Interface-style base class for shows the EPG can display.

    Subclasses are expected to override the getters below; the values
    returned here are placeholders.
    """

    # NOTE(review): mutable class attribute shared by every instance --
    # confirm that is intended.
    json = {}

    def __init__(self, params):
        """Constructor; *params* is currently unused."""

    def grabNext(self):
        """Return the next show to display (none at this level)."""
        return None

    def getDescription(self):
        """Placeholder description text."""
        return "description"

    def getTitle(self):
        """Placeholder title text."""
        return "title"

    def getLength(self):
        """Placeholder running time."""
        return 99

    def getSeason(self):
        """Season number; -1 means unknown."""
        return -1

    def getEpisode(self):
        """Episode number; -1 means unknown."""
        return -1

    def playContent(self, player):
        """Start playback on *player*; no-op in the base class."""
        pass
|
UTF-8
|
Python
| false | false | 567 |
py
| 53 |
iDisplayShow.py
| 47 | 0.492063 | 0.474427 | 0 | 37 | 14.351351 | 34 |
Wolverine-17/Solves_Codeforces
| 1,597,727,841,686 |
786317bcb09279a017c994e16b036117b8973002
|
2c8984e1c6ca359513959709ebb870428f350c9b
|
/D/101375D.py
|
acd84a8253a136c7ce8d5d8b84746ed184f334e8
|
[] |
no_license
|
https://github.com/Wolverine-17/Solves_Codeforces
|
cb298f6dbcf4f5127ac58a633b50684b5da13e0c
|
6d659c2d0f6e3528c769ba14087988bd762f5bd9
|
refs/heads/master
| 2020-04-07T08:45:13.720627 | 2019-05-14T11:33:38 | 2019-05-14T11:33:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def pairwise_product_sum(values):
    """Sum of values[i] * values[j] over all pairs i < j.

    Uses the identity (sum v)^2 = sum v^2 + 2 * sum_{i<j} v_i * v_j,
    avoiding the quadratic double loop.
    """
    total = sum(values)
    return (total * total - sum(v * v for v in values)) // 2


if __name__ == "__main__":
    # First input line is the element count; unused since the second
    # line is split directly (matches the original script).
    input()
    heights = list(map(int, input().split()))
    print(pairwise_product_sum(heights))
|
UTF-8
|
Python
| false | false | 110 |
py
| 45 |
101375D.py
| 45 | 0.509091 | 0.472727 | 0 | 7 | 14.714286 | 35 |
nigeljonez/newpyfibot
| 13,460,427,550,617 |
ea786a4f4074385fc2bb0044435a0f39cd305ad7
|
5b166e7bb66bec25126ce6622162075956397d07
|
/modules/module_sqliteseries.py
|
00593818009d61f7245d0a88e7fb6ffd6e6ac747
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/nigeljonez/newpyfibot
|
15f859c9434629a7d579346d0400f326dcad87b1
|
de090f1521dee8523d4a083b9665298e4e97d847
|
refs/heads/master
| 2021-01-22T11:46:22.344509 | 2012-09-12T10:25:28 | 2012-09-12T10:25:28 | 936,129 | 2 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
try:
    # Prefer the stdlib driver, but keep the legacy module name used by
    # the functions below.
    # BUGFIX: the original did `import sqlite3`, yet all code references
    # `sqlite`, so the name was undefined whenever sqlite3 was available.
    # NOTE(review): with sqlite3 the `%s` query paramstyle below may need
    # converting to qmark (`?`) -- confirm.
    import sqlite3 as sqlite
except ImportError:
    import sqlite  # pre-Python-2.5 pysqlite fallback
import time
import datetime
# create table series (id INTEGER PRIMARY KEY, uid TEXT UNIQUE, serie TEXT, season INTEGER, episode INTEGER, title TEXT, airdate DATE);
# insert into series values(null, 'Alias', 1, 1, 'Pilot', date('2006-01-01'));
# select * from series where airdate = date('2006-01-02', '-1 day');
# select * from series where airdate = date('now', '-1 day');
def command_epinfo(bot, user, channel, args):
    """List series in the database"""
    conn = sqlite.connect("/home/shrike/pyfibot/modules/series.db")
    cursor = conn.cursor()
    cursor.execute("SELECT DISTINCT serie FROM series")
    # Collect and alphabetise the known series names.
    names = sorted(row[0] for row in cursor)
    bot.say(channel, "Known series (" + str(len(names)) + "): " + ", ".join(names))
    cursor.close()
    conn.close()
def command_ep(bot, user, channel, args):
    """Usage: sqlep [today|yesterday|tomorrow] or [seriename]

    Announces matching episodes (and their airdates) on the channel.
    NOTE(review): `%s` parameter style and meaningful SELECT rowcounts are
    legacy pysqlite behaviour; the stdlib sqlite3 driver uses `?` params
    and reports rowcount == -1 for SELECTs -- confirm before migrating.
    """
    con = sqlite.connect("/home/shrike/pyfibot/modules/series.db");
    cur = con.cursor()
    if not args:
        bot.say(channel, "Usage: ep [today|yesterday|tomorrow] or [name of series]")
        return
    if args == "today":
        cur.execute("SELECT * FROM series WHERE airdate = date('now', 'localtime');")
        if cur.rowcount == 0:
            bot.say(channel, "No known releases today")
            return
    elif args == "yesterday":
        cur.execute("SELECT * FROM series WHERE airdate = date('now', 'localtime', '-1 day');")
        if cur.rowcount == 0:
            bot.say(channel, "No known releases yesterday")
            return
    elif args == "tomorrow":
        cur.execute("SELECT * FROM series WHERE airdate = date('now', 'localtime', '+1 day');")
        if cur.rowcount == 0:
            bot.say(channel, "No known releases tomorrow")
            return
    else:
        # try to find the serie
        # First preference: the next unaired episode of the named series.
        cur.execute("SELECT * FROM series WHERE serie LIKE %s AND airdate >= date('now', 'localtime') LIMIT 1", ("%"+args+"%",))
        # nothing found, get more data from the web
        if cur.rowcount == 0:
            # Fall back to any episode of the series, aired or not.
            cur.execute("SELECT * FROM series WHERE serie LIKE %s", ("%"+args+"%",))
            if cur.rowcount == 0:
                bot.say(channel, "Series '%s' not found" % args) # TODO: add to 'wishlist' file or something?
                return
            else:
                bot.say(channel, "No unaired episodes of '%s' found" % args)
                return
    episodes = []
    # go through the results
    for (idno, uid, serie, season, episode, title, airdate) in cur:
        if episode < 10: episode = "0%d" % episode # pad ep with zeroes
        # YYYY-MM-DD -> datetime -> timedelta
        t = time.strptime(airdate, "%Y-%m-%d")
        ad = datetime.date(t.tm_year, t.tm_mon, t.tm_mday)
        now = datetime.date.today()
        tomorrow = now + datetime.timedelta(days=1)
        td = ad-now
        # change 0 and 1 to today & tomorrow, don't show date if we're asking stuff for a certain day
        airdatestr = ""
        if td.days >= 0:
            if ad == now:
                if args != "today": airdatestr = "on %s (Today)" % airdate
            elif ad == tomorrow:
                if args != "tomorrow": airdatestr = "on %s (Tomorrow)" % airdate
            else:
                airdatestr = "on %s (%d days)" % (airdate, td.days)
        episodes.append("%s %sx%s '%s' %s" % (serie, season, episode, title, airdatestr))
    bot.say(channel, "-- ".join(episodes))
    cur.close()
    con.close()
|
UTF-8
|
Python
| false | false | 3,558 |
py
| 32 |
module_sqliteseries.py
| 31 | 0.569983 | 0.559584 | 0 | 96 | 36.0625 | 135 |
subhasmith/vagamap
| 738,734,423,495 |
a6eeeb95051d96264bd214a498affd0d297aa8c2
|
605d10b7df9ad4da2742c7bacd784511acd41888
|
/src/vagamap/pages/place.py
|
ccf41299501fb8781362eb5047fe6dfb6b78d74f
|
[] |
no_license
|
https://github.com/subhasmith/vagamap
|
947e7d71e582f4a2a2453166884238a3b0827508
|
a1243005970ac3b0780fef7cafe8464de6c20fa2
|
refs/heads/master
| 2016-09-12T18:23:06.343550 | 2012-03-28T15:16:11 | 2012-03-28T15:16:11 | 56,242,001 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import webapp2
import wtforms
import jinja2
import new
import base
from wtforms.ext.appengine.db import model_form
from vagamap.models import *
import logging
jinja_environment = base.jinja_environment
class PlaceForm(model_form(Place)):
    # WTForms form auto-generated from the Place datastore model; no
    # custom fields or validation yet.
    pass
class ListPlaceHandler(webapp2.RequestHandler):
    """Renders up to 50 Place entities, ordered by provider then name."""

    def get(self):
        """GET /place/list -- render the place listing template."""
        self.response.headers['Content-Type'] = 'text/html'
        query = Place.all()
        # BUGFIX: db.Query.order is a *method*; the original assigned to it
        # (query.order = "provider"), which clobbered the method and never
        # applied any ordering.
        query.order("provider")
        query.order("name")
        places = query.fetch(limit=50)
        logging.info('places fetched: {}'.format(len(places)))
        template_values = {
            'places': places
        }
        template = jinja_environment.get_template('place_list.html')
        self.response.out.write(template.render(template_values))

    def post(self):
        """POST just bounces to the edit page."""
        self.redirect('/place/edit')
# WSGI application mapping /place/list to the handler above.
place_list = webapp2.WSGIApplication([('/place/list', ListPlaceHandler)], debug=True)
|
UTF-8
|
Python
| false | false | 978 |
py
| 27 |
place.py
| 16 | 0.630879 | 0.624744 | 0 | 36 | 25.222222 | 85 |
vSahakian/pltPySegy
| 10,677,288,727,946 |
ceccf6bfb32b1b48ee7288632e9b605d200a0cc3
|
7c9e7bb5fffc86a5b71e91ccd12b44e41877551b
|
/navtools.py
|
5aa8ab6eddad772b3aedee7ff9b54f3c1789b807
|
[] |
no_license
|
https://github.com/vSahakian/pltPySegy
|
a45fee7d003c65b3a04f53242b91694cc142f92c
|
9b87e7703d97ba891c03cc5a6fd508ebcfcda9bf
|
refs/heads/master
| 2021-01-10T02:10:39.473676 | 2020-12-11T18:28:20 | 2020-12-11T18:28:20 | 53,089,064 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def getnav(segyfile,navfile):
    '''
    VJS 1/2016
    Extract nav from a segy
    Input:
        segyfile:    String with segy file to extract nav
        navfile:     String with output nav file
    Output:
        To navfile
        Shot   Lon   Lat
    '''
    import numpy as np
    import obspy as obs
    from struct import unpack

    # Import segy data:
    sdat=obs.read(segyfile)

    # Extract header:
    # Set to empty arrays:
    lon=np.zeros((len(sdat),1))
    lat=np.zeros((len(sdat),1))
    shot=np.zeros((len(sdat),1))

    # run through each trace:
    for i in range(len(sdat)):
        header=sdat[i].stats.segy.trace_header.unpacked_header
        # Unpack the source lat/lon, format is arcseconds:
        # big-endian 32-bit ints at trace-header bytes 73-76 / 77-80.
        # NOTE(review): the coordinate scalar field (bytes 71-72) is not
        # applied here -- confirm the files always use a scalar of 1.
        lonh=unpack(">i",header[72:76])[0]
        lath=unpack(">i",header[76:80])[0]
        # Shot/field record number, bytes 9-12.
        shot[i]=unpack(">i",header[8:12])[0]
        # Convert arcseconds to decimal degrees:
        lon[i]=lonh/(60.0*60.0)
        lat[i]=lath/(60.0*60.0)

    # Print to file:
    out=np.c_[shot,lon,lat]
    np.savetxt(navfile,out,fmt='%6i\t%12.8f\t%10.8f')
def nav_ll2utm(navfile_ll, navfile_utm, zone='11S', ellps='WGS84'):
    '''
    Convert a navfile made above to a utm file for kingdom
    VJS 1/2016
    Input:
        navfile_ll:    String of path to navfile, format: shot lon lat
        navfile_utm:   String of path to utm navfile, format: shot X Y
        zone:          UTM zone string (default '11S', preserving the
                       previously hard-coded value)
        ellps:         Reference ellipsoid (default 'WGS84')
    Output:
        To navfile_utm:
        Shot/RP   X   Y
    '''
    import numpy as np
    from pyproj import Proj

    # Read in ll navfile:
    lldat = np.genfromtxt(navfile_ll)

    # Sort columns: shot number, longitude, latitude.
    shot = lldat[:, 0]
    lon = lldat[:, 1]
    lat = lldat[:, 2]

    # Make projection (zone/ellipsoid are now parameters, previously
    # hard-coded to 11S / WGS84):
    p = Proj(proj='utm', zone=zone, ellps=ellps)

    # Project lon/lat to UTM easting/northing:
    UTMx, UTMy = p(lon, lat)

    # Print out and save:
    out = np.c_[shot, UTMx, UTMy]
    np.savetxt(navfile_utm, out, fmt='%6i\t%12.5f\t%12.5f')
|
UTF-8
|
Python
| false | false | 1,880 |
py
| 8 |
navtools.py
| 8 | 0.569681 | 0.537234 | 0 | 74 | 24.351351 | 71 |
peletiah/poab
| 9,225,589,759,142 |
4df188148559791c5e91bc9d5ef2655bfe883f23
|
330aceb15268d5cdeeb7d8e4e906c0129d44cb2c
|
/odd_functions/sql_add_prefix.py
|
210a17a7c90ed71b265c80fcfa3cc9d25e2cacd2
|
[] |
no_license
|
https://github.com/peletiah/poab
|
03e64a94e65c4800f809953be9f508651cfed22c
|
eb7462a026747e1d4eb411d6af1549e67b12b598
|
refs/heads/master
| 2021-01-21T13:36:57.811316 | 2010-09-12T13:24:41 | 2010-09-12T13:24:41 | 82,160 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from sqlalchemy import and_, or_
import hashlib
import ConfigParser
import urllib
import os
import hashlib
import sys
sys.path.append('/root/poab')
from image_functions import sortedlistdir as sortedlistdir
from fill_photos_db import resize_990 as resize_990
import talk2flickr
import db_functions
from getfromxml import parsexml as parsexml
import time,datetime
def getcredentials(credentialfile):
    """Read API and database credentials from an ini file.

    Returns (pg_user, pg_passwd, flickrapi_key, flickrapi_secret,
    wteapi_key).

    BUGFIX: this function was defined twice, byte-for-byte identical;
    the redundant second definition has been removed.
    """
    config=ConfigParser.ConfigParser()
    # Raises IOError early if the file is missing.
    # NOTE(review): the handle is never closed -- confirm acceptable for
    # this one-shot script.
    open(credentialfile)
    config.read(credentialfile)
    pg_user=config.get("postgresql","username")
    pg_passwd=config.get("postgresql","password")
    flickrapi_key=config.get("flickrcredentials","api_key")
    flickrapi_secret=config.get("flickrcredentials","api_secret")
    wteapi_key=config.get("worldtimeengine","api_key")
    return pg_user,pg_passwd,flickrapi_key,flickrapi_secret,wteapi_key
pg_user,pg_passwd,flickrapi_key,flickrapi_secret,wteapi_key=getcredentials('/root/poab/credentials.ini')
database=db_functions.initdatabase(pg_user,pg_passwd)
session=database.db_session()
db_imageinfo=database.db_imageinfo
db_log=database.db_log
q = session.query(db_imageinfo)
images = q.all()
for image in images:
q = session.query(db_log).filter(db_log.id==image.log_id)
log=q.one()
createdate=log.createdate.strftime('%Y-%m-%d')
q = session.query(db_imageinfo).filter(db_imageinfo.id==image.id)
image=q.one()
if image.imgname[47:51]=='/srv':
print image.imgname
print '/trackdata/bydate/'+str(createdate)+'/images/sorted/990/'+image.imgname
print '/trackdata/bydate/'+str(createdate)+'/images/sorted/990/'+image.imgname[-12:-4]+'.jpg'
image.imgname='/trackdata/bydate/'+str(createdate)+'/images/sorted/990/'+image.imgname[-12:-4]+'.jpg'
session.commit()
session.close()
|
UTF-8
|
Python
| false | false | 2,277 |
py
| 268 |
sql_add_prefix.py
| 29 | 0.737374 | 0.725955 | 0 | 57 | 38.947368 | 109 |
FlorianPf/ATAT_VASP
| 42,949,698,523 |
53cd13b18ddd9c6b8a039f8382119e65d28b08b0
|
d5afc5fbbf3a03363ee8a38446d852b41d53f398
|
/atat_poscar.py
|
75e6d31f3dc9bb8702a7e9eadbecc01bdc7b3eed
|
[] |
no_license
|
https://github.com/FlorianPf/ATAT_VASP
|
8fe2935c1fa0383e1dde5249e67c90a354a57bd8
|
2f4659bc8f525287aa060c6b8c6a3b6e24756021
|
refs/heads/main
| 2023-01-14T10:48:25.131633 | 2020-11-19T18:48:58 | 2020-11-19T18:48:58 | 314,338,425 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/opt/local/bin/python
import os # Miscellaneous operating system interfaces (e.g. for manipulating directories). https://docs.python.org/3/library/os.html
import sys # System-specific parameters and functions (e.g. for exiting after error). https://docs.python.org/3/library/sys.html
import numpy as np # Fundamental package for scientific computing. https://numpy.org/doc/stable/
import argparse # Parser for command-line options, arguments and sub-commands. https://docs.python.org/3/library/argparse.html#module-argparse
class Structure:
    """One SQS parsed from mcsqs output text.

    `data` is the list of lines for a single structure (including the
    trailing 'end' marker); `atom_list` optionally re-orders the atom-type
    labels and must then match the number of types found.
    """
    def __init__(self, data, atom_list):
        try:
            self.data = data
            pos = 0
            # The first three lines describe the coordinate system used, the vectors are the rows of the matrix:
            self.coordinate_system = np.matrix([values for vectors in self.data[pos:pos+3] for values in vectors.split()], dtype=float).reshape((3, 3))
            pos += 3
            # The following three lines describe the lattice vectors of the unit cell:
            # The unit cell expands, therefor this is not a simple identity matrix.
            self.lattice_vectors_org = np.matrix([values for vectors in self.data[pos:pos+3] for values in vectors.split()], dtype=float).reshape((3, 3))
            pos += 3
            # Lattice vectors expressed in cartesian coordinates.
            self.lattice_vectors_car = self.lattice_vectors_org*self.coordinate_system
            # Determine the atom types present.
            self.atom_types = list(set([line.split()[-1] for line in self.data[pos:-2]]))  # beware: the slicing to -2 is to not read 'end'
            if atom_list is not None:
                if len(self.atom_types) == len(atom_list):
                    self.atom_types = atom_list
                else: raise ValueError("Length of order specification is different from number of atom types found in input file.")
            self.atom_numbers = [0 for atom_type in self.atom_types]
            atoms_unsorted = []
            # Read in all the atoms
            for i in range(len(self.data[pos:-2])):
                atom_info = self.data[pos+i].split()
                atom_type = atom_info.pop()  # last token is the type label
                self.atom_numbers[self.atom_types.index(atom_type)] += 1
                coords_org = np.transpose(np.matrix([float(val) for val in atom_info]))
                atoms_unsorted.append(Atom(self, atom_type, coords_org))
            self.atoms = []
            # Getting the list of atoms in the right order (VASP needs this):
            for atom_type in self.atom_types:
                index=0
                for multiplicity in range(int(self.atom_numbers[self.atom_types.index(atom_type)])):
                    atom_found = False
                    while not atom_found:
                        if atoms_unsorted[index].atom_type==atom_type:
                            self.atoms.append(atoms_unsorted[index])
                            atom_found=True
                        index += 1
            # Output some structural information.
            # `sqs_count` is a module-level global set in main().
            if sqs_count == 1:
                print("\nStructural information (assuming all sqs are of the same composition):")
                info_list = [None]*(2*len(self.atom_types))
                info_list[::2] = self.atom_types
                info_list[1::2] = [str(num) for num in self.atom_numbers]
                print("Atom types (quantity):"+(" {} ({})"*len(self.atom_types)).format(*[info_list[i] for i in range(2*len(self.atom_types))])+"\n")
        except OSError as oserr:
            print("\nError while reading SQS {}: {}".format(sqs_count, oserr))
            sys.exit(oserr.errno)
        except ValueError as valerr:
            print("\nError while reading SQS {}: {}".format(sqs_count, valerr))
            sys.exit(-1)
class Atom:
    """A single atom of a Structure, with coordinates stored in the
    original (file) basis.

    One may insure oneself about these transformations by manually
    calculating the matrix operations.
    """

    def __init__(self, struc, atom_type, coords_org):
        self.struc = struc            # owning Structure (supplies the bases)
        self.atom_type = atom_type    # element label, e.g. 'Fe'
        self.coords_org = coords_org  # 3x1 column vector in the file basis

    def org_to_car(self):
        """Original-basis coordinates -> cartesian coordinates."""
        basis = self.struc.coordinate_system.T
        return np.dot(basis, self.coords_org)

    def org_to_dir(self):
        """Original-basis coordinates -> direct (fractional) coordinates."""
        inv_lattice = np.linalg.inv(self.struc.lattice_vectors_car.T)
        return np.dot(inv_lattice, self.org_to_car())
def printPOSCAR(structure, name, representation):
    """Write `structure` as a VASP POSCAR file into output_dir/poscar_<n>.

    `representation` selects 'Cartesian' vs 'Direct' atom coordinates.
    Uses the module globals `output_dir` and `sqs_count` set in main().
    """
    # Check the VASP manual on POSCAR files: https://www.vasp.at/wiki/index.php/POSCAR
    poscar_dir = output_dir+"/poscar_"+str(sqs_count)
    os.mkdir(poscar_dir)
    os.chdir(poscar_dir)
    output_file = open("POSCAR", 'w')
    output_file.write(name+" (SQS number "+str(sqs_count)+")\n")  # Name of structure (comment).
    output_file.write(" 1.0\n")  # Scaling factor.
    for i in range(3):
        output_file.write(" "+(" {:12.8f}"*3).format(*[structure.lattice_vectors_car[i,j] for j in range(3)])+"\n")  # Three vectors describing the lattice.
    output_file.write(" "+(" {:<4s}"*len(structure.atom_types)).format(*structure.atom_types)+"\n")  # Atom types in order of appearance.
    output_file.write(" "+(" {:>4d}"*len(structure.atom_numbers)).format(*structure.atom_numbers)+"\n")  # Corresponding quantities.
    if representation in ['car', 'cartesian']:
        output_file.write("Cartesian\n")  # With this tag present the atom positions get read in cartesian coordinates.
        for i in range(len(structure.atoms)):
            output_file.write(" "+(" {:20.16f}"*3).format(*np.transpose(structure.atoms[i].org_to_car()).tolist()[0])+"\n")
    else:
        output_file.write("Direct\n")  # With this tag present the atom positions get read in the coordinates defined by the lattice vectors.
        for i in range(len(structure.atoms)):
            output_file.write(" "+(" {:20.16f}"*3).format(*np.transpose(structure.atoms[i].org_to_dir()).tolist()[0])+"\n")
    output_file.close()
    os.chdir(output_dir)
    return
def main():
    """Parse command-line options and convert each SQS in the input file
    to its own POSCAR file under ./output_files/poscar_<n>.

    Previous results in ./output_files are rotated to ./output_files~
    (which is deleted first if present).
    """
    print("\nNote: Reading of the coordinate system as \'a, b, c, alpha, beta, gamma\' has not been implemented yet.")
    parser = argparse.ArgumentParser(description='Converts SQS output file (e.g. sqs.out) to VASP POSCAR files.\nNote: Reading of the coordinate system as \'a, b, c, alpha, beta, gamma\' has not been implemented yet.')
    parser.add_argument('-i', '--ifile', dest='file_name', type=str, help='Name of input file.', required=True)
    parser.add_argument('-n', '--name', dest='name', type=str, help='Name of structure (first line of POSCAR file)', required=False, default='Comment (name of structure).')
    parser.add_argument('-o', '--order', nargs='*', dest='atom_list', help='Atom types in desired order (VASP calculations require matching order of atoms in POSCAR and POTCAR files).', required=False)
    parser.add_argument('-r', '--repr', default='cartesian', type=str, choices=['car', 'cartesian', 'dir', 'direct'], required=False, help='Choose the representation of atom positions in the POSCAR file.', dest='representation')
    args = parser.parse_args()
    file_name = args.file_name
    name = args.name
    atom_list = args.atom_list
    representation = args.representation
    print("\nInput file name: ", file_name)
    # Globals shared with Structure and printPOSCAR.
    global output_dir
    output_dir = os.getcwd()+"/output_files"
    global input_dir
    input_dir = os.getcwd()
    # Delete and / or rename previous results:
    if "output_files" in os.listdir(os.getcwd()):
        print("")
        if "output_files~" in os.listdir(os.getcwd()):
            # Remove the second-to-last results bottom-up.
            for root, dirs, files in os.walk(os.getcwd()+"/output_files~", topdown=False):
                for file_to_del in files:
                    os.remove(os.path.join(root, file_to_del))
                for dir_to_del in dirs:
                    os.rmdir(os.path.join(root, dir_to_del))
            os.rmdir(os.getcwd()+"/output_files~")
            print("Deleted second to last results stored within \'/output_files~\'.")
        os.rename("output_files", "output_files~")
        print("Renamed previous results. \'/output_files\' is now \'/output_files~\'.")
    os.mkdir(output_dir)
    input_file = open(file_name, 'r')  # load file for reading only
    content = [line.rstrip() for line in input_file.readlines()]  # create list with lines, truncated space at end
    input_file.close()  # close file
    os.chdir(output_dir)
    pos = 0
    global sqs_count
    sqs_count = 0
    data = []
    # Creating a separate structure / POSCAR file for each SQS found in the input file.
    num_backspace=0
    while pos in range(len(content)):
        line = content[pos]
        data.append(line)
        if line == 'end':
            sqs_count += 1
            printPOSCAR(Structure(data, atom_list), name, representation)
            data = []
            pos += 1
            if sqs_count > 1:
                num_backspace=int(np.floor(np.log10(sqs_count-1))+1)
            # In-place progress counter on the same console line.
            sys.stdout.write("\b"*num_backspace+"%s" %sqs_count)
            sys.stdout.flush()
        pos += 1
    print(" SQS successfully converted to POSCAR file(s).\n")

if __name__ == "__main__":
    main()
"""
--------------------------
Manuals and documentaries:
--------------------------
VASP POSCAR files: https://www.vasp.at/wiki/index.php/POSCAR
argparse documentary: https://docs.python.org/3/library/argparse.html
mcsqs documentary: https://www.brown.edu/Departments/Engineering/Labs/avdw/atat/manual/node47.html
"""
|
UTF-8
|
Python
| false | false | 9,905 |
py
| 9 |
atat_poscar.py
| 8 | 0.585664 | 0.579303 | 0 | 185 | 52.545946 | 228 |
Byeongryul/kmu_chatbot_AI
| 5,841,155,559,302 |
930e88e04100b431f266306eb3c957b7cecdd7fe
|
e1bbf5515afd187614a2ddef45769fdd8755e9a9
|
/test.py
|
a565c0025942cedaa9a254d45117a1132a20a630
|
[] |
no_license
|
https://github.com/Byeongryul/kmu_chatbot_AI
|
5576dbbe1cb917776990bf8d8d360aa2c872a492
|
3f637665f07ef2d02017b1041e00a92e7c9e28c7
|
refs/heads/main
| 2023-04-30T19:58:13.601943 | 2021-05-12T09:24:59 | 2021-05-12T09:24:59 | 362,439,226 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Scratch script exercising the mapping lookups; the lookup loops are kept
# commented out below.
# NOTE(review): `df` is never defined in this file -- this print raises
# NameError unless run in a session where `df` already exists. TODO confirm.
print(df)
# qs = pd.read_csv('rsc/data_mapping/sholaship_qu_mapping.csv').drop('Unnamed: 0', axis = 1)
# q = 'ν† μ΅'
# qr = '?'
# for row in qs.T:
# if qs['0'][row] in q or q in qs['0'][row]:
# qr = qs['1'][row]
# break
# print(qr)
# wards = pd.read_csv('rsc/data_mapping/scholaship_name_mapping.csv').drop('Unnamed: 0', axis = 1)
# ward = 'κ΅­μž₯'
# wardr = '?'
# for row in wards.T:
# if wards['0'][row] in ward or ward in wards['0'][row]:
# wardr = wards['1'][row]
# break
# print(wardr)
|
UTF-8
|
Python
| false | false | 538 |
py
| 30 |
test.py
| 13 | 0.537736 | 0.518868 | 0 | 19 | 26.894737 | 98 |
fengjianque/access_monitor
| 10,788,957,859,427 |
e5b43fab356a827a44eab44662dd6ecb17817d8a
|
6f29b47e69fee14ea32f15947fe38559c6665dab
|
/monit.py
|
abc4a7c85e696bef74e67274425875ef08f872f0
|
[] |
no_license
|
https://github.com/fengjianque/access_monitor
|
bf12e70ba552580f1a583a1daa39fe91fc323272
|
fcca8b9834a3b0480580fa4a1169537dcb60032e
|
refs/heads/master
| 2021-01-01T17:49:03.182380 | 2017-09-15T03:36:24 | 2017-09-15T03:36:24 | 98,163,463 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys
import os
import time
import traceback
import optparse
from datetime import date, datetime
import logging
import logging.config
from core import pygtail
from core import util
from config import setting as sfg
from core.daemon import Daemon
current_dir = os.getcwd()
log_dir = current_dir + "/var/log"
run_dir = current_dir + "/var/run"
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if not os.path.exists(run_dir):
os.makedirs(run_dir)
logging.config.dictConfig(sfg.LOGGING)
def _run(options):
######## file lock ########
file_lock = util.FileLock("var/run/monitor.lock")
if not file_lock.lock():
print "Monitor is running"
sys.exit(1)
pattern = util.build_pattern(sfg.LOG_FORMAT)
config_file = options.config_file
alert_status_config_file, response_range_config_file, response_ratio_config_file, request_ratio_config_file = util.parse_config(config_file)
while True:
count = 0
sys.stdout.flush()
if not options.continue_read and not options.loop_run:
fp = open(options.access_log, "r")
else:
fp = pygtail.Pygtail(options.access_log, offset_file="var/run/access_log.offset")
if fp is not None:
for line in fp:
try:
count = count + 1
record = util.parse_log(line, pattern)
if record is None:
continue
util.alert_status(record, alert_status_config_file)
util.alert_response_time_range(record, response_range_config_file)
util.alert_response_time_ratio(record, response_ratio_config_file)
util.alert_request_time_ratio(record, request_ratio_config_file)
except Exception, ex:
logging.error("monitor exception: %s" % traceback.format_exc())
if not options.loop_run:
break
if count < 1000:
time.sleep(1)
elif count < 10:
time.sleep(5)
class MDaemon(Daemon):
    """Daemonised wrapper: runs the monitor loop in the background."""
    def run(self):
        # `options` is the optparse namespace passed to the constructor in
        # main(); presumably stored by the Daemon base class -- confirm.
        _run(self.options)
def main():
    """Parse command-line options and start the monitor.

    Runs in the foreground by default; with -s start/stop/restart the
    process is managed as a daemon via MDaemon.
    """
    usage = "usage: %prog [OPTION]"
    option_list = (
        optparse.make_option("-l", "--access-log", dest="access_log",
                             help="Specify access log file path. "),
        optparse.make_option("-f", "--config-file", dest="config_file",
                             help="Specify config file path. "),
        optparse.make_option("-c", "--continue-read", dest="continue_read", action="store_true",
                             help="When using continue_run, this script will read the log from last position "
                             ),
        optparse.make_option("--loop", dest="loop_run", action="store_true",
                             help="run in loop, read from last postion"
                             ),
        optparse.make_option("-s", dest="start_op", action="store",
                             help="this script will run as daemon"
                             ),
    )
    option_default = {
    }
    parser = optparse.OptionParser(usage=usage, option_list=option_list)
    parser.set_defaults(**option_default)
    options, args = parser.parse_args()
    # Both the access log and the alert config are mandatory and must exist.
    if options.access_log is None:
        parser.error("you must specify access_log.")
    if not os.path.isfile(options.access_log):
        parser.error("access_log %s not exist." % options.access_log)
    if options.config_file is None:
        parser.error("you must specify config_file.")
    if not os.path.isfile(options.config_file):
        parser.error("config_file %s not exist." % options.config_file)
    if options.start_op is None:
        # Foreground mode.
        _run(options)
    else:
        daemon = MDaemon(current_dir + '/var/run/monit.pid',
                         stdin= "/dev/null",
                         stdout=current_dir + "/var/log/err.log",
                         stderr=current_dir + "/var/log/err.log",
                         options=options)
        if options.start_op == "start":
            daemon.start()
        elif options.start_op == "stop":
            daemon.stop()
        elif options.start_op == "restart":
            daemon.restart()

if __name__ == "__main__":
    main()
|
UTF-8
|
Python
| false | false | 4,129 |
py
| 8 |
monit.py
| 6 | 0.591184 | 0.588278 | 0 | 127 | 31.511811 | 144 |
XJCasper/Lab_3x
| 9,457,518,002,543 |
d6cb96e8475abca9c46e67d4ad9f2a42be0f3e59
|
f4ea34d41f639a652883835c76c5209c71989af7
|
/problem_2.py
|
1d880b9f4b15d8b3c8720452d2ba373b6a835579
|
[] |
no_license
|
https://github.com/XJCasper/Lab_3x
|
5398dcd6e5d0e8fd55d6a676e33458d6f9dc0271
|
022824a97c0e4d6dce910fde83442b6dcfed19b2
|
refs/heads/master
| 2021-04-12T05:09:57.267124 | 2014-10-22T00:54:18 | 2014-10-22T00:54:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Count how often selected GIS-related words occur in the text file, then
# print the total word count followed by the "systems" and "science" counts.
with open("GIS_is_the_best.txt", 'r') as gis_file:
    file_list = gis_file.read()

system_count = 0
science_count = 0
geographic_count = 0
information_count = 0
other_count = 0

# BUG FIX: split() with no argument splits on any whitespace (spaces, tabs,
# newlines). The original split(' ') left newlines glued to adjacent words,
# so words at line boundaries were never matched.
for word in file_list.split():
    lowered = word.lower()
    if lowered == 'systems':
        system_count += 1
    elif lowered == 'science':
        science_count += 1
    elif lowered == 'geographic':
        geographic_count += 1
    elif lowered == 'information':
        information_count += 1
    else:
        other_count += 1

print(system_count + science_count + geographic_count + information_count + other_count)
print(system_count)
print(science_count)
|
UTF-8
|
Python
| false | false | 604 |
py
| 4 |
problem_2.py
| 4 | 0.675497 | 0.657285 | 0 | 25 | 23.16 | 88 |
EasonSun/e2eClaimVerif
| 4,449,586,140,005 |
ff0a336cb093353b9ff8bb2db51d9c2f0ce471d9
|
2df649f0b335963da4a53ce7ffaaec0b172bb123
|
/snopesDataCrawling/extractTopic.py
|
f7d9697589179c53827fdceb8e01bf7bca0c9b79
|
[] |
no_license
|
https://github.com/EasonSun/e2eClaimVerif
|
0df315e12e0f9f9266458e399d752b36893c0ce4
|
5774ec9f9301230ada764074b1a157fa4b1704a9
|
refs/heads/master
| 2021-08-22T17:57:00.834683 | 2017-11-30T21:22:38 | 2017-11-30T21:22:38 | 112,644,392 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Collect the distinct `topic` values from cleaned_data.json and write one
# topic per line to topics.txt; entries with an empty topic are recorded
# under the placeholder "_".
import json

filename = 'cleaned_data.json'
topics = set()

with open(filename, 'r') as f:
    data = json.load(f)
    for entry in data:
        if entry['topic'] == '':
            topics.add("_")
        else:
            topics.add(entry['topic'])

with open('topics.txt', 'w') as outfile:
    for topic in topics:
        # BUG FIX: the original wrote `topics + '\n'` (the whole set),
        # which raises TypeError; write the individual topic instead.
        outfile.write(topic + '\n')
|
UTF-8
|
Python
| false | false | 298 |
py
| 10 |
extractTopic.py
| 7 | 0.634228 | 0.634228 | 0 | 15 | 18.933333 | 33 |
heheddff/python2018
| 14,748,917,730,815 |
c9889f714c2166386a60e0fdd64d37895a90e60e
|
ec87c361be4a2f9f842695b6a6e8601ebd735e83
|
/Speak.py
|
7fd58d13c37f3fb3d4d05707c8f5615b5784265d
|
[] |
no_license
|
https://github.com/heheddff/python2018
|
357d51bee7ea39f6a1df82101fb49c1568250e24
|
77a240dd155f679fffe33b62df57f689a8c85082
|
refs/heads/master
| 2020-03-27T23:13:38.789249 | 2018-12-08T14:55:21 | 2018-12-08T14:55:21 | 147,302,979 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
name = input("่ฏท่พๅ
ฅไฝ ๆณ่ฏด่ฏ็ๅฏนๆนๅๅญ")
s = input("่ฏท่พๅ
ฅไฝ ๆณ่ฏด็่ฏ")
print("{},ๆๅฟ
้กป่ฆๅ่ฏไฝ ๏ผ{}".format(name,s*3))
|
UTF-8
|
Python
| false | false | 149 |
py
| 8 |
Speak.py
| 7 | 0.634409 | 0.623656 | 0 | 3 | 29.333333 | 39 |
tefra/xsdata-w3c-tests
| 13,134,009,998,052 |
31b788845f4dc192baba3a299f9a3cdb8c6d32da
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/models/ms_data/regex/re_k3_xsd/__init__.py
|
9a1ff7e3b970fd14e678a0da7cfbf744fa1b3de7
|
[
"MIT"
] |
permissive
|
https://github.com/tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 |
MIT
| false | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | 2023-01-18T19:44:58 | 2023-07-25T14:19:02 | 36,096 | 2 | 0 | 0 |
Python
| false | false |
# Package init: re-export the generated model class so callers can import
# `Doc` directly from this package.
from output.models.ms_data.regex.re_k3_xsd.re_k3 import Doc

__all__ = [
    "Doc",
]
|
UTF-8
|
Python
| false | false | 86 |
py
| 18,966 |
__init__.py
| 14,569 | 0.616279 | 0.593023 | 0 | 5 | 16.2 | 59 |
zhangwuyi1234/github
| 4,209,067,970,405 |
0602ff2e0739ee3caa79d96a3e40918b5d79bcc7
|
8b1e9a3d36858bd131489ccffe48656ae141c67b
|
/file/image/data/upMongo.py
|
407db23a8cc39cd4ac6e0c367beac868b6ad12b7
|
[] |
no_license
|
https://github.com/zhangwuyi1234/github
|
c1ee8fcdb60c9dbff048e258c7a71f0e22c5acf2
|
277bcd30e767a0b1190efea697d9f4a27706317b
|
refs/heads/master
| 2017-10-11T15:48:16.146604 | 2017-10-11T04:00:13 | 2017-10-11T04:00:13 | 85,694,967 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Dump the local `share` MongoDB database, tar the dump, upload the archive
# to Aliyun OSS, then remove the local dump and archive.
import os
import shutil
import oss2
# SECURITY NOTE(review): real-looking access keys are hard-coded as
# fallbacks below -- they should be rotated and supplied via environment
# variables only.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID','LTAIOEOb5yeFEcQT')
# NOTE(review): the env-var name below starts with '6' -- looks like a typo
# for 'OSS_TEST_ACCESS_KEY_SECRET'; confirm before changing.
access_key_secret = os.getenv('6OSS_TEST_ACCESS_KEY_SECRET','qrEVuVmYVOcxZRHtKCTquXtFFz5u5p')
bucket_name = os.getenv('OSS_TEST_BUCKET', 'zhangwuyishare')
endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-shanghai.aliyuncs.com')
# Sanity check: none of the settings may still contain a '<placeholder>'.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'setPram' + param
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
#with open('test.png', 'rb') as fileobj:
# bucket.put_object('test.png', fileobj)
#bucket.get_object_to_file('share', 'share')
#test
#upImage("12344.png","test.png")
# NOTE(review): os.system runs through the shell; the commands are constant
# strings here, but prefer subprocess.run([...]) if they ever take input.
os.system('mongodump -h 127.0.0.1 -d share -o /usr/work/github/file/image/data ')
os.system('tar -cvf share.tar share ')
# Resumable upload handles large archives / flaky connections.
oss2.resumable_upload(bucket, 'share.tar', 'share.tar')
shutil.rmtree('share')
os.remove('share.tar')
|
UTF-8
|
Python
| false | false | 970 |
py
| 18 |
upMongo.py
| 7 | 0.710309 | 0.689691 | 0 | 31 | 30.290323 | 93 |
fermi-lat/pointlike
| 9,758,165,724,158 |
14158065f2f4b8a3e36a8ca802c788178f437b71
|
0ae50405d60e245a025c6f984cb315a7afeb6833
|
/python/uw/like2/plotting/__init__.py
|
4ce08315693de48e0b518a39d53a81b726232d6c
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/fermi-lat/pointlike
|
65977ee0085670c24800982ffa0855dba6c46680
|
edcdc696c3300e2f26ff3efa92a1bd9790074247
|
refs/heads/master
| 2021-11-11T19:18:42.676557 | 2020-01-23T20:57:54 | 2020-01-23T20:57:54 | 103,187,044 | 1 | 3 |
BSD-3-Clause
| false | 2021-11-05T15:33:32 | 2017-09-11T20:52:43 | 2020-11-13T18:39:01 | 2021-08-19T17:37:18 | 70,531 | 0 | 4 | 1 |
Python
| false | false |
"""
Package devoted to plotting code
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/like2/plotting/__init__.py,v 1.3 2012/01/27 15:07:14 burnett Exp $
Authors: T. Burnett, M. Kerr, E. Wallace, M. Roth, J. Lande
"""
from . import sed, tsmap, counts
|
UTF-8
|
Python
| false | false | 261 |
py
| 292 |
__init__.py
| 260 | 0.697318 | 0.632184 | 0 | 7 | 36.285714 | 124 |
ihciah/BGmi
| 3,418,794,006,407 |
e9a2b0f9eaa90adced11bd15b641de168257f2f1
|
5c25b126bcadcaa24782412a679e46d63c812f2a
|
/setup.py
|
4215d521122a5121de294b4e4114207cd1c24685
|
[
"MIT"
] |
permissive
|
https://github.com/ihciah/BGmi
|
0adf8424729d9338368580e2b9577b2dd4a32970
|
da0d0e78cbe3e6c971b3748dcc2189f29af6471b
|
refs/heads/master
| 2021-01-20T01:57:45.323756 | 2017-04-25T11:40:25 | 2017-04-25T11:40:25 | 89,352,123 | 1 | 0 | null | true | 2017-04-25T11:18:55 | 2017-04-25T11:18:54 | 2017-04-17T18:19:02 | 2017-04-11T12:48:15 | 2,549 | 0 | 0 | 0 | null | null | null |
# coding=utf-8
import os
import codecs
from setuptools import setup, find_packages
from setuptools.command.install import install
from bgmi import __version__, __author__, __email__
# Runtime dependencies are pinned in requirements.txt, one package per line.
with open('requirements.txt', 'r') as f:
    requirements = f.read().splitlines()

# Absolute path of the directory containing this setup.py.
ROOT = os.path.abspath(os.path.dirname(__file__))
def long_description():
    """Return the contents of README.rst as text for the PyPI long description.

    BUG FIX: the original opened the file in binary mode and passed the raw
    bytes through ``str()``, which under Python 3 yields ``"b'...'"`` instead
    of the document text (and it also leaked the file handle). Decode the
    file as UTF-8 and close it deterministically instead.
    """
    with codecs.open('README.rst', 'rb', encoding='utf-8') as f:
        return f.read()
# Package metadata. The entry points expose the `bgmi` CLI and the
# `bgmi_http` web front end as console scripts.
setup(
    name='bgmi',
    version=__version__,
    author=__author__,
    author_email=__email__,
    keywords='bangumi, bgmi, feed',
    description='BGmi is a cli tool for subscribed bangumi.',
    long_description=long_description(),
    url='https://github.com/RicterZ/BGmi',
    download_url='https://github.com/RicterZ/BGmi/tarball/master',
    packages=find_packages(),
    package_data={'': ['LICENSES']},
    include_package_data=True,
    zip_safe=False,
    install_requires=requirements,
    entry_points={
        'console_scripts': [
            'bgmi = bgmi.main:setup',
            'bgmi_http = bgmi.front.http:main'
        ]
    },
    license='MIT License',
    classifiers=(
        'Development Status :: 4 - Beta',
        'Intended Audience :: Other Audience',
        'Natural Language :: Chinese (Traditional)',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ),
)
|
UTF-8
|
Python
| false | false | 1,742 |
py
| 17 |
setup.py
| 10 | 0.615959 | 0.609644 | 0 | 56 | 30.107143 | 70 |
ushakov/TX
| 3,487,513,448,520 |
4fb0227512789838e18e5f2448dd1c4d632ef8b4
|
3a4ccc1e06f042751b90f4deffdaea21696ebe5c
|
/host/ser.py
|
e6182bf88763a196216421b9cbb86ec5fcf742c0
|
[] |
no_license
|
https://github.com/ushakov/TX
|
5cad2f5a85a3a0fc59fa9009a97ba6befc5902c2
|
73013586cb99153644f342f6fff0c44869880f4e
|
refs/heads/master
| 2021-01-01T17:46:59.659317 | 2010-06-23T17:35:47 | 2010-06-23T17:35:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import serial
import gtk
import model
import vis
class Data(object):
    """Local mirror of the transmitter's configuration byte array.

    `comm` is a TXComm (or None for offline use); a Set() that actually
    changes a byte is pushed through it to the device.
    """
    def __init__(self, comm):
        self.array = []   # raw signed-byte values, indexed by offset
        self.comm = comm  # TXComm instance, or None (offline)
    def Get(self, location):
        # `location` is expected to expose an integer `.offset` attribute
        # -- presumably defined in the `model` module; confirm there.
        return self.array[location.offset]
    def Set(self, location, value):
        if self.array[location.offset] != value:
            print "setting", location.offset, "to", value
            self.array[location.offset] = value
            if self.comm:
                # Propagate the change to the hardware.
                self.comm.Set(location.offset, value)
        else:
            # Unchanged value: nothing written, only logged.
            print "hit at ", location.offset, "to", value
class TXComm(object):
    """Line-oriented serial protocol to the TX on /dev/ttyUSB0 (Python 2).

    Commands are sent as text lines; the device replies with lines and
    terminates each command with "DONE" ("Err" on failure).
    """
    def __init__(self):
        self.port = serial.Serial("/dev/ttyUSB0", 57600)
        # Very short timeout so Check() polls rather than blocks.
        self.port.setTimeout(0.001)
        self.line = ""  # partial line carried over between Check() calls
    def Check(self):
        """Poll the port; return one complete input line, or None."""
        r = self.port.readline()
        if not r:
            return None
        if r[-1] != '\n':
            # Timeout mid-line: stash the fragment and wait for the rest.
            self.line += r
            return None
        if len(r) > 1 and r[-2] == '\r':
            # NOTE(review): a CRLF terminator is removed entirely, but a
            # bare LF is kept on the line -- confirm the device always
            # sends CRLF.
            r = r[:-2]
        if not r:
            return None
        whole_input = self.line + r
        self.line = ""
        print "<<<", whole_input
        return whole_input
    def Send(self, line):
        """Write one command line, CRLF-terminated."""
        print ">>>", line
        self.port.write(line)
        self.port.write("\r\n")
    def Set(self, where, val):
        """Write signed byte `val` at offset `where`; wait for DONE/Err."""
        if val < 0:
            val += 256  # two's-complement encoding for the wire
        self.Send("S %03x %02x" % (where, val))
        while True:
            line = self.Check()
            if not line:
                continue
            if line == "DONE":
                return
            if line == "Err":
                raise RuntimeError("comm error")
    def GetData(self):
        """Download the full config array ("D" command) into a Data object."""
        self.Send("D")
        array = []
        while True:
            line = self.Check()
            if not line:
                continue
            if line == "DONE":
                break
            if line == "Err":
                raise RuntimeError("comm error")
            if line[3] != ':':
                continue  # not an "off: aa bb ..." data line
            off = int(line[:3], 16)
            if off != len(array):
                raise RuntimeError("comm out of sync: off=%s, received %s" % (
                    off, len(array)))
            f = line[4:].split()
            for hx in f:
                val = int(hx, 16)
                # Decode as a signed byte.
                # NOTE(review): `val > 128` leaves 0x80 as +128; a strict
                # signed-byte decode would use `val >= 128` -- confirm intent.
                if val > 128:
                    val -= 256
                array.append(val)
        d = Data(self)
        d.array = array
        return d
    def Reset(self):
        """Send the reset command and wait for acknowledgement."""
        self.Send("R R R")
        while True:
            line = self.Check()
            if not line:
                continue
            if line == "DONE":
                return
|
UTF-8
|
Python
| false | false | 2,603 |
py
| 21 |
ser.py
| 9 | 0.44295 | 0.429504 | 0 | 99 | 25.282828 | 78 |
atvf000/Alice
| 7,765,300,890,761 |
950c658627f6bdb5055dc2eb62449e4284b1f9e2
|
4d6ae11c3c07606c07adabd64882805b6489b724
|
/alice.py
|
aa365ef6f9ffd1fe7efafd7e8bcd32f714b435d1
|
[] |
no_license
|
https://github.com/atvf000/Alice
|
f8db6308d0be12c961ddb98a23eee406facab669
|
fe6a22e3a06bdc25319da0053f612c0df6891570
|
refs/heads/master
| 2020-11-24T01:57:04.019366 | 2019-12-14T08:31:46 | 2019-12-14T08:31:46 | 227,915,281 | 7 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
##@package alice
# ะคะฐะนะป ั ัะตะฐะปะธะทะฐัะธะตะน ัะฐะผะพะน ะธะณัั ะะปะธัั
from __future__ import unicode_literals
from random import randint
RULES = '''\n- ะงะธัะปะพ ะฒ ััะตะนะบะต ะฟะพะบะฐะทัะฒะฐะตั, ัะบะพะปัะบะพ ะผะธะฝ ัะบัััะพ ะฒะพะบััะณ ะดะฐะฝะฝะพะน ััะตะนะบะธ\n
- ะัะปะธ ััะดะพะผ ั ะพัะบัััะพะน ััะตะนะบะพะน ะตััั ะฟัััะฐั ััะตะนะบะฐ, ัะพ ะพะฝะฐ ะพัะบัะพะตััั ะฐะฒัะพะผะฐัะธัะตัะบะธ.\n
- ะัะปะธ ะฒั ะพัะบััะปะธ ััะตะนะบั ั ะผะธะฝะพะน, ัะพ ะธะณัะฐ ะฟัะพะธะณัะฐะฝะฐ.'''
ALICE_TURN = ''' \nะะพะน ั
ะพะด!'''
COMMANDS = ''' - ะพัะบัััั <ะฑัะบะฒะฐ> <ัะธััะฐ>\n
- ะพัะผะตัะธัั <ะฑัะบะฒะฐ> <ัะธััะฐ>\n
- ัะฑัะฐัั <ะฑัะบะฒะฐ> <ัะธััะฐ>\n
- ะฟะพะผะพัั\n
- ะฟะพะบะฐะทะฐัั\n
- ะฟัะฐะฒะธะปะฐ\n
- ะบะพะผะฐะฝะดั\n'''
## ะัะพะฒะตัะบะฐ ะฟะพะฑะตะดั
#@param values: ะผะฐััะธัะฐ ั ัะปะตะผะตะฝัะฐะผะธ
#@param typeC: ะผะฐััะธัะฐ ั ัะพััะพัะฝะธัะผะธ ัะปะตะผะตะฝัะพะฒ
# ะคัะฝะบัะธั ะดะปั ะพะฟัะตะดะตะปะตะฝะธั ะฟะพะฑะตะดั ะฒ ะธะณัะต
#@return ะะพะทะฒัะฐัะฐะตั ะธััะธะฝั, ะตัะปะธ ะฟะพะฑะตะดะธะป, ะปะพะถั, ะตัะปะธ ะฝะตั
def isWin(values, typeC):
    """Return True when all ten mines are flagged.

    A win is exactly ten cells that both hold a mine (values == -1) and
    carry a flag (typeC == 2) on the 10x10 board.
    """
    flagged_mines = sum(
        1
        for row in range(10)
        for col in range(10)
        if values[row][col] == -1 and typeC[row][col] == 2
    )
    return flagged_mines == 10
## ะคัะฝะบัะธั ะฑะพัะฐ
#@param values: ะผะฐััะธัะฐ ั ัะปะตะผะตะฝัะฐะผะธ
#@param typeC: ะผะฐััะธัะฐ ั ัะพััะพัะฝะธัะผะธ ัะปะตะผะตะฝัะพะฒ
#@param step: ััะฐะฟ ะธะณัั
# ะคัะฝะบัะธั ะธัะบัััะฒะตะฝะฝะพะณะพ ะธะฝัะตะปะตะบัะฐ ะดะปั ะธะณัั
#@return ะะพะทะฒัะฐัะฐะตั ัะตะบัั ั ะฟะพะปะตะผ
def bot(values, typeC, step):
    """Alice's turn: early in the game guess randomly, later deduce.

    prob() fires with chance ~100/step, so random openings fade out as
    the step counter grows and cleverOpen() takes over.
    """
    return randOpen(values, typeC) if prob(step) else cleverOpen(values, typeC)
## ะคัะฝะบัะธั ัะปััะฐะนะฝะพะณะพ ะพัะบัััะธั
#@param values: ะผะฐััะธัะฐ ั ัะปะตะผะตะฝัะฐะผะธ
#@param typeC: ะผะฐััะธัะฐ ั ัะพััะพัะฝะธัะผะธ ัะปะตะผะตะฝัะพะฒ
# ะคัะฝะบัะธั ะธัะบัััะฒะตะฝะฝะพะณะพ ะธะฝัะตะปะตะบัะฐ ะดะปั ัะปััะฐะนะฝะพะณะพ ะพัะบัััะธั ะบะปะตัะบะธ
#@return ะะพะทะฒัะฐัะฐะตั ัะตะบัั ั ะฟะพะปะตะผ
def randOpen(values, typeC):
    """Open a uniformly random still-closed cell (typeC == 0)."""
    while True:
        row, col = randint(0, 9), randint(0, 9)
        if typeC[row][col] == 0:
            return openC(values, typeC, row, col)
## ะคัะฝะบัะธั ะฟะพะธัะบะฐ ัะปะตะผะตะฝัะพะฒ ััะดะพะผ
#@param typeC: ะผะฐััะธัะฐ ั ัะพััะพัะฝะธัะผะธ ัะปะตะผะตะฝัะพะฒ
#@param i: ะบะพะพัะดะธะฝะฐัะฐ i
#@param j: ะบะพะพัะดะธะฝะฐัะฐ j
#@param smth: ะทะฐะดะฐะฝะฝัะน ัะปะตะผะตะฝั
# ะคัะฝะบัะธั ะดะปั ะพะฟัะตะดะตะปะตะฝะธั ะบะพะปะธัะตััะฒะฐ ะทะฐะดะฐะฝะฝัั
ัะปะตะผะตะฝัะพะฒ ััะดะพะผ ั ัะพัะบะพะน
#@return ะะพะทะฒัะฐัะฐะตั ะบะพะปะธัะตััะฒะพ
def isSmthNear(typeC, i, j, smth):
    """Count cells equal to `smth` among the eight neighbours of (i, j).

    Board bounds are the fixed 10x10 grid (indices 0..9).
    """
    count = 0
    for di in (-1, 0, 1):
        for dj in (-1, 0, 1):
            if di == 0 and dj == 0:
                continue  # the cell itself is not its own neighbour
            ni, nj = i + di, j + dj
            if 0 <= ni <= 9 and 0 <= nj <= 9 and typeC[ni][nj] == smth:
                count += 1
    return count
## ะคัะฝะบัะธั ะธะทะผะตะฝะตะฝะธั ัะปะตะผะตะฝัะพะฒ ััะดะพะผ
#@param typeC: ะผะฐััะธัะฐ ั ัะพััะพัะฝะธัะผะธ ัะปะตะผะตะฝัะพะฒ
#@param i: ะบะพะพัะดะธะฝะฐัะฐ i
#@param j: ะบะพะพัะดะธะฝะฐัะฐ j
#@param smth: ะทะฐะดะฐะฝะฝัะน ัะปะตะผะตะฝั
# ะคัะฝะบัะธั ะดะปั ะธะทะผะตะฝะตะฝะธั ัะพัะตะบ ะทะฐะดะฐะฝะฝัะผ ัะปะตะผะตะฝัะพะผ ััะดะพะผ ั ะทะฐะดะฐะฝะฝะพะน ัะพัะบะพะน
def SmthNear(typeC, i, j, smth):
    """Overwrite every still-closed (== 0) neighbour of (i, j) with `smth`.

    Mutates `typeC` in place; non-zero neighbours are left untouched.
    """
    for di in (-1, 0, 1):
        for dj in (-1, 0, 1):
            if di == 0 and dj == 0:
                continue
            ni, nj = i + di, j + dj
            if 0 <= ni <= 9 and 0 <= nj <= 9 and typeC[ni][nj] == 0:
                typeC[ni][nj] = smth
## ะคัะฝะบัะธั ะฐะฝะฐะปะธะทะธัะพะฒะฐะฝะฝะพะณะพ ะพัะบัััะธั
#@param values: ะผะฐััะธัะฐ ั ัะปะตะผะตะฝัะฐะผะธ
#@param typeC: ะผะฐััะธัะฐ ั ัะพััะพัะฝะธัะผะธ ัะปะตะผะตะฝัะพะฒ
# ะคัะฝะบัะธั ะธัะบัััะฒะตะฝะฝะพะณะพ ะธะฝัะตะปะตะบัะฐ ะดะปั ะพัะบัััะธั ะบะปะตัะบะธ ั ะฐะฝะฐะปะธะทะพะผ
#@return ะะพะทะฒัะฐัะฐะตั ัะตะบัั ั ะฟะพะปะตะผ
def cleverOpen(values, typeC):
    """Deterministic AI move: scan for an open numbered cell with closed
    neighbours and, if its number is fully accounted for, either flag or
    open those neighbours. Falls back to a random open when the scan runs
    off the board.

    typeC states: 0 = closed, 1 = open, 2 = flagged.
    """
    done = False
    find = False
    x, y = 0, -1  # scan cursor; the first advance moves it to (0, 0)
    while not done:
        # Advance the cursor until a candidate cell is found.
        while not find:
            if y == 9:
                x += 1
                y = 0
            else:
                y += 1
            if x == 10:
                # Scanned the whole board without a deduction -- guess.
                return randOpen(values, typeC)
            if typeC[x][y] == 1 and values[x][y] != 0 and isSmthNear(typeC, x, y, 0) != 0:
                find = True
        if values[x][y] == isSmthNear(typeC, x, y, 0) + isSmthNear(typeC, x, y, 2):
            # Every closed neighbour must be a mine: flag them all.
            SmthNear(typeC, x, y, 2)
            done = True
        elif values[x][y] == isSmthNear(typeC, x, y, 2):
            # All surrounding mines are already flagged: open the rest.
            # NOTE(review): marking with state 1 opens the cells without the
            # cascade openNear() performs -- presumably intended; confirm.
            SmthNear(typeC, x, y, 1)
            done = True
        find = False
    return printF(values, typeC)
## ะคัะฝะบัะธั ะฒะตัะพััะฝะพััะธ
#@param step: ัะฐะณ ะธะณัั
# ะคัะฝะบัะธั ะดะปั ะฟะพะดััะตัะฐ ะฒะตัะพััะฝะพััะธ
#@return ะฒะพะทะฒัะฐัะฐะตั ะธััะธะฝั ะธะปะธ ะปะพะถั ะฒ ะทะฐะฒะธัะธะผะพััะธ ะพั ัะฐะฝะดะพะผะฐ
def prob(step):
    """Random gate that fires with probability roughly 100/step percent.

    Returns True when a uniform draw from 0..100 falls below 100/step, so
    the chance of True shrinks as `step` grows.
    """
    return randint(0, 100) < 100 / step
## ะคัะฝะบัะธั ะพะฟัะตะดะตะปะตะฝะธั ะฟัััะพะน ะบะปะตัะบะธ
#@param point: ะบะปะตัะบะฐ
# ะคัะฝะบัะธั ะดะปั ะพะฟัะตะดะตะปะตะฝะธั ัะฒะปัะตััั ะปะธ ััะฐ ะบะปะตัะบะฐ ะฟัััะพะน
#@return ะธััะธะฝั, ะตัะปะธ ะบะปะตัะบะฐ ะฟัััะฐั
def isFree(point):
    """Return True when the cell value equals 0, i.e. the cell is empty."""
    return point == 0
## Cell-opening function.
#@param values: matrix with the cell values
#@param typeC: matrix with the cell states
# Opens the cell at (x, y): losing message on a bomb, flood-fill of the
# neighbourhood on an empty cell, otherwise just reveals the number.
#@return the rendered board text (or a status message)
def openC(values, typeC, x, y):
    if typeC[x][y] != 0:
        return "ะญัะฐ ะบะปะตัะบะฐ ัะถะต ะพัะบัััะฐ"
    typeC[x][y] = 1
    if values[x][y] == -1:
        return "ะั ะฟะพะฟะฐะปะธ ะฝะฐ ะฑะพะผะฑั :("
    if values[x][y] == 0:
        openNear(values, typeC, x, y)
    return printF(values, typeC)
## Neighbour-opening function.
#@param values: matrix with the cell values
#@param typeC: matrix with the cell states
#@param x: x coordinate
#@param y: y coordinate
# Recursively reveals the four orthogonal neighbours of (x, y): empty
# neighbours keep flooding, numbered neighbours are revealed but stop
# the recursion.
#@return None (the state matrix is updated in place)
def openNear(values, typeC, x, y):
    for shift_x, shift_y in ((-1, 0), (1, 0), (0, -1), (0, 1)):
        near_x, near_y = x + shift_x, y + shift_y
        if not (0 <= near_x <= 9 and 0 <= near_y <= 9):
            continue  # off the board
        if typeC[near_x][near_y] != 0:
            continue  # already open or flagged
        typeC[near_x][near_y] = 1
        if isFree(values[near_x][near_y]):
            # Empty cell: keep flooding through it.
            openNear(values, typeC, near_x, near_y)
## Flag-placing function.
#@param values: matrix with the cell values
#@param typeC: matrix with the cell states
#@param x: x coordinate
#@param y: y coordinate
# Marks a still-closed cell with a flag.
#@return the rendered board text (or a status message)
def flag(values, typeC, x, y):
    if typeC[x][y] != 0:
        return "ะญัะฐ ะบะปะตัะบะฐ ัะถะต ะพัะผะตัะตะฝะฐ"
    typeC[x][y] = 2
    return printF(values, typeC)
## Flag-removing function.
#@param values: matrix with the cell values
#@param typeC: matrix with the cell states
#@param x: x coordinate
#@param y: y coordinate
# Removes a previously placed flag from the cell.
#@return the rendered board text (or a status message)
def unFlag(values, typeC, x, y):
    if typeC[x][y] != 2:
        return "ะญัะฐ ะบะปะตัะบะฐ ะฝะต ะพัะผะตัะตะฝะฐ ัะปะฐะณะพะผ"
    typeC[x][y] = 0
    return printF(values, typeC)
## Bomb predicate.
#@param point: a cell value
# Tells whether the given cell value marks a bomb.
#@return True when the cell value equals -1
def isBomb(point):
    return -1 == point
## Initialisation function.
#@param values: matrix with the cell values
# First-time board setup: places ten bombs on distinct squares, then
# fills every non-bomb square with the number of bombs among its eight
# neighbours.
#@return None (the matrix is filled in place)
def init(values):
    # Drop ten bombs, redrawing coordinates until a free square is hit.
    for _ in range(10):
        row, col = randint(0, 9), randint(0, 9)
        while not isFree(values[row][col]):
            row, col = randint(0, 9), randint(0, 9)
        values[row][col] = -1
    # Count bombs in the 8-neighbourhood of every remaining square.
    for row in range(10):
        for col in range(10):
            if values[row][col] != 0:
                continue  # bomb squares keep their -1 marker
            for d_row in (-1, 0, 1):
                for d_col in (-1, 0, 1):
                    if d_row == 0 and d_col == 0:
                        continue  # the square itself is not a neighbour
                    n_row, n_col = row + d_row, col + d_col
                    if 0 <= n_row <= 9 and 0 <= n_col <= 9:
                        values[row][col] += isBomb(values[n_row][n_col])
# Column labels (Cyrillic letters) shown in the board header and accepted
# as the first coordinate of user commands ("open <letter> <digit>").
COLUMNS = ['ะฐ', 'ะฑ', 'ะฒ', 'ะณ', 'ะด', 'ะต', 'ะถ', 'ะท', 'ะธ', 'ะบ']
## Board-printing function.
#@param values: matrix with the cell values
#@param typeC: matrix with the cell states
# Renders the playing field as text: 'x' = closed, 'F' = flagged,
# 'B' = bomb, digits = neighbour counts.
#@return the board as a multi-line string
def printF(values, typeC):
    header = '|โฝ|' + '|'.join(f' {label} ' for label in COLUMNS) + '|\n'
    rows = [header]
    for row in range(10):
        cells = ["| " + str(row) + " "]
        for col in range(10):
            state = typeC[row][col]
            if state == 1:
                value = values[row][col]
                if value == 0:
                    cells.append("|โฝ")
                elif value == -1:
                    cells.append("| B ")
                else:
                    cells.append("| " + str(value) + " ")
            elif state == 0:
                cells.append("| x ")
            elif state == 2:
                cells.append("| F ")
        cells.append("|\n")
        rows.append(''.join(cells))
    return ''.join(rows)
## Cheat-printing function.
#@param values: matrix with the cell values
# Renders the fully revealed board (used by the "cheat" command):
# every value is shown regardless of the cell state.
#@return the revealed board as a multi-line string
def printC(values):
    header = '|โฝ|' + '|'.join(f' {label} ' for label in COLUMNS) + '|\n'
    parts = [header]
    for row in range(10):
        parts.append("| " + str(row) + " ")
        for col in range(10):
            value = values[row][col]
            if value == -1:
                parts.append("| B ")
            elif value == 0:
                parts.append("|โฝ")
            else:
                parts.append("| " + str(value) + " ")
        parts.append("|\n")
    return ''.join(parts)
## Coordinate-parsing helper (extracted from handle_dialog).
#@param command: tokenized user command ("<action> <column> <row>")
# Validates the column letter against COLUMNS and the row against 0-9.
# BUG FIX: the old inline check called int() on arbitrary text, so a
# non-numeric row crashed the skill with ValueError.
#@return (row, column_index) or None for an invalid pair
def _parse_coords(command):
    if command[1] in COLUMNS and command[2].isdigit() and -1 < int(command[2]) < 10:
        return int(command[2]), COLUMNS.index(command[1])
    return None
## Dialog-handling function.
#@param request: the user's request
#@param response: the response object
#@param user_storage: session data
# Main entry point that routes every dialog turn with Alice to the
# matching game action (show/open/flag/unflag/cheat/bot/...).
#@return the response and the updated session data
def handle_dialog(request, response, user_storage):
    # Fresh session: build a new field and greet the player.
    if request.is_new_session or user_storage is None:
        values = [[0 for j in range(10)] for i in range(10)]
        typeC = [[0 for j in range(10)] for i in range(10)]
        init(values)
        user_storage = {
            "user_id": request.user_id,
            "users_turn": True,
            "matrix": values,
            "open_cells": typeC,
            "step": 1,
        }
        response.set_text('ะัะธะฒะตั! ะะฐะฒะฐะน ััะณัะฐะตะผ ะฒ ะกะฐะฟะตัะฐ! ' + RULES)
        return response, user_storage
    user_message = request.command.lower().strip()
    command = str(user_message).split()
    # BUG FIX: an empty utterance used to raise IndexError on command[0].
    if not command:
        response.set_text("ะฏ ะฒะฐั ะฝะต ะฟะพะฝัะปะฐ :(")
        return response, user_storage
    values = user_storage["matrix"]
    typeC = user_storage["open_cells"]
    if not user_storage["users_turn"] and command[0] != "ะฟะพะบะฐะทะฐัั":
        # BUG FIX: this literal was broken across two source lines,
        # which made the module a syntax error; rejoined here.
        response.set_text("ะญะน! ะั ะฝะต ะฟะพัะผะพััะตะปะธ ะผะพะน ัะพะด!")
        return response, user_storage
    if user_storage["users_turn"]:
        if command[0] == "ะฟะพะบะฐะทะฐัั":
            answer = printF(values, typeC)
        elif command[0] == "ะฟัะฐะฒะธะปะฐ" or command[0] == "ะฟะพะผะพัั":
            answer = RULES
        elif command[0] == "ะบะพะผะฐะฝะดั" or user_message == "ััะพ ัั ัะผะตะตัั":
            answer = COMMANDS
        elif command[0] == "ัะธั":
            answer = printC(values)
        elif command[0] == "ะพัะบัััั" and len(command) == 3:
            coords = _parse_coords(command)
            if coords is None:
                answer = "ะะตะฟัะฐะฒะธะปัะฝัะต ะบะพะพัะดะธะฝะฐัั :("
            else:
                answer = openC(values, typeC, coords[0], coords[1])
                user_storage["users_turn"] = False
        elif command[0] == "ัะฑัะฐัั" and len(command) == 3:
            coords = _parse_coords(command)
            if coords is None:
                answer = "ะะตะฟัะฐะฒะธะปัะฝัะต ะบะพะพัะดะธะฝะฐัั :("
            else:
                answer = unFlag(values, typeC, coords[0], coords[1])
        elif command[0] == "ะพัะผะตัะธัั" and len(command) == 3:
            coords = _parse_coords(command)
            if coords is None:
                answer = "ะะตะฟัะฐะฒะธะปัะฝัะต ะบะพะพัะดะธะฝะฐัั :("
            else:
                answer = flag(values, typeC, coords[0], coords[1])
                user_storage["users_turn"] = False
        elif command[0] == "ะฑะพั":
            answer = bot(values, typeC, user_storage["step"])
        else:
            answer = "ะฏ ะฒะฐั ะฝะต ะฟะพะฝัะปะฐ :("
    else:
        # Alice's turn: the bot moves, then control returns to the user.
        answer = bot(values, typeC, user_storage["step"])
        user_storage["users_turn"] = True
    user_storage["matrix"] = values
    user_storage["open_cells"] = typeC
    user_storage["step"] += 1
    response.set_text(answer)
    if isWin(values, typeC):
        user_storage = end(request, response, user_storage, "\nะั ะฟะพะฑะตะดะธะปะธ!\n")
    if answer == "ะั ะฟะพะฟะฐะปะธ ะฝะฐ ะฑะพะผะฑั :(":
        user_storage = end(request, response, user_storage, "\n" + answer)
    if not user_storage["users_turn"]:
        response.set_text(answer + ALICE_TURN)
    return response, user_storage
## Game-ending function.
#@param request: the user's request
#@param response: the response object
#@param user_storage: session data
#@param answer: the final message shown to the user
# Runs at the very end of a game: shows the final board together with
# the closing message and initialises a brand-new game.
#@return the fresh session data
def end(request, response, user_storage, answer):
    final_board = printF(user_storage["matrix"], user_storage["open_cells"])
    fresh_values = [[0] * 10 for _ in range(10)]
    fresh_states = [[0] * 10 for _ in range(10)]
    init(fresh_values)
    response.set_text(final_board + "\n" + answer + '\n ะะฐะฒะฐะนัะต ััะณัะฐะตะผ ะทะฐะฝะพะฒะพ! ' + RULES)
    return {
        "user_id": request.user_id,
        "users_turn": True,
        "matrix": fresh_values,
        "open_cells": fresh_states,
        "step": 1,
    }
|
UTF-8
|
Python
| false | false | 18,105 |
py
| 6 |
alice.py
| 4 | 0.544924 | 0.526835 | 0 | 470 | 31.110638 | 90 |
Rodot-/SpaceGameThing
| 1,855,425,878,728 |
92dec1f4274d15e68c9000ac64cdf5eaaeb97649
|
bdb58bd6aac41da284e79e3d136db48b2cefc4e7
|
/test/plotDat.py
|
cca3db19edcccaf6ca38655edc1587b289a496d4
|
[] |
no_license
|
https://github.com/Rodot-/SpaceGameThing
|
bdc10a95d1fa1f7affa5cc8f12f6661377c35747
|
8510495cddb19b9ae50d6c227c6d9125271a8efa
|
refs/heads/master
| 2021-01-09T20:01:07.974115 | 2018-08-28T20:09:17 | 2018-08-28T20:09:17 | 61,905,924 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from matplotlib.pyplot import subplots, show
import numpy as np
import sys
import time
# Animate trajectory files given on the command line: each file holds
# whitespace-separated columns (time, then x/y pairs for ten tracks).
fig, ax = subplots(1, 1)
fig.canvas.draw()
show(False)
for path in sys.argv[1:]:
    # BUG FIX (Python 3): map() returns an iterator, so np.array([map(...)])
    # built an array of map objects instead of floats; xrange no longer
    # exists either.  Also renamed the loop variable so the artist dict no
    # longer shadows the file-line variable.
    with open(path, 'r') as f:
        data = np.array([[float(v) for v in line.split()] for line in f])
    artists = {}
    ax.set_xlim(0, 26)
    ax.set_ylim(0, 5)
    for j in range(10):
        artists[j], = ax.plot([], [])
    for t in range(len(data)):
        ax.draw_artist(ax.patch)
        for j in range(10):
            # Columns 1+2j / 2+2j are the x / y history of track j.
            artists[j].set_data(data[:t, 1 + 2 * j], data[:t, 2 + 2 * j])
            ax.draw_artist(artists[j])
        # NOTE(review): canvas.update() is Qt-backend specific; other
        # backends would need fig.canvas.draw_idle() — confirm backend.
        fig.canvas.update()
        fig.canvas.flush_events()
show()
|
UTF-8
|
Python
| false | false | 636 |
py
| 76 |
plotDat.py
| 56 | 0.624214 | 0.592767 | 0 | 27 | 22.518519 | 59 |
DataDog/datadog-api-client-python
| 16,303,695,889,429 |
5b50bfe8a69167dba5542299bb7a018b40f384fc
|
80d50ea48e10674b1b7d3f583a1c4b7d0b01200f
|
/src/datadog_api_client/v1/model/synthetics_step_detail.py
|
b3499dfdd80a3b1c2a132ae5673345d93ccae9e2
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] |
permissive
|
https://github.com/DataDog/datadog-api-client-python
|
3e01fa630278ad0b5c7005f08b7f61d07aa87345
|
392de360e7de659ee25e4a6753706820ca7c6a92
|
refs/heads/master
| 2023-09-01T20:32:37.718187 | 2023-09-01T14:42:04 | 2023-09-01T14:42:04 | 193,793,657 | 82 | 36 |
Apache-2.0
| false | 2023-09-14T18:22:39 | 2019-06-25T22:52:04 | 2023-09-04T20:58:53 | 2023-09-14T18:22:38 | 127,694 | 77 | 35 | 29 |
Python
| false | false |
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import annotations
from typing import Any, List, Union, TYPE_CHECKING
from datadog_api_client.model_utils import (
ModelNormal,
cached_property,
date,
datetime,
none_type,
unset,
UnsetType,
)
if TYPE_CHECKING:
from datadog_api_client.v1.model.synthetics_browser_error import SyntheticsBrowserError
from datadog_api_client.v1.model.synthetics_check_type import SyntheticsCheckType
from datadog_api_client.v1.model.synthetics_playing_tab import SyntheticsPlayingTab
from datadog_api_client.v1.model.synthetics_step_type import SyntheticsStepType
from datadog_api_client.v1.model.synthetics_core_web_vitals import SyntheticsCoreWebVitals
from datadog_api_client.v1.model.synthetics_step_detail_warning import SyntheticsStepDetailWarning
class SyntheticsStepDetail(ModelNormal):
    """Object describing a single step of a Synthetic test result.

    Auto-generated API model: ``openapi_types`` declares the accepted
    Python types per attribute and ``attribute_map`` translates the
    snake_case attribute names to the camelCase JSON wire names.
    """
    @cached_property
    def openapi_types(_):
        # Imports are local (duplicating the TYPE_CHECKING block above) so
        # the mapping is built lazily, avoiding circular-import problems.
        from datadog_api_client.v1.model.synthetics_browser_error import SyntheticsBrowserError
        from datadog_api_client.v1.model.synthetics_check_type import SyntheticsCheckType
        from datadog_api_client.v1.model.synthetics_playing_tab import SyntheticsPlayingTab
        from datadog_api_client.v1.model.synthetics_step_type import SyntheticsStepType
        from datadog_api_client.v1.model.synthetics_core_web_vitals import SyntheticsCoreWebVitals
        from datadog_api_client.v1.model.synthetics_step_detail_warning import SyntheticsStepDetailWarning
        return {
            "browser_errors": ([SyntheticsBrowserError],),
            "check_type": (SyntheticsCheckType,),
            "description": (str,),
            "duration": (float,),
            "error": (str,),
            "playing_tab": (SyntheticsPlayingTab,),
            "screenshot_bucket_key": (bool,),
            "skipped": (bool,),
            "snapshot_bucket_key": (bool,),
            "step_id": (int,),
            "sub_test_step_details": ([SyntheticsStepDetail],),
            "time_to_interactive": (float,),
            "type": (SyntheticsStepType,),
            "url": (str,),
            "value": (
                bool,
                date,
                datetime,
                dict,
                float,
                int,
                list,
                str,
                none_type,
            ),
            "vitals_metrics": ([SyntheticsCoreWebVitals],),
            "warnings": ([SyntheticsStepDetailWarning],),
        }
    # Python attribute name -> JSON wire name.
    attribute_map = {
        "browser_errors": "browserErrors",
        "check_type": "checkType",
        "description": "description",
        "duration": "duration",
        "error": "error",
        "playing_tab": "playingTab",
        "screenshot_bucket_key": "screenshotBucketKey",
        "skipped": "skipped",
        "snapshot_bucket_key": "snapshotBucketKey",
        "step_id": "stepId",
        "sub_test_step_details": "subTestStepDetails",
        "time_to_interactive": "timeToInteractive",
        "type": "type",
        "url": "url",
        "value": "value",
        "vitals_metrics": "vitalsMetrics",
        "warnings": "warnings",
    }
    def __init__(
        self_,
        browser_errors: Union[List[SyntheticsBrowserError], UnsetType] = unset,
        check_type: Union[SyntheticsCheckType, UnsetType] = unset,
        description: Union[str, UnsetType] = unset,
        duration: Union[float, UnsetType] = unset,
        error: Union[str, UnsetType] = unset,
        playing_tab: Union[SyntheticsPlayingTab, UnsetType] = unset,
        screenshot_bucket_key: Union[bool, UnsetType] = unset,
        skipped: Union[bool, UnsetType] = unset,
        snapshot_bucket_key: Union[bool, UnsetType] = unset,
        step_id: Union[int, UnsetType] = unset,
        sub_test_step_details: Union[List[SyntheticsStepDetail], UnsetType] = unset,
        time_to_interactive: Union[float, UnsetType] = unset,
        type: Union[SyntheticsStepType, UnsetType] = unset,
        url: Union[str, UnsetType] = unset,
        value: Union[Any, UnsetType] = unset,
        vitals_metrics: Union[List[SyntheticsCoreWebVitals], UnsetType] = unset,
        warnings: Union[List[SyntheticsStepDetailWarning], UnsetType] = unset,
        **kwargs,
    ):
        """
        Object describing a step for a Synthetic test.

        :param browser_errors: Array of errors collected for a browser test.
        :type browser_errors: [SyntheticsBrowserError], optional

        :param check_type: Type of assertion to apply in an API test.
        :type check_type: SyntheticsCheckType, optional

        :param description: Description of the test.
        :type description: str, optional

        :param duration: Total duration in millisecond of the test.
        :type duration: float, optional

        :param error: Error returned by the test.
        :type error: str, optional

        :param playing_tab: Navigate between different tabs for your browser test.
        :type playing_tab: SyntheticsPlayingTab, optional

        :param screenshot_bucket_key: Whether or not screenshots where collected by the test.
        :type screenshot_bucket_key: bool, optional

        :param skipped: Whether or not to skip this step.
        :type skipped: bool, optional

        :param snapshot_bucket_key: Whether or not snapshots where collected by the test.
        :type snapshot_bucket_key: bool, optional

        :param step_id: The step ID.
        :type step_id: int, optional

        :param sub_test_step_details: If this step includes a sub-test.
            `Subtests documentation <https://docs.datadoghq.com/synthetics/browser_tests/advanced_options/#subtests>`_.
        :type sub_test_step_details: [SyntheticsStepDetail], optional

        :param time_to_interactive: Time before starting the step.
        :type time_to_interactive: float, optional

        :param type: Step type used in your Synthetic test.
        :type type: SyntheticsStepType, optional

        :param url: URL to perform the step against.
        :type url: str, optional

        :param value: Value for the step.
        :type value: bool, date, datetime, dict, float, int, list, str, none_type, optional

        :param vitals_metrics: Array of Core Web Vitals metrics for the step.
        :type vitals_metrics: [SyntheticsCoreWebVitals], optional

        :param warnings: Warning collected that didn't failed the step.
        :type warnings: [SyntheticsStepDetailWarning], optional
        """
        # Only explicitly supplied values are forwarded; ``unset`` keeps the
        # attribute absent from the serialized payload.
        if browser_errors is not unset:
            kwargs["browser_errors"] = browser_errors
        if check_type is not unset:
            kwargs["check_type"] = check_type
        if description is not unset:
            kwargs["description"] = description
        if duration is not unset:
            kwargs["duration"] = duration
        if error is not unset:
            kwargs["error"] = error
        if playing_tab is not unset:
            kwargs["playing_tab"] = playing_tab
        if screenshot_bucket_key is not unset:
            kwargs["screenshot_bucket_key"] = screenshot_bucket_key
        if skipped is not unset:
            kwargs["skipped"] = skipped
        if snapshot_bucket_key is not unset:
            kwargs["snapshot_bucket_key"] = snapshot_bucket_key
        if step_id is not unset:
            kwargs["step_id"] = step_id
        if sub_test_step_details is not unset:
            kwargs["sub_test_step_details"] = sub_test_step_details
        if time_to_interactive is not unset:
            kwargs["time_to_interactive"] = time_to_interactive
        if type is not unset:
            kwargs["type"] = type
        if url is not unset:
            kwargs["url"] = url
        if value is not unset:
            kwargs["value"] = value
        if vitals_metrics is not unset:
            kwargs["vitals_metrics"] = vitals_metrics
        if warnings is not unset:
            kwargs["warnings"] = warnings
        super().__init__(kwargs)
|
UTF-8
|
Python
| false | false | 8,159 |
py
| 1,490 |
synthetics_step_detail.py
| 1,425 | 0.632063 | 0.629857 | 0 | 198 | 40.207071 | 119 |
c-mattingly/CarCollector
| 5,884,105,229,239 |
0e6f894a7cd5571a6e1d5d7db6eaea2b0494e7d8
|
b301b95be9df28457d40d7761892f254a8570d3d
|
/main_app/models.py
|
e5074c1bb61d761cb53bb3a974b2430c7e0724e4
|
[] |
no_license
|
https://github.com/c-mattingly/CarCollector
|
88e1988f71b7b21f979f73d2c7abf996cc306922
|
cbb4fb9c7ad52efdae29b8ce916103ebd7eb0ddd
|
refs/heads/main
| 2023-06-13T08:04:43.856784 | 2021-07-08T23:19:54 | 2021-07-08T23:19:54 | 383,950,979 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
from django.urls import reverse
# Create your models here.
# Maintenance-type choices: (1-char value stored in the DB, human label).
WORKS = (
    ('O', 'Oil Changed'),
    ('T', 'Tune Up'),
    ('R', 'Tire Rotation'),
)
class Part(models.Model):
    """A car part; shared across cars via the Car.parts many-to-many."""
    name = models.CharField(max_length=50)
    type = models.CharField(max_length=50)  # NOTE(review): shadows builtin ``type``; renaming needs a migration
    def __str__(self):
        # Human-readable label used by the admin and in templates.
        return self.name
    def get_absolute_url(self):
        # Canonical detail URL; "parts_detail" must be a named URL pattern.
        return reverse("parts_detail", kwargs={"pk": self.id})
class Car(models.Model):
    """A collectible car with its associated parts and maintenance log."""
    year = models.CharField(max_length=4)  # model year kept as text, e.g. "1998"
    make = models.CharField(max_length=100)
    model = models.CharField(max_length=100)
    comment = models.TextField(max_length=200)
    # Parts can belong to many cars and vice versa.
    parts = models.ManyToManyField(Part)
    def __str__(self):
        return self.model
    def get_absolute_url(self):
        # "detail" must be a named URL pattern taking a car_id kwarg.
        return reverse("detail", kwargs={'car_id': self.id})
class Maintenance(models.Model):
    """A single maintenance record (oil change / tune up / tire rotation)
    performed on a car on a given date."""
    date = models.DateField('maintenance date')
    work_done = models.CharField(
        max_length=1,
        choices=WORKS,
        # BUG FIX: the default was ``[0][0]`` — i.e. the int 0, which is not
        # a valid choice value; it was clearly meant to be WORKS[0][0] ('O').
        default=WORKS[0][0]
    )
    # Every record belongs to exactly one car; deleting the car deletes its log.
    car = models.ForeignKey(Car, on_delete=models.CASCADE)
    def __str__(self):
        return f"{self.get_work_done_display()} on {self.date}"
    class Meta:
        ordering = ['-date']  # newest maintenance first
|
UTF-8
|
Python
| false | false | 1,221 |
py
| 11 |
models.py
| 6 | 0.618346 | 0.604423 | 0 | 49 | 23.836735 | 63 |
martolini/djanglide
| 14,499,809,625,391 |
59dfab6aa64fadb53336a8dca132a103c31a94f2
|
db6772617d320342ea1120424b47a3b143eff682
|
/glide/app/marketplace/views.py
|
817f7822dc807a909e2b0d55ab9c3803d2e9a167
|
[] |
no_license
|
https://github.com/martolini/djanglide
|
a03a7acc615448ba76f57008c62449408fe24d82
|
9063099c0e035265b8e622a894713c0a5a34dadc
|
refs/heads/master
| 2016-09-06T04:26:41.794471 | 2014-02-03T00:53:12 | 2014-02-03T00:53:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from glide.app.marketplace.forms import SearchForm
from glide.core.profiles.models import Profile
from glide.app.interests.models import Interest
from django.db.models import Q
from glide.core.models import City
from math import ceil
def marketplace(request):
    """Marketplace landing page: currently just an alias for the search view."""
    return search(request)
def search(request):
    """Search local profiles by city and collect facet lists.

    NOTE(review): ``query`` is used as a City primary key (``city__id``),
    not free text — confirm against SearchForm and the template.
    """
    query = request.GET.get('query')
    form = SearchForm(query=query)
    hits = 0
    occupation_filter = []
    interest_filter = []
    results = None
    if query:
        results = Profile.objects.filter(local=True, city__id=query).order_by('user__first_name')
        hits = len(results)
    else:
        # No city chosen: show every local profile.
        results = Profile.objects.filter(local=True).order_by('user__first_name')
        hits = len(results)
    if hits and query:
        # Build the distinct interest/occupation facets shown beside results.
        for result in results:
            interests = result.interest_set.all()
            occupations = result.occupation_set.all()
            for interest in interests:
                if interest.name not in interest_filter:
                    interest_filter.append(interest.name)
            for occupation in occupations:
                if occupation.name not in occupation_filter:
                    occupation_filter.append(occupation.name)
    return render(request, 'marketplace/search.html', {'results': results,
                                                       'search_form': form,
                                                       'hits': hits,
                                                       'occupation_filter': occupation_filter,
                                                       'interest_filter': interest_filter,
                                                       'query': query})
def profile_has_interest(chosen_interests, profile):
    """Return True when any of the profile's interests appears in *chosen_interests*."""
    return any(item.name in chosen_interests for item in profile.interest_set.all())
def profile_has_occupation(chosen_occupations, profile):
    """Return True when any of the profile's occupations appears in *chosen_occupations*."""
    return any(item.name in chosen_occupations for item in profile.occupation_set.all())
def filter(request):
    """Re-run the city search and narrow results by the chosen facets.

    NOTE(review): this view shadows the ``filter`` builtin; renaming would
    also require a URLconf change, so it is only flagged here.
    """
    chosen_occupations = request.GET.getlist('occupation')
    chosen_interests = request.GET.getlist('interest')
    # NOTE(review): ``encode('ascii')`` suggests Python 2; under Python 3
    # this yields ``bytes`` and likely breaks the ORM lookup — verify.
    query = request.GET.get('query').encode('ascii')
    form = SearchForm(query=query)
    results = Profile.objects.filter(local=True, city__id=query).order_by('user__first_name')
    occupation_filter = []
    interest_filter = []
    # Rebuild the distinct facet lists from the unfiltered result set.
    for result in results:
        interests = result.interest_set.all()
        occupations = result.occupation_set.all()
        for interest in interests:
            if interest.name not in interest_filter:
                interest_filter.append(interest.name)
        for occupation in occupations:
            if occupation.name not in occupation_filter:
                occupation_filter.append(occupation.name)
    if len(chosen_occupations) > 0:
        # NOTE(review): excluding while iterating relies on queryset
        # laziness and issues one query per miss (N+1) — kept as-is.
        for result in results:
            if not profile_has_occupation(chosen_occupations, result):
                results = results.exclude(user=result.user)
    if len(chosen_interests) > 0:
        for result in results:
            if not profile_has_interest(chosen_interests, result):
                results = results.exclude(user=result.user)
    hits = len(results)
    return render(request, 'marketplace/search.html', {'results': results,
                                                       'search_form': form,
                                                       'hits': hits,
                                                       'occupation_filter': occupation_filter,
                                                       'interest_filter': interest_filter,
                                                       'query': query,
                                                       'chosen_occupations': chosen_occupations,
                                                       'chosen_interests': chosen_interests})
|
UTF-8
|
Python
| false | false | 3,295 |
py
| 99 |
views.py
| 59 | 0.713202 | 0.712291 | 0 | 104 | 30.682692 | 91 |
wq/django-data-wizard
| 9,380,208,590,773 |
52e1e7cde4fe5c75a035e3e7d44a929e069cd069
|
3f39a707fdcf8bd0184cf044a6cb039e659738b7
|
/data_wizard/sources/migrations/0003_model_options.py
|
8adf6e995e354739e301a2b32e4c132a56412e1b
|
[
"MIT"
] |
permissive
|
https://github.com/wq/django-data-wizard
|
b166882c163d19d3facb9cc648dc442ab8f64a51
|
2cf34acd96e6e6ad8eaff5f2fb5dee6dd94416d5
|
refs/heads/main
| 2023-07-20T03:59:38.137970 | 2023-07-13T21:33:40 | 2023-07-13T21:33:40 | 25,331,283 | 331 | 56 |
MIT
| false | 2023-06-08T15:57:41 | 2014-10-17T01:26:45 | 2023-06-08T03:29:25 | 2023-06-08T15:48:33 | 2,290 | 311 | 52 | 3 |
Python
| false | false |
# Generated by Django 4.2.2 on 2023-06-27 11:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: friendlier admin labels for the import-source models
    and an optional ``name`` field on both FileSource and URLSource."""
    dependencies = [
        ("sources", "0002_source_user"),
    ]
    operations = [
        # Admin-facing verbose names only; no database schema change.
        migrations.AlterModelOptions(
            name="filesource",
            options={
                "verbose_name": "File for Import",
                "verbose_name_plural": "Import via File",
            },
        ),
        migrations.AlterModelOptions(
            name="urlsource",
            options={
                "verbose_name": "URL for Import",
                "verbose_name_plural": "Import via URL",
            },
        ),
        # ``name`` becomes nullable/blank with a clarified label.
        migrations.AlterField(
            model_name="filesource",
            name="name",
            field=models.CharField(
                blank=True, max_length=255, null=True, verbose_name="Name (Optional)"
            ),
        ),
        migrations.AlterField(
            model_name="urlsource",
            name="name",
            field=models.CharField(
                blank=True, max_length=255, null=True, verbose_name="Name (Optional)"
            ),
        ),
    ]
|
UTF-8
|
Python
| false | false | 1,156 |
py
| 205 |
0003_model_options.py
| 96 | 0.50346 | 0.481834 | 0 | 41 | 27.195122 | 85 |
mkgray/mancala-reinforcement-learning
| 1,400,159,380,408 |
d16514b959c218f69c792d2db26a0d875479acda
|
389fa90c3a3387aaf01e493fdf9add7aec76200d
|
/mancala/mancala.py
|
90bb16453ff898ffadaf72a9439148f77a5c29d7
|
[
"MIT"
] |
permissive
|
https://github.com/mkgray/mancala-reinforcement-learning
|
9563bd94f0a2f5fdc1afdd278b063dc88cd7648c
|
5baee0a775ed41e30d498419692c51881047140f
|
refs/heads/master
| 2023-01-23T09:19:19.435932 | 2020-12-01T01:11:57 | 2020-12-01T01:11:57 | 112,622,220 | 0 | 0 |
MIT
| false | 2017-12-08T20:47:49 | 2017-11-30T14:39:23 | 2017-11-30T21:30:20 | 2017-12-08T20:47:49 | 35 | 0 | 0 | 0 |
Python
| false | null |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import random
import numpy as np
import agent
class Mancala:
def __init__(self, mancala_agent=None):
self.pockets = self.initialize_board()
# Load Mancala agent if necessary
if mancala_agent is None:
self.mancala_agent = agent.Agent()
else:
self.mancala_agent = mancala_agent
    def play_game(self, reinforcement_learning = False):
        """Run one complete game loop.

        With ``reinforcement_learning=True`` both sides are computers and the
        stored agent learns silently (no prompts, no board drawing); otherwise
        the players are chosen interactively via input() and the board is
        printed before every move.  The agent always plays as player 2.
        """
        # Reset board
        self.pockets = self.initialize_board()
        if reinforcement_learning == True:
            player_1 = 'computer'
            player_2 = 'computer'
            mancala_agent = self.mancala_agent
            mancala_agent.previous_state = self.get_state(player=2)
        else:
            # Assume both players are humans for now
            player_1 = 'human'
            player_2 = 'human'
            # Computer or human player 1
            if input("Player 1 human? (y/n) ") == 'n':
                player_1 = 'computer'
                #mancala_agent = agent.Agent()
            # Proc user for computer or human opponent
            if input("Player 2 human? (y/n) ") == 'n':
                player_2 = 'computer'
                mancala_agent = self.mancala_agent
                mancala_agent.previous_state = self.get_state(player=2)
        player_turn = 1
        previous_move = -1 # Previous move marked in board draw
        game_over = False
        while not(game_over):
            # Start by drawing the board
            if reinforcement_learning == False:
                self.draw_board(previous_move)
            # Ask for move from corresponding player
            if player_turn == 1:
                if player_1 == 'human':
                    move = int(input("Player 1 - Choose Pocket 1-6: "))
                    move = self.convert_move(move, player=1)
                else:
                    # Basic computer randomly chooses a Mancala position
                    valid_move = False
                    while not(valid_move):
                        move = self.convert_move(random.randint(1,6),player_turn)
                        valid_move = self.valid_move(move, player_turn)
            else:
                if player_2 == 'human':
                    move = int(input("Player 2 - Choose Pocket 1-6: "))
                    move = self.convert_move(move, player=2)
                else:
                    # Basic computer randomly chooses a Mancala position
                    valid_move = False
                    while not(valid_move):
                        computer_action = mancala_agent.take_action(self.get_state(player_turn))
                        move = self.convert_move(computer_action, player_turn)
                        valid_move = self.valid_move(move, player_turn)
                    # Inject the state into the agent for learning
                    mancala_agent.update_q(self.get_state(player_turn))
            # Check if move is valid prior to performing
            if not(self.valid_move(move, player_turn)):
                print("INVALID MOVE")
                continue
            # Perform assumed valid move and determine next to move
            player_turn, game_over = self.simulate_move(move, player_turn)
            # Update previous move
            previous_move = move
        if reinforcement_learning == True:
            # Assume mancala agent is player 2 for now
            # Final reward is player 2's mancala count.
            mancala_agent.update_q(self.get_state(player=2), self.pockets[13])
            # Update agent for persistence
            self.mancala_agent = mancala_agent
        if reinforcement_learning == False:
            # Draw final board and announce winner
            self.draw_board()
            winner = self.determine_winner()
            print("Winner: ", winner, "!!!")
def convert_move(self, move, player):
""" Converts the standard 1-6 input of the player into the corresponding
pocket for each player as needed
"""
if player == 1:
return move-1 # Shift left once to get the pocket position
if player == 2:
return move+6 # Shift right 6 spaces to refer to upper board spot
return False # Error case handling
def valid_move(self, pocket_position, player):
# Move is invalid if player chooses anything other than own pockets
player_1_side = (0 <= pocket_position <= 5)
player_2_side = (7 <= pocket_position <= 12)
# Must have stones in the pocket to be valid
if self.pockets[pocket_position] > 0:
if player_1_side and player==1:
return True
if player_2_side and player==2:
return True
# All other moves are false
return False
def initialize_board(self):
num_stones_on_start = 4
pockets = [num_stones_on_start]*14
pockets[6] = 0
pockets[13] = 0
return pockets
def check_game_over(self):
""" Checks if all pockets are empty of stones. If so assigns all
remaining stones to the appropriate mancala.
"""
game_over = False
empty_player_1 = sum(self.pockets[:6]) == 0
empty_player_2 = sum(self.pockets[7:13]) == 0
# If player 2 is empty, collect player 1's stones
if empty_player_2:
# Put remaining stones in player 2's mancala
self.pockets[6] += sum(self.pockets[:6])
self.pockets[:6] = [0]*6
game_over = True
# If player 1 is empty, collect player 1's stones
if empty_player_1:
# Put remaining stones in player 2's mancala
self.pockets[13] += sum(self.pockets[7:13])
self.pockets[7:13] = [0]*6
game_over = True
return game_over
def determine_winner(self):
if self.pockets[13]>self.pockets[6]:
return "Player 2"
elif self.pockets[13]<self.pockets[6]:
return "Player 1"
return "Draw"
def switch_player(self, player):
if player == 1:
return 2
return 1
def capture(self, pocket_position, mancala_pocket):
""" Captures all stones in the pocket and pocket opposite, goes into
The proper mancala pocket specified as input
"""
opposite_pocket_dict = {0: 12, 1:11, 2:10, 3:9, 4:8, 5:7,
7:5, 8:4, 9:3, 10:2, 11:1, 12:0}
# Take the stone from the pocket itself
self.pockets[mancala_pocket] += self.pockets[pocket_position]
self.pockets[pocket_position] = 0
# Take the stones from the opposite pocket
opposite_pocket = opposite_pocket_dict[pocket_position]
self.pockets[mancala_pocket] += self.pockets[opposite_pocket]
self.pockets[opposite_pocket] = 0
return True
    def simulate_move(self, pocket_position, player):
        """Sow the stones from *pocket_position* counter-clockwise.

        Skips the opponent's mancala, performs a capture when the last
        stone lands in an empty pocket on the mover's own row, and grants
        an extra turn when it lands in the mover's own mancala.
        Returns (next_player, game_over).
        """
        # Condense to local version of pockets
        pockets = self.pockets
        stones_drawn = pockets[pocket_position]
        pockets[pocket_position] = 0
        # Inefficient loop, clean up in future
        while stones_drawn > 0:
            pocket_position += 1
            # Case to handle looping back to start of board
            if pocket_position > len(pockets)-1:
                pocket_position = 0
            # Consider special cases (mancala pocket) before normal stone drops
            mancala_1_position = pocket_position==6
            mancala_2_position = pocket_position==13
            player_1 = player == 1
            player_2 = player == 2
            if mancala_1_position and player_2:
                continue # Skip stone drop and proceeding logic
            if mancala_2_position and player_1:
                continue # Skip stone drop and proceeding logic
            # Stone drop
            pockets[pocket_position] += 1
            stones_drawn -= 1
        # Determine if capture occurs
        end_on_player_1_side = (0 <= pocket_position <= 5)
        end_on_player_2_side = (7 <= pocket_position <= 12)
        # Only capture if stone is empty (has 1 stone after placement)
        stone_was_empty = pockets[pocket_position] == 1
        # Player 1 capture
        if player_1 and end_on_player_1_side and stone_was_empty:
            self.capture(pocket_position, 6)
        # Player 2 capture
        if player_2 and end_on_player_2_side and stone_was_empty:
            self.capture(pocket_position, 13)
        # Determine next player
        if mancala_1_position and player_1:
            next_player = player # Player 1 Mancala gets another turn
        elif mancala_2_position and player_2:
            next_player = player # Player 2 Mancala gets another turn
        else:
            next_player = self.switch_player(player) # All else switch player
        game_over = self.check_game_over()
        return next_player, game_over
def draw_board(self, previous_move=-1):
previous_move_marker = '__'
# Create copy for modification
pockets = list(self.pockets)
# Convert the last board movement to a special marker to stand out
# only if previous move is valid
if previous_move >= 0:
pockets[previous_move] = previous_move_marker
# Unpack list of stones in each spot for readability
pocket_1 = "{0:0>2}".format(pockets[0])
pocket_2 = "{0:0>2}".format(pockets[1])
pocket_3 = "{0:0>2}".format(pockets[2])
pocket_4 = "{0:0>2}".format(pockets[3])
pocket_5 = "{0:0>2}".format(pockets[4])
pocket_6 = "{0:0>2}".format(pockets[5])
mancala_1 = "{0:0>2}".format(pockets[6])
pocket_7 = "{0:0>2}".format(pockets[7])
pocket_8 = "{0:0>2}".format(pockets[8])
pocket_9 = "{0:0>2}".format(pockets[9])
pocket_10 = "{0:0>2}".format(pockets[10])
pocket_11 = "{0:0>2}".format(pockets[11])
pocket_12 = "{0:0>2}".format(pockets[12])
mancala_2 = "{0:0>2}".format(pockets[13])
lower_pockets = [pocket_1,pocket_2,pocket_3,pocket_4,pocket_5,pocket_6]
upper_pockets = [pocket_12,pocket_11,pocket_10,pocket_9,pocket_8,pocket_7]
print("___________________________________________________________________")
print("| ____ ____ ____ ____ ____ ____ ____ |")
print("| | | [_{}_] [_{}_] [_{}_] [_{}_] [_{}_] [_{}_] ____ |".format(*upper_pockets))
print("| | {} | | | |".format(mancala_2))
print("| |____| ____ ____ ____ ____ ____ ____ | {} | |".format(mancala_1))
print("| [_{}_] [_{}_] [_{}_] [_{}_] [_{}_] [_{}_] |____| |".format(*lower_pockets))
print("|_________________________________________________________________|")
return True
def get_state(self, player):
""" Returns the unique numeric state of the board for each player from
the players own perspective. Mancala pockets not necessary but they
can act as the reward to the computer at the end of the game.
"""
assumed_max_stones_per_pocket = 16
pocket_copy = list(self.pockets)
# Flip the board interpretation if player 2
if player == 1:
relevant_pockets = pocket_copy[:6] + pocket_copy[7:13]
else:
relevant_pockets = pocket_copy[7:13] + pocket_copy[:6]
# # Convert mancala base counting system to decimal for state
# # Conversion similar to octal-2-decimal except the base number
# # is max_stones+1
# base_number = assumed_max_stones_per_pocket + 1
#
# # Use int64 due to massive number of combinations which may occur
# # Should be optimized in the future to account for many situations
# # which do not occur in practice (eg, 12 stones in all pockets)
# multiplier_index = np.arange(len(relevant_pockets)-1,-1,-1, dtype='int64')
# multipliers = base_number**multiplier_index
# state_pieces = multipliers*np.array(relevant_pockets)
# state = np.sum(state_pieces)
return relevant_pockets
|
UTF-8
|
Python
| false | false | 12,862 |
py
| 7 |
mancala.py
| 5 | 0.52037 | 0.497357 | 0 | 332 | 37.743976 | 107 |
RuaBe/C4T-BO4
| 1,211,180,825,819 |
1a86a9e329188a5c99078b827cd7e159d7462fd9
|
ed523365d990f353a2b78e015bb1a6438afaa90b
|
/Session10/drill6.py
|
cd4c38b3fac74c57f0fe7f5c09cccbab566cf5d0
|
[] |
no_license
|
https://github.com/RuaBe/C4T-BO4
|
1cea76d7ac444952c6de8be8a38a3f28676be001
|
fdbba46946b52d15176d95196c0944d3854abb2c
|
refs/heads/master
| 2020-06-05T20:41:25.956034 | 2019-07-20T13:35:08 | 2019-07-20T13:35:08 | 192,540,777 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Payroll records: each entry holds an employee's name, job, age and
# salary multiplier.
bangLuong = [
    {
        "name": "Huy",
        "job": "Waiter",
        "age": 12,
        "salary": 0.8,
    },
    {
        "name": "Huyen",
        "job": "Waitress",
        "age": 14,
        "salary": 1,
    },
]

# Compute each employee's pay (age * salary factor).  Only the value from
# the final iteration is kept in ``luong``, exactly as before.
for nhan_vien in bangLuong:
    luong = nhan_vien["age"] * nhan_vien["salary"]
|
UTF-8
|
Python
| false | false | 326 |
py
| 33 |
drill6.py
| 33 | 0.389571 | 0.368098 | 0 | 17 | 17.823529 | 54 |
gerlaxrex/Robotics-Car-Project
| 1,606,317,776,019 |
4d0fa887e5b8554aa90961d44629c285815e51ea
|
93a65e1cd4816628050f49967deb02d545d07c0f
|
/robotics_project_1/cfg/parameters.cfg
|
94a2037321fc3c38d8f77739002ebc052be89f00
|
[] |
no_license
|
https://github.com/gerlaxrex/Robotics-Car-Project
|
13ab90fb93790df8c6ad8f7d02a0310092e58e1b
|
f45a316d4da97ea8ae34f57bf68985f93e03c695
|
refs/heads/master
| 2020-09-29T01:59:08.855580 | 2019-12-09T16:52:52 | 2019-12-09T16:52:52 | 226,921,252 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
PACKAGE = "robotics_project_1"

from dynamic_reconfigure.parameter_generator_catkin import *

gen = ParameterGenerator()

# Enum used to switch the odometry integration model at runtime.
odometry_type = gen.enum([gen.const("Differential", bool_t, False, "Differential Drive odometry publishing"), gen.const("Ackerman", bool_t, True, "Ackerman Drive odometry publishing")], "Odometry Publish type")

gen.add("odom_type", bool_t, 0, "odometry type publishing", False, edit_method = odometry_type)
# BUG FIX: the "theta" description was a copy-paste of the x-position text.
# theta is the car heading, bounded to +/- 2*pi radians.
gen.add("theta", double_t , 0, "heading (theta) of the car in radians", 0.0, -6.28318530718, 6.28318530718)
gen.add("posx", double_t , 0, "x position of the car (rear wheels center)", 0.0, -30.0, 30.0)
gen.add("posy", double_t , 0, "y position of the car (rear wheels center)", 0.0, -30.0, 30.0)

exit(gen.generate("robotics_project_1", "publisher", "carOdometry"))
|
UTF-8
|
Python
| false | false | 830 |
cfg
| 7 |
parameters.cfg
| 5 | 0.706024 | 0.648193 | 0 | 12 | 67.333333 | 210 |
npnet/MyCode
| 3,186,865,746,882 |
c0c81363924ca0871d1be8db5f3b9ae3ea06df33
|
8d7514271b08caaa8c757d0747853d54642e9e63
|
/tools/py/parse_mem_cfg.py
|
c150c2543362dfa1de35ce1096a25aa52b31a199
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/npnet/MyCode
|
0bd3e32d5890c411b16e01cdb8acf5624c7c0a02
|
9fa7aa78d62f1cd3c2a39a36875cb41cf00522fe
|
refs/heads/master
| 2023-07-23T13:19:29.575050 | 2018-07-05T13:01:09 | 2018-07-05T13:01:09 | 140,229,839 | 1 | 2 | null | false | 2023-07-21T10:21:01 | 2018-07-09T04:07:28 | 2023-06-04T13:37:34 | 2023-07-16T23:03:36 | 1,644,585 | 1 | 2 | 0 |
C
| false | false |
import os, sys
import re
import locale
import tempfile
import subprocess
from optparse import OptionParser
# from parse_mem_cfg_fpm_table import *
# =============== Const Setting ==================
g_build_path = r'%s\build'
g_cfg_path = r'%s\plutommi\Customer\ResGenerator\debug\mmi_mem_cfg.txt'
g_app_data_path = r'%s\plutommi\Customer\CustomerInc\mmi_rp_app_data.h'
g_obj_log_path = r'%s\build\%s\log\objcheck.log'
re_app_data = re.compile(r'#define\s+ASM_(CONC_)?(HEAP|EXTRA_BASE|APP_FG|CUI_BASE|CUI_FG|BASE|FG|TOTAL)_SIZE_(\w+)\s+\(([\W\w]+)\)')
re_app_data2 = re.compile(r'\s+ASM_(CONC_)?(HEAP|EXTRA_BASE|APP_FG|CUI_BASE|CUI_FG|BASE|FG|TOTAL)_SIZE_(\w+)\s=\s\(([\W\w]+)\),')
re_obj_check_1 = re.compile(r'\[D\]\s+app_asm_pool_(global|base|fg|sub|heap|total|extra_base|app_fg|cui_base|cui_fg)_union->(CONC_)?([\w]+)\s+(\d+)')
re_obj_check_2 = re.compile(r'\[\D\]\s+ASM_(CONC_)?(HEAP|EXTRA_BASE|APP_FG|CUI_BASE|CUI_FG|BASE|FG|TOTAL)_SIZE_([\w]+)\s+\(([\W\w]+)\)')
re_obj_check_total = re.compile(r'\[D\]\s+app_asm_pool_(sub_|global_|)union\s+(\d+)')
re_app_data_tag = re.compile(r'ASM_(CONC_)?(HEAP|EXTRA_BASE|APP_FG|CUI_BASE|CUI_FG|BASE|FG|TOTAL)_SIZE_(\w+)')
# =============== Global Setting ==================
g_setting_eval_macro = False
g_count_python_eval = 0
g_count_perl_eval = 0
g_bar_color = {
'heap' : '#9999FF',
'extra_base': '#99FF99',
'app_fg' : '#FF9999',
'base' : '#99FFFF',
'cui_base' : '#99FFFF',
'fg' : '#FFCCFF',
'cui_fg' : '#FFCCFF',
'total' : '#FFFF33',
'number' : '#FF3380',
'global' : '#FF9900',
}
# =============== Function ==================
def parse_cfg_file(cfg_file):
    """Parse mmi_mem_cfg.txt.

    Returns (xml_list, conc_list):
      xml_list  -- {app_id: [record dict, ...]} for "[id]..." lines;
      conc_list -- [(sorted-or-ordered app id list, tag), ...] for
                   "[concurrent]" / "[concurrent_w_order]" lines.
    """
    xml_list = {}
    conc_list = []
    for ln in file(cfg_file):  # NOTE: Python 2 built-in file()
        ln = ln.rstrip()
        if ln.startswith("[id]"):
            # Record line "[attr1]v1[attr2]v2..." -> {attr: value}.
            results = {}
            for p in ln.split('['):
                if not p:
                    continue
                pair = p.strip().split(']')
                results[pair[0]] = pair[1]
            if results['id'] not in xml_list:
                xml_list[results['id']] = []
            xml_list[results['id']].append(results)
        elif ln.startswith("[concurrent]"):
            tag = ln[1:11]   # "concurrent"
            ln = ln[12:]     # payload after the bracketed tag
            results = [p.strip() for p in ln.split(',')]
            results.sort()   # order-insensitive: normalize the id list
            conc_list.append((results, tag))
        elif ln.startswith("[concurrent_w_order]"):
            tag = ln[1:19]   # "concurrent_w_order"
            ln = ln[20:]
            results = [p.strip() for p in ln.split(',')]
            conc_list.append((results, tag))  # order preserved
        else:
            print >> sys.stderr, "Line not handled: %s" % (ln)
    return (xml_list, conc_list)
def parse_app_data_file(app_file):
    """Parse mmi_rp_app_data.h for ASM_*_SIZE_* macro definitions.

    Returns {app_id: {mem_type: macro_text}}.  A CONC_ concurrency prefix
    is folded into the app id.
    """
    data_list = {}
    for ln in file(app_file):
        m = re_app_data.match(ln)
        if not m:
            m = re_app_data2.match(ln)  # enum-style fallback syntax
        if not m:
            continue
        conc = m.group(1)
        app_id = m.group(3)
        mem_type = m.group(2)
        mem_value = m.group(4)
        if conc:
            app_id = conc + app_id
        # Collapse the cache-factor helper call into a short display token.
        mem_value = mem_value.replace("ASM_FG_EXTRA(VRT_MEM_DEFAULT_CACHE_FACTOR)", "VRT_CACHE")
        try:
            data_list[app_id][mem_type] = mem_value
        except KeyError:
            data_list[app_id] = {mem_type: mem_value}
    return data_list
def parse_obj_check_file(obj_file):
    """Parse objcheck.log.

    Returns (size_list, detail_list):
      size_list   -- {app_id: {SIZE_TYPE: bytes}} plus pseudo-keys
                     '_GLOBAL' (per-app global usage) and '_ASM'
                     (whole-pool totals: total/sub/global);
      detail_list -- {app_id: {SIZE_TYPE: echoed macro text}}.
    """
    size_list = {}
    detail_list = {}
    for ln in file(obj_file):
        m = re_obj_check_1.match(ln)
        if m:
            conc = m.group(2)
            app_id = m.group(3)
            size_type = m.group(1)
            # -4: presumably strips a 4-byte union header word from the
            # reported size -- confirm against the objcheck format.
            size_value = int(m.group(4))-4
            if size_type == 'sub':
                size_type = 'total'
            size_type = size_type.upper()
            if conc:
                app_id = conc + app_id
            if size_type == "GLOBAL":
                # Globals live in their own pseudo-app bucket.
                if size_value < 0 or app_id == "sum_total":
                    continue
                try:
                    size_list["_GLOBAL"][app_id] = size_value
                except KeyError:
                    size_list["_GLOBAL"] = {app_id:size_value}
                continue
            try:
                size_list[app_id][size_type] = size_value
            except KeyError:
                size_list[app_id] = {size_type: size_value}
            continue
        m = re_obj_check_2.match(ln)
        if m:
            conc = m.group(1)
            app_id = m.group(3)
            detail_type = m.group(2)
            detail_value = m.group(4).strip()
            if conc:
                app_id = conc + app_id
            detail_value = detail_value.replace("ASM_FG_EXTRA(VRT_MEM_DEFAULT_CACHE_FACTOR)", "VRT_CACHE")
            try:
                detail_list[app_id][detail_type] = detail_value
            except KeyError:
                detail_list[app_id] = {detail_type: detail_value}
            continue
        m = re_obj_check_total.match(ln)
        if m:
            t = m.group(1)
            if t == "":
                t = "total"
            else:
                t = t[:-1]  # drop trailing '_' from 'sub_' / 'global_'
            size = int(m.group(2))
            if size < 4:
                size = 0
            try:
                size_list['_ASM'][t] = size
            except KeyError:
                size_list['_ASM'] = {t:size}
            continue
    return (size_list, detail_list)
# split '+' and 'max' into pair
# in: a + max(b, c)
# out: [(a, b), (a, c)]
def parse_macro(input_str):
    """Expand a macro of '+' terms and max() calls into alternative paths.

    Each path is a list of [token, flag] pairs.  flag 0 marks a token
    inherited from a sibling max() branch that is kept only as padding so
    alternative paths line up column-wise when rendered.
    """
    def parse_str(input_str, tag_list, result_array):
        # Recursive worker: consume the leading term, accumulate into
        # tag_list, and emit one finished path per max() branch.
        input_str = input_str.strip()
        total = len(input_str)
        if total == 0:
            result_array.append(list(tag_list))
            return
        if input_str.find('max') == 0:
            # extract a,b,c from "max(a, b) + c"
            j = 3
            level = 0
            a_start = a_end = b_start = b_end = c_start = c_end = 0
            while j < total:
                c = input_str[j]
                if c == '(':
                    level += 1
                    if level == 1:
                        a_start = j+1
                elif c == ',':
                    if level == 1:
                        a_end = j
                        b_start = j+1
                elif c == ')':
                    level -= 1
                    if level == 0:
                        b_end = j
                        c_start = j+1
                        c_end = total
                        break
                j += 1
            # Branch a keeps the accumulated tags as-is; branch b demotes
            # them to flag 0 (padding) so only one branch renders them.
            parse_str(input_str[a_start:a_end] + input_str[c_start:c_end], list(tag_list), result_array)
            parse_str(input_str[b_start:b_end] + input_str[c_start:c_end], [[tag, 0] for tag, f in tag_list], result_array)
        else:
            p = input_str.find("+")
            # extract a,b from "a+b"
            if p >= 0:
                a_str = input_str[:p].strip()
                b_str = input_str[p+1:]
            else:
                a_str = input_str
                b_str = ""
            tag_list.append([a_str, 1])
            parse_str(b_str, tag_list, result_array)
    result_list = []
    parse_str(input_str, [], result_list)
    return result_list
def split_str(input_str, delim):
    """Split *input_str* on *delim*, ignoring delimiters nested in parens.

    Each piece is stripped; an empty trailing piece is dropped.
    """
    pieces = []
    depth = 0
    segment_start = 0
    for idx, ch in enumerate(input_str):
        if ch == '(':
            depth += 1
        elif ch == delim:
            if depth == 0:
                pieces.append(input_str[segment_start:idx].strip())
                segment_start = idx + 1
        elif ch == ')':
            depth -= 1
    tail = input_str[segment_start:].strip()
    if tail:
        pieces.append(tail)
    return pieces
def parse_app_info(input_str):
    """Parse an ASM_*_SIZE_* tag into (app_id, mem_type).

    A CONC_ prefix is folded into the app id.  Returns (None, None) when
    the tag does not match re_app_data_tag.
    """
    match = re_app_data_tag.match(input_str)
    if not match:
        return (None, None)
    conc_prefix = match.group(1)
    mem_type = match.group(2)
    app_id = match.group(3)
    if conc_prefix:
        app_id = conc_prefix + app_id
    return (app_id, mem_type)
def eval_string(input_str):
    """Numerically evaluate a size-macro expression.

    Returns (value, error_text).  Fast path: plain arithmetic goes through
    Python's eval().  Expressions containing '?' (C ternary) or max() are
    handed to a throw-away Perl script.  Returns (0, "") when evaluation is
    disabled, on unknown identifiers, or on any Perl failure.

    NOTE: eval() here runs on build-generated macro text, which is trusted
    input; do not feed it user-supplied strings.
    """
    global g_count_python_eval
    global g_count_perl_eval

    if not g_setting_eval_macro:
        return (0, "")

    # try python version
    if input_str.find("?") < 0 and input_str.find("max") < 0:
        try:
            result_value = int(eval(input_str))
            if result_value >= 0:
                g_count_python_eval += 1
                return (result_value, "")
        except NameError:
            # Unresolved identifier in the macro: give up quietly.
            return (0, "")
        except SyntaxError:
            pass  # not valid Python (e.g. ternary) -- fall through to Perl

    # try perl version
    #print >> sys.stderr, input_str
    g_count_perl_eval += 1
    fd, fn = tempfile.mkstemp(text=True)
    # BUG FIX: the original os.write() calls emitted no newlines, so the
    # whole script collapsed onto the '#!' line and Perl treated it as one
    # big comment -- the fallback always printed nothing and returned 0.
    os.write(fd, "#!/usr/local/bin/perl -w\n")
    os.write(fd, "use List::Util qw[min max];\n")
    os.write(fd, '$a = "%s";\n' % (input_str))
    os.write(fd, "print eval($a);\n")
    os.close(fd)
    run_cmd = ["perl", fn]
    # NOTE(review): shell=True combined with an argv list is only sensible
    # on Windows (the tool targets Windows build trees) -- confirm before
    # running elsewhere.
    result = subprocess.Popen(run_cmd, shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    (my_out, my_err) = result.communicate()
    os.remove(fn)
    try:
        result_value = int(my_out)
    except (TypeError, ValueError):
        # Empty or non-numeric Perl output.
        result_value = 0
    return (result_value, str(my_err))
def get_div_str(color, width, hint, data, align_right=False):
    """Render one colored <div> bar segment.

    *width* is a percentage string; the literal string "0" yields an empty
    result so zero-width segments disappear entirely.  *hint* becomes a
    title tooltip when non-empty.
    """
    if width == "0":
        return ""
    align_attr = "align=right" if align_right else ""
    hint_attr = "title='%s'" % (hint,) if hint else ""
    template = "<div %s style='float:left;background-color:%s;width:%s%%;height:18px;' %s>%s</div>"
    return template % (align_attr, color, width, hint_attr, data)
def get_percent_str(size, total):
    """Return size/total as a percentage string with two decimals.

    Returns "0" for a zero size, and also — robustness fix — for a zero
    total, which previously raised ZeroDivisionError.
    """
    if size == 0 or total == 0:
        return "0"
    # Basis points first (floored under Python 2 int division), then scale
    # down to a two-decimal percentage.
    return "%.2f" % (float(size * 10000 / total) / 100)
def number_to_str(number):
    """Human-readable byte count: '??' for negatives, else B/K/M units."""
    if number < 0:
        return "??"
    mega = 1024 * 1024
    if number >= mega:
        return "%.1fM" % (float(number) / mega)
    if number >= 1024:
        return "%.1fK" % (float(number) / 1024)
    return "%dB" % (number,)
def get_converted_macro(path_list, app_list):
    """Render parse_macro() paths as HTML, one line per path.

    Recognized ASM tags become <a> links to the app's anchor; flag-0
    entries render as blank padding so alternative max() branches line up.
    """
    # First pass: tag -> (replacement html, display length).
    map_list = {}
    for scen in path_list:
        for app_tag, flag in scen:
            appid, memtype = parse_app_info(app_tag)
            if not appid:
                continue
            try:
                size = app_list[appid].get_mem_size(memtype)
            except:
                size = 0
            disp_str = "%s(%s)" % (memtype.lower(), appid)
            map_list[app_tag] = ("<a class=app href='#%s' title='Size: %d(%s)'>%s</a>" % (appid, size, number_to_str(size), disp_str), len(disp_str))
    # Second pass: emit " + "-joined entries; flag 0 -> same-width spaces.
    result_str = ""
    for scen in path_list:
        for app_tag, flag in scen:
            appid, memtype = parse_app_info(app_tag)
            if not appid:
                if flag:
                    result_str += app_tag + " + "
                else:
                    result_str += " " * (len(app_tag)+3)
            else:
                rep_str, rep_len = map_list[app_tag]
                if flag:
                    result_str += rep_str + " + "
                else:
                    result_str += " " * (rep_len+3)
        # Trim the trailing " + " (or equivalent padding) and break the line.
        result_str = result_str[:-3] + "<br>"
    return result_str
class app_info:
def __init__(self, appid, cfg_list, data_list, size_list, detail_list):
self.id = appid
self.is_conc = appid.startswith("CONC")
self.is_sub = False
try:
self.cfg_info = cfg_list[appid]
except KeyError:
self.cfg_info = []
self.is_multi = len(self.cfg_info) > 1
try:
self.data_info = data_list[appid]
except KeyError:
self.data_info = {}
try:
self.size_info = size_list[appid]
except KeyError:
self.size_info = {}
try:
self.detail_info = detail_list[appid]
except KeyError:
self.detail_info = {}
# set default size if not exist
if 'FG' not in self.size_info:
self.size_info['FG'] = 0
if 'BASE' not in self.size_info:
self.size_info['BASE'] = 0
if 'TOTAL' not in self.size_info:
self.size_info['TOTAL'] = self.size_info['FG'] + self.size_info['BASE']
# setup dependency
#self.dep_list = []
#re_token = re.compile(r'\b\w+\b')
#token_list = []
#for value in self.cfg_info.itervalues():
# token_list += re_token.findall(value)
#for token in token_list:
# if token in cfg_list and token != self.id:
# self.dep_list.append(token)
def is_venus(self):
if 'app_type' in self.data and self.data['app_type'] == 'venus':
return True
return False
def get_mem_size(self, t):
try:
return self.size_info[t.upper()]
except KeyError:
return 0
def get_macro_str(self, t):
try:
return self.data_info[t.upper()]
except KeyError:
return ""
def get_max_path_list(self, memtype, app_list):
# all path
path_list = parse_macro(self.get_macro_str(memtype))
# find the max path
max_path = path_list[0]
if len(path_list) > 1:
max_size = 0
for path in path_list:
size = 0
for app_tag, flag in path:
appid, memtype = parse_app_info(app_tag)
if appid and appid in app_list:
size += app_list[appid].get_mem_size(memtype)
if size > max_size:
max_size = size
max_path = path
# result
result_path = []
for app_tag, flag in max_path:
appid, memtype = parse_app_info(app_tag)
if appid == self.id:
result_path += self.get_max_path_list(memtype, app_list)
else:
result_path.append([app_tag, 1])
return result_path
def print_html(self, app_list):
total_size = self.get_mem_size('total')
base_size = self.get_mem_size('base')
fg_size = self.get_mem_size('fg')
# title
if self.is_conc:
title_str = self.id
else:
title_str = "%s = %s (B:%s, F:%s)" % (self.id, number_to_str(total_size), number_to_str(base_size), number_to_str(fg_size))
if base_size > total_size:
title_str = "<font color=red>" + title_str + "</font>"
print "<a name='%s'><H2>%s</H2>" % (self.id, title_str)
# memory layout
self.print_memory_layout(app_list)
# Max path
'''
if self.get_mem_size('total') > 0:
print "<H4>Maximum path</H4>"
print "<table class=datatable><th>Type</th><th colspan=2>Size</th><th>Path</th>"
if self.is_conc:
print "<tr><td>total</td><td>%d</td><td>%s</td><td>%s</td></tr>" % (total_size, number_to_str(total_size), get_converted_macro([self.get_max_path_list('TOTAL', app_list)], app_list))
else:
print "<tr><td>base</td><td>%d</td><td>%s</td><td>%s</td></tr>" % (base_size, number_to_str(base_size), get_converted_macro([self.get_max_path_list('BASE', app_list)], app_list))
print "<tr><td>fg</td><td>%d</td><td>%s</td><td>%s</td></tr>" % (fg_size, number_to_str(fg_size), get_converted_macro([self.get_max_path_list('FG', app_list)], app_list))
print "</table>"
'''
# Macro
#self.print_macro_str(app_list)
# XML
self.print_xml_str();
# end
print "<br><a href='#top'>back to top</a>"
print "<HR>"
#
# when display_full = True, this API display a "full" memory layout (ruler, total, detail), used by normal app / concurrent
# when display_full = False, this API display only "partial" memory layout (only detail), used by sub-config
#
def print_memory_layout(self, app_list, display_full = True, display_total_size = 0):
total_size = self.get_mem_size('total')
if total_size == 0:
return
if not display_full:
total_size = display_total_size
if display_full:
#print >> sys.stderr, self.id
print "<H4>Memory layout</H4>"
print "<table class=bartable width=100%>"
for size, name in [(1024*1024, '1M'), (1024*100, '100K')]:
if total_size > size:
print "<tr><td>"
print get_div_str("#CCC", get_percent_str(size, total_size), name, name);
print "</td></tr>"
break
# Total layout
def print_path_list(result_list, app_list, padding_str=""):
for scen in result_list:
output_str = ""
for app_tag, flag in scen:
#print >> sys.stderr, app_tag
appid, memtype = parse_app_info(app_tag)
if not appid:
size, err_str = eval_string(app_tag)
memtype = 'NUMBER'
hint_str = app_tag
content_str = 'number'
elif appid in app_list:
size = app_list[appid].get_mem_size(memtype)
hint_str = "%s(%s)" % (memtype.lower(), appid)
if appid == self.id:
content_str = memtype.lower()
else:
content_str = "<a class=app href='#%s'>%s</a>" % (appid, appid)
if memtype == 'TOTAL':
base_size = app_list[appid].get_mem_size('BASE')
if base_size > 0:
content_str += "<div style='background-color:%s;width:%s%%;height:8px;' title='base = %s'></div>" % (g_bar_color['base'], get_percent_str(base_size, size), number_to_str(base_size))
else:
size = 0
if size > 0:
output_str += get_div_str(g_bar_color[memtype.lower()], get_percent_str(size, total_size), "%s = %s" % (hint_str, number_to_str(size)), content_str)
if output_str:
print "<tr><td>"
print padding_str
print output_str
print "</td></tr>"
# cui part, output "<tr><td>xxx</td></tr>"
def print_cui(selfid, cui_str, app_list, padding_str, total_size):
# recursively, output xxx
def parse_cui_str(cui_str, kind, app_list, total_size=0xFFFFFFFF):
#if selfid == 'APP_CALLSET2' and total_size != 0xFFFFFFFF:
# print >> sys.stderr, "[%5s] %s" % (kind, cui_str)
return_size = 0
return_str = ""
m = re.match(r'^\s*(max)?\s*\(\s*([\w\W]+?)\s*\)\s*$', cui_str)
if m:
if m.group(1) == 'max':
f = 'or'
delim = ','
content = m.group(2)
else:
f = 'and'
delim = '+'
content = m.group(2)
else:
f = 'and'
delim = '+'
content = cui_str
l = split_str(content, delim)
if len(l) == 1:
appid = l[0]
if appid in app_list:
if kind == 'total':
memlist = ('base', 'fg')
elif kind == 'base':
memlist = ('base',)
for memtype in memlist:
size = app_list[appid].get_mem_size(memtype)
if size == 0:
continue
hint_str = "%s(%s)" % (memtype, appid)
if appid == selfid:
content_str = memtype
else:
content_str = "<a class=app href='#%s'>%s</a>" % (appid, appid)
return_str += get_div_str(g_bar_color[memtype], get_percent_str(size, total_size), "%s = %s" % (hint_str, number_to_str(size)), content_str)
return_size += size
elif f == 'or':
max_size = 0
for s in l:
r1, foo = parse_cui_str(s, kind, app_list)
if r1 > max_size:
max_size = r1
str_list = []
for s in l:
str_list.append(parse_cui_str(s, kind, app_list, max_size)[1])
return_str = "<table class=cuitable width='%s%%'><tr><td>%s</tr></td></table>" % (get_percent_str(max_size, total_size), "</tr></td><tr><td>".join(str_list))
return_size = max_size
elif f == 'and':
if kind == 'total':
max_size = 0
for s in l:
sum_size = 0
for s2 in l:
if s2 != s:
sum_size += parse_cui_str(s2, "base", app_list)[0]
sum_size += parse_cui_str(s, "total", app_list)[0]
if sum_size > max_size:
max_size = sum_size
str_list = []
for s in l:
sum_str = ""
for s2 in l:
if s2 != s:
sum_str += parse_cui_str(s2, "base", app_list, max_size)[1]
sum_str += parse_cui_str(s, "total", app_list, max_size)[1]
str_list.append(sum_str)
return_str = "<table class=cuitable width='%s%%'><tr><td>%s</tr></td></table>" % (get_percent_str(max_size, total_size), "</tr></td><tr><td>".join(str_list))
return_size = max_size
elif kind == 'base':
for s in l:
r1, r2 = parse_cui_str(s, kind, app_list, total_size)
if r1 > return_size:
return_size = r1
return_str += r2
return (return_size, return_str)
print "<tr><td>"
print padding_str
s1, s2 = parse_cui_str(cui_str, 'total', app_list, total_size)
print s2
print "</tr></td>"
if self.is_conc:
print_path_list(parse_macro(self.get_macro_str('TOTAL')), app_list)
else:
# app base/fg
if display_full:
print "<tr><td>"
for tag in ['BASE', 'FG']:
size = self.get_mem_size(tag)
disp_str = "%s(%s)" % (tag.lower(), self.id)
print get_div_str(g_bar_color[tag.lower()], get_percent_str(size, total_size), "%s = %s" % (disp_str, number_to_str(size)), tag.lower())
print "</td></tr>"
if self.is_multi:
sub_count = len(self.cfg_info)
for i in xrange(sub_count):
sub_id = "%s_S%dS_" % (self.id, i)
app_list[sub_id].print_memory_layout(app_list, False, total_size)
else:
# print app only (heap + extra_base + app_fg)
print_path_list(parse_macro(" + ".join([t + self.id for t in ['ASM_HEAP_SIZE_', 'ASM_EXTRA_BASE_SIZE_', 'ASM_APP_FG_SIZE_']])), app_list)
# print app + cui
if self.cfg_info and 'cui' in self.cfg_info[0]:
size = self.get_mem_size('HEAP') + self.get_mem_size('EXTRA_BASE')
base_div = get_div_str('#F0F0F0', get_percent_str(size, total_size), "", "")
print_cui(self.id, self.cfg_info[0]['cui'], app_list, base_div, total_size)
if display_full:
print "</table>"
def print_macro_str(self, app_list):
# heap / extra_base / app_fg
if self.is_conc:
type_list = ['TOTAL']
else:
type_list = ['TOTAL', 'BASE', 'FG', 'HEAP', 'EXTRA_BASE', 'APP_FG', 'CUI_BASE', 'CUI_FG']
print_str = ""
for t in type_list:
size = self.get_mem_size(t)
macro_str = self.get_macro_str(t)
if t in ['BASE', 'FG']:
macro_str = macro_str.replace("ASM_CUI_%s_SIZE_%s" % (t, self.id), self.get_macro_str("CUI_" + t))
if macro_str and t in ['TOTAL', 'BASE', 'FG', 'EXTRA_BASE', 'APP_FG']:
macro_str = get_converted_macro(parse_macro(macro_str), app_list)
if size > 0:
print_str += "<tr><td>%s</td><td align=right>%d</td><td align=right>%s</td><td>%s</td></tr>" % (t.lower(), size, number_to_str(size), macro_str)
if print_str:
print "<H4>Macro</H4>"
print "<table class=datatable><th>Type</th><th colspan=2>Size</th><th>Macro</th>"
print print_str
print "</table>"
def print_xml_str(self):
# Memory tag
print "<H4><MEMORY> XML data</H4>"
tag_list = ['base', 'heap', 'extra_base', 'fg', 'cui', 'concurrent', 'concurrent_w_order']
found = False
for cfg in self.cfg_info:
for tag in tag_list:
if tag in cfg:
found = True
break
if found:
print "<table class='datatable'>"
for cfg in self.cfg_info:
print "<th>attr</th><th>value</th>"
for tag in tag_list:
if tag in cfg:
print "<tr><td>%s</td><td>%s</td></tr>" % (tag, cfg[tag])
print "</table>"
else:
print "n/a"
def print_sort_by_size(id_list, app_list, max_size):
    """Emit an HTML table of the given app ids sorted by size, descending.

    Each row shows total/base/fg sizes and a percentage bar scaled against
    *max_size* (the whole app pool).
    """
    size_sort_list = []
    for appid in id_list:
        app = app_list[appid]
        size_sort_list.append((app.get_mem_size('total'), app.get_mem_size('base'), app.get_mem_size('fg'), appid))
    size_sort_list.sort(reverse=True)
    print "<table class=datatable><th>Name</th><th colspan=3>Size</th><th width=100%>Percentage</th>"
    for size, basesize, fgsize, appid in size_sort_list:
        percent_value = get_percent_str(size, max_size)
        percent_base = get_percent_str(basesize, max_size)
        percent_fg = get_percent_str(fgsize, max_size)
        base_str = ""
        fg_str = ""
        percent_str = ""
        print "<tr><td><a href='#%s'>%s<a></td><td align=right>%d</td><td align=right>%s</td><td align=right>%s%%</td><td>" % (appid, appid, size, number_to_str(size), percent_value)
        if basesize != 0 or fgsize != 0:
            # Split bar: base share then fg share.
            if basesize > 0:
                print get_div_str(g_bar_color['base'], percent_base, "", number_to_str(basesize), True)
            if fgsize > 0:
                print get_div_str(g_bar_color['fg'], percent_fg, "", number_to_str(fgsize), True)
        else:
            # No breakdown available: a single 'total' colored bar.
            if size > 0:
                print get_div_str(g_bar_color['total'], percent_value, "", number_to_str(size), True)
        print "</td></tr>"
    print "</table>"
def print_apps_html(app_list, total_size):
    """Emit the 'App Part': size overview tables, an index, then one HTML
    section per (non-sub) app, grouped by id prefix (CONC/SRV/CUI/APP)."""
    # --------------------------------------------------------------------------
    # collect data
    # find max name length of app
    max_len = 0
    for appid in app_list:
        app_len = len(appid)
        if app_len > max_len:
            max_len = app_len
    # group apps
    group_tags = ["CONC", "SRV", "CUI", "APP"]
    group_list = {}
    for appid in app_list:
        if app_list[appid].is_sub:
            continue
        found = False
        for t in group_tags:
            # First matching tag substring wins.
            if appid.lower().find(t.lower()) >= 0:
                found = True
                try:
                    group_list[t].append(appid)
                except KeyError:
                    group_list[t] = [appid]
                break
        if not found:
            print >> sys.stderr, "Cannot group %s, skipped!" % (appid)
    for t in group_list:
        group_list[t].sort()
    if len(group_list) == 0:
        return
    # --------------------------------------------------------------------------
    # overview
    print "<H1>App Part</H1>"
    # ASM total size
    print "<H4>Total size = %d (%s)</H4>" % (total_size, number_to_str(total_size))
    # Sort by Size: one table for CONC+APP combined, then CUI, then SRV.
    todo_list = []
    try:
        todo_list.append(group_list["CONC"] + group_list["APP"])
    except KeyError:
        try:
            todo_list.append(group_list["APP"])
        except KeyError:
            pass
    try:
        todo_list.append(group_list["CUI"])
    except KeyError:
        pass
    try:
        todo_list.append(group_list["SRV"])
    except KeyError:
        pass
    for group in todo_list:
        print_sort_by_size(group, app_list, total_size)
    print "<HR>"
    # --------------------------------------------------------------------------
    # index
    print "<a name='index'></a><H1>Index</H1>"
    print "<table class=datatable><tr>"
    percentage = 100 / len(group_list)
    for t in group_list:
        print "<th width=%d%%>%s</th>" % (percentage, t)
    print "</tr><tr>"
    for t in group_list:
        print "<td>"
        for appid in group_list[t]:
            # Pad names to max_len so the column lines up.
            print "<a href='#%s'>%s<a>%s" % (appid, appid, " " * (max_len-len(appid)))
        print "</td>"
    print "</tr></table><HR>"
    # --------------------------------------------------------------------------
    # each app
    for t in group_list:
        for appid in group_list[t]:
            if app_list[appid].is_sub:
                continue
            app_list[appid].print_html(app_list)
def print_globals_html(sizes, total_size):
    """Emit the 'Global Part': per-app global memory usage table.

    *sizes* is the '_GLOBAL' pseudo-app dict {app_id: bytes}.
    """
    # --------------------------------------------------------------------------
    # overview
    print "<a name='_global'></a><H1>Global Part</H1>"
    # ASM total size
    print "<H4>Total size = %d (%s)</H4>" % (total_size, number_to_str(total_size))
    print "<table class=datatable><th>Name</th><th colspan=3>Size</th><th width=100%>Percentage</th>"
    ak = sizes.keys();
    ak.sort()  # alphabetical rows (Python 2: keys() returns a list)
    for app_id in ak:
        size = sizes[app_id]
        percent_value = get_percent_str(size, total_size)
        print "<tr><td><a href='#%s'>%s<a></td><td align=right>%d</td><td align=right>%s</td><td align=right>%s%%</td><td>" % (app_id, app_id, size, number_to_str(size), percent_value)
        print get_div_str(g_bar_color['global'], percent_value, "", number_to_str(size), True)
        print "</td></tr>"
    print "</table><HR>"
def print_asm_overview(sizes):
    """Emit the 'Overview' section: App / Global / Anonymous split of the
    ASM pool, as one stacked bar plus a small table.

    *sizes* is the '_ASM' pseudo-app dict with 'total', 'sub', 'global'.
    """
    total_size = sizes['total']
    app_size = sizes['sub']
    global_size = sizes['global']
    # Whatever the pool holds beyond app + global usage.
    anony_size = total_size - (app_size + global_size)
    type_info = ((app_size, "App", "#FF3"), (global_size, "Global", "#F90"), (anony_size, "Anonymous", "#3C0"))
    # --------------------------------------------------------------------------
    # overview
    print "<H1>Overview</H1>"
    # ASM total size
    print "<H4>ASM Pool Total size = %d (%s)</H4>" % (total_size, number_to_str(total_size))
    print "<table class=bartable width=100%><tr><td>"
    for size, name, clr in type_info:
        print get_div_str(clr, get_percent_str(size, total_size), "%d(%s)" % (size, number_to_str(size)), name);
    print "</td></tr></table>"
    print "<table class=datatable>"
    print "<tr><th>Type</th><th>Size</th></tr>"
    for size, name, clr in type_info:
        print "<tr><td>%s</td><td>%d(%s)</td></tr>" % (name, size, number_to_str(size))
    print "</table><HR>"
def main(opt):
    """Top-level driver: parse the three build artefacts named on *opt*
    and write the whole HTML report to stdout (progress to stderr)."""
    # load name: last meaningful path component (skipping '' and 'mcu').
    data = opt.mcu_path.split('\\')
    while True:
        load_name = data.pop()
        if load_name and load_name != 'mcu':
            break
    cfg_list, conc_list = parse_cfg_file(opt.cfg_path)
    data_list = parse_app_data_file(opt.app_path)
    size_list, detail_list = parse_obj_check_file(opt.obj_path)
    print >> sys.stderr, "mem_cfg(id, conc), rp_app_data, obj_check(size, detail) = %d, %d, %d, %d, %d" % (len(cfg_list), len(conc_list), len(data_list), len(size_list), len(detail_list))
    app_list = {}
    # each id in mem_cfg
    for appid in cfg_list:
        app = app_info(appid, cfg_list, data_list, size_list, detail_list)
        app_list[appid] = app
        if app.is_multi:
            # Multi-config apps also get one synthetic sub-app per config.
            for i in xrange(len(app.cfg_info)):
                sub_id = "%s_S%dS_" % (appid, i)
                app = app_info(sub_id, {sub_id: [cfg_list[appid][i], ]}, data_list, size_list, detail_list)
                app.is_sub = True
                app_list[sub_id] = app
    # treat conc as a new app
    for concs, conc_tag in conc_list:
        appid = 'CONC_' + '_'.join(concs)
        app = app_info(appid, {appid: [{conc_tag:', '.join(concs)},]}, data_list, size_list, detail_list)
        app_list[appid] = app
    # print html header
    print '''
<HTML>
<style type="text/css">
h1,h2,h3,h4,h5,h6 {font-family:Arial}
table.datatable {border-collapse:collapse;}
table.datatable th {font-family:Arial; background-color: #E5EECC; border:1px solid gray; padding: 2px; font-size: 0.9em;}
table.datatable td {font-family:Lucida Console; border:1px solid gray; padding: 2px; font-size: 0.9em;}
table.datatable td a:hover {background-color:#CFF;}
a.app:link {text-decoration:none;color:#000080;}
a.app:visited {text-decoration:none;color:#000080;}
a.app:hover {text-decoration:underline;}
a.app:active {text-decoration:underline;}
table.bartable {border-collapse:collapse;}
table.bartable td {font-family:Lucida Console; padding: 1px; font-size: 0.8em;}
table.bartable td div {border:1px solid gray; margin: 0px -1px; overflow:hidden;}
table.cuitable {float:left; border-collapse:collapse;}
table.cuitable td {font-family:Lucida Console; padding: 0px; font-size: 0.8em; vertical-align:middle;}
table.cuitable td div {border:1px solid gray; margin: 0px -1px; overflow:hidden;}
</style>
'''
    print "<TITLE>%s</TITLE><BODY>" % (load_name)
    print "<a name='top'></a><H1>%s</H1>%s" % (load_name, opt.mcu_path)
    if opt.app_name:
        # Single-app mode: emit only the requested app's section.
        try:
            app = app_list[opt.app_name]
            app.print_html(app_list)
        except KeyError:
            print >> sys.stderr, "< %s > not found!" % (opt.app_name)
    else:
        # Full report: overview, app part, then globals when present.
        print_asm_overview(size_list['_ASM'])
        total_size = size_list['_ASM']['sub']
        print_apps_html(app_list, total_size)
        global_size = size_list['_ASM']['global']
        if global_size > 0:
            print_globals_html(size_list['_GLOBAL'], global_size)
    print '''
</BODY>
</HTML>
'''
    if g_setting_eval_macro:
        print >> sys.stderr, "Count of Python eval = %d" % (g_count_python_eval)
        print >> sys.stderr, "Count of Perl eval = %d" % (g_count_perl_eval)
if __name__ == "__main__":
    # Command-line interface: --mcu (required) points at the MCU tree; the
    # project name defaults to the first directory under <mcu>\build.
    parser = OptionParser()
    parser.add_option("-f", dest="cfg_file")
    parser.add_option("--mcu", dest="mcu_path")
    parser.add_option("--proj", dest="proj_name")
    parser.add_option("--app", dest="app_name")
    parser.add_option("--html", dest="output_html", action="store_true", default=False)
    #parser.add_option("--eval", dest="eval_macro", action="store_true", default=False)
    (opt, args) = parser.parse_args()
    g_setting_eval_macro = True #opt.eval_macro
    if not opt.mcu_path:
        print >> sys.stderr, "Please use --mcu to specify load path"
        sys.exit(-1)
    if not os.path.exists(opt.mcu_path):
        print >> sys.stderr, "Can not open %s" % (opt.mcu_path)
        sys.exit(-1)
    opt.mcu_path = os.path.abspath(opt.mcu_path)
    if not opt.proj_name:
        # Default project: first entry found under <mcu>\build.
        entries = os.listdir(g_build_path % (opt.mcu_path))
        for d in entries:
            opt.proj_name = d
            break
    # Derive the three input paths and verify they all exist.
    opt.cfg_path = g_cfg_path % (opt.mcu_path)
    opt.app_path = g_app_data_path % (opt.mcu_path)
    opt.obj_path = g_obj_log_path % (opt.mcu_path, opt.proj_name)
    error_found = False
    for check_path in [opt.cfg_path, opt.app_path, opt.obj_path]:
        if not os.path.exists(check_path):
            print >> sys.stderr, "Can not open %s" % (check_path)
            error_found = True
    if error_found:
        sys.exit(-1)
    # Dead debug branches below (if 0): ad-hoc tests kept for reference.
    if 0:
        a = r'450*1024 + 250*1024 + max(max(max(max(max(max(max(max(ASM_BASE_SIZE_VCUI_CONTACT_VIEW, ASM_BASE_SIZE_APP_DIALER), ASM_BASE_SIZE_VAPP_BOOKMARK_CUI), ASM_BASE_SIZE_VAPP_BROWSER), ASM_BASE_SIZE_VCUI_FMGR), ASM_BASE_SIZE_APP_DTCNT_CUI), ASM_BASE_SIZE_VCUI_CBM), ASM_BASE_SIZE_APP_MUSIC_PLAYER), ASM_BASE_SIZE_APP_GALLERY)'
        split_max(a, 14, True)
        sys.exit(0)
    if 0:
        a = r'ASM_BASE_SIZE_APP_MESSAGE + ASM_BASE_SIZE_APP_OPERA'
        print format_html_string(a, None)
        sys.exit(0)
    if 0:
        a, err = eval_string("max((150*1024 + (300*1024 + (150*1024))), max((600*1024 + (150*1024 + (300*1024 + (150*1024)))), (600*1024 + (0))))")
        print "1=== %s ====" % (a)
        print "2=== %s ====" % (err)
        sys.exit()
    if 0:
        result_list = parse_macro("1 + 2 + max(3 + max(10,11), max(4, max(max(5,8), max(6, 7))))");
        for line in result_list:
            for tag, f in line:
                if f:
                    print tag ,
                else:
                    print len(tag)*" " ,
            print ""
        sys.exit()
    if 0:
        a = r'A + B, max(C,D) + D, max(E, F+G),'
        print split_str(a, '+')
        sys.exit(0)
    # -f selects the Excel-style dump; otherwise produce the HTML report.
    if opt.cfg_file:
        print_cfg_in_excel(opt)
    else:
        main(opt)
|
UTF-8
|
Python
| false | false | 38,581 |
py
| 7,634 |
parse_mem_cfg.py
| 260 | 0.472383 | 0.461419 | 0 | 1,097 | 34.169553 | 332 |
YesVen/Interview-Prep-All
| 13,357,348,335,022 |
1361cf2b4d2c58de89142e8e51fdcfb0826f8821
|
d9d8174ab88f88b588396cf58f96f3a5e5317d18
|
/src/main/java/com/practice/python/unique_paths.py
|
4726b69b22950a8abd1ca335018d22d3f322c60c
|
[] |
no_license
|
https://github.com/YesVen/Interview-Prep-All
|
fb4c88292e9113a009b7cf3331f1b1974e5efe46
|
ec123195958bebad85d9ce9fa7d903a799ddd4d8
|
refs/heads/master
| 2020-03-18T18:44:36.255814 | 2017-10-29T01:12:05 | 2017-10-29T01:12:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# O(m*n) time, O(m*n) space
class Solution(object):
    def uniquePaths(self, m, n):
        """
        :type m: int
        :type n: int
        :rtype: int

        Count lattice paths from the top-left to the bottom-right corner of
        an m x n grid, moving only right or down, via 2-D dynamic programming.

        Fix: `xrange` is Python-2-only; `range` iterates identically here and
        works on both Python 2 and 3.
        """
        # Degenerate grids admit no paths.
        if m <= 0 or n <= 0:
            return 0
        # dp[i][j] = number of distinct paths reaching cell (i, j).
        dp = [[0] * n for i in range(m)]
        # A single row or a single column admits exactly one path.
        for i in range(m):
            dp[i][0] = 1
        for i in range(n):
            dp[0][i] = 1
        # Every interior cell is entered either from above or from the left.
        for i in range(1, m):
            for j in range(1, n):
                dp[i][j] = dp[i-1][j] + dp[i][j-1]
        return dp[m-1][n-1]
# O(min(m, n)) space
class Solution(object):
    def uniquePaths(self, m, n):
        """
        :type m: int
        :type n: int
        :rtype: int

        Space-optimized variant: keeps a single rolling row of length
        min(m, n) instead of the full m x n table.

        Fix: `xrange` is Python-2-only; `range` iterates identically here and
        works on both Python 2 and 3.
        """
        # Swap so the rolling array spans the shorter dimension.
        if m > n:
            return self.uniquePaths(n, m)
        # cur[i] holds the path count for column i of the current row.
        cur = [1 for i in range(m)]
        for j in range(1, n):
            for i in range(1, m):
                # Paths from the left (cur[i-1]) plus from above (old cur[i]).
                cur[i] += cur[i-1]
        return cur[m-1]
|
UTF-8
|
Python
| false | false | 936 |
py
| 716 |
unique_paths.py
| 703 | 0.391026 | 0.370726 | 0 | 39 | 23.025641 | 50 |
alxhar/mayak
| 13,640,816,164,765 |
701a44eb8e566c008704383cc00e4e2d39bb8d86
|
be1392cc36fd1ca3c4549bdd318aab39eb2cb20d
|
/hotel/migrations/0001_initial.py
|
f96ebe4f63dcc5594f61bc73cb11140702f136d6
|
[] |
no_license
|
https://github.com/alxhar/mayak
|
7be35b83ecd661aa583ccf85a7840bbf627f82f1
|
19bdf7775338719f0241f7d4feb0828f2278633f
|
refs/heads/master
| 2020-03-06T01:15:34.966894 | 2017-06-29T10:03:04 | 2017-06-29T10:03:04 | 93,789,430 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-18 07:23
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial, auto-generated schema for the hotel app.

    Creates the Category and Room tables. Room.category is a nullable FK to
    Category with SET_NULL on delete; the Russian verbose names are what the
    Django admin displays.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, verbose_name='ะะฐะทะฒะฐะฝะธะต ะบะฐัะตะณะพัะธะธ')),
                ('description', models.TextField(blank=True, null=True, verbose_name='ะะฟะธัะฐะฝะธะต ะบะฐัะตะณะพัะธะธ')),
            ],
            options={
                'verbose_name': 'ะะฐัะตะณะพัะธั',
                'verbose_name_plural': 'ะะฐัะตะณะพัะธะธ',
            },
        ),
        migrations.CreateModel(
            name='Room',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(max_length=200, verbose_name='ะขะธะฟ ะฝะพะผะตัะฐ')),
                ('size', models.SmallIntegerField(blank=True, null=True, verbose_name='ะะพะปะธัะตััะฒะพ ะผะตัั ะฒ ะฝะพะผะตัะต')),
                ('square', models.SmallIntegerField(blank=True, null=True, verbose_name='ะะปะพัะฐะดั ะฝะพะผะตัะฐ, ะผ(ะฒะบะปััะฐั ั/ัะทะตะป)')),
                # Seasonal price bands, one field per date range of the season.
                ('price_may', models.IntegerField(blank=True, null=True, verbose_name='ะฆะตะฝะฐ ั 1.05 - 10.06')),
                ('price_june', models.IntegerField(blank=True, null=True, verbose_name='ะฆะตะฝะฐ ั 11.06 - 30.06')),
                ('price_july', models.IntegerField(blank=True, null=True, verbose_name='ะฆะตะฝะฐ ั 1.07 - 25.08')),
                ('price_august', models.IntegerField(blank=True, null=True, verbose_name='ะฆะตะฝะฐ ั 26.08 - 30.09')),
                ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='rooms', to='hotel.Category')),
            ],
            options={
                'verbose_name': 'ะะพะผะตั',
                'verbose_name_plural': 'ะะพะผะตัะฐ',
            },
        ),
    ]
|
UTF-8
|
Python
| false | false | 2,291 |
py
| 35 |
0001_initial.py
| 16 | 0.577602 | 0.552974 | 0 | 47 | 44.787234 | 160 |
chizel/os_dj
| 8,400,956,045,453 |
7e6fc075e691af2fbc3f9d379a08a97183017be2
|
7b1b75dd21cc9acf7a9ad4f40adadfb4c1ef8fd8
|
/wsgi/openshift/settings.py
|
ef8e1c932f05965020da1f5f0fc9503fb1340007
|
[] |
no_license
|
https://github.com/chizel/os_dj
|
2fe5525dfd3e451b4e22b9696aff0661f8be781f
|
96e7f2c6613d9f515a371e08b36f4b9a42c5736c
|
refs/heads/master
| 2021-01-01T19:28:29.641230 | 2015-01-30T13:39:23 | 2015-01-30T13:39:23 | 20,371,950 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import imp
import socket
# my settings
# NOTE(review): real credentials are hard-coded below and committed to version
# control; move them to environment variables and rotate all of these keys.
SECRET_KEY="vm4rl5*ymb@2&d_(gc^gb-^twq9w(u69hi--%&5xrh!xk(t%hw"
# email
EMAIL_HOST_USER = 'mydjangotest@gmail.com'
EMAIL_HOST_PASSWORD = 'MYDJ@NG)T*ST'
# twitter
SOCIAL_AUTH_TWITTER_KEY="ywIdQZ8GEcVPro2pMdAY7FRvy"
SOCIAL_AUTH_TWITTER_SECRET="HBgvGu16JwHn6SYB9HfcLbIagsgwyAii3TH5winvXilwWTPkvq"
# github
SOCIAL_AUTH_GITHUB_KEY="418f1b0de987958ca56d"
SOCIAL_AUTH_GITHUB_SECRET="261db37446f05b88da1e5c0a915708491718aacc"
# facebook
SOCIAL_AUTH_FACEBOOK_KEY="1385525765034186"
SOCIAL_AUTH_FACEBOOK_SECRET="ca9e0b0c014909384c3d63336a9ea33e"
# Detect an OpenShift deployment from its environment.
# Fix: dict.has_key() is Python-2-only (removed in Python 3); the `in`
# operator is equivalent and consistent with the REDISCLOUD check below.
ON_OPENSHIFT = False
if 'OPENSHIFT_REPO_DIR' in os.environ:
    ON_OPENSHIFT = True
DEBUG = not ON_OPENSHIFT
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
if ON_OPENSHIFT:
    ALLOWED_HOSTS = [os.environ['OPENSHIFT_APP_DNS'], socket.gethostname()]
    SECRET_KEY = os.environ['OPENSHIFT_SECRET_TOKEN']
else:
    ALLOWED_HOSTS = []
TEMPLATE_DEBUG = DEBUG
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# If you want configure the REDISCLOUD
if 'REDISCLOUD_URL' in os.environ and 'REDISCLOUD_PORT' in os.environ and 'REDISCLOUD_PASSWORD' in os.environ:
    redis_server = os.environ['REDISCLOUD_URL']
    redis_port = os.environ['REDISCLOUD_PORT']
    redis_password = os.environ['REDISCLOUD_PASSWORD']
    CACHES = {
        'default' : {
            'BACKEND' : 'redis_cache.RedisCache',
            'LOCATION' : '%s:%d'%(redis_server,int(redis_port)),
            'OPTIONS' : {
                'DB':0,
                'PARSER_CLASS' : 'redis.connection.HiredisParser',
                'PASSWORD' : redis_password,
            }
        }
    }
    # Wrap the stack with the page-cache middleware (update first, fetch last).
    MIDDLEWARE_CLASSES = ('django.middleware.cache.UpdateCacheMiddleware',) + MIDDLEWARE_CLASSES + ('django.middleware.cache.FetchFromCacheMiddleware',)
ROOT_URLCONF = 'urls'
WSGI_APPLICATION = 'wsgi.application'
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR,'templates'),
)
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# SQLite in both cases; on OpenShift the db file lives in the persistent data dir.
if ON_OPENSHIFT:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(os.environ['OPENSHIFT_DATA_DIR'], 'db.sqlite3'),
        }
    }
else:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
#MY_SETTINGS
# NOTE(review): '127.0.01' looks like a typo for '127.0.0.1' - confirm with
# whatever reads DOMAIN before changing it.
DOMAIN = '127.0.01:8000'
USER_AVATARS = 'user_avatars'
LOGIN_URL = '/user/login/'
APPEND_SLASH = True
# EMAIL
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    #my
    'social.apps.django_app.default',
    'forum',
    'userprofile',
    'blog',
    'basesite',
    'filesharing',
    'udacity',
)
#SOCIAL AUTHORIZATION
# python-social-auth pipeline; the last two steps are project-local additions.
SOCIAL_AUTH_PIPELINE = (
    'social.pipeline.social_auth.social_details',
    'social.pipeline.social_auth.social_uid',
    'social.pipeline.social_auth.auth_allowed',
    'social.pipeline.social_auth.social_user',
    'social.pipeline.user.get_username',
    'social.pipeline.user.create_user',
    'social.pipeline.social_auth.associate_user',
    'social.pipeline.social_auth.load_extra_data',
    'social.pipeline.user.user_details',
    'userprofile.pipeline.user_details',
    'userprofile.pipeline.get_user_avatar',
)
LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_DEFAULT_USERNAME = 'new_social_auth_user'
SOCIAL_AUTH_UID_LENGTH = 16
SOCIAL_AUTH_ASSOCIATION_HANDLE_LENGTH = 16
SOCIAL_AUTH_NONCE_SERVER_URL_LENGTH = 16
SOCIAL_AUTH_ASSOCIATION_SERVER_URL_LENGTH = 16
SOCIAL_AUTH_ASSOCIATION_HANDLE_LENGTH = 16
ACCESS_TOKEN_METHOD = 'GET'
AUTHENTICATION_BACKENDS = (
    # 'social.backends.google.GoogleOAuth2Backend',
    # 'social.backends.open_id.OpenIdAuth',
    # 'social.backends.google.GoogleOpenId',
    # 'social.backends.google.GoogleOAuth2',
    # 'social.backends.google.GoogleOAuth',
    # 'social.backends.yahoo.YahooOpenId',
    'social.backends.facebook.FacebookOAuth2',
    'social.backends.twitter.TwitterOAuth',
    'social.backends.github.GithubOAuth2',
    'django.contrib.auth.backends.ModelBackend',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'social.apps.django_app.context_processors.backends',
    'social.apps.django_app.context_processors.login_redirect',
    'django.contrib.auth.context_processors.auth',
)
SOCIAL_AUTH_ENABLED_BACKENDS = ('google', 'twitter', 'facebook', 'github')
|
UTF-8
|
Python
| false | false | 5,302 |
py
| 55 |
settings.py
| 29 | 0.702188 | 0.67748 | 0 | 181 | 28.292818 | 152 |
YSRKEN/eer-search-client
| 14,998,025,807,467 |
a064bd597eaf6a3314124a9a6178d3d32063a62c
|
17f3038fa5e7e60931fc6402e5ab8aeb0f04e9d5
|
/server/scraping.py
|
7edaac9a0af995b8c550d08ec43ea6440d00a86f
|
[] |
no_license
|
https://github.com/YSRKEN/eer-search-client
|
94fbcca74c4b43a940ceebca941a1e17c431d71b
|
c1a2e27b67369ef4808bd069f0deda09c8e72388
|
refs/heads/master
| 2023-01-08T19:39:30.026374 | 2021-09-02T03:20:58 | 2021-09-02T03:20:58 | 226,133,634 | 0 | 0 | null | false | 2023-01-05T16:22:11 | 2019-12-05T15:33:45 | 2021-09-02T03:21:09 | 2023-01-05T16:22:09 | 2,066 | 0 | 0 | 21 |
TypeScript
| false | false |
import time
from typing import List, Dict, Union
import requests
from lxml import html
from constant import WAIT_TIME
class DomObject:
    """Thin convenience wrapper around an lxml HTML element tree."""
    dom: html.HtmlElement

    @staticmethod
    def from_string(page_data: str):
        """Parse an HTML string and wrap the resulting root element."""
        wrapper = DomObject()
        wrapper.dom = html.fromstring(page_data)
        return wrapper

    @staticmethod
    def from_html_element(html_element: html.HtmlElement):
        """Wrap an already-parsed lxml element."""
        wrapper = DomObject()
        wrapper.dom = html_element
        return wrapper

    def select(self, css_query: str) -> Union['DomObject', None]:
        """Return the first element matching css_query, or None if none match."""
        matches = self.dom.cssselect(css_query)
        return DomObject.from_html_element(matches[0]) if matches else None

    def select_all(self, css_query: str) -> List['DomObject']:  # PEP484
        """Return every element matching css_query, each wrapped."""
        return [DomObject.from_html_element(element) for element in self.dom.cssselect(css_query)]

    def text_content(self) -> str:
        """Concatenated text of this element and its descendants."""
        return self.dom.text_content()

    def attribute(self, key: str, default_value: str) -> str:
        """Attribute value for key, or default_value when the attribute is absent."""
        return self.dom.attrib.get(key, default_value)
class HttpClient:
    """HTTP helper that throttles requests to at most one per WAIT_TIME seconds."""
    def __init__(self):
        # Timestamp of the most recent request; used by the throttle below.
        self.last_request = time.time()

    def post_html(self, url: str, parameter: Dict[str, any]) -> str:
        """POST `parameter` as form data to `url` and return the response body.

        NOTE(review): despite the `-> str` annotation this returns
        `page.content` (bytes), while get_html returns decoded text -
        confirm which representation callers rely on before changing it.
        """
        # Throttle: sleep until WAIT_TIME seconds have elapsed since the last request.
        wait_time = self.last_request + WAIT_TIME - time.time()
        if wait_time > 0.0:
            time.sleep(wait_time)
        self.last_request = time.time()
        # Set request headers (desktop Chrome UA, Japanese-preferred languages)
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
                          ' Chrome/62.0.3202.94 Safari/537.36',
            'Accept-Language': 'ja,en-US;q=0.9,en;q=0.8'
        }
        # Fetch the page
        page = requests.post(url=url, data=parameter, headers=headers)
        return page.content

    def get_html(self, url: str, parameter: Dict[str, any] = None) -> str:
        """GET `url` (appending `parameter` as a query string) and return the body text."""
        # Throttle: sleep until WAIT_TIME seconds have elapsed since the last request.
        wait_time = self.last_request + WAIT_TIME - time.time()
        if wait_time > 0.0:
            time.sleep(wait_time)
        self.last_request = time.time()
        # Set request headers (desktop Chrome UA, Japanese-preferred languages)
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
                          ' Chrome/62.0.3202.94 Safari/537.36',
            'Accept-Language': 'ja,en-US;q=0.9,en;q=0.8'
        }
        # Fetch the page
        if parameter is not None:
            # NOTE(review): hand-rolled query string only escapes spaces;
            # urllib.parse.urlencode would handle other reserved characters too.
            url += '?' + '&'.join([f'{key}={str(val).replace(" ", "%20")}' for key, val in parameter.items()])
            print(url)
        page = requests.get(url=url, headers=headers)
        return page.text
|
UTF-8
|
Python
| false | false | 2,668 |
py
| 9 |
scraping.py
| 5 | 0.569954 | 0.541284 | 0 | 84 | 30.142857 | 110 |
KuznetsovKirill/GeekBrains
| 13,623,636,294,322 |
a5a69f055df4a07574089d47cc89de1b2e4ba52d
|
e600f379ff44c8fec6df376a69d47294d0fb99c3
|
/python_1/main.py
|
d4008292f1c148e823ec3ed3707f46427c5e219b
|
[] |
no_license
|
https://github.com/KuznetsovKirill/GeekBrains
|
4bd3665a45468488eed2f2d36f373537f6593c18
|
1b5799c4026d8dc9bf3cb9f4cbb52c9b4ccd1269
|
refs/heads/master
| 2022-12-18T21:42:36.469793 | 2020-09-28T10:06:08 | 2020-09-28T10:06:08 | 287,452,299 | 0 | 0 | null | false | 2020-09-28T10:06:09 | 2020-08-14T05:35:15 | 2020-09-23T14:08:09 | 2020-09-28T10:06:08 | 15,347 | 0 | 0 | 0 |
Python
| false | false |
#made by Shyi
#----------------------------task number 1
# Show the type and value of three differently-typed variables, then echo
# three user-supplied values back.
var1 = 0
var2 = 0.1
var3 = "0"
print("First var", type(var1), var1)
print("Second var", type(var2), var2)
print("Third var", type(var3), var3)
input1 = input("Fisrt var: ")
input2 = input("Second var: ")
input3 = input("Third var: ")
print("First var -> ", input1," Second var -> ", input2, " Third var -> ",input3)
#----------------------------task number 3
# Compute n + nn + nnn from a digit string n (e.g. 5 -> 5 + 55 + 555).
a = input("Enter n: ")
if a.isdigit():
    s = str(a) + str(a)
    t = str(a) + str(a) + str(a)
    a = int(a) + int(s) + int(t)
    print(a)
#----------------------------task number 4
# Print the largest digit of a number entered as a string.
s = input("Enter the number -> ")
l = len(s)
i = 0
element = 0
if l > 0:
    # Bug fix: the original re-tested s.isdigit() inside the loop and, for
    # non-numeric input, never incremented i - an infinite loop that printed
    # the error message forever. Validate once up front instead.
    if s.isdigit():
        while i < l:
            if int(element) < int(s[i]):
                element = int(s[i])
            i = i + 1
        print(element)
    else:
        print("the variable must be a numeric type")
else:
    print("number must be more than 0")
#----------------------------task number 5
# Compare revenue with costs and, when profitable, report salary per employee.
proceeds = input("proceeds -> ")
costs = input("costs -> ")
proceeds = int(proceeds)
costs = int(costs)
if proceeds < costs:
    print("the company could not make money")
if proceeds == costs:
    print("the company went to zero")
if proceeds > costs:
    print("the company was able to make money")
    proceeds = proceeds/costs
emp = input("Enter the number of employees -> ")
emp = int(emp)
salary = proceeds/emp
print("the salary per employee =",salary)
#----------------------------task number 6
# Daily distance grows by 10%; count days until the target is reached.
a = input("First result -> ")
b = input("target km -> ")
a = int(a)
b = int(b)
i = 1
while a < b:
    print(i,"-ะน ะดะตะฝั: ",a)
    a = a + a * 0.1
    i = i + 1
print(i,"-ะน ะดะตะฝั: ",a)
print("ะัะฒะตั: ะฝะฐ", i, "-ะน ะดะตะฝั ัะฟะพัััะผะตะฝ ะดะพััะธะณ ัะตะทัะปััะฐัะฐ - ะฝะต ะผะตะฝะตะต", b,"ะบะผ.")
|
UTF-8
|
Python
| false | false | 1,902 |
py
| 8 |
main.py
| 7 | 0.536639 | 0.518457 | 0 | 72 | 24.222222 | 81 |
gavinong10/CarND-Behavioral-Cloning-P3
| 1,434,519,122,239 |
e9017bc811cd1feac6d77715419eab472bb6e262
|
0a86b720be44c596f44b4fec22226b8142cf74bc
|
/model_runner_4.py
|
3ad225136c66a305652f7814c655c36609968dcc
|
[] |
no_license
|
https://github.com/gavinong10/CarND-Behavioral-Cloning-P3
|
5f020603afb4d7fb9afb577f87e87c684df92975
|
cc2ebd45fa6b24fce6fee99a1ce14de21b6fbe6e
|
refs/heads/master
| 2021-01-22T05:43:26.855695 | 2017-03-11T22:18:02 | 2017-03-11T22:18:02 | 81,690,034 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from model import *
import pandas as pd
from keras.models import load_model
from keras.optimizers import Adam, Nadam
train_batch_size = 9000
VAL_PORTION = 0.1
# Base hyper-parameter set; each run below overrides entries via dict merging.
template = {
    "optimizer": 'Adam',
    "lr": 0.0001,
    "dropout": 0.5,
    "min_perc_val": 0.75,
    "max_perc_val": 0.85,
    "track1_data_dirs": ['data_download'],
    "folders_to_exclude": [],
    "TRAIN_BATCH_SIZE": 32,
    "VAL_BATCH_SIZE": 32,
    "EPOCHS": 140,
    "OFFSET": 0.2,
    "VAL_PORTION": VAL_PORTION,
    "INCLUDE_MIRROR_TRAIN": True,
    "INCLUDE_MIRROR_VAL": False,
    "INCLUDE_SHADOW_TRAIN": False,
    "INCLUDE_SHADOW_VAL": False,
    "MIN_ANGLE_TRAIN": 0.0,
    "MIN_ANGLE_VAL": 0.0 ,
    "LOAD_MODEL": False,
    "EPOCH_SIZE": None,
}
# Concrete training runs: {**template, **{...}} merges the overrides into the
# template; both runs resume from saved checkpoints with different learning rates.
all_instances = [
    {**template, **{
        "optimizer": 'Adam',
        "lr": 0.0001,
        "output_path": 'training6_smallpreprocess/Adam_std_dirt_right_limepoch/Adam_std_dirt_right_limepoch/Adam_std_dirt_right_limepoch',
        "track1_data_dirs": ['data_download', 'avoid_dirt', 'first_hard_right', 'first_hard_right_correction'],
        "EPOCHS": 15,
        "EPOCH_SIZE": 50000,
        "LOAD_MODEL": "training6_smallpreprocess/Adam_std_dirt_right_limepoch/Adam_std_dirt_right_limepoch/model_epoch_3.h5",
    }},
    {**template, **{
        "optimizer": 'Adam',
        "lr": 0.00003,
        "output_path": 'training6_smallpreprocess/Adam_std_dirt_right_limepoch/Adam_std_dirt_right_limepoch/Adam_std_dirt_right_limepoch_0.00003',
        "track1_data_dirs": ['data_download', 'avoid_dirt', 'first_hard_right', 'first_hard_right_correction'],
        "EPOCHS": 15,
        "EPOCH_SIZE": 50000,
        "LOAD_MODEL": "training6_smallpreprocess/Adam_std_dirt_right_limepoch/Adam_std_dirt_right_limepoch/model_epoch_5.h5",
    }}]
if __name__ == "__main__":
    # Run every configured training instance in sequence.
    # to_run = [instance1, instance2]
    to_run = all_instances
    for inst in to_run:
        # Persist this run's hyper-parameters for later reference.
        pd.Series(inst).to_csv('params.csv')
        if inst["LOAD_MODEL"]:
            # Resume from a checkpoint and re-compile with the requested optimizer/lr.
            model = load_model(inst["LOAD_MODEL"])
            if inst["optimizer"] == "Adam":
                opt = Adam(lr=inst["lr"])
            elif inst["optimizer"] == "Nadam":
                opt = Nadam(lr=inst["lr"])
            model.compile(optimizer=opt, loss='mean_squared_error')
        else:
            # Fresh model built by the project-local architecture factory.
            model = nvidia_model(learning_rate=inst['lr'], dropout=inst['dropout'], optimizer = inst['optimizer'])
        print(model.summary())
        train(model, **inst)
|
UTF-8
|
Python
| false | false | 2,384 |
py
| 17 |
model_runner_4.py
| 9 | 0.612836 | 0.580956 | 0 | 69 | 33.565217 | 142 |
mabasith/Project1
| 14,671,608,321,135 |
005d698ec8bdd12938e0b860c1de488297e8415c
|
ad4d8db8ca96ccf9d99412613c773744b2480d06
|
/python_dev/Udemy/Udemy/gui_program.py
|
82b6f1e21b3815edfdb3c49465d471de3ea7e7bb
|
[] |
no_license
|
https://github.com/mabasith/Project1
|
5bcb436e0cc87686c73c0ddad65375399a7668d3
|
87f1dd72459b362b756c2b837bedccbbaf6920b2
|
refs/heads/master
| 2022-11-28T20:45:33.095608 | 2020-08-08T16:44:39 | 2020-08-08T16:44:39 | 286,009,587 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from tkinter import *
canvas_height = 50
canvas_width = 100
class App:
    """Simple two-button Tkinter demo frame."""
    def __init__(self, master, width = canvas_width, height=canvas_height):
        """Build the frame and its two buttons inside `master`.

        Bug fix: the original ignored the `width`/`height` parameters and
        always read the module-level canvas_* globals. The parameters are
        now honoured; their defaults are those same globals, so default
        behaviour is unchanged.
        """
        frame = Frame(master, width=width, height=height)
        frame.pack()
        # 'Close' ends the mainloop via frame.quit.
        self.button = Button(frame, text = 'Close', fg='red', command = frame.quit)
        self.button.pack(side = LEFT)
        self.slogan = Button(frame, text ="Click Me!", fg = "blue", command=self.write_slogan)
        self.slogan.pack(side=LEFT)
        # Button geometry is in text units here, not pixels.
        self.button.config(height= 2, width= 20)
        self.slogan.config(height=2, width=20)
    def write_slogan(self):
        """Callback for the slogan button."""
        print("Welcome to Python Advanced Programming")
# First demo: run the App frame until its Close button quits the loop.
root = Tk()
root.title("Hello World")
app = App(root)
root.mainloop()
#This code is for the timer
import tkinter as tk
counter = 0
def counter_label(label):
    # NOTE(review): this local `counter = 0` is dead - count() rebinds the
    # module-level `counter` via `global`, so the shadowing is just confusing.
    counter = 0
    def count():
        global counter
        counter +=1
        label.config(text = str(counter))
        # Re-schedule ourselves to tick again in one second.
        label.after(1000,count)
    count()
# Second demo: a label that counts seconds until Stop destroys the window.
# NOTE(review): the title looks like a typo for "Counting Second".
root = tk.Tk()
root.title("ounting Second")
label = tk.Label(root, fg="dark green")
label.pack()
counter_label(label)
button = tk.Button(root, text = 'Stop', width=55, command = root.destroy)
button.pack()
root.mainloop()
|
UTF-8
|
Python
| false | false | 1,223 |
py
| 115 |
gui_program.py
| 111 | 0.645953 | 0.629599 | 0 | 43 | 27.465116 | 94 |
Price47/advent-of-code
| 5,446,018,571,810 |
da12fb348039ed9d62387bb38be8b8b7255394a7
|
da3d79607c7787e1d8036c291615c7996d262c1c
|
/AOC_20/day_13/bus_scheduler.py
|
0a87dc175c5295b4522ce89fd65b585f065b517d
|
[] |
no_license
|
https://github.com/Price47/advent-of-code
|
2b3512e2134cb958af7e0425643ad39c835192f4
|
e691b9276fd0f82e9760707da17a522ae512b9b0
|
refs/heads/master
| 2022-12-07T23:10:32.846923 | 2022-12-05T06:11:46 | 2022-12-05T06:11:46 | 160,543,240 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import math
from core import DefaultLogger, AOCBase
from collections import defaultdict
import re
log = DefaultLogger.get_log()
class BusScheduler(AOCBase):
    """
    https://adventofcode.com/2020/day/13

    Part 1: first bus catchable at or after the target departure time.
    Part 2: earliest timestamp at which every bus departs at its list offset
    (solved by sieving over the bus periods).
    """
    def __init__(self):
        data = self.read_input()
        # Earliest minute we could depart (input line 1).
        self.depart_time = int(data[0])
        # Raw bus IDs from input line 2; 'x' marks an out-of-service slot
        # whose position still matters for part 2 offsets.
        self.buses = [b for b in data[1].split(',')]
        # All HHMM clock readings of a day; appears unused by the methods below.
        self.hours = [int(f'{h}{i if i>9 else self._pad(i)}') for h in range(24) for i in range(60)]
    def _pad(self, i):
        # Zero-pad single-digit minutes for the HHMM encoding above.
        return f'0{i}'
    def bus_schedules(self):
        # Map each in-service bus ID to its departure times (multiples of the
        # ID), generated out to twice the target so one at/after it exists.
        buses = [int(b) for b in self.buses if b != 'x']
        schedules = {b: [i for i in range(0, self.depart_time * 2, b)] for b in buses}
        return schedules
    def closest_time(self, schedule):
        # First departure at or after depart_time (None if none qualifies).
        for s in schedule:
            if s >= self.depart_time:
                return s
    def find_departure_time(self):
        # Pair every bus with its earliest catchable departure time.
        schedules = self.bus_schedules()
        closest_bus_departures = []
        for bus, schedule in schedules.items():
            closest_bus_departures.append((bus, self.closest_time(schedule)))
        return closest_bus_departures
    def best_departure_time(self):
        # Part 1 answer: the bus with the minimal wait, reported as wait * bus_id.
        closest_departure_time = self.find_departure_time()
        b = {d: b for b, d in closest_departure_time}
        wait_time = min([d[1] - self.depart_time for d in closest_departure_time])
        bus_id = b[wait_time + self.depart_time]
        log.info(f"Best departure time is bus {bus_id} [ {wait_time * bus_id} ]")
    def _check_subsequent(self, t, buses):
        # Recursively verify that each (offset, bus) pair departs at t + offset.
        offset, bus = buses[0]
        if (t + offset) % bus == 0:
            if len(buses) > 1:
                return self._check_subsequent(t, buses[1:])
            else:
                return True
        else:
            return False
    def subsequent_departures(self):
        # Brute-force part 2: try every timestamp in turn (kept for reference;
        # far too slow for the real puzzle input).
        d = [(i, v) for i, v in enumerate(self.read_input()[1].split(','))]
        buses = [(i, int(v)) for i, v in filter(lambda x: x[1] != 'x', d)]
        timestamp_subsequent = False
        t = 0
        while not timestamp_subsequent:
            log.debug(f'timestamp {t}')
            timestamp_subsequent = self._check_subsequent(t, buses)
            if timestamp_subsequent:
                return t
            t += 1
    def ExtendedEuclid(self, x, y):
        # Iterative extended Euclid: returns Bezout coefficients (x0, y0)
        # satisfying x0*x + y0*y == gcd(x, y).
        x0, x1, y0, y1 = 1, 0, 0, 1
        while y > 0:
            q, x, y = math.floor(x / y), y, x % y
            x0, x1 = x1, x0 - q * x1
            y0, y1 = y1, y0 - q * y1
        return x0, y0 # gcd and the two coefficients
    def invmod(self, a, m):
        # Modular inverse of a mod m via the extended-Euclid coefficient.
        x, _ = self.ExtendedEuclid(a, m)
        return x % m
    def ChineseRemainderGauss(self, n, N, a):
        # Direct CRT solve of x = a[i] (mod n[i]) with N the product of moduli.
        # (Unused by subsequent_departures_crt, which sieves instead.)
        result = 0
        for i in range(len(n)):
            ai = a[i]
            ni = n[i]
            bi = N // ni
            result += ai * bi * self.invmod(bi, ni)
        return result % N
    def subsequent_departures_crt(self):
        """
        https://en.wikipedia.org/wiki/Chinese_remainder_theorem#Search_by_sieving
        https://medium.com/@astartekraus/the-chinese-remainder-theorem-ea110f48248c
        :return:
        """
        # order by modulo descending
        # NOTE(review): this actually sorts by offset (x[0]), not by modulus;
        # sieving is correct in any order, but the comment and intent disagree.
        departures = [(i, int(v)) for i, v in enumerate(self.read_input()[1].split(',')) if v != 'x']
        sorted_departures = sorted(departures, key=lambda x: x[0], reverse=True)
        print(sorted_departures)
        t, increment = 0, sorted_departures[0][1]
        for offset, time in sorted_departures[1:]:
            log.debug(f'timestamp {t}')
            # Step t by the product of the periods satisfied so far until this
            # bus also departs at t + offset, then fold its period in.
            while (t + offset) % time != 0:
                t += increment
            increment *= time
        return t
    def main(self):
        # self.best_departure_time()
        log.info(self.subsequent_departures_crt())
|
UTF-8
|
Python
| false | false | 3,734 |
py
| 91 |
bus_scheduler.py
| 91 | 0.543921 | 0.527317 | 0 | 119 | 30.369748 | 102 |
JulianWack/MPhysProject
| 3,951,369,956,430 |
1c9f617ae723db95658204129e1b924d2abe818c
|
deb16f5ca3f4c5f686e5638fdeda8668b49995d1
|
/SU2xSU2_preproduction/tests.py
|
eed016ea031ac76d3940dee8df4afe87ce7cc99c
|
[] |
no_license
|
https://github.com/JulianWack/MPhysProject
|
bb851296b338ca127d72c87b139c1b2500e3fcd1
|
d53328bc17d2f56e23326540a6157d7f0ff035f7
|
refs/heads/master
| 2023-08-18T07:23:58.013885 | 2023-08-13T10:11:04 | 2023-08-13T10:11:04 | 548,388,590 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
from matplotlib.ticker import MaxNLocator
import matplotlib.pyplot as plt
import matplotlib as mpl
from cycler import cycler
from astropy.stats import jackknife_stats
from scipy.optimize import curve_fit
from alive_progress import alive_bar
import timeit
import time
from datetime import timedelta
plt.style.use('science')
plt.rcParams.update({'font.size': 20})
# plt.rcParams.update({'text.usetex': False}) # faster rendering
mpl.rcParams['axes.prop_cycle'] = cycler(color=['k', 'g', 'b', 'r'])
import SU2_mat_routines as SU2
from SU2xSU2 import SU2xSU2
from calibrate_paras import calibrate
import correlations
##### Check if my matrix routine gives same result as np #####
def test_SU2_routines():
    """Cross-check the custom SU(2) parameter routines (sum, dot, hc, det, tr)
    against explicit 2x2 complex numpy matrices built from the same random
    parameters on an N x N lattice, reporting any site where they disagree."""
    # Define a N by N lattice and and describe matrices at every site by some arbitrary parameters
    N = 10
    # np.random.seed(42)
    aa = 10*np.random.random((N,N,4))
    bb = -66.8*np.random.random((N,N,4))
    # compute quantities to compare using my routines
    my_A = aa
    my_B = bb
    my_C = SU2.dot(my_A, my_B)
    # compute quantities to compare using np routines
    def make_np_mat(a):
        # Build the explicit 2x2 complex matrix from the 4 real parameters.
        return np.matrix( [[a[0]+1j*a[3], a[2]+1j*a[1]], [-a[2]+1j*a[1], a[0]-1j*a[3]]] )
    np_A = np.empty((N,N), dtype=object)
    np_B = np.empty((N,N), dtype=object)
    np_C = np.empty((N,N), dtype=object)
    for i in range(N):
        for j in range(N):
            np_A[i,j] = make_np_mat(aa[i,j,:])
            np_B[i,j] = make_np_mat(bb[i,j,:])
            np_C[i,j] = np.matmul(np_A[i,j], np_B[i,j])
    # compare results: need to convert np.matrix object into ndarray (via .A) for allclose comparison
    # sum
    # SU2.sum returns a unit-determinant element and a scale factor k.
    su2_element, k = SU2.sum(my_A, my_B)
    my_sum = SU2.make_mats(k*su2_element)
    all_equal = True
    for i in range(N):
        for j in range(N):
            same = np.allclose(my_sum[i,j].A, (np_A[i,j]+np_B[i,j]).A)
            if not same:
                all_equal = False
                print('Unequal sum at site: (%d, %d)'%(i,j))
    if all_equal:
        print('All sums equal')
    # product
    my_prod = SU2.make_mats(my_C)
    all_equal = True
    for i in range(N):
        for j in range(N):
            same = np.allclose(my_prod[i,j], np_C[i,j])
            if not same:
                all_equal = False
                print('Unequal product at site: (%d, %d)'%(i,j))
    if all_equal:
        print('All products equal')
    # hermitian conjugate
    my_hc = SU2.make_mats(SU2.hc(my_A))
    all_equal = True
    for i in range(N):
        for j in range(N):
            same = np.allclose(my_hc[i,j].A, (np_A[i,j].H).A)
            if not same:
                all_equal = False
                print('Unequal hc at site: (%d, %d)'%(i,j))
    if all_equal:
        print('All hc equal')
    # det
    my_det = SU2.det(my_A)
    all_equal = True
    for i in range(N):
        for j in range(N):
            same = np.allclose(my_det[i,j], np.linalg.det(np_A[i,j].A).real)
            if not same:
                all_equal = False
                print('Unequal det at site: (%d, %d)'%(i,j))
    if all_equal:
        print('All det equal')
    # trace
    my_tr = SU2.tr(my_A)
    all_equal = True
    for i in range(N):
        for j in range(N):
            same = np.allclose(my_tr[i,j], np.trace(np_A[i,j].A).real)
            if not same:
                all_equal = False
                print('Unequal tr at site: (%d, %d)'%(i,j))
    if all_equal:
        print('All tr equal')
# test_SU2_routines()
########
##### Compare speed of single matrix multiplication #####
def test_SU2_prod_speed():
    """Benchmark a single SU(2) product: the custom 4-parameter routine vs
    numpy matrix multiplication of explicit 2x2 complex matrices."""
    # tabbing important to assure that string has no tabs. Otherwise timeit throws an error
    # when using timeit outside a function, can tab all lines to the same height
    set_up = '''
import numpy as np
import SU2_mat_routines as SU2
'''
    my_test_code = '''
aa = np.random.random((1,1,4))
bb = np.random.random((1,1,4))
my_A = aa
my_B = bb
SU2.dot(my_A, my_B)
'''
    np_test_code = '''
aa = np.random.random(4)
bb = np.random.random(4)
def make_np_mat(a):
    return np.matrix( [[a[0]+1j*a[3], a[2]+1j*a[1]], [-a[2]+1j*a[1], a[0]-1j*a[3]]] )
np_A = make_np_mat(aa)
np_B = make_np_mat(bb)
np.matmul(np_A, np_B)
'''
    n_iter = 10000
    # print total time needed to perform n_iter executions of the test code
    print('My product: ', timeit.timeit(setup=set_up, stmt=my_test_code, number=n_iter))
    print('np product: ', timeit.timeit(setup=set_up, stmt=np_test_code, number=n_iter))
# test_SU2_prod_speed()
########
##### Check that my nearest neighbor construction and indexing works #####
def test_NN():
    """Sanity-check the periodic nearest-neighbour coordinate construction.

    Builds a small lattice whose site parameters encode the site index, then
    prints the parameters of the four neighbours (right, left, top, bottom)
    of one site and of every site, plus an example reduction over them.
    """
    N = 4  # lattice sites per dimension
    M = 2  # parameters per site (M=2 for readability)
    phi = np.empty((N, N, M))
    # Parameters encode the flattened site index (and its negative).
    phi[:, :, 0] = np.arange(N**2).reshape((N, N))
    phi[:, :, 1] = -np.arange(N**2).reshape((N, N))
    # (N,N,2): the (row, col) coordinates of every lattice site.
    lattice_coords = np.indices((N, N)).transpose(1, 2, 0)
    # Rolling with periodic boundaries gives, at entry (i,j), the coordinates
    # of a neighbour of site (i,j); stacking the four shifted copies yields
    # shape (N,N,4,2) in the fixed order right, left, top, bottom.
    NN = np.stack(
        [
            np.roll(lattice_coords, -1, axis=1),  # right neighbour (i, j+1)
            np.roll(lattice_coords, 1, axis=1),   # left neighbour (i, j-1)
            np.roll(lattice_coords, 1, axis=0),   # top neighbour (i-1, j)
            np.roll(lattice_coords, -1, axis=0),  # bottom neighbour (i+1, j)
        ],
        axis=2,
    ).astype(int)
    # Split row and column coordinates into an index tuple usable on phi.
    NN_mask = (NN[:, :, :, 0], NN[:, :, :, 1])
    # single test: parameters of the neighbours of site (0, 0)
    one_neighbor_paras = phi[NN_mask][0, 0]
    print(one_neighbor_paras)
    # full test: neighbour parameters for every site, shape (N,N,4,M)
    all_neighbor_paras = phi[NN_mask]
    print(all_neighbor_paras)
    # example of operating on all neighbours of all sites simultaneously
    print(np.sum(all_neighbor_paras, axis=3))
# test_NN()
##### Check that leapfrog is reversible #####
def test_leapfrog():
    '''Check that the Fourier-accelerated leapfrog integrator is reversible:
    integrate forward, flip the momentum sign, integrate again and compare
    with the initial state. The printed errors should be at machine-precision
    level for a reversible integrator.'''
    N = 16
    ell = 17 # increase ell at fixed eps to increase error
    eps = 0.7 # 1/ell
    model = SU2xSU2(N, a=1, ell=ell, eps=eps, beta=1)
    np.random.seed(6) # fixed seed so the check is deterministic

    # cold
    # a0 = np.ones((N,N,1))
    # ai = np.zeros((N,N,3))
    # phi_start = np.concatenate([a0,ai], axis=2)
    # hot: random SU(2) parameter vectors, normalised to the group manifold
    a = np.random.standard_normal((N,N,4))
    phi_start = SU2.renorm(a)

    # normal
    # pi_start = np.random.standard_normal((N,N,3))
    # phi_end, pi_end = model.leapfrog(phi_start, pi_start)
    # phi_start_new, pi_start_new = model.leapfrog(phi_end, -pi_end)
    # FA: sample momenta with the acceleration kernel and integrate
    model.A = model.kernel_inv_F()
    pi_start = model.pi_samples()
    phi_end, pi_end = model.leapfrog_FA(phi_start, pi_start)
    # reversing the momentum and integrating again must return to the start
    phi_start_new, pi_start_new = model.leapfrog_FA(phi_end, -pi_end)

    phi_delta = np.abs(phi_start_new-phi_start)
    pi_delta = np.abs(pi_start_new+pi_start) # momentum comes back negated, hence '+'

    print('phi error:')
    print('Total: ', np.sum(phi_delta))
    print('Per site avg: ', 1/N**2 * np.sum(phi_delta))
    print('Biggest: ', np.max(phi_delta))

    print('\npi error:')
    print('Total: ', np.sum(pi_delta))
    print('Per site avg: ', 1/N**2 * np.sum(pi_delta))
    print('Biggest: ', np.max(pi_delta))
# test_leapfrog()
##### Check equipartion #####
def test_equi():
    '''SU(2) matrix has 3 DoF. Hence expect by equipartition (with k_b T = 1)
    that the average KE per site is 3*1/2.

    Runs a plain (non-accelerated) HMC chain and prints the average kinetic
    energy per lattice site with its standard error.'''
    def KE_per_site(pi, N):
        # kinetic energy per site for a momentum field pi of shape (N,N,3)
        K = 1/2 * np.sum(pi**2)
        return K/N**2

    N, ell, eps = 16, 10, 0.1
    M = 2000 # number of HMC trajectories
    model = SU2xSU2(N, a=1, ell=ell, eps=eps, beta=1)
    configs = np.empty((M+1, N, N, 4))
    momenta = np.empty((M, N, N, 3))
    kins = np.empty(M)
    # cold start
    a0 = np.ones((N,N,1))
    ai = np.zeros((N,N,3))
    configs[0] = np.concatenate([a0,ai], axis=2)

    for i in range(1,M+1):
        phi = configs[i-1]
        pi = np.random.standard_normal((N,N,3))
        phi_new, pi_new = model.leapfrog(phi, pi)
        delta_H = model.Ham(phi_new,-pi_new) - model.Ham(phi,pi)
        acc_prob = np.min([1, np.exp(-delta_H)]) # Metropolis acceptance
        if acc_prob > np.random.random():
            configs[i] = phi_new
            momenta[i-1] = pi_new
        else:
            configs[i] = phi
            momenta[i-1] = pi
        # BUG FIX: was kins[i], which runs past the end of kins (length M) on
        # the final iteration i=M and leaves kins[0] uninitialised.
        kins[i-1] = KE_per_site(momenta[i-1], N)

    # reject 10% burn in
    burn_in_idx = int(M*0.1)
    KE_avg = np.mean(kins[burn_in_idx:])
    # BUG FIX: np.sqrt was applied to the shape *tuple*; use the sample count.
    KE_err = np.std(kins[burn_in_idx:]) / np.sqrt(kins[burn_in_idx:].size)
    print('avg KE per site = %.5f +/- %.5f'%(KE_avg, KE_err))
# test_equi()
##### Check equipartion for acceleration#####
def test_equi_FA():
    '''SU(2) matrix has 3 DoF. Hence expect by equipartition (with k_b T = 1)
    that the average KE per site is 3*1/2.

    Same check as test_equi but for the Fourier-accelerated dynamics: momenta
    are drawn with the acceleration kernel and the kinetic energy is evaluated
    in Fourier space.'''
    def KE_per_site(pi, N, A):
        # magnitude of the FT of each momentum component, summed over the 3 components
        pi_F_mag = np.sum( np.abs(np.fft.fft2(pi, axes=(0,1)))**2, axis=-1 ) # (N,N)
        T = 1/(2*N**2) * np.sum(pi_F_mag*A) # sum over momentum Fourier lattice
        return T/N**2

    N, ell, eps = 32, 4, 1/4 # found manually/ by using calibration routine
    M = 1000
    model = SU2xSU2(N, a=0.3, ell=ell, eps=eps, beta=0.75)
    configs = np.empty((M+1, N, N, 4))
    momenta = np.empty((M, N, N, 3))
    kins = np.empty(M)
    A = model.kernel_inv_F()
    model.A = A
    # cold start: seems to have problems evolving away from starting configuration (get 0% acceptance). Hence use hot start.
    # a0 = np.ones((N,N,1))
    # ai = np.zeros((N,N,3))
    # configs[0] = np.concatenate([a0,ai], axis=2)
    a = np.random.standard_normal((N,N,4))
    configs[0] = SU2.renorm(a)

    n_acc = 0
    with alive_bar(M) as bar:
        for i in range(1,M+1):
            phi = configs[i-1]
            pi = model.pi_samples()
            phi_new, pi_new = model.leapfrog_FA(phi, pi)
            delta_H = model.Ham_FA(phi_new,-pi_new) - model.Ham_FA(phi,pi)
            acc_prob = np.min([1, np.exp(-delta_H)]) # Metropolis acceptance
            if acc_prob > np.random.random():
                n_acc += 1
                configs[i] = phi_new
                momenta[i-1] = pi_new
            else:
                configs[i] = phi
                momenta[i-1] = pi
            kins[i-1] = KE_per_site(momenta[i-1], N, A)
            bar()
    print('acc rate = %.2f%%'%(n_acc/M*100))

    # reject 10% burn in
    burn_in_idx = int(M*0.1)
    KE_avg = np.mean(kins[burn_in_idx:])
    # BUG FIX: np.sqrt was applied to the shape *tuple*; use the sample count.
    KE_err = np.std(kins[burn_in_idx:]) / np.sqrt(kins[burn_in_idx:].size)
    print('avg KE per site = %.5f +/- %.5f'%(KE_avg, KE_err))
# test_equi_FA()
##### Check disordered phase #####
def test_avg_components():
    '''In O(4) interpretation, each site hosts a 4D vector. The system is in the disordered phase for beta!=infty
    such that the components of the vector when averaged over configurations and sites vanish.

    Runs a short HMC simulation and prints the averaged components with errors.'''
    sim = SU2xSU2(N=16, a=1, ell=10, eps=0.1, beta=1)
    sim.run_HMC(5000, 20, 0.1, store_data=False)
    component_avgs, component_errs = sim.order_parameter(make_plot=False)
    for idx, (avg_i, err_i) in enumerate(zip(component_avgs, component_errs)):
        print('<m_%d> : %.5f +/- %.5f'%(idx, avg_i, err_i))
# test_avg_components()
##### plot residual between simulation and coupling expansions #####
def residual_coupling():
    '''Plot the residual between the simulated internal energy (loaded from
    'data/coupling_expansion') and the strong/weak coupling expansions.'''
    table = np.loadtxt('data/coupling_expansion')
    betas = table[:,0]
    e_avg, e_err = table[:,4], table[:,5]

    fig = plt.figure(figsize=(8,6))

    # strong coupling expansion, applied for beta < 1
    strong_mask = betas<1
    b_s = betas[strong_mask]
    strong = 1/2*b_s +1/6*b_s**3 +1/6*b_s**5

    # weak coupling expansion, applied for beta > 0.6
    weak_mask = betas>0.6
    b_w = betas[weak_mask]
    Q1 = 0.0958876
    Q2 = -0.0670
    weak = 1 - 3/(8*b_w) * (1 + 1/(16*b_w) + (1/64 + 3/16*Q1 + 1/8*Q2)/b_w**2)

    plt.errorbar(b_s, e_avg[strong_mask]-strong, yerr=e_err[strong_mask], color='g', fmt='.', capsize=2, label='HMC - s.c.')
    plt.errorbar(b_w, e_avg[weak_mask]-weak, yerr=e_err[weak_mask], color='b', fmt='.', capsize=2, label='HMC - w.c.')
    plt.legend(prop={'size': 12})
    plt.xlabel(r'$\beta$')
    plt.ylabel(r'residual')
    plt.show()
    return
# residual_coupling()
##### naive and FFT based computation of wall wall correlations #####
def ww_naive(model):
    '''
    Naive approach to compute the correlation function.

    Averages the explicit point-to-point correlation over all wall pairs of
    every stored configuration (O(N^2) per wall pair). Returns the normalised
    correlation function, its jackknife error, and the wall-clock time taken.
    '''
    def ww_correlation(i, j, m, model):
        '''correlates ith and jth column of lattice, defined as the average point to point correlation for all points contained in the walls'''
        pp_corrs = np.empty(model.N**2)
        for p in range(model.N):
            for q in range(model.N):
                # correlate matrices A and B at points (p,i) and (q,j). Reshape to use routines
                A = model.configs[m,p,i].reshape((1,1,4))
                B = model.configs[m,q,j].reshape((1,1,4))
                k = p*model.N + q
                prod = SU2.dot(A, SU2.hc(B))
                pp_corrs[k] = SU2.tr(prod + SU2.hc(prod))
        return np.mean(pp_corrs)

    L = model.N # largest considered separation
    ww_cor = np.empty(L+1) # wall wall correlation for different separations
    ww_cor_err = np.empty(L+1)
    ds = np.arange(L+1)

    t1 = time.time()
    for d in ds:
        # smaller errors when using each wall wall pair as data point to estimate mean and error
        # all_ww_pairs = np.empty((model.M, model.N)) # each row contains all wall wall correlations at fixed d for a different configuration
        # for m in range(model.M):
        #     for i in range(model.N):
        #         all_ww_pairs[m,i] = ww_correlation(i, (i+d)%model.N, m, model)
        # ww_cor[d], _, ww_cor_err[d], _ = jackknife_stats(all_ww_pairs.flatten(), np.mean)

        avg_ww_configs = np.empty(model.M) # average wall wall correlation of each configuration
        for m in range(model.M):
            avg_ww_pairs = np.empty(model.N) # average wall wall correlation from all pairs in a single configuration
            for i in range(model.N):
                # pair wall i with the wall d columns to its right (periodic bcs)
                avg_ww_pairs[i] = ww_correlation(i, (i+d)%model.N, m, model)
            avg_ww_configs[m] = np.mean(avg_ww_pairs)
        ww_cor[d], _, ww_cor_err[d], _ = jackknife_stats(avg_ww_configs, np.mean)
        print('d=%d done'%d)
    # normalize
    ww_cor, ww_cor_err = ww_cor/ww_cor[0], ww_cor_err/ww_cor[0]
    t2 = time.time()

    return ww_cor, ww_cor_err, t2-t1
def ww_fast(model):
    '''
    Computes the wall to wall correlation as described in the report via the
    cross correlation theorem.

    Returns the normalised correlation function (separations 0..N, with d=N
    duplicated from d=0 via the periodic boundary conditions), its error, and
    the wall-clock time taken.
    '''
    # include separation d=model.N manually as being equivalent to d=0
    # (periodic boundary conditions) for nicer plotting and fitting
    ww_cor, ww_cor_err = np.zeros(model.N+1),np.zeros(model.N+1)
    ww_cor_err2 = np.zeros(model.N+1)
    t1 = time.time()
    Phi = np.sum(model.configs, axis=1) # (M,N,4) wall sums of each configuration
    for k in range(4):
        # passes (N,M) arrays: correlations along axis 0 while axis 1 hosts results from repeating the measurement for different configurations
        cf, cf_err = correlations.correlator_repeats(Phi[:,:,k].T, Phi[:,:,k].T)
        ww_cor[:-1] += cf
        ww_cor_err2[:-1] += cf_err**2 # errors coming from each component of the parameter vector add in quadrature
    ww_cor *= 4/model.N**2
    ww_cor_err = 4/model.N**2 * np.sqrt(ww_cor_err2)
    ww_cor[-1], ww_cor_err[-1] = ww_cor[0], ww_cor_err[0]
    # normalize
    ww_cor, ww_cor_err = ww_cor/ww_cor[0], ww_cor_err/ww_cor[0]
    t2 = time.time()

    # CLEANUP: removed unused local 'ds = np.arange(model.N+1)'
    return ww_cor, ww_cor_err, t2-t1
def compare_ww():
    '''Compares correlation function and its error from the naive and cross correlation theorem based approach.

    Loads a previously stored simulation from 'data/single_run/', computes the
    wall-wall correlation both ways and plots the relative difference of the
    function values and the two error estimates.
    '''
    paras = np.loadtxt('data/single_run/model_paras.txt')
    sim_paras = np.loadtxt('data/single_run/sim_paras.txt')
    print('Loading simulation:\nN, a, ell, eps, beta\n',paras,'\nM, thin freq, burn in, accept rate\n',sim_paras)
    model = SU2xSU2(*paras)
    model.load_data()

    ds = np.arange(model.N+1)
    cor_func_naive, cor_func_err_naive, _ = ww_naive(model)
    cor_func_fast, cor_func_err_fast, _ = ww_fast(model)

    # difference in function values (relative deviation of the fast method)
    fig = plt.figure(figsize=(8,6))
    plt.plot(ds, 1 - cor_func_fast/cor_func_naive)
    plt.xlabel(r'lattice separation [$a$]')
    plt.ylabel(r'$1-C_{cross} / C_{sum}$')
    fig.gca().xaxis.set_major_locator(MaxNLocator(integer=True)) # set major ticks at integer positions only
    plt.show()

    # difference in errors
    fig = plt.figure(figsize=(8,6))
    plt.plot(ds, cor_func_err_naive, c='k', label='double sum')
    plt.plot(ds, cor_func_err_fast, c='g', label='cross correlation')
    plt.xlabel(r'lattice separation [$a$]')
    plt.ylabel('wall-wall correlation error')
    plt.legend(prop={'size':12})
    fig.gca().xaxis.set_major_locator(MaxNLocator(integer=True)) # set major ticks at integer positions only
    plt.show()
# compare_ww()
##### naive and FFT based computation of susceptibility #####
def susceptibility_naive(phi):
    '''
    Computes the susceptibility for lattice configuration phi by the explicit
    double sum over all relative lattice shifts.

    phi: (N,N,4) array
        parameter values of SU(2) matrices at each lattice site

    Returns
    chi: float
        the susceptibility
    '''
    N = phi.shape[0]
    # accumulate the correlation of phi with every shifted copy of itself;
    # the shifted field is produced by rolling the lattice by (di,dj)
    corr_sum = np.zeros((N,N))
    for di in range(N):
        for dj in range(N):
            phi_shifted = np.roll(phi, shift=(di,dj), axis=(0,1))
            prod = SU2.dot(phi, SU2.hc(phi_shifted))
            corr_sum += SU2.tr(prod + SU2.hc(prod))
    chi = np.sum(corr_sum) / (2*N**2)
    return chi
def susceptibility_fast(phi):
    '''
    Computes the susceptibility i.e. the average point to point correlation
    for configuration phi.

    As described in the report, this is closely related to summing the wall to
    wall correlation function, which is computed efficiently via the cross
    correlation theorem.

    Returns:
    chi: float
        susceptibility of the passed configuration phi
    '''
    N = phi.shape[0]
    wall_sums = np.sum(phi, axis=0) # (N,4): each wall summed into one vector
    ww_cor = np.zeros(N)
    for component in range(4):
        cf, _ = correlations.correlator(wall_sums[:,component], wall_sums[:,component])
        ww_cor += cf
    ww_cor *= 2/N**2
    chi = np.sum(ww_cor)
    return chi
def compute_chi():
    '''Compares naive double sum and cross correlation theorem approach to computing the susceptibility.

    Prints the two results (which should agree) and the wall-clock time of each
    method for a random N x N configuration.
    '''
    N = 64
    # test extreme case in which chi=2*N^2
    # config = np.zeros((model.N,model.N,4))
    # config[:,:,0] = 1
    # random lattice configuration
    a = np.random.standard_normal((N,N,4))
    config = SU2.renorm(a)
    # choose a configuration manually from the chain
    # model = SU2xSU2(N=N, a=1, ell=10, eps=1/10, beta=1)
    # model.run_HMC(20, 1, 0, accel=False, store_data=False)
    # config = model.configs[-10]

    t1 = time.time()
    chi_cross_cor = susceptibility_fast(config)
    t2 = time.time()
    print('cross_cor result: ',chi_cross_cor)
    print('cross_cor time: %s'%(str(timedelta(seconds=t2-t1))))

    t1 = time.time()
    chi_naive = susceptibility_naive(config)
    t2 = time.time()
    print('naive result: ',chi_naive)
    print('naive time: %s'%(str(timedelta(seconds=t2-t1))))
# compute_chi()
def chi_speed_compare():
    '''
    Makes plot to compare speed of chi computation using naive double sum or the cross correlation theorem.

    Times both methods for a range of lattice sizes, plots the CPU times on a
    log scale and stores the timing data in 'data/chi_speed.txt'.
    '''
    Ns = np.linspace(10, 512, num=15, endpoint=True, dtype=int) # naive method can take upto 1e4 sec for single calculation at N approx 400
    ts_crosscor = np.empty_like(Ns, dtype=float)
    ts_naive = np.empty_like(Ns, dtype=float)

    for i,N in enumerate(Ns):
        # random configuration of unit-normalised SU(2) parameter vectors
        a = np.random.standard_normal((N,N,4))
        phi = SU2.renorm(a)

        t1 = time.time_ns() # nessessary to capture run time at small N
        chi_cross_cor = susceptibility_fast(phi)
        t2 = time.time_ns()
        ts_crosscor[i] = t2-t1
        # NOTE(review): cross-correlation times are in nanoseconds while the
        # naive times below are in seconds -- confirm intended before comparing.

        t1 = time.time()
        chi_cross_cor = susceptibility_naive(phi)
        t2 = time.time()
        ts_naive[i] = t2-t1

        print('Completed N = ',N)

    fig = plt.figure(figsize=(8,6))
    plt.plot(Ns, ts_naive, c='k', label='double sum')
    plt.plot(Ns, ts_crosscor, c='g', label='cross correlation')
    plt.xlabel('lattice size N')
    plt.ylabel('CPU time [sec]')
    plt.yscale('log')
    plt.legend(prop={'size': 12})
    plt.show()
    # fig.savefig('plots/chi_speed.pdf')

    data = np.row_stack((Ns, ts_crosscor, ts_naive))
    np.savetxt('data/chi_speed.txt', data, header='lattice size N, CPU time via cross correlation thm, CPU time via double sum')
# chi_speed_compare()
##### effective mass #####
def cosh_corfunc(cor, cor_err):
    '''effective mass and its error based on a cosh correlation function.
    A lattice of even size is assumed.

    cor: (N/2,)
        value of wall to wall correlation function on the first half of the lattice
    cor_err: (N/2,)
        error of correlation function on the first half of the lattice

    Returns
    m_eff: (N/2,)
        effective mass
    m_eff_err: (N/2,)
        error of the effective mass
    '''
    # neighbouring values of the correlator: d+1 (forward) and d-1 (backward)
    fwd, fwd_err = np.roll(cor, -1), np.roll(cor_err, -1)
    bwd, bwd_err = np.roll(cor, 1), np.roll(cor_err, 1)

    # relative errors of the correlator at d, d+1 and d-1
    rel = cor_err / cor
    rel_fwd = fwd_err / fwd
    rel_bwd = bwd_err / bwd

    # cosh ansatz: [C(d+1) + C(d-1)] / [2 C(d)] = cosh(m_eff)
    A, B = fwd/cor, bwd/cor
    x = (A+B)/2
    m_eff = np.arccosh(x)

    # linear error propagation through the ratios and the arccosh
    delta_x = 1/2 * (A*(rel_fwd - rel) + B*(rel_bwd - rel))
    m_eff_err = 1/np.sqrt(x**2-1) * delta_x

    return m_eff, m_eff_err
def exp_corfunc(cor, cor_err):
    '''effective mass and its error based on a purely exponentially decaying
    correlation function. A lattice of even size is assumed.

    cor: (N/2,)
        value of wall to wall correlation function on the first half of the lattice
    cor_err: (N/2,)
        error of correlation function on the first half of the lattice

    Returns
    m_eff: (N/2,)
        effective mass
    m_eff_err: (N/2,)
        error of the effective mass
    '''
    # exponential ansatz: C(d+1)/C(d) = exp(-m_eff)
    shifted = np.roll(cor, -1) # correlator at separation d+1
    m_eff = - np.log(shifted / cor)
    # linear error propagation via the relative errors at d+1 and d
    m_eff_err = np.roll(cor_err, -1)/shifted - cor_err/cor
    # m_eff_err = np.sqrt( (np.roll(ww_cor_err_mirrored, -1)/cor_1)**2 - (ww_cor_err_mirrored/cor)**2 )

    return m_eff, m_eff_err
def effective_mass(N, beta):
    '''
    Effective mass plot for passed value pair of lattice size N (assumed even) and beta. This tends to highlight the worst in the data.
    Loads in correlation function results for one value pair of N and beta and produces an effective mass plot.
    The effective mass will be computed based on the assumption that the correlation function follows the shape of
    a cosh (analytically expected due to periodic boundary conditions) or of a pure exponential decay with the later tending
    to produce a better behaved plot.
    '''
    # file name encodes beta with '.' replaced by '_'
    beta_str = str(np.round(beta, 4)).replace('.', '_')
    ds, ww_cor, ww_cor_err = np.loadtxt('data/corfunc_beta/beta_%s.txt'%beta_str)

    N_2 = int(N/2)
    ds_2 = ds[:N_2+1]
    # exploit symmetry about N/2 to reduce errors (effectively increasing number of data points by factor of 2)
    cor = 1/2 * (ww_cor[:N_2+1] + ww_cor[N_2:][::-1])
    # NOTE(review): the value average mirrors the *second* half (ww_cor[N_2:][::-1])
    # while the error combination mirrors the *first* half (ww_cor_err[N_2::-1]);
    # presumably both should use the same mirrored half -- verify.
    cor_err = np.sqrt(ww_cor_err[:N_2+1]**2 + ww_cor_err[N_2::-1]**2)

    m_eff_cosh, m_eff_err_cosh = cosh_corfunc(cor, cor_err)
    m_eff_exp, m_eff_err_exp = exp_corfunc(cor, cor_err)

    fig = plt.figure(figsize=(8,6))
    cut = -1 # adjust manually
    plt.errorbar(ds_2[:cut], m_eff_cosh[:cut], yerr=m_eff_err_cosh[:cut], fmt='.', capsize=2, label='$\cosh$', c='red')
    plt.errorbar(ds_2[:cut], m_eff_exp[:cut], yerr=m_eff_err_exp[:cut], fmt='.', capsize=2, label='$\exp$', c='b')
    plt.xlabel(r'wall separation [$a$]')
    plt.ylabel('effective mass')
    fig.gca().xaxis.set_major_locator(MaxNLocator(integer=True)) # set major ticks at integer positions only
    plt.legend(prop={'size': 12})
    plt.show()
    # fig.savefig('plots/corfunc_beta/effective_mass/%s.pdf'%beta_str)
    return
# Ns = [40, 40, 64, 64, 64, 96, 96, 160, 160, 224, 300, 400, 400]
# betas = np.array([0.6, 0.6667, 0.7333, 0.8, 0.8667, 0.9333, 1.0, 1.0667, 1.1333, 1.2, 1.2667, 1.3, 1.3333])
# for (N,beta) in zip(Ns, betas):
#     effective_mass(N, beta)

# single example run; the commented loop above sweeps all (N, beta) pairs
effective_mass(64, 0.7333)
|
UTF-8
|
Python
| false | false | 25,380 |
py
| 58 |
tests.py
| 23 | 0.596414 | 0.571474 | 0 | 727 | 33.911967 | 189 |
FabadaLitoral/PMDMDjango
| 15,298,673,508,596 |
2708c3b14ee8f5a634e4091040049441bdbf327f
|
b71c3e15f81e77d9eed6de2002352ab8774100cf
|
/polls/urls.py
|
238a683c5018be05c8f1ea07f68e04328eef5996
|
[] |
no_license
|
https://github.com/FabadaLitoral/PMDMDjango
|
cc063548b2b5a69507ee08322ab442426a7b24b9
|
3d90221f05b4f08e39b300716b49208a84f67a9c
|
refs/heads/master
| 2023-03-03T00:37:50.175981 | 2021-02-17T14:55:27 | 2021-02-17T14:55:27 | 331,359,994 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.urls import path, include
from rest_framework import routers
from . import views

# DRF router exposing the Question and Choice viewsets under
# /question/ and /choice/ (plus the browsable API root at '').
router = routers.DefaultRouter()
router.register(r'question', views.QuestionViewSet)
router.register(r'choice', views.ChoiceViewSet)

urlpatterns = [
    path('', include(router.urls)),
    # NOTE(review): Django matches patterns in order, so this second '' route
    # is shadowed by the router's API root above -- presumably 'index' was
    # meant to live on its own prefix; verify.
    path('', views.index, name='index')
]
|
UTF-8
|
Python
| false | false | 317 |
py
| 2 |
urls.py
| 2 | 0.753943 | 0.753943 | 0 | 13 | 23.384615 | 51 |
Summerotter/heist
| 5,927,054,875,916 |
bbfffec73133b868ebbb9ad7fcbd1596e33ec1b7
|
8a20ef42e37a5524fff484d6b0dc3c3bcf00dbaa
|
/game_data/__init__.py
|
337e298d95377053367dba70e89868d20c342b03
|
[] |
no_license
|
https://github.com/Summerotter/heist
|
ae128d20a73f5e2a3676a0ed99f94cfc16b97c50
|
bd141a5e582bbb922703f86cb4cdec5076b25943
|
refs/heads/master
| 2021-01-18T21:32:09.606718 | 2016-09-27T02:28:48 | 2016-09-27T02:28:48 | 51,196,693 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Public submodules re-exported by 'from game_data import *'.
# BUG FIX: " heist_storage" had a leading space inside the string, which can
# never match a real module name and breaks wildcard imports of the package.
__all__ = ["events", "heist_storage", "items", "option_storage", "text_blurbs"]
|
UTF-8
|
Python
| false | false | 103 |
py
| 24 |
__init__.py
| 20 | 0.650485 | 0.650485 | 0 | 2 | 51 | 76 |
Keshav885/python-1
| 16,054,587,785,393 |
477c264e4101fee027fb612241a479f03197c893
|
f2e9986c99334c84da183d51d4fcdbc725ed261e
|
/Sample_code/ranks.py
|
6113e71a86a1d0a79e7a91fbcea9895c96a96c19
|
[] |
no_license
|
https://github.com/Keshav885/python-1
|
9488403fb9e8e3c083eaa3e8f1ad2436c7af79b3
|
194c8911108dfd2c0bdf0b9a14a48d6e1aead8d7
|
refs/heads/master
| 2021-01-22T06:12:40.108616 | 2017-11-01T18:16:25 | 2017-11-01T18:16:25 | 92,531,084 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Simple interactive grade script: reads a student's name, roll number and
# three subject marks from stdin, then reports the average mark.
print('Enter your name')
name =str(input())  # input() already returns str; the cast is a no-op
print('Enter your rollno. ')
rollno=int(input())
print('Enter your marks in subjects')
sub1= int(input())
sub2= int(input())
sub3= int(input())
total = sub1+sub2+sub3
average = float((total/3))
# NOTE(review): the name and roll number are echoed only for averages above
# 80 -- presumably a merit-list style output; confirm the asymmetry is intended.
if average > 80:
    print(name)
    print(rollno)
    print(average)
else:
    print(average)
|
UTF-8
|
Python
| false | false | 334 |
py
| 34 |
ranks.py
| 24 | 0.667665 | 0.640719 | 0 | 17 | 18.647059 | 37 |
the-fusy/VinnieJonesBot
| 11,982,958,770,612 |
c6ab1f3a64358ef793dea5c7637a3bcc8dcc349d
|
340f98d0774f43203552bd53c39892f4a54325f2
|
/vinniejonesbot/fns/migrations/0001_initial.py
|
24da1c8f15f25ac4f1f44a18315279d67dac5723
|
[] |
no_license
|
https://github.com/the-fusy/VinnieJonesBot
|
d10ea9fbdf10582394475c8209075f9730ab41c3
|
c031971b32a4ae6260dab3f6a3d439f5ad0f2f55
|
refs/heads/master
| 2020-05-27T16:24:52.110686 | 2019-07-14T04:56:43 | 2019-07-14T04:56:43 | 188,700,082 | 0 | 0 | null | false | 2020-06-05T21:38:48 | 2019-05-26T15:26:50 | 2019-07-14T04:57:11 | 2020-06-05T21:38:47 | 42 | 0 | 0 | 1 |
Python
| false | false |
# Generated by Django 2.2.3 on 2019-07-06 14:19

from django.db import migrations, models
import phonenumber_field.modelfields


class Migration(migrations.Migration):
    # Initial migration for the 'fns' app: creates the FnsUser table.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='FnsUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Russian-region phone number; optional.
                ('phone', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, null=True, region='RU')),
                # NOTE(review): plain-text CharField named 'password' --
                # presumably a credential for the external FNS service rather
                # than a Django login password; confirm it is not sensitive.
                ('password', models.CharField(blank=True, max_length=16, null=True)),
            ],
        ),
    ]
|
UTF-8
|
Python
| false | false | 677 |
py
| 21 |
0001_initial.py
| 17 | 0.611521 | 0.581979 | 0 | 23 | 28.434783 | 126 |
Provab-Solutions/erp5
| 16,501,264,364,468 |
3a955f5fb5d592852dcebb22150ab994bc223ea6
|
d283c7cd57705b66e9cba658156dad4b7a0b0ba8
|
/erp5/util/testnode/SlapOSControler.py
|
ade1ce2e9b65a241cef7c107748b0cb6ec7583ae
|
[] |
no_license
|
https://github.com/Provab-Solutions/erp5
|
818cff4f102627541fca1248876745c1a42f14bf
|
72ec5cfd78e99c336b561c2ecf73a4abd4adb98a
|
refs/heads/master
| 2020-02-26T10:41:37.412947 | 2012-05-22T04:00:13 | 2012-05-22T04:01:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
##############################################################################
#
# Copyright (c) 2011 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import os
import slapos.slap
import subprocess
import time
import xml_marshaller
import shutil
# maximum number of computer partitions created / instantiated (sic: "PARTIONS")
MAX_PARTIONS = 10
# maximum number of attempts at building the software release
MAX_SR_RETRIES = 3

class SlapOSControler(object):
    """Drives a local slapos proxy: registers a software release and computer
    partitions, then runs slapgrid to build the software and instantiate it.

    Note: this is Python 2 code (old-style octal literals such as 0750).
    """

    def __init__(self, config, log,
                 slapproxy_log=None, process_manager=None, reset_software=False):
        # config: dict of paths/addresses (proxy_database, software_root, ...)
        # log: callable used for progress messages
        # process_manager: tracks spawned pids so they can be cleaned up later
        log('SlapOSControler, initialize, reset_software: %r' % reset_software)
        self.log = log
        self.config = config
        self.process_manager = process_manager
        # By erasing everything, we make sure that we are able to "update"
        # existing profiles. This is quite dirty way to do updates...
        if os.path.exists(config['proxy_database']):
            os.unlink(config['proxy_database'])
        kwargs = dict(close_fds=True, preexec_fn=os.setsid)
        if slapproxy_log is not None:
            # redirect both proxy streams into the requested log file
            slapproxy_log_fp = open(slapproxy_log, 'w')
            kwargs['stdout'] = slapproxy_log_fp
            kwargs['stderr'] = slapproxy_log_fp
        proxy = subprocess.Popen([config['slapproxy_binary'],
            config['slapos_config']], **kwargs)
        process_manager.process_pid_set.add(proxy.pid)
        # XXX: dirty, giving some time for proxy to being able to accept
        # connections
        time.sleep(10)
        slap = slapos.slap.slap()
        slap.initializeConnection(config['master_url'])
        # register software profile
        self.software_profile = config['custom_profile_path']
        slap.registerSupply().supply(
            self.software_profile,
            computer_guid=config['computer_id'])
        computer = slap.registerComputer(config['computer_id'])
        # Reset all previously generated software if needed
        if reset_software:
            software_root = config['software_root']
            log('SlapOSControler : GOING TO RESET ALL SOFTWARE')
            if os.path.exists(software_root):
                shutil.rmtree(software_root)
            os.mkdir(software_root)
            os.chmod(software_root, 0750)
        instance_root = config['instance_root']
        if os.path.exists(instance_root):
            # delete old paritions which may exists in order to not get its data
            # (ex. MySQL db content) from previous testnode's runs
            # In order to be able to change partition naming scheme, do this at
            # instance_root level (such change happened already, causing problems).
            shutil.rmtree(instance_root)
        os.mkdir(instance_root)
        for i in range(0, MAX_PARTIONS):
            # create partition and configure computer
            # XXX: at the moment all partitions do share same virtual interface address
            # this is not a problem as usually all services are on different ports
            partition_reference = '%s-%s' %(config['partition_reference'], i)
            partition_path = os.path.join(instance_root, partition_reference)
            os.mkdir(partition_path)
            os.chmod(partition_path, 0750)
            # NOTE(review): updateConfiguration is called once per loop
            # iteration but each call only mentions the current partition --
            # presumably the proxy accumulates partitions across calls; verify.
            computer.updateConfiguration(xml_marshaller.xml_marshaller.dumps({
                'address': config['ipv4_address'],
                'instance_root': instance_root,
                'netmask': '255.255.255.255',
                'partition_list': [{'address_list': [{'addr': config['ipv4_address'],
                                                      'netmask': '255.255.255.255'},
                                                     {'addr': config['ipv6_address'],
                                                      'netmask': 'ffff:ffff:ffff::'},],
                                    'path': partition_path,
                                    'reference': partition_reference,
                                    'tap': {'name': partition_reference},
                                    }
                                   ],
                'reference': config['computer_id'],
                'software_root': config['software_root']}))

    def spawn(self, *args, **kw):
        """Delegate process spawning to the shared process manager."""
        return self.process_manager.spawn(*args, **kw)

    def runSoftwareRelease(self, config, environment):
        """Build the software release with slapgrid; returns its status dict."""
        self.log("SlapOSControler.runSoftwareRelease")
        cpu_count = os.sysconf("SC_NPROCESSORS_ONLN")
        os.putenv('MAKEFLAGS', '-j%s' % cpu_count)
        os.environ['PATH'] = environment['PATH']
        # a SR may fail for number of reasons (incl. network failures)
        # so be tolerant and run it a few times before giving up
        # NOTE(review): the loop body returns unconditionally on the first
        # iteration, so MAX_SR_RETRIES is never exercised -- presumably a
        # success check on status_dict was intended before returning; verify.
        for runs in range(0, MAX_SR_RETRIES):
            status_dict = self.spawn(config['slapgrid_software_binary'], '-v', '-c',
                         config['slapos_config'], raise_error_if_fail=False,
                         log_prefix='slapgrid_sr', get_output=False)
        return status_dict

    def runComputerPartition(self, config, environment,
                             stdout=None, stderr=None):
        """Instantiate the computer partitions with slapgrid; returns status dict."""
        self.log("SlapOSControler.runComputerPartition")
        slap = slapos.slap.slap()
        # cloudooo-json is required but this is a hack which should be removed
        config['instance_dict']['cloudooo-json'] = "{}"
        slap.registerOpenOrder().request(self.software_profile,
            partition_reference='testing partition',
            partition_parameter_kw=config['instance_dict'])
        # try to run for all partitions as one partition may in theory request another one
        # this not always is required but curently no way to know how "tree" of partitions
        # may "expand"
        # NOTE(review): as in runSoftwareRelease, the loop returns after the
        # first iteration -- confirm whether repeated runs were intended.
        for runs in range(0, MAX_PARTIONS):
            status_dict = self.spawn(config['slapgrid_partition_binary'], '-v', '-c',
                         config['slapos_config'], raise_error_if_fail=False,
                         log_prefix='slapgrid_cp', get_output=False)
        return status_dict
|
UTF-8
|
Python
| false | false | 6,973 |
py
| 113 |
SlapOSControler.py
| 48 | 0.606912 | 0.598021 | 0 | 141 | 48.453901 | 121 |
jacobcheatley/Euler
| 5,454,608,479,058 |
34a5ee7f7baf3d866d15f97fa611fddb3da11d89
|
d50c0f15888a270282395622e65e6fc22cff2b86
|
/Problems 051 - 100/Problem 060.py
|
68367e5c5590ef0623c440886ac5763afc263700
|
[] |
no_license
|
https://github.com/jacobcheatley/Euler
|
6229244488a100251d5a8aa2990ee3f153e0cf49
|
3aeee5a5e94b5836f0d86cf44eb0866cdac48220
|
refs/heads/master
| 2021-01-17T14:24:17.434299 | 2015-06-28T09:17:32 | 2015-06-28T09:17:32 | 36,707,944 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# TODO: Something other than brute force
from custom.math import is_prime, primes
prime_list = list(primes(10000))
def valid_pair(a, b):
    """True when both concatenations of the two primes (a|b and b|a) are prime."""
    concat_ab = int(str(a) + str(b))
    concat_ba = int(str(b) + str(a))
    return bool(is_prime(concat_ab) and is_prime(concat_ba))
# Project Euler 60: find five primes where every pairwise concatenation is
# prime, and report the minimum sum. Brute-force nested search over the
# precomputed prime list; each inner loop resumes from the current prime so
# only ordered 5-tuples are considered.
valid_fives = []

# Actually finds the answer faster than 97
for p1 in prime_list[:prime_list.index(97)]:
    print(p1)
    for p2 in prime_list[prime_list.index(p1):]:
        if valid_pair(p1, p2):
            for p3 in prime_list[prime_list.index(p2):]:
                if valid_pair(p1, p3) and valid_pair(p2, p3):
                    for p4 in prime_list[prime_list.index(p3):]:
                        if valid_pair(p1, p4) and valid_pair(p2, p4) and valid_pair(p3, p4):
                            for p5 in prime_list[prime_list.index(p4):]:
                                if valid_pair(p1, p5) and valid_pair(p2, p5) and valid_pair(p3, p5) and valid_pair(p4, p5):
                                    print(p1, p2, p3, p4, p5, "sum:", sum((p1, p2, p3, p4, p5)))
                                    valid_fives.append((p1, p2, p3, p4, p5))

print(min(sum(five) for five in valid_fives))
|
UTF-8
|
Python
| false | false | 1,147 |
py
| 70 |
Problem 060.py
| 69 | 0.543156 | 0.496077 | 0 | 28 | 39.964286 | 123 |
Const-Babar-Tortue/Flask-Server
| 16,853,451,687,836 |
a68030a4554c8fb1577a0c65e62fc13bcdfebfed
|
96237ee876d86726cf43b97270b015900c7c547e
|
/interface-test/index.py
|
a180b560fd868c082b637f050fa7fde8f2a3d36b
|
[] |
no_license
|
https://github.com/Const-Babar-Tortue/Flask-Server
|
dc40c887154e7cd7299b7e82deab43a1130d4f74
|
e8b2564d13b77df05d06fe6ffe0007f8b235ef0e
|
refs/heads/master
| 2020-12-28T16:36:04.750357 | 2020-05-27T17:19:31 | 2020-05-27T21:50:12 | 238,407,555 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from datetime import datetime, date
from SHIDev.web import webPage
import admin_tableaub
import mod_tableaub
import etu_tableaub
import prof_tableaub
from yattag import Doc
if __name__ == '__main__':
    from SHI.session import loginRequired


# NOTE(review): the decorator below is applied unconditionally, but
# loginRequired is only imported when the file runs as a script -- importing
# this module from elsewhere would raise NameError at decoration time; verify.
@loginRequired
def page(S=None):
    # Build the dashboard HTML with yattag; S is the login session (or None).
    doc, tag, text = Doc().tagtext()
    if S:
        with tag('div', klass='wrapper'):
            # dashboard variants; all but the student one are disabled
            # """TABLEAU DE BORD PROFS"""
            # doc, tag, text = prof_tableaub.tableau_de_bord_page(doc, tag, text)
            # doc, tag, text = mod_tableaub.tableau_de_bord_page(
            #     S, doc, tag, text)
            # doc, tag, text = admin_tableaub.tableau_de_bord_page(
            #     doc, tag, text)
            doc, tag, text = etu_tableaub.tableau_de_bord_page(doc, tag, text)
        webPage(Page=doc, Session=S)
    else:
        # no session: render the page without session data
        webPage(doc)


if __name__ == '__main__':
    page()
|
UTF-8
|
Python
| false | false | 918 |
py
| 61 |
index.py
| 39 | 0.569717 | 0.569717 | 0 | 33 | 25.818182 | 81 |
computermuseumarnhem/cma_inventoryapp
| 11,338,713,706,641 |
bf96c0f5967149572a94eaa80f207f107ad0b330
|
1a039789052be75bd5f23c6851f8b46c17e8e53f
|
/tmp/check.py
|
4aea27bb1d05a9a6f1f29875de7b54ab0f43b2e3
|
[] |
no_license
|
https://github.com/computermuseumarnhem/cma_inventoryapp
|
10da7dc2330ee93e8fa50c0726a89da07d676925
|
070ec36d5e75fbb7ada45b821285723bb21cd3f6
|
refs/heads/main
| 2023-04-17T19:53:27.973619 | 2021-04-26T14:44:13 | 2021-04-26T14:44:13 | 352,157,285 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
"""Print every inventory row that has a picture attached."""
import sheet

for item in sheet.row():
    # item['picture'] is truthy when the row has an image attached --
    # TODO confirm against sheet.row()'s actual schema
    if item['picture']:
        print(item)
        print()
|
UTF-8
|
Python
| false | false | 123 |
py
| 25 |
check.py
| 16 | 0.577236 | 0.569106 | 0 | 9 | 12.777778 | 24 |
aiporre/QuinoaMarketForecast
| 14,972,256,024,508 |
77cfd36b65d609b44e3fa1b960fbfb54748bfadd
|
e9172452ed3777653ec7a4c7ef6d2269a2309a4c
|
/pandasRollingStats.py
|
30244426f797b5c72c4db22f9f4bba209db9fc6a
|
[] |
no_license
|
https://github.com/aiporre/QuinoaMarketForecast
|
ec7163ea52e7c63c34448c302d4539b96270a3dd
|
b76bf5380b930859392a7c6c46eade2464a94143
|
refs/heads/master
| 2021-09-24T09:20:13.704502 | 2016-10-03T06:51:14 | 2016-10-03T06:51:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pandas as pd
import quandl
import matplotlib.pyplot as plt
from matplotlib import style
style.use('fivethirtyeight')
def get_zinc_price():
    """Download monthly zinc prices from Quandl, cache them to
    'data/zinc.pickle' and return the DataFrame."""
    api_key = open('data/myApiKey.txt', 'r').read()
    zinc = quandl.get('ODA/PZINC_USD', authtoken=api_key)
    df = pd.DataFrame(zinc)
    df.to_pickle('data/zinc.pickle')
    # BUG FIX: previously returned None although the caller assigns the
    # result ('zinc = get_zinc_price()' in the read-or-download fallback).
    return df
def get_wheat_price():
api_key = open('data/myApiKey.txt', 'r').read()
gold = quandl.get('ODA/PWHEAMT_USD', authtoken=api_key)
pd.DataFrame(gold).to_pickle('data/wheat.pickle')
fig = plt.figure()
ax1 = plt.subplot2grid((4,1),(0,0))
ax2 = plt.subplot2grid((4,1),(1,0))
ax3 = plt.subplot2grid((4,1),(2,0))
ax4 = plt.subplot2grid((4,1),(3,0))
# read prices of zinc
try:
zinc = pd.read_pickle('data/zinc.pickle')
except:
zinc = get_zinc_price()
# read prices of wheat
try:
wheat = pd.read_pickle('data/wheat.pickle')
except:
wheat = get_wheat_price()
# calculatin rollings
zinc.columns = ['price_z']
wheat.columns = ['price_w']
zw = zinc.join(wheat)
zinc['priceRA'] = pd.rolling_mean(zinc['price_z'],12)
zinc['priceRS'] = pd.rolling_std(zinc['price_z'],12)
print zw.head(10)
zinc_wheat_corr = pd.rolling_corr(zw['price_z'],zw['price_w'],12)
print zinc.head(15)
print zinc_wheat_corr.head(15)
# zinc.dropna(inplace = True) # posible to use dorpna
zinc[['price_z','priceRA']].plot(ax = ax1)
zinc['priceRS'].plot(ax = ax2)
zw.plot(ax = ax3)
zinc_wheat_corr.plot(ax = ax4)
plt.show()
# standrd deviatio help to filter date that doesnlt fit
# an to undersatd the volatitty of the data.
|
UTF-8
|
Python
| false | false | 1,524 |
py
| 25 |
pandasRollingStats.py
| 15 | 0.679134 | 0.652887 | 0 | 53 | 27.698113 | 65 |
Djambek/Django_project
| 19,378,892,473,313 |
4892b7185a6c89f7c57b9ffd2010095802bc922c
|
eb831224d2867b7bb98b8ef818c81b022ab06868
|
/books/form_profile.py
|
62d7908a8b6757d4753ebc74efbc3433c6f976ea
|
[] |
no_license
|
https://github.com/Djambek/Django_project
|
de86dff9910820e311e7d38d598afb3c66b364fe
|
e0aeee0aeaea6bdbe08158e454d3532e11915294
|
refs/heads/master
| 2023-08-27T23:17:50.151198 | 2021-09-28T13:18:13 | 2021-09-28T13:18:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""ะคะพัะผะฐ ะดะปั ัะผะตะฝั ะฝะธะบะฐ ะธะปะธ ะฟะพััั"""
from django import forms
from django.contrib.auth.models import User
class Profile(forms.Form):
username = forms.CharField(max_length=200)
email = forms.CharField(max_length=200)
|
UTF-8
|
Python
| false | false | 250 |
py
| 21 |
form_profile.py
| 10 | 0.751111 | 0.724444 | 0 | 6 | 36.333333 | 46 |
jamco88/FOS_analytics
| 9,629,316,713,471 |
53a9b08d71d543d3c7f96e167442e718cec636ef
|
d5bc3e337e18052561672b32e762e7a52094295e
|
/server.py
|
d8a2403e341db37a215b5e078143ba6c33f0cfe6
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/jamco88/FOS_analytics
|
c1f58591af187309cf84088fd4c583f059d01134
|
c47fe174b149c673997ade313c4f4305ce0898bc
|
refs/heads/master
| 2020-07-27T07:39:23.524767 | 2019-09-17T09:59:42 | 2019-09-17T09:59:42 | 209,018,034 | 0 | 0 |
Apache-2.0
| false | 2019-10-30T06:34:49 | 2019-09-17T09:51:32 | 2019-09-17T10:23:35 | 2019-10-30T06:34:45 | 33,798 | 0 | 0 | 1 |
Python
| false | false |
import os
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger
from flask import Flask, send_from_directory, render_template
from werkzeug.utils import secure_filename
from scraper.scraper import scrape_ahead_n_complaints
from config import FILE_NAME
from main import create_output_tables
from datetime import datetime, timedelta
from scraper.scraper import get_retrieved_complaints
import json
app = Flask(__name__)
@app.route('/')
def home():
latest_output_files = [file for file in os.listdir("./output_data") if file.endswith(".csv")]
run_dates = json.load(open('run_info.json'))
scraped_nums = get_retrieved_complaints()
return render_template('downloads.html',
outputs=latest_output_files,
num_files=len(scraped_nums),
application_logging=run_dates,
latest_file=max(scraped_nums)
)
@app.route("/download_file/<library>/<filename>")
def file_download(filename, library):
if library == "output":
dl_dir = "./output_data/"
elif library == "scrapes":
dl_dir = os.path.dirname(FILE_NAME)
return send_from_directory(dl_dir, secure_filename(filename), as_attachment=True)
def scrape_run():
print('Scraping: %s' % datetime.now())
if len(get_retrieved_complaints()) == 1:
scrape_ahead_n_complaints(n_ahead=74000)
else:
scrape_ahead_n_complaints(n_ahead=1500)
data = json.load(open('run_info.json'))
data["scrape"] = "Last scrape successfully executed at " + datetime.strftime(datetime.today(), "%d-%m-%Y %H:%M")
with open("run_info.json", "w") as f:
json.dump(data, f)
def analytics_run():
print('Creating analytics tables: %s' % datetime.now())
create_output_tables()
data = json.load(open('run_info.json'))
data["analytics"] = "Output tables last created at " + datetime.strftime(datetime.today(), "%d-%m-%Y %H:%M")
with open("run_info.json", "w") as f:
json.dump(data, f)
def schedule_analytics_run():
""""""
scheduler = BackgroundScheduler()
# Add a scheduled run from restart so tables are populated
scheduler.add_job(analytics_run, "date", run_date=datetime.now().replace(minute=datetime.now().minute+1))
scheduler.add_job(scrape_run, "cron", day_of_week="mon,wed-fri", hour=0, minute=0)
scheduler.add_job(analytics_run, "cron", day_of_week="sat,tue", hour=0, minute=0)
scheduler.start()
port = os.getenv('PORT', '5000')
if __name__ == "__main__":
schedule_analytics_run()
app.run(host='0.0.0.0', port=int(port))
|
UTF-8
|
Python
| false | false | 2,765 |
py
| 17 |
server.py
| 6 | 0.635081 | 0.626763 | 0 | 73 | 35.876712 | 116 |
ManishAradwad/Competitive-Programming
| 5,093,831,235,324 |
da14e867948c6057609b215ff820a64a58d895b2
|
15ace1f492ebaf11d8e1221bf16321277242d0fa
|
/Codeforces/StonesOnTheTable(266A).py
|
7ed75f2513a76226c540680abd75b6957b577eb8
|
[] |
no_license
|
https://github.com/ManishAradwad/Competitive-Programming
|
1818b800e8a8995e754f2aabd2923474341e454e
|
b49e863425209c24da9c27d2ba9ec4d7016850b1
|
refs/heads/master
| 2020-09-01T00:10:05.985713 | 2020-04-13T18:01:15 | 2020-04-13T18:01:15 | 218,823,391 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
n = int(input())
stones = input()
print(sum([1 for i in range(n-1) if stones[i]==stones[i+1]]))
|
UTF-8
|
Python
| false | false | 95 |
py
| 80 |
StonesOnTheTable(266A).py
| 79 | 0.621053 | 0.589474 | 0 | 3 | 31 | 61 |
Cyberpunk3HAB/python.GUI.2
| 16,200,616,684,973 |
497d402e159dff51d94732f7e04f98387ec65223
|
f4c128ad43c50c8c6a2e9fc3ae412238f330af42
|
/python.GUI.2/python.GUI.2.py
|
bc505958bd60f6789f903b281e024861fc302e6e
|
[] |
no_license
|
https://github.com/Cyberpunk3HAB/python.GUI.2
|
22bc08acb7e0a237648b8e25a3874e0e44a43a03
|
11d4ea37446be1950ca09da78cc288d1a9d817b4
|
refs/heads/master
| 2023-04-06T15:32:39.616674 | 2021-04-21T11:17:02 | 2021-04-21T11:17:02 | 360,140,828 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from tkinter import *
def Button_click():
output_text.delete(1.0, END)
manipulated_text = "1. Dogs can see in the dark because of the tapetum lucidum 2.a mirror-like membrane in dogsโ eyes 3.There are light-and-motion sensitive cells in the retina that are referred to as rods"
output_text.insert(END, manipulated_text)
def Button_click1():
output_text.delete(1.0, END)
manipulated_text = "1. A house catโs genome is 95.6 percent tiger, and they share many behaviors with their jungle ancestors"
output_text.insert(END, manipulated_text)
def Button_click2():
output_text.delete(1.0, END)
manipulated_text = "1. Rats are medium-sized rodents with a long tail. A group of rats is called a โmischiefโ! Rats are mainly nocturnal and live underground"
output_text.insert(END, manipulated_text)
def Button_click3():
output_text.delete(1.0, END)
manipulated_text = "1. There are over 30,000 species of fish, There are lots of fish in the sea and some haven't even been discovered yet, Fish breathe through their gills"
output_text.insert(END, manipulated_text)
def Button_click4():
output_text.delete(1.0, END)
manipulated_text = "1. There are 10,000 species of bird, There are around 10,000 different species of bird, All birds lay eggs. All female birds lay eggs"
output_text.insert(END, manipulated_text)
window = Tk()
window.title("Animal Flashcards")
button1 = Button(window, text="DOG", width=5, pady = 150, background="white", command= Button_click)
button1.grid(row=2, column=0, sticky=W)
button2 = Button(window, text="CAT", width=5, pady = 150, background="cornsilk", command= Button_click1)
button2.grid(row=2, column=1, sticky=W)
button3 = Button(window, text="RAT", width=5, pady = 150, background="lemonchiffon", command= Button_click2)
button3.grid(row=2, column=2, sticky=W)
button4 = Button(window, text="FISH", width=5, pady = 150, background="khaki", command= Button_click3)
button4.grid(row=2, column=3, stick=W)
button5 = Button(window, text="BIRD", width=5, pady = 150, background="gold", command= Button_click4)
button5.grid(row=2, column=4, sticky=W)
output_text = Text(window, width=30, height=20, wrap=WORD, background="yellow")
output_text.grid(row=2, column=5, columnspan=4, sticky=W)
window.mainloop()
|
UTF-8
|
Python
| false | false | 2,305 |
py
| 2 |
python.GUI.2.py
| 1 | 0.727906 | 0.688724 | 0 | 49 | 45.877551 | 210 |
ComputerVisionaries/PSF
| 1,967,095,051,614 |
d0387846ee925f5ec76758ae17e0c8b5f019c6f6
|
9bdf4e7fffe5f0e47e889c5fe34dab02c0be2574
|
/fitness/similarity_gradient.py
|
177a7553e0b190c3a849586b98876f9124b197a5
|
[] |
no_license
|
https://github.com/ComputerVisionaries/PSF
|
314cb2cef092b9ad8a99770e84760b8ff462f13e
|
4e5c14f7ee31d9598d1c657fca00cb6efbaf2486
|
refs/heads/master
| 2020-04-02T18:46:07.765294 | 2018-11-30T03:19:28 | 2018-11-30T03:19:28 | 154,712,335 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
from RelativePosition import RelativePosition
def Gil(img):
num_rows, num_cols, num_clrs = img.shape
gradients = np.zeros((num_rows, num_clrs))
for p in range(num_rows):
for c in range(num_clrs):
gradients[p, c] = img[p, num_cols-1, c].astype(np.float64)
- img[p, num_cols-2, c].astype(np.float64)
return gradients
def Gir(img):
num_rows, num_cols, num_clrs = img.shape
gradients = np.zeros((num_rows, num_clrs))
for p in range(num_rows):
for c in range(num_clrs):
gradients[p, c] = img[p, 0, c].astype(np.float64)
- img[p, 1, c].astype(np.float64)
return gradients
def Ui(gi, c):
p, _ = gi.shape
return np.sum(gi.astype(np.float64)[:, c]) / p
def Si(gi):
return np.cov(gi.T)
def Gijlr(img_i, img_j):
num_rows, num_cols, num_clrs = img_i.shape
gradients = np.zeros((num_rows, num_clrs))
for p in range(num_rows):
for c in range(num_clrs):
gradients[p, c] = img_i[p, 0, c].astype(np.float64)
- img_j[p, num_cols-1, c].astype(np.float64)
return gradients
def Gjirl(img_i, img_j):
num_rows, num_cols, num_clrs = img_i.shape
gradients = np.zeros((num_rows, num_clrs))
for p in range(num_rows):
for c in range(num_clrs):
gradients[p, c] = img_i[p, num_cols-1, c].astype(np.float64)
- img_j[p, 0, c].astype(np.float64)
return gradients
def Eq3(mat1, mat2):
term = 0
term += mat1[0] * (mat1[0] * mat2[0, 0]
+ mat1[1] * mat2[0, 1] + mat1[2] * mat2[0, 2])
term += mat1[1] * (mat1[0] * mat2[1, 0]
+ mat1[1] * mat2[1, 1] + mat1[2] * mat2[1, 2])
term += mat1[2] * (mat1[0] * mat2[2, 0]
+ mat1[1] * mat2[2, 1] + mat1[2] * mat2[2, 2])
return term
def mahalanobis_gradient_compat(img1, img2):
num_rows, _, _ = img1.shape
gil = Gil(img1)
uil = np.array([Ui(gil, 0), Ui(gil, 1), Ui(gil, 2)])
sil = Si(gil)
gijlr = Gijlr(img1, img2)
dlr = 0
for p in range(num_rows):
mat1 = gijlr[p, :] - uil
mat2 = np.linalg.inv(sil)
dlr += Eq3(mat1, mat2)
gir = Gir(img2)
uir = np.array([Ui(gir, 0), Ui(gir, 1), Ui(gir, 2)])
sir = Si(gir)
gjirl = Gjirl(img1, img2)
drl = 0
for p in range(num_rows):
mat1 = gjirl[p, :] - uir
mat2 = np.linalg.inv(sir)
drl += Eq3(mat1, mat2)
return dlr + drl
def gradient_similarity(img1, img2, pos):
if pos == RelativePosition.LEFT_RIGHT:
return mahalanobis_gradient_compat(img1, img2)
elif pos == RelativePosition.RIGHT_LEFT:
return mahalanobis_gradient_compat(img2, img1)
elif pos == RelativePosition.ABOVE_BELOW:
img1 = np.transpose(img1, [1, 0, 2])
img2 = np.transpose(img2, [1, 0, 2])
return mahalanobis_gradient_compat(img2, img1)
elif pos == RelativePosition.BELOW_ABOVE:
img1 = np.transpose(img1, [1, 0, 2])
img2 = np.transpose(img2, [1, 0, 2])
return mahalanobis_gradient_compat(img1, img2)
if __name__ == '__main__':
img0_0 = plt.imread("../images/frog0-0.jpeg")
img0_1 = plt.imread("../images/frog0-1.jpeg")
error = gradient_similarity(img0_0, img0_1, RelativePosition.LEFT_RIGHT)
print(error)
error = gradient_similarity(img0_0, img0_1, RelativePosition.RIGHT_LEFT)
print(error)
error = gradient_similarity(img0_0, img0_1, RelativePosition.ABOVE_BELOW)
print(error)
error = gradient_similarity(img0_0, img0_1, RelativePosition.BELOW_ABOVE)
print(error)
|
UTF-8
|
Python
| false | false | 3,668 |
py
| 20 |
similarity_gradient.py
| 19 | 0.576881 | 0.532715 | 0 | 130 | 27.215385 | 77 |
ClaudioSiqueira/Exercicios-Python
| 7,876,970,027,887 |
2be5aa6d5a736c4d94e3a9a17f6e506cc1a4d6ca
|
0a007ec5f618fa1774c4119fb65202d3b24557d0
|
/Exercicios Python/ex108/moeda.py
|
6081369ec4615005de695e0b430b124d664fa420
|
[
"MIT"
] |
permissive
|
https://github.com/ClaudioSiqueira/Exercicios-Python
|
0fe2830ac296bfac06e9fcf9ccc09b9417933b06
|
128387769b34b7d42aee5c1effda16de21216e10
|
refs/heads/master
| 2022-07-31T04:35:20.963539 | 2020-05-23T01:43:52 | 2020-05-23T01:43:52 | 266,242,109 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def metade(preco):
res = preco/2
return res
def aumentar(preco, taxa):
res = preco + (preco * taxa/100)
return res
def diminuir(preco, taxa):
res = preco - (preco * taxa/100)
return res
def dobro(preco):
res = preco * 2
return res
def formatacao(preco = 0, moeda = 'R$'):
return f'{moeda}{preco:.2f}'.replace('.', ',')
'''def moeda(preco = 0, moeda = 'R$'):
return f'{moeda}{preco:.2f}'.replace('.', ',')'''
|
UTF-8
|
Python
| false | false | 459 |
py
| 85 |
moeda.py
| 82 | 0.562092 | 0.535948 | 0 | 27 | 16 | 53 |
faker2081/pikachu2
| 16,724,602,654,670 |
657969e5cc87bb037060c5a1036748fa6d570ecb
|
2d2c10ffa7aa5ee35393371e7f8c13b4fab94446
|
/projects/ai/naic_seg/gseg/third/SkyWa7ch3r/convert_to_onnx.py
|
431ff28573578eab6b9098a2df1b1e1d9e2283b6
|
[] |
no_license
|
https://github.com/faker2081/pikachu2
|
bec83750a5ff3c7b5a26662000517df0f608c1c1
|
4f06d47c7bf79eb4e5a22648e088b3296dad3b2d
|
refs/heads/main
| 2023-09-02T00:28:41.723277 | 2021-11-17T11:15:44 | 2021-11-17T11:15:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import keras2onnx
import tensorflow as tf
from tensorflow import keras
import os
import argparse
import fast_scnn
import deeplabv3plus
import separable_unet
import onnx
import cityscapesscripts.helpers.labels as labels
def convert_logits_to_labels(x):
"""
When this function receives a tensor it should be of
shape [?, target_size[0], target_size[1], 20] and they
should be the logits, not probabilities or classes.
This function will be used in a lambda layer to convert these
to the label ids given by cityscapes. These are given by the table below:
| id | name | color |
-----------------------------------------------
| 0 | unlabeled | (0, 0, 0) |
| 1 | ego vehicle | (0, 0, 0) |
| 2 | rectification border | (0, 0, 0) |
| 3 | out of roi | (0, 0, 0) |
| 4 | static | (0, 0, 0) |
| 5 | dynamic | (111, 74, 0) |
| 6 | ground | (81, 0, 81) |
| 7 | road | (128, 64, 128) |
| 8 | sidewalk | (244, 35, 232) |
| 9 | parking | (250, 170, 160) |
| 10 | rail track | (230, 150, 140) |
| 11 | building | (70, 70, 70) |
| 12 | wall | (102, 102, 156) |
| 13 | fence | (190, 153, 153) |
| 14 | guard rail | (180, 165, 180) |
| 15 | bridge | (150, 100, 100) |
| 16 | tunnel | (150, 120, 90) |
| 17 | pole | (153, 153, 153) |
| 18 | polegroup | (153, 153, 153) |
| 19 | traffic light | (250, 170, 30) |
| 20 | traffic sign | (220, 220, 0) |
| 21 | vegetation | (107, 142, 35) |
| 22 | terrain | (152, 251, 152) |
| 23 | sky | (70, 130, 180) |
| 24 | person | (220, 20, 60) |
| 25 | rider | (255, 0, 0) |
| 26 | car | (0, 0, 142) |
| 27 | truck | (0, 0, 70) |
| 28 | bus | (0, 60, 100) |
| 29 | caravan | (0, 0, 90) |
| 30 | trailer | (0, 0, 110) |
| 31 | train | (0, 80, 100) |
| 32 | motorcycle | (0, 0, 230) |
| 33 | bicycle | (119, 11, 32) |
| -1 | license plate | (0, 0, 142) |
Any unlabeled by my training should be 19, thus these will turn into 0's
which are ignored for evaluation. But this is for the Jetson so let's not worry about that
here.
"""
# Get the label Ids and use 0 as the void class
CITYSCAPES_LABELS = [
label for label in labels.labels if -1 < label.trainId < 255]
CITYSCAPES_LABELS.append(labels.labels[0])
VAL_IDS = [label.id for label in CITYSCAPES_LABELS]
# Convert the IDs into a 2D tensor [20, 1]
VAL_IDS = tf.convert_to_tensor(VAL_IDS, dtype=tf.int32)
VAL_IDS = tf.reshape(VAL_IDS, (VAL_IDS.shape[0], 1))
# Get the trainId labels
x = tf.argmax(x, axis=-1)
x = tf.expand_dims(x, -1)
# Perform a one hot encode
x = tf.one_hot(x, 20, axis=-1)
# Remove the extra dimension
x = tf.squeeze(x, -2)
# Cast to int32
x = tf.cast(x, tf.int32)
# Do a matrix multiplication
return tf.linalg.matmul(x, VAL_IDS)
def keras_to_onnx(model_choice, weights):
"""
If this is being run on the Jetson
Then Tensorflow 1.15.0 is recommended,
keras2onnx 1.6 and onnxconverter-runtime 1.6 installed via pip.
Its able to be converted.
"""
if model_choice == 'fastscnn':
model = fast_scnn.model(num_classes=20, input_size=(1024, 2048, 3))
input_size = '1024x2048'
elif model_choice == 'deeplabv3+':
# Its important here to set the output stride to 8 for inferencing
model = deeplabv3plus.model(num_classes=20, input_size=(
1024, 2048, 3), depthwise=True, output_stride=8)
input_size = '1024x2048'
elif model_choice == 'separable_unet':
# Was trained on a lower resolution
model = separable_unet.model(num_classes=20, input_size=(512, 1024, 3))
input_size = '512x1024'
# Whatever the model is, load the weights chosen
print("Loading the weights for {} at input size {}".format(
model_choice, input_size))
model.load_weights(weights)
# Add the lambda layer to the model
model = keras.Sequential([
model,
keras.layers.Lambda(convert_logits_to_labels)
])
print("Weights Loaded")
# Plot the model for visual purposes in case anyone asks what you used
print("Plotting the model")
tf.keras.utils.plot_model(
model, to_file=os.path.join('./results', model_choice, model_choice+'.png'), show_shapes=True, dpi=300)
# Convert keras model to onnx
print("Converting Keras Model to ONNX")
onnx_model = keras2onnx.convert_keras(model, model.name, target_opset=8)
# Get the filename
onnx_filename = os.path.join(
'results', model_choice, model_choice + '_' + input_size + '.onnx')
# Save the ONNX model
print("Saving the ONNX Model")
onnx.save_model(onnx_model, onnx_filename)
print("Conversion Complete, ready for Jetson AGX Xavier")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", help="Model to convert to uff",
choices=['fastscnn', 'deeplabv3+', 'separable_unet'])
parser.add_argument("-w", "--weights-file", type=str, default="",
help="The weights the model will use in the uff file")
args = parser.parse_args()
# If its a file then use the keras_to_onnx
if os.path.isfile(args.weights_file):
keras_to_onnx(args.model, args.weights_file)
# Else give an error
else:
parser.error("Please provide a weights file. File given not found.")
|
UTF-8
|
Python
| false | false | 6,021 |
py
| 1,584 |
convert_to_onnx.py
| 1,248 | 0.539777 | 0.474506 | 0 | 141 | 41.702128 | 111 |
IntroToCompBioLSU/week12
| 8,624,294,355,856 |
095b1eb1ee7b09716cf70c9f5c88621f5b12b8a3
|
2c334358e768153dae8aa74f7b84eb40a94c988f
|
/assignments/GentryHurst/Week12ghurst2.py
|
0b9b3a5eac68960fbec7ae3a249df684677d30de
|
[] |
no_license
|
https://github.com/IntroToCompBioLSU/week12
|
473106c83e0a31d93bb7411c5597da3b4353db4b
|
a8e6738007dc7fb307da02be44b75bf18274bc49
|
refs/heads/master
| 2020-04-05T06:48:43.942324 | 2018-12-12T08:48:31 | 2018-12-12T08:48:31 | 156,652,255 | 0 | 12 | null | false | 2018-12-11T21:40:10 | 2018-11-08T04:57:34 | 2018-12-11T21:38:47 | 2018-12-11T21:40:10 | 10 | 0 | 12 | 0 |
Python
| false | null |
#!/usr/bin/env python
#class that contains generic information
class clothes:
"""This is a class to store information about any item of clothing"""
def __init__(self, type=" ", season=""):
self.type = type
self.season = season
def summarize(self):
if (self.season) == "summer":
print("This item of clothing is worn during the summer.")
else:
print("This item of clothing is not worn during the summer.")
# a class (within the class of clothes) that contains more specific properties for the class clothing.
class shirt(clothes):
"""This is a class used to store information about clothes that are shirts"""
def __init__(self, color="yellow", brand=" ", season="summer", type="shirt"):
clothes.__init__(self)
self.color = color
self.brand = brand
self.season = season
self.type = type
def summarize(self):
clothes.summarize(self)
print("This item of clothing is a %s %s from the brand %s." %(self.color, self.type, self.brand))
#Class that shares properties with shirts.
class jacket(clothes):
"""This is a class used to store information about clothes that are jackets"""
def __init__(self, color="Black", brand=" ", season="winter", type="jacket"):
clothes.__init__(self)
self.color = color
self.brand = brand
self.type = type
def summarize(self):
clothes.summarize(self) #summarizes all properties of the clothing item
print("This item of clothing is a %s %s from the brand %s." %(self.color, self.type, self.brand))
#defining the items in the class
ItemOne = shirt(color="pink", brand="Gucci")
ItemTwo = jacket(color="black",brand="The North Face")
#run the program
print("What item of clothing would you like to look up?")
#try and except statment
while True:
try:
user = input()
if user == "ItemOne":
print("Item One: \n")
print(ItemOne.summarize())
elif user == "ItemTwo":
print("ItemTwo: \n")
print(ItemTwo.summarize())
break
#if not the correct input error message is printed
except AssertionError:
print("You didn't enter a correct item number. Try \"ItemOne\"")
# DB: I like these examples a lot, although I wasn't able to throw an AssertionError. You
# might need to add an assert statement for that.
|
UTF-8
|
Python
| false | false | 2,302 |
py
| 25 |
Week12ghurst2.py
| 24 | 0.668549 | 0.668549 | 0 | 64 | 34.96875 | 113 |
manitejadesetti/django_pro
| 8,641,474,239,752 |
c5445a562af57aff268a63138ed468b2a4cebbfd
|
65c905eecc25addaa3fac5b53e70ebc18078d5e4
|
/django/samplepro/EmployeeManagementPortal/apps.py
|
62c1ba19c7f0962090b877b26ca5ed1469a6460f
|
[] |
no_license
|
https://github.com/manitejadesetti/django_pro
|
1b53aae7ec74b7c88938dd6db4a4b16d3479eea1
|
c5c380a685af61a77a4947a8628a0e7767335875
|
refs/heads/master
| 2020-04-08T18:27:28.716829 | 2018-12-28T09:27:57 | 2018-12-28T09:27:57 | 159,608,910 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.apps import AppConfig
class EmployeemanagementportalConfig(AppConfig):
name = 'EmployeeManagementPortal'
|
UTF-8
|
Python
| false | false | 123 |
py
| 26 |
apps.py
| 11 | 0.821138 | 0.821138 | 0 | 5 | 23.6 | 48 |
hi-august/pyexercises
| 3,547,643,018,088 |
5899481c703f189be4028af33c73fe2b6d6fc462
|
7f92a710bc893a6dfc3cf69525368106caae24d9
|
/algorithm/selection_sort.py
|
555e3140621fe0579548fb39c4669176d3c664af
|
[] |
no_license
|
https://github.com/hi-august/pyexercises
|
a64e45412a741aa1a883008d8c39e7664d468551
|
94a731fc517682013e96bbccd69efe90728eb788
|
refs/heads/master
| 2021-10-22T07:11:57.899710 | 2019-03-09T02:48:12 | 2019-03-09T02:48:12 | 22,276,989 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding=utf-8
'''
้ฆๅ
ๅจๆชๆๅบๅบๅไธญๆพๅฐๆๅฐ๏ผๅคง๏ผๅ
็ด ๏ผ
ๅญๆพๅฐๆๅบๅบๅ็่ตทๅงไฝ็ฝฎ๏ผ็ถๅ๏ผ
ๅไปๅฉไฝๆชๆๅบๅ
็ด ไธญ็ปง็ปญๅฏปๆพๆๅฐ๏ผๅคง๏ผๅ
็ด ๏ผ
็ถๅๆพๅฐๅทฒๆๅบๅบๅ็ๆซๅฐพใ
ไปฅๆญค็ฑปๆจ๏ผ็ดๅฐๆๆๅ
็ด ๅๆๅบๅฎๆฏใ
'''
# ้ๆฉๆๅบ * ้ๆฉๆๅคงๆ่
ๆๅฐ็ฝฎไบ่ตทๅงๆ่
ๆซๅฐพ
def selectionSort(list):
list_length = len(list)
for x in range(list_length):
min = x
for y in range(x+1,list_length):
if list[min] > list[y]:
min = y
if min != x:
list[x],list[min]=list[min],list[x]
return list
if __name__ == '__main__':
list2 = range(1000)
print selectionSort(list2)
|
UTF-8
|
Python
| false | false | 749 |
py
| 135 |
selection_sort.py
| 93 | 0.553271 | 0.538318 | 0 | 23 | 21.26087 | 47 |
DragonWarrior15/HackerEarth
| 3,212,635,572,073 |
179161c6d285afdddb024044419a1efcfb69385f
|
0e9c5d81c2e31139255844d44ffe82c94fe5c9ea
|
/Python/Greedy/max_strength_cats.py
|
60d52b47319e1a70f823051e9c4a3f8a6ee64d63
|
[] |
no_license
|
https://github.com/DragonWarrior15/HackerEarth
|
f4172b96e744387399aa313d014c6e33d81ce3b1
|
83b2ae73aeadf49d1dcc33598f00a747e0178180
|
refs/heads/master
| 2021-01-19T13:36:51.216568 | 2020-10-04T16:58:40 | 2020-10-04T16:58:40 | 82,396,807 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Ashima has brought home n cats. Now, being a cat lover, she is taking care of the cats
and has asked me to bring cat food for them. Being a guy with no idea what to buy,
I brought some n packets of cat food (I atleast knew that each and every cat being a
good junkie will completely eat a whole packet of cat food and won't share anything with other cats).
Each food packet has some calorie value c. If a cat with original strength s eats that packet,
the strength of the cat becomes c*s. Now, Ashima is angry at me that I did not know this fact
and now all the cats won't be able to eat the maximum strength packet and increase their strength (and annoying powers).
To calm her mood, I need your help. I will provide you with the original strength of each cat
and the calorie value of each of the n packets. Help me by telling me what is the maximum
value of sum of the final strengths of the cats that can be obtained if each cat is given a whole packet of cat food to eat.
Input
The first line of the input will consist of n, the number of cats as well as the number of food packets brought by me.
The second line will consist of n space separated integers si, the original strength of the cats.
Third line consists of n space separated integers ci, the calorie value of food packets.
Output:
An integer which is the maximum value of sum of the final strengths of the cats that can be obtained.
Constraints:
1 โค n โค 106
1 โค si โค 106
1 โค ci โค 106
SAMPLE INPUT
2
3 1
4 3
SAMPLE OUTPUT
15
Explanation
The maximum sum is obtained by giving packet with calorie value 4 to the first cat and the packet with calorie value 3 to the second cat.
'''
from sys import stdin
N = int(stdin.readline().strip())
s = list(map(int, stdin.readline().split(' ')))
c = list(map(int, stdin.readline().split(' ')))
s = sorted(s, reverse = True)
c = sorted(c, reverse = True)
print(sum([s[i] * c[i] for i in range(N)]))
|
UTF-8
|
Python
| false | false | 1,936 |
py
| 122 |
max_strength_cats.py
| 118 | 0.743763 | 0.732848 | 0 | 44 | 42.727273 | 137 |
Inzaghi2012/tumblr_crawler
| 15,882,789,105,778 |
46fd1fb05319cf799c7beedcd4f8372738863987
|
e500a1b70ec14edccc2d5a632e84ff899a17569e
|
/tumblr.py
|
a2489e8e13e104c0410bf5a796e17c45e6762e7b
|
[
"MIT"
] |
permissive
|
https://github.com/Inzaghi2012/tumblr_crawler
|
55e2f4ec62068106c83ddaa8556eda28030760fb
|
94b40dad757ed82432943c43e9755388632593c7
|
refs/heads/master
| 2020-12-25T04:54:07.985689 | 2016-01-18T16:23:02 | 2016-01-18T16:23:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import gevent
from gevent import monkey
monkey.patch_all()
import os
import sys
import urllib2
import json
import re
from bs4 import BeautifulSoup
MAP_FILEEXT = {'video/mp4': 'mp4'}
NUM_WORKER = 20
class TumblrCrawler(object):
def __init__(self, url):
self.url = url.strip()
self.trunk_name = re.search("^http://(?P<name>.+)\.tumblr\.com$", url).group('name')
if not os.path.exists(self.trunk_name):
os.mkdir(self.trunk_name, 0755)
def _load_page(self, url):
retry = 0
while retry < 3:
try:
page = urllib2.urlopen(url)
#print page.headers
#size = int(page.headers['Content-Length'])
return BeautifulSoup(page.read(), "html.parser")
except Exception, e:
print e, url
retry += 1
raise e
def download(self, url, filename):
retry = 0
while retry < 3:
try:
data = urllib2.urlopen(url)
size = int(data.headers['Content-Length'])
if os.path.exists(filename) and os.path.getsize(filename) == size:
print "Already downloaded, skip - %s" % filename
data.close()
return
print "Download - ", filename, url
fp = open(filename, "wb")
fp.write(data.read())
fp.close()
return
except Exception, e:
print e, url
print "try again..."
retry += 1
def _get_file_from_img_tag(self, node):
for img in node.find_all('img'):
if img.has_attr('src'):
file_url = img['src']
filename = "%s/%s" % (self.trunk_name, file_url.rpartition('/')[-1])
self.download(file_url, filename)
def process_video_link(self, node):
for data in node.find_all('iframe'):
contents = self._load_page(data['src'])
for obj in contents.find_all(['source']):
meta = json.loads(obj.parent['data-crt-options'])
file_type = obj['type']
if meta['hdUrl'] != False and isinstance(meta['hdUrl'], (str, unicode)):
print meta['hdUrl']
file_url = meta['hdUrl']
else:
file_url = obj['src']
# Check one more time
if str(file_url.rpartition('/')[-1]).isdigit():
file_url = file_url.rpartition('/')[0]
filename = "%s/%s.%s" % (self.trunk_name, file_url.rpartition('/')[-1], MAP_FILEEXT.get(file_type, 'unknown'))
try:
self.download(file_url, filename)
pass
except Exception, e:
raise e
print contents
print file_url, file_type, filename, meta
def process_photo_link(self, node):
links = node.find_all('a')
if False and len(links) > 0:
try:
for data in links:
file_url = data['href']
contents = self._load_page(file_url)
for img in contents.find_all('img'):
if img.has_attr('data-src'):
file_url = img['data-src']
filename = "%s/%s" % (self.trunk_name, file_url.rpartition('/')[-1])
self.download(file_url, filename)
except Exception, e:
print e
self._get_file_from_img_tag(node)
else:
self._get_file_from_img_tag(node)
def process_photoset_link(self, node):
for data in node.find_all('iframe'):
contents = self._load_page(data['src'])
for img in contents.find_all('a', class_='photoset_photo'):
file_url = img['href']
filename = "%s/%s" % (self.trunk_name, img['href'].rpartition('/')[-1])
self.download(file_url, filename)
def crawler_page(self, page):
posts = page.find(id='posts')
if posts.name == 'ul':
child_tag = 'li'
elif posts.name == 'section':
child_tag = 'article'
for post in posts.find_all(name=child_tag):
for contents in post.find_all(class_=['post-content', 'post-body']):
for container in contents.find_all(class_=['image', 'tumblr_video_container', 'photo-wrapper', 'html_photoset']):
try:
if 'photo-wrapper' in container['class'] or 'image' in container['class']:
self.process_photo_link(container)
pass
elif 'html_photoset' in container['class']:
self.process_photoset_link(container)
pass
else:
self.process_video_link(container)
pass
except Exception, e:
print e, container
def do_crawling(self):
page_link = '/page/1'
worker_list = []
while page_link:
if len(worker_list) < NUM_WORKER:
try:
soup = self._load_page(self.url + page_link)
except Exception, e:
print e, self.url + page_link
gevent.sleep(1)
continue
print "## Crawl...", self.url + page_link
w = gevent.spawn(self.crawler_page, soup)
worker_list.append(w)
next_page_link = soup.find(id='footer').find('a')
if next_page_link:
page_link = next_page_link.get('href')
else:
page_link = None
else:
worker_list = filter(lambda x: x.successful() and x.dead, worker_list)
if len(worker_list) >= NUM_WORKER:
gevent.sleep(1)
gevent.joinall(worker_list)
if __name__ == "__main__":
    # CLI entry point: expects the blog URL as the only argument.
    if len(sys.argv) < 2:
        print "Usage : tumblr <url>"
        exit()
    else:
        # A trailing slash would produce '//page/N' URLs, so strip it.
        url = sys.argv[1].strip('/')
        c = TumblrCrawler(url)
        c.do_crawling()
|
UTF-8
|
Python
| false | false | 6,446 |
py
| 1 |
tumblr.py
| 1 | 0.470214 | 0.465405 | 0 | 186 | 33.655914 | 129 |
Aasthaengg/IBMdataset
| 5,050,881,573,105 |
6b696c640c7ca15076971adc04c1bab6553f78fa
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03418/s196221636.py
|
a2ac656cc8587d49ecbe4c8e4e52a7718f82ac88
|
[] |
no_license
|
https://github.com/Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Read n and k, then accumulate the answer over every modulus in (k, n].
n, k = map(int, input().split())
total = 0
for modulus in range(k + 1, n + 1):
    full_cycles, leftover = divmod(n, modulus)
    # Each complete cycle of `modulus` contributes (modulus - k) values,
    # the partial cycle contributes max(0, leftover - k + 1), and the
    # k == 0 case over-counts by exactly one per modulus.
    total += full_cycles * (modulus - k) + max(0, leftover - k + 1) - (k == 0)
print(total)
|
UTF-8
|
Python
| false | false | 154 |
py
| 202,060 |
s196221636.py
| 202,055 | 0.454545 | 0.415584 | 0 | 6 | 24.666667 | 47 |
Ilhampratama43/Project-Wabot
| 16,088,947,517,269 |
c0a82dda1fa33a6b82f9c104e417ec2e377decde
|
4e77b9cfd26a77b8655cd0dcc13b24c2ea2d0f84
|
/modelWA.py
|
6a106b6d648c4fc9d545249bdb63df7573003ec7
|
[] |
no_license
|
https://github.com/Ilhampratama43/Project-Wabot
|
673f35e8bb1ccc1c8768f26507cc421a8f1fd238
|
a72db263b2cba364a34276b09e9b4b0b60c6ec57
|
refs/heads/main
| 2023-07-17T01:02:13.682954 | 2021-09-02T14:14:47 | 2021-09-02T14:14:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
#automatic open chrome driver when whatsapp class created
#use specific profile chrome
# options = webdriver.ChromeOptions()
# options.add_argument("--user-data-dir=C:\\Users\\ASUS\\AppData\\Local\\Google\\Chrome\\User Data")
# options.add_argument('--profile-directory=Profile 3')
# driver = webdriver.Chrome(executable_path='C:\Program Files\chromedriver\chromedriver.exe', chrome_options=options)
#use default chromedriver
# Path to the locally installed ChromeDriver binary (Windows layout).
path = 'C:\Program Files\chromedriver\chromedriver.exe'
driver = webdriver.Chrome(path)
# Open the WhatsApp Web page in the fresh browser session.
driver.get("https://web.whatsapp.com/")
class whatsapp:
    """Minimal WhatsApp Web auto-responder driven by the module-level
    selenium `driver`."""

    # Cache of the most recent incoming message text.
    lastMsg = ""

    def __init__(self, name):
        self.name = name

    def setLastMsg(self, newMsg):
        self.lastMsg = newMsg

    def clickMSg(self, notif):
        # Open the chat element that raised the notification.
        try:
            print("klik kontak pengirim")
            notif.click()
        except:
            print("Gagal Klik")

    def getMsg(self):
        # Return the text of the newest incoming bubble ("" on failure).
        msg = ""
        try:
            for bubble in driver.find_elements_by_class_name("message-in"):
                fragments = bubble.find_elements_by_class_name("_1Gy50")
                msg = fragments[len(fragments) - 1].text
        except:
            print("Can't found last message")
        return msg

    def isNewMsg(self, to):
        # Wait up to `to` seconds for a new-message indicator element.
        print("Mengecek pesan baru . . .")
        try:
            WebDriverWait(driver, to).until(
                EC.presence_of_element_located((By.CLASS_NAME, "_23LrM"))
            )
        except:
            return False
        return True

    def isEmptyMsg(self, str):
        return len(str) == 0

    def isUpdateMsg(self):
        # True when a new, non-empty incoming message differs from the cache.
        latest = self.getMsg()
        if self.lastMsg != latest and not self.isEmptyMsg(latest):
            self.lastMsg = latest
            return True
        return False

    def processMsg(self, msgIn):
        # Build a canned reply from keywords found in the incoming text.
        lowered = msgIn.lower()
        reply = "Maaf, bahasa anda tidak kami kenali :("
        if "hello" in lowered:
            reply = "Hai, "
        if "hai" in lowered:
            reply = "Hello, "
        if "?" in lowered:
            reply += "Mohon maaf. Layanan belum bisa memproses pertanyaan anda! "
        return reply

    def postMsg(self, msg):
        # Type `msg` into the composer box and click the send button.
        try:
            boxes = driver.find_elements_by_class_name("_13NKt")
            boxes[len(boxes) - 1].send_keys(msg)
            try:
                driver.find_element_by_class_name("_4sWnG").click()
            except:
                print("Gagal Klik")
        except:
            print("tidak bisa input")
        print("Response me : " + msg)

    def sendRDC(self):
        # Reply to every contact currently flagged with the
        # new-message CSS class (different contacts).
        for badge in driver.find_elements_by_class_name('_23LrM'):
            self.clickMSg(badge)
            self.lastMsg = self.getMsg()
            print("Pesan terakhir adalah : " + self.lastMsg)
            self.postMsg(self.processMsg(self.lastMsg))
            print("=====================================")

    def sendRSC(self):
        # Reply inside the currently active chat using the cached message.
        print("Pesan terakhir adalah : " + self.lastMsg)
        self.postMsg(self.processMsg(self.lastMsg))
|
UTF-8
|
Python
| false | false | 3,540 |
py
| 2 |
modelWA.py
| 2 | 0.579661 | 0.574859 | 0 | 112 | 30.616071 | 117 |
timspencer/idaho_covid_dashboard_scraper
| 18,262,200,954,716 |
8b544b9f5b0d5da4e7679844fe536de19179ff42
|
ee370b3bc056bd8c3cab1fa1e05642267d03f8c7
|
/list_idaho_covid_pages.py
|
d0d8a0a9af416f69d9076aa0b4862c739b0a72a0
|
[] |
no_license
|
https://github.com/timspencer/idaho_covid_dashboard_scraper
|
713e5f62f071d21b282f8930b175291b1f23de92
|
0feed0e242293e955c72970b6ed550c183580d0f
|
refs/heads/main
| 2023-01-08T18:58:01.199287 | 2020-11-15T17:57:18 | 2020-11-15T17:57:18 | 302,999,637 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
import requests
from bs4 import BeautifulSoup
import json
import re
# Bootstrap a public Tableau dashboard session, then print a CSV listing
# of every published sheet and its subsheets on stdout.
DASHBOARD_URL = "https://public.tableau.com/views/DPHIdahoCOVID-19Dashboard/Home"

landing = requests.get(DASHBOARD_URL, params={":showVizHome": "no"})
config = json.loads(
    BeautifulSoup(landing.text, "html.parser")
    .find("textarea", {"id": "tsConfigContainer"})
    .text
)

# The bootstrap endpoint serves the workbook's presentation model.
session_url = f'https://public.tableau.com{config["vizql_root"]}/bootstrapSession/sessions/{config["sessionid"]}'
bootstrap = requests.post(session_url, data={"sheet_id": config["sheetId"]})

# The response appears to be two length-prefixed JSON chunks; the first
# one carries the sheet information.
match = re.search('\d+;({.*})\d+;({.*})', bootstrap.text, re.MULTILINE)
world = json.loads(match.group(1))
sheets = world['worldUpdate']['applicationPresModel']['workbookPresModel']['sheetsInfo']

print("sheet,subsheet")
for sheet in sheets:
    if not sheet['isPublished']:
        continue
    for subsheet in sheet['namesOfSubsheets']:
        print("%s,%s" % (sheet['sheet'], subsheet))
|
UTF-8
|
Python
| false | false | 965 |
py
| 4 |
list_idaho_covid_pages.py
| 3 | 0.678756 | 0.673575 | 0 | 36 | 25.805556 | 119 |
anil-chhetri/OpenCV
| 7,808,250,563,539 |
21ecdbea135b8ff19e6d57dc715efe179df17a7c
|
f085a680e736226192b734b8327877e10875a35b
|
/checkerboard.py
|
e3f6b1fafed99a148142c7a9a30c7fe926c9f221
|
[] |
no_license
|
https://github.com/anil-chhetri/OpenCV
|
854cadd86fab1f4c5f6af7886606966850035a4e
|
abbe8b6659e87d11e534494376cadf599ec41c1f
|
refs/heads/main
| 2023-01-06T17:50:27.233754 | 2020-11-03T16:23:28 | 2020-11-03T16:23:28 | 308,003,911 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import cv2
canvas = np.zeros((300, 300, 3), dtype = "uint8")
# loop over the image in 20-pixel blocks (row/col are the block indices)
for (row, y) in enumerate(range(0, 300, 20)):
    for (col, x) in enumerate(range(0, 300, 20)):
        # initialize the color as red (OpenCV uses BGR channel order)
        color = (0, 0, 255)
        # when the row and column parities match, paint the square
        # black instead -- this alternation makes the checkerboard
        if row % 2 == col % 2:
            color = (0, 0, 0)
        # draw a filled 20x20 square (thickness -1 means fill)
        cv2.rectangle(canvas, (x, y), (x + 20, y + 20),
            color, -1)
cv2.imshow('canvas', canvas)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
UTF-8
|
Python
| false | false | 593 |
py
| 37 |
checkerboard.py
| 37 | 0.625632 | 0.546374 | 0 | 25 | 22.72 | 49 |
prakhar21/Automatic-Ticket-Booking
| 14,834,817,080,110 |
66f373e75d8f6233af00816849d984904377dd45
|
217e5c1fb4b35c9a0d4ada7588573de3416e919e
|
/bookTicket.py
|
43a25de0c7f85782b00f4b42eef8dd7910384a9e
|
[] |
no_license
|
https://github.com/prakhar21/Automatic-Ticket-Booking
|
a932cdc069d730348759be1df67a559830123b7d
|
4f0bd0f1dbf0308e7edcdd526e18344f8ff280cd
|
refs/heads/master
| 2020-12-31T04:56:28.470641 | 2016-05-24T14:42:25 | 2016-05-24T14:42:25 | 59,579,920 | 1 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
@author: Prakhar Mishra
@date: 16/02/2016
'''
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import Select
import pyscreenshot as ImageGrab
import time
import pytesseract
import Image
import sys
import re
from pyvirtualdisplay import Display
import datetime
import imaplib
import email
from bs4 import BeautifulSoup
import urllib
import csv
##----------------------------------
##uncommentting it will hide the browser window
##-----------------------------------
#display = Display(visible=0, size=(1920,1280))
#display.start()
## For integrating it through Mobile Application
socialcops_website = 'https://collect.socialcops.org/#login'
email = 'Your registered id'
password_social = 'your password'
driver = webdriver.Firefox()
# # Open socialcops 'collect' website get the user credetials
driver.get(socialcops_website)
time.sleep(2)
email_socialcops = driver.find_element_by_id('username')
email_socialcops.send_keys(email)
password_socialcops = driver.find_element_by_id('password')
password_socialcops.send_keys(password_social)
submit_socialcops = driver.find_element_by_id('submit').click()
# Change for page till loaded
time.sleep(12)
driver.find_element_by_id('download').click()
time.sleep(2)
driver.find_element_by_id('send').click()
print 'Data sent !!'
time.sleep(1)
driver.quit()
time.sleep(20)
# Wait for the export e-mail to arrive, then fetch it via Gmail IMAP.
# Open Gmail and download data.
username_gmail = 'your gmail id'  # placeholder: Gmail address
password_gmail = 'your gmail password'  # placeholder: Gmail (app) password
## GMAIL AUTHENTICATION
def gmail_authenticate(u, p):
    """Log in to Gmail over IMAP/SSL and return the live connection."""
    connection = imaplib.IMAP4_SSL('imap.gmail.com')
    connection.login(u, p)
    return connection
def parse_email(r):
    """Extract the first linked .csv URL from a raw e-mail body.

    `r` is str()-ified first, so bytes/tuples work too. Quoted-printable
    soft line breaks (escaped in the string representation) are stripped
    out of the matched URL before returning it.
    """
    candidates = re.findall('<a.+?(http:.+?.csv)', str(r))
    return candidates[0].replace('=\\r\\n', '')
## opens gmail and fetches the email with given subject to download the data
def get_mail_uid(u):
    """Return (uid, raw RFC822 payload) of the newest report e-mail.

    `u` is an authenticated imaplib connection.
    """
    u.select("inbox")
    result , data = u.uid('search', None, '(HEADER Subject "SocialCops Collect : Raw Data Report")')
    # data[0] is a space-separated list of matching UIDs; take the newest.
    latest_email_uid = data[0].split()[-1]
    result, data = u.uid('fetch', latest_email_uid, '(RFC822)')
    raw_email = data[0][1]
    return latest_email_uid, raw_email
## Downloads the data into local file system
def download_data(u):
    """Save the report at URL `u` to a fixed local path (Python 2 urllib API)."""
    urllib.urlretrieve(u, filename="/home/prakhar/Desktop/scraping/Reservation/data.csv")
## Authenticates gmail credentials for user
mail = gmail_authenticate(username_gmail,password_gmail)
print 'Gmail Authentication Successful !!\n'
# NOTE(review): get_mail_uid returns a (uid, raw_email) tuple; parse_email
# str()-ifies its argument, so the regex still finds the link.
raw_email = get_mail_uid(mail)
link_to_data = parse_email(raw_email)
print link_to_data
download_data(link_to_data)
## Updating Variables
data_dict = {}
data_list = []
outfile_all = open('/home/prakhar/Desktop/scraping/Reservation/data.csv','rb' )
reader = csv.reader(outfile_all)
for row in reader:
    data_list.append(row)
# NOTE(review): data_dict is overwritten on every row, so only the last
# CSV row survives -- presumably the export holds a single response;
# verify against the report format.
for i in data_list:
    data_dict['Name'] = i[8]
    data_dict['Age'] = i[9]
    data_dict['Mobile'] = i[10]
    # Column 12 is a Yes/No flag encoding gender.
    if i[12] == 'No':
        data_dict['Gender'] = 'Female'
    elif i[12] == 'Yes':
        data_dict['Gender'] = 'Male'
    data_dict['From'] = i[14]
    data_dict['To'] = i[15]
    data_dict['Date'] = i[16]
    data_dict['Train'] = i[17]
    data_dict['Class'] = i[18]
# ## Passenger Details
SOURCE = data_dict['From']
DESTINATION = data_dict['To']
DATE = '19-03-2016'#data_dict['Date']  NOTE(review): journey date is hard-coded, overriding the form value
TRAIN_NUMBER = data_dict['Train']
CLASS = data_dict['Class']
YOUR_NAME = data_dict['Name']
AGE = data_dict['Age']
GENDER = data_dict['Gender']
CHECK_AUTO_UPGRADE = False
MOBILE_NO = data_dict['Mobile']
# display = Display(visible=0, size=(1920,1280))
# display.start()
driver = webdriver.Firefox()
## Now Open irctc website
irctc_website = 'https://www.irctc.co.in/eticketing/loginHome.jsf'
user_name = 'IRCTC user name'  # placeholder credentials
pwd = 'IRCTC password'
driver.get(irctc_website)
time.sleep(2)
## Login Details - Autofill
Id = driver.find_element_by_id('usernameId')
Id.send_keys(user_name)
time.sleep(1)
Password = driver.find_element_by_name('j_password')
Password.send_keys(pwd)
time.sleep(1)
## Crack Captcha - Autofill
# Screenshot the captcha at fixed screen coordinates and OCR it.
# NOTE(review): the hard-coded bounding box assumes a specific screen
# layout/resolution -- verify on the target machine.
Captcha = driver.find_element_by_name('j_captcha')
im=ImageGrab.grab(bbox=(768,385,890,420)) # X1,Y1,X2,Y2
im.save('picT1.png')
temp = pytesseract.image_to_string(Image.open('picT1.png'))
# Drop any spaces the OCR may have inserted.
temp = temp.replace(' ','')
Captcha.send_keys(temp)
Submit = driver.find_element_by_id('loginbutton').click()
time.sleep(1)
## Page after login - FILL IN TRAVEL DETAILS
source = driver.find_element_by_id('jpform:fromStation').send_keys(SOURCE)
destination = driver.find_element_by_id('jpform:toStation').send_keys(DESTINATION)
dateOfTravel = driver.find_element_by_id('jpform:journeyDateInputDate').send_keys(DATE)
findTrainSubmit = driver.find_element_by_id('jpform:jpsubmit').click()
# Hardcoded for class.
# NOTE(review): the element id embeds train 14660 and class 2S;
# generalizing would mean building the id from TRAIN_NUMBER/CLASS
# via string concatenation, as the original comment suggests.
classSelect = driver.find_element_by_id('cllink-14660-2S-4').click()
time.sleep(3)
driver.find_element_by_link_text("Book Now").click()
time.sleep(5)
# Dump the booking page's HTML so the dynamically named passenger-name
# field id can be recovered with a regex below.
elem = driver.find_element_by_xpath("//*")
source_code = elem.get_attribute("outerHTML")
f = open('/home/prakhar/Desktop/html_source_code.html', 'w')
f.write(source_code.encode('utf-8'))
f.close()
## Fill travel details for passengers
regex = 'addPassengerForm:psdetail:0:(p[0-9]+)?\"'
data = ''
with open('/home/prakhar/Desktop/html_source_code.html','r') as myfile:
    data=myfile.read().replace('\n', '')
# First capture is the dynamic suffix of the name field's id.
data = re.findall(regex,data)[0]
print data
passengerName = driver.find_element_by_id('addPassengerForm:psdetail:0:'+str(data))
passengerName.send_keys(YOUR_NAME)
time.sleep(1)
passengerAge = driver.find_element_by_id('addPassengerForm:psdetail:0:psgnAge')
passengerAge.send_keys(AGE)
time.sleep(1)
passengerGender = Select(driver.find_element_by_id('addPassengerForm:psdetail:0:psgnGender'))
passengerGender.select_by_visible_text(GENDER)
# Scroll to the bottom so the remaining controls are in view.
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(2)
checkAutoUpgradation = driver.find_element_by_id('addPassengerForm:autoUpgrade').click()
mobileDetails = driver.find_element_by_id('addPassengerForm:mobileNo')
mobileDetails.clear()
mobileDetails.send_keys(MOBILE_NO)
# Second captcha: crop it out of a full-page screenshot using the
# element's own location/size (no fixed coordinates this time).
e = driver.find_element_by_id('bkg_captcha')
location = e.location
size = e.size
time.sleep(2)
driver.save_screenshot('captchaPassengerDetails.png')
im = Image.open('captchaPassengerDetails.png')
left = location['x']
top = location['y']
right = location['x'] + size['width']
bottom = location['y'] + size['height']
im = im.crop((left, top, right, bottom))
im.save('captchaPassengerDetails.png')
temp1 = pytesseract.image_to_string(Image.open('captchaPassengerDetails.png'))
# Drop any spaces the OCR may have inserted.
temp1 = temp1.replace(' ','')
Captcha = driver.find_element_by_name('j_captcha')
Captcha.send_keys(temp1)
time.sleep(2)
## Proceed to payment gateway
submitPassengerDetails = driver.find_element_by_id('validate').click()
time.sleep(1)
## For debit card
clickDebitCard = driver.find_element_by_id('DEBIT_CARD').click()
## for icici bank (radio option with value 41)
icici_debit_radio = driver.find_element_by_xpath(".//*[@type='radio'][@value='41']")
icici_debit_radio.click()
finalSubmit = driver.find_element_by_id('validate').click()
## Checkout Details - card payment
TYPE_OF_CARD = 'Visa-Debit/Credit' # also Mastercard
cardType = Select(driver.find_element_by_name('CardTypeSelectBox'))
cardType.select_by_visible_text(TYPE_OF_CARD)
time.sleep(3)
# Card number is entered as four 4-digit groups (placeholders below).
CARD_NO1 = 'card number first four digits'
CARD_NO2 = 'card number next four digits'
CARD_NO3 = 'card number next four digits'
CARD_NO4 = 'card number last four digits'
cardNumber = driver.find_element_by_name('CardNum1').send_keys(CARD_NO1)
time.sleep(1)
cardNumber = driver.find_element_by_name('CardNum2').send_keys(CARD_NO2)
time.sleep(1)
cardNumber = driver.find_element_by_name('CardNum3').send_keys(CARD_NO3)
time.sleep(1)
cardNumber = driver.find_element_by_name('CardNum4').send_keys(CARD_NO4)
time.sleep(3)
EXP_MONTH = 'Enter Month (eg. 12 for December)'
# BUG FIX: the closing parenthesis was outside the string literal
# ('Enter Year (eg. 2014') -- an unmatched ')' that made the entire
# script a SyntaxError.
EXP_YEAR = 'Enter Year (eg. 2014)'
expiryDate = Select(driver.find_element_by_name('ExpDtMon'))
expiryDate.select_by_value(EXP_MONTH)
expiryDate = Select(driver.find_element_by_name('ExpDtYr'))
expiryDate.select_by_value(EXP_YEAR)
time.sleep(3)
CVV = 'Enter CVV here'
cvvDetails = driver.find_element_by_name('CVVNum')
cvvDetails.send_keys(CVV)
time.sleep(3)
## inserts the Name on card details
NAME_CARDHOLDER = 'Enter Name on card'
nameOfCardHolder = driver.find_element_by_name('NameOnCard')
nameOfCardHolder.send_keys(NAME_CARDHOLDER)
time.sleep(3)
## Inserts the ATM pin
ATM = 'Enter Atm Pin in case of ICICI transaction'
atmPinDetails = driver.find_element_by_name("ATMPIN")
atmPinDetails.send_keys(ATM)
time.sleep(3)
payMoneyButton = driver.find_element_by_name('btnPay')
payMoneyButton.click()
|
UTF-8
|
Python
| false | false | 9,047 |
py
| 2 |
bookTicket.py
| 1 | 0.721676 | 0.704875 | 0 | 335 | 26.00597 | 100 |
qbrc-cnap/cnap
| 16,939,351,029,246 |
7355527f9d2c54fe596b76c04279233e45000825
|
322ac7676c4af546f6cf2500a8daacfdbc04d1e5
|
/base/resource_urls.py
|
2d43d4a55a223a21651e68b2035f6709cd070329
|
[
"MIT"
] |
permissive
|
https://github.com/qbrc-cnap/cnap
|
3842eda379e5c2c0595cfc8f19b16a0ea667e2aa
|
624683e91a64c3b4934b578c59db850242d2f94c
|
refs/heads/master
| 2022-12-15T01:37:20.300250 | 2020-03-13T19:03:28 | 2020-03-13T19:03:28 | 184,335,005 | 1 | 0 |
MIT
| false | 2022-12-08T05:46:57 | 2019-04-30T21:37:38 | 2021-07-08T14:06:04 | 2022-12-08T05:46:57 | 1,909 | 1 | 0 | 24 |
Python
| false | false |
from django.urls import re_path
from rest_framework.urlpatterns import format_suffix_patterns
from base import views
'''
For all the endpoints given here, consult the specific view for
details about the actual methods they support, and what sorts of
info they provide back
'''
urlpatterns = [
    # endpoints related to querying Resources:
    # list and detail views
    re_path(r'^$', views.ResourceList.as_view(), name='resource-list'),
    re_path(r'^(?P<pk>[0-9]+)/$', views.ResourceDetail.as_view(), name='resource-detail'),
    # resources scoped to a single user
    re_path(r'^user/(?P<user_pk>[0-9]+)/$', views.UserResourceList.as_view(), name='user-resource-list'),
    # tree-view listing and rename action
    re_path(r'^tree/$', views.get_tree_ready_resources, name='resource-list-tree'),
    re_path(r'^rename/(?P<pk>[0-9]+)/$', views.FileRenameView.as_view(), name='resource-rename'),
]
# Allow suffix-based format selection (e.g. .json) on every route above.
urlpatterns = format_suffix_patterns(urlpatterns)
|
UTF-8
|
Python
| false | false | 846 |
py
| 169 |
resource_urls.py
| 87 | 0.705674 | 0.698582 | 0 | 19 | 43.526316 | 105 |
cskanani/codechef_prepare
| 3,770,981,330,492 |
34e655789c3f31293a8e9fed34d1400aea41819b
|
8bcf7c1ed213d5a296d592a420b3d678f01ff716
|
/greedy_algorithms/LEMUSIC.py
|
f5a8bee1bb19c3e5d198d1cc9423f04ab733786c
|
[] |
no_license
|
https://github.com/cskanani/codechef_prepare
|
237b65455c2294c4a96d72cfa4cdecb8734b48ee
|
36075328b0f52dc6237a96a4094358128327dc0b
|
refs/heads/master
| 2020-12-03T14:11:04.963970 | 2020-03-05T17:29:23 | 2020-03-05T17:29:23 | 231,349,403 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# For each test case: read songs as (band, length) pairs, play each
# band's first (cheapest-by-length) song as early as possible, then
# play the remaining songs once every band has been heard.
for _ in range(int(input())):
    count = int(input())
    songs = [tuple(map(int, input().split())) for _ in range(count)]
    songs.sort(key=lambda song: song[1])
    first_play = [False] * count
    bands_seen = set()
    sweetness = 0
    # First pass: one song per new band, in increasing length order.
    for idx, (band, length) in enumerate(songs):
        if band not in bands_seen:
            bands_seen.add(band)
            sweetness += length * len(bands_seen)
            first_play[idx] = True
    # Second pass: everything else plays with all bands already known.
    for idx, (band, length) in enumerate(songs):
        if not first_play[idx]:
            sweetness += length * len(bands_seen)
    print(sweetness)
|
UTF-8
|
Python
| false | false | 718 |
py
| 68 |
LEMUSIC.py
| 67 | 0.571031 | 0.562674 | 0 | 20 | 34.9 | 59 |
Pedestrian-794567740/OJCrawler
| 5,360,119,203,016 |
68de321360a10e4d24c48d4d05ebbe073e94e958
|
2915b310a4ac6901c8ffabb90188fb57d3a8342b
|
/HDUProblems.py
|
a53ba0f666bbeebb25a37a6c48f8843de57678c9
|
[] |
no_license
|
https://github.com/Pedestrian-794567740/OJCrawler
|
29b7c7b0844be60b304f0e26542e3f65c2336ece
|
ebc22831a94628ced20a5b095e1444170f0fd369
|
refs/heads/master
| 2022-03-25T07:32:50.131299 | 2017-08-28T15:02:38 | 2017-08-28T15:02:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import requests,re,json,os
from bs4 import BeautifulSoup
HOST = 'http://acm.hdu.edu.cn/'  # online-judge root URL
BASELOC = os.getcwd()  # working directory when the script starts
NAME = 'HDUProblems'  # name of the output folder
def getHTMLText(url):
    """GET `url` with a desktop user agent; return the decoded body,
    or '' on any error."""
    headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}
    try:
        response = requests.get(url, timeout=30, headers=headers)
        response.raise_for_status()
        # Use the apparent (content-sniffed) encoding before decoding.
        response.encoding = response.apparent_encoding
        return response.text
    except:
        return ''
def getSoupObj(url):
    """Fetch `url` and parse it into a BeautifulSoup tree; None on failure."""
    try:
        return BeautifulSoup(getHTMLText(url), 'html.parser')
    except:
        print('\nError: failed to get the Soup object')
        return None
def getProblemList(url):
    """Scrape one volume listing page and return relative problem links.

    The listing table renders its rows from an inline <script>; its text
    is split on ';' into statements and on ',' into fields, where the
    second field holds the problem id.
    NOTE(review): `list` shadows the builtin -- kept for compatibility.
    """
    soup = getSoupObj(url)
    tables = soup('table',{'class':'table_text'})
    list = []
    if len(tables) != 0:
        tmps = tables[0].find_all('script')
        if len(tmps) != 0:
            tmp = tmps[0].text
            tmp = tmps[0].text.split(';')
            for i in tmp:
                data = i.split(',')
                try:
                    # Fragments without a second field are skipped.
                    list.append('showproblem.php?pid=' + data[1])
                except:
                    pass
    return list
def parsePromblem(url):
    """Download one problem page and save its panels to '<pid> <title>.txt'.

    (The 'Promblem' spelling is kept: callers elsewhere use this name.)
    """
    # Problem id is the query-string value after '='.
    pid = url[url.find('=')+1:]
    soup = getSoupObj(url)
    data = soup('div',{'class':re.compile(r'panel')})
    title = soup('h1')[0].text
    # Strip characters that are illegal in Windows file names.
    tr = re.compile(r'[<>,/,\,|,:,"",*,?]')
    title = pid +' ' + tr.sub('',title)
    problemLoc = os.getcwd()+'\\'+ title + '.txt'
    print('ๆญฃๅจๅญๅจ้ข็ฎ๏ผ'+ title + '\n')
    with open(problemLoc, 'w',encoding='utf-8') as f:
        f.write('Link: '+url+'\n\n')
        for i in data:
            f.write(i.text+'\n')
def getInfo(url):
    """Crawl every volume linked from the problem index into ./HDUProblems."""
    global BASELOC,HOST,NAME
    hduLoc = BASELOC + '\\' + NAME
    # Create the output folder on first run.
    if not os.path.exists(BASELOC + '\\' + NAME):
        os.mkdir(NAME)
    print('ๅผๅง็ฌๅ: ' + NAME)
    os.chdir(hduLoc)
    soup = getSoupObj(url)
    links = soup('a',{'href':re.compile(r'^(listproblem.php)')})
    linkDict = {}
    pageList = []
    for i in links:
        pageList.append(i.attrs['href'])
    # Deduplicate while keeping first-seen order.
    pageList = sorted(set(pageList),key=pageList.index)
    for i in pageList:
        problemList = getProblemList(HOST + i)
        for problemUrl in problemList:
            parsePromblem(HOST + problemUrl)
if __name__ == "__main__":
    # Entry point: crawl starting from the first problem-list page.
    requestUrl = HOST + 'listproblem.php'
    print('************HDU้ขๅบ้ข็ฎ็ฌ่ซ************')
    input('ๆEnter้ฎๅผๅง็ฌๅ')
    getInfo(requestUrl)
|
UTF-8
|
Python
| false | false | 2,517 |
py
| 7 |
HDUProblems.py
| 6 | 0.547552 | 0.531364 | 0 | 81 | 29.506173 | 148 |
prestonTao/python-example
| 644,245,121,612 |
fcdf6c6b756e1cc90a6598d9fd5e1b2ba281ab0e
|
fe54dbe7dbee690e93c3d61ffc67b627176c2dc2
|
/test/test.py
|
da9826dc39a8651cd1a9f8a9e12cd1c30636bacb
|
[] |
no_license
|
https://github.com/prestonTao/python-example
|
106d9d2de16bb1b008111eec56c967d2dee07295
|
ac7feed76db5e34973217858140ced511ea6cb49
|
refs/heads/master
| 2021-01-10T04:42:35.732990 | 2015-07-03T09:32:27 | 2015-07-03T09:32:27 | 36,269,947 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def p(p,day=1):
    # Print `day`; the first positional argument is unused.
    print(day)
params = {"day":21}
# The dict binds to the unused first parameter, so the default day (1) prints.
p(params)
u = '1'
# Python 2 cmp(): three-way comparison returning -1/0/1.
print(cmp(u,'1'))
print(cmp(u,'2'))
print(cmp(u,'7'))
params = {'name':'tao','age':18}
# Membership test on a dict checks its keys.
print('name' in params)
print '1 ###############################################################'
class Base():
    # Class attribute; __init__ shadows it with an instance attribute.
    name = "tao"
    def __init__(self,value=None):
        self.age = 18
        self.name = value
    def foo(self):
        # Python 2 print statement: name and age separated by a space.
        print self.name,self.age
b = Base()
b.foo()  # prints "None 18": the default `value` is None
b.age = 19
b.name = "taopopoo"
b.foo()
class Base_b():
    name = "tao"  # class attribute; no instance attribute shadows it here
    def __init__(self, age):
        self.age = age
    def foo(self):
        print self.name,self.age
bb = Base_b(age = 19)
bb.foo()
print '###############################################################'
def excption(method):
    """Decorator: call `method`, printing and returning the exception
    instead of letting it propagate.

    BUG FIX: the wrapper used to accept a single `self` argument and
    call `method()` with no arguments, so every decorated function --
    including the zero-argument `exportE` in this file -- raised
    TypeError when invoked. Forward *args/**kwargs instead.
    """
    def wrapper(*args, **kwargs):
        try:
            return method(*args, **kwargs)
        except Exception as e:
            print(e)
            return e
    return wrapper
@excption
def exportE():
    # Python 2 print statement.
    print 'tao'
# exportE()
print '###############################################################'
# ** binds tighter than *, so this is (3**3)*2.
print 3**3*2
|
UTF-8
|
Python
| false | false | 1,063 |
py
| 37 |
test.py
| 36 | 0.428034 | 0.41016 | 0 | 71 | 13.873239 | 74 |
shoicchi/python-lesson2
| 1,649,267,486,023 |
d4af29fb312619e92c75fbc97a0ff9220fb11dec
|
46583b1bc3956da12ae06c4b432400188066eba4
|
/file_split_by_module.py
|
852f170b84b73d0c58ea8eee39c6aaa726ad3e1a
|
[] |
no_license
|
https://github.com/shoicchi/python-lesson2
|
9061370a64cf3f72e37350b255c6925a2b309e1d
|
54f413a6730f7ea761d83572a5110b24832c1f6b
|
refs/heads/master
| 2020-07-26T22:36:40.522620 | 2019-09-24T15:34:06 | 2019-09-24T15:34:06 | 208,785,825 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# class User:
# def __init__(self, name):
# self.name = name
# def say_hi(self):
# print("hi {0}".format(self.name))
# class AdminUser(User):
# def __init__(self, name, age):
# super().__init__(name)
# self.age = age
# def say_hello(self):
# print("hello {0} ({1})".format(self.name, self.age))
# def say_hi(self):
# print("[admin] hi {0}".format(self.name))
#ไธใmoduleใซ่จ่ผใใใฆใใ
# import user ๏ผuser.pyใ่ชญใฟ่พผใ
# from user import AdminUser # user.pyใใAdminUserใฏใฉในใฎใฟ่ชญใฟ่พผใ
from user import AdminUser, User #่คๆฐใฎใฏใฉในใๆๅฎใใฆ่ชญใฟ่พผใใซใฏใซใณใๅบๅใใงๆๅฎใใ
# bob = user.AdminUser("bob", 23)  # module-qualified form when using `import user`
bob = AdminUser("bob", 23)
tom = User("tom")
print(bob.name)
bob.say_hi()
bob.say_hello()
# NOTE: in .ipynb notebooks, (re)loading modules needs a bit of extra effort.
|
UTF-8
|
Python
| false | false | 897 |
py
| 19 |
file_split_by_module.py
| 2 | 0.604468 | 0.593955 | 0 | 29 | 25.241379 | 63 |
AndrewErmakov/CompanyRepairProject
| 2,439,541,452,186 |
969903d108873f05154c3e9d6d4772622d7ab06e
|
61da429c2db1183716ef6e4cb2627ea2e2907a58
|
/company_repair_project/requests_app/migrations/0002_auto_20210730_0450.py
|
385ed690d02e47d727d673c07f1dddb1cb8f4a30
|
[] |
no_license
|
https://github.com/AndrewErmakov/CompanyRepairProject
|
89dacc0c9486301283a417c822d5d64d08710e10
|
d5d1f5205dcf4a2ea589960e059fd0904e17ffa8
|
refs/heads/main
| 2023-06-21T06:55:35.514399 | 2021-07-30T06:20:53 | 2021-07-30T06:20:53 | 390,953,825 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Generated by Django 3.2.5 on 2021-07-30 04:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: renames Request.date_creation to
    creation_date and makes `executor` an optional FK to the user model
    (DO_NOTHING on delete)."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('requests_app', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='request',
            old_name='date_creation',
            new_name='creation_date',
        ),
        migrations.AlterField(
            model_name='request',
            name='executor',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='ะัะฟะพะปะฝะธัะตะปั'),
        ),
    ]
|
UTF-8
|
Python
| false | false | 794 |
py
| 14 |
0002_auto_20210730_0450.py
| 11 | 0.633461 | 0.609195 | 0 | 26 | 29.115385 | 164 |
Phoeux/ProgressTaskControl
| 19,335,942,783,640 |
59969631319bddc48acd18548edec5c149c0f600
|
ec75c67f82968e37dc267ba21edf4af87c0ad835
|
/api/management/commands/notifications.py
|
38b8fd7c331f32370800284e07ce61da232d0479
|
[] |
no_license
|
https://github.com/Phoeux/ProgressTaskControl
|
d45c4f386f7947f5dd3037c5ee07d6306f40689e
|
a49bb97572d5f8a4b10bdba7c66648b6ff6983b4
|
refs/heads/main
| 2023-07-08T08:52:20.766051 | 2021-08-11T11:27:22 | 2021-08-11T11:27:22 | 386,531,520 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.core.mail import send_mail
from django.core.management import BaseCommand
from django.utils.timezone import now
from api.models import Tasks
class Command(BaseCommand):
    """Management command: e-mail the user and their manager a reminder
    for every task whose check date matches the current time."""

    def handle(self, *args, **options):
        # NOTE(review): now() is a full datetime; if date_to_check is a
        # DateField this exact-match filter relies on Django's coercion
        # -- confirm against the Tasks model definition.
        task = Tasks.objects.filter(date_to_check=now())
        for notif in task:
            subject = f"ะะพะฒัะพัะตะฝะธะต ัะตะผั {notif.title} ั {notif.user}"
            message = f"ะกะตะณะพะดะฝั {notif.date_to_check}, {notif.user} ะดะพะปะถะตะฝ ะฟะพะบะฐะทะฐัั ะทะฝะฐะฝะธั ัะตะผั {notif.title}"
            from_email = 'gleb.lobinsky@mbicycle.com'
            user_email = notif.user.email
            manager_email = notif.manager.email
            # One mail per task, addressed to both the user and the manager.
            send_mail(subject, message, from_email, [user_email, manager_email], fail_silently=False)
|
UTF-8
|
Python
| false | false | 781 |
py
| 10 |
notifications.py
| 6 | 0.673469 | 0.673469 | 0 | 17 | 42.235294 | 110 |
wkoki/Workspace
| 5,970,004,555,381 |
3ce02036c6d6923c27eba30d7dd82a3a2896e67a
|
7dabcbde11781ff4bc709749bc5113bddfb3bdf2
|
/python/deep-learning/oreilly/ch04/prac.py
|
063b077f9fa4361e0516f3bdc9071349294c6f84
|
[
"MIT"
] |
permissive
|
https://github.com/wkoki/Workspace
|
64c28581ddad6b68b831c13af060b7f55dbfc69a
|
964bbf4db1360e8e6fbab80c8cf130824e89e272
|
refs/heads/master
| 2021-08-07T01:02:11.191923 | 2017-11-07T08:10:26 | 2017-11-07T08:10:26 | 107,400,107 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*- #
import numpy as np
from dataset.mnist import load_mnist
from two_layer_net import TwoLayerNet
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)

train_loss_list = []
train_acc_list = []
test_acc_list = []

# Hyperparameters
iters_num = 10000
train_size = x_train.shape[0]  # BUG FIX: train_size was never defined
batch_size = 100
learning_rate = 0.1

# Iterations per epoch.
# BUG FIX: this was computed before train_size/batch_size existed
# (NameError); moved below their definitions.
iter_per_epoch = max(train_size / batch_size, 1)

network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

for i in range(iters_num):
    # Sample a mini-batch.
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]
    # Compute gradients.
    grad = network.gradient(x_batch, t_batch)
    # SGD parameter update. BUG FIX: 'leaning_rate' typo (NameError).
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grad[key]
    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)
    # Record accuracy once per epoch.
    # BUG FIX: 'traing_acc_list' typo (NameError).
    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train, t_train)
        test_acc = network.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print("train acc, test acc | " + str(train_acc) + ", " + str(test_acc))
|
UTF-8
|
Python
| false | false | 1,328 |
py
| 53 |
prac.py
| 45 | 0.640033 | 0.618616 | 0 | 44 | 26.545455 | 85 |
X00143365/3rdyearprojectX00143365
| 5,050,881,541,889 |
05e3c7c783c20ca51b5fe8320dc418c84e6479a5
|
79e846ceb4eaceedecd97327f4619ea444d379cf
|
/module_task/urls.py
|
2f280b1dbe47888be2fcd9e00c69791ec9d2e556
|
[] |
no_license
|
https://github.com/X00143365/3rdyearprojectX00143365
|
172ae9097af335c16d860739300152dab77a45ec
|
3252b4ada03de21f621ebdbfe065f2f470e25c00
|
refs/heads/master
| 2021-01-05T07:12:02.728238 | 2020-02-28T16:22:28 | 2020-02-28T16:22:28 | 240,926,140 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.urls import path
from . import views

# URL routing for the module_task app: dashboard pages plus CRUD-style
# actions (delete/edit/mark) that carry the record id in the URL path.
urlpatterns = [
    path('', views.index, name='index'),
    path('people', views.people, name='people'),
    path('task', views.task, name='task'),
    path('horses', views.horses, name='horses'),
    path('staff', views.staff, name='staff'),
    path('rota', views.rota, name='rota'),
    path('rotaview', views.rotaview, name='rotaview'),
    path('lessons', views.lessons, name='lessons'),
    path('delete/<task_id>', views.delete, name='delete'),
    path('getselectdate', views.getselectdate, name='getselectdate'),
    path('delallcomplete', views.delallcomplete, name='delallcomplete'),
    path('delpastrota', views.delpastrota, name='delpastrota'),
    path('deletestaff/<staff_id>', views.deletestaff, name='deletestaff'),
    path('deleterota/<rota_id>', views.deleterota, name='deleterota'),
    path('markcomplete/<task_id>', views.markcomplete, name='markcomplete'),
    path('markincomplete/<task_id>', views.markincomplete, name='markincomplete'),
    path('edittask/<task_id>', views.edittask, name='edittask'),
    path('editstaff/<staff_id>', views.editstaff, name='editstaff'),
    path('editrota/<rota_id>', views.editrota, name='editrota'),
]
|
UTF-8
|
Python
| false | false | 1,234 |
py
| 28 |
urls.py
| 20 | 0.668558 | 0.668558 | 0 | 28 | 43.035714 | 82 |
Grigorii-24/Classification-Sympsons
| 19,370,302,521,206 |
2bd9b55650e134bd4de34637378756f1b6934445
|
3ca6a34ec4a2f5f2e7073e51c839ed3b6861efb8
|
/sympsons.py
|
a60a45365941a49361257a9d9b3df871d5014622
|
[] |
no_license
|
https://github.com/Grigorii-24/Classification-Sympsons
|
bf9ecf837687ae00815a28812237130248322622
|
e99fd007e554c7f7fa8c9f51c8e4d106e2bec533
|
refs/heads/main
| 2022-12-30T15:10:25.166749 | 2020-10-01T20:10:16 | 2020-10-01T20:10:16 | 300,400,591 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""Sympsons.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1lBYNd3B0WH4xVyNbzSwQZ1jVMlhnu1tu
"""
!pip install -U torch torchvision
# ัััะฐะฝะพะฒะบะฐ ะฟะพะดั
ะพะดััะตะน ะฒะตััะธะธ torch
from os.path import exists
from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())
cuda_output = !ldconfig -p|grep cudart.so|sed -e 's/.*\.\([0-9]*\)\.\([0-9]*\)$/cu\1\2/'
accelerator = cuda_output[0] if exists('/dev/nvidia0') else 'cpu'
!pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.4.1-{platform}-linux_x86_64.whl torchvision
import torch
# we will verify that GPU is enabled for this notebook
# following should print: CUDA is available! Training on GPU ...
#
# if it prints otherwise, then you need to enable GPU:
# from Menu > Runtime > Change Runtime Type > Hardware Accelerator > GPU
import torch
import numpy as np
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('CUDA is not available. Training on CPU ...')
else:
print('CUDA is available! Training on GPU ...')
!pip uninstall -y Pillow
!pip install Pillow==5.3.0
import PIL
print(PIL.PILLOW_VERSION)
from google.colab import drive
drive.mount('/content/gdrive/')
!unzip -q /content/gdrive/My\ Drive/journey-springfield.zip -d yyy
!unzip -q /content/gdrive/My\ Drive/simpsons_dataset.zip -d train
!unzip -q /content/gdrive/My\ Drive/testset.zip -d test
!ls train
!nvidia-smi
import torch
torch.cuda.is_available()
# Commented out IPython magic to ensure Python compatibility.
import pickle
import numpy as np
from skimage import io
from tqdm import tqdm, tqdm_notebook
from PIL import Image
from pathlib import Path
from torchvision import transforms
from multiprocessing.pool import ThreadPool
from sklearn.preprocessing import LabelEncoder
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
from matplotlib import colors, pyplot as plt
# %matplotlib inline
import warnings
warnings.filterwarnings(action='ignore', category=DeprecationWarning)
# different mode of dataset
DATA_MODES = ['train', 'val', 'test']
# all images will be scaled to 224x224 px
RESCALE_SIZE = 224
# use GPU
DEVICE = torch.device("cuda")
"""
we redefine the getitem method for convenient work with this data structure.
LabelEncoder is also used to convert string class labels to id and back.
The dataset description says that the images are of different sizes because they
were taken directly from the video, so we should bring them to one size (the _prepare_sample method does this).
"""
class SimpsonsDataset(Dataset):
    """Dataset of Simpsons character images.

    Wraps a list of image paths. In 'train'/'val' mode the class label is
    taken from each file's parent directory name and encoded to an integer
    id with a LabelEncoder (which is persisted to label_encoder.pkl); in
    'test' mode only the image tensor is returned.
    """

    def __init__(self, files, mode):
        super().__init__()
        # file list for download
        self.files = sorted(files)
        # mode: one of DATA_MODES ('train', 'val', 'test')
        self.mode = mode
        if self.mode not in DATA_MODES:
            print(f"{self.mode} is not correct; correct modes: {DATA_MODES}")
            raise NameError
        self.len_ = len(self.files)
        self.label_encoder = LabelEncoder()
        if self.mode != 'test':
            # Labels come from the parent folder name of each image file.
            self.labels = [path.parent.name for path in self.files]
            self.label_encoder.fit(self.labels)
            # Persist the fitted encoder so predictions can be decoded later.
            with open('label_encoder.pkl', 'wb') as le_dump_file:
                pickle.dump(self.label_encoder, le_dump_file)

    def __len__(self):
        return self.len_

    def load_sample(self, file):
        # Load eagerly so PIL does not keep a lazy file handle open.
        image = Image.open(file)
        image.load()
        return image

    def __getitem__(self, index):
        # to convert images into PyTorch tensors and normalize the input
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        x = self.load_sample(self.files[index])
        x = self._prepare_sample(x)
        # Scale pixel values to [0, 1] before the Normalize transform.
        x = np.array(x / 255, dtype='float32')
        x = transform(x)
        if self.mode == 'test':
            return x
        else:
            label = self.labels[index]
            label_id = self.label_encoder.transform([label])
            y = label_id.item()
            return x, y

    def _prepare_sample(self, image):
        # Resize every image to RESCALE_SIZE x RESCALE_SIZE pixels.
        image = image.resize((RESCALE_SIZE, RESCALE_SIZE))
        return np.array(image)
def imshow(inp, title=None, plt_ax=plt, default=False):
    """Display a normalized image tensor on the given matplotlib axes.

    Undoes the ImageNet mean/std normalization, clips into the valid
    [0, 1] range and draws the result with an optional title (grid off).
    """
    channel_mean = np.array([0.485, 0.456, 0.406])
    channel_std = np.array([0.229, 0.224, 0.225])
    pixels = inp.numpy().transpose((1, 2, 0))
    pixels = np.clip(channel_std * pixels + channel_mean, 0, 1)
    plt_ax.imshow(pixels)
    if title is not None:
        plt_ax.set_title(title)
    plt_ax.grid(False)
TRAIN_DIR = Path('train')
#TEST_DIR = Path('test')
TEST_DIR = Path('test')
train_val_files = sorted(list(TRAIN_DIR.rglob('*.jpg')))
test_files = sorted(list(TEST_DIR.rglob('*.jpg')))
from google.colab import drive
drive.mount('/content/drive')
from sklearn.model_selection import train_test_split
train_val_labels = [path.parent.name for path in train_val_files]
train_files, val_files = train_test_split(train_val_files, test_size=0.25, \
stratify=train_val_labels)
val_dataset = SimpsonsDataset(val_files, mode='val')
# uncomment if you have problem with pillow
# def register_extension(id, extension): Image.EXTENSION[extension.lower()] = id.upper()
# Image.register_extension = register_extension
# def register_extensions(id, extensions):
# for extension in extensions: register_extension(id, extension)
# Image.register_extensions = register_extensions
# Show picture
fig, ax = plt.subplots(nrows=3, ncols=3,figsize=(8, 8), \
sharey=True, sharex=True)
for fig_x in ax.flatten():
random_characters = int(np.random.uniform(0,1000))
im_val, label = val_dataset[random_characters]
img_label = " ".join(map(lambda x: x.capitalize(),\
val_dataset.label_encoder.inverse_transform([label])[0].split('_')))
imshow(im_val.data.cpu(), \
title=img_label,plt_ax=fig_x)
class SimpleCnn(nn.Module):
    """Five-block convolutional classifier for 224x224 RGB images.

    Each block is Conv2d(3x3) -> [BatchNorm2d] -> ReLU -> MaxPool2d(2)
    (the second block has no batch norm, matching the original layout);
    the resulting 96x5x5 feature map is flattened into a linear layer
    that produces one logit per class.
    """

    @staticmethod
    def _conv_block(c_in, c_out, batch_norm=True):
        # One Conv -> optional BatchNorm -> ReLU -> MaxPool stage.
        layers = [nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=3)]
        if batch_norm:
            layers.append(nn.BatchNorm2d(c_out))
        layers.append(nn.ReLU())
        layers.append(nn.MaxPool2d(kernel_size=2))
        return nn.Sequential(*layers)

    def __init__(self, n_classes):
        super().__init__()
        self.conv1 = self._conv_block(3, 16)
        self.conv2 = self._conv_block(16, 64, batch_norm=False)
        self.conv3 = self._conv_block(64, 128)
        self.conv4 = self._conv_block(128, 64)
        self.conv5 = self._conv_block(64, 96)
        # Spatial size after the five pools: 224 -> 111 -> 54 -> 26 -> 12 -> 5.
        self.out = nn.Linear(96 * 5 * 5, n_classes)

    def forward(self, x):
        for block in (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5):
            x = block(x)
        flattened = x.view(x.size(0), -1)
        return self.out(flattened)
def fit_epoch(model, train_loader, criterion, optimizer):
    """Run one training epoch; return (mean loss, accuracy) over the epoch.

    NOTE(review): the model is never switched back to train mode here,
    while eval_epoch calls model.eval() -- confirm whether model.train()
    should be called at the top of this function.
    """
    running_loss = 0.0
    running_corrects = 0
    processed_data = 0
    for inputs, labels in train_loader:
        # Move the batch to the global training device.
        inputs = inputs.to(DEVICE)
        labels = labels.to(DEVICE)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # Predicted class = argmax over the logits.
        preds = torch.argmax(outputs, 1)
        # Weight the batch loss by batch size for a correct epoch mean.
        running_loss += loss.item() * inputs.size(0)
        running_corrects += torch.sum(preds == labels.data)
        processed_data += inputs.size(0)
    train_loss = running_loss / processed_data
    train_acc = running_corrects.cpu().numpy() / processed_data
    return train_loss, train_acc
def eval_epoch(model, val_loader, criterion):
    """Evaluate *model* on *val_loader*; return (mean loss, accuracy)."""
    model.eval()
    running_loss = 0.0
    running_corrects = 0
    processed_size = 0
    for inputs, labels in val_loader:
        inputs = inputs.to(DEVICE)
        labels = labels.to(DEVICE)
        # Disable autograd during evaluation.
        with torch.set_grad_enabled(False):
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            preds = torch.argmax(outputs, 1)
        # Weight the batch loss by batch size for a correct epoch mean.
        running_loss += loss.item() * inputs.size(0)
        running_corrects += torch.sum(preds == labels.data)
        processed_size += inputs.size(0)
    val_loss = running_loss / processed_size
    val_acc = running_corrects.double() / processed_size
    return val_loss, val_acc
def train(train_files, val_files, model, epochs, batch_size):
    """Train *model* for *epochs* epochs and return the per-epoch history.

    train_files / val_files are Dataset objects (the call site passes
    SimpsonsDataset instances).  Returns a list of
    (train_loss, train_acc, val_loss, val_acc) tuples, one per epoch.
    """
    # Bug fix: the original ignored both parameters and read the global
    # train_dataset / val_dataset variables (it only worked because the
    # caller happened to use those names) -- use the parameters instead.
    train_loader = DataLoader(train_files, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_files, batch_size=batch_size, shuffle=False)

    history = []
    log_template = "\nEpoch {ep:03d} train_loss: {t_loss:0.4f} \
val_loss {v_loss:0.4f} train_acc {t_acc:0.4f} val_acc {v_acc:0.4f}"

    with tqdm(desc="epoch", total=epochs) as pbar_outer:
        opt = torch.optim.Adam(model.parameters(), lr=0.0001)
        criterion = nn.CrossEntropyLoss()
        for epoch in range(epochs):
            train_loss, train_acc = fit_epoch(model, train_loader, criterion, opt)
            print("loss", train_loss)
            val_loss, val_acc = eval_epoch(model, val_loader, criterion)
            history.append((train_loss, train_acc, val_loss, val_acc))
            pbar_outer.update(1)
            tqdm.write(log_template.format(ep=epoch + 1, t_loss=train_loss,
                                           v_loss=val_loss, t_acc=train_acc,
                                           v_acc=val_acc))
    return history
def predict(model, test_loader):
    """Return softmax class probabilities (numpy) for every batch in *test_loader*."""
    model.eval()
    batch_logits = []
    with torch.no_grad():
        for inputs in test_loader:
            batch_logits.append(model(inputs.to(DEVICE)).cpu())
    return nn.functional.softmax(torch.cat(batch_logits), dim=-1).numpy()
n_classes = len(np.unique(train_val_labels))
simple_cnn = SimpleCnn(n_classes).to(DEVICE)
print("we will classify :{}".format(n_classes))
print(simple_cnn)
if val_dataset is None:
val_dataset = SimpsonsDataset(val_files, mode='val')
train_dataset = SimpsonsDataset(train_files, mode='train')
# Training NN
history = train(train_dataset, val_dataset, model=simple_cnn, epochs=40, batch_size=64)
# Build learning curves
loss, acc, val_loss, val_acc = zip(*history)
plt.figure(figsize=(15, 9))
plt.plot(loss, label="train_loss")
plt.plot(val_loss, label="val_loss")
plt.legend(loc='best')
plt.xlabel("epochs")
plt.ylabel("loss")
plt.show()
def predict_one_sample(model, inputs, device=DEVICE):
    """Return softmax probabilities (numpy) for a single batched input tensor."""
    model.eval()
    with torch.no_grad():
        logit = model(inputs.to(device)).cpu()
    return torch.nn.functional.softmax(logit, dim=-1).numpy()
random_characters = int(np.random.uniform(0,1000))
ex_img, true_label = val_dataset[random_characters]
probs_im = predict_one_sample(simple_cnn, ex_img.unsqueeze(0))
idxs = list(map(int, np.random.uniform(0,1000, 20)))
imgs = [val_dataset[id][0].unsqueeze(0) for id in idxs]
probs_ims = predict(simple_cnn, imgs)
label_encoder = pickle.load(open("label_encoder.pkl", 'rb'))
y_pred = np.argmax(probs_ims,-1)
actual_labels = [val_dataset[id][1] for id in idxs]
preds_class = [label_encoder.classes_[i] for i in y_pred]
"""Let's calculate the target metric in the validation sample."""
from sklearn.metrics import f1_score
f1_score(actual_labels, preds_class)
"""Let's do a cool visualization to see how confident the network is in its responses. You can use this to debug the correct output."""
import matplotlib.patches as patches
from matplotlib.font_manager import FontProperties
fig, ax = plt.subplots(nrows=3, ncols=3,figsize=(12, 12), \
sharey=True, sharex=True)
for fig_x in ax.flatten():
random_characters = int(np.random.uniform(0,1000))
im_val, label = val_dataset[random_characters]
img_label = " ".join(map(lambda x: x.capitalize(),\
val_dataset.label_encoder.inverse_transform([label])[0].split('_')))
imshow(im_val.data.cpu(), \
title=img_label,plt_ax=fig_x)
actual_text = "Actual : {}".format(img_label)
fig_x.add_patch(patches.Rectangle((0, 53),86,35,color='white'))
font0 = FontProperties()
font = font0.copy()
font.set_family("fantasy")
prob_pred = predict_one_sample(simple_cnn, im_val.unsqueeze(0))
predicted_proba = np.max(prob_pred)*100
y_pred = np.argmax(prob_pred)
predicted_label = label_encoder.classes_[y_pred]
predicted_label = predicted_label[:len(predicted_label)//2] + '\n' + predicted_label[len(predicted_label)//2:]
predicted_text = "{} : {:.0f}%".format(predicted_label,predicted_proba)
fig_x.text(1, 59, predicted_text , horizontalalignment='left', fontproperties=font,
verticalalignment='top',fontsize=8, color='black',fontweight='bold')
|
UTF-8
|
Python
| false | false | 13,874 |
py
| 2 |
sympsons.py
| 1 | 0.629504 | 0.608997 | 0 | 423 | 31.742317 | 135 |
artekw/gpx2map
| 9,019,431,331,116 |
a9ddff9c7abb2a39cc60e23c93ce27550951e09d
|
df60348adf898e4d170804a2c8884871ce2c9bcd
|
/gpx2map.py
|
6922ce4437bc5ce96aeb5f95add1412ef5ce3a6e
|
[
"MIT"
] |
permissive
|
https://github.com/artekw/gpx2map
|
fbfcba719a8fd1f92bb6df8199d413fd0495523d
|
f33916d868647d3cac1600d0985e6928db0f88b5
|
refs/heads/master
| 2020-08-30T13:31:09.395509 | 2017-02-05T11:03:26 | 2017-02-05T11:03:26 | 67,627,588 | 1 | 0 | null | false | 2019-11-11T08:26:56 | 2016-09-07T17:09:27 | 2016-09-07T17:09:48 | 2019-11-11T08:26:55 | 18 | 0 | 0 | 1 |
CSS
| false | false |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""./ngrok http -region=eu 5000"""
import os
import gpxpy
import gpxpy.gpx
import folium
from flask import Flask, send_file, request, redirect, url_for, render_template, abort, flash
from werkzeug.utils import secure_filename
__version__ = 0.1
__author__ = "Artur Wronowski"
UPLOAD_FOLDER = 'uploads'
MAP_FOLDER = 'maps'
ALLOWED_EXTENSIONS = set(['gpx'])
app = Flask(__name__)
app.secret_key = 'some_secret'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def genmap(filename, _color='red', _weight='4'):
    """Render the track points of a GPX file as a polyline on a folium map.

    The map is centred on the average coordinate of all track points and
    written to maps/map.html.
    """
    # Close the file deterministically (the original leaked the handle).
    with open(filename, 'r') as gpx_file:
        gpx = gpxpy.parse(gpx_file)

    points = []
    # Bug fix: the original compared the track *list* to the int 1
    # ("gpx.tracks != 1"), which is always True; warn only when the file
    # actually contains no tracks.
    if not gpx.tracks:
        flash('Brak tras')
    for track in gpx.tracks:
        for segment in track.segments:
            for point in segment.points:
                points.append(tuple([point.latitude, point.longitude]))

    ave_lat = sum(p[0] for p in points) / len(points)
    ave_lon = sum(p[1] for p in points) / len(points)

    # Renamed the local from "map" to avoid shadowing the builtin.
    route_map = folium.Map(location=[ave_lat, ave_lon], zoom_start=13)
    folium.PolyLine(points, color=_color, opacity=0.7, weight=_weight).add_to(route_map)
    route_map.save('maps/map.html')
def allowed_file(filename):
    """Return True if *filename* has an extension in ALLOWED_EXTENSIONS.

    The comparison is case-insensitive, so "track.GPX" is accepted too
    (the original rejected upper-case extensions).
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_file():
    """Upload form: accept a GPX file, generate its map, then redirect."""
    # Make sure the upload and map directories exist.
    if not os.path.exists(UPLOAD_FOLDER):
        os.makedirs(UPLOAD_FOLDER)
    if not os.path.exists(MAP_FOLDER):
        os.makedirs(MAP_FOLDER)
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # If the user did not select a file.
        if file.filename == '':
            flash('Nie wybrano pliku')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            # save file in folder
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # generate map
            genmap(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # remove file (the uploaded GPX is only needed transiently)
            os.remove(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            return redirect(url_for('show_map'))
    return render_template('index.html')
@app.route('/maps/map.html')
def show_map():
    """Serve the most recently generated map page."""
    return send_file('maps/map.html')
if __name__ == '__main__':
app.run(debug=True, host= '0.0.0.0')
|
UTF-8
|
Python
| false | false | 2,612 |
py
| 11 |
gpx2map.py
| 2 | 0.62524 | 0.617185 | 0 | 96 | 26.166667 | 93 |
RohiteshThakur/CloudAnimatics
| 2,508,260,903,762 |
93c91b1ba142171f4369737c0d7ffb34b31b6147
|
43070b2ac3c195936352e31e33375ff5f209483d
|
/src/blender_functions_obsolete.py
|
a4f1d020820457b4059df6dd896516d24322f693
|
[] |
no_license
|
https://github.com/RohiteshThakur/CloudAnimatics
|
b9d967c8782df5362cf93008568393ae2fede09f
|
c09153f070f5e49ed0be62130988d17c36f72160
|
refs/heads/master
| 2021-08-17T06:54:56.739523 | 2017-11-20T21:55:59 | 2017-11-20T21:55:59 | 107,906,324 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
r"'''"
# Change font size in blender's python console..
# bpy.context.space_data.font_size = 18
# After opening blender you want to save your file first, to access "bpy.data.filepath". This gives Blender a path to search.
# bpy.ops.wm.save_mainfile(filepath=("E:\\AWS\\My Project\\AWS_Blender_programs\\temp.blend")) # Make sure we save the temp.blend where we have othwe python source codes.
# bpy.ops.wm.save_mainfile(filepath=('E:\\AWS\\My Project\\AWS_Blender_programs\\temp.blend'))
# When first opening blender....
# import os, sys, bpy ; sys.path.append(os.path.dirname(bpy.data.filepath)); import ec2_dep_pattern [Press Ctrl+V to paste in python console]
# Make blender pick changes from "below" modules...
# import importlib ; importlib.reload(ec2_dep_pattern) ; import ec2_dep_pattern ; ec2_dep_pattern.ec2_pattern((0,-4,0), 25)
# import importlib ; importlib.reload(ec2_dep_pattern) ; import ec2_dep_pattern ; ec2_dep_pattern.elastic_beanstalk_pattern((0,4,0), 1)
# import importlib ; importlib.reload(ec2_dep_pattern) ; import ec2_dep_pattern ; ec2_dep_pattern.create_public_sub((0,-4,0))
# import importlib ; importlib.reload(ec2_dep_pattern) ; import ec2_dep_pattern ; ec2_dep_pattern.create_private_sub((0, 4,0))
# import importlib ; importlib.reload(ec2_dep_pattern) ; import ec2_dep_pattern ; ec2_dep_pattern.create_public_networkacl((0,-4,0)) : over Private subnet.
# import importlib ; importlib.reload(ec2_dep_pattern) ; import ec2_dep_pattern ; ec2_dep_pattern.create_private_networkacl((0,4,0)) : over Public subnet.
# import importlib ; importlib.reload(ec2_dep_pattern) ; import ec2_dep_pattern ; ec2_dep_pattern.create_route((0,-4,0))
# import importlib ; importlib.reload(ec2_dep_pattern) ; import ec2_dep_pattern ; ec2_dep_pattern.create_route((0,4,0))
# import importlib ; importlib.reload(ec2_dep_pattern) ; import ec2_dep_pattern ; ec2_dep_pattern.create_internet_gateway((0, -4, 0))
# import importlib ; importlib.reload(ec2_dep_pattern) ; import ec2_dep_pattern ; ec2_dep_pattern.create_route_connector((0, -4, 0), (0, 4, 0))
# import importlib ; importlib.reload(ec2_dep_pattern) ; import ec2_dep_pattern ; ec2_dep_pattern.components_connector((0, -4, 3.5), (0, -4, 4.5))
# import importlib ; importlib.reload(ec2_dep_pattern) ; import ec2_dep_pattern ; ec2_dep_pattern.components_connector((0, 4, 2.5), (0, 4, 5.5))
# NOTES:
# Press [CTRL] + SPACE to autocomplete in Python console.
r"'''"
import os
import sys
import bpy
import math # for sqrt function.
from math import cos, sin, radians
def frange(start, stop, step):
    """Yield floats from *start* up to (excluding) *stop* in *step* increments.

    A floating-point counterpart of range(). Values are produced by
    repeated addition, so tiny rounding drift is possible for steps that
    are not exactly representable in binary.
    """
    current = start
    while current < stop:
        yield current
        current += step
def get_object_offset(number_of_objects, columns, spacing):
    """Compute the (x, y, z) offset for an object placed on a grid.

    number_of_objects -- index of the object being placed (int)
    columns           -- number of grid columns (int)
    spacing           -- (x, y, z) spacing between neighbouring objects

    The column position scales spacing[0], the row position scales
    spacing[1], and spacing[2] passes through unchanged.
    Returns a tuple of (x, y, z) coordinates.
    """
    col_index = number_of_objects % columns
    row_index = number_of_objects // columns
    return (col_index * spacing[0], row_index * spacing[1], spacing[2])
def create_public_sub(coord):
    """Append the AWS_Public_Subnet object from its library .blend and move it to *coord*."""
    library = "E:\\AWS\\My Project\\AWS_Blender_programs\\blend_files\\AWS_Public_Subnet.blend"
    obj_name = "AWS_Public_Subnet"
    obj_dir = library + "\\Object\\"
    bpy.ops.wm.append(filepath=obj_dir + obj_name, filename=obj_name, directory=obj_dir)
    bpy.context.scene.objects.active = bpy.data.objects[obj_name]
    bpy.ops.transform.translate(value=coord, constraint_axis=(True, True, True))
def create_private_sub(coord):
    """Append the AWS_Private_Subnet object from its library .blend and move it to *coord*."""
    library = "E:\\AWS\\My Project\\AWS_Blender_programs\\blend_files\\AWS_Private_Subnet.blend"
    obj_name = "AWS_Private_Subnet"
    obj_dir = library + "\\Object\\"
    bpy.ops.wm.append(filepath=obj_dir + obj_name, filename=obj_name, directory=obj_dir)
    bpy.context.scene.objects.active = bpy.data.objects[obj_name]
    bpy.ops.transform.translate(value=coord, constraint_axis=(True, True, True))
def create_security_group(xco, yco, zco):
    """Append an AWS_SecurityGroup object one unit above (xco, yco, zco).

    Called by create_ec2 so each EC2 instance gets a security-group marker
    stacked directly on top of it.
    """
    blendfile = "E:\\AWS\\My Project\\AWS_Blender_programs\\blend_files\\AWS_SecurityGroup.blend"
    section = "\\Object\\"
    object = "AWS_SecurityGroup"
    filepath = blendfile + section + object
    directory = blendfile + section
    filename = object
    # Place the security group one unit above the EC2 instance.
    zco += 1
    bpy.ops.wm.append(filepath=filepath, filename=filename, directory=directory)
    bpy.context.scene.objects.active = bpy.data.objects[object]
    bpy.ops.transform.translate(value=(xco, yco, zco), constraint_axis=(True, True, True))
def create_public_networkacl(coord):
    """Append the public-variant AWS_NetworkAcl object 3 units above *coord*."""
    blendfile = "E:\\AWS\\My Project\\AWS_Blender_programs\\blend_files\\AWS_NetworkAcl_Public.blend"
    section = "\\Object\\"
    object = "AWS_NetworkAcl"
    filepath = blendfile + section + object
    directory = blendfile + section
    filename = object
    x = coord[0]
    y = coord[1]
    # The network ACL sits 3 units above the subnet it guards.
    z = coord[2] + 3
    bpy.ops.wm.append(filepath=filepath, filename=filename, directory=directory)
    bpy.context.scene.objects.active = bpy.data.objects[object]
    bpy.ops.transform.translate(value=(x, y, z), constraint_axis=(True, True, True))
def create_private_networkacl(coord):
    """Append the private-variant AWS_NetworkAcl object 3 units above *coord*."""
    blendfile = "E:\\AWS\\My Project\\AWS_Blender_programs\\blend_files\\AWS_NetworkAcl_Private.blend"
    section = "\\Object\\"
    object = "AWS_NetworkAcl"
    filepath = blendfile + section + object
    directory = blendfile + section
    filename = object
    x = coord[0]
    y = coord[1]
    # The network ACL sits 3 units above the subnet it guards.
    z = coord[2] + 3
    bpy.ops.wm.append(filepath=filepath, filename=filename, directory=directory)
    bpy.context.scene.objects.active = bpy.data.objects[object]
    bpy.ops.transform.translate(value=(x, y, z), constraint_axis=(True, True, True))
def create_internet_gateway(coord):
    """Append an AWS_InternetGateway object 6.5 units above *coord*."""
    blendfile = "E:\\AWS\\My Project\\AWS_Blender_programs\\blend_files\\AWS_InternetGateway.blend"
    section = "\\Object\\"
    object = "AWS_InternetGateway"
    filepath = blendfile + section + object
    directory = blendfile + section
    filename = object
    x = coord[0]
    y = coord[1]
    # The gateway is the topmost element of the stack (above router at +4.5).
    z = coord[2] + 6.5
    bpy.ops.wm.append(filepath=filepath, filename=filename, directory=directory)
    bpy.context.scene.objects.active = bpy.data.objects[object]
    bpy.ops.transform.translate(value=(x, y, z), constraint_axis=(True, True, True))
def create_route_table(coord):
    """Append an AWS_RouteTable plus its envelope mesh at *coord*.

    Called by create_route so the VPC router gets both a route-table icon
    and a surrounding envelope object at the same position.
    """
    x = coord[0]
    y = coord[1]
    z = coord[2]
    blendfile = "E:\\AWS\\My Project\\AWS_Blender_programs\\blend_files\\AWS_RouteTable_with_Envelope.blend"
    section = "\\Object\\"
    object1 = "AWS_RouteTable"
    object2 = "AWS_Route_Envelope"
    filepath1 = blendfile + section + object1
    directory1 = blendfile + section
    filename1 = object1
    filepath2 = blendfile + section + object2
    directory2 = blendfile + section
    filename2 = object2
    # Envelope first, then the route table, each translated to the same spot.
    bpy.ops.wm.append(filepath=filepath2, filename=filename2, directory=directory2)
    bpy.context.scene.objects.active = bpy.data.objects[object2]
    bpy.ops.transform.translate(value=(x, y, z), constraint_axis=(True, True, True))
    bpy.ops.wm.append(filepath=filepath1, filename=filename1, directory=directory1)
    bpy.context.scene.objects.active = bpy.data.objects[object1]
    bpy.ops.transform.translate(value=(x, y, z), constraint_axis=(True, True, True))
def create_route(coord):
    """Append an AWS_VPC_Router 4.5 units above *coord* and add its route table."""
    blendfile = "E:\\AWS\\My Project\\AWS_Blender_programs\\blend_files\\AWS_VPC_Router.blend"
    section = "\\Object\\"
    object = "AWS_VPC_Router"
    filepath = blendfile + section + object
    directory = blendfile + section
    filename = object
    x = coord[0]
    y = coord[1]
    # The router floats 4.5 units above the subnet origin.
    z = coord[2] + 4.5
    bpy.ops.wm.append(filepath=filepath, filename=filename, directory=directory)
    bpy.context.scene.objects.active = bpy.data.objects[object]
    bpy.ops.transform.translate(value=(x, y, z), constraint_axis=(True, True, True))
    # Add the route-table icon + envelope at the router's final position.
    create_route_table((x, y, z))
def create_route_connector(public_route_coord, private_route_coord):
    """Append a connector object spanning the public and private route positions.

    The connector is translated to the private route's position (z raised
    by 4.5, matching the router height used in create_route) and scaled
    along z by half the 3-D distance between the two coordinates.
    """
    # Unpack both coordinate tuples for the distance computation.
    public_xco = public_route_coord[0]
    public_yco = public_route_coord[1]
    public_zco = public_route_coord[2]
    private_xco = private_route_coord[0]
    private_yco = private_route_coord[1]
    private_zco = private_route_coord[2]
    # Half the Euclidean distance between the two points; presumably the
    # connector mesh is 2 units long so distance/2 stretches it across the
    # full gap -- TODO confirm against the .blend asset.
    scale_factor = (math.sqrt(math.pow((public_xco - private_xco), 2) + math.pow((public_yco - private_yco), 2) + math.pow((public_zco - private_zco), 2))) / 2
    # Lift to router height (same +4.5 offset as create_route).
    private_zco += 4.5
    blendfile = "E:\\AWS\\My Project\\AWS_Blender_programs\\blend_files\\AWS_Network_Connector.blend"
    section = "\\Object\\"
    object = "AWS_Network_Connector"
    filepath = blendfile + section + object
    directory = blendfile + section
    filename = object
    bpy.ops.wm.append(filepath=filepath, filename=filename, directory=directory)
    bpy.context.scene.objects.active = bpy.data.objects[object]
    # Only the z axis is constrained when positioning the connector.
    bpy.ops.transform.translate(value=(private_xco, private_yco, private_zco), constraint_axis=(False, False, True))
    bpy.context.object.scale[2] = scale_factor
def components_connector(pointA_coord, pointB_coord):
    """Append a vertical connector between two vertically stacked components.

    Expects point A and point B to share x and y coordinates; point B's z
    must be strictly greater than point A's.  On violation an error is
    printed and -1 is returned (no exception is raised).
    """
    pointA_xco = pointA_coord[0]
    pointA_yco = pointA_coord[1]
    pointA_zco = pointA_coord[2]
    pointB_xco = pointB_coord[0]
    pointB_yco = pointB_coord[1]
    pointB_zco = pointB_coord[2]
    if (pointB_zco <= pointA_zco):
        print ("Can\'t join components from top to down. Destination\'s [z] co-cordinate must be larger than source\'s")
        return (-1)
    else:
        # Midpoint of the vertical gap, relative to point A.
        connector_location = ((pointB_zco - pointA_zco) / 2)
        # Half the 3-D distance; presumably the connector mesh is 2 units
        # long so this stretches it across the gap -- TODO confirm.
        scale_factor = (math.sqrt(math.pow((pointB_xco - pointA_xco), 2) + math.pow((pointB_yco - pointA_yco), 2) + math.pow((pointB_zco - pointA_zco), 2))) / 2
        blendfile = "E:\\AWS\\My Project\\AWS_Blender_programs\\blend_files\\AWS_Components_Connector.blend"
        section = "\\Object\\"
        object = "AWS_Components_Connector"
        filepath = blendfile + section + object
        directory = blendfile + section
        filename = object
        bpy.ops.wm.append(filepath=filepath, filename=filename, directory=directory)
        bpy.context.scene.objects.active = bpy.data.objects[object]
        # Centre the connector halfway between the two components.
        bpy.ops.transform.translate(value=(pointA_xco, pointA_yco, (pointA_zco+connector_location)), constraint_axis=(True, True, True), constraint_orientation='GLOBAL')
        bpy.context.object.scale[2] = scale_factor
def create_ec2(coord, num_ins, radius):
    """Append *num_ins* EC2 objects evenly spaced on a circle around *coord*.

    Helper for ec2_pattern.  Every instance also gets a security-group
    marker stacked above it via create_security_group.  When num_ins == 1
    the radius collapses to 0 so the single instance sits at the centre.
    """
    blendfile = "E:\\AWS\\My Project\\AWS_Blender_programs\\blend_files\\AWS_EC2_Instance.blend"
    section = "\\Object\\"
    object = "AWS_EC2_Instance"
    filepath = blendfile + section + object
    directory = blendfile + section
    filename = object
    theta = 0
    xco = coord[0]
    yco = coord[1]
    zco = coord[2]
    if (num_ins == 1):
        radius = 0
    # 6.28 ~= 2*pi: divide the circle into equal arcs, one per instance.
    dist = 6.28 / num_ins
    while (theta < 6.28):
        # Parametric point on the circle centred at (xco, yco).
        x = xco + radius * cos(theta)
        y = yco + radius * sin(theta)
        z = zco
        bpy.ops.wm.append(filepath=filepath, filename=filename, directory=directory)
        bpy.context.scene.objects.active = bpy.data.objects[object]
        bpy.ops.transform.translate(value=(x, y, z), constraint_axis=(True, True, True))
        create_security_group(x, y, z)  # security group on top of this EC2
        theta += dist
def ec2_pattern(coord, num_ins):
    """Lay out *num_ins* EC2 instances in concentric rings centred at *coord*.

    Up to 9 instances form a single ring of radius 1; 10-25 fill an inner
    ring of 10 plus a second ring of radius 1.75; 26-49 fill two inner
    rings (10 and 16 instances) plus a third ring of radius 2.5.  Counts
    of 50 or more are not handled (the original left that case commented
    out), so nothing is drawn for them.
    """
    if num_ins < 10:
        create_ec2(coord, num_ins, 1)
    elif num_ins < 26:
        create_ec2(coord, 10, 1)
        create_ec2(coord, num_ins - 10, 1.75)
    elif num_ins < 50:
        create_ec2(coord, 10, 1)
        create_ec2(coord, 16, 1.75)
        create_ec2(coord, num_ins - (10 + 16), 2.5)
def elastic_beanstalk_pattern(coord, num_ins, beanstalk_app = 'yes', sec_grp = 'yes'):
    """Append *num_ins* Elastic Beanstalk meshes arranged around *coord*.

    For 1-10 instances a single circle of radius 0.5 is used (radius 0 for
    a lone instance) and each beanstalk gets an application marker and a
    security group stacked above it.  For 11-20 instances two rings are
    used and no per-instance app/SG stack is created.

    BUGFIX: in the 11-20 branch the original assigned `dist` only when
    num_ins was even, so an odd count raised NameError at `theta += dist`.
    Ceil-division spreads odd counts too and is identical for even counts.

    NOTE(review): beanstalk_app / sec_grp are accepted for interface
    compatibility but are never consulted in the body; num_ins > 20 draws
    nothing (both unchanged from the original).
    """
    blendfile = "E:\\AWS\\My Project\\AWS_Blender_programs\\blend_files\\AWS_ElasticBeanstalk.blend"
    section = "\\Object\\"
    object = "AWS_ElasticBeanstalk"
    filepath = blendfile + section + object
    directory = blendfile + section
    filename = object
    xco = coord[0]
    yco = coord[1]
    zco = coord[2]
    theta = 0
    if (num_ins <= 10):
        radius = 0.5
        if (num_ins == 1):
            radius = 0  # a single instance sits at the centre
        dist = 6.28 / num_ins  # angular step between instances
        while (theta < 6.28):
            x = xco + radius * cos(theta)  # circular path with (xco, yco) as centre
            y = yco + radius * sin(theta)
            z = zco
            bpy.ops.wm.append(filepath=filepath, filename=filename, directory=directory)
            bpy.context.scene.objects.active = bpy.data.objects[object]
            bpy.ops.transform.translate(value=(x, y, z), constraint_axis=(True, True, True))
            # Stack the application marker and the security group above the beanstalk.
            z += 1.2  # separation between beanstalk and application
            create_beanstalk_app((x, y, z))
            z += 1  # separation between application and security group
            create_beanstalk_security_group((x, y, z))
            theta += dist
    if (num_ins >= 11 and num_ins <= 20):
        rings = 1.5
        for radius in frange(0.5, rings, 0.5):
            theta = 0
            # Ceil-divide so odd instance counts also get a valid step
            # (for even counts this equals the original 6.28 / (num_ins/2)).
            dist = 6.28 / ((num_ins + 1) // 2)
            while (theta < 6.28):
                x = xco + radius * cos(theta)
                y = yco + radius * sin(theta)
                z = zco
                bpy.ops.wm.append(filepath=filepath, filename=filename, directory=directory)
                bpy.context.scene.objects.active = bpy.data.objects[object]
                bpy.ops.transform.translate(value=(x, y, z), constraint_axis=(True, True, True))
                # No per-instance app/SG stack for multi-ring deployments.
                theta += dist
def create_beanstalk_app(coord):
    """Append the Elastic Beanstalk deployment mesh and translate it to *coord*."""
    blend_path = "E:\\AWS\\My Project\\AWS_Blender_programs\\blend_files\\AWS_ElasticBeanstalk_Deployment.blend"
    section = "\\Object\\"
    object = "AWS_ElasticBeanstalk_Deployment"
    directory = blend_path + section
    # Append the object from the .blend library, make it active, then move it.
    bpy.ops.wm.append(filepath=directory + object, filename=object, directory=directory)
    bpy.context.scene.objects.active = bpy.data.objects[object]
    x = coord[0]
    y = coord[1]
    z = coord[2]
    bpy.ops.transform.translate(value=(x, y, z), constraint_axis=(True, True, True))
def create_beanstalk_security_group(coord):
    """Append the security-group mesh and translate it to *coord*."""
    blend_path = "E:\\AWS\\My Project\\AWS_Blender_programs\\blend_files\\AWS_SecurityGroup.blend"
    section = "\\Object\\"
    object = "AWS_SecurityGroup"
    directory = blend_path + section
    # Append the object from the .blend library, make it active, then move it.
    bpy.ops.wm.append(filepath=directory + object, filename=object, directory=directory)
    bpy.context.scene.objects.active = bpy.data.objects[object]
    x = coord[0]
    y = coord[1]
    z = coord[2]
    bpy.ops.transform.translate(value=(x, y, z), constraint_axis=(True, True, True))
|
UTF-8
|
Python
| false | false | 21,605 |
py
| 40 |
blender_functions_obsolete.py
| 9 | 0.644666 | 0.626938 | 0 | 519 | 40.628131 | 173 |
andporfirio/study_python
| 14,345,190,807,378 |
e3b0994a45b3ca1cf2ef5352cd4c7fad247d2c87
|
3515fa9877df641d89cf99142840a0b68cbd0d8b
|
/python_zumbis/02.repeticao04.py
|
bbe513fa069a4177ffc710f6c3c7e2bc2d61b65b
|
[] |
no_license
|
https://github.com/andporfirio/study_python
|
a8d9263b6914a1de1768459c4b0ce10f3d52d510
|
4ab40b50c4b0359326ccb2e44776c1ac6e1fe5c6
|
refs/heads/master
| 2016-09-23T07:12:58.830190 | 2016-06-28T17:59:39 | 2016-06-28T17:59:39 | 61,669,998 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/local/bin/python3
# Repeticao04 — print the 3-times table (3, 6, ..., 30), one value per line.
# Optional user input, kept disabled as in the exercise:
# digito = int(input('Digite um numero: '))
for contador in range(1, 11):
    print(contador * 3)
|
UTF-8
|
Python
| false | false | 164 |
py
| 38 |
02.repeticao04.py
| 33 | 0.609756 | 0.560976 | 0 | 9 | 17.333333 | 42 |
sungbin2/pyserver
| 18,442,589,575,835 |
e46831cd06608e45eed48ebc93bbe1d4e2264076
|
d80f0664e2697d1dbb17a6893e2f84fda77c71dd
|
/server/routes/info/store.py
|
99fea7fe1aedd317752e7a4b40e5d774da39e6bd
|
[] |
no_license
|
https://github.com/sungbin2/pyserver
|
4e44bd67f0e869d4aebb50352ce5af4a3c653f23
|
5bd646e7c1213c8dd0a62a5f72a0e0729acb3293
|
refs/heads/master
| 2020-03-27T00:48:10.331199 | 2018-08-22T05:00:31 | 2018-08-22T05:00:31 | 145,658,239 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from server.main_ import app, orm, c
from dateutil import parser
from datetime import datetime, date
def _get_store(store_id):
    """Fetch the ์ ๋ณด_๊ฐ๊ฒ (store-info) row whose `s` column equals *store_id*.

    A detached copy (c.OBJ_cp) is returned so the object remains usable
    after session_scope() closes the session.  .one() propagates the ORM's
    no-result / multiple-result errors — presumably SQLAlchemy; confirm.
    """
    with orm.session_scope() as ss:  # type:c.typeof_Session
        only = ss.query(orm.์ ๋ณด_๊ฐ๊ฒ) \
            .filter_by(s=store_id) \
            .one()
        return c.OBJ_cp(only)
# Static page routes: each simply renders the template resolved by
# c.display() (template selection appears to be handled inside c —
# presumably keyed on the request path; verify in the `c` helper module).
@app.route('/info/store')
def _info_store():
    return c.display()
@app.route('/info/store1')
def _info_store1():
    return c.display()
@app.route('/info/store2')
def _info_store2():
    return c.display()
@app.route('/info/store3')
def _info_store3():
    return c.display()
@app.route('/info/store4')
def _info_store4():
    return c.display()
@app.route('/info/storelogin')
def _info_storelogin():
    return c.display()
@app.route('/info/store/<int:store_id>', methods=['GET', 'POST', 'PUT'])
def _info_store_(store_id):
    """GET: return the store row (JSON or rendered page).
    POST with i == '์ ๊ท' ("new"): create a store plus its default account,
    settings, admin employee, feature settings and receipt-layout rows.
    POST with a numeric i: update the matching store row's fields.

    NOTE(review): the default account row gets a hard-coded password hash —
    confirm this is an intentional first-login default and not a leaked
    credential.  Date fields (๊ฐ์ ์ผ/ํ์ ์ผ) are parsed leniently; parse
    failures store None.
    """
    if c.is_GET():
        if c.is_json():
            return c.jsonify(_get_store(store_id).for_json())
        else:
            return c.display()
    # elif c.is_POST() or c.is_PUT():
    #     with orm.session_scope() as ss:  # type:c.typeof_Session
    #         only = ss.query(orm.์ ๋ณด_๊ฐ๊ฒ) \
    #             .filter_by(s=store_id) \
    #             .one()
    #         for k, v in c.data_POST().items():
    #             if getattr(only, k) != v:
    #                 print(k, 'is changed')
    #                 if k in ['๊ฐ์ ์ผ', 'ํ์ ์ผ']:
    #                     try:
    #                         setattr(only, k, parser.parse(v))
    #                     except:
    #                         setattr(only, k, None)
    #                 else:
    #                     setattr(only, k, v)
    #         only.issync = None
    #     return c.display()
    elif c.is_POST():
        with orm.session_scope() as ss:  # type:c.typeof_Session
            print(c.data_POST())
            if c.data_POST()['i'] == '์ ๊ท':
                # Create a brand-new store row from the posted fields.
                only = c.newitem_web3(orm.์ ๋ณด_๊ฐ๊ฒ)
                for k, v in c.data_POST().items():
                    if hasattr(only, k) and k != 'i':
                        if getattr(only, k) != v:
                            print(k, 'is changed')
                            if k in ['๊ฐ์ ์ผ', 'ํ์ ์ผ']:
                                # Date-like fields: parse leniently, None on failure.
                                try:
                                    setattr(only, k, parser.parse(v))
                                except:
                                    setattr(only, k, None)
                            else:
                                setattr(only, k, v)
                only.issync = None
                ss.add(only)
                # Fetch the row just added (highest i) and point its store
                # key `s` at its own id.
                q1 = ss.query(orm.์ ๋ณด_๊ฐ๊ฒ) \
                    .order_by(ss.desc(orm.์ ๋ณด_๊ฐ๊ฒ.i)) \
                    .first()
                q1.s = q1.i
                # Default login account for the new store.
                only = c.newitem_web2(orm.account , q1.i)
                only.์์ด๋ = q1.i
                only.ํจ์ค์๋ = '15a66be023f335531096a3bb13e2e9a6372656c2caa85b309b4aa8413dbc7'
                ss.add(only)
                # Copy template settings (i=0) for the new store.
                only = c.newitem_web2(orm.setting, q1.i)
                q2 = ss.query(orm.setting) \
                    .filter_by(i=0) \
                    .one()
                only.j = q2.j
                ss.add(only)
                # Default admin employee record.
                only = c.newitem_web2(orm.์ ๋ณด_์ง์, q1.i)
                only.์ง์๋ฒํธ = 1
                only.์ง์์ํธ = 1
                only.์ง์๋ช
= "๊ด๋ฆฌ์"
                only.์ง๋ฌด = "๋ํ"
                only.์ฌ์ง์ํ = "์ฌ์ง"
                ss.add(only)
                # Copy feature-settings template (i=0).
                only = c.newitem_web2(orm.setting_๊ธฐ๋ฅ์ค์ , q1.i)
                q3 = ss.query(orm.setting_๊ธฐ๋ฅ์ค์ ) \
                    .filter_by(i=0) \
                    .one()
                only.j = q3.j
                ss.add(only)
                # Copy receipt-layout template (i=0).
                only = c.newitem_web2(orm.setting_์์์ฆ์์, q1.i)
                q4 = ss.query(orm.setting_์์์ฆ์์) \
                    .filter_by(i=0) \
                    .one()
                only.j = q4.j
                ss.add(only)
                return 'modified'
            elif c.data_POST()['i'] != '์ ๊ท':
                # Update path: find the store's rows and patch the one whose
                # id matches the posted `i`.
                only = c.simple_query(ss, orm.์ ๋ณด_๊ฐ๊ฒ, s=store_id)
                for x in only:
                    if int(x.i) == int(c.data_POST()['i']):
                        for k, v in c.data_POST().items():
                            if hasattr(x, k) and k != 'i':
                                if getattr(x, k) != v:
                                    print(k, 'is changed')
                                    if k in ['๊ฐ์ ์ผ', 'ํ์ ์ผ']:
                                        try:
                                            setattr(x, k, parser.parse(v))
                                        except:
                                            setattr(x, k, None)
                                    else:
                                        setattr(x, k, v)
                        x.issync = None
                        return 'modified'
    return 'modified'
@app.route('/info/store/all', methods=['GET', 'POST', 'PUT'])
def _info_store_all():
if c.is_GET():
if True: # c.is_json()
l = []
with orm.session_scope() as ss: # type:c.typeof_Session
q1 = ss.query(orm.์ ๋ณด_๊ฐ๊ฒ) \
.all()
for x in q1:
dummy = x.__dict__.copy()
del dummy['_sa_instance_state']
for k, v in dummy.items():
if v is None:
dummy[k] = ''
elif isinstance(v, date):
dummy[k] = v.isoformat()
elif isinstance(v, datetime):
dummy[k] = v.isoformat(' ')
l.append(dummy)
return c.jsonify(l)
|
UTF-8
|
Python
| false | false | 5,876 |
py
| 17 |
store.py
| 2 | 0.388536 | 0.375132 | 0 | 166 | 33.162651 | 91 |
Aunsiels/pyformlang
| 5,257,039,993,888 |
1911a3d334f0508844e94624b2c7ffb34e888802
|
adc39afdb117e343a4ff3a1309a10ed04c78a51c
|
/pyformlang/finite_automaton/tests/test_symbol.py
|
63a893dad9a85b0c0073e84d0802bf5c26ea6f17
|
[
"MIT"
] |
permissive
|
https://github.com/Aunsiels/pyformlang
|
29485caaad2f3f60b93ac9782f193bbf512edc0f
|
49abbaeef84dcc8f6e5122f0939c950f812f8b16
|
refs/heads/master
| 2023-07-31T09:18:54.528110 | 2023-04-18T19:11:56 | 2023-04-18T19:11:56 | 164,653,723 | 28 | 9 |
MIT
| false | 2022-06-02T19:42:56 | 2019-01-08T13:22:11 | 2022-05-28T08:40:06 | 2022-06-02T19:42:56 | 386 | 13 | 5 | 1 |
Python
| false | false |
"""
Tests for the symbols
"""
import unittest
from pyformlang.finite_automaton import Symbol
class TestSymbol(unittest.TestCase):
    """Unit tests for the finite-automaton Symbol wrapper."""

    def test_can_create(self):
        """A Symbol can wrap any value, including empty strings and ints."""
        for raw_value in ("", 1):
            self.assertIsNotNone(Symbol(raw_value))

    def test_repr(self):
        """str() of a Symbol is the str() of the wrapped value."""
        self.assertEqual(str(Symbol("ABC")), "ABC")
        self.assertEqual(str(Symbol(1)), "1")

    def test_eq(self):
        """Symbols compare equal by wrapped value, including to raw values."""
        sym_abc = Symbol("ABC")
        sym_one = Symbol(1)
        sym_abc_bis = Symbol("ABC")
        self.assertEqual(sym_abc, sym_abc_bis)
        self.assertEqual(sym_one, 1)
        self.assertNotEqual(sym_one, sym_abc_bis)
        self.assertNotEqual(sym_abc, sym_one)

    def test_hash(self):
        """Equal symbols hash identically; distinct values hash apart."""
        hash_abc = hash(Symbol("ABC"))
        hash_one = hash(Symbol(1))
        hash_abc_bis = hash(Symbol("ABC"))
        self.assertIsInstance(hash_abc, int)
        self.assertEqual(hash_abc, hash_abc_bis)
        self.assertNotEqual(hash_one, hash_abc_bis)
        self.assertNotEqual(hash_abc, hash_one)
|
UTF-8
|
Python
| false | false | 1,282 |
py
| 104 |
test_symbol.py
| 86 | 0.596724 | 0.573323 | 0 | 49 | 25.163265 | 47 |
haolinnie/piSurveillance
| 14,791,867,416,247 |
f2f9aff74d271d9b9015bf928c3b450fe5da195e
|
9f13db220e07d23eadb4922880d0a1d79e36004d
|
/main.py
|
b6897aea4043c7025f93d016cd8843181376d6fc
|
[] |
no_license
|
https://github.com/haolinnie/piSurveillance
|
1e916c1f03d3f6a94c861ac470c67c9fd3f0875a
|
6b1af9f9be628c87db29149aa23db90f71d12731
|
refs/heads/master
| 2020-08-06T23:22:12.021634 | 2019-11-14T03:40:07 | 2019-11-14T03:40:07 | 213,196,216 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
import time
import datetime
import threading
import cv2
import imutils
from motiondetector import MotionDetector
from imutils.video import VideoStream
from flask import Response, Flask, render_template
# Initialise output frames and a lock to ensure thread-safe exchange
# of the output frames
outputFrame = None  # latest annotated frame, produced by detect_motion(), consumed by generate()
lock = threading.Lock()  # guards all reads/writes of outputFrame across threads
# Initialise flask
app = Flask(__name__)
# Initialise the video stream object
if sys.platform == "darwin":
    vs = VideoStream(src=0).start()  # macOS: default webcam
elif sys.platform == "linux":
    # Assume Linux is raspberry pi
    vs = VideoStream(usePiCamera=1).start()
# NOTE(review): on any other platform `vs` is never bound and the first
# vs.read() raises NameError — confirm only darwin/linux are supported.
time.sleep(2.0)  # give the camera sensor time to warm up
def detect_motion(frameCount):
    """Continuously grab frames, run motion detection and publish the
    annotated frame into the module-global `outputFrame` (under `lock`).

    frameCount: number of frames to accumulate before the background
    model is considered warmed up and detection begins.

    Runs forever; intended to be started as a daemon thread.
    """
    # Get global references to the video stream, frame and lock
    global vs, outputFrame, lock
    # Initialise motion detector and warm-up frame counter
    md = MotionDetector(accumWeight=0.01)
    total = 0
    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)  # suppress sensor noise
        # Stamp the current wall-clock time in the bottom-left corner.
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        # Only detect once the background model has seen enough frames.
        if total > frameCount:
            # detect motion
            motion = md.detect(gray, tVal=10)
            # BUGFIX: overlay string previously read "Unocccupied" (typo).
            text = "Unoccupied"
            # Check if motion is found
            if motion is not None:
                text = "Occupied"
                (thresh, bbs) = motion
                for bb in bbs:
                    (minX, minY, maxX, maxY) = bb
                    cv2.rectangle(frame, (minX, minY), (maxX, maxY), (0, 0, 255), 2)
            cv2.putText(frame, "Status: {}".format(text), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        # update the background model and increment the total number of frames read so far
        md.update(gray)
        total += 1
        # acquire the lock, set the output frame and release the lock
        with lock:
            outputFrame = frame.copy()
def generate():
    """Yield the latest outputFrame as an MJPEG multipart byte stream.

    Infinite generator used as the body of the /video_feed Response;
    each yielded chunk is one JPEG part delimited by the `frame` boundary.
    """
    global outputFrame, lock
    # loop over frames from the output stream
    while True:
        # wait until the lock is acquired
        with lock:
            # check if the output frame is available, otherwise skip the iteration of the loop
            if outputFrame is None:
                continue
            # encode the frame in JPEG
            (flag, encodedImage) = cv2.imencode('.jpg', outputFrame)
            if not flag:
                continue
        # yield the output frame in the byte format
        yield(b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + bytearray(encodedImage) + b'\r\n')
@app.route("/")
def index():
    # Serve the page that embeds the /video_feed stream.
    return render_template("index.html")
@app.route("/video_feed")
def video_feed():
    # return the response generated along with the specific media type (mime type)
    return Response(generate(), mimetype="multipart/x-mixed-replace; boundary=frame")
if __name__ == '__main__':
    ### Camera stream and detection might not execute
    ### if running server from a CGI script
    # Start a thread that will perform motion detection
    t = threading.Thread(target=detect_motion, args=[32])
    t.daemon = True  # don't block interpreter shutdown on this loop
    t.start()
    # start the flask app
    # NOTE(review): debug=True on host 0.0.0.0 exposes the Werkzeug
    # debugger to the network — confirm this is not a production setup.
    app.run(host='0.0.0.0', port=6666, debug=True, threaded=True, use_reloader=False)
    vs.stop()
|
UTF-8
|
Python
| false | false | 3,543 |
py
| 4 |
main.py
| 1 | 0.623483 | 0.604008 | 0 | 113 | 30.345133 | 144 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.