repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
โ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
โ | gha_stargazers_count
int32 0
178k
โ | gha_forks_count
int32 0
88.9k
โ | gha_open_issues_count
int32 0
2.72k
โ | gha_language
stringlengths 1
16
โ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
NickSKim/csci321 | 8,572,754,757,915 | d1d48deab6a8a6b783edd7ac396c0ba12a6ecb0c | 5486779c8ff01bfdbd3a6b3264ff896e0594eb87 | /PygameDemos/0800gatherers/goodies.py | 458153bcc6a8684c2f74f0c4869f7ff4fd4e3ad4 | []
| no_license | https://github.com/NickSKim/csci321 | b10c1df202628945110c20a9160b246e41f39041 | 0dcffcb921ff50ddae02d5acfff387d4fbfaa1c3 | refs/heads/master | 2021-08-23T18:17:59.732956 | 2017-12-06T00:16:25 | 2017-12-06T00:16:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pygame,random,sys
from pygame.locals import *
from GM import utilities
from GM.statemachine import StateMachine, State
from GM.vector import *
import GM.makeimage
from functions import closest
from constants import *
class Goody(pygame.sprite.Sprite):
    """A collectible star sprite placed in the game world at a fixed position."""
    def __init__(self, pos, world, *groups):
        """Create the goody at *pos* and register it with the given sprite groups.

        pos    -- (x, y) world coordinates used as the sprite's center.
        world  -- owning world object, kept for later reference.
        groups -- pygame sprite groups this goody joins.
        """
        pygame.sprite.Sprite.__init__(self, *groups)
        # Star image rendered from the shared size/color constants.
        self.image = GM.makeimage.star(GOODY_SIZE, GOODY_COLOR)
        self.rect = self.image.get_rect()
        self.pos = vector(pos)
        self.rect.center = self.pos
        self.world = world
    def update(self):
        # Re-sync the draw rect with the (possibly mutated) position vector.
        self.rect.center = self.pos
| UTF-8 | Python | false | false | 645 | py | 255 | goodies.py | 127 | 0.654264 | 0.654264 | 0 | 21 | 28.428571 | 63 |
sczhan/wode | 16,569,983,869,388 | 34529f2828e69106d09d00d3cfdf97bb1e36417b | 5916383e8d3df886edd20ac00ce9706a78078f56 | /xitike(ไน ้ข่ฏพ)/xitike(็ฌฌ6็ซ ็ฌ่ซไธๅฎ่ทต)/linux/02_agent.py | efa6fb0cfd11931888836eb0f1cd55bce99d8662 | []
| no_license | https://github.com/sczhan/wode | 556154e8ccaa9192ea257bc88df3c5e4b268f88e | af4c721d0cedfdd2fe01dd681539724d1d64c378 | refs/heads/master | 2021-07-06T22:26:34.465708 | 2020-09-04T18:56:38 | 2020-09-04T18:56:38 | 181,295,279 | 1 | 0 | null | false | 2019-09-09T16:30:00 | 2019-04-14T10:53:57 | 2019-09-06T13:03:28 | 2019-09-09T16:29:59 | 198 | 0 | 0 | 0 | Python | false | false | """
http://www.langlang2017.com/index.html
http://www.langlang2017.com/route.html
http://www.langlang2017.com/FAQ.html
"""
import random
from urllib import request
def spider(url: str):
    """Download *url* with a randomly chosen User-Agent and save the body.

    The response body is appended to a local file named after the last
    path component of the URL (e.g. 'index.html').
    """
    user_agents = [
        # NOTE: the original list was missing the comma below, so the two
        # strings were implicitly concatenated into a single bogus value
        # and random.choice() always picked that one merged string.
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.6823.400 QQBrowser/10.3.3117.400",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36",
    ]
    user_agent = random.choice(user_agents)
    # urllib expects the standard "User-Agent" name; the original
    # "User_Agent" key was sent as a non-standard header and ignored,
    # so the default Python-urllib agent was used instead.
    headers = {
        "User-Agent": user_agent
    }
    req = request.Request(url, headers=headers)
    response = request.urlopen(req)
    html = response.read().decode()
    filename = url.split("/")[-1]
    with open(filename, "a", encoding="UTF-8") as f:
        # write() the whole document; writelines() on a str wrote it
        # character-by-character (same bytes, but misleading and slow).
        f.write(html)
if __name__ == '__main__':
    # Crawl the three langlang2017 pages and save each one to a local file.
    url_list = ["http://www.langlang2017.com/index.html", "http://www.langlang2017.com/route.html", "http://www.langlang2017.com/FAQ.html"]
    for urls in url_list:
        spider(urls)
bulainarciss/Nyan-Cat-Russian-Version | 4,904,852,685,450 | c1736d7e0d75eac5b81f42d51978562df714e3be | 58806bd2a43da54a101e2b1339f348deb4c7fec1 | /nyan_cat.py | f6ba8ed548f6440dd406b4ce72541802af17e6bb | []
| no_license | https://github.com/bulainarciss/Nyan-Cat-Russian-Version | 0b5f98247521b5bfed2561026cb57c61d2813b58 | 8cdaaafd17b29f4bec0d9fb91829520d4180d264 | refs/heads/master | 2023-01-29T09:22:30.531440 | 2020-12-08T14:36:07 | 2020-12-08T14:36:07 | 319,663,101 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame
class Cat():
    """Player-controlled cat sprite with flappy-style jump/fall physics."""
    def __init__(self, nc_game):
        """Cache game references and place the cat at the screen's mid-left.

        nc_game -- main game object providing .screen and .settings.
        """
        self.screen = nc_game.screen
        self.settings = nc_game.settings
        # (sic) "roation" — typo carried over from the settings attribute name.
        self.max_roation =self.settings.cat_roation
        self.gravity = 9.8
        self.tilt = 0
        # Ticks elapsed since the last jump; drives the fall parabola in move().
        self.tick_count = 0
        self.vel = 0
        self.rot_vel = self.settings.cat_rot_vel
        self.screen_rect = nc_game.screen.get_rect()
        self.image = self.settings.cat_image
        self.rect = self.image.get_rect()
        self.rect.midleft = self.screen_rect.midleft
        self.y = float(self.rect.y)
    def blitme(self):
        # Draw the cat at its current rect position.
        self.screen.blit(self.image, self.rect)
    def jump(self):
        # Negative velocity moves the sprite upward; reset the tick clock
        # so the displacement parabola restarts from the jump instant.
        self.vel = -10
        self.tick_count = 0
    def move(self):
        """Advance one physics tick: displacement = v*t + 1.5*t**2."""
        self.tick_count += 1
        displacement = self.vel * (self.tick_count) + 1.5*(self.tick_count)**2
        # Terminal fall speed: never drop more than 16 px per tick.
        if displacement >= 16:
            displacement = 16
        # Small extra boost while still moving upward.
        if displacement < 0:
            displacement -= 2
        # Clamp movement at the top (-20) and bottom (600) of the play area.
        if self.rect.y + displacement >= -20:
            self.rect.y += displacement
        if self.rect.y >= 600:
            self.rect.y = 600
    def center_cat(self):
        # Reset the cat to the vertical middle of the screen.
        self.rect.y = self.screen_rect.height / 2
| UTF-8 | Python | false | false | 1,187 | py | 6 | nyan_cat.py | 6 | 0.559393 | 0.536647 | 0 | 41 | 27.902439 | 78 |
ki93/TIL | 12,850,542,178,178 | b4af0ece8c2fd6abd84b3918bb7633bf25ceebdb | e74bf697595c2d3bf9a9753613dd82490ab5dc4c | /django/source/Day02/quiz.py | d016a4c830d44914e23cc0be7469b5a8db09fce9 | []
| no_license | https://github.com/ki93/TIL | 25f674465e03d16d8819af05d987099c1cfb7e23 | d027642f3c2b2718ec4b9c062556fc7efa8bd3f7 | refs/heads/master | 2020-05-26T19:42:08.834996 | 2019-12-10T08:49:56 | 2019-12-10T08:49:56 | 188,350,275 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import requests
import time
# Quiz
# 1. Compute the average of the scores below.
scores = {
    "수학": 90,
    "영어": 87,
    "한국지리 ": 92
}
# Use the built-in sum() instead of shadowing it with an accumulator
# variable; the original `sum = 0` loop also leaked its final total
# into quiz 2 below.
avg1 = sum(scores.values()) / len(scores)
print(avg1)
# 2. Compute each student's average score.
scores = {
    "a학생": {
        "수학": 80,
        "국어": 90,
        "음악": 100
    },
    "b학생": {
        "수학": 100,
        "국어": 100,
        "음악": 100
    }
}
# Key : {a학생 , b학생}
for subject_scores in scores.values():
    # A fresh sum() per student: the original reused the accumulator left
    # over from quiz 1 (269), inflating the first student's average, and
    # printed only the last student's result.
    avg = sum(subject_scores.values()) / len(subject_scores)
    print(avg)
url ="http://webtoon.daum.net/data/pc/webtoon/list_serialized/thu?timeStamp=1573024597086"
# response = requests.get(url)
# ์๋ต์ผ๋ก ์จ ๋ด์ฉ์ ๋ฐ๋กjson์ผ๋ก ๋ฐ๊ฟ์ค
# data = response.json()
# print(type(data))
# 3. ๋ค์ ์นํฐ์ ๊ธ์์ผ ์นํฐ ์ ์ฒด์ ๋ฆฌ์คํธ ์ค์์ ๊ฐ ์นํฐ์ ์ ๋ชฉ, ์ค๋ช
, ์๊ฐ์ด๋ฆ, ์ฅ๋ฅด, ์ธ๋ค์ผ ์ด๋ฏธ์ง(์ฃผ์)๋ง ๊ณจ๋ผ์๋ก์ด dictionary๋ฅผ ๋ง๋ค๊ณ ์ด dictionary๋ฅผ ๋ด๊ณ ์๋ list๋ฅผ ๋ง๋์์ค
# for d in data.keys():
# print(type(data["data"]))
# webtoon_data = data["data"]
# toons = []
# for toon in webtoon_data:
# #์ ๋ชฉ์ key ๋ title
# title = toon["title"]
# #์ค๋ช
์ key๋ introduction
# desc = toon["introduction"]
# #์ฅ๋ฅด์ ์์น๋ 'cartoon'์์ 'genre'๋ผ๋ ๋ฆฌ์คํธ ์์ 'name'์ด๋ผ๋ key
# genres = []
# for genre in toon["cartoon"]["genres"]:
# genres.append(genre["name"])
# artist = []
# for author in toon["cartoon"]["artists"]:
# artist.append(author["name"])
# img_url = toon["pcThumbnailImage"]["url"]
# tmp = {
# title : {
# "desc" : desc,
# "author" : artist,
# "img_url" : img_url
# }
# }
# toons.append(tmp)
# print(toons)
# 3-1. ๊ธ์์ผ ๋ฟ๋ง ์๋๋ผ ์ผ์์ผ๋ถํฐ ํ ์์ผ๊น์ง์ ์นํฐ ๋ฐ์ดํฐ๋ฅผ ํ์ฑํด์ ๊ฐ๊ฐ dictionary๋ก ๋ง๋์ธ์
# ํจ์ ๋ง๋๋ ๋ฒ
# def ํจ์๋ช
(ํ๋ผ๋ฏธํฐ):
def request_json_data_from_url(url):
    """GET *url* and return the response body decoded as JSON."""
    response = requests.get(url)
    return response.json()
def parse_daum_webtoon_data(data):
    """Extract a per-webtoon summary from a Daum webtoon list payload.

    data -- decoded JSON with a 'data' list of webtoon objects.
    Returns a list of one-key dicts {title: {desc, author, img_url, genres}}.
    The original collected the genre names but then silently dropped them;
    they are now included under the 'genres' key (additive, so existing
    consumers of the other keys are unaffected).
    """
    toons = []
    for toon in data["data"]:
        # Genre and artist names live under 'cartoon' as lists of objects.
        genres = [genre["name"] for genre in toon["cartoon"]["genres"]]
        artists = [author["name"] for author in toon["cartoon"]["artists"]]
        toons.append({
            toon["title"]: {
                "desc": toon["introduction"],
                "author": artists,
                "img_url": toon["pcThumbnailImage"]["url"],
                "genres": genres,
            }
        })
    return toons
# Day-of-week tokens (Monday..Sunday) as used by the Daum webtoon API URLs.
days = ['mon','tue','wed','thu','fri','sat','sun']
daily_toon_data = {}
for day in days:
    # Fetch and parse one weekday's serialized webtoon list.
    url = f'http://webtoon.daum.net/data/pc/webtoon/list_serialized/{day}'
    data = request_json_data_from_url(url)
    daily_toon_data[day] = parse_daum_webtoon_data(data)
    print(url)
    print(daily_toon_data[day])
    # Throttle requests to avoid hammering the server.
    time.sleep(3)
| UTF-8 | Python | false | false | 3,711 | py | 431 | quiz.py | 250 | 0.529839 | 0.51342 | 0 | 137 | 22.116788 | 120 |
iuni-cadre/DataPipelineAndProvenanceForCADRE | 16,896,401,366,278 | b86dca86848f4fab57b0c1964fb92abf6874f284 | c9834a72051a685857a8abd615747c1a18de6296 | /uspto20220630/check_exports.py | 37cbf5fa68737d182557a12eb391425e516636ac | []
| no_license | https://github.com/iuni-cadre/DataPipelineAndProvenanceForCADRE | fc9a5a7aa5dfc60b274928bd16b3f7879939c31c | 508712bcf6ac46edc3edea07e14cbc2c7f3fba9e | refs/heads/master | 2023-07-05T19:48:44.319934 | 2023-06-26T14:55:07 | 2023-06-26T14:55:07 | 200,693,643 | 0 | 2 | null | false | 2022-09-08T13:43:32 | 2019-08-05T16:43:43 | 2021-09-07T14:35:30 | 2022-09-08T13:43:26 | 3,632 | 0 | 2 | 4 | Jupyter Notebook | false | false | from os import listdir
from os import popen
import pandas as pd
# Sanity-check every export file: the pandas row count must match the
# file's physical line count (wc -l minus the header line), otherwise
# the TSV was parsed with lost/merged rows.
path = '/N/project/iuni_cadre/uspto20220630/'
files = listdir(path)
for f in files:
    df = pd.read_csv(path+f, sep='\t', low_memory=False)
    df_rows = df.shape[0]
    # `wc -l` prints "<count> <name>"; take the count, subtract the header.
    f_rows = int(popen(f'wc -l {path}{f}').read().split(' ')[0]) - 1
    if f_rows != df_rows:
        print('#'*20, f, 'number of rows do not match','#'*20)
    else:
        print(f, 'good')
peerau/byceps | 7,730,941,141,815 | 83b03a6d3f4c61eb820ec8392afdb3b91416068a | c46f6015a2b9f7c6e5aec3d043f0b75057756852 | /tests/services/user/test_query.py | de2ee5299ec361309b3f769b05a90e250b9327a7 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/peerau/byceps | 6a7db0f8db00d8a77e824018d6efdbab57abbaaf | 1f691280f5c086179ce372a471a0f1d9952a86f5 | refs/heads/master | 2020-07-28T06:31:29.468607 | 2019-09-15T00:18:48 | 2019-09-15T00:18:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from uuid import UUID
from pytest import raises
from byceps.services.user import service as user_service
from tests.base import AbstractAppTestCase
from tests.helpers import create_user
class UserQueryTest(AbstractAppTestCase):
    """Integration tests for the byceps user service query helpers."""
    def test_find_user_by_screen_name_found(self):
        # A freshly created user must be retrievable by screen name.
        screen_name = 'ghost'
        user = create_user(screen_name)
        actual = user_service.find_user_by_screen_name(screen_name)
        assert actual.id == user.id
    def test_find_user_by_screen_name_not_found(self):
        # Unknown screen names yield None rather than raising.
        actual = user_service.find_user_by_screen_name('unknown_dude')
        assert actual is None
    def test_get_anonymous_user(self):
        # The anonymous user has a fixed all-zero UUID and no profile data.
        user = user_service.get_anonymous_user()
        assert user.id == UUID('00000000-0000-0000-0000-000000000000')
        assert not user.enabled
        assert not user.deleted
        assert user.avatar is None
        assert user.avatar_url is None
        assert not user.is_orga
    def test_get_email_address_found(self):
        email_address = 'lanparty@lar.ge'
        user = create_user('xpandr', email_address=email_address)
        actual = user_service.get_email_address(user.id)
        assert actual == email_address
    def test_get_email_address_not_found(self):
        # Asking for an unknown user id must raise ValueError.
        unknown_user_id = UUID('00000000-0000-0000-0000-000000000001')
        with raises(ValueError):
            user_service.get_email_address(unknown_user_id)
| UTF-8 | Python | false | false | 1,521 | py | 75 | test_query.py | 57 | 0.678501 | 0.631164 | 0 | 58 | 25.224138 | 70 |
wangfan662012/test | 12,618,613,931,466 | 13eefd9b809ffb0534daa9b87473115e3953affd | b3c929c07e34a7fd24a078ba3b804a732894d3e0 | /python_dictionary/read_config.py | 142202249ddaf10096542eca4fa71fc2ca2acbf5 | []
| no_license | https://github.com/wangfan662012/test | 5abe45ef82d288510c70491ba6a07ca8618418be | 51b500828761d298685ad2d55ccdeb31c305765c | refs/heads/master | 2020-09-04T13:45:39.077213 | 2020-03-06T07:02:58 | 2020-03-06T07:02:58 | 219,747,687 | 0 | 0 | null | false | 2020-02-13T08:34:40 | 2019-11-05T13:09:01 | 2020-02-13T07:04:27 | 2020-02-13T08:24:19 | 21 | 0 | 0 | 1 | Python | false | false | import configparser
# Read config.ini and dump its section names plus the test_db_config values.
config_file = 'config.ini'
parser = configparser.ConfigParser()
parser.read(config_file, encoding='utf-8')
# All section names defined in the file.
print(parser.sections())
# Key/value pairs of one section, converted from tuples to a dict.
db_options = dict(parser.items('test_db_config'))
print(db_options)
| UTF-8 | Python | false | false | 351 | py | 48 | read_config.py | 42 | 0.730104 | 0.726644 | 0 | 21 | 12.761905 | 35 |
digipodium/string-and-string-functions-nikhilrastogi20 | 12,652,973,686,116 | e4f6e68dd68fffb30010d61296c58cfe095fb3d9 | 9e8917fc6e8e05a3fea1aa1f0af4be3dfb3c8c77 | /14_followed _by_dot.py | 623b6f41eff676ddffbb01076689616ea98ff9ee | []
| no_license | https://github.com/digipodium/string-and-string-functions-nikhilrastogi20 | 5eada55385b3319a6b60ddadaea956f0b3de82d0 | 45e21bf7c73b6a6d9a5c7f02b20cacd73cb46e4d | refs/heads/main | 2023-08-11T04:51:04.895162 | 2021-10-13T11:45:31 | 2021-10-13T11:45:31 | 414,640,007 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | for i in range(1,10) :
print( i ,end ='. \n' )
| UTF-8 | Python | false | false | 62 | py | 20 | 14_followed _by_dot.py | 20 | 0.387097 | 0.33871 | 0 | 3 | 17.666667 | 27 |
ashishsanjaysharma/TSN_CNC_Netconf_Notifications | 11,493,332,505,338 | 9ba38f46b6294725f79154fdedc5e5571682aa8f | d791695781b6b2de0c28ca5d8724bab25032b221 | /netconf_cli/connect_to_station.py | e056cd5f84db14b7dc5800e6e39489aa844fa256 | []
| no_license | https://github.com/ashishsanjaysharma/TSN_CNC_Netconf_Notifications | 971f783ce704c20462522774932268e28313fef6 | 304f7c1e38cf1f15faf9dea6e7167e1f95398718 | refs/heads/master | 2022-04-04T10:56:40.563380 | 2020-02-19T22:51:55 | 2020-02-19T22:51:55 | 241,738,456 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
from ncclient import manager
from ncclient import operations
import xml.dom.minidom as xml_p
import xmltodict
from scheduler import run_scheduler
import threading
# NETCONF <create-subscription> RPC payload: subscribes this session to
# notifications matching the cnc-data xpath filter. (The name's
# "subscribtion" spelling is a legacy typo kept for compatibility.)
rpc_subscribtion_msg = '''
<rpc message-id="102" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
<create-subscription xmlns="urn:ietf:params:xml:ns:netconf:notification:1.0">
<filter xmlns:cnc-data="urn:cnc:data:1.0"
type="xpath"
select="/cnc-data:cnc-notif"/>
</create-subscription>
</rpc>
'''
def look_good_xml(notif_xml):
    """Return *notif_xml* re-indented for human-readable display.

    Also prints a banner line announcing the received notification.
    """
    print("------ RECEVIED NOTIFICATION XML DATA -----------")
    dom = xml_p.parseString(str(notif_xml))
    return dom.toprettyxml(indent=" ")
def conn_subs_thread(conn_details):
    """Worker: connect to one end station and stream its notifications.

    conn_details -- (host, port, username, password) tuple.
    Subscribes via rpc_subscribtion_msg, then blocks forever printing each
    received notification; on any failure a message is printed and the
    thread exits.
    """
    try:
        with manager.connect(host=conn_details[0],
                             port=conn_details[1],
                             username=conn_details[2],
                             password=conn_details[3],
                             look_for_keys=False,
                             hostkey_verify=False) as conn_manager:
            sess_id = conn_manager._session.id
            print("Connected to END STATION NETCONF Server with Session ID : ", sess_id)
            # Register for notifications; the subscription reply itself
            # is not inspected.
            conn_manager._session.send(rpc_subscribtion_msg)
            while True:
                notification_msg = conn_manager.take_notification(block=True, timeout=None)
                notif_xml = notification_msg.notification_xml
                print(look_good_xml(notif_xml))
                print("===== CALLING SCHEDULER ==========")
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; narrowed so the process can still be interrupted.
        print("END STATION DEVICE NOT REACHABLE")
def connect_to_end_station(end_station_conn_details):
    """Spawn one subscription thread per end-station connection tuple.

    end_station_conn_details -- iterable of (host, port, user, password)
    tuples, each handed to conn_subs_thread.
    Returns the list of started Thread objects so callers may join() them
    (the original built the list but discarded it, returning None that
    callers ignored — so returning it is backward compatible).
    """
    threads = []
    # Iterate the connection tuples directly instead of range(len(...)).
    for conn_details in end_station_conn_details:
        worker = threading.Thread(target=conn_subs_thread, args=(conn_details,))
        threads.append(worker)
        worker.start()
    return threads
'''
def connect_to_end_station(end_station_conn_details):
for end_station_conn in range(len(end_station_conn_details)):
#print end_station_conn_params[end_station_conn]
conn_details = end_station_conn_details[end_station_conn]
with manager.connect(host=conn_details[0],
port=conn_details[1],
username=conn_details[2],
password=conn_details[3],
hostkey_verify=False,
look_for_keys = False) as conn_manager:
conn_manager._session.send(rpc_subscribtion_msg)
while True:
notification_msg = conn_manager.take_notification(block=True, timeout=None)
notif_xml = notification_msg.notification_xml
print look_good_xml(notif_xml)
run_scheduler(notif_xml)
'''
| UTF-8 | Python | false | false | 2,879 | py | 14 | connect_to_station.py | 3 | 0.614449 | 0.608545 | 0 | 76 | 36.710526 | 116 |
TakeMeHigher/Pytho_high_efficy | 9,216,999,843,898 | 0d9d693e222ce53bf406bbbcc517fbeb76c3ca33 | dd8b262720c21f72e2e320386baf2cb080234e6f | /ๅฟซ้ๆพๅฐๅคไธชๅญๅ
ธ็ๅ
ฌๅ
ฑkey.py | 470e16720831377f7836506e8abdb41419b1e607 | []
| no_license | https://github.com/TakeMeHigher/Pytho_high_efficy | 45d84aecdd1b96afdbb3b94c6376eb2361d9bab8 | 203fe3b8623d3a221f38e6b873a3ef28cde07bea | refs/heads/master | 2020-03-15T13:13:00.157426 | 2018-05-28T14:19:17 | 2018-05-28T14:19:17 | 132,161,349 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # encoding:utf-8
from random import randint, sample
d1 = {k: randint(1, 10) for k in sample('abcdef', 3)}
d2 = {k: randint(1, 10) for k in sample('abcdef', 4)}
d3 = {k: randint(1, 10) for k in sample('abcdef', 5)}
# low
res = []
for i in d1:
if i in d2 and i in d3:
res.append(i)
print(res)
# ๅฏนไบPython2
print(d1.viewkeys() & d2.viewkeys() & d3.viewkeys())
# Python2 ๆ็จ
# ๅฏนไบn่ฝฎๆฅ่ฏด ็จไธ้ข่ฟไธช
ss = map(dict.viewkeys, [d1, d1, d3])
print (ss)
aa = reduce(lambda a, b: a & b, ss)
print aa
| UTF-8 | Python | false | false | 522 | py | 9 | ๅฟซ้ๆพๅฐๅคไธชๅญๅ
ธ็ๅ
ฌๅ
ฑkey.py | 9 | 0.617409 | 0.562753 | 0 | 22 | 21.454545 | 53 |
rickyschools/GIDEON | 4,552,665,375,981 | f9faa24167b221232e48eca865ebaa278f1d3012 | 43769a7851776d19ace866c72c4de0aef4ca6624 | /ml/__init__.py | d2d60e0bb24a2219a2c372c523a1d0d613dd5aac | []
| no_license | https://github.com/rickyschools/GIDEON | 824084474d91c1725eb03a8427fbd495e8be148e | bf37d6c929e195deba35099b08409440b4a88d76 | refs/heads/master | 2018-08-22T09:09:18.499538 | 2018-05-21T02:35:50 | 2018-05-21T02:35:50 | 74,931,686 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .learners import ActionModeler
| UTF-8 | Python | false | false | 36 | py | 29 | __init__.py | 24 | 0.861111 | 0.861111 | 0 | 1 | 35 | 35 |
RamsesMartinez/CK-Suministros | 412,316,881,729 | c7b78dabc79cd0fd56b52da2afe7eeb07078bb92 | 7602e72c362c32d03f34eaad34188b1c6a6cf840 | /sales/views.py | 42b84b777623c4fa02739ccd9c228f24c31c3602 | []
| no_license | https://github.com/RamsesMartinez/CK-Suministros | 78624871940ba2e242d29f03ec7ae989fdb48d1a | 1e2866192bcae8e726fe24bc0114194d12011e81 | refs/heads/master | 2021-05-13T17:57:43.853247 | 2018-01-03T07:08:35 | 2018-01-03T07:08:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
from datetime import datetime, date, timedelta
from decimal import Decimal
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from branchoffices.models import CashRegister
from cloudkitchen.settings.base import PAGE_TITLE
from helpers import Helper, SalesHelper, ProductsHelper
from products.models import Cartridge, PackageCartridge, PackageCartridgeRecipe, \
ExtraIngredient
from sales.models import Ticket, TicketDetail, TicketExtraIngredient
from users.models import User as UserProfile
# ------------------------------------- Sales -------------------------------------
@login_required(login_url='users:login')
def sales(request):
    """Sales dashboard view.

    GET renders the sales page.  POST acts as an AJAX endpoint dispatched
    on request.POST['type']:
      * 'sales_day'      -> per-ticket earnings for one date (dd-mm-YYYY)
      * 'ticket_details' -> cartridge/package line items of one ticket
      * 'tickets'        -> flat ticket rows for a comma-separated date range
      * 'sales_week'     -> aggregated sales + tickets for a week range
    """
    helper = Helper()
    sales_helper = SalesHelper()
    if request.method == 'POST':
        if request.POST['type'] == 'sales_day':
            """
            Returns a list with objects:
            Each object has the following characteristics
            """
            sales_day_list = []
            # Window is [requested day, next day).
            start_day = helper.naive_to_datetime(datetime.strptime(request.POST['date'], '%d-%m-%Y').date())
            end_date = helper.naive_to_datetime(start_day + timedelta(days=1))
            tickets_objects = sales_helper.get_all_tickets().filter(created_at__range=[start_day, end_date])
            for ticket in tickets_objects:
                """
                Filling in the sales list of the day
                """
                earnings_sale_object = {
                    'id_ticket': ticket.id,
                    'datetime': timezone.localtime(ticket.created_at),
                    'earnings': 0
                }
                # Sum every detail row belonging to this ticket.
                for ticket_detail in sales_helper.get_all_tickets_details():
                    if ticket_detail.ticket == ticket:
                        earnings_sale_object['earnings'] += ticket_detail.price
                sales_day_list.append(earnings_sale_object)
            return JsonResponse({'sales_day_list': sales_day_list})
        if request.POST['type'] == 'ticket_details':
            ticket_id = int(request.POST['ticket_id'])
            ticket_object = {
                'ticket_id': ticket_id,
                'ticket_order': '',
                'cartridges': [],
                'packages': [],
            }
            # Get cartridges details
            for ticket_detail in sales_helper.get_all_tickets_details():
                if ticket_detail.ticket.id == ticket_id:
                    ticket_object['ticket_order'] = ticket_detail.ticket.order_number
                    if ticket_detail.cartridge:
                        cartridge_object = {
                            'name': ticket_detail.cartridge.name,
                            'quantity': ticket_detail.quantity,
                            'total': ticket_detail.price
                        }
                        ticket_object['cartridges'].append(cartridge_object)
                    elif ticket_detail.package_cartridge:
                        # A package line expands into its recipe's cartridges.
                        cartridges_list = []
                        package_cartridge_recipe = PackageCartridgeRecipe.objects.filter(
                            package_cartridge=ticket_detail.package_cartridge)
                        for cartridge_recipe in package_cartridge_recipe:
                            cartridges_list.append(cartridge_recipe.cartridge.name)
                        package_cartridge_object = {
                            'cartridges': cartridges_list,
                            'quantity': ticket_detail.quantity,
                            'total': ticket_detail.price
                        }
                        ticket_object['packages'].append(package_cartridge_object)
            return JsonResponse({'ticket_details': ticket_object})
        if request.POST['type'] == 'tickets':
            tickets_objects_list = []
            # 'dt_week' is "<start>,<end>" in dd-mm-YYYY.
            initial_dt = request.POST['dt_week'].split(',')[0]
            final_dt = request.POST['dt_week'].split(',')[1]
            initial_dt = helper.naive_to_datetime(datetime.strptime(initial_dt, '%d-%m-%Y').date())
            final_dt = helper.naive_to_datetime(datetime.strptime(final_dt, '%d-%m-%Y').date())
            for ticket in sales_helper.get_all_tickets().filter(created_at__range=[initial_dt, final_dt]):
                for ticket_detail in sales_helper.get_all_tickets_details():
                    if ticket_detail.ticket == ticket:
                        # One flat row per ticket detail (Spanish column labels).
                        ticket_object = {
                            'ID': ticket.id,
                            'Fecha': timezone.localtime(ticket.created_at).date(),
                            'Hora': timezone.localtime(ticket.created_at).time(),
                            'Vendedor': ticket.seller.username,
                        }
                        if ticket.payment_type == 'CA':
                            ticket_object['Tipo de Pago'] = 'Efectivo'
                        else:
                            ticket_object['Tipo de Pago'] = 'Crédito'
                        if ticket_detail.cartridge:
                            ticket_object['Producto'] = ticket_detail.cartridge.name
                        else:
                            ticket_object['Producto'] = None
                        if ticket_detail.package_cartridge:
                            ticket_object['Paquete'] = ticket_detail.package_cartridge.name
                        else:
                            ticket_object['Paquete'] = None
                        ticket_object['Cantidad'] = ticket_detail.quantity
                        ticket_object['Total'] = ticket_detail.price
                        ticket_object['Precio Unitario'] = ticket_detail.price / ticket_detail.quantity
                        tickets_objects_list.append(ticket_object)
            return JsonResponse({'ticket': tickets_objects_list})
        if request.POST['type'] == 'sales_week':
            initial_date = request.POST['dt_week'].split(',')[0]
            final_date = request.POST['dt_week'].split(',')[1]
            initial_date = helper.parse_to_datetime(initial_date)
            # End bound is exclusive, so extend by one day.
            final_date = helper.parse_to_datetime(final_date) + timedelta(days=1)
            filtered_sales = sales_helper.get_sales_list(initial_date, final_date)
            tickets = sales_helper.get_tickets(initial_date, final_date)
            data = {
                'sales': filtered_sales,
                'tickets': tickets,
                'week_number': helper.get_week_number(initial_date)
            }
            return JsonResponse(data)
    # GET: render the dashboard with today's/this week's summary data.
    template = 'sales/sales.html'
    title = 'Registro de Ventas'
    context = {
        'title': PAGE_TITLE + ' | ' + title,
        'page_title': title,
        'actual_year': datetime.now().year,
        'sales_week': sales_helper.get_sales_actual_week(),
        'today_name': helper.get_name_day(datetime.now()),
        'today_number': helper.get_number_day(datetime.now()),
        'week_number': helper.get_week_number(date.today()),
        'tickets': sales_helper.get_tickets_today_list(),
        'dates_range': sales_helper.get_dates_range_json(),
    }
    return render(request, template, context)
@login_required(login_url='users:login')
def delete_sale(request):
    """AJAX endpoint: delete the Ticket named by POST['ticket_id'].

    Returns {'result': 'excelente!'} on success. A missing ticket now
    yields a 404 (get_object_or_404, consistent with new_sale) instead
    of an unhandled Ticket.DoesNotExist / 500.
    Non-POST requests fall through and return None, as before.
    """
    if request.method == 'POST':
        ticket = get_object_or_404(Ticket, id=request.POST['ticket_id'])
        ticket.delete()
        return JsonResponse({'result': 'excelente!'})
@login_required(login_url='users:login')
def new_sale(request):
    """Register a new sale (POST) or render the sale-capture page (GET).

    POST expects a JSON-encoded ticket in request.POST['ticket'] with
    'payment_type', 'cartridges', 'extra_ingredients_cartridges' and
    'packages' entries; responds with the new ticket's id and order number.
    GET renders the breakfast or food menu depending on the URL path.
    """
    helper = Helper()
    sales_helper = SalesHelper()
    products_helper = ProductsHelper()
    if request.method == 'POST':
        if request.POST['ticket']:
            username = request.user
            user_profile_object = get_object_or_404(UserProfile, username=username)
            cash_register = CashRegister.objects.first()
            ticket_detail_json_object = json.loads(request.POST.get('ticket'))
            payment_type = ticket_detail_json_object['payment_type']
            order_number = 1
            """
            Gets the tickets in the week and returns n + 1
            where n is the Ticket.order_number biggest for the current week
            TODO:
                1. Get tickets in the current week range
                2. Search the ticket with the largest order_number attribute
                3. save the 'new_ticket_object' with the new attribute (n + 1)
                4. Save the new object
            """
            # Tickets since the start of the current week.
            filtered_tickets = sales_helper.get_all_tickets().filter(
                created_at__gte=datetime.now() - timedelta(days=helper.get_number_day(datetime.now())))
            for ticket in filtered_tickets:
                order_number_ticket = ticket.order_number
                if order_number_ticket >= order_number:
                    order_number = order_number_ticket + 1
            new_ticket_object = Ticket(
                cash_register=cash_register, seller=user_profile_object,
                payment_type=payment_type, order_number=order_number)
            new_ticket_object.save()
            """
            Saves the tickets details for cartridges
            """
            for ticket_detail in ticket_detail_json_object['cartridges']:
                cartridge_object = get_object_or_404(Cartridge, id=ticket_detail['id'])
                quantity = ticket_detail['quantity']
                price = ticket_detail['price']
                new_ticket_detail_object = TicketDetail(
                    ticket=new_ticket_object,
                    cartridge=cartridge_object,
                    quantity=quantity,
                    price=price
                )
                new_ticket_detail_object.save()
            # Cartridges sold with extra ingredients: one TicketDetail plus
            # one TicketExtraIngredient row per chosen ingredient.
            for ticket_detail in ticket_detail_json_object['extra_ingredients_cartridges']:
                cartridge_object = get_object_or_404(Cartridge, id=ticket_detail['cartridge_id'])
                quantity = ticket_detail['quantity']
                price = ticket_detail['price']
                new_ticket_detail_object = TicketDetail(
                    ticket=new_ticket_object,
                    cartridge=cartridge_object,
                    quantity=quantity,
                    price=price
                )
                new_ticket_detail_object.save()
                for ingredient in ticket_detail['extra_ingredients']:
                    extra_ingredient_object = ExtraIngredient.objects.get(id=ingredient['id'])
                    new_extra_ingredient_object = TicketExtraIngredient(
                        ticket_detail=new_ticket_detail_object,
                        extra_ingredient=extra_ingredient_object,
                        price=ingredient['cost']
                    )
                    new_extra_ingredient_object.save()
            for ticket_detail_package in ticket_detail_json_object['packages']:
                """
                Saves the tickets details for package cartridges
                """
                package_object = get_object_or_404(PackageCartridge, id=ticket_detail_package['id'])
                quantity = ticket_detail_package['quantity']
                price = ticket_detail_package['price']
                new_ticket_detail_object = TicketDetail(
                    ticket=new_ticket_object,
                    package_cartridge=package_object,
                    quantity=quantity,
                    price=price
                )
                new_ticket_detail_object.save()
            json_response = {
                'status': 'ready',
                'ticket_id': new_ticket_object.id,
                'ticket_order': new_ticket_object.order_number,
            }
            return JsonResponse(json_response)
        # POST without a ticket payload.
        return JsonResponse({'status': 'error'})
    else:
        # GET: pick the menu template from the URL path segment.
        path = request.get_full_path().split('/')[3]
        if path == 'breakfast':
            template = 'new/breakfast.html'
            title = 'Vender Desayuno'
        else:
            template = 'new/food.html'
            title = 'Vender Comida'
        cartridges_list = products_helper.get_all_cartridges().order_by('name')
        package_cartridges = products_helper.get_all_packages_cartridges().order_by('name')
        extra_ingredients = products_helper.get_all_extra_ingredients()
        # Build, per cartridge, the list of extra ingredients it offers;
        # only cartridges with at least one extra are exposed to the page.
        extra_ingredients_products_list = []
        for cartridge in cartridges_list:
            cartridge_object = {
                'id': cartridge.id,
                'name': cartridge.name,
                'extra_ingredients': [],
            }
            for ingredient in extra_ingredients:
                if cartridge == ingredient.cartridge:
                    ingredient_object = {
                        'id': ingredient.id,
                        'name': ingredient.ingredient.name,
                        'image': ingredient.image.url,
                        'cost': str(ingredient.cost),
                    }
                    cartridge_object['extra_ingredients'].append(ingredient_object)
            if len(cartridge_object['extra_ingredients']) > 0:
                extra_ingredients_products_list.append(cartridge_object)
        context = {
            'title': PAGE_TITLE + ' | ' + title,
            'page_title': title,
            'cartridges': cartridges_list,
            'package_cartridges': package_cartridges,
            'extra_ingredients': extra_ingredients,
            'extra_ingredients_products_list': extra_ingredients_products_list,
            'extra_ingredients_products_list_json': json.dumps(extra_ingredients_products_list),
        }
        return render(request, template, context)
# -------------------------------- Test ------------------------------
def test(request):
    """Debug view: render every ticket with its line items and total."""
    template = 'sales/test.html'
    # select_related avoids one query per detail when touching the
    # ticket/seller/cartridge/package relations below.
    tickets_details = TicketDetail.objects.select_related(
        'ticket', 'ticket__seller', 'cartridge', 'package_cartridge').all()
    tickets = Ticket.objects.all()
    tickets_list = []
    for ticket in tickets:
        ticket_object = {
            'ticket_parent': ticket,
            'cartridges': [],
            'packages': [],
            'total': Decimal(0.00),
        }
        # Attach this ticket's detail rows and accumulate its total.
        for ticket_details in tickets_details:
            if ticket_details.ticket == ticket:
                if ticket_details.cartridge:
                    cartridge_object = {
                        'cartridge': ticket_details.cartridge,
                        'quantity': ticket_details.quantity
                    }
                    ticket_object['cartridges'].append(cartridge_object)
                    ticket_object['total'] += ticket_details.price
                elif ticket_details.package_cartridge:
                    package_cartridge_object = {
                        'package': ticket_details.package_cartridge,
                        'quantity': ticket_details.quantity
                    }
                    ticket_object['packages'].append(package_cartridge_object)
                    ticket_object['total'] += ticket_details.price
        tickets_list.append(ticket_object)
    context = {
        'tickets': tickets_list,
    }
    return render(request, template, context)
| UTF-8 | Python | false | false | 15,237 | py | 7 | views.py | 7 | 0.537936 | 0.535639 | 0 | 347 | 42.907781 | 108 |
SilentCicero/sleth | 19,628,000,549,532 | 840d6423400c3a7450a9c59a8fb4b7594241a7d5 | 2ffa1f645e6c2b6f5f9ba26c09225aac955b7442 | /test/test_stops.py | 7ec001299bd3d616f2b40a789c68c2a85345cc0e | []
| no_license | https://github.com/SilentCicero/sleth | 4ca690c2425ab3f4f83e1dd7ffedd0ba86ba35ea | e8eb6db4f4714b204237409fa3dc22a586ac38e8 | refs/heads/master | 2021-01-17T05:36:44.555804 | 2015-02-10T13:13:04 | 2015-02-10T13:13:04 | 30,671,123 | 0 | 2 | null | true | 2015-02-11T21:40:31 | 2015-02-11T21:40:31 | 2015-02-10T13:13:07 | 2015-02-10T13:13:07 | 3,898 | 0 | 0 | 0 | null | null | null | from pyethereum import tester
class TestStops(object):
CONTRACT = """
def shared():
REEL_COUNT = 3
REEL_POSITIONS = 32
def get_stops(rnd):
stops = array(REEL_COUNT)
i = 0
while i < REEL_COUNT:
stops[i] = rnd % REEL_POSITIONS
rnd = rnd / REEL_POSITIONS
i += 1
return(stops, items=REEL_COUNT)
def pass_(rnd):
return(self.get_stops(rnd, outsz=REEL_COUNT), items=REEL_COUNT)
"""
def setup_class(cls):
cls.s = tester.state()
cls.c = cls.s.abi_contract(cls.CONTRACT)
cls.snapshot = cls.s.snapshot()
def setup_method(self, method):
self.s.revert(self.snapshot)
def test_get_stops(self):
assert self.c.get_stops(23888) == [16, 10, 23]
assert self.c.get_stops(1606) == [6, 18, 1]
assert self.c.get_stops(30464) == [0, 24, 29]
def test_pass(self):
assert self.c.pass_(23888) == [16, 10, 23]
| UTF-8 | Python | false | false | 925 | py | 40 | test_stops.py | 20 | 0.585946 | 0.537297 | 0 | 37 | 24 | 67 |
russellburdt/data-science | 7,705,171,372,433 | 0d79a7b6318b746366e8ccf30121f33b9f8de8a7 | 6861f6e996fb61249072788dace92e82ad188c99 | /sklearn/digits.py | 0ecd5fe3b31fd1e7e92c87432463c28b0c841570 | []
| no_license | https://github.com/russellburdt/data-science | 73409ef5de6add674a5537c4b219957ce2770aa2 | 8a69d577a8cacce8289cfc0d8d80c0076ed9a9f0 | refs/heads/master | 2019-08-08T10:06:59.981799 | 2019-02-13T16:42:37 | 2019-02-13T16:42:37 | 55,858,229 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
learning curves with digits dataset
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import cross_val_score, cross_val_predict, train_test_split, learning_curve
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
plt.style.use('bmh')
# extract training data
data = load_digits()
X, y = data['data'], data['target']
# X += np.random.randint(-20, 20, size=X.shape)
scaler = StandardScaler()
scaler.fit(X)
Xs = scaler.transform(X)
# initialize a list of classifiers
models = [LogisticRegression(), SVC(), GaussianNB(), RandomForestClassifier()]
# create and plot learning_curve results
fig, axes = plt.subplots(2, 2, figsize=(18, 10), sharex=True, sharey=True)
fig.canvas.set_window_title('Learning Curves for Digits Dataset')
for ax, model in zip(axes.flat, models):
train_sizes, train_scores, test_scores = \
learning_curve(model, Xs, y, train_sizes=np.linspace(0.1, 1, 10), cv=4)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
ax.plot(train_sizes, train_scores_mean, 'o-', color='r', label='Training score')
ax.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color='r')
ax.plot(train_sizes, test_scores_mean, 'o-', color='g', label='Cross-validation score')
ax.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color='g')
ax.legend(loc='upper right', fancybox=True, numpoints=3)
ax.set_title(type(model))
ax.set_ylim(0, 1)
ax.invert_yaxis()
fig.tight_layout()
plt.show() | UTF-8 | Python | false | false | 2,004 | py | 134 | digits.py | 117 | 0.726048 | 0.712575 | 0 | 52 | 37.538462 | 130 |
JavaRiceSparrow/projectTin_1 | 5,609,227,296,649 | 86f2527486dacee4036d3ffdf3fd58b50d2657d1 | 34294f473ce4004ce139c866e5bb8cdb84348689 | /backup/contour0.2.py | 7eb7203cf7eac6c3ea44d81cb4f7e2f9754e5d9e | []
| no_license | https://github.com/JavaRiceSparrow/projectTin_1 | 602bef838edcf6019f22568c79a7b383b1932a89 | 972df7973288d5420760fa7f4602f0e35b49a9f5 | refs/heads/master | 2022-10-09T00:05:49.989421 | 2020-06-13T06:20:35 | 2020-06-13T06:20:35 | 269,151,722 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from utils import *
from imglib import *
from PIL import Image
path1 = '1/1/database/base_1_2_1.bmp'
path2 = '1/4/database/base_1_1_4.bmp'
path3 = '1/7/database/base_1_1_7.bmp'
img1 = getImg(path2)
def getULpos(img):
if img.sum() == 0:
return 0
sizeX, sizeY = img.shape
imgt = img.copy()
sizeS = sizeX+sizeY-1
URline_bol = np.empty([sizeS],dtype=bool)
for i in reversed(range(sizeY-1)):
imgt[i][1:] = np.logical_or(imgt[i][1:],imgt[i+1][:-1])
URline_bol[sizeX+i] = imgt[i+1][-1]
URline_bol[:sizeX] = imgt[0]
pos = sizeS
for i in range(sizeS):
if imgt[0][i] !=0:
pos = i
break
if pos == sizeS:
print("It's empty!")
return 0
if pos < sizeY and pos < sizeX:
for i in range(pos+1):
if img[pos-i][i]:
return (pos-i,i)
print('Wrong')
return 0
else:
print("Oops!")
def detContour(img):
'''
new version, contour in white(empty) space
'''
DEBUG_OPT = True
DEBUG_OPT = False
Ox,Oy = getULpos(img)
Oy -= 1
sizeX,sizeY = img.shape
contourList = []
# -X
# ^
# |
# -Y<---O---> Y
# |
# V
# X
dirList = [(0,-1),(-1,-1),(-1,0),(-1,1),(0,1),(1,1),(1,0),(1,-1)]
dirIdx = 3
x,y = Ox,Oy
contourList.append((x,y))
mx,my = dirList[dirIdx]
rotateCount = 0
# import time
mx,my = dirList[dirIdx]
if DEBUG_OPT:
print("Start point = ({:d},{:d}).".format(Ox,Oy))
print("Direction = ({:d},{:d}).".format(mx,my))
# second node
x,y = x+mx,y+my
if DEBUG_OPT:
print("Move to pos ({:d},{:d})".format(x,y))
contourList.append((x,y))
count = 0
while(1):
count += 1
# time.sleep(1)
if DEBUG_OPT:
print("In pos ({:d},{:d}).".format(x,y))
# set the direction to the reverse
# and rotate by the Counterclockwise(reverse of )
dirIdx -= 4+1
if dirIdx < 0:
dirIdx += 8
mx,my = dirList[dirIdx]
rotateCount = 0
if DEBUG_OPT:
print("Init direction = ({:d},{:d}).".format(mx,my))
# It's point to black dot, so rotate until it's white
while img[x+mx,y+my] :
if DEBUG_OPT:
print("pos ({:d},{:d}) is Full".format(x+mx,y+my))
if rotateCount == 8:
print("TOO MANY ROTATE!")
return 0
rotateCount += 1
dirIdx -= 1
if dirIdx < 0:
dirIdx += 8
mx,my = dirList[dirIdx]
if DEBUG_OPT:
print("Direction = ({:d},{:d}).".format(mx,my))
# dirIdx -= 1
# if dirIdx < 0:
# dirIdx += 8
# mx,my = dirList[dirIdx]
if DEBUG_OPT:
print("pos ({:d},{:d}) is empty".format(x+mx,y+my))
x,y = x+mx,y+my
if DEBUG_OPT:
print("Move to pos ({:d},{:d})".format(x,y))
if x == Ox and y == Oy:
break
# if abs(x-Ox) <=1 or abs(y-Oy)<=1:
# print("Move to pos ({:d},{:d})".format(x,y))
contourList.append((x,y))
if count == 10000:
break
return contourList
# dirIdx -= 4
# if dirIdx < 0:
# dirIdx += 8
# dirIdx += 1
# if dirIdx >= 8:
# dirIdx -= 8
def getFrame(img):
l = np.empty_like(img)
r = np.empty_like(img)
u = np.empty_like(img)
d = np.empty_like(img)
l[:,:-1] = img[:,1:]
l[:,-1] = img[:,-1]
d[:-1] = img[1:]
d[-1] = img[-1]
r[:,1:] = img[:,:-1]
r[:,0] = img[:,0]
u[1:] = img[:-1]
u[0] = img[0]
out = np.logical_or(np.logical_or(l,r),np.logical_or(u,d))
return np.logical_and (np.logical_not(img), out)
# def getAllContour(img):
# data = img.copy()
# path_leave =
| UTF-8 | Python | false | false | 3,965 | py | 16 | contour0.2.py | 14 | 0.463304 | 0.437579 | 0 | 169 | 22.455621 | 82 |
VictorTadema/How_to_think_project_Victor | 8,710,193,719,032 | 220f0d5b9c1c4b58fb588b92800ae9047c3b6b76 | fce8815e3e5edecdf43c8a59ce878fae6e64f253 | /How to think/Week 5/5.1.19.7.py | e944a06a4633d09c542995eedc84c98ffc5399e5 | []
| no_license | https://github.com/VictorTadema/How_to_think_project_Victor | c03210a25ae813c309795bca7a8b335f63e0c0ad | cd24bf07ec9682721626812df7d4ab45ec55df12 | refs/heads/master | 2023-01-20T17:58:48.366713 | 2020-12-03T12:02:27 | 2020-12-03T12:02:27 | 296,403,900 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def reverse(text):
reverso = ""
for i in range(len(text) - 1, -1, -1):
reverso += text[i]
return reverso
print(reverse("bier"))
def mirror(text):
return text + reverse(text)
print(mirror("bavje")) | UTF-8 | Python | false | false | 225 | py | 68 | 5.1.19.7.py | 67 | 0.591111 | 0.577778 | 0 | 14 | 15.142857 | 42 |
shdev/exercism-python | 7,292,854,498,716 | 611f3ce7ef93c18c849fbd52a94760bc1b3d890e | 7d9a41fa7e2317b766f476e7ae22ceb4076734e5 | /simple-cipher/cipher.py | a3e8468d3b681a756c1ff08a9b91b90f1a387ec5 | []
| no_license | https://github.com/shdev/exercism-python | 08832675aad00d09e096ee59f4f13a3119956c11 | 26a217f96cdc86337b7fef877b84be7a44f69de8 | refs/heads/master | 2016-09-06T04:21:42.646162 | 2015-04-12T18:36:30 | 2015-04-12T18:36:30 | 33,655,484 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from random import randint
from itertools import cycle
class ArgumentError(Exception):
def __init__(self, value):
super(ArgumentError, self).__init__()
self.value = value
def __repr__(self):
return self.value
class Cipher(object):
_alphabet = 'abcdefghijklmnopqrstuvwxyz'
def __init__(self, key=None):
super(Cipher, self).__init__()
if key is None:
key = self.generate_key(100)
else:
if not self.is_key_valid(key):
raise ArgumentError('some charactor are not included in \'' +
self._alphabet + '\'')
self.key_as_ord = [ord(l) - ord('a') for l in key]
def encode(self, text):
text = self.sanitize_str(text.lower())
return ''.join(self._encodechar(l, c)
for l, c in zip(text, cycle(self.key_as_ord)))
def decode(self, text):
return ''.join(self._decodechar(l, c)
for l, c in zip(text, cycle(self.key_as_ord)))
@classmethod
def _encodechar(cls, char, cipher):
return chr(((ord(char) - ord('a') + cipher) %
(ord('z') - ord('a') + 1)) + ord('a'))
def _decodechar(self, char, cipher):
return chr(((ord(char) - ord('a') - cipher) %
(ord('z') - ord('a') + 1)) + ord('a'))
def getkey(self):
return ''.join(chr(o) for o in self.key_as_ord)
@classmethod
def generate_key(cls, length):
return ''.join(chr(randint(97, 122)) for i in xrange(0, length))
@classmethod
def is_key_valid(cls, key):
for l in key:
if l not in cls._alphabet:
return False
return True
@classmethod
def sanitize_str(cls, text):
return ''.join(l for l in text if l in cls._alphabet)
class Caesar(Cipher):
def __init__(self):
super(Caesar, self).__init__('d')
| UTF-8 | Python | false | false | 1,948 | py | 20 | cipher.py | 20 | 0.527207 | 0.521561 | 0 | 70 | 26.685714 | 77 |
egeulgen/Bioinformatics_Specialization | 506,806,178,182 | 17ef11e6eb352c9afad7db46d63641f6af488fed | 87b30670a56aeade5531a52bb9f298da05cfd9ae | /Bioinformatics I/Week I/PatternCount.py | 3ab1b2ed961910577e29568f5a5c57476aa21508 | [
"MIT"
]
| permissive | https://github.com/egeulgen/Bioinformatics_Specialization | e2dd88889edafc5eafdca56f87164ab8502ddb6b | 38581b471a54c41d780d9eeb26a7033eb57f3a01 | refs/heads/master | 2021-06-23T19:49:45.094652 | 2021-05-24T09:10:16 | 2021-05-24T09:10:16 | 220,064,156 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | def PatternCount(Text, Pattern):
l = len(Pattern)
L = len(Text)
count = 0
for i in range(L - l + 1):
if Text[i:i+l] == Pattern:
count += 1
return count | UTF-8 | Python | false | false | 161 | py | 256 | PatternCount.py | 140 | 0.608696 | 0.590062 | 0 | 8 | 19.25 | 32 |
lgs-1008/hanlight | 12,429,635,385,669 | 3f533341a8bf354c81f5c1a3d94158b74b19eb7f | 65fa8297f019d520c36aef6f3f33623cfd3650a8 | /naver.py | a1962971a35411479ce614a6941e686341337430 | []
| no_license | https://github.com/lgs-1008/hanlight | 7f34ea0ea40fd842b914d4fa40804896d07b319b | 88d617a8cefbefc9f67d84306f9dbc6b69b037e1 | refs/heads/master | 2020-11-27T15:20:02.991560 | 2020-01-27T14:57:18 | 2020-01-27T14:57:18 | 229,509,146 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
from bs4 import BeautifulSoup
import datetime
url = "https://www.naver.com/"
html = requests.get(url).text #๋ค์ด๋ฒ์ ์์ค์ฝ๋๋ฅผ ๋ฐ์์ต๋๋ค.
soup = BeautifulSoup(html, 'html.parser') #ํฌ๋กค๋ง์ ์ํด BeautifulSoup๋ฅผ ์ฌ์ฉํ์ต๋๋ค.
s = soup.select('.PM_CL_realtimeKeyword_rolling span[class=ah_k]') #์ค์๊ฐ ๊ฒ์์ด ๋ถ๋ถ๋ง ๊ณจ๋ผ์ ์ ์ฅํฉ๋๋ค.
time = datetime.datetime.now() #ํ ์๊ฐ ์ถ๋ ฅ์ ์ํด ์๊ฐ์ ์ ์ฅํฉ๋๋ค.
print( #์๊ฐ ์ถ๋ ฅ. ํ๊ธํฌ๋ฉง์ ์ํด ์ธ์ฝ๋ฉ๊ณผ ๋์ฝ๋ฉ ๊ณผ์ ์ ๊ฑฐ์ณ์ค๋๋ค.
time.strftime('\n%Y๋
%m์ %d์ผ %H์ %M๋ถ\n๋ค์ด๋ฒ ์ค์๊ฐ ๊ฒ์์ด\n'.encode('unicode-escape').decode()).encode().decode('unicode-escape')
)
print(s)
for i,s in enumerate(s): #์ค์๊ฐ ๊ฒ์์ดs๋ฅผ ๋์ดํ์ฌ ์ถ๋ ฅํด์ค๋๋ค.
print(s.text)
| UTF-8 | Python | false | false | 848 | py | 4 | naver.py | 3 | 0.711726 | 0.710098 | 0 | 18 | 33.111111 | 125 |
Svobodinang/legalro_backend | 17,008,070,515,463 | 4df6d30c25658ab557b6e4ae1ea35a1ee55328cd | 7906cc1c5cfed3437773cd00df2e73d49d74352c | /core/migrations/0005_runtitle.py | 73fc89c8385c6aa46116feb369305d9ec9289dfe | []
| no_license | https://github.com/Svobodinang/legalro_backend | 31bb3e505874140a7382c56966975388e31aa8fc | b0cca70b26bbab9ea509f9ed9f5a7c29d1cc5c1f | refs/heads/master | 2023-08-10T16:06:16.519851 | 2020-10-21T20:05:46 | 2020-10-21T20:05:46 | 280,234,738 | 0 | 0 | null | false | 2021-09-22T19:45:37 | 2020-07-16T18:59:13 | 2020-10-21T20:05:57 | 2021-09-22T19:45:35 | 12,547 | 0 | 0 | 2 | JavaScript | false | false | # Generated by Django 3.0.8 on 2020-07-09 19:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20200709_2045'),
]
operations = [
migrations.CreateModel(
name='RunTitle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(default='', max_length=30)),
],
),
]
| UTF-8 | Python | false | false | 524 | py | 43 | 0005_runtitle.py | 34 | 0.564885 | 0.501908 | 0 | 20 | 25.2 | 114 |
koftezz/google-ortools-ex | 7,739,531,069,672 | 5300296a299834149a003a118e156e14fbe912bf | f4ed588c87b706246e60bf4f829e4a2233ca5f09 | /cvrp_solver/optimization_params.py | d34526720135a4e1ef42916e9a1bf8478f46cc2f | []
| no_license | https://github.com/koftezz/google-ortools-ex | 555eb7600e27b3c05a41f783d740841581244b06 | 75dcd1faeae2b5792e027215029288b20001c1db | refs/heads/main | 2023-04-05T14:37:02.366782 | 2021-04-10T17:46:29 | 2021-04-10T17:46:29 | 352,383,502 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class OptimizationParams:
def __init__(self):
self.enable_output = True
self.time_limit = 50000
self.solver = 'SCIP' #'MIP' | UTF-8 | Python | false | false | 157 | py | 10 | optimization_params.py | 8 | 0.566879 | 0.535032 | 0 | 6 | 24.5 | 35 |
496268931/Automation | 7,456,063,270,357 | 575ebeeb9aba6a719701fab49671c4e50318a8b8 | 73d2e061d2e7a974782eff63a365d044d7a9ed21 | /ShowapiDemo.py | cec2c468b9c93ba37318890bfe24da526594d7d0 | []
| no_license | https://github.com/496268931/Automation | 4f6470c211cb8bbbe5f01270d2a6e303efa334c5 | 842ea2418cd65eb35c9bfca0950919bbbd6755d6 | refs/heads/master | 2021-09-05T07:29:16.488612 | 2018-01-25T07:35:23 | 2018-01-25T07:35:23 | 106,375,862 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from com.aliyun.api.gateway.sdk.util import showapi
import json
import base64
#get
req=showapi.ShowapiRequest( "่ฏทๆฑๅฐๅ๏ผๆฏๅฆhttp://ali-weather.showapi.com/area-to-weather","appcode" )
json_res=req.addTextPara("para1_name","para1_value")\
.addTextPara("para2_name","para2_value")\
.addTextPara("para3_name","para3_value")\
.get()
# #ๆๅๅฆๆๆฏpostๆไบคๅๆขๆ.post()
print ('json_res data is:', json_res)
#post form
# f=open(r'c:\a.jpg','rb')
# b_64=base64.b64encode(f.read())
# f.close()
# req=showapi.ShowapiRequest( "่ฏทๆฑๅฐๅ๏ผๆฏๅฆhttp://ali-checkcode.showapi.com/checkcode","appcode" )
# json_res= req.addTextPara("typeId","3040")\
# .addTextPara("img_base64",b_64)\
# .addTextPara("convert_to_jpg","1")\
# .post()
# print ('json_res data is:', json_res)
| UTF-8 | Python | false | false | 836 | py | 65 | ShowapiDemo.py | 57 | 0.668782 | 0.638325 | 0 | 27 | 28.148148 | 98 |
andrei4ka/bulls_n_cows | 16,243,566,335,217 | dd39a25344c3871c4237f1e38a5a6f80cfdfc06e | bf84781dc5647e1dad6ce10f47603ca227fd57d7 | /mod/master.py | cd82355ac3d2b93e0d81adb01a96439385f226a4 | []
| no_license | https://github.com/andrei4ka/bulls_n_cows | a3c40200ba5a891b01f5d3f94f2d7ee7d4a042c8 | fc95f868ff78780a0148452c95e8a91ce92e0775 | refs/heads/master | 2020-09-22T11:44:19.989427 | 2016-08-18T17:39:36 | 2016-08-18T17:39:36 | 65,913,721 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
class Master(object):
_master_number = None
def debug(self, message):
print('DEBUG: ' + str(message))
def __init__(self, guess_len=4):
"""
:param guess_len: The length of the secret number
:type guess_len: int
"""
self.number_length = guess_len
@property
def master_number(self):
"""
Return the generated master number
:return: Generated number
:rtype: str
"""
if self._master_number:
return self._master_number
else:
self._master_number = self.generate_number()
return self._master_number
def generate_number(self):
"""
Generate a new number-counted secret
:return: The generated number
:rtype: str
"""
while True:
result = ''
for i in range(self.number_length):
result += "%d" % random.randint(0, 9)
if self.check_no_duplicates(result):
return result
@staticmethod
def check_no_duplicates(master_number):
"""
Check that the generated number contains no duplicates
:return: The result of the check
:rtype: bool
"""
used_digits = set()
for digit in master_number:
if digit in used_digits:
return False
else:
used_digits.add(digit)
return True
def number_bulls_cows(self, guesser_number):
"""
Get the number of bulls and cows for the number
given by the guesser.
:param guesser_number: The number provided by the guesser
:type guesser_number: str
:return:
"""
# self.debug("Call: number_bulls_cows(%s)" % guesser_number)
self.debug("Master number is: %s" % self.master_number)
cows = 0
bulls = 0
for letter_number in range(len(self.master_number)):
master_letter = str(self.master_number[letter_number])
guesser_letter = str(guesser_number[letter_number])
# self.debug("Compare: %s with %s" % (master_letter, guesser_letter))
if master_letter == guesser_letter:
bulls += 1
else:
if guesser_letter in self.master_number:
cows += 1
return bulls, cows
| UTF-8 | Python | false | false | 2,400 | py | 4 | master.py | 4 | 0.540417 | 0.5375 | 0 | 81 | 28.62963 | 81 |
CloudOps-Cherif/vogeler | 7,499,012,909,746 | ad455319622e193acb2c75d08b817421b9e58635 | 7ccf013f0f4088887b0fc7c972c11e8212f9e032 | /vogeler/db/couch.py | 3e8b68c6e6d2930e96738472e0a5475075a2dc04 | []
| no_license | https://github.com/CloudOps-Cherif/vogeler | b223ec58d38c4a674f33441dbfea450c17f74976 | 0cf6c3313aaad647babe4169e1a96edd62284dfb | refs/heads/master | 2021-05-27T17:34:02.528245 | 2010-10-04T08:29:17 | 2010-10-04T08:29:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import datetime
import couchdbkit as couch
from couchdbkit.loaders import FileSystemDocsLoader
import vogeler.exceptions as exceptions
import vogeler.logger as logger
from vogeler.db.generic import GenericPersistence
log = logger.LogWrapper(name='vogeler.db.couch').logger()
class SystemRecord(couch.Document):
"""
A couchdbkit document for storing our base information
All documents, regardless of backend, should support
the following fields:
system_name
created_at
updated_at
"""
system_name = couch.StringProperty()
created_at = couch.DateTimeProperty()
updated_at = couch.DateTimeProperty()
class Persistence(GenericPersistence):
def hook_connect(self, **kwargs):
if self.username is None or self.password is None:
connection_string = "http://%s:%s" % (self.host, self.port)
else:
connection_string = "http://%s:%s@%s:%s" % (self.username, self.password, self.host, self.port)
self._server = couch.Server(uri=connection_string)
def hook_createdb(self, dbname):
try:
self.db = self._server.get_or_create_db(dbname)
SystemRecord.set_db(self.db)
except:
raise
def hook_dropdb(self, dbname):
try:
self._server.delete_db(dbname)
except:
raise
def hook_usedb(self, dbname):
try:
self.db = self._server.get_or_create_db(dbname)
SystemRecord.set_db(self.db)
except:
raise
def hook_create(self, node_name):
try:
node = SystemRecord.get_or_create(node_name)
node['system_name'] = node_name
node['created_at'] = datetime.datetime.utcnow()
node.save()
except:
raise
def hook_get(self, node_name):
try:
node = SystemRecord.get(node_name)
self.node = node
return node
except:
raise
def hook_touch(self, node_name):
try:
node = SystemRecord.get(node_name)
node['updated_at'] = datetime.datetime.utcnow()
node.save()
except:
raise
def hook_update(self, node_name, key, value):
try:
node = SystemRecord.get_or_create(node_name)
node[key] = value
node['updated_at'] = datetime.datetime.utcnow()
node.save()
except:
raise
def load_views(self, lp):
self.loadpath = lp
try:
print "Loading design docs from %s" % lp
loader = FileSystemDocsLoader(self.loadpath)
loader.sync(self.db, verbose=True)
print "Design docs loaded"
return 0
except:
log.fatal("Document load path not found: %s" % lp)
raise exceptions.VogelerPersistenceException()
class VogelerCouchPersistenceException(exceptions.VogelerPersistenceException): pass
# vim: set ts=4 et sw=4 sts=4 sta filetype=python :
| UTF-8 | Python | false | false | 3,034 | py | 48 | couch.py | 29 | 0.595583 | 0.594265 | 0 | 102 | 28.745098 | 107 |
datAnir/copybook | 16,560 | a563a28ca6f8a365b9c2df68d00aef6b4acfc8a0 | 6a49d7c14c8cc7e7b80630b670b854ddd4408653 | /tests/test_field.py | 422ae9cf6b6264cac16d64b7a3b283dfb84b0ad8 | [
"MIT"
]
| permissive | https://github.com/datAnir/copybook | a75695e55156e09c7238f4df9835b09125e93fb5 | 1afe405ee8466ca87706c6067b227a62ecd61203 | refs/heads/master | 2023-01-29T20:59:58.714862 | 2020-12-14T09:50:49 | 2020-12-14T09:50:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pytest
import copybook
from copybook import Field, FieldGroup
#
# Tests
#
def test_total_length():
field:Field = Field("","",{
"type_numeric": {
"signed":True,
"length":"2",
"precision":"2",
},
"level":"01",
"name":"test"
})
assert field.get_total_length()==5
def test_total_length_explicit_decimal():
field:Field = Field("","",{
"type_numeric": {
"signed":True,
"length":"2",
"precision":"2",
"explicit_decimal":True
},
"level":"01",
"name":"test"
})
assert field.get_total_length()==6
def test_total_length_unsigned_implied_decimal():
field:Field = Field("","",{
"type_numeric": {
"length":"2",
"precision":"2",
},
"level":"01",
"name":"test"
})
assert field.get_total_length()==4
def test_total_length_string():
field:Field = Field("","",{
"type_string": {
"length":"2",
},
"level":"01",
"name":"test"
})
assert field.get_total_length()==2
| UTF-8 | Python | false | false | 1,152 | py | 10 | test_field.py | 9 | 0.471354 | 0.454861 | 0 | 53 | 20.716981 | 49 |
MicaelaTenKathen/EGPPSO_ASV | 7,670,811,615,977 | 35c7454a33e719dbd20a1f9eb8b8a1e6018d53df | 87ad1b2e2eb039700c4c5e99d884b8fd0b07c057 | /Data_scripts/ratio.py | 3fbc3928bb860292e21031ff922c9446b12fd269 | []
| no_license | https://github.com/MicaelaTenKathen/EGPPSO_ASV | 0ef0f868cb5068e101cd7a4cdd28a4f562426fb1 | d858221ac278e46dc7c0dcc559a1389b4a059972 | refs/heads/master | 2023-06-16T07:10:34.665123 | 2021-07-06T13:26:15 | 2021-07-06T13:26:15 | 373,453,178 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def ratio_s(x_int, y_int, grid, part):
x_int = int(x_int)
y_int = int(y_int)
x_left = x_int + 2
x_right = x_int - 2
y_up = y_int + 2
y_down = y_int - 2
x_i = int(part[0])
y_i = int(part[1])
if grid[x_right, y_down] == 1:
part[0] = x_right
part[1] = y_down
else:
if grid[x_int, y_down] == 1:
part[1] = y_down
part[0] = x_int
else:
if grid[x_left, y_i] == 1:
part[0] = x_left
part[1] = y_int
else:
if grid[x_right, y_i] == 1:
part[0] = x_right
part[1] = y_int
else:
if grid[x_i, y_up] == 1:
part[1] = y_up
part[0] = x_int
else:
part[0] = x_i
part[1] = y_i
return part | UTF-8 | Python | false | false | 931 | py | 22 | ratio.py | 20 | 0.344791 | 0.320086 | 0 | 32 | 28.0625 | 44 |
kiefersutherland/pythonLearning | 13,469,017,447,949 | 7feb2cd869053a91a9d88f2ab712b1ba1195a4b5 | a39e27aa0bead0c4888074dd1d3681091c53dcb3 | /learn/justatest.py | 03e6ba7d4a454dc5022dc011b46be500e64d3366 | []
| no_license | https://github.com/kiefersutherland/pythonLearning | fbe4fbcb56c159b81767d1ee78eb6696f15884c9 | 6fd085ed682b10bb085dad7baa0e58d3fc072379 | refs/heads/master | 2021-10-24T17:56:42.063883 | 2019-03-27T00:44:57 | 2019-03-27T00:44:57 | 114,739,258 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
import Survey
class TestAnonymousSurvey(unittest.TestCase):
def setup(self):
question='ไฝ ็้ฆ้็ฎ็ๅฐ'
self.my_survey=Survey.AnonymousSurvey(question)
self.responses=['ไธๆตท','็ฅๆท','ไธไบฌ']
def test_stroe_Single(self):
self.my_survey.store_question(self.responses[0])
self.assertIn(self.responses[0],self.mysurvy.responses)
unittest.main() | UTF-8 | Python | false | false | 426 | py | 21 | justatest.py | 20 | 0.68 | 0.675 | 0 | 16 | 24.0625 | 65 |
BenjaminAnimator/RangleRig | 11,751,030,569,664 | 20bafd1c3e994ad8a9d16e57ae7c23313cc87d27 | 1aa4e57c617603f5207a51a32337edfc5b88e220 | /RangleRig/rigModules/clavicleRig.py | a84e6ce59031af1829131eeff6c5f6e668f029d5 | []
| no_license | https://github.com/BenjaminAnimator/RangleRig | 2bb5590351a8271deb85565f988033fe55770314 | bbe84a05371b3c1d89f07e62573f7d0c09078e98 | refs/heads/master | 2021-01-24T11:59:44.633702 | 2017-09-11T03:10:46 | 2017-09-11T03:10:46 | 56,138,514 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | "Module for creating Clavical Rig"
import maya.cmds as mc
from RangleRig.toolkit import controlGen
from RangleRig.toolkit import assetColourer
from RangleRig.toolkit import attrLocker
from RangleRig.toolkit import objDefine
from RangleRig.toolkit import setDvrKey
from RangleRig.toolkit import selectHirearchy
class clavGen():
def __init__(self, basejoint,characterName,rigGrp,visGrp):
self.setupJnt = basejoint
baseJntslst = selectHirearchy.jntHirearch(basejoint,False)
endSetup = baseJntslst[0]
ID = mc.getAttr(basejoint +'.UniqueID')
rigJnts = mc.duplicate(basejoint, name = ID+'_01_Jnt', renameChildren = True)
jnt1 = mc.rename(rigJnts[0], ID + '_01_Jnt')
jnt2 = mc.rename(rigJnts[1], ID + '_02_Jnt')
#Clavicle Set Up
locX = mc.getAttr(jnt1 + ".translateX")
if locX > 0:
assetCol = 6
prefix = "L_"
elif locX < 0:
prefix = "R_"
assetCol = 13
else:
assetCol = 22
prefix = "M_"
mc.setAttr(jnt1 + '.setUpJnt', lock= False)
mc.setAttr(jnt1 + '.UniqueID', lock= False)
mc.setAttr(jnt1 + '.Connect_to', lock= False)
mc.setAttr(jnt1 + '.Connection_type', lock= False)
mc.setAttr(jnt1 + '.characterName', lock= False)
mc.deleteAttr(jnt1 + '.setUpJnt')
mc.deleteAttr(jnt1 + '.characterName')
mc.deleteAttr(jnt1 + '.UniqueID')
mc.deleteAttr(jnt1 + '.Connect_to')
mc.deleteAttr(jnt1 + '.Connection_type')
clavJnts = mc.listRelatives(jnt1)
baseJnt = mc.rename(jnt1 , prefix +'clavBase_jnt')
self.childJnt = mc.rename(clavJnts[0], prefix +'clavBase_end')
#Create Clavicle Ik
clavIk = mc.ikHandle(startJoint = baseJnt,
endEffector = self.childJnt,
name = prefix +'clavicle_ikhandle')
'''
#ADVANCE CLAVS (WIP)
mc.distanceDimension( startPoint =[-1,0,0] , endPoint = [1,0,0])
baseLoc = 'locator1'
childLoc = 'locator2'
baseLoc = mc.rename(baseLoc, '%sbaseClav_loc' %prefix)
childLoc = mc.rename(childLoc, '%sendClav_loc' %prefix)
mc.parent(baseLoc, baseJnt)
mc.parent(childLoc, clavIk[0])
mc.move(0,0,0, baseLoc, objectSpace = True)
mc.move(0,0,0, childLoc, objectSpace = True)
mc.rename('distanceDimension1', prefix +'clavDist_util')
clavDist = prefix + "clavDist_utilShape"
'''
#Create Clavicle Controls
self.clavCtrl= controlGen.generateSquare(prefix + "clavicle_anim", clavIk[0] ,False)
assetColourer.colourer([self.clavCtrl], assetCol)
#Constrain Ik handle to controler
mc.parent(clavIk[0],self.clavCtrl)
#Grouping
self.clavGrp = mc.group(empty = True, name = prefix + "Clav_grp")
dntGroup= mc.group(empty = True, name = prefix + "DONOTTOUCH_Clav_grp")
mc.parent(dntGroup, self.clavGrp)
mc.parent(baseJnt,dntGroup)
mc.parent(self.clavCtrl, self.clavGrp)
#Clean Up (Non Joint)
attrLocker.lockCommon(dntGroup,['X','Y','Z'], ['X','Y','Z'], ['X','Y','Z'], True, True)
mc.setAttr(clavIk[0] + '.visibility', 0)
attrLocker.lockCommon(self.clavCtrl,[], [], ['X','Y','Z'], False, True)
#Clean Up (Joints)
mc.setAttr(self.childJnt +'.drawStyle', 2)
mc.setAttr(baseJnt +'.drawStyle', 2)
#Clav Vis
mc.select(visGrp)
mc.addAttr( shortName=ID + '_ClavVis', longName=ID + '_ClavVis', attributeType = 'enum', enumName = 'On:Off' , keyable = True, hidden = False )
for i in [self.clavCtrl]:
setDvrKey.setDvrK(visGrp + '.' + ID + '_ClavVis', i + '.visibility', 0, 1)
setDvrKey.setDvrK(visGrp + '.' + ID + '_ClavVis', i + '.visibility', 1, 0)
attrLocker.lockCommon(i,[],[],[],True,True)
#Define Controls
objDefine.definer('characterName', [self.clavCtrl], characterName)
objDefine.definer('controlArea', [self.clavCtrl], prefix + "clav")
objDefine.definer("Connection", [self.clavGrp], "root")
objDefine.definer("Connection", [self.clavCtrl], endSetup )
| UTF-8 | Python | false | false | 4,789 | py | 39 | clavicleRig.py | 38 | 0.531217 | 0.519106 | 0 | 131 | 33.832061 | 151 |
samuelxu999/Research | 12,884,901,894,390 | 179802c2d7de3ba5eace8fa0c29e16fa7fab8b4d | 5d04ab2ef298299935410a9e5c0a816cc9b87de0 | /Security/py_dev/Micro_chain/consensus/block.py | 391d0ef51b6a9e3ca21d722bd8a148d47de12906 | []
| no_license | https://github.com/samuelxu999/Research | 494b7a8d3a225098e17ad8af890b3246f321a88c | 03ff57e6fe0114ffd2dd953e79a73a893a6bc0ad | refs/heads/master | 2023-06-26T19:22:59.702799 | 2023-06-15T16:40:43 | 2023-06-15T16:40:43 | 88,461,577 | 1 | 1 | null | false | 2023-05-01T22:24:29 | 2017-04-17T02:53:59 | 2021-12-16T15:25:15 | 2023-05-01T22:24:28 | 15,398 | 1 | 0 | 7 | Python | false | false | '''
========================
block.py
========================
Created on June.18, 2019
@author: Xu Ronghua
@Email: rxu22@binghamton.edu
@TaskDescription: This module provides the block data structure and related function implementations.
@Reference:
'''
from collections import OrderedDict
from merklelib import MerkleTree, jsonify as merkle_jsonify
from utils.utilities import TypesUtil, FuncUtil
from cryptolib.crypto_rsa import Crypto_RSA
from utils.configuration import *
from consensus.transaction import Transaction
class Block(object):
    """A block in the blockchain.

    One node (round-robin) adds a new block to the chain every
    BLOCK_PROPOSAL_TIME iterations.

    Attributes:
        hash: hash of the block fields (height, previous_hash,
              transactions, merkle_root, nonce).
        height: height of the block (genesis = 0).
        previous_hash: hash of the parent block (0 for genesis).
        transactions: list of committed transactions.
        merkle_root: Merkle-tree root hash over the transactions
                     (0 when there are no transactions).
        nonce: nonce proof used by the consensus protocol.
    """

    def __init__(self, parent=None, transactions=None, nonce=0):
        """Build a block on top of *parent* holding *transactions*.

        Args:
            parent: parent Block, or None to create the genesis block.
            transactions: committed transactions in the new block
                          (defaults to an empty list).
            nonce: nonce proof to meet the difficulty level.
        """
        # FIX: the original used a mutable default (transactions=[]);
        # a None sentinel avoids sharing one list across instances.
        if transactions is None:
            transactions = []
        # If we are the genesis block, set initial values.
        if not parent:
            self.height = 0
            self.previous_hash = 0
        else:
            self.height = parent.height + 1
            self.previous_hash = parent.hash
        self.transactions = transactions
        self.nonce = nonce

        # Convert to an order-dict transactions list.
        dict_transactions = Transaction.json_to_dict(self.transactions)
        # Build a Merkle tree for that dict_transactions.
        tx_HMT = MerkleTree(dict_transactions, FuncUtil.hashfunc_sha256)
        # Calculate the Merkle tree root hash (0 for an empty tree).
        if len(tx_HMT) == 0:
            self.merkle_root = 0
        else:
            tree_struct = merkle_jsonify(tx_HMT)
            json_tree = TypesUtil.string_to_json(tree_struct)
            self.merkle_root = json_tree['name']

        block = {'height': self.height,
                 'previous_hash': self.previous_hash,
                 'transactions': self.transactions,
                 'merkle_root': self.merkle_root,
                 'nonce': self.nonce}
        # Calculate the hash of the block.
        self.hash = TypesUtil.hash_json(block)

    def to_dict(self):
        """Return the block as an OrderedDict."""
        order_dict = OrderedDict()
        order_dict['hash'] = self.hash
        order_dict['height'] = self.height
        order_dict['previous_hash'] = self.previous_hash
        order_dict['transactions'] = self.transactions
        order_dict['merkle_root'] = self.merkle_root
        order_dict['nonce'] = self.nonce
        return order_dict

    def to_json(self):
        """Return the block as a plain (JSON-serializable) dict."""
        return {'hash': self.hash,
                'height': self.height,
                'previous_hash': self.previous_hash,
                'transactions': self.transactions,
                'merkle_root': self.merkle_root,
                'nonce': self.nonce}

    def print_data(self):
        """Print all block fields to stdout (debug helper)."""
        print('Block information:')
        print('    hash:', self.hash)
        print('    height:', self.height)
        print('    previous_hash:', self.previous_hash)
        print('    transactions:', self.transactions)
        print('    merkle_root:', self.merkle_root)
        print('    nonce:', self.nonce)

    def sign(self, sender_private_key, sk_pw):
        """Sign the block using the sender's private key and password.

        Returns the signature value, or '' if key loading or signing fails.
        """
        try:
            private_key_byte = TypesUtil.hex_to_string(sender_private_key)
            private_key = Crypto_RSA.load_private_key(private_key_byte, sk_pw)
            # Sign the SHA-1 hash of the JSON form of the block.
            hash_data = TypesUtil.hash_json(self.to_json(), 'sha1')
            sign_value = Crypto_RSA.sign(private_key, hash_data)
        except:
            sign_value = ''
        return sign_value

    def verify(self, sender_public_key, signature):
        """Verify the block signature using the sender's public key.

        Returns True iff the signature is valid; False on any error.
        """
        try:
            public_key_byte = TypesUtil.hex_to_string(sender_public_key)
            public_key = Crypto_RSA.load_public_key(public_key_byte)
            # Verify against the SHA-1 hash of the JSON form of the block.
            hash_data = TypesUtil.hash_json(self.to_json(), 'sha1')
            verify_sign = Crypto_RSA.verify(public_key, signature, hash_data)
        except:
            verify_sign = False
        return verify_sign

    def get_epoch(self, epoch_size=EPOCH_SIZE):
        """Return the epoch this block belongs to (height // epoch_size)."""
        return (self.height // epoch_size)

    @staticmethod
    def json_to_block(block_json):
        """Rebuild a Block object from its json/dict representation."""
        block = Block()
        block.hash = block_json['hash']
        block.height = block_json['height']
        block.previous_hash = block_json['previous_hash']
        block.transactions = block_json['transactions']
        block.merkle_root = block_json['merkle_root']
        block.nonce = block_json['nonce']
        return block

    @staticmethod
    def isEmptyBlock(block_json):
        """True iff *block_json* is a non-genesis block with zero nonce."""
        if (block_json['height'] > 0) and block_json['nonce'] == 0:
            return True
        return False
| UTF-8 | Python | false | false | 4,628 | py | 177 | block.py | 114 | 0.691227 | 0.686474 | 0 | 164 | 27.219512 | 85 |
reck1ess/PS | 6,863,357,752,524 | 1600fd11793210c7e128798a4af61e3374c449fd | 595c42b5e45ed8d451a53c72c2edb4d82adc9bb1 | /codility/codility9_3.py | b9aa708d67b17706a3bd8ccd43f2c9c69753ff7f | []
| no_license | https://github.com/reck1ess/PS | 151acb08e0e8f58f261db28429f7db10be46d280 | 14c13cb43789e36514437cd9d94ecc75fe6516ca | refs/heads/master | 2020-03-29T01:42:00.298873 | 2018-10-13T14:54:32 | 2018-10-13T14:54:32 | 149,401,766 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
def solution(A):
    """Return the maximal sum of any non-empty slice of A (Kadane's algorithm)."""
    # FIX: use float('-inf') instead of the arbitrary sentinel -1000000 so
    # the result stays correct for elements below that magnitude.
    max_end = max_slice = float('-inf')
    for a in A:
        # Best slice ending at this element: extend the previous one or restart.
        max_end = max(a, max_end + a)
        max_slice = max(max_slice, max_end)
    return max_slice
class TestExercise(unittest.TestCase):
    """Unit tests for solution()."""

    def test_example(self):
        # Example from the task statement: the best slice is [3, 2] -> 5.
        self.assertEqual(
            solution([3, 2, -6, 4, 0]), 5)


if __name__ == '__main__':
    unittest.main()
| UTF-8 | Python | false | false | 377 | py | 33 | codility9_3.py | 33 | 0.562334 | 0.527851 | 0 | 20 | 17.85 | 43 |
Nayan356/Python_DataStructures-Functions | 7,473,243,100,789 | 64907a53ef6d102cf619ac1dfe2428125ef6ff06 | 2dc001b3b354d05876f85ae736a9699a8d082d74 | /Functions/pgm10.py | 7e6a2abdda2e30731245555d8b41864be8b1398f | []
| no_license | https://github.com/Nayan356/Python_DataStructures-Functions | f8a829e93b714e23ab31a2ee07819e8c1f2d80d6 | 5bf7d9432c6326f77c022b5c2b03e971374538ba | refs/heads/master | 2022-10-19T07:09:07.202499 | 2020-06-08T06:53:08 | 2020-06-08T06:53:08 | 270,482,353 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Write a program which can filter() to make a list whose elements are even number
# between 1 and 20 ( both included)
# Python code to filter even values from a list.
# Initialisation of the input list.
lis = [1, 2, 3, 4, 5.6, 7, 8]  # NOTE(review): 5.6 looks like a typo for 5, 6 -- confirm
# Output list initialisation.
out = []
for num in lis:
    # Keep only values divisible by 2.
    if num % 2 == 0:
        out.append(num)
# Printing the filtered output.
print(out)
| UTF-8 | Python | false | false | 378 | py | 30 | pgm10.py | 30 | 0.669312 | 0.634921 | 0 | 19 | 18.842105 | 82 |
computational-imaging/spad_single | 1,889,785,638,566 | cccff741f30b9a96dc558d99d6ad92a6388327c8 | 2dbadf8d7c26b3dda69328229b60df160b69f917 | /models/data/nyuv2_official_nohints_dataset.py | 73bffb02abbf7a29e15d22cee35322d1a532b9eb | []
| no_license | https://github.com/computational-imaging/spad_single | a17c31d0564a16f08f4768dcc27c064272a5f70d | 54e18e26a6f3c33837da032063e8cf9cc287569e | refs/heads/master | 2022-11-18T08:32:37.513981 | 2020-07-19T04:44:56 | 2020-07-19T04:44:56 | 152,368,443 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import json
import numpy as np
from torch.utils.data import Dataset
import cv2
from torchvision import transforms
from models.data.data_utils.transforms import (ResizeAll, RandomHorizontalFlipAll, Normalize,
AddDepthMask, ToTensorAll)
from sacred import Experiment
nyuv2_nohints_ingredient = Experiment('data_config')
@nyuv2_nohints_ingredient.config
def cfg():
    """Sacred config: dataset name, split files, depth range, normalization."""
    data_name = "nyu_depth_v2"
    # Paths should be specified relative to the train script, not this file.
    root_dir = os.path.join("data", "nyu_depth_v2_scaled16")
    train_file = os.path.join(root_dir, "train.json")
    train_dir = root_dir
    val_file = os.path.join(root_dir, "val.json")
    val_dir = root_dir
    test_file = os.path.join(root_dir, "test.json")
    test_dir = root_dir
    del root_dir  # helper only; keep it out of the sacred config namespace

    # Indices of images to exclude from the dataset.
    # Set relative to the directory from which the dataset is being loaded.
    blacklist_file = "blacklist.txt"
    min_depth = 0.   # Minimum depth
    max_depth = 10.  # Maximum depth
    use_dorn_normalization = True  # Sets specific normalization if using DORN network.
    # If False, defaults to using the empirical mean and variance from the train set.
class NYUDepthv2Dataset(Dataset):  # pylint: disable=too-few-public-methods
    """Class for reading and storing image and depth data together."""

    def __init__(self, splitfile, data_dir, transform, file_types, min_depth, max_depth,
                 blacklist_file="blacklist.txt"):
        """
        :param splitfile: json file mapping |global_id| to a dict of resource files.
        :param data_dir: root directory the resource files are relative to.
        :param transform: torchvision transform applied to each sample (may be None).
        :param file_types: keys of the resource-file dict provided by each
                           entry in splitfile (e.g. ["rgb", "rawdepth"]).
        :param min_depth: minimum depth value of the dataset.
        :param max_depth: maximum depth value of the dataset.
        :param blacklist_file: file listing splitfile keys that should not be used.
        """
        super(NYUDepthv2Dataset, self).__init__()
        self.data_dir = data_dir
        self.transform = transform
        self.file_types = file_types
        self.min_depth = min_depth
        self.max_depth = max_depth
        self.index = {}
        self.data = []
        self.info = {}
        self.blacklist = []
        if blacklist_file is not None:
            print("Loading blacklist from {}".format(os.path.join(data_dir, blacklist_file)))
            with open(os.path.join(data_dir, blacklist_file), "r") as f:
                self.blacklist = [line.strip() for line in f.readlines()]
        with open(splitfile, "r") as f:
            self.index = json.load(f)
        for entry in self.index:
            if entry in self.blacklist:
                continue  # Exclude this entry.
            self.data.append(entry)
        self.rgb_mean, self.rgb_var = np.zeros(3), np.ones(3)  # Default initialization
        self.transform = transform

    def get_mean_and_var(self, cache="mean_var.npy", write_cache=True):
        """Calculate mean and variance of each rgb channel.

        Optionally caches the result of this calculation in *cache* so it
        doesn't need to be redone each time the dataset is loaded.
        Does everything in numpy.
        """
        if cache is not None:
            cache_file = os.path.join(self.data_dir, cache)
            try:
                mean_var = np.load(cache_file)
                mean = mean_var[()]["mean"]
                var = mean_var[()]["var"]
                print("loaded stats cache at {}".format(cache_file))
                print(mean, var)
                return mean, var
            except IOError:
                print("failed to load stats cache at {}".format(cache_file))
        # NOTE(review): if cache is None this print references an undefined
        # cache_file -- the original appears to assume cache is never None.
        print("creating new stats cache (this may take a while...) at {} ".format(cache_file))
        S = np.zeros(3)
        S_sq = np.zeros(3)
        npixels = 0.
        for entry in self.data:
            rgb_img = self.load_all_images(entry)["rgb"]
            npixels += rgb_img.shape[0] * rgb_img.shape[1]
            # Accumulate per-channel sums and sums of squares.
            S += np.sum(rgb_img, axis=(0, 1))
            S_sq += np.sum(rgb_img ** 2, axis=(0, 1))
        mean = S / npixels
        var = S_sq / npixels - mean ** 2
        if write_cache:
            try:
                output = {"mean": mean, "var": var}
                cache_file = os.path.join(self.data_dir, cache)
                np.save(cache_file, output)
                print("wrote stats cache to {}".format(cache_file))
            except IOError:
                print("failed to write stats cache to {}".format(cache_file))
        return mean, var

    def load_all_images(self, image_id):
        """Given an image id, load each resource image as a float32 numpy
        array using cv2 from the path given in the index.

        Depth images stored as uint8/uint16 are rescaled linearly into
        [min_depth, max_depth].
        """
        imgs = {}
        for file_type in self.file_types:
            relpath = self.index[image_id][file_type]
            imgs[file_type] = cv2.imread(os.path.join(self.data_dir, relpath),
                                         cv2.IMREAD_UNCHANGED)
            if file_type == "depth" or file_type == "rawdepth":
                if imgs[file_type].dtype == np.uint16:
                    imgs[file_type] = imgs[file_type] * (self.max_depth - self.min_depth) / (2 ** 16 - 1) + self.min_depth
                elif imgs[file_type].dtype == np.uint8:
                    imgs[file_type] = imgs[file_type] * (self.max_depth - self.min_depth) / (2 ** 8 - 1) + self.min_depth
                else:
                    raise TypeError("DepthDataset: Unknown image data type: {}".format(str(imgs[file_type])))
            imgs[file_type] = imgs[file_type].astype(np.float32)
        return imgs

    def __len__(self):
        """Number of (non-blacklisted) entries."""
        return len(self.data)

    def __getitem__(self, i):
        """Return the (transformed) sample dict for index *i*."""
        sample = dict()
        sample["entry"] = self.data[i]
        sample.update(self.load_all_images(self.data[i]))
        if self.transform is not None:
            sample = self.transform(sample)
        return sample

    def get_item_by_id(self, image_id):
        """Different way of getting an item that goes by image_id
        instead of index i.
        """
        return self.__getitem__(self.data.index(image_id))
#############
# Load data #
#############
@nyuv2_nohints_ingredient.capture
def load_data(train_file, train_dir,
              val_file, val_dir,
              test_file, test_dir,
              min_depth, max_depth, use_dorn_normalization,
              blacklist_file):
    """Generate training/validation/test datasets from split files and
    directories, and set them up with the transforms module.

    *_file - string - a json file containing info for the dataset to load
    *_dir - string - the folder containing the images to load
    min_depth / max_depth - depth range for this dataset
    use_dorn_normalization - use DORN's fixed RGB mean/variance instead of
        the empirical statistics from the train set
    blacklist_file - string - a text file listing, on each line, an image_id
        of an image to exclude from the dataset

    Returns
    -------
    train, val, test - torch.utils.data.Dataset objects containing the
    relevant splits (val/test are None when their split file is None).
    """
    train = NYUDepthv2Dataset(train_file, train_dir, transform=None,
                              file_types=["rgb", "rawdepth"],
                              min_depth=min_depth, max_depth=max_depth,
                              blacklist_file=blacklist_file)
    train.rgb_mean, train.rgb_var = train.get_mean_and_var()

    # Transform:
    # Size is set to (353, 257) to conform to DORN conventions.
    # If use_dorn_normalization is true:
    #   mean is np.array([[[103.0626, 115.9029, 123.1516]]]) (DORN convention)
    #   var is np.ones((1, 1, 3)) (DORN convention)
    if use_dorn_normalization:
        transform_mean = np.array([[[103.0626, 115.9029, 123.1516]]]).astype(np.float32)
        transform_var = np.ones((1, 1, 3))
    else:
        transform_mean = train.rgb_mean
        transform_var = train.rgb_var

    train_transform = transforms.Compose([
        ResizeAll((353, 257), keys=["rgb", "depth", "rawdepth"]),
        AddDepthMask(min_depth, max_depth, "rawdepth"),  # introduces "mask"
        RandomHorizontalFlipAll(flip_prob=0.5, keys=["rgb", "depth", "rawdepth", "mask"]),
        Normalize(transform_mean, transform_var, key="rgb"),  # introduces "rgb_orig"
        ToTensorAll(keys=["rgb", "rgb_orig", "rawdepth", "mask"])
    ])
    val_transform = transforms.Compose([
        ResizeAll((353, 257), keys=["rgb", "depth", "rawdepth"]),
        AddDepthMask(min_depth, max_depth, "rawdepth"),
        Normalize(transform_mean, transform_var, key="rgb"),
        ToTensorAll(keys=["rgb", "rgb_orig", "depth", "rawdepth", "mask"])
    ])
    test_transform = transforms.Compose([
        ResizeAll((353, 257), keys=["rgb", "rawdepth"]),
        AddDepthMask(min_depth, max_depth, "rawdepth"),
        Normalize(transform_mean, transform_var, key="rgb"),
        ToTensorAll(keys=["rgb", "rgb_orig", "rawdepth", "mask"])
    ])
    train.transform = train_transform
    print("Loaded training dataset from {} with size {}.".format(train_file, len(train)))

    val = None
    if val_file is not None:
        val = NYUDepthv2Dataset(val_file, val_dir, transform=val_transform,
                                file_types=["rgb", "rawdepth"],
                                min_depth=min_depth, max_depth=max_depth)
        val.rgb_mean, val.rgb_var = train.rgb_mean, train.rgb_var
        print("Loaded val dataset from {} with size {}.".format(val_file, len(val)))
    test = None
    if test_file is not None:
        test = NYUDepthv2Dataset(test_file, test_dir, transform=test_transform,
                                 file_types=["rgb", "rawdepth"],
                                 min_depth=min_depth, max_depth=max_depth)
        test.rgb_mean, test.rgb_var = train.rgb_mean, train.rgb_var
        print("Loaded test dataset from {} with size {}.".format(test_file, len(test)))
    return train, val, test
###########
# Testing #
###########
@nyuv2_nohints_ingredient.automain
def test_load_data(min_depth, max_depth):
    """Smoke test: load all splits and fetch one known sample by id."""
    train, val, test = load_data()
    sample = train.get_item_by_id("dining_room_0001a/0001")
    print(sample["rgb"].size())
| UTF-8 | Python | false | false | 10,689 | py | 211 | nyuv2_official_nohints_dataset.py | 110 | 0.585368 | 0.573019 | 0 | 259 | 40.266409 | 120 |
deathbypixels/noodles | 13,675,175,878,097 | 780424b8e80ae7918e2d881391879fa82f5ebcea | 69af1943b2b581e3000614feeeae392d49e81c64 | /setStartFirst_Frame.py | 6b0825c1659e2f87b7b363814d0b6019f6f37c85 | []
| no_license | https://github.com/deathbypixels/noodles | 92317de91567133454dffb6f693698dfc3dc6dca | 036f6c785ce70e2b0705036284533b82ed1e47f1 | refs/heads/master | 2021-01-12T09:15:12.601573 | 2017-12-04T03:43:00 | 2017-12-04T03:43:00 | 76,808,094 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | r = nuke.Root()
f = r.knob('first_frame').value()
n = nuke.selectedNode()
n['frame'].setValue(str(f))
n['frame_mode'].setValue('start_at') | UTF-8 | Python | false | false | 139 | py | 10 | setStartFirst_Frame.py | 9 | 0.647482 | 0.647482 | 0 | 6 | 22.333333 | 36 |
kaeru8714/BDJA-recommendation-project | 8,323,646,626,417 | ccf9ace66e36756337d3067ad2ba883c0dc14c1f | 3d94d7dc4e5c93fb9796e372a3a5e88b219117d4 | /bonglib.py | 454857471068ab8e661c1b0db56448a122520794 | []
| no_license | https://github.com/kaeru8714/BDJA-recommendation-project | 8c7332e1079809b9dbb9360eca893054e5b29eae | 3ff311a2aba55e9bb890504934fcfda2b8c56a17 | refs/heads/master | 2021-01-10T09:35:25.472506 | 2015-10-22T10:29:54 | 2015-10-22T10:29:54 | 44,738,351 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def pause():
p = input("\n<system>์ผ์ ์ค์ง ๋์์ต๋๋ค. </system>\n")
def dpause(param):
    """Block until the user presses Enter, appending *param* to the pause message."""
    # FIX: the prompt string was mojibake (UTF-8 bytes decoded as Latin-1);
    # restored to the intended Korean text.
    p = input("\n<system>일시 중지 되었습니다. %s </system>\n" % (param))
chalam/Pynaconda | 19,275,813,238,065 | fe74b368949f46831e03d94c7d27c69cfd175d50 | de69d99db8be567d97060149481091c25907d4ef | /src/numerical/IsPowerOfTwo.py | ea3f9076b3eed7d5186ee0f786102e8e3169e62e | []
| no_license | https://github.com/chalam/Pynaconda | 0dd5acdb19c38352ee5d4b92c002d05bd75e452d | e24600d26afbc685e3853a6037f50dfc3fe077d2 | refs/heads/master | 2021-01-10T13:37:54.811250 | 2018-10-13T20:48:44 | 2018-10-13T20:48:44 | 36,340,529 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # import urllib.parse
# import urllib.request
# def isPowerOfTwo_wolfram(n):
# params = urllib.parse.urlencode({"i": "log2(" + str(n) + ")"})
# url = "https://www.wolframalpha.com/input/?" + params
# page = urllib.request.urlopen(url).read().decode('utf-8')
# return page.find('More digits') == -1 ## Need JS browser
# assert isPowerOfTwo_wolfram(8)
# assert not isPowerOfTwo_wolfram(6)
# assert isPowerOfTwo_wolfram(16)
from math import ceil, floor, log2
def isPowerOfTwo_bit(n):
    """Bit trick: a power of two has exactly one bit set, so n & (n-1) == 0."""
    if n == 0:
        return False
    return (n & (n - 1)) == 0
def isPowerOfTwo_log(n):
    """Check via log2: an integral log2 means n is a power of two."""
    # FIX: log2 raises ValueError for n <= 0, while the sibling checks
    # return False; guard so all three implementations agree.
    if n <= 0:
        return False
    # python's log() is not accurate enough; use log2 (exact for powers of two).
    log_val = log2(n)
    return ceil(log_val) == floor(log_val)
def isPowerOfTwo_div(n):
    """Repeatedly halve n; a power of two reduces exactly to 1."""
    value = n
    while value > 0:
        if value == 1:
            return True
        if value % 2:
            # Hit an odd factor before reaching 1: not a power of two.
            return False
        value = value / 2
    return False
# Self-test: all three implementations must agree on both lists.
doPass = True
# NOTE(review): the names say "powersOf10" but the values are powers of TWO.
powersOf10 = [1, 2, 4, 8, 16, 32, 64, 128]
notPowersOf10 = [5, 390, 12, 144, 12, 50, 17]
for n in powersOf10:
    if not isPowerOfTwo_bit(n):
        print("Failed for " + str(n) + "\n")
        doPass = False
    if not isPowerOfTwo_log(n):
        print("Failed for " + str(n) + "\n")
        doPass = False
    if not isPowerOfTwo_div(n):
        print("Failed for " + str(n) + "\n")
        doPass = False
for n in notPowersOf10:
    if isPowerOfTwo_bit(n):
        print("Failed for " + str(n) + "\n")
        doPass = False
    if isPowerOfTwo_log(n):
        print("Failed for " + str(n) + "\n")
        doPass = False
    if isPowerOfTwo_div(n):
        print("Failed for " + str(n) + "\n")
        doPass = False
if doPass:
    print("All tests pass\n")
| UTF-8 | Python | false | false | 1,586 | py | 236 | IsPowerOfTwo.py | 215 | 0.576923 | 0.542245 | 0 | 58 | 26.344828 | 68 |
RuyiLuo/LeetCode | 7,404,523,648,203 | 7c0d67bee5f19546dfd0ccd4a82acb4fa51af7c7 | 6d814600d6822961ef5f947cd710e1f6f2828fe2 | /Fei_LeetCode/Array/0926/746-Min Cost Climbing Stairs.py | 8094b1e699fa6204513a535ca6d954287aaaec8f | []
| no_license | https://github.com/RuyiLuo/LeetCode | eaf7a160f1c7321a52b4666c423171baea706772 | 6b5aa4c635b92fa43932d95b30aa690946b28e5f | refs/heads/master | 2020-07-06T08:08:48.714187 | 2019-12-04T14:20:51 | 2019-12-04T14:20:51 | 202,950,024 | 10 | 61 | null | false | 2019-11-26T15:50:43 | 2019-08-18T02:14:16 | 2019-11-25T15:51:32 | 2019-11-26T15:50:42 | 2,439 | 6 | 27 | 0 | Python | false | false | # ไฝฟ็จๅจๆ่งๅ่งฃๆณ
# ๅๅงๅ
# i=0ๆถ๏ผๅชๆไธไธช้ถๆขฏ๏ผๅ ๆญคๅฐ่พพ่ฏฅ้ถๆขฏ้กถ้จ้่ฆ็่ฝ้ๅณไธบๅฐ่พพ่ฏฅ้ถๆขฏๆ้่ฝ้๏ผไธบcost[0]๏ผ
# i=1ๆถ๏ผๆไธคไธช้ถๆขฏ๏ผๆไปฌๅฏไปฅ่ฟไธคๆญฅ๏ผ่ทณ่ฟ็ฌฌไธ็บงๅฐ้ถ๏ผๅฐ่พพ่ฏฅ้ถๆขฏๆ้่ฆ็่ฝ้ไธบๅฐ่พพ็ฌฌไบ็บงๅฐ้ถๆ้่ฝ้๏ผไธบcost[1]๏ผ
# dp[i]่กจ็คบๅฐ่พพไธๆ ไธบi็้ถๆขฏ้่ฆๆถ่็ๆๅฐ่ฝ้ใ่ฟ้้่ฆๆณจๆ๏ผ้กถ้จ้ถๆขฏๅฎ้
ไธๆฏ่ขซ้ข็ฎ็ผบ็ๆ็๏ผ
# ๅณๅฐ่พพ้กถ้จ้ถๆขฏๆ้่ฆๆถ่็่ฝ้ไธบ้ถ๏ผๆไปฌ้่ฆ่กฅๅๆฅ
# ็ถๆ่ฝฌ็งปๆน็จ
# i>1ๆถ๏ผๅฐ่พพ็ฌฌi็บงๅฐ้ถๅชๆไธค็ง้ๆฉ๏ผไธ็งๆฏไป็ฌฌi-1็บงๅฐ้ถ่ฟไธๆญฅ๏ผๅฆไธ็งๆฏไปi-2็บงๅฐ้ถ่ฟไธคๆญฅ๏ผ
# ่ฟไธค็ง้ๆฉๆถ่็ๆๅฐ่ฝ้ๅๅซๆฏdp[i-1]+cost[i]ๅdp[i-2]+cost[i]๏ผๆไปฌๅไธค่
็ๆๅฐๅผ๏ผๅณไธบๅฐ่พพไธๆ ไธบi็ๅฐ้ถๆ้็ๆๅฐ่ฝ้๏ผ
# dp[i] = min(dp[i-1], dp[i-2]) + cost[i]
def minCostClimbingStairs(cost: list):
if not cost:
return 0
if len(cost) <= 2:
return min(cost)
cost.append(0)
dp = [None for _ in range(len(cost))]
dp[0], dp[1] = cost[0], cost[1]
for i in range(2, len(cost)):
dp[i] = min(dp[i - 2], dp[i - 1]) + cost[i]
print(dp)
return dp[-1]
| UTF-8 | Python | false | false | 1,242 | py | 443 | 746-Min Cost Climbing Stairs.py | 278 | 0.647945 | 0.617808 | 0 | 24 | 29.416667 | 78 |
fstricker/aeroplant | 18,227,841,218,041 | a6dd167745fed88852f81e84f1b750eaf6ae3232 | d4d1daa805b11b04180553a97c2292ad0a61e343 | /main.py | 86e115d1bf6f4d55aff6eb30de7df8d733c9d645 | []
| no_license | https://github.com/fstricker/aeroplant | e806f147f38b16968a3e1c87939f3a279273e2e2 | 7a78a945731803fe425b863049c86a0f2379998c | refs/heads/master | 2020-06-02T11:05:37.175643 | 2019-06-10T09:16:58 | 2019-06-10T09:16:58 | 191,135,314 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #aeroplant.py aka main.py
from datetime import datetime
from time import sleep
from stat_summary import *
from process import *
from read import *
import settings
import linecache
import sys
#define more telling exception function
def DetailedException():
    """Return a one-line description of the exception currently being handled.

    Must be called from inside an ``except`` block: it reads
    ``sys.exc_info()`` and resolves the failing source line via linecache.
    """
    exc_type, exc_obj, tb = sys.exc_info()
    f = tb.tb_frame
    lineno = tb.tb_lineno
    filename = f.f_code.co_filename
    # Refresh linecache in case the file changed since it was cached.
    linecache.checkcache(filename)
    line = linecache.getline(filename, lineno, f.f_globals)
    return f'EXCEPTION IN ((unknown), LINE {lineno} "{line.strip()}"): {exc_obj}'
# Initialize: short countdown, then session setup.
sleep(3)
settings.logger('Starting aeroplant session')
print('Starting in...')
for i in range(0, 3):
    print(3 - i)
    sleep(1)
settings.init()

# Starting data flow on both serial ports.
try:
    inputs = serial_flush('/dev/ttyUSB0')
    outputs = serial_flush('/dev/ttyUSB1')
except Exception:  # FIX: bare except would also swallow KeyboardInterrupt/SystemExit
    # write to log
    settings.logger("Aeroplant session couldn't start, error:" + str(DetailedException()))

while True:
    try:
        if settings.timestop > datetime.now():
            print('Running interval, collecting data')
            serial_read(inputs, 0)
            serial_read(outputs, 1)
        else:
            settings.timestop = settings.time_increment(15)
            print('Finishing interval, writing data and starting anew')
            sensors_in = return_stats(settings.dict_in)
            sensors_out = return_stats(settings.dict_out)
            # print(sensors_in)
            # print(sensors_out)
            write_data(sensors_in, sensors_out)
            # Reset the per-interval sample dictionaries.
            for key in settings.dict_keys:
                settings.dict_in[key] = []
                settings.dict_out[key] = []
            continue
    except Exception:  # FIX: bare except replaced (see above)
        print(DetailedException())
        # write to log
        settings.logger('Aeroplant session disrupted, error:' + str(DetailedException()))
        break
| UTF-8 | Python | false | false | 1,904 | py | 5 | main.py | 5 | 0.628676 | 0.622899 | 0 | 59 | 31.271186 | 90 |
Jumpst3r/Opt_ML_project | 13,889,924,271,898 | fbc72544a647428b4d890082c489495af70c9e1d | 3ed81919ad11c0f42db908f09d1f47cc50dda291 | /model_trainer.py | 60b093b2b0e439b207c19237ed49f3dc771d3b75 | []
| no_license | https://github.com/Jumpst3r/Opt_ML_project | 922c27904125c27745c37fa3acc171633e060e6a | 502fa9ef7aa8370e1d6692da0c43b7de4ea0055a | refs/heads/master | 2022-11-08T16:58:22.782180 | 2020-06-17T09:09:29 | 2020-06-17T09:09:29 | 249,695,144 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # This file trains a simple model defined in models.py on the CIFAR-10 dataset
#
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
import torch.optim as optim
import matplotlib.pyplot as plt
import math
from torchvision import datasets, transforms
from models import CIFAR_model
import torchvision.models as models
import glob, os
import setup_logger
import logging
logger = logging.getLogger()

# How many confident inputs to store.
NUMSAMPLES = 500

# Select device (GPU if available, else CPU).
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
logger.info(f"using device {device}")

# CIFAR-10 train/test loaders (images converted to tensors, batch size 64).
f_cifar = transforms.Compose([transforms.ToTensor()])
training_set_CIFAR10 = datasets.CIFAR10('.data/', download=True, train=True, transform=f_cifar)
trainloader_CIFAR10 = torch.utils.data.DataLoader(training_set_CIFAR10, batch_size=64, shuffle=True)
test_set_CIFAR10 = datasets.CIFAR10('.data/', download=True, train=False, transform=f_cifar)
testloader_CIFAR10 = torch.utils.data.DataLoader(test_set_CIFAR10, batch_size=64, shuffle=True)
loader = (trainloader_CIFAR10, testloader_CIFAR10)

# Remove previously generated confident outputs:
logger.info("Removing old files..")
files = glob.glob('confident_input/CIFAR_model/*.data')
for f in files:
    os.remove(f)
# Train a given model
def train(model, data, epochs):
logger.info("training <" + str(model) + '>...')
criterion = F.cross_entropy
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, nesterov=True)
for e in range(epochs):
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
inp, target = data
inp, target = inp.to(device), target.to(device)
out = model(inp)
loss = criterion(out, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 1000 == 0:
logger.info('Epoch %d, loss: %f' %
(e + 1, running_loss / 100))
running_loss = 0.0
def test(model, testloader):
    """Evaluate *model* on *testloader*: log accuracy and save up to
    NUMSAMPLES correctly classified inputs to confident_input/<model>/.
    """
    logger.info("testing <" + str(model) + '>...')
    correct = 0
    total = 0
    correct_samples = []
    with torch.no_grad():
        for inp, target in testloader:
            inp, target = inp.to(device), target.to(device)
            outputs = model(inp)
            _, predicted = torch.max(outputs.data, 1)
            total += target.size(0)
            correct += (predicted == target).sum().item()
            # This super inefficient loop is just to save correctly
            # classified examples:
            for testin, (pred, tar) in zip(inp, zip(predicted, target)):
                if pred == tar:
                    correct_samples.append((testin, tar.item()))
                if len(correct_samples) > NUMSAMPLES:
                    break  # NOTE(review): only breaks the inner loop,
                           # so a few extra samples may accumulate
    logger.info(str(model) + ' test acc: %d %%' % (
        100 * correct / total))
    PATH = 'confident_input/' + str(model) + '/'
    logger.info(f"saving {NUMSAMPLES} correctly classified samples to " + PATH)
    for idx, e in enumerate(correct_samples):
        im, label = e
        # naming convention: im_ID_LABEL.data
        torch.save(im, PATH + 'im_' + str(idx) + '_' + str(label) + '.data')
# Build the model and unpack the CIFAR-10 loaders.
model= CIFAR_model()
trainloader, testloader = loader
# Train on the selected device, then evaluate with the network in eval mode.
model.to(device)
train(model, trainloader, 10)
model.eval()
test(model, testloader)
# Persist the trained weights.
logger.info("saving model state to models/")
torch.save(model.state_dict(), 'models/' + str(model) + ".state")
| UTF-8 | Python | false | false | 3,559 | py | 19 | model_trainer.py | 9 | 0.634167 | 0.616746 | 0 | 102 | 33.872549 | 100 |
Aasthaengg/IBMdataset | 12,498,354,848,407 | 32abb041789add34339242e67a9834ded1e080f5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02935/s807057031.py | 23bfc33222d7999b6740f4bb9e5fb65af0ddc7a9 | []
| no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n = int(input())
v = list(map(int,input().split()))
for i in range(n-1):
x = min(v)
v.remove(x)
y = min(v)
v.remove(y)
v.append((x+y)/2)
print(v[0]) | UTF-8 | Python | false | false | 156 | py | 202,060 | s807057031.py | 202,055 | 0.551282 | 0.532051 | 0 | 11 | 13.272727 | 34 |
brian83199/CreatePT | 6,975,026,896,882 | c8b4fff78ff66872f8de0f57885f04805ea6c9a1 | 6d624300ff579ce0fcbceba1f1a28e6ca6035242 | /Testing.py | 8fd2628e97f5c4c40f80307650bc292a40cdb8d8 | []
| no_license | https://github.com/brian83199/CreatePT | f605bb49d3abd423b6b1d147008b5c3ff7b97b57 | bba9b5df397537c3cb727a435d360bcb5a7fa205 | refs/heads/master | 2021-01-25T14:46:27.936499 | 2018-04-15T14:42:43 | 2018-04-15T14:42:43 | 123,727,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import webbrowser
def menu():
    """Print the main menu and return the user's numeric choice."""
    print("Hello")
    print("Your temperature: 1")
    print("Exit: 0")
    print("Enter option:", end=" ")
    # SECURITY NOTE(review): eval() on raw user input executes arbitrary
    # code; int(input()) would be the safe equivalent here.
    option1 = eval(input())
    return option1
# Parker Witt helped order the functions so the program works properly,
# and removed a `return option4` that did not need to be there.
def stress():
    """Ask whether stress comes from work, school or family and open help."""
    print("Is the stress caused by work, school, or family? ")
    print("[1 = work / 2 = school / 3 = family]: ")
    ans = eval(input())
    if (ans < 1) or (ans > 3):
        print("invalid entry. Please try again")
        stress()
    elif ans == 1:
        print("Stress Relief in the Workplace: ")
        print(webbrowser.open('https://www.helpguide.org/articles/stress/stress-in-the-workplace.htm'))
        exit(main())  # FIX: was exit(Main()) -- undefined name (NameError)
    elif ans == 2:
        print("Stress Relief at School: ")
        print(webbrowser.open('https://www.verywellmind.com/reduce-student-stress-and-excel-in-school-3145175'))
        exit(main())
    else:
        print("Stress Relief with the Family: ")
        print(webbrowser.open('https://psychcentral.com/lib/tips-to-reduce-family-stress/'))
        exit(main())
def anxiety():
    """Ask whether the temperature reading is caused by anxiety; open help."""
    print("Is the temperature caused anxiety? ")
    print("[1 = Yes / 2 = No]: ")
    ans = eval(input())
    if (ans < 1) or (ans > 2):
        print("invalid entry. Please try again")
        anxiety()
    elif ans == 1:
        print("Here's how to lower your temperature from anxiety ")
        print(webbrowser.open('https://www.calmclinic.com/anxiety/symptoms/hotness'))
    else:
        exit(main())
def feeling_sick():
    """Ask whether the user feels sick; suggest rest or return to main."""
    print("Are you feeling sick? ")
    print("[1 = Yes /2 = No]: ")
    ans = eval(input())
    if (ans < 1) or (ans > 2):
        print("invalid entry. Please try again")
        feeling_sick()
    elif ans == 1:
        print("You might want to see a doctor or get some rest")
    else:
        exit(main())
def naturally_cold_hands():
    """Ask whether the user has naturally cold hands; open help if not."""
    print("Do you have naturally cold hands? ")
    print("[1 = Yes / 2 = No]: ")
    ans = eval(input())
    if (ans < 1) or (ans > 2):
        print("invalid entry. Please try again")
        naturally_cold_hands()
    elif ans == 2:
        print("Here's how to raise the temperature of your hands: ")
        print(webbrowser.open('http://www.naturalfertilityandwellness.com/raise-basal-body-temperature/'))
    else:
        exit(main())
def cold_day_outside():
    """Ask whether it was cold outside; open warming-up help if so."""
    print("If you were just outside, was it cold out? ")
    print("[1 = Yes / 2 = No]: ")
    ans = eval(input())
    if (ans < 1) or (ans > 2):
        print("invalid entry. Please try again")
        cold_day_outside()
    elif ans == 1:
        print("Here's how to raise your temperature on a cold day: ")
        print(webbrowser.open("https://www.wikihow.com/Increase-Body-Temperature"))
    else:
        exit(main())
def workout():
    """Ask whether the user just worked out; open cooling-down help if so."""
    # (removed an unused `done = False` local)
    print("Did you just workout? ")
    print("[1 = Yes / 2 = No]: ")
    ans = eval(input())
    if (ans < 1) or (ans > 2):
        print("invalid entry. Please try again")
        workout()
    elif ans == 1:
        print("Here's how to lower your temperature after working out: ")
        print(webbrowser.open("https://healthyliving.azcentral.com/lower-body-temperature-quickly-after-exercise-17366.html"))
    else:
        exit(main())
def questions():
    """Menu of possible causes; dispatch to the matching handler."""
    done = False
    print("Please press the reason if known: ")
    print("stress                  1 ")
    print("anxiety                 2 ")
    print("feeling_sick            3 ")
    print("naturally_cold_hands    4 ")
    print("cold_day_outside        5 ")
    print("workout                 6 ")
    print("Exit                    0 ")
    print("Enter option:", end=" ")
    option4 = eval(input())
    while not done:
        if ((option4 < 0) or (option4 > 7)):
            # NOTE(review): the prompt is never repeated, so an invalid
            # choice loops forever printing this message.
            print("Invalid Option")
        elif option4 == 1:
            stress()
        elif option4 == 2:
            anxiety()
        elif option4 == 3:
            feeling_sick()
        elif option4 == 4:
            naturally_cold_hands()
        elif option4 == 5:
            cold_day_outside()
        elif option4 == 6:
            workout()
        else:
            done = True
            break
##################################################################
def temp():
    """Read the 1-Wire temperature sensor, convert to Fahrenheit, triage."""
    tfile = open("/sys/bus/w1/devices/10-000802824e58/w1_slave")
    text = tfile.read()
    tfile.close()
    # Last whitespace-separated token of the sysfs output holds the raw
    # reading; skip its 2-char prefix and scale from millidegrees Celsius.
    temperature_data = text.split()[-1]
    temperature = float(temperature_data[2:])
    temperature = temperature / 1000
    temperature = temperature * (9 / 5) + 32  # Celsius -> Fahrenheit
    print(temperature)  # FIX: was a Python 2 `print temperature` statement
    if ((temperature < 72.5) or (temperature > 80)):
        print("Are you feeling well today?")
        print('[ 1= Yes or 2 = No]: ')
        ans = int(input())
        if (ans < 1) or (ans > 2):
            print(" Invalid response. Please enter 1 for yes or 2 for no")
            temp()
        elif ans == 1:
            done = True
        else:
            questions()
    else:
        print("I am glad you are feeling well")
        exit(main())
def main():
    """Main loop: show the menu until the user picks Exit (0)."""
    done = False
    while not done:
        option2 = menu()
        if ((option2 < 0) or (option2 > 2)):
            print(" Invalid selection")
        elif option2 == 1:
            temp()
        elif option2 == 0:
            done = True
            break


###################################################################
if __name__ == '__main__':
    main()
| UTF-8 | Python | false | false | 5,548 | py | 2 | Testing.py | 1 | 0.530101 | 0.508652 | 0 | 194 | 27.597938 | 162 |
michaeliyke/fibonacci | 8,418,135,926,144 | 6114a8665e368fe5047d4a6ac79656b820c154ab | 212bf78b4d4274f967e2ee3915ade669f8d4937b | /stack/__main__.py | 9a76401530b9c325faf31d15e4a410ab2b64c371 | []
| no_license | https://github.com/michaeliyke/fibonacci | 9f8187d802e767a0c9ff4ed1ba04327336f4ed35 | 0c2a1957504b2f1689117fcc7cbebf9c71574afe | refs/heads/master | 2023-05-28T00:08:48.146459 | 2022-04-19T02:03:42 | 2022-04-19T02:03:42 | 373,456,959 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
opened is an empty stack
Mark state as valid false
If given symbols set is an odd number, return early
Loop through symbols creating an object of each
when encounter an openner, add it to opened stack
when encounter a closer, match it with the topmost openner:
yes - reduce opened.
no - quit. Mark state as valid false
At the end of loop, mark state as valid true
"""
# Balanced symbols
b1 = "(((())))"
b2 = "([]{}())"
b3 = "([{}])"
b4 = "([{}])(()()())[({()})]"
# Unbalanced symbols
u1 = "()((((([{]))))(){[()]}"
u2 = "()((((([]))))(){[()]}"
u3 = "[}([){]"
from challenge import isBalanced
print("\n# Balanced symbols")
print(" symbols: ", b1, "isBalanced: ", isBalanced(b1))
print(" symbols: ", b2, "isBalanced: ", isBalanced(b2))
print(" symbols: ", b3, "isBalanced: ", isBalanced(b3))
print(" symbols: ", b4, "isBalanced: ", isBalanced(b4))
print("\n")
print("\n# Unbalanced symbols")
print(" symbols: ", u1, "isBalanced: ", isBalanced(u1))
print(" symbols: ", u2, "isBalanced: ", isBalanced(u2))
print(" symbols: ", u3, "isBalanced: ", isBalanced(u3))
| UTF-8 | Python | false | false | 1,076 | py | 36 | __main__.py | 28 | 0.616171 | 0.596654 | 0 | 35 | 29.742857 | 59 |
cms-sw/cmssw | 14,705,968,032,490 | 118e67060aaa00ac395db3e1c5c4a6cddd4963ee | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /RecoTracker/TrackProducer/python/GsfTrackProducer_cfi.py | e60ca75496e5d1426943645cd0cd20ab5aa9cc9b | [
"Apache-2.0"
]
| permissive | https://github.com/cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | false | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | 2023-09-09T18:47:07 | 2023-09-14T19:14:27 | 1,330,249 | 980 | 4,104 | 807 | C++ | false | false | import FWCore.ParameterSet.Config as cms
gsfTrackProducer = cms.EDProducer("GsfTrackProducer",
src = cms.InputTag("CkfElectronCandidates"),
beamSpot = cms.InputTag("offlineBeamSpot"),
producer = cms.string(''),
Fitter = cms.string('GsfElectronFittingSmoother'),
useHitsSplitting = cms.bool(False),
TrajectoryInEvent = cms.bool(False),
TTRHBuilder = cms.string('WithTrackAngle'),
Propagator = cms.string('fwdElectronPropagator'),
NavigationSchool = cms.string('SimpleNavigationSchool'),
MeasurementTracker = cms.string(''),
MeasurementTrackerEvent = cms.InputTag('MeasurementTrackerEvent'),
GeometricInnerState = cms.bool(False),
AlgorithmName = cms.string('gsf')
)
| UTF-8 | Python | false | false | 758 | py | 46,375 | GsfTrackProducer_cfi.py | 40,422 | 0.692612 | 0.692612 | 0 | 17 | 43.470588 | 89 |
WiktorWos/sw_test | 7,584,912,253,426 | 2044298469904688731f01aa8dc8fee08af30063 | cbae91c134147f1c4ab8103f96e7d4e42d6cdeaa | /host.py | 1597cfff0b7ae0843c4a996d6dd97093c7815c31 | []
| no_license | https://github.com/WiktorWos/sw_test | c2db4a0854233b4123d1780ee27d2b9ff3a07477 | 6bf2df7746127bb6d852c1a994bba641cc773254 | refs/heads/master | 2022-10-12T13:14:27.790623 | 2020-06-13T13:42:01 | 2020-06-13T13:42:01 | 272,014,197 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import socket
import subprocess
import os
PORT = 65000
FRAME = 1024
"wiadomoลฤ bitรณw, pierwszy bit to numer funkcji"
def functions(message):
f = int(message[0])
if f == 1:
what = int(message[1])
if what == 1:
cmd = "amixer -D pulse sset Master 10%-"
else:
cmd = "amixer -D pulse sset Master 10%+"
subprocess.call(cmd, shell=True)
return ""
if f == 2:
cmd = "amixer -D pulse get Master | awk -F 'Left:|[][]' '{ print $3 }' | tr -d '%' | tr -d '\n' > temp"
subprocess.call(cmd, shell=True)
with open("temp", 'r') as file:
vol = file.read()
os.remove("temp")
return "{}{}".format(2, vol[:len(vol)-1])
if f == 3:
"test"
return "random text"
if f == 4:
pass
if f == 5:
pass
if f == 6:
pass
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as host_socket:
host_socket.bind(('localhost', PORT))
host_socket.listen(1)
connection, address = host_socket.accept()
with connection:
print('Connected by', address)
data = ''
while True:
msg = connection.recv(FRAME)
print(msg)
msg = msg.decode("utf-8")
print(msg)
data += msg
if len(data) > 0:
func = functions(data)
to_send = "{}".format(func)
connection.send(bytes(to_send, "utf-8"))
data = ''
| UTF-8 | Python | false | false | 1,507 | py | 1 | host.py | 1 | 0.5 | 0.480718 | 0 | 55 | 26.345455 | 111 |
rnic92/ProteinComparisonTool | 2,972,117,396,990 | bb474d13edb4d0f16a92e5bf8e50e105df150b3c | f979384256d08ed5bcdd7b7a3b29ec72cc3977f0 | /fun/substring.py | 7e1c1ea361210b42e9531ad83f642b9ced7f69be | [
"LicenseRef-scancode-warranty-disclaimer"
]
| no_license | https://github.com/rnic92/ProteinComparisonTool | e92954c9ab999e3b63c768fd4bda9cb264676862 | 600f214025f1c5851c205b265d41eb63fe693a0c | refs/heads/main | 2023-02-24T23:43:30.274320 | 2021-02-05T05:17:38 | 2021-02-05T05:17:38 | 335,474,627 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def substr_after(s, delim):
return s.partition(delim)[2]
def substr_before(s, delim):
return s.partition(delim)[0]
| UTF-8 | Python | false | false | 131 | py | 8 | substring.py | 5 | 0.648855 | 0.633588 | 0 | 6 | 19.833333 | 32 |
flylzj/dota2_scrapy | 3,633,542,354,777 | cd799ea65f4b409983f206e38f6c791333cce7a1 | 7a662d0807b81a8c8f4a8b234cdee31a003cd577 | /dota2_scrapy/spiders/c5game.py | e935e2eb30a889d9d67321438215cf529283791f | []
| no_license | https://github.com/flylzj/dota2_scrapy | 92996eac264cf294c8fa60a27db6a0da0e82978e | b73b68f9c8d92fe0c7264945c668aa4cb0121ab0 | refs/heads/master | 2020-03-23T16:36:54.160444 | 2018-08-20T12:06:49 | 2018-08-20T12:06:49 | 141,820,127 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
import scrapy
from dota2_scrapy.items import Dota2ScrapyItem
import re
import json
import requests
class c5game(scrapy.Spider):
name = "c5game"
custom_settings = {
"CONCURRENT_REQUESTS": 3,
"CONCURRENT_REQUESTS_PER_DOMAIN": 1,
"need_proxy": False,
"DOWNLOAD_DELAY": 1,
"PROXY_MODE": 0,
"PROXY_LIST": "/".join(__file__.split("/")[0:-2]) + "/proxy.txt",
"RETRY_HTTP_CODES": [500, 503, 504, 400, 403, 404, 408, 429],
"RETRY_TIMES": 100,
"DOWNLOADER_MIDDLEWARES": {
'scrapy.downloadermiddlewares.retry.RetryMiddleware': 90,
'scrapy_proxies.RandomProxy': 100,
'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 110,
}
}
def start_requests(self):
u = "https://www.c5game.com/dota.html?page={}"
r = requests.get("https://www.c5game.com/dota.html")
tmp = re.findall(r'\?page=[0-9]{1,3}', r.text)[-1]
page = int(tmp.strip("?page="))
for i in range(1, page + 1):
url = u.format(i)
yield scrapy.Request(url, callback=self.get_items)
def get_items(self, response):
items = response.xpath('//*[@id="yw0"]/div[1]/ul/li')
for i in items:
item = Dota2ScrapyItem()
item["item_type"] = "c5game"
item["item_href"] = "https://www.c5game.com" + i.css('p.name > a::attr(href)').extract()[0]
item["item_name"] = i.css('p.name > a > span::text').extract()[0]
yield scrapy.Request(item["item_href"], meta={"item": item}, callback=self.get_item_api)
def get_item_api(self, response):
item = response.meta["item"]
api = response.css("#sale-body::attr(data-url)")[0].extract()
item_id = re.search(r'id=[0-9]*', api).group().strip("id=")
item["item_id"] = item_id
page_num = 1
api = "https://www.c5game.com/api/product/sale.json?id={}&page={}".format(item_id, page_num)
headers = {
"x-requested-with": "XMLHttpRequest"
}
yield scrapy.Request(api, meta={"item": item, "page_num": page_num, "need_proxy": self.custom_settings.get("need_proxy"), "api": api}, headers=headers, callback=self.get_sale_prices)
def get_sale_prices(self, response):
headers = {
"x-requested-with": "XMLHttpRequest"
}
item = response.meta["item"]
item_id = item["item_id"]
page_num = response.meta["page_num"]
if not item.get("sale_prices"):
item["sale_prices"] = []
api_data = json.loads(response.text)
data = api_data.get("body")
if not data:
yield scrapy.Request(response.url,
meta={"item": item, "page_num": page_num, "need_proxy": self.custom_settings.get("need_proxy")},
headers=headers, callback=self.get_sale_prices
)
more = data.get("more")
items = data.get("items")
for i in items:
price = i.get("price")
item["sale_prices"].append(price)
# print(more)
if more == 1:
page_num += 1
api = "https://www.c5game.com/api/product/sale.json?id={}&page={}".format(item_id, page_num)
yield scrapy.Request(api,
meta={"item": item, "page_num": page_num, "api": api, "need_proxy": self.custom_settings.get("need_proxy")},
headers=headers, callback=self.get_sale_prices)
elif more == 0:
page_num = 1
api = "https://www.c5game.com/api/product/purchase.json?id={}&page={}".format(item["item_id"], 1)
# print("api", api)
item["sale_count"] = len(item["sale_prices"])
yield scrapy.Request(api,
meta={"item": item, "page_num": page_num, "need_proxy": self.custom_settings.get("need_proxy"), "api": api},
headers=headers, callback=self.get_purchase_prices)
def get_purchase_prices(self, response):
headers = {
"x-requested-with": "XMLHttpRequest"
}
item = response.meta["item"]
item_id = item["item_id"]
page_num = response.meta["page_num"]
if not item.get("purchase_prices"):
item["purchase_prices"] = []
api_data = json.loads(response.text)
data = api_data.get("body")
try:
more = data.get("more")
items = data.get("items")
# print("pur", more)
for i in items:
price = i.get("price")
item["purchase_prices"].append(price)
if more == 1:
page_num += 1
api = "https://www.c5game.com/api/product/purchase.json?id={}&page={}".format(item_id, page_num)
yield scrapy.Request(api,
meta={"item": item, "page_num": page_num, "api": api, "need_proxy": self.custom_settings.get("need_proxy")},
headers=headers, callback=self.get_purchase_prices)
else:
item["purchase_count"] = len(item["purchase_prices"])
yield item
except Exception:
item["purchase_count"] = len(item["purchase_prices"])
yield item
| UTF-8 | Python | false | false | 5,420 | py | 18 | c5game.py | 13 | 0.526937 | 0.512731 | 0 | 125 | 42.24 | 190 |
sggpls/hse-python-ml | 16,192,026,719,731 | 58e1229b56f03382dcb59d3193b76818d0ba971b | cc1c2eccc35ab56454ea5a7a68a70a4bff049896 | /python/labs/lab03/min-max/sol_min_max.py | 9fd942246d66a7636116cede8a5c13e07b653970 | [
"MIT"
]
| permissive | https://github.com/sggpls/hse-python-ml | 2fce2b5c9592a1da8167ffa2ecf482076dfbeaea | 0a190b7eefd7548e00f07e83edc7a5a4b821a3b9 | refs/heads/master | 2020-04-04T00:43:27.442566 | 2018-11-14T19:57:58 | 2018-11-14T19:57:58 | 155,660,563 | 1 | 0 | MIT | true | 2018-11-01T04:09:19 | 2018-11-01T04:09:19 | 2018-10-24T03:44:35 | 2018-10-24T03:44:34 | 1,797 | 0 | 0 | 0 | null | false | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from collections import Iterable
def _min_max(iterable, *args, cmp=None):
if not args:
if not isinstance(iterable, Iterable):
raise TypeError(f"'{iterable.__class__.__name__}'"
f" object is not iterable")
if not args:
it = iter(iterable)
current = next(it)
else:
it = iter(args)
current = iterable
for e in it:
if cmp(e, current):
current = e
return current
def minimum(iterable, *args, key=None):
"""
The same as built-in min (exclude default parameter).
With a single iterable argument, return its smallest item. The
default keyword-only argument specifies an object to return if
the provided iterable is empty.
>>> minimum(1, 2, 3) == min(1, 2, 3)
True
>>> minimum([1, 2, 3]) == min([1, 2, 3])
True
"""
if key is None:
def cmp(lhs, rhs):
return lhs < rhs
else:
def cmp(lhs, rhs):
return key(lhs) < key(rhs)
return _min_max(iterable, *args, cmp=cmp)
def maximum(iterable, *args, key=None):
"""
The same as built-in max (exclude default parameter).
With a single iterable argument, return its biggest item. The
default keyword-only argument specifies an object to return if
the provided iterable is empty.
>>> maximum(1, 2, 3) == max(1, 2, 3)
True
>>> maximum([1, 2, 3]) == max([1, 2, 3])
True
"""
if key is None:
def cmp(lhs, rhs):
return lhs > rhs
else:
def cmp(lhs, rhs):
return key(lhs) > key(rhs)
return _min_max(iterable, *args, cmp=cmp)
if __name__ == "__main__":
import doctest
doctest.testmod()
| UTF-8 | Python | false | false | 1,780 | py | 140 | sol_min_max.py | 72 | 0.560674 | 0.546067 | 0 | 73 | 23.383562 | 66 |
LYNCSS/CryptoCurrency_exchange | 12,257,836,712,544 | ae6da3af5d9d1f94308ace7c853fa4f483366bce | 4492073fecea53b766ba6bd4fc966b82ebc3783b | /Yellow_BCCX/Future_Ad.py | f00bc247cb1647d1764ea75fd41cb7cbab04f7d7 | []
| no_license | https://github.com/LYNCSS/CryptoCurrency_exchange | 30f0923141b20e247df2f424af8d67acefb66ae1 | 760cea167a0987ced75e3b454ff1b7dc925c4e83 | refs/heads/master | 2023-01-03T03:37:02.082639 | 2019-06-01T07:29:46 | 2019-06-01T07:29:46 | 175,184,930 | 4 | 2 | null | false | 2022-12-27T15:15:36 | 2019-03-12T10:12:08 | 2021-10-02T13:28:33 | 2022-12-27T15:15:35 | 8,129 | 4 | 3 | 5 | Python | false | false | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 27 10:56:06 2018
@author: LYNCSS
"""
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import os, sys, pyautogui, time, imaplib, inspect, email, time
# To access Gmail, you have to apply two step. Step 1: Allow imap. Step 2: low secure access
class FutureWorker: #raymond788055 Aa19910917
def __init__(self, account, password):
self.Future_account = account
self.Futur_password = password
self.ChromeDV_route = "C:\\Users\\LYNCSS\\Google_drive\\WORKSTATION\\Crypto_work\\Yellow_BCCX\\configuration\\webdriver\\chromedriver.exe"
self.driver = webdriver.Chrome(self.ChromeDV_route)
self.driver.get("https://adpro.futurenet.club/login")
self._progress_bar = None
def login(self):
self.driver.find_element_by_name("email").click()
self.driver.find_element_by_name("email").clear()
self.driver.find_element_by_name("email").send_keys(self.Future_account)
self.driver.find_element_by_name("password").click()
self.driver.find_element_by_name("password").clear()
self.driver.find_element_by_name("password").send_keys(self.Futur_password)
self.driver.find_element_by_name("remember").click()
self.driver.find_element_by_css_selector("button.btn.btn-primary").click()
time.sleep(0.5)
while True:
cont = self.driver.find_element_by_css_selector("button.btn.ng-binding")
if cont.text == 'X':
cont.click()
pass
break
def Locate_Bar(self):
self.driver.set_window_position(0,0)
self._progress_bar = self.driver.find_element_by_css_selector("div.progress-bar.progress-bar-success")
y_relat_loc = self._progress_bar.location['y']
browser_navigation_panel_height = self.driver.execute_script('return window.outerHeight - window.innerHeight;')
y_abs_coord = y_relat_loc + browser_navigation_panel_height
x_abs_coord = self._progress_bar.location['x']
return (x_abs_coord, y_abs_coord)
def watchAd_Unit(self):
self.driver.find_element_by_link_text(u"ๆๅผ").click()
WebDriverWait(self.driver, None).until(EC.presence_of_element_located(By.CSS_SELECTOR, "div.progress-bar.progress-bar-success"))
tmp_coord = self.Locate_Bar()
pyautogui.moveTo(tmp_coord[0], tmp_coord[1])
while True:
try:
self.driver.find_element_by_link_text(u"ไธไธๅๅนฟๅ").click()
break
except:
time.sleep(0.5)
def watchAd(self, random_parm = 1):
if random_parm == 1:
time.sleep(5)
pyautogui.FAILSAFE = False
self.driver.find_element_by_link_text(u"่ง็ๅนฟๅ").click()
pyautogui.moveTo(0, 0)
screen_width_pixel = 1920
screen_depth_pixel = 1080
for mouseY_pos in range(0, screen_depth_pixel, 10):
find = False
try:
time.sleep(0.5)
self.watchAd_Unit()
break
except:
try:
self.driver.find_element_by_css_selector("div.progress-bar.progress-bar-success")
break
except:
pass
for mouseX_pos in range(0, screen_width_pixel, 100):
pyautogui.moveTo(mouseX_pos, mouseY_pos)
try:
time.sleep(0.5)
self.watchAd_Unit()
find = True
break
except:
try:
self.driver.find_element_by_css_selector("div.progress-bar.progress-bar-success")
find = True
break
except:
pass
pass
if find == True:
break
time.sleep(40)
self.driver.find_element_by_link_text(u"ไธไธๅๅนฟๅ").click()
def Authenticate(self, num_str):
self.driver.find_element_by_id("code").click()
self.driver.find_element_by_id("code").clear()
self.driver.find_element_by_id("code").send_keys(num_str)
self.driver.find_element_by_css_selector("button.btn.btn-primary").click()
class Gmail_reader:
def __init__(self, Gaccount, Gpassword): # raymond788055@gmail.com qweesd541682
self.account = Gaccount
self.password = Gpassword
self.imap = imaplib.IMAP4_SSL('imap.gmail.com')
self.imap.login(self.account, self.password)
def ReadLastmail(self):
self.imap.select()
result, data = self.imap.search(None, "UNSEEN")
mail_number = data[0].split()[-1]
typ, mail_content = self.imap.fetch(mail_number, '(RFC822)')
before_process = str(mail_content[0][1])
assert len(before_process.split("FutureAdPro")) > 1
first_process_content = before_process.split('color:green')[-1].split('</big>')[0]
final_content = ''
for index in first_process_content:
try:
final_content = final_content + str(int(index))
except:
pass
return final_content
def GetTime():
time_str = time.strftime("%H:%M:%S", time.gmtime())
time_list = time_str.split(":")
return time_list
def tst_main(F_account, F_password, Gaccount, Gpassword):
worker = FutureWorker(F_account, F_password)
worker.login()
Gworker = Gmail_reader(Gaccount, Gpassword)
try:
worker.watchAd()
except:
auth_code = Gworker.ReadLastmail()
worker.Authenticate(auth_code)
worker.watchAd()
def main(F_account, F_password, G_account, G_password, hour):
worker = FutureWorker(F_account, F_password)
worker.login()
Gworker = Gmail_reader(G_account, G_password)
now = GetTime()
while True:
now = GetTime()
if ((int(now[0]) == (hour-8)) and (int(now[1]) >= 0) and (int(now[2]) >= 0)):
break
for index in range(0, 9):
try:
worker.watchAd()
except :
auth_code = Gworker.ReadLastmail()
worker.Authenticate(auth_code)
worker.watchAd()
pass
return 1
| UTF-8 | Python | false | false | 6,748 | py | 28 | Future_Ad.py | 15 | 0.559261 | 0.545116 | 0 | 168 | 38.97619 | 146 |
Ruben0406/FlowerPower | 11,416,023,118,985 | 4476eb46e78acf476a8a15cdb48d0b2a2c8fec4e | 6dceac53d5bb2f65ab0dc6bd582092a72d16d10b | /routes/public_pages.py | 3d316d5b8efca8cf6ef140a2fc5c9b3298deea5f | []
| no_license | https://github.com/Ruben0406/FlowerPower | 38158efd287efadfbc5a27893178b952ad96d9ca | 4c766fd8acfea69fc9900f16081e9f0acf8bfdb2 | refs/heads/master | 2019-07-07T12:13:04.695821 | 2017-04-07T16:51:28 | 2017-04-07T16:51:28 | 87,048,619 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Blueprint, render_template, request, redirect, session
import sys
sys.path.append('./utilities')
sys.path.append('./classes')
import sanitize as sz
import users
import shops
import articles
import orders
import invoice_rules
import invoices
import db_handler as db
import session_handler as sh
import crypto
import time
import locale
public_pages = Blueprint('public_pages', __name__, template_folder='templates')
@public_pages.route('/', methods=['GET'])
def index_page():
shop = shops.get_shop_dictionary(3)
article = articles.get_article_list()[0:4]
return render_template('public/index.html', articles=article, shop=shop)
@public_pages.route('/bevestigen', methods=['GET', 'POST'])
def order_confirmation_page():
if request.method == 'POST':
if sh.check_session('user'):
user_id = db.execute("SELECT user_id FROM `users` where user_username = '{0}'".format(sh.read_session('user')))
if user_id == None: user_id = 0
invoice_id = db.execute("SELECT max(invoice_id) FROM `invoices`") + 1
for item in sh.read_session('shopping_cart'):
orders.add_order(item['id'], request.form['shop'], item['amount'], user_id, 0, 'False')
invoice_rules.add_invoice_rule(item['name'], invoice_id, item['amount'], item['price'])
sh.update_session('shopping_cart', [])
sh.update_session('shopping_cart_total_price', 0)
shop_list = shops.get_shop_dictionary(request.form['shop'])
user_list = users.get_user_dictionary([user_id])
user_name = '{0} {1} {2}'.format(user_list['initials'], user_list['preposition'], user_list['last_name'])
invoice_number = db.execute("SELECT max(invoice_number) FROM `invoices`") + 1
invoices.add_invoice(invoice_id, time.time(), invoice_number, user_id, shop_list['name'], shop_list['address'],
shop_list['postal'], shop_list['city'], shop_list['number'], user_name, user_list['address'], user_list['postal'], user_list['city'])
return redirect('/facturen')
return render_template('public/confirm_order.html', shops = shops.get_shop_list())
@public_pages.route('/boeketten', methods=['GET', 'POST'])
def article_page():
if request.method == 'POST':
if sh.not_user() and sh.not_employee():
return render_template('public/articles.html', articles=articles.get_article_list(), message='Log in of registreer om bloemstukken te kunnen bestellen.', type='danger')
article_id = request.form['id']
amount = int(request.form['amount'])
if amount > 0:
name = articles.get_article_name(article_id)
price = articles.get_article_price(article_id)
sh.shopping_cart_add_item(sh.shopping_cart_dictionary(article_id, name, price, amount))
return render_template('public/articles.html', articles=articles.get_article_list(), message='<b>{0} maal {1}</b> is toegevoegt aan uw winkelmandje.'.format(amount, name), type='success')
return render_template('public/articles.html', articles=articles.get_article_list(), message='Voer minimaal 1 in om de bestelling te plaatsen', type='danger')
return render_template('public/articles.html', articles=articles.get_article_list())
@public_pages.route('/winkelwagen/verwijderen/<item_id>', methods=['GET'])
def shopping_cart_delete_page(item_id):
sh.shopping_cart_remove_item(item_id)
return redirect('/boeketten')
@public_pages.route('/contact', methods=['GET'])
def contact_page():
return render_template('public/contact.html', shops=shops.get_shop_list())
@public_pages.route('/factuur/<invoice_id>', methods=['GET'])
def invoice_page(invoice_id):
invoice_data = invoices.get_invoice_dictionary(invoice_id)
invoice_rules = db.execute("SELECT * FROM `invoice_rules` WHERE invoice_id = ?", [invoice_id], fetch=2)
total = 0
for invoice_rule in invoice_rules:
total += int(invoice_rule[4])
total_pros = locale.currency(total / 100)
btw = db.execute("SELECT setting_value FROM `settings` WHERE setting_name = ?", ['btw'])
calc = (total * float('0.{0}'.format(btw))) / 100
btw_pros = locale.currency(calc)
total_checked = locale.currency((total / 100)- calc)
return render_template('public/invoice.html', invoice=invoice_data, invoice_rules=invoice_rules, btw=btw, btw_pros=btw_pros, total=total_pros, total_checked=total_checked)
@public_pages.route('/facturen', methods=['GET'])
def invoices_page():
user_id = db.execute("SELECT user_id FROM `users` where user_username = '{0}'".format(sh.read_session('user')))
invoice_list = invoices.get_invoice_user_list(user_id)
return render_template('public/invoices.html', invoices=invoice_list)
@public_pages.route('/gegevens', methods=['GET', 'POST'])
def setting_page():
if not sh.check_session('user'): return redirect('/')
user_id = []
user_id.append(db.execute("SELECT user_id FROM `users` where user_username = '{0}'".format(sh.read_session('user'))))
if not user_id[0]: user_id[0] = 0
if request.method == 'POST':
sanitized_request_data = sz.sanitize_user_edit_request_data(sz.get_request_form_data(request))
if sanitized_request_data[0]:
users.set_user_dictionary(user_id, sz.hash_request_data_password(sz.get_request_form_data(request)))
return render_template('public/settings.html', user=users.get_user_dictionary(user_id), message=sanitized_request_data[1], type=sanitized_request_data[2])
return render_template('public/settings.html', user=users.get_user_dictionary(user_id), message=sanitized_request_data[1], type=sanitized_request_data[2])
return render_template('public/settings.html', user=users.get_user_dictionary(user_id))
@public_pages.route('/registreren', methods=['GET', 'POST'])
def signup_page():
if request.method == 'POST':
sanitized_request_data = sz.sanitize_user_request_data(sz.get_request_form_data(request))
if sanitized_request_data[0]:
data_set = sz.lower_request_data_username(sz.hash_request_data_password(sz.get_request_form_data(request)))
users.add_user(data_set)
sh.create_session('user', request.form['username'].lower())
sh.create_session('shopping_cart', [])
sh.create_session('shopping_cart_total_price', 0)
sh.destroy_session('login_error')
return redirect('/')
else:
return render_template('public/signup.html', user=sz.get_request_form_data(request), message=sanitized_request_data[1], type=sanitized_request_data[2])
return render_template('public/signup.html')
@public_pages.route('/login', methods=['GET', 'POST'])
def login_page():
if request.method == 'POST':
username = request.form['username'].lower()
password = crypto.sha256(request.form['password'].encode())
user = db.user_login(username, password)
employee = db.employee_login(username, password)
if user or employee:
sh.create_session('shopping_cart', [])
sh.create_session('shopping_cart_total_price', 0)
if user: sh.create_session('user', username)
if employee: sh.create_session('employee', username)
sh.destroy_session('login_error')
return redirect('/')
sh.create_session('login_error', 'Onbekende login gegevens')
return redirect('/')
@public_pages.route('/logout', methods=['GET', 'POST'])
def logout():
sh.destroy_session('user')
sh.destroy_session('employee')
sh.destroy_session('shopping_cart')
sh.destroy_session('shopping_cart_total_price')
sh.destroy_session('login_error')
return redirect('/')
| UTF-8 | Python | false | false | 7,230 | py | 38 | public_pages.py | 20 | 0.715076 | 0.708437 | 0 | 151 | 46.880795 | 190 |
CiscoTestAutomation/genielibs | 17,729,625,023,573 | 12ef717650e72f56fa21f410c83d595c733f458b | f509ab9825c542e09b0c6591d86ef1f9feb540a6 | /pkgs/sdk-pkg/src/genie/libs/sdk/triggers/blitz/tests/test_blitz.py | f79a4293d9d75a70643ccb474f0353a8f261e82a | [
"Apache-2.0"
]
| permissive | https://github.com/CiscoTestAutomation/genielibs | 97f597117193aaa18028defeb69078ebb241173a | e42e51475cddcb10f5c7814d0fe892ac865742ba | refs/heads/master | 2023-08-11T16:39:41.959947 | 2023-07-27T17:58:42 | 2023-07-27T17:58:42 | 130,717,047 | 109 | 60 | Apache-2.0 | false | 2023-08-29T22:32:08 | 2018-04-23T15:21:56 | 2023-08-14T13:37:54 | 2023-08-29T22:32:07 | 12,197 | 96 | 53 | 14 | Python | false | false | #! /usr/bin/env python
import os
import yaml
import tempfile
import unittest
from unittest import mock
from genie.testbed import load
from genie.conf.base import Device
from genie.libs.sdk.triggers.blitz.blitz import Blitz
from genie.libs.sdk.triggers.blitz.actions import actions
from pyats.easypy import Task
from pyats.easypy.job import Job
from pyats.easypy import runtime
from pyats.aetest.steps import Steps
from pyats.datastructures import AttrDict
from pyats.aetest.parameters import ParameterMap
from pyats.aetest.signals import AEtestFailedSignal
from pyats.easypy.common_funcs import init_runtime
from pyats.results import Passed, Failed, Errored, Skipped,\
Aborted, Passx, Blocked
from genie.libs.sdk.triggers.blitz.markup import get_variable
class MockDevice:
    """Lightweight stand-in for a pyATS device object.

    Only carries the three attributes that the code under test reads:
    the owning testbed, the device name, and the device os string.
    """

    def __init__(self, testbed, name, os):
        # Plain attribute storage; no behavior beyond holding the values.
        self.testbed = testbed
        self.name = name
        self.os = os
class TestBlitz(unittest.TestCase):
    """Unit tests for the Blitz trigger dispatcher and its yaml trigger datafiles."""

    # Canned per-action return values keyed by action name.
    # NOTE(review): not referenced in this part of the file — presumably
    # consumed by mocked actions further down; confirm before removing.
    actions_dict = {'execute': 'sample output',
                    'parse': {'a':'b'},
                    'learn': {'a':'b'},
                    'api': 1500}

    # Valid datafile: one 'execute' action whose output is saved through a
    # named regex capture group (loaded by setUp; checked in test_dispatcher_1).
    yaml1 = '''
        test:
            groups: ['test']
            description: Modifying the testcase description
            source:
              pkg: genie.libs.sdk
              class: triggers.blitz.blitz.Blitz
            devices: ['PE1']
            test_sections:
                - section:
                    - description: "section description"
                    - execute:
                        save:
                          - filter: (?P<host>host).*
                            regex: True
                        device: PE1
                        command: show version
    '''

    # Valid datafile: 'parse' action with an alias and a per-action
    # description, saving a filter result into 'name1' (test_dispatcher_2).
    yaml2 = '''
        test:
            groups: ['test']
            description: Modifying the testcase description
            source:
              pkg: genie.libs.sdk
              class: triggers.blitz.blitz.Blitz
            devices: ['PE1']
            test_sections:
                - section:
                    - description: "section description"
                    - parse:
                        device: PE1
                        alias: parse1
                        description: Mocked action description
                        save:
                          - variable_name: name1
                            filter: contains('hardware')
                        command: show version
    '''

    # Datafile with 'continue: False' at the *section* level
    # (intended for test_dispatcher_section_continue_false, currently disabled).
    yaml3 = '''
        test:
            groups: ['test']
            description: Modifying the testcase description
            source:
              pkg: genie.libs.sdk
              class: triggers.blitz.blitz.Blitz
            devices: ['PE1']
            test_sections:
                - section1:
                    - continue: False
                    - execute:
                        device: PE1
                        command: show version
                        include:
                          - [0-9]
                    - execute:
                        device: PE1
                        command: show vrf
                - section2:
                    - description: "section description"
                    - execute:
                        device: PE1
                        command: show version
    '''

    # Datafile with 'continue: False' at the *action* level.
    yaml4 = '''
        test:
            groups: ['test']
            description: Modifying the testcase description
            source:
              pkg: genie.libs.sdk
              class: triggers.blitz.blitz.Blitz
            devices: ['PE1']
            test_sections:
                - section1:
                    - execute:
                        continue: False
                        device: PE1
                        command: show version
                        include:
                          - [0-9]
                    - execute:
                        device: PE1
                        command: show vrf
    '''

    # Valid datafile: save via 'regex_findall', which collects every match
    # of the pattern into a list (test_save_findall).
    yaml5 = '''
        test:
            groups: ['test']
            description: Modifying the testcase description
            source:
              pkg: genie.libs.sdk
              class: triggers.blitz.blitz.Blitz
            devices: ['PE1']
            test_sections:
                - section:
                    - execute:
                        save:
                          - variable_name: execute_output
                            regex_findall: ([a-z]+)
                        device: PE1
                        command: show version
    '''

    # Valid datafile: save the action output reshaped into a nested dict via
    # 'as_dict', using %VARIABLES markup to reference the raw output.
    yaml6 = '''
          test:
              groups: ['test']
              description: Modifying the testcase description
              source:
                pkg: genie.libs.sdk
                class: triggers.blitz.blitz.Blitz
              devices: ['PE1']
              test_sections:
                - section:
                    - description: "section description"
                    - execute:
                        device: PE1
                        command: show version
                        save:
                          - variable_name: execute_action_output
                            as_dict:
                                rt_2_if2:
                                   rt_22: "%VARIABLES{action_output}"
    '''

    # Invalid datafile: unknown action keyword 'bad_action' (test_bad_action).
    bad_yaml1 = '''
        test:
            groups: ['test']
            source:
              pkg: genie.libs.sdk
              class: triggers.blitz.blitz.Blitz
            devices: ['PE1']
            test_sections:
                - section:
                    - bad_action:
                        device: PE1
                        command: show version
    '''

    # Invalid datafile: action given as a bare string instead of a mapping
    # (test_invalid_action).
    bad_yaml2 = '''
        test:
            groups: ['test']
            source:
              pkg: genie.libs.sdk
              class: triggers.blitz.blitz.Blitz
            devices: ['PE1']
            test_sections:
                - section1:
                    - invalid_action
    '''

    # Invalid datafile: action with no arguments at all.
    bad_yaml3 = '''
        test:
            groups: ['test']
            source:
              pkg: genie.libs.sdk
              class: triggers.blitz.blitz.Blitz
            devices: ['PE1']
            test_sections:
                - section2:
                    - empty_kwargs:
    '''

    # Invalid datafile: 'parse' targets device PE3, which is not in the
    # trigger's devices list (only PE1 is declared).
    bad_yaml4 = '''
        test:
            groups: ['test']
            source:
              pkg: genie.libs.sdk
              class: triggers.blitz.blitz.Blitz
            devices: ['PE1']
            test_sections:
                - section3:
                    - parse:
                        device: PE3
                        command: show version
    '''
def setUp(self):
dir_name = os.path.dirname(os.path.abspath(__file__))
f, self.jobfile = tempfile.mkstemp()
init_runtime(runtime)
runtime.configuration.load()
runtime.job = Job(jobfile = self.jobfile,
runtime = runtime,
**runtime.configuration.components.job)
mgr = runtime.tasks
task = mgr.Task(testscript = os.path.join(dir_name, 'mock_yamls/trigger_datafile.yaml'),
taskid = 'awesome')
self._initiate_blitz_cls(self.yaml1)
def test_init(self):
self.assertEqual(self.blitz_cls().uid, 'test.PE1')
self.assertEqual(self.blitz_cls().description, 'Modifying the testcase description')
def test_dispatcher_1(self):
blitz_discoverer = self.blitz_cls()._discover()
for section in blitz_discoverer:
new_section = section.__testcls__(section)
steps = Steps()
blitz_obj = self.blitz_cls()
self.uid = blitz_obj.uid
blitz_obj.parent = self
blitz_obj.parent.parameters = mock.Mock()
output = blitz_obj.dispatcher(steps,
self.testbed,
new_section,
section.parameters['data'])
self.assertEqual(output, { 'action': 'execute',
'alias': None,
'continue_': True,
'description': '',
'device': 'PE1',
'saved_vars': {'host': 'host'},
'filters': '(?P<host>host).*',
'step_result': Passed})
self.assertEqual(new_section.description, "section description")
def test_dispatcher_2(self):
self._initiate_blitz_cls(self.yaml2)
blitz_discoverer = self.blitz_cls()._discover()
for section in blitz_discoverer:
new_section = section.__testcls__(section)
steps = Steps()
blitz_obj = self.blitz_cls()
self.uid = blitz_obj.uid
blitz_obj.parent = self
blitz_obj.parent.parameters = mock.Mock()
output = blitz_obj.dispatcher(steps,
self.testbed,
new_section,
section.parameters['data'])
desc = section.parameters['data'][1]['parse']['description']
self.assertEqual(output['description'], desc)
self.assertIn('parse1', blitz_obj.parameters['save_variable_name'])
self.assertIsInstance(
blitz_obj.parameters['save_variable_name']
['section.parameters'], AttrDict)
self.assertIn('name1', output['saved_vars'])
    # TODO might have an issue, in real script it does stop
    # probably because of dummy steps investigate
    def test_dispatcher_section_continue_false(self):
        """Disabled: section-level 'continue: False' should stop on failure.

        Kept as a placeholder; the intended assertions are preserved below
        until the dummy-Steps behavior noted above is investigated.
        """
        pass
        # self._initiate_blitz_cls(self.yaml3)
        # blitz_discoverer = self.blitz_cls()._discover()
        # for section in blitz_discoverer:
        #     new_section = section.__testcls__(section)
        #     steps = Steps()
        #     blitz_obj = self.blitz_cls()
        #     new_section.result = Failed
        #     with self.assertRaises(AEtestFailedSignal):
        #         blitz_obj.dispatcher(steps,
        #                              self.testbed,
        #                              new_section,
        #                              section.parameters['data'])
def test_bad_action(self):
self._initiate_blitz_cls(self.bad_yaml1)
blitz_discoverer = self.blitz_cls()._discover()
for section in blitz_discoverer:
new_section = section.__testcls__(section)
steps = Steps()
with self.assertRaises(Exception):
self.blitz_cls().dispatcher(steps,
self.testbed,
new_section,
section.parameters['data'])
def test_invalid_action(self):
self._initiate_blitz_cls(self.bad_yaml2)
blitz_discoverer = self.blitz_cls()._discover()
for section in blitz_discoverer:
new_section = section.__testcls__(section)
steps = Steps()
with self.assertRaises(Exception):
self.blitz_cls().dispatcher(steps,
self.testbed,
new_section,
section.parameters['data'])
def test_save_findall(self):
self._initiate_blitz_cls(self.yaml5)
blitz_discoverer = self.blitz_cls()._discover()
for section in blitz_discoverer:
new_section = section.__testcls__(section)
steps = Steps()
blitz_obj = self.blitz_cls()
self.uid = blitz_obj.uid
blitz_obj.parent = self
blitz_obj.parent.parameters = mock.Mock()
output = blitz_obj.dispatcher(steps,
self.testbed,
new_section,
section.parameters['data'])
self.assertEqual(output, {
'action': 'execute',
'device': 'PE1',
'alias': None,
'continue_': True,
'description': '',
'saved_vars': {
'execute_output': [
'host', 'execute', 'output'
]
},
'step_result': Passed
})
def test_save_regex_var(self):
self._initiate_blitz_cls(self.yaml1)
blitz_discoverer = self.blitz_cls()._discover()
for section in blitz_discoverer:
new_section = section.__testcls__(section)
steps = Steps()
blitz_obj = self.blitz_cls()
self.uid = blitz_obj.uid
blitz_obj.parent = self
blitz_obj.parent.parameters = mock.Mock()
output = blitz_obj.dispatcher(steps,
self.testbed,
new_section,
section.parameters['data'])
self.assertEqual(output['saved_vars'], {'host': 'host'})
self.assertEqual(output['filters'], '(?P<host>host).*')
def test_invalid_device(self):
self._initiate_blitz_cls(self.bad_yaml4)
blitz_discoverer = self.blitz_cls()._discover()
for section in blitz_discoverer:
new_section = section.__testcls__(section)
steps = Steps()
with self.assertRaises(Exception):
self.blitz_cls().dispatcher(steps,
self.testbed,
new_section,
section.parameters['data'])
def _initiate_blitz_cls(self, yaml_file):
dir_name = os.path.dirname(os.path.abspath(__file__))
self.blitz_cls = Blitz
self.testbed = load(os.path.join(dir_name, 'mock_testbeds/testbed.yaml'))
self.blitz_cls.parameters = ParameterMap()
self.blitz_cls.parameters['testbed'] = self.testbed
self._mock_testbed_devs()
self.datafile = yaml.safe_load(yaml_file)
for key, value in self.datafile.items():
self.blitz_cls.uid = "{}.{}".format(key, value['devices'][0])
self.blitz_cls.parameters['test_sections'] = value['test_sections']
if value.get('description'):
self.blitz_cls.description = value['description']
def _mock_testbed_devs(self):
side_effects = {'configure': ['\n'],
'execute': ['host execute output', 'oop', 'oot', 'name'],
'parse': [{'a': '1', 'hardware': 'hardware_name'}]}
actions = ['configure', 'execute', 'parse']
for dev in self.testbed.devices:
for action in actions:
setattr(self.testbed.devices[dev], action, mock.Mock())
setattr(getattr(self.testbed.devices[dev], action), 'side_effect', side_effects[action])
def test_custom_start_step_messsage_with_variable(self):
#saved variable
Blitz.parameters['save_variable_name'] = {'command': 'sh version'}
self.blitz_obj = Blitz()
self.blitz_obj.parameters['test_sections'] = [{'section1': [{'action': {'command': 'a'}}]}]
sections = self.blitz_obj._discover()
self.section = sections[0].__testcls__(sections[0])
self.kwargs = {
'self': self.blitz_obj,
'section': self.section,
'custom_start_step_message': 'test command: %VARIABLES{command}'
}
#To get the saved variable
replaced_kwargs = get_variable(**self.kwargs)
self.assertEqual(replaced_kwargs['custom_start_step_message'], 'test command: sh version')
def test_save_as_dict(self):
self._initiate_blitz_cls(self.yaml6)
blitz_discoverer = self.blitz_cls()._discover()
for section in blitz_discoverer:
new_section = section.__testcls__(section)
steps = Steps()
blitz_obj = self.blitz_cls()
self.uid = blitz_obj.uid
blitz_obj.parent = self
blitz_obj.parent.parameters = mock.Mock()
output = blitz_obj.dispatcher(steps,
self.testbed,
new_section,
section.parameters['data'])
self.assertEqual(output['saved_vars']['execute_action_output'], {'rt_2_if2': {'rt_22': 'host execute output'}})
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 16,881 | py | 2,248 | test_blitz.py | 1,588 | 0.466797 | 0.462236 | 0 | 469 | 34.993603 | 123 |
AVProkhorov/Python_6 | 3,238,405,364,516 | b0528406b4239b7d657fb8090340ce089279c3f8 | 9429ed31f019048609209443c1c086bc67309f1f | /ะะฑัะฐั ะฟะพะดะฟะพัะปะตะดะพะฒะฐัะตะปัะฝะพััั.py | 65da678e636f33bd00b3a967dc5d88d0f6494477 | []
| no_license | https://github.com/AVProkhorov/Python_6 | 51ade6611f4d4788fce5584aa0fa8ae21d32c5f2 | 049b953fbca9ec3f388640363f2a82f67fefcedb | refs/heads/master | 2021-09-07T00:01:54.150323 | 2018-02-13T20:17:49 | 2018-02-13T20:17:49 | 108,101,107 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def f(l1, l2):
m, n = len(l1), len(l2)
S = [[0] * (n + 1) for i in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
if l1[i - 1] == l2[j - 1]:
S[i][j] = S[i - 1][j - 1] + 1
elif S[i - 1][j] >= S[i][j - 1]:
S[i][j] = S[i - 1][j]
else:
S[i][j] = S[i][j - 1]
k = 0
for i in S:
for j in i:
if j > k: k = j
return k
l1 = list(map(int, input().split()))
l2 = list(map(int, input().split()))
print(f(l1, l2)) | UTF-8 | Python | false | false | 578 | py | 26 | ะะฑัะฐั ะฟะพะดะฟะพัะปะตะดะพะฒะฐัะตะปัะฝะพััั.py | 19 | 0.34083 | 0.294118 | 0 | 22 | 24.363636 | 45 |
ThaddeusNase/profiles-rest-api | 2,396,591,793,747 | 535402443a009e5f54352d610e75161be4aa1922 | f532c19003f4e3343a952cf09ee7fb89a153833b | /profiles_api/views.py | 9fb51b3f0233efd5be02ebdecdb8de2767f41a98 | [
"MIT"
]
| permissive | https://github.com/ThaddeusNase/profiles-rest-api | c57e08b36b142f4bffae7bacd5b1549c2a24fb15 | 502f799acaa2a748253b3a516c8cd280cfda2206 | refs/heads/master | 2022-05-17T23:50:45.844277 | 2019-11-12T12:54:22 | 2019-11-12T12:54:22 | 216,558,045 | 0 | 0 | MIT | false | 2022-04-22T22:38:47 | 2019-10-21T12:04:53 | 2019-11-12T12:55:17 | 2022-04-22T22:38:45 | 27 | 0 | 0 | 2 | Python | false | false | from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import viewsets
from rest_framework import status
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
from .serializers import HelloSerializer, ProfileFeedItemSerializer, UserProfileSerializer
from profiles_api import models
from profiles_api import permissions
# Create your views here.
class HelloApiView(APIView):
# Test API VIEW
serializer_class = HelloSerializer
def get(self, request, format=None):
""" returns a List of APIView features """
an_apiview = [
"uses HTTP methods as functions (get, post, patch, put, delete)",
"is similar to a traditional Django View",
"Gives u the most control over ur app-logic",
"is mapped manually to urls"
]
# da jedes APIView function ein Response_Object zurรผckgeben muss, welches in JSON umwgeandelt wird -> ...
json_dict = {
"message": "Hello", # "message" = key, "hello" = value
"an_apiview": an_apiview
}
return Response(json_dict)
def post(self, request):
"""Create a hello message with our name"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}!'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def put(self, request):
return Response({"method": "PUT"})
def patch(self, request):
return Response({"method": "PATCH"})
def delete(self, request):
return Response({"method": "DELETE"})
class HelloViewSet(viewsets.ViewSet):
serializer_class = HelloSerializer
def list(self, request):
a_viewlist = [
"uses actions -> list, create, retrieve, updatem partial_update",
"automatically maps to url using Routers",
"Provides more functionality with less code"
]
return Response({"message": "hello", "a_viewlist": a_viewlist})
def create(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get("name")
message = f"Hello {name}!"
return Response({"message": message})
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# retrurn bestimmtes/einzelndes Object -> pk nรถtig
# maps to HTTP-Get
def retrieve(self, request, pk=None):
return Response({"http_method": "GET"})
# maps to HTTP-Put
def update(self, request, pk=None):
return Response({"http_method": "PUT"})
# handle updating part of an update
def partial_update(self, request, pk=None):
return Response({"http_method": "PATCH"})
# ViewSet function/action mapt zur http-delete-function/request
def destroy(self, request, pk=None):
return Response({"http_method": "DELETE"})
class ProfileViewSet(viewsets.ModelViewSet):
# handle creating/updating profiles
serializer_class = UserProfileSerializer
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication,) # als Tuple!
permission_classes = (permissions.UpdateOwnProfile,)
filter_backends = (filters.SearchFilter,)
search_fields = ("name", "email", )
class UserLoginApiView(ObtainAuthToken):
# handle creating user authentication tokens for user -> indem man diesen dabei angibt
# ordnet dann auth token string einem user zu
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
# handles creating, reading, updating profileFeedItems
class UserProfileFeedViewSet(viewsets.ModelViewSet):
serializer_class = ProfileFeedItemSerializer
queryset = models.ProfileFeedItem.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (
permissions.UpdateOwnProfile,
IsAuthenticated,
)
# da ProfileFeedView().user_profile = logged in user ->
def perform_create(self, serializer):
serializer.save(user_profile=self.request.user)
| UTF-8 | Python | false | false | 4,603 | py | 2 | views.py | 2 | 0.668116 | 0.666812 | 0 | 144 | 30.951389 | 113 |
hostedposted/convertunits | 1,529,008,369,181 | f471f040d63a3e7147d76ed7d9479f4df5846703 | d5931d71ae54394c02def9071f10d20747847c5c | /convertunits/formula/volume/us_liquid_gallon.py | 599bff6cbfac1726a9e6407bbaa6fd205884b9a7 | []
| no_license | https://github.com/hostedposted/convertunits | c95a92f36b4014e07170959a231fd7fa8a44b7e6 | a587a07a3c5994f02fc2938c3af4d6833157832b | refs/heads/master | 2023-05-12T10:07:15.900667 | 2021-05-30T19:43:31 | 2021-05-30T19:43:31 | 366,511,698 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | us_liquid_gallon = "us_liquid_gallon"
us_liquid_quart = "us_liquid_gallon * 4"
us_liquid_pint = "us_liquid_gallon * 8"
us_legal_cup = "us_liquid_gallon * 15.773"
us_fluid_ounce = "us_liquid_gallon * 128"
us_tablespoon = "us_liquid_gallon * 256"
us_teaspoon = "us_liquid_gallon * 768"
cubic_meter = "us_liquid_gallon / 264"
liter = "us_liquid_gallon * 3.785"
milliliter = "us_liquid_gallon * 3785"
imperial_gallon = "us_liquid_gallon / 1.201"
imperial_quart = "us_liquid_gallon * 3.331"
imperial_pint = "us_liquid_gallon * 6.661"
imperial_cup = "us_liquid_gallon * 13.323"
imperial_fluid_ounce = "us_liquid_gallon * 133"
imperial_tablespoon = "us_liquid_gallon * 213"
imperial_teaspoon = "us_liquid_gallon * 639"
cubic_foot = "us_liquid_gallon / 7.481"
cubic_inch = "us_liquid_gallon * 231"
| UTF-8 | Python | false | false | 790 | py | 164 | us_liquid_gallon.py | 163 | 0.701266 | 0.625316 | 0 | 19 | 40.578947 | 47 |
richiefoster/image_processing_app | 9,320,079,080,936 | ed088a4e4907499f49377ff46f6a4d2ce3982c01 | 8c8b78a4f081b9b9051446b315d97eb6c55ba580 | /process_images_ground.py | 3a824f4ed562b84ffd36ee9a28dfd3da399933d3 | []
| no_license | https://github.com/richiefoster/image_processing_app | ac83a8e95b8a4b0248854dfc623cd55caef5a2f5 | 1f2712df5a97403323247364a589d524413c0eea | refs/heads/master | 2023-06-01T03:16:35.893450 | 2021-07-01T16:39:17 | 2021-07-01T16:39:17 | 382,042,099 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 9 09:37:48 2021
@author: Richie Foster
"""
import pandas as pd
from PIL import Image
from PIL.ExifTags import TAGS, GPSTAGS
import os
from shapely.geometry import Point
import geopandas
import boto3
sqs_client = boto3.client('sqs', region_name='us-east-1')
def main():
file1 = open('/home/ec2-user/save_dirs.txt', 'r+')
dir_var = file1.read()
print(dir_var)
df_name = str(dir_var) + str('_dataframe')
df_name = pd.DataFrame()
dir_path = '/home/ec2-user/' + str(dir_var) + str('/')
shp_dir = dir_path + str('shp/')
exif_table = {}
for files in os.listdir(dir_path):
img_path = dir_path + files
exif_table = {}
print(img_path)
image = Image.open(img_path)
info = image._getexif()
for tag, value in info.items():
decoded = TAGS.get(tag, tag)
exif_table[decoded] = value
gps_info = {}
#things working to this point
for key in exif_table['GPSInfo'].keys():
decode = GPSTAGS.get(key,key)
gps_info[decode] = exif_table['GPSInfo'][key]
#print(gps_info)
for key in gps_info:
lat_ref = gps_info.get("GPSLatitudeRef")
lat = gps_info.get("GPSLatitude")
lon_ref = gps_info.get("GPSLongitudeRef")
lon = gps_info.get("GPSLongitude")
img_direction_tup = gps_info.get('GPSImgDirection')
(good_img_dir, dividend) = img_direction_tup
img_direction = good_img_dir / dividend
lat = list(lat)
lon = list(lon)
lat.append(lat_ref)
lon.append(lon_ref)
#lat convert
lat_deg = lat[0]
#extract lat deg from tuple
(my_lat_deg, trash1) = lat_deg
lat_min = lat[1]
(lat_x, dividend_x) = lat_min
lat_min_int = lat_x / dividend_x
#back to normal
lat_ref = lat[3]
if lat_ref == 'S':
lat_ref_sign = int(-1)
else:
lat_ref_sign = int(1)
lat_min_new = lat_min_int / 60
lat_dec = (my_lat_deg + lat_min_new) * lat_ref_sign
lat_dec = float(lat_dec)
#lon convert
lon_deg = lon[0]
#extract lon deg from tuple
(my_lon_deg, trash2) = lon_deg
lon_min = lon[1]
(lon_y, dividend_y) = lon_min
lon_min_int = lon_y / dividend_y
#back to normal
lon_ref = lon[3]
if lon_ref == 'W':
lon_ref_sign = int(-1)
else:
lon_ref_sign = int(1)
lon_min_new = lon_min_int / 60
lon_dec = (my_lon_deg + lon_min_new) * lon_ref_sign
lon_dec = float(lon_dec)
#except:
datadict = {
'Image Name': files,
'lat': lat_dec,
'lon': lon_dec,
'Heading': img_direction
}
df_name = df_name.append(datadict, ignore_index=True)
print(df_name)
csv_name = dir_path + dir_var + str('_gps_data.csv')
df_name.to_csv(csv_name)
df_name['geometry'] = df_name.apply(lambda x: Point((float(x.lon), float(x.lat))), axis=1)
df_geo_name = geopandas.GeoDataFrame(df_name, geometry='geometry')
os.mkdir(shp_dir)
shp_path = shp_dir + dir_var + str('.shp')
df_geo_name.to_file(shp_path, driver='ESRI Shapefile')
return 200
if __name__ == '__main__':
main()
if main() != 200:
error_response = sqs_client.send_message(
QueueUrl='https://sqs.us-east-1.amazonaws.com/637137674144/rf_ec2_errors',
MessageBody='PROCESS IMAGES: process_images.py returned a code other than 200')
| UTF-8 | Python | false | false | 3,823 | py | 12 | process_images_ground.py | 11 | 0.522626 | 0.507193 | 0 | 112 | 33.125 | 95 |
pixpil/gii | 10,196,252,388,065 | 28e1160e6fec261ca41eb64ebaec9c9cea3ee812 | de644b254b17a28f82e9212d80872a3d9eca2149 | /lib/gii/moai/exceptions.py | 06243f29084da8b7879afc6cad1f02ce2d41b9ec | [
"MIT"
]
| permissive | https://github.com/pixpil/gii | 506bee02b11eb412016b583d807dcfcc485e189c | ba6d94ada86d82bacae06f165567a02585264440 | refs/heads/master | 2021-12-03T06:30:31.503481 | 2021-11-24T03:02:49 | 2021-11-24T03:02:49 | 431,331,021 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | class MOAIException(Exception):
def __init__(self, code):
self.args=(code,)
self.code=code
| UTF-8 | Python | false | false | 96 | py | 882 | exceptions.py | 627 | 0.6875 | 0.6875 | 0 | 4 | 23 | 31 |
kevinamos/axploit | 11,287,174,074,699 | 3947893af8d200374de2a3d73023da26b073896c | e39e3a7ad4b8c82a341bb7e543ef7537d7fd463e | /MitM/arp_spoofing.py | 295434197419071880dbdc54093d6faf97937a29 | []
| no_license | https://github.com/kevinamos/axploit | 222094e6a23ecf3f304da31ca0577a253c8f26c2 | ce41066c37d1d6ebaea70f02b5f7636c70abb2d0 | refs/heads/master | 2021-01-25T12:08:06.293020 | 2019-07-18T08:51:24 | 2019-07-18T08:51:24 | 123,454,735 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import sys
from scapy.all import sniff, sendp, ARP, Ether
#router_ip='192.168.0.1'
from time import *
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((socket.gethostname(), 219))
my_ip=s.getsockname()[0]
s.close()
class ARP_spoofing():
def __init__(self, *args):
self.dev=''
self.stop=False
self.victim=''
self.spoofing_messages_list=[]
self.finished_arp_spoof=False
self.arp_spoof_msg=''
def arp_poison_callback(self, packet):
if self.stop == True:
print("arp sooofing stoped")
exit(0)
router_ip=''
# Got ARP request?
answer = Ether(dst=packet[ARP].hwsrc) / ARP()
if self.victim !='':
if str(packet[ARP].psrc) !=str(self.victim) and str(packet[ARP].pdst) != str(self.victim):
return 0
print(packet[ARP].psrc)
print(packet[ARP].pdst)
answer = Ether(dst=packet[ARP].hwsrc) / ARP()
answer[ARP].op = "is-at"
answer[ARP].hwdst = packet[ARP].hwsrc
answer[ARP].psrc = packet[ARP].pdst
answer[ARP].pdst = packet[ARP].psrc
#router_ip=packet[ARP].pdst
self.arp_spoof_msg="Fooling " + packet[ARP].psrc + " that " + packet[ARP].pdst + " is me"
print(self.arp_spoof_msg)
try:
sendp(answer, iface=self.dev)
except Exception as e:
print("The following error occurred " + str(e))
def start_arp_spoofing(self, *args):
self.dev=args[1]
self.victim=args[0]
print("sniffing on "+ str (self.dev))
if self.victim !='':
sniff(prn=self.arp_poison_callback,filter="arp and host "+ self.victim,iface=self.dev,store=0)
print("victim is "+str(self.victim))
else:
sniff(prn=self.arp_poison_callback,filter="arp",iface=self.dev, store=0)
print("no victim supplied") | UTF-8 | Python | false | false | 1,965 | py | 25 | arp_spoofing.py | 16 | 0.572519 | 0.563359 | 0 | 58 | 32.896552 | 106 |
nuh-temp/rest-app | 18,708,877,543,851 | 5624cd71369db4ad835ab46b9435a4de14fc5e8c | 050985681d60316ca5a12818156d0cd2cc75e692 | /tmp/mod1.py | b2e9be665b5cfa777497f28827f3885f8bc119cc | [
"Apache-2.0"
]
| permissive | https://github.com/nuh-temp/rest-app | 767d422e945f8249a5754647e0cc31e3c40d83b2 | d9276fc7bae4033778abe1b4a110446d18d84893 | refs/heads/master | 2020-05-20T02:25:33.980810 | 2016-01-21T05:13:30 | 2016-01-21T05:13:30 | 19,071,654 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
import inspect
import collections
Env = collections.namedtuple('Env', ['name'])
class BaseModel(object):
_factory_env = None
_a = None
class MyModel(BaseModel):
# logging.info('--m1--> MyModel real class init: factory_env: %s, id: %s', f, id(f))
_a = []
def __init__(self, name):
logging.info('--m1--> !!!! run __init__: %s', name)
self._type = 'model'
self._name = name
@property
def env(self):
return self._factory_env
@classmethod
def ClassEnv(cls):
return cls._factory_env or Env('NEW')
def factory(i):
# factory_env = Env(i)
# logging.info('--m1--> init factory: %s, factory_env: %s, factory_env_id: %s', i, factory_env, id(factory_env))
CopyOfModel = type('MyModel', MyModel.__bases__, dict(MyModel.__dict__))
CopyOfModel._factory_env = Env(i)
CopyOfModel._a.append(i)
return CopyOfModel
# add shortcut for Factory.Env to GetEnv class variable
# for name in dir(Factory):
# if name.startswith('__'):
# continue
# child = getattr(Factory, name)
# if not inspect.isclass(child):
# continue
# parents = inspect.getmro(child)
# # logging.info('=====> name4: %s, %s, %s', name, parents, bool(BaseModel in parents))
# if BaseModel in parents:
# child.GetEnv = Factory.Env
| UTF-8 | Python | false | false | 1,275 | py | 13 | mod1.py | 9 | 0.63451 | 0.631373 | 0 | 52 | 23.519231 | 114 |
verkaufer/animalweights | 6,966,436,999,457 | e92db80f48bb5c1bc1c0fe0b79073b61712ffee6 | 521cf27b2edd9545ee0ae94ca5b514b01ce805c7 | /animals/admin.py | cd0a3e8c20905b053272ebd2d00745d49dc224e7 | []
| no_license | https://github.com/verkaufer/animalweights | cfc300045704ab7d31ca21759fac9a2dd8364ae3 | bda54bbca5ec215276f675cfdf821028bef82148 | refs/heads/master | 2020-03-21T10:10:07.015756 | 2018-09-14T19:46:02 | 2018-09-14T19:46:02 | 138,436,704 | 0 | 0 | null | false | 2018-09-14T19:46:03 | 2018-06-23T22:06:54 | 2018-06-23T22:08:51 | 2018-09-14T19:46:03 | 9 | 0 | 0 | 0 | Python | false | null | from django.contrib import admin
# Register your models here.
from animals.models import Animal, Weight
admin.site.register(Animal)
admin.site.register(Weight)
| UTF-8 | Python | false | false | 162 | py | 10 | admin.py | 8 | 0.808642 | 0.808642 | 0 | 7 | 22.142857 | 41 |
ErickGiffoni/Python | 10,419,590,695,228 | ee125b41d2edd7ca9174a8d9975d24f07e3da230 | bb77d9cf24f74c9dfe60e67fb24ac2e9c863e542 | /Learning_Python/enumerate_function.py | 652e09ab0658cf9a9bc397a654de9c2c8ee84797 | []
| no_license | https://github.com/ErickGiffoni/Python | 687fd90a76e81cab2ae8026cf91679b608753a37 | 01f54c7db5bb2a0120169f5f75a6da6a6c6710f4 | refs/heads/master | 2020-03-29T10:35:41.051030 | 2019-03-22T14:16:16 | 2019-03-22T14:16:16 | 149,813,464 | 2 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | list = [0,1,4,7,-1,-4,1000]
for x,e in enumerate(list) :
print("[%d] %d " %(x,e), end='')
print()
| UTF-8 | Python | false | false | 102 | py | 9 | enumerate_function.py | 9 | 0.5 | 0.401961 | 0 | 4 | 24.5 | 36 |
JoyDajunSpaceCraft/leetcode_job | 5,196,910,453,693 | 44636d2769aecb662b747f5c33afeec3919cbf3b | 35a5143c2b553b756a69fb25e732d82d692c16ef | /372-super-pow.py | 843e058bdde83ef02645695e6034599b4aaf9bf5 | []
| no_license | https://github.com/JoyDajunSpaceCraft/leetcode_job | 863ae7d3e351b3163dfc623fbcd70b1b8e5cf4f1 | 73fee35139400c06a3e2d64a1f233ff4581e50c9 | refs/heads/master | 2022-10-07T03:39:54.828250 | 2020-06-10T07:43:38 | 2020-06-10T07:43:38 | 239,254,742 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # ไฝ ็ไปปๅกๆฏ่ฎก็ฎย abย ๅฏนย 1337 ๅๆจก๏ผa ๆฏไธไธชๆญฃๆดๆฐ๏ผb ๆฏไธไธช้ๅธธๅคง็ๆญฃๆดๆฐไธไผไปฅๆฐ็ปๅฝขๅผ็ปๅบใ
# ็คบไพ 1:
# ่พๅ
ฅ: a = 2, b = [3]
# ่พๅบ: 8
# ็คบไพย 2:
# ่พๅ
ฅ: a = 2, b = [1,0]
# ่พๅบ: 1024
# ่งฃ้ข https://mp.weixin.qq.com/s/GjS9ORJv3KtXEOU5WsyqYQ
class Solution(object):
def mypow(self, a, k):
base = 1337
a%=base
res = 1
for i in range(k):
res *= a
res %= base
return res
def superPow(self, a, b):
"""
:type a: int
:type b: List[int]
:rtype: int
"""
base = 1337
if not b:return 0
res = 1
for i in b:
res = self.mypow(res,10) * self.mypow(a, i)
return res % base
| UTF-8 | Python | false | false | 781 | py | 156 | 372-super-pow.py | 155 | 0.466568 | 0.419019 | 0 | 33 | 19.393939 | 55 |
yangks0522/meiduo_15 | 7,284,264,581,570 | 1a2207afddd7c545d0f10027711d62a2c4780885 | 99f89a22ddac7be391864925a94fc6d2e713b309 | /mall/celery_tasks/email/tasks.py | c98cf9a9017e89c72b0cd64b05358ecdf7fd0a74 | []
| no_license | https://github.com/yangks0522/meiduo_15 | a4a208ca7c5e50cdc79c9e988a4d6106080b0b5e | 0eb5ac946a94dea6182fcf77a33e4bddf4c4ff5d | refs/heads/master | 2020-04-04T16:10:21.671723 | 2018-11-20T11:44:49 | 2018-11-20T11:44:49 | 156,067,266 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from celery_tasks.main import app
from django.core.mail import send_mail
from mall import settings
from django.core.mail import send_mail
from users.utils import generic_active_url
@app.task(name='send_verify_mail')
def send_verify_mail(email, user_id):
subject = '็พๅคๅๅๆฟๆดป้ฎไปถ'
message = ''
from_email = settings.EMAIL_FROM
recipient_list = [email]
verify_url = generic_active_url(user_id, email)
html_message = '<p>ๅฐๆฌ็็จๆทๆจๅฅฝ๏ผ</p>' \
'<p>ๆ่ฐขๆจไฝฟ็จ็พๅคๅๅใ</p>' \
'<p>ๆจ็้ฎ็ฎฑไธบ๏ผ%s ใ่ฏท็นๅปๆญค้พๆฅๆฟๆดปๆจ็้ฎ็ฎฑ๏ผ</p>' \
'<p><a href="%s">%s<a></p>' % (email, verify_url, verify_url)
send_mail(
subject=subject,
message=message,
from_email=from_email,
recipient_list=recipient_list,
html_message=html_message
)
| UTF-8 | Python | false | false | 902 | py | 21 | tasks.py | 21 | 0.606173 | 0.606173 | 0 | 29 | 26.931034 | 80 |
ChoiSuhyeonA/infoSec | 6,098,853,579,885 | 2884c907eb22d2a68dd3d8e8bcc3b84754e28c34 | b3679a676efbf51e20beaf8dbfa147d67c4cc746 | /Integrated/PGP_All_Alice.py | 090d0d1ad7d2fc43bd788fd62505e2cf3332d650 | []
| no_license | https://github.com/ChoiSuhyeonA/infoSec | 97483d400c752e97573535155aa53464c2b167d5 | 8845402934ec2fd0e3f351c043a05436a5486bb4 | refs/heads/master | 2022-07-07T10:46:33.700605 | 2020-05-14T13:51:11 | 2020-05-14T13:51:11 | 263,926,356 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
import socket # Import socket module
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
import base64
from Crypto.Cipher import AES
from Crypto import Random
# Step 0 : Alice Generation Key of Alice
privatekey = RSA.generate(2048)
f = open('./HybridAlice/aliceprivatekey.txt','wb')
f.write(bytes(privatekey.exportKey('PEM'))); f.close()
publickey = privatekey.publickey()
f = open('./HybridAlice/received_alicepublickey.txt','wb')
f.write(bytes(publickey.exportKey('PEM'))); f.close()
# Step 1-1 : Alice(Server) Transfer Public Key
port = 10005 # Reserve a port for your service.
host = 'localhost' # Get local machine name
server_socket = socket.socket() # Create a socket object
server_socket.bind((host, port)) # Bind to the port
server_socket.listen(5) # Now wait for client connection.
print('Server listening....')
while True:
client_socket_S, addr = server_socket.accept() # Establish connection with client.
print('Got connection from', addr)
data = client_socket_S.recv(1024)
print('Server received', repr(data))
filename='./HybridAlice/received_alicepublickey.txt'
f = open(filename,'rb')
l = f.read(1024)
while (l):
client_socket_S.send(l)
print('Sent ',repr(l))
l = f.read(1024)
f.close()
print('Done sending')
client_socket_S.send(b'')
break
client_socket_S.close()
server_socket.close()
# Step 1-2 : Receive Bob's Public Key
host = 'localhost' # Get local machine name
port = 10006 # Reserve a port for your service.
client_socket_C = socket.socket() # Create a socket object
client_socket_C.connect((host, port))
client_socket_C.send(b"Client OK")
f = open('./HybridAlice/received_bobpublickey.txt', 'wb')
print('file opened')
while True:
print('receiving data...')
data = client_socket_C.recv(1024)
print('data=', data.decode('utf-8'))
if not data:
break
f.write(data)
f.close()
print('Successfully get the file')
client_socket_C.close()
print('connection closed')
# Step 2 : Alice Generate RSA Signature
# creation of signature
f = open('./HybridAlice/plaintext.txt','rb')
plaintext = f.read(); f.close()
privatekey = RSA.importKey(open('./HybridAlice/aliceprivatekey.txt','rb').read())
myhash = SHA.new(plaintext) # Generate Hash
signature = PKCS1_v1_5.new(privatekey) # Signature algo
sigVal = signature.sign(myhash) # signature value
print("Length of Signature: ", len(sigVal))
print("Signature: ", sigVal)
output = sigVal + plaintext ## concatnate message
f = open('./HybridAlice/sig_MSG_Alice.txt','wb')
f.write(bytes(output)); f.close()
# Step 3 :
# creation 256 bit session key
sessionkey = Random.new().read(32) # 256 bit
# encryption AES of the message
f = open('./HybridAlice/sig_MSG_Alice.txt','rb') ### signature.txt || plaintext
plaintext = f.read(); f.close()
iv = Random.new().read(16) # 128 bit
obj = AES.new(sessionkey, AES.MODE_CFB, iv)
ciphertext = iv + obj.encrypt(plaintext)
# encryption RSA of the session key
publickey = RSA.importKey(open('./HybridAlice/received_bobpublickey.txt','rb').read())
cipherrsa = PKCS1_OAEP.new(publickey)
enc_sessionkey = cipherrsa.encrypt(sessionkey)
print("Length of encrypted session key: ", len(enc_sessionkey)) #### Length of session key: 256 byte
print("Encrypted Session Key:", enc_sessionkey)
f = open('./HybridAlice/outputAlice.txt','wb')
f.write(bytes(enc_sessionkey))
f.write(bytes(ciphertext))
f.close()
print("*******************************************")
# Step 3-1 : Alice Send Encrypted File to Bob...
fromFile = './HybridAlice/outputAlice.txt'
toFile = './HybridAlice/b64_outputAlice.txt'
def B64Encoding(fromFile, toFile):
ff = open(fromFile, 'rb')
l = ff.read(768) # 3byte * 256 = 768
tf = open(toFile, 'wb')
while(l):
l = base64.b64encode(l)
tf.write(l)
l = ff.read(768)
tf.close()
ff.close()
B64Encoding(fromFile, toFile)
port = 10002 # Reserve a port for your service.
host = 'localhost' # Get local machine name
server_socket = socket.socket() # Create a socket object
server_socket.bind((host, port)) # Bind to the port
server_socket.listen(5) # Now wait for client connection.
print('Server listening....')
while True:
client_socket, addr = server_socket.accept() # Establish connection with client.
print('Got connection from', addr)
data = client_socket.recv(1024)
print('Server received', repr(data))
#filename='./Hybrid/received_alicepublickey.txt'
filename = toFile
f = open(filename,'rb')
l = f.read(1024)
while (l):
client_socket_S.send(l)
print('Sent ',repr(l))
l = f.read(1024)
f.close()
print('Done sending')
client_socket_S.send(b'')
break
client_socket.close()
server_socket.close() | UTF-8 | Python | false | false | 5,040 | py | 67 | PGP_All_Alice.py | 54 | 0.655159 | 0.63373 | 0 | 162 | 30.117284 | 101 |
dekanayake/sell_or_exhange | 15,393,162,826,572 | 54593835ba3b58e7f7d8522542115796c05c6a19 | 42baede403a6c13e3913abcda8d2c9e016c7aa69 | /product/migrations/0004_auto_20160628_0349.py | 89b78dd317071f53052159672e61ae304f4508b6 | []
| no_license | https://github.com/dekanayake/sell_or_exhange | e5e91084079c68de06f5b5ef7c4c419d8a10e34f | 93f652710650f2863b436f1162822970898cae4a | refs/heads/develop | 2020-12-23T08:53:00.018575 | 2017-02-26T12:10:05 | 2017-02-26T12:10:05 | 237,102,368 | 0 | 0 | null | false | 2020-04-30T14:28:15 | 2020-01-29T23:24:34 | 2020-01-29T23:28:09 | 2020-04-30T14:28:13 | 252 | 0 | 0 | 1 | Python | false | false | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-28 03:49
from __future__ import unicode_literals
from django.db import migrations, models
import product.models
class Migration(migrations.Migration):
dependencies = [
('product', '0003_temporyproductimage'),
]
operations = [
migrations.AddField(
model_name='temporyproductimage',
name='fileName',
field=models.CharField(default='a', max_length=150),
preserve_default=False,
),
migrations.AlterField(
model_name='temporyproductimage',
name='image',
field=models.FileField(upload_to=product.models.generate_temp_image_filename),
),
]
| UTF-8 | Python | false | false | 741 | py | 47 | 0004_auto_20160628_0349.py | 19 | 0.612686 | 0.581646 | 0 | 27 | 26.444444 | 90 |
amitamitamitamit/earthio | 7,567,732,425,168 | 5285fe4b79c0737ed794a8b22dea53c87ce5ced3 | b0928a7bdb9b8a47d49b3d64ec9b0f54259d1f4d | /earthio/tif.py | e7fd662322d978cec8d0b34c464c811b09d8d74b | []
| no_license | https://github.com/amitamitamitamit/earthio | ca1591e50dc0866e9236c1655aeaefe12f9bf3c2 | 766bbeb6cdcd310e1e6c3a2072aa8a72b565a0f2 | refs/heads/master | 2019-11-20T00:47:07.795954 | 2017-07-28T03:06:59 | 2017-07-28T03:06:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
------------------
``earthio.tif``
~~~~~~~~~~~~~~~~~~~
Tools for reading GeoTiff files. Typically use the interface through
- :func:`earthio.load_array`
- :func:`earthio.`load_meta`
'''
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
import copy
import gc
import logging
import os
import numpy as np
import rasterio as rio
import xarray as xr
from earthio.metadata_selection import match_meta
from earthio.util import (geotransform_to_coords,
geotransform_to_bounds,
SPATIAL_KEYS,
raster_as_2d,
READ_ARRAY_KWARGS,
take_geo_transform_from_meta,
BandSpec,
meta_strings_to_dict)
from earthio import ElmStore
from six import string_types
logger = logging.getLogger(__name__)
__all__ = ['load_tif_meta',
'load_dir_of_tifs_meta',
'load_dir_of_tifs_array',]
def load_tif_meta(filename):
'''Read the metadata of one TIF file
Parameters:
:filename: str: path and filename of TIF to read
Returns:
:file: TIF file
:meta: Dictionary with meta data about the file, including;
- **meta**: Meta attributes of the TIF file
- **geo_transform**: transform
- **bounds**: Bounds of the TIF
- **height**: Hight of the TIF
- **width**: Width of the TIF
- **name**: The filename
- **sub_dataset_name**: The filename
'''
r = rio.open(filename, driver='GTiff')
if r.count != 1:
raise ValueError('earthio.tif only reads tif files with 1 band (shape of [1, y, x]). Found {} bands'.format(r.count))
meta = {'meta': r.meta}
meta['geo_transform'] = r.get_transform()
meta['bounds'] = r.bounds
meta['height'] = r.height
meta['width'] = r.width
meta['name'] = meta['sub_dataset_name'] = filename
return r, meta_strings_to_dict(meta)
def ls_tif_files(dir_of_tiffs):
tifs = os.listdir(dir_of_tiffs)
tifs = [f for f in tifs if f.lower().endswith('.tif') or f.lower().endswith('.tiff')]
return [os.path.join(dir_of_tiffs, t) for t in tifs]
def array_template(r, meta, **reader_kwargs):
dtype = getattr(np, r.dtypes[0])
if not 'window' in reader_kwargs:
if 'height' in reader_kwargs:
height = reader_kwargs['height']
else:
height = meta['height']
if 'width' in reader_kwargs:
width = reader_kwargs['width']
else:
width = meta['width']
else:
if 'height' in reader_kwargs:
height = reader_kwargs['height']
else:
height = np.diff(reader_kwargs['window'][0])[0]
if 'width' in reader_kwargs:
width = reader_kwargs['width']
else:
width = np.diff(reader_kwargs['window'][0])[0]
return np.empty((1, height, width), dtype=dtype)
def load_dir_of_tifs_meta(dir_of_tiffs, band_specs=None, **meta):
'''Load metadata from same-directory GeoTiffs representing
different bands of the same image.
Parameters:
:dir_of_tiffs: Directory with GeoTiffs
:band_specs: List of earthio.BandSpec objects
:meta: included in returned metadata'''
logger.debug('load_dir_of_tif_meta {}'.format(dir_of_tiffs))
tifs = ls_tif_files(dir_of_tiffs)
meta = copy.deepcopy(meta)
band_order_info = []
for band_idx, tif in enumerate(tifs):
raster, band_meta = load_tif_meta(tif)
if band_specs:
for idx, band_spec in enumerate(band_specs):
if (isinstance(band_spec, BandSpec) and match_meta(band_meta, band_spec)) or (isinstance(band_spec, string_types) and band_spec in tif):
band_order_info.append((idx, tif, band_spec, band_meta))
break
else:
band_name = 'band_{}'.format(band_idx)
band_order_info.append((band_idx, tif, band_name, band_meta))
if not band_order_info or (band_specs and (len(band_order_info) != len(band_specs))):
logger.debug('len(band_order_info) {}'.format(len(band_order_info)))
raise ValueError('Failure to find all bands specified by '
'band_specs with length {}.\n'
'Found only {} of '
'them.'.format(len(band_specs), len(band_order_info)))
# error if they do not share coords at this point
band_order_info.sort(key=lambda x:x[0])
meta['band_meta'] = [b[-1] for b in band_order_info]
meta['band_order_info'] = [b[:-1] for b in band_order_info]
return meta
def open_prefilter(filename, meta, **reader_kwargs):
'''Placeholder for future operations on open file rasterio
handle like resample / aggregate or setting width, height, etc
on load. TODO see optional kwargs to rasterio.open'''
try:
r = rio.open(filename)
raster = array_template(r, meta, **reader_kwargs)
logger.debug('reader_kwargs {} raster template shape {}'.format(reader_kwargs, raster.shape))
r.read(out=raster)
return r, raster
except Exception as e:
logger.info('Failed to rasterio.open {}'.format(filename))
raise
def load_dir_of_tifs_array(dir_of_tiffs, meta, band_specs=None):
'''Return an ElmStore where each subdataset is a DataArray
Parameters:
:dir_of_tiffs: directory of GeoTiff files where each is a
single band raster
:meta: meta from earthio.load_dir_of_tifs_meta
:band_specs: list of earthio.BandSpec objects,
defaulting to reading all subdatasets
as bands
Returns:
:X: ElmStore
'''
logger.debug('load_dir_of_tifs_array: {}'.format(dir_of_tiffs))
band_order_info = meta['band_order_info']
tifs = ls_tif_files(dir_of_tiffs)
logger.info('Load tif files from {}'.format(dir_of_tiffs))
if not len(band_order_info):
raise ValueError('No matching bands with '
'band_specs {}'.format(band_specs))
native_dims = ('y', 'x')
elm_store_dict = OrderedDict()
attrs = {'meta': meta}
attrs['band_order'] = []
for (idx, filename, band_spec), band_meta in zip(band_order_info, meta['band_meta']):
band_name = getattr(band_spec, 'name', band_spec)
if not isinstance(band_spec, string_types):
reader_kwargs = {k: getattr(band_spec, k)
for k in READ_ARRAY_KWARGS
if getattr(band_spec, k)}
else:
reader_kwargs = {}
if 'buf_xsize' in reader_kwargs:
reader_kwargs['width'] = reader_kwargs.pop('buf_xsize')
if 'buf_ysize' in reader_kwargs:
reader_kwargs['height'] = reader_kwargs.pop('buf_ysize')
if 'window' in reader_kwargs:
reader_kwargs['window'] = tuple(map(tuple, reader_kwargs['window']))
# TODO multx, multy should be handled here as well?
if reader_kwargs:
multy = band_meta['height'] / reader_kwargs.get('height', band_meta['height'])
multx = band_meta['width'] / reader_kwargs.get('width', band_meta['width'])
else:
multx = multy = 1.
band_meta.update(reader_kwargs)
geo_transform = take_geo_transform_from_meta(band_spec, **attrs)
handle, raster = open_prefilter(filename, band_meta, **reader_kwargs)
raster = raster_as_2d(raster)
if getattr(band_spec, 'stored_coords_order', ['y', 'x'])[0] == 'y':
rows, cols = raster.shape
else:
rows, cols = raster.T.shape
if geo_transform is None:
band_meta['geo_transform'] = handle.get_transform()
else:
band_meta['geo_transform'] = geo_transform
band_meta['geo_transform'][1] *= multx
band_meta['geo_transform'][-1] *= multy
coords_x, coords_y = geotransform_to_coords(cols,
rows,
band_meta['geo_transform'])
elm_store_dict[band_name] = xr.DataArray(raster,
coords=[('y', coords_y),
('x', coords_x),],
dims=native_dims,
attrs=band_meta)
attrs['band_order'].append(band_name)
gc.collect()
return ElmStore(elm_store_dict, attrs=attrs)
| UTF-8 | Python | false | false | 8,725 | py | 17 | tif.py | 17 | 0.563897 | 0.561834 | 0 | 231 | 36.770563 | 152 |
hermanramos1/east_la_yp | 1,142,461,347,928 | 0d485c273b86b89aba955188d3cade196661e35b | 956456f3a16c470dc86ea2dc2f45844ac62f1d2b | /apps/main_app/admin.py | 41d94d242b6980487a7e4b009b399b88e0a1f378 | []
| no_license | https://github.com/hermanramos1/east_la_yp | 832dc869bb48c7263d839ac2b3f5f44223478642 | 3d368ebc26679bb682fa021729e710749bca7c95 | refs/heads/master | 2022-12-10T17:08:30.346113 | 2019-09-04T03:14:38 | 2019-09-04T03:14:38 | 199,695,032 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from .models import Contact, SSOT, Winter_Jubilee, Message_Board
admin.site.register(Contact)
admin.site.register(SSOT)
admin.site.register(Winter_Jubilee)
admin.site.register(Message_Board)
| UTF-8 | Python | false | false | 226 | py | 16 | admin.py | 10 | 0.814159 | 0.814159 | 0 | 8 | 27.25 | 64 |
Design-Pattern-Practice/Factory-Pattern | 5,875,515,276,617 | 51a2f46a8a9ead150a26d46845dd6e5a7ffb1d16 | c93e2965534518e53ae4fe7f7b9461d764df24f4 | /creator/conCreatorA.py | 7946db3b863f4be8623866c1127d7dafce7350cd | []
| no_license | https://github.com/Design-Pattern-Practice/Factory-Pattern | 81ed7f5854e928754de9c3a4ab517c37e06fa53a | 3b78132a3f4796d07f390032005e4757a2223a1a | refs/heads/main | 2023-05-03T21:54:12.946305 | 2021-05-25T13:58:40 | 2021-05-25T13:58:40 | 370,711,100 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .creator import Creator
from product.productA import ConcreteProductA
class ConCreatorA(Creator):
def createProduct(self,data):
return ConcreteProductA(data) | UTF-8 | Python | false | false | 176 | py | 6 | conCreatorA.py | 5 | 0.784091 | 0.784091 | 0 | 7 | 24.285714 | 45 |
roksikonja/dslab_virgo_tsi | 2,164,663,559,685 | 4ec55e998ec814fa698b966627901ba819b2d09e | 7048e155b165261dda8f853e4b673a97bb9ad8ed | /dslab_virgo_tsi/status_utils.py | e93c901d0d69c13608fb517dbc42b680dfff08fb | []
| no_license | https://github.com/roksikonja/dslab_virgo_tsi | 08dcf215c0869b822ae229d058b10afa983998bb | 14a9463a62ad3b05599ca30375faaa364ae5f972 | refs/heads/master | 2020-08-02T20:08:06.000373 | 2020-04-24T14:28:46 | 2020-04-24T14:28:46 | 211,490,464 | 0 | 0 | null | false | 2019-12-27T18:30:50 | 2019-09-28T11:32:34 | 2019-12-18T14:21:05 | 2019-12-27T18:30:50 | 200,988 | 0 | 0 | 3 | Jupyter Notebook | false | false | from enum import Enum
from threading import Lock
from time import time
from flask import render_template
class StatusField(Enum):
RUNNING = "is_running"
JOB_NAME = "job_name"
JOB_PERCENTAGE = "job_percentage"
JOB_DESCRIPTION = "job_description"
DATASET_TABLE = "dataset_table"
DATASET_LIST = []
LAST_DB_UPDATE = "last_db_update"
LAST_JOB_UPDATE = "last_job_update"
LAST_DB_TABLE_RENDER = "last_db_table_render"
RESULT_FOLDER = "result_folder"
JOB_TYPE = "job_type"
class JobType(Enum):
ANALYSIS = "analysis"
IMPORT = "import"
class Status:
def __init__(self):
self._data = {StatusField.RUNNING: False,
StatusField.JOB_NAME: "",
StatusField.JOB_PERCENTAGE: 0,
StatusField.JOB_DESCRIPTION: "",
StatusField.DATASET_TABLE: "",
StatusField.DATASET_LIST: None,
StatusField.LAST_DB_UPDATE: 0.0,
StatusField.LAST_JOB_UPDATE: 0.0,
StatusField.LAST_DB_TABLE_RENDER: 0.0,
StatusField.RESULT_FOLDER: ""}
self._lock = Lock()
self._no_include = {StatusField.DATASET_LIST, StatusField.LAST_DB_TABLE_RENDER, StatusField.RESULT_FOLDER}
def set(self, key, value):
with self._lock:
self._data[key] = value
current_time = time()
if key == StatusField.DATASET_LIST:
self._data[StatusField.LAST_DB_UPDATE] = current_time
elif key != StatusField.DATASET_TABLE:
self._data[StatusField.LAST_JOB_UPDATE] = current_time
def set_description(self, description):
self.set(StatusField.JOB_DESCRIPTION, description)
def set_percentage(self, percentage):
self.set(StatusField.JOB_PERCENTAGE, percentage)
def set_folder(self, folder):
self.set(StatusField.RESULT_FOLDER, folder)
def update_progress(self, description, percentage):
with self._lock:
self._data[StatusField.JOB_DESCRIPTION] = description
self._data[StatusField.JOB_PERCENTAGE] = percentage
self._data[StatusField.LAST_JOB_UPDATE] = time()
def set_dataset_list(self, dataset_list):
self.set(StatusField.DATASET_LIST, dataset_list)
def release(self):
self.set(StatusField.RUNNING, False)
def new_job(self, job_type: JobType, description, name):
with self._lock:
self._data[StatusField.RUNNING] = True
self._data[StatusField.JOB_TYPE] = job_type.value
self._data[StatusField.JOB_DESCRIPTION] = description
self._data[StatusField.JOB_NAME] = name
self._data[StatusField.JOB_PERCENTAGE] = 0
self._data[StatusField.LAST_JOB_UPDATE] = time()
def get(self, key):
with self._lock:
return self._data[key]
def get_folder(self):
return self.get(StatusField.RESULT_FOLDER)
def get_dataset_list(self):
return self.get(StatusField.DATASET_LIST)
def is_running(self):
return self.get(StatusField.RUNNING)
def get_json(self):
with self._lock:
# Only render table if it has changed since it was last rendered
if self._data[StatusField.LAST_DB_TABLE_RENDER] < self._data[StatusField.LAST_DB_UPDATE]:
# Render table
self._data[StatusField.DATASET_TABLE] = render_template("dataset_table.html",
datasets=self._data[StatusField.DATASET_LIST])
self._data[StatusField.LAST_DB_TABLE_RENDER] = self._data[StatusField.LAST_DB_UPDATE]
return {key.value: self._data[key] for key in self._data if key not in self._no_include}
status = Status()
| UTF-8 | Python | false | false | 3,856 | py | 40 | status_utils.py | 24 | 0.602956 | 0.600882 | 0 | 105 | 35.72381 | 118 |
AdamZhouSE/pythonHomework | 2,078,764,210,982 | 4085da57bab7eb35bc2a29ab4795981a8eda5278 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/Cases/2750/.mooctest/answer.py | 4447bf3a1332d5e59f1bda6b7a5d490da432f744 | []
| no_license | https://github.com/AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import List
class Solution:
def findMinHeightTrees(self, n: 'int',edges: 'List[List[int]]') -> 'List[int]':
if n == 1: return [0]
paths = [set() for _ in range(n)]
for node1, node2 in edges:
paths[node1].add(node2)
paths[node2].add(node1)
leaves = [node for node in range(n) if len(paths[node]) == 1]
roots = n
while roots > 2:
roots -= len(leaves)
newleaves = []
for node in leaves:
parent = paths[node].pop()
paths[parent].remove(node)
if len(paths[parent]) == 1: newleaves.append(parent)
leaves = newleaves
return leaves
n=int(input())
ed=eval(input())
print(Solution().findMinHeightTrees(n,ed)) | UTF-8 | Python | false | false | 790 | py | 45,079 | answer.py | 43,489 | 0.536709 | 0.522785 | 0 | 22 | 34.954545 | 83 |
lzxysf/python | 14,577,119,026,143 | 90769df739fb3c9a027c243a2bcc6bce2da9d0b6 | 50bd87708a19ac7003c2cc3e2834047acc684eb9 | /cli/python_049_pool.py | 2cf3fe901b06e9333f722b1e112bdfa198244d92 | []
| no_license | https://github.com/lzxysf/python | db30bef343bb02d99c3f2a0f96c4c942b69b8b52 | 7d1f09b788eec293ae0bddcc0117e2dcd8a79f0b | refs/heads/master | 2020-12-05T06:43:24.461101 | 2020-04-08T13:21:02 | 2020-04-08T13:21:02 | 232,037,659 | 0 | 0 | null | false | 2020-04-08T13:14:39 | 2020-01-06T06:23:20 | 2020-04-08T13:14:11 | 2020-04-08T13:14:38 | 0 | 0 | 0 | 1 | Python | false | false | '''
่ฟ็จๆฑ
ๅๅงๅPoolๆถ๏ผๅฏไปฅๆๅฎไธไธชๆๅคง่ฟ็จๆฐ
ๅฝๆๆฐ็่ฟ็จไปปๅกๆไบคๅฐPoolๆถ๏ผๅฆๆ่ฟ็จๆฑ ่ฟๆฒกๆๆปก๏ผ้ฃไนๅฐฑไผๅจ็ฉบ้ฒ่ฟ็จไธญๆง่ก่ฏฅไปปๅก๏ผๅฆๆ่ฟ็จๆฑ ๆปกไบ๏ผๅฐฑไผ็ญๅพ
่ฟ็จๆฑ ๆ็ฉบ้ฒไฝ็ฝฎๅๅๆง่ก่ฏฅ่ฟ็จ
'''
from multiprocessing import Pool
def proc_msg(msg):
print('่ฟ็จๅค็็ๆฐๆฎmsgไธบ{}'.format(msg))
pool = Pool(3)
for i in range(10):
pool.apply_async(proc_msg, (i,))
pool.close() # ๅ
ณ้ญ่ฟ็จๆฑ ๏ผๅ
ณ้ญๅpoolไธๅๆฅๅๆฐ็่ฏทๆฑ
pool.join() # ็ญๅพ
poolไธญๆๆๅญ่ฟ็จๆง่กๅฎๆฏ๏ผๅฟ
้กปๆพๅจๆๆclose่ฟ็จไนๅ
# ๅฆๆไธ่ฐ็จpool.join(),็ถ่ฟ็จๆง่กๅฎๆฏๅ๏ผๅญ่ฟ็จไนไผ่ท็ๅฎ่๏ผๆญคๆถ่ฆๆณๆญฃๅธธๆง่ก๏ผ็ถ่ฟ็จไธ่ฝ็ปๆ๏ผๅฏไปฅๅพช็ฏ๏ผๅฆไธ
# while True:
# pass
'''
multiprocessing.Poolๅธธ็จๅฝๆฐ่งฃๆ๏ผ
apply_async(func[, args[, kwds]]) ๏ผไฝฟ็จ้้ปๅกๆนๅผ่ฐ็จfunc๏ผๅนถ่กๆง่ก๏ผๅ ตๅกๆนๅผๅฟ
้กป็ญๅพ
ไธไธไธช่ฟ็จ้ๅบๆ่ฝๆง่กไธไธไธช่ฟ็จ,
argsไธบไผ ้็ปfunc็ๅๆฐๅ่กจ๏ผkwdsไธบไผ ้็ปfunc็ๅ
ณ้ฎๅญๅๆฐๅ่กจ
ๆณจ่งฃ๏ผไฝฟ็จๆญคๆนๆณ๏ผไธ็ดๅpoolไธญๆทปๅ ่ฟ็จไปปๅกไนไธไผ้ปๅก๏ผ่ฟ็จไผๅจ่ฟ็จๆฑ ๆ้็ญๅพ
่ขซๅค็
apply(func[, args[, kwds]])๏ผไฝฟ็จ้ปๅกๆนๅผ่ฐ็จfunc
ๆณจ่งฃ๏ผไฝฟ็จๆญคๆนๆณ๏ผไผ่ขซ้ปๅกใๅณไฝฟ่ฟ็จๆฑ ๆฒกๆๆปก๏ผไฝฟ็จๆญคๆนๆณๆไบคไธไธช่ฟ็จไปปๅกๅ๏ผ่ฏฅๆนๆณไผไธ็ด้ปๅกๅฐ่ฏฅ่ฟ็จๆง่กๅฎๆฏ
close()๏ผๅ
ณ้ญPool๏ผไฝฟๅ
ถไธๅๆฅๅๆฐ็ไปปๅก
terminate()๏ผไธ็ฎกไปปๅกๆฏๅฆๅฎๆ๏ผ็ซๅณ็ปๆญข
join()๏ผไธป่ฟ็จ้ปๅก๏ผ็ญๅพ
ๅญ่ฟ็จ็้ๅบ๏ผ ๅฟ
้กปๅจcloseๆterminateไนๅไฝฟ็จ
'''
| UTF-8 | Python | false | false | 1,657 | py | 91 | python_049_pool.py | 72 | 0.771496 | 0.767962 | 0 | 41 | 19.707317 | 79 |
innvariant/pyklopp | 12,086,038,000,799 | acd36301fa3f50b809beddda2e8dcc1423ec0a51 | 03875da107c5a1331e6b450d5d07d934d173d216 | /pyklopp/console/commands/train.py | d7576cd4bd79250fed7b7261e0c24cc5d1abc679 | []
| no_license | https://github.com/innvariant/pyklopp | 3cfe94eeefb017bd159161ef7b01e7ad66c50b33 | 9704688f3cedbf8546a490db7fcbaf0b56a499e3 | refs/heads/master | 2023-05-25T19:15:43.132859 | 2020-10-05T14:28:11 | 2020-10-05T14:28:11 | 248,254,719 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import os
import random
import socket
import time
import uuid
import ignite
import numpy as np
import torch
from cleo import Command
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Events
from ignite.engine import create_supervised_evaluator
from ignite.engine import create_supervised_trainer
import pyklopp.metadata as pkmd
from pyklopp import __version__
from pyklopp import subpackage_import
from pyklopp.loading import add_local_path_to_system
from pyklopp.loading import load_modules
from pyklopp.util import load_custom_config
from pyklopp.util import load_dataset_from_argument
from pyklopp.util import save_paths_obtain_and_check
class TrainCommand(Command):
"""
Trains a model
train
{model : Path to the pytorch model file}
{dataset : Data set module for training}
{--m|modules=* : Optional module file to load.}
{--c|config=* : Configuration JSON string or file path.}
{--s|save= : Path (including name) to save the model to}
"""
def handle(self):
# Early check for save path
save_path_base, model_file_name = save_paths_obtain_and_check(self)
# Model file path (a persisted .pth pytorch model)
model_root_path = self.argument("model")
if not os.path.exists(model_root_path):
raise ValueError('Model not found in path "%s"' % model_root_path)
# Add current absolute path to system path to load local modules
# If initialized a module previously from a local module, then it must be available in path later again
add_local_path_to_system(self.info)
"""
Optional (local) module to load.
There several functionalities can be bundled at one place.
"""
modules_option = self.option("modules")
loaded_modules = load_modules(modules_option)
"""
Load dataset module file
"""
# `dataset` will be the dataset class, should be of type 'torch.utils.data.Dataset'
# `fn_get_dataset` optional function to load the dataset based on the allocated configuration
# `class_dataset` optional class which will be instanatiated with the configuration sub key 'dataset_config'
# Either 'my_dataset' / 'my_dataset.py' or s.th. like 'torchvision.datasets.cifar.CIFAR10' or 'torchvision.datasets.mnist.MNIST'
dataset_argument = str(self.argument("dataset"))
"""
Assemble configuration with pre-defined config and user-defined json/file.
"""
config = {
"global_unique_id": str(uuid.uuid4()),
"pyklopp_version": __version__,
"loaded_modules": loaded_modules,
"python_seed_initial": None,
"python_seed_random_lower_bound": 0,
"python_seed_random_upper_bound": 10000,
"python_cwd": os.getcwd(),
"hostname": socket.gethostname(),
"time_config_start": time.time(),
"model_root_path": model_root_path,
"model_persistence_name": model_file_name, # If later set to None/empty, model will not be persisted
"save_path_base": save_path_base,
"config_persistence_name": "config.json",
"config_key": "training",
"gpus_exclude": [],
"gpu_choice": None, # if None, then random uniform of all available is chosen
"num_epochs": 10,
"batch_size": 100,
"learning_rate": 0.01,
"argument_dataset": dataset_argument,
"get_dataset_transformation": "pyklopp.defaults.get_transform",
"get_optimizer": "pyklopp.defaults.get_optimizer",
"get_loss": "pyklopp.defaults.get_loss",
"get_dataset_test": None,
}
# TODO metadata will replace config in future releases
metadata = pkmd.init_metadata()
metadata.system_loaded_modules = loaded_modules
# Load user-defined configuration
if self.option("config"):
for config_option in self.option("config"):
config_option = str(config_option)
user_config = load_custom_config(config_option)
config.update(user_config)
# Dynamic configuration computations.
# E.g. random numbers or seeds etc.
# If desired set an initial (global) random seed
if config["python_seed_initial"] is not None:
random.seed(int(config["python_seed_initial"]))
# Generate a (local) initial seed, which might depend on the initial global seed
# This enables reproducibility
a = config["python_seed_random_lower_bound"]
b = config["python_seed_random_upper_bound"]
python_seed_local = random.randint(a, b)
config["python_seed_local"] = (
python_seed_local
if "python_seed_local" not in config
else config["python_seed_local"]
)
random.seed(config["python_seed_local"])
np.random.seed(config["python_seed_local"])
torch.manual_seed(config["python_seed_local"])
torch.cuda.manual_seed(config["python_seed_local"])
# Re-Check file path for persistence before going into training
if (
config["model_persistence_name"] is not None
and len(config["model_persistence_name"]) > 0
):
model_file_name = config["model_persistence_name"]
model_file_path = os.path.join(save_path_base, model_file_name)
if os.path.exists(model_file_path):
raise ValueError(
'Model file path "%s" already exists' % model_file_path
)
config["time_config_end"] = time.time()
"""
Load configured dataset for training.
"""
self.info("Loading dataset.")
dataset = load_dataset_from_argument(dataset_argument, config)
config["dataset"] = str(dataset.__class__.__name__)
n_training_samples = len(dataset)
train_sampler = torch.utils.data.SubsetRandomSampler(
np.arange(n_training_samples, dtype=np.int64)
)
config["time_dataset_loading_start"] = time.time()
train_loader = torch.utils.data.DataLoader(
dataset,
batch_size=config["batch_size"],
sampler=train_sampler,
num_workers=2,
)
dataiter = iter(train_loader)
next(dataiter)
config["time_dataset_loading_end"] = time.time()
# Determine device to use (e.g. cpu, gpu:0, gpu:1, ..)
if torch.cuda.is_available():
cuda_no = config["gpu_choice"]
if cuda_no is None:
cuda_no = np.random.choice(
np.setdiff1d(
np.arange(torch.cuda.device_count()), config["gpus_exclude"]
)
)
elif cuda_no in config["gpus_exclude"]:
raise ValueError(
"Your configured GPU device number is in the exclusion list of your configuration."
)
device = torch.device("cuda:%s" % cuda_no)
else:
device = torch.device("cpu")
config["device"] = str(device)
# Load the model
try:
model = torch.load(model_root_path, map_location=device)
except ModuleNotFoundError as e:
raise ValueError("Could not find module when loading model: %s" % e)
model.to(device)
config["model_pythonic_type"] = str(type(model))
fn_get_optimizer = subpackage_import(config["get_optimizer"])
optimizer = fn_get_optimizer(model.parameters(), **config)
config["optimizer"] = str(optimizer.__class__.__name__)
fn_get_loss = subpackage_import(config["get_loss"])
fn_loss = fn_get_loss(**config)
config["loss"] = str(fn_loss.__class__.__name__)
self.info("Configuration:")
self.info(json.dumps(config, indent=2, sort_keys=True))
"""
The training loop.
"""
trainer = create_supervised_trainer(model, optimizer, fn_loss, device=device)
pbar = ProgressBar(persist=True)
pbar.attach(trainer, metric_names="all")
metric_accuracy = ignite.metrics.Accuracy()
metric_precision = ignite.metrics.Precision(average=False)
metric_recall = ignite.metrics.Recall(average=False)
metric_f1 = (
metric_precision * metric_recall * 2 / (metric_precision + metric_recall)
).mean()
evaluation_metrics = {
"accuracy": metric_accuracy,
"precision": ignite.metrics.Precision(average=True),
"recall": ignite.metrics.Recall(average=True),
"f1": metric_f1,
"loss": ignite.metrics.Loss(fn_loss),
}
evaluator = create_supervised_evaluator(
model, metrics=evaluation_metrics, device=device
)
histories = {metric_name: [] for metric_name in evaluation_metrics}
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
evaluator.run(train_loader)
metrics = evaluator.state.metrics
metric_infos = []
for metric_name in evaluation_metrics:
histories[metric_name].append(metrics[metric_name])
metric_infos.append(
"{metric}: {val:.3}".format(
metric=metric_name, val=metrics[metric_name]
)
)
pbar.log_message(
"Training - Epoch: {}, ".format(engine.state.epoch)
+ ", ".join(metric_infos)
)
config["time_model_training_start"] = time.time()
trainer.run(train_loader, max_epochs=config["num_epochs"])
config["time_model_training_end"] = time.time()
# Add the collected histories after each completed epoch to config
for metric_name in evaluation_metrics:
config["training_%s" % metric_name] = histories[metric_name]
"""
Evaluation
"""
if "get_dataset_test" in config and config["get_dataset_test"] is not None:
fn_get_dataset_test = subpackage_import(config["get_dataset_test"])
dataset_test = fn_get_dataset_test(**config)
config["dataset_test"] = str(dataset_test.__class__.__name__)
dataset_test_length = len(dataset_test)
test_sampler = torch.utils.data.SubsetRandomSampler(
np.arange(dataset_test_length, dtype=np.int64)
)
test_loader = torch.utils.data.DataLoader(
dataset_test,
batch_size=config["batch_size"],
sampler=test_sampler,
num_workers=2,
)
config["time_model_evaluation_start"] = time.time()
evaluation_state = evaluator.run(test_loader)
config["time_model_evaluation_end"] = time.time()
for metric_name in evaluation_metrics:
config["evaluation_%s" % metric_name] = np.float(
evaluation_state.metrics[metric_name]
)
"""
Optional configuration & model persistence.
"""
if save_path_base is not None:
if (
config["model_persistence_name"] is not None
and len(config["model_persistence_name"]) > 0
):
model_file_name = config["model_persistence_name"]
model_file_path = os.path.join(save_path_base, model_file_name)
# If the model file path already exists, increment the name as long as we find a free name
model_file_name_increment = 1
model_file_name_parts = config["model_persistence_name"].split(".")
model_file_name_ending = model_file_name_parts[-1]
model_file_name_head = ".".join(model_file_name_parts[:-1])
while os.path.exists(model_file_path):
self.info(
'Model persistence path "%s" already exists. Choosing new one automatically.'
% model_file_path
)
model_file_name = ".".join(
[
model_file_name_head,
model_file_name_increment,
model_file_name_ending,
]
)
model_file_path = os.path.join(save_path_base, model_file_name)
model_file_name_increment += 1
# Write model file name back in to config in case it changed during free-name-search
config["model_persistence_name"] = model_file_name
config["model_persistence_path"] = model_file_path
self.info('Saving to "%s"' % model_file_path)
config["time_model_save_start"] = time.time()
torch.save(model, model_file_path)
config["time_model_save_end"] = time.time()
config_file_name = config["config_persistence_name"]
config_file_path = os.path.join(save_path_base, config_file_name)
full_config = {}
if os.path.exists(config_file_path):
# Load possible existing config
with open(config_file_path, "r") as handle:
full_config = json.load(handle)
config_key = config["config_key"]
if config_key not in full_config:
# Add config in the dict with a sub-key, e.g. { 'train': {..} }
full_config[config_key] = config
else:
# Add config in a list of the dict, e.g. { 'train': [{previous_config}, {..}] }
if type(full_config[config_key]) is not list:
# Make sure, the config-key is a list, e.g. "{ 'train': {xyz} }" -> "{ 'train': [{xyz}] }"
full_config[config_key] = [full_config[config_key]]
full_config[config_key].append(config)
self.info('Writing configuration to "%s"' % config_file_path)
with open(config_file_path, "w") as handle:
json.dump(full_config, handle, indent=2, sort_keys=True)
self.info("Final configuration:")
self.info(json.dumps(config, indent=2, sort_keys=True))
self.info("Done.")
| UTF-8 | Python | false | false | 14,537 | py | 26 | train.py | 20 | 0.573365 | 0.570613 | 0 | 353 | 40.181303 | 136 |
keremistan/Data-Storage-with-Influx | 17,514,876,658,484 | ecbca9d50b9bf7f23d84ccdbfc6e962a8fc04edc | 1dc2e09c55de6b1c952e0b6d550e740884d5c56c | /LatencyVisualisation/temp_storage.py | 150d984b3964bad6c097fcda1ad6096a4de32617 | []
| no_license | https://github.com/keremistan/Data-Storage-with-Influx | b6e93c355728945d89e3f61a024ee9486143ccff | fa29150625c2ca05ab26136c102e594d71fca373 | refs/heads/master | 2023-05-12T14:26:00.799638 | 2020-01-11T07:48:37 | 2020-01-11T07:48:37 | 223,602,495 | 0 | 0 | null | false | 2023-05-01T21:19:19 | 2019-11-23T14:31:58 | 2020-01-11T07:48:45 | 2023-05-01T21:19:19 | 22 | 0 | 0 | 1 | Python | false | false | from queue import Queue
# class TemporaryDataStorage(Queue):
# __instance = None
# @staticmethod
# def get_instance():
# if TemporaryDataStorage.__instance == None:
# TemporaryDataStorage()
# return TemporaryDataStorage.__instance
# def __init__(self):
# if TemporaryDataStorage.__instance != None:
# raise Exception("This class is singleton!")
# else:
# TemporaryDataStorage.__instance = self
@Singleton
class TemporaryDataStorage(Queue):
__instance = None
@staticmethod
def get_instance():
if TemporaryDataStorage.__instance == None:
TemporaryDataStorage()
return TemporaryDataStorage.__instance
def __init__(self):
if TemporaryDataStorage.__instance != None:
raise Exception("This class is singleton!")
else:
TemporaryDataStorage.__instance = self
| UTF-8 | Python | false | false | 927 | py | 10 | temp_storage.py | 5 | 0.615965 | 0.615965 | 0 | 34 | 26.264706 | 57 |
rhoboro/events | 19,052,474,940,876 | ff850d596354c569ee68b4f060db2eacd6f2d6e0 | e627f9734fc3176f51b5ca40086fbf15f36cd65f | /pycon.jp.20221014/src/hello_world.py | 5b74496079a260b82d50c91a1bee2236771042bd | []
| no_license | https://github.com/rhoboro/events | ac5eeb1826084dcb7584b5d6882b0755ac4add41 | 54c25e4e7e5c493a86f196c9aa395773c4642bf6 | refs/heads/main | 2023-05-27T05:08:42.678190 | 2023-01-28T23:59:39 | 2023-01-28T23:59:39 | 150,962,154 | 7 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def hello():
# This is a comment line
print("Hello, world")
if __name__ == "__main__":
hello()
| UTF-8 | Python | false | false | 109 | py | 51 | hello_world.py | 17 | 0.513761 | 0.513761 | 0 | 7 | 14.571429 | 28 |
jwilk/i18nspector | 7,000,796,701,150 | ec8070d7fb621db91cfbbfb9dcce235ec94d2b9d | 5e734cd4e071272688ab635243290936c5c2db40 | /tests/test_strformat_c.py | ebb476f5002b45a7e8b7aa255f72c1d71e6ae25b | [
"MIT"
]
| permissive | https://github.com/jwilk/i18nspector | a2a4aecee00de9cfb8d9a0354614f7413e19f1b9 | d9762416937399b81abaedc9ddcdc36dbda1c318 | refs/heads/master | 2023-09-04T12:32:35.255101 | 2023-08-22T08:41:50 | 2023-08-22T08:41:50 | 29,258,684 | 2 | 3 | MIT | false | 2022-06-27T19:04:57 | 2015-01-14T18:22:23 | 2022-02-11T14:10:31 | 2022-06-27T19:04:54 | 5,178 | 1 | 4 | 8 | Python | false | false | # Copyright ยฉ 2014-2022 Jakub Wilk <jwilk@jwilk.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the โSoftwareโ), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED โAS ISโ, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import struct
import sys
import unittest.mock
import lib.strformat.c as M
from .tools import (
assert_equal,
assert_greater,
assert_is_instance,
assert_raises,
assert_sequence_equal,
collect_yielded,
)
def test_INT_MAX():
struct.pack('=i', M.INT_MAX)
with assert_raises(struct.error):
struct.pack('=i', M.INT_MAX + 1)
def is_glibc():
try:
os.confstr('CS_GNU_LIBC_VERSION')
except (ValueError, OSError):
return False
return True
def test_NL_ARGMAX():
    # Cross-check the hard-coded NL_ARGMAX against the C library's own
    # sysconf value; this comparison is only meaningful on Linux with glibc.
    plat = sys.platform
    if plat.startswith('linux') and is_glibc():
        assert_equal(
            M.NL_ARGMAX,
            os.sysconf('SC_NL_ARGMAX')
        )
    else:
        raise unittest.SkipTest('Test specific to Linux with glibc')
# Decorator that patches NL_ARGMAX down to a small value (42) for the
# duration of a test.  Setting NL_ARGMAX to a small number makes the
# *_index_out_of_range() tests much faster, since they would otherwise
# have to build format strings with thousands of numbered arguments.
small_NL_ARGMAX = unittest.mock.patch('lib.strformat.c.NL_ARGMAX', 42)
def test_lone_percent():
    # A bare '%' with no conversion specifier after it is malformed.
    with assert_raises(M.Error):
        M.FormatString('%')
def test_invalid_conversion_spec():
    # '!' is not a valid conversion character.
    with assert_raises(M.Error):
        M.FormatString('%!')
def test_add_argument():
    # add_argument() is an internal method; calling it after parsing has
    # finished must be rejected.
    fmt = M.FormatString('%s')
    with assert_raises(RuntimeError):
        fmt.add_argument(2, None)
def test_text():
    """Literal text between conversions is kept as separate tokens."""
    parsed = M.FormatString('eggs%dbacon%dspam')
    # Three literal chunks interleaved with two conversions -> 5 tokens.
    assert_equal(len(parsed), 5)
    tokens = list(parsed)
    for index, expected in ((0, 'eggs'), (2, 'bacon'), (4, 'spam')):
        assert_equal(tokens[index], expected)
class test_types:
    """Check the C argument type inferred for every conversion specifier."""
    def t(self, s, tp, warn_type=None, integer=False):
        """Parse *s* and assert its single conversion consumes type *tp*.

        warn_type: expected class of the single emitted warning, or None
        for "no warnings".  integer: expected Conversion.integer flag.
        """
        fmt = M.FormatString(s)
        [conv] = fmt
        assert_is_instance(conv, M.Conversion)
        assert_equal(conv.type, tp)
        if tp == 'void':
            # 'void' conversions (%%, %m) consume no argument at all.
            assert_sequence_equal(fmt.arguments, [])
        else:
            [[arg]] = fmt.arguments
            assert_equal(arg.type, tp)
        if warn_type is None:
            assert_sequence_equal(fmt.warnings, [])
        else:
            [warning] = fmt.warnings
            assert_is_instance(warning, warn_type)
        assert_equal(conv.integer, integer)
    @collect_yielded
    def test_integer(self):
        # %n variants store through a pointer, hence the ' *' suffix and
        # integer=False; every other integer conversion sets integer=True.
        def t(s, tp, warn_type=None):
            if s[-1] == 'n':
                tp += ' *'
                integer = False
            else:
                integer = True
            return (self.t, s, tp, warn_type, integer)
        for c in 'din':
            yield t('%hh' + c, 'signed char')
            yield t('%h' + c, 'short int')
            yield t('%' + c, 'int')
            yield t('%l' + c, 'long int')
            yield t('%ll' + c, 'long long int')
            yield t('%L' + c, 'long long int', M.NonPortableConversion)
            yield t('%q' + c, 'long long int', M.NonPortableConversion)
            yield t('%j' + c, 'intmax_t')
            yield t('%z' + c, 'ssize_t')
            yield t('%Z' + c, 'ssize_t', M.NonPortableConversion)
            yield t('%t' + c, 'ptrdiff_t')
        for c in 'ouxX':
            yield t('%hh' + c, 'unsigned char')
            yield t('%h' + c, 'unsigned short int')
            yield t('%' + c, 'unsigned int')
            yield t('%l' + c, 'unsigned long int')
            yield t('%ll' + c, 'unsigned long long int')
            yield t('%L' + c, 'unsigned long long int', M.NonPortableConversion)
            yield t('%q' + c, 'unsigned long long int', M.NonPortableConversion)
            yield t('%j' + c, 'uintmax_t')
            yield t('%z' + c, 'size_t')
            yield t('%Z' + c, 'size_t', M.NonPortableConversion)
            yield t('%t' + c, '[unsigned ptrdiff_t]')
    @collect_yielded
    def test_double(self):
        t = self.t
        for c in 'aefgAEFG':
            yield t, ('%' + c), 'double'
            yield t, ('%l' + c), 'double', M.NonPortableConversion
            yield t, ('%L' + c), 'long double'
    @collect_yielded
    def test_char(self):
        t = self.t
        yield t, '%c', 'char'
        yield t, '%lc', 'wint_t'
        yield t, '%C', 'wint_t', M.NonPortableConversion
        yield t, '%s', 'const char *'
        yield t, '%ls', 'const wchar_t *'
        yield t, '%S', 'const wchar_t *', M.NonPortableConversion
    @collect_yielded
    def test_void(self):
        t = self.t
        yield t, '%p', 'void *'
        yield t, '%m', 'void'
        yield t, '%%', 'void'
    @collect_yielded
    def test_c99_macros(self):
        # The t() closure deliberately reads c, n and unsigned from the
        # enclosing loops at call time, hence the pylint suppression.
        # pylint: disable=undefined-loop-variable
        def _t(s, tp):
            return self.t(s, tp, integer=True)
        def t(s, tp):
            return (
                _t,
                f'%<{s.format(c=c, n=n)}>',
                ('u' if unsigned else '') + tp.format(n=n)
            )
        # pylint: enable=undefined-loop-variable
        for c in 'diouxX':
            unsigned = c not in 'di'
            for n in {8, 16, 32, 64}:
                yield t('PRI{c}{n}', 'int{n}_t')
                yield t('PRI{c}LEAST{n}', 'int_least{n}_t')
                yield t('PRI{c}FAST{n}', 'int_fast{n}_t')
            yield t('PRI{c}MAX', 'intmax_t')
            yield t('PRI{c}PTR', 'intptr_t')
class test_invalid_length:
    """Length modifiers that are invalid for a conversion raise LengthError."""
    def t(self, s):
        with assert_raises(M.LengthError):
            M.FormatString(s)
    _lengths = ['hh', 'h', 'l', 'll', 'q', 'j', 'z', 't', 'L']
    @collect_yielded
    def test_double(self):
        t = self.t
        for c in 'aefgAEFG':
            for l in self._lengths:
                # 'l' and 'L' are valid for floating-point conversions.
                if l in 'lL':
                    continue
                yield t, ('%' + l + c)
    @collect_yielded
    def test_char(self):
        t = self.t
        for c in 'cs':
            for l in self._lengths:
                # '%lc'/'%ls' are valid; every other length is not, and the
                # uppercase variants (%C, %S) accept no length at all.
                if l != 'l':
                    yield t, '%' + l + c
                yield t, ('%' + l + c.upper())
    @collect_yielded
    def test_void(self):
        t = self.t
        for c in 'pm%':
            for l in self._lengths:
                yield t, ('%' + l + c)
class test_numeration:
    """Tests for explicit '%n$' argument numbering."""
    def test_percent(self):
        # '%%' consumes no argument, so it must not carry an index.
        with assert_raises(M.ForbiddenArgumentIndex):
            M.FormatString('%1$%')
    def test_errno(self):
        # FIXME?
        fmt = M.FormatString('%1$m')
        assert_equal(len(fmt), 1)
        assert_equal(len(fmt.arguments), 0)
    def test_swapped(self):
        fmt = M.FormatString('%2$s%1$d')
        assert_equal(len(fmt), 2)
        [a1], [a2] = fmt.arguments
        assert_equal(a1.type, 'int')
        assert_equal(a2.type, 'const char *')
    def test_numbering_mixture(self):
        # Numbered and unnumbered conversions may not be mixed.
        def t(s):
            with assert_raises(M.ArgumentNumberingMixture):
                M.FormatString(s)
        t('%s%2$s')
        t('%2$s%s')
    @small_NL_ARGMAX
    def test_index_out_of_range(self):
        # Index 0 is invalid; indices run from 1 through NL_ARGMAX.
        with assert_raises(M.ArgumentRangeError):
            M.FormatString('%0$d')
        def fs(n):
            s = str.join('', (
                f'%{i}$d'
                for i in range(1, n + 1)
            ))
            return M.FormatString(s)
        fmt = fs(M.NL_ARGMAX)
        assert_equal(len(fmt), M.NL_ARGMAX)
        assert_equal(len(fmt.arguments), M.NL_ARGMAX)
        with assert_raises(M.ArgumentRangeError):
            fs(M.NL_ARGMAX + 1)
    def test_initial_gap(self):
        # Every index up to the highest one used must be present.
        with assert_raises(M.MissingArgument):
            M.FormatString('%2$d')
    def test_gap(self):
        with assert_raises(M.MissingArgument):
            M.FormatString('%3$d%1$d')
class test_redundant_flag:
    """Flag combinations that are legal but useless emit RedundantFlag."""
    def t(self, s):
        fmt = M.FormatString(s)
        [exc] = fmt.warnings
        assert_is_instance(exc, M.RedundantFlag)
    def test_duplicate(self):
        self.t('%--17d')
    def test_minus_zero(self):
        # '-' (left-justify) makes '0' padding meaningless.
        self.t('%-017d')
    def test_plus_space(self):
        # '+' overrides ' ', making the space flag redundant.
        self.t('%+ d')
    # TODO: Check for other redundant flags, for example “%+s”.
class test_expected_flag:
    """Flags applied to conversions that accept them parse without error."""
    def t(self, s):
        fmt = M.FormatString(s)
        assert_equal(len(fmt), 1)
    @collect_yielded
    def test_hash(self):
        for c in 'oxXaAeEfFgG':
            yield self.t, ('%#' + c)
    @collect_yielded
    def test_zero(self):
        for c in 'diouxXaAeEfFgG':
            yield self.t, ('%0' + c)
    @collect_yielded
    def test_apos(self):
        # "'" requests thousands grouping (glibc extension).
        for c in 'diufFgG':
            yield self.t, ("%'" + c)
    @collect_yielded
    def test_other(self):
        for flag in '- +I':
            for c in 'diouxXaAeEfFgGcCsSpm':
                yield self.t, ('%' + flag + c)
class test_unexpected_flag:
    """Flags applied to conversions that cannot take them raise FlagError."""
    def t(self, s):
        with assert_raises(M.FlagError):
            M.FormatString(s)
    @collect_yielded
    def test_hash(self):
        for c in 'dicCsSnpm%':
            yield self.t, ('%#' + c)
    @collect_yielded
    def test_zero(self):
        for c in 'cCsSnpm%':
            yield self.t, ('%0' + c)
    @collect_yielded
    def test_apos(self):
        for c in 'oxXaAeEcCsSnpm%':
            yield self.t, ("%'" + c)
    @collect_yielded
    def test_other(self):
        # '%%' and '%n' accept no flags at all.
        for c in '%n':
            for flag in '- +I':
                yield self.t, ('%' + flag + c)
class test_width:
    """Tests for field-width parsing: literal, '*', and '*m$' forms."""
    @collect_yielded
    def test_ok(self):
        def t(s):
            fmt = M.FormatString(s)
            assert_equal(len(fmt), 1)
        for c in 'diouxXaAeEfFgGcCsSp':
            yield t, ('%1' + c)
        yield t, '%1m' # FIXME?
    def test_invalid(self):
        # '%%' and '%n' may not carry a width.
        for c in '%n':
            with assert_raises(M.WidthError):
                M.FormatString('%1' + c)
    def test_too_large(self):
        # Widths are limited to INT_MAX.
        fmt = M.FormatString(f'%{M.INT_MAX}d')
        assert_equal(len(fmt), 1)
        assert_equal(len(fmt.arguments), 1)
        with assert_raises(M.WidthRangeError):
            M.FormatString(f'%{M.INT_MAX + 1}d')
    def test_variable(self):
        # '*' consumes an extra int argument for the width.
        fmt = M.FormatString('%*s')
        assert_equal(len(fmt), 1)
        assert_equal(len(fmt.arguments), 2)
        [a1], [a2] = fmt.arguments
        assert_equal(a1.type, 'int')
        assert_equal(a2.type, 'const char *')
    def _test_index(self, i):
        # '*i$' takes the width from explicitly-numbered argument i.
        fmt = M.FormatString(f'%2$*{i}$s')
        assert_equal(len(fmt), 1)
        assert_equal(len(fmt.arguments), 2)
        [a1], [a2] = fmt.arguments
        assert_equal(a1.type, 'int')
        assert_equal(a2.type, 'const char *')
    def test_index(self):
        self._test_index(1)
    def test_leading_zero_index(self):
        self._test_index('01')
        self._test_index('001')
    @small_NL_ARGMAX
    def test_index_out_of_range(self):
        with assert_raises(M.ArgumentRangeError):
            M.FormatString('%1$*0$s')
        def fs(n):
            s = str.join('', (
                f'%{i}$d'
                for i in range(2, n)
            )) + f'%1$*{n}$s'
            return M.FormatString(s)
        fmt = fs(M.NL_ARGMAX)
        assert_equal(len(fmt), M.NL_ARGMAX - 1)
        assert_equal(len(fmt.arguments), M.NL_ARGMAX)
        with assert_raises(M.ArgumentRangeError):
            fs(M.NL_ARGMAX + 1)
    def test_numbering_mixture(self):
        def t(s):
            with assert_raises(M.ArgumentNumberingMixture):
                M.FormatString(s)
        t('%1$*s')
        t('%*1$s')
        t('%s%1$*2$s')
        t('%1$*2$s%s')
class test_precision:
    """Tests for precision parsing: literal, '*', and '*m$' forms."""
    @collect_yielded
    def test_ok(self):
        def t(s):
            fmt = M.FormatString(s)
            assert_equal(len(fmt), 1)
        for c in 'diouxXaAeEfFgGsS':
            yield t, ('%.1' + c)
    @collect_yielded
    def test_redundant_0(self):
        # For integer conversions an explicit precision disables '0'
        # padding, so the flag is redundant.
        def t(s):
            fmt = M.FormatString(s)
            assert_equal(len(fmt), 1)
            [warning] = fmt.warnings
            assert_is_instance(warning, M.RedundantFlag)
        for c in 'diouxX':
            yield t, ('%0.1' + c)
    @collect_yielded
    def test_non_redundant_0(self):
        # For floating-point conversions '0' is still meaningful.
        def t(s):
            fmt = M.FormatString(s)
            assert_equal(len(fmt), 1)
            assert_sequence_equal(fmt.warnings, [])
        for c in 'aAeEfFgG':
            yield t, ('%0.1' + c)
    @collect_yielded
    def test_unexpected(self):
        def t(s):
            with assert_raises(M.PrecisionError):
                M.FormatString(s)
        for c in 'cCpnm%':
            yield t, ('%.1' + c)
    def test_too_large(self):
        fmt = M.FormatString(f'%.{M.INT_MAX}f')
        assert_equal(len(fmt), 1)
        with assert_raises(M.PrecisionRangeError):
            M.FormatString(f'%.{M.INT_MAX + 1}f')
    def test_variable(self):
        # '.*' consumes an extra int argument for the precision.
        fmt = M.FormatString('%.*f')
        assert_equal(len(fmt), 1)
        assert_equal(len(fmt.arguments), 2)
        [a1], [a2] = fmt.arguments
        assert_equal(a1.type, 'int')
        assert_equal(a2.type, 'double')
    def _test_index(self, i):
        fmt = M.FormatString(f'%2$.*{i}$f')
        assert_equal(len(fmt), 1)
        assert_equal(len(fmt.arguments), 2)
        [a1], [a2] = fmt.arguments
        assert_equal(a1.type, 'int')
        assert_equal(a2.type, 'double')
    def test_index(self):
        self._test_index(1)
    def test_leading_zero_index(self):
        self._test_index('01')
        self._test_index('001')
    @small_NL_ARGMAX
    def test_index_out_of_range(self):
        with assert_raises(M.ArgumentRangeError):
            M.FormatString('%1$.*0$f')
        def fs(n):
            s = str.join('', (
                f'%{i}$d'
                for i in range(2, n)
            )) + f'%1$.*{n}$f'
            return M.FormatString(s)
        fmt = fs(M.NL_ARGMAX)
        assert_equal(len(fmt), M.NL_ARGMAX - 1)
        assert_equal(len(fmt.arguments), M.NL_ARGMAX)
        with assert_raises(M.ArgumentRangeError):
            fs(M.NL_ARGMAX + 1)
    def test_numbering_mixture(self):
        def t(s):
            with assert_raises(M.ArgumentNumberingMixture):
                M.FormatString(s)
        t('%1$.*f')
        t('%.*1$f')
        t('%f%2$.*1$f')
        t('%2$.*1$f%f')
class test_type_compatibility:
    """An argument referenced twice must be used with one consistent type."""
    def test_okay(self):
        def t(s, tp):
            fmt = M.FormatString(s)
            [args] = fmt.arguments
            assert_greater(len(args), 1)
            for arg in args:
                assert_equal(arg.type, tp)
        t('%1$d%1$d', 'int')
        t('%1$d%1$i', 'int')
    def test_mismatch(self):
        def t(s):
            with assert_raises(M.ArgumentTypeMismatch):
                M.FormatString(s)
        t('%1$d%1$hd')
        t('%1$d%1$u')
        t('%1$d%1$s')
@small_NL_ARGMAX
def test_too_many_conversions():
    # A format string may consume at most NL_ARGMAX arguments; one more
    # conversion — or even just a '*' width/precision — must be rejected.
    def t(s):
        with assert_raises(M.ArgumentRangeError):
            M.FormatString(s)
    s = M.NL_ARGMAX * '%d'
    fmt = M.FormatString(s)
    assert_equal(len(fmt), M.NL_ARGMAX)
    t(s + '%f')
    t(s + '%*f')
    t(s + '%.*f')
class test_get_last_integer_conversion:
    """Tests for FormatString.get_last_integer_conversion()."""
    def test_overflow(self):
        # n must be within 1..(number of conversions).
        fmt = M.FormatString('%s%d')
        for n in [-1, 0, 3]:
            with assert_raises(IndexError):
                fmt.get_last_integer_conversion(n=n)
    def t(self, s, n, tp=M.Conversion):
        """Fetch the last integer conversion of *s* and check its type.

        Passing tp=None means "expect no usable integer conversion":
        get_last_integer_conversion() then returns None, which is verified
        with an isinstance check against NoneType.
        """
        fmt = M.FormatString(s)
        conv = fmt.get_last_integer_conversion(n=n)
        if tp is None:
            # Clearer than the original `tp = type(tp)` (which only worked
            # because tp was None here): we assert conv is None.
            tp = type(None)
        assert_is_instance(conv, tp)
        return conv
    def test_okay(self):
        self.t('%d', 1)
        self.t('%s%d', 1)
    def test_non_integer(self):
        self.t('%s', 1, None)
        self.t('%c', 1, None)
    def test_too_many(self):
        self.t('%s%d', 2, None)
        self.t('%d%d', 2, None)
    def test_var(self):
        # '*' width/precision arguments belong to the same conversion.
        self.t('%*d', 1)
        self.t('%*d', 2)
        self.t('%.*d', 2)
        self.t('%1$*2$d', 2)
        self.t('%2$*3$.*1$d', 3)
    def test_broken_var(self):
        self.t('%1$*2$d', 1, None)
        self.t('%1$*2$d%3$d', 2, None)
        self.t('%1$*3$d%2$d', 2, None)
# vim:ts=4 sts=4 sw=4 et
| UTF-8 | Python | false | false | 16,820 | py | 91 | test_strformat_c.py | 59 | 0.52252 | 0.511811 | 0 | 571 | 28.434326 | 80 |
Ilya1691/computers-crawler | 12,945,031,435,862 | 594e042e2606f5facaed3f351736750ede02923a | 5b8f9dad9900767b9811080f08e4168e089d7c41 | /parsers/product/product_dict.py | 76a7b1f2422516a99f3224450af74c5ffd0dce7a | []
| no_license | https://github.com/Ilya1691/computers-crawler | d1614e2f08ce73003b309ba45850e291f6c5e895 | f199c3c6660717a467627fb0a9dcd36d79e4ccb8 | refs/heads/master | 2021-08-23T22:07:01.990135 | 2017-12-06T19:44:32 | 2017-12-06T19:44:32 | 100,620,837 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class ProductDicts:
product_characteristics = {
"ะัะพัะตััะพั:": "processor",
"ะขะฐะบัะพะฒะฐั ัะฐััะพัะฐ ะฟัะพัะตััะพัะฐ:": "clock_frequency",
"ะะพะป-ะฒะพ ัะดะตั:": "NumberOfCores",
"ะกะพะบะตั:": "socket",
"ะคะพัะผ-ัะฐะบัะพั:": "Form_factor",
"ะขะธะฟ ะฟะฐะผััะธ:": "type_ram",
"ะะฑัะตะผ ะฟะฐะผััะธ:": "volume_ram",
"ะงะฐััะพัะฐ ะฟะฐะผััะธ:": "frequency_ram",
"ะะฑัะตะผ ะถะตััะบะพะณะพ ะดะธัะบะฐ:": "volume_hdd",
"ะะธะดะตะพะบะฐััะฐ:": "videocarta",
"ะะฑัะตะผ ะฒะธะดะตะพะฟะฐะผััะธ:": "volume_video",
"ะคะพัะผ-ัะฐะบัะพั:": "Form_fac_Case",
"ะะพัะฝะพััั ะฑะปะพะบะฐ ะฟะธัะฐะฝะธั:": "PowerSupply",
}
| UTF-8 | Python | false | false | 765 | py | 12 | product_dict.py | 7 | 0.56 | 0.56 | 0 | 17 | 34.294118 | 58 |
CJYang-Yale/rotcscraper | 11,467,562,718,471 | 89deedd4b567b9865cb1f26abaa64bf50d360beb | 60765f586f705f9e2522655bd8af8fa839eaf863 | /scraper.py | 724a1103f3f7ad944967a1d3ee80d02490181573 | []
| no_license | https://github.com/CJYang-Yale/rotcscraper | a89ff3d25ec6150a6f3483cd4f3fcd57dbc333de | 8ed09ea2bd440ecf00eb074c406874eefd8732e8 | refs/heads/main | 2023-02-15T07:51:42.131089 | 2021-01-10T06:10:43 | 2021-01-10T06:10:43 | 328,315,265 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from bs4 import BeautifulSoup
import requests
import json
from cs50 import SQL
# Scrape ROTC host/crosstown school lists for every US state/territory and
# store them in a local SQLite database (tables: airforce, army, navy).
# NOTE(review): performs live HTTP requests and DB writes at import time.
states = ["AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DE", "DC", "FL", "GA", "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD", "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ", "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC", "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY", "PR", "GU"]
db = SQL("sqlite:///database.db")
# Air Force: the AFROTC site exposes a JSON API per state; each college
# record carries coordinates and a Host/crosstown type label.
for state in states:
    airforce = requests.get('https://www.afrotc.com/wp-json/afrotc/colleges/state/' + str(state) + '?admission_type=&type=&dispositions=')
    det = json.loads(airforce.content)
    for dictionary in det:
        # print(dictionary['name'])
        # print(dictionary['type']['label'])
        # print(dictionary['longitude'], dictionary['latitude'])
        longitude = dictionary['longitude']
        latitude = dictionary['latitude']
        school = dictionary['name']
        if dictionary['type']['label'] == "Host":
            db.execute("INSERT or IGNORE INTO airforce (school, latitude, longitude, type) VALUES (:school, :latitude, :longitude, 'host')", school=school, latitude=latitude, longitude=longitude)
        else:
            db.execute("INSERT or IGNORE INTO airforce (school, latitude, longitude, type) VALUES (:school, :latitude, :longitude, 'crosstown')", school=school, latitude=latitude, longitude=longitude)
# Army: scraped from HTML result pages; host schools and their crosstown
# sub-schools live in differently-classed divs.
for state in states:
    army = requests.get('https://www.goarmy.com/rotc/find-schools.' + str(state) + '-.results.html')
    soup = BeautifulSoup(army.content, 'html.parser')
    div = soup.find_all("div", {"class": "resultsSchoolList darkLinks"})
    for x in range(len(div)):
        school = div[x].get_text()
        db.execute("INSERT or IGNORE INTO army (school, type) VALUES (:school, 'host')", school=school)
    crosstown = soup.find_all("div", {"class": "resultsSubSchools"})
    for x in range(len(crosstown)):
        school = crosstown[x].get_text()
        db.execute("INSERT or IGNORE INTO army (school, type) VALUES (:school, 'crosstown')", school=school)
# for x in range(len(div)):
#     print(div[x].get_text())
# Navy: one page listing all NROTC schools as external links.
# NOTE(review): matching every <a target="_blank"> may also pick up
# unrelated external links on that page — verify the selector.
navy = requests.get('https://www.netc.navy.mil/Commands/Naval-Service-Training-Command/NROTC/Navy-ROTC-Schools/#')
stew = BeautifulSoup(navy.content, 'html.parser')
schools = stew.find_all('a', {'target': '_blank'})
for x in range(len(schools)):
    #print(schools[x].get_text())
    school = schools[x].get_text()
    db.execute("INSERT or IGNORE INTO navy (school) VALUES (:school)", school=school)
| UTF-8 | Python | false | false | 2,541 | py | 1 | scraper.py | 1 | 0.622983 | 0.621802 | 0 | 44 | 56.75 | 327 |
vmgabriel/client-base | 3,307,124,855,840 | 5dfb0736d70f93a2f94c6a0fd570867dbd8a8a77 | 277f171d59a284f8835b9bda37b8970232030ae3 | /config/views.py | 2bf6abc136825786eff35656349333793501a336 | [
"Apache-2.0"
]
| permissive | https://github.com/vmgabriel/client-base | 226691b82cd71979ede93aeeb7b4acff9c3fb1cb | 322a6bdcbbfa1d363efa5839558788a62bdfe9bf | refs/heads/master | 2023-03-26T07:26:04.116505 | 2021-03-22T09:20:02 | 2021-03-22T09:20:02 | 340,098,762 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """View of the Page"""
# Libraries
from django.shortcuts import render
from django.http import HttpResponseRedirect
def home_view(request):
    """Landing page; authenticated users are redirected to /clients."""
    if not request.user.is_authenticated:
        # Anonymous visitors see the public index page.
        context = {'user': request.user}
        return render(request, 'index.html', context)
    return HttpResponseRedirect('/clients')
| UTF-8 | Python | false | false | 371 | py | 24 | views.py | 13 | 0.61186 | 0.61186 | 0 | 18 | 19.611111 | 47 |
CodeSoju/ImpracticalPythonProjects | 7,481,833,073,092 | 74f177d868775ea42d29d071cc39e93339e1fbc2 | 89c1fbf34fd1e95a53be9f1e6c7c11be61ba6f51 | /PalingramSpelling/load_dictionary.py | 5d9e38875a58e1db1174d8acdc025fad4ac2e1da | []
| no_license | https://github.com/CodeSoju/ImpracticalPythonProjects | c62687580a7654e35452bf68ab61dc7ce45bb644 | e54504132ccf3f3ceaf3f9a4d0a0b62f7673715c | refs/heads/master | 2022-12-14T05:30:20.757331 | 2020-09-03T00:57:21 | 2020-09-03T00:57:21 | 292,429,304 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Load a text file as a list.
Arguments:
- text file name (and a dictionary path, if needed)
Exceptions:
- IOError: caught internally; the program prints the error and exits with status 1
Returns:
-A list of all words in a text file in lower case
Requires:
- import sys
'''
import sys
def load(file):
    '''
    Open a text file and return a list of lowercase strings, one per line.
    Leading/trailing whitespace of the whole file is stripped first, so an
    empty trailing line does not produce an empty list item.
    On IOError the error is printed to stderr and the program exits.
    '''
    try:
        with open(file) as in_file:
            lines = in_file.read().strip().split('\n')
        return [line.lower() for line in lines]
    except IOError as e:
        print("{}\nError opening {}. Terminating program.".format(e, file), file=sys.stderr)
        sys.exit(1)
'''
Function based on the previous file-opening discussion. The function takes a filename as an arg.
If no exceptions are raised, the text file's whitespace is removed, and its items are split into separate lines and added to a list.
We want each word to be a separate item in the list before the list is returned. And since case matters to Python, the words in the list
are converted to lowercase via a list comprehension.
List comprehension: a shorthand way to convert a list, or other iterable, into another list. In this case it replaces a for-loop.
Generally, you wouldn't call sys.exit() from a module, as you may want your program to do something - like write a log file - prior to
terminating. In later chapters, we'll move both the try-except blocks and sys.exit() into a main() function for clarity and control.
'''
| UTF-8 | Python | false | false | 1,596 | py | 7 | load_dictionary.py | 7 | 0.734336 | 0.733709 | 0 | 45 | 34.444444 | 135 |
sprinuko/kaoruko | 11,089,605,584,804 | ec2eb40562e2edb6026e787437243a11612fd917 | 713daaadb15b8a6ed3d8ac95557a3bae0eee66b3 | /cogs/background.py | e4124c89182e720ee869ca930bb06a326c65323c | []
| no_license | https://github.com/sprinuko/kaoruko | 9760aef0bab3766d5bd49057c359b585cca13b13 | 5903af64a97a0ea6d0291f4337c1ac6f8a5b446f | refs/heads/master | 2018-12-26T13:38:40.626598 | 2018-10-21T16:28:52 | 2018-10-21T16:28:52 | 144,898,612 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #stuff that runs in the background
import discord
from discord.ext import commands
# Content of the most recently echoed message; module-level so the
# repeat-detection state survives between on_message() calls.
msg = ''
class background():
    """Passive bot behaviours: gate new members behind an access channel,
    and echo any message that three different users post in a row."""
    def __init__(self, bot):
        self.bot = bot
        # NOTE(review): 'yea1'/'yea2' look like placeholder channel IDs —
        # presumably meant to be replaced with real snowflakes; confirm.
        self.access_channelid = 'yea1'
        self.general_channelid = 'yea2'
    async def on_member_join(self, member : discord.Member):
        # Ask newcomers to post in the access channel to unlock the server.
        await self.bot.send_message(member.server.get_channel(self.access_channelid), '<@{}>, please send a message in this channel to get access to the rest of the server.'.format(member.id))
    async def on_message(self, message : discord.Message):
        #server access: any non-bot message in the access channel grants the
        #'access' role and announces it in the general channel.
        if (message.channel.id == self.access_channelid):
            if (message.author.id != self.bot.user.id):
                for role in message.server.roles:
                    if (role.name == 'access'):
                        await self.bot.add_roles(message.author, role)
                        await self.bot.send_message(message.server.get_channel(self.general_channelid), 'successfully granted <@{}> access!'.format(message.author.id))
                        await self.bot.send_message(message.server.get_channel(self.general_channelid), 'welcome to the server!')
        #message repeat: look at the last 3 messages in the channel.
        global msg
        log = []
        async for m in self.bot.logs_from(message.channel, limit = 3):
            log.append(m)
        #last msg check, content check x2, author check x3, non bot check x3:
        #echo only when three distinct non-bot users posted identical content
        #and the bot has not already echoed this content (msg tracks that).
        if (log[0].content != msg) and (log[0].content == log[1].content) and (log[1].content == log[2].content) and (log[0].author != log[1].author) and (log[1].author != log[2].author) and (log[2].author != log[0].author) and (log[0].author.id != self.bot.user.id) and (log[1].author.id != self.bot.user.id) and (log[2].author.id != self.bot.user.id):
            msg = log[0].content
            await self.bot.send_message(message.channel, log[0].content)
#
def setup(bot):
    # discord.py extension entry point: register this cog on the bot.
    bot.add_cog(background(bot))
| UTF-8 | Python | false | false | 1,949 | py | 9 | background.py | 6 | 0.608004 | 0.596716 | 0 | 40 | 46.725 | 353 |
arra1997/zipline | 223,338,337,079 | 56cdea5864ff45c39a5ac8e9c91ef7a8eb8308eb | 00b345695bde9f11b1721fe3398f73c913647bd6 | /zipline/pipeline/loaders/equity_pricing_loader.py | f482896ea49fc3ac3964cf2a3192fd6749550a02 | [
"Apache-2.0"
]
| permissive | https://github.com/arra1997/zipline | 0115a40962a00c720cc8dec60ea4a9aadeb4132c | 38d47f1b470f47ff7e8c35d9874d68785d6d2927 | refs/heads/master | 2022-12-16T21:10:35.030046 | 2019-11-15T23:04:45 | 2019-11-15T23:04:45 | 195,696,228 | 1 | 1 | Apache-2.0 | false | 2022-12-08T03:03:03 | 2019-07-07T20:55:41 | 2019-11-15T23:06:54 | 2022-12-08T03:03:01 | 341,223 | 1 | 1 | 12 | Python | false | false | # Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from interface import implements
from numpy import iinfo, uint32
from zipline.lib.adjusted_array import AdjustedArray
from .base import PipelineLoader
from .utils import shift_dates
# Largest value representable as an unsigned 32-bit integer (2**32 - 1).
UINT32_MAX = iinfo(uint32).max
class EquityPricingLoader(implements(PipelineLoader)):
    """A PipelineLoader for daily OHLCV data.

    Parameters
    ----------
    raw_price_reader : zipline.data.session_bars.SessionBarReader
        Source of unadjusted prices.
    adjustments_reader : zipline.data.adjustments.SQLiteAdjustmentReader
        Source of price/volume adjustments.
    """

    def __init__(self, raw_price_reader, adjustments_reader):
        self.raw_price_reader = raw_price_reader
        self.adjustments_reader = adjustments_reader

    def load_adjusted_array(self, domain, columns, dates, sids, mask):
        # The user's algorithm is shown data on each of ``dates``, so only
        # values knowable at the *start* of each date may be returned.  The
        # latest data known on day N is assumed to be day (N - 1)'s data,
        # hence the raw query window is shifted back by one session.
        query_start, query_end = shift_dates(
            domain.all_sessions(), dates[0], dates[-1], shift=1,
        )
        column_names = [column.name for column in columns]
        raw_arrays = self.raw_price_reader.load_raw_arrays(
            column_names,
            query_start,
            query_end,
            sids,
        )
        adjustments = self.adjustments_reader.load_pricing_adjustments(
            column_names,
            dates,
            sids,
        )
        return {
            column: AdjustedArray(
                raw.astype(column.dtype),
                adjs,
                column.missing_value,
            )
            for column, raw, adjs in zip(columns, raw_arrays, adjustments)
        }
# Backwards compat alias: older code imported the loader under its original
# US-only name.
USEquityPricingLoader = EquityPricingLoader
| UTF-8 | Python | false | false | 2,566 | py | 3,245 | equity_pricing_loader.py | 8 | 0.655885 | 0.64887 | 0 | 73 | 34.150685 | 79 |
xueanxi/learnAi | 9,337,258,936,642 | 5b3a073b932e135bbbd907b97efa31e4e4769a08 | acfb40839a7d74d36bce820a01e05cb5488061da | /test/48_Iteration.py | 68f2ceec41e811f8bb4b570852574fb49f97bec1 | []
| no_license | https://github.com/xueanxi/learnAi | 04683246a7eafea948d8ba9710b642c3d22aab51 | 7f9fd6d6fd68d751b0bef95783d06b746cf47843 | refs/heads/master | 2021-10-21T19:35:45.980539 | 2019-03-06T02:00:42 | 2019-03-06T02:00:42 | 162,879,676 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-01-21 22:32:27
# @Author : anxi.xue (xueanxi@163.com)
# @Version : $Id$
import os
def getMaxAndMin(list1):
    """Return a (maximum, minimum) tuple for a non-empty sequence.

    Raises IndexError on an empty sequence, matching the original code
    (which indexed list1[0]).  The original made two full passes with
    locals shadowing the max/min builtins; the builtins do the same work
    in one obvious call each.
    """
    if not list1:
        raise IndexError('getMaxAndMin() arg is an empty sequence')
    return max(list1), min(list1)
# Demo: find the extremes of a mixed positive/negative list.
list1 = [-32, 4234, 232, 3213, 545, 67, 1, 13, -3213]
print('max and min:', getMaxAndMin(list1))
# Demo: lower-case only the string items, skipping the int and None.
L1 = ['Hello', 'World', 18, 'Apple', None]
L2 = [item.lower() for item in L1 if isinstance(item, str)]
print('L2', L2)
| UTF-8 | Python | false | false | 592 | py | 125 | 48_Iteration.py | 109 | 0.557432 | 0.45777 | 0 | 27 | 20.925926 | 59 |
LevupCompany/beton | 18,408,229,835,970 | 280ea35ca0d037a3eb651829e47dbf060d85ee4b | 2bc10ff3501d32d8bcac5ae17fb1f1819ee20523 | /catalog/migrations/0010_auto_20190516_1630.py | d72e6b24a046a94e16dd56bd7fbf64d962f9ea0d | []
| no_license | https://github.com/LevupCompany/beton | cc57ba62d20ec578f81bedffbab47e8621113b72 | b555f5949d062ac96a4651541cbd1ed6b680eddd | refs/heads/master | 2022-11-26T07:17:37.193772 | 2019-06-06T16:26:00 | 2019-06-06T16:26:00 | 187,328,141 | 0 | 0 | null | false | 2022-11-22T03:49:20 | 2019-05-18T07:31:57 | 2019-06-06T16:26:45 | 2022-11-22T03:49:17 | 11,497 | 0 | 0 | 2 | JavaScript | false | false | # Generated by Django 2.2 on 2019-05-16 13:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds boolean status flags `cancel` and `check`
    (defaulting to False) to the Order model.

    NOTE(review): "check" is an SQL reserved word; some database backends
    may require quoting — confirm against the deployment target.
    """
    dependencies = [
        ('catalog', '0009_delete_ordercheck'),
    ]
    operations = [
        migrations.AddField(
            model_name='order',
            name='cancel',
            field=models.BooleanField(default=False, verbose_name='Отменен'),
        ),
        migrations.AddField(
            model_name='order',
            name='check',
            field=models.BooleanField(default=False, verbose_name='Исполнен'),
        ),
    ]
| UTF-8 | Python | false | false | 599 | py | 79 | 0010_auto_20190516_1630.py | 52 | 0.580479 | 0.549658 | 0 | 23 | 24.391304 | 78 |
tsetsoo/forestFires | 9,672,266,371,887 | 8ab82312003b1df4eaada8c0935b9759165f8a8f | 88632c7653e2cb4a0f27875234f826f13552c5b1 | /OpenCV-Verification/OpenCV_Verification.py | 1903fb058927234b41f84c8248b98e646ef6e1b1 | []
| no_license | https://github.com/tsetsoo/forestFires | 16869db7023ddafe515ebba738a4fd93a9108ef1 | 7809f8b8bec64eb1169ed975543d612f54715232 | refs/heads/master | 2018-12-26T07:21:24.261103 | 2018-10-22T20:48:46 | 2018-10-22T20:48:46 | 153,888,474 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import numpy as np
import sys
#----------------------------
# Fire-detection verification script.
#
# Reads an image path from argv[1], builds three HSV colour masks that
# isolate flame-coloured pixels (red / yellow-orange / dark red), overlays
# them on the input, and prints "Yes." to stdout so a parent process can
# read the verdict.  The large blocks of commented-out debug code (HSV
# colour probes, imshow windows, blurring/edge-detection experiments) have
# been removed; see version control history if they are needed again.
#----------------------------
# File path initialization: argv[1] is the image to verify.
FileName = sys.argv[1]
test = sys.argv[0]  # script's own path; printed only for debugging
print(FileName)
print(test)
# User image loading.  `original` is an untouched copy kept for potential
# side-by-side debugging; `img` is the working copy.
original = cv2.imread(FileName)
img = cv2.imread(FileName)
# Change the colourspace: fire intensity correlates directly with the HSV
# "value" channel, so all thresholding is done in HSV.
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Resize the input photo (currently unused downstream; kept for parity
# with the original behaviour / future display code).
scale_percent = 80  # percent of original size
width = int(img.shape[1] * scale_percent / 100)
height = int(img.shape[0] * scale_percent / 100)
dim = (width, height)
resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
# Red mask - low temperature spots.
lower = np.array([0, 135, 60], dtype="uint8")
upper = np.array([40, 212, 245], dtype="uint8")
mask = cv2.inRange(hsv, lower, upper)
# Yellow-orange mask - high temperature spots.
lower_yellow = np.array([7, 145, 210], dtype="uint8")
upper_yellow = np.array([29, 204, 255], dtype="uint8")
mask_yellow = cv2.inRange(hsv, lower_yellow, upper_yellow)
# Dark red mask - medium temperature spots.
lower_dred = np.array([5, 210, 220], dtype="uint8")
upper_dred = np.array([18, 240, 255], dtype="uint8")
mask_dred = cv2.inRange(hsv, lower_dred, upper_dred)
# Bitwise-AND each mask with the original image to keep only fire pixels.
res = cv2.bitwise_and(img, img, mask=mask)
res2 = cv2.bitwise_and(img, img, mask=mask_yellow)
res3 = cv2.bitwise_and(img, img, mask=mask_dred)
# Image blending: combine the three masked overlays.  `add2` is currently
# only inspected manually / reserved for dataset population.
add = res + res2
add2 = add + res3
# Signal the caller that verification completed.
print("Yes.")
sys.stdout.flush()
#----------------------------
| UTF-8 | Python | false | false | 4,211 | py | 8 | OpenCV_Verification.py | 3 | 0.548801 | 0.479221 | 0 | 149 | 26.248322 | 104 |
sm1lex/learn_py | 2,980,707,333,514 | fbdbbfd4e8655b5ab69b0cbb4aecc3586a8128bb | a2efb92e39c03249581ee5f167512d9da6f04b8f | /backup_ver2.py | 54b9440529a6acbc15856e2f5908752d312e3ac0 | []
| no_license | https://github.com/sm1lex/learn_py | b580da43ac043102ee4d79d21eb4dffe52569f48 | 980b5a0e648e8384ee9728a90e45ea27117dc031 | refs/heads/master | 2020-09-11T07:02:13.127680 | 2020-01-10T07:45:53 | 2020-01-10T07:45:53 | 221,981,552 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #version 2 with folder creating
import os
import time
source = ['~/git']
backup_folder = '/home/aleksey/backup'
#Check out has been folder creating
if not os.path.exists(backup_folder):
os.mkdir(backup_folder)
print("Folder is created")
name_backup_folder = backup_folder + os.sep + time.strftime('%Y%m%d')
name_backup_archive = time.strftime('%H%M')
#check creating name_backup_folder
if not os.path.exists(name_backup_folder):
os.mkdir(name_backup_folder)
print('name_backup_folder successful created')
comment = input('Enter some comment for your backup file --> ')
#check out has been comment enter
if len(comment) == 0:
path_to_backup = name_backup_folder + os.sep + name_backup_archive
else:
path_to_backup = name_backup_folder + os.sep + name_backup_archive + '_' + \
comment.replace(' ', '_') + '.zip'
zip_command = 'zip -r {0} {1}'. format(path_to_backup, ' '.join(source))
print('Zip command is:')
print(zip_command)
print('Running')
if os.system(zip_command) == 0:
print('Successful backup file to', path_to_backup)
else:
print('Backup FAILED')
| UTF-8 | Python | false | false | 1,100 | py | 44 | backup_ver2.py | 42 | 0.687273 | 0.682727 | 0 | 38 | 27.947368 | 80 |
furushchev/jsk_pr2_inference | 17,085,379,941,762 | 748a0d5ee10acbc689637aa5b5a34fe67c7c5d3d | 4af2779de09ba1205db3513d101606230abfff5a | /scripts/elevator-success-rate.py | e3807855cbd5b1636915f38016422100c1815e9e | []
| no_license | https://github.com/furushchev/jsk_pr2_inference | 07ab401508e838d11794f66d26d10e99482f8a2b | 5c08b623c7090b05e44ca523ef34a7c859c4376a | refs/heads/master | 2018-01-09T06:59:07.140847 | 2016-01-15T00:25:03 | 2016-01-15T00:25:03 | 44,508,913 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import argparse
import pymongo
import pprint
DEBUG=False
CSV_FILE="succ_rate2.csv"
def aggregate(host, port, db_name, col_name):
if DEBUG:
print "host:", host
print "port:", port
print "db:", db_name
print "col:", col_name
conn = pymongo.Connection(host=host, port=port)
cur = conn[db_name][col_name]
cnt = 0
buff = 10
succ = []
f = open(CSV_FILE,'w')
rprev=None
for r in cur.find({'header.context': {'$regex': '^ELEVATOR'},
'name': 'CHECK-BUTTON-LIGHT',
# '_meta.status': 'success',
'_meta.stored_type': 'jsk_demo_common/FunctionEvent'}).sort('$natural', 1):
succ_cnt = 0
if rprev:
pd = rprev["_meta"]["inserted_at"]
cd = r["_meta"]["inserted_at"]
if (cd - pd).seconds < 10:
s = r["_meta"]["status"]
cnt += 1
if s == 'success':
succ += [cnt]
if cnt > 0:
for i in succ:
if i > cnt - buff:
succ_cnt += 1
print succ_cnt, cnt, 100.0 * succ_cnt / buff
# print succ, cnt, 100.0 * succ / cnt
rprev=r
if succ_cnt > 0:
f.write('%d,%d,%f\n' % (succ_cnt, cnt, 100.0 * succ_cnt / buff))
pass
f.close()
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('destination',
default='localhost:27017/test',
type=str,
metavar='host:port/db',
help='hostname:port/database')
p.add_argument('collection',
default='test',
type=str,
help='collection')
args = p.parse_args()
try:
if args.destination.find(':') == -1:
hostname = args.destination.split('/')[0]
port = 27017
else:
hostname = args.destination.split(':')[0]
port = int(args.destination[len(hostname)+1:].split('/')[0])
db_name = args.destination.split('/')[1]
col_name = args.collection
aggregate(hostname, port, db_name, col_name)
except Exception as e:
print "Error:", e
print
p.print_help()
exit(1)
| UTF-8 | Python | false | false | 2,393 | py | 25 | elevator-success-rate.py | 5 | 0.464271 | 0.447137 | 0 | 77 | 30.077922 | 98 |
princemathew1997/random-python | 14,903,536,551,703 | 1cd01861ab7e406d346c59b6563eabae9c35d64d | f1c6178b5f0bb6cbd3d42d9326e9f9c41701e0a6 | /Day 6/d5-3.py | f9a720cd77773022afaba9b46ed6dfe90a24b43c | []
| no_license | https://github.com/princemathew1997/random-python | 779d377fb43a39b37584b7f3a5702f0f29e98ad0 | 80b9065353525465b87636efcd7879d5f7a8ae76 | refs/heads/main | 2023-01-31T09:38:27.056368 | 2020-12-19T15:20:39 | 2020-12-19T15:20:39 | 319,009,807 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #tuple
a=(1,2,3,4)
print(type(a)) | UTF-8 | Python | false | false | 33 | py | 77 | d5-3.py | 76 | 0.606061 | 0.484848 | 0 | 3 | 10.333333 | 14 |
JoanaMWarnecke/sftt | 3,049,426,801,370 | 87d2966272efe654f764b1fa01c52b28924b933c | 2bdc4255d0d5c0635933da07e1e12dde749c1fff | /visualization/gauss_linear_transform.py | 321b60ab93f155fc476e3f06098681562173563e | []
| no_license | https://github.com/JoanaMWarnecke/sftt | cd80aec590374e1ad97b60ba26d9bf6b86b18076 | 5824741fd7ada7b1ebbde011b40a08f4deede842 | refs/heads/master | 2021-09-07T13:19:13.498229 | 2018-02-22T20:02:19 | 2018-02-23T12:14:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import scipy.stats as stats
normal_var = stats.norm(loc=0, scale=1)
dx = 10
x = np.linspace(-dx, dx, 100)
p = normal_var.pdf(x)
samples = 2.5 * normal_var.rvs(100000) + 2
xb = 9
plt.figure(figsize=(4, 8))
plt.subplot(2, 1, 1)
plt.plot(x, p)
plt.xlabel(r'$x$')
plt.title(r'pdf of $\mathcal{N}(\mu=0, \sigma=1)$')
ax = plt.gca()
ax.set_xlim([-xb, xb])
ax.set_ylim([-0.005, 0.42])
ax.set_yticks([])
plt.subplot(2, 1, 2)
ax = plt.gca()
plt.hist(samples, bins=100, normed=True)
plt.title(r'Histogram of $s_x$')
plt.xlabel(r'$s_x$')
ax.set_xlim([-xb, xb])
ax.set_yticks([])
plt.tight_layout(pad=0)
plt.savefig('comp_2.png', dpi=75)
plt.show()
| UTF-8 | Python | false | false | 739 | py | 29 | gauss_linear_transform.py | 27 | 0.652233 | 0.596752 | 0 | 39 | 17.948718 | 51 |
JoanPuig/PyFIT | 5,970,004,553,145 | ed3d844ec32b29095d9b9a75664b89d3e890a55d | 76bd6b0169b6dcd919a6ace44e2b0ed8c8c4adc0 | /examples/example_decode_fit_activity.py | 0a419cb409e7006994be5ea32795a824575d28a3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | https://github.com/JoanPuig/PyFIT | 8e8d256f2f069eb407ea7967dfc610ec5d21cce1 | bc0a2b3984e77008bd6e40bde72ac05ae3f32c31 | refs/heads/master | 2020-07-10T19:15:52.856753 | 2019-09-23T19:37:24 | 2019-09-23T19:37:24 | 204,345,141 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright 2019 Joan Puig
# See LICENSE for details
from FIT.activities import ActivityDecoder
def main():
# This sample code shows how to decode a FIT file into an activity
# Modify to fit your directory setup
file_name = './data/FIT/MY_ACTIVITY_FILE.fit'
activity = ActivityDecoder.decode_activity(file_name)
print(activity)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 396 | py | 23 | example_decode_fit_activity.py | 21 | 0.684343 | 0.674242 | 0 | 20 | 18.75 | 70 |
mahidulmoon/Online-Quiz-Portal | 14,362,370,646,454 | 0ef8d674a4161347da3607b64d37953547de3740 | 7e5bd07780961a2bc56d28242b5ad1193b8d0dbc | /backend/quiz/migrations/0004_auto_20200908_2016.py | 89a1e378cdda1b272734969d41079128b2d8014d | []
| no_license | https://github.com/mahidulmoon/Online-Quiz-Portal | 889b5e6e0f9b68611a63a8ba91656ce1242f77d9 | 156bd4f0d2a1a614d1361bf3427e8ea89e8f2fe9 | refs/heads/master | 2023-01-02T17:24:40.807160 | 2020-10-28T04:43:18 | 2020-10-28T04:43:18 | 292,612,613 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.0.6 on 2020-09-08 14:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('quiz', '0003_auto_20200908_2010'),
]
operations = [
migrations.AlterField(
model_name='addnewquizquestion',
name='answer',
field=models.CharField(blank=True, max_length=30),
),
migrations.AlterField(
model_name='addnewquizquestion',
name='option1',
field=models.CharField(blank=True, max_length=30),
),
migrations.AlterField(
model_name='addnewquizquestion',
name='option2',
field=models.CharField(blank=True, max_length=30),
),
migrations.AlterField(
model_name='addnewquizquestion',
name='option3',
field=models.CharField(blank=True, max_length=30),
),
migrations.AlterField(
model_name='addnewquizquestion',
name='option4',
field=models.CharField(blank=True, max_length=30),
),
migrations.AlterField(
model_name='addnewquizquestion',
name='question',
field=models.CharField(blank=True, max_length=100),
),
]
| UTF-8 | Python | false | false | 1,299 | py | 41 | 0004_auto_20200908_2016.py | 21 | 0.56659 | 0.529638 | 0 | 43 | 29.209302 | 63 |
Venezix/robloxapi | 11,562,051,994,332 | 33f3c482c75523c1228c52cb422bfeab0d6b10c0 | 3dba23773c6f73178f943bdbc4b8f42fb6494a0a | /robloxapi/Trade.py | a51bda70838114ed3a692db4ee4652cae8a7c276 | [
"MIT"
]
| permissive | https://github.com/Venezix/robloxapi | 090a44ba4f3c9d8590f469b2de8466c9dc992784 | e99e0ada2b1b080c223d819c80e5a175c3170aa2 | refs/heads/master | 2020-06-25T07:30:48.082558 | 2019-07-12T19:11:49 | 2019-07-12T19:11:49 | 199,246,220 | 1 | 0 | null | true | 2019-07-28T05:26:15 | 2019-07-28T05:26:14 | 2019-07-12T19:12:22 | 2019-07-12T20:13:02 | 341 | 0 | 0 | 0 | null | false | false | import json
class Trade:
def __init__(self, request):
self._request = request.request
self.rawRequest = request
self.authorized = request.auth
self.getTrades = 'https://www.roblox.com/my/money.aspx/getmyitemtrades'
self.action = 'https://www.roblox.com/trade/tradehandler.ashx'
self.tradeFormat = {
'AgentID': '',
'OfferList': [],
'OfferRobux': 0,
'OfferValue': 0
}
def getTradeList(self):
data = {
'startindex': 0,
'statustype': 'inbound'
}
r = self._request(url=self.getTrades, data=json.dumps(data), method='POST')
return json.loads(json.loads(r)['d'])
def getTrade(self, tradeId):
data = {
'TradeID': tradeId,
'cmd': 'pull'
}
r = self._request(url=self.action, data=json.dumps(data), method='POST')
return json.loads(r)
def acceptTrade(self, tradeId):
data = {
'TradeID': tradeId,
'cmd': 'accept'
}
r = self._request(url=self.action, data=json.dumps(data), method='POST')
return json.loads(r)
def declineTrade(self, tradeId):
data = {
'TradeID': tradeId,
'cmd': 'decline'
}
r = self._request(url=self.action, data=json.dumps(data), method='POST')
return json.loads(r)
def sendTrade(self, id: int, SendItems: list, GetItems: list):
selfId = self.rawRequest.user_info['Id']
url = f'https://inventory.roblox.com/v1/users/{selfId}/assets/collectibles?cursor=&sortOrder=Desc&limit=100'
r = self._request(url=url)
data = json.loads(r)
TradeJSON = {}
TradeJSON['AgentOfferList'] = [{
'AgentOfferList': [],
'IsActive': False,
'TradeStatus': 'Open'
}]
tradeMe = {
'AgentID': selfId,
'OfferList': [],
'OfferRobux': 0,
'OfferValue': 0
}
for item in data['data']:
if (len(list(filter(lambda x: str(x) == str(item['assetId']), SendItems))) > 0):
assetId = item['assetId']
tradeMe['OfferList'].append({
'UserAssetID': item['userAssetId'],
'Name': item['name'].replace(' ', '+'),
'ItemLink': f'https://www.roblox.com/catalog/{assetId}/redirect',
'ImageLink': f'https://www.roblox.com/asset-thumbnail/image?assetId={assetId}&height=110&width=110',
'AveragePrice': item['recentAveragePrice'],
'OriginalPrice': item['originalPrice'],
'SerialNumber': item['serialNumber'],
'SerialNumberTotal': item['assetStock'],
'MembershipLevel': item['buildersClubMembershipType']
})
tradeMe['OfferValue'] = tradeMe['OfferValue'] + int(item['recentAveragePrice'])
TradeJSON['AgentOfferList'].append(tradeMe)
#
# check for items to get from trade
#
url = f'https://inventory.roblox.com/v1/users/{id}/assets/collectibles?cursor=&sortOrder=Desc&limit=100'
userItems = self._request(url=url, method='GET')
userItems = json.loads(userItems)
tradeMe = self.tradeFormat
for item in userItems['data']:
if (len(list(filter(lambda x: str(x) == str(item['assetId']), GetItems))) > 0):
tradeMe['AgentID'] = id
assetId = item['assetId']
tradeMe['OfferList'].append({
'UserAssetID': item['userAssetId'],
'Name': item['name'].replace(' ', '+'),
'ItemLink': f'https://www.roblox.com/catalog/{assetId}/redirect',
'ImageLink': f'https://www.roblox.com/asset-thumbnail/image?assetId={assetId}&height=110&width=110',
'AveragePrice': item['recentAveragePrice'],
'OriginalPrice': item['originalPrice'],
'SerialNumber': item['serialNumber'],
'SerialNumberTotal': item['assetStock'],
'MembershipLevel': item['buildersClubMembershipType']
})
tradeMe['OfferValue'] = tradeMe['OfferValue'] + int(item['recentAveragePrice'])
TradeJSON['AgentOfferList'].append(tradeMe)
#Send data to roblox
data = json.dumps({
'cmd': 'send',
'TradeJSON': json.dumps(TradeJSON)
})
r = self._request(url='https://www.roblox.com/Trade/tradehandler.ashx', data=data, method='POST')
return json.loads(r.text)
| UTF-8 | Python | false | false | 4,733 | py | 6 | Trade.py | 5 | 0.532221 | 0.526516 | 0 | 116 | 39.767241 | 120 |
yhyuan/SportFish | 15,736,760,199,341 | 468ba1d091091196130d8d5f68a20be359222d6f | 82d106b78adf150fdba26640914ca61124268445 | /SportFish.py | a9289518869726d185a0de60d65b24d40b5c2dbf | []
| no_license | https://github.com/yhyuan/SportFish | 4b89d3d952a7ad71078c8407fd264cc3186b31e1 | 5699f28ae24d307e98667041be6326c52ded91b1 | refs/heads/master | 2016-09-11T00:58:54.756495 | 2015-04-27T16:56:10 | 2015-04-27T16:56:10 | 12,625,929 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # This script reads a table named FISH_ADVISORY to generate a file geodatabase.
import sys, arcpy, os, zipfile, time
reload(sys)
sys.setdefaultencoding("latin-1")
import cx_Oracle
from datetime import date
start_time = time.time()
def createFeatureClass(featureName, featureData, featureFieldList, featureInsertCursorFields):
print "Create " + featureName + " feature class"
featureNameNAD83 = featureName + "_NAD83"
featureNameNAD83Path = arcpy.env.workspace + "\\" + featureNameNAD83
arcpy.CreateFeatureclass_management(arcpy.env.workspace, featureNameNAD83, "POINT", "", "DISABLED", "DISABLED", "", "", "0", "0", "0")
# Process: Define Projection
arcpy.DefineProjection_management(featureNameNAD83Path, "GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]")
# Process: Add Fields
for featrueField in featureFieldList:
arcpy.AddField_management(featureNameNAD83Path, featrueField[0], featrueField[1], featrueField[2], featrueField[3], featrueField[4], featrueField[5], featrueField[6], featrueField[7], featrueField[8])
# Process: Append the records
cntr = 1
try:
with arcpy.da.InsertCursor(featureNameNAD83, featureInsertCursorFields) as cur:
for rowValue in featureData:
cur.insertRow(rowValue)
cntr = cntr + 1
except Exception as e:
print "\tError: " + featureName + ": " + e.message
# Change the projection to web mercator
arcpy.Project_management(featureNameNAD83Path, arcpy.env.workspace + "\\" + featureName, "PROJCS['WGS_1984_Web_Mercator_Auxiliary_Sphere',GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Mercator_Auxiliary_Sphere'],PARAMETER['False_Easting',0.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',0.0],PARAMETER['Standard_Parallel_1',0.0],PARAMETER['Auxiliary_Sphere_Type',0.0],UNIT['Meter',1.0]]", "NAD_1983_To_WGS_1984_5", "GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]")
arcpy.FeatureClassToShapefile_conversion([featureNameNAD83Path], OUTPUT_PATH + "\\Shapefile")
arcpy.Delete_management(featureNameNAD83Path, "FeatureClass")
print "Finish " + featureName + " feature class."
def createTextFile(fileName, rows, featureFieldList):
f = open (fileName,"w")
f.write("\t".join(map(lambda field: field[0], featureFieldList)) + "\r\n")
f.write("\r\n".join(map(lambda row: "\t".join(map(lambda item: str(item), row[1:])), rows)))
f.close()
OUTPUT_PATH = "output"
INPUT_PATH = "input"
if arcpy.Exists(OUTPUT_PATH + "\\SportFish.gdb"):
os.system("rmdir " + OUTPUT_PATH + "\\SportFish.gdb /s /q")
os.system("del " + OUTPUT_PATH + "\\*SportFish*.*")
os.system("del " + OUTPUT_PATH + "\\Shapefile\\*SportFish.*")
arcpy.CreateFileGDB_management(OUTPUT_PATH, "SportFish", "9.3")
arcpy.env.workspace = OUTPUT_PATH + "\\SportFish.gdb"
# Read password file to get the password.
file = open("password.txt")
password = file.readline()
file.close()
file = open("username.txt")
username = file.readline()
file.close()
connection = cx_Oracle.connect(username + '/' + password + '@sde')
cursor = connection.cursor()
speciesEnglishURLDict = {}
speciesFrenchURLDict = {}
file = open(INPUT_PATH + '\\Species_URLs.txt', 'r')
i = 0
for line in file:
i = i + 1
if i == 1:
continue
items = line.strip().split('\t')
if items[2] == 'English':
if len(items) == 4:
speciesEnglishURLDict[items[1]] = items[3]
else:
speciesEnglishURLDict[items[1]] = ""
else:
if len(items) == 4:
speciesFrenchURLDict[items[1]] = items[3]
else:
speciesFrenchURLDict[items[1]] = ""
# Generate SPECIES feature class.
featureName = "SPECIES"
featureFieldList = [["SPECIES_CODE", "TEXT", "", "", "", "", "NON_NULLABLE", "REQUIRED", ""], ["SPECNAME", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", ""], ["NOM_D_ESPECE", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", ""], ["SPECIES_URL_EN", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", ""], ["SPECIES_URL_FR", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", ""]]
featureInsertCursorFields = tuple(["SHAPE@XY"] + map(lambda field: field[0], featureFieldList))
cursor.execute('SELECT SPECIES_CODE, SPECNAME, NOM_D_ESPECE FROM FISH_ADVISORY')
rows = map(lambda row: [(0, 0), "" if (row[0] is None) else row[0], "" if (row[1] is None) else row[1], "" if (row[2] is None) else row[2]] + [speciesEnglishURLDict.get(row[0],''), speciesFrenchURLDict.get(row[0],'')], list(set(cursor.fetchall())))
#print len(rows)
createFeatureClass(featureName, rows, featureFieldList, featureInsertCursorFields)
speciesDict = {}
for row in rows:
speciesDict[row[1]] = [row[2], row[3]]
createTextFile(OUTPUT_PATH + "\\TXT\\" + featureName + ".txt", rows, featureFieldList)
# Generate ADVISORIES feature class.
featureName = "ADVISORIES"
featureFieldList = [["GUIDE_WATERBODY_CODE", "TEXT", "", "", "", "", "NON_NULLABLE", "REQUIRED", ""], ["SPECIES_CODE", "TEXT", "", "", "", "", "NON_NULLABLE", "REQUIRED", ""], ["POPULATION_TYPE_ID", "LONG", "", "", "", "", "NON_NULLABLE", "REQUIRED", ""], ["LENGTH_CATEGORY_ID", "LONG", "", "", "", "", "NON_NULLABLE", "REQUIRED", ""], ["ADV_LEVEL", "LONG", "", "", "", "", "NON_NULLABLE", "REQUIRED", ""], ["ADV_CAUSE_ID", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", ""]]
featureInsertCursorFields = tuple(["SHAPE@XY"] + map(lambda field: field[0], featureFieldList))
cursor.execute('SELECT GUIDE_WATERBODY_CODE, SPECIES_CODE, POPULATION_TYPE_ID, LENGTH_CATEGORY_ID, ADV_LEVEL, ADV_CAUSE_ID FROM FISH_ADVISORY')
rows = map(lambda row: [(0, 0)] + list(row), cursor.fetchall())
createFeatureClass(featureName, rows, featureFieldList, featureInsertCursorFields)
createTextFile(OUTPUT_PATH + "\\TXT\\" + featureName + ".txt", rows, featureFieldList)
# Generate POPULATION_TYPE feature class.
featureName = "POPULATION_TYPE"
featureFieldList = [["POPULATION_TYPE_ID", "LONG", "", "", "", "", "NON_NULLABLE", "REQUIRED", ""], ["POPULATION_TYPE_DESC", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", ""]]
featureInsertCursorFields = tuple(["SHAPE@XY"] + map(lambda field: field[0], featureFieldList))
cursor.execute('SELECT POPULATION_TYPE_ID, POPULATION_TYPE_DESC FROM FISH_ADVISORY')
rows = map(lambda row: [(0, 0)] + list(row), list(set(cursor.fetchall())))
createFeatureClass(featureName, rows, featureFieldList, featureInsertCursorFields)
createTextFile(OUTPUT_PATH + "\\TXT\\" + featureName + ".txt", rows, featureFieldList)
# Generate LENGTH_CATEGORY feature class.
featureName = "LENGTH_CATEGORY"
featureFieldList = [["LENGTH_CATEGORY_ID", "LONG", "", "", "", "", "NON_NULLABLE", "REQUIRED", ""], ["LENGTH_CATEGORY_LABEL", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", ""]]
featureInsertCursorFields = tuple(["SHAPE@XY"] + map(lambda field: field[0], featureFieldList))
cursor.execute('SELECT LENGTH_CATEGORY_ID, LENGTH_CATEGORY_LABEL FROM FISH_ADVISORY')
rows = map(lambda row: [(0, 0)] + list(row), list(set(cursor.fetchall())))
createFeatureClass(featureName, rows, featureFieldList, featureInsertCursorFields)
createTextFile(OUTPUT_PATH + "\\TXT\\" + featureName + ".txt", rows, featureFieldList)
# Generate ADV_CAUSE feature class.
featureName = "ADV_CAUSE"
featureFieldList = [["ADV_CAUSE_ID", "LONG", "", "", "", "", "NON_NULLABLE", "REQUIRED", ""], ["ADV_CAUSE_DESC", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", ""]]
featureInsertCursorFields = tuple(["SHAPE@XY"] + map(lambda field: field[0], featureFieldList))
cursor.execute('SELECT ADV_CAUSE_ID, ADV_CAUSE_DESC FROM FISH_ADVISORY WHERE ADV_CAUSE_ID IS NOT NULL')
rows = map(lambda row: [(0, 0)] + list(row), list(set(cursor.fetchall())))
createFeatureClass(featureName, rows, featureFieldList, featureInsertCursorFields)
createTextFile(OUTPUT_PATH + "\\TXT\\" + featureName + ".txt", rows, featureFieldList)
# Generate GUIDELOCATIONS feature class.
def convertLatLngString(latlng):
latlngStr = str(latlng)
degrees = latlngStr[:2]
minutes = latlngStr[2:4]
seconds = "00"
if(len(latlngStr) > 5):
seconds = latlngStr[4:]
elif (len(latlngStr) == 5):
seconds = latlngStr[4:] + "0"
return degrees + minutes + seconds
def convertLatLng(latlng):
latlngStr = str(latlng)
degrees = int(latlngStr[:2])
minutes = int(latlngStr[2:4])
seconds = 0
if(len(latlngStr) > 4):
seconds = int(latlngStr[4:])
return (degrees + minutes/60.0 + seconds/3600.0)
def getSpeciesNames(speciesList, language):
index = 1
if (language == "EN"):
index = 0
speciesNames = map(lambda species: speciesDict[species][index].replace(" ", "_").decode('latin-1').upper(), speciesList)
return "-" + "-".join(speciesNames) + "-"
def getLocationDescription(location, language):
locDesc = [" ", " "]
if((not(location is None)) and ("|" in location)):
locDesc = location.split("|")
if (language == "EN"):
return locDesc[0]
else:
return locDesc[1]
cursor.execute('SELECT GUIDE_WATERBODY_CODE, SPECIES_CODE FROM FISH_ADVISORY')
rows = cursor.fetchall()
waterbodySpeciesDict = {}
for row in rows:
if row[0] in waterbodySpeciesDict:
waterbodySpeciesDict[row[0]].append(row[1])
else:
waterbodySpeciesDict[row[0]] = [row[1]]
featureName = "GUIDELOCATIONS"
featureFieldList = [["WATERBODYC", "LONG", "", "", "", "", "NON_NULLABLE", "REQUIRED", ""], ["LATITUDE", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", ""], ["LONGITUDE", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", ""], ["LAT_DISPLAY", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", ""], ["LONG_DISPLAY", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", ""], ["SPECIES_EN", "TEXT", "", "", "4000", "", "NULLABLE", "NON_REQUIRED", ""], ["SPECIES_FR", "TEXT", "", "", "4000", "", "NULLABLE", "NON_REQUIRED", ""], ["LOCNAME_EN", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", ""], ["LOCNAME_FR", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", ""], ["GUIDELOC_EN", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", ""], ["GUIDELOC_FR", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", ""]]
featureInsertCursorFields = tuple(["SHAPE@XY"] + map(lambda field: field[0], featureFieldList))
cursor.execute('SELECT GUIDE_WATERBODY_CODE, GUIDE_LOCNAME_ENG, GUIDE_LOCNAME_FR, LATITUDE, LONGITUDE, GUIDE_LOCDESC FROM FISH_ADVISORY')
rows = map(lambda row: [(-convertLatLng(row[4]), convertLatLng(row[3]))] + [row[0], convertLatLng(row[3]), -convertLatLng(row[4]), convertLatLngString(row[3]), convertLatLngString(row[4]), getSpeciesNames(waterbodySpeciesDict[row[0]], "EN"), getSpeciesNames(waterbodySpeciesDict[row[0]], "FR"), row[1], row[2], getLocationDescription(row[5], "EN"), getLocationDescription(row[5], "FR")], list(set(cursor.fetchall())))
createFeatureClass(featureName, rows, featureFieldList, featureInsertCursorFields)
print len(rows)
featureFieldList = featureFieldList[:5] + featureFieldList[7:]
rows = map(lambda row: row[:6] + row[8:], rows)
createTextFile(OUTPUT_PATH + "\\TXT\\" + featureName + ".txt", rows, featureFieldList)
# Process: Add Attribute Index
arcpy.AddIndex_management(arcpy.env.workspace + "\\GUIDELOCATIONS", "SPECIES_EN;SPECIES_FR;LOCNAME_EN;LOCNAME_FR", "GUIDELOCATIONSIndex", "NON_UNIQUE", "NON_ASCENDING")
arcpy.AddIndex_management(arcpy.env.workspace + "\\ADVISORIES", "GUIDE_WATERBODY_CODE", "ADVISORIESIndex", "NON_UNIQUE", "NON_ASCENDING")
arcpy.AddIndex_management(arcpy.env.workspace + "\\SPECIES", "SPECIES_CODE", "SPECIESIndex", "NON_UNIQUE", "NON_ASCENDING")
# Prepare the msd, mxd, and readme.txt
os.system("copy " + INPUT_PATH + "\\SportFish.msd " + OUTPUT_PATH)
os.system("copy " + INPUT_PATH + "\\SportFish.mxd " + OUTPUT_PATH)
f = open (INPUT_PATH + "\\readme_SportFish.txt","r")
data = f.read()
f.close()
import time
dateString = time.strftime("%Y/%m/%d", time.localtime())
data = data.replace("[DATE]", dateString)
f = open (OUTPUT_PATH + "\\readme_SportFish.txt","w")
f.write(data)
f.close()
# Compress the msd, mxd, readme.txt and file geodatabase together into a zip file named SportFish.zip, which will be send to web service publisher.
target_dir = OUTPUT_PATH + '\\TXT'
zip = zipfile.ZipFile(OUTPUT_PATH + '\\SportFish.TXT.zip', 'w', zipfile.ZIP_DEFLATED)
rootlen = len(target_dir) + 1
for base, dirs, files in os.walk(target_dir):
for file in files:
fn = os.path.join(base, file)
zip.write(fn, "TXT\\" + fn[rootlen:])
zip.close()
# Compress the msd, mxd, readme.txt and file geodatabase together into a zip file named SportFish.zip, which will be send to web service publisher.
target_dir = OUTPUT_PATH + '\\SportFish.gdb'
zip = zipfile.ZipFile(OUTPUT_PATH + '\\SportFish.zip', 'w', zipfile.ZIP_DEFLATED)
rootlen = len(target_dir) + 1
for base, dirs, files in os.walk(target_dir):
for file in files:
fn = os.path.join(base, file)
zip.write(fn, "SportFish.gdb\\" + fn[rootlen:])
zip.write(OUTPUT_PATH + '\\SportFish.msd', "SportFish.msd")
zip.write(OUTPUT_PATH + '\\SportFish.mxd', "SportFish.mxd")
zip.write(OUTPUT_PATH + '\\readme_SportFish.txt', "readme_SportFish.txt")
zip.close()
# Generate Data download file .
"\t".join
cursor.execute('SELECT GUIDE_WATERBODY_CODE, GUIDE_LOCNAME_ENG, GUIDE_LOCNAME_FR, LATITUDE, LONGITUDE, SPECIES_CODE, SPECNAME, NOM_D_ESPECE, POPULATION_TYPE_ID, POPULATION_TYPE_DESC, LENGTH_CATEGORY_ID, LENGTH_CATEGORY_LABEL, ADV_LEVEL, ADV_CAUSE_ID, ADV_CAUSE_DESC, GUIDE_YEAR, GUIDE_LOCDESC FROM FISH_ADVISORY')
rows = map(lambda row: map(lambda item: ("\"" + item + "\"") if (isinstance(item, str)) else ("" if (item is None) else str(int(item))), list(row)), cursor.fetchall())
rows = map(lambda row: row[:3] + [(row[3] + "00") if (len(row[3]) == 4) else ((row[3] + "0") if (len(row[3]) == 5) else (row[3]))] + row[4:], rows)
rows = map(lambda row: row[:4] + [(row[4] + "00") if (len(row[4]) == 4) else ((row[4] + "0") if (len(row[4]) == 5) else (row[4]))] + row[5:], rows)
rows = map(lambda row: "\t".join(row), rows)
f = open (OUTPUT_PATH + "\FishGuide.txt","w")
f.write("GUIDE_WATERBODY_CODE\tGUIDE_LOCNAME_ENG\tGUIDE_LOCNAME_FR\tLATITUDE\tLONGITUDE\tSPECIES_CODE\tSPECNAME\tNOM_D_ESPECE\tPOPULATION_TYPE_ID\tPOPULATION_TYPE_DESC\tLENGTH_CATEGORY_ID\tLENGTH_CATEGORY_LABEL\tADV_LEVEL\tADV_CAUSE_ID\tADV_CAUSE_DESC\tGUIDE_YEAR\tGUIDE_LOCDESC\n")
f.write("\n".join(rows))
f.close()
elapsed_time = time.time() - start_time
print elapsed_time
| UTF-8 | Python | false | false | 14,447 | py | 5 | SportFish.py | 2 | 0.680833 | 0.657022 | 0 | 239 | 59.447699 | 825 |
tianmingbo/LeetCode | 6,760,278,570,234 | fc00489323ef7cf3fce730da3a80d445eb440503 | f28c31b437d1e6c5b4102a2033a3e373e6bbaa2f | /LeetCode/ๆฐ็ป/670. ๆๅคงไบคๆข.py | 912f0465e9c4c2cea3f19a2bcde1b8602725fa69 | []
| no_license | https://github.com/tianmingbo/LeetCode | 6a12923a0bf9001ab1cbe0256e9a3ad8ab5a7681 | 403db1af8a1dfbdad0c8e5ebda0c479a98b0fb43 | refs/heads/master | 2023-04-03T21:08:17.934636 | 2023-03-28T03:31:16 | 2023-03-28T03:31:16 | 290,133,658 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2021/1/8 20:52
# @Author : tmb
class Solution:
def maximumSwap(self, num: int) -> int:
nums = [int(i) for i in str(num)]
max_num = -1
tmp = [0 for _ in range(len(nums))]
for i in range(len(nums) - 1, -1, -1):
if nums[i] > max_num:
tmp[i] = i
max_num = nums[i]
else:
tmp[i] = tmp[i + 1]
for j in range(len(nums)):
if tmp[j] == j or nums[j] == nums[tmp[j]]: # ้ฒๆญข1993ๆๅไบคๆข
continue
nums[j], nums[tmp[j]] = nums[tmp[j]], nums[j]
break
nums = [str(i) for i in nums]
return int(''.join(nums))
if __name__ == '__main__':
a = Solution()
print(a.maximumSwap(1993))
| UTF-8 | Python | false | false | 828 | py | 921 | 670. ๆๅคงไบคๆข.py | 815 | 0.426471 | 0.395833 | 0 | 27 | 28.222222 | 68 |
Leviona/barbershop-site | 10,411,000,745,949 | 25b4a36075569a30a029d8c712009636a5d4a7d4 | 85b218e0e4456404e2bc6be6b13e3fec11c5e86b | /appointment/views.py | 946ea56fd769f198e21309dde4ac5a5c3804c1c1 | []
| no_license | https://github.com/Leviona/barbershop-site | 6d6fddb0b0c4c7c011cf676b5ccdb6b5e713bbee | f932fa53a765da219bcff9fbac9904d6863ef1ce | refs/heads/master | 2020-03-17T04:56:53.382824 | 2018-06-30T03:51:02 | 2018-06-30T03:51:02 | 133,295,856 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
# Create your views here.
def home_page(request):
if request.method == "POST":
time = request.POST.get("time")
return render(request, 'appointment/home_page.html', {'time': time})
return render(request, 'appointment/home_page.html') | UTF-8 | Python | false | false | 279 | py | 3 | views.py | 2 | 0.72043 | 0.72043 | 0 | 12 | 22.333333 | 70 |
DamianoGiani/Tesi | 5,660,766,923,051 | d27aa0e2ecb21306840cee8e7c095eb41b7b7383 | fe891e0c86268b1292ffed78328c07b06b48a6f9 | /encode.py | 7f8ee5e9760a3747328ea3493b03a21d259ec55b | []
| no_license | https://github.com/DamianoGiani/Tesi | 4dd05ee4b58ce401a0e80005e2b105d9f40b1b64 | 740cafe0aa5e471e0411ce805a48513a1c3ca558 | refs/heads/master | 2022-12-15T20:52:52.950854 | 2020-09-16T11:05:43 | 2020-09-16T11:05:43 | 292,057,365 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from PIL import Image
img = Image.open("IMG_2362.JPG")
out = img.convert("RGB")
img_array = np.array(out)
print(img_array.shape[0]*img_array.shape[1])
for x in range(0, img_array.shape[0]):
if x%2==0:
for y in range(0, img_array.shape[1]):
if y%2==0:
for z in range(1,img_array.shape[2]):
img_array[x][y][z]=0
else:
for z in range(0, img_array.shape[2], 2):
img_array[x][y][z] = 0
else:
for y in range(0, img_array.shape[1]):
if y%2==0:
for z in range(0, img_array.shape[2], 2):
img_array[x][y][z] = 0
else:
for z in range(0, img_array.shape[2] - 1):
img_array[x][y][z] = 0
#alcuni erano 3 zeri e perdevo dei bit
for x in range(0, img_array.shape[0]):
for y in range(0, img_array.shape[1]):
if img_array[x][y][0]==0 and img_array[x][y][1]==0 and img_array[x][y][2]==0:
img_array[x][y][1]=1
img_array = img_array[img_array != 0]
img_array=np.array(img_array)
out=[]
for x in range(0,img_array.shape[0]):
out.append((img_array[x]/0xFF) * 0x3FFF)
out=np.array(out).astype(np.float32)
OutputFile = open('OutputFilePath','wb')
OutputFile1= open('OutputFilePath1','wb')
BlockArray= np.array(out).astype(np.uint16)
Blockarray1= np.array(out).astype(np.uint8)
Blockarray1.tofile(OutputFile1)
BlockArray.tofile(OutputFile)
OutputFile.close()
OutputFile1.close() | UTF-8 | Python | false | false | 1,566 | py | 7 | encode.py | 5 | 0.554278 | 0.514049 | 0 | 47 | 31.361702 | 85 |
fabian57fabian/Watermark-DnCNN | 2,156,073,611,521 | 314caf995f09aa81ff9048aa89336bc547a6d2a6 | f5f3d26ac4de408e57041bfb954de4e8cf14623f | /utility.py | 8014ca145986f056a6216e39dcdcde08ea8e785d | []
| no_license | https://github.com/fabian57fabian/Watermark-DnCNN | 61995ea4a8288dbf8dbacefed58c56c69a9c6102 | e1f7486e6683c72514d95afd764415e08287a8e9 | refs/heads/master | 2023-06-17T14:21:23.644681 | 2021-07-09T08:46:53 | 2021-07-09T08:46:53 | 335,776,389 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
import os
import cv2
import numpy as np
import time
import json
def ResizeWithAspectRatio(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize *image* to the requested width OR height, keeping aspect ratio.

    If both dimensions are None the image is returned untouched; if both are
    given, *width* takes precedence and the height is recomputed.
    """
    h, w = image.shape[:2]
    if width is None and height is None:
        return image
    if width is None:
        scale = height / float(h)
        target = (int(w * scale), height)
    else:
        scale = width / float(w)
        target = (width, int(h * scale))
    return cv2.resize(image, target, interpolation=inter)
def show_image(img, title="", wait=True):
    """Display *img* in an OpenCV window (downscaled to at most 1000 px tall).

    Pauses half a second after showing; blocks for a keypress when *wait*.
    """
    shown = ResizeWithAspectRatio(img, height=1000) if img.shape[0] > 1000 else img
    cv2.imshow(title, shown)
    time.sleep(.5)
    if wait:
        cv2.waitKey(0)
def create_folder(path_folder):
    """Create *path_folder* (including parents), reporting the outcome on stdout.

    Returns the path unchanged so the call can be chained.
    """
    try:
        os.makedirs(path_folder)
    except FileExistsError:
        print(f'directory {path_folder} already exist')
    except OSError:
        print(f'creation of the directory {path_folder} failed')
    else:
        print(f"Succesfully created the directory {path_folder} ")
    return path_folder
def get_last_model(path: str):
    """Return the name (without ``.ckpt.meta``) of the lexicographically last checkpoint in *path*."""
    suffix = '.ckpt.meta'
    checkpoints = [name[:-len(suffix)] for name in sorted(os.listdir(path)) if name.endswith(suffix)]
    return checkpoints[-1]
def get_first_model(path: str):
    """Return the name (without ``.ckpt.meta``) of the lexicographically first checkpoint in *path*."""
    suffix = '.ckpt.meta'
    checkpoints = [name[:-len(suffix)] for name in sorted(os.listdir(path)) if name.endswith(suffix)]
    return checkpoints[0]
def stack_images_row(eval_imgs: list):
    """Concatenate the given images horizontally into one row image."""
    return np.hstack(eval_imgs)
def stack_images_square(eval_imgs: list):
    """Tile the images into the smallest square grid that holds them all.

    Images are laid out row-major.  Empty grid cells are filled with black
    images of the same shape and dtype as the first input (generalized from
    the previous hard-coded 2-D uint8 filler, so colour/float images tile
    correctly too; identical output for uint8 grayscale inputs).
    """
    side = int(math.ceil(math.sqrt(len(eval_imgs))))
    filler = np.zeros_like(eval_imgs[0])
    rows = []
    for r in range(side):
        cells = [
            eval_imgs[r * side + c] if r * side + c < len(eval_imgs) else filler
            for c in range(side)
        ]
        rows.append(np.hstack(cells))
    return np.vstack(rows)
def create_text_image(image, text: str):
    """Draw *text* in red onto *image* (in place) and return it."""
    # NOTE(review): the y-coordinate is derived from image.shape[1] (the
    # width); presumably shape[0] // 2 was intended for vertical centring —
    # confirm with callers before changing the placement.
    origin = (0, int(image.shape[1] / 2))
    return cv2.putText(
        image, text, origin, cv2.FONT_HERSHEY_SIMPLEX,
        1, (0, 0, 255), 2, cv2.LINE_AA,
    )
def create_empty_image(w: int, h: int):
    """Return an all-white uint8 image with *w* rows and *h* columns."""
    return np.full((w, h), 255, dtype=np.uint8)
def psnr(img1, img2):
    """Peak signal-to-noise ratio (in dB) between two 8-bit images.

    Inputs are clipped to [0, 255] first; both grayscale (2-D) and colour
    (3-D) arrays are supported.  Returns ``inf`` for identical images —
    the previous code reached the same value via a divide-by-zero, but
    emitted a NumPy RuntimeWarning on the way.
    """
    img1 = np.clip(img1, 0, 255).astype(np.float32)
    img2 = np.clip(img2, 0, 255).astype(np.float32)
    if img1.ndim == 2:
        m, n = img1.shape
        k = 1
    elif img1.ndim == 3:
        m, n, k = img1.shape
    B = 8                      # bit depth of the samples
    MAX = 2 ** B - 1           # 255, the peak signal value
    MSE = np.sum(np.power(img1 - img2, 2)) / (m * n * k)
    if MSE == 0:
        return float('inf')
    return 20 * np.log10(MAX / np.sqrt(MSE))
def save_json_results(datas_json: dict, file_path: str):
    """Serialize *datas_json* to *file_path* as pretty-printed (indent=4) JSON."""
    with open(file_path, 'w') as handle:
        json.dump(datas_json, handle, indent=4)
paulmorio/grusData | 5,136,780,933,584 | 61485fd7e6b16947c36708a5be48c9fdcdc521ef | 6d92e70f8329b6db68fa83e753fccc681d2f0b2e | /gradientDescent/gradient.py | f2e3ea91cea3b3548da652334d094b6adabb5f07 | [
"MIT"
]
| permissive | https://github.com/paulmorio/grusData | 2417ba15e05d7475bca1f76d7fa33a83dc7612b5 | 3482f9c897e70493fd5320381607cf42c5c30eb5 | refs/heads/master | 2021-01-10T05:25:47.083756 | 2018-02-09T22:45:10 | 2018-02-09T22:45:10 | 48,182,147 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
def step(v, direction, step_size):
    """Return the point reached by moving *step_size* along *direction* from *v*."""
    return [coord + step_size * delta for coord, delta in zip(v, direction)]
def sum_of_squares_gradient(v):
    """Gradient of f(v) = sum(v_i ** 2): elementwise 2 * v_i."""
    return [2 * component for component in v]
import math
import random


def _distance(u, w):
    """Euclidean distance between two equal-length vectors."""
    return math.sqrt(sum((u_i - w_i) ** 2 for u_i, w_i in zip(u, w)))


# pick a random starting point
v = [random.randint(-10, 10) for i in range(3)]

tolerance = 0.0000001

# Descend the gradient of f(v) = sum of squares (minimum at the origin),
# stopping once successive iterates are closer than the tolerance.
while True:
    gradient = sum_of_squares_gradient(v)
    next_v = step(v, gradient, -0.01)
    # BUG FIX: `distance` was never defined here (NameError at runtime);
    # use the local Euclidean distance helper instead.
    if _distance(next_v, v) < tolerance:
        break
    v = next_v
shivambhatia/selenium_ocr | 14,963,666,065,279 | 055182737b61dfd19a5983eedb2a9b03f3ea5f03 | b97021ba02733b10ed4a62116906241b3dd9d82f | /app.py | cc0bd97c73250396caf3344478551ce72471e00a | []
| no_license | https://github.com/shivambhatia/selenium_ocr | 16f5a78799e5cea3c9273191e969f5828e5a1e73 | 40de54f341af3e933b70be5836ae986d257e0951 | refs/heads/main | 2023-08-11T10:55:58.181921 | 2021-10-07T00:52:44 | 2021-10-07T00:52:44 | 414,416,850 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Designed By: Shivam Bhatia
Date: 10/07/2021
Used to extract data from the given url using cloud vision api
to bypass the captcha text using OCR
"""
from flask import Flask, redirect, url_for, request
app = Flask(__name__)
from flask import jsonify
import detect
url = "https://drt.gov.in/front/page1_advocate.php"
@app.route('/getData', methods=['POST', 'GET'])
def getData():
    """Scrape the DRT advocate-search page for the given party.

    POST form params:
        name:       party name to search for
        schemaname: DRT/DRAT schema selector

    Returns a JSON list of table rows produced by ``detect.main``, or a
    plain-text error message for missing params / non-POST requests
    (unchanged response strings).
    """
    # Guard clause: only POST carries the form parameters.
    if request.method != 'POST':
        return "Invalid Request Use POST REQUEST ONLY WITH PARAMS"
    name = request.form.get('name', None)
    schemaname = request.form.get('schemaname', None)
    # `is None` (identity) instead of the previous `== None` comparison.
    if name is None or schemaname is None:
        return "Invalid Inputs"
    data = detect.main(url, name, schemaname)
    return jsonify(data)
if __name__ == '__main__':
    # Launch Flask's built-in development server with the debugger enabled.
    app.run(debug=True)
kosyachniy/crypto | 13,898,514,177,579 | 887138f1ab8db9c675b1e40063a127362bffa951 | 2f89785f8b2836447a7c0322bf93ad916f7dc335 | /func/main.py | 8536bc6acb66a699b465bb10dcd266cf526d674f | []
| no_license | https://github.com/kosyachniy/crypto | d76157b89a8c594d777c10d994838ab239eab891 | da2f585f73c2d8a7a0a0c272f8e0f77cb6edfe5f | refs/heads/master | 2021-03-24T12:53:38.914984 | 2018-02-02T13:06:46 | 2018-02-02T13:06:46 | 104,661,894 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from func.data import *
from func.telegram import *
from func.trade import stock, graph | UTF-8 | Python | false | false | 87 | py | 42 | main.py | 23 | 0.793103 | 0.793103 | 0 | 3 | 28.333333 | 35 |
nikolajbaer/robosub_pyweek6 | 17,265,768,531,009 | 2dbff70b933056c07f2537a07d152dd91e567941 | 0913d7165ae1cb28854dfe223730be5bd2dbf237 | /lib/main.py | f1edfbe63cdca76f654e2bbd2fdedbb3b2b22f48 | [
"MIT"
]
| permissive | https://github.com/nikolajbaer/robosub_pyweek6 | 33ea69e63668e3e41076d0bf2bfcd0846aebb3b5 | 2fe9bbb5d7a030c3a316fef0299f506055c67464 | refs/heads/main | 2023-05-05T21:22:03.683311 | 2021-05-29T21:14:44 | 2021-05-29T21:14:44 | 372,070,304 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''Game main module.
Contains the entry point used by the run_game.py script.
Feel free to put all your game code here, or in other modules in this "lib"
directory.
'''
import data
import game
#import pygletgame
def main():
    '''Entry point called by run_game.py: start the pygame game loop.'''
    #pygletgame.main()  # alternative pyglet front end, currently disabled
    game.main()
| UTF-8 | Python | false | false | 266 | py | 5 | main.py | 4 | 0.714286 | 0.714286 | 0 | 14 | 18 | 75 |
moe-halabia/netbox | 16,922,171,177,853 | 9d83feb208ec0d1f871f65eb7b633d55e62baae6 | 0baed54a80521e56383e3753eeef36180a907481 | /230_virtual_machines.py | f138886fd3cd626a66626da5615947525a294b6b | [
"Apache-2.0"
]
| permissive | https://github.com/moe-halabia/netbox | d499c56391446a15bfb5a474e87a9dd4616e5e80 | 562ff3f920064dc34e38b6f3e702bd6b783a3586 | refs/heads/main | 2023-04-07T03:19:10.733668 | 2021-04-11T16:30:44 | 2021-04-11T16:30:44 | 356,919,378 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from dcim.models import Site, Platform, DeviceRole
from virtualization.models import Cluster, VirtualMachine
from tenancy.models import Tenant
from extras.models import CustomField, CustomFieldValue
from startup_script_utils import load_yaml
import sys
# Seed data describing virtual machines; exit quietly when no initializer file exists.
virtual_machines = load_yaml('/opt/netbox/initializers/virtual_machines.yml')
if virtual_machines is None:
  sys.exit()
# Foreign-key fields that must appear on every entry:
# YAML key -> (model class, model field used for the lookup).
required_assocs = {
  'cluster': (Cluster, 'name')
}
# Foreign-key fields that are resolved the same way, but only when present.
optional_assocs = {
  'tenant': (Tenant, 'name'),
  'platform': (Platform, 'name'),
  'role': (DeviceRole, 'name')
}
# Create each virtual machine described in the YAML, resolving name-based
# references into model instances before the get_or_create call.
for params in virtual_machines:
  # Custom fields are applied separately after the VM exists.
  custom_fields = params.pop('custom_fields', None)
  # primary ips are handled later in `270_primary_ips.py`
  params.pop('primary_ip4', None)
  params.pop('primary_ip6', None)
  # Replace each required name reference with the matching model instance.
  for assoc, details in required_assocs.items():
    model, field = details
    query = { field: params.pop(assoc) }
    params[assoc] = model.objects.get(**query)
  # Same resolution for optional references, when the YAML supplies them.
  for assoc, details in optional_assocs.items():
    if assoc in params:
      model, field = details
      query = { field: params.pop(assoc) }
      params[assoc] = model.objects.get(**query)
  virtual_machine, created = VirtualMachine.objects.get_or_create(**params)
  # Custom-field values are attached only for VMs created in this run;
  # pre-existing VMs are left untouched.
  if created:
    if custom_fields is not None:
      for cf_name, cf_value in custom_fields.items():
        custom_field = CustomField.objects.get(name=cf_name)
        custom_field_value = CustomFieldValue.objects.create(
          field=custom_field,
          obj=virtual_machine,
          value=cf_value
        )
        virtual_machine.custom_field_values.add(custom_field_value)
    print("🖥️ Created virtual machine", virtual_machine.name)
| UTF-8 | Python | false | false | 1,656 | py | 14 | 230_virtual_machines.py | 12 | 0.691702 | 0.688674 | 0 | 56 | 28.482143 | 77 |
Novandev/gn_api | 11,605,001,671,064 | 2d0df3123329bc5e6a27a1b3b08e29e938fd493d | 7c40224d1887d0942357d2bae1b2ea3a95c2a920 | /config.py | 7022967e05f17620fe99d717ef6ffdcfc85d9283 | [
"MIT"
]
| permissive | https://github.com/Novandev/gn_api | 547b18f55635a93358d6ef01e6b3f475aaa10d09 | 08b071ae3916bb7a183d61843a2cd09e9fe15c7b | refs/heads/master | 2020-05-18T05:20:26.562442 | 2019-04-30T17:08:44 | 2019-04-30T17:08:44 | 184,194,781 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from elasticsearch import Elasticsearch, RequestsHttpConnection
from requests_aws4auth import AWS4Auth
import boto3,os
from dotenv import load_dotenv
load_dotenv()
# Load environment variables
host = os.getenv("HOST")
region = os.getenv("REGION")
service = os.getenv("SERVICE")
access_key = os.getenv("ACCESS")
secret_key = os.getenv("SECRET")
awsauth = AWS4Auth(access_key, secret_key, region, service)
es = Elasticsearch(
hosts = [{'host': host, 'port': 443}],
http_auth = awsauth,
use_ssl = True,
verify_certs = True,
connection_class = RequestsHttpConnection
)
# This will be used in other parts of the project for ElaticSearch queries
def elastic():
return es | UTF-8 | Python | false | false | 719 | py | 9 | config.py | 4 | 0.702364 | 0.692629 | 0 | 25 | 27.76 | 74 |
ugoiloh/zuri-projects | 10,247,792,000,941 | 779f849d1617b7c48147ea9e0b95d6d31dedd201 | 8ed3bffc2e9deb3f95b3a8486a4003f87d958de8 | /Django-projects/newsfeed/urls.py | 25cdb15ecfef56310a46998dad99f451db8dc918 | []
| no_license | https://github.com/ugoiloh/zuri-projects | 0b5f6220b28400094ef31717a36a2b583f9efaeb | a20f7be87d37b7aedddb666cea6c88ea1f3f026d | refs/heads/master | 2023-05-13T20:41:00.485764 | 2021-05-21T23:11:36 | 2021-05-21T23:11:36 | 356,688,939 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from django.urls import path
from .views import HomePage, PostDetailView
# URL namespace used when reversing, e.g. reverse('newsfeed:index').
app_name = 'newsfeed'
urlpatterns = [
    # NOTE(review): mounting the admin site inside an app's URLconf is
    # unusual; it is typically included once at project level — confirm.
    path('admin/', admin.site.urls),
    # Post list at the app root.
    path('', HomePage.as_view() , name='index'),
    # Detail view for a single post, looked up by primary key.
    path('detail/<int:pk>/', PostDetailView.as_view(), name='detail')
]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.