repo_name (stringlengths 7-111) | __id__ (int64 16.6k-19,705B) | blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 5-151) | content_id (stringlengths 40-40) | detected_licenses (list) | license_type (stringclasses 2 values) | repo_url (stringlengths 26-130) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringlengths 4-42) | visit_date (timestamp[ns]) | revision_date (timestamp[ns]) | committer_date (timestamp[ns]) | github_id (int64 14.6k-687M ⌀) | star_events_count (int64 0-209k) | fork_events_count (int64 0-110k) | gha_license_id (stringclasses 12 values) | gha_fork (bool 2 classes) | gha_event_created_at (timestamp[ns]) | gha_created_at (timestamp[ns]) | gha_updated_at (timestamp[ns]) | gha_pushed_at (timestamp[ns]) | gha_size (int64 0-10.2M ⌀) | gha_stargazers_count (int32 0-178k ⌀) | gha_forks_count (int32 0-88.9k ⌀) | gha_open_issues_count (int32 0-2.72k ⌀) | gha_language (stringlengths 1-16 ⌀) | gha_archived (bool 1 class) | gha_disabled (bool 1 class) | content (stringlengths 10-2.95M) | src_encoding (stringclasses 5 values) | language (stringclasses 1 value) | is_vendor (bool 2 classes) | is_generated (bool 2 classes) | length_bytes (int64 10-2.95M) | extension (stringclasses 19 values) | num_repo_files (int64 1-202k) | filename (stringlengths 4-112) | num_lang_files (int64 1-202k) | alphanum_fraction (float64 0.26-0.89) | alpha_fraction (float64 0.2-0.89) | hex_fraction (float64 0-0.09) | num_lines (int32 1-93.6k) | avg_line_length (float64 4.57-103) | max_line_length (int64 7-931)
irvingr/python | 18,468,359,381,595 | ee0c0cec9174d92545443bec62e7c859fb9deaa2 | 8597ef1dc3970b6ccae5034d7cf7935d067596f6 | /20_while.py | 8415426288ce48aa0f366185611a4c22cd51c9c5 | [] | no_license | https://github.com/irvingr/python | e3fc7a795682d3d5c15b33ce3347232dcc8e1ce1 | d8c179a19466ab8ee6685ff52b3f8bcc280245e2 | refs/heads/master | 2020-05-17T11:32:16.423106 | 2019-04-29T14:56:47 | 2019-04-29T14:56:47 | 183,687,447 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# while
valor = 1
fin = 10
while (valor < fin):
print(valor)
valor += 1
print("- - - - - - - - - BREAK - - - - - - - - - -")
# break
valor = 1
fin = 10
while (valor < fin):
print(valor)
valor += 1
if (valor == 5):
break
print("- - - - - - - - - CONTINUE - - - - - - - - - -")
# continue
valor = 1
fin = 10
while (valor < fin):
valor += 1
if (valor == 7):
continue
    print(valor)
| UTF-8 | Python | false | false | 432 | py | 53 | 20_while.py | 48 | 0.439815 | 0.407407 | 0 | 32 | 12.53125 | 55 |
ktertikas/MLHPrime | 13,271,448,988,252 | b20364547a83f2e6326efede7cf9035f2bbf047d | cc14c742de96fe9fef9209afc9ff370cfcb3e09a | /analysis/Metadata.py | 2c6fd16c021c08978c977bc40d60f19e442f91de | [] | no_license | https://github.com/ktertikas/MLHPrime | f0891def7554323a773f7c405ecb9fbf3b22a564 | ee9387d35a11bdec4e834ef4d95305be8dd71a98 | refs/heads/master | 2021-01-18T19:42:58.107896 | 2017-04-02T12:17:12 | 2017-04-02T12:17:12 | 86,908,205 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from bs4 import BeautifulSoup
import requests
def get_metadata(link):
r = requests.get(link)
    soup = BeautifulSoup(r.text, "html.parser")
    if soup.title:
        title_text = soup.title.text
    else:
        title_text = soup.h1.text
meta = soup.findAll(attrs={"name":"description"})
meta_text=""
for name in meta:
meta_text = meta_text + name["content"]
    sources = soup.findAll('img', {"src": True})
image_link = sources[0]["src"]
return (title_text, meta_text, image_link)
| UTF-8 | Python | false | false | 527 | py | 11 | Metadata.py | 6 | 0.641366 | 0.631879 | 0 | 20 | 25.35 | 53 |
databill86/rank-aggregation-with-ties | 13,048,110,668,187 | e886e8f7316613c2a8914dee6e3b0bf481a0621f | 1b9966ae15bee555ac0838d27cbdd0405c3e8bf0 | /sources/rnt/mediane/migrations/0022_auto_20180215_2155.py | d28a7dcb363d29a25cd6426f8d58b18c80b05cdb | ["Apache-2.0"] | permissive | https://github.com/databill86/rank-aggregation-with-ties | f3ef509401f07b05a4e34689245bb71c9c5efe50 | 63b30d7c1eece837ef23ffc9414c3538052d9b9c | refs/heads/master | 2020-04-02T17:04:41.213764 | 2018-10-05T07:51:09 | 2018-10-05T07:51:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-02-15 21:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mediane', '0021_distance_id_order'),
]
operations = [
migrations.CreateModel(
name='ResultsToProduceDecorator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.RenameField(
model_name='result',
old_name='results',
new_name='consensuses',
),
migrations.AddField(
model_name='resultstoproducedecorator',
name='result',
field=models.ForeignKey(help_text='The result to produce', on_delete=django.db.models.deletion.CASCADE, to='mediane.Result'),
),
]
| UTF-8 | Python | false | false | 971 | py | 127 | 0022_auto_20180215_2155.py | 99 | 0.594233 | 0.572606 | 0 | 32 | 29.34375 | 137 |
luwenchun/Automated_Test | 2,697,239,505,978 | e8f60a6f35d285f913c4c60d62d739802a105b54 | d7390fea6c7f712ee32be6d3478835d965d795e0 | /py26_02day/01±êʶ·û.py | 5760a9aebce248d97e46f18773a430f4e368c68b | [] | no_license | https://github.com/luwenchun/Automated_Test | 2f424655d80127e3ed98657869021a775beca868 | 79b9937cfc0841b0a80d4fd45d8ff467654b5b55 | refs/heads/master | 2021-02-10T15:23:08.446463 | 2020-03-26T10:39:38 | 2020-03-26T10:39:38 | 244,393,626 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
============================
Author:柠檬班-木森
Time:2019/12/25
E-mail:3247119728@qq.com
Company:湖南零檬信息技术有限公司
============================
"""
encoding='utf-8'
"""
Identifiers
Input: input
"""
name = "musen"
# snake_case naming (use this style for variable names; it is also recommended for function names)
max_number = 100
# PascalCase naming (recommended for class names)
MaxNumber = 100
# camelCase naming (fairly common for package and module names)
maxNumber = 100
# Output: print
print(max_number)
print("The weather is not very good today")
# Input: input
# Everything read in with input() is of string type
age = input("Please enter your age:")
# print("My age is:", age)
# print("My age is:", 18)
| UTF-8 | Python | false | false | 803 | py | 194 | 01±êʶ·û.py | 166 | 0.557875 | 0.500949 | 0 | 43 | 10.302326 | 32 |
KenN7/jasper-client | 16,827,681,903,801 | 5d62f80745654901d533e7eae46a705b883aac87 | cdb0beb13f8b0e768aa82dbbfdf0352573440463 | /client2/notifiersMod/__init__.py | 3242929528f67f98ae228a16e70f75ea4c7f3dca | ["MIT", "LicenseRef-scancode-other-permissive"] | permissive | https://github.com/KenN7/jasper-client | 6e8292f9ebc0d4ae788e5cf7fd607066c9bf91e8 | e3900b890edf635021c9911025cd0a7fdc3c8ef7 | refs/heads/master | 2021-01-09T06:30:15.816565 | 2014-04-27T19:49:43 | 2014-04-27T19:49:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from hour import Hour
| UTF-8 | Python | false | false | 22 | py | 10 | __init__.py | 9 | 0.818182 | 0.818182 | 0 | 1 | 21 | 21 |
shohta-tera/image | 17,540,646,440,801 | 9d161dc514662bd5c99f74e341a329989b60c970 | 79da739986053563fa81512744bb4290020e9343 | /テンプレートマッチング.py | 61f58633b81d5024c0cc99d53c4fc257611ec20a | [] | no_license | https://github.com/shohta-tera/image | 362475f3d65d1cbd1b21142f0c285adf7721e25b | ab520f5fa2b568f96e5eedcaa4ddd146124e05d1 | refs/heads/master | 2020-04-08T21:20:14.063521 | 2019-05-14T14:26:15 | 2019-05-14T14:26:15 | 159,739,621 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Matching a single template
import cv2
import numpy as np
# Load the images in grayscale
img = cv2.imread("osero4.jpg", 0)
temp = cv2.imread("tempw4.jpg", 0)
# Run template matching
# cv2.TM_CCOEFF_NORMED is chosen as the comparison method
result = cv2.matchTemplate(img, temp, cv2.TM_CCOEFF_NORMED)
# Similarity threshold (0 to 1)
threshold = 0.9
# Get the positions of the detected regions from the match result
# np.where returns the indices that satisfy the condition
loc = np.where(result >= threshold)
# Draw a rectangle around each detected region and save the image
result = cv2.imread("osero4.jpg")
w, h = temp.shape[::-1]
for top_left in zip(*loc[::-1]):
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(result, top_left, bottom_right, (255, 0, 0), 2)
cv2.imwrite("osero12.png", result)
import numpy as np
import cv2
img = cv2.imread('001.jpg',0)
img2 = cv2.imread('012.jpg')
tmp = cv2.imread('tmp.jpg',0)
h,w = tmp.shape
match = cv2.matchTemplate(img,tmp,cv2.TM_CCOEFF_NORMED)
minval,maxval,minpt,maxpt = cv2.minMaxLoc(match)
pt = maxpt
print('Score = %f' % maxval)
print('X = %d ' % pt[0])
print('Y = %d ' % pt[1])
#print(match)
cv2.rectangle(img2,(pt[0],pt[1]),(pt[0]+w,pt[1]+h),(0,0,200),3)
cv2.imwrite('result.jpg',img2)
print(pt[0])
print(pt[1])
| SHIFT_JIS | Python | false | false | 1,320 | py | 14 | テンプレートマッチング.py | 14 | 0.64805 | 0.591312 | 0 | 50 | 20.56 | 63 |
anhpt379/inforlearn-python-client | 2,611,340,148,463 | 47578831878f4e3379163952d81b51f08abff6d4 | 1ec85933e203dddbb2f1f6634e8385ef8be4ac20 | /test.py | 04986ed499de8630ea81f9a6f2b1827d46d5a197 | [] | no_license | https://github.com/anhpt379/inforlearn-python-client | 2f9b4dca315a901fe2ef2cc194e71b48b347fc43 | b4223065f84f935f46dee8d1aed64d6f90ead864 | refs/heads/master | 2021-05-26T13:36:28.069576 | 2010-09-06T13:34:42 | 2010-09-06T13:34:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import oauth, httplib
import urllib2
#Define Constants
KEY = '0299cc35f5ca45cf9f1974f056fea080' #Change Me
SECRET = '181a73a06ea04d9f964c020e518a531a' #Change Me
SERVER = 'www.jaiku.com'
REQURL = '/api/request_token'
REQUEST_TOKEN_URL = '/api/request_token'
ACCESS_TOKEN_URL = 'http://www.jaiku.com/access_token'
AUTHORIZATION_URL = 'http://www.jaiku.com/authorize'
if __name__ == "__main__":
consumer = oauth.OAuthConsumer(KEY, SECRET)
access_token = oauth.OAuthToken('88480d7c8e7248b9b1a674217df5762a', '5f9cab3c72e342b9a71a681a7b45a897')
parameters = {'nick': 'AloneRoad', 'method': 'actor_get'}
request = oauth.OAuthRequest.from_consumer_and_token(oauth_consumer=consumer,
token=access_token,
http_url='http://api.jaiku.com/json',
http_method='POST',
parameters=parameters)
request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(),
consumer,
access_token)
result = urllib2.urlopen(urllib2.Request('http://api.jaiku.com/json',
request.to_postdata()))
print result.read()
    result.close()
| UTF-8 | Python | false | false | 1,344 | py | 3 | test.py | 3 | 0.575893 | 0.509673 | 0 | 29 | 45.37931 | 105 |
AlbertGaz/epam_python_courses | 12,395,275,619,019 | 9f456694a5464102f2f9be806d66f33fe5819ea6 | bcf5ee7d356c32fe9f60308f885e4528dc138c62 | /test/test1/test_task2.py | 3edc9c3c789b82b706e412fea9545177adbab015 | [] | no_license | https://github.com/AlbertGaz/epam_python_courses | 81842e1efd49f9fd280ff45b9bb8c160c878ed17 | 1372202e477611840aa7071904e3362b16073068 | refs/heads/master | 2023-04-18T12:39:54.831518 | 2021-04-29T14:32:57 | 2021-04-29T14:32:57 | 345,305,812 | 0 | 0 | null | false | 2021-04-29T14:32:58 | 2021-03-07T09:29:14 | 2021-04-26T19:26:43 | 2021-04-29T14:32:57 | 184 | 0 | 0 | 0 | HTML | false | false |
"""TEST Homework 1.2.
Check if int sequence is fibonacci.
"""
import pytest
from hw.hw1.task2 import check_fibonacci
@pytest.mark.parametrize(
("sequence", "expected_result"),
[
([], False),
([0], True),
([13], True),
([-1, 0], False),
([0, 1], True),
([5, 8, 13], True),
([1, 1], True),
([1, 1, 1], False),
([354224848179261915075, 573147844013817084101, 927372692193078999176], True),
],
)
def test_fib_seq(sequence: list, expected_result: bool):
actual_result = check_fibonacci(sequence)
assert actual_result == expected_result
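# For reference, a minimal sketch of what `check_fibonacci` might look like
# (hypothetical; the real implementation lives in hw.hw1.task2). It treats a
# sequence as valid when it is a contiguous slice of the Fibonacci numbers:
#
#     def check_fibonacci(data):
#         if not data:
#             return False
#         fib = [0, 1]
#         while fib[-1] <= data[-1]:
#             fib.append(fib[-1] + fib[-2])
#         n = len(data)
#         return any(fib[i:i + n] == list(data) for i in range(len(fib) - n + 1))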
| UTF-8 | Python | false | false | 628 | py | 84 | test_task2.py | 71 | 0.571656 | 0.43949 | 0 | 27 | 22.259259 | 86 |
sfschouten/semantic-kge | 12,730,283,088,720 | b65f01d3b11d2744a3a47d11956e050e9d7a7307 | de5704ec42dd71a96c142f3bc4f5ab5ad0cd5852 | /sem_kge/model/config_logging_mixin.py | 78590e8d42c534606f7becb76f5f6db4cb60a76f | ["MIT"] | permissive | https://github.com/sfschouten/semantic-kge | 31be1700739477be9f34fcd2c82edff1c9b7fe72 | d653f6a7eb7222dba38fcdaa668eb892143d0b1e | refs/heads/main | 2023-08-26T22:04:03.916836 | 2021-10-06T10:04:29 | 2021-10-06T10:04:29 | 410,772,913 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class LoggingMixin():
"""
Mixin class with methods for logging of configurations.
"""
def get_option_and_log(self, key):
value = self.get_option(key)
self.config.log(f"{self.configuration_key}.{key} set to {value}")
return value
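# Usage sketch (hypothetical, not from this repo): a model class inheriting
# this mixin could write
#
#     dim = self.get_option_and_log("entity_embedder.dim")
#
# to read a configuration value and log it in one call.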
| UTF-8 | Python | false | false | 295 | py | 39 | config_logging_mixin.py | 24 | 0.569492 | 0.569492 | 0 | 11 | 25.545455 | 73 |
AakanshaDhawan/gameBoard | 3,719,441,701,978 | 610ef8fdfae53b279c69a2eec4ea8d7b402d9a98 | 8cc22effcfbf4a3ac16bc155111bd47dbc5a0f85 | /backend/forms/models.py | 7dae7ee793bf3bd8a89b0e232fcf3e189a8870b8 | [] | no_license | https://github.com/AakanshaDhawan/gameBoard | 1643624dd103a91fde2546b54adbb8d369d06627 | d8f67cd55ac8fbbc924fd7d60effad51eb9ab72b | refs/heads/master | 2023-06-04T12:54:56.142229 | 2021-06-17T16:50:28 | 2021-06-17T16:50:28 | 376,027,098 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
# Create your models here.
class SubscriptionEmail(models.Model):
email = models.CharField(max_length=100, blank=False)
datetime = models.DateTimeField(auto_now=True)
class ContactForm(models.Model):
firstName= models.CharField(max_length=30, blank=True)
lastName= models.CharField(max_length=30, blank=True)
email= models.CharField(max_length=100, blank=False)
message=models.TextField()
datetime = models.DateTimeField(auto_now=True)
class CorporateEnquiryForm(models.Model):
firstName= models.CharField(max_length=30, blank=True)
lastName= models.CharField(max_length=30, blank=True)
email= models.CharField(max_length=100, blank=False)
company= models.CharField(max_length=100, blank=True)
phone= models.CharField(max_length=20, blank=True)
budget= models.CharField(max_length=12, blank=True)
datetime = models.DateTimeField(auto_now=True)
class CatanForm(models.Model):
firstName= models.CharField(max_length=30, blank=True)
lastName= models.CharField(max_length=30, blank=True)
email= models.CharField(max_length=100, blank=False)
insta= models.CharField(max_length=50, blank=True)
phone= models.CharField(max_length=20, blank=True)
referal= models.CharField(max_length=30, blank=True)
    datetime = models.DateTimeField(auto_now=True)
| UTF-8 | Python | false | false | 1,355 | py | 34 | models.py | 26 | 0.748339 | 0.721033 | 0 | 31 | 42.741935 | 58 |
andela-ooshodi/library | 3,882,650,466,227 | 4205e55f87ac0b7fc4cf2d51ffec732ce94cdf9e | 6c81d2e1d00250bb1a86f3990a96223b7ce234e0 | /bookshelf/models.py | e04563bb5719d458cb458360fe2fa0ec5f9181fc | ["MIT"] | permissive | https://github.com/andela-ooshodi/library | bbf9634227d2e857db0c88f1ad8d920d6e216c44 | 6a95116b097e1ba23508166057f16a0ea7679d43 | refs/heads/master | 2021-01-19T08:51:25.421692 | 2017-05-11T00:28:35 | 2017-05-11T00:28:35 | 87,689,850 | 0 | 0 | null | false | 2017-05-11T00:28:35 | 2017-04-09T07:08:04 | 2017-04-09T07:50:57 | 2017-05-11T00:28:35 | 25 | 0 | 0 | 0 | Python | null | null |
from __future__ import unicode_literals
from django.db import models
class Base(models.Model):
"""
Base model
"""
name = models.CharField(max_length=50, unique=True)
description = models.CharField(max_length=200, blank=True, default='No description')
class Meta:
abstract = True
def __str__(self):
"""
:return: string representation of the object
"""
return self.name
class Category(Base):
pass
class Book(Base):
author = models.CharField(max_length=50, default='Unknown')
category = models.ForeignKey(Category, related_name='books')
| UTF-8 | Python | false | false | 623 | py | 21 | models.py | 14 | 0.64366 | 0.632424 | 0 | 29 | 20.482759 | 88 |
marcelopoars/estudando-python-3 | 13,357,348,311,426 | 49e425ede1c26368c0f045f29d3f36e8ffb3d6d6 | 924468f1404e67e05ac11cc3b216b9a3283dc5f0 | /aulas/content_06_for/exemplo_1.py | d57e4fb4b4306b4dd94397a18347d01a08dad224 | [] | no_license | https://github.com/marcelopoars/estudando-python-3 | e0c7f8cb27b9841c226dda6983fd0042a1336243 | 9f740403b9d89e7f92be3688149d97cb3fc31a41 | refs/heads/main | 2023-04-09T09:17:27.814005 | 2023-03-17T12:04:27 | 2023-03-17T12:04:27 | 150,115,680 | 5 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Exemplo 1 - Como funciona o FOR. """
# O FOR é um laço de repetição (loop)
# O FOR tem começo e fim
# A variável "item" vai receber um caracter por vez
# A cada ciclo mostra o caracter na tela
# A palavra "Python" é uma STRING
""" EXEMPLO 1 """
for item in "Python":
print(item)
| UTF-8 | Python | false | false | 347 | py | 111 | exemplo_1.py | 101 | 0.644118 | 0.632353 | 0 | 13 | 25.153846 | 52 |
vanigupta20024/Programming-Challenges | 4,260,607,597,506 | da0f4907511cee0523973438855051b6498f9791 | de813a0ee710328aae34d30b5abc4b98146cf894 | /PartyPlanning1.py | e1d87be6c46761b5d0196357a571c9c53e027694 | ["MIT"] | permissive | https://github.com/vanigupta20024/Programming-Challenges | ab885d9af98ce6a967e1b8edbc70da1dcd17ac73 | 578dba33e9f6b04052a503bcb5de9b32f33494a5 | refs/heads/master | 2023-08-31T08:11:54.650564 | 2021-10-07T07:09:19 | 2021-10-07T07:09:19 | 271,755,231 | 21 | 4 | MIT | false | 2020-08-25T19:17:14 | 2020-06-12T09:03:13 | 2020-08-25T19:16:29 | 2020-08-25T19:16:26 | 209 | 0 | 1 | 0 | Python | false | false |
# You're running a catering business that serves desserts. Customers call in and tell you a party size, and you provide enough dessert for everyone to have a piece.
# Cakes serve 25 guests.
# Pies serve 8 guests.
# Cupcakes serve 1 guest.
# The more people a dessert serves, the less it costs to make, so we want you to optimize for serving the biggest desserts first.
# Write a function that, given a party size, will output a dictionary with each dessert and the number to provide like this:
# input:
# party_size = 60
# # output:
# order = {
# "cakes": 2,
# "pies": 1,
# "cupcakes": 2
# }
def create_order(party_size):
desserts = {
"cakes": 25,
"pies": 8,
"cupcakes": 1
}
rev_des = {v:k for k,v in desserts.items()}
answer = {}
dessert_list = sorted(rev_des.keys(), reverse = True)
i = 0
while party_size > 0:
if party_size >= dessert_list[i]:
party_size -= dessert_list[i]
if rev_des[dessert_list[i]] in answer.keys():
answer[rev_des[dessert_list[i]]] += 1
else:
answer[rev_des[dessert_list[i]]] = 1
else:
i += 1
return answer
print(create_order(108))
| UTF-8 | Python | false | false | 1,142 | py | 172 | PartyPlanning1.py | 168 | 0.646235 | 0.627846 | 0 | 38 | 29.052632 | 164 |
jakobkhansen/KattisSolutions | 12,386,685,731,635 | a7eaa096fe6f86acca66e845553a2ba1f7128308 | afcf26bbd84161f2775b879a68b2c163878984d9 | /grid/grid.py | b7572f607ffbfae2eaa88fc8031fec969f147468 | [] | no_license | https://github.com/jakobkhansen/KattisSolutions | 505a0657fa02a5156c853fc0a6566dd51591d36d | 2869d6c9027515fd41eac6fcaee281aa474810c4 | refs/heads/master | 2023-07-07T19:17:37.614836 | 2023-06-28T18:12:50 | 2023-06-28T18:12:50 | 191,001,396 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
from queue import Queue
class Point:
def __init__(self, spot, previous):
self.spot = spot
        self.previous = previous  # number of moves taken to reach this spot
def __repr__(self):
return str(self.spot)
def grid(lines):
n, m = [int(x) for x in lines[0].split(" ")]
if n == 0 or m == 0:
return -1
if n == 1 and m == 1:
return 0
grid = []
visited = []
for i in range(1,n+1):
row = [int(x) for x in lines[i]]
visitedRow = [False for x in lines[i]]
grid.append(row)
visited.append(visitedRow)
queue = Queue()
start = Point((0,0), 0)
visited[0][0] = True
goal = (n-1,m-1)
queue.put(start)
    while not queue.empty():
        current = queue.get()
        curSpot = current.spot
        moveVal = grid[curSpot[0]][curSpot[1]]
        if moveVal == 0:
            continue
        # Try jumping moveVal cells up, down, left and right
        for dr, dc in ((-moveVal, 0), (moveVal, 0), (0, -moveVal), (0, moveVal)):
            row, col = curSpot[0] + dr, curSpot[1] + dc
            if not (0 <= row < len(grid) and 0 <= col < len(grid[0])):
                continue
            newPoint = Point((row, col), current.previous + 1)
            if newPoint.spot == goal:
                return current.previous + 1
            if not visited[row][col]:
                visited[row][col] = True
                queue.put(newPoint)
    return -1
def main():
lines = [line.strip() for line in sys.stdin]
print(grid(lines))
main()
| UTF-8 | Python | false | false | 2,623 | py | 352 | grid.py | 334 | 0.516203 | 0.493709 | 0 | 108 | 23.287037 | 82 |
RoKu1/cracking-the-coding-interview | 6,863,357,762,232 | d5721fe8ea6b6b82859a2c7f383f0098bc8145f9 | 52b28f756e7c35fd97bbe1f5b3e47b7be3e59757 | /Trees_and_Graphs/1Route_Between_Nodes.py | 58055e41cb19084cecfd67b0af54b748ebae4af0 | ["Apache-2.0"] | permissive | https://github.com/RoKu1/cracking-the-coding-interview | e6d7cc5cdf28f7604f71a09d83f6b0c9cf42d444 | ce2fabba75f1edf69b81a80022eb9ebac8a09af2 | refs/heads/master | 2023-07-11T12:37:36.394227 | 2021-08-11T16:35:56 | 2021-08-11T16:35:56 | 296,645,906 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from Trees_and_Graphs import Graph as GP
"""
4.1 Route Between Nodes: Given a directed graph, design an algorithm to find out whether there is a
route between two nodes.
"""
"""
We will traverse the garpgh from given node using DFS and if we find the end node in visited we will say that
the route is available
"""
def checkifroute(g, pair):
g.DFS(g.maper[pair[0]])
if g.maper[pair[1]] in g.visited:
print("Route is Possible\n")
else:
print("Route is not Posible\n")
ardire = [['0', '1'], ['0', '4'], ['0', '5'], ['1', '3'], ['1', '4'], ['2', '1'], ['3', '2'], ['3', '4']]
g = GP.GraphDirected(ardire)
pair = ['0', '5'] # pair of start and end node
checkifroute(g, pair)
| UTF-8 | Python | false | false | 706 | py | 37 | 1Route_Between_Nodes.py | 36 | 0.611898 | 0.580737 | 0 | 24 | 28.416667 | 109 |
dr-dos-ok/Code_Jam_Webscraper | 2,774,548,895,868 | 72876541d4862d2e5cb23ace3ecbb29e449c0735 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_46/59.py | 889c97a393e369356699b130d8d7d03cac9b47c7 | [] | no_license | https://github.com/dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def rightmost(row):
rrow = row[:]
rrow.reverse()
n = len(row)
if 1 in rrow:
return n - 1 - rrow.index(1)
return -1
def solve(m):
rows = len(m)
res = 0
for row in xrange(rows):
for row2 in xrange(row, rows):
ind = rightmost(m[row2])
if ind <= row:
temp = m[row2]
m.pop(row2)
m.insert(row, temp)
res += row2 - row
break
return res
T = int(raw_input())
for t in xrange(T):
N = int(raw_input())
m = []
for row in xrange(N):
m.append(map(lambda x: int(x), raw_input()))
print 'Case #%d: %d' % (t+1, solve(m))
| UTF-8 | Python | false | false | 693 | py | 60,747 | 59.py | 60,742 | 0.455988 | 0.440115 | 0 | 36 | 18.25 | 52 |
Shakileash5/ImageEncryption | 4,294,967,316,114 | e73eb99aeb57cb3bc3813cd474ce8a687ffb6114 | 052d8c29f264d92b114a1e7a73c45c41592c5894 | /clientCv.py | ccd9f54a592ac738925286bfc7ebc8779d1ca1f5 | [] | no_license | https://github.com/Shakileash5/ImageEncryption | c5d97b17067911404c3cbc32de717a0a9f9935db | 8a47c45b1de45f38cecee2d35d8e724cbc8d3ee9 | refs/heads/main | 2023-08-13T05:41:07.073360 | 2021-09-27T14:34:40 | 2021-09-27T14:34:40 | 410,772,913 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import cv2
import numpy as np
import socket
import sys
import pickle
import struct
import chaosMap
import json
TYPE_ = 2 # 0 - ArnoldCat , 1 - HenonMap , 2 - LogisticMap
KEY = "abcdefghijklm"#20#(0.1,0.1) # key to encrypt the image
HOST = "127.0.0.1" # server ip
PORT = 5001 # Reserve a port for server.
sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM) # Create a socket object
sock.connect((HOST,PORT))
print("[+] Connected to server")
img = cv2.imread('HorizonZero.png') # read image from file
img = chaosMap.chaosEncryption(img,KEY,TYPE_) # encrypt image
data = {
'type' : TYPE_,
'key' : KEY,
}
data = json.dumps(data)
print("[!] Sending data to server : ",data)
sock.send(data.encode()) # send encryption type and key to server
data = pickle.dumps(img, 0) # convert image to byte
size = len(data)
print("[!] Size of the image : ",size)
sock.sendall(struct.pack(">L", size) + data) # send image bytes to server
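# A matching receiver (a sketch, not part of this script; `conn` and
# `recv_exact` are assumed helpers) would mirror the sends above: read the
# JSON header, then the 4-byte big-endian length, then that many bytes of
# pickled image data:
#
#     header = json.loads(conn.recv(1024).decode())  # assumes the header arrives alone in one recv
#     size = struct.unpack(">L", recv_exact(conn, 4))[0]
#     img = pickle.loads(recv_exact(conn, size))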
sock.close()
| UTF-8 | Python | false | false | 950 | py | 6 | clientCv.py | 5 | 0.692632 | 0.668421 | 0 | 36 | 25.416667 | 78 |
chenweican/Octopus | 12,601,434,054,246 | 8751a72a9281b982027b14d264b471430f6d22cb | c204ea42db6cee8bf244e0275550e2001e77c6bf | /app/dispatcher.py | 2a58f41e519940562c6e40a192e377d605652ca4 | [] | no_license | https://github.com/chenweican/Octopus | 2879af52c5298893e6deeff704014cf21480944c | 1c5e6357397cabacacaae59b1636a433660ef6d6 | refs/heads/master | 2019-04-09T19:55:38.288457 | 2016-05-10T09:54:36 | 2016-05-10T09:54:36 | 57,180,669 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from app.crawler import *
from app.worker import Worker
from app.page_analyst import *
import json
import base64
from datetime import datetime
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
class Dispatcher:
def __init__(self, SECRET):
self.url_crawler = UrlCrawler()
self.workers = {
'weibo_rank_list': Worker(WeiboCrawler(), WeiboHotPageAnalyst()),
'url': Worker(self.url_crawler, None),
'baidu_hot_url': Worker(self.url_crawler, BaiduHotPageAnalyst())
}
sql_conf = SECRET['sql']
self.engine = create_engine("mysql://%s:%s@%s:%s/test?charset=utf8" % (
sql_conf['username'], sql_conf['password'], sql_conf['host'],
sql_conf['port']
), encoding='utf8', convert_unicode=True)
self.DBSession = sessionmaker(bind=self.engine)
self.session = self.DBSession()
return None
def dispatch_a_job(self, data):
if data['type'] in self.workers:
job = self.workers[data['type']].work(data)
if 'succ' == job.status:
# save to storage
self.session.add(job)
self.session.commit()
return True
else:
return False
else:
return False
return False
| UTF-8 | Python | false | false | 1,374 | py | 30 | dispatcher.py | 26 | 0.577875 | 0.574236 | 0 | 40 | 33.35 | 79 |
FanziSufu/Simple-ML | 2,070,174,245,304 | 10bf0153b3d48de101b4406617bdef678f64b21a | 2fd98905f22ca22e52a7013b64583668d1cead94 | /PCA.py | 33ec4388992ca02c34ee83543fbddeb89964ba50 | [] | no_license | https://github.com/FanziSufu/Simple-ML | 16883a3ccecbeedce0c8b0b20c16a6408ea24764 | e2a0442d7d81741583ec3c823d7ddb773a96160f | refs/heads/master | 2020-04-06T23:19:55.180931 | 2018-11-16T12:40:15 | 2018-11-16T12:40:15 | 157,865,635 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
from sklearn import preprocessing as pp
from sklearn import datasets
class PCA:
def __init__(self, k=10):
        self.k = k  # target dimensionality
    def training(self, dataset, svd=False):
        """
        Use principal component analysis to reduce an n-dimensional dataset to k dimensions
        :param dataset: n*m dataset with n features and m samples, np.array
        :param svd: whether to use singular value decomposition to find the eigenvalues
        :return: the reduced k*m dataset and the retained-information ratio t
        """
        dataset = pp.scale(dataset, axis=1)  # remove the mean
        cov_mat = np.cov(dataset)  # compute the covariance matrix
        if svd:  # solve with SVD (singular value decomposition)
            u, s, vt = np.linalg.svd(cov_mat)  # s holds the singular values, u the corresponding eigenvectors
            eig_vals = s[: self.k]  # take the top k singular values, i.e. the eigenvalues
            red_eig_vects = u[:, : self.k]  # take the corresponding eigenvectors, n*k
            t = np.sum(eig_vals) / np.sum(s)  # fraction of information retained
        else:  # solve by computing the eigenvalues and eigenvectors directly
            eig_vals, eig_vects = np.linalg.eig(cov_mat)  # eigenvalues and eigenvectors of the covariance matrix
            eig_vals_ind = np.argsort(eig_vals)[:: -1][: self.k]  # indices of the k largest eigenvalues
            red_eig_vects = eig_vects[:, eig_vals_ind]  # gather the matching eigenvectors
            # In PCA an eigenvalue equals the variance of the data projected onto its eigenvector;
            # variance stands in for information here, so t measures how much of the original information is kept
            t = np.sum(eig_vals[eig_vals_ind]) / np.sum(eig_vals)
        low_dim_data = np.dot(red_eig_vects.T, dataset)  # eigenvectors.T * centered data = reduced data
return low_dim_data, t
def test():
dataset = datasets.make_classification(n_samples=3000, n_features=100)[0]
pca = PCA(k=60)
print(pca.training(dataset, svd=True)[1])
print(pca.training(dataset, svd=False)[1])
test()
| UTF-8 | Python | false | false | 1,982 | py | 12 | PCA.py | 11 | 0.592266 | 0.581411 | 0 | 42 | 34.095238 | 81 |
HDClark94/VRnav | 13,683,765,851,720 | 38317aee69fb0131c501f6534f9622172b31d9f1 | b1552d33b62144f782b6cb85f6f78dadc9dede12 | /summarize/powersimulations_refactor_assay.py | 570934f8c21b9f0fbfb048ea01edc01efaf6e9ca | [] | no_license | https://github.com/HDClark94/VRnav | 6d58cd12409cea172cb1e5ac0740ee0384f2b92a | 4a721102efe21cbc7015c90e831a7c03f790591d | refs/heads/master | 2023-03-16T07:02:11.348536 | 2021-04-08T14:18:50 | 2021-04-08T14:18:50 | 190,720,949 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import pingouin as pg
import pandas as pd
import time
from scipy.stats import f
np.random.seed(1)
def data():
    # have this make a sigmoid with a random number of correct trials each time it's called.
start_time = time.time()
iterations = 500
trials = 60
track_lengths = np.array([50, 75, 112.5, 168.75, 253.125])
coef1 = 5
coef2 = -0.05
coef3 = 4
coef4 = -0.02
n_conditions = 2
n_subjects = np.array([2,4,5,10,20,30,40])
coef3s = np.linspace(1, 10, 5)
coef4s = np.linspace(-0.01, -0.1, 5)
parameters_powers_conditions = np.zeros((len(n_subjects), len(coef3s), len(coef4s)))
parameters_powers_track_length = np.zeros((len(n_subjects), len(coef3s), len(coef4s)))
parameters_powers_interaction = np.zeros((len(n_subjects), len(coef3s), len(coef4s)))
# consider condition 1 first
group1_theo = (np.e**(coef1 + (coef2*track_lengths)))/ \
(np.e**(coef1 + (coef2*track_lengths))+1)
z1 = coef1 + (coef2*track_lengths)
pr = 1/(1+np.e**(-z1))
# now consider condition 2
group2_theo = (np.e**(coef3 + (coef4*track_lengths)))/ \
(np.e**(coef3 + (coef4*track_lengths))+1)
z2 = coef3 + (coef4*track_lengths)
pr2 = 1/(1+np.e**(-z2))
for counter3, coef3 in enumerate(coef3s):
for counter4, coef4 in enumerate(coef4s):
z1 = coef1 + (coef2*track_lengths)
pr = 1/(1+np.e**(-z1))
z2 = coef3 + (coef4*track_lengths)
pr2 = 1/(1+np.e**(-z2))
for n_counter, n in enumerate(n_subjects):
condition_p = []
track_length_p = []
interaction_p = []
subject_id_long = np.tile(np.transpose(np.tile(np.linspace(1,n,n), (len(track_lengths),1))).flatten(), n_conditions)
conditions_long = np.append(np.ones(len(track_lengths)*n), np.ones(len(track_lengths)*n)*2) # currently hardcoded for only 2 conditions
track_lengths_long = np.tile(track_lengths, n_conditions*n)
for i in range(iterations):
y_percentage1 = ((np.random.binomial(trials,pr, (n,len(track_lengths)))/trials)*100).flatten()
y_percentage2 = ((np.random.binomial(trials,pr2, (n,len(track_lengths)))/trials)*100).flatten()
appended_correct = np.append(y_percentage1, y_percentage2) # currently hardcoded for only 2 conditions
df = pd.DataFrame({"subject": subject_id_long, "Condition": conditions_long, 'Track_length': track_lengths_long,'percentage_corr_trials': appended_correct})
aov = pg.rm_anova(dv='percentage_corr_trials', within=['Condition', 'Track_length'],subject='subject', data=df, detailed=True)
condition_p.append(np.nan_to_num(aov[aov.Source=="Condition"]['p-unc'].values[0]))
track_length_p.append(np.nan_to_num(aov[aov.Source=="Track_length"]['p-unc'].values[0]))
interaction_p.append(np.nan_to_num(aov[aov.Source=="Condition * Track_length"]['p-unc'].values[0]))
condition_p = np.array(condition_p)
track_length_p = np.array(track_length_p)
interaction_p = np.array(interaction_p)
power_condition = len(condition_p[condition_p<0.05])/iterations
power_track_length = len(track_length_p[track_length_p<0.05])/iterations
power_interaction = len(interaction_p[interaction_p<0.05])/iterations
parameters_powers_conditions[n_counter, counter3, counter4] = power_condition
parameters_powers_track_length[n_counter, counter3, counter4] = power_track_length
parameters_powers_interaction[n_counter, counter3, counter4] = power_interaction
print("it took ", time.time()-start_time, "for 1 simulated loop to run")
print("currently on ", str(n), "n subjects, ", str(coef3), "coef3 and ", str(coef4), "coef4")
start_time = time.time()
#np.save('/mnt/datastore/Harry/OculusVR/Power_analysis/Harry_figs/conditions_assay.npy', parameters_powers_conditions)
#np.save('/mnt/datastore/Harry/OculusVR/Power_analysis/Harry_figs/track_length.npy', parameters_powers_track_length)
#np.save('/mnt/datastore/Harry/OculusVR/Power_analysis/Harry_figs/interaction.npy', parameters_powers_interaction)
np.save(r'Z:\ActiveProjects\Harry\OculusVR\Power_analysis\Harry_figs\conditions_assay.npy', parameters_powers_conditions)
np.save(r'Z:\ActiveProjects\Harry\OculusVR\Power_analysis\Harry_figs\track_length.npy', parameters_powers_track_length)
np.save(r'Z:\ActiveProjects\Harry\OculusVR\Power_analysis\Harry_figs\interaction.npy', parameters_powers_interaction)
'''
fig = plt.figure(figsize = (12,4))
ax = fig.add_subplot(1,1,1) #stops per trial
ax.set_title('Power analysis', fontsize=20, verticalalignment='bottom', style='italic') # title
ax.plot(power_condition,np.arange(n_subjects), color = 'black', label = 'F = Condition', linewidth = 2)
ax.plot(power_track_length,np.arange(n_subjects),color = 'red', label = 'F = Track Length', linewidth = 2)
ax.plot(power_interaction,np.arange(n_subjects), color = 'blue', label = 'Interaction', linewidth = 2)
ax.set_xlim(0,200)
#ax.set_ylim(0, nb_y_max+0.01)
plt.subplots_adjust(hspace = .35, wspace = .35, bottom = 0.6, left = 0.15, right = 0.82, top = 0.85)
#fig.text(0.5, 0.04, 'Track Position Relative to Goal (cm)', ha='center', fontsize=16)
#fig.text(0.05, 0.94, Mouse, ha='center', fontsize=16)
#ax.legend(loc=(0.99, 0.5))
plt.show()
#fig.savefig(save_path, dpi=200)
plt.close()
#plt.show()
'''
def main():
print('-------------------------------------------------------------')
print('-------------------------------------------------------------')
data()
#a = np.load('/mnt/datastore/Harry/OculusVR/Power_analysis/Harry_figs/conditions_assay.npy')
print("hello there")
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 6,152 | py | 95 | powersimulations_refactor_assay.py | 63 | 0.60972 | 0.575748 | 0 | 132 | 45.606061 | 176 |
Singh-Sg/DataCreaftWithReact | 8,400,956,032,899 | 209db267f69922814336aae48e29cd6282124390 | 355155ba62cdfc3eb1fafa44d54c64a82172770e | /DataCraft/DataSearch/migrations/0003_auto_20180223_1214.py | 849d57c6849531f7e2fa415d22557d0116a176a0 | [] | no_license | https://github.com/Singh-Sg/DataCreaftWithReact | 863e4408a8ba106df23d18f69bfcf7c98803aea4 | 4285124dc5581228f5266b3c9c21f120a349a333 | refs/heads/master | 2020-03-23T09:36:37.298854 | 2018-07-18T07:36:59 | 2018-07-18T07:36:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Generated by Django 2.0.2 on 2018-02-23 12:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('DataSearch', '0002_auto_20180223_1209'),
]
operations = [
migrations.AlterField(
model_name='client',
name='department',
field=models.CharField(blank=True, choices=[('1', 'IT'), ('2', 'Engineering'), ('3', 'Enterprise Analytics'), ('4', 'CGSO')], default='---', max_length=200, null=True, verbose_name='Departmnet Type'),
),
migrations.AlterField(
model_name='client',
name='user_role',
field=models.CharField(blank=True, choices=[('Admin', 'Admin'), ('publisher', 'Publisher'), ('consumer', 'Consumer')], default='---', max_length=200, null=True, verbose_name='User Role'),
),
]
| UTF-8 | Python | false | false | 860 | py | 48 | 0003_auto_20180223_1214.py | 30 | 0.584884 | 0.537209 | 0 | 23 | 36.391304 | 212 |
kleysonr/snsdl | 11,407,433,146,949 | 38e84cd66ef528c0d4f5cb8966f86ed32751f8a6 | 4aeef11a968533ead4d39877ed598d8376419db3 | /myModels/shallownet.py | c58ecf3a36a90cb73e7682adf42cbc31dbb19a81 | ["Apache-2.0"] | permissive | https://github.com/kleysonr/snsdl | 11bcb34ce81f7d3635356b61cd0c1849a39ab16e | 2f3cc61d585774f1cb2513b6507e7b872f9eb888 | refs/heads/master | 2022-10-21T16:47:29.253442 | 2019-05-28T19:38:56 | 2019-05-28T19:38:56 | 165,902,755 | 1 | 2 | Apache-2.0 | false | 2022-10-12T02:02:49 | 2019-01-15T18:31:42 | 2019-05-28T19:41:30 | 2019-05-28T19:44:55 | 95 | 1 | 2 | 2 | Python | false | false |
import keras
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.models import Sequential
from snsdl.keras.wrappers.base_model import BaseModel
class ShallowNet(BaseModel):
def create_model(self,input_shape=None, num_classes=None, optimizer=None):
optimizers = {
'adadelta': keras.optimizers.Adadelta()
}
# CNN Model
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
# Compile model
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizers[optimizer], metrics=['accuracy'])
        return model
| UTF-8 | Python | false | false | 1,015 | py | 30 | shallownet.py | 29 | 0.664039 | 0.641379 | 0 | 29 | 34.034483 | 120 |
Derpmander/Prog1-ovn | 18,296,560,696,450 | 4196a7e67d631ce050d2dca59a49fe4760209cbc | 5305c03e01e57688443f6e8236f47d24e7cef4b4 | /4.4.py | 3767fbf4dee6584c4d02a38bbbed45b63e33b810 | [] | no_license | https://github.com/Derpmander/Prog1-ovn | 52e4c974b93c08ac11043366b3a94e5ea4bf9c94 | b518437764020884cf800ae012fd40b48b14a3c9 | refs/heads/master | 2023-01-22T22:35:11.271139 | 2020-11-16T13:47:11 | 2020-11-16T13:47:11 | 291,701,268 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
startHöjd = float(input("Start height:"))
höjd = startHöjd
förlust = 0.7
while höjd > 0.01:
    höjd = höjd * förlust
studsar = startHöjd / höjd
studsar = int(studsar)
print(f"The ball bounces {studsar} times")
| UTF-8 | Python | false | false | 209 | py | 25 | 4.4.py | 24 | 0.741117 | 0.715736 | 0 | 8 | 23.75 | 41 |
sejoung/python-win32api-test | 6,571,299,983,272 | 098acd8ee10ad554fd04c8b4133b6b87118537b2 | bd812be03e5af41a22c77764df980c3aaf3703cd | /EventFiltering.py | 50f03a615c683dc4cb08c0e4fff65339a709e5d1 | [] | no_license | https://github.com/sejoung/python-win32api-test | 7b9273d8562d64095a22b10f1a885b25e1ff2b64 | 9df86f9909859aecd414319dcbc217af13090167 | refs/heads/master | 2021-01-10T04:56:57.632640 | 2015-11-05T23:23:04 | 2015-11-05T23:23:04 | 45,645,604 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'sanaes'
import pythoncom, pyHook
def OnKeyboardEvent(event):
    # block only the letter A, lower and uppercase
    return (event.Ascii not in (ord('a'), ord('A')))
# create a hook manager
hm = pyHook.HookManager()
# watch for all keyboard events
hm.KeyDown = OnKeyboardEvent
# set the hook
hm.HookKeyboard()
# wait forever
pythoncom.PumpMessages()
| UTF-8 | Python | false | false | 357 | py | 14 | EventFiltering.py | 13 | 0.728291 | 0.728291 | 0 | 15 | 22.866667 | 50 |
SolubleCode/tubuy | 10,737,418,264,944 | ac3a94ebebae756d7f5cc13292efe5f4bfafa17d | 6d6b1844c967bfbaafd1cd88cb6b788a915fd7e5 | /api/permissions.py | 5cd7883afd099d7bc88b75cbd04bab1b8da4797f | ["MIT"] | permissive | https://github.com/SolubleCode/tubuy | b8e703716120709f39c2066f957526b0f06bcfa9 | f61ee95966ab3bf7a4bbd6d58fdca765706248e2 | refs/heads/develop | 2019-08-09T12:11:27.855162 | 2019-04-12T14:24:06 | 2019-04-12T14:24:06 | 52,166,912 | 4 | 4 | MIT | false | 2018-11-04T06:56:54 | 2016-02-20T17:53:56 | 2018-07-04T17:57:18 | 2018-11-04T06:56:54 | 90 | 3 | 0 | 3 | Python | false | null |
from rest_framework import permissions
class IsStaffOrTargetUser(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return request.user.is_superuser or obj == request.user
class CommodityOwner(permissions.BasePermission):
""" Custom permission to only allow commodity owners to edit it
"""
def has_permission(self, request, view):
return request.user and request.user.is_authenticated()
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.requestor == request.user
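# Usage sketch (hypothetical view, not part of this module): attach the
# permission to a DRF view via its permission_classes attribute, e.g.
#
#     class CommodityViewSet(viewsets.ModelViewSet):
#         permission_classes = (CommodityOwner,)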
| UTF-8 | Python | false | false | 715 | py | 20 | permissions.py | 14 | 0.706294 | 0.706294 | 0 | 20 | 34.75 | 67 |
stephendonner/loads-broker | 11,218,454,622,864 | ab8bb8ad865c3be3f445c148263288b66ab39ba5 | e88550201a4ce2601fbd25539acb6cb453ac709c | /loadsbroker/broker.py | b5a8a0b6d18c6031833225435bdb18364018b2ce | ["Apache-2.0"] | permissive | https://github.com/stephendonner/loads-broker | 89f6f24f575a5696faee46205f5d4e9ee12f386b | 81ede34fd551f2ed7a71c14ca8c568d3d47d5c2b | refs/heads/master | 2020-12-24T10:24:35.454131 | 2016-12-30T01:03:31 | 2016-12-30T01:06:29 | 73,158,265 | 0 | 0 | null | true | 2016-11-08T06:58:14 | 2016-11-08T06:58:13 | 2016-11-08T06:58:09 | 2016-11-08T01:21:13 | 1,310 | 0 | 0 | 0 | null | null | null |
"""Broker Orchestration
The Broker is responsible for:
* Coordinating runs
* Ensuring run transitions
* Providing a rudimentary public API for use by the CLI/Web code
Complete list of the environment variables available in a test container:
- BROKER_ID
Unique ID of the broker running this test.
- BROKER_VERSION
Version of the broker running the test.
- RUN_ID
Unique run ID.
- CONTAINER_ID
Container ID for the collection.
- HOST_IP
The public IP of the host running the container.
- PRIVATE_IP
The AWS internal IP of the host.
- STATSD_HOST
IP of the statsd host to send metrics to.
- STATSD_PORT
Port of the statsd host.
"""
import os
import time
import concurrent.futures
from collections import namedtuple
from datetime import datetime
from functools import partial
from pprint import pformat
from sqlalchemy.orm.exc import NoResultFound
from tornado import gen
try:
from influxdb.influxdb08 import InfluxDBClient
except ImportError:
InfluxDBClient = None
from loadsbroker import logger, aws, __version__
from loadsbroker.db import (
Database,
Run,
Project,
RUNNING,
TERMINATING,
COMPLETED,
setup_database,
)
from loadsbroker.exceptions import LoadsException
from loadsbroker.extensions import (
DNSMasq,
Docker,
Heka,
Watcher,
Ping,
SSH,
ContainerInfo,
)
from loadsbroker.webapp.api import _DEFAULTS
import threading
BASE_ENV = dict(
BROKER_VERSION=__version__,
)
WATCHER_INFO = ContainerInfo(
"loadswatch:latest",
"https://s3.amazonaws.com/loads-docker-images/loadswatch.tar.bz2")
HEKA_INFO = ContainerInfo(
"kitcambridge/heka:0.8.1",
"https://s3.amazonaws.com/loads-docker-images/heka-0.8.1.tar.bz2")
DNSMASQ_INFO = ContainerInfo(
"kitcambridge/dnsmasq:latest",
"https://s3.amazonaws.com/loads-docker-images/dnsmasq.tar.bz2")
def log_threadid(msg):
"""Log a message, including the thread ID"""
thread_id = threading.currentThread().ident
logger.debug("Msg: %s, ThreadID: %s", msg, thread_id)
class RunHelpers:
"""Empty object used to reference initialized extensions."""
pass
class Broker:
def __init__(self, name, io_loop, sqluri, ssh_key,
heka_options, influx_options, aws_port=None,
aws_owner_id="595879546273", aws_use_filters=True,
aws_access_key=None, aws_secret_key=None, initial_db=None):
self.name = name
logger.debug("loads-broker (%s)", self.name)
self.loop = io_loop
self._base_env = BASE_ENV.copy()
self.watcher_options = {'AWS_ACCESS_KEY_ID': aws_access_key,
'AWS_SECRET_ACCESS_KEY': aws_secret_key}
user_data = _DEFAULTS["user_data"]
if user_data is not None and os.path.exists(user_data):
with open(user_data) as f:
user_data = f.read()
self.influx_options = influx_options
if influx_options is None:
self.influx = None
else:
influx_args = {
"host": influx_options.host,
"port": influx_options.port,
"username": influx_options.user,
"password": influx_options.password,
"database": "loads"
}
if influx_options.secure:
influx_args["ssl"] = True
influx_args["verify_ssl"] = True
if InfluxDBClient is None:
raise ImportError('You need to install the influx lib')
self.influx = InfluxDBClient(**influx_args)
self.pool = aws.EC2Pool(self.name, user_data=user_data,
io_loop=self.loop, port=aws_port,
owner_id=aws_owner_id,
use_filters=aws_use_filters,
access_key=aws_access_key,
secret_key=aws_secret_key)
# Utilities used by RunManager
ssh = SSH(ssh_keyfile=ssh_key)
self.run_helpers = run_helpers = RunHelpers()
run_helpers.ping = Ping(self.loop)
run_helpers.docker = Docker(ssh)
run_helpers.dns = DNSMasq(DNSMASQ_INFO, run_helpers.docker)
run_helpers.heka = Heka(HEKA_INFO, ssh=ssh, options=heka_options,
influx=influx_options)
run_helpers.watcher = Watcher(WATCHER_INFO,
options=self.watcher_options)
run_helpers.ssh = ssh
self.db = Database(sqluri, echo=True)
# Run managers keyed by uuid
self._runs = {}
# Ensure the db is setup
if initial_db:
setup_database(self.db.session(), initial_db)
def shutdown(self):
self.pool.shutdown()
def get_projects(self, fields=None):
projects = self.db.session().query(Project).all()
return [proj.json(fields) for proj in projects]
def get_project(self, project_id, fields=None):
session = self.db.session()
try:
proj = session.query(Project).filter(
Project.uuid == project_id).one()
except NoResultFound:
return None
return proj.json(fields)
def delete_project(self, project_id):
session = self.db.session()
try:
proj = session.query(Project).filter(
Project.uuid == project_id).one()
except NoResultFound:
return None
session.delete(proj)
session.commit()
def get_runs(self, fields=None, limit=None, offset=None):
# XXX filters
log_threadid("Getting runs")
runs = self.db.session().query(Run)
if limit is not None:
runs = runs.limit(limit)
if offset is not None:
runs = runs.offset(offset)
return [run.json(fields) for run in runs]
def _get_run(self, run_id):
session = self.db.session()
try:
run = session.query(Run).filter(Run.uuid == run_id).one()
except NoResultFound:
run = None
return run, session
def _run_complete(self, session, mgr, future):
logger.debug('Run Plan completed')
try:
response = future.result()
logger.debug("Run response of: %s", response)
except:
logger.error("Run did an exception", exc_info=True)
def abort_run(self, run_id):
"""Aborts a run."""
if run_id not in self._runs:
return False
self._runs[run_id].abort = True
return True
def run_plan(self, strategy_id, create_db=True, **kwargs):
session = self.db.session()
log_threadid("Running strategy: %s" % strategy_id)
uuid = kwargs.pop('run_uuid', None)
owner = kwargs.pop('owner', None)
# now we can start a new run
try:
mgr, future = RunManager.new_run(
run_helpers=self.run_helpers,
db_session=session,
pool=self.pool,
io_loop=self.loop,
plan_uuid=strategy_id,
run_uuid=uuid,
additional_env=kwargs,
owner=owner)
except NoResultFound as e:
raise LoadsException(str(e))
callback = partial(self._run_complete, session, mgr)
future.add_done_callback(callback)
self._runs[mgr.run.uuid] = mgr
# create an Influx Database
if create_db:
try:
self._create_dbs(mgr.run.uuid)
            except:
                mgr.abort = True
                raise
return mgr.run.uuid
def _create_dbs(self, run_id):
if self.influx is None:
return
def create(name):
return self.influx.create_database("db"+name.replace('-', ''))
return self._db_action(run_id, create)
def _delete_dbs(self, run_id):
if self.influx is None:
return
def delete(name):
return self.influx.drop_database("db"+name.replace('-', ''))
return self._db_action(run_id, delete)
def _db_action(self, run_id, action):
names = [run_id]
with concurrent.futures.ThreadPoolExecutor(len(names)) as e:
results = e.map(action, names)
return all(results)
def delete_run(self, run_id):
run, session = self._get_run(run_id)
self._delete_dbs(run_id)
session.delete(run)
session.commit()
# delete grafana
class StepRecordLink(namedtuple('StepRecordLink',
'step_record step ec2_collection')):
"""Named tuple that links a EC2Collection to the step and the actual
step record."""
class RunManager:
"""Manages the life-cycle of a load run.
"""
def __init__(self, run_helpers, db_session, pool, io_loop, run):
self.helpers = run_helpers
self.run = run
self._db_session = db_session
self._pool = pool
self._loop = io_loop
self._set_links = []
self._dns_map = {}
self.abort = False
self._state_description = ""
# XXX see what should be this time
self.sleep_time = 1.5
self.base_containers = [HEKA_INFO, DNSMASQ_INFO, WATCHER_INFO]
# Setup the run environment vars
self.run_env = BASE_ENV.copy()
self.run_env["RUN_ID"] = str(self.run.uuid)
def _set_state(self, state):
self._state_description = state
if state:
logger.debug(state)
def _get_state(self):
return self._state_description
state_description = property(_get_state, _set_state)
@classmethod
def new_run(cls, run_helpers, db_session, pool, io_loop, plan_uuid,
run_uuid=None, additional_env=None, owner=None):
"""Create a new run manager for the given strategy name
This creates a new run for this strategy and initializes it.
:param db_session: SQLAlchemy database session
:param pool: AWS EC2Pool instance to allocate from
:param io_loop: A tornado io loop
:param plan_uuid: The strategy UUID to use for this run
:param run_uuid: Use the provided run_uuid instead of generating one
:param additional_env: Additional env args to use in container set
interpolation
:returns: New RunManager in the process of being initialized,
along with a future tracking the run.
"""
# Create the run for this manager
logger.debug('Starting a new run manager')
run = Run.new_run(db_session, plan_uuid, owner)
if run_uuid:
run.uuid = run_uuid
db_session.add(run)
db_session.commit()
log_threadid("Committed new session.")
run_manager = cls(run_helpers, db_session, pool, io_loop, run)
if additional_env:
run_manager.run_env.update(additional_env)
future = gen.convert_yielded(run_manager.start())
return run_manager, future
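    # Typical call (a sketch; the argument values here are placeholders):
    #
    #     mgr, future = RunManager.new_run(
    #         run_helpers=helpers, db_session=session, pool=pool,
    #         io_loop=loop, plan_uuid="some-plan-uuid")
    #     future.add_done_callback(on_run_complete)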
@classmethod
def recover_run(cls, run_uuid):
"""Given a run uuid, fully reconstruct the run manager state"""
pass
@property
def uuid(self):
return self.run.uuid
@property
def state(self):
return self.run.state
async def _get_steps(self):
"""Request all the step instances needed from the pool
This is a separate method as both the recover run and new run
will need to run this identically.
"""
logger.debug('Getting steps & collections')
steps = self.run.plan.steps
collections = await gen.multi(
[self._pool.request_instances(
self.run.uuid,
s.uuid,
count=s.instance_count,
inst_type=s.instance_type,
region=s.instance_region,
plan=self.run.plan.name,
owner=self.run.owner,
run_max_time=s.run_delay + s.run_max_time)
for s in steps])
try:
            # First, set up some dicts, all keyed by step.uuid
steps_by_uuid = {x.uuid: x for x in steps}
step_records_by_uuid = {x.step.uuid: x for x in
self.run.step_records}
# Link the step/step_record/ec2_collection under a single
# StepRecordLink tuple
for coll in collections:
step = steps_by_uuid[coll.uuid]
step_record = step_records_by_uuid[coll.uuid]
setlink = StepRecordLink(step_record, step, coll)
self._set_links.append(setlink)
except Exception:
# Ensure we return collections if something bad happened
logger.error("Got an exception in runner, returning instances",
exc_info=True)
try:
await gen.multi([self._pool.release_instances(x)
for x in collections])
except:
logger.error("Wat? Got an error returning instances.",
exc_info=True)
# Clear out the setlinks to make sure they aren't cleaned up
# again
self._set_links = []
async def start(self):
"""Fully manage a complete run
This doesn't return until the run is complete. A reference
should be held so that the run state can be checked on as
needed while this is running. This method chains to all the
individual portions of a run.
"""
try:
# Initialize the run
await self._initialize()
# Start and manage the run
await self._run()
# Terminate the run
await self._shutdown()
except:
await self._cleanup(exc=True)
else:
await self._cleanup()
return True
async def _initialize(self):
# Initialize all the collections, this needs to always be done
# just in case we're recovering
await self._get_steps()
# Skip if we're running
if self.state == RUNNING:
return
# Wait for the collections to come up
self.state_description = "Waiting for running instances."
await gen.multi([x.ec2_collection.wait_for_running()
for x in self._set_links])
# Setup docker on the collections
docker = self.helpers.docker
await gen.multi([docker.setup_collection(x.ec2_collection)
for x in self._set_links])
# Wait for docker on all the collections to come up
self.state_description = "Waiting for docker"
await gen.multi([docker.wait(x.ec2_collection, timeout=360)
for x in self._set_links])
# Pull the base containers we need (for heka)
self.state_description = "Pulling base container images"
for container in self.base_containers:
logger.debug("Pulling base container " + container.name)
await gen.multi(
[docker.load_containers(x.ec2_collection, container.name,
container.url)
for x in self._set_links])
logger.debug("Pulling containers for this step.")
# Pull the appropriate containers for every collection
self.state_description = "Pulling step images"
await gen.multi(
[docker.load_containers(x.ec2_collection, x.step.container_name,
x.step.container_url)
for x in self._set_links])
self.state_description = ""
self.run.state = RUNNING
self.run.started_at = datetime.utcnow()
self._db_session.commit()
log_threadid("Now running.")
async def _shutdown(self):
# If we aren't terminating, we shouldn't have been called
if self.state != TERMINATING:
return
# Tell all the collections to shutdown
await gen.multi([self._stop_step(s) for s in self._set_links])
self.run.state = COMPLETED
self.run.aborted = self.abort
self._db_session.commit()
async def _cleanup(self, exc=False):
if exc:
# Ensure we try and shut them down
logger.debug("Exception occurred, ensure containers terminated.",
exc_info=True)
try:
await gen.multi([self._stop_step(s) for s in self._set_links])
except Exception:
logger.error("Le sigh, error shutting down instances.",
exc_info=True)
# Ensure we always release the collections we used
logger.debug("Returning collections")
try:
await gen.multi([self._pool.release_instances(x.ec2_collection)
for x in self._set_links])
except Exception:
logger.error("Embarassing, error returning instances.",
exc_info=True)
self._set_links = []
async def _run(self):
# Skip if we're not running
if self.state != RUNNING:
return
# Main run loop
while True:
if self.abort:
logger.debug("Aborted, exiting run loop.")
break
stop = await self._check_steps()
if stop:
break
# Now we sleep for a bit
await gen.Task(self._loop.add_timeout, time.time() +
self.sleep_time)
# We're done running, time to terminate
self.run.state = TERMINATING
self.run.completed_at = datetime.utcnow()
self._db_session.commit()
async def _check_steps(self):
"""Checks steps for the plan to see if any existing steps
have finished, or new ones need to start.
        When all the steps have run and completed, returns True
        to indicate nothing remains for the plan.
"""
# Bools of collections that were started/finished
started = [x.ec2_collection.started for x in self._set_links]
finished = [x.ec2_collection.finished for x in self._set_links]
# If all steps were started and finished, the run is complete.
if all(started) and all(finished):
return True
# Locate all steps that have completed
dones = await gen.multi([self._is_done(x) for x in self._set_links])
dones = zip(dones, self._set_links)
# Send shutdown to steps that have completed, we can shut them all
# down in any order so we run in parallel
async def shutdown(setlink):
try:
await self._stop_step(setlink)
except:
logger.error("Exception in shutdown.", exc_info=True)
setlink.step_record.completed_at = datetime.utcnow()
self._db_session.commit()
await gen.multi([shutdown(s) for done, s in dones if done])
# Start steps that should be started, ordered by delay
starts = list(filter(self._should_start, self._set_links))
starts.sort(key=lambda x: x.step.run_delay)
# Start steps in order of lowest delay first, to ensure that steps
# started afterwards can use DNS names/etc from prior steps
for setlink in starts:
# We tag the collection here since this may not actually run
# until another time through this loop due to async nature
setlink.ec2_collection.local_dns = bool(self._dns_map)
try:
await self._start_step(setlink)
except:
logger.error("Exception starting.", exc_info=True)
setlink.step_record.failed = True
setlink.step_record.started_at = datetime.utcnow()
self._db_session.commit()
# If this collection reg's a dns name, add this collections
# ip's to the name
if setlink.step.dns_name:
ips = [x.instance.ip_address for x
in setlink.ec2_collection.instances]
self._dns_map[setlink.step.dns_name] = ips
return False
async def _start_step(self, setlink):
setlink.ec2_collection.started = True
# Reload sysctl because coreos doesn't reload this right
await self.helpers.ssh.reload_sysctl(setlink.ec2_collection)
# Start Watcher
await self.helpers.watcher.start(setlink.ec2_collection,
self.helpers.docker)
# Start heka
await self.helpers.heka.start(setlink.ec2_collection,
self.helpers.docker,
self.helpers.ping,
"db"+self.run.uuid.replace('-', ''),
series=setlink.step.docker_series)
# Startup local DNS if needed
if setlink.ec2_collection.local_dns:
logger.debug("Starting up DNS")
await self.helpers.dns.start(setlink.ec2_collection, self._dns_map)
# Startup the testers
env = self.run_env.copy()
env.update(setlink.step.environment_data)
env['CONTAINER_ID'] = setlink.step.uuid
logger.debug("Starting step: %s", setlink.ec2_collection.uuid)
await self.helpers.docker.run_containers(
setlink.ec2_collection,
setlink.step.container_name,
setlink.step.additional_command_args,
env=env,
ports=setlink.step.port_mapping or {},
volumes=setlink.step.volume_mapping or {},
delay=setlink.step.node_delay,
)
async def _stop_step(self, setlink):
# If we're already finished, don't shut things down twice
if setlink.ec2_collection.finished:
return
setlink.ec2_collection.finished = True
# Stop the docker testing agents
await self.helpers.docker.stop_containers(
setlink.ec2_collection, setlink.step.container_name)
# Stop heka
await self.helpers.heka.stop(setlink.ec2_collection,
self.helpers.docker)
# Stop watcher
await self.helpers.watcher.stop(setlink.ec2_collection,
self.helpers.docker)
# Stop dnsmasq
if setlink.ec2_collection.local_dns:
await self.helpers.dns.stop(setlink.ec2_collection)
# Remove anyone that failed to shutdown properly
gen.convert_yielded(setlink.ec2_collection.remove_dead_instances())
async def _is_done(self, setlink):
"""Given a StepRecordLink, determine if the collection has
finished or should be terminated."""
# If we haven't been started, we can't be done
if not setlink.step_record.started_at:
return False
# If we're already stopped, then we're obviously done
if setlink.ec2_collection.finished:
return True
# If the collection has no instances running the container, its done
docker = self.helpers.docker
container_name = setlink.step.container_name
instances_running = await docker.is_running(
setlink.ec2_collection,
container_name,
prune=setlink.step.prune_running
)
if not instances_running:
inst_info = []
for inst, info in self._instance_debug_info(setlink).items():
inst_info.append(inst)
inst_info.append(pformat(info))
logger.debug("No instances running, collection done.")
logger.debug("Instance information:\n%s", '\n'.join(inst_info))
return True
# Remove instances that stopped responding
await setlink.ec2_collection.remove_dead_instances()
# Otherwise return whether we should be stopped
return setlink.step_record.should_stop()
def _instance_debug_info(self, setlink):
"""Return a dict of information describing a link's instances"""
infos = {}
for ec2i in setlink.ec2_collection.instances:
infos[ec2i.instance.id] = info = dict(
aws_state=ec2i.instance.state,
broker_state=vars(ec2i.state),
step_started_at=setlink.step_record.started_at,
)
docker = getattr(ec2i.state, 'docker', None)
if not docker:
continue
try:
containers = docker.get_containers(all=True)
except Exception as exc:
ps = "get_containers failed: %r" % exc
else:
ps = []
for ctid, ct in containers.items():
try:
state = docker._client.inspect_container(ctid)['State']
except Exception as exc:
state = "inspect_container failed: %r" % exc
ct['State'] = state
ps.append(ct)
info['docker_ps'] = ps
return infos
def _should_start(self, setlink):
"""Given a StepRecordLink, determine if the step should be started."""
return setlink.step_record.should_start()
| UTF-8 | Python | false | false | 25,543 | py | 6 | broker.py | 5 | 0.573699 | 0.57092 | 0 | 758 | 32.697889 | 79 |
zig-zagreus/async-ev-cnn | 4,526,895,558,681 | 0199b10834b1c3166e3e08fe63db6f400af918ea | e8c7a56449038a5c8f12b974cc99a80257cbd34e | /src/libs/runner.py | 2b23cf7128b9d98d8e45924efe40e27b30f46a8d | [
"MIT"
] | permissive | https://github.com/zig-zagreus/async-ev-cnn | f13ee2852191ebfb522a10fae93d7bef156b9452 | 1e844ab93b70cdf39d3f45315cafe095363ba9b8 | refs/heads/main | 2023-06-07T17:17:12.433009 | 2021-03-02T11:46:07 | 2021-03-02T11:46:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import time
import numpy as np
from functools import partial
from src.libs.utils import center_crop
from src.libs.viz import draw_bboxes, integrate_frame
class Runner:
def __init__(self, args, reader, profile_integration=False):
self.args = args
self.reader = reader
self.num_classes = self.reader.num_classes()
self.profile_integration = profile_integration
label_k = np.array(list(reader.label_to_idx().keys()))
label_v = np.array(list(reader.label_to_idx().values()))
self.idx_to_label = label_k[np.argsort(label_v)]
@staticmethod
def data_transform(l, x, y, ts, p, bboxes, args):
ts = ts - ts[0]
if args.frame_h != args.example_h or \
args.frame_w != args.example_w:
l, x, y, ts, p, bboxes = center_crop(l, x, y, ts, p, bboxes,
(args.example_h, args.example_w),
(args.frame_h, args.frame_w))
events = np.stack([y, x, ts], axis=-1)
return l, events
def show_frames(self, net_out, frames, *args, **kwargs):
drawn_frames = draw_bboxes(net_out, frames, self.args.yolo_num_cells_h, self.args.yolo_num_cells_w,
self.num_classes, idx_to_label=self.idx_to_label, conf_threshold=0.1,
nms_threshold=0., use_nms=True,
max_thickness=1, highlight_top_n=2, resize_ratio=5)
for frame in drawn_frames:
cv2.imshow('Predictions', frame)
cv2.waitKey(self.args.frame_delay)
cv2.waitKey(1)
def feed_network(self, network, events, frames, reset_state, *args, **kwargs):
raise NotImplementedError()
def run(self, network, *args, **kwargs):
n = 0
ex_time = []
# Test loop: forward pass through all the test examples
for i in range(int(np.ceil(self.reader.test_size() / self.args.batch_size))):
start_read = time.time()
_, events = self.reader.next_batch(self.args.batch_size, dataset='test',
preprocessing_fn=partial(self.data_transform, args=self.args),
concat_features=False,
threads=self.args.reader_threads)
end_reading = time.time()
loop_state = None
reset_state = True
if self.args.batch_event_usec is not None:
bins = np.arange(0, events[:, -1][-1], self.args.batch_event_usec)
bin_ids = np.digitize(events[:, -1], bins)
split_indices = np.where(bin_ids[:-1] != bin_ids[1:])[0] + 1
event_batches = np.array_split(events, indices_or_sections=split_indices, axis=0)
else:
num_event_batches = int(np.ceil(events.shape[0] / self.args.batch_event_size))
event_batches = np.array_split(events, indices_or_sections=num_event_batches, axis=0)
for events_batch in event_batches:
# Reconstruct frame
# =================
if self.profile_integration:
start_fw = time.time()
frames, prev_ts = integrate_frame(events_batch, self.args.leak,
self.args.frame_h, self.args.frame_w,
loop_state)
loop_state = [frames, prev_ts]
if not self.profile_integration:
start_fw = time.time()
# Network forward step
# ====================
net_out = self.feed_network(network, events, frames, reset_state, *args, **kwargs)
time_fw = time.time() - start_fw
ex_time.append(time_fw)
n += 1
print("Test batch {:<2} - sec/example: {:.3f} reading: {:.3f} sec"
"".format(i + 1, time_fw, end_reading - start_read))
if n % 1000 == 0:
print("Mean fw time ({} runs): {}".format(n, np.mean(ex_time)))
# Show frames
# ====================
self.show_frames(net_out, frames, loop_state)
reset_state = False
cv2.destroyAllWindows()
class TfFrameRunner(Runner):
def __init__(self, args, reader):
super().__init__(args, reader, profile_integration=True)
def feed_network(self, network, events, frames, reset_state, sess, placeholder, *args, **kwargs):
return sess.run(network, feed_dict={placeholder: frames})
class NumpyFrameRunner(Runner):
def __init__(self, args, reader):
super().__init__(args, reader, profile_integration=True)
def feed_network(self, network, events, frames, reset_state, *args, **kwargs):
return network(frames)
class NumpyEventRunner(Runner):
def __init__(self, args, reader):
super().__init__(args, reader, profile_integration=False)
def feed_network(self, network, events, frames, reset_state, *args, **kwargs):
return network(events, reset_state)
| UTF-8 | Python | false | false | 5,266 | py | 24 | runner.py | 20 | 0.528295 | 0.521458 | 0 | 127 | 40.456693 | 109 |
martinodonnell/LowResImageRecognision | 15,461,882,270,028 | 95205feec07c5c9ae39e92b4246d1c4c479d536e | e579eeb07f7b296898e673318445edd5bc3f7a9d | /unit_tests/datasets/stanford/test_stanford_datasets.py | 1cd9730bdb04d15e3055ab9071b40eb713bff7f5 | [] | no_license | https://github.com/martinodonnell/LowResImageRecognision | d0e86a90d635bb2ad6433cbd09abb26b5c728723 | a69dfc60f2859a16b0ddea88ae9ce696e19045f4 | refs/heads/master | 2023-03-01T19:47:14.666650 | 2021-02-13T10:07:26 | 2021-02-13T10:07:26 | 232,346,151 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torchvision
import sys
import os
if ('..' not in sys.path) : sys.path.append("..")
from config import STANFORD_CARS_TRAIN,STANFORD_CARS_TEST,STANFORD_CARS_TRAIN_ANNOS,STANFORD_CARS_TEST_ANNOS
from datasets.stanford.stanford_datasets import StanfordCarsDatasetV1,StanfordCarsDatasetV2,StanfordCarsDatasetV3
from torchvision import transforms
import torch
def setup_module(module):
""" setup any state specific to the execution of the given module."""
if os.getcwd().split('/')[-1].lower() != 'lowresimagerecognision' : os.chdir("..")
print('after',os.getcwd())
def test_check_stanford_v1_dataset_split_low_version():
basic_transform = transforms.Compose(
[
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
transforms.Normalize((0.4706145, 0.46000465, 0.45479808), (0.26668432, 0.26578658, 0.2706199))
]
)
train_dataset = StanfordCarsDatasetV1(STANFORD_CARS_TRAIN, STANFORD_CARS_TRAIN_ANNOS, basic_transform, (224,224),True)
test_dataset = StanfordCarsDatasetV1(STANFORD_CARS_TEST, STANFORD_CARS_TEST_ANNOS, basic_transform, (224,224),True)
assert len(train_dataset) ==8144
assert len(test_dataset) ==8041
def test_check_stanford_v1_dataset_split_high_version():
basic_transform = transforms.Compose(
[
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
transforms.Normalize((0.4706145, 0.46000465, 0.45479808), (0.26668432, 0.26578658, 0.2706199))
]
)
train_dataset = StanfordCarsDatasetV1(STANFORD_CARS_TRAIN, STANFORD_CARS_TRAIN_ANNOS, basic_transform, (224,224),False)
test_dataset = StanfordCarsDatasetV1(STANFORD_CARS_TEST, STANFORD_CARS_TEST_ANNOS, basic_transform, (224,224),False)
assert len(train_dataset) ==8144
assert len(test_dataset) ==8041
def test_stanford_v1_data():
basic_transform = transforms.Compose(
[
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
transforms.Normalize((0.4706145, 0.46000465, 0.45479808), (0.26668432, 0.26578658, 0.2706199))
]
)
train_dataset = StanfordCarsDatasetV1(STANFORD_CARS_TRAIN, STANFORD_CARS_TRAIN_ANNOS, basic_transform, (224,224),False)
#Get first output
sample = next(iter(train_dataset))
#Check dimensions of image
assert (sample[0].size()[0] == 3)
assert (sample[0].size()[1] == 224)
assert (sample[0].size()[2] == 224)
#Check datatype of target(int)
assert type(sample[1])== int
def test_check_stanford_v2_dataset_split_high_version():
basic_transform = transforms.Compose(
[
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
transforms.Normalize((0.4706145, 0.46000465, 0.45479808), (0.26668432, 0.26578658, 0.2706199))
]
)
train_dataset = StanfordCarsDatasetV2(STANFORD_CARS_TRAIN, STANFORD_CARS_TRAIN_ANNOS, basic_transform, (224,224),False)
test_dataset = StanfordCarsDatasetV2(STANFORD_CARS_TEST, STANFORD_CARS_TEST_ANNOS, basic_transform, (224,224),False)
assert len(train_dataset) ==8144
assert len(test_dataset) ==8041
def test_check_stanford_v2_dataset_split_low_version():
basic_transform = transforms.Compose(
[
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
transforms.Normalize((0.4706145, 0.46000465, 0.45479808), (0.26668432, 0.26578658, 0.2706199))
]
)
train_dataset = StanfordCarsDatasetV2(STANFORD_CARS_TRAIN, STANFORD_CARS_TRAIN_ANNOS, basic_transform, (224,224),True)
test_dataset = StanfordCarsDatasetV2(STANFORD_CARS_TEST, STANFORD_CARS_TEST_ANNOS, basic_transform, (224,224),True)
assert len(train_dataset) ==8144
assert len(test_dataset) ==8041
def test_stanford_v2_data():
basic_transform = transforms.Compose(
[
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
transforms.Normalize((0.4706145, 0.46000465, 0.45479808), (0.26668432, 0.26578658, 0.2706199))
]
)
train_dataset = StanfordCarsDatasetV2(STANFORD_CARS_TRAIN, STANFORD_CARS_TRAIN_ANNOS, basic_transform, (224,224),False)
#Get first output
sample = next(iter(train_dataset))
#Check dimensions of image
assert (sample[0].size()[0] == 3)
assert (sample[0].size()[1] == 224)
assert (sample[0].size()[2] == 224)
#Check datatype of target(int)
assert type(sample[1])== int
assert type(sample[2])== int
def test_check_stanford_v3_dataset_split():
basic_transform = transforms.Compose(
[
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
transforms.Normalize((0.4706145, 0.46000465, 0.45479808), (0.26668432, 0.26578658, 0.2706199))
]
)
train_dataset = StanfordCarsDatasetV3(STANFORD_CARS_TRAIN, STANFORD_CARS_TRAIN_ANNOS, basic_transform, (224,224),False)
test_dataset = StanfordCarsDatasetV3(STANFORD_CARS_TEST, STANFORD_CARS_TEST_ANNOS, basic_transform, (224,224),False)
assert len(train_dataset) ==8144*2
assert len(test_dataset) ==8041*2
def test_stanford_v3_data():
basic_transform = transforms.Compose(
[
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
transforms.Normalize((0.4706145, 0.46000465, 0.45479808), (0.26668432, 0.26578658, 0.2706199))
]
)
train_dataset = StanfordCarsDatasetV3(STANFORD_CARS_TRAIN, STANFORD_CARS_TRAIN_ANNOS, basic_transform, (224,224),False)
#Get first output
sample = next(iter(train_dataset))
#Check dimensions of image
assert (sample[0].size()[0] == 3)
assert (sample[0].size()[1] == 224)
assert (sample[0].size()[2] == 224)
#Check datatype of target(int)
assert type(sample[1])== int
| UTF-8 | Python | false | false | 6,229 | py | 60 | test_stanford_datasets.py | 40 | 0.663349 | 0.563814 | 0 | 176 | 34.346591 | 123 |
DanielAndrews43/Tic-Tac-Toe | 10,608,569,258,432 | 28ddcf5a4a6c07b1c1fafe8fe6d103315173cd80 | 7ddf5ae570ef384422e10480622cadf938082ca5 | /tic-tac-toe.py | 1e34c5a6ed5822065f285f515cd4c0f7346a7eb1 | [] | no_license | https://github.com/DanielAndrews43/Tic-Tac-Toe | 3d99e2170d371ebe0258dbbaab15bf7ed7b00f42 | 40713c2da6683a9ff0af594ea5cbb800e8254b77 | refs/heads/master | 2021-01-16T00:12:10.262135 | 2013-11-27T18:42:27 | 2013-11-27T18:42:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Tic-Tac-Toe
''' Example boards
-----------
| x | x | x |
|--- --- ---|
| x | x | x |
|--- --- ---|
| x | x | x |
-----------
x | x | x
--- --- ---
x | x | x
--- --- ---
x | x | x
'''
def intro(keyword):
'''
starts the game
returns output based on input
starts game then not talked to again
'''
keyword.lower()
if keyword == 'beginning':
print 'Welcome to Tic-Tac-Toe!'
print 'Type \'start\' to start, \'help\' for help, or \'rules\' for the game rules'
elif keyword == 'help':
print 'follow the directions on the screen!'
elif keyword == 'rules':
print 'The goal of the game is to get three of your letter in a row, Xs or Os'
print 'Write in the two letter combination of any untaken square, such as tm for top middle,'
print 'and br for bottom right. (b=bottom,m=middle,t=top,l=left,r=right) Firs to three in a row wins!'
elif keyword == 'start' or keyword == 'start game' or keyword == 'startgame':
print 'Starting game now!'
return main()
else:
print 'That input is not understood.'
print 'Type \'start\' to start, \'help\' for help, or \'rules\' for the game rules'
userKeyword = raw_input('Where would you like to go?')
return intro(userKeyword)
def board(moves):
'''
returns game board using moves dict
'''
print (' ' + str(moves.get('tl')) + ' | ' + str(moves.get('tm')) + ' | ' + str(moves.get('tr')))
print '--- --- ---'
print (' ' + str(moves.get('ml')) + ' | ' + str(moves.get('mm')) + ' | ' + str(moves.get('mr')))
print '--- --- ---'
print (' ' + str(moves.get('bl')) + ' | ' + str(moves.get('bm')) + ' | ' + str(moves.get('br')))
return
def winner(who):
print ('Winner is: ' + str(who)) * 5
def checkIfWin(turn,moves):
'''
checks for three in a row
'''
player = ''
if moves != turn%2:
player = 'X'
else:
player = 'O'
print('Checking for a win...')
if moves.get('tr') == moves.get('tm') == moves.get('tl') == player:
return True
elif moves.get('mr') == moves.get('mm') == moves.get('ml') == player:
return True
elif moves.get('br') == moves.get('bm') == moves.get('bl') == player:
return True
elif moves.get('tr') == moves.get('mr') == moves.get('br') == player:
return True
elif moves.get('bm') == moves.get('mm') == moves.get('tm') == player:
return True
elif moves.get('bl') == moves.get('ml') == moves.get('tl') == player:
return True
elif moves.get('br') == moves.get('mm') == moves.get('tl') == player:
return True
elif moves.get('bl') == moves.get('mm') == moves.get('tr') == player:
return True
else:
return False
def main():
'''
runs the game
'''
turn = 1
remainingSquares = ['tr','tm','tl','mr','mm','ml','br','bm','bl']
moves = {'tr':' ','tm':' ','tl':' ','mr':' ','mm':' ','ml':' ','br':' ','bl':' ','bm':' '} #holds moves
gameOver = False
while gameOver == False: #Runs until game is over :)
print
print
print
print ('It is turn #' + str(turn))
print
board(moves)
print
print 'Available moves: ' + str(remainingSquares)
if turn%2 != 0: #Player 1's turn
player = 'player one'
else:
player = 'player two'
print 'It is %s\'s turn. Please pick where you would like to go' % (player)
newMove = raw_input('Please enter your next move: ')
if newMove in remainingSquares:
turn += 1
if player == 'player one':
moves.update({newMove:'X'})
else:
moves.update({newMove:'O'})
remainingSquares.remove(newMove) #Old move can't be chosen anymore
if checkIfWin(turn,moves):
gameOver = True
winner(player)
continue #Restarts the game
else:
print 'Next Turn!'
else:
print str(newMove) + ' is not a valid move! Please enter a vaild move!'
print remainingSquares
if turn == 10: #10 turns means all squares are filled with no winner
print 'Cat\'s Game! Game Over!'
gameOver = True
continue #Restarts the game
print
print
print
print 'Starting new game...'
return main() #Recursion? :P
intro('beginning') #starts the game | UTF-8 | Python | false | false | 4,590 | py | 2 | tic-tac-toe.py | 1 | 0.509368 | 0.506972 | 0 | 148 | 30.02027 | 110 |
ralphbean/tos-rit-projects-seminar | 10,797,547,828,074 | 5135916a4b38642dee806cc82e9fb0fd3359a1a3 | a8b75179b0df027c504cddc9757b5ddd79415be2 | /lib/ritfloss/rubric_stats/stats.py | 390a620c264ec479fb8965ff8fa850e4723cc6a0 | [] | no_license | https://github.com/ralphbean/tos-rit-projects-seminar | 1da96e2a7ece7bf562f510510220c98a7ff564e7 | 407f8b95d66e49b35ea549282f6ae5790e488bc2 | refs/heads/master | 2021-01-01T17:09:27.904662 | 2018-10-08T13:15:09 | 2018-10-08T13:15:09 | 2,473,022 | 1 | 16 | null | false | 2018-10-08T13:15:10 | 2011-09-28T05:34:14 | 2016-10-11T13:48:55 | 2018-10-08T13:15:10 | 20,999 | 17 | 16 | 0 | Python | false | null |
from pbs import grep, git
import pprint
def get_authors():
lines = grep(git("log"), "Author").split("\n")
names = [' '.join(line.split()[1:-1]) for line in lines]
garbage = ['', 'unknown', 'Ralph Bean']
return [name for name in set(names) if name not in garbage]
def adjust_impact(impact):
duplicates = {
"posiden": "Ross Delinger",
"kaeedo": "Kai Ito",
"Rabenvald": "Ben Boozer",
"Remy D": "Remy DeCausemaker",
"Phil Moccio": "Philip Moccio",
}
for old, new in duplicates.iteritems():
impact[new] = impact[new] + impact[old]
del impact[old]
return impact
def _get_impact_per(author):
first = "c71f302cc5cbc9533b43fd076b76d006d08b6d30"
last = "01cbd7a77cf3f6cdd4b238c475513252d5bc057b"
opts = "--author='%s' --oneline --numstat --pretty=format:" % author
span = "%s..%s" % (first, last)
lines = git("log %s %s" % (opts, span)).split("\n")
lines = [line.strip() for line in lines if line.strip()]
total = 0
for line in lines:
try:
total += sum(map(int, line.split()[:2]))
except ValueError:
print "(skipping)", line
return total
def get_impact(authors):
raw_impact = dict(zip(authors, map(_get_impact_per, authors)))
impact = adjust_impact(raw_impact)
return impact
def get_grades(impact):
_min, _max = min(impact.values()), max(impact.values())
for key in impact.keys():
impact[key] = float(impact[key] - _min) / (_max - _min)
impact[key] = 75 + impact[key] * 25
return impact
def main():
authors = get_authors()
print "Found %i unique authors." % len(authors)
impact = get_impact(authors)
print
print "Determined the following 'impact' per author:"
for key, value in impact.iteritems():
print "", key, value
grades = get_grades(impact)
print
print "Got the following grades per student:"
for key, value in grades.iteritems():
print "", key, value
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 2,063 | py | 22 | stats.py | 6 | 0.595734 | 0.568105 | 0 | 74 | 26.864865 | 72 |
dmitriySSAU/TestRunner | 5,016,521,834,381 | 60512fcdbbad5e3e24d2928cfa0ebe88f9ef8b13 | b7c89caa1d11320265f7d19aa93a652103c40a13 | /scripts/tools/ptz.py | 9af217b9b5e4211c30d02456f04ad9a27ce5a5e6 | [] | no_license | https://github.com/dmitriySSAU/TestRunner | 9a6f9d3516b1f8a9ca047aa798308184ed639f52 | 51dd5ad499adf15efedf37d902214ec2504a637d | refs/heads/master | 2021-05-19T12:50:39.065949 | 2020-09-01T06:30:54 | 2020-09-01T06:30:54 | 251,703,280 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
from lib.log_and_statistic import log
from lib.log_and_statistic.statistic import Statistic
from lib.client.soapClient import SoapClient
from scripts.ws import ptz as ws
from scripts.common import tools
def compare_coordinates(first_coordinate: int, second_coordinate: int, inaccuracy: int = 0) -> bool:
"""Функция сравнения двух координат.
:param first_coordinate: первая координата;
:param second_coordinate: вторая координата;
:param inaccuracy: погершность равенства координат.
:return: True - равны; False - различны.
"""
if first_coordinate == second_coordinate:
return True
else:
if inaccuracy == 0:
return False
if first_coordinate > second_coordinate:
for inaccuracy_ in range(1, inaccuracy + 1):
second_coordinate += 1
if first_coordinate == second_coordinate:
return True
else:
for inaccuracy_ in range(1, inaccuracy + 1):
first_coordinate += 1
if first_coordinate == second_coordinate:
return True
return False
def get_coordinates(client: SoapClient, login: str, password: str, key2: str, statistic: Statistic) -> dict:
"""Функция для получения координат (использует ws метод ptzclient:Command).
:param client: объект soap клиента;
:param login: логин пользователя;
:param password: пароль пользователя;
:param key2: имя камеры;
:param statistic: объект класса Statistic.
:return: словарь с координатами (ключи pan и tilt).
"""
logger = statistic.get_log().get_logger("scripts/tools/ptz")
logger.info("was called (client: SoapClient, login: str, password: str, key2: str)")
logger.debug("with params (client_obj, " + login + ", " + password + ", " + key2 + ")")
logger.info("call ptzclient_command_simple()")
statistic.append_info("получение старых координат...", "ИНФО")
logger.info("getting old coordinates")
query_all_result = ws.ptzclient_command_simple(client, key2, login, password, False, 0, 0, 0, 0, 0, 0, True, 0, 0,
-1, -1, False)
tools.check_types(["old_coordinates['result'][0]"], [query_all_result["result"][0]], [dict], statistic)
is_old_coordinates = tools.check_keys_exist(query_all_result["result"][0], ["pan", "tilt", "timecoords"],
'old_coordinates["result"][0]', False, statistic)
old_timecoords = 0
if is_old_coordinates:
old_pan = query_all_result["result"][0]["pan"]
old_tilt = query_all_result["result"][0]["tilt"]
old_timecoords = query_all_result["result"][0]["timecoords"]
logger.debug("old_pan: " + str(old_pan) + ", old_tilt: " + str(old_tilt) + ", old_timecoords: "
+ str(old_timecoords))
count_coordinates_missing = 0
while True:
logger.info("call ptzclient_command_simple()")
statistic.append_info("получение текуших координат...", "ИНФО")
logger.info("getting current coordinates")
current_coordinates = ws.ptzclient_command_simple(client, key2, login, password, False, 0, 0, 0, 0, 0, 0, True,
0, 0, -1, -1, False)
tools.check_types(["current_coordinates['result'][0]"], [current_coordinates["result"][0]], [dict], statistic)
is_current_coordinates = tools.check_keys_exist(current_coordinates["result"][0], ["pan", "tilt"],
'current_coordinates["result"][0]', False, statistic)
if is_current_coordinates is False:
count_coordinates_missing += 1
if count_coordinates_missing == 3:
statistic.append_error("Получение координат завершилось с ошибкой!", "НЕТ_КООРДИНАТ", False)
break
continue
current_timecoords = current_coordinates["result"][0]["timecoords"]
if is_old_coordinates and current_timecoords != old_timecoords or is_old_coordinates is False:
current_pan = current_coordinates["result"][0]["pan"]
current_tilt = current_coordinates["result"][0]["tilt"]
logger.debug("current_pan: " + str(current_pan) + ", current_tilt: " + str(current_tilt))
return {
"pan": current_pan,
"tilt": current_tilt
}
def turn(client: SoapClient, login: str, password: str, key1: str, key2: str, key3: str, cmd: str,
statistic: Statistic) -> bool:
"""Функция для поворота камеры в нужную сторону (right, left, up, down) с использованием нужного ws метода.
либо ptzserver:command (key1 и key3 != ""), либо ptzclient:command (key1 и key3 == "").
:param client: объект soap клиентя
:param login: логин пользователя
:param password: пароль пользователя
:param key1: имя сервера. Если нужно осуществить поворот с помощью ws метода ptzserver:command, то key1 должен
быть обязательно заполнен (!= "")
:param key2: имя камеры
:param key3: профиль камеры. Если нужно осуществить поворот с помощью ws метода ptzserver:command, то key3 должен
быть обязательно заполнен (!= "")
:param cmd: направление поворота - right, left, up, down
:param statistic: объект класса Statistic
:return: флаг успешности поворота
"""
logger = statistic.get_log().get_logger("scripts/tools/ptz")
logger.info("was called (client: SoapClient, login: str, password: str, key1: str, key2: str, key3: str, cmd: str)")
logger.debug("with params (client_obj, " + login + ", " + password + ", " + key1 + ", " + key2 + ", " + key3 +
", " + cmd + ")")
left = 0
right = 0
up = 0
down = 0
if cmd == "left":
left = 70
elif cmd == "right":
right = 70
elif cmd == "up":
up = 70
elif cmd == "down":
down = 70
coordinates = get_coordinates(client, login, password, key2, statistic)
if cmd == "up" or cmd == "down":
current_coordinate = coordinates["tilt"]
else:
current_coordinate = coordinates["pan"]
statistic.append_info("поворот " + cmd + "...", "ИНФО")
logger.info("turning " + cmd)
if key1 == "" and key3 == "":
ws.ptzclient_command_simple(client, key2, login, password, False, left, right, up, down, 0, 0, False, 0, 0, -1,
-1, False)
statistic.append_info("ws 'ptzclient:Command[" + cmd + "=70]' отправлен...", "ПОВОРОТ")
logger.info("ws method 'ptzclient:Command[" + cmd + "=70]' was sent")
ws.ptzclient_command_simple(client, key2, login, password,
True, 0, 0, 0, 0, 0, 0, False, 0, 0, -1, -1, False) # отправка остановки
statistic.append_info("ws 'ptzclient:Command[" + cmd + "=0]' отправлен...", "ОСТАНОВКА")
logger.info("ws method 'ptzclient:Command[" + cmd + "=0]' was sent")
old_coordinate = current_coordinate
coordinates = get_coordinates(client, login, password, key2, statistic)
if cmd == "up" or cmd == "down":
current_coordinate = coordinates["tilt"]
else:
current_coordinate = coordinates["pan"]
if compare_coordinates(old_coordinate, current_coordinate) is False:
statistic.append_info("Поворот " + cmd + " успешно выполнен!", "ПОВОРОТ")
logger.info("turning " + cmd + " was executed successfully!")
return True
else:
return False
def go_to_coordinate(client: SoapClient, login: str, password: str, key2: str, ws_method: str,
cmd: str, coordinate: int, statistic: Statistic, inaccuracy: int = 0) -> bool:
"""Функция перевода камеры в указанные координаты.
:param client: объект soap клиентя
:param login: логин пользователя
:param password: пароль пользователя
:param key2: имя камеры
:param ws_method: через какой метод выполнять команду (ptzserver или ptzclient)
:param cmd: команда
:param coordinate: координаты
:param statistic: объект класса Statistic
:param inaccuracy точность
:return: флаг успешности перехода
"""
logger = statistic.get_log().get_logger("scripts/tools/ptz")
logger.info("was called (client: SoapClient, login: str, password: str, key2: str, \
cmd: str, coordinate: int, inaccuracy: int = 0)")
logger.debug("with params (client_obj, " + login + ", " + password + ", " + key2 + ", " +
cmd + ", " + str(coordinate) + ", " + str(inaccuracy) + ")")
pan_to = -1
tilt_to = -1
if cmd == "PanTo":
pan_to = coordinate
elif cmd == "TiltTo":
tilt_to = coordinate
else:
statistic.append_error(cmd, "НЕВАЛИД_КОМАНДА_PTZ", True)
if ws_method == "ptzclient":
ws.ptzclient_command_simple(client, key2, login, password, False, 0, 0, 0, 0, 0, 0, False, 0, 0, pan_to,
tilt_to, False)
message = "ws 'ptzclient:Command[" + cmd + "=" + str(coordinate) + "]' отправлен..."
elif ws_method == "ptzserver":
ws.ptzserver_command_simple(client, key2, login, password, False, 0, 0, 0, 0, 0, 0, False, 0, 0,
pan_to, tilt_to, False)
message = "ws 'ptzserver:Command[" + cmd + "=" + str(coordinate) + "]' отправлен..."
else:
statistic.append_error(ws_method, "НЕВАЛИД_МЕТОД_PTZ", True)
statistic.append_info(message, "ПЕРЕХОД В КООРДИНАТЫ")
logger.info(message)
time.sleep(1)
coordinates = get_coordinates(client, login, password, key2, statistic)
if cmd == "TiltTo":
current_coordinate = coordinates["tilt"]
else:
current_coordinate = coordinates["pan"]
if compare_coordinates(coordinate, current_coordinate, inaccuracy):
statistic.append_info(cmd + " " + str(coordinate) + " выполнена успешно!", "УСПЕХ")
logger.info(cmd + " " + str(coordinate) + " was executed successfully!")
return True
else:
return False
| UTF-8 | Python | false | false | 11,146 | py | 34 | ptz.py | 29 | 0.604206 | 0.590517 | 0 | 227 | 43.409692 | 120 |
ucandoitrohit/Python3 | 4,758,823,778,311 | 046b76826d33bd4b495a5c38fd58cd40dc525bb5 | c21df9427b4573fbb36e923543b61eeb84e2d1e6 | /Python_Basic_script/Python_Basic/16.file-io.basic.py | 54696f31f6504848c409b916f22b6bb3d6fe5601 | [] | no_license | https://github.com/ucandoitrohit/Python3 | db56d60604752b6eb3c9037432df9dfc7fe6ed19 | 2e93a9cf352394e04bffda93065df15cb85508de | refs/heads/master | 2023-07-22T02:11:26.557496 | 2023-07-11T04:38:22 | 2023-07-11T04:38:22 | 175,063,714 | 0 | 1 | null | false | 2022-12-17T03:33:55 | 2019-03-11T18:47:43 | 2022-12-17T03:33:24 | 2022-12-17T03:33:53 | 713 | 0 | 1 | 0 | PowerShell | false | false | f = open("rohit.txt")
content = f.read()
print(content)
f.close()
f = open("rohit.txt","r")
content = f.read()
print(content)
f.close()
f = open("rohit.txt","br")
content = f.read()
print(content)
f.close()
f = open("rohit.txt","tr")
content = f.read(3)
print(content)
content = f.read(3)
print(content)
f.close()
f = open("rohit.txt","rt")
for i in f:
print(i, end="")
f.close()
f = open("rohit.txt","rt")
print(f.readline())
f = open("rohit.txt","rt")
print(f.readlines())
| UTF-8 | Python | false | false | 491 | py | 570 | 16.file-io.basic.py | 282 | 0.608961 | 0.604888 | 0 | 35 | 12.942857 | 26 |
anirban89/My-Masters_Code | 9,148,280,372,297 | b94cb2e61d9ff7c5716442ce855cd8942bde85a4 | 6a16116f96e1c26c107bef76ed9e5c9c78aafcc0 | /1d/testEBeta.py | 47416e7246113d0eb74f9111ff2509c38dc71806 | [] | no_license | https://github.com/anirban89/My-Masters_Code | f0ac7eb5fa900d7794ce1dfa1fe9e5a87990d4c6 | f16c49ea31a9a9215699e308aba181cd9307f975 | refs/heads/master | 2021-01-15T13:36:43.751511 | 2014-08-26T02:16:50 | 2014-08-26T02:16:50 | 23,330,647 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from finiteDifference import *;
from integrator import *;
from boundary import *;
from pylab import *;
import scipy as sp;
import pylab as m;
import sys;
resume = 0;
path = '/home/joy/NewEquatorialBeta/';
if(len(sys.argv) > 1):
print "Resuming";
resume = int(sys.argv[1]);
N = 128;
g = 10;
H = 250;
beta = 10**(-10);
Ly = 8*sqrt(sqrt(g*H)/beta);
Lx = 4*(10**7);
hy = Ly/N;
hx = Lx/(2*N);
y = zeros(N);
y1 = zeros(N);
epsilon = zeros(N);
for i in arange(N):
y1[i] = (i-N/2+1)*hy;
if(i < N/4 or i > 3*N/4):
epsilon[i] = .001;
u = zeros((2*N,N));
v = zeros((2*N,N));
n = zeros((2*N,N));
x = linspace(0,Lx,2*N)
y = linspace(0,Ly,N)
Q = zeros((2*N,N));
for j in arange(N):
for i in arange(2*N):
#Q[i][j] = -0.1*exp(-(pow(y[j]-4*pi,2)+pow(x[i]-4*pi,2))/(4*pi));
#Q[i][j] = \
# -3*(10**-5)*exp(-(pow(y[j]-(Ly/2),2)+pow(x[i]-(Lx/4),2))/(2000*Lx));
Q[i][j] = \
-3*(10**-5)*exp(-(pow(y[j]-(Ly/2),2)+pow(x[i]-(Lx/4),2))/(2000*Lx));
#n = Q;
#u = -Q;
#imshow(Q);
#colorbar();
#show();
#for i in arange(N):
# for j in arange(N):
# n[i,j] = 0.1*exp(-(pow(i-N/2,2)+pow(j-N/2,2))/100);
omega = (u,v,n);
#for i in arange(20,80):
# for j in arange(20,80):
# omega[i,j] = 1000*rand();
# omega[N-i,j] = value;
# omega[i,N-j] = value;
d = 0.0005;
def dudt(dt, omega, args):
"""
(psi_xx + psi_yy - psi) = omega. Invert, calculate u = -psi_y, v = psi_x
omega_t = -(u*omega_x + v*omega_y + v + delta*psi)
"""
(u,v,n) = omega;
(bc, Qn) = args;
vy = zeros((2*N,N));
for i in arange(N):
vy[:,i] = v[:,i]*y1[i];
pressure = g*boydPartialX(n,Lx);
print 'Rotation ', average(abs(beta*vy));
# print 'V ', average(abs(v));
print 'Pressure ',average(abs(pressure));
#u_n = real(-(boydPartialX(n,16*pi) + epsilon*u) + vy/(4*pi));
# u_n = real(-( v*boydPartialY(u,bc,8*pi) + d*u/epsilon) + vy);
#u_n = real(-( v*boydPartialY(u,bc,8*pi) - d*u/epsilon) + vy);
u_n = real(-(pressure - beta*vy ));
return(u_n);
def dvdt(dt, omega, args):
"""
(psi_xx + psi_yy - psi) = omega. Invert, calculate u = -psi_y, v = psi_x
omega_t = -(u*omega_x + v*omega_y + v + delta*psi)
"""
(u,v,n) = omega;
(bc, Qn) = args;
uy = zeros((2*N,N));
epsV = zeros((2*N,N));
for i in arange(N):
uy[:,i] = u[:,i]*y1[i];
epsV[:,i] = v[:,i]*epsilon[i];
v_n = real(-(g*boydPartialY(n,bc,Ly) + beta*uy + d*v));
#v_n = real(-(g*boydPartialY(n,bc,Ly) + beta*uy + d*v));
return(v_n);
def dndt(dt, omega, args):
"""
(psi_xx + psi_yy - psi) = omega. Invert, calculate u = -psi_y, v = psi_x
omega_t = -(u*omega_x + v*omega_y + v + delta*psi)
"""
(u,v,n) = omega;
(bc, Qn) = args;
epsN = zeros((2*N,N));
for i in arange(N):
epsN[:,i] = n[:,i]*epsilon[i];
divergence = -H*(boydPartialY(v,bc,Ly) + boydPartialX(u,Lx));
#n_n = real(-(boydPartialX(u,16*pi) + boydPartialY(v, bc, 8*pi) + epsilon*n + Qn));
# n_n = real(-(boydPartialY(h*v,bc, 8*pi) + boydPartialY(v,bc,8*pi) + d*n/epsilon) + Q/epsilon);
#n_n = real(-(boydPartialY(h*v,bc, 8*pi) + boydPartialY(v,bc,8*pi) + d*n) + Q);
n_n = real( divergence + Qn);
return(n_n);
def dSystemdt(dt, omega, args):
u_n = dudt(dt, omega, args);
v_n = dvdt(dt, omega, args);
n_n = dndt(dt, omega, args);
return [u_n, v_n, n_n];
def diffusion_SWE(dt,vals,args):
return vals;
def calcMax_omega(omega, args):
(u,v,n) = (omega);
maximum = sqrt(amax((amax(u*u),amax(v*v))));
print 'Max velocity: ', maximum;
max_eta = amax(amax(abs(n)));
print 'Max eta: ', max_eta;
gravity_speed = sqrt(g*H);
print 'Gravity wave velocity: ', gravity_speed;
return 4*amax((maximum,gravity_speed));
def calcPower(field):
[x,y] = shape(field);
power = zeros(y);
for i in arange(x):
power = power + abs(fft(field[i,:]));
power = power/x;
return log(power);
stepper_omega = integrator(hx, [dSystemdt], [diffusion_SWE], calcMax_omega, dim=3);
ion();
prev_t = 0;
omega_n = (u,v,n);
#jet();
if(resume > 0):
print 'Resuming from step: ',resume;
eta = \
np.load(path+'/eta_data_'+str(resume)+'.npy');
u = np.load(path+'/uVel_data_'+str(resume)+'.npy');
v = \
np.load(path+'/vVel_data_'+str(resume)+'.npy');
omega_n = (u,v,eta);
sec = resume+1;
tn = resume;
ion();
figure(figsize=(18,6));
handle_n = imshow(n.transpose());
colorbar();
draw();
DT=0;
north = zeros((2*N,4));
south = zeros((2*N,4));
bc = boundary(yBcType='Dirichlet',bcs=(north,south));
numerator = zeros((2*N,N));
tn = 0;
nextUpdate = tn + 3600;
hour = 0;
while (True):
(u,v,n) = (omega_n);
'''ensure that psi (eta) does not cross the ranges under which shallow water
equations are valid'''
#n[n>eta_u] = eta_u;
#n[n<-eta_u] = -eta_u;
# print 'forcing strength: ',sum(delta);
(DT,omega_n) = \
stepper_omega.integrate(prev_t,omega_n,(bc, Q));
prev_t = tn;
print DT;
tn = tn + DT/2.0;
#DT = tn2 - prev_t;
#tn = tn + DT;
(u,v,n) = omega_n;
print 'Time elapsed: ',tn/3600.0, 'hours';
# print 'negative RH cells: ', negativeRHCount;
# print 'minimum vapor value: ', amin(amin(vapor));
# if(tn > 1):
# Q = zeros((N));
if(tn >= nextUpdate):
nextUpdate = tn + 3600;
hour = hour+1;
handle_n.set_array(n.transpose());
handle_n.autoscale();
print '--------------------------------------'
print 'Energy:', sum(u*u + v*v + 10*n*n);
print '--------------------------------------'
# handle2.set_array(omega_n.transpose());
# handle2.autoscale();
# c2.update_bruteforce(handle2);
savefig(path+'/fig'+str(hour)+'.png');
np.save(path+'/eta_data_'+str(hour)+'.npy',n);
np.save(path+'/uVel_data_'+str(hour)+'.npy',u);
np.save(path+'/vVel_data_'+str(hour)+'.npy',v);
draw();
ioff();
| UTF-8 | Python | false | false | 6,089 | py | 111 | testEBeta.py | 93 | 0.512235 | 0.486944 | 0 | 279 | 20.820789 | 99 |
changc42/Learning-Python | 1,211,180,828,279 | 918056a00a095e34beb834eabe8355d7038f414a | 451f1f3c5108761b127153a235c41078954183c1 | /test.py | 17a7700d4188f972747559c49d73e6ed36cbc97c | [] | no_license | https://github.com/changc42/Learning-Python | a63a760eea881a8bfec55aeaed4effee395ca0cd | 7a6baab6485fa57ed0c3b8dc7edd0771a892ef24 | refs/heads/master | 2020-09-05T12:21:59.331258 | 2019-11-07T15:25:53 | 2019-11-07T15:25:53 | 220,102,681 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | count = [0]
count[0]=9
def counter(n):
n[0]=5
counter(count)
print(count[0]) | UTF-8 | Python | false | false | 84 | py | 4 | test.py | 3 | 0.595238 | 0.52381 | 0 | 10 | 7.5 | 15 |
beky25/Social-Analytics- | 18,159,121,762,812 | b19070e5e4289d4c685ff61fe661f8f25be68176 | 1d265c94afbd100241def925a3a26ccde0cb8a08 | /social-Analytics /config1.py | f84aee303f62f522c5e605651a7e1d814d2e4872 | [] | no_license | https://github.com/beky25/Social-Analytics- | 5691ac4644bb7282cf00dd24a197a18ac349170b | f64dc729a6377b87a60e9bf98bc965e2fca402db | refs/heads/master | 2020-03-23T04:05:20.639141 | 2018-07-15T23:19:40 | 2018-07-15T23:19:40 | 141,064,815 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | consumer_key = '0gNvW3tTD4VNNs9F5BN8Y8aej'
consumer_secret = 'RDBzocYSmzymp98H340LDPVIDRVEgfRJvkDh69NFNufCSMBhmY'
access_token ='1013062013373820928-uC4zT1ds1pjOo8jIfVpuE4OHXySUb9'
access_token_secret ='Qe094f079V8UxmJACjgTo8bfVijU5wHXmrQPUZc1OBcyX' | UTF-8 | Python | false | false | 254 | py | 3 | config1.py | 1 | 0.874016 | 0.681102 | 0 | 9 | 27.333333 | 70 |
konkolorado/ensemble_santander | 6,811,818,131,881 | 5544634d1f611387e3052a3b7c1782ee97ac5a3d | 96ceb436e3fc419d98eb7f4d90b434a76cb19b2e | /pca.py | 36aa87f601bf546293e72c2e1fd73243201633e6 | [] | no_license | https://github.com/konkolorado/ensemble_santander | 65c423f1803ecd26c8bc28a3334ab8a90578d858 | 9961436462ab859e14982f60766761699370be8b | refs/heads/master | 2021-06-01T07:28:06.878183 | 2016-05-13T17:49:21 | 2016-05-13T17:49:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Uriel Mandujano
A program for performing Principal Component Analysis on the
training data provided by Santander competition on Kaggle
"""
from scipy import linalg
from sklearn import preprocessing
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.cross_validation import cross_val_score
import numpy as np
import cPickle as pk
import parser
import sys
import os
class PrincipalComponentAnalysis(object):
def __init__(self, preprocessing=""):
ps = parser.ParseSantander()
self.feature_labels = ps.getFeatureLabels()
self.users_data = ps.readFile(ps.training)
self.test_data = ps.readTest()
self.preprocessor = preprocessing
# n_components indicates the top n features we want to identify
self.n_components = np.arange(0, len(self.feature_labels), 5)
# better scores are most positive
self.scores = []
self.n_components_pca = 0
self.run()
def run(self):
"""
Performs the full principal component analysis task
"""
if self.loadData():
print "<Loading data>"
return
data = [x[0] for x in self.users_data.values()]
data = self.preprocess(data)
pca_scores = self.computeScores(data)
n_components_pca = self.n_components[np.argmax(pca_scores)]
print "pca_scores", pca_scores
print "n_componenets_pca", n_components_pca
self.writeData(pca_scores, n_components_pca)
def computeScores(self, X):
"""
Computes the scores for a given X feature vector considering various
numbers of features
"""
pca = PCA()
pca_scores = []
for n in self.n_components:
print "Computing score for", n, "components"
sys.stdout.flush()
pca.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
return pca_scores
def writeData(self, scores, n_components):
"""
Saves the scores for each feature dimension as an array as well as the
number of ideal components determined by PCA. Saved as a pickle file.
"""
save_location = "data/"
scores_file = save_location + self.preprocessor + "_scores.pk"
components_file = save_location + self.preprocessor + "_components.pk"
if not os.path.isdir(save_location):
os.makedirs(save_location)
with open(scores_file, "wb") as f:
pk.dump(scores, f)
f.close()
with open(components_file, "wb") as f:
pk.dump(n_components, f)
f.close()
def loadData(self):
"""
Loads pre-existing PCA data. Saves unnecessary computation time
"""
scores_file = "data/" + self.preprocessor + "_scores.pk"
components_file = "data/" + self.preprocessor + "_components.pk"
if (not os.path.exists(scores_file)) or \
(not os.path.exists(components_file)):
print "Attempted to load non-existant data. Will run PCA"
return False
self.scores = pk.load(open(scores_file, "rb"))
self.n_components_pca = pk.load(open(components_file, "rb"))
return True
def preprocess(self, X):
"""
Performs preprocessing on the data. If none is specified, no
preprocessing occurs.
"""
print "Preprocessing using", self.preprocessor
if self.preprocessor == "scale":
X = self.scale(X)
if self.preprocessor == "normalize":
X = self.normalize(X)
if self.preprocessor == "sparse":
X = self.sparse(X)
return X
def scale(self, X):
"""
This function standardizes the values of a feature. Experimental use
only. Final scores will determine whether or not this is a useful
preprocessing step
"""
return preprocessing.scale(X)
def normalize(self, X):
"""
This function normalizes the values for a feaure. Experimental use
only. Final scores will determine whether or not this is a useful
preprocessing step
"""
return preprocessing.normalize(X, norm='l2')
def sparse(self, X):
"""
This function performs a preprocessing on features which retains
the sparsity of features. A lot of the data is 0 which probably means
it's missing. Experimental use only.
"""
return preprocessing.maxabs_scale(X)
def main():
pca = PrincipalComponentAnalysis("scale")
print pca.n_components_pca
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 4,717 | py | 6 | pca.py | 3 | 0.603986 | 0.602714 | 0 | 151 | 30.238411 | 79 |
AlbeyAl/aDOTcalendar | 6,863,357,785,300 | 8ca5341cf4062e01e9b14df0f771bf6e76e17bc2 | f908dfc6fd4499e16ba03785b932ba54f1e263cd | /calendar_index/apps.py | 5b1d5a25c6b1ee33e056cf95bc12bba7ebbfe99a | [] | no_license | https://github.com/AlbeyAl/aDOTcalendar | d749d944128cce33e903d654e0247c06fcf01677 | 67662b50e687df605e891341f967a057d21a9110 | refs/heads/main | 2023-03-05T03:00:23.584485 | 2021-02-18T09:01:10 | 2021-02-18T09:01:10 | 322,116,990 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.apps import AppConfig
class CalendarIndexConfig(AppConfig):
name = 'calendar_index'
| UTF-8 | Python | false | false | 102 | py | 18 | apps.py | 13 | 0.77451 | 0.77451 | 0 | 5 | 19.4 | 37 |
TonyCrespoMe/python-limacharlie | 223,338,308,553 | 67d55e1116b326704dbd36d5c2afc452bcdec727 | b5fd72eab002bef11b0498ee12f981002017eb9e | /limacharlie/Logs.py | 22fe0dbce77e5dc85b112929a33f641b338380f9 | [
"Apache-2.0"
] | permissive | https://github.com/TonyCrespoMe/python-limacharlie | 59b428f722b9b879c43af33e229cd02a984aa25e | caae87c3ebd6b2961663ddcae0e7cb1d31cccc63 | refs/heads/master | 2022-04-09T11:10:14.327198 | 2020-03-11T17:03:49 | 2020-03-11T17:03:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from limacharlie import Manager
from .utils import LcApiException
from .utils import GET
# Detect if this is Python 2 or 3
import sys
_IS_PYTHON_2 = False
if sys.version_info[ 0 ] < 3:
_IS_PYTHON_2 = True
if _IS_PYTHON_2:
from urllib2 import HTTPError
from urllib2 import Request as URLRequest
from urllib2 import urlopen
else:
from urllib.error import HTTPError
from urllib.request import Request as URLRequest
from urllib.request import urlopen
import os
import os.path
import uuid
import base64
import json
import requests
import time
MAX_UPLOAD_PART_SIZE = ( 1024 * 1024 * 15 )
class Logs( object ):
'''Helper object to upload External Logs to limacharlie.io without going through a sensor.'''
def __init__( self, manager, accessToken = None ):
'''Create a Log manager object to prepare for upload.
Args:
manager (limacharlie.Manager obj): a Manager to use for identification (NOT authentication since API key is not required for this utility class).
accessToken (str): an ingestion key to use for log upload.
'''
self._lc = manager
self._accessToken = accessToken
if self._accessToken is None:
# Load the token from an environment variable.
self._accessToken = os.environ.get( 'LC_LOGS_TOKEN', None )
if self._accessToken is not None:
self._accessToken = str( uuid.UUID( str( self._accessToken ) ) )
self._uploadUrl = None
def upload( self, filePath, source = None, hint = None, payloadId = None, allowMultipart = False, originalPath = None, nDaysRetention = 30 ):
'''Upload a log.
Args:
filePath (str): path to the file to upload.
source (str): optional source identifier for where the log came from.
hint (str): optional data format hint for the log.
payloadId (str): optional unique payload identifier for the log, used to perform idempotent uploads.
allowMultipart (bool): unused, if True will perform multi-part upload for large logs.
nDaysRetention (int): number of days the data should be retained in the cloud.
'''
if self._accessToken is None:
raise LcApiException( 'access token not specified' )
if self._uploadUrl is None:
# Get the ingest URL from the API.
self._uploadUrl = self._lc.getOrgURLs()[ 'logs' ]
headers = {
'Authorization' : 'Basic %s' % ( base64.b64encode( ( '%s:%s' % ( self._lc._oid, self._accessToken ) ).encode() ).decode(), )
}
if source is not None:
headers[ 'lc-source' ] = source
if hint is not None:
headers[ 'lc-hint' ] = hint
if payloadId is not None:
headers[ 'lc-payload-id' ] = payloadId
if originalPath is not None:
headers[ 'lc-path' ] = base64.b64encode( os.path.abspath( originalPath ).encode() ).decode()
if nDaysRetention is not None:
headers[ 'lc-retention-days' ] = str( nDaysRetention )
with open( filePath, 'rb' ) as f:
# Get the file size.
f.seek( 0, 2 )
fileSize = f.tell()
f.seek( 0 )
if MAX_UPLOAD_PART_SIZE > fileSize:
# Simple single-chunk upload.
request = URLRequest( str( 'https://%s/ingest' % ( self._uploadUrl, ) ),
data = f.read(),
headers = headers )
try:
u = urlopen( request )
except HTTPError as e:
raise Exception( '%s: %s' % ( str( e ), e.read().decode() ) )
try:
response = json.loads( u.read().decode() )
except:
response = {}
else:
# Multi-part upload.
partId = 0
if payloadId is None:
headers[ 'lc-payload-id' ] = str( uuid.uuid4() )
while True:
chunk = f.read( MAX_UPLOAD_PART_SIZE )
if not chunk:
break
if len( chunk ) != MAX_UPLOAD_PART_SIZE:
headers[ 'lc-part' ] = "done"
else:
headers[ 'lc-part' ] = str( partId )
request = URLRequest( str( 'https://%s/ingest' % ( self._uploadUrl, ) ),
data = chunk,
headers = headers )
try:
u = urlopen( request )
except HTTPError as e:
raise Exception( '%s: %s' % ( str( e ), e.read().decode() ) )
try:
response = json.loads( u.read().decode() )
except:
response = {}
partId += 1
return response
def getOriginal( self, payloadId, filePath = None, fileObj = None ):
'''Download an orginal log.
Args:
payloadId (str): the payload identifier to download.
filePath (str): optional path where to download the file to.
fileObj (file obj): optional file object where to write the log.
'''
response = self._lc._apiCall( '/insight/%s/logs/originals/%s' % ( self._lc._oid, payloadId ), GET )
# If no local output is specified, we interpret this
# as an asynchronous export request.
if filePath is None and fileObj is None:
if 'payload' in response:
return response[ 'payload' ]
return response[ 'export' ]
# Response can either be inline if small enough.
if 'payload' in response:
data = self._lc._unwrap( response[ 'payload' ], isRaw = True )
if filePath is not None:
with open( filePath, 'wb' ) as f:
f.write( data )
elif fileObj is not None:
fileObj.write( data )
response.pop( 'payload', None )
# Or it can be a GCS signed URL.
elif 'export' in response:
# The export is asynchronous, so we will retry
# every 5 seconds up to 5 minutes.
status = None
for _ in range( int( 300 / 5 ) ):
dataReq = requests.get( response[ 'export' ], stream = True )
status = dataReq.status_code
if 200 == status:
break
dataReq.close()
dataReq = None
if 404 != status:
break
time.sleep( 5 )
if dataReq is None:
raise LcApiException( "Failed to get log payload: %s." % ( status, ) )
try:
if filePath is not None:
with open( filePath, 'wb' ) as f:
for chunk in dataReq.iter_content( chunk_size = 1024 * 512 ):
if not chunk:
continue
f.write( chunk )
elif fileObj is not None:
for chunk in dataReq.iter_content( chunk_size = 1024 * 512 ):
if not chunk:
continue
fileObj.write( chunk )
response.pop( 'export', None )
finally:
dataReq.close()
return response
def main( sourceArgs = None ):
import argparse
parser = argparse.ArgumentParser( prog = 'limacharlie logs' )
actions = {
'upload' : main_upload,
'get_original' : main_getOriginal,
}
parser.add_argument( 'log_action',
type = str,
help = 'action to take, one of %s' % ( ', '.join( actions.keys(), ) ) )
parser.add_argument( 'opt_arg',
type = str,
nargs = "?",
default = None,
help = 'optional argument depending on log_action' )
args = parser.parse_args( sourceArgs[ 0 : 1 ] )
if args.log_action not in actions:
print( "Unknown action: %s" % ( args.log_action, ) )
sys.exit( 1 )
return actions[ args.log_action ]( sourceArgs[ 1 : ] )
def main_upload( sourceArgs = None ):
import argparse
parser = argparse.ArgumentParser( prog = 'limacharlie logs upload' )
parser.add_argument( 'log_file',
type = str,
help = 'path to the log file to upload.' )
parser.add_argument( '--source',
type = str,
required = False,
dest = 'source',
default = None,
help = 'name of the log source to associate with upload.' )
parser.add_argument( '--original-path',
type = str,
required = False,
dest = 'originalPath',
default = None,
help = 'override the original path recorded for the log.' )
parser.add_argument( '--hint',
type = str,
required = False,
dest = 'hint',
default = 'auto',
help = 'log type hint of the upload.' )
parser.add_argument( '--payload-id',
type = str,
required = False,
dest = 'payloadId',
default = None,
help = 'unique identifier of the log uploaded, can be used to de-duplicate logs.' )
parser.add_argument( '--access-token',
type = uuid.UUID,
required = False,
dest = 'accessToken',
default = None,
help = 'access token to upload.' )
parser.add_argument( '--oid',
type = lambda o: str( uuid.UUID( o ) ),
required = False,
dest = 'oid',
default = None,
help = 'organization id to upload for.' )
parser.add_argument( '--days-retention',
type = int,
required = False,
dest = 'retention',
default = None,
help = 'number of days of retention for the data.' )
args = parser.parse_args( sourceArgs )
logs = Logs( Manager( args.oid, None ), args.accessToken )
originalPath = args.originalPath
if args.originalPath is None:
originalPath = args.log_file
response = logs.upload( args.log_file,
source = args.source,
hint = args.hint,
payloadId = args.payloadId,
allowMultipart = False,
originalPath = originalPath,
nDaysRetention = args.retention )
print( json.dumps( response ) )
def main_getOriginal( sourceArgs = None ):
import argparse
parser = argparse.ArgumentParser( prog = 'limacharlie logs get_original' )
parser.add_argument( 'payloadid',
type = str,
help = 'unique identifier of the log uploaded.' )
parser.add_argument( 'destination',
type = str,
help = 'file path where to download the log.' )
args = parser.parse_args( sourceArgs )
logs = Logs( Manager() )
response = logs.getOriginal( args.payloadid, filePath = args.destination )
print( json.dumps( response ) ) | UTF-8 | Python | false | false | 12,027 | py | 2 | Logs.py | 2 | 0.490646 | 0.484909 | 0 | 324 | 36.123457 | 157 |
NSLS-II-LIX/py4xs | 5,927,054,880,294 | 508bd92aa36eda0a74b4010cfff352aeae17cb7f | b5e44fd0010dd41834738ef84ddedf222d2c64cf | /py4xs/detector_config.py | ce75dc5d1d81a5a3110fb0f7fa2ba641811ba066 | [] | no_license | https://github.com/NSLS-II-LIX/py4xs | ba89e57355fc24cd2c904a7e503bcc9ad3b624b7 | 1173bad79c4aed32541550e75df75dd4b7028327 | refs/heads/master | 2023-07-19T04:25:04.772887 | 2023-07-18T15:39:28 | 2023-07-18T15:39:28 | 118,817,574 | 4 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | import copy
import pickle,bz2
import numpy as np
from py4xs.local import ExpPara,exp_attr
det_attr = ['ImageWidth', 'ImageHeight', 'extension', 'fix_scale']
def create_det_from_attrs(attrs): #, qgrid):
det = DetectorConfig() #qgrid=qgrid)
det.unpack_dict(attrs)
return det
class DetectorConfig():
""" tis deals with things that are specific to
"""
def __init__(self, extension = "", exp_para = None, qgrid = None,
dark = None, flat = None, dezinger = False,
desc = "Pilatus", pixel_size=0.172,
fix_scale = None, bad_pixels = [[], []]):
self.extension = extension
self.exp_para = exp_para
if exp_para is not None:
self.ImageWidth = exp_para.ImageWidth
self.ImageHeight = exp_para.ImageHeight
self.exp_para.init_coordinates()
self.s2d_distance = pixel_size*self.exp_para.Dd
#self.qgrid = qgrid
if qgrid is not None:
print("Warning: qgrid under DectorConfig is no longer in use.")
self.fix_scale = fix_scale
self.dark = dark
self.flat = flat
self.dezinger = dezinger
[bpx, bpy] = bad_pixels
if exp_para!=None:
# seems unnecessary
## make a copy in case the bad pixel list need to be revised and the original exp_para.mask need
## to be preserved
#self.mask = copy.copy(exp_para.mask)
if len(bpx)>0:
for i in range(len(bpx)):
exp_para.mask.set_bit(bpx[i],bpy[i])
##self.mask.set_bit(bpx[i],bpy[i])
#else:
# self.mask = None
def pack_dict(self):
det_dict = {}
exp_dict = {}
for attr in exp_attr:
exp_dict[attr] = self.exp_para.__getattribute__(attr)
for attr in det_attr:
det_dict[attr] = self.__getattribute__(attr)
exp_dict['mask'] = list(bz2.compress(pickle.dumps(self.exp_para.mask)))
det_dict['exp_para'] = exp_dict
return det_dict
def unpack_dict(self, det_dict, pixel_size=0.172):
for attr in det_dict:
self.__setattr__(attr, det_dict[attr])
#self.qgrid = np.asarray(det_dict['qgrid'])
self.exp_para = ExpPara(self.ImageWidth, self.ImageHeight)
for attr in exp_attr:
self.exp_para.__setattr__(attr, det_dict['exp_para'][attr])
self.exp_para.mask = pickle.loads(bz2.decompress(bytes(det_dict['exp_para']['mask'])))
self.exp_para.calc_rot_matrix()
self.exp_para.init_coordinates()
self.s2d_distance = pixel_size*self.exp_para.Dd
def pre_process(self, data):
""" this deals with flat field and dark current corrections, and dezinger
"""
if self.dezinger:
pass
if self.dark is not None:
pass
if self.flat is not None:
pass
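# Round-trip sketch (assumption): a DetectorConfig can be serialized to a plain
# dict (e.g. for HDF5 attrs) and restored with the helper defined at the top:
#   attrs = det.pack_dict()
#   det2 = create_det_from_attrs(attrs)   # det2.exp_para mirrors det.exp_para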
| UTF-8 | Python | false | false | 2,984 | py | 22 | detector_config.py | 11 | 0.558646 | 0.553619 | 0 | 81 | 35.728395 | 108 |
relekang/python-semantic-release | 11,690,900,998,311 | 28bd755a9661c13e512a195dbeec80d874f5755a | 394141477e016101200511ab97b7fde4300abae2 | /tests/parsers/test_emoji.py | 83cfbe60bce4d2b080fa0ff88eccc43baec49124 | [
"MIT"
] | permissive | https://github.com/relekang/python-semantic-release | 51dead47dd42d57d1d2fa92da7343fafd9b49a24 | 2d9f77a04d287552fb51611585c69968aef0b367 | refs/heads/master | 2022-10-27T13:27:55.327332 | 2022-10-22T17:30:39 | 2022-10-22T17:30:39 | 39,765,732 | 526 | 207 | null | null | null | null | null | null | null | null | null | null | null | null | null | import mock
import pytest
from semantic_release.history import emoji_parser
from .. import wrapped_config_get
def test_major():
commit = (
":boom: Breaking changes\n\n" "More description\n\n" "Even more description"
)
parsed_commit = emoji_parser(commit)
assert parsed_commit[0] == 3
assert parsed_commit[1] == ":boom:"
assert parsed_commit[3] == [
":boom: Breaking changes",
"More description",
"Even more description",
]
assert parsed_commit[4] == ["More description", "Even more description"]
def test_minor():
commit = ":sparkles: Add a new feature\n\n" "Some description of the feature"
parsed_commit = emoji_parser(commit)
assert parsed_commit[0] == 2
assert parsed_commit[1] == ":sparkles:"
assert parsed_commit[3] == [
":sparkles: Add a new feature",
"Some description of the feature",
]
assert parsed_commit[4] == []
def test_patch():
commit = ":bug: Fixing a bug\n\n" "The bug is finally gone!"
parsed_commit = emoji_parser(commit)
assert parsed_commit[0] == 1
assert parsed_commit[1] == ":bug:"
assert parsed_commit[3] == [":bug: Fixing a bug", "The bug is finally gone!"]
assert parsed_commit[4] == []
def test_other_emoji():
commit = ":pencil: Documentation changes"
parsed_commit = emoji_parser(commit)
assert parsed_commit[0] == 0
assert parsed_commit[1] == "Other"
assert parsed_commit[3] == [":pencil: Documentation changes"]
assert parsed_commit[4] == []
def test_multiple_emojis():
commit = ":sparkles::pencil: Add a feature and document it"
parsed_commit = emoji_parser(commit)
assert parsed_commit[0] == 2
assert parsed_commit[1] == ":sparkles:"
assert parsed_commit[3] == [":sparkles::pencil: Add a feature and document it"]
assert parsed_commit[4] == []
def test_emoji_in_description():
commit = ":sparkles: Add a new feature\n\n" ":boom: should not be detected"
parsed_commit = emoji_parser(commit)
assert parsed_commit[0] == 2
assert parsed_commit[1] == ":sparkles:"
assert parsed_commit[3] == [
":sparkles: Add a new feature",
":boom: should not be detected",
]
assert parsed_commit[4] == []
@mock.patch(
"semantic_release.history.parser_emoji.config.get",
wrapped_config_get(use_textual_changelog_sections=True),
)
@pytest.mark.parametrize(
"level,commit,commit_type",
[
(
3,
":boom: Breaking changes\n\n"
"More description\n\n"
"Even more description",
"breaking",
),
(
2,
":sparkles: Add a new feature\n\n" "Some description of the feature",
"feature",
),
(
1,
":bug: Fixing a bug\n\n" "The bug is finally gone!",
"fix",
),
],
)
def test_use_textual_changelog_sections(level, commit, commit_type):
parsed_commit = emoji_parser(commit)
assert parsed_commit[0] == level
assert parsed_commit[1] == commit_type
| UTF-8 | Python | false | false | 3,088 | py | 44 | test_emoji.py | 28 | 0.604922 | 0.593588 | 0 | 104 | 28.692308 | 84 |
wolfram74/kata_05 | 16,733,192,624,955 | 674dc6b865935b68b4cbd025690c78aa196702a1 | 8b14ef11f2f642d1808338126e81b76b3b1d4a9c | /test_bloom.py | 98403b989a55aba72422ad3dc1948286a3d42a20 | [] | no_license | https://github.com/wolfram74/kata_05 | d9e13dddc0fc4150bb21d1ac14c3ebc06c622db4 | ac65dcc4a0899d3da025715d2aad10b4bff7677c | refs/heads/master | 2021-01-20T19:57:52.172304 | 2016-07-20T21:33:43 | 2016-07-20T21:33:43 | 63,352,761 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
import bloom_filter
class BloomTest(unittest.TestCase):
def setUp(self):
self.filter = bloom_filter.BloomFilter()
def test_existence(self):
self.assertEqual(type(self.filter), bloom_filter.BloomFilter)
def test_not_included(self):
self.assertFalse(self.filter.includes('nothing'))
def test_addition(self):
self.assertTrue(self.filter.add('something'))
self.assertTrue(self.filter.includes('something'))
self.assertFalse(self.filter.includes('nothing'))#may sometimes fail
@unittest.skip('optional feature')
def test_size_of_set(self):
self.assertEqual(0, self.filter.elements())
self.fillter.add('something')
self.assertEqual(1, self.filter.elements())
@unittest.skip('optional feature')
def test_expected_size(self):
small_filter = bloom_filter.BloomFilter(size=10)
self.assertNotEqual(small_filter.size(), self.filter.size())
@unittest.skip('optional feature')
def test_custom_hash_iterations(self):
hashy_filter = bloom_filter.BloomFilter(hashes=1000)
self.assertNotEqual(small_filter.hashings(), self.filter.hashings())
@unittest.skip('optional feature')
def test_false_positive_rate(self):
pass
| UTF-8 | Python | false | false | 1,285 | py | 6 | test_bloom.py | 4 | 0.684047 | 0.677821 | 0 | 37 | 33.702703 | 76 |
xuesyn/Occluded-Dataset-with-Color-Augmentation | 6,957,847,040,340 | 6f6b290d4c9724b7d323a63f5e4505a14654977e | 0cfc2d67adc662ea8aa03b8339fbb5e7da143f98 | /CreateOccludedDatasetWithColorAugmentation.py | 26b4f246aa1637cabd2940aa8331f31095a6c584 | [
"MIT"
] | permissive | https://github.com/xuesyn/Occluded-Dataset-with-Color-Augmentation | 38ca9b2d74eb449d056a0e912925db662d9f0fe5 | 0f9c5feac4931769770f8695f07c1b95804cc688 | refs/heads/main | 2023-07-27T18:33:24.224599 | 2021-09-12T00:13:40 | 2021-09-12T00:13:40 | 359,424,444 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import BboxTools.BboxTools as bbt
import os
import scipy.io
from PIL import Image
import cv2
import time
import pandas as pd
import argparse
from WBAugmenter import WBEmulator as wbAug
import shutil
def parse_args():
parser = argparse.ArgumentParser(description="WB color augmenter")
parser_augment = parser.add_argument
parser_augment("--input_image_dir", default='./test/',
help="Training image directory")
parser_augment("--output_image_dir", default='./OccludedPASCAL3D/',
help="augmented image directory")
parser_augment("--out_number", type=int, default=5,
help="Number of color-augmented images for each input image")
return parser.parse_args()
args = parse_args()
occ_libs_dir = './occluded_lib/occluder_libs_test_%s.npz'
occ_libs_name = ['large', 'medium', 'small']
path_save = args.output_image_dir
path_to_original_pascal3dp = args.input_image_dir
#categories = ['aeroplane', 'bicycle', 'bus', 'car', 'motorbike', 'train', 'boat', 'bottle', 'chair', 'diningtable',
# 'sofa', 'tvmonitor']
categories = ['test']
save_anno_path = path_save + '/annotations_grouped'
save_img_path = path_save + '/images'
save_list_path = path_save + '/lists'
source_list_path = path_to_original_pascal3dp + '/Image_sets/%s_imagenet_val.txt'
source_image_path = path_to_original_pascal3dp + '/Images/%s_imagenet'
source_anno_path = path_to_original_pascal3dp + '/Annotations/%s_imagenet'
source_mask_path = path_to_original_pascal3dp + '/obj_mask/%s'
if not os.path.exists(path_to_original_pascal3dp + 'obj_mask'):
os.system('mv ./obj_mask ' + path_to_original_pascal3dp)
# 0: only start randomly, 1: only start in box, 2: using both mode
l_s_thr = 150000
occluding_modes_l = ['s', 'm', 'l', 'l','lm', 'll', 'lll']
start_mode_l = [1, 2, 2, 2, 0, 2, 0]
occluding_modes_s = ['m', 'l', 'l','lm', 'mm', ]
start_mode_s = [2, 2, 2, 2, 2, ]
#occluding_rate = [(0.3, 0.1), (0.5, 0.1), (0.7, 0.1), (0.3, 0.3), (0.5, 0.3), (0.7, 0.3), (0.3, 0.5), (0.5, 0.5),
# (0.7, 0.5), ]
occluding_rate = [(0.3, 0.5), ]
folder_name_list = ['%sFGL%d_BGL%d' % ('%s', j, i) for i in range(1, 2) for j in range(1, 2)]
allowed_var = 0.1
start_off_box_l = [md for md, sd in zip(occluding_modes_l, start_mode_l) if sd == 0 or sd == 2]
start_in_box_l = [md for md, sd in zip(occluding_modes_l, start_mode_l) if sd == 1 or sd == 2]
start_off_box_s = [md for md, sd in zip(occluding_modes_s, start_mode_s) if sd == 0 or sd == 2]
start_in_box_s = [md for md, sd in zip(occluding_modes_s, start_mode_s) if sd == 1 or sd == 2]
limited_trying_times = 40
def mix_masks(masks, boxes):
back = np.zeros(boxes[0].boundary, dtype=bool)
for box, mask in zip(boxes, masks):
box.assign(back, np.logical_or(mask, box.apply(back)))
return back
def mix_imgs(masks, boxes, imgs):
back_im = np.zeros(tuple(boxes[0].boundary) + (3,), dtype=np.uint8)
for box, mask, img in zip(boxes, masks, imgs):
mask = mask.reshape(mask.shape + (1,))
box.assign(back_im, mask * img + (1 - mask) * box.apply(back_im))
return back_im
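# The two helpers above paste each occluder's mask/crop into a full-size canvas;
# merge_occ_image() below then composites the occluders over the source image
# (mask == 1 -> occluder pixel, mask == 0 -> original pixel).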
def merge_occ_image(masks, occluder_map, image):
masks = np.expand_dims(masks, axis=2)
return masks * occluder_map + (1 - masks) * image
def check_occ_ratio(mask_map, object_annotation_box):
in_box_size = 0
for in_box_ in object_annotation_box:
in_box_size += in_box_.size
out_box_size = mask_map.size - in_box_size
in_box_value = 0
for in_box_ in object_annotation_box:
in_box_value += np.sum(in_box_.apply(mask_map))
out_box_value = np.sum(mask_map) - in_box_value
return in_box_value / in_box_size, out_box_value / out_box_size
def check_occ_ratio_seg(mask_map, mask_obj):
mask_obj = mask_obj > 10
mask_map = mask_map > 0.5
in_box_size = np.sum(mask_obj)
in_box_value = np.sum(np.logical_and(mask_obj, mask_map))
out_box_size = np.sum(np.logical_not(mask_obj))
out_box_value = np.sum(np.logical_and(np.logical_not(mask_obj), mask_map))
return in_box_value / in_box_size, out_box_value / out_box_size
def process_inbox(shape, center, boundary):
tem_box = bbt.box_by_shape(shape, center, boundary)
tem_box_ = bbt.box_by_shape(shape, center)
return tem_box_.box_in_box(tem_box)
def apply_n_occluder(occluder_boxes, img_shape, in_box, boundary_constraint=25, overlap_constraint=-5):
box_list = []
processing_list = [None for _ in range(len(occluder_boxes))]
for i in range(len(occluder_boxes)):
flag_ = False
x = 0
y = 0
ti_ = 0
if in_box and i == 0:
while not flag_:
flag_ = True
x = np.random.randint(boundary_constraint, img_shape[0] - boundary_constraint, dtype=int)
y = np.random.randint(boundary_constraint, img_shape[1] - boundary_constraint, dtype=int)
flag__ = False
for iter_box in in_box:
if iter_box.include((x, y)):
flag__ = True
if not flag__:
flag_ = False
else:
while not flag_ and ti_ < 40:
ti_ += 1
flag_ = True
x = np.random.randint(boundary_constraint, img_shape[0] - boundary_constraint, dtype=int)
y = np.random.randint(boundary_constraint, img_shape[1] - boundary_constraint, dtype=int)
for exist_box in box_list:
if exist_box.pad(overlap_constraint).include((x, y)):
flag_ = False
center = (x, y)
occluder_box = occluder_boxes[i]
this_box = bbt.box_by_shape(occluder_box.shape, center, image_boundary=img_shape)
box_list.append(this_box)
if not occluder_box.size == this_box.size:
processing_list[i] = process_inbox(occluder_box.shape, center, img_shape)
return box_list, processing_list
def get_occ(required_type, occ_libs):
out_boxes = []
out_masks = []
out_images = []
out_cates = []
t_flag = ''
for t in required_type:
if t == 'l':
t_flag = t
this_lib = occ_libs['large']
elif t == 's':
t_flag = t
this_lib = occ_libs['small']
else:
t_flag = 'm'
this_lib = occ_libs['medium']
# randomly choose an occluded object from 1231 coco objects
idx = np.random.randint(0, this_lib['masks'].shape[0], dtype=int)
# out_cates.append(findcategory(idx))
out_cates.append(t_flag + str(idx))
out_boxes.append(this_lib['boxes'][idx])
out_masks.append(this_lib['masks'][idx])
out_images.append(this_lib['images'][idx])
return out_boxes, out_masks, out_images, out_cates
def generate_one_img(img, box_anno, occ_libs, seg_anno):
img_size = img.shape[0] * img.shape[1]
if img_size > l_s_thr:
using_start_off_box = start_off_box_l
using_start_in_box = start_in_box_l
else:
using_start_off_box = start_off_box_s
using_start_in_box = start_in_box_s
tried_times = 0
fully_filled = False
filled_level = np.zeros(len(occluding_rate), dtype=bool)
    filled_score = np.zeros(len(occluding_rate), dtype=float)  # per-level distance score (must hold floats, not bools)
using_box = np.zeros(len(occluding_rate), dtype=object)
using_mask = np.zeros(len(occluding_rate), dtype=object)
using_occluder = np.zeros(len(occluding_rate), dtype=object)
using_cate = np.zeros(len(occluding_rate), dtype=object)
while tried_times < limited_trying_times and not fully_filled:
tried_times += 1
boxes = []
masks = []
occluders = []
ratios = []
cates = []
for working_mode in using_start_in_box:
t_boxes, t_masks, t_images, t_cates = get_occ(working_mode, occ_libs)
t_boxes, t_process = apply_n_occluder(t_boxes, img_shape=img.shape[0:2], in_box=box_anno)
for i, proc in enumerate(t_process):
if proc:
# print()
# print(proc)
# print(t_masks[i].shape)
# print(t_boxes[i])
t_masks[i] = proc.apply(t_masks[i])
t_images[i] = proc.apply(t_images[i])
mask_map = mix_masks(t_masks, t_boxes)
occluder_map = mix_imgs(t_masks, t_boxes, t_images)
# ratios.append(check_occ_ratio(mask_map, box_anno))
ratios.append(check_occ_ratio_seg(mask_map, seg_anno))
masks.append(mask_map)
boxes.append(t_boxes)
occluders.append(occluder_map)
cates.append(t_cates)
for working_mode in using_start_off_box:
t_boxes, t_masks, t_images, t_cates = get_occ(working_mode, occ_libs)
t_boxes, t_process = apply_n_occluder(t_boxes, img_shape=img.shape[0:2], in_box=None)
for i, proc in enumerate(t_process):
if proc:
t_masks[i] = proc.apply(t_masks[i])
t_images[i] = proc.apply(t_images[i])
mask_map = mix_masks(t_masks, t_boxes)
occluder_map = mix_imgs(t_masks, t_boxes, t_images)
ratios.append(check_occ_ratio(mask_map, box_anno))
masks.append(mask_map)
boxes.append(t_boxes)
occluders.append(occluder_map)
cates.append(t_cates)
ratios_np = np.array(ratios)
ratios_base = np.array(occluding_rate)
# n * 2 - 9 * 2 -> n * 1 * 2 - 1 * 9 * 2 -> n * 9 * 2 -> all(2) -> any(n) -> 9
legal_assign = np.any(
np.all(np.abs(np.expand_dims(ratios_np, axis=1) - np.expand_dims(ratios_base, axis=0)) < allowed_var,
axis=2), axis=0)
# n * 2 - 9 * 2 -> n * 1 * 2 - 1 * 9 * 2 -> n * 9 * 2 -> sum(2) -> argmin(n) -> 9
dist_assign = np.argmin(
np.sum(np.abs(np.expand_dims(ratios_np, axis=1) - np.expand_dims(ratios_base, axis=0)) + 10 * (np.abs(np.expand_dims(ratios_np, axis=1) - np.expand_dims(ratios_base, axis=0)) >= allowed_var), axis=2), axis=0)
dist_score = np.min(
np.sum(np.abs(np.expand_dims(ratios_np, axis=1) - np.expand_dims(ratios_base, axis=0)) + 10 * (np.abs(np.expand_dims(ratios_np, axis=1) - np.expand_dims(ratios_base, axis=0)) >= allowed_var), axis=2), axis=0)
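        # dist_assign/dist_score pick, for each target occlusion level, the
        # candidate whose (in-box, out-box) ratios are closest; the +10 penalty
        # effectively disqualifies candidates outside the allowed_var tolerance.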
for i in range(len(occluding_rate)): # 9
if legal_assign[i]:
if (not filled_level[i]) or dist_score[i] < filled_score[i]:
filled_level[i] = legal_assign[i] # False -> True
filled_score[i] = dist_score[i]
idx_ = dist_assign[i]
using_box[i] = boxes[idx_]
using_mask[i] = masks[idx_]
using_occluder[i] = occluders[idx_]
using_cate[i] = cates[idx_]
fully_filled = np.all(filled_level)
image_out = np.zeros(len(occluding_rate), dtype=object)
for i in range(len(occluding_rate)): # 9
if filled_level[i]:
image_out[i] = (merge_occ_image(using_mask[i], using_occluder[i], img.copy()))
return filled_level, image_out, using_mask, using_box, using_cate
def load_one_annotation(anno_path,imgs_shape):
bbox_ = []
with open(anno_path) as jnt:
lines = jnt.readlines()
for line in lines:
sl = line.split(' ')
#print(line)
#print(int(sl[0]),float(sl[1]),float(sl[2]),float(sl[3]),float(sl[4][:-1]))
width = imgs_shape[1] * float(sl[3])
height = imgs_shape[0] * float(sl[4][:-1])
lx = imgs_shape[1] * float(sl[1]) - 0.5 * width
ly = imgs_shape[0] * float(sl[2]) - 0.5 * height
#print(int(sl[0]),':',lx,ly,lx + width,ly+height)
bbox_.append(np.array([lx,ly,lx + width,ly+height]))
return bbox_
def generate_dataset(cate, file_list, img_dir, anno_dir, mask_dir, save_img_dir, save_list_dir, save_anno_dir, occ_lib_dir,
occ_lib_names, record_file):
occ_libs = {}
annotations = [{'source': [], 'mask': [], 'box': [], 'occluder_box': [], 'occluder_mask': [], 'occluder_category': []} for _ in range(len(occluding_rate))]
img_list_ = ['' for _ in range(len(occluding_rate))]
for k in occ_lib_names:
occ_libs[k] = dict(np.load(occ_lib_dir % k, allow_pickle=True))
# occ_libs[k] = dict(np.load('tem_lib.npz', allow_pickle=True))
occ_libs[k]['boxes'] = bbt.from_numpy(occ_libs[k]['boxes'])
save_img_dir_list = [os.path.join(save_img_dir, folder_name % cate) for folder_name in folder_name_list]
for folder_name in save_img_dir_list:
os.makedirs(folder_name, exist_ok=True)
os.makedirs(save_list_dir, exist_ok=True)
os.makedirs(save_anno_dir, exist_ok=True)
for file_name in file_list:
#print(file_name)
#try:
# open images in PASCAL3D+
box = []
img = np.array(Image.open(os.path.join(img_dir, file_name + '.jpg')))
mask = np.array(Image.open(os.path.join(mask_dir, file_name + '.JPEG')))
anno = load_one_annotation(os.path.join(anno_dir, file_name + '.txt'),img.shape)
# different height and width for images in PASCAL3D+
        if not (mask.shape[0] == img.shape[0] and mask.shape[1] == img.shape[1]):
mask = cv2.resize(mask, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_NEAREST)
for anno_file in anno:
box.append(bbt.from_numpy(anno_file, image_boundary=img.shape[0:2], sorts=('x0', 'y0', 'x1', 'y1')))
filled_, images_, masks_, boxes_, cates_ = generate_one_img(img, box, occ_libs, mask)
# try:
#except:
#print('Unknown Expectations at %s' % file_name)
#record_file.write('Unknown Expectations at %s\n' % file_name)
#continue
if not np.all(filled_):
record_file.write('Unfill %s: ' % file_name)
for i in range(filled_.size):
if filled_[i]:
#print(i)
Image.fromarray(images_[i].astype(np.uint8)).save(os.path.join(save_img_dir_list[i], file_name + '.JPEG'))
annotations[i]['source'].append(os.path.join(img_dir, file_name + '.jpg'))
annotations[i]['occluder_mask'].append(masks_[i])
annotations[i]['mask'].append(mask)
annotations[i]['box'].append(bbt.list_box_to_numpy(box, save_image_boundary=True).ravel())
annotations[i]['occluder_box'].append(bbt.list_box_to_numpy(boxes_[i], save_image_boundary=True))
annotations[i]['occluder_category'].append(cates_[i])
img_list_[i] += file_name + '.JPEG' + '\n'
else:
record_file.write(' %d' % i)
if not np.all(filled_):
record_file.write('\n')
for name_, anno_ in zip(folder_name_list, annotations):
np.savez(os.path.join(save_anno_dir, (name_ % cate) + '.npz'), **anno_)
for name_, list_ in zip(folder_name_list, img_list_):
with open(os.path.join(save_list_dir, (name_ % cate) + '.txt'), 'w') as file:
file.write(list_)
return
def processSyntheticOcclusion():
for cate in categories:
print('Start cate: ', cate)
tem = open('generating_record_%s_1030.txt' % cate, 'w')
file_list_ = open(source_list_path % cate).readlines()
file_list_ = [tem.strip('\n') for tem in file_list_]
source_image_path_ = source_image_path % cate
source_anno_path_ = source_anno_path % cate
source_mask_path_ = source_mask_path % cate
#print((cate, file_list_, source_image_path_, source_anno_path_, source_mask_path_, save_img_path, save_list_path,
# save_anno_path, occ_libs_dir, occ_libs_name, tem))
generate_dataset(cate, file_list_, source_image_path_, source_anno_path_, source_mask_path_, save_img_path, save_list_path,
save_anno_path, occ_libs_dir, occ_libs_name, tem)
tem.close()
def unzipingAnnotations():
source_anno_path = path_save + '/annotations_grouped'
save_anno_path = path_save + '/annotations'
for cate in categories:
for name_ in folder_name_list:
print('Start cate: ' + name_ % cate)
this_folder = os.path.join(save_anno_path, name_) % cate
os.makedirs(this_folder, exist_ok=True)
data = np.load(os.path.join(source_anno_path, (name_ % cate) + '.npz'), allow_pickle=True)
source_list = data['source']
mask_list = data['mask']
box_list = data['box']
occluder_box_list = data['occluder_box']
occluder_mask = data['occluder_mask']
occluder_category = data['occluder_category']
for i in range(data['mask'].size):
this_name = source_list[i].split('/')[-1].split('.')[0]
np.savez(os.path.join(this_folder, this_name + '.npz'), source=source_list[i], mask=mask_list[i], box=box_list[i], occluder_mask=occluder_mask[i], occluder_box=occluder_box_list[i], occluder_category=occluder_category[i], category=cate, occluder_level=name_.strip('%s')[1])
def generalizingTrainingAnnotations():
for count,cate in enumerate(categories):
for i in folder_name_list:
folder_name = i % cate
print('Start cate: ' + folder_name)
listPath = args.output_image_dir + '/lists/' + folder_name + '.txt'
imgDir = args.output_image_dir + '/images/' + folder_name + '/'
os.makedirs(imgDir.replace('images','labels'), exist_ok=True)
imgList = open(listPath)
lines = imgList.readlines()
for line in lines:
imgPath = imgDir + line.split('.')[-2] + '.JPEG'
annoPath = imgPath.replace('images','annotations').replace('JPEG','npz')
jsonPath = imgPath.replace('images','labels').replace('JPEG','txt')
#print(jsonPath)
imgJson = open(jsonPath,'a')
anno = np.load(annoPath)
oc_cate_list = anno['occluder_category']
oc_box_list = anno['occluder_box']
for index in range(len(oc_cate_list)):
oc_cate = oc_cate_list[index]
oc_box = oc_box_list[index]
dictPath = './dictionary/' + oc_cate[0] + '.csv'
idx = int(oc_cate[1:])
oc_label = pd.read_csv(dictPath).loc[idx:idx,('label')]
oc_label = oc_label.values[0]
y1,y2,x1,x2,height,width = oc_box
y_cr = ((y1+y2)/2)/height
x_cr = ((x1+x2)/2)/width
w = abs(x2-x1)/width
h = abs(y2-y1)/height
#print(x_cr,y_cr,w,h)
imgJson.write(str(oc_label) + ' ' + '%.6g' % x_cr + ' ' + '%.6g' % y_cr + ' ' + '%.6g' % w + ' ' + '%.6g' % h + ' \n' )
imgJson.close()
imgList.close()
def chosenColorAugmentation():
wbColorAug = wbAug.WBEmulator() # create an instance of the WB emulator
for count,cate in enumerate(categories):
for i in folder_name_list:
folder_name = i % cate
print('Start cate: ' + folder_name)
cateDir = args.output_image_dir + '/colorAugmentedImages/' + folder_name + '/'
imgDir = args.output_image_dir + '/images/' + folder_name + '/'
jsonDir = cateDir.replace('colorAugmentedImages','colorAugmentedlabels')
labelDir = args.output_image_dir + '/labels/' + folder_name + '/'
os.makedirs(cateDir, exist_ok=True)
os.makedirs(jsonDir, exist_ok=True)
wbColorAug.batch_processing(imgDir, cateDir,
args.out_number, 1)
for root, dirs, imgs in os.walk(cateDir):
for img in imgs:
imgidx = img.split('_')[0]
imgname = img.split('.')[0]
try:
srcp = labelDir + imgidx + '.txt'
dstp = jsonDir + imgname + '.txt'
shutil.copy(srcp, dstp)
except Exception:
print(srcp + '--x-->' + dstp)
if __name__ == '__main__':
print('processing synthetic occlusion ..')
processSyntheticOcclusion()
print('unziping annotations ..')
unzipingAnnotations()
print('generalizing darknet style annotations .. ')
generalizingTrainingAnnotations()
print('processing color augmentation')
chosenColorAugmentation()
| UTF-8 | Python | false | false | 21,450 | py | 18 | CreateOccludedDatasetWithColorAugmentation.py | 3 | 0.5462 | 0.534825 | 0 | 500 | 40.88 | 289 |
iranmolina3/DevDjangoSchool | 6,390,911,360,677 | 333f0744993d95d802ac962b229b633343a1e267 | 480b0f8e985ccced2c83c939d1ada30e8be3da6e | /Apps/AppUsuario/migrations/0004_remove_clsusuario_fk_establecimiento.py | 41b143afe8313f8f5e716c21901f4f797695202e | [] | no_license | https://github.com/iranmolina3/DevDjangoSchool | 205243a39c793b0631c0b25732725053d262e286 | bf91cb302a92e7fff8a977732db2a3c3fef81011 | refs/heads/master | 2022-03-20T13:58:28.916185 | 2019-11-16T02:08:12 | 2019-11-16T02:08:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.2.7 on 2019-11-15 09:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('AppUsuario', '0003_auto_20191113_0005'),
]
operations = [
migrations.RemoveField(
model_name='clsusuario',
name='fk_establecimiento',
),
]
| UTF-8 | Python | false | false | 347 | py | 81 | 0004_remove_clsusuario_fk_establecimiento.py | 69 | 0.599424 | 0.510086 | 0 | 17 | 19.411765 | 50 |
christophecasson/ttfobservatory | 6,167,573,052,264 | ffc9fc2fe3b507db228f0c688f31416d95dc30fb | fd589e21e96ccd80f9348897dae0cd515d136d21 | /app/observatorycontroller/roof-controller.py | 7de71a89f5bf68d2534fc87a6c71103ac37a3706 | [] | no_license | https://github.com/christophecasson/ttfobservatory | c949b4518f18073a019bc83c8ed3efdccb82e2b4 | f45c868512d66f8490072a6d7c32d39404319918 | refs/heads/master | 2023-04-07T12:20:06.619695 | 2021-04-04T14:44:44 | 2021-04-04T14:44:44 | 112,009,267 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python2
import os
import errno
import time
from datetime import datetime
import sys
import signal
import serial
from tools import *
BOARDNAME = "roof"
status = { "board_state":"Connecting",
"board_vin":"Connecting",
"state":"Connecting",
"opened":"Connecting",
"closed":"Connecting"
}
control = { "move":""
}
#handle Ctrl-C (SIGINT) and Kill (SIGTERM) properly
def sigint_handler(signum, frame):
debug("SIGINT received! Closing connections...")
deleteFifos()
disconnect()
debug("exit(0)")
sys.exit(0)
def sigterm_handler(signum, frame):
debug("SIGTERM received! Closing connections...")
deleteFifos()
disconnect()
debug("exit(0)")
sys.exit(0)
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTERM, sigterm_handler)
#fifo stuff
fifo_board_path = fifo_root_path + BOARDNAME + "/"
fifo_status_path = fifo_board_path + "status/"
fifo_control_path = fifo_board_path + "control/"
def createFifos():
mkdir(fifo_status_path)
mkdir(fifo_control_path)
for name in status:
mkfifo(fifo_status_path + name)
for name in control:
mkfifo(fifo_control_path + name)
def deleteFifos():
for name in status:
rmfile(fifo_status_path + name)
for name in control:
rmfile(fifo_control_path + name)
rmdir(fifo_status_path)
rmdir(fifo_control_path)
rmdir(fifo_board_path)
def ReadFIFO_control_move():
fifo_path = fifo_control_path + "move"
try:
pipe = os.open(fifo_path, os.O_RDONLY | os.O_NONBLOCK)
data = os.read(pipe, 4096)
os.close(pipe)
except OSError as err:
if err.errno == 11:
return
else:
raise err
if data != '':
item = data.split()
lastitem = item[len(item)-1]
sendCmd(lastitem)
def WriteFIFOs_status():
for name in status:
try:
fifo_path = fifo_status_path + name
pipe = os.open(fifo_path, os.O_WRONLY | os.O_NONBLOCK)
os.write(pipe, status[name] + "\n")
os.close(pipe)
except:
pass
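# Client-side sketch (assumption): another process drives the roof through
# these FIFOs, e.g. from a shell:
#   echo "open" > <fifo_root>/roof/control/move
#   cat <fifo_root>/roof/status/state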
#serial port stuff
serialport = str(sys.argv[1])
serialportbaudrate = 9600
def connect():
global ser
debug("Connecting " + serialport + " ")
while True:
try:
sys.stdout.write(".")
sys.stdout.flush()
ser = serial.Serial( port=serialport, baudrate=serialportbaudrate, timeout=5 )
ser.isOpen()
print("")
debug("serial port " + serialport + " connected at " + str(serialportbaudrate) + " bauds")
serialWrite("@") #start automatic status sending on arduino
return
except serial.SerialException:
WriteFIFOs_status()
time.sleep(1)
def disconnect():
try:
global ser
if ser.isOpen():
serialWrite("#") #stop automatic status sending on arduino
ser.close()
debug("serial port disconnected")
except:
return
def reconnect():
debug("reconnecting serial port...")
status["board_state"] = "Reconnecting"
status["board_vin"] = "Reconnecting"
status["state"] = "Reconnecting"
status["opened"] = "Reconnecting"
status["closed"] = "Reconnecting"
try:
ser.close()
except:
pass
connect()
def serialRead(nbytes):
    try:
        data = ser.read(nbytes)
        return data
    except serial.SerialException:
        debug("Error reading " + str(nbytes) + " byte(s)")
        reconnect()
def serialWrite(data):
try:
ser.write(data.encode())
return
except serial.SerialException:
debug("Error sending " + str(data) )
reconnect()
def sendCmd(cmd):
debug("sendCmd(\"" + cmd + "\")")
serialWrite(cmd+"\r")
def updateStatus():
datastring = ""
data = ''
serialWrite("@")
# time.sleep(0.25)
while ser.inWaiting() > 0:
data = serialRead(1)
datastring += data
if data == "@":
datastring = "@"
if data == "$":
#datastring = "@-O-12036-STATE-0-0-$"
item = datastring.split('-')
if len(item) != 7:
return
board_state = True
if item[0] != '@':
board_state = False
if item[6] != '$':
board_state = False
if item[1] != 'O':
board_state = False
if board_state == True:
status["board_state"] = "OK"
else:
status["board_state"] = "Error"
status["board_vin"] = item[2] + "mV"
status["state"] = item[3]
status["opened"] = item[4]
status["closed"] = item[5]
return
#main code
connect()
createFifos()
debug("Init done!")
while True:
updateStatus()
WriteFIFOs_status()
ReadFIFO_control_move()
time.sleep(0.1)
| UTF-8 | Python | false | false | 4,266 | py | 29 | roof-controller.py | 26 | 0.651195 | 0.641585 | 0 | 248 | 16.197581 | 93 |
0xafbf/lluvia | 16,862,041,624,814 | 15dc59a51065c71ec73505193b8889a1e42df9fa | 12c0f26d96756f51244a126fcebb1e3e98ba17a4 | /python/src/lluvia/core/enums/__init__.py | 856057461ac8ed6db82cd3d890fbce35e69e52de | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | https://github.com/0xafbf/lluvia | bfd2a9d9913efe978dc82d308996037d4ecef02e | ea0633dbec9d38f4a6de5577b3c19908eb8a7ba4 | refs/heads/master | 2022-09-11T08:27:09.711825 | 2020-05-23T20:06:34 | 2020-05-23T20:06:34 | 262,950,108 | 1 | 0 | Apache-2.0 | true | 2020-05-11T05:47:16 | 2020-05-11T05:47:16 | 2020-05-06T16:31:23 | 2020-05-01T19:16:51 | 849 | 0 | 0 | 0 | null | false | false | """
lluvia.core.enums
-----------------
:copyright: 2018, Juan David Adarve Bermudez. See AUTHORS for more details.
:license: Apache-2 license, see LICENSE for more details.
"""
from .image import ChannelType, ChannelCount, ImageUsageFlagBits, ImageAxis, ImageFilterMode, ImageAddressMode
from .node import NodeState, NodeType, PortDirection, PortType
from .vulkan import BufferUsageFlagBits, MemoryPropertyFlagBits, ImageLayout, Format, ImageType, ImageTiling
from .parameter import ParameterType
| UTF-8 | Python | false | false | 516 | py | 74 | __init__.py | 25 | 0.763566 | 0.753876 | 0 | 12 | 42 | 110 |
tableau/altimeter | 11,974,368,853,385 | 246e727dce04f520642216db2bdddefaa62364d8 | 274eb3a3c4202c86a40e13d2de7c2d6f2a982fcb | /tests/unit/altimeter/core/graph/test_schema.py | 80aa1077f3775da60b66022353dd4b1c8ffff7e0 | [
"MIT",
"Python-2.0"
] | permissive | https://github.com/tableau/altimeter | 6199b8827d193946bb0d0d1e29e462fc8749d3e4 | eb7d5d18f3d177973c4105c21be9d251250ca8d6 | refs/heads/master | 2023-08-15T16:21:31.265590 | 2023-07-04T13:13:32 | 2023-07-04T13:13:32 | 212,153,766 | 75 | 25 | MIT | false | 2023-08-02T02:05:22 | 2019-10-01T17:10:16 | 2023-06-22T14:24:42 | 2023-08-02T02:05:17 | 1,670 | 67 | 23 | 15 | Python | false | false | from unittest import TestCase
from altimeter.core.graph.field.scalar_field import ScalarField
from altimeter.core.graph.links import LinkCollection, SimpleLink
from altimeter.core.graph.schema import Schema
class TestSchema(TestCase):
def test_parse(self):
schema = Schema(ScalarField("Key1"), ScalarField("Key2"))
data = {"Key1": "Value1", "Key2": "Value2"}
link_collection = schema.parse(data, {})
expected_link_collection = LinkCollection(
simple_links=(
SimpleLink(pred="key1", obj="Value1"),
SimpleLink(pred="key2", obj="Value2"),
)
)
self.assertEqual(link_collection, expected_link_collection)
| UTF-8 | Python | false | false | 711 | py | 248 | test_schema.py | 225 | 0.652602 | 0.638537 | 0 | 19 | 36.421053 | 67 |
Pratham82/Python-Programming | 3,083,786,536,636 | 70bd5ea834f52851e15b7a18ed863ee96617ecc8 | bf616736ea66c0ce3f36f0d75d9f2951c52b74d7 | /Exercises from books/Learn python the hard way/Exercise_14.py | cb099f944dd67dc413863a072cd9ba6df069ffef | [
"MIT"
] | permissive | https://github.com/Pratham82/Python-Programming | 40a03e163bdc6985a337a8a9638f4eb77ae43ad9 | bbe5fd9132d5cf42ed9f29c3dd758cdc2c17760c | refs/heads/master | 2021-12-12T15:13:32.018356 | 2021-12-09T18:16:43 | 2021-12-09T18:16:43 | 230,051,536 | 3 | 2 | MIT | false | 2021-10-06T10:11:37 | 2019-12-25T06:13:04 | 2021-10-06T09:12:50 | 2021-10-06T10:11:37 | 281 | 3 | 2 | 0 | Python | false | false | #* Prompting and passing
from sys import argv
import re
script, username = argv
clean_script = re.sub('[.py]', '',script)
prompt="-->"
def prompts_passing_game():
while True:
print(f"Hi {username} how you doing?.\nI'm the {clean_script} script")
print("I'm going to ask you some questions, is it okay? answer in 'y' or 'n" )
permission =input(prompt)
if (permission == 'y' or 'yes'):
pass
else:
print("Okay that's fine comeback once you are ready.")
print(f"{clean_script} signing off bye 👋")
return False
print(f"Do you like me {username}")
likes = input(prompt)
print("What do you do for living ?")
occupation = input(prompt)
print(f"What's your age? {username} ")
age = input(prompt)
print(f"""So you said you {likes} about liking me.\nProfessionally you do {occupation}.\nAnd your age is {age}.\nThanks for the survey {username}.""")
return False
prompts_passing_game() | UTF-8 | Python | false | false | 1,066 | py | 270 | Exercise_14.py | 259 | 0.582314 | 0.582314 | 0 | 36 | 28.555556 | 158 |
anuragtomar2510/iitb-reddit | 7,825,430,444,065 | e93a04d5a4f34ab3dea187ec2deeab262b98e792 | dab3d1431dd63e1e73a53478ad1ddd3e8ba25921 | /reddit/subreddits/models.py | 02c2395a20f1aac55ca7f867956421698ea4367b | [] | no_license | https://github.com/anuragtomar2510/iitb-reddit | b359baf95167b5c0fcc77f5c5703ef43b019a085 | 7d8d86c5637cf33a3a1b0b4ce13dd724546cdcae | refs/heads/master | 2021-06-07T23:22:09.444472 | 2016-11-23T20:18:22 | 2016-11-23T20:18:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import unicode_literals
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
def validate_nospace(value):
if value.find(' ') != -1:
raise ValidationError(
_('%(value)s contains space'),
params={'value': value},
)
class Subreddit(models.Model):
created_on = models.DateTimeField('created_on', auto_now_add=True)
title = models.CharField('title', max_length=20, validators=[validate_nospace], primary_key=True)
description = models.TextField('description')
def __unicode__(self):
return self.title
| UTF-8 | Python | false | false | 668 | py | 30 | models.py | 22 | 0.678144 | 0.673653 | 0 | 21 | 30.809524 | 101 |
makovako-tutorials/flask-app-tutorial | 10,075,993,276,967 | 64142280d194b08f38b2dd80b9053ddd6b366d6c | a034f129622faad5f8d9818a7a568ba43c20502d | /06-configuration/config.py | 2644394a900e1b20e6c0277ba53084f3ffadb1cb | [] | no_license | https://github.com/makovako-tutorials/flask-app-tutorial | c70f25687d7e74c287cd328c0542d297c07c1409 | 7d72c80b1a0c00c9510b6316271da4419209aa9a | refs/heads/master | 2021-02-26T08:16:14.261135 | 2020-03-11T15:36:10 | 2020-03-11T15:36:10 | 245,509,760 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Flask config class."""
import os
class Config:
"""Set Flask configuration vars."""
# General Config
TESTING = True
DEBUG = True
SECRET_KEY = b'_5#y2L"F4Q8z\n\xec]/'
SESSION_COOKIE_NAME = 'my_cookie'
# From .env
TESTING = os.environ.get('TESTING')
DEBUG = os.environ.get('DEBUG')
SECRET_KEY = os.environ.get('SECRET_KEY')
SESSION_COOKIE_NAME = os.environ.get('SESSION_COOKIE_NAME')
# we can have more config variations
class ProdConfig(Config):
DEBUG = False
TESTING = False
DATABASE_URI = os.environ.get('PROD_DATABASE_URI')
class DevConfig(Config):
DEBUG = True
TESTING = True
DATABASE_URI = os.environ.get('DEV_DATABASE_URI')
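# Usage sketch (assumption): select a config class when creating the Flask app,
# e.g. app.config.from_object('config.DevConfig').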
# some other configuration
# FLASK_ENV
# DEBUG
# TESTING
# SECRET_KEY
# SERVER_NAME
# ASSETS_DEBUG
# COMPRESSOR_DEBUG
# FLASK_ASSETS_USE_S3
# FLASK_ASSETS_USE_CDN
# SQLALCHEMY_DATABASE_URI
# SQLALCHEMY_ECHO
# SQLALCHEMY_ENGINE_OPTIONS
# SESSION_TYPE
# SESSION_PERMANENT
# SESSION_KEY_PREFIX
# SESSION_REDIS
# SESSION_MEMCACHED
# SESSION_MONGODB
# SESSION_SQLALCHEMY
| UTF-8 | Python | false | false | 1,076 | py | 11 | config.py | 8 | 0.686803 | 0.682156 | 0.000929 | 54 | 18.925926 | 63 |
LeeEunHak/Python_Study | 1,563,368,099,427 | 2d2acc327a416ac49701f3e442b958c1b2e20656 | 8ffec9a0ca89c4eebb33bff1b8505b2696968eae | /CHAPTER06_리스트/Pythagoras.py | 8e94c62a7cee065cfeb0e5cf370fc42951312992 | [] | no_license | https://github.com/LeeEunHak/Python_Study | f26b6ca1adb63cd4c8e146e97e63a8b1e118f4a6 | 2892ba8d77f6b6cbe70db69a0f718443de3d6fec | refs/heads/master | 2020-04-07T08:31:21.294187 | 2019-08-01T05:34:56 | 2019-08-01T05:34:56 | 158,217,662 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 피타고라스 정리를 만족하는 삼각형들을 모두 찾아보자.
# 삼각형의 한 변의 길이는 1부터 30이하이다.
new_list=[]
for x in range(1,30):
for y in range(x,30):
for z in range(y,30):
if x**2 + y**2 == z**2:
new_list.append((x, y, z))
print(new_list)
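# Equivalent comprehension (sketch): the same triples in a single expression.
triples = [(x, y, z) for x in range(1, 30) for y in range(x, 30)
           for z in range(y, 30) if x ** 2 + y ** 2 == z ** 2]
assert triples == new_list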
| UTF-8 | Python | false | false | 325 | py | 40 | Pythagoras.py | 38 | 0.51417 | 0.461538 | 0 | 11 | 21.454545 | 42 |
harshittiwari/LeetCode | 9,388,798,540,940 | de19b828b3d09da70628d9cf33cd79128339d562 | 5a0b3b6742a23f23295f38d8822f366ec5521350 | /Two Sum.py | b720648daab9894195690d77a2521c8eb0f5aa00 | [] | no_license | https://github.com/harshittiwari/LeetCode | 672e83e773758dfac1f80c88042536e7c81d160b | 478bb4019ed38d489171428dced8cbc6f9b3eb52 | refs/heads/master | 2020-12-24T08:03:10.489683 | 2016-08-29T22:02:26 | 2016-08-29T22:02:26 | 56,408,632 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
for i in range(len(nums)):
x = target - nums[i]
for j in range(i+1,len(nums)):
if nums[j] == x:
return [i+1,j+1]
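# A hedged alternative (sketch, not part of the original solution): a one-pass
# hash map makes the lookup O(n) and returns the same 1-based indices.
def twoSumFast(nums, target):
    seen = {}  # value -> 1-based index where it was first seen
    for i, n in enumerate(nums, start=1):
        if target - n in seen:
            return [seen[target - n], i]
        seen[n] = i

print(twoSumFast([2, 7, 11, 15], 9))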
print(Solution().twoSum([2, 7, 11, 15],9))
print(Solution().twoSum([7, 2, 11, 15],9))
print(Solution().twoSum([7, 11, 15, 2],9)) | UTF-8 | Python | false | false | 498 | py | 43 | Two Sum.py | 42 | 0.473896 | 0.425703 | 0 | 18 | 26.722222 | 42 |
xsig/grader | 13,331,578,510,462 | f1248ed323829bd6979735022505805180ff1774 | b26116ebb2291c5e4dbf38abb7ae04c6d1160b07 | /hito1_ej3/hito1_ej3_02ef5d365ea8ffa81bfe24ebb25bf3cd.py | bef2bf0d48e7ed42942e7f9e4d5e0e90fad2e5c7 | [] | no_license | https://github.com/xsig/grader | 91538b149a263adbe68056060f26e1a822b593d7 | 8a65cb7970466ed83fe061839ba7776a7d45d38b | refs/heads/master | 2018-02-14T05:06:29.887487 | 2017-06-10T22:05:35 | 2017-06-10T22:05:35 | 64,847,548 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Aprobación de créditos
ingreso = int(input("Ingreso: "))
nacimiento = int(input("Ano de nacimiento: "))
hijos = int(input("Hijos: "))
anosBanco = int(input("Anos banco: "))
estado = input("Estado civil (S, C): ")
vivienda = input("Vivienda (U, R): ")
anos = 2016 - nacimiento
if anosBanco > 10 and hijos >= 2:
print("APROBADO")
elif estado == 'C' and hijos > 3 and 45 < anos < 55:
print("APROBADO")
elif ingreso > 2500000 and estado == 'S' and vivienda == 'U':
print("APROBADO")
elif ingreso > 3500000 and anosBanco > 5:
print("APROBADO")
elif vivienda == 'R' and estado == 'C' and hijos < 2:
print("APROBADO")
else:
print("RECHAZADO") | UTF-8 | Python | false | false | 646 | py | 1,840 | hito1_ej3_02ef5d365ea8ffa81bfe24ebb25bf3cd.py | 1,834 | 0.656832 | 0.613354 | 0 | 22 | 28.318182 | 61 |
AneesKazi/TrainingOnFly | 901,943,158,373 | 734efc87eaa360afa8c3c43c26575dfaec751786 | 4027a84e303130252054bed63d21e27ef3fe06c2 | /CodeToTrainOnFly/investigate.py | 287847148a8e05f3b1bad329be55cc8f4d972c39 | [] | no_license | https://github.com/AneesKazi/TrainingOnFly | 343657f8549b8e358a7b4671aab4d7bf18c849de | 81a3524f2105bea8c54949d952d189f38f60a893 | refs/heads/master | 2020-04-03T03:18:42.287718 | 2018-10-27T15:56:04 | 2018-10-27T15:56:04 | 154,982,061 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import oct_io as io
import models
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import copy
class Denoiser(object):
def __init__(self, checkpoint, basename):
# load model
self.basename = basename
self.checkpoint = basename + checkpoint
self.garch = basename + 'generator_architecture.pkl'
self.darch = basename + 'discriminator_architecture.pkl'
self.inputs = {'hq': tf.placeholder(tf.float32, (1, 512, 512, 1), 'HQ-Input'),
'lq': tf.placeholder(tf.float32, (1, 512, 512, 1), 'LQ-Input')}
self.model = models.CyclicGAN.from_trained_model(self.inputs, self.garch, self.darch, self.checkpoint)
def denoise(self, image):
return self.model.generate(image[np.newaxis, ..., np.newaxis], 'HQ')[0, ..., 0]
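# Usage sketch (assumption: the checkpoint and architecture pickles exist under
# `basename`):
#   d = Denoiser('cGAN.ckpt-333200', '/path/to/run/')
#   clean = d.denoise(noisy_512x512_array)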
class Investigator(object):
def __init__(self, checkpoint, basename='', sample_files='', cmap='gray', figsize=(8, 4)):
# load model
self.basename = '/media/network/DL_PC/ilja/cycoct-alldata/gen_norm-in_act-selu_scale-3_res-6x3_f-16/dis_norm-id_act-selu_f-1/1-cyc_1-dis/' if not basename else basename
        self.checkpoint = self.basename + checkpoint
        self.garch = self.basename + 'generator_architecture.pkl'
        self.darch = self.basename + 'discriminator_architecture.pkl'
# garch = cg.CyclicGAN.generate_gen_arch(3, 6, 3, tf.nn.selu, filters=16, skip_conn=True)
# darch = cg.CyclicGAN.generate_dis_arch(512, tf.nn.selu, norm_func=cg.identity)
self.logdir = '/home/kazuki/testing'
self.inputs = {'hq': tf.placeholder(tf.float32, (1, 512, 512, 1), 'HQ-Input'),
'lq': tf.placeholder(tf.float32, (1, 512, 512, 1), 'LQ-Input')}
self.model = models.CyclicGAN.from_trained_model(self.inputs, self.garch, self.darch, self.checkpoint)
self.graph = self.model.graph
# self.graph = tf.get_default_graph()
# self.model = cg.CyclicGAN(garch, darch, self.inputs, None, logdir, graph=self.graph, inference_mode=True)
# self.model.load(self.checkpoint)
sample_files = sample_files if sample_files else ['/media/network/ImageRepository/cimg_db/24573/564875/6/R/pat.dcm', '/media/network/ImageRepository/cimg_db/24573/564879/6/R/pat.dcm']
# self.samples = samples if samples else ['../DeepOCTPrior/test_octs/lo.dcm', '../DeepOCTPrior/test_octs/hi.dcm']
# load in sample dicoms
self.samples = {}
self.load_sample(*sample_files)
# set pyplot parameters
self.figsize = figsize
self.cmap = cmap
def change_checkpoint(self, checkpoint):
self.checkpoint = self.basename + checkpoint
self.model.load(self.checkpoint)
def load_sample(self, lq, hq):
self.sample_files = [lq, hq]
lq_oct = io.OCTVolume(lq, load=True, pad={}, normalize={'min': 'mean-0.5'})
hq_oct = io.OCTVolume(hq, load=True, pad={}, normalize={'min': 'mean-0.5'})
hq = np.reshape(hq_oct.image_data, (49, 512, 512, 1))
lq = np.reshape(lq_oct.image_data, (49, 512, 512, 1))
self.samples = {'hq': hq, 'lq': lq}
def generate(self, source_index, target, source='', graphical=False):
if not source:
source = target
sample = self.samples[source][source_index:source_index + 1]
result = self.model.generate(sample, target)
output = np.c_[sample[0,...,0], result[0,...,0]]
if graphical:
plt.figure(figsize=self.figsize)
plt.imshow(output, cmap=self.cmap)
plt.show()
return result
def show_activations(self, source_index, model_part, target, source=''):
# helper for domain input
complement = 'hq' if target == 'lq' else 'lq'
if not source:
source = complement
# get relevant layers
if model_part == 'gen':
if target == 'hq':
layers = self.model.hq.gen.layers
elif target == 'lq':
layers = self.model.lq.gen.layers
elif model_part == 'dis':
if target == 'hq':
layers = self.model.hq.dis.layers
elif target == 'lq':
layers = self.model.lq.dis.layers
else:
raise ValueError('unknown model part or target: {}, {}'.format(model_part, target))
sample = self.samples[source][source_index:source_index + 1]
activations = self.model.sess.run(layers, {layers[0]: sample})
[mosaic(act[0,...], cmap=self.cmap, figsize=self.figsize) for act in activations]
return activations
def modify(self, layer_index, channel_index, model_part, target, source='', source_index=0):
# get relevant layers
if model_part == 'gen':
if target == 'hq':
layers = self.model.hq.gen.layers
elif target == 'lq':
layers = self.model.lq.gen.layers
elif model_part == 'dis':
if target == 'hq':
layers = self.model.hq.dis.layers
elif target == 'lq':
layers = self.model.lq.dis.layers
else:
raise ValueError('unknown model part or target: {}, {}'.format(model_part, target))
# helper for domain input
complement = 'hq' if target == 'lq' else 'lq'
# setup sample
if not source:
source = complement
sample = self.samples[source][source_index:source_index + 1]
# get tensor for the specified layer
modified = self.model.sess.run(layers[layer_index], {layers[0]: sample})
# drop channel
modified[..., channel_index] = np.zeros((modified.shape[:-1]))
# generate from modified tensor
result = self.model.sess.run(layers[-1], {layers[layer_index]: modified})
# show result
output = np.c_[sample[0, ..., 0], result[0, ..., 0]]
plt.figure(figsize=self.figsize)
plt.imshow(output, cmap=self.cmap)
plt.show()
def setup():
# load model
checkpoint = '/media/network/DL_PC/ilja/cycoct-skip_processed/gen_norm-in_act-selu_scale-3_res-6x3_f-16/dis_norm-id_act-selu_f-1/10-cyc_1-dis/cGAN.ckpt-333200'
garch = models.CyclicGAN.generate_gen_arch(3, 6, 3, tf.nn.selu, filters=16, skip_conn=True)
darch = models.CyclicGAN.generate_dis_arch(512, tf.nn.selu, norm_func=models.identity)
logdir = '/home/kazuki/testing'
inputs = {'hq': tf.placeholder(tf.float32, (1, 512, 512, 1), 'HQ-Input'),
'lq': tf.placeholder(tf.float32, (1, 512, 512, 1), 'LQ-Input')}
graph = tf.get_default_graph()
cgan = models.CyclicGAN(garch, darch, inputs, None, logdir, graph=graph, inference_mode=True)
# load in sample dicoms
# lq_oct = io.OCTVolume('../DeepOCTPrior/test_octs/lo.dcm')
# hq_oct = io.OCTVolume('../DeepOCTPrior/test_octs/hi.dcm')
lq_oct = io.OCTVolume('/media/network/ImageRepository/cimg_db/24573/564875/6/R/pat.dcm')
hq_oct = io.OCTVolume('/media/network/ImageRepository/cimg_db/24573/564879/6/R/pat.dcm')
lq_oct.load()
hq_oct.load()
hq_oct.pad()
lq_oct.pad()
return hq_oct, lq_oct, cgan, checkpoint, inputs
def convert_to_uint8(nparray):
nparray += np.abs(nparray.min())
nparray /= nparray.max()
nparray *= 255
nparray = nparray.astype(np.uint8)
return nparray
def mosaic(volume, **plot_args):
import math
n_images = volume.shape[-1]
cols = int(math.sqrt(n_images))
rows = int(math.ceil(n_images / cols))
image = []
for r in range(rows):
image.append([])
for c in range(cols):
if r * cols + c >= n_images:
image[r].append(np.zeros(volume.shape[:-1]))
else:
image[r].append(volume[..., r * cols + c])
for i in range(len(image)):
image[i] = np.c_[tuple(image[i])]
image = np.r_[tuple(image)]
if len(plot_args) > 0:
if 'figsize' in plot_args.keys():
plt.figure(figsize=plot_args.pop('figsize'))
plt.imshow(image, **plot_args)
plt.show()
return image
def modify(layer, index, model, target='hq'):
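    # Ablation probe (sketch): zero out one feature channel at the given layer
    # and re-run the generator tail, to visualize that channel's contribution.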
modified = copy.copy(layer)
modified[...,index] = np.zeros(layer.shape[:-1])
#mosaic(modified)
modified = np.reshape(modified, (1, *modified.shape))
    gen = model.hq.gen if target == 'hq' else model.lq.gen
    result = model.sess.run(gen.layers[-1], {gen.layers[-2]: modified})
plt.imshow(result[0,...,0], cmap='inferno')
plt.show()
def show(sample, min=None, max=None):
temp = sample.copy()
if min is not None:
vmin = min
temp[temp<min] = 0
else:
vmin = temp.min()
if max is not None:
vmax = max
temp[temp>max] = 0
else:
vmax = temp.max()
plt.imshow(temp, cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
| UTF-8 | Python | false | false | 8,843 | py | 8 | investigate.py | 7 | 0.596065 | 0.572769 | 0 | 239 | 35.991632 | 191 |
iiasa/message_ix | 10,823,317,588,457 | 9c1c3ac19e2c7c9c1c97a96eef3843b76b7e0006 | 97efdfac795c44f9a2a62e48ba71bf1783c523bf | /message_ix/tests/test_feature_bound_emission.py | 61c22d4b1bb682b7975fb9645ed1b0a3aa139474 | [
"CC-BY-4.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | https://github.com/iiasa/message_ix | 1e96ee254b6f67117faf4fb78ef8fd75d9ee0a6c | bf4469111a2d10c5dbc2c921e6a7e502e96aea34 | refs/heads/main | 2023-08-17T20:36:23.055462 | 2023-08-14T08:46:24 | 2023-08-14T08:46:24 | 113,015,968 | 103 | 162 | Apache-2.0 | false | 2023-09-08T11:56:21 | 2017-12-04T08:29:06 | 2023-08-25T03:47:48 | 2023-09-08T11:56:21 | 11,347 | 97 | 141 | 119 | Jupyter Notebook | false | false | from message_ix import Scenario
def model_setup(scen, years):
scen.add_set("node", "node")
scen.add_set("lvl_spatial", "country")
scen.add_set("map_spatial_hierarchy", ["country", "node", "World"])
scen.add_set("commodity", "comm")
scen.add_set("emission", "emiss")
scen.add_cat("emission", "emiss_type", "emiss")
scen.add_set("level", "level")
scen.add_set("year", years)
scen.add_set("type_year", years)
scen.add_set("technology", ["tec1", "tec2"])
scen.add_set("mode", "mode")
output_specs = ["node", "comm", "level", "year", "year"]
dict_var_cost = {"tec1": 1, "tec2": 2}
dict_em_factor = {"tec1": 1.5, "tec2": 1}
for yr in years:
scen.add_par("demand", ["node", "comm", "level", yr, "year"], 1, "GWa")
for t in dict_var_cost.keys():
tec_specs = ["node", t, yr, yr, "mode"]
scen.add_par("output", tec_specs + output_specs, 1, "GWa")
scen.add_par("var_cost", tec_specs + ["year"], dict_var_cost[t], "USD/GWa")
scen.add_par(
"emission_factor", tec_specs + ["emiss"], dict_em_factor[t], "kg/kWa"
)
def add_bound_emission(scen, bound, year="cumulative"):
scen.check_out()
scen.add_par("bound_emission", ["node", "emiss_type", "all", year], bound, "kg")
scen.commit("Emission bound added")
def assert_function(scen, year):
var_em = scen.var("EMISS", {"node": "node"}).set_index(["year"])["lvl"]
bound_em = scen.par("bound_emission", {"type_year": year}).at[0, "value"]
if year == "cumulative":
duration = scen.par("duration_period").set_index("year")["value"]
assert sum(var_em * duration) / sum(duration) <= bound_em
else:
assert var_em[year] <= bound_em
# Testing the emission bound for a single year
def test_bound_emission_year(test_mp):
scen = Scenario(test_mp, "test_bound_emission", "standard", version="new")
model_setup(scen, [2020, 2030])
scen.commit("initialize test model")
add_bound_emission(scen, bound=1.250, year=2020)
scen.solve(case="bound_emission_year", quiet=True)
assert_function(scen, year=2020)
# Testing cumulative emission bound for model years with equal intervals
def test_bound_emission_10y(test_mp):
scen = Scenario(test_mp, "test_bound_emission", "standard", version="new")
model_setup(scen, [2020, 2030, 2040, 2050])
scen.commit("initialize test model")
add_bound_emission(scen, bound=1.250)
scen.solve(case="bound_emission_10y", quiet=True)
assert_function(scen, year="cumulative")
# Testing cumulative emission bound for model years with mixed intervals
def test_bound_emission_5y(test_mp):
scen = Scenario(test_mp, "test_bound_emission", "standard", version="new")
model_setup(scen, [2020, 2025, 2030, 2040])
scen.commit("initialize test model")
add_bound_emission(scen, bound=1.250)
scen.solve(case="bound_emission_5y", quiet=True)
assert_function(scen, year="cumulative")
| UTF-8 | Python | false | false | 2,990 | py | 156 | test_feature_bound_emission.py | 46 | 0.626421 | 0.599666 | 0 | 76 | 38.342105 | 87 |
STProgrammer/PythonExercises | 2,714,419,378,979 | 890b6fff6d71d0de17af2c974c5c3be51ed0c345 | 76f90608fb23f348afa0479d8fac517e2a39e758 | /example-objects.py | 9c26af9a950f0d3ea0705557e1ddf6e026cc447a | [] | no_license | https://github.com/STProgrammer/PythonExercises | e0153246c42840078c278afb9859dce90f7202e3 | 451431a72076afb911edfee36a2f92cc9e26ab90 | refs/heads/master | 2023-02-22T06:10:15.681781 | 2023-02-16T00:48:03 | 2023-02-16T00:48:03 | 172,222,765 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
class Apple:
def __init__(self, w, c, land, worm):
self.weight = w
self.color = c
self.country = land
self.worm = worm
def getinfo(self):
print("A",self.color,"apple weighing",self.weight,
"grams, from the country ",self.country)
if self.worm:
print("The apple has a worm inside it")
apple = Apple(30,"green","Norway",True)
apple.getinfo()
class Circle:
def __init__(self,r):
self.radius = r
def area(self):
return int(self.radius**2 * math.pi)
circle = Circle(15)
print("The area of the circle is",circle.area())
class Triangle:
def __init__(self, base, height):
self.base = base
self.height = height
def area(self):
return int((self.height * self.base)/2)
triangle = Triangle(4,5)
print("The area of the triangle is",triangle.area())
class Hexagon:
def __init__(self, s1, s2, s3, s4, s5, s6):
self.s1 = s1
self.s2 = s2
self.s3 = s3
self.s4 = s4
self.s5 = s5
self.s6 = s6
def calculate_perimeter(self):
return self.s1 + self.s2 + self.s3 + self.s4 + self.s5 + self.s6
hexagon = Hexagon(5,5,6,4,3,4)
print("The perimeter of the hexagon is",hexagon.calculate_perimeter())
| UTF-8 | Python | false | false | 1,310 | py | 27 | example-objects.py | 26 | 0.574809 | 0.545802 | 0 | 57 | 21.894737 | 72 |
tonyduydao/perfrunner | 5,025,111,750,916 | 79d3334dbace826059778d19791fa2643a106457 | e3a8fea71f740274001ae6fd8eb7a11b8810c89b | /clusters/numa.spec | 6289f1db28e8bb58e87cf98dcb6486b66415368e | [
"Apache-2.0"
] | permissive | https://github.com/tonyduydao/perfrunner | efeedf5faf25c1ef105203dbee94cf315c063a41 | 5e0dab1af6083dc6408efcaa9f8d61262a43e6f2 | refs/heads/master | 2021-01-21T00:07:48.046467 | 2016-07-05T18:55:57 | 2016-07-05T21:02:14 | 62,670,707 | 1 | 0 | null | true | 2016-07-05T21:25:44 | 2016-07-05T21:25:44 | 2016-02-22T00:51:01 | 2016-07-05T21:02:16 | 19,627 | 0 | 0 | 0 | null | null | null | [clusters]
numa =
172.23.105.215:8091
[clients]
hosts =
172.23.105.215
credentials = root:couchbase
[storage]
data = /ssd1
index = /ssd2
[credentials]
rest = Administrator:password
ssh = root:couchbase
[parameters]
Platform = HW
OS = Ubuntu 14.04
CPU = E5-4610 (48 vCPU)
Memory = 256 GB
Disk = SSD
| UTF-8 | Python | false | false | 310 | spec | 77 | numa.spec | 63 | 0.693548 | 0.558065 | 0 | 23 | 12.478261 | 29 |
juandausa/CompetitiveProgrammingCoreSkills | 8,194,797,614,466 | 2a6a633465a7ae5f580839238f09d0ba827ceb7c | f4aa554cb461f93ee76a431ce0898d93d72b40fd | /Semana 4/maximal_manhattan_distance/test_pytest_point.py | b4790ee656a0c8cf4a7342143cf76dcf58809582 | [
"MIT"
] | permissive | https://github.com/juandausa/CompetitiveProgrammingCoreSkills | 7235e98a05497b8ee511de50fac4f385d3f9fa04 | 1ef0d902b6c05afba4b44c34ade87c376187fd1d | refs/heads/master | 2021-03-27T22:17:09.875259 | 2020-03-27T13:55:39 | 2020-03-27T13:55:39 | 247,812,295 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from point import Point
def test_create():
assert Point((0, 0)) is not None
def test_create_point_get_distance():
point = Point((10, 10))
assert point.maximal_manhattan_distance == 0
assert point.maximal_manhattan_distance_coordinate == (10, 10)
def test_create_point_update_distance():
point = Point((0, 0))
other_point = Point((10, 10))
point.update_manhattan_distance(other_point)
assert point.maximal_manhattan_distance_coordinate == (10, 10)
assert point.maximal_manhattan_distance > 0
def test_create_point_update_distance_several_times():
point_a = Point((0, 0))
point_b = Point((10, 10))
point_a.update_manhattan_distance(point_b)
point_c = Point((1, 1))
point_a.update_manhattan_distance(point_c)
assert point_a.maximal_manhattan_distance_coordinate == (point_b.x_coordinate, point_b.y_coordinate)
assert point_a.maximal_manhattan_distance > 0
| UTF-8 | Python | false | false | 956 | py | 52 | test_pytest_point.py | 50 | 0.670502 | 0.638075 | 0 | 29 | 30.896552 | 104 |
awardnopoints/SBSW | 3,865,470,585,403 | 678431f7af253fd24e5d26ae7354c1e1f698e568 | 96f0e3994b31f00b9642b75087c38d57d949b0a8 | /dublin_bus/dbus/urls.py | ac17b6e3ae8fee881f4e47b2552ae5af84a8a166 | [] | no_license | https://github.com/awardnopoints/SBSW | c0b3e025e9ab420cc55b8820d8af9f594abb83d3 | f91b5a4000658fb329454ca31b9b1762fa673715 | refs/heads/master | 2022-12-11T12:38:08.070060 | 2018-10-03T10:34:44 | 2018-10-03T10:34:44 | 138,013,054 | 0 | 0 | null | false | 2022-12-08T02:25:12 | 2018-06-20T09:51:48 | 2018-10-03T10:35:30 | 2022-12-08T02:25:11 | 3,164,758 | 0 | 0 | 7 | Jupyter Notebook | false | false | from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name = 'home'),
path('predict_request/', views.predict_request, name='predict_request'),
path('popStops/', views.popStops, name = 'popStops'),
path('predict_address/' , views.predict_address, name='predict_address'),
#path('get_routes/', views.getRoutes, name = 'getRoutes'),
#path('get_stops/', views.getStops, name= 'getStops'),
path('leap_stores/', views.leapStores, name='leapStores')
]
| UTF-8 | Python | false | false | 530 | py | 84 | urls.py | 61 | 0.637736 | 0.637736 | 0 | 12 | 42.916667 | 81 |
scon/python-horiba-uploader | 13,073,880,458,022 | 5edde6e252e82918679965f8658fbe8ba0362d9f | 0ff8b5d3536a7d08efbc46053f25118d6bf3fcf5 | /horiba.py | f92c5f92bc65458d85f9768c8494f3e75e0ca5ad | [] | no_license | https://github.com/scon/python-horiba-uploader | 8355ec5cc1b38e1f55d3616e2277b2c10064ee69 | de75d17218658ae3e512abc670279200b791d5c4 | refs/heads/master | 2021-09-04T11:51:31.046066 | 2018-01-18T13:09:22 | 2018-01-18T13:09:22 | 112,487,486 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import serial
import time
import math
data_request = "\x02DA\x0304"
class NOMonitor(object):
"""Horiba NO-Monitor"""
def __init__(self, tty):
self.tty = tty
self.NO = 0.00
self.NO2 = 0.00
self.NOX = 0.00
self.serial = serial.Serial()
self.error = False
self.data = "DATASTRING"
def connect(self):
self.serial = serial.Serial(self.tty)
pass
    def disconnect(self):
        self.serial.close()  # close() must be called, not just referenced
def update(self):
time.sleep(1)
self.serial.reset_input_buffer() #Reset Port
self.serial.write(data_request.encode('ascii')) # Request DATA
time.sleep(1)
bytestoread = self.serial.inWaiting()
msg = self.serial.read(bytestoread)
self.data = msg
NO_b = int(msg[10:15])
NO_p = int(msg[15:18])
NO2_b = int(msg[40:45])
NO2_p = int(msg[45:48])
NOX_b = int(msg[70:75])
NOX_p = int(msg[75:78])
if (bytestoread == 99 and str(msg[1:5].decode()) == 'MD03'): #Check DATA
self.error = False
self.NO = round(NO_b * math.pow(10,NO_p) ,2)
self.NO2 = round(NO2_b * math.pow(10,NO2_p),2)
self.NOX = round(NOX_b * math.pow(10,NOX_p),2)
else:
self.error = True
class O3Monitor(object):
"""Horiba O3-Monitor"""
def __init__(self, tty):
self.tty = tty
self.O3 = 0.00
self.serial = serial.Serial()
self.error = False
self.data = "DATASTRING"
def connect(self):
self.serial = serial.Serial(self.tty)
pass
    def disconnect(self):
        self.serial.close()  # close() must be called, not just referenced
def update(self):
print("updating!!!")
time.sleep(1)
self.serial.reset_input_buffer() #Reset Port
self.serial.write(data_request.encode('ascii')) # Request DATA
time.sleep(1)
bytestoread = self.serial.inWaiting()
msg = self.serial.read(bytestoread)
self.data = msg
O3_b = int(msg[10:15])
O3_p = int(msg[15:18])
if (bytestoread == 39 and str(msg[1:5].decode()) == 'MD01'): #Check DATA
self.error = False
self.O3 = round(O3_b * math.pow(10,O3_p),2)
else:
self.error = True
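# Minimal usage sketch (not part of the original module). The serial device
# path '/dev/ttyUSB0' is an assumption; substitute the port your analyzer is on.
if __name__ == '__main__':
    no_monitor = NOMonitor('/dev/ttyUSB0')
    no_monitor.connect()
    no_monitor.update()
    if not no_monitor.error:
        print("NO: %s, NO2: %s, NOX: %s" % (no_monitor.NO, no_monitor.NO2, no_monitor.NOX))
    no_monitor.disconnect()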
| UTF-8 | Python | false | false | 1,969 | py | 4 | horiba.py | 2 | 0.629761 | 0.583037 | 0.001016 | 94 | 19.87234 | 74 |
yagoadc/Sistemas_Distribuidos-20.2- | 1,357,209,712,419 | ac04c3c8846783b846ac0dab180cec5192ed3042 | 7fb7fc917b69868ed86464108f7436980809ccff | /LAB3/servidor.py | 14c64a826b1e5ddc9f2cb43c32155bd3c3361869 | [] | no_license | https://github.com/yagoadc/Sistemas_Distribuidos-20.2- | e1f93b260c6ef2f00d15da0500acce8ff2d67908 | 5781bb3d9ea75675193bccc6758b66af765e6108 | refs/heads/main | 2023-04-12T01:35:03.703368 | 2021-05-07T14:23:00 | 2021-05-07T14:23:00 | 351,749,467 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ####### LAB 3 - Sistemas Distribuidos ###########
# Alunos:
# Rodrigo Passos - 115196299
# Yago Alves - 115212477
##################################################
# lado servidor: implementação concorrente utilizando threads e na finalização do servidor usando join.
from processamento import processa_dados
import socket
import select
import sys
import threading
HOST = '' # '' possibilita acessar qualquer endereco alcancavel da maquina local
PORTA = 5000 # porta onde chegarao as mensagens para essa aplicacao
# cria um socket para comunicacao
sock = socket.socket() # valores default: socket.AF_INET, socket.SOCK_STREAM
# define a lista de I/O de interesse (jah inclui a entrada padrao)
entradas = [sys.stdin]
# armazena as conexoes ativas. Recurso compartilhado por threads.
conexoes = {}
# lock para acesso do dicionario 'conexoes'
lock = threading.Lock()
def iniciaServidor():
'''Cria um socket de servidor e o coloca em modo de espera por conexoes
Saida: o socket criado'''
# cria o socket
# Internet( IPv4 + TCP)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# vincula a localizacao do servidor
sock.bind((HOST, PORTA))
# coloca-se em modo de espera por conexoes
sock.listen(5)
# configura o socket para o modo nao-bloqueante
sock.setblocking(False)
# inclui o socket principal na lista de entradas de interesse
entradas.append(sock)
return sock
def aceitaConexao(sock):
'''Aceita o pedido de conexao de um cliente
Entrada: o socket do servidor
Saida: o novo socket da conexao e o endereco do cliente'''
# estabelece conexao com o proximo cliente
clisock, endr = sock.accept()
# registra a nova conexao
lock.acquire()
conexoes[clisock] = endr
lock.release()
return clisock, endr
def atendeRequisicoes(clisock, endr):
'''Recebe mensagens com uma lista de nomes de arquivos .txt e as envia de volta para o cliente uma mensagem no formato string contendo as 10 palavras mais ocorredas em cada arquivo (ate o cliente finalizar)
Entrada: socket da conexao e endereco do cliente
Saida: '''
while True:
try:
print('Esperando mensagem de'+str(endr)+'...')
# argumento indica a qtde maxima de dados
msg = clisock.recv(1024)
if not msg:
lock.acquire()
# retira o cliente da lista de conexoes ativas
del conexoes[clisock]
lock.release()
clisock.close() # encerra a conexao com o cliente
print('Encerrando conexão.')
print(str(endr) + '-> encerrou')
clisock.close()
return
else:
# Separa o nome dos arquivo por virgula e salva em uma lista
msg = msg.decode("utf-8").split(',')
print('Recebi essa lista de nomes de arquivos: ' + str(msg))
# A partir daqui é feito o acesso para a camada de processamento
print('Processando...')
res = processa_dados(msg)
# Envio dos dados
print('Enviando resposta para '+str(endr))
clisock.send(bytes(res, 'utf-8'))
except:
# Caso aconteça algum erro, o servidor comunica o problema pede novamente o arquivo
clisock.send(
bytes("Tivemos algum problema, tente novamente", 'utf-8'))
def main():
'''Inicializa e implementa o loop principal do servidor'''
print("Iniciando servidor.")
sock = iniciaServidor()
print('Para encerrar servidor, apenas digite out')
print('Para saber se há conexões, apenas digite hist')
clientes =[] #threads ativas
print("Pronto para receber conexoes...")
while True:
# espera por qualquer entrada de interesse
leitura, escrita, excecao = select.select(entradas, [], [])
# tratar todas as entradas prontas
for pronto in leitura:
if pronto == sock: # pedido novo de conexao
clisock, endr = aceitaConexao(sock)
print('Conectado com: ', endr)
# cria nova thread para atender o cliente
cliente = threading.Thread(
target=atendeRequisicoes, args=(clisock, endr))
cliente.start()
clientes.append(cliente)
elif pronto == sys.stdin: # entrada padrao
cmd = input()
if cmd == 'out':
for c in clientes: #aguarda todas as threads terminarem
c.join()
sock.close()
sys.exit()
elif cmd == 'hist': #outro exemplo de comando para o servidor
print(str(conexoes.values()))
main()
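# Minimal client sketch (not part of this lab file), illustrating the protocol
# the server expects: one message with a comma-separated list of .txt file
# names, one string reply. The file names and port below are assumptions.
#     import socket
#     cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     cli.connect(('localhost', 5000))
#     cli.send('arquivo1.txt,arquivo2.txt'.encode('utf-8'))
#     print(cli.recv(4096).decode('utf-8'))
#     cli.close()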
| UTF-8 | Python | false | false | 4,861 | py | 10 | servidor.py | 8 | 0.602432 | 0.595425 | 0 | 135 | 34.940741 | 210 |
tboudreaux/SummerSTScICode | 8,495,445,334,938 | d0341661e2d685bd1292907e30dab679ea2d565a | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sdssj_171424.16+614710.9/sdB_sdssj_171424.16+614710.9_coadd.py | 6b127ae0e1312e4cdc7334b4ec249fdcf945bd58 | [] | no_license | https://github.com/tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[258.600667,61.786361], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_sdssj_171424.16+614710.9/sdB_sdssj_171424.16+614710.9_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_sdssj_171424.16+614710.9/sdB_sdssj_171424.16+614710.9_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 482 | py | 12,360 | sdB_sdssj_171424.16+614710.9_coadd.py | 2,022 | 0.753112 | 0.524896 | 0 | 5 | 95.4 | 404 |
tsouche/kivy_tuto | 8,693,013,812,789 | 643f62526640277ad6fd58c0c9cec41d5943a02c | 2fa3ea088477756e7731b0fc08f0f2b1c6796a38 | /00-set/constants.py | 03a6349f3b3e4c5c93bf7ed4fb5290a907b5a5f7 | [] | no_license | https://github.com/tsouche/kivy_tuto | a11bf676a6fc5ded5871b9eea212a4e7ea41a9a4 | 97362de5e16bdc9f32a7b7e5058ccd26655bd67b | refs/heads/master | 2021-01-12T15:09:13.471877 | 2016-10-17T21:56:56 | 2016-10-17T21:56:56 | 69,352,090 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Created on Oct 15, 2016
@author: thierry
'''
constant_unit = 10
constant_card_height = 15 * constant_unit
constant_card_width = 10 * constant_unit
constant_nb_cols = 4
constant_nb_rows = 3
constant_color_background = (0.0, 0.4, 0.0, 1.0) # rgba
constant_color_highlight = (1.0, 0.2, 0.2, 0.6) # rgba
constant_color_card = (1.0, 1.0, 1.0, 1.0) # rgba
constant_color_card_back = (0.0, 0.2, 0.0, 1.0) # rgba
constant_color_blue = (0.0, 0.0, 0.8, 1.0) # rgba
constant_color_blue_half = (0.0, 0.0, 0.8, 0.3) # rgba
constant_color_red = (0.8, 0.0, 0.0, 1.0) # rgba
constant_color_red_half = (0.8, 0.0, 0.0, 0.3) # rgba
constant_color_green = (0.0, 0.8, 0.0, 1.0) # rgba
constant_color_green_half = (0.0, 0.8, 0.0, 0.3) # rgba
constant_spacing = constant_unit
constant_padding = constant_unit
constant_table_width = constant_card_width * constant_nb_cols \
+ constant_spacing * (constant_nb_cols - 1) \
+ constant_padding * 2
constant_table_height = constant_card_height * constant_nb_rows \
+ constant_spacing * (constant_nb_rows - 1) \
+ constant_padding * 2
| UTF-8 | Python | false | false | 1,241 | py | 9 | constants.py | 8 | 0.564867 | 0.485898 | 0 | 30 | 40.3 | 71 |
Erik-A-Smith/demo-python3-command-pattern | 850,403,529,089 | 9ce0d518b3d845c9721608b5487e5c7a95236176 | b410989abee66fc118ccb537f3990534472d1df4 | /Classes/Deck.py | 1b236cb2360d66286431880168a8b5dd30716153 | [
"MIT"
] | permissive | https://github.com/Erik-A-Smith/demo-python3-command-pattern | eac84567c0db02cbfde800cc7cc42e645a36bd85 | 9a0bde5a91e0fa81d3126e9333a220143833ce19 | refs/heads/master | 2020-04-26T10:48:17.885897 | 2019-03-02T20:50:59 | 2019-03-02T20:50:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from Classes.Card import Card
from Classes.Face import Face
from Classes.Suit import Suit
import random
class Deck:
cards = []
def __init__(self):
self.cards = self.generateDeck(Suit.asArray(),Face.asArray())
def __str__(self):
compiledString = "-----------my deck of cards-----------\n"
for card in self.cards:
compiledString += str(card) + "\n"
return compiledString
def generateDeck(self,suits,faces):
deck = []
for suit in suits:
for face in faces:
card = Card(suit,face,face)
deck.append(card)
return deck
def shuffle(self):
random.shuffle(self.cards)
def draw(self, quantity = 1):
if(len(self.cards) < quantity):
raise Exception('Not enough cards in deck to draw {}'.format(quantity))
# Single draw
if(quantity == 1):
            return self.cards.pop()  # pop one card from the top of the deck
# Multi draw
cards = []
for index in range(quantity):
cards.append(self.cards.pop())
return cards
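# Minimal usage sketch (not part of the original class): build a 52-card deck,
# shuffle it and draw a hand of five.
if __name__ == '__main__':
    deck = Deck()
    deck.shuffle()
    for card in deck.draw(5):
        print(card)
    print("%d cards left in the deck" % len(deck.cards))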
| UTF-8 | Python | false | false | 1,133 | py | 11 | Deck.py | 10 | 0.540159 | 0.538394 | 0 | 44 | 24.75 | 83 |
pbuckin/sample-projects | 12,249,246,773,650 | f087ed1ccbc18ebb72611b6eae39fa10eeb93a96 | aa62fc4f34f19b9d7424711c778e58d7ed19a74e | /NeuralNetworks.py | 5cc8374ced8b7fe4d7b4d735d17cd52a4cc49271 | [] | no_license | https://github.com/pbuckin/sample-projects | 051e785403bac1c0a892cdbf96491ddcb574e444 | 20e7a38d1f36c2c3cdb3fdfe6e455ae0f3d26ed3 | refs/heads/master | 2017-11-24T20:28:20.948736 | 2016-08-08T07:18:07 | 2016-08-08T07:18:07 | 64,881,278 | 0 | 0 | null | false | 2016-08-08T07:18:08 | 2016-08-03T21:40:42 | 2016-08-04T23:33:01 | 2016-08-08T07:18:07 | 13 | 0 | 0 | 0 | Python | null | null | """
A library to construct a neural network class. The biases and weights are
initialized according to a normal distribution and will be updated as the system
learns. Note the options for the activation function: sigmoid, tanh, arctan,
identity, softsign, softplus, and reLU activation functions are available.
For the output, one can also use softmax activation.
Note also the cost function options: quadratic, cross_entropy, and
log_likelihood.
The L2 regularization constant and learning rate can be set when the network is
initialized, and updated using the set_reg_rate and set_learning_rate commands.
"""
# Standard libraries
# Third-party libraries
import numpy
class network(object):
def __init__(self, nodes, actfn="sigmoid", outputfn="sigmoid",
costfn="quadratic", L2_reg_rate=0, learning_rate=0.1):
"""
Initializes the network, where nodes is a list of the number of
nodes needed in each layer of the network. Note the first element in
the list should be the number of inputs and the last should be the
number of outputs.
"""
options= ["sigmoid", "tanh", "arctan", "identity", "softsign",
"softplus", "softplus"]
if actfn not in options:
raise ValueError("Not a valid value for actfn")
options.append("softmax")
if outputfn not in options:
raise ValueError("Not a valid value for outputfn")
if costfn not in ["quadratic", "cross_entropy", "log_likelihood"]:
raise ValueError("Not a valid alue for costfn")
self.num_nodes=nodes
self.num_layers=len(nodes)
self.biases={}
self.weights={}
self.activations={}
self.actinputs={}
self.actfn=actfn
self.costfn=costfn
self.outputfn=outputfn
self.step_size=learning_rate
self.reg_rate=L2_reg_rate
        # Initialize weights and biases
for k in range(self.num_layers-1):
self.weights[k]=numpy.random.randn(nodes[k+1],nodes[k])*0.01+0.1
self.biases[k]=numpy.random.randn(nodes[k+1],1)*0.01+0.1
    def set_reg_rate(self, k):
        self.reg_rate = k
def set_learning_rate(self, k):
self.step_size=k
def feed_forward(self, k):
"""
Evaluates the values that enter layer k+1, given the values that left
layer k.
"""
return numpy.dot(self.weights[k], self.activations[k])+self.biases[k]
def evaluate(self, inval):
"""
Evaluates the output of the network at a given input list inval. This
stores the activations of all layers, with the final activation layer
being the output.
"""
self.activations[0]=numpy.reshape(numpy.asarray(inval),
(self.num_nodes[0],1))
        # hidden layers use actfn; the output layer is handled below with outputfn
        for i in range(self.num_layers-2):
            self.actinputs[i+1]=self.feed_forward(i)
            input=self.actinputs[i+1]
            self.activations[i+1]=act(input, self.actfn)
self.actinputs[self.num_layers-1]=self.feed_forward(self.num_layers-2)
input=self.actinputs[self.num_layers-1]
self.activations[self.num_layers-1]=act(input, self.outputfn)
def feedback(self, target):
"""
Determines the corrections to be made to the network after evaluating
an output.
"""
target=numpy.reshape(numpy.asarray(target),(self.num_nodes[-1],1))
self.del_b={}
self.del_w={}
# Output backpropagation, using cost function and output function.
inputs=self.actinputs[self.num_layers-1]
outputs=self.activations[self.num_layers-1]
ap=act_prime(inputs, outputs, self.outputfn)
cp=cost_prime(outputs, target, self.costfn)
if self.outputfn=="softmax":
delta=numpy.dot(ap, cp)
else:
delta=ap*cp
self.del_b[self.num_layers-2]=delta
self.del_w[self.num_layers-2]= (
numpy.dot(delta,self.activations[self.num_layers-2].transpose()))
# Previous layers backpropagation, using inner activation function.
for i in range(self.num_layers-3,-1,-1):
inputs=self.actinputs[i+1]
outputs=self.activations[i+1]
ap=act_prime(inputs, outputs, self.actfn)
self.del_b[i]=numpy.dot(self.weights[i+1].transpose(),
self.del_b[i+1])*ap
self.del_w[i]=numpy.dot(self.del_b[i],
self.activations[i].transpose())
def update(self):
"""
Updates the weights and biases after the feedback step.
"""
for i in self.weights:
self.weights[i]=((1-self.step_size*self.reg_rate)*self.weights[i]
-self.step_size*self.del_w[i])
for i in self.biases:
self.biases[i]=self.biases[i]-self.step_size*self.del_b[i]
def train(self, inputs, targets, epochs=100):
"""
        Trains the system to map the inputs to the given targets. Note that
        the method requires a list of (lists of) inputs and targets.
"""
for k in range(epochs):
for i in range(len(inputs)):
self.evaluate(inputs[i])
self.feedback(targets[i])
self.update()
def test(self, inputs, targets, tol=0.05):
"""
Compares the output of the network to given target results and generates
a report on the results.
"""
tests=len(inputs)
correct=0
for i in range(tests):
self.evaluate(inputs[i])
result=self.activations[self.num_layers-1]
if cost(result, targets[i], self.costfn)<tol:
correct+=1
print(str(correct)+" correct out of "+str(tests))
percentage=format(correct/tests*100, '.8f')
print("The network accurately categorizes "+str(percentage)+
" percent of test cases")
def predict(self, input_, result="category"):
""" Generates output based on the input given. Note that there are
options for the desired output:
category: This tells the predictor that you want to categorize the
input, and as such the result will be the index of the
largest output value, that is, the most likely category.
function: This tells the predictor that you want the output of the
function, and so it will give you a vector all the outputs.
"""
self.evaluate(input_)
if result=="category":
return numpy.argmax(self.activations[self.num_layers-1])
elif result=="function":
return self.activations[self.num_layers-1]
else:
raise ValueError("Invalid input value for variable result")
def act(input_, actfn):
func=getattr(actfunctions, actfn)
return func(input_)
def act_prime(input_, output, actfn):
func=getattr(actprimefunctions, actfn)
return func(input_, output)
def cost(result, target, costfn):
"""
Returns the error between the output of the network and the actual
target value.
"""
if len(result) != len(target):
raise IndexError("Target and output of different lengths")
target=numpy.reshape(numpy.asarray(target),(len(result),1))
func=getattr(costfunctions, costfn)
return func(result, target)
def cost_prime(result, target, costfn):
"""Returns the partial derivatives of the cost function"""
func=getattr(costprimefunctions, costfn)
return func(result, target)
class actfunctions(object):
def sigmoid(input_):
return 1/(1+numpy.exp(-input_))
def tanh(input_):
return numpy.tanh(input_)
def arctan(input_):
return numpy.arctan(input_)
def identity(input_):
result=input_[:]
return result
def softsign(input_):
return input_ /(1 + numpy.absolute(input_))
def softplus(input_):
return numpy.log(1+numpy.exp(input_))
def reLU(input_):
result=numpy.zeros((len(input_),1))
for i in range(len(input_)):
if input_[i]<=0:
result[i]=0
else:
result[i]=input_[i]
return result
def softmax(input_):
return numpy.exp(input_)/sum(numpy.exp(input_))
class actprimefunctions(object):
def sigmoid(input_, output):
return output*(1-output)
def tanh(input_, output):
return 1-output**2
def arctan(input_, output):
return 1/(input_**2 + 1)
def identity(input_, output):
return numpy.ones((len(input_),1))
def softsign(input_, output):
return 1/(1+numpy.absolute(input_))**2
def softplus(input_, output):
return 1/(1+numpy.exp(-input_))
def reLU(input_, output):
result=numpy.zeros((len(input_),1))
for i in range(len(input_)):
if input_[i]<=0:
result[i]=0
else:
result[i]=1
return result
def softmax(input_, output):
result=numpy.zeros((len(output), len(output)))
for i in range(len(output)):
for j in range(len(output)):
if i==j:
result[i,j]=output[i]*(1-output[i])
else:
result[i,j]=-output[i]*output[j]
return result
class costfunctions(object):
def quadratic(result, target):
error=sum((result-target)**2)
error=error/(2*len(result))
return error
def cross_entropy(result, target):
error=-sum(target*numpy.log(result)+(1-target)*numpy.log(1-result))
return error
def log_likelihood(result, target):
error=sum(-target*numpy.log(result))
return error
class costprimefunctions(object):
def quadratic(result, target):
return result-target
def cross_entropy(result, target):
prime=-(target/result)+((1-target)/(1-result))
return prime
def log_likelihood(result, target):
return -target/result
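# Minimal usage sketch (not part of the original library): fit the network on a
# tiny toy problem. The layer sizes, functions, learning rate and epoch count
# below are arbitrary choices, not recommendations.
if __name__ == '__main__':
    net = network([2, 4, 2], actfn="tanh", outputfn="softmax",
                  costfn="log_likelihood", learning_rate=0.05)
    inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]       # XOR-style inputs
    targets = [[1, 0], [0, 1], [0, 1], [1, 0]]      # one-hot targets
    net.train(inputs, targets, epochs=500)
    print(net.predict([0, 1], result="category"))   # most likely class index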
| UTF-8 | Python | false | false | 10,427 | py | 2 | NeuralNetworks.py | 2 | 0.584828 | 0.575813 | 0 | 285 | 35.585965 | 80 |
nondejus/clvm | 4,535,485,507,824 | 72c907a0e0fdf769ccf8515cf83ec87df49aea4b | 0d7afbd8a6c0b96044e10ee2ae85c33863007572 | /setup.py | 67225a20f29467e7ead40b0eddfb578b20c20094 | [
"Apache-2.0"
] | permissive | https://github.com/nondejus/clvm | ffa437e2f6b7d92f91c7eb81ce2722bd7e59a959 | c0414d916d2685c3eecef02ef53f39349eabae09 | refs/heads/master | 2020-09-26T16:32:49.302638 | 2019-12-06T09:36:12 | 2019-12-06T09:36:12 | 226,292,370 | 0 | 0 | Apache-2.0 | true | 2019-12-06T09:27:37 | 2019-12-06T09:27:36 | 2019-12-05T00:10:45 | 2019-12-03T05:52:22 | 399 | 0 | 0 | 0 | null | false | false | #!/usr/bin/env python
from setuptools import setup
from clvm.version import version
setup(
name="clvm",
version=version,
packages=[
"clvm",
"clvm.ecdsa",
],
author="Chia Network, Inc.",
author_email="kiss@chia.net",
url="https://github.com/Chia-Network/clvm",
license="https://opensource.org/licenses/Apache-2.0",
description="Script compiler.",
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: Apache Software License',
'Topic :: Security :: Cryptography',
],)
| UTF-8 | Python | false | false | 723 | py | 58 | setup.py | 11 | 0.600277 | 0.589212 | 0 | 27 | 25.777778 | 61 |
haileystudy/leetcode | 5,566,277,639,691 | b5db008363fb3ea728d3aad8e152dc3754a00cc5 | cd87cadf8a20df8617a46f260fac95df7d9587da | /442-find-all-duplicates-in-an-array.py | 884302b361348dc5669645acf4da9fee15e37310 | [] | no_license | https://github.com/haileystudy/leetcode | 0fbf0eaac46b07fe98412a1f6786a77ffbcbb8d7 | d384c8f953e6e9ff1ff27ce84e74539dd25beba3 | refs/heads/master | 2020-04-01T21:13:17.281229 | 2019-02-17T14:22:12 | 2019-02-17T14:22:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def findDuplicates(self, nums):
result = []
onlyone = set()
for num in nums:
if num not in onlyone:
onlyone.add(num)
else:
result.append(num)
return result
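# Quick usage sketch (not part of the original solution). Each element of the
# input appears once or twice; duplicates are reported in encounter order.
if __name__ == '__main__':
    print(Solution().findDuplicates([4, 3, 2, 7, 8, 2, 3, 1]))  # [2, 3]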
| UTF-8 | Python | false | false | 264 | py | 42 | 442-find-all-duplicates-in-an-array.py | 41 | 0.481061 | 0.481061 | 0 | 10 | 25.4 | 35 |
tanmay-rpatil/wiki-demo | 3,770,981,326,120 | a181ee067cd9595a1fc2c744a37835f8ae0d1dac | f9894a5dda6aa93dee5a195aca5453f834600161 | /api/urls.py | d9c9ff73b374ce6f5694d3acee0805518042188e | [] | no_license | https://github.com/tanmay-rpatil/wiki-demo | 2c547a6fd9a273c5c71f583759f35c1079167148 | b4bde99fb5f31a3fa0d9d3a31383b1f3b4c6cc8d | refs/heads/master | 2023-06-02T03:12:08.009980 | 2021-06-22T07:34:13 | 2021-06-22T07:34:13 | 377,056,877 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from . import views
urlpatterns = [
path('', views.apiOverview, name="api-overview"),
path('entry-list/', views.entrylist ,name='entry-list'),
path('entry-search/', views.entrysearch ,name='entry-search'),
path('entry-random/', views.entryrandom ,name='entry-random'),
path('entry-detail/<int:pk>/', views.entrydeatil ,name='entry-detail'),
path('entry-create/', views.entrycreate ,name='entry-create'),
path('entry-update/<int:pk>/', views.entryupdate ,name='entry-update'),
path('entry-delete/<int:pk>/', views.entrydelete ,name='entry-delete'),
]
urlpatterns = format_suffix_patterns(urlpatterns) # in order to accept URLS like "/entry-details/2.json" or "/entry-details/2.api" or "/entry-list.json"
| UTF-8 | Python | false | false | 805 | py | 13 | urls.py | 7 | 0.726708 | 0.724224 | 0 | 14 | 56.5 | 152 |
priyanshuinn/Survelon | 13,434,657,720,474 | 1f88ee4f2d069092ab19eff5e674ee21ad34176f | 77a55d19b1b5ff33055e5de04c5ed49350a73bf6 | /course/urls.py | 12360d489257d60af61dd14e5315cf05e063c92b | [] | no_license | https://github.com/priyanshuinn/Survelon | d56def9a9ab99590e92cd0a5419afd484b51bc66 | a0229cea1321e28a791db52ff109773f693462f0 | refs/heads/main | 2023-04-16T12:05:59.130597 | 2021-04-12T18:41:56 | 2021-04-12T18:41:56 | 356,881,305 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from . import views
from .views import POSTSpeed
urlpatterns = [
path('post/<int:my_id>', views.post, name = 'post'),
path('describe', views.overview, name = 'overview'),
path('Rvedio',views.display,name='real_video'),
path('stream/',POSTSpeed.as_view(),name='stream'),
path('show/',views.show,name='show'),
path('maskload/',views.mask,name='mask'),
path('mask/',views.load,name='mask_page'),
] | UTF-8 | Python | false | false | 452 | py | 24 | urls.py | 10 | 0.654867 | 0.654867 | 0 | 12 | 36.75 | 56 |
friquette/OC_Projet_09 | 11,527,692,225,347 | b2f07f149ed76ef1dddee28e5914450cbf4ef16b | 8d317aaa59b18c7b5253e6b7835439a2314dad1e | /litreview/follows/urls.py | c5785084624193bba84a52588a45540bfc886327 | [] | no_license | https://github.com/friquette/OC_Projet_09 | 78db03e8d3ddcc552e69ebc9d43dbe34a46831de | 1d2d3adddf16233f032f5471a362412abde2b88f | refs/heads/main | 2023-06-21T13:08:50.168383 | 2021-08-06T19:04:44 | 2021-08-06T19:04:44 | 359,482,813 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('', views.follows, name='follows'),
path('unfollow/', views.unfollows, name='unfollows')
]
| UTF-8 | Python | false | false | 171 | py | 20 | urls.py | 8 | 0.678363 | 0.678363 | 0 | 8 | 20.375 | 56 |
quantmind/lux | 154,618,853,204 | 86789442dd4f0b26e659c35941f4a800a4013f69 | d6b8c5594c668cd2aa907f79e80ea00f97b82d97 | /lux/utils/files.py | 17c303e427949b0b2e8c71817bfc06c7953ce00e | [
"BSD-3-Clause"
] | permissive | https://github.com/quantmind/lux | 68d44242cd10a012f32888174d9db801c09b2715 | 7318fcd86c77616aada41d8182a04339680a554c | refs/heads/master | 2021-01-23T11:32:03.180026 | 2018-01-06T09:28:30 | 2018-01-06T09:28:30 | 16,417,125 | 21 | 16 | BSD-3-Clause | false | 2019-10-22T23:21:37 | 2014-01-31T18:53:55 | 2019-08-13T15:34:39 | 2019-10-22T23:21:36 | 9,589 | 20 | 9 | 12 | Python | false | false | '''
Some code is taken from django:
Copyright (c) Django Software Foundation and individual contributors.
All rights reserved.
'''
import os
import re
import itertools
__all__ = ['Filehandler']
def skipfile(name):
return name.startswith('.') or name.startswith('_')
def directory(dir):
bd, fname = os.path.split(dir)
return dir if fname else bd
def get_rel_dir(dir, base, res=''):
    '''Return the path of ``dir`` relative to ``base``
'''
dir = directory(dir)
base = directory(base)
if len(base) > len(dir):
raise RuntimeError('Base directory not in path')
if dir == base:
return res
dir, fname = os.path.split(dir)
if res:
fname = os.path.join(fname, res)
return get_rel_dir(dir, base, fname)
def get_valid_filename(s):
"""
Returns the given string converted to a string that can be used for a clean
filename. Specifically, leading and trailing spaces are removed; other
spaces are converted to underscores; and anything that is not a unicode
alphanumeric, dash, underscore, or dot, is removed.
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
"""
s = s.strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
class Filehandler:
def open(self, name, mode='rb'):
"""Retrieves the specified file from storage, using the optional mixin
class to customize what features are available on the File returned.
"""
raise NotImplementedError()
def save(self, file):
'''Save an instance of :class:`~.File` into the backened storage.'''
name = file.name
name = self.get_available_name(name)
name = self._save(name, file)
# Store filenames with forward slashes, even on Windows
return name.replace('\\', '/')
# These methods are part of the public API, with default implementations.
def get_valid_name(self, name):
"""
Returns a filename, based on the provided filename, that's suitable for
use in the target storage system.
"""
return get_valid_filename(name)
def get_available_name(self, name):
"""
Returns a filename that's free on the target storage system, and
available for new content to be written to.
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, add an underscore and a number
# (before the file extension, if one exists) to the filename until
# the generated filename doesn't exist.
count = itertools.count(1)
while self.exists(name):
# file_ext includes the dot.
name = os.path.join(dir_name, "%s_%s%s" %
(file_root, next(count), file_ext))
return name
def path(self, name):
"""
Returns a local filesystem path where the file can be retrieved using
Python's built-in open() function. Storage systems that can't be
accessed using open() should *not* implement this method.
"""
raise NotImplementedError(
"This backend doesn't support absolute paths.")
# The following methods form the public API for storage systems, but with
# no default implementations. Subclasses must implement *all* of these.
def delete(self, name):
"""
Deletes the specified file from the storage system.
"""
raise NotImplementedError()
def exists(self, name):
"""
        Returns True if a file referenced by the given name already exists
in the storage system, or False if the name is available for a
new file.
"""
raise NotImplementedError()
def listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple of lists;
the first item being directories, the second item being files.
"""
raise NotImplementedError()
def size(self, name):
"""
Returns the total size, in bytes, of the file specified by name.
"""
raise NotImplementedError()
def url(self, name):
"""
Returns an absolute URL where the file's contents can be accessed
directly by a Web browser.
"""
raise NotImplementedError()
def accessed_time(self, name):
"""
Returns the last accessed time (as datetime object) of the file
specified by name.
"""
raise NotImplementedError()
def created_time(self, name):
"""
Returns the creation time (as datetime object) of the file
specified by name.
"""
raise NotImplementedError()
def modified_time(self, name):
"""
Returns the last modified time (as datetime object) of the file
specified by name.
"""
raise NotImplementedError()
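# Illustrative sketch (not part of the public API): a concrete backend only has
# to fill in the abstract methods it needs, e.g. a local-filesystem handler.
#     class LocalFilehandler(Filehandler):
#         def __init__(self, root):
#             self.root = root
#         def path(self, name):
#             return os.path.join(self.root, name)
#         def exists(self, name):
#             return os.path.exists(self.path(name))
#         def open(self, name, mode='rb'):
#             return open(self.path(name), mode)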
| UTF-8 | Python | false | false | 4,997 | py | 269 | files.py | 203 | 0.613568 | 0.611567 | 0 | 160 | 30.23125 | 79 |
pragmagrid/pragma_boot | 18,133,351,958,569 | 1d78f4ae8287f78689e026bc253ae6f2b24fde3b | c2c99b5331f6cc5430fcf7e8d372585b155691c1 | /pragma/repository/processor/xmlprocessor.py | 9ce27a5b8d78d354fcdf21ae0a777cfdadacd5cd | [] | no_license | https://github.com/pragmagrid/pragma_boot | cca8568521edc59ceffeffdc78a62af83cd6eb7f | efcd402eef84a7e26f617fb7977557839419ae95 | refs/heads/master | 2021-02-27T04:57:07.885271 | 2018-08-22T03:41:47 | 2018-08-22T03:41:47 | 11,617,534 | 5 | 2 | null | false | 2017-03-30T21:53:50 | 2013-07-23T19:46:50 | 2017-03-17T12:52:49 | 2017-03-30T21:53:50 | 384 | 4 | 2 | 0 | Python | null | null | import logging
import os
import xml.etree.ElementTree as ET
from xml.dom import minidom
from pragma.utils import Abort, ClusterNetwork
class XmlInput:
def __init__(self, xmltree, dir):
self.xml = xmltree
self.dir = dir
logging.basicConfig()
self.logger = logging.getLogger(self.__module__)
# information about frontend/compute nodes disks
self.diskinfo = {}
# set values from xml
self.getValues()
def getValues(self):
"""collect values that will be used for processing"""
self.setDiskInfo() # collect disk images info
def getArch(self):
virt = self.xml.find("virtualization")
if virt != None:
return virt.attrib['arch']
else:
self.logger.error("Unable to find virtualization tag")
return None
def getImageNames(self):
""" Returns an array of virtual cluster images file names """
names = []
for key in self.diskinfo.keys():
vals = self.diskinfo[key]
parts = vals['parts']
if parts:
names += parts
else:
if 'file' in vals:
names.append(vals['file'])
return names
def setDiskInfo(self):
"""Parse xml tree info and collect disk-related information for
frontend and compute nodes. Return it as a dictionary where
        keys are 'frontend' and 'compute' (when present), and values are dictionaries.
        For example:
        {'type': 'raw', 'name': 'qemu', 'file': 'disk-image.img'}
"""
vctree = self.xml.__dict__['_root'] # xml tree object
for nodetype in ('frontend', 'compute'):
diskinfo = {}
# collect disk info for each nodetype
node = vctree.find("./%s" % nodetype) # object for frontend or compute
try:
diskinfo.update(node.find(".//disk/driver").attrib) # add keys 'type', 'name'
diskinfo.update(node.find(".//disk/source").attrib) # add key 'file'
type,parts = self.getFileType(node) # check file type
if type:
diskinfo['type'] = type
if not parts:
Abort("Error in cluster xml file. Check <part> definition for disk image %s" % diskinfo['file'])
diskinfo.update({'parts':parts})
self.diskinfo[nodetype] = diskinfo
except AttributeError:
continue
def getFileType(self, node):
""" check the virtual image file type and if there are multiple parts"""
# check if <file type="ftype"> is present
ftype = node.find(".//file")
try:
type = ftype.attrib['type']
except AttributeError:
type = None
# collect file parts
parts = []
if type:
partlist = node.findall(".//part")
for item in partlist:
parts.append(item.text)
return type, parts
def getDiskInfo(self):
return self.diskinfo
def get_disk(self, node):
disk = self.xml.find("%s/domain/devices/disk/source" % node)
if disk != None:
return disk.attrib
else:
self.logger.error("Unable to find disk for node %s" % node)
return None
class XmlOutput:
def __init__(self, filename):
self.filename = filename
self.compute_filenames = {}
self.cpus_per_node = {}
self.network = None
logging.basicConfig()
self.logger = logging.getLogger(self.__module__)
def __str__(self):
vc = ET.Element('vc')
frontend = ET.SubElement(vc, 'frontend', attrib={
'fqdn': self.network.get_fqdn(),
'name': self.network.get_frontend(),
'gw': self.network.get_gw(self.network.get_frontend())
})
for iface in self.network.get_ifaces('frontend'):
iface_attrs = self.network.get_net_attrs(iface.network)
iface_attrs.update(iface.get_attrs()) # local overrides network
ET.SubElement(frontend, iface.network, attrib=iface_attrs)
computes = ET.SubElement(vc, 'compute', attrib={
'count':str(len(self.network.get_computes()))})
for node in self.network.get_computes():
compute = ET.SubElement(computes, 'node', attrib={
'name':self.network.get_node_name(node),
'cpus':str(self.cpus_per_node[node]),
'gw': self.network.get_gw(node)})
for iface in self.network.get_ifaces(node):
iface_attrs = self.network.get_net_attrs(iface.network)
iface_attrs.update(iface.get_attrs()) # local overrides network
ET.SubElement(compute, iface.network, attrib=iface_attrs)
self.append_network_key(vc)
return self.prettify(vc)
def append_network_key(self, vc):
network = ET.SubElement(vc, 'network')
ET.SubElement(network, 'dns', attrib = {
'ip':self.dns, 'search':"local", 'domain':""})
key = ET.SubElement(vc, 'key')
key.text = self.key
def clean(self):
if os.path.exists(self.filename):
os.remove(self.filename)
for node in self.compute_filenames:
if os.path.exists(self.compute_filenames[node]):
os.remove(self.compute_filenames[node])
def get_compute_names(self):
return sorted(self.compute_filenames.keys())
def get_compute_vc_out(self, node):
vc = ET.Element('vc')
ET.SubElement(vc, 'frontend', attrib={
'fqdn': self.network.get_fqdn()})
compute = ET.SubElement(vc, 'compute', attrib={
'name':self.network.get_node_name(node),
'gw': self.network.get_gw(node)
})
for iface in self.network.get_ifaces(node):
iface_attrs = self.network.get_net_attrs(iface.network)
iface_attrs.update(iface.get_attrs()) # local overrides network
ET.SubElement(compute, iface.network, attrib=iface_attrs)
self.append_network_key(vc)
return self.prettify(vc)
def get_frontend(self):
return self.network.get_frontend()
def get_kvm_diskdir(self):
return self.kvm_diskdir
def get_vc_out(self, node):
return self.compute_filenames[node]
def prettify(self, elem):
"""Return a pretty-printed XML string for the Element.
"""
rough_string = ET.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
def read(self):
root = ET.parse(self.filename).getroot()
self.key = root.find("key").text.strip()
frontend = root.find('frontend')
self.network = ClusterNetwork(frontend.attrib['name'], frontend.attrib['fqdn'])
for net in frontend:
self.network.add_net(net.tag, net.attrib['subnet'], net.attrib['netmask'], net.attrib['mtu'])
self.network.add_iface(
self.network.get_frontend(), net.tag, net.attrib['ip'],
net.attrib['mac'], net.attrib['iface'])
self.network.add_gw(self.network.get_frontend(), frontend.attrib['gw'])
compute = root.find('compute')
dir = os.path.dirname(self.filename)
for node in compute.getchildren():
node_name = node.attrib["name"]
for net in node:
self.network.add_iface(node_name, net.tag,
net.attrib['ip'], net.attrib['mac'], net.attrib['iface'])
self.network.add_gw(node_name, node.attrib['gw'])
self.compute_filenames[node_name] = os.path.join(dir, "%s.xml" % node_name)
self.cpus_per_node[node_name] = node.attrib["cpus"]
self.dns = root.find("network").find("dns").attrib["ip"]
def set_frontend(self, name, public_ip, private_ip, fqdn):
self.frontend = {'name':name, 'public_ip':public_ip, 'private_ip':private_ip, 'fqdn':fqdn}
def set_key(self, key):
file = open(key, "r")
self.key = file.read()
file.close()
self.key = self.key.rstrip("\n")
def set_kvm_diskdir(self, dir):
self.kvm_diskdir = dir
def set_network(self, cluster_network, dns):
self.network = cluster_network
self.dns = dns
def set_compute_nodes(self, compute_nodes, cpus_per_node):
dir = os.path.dirname(self.filename)
for node in compute_nodes:
self.compute_filenames[node] = os.path.join(dir, "%s.xml" % node)
self.cpus_per_node = cpus_per_node
def write_compute(self, node):
file = open(self.compute_filenames[node], "w")
file.write(self.get_compute_vc_out(node))
file.close()
self.logger.debug("Writing vc-out file to %s" % self.compute_filenames[node])
def write(self):
file = open(self.filename, "w")
file.write(str(self))
file.close()
self.logger.debug("Writing vc-out file to %s" % self.filename)
for node in self.network.get_computes():
self.write_compute(node)
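# Minimal sketch (not part of the original module): XmlInput can be exercised
# with an in-memory tree; the XML snippet and directory below are invented.
if __name__ == '__main__':
    toy_tree = ET.ElementTree(ET.fromstring(
        '<vc><virtualization arch="x86_64"/><frontend/><compute/></vc>'))
    vc_in = XmlInput(toy_tree, '/tmp')
    print(vc_in.getArch())  # prints: x86_64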
| UTF-8 | Python | false | false | 9,258 | py | 46 | xmlprocessor.py | 34 | 0.572046 | 0.571938 | 0 | 246 | 36.634146 | 120 |
ghilbing/Ejemplos | 6,373,731,516,955 | 0ecd0d0fcc355f9ed4c6b535630240385e4828d3 | 2491df3f643539e6055bb0b2a4b659474c57491f | /isValidBinarySearchTree.py | 183e97b32d0188ba957d1ad448fbfc3941839c03 | [] | no_license | https://github.com/ghilbing/Ejemplos | 85efc91346028b8a3d26d7680d9286b26234c771 | 339a45ef48c9a61002a01f7c823cc42d34fab409 | refs/heads/master | 2021-05-13T13:58:33.010157 | 2018-02-26T20:44:44 | 2018-02-26T20:44:44 | 116,724,506 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def isValidBST(A):
    output = []
    inOrder(A, output)
    for i in range(1, len(output)):
        if output[i - 1] >= output[i]:
            return 0
    return 1

def inOrder(A, output):
    if A is None:
        return
    inOrder(A.left, output)
    output.append(A.val)
    inOrder(A.right, output)

def isValidBSTII(root):
    if not root:
        return True
    stack = []
    res = []
    while root or stack:
        if root:
            stack.append(root)
            root = root.left
        else:
            root = stack.pop()
            res.append(root.val)
            root = root.right
    if res == sorted(res) and len(res) == len(set(res)):
        return True
    else:
        return False
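# Quick check sketch. TreeNode is not defined in the original file, so a minimal
# stand-in is declared here for the demo.
if __name__ == '__main__':
    class TreeNode:
        def __init__(self, val, left=None, right=None):
            self.val, self.left, self.right = val, left, right
    valid = TreeNode(2, TreeNode(1), TreeNode(3))
    invalid = TreeNode(1, TreeNode(2), TreeNode(3))
    print(isValidBST(valid), isValidBST(invalid))      # 1 0
    print(isValidBSTII(valid), isValidBSTII(invalid))  # True False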
| UTF-8 | Python | false | false | 745 | py | 150 | isValidBinarySearchTree.py | 149 | 0.520805 | 0.515436 | 0 | 35 | 20.285714 | 56 |
ov8525/ohany-villa-project-1 | 9,285,719,332,171 | aac68c8308a85446c70992e366ce5428deb03255 | b65ff6ca3514f24a8bd6faba27c3c2d932262ce5 | /main.py | 358e50a83104ff797afd118bbf8a3257c0a63aac | [] | no_license | https://github.com/ov8525/ohany-villa-project-1 | e7e2bb0a9d73f74bfe7d9c1338051526fd66331f | 30b830c1c2c23929cbc46628c97881710c5e4dfc | refs/heads/master | 2023-01-28T00:58:15.589420 | 2020-11-29T19:15:39 | 2020-11-29T19:15:39 | 317,003,435 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | while True:
print("Temperature(F): " + input.temperature(TemperatureUnit.FAHRENHEIT))
if input.temperature(TemperatureUnit.FAHRENHEIT) > 160 :
light.set_brightness(100)
light.set_all(light.rgb(255,0,0))
pause(300)
light.clear()
pause(300)
elif input.temperature(TemperatureUnit.FAHRENHEIT) > 60 :
light.set_brightness(10)
light.set_pixel_color(0, light.rgb(255,165,0))
pause(300)
light.clear()
light.set_pixel_color(1, light.rgb(255,255,0))
pause(300)
light.clear()
light.set_pixel_color(2, light.rgb(50,238,50))
pause(300)
light.clear()
light.set_pixel_color(3, light.rgb(0,255,0))
pause(300)
light.clear()
light.set_pixel_color(4, light.rgb(20,81,230))
pause(300)
light.clear()
light.set_pixel_color(5, light.rgb(0,255,255))
pause(300)
light.clear()
light.set_pixel_color(6, light.rgb(0,0,255))
pause(300)
light.clear()
light.set_pixel_color(7, light.rgb(238,130,238))
pause(300)
light.clear()
light.set_pixel_color(8, light.rgb(255,105,180))
pause(300)
light.clear()
light.set_pixel_color(9, light.rgb(255,0,0))
pause(300)
light.clear()
else:
light.set_all (light.rgb(0,0,255))
| UTF-8 | Python | false | false | 1,394 | py | 2 | main.py | 2 | 0.571736 | 0.47561 | 0 | 42 | 32.190476 | 77 |
akdeveloper0791/green_content | 12,584,254,217,497 | 71d78cf847f9e9530ca95f3d9ff11ff8ba11ec86 | 9d65e8c566992b53ed164d70f6e07a2618725871 | /player/migrations/0027_auto_20190323_1711.py | 43b241df3ff3f8edf3d30aab704f6fad2548081e | [] | no_license | https://github.com/akdeveloper0791/green_content | 0141b0784a4a58a84429cac5d326f65edbf11921 | c04c8d863b90cd2ff4d2e26b4e814f4352251191 | refs/heads/master | 2022-05-18T09:37:51.475697 | 2020-10-26T05:27:10 | 2020-10-26T05:27:10 | 164,104,618 | 0 | 0 | null | false | 2022-04-22T21:05:12 | 2019-01-04T12:25:38 | 2020-10-26T05:27:16 | 2022-04-22T21:05:10 | 203,759 | 0 | 0 | 3 | JavaScript | false | false | # Generated by Django 2.0.9 on 2019-03-23 11:41
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('player', '0026_auto_20190323_1442'),
]
operations = [
migrations.AddField(
model_name='campaign_reports',
name='last_played_at',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='age_geder_metrics',
name='created_at',
field=models.DateTimeField(default=datetime.datetime(2019, 3, 23, 17, 10, 54, 582538)),
),
migrations.AlterField(
model_name='campaign_reports',
name='created_at',
field=models.DateTimeField(default=datetime.datetime(2019, 3, 23, 17, 10, 54, 584634)),
),
migrations.AlterField(
model_name='last_seen_metrics',
name='accessed_at',
field=models.DateTimeField(default=datetime.datetime(2019, 3, 23, 17, 10, 54, 584634)),
),
migrations.AlterField(
model_name='player',
name='registered_at',
field=models.DateTimeField(default=datetime.datetime(2019, 3, 23, 17, 10, 54, 574413)),
),
]
| UTF-8 | Python | false | false | 1,289 | py | 310 | 0027_auto_20190323_1711.py | 246 | 0.581071 | 0.498061 | 0 | 39 | 32.051282 | 99 |
heimish-kyma/OCR-Detectors | 927,712,978,871 | abbd2e046e1f107d1e1806f451f01239e700027f | 3f5f70f23259d76f23f55517f1de068631669496 | /src/datasets/textfusenet/__init__.py | 34249cbce084d4335be9a95f683d078e8e85fe00 | [] | no_license | https://github.com/heimish-kyma/OCR-Detectors | 4ab662028ae13a3081a59856f65bd8aa5a765938 | 47e953f553f19812c9b28ac4aeca1124c9ff48e3 | refs/heads/main | 2023-08-29T07:52:03.636896 | 2021-11-09T03:00:46 | 2021-11-09T03:00:46 | 424,207,894 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .base import label2cls, cls2label
from .base import TextFuseBaseDS
from .synthtext import TextFuseNetSynthTextDS
from .icdar13 import TextFuseNetICDAR13DS
class CollateFN():
def __init__(self):
pass
def __call__(self, batch):
return tuple(zip(*batch)) | UTF-8 | Python | false | false | 283 | py | 54 | __init__.py | 43 | 0.713781 | 0.69258 | 0 | 12 | 22.666667 | 45 |
polyswarm/worker-registry | 10,823,317,613,314 | 3805de0baeba8bb95e4446737f172219739f3bf4 | 61ffbb1f4d8b1a77b7dbb1dd84e29e510512edb8 | /service/setup.py | 28486e7e072efaaf53b90bbbfe0bd0d36bc53319 | [
"MIT"
] | permissive | https://github.com/polyswarm/worker-registry | 9005325bf269199948fe8cbc724830a5fdf9e5ca | dcc581d2ce14424b3b14561addc6b0defa580b59 | refs/heads/master | 2020-03-24T05:41:04.675066 | 2018-09-06T18:16:27 | 2018-09-06T18:16:27 | 142,497,559 | 0 | 1 | MIT | false | 2018-08-16T23:25:58 | 2018-07-26T21:51:50 | 2018-07-31T03:56:33 | 2018-08-16T22:59:40 | 395 | 0 | 1 | 0 | JavaScript | false | null | from setuptools import setup
def parse_requirements():
with open('requirements.txt', 'r') as f:
return f.read().splitlines()
setup(
name='workerregistry',
version='0.1',
description='Server for querying the PolySwarm worker registry',
author='PolySwarm Developers',
author_email='info@polyswarm.io',
url='https://github.com/polyswarm/worker-registry',
license='MIT',
install_requires=parse_requirements(),
include_package_data=True,
packages=['workerregistry'],
package_dir={
'workerregistry': 'src/workerregistry',
},
entry_points={
'console_scripts': ['worker-registry=workerregistry.__main__:main'],
},
)
| UTF-8 | Python | false | false | 696 | py | 43 | setup.py | 20 | 0.658046 | 0.655172 | 0 | 26 | 25.769231 | 76 |
RedBlaze42/Anti-Flag-bot | 1,434,519,095,343 | e7c16e4e391aaaf7aaef897e328f84542b4f739a | 2893e5466ba5a78aef57901cadff2476b8626883 | /main.py | 5cd20ca9c00c31b83b51910293775153c0d42ff5 | [] | no_license | https://github.com/RedBlaze42/Anti-Flag-bot | 83b6addf2c306d68100467abf9cada2242b19e19 | 2aee458dc026e0284800a21f4b8ab8feaacd4783 | refs/heads/master | 2022-03-04T18:03:02.179798 | 2019-10-09T21:51:14 | 2019-10-09T21:51:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import discord
bot=discord.Client()
with open("flag_list.txt","r") as flag_file:
flag_database=list()
for line in flag_file.read().split("\n"):
if len(line)>=2:
flag_database.append(line[0:2])
@bot.event
async def on_ready():
print("The bot is ready !")
def is_flag(emoji):
if isinstance(emoji, str):
emoji_name=emoji
else:
emoji_name=emoji.name
return emoji_name in flag_database
async def remove_flags(message):
reactions=message.reactions
for reaction in reactions:
if is_flag(reaction.emoji):
users = await reaction.users().flatten()
for user in users:
await reaction.remove(user)
@bot.event
async def on_raw_reaction_add(payload):
guild, emoji = bot.get_guild(payload.guild_id), payload.emoji
message=await bot.get_channel(payload.channel_id).fetch_message(payload.message_id)
if guild is not None:
await remove_flags(message)
bot.run("some secret token")
| UTF-8 | Python | false | false | 1,022 | py | 1 | main.py | 1 | 0.637965 | 0.635029 | 0 | 37 | 26.621622 | 87 |
OlivierNDO/image_proc | 644,245,112,704 | bd27232a932459e0dbca8e1b4a1313c265fe8bf7 | 36f4c30ac9ed0ff08c63b94a3d507a9587a8d205 | /room_img_read.py | 3e366dfd970aaec7c311099bcacdaed2dae7c80a | [] | no_license | https://github.com/OlivierNDO/image_proc | c91f2f11fa41a504eb4b70d53a9f642b9667f4d7 | 59078486799861c6583a2675b1862a09f64d4fd8 | refs/heads/master | 2021-06-27T03:34:36.477540 | 2020-12-31T20:30:52 | 2020-12-31T20:30:52 | 202,822,283 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Configuration
###############################################################################
# Import packages
import numpy as np
import skimage.transform
from PIL import Image
import requests
from io import BytesIO
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import load_img
# File configuration
config_folder_path = 'D:/indoor_scenes/cloud_trained_models/resnet_220_220_002/'
config_model_file = 'cloud_scene_img_model_sgd_lrs_20191007_0313.hdf5'
# Example image
config_example_img = 'https://ssl.cdn-redfin.com/photo/108/bigphoto/983/1413983_19_0.jpg'
# Define functions / classes
###############################################################################
class RoomPrediction:
"""
Predict room probabilities from image url or local file path
Args:
img : either url or file path to local machine
img_type: {'url' or 'file'} depending on <img> attribute
pred_type: type of prediction output {'numeric' or 'display'}
img_height: height of images used in model training
img_width: width of images used in model training
model_object: loaded Keras model
"""
def __init__(self,
img,
model_object,
img_type = 'url',
pred_type = 'numeric',
img_height = 220,
img_width = 220):
self.img = img
self.img_type = img_type
self.pred_type = pred_type
self.img_height = img_height
self.img_width = img_width
self.model_object = model_object
def read_resize_image(self):
"""Read image (url or file path) and resize"""
if self.img_type == 'url':
img_load = Image.open(BytesIO(requests.get(self.img).content))
elif self.img_type == 'file':
            img_load = load_img(self.img)
else:
print('Error: Attribute img_type must be "url" or "file"')
resized_img = skimage.transform.resize(np.array(img_load), (self.img_height, self.img_width))
return np.expand_dims(np.array(resized_img), axis = 0)
def predict_rooms(self):
input_image = self.read_resize_image()
        pred_list = list(self.model_object.predict(input_image)[0])
if self.pred_type == 'display':
pred_dict = dict(zip(['Bathroom', 'Bedroom', 'Diningroom', 'Kitchen', 'Livingroom'],
[str(round(p * 100,3)) + "%" for p in pred_list]))
else:
pred_dict = dict(zip(['Bathroom', 'Bedroom', 'Diningroom', 'Kitchen', 'Livingroom'],
pred_list))
return pred_dict
# Execute on example
###############################################################################
model_object = keras.models.load_model('{}{}'.format(config_folder_path, config_model_file))
room_predicter = RoomPrediction(model_object = model_object, img = config_example_img, img_type = 'url', pred_type = 'display')
preds = room_predicter.predict_rooms()
print(preds)
| UTF-8 | Python | false | false | 3,287 | py | 17 | room_img_read.py | 6 | 0.552175 | 0.536964 | 0 | 80 | 38.9125 | 127 |
Ilyosbek07/Restaurant | 11,158,325,036,013 | 93f3f7c738d5b479d70cec36f7b04dbef1c515d5 | 1d8304084479700fb1dccfaf3f9dae7408c11a4b | /bot_users/admin.py | 8b8254e79f7c32a624af4c5d0104be59abc61a25 | [] | no_license | https://github.com/Ilyosbek07/Restaurant | 88af5564a87650c1fe5e3df8eaf6a2e93b5c49f7 | 41599507cb46f79efb9ad73fef5014f776489864 | refs/heads/master | 2023-08-13T01:53:07.516623 | 2021-10-13T11:14:29 | 2021-10-13T11:14:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from bot_users.models import TelegramUserModel, OrderModel
@admin.register(TelegramUserModel)
class TelegramUserModelAdmin(admin.ModelAdmin):
list_display = ['tg_id', 'username', 'first_name', 'last_name']
list_filter = ['created_at']
search_fields = ['username', 'first_name']
@admin.register(OrderModel)
class OrderModelAdmin(admin.ModelAdmin):
list_display = ['price']
list_filter = ['created_at']
    search_fields = ['user', 'product', 'price']
| UTF-8 | Python | false | false | 490 | py | 86 | admin.py | 66 | 0.734694 | 0.734694 | 0 | 17 | 27.823529 | 64 |
harshtikuu/tensorflow_dev | 11,020,886,101,026 | 95786a9f7fed5b9e91874cd11d6ec5af43db0f85 | 4dd3a04cb14cbe8d755c00b70fd140988b3a7024 | /firsttensor.py | 0c53c6219d8d5e507e4c38251452820c921c721a | [] | no_license | https://github.com/harshtikuu/tensorflow_dev | b9b90bcc65be572c51b43a29eabffb2bf46807a8 | ed139686f352fe01ff8e455821b4abb15ca41130 | refs/heads/master | 2021-07-11T06:23:40.396794 | 2017-10-04T15:13:04 | 2017-10-04T15:13:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as tf
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
#Create dummy regression problem and split it into training and testing data
data=make_regression(100,3)
X_train,X_test,Y_train,Y_test=train_test_split(data[0],data[1])
clf=LinearRegression()
clf.fit(X_train,Y_train)
print(clf.coef_)
print('breakpoint\n')
#Create a machine learning Regression model
w=tf.Variable([0.5,0.9,0.7],dtype='float32')
b=tf.Variable([0.7],dtype='float32')
x=tf.placeholder(tf.float32,shape=[None,3])
# dot product of the weights with each 3-feature sample gives one prediction per row
y=tf.reduce_sum(w*x,axis=1)+b
loss=tf.reduce_mean(tf.square(y-Y_train))
'''
init=tf.global_variables_initializer()
sess=tf.Session()
sess.run(init)
print(sess.run(loss,{x:X_train}))
'''
optimizer=tf.train.GradientDescentOptimizer(0.01)
train=optimizer.minimize(loss)
init=tf.global_variables_initializer()
sess=tf.Session()
sess.run(init)
for i in range(1000):
    sess.run(train,{x:X_train})
print(sess.run([w,b])) | UTF-8 | Python | false | false | 1,009 | py | 6 | firsttensor.py | 5 | 0.751239 | 0.72448 | 0 | 39 | 24.897436 | 76 |
levabala/gopher-s-tales | 3,332,894,632,014 | c0d3ec1d3d61b9670eea287a9caf53fbfae6b9d4 | 2447d414b6c5619f2114dff5204af1b98b53fee1 | /texts/events/UserActionTexts.py | 35e12a420f401f6b4015849dff361e1157df4a5e | [] | no_license | https://github.com/levabala/gopher-s-tales | 0f9a2c6ed333b87db0b5f5e2488cc633fe031c29 | 9179c97646d001efdf0220791742f831790edf1a | refs/heads/master | 2020-04-01T15:15:47.143232 | 2018-11-09T19:34:49 | 2018-11-09T19:34:49 | 153,328,913 | 1 | 0 | null | false | 2018-11-01T17:16:25 | 2018-10-16T17:46:49 | 2018-10-21T16:53:06 | 2018-11-01T17:16:25 | 120 | 1 | 0 | 0 | Python | false | null | DESCRIBE = '''
'''
| UTF-8 | Python | false | false | 21 | py | 99 | UserActionTexts.py | 97 | 0.380952 | 0.380952 | 0 | 2 | 9.5 | 16 |
HuangDayu/yibuwulianwang | 2,765,958,970,585 | 980d64b3215368ff6138737a3e50c9e92d25487f | f2aa3abb5048e43c92ba4beaedf4fccff29443ca | /DuerOS/百度语音合成/aip-python-sdk-1.6.1/test_5.py | 18caa9e2a4d5ee33d8bce487f098f2039ce4d1d4 | [
"Apache-2.0"
] | permissive | https://github.com/HuangDayu/yibuwulianwang | bcd429d92709599ca7190fb151edab60d78fa1b2 | e23f6115d0f20f651b966af21ec0fc381f2c168d | refs/heads/master | 2020-02-17T01:22:41.898381 | 2018-09-22T09:56:48 | 2018-09-22T09:56:48 | 124,677,422 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | "path/to/vad/audio_stream.py"
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from pyaudio import PyAudio,paInt16
from datetime import datetime
import wave
from Tkinter import *
import sys
from ffnn import FFNNVADGeneral
import logging
# import chardet # used to inspect encodings
# define of params
NUM_SAMPLES =160
FRAMERATE = 16000
CHANNELS = 1
SAMPWIDTH = 2
FORMAT = paInt16
TIME = 125
FRAMESHIFT = 160
def save_wave_file(filename,data):
'''save the date to the wav file'''
wf = wave.open(filename,'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(SAMPWIDTH)
wf.setframerate(FRAMERATE)
    wf.writeframes("".join(data)) # join with "" (no spaces in between), otherwise the recorded audio has many breaks.
wf.close()
def my_button(root,label_text,button_text,button_stop,button_func,stop_func):
'''create label and button'''
label = Label(root,text=label_text,width=30,height=3).pack()
button = Button(root,text=button_text,command=button_func,anchor='center',width=30,height=3).pack()
button = Button(root,text=button_stop,command=stop_func,anchor='center',width=30,height=3).pack()
def record_wave():
'''open the input of wave'''
pa = PyAudio()
    # start recording
stream = pa.open(format=FORMAT,
channels=CHANNELS,
rate=FRAMERATE,
input=True,
                     frames_per_buffer=NUM_SAMPLES) # each buffer holds NUM_SAMPLES samples, treated as one frame
vad = FFNNVADGeneral('/path/to/VAD/alex-master/alex/tools/vad_train/model_voip/vad_nnt_546_hu32_hl1_hla6_pf10_nf10_acf_1.0_mfr20000_mfl20000_mfps0_ts0_usec00_usedelta0_useacc0_mbo1_bs100.tffnn',
filter_length=2, sample_rate=16000, framesize=512, frameshift=160,
usehamming=True, preemcoef=0.97, numchans=26, ceplifter=22, numceps=12,
enormalise=True, zmeansource=True, usepower=True, usec0=False,
usecmn=False, usedelta=False, useacc=False, n_last_frames=10,
n_prev_frames=10, lofreq=125, hifreq=3800, mel_banks_only=True)
    # FFNNVADGeneral is the feed-forward neural network voice activity detection class.
save_buffer = []
count = 0
    # configure logging so the VAD results are written to a log file
logging.basicConfig(level=logging.INFO,
filename='log.txt',
filemode ='w',
format='%(message)s')
while count < TIME*4:
string_audio_data = stream.read(NUM_SAMPLES)
result = vad.decide(string_audio_data)
frame = count*NUM_SAMPLES/float(FRAMESHIFT)
time = count*NUM_SAMPLES/float(FRAMERATE) # time=frame*frameshift/framerate
        logging.info('frame: '+str(frame)+' time: '+str(time)+' prob: '+str(result)) # build the log line by string concatenation
save_buffer.append(string_audio_data)
count += 1
        #chardet.detect(string_audio_data) # check the encoding type
print "."
filename = datetime.now().strftime("%Y-%m-%d_%H_%M_%S")+".wav"
save_wave_file(filename,save_buffer)
save_buffer = []
print "filename,saved."
def record_stop():
    # stop recording by exiting the program
sys.exit(0)
def main():
root = Tk()
root.geometry('300x200+200+200')
root.title('record wave')
my_button(root,"Record a wave","clik to record","stop recording",record_wave,record_stop)
root.mainloop()
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 3,380 | py | 365 | test_5.py | 184 | 0.639802 | 0.605084 | 0 | 95 | 32.957895 | 198 |
AlexandraMeciu/someCode | 11,965,778,915,720 | 5565a9ec40440e7d497d9d2cac54887d32090037 | 0aa1f854b98e3c02f7327de87699ce1d6e913f18 | /Patterm1.py | d8f213e1ed4706ce9698af7640409249831686c2 | [] | no_license | https://github.com/AlexandraMeciu/someCode | 2c98c7f33f82987c42c790a26fd9a8237f4fce17 | 281b89d1f3ca359d610a6b60a2454b0e8f8b94e9 | refs/heads/master | 2020-05-06T20:06:36.492326 | 2019-04-08T20:06:50 | 2019-04-08T20:06:50 | 180,222,420 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def pattern1(rows):
    for i in range(1, int(rows) + 1):
        print(str(i) * i)
def pattern2(rows):
    for i in range(1, int(rows) + 1):
        for j in range(1, i + 1):
            print(j, end="")
        print()
def pattern3(rows):
    for i in range(1, int(rows) * 2, 2):
        for j in range(1, i + 1, 2):
            print(j, end="")
        print()
def pattern4(rows):
    for i in range(1, int(rows) + 1):
        for j in range(1, i + 1):
            if j % 2 == 0:
                print('1', end="")
            else:
                print('0', end="")
        print()
rows = input("Enter the number of rows: ")
pattern1(rows)
pattern2(rows)
pattern3(rows)
pattern4(rows) | UTF-8 | Python | false | false | 790 | py | 2 | Patterm1.py | 1 | 0.439241 | 0.403797 | 0 | 39 | 18.307692 | 39 |
Ascend/ModelZoo-PyTorch | 3,478,923,544,555 | b4cf2600151b092e23919e80874b828a7dfc562d | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /ACL_PyTorch/contrib/nlp/tinybert/TinyBERT_postprocess_data.py | 70a2bd262e7130cb96eebe441fb90703c48b9ca5 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | https://github.com/Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | false | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | 2022-10-10T08:03:54 | 2022-10-15T04:01:18 | 53,470 | 7 | 5 | 2 | Python | false | false | # coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run TinyBERT on SST-2."""
from __future__ import absolute_import, division, print_function
import argparse
import os
import sys
import csv
import numpy as np
import io
from transformer.tokenization import BertTokenizer
import torch
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, seq_length=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.seq_length = seq_length
self.label_id = label_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with io.open(input_file, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_aug_examples(self, data_dir):
"""get the augmented examples"""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
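        # BERT-style input layout: [CLS] tokens_a [SEP] (+ tokens_b [SEP]),
        # with segment ids 0 for the first segment and 1 for the second.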
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
seq_length = len(input_ids)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
seq_length=seq_length))
return features
def get_label_ids(features):
"""get the label id"""
return torch.tensor([f.label_id for f in features], dtype=torch.long)
def simple_accuracy(preds, labels):
"""calculate the accuracy"""
return (preds == labels).mean()
def bin2predlabel(test_num, args):
"""(adapt to benchmark inference)change the bin files into logits"""
logit1 = []
logit2 = []
for i in range(test_num):
n1, n2 = np.fromfile('{}/Bert_{}_1.bin'.format(args.result_dir, i), dtype='float32')
logit1.append(n1)
logit2.append(n2)
logit = np.concatenate((np.array(logit1).reshape(1, -1), np.array(logit2).reshape(1, -1)), axis = 0)
pred_label = np.argmax(logit, axis = 0)
return pred_label
def txt2predlabel(test_num, args):
"""(adapt to msame inference):change the txt files into logits"""
logit1 = []
logit2 = []
for i in range(test_num):
txtname = "input" + str(i) + "_output_0.txt"
        file_path = os.path.join(args.result_dir, txtname)  # avoid shadowing the built-in dir()
        with open(file_path, "r") as f:
            line = f.readline()
            n1, n2 = [float(v) for v in line.split()]
logit1.append(n1)
logit2.append(n2)
logit = np.concatenate((np.array(logit1).reshape(1, -1), np.array(logit2).reshape(1, -1)), axis = 0)
pred_label = np.argmax(logit, axis = 0)
return pred_label
def txt2predlabel_ais_infer(test_num, args):
"""(adapt to msame inference):change the txt files into logits"""
logit1 = []
logit2 = []
for i in range(test_num):
txtname = "input" + str(i) + "_0.txt"
        file_path = os.path.join(args.result_dir, txtname)  # avoid shadowing the built-in dir()
        with open(file_path, "r") as f:
            line = f.readline()
            n1, n2 = [float(v) for v in line.split()]
logit1.append(n1)
logit2.append(n2)
logit = np.concatenate((np.array(logit1).reshape(1, -1), np.array(logit2).reshape(1, -1)), axis = 0)
pred_label = np.argmax(logit, axis = 0)
return pred_label
def main():
"""postprocess the data and calculate the accuracy"""
parser = argparse.ArgumentParser()
parser.add_argument("--max_seq_length",
default=64,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--result_dir",
default=None,
type=str,
required=True,
help="NPU benchmark infer result path")
parser.add_argument("--model",
default=None,
type=str,
required=True,
help="The student model dir.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--inference_tool", type = str,
help = "inference tool:benchmark or msame")
args = parser.parse_args()
test_num = 872
processor = Sst2Processor()
tokenizer = BertTokenizer.from_pretrained(args.model, do_lower_case=args.do_lower_case)
eval_examples = processor.get_dev_examples(args.data_dir)
label_list = ["0", "1"]
eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer,
output_mode="classification")
#data processing
eval_labels = get_label_ids(eval_features).numpy()
if args.inference_tool == "benchmark":
pred_labels = bin2predlabel(test_num, args)
elif args.inference_tool == "ais_infer":
pred_labels = txt2predlabel_ais_infer(test_num, args)
elif args.inference_tool == "msame":
pred_labels = txt2predlabel(test_num, args)
result = simple_accuracy(pred_labels, eval_labels)
print("acc:{}".format(result))
if __name__ == '__main__':
main() | UTF-8 | Python | false | false | 11,229 | py | 11,303 | TinyBERT_postprocess_data.py | 8,028 | 0.56835 | 0.559444 | 0 | 298 | 35.687919 | 117 |
Sollimann/spot_mini | 1,047,972,069,018 | be8a08396bb2d93f90fc3725e4cad9e190dcb7e3 | 0cbb23fef6af61f907730c6e83bc4e775a20250d | /spot/config.py | c57b0d8488f946ba1d42622ecc421de3dac7fb59 | [] | no_license | https://github.com/Sollimann/spot_mini | 09ffead172b6ade890b25538f41e131dea12d523 | ad137ec1ec06c9a5faa11bea0d52fa610289d9ec | refs/heads/master | 2022-04-08T00:29:26.985559 | 2020-03-12T09:45:32 | 2020-03-12T09:45:32 | 243,859,765 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from dataclasses import dataclass
from typing import Optional
from yaml import safe_load
from dacite import from_dict
@dataclass
class SpotConfig:
hostname: str
username: str
password: str
app_token: Optional[str]
@dataclass
class Config:
spot: SpotConfig
def read_config(filepath: str) -> Config:
with open(filepath, "r") as file:
data = safe_load(file)
    return from_dict(data_class=Config, data=data)
if __name__ == "__main__":
config: Config = read_config("../config.yaml")
print(config)
| UTF-8 | Python | false | false | 562 | py | 10 | config.py | 7 | 0.66726 | 0.66726 | 0 | 29 | 18.37931 | 69 |
cjwmay/CodingDojoAssignments | 15,375,982,949,251 | 8a791365b47b2515e600232b03edf00ca298778b | 514081f2e26413720a02d7d64b14866faa84d568 | /Python/Filterbytype.py | 5e79faa08932cab42a5222ef17feb0d1bf065f8a | [] | no_license | https://github.com/cjwmay/CodingDojoAssignments | a41bc40f79a233518aa727acef3c8e8e8ecc2cb4 | 85f21d93bc957acb96a0546b88fa898dc0e6a746 | refs/heads/master | 2021-01-18T04:04:50.931337 | 2017-06-26T17:30:10 | 2017-06-26T17:30:10 | 85,765,439 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def functionInteger(a):
    if a >= 100:
        print "That's a big number"
    else:
        print "That's a small number"
functionInteger(100)
# String
def functionString(s):
    if len(s) >= 50:
        print "Long sentence"
    else:
        print "Short sentence"
functionString("Tell me and I forget. Teach me and I remember. Involve me and I learn.")
# list
def functionList(li):
    if len(li) >= 10:
        print "Big list!"
    else:
        print "Short list!"
functionList([4, 34, 22, 68, 9, 13, 3, 5, 7, 9, 2, 12, 45, 923])
| UTF-8 | Python | false | false | 562 | py | 163 | Filterbytype.py | 78 | 0.61032 | 0.553381 | 0 | 25 | 21.48 | 89 |
Mozzarella123/calc-back | 10,694,468,593,760 | b48c1c78e386b2c4b4b9b409775f5dd25dcd454f | 31affbec9ba4c81346865476876f7e2683459ea5 | /models/ParameterValue.py | e6199a38023661c3a61314bbac70079e322580d8 | [] | no_license | https://github.com/Mozzarella123/calc-back | c0c1450eded1d9f01c249a84d6bf582353312726 | a9c5d209361b3275586fdbb590e773320bc3007a | refs/heads/master | 2022-11-23T23:31:25.493777 | 2020-07-29T11:55:51 | 2020-07-29T11:55:51 | 283,485,766 | 0 | 0 | null | true | 2020-07-29T11:53:50 | 2020-07-29T11:53:50 | 2020-07-29T11:49:48 | 2020-07-28T19:45:24 | 65 | 0 | 0 | 0 | null | false | false | from models.db import db
from sqlalchemy.orm import relationship
from models.Parameter import Parameter
class ParameterValue(db.Model):
__tablename__ = "ParameterWithValues"
id = db.Column(
db.Integer,
name="Id",
primary_key=True,
nullable=False
)
value = db.Column(
db.Float,
name='Value',
nullable=False
)
parameter_id = db.Column(
db.Integer,
db.ForeignKey("Parameters.Id"),
nullable=False
)
parameter = relationship("Parameter")
def to_json(self):
return {
'id': self.id,
'parameter': self.parameter,
'value': self.value
}
def update_from_json(self, data):
self.value = data['value']
@classmethod
def from_json(cls, data):
return cls(
value=data['value'],
parameter=Parameter.from_json(data['parameter'])
)
@classmethod
    def array_from_json(cls, data, prev_array=None):
        # use None as the default: a mutable [] default would be shared between calls
        prev_array = prev_array if prev_array is not None else []
        res = []
        for item in data:
prev_item = next((x for x in prev_array if x.id == item['id']), None)
if prev_item is not None:
prev_item.update_from_json(item)
else:
prev_item = ParameterValue.from_json(item)
res.append(prev_item)
return res
| UTF-8 | Python | false | false | 1,442 | py | 39 | ParameterValue.py | 39 | 0.521498 | 0.521498 | 0 | 62 | 21.258065 | 81 |
graphcore/popart | 13,477,607,380,401 | 6123beef8f1a2b4bbb7095a941e360bdf7338b17 | 7d93616b09afdd38ba25f70bf56e84d92d16f8e1 | /tests/integration/operators_test/min_max_test.py | a7b8cee10c4a936494731ffe12249a7a11ab504f | [
"MIT",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | https://github.com/graphcore/popart | ac3c71617c5f0ac5dadab179b655f6b2372b453d | efa24e27f09b707865326fe4a30f4a65b7a031fe | refs/heads/sdk-release-3.0 | 2023-07-08T08:36:28.342159 | 2022-09-23T12:22:35 | 2022-09-23T15:10:23 | 276,412,857 | 73 | 13 | NOASSERTION | false | 2022-09-29T12:13:40 | 2020-07-01T15:21:50 | 2022-09-20T10:27:49 | 2022-09-29T12:13:39 | 22,515 | 64 | 6 | 3 | C++ | false | false | # Copyright (c) 2019 Graphcore Ltd. All rights reserved.
import numpy as np
import popart
import torch
def test_max_training(op_tester):
d1 = np.random.rand(5, 7, 5).astype(np.float32)
d2 = np.random.rand(7, 5).astype(np.float32)
d3 = np.random.rand(5).astype(np.float32)
d4 = np.random.rand(1, 1, 5).astype(np.float32)
d5 = np.random.rand(5, 1, 5).astype(np.float32)
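    # the five operands exercise numpy-style broadcasting across different ranks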
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
i3 = builder.addInputTensor(d3)
i4 = builder.addInputTensor(d4)
i5 = builder.addInputTensor(d5)
o = builder.aiOnnx.max([i1, i2, i3, i4, i5], "test_max")
builder.addOutputTensor(o)
return [
o,
popart.reservedGradientPrefix() + i1,
popart.reservedGradientPrefix() + i2,
popart.reservedGradientPrefix() + i3,
popart.reservedGradientPrefix() + i4,
popart.reservedGradientPrefix() + i5,
popart.reservedGradientPrefix() + o,
]
def reference(ref_data):
t1 = torch.tensor(d1, requires_grad=True)
t2 = torch.tensor(d2, requires_grad=True)
t3 = torch.tensor(d3, requires_grad=True)
t4 = torch.tensor(d4, requires_grad=True)
t5 = torch.tensor(d5, requires_grad=True)
out = torch.max(t1, t2)
out = torch.max(t3, out)
out = torch.max(t4, out)
out = torch.max(t5, out)
d__o = ref_data.getOutputTensorGrad(0)
out.backward(torch.tensor(d__o))
return [out, t1.grad, t2.grad, t3.grad, t4.grad, t5.grad, d__o]
op_tester.setPatterns(["OpToIdentity"], enableRuntimeAsserts=False)
op_tester.run(init_builder, reference, "train")
def test_min_training_0(op_tester):
d1 = np.random.rand(3, 4).astype(np.float32)
d2 = np.random.rand(4).astype(np.float32)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.min([i1, i2], "test_min")
builder.addOutputTensor(o)
return [
o,
popart.reservedGradientPrefix() + i1,
popart.reservedGradientPrefix() + i2,
popart.reservedGradientPrefix() + o,
]
def reference(ref_data):
t1 = torch.tensor(d1, requires_grad=True)
t2 = torch.tensor(d2, requires_grad=True)
out = torch.min(t1, t2)
d__o = ref_data.getOutputTensorGrad(0)
out.backward(torch.tensor(d__o))
return [out, t1.grad, t2.grad, d__o]
op_tester.setPatterns(["OpToIdentity"], enableRuntimeAsserts=False)
op_tester.run(init_builder, reference, "train")
def test_min_training_1(op_tester):
d1 = np.random.rand(2, 3, 4).astype(np.float32)
d2 = np.random.rand(4).astype(np.float32)
d3 = np.random.rand(1, 1, 4).astype(np.float32)
d4 = np.random.rand(2, 1, 4).astype(np.float32)
d5 = np.random.rand(1, 3, 4).astype(np.float32)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
i3 = builder.addInputTensor(d3)
i4 = builder.addInputTensor(d4)
i5 = builder.addInputTensor(d5)
o = builder.aiOnnx.min([i1, i2, i3, i4, i5], "test_min")
builder.addOutputTensor(o)
return [
o,
popart.reservedGradientPrefix() + i1,
popart.reservedGradientPrefix() + i2,
popart.reservedGradientPrefix() + i3,
popart.reservedGradientPrefix() + i4,
popart.reservedGradientPrefix() + i5,
popart.reservedGradientPrefix() + o,
]
def reference(ref_data):
t1 = torch.tensor(d1, requires_grad=True)
t2 = torch.tensor(d2, requires_grad=True)
t3 = torch.tensor(d3, requires_grad=True)
t4 = torch.tensor(d4, requires_grad=True)
t5 = torch.tensor(d5, requires_grad=True)
out = torch.min(t1, t2)
out = torch.min(t3, out)
out = torch.min(t4, out)
out = torch.min(t5, out)
d__o = ref_data.getOutputTensorGrad(0)
out.backward(torch.tensor(d__o))
return [out, t1.grad, t2.grad, t3.grad, t4.grad, t5.grad, d__o]
op_tester.setPatterns(["OpToIdentity"], enableRuntimeAsserts=False)
op_tester.run(init_builder, reference, "train")
| UTF-8 | Python | false | false | 4,373 | py | 2,412 | min_max_test.py | 2,241 | 0.604391 | 0.565744 | 0 | 127 | 33.433071 | 71 |
hsnet-fork/BitGlitter | 3,710,851,761,496 | 9c716af7dc07b8d082b5902f1e230d834e92f579 | c95ec26e54cacfe0a50a051413273826b0b67110 | /bitglitter/palettes/paletteutilities.py | f34b88f5be9a5c081fe5928e5c60ae6eb77a8a27 | [
"MIT"
] | permissive | https://github.com/hsnet-fork/BitGlitter | 5c28cd6bb2a63d617a5731be6e2e31339ceed3cb | 0e84e5553dd5365f05a6715be95869b7147bb47c | refs/heads/master | 2021-05-27T05:40:08.809500 | 2019-07-18T07:21:51 | 2019-07-18T07:21:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import hashlib
import itertools
import logging
import math
from bitstring import BitArray, ConstBitStream
from bitglitter.config.config import config
from bitglitter.palettes.paletteobjects import CustomPalette
class ValuesToColor:
'''This generates a dictionary linking a string binary value to an RGB value. This is how binary data gets directly
converted to colors. This step required more than a dictionary, as additional logic was required to switch between
a standard dictionary used by default and custom palettes, and 24 bit palettes. They both convert data to colors in
different ways, and this provides a single clean interface for that.
'''
def __init__(self, palette, type):
logging.debug(f'Generating binary : color dictionary for {type}...')
self.palette = palette
self.bit_length = self.palette.bit_length
self.type = type
self.return_value = self.generate_dictionary()
def generate_dictionary(self):
def twenty_four_bit_values(value):
red_channel = value.read('uint : 8')
green_channel = value.read('uint : 8')
blue_channel = value.read('uint : 8')
return (red_channel, green_channel, blue_channel)
color_dict = {}
if self.palette.bit_length != 24:
for value in range(len(self.palette.color_set)):
temp_bin_holder = str(BitArray(uint=value, length=self.palette.bit_length))
temp_bin_holder = ConstBitStream(temp_bin_holder)
color_dict[temp_bin_holder] = self.palette.color_set[value]
return color_dict
else:
return twenty_four_bit_values
def get_color(self, value):
if self.bit_length != 24:
return self.return_value[value]
else:
return self.return_value(value)
class ColorsToValue:
'''This class does the exact opposite as ValuesToColor. This first generates a dictionary linking colors to
specific bit values, and then get_value() accomplishes that. It is worth noting that 24 bit color functions
differently than the other color palettes, in that it doesn't use a dictionary, but rather converts each byte into
an unsigned integer for each of it's three color channels, and then returns that color.
'''
def __init__(self, palette):
self.palette = palette
self.return_value = self.generate_dictionary()
def generate_dictionary(self):
def twenty_four_bit_values(color):
outgoing_data = BitArray()
for color_channel in color:
outgoing_data.append(BitArray(uint=color_channel, length=8))
return outgoing_data
value_dict = {}
if self.palette.bit_length != 24:
for value in range(len(self.palette.color_set)):
temp_bin_holder = str(BitArray(uint=value, length=self.palette.bit_length))
temp_bin_holder = ConstBitStream(temp_bin_holder)
value_dict[self.palette[value]] = temp_bin_holder
return value_dict
else:
return twenty_four_bit_values
def get_value(self, color):
if self.palette.bit_length != 24:
return self.return_value[color]
else:
return self.return_value(color)
def palette_grabber(id_or_nick):
'''Goes through each of the dictionaries to return the color object.'''
if id_or_nick in config.color_handler.default_palette_list:
return config.color_handler.default_palette_list[id_or_nick]
elif id_or_nick in config.color_handler.custom_palette_list:
return config.color_handler.custom_palette_list[id_or_nick]
elif id_or_nick in config.color_handler.custom_palette_nickname_list:
return config.color_handler.custom_palette_nickname_list[id_or_nick]
else:
raise ValueError('palette_grabber(): This value is not present.')
def _validate_and_add_palette(palette_name, palette_description, date_created, color_set):
'''This is solely to input custom palettes without all of the other prompts. Returns True if validated and added,
and false if it isn't.
'''
distance = color_distance(color_set)
if distance == 0:
return False
if len(color_set) % 2 != 0 or len(color_set) < 2:
return False
id = return_palette_id(palette_name, palette_description, date_created, color_set)
_add_custom_palette_direct(palette_name, palette_description, color_set, distance, date_created, id)
return True
def color_distance(palette):
    '''This function takes the set of color tuples in a palette and calculates their proximity to each other in RGB
    space. Higher numbers denote 'safer' palettes, as they are less prone to read errors in the field. Getting 0
    returned means you have at least a single pair of identical RGB values. All values must be unique!
'''
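    # For reference, the largest possible distance is between (0, 0, 0) and
    # (255, 255, 255): sqrt(3 * 255**2), roughly 441.67.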
local_distances = []
for unique_set in itertools.combinations(palette, 2):
active_distance = math.sqrt(
((unique_set[1][0] - unique_set[0][0]) ** 2) + ((unique_set[1][1] - unique_set[0][1])
** 2) + ((unique_set[1][2] - unique_set[0][2]) ** 2))
local_distances.append(active_distance)
return round(min(local_distances), 2)
def return_palette_id(name, description, date_created, color_set):
'''Taking in the various parameters, this creates a unique ID for the object.'''
color_set_string = str(color_set)
hasher = hashlib.sha256(str(name + description + date_created + color_set_string).encode())
    return hasher.hexdigest()
def _add_custom_palette_direct(name, description, color_set, distance, date_created, id, nickname=None):
'''After validation is done, this function is ran to actually instantiate the palette object, as well as load it
into the appropriate dictionaries and save the configuration file. This should never be ran by itself because it
blindly accepts all values!
'''
new_palette = CustomPalette(name, description, color_set, distance, date_created, id, nickname)
config.color_handler.custom_palette_list[id] = new_palette
if nickname:
config.color_handler.custom_palette_nickname_list[nickname] = new_palette
config.save_session()
| UTF-8 | Python | false | false | 6,341 | py | 41 | paletteutilities.py | 41 | 0.67103 | 0.664564 | 0 | 172 | 35.866279 | 120 |
eQu1NoX/ctf | 8,615,704,424,746 | 335e77cb7569cbfa4deb6fb802a0c6c2dc6592e7 | d77e21dcca0ce0caa568680b3e033096c19e9386 | /Hack The Vote 16/Trumpervisor/trumpervisor.py | 8669d6892958e9794f6bd66a6e9a75207c5f0b0b | [] | no_license | https://github.com/eQu1NoX/ctf | e786b6d2f2815098a0e7b1711b5d5e7c2530d02a | 6e0fb1b96d3bab12057d34a11bc24d91a78b79a1 | refs/heads/master | 2017-12-11T11:06:48.967406 | 2016-11-19T20:40:43 | 2016-11-19T20:40:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pwn import *
from time import sleep
import angr
REGS_ORDER = ['rax', 'rbx', 'rcx', 'rdx', 'rsi', 'rdi', 'r8', 'r9', 'r10', 'r11', 'r12', 'r13', 'r14', 'r15']
def send_regs(regs):
p = remote('trumpervisor.pwn.republican', 9000)
for i in xrange(14):
p.sendline(str(regs[i]))
sleep(0.5)
log.info('Sending {0} = {1}'.format(REGS_ORDER[i], regs[i]))
print p.recv(2048, timeout=1)
def find_state():
log.info('Open angr project and load entry state')
p = angr.Project('a.out')
state = p.factory.entry_state(addr=0x4004ED)
log.info('Creating and loading symbolics and constants')
context_rcx = angr.claripy.BVS(name="context_rcx", size=8*8)
context_rbx = angr.claripy.BVS(name="context_rbx", size=8*8)
context_rdx = angr.claripy.BVV(int(p64(0x400).encode('hex'), 16), size=8*8)
context_rdi = angr.claripy.BVV(int(p64(0x1aa000).encode('hex'), 16), size=8*8)
context_rsi = angr.claripy.BVS(name="context_rsi", size=8*8)
context_r8 = angr.claripy.BVS(name="context_r8", size=8*8)
context_r9 = angr.claripy.BVS(name="context_r9", size=8*8)
context_r10 = angr.claripy.BVS(name="context_r10", size=8*8)
context_r11 = angr.claripy.BVV(int(p64(0x1).encode('hex'), 16), size=8*8)
context_r12 = angr.claripy.BVV(int(p64(0).encode('hex'), 16), size=8*8)
context_r13 = angr.claripy.BVS(name="context_r13", size=8*8)
context_r14 = angr.claripy.BVS(name="context_r14", size=8*8)
context_r15 = angr.claripy.BVV(int(p64(0).encode('hex'), 16), size=8*8)
state.memory.store(addr=0x601088, data=context_rcx)
state.memory.store(addr=0x6010a0, data=context_rbx)
state.memory.store(addr=0x6010d0, data=context_rdx)
state.memory.store(addr=0x601060, data=context_rdi)
state.memory.store(addr=0x601090, data=context_rsi)
state.memory.store(addr=0x601068, data=context_r8)
state.memory.store(addr=0x6010b0, data=context_r9)
state.memory.store(addr=0x601058, data=context_r10)
state.memory.store(addr=0x6010c8, data=context_r11)
state.memory.store(addr=0x601050, data=context_r12)
state.memory.store(addr=0x6010b8, data=context_r13)
state.memory.store(addr=0x601048, data=context_r14)
state.memory.store(addr=0x6010c0, data=context_r15)
log.info('Stepping till the end of the program')
path = p.factory.path(state)
path = path.step()[0].step()[0]
for i in xrange(0x400):
path = path.step()[0]
path = path.step()[0].step()[0]
log.info('Finding initial state')
solver = path.state.se
solver.add(path.state.memory.load(0x601039, size=1) == 0xb0)
solver.add(path.state.memory.load(0x60103a, size=1) == 0x93)
solver.add(path.state.memory.load(0x60103b, size=1) == 0x13)
solver.add(path.state.memory.load(0x60103c, size=1) == 0x80)
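    # the four constrained bytes at 0x601039 form the little-endian dword
    # 0x801393b0, presumably the success value the challenge binary checks for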
return [u64(c) for c in [
p64(0)
,solver.any_str(context_rbx)
,solver.any_str(context_rcx)
,solver.any_str(context_rdx)
,solver.any_str(context_rsi)
,solver.any_str(context_rdi)
,solver.any_str(context_r8)
,solver.any_str(context_r9)
,solver.any_str(context_r10)
,solver.any_str(context_r11)
,solver.any_str(context_r12)
,solver.any_str(context_r13)
,solver.any_str(context_r14)
,solver.any_str(context_r15)]]
def get_flag():
p = find_state()
send_regs(p) | UTF-8 | Python | false | false | 3,166 | py | 22 | trumpervisor.py | 11 | 0.698673 | 0.609602 | 0 | 86 | 35.825581 | 109 |
Vector254/Api | 10,084,583,242,460 | 8801efcd74d933238db1d27ad07112435827b01f | 0e26574bb4baf59f2eb36badee0e560b53903f79 | /app/main/views.py | 6196571babcd66ac18f56b1e0ca79138428167ab | [
"MIT"
] | permissive | https://github.com/Vector254/Api | 7b799612723e6147b2ca964d956e07a1f50a0e4f | b6f1f2deae7e9a1e7b0ffc158b4cd0ba178a2354 | refs/heads/master | 2022-12-28T11:49:45.017447 | 2020-10-05T07:08:06 | 2020-10-05T07:08:06 | 300,807,013 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import request, jsonify, abort, url_for, make_response, render_template
from flask_api import FlaskAPI
from .. import db
from . import main
from ..models import Quotes
def make_public_quote(quote):
new_quote = {}
for field in quote:
if field == 'id':
            new_quote['uri'] = url_for('main.quotes_manipulation', id=quote['id'], _external=True)
else:
new_quote[field] = quote[field]
return new_quote
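# Swapping the raw 'id' for a full URI lets API clients follow links instead of
# building URLs themselves (a simple HATEOAS-style touch).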
@main.route('/')
def index():
return render_template('index.html')
@main.route('/vector/api/v1.0/quotes', methods=['POST', 'GET'])
def quotes():
if request.method == "POST":
title = str(request.data.get('title', ''))
author = str(request.data.get('author', ''))
description = str(request.data.get('description', ''))
        quote = Quotes(title=title, author=author, description=description)
quote.save()
response = jsonify({
'id': quote.id,
'title': quote.title,
'date_created': quote.date_created,
'description': quote.description,
'author': quote.author,
})
response.status_code = 201
return response
else:
# GET
quotes = Quotes.get_all()
results = []
for quote in quotes:
obj = {
'id': quote.id,
'title': quote.title,
'date_created': quote.date_created,
'description': quote.description,
'author': quote.author,
}
results.append(obj)
response = jsonify([make_public_quote(quote) for quote in results])
response.status_code = 200
return response
@main.route('/vector/api/v1.0/quotes/<int:id>', methods=['GET', 'PUT', 'DELETE'])
def quotes_manipulation(id, **kwargs):
# retrieve a buckelist using it's ID
quote = Quotes.query.filter_by(id=id).first()
if not quote:
# Raise an HTTPException with a 404 not found status code
abort(404)
if request.method == 'DELETE':
quote.delete()
return {
"message": "quote {} deleted successfully".format(quote.id)
}, 200
elif request.method == 'PUT':
        title = str(request.data.get('title', ''))
author = str(request.data.get('author', ''))
description = str(request.data.get('description', ''))
date_created = str(request.data.get('date_created', ''))
quote.title = title
quote.author = author
quote.description = description
quote.date_created = date_created
quote.save()
response = jsonify({
'id': quote.id,
'title': quote.title,
'date_created': quote.date_created,
'description': quote.description,
'author': quote.author,
})
response.status_code = 200
return response
else:
# GET
response = jsonify({
'id': quote.id,
'title': quote.title,
'date_created': quote.date_created,
'description': quote.description,
'author': quote.author,
})
response.status_code = 200
return response
| UTF-8 | Python | false | false | 3,348 | py | 7 | views.py | 6 | 0.526882 | 0.519415 | 0 | 110 | 29.427273 | 95 |
chamemilo/agriculture_sim | 3,504,693,354,524 | 9516ec32cbe4e38d53e2e2740884f5316ad8d73b | b8e7a8114ee8555e4f608cb18668403d569a9041 | /devel/lib/python2.7/dist-packages/champ_msgs/msg/_PointArray.py | 2207b166d26bb266bb54c4259a4ee4652ddc6510 | [] | no_license | https://github.com/chamemilo/agriculture_sim | ebbe8af2c4a99ae39ed34bf7623ed1a782ec24fc | 2153dfa5cee77c11d93f437d77d6b6ea47e1dee1 | refs/heads/master | 2023-06-26T09:29:10.771370 | 2021-07-28T13:59:07 | 2021-07-28T13:59:07 | 390,370,071 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from champ_msgs/PointArray.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import champ_msgs.msg
class PointArray(genpy.Message):
_md5sum = "e70791c6db4935709e33b9966d293c36"
_type = "champ_msgs/PointArray"
_has_header = False # flag to mark the presence of a Header object
_full_text = """champ_msgs/Point lf
champ_msgs/Point rf
champ_msgs/Point lh
champ_msgs/Point rh
================================================================================
MSG: champ_msgs/Point
float32 x
float32 y
float32 z"""
__slots__ = ['lf','rf','lh','rh']
_slot_types = ['champ_msgs/Point','champ_msgs/Point','champ_msgs/Point','champ_msgs/Point']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
lf,rf,lh,rh
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(PointArray, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.lf is None:
self.lf = champ_msgs.msg.Point()
if self.rf is None:
self.rf = champ_msgs.msg.Point()
if self.lh is None:
self.lh = champ_msgs.msg.Point()
if self.rh is None:
self.rh = champ_msgs.msg.Point()
else:
self.lf = champ_msgs.msg.Point()
self.rf = champ_msgs.msg.Point()
self.lh = champ_msgs.msg.Point()
self.rh = champ_msgs.msg.Point()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_12f().pack(_x.lf.x, _x.lf.y, _x.lf.z, _x.rf.x, _x.rf.y, _x.rf.z, _x.lh.x, _x.lh.y, _x.lh.z, _x.rh.x, _x.rh.y, _x.rh.z))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.lf is None:
self.lf = champ_msgs.msg.Point()
if self.rf is None:
self.rf = champ_msgs.msg.Point()
if self.lh is None:
self.lh = champ_msgs.msg.Point()
if self.rh is None:
self.rh = champ_msgs.msg.Point()
end = 0
_x = self
start = end
end += 48
(_x.lf.x, _x.lf.y, _x.lf.z, _x.rf.x, _x.rf.y, _x.rf.z, _x.lh.x, _x.lh.y, _x.lh.z, _x.rh.x, _x.rh.y, _x.rh.z,) = _get_struct_12f().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_12f().pack(_x.lf.x, _x.lf.y, _x.lf.z, _x.rf.x, _x.rf.y, _x.rf.z, _x.lh.x, _x.lh.y, _x.lh.z, _x.rh.x, _x.rh.y, _x.rh.z))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.lf is None:
self.lf = champ_msgs.msg.Point()
if self.rf is None:
self.rf = champ_msgs.msg.Point()
if self.lh is None:
self.lh = champ_msgs.msg.Point()
if self.rh is None:
self.rh = champ_msgs.msg.Point()
end = 0
_x = self
start = end
end += 48
(_x.lf.x, _x.lf.y, _x.lf.z, _x.rf.x, _x.rf.y, _x.rf.z, _x.lh.x, _x.lh.y, _x.lh.z, _x.rh.x, _x.rh.y, _x.rh.z,) = _get_struct_12f().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_12f = None
def _get_struct_12f():
global _struct_12f
if _struct_12f is None:
_struct_12f = struct.Struct("<12f")
return _struct_12f
| UTF-8 | Python | false | false | 5,164 | py | 333 | _PointArray.py | 64 | 0.60244 | 0.588885 | 0 | 143 | 35.111888 | 158 |
laiunce/helpfull_code | 7,876,970,040,129 | 4b7d062db3e3537ffae2e88b67cbf8ec2c80c344 | 410ef1dd9e716ae7353eed867bd415ef0c0757fa | /model_image_recognition/modelo/reconoce_personas.py | 6c4825fd02db6f7f91e561f3e7767d3deb43d831 | [] | no_license | https://github.com/laiunce/helpfull_code | ba428a90ab91a5faa0c4609ae16aaa75836cdd02 | bf297622a4dab2f0760de2279c4334a5c8705971 | refs/heads/master | 2020-04-18T14:46:57.956372 | 2019-04-10T03:44:27 | 2019-04-10T03:44:27 | 167,597,641 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 12 21:13:34 2019
@author: crilaiun
"""
#source activate retinanet
from imageai.Detection import VideoObjectDetection
import os
import cv2
import time
from imageai.Detection import ObjectDetection
# this line is needed because the script sometimes crashes without it; worth revisiting
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
camera = cv2.VideoCapture(0)
execution_path = os.getcwd()
detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath( os.path.join(execution_path , "resnet50_coco_best_v2.0.1.h5"))
detector.loadModel()
custom_objects = detector.CustomObjects(person=True, car=True)
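# limit detection to these two COCO classes; all other detections are ignored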
#detections = detector.detectCustomObjectsFromImage(input_image='tpm/origen0.png', output_image_path= 'tpm/opencv0.png', custom_objects=custom_objects, minimum_percentage_probability=70)
for i in range(4):
return_value, image = camera.read()
cv2.imwrite('tpm/origen'+str(i)+'.png', image)
detections = detector.detectCustomObjectsFromImage(input_image='tpm/origen'+str(i)+'.png', output_image_path= 'tpm/opencv'+str(i)+'.png', custom_objects=custom_objects, minimum_percentage_probability=70)
if len(detections)>0:
cv2.imwrite('tpm/detect/'+str(i)+'.png', image)
#cv2.imwrite('tpm/opencv'+str(i)+'.png', image)
#time.sleep(1)
del camera
| UTF-8 | Python | false | false | 1,371 | py | 122 | reconoce_personas.py | 78 | 0.733771 | 0.706783 | 0 | 46 | 28.782609 | 207 |
rihter007/Po-znaikaDemo | 5,317,169,558,877 | 846c2b3323abf9148e330c085017e02dfdb95e6d | 54594dbf9cab6466157fdc636302d85142cd42dc | /web_server/poznaika/accounts/forms.py | 67249c0b8e2c9d81a7f8687d3e8650842142f9b3 | [] | no_license | https://github.com/rihter007/Po-znaikaDemo | eeb1417311ac5136a33aac68091ea03a1d23eefb | 0ac8a48fdb09bd68f5ea0cd49cccce5627289d57 | refs/heads/master | 2021-01-18T20:39:07.814264 | 2015-09-30T20:32:08 | 2015-09-30T20:32:08 | 28,452,714 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=cp1251
from django import forms
from django.contrib import auth
from django.contrib.auth.models import User
from models import Pupil
from models import Class
def ToUtf(s):
    return s.decode('cp1251').encode('utf8')
class RegisterForm(forms.Form):
UserName = forms.CharField(max_length=30, label=ToUtf("Ваше имя"))
Password1 = forms.CharField(max_length=30, label=ToUtf("Пароль"))
Password2 = forms.CharField(max_length=30, label=ToUtf("Пароль еще раз"))
def clean_UserName(self):
name = self.cleaned_data['UserName']
users = User.objects.filter(username=name)
if len(users) > 0:
raise forms.ValidationError(u"Имя уже существует!")
return name
def clean_Password2(self):
pwd1 = self.cleaned_data['Password1']
pwd2 = self.cleaned_data['Password2']
if pwd1 != pwd2:
raise forms.ValidationError(u"Введенные пароли различаются!")
return pwd1
class LoginForm(forms.Form):
UserName = forms.CharField(max_length=30, label=ToUtf("Ваше имя"))
Password = forms.CharField(max_length=30, label=ToUtf("Пароль"))
def clean_Password(self):
cd = self.cleaned_data
        pwd = cd['Password']
user = auth.authenticate(username=cd['UserName'], password=pwd)
if user is None or not user.is_active:
raise forms.ValidationError(u"Сочетание логин+пароль некорректно!")
return pwd
class AddNameForm(forms.Form):
Name = forms.CharField(max_length=30)
class DeleteNameForm(forms.Form):
Names = forms.MultipleChoiceField()
def __init__(self, choices, labelText, *args):
super(DeleteNameForm, self).__init__(*args)
self.fields['Names'] = forms.MultipleChoiceField(choices=choices)
self.fields['Names'].label = labelText
class AddPupilForm(AddNameForm):
Class = forms.ModelChoiceField(Class.objects.all(), empty_label=None,
to_field_name="Name")
class DeletePupilForm(forms.Form):
Pupils = forms.ModelMultipleChoiceField(Pupil.objects.all(),
label=ToUtf("Список учеников"))
| WINDOWS-1251 | Python | false | false | 2,276 | py | 230 | forms.py | 105 | 0.667596 | 0.65272 | 0 | 64 | 32.484375 | 79 |
dixonaws/PowerCo | 7,988,639,203,546 | d8a03b482cfec8f75342d28fc19c9b807e5542ab | ffa0a0cf22d1de2e73ac45a3a3e12796f3c993ae | /PowerCo.py | a54cf2f77f0b7dc1844e1aaec1bcbff50472890d | [] | no_license | https://github.com/dixonaws/PowerCo | 922575132dccf889bd8a97dd33b28612c6b52417 | 33a8c86fec90724c3f24cd9eb266b52c0012e759 | refs/heads/master | 2021-09-04T02:24:50.051675 | 2018-01-14T16:33:25 | 2018-01-14T16:33:25 | 75,781,510 | 0 | 1 | null | true | 2016-12-06T23:44:51 | 2016-12-06T23:44:50 | 2016-05-24T12:08:14 | 2016-06-28T15:01:11 | 42 | 0 | 0 | 0 | null | null | null | from __future__ import print_function
import urllib2
import json
import time
import datetime
# PowerCo sample skill
# 7-6-2016, John Dixon
# dixonaws@amazon.com, www.github.com/dixonaws
# PowerCo is a sample Alexa skill that demonstrates Lambda
# integration with RESTful APIs to get data about a customer's electricity bill.
# The PIN is hardcoded as "9876" in this skill in the verifyPIN() function
# We only support one customer in this version, "customer 1" in the mainMenu() function
# Customer 1 has one account
# The account has three invoices
# Each invoice includes a service address and an amount due
# This account data is inserted into an in-memory database from the configuration in Bootstrap.groovy
strApiBaseUrl = "http://172.16.1.198:8080"
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print("event.session.application.applicationId=" +
event['session']['application']['applicationId'])
"""
Uncomment this if statement and populate with your skill's application ID to
prevent someone else from configuring a skill that sends requests to this
function.
"""
# if (event['session']['application']['applicationId'] !=
# "amzn1.echo-sdk-ams.app.[unique-value-here]"):
# raise ValueError("Invalid Application ID")
print("event[session][user][userId]=" + str(event['session']['user']['userId']))
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'], event['session'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request'], event['session'])
elif event['request']['type'] == "SessionEndedRequest":
return on_session_ended(event['request'], event['session'])
# ------------------------- on_session_started()
def on_session_started(session_started_request, session):
""" Called when the session starts """
print("on_session_started requestId=" + session_started_request['requestId']
+ ", sessionId=" + session['sessionId'])
# ------------------------- on_launch()
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they
want
"""
print("on_launch requestId=" + launch_request['requestId'] +
", sessionId=" + session['sessionId'])
# Dispatch to your skill's launch
return get_welcome_response()
# ------------------------- on_intent()
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print("on_intent requestId=" + intent_request['requestId'] +
", sessionId=" + session['sessionId'])
# intent_request is a Python dict object
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
print("*** on_intent: I received intent=" + str(intent));
print("*** on_intent: I received intent_name=" + str(intent_name));
# Dispatch to your skill's intent handlers
if intent_name == "VerifyPIN":
return verifyPIN(intent, session)
elif intent_name == "MainMenu":
return mainMenu(intent, session)
elif intent_name == "GetAccount":
return getAccount(intent, session)
elif intent_name == "AccountCommand":
return getAccountCommand(intent, session)
elif intent_name == "AMAZON.HelpIntent":
return get_welcome_response()
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request()
else:
raise ValueError("Invalid intent")
# ------------------------- verifyPIN()
def verifyPIN(intent, session):
# We hardcode the matching PIN for now
intCorrectPIN = 9876
print("*** verifyPIN: I received intent " + str(intent));
# Grab the PIN out of the intent and cast it to an integer
PIN = intent['slots']['PIN']['value']
intReceivedPIN = int(PIN)
print("*** verifyPIN: I received PIN " + str(PIN))
card_title = "Welcome"
# Compare the PIN we received with the correct PIN
if (intReceivedPIN == intCorrectPIN):
return mainMenu()
elif (intReceivedPIN != intCorrectPIN):
speech_output = "Hmmm. That PIN code doesn't match my records";
# Setting this to true ends the session and exits the skill.
should_end_session = True
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session))
# ------------------------- getAccount()
def getAccount(intent, session):
print("*** getAccount: I received intent" + str(intent));
card_title = "Welcome"
# Calculate the due date, 30 days from today
dateNow = datetime.datetime.now()
dateDueDate = (dateNow + datetime.timedelta(days=30))
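    # strftime pieces for the spoken response: %d is the day of month, %B the full month name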
strDueDay = dateDueDate.strftime("%d")
strDueMonth = dateDueDate.strftime("%B")
strCurrentDay = dateNow.strftime("%d")
strCurrentMonth = dateNow.strftime("%B")
selectedAccount = intent['slots']['Account']['value']
url = strApiBaseUrl + "/api/invoices/" + str(selectedAccount)
opener = urllib2.build_opener()
opener.addheaders = [('Accept', 'application/json')]
# response will be a raw JSON string
response = opener.open(url).read()
# parse our JSON object into a Python Dict
JSONinvoice = json.loads(str(response))
# get the values out of the JSON dict object (and cast them to strings)
strAmountDollars = str((JSONinvoice['amountDollars']))
strAmountCents = str((JSONinvoice['amountCents']))
strServicePeriodEnd = str((JSONinvoice['servicePeriodEnd']))
speech_output = "<speak>"
speech_output += "I found details for that account. The current amount due is " + strAmountDollars + " dollars and " + strAmountCents + " cents"
speech_output += " for the period ending " + strCurrentMonth + " " + strCurrentDay + ". Your energy consumption " \
"last period was 49 kilowatt hours."
speech_output += "<break time='0.5s'/>I see that your are enrolled in AutoPay! This amount will be requested from your " \
"financial institution on " + strDueMonth + " " + strDueDay
speech_output += "</speak>"
# speech_output = "Here are some details for that account... account number " + strAccountNumber + ",,," \
# "Total amount due for the month of May: $94.12" + ",,,," \
# "Due date: June 16, 2016" + ",,,," \
# "Consumption: 748 kilowatt hours." + ",,,," \
# "Say 'stats' if you want me to analyze your consumption.";
# Setting this to true ends the session and exits the skill.
should_end_session = True
print("*** done with getAccount(), returning speech...")
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session))
# ------------------------- retrieveAccountInfo()
def retrieveAccountInfo(id):
print("*** in retrieveAccountInfo()")
url = strApiBaseUrl + "/api/invoices/" + str(id)
opener = urllib2.build_opener()
opener.addheaders = [('Accept', 'application/json')]
response = 0
try:
response = opener.open(url, timeout=2).read()
except urllib2.URLError:
print("URL error, trying localhost...")
try:
url = "http://localhost:8080/RESTTest/api/invoices/1"
response = opener.open(url, timeout=2).read()
except urllib2.HTTPError:
print("tried both URLs, giving up")
return
JSONinvoice = json.loads(str(response))
print("*** done with retriveAccountInfo(), returning JSONinvoice object...")
return (JSONinvoice)
# ------------------------- getAccountById()
def getAccountById(accountId):
print("*** in getAccountById(), getting account" + str(accountId))
accountUrl = strApiBaseUrl + "/api/accounts/" + str(accountId)
startTime = int(round(time.time() * 1000))
endTime = 0;
print(accountUrl + ": GETting account... ")
opener = urllib2.build_opener()
# ask the API to return JSON
opener.addheaders = [('Accept', 'application/json')]
response = ""
try:
# our response string should result in a JSON object
response = opener.open(accountUrl).read()
endTime = int(round(time.time() * 1000))
print("done (" + str(endTime - startTime) + " ms).")
except urllib2.HTTPError:
print("Error in GET...")
# decode the returned JSON response into JSONaccount (a Python dict object)
JSONaccounts = json.loads(str(response))
print("*** done with getAccountById(), returning JSONaccounts...")
return (JSONaccounts)
# ------------------------- getAccountCommand()
def getAccountCommand(intent, session):
print("*** getAccountCommand: I received intent" + str(intent));
strAccountNumber = "2 0 1 3 4 dash 0 9 4"
card_title = "Welcome"
speech_output = "Here is the analysis of your bill for the month of May,,," \
"Consumption: 748 kilowatt hours." + ",,,," \
"Which was 119% of April's consumption of 629 kilowatt hours,,," \
"Last year, you consumed 792 kilowatt hours versus 748 this year,,," \
"Your consumption is about average, households of similar type in" \
"your zipcode consume between 512 " \
"and 1,298 kilowatt hours of energy in the summer months,,," \
"I sent a PDF report to your inbox j p dixon@gmail.com";
# Setting this to true ends the session and exits the skill.
should_end_session = True
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session))
# ------------------------- onSessionEnded()
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
print("on_session_ended requestId=" + session_ended_request['requestId'] +
", sessionId=" + session['sessionId'])
# add cleanup logic here
# --------------- Functions that control the skill's behavior ------------------
def getCustomer(id):
print("*** in getCustomer()")
baseurl = strApiBaseUrl + "/api/customers/"
accountUrl = baseurl + str(id)
startTime = int(round(time.time() * 1000))
endTime = 0;
print(accountUrl + ": GETting account... ")
opener = urllib2.build_opener()
# ask the API to return JSON
opener.addheaders = [('Accept', 'application/json')]
response = ""
try:
# our response string should result in a JSON object
response = opener.open(accountUrl).read()
endTime = int(round(time.time() * 1000))
        print("done (" + str(endTime - startTime) + " ms).")
    except urllib2.HTTPError:
        print("Error in GET...")
    # decode the returned JSON response into JSONcustomer (a Python dict object)
JSONcustomer = json.loads(str(response))
print("*** done with getCustomer, returning JSONcustomer...")
return (JSONcustomer)
# --------------- end getCustomer() ------------------
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
print("*** in get_welcome_response()")
session_attributes = {}
card_title = "Welcome"
speech_output = "<speak>Hi there! You're on line with the Power Company! What's your " \
"4 digit PIN?</speak>"
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with this text.
reprompt_text = "You created a 4 digit pin code the first time you enabled the" \
"PowerCo skill. If you remember it, go ahead and say it now."
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
# ------------------------- mainMenu()
def mainMenu():
print("*** in mainMenu()")
session_attributes = {}
card_title = "Welcome"
# hit the API and get the customer details, like the first name, last name, and associated accounts
# for now, we hardcode the customer number
JSONcustomer = getCustomer(1)
accountFirstName = JSONcustomer['firstName']
accountLastName = JSONcustomer['lastName']
JSONaccounts = JSONcustomer['accounts']
intAccounts = len(JSONaccounts)
speech_output = "<speak>"
speech_output += "Great! Hello " + accountFirstName + " " + accountLastName + "! ... "
speech_output += "I found " + str(intAccounts) + " accounts in your profile. Which account can I help you with?"
# loop through the accounts associated with this customer and speak the service addresses
i = 1
for accounts in JSONaccounts:
account = getAccountById(accounts['id'])
speech_output += "Account " + str(i) + "<break time='0.5s'/> <say-as interpret-as='digits'>" + account[
'serviceAddress'] + "</say-as>"
speech_output += "<break time='1s'/>"
i += 1
speech_output += "</speak>"
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with this text.
reprompt_text = "I'm here to help. You can ask me about your current bill, just say the number" \
"corresponding with the account"
should_end_session = False
print("*** done with mainMenu(), now the user should have selected an account...")
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
# ------------------------- handle_session_end_request()
def handle_session_end_request():
card_title = "Session Ended"
speech_output = "Thank you for being a Power Company customer." \
"Have a nice day! "
# Setting this to true ends the session and exits the skill.
should_end_session = True
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session))
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {
# 'outputSpeech': {
# 'type': 'PlainText',
# 'text': output
# },
'outputSpeech': {
'type': 'SSML',
'ssml': output
},
'card': {
'type': 'Simple',
'title': 'SessionSpeechlet - ' + title,
'content': 'SessionSpeechlet - ' + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
def main():
print("*** in main()")
JSONinvoice = retrieveAccountInfo(1)
strAmountDollars = (JSONinvoice['amountDollars'])
print(strAmountDollars)
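

# Standard script entry point (an assumption -- the original file may invoke
# main() elsewhere). It lets the module double as a local test harness while
# staying import-safe for the Lambda runtime.
if __name__ == "__main__":
    main()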
| UTF-8 | Python | false | false | 16,564 | py | 7 | PowerCo.py | 3 | 0.589471 | 0.581804 | 0 | 459 | 35.087146 | 275 |
vancdk/python_moocs | 1,082,331,795,559 | cb5dffedaa8cd7ab029a8411ea45876fdeb03be5 | 6d5b988cb96bca312d97433abc0b009981f2c63f | /coursera/interact_with_the_operating_system_by_google/02_files_and_directories.py | 8eed2ced36fbfe6395abace07bd23de024760f46 | [] | no_license | https://github.com/vancdk/python_moocs | ba6eee569eded3805170a0aade2c2607dd0ed491 | 46b8fe27dfa6adbd86059c742f3d0013b4657ea0 | refs/heads/main | 2023-03-24T22:16:59.511766 | 2021-03-26T10:50:09 | 2021-03-26T10:50:09 | 329,634,536 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Files
"""
import os
file= "file.dat"
if os.path.isfile(file):
print(os.path.isfile(file))
print(os.path.getsize(file))
else:
print(os.path.isfile(file))
print("File not found")
"""
Directories
"""
# Get current directory
print(os.getcwd())
# Create a directory (raises FileExistsError if it already exists)
os.mkdir("new_dir")
# Change the current working directory
os.chdir("new_dir")
# Remove an empty directory
os.mkdir("newer_dir")
os.rmdir("newer_dir")
# Go back up one level so the relative paths below resolve from the original directory
os.chdir("..")
# List all contents of a dir (here, "downloads" must exist in the current directory)
os.listdir("downloads")
"""
Below is a snippet of code that displays the content
of the downloads directory along with the type of content
By using os.path.join we can concatenate directories
in a way that can be used with other os.path() functions
in all operating systems.
"""
import os
dir = "downloads"
for name in os.listdir(dir):
fullname = os.path.join(dir, name)
if os.path.isdir(fullname):
print("{} is a directory".format(fullname))
else:
print("{} is a file".format(fullname))
| UTF-8 | Python | false | false | 968 | py | 32 | 02_files_and_directories.py | 32 | 0.703512 | 0.703512 | 0 | 50 | 18.36 | 57 |
mnhousel1992/flask-greet-calc | 7,859,790,162,421 | 675c2577de4ddbbf4fe4569d64ba14e4cc8437ec | af299633a8906728733d4287877d39166a20c59d | /calc/app.py | ccb4da068f5cf439afe5cda04877c46ddd913421 | [] | no_license | https://github.com/mnhousel1992/flask-greet-calc | 410d6cba021821aa5be0ff9130f8288eba5f3462 | c1d554e544670d6ddfa011e9906a6e56236be285 | refs/heads/main | 2022-12-27T02:55:39.793212 | 2020-10-16T14:38:45 | 2020-10-16T14:38:45 | 304,649,691 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Put your app in here.
from flask import Flask, request
from operations import add, sub, mult, div
app = Flask(__name__)
# @app.route('/add')
# def add_call():
# a = int(request.args.get("a"))
# b = int(request.args.get("b"))
# result = add(a, b)
# return str(result)
# @app.route('/sub')
# def sub_call():
# a = int(request.args.get("a"))
# b = int(request.args.get("b"))
# result = sub(a, b)
# return str(result)
# @app.route('/mult')
# def mult_call():
# a = int(request.args.get("a"))
# b = int(request.args.get("b"))
# result = mult(a, b)
# return str(result)
# @app.route('/div')
# def div_call():
# a = int(request.args.get("a"))
# b = int(request.args.get("b"))
# result = div(a, b)
# return str(result)
operators = {
"add": add,
"sub": sub,
"mult": mult,
"div": div
}
@app.route('/math/<operation>')
def operation_call(operation):
a = int(request.args.get("a"))
b = int(request.args.get("b"))
result = operators[operation](a, b)
return str(result)
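

# Example requests against the dev server (a sketch -- host/port assume the
# default `flask run` settings):
#   GET http://localhost:5000/math/add?a=5&b=3   -> "8"
#   GET http://localhost:5000/math/mult?a=4&b=6  -> "24"
# Note: an unknown <operation> raises a KeyError (HTTP 500); mapping it to a
# 404 via flask.abort would be a reasonable hardening step.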
| UTF-8 | Python | false | false | 1,066 | py | 2 | app.py | 1 | 0.554409 | 0.554409 | 0 | 52 | 19.5 | 42 |
theresaswayne/6002x | 10,136,122,859,771 | 98c0d64c8ff97e5c18696ae6f7c628388f153ea9 | e4bbbe84890ecd3e7db1056361d54e3557d5d766 | /6001x/test_type_testing.py | 01f40a935ac73c1525561239f7606f026d515bcb | [] | no_license | https://github.com/theresaswayne/6002x | 8a0717d39ffb52452c5f8e2d01e8e8d9e0d27a01 | dc01c6be406e0b29d02430bcad7aaf13a8e8127b | refs/heads/master | 2020-05-18T15:11:00.039387 | 2020-05-10T18:36:41 | 2020-05-10T18:36:41 | 84,256,499 | 0 | 0 | null | false | 2017-03-15T13:53:33 | 2017-03-07T23:27:31 | 2017-03-07T23:27:36 | 2017-03-15T13:53:33 | 9 | 0 | 0 | 0 | Python | null | null | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
x = [6, 5]
y = type(x)
print(y)
if isinstance(x, int):
print("it's an int")
elif isinstance(x, str):
print("it's a str")
else:
print("something else")
if type(x) == int:
print("it's still an int")
if type(x) == str:
print("it's still a str")
if type(x) == list:
print("it's a list")
| UTF-8 | Python | false | false | 396 | py | 91 | test_type_testing.py | 83 | 0.563131 | 0.555556 | 0 | 26 | 14.230769 | 32 |
pintonos/theMatchMoverProject | 10,746,008,207,108 | 66fb664b7db56d1e829bb1b14ac5447336506d10 | fe351a52fceed480cdf19d881defb09088c1db8a | /src/functions/matcher_functions.py | 4c420fce6da52115e9e67e7454e47a7d0f11e68a | [] | no_license | https://github.com/pintonos/theMatchMoverProject | 2232e26587599bd2a99f9a29e4082bb6c0456b35 | 81548b72f9ad3df988b4a51ca23a519c307d5b16 | refs/heads/master | 2021-10-29T12:18:32.559045 | 2020-06-20T14:48:07 | 2020-06-20T14:48:07 | 247,703,179 | 2 | 1 | null | false | 2020-03-16T14:31:57 | 2020-03-16T13:00:21 | 2020-03-16T13:51:54 | 2020-03-16T14:31:56 | 368 | 1 | 1 | 0 | Python | false | false | from functions import *
from util import models
from matplotlib import pyplot as plt
import cv2
import numpy as np
'''
Functions required for automatic point matching
These functions are used for point matching between two frames
in order to perform a stereo calibration.
'''
def lowes_ratio_test(kp1, kp2, matches, threshold=0.8):
"""
Ratio test as per Lowe's paper.
"""
pts1 = []
pts2 = []
good = []
for i, match in enumerate(matches):
if len(match) < 2:
continue
(m, n) = match
if m.distance < threshold * n.distance: # TODO tweak ratio
pts2.append(kp2[m.trainIdx].pt)
pts1.append(kp1[m.queryIdx].pt)
good.append(m)
return pts1, pts2, good
def get_flann_matches(kp1, des1, kp2, des2, detector):
"""
Computes matches between keypoints with FLANN algorithm. Filters matches with Lowe's ratio test.
"""
# FLANN parameters
    if detector == models.Detector.ORB:
        # ORB yields binary descriptors, so use LSH (FLANN_INDEX_LSH = 6)
        index_params = dict(algorithm=6,
                            table_number=12,
                            key_size=20,
                            multi_probe_level=2)
    else:
        # float descriptors (SIFT/SURF/FAST+SIFT) use KD-trees (FLANN_INDEX_KDTREE = 0)
        index_params = dict(algorithm=0, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
# Filter for good matches
pts1, pts2, matches = lowes_ratio_test(kp1, kp2, matches)
return np.int32(pts1), np.int32(pts2), matches
def get_brute_force_matches(kp1, des1, kp2, des2, detector, ratio_test=True):
'''
Computes matches between keypoints with BF algorithm.
:param kp1 First list of keypoints
:param des1 Descriptors of first keypoints
:param kp2 Second list of keypoints
:param des2 Descriptors of second keypoints
:param detector Detector method previously used to match points
:param ratio_test defines how matches should be validated. If ratio_test is True, the matches are checked with
    Lowe's ratio test. Otherwise, the matches are sorted by distance and the worst half is dropped.
:returns pts1, pts2, matches
'''
if detector == models.Detector.ORB:
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=not ratio_test)
else:
bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=not ratio_test)
if ratio_test:
matches = bf.knnMatch(des1, des2, k=2)
pts1, pts2, good_matches = lowes_ratio_test(kp1, kp2, matches)
else:
# Match descriptors
matches = bf.match(des1, des2)
pts1, pts2 = [], []
# sort matches and drop worst ones
matches = sorted(matches, key=lambda x: x.distance)
num_good_matches = len(matches) // 2
matches = matches[:num_good_matches]
for i, match in enumerate(matches):
pts2.append(kp2[match.trainIdx].pt)
pts1.append(kp1[match.queryIdx].pt)
good_matches = matches[:num_good_matches]
return np.int32(pts1), np.int32(pts2), good_matches
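
# Note: crossCheck and the ratio test above are alternative filters -- that is
# why crossCheck is set to `not ratio_test`. OpenCV's crossCheck only works with
# bf.match() (one candidate per query point), while Lowe's ratio test needs the
# two nearest candidates returned by bf.knnMatch(k=2).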
def detect_and_match_keypoints(img1, img2, detector=models.Detector.ORB, filtered=True,
matcher=models.Matcher.BRUTE_FORCE, show_matches=False):
'''
Detects, matches and filters keypoints between two images.
:param img1 First image to match points
:param img2 Second image to match points
:param detector The keypoint detector (SIFT, SURF, FAST, ORB)
    :param filtered Apply a bilateral filter that removes noise while preserving edges
:param matcher The keypoint matcher (BRUTE_FORCE, FLANN)
:param show_matches Debugging option to draw matches into the images
:returns pts1, pts2, matches
'''
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    # Bilateral filtering removes noise much like a Gaussian blur, but preserves edges
if filtered:
gray1 = cv2.bilateralFilter(gray1, 5, 50, 50)
gray2 = cv2.bilateralFilter(gray2, 5, 50, 50)
# Find the keypoints and descriptors
kp1, kp2 = None, None
des1, des2 = None, None
if detector == models.Detector.SIFT:
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(gray1, None)
kp2, des2 = sift.detectAndCompute(gray2, None)
elif detector == models.Detector.SURF:
surf = cv2.xfeatures2d.SURF_create()
kp1, des1 = surf.detectAndCompute(gray1, None)
kp2, des2 = surf.detectAndCompute(gray2, None)
elif detector == models.Detector.FAST:
fast = cv2.FastFeatureDetector_create()
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.compute(gray1, fast.detect(gray1, None))
kp2, des2 = sift.compute(gray2, fast.detect(gray2, None))
elif detector == models.Detector.ORB:
orb = cv2.ORB_create(nfeatures=2000, scaleFactor=1.5)
kp1, des1 = orb.detectAndCompute(gray1, None)
kp2, des2 = orb.detectAndCompute(gray2, None)
if kp1 is None or kp2 is None or des1 is None or des2 is None:
raise Exception('Unknown detector [' + str(detector) + ']')
# Match points
pts1, pts2, matches = None, None, None
if matcher == models.Matcher.FLANN:
pts1, pts2, matches = get_flann_matches(kp1, des1, kp2, des2, detector)
elif matcher == models.Matcher.BRUTE_FORCE:
pts1, pts2, matches = get_brute_force_matches(kp1, des1, kp2, des2, detector)
if pts1 is None or pts2 is None:
raise Exception('Unknown matcher [' + str(matcher) + ']')
# Subpixel refinement
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
pts1 = cv2.cornerSubPix(gray1, np.float32(pts1), (5, 5), (-1, -1), criteria)
pts2 = cv2.cornerSubPix(gray2, np.float32(pts2), (5, 5), (-1, -1), criteria)
if show_matches:
matches = sorted(matches, key=lambda x: x.distance)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:100], None, flags=2)
plt.imshow(img3), plt.show()
return pts1, pts2, matches
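

# A minimal usage sketch (file names and the intrinsic matrix K are
# hypothetical -- adjust to the actual footage and calibration):
#
#   img1 = cv2.imread('frame_000.png')
#   img2 = cv2.imread('frame_001.png')
#   pts1, pts2, _ = detect_and_match_keypoints(img1, img2,
#                                              detector=models.Detector.ORB,
#                                              matcher=models.Matcher.BRUTE_FORCE)
#   E, mask = cv2.findEssentialMat(pts1, pts2, K, method=cv2.RANSAC)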
| UTF-8 | Python | false | false | 6,238 | py | 25 | matcher_functions.py | 14 | 0.647323 | 0.610933 | 0 | 167 | 36.353293 | 114 |
RajanSikarwar/Miscellaneous_python_work | 19,481,971,672,427 | 804d8589740881270bdc74843521bfa535c7e92f | 9d3ac31713b0cdfb65de57ad6555d7973a1f7d71 | /Socket/server.py | 4cf71a25446ecdc971f1ed5252685980e9e9ab18 | [] | no_license | https://github.com/RajanSikarwar/Miscellaneous_python_work | ca4769bcaa4d2b4c6b5f5870aa74311e2a85e1c7 | d4aa0ea8c1f4d5796bf0d6cc89048b8e61064b7f | refs/heads/master | 2023-08-15T19:56:58.265232 | 2021-10-20T09:34:50 | 2021-10-20T09:34:50 | 419,251,191 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import socket
import time
import pickle
HEADERSIZE = 10
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((socket.gethostname(), 1234))
s.listen(5)

while True:
    clientsocket, address = s.accept()
    print(f"You have established connection with {address}")

    d = {1: "hey", 2: "There"}
    msg = pickle.dumps(d)
    # prefix the pickled payload with a fixed-width length header so the
    # client knows how many bytes to expect
    msg = bytes(f'{len(msg):<{HEADERSIZE}}', 'utf-8') + msg
    clientsocket.send(msg)
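
# A minimal matching client sketch (an assumption -- not part of this repo).
# It reads the fixed-width header first, then collects exactly that many bytes
# before unpickling:
#
#   import socket, pickle
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect((socket.gethostname(), 1234))
#   msglen = int(c.recv(10))          # header is HEADERSIZE (10) bytes
#   data = b''
#   while len(data) < msglen:
#       data += c.recv(16)
#   print(pickle.loads(data))         # -> {1: 'hey', 2: 'There'}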
| UTF-8 | Python | false | false | 429 | py | 23 | server.py | 22 | 0.648019 | 0.624709 | 0 | 21 | 19 | 60 |