| column | dtype | observed values |
|---|---|---|
| repo_name | string | length 7–111 |
| __id__ | int64 | 16.6k–19,705B |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 5–151 |
| content_id | string | length 40 |
| detected_licenses | list | |
| license_type | string | 2 classes |
| repo_url | string | length 26–130 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–42 |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 14.6k–687M, nullable |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 12 classes |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_size | int64 | 0–10.2M, nullable |
| gha_stargazers_count | int32 | 0–178k, nullable |
| gha_forks_count | int32 | 0–88.9k, nullable |
| gha_open_issues_count | int32 | 0–2.72k, nullable |
| gha_language | string | length 1–16, nullable |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | string | length 10–2.95M |
| src_encoding | string | 5 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10–2.95M |
| extension | string | 19 classes |
| num_repo_files | int64 | 1–202k |
| filename | string | length 4–112 |
| num_lang_files | int64 | 1–202k |
| alphanum_fraction | float64 | 0.26–0.89 |
| alpha_fraction | float64 | 0.2–0.89 |
| hex_fraction | float64 | 0–0.09 |
| num_lines | int32 | 1–93.6k |
| avg_line_length | float64 | 4.57–103 |
| max_line_length | int64 | 7–931 |
jporcelli/suggest | 18,262,200,965,026 | 83c16e71ff3177f247ea84d3d9a2b1606e6ba898 | a393e6041c5dadd8aa1e4dbfdc40e7c59dcc2f69 | /suggest.py | e13e7a9aa7903a02fb85956a26e497e9ee2d54ce | [] | no_license | https://github.com/jporcelli/suggest | ab07e138d079ee0574704f2d60981f5c32b51ed0 | 06f04e4136cec716bb37c0ef2d24375c118381ce | refs/heads/master | 2020-06-04T21:36:14.328535 | 2014-02-28T06:17:57 | 2014-02-28T06:17:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Python module serving as the server-side implementation
of an auto-complete app.
__author__ James Porcelli
"""
from twisted.web import server, resource
from twisted.internet import reactor
import trie
import json
"""
The auto-suggest server is built on the event-driven Twisted
(twistedmatrix) networking library, which can run on
select, poll, epoll, or kqueue.
"""
class Suggest(resource.Resource):
isLeaf = True
"""
Create the Trie which will then be available in memory
upon starting of the server
"""
def __init__(self):
# @TODO - Create a logger for this server and
# log the creation of this resource
self.t = trie.Trie()
with open('input.txt', 'rt') as f:
for line in f:
self.t.insert(line.strip('\n '))
"""
Handle only HTTP GET requests where the prefix
to search on is specified by the key 'q'
"""
def render_GET(self, request):
        q = request.args['q'][0]  # Twisted stores each query argument as a list of values
# Return a list of results keyed on the prefix that was
# used to search with
return json.dumps({q : self.t.getDescendents(q)})
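# Example (assumed usage, values illustrative): GET http://localhost:8080/?q=ca
# would return JSON like {"ca": ["cab", "cat", ...]}, depending on the words in input.txt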
reactor.listenTCP(8080, server.Site(Suggest()))
reactor.run()
| UTF-8 | Python | false | false | 1,080 | py | 3 | suggest.py | 2 | 0.70463 | 0.700926 | 0 | 47 | 21.851064 | 57 |
hweeeeeei/pythonLearn | 42,949,697,043 | 6cccf9efec6100c77ebebefd3491d48674b9b197 | 62798715f92ed031a415f385172596dbc1688894 | /6.常用内建模块/编码base64.py | fbe8d216dd4f5f939e7c7558a27434776559e751 | [] | no_license | https://github.com/hweeeeeei/pythonLearn | e06d56b424ebf84a0dea199e6a1d7587d9998680 | 096c9beee9a6f154d1e65d9bf2ea1edca8ec9e37 | refs/heads/master | 2022-12-15T08:56:03.566520 | 2020-09-09T02:14:43 | 2020-09-09T02:14:43 | 282,863,483 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Base64 is a method of representing arbitrary binary data using 64 characters.
import base64
print(base64.b64encode(b'binary\x00string'))
print(base64.b64decode(b'YmluYXJ5AHN0cmluZw=='))
print(base64.b64encode(b'i\xb7\x1d\xfb\xef\xff'))
# URL-safe Base64 encoding turns the characters + and / into - and _:
print('url safe', base64.urlsafe_b64encode(b'i\xb7\x1d\xfb\xef\xff'))
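# The two encodings of b'i\xb7\x1d\xfb\xef\xff' above illustrate the difference:
# standard b64encode gives b'abcd++//', while urlsafe_b64encode gives b'abcd--__'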
# Base64 is a table-lookup encoding and cannot be used for encryption
# Base64 encodes arbitrary binary data as text; it is commonly used to carry small amounts of binary data in URLs, cookies, and web pages.
| UTF-8 | Python | false | false | 571 | py | 60 | 编码base64.py | 57 | 0.784416 | 0.690909 | 0.028571 | 14 | 26.5 | 69 |
moneeshkumar95/100_days_of_code_challenge_python | 8,624,294,375,739 | d85c3ffc911b903ee5408e50d948f1edd30db731 | 66a087484878baadd91c33f8a96adad62c38aecf | /Day_02.py | 94bb31a05b16d46dad3329d91b54440ef383aa89 | [] | no_license | https://github.com/moneeshkumar95/100_days_of_code_challenge_python | 5df21ee80ab5f271f849dbbf824b232ebac31ede | 881161f1d59236c66d9c3931aa2a05636cbd080b | refs/heads/main | 2023-07-06T21:44:53.479785 | 2021-08-11T03:51:06 | 2021-08-11T03:51:06 | 394,691,682 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#100 days of python coding challenge
#Day 2 : Finding the maximum of two numbers
num_1 = int(input("Enter the first number: "))
num_2 = int(input("Enter the second number: "))
print("\nResults of Method_1")
if num_1 < num_2:
print(f"{num_2} is the maximum")
else:
print(f"{num_1} is the maximum")
print("\nResults of Method_2")
def max_finder(a, b):
    # use the parameters rather than the globals so the function is reusable
    if a > b:
        return f"{a} is the maximum"
    return f"{b} is the maximum"
print(max_finder(num_1,num_2))
print("\nResults of Method_3")
maximum = max(num_1,num_2)
print(f"{maximum} is the maximum")
| UTF-8 | Python | false | false | 569 | py | 95 | Day_02.py | 91 | 0.657293 | 0.623902 | 0 | 20 | 27.5 | 47 |
jayanthbhat/Products_app | 5,866,925,333,109 | 2ad4ef34b56eedae0126c9699476e26341d8a2e4 | ace3d11563a51dd81c807bcfab1b982a61f569d8 | /ufaber/products/views.py | d0b8f432c63d85a0e2b91c8979d8dcf647ea2595 | [] | no_license | https://github.com/jayanthbhat/Products_app | 35412129b6e077315e51a63d0e524fc8daf60977 | 36a5f8d543a8de3929773a980acd10274f311dc8 | refs/heads/master | 2022-11-29T16:48:31.872205 | 2020-08-12T13:16:36 | 2020-08-12T13:16:36 | 287,008,420 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
from rest_framework import status
from rest_framework.response import Response
from rest_framework.generics import ListAPIView,ListCreateAPIView,RetrieveAPIView,UpdateAPIView,DestroyAPIView,CreateAPIView,RetrieveUpdateAPIView
from .serializers import ProductSerializer,ListAllCategorySerializer,ListSubcategorySerializer
from products.models import Products,Category,Subcategory
from products.forms import AddProductForm
import json
from django.core.serializers.json import DjangoJSONEncoder
class PostCreateAPIView(ListCreateAPIView):
serializer_class = ProductSerializer
def get_queryset(self):
return Products.objects.all()
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid(raise_exception=True):
product_name = serializer.data['product_name']
category_id = serializer.data['category']
subcategory_id = serializer.data['subcategory']
try:
category=Category.objects.get(id=category_id)
subcategory=Subcategory.objects.get(id=subcategory_id,category=category)
pd=Products.objects.create(product_name=product_name,subcategory=subcategory,category=category)
content = {"success":"Product Added Successfully"}
return Response(content,status=status.HTTP_200_OK)
except Category.DoesNotExist:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
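# Example JSON body for the POST endpoint above (field names taken from the
# serializer usage; values illustrative):
# {"product_name": "Shirt", "category": 1, "subcategory": 2}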
class ListAllCategoryAPIView(ListAPIView):
serializer_class = ListAllCategorySerializer
def get_queryset(self):
# current_user = self.request.user
return Category.objects.all()
class ListSubcategoryAPIView(ListAPIView):
serializer_class = ListSubcategorySerializer
def get_queryset(self):
category_id = self.request.query_params.get('category_id', None)
if category_id:
return Subcategory.objects.filter(category__id=category_id)
else:
return Subcategory.objects.all()
class ListProductByCategoryAPIView(ListAPIView):
serializer_class = ProductSerializer
def get_queryset(self):
category_id = self.request.query_params.get('category_id', None)
if category_id:
return Products.objects.filter(category__id=category_id)
else:
return Products.objects.all()
class ListProductBySubcategoryAPIView(ListAPIView):
serializer_class = ProductSerializer
def get_queryset(self):
subcategory_id = self.request.query_params.get('subcategory_id', None)
if subcategory_id:
return Products.objects.filter(subcategory__id=subcategory_id)
else:
return Products.objects.all()
def products(request):
products=Products.objects.all()
categories=Category.objects.all()
sub_categories=Subcategory.objects.all()
sub=[]
for i in sub_categories:
sub.append({
"subcategory_name":i.subcategory_name,
"subcategory_id":i.id,
"category":i.category.id,
})
subcategory_json = json.dumps(list(sub), cls=DjangoJSONEncoder)
form=AddProductForm()
context = {"products":products,"form":form,"categories":categories,'sub_categories':sub_categories,'subcategory_json':subcategory_json}
return render(request,'application_app/home.html',context)
def add_product(request):
    # build the querysets and the form up front so both branches can render the page
    products_qs = Products.objects.all()
    categories = Category.objects.all()
    sub_categories = Subcategory.objects.all()
    form = AddProductForm()
    if request.method == 'POST':
        product_name = request.POST.get('product_name')
        category_id = request.POST.get('category')
        subcategory_id = request.POST.get('subcategory')
        try:
            category = Category.objects.get(id=category_id)
            subcategory = Subcategory.objects.get(id=subcategory_id, category=category)
            Products.objects.create(product_name=product_name, subcategory=subcategory, category=category)
            success = "Product Added Successfully"
            context = {"products": products_qs, "form": form, "categories": categories, 'sub_categories': sub_categories, "success": success}
            return render(request, 'application_app/home.html', context)
        except Category.DoesNotExist:
            # plain Django view, so return an HttpResponse rather than a DRF Response
            return HttpResponse(status=400)
    context = {"products": products_qs, "form": form, "categories": categories, 'sub_categories': sub_categories}
    return render(request, 'application_app/home.html', context)
| UTF-8 | Python | false | false | 4,712 | py | 11 | views.py | 7 | 0.686545 | 0.683998 | 0 | 109 | 42.238532 | 146 |
BlackLight/platypush | 13,314,398,663,886 | 1819609ca2f6f79495bd2cc68c12bd2613c919ef | 60d6b8501d0be546437b26a6ee1f9fab97ec3897 | /platypush/schemas/system/_user/_base.py | be32bb4f638c9512bc18870da9fb09980abcbf03 | ["MIT"] | permissive | https://github.com/BlackLight/platypush | 68284a85b2f9eef303d26b04530f075927b5834a | 446bc2f67493d3554c5422242ff91d5b5c76d78a | refs/heads/master | 2023-08-31T21:01:53.519960 | 2023-08-29T22:05:38 | 2023-08-29T22:05:38 | 109,421,017 | 265 | 25 | MIT | false | 2023-09-01T23:15:49 | 2017-11-03T16:56:24 | 2023-08-08T22:58:36 | 2023-09-01T23:15:49 | 56,463 | 261 | 22 | 4 | Python | false | false |
from datetime import datetime
from dateutil.tz import gettz
from marshmallow import pre_load
from .._base import SystemBaseSchema
class UserBaseSchema(SystemBaseSchema):
"""
Base schema for system users.
"""
@pre_load
def pre_load(self, data: dict, **_) -> dict:
data = super().pre_load(data)
started_ts = data.pop('started', None)
if started_ts is not None:
data['started'] = datetime.fromtimestamp(started_ts).replace(tzinfo=gettz())
data['username'] = data.pop('name', data.pop('username', None))
return data
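        # e.g. {'name': 'root', 'started': 1693000000.0} would become
        # {'username': 'root', 'started': <tz-aware datetime>} (illustrative values)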
| UTF-8 | Python | false | false | 590 | py | 1,332 | _base.py | 733 | 0.642373 | 0.642373 | 0 | 22 | 25.818182 | 88 |
Prin-Meng/NetDevOps | 13,666,585,965,242 | 353edfd320ebce996e01ac33c87fb2d470f4b7bd | 55c6343fa98a97ca375b53e244adfcf721fb68e5 | /python_basic/task_day10/task_1/qytang_ssh.py | 574d2d93db66d28f9bc12e1305dd2c0d7a14157c | [] | no_license | https://github.com/Prin-Meng/NetDevOps | 7a835879fb55c26b792d06c729dcaf58c1427c5a | c852cdee300135320c3844c42755f0f6b1b6688a | refs/heads/master | 2023-06-01T22:39:49.041141 | 2021-06-20T02:34:00 | 2021-06-20T02:34:00 | 345,110,429 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# paramiko needs to be installed via pip
import paramiko
# time is imported to ensure the SSH terminal does not fall behind when commands are entered or echoed too quickly, which would leave only part of the output visible; netmiko already solves this problem automatically
import time
def ssh(ip, username, password, cmds, port=22):
try:
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ip, port, username, password, timeout=5, compress=True)
print("You have successfully connect to " + ip + '\n')
command = ssh.invoke_shell()
for cmd in cmds:
command.send(cmd)
time.sleep(2)
output = command.recv(65535)
x = output.decode('ascii')
return x
except paramiko.ssh_exception.AuthenticationException:
print("User authentication failed for " + ip + ".")
return
if __name__ == '__main__':
    # Three variables holding SW3's IP address and the SSH username and password
ip = "192.168.56.11"
username = "prin"
password = "Huawei@123"
cmds = []
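    # config.txt is assumed to hold one device CLI command per line; the trailing
    # '\n' from readlines() is kept on purpose, since invoke_shell() needs the
    # newline to actually execute each command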
with open('config.txt', 'r') as f:
for line in f.readlines():
cmds.append(line)
print(cmds)
print(ssh(ip, username, password, cmds))
| UTF-8 | Python | false | false | 1,286 | py | 156 | qytang_ssh.py | 124 | 0.617438 | 0.596975 | 0 | 39 | 27.820513 | 78 |
Deeplayer/CS231n-winter-2016 | 4,148,938,439,364 | 87d35a04e3ee2814c393b042774555363b6d8cc0 | 04d3c09db31e2251bca993ea0e986e086cd5f663 | /RNN/test.py | 7576441110d966151fe6857ef0b4827f7c74f465 | [] | no_license | https://github.com/Deeplayer/CS231n-winter-2016 | 54b27f818cef93759fc1406b3b240fd3c4f51352 | 45faf6350b8c3a8719cb4ca7263ae4d3139982f1 | refs/heads/master | 2021-01-20T09:36:56.015678 | 2017-05-04T14:49:50 | 2017-05-04T14:49:50 | 90,266,981 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from rnn_layers import *
from captioning_solver import CaptioningSolver
from rnn import CaptioningRNN
from coco_utils import load_coco_data, sample_coco_minibatch, decode_captions, fc_coco_minibatch
from image_utils import image_from_url
data = load_coco_data()
lstm_model = CaptioningRNN(
cell_type='lstm',
word_to_idx=data['word_to_idx'],
input_dim=data['train_features'].shape[1],
hidden_dim=512,
wordvec_dim=512,
reg=1e-8,
dtype=np.float32,
)
lstm_solver = CaptioningSolver(lstm_model, data,
update_rule='rmsprop',
num_epochs=20,
batch_size=256,
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=5000,
)
lstm_solver.train()
# Plot the training losses
plt.subplot(2, 1, 1)
plt.plot(lstm_solver.loss_history)
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title('Training loss history')
# Plot the training/validation accuracy
plt.subplot(2, 1, 2)
plt.plot(lstm_solver.train_acc_history, label='train')
plt.plot(lstm_solver.val_acc_history, label='val')
plt.title('Accuracy')
plt.xlabel('Epoch')
plt.legend(loc='lower right')
plt.show()
for split in ['train', 'val']:
minibatch = sample_coco_minibatch(data, split=split, batch_size=20)
gt_captions, features, urls = minibatch
gt_captions = decode_captions(gt_captions, data['idx_to_word'])
sample_captions = lstm_model.sample(features)
sample_captions = decode_captions(sample_captions, data['idx_to_word'])
for gt_caption, sample_caption, url in zip(gt_captions, sample_captions, urls):
plt.imshow(image_from_url(url))
plt.title('%s\n%s\nGT:%s' % (split, sample_caption, gt_caption))
plt.axis('off')
plt.show()
| UTF-8 | Python | false | false | 1,848 | py | 34 | test.py | 33 | 0.65368 | 0.637446 | 0 | 63 | 28.31746 | 96 |
beyzasubasi/python-exercise | 17,978,733,103,132 | 553eaca5a3206d6f00b72f76a8ed3cc3f7ee2024 | b114cc858c2e5f1bb055ec70018d7378b8f94545 | /PYTHON/DERS2-11.1.py | 5818c1c504b5f917b83dedd136113ba3511f931b | [] | no_license | https://github.com/beyzasubasi/python-exercise | fa89129eec794a63003852aa6d1b85689ca426e4 | 9ef8a5264502cb367970a4c143a33ebae9788330 | refs/heads/main | 2023-03-20T18:40:19.895372 | 2021-03-15T22:31:24 | 2021-03-15T22:31:24 | 347,605,535 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# is the entered number prime or not?
p = int(input("Enter a number: "))
asal = 0  # 'asal' is Turkish for 'prime'
for n in range(2, p//2+1):  # going only up to the square root of the number would suffice
    if(p//n*n == p):  # true when n divides p evenly
        asal = 1
if(asal==0):
    print("the number is prime")
else:
    print("the number is not prime")
| UTF-8 | Python | false | false | 283 | py | 88 | DERS2-11.1.py | 88 | 0.618519 | 0.596296 | 0 | 13 | 19.846154 | 68 |
adarshrao007/python | 9,835,475,113,733 | 02fd2e4e353d8fec4d026683b7fabab1e067a4bf | 55733c62eeeebbaecc06d97a38173dea6b701a46 | /8.py | 4ac22798f4cc8ddda7b4d5629ceb7e2f4af15d22 | [] | no_license | https://github.com/adarshrao007/python | 729c642b602b9c83b05d6421c278042f83bae839 | 0567b0ca05b27a295c22f9764b4aac0609db421c | refs/heads/master | 2020-03-28T04:16:32.460225 | 2018-09-06T19:00:05 | 2018-09-06T19:00:05 | 147,704,580 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#Implement a program to create a list with two tuple of fruits and vegetables. Access fruits separately and vegetables separately.
tuple1 = ('carrot', 'potato', 'tomato', 'beans')    # vegetables
print(tuple1)
tuple2 = ('mango', 'apple', 'banana', 'pineapple')  # fruits
print(tuple2)
mylist = list(zip(tuple1, tuple2))
for pair in mylist:
    print(pair)
| UTF-8 | Python | false | false | 334 | py | 9 | 8.py | 9 | 0.739521 | 0.721557 | 0 | 9 | 36.111111 | 131 |
Edyta2801/Python-kurs-infoshareacademy | 7,619,271,983,804 | 66098c0dd265b598cd74e042c2e11866de420402 | c384046bc0e7d49f46feaf0c6722523f81b41214 | /code/Day_4/Homework_Mario_Pyramid.py | 4b39abd73c758cdc678a6b5da260e0b8da67f916 | [] | no_license | https://github.com/Edyta2801/Python-kurs-infoshareacademy | b0c8c9e2de4e980cc1b10aed80a156ac83fdd4aa | 41bdbbad8a15b16220a82fb06c7bd353792fe6d7 | refs/heads/master | 2020-12-15T18:38:21.157330 | 2020-01-20T22:48:28 | 2020-01-20T22:48:28 | 235,212,406 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Wymagania:
Narysuj na ekranie piramidę Mario
(wypisując odpowiednio znaki #)
jako input podaj wysokość piramidy
Piramida wysokości 3 ma wyglądać:
#
###
#####
1 - użyj jedynie pętli i znaków "#" oraz spacji
2 - przeczytaj https://docs.python.org/3/library/string.html aby ułatwić sobie życie
"""
| UTF-8 | Python | false | false | 318 | py | 163 | Homework_Mario_Pyramid.py | 160 | 0.727869 | 0.714754 | 0 | 15 | 19.333333 | 84 |
yashmanne/Arab_ABC-LTP_Linkage | 17,154,099,409,306 | ace669e03defbd8e208cbe5897eaee27e3493865 | 5ddc7ecd58169a6204dc610ef344213e0a6c03a4 | /src/ShortestPath.py | 2199fea664e311c2a98a8aa5e85a143f672df80e | [] | no_license | https://github.com/yashmanne/Arab_ABC-LTP_Linkage | 02c8c683f1fcdad5b2789548755639aa541eb0a8 | 179e6ecfb1f1e7e36c3c987fe8e43deb8b5cc2b3 | refs/heads/master | 2023-04-06T13:52:38.101278 | 2023-03-24T03:08:48 | 2023-03-24T03:08:48 | 246,419,422 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Import necessary Libraries
import os
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import seaborn as sns
import collections
import pickle # used to store cleaned data
import numpy.ma as ma # masking for numpy Arrays
boolVal = False
attedDataName = 'Overlapping_GenesMR.p'
bioGRIDfileName = 'BioGRID_with_ATTEDMR.p'
attedData = pickle.load(open(attedDataName, 'rb'))
genesGRID = pickle.load(open(bioGRIDfileName, 'rb'))
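# 'MR' is assumed to be the ATTED mutual-rank co-expression score (inferred
# from the 'Overlapping_GenesMR' / 'BioGRID_with_ATTEDMR' file names above);
# it becomes the edge weight in the graph built below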
# Initializes Networkx Graph
genesG = nx.Graph()
# Adds edges for each interaction
for i in range(genesGRID.shape[0]):
genesG.add_edge(genesGRID['Entrez Gene Interactor A'].loc[i], genesGRID['Entrez Gene Interactor B'].loc[i], weight = genesGRID['MR'].loc[i])
def nonConvertible(orig,trans):
'''
    Checks if all original values are in the new list of values
and returns the ones that aren't
Inputs: List of original values, list of new values
Output: List of all original values that are not in the new value list
'''
nc = []
for i in range(len(orig)):
if orig[i] not in trans:
nc.append(orig[i])
else:
continue
return nc
def existing(lst, reference):
'''
Checks if items of lst are in reference, returns a list of existing values
Inputs: List to check, list to cross-reference with
Output: List of all values that also exist in the reference list.
'''
exists = [] # stores the existing values
NE = [] # stores the non-existing values
for x in lst:
if x in reference:
exists.append(x)
else:
NE.append(x)
return exists, NE
def ShortestDistances(Graph,grpA,grpB,dataFrame = True): #using function caused Kernel to crash due to memory
'''
Finds the shortest distances in a graph between all members of group 1
and all members of group 2 and stores into a dict.
Inputs: Graph to scan, list of group A nodes, list of group B nodes
Output: Returns a pandas dataFrame with group A as columns and group B as rows
and the shortest-distance as the values.
Else, returns a dict with group A as keys and a list of shortest distances
as values (size of list is the length of group B)
'''
graphGenes = list(Graph.nodes)
abc, Uabc = existing(grpA,graphGenes)
ltp, Ultp = existing(grpB,graphGenes)
DF={0:ltp}
for x in abc:
valList = []
for y in ltp:
if (x in graphGenes) and (y in graphGenes): # checks if both genes are part of the overlapping set
                val = nx.astar_path_length(Graph, x, y)  # with no heuristic supplied, A* reduces to Dijkstra
else:
val = np.nan
valList.append(val)
DF.update({x:valList})
if dataFrame:
Data = pd.DataFrame(DF,dtype='float64')
Data = Data.astype({0:'int'})
Data = Data.set_index(0)
return Data
else:
return DF
# option: filter out nodes with 1 connection
# store as a dictionary, dict:
def simplifyGraph(grpA, grpB, Graph, kdeg):
'''
Gathers all nodes within kdeg degrees of separation from the given groups.
    Inputs: list of group A nodes, list of group B nodes, graph to simplify, int representing
            the degree of separation from the key nodes allowed.
Output: subgraph of inputted graph as well as dictionary with each node in the new graph as key
and # of neighboring nodes in group A, # of neighboring nodes in group B, and
list of all neighboring nodes.
'''
keyGenes = sorted(set(grpA+grpB))
i = kdeg
while i >0:
# Stores neighbors at each key gene
neighbors = []
# Loops through all key genes
for gene in keyGenes:
nodes = [n for n in Graph.neighbors(gene)]
neighbors.append(nodes)
neighbors.append(keyGenes)
neighbors = [item for sublist in neighbors for item in sublist]
keyGenes = sorted(set(neighbors))
i-=1
# Creates subgraph
newGraph = Graph.subgraph(keyGenes)
# Iterates through the new nodes and checks if neighbors are in either group A or B
geneDict = {}
for gene in newGraph.nodes:
nodes = [n for n in newGraph.neighbors(gene)]
grpACount = 0
grpBCount = 0
for ngene in nodes:
if ngene in grpA:
grpACount +=1
elif ngene in grpB:
grpBCount +=1
geneDict[gene] = (grpACount,grpBCount, nodes)
return newGraph, geneDict
# Read In Data
ABC_trans = pd.read_csv('convABC_Genes.txt', sep = '\t')
ABC_trans = ABC_trans.rename(columns = {'From':'TAIR_ID','To':'ENTREZ_ID'})
ABC_orig = pd.read_excel('ABC_Genes.xls', sheet_name = 'Sheet2')
abcAT = sorted(set(list(ABC_trans['TAIR_ID'])))
abcEntrez = sorted(set(list(ABC_trans['ENTREZ_ID'])))
abcAT_orig = sorted(set(list(ABC_orig['TAIR_ID'])))
abc_NC = nonConvertible(abcAT_orig,abcAT)
LTP_trans = pd.read_csv('convLTP_Genes.txt', sep = '\t')
LTP_trans = LTP_trans.rename(columns = {'From':'TAIR_ID','To':'ENTREZ_ID'})
LTP_orig = pd.read_excel('LTP_Genes.xlsx', sheet_name = 'Sheet1')
ltpAT = sorted(set(list(LTP_trans['TAIR_ID'])))
ltpEntrez = sorted(set(list(LTP_trans['ENTREZ_ID'])))
ltpAT_orig = sorted(set(list(LTP_orig['TAIR_ID'])))
ltp_NC = nonConvertible(ltpAT_orig,ltpAT)
abc, Uabc = existing(abcEntrez,genesG.nodes)
ltp, Ultp = existing(ltpEntrez,genesG.nodes)
# Run Shortest Path & store as pickle files
simpleShortDistFileName = 'simplifiedShortestDistances.p'
shortDistFileName = 'shortestDistances.p'
simpleG, simpleGDict = simplifyGraph(abc,ltp, genesG,1)
abc_ltp_shortDist = ShortestDistances(genesG, abcEntrez, ltpEntrez, True)
pickle.dump(abc_ltp_shortDist, open(shortDistFileName, 'wb'))
simple_abc_ltp_shortDist = ShortestDistances(simpleG,abcEntrez,ltpEntrez, True)
pickle.dump(simple_abc_ltp_shortDist, open(simpleShortDistFileName, 'wb'))
| UTF-8 | Python | false | false | 5,910 | py | 11 | ShortestPath.py | 1 | 0.664298 | 0.661252 | 0 | 147 | 39.210884 | 144 |
Azure/azure-cli-extensions | 15,891,379,017,030 | 8ae8bcecfd9f0ee703431e9e237f3198175f9398 | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/swiftlet/azext_swiftlet/generated/commands.py | 5ab786951b2da3fe57d0aabd0828b0df9c1c931e | ["LicenseRef-scancode-generic-cla", "MIT"] | permissive | https://github.com/Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | false | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | 2023-09-12T21:00:04 | 2023-09-14T10:48:56 | 338,909 | 327 | 961 | 566 | Python | false | false |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
from azure.cli.core.commands import CliCommandType
def load_command_table(self, _):
from azext_swiftlet.generated._client_factory import cf_virtual_machine
swiftlet_virtual_machine = CliCommandType(
operations_tmpl='azext_swiftlet.vendored_sdks.swiftlet.operations._virtual_machine_operations#VirtualMachineOpe'
'rations.{}',
client_factory=cf_virtual_machine)
with self.command_group('swiftlet virtual-machine', swiftlet_virtual_machine, client_factory=cf_virtual_machine,
is_experimental=True) as g:
g.custom_command('list', 'swiftlet_virtual_machine_list')
g.custom_show_command('show', 'swiftlet_virtual_machine_show')
g.custom_command('create', 'swiftlet_virtual_machine_create', supports_no_wait=True)
g.custom_command('update', 'swiftlet_virtual_machine_update', supports_no_wait=True)
g.custom_command('delete', 'swiftlet_virtual_machine_delete', supports_no_wait=True, confirmation=True)
g.custom_command('list-bundle', 'swiftlet_virtual_machine_list_bundle')
g.custom_command('list-image', 'swiftlet_virtual_machine_list_image')
g.custom_command('start', 'swiftlet_virtual_machine_start', supports_no_wait=True)
g.custom_command('stop', 'swiftlet_virtual_machine_stop', supports_no_wait=True)
g.custom_wait_command('wait', 'swiftlet_virtual_machine_show')
| UTF-8 | Python | false | false | 1,944 | py | 3,291 | commands.py | 2,156 | 0.665638 | 0.665638 | 0 | 34 | 56.176471 | 120 |
fuzzygwalchmei/scratchingPost | 10,325,101,408,622 | 0c8b4b5bb0aab1968ac3b794755115383bb007b2 | fea444217851a92510651da2b60035b73344d7da | /treeBrowsing.py | 0ff1520b4150ded7c2e7b080a07f5e5b8333077d | [] | no_license | https://github.com/fuzzygwalchmei/scratchingPost | c70d4f3f37d3d4d6490edfbbae603305b2bb5764 | b232c54aac975aebb0945d66a841db3f241b7cd2 | refs/heads/master | 2023-01-29T13:02:22.615813 | 2020-12-15T00:47:56 | 2020-12-15T00:47:56 | 176,823,898 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import time
# path = input("What's the folder?: ")
curTime = time.strftime('%B %A %d %Y')
print(curTime)
# for i in os.scandir(path):
# if i.is_file():
# print('File: ' + i.path)
# elif i.is_dir():
# print('Folder: '+ i.path)
| UTF-8 | Python | false | false | 258 | py | 32 | treeBrowsing.py | 27 | 0.55814 | 0.55814 | 0 | 13 | 18.923077 | 38 |
highmore9501/study_flask | 1,374,389,562,671 | 8c0c6425bab32222ab2923e13d7a2af338506813 | 10eeb53e5222b13bc86270e1d77494ce871ba61e | /venv/Lib/site-packages/pytest_dotenv/__init__.py | 8287fbc4917558d184588ecf068237639d820422 | [] | no_license | https://github.com/highmore9501/study_flask | 5a52fdce1215945dab63ebe89b0226366233d5cf | c8ac39264e40c43df3e5f6434fafb5a962b73355 | refs/heads/master | 2020-07-09T10:27:42.406647 | 2019-08-25T09:04:05 | 2019-08-25T09:04:05 | 203,948,230 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__version__='0.4.0'
__author__='Marcel Radischat'
| UTF-8 | Python | false | false | 50 | py | 3 | __init__.py | 2 | 0.62 | 0.56 | 0 | 2 | 24 | 29 |
AtlasBuggy/Naboris | 9,981,504,026,388 | 1fcd08ce451bf8b85a3ed0a9d23ee60d12972bf0 | ded043a164bbecdb5b3c75dddc64efb9eae2aa6e | /naboris/inception/pipeline.py | f1c5f1329bb3ed760ac4ce64113ee74da1776d04 | [] | no_license | https://github.com/AtlasBuggy/Naboris | c4eaab10473854d6181c00fb87833b35f93c5fef | 9b8481b6f98652f936f7d5ce97edd42a351f304a | refs/heads/master | 2020-03-29T06:35:10.347671 | 2017-08-27T17:05:17 | 2017-08-27T17:05:17 | 94,658,393 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import cv2
import time
import numpy as np
import multiprocessing
import tensorflow as tf
from naboris.texture.pipeline import TexturePipeline
class InceptionPipeline(TexturePipeline):
def __init__(self, enabled=True, log_level=None):
super(InceptionPipeline, self).__init__(enabled, log_level)
self.model_path = "naboris/inception/output_graph.pb"
self.labels_path = "naboris/inception/output_labels.txt"
self.create_graph()
num_threads = multiprocessing.cpu_count()
print("Running on %s threads" % num_threads)
self.sess = tf.Session(config=tf.ConfigProto(
intra_op_parallelism_threads=num_threads))
self.softmax_tensor = self.sess.graph.get_tensor_by_name('final_result:0')
with open(self.labels_path) as labels_file:
lines = labels_file.readlines()
self.prediction_labels = [str(w).replace("\n", "") for w in lines]
def create_graph(self):
"""Creates a graph from saved GraphDef file and returns a saver."""
with tf.gfile.FastGFile(self.model_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
def pipeline(self, frame):
cropped = self.crop_frame(frame)
t0 = time.time()
predictions = self.sess.run(self.softmax_tensor,
{'DecodeJpeg/contents:0': self.numpy_to_bytes(cropped)})
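        # 'DecodeJpeg/contents:0' expects raw JPEG bytes, hence the cv2.imencode
        # round-trip in numpy_to_bytes() below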
predictions = np.squeeze(predictions)
top_k = predictions.argsort()[::-1]
answer = self.prediction_labels[top_k[0]]
t1 = time.time()
self.logger.info("Took: %0.4fs, Answer: %s" % (t1 - t0, answer))
cv2.putText(frame, answer, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
0.75, (0, 0, 255), 2)
y1, y2, x1, x2 = self.get_crop_points(frame)
cv2.rectangle(frame, (x1 - 1, y1 - 1), (x2 + 1, y2 + 1), (255, 0, 0))
self.post((answer, top_k[0]), self.results_service_tag)
self.post((frame, cropped), self.texture_service_tag)
return frame
@staticmethod
def numpy_to_bytes(frame):
        return cv2.imencode(".jpg", frame)[1].tostring()  # tostring() is the older alias of tobytes()
def stop(self):
self.sess.close()
| UTF-8 | Python | false | false | 2,282 | py | 34 | pipeline.py | 28 | 0.606924 | 0.586328 | 0 | 64 | 34.65625 | 92 |
hustmonk/avazu | 15,144,054,687,899 | 8613a78240e361136a072572253bda394adb1bee | 03d70018ba0aef0a91ca93572ff65ba76ac4fcfa | /newcode/densy.py | 101f824f4bcb9fcb5ab9b90f09b4399babf6ab1a | [] | no_license | https://github.com/hustmonk/avazu | d52fd96c5780d8897aeee128059153e466e7d003 | 4b6bd076d8b22a99eb32d97dc64437839dac201a | refs/heads/master | 2018-12-28T13:24:01.905930 | 2015-01-27T14:35:31 | 2015-01-27T14:35:31 | 26,867,207 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: GB2312 -*-
# Last modified:
"""docstring
"""
__revision__ = '0.1'
class Denesy():
def __init__(self):
self.indexs = {}
self.counts = {}
k = {}
for line in open("../data/bias/sort.wy.IDX"):
if len(k) > 1000:
break
arr = line.strip().split(" ")
k[arr[0]+"_"+arr[1]] = 1
for line in open("../todensy/0"):
arr = line.strip().split("\t")
if arr[0] in k:
self.counts[arr[0]] = -int(arr[1])
else:
self.counts[arr[0]] = int(arr[1])
#self.indexs[arr[0]] = len(self.indexs)
def getNum(self, head, v):
#key = head + "_" + v
key = v + "_" + head
c = self.counts.get(key, -1)
#if c > 100:
# c = 101
return c
| UTF-8 | Python | false | false | 866 | py | 27 | densy.py | 26 | 0.419169 | 0.387991 | 0 | 33 | 25.242424 | 53 |
dinesh207/ninekm | 12,953,621,366,910 | 4eb5b998929b9f30d1ac8514b455bc4a297d6409 | 82ce339de1a5c6382164d7692afa78d86b8a45ec | /main.py | df8c20a1a1255275b5f406f889f82d6692c377e1 | [] | no_license | https://github.com/dinesh207/ninekm | 352b8ee6fcbb3d8940d4b1efbde1072eaa3324be | fa0bbc7219db6efbd1fa75d0de79cd4e7a6c8e33 | refs/heads/master | 2022-01-04T17:00:14.835725 | 2019-09-26T05:51:03 | 2019-09-26T05:51:03 | 210,085,991 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from sample import Scrapper
'''
To use the scraper, call the initialize() function. It fetches the data in DataFrame format.
'''
print('Enter item you want to search:')
search = input()
obj = Scrapper(search)
obj.initialize()
obj.tearDown()
print("Task Completed Successfully!")
# def write_to_excel(dataframe):
# dataframe.to_excel(search + '_data.xlsx', sheet_name='sheet1', index=False)
# return dataframe
# df = obj.initialize()
# write_to_excel(df)
# Product full description
# Brand Name
# Company name
# Weight
# Sub-Category
# Parent Category
# Family
# MRP
# SKU
# Barcode
# Picture
#Sample Data:
#Aashirvaad Multigrains Atta 5 Kg | Aashirvaad | ITC Limited | 5 Kg | Atta | Staples | Food | 245.00 | 8901725121624
| UTF-8 | Python | false | false | 737 | py | 2 | main.py | 2 | 0.701493 | 0.672999 | 0 | 39 | 17.871795 | 116 |
OladeleO/lookyloo | 6,700,149,009,628 | 9c7cfc52bca398128f9cb2da726121018140d14b | acded2ecb631f143c34d1fd16e74a95fef0093f5 | /lookyloo/modules/phishtank.py | 7071977c7c7baeb9f85a158ba1838b86400185a8 | ["BSD-3-Clause"] | permissive | https://github.com/OladeleO/lookyloo | 2c174217774a150be71aa01fbf4bf05a1805d4ff | f1f9cab2a946639332fd0526be361e1b43d094e4 | refs/heads/main | 2023-07-18T18:29:40.522671 | 2021-09-22T19:22:26 | 2021-09-22T19:22:26 | 407,678,761 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import hashlib
import json
from datetime import date, datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Dict, Optional
from har2tree import CrawledTree
from pyphishtanklookup import PhishtankLookup
from ..exceptions import ConfigError
from ..helpers import get_homedir
# Note: stop making requests 48 hours after the capture was initially done.
class Phishtank():
def __init__(self, config: Dict[str, Any]):
if not config.get('enabled'):
self.available = False
return
self.available = True
self.allow_auto_trigger = False
if config.get('url'):
self.client = PhishtankLookup(config['url'])
else:
self.client = PhishtankLookup()
if config.get('allow_auto_trigger'):
self.allow_auto_trigger = True
self.storage_dir_pt = get_homedir() / 'phishtank'
self.storage_dir_pt.mkdir(parents=True, exist_ok=True)
def __get_cache_directory(self, url: str) -> Path:
m = hashlib.md5()
m.update(url.encode())
return self.storage_dir_pt / m.hexdigest()
def get_url_lookup(self, url: str) -> Optional[Dict[str, Any]]:
url_storage_dir = self.__get_cache_directory(url)
if not url_storage_dir.exists():
return None
cached_entries = sorted(url_storage_dir.glob('*'), reverse=True)
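        # cache entries are written with ISO-date filenames (see url_lookup below),
        # so a reverse lexicographic sort puts the newest entry first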
if not cached_entries:
return None
with cached_entries[0].open() as f:
return json.load(f)
def capture_default_trigger(self, crawled_tree: CrawledTree, /, *, auto_trigger: bool=False) -> Dict:
'''Run the module on all the nodes up to the final redirect'''
if not self.available:
return {'error': 'Module not available'}
if auto_trigger and not self.allow_auto_trigger:
return {'error': 'Auto trigger not allowed on module'}
# Quit if the capture is more than 70h old, the data in phishtank expire around that time.
if crawled_tree.start_time <= datetime.now(timezone.utc) - timedelta(hours=70):
            return {'error': 'Capture too old, the response would be irrelevant.'}
if crawled_tree.redirects:
for redirect in crawled_tree.redirects:
self.url_lookup(redirect)
else:
self.url_lookup(crawled_tree.root_hartree.har.root_url)
return {'success': 'Module triggered'}
def url_lookup(self, url: str) -> None:
        '''Look up a URL on the Phishtank lookup service
Note: It will trigger a request to phishtank every time *until* there is a hit (it's cheap), then once a day.
'''
if not self.available:
            raise ConfigError('Phishtank not available, probably not enabled in the config')
url_storage_dir = self.__get_cache_directory(url)
url_storage_dir.mkdir(parents=True, exist_ok=True)
pt_file = url_storage_dir / date.today().isoformat()
if pt_file.exists():
return
url_information = self.client.get_url_entry(url)
if url_information:
with pt_file.open('w') as _f:
json.dump(url_information, _f)
| UTF-8 | Python | false | false | 3,214 | py | 8 | phishtank.py | 1 | 0.622278 | 0.618855 | 0 | 90 | 34.711111 | 117 |
mazurkin/opencv-stereo | 19,241,453,510,420 | a3276f6bc76465a7aab607966525be583c787729 | dc78a1b868da07c1ede5c12714a752484b8f76d0 | /stereo2.py | 9d416e09cd0c23af470749214041f482af431e46 | [] | no_license | https://github.com/mazurkin/opencv-stereo | 4e4f6780ec0a0691d60360022d6ab1ba130161f3 | 49cee39c5bc74751d3b54c4dcebb893c174150bf | refs/heads/master | 2016-06-05T08:40:18.199168 | 2015-10-27T14:57:29 | 2015-10-27T15:45:59 | 45,051,482 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import cv2
import matplotlib.pyplot as plt
# path_l = "pics/aloeL.jpg"
# path_r = "pics/aloeR.jpg"
path_l = "pics/tsucuba_left.png"
path_r = "pics/tsucuba_right.png"
# path_l = "pics/boob_l.png"
# path_r = "pics/boob_r.png"
imgL = cv2.pyrDown(cv2.imread(path_l, cv2.CV_LOAD_IMAGE_GRAYSCALE))
imgR = cv2.pyrDown(cv2.imread(path_r, cv2.CV_LOAD_IMAGE_GRAYSCALE))
window_size = 5
min_disp = 0
num_disp = 64
stereo = cv2.StereoSGBM(
minDisparity=min_disp,
numDisparities=num_disp,
SADWindowSize=window_size,
uniquenessRatio=10,
speckleWindowSize=100,
speckleRange=32,
disp12MaxDiff=1,
P1=8*3*window_size**2,
P2=32*3*window_size**2,
fullDP=False
)
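# P1 and P2 are the SGBM smoothness penalties for disparity changes of 1 and >1
# between neighbouring pixels; the usual OpenCV guidance is 8*cn*window**2 and
# 32*cn*window**2 (cn = number of image channels), with P2 > P1, as used above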
disp = stereo.compute(imgL, imgR).astype(np.float32)
disp /= disp.max()
disp[disp < 0.4] = 0
fig, axes = plt.subplots(1, 3)
axes[0].imshow(imgL, "gray")
axes[1].imshow(disp, "gray")
axes[2].imshow(imgR, "gray")
plt.show()
cv2.waitKey()
| UTF-8 | Python | false | false | 946 | py | 3 | stereo2.py | 3 | 0.67759 | 0.633192 | 0 | 44 | 20.5 | 67 |
etherlabsio/ai-engine | 14,121,852,515,362 | 695909b190dba72ca8d2899839129fd348f78c79 | 00de7800d1d1042cb9ece8e4cb192c31e0b2f7ad | /services/scorer/transport.py | cded533970b994378f4f03f6708f0d6a47c5ef63 | ["MIT"] | permissive | https://github.com/etherlabsio/ai-engine | 860e70090470bd4ed12f99b0e1741270a93dc39c | e73a4419a34db42a410e2a7e7629eb946b86f2c2 | refs/heads/develop | 2022-04-22T21:14:02.087995 | 2020-04-19T08:36:30 | 2020-04-19T08:36:30 | 180,766,422 | 0 | 1 | null | false | 2020-04-19T08:36:31 | 2019-04-11T10:10:21 | 2020-04-19T08:30:36 | 2020-04-19T08:36:31 | 5,294 | 3 | 0 | 24 | Jupyter Notebook | false | false |
import json
from dataclasses import dataclass, asdict, field
from typing import List
import logging
from copy import deepcopy
from scorer.scorer import TextSegment, Score
@dataclass
class Request:
mind_id: str
context_id: str
instance_id: str
segments: List[TextSegment] = field(default_factory=list)
@dataclass
class Response:
scores: List[Score] = field(default_factory=list)
def decode_json_request(body) -> Request:
req = body
def decode_segments(seg):
seg_id = seg["id"]
text = seg["originalText"]
speaker = seg["spokenBy"]
return TextSegment(seg_id, text, speaker)
mind_id = str(req["mindId"]).lower()
context_id = req["contextId"]
instance_id = req["instanceId"]
segments = list(map(lambda x: decode_segments(x), req["segments"]))
return Request(mind_id, context_id, instance_id, list(segments))
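# Example of the JSON shape decode_json_request expects (field names taken from
# the decoder above; values illustrative):
# {"mindId": "M1", "contextId": "ctx1", "instanceId": "inst1",
#  "segments": [{"id": "s1", "originalText": "hello there", "spokenBy": "alice"}]}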
class AWSLambdaTransport:
@staticmethod
def encode_response(body: Response):
return {
"statusCode": 200,
"headers": {"Content-Type": "application/json"},
"body": json.dumps(body),
}
| UTF-8 | Python | false | false | 1,135 | py | 133 | transport.py | 82 | 0.652863 | 0.65022 | 0 | 45 | 24.222222 | 71 |
joshcreter/ft-tmdb | 11,510,512,396,668 | fd28f9783b628aced73ab24faf1491477af227b3 | 5e5df858fee33361964e1faffe55b400c1e3aa66 | /populators/common.py | a0737f2e908e62f2128b550fdd6e1a4d0d9dd8b1 | [] | no_license | https://github.com/joshcreter/ft-tmdb | 0f18a16ee159a78faeb3a90b07ef11809e88faaf | f3e24e5169263272fe252f633055986d00988bd3 | refs/heads/master | 2022-12-11T20:46:00.349649 | 2018-09-25T03:17:00 | 2018-09-25T03:17:00 | 129,861,907 | 0 | 0 | null | false | 2021-06-01T22:32:14 | 2018-04-17T07:11:08 | 2018-09-25T03:17:27 | 2021-06-01T22:32:12 | 12,931 | 0 | 0 | 1 | Python | false | false |
from generators import *
class CommonPopulator:
@staticmethod
def populate_applications_sheet(workbook, title_code: str):
worksheet = workbook.get_application_sheet().get_worksheet()
applications = ['Avails', 'BizAffairs', 'InvenTrack']
for application in applications:
dataset = {
'title_code': title_code,
'application': application
}
worksheet.write_data_row(dataset)
@staticmethod
def populate_project_groups_sheet(workbook, title_code: str):
worksheet = workbook.get_project_groups_sheet().get_worksheet()
project_groups = ['Demo Titles']
for project_group in project_groups:
dataset = {
'title_code': title_code,
'project_group': project_group
}
worksheet.write_data_row(dataset)
@staticmethod
def populate_ratings_sheet(worksheet: RatingsSheet, title_code: str, certifications: {}):
for certification in certifications:
dataset = {
'title_code': title_code,
'authority': certification['authority'],
'rating': certification['rating']
}
worksheet.write_data_row(dataset)
@staticmethod
def populate_ratings_sheet_US_only(workbook, title_code: str, rating_US: str):
worksheet_ratings = workbook.get_ratings_sheet().get_worksheet()
ratings_dataset = {
'title_code': title_code,
'authority': 'MPAA',
'rating': rating_US
}
worksheet_ratings.write_data_row(ratings_dataset)
@staticmethod
def populate_genres_sheet(workbook, title_code: str, genres: [str]):
worksheet_genre = workbook.get_genre_sheet().get_worksheet()
for genre in genres:
genre_dataset = {
'title_code': title_code,
'genre': genre
}
worksheet_genre.write_data_row(genre_dataset)
@staticmethod
def populate_countries_of_origin_sheet(workbook, title_code: str, countries: [str]):
worksheet = workbook.get_countries_of_origin_sheet().get_worksheet()
for country in countries:
dataset = {
'title_code': title_code,
'country': country
}
worksheet.write_data_row(dataset)
@staticmethod
def populate_localizations_sheet(workbook, title_code, localizations, formatted_title: str):
worksheet = workbook.get_localizations_sheet().get_worksheet()
allowed_localization_codes = ['af', 'ar', 'az', 'be', 'bg', 'ca', 'cs', 'da', 'de', 'div', 'el', 'es', 'et',
'eu', 'fa', 'fi', 'fil', 'fo', 'fr', 'gl', 'gu', 'he', 'hi', 'hr', 'hu', 'hy',
'id', 'is', 'it', 'ja', 'ka', 'kk', 'kn', 'ko', 'kok', 'ky', 'lt', 'lv', 'mk',
'mn', 'mr', 'ms', 'nl', 'no', 'pa', 'pl', 'pt', 'ro', 'ru', 'sa', 'sk', 'sl',
'sq', 'sv', 'sw', 'syr', 'ta', 'te', 'th', 'tr', 'tt', 'uk', 'ur', 'uz', 'vi']
for localization in localizations['translations']:
if localization['iso_639_1'] in allowed_localization_codes:
if len(localization['data']['overview']) > 0:
title = formatted_title
if len(localization['data'].get('title', '')) > 0:
title = localization['data']['title']
elif len(localization['data'].get('name', '')) > 0:
title = localization['data']['name']
dataset = {
'title_code': title_code,
'culture': localization['iso_639_1'],
'title': title,
'synopsis': localization['data']['overview']
}
worksheet.write_data_row(dataset)
@staticmethod
def populate_timeline_sheet(workbook, title_code: str, release_dates: {}):
worksheet = workbook.get_timeline_sheet().get_worksheet()
for release_date in release_dates:
dataset = {
'title_code': title_code,
'territory': release_date['territory'],
'start_date': release_date['start_date'],
'type': release_date['type'],
'note': release_date['note'],
'media': release_date['media'],
}
worksheet.write_data_row(dataset)
@staticmethod
def populate_awards_sheet(workbook, title_code: str, awards: {}):
worksheet = workbook.get_awards_sheet().get_worksheet()
for award in awards:
dataset = {
'title_code': title_code,
'result': award['result'],
'year': award['year'],
'organization': award['organization'],
'trophy': award['trophy'],
'category': award['category'],
}
worksheet.write_data_row(dataset)
@staticmethod
def populate_languages_sheet(worksheet: LanguagesSheet, title_code: str, languages: [str]):
for language in languages:
dataset = {
'title_code': title_code,
'language': language,
}
worksheet.write_data_row(dataset)
@staticmethod
def populate_subtitles_sheet(worksheet: SubtitlesSheet, title_code: str, languages: [str]):
for language in languages:
dataset = {
'title_code': title_code,
'language': language,
}
worksheet.write_data_row(dataset)
| UTF-8 | Python | false | false | 5,761 | py | 57 | common.py | 51 | 0.520222 | 0.518313 | 0 | 144 | 39.006944 | 116 |
kawing-chiu/exc | 7,902,739,872,857 | 7dd730ce77fd490cfaf45e8a0b7de1bffbdb0723 | 86b7908beacc1544bf1554a9734bb4bea4ef935d | /Python/01cffi/C++_lib_ver_2/out-of-line_api_build.py | d3f4814bfabe99dc8b36fca3ea1cacd87384206a | [] | no_license | https://github.com/kawing-chiu/exc | 16835f2f7834168693af7795940797dff12e8472 | 7aceeae42637ce70867ce1ea34e1b38647b8e718 | refs/heads/master | 2021-01-23T14:06:44.187297 | 2018-11-07T15:59:19 | 2018-11-07T15:59:19 | 42,144,449 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from cffi import FFI
ffi = FFI()
ffi.set_source('_wrapper_module', '''
#include "test_class.h"
#ifndef __C_WRAPPER_H
#define __C_WRAPPER_H
#ifdef __cplusplus
extern "C" {
#endif
typedef struct TestClass TestClass;
TestClass* new_test_class(void);
void test_class_set_attr(TestClass* c, int i);
int test_class_get_attr(TestClass* c);
void del_test_class(TestClass* c);
#ifdef __cplusplus
}
#endif
#endif
extern "C" {
TestClass* new_test_class(void) {
return new TestClass();
}
void test_class_set_attr(TestClass* c,int i) {
c->set_attr(i);
}
int test_class_get_attr(TestClass* c) {
return c->get_attr();
}
void del_test_class(TestClass* c) {
delete c;
}
}
''', libraries=['test'], source_extension='.cpp',
# Even if -ggdb3 is not specified, the resulting library is not stripped.
# To strip the library, '-Wl,--strip-debug' might be added; not quite sure
# whether that is a good approach yet.
extra_compile_args=['-std=c++11', '-Wall', '-Wextra', '-ggdb3'],
extra_link_args=['-L.', '-Wl,-rpath,$ORIGIN', '-Wl,--no-undefined'])
ffi.cdef("""
typedef struct TestClass TestClass;
TestClass* new_test_class();
void test_class_set_attr(TestClass* c, int i);
int test_class_get_attr(TestClass* c);
void del_test_class(TestClass* c);
""")
def run():
ffi.compile()
if __name__ == "__main__":
run()
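# After the build, the out-of-line wrapper is importable under the module name
# given to set_source() above; a minimal usage sketch:
#   from _wrapper_module import lib
#   c = lib.new_test_class()
#   lib.test_class_set_attr(c, 42)
#   assert lib.test_class_get_attr(c) == 42
#   lib.del_test_class(c)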
| UTF-8 | Python | false | false | 1,803 | py | 195 | out-of-line_api_build.py | 167 | 0.48863 | 0.486412 | 0 | 72 | 24.041667 | 79 |
siren0413/Mercury | 2,035,814,515,279 | 1a9d91afb23ed51837765705002f5cc38ff6e88f | 6639cd097307cb72ca417578c6e1a584ee169b15 | /mercury/api.py | c615d82d5d37ae334d2b7b0843db006ce91a7549 | [] | no_license | https://github.com/siren0413/Mercury | 1ed77d877210979f625d1abf1aa306ad033b6b5d | 28c10b44318525cb877b72eaed7141d6c6a94ffa | refs/heads/master | 2021-01-09T06:22:11.703347 | 2017-02-06T06:20:56 | 2017-02-06T06:20:56 | 80,972,272 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from .client import initrinioTags, intrinioFinancials
from .settings import *
import concurrent.futures as cf
import logging
import copy
def financials_by_item(identifiers, sequence=-1, type='FY', item='marketcap'):
statements = [INCOME_STATEMENT, BALANCE_SHEET, CASH_FLOW_STATEMENT, CALCULATIONS, CURRENT]
result = dict()
def sub_task(identifier):
for statement in statements:
try:
result = intrinioFinancials(identifier, sequence, item, statement, type)
except IOError as e:
logging.error('io error. %s' % e)
return
if result:
return result
with cf.ThreadPoolExecutor(max_workers=10) as executor:
future_to_subtask = {executor.submit(sub_task, identifier): identifier for identifier in identifiers}
for future in cf.as_completed(future_to_subtask):
name = future_to_subtask[future]
try:
data = future.result()
if data:
result[data['identifier']] = data
except Exception as exc:
logging.error('Unable to get [%s]: %s' % (name, exc))
return result
def financials_by_identifier(identifier, items, sequence=-1, type='FY'):
statements = [INCOME_STATEMENT, BALANCE_SHEET, CASH_FLOW_STATEMENT, CALCULATIONS, CURRENT]
result = dict()
def sub_task(item):
for statement in statements:
try:
result = intrinioFinancials(identifier, sequence, item, statement, type)
except IOError as e:
logging.error('io error. %s' % e)
return
if result:
return result
with cf.ThreadPoolExecutor(max_workers=10) as executor:
future_to_subtask = {executor.submit(sub_task, item): item for item in items}
for future in cf.as_completed(future_to_subtask):
name = future_to_subtask[future]
try:
data = future.result()
if data:
result[data['item']] = data
except Exception as exc:
logging.error('Unable to get [%s]: %s' % (name, exc))
return result
def tag_lookup(tag):
statements = [INCOME_STATEMENT, BALANCE_SHEET, CASH_FLOW_STATEMENT, CALCULATIONS, CURRENT]
for statement in statements:
try:
tags = initrinioTags(statement)
except IOError as e:
logging.error('io error. %s' % e)
return
if tags:
for tag_dict in tags['data']:
if tag_dict['tag'] == tag:
return tag_dict
def get_financials(identifiers, items, sequence=-1, type='FY'):
result = dict()
for identifier in identifiers:
fin = financials_by_identifier(identifier, items, sequence, type)
data_list = list()
for item in items:
if item in fin and 'value' in fin[item]:
data_list.append(fin[item]['value'])
else:
data_list.append(None)
result[identifier] = data_list
return result
def translate_and_rounding(tags, datas):
translated_tags = []
rounding_datas = copy.deepcopy(datas)
for i in range(len(tags)):
tag = tags[i]
tag_dict = tag_lookup(tag)
tag_name = tag_dict['name']
tag_units = tag_dict['units']
translated_tags.append(tag_name)
for identifier, data_list in rounding_datas.items():
if tag_units == 'usd':
if data_list[i]:
data_list[i] = round(data_list[i]/1000000)
return translated_tags, rounding_datas
| UTF-8 | Python | false | false | 3,678 | py | 8 | api.py | 8 | 0.579391 | 0.575585 | 0 | 103 | 34.708738 | 109 |
csghone/validate_s3_upload | 13,915,694,047,039 | 6cae5ef6babce3e3759bc608bd7776865e2e5bca | d34a72894d1096b5970046b61de7310c3a85bbde | /validate_s3_upload.py | a2d3643bcc298770be856794b60acf4d30a3f04b | [] | no_license | https://github.com/csghone/validate_s3_upload | 3d5af4f8051150ba60e8e7af1fbbce25b58bb147 | 738bfb96681066530cf0f367ed0b7e6feca36f79 | refs/heads/master | 2021-06-11T04:28:40.654655 | 2020-08-19T12:50:15 | 2020-08-19T12:50:15 | 128,357,841 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
from __future__ import print_function
import os
import sys
import argparse
import traceback
import logging
import logging.handlers
import math
import hashlib
import boto3
logger = logging.getLogger()
LOG_FORMATTER = logging.Formatter(
"%(asctime)s.%(msecs)03d - %(name)s - %(levelname)s - " +
"%(lineno)s - %(funcName)s - " +
"%(message)s",
"%Y%m%d %H:%M:%S")
S3 = boto3.client("s3")
def setup_logging(level=logging.INFO, enable_console=True):
file_log_handler = logging.handlers.RotatingFileHandler(
"__" + os.path.basename(__file__) + ".main__" + ".log",
maxBytes=1000000,
backupCount=5)
console_log_handler = logging.StreamHandler()
logger.addHandler(file_log_handler)
if enable_console:
logger.addHandler(console_log_handler)
logger.setLevel(level)
for handler in logging.root.handlers:
handler.setFormatter(fmt=LOG_FORMATTER)
def get_s3_object(inp_s3_path, local_basename=None):
s3_path = inp_s3_path
s3_path = s3_path.replace(r"s3://", "")
remote_basename = os.path.basename(s3_path)
if remote_basename == "" and local_basename is not None:
s3_path = os.path.join(s3_path, local_basename)
bucket = s3_path.split("/")[0]
s3_key = "/".join(s3_path.split("/")[1:])
logger.debug("Attempting to get")
logger.debug("Bucket: %s", bucket)
logger.debug("Key: %s", s3_key)
    try:
        s3_obj = S3.get_object(Bucket=bucket, Key=s3_key)
    except Exception:  # anything from a missing key to auth errors is treated as "not found"
        s3_obj = None
if s3_obj is None:
if s3_path[-1] == "/":
logger.error("Cannot find object: %s", s3_path)
return None
if local_basename is not None:
logger.warning("Trying by adding / at the end")
return get_s3_object(s3_path + "/" + local_basename)
return s3_obj
def get_chunk_size(local_file, chunks):
filesize = os.stat(local_file).st_size
logger.debug("local filesize for: %s: %s", local_file, filesize)
chunk_size = int(math.ceil(filesize / chunks / 1024.0 / 1024.0))
logger.info("chunk_size for: %s: %s MB", local_file, chunk_size)
return chunk_size * 1024 * 1024
def calculate_local_etag(local_file, chunk_size):
# Ref: https://github.com/tlastowka/calculate_multipart_etag/blob/master/calculate_multipart_etag.py
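    # S3 computes a multipart ETag as md5(concatenation of the binary part
    # digests) followed by "-<number_of_parts>"; a single-part ETag is a plain MD5 hex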
md5s = []
file_handle = open(local_file, mode="rb")
while True:
data = file_handle.read(chunk_size)
if not data:
break
chunk_digest = hashlib.md5(data)
logger.debug("Chunk digest: %s", chunk_digest.hexdigest())
md5s.append(chunk_digest)
file_handle.close()
if len(md5s) == 1:
final_etag = "{}".format(md5s[0].hexdigest())
else:
digests = b"".join(item.digest() for item in md5s)
final_md5 = hashlib.md5(digests)
final_etag = "{}-{}".format(final_md5.hexdigest(), len(md5s))
if final_etag.endswith("-0"):
final_etag = final_etag.strip("-0")
logger.debug("Intermediate etag for: %s: %s", local_file, final_etag)
return final_etag
def get_chunks(etag):
if "-" in etag:
try:
chunks = int(etag.split("-")[1])
except ValueError:
logger.error("Unexpected ETag: %s", etag)
assert False
else:
chunks = 1
return chunks
def get_local_etag(local_file, s3_etag, inp_chunk_size=None):
chunks = get_chunks(s3_etag)
chunk_size = get_chunk_size(local_file, chunks)
    if inp_chunk_size is not None:
        chunk_size = inp_chunk_size * 1024 * 1024  # caller-supplied override, in MB
while True:
local_etag = calculate_local_etag(local_file, chunk_size)
if get_chunks(local_etag) != chunks:
break
if local_etag == s3_etag:
break
chunk_size += 1024 * 1024
logger.info("Trying chunk_size: %s MB", chunk_size / 1024 / 1024)
logger.info("Local ETag: %s: %s", local_file, local_etag)
return local_etag
def get_s3_etag(s3_obj):
s3_etag = s3_obj["ETag"].strip('"')
logger.info("S3 Etag: %s", s3_etag)
return s3_etag
def get_s3_size(s3_obj):
s3_size = int(s3_obj["ContentLength"])
return s3_size
def compare_files(local_file, s3_path, inp_chunk_size=None):
if not os.path.exists(local_file):
logger.error("Path does not exist")
return False
if not os.path.isfile(local_file):
logger.error("Directories/links are not supported")
return False
assert s3_path.startswith(r"s3://"), logger.error("Invalid s3_path: %s", s3_path)
s3_path = s3_path.replace(r"s3://", "")
local_basename = os.path.basename(local_file)
s3_obj = get_s3_object(s3_path, local_basename)
if s3_obj is None:
return False
s3_etag = get_s3_etag(s3_obj)
s3_size = get_s3_size(s3_obj)
local_size = os.stat(local_file).st_size
if s3_size != local_size:
logger.error("Mismatch in size: s3: %s, local: %s", s3_size, local_size)
return False
local_etag = get_local_etag(local_file, s3_etag, inp_chunk_size)
if local_etag != s3_etag:
logger.error("Local file does not match Remote")
return False
return True
def process(**kwargs):
local_file = kwargs["local_file"]
s3_path = kwargs["s3_path"]
if not compare_files(local_file, s3_path, inp_chunk_size=kwargs["chunk_size"]):
return -1
logger.info("Local file matches Remote")
if kwargs["delete_local"]:
logger.info("Deleting local file")
os.remove(local_file)
return 0
def main():
parser = argparse.ArgumentParser(description="Validate S3 uploads")
parser.add_argument(
"-l",
"--local_file",
dest="local_file",
help="Path to file on local disk",
required=True
)
parser.add_argument(
"-s",
"--s3_path",
dest="s3_path",
help="s3://bucket/dir1/dir2/file or s3://dir1/dir2/",
required=True
)
parser.add_argument(
"-d",
"--delete_local",
dest="delete_local",
action="store_true",
help="Delete local file if checksum matches",
default=False
)
parser.add_argument(
"-c",
"--chunk_size",
dest="chunk_size",
type=int,
help="Override chunk_size",
default=None
)
myargs = parser.parse_args()
return process(**vars(myargs))
if __name__ == '__main__':
setup_logging(level=logging.INFO)
try:
sys.exit(main()) # Ensure return value is passed to shell
except Exception as error: # pylint: disable=W0702, W0703
exc_mesg = traceback.format_exc()
logger.error("\n%s", exc_mesg)
logger.error("Error: %s", error)
sys.exit(-1)
|
UTF-8
|
Python
| false | false | 6,780 |
py
| 2 |
validate_s3_upload.py
| 1 | 0.601622 | 0.577876 | 0 | 241 | 27.13278 | 104 |
luckyjudyz/books
| 9,929,964,391,140 |
af99eb27af5f745275e69e87bd06b26ec4f18235
|
cecb5c3d17c84ac5f8d3a733255d1bd660840bc4
|
/apps/books_app/models.py
|
2bc5befa3f97b90c4e2e0f3cb1c59249708243b1
|
[] |
no_license
|
https://github.com/luckyjudyz/books
|
547c5af5f4eadf42cff2a9704c064ef70c968cc5
|
29ec7ff2d56ab7342dd903b89104f9fbbb023616
|
refs/heads/master
| 2021-01-25T04:42:03.899932 | 2017-06-06T02:13:10 | 2017-06-06T02:13:10 | 93,465,918 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# from __future__ import unicode_literals
# from django.db import models
# from django.utils.encoding import python_2_unicode_compatible
#
# # Create your models here.
#
# class BookManager(models.Manager):
# def createbook(self, info):
# print info
# book = BookDB.objects.create(title=info['title'],author=info['author'])
# return
|
UTF-8
|
Python
| false | false | 385 |
py
| 5 |
models.py
| 4 | 0.664935 | 0.65974 | 0 | 12 | 31.083333 | 81 |
haust-Kevin/ProxyPool
| 8,100,308,339,652 |
8121e341d51df3d7443a4b39a6e8bb2e92b89755
|
18df59e73000ecad6d48d40dac11c123d18a469a
|
/proxypool/api.py
|
4af01b63f05c419a7722513900da0f75fb165025
|
[] |
no_license
|
https://github.com/haust-Kevin/ProxyPool
|
22ba2cb5958b5344bea5ec22b8b1f1cc0be8a492
|
9329934f31858ed429beb269098532abff2f3187
|
refs/heads/master
| 2023-01-19T22:06:37.461054 | 2020-11-28T15:55:18 | 2020-11-28T15:55:18 | 316,768,556 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Flask, g, request
__all__ = ['app']
from dao.mysql_dao import MysqlConn
from dao.redis_dao import RedisClient
app = Flask(__name__)
def get_redis_conn():
if not hasattr(g, 'redis'):
g.redis = RedisClient()
return g.redis
def get_mysql_conn():
if not hasattr(g, 'mysql'):
g.mysql = MysqlConn()
return g.mysql
@app.route('/')
def index():
return '''
<h2>Welcome to Proxy Pool System!</h2>
'''
@app.route('/get')
def get_proxy():
data = request.args
name = data.get('name')
pwd = data.get('pwd')
if get_mysql_conn().exist(name, pwd):
return get_redis_conn().random()
    return '<h2>Invalid username or password. For questions, contact QQ: {}</h2>'.format('878474339')
if __name__ == '__main__':
app.run()
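# Example request (hypothetical credentials), assuming the Flask dev server
# on its default port:
#   curl 'http://127.0.0.1:5000/get?name=alice&pwd=secret'
# returns a random proxy from Redis once the MySQL credential check passes.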
|
UTF-8
|
Python
| false | false | 809 |
py
| 9 |
api.py
| 8 | 0.57599 | 0.559387 | 0 | 41 | 18.097561 | 63 |
plejc/my-new-project
| 2,869,038,187,000 |
1e6ab7fc32fcacfdafcd11889d2c4bac38718a1f
|
7ecd12e4f7c78b96d62ac6719df9cc31481bb07a
|
/bidder/views.py
|
230792e6b950857bda92ddbce3ae65e658a7b082
|
[] |
no_license
|
https://github.com/plejc/my-new-project
|
1385ae14dbd1a63e9660bba019e5b7a29513ec37
|
0a5ad9ff40f1dabc7c14c083dfd539426f7171de
|
refs/heads/master
| 2022-12-30T18:07:29.397988 | 2020-10-16T08:56:09 | 2020-10-16T08:56:09 | 304,569,302 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render,redirect,HttpResponse
from django.views.generic import ListView
from .models import Bidder
from .forms import BidderForm
def home(request):
return render(request,'bidder/home.html')
def book(request):
return render(request,'bidder/book.html')
def bidder(request):
if request.method == 'POST':
form = BidderForm(request.POST or None)
if form.is_valid():
form.save()
return redirect('book')
else:
form = BidderForm()
return render(request,'bidder/bidder.html',{'form':form})
|
UTF-8
|
Python
| false | false | 610 |
py
| 16 |
views.py
| 11 | 0.645902 | 0.645902 | 0 | 23 | 24.521739 | 61 |
sgrosu/python_scripts
| 13,855,564,502,680 |
bed306c328044abe3e8aa65b52aa2a3fbaf95956
|
0a953cb324ef542c2af07316d498d3caafbc08f8
|
/gmail_app.py
|
eebb1057e25c8c63d5af378abdc7326b4e189157
|
[] |
no_license
|
https://github.com/sgrosu/python_scripts
|
d2e8e060298fd8ba6fe836f80353f00e3c6f845f
|
a5f521ad7f0682fec2ed394ebb83b253e7a3d5d2
|
refs/heads/master
| 2021-01-18T16:12:01.497907 | 2017-04-27T11:53:18 | 2017-04-27T11:53:18 | 86,723,531 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
'''
connect to my gmail inbox and fetch all the mails and display their subject and from address
'''
import imaplib
obj = imaplib.IMAP4_SSL('imap.gmail.com',993)
obj.login('dumblinuxuser','secretpass')
status, msgs = obj.select()
if status == 'OK':
    obj.select('INBOX') # <-- response like ('OK', ['74']) --> total no. of mail in the selected mailbox
#mboxes = obj.list()
#print mboxes
result, data = obj.search(None, "ALL")
ids = data[0] # data is a list.
id_list = ids.split() # ids is a space separated string
latest_email_id = id_list[-1] # get the latest
#result, data = obj.fetch(latest_email_id, "(RFC822)") # fetch the email body (RFC822) for the given ID
#raw_email = data[0][1] # here's the body, which is raw text of the whole email
#print raw_email
for emaill in id_list:
result, data = obj.fetch(emaill, '(BODY[HEADER.FIELDS (SUBJECT FROM)])')
header_data = data[0][1]
        print(header_data)
# including headers and alternate payloads
else:
    print(status)
    print(msgs)
|
UTF-8
|
Python
| false | false | 1,023 |
py
| 24 |
gmail_app.py
| 17 | 0.674487 | 0.656891 | 0 | 34 | 28.970588 | 104 |
zanayeem/Python-Specialization
| 17,025,250,382,096 |
d4a981b352490e6c360afe38d41008c27c4e0f32
|
70a69d84b7df8b8dbdc0b70d829fd829eba6d281
|
/Data Structures/Week5 (Dictionary)/Dictionary (Data Structure).py
|
7a728270aaeba4d900ba0c4ce8dcd082056623e4
|
[] |
no_license
|
https://github.com/zanayeem/Python-Specialization
|
ed21cedfaa9de12b245b630df1d20f061db728b5
|
9c3a909ca1bd0cba19a029efe9a961088f52e33b
|
refs/heads/master
| 2022-12-02T01:53:35.471824 | 2020-08-13T08:13:59 | 2020-08-13T08:13:59 | 261,119,803 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# #import sys
# duffle_bag = dict()
# duffle_bag['money']= [10,45,50]
# duffle_bag['money'][2] = duffle_bag['money'][2] + 5
# print(duffle_bag)
# duffle_bag = {'money': [26,35,33,55,66,66,65], 'gold': 25, 'gem': 55}
# print(duffle_bag)
# # Counting the occurrence of names using a for loop and a conditional
# counts = dict()
# names = ['nayeem','navid','tamzid','navid','tamzid','ashik','tamzid','ashik','tajwar','ashik','tajwar']
# for name in names:
# if name not in counts:
# counts[name] = 1
# else:
# counts[name] += 1
# print(counts)
# #ALTERNATE
# counts = dict()
# names = ['nayeem','navid','tamzid','navid','tamzid','ashik','tamzid','ashik','tajwar','ashik','tajwar']
# for name in names:
# counts[name] = counts.get(name,0) + 1 #A method to do the if and else
# print(counts)
# #COUNTING FROM INPUTS
# counts = dict()
# inp = input("Enter your text here: ")
# words = inp.split()
# print("The splitted words: ",words)
# for word in words:
# counts[word] = counts.get(word,0) +1
# print(counts)
#Retrieving keys and values
duffle_bag = {'money': [26,35,33,55,66,66,65], 'gold': 25, 'gem': 55}
print(duffle_bag.keys())
print(list(duffle_bag)) #Creates a list() from dict()
print(duffle_bag.values())
print(duffle_bag.items()) #Double values as items
for key,value in duffle_bag.items(): #Items can be used for TWO simultaneous iteration variables
print(key,value)
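# Membership tests on a dict look at keys, not values:
print('gem' in duffle_bag)   # True  ('gem' is a key)
print(55 in duffle_bag)      # False (55 is only a value)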
|
UTF-8
|
Python
| false | false | 1,416 |
py
| 26 |
Dictionary (Data Structure).py
| 23 | 0.639124 | 0.603107 | 0 | 45 | 30.466667 | 105 |
Ramblurr/jsonmapping
| 10,522,669,923,080 |
47101f9f84b028417c43b8d6a36c443256769e6c
|
fdff2cfe5289d4cba4a457eee4b64fee5c6aea5a
|
/jsonmapping/transforms.py
|
7db9cd6fc84764ba1d9e3d910396456fd432b516
|
[
"MIT"
] |
permissive
|
https://github.com/Ramblurr/jsonmapping
|
bf729bbdada7eac6c12b0acb70c6e5f0b805dbc7
|
76110c46b22084d3a830e2b291d2d0f71295e376
|
refs/heads/master
| 2020-12-24T21:00:38.609298 | 2015-09-08T13:20:33 | 2015-09-08T13:20:33 | 41,037,613 | 0 | 0 | null | true | 2015-08-19T13:58:17 | 2015-08-19T13:58:16 | 2015-08-19T13:58:17 | 2015-08-11T10:11:14 | 168 | 0 | 0 | 0 |
Python
| null | null |
import six
from hashlib import sha1
import normality
def coalesce(mapping, bind, values):
""" Given a list of values, return the first non-null value. """
for value in values:
if value is not None:
return [value]
return []
def slugify(mapping, bind, values):
""" Transform all values into URL-capable slugs. """
return [normality.slugify(v) for v in values]
def join(mapping, bind, values):
""" Merge all the strings. No space between them? """
return [''.join([six.text_type(v) for v in values])]
def str_func(name):
""" Apply functions like upper(), lower() and strip(). """
def func(mapping, bind, values):
for v in values:
if isinstance(v, six.string_types):
v = getattr(v, name)()
yield v
return func
def hash(mapping, bind, values):
""" Generate a sha1 for each of the given values. """
for v in values:
if not isinstance(v, six.string_types):
v = six.text_type(v)
v = v.encode('utf-8')
yield sha1(v).hexdigest()
TRANSFORMS = {
'coalesce': coalesce,
'slugify': slugify,
'join': join,
'upper': str_func('upper'),
'lower': str_func('lower'),
'strip': str_func('strip'),
'hash': hash
}
def add(name, func):
    """
    Add a user-supplied transform and make it available for mapping.

    :param name: the name of the function, used in the json mapping
    :param func: the function pointer accepting params (mapping, bind, values)
    """
    TRANSFORMS[name] = func
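# Example (hypothetical transform): register a reversing transform and call
# it the way the mapper would:
#   add('reverse', lambda mapping, bind, values: [v[::-1] for v in values])
#   TRANSFORMS['reverse'](None, None, ['abc'])  # -> ['cba']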
|
UTF-8
|
Python
| false | false | 1,587 |
py
| 8 |
transforms.py
| 8 | 0.604285 | 0.601764 | 0 | 62 | 24.612903 | 79 |
muyajil/wheretolive.ch
| 2,508,260,918,855 |
75cb810cd775d3e27d42790fbe54b7674a84ba52
|
31141850490c0a177a139d59588886360d0c3038
|
/backend/jobs/initial_import/sbb_timetable.py
|
7df1398a58f6f50d11031b3de8ae7c05c450a00f
|
[] |
no_license
|
https://github.com/muyajil/wheretolive.ch
|
7e61c3f517f6ace262b04625df953d78fa7de0d7
|
9edebb09c8a748c2c767188b11986ab04ff414c6
|
refs/heads/master
| 2022-11-13T05:55:53.396531 | 2020-06-10T12:54:41 | 2020-06-10T12:54:41 | 247,510,150 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import logging
import os
from flask import Blueprint
from ...crawlers import SBBTimetableCrawler
from ...extensions import db
from ...models import (
SBBCalendar,
SBBRoute,
SBBStation,
SBBStopTime,
SBBTransfer,
SBBTrip,
)
from ...utils import BatchedDBInserter
bp = Blueprint("initial_import.sbb_timetable", __name__, cli_group=None)
@bp.cli.command("import_sbb_timetable")
def run_job():
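    # Drop and recreate every SBB/GTFS table so the import starts from a
    # clean slate (assumes no other tables hold foreign keys into them).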
SBBStation.__table__.drop(db.engine)
SBBStopTime.__table__.drop(db.engine)
SBBTrip.__table__.drop(db.engine)
SBBRoute.__table__.drop(db.engine)
SBBCalendar.__table__.drop(db.engine)
SBBTransfer.__table__.drop(db.engine)
db.create_all()
logger = logging.getLogger(os.path.basename(__file__))
inserter = BatchedDBInserter(logger, db.session, batch_size=50000)
logger.debug("Starting process...")
crawler = SBBTimetableCrawler()
logger.debug("Getting Stations...")
stations = map(lambda x: SBBStation(**x), crawler.crawl_stops())
logger.debug("Inserting Stations into Database...")
inserter.insert(stations)
logger.debug("Getting Stop Times...")
stop_times = map(lambda x: SBBStopTime(**x), crawler.crawl_stop_times())
logger.debug("Inserting Stop Times into Database...")
inserter.insert(stop_times)
logger.debug("Getting Trips...")
trips = map(lambda x: SBBTrip(**x), crawler.crawl_trips())
logger.debug("Inserting Trips into Database...")
inserter.insert(trips)
logger.debug("Getting Routes...")
routes = map(lambda x: SBBRoute(**x), crawler.crawl_routes())
logger.debug("Inserting Routes into Database...")
inserter.insert(routes)
logger.debug("Getting Calendar...")
calendars = map(lambda x: SBBCalendar(**x), crawler.crawl_calendar())
logger.debug("Inserting Calendar into Database...")
inserter.insert(calendars)
logger.debug("Getting Transfers...")
transfers = map(lambda x: SBBTransfer(**x), crawler.crawl_transfers())
logger.debug("Inserting Transfers into Database...")
inserter.insert(transfers)
|
UTF-8
|
Python
| false | false | 2,071 |
py
| 96 |
sbb_timetable.py
| 60 | 0.686625 | 0.684211 | 0 | 65 | 30.861538 | 76 |
aldenjenkins/ThiccGaming
| 16,346,645,575,189 |
616e181a72cd58908126966cc922b9caeae5941b
|
be134c181703b95aca1e48b6a31bcfdb7bcfcc76
|
/site/thicc/apps/rules/views.py
|
bea1c79feec52aecd5a96d1dabce33f75ae0ed53
|
[] |
permissive
|
https://github.com/aldenjenkins/ThiccGaming
|
0245955a797394bcfeedb2cfb385f633653ba55d
|
4790d2568b019438d1569d0fe4e9f9aba008b737
|
refs/heads/master
| 2022-12-16T02:43:36.532981 | 2021-11-17T04:15:21 | 2021-11-17T04:15:21 | 154,858,818 | 0 | 0 |
BSD-3-Clause
| false | 2022-12-08T02:58:44 | 2018-10-26T15:52:39 | 2021-11-17T04:15:30 | 2022-12-08T02:58:44 | 1,276 | 0 | 0 | 15 |
Python
| false | false |
from django.shortcuts import render
def index(request):
return render(request, 'rules/rules.html')
|
UTF-8
|
Python
| false | false | 104 |
py
| 173 |
views.py
| 83 | 0.759615 | 0.759615 | 0 | 4 | 25 | 46 |
tarsqi/ttk
| 4,114,578,676,627 |
47969230cf60d9c9925917f04557e7a16a04d679
|
df1e54249446ba2327442e2dbb77df9931f4d039
|
/deprecated/get_lexes.py
|
2a536630d8e3c854980d9d7ef4e9f1102162cf30
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/tarsqi/ttk
|
8c90ee840606fb4c59b9652bd87a0995286f1c3d
|
085007047ab591426d5c08b123906c070deb6627
|
refs/heads/master
| 2021-07-12T06:56:19.924195 | 2021-03-02T22:05:39 | 2021-03-02T22:05:39 | 35,170,093 | 26 | 12 |
Apache-2.0
| false | 2021-03-02T22:05:39 | 2015-05-06T16:24:38 | 2021-01-26T15:53:18 | 2021-03-02T22:05:39 | 9,339 | 22 | 7 | 27 |
Python
| false | false |
"""
Standalone utility script to extract all the tokens from a file and print them space
separated using one sentence per line. Files are XML files that need to contain <s> and
<lex> tags.
USAGE:
% python get_lexes.py INFILE OUTFILE
% python get_lexes.py INDIR OUTDIR
Note that in the first case INFILE has to exist and outfile not. In the second case, INDIR
and OUTDIR have to be directories, existing files in OUTDIR may be overwritten.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys, os
from xml.dom.minidom import parse
from io import open
def getText(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def get_lexes(infile, outfile):
    try:
        dom = parse(infile)
    except Exception:
        # The original built this message but never printed it.
        print("Warning in XML parsing, skipping %s" % infile)
        return
    fh = open(outfile, 'w', encoding='utf-8')
sentences = dom.getElementsByTagName("s")
for sentence in sentences:
tokens = []
lexes = sentence.getElementsByTagName("lex")
for lex in lexes:
tokens.append(getText(lex.childNodes))
        # The file is opened in text mode, so write unicode, not bytes.
        fh.write(' '.join(tokens) + u"\n")
    fh.close()
if __name__ == '__main__':
IN = sys.argv[1]
OUT = sys.argv[2]
if os.path.isfile(IN) and not os.path.exists(OUT):
get_lexes(IN, OUT)
elif os.path.isdir(IN) and os.path.isdir(OUT):
for filename in os.listdir(IN):
infile = IN + os.sep + filename
outfile = OUT + os.sep + filename
if outfile[-3:] == 'xml':
outfile = outfile[:-3] + 'txt'
print(outfile)
get_lexes(infile, outfile)
|
UTF-8
|
Python
| false | false | 1,738 |
py
| 563 |
get_lexes.py
| 114 | 0.613349 | 0.610472 | 0 | 64 | 26.15625 | 90 |
ramamca90/Hackathon
| 8,435,315,814,477 |
c642a4b56417a04d2a39dd245e9b2353cb4076fe
|
352f70858b0e293db951b2dba61bda15d8cd45cf
|
/xpologistics/XPOLOGISTICS.py
|
0dcfcf3e8629e6a80108c0a09b7bdc7b590c1915
|
[] |
no_license
|
https://github.com/ramamca90/Hackathon
|
99cb88a29ca207ad03f24a6e54bdf2f5709e31e1
|
6ea1751f674592d9a7baab6bf69bf63a9e7d9a1c
|
refs/heads/master
| 2022-03-10T00:21:51.068180 | 2019-11-18T17:46:02 | 2019-11-18T17:46:02 | 113,473,415 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 2 12:15:52 2017
@author: user
"""
import pandas as pd
loadboard = pd.read_excel("C:\\Users\\user\\Desktop\\loadBoard.xlsx")
print ("HI ..Please provide your load details ")
pickup_selected=input("Please give the pick up location")
pickup_destination=input("Please give the destination location")
print (loadboard[(loadboard.PICKUP == pickup_selected.upper() )
& (loadboard.DELIVERY == pickup_destination.upper() )] )
|
UTF-8
|
Python
| false | false | 508 |
py
| 10 |
XPOLOGISTICS.py
| 2 | 0.65748 | 0.633858 | 0 | 16 | 28.75 | 69 |
patarapolw/CJKhyperradicals
| 13,941,463,872,012 |
5107ca66103c0e36dcd7c36b26f0d2d6e13a4b5c
|
7e00385aa4a53b2d2083573a1656a68c936c8841
|
/CJKhyperradicals/dir.py
|
0a7910a9b3f56e22940c15af79d60d68d4b66df6
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/patarapolw/CJKhyperradicals
|
0ccf6bffaca81bfc44cfdc349bb65dbc3b41b4c4
|
2a7c2ef489d2e2dd6496c676559738da220f4b03
|
refs/heads/master
| 2022-12-11T03:11:56.924515 | 2019-10-22T10:19:24 | 2019-10-22T10:19:24 | 128,757,374 | 3 | 0 |
Apache-2.0
| false | 2022-12-08T06:37:02 | 2018-04-09T10:52:28 | 2022-07-27T22:14:11 | 2022-12-08T06:37:01 | 10,380 | 3 | 0 | 3 |
Python
| false | false |
import os
import inspect
ROOT = os.path.abspath(os.path.dirname(inspect.getframeinfo(inspect.currentframe()).filename))
def database_path(data):
return os.path.join(ROOT, 'database', data)
def chinese_path(data):
return os.path.join(ROOT, 'database', 'chinese', data)
def japanese_path(data):
return os.path.join(ROOT, 'database', 'japanese', data)
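# e.g. chinese_path('hanzi.db') -> <ROOT>/database/chinese/hanzi.db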
|
UTF-8
|
Python
| false | false | 368 |
py
| 21 |
dir.py
| 12 | 0.717391 | 0.717391 | 0 | 16 | 22 | 94 |
psdh/WhatsintheVector
| 7,138,235,658,431 |
539c455d6aca03a4afdff92112b1c8228dc04287
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nnmidchannel.py
|
e32138751606b438ddefede7585adf831b8387c8
|
[] |
no_license
|
https://github.com/psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | false | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | 2015-09-23T11:51:13 | 2015-09-23T11:54:06 | 71,656 | 2 | 2 | 2 |
Python
| null | null |
ii = [('FitzRNS3.py', 3), ('FitzRNS4.py', 2), ('FitzRNS.py', 13)]
|
UTF-8
|
Python
| false | false | 65 |
py
| 24,174 |
nnmidchannel.py
| 24,172 | 0.538462 | 0.446154 | 0 | 1 | 65 | 65 |
shankravi007/CIP_Project
| 9,655,086,517,686 |
7e46415bc9f09c6d383e6a6ed705ace7e714bae9
|
ca0939e6c18b9c352addddde23720751bd9d6f58
|
/Project-UtilityFunctions/dataloadinglibrary.py
|
476d13b301322cda40d6fe28ea0e63e55a843c63
|
[] |
no_license
|
https://github.com/shankravi007/CIP_Project
|
e134dd1466ec670722f82ce16864914993ff8ce5
|
243e2215c977f47e7e6ccab6e02c85fff9b9ffce
|
refs/heads/main
| 2023-03-26T06:51:38.189486 | 2021-03-30T06:44:57 | 2021-03-30T06:44:57 | 352,892,889 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pandas as pd
#This function loads a CSV file given its absolute path
#(e.g. a file inside the 'data' directory of the working tree)
def loadCSV (fileNameWithAbsolutePath):
dataSet = pd.read_csv(fileNameWithAbsolutePath)
return dataSet
|
UTF-8
|
Python
| false | false | 233 |
py
| 10 |
dataloadinglibrary.py
| 7 | 0.776824 | 0.776824 | 0 | 7 | 32.285714 | 66 |
nigelrodrigues15/AutomatedDriving-NeuralNetwork
| 7,825,430,463,677 |
9d548266357fd0ef824f7e955c136f5427d1a447
|
e0764868b090776da92a24e1709bbf72ac5c853d
|
/binData.py
|
537c2c25886223c5d095acaf818c7f26af1ec7c1
|
[] |
no_license
|
https://github.com/nigelrodrigues15/AutomatedDriving-NeuralNetwork
|
adcb78071c134e6e7d5644872c17e16fe1447980
|
0a718dc41d2d2de9ededfe3a9c87035545ac1eb2
|
refs/heads/master
| 2020-06-02T05:11:19.251499 | 2019-07-17T00:59:24 | 2019-07-17T00:59:24 | 191,047,780 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np  # needed for np.arange / np.array / np.eye below
import pandas as pd

# Note: X_train, X_test, X_val, y_train and yPlot are assumed to be defined
# by an earlier step; this snippet reads like a notebook cell.
num_bins = 20
interval = 2.0/num_bins
inc = -1
bins = []
while inc <= 1:
inc = round(inc, 1)
bins.append(inc)
inc += interval
### Test code
# print(bins)
# labels = np.arange(0,len(bins) - 1)
# print(labels)
# yTest = y_train[0:10]
# print('before',yTest)
# ybinned = pd.cut(yTest, bins = bins, labels = labels, include_lowest = True)
# print('after',ybinned)
####
xTrainLenth, a, b, c = X_train.shape
xTestLenth, a, b, c = X_test.shape
xValLenth, a, b, c = X_val.shape
labels = np.arange(0, len(bins) - 1)
yAll = yPlot
yBinned = pd.cut(yAll, bins=bins, labels=labels, include_lowest=True)
print(xTrainLenth)
print(xTestLenth)
print(xValLenth)
print('\n')
y_train = yBinned[0:xTrainLenth]
print(y_train.shape)
y_test = yBinned[xTrainLenth:xTrainLenth + xTestLenth]
print(y_test.shape)
y_val = yBinned[xTrainLenth + xTestLenth:xTrainLenth + xTestLenth + xValLenth]
print(y_val.shape)
print(len(y_train) + len(y_test) + len(y_val))
numberOfClasses = 20
def indices_to_one_hot(data, nb_classes):
"""Convert an iterable of indices to one-hot encoded labels."""
targets = np.array(data).reshape(-1)
y = np.eye(nb_classes)[targets]
return y
# indices_to_one_hot(ytest,nb_classes)
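# e.g. indices_to_one_hot([1, 0], 3) -> [[0., 1., 0.], [1., 0., 0.]]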
y_train = [y_train.tolist()]
y_train = indices_to_one_hot(y_train, numberOfClasses)
y_test = [y_test.tolist()]
y_test = indices_to_one_hot(y_test, numberOfClasses)
y_val = [y_val.tolist()]
y_val = indices_to_one_hot(y_val, numberOfClasses)
|
UTF-8
|
Python
| false | false | 1,576 |
py
| 13 |
binData.py
| 12 | 0.637056 | 0.625635 | 0 | 84 | 16.761905 | 78 |
NaiveTorch/ARC
| 8,564,164,825,143 |
ff7b9c9c5539be37d595303d6880fb3f9e7ee24e
|
a27c13c55680e95a0cfe375dd02daae9d8e64d04
|
/src/build/util/archive_test_bundle.py
|
6a381a6829e6abccab44d95ab2bf87aabc0a440f
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
https://github.com/NaiveTorch/ARC
|
4572ed94d01f3b237492579be4091f3874a8dfe0
|
4007a4e72f742bb50de5615b2adb7e46d569b7ed
|
refs/heads/master
| 2021-01-22T06:38:12.078262 | 2014-10-22T15:43:28 | 2014-10-22T15:43:28 | 25,082,433 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Archives files needed to run integration tests such as arc runtime,
# CRX files, config files, and test jar files.
import argparse
import os
import re
import subprocess
import sys
import zipfile
sys.path.insert(0, 'src/build')
import build_common
import run_integration_tests
import toolchain
from build_options import OPTIONS
from util import remote_executor_util
def _get_stripped_dir():
return os.path.join(build_common.get_build_dir(), 'test_bundle_stripped')
def _collect_descendants(paths):
"""Returns the set of descendant files of the directories in |paths|.
If |paths| includes files in |paths|, the files are included in the returned
set. Unnecessary files for running integration tests such as temporary files
created by editor, .pyc, and .ncval files are excluded from the returned set.
"""
files = [path for path in paths if os.path.isfile(path)]
dirs = [path for path in paths if os.path.isdir(path)]
files += build_common.find_all_files(dirs, include_tests=True,
use_staging=False)
files = [f for f in files if not re.match(r'.*\.(pyc|ncval)', f)]
return set(files)
def _get_archived_file_paths():
"""Returns the file paths to be archived."""
paths = _collect_descendants(
remote_executor_util.get_integration_test_files_and_directories())
paths.add(os.path.relpath(toolchain.get_adb_path_for_chromeos(),
build_common.get_arc_root()))
paths |= set(run_integration_tests.get_configs_for_integration_tests())
return paths
def _zip_files(filename, paths):
"""Creates a zip file that contains the specified files."""
# Set allowZip64=True so that large zip files can be handled.
with zipfile.ZipFile(filename, 'w', compression=zipfile.ZIP_DEFLATED,
allowZip64=True) as f:
for path in set(paths):
if path.startswith(_get_stripped_dir()):
# When archiving a stripped file, use the path of the corresponding
# unstripped file as archive name
f.write(path, arcname=os.path.relpath(path, _get_stripped_dir()))
else:
f.write(path)
def _get_integration_tests_args(jobs):
"""Gets args of run_integration_tests.py adjusted for archiving files."""
args = run_integration_tests.parse_args(['--jobs=%d' % jobs])
# Create an archive to be used on buildbots.
args.buildbot = True
# Assume buildbots support GPU.
args.gpu = 'on'
# Archive failing tests as well.
args.include_failing = True
return args
def _should_strip(path):
"""Returns true if the file at |path| should be stripped."""
return (path.startswith(build_common.get_build_dir()) and
path.endswith(('.nexe', '.so')))
def _strip_binaries(paths):
"""Strips the files in |paths| and returns the paths of stripped files."""
stripped_paths = []
for path in paths:
if _should_strip(path):
stripped_path = os.path.join(_get_stripped_dir(), path)
build_common.makedirs_safely(os.path.dirname(stripped_path))
subprocess.check_call(['strip', path, '-o', stripped_path])
stripped_paths.append(stripped_path)
else:
stripped_paths.append(path)
return stripped_paths
def _parse_args():
description = 'Archive files needed to run integration tests.'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-j', '--jobs', metavar='N', default=1, type=int,
help='Prepare N tests at once.')
parser.add_argument('-o', '--output',
default=build_common.get_test_bundle_name(),
help=('The name of the test bundle to be created.'))
return parser.parse_args()
if __name__ == '__main__':
OPTIONS.parse_configure_file()
# Build arc runtime.
build_common.run_ninja()
# Prepare all the files needed to run integration tests.
parsed_args = _parse_args()
integration_tests_args = _get_integration_tests_args(parsed_args.jobs)
run_integration_tests.set_test_options(integration_tests_args)
run_integration_tests.set_test_config_flags(integration_tests_args)
assert run_integration_tests.prepare_suites(integration_tests_args)
# Prepare dalvik.401-perf for perf vm tests.
integration_tests_args.include_patterns = ['dalvik.401-perf:*']
assert run_integration_tests.prepare_suites(integration_tests_args)
# Archive all the files needed to run integration tests into a zip file.
paths = _get_archived_file_paths()
if OPTIONS.is_debug_info_enabled():
paths = _strip_binaries(paths)
print 'Creating %s' % parsed_args.output
_zip_files(parsed_args.output, paths)
print 'Done'
|
UTF-8
|
Python
| false | false | 4,821 |
py
| 336 |
archive_test_bundle.py
| 254 | 0.69135 | 0.688032 | 0 | 134 | 34.977612 | 80 |
Oshayer-Siddique/Oshayer
| 18,983,755,473,002 |
5a2bf03577c47f157b2f6e5566e2dea50287688b
|
e7f503d82383895844961e9bec63856ab638a38b
|
/recurrsion1.py
|
c2c5fc724df904a0c4e3c0e96a5d8623bd8792ee
|
[] |
no_license
|
https://github.com/Oshayer-Siddique/Oshayer
|
65f823bcae7a7dd9ffd09f39577c51d6b1c66d28
|
3d1de99577a167f32e28f4bdebc82bf05bf01c02
|
refs/heads/master
| 2022-11-22T17:04:40.059849 | 2022-10-30T20:53:18 | 2022-10-30T20:53:18 | 273,699,757 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def func(n):
if n == 1 or n == 2:
return 1
else:
return func(n-1)+func(n-2)
n = int(input())
print(func(n))
def func1():
print("HHH")
return
func1()
def func2():
print("JJJ")
return func2()
func2()
import itertools
for j in range(8):
for i in itertools.combinations([1, 2, 3,4,5,6,7], j):
print(i)
import itertools
for i in itertools.permutations([1,2,3]):
print(i)
|
UTF-8
|
Python
| false | false | 464 |
py
| 317 |
recurrsion1.py
| 313 | 0.521552 | 0.476293 | 0 | 29 | 14.068966 | 58 |
sscpac/statick
| 17,437,567,253,095 |
616f94eb022fb834ce5ddfba5c0d876b3399f829
|
921b402e42e711b6ed937273310f8af0b34721c5
|
/tests/profile/test_profile.py
|
9268b1e3f88c7878dc0a72a5af2de4f0f583fef4
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
https://github.com/sscpac/statick
|
77705a324b715a59d7cc5f9b12760a5c69b490eb
|
25225188b04dbdebb04674d0b3c8af886ccf87c3
|
refs/heads/main
| 2023-07-06T09:22:39.545513 | 2023-06-21T23:38:27 | 2023-06-21T23:38:27 | 146,325,712 | 73 | 14 |
CC0-1.0
| false | 2023-06-21T23:38:29 | 2018-08-27T16:39:56 | 2023-05-09T10:53:30 | 2023-06-21T23:38:28 | 1,864 | 65 | 11 | 2 |
Python
| false | false |
"""Unit tests for the Args module."""
import os
import pytest
from statick_tool.package import Package
from statick_tool.profile import Profile
def test_profile_init():
"""Test that the Profile module initializes correctly.
Expected result: profile is initialized
"""
profile = Profile(os.path.join(os.path.dirname(__file__), "profile.yaml"))
assert profile.profile
def test_profile_nonexistent():
"""Test for when a Profile is initialized with a nonexistent YAML.
Expected result: OSError is thrown
"""
with pytest.raises(OSError):
Profile(os.path.join(os.path.dirname(__file__), "nope.yaml"))
def test_profile_file_empty_string():
"""Test for when a Profile is initialized with an empty string.
Expected result: ValueError is thrown
"""
with pytest.raises(ValueError):
Profile(os.path.join(""))
def test_profile_empty():
"""Test for when a Profile is initialized with an empty YAML.
Expected result: ValueError is thrown
"""
with pytest.raises(ValueError):
Profile(os.path.join(os.path.dirname(__file__), "empty.yaml"))
def test_profile_nodefault():
"""Test for when a Profile is initialized with a YAML that doesn't have a 'default'
key.
Expected result: ValueError is thrown
"""
with pytest.raises(ValueError):
Profile(os.path.join(os.path.dirname(__file__), "nodefault.yaml"))
def test_profile_bad_yaml():
"""Test for when a Profile is initialized with something that isn't a valid YAML
file.
Expected result: ValueError is thrown
"""
with pytest.raises(ValueError):
Profile(os.path.join(os.path.dirname(__file__), "bad.yaml"))
def test_profile_get_package_level_nopackage():
"""Test for when get_package_level is called with no packages defined.
Expected result: default is returned
"""
package = Package("test", os.path.dirname(__file__))
profile = Profile(os.path.join(os.path.dirname(__file__), "profile-nopackage.yaml"))
assert profile.get_package_level(package) == "default_value"
def test_profile_get_package_level_invalidpackage():
"""Test for when get_package_level is called with a package not in the packages
list.
Expected result: default is returned
"""
package = Package("nopackage", os.path.dirname(__file__))
profile = Profile(os.path.join(os.path.dirname(__file__), "profile.yaml"))
assert profile.get_package_level(package) == "default_value"
def test_profile_get_package_level_validpackage():
"""Test for when get_package_level is called with a package not in the packages
list.
Expected result: the package-specific value is returned
"""
package = Package("package", os.path.dirname(__file__))
profile = Profile(os.path.join(os.path.dirname(__file__), "profile.yaml"))
assert profile.get_package_level(package) == "package_specific"
|
UTF-8
|
Python
| false | false | 2,915 |
py
| 228 |
test_profile.py
| 131 | 0.684391 | 0.684391 | 0 | 95 | 29.684211 | 88 |
Riley-Milligan/pythonweekone
| 17,085,379,917,396 |
37c2975a92500acba939c4df2090005aff663c14
|
451492d1a2d4375b830b3a1dfbe1085ee4a66770
|
/day1/exercise_four.py
|
246989e5ea3e650281ad4ffb60fb38581906e308
|
[] |
no_license
|
https://github.com/Riley-Milligan/pythonweekone
|
fc9971e3a653402aca19f8e34d4887fffa0da30d
|
3322166a350a1b1ce29a764369294795a5764368
|
refs/heads/main
| 2023-05-06T03:17:56.286878 | 2021-05-28T07:21:48 | 2021-05-28T07:21:48 | 371,615,760 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
principle = 25000
annual_rate = 12
term_length = 12
print("Input Data")
print(principle, annual_rate, term_length)
monthly_rate = (annual_rate / 12) / 100 + 1
total = principle
for x in range(term_length):
total = (total * monthly_rate)
monthly_payment = total / term_length
print(round(monthly_payment))
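# Note: this follows the exercise's simplified model: the balance is
# compounded monthly over the full term, then split into equal payments
# (not the standard amortization formula).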
|
UTF-8
|
Python
| false | false | 314 |
py
| 16 |
exercise_four.py
| 16 | 0.707006 | 0.659236 | 0 | 16 | 18.625 | 43 |
jamesyoung4/multi_calc
| 6,451,040,927,290 |
e713b12531f74c06d082fb14f8fe17f32c65380a
|
bce17afc809f132f681d2653824598a81397a1e4
|
/level_tangent.py
|
f34c43a9d1e7a5eb23e2b3d6428087b0e4e3cbaa
|
[] |
no_license
|
https://github.com/jamesyoung4/multi_calc
|
c96c872d2bffea47814c3c91303ff720b65cf07f
|
b8cb11adc1288b58ac2de2fb534342136da5242b
|
refs/heads/master
| 2020-03-11T06:52:52.075085 | 2018-04-19T05:50:46 | 2018-04-19T05:50:46 | 129,842,607 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from sympy import *
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
def partial_x(func,x):
return (diff(func,x))
def partial_y(func,y):
return (diff(func,y))
def partial_z(func,z):
return (diff(func,z))
def gradient2(func):
return [partial_x(func,x), partial_y(func,y)]
def gradient3(func):
return [partial_x(func,x), partial_y(func,y), partial_z(func,z)]
def dot_product(x,y):
scalar_final = 0
for i in range(0,len(x)):
scalar_final = scalar_final + x[i] * y[i]
return (scalar_final)
def level_tangent(func, point):
    if len(point) == 2:
        minuspoint = [x - point[0], y - point[1]]
        # subs() returns a new expression, so keep the result: the gradient
        # must be evaluated at the point before taking the dot product.
        grad = [g.subs([(x, point[0]), (y, point[1])]) for g in gradient2(func)]
        equation = '%s = 0' % (dot_product(grad, minuspoint))
        print(equation)
    else:
        minuspoint = [x - point[0], y - point[1], z - point[2]]
        grad = [g.subs([(x, point[0]), (y, point[1]), (z, point[2])]) for g in gradient3(func)]
        equation = '%s = 0' % (dot_product(grad, minuspoint))
        print(equation)
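# Example: f = x**2 + y**2 at point (1, 1) prints "2*x + 2*y - 4 = 0",
# the level-curve tangent line at that point.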
func = eval(input('f(x) = '))
point = eval(input('Point = '))
level_tangent(func,point)
|
UTF-8
|
Python
| false | false | 1,213 |
py
| 6 |
level_tangent.py
| 5 | 0.591096 | 0.573784 | 0 | 46 | 25.369565 | 71 |
Gabriel1955/machine-learning
| 283,467,864,213 |
606212c5a3d2c488b92e2754760606f1a82cbd1a
|
5f72013e7abbcd8077211c1d39120e9cd38dda70
|
/optimization_of_antennas.py
|
78014196031890bcf33bf45f52a82a345fd45327
|
[] |
no_license
|
https://github.com/Gabriel1955/machine-learning
|
51690675e19ed1281031c5c62aa46333a7d437ce
|
bea193961a0ee61c20394eb31d10b4c6cd388db6
|
refs/heads/master
| 2020-04-19T12:35:18.931664 | 2019-01-29T17:22:24 | 2019-01-29T17:22:24 | 168,195,669 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
https://aydanomachado.com/mlclass/02_Optimization.php?
phi1=90&theta1=90&phi2=90&theta2=90&phi3=90&theta3=90&dev_key=Simple%20Agent*
'''
from random import *
import copy
import random
import requests
import json
import threading
import numpy as np
import numpy.random
dev_key = "Os ifzinhos"
size_population = 20
precision = 1
class Chromosome:
phi1 = 0
theta1 =0
phi2 = 0
theta2 = 0
phi3 = 0
theta3 = 0
score = 0
def __init__(self):
self.phi1 = getRamdomGene()
self.theta1 = getRamdomGene()
self.phi2 = getRamdomGene()
self.theta2 = getRamdomGene()
self.phi3 = getRamdomGene()
self.theta3 = getRamdomGene()
self.score = 0
def getRamdomGene():
aux_precision = 10**precision
gene = randint(0,359*aux_precision)
return gene/aux_precision
def createPopulation():
population = []
i = 0
while i < size_population*2:
population.append(Chromosome())
i += 1
return population
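# Note: the initial population holds 2*size_population chromosomes; each
# generation, selectionPopulation() keeps the best half before mutating.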
def toEvaluatePopulation(population, best):
i = 0
threads = []
#print("to")
while i < len(population):
threads.insert(i,threading.Thread(target=getScore,args=(population[i],i)))
threads[i].start()
i += 1
#print("to 2 ")
    # Wait for every score-fetching thread to finish instead of busy-waiting.
    for t in threads:
        t.join()
#print("to 3 ")
i = 0
while i <len(population):
if population[i].score > best.score:
best = population[i]
i +=1
#print("to 4 ")
return best
def threadsIsAlive(threads):
for t in threads:
        if t.is_alive():  # isAlive() was removed in Python 3.9
return True
return False
def getScore(chromosome,i):
params = {"phi1": chromosome.phi1, "theta1": chromosome.theta1,
"phi2": chromosome.phi2, "theta2": chromosome.theta2,
"phi3": chromosome.phi3, "theta3": chromosome.theta3, "dev_key": dev_key }
URL = "https://aydanomachado.com/mlclass/02_Optimization.php"
r = requests.post(url=URL, data=params)
gain = json.loads(r.text)['gain']
print("get score " +str(i)+ " get: "+str(gain)+"\n")
chromosome.score = gain
return gain
def selectionPopulation(population):
population.sort(key=lambda a: a.score, reverse=True)
return population[0:size_population]
def mutationPopulation(population):
news_chromosome = []
for p in population:
news_chromosome.append(copy.copy(p))
i = 0
while i < len(news_chromosome):
for index in random.sample(range(6), randint(0,4)):
new_value = getRamdomGene()
if index == 0:
news_chromosome[i].phi1 = new_value
elif index == 1:
news_chromosome[i].theta1 = new_value
elif index == 2:
news_chromosome[i].phi2 = new_value
elif index == 3:
news_chromosome[i].theta2 = new_value
elif index == 4:
news_chromosome[i].phi3 = new_value
elif index == 5:
news_chromosome[i].theta3 = new_value
        else:
            print("error: unexpected gene index")
i += 1
return news_chromosome
def printChromosome(chromosome):
print("phi1 "+str(chromosome.phi1))
print("theta1 "+str(chromosome.theta1))
print("phi2 "+str(chromosome.phi2))
print("theta2 "+str(chromosome.theta2))
print("phi3 "+str(chromosome.phi3))
print("theta3 "+str(chromosome.theta3))
print("score "+str(chromosome.score))
print("")
def main():
#getScore(Chromosome())
#print(random.sample(range(6), randint(1,6)))
best_chromosome = Chromosome()
population_global = createPopulation()
best_chromosome = toEvaluatePopulation(population_global, best_chromosome)
i = 0
aux_precision = 0
while 1:
population_global = selectionPopulation(population_global)
mutation = mutationPopulation(population_global)
bestScore = best_chromosome.score
best_chromosome = toEvaluatePopulation(mutation, best_chromosome)
if bestScore == best_chromosome.score:
aux_precision +=1
if aux_precision > 20:
aux_precision = 0
global precision
precision +=1
if precision > 20:
precision = 20
elif bestScore < best_chromosome.score:
printChromosome(best_chromosome)
population_global += mutation
i+=1
return 0
if __name__ == "__main__":
main()
|
UTF-8
|
Python
| false | false | 4,615 |
py
| 1 |
optimization_of_antennas.py
| 1 | 0.580932 | 0.554496 | 0 | 179 | 24.782123 | 88 |
Vinushan/project8
| 7,602,092,142,991 |
10da6324f49d704c813cbf06c4bd99c4259ab89b
|
e84da9a6a3d9596ce9c91ed12c57666fe0de9b4a
|
/project8-web/home/views.py
|
e40156c0d25cc59a4b582a6a399361d864ab3956
|
[] |
no_license
|
https://github.com/Vinushan/project8
|
6da6b7615f0f88b20bf38e6536dec9a2dba28908
|
5df3df307c9542775c271ada8d3ade73aedda2ce
|
refs/heads/master
| 2022-11-23T22:22:58.660426 | 2020-07-25T20:04:03 | 2020-07-25T20:04:03 | 276,161,008 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.models import User
def landing(request):
if request.user.is_authenticated:
context = {'logged_in':True}
else:
context = {'logged_in':False}
return render(request, 'home/all.html', context)
|
UTF-8
|
Python
| false | false | 316 |
py
| 31 |
views.py
| 17 | 0.712025 | 0.712025 | 0 | 11 | 27.818182 | 52 |
euguroglu/Flask_Socket_IO_Session
| 14,912,126,490,536 |
14bdd9edff8d3a4f0467d31b5de1206ddfc66aa2
|
9426adf2d5fe7485d1d12ededa9c5dce2fcc69ea
|
/app.py
|
92738908eeb6d43e2f0ae91fd71dc3143df49173
|
[] |
no_license
|
https://github.com/euguroglu/Flask_Socket_IO_Session
|
5eb04842154a5e1893413de7f37e47ba3b0e4ecb
|
57b0859f086ce542979b496b010a3727272adadb
|
refs/heads/master
| 2023-02-07T13:33:08.582096 | 2020-12-30T16:30:17 | 2020-12-30T16:30:17 | 325,559,970 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Flask, render_template, request
from flask_socketio import SocketIO, send, emit, join_room, leave_room
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secretkey'
app.config['DEBUG'] = True
socketio = SocketIO(app)
users = {}
@app.route('/')
def index():
return render_template('index.html')
@socketio.on('username',namespace='/private')
def receive_username(username):
users[username] = request.sid
print('Username added')
@socketio.on('private_message',namespace='/private')
def private_message(payload):
recipient_session_id = users[payload['username']]
message = payload['message']
emit('new_private_message',message,room=recipient_session_id)
@socketio.on('join_room',namespace='/private')
def handle_join_room(room):
join_room(room)
emit('room_message','a new user has joined',room=room)
@socketio.on('leave_the_room',namespace='/private')
def handle_leave_room(room):
leave_room(room)
    emit('room_message','a user has left the room',room=room)
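# Client-side sketch (assumed Socket.IO JS client on the /private namespace):
#   var sock = io('/private');
#   sock.emit('username', 'alice');
#   sock.emit('private_message', {username: 'alice', message: 'hi'});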
if __name__ == "__main__":
socketio.run(app)
|
UTF-8
|
Python
| false | false | 1,069 |
py
| 3 |
app.py
| 2 | 0.695978 | 0.695978 | 0 | 39 | 26.410256 | 70 |
AdamZhouSE/pythonHomework
| 11,347,303,609,376 |
dadf283617ac1439bd2f4ff443a50afdec2bedae
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2793/60876/260384.py
|
ff273094d7c12aea8c1581e3b1e431c0851e76d8
|
[] |
no_license
|
https://github.com/AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
n,c=map(int,input().split(" "))
seconds=list(map(int,input().split(" ")))
seconds=seconds[::-1]
sum=1
for i in range(0,n-1):
if seconds[i]-seconds[i+1]>c:
break
else:
sum+=1
if c==0:
print(0)
else:
print(sum)
|
UTF-8
|
Python
| false | false | 240 |
py
| 45,079 |
260384.py
| 43,489 | 0.554167 | 0.520833 | 0 | 13 | 17.538462 | 41 |
ostin-r/automate-boring-stuff-solutions
| 2,568,390,474,106 |
4a1ae1fc679b2bce1676ee30c169928d6b05de2a
|
68a96e220e92c0247dd62bbf00e06451dc45f88e
|
/follow-along/Chapters 10-20/combine_pdf_pages.py
|
982cd3fe1a8cee78c9763eebe5bc005f26371738
|
[] |
no_license
|
https://github.com/ostin-r/automate-boring-stuff-solutions
|
f0853bfa12e2e09e26bd5e0e482e59c1cc2fd10f
|
78f0a2981e6520ff2907285e666168a0f35eba02
|
refs/heads/main
| 2023-06-16T03:53:16.200117 | 2021-07-14T11:11:24 | 2021-07-14T11:11:24 | 335,448,959 | 4 | 1 | null | false | 2021-03-14T15:59:26 | 2021-02-02T23:14:15 | 2021-03-14T15:57:55 | 2021-03-14T15:59:26 | 232 | 0 | 0 | 0 |
Python
| false | false |
'''
Ch. 15 follow-along project for selectively merging
multiple .PDF files
'''
import os
import PyPDF2 as PyPDF
os.chdir('follow-along/Chapters 10-20')
files = []
for filename in os.listdir():
if filename.endswith('.pdf'):
files.append(filename)
files.sort(key = str.lower)
writer = PyPDF.PdfFileWriter()
for filename in files:
file_obj = open(filename, 'rb')
reader = PyPDF.PdfFileReader(file_obj)
for page in range(1, reader.numPages):
page_obj = reader.getPage(page)
writer.addPage(page_obj)
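# Note: the page range starts at 1, so page 0 of every source PDF is
# skipped (presumably to drop each file's cover sheet, as in the book's
# follow-along project).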
pdf_output = open('allminutes.pdf', 'wb')
writer.write(pdf_output)
pdf_output.close()
|
UTF-8
|
Python
| false | false | 626 |
py
| 72 |
combine_pdf_pages.py
| 70 | 0.690096 | 0.677316 | 0 | 27 | 22.222222 | 51 |
CheshireCatNick/super-mro
| 652,835,043,014 |
cdda59bcb8d2860ff731a21a4825274ce7b0d616
|
7554878d438d2ba8b5752c3b80947deeaf58b88c
|
/super0.py
|
18009d242e2a19e899adc007d71c3786b9e0be1f
|
[] |
no_license
|
https://github.com/CheshireCatNick/super-mro
|
45e5d2d71ba1df898ef4d4c529c5b045be7d69c1
|
faf49c60bd51236119ca21b55bbe945d778375a4
|
refs/heads/master
| 2022-11-10T07:05:10.471899 | 2020-07-02T14:28:17 | 2020-07-02T14:28:17 | 275,703,781 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Rectangle:
def __init__(self, length, width):
self.length = length
self.width = width
def show(self):
print(f'length: {self.length}, width: {self.width}')
def calculate(self):
print('area:', self.length * self.width)
class Square(Rectangle):
def __init__(self, length):
super().__init__(length, length)
square = Square(3)
square.show()
square.calculate()
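# Square.__mro__ is (Square, Rectangle, object), so super() inside
# Square.__init__ resolves to Rectangle.__init__.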
|
UTF-8
|
Python
| false | false | 437 |
py
| 14 |
super0.py
| 13 | 0.578947 | 0.576659 | 0 | 20 | 20.75 | 60 |
Vassi3997/opencv-practice
| 7,799,660,659,294 |
72be78d060ed593e04dd9ca78422b10bab3c9ad2
|
94982b64f7ecaa37b593b61e2a3ea50c77a13e81
|
/opencv/IO.py
|
89c753f6cda8e607b816b6c6581ea09e424fc549
|
[] |
no_license
|
https://github.com/Vassi3997/opencv-practice
|
3e6ad2fe5f1f31d374ec95f3ef8c2ee90bed7f5c
|
783766de46de02e34cb4933423728fd974047c9d
|
refs/heads/master
| 2020-06-22T04:56:20.854132 | 2019-07-27T16:39:38 | 2019-07-27T16:39:38 | 197,638,865 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import cv2
import matplotlib.pyplot as plt
import numpy
img = cv2.imread('color-theory.jpg',cv2.IMREAD_GRAYSCALE)
# cv2.imshow('img',img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
plt.imshow(img,cmap = 'gray' , interpolation = 'bicubic')
plt.show()
|
UTF-8
|
Python
| false | false | 252 |
py
| 13 |
IO.py
| 13 | 0.730159 | 0.702381 | 0 | 12 | 20.083333 | 57 |
bunjlabs/hap-web
| 18,305,150,618,574 |
2dad267b38756dcd12a043f48d60ac9fff5a7703
|
b5ebd4fe8de7409719a9699db05239587435df2f
|
/main.py
|
f324f720dc77d0e72f76dc4dc0025b8600cb664d
|
[] |
no_license
|
https://github.com/bunjlabs/hap-web
|
3c1fe16218c90dce6718ccaad679408c3afa315d
|
271076c498d9213f8149f742d1b20906372beecb
|
refs/heads/master
| 2019-07-07T18:46:40.834719 | 2017-04-08T13:14:00 | 2017-04-08T13:14:00 | 87,554,890 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import sys

from django.shortcuts import render
# Settings must be configured before importing some things
# from django.views.decorators.csrf import csrf_exempt
# VIEW
def index(request, name=None):
return render(request, 'index.html', {'name': name})
def index2(request, name=None):
return render(request, 'base.html', {'name': name})
# URLS
from django.conf.urls import url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = [
url(r'^$', index),
url(r'^1$', index2),
url(r'^(?P<name>\w+)?$', index),
]
urlpatterns += staticfiles_urlpatterns()
if __name__ == '__main__':
    def here(path):
        # Minimal helper (assumed): resolve *path* relative to this file.
        return os.path.abspath(os.path.join(os.path.dirname(__file__), path))

    # set the ENV
    sys.path += (here('.'),)
# run the development server
from django.core import management
management.execute_from_command_line()
|
UTF-8
|
Python
| false | false | 784 |
py
| 5 |
main.py
| 4 | 0.678571 | 0.674745 | 0 | 32 | 23.46875 | 67 |
Kenny3Shen/EGLS
| 695,784,709,376 |
67ed59005bc7af923f716382fca6d211deb691a5
|
17227a3f2c818893dc4ec1933828dc690178a620
|
/EGLS/about_ui.py
|
6b392ee12b7ba0a0b3c3f74c05c0114dd0aba9db
|
[] |
no_license
|
https://github.com/Kenny3Shen/EGLS
|
e4e524550e89682d4065812d352dde86e28baa6c
|
d3a32ab655f8a452db88461fea43c428a37925c6
|
refs/heads/master
| 2023-06-12T19:44:07.663431 | 2023-02-24T12:00:18 | 2023-02-24T12:00:18 | 365,752,842 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'about.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class Ui_Form(object):
def setupUi(self, Form):
if not Form.objectName():
Form.setObjectName(u"Form")
Form.resize(700, 350)
Form.setMinimumSize(QSize(700, 350))
Form.setMaximumSize(QSize(700, 350))
font = QFont()
font.setFamily(u"Consolas")
font.setPointSize(24)
Form.setFont(font)
Form.setStyleSheet(u"*{\n"
" background-color:rgb(255,255,255);\n"
" color: rgb(0,0,0);\n"
" font-family:Consolas, \u5fae\u8f6f\u96c5\u9ed1\n"
"}\n"
" \n"
" ")
self.label = QLabel(Form)
self.label.setObjectName(u"label")
self.label.setGeometry(QRect(20, 10, 671, 41))
font1 = QFont()
font1.setFamily(u"Consolas")
font1.setPointSize(28)
self.label.setFont(font1)
self.label_2 = QLabel(Form)
self.label_2.setObjectName(u"label_2")
self.label_2.setGeometry(QRect(20, 70, 671, 31))
font2 = QFont()
font2.setFamily(u"Consolas")
font2.setPointSize(14)
self.label_2.setFont(font2)
self.label_3 = QLabel(Form)
self.label_3.setObjectName(u"label_3")
self.label_3.setGeometry(QRect(20, 120, 671, 21))
self.label_3.setFont(font2)
self.label_3.setStyleSheet(u"")
self.label_4 = QLabel(Form)
self.label_4.setObjectName(u"label_4")
self.label_4.setGeometry(QRect(20, 320, 441, 16))
font3 = QFont()
font3.setFamily(u"Consolas")
font3.setPointSize(10)
self.label_4.setFont(font3)
self.label_5 = QLabel(Form)
self.label_5.setObjectName(u"label_5")
self.label_5.setGeometry(QRect(20, 200, 681, 21))
font4 = QFont()
font4.setFamily(u"Consolas")
font4.setPointSize(12)
self.label_5.setFont(font4)
self.label_6 = QLabel(Form)
self.label_6.setObjectName(u"label_6")
self.label_6.setGeometry(QRect(20, 240, 671, 21))
self.label_6.setFont(font4)
self.label_7 = QLabel(Form)
self.label_7.setObjectName(u"label_7")
self.label_7.setGeometry(QRect(20, 160, 681, 21))
self.label_7.setFont(font4)
self.label_8 = QLabel(Form)
self.label_8.setObjectName(u"label_8")
self.label_8.setGeometry(QRect(620, 290, 71, 51))
self.label_8.setFont(font3)
self.label_9 = QLabel(Form)
self.label_9.setObjectName(u"label_9")
self.label_9.setGeometry(QRect(20, 280, 591, 21))
self.label_9.setFont(font4)
self.retranslateUi(Form)
QMetaObject.connectSlotsByName(Form)
# setupUi
def retranslateUi(self, Form):
Form.setWindowTitle(QCoreApplication.translate("Form", u"About EGLS", None))
self.label.setText(QCoreApplication.translate("Form", u"Easy Get Live Streaming", None))
self.label_2.setText(QCoreApplication.translate("Form", u"A software used to extract a live streaming and save it", None))
self.label_3.setText(QCoreApplication.translate("Form", u"<html><head/><body><p>EGLS Open Source Project: <a href=\"https://github.com/Kenny3Shen/EGLS\"><span style=\" font-weight:600; font-style:italic; text-decoration: none; color:#00aaff;\">GitHub</span></a></p></body></html>", None))
self.label_4.setText(QCoreApplication.translate("Form", u"Copyright\u00a9 2021 Kenny3Shen(1815200045)", None))
self.label_5.setText(QCoreApplication.translate("Form", u"Powered by JetBrains PyCharm 2021.1.2 (Community Editon)", None))
self.label_6.setText(QCoreApplication.translate("Form", u"GUI powered by PySide2 & Qt Designer", None))
self.label_7.setText(QCoreApplication.translate("Form", u"Developing environment: Windows 10 21H1 & Bulit on 8/6/2021", None))
self.label_8.setText(QCoreApplication.translate("Form", u"This is\n"
"an ICON", None))
self.label_9.setText(QCoreApplication.translate("Form", u"Database and backend powered by MySQL & Django", None))
# retranslateUi
|
UTF-8
|
Python
| false | false | 4,526 |
py
| 43 |
about_ui.py
| 35 | 0.614229 | 0.557667 | 0 | 101 | 43.80198 | 296 |
Boyden/SlicerBreast_DCEMRI_FTV
| 3,058,016,725,675 |
7781b77047726ff3d1b0c7e034228757fc9af51d
|
6247fd261fcd5e8584ce1aee7c35bbaa34a1b839
|
/DCE_TumorMapProcess/Breast_DCEMRI_FTV_plugins2/ident_gzipped_exam.py
|
8aab0142ba0a503f46dd598d71cb84e56cfb3784
|
[
"LicenseRef-scancode-3dslicer-1.0",
"BSD-3-Clause"
] |
permissive
|
https://github.com/Boyden/SlicerBreast_DCEMRI_FTV
|
bfaed668288157a54050b5a1687d8ff3562f9cef
|
4a65206ef5a483ff070ce08ed85dd3399343ddfd
|
refs/heads/master
| 2023-07-02T16:43:42.409051 | 2021-08-05T00:50:48 | 2021-08-05T00:50:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#---------------------------------------------------------------------------------
#Copyright (c) 2021
#By Rohan Nadkarni and the Regents of the University of California.
#All rights reserved.
#This code was developed by the UCSF Breast Imaging Research Group,
#and it is part of the extension Breast_DCEMRI_FTV,
#which can be installed from the Extension Manager in the 3D Slicer application.
#If you intend to make derivative works of any code from the
#Breast_DCEMRI_FTV extension, please inform the
#UCSF Breast Imaging Research Group
#(https://radiology.ucsf.edu/research/labs/breast-imaging-research).
#This notice must be attached to all copies, partial copies,
#revisions, or derivations of this code.
#---------------------------------------------------------------------------------
#Created by Rohan Nadkarni
#function to check if an exam's DICOMs need
#to be gunzipped or not
import os
def checkForGzippedDicoms(exampath):
#return list of folders in exampath
folders = [directory for directory in os.listdir(exampath) if os.path.isdir(os.path.join(exampath,directory))]
#by default, gzipped binary variable is set to 0
gzipped = 0
#loop through all folders to see if you find a gzipped DICOM
#If you find one, exam is considered gzipped
#Edit 12/16/2020: Change the criterion to be finding > 1 gzipped DICOMs
for i in range(len(folders)):
curr_path = os.path.join(exampath,folders[i])
gzip_files_lc = [f for f in os.listdir(curr_path) if f.endswith('.dcm.gz')] #lowercase dcm gzipped
        gzip_files_uc = [f for f in os.listdir(curr_path) if f.endswith('.DCM.gz')] #uppercase DCM gzipped
#Edit 6/30/21: make sure gzipped .dmi files are not counted in this list
gzip_files_noext = [f for f in os.listdir(curr_path) if(f.endswith('.gz') and '.dmi' not in f)] #Edit 1/26/21: For DICOMs that don't have .dcm or .DCM extension
#If current folder has either of these gzipped DICOM types,
#considered exam to be gzipped and exit loop
#6/30/21: Folder with gzipped exam must have number as name
#This prevents gunzipping prompted by folders containing
#gzipped .dmi files, for example.
        if folders[i].isdigit() and ((len(gzip_files_lc) > 1) or (len(gzip_files_uc) > 1) or (len(gzip_files_noext) > 1)):
            # Parentheses added: 'and' binds tighter than 'or', so the
            # isdigit() guard previously covered only the first condition.
gzipped = 1
break
return gzipped
|
UTF-8
|
Python
| false | false | 2,395 |
py
| 21 |
ident_gzipped_exam.py
| 16 | 0.657203 | 0.642589 | 0 | 55 | 42.545455 | 168 |
concerttttt/Py2C
| 15,195,594,321,928 |
4fa73bcfd327003eaaefbaaf23166480b4830085
|
9540e98f2294fb500015b3aa76857a0fa4cd7f40
|
/setup.py
|
43357615ff049b1aa446be741130978bcafd47e5
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/concerttttt/Py2C
|
5e438a39ac2006b72f693bc687e445376723d5a7
|
b5c9fd238db589f6d7709482901e33ffebb764eb
|
refs/heads/develop
| 2021-01-21T20:29:49.282057 | 2016-03-13T11:42:35 | 2016-03-13T11:42:35 | 92,241,652 | 0 | 1 | null | true | 2017-05-24T02:36:23 | 2017-05-24T02:36:23 | 2017-05-24T02:36:15 | 2016-03-13T11:42:35 | 1,126 | 0 | 0 | 0 | null | null | null |
#!/usr/bin/python3
# pylint:disable=C0103
import sys
if sys.version_info[:2] < (3, 3):
print("[Py2C] Cannot run on Python versions before Python 3.3")
sys.exit(1)
try:
from setuptools import setup, find_packages
except ImportError:
print("[Py2C] Please install 'setuptools'..")
sys.exit(1)
# setup.py metadata
from setup_metadata import get_metadata # noqa
# -----------------------------------------------------------------------------
# Generating the AST
# -----------------------------------------------------------------------------
from os.path import join, dirname, realpath # noqa
try: # If ever setuptools improves on the build_py command.
from setuptools.command.build_py import build_py as _build_py
except ImportError:
from distutils.command.build_py import build_py as _build_py
class build_py(_build_py):
"""A customized version to build the AST definition files
"""
def initialize_options(self):
import py2c.tree.node_gen as node_gen
path_to_definitions = realpath(join(dirname(__file__), "py2c", "tree"))
node_gen.generate(path_to_definitions)
super().initialize_options()
metadata = get_metadata()
# -----------------------------------------------------------------------------
# The main setup call
# -----------------------------------------------------------------------------
setup(
# Packaging related stuff
packages=find_packages(),
setup_requires=["ply==3.4"],
cmdclass={
'build_py': build_py,
},
**metadata
)
|
UTF-8
|
Python
| false | false | 1,554 |
py
| 54 |
setup.py
| 32 | 0.535393 | 0.52381 | 0 | 55 | 27.254545 | 79 |
stavrosvl7/ergasiess
| 8,297,876,838,015 |
0eb03ccd7f7764d73b642184ff4628e43f4df145
|
685d1f6e0427ec85c0382a2ab38940dea94334ad
|
/askisi1.py
|
1085fa2ad2f15d1e636c545da17d9547cb2e9495
|
[] |
no_license
|
https://github.com/stavrosvl7/ergasiess
|
52dcefbcbe7ba6a1048e01c9b71021e807636a59
|
77b97f0be8ef0b1a1609b95a70b7b9466b0c8927
|
refs/heads/master
| 2021-01-10T03:28:14.585918 | 2016-03-11T01:07:20 | 2016-03-11T01:07:20 | 53,629,190 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def createGame():
import random
Matrix = [[0 for x in range(10)] for y in range(10)];
for i in range(10):
print(Matrix[i]);
    print('you are on one of the 0 cells and the treasure is on another..careful not to step outside the board or you lose..find it!!')
playerx = random.randint(0,9);
playery = random.randint(0,9);
tresurex = random.randint(0,9);
tresurey = random.randint(0,9);
Matrix[playerx][playery] = 'P';
Matrix[tresurex][tresurey] = 'T';
distx=tresurex-playerx;
disty=tresurey-playery;
while(distx != 0) or (disty != 0):
k = str(raw_input("dialekse tin kinisi sou gia na vreis ton thisauro..left,right,up or down?"))
if k=="left":
Matrix[playerx][playery] = 0;
playery -= 1
Matrix[playerx][playery] = 'P';
if disty >=0:
print('apomakrynesai apo to thisauro')
if disty < 0 and abs(disty) != 1:
print('i apostash sou apo to thisauro orizontia einai %s' % (abs(disty+1)))
if disty == -1:
print('vriskesai stin idia sthlh me to thisauro')
if k=="right":
if disty <= 0:
print('apomakrynesai apo to thisauro')
if disty > 0 and abs(disty) != 1:
print('i apostash sou apo to thisauro orizontia einai %s' % (abs(disty-1)))
if disty == 1:
print('vriskesai stin idia sthlh me to thisauro')
Matrix[playerx][playery] = 0;
playery += 1
Matrix[playerx][playery] = 'P';
if k=="down":
if distx <=0:
print('apomakrynesai apo to thisauro')
if distx > 0 and abs(distx) != 1:
print('i apostash sou apo to thisauro katakoryfa einai %s' % (abs(distx-1)))
if distx == 1:
print('vriskesai stin idia grammh me to thisauro')
Matrix[playerx][playery] = 0;
playerx += 1
Matrix[playerx][playery] = 'P';
if k=="up":
if distx >= 0:
print('apomakrynesai apo to thisauro')
if distx < 0 and abs(distx) != 1:
print('i apostash sou apo to thisauro katakoryfa einai %s' % (abs(distx+1)))
if distx == -1:
print('vriskesai stin idia grammh me to thisauro')
Matrix[playerx][playery] = 0;
playerx -= 1;
Matrix[playerx][playery] = 'P';
distx=tresurex-playerx;
disty=tresurey-playery;
if (playerx < 0):
print('exases');
break;
if (playerx > 9):
print('exases');
break;
if (playery < 0):
print('exases');
break;
if (playery > 9):
print('exases');
break;
if (distx == 0) and (disty == 0):
print("nikhses vrikes ton thisauro");
|
UTF-8
|
Python
| false | false | 2,954 |
py
| 3 |
askisi1.py
| 3 | 0.51151 | 0.493907 | 0 | 72 | 39.833333 | 139 |
jeremyjyang/BioClients
| 13,194,139,551,126 |
ffa2bdfe0e7d86f8745363f9aae1cb638743c2b6
|
620edb7706208ad35a268046cf3ef390b3742cf1
|
/BioClients/icite/Client.py
|
06cf17ae3a668f04ce7528cc17a482c95524aee4
|
[
"CC0-1.0"
] |
permissive
|
https://github.com/jeremyjyang/BioClients
|
c4092449315ef8473900739f379fbd202d0eebf5
|
ec703afbd5a234f767b4209da5042040c5ee5d47
|
refs/heads/master
| 2023-08-07T13:44:38.087806 | 2023-07-20T16:34:41 | 2023-07-20T16:34:41 | 245,521,437 | 12 | 2 |
CC0-1.0
| false | 2020-04-07T21:34:52 | 2020-03-06T21:39:53 | 2020-04-07T19:57:14 | 2020-04-07T21:34:51 | 230 | 0 | 0 | 0 |
Python
| false | false |
#!/usr/bin/env python3
"""
PubMed iCite REST API client
https://icite.od.nih.gov/api
"""
###
import sys,os,re,argparse,logging
#
from .. import icite
#############################################################################
if __name__=='__main__':
parser = argparse.ArgumentParser(description='PubMed iCite REST API client utility', epilog='Publication metadata.')
ops = ['get_stats']
parser.add_argument("op", choices=ops, help='OPERATION')
parser.add_argument("--ids", help="PubMed IDs, comma-separated (ex:25533513)")
parser.add_argument("--i", dest="ifile", help="input file, PubMed IDs")
parser.add_argument("--nmax", help="list: max to return")
parser.add_argument("--year", help="list: year of publication")
parser.add_argument("--o", dest="ofile", help="output (TSV)")
parser.add_argument("--api_host", default=icite.API_HOST)
parser.add_argument("--api_base_path", default=icite.API_BASE_PATH)
parser.add_argument("-v", "--verbose", default=0, action="count")
parser.add_argument("-q", "--quiet", action="store_true", help="Suppress progress notification.")
args = parser.parse_args()
# logging.PROGRESS = 15 (custom)
logging.basicConfig(format='%(levelname)s:%(message)s', level=(logging.DEBUG if args.verbose>0 else logging.ERROR if args.quiet else 15))
base_url='https://'+args.api_host+args.api_base_path
fout = open(args.ofile, "w", encoding="utf-8") if args.ofile else sys.stdout
ids=[];
if args.ifile:
fin = open(args.ifile)
while True:
line = fin.readline()
if not line: break
ids.append(line.rstrip())
logging.info('Input IDs: %d'%(len(ids)))
fin.close()
elif args.ids:
ids = re.split(r'[\s,]+', args.ids.strip())
if args.op == 'get_stats':
if not ids: parser.error(f'Operation requires PMID[s]: {args.op}')
icite.GetStats(ids, base_url, fout)
else:
parser.error(f"Invalid operation: {args.op}")
|
UTF-8
|
Python
| false | false | 1,922 |
py
| 243 |
Client.py
| 188 | 0.638398 | 0.630073 | 0 | 51 | 36.666667 | 139 |
bellyfat/thorchain-arb
| 128,849,051,180 |
1158de06f27dfe99c29bc50b74327ef08510091d
|
3e21b8ef0f07383b1fbf7254c4dc9f77a90ea490
|
/midgard_client/midgard_client/models/thor_name_details.py
|
355966751f1d7cffe681d9712b19759f81f55d7a
|
[] |
no_license
|
https://github.com/bellyfat/thorchain-arb
|
dffc7d7e2a3d55ac59d6b6bbf374e2e15c8d585f
|
c0c3c9ca750f0b3e5a8aa24996ce7e9286d42664
|
refs/heads/main
| 2023-08-22T00:58:30.161998 | 2021-10-01T12:48:24 | 2021-10-01T12:48:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding: utf-8
"""
Midgard Public API
The Midgard Public API queries THORChain and any chains linked via the Bifröst and prepares information about the network to be readily available for public users. The API parses transaction event data from THORChain and stores them in a time-series database to make time-dependent queries easy. Midgard does not hold critical information. To interact with BEPSwap and Asgardex, users should query THORChain directly. # noqa: E501
OpenAPI spec version: 2.4.1
Contact: devs@thorchain.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class THORNameDetails(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'owner': 'str',
'expire': 'str',
'entries': 'list[THORNameEntry]'
}
attribute_map = {
'owner': 'owner',
'expire': 'expire',
'entries': 'entries'
}
def __init__(self, owner=None, expire=None, entries=None): # noqa: E501
"""THORNameDetails - a model defined in Swagger""" # noqa: E501
self._owner = None
self._expire = None
self._entries = None
self.discriminator = None
self.owner = owner
self.expire = expire
self.entries = entries
@property
def owner(self):
"""Gets the owner of this THORNameDetails. # noqa: E501
owner's THOR address # noqa: E501
:return: The owner of this THORNameDetails. # noqa: E501
:rtype: str
"""
return self._owner
@owner.setter
def owner(self, owner):
"""Sets the owner of this THORNameDetails.
owner's THOR address # noqa: E501
:param owner: The owner of this THORNameDetails. # noqa: E501
:type: str
"""
if owner is None:
raise ValueError("Invalid value for `owner`, must not be `None`") # noqa: E501
self._owner = owner
@property
def expire(self):
"""Gets the expire of this THORNameDetails. # noqa: E501
Int64, THORChain block height in which THORName expires # noqa: E501
:return: The expire of this THORNameDetails. # noqa: E501
:rtype: str
"""
return self._expire
@expire.setter
def expire(self, expire):
"""Sets the expire of this THORNameDetails.
Int64, THORChain block height in which THORName expires # noqa: E501
:param expire: The expire of this THORNameDetails. # noqa: E501
:type: str
"""
if expire is None:
raise ValueError("Invalid value for `expire`, must not be `None`") # noqa: E501
self._expire = expire
@property
def entries(self):
"""Gets the entries of this THORNameDetails. # noqa: E501
List details of all chains and their addresses for a given THORName # noqa: E501
:return: The entries of this THORNameDetails. # noqa: E501
:rtype: list[THORNameEntry]
"""
return self._entries
@entries.setter
def entries(self, entries):
"""Sets the entries of this THORNameDetails.
List details of all chains and their addresses for a given THORName # noqa: E501
:param entries: The entries of this THORNameDetails. # noqa: E501
:type: list[THORNameEntry]
"""
if entries is None:
raise ValueError("Invalid value for `entries`, must not be `None`") # noqa: E501
self._entries = entries
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(THORNameDetails, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, THORNameDetails):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
UTF-8
|
Python
| false | false | 5,414 |
py
| 137 |
thor_name_details.py
| 79 | 0.580824 | 0.566599 | 0 | 171 | 30.654971 | 435 |
Igor-IGI/PythonProjects_SN
| 3,848,290,720,853 |
04eaa2ff545caf4b7ecc3e154649995b419d14cb
|
b7dd906e90397d89ba844693817a55304ba704d6
|
/HomeWork_8/Mood_8_1.py
|
a773cf911b96f64af03fb06815ce9f1428c254c1
|
[] |
no_license
|
https://github.com/Igor-IGI/PythonProjects_SN
|
999cc7de54d9329ab1ba1ed98ffa45b7ba4878da
|
c0cfa18a5194c729da0e462f7cc363c4ccd0c098
|
refs/heads/master
| 2022-06-20T05:10:10.392679 | 2020-05-14T20:52:10 | 2020-05-14T20:52:10 | 259,880,770 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
mood = input("Enter your mood: ")
if mood == "happy":
print("It is great to see you happy!")
elif mood == "nervous":
print("Take a deep breath 3 times.")
elif mood == "sad":
print("Go do what makes you happy")
elif mood == "excited":
print("It is nice to be excited")
elif mood == "relaxed":
print("Chill and enjoy")
else:
print("I don't recognize this mood")
|
UTF-8
|
Python
| false | false | 387 |
py
| 6 |
Mood_8_1.py
| 6 | 0.625323 | 0.622739 | 0 | 14 | 26.714286 | 42 |
HitBirds/weimei
| 16,569,983,852,948 |
7bdb86ddc491050b760303547fd5b95a4b92ff5d
|
30ff9c903ee60c63a0f4a26470be3428ec77c613
|
/apps/clothes/views.py
|
388ae43d217962b1015849e0e5dc89f3060eddf0
|
[] |
no_license
|
https://github.com/HitBirds/weimei
|
8487a46dc19bc38ff4eab5d390f757b2a968030f
|
93c30377b9dc2966164ac8f77c5e98b699411fef
|
refs/heads/master
| 2022-05-16T16:51:56.851898 | 2021-09-15T11:10:29 | 2021-09-15T11:10:29 | 144,744,410 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Max,Min
from django.db.models import Q
from clothes.models import Clothes,Season
from users.models import UserProfile
# Create your views here.
def more(request,sortby='time'):
seasons = None
user_info = None
clothes = None
search_keywords = request.POST.get('keywords','')
try:
user_info = UserProfile.objects.filter(username=request.user.username)[0]
except:
pass
try:
seasons = Season.objects.order_by('index')
except:
pass
cson = request.GET.get('season','all')
if cson=='all':
if sortby=='sold':
clothes = Clothes.objects.order_by('sold')
else:
clothes = Clothes.objects.order_by('update_time')
else:
try:
clothes = Clothes.objects.filter(seasons__name=cson)
except:
pass
if sortby=='sold':
clothes = clothes.order_by('sold')
else:
clothes = clothes.order_by('update_time')
if search_keywords:
clothes = clothes.filter(Q(name__icontains=search_keywords)|Q(brand__name__icontains=search_keywords)|Q(sleeve__icontains=search_keywords))
paginator = Paginator(clothes, 4)
page = request.GET.get('page','1')
clothes_paginator = paginator.get_page(page)
clothes = list(map(lambda x: {'firstImg': x.colors.all()[0].colorImg,
'firstColor':x.colors.all()[0].id,
'c_id':x.id,
'c_name':x.name,
'price': x.price,
'colors': x.colors.all(),
'brand':x.brand,
'fabric':x.fabric,
'size_min':x.size_to_styles.aggregate(Min('name'))['name__min'],
'size_max':x.size_to_styles.aggregate(Max('name'))['name__max'],
'flower': x.colors.all()[0].flower},clothes_paginator))
context = {'user_info': user_info, 'seasons': seasons, 'clothes': clothes,'clothes_paginator':clothes_paginator,'cson':cson,'sortby':sortby}
return render(request, 'more.html', context=context)
def detail(request,c_id=1,color_id=1):
user_info = None
clothes = None
colors = None
sizes = None
seasons = None
all_seasons = None
flowers = None
ez = False
try:
user_info = UserProfile.objects.filter(username=request.user.username)[0]
except:
pass
try:
clothes = Clothes.objects.filter(id=c_id)[0]
colors = clothes.colors.all()
first_color = colors.filter(id=color_id)[0]
sizes = clothes.size_to_styles.order_by('name')
seasons = clothes.seasons.all()
all_seasons = Season.objects.order_by('index').values('name')
flowers = set(map(lambda x:x.flower,colors))
ez = user_info.fav.filter(id=c_id).exists()
except:
pass
context={'user_info':user_info,'clothing':clothes,'cur_color':first_color,'colors':colors,'colorsName':set(map(lambda x:x.name,colors)),'seasons':seasons,'all_seasons':all_seasons,'sizes':sizes,'flowers':flowers,'ez':ez}
return render(request,'clothing.html',context=context)
|
UTF-8
|
Python
| false | false | 3,402 |
py
| 31 |
views.py
| 18 | 0.582011 | 0.578777 | 0 | 86 | 38.569767 | 224 |
DheerajSuthar4076/Major_project2021
| 7,739,531,107,793 |
dba9bd1c8ffae1b2a326dffb750b490433a0ef63
|
2f8e1887d3eedc4c20b6abea69e2c72716293fb5
|
/quiz/migrations/0029_auto_20200810_1015.py
|
7d3f752bf1bd8b44c1447ef0160e30acebb50106
|
[] |
no_license
|
https://github.com/DheerajSuthar4076/Major_project2021
|
1cac3a0a967c50d46acc9c3050b2c6cdde240ee6
|
a6bd396e6083e609267053dd919cd80e9c2d5e0a
|
refs/heads/master
| 2023-07-05T23:12:17.781157 | 2021-08-11T13:03:08 | 2021-08-11T13:03:08 | 394,959,018 | 0 | 1 | null | false | 2021-08-11T12:05:54 | 2021-08-11T11:02:45 | 2021-08-11T11:28:03 | 2021-08-11T12:05:53 | 0 | 0 | 1 | 0 |
HTML
| false | false |
# Generated by Django 3.0.3 on 2020-08-10 04:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('quiz', '0028_syllabus'),
]
operations = [
migrations.CreateModel(
name='Standard',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('standard', models.CharField(blank=True, max_length=250, null=True, unique=True, verbose_name='Standard')),
],
),
migrations.AddField(
model_name='question',
name='standard',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='quiz.Standard', verbose_name='Standard'),
),
]
|
UTF-8
|
Python
| false | false | 855 |
py
| 89 |
0029_auto_20200810_1015.py
| 35 | 0.602339 | 0.576608 | 0 | 26 | 31.884615 | 149 |
Dragfy11/Python_condition
| 5,145,370,834,202 |
fb1c2fe5329a1901d83d1998cdbc54da892d13ab
|
71165e0b2242f1d91c586f3ab416686c54fd4a39
|
/condition2.py
|
0bf9ca7c1428e8bb4539be1c15a35fc53ec8654c
|
[] |
no_license
|
https://github.com/Dragfy11/Python_condition
|
682e90b20afd9bd30700588238377b7b6b04c316
|
0c059e7ecd1327d5637adda7e195bb971bedf3f8
|
refs/heads/main
| 2023-04-08T23:37:26.672933 | 2021-04-20T13:13:43 | 2021-04-20T13:13:43 | 359,809,892 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# example: password verification system
password = input("Entrer votre mot de passe: ")
passeword_length= len(password)
# check whether the password is too short (8 characters or fewer)
if passeword_length <= 8:
print("mot de passe trop court !")
elif 8 < passeword_length <= 12:
print("mot de passe moyen !")
else:
print("mot de passe parfait !")
print(passeword_length)
|
UTF-8
|
Python
| false | false | 391 |
py
| 3 |
condition2.py
| 3 | 0.709512 | 0.696658 | 0 | 13 | 29 | 57 |
cash2one/xai
| 12,841,952,256,527 |
ae78f27e0a6837e1b9a8c8b542115c5594c92946
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_hairier.py
|
62ed9ce21a9b3fc4d78fd3b2cec20f095599a3d8
|
[
"MIT"
] |
permissive
|
https://github.com/cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from xai.brain.wordbase.adjectives._hairy import _HAIRY
# class header
class _HAIRIER(_HAIRY, ):
def __init__(self,):
_HAIRY.__init__(self)
self.name = "HAIRIER"
self.specie = 'adjectives'
self.basic = "hairy"
self.jsondata = {}
|
UTF-8
|
Python
| false | false | 243 |
py
| 37,275 |
_hairier.py
| 37,266 | 0.654321 | 0.654321 | 0 | 10 | 23.1 | 55 |
korkmaztest/recipe-app-api
| 5,463,198,440,036 |
78149d05b8f6d51f1e2e1b27089f182954479c7c
|
02e0927d8a19d16387e1f1fc751f59c72990ec5d
|
/api/apps/user/tests/test_user_api.py
|
64955ea8cff5bb3a49f3bd93488364a4fc53d619
|
[] |
no_license
|
https://github.com/korkmaztest/recipe-app-api
|
b0bde131cd85667c54d705e0f8c604e880d29337
|
7626aa3b8a2169c8484dc5729e51df147c1efd69
|
refs/heads/master
| 2020-12-14T13:36:41.334651 | 2020-01-31T18:07:13 | 2020-01-31T18:07:13 | 234,759,973 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
def create_user(**params):
return get_user_model().objects.create_user(**params)  # create_user hashes the password so check_password/login tests work
class PublicUserApiTest(TestCase):
def setUp(self):
self.url = reverse('user:create')
self.token_url = reverse('user:token')
self.me_url = reverse('user:me')
self.client = APIClient()
def test_create_valid_user_success(self):
payload = {
'email': 'test@something.com',
'password': 'pass123',
'name': 'Test Name',
}
res = self.client.post(self.url, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_create_user_exists(self):
payload = {
'email': 'test@something.com',
'password': 'pass123',
'name': 'Test Name',
}
create_user(**payload)
res = self.client.post(self.url, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
payload = {
'email': 'test@something.com',
'password': '123',
'name': 'Test Name',
}
res = self.client.post(self.url, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
does_user_exist = get_user_model().objects.filter(email=payload['email']).exists()
self.assertFalse(does_user_exist)
def test_login_successful(self):
payload = {'email': 'test@something.com', 'password': 'pass123'}
create_user(**payload)
res = self.client.post(self.token_url, payload)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertIn('token', res.data)
def test_login_invalid_password(self):
create_user(email='test@something.com', password='test123')
res = self.client.post(
self.token_url, {'email': 'test@something.com', 'password': 'wrong'}
)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
self.assertNotIn('token', res.data)
def test_login_non_existing_user(self):
res = self.client.post(
self.token_url, {'email': 'test@something.com', 'password': 'test123'}
)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
self.assertNotIn('token', res.data)
def test_login_empty_password(self):
create_user(email='test@something.com', password='test123')
res = self.client.post(self.token_url, {'email': 'test@something.com', 'password': ''})
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
self.assertNotIn('token', res.data)
def test_retrieve_user_unauthorized(self):
res = self.client.get(self.me_url)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTest(TestCase):
def setUp(self):
self.me_url = reverse('user:me')
self.logout_url = reverse('user:logout')
self.user = create_user(
email='test@something.com',
password='pass123',
name='Test User',
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_user_successful(self):
res = self.client.get(self.me_url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': 'Test User',
'email': 'test@something.com',
})
self.assertNotIn('password', res.data)
def test_post_not_allowed(self):
res = self.client.post(self.me_url, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
payload = {
'name': 'Test User Updated',
'password': 'passwordupdated',
}
res = self.client.patch(self.me_url, payload)
self.user.refresh_from_db()
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
def test_logout_with_get_request(self):
res = self.client.get(self.logout_url)
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_logout_with_not_logged_in(self):
self.client.logout()
res = self.client.post(self.logout_url, {})
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
def test_logout_successful(self):
res = self.client.post(self.logout_url, {})
self.assertEqual(res.status_code, status.HTTP_200_OK)
|
UTF-8
|
Python
| false | false | 4,983 |
py
| 11 |
test_user_api.py
| 9 | 0.619105 | 0.60586 | 0 | 137 | 35.372263 | 95 |
Nirvanios/BIO-head-pose-estimation
| 16,612,933,542,188 |
35c40ceb9bfed13e6c92a9a4ae4cabef0ebd1b2e
|
e4362885e1f43ed5e0423966e5d713e99003b643
|
/head_pose_multi.py
|
45b0cb86fabb76a2c7d05f10a458f8ed12e7bdf7
|
[] |
no_license
|
https://github.com/Nirvanios/BIO-head-pose-estimation
|
c5aeca8fb5e9f0ba1a24cceda6f531aca69a4c3e
|
21ffa779ac778350448f2ccfbcffb6a41c644566
|
refs/heads/master
| 2022-03-02T11:19:38.736231 | 2019-11-13T14:34:21 | 2019-11-13T14:34:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from head_pose_geometry import HeadPoseGeometry
from head_pose_model import HeadPoseModel
from head_pose_tracker import HeadPoseTracker
class MultiHeadPoseEstimator:
"""
Head pose estimation using all 3 implemented methods.
"""
def __init__(self):
self.__geom = HeadPoseGeometry()
self.__track = HeadPoseTracker()
self.__model = HeadPoseModel()
self.landmarks = []
def get_name(self):
return "multi"
def pose_for_landmarks(self, image, landmarks):
geom_res = self.__geom.pose_for_landmarks(image, landmarks)
track_res = self.__track.pose_for_landmarks(image, landmarks)
model_res = self.__model.pose_for_landmarks(image, landmarks)
self.landmarks = self.__geom.landmarks
return self.__format_group(geom_res[0], track_res[0], model_res[0]), \
self.__format_group(geom_res[1], track_res[1], model_res[1]), \
self.__format_group(geom_res[2], track_res[2], model_res[2])
def __format_group(self, arg1, arg2, arg3):
return 'geom: {}, track: {}, model: {}'.format(arg1, arg2, arg3)
|
UTF-8
|
Python
| false | false | 1,127 |
py
| 13 |
head_pose_multi.py
| 12 | 0.631766 | 0.617569 | 0 | 29 | 37.862069 | 78 |
tratda/life-partner
| 7,636,451,858,265 |
8f688a1108c7e440095d081676ad484c0abdd47e
|
6e52d0340ffb56a0e1d56e1fe162551bb71fd3f6
|
/working/Roster.py
|
bf1f0a44eaecdbdbc26b545917beed41ff4c439c
|
[] |
no_license
|
https://github.com/tratda/life-partner
|
a89c57f4bb982d8efc13db2ac66650e24a246fcc
|
56d7735d07358b4d353bb238f795b3d7b649e1a9
|
refs/heads/master
| 2018-01-03T19:55:45.767981 | 2016-10-18T22:36:16 | 2016-10-18T22:36:16 | 71,294,083 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import random
import time
import os
def cls():
os.system('cls' if os.name=='nt' else 'clear')
def absent(Absentdict):
for name in Absentdict:
if Absentdict[name] != 0:
print(name+' left at '+ time.strftime("%a, %d %b %Y %H:%M", Absentdict[name]))
def losttime(Users):
cls()
for year in range(len(Users)):
for user in Users[year]:
hour = Users[year][user]//3600
minute = (Users[year][user] - hour*3600) // 60
sec = (Users[year][user] - hour*3600 - minute *60)
print('User ' + user+' has been out of the room for ' + str(int(hour))+':'+ str(int(minute))+':'+str(int(sec)))
raw_input()
return
def saveDat(caseDict,teamDict,Users):
teamfile = open('TeamCaseLoad.txt', 'w')
out = ''
companyList = ['Alpha','Bravo','Charlie','Delta','Echo','Foxtrot','Golf','Hotel', 'India'] #uncommented: the loops below depend on it
for co in companyList:
out +='{}:{}\n'.format(co,caseDict[co])
out += '===\n'
for co in companyList:
out+='{}:{}\n'.format(co,teamDict[co])
teamfile.write(out)
teamfile.close()
out = ''
Userfile = open('Users.dat', 'w') #was opened read-only, so write() failed
for year in Users: #iterate the year lists themselves instead of the undefined F
for name in year:
out += name + '\n'
out += '++++\n'
Userfile.write(out)
Userfile.close()
return
def LoadData():
Absentdict = {}
dict2017 = {}
dict2018 = {}
dict2019 = {}
dict2020 = {}
usertime = {}
F = []
C = []
Y = []
P = []
teamfile = open('TimeTracker.txt', 'r')
flag = 0
year = 2017
for line in teamfile:
if line.strip() == '===':
flag = 1
elif line.strip() == '####':
year += 1
elif flag == 0:
if year == 2017:
line = line.strip()
line = line.split(':')
dict2017[line[0]] = int(line[1])
if year == 2018:
line = line.strip()
line = line.split(':')
dict2018[line[0]] = int(line[1])
if year == 2019:
line = line.strip()
line = line.split(':')
dict2019[line[0]] = int(line[1])
if year == 2020:
line = line.strip()
line = line.split(':')
dict2020[line[0]] = int(line[1])
elif flag == 1:
line = line.strip()
line = line.split(':')
Absentdict[line[0]] = int(line[1])
teamfile.close()
userfile = open('Users.dat', 'r')
year = ['F','C','Y','P']
i = 0
for line in userfile:
if line.strip() == '++++':
i += 1
elif year[i] == 'F':
line = line.strip()
F.append(line)
elif year[i] == 'C':
line = line.strip()
C.append(line)
elif year[i] == 'Y':
line = line.strip()
Y.append(line)
elif year[i] == 'P':
line = line.strip()
P.append(line)
Users = [F,C,Y,P]
Absences = [Absentdict, dict2017, dict2018, dict2019, dict2020, usertime]
return Absences, Users
def leaving(Absences, Users):
#while True:
absent(Absences[0])
print('-'*80)
print('What year do you want?')
print('\t1. 2017')
print('\t2. 2018')
print('\t3. 2019')
print('\t4. 2020')
year = raw_input('Your Choice: ')
year = int(year) - 1
i = 0
print('Who is leaving?')
for n in Users[year]:
i += 1
print('\t{}. {}'.format(i,n))
user = raw_input('Your Choice: ')
user = int(user) - 1
user = Users[year][user]
Absences[0][user] = time.localtime()
Absences[4][user] = time.time()
return Absences, Users
def returning(Absences, Users):
#while True:
#absent(Absentdict)
print('-'*80)
print('What year do you want?')
print('\t1. 2017')
print('\t2. 2018')
print('\t3. 2019')
print('\t4. 2020')
year = raw_input('Your Choice: ')
year = int(year) - 1
i = 0
print('Who is back?')
for n in Users[year]:
i += 1
print('\t{}. {}'.format(i,n))
user = raw_input('Your Choice: ')
user = int(user) - 1
user = Users[year][user]
Absences[0][user] = 0
left = Absences[4][user]
print(Absences[year+1])
Absences[year+1][user] += time.time() - left
return Absences, Users
def addUser(Users): # Users should be a list of the lists of classes
print("Which class are you?") # Start dialog with user
print("1. 2017")
print("2. 2018")
print("3. 2019")
print("4. 2020")
year = raw_input('Your Choice ')# Select year group
year = int(year)
print("Please type your first and last name.")
name = raw_input('Your name is (First "space" Last): ') #Have user respond with "First Last" name
#Append name to year lists
Users[year-1].append(name)
print("Added " + name) #Confirm name is added correctly
return Users
def removeUser(Users):
print("Which class is the user in?")
print("1. 2017")
print("2. 2018")
print("3. 2019")
print("4. 2020")
year = raw_input('Your Choice ')# Select year group
print Users[int(year) - 1]
print("Please type your first and last name.")
name = raw_input('Type the name as shown in the list') #Have user respond with "First Last" name
Users[int(year) - 1].remove(name) #Remove name from year lists (list.remove takes a value; pop takes an index)
print("Removed " + name) #Confirm name is removed correctly
return Users
def main():
cls()
Absences, Users = LoadData()
while True:
cls()
absent(Absences[0])
print('-'*80)
print('What do you want to do?')
print('\t1. Record Someone Leaving.')
print('\t2. Record Someone Returning.')
print('\t3. Display Time Lost per person.')
print('\t4. Add a User')
print('\t5. Remove a User')
ans = raw_input('Your Choice: ')
if ans == '1':
cls()
Absences, Users = leaving(Absences, Users)
cls()
elif ans == '2':
cls()
Absences, Users = returning(Absences, Users)
elif ans == '3':
cls()
losttime(Users)
cls()
elif ans == '4':
cls()
Users = addUser(Users)
print(Users)
raw_input()
cls()
elif ans == '5':
cls()
Users = removeUser(Users)
cls()
main()
|
UTF-8
|
Python
| false | false | 5,529 |
py
| 3 |
Roster.py
| 1 | 0.593778 | 0.554531 | 0 | 235 | 22.52766 | 114 |
q-gao/Small_Scripts
| 10,754,598,159,871 |
f3701ac628aa55bf7146e4651d771f4117385074
|
7393b521708b5508c9f78bec7b3439c76dd63665
|
/PythonCode/CalcCharPortionPercentage.py
|
dcb510c131879124d9cdced227f612a0ad878844
|
[] |
no_license
|
https://github.com/q-gao/Small_Scripts
|
8e1bbff9b73037a381addd9d747c435e3f9d213c
|
f91db9ebb00b4ac31a121fa240c1e39f3ac1c2ce
|
refs/heads/master
| 2020-05-17T04:08:15.230991 | 2019-04-25T19:57:36 | 2019-04-25T19:57:36 | 183,500,545 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
from FontStudyTool import *
import sys
if len(sys.argv) < 3:
print "Usage: test.py <char_seq> <font_name> [rotate_angle]"
sys.exit(-1)
if len(sys.argv) >= 4:
rotate = sys.argv[3]
else:
rotate = '0'
print GetCharPortionPercentage(sys.argv[1], sys.argv[2], rotate)
#
#
#imgFile = 'ab.gif'
#
##cc= DetectLeftCCFromImage(imgFile)
##GrayScaleNumpyMatrix(cc)
##cc = DetectRightCCFromImage(imgFile)
##GrayScaleNumpyMatrix(cc)
#
#ccLeft, ccRight = DetectLeftRightCCFromImage(imgFile)
#BwScaleNumpyMatrix(ccLeft)
#BwScaleNumpyMatrix(ccRight)
#from CalcMinCCDistance import *
#
#print CalcMinCCGapInImage('ab.gif')
#
|
UTF-8
|
Python
| false | false | 656 |
py
| 85 |
CalcCharPortionPercentage.py
| 30 | 0.707317 | 0.696646 | 0 | 33 | 18.757576 | 64 |
swetha65/raniSwetha
| 4,277,787,462,618 |
54f8d9ecebf077a9f5c69138c6587b73030d5733
|
740f5cda0f10ff7111062e11f3e2c9792302ca1b
|
/threa.py
|
7f350749e72b97423f68c7df8b1620a802d15d8c
|
[] |
no_license
|
https://github.com/swetha65/raniSwetha
|
61c481dc90e698edfde800ee118bf7cbf7b680ac
|
9744cd3dbde8fa1fbafdeb8a84e89dfa65978972
|
refs/heads/master
| 2023-06-24T00:55:14.804743 | 2021-07-23T13:42:48 | 2021-07-23T13:42:48 | 388,478,089 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import threading
def func(n):
print('Thread {} is runnning'.format(n))
print('Main beginning')
x = [threading.Thread(target = func, args = (i,)) for i in range(10)]
for t in x:
t.start()
for t in x:
t.join() # wait for every worker so 'Main all done' really prints last
print('Main all done')
|
UTF-8
|
Python
| false | false | 219 |
py
| 8 |
threa.py
| 8 | 0.648402 | 0.639269 | 0 | 9 | 23.333333 | 69 |
Bluessea/yolov5-tensorrt
| 18,605,798,354,110 |
a63c76104ede32f614033a9bf2a869b80a1c9fea
|
7bdd0bfaab125d309d70568c011c8c5a5ef10f88
|
/python/export_tensorrt.py
|
2a693f1dd6ef9ef21cbe9ebf9cbecb19e2a07334
|
[] |
no_license
|
https://github.com/Bluessea/yolov5-tensorrt
|
71e101db986028ad70834be5e0059e89609afb18
|
930a3629bb4a7a70d3353938c3f0aab55900af11
|
refs/heads/master
| 2023-05-09T06:09:16.251060 | 2020-11-20T23:15:51 | 2020-11-20T23:15:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import tensorrt as trt
import sys
import argparse
"""
takes in onnx model
converts to tensorrt
"""
def cli():
desc = 'compile Onnx model to TensorRT'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-m', '--model', help='onnx file location inside ./lib/models')
parser.add_argument('-fp', '--floatingpoint', type=int, default=32, help='floating point precision. 16 or 32')
parser.add_argument('-o', '--output', help='name of trt output file')
args = parser.parse_args()
model = args.model or 'yolov5s-simple.onnx'
fp = args.floatingpoint
if fp != 16 and fp != 32:
print('floating point precision must be 16 or 32')
sys.exit()
output = args.output or 'yolov5s-simple-{}.trt'.format(fp)
return {
'model': model,
'fp': fp,
'output': output
}
if __name__ == '__main__':
args = cli()
model = 'lib/models/{}'.format(args['model'])
output = 'lib/models/{}'.format(args['output'])
logger = trt.Logger(trt.Logger.VERBOSE)
EXPLICIT_BATCH = []
print('trt version', trt.__version__)
if trt.__version__[0] >= '7':
EXPLICIT_BATCH.append(
1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
with trt.Builder(logger) as builder, builder.create_network(*EXPLICIT_BATCH) as network, trt.OnnxParser(network, logger) as parser:
builder.max_workspace_size = 1 << 28
builder.max_batch_size = 1
if args['fp'] == 16:  # fp is parsed with type=int, so compare against the integer
builder.fp16_mode = True
with open(model, 'rb') as f:
if not parser.parse(f.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# reshape input from 32 to 1
shape = list(network.get_input(0).shape)
engine = builder.build_cuda_engine(network)
with open(output, 'wb') as f:
f.write(engine.serialize())
|
UTF-8
|
Python
| false | false | 1,949 |
py
| 3 |
export_tensorrt.py
| 3 | 0.600821 | 0.584915 | 0 | 55 | 34.436364 | 135 |
sauravbiswasiupr/lstmsolver-3.0
| 9,156,870,310,068 |
989602a52cd403ff16141b17c8d1e5a4e846c4f6
|
04835fc458fbca722a23d4c824ca2b7d805cfd05
|
/lstmsolver/networks/LinearLayer.py
|
3b258415d109e091efd449f77200b39cccb30e11
|
[] |
no_license
|
https://github.com/sauravbiswasiupr/lstmsolver-3.0
|
2e88d74e5917805de9eab1b4dbe4e1d082447c8b
|
1d3347b7e987487bdf273399cbd632bbc1f62fc3
|
refs/heads/master
| 2020-12-24T17:08:29.842063 | 2019-11-08T18:38:59 | 2019-11-08T18:38:59 | 16,409,347 | 8 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''An implementation of a Linearlayer. There is no transfer function '''
__author__="Saurav Biswas"
#fix imports
from Network import *
class LinearLayer(Network):
'''A Linear Layer that has no activation function'''
def __init__(self,Nh,No,initial_range=initial_range,rand=rand):
self.Nh=Nh
self.No=No
self.W=randu(No,Nh+1)*initial_range
self.DW=zeros((No,Nh+1))
def ninputs(self):
return self.Nh
def noutputs(self):
return self.No
def forward(self,ys):
#the forward pass function
n=len(ys)
inputs,zs = [None]*n,[None]*n
for i in range(n):
inputs[i] = concatenate([ones(1),ys[i]])
zs[i] = dot(self.W,inputs[i])
self.state = (inputs,zs)
return zs
def backward(self,deltas):
inputs,zs = self.state
n = len(zs)
assert len(deltas)==len(inputs)
dzspre,dys = [None]*n,[None]*n
for i in reversed(range(len(zs))):
dzspre[i] = deltas[i]
dys[i] = dot(dzspre[i],self.W)[1:]
self.dzspre = dzspre
self.DW = sumouter(dzspre,inputs)
return dys
def info(self):
vars = sorted("W".split())
for v in vars:
a = array(getattr(self,v))
print v,a.shape,amin(a),amax(a)
def weights(self):
yield self.W,self.DW,"LinearLayer"
|
UTF-8
|
Python
| false | false | 1,396 |
py
| 23 |
LinearLayer.py
| 20 | 0.557307 | 0.554441 | 0 | 44 | 30.727273 | 72 |
Ian970912/pyvhonMinecaft20210126
| 10,771,777,994,721 |
b8767c6a21ef1f22726dfd28d5c3461b0a9e10b4
|
401f73d776ae9e9be6de39ff34dc15883c381f4c
|
/2-6.py
|
f27ab1c308b06dd77fc9667ddc6518400b6a31f2
|
[] |
no_license
|
https://github.com/Ian970912/pyvhonMinecaft20210126
|
744d66a98670c0fd4cbf9d45ef01f09a1bb90030
|
c5aa35afb322bcfe443514e96069db5b681346c2
|
refs/heads/main
| 2023-02-23T08:45:41.882797 | 2021-01-26T08:13:44 | 2021-01-26T08:13:44 | 333,015,571 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from mcpi.minecraft import Minecraft
MC=Minecraft.create()
X,Y,Z=MC.player.getTilePos()
answer=int(input('請問你右邊要放甚麼方塊:'))
MC.setBlock(X+1,Y,Z,answer) # setBlock places a single block; setBlocks needs two coordinate triples
|
UTF-8
|
Python
| false | false | 182 |
py
| 7 |
2-6.py
| 7 | 0.725 | 0.71875 | 0 | 7 | 20.857143 | 36 |
jkrayco/image_study
| 6,064,493,843,729 |
b42837e8cdea836aa5517568a4c648edb19beae2
|
790dc7a55528d31ba5cb9a76ff7194adb4be21d7
|
/week5/wittyimage/urls.py
|
ee19899b8535bb051a69548f163c977f0d42ddd9
|
[] |
no_license
|
https://github.com/jkrayco/image_study
|
f1017860b2ec629e7d6e7f8d8cde4421d5cbea4f
|
bd29d4cea56fc9ad104595afdf211bc29d54c759
|
refs/heads/master
| 2020-04-30T21:41:30.733189 | 2019-11-28T10:07:14 | 2019-11-28T10:07:14 | 177,099,656 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""wittyimage URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from . import api
# from django.contrib import admin
urlpatterns = [
# url(r'^admin/', admin.site.urls),
url(r'^api/face_morph/$', api.face_morph),
url(r'^api/face_map/$', api.face_map),
url(r'^api/face_animate_map/$', api.face_map, {'animate': True}),
url(r'^api/face_points/$', api.face_points),
url(r'^api/face_animate_points/$', api.face_points, {'animate': True}),
url(r'^api/face_box/$', api.face_box),
url(r'^api/face_gray/$', api.face_gray),
url(r'^api/face_sketch/$', api.face_sketch),
url(r'^api/generate_gif_effect/$', api.generate_gif_effect),
url(r'^api/face_recognition/$', api.face_recognize),
url(r'^api/generate_gif/$', api.generate_gif),
url(r'^api/generate_gif_no_crop/$', api.generate_gif, {'crop': False}),
url(r'^api/generate_gifr/$', api.generate_gif, {'reverse': True}),
url(r'^api/generate_gifr_no_crop/$', api.generate_gif, {'crop': False,'reverse': True}),
url(r'^api/generate_movie/$', api.generate_movie),
url(r'^api/combine_names/$', api.combine_names),
# url(r'^api/openface_infer/$', api.openface_infer),
url(r'^api/filter_makeup/$', api.filter_makeup),
url(r'^api/face_sticker/$', api.face_sticker),
url(r'^api/custom/$', api.custom),
url(r'^api/videoMorph/$',api.videoMorph),
url(r'^api/beautify/$', api.beautify),
url(r'^api/kpop_lookbackvid/$', api.kpop_lookbackvid),
]
|
UTF-8
|
Python
| false | false | 2,080 |
py
| 57 |
urls.py
| 56 | 0.652404 | 0.648077 | 0 | 45 | 45.222222 | 92 |
DoHuy/WebChecker
| 10,952,166,624,753 |
c152d648ca9b7439a86eb56bc10364cf9dbed4ea
|
275c81ca93a34e857b8ae27198f13f62317415a7
|
/deface.py
|
ae541aae8b2e82c6e82cacb49472b0129e62f434
|
[] |
no_license
|
https://github.com/DoHuy/WebChecker
|
4bc325818badb09ba7c2071401bb14b765dad574
|
263bcecd9d1de607da5024ad6c9fc7e3b3f2745d
|
refs/heads/master
| 2020-09-06T08:50:54.404447 | 2019-11-09T15:41:28 | 2019-11-09T15:41:28 | 220,379,451 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env/python
# -*- coding: utf-8 -*-
import setting
import error
import Node
import cPickle
import base64
import traceback
import re
import alert
from datetime import datetime
hot_word = ["hacked", "god verify", "security is low", "visited", "h4ck3r", "hack"]
def check_deface(newContent, url, name, ipAddress, webId, userId):
try:
flag = 0
rawContent = base64.b64encode(newContent)
newContent = newContent.strip()
newContent = re.sub('</', '\n</', newContent)
newContent = re.sub('>\s*<', '>\n<', newContent)
newContent = re.sub('><', '>\n<', newContent)
newContent = newContent.split('\n')
newNode = Node.Node()
newNode.import_object(newContent)
newNode.importContent(newContent)
try:
result = setting.listWebData[webId]
except:
newNode = base64.b64encode(cPickle.dumps(newNode))
update = "INSERT INTO webStruct (url, limitTime, isStructed, struct, webId, userId, object) VALUES ('%s' , '%d', '%d', '%s', '%d', '%d', '%s')"%(url, 1, 0, rawContent, webId, userId, newNode)
setting.listObject[webId] = (newNode, rawContent, webId)
else:
limitTime = result[0]
isStructed = int(result[1])
struct = setting.listObject[webId][1]
learningTime = setting.listLearnTime[webId]
if isStructed == 1:
# confirmstruct = base64.b64decode(struct)
# confirmstruct = confirmstruct.strip()
# confirmstruct = re.sub('</', '\n</', confirmstruct)
# confirmstruct = re.sub('>\s*<', '>\n<', confirmstruct)
# confirmstruct = re.sub('><', '>\n<', confirmstruct)
# confirmstruct = confirmstruct.split('\n')
# oldNode = Node.Node()
# oldNode.import_object(confirmstruct, hostName)
# oldNode.importContent(confirmstruct)
oldNode = cPickle.loads(base64.b64decode(setting.listObject[webId][0]))
oldNode, tmp = find_struct(oldNode, newNode, 'C', ipAddress, name, webId, url)
if tmp == 0:
alert.check_status(setting.db, 'defaced', url, webId)
else:
newContent = oldNode.boderDiffHTML(newContent)
else:
oldNode = cPickle.loads(base64.b64decode(setting.listObject[webId][0]))
oldNode, tmp = find_struct(oldNode, newNode, 'F', ipAddress, name, webId, url)
newContent = oldNode.render_html(newContent)
struct = ""
for each in newContent:
if each != None:
struct += each + '\n'
struct = base64.b64encode(struct)
if limitTime <= 480*learningTime:
limitTime += 1
else:
isStructed = 1
if tmp == 0:
alert.check_status(setting.db, 'defaced', url, webId)
else:
newContent = oldNode.boderDiffHTML(newContent)
oldNode = base64.b64encode(cPickle.dumps(oldNode))
update = "UPDATE webStruct SET isStructed = '%d', struct = '%s', limitTime = '%d', object = '%s' WHERE webId = '%d'"%(isStructed, struct, limitTime, oldNode, webId)
setting.listObject[webId] = (oldNode, struct, webId)
setting.MySQLUpdate.append(update)
except RuntimeError:
print "Hitted maximum recursion depth! Cannot save this object"
except Exception:
error.catchError(traceback.format_exc())
return
def push_alert_data(Node1, Node2, name, hostName, webId, src_ip, url):
flag = 0
word = ""
if name == "Different at sub Tag":
diff = {
"Name" : u"Số lượng thẻ con không giống nhau",
"tagName" : Node1.name,
"startAt" : Node1.startAt,
"endAt" : Node1.endAt,
"oldContent" : unicode("Bao gồm " + str(len(Node1.listChildren)) + " sub Tag", "utf-8"),
"newContent" : unicode("Bao gồm " + str(len(Node2.listChildren)) + " sub Tag", "utf-8")
}
severity = 1
for each in hot_word:
if flag == 1:
break
for node in Node2.listChildren:
try:
if each in node.attribute.lower() or each in node.content.lower():
print "Level 3 alert"
word = each
flag = 1
severity = 3
break
except:
pass
elif name == "Different at Attribute":
diff = {
"Name" : u"Tính chất của thẻ bị thay đổi ",
"tagName" : Node1.name,
"startAt" : Node1.startAt,
"endAt" : Node1.endAt,
"oldContent" : Node1.attribute,
"newContent" : Node2.attribute
}
severity = 1
for each in hot_word:
if each in Node2.attribute.lower():
word = each
flag = 1
severity = 2
elif name == "Different at content":
diff = {
"Name" : u"Nội dung của thẻ bị thay đổi",
"tagName" : Node1.name,
"startAt" : Node1.startAt,
"endAt" : Node1.endAt,
"oldContent" : Node1.content,
"newContent" : Node2.content
}
severity = 1
for each in hot_word:
if each in Node2.content.lower():
word = each
flag = 1
severity = 2
else:
diff = {
"Name" : u"Tên của thẻ bị thay đổi",
"tagName" : Node1.name,
"startAt" : Node1.startAt,
"endAt" : Node1.endAt,
"oldContent" : Node1.attribute,
"newContent" : Node2.attribute
}
severity = 1
for each in hot_word:
if each in Node2.attribute.lower():
word = each
flag = 1
severity = 2
setting.threadLock.acquire()
alert.take_shot(url, name, 0, int(webId))
setting.threadLock.release()
if flag == 0 :
tmpQuery = ('defaced', severity, src_ip, hostName, 0, 0, webId, url, diff, None, str(datetime.now().replace(microsecond=0)).replace(' ','T'))
else:
tmpQuery = ('defaced', severity, src_ip, hostName, 0, 0, webId, url, diff, word, str(datetime.now().replace(microsecond=0)).replace(' ','T'))
setting.MongoData.append(tmpQuery)
def find_struct(Node1, Node2, mode, src_ip, hostName, webId, link):
Node1.startAt = Node2.startAt
Node1.endAt = Node2.endAt
if Node1.passed == 0:
if ( Node1.deep == Node2.deep and Node1.name == Node2.name):
if len(Node1.listChildren) != len(Node2.listChildren):
if mode == 'F':
Node1.passed = 1
Node1.listChildren = Node2.listChildren
for each in Node1.listChildren:
each.delete = 1
if mode != 'F' and Node1.delete == 0:
Node1.border = 1
push_alert_data(Node1,Node2, "Different at sub Tag", hostName,webId, src_ip, link)
return Node1, 1
elif Node1.attribute != Node2.attribute:
if mode == 'F':
Node1.passed = 1
Node1.delete = 1
else:
Node1.border = 1
push_alert_data(Node1,Node2, "Different at Attribute", hostName,webId, src_ip, link)
return Node1, 1
elif Node1.content != Node2.content:
if mode == 'F':
Node1.passed = 1
Node1.delete = 1
else:
Node1.border = 1
push_alert_data(Node1,Node2, "Different at content", hostName,webId, src_ip, link)
return Node1, 1
else:
for index in range(len(Node1.listChildren)):
Node1.listChildren[index], stopCode = find_struct(Node1.listChildren[index], Node2.listChildren[index], mode, src_ip, hostName, webId, link)
if stopCode == 1:
return Node1, 1
else:
if mode == 'F':
Node1.passed = 1
Node1.delete = 1
else:
Node1.border = 1
push_alert_data(Node1,Node2, "Different at Deep or Name", hostName,webId, src_ip, link)
return Node1, 1
return Node1, 0
|
UTF-8
|
Python
| false | false | 7,143 |
py
| 16 |
deface.py
| 14 | 0.629932 | 0.605834 | 0 | 221 | 31.108597 | 205 |
tushushu/leetcode
| 18,803,366,857,690 |
a1d82d88e52638c05a2d9c95819a7a078394bdc6
|
08897473af60ae5a10b4b66240a826ebb841f20d
|
/python/189. Rotate Array.py
|
d87e4735ddd5cd871552f76bd8508a1958772d99
|
[] |
no_license
|
https://github.com/tushushu/leetcode
|
ff6ccf5310a5965c62ea9e53b480584b23651934
|
1613613f4ba26f489a1a7228af5bcb6563fe5852
|
refs/heads/master
| 2021-08-09T20:07:57.744593 | 2021-08-08T07:57:27 | 2021-08-08T07:57:27 | 118,591,623 | 5 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
@Author: tushushu
@Date: 2018-11-21 11:34:06
@Last Modified by: tushushu
@Last Modified time: 2018-11-21 11:34:06
"""
class Solution(object):
def rotate(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: void Do not return anything, modify nums in-place instead.
"""
n = len(nums)
k = k % n
m = n - k
for _ in range(m):
nums.append(nums.pop(0))
|
UTF-8
|
Python
| false | false | 476 |
py
| 208 |
189. Rotate Array.py
| 207 | 0.523109 | 0.460084 | 0 | 20 | 22.8 | 74 |
rashmee/Python-Projects
| 10,522,669,887,807 |
7b9a2a797dfd39d0803130fd3513ee22547e08e6
|
05f528e203b3e3a18a893a064a440ec123a0c1e6
|
/diceRollingSimulator.py
|
c4b6b09efe752f64f4bd4510a7dbe2ca71ea6822
|
[] |
no_license
|
https://github.com/rashmee/Python-Projects
|
08bc5776e074f444bced0060d94189d5f5846df1
|
05783eabc0bf551c30b1b3ce93eb89ded6941f7d
|
refs/heads/master
| 2021-08-24T13:19:57.273542 | 2017-11-21T07:32:36 | 2017-11-21T07:32:36 | 111,325,967 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#Program simulating the dice roll
from random import randint
repeat = True
while repeat:
dice1 = randint(1,6) #roll inside the loop so each round gets fresh dice
dice2 = randint(1,6)
print "You rolled ",dice1, "&",dice2
print "Your total is ",dice1+dice2
if(dice1==6 and dice2==6): #check double sixes first, and use 'and' (not bitwise '&')
print "DOUBLE SIXES!!! Way to go!"
elif(dice1==dice2):
print "DOUBLES! Great going!!!"
print("Do you want to roll again?")
repeat = raw_input().lower() in ("y", "yes") #('"y" or "yes"') always evaluated to "y"
print("Thank you for playing!")
|
UTF-8
|
Python
| false | false | 470 |
py
| 10 |
diceRollingSimulator.py
| 10 | 0.62766 | 0.593617 | 0 | 19 | 23.736842 | 46 |
abhi9321/python_repo
| 7,258,494,758,353 |
9d8ff60d2318eb09bbdedbfb188623b4a54fccf4
|
55a11d5097a4824de3510f6ebf27386d7a3d1446
|
/age_calculator.py
|
f0b76e1cef6cd7be71df23e20ef124ab7fe7b2d6
|
[] |
no_license
|
https://github.com/abhi9321/python_repo
|
6ecc64471eb65988c1bc5b1ed99446835b1cd13f
|
b920c5285fb65d5c81b06612059d625b2bbc27ce
|
refs/heads/master
| 2021-03-25T09:32:39.051212 | 2020-12-11T12:46:41 | 2020-12-11T12:46:41 | 247,604,940 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
birth_year = input("enter your birth year")
age = 2020 - int(birth_year)
print(f"your age is : {age}")
|
UTF-8
|
Python
| false | false | 105 |
py
| 26 |
age_calculator.py
| 25 | 0.657143 | 0.619048 | 0 | 5 | 20 | 43 |
amutebe/AMMS_General
| 7,447,473,301,106 |
e07d81576dc89a171fc9f91372591f6b97b60d29
|
de392462a549be77e5b3372fbd9ea6d7556f0282
|
/itsms_20000/forms.py
|
7c8fad672828b00ca187194e42e33b23e98336d2
|
[] |
no_license
|
https://github.com/amutebe/AMMS_General
|
2830770b276e995eca97e37f50a7c51f482b2405
|
57b9b85ea2bdd272b44c59f222da8202d3173382
|
refs/heads/main
| 2023-07-17T02:06:36.862081 | 2021-08-28T19:07:17 | 2021-08-28T19:07:17 | 400,064,408 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.forms import ModelForm,TextInput,NumberInput,RadioSelect,DateInput,TimeInput
from django.forms.widgets import HiddenInput
from .models import *
from accounts.models import Customer
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
from multiselectfield import MultiSelectFormField
class HorizontalRadioSelect(forms.RadioSelect):
template_name = 'horizontal_select.html'
class DateInput(forms.DateInput):
input_type = 'date'
class TimeInput(forms.TimeInput):
input_type = 'time'
class serviceRequest(ModelForm):
class Meta:
model = mod20000_service_request
exclude = ['date_today','planning_flag']
widgets={'record_group':HiddenInput(),'requestor':forms.Textarea(attrs={'rows': 2, 'cols': 40}),'entered_by':HiddenInput(),'date':DateInput(),'time':TimeInput(),'other':forms.Textarea(attrs={'rows': 2, 'cols': 40})}
class serviceRequestPlans(ModelForm):
class Meta:
model = mod20000_service_planning
exclude = ['document','entered_by','date_today','verification','verification_status','verification_failed','qmsstatus','scheduled','completion_date','completedby', 'component_affected','error','solution','report_number']
widgets={'record_group':HiddenInput(),'entered_by':HiddenInput(),'status':forms.HiddenInput,'due':DateInput(),'planning_date':DateInput(),'completion_date':DateInput(), 'description':forms.Textarea(attrs={'rows': 2, 'cols': 40}), 'error':forms.Textarea(attrs={'rows': 2, 'cols': 40}), 'activities':forms.Textarea(attrs={'rows': 2, 'cols': 40}), 'report_number':forms.Textarea(attrs={'rows': 2, 'cols': 40}), 'solution':forms.Textarea(attrs={'rows': 2, 'cols': 40}), 'remark':forms.Textarea(attrs={'rows': 2, 'cols': 40})}
def clean(self):
cleaned_data = super().clean()
start_date = cleaned_data.get("planning_date")
end_date = cleaned_data.get("due")
if end_date is not None and start_date is not None:
if end_date < start_date:
raise forms.ValidationError("When date should be greater than Planning date.")
else:
raise forms.ValidationError("When date and Planning date cannot be empty")
class VerifyServiceRequest(ModelForm):
class Meta:
model = mod20000_service_planning
#fields = '__all__'
fields=['planning_date','due','qmsstatus','scheduled','completion_date','completedby','verification_failed','report_number','error','solution','remark','component_affected','document']
widgets={'report_number':forms.Textarea(attrs={'rows': 1, 'cols': 60}),'error':forms.Textarea(attrs={'rows': 3, 'cols': 60}),'solution':forms.Textarea(attrs={'rows': 3, 'cols': 60}),'remark':forms.Textarea(attrs={'rows': 3, 'cols': 60}),'planning_date':HiddenInput(),'due':HiddenInput(),'completion_date':DateInput(),'scheduled':DateInput(),'verification_failed':forms.Textarea(attrs={'rows': 3, 'cols': 60}),'verification_status':forms.Textarea(attrs={'rows': 3, 'cols': 60})}
def clean(self):
cleaned_data = super().clean()
start_date = cleaned_data.get("planning_date")
end_date = cleaned_data.get("completion_date")
reschedule_date = cleaned_data.get("scheduled")
# print("PRINT",end_date,start_date)
if end_date is not None and start_date is not None:
if end_date < start_date or end_date>date.today() :
raise forms.ValidationError("Completion date shouldn't be less than Planning date or be in Future")
elif reschedule_date is not None and start_date is not None:
if reschedule_date < start_date or reschedule_date < date.today():
raise forms.ValidationError("Reschedule date shouldn't be less than Planning date or today's date")
else:
raise forms.ValidationError("Completion date or Reschedule date cannot be empty")
|
UTF-8
|
Python
| false | false | 4,037 |
py
| 268 |
forms.py
| 178 | 0.66807 | 0.653951 | 0 | 77 | 51.285714 | 529 |
WadeBarnes/aries-cloudagent-python
| 3,204,045,648,319 |
cc4e06dc2775463bd093b773892bb8b367617d36
|
ce1eca0537b7df8444450eb0bf71931bedc9ffb5
|
/aries_cloudagent/config/banner.py
|
0598b6d54aee6eefcf49cbec7d88022f1d662e32
|
[
"LicenseRef-scancode-dco-1.1",
"Apache-2.0"
] |
permissive
|
https://github.com/WadeBarnes/aries-cloudagent-python
|
f6f1a95213bcbf2809c9d96f053e10b5a2749e17
|
b336e851301d8124917e7b14d6d0866ef526138c
|
refs/heads/main
| 2023-07-24T19:49:47.018389 | 2023-02-15T22:26:53 | 2023-02-15T22:26:53 | 194,136,068 | 2 | 0 |
Apache-2.0
| true | 2023-02-14T15:26:02 | 2019-06-27T17:22:06 | 2022-11-21T18:48:22 | 2023-02-11T13:16:17 | 42,358 | 1 | 0 | 1 |
Python
| false | false |
"""Module to contain logic to generate the banner for ACA-py."""
class Banner:
"""Management class to generate a banner for ACA-py."""
def __init__(self, border: str, length: int):
"""Initialize the banner object.
The ``border`` is a single character to be used, and ``length``
is the desired length of the whole banner, inclusive.
"""
self.border = border
self.length = length
def print_border(self):
"""Print a full line using the border character."""
print(self.border * (self.length + 6))
def print_title(self, title):
"""Print the main title element."""
spacer = " " * (self.length - len(title))
print(self.lr_pad(f"{title}{spacer}"))
def print_spacer(self):
"""Print an empty line with the border character only."""
print(self.lr_pad(" " * self.length))
def print_subtitle(self, title):
"""Print a subtitle for a section."""
title += ":"
spacer = " " * (self.length - len(title))
print(self.lr_pad(f"{title}{spacer}"))
def print_list(self, items):
"""Print a list of items, prepending a dash to each item."""
for item in items:
left_part = f" - {item}"
spacer = " " * (self.length - len(left_part))
print(self.lr_pad(f"{left_part}{spacer}"))
def print_version(self, version):
"""Print the current ``version``."""
version = f"ver: {version}"
spacer = " " * (self.length - len(version))
print(self.lr_pad(f"{spacer}{version}"))
def lr_pad(self, content: str):
"""Pad string content with defined border character.
Args:
content: String content to pad
"""
return f"{self.border}{self.border} {content} {self.border}{self.border}"
|
UTF-8
|
Python
| false | false | 1,845 |
py
| 44 |
banner.py
| 27 | 0.569106 | 0.568564 | 0 | 54 | 33.166667 | 81 |
fermat986/ProyectoUnoIA
| 13,752,485,298,443 |
cfd525e02fe8dfe2243a2d29c9958e7e635520c9
|
e9fbe6f698f45387b59e50708873bb55ea0010ce
|
/Lectura/Ambiente.py
|
96faaff3156715692c1c5858d15c6326bec17498
|
[] |
no_license
|
https://github.com/fermat986/ProyectoUnoIA
|
e3554ca26fa9583d189c9c518080894ead8674a4
|
f6c1bb0876045881e0e5a672ac5dbc0353f65abc
|
refs/heads/master
| 2021-05-15T01:23:51.634735 | 2015-05-06T04:09:31 | 2015-05-06T04:09:31 | 33,286,974 | 0 | 1 | null | false | 2015-05-06T02:49:39 | 2015-04-02T03:20:37 | 2015-05-01T21:57:55 | 2015-05-06T02:49:38 | 290 | 0 | 1 | 1 |
Python
| null | null |
__author__ = 'alvaro'
class Ambiente:
global tamano
global matriz
def __init__(self, ruta):
global tamano
global matriz
archi=open(ruta,'r')
linea = archi.readline()
tamano= int(linea)
matriz = [[0 for x in range(tamano)] for x in range(tamano)]
print linea
for i in range(0, tamano):
linea=archi.readline()
splitLinea = linea.split(' ')
print(splitLinea)
for j in range(0, tamano):
matriz[i][j]= splitLinea[j]
archi.close()
def getPosition(self, x, y):
        '''
        Method that returns the value of the environment at position x, y
        x: row index of the environment
        y: column index of the environment
        '''
global tamano
global matriz
if(x<0):
return -1
if(y<0):
return -1
        if(x>=tamano):  # valid row indices are 0..tamano-1
            return -1
        if (y>=tamano):  # valid column indices are 0..tamano-1
            return -1
else:
return matriz[x][y]
'''
File-reading test
print 'hola'
miAmb = Ambiente('archivo.txt')
'''
'''
getPosition test
miAmb = Ambiente('archivo.txt')
print miAmb.getPosition(-1, 1)
print miAmb.getPosition(3,3)
'''
|
UTF-8
|
Python
| false | false | 1,263 |
py
| 10 |
Ambiente.py
| 6 | 0.527316 | 0.517023 | 0 | 59 | 20.355932 | 72 |
etoki/0813_testing
| 19,112,604,502,869 |
13736f3f42291fa00bc2d446d9d65dc4c56ab9ae
|
d70362a2ce5cbd27acd152592064650b25cef917
|
/apps/apps/settings/local.py
|
4bb1e401693d21372a66bf31e0b166b9248394e9
|
[] |
no_license
|
https://github.com/etoki/0813_testing
|
6b069e77373d7499a0b610bfd10fda8c6d93b4c5
|
f02e14ebebaa1ef05844a7d5a5ea2c94d8613a38
|
refs/heads/master
| 2021-01-20T15:44:48.197290 | 2016-08-15T14:15:27 | 2016-08-15T14:15:27 | 65,602,124 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding:utf8
"""
Settings file for the local environment application
"""
from apps.settings.base import *
DOMAIN = "127.0.0.1:8000"
|
UTF-8
|
Python
| false | false | 124 |
py
| 18 |
local.py
| 12 | 0.69 | 0.58 | 0 | 7 | 13.285714 | 32 |
sunmyung777/studying-database
| 14,989,435,864,512 |
60138be618c8a8c867026dcf7017e623591f3ef4
|
e3c1664bbc23b2cf93199a21b66bea05d830261b
|
/new_flask/manage.py
|
5ef3fcd2c76d95a752ca04c1be0a6442a7abd560
|
[] |
no_license
|
https://github.com/sunmyung777/studying-database
|
683298cfb6cb4c12fcb9dcc693cc8ae78006f047
|
c6db2e297506c1369b402a7d16853d1e36386765
|
refs/heads/master
| 2020-04-17T18:12:39.572558 | 2019-01-21T13:19:18 | 2019-01-21T13:19:18 | 166,816,648 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -- coding: utf-8 --
import os
from flask import Flask,url_for,render_template,request,redirect
import sqlite3 as lite
from werkzeug.utils import secure_filename  # secure_filename lives in werkzeug.utils in current releases
app=Flask(__name__)
UPP_FOLDER='static/img'
ALLOW=set(['png','jpg','jpeg','gif','PNG'])
app.config['UPP_FOLDER']=UPP_FOLDER
def allowed_file(filename):
return '.' in filename and filename.rsplit('.',1)[1] in ALLOW
@app.route('/')
def main():
return render_template('main.html')
@app.route('/index',methods=['POST'])
def index():
file=request.files['file']
if file and allowed_file(file.filename):
filename=secure_filename(file.filename)
file.save(os.path.join(app.config['UPP_FOLDER'],filename))
database_filename='db/hello.db'
conn = lite.connect(database_filename)
cs = conn.cursor()
url='../static/img/'+str(filename)
cs.execute('INSERT INTO img VALUES (?,?);',(filename,url))
conn.commit()
        # parameterized query instead of string concatenation to avoid SQL injection
        log_name=cs.execute('SELECT img FROM img WHERE name=?;',(filename,)).fetchall()
a=log_name[0][0]
log_name=0
cs.close()
conn.close()
return render_template('hello.html',img=a)
else:
return redirect(url_for('main'))
@app.errorhandler(404)
def page_not_found(e):
return render_template('error.html',e=e), 404
@app.errorhandler(500)
def internal_server_error(e):  # renamed so it does not shadow the 404 handler
return render_template('error.html',e=e), 500
if __name__ == '__main__':
app.run(debug=True)
|
UTF-8
|
Python
| false | false | 1,420 |
py
| 5 |
manage.py
| 1 | 0.659859 | 0.646479 | 0 | 53 | 24.792453 | 64 |
Rombituon-Resource/project_controls
| 14,499,809,624,054 |
96ecc318e5fbda2f96d3d4f8c3dc49aff0f335c1
|
3a6f855961c3c433126f0dd804d4049d64b43dbf
|
/project_controls/project_controls/custom.py
|
8ace8272546b738af618b37c56fccd236429b6b3
|
[
"MIT"
] |
permissive
|
https://github.com/Rombituon-Resource/project_controls
|
1eeea1817083265cf664954f0b7268edd922f905
|
c6e9f464c2e0ae67318d8adde6ab058058532250
|
refs/heads/master
| 2023-05-12T07:22:45.591936 | 2021-05-12T13:25:49 | 2021-05-12T13:25:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, PibiCo and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
@frappe.whitelist()
def get_commit(project):
""" Get from database all committments amounts from not cancelled Purchase Orders
on specific Project
"""
data = frappe.db.sql("""
SELECT item_code, item_name, parent, parenttype, docstatus, sum(qty) as Qty, sum(net_amount) as Amount FROM `tabPurchase Order Item` WHERE project=%s and docstatus<2 GROUP by item_code""", project, True)
return data
@frappe.whitelist()
def get_time(project):
""" Get from database all hours from not cancelled Timesheets on specific Project """
data = frappe.db.sql("""
SELECT name, docstatus, parent, parenttype, sum(hours) as Hours, project, task, activity_type, item_code, sum(costing_amount) as Costs from `tabTimesheet Detail` WHERE project=%s and docstatus<2 GROUP by item_code""", project, True)
return data
@frappe.whitelist()
def get_assignment(employee):
""" Get from database all non closed assignments to an employee """
data = frappe.db.sql("""SELECT * from `tabAssignment Detail` WHERE parent=%s and docstatus<2 and to_time is NULL""", employee, True)
return data
@frappe.whitelist()
def get_timelogs(timesheet):
""" Get from database all time logs from draft Timesheet """
data = frappe.db.sql("""SELECT * from `tabTimesheet Detail` WHERE parent=%s and docstatus<2""", timesheet, True)
return data
@frappe.whitelist()
def get_emplid(user):
""" Get from database employee id from user logged in """
data = frappe.db.sql("""SELECT * from `tabEmployee` WHERE user_id=%s""", user, True)
return data
from six import BytesIO
from docxtpl import DocxTemplate
def _fill_template(template, data):
"""
Fill a word template with data.
Makes use of BytesIO to write the resulting file to memory instead of disk.
:param template: path to docx file or file-like object
:param data: dict with keys and values
"""
doc = DocxTemplate(template)
doc.render(data)
_file = BytesIO()
doc.docx.save(_file)
return _file
@frappe.whitelist()
def fill_and_attach_template(doctype, name, template):
"""
Use a documents data to fill a docx template and attach the result.
Reads a document and a template file, fills the template with data from the
document and attaches the resulting file to the document.
:param doctype" data doctype
:param name" data name
:param template" name of the template file
"""
data = frappe.get_doc(doctype, name)
data_dict = data.as_dict()
template_doc = frappe.get_doc("File", template)
#frappe.msgprint(template_doc.get_full_path())
template_path = template_doc.get_full_path()
output_file = _fill_template(template_path, data_dict)
output_doc = frappe.get_doc({
"doctype": "File",
"file_name": "-".join([name, template_doc.file_name]),
"attached_to_doctype": doctype,
"attached_to_name": name,
"content": output_file.getvalue(),
})
output_doc.save()
|
UTF-8
|
Python
| false | false | 3,153 |
py
| 27 |
custom.py
| 6 | 0.689185 | 0.68633 | 0 | 88 | 34.840909 | 237 |
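Outside of Frappe, the _fill_template helper above can be exercised on its own; the template path and context keys below are illustrative assumptions, not fixtures shipped with the app:

# Hypothetical standalone use of _fill_template.
data = {"customer": "ACME Corp", "grand_total": 1200}  # assumed template variables
buf = _fill_template("offer.docx", data)               # assumed template file
with open("offer-filled.docx", "wb") as out:
    out.write(buf.getvalue())  # _fill_template returns a BytesIO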
sljiaa/grace
| 16,295,105,952,864 |
319d31717309e71c0ecec391231994fbb885ec2e
|
574f22ac93441d390f9444ddbb71b32566be607a
|
/src/t2g.py
|
3d9d928addaafcf835c5c395708a987adb2a4e04
|
[] |
no_license
|
https://github.com/sljiaa/grace
|
3ad70447324d66766098fa5aaea0aef0925608f1
|
14eaff4d7f0a9f4ab1a907be60be5c5040baea6d
|
refs/heads/master
| 2022-03-30T10:01:59.347089 | 2020-01-16T22:47:42 | 2020-01-16T22:47:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
# Copyright (C) 2020, Daniel S. Fava. All Rights Reserved.
import os
import re
import sys
import traceback
import grace
DEBUG = True
VERBOSE = False
TSAN_INVALID_TID = '8129'
dbgCtx = {}
class StateMachine:
# Does not change
sm = { # State machine
# (state, input) : [next state, lambda symbolic_state, symbolic_input : next_symbolic_state]
("init", "") : ["init", lambda ss, sinp : None],
("init", "chansend") : ["send", lambda ss, sinp : sinp],
("send", "acquire") : ["send_acq", lambda ss, sinp :
(_ for _ in ()).throw(AssertionError("%s != %s" % (ss,sinp))) if ss['tid']!=sinp['tid']
else
(_ for _ in ()).throw(AssertionError("addr less than base: %s %s" % (ss,sinp))) if int(sinp['addr'],16)<int(ss['base'],16)
else
(_ for _ in ()).throw(AssertionError("addr less than base: %s %s" % (ss,sinp))) if int(sinp['addr'],16)>(int(ss['base'],16)+int(ss['size'],16))
else sinp],
("send_acq", "release") : ["init", lambda ss, sinp :
(_ for _ in ()).throw(AssertionError("%s != %s" % (ss,sinp))) if ss['tid']!=sinp['tid']
else
(_ for _ in ()).throw(AssertionError("%s != %s" % (ss,sinp))) if ss['addr']!=sinp['addr']
else None],
("init", "acquire") : ["recv", lambda ss, sinp : sinp],
("recv", "release") : ["init", lambda ss, sinp :
(_ for _ in ()).throw(AssertionError("%s != %s" % (ss,sinp))) if ss['tid']!=sinp['tid']
else
(_ for _ in ()).throw(AssertionError("%s != %s" % (ss,sinp))) if ss['addr']!=sinp['addr']
else None],
("init", "release_merge") : ["merge", lambda ss, sinp : sinp],
("merge", "acquire") : ["init", lambda ss, sinp :
(_ for _ in ()).throw(AssertionError("%s != %s" % (ss,sinp))) if ss['tid']!=sinp['tid']
else
(_ for _ in ()).throw(AssertionError("%s != %s" % (ss,sinp))) if ss['addr']!=sinp['addr']
else None],
("init", "closechan") : ["close", lambda ss, sinp : sinp],
("close", "release") : ["init", lambda ss, sinp :
(_ for _ in ()).throw(AssertionError("%s != %s" % (ss,sinp))) if ss['tid']!=sinp['tid']
else
(_ for _ in ()).throw(AssertionError("%s != %s" % (ss,sinp))) if ss['addr']!=sinp['addr']
else None],
}
@classmethod
def check_invariants(cls, initial):
states = set()
transitions = set()
for (s,t) in cls.sm.keys():
states.add(s)
transitions.add(t)
assert(states.intersection(transitions)==set())
assert(initial in states)
return (states, transitions)
@classmethod
def run(cls, initial, ss, inputs, debug=DEBUG):
'''Run the state machine'''
cls.check_invariants(initial)
curr = initial
for (inp,sinp) in inputs:
next_state = cls.sm[(curr, inp)][0]
try:
ss = cls.sm[(curr, inp)][1](ss, sinp)
except AssertionError as e:
if debug:
traceback.print_exc()
print(dbgCtx)
sys.exit(1)
else:
raise e
curr = next_state
return (curr, ss)
def parse(fhandle):
potential_chans = {}
gr = grace.Grace()
gr.initProc('0')
# State machine
    curr = {}  # per-tid state machine state; each tid starts at "init"
    ss = {}    # per-tid symbolic state; each tid starts at None
# Stack
stack = {}
s_go = re.compile('__tsan_go_start,.*,tid=(.*),.*,tid=(.*),.*')
s_go_end = re.compile('__tsan_go_end,.*,tid=(.*)')
s_read = re.compile('__tsan_read,.*,tid=(.*),(.*),.*')
s_write = re.compile('__tsan_write,.*,tid=(.*),(.*),.*')
s_acquire = re.compile('__tsan_acquire,.*,tid=(.*),(.*)')
s_release = re.compile('__tsan_release,.*,tid=(.*),(.*)')
s_release_merge = re.compile('__tsan_release_merge,.*,tid=(.*),(.*)')
s_chansend = re.compile('__tsan_read_pc,.*,tid=(.*),(.*),.*,.*,chansend')
s_closechan = re.compile('__tsan_write_pc,.*,tid=(.*),(.*),.*,.*,closechan')
s_malloc = re.compile('__tsan_malloc,.*,tid=.*,.*,(.*),(.*)')
s_func_enter = re.compile('__tsan_func_enter,.*,tid=(.*),.*,(.*)')
s_func_exit = re.compile('__tsan_func_exit,.*,tid=(.*)')
idx = 0
for line in fhandle:
idx += 1
dbgCtx['idx'] = idx
dbgCtx['line'] = line
if DEBUG and VERBOSE:
print(line.strip())
print(curr)
print(ss)
print()
r = s_func_exit.match(line)
if r:
tid = r.group(1)
func = stack[tid].pop()
print("func_exit ", func, tid)
continue
r = s_func_enter.match(line)
if r:
tid = r.group(1)
func = r.group(2)
if tid not in stack.keys():
stack[tid] = []
stack[tid].append(func)
print("func_enter ", func, tid)
continue
# Filter out trace from within sync
if line.startswith('__tsan_'):
assert(line.split(',')[2][:4] == 'tid=')
tid = line.split(',')[2][4:]
if tid in stack.keys() and stack[tid] != []:
top_of_stack = stack[tid][-1]
if top_of_stack.startswith('sync') or \
top_of_stack.startswith('syscall') or \
top_of_stack.startswith('fmt'):
#print("Skipping sync, tid=%s" % tid)
continue
r = s_malloc.match(line)
if r:
potential_chans["0x%x" % (int(r.group(1),16))] = "0x%x" % (int(r.group(2), 16))
continue
r = s_go.match(line)
if r:
gr.go(r.group(1),r.group(2))
continue
r = s_read.match(line)
if r:
gr.read(r.group(1),"0x%x" % (int(r.group(2),16)))
continue
r = s_write.match(line)
if r:
gr.write(r.group(1),"0x%x" % (int(r.group(2),16)))
continue
r = s_closechan.match(line)
if r:
tid = r.group(1)
addr = "0x%x" % (int(r.group(2),16))
if tid not in curr.keys():
curr[tid] = "init"
ss[tid] = None
(curr[tid], ss[tid]) = StateMachine.run(curr[tid], ss[tid], [("closechan", {'tid': tid, 'addr': addr})])
continue
r = s_chansend.match(line)
if r:
tid = r.group(1)
addr = "0x%x" % (int(r.group(2),16))
# Sometimes we need to take an offset into account, sometimes we don't need to.
# It depends on `race.go` and how `c.buf` is set in `makechan().`
base = addr if addr in potential_chans else "0x%x" % (int(addr, 16) - 0x10)
try:
assert(base in potential_chans)
except AssertionError as e:
if DEBUG:
print('addr %s' % addr)
print('base %s' % base)
print('potential_chans ', potential_chans)
base = "0x%x" % (int(addr, 16) - 0x10)
print(base in potential_chans)
raise e
size = potential_chans[base]
if tid not in curr.keys():
curr[tid] = "init"
ss[tid] = None
(curr[tid], ss[tid]) = StateMachine.run(curr[tid], ss[tid], [("chansend", {'tid': tid, 'base': base, 'size': size})])
continue
r = s_acquire.match(line)
if r:
tid = r.group(1)
addr = "0x%x" % (int(r.group(2),16))
if tid not in curr.keys():
curr[tid] = "init"
ss[tid] = None
(curr[tid], ss[tid]) = StateMachine.run(curr[tid], ss[tid], [("acquire", {'tid': tid, 'addr': addr})])
continue
r = s_release.match(line)
if r:
tid = r.group(1)
addr = "0x%x" % (int(r.group(2),16))
tmp = curr[tid]
(curr[tid], ss[tid]) = StateMachine.run(curr[tid], ss[tid], [("release", {'tid': tid, 'addr': addr})])
if tmp == "send_acq":
gr.send(tid, addr)
elif tmp == "recv":
dbgCtx['addr'] = addr
dbgCtx['grace.chans.keys()'] = gr.chans.keys()
dbgCtx['tid'] = tid
dbgCtx['stack[%s]' % tid] = stack[tid] if tid in stack.keys() else None
gr.recv(tid, addr)
elif tmp == "close":
gr.close(tid, addr)
else:
assert(0)
del(tmp)
continue
r = s_release_merge.match(line)
if r:
tid = r.group(1)
addr = "0x%x" % (int(r.group(2),16))
if tid not in curr.keys():
curr[tid] = "init"
ss[tid] = None
            (curr[tid], ss[tid]) = StateMachine.run(curr[tid], ss[tid], [("release_merge", {'tid': tid, 'addr': addr})])  # pass this tid's symbolic state, not the whole dict
continue
r = s_go_end.match(line)
if r:
tid = r.group(1)
# What does it mean, from the point of view of grace.py (and from the paper)
# for a thread-id to be reused?
# Perhaps its best to give it a fake new name?
print('WARNING: go_end %s' % tid)
# I'm letting execution continue since, if a new goroutine is created with
# the same name as the one that ended, grace will assert and error out.
#assert(0)
print()
gr.printReport(print_chans=False)
gr.gc(verbose=True)
print()
gr.printReport(print_chans=False, print_vars=False)
print()
#gr.gc(verbose=False)
#gr.printReport(print_chans=False, print_vars=False, fmt="dot")
def main(argv):
if len(argv) != 2:
print("%s %s" % (os.path.basename(__file__), "<TRACE>"))
sys.exit(1)
fhandle = open(argv[1], 'r')
try:
parse(fhandle)
except (AssertionError, KeyError) as e:
traceback.print_exc()
print(dbgCtx)
sys.exit(1)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
UTF-8
|
Python
| false | false | 9,069 |
py
| 7 |
t2g.py
| 5 | 0.534127 | 0.523431 | 0 | 277 | 31.740072 | 149 |
Aasthaengg/IBMdataset
| 10,668,698,810,052 |
8b9369a5f585c80960f2d4a0ae87cb523b3a35df
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03400/s475812853.py
|
349fe335e2e6ecf16ace90b457aef158d7f9331c
|
[] |
no_license
|
https://github.com/Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
n = int(input())
d,x = map(int, input().split())
a = [int(input()) for i in range(n)]
a_eat = []
for i in range(n):
y = 1
j = 1
while j+a[i] <= d:
y += 1
j += a[i]
a_eat.append(y)
print(sum(a_eat)+x)
|
UTF-8
|
Python
| false | false | 232 |
py
| 202,060 |
s475812853.py
| 202,055 | 0.456897 | 0.443966 | 0 | 13 | 16.923077 | 36 |
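The inner while-loop above counts the days 1, 1+a, 1+2a, … that do not exceed d, so it admits a closed form; this is an equivalent rewrite for clarity, not the submitted solution:

# Equivalent version using the closed form (d - 1) // a + 1.
a_eat = [(d - 1) // ai + 1 for ai in a]
print(sum(a_eat) + x)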
wellingtonlope/treinamento-programacao
| 11,897,059,447,873 |
2c1be8a6b8ced014b9a37405a3e94f22f83b22b9
|
c14f6bdcf226262e7872902b17e17376b74e619e
|
/uri-online-judge/python3/iniciante/1094.py
|
7b587317a94489ea1f2caae22ab09a4c9334d5b6
|
[] |
no_license
|
https://github.com/wellingtonlope/treinamento-programacao
|
ce672e05dfaeecbc965bc6c63f30c0149282a0a5
|
56c317ef0581b47621864a97a758d4b1208e9bca
|
refs/heads/master
| 2020-03-26T05:44:02.697167 | 2019-10-14T16:30:15 | 2019-10-14T16:30:15 | 144,571,913 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
coelhos = 0
ratos = 0
sapos = 0
for i in range(int(input())):
quantidade, tipo = input().split()
coelhos += int(quantidade) if tipo == 'C' else 0
ratos += int(quantidade) if tipo == 'R' else 0
sapos += int(quantidade) if tipo == 'S' else 0
total = coelhos + ratos + sapos
print('Total: %i cobaias' % total)
print('Total de coelhos: %i' % coelhos)
print('Total de ratos: %i' % ratos)
print('Total de sapos: %i' % sapos)
print('Percentual de coelhos: %.2f %%' % ((coelhos * 100) / total))
print('Percentual de ratos: %.2f %%' % ((ratos * 100) / total))
print('Percentual de sapos: %.2f %%' % ((sapos * 100) / total))
|
UTF-8
|
Python
| false | false | 658 |
py
| 85 |
1094.py
| 80 | 0.604863 | 0.575988 | 0 | 20 | 31.9 | 67 |
junyi1997/kl-520-test
| 6,047,313,986,375 |
f197e206301008cd32d95efc243bf426130724c5
|
d9e996d7f2abd7172a63a4cf4ec8dfe51777beae
|
/python_wrapper/kdp_wrapper.py
|
e35458915e3224190e4ae979f72950798aa601cf
|
[] |
no_license
|
https://github.com/junyi1997/kl-520-test
|
1da4427d526e9e6ec25882324754251dd95698d4
|
e0c7e45bb5760da80ff146c754bd036422697828
|
refs/heads/master
| 2023-08-04T17:41:51.437742 | 2021-09-27T13:21:10 | 2021-09-27T13:21:10 | 410,889,201 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
This is KDP wrapper.
"""
from __future__ import absolute_import
import ctypes
import math
import sys
from time import sleep
import cv2
import numpy as np
from common import constants
import kdp_host_api as api
#from keras.applications.mobilenet_v2 import MobileNetV2
#from keras.applications.mobilenet_v2 import preprocess_input, decode_predictions
HOST_LIB_DIR = ""
TEST_DIR = "".join([HOST_LIB_DIR, "../test_images"])
TEST_DME_MOBILENET_DIR = "".join([TEST_DIR, "/dme_mobilenet/"])
DME_MODEL_FILE = "".join([TEST_DME_MOBILENET_DIR, "all_models.bin"])
DME_FW_SETUP = "".join([TEST_DME_MOBILENET_DIR, "fw_info.bin"])
TEST_DME_SSD_FD_DIR = "".join([TEST_DIR, "/dme_ssd_fd/"])
DME_SSD_FD_MODEL_FILE = "".join([TEST_DME_SSD_FD_DIR, "all_models.bin"])
DME_SSD_FD_FW_SETUP = "".join([TEST_DME_SSD_FD_DIR, "fw_info.bin"])
TEST_DME_YOLO_224_DIR = "".join([TEST_DIR, "/dme_yolo_224/"])
DME_YOLO_224_MODEL_FILE = "".join([TEST_DME_YOLO_224_DIR, "all_models.bin"])
DME_YOLO_224_FW_SETUP = "".join([TEST_DME_YOLO_224_DIR, "fw_info.bin"])
# IMG_SOURCE_W = 320
# IMG_SOURCE_H = 240
IMG_SOURCE_W = 480
IMG_SOURCE_H = 640
DME_IMG_SIZE = IMG_SOURCE_W * IMG_SOURCE_H * 2
DME_MODEL_SIZE = 20 * 1024 * 1024
DME_FWINFO_SIZE = 512
DME_SEND_IMG_RETRY_TIME = 2
SLEEP_TIME = 0.001
ISI_IMG_SIZE = IMG_SOURCE_W * IMG_SOURCE_H * 2
def pad_up_16(value):
"""Aligns value argument to 16"""
return math.ceil(value / 16) * 16
####################################SFID####################################
FDR_IMG_SIZE = (IMG_SOURCE_W * IMG_SOURCE_H * 2)
FDR_THRESH = 0.475
IMG_FORMAT_RGB565 = constants.IMAGE_FORMAT_SUB128 | constants.NPU_FORMAT_RGB565
img_idx = 0
def softmax(logits):
"""
softmax for logits like [[[x1,x2], [y1,y2], [z1,z2], ...]]
minimum and maximum here work as preventing overflow
"""
clas = np.exp(np.minimum(logits, 22.))
clas = clas / np.maximum(np.sum(clas, axis=-1, keepdims=True), 1e-10)
return clas
def get_object_detection_res(dev_idx, inf_size, frames):
"""Gets detection results."""
det_res = []
inf_res = (ctypes.c_char * inf_size)()
api.kdp_dme_retrieve_res(dev_idx, 0, inf_size, inf_res)
od_header_res = ctypes.cast(
ctypes.byref(inf_res), ctypes.POINTER(constants.ObjectDetectionRes)).contents
box_count = od_header_res.box_count
#det_res.append(od_header_res.class_count)
#det_res.append(od_header_res.box_count)
#print("image -> {} object(s)\n".format(box_count))
    r_size = 4  # r_size is fixed here, so this guard is always taken
    if r_size >= 4:
header_result = ctypes.cast(
ctypes.byref(inf_res), ctypes.POINTER(constants.ObjectDetectionRes)).contents
box_result = ctypes.cast(
ctypes.byref(header_result.boxes),
ctypes.POINTER(constants.BoundingBox * header_result.box_count)).contents
for box in box_result:
x1 = int(box.x1)
y1 = int(box.y1)
x2 = int(box.x2)
y2 = int(box.y2)
score = float(box.score)
class_num = int(box.class_num)
res = [x1, y1, x2, y2, class_num, score]
det_res.append(res)
return det_res
#return np.asarray(det_res)
def get_landmark_res(dev_idx, inf_size, frames):
"""Gets landmark results."""
inf_res = (ctypes.c_char * inf_size)()
api.kdp_dme_retrieve_res(dev_idx, 0, inf_size, inf_res)
lm_res = ctypes.cast(
ctypes.byref(inf_res), ctypes.POINTER(constants.LandmakrResult)).contents
score = lm_res.score
blur = lm_res.blur
print(score, blur)
return lm_res
def get_age_gender_res(dev_idx, inf_size):
#inf_res = (ctypes.c_char * inf_size)()
inf_res = constants.FDAgeGenderRes()
api.kdp_dme_retrieve_res(dev_idx, 0, inf_size, ctypes.cast(ctypes.byref(inf_res), ctypes.c_char_p))
det_res = []
FACE_SCORE_THRESHOLD = 0.8
if inf_res.fd_res.score > FACE_SCORE_THRESHOLD:
# print("[INFO] FACE DETECT (x1, y1, x2, y2, score) = {}, {}, {}, {}, {}\n".format(
# inf_res.fd_res.x1, inf_res.fd_res.y1, inf_res.fd_res.x2, inf_res.fd_res.y2,
# inf_res.fd_res.score))
if not inf_res.ag_res.age and not inf_res.ag_res.ismale:
#print("[INFO] FACE TOO SMALL\n")
res = [int(inf_res.fd_res.x1), int(inf_res.fd_res.y1), int(inf_res.fd_res.x2), int(inf_res.fd_res.y2),
float(inf_res.fd_res.score), 0, 3] # age:0 gender:3
else:
#gender = "Male" if inf_res.ag_res.ismale else "Female"
# print("[INFO] AGE_GENDER (Age, Gender) = {}, {}\n".format(
# inf_res.ag_res.age, gender))
res = [int(inf_res.fd_res.x1), int(inf_res.fd_res.y1), int(inf_res.fd_res.x2), int(inf_res.fd_res.y2),
float(inf_res.fd_res.score), int(inf_res.ag_res.age), int(inf_res.ag_res.ismale) ] # male:1 female:2
det_res.append(res)
# else:
# print("[INFO] NO FACE OR FACE SCORE TOO LOW!!!\n")
return det_res
def get_detection_res(dev_idx, inf_size):
"""Gets detection results."""
inf_res = (ctypes.c_char * inf_size)()
# Get the data for all output nodes: TOTAL_OUT_NUMBER + (H/C/W/RADIX/SCALE) +
# (H/C/W/RADIX/SCALE) + ... + FP_DATA + FP_DATA + ...
api.kdp_dme_retrieve_res(dev_idx, 0, inf_size, inf_res)
# Prepare for postprocessing
listdata = [ord(byte) for byte in inf_res]
npdata = np.asarray(listdata)
fp_header_res = ctypes.cast(
ctypes.byref(inf_res), ctypes.POINTER(constants.RawFixpointData)).contents
output_num = fp_header_res.output_num
outnode_params_res = ctypes.cast(
ctypes.byref(fp_header_res.out_node_params),
ctypes.POINTER(constants.OutputNodeParams * output_num)).contents
height = 0
channel = 0
width = 0
radix = 0
scale = 0.0
npraw_data_array = []
data_offset = 0
for param in outnode_params_res:
height = param.height
channel = param.channel
width = param.width
radix = param.radix
scale = param.scale
# print(output_num, height, channel, width, pad_up_16(width), radix, scale)
# offset in bytes for TOTAL_OUT_NUMBER + (H/C/W/RADIX/SCALE) + (H/C/W/RADIX/SCALE)
offset = ctypes.sizeof(ctypes.c_int) + output_num * ctypes.sizeof(constants.OutputNodeParams)
# print("offset ", offset, ctypes.sizeof(c_int), ctypes.sizeof(OutputNodeParams))
# get the fixed-point data
npdata = npdata.astype("int8")
raw_data = []
raw_data = npdata[offset + data_offset:offset + data_offset + height*channel*pad_up_16(width)]
data_offset += height*channel*pad_up_16(width)
# print(raw_data.shape, offset, offset + height*channel*pad_up_16(width), height*channel*pad_up_16(width))
raw_data = raw_data.reshape(height, channel, pad_up_16(width))
raw_data = raw_data[:,:,:width]
# save the fp data into numpy array and convert to float
npraw_data = np.array(raw_data)
npraw_data = npraw_data.transpose(0, 2, 1) / (2 ** radix) / scale
npraw_data_array.append(npraw_data)
return npraw_data_array
def capture_frame(image):
if isinstance(image, str):
print(image)
frame = cv2.imread(image)
if isinstance(image, np.ndarray):
frame = image
frame = cv2.resize(frame, (IMG_SOURCE_W, IMG_SOURCE_H), interpolation=cv2.INTER_CUBIC)
#cv2.imshow('', frame)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGR565)
frame_data = frame.reshape(DME_IMG_SIZE)
buf_len = DME_IMG_SIZE
c_char_p = ctypes.POINTER(ctypes.c_char)
frame_data = frame_data.astype(np.uint8)
data_p = frame_data.ctypes.data_as(c_char_p)
return data_p, buf_len
def kdp_dme_load_model(dev_idx, _model_path):
"""Load dme model."""
model_id = 0
data = (ctypes.c_char * DME_FWINFO_SIZE)()
p_buf = (ctypes.c_char * DME_MODEL_SIZE)()
ret_size = 0
# read firmware setup data
print("loading models to Kneron Device: ")
n_len = api.read_file_to_buf(data, DME_FW_SETUP, DME_FWINFO_SIZE)
if n_len <= 0:
print("reading fw setup file failed: {}...\n".format(n_len))
return -1
dat_size = n_len
n_len = api.read_file_to_buf(p_buf, DME_MODEL_FILE, DME_MODEL_SIZE)
if n_len <= 0:
print("reading model file failed: {}...\n".format(n_len))
return -1
buf_len = n_len
model_size = n_len
print("starting DME mode ...\n")
ret, ret_size = api.kdp_start_dme(
dev_idx, model_size, data, dat_size, ret_size, p_buf, buf_len)
if ret:
print("could not set to DME mode:{}..\n".format(ret_size))
return -1
print("DME mode succeeded...\n")
print("Model loading successful")
sleep(SLEEP_TIME)
# dme configuration
model_id = 1000 # model id when compiling in toolchain
output_num = 1 # number of output node for the model
image_col = 640
image_row = 480
image_ch = 3
image_format = (constants.IMAGE_FORMAT_SUB128 |
constants.NPU_FORMAT_RGB565 |
constants.IMAGE_FORMAT_RAW_OUTPUT |
constants.IMAGE_FORMAT_CHANGE_ASPECT_RATIO)
c_int_p = ctypes.POINTER(ctypes.c_int)
crop_box = np.array([0, 0, 0, 0]) # for future use
crop_box = crop_box.astype(np.int32)
crop_box = crop_box.ctypes.data_as(c_int_p)
pad_value = np.array([0, 0, 0, 0]) # for future use
pad_value = pad_value.astype(np.int32)
pad_value = pad_value.ctypes.data_as(c_int_p)
c_float_p = ctypes.POINTER(ctypes.c_float)
extra_param = np.array([0.0])
extra_param = extra_param.astype(np.float32)
extra_param = extra_param.ctypes.data_as(c_float_p)
dme_cfg = constants.KDPDMEConfig(model_id, output_num, image_col,
image_row, image_ch, image_format, crop_box, pad_value, extra_param)
dat_size = ctypes.sizeof(dme_cfg)
print("starting DME configure ...\n")
ret, model_id = api.kdp_dme_configure(
dev_idx, ctypes.cast(ctypes.byref(dme_cfg), ctypes.c_char_p), dat_size, model_id)
if ret:
print("could not set to DME configure mode..\n")
return -1
print("DME configure model [{}] succeeded...\n".format(model_id))
sleep(SLEEP_TIME)
return 0
def kdp_dme_load_ssd_model(dev_idx, _model_path, is_raw_output):
"""Load dme model."""
model_id = 0
data = (ctypes.c_char * DME_FWINFO_SIZE)()
p_buf = (ctypes.c_char * DME_MODEL_SIZE)()
ret_size = 0
# read firmware setup data
print("loading models to Kneron Device: ")
n_len = api.read_file_to_buf(data, DME_SSD_FD_FW_SETUP, DME_FWINFO_SIZE)
if n_len <= 0:
print("reading fw setup file failed: {}...\n".format(n_len))
return -1
dat_size = n_len
n_len = api.read_file_to_buf(p_buf, DME_SSD_FD_MODEL_FILE, DME_MODEL_SIZE)
if n_len <= 0:
print("reading model file failed: {}...\n".format(n_len))
return -1
buf_len = n_len
model_size = n_len
print("starting DME mode ...\n")
ret, ret_size = api.kdp_start_dme(
dev_idx, model_size, data, dat_size, ret_size, p_buf, buf_len)
if ret:
print("could not set to DME mode:{}..\n".format(ret_size))
return -1
print("DME mode succeeded...\n")
print("Model loading successful")
sleep(SLEEP_TIME)
# dme configuration
model_id = 3 # model id when compiling in toolchain
output_num = 8 # number of output node for the model
image_col = 320
image_row = 240
image_ch = 3
image_format = (constants.IMAGE_FORMAT_SUB128 |
constants.NPU_FORMAT_RGB565)
if is_raw_output:
image_format = image_format | constants.IMAGE_FORMAT_RAW_OUTPUT
c_int_p = ctypes.POINTER(ctypes.c_int)
crop_box = np.array([0, 0, 0, 0]) # for future use
crop_box = crop_box.astype(np.int32)
crop_box = crop_box.ctypes.data_as(c_int_p)
pad_value = np.array([0, 0, 0, 0]) # for future use
pad_value = pad_value.astype(np.int32)
pad_value = pad_value.ctypes.data_as(c_int_p)
c_float_p = ctypes.POINTER(ctypes.c_float)
extra_param = np.array([0.5])
extra_param = extra_param.astype(np.float32)
extra_param = extra_param.ctypes.data_as(c_float_p)
dme_cfg = constants.KDPDMEConfig(model_id, output_num, image_col,
image_row, image_ch, image_format, crop_box, pad_value, extra_param)
dat_size = ctypes.sizeof(dme_cfg)
print("starting DME configure ...\n")
ret, model_id = api.kdp_dme_configure(
dev_idx, ctypes.cast(ctypes.byref(dme_cfg), ctypes.c_char_p), dat_size, model_id)
if ret:
print("could not set to DME configure mode..\n", model_id)
return -1
print("DME configure model [{}] succeeded...\n".format(model_id))
sleep(SLEEP_TIME)
return 0
def kdp_dme_load_yolo_model(dev_idx, _model_path):
"""Load dme model."""
model_id = 0
data = (ctypes.c_char * DME_FWINFO_SIZE)()
p_buf = (ctypes.c_char * DME_MODEL_SIZE)()
ret_size = 0
# read firmware setup data
print("loading models to Kneron Device: ")
n_len = api.read_file_to_buf(data, DME_YOLO_224_FW_SETUP, DME_FWINFO_SIZE)
if n_len <= 0:
print("reading fw setup file failed: {}...\n".format(n_len))
return -1
dat_size = n_len
n_len = api.read_file_to_buf(p_buf, DME_YOLO_224_MODEL_FILE, DME_MODEL_SIZE)
if n_len <= 0:
print("reading model file failed: {}...\n".format(n_len))
return -1
buf_len = n_len
model_size = n_len
print("starting DME mode ...\n")
ret, ret_size = api.kdp_start_dme(
dev_idx, model_size, data, dat_size, ret_size, p_buf, buf_len)
if ret:
print("could not set to DME mode:{}..\n".format(ret_size))
return -1
print("DME mode succeeded...\n")
print("Model loading successful")
sleep(SLEEP_TIME)
# dme configuration
model_id = 19 # model id when compiling in toolchain
output_num = 2 # number of output node for the model
image_col = 640
image_row = 480
image_ch = 3
image_format = (constants.IMAGE_FORMAT_SUB128 |
constants.NPU_FORMAT_RGB565 |
constants.IMAGE_FORMAT_RAW_OUTPUT)
c_int_p = ctypes.POINTER(ctypes.c_int)
crop_box = np.array([0, 0, 0, 0]) # for future use
crop_box = crop_box.astype(np.int32)
crop_box = crop_box.ctypes.data_as(c_int_p)
pad_value = np.array([0, 0, 0, 0]) # for future use
pad_value = pad_value.astype(np.int32)
pad_value = pad_value.ctypes.data_as(c_int_p)
c_float_p = ctypes.POINTER(ctypes.c_float)
extra_param = np.array([0.0])
extra_param = extra_param.astype(np.float32)
extra_param = extra_param.ctypes.data_as(c_float_p)
dme_cfg = constants.KDPDMEConfig(model_id, output_num, image_col,
image_row, image_ch, image_format, crop_box, pad_value, extra_param)
dat_size = ctypes.sizeof(dme_cfg)
print("starting DME configure ...\n")
ret, model_id = api.kdp_dme_configure(
dev_idx, ctypes.cast(ctypes.byref(dme_cfg), ctypes.c_char_p), dat_size, model_id)
if ret:
print("could not set to DME configure mode..\n", model_id)
return -1
print("DME configure model [{}] succeeded...\n".format(model_id))
sleep(SLEEP_TIME)
return 0
def kdp_dme_load_age_gender_model(dev_idx, _model_path):
"""Load dme model."""
model_id = 0
data = (ctypes.c_char * DME_FWINFO_SIZE)()
p_buf = (ctypes.c_char * DME_MODEL_SIZE)()
ret_size = 0
model_file = _model_path+"all_models.bin"
fw_setup = _model_path+"fw_info.bin"
# read firmware setup data
print("loading models to Kneron Device: ")
n_len = api.read_file_to_buf(data, fw_setup, DME_FWINFO_SIZE)
if n_len <= 0:
print("reading fw setup file failed: {}...\n".format(n_len))
return -1
dat_size = n_len
n_len = api.read_file_to_buf(p_buf, model_file, DME_MODEL_SIZE)
if n_len <= 0:
print("reading model file failed: {}...\n".format(n_len))
return -1
buf_len = n_len
model_size = n_len
print("starting DME mode ...\n")
ret, ret_size = api.kdp_start_dme(
dev_idx, model_size, data, dat_size, ret_size, p_buf, buf_len)
if ret:
print("could not set to DME mode:{}..\n".format(ret_size))
return -1
print("DME mode succeeded...\n")
print("Model loading successful")
sleep(SLEEP_TIME)
# dme configuration
model_id = 3 # model id when compiling in toolchain
output_num = 1 # number of output node for the model
image_col = 640
image_row = 480
image_ch = 3
image_format = (constants.IMAGE_FORMAT_MODEL_AGE_GENDER |
constants.IMAGE_FORMAT_SUB128 |
constants.NPU_FORMAT_RGB565)
c_int_p = ctypes.POINTER(ctypes.c_int)
crop_box = np.array([0, 0, 0, 0]) # for future use
crop_box = crop_box.astype(np.int32)
crop_box = crop_box.ctypes.data_as(c_int_p)
pad_value = np.array([0, 0, 0, 0]) # for future use
pad_value = pad_value.astype(np.int32)
pad_value = pad_value.ctypes.data_as(c_int_p)
c_float_p = ctypes.POINTER(ctypes.c_float)
extra_param = np.array([0.0])
extra_param = extra_param.astype(np.float32)
extra_param = extra_param.ctypes.data_as(c_float_p)
dme_cfg = constants.KDPDMEConfig(model_id, output_num, image_col,
image_row, image_ch, image_format, crop_box, pad_value, extra_param)
dat_size = ctypes.sizeof(dme_cfg)
print("starting DME configure ...\n")
ret, model_id = api.kdp_dme_configure(
dev_idx, ctypes.cast(ctypes.byref(dme_cfg), ctypes.c_char_p), dat_size, model_id)
if ret:
print("could not set to DME configure mode..\n", model_id)
return -1
print("DME configure model [{}] succeeded...\n".format(model_id))
sleep(SLEEP_TIME)
return 0
def sync_inference(device_index, app_id, input_size, capture,
img_id_tx, frames, post_handler):
"""Send the rest of images and get the results.
Arguments:
device_index: Connected device ID. A host can connect several devices.
app_id: ID of application to be run.
input_size: Size of input image.
capture: Active cv2 video capture instance.
img_id_tx: Should be returned from fill_buffer.
frames: List of frames captured by the video capture instance.
post_handler: Function to process the results of the inference.
"""
ret_size = 2048
inf_res = (ctypes.c_char * ret_size)()
data_p = isi_capture_frame(capture, frames)
ret, _, img_left = isi_inference(
device_index, data_p, input_size, img_id_tx, 0, 0)
if ret:
return ret
_, _, result_size = isi_get_result(
device_index, img_id_tx, 0, 0, inf_res, app_id)
post_handler(inf_res, result_size, frames)
return
def kdp_inference(dev_idx, img_path):
"""Performs dme inference."""
img_buf, buf_len = capture_frame(img_path)
inf_size = 0
inf_res = (ctypes.c_char * 256000)()
res_flag = False
mode = 1
model_id = 0
ssid = 0
status = 0
_ret, ssid, res_flag = api.kdp_dme_inference(
dev_idx, img_buf, buf_len, ssid, res_flag, inf_res, mode, model_id)
# get status for session 1
while 1:
status = 0 # Must re-initialize status to 0
_ret, ssid, status, inf_size = api.kdp_dme_get_status(
dev_idx, ssid, status, inf_size, inf_res)
# print(status, inf_size)
if status == 1:
npraw_data = get_detection_res(dev_idx, inf_size)
break
return npraw_data
def kdp_dme_inference(dev_idx, app_id, capture, buf_len, frames):
"""Performs dme inference."""
img_buf = isi_capture_frame(capture, frames)
inf_size = 0
inf_res = (ctypes.c_char * 256000)()
res_flag = False
mode = 0
model_id = 0
_ret, inf_size, res_flag = api.kdp_dme_inference(
dev_idx, img_buf, buf_len, inf_size, res_flag, inf_res, mode, model_id)
if (app_id == constants.APP_AGE_GENDER):
det_res = get_age_gender_res(dev_idx, inf_size)
elif (app_id == constants.APP_FD_LM):
det_res = get_object_detection_res(dev_idx, inf_size, frames)
elif (app_id == constants.APP_TINY_YOLO3):
det_res = get_detection_res(dev_idx, inf_size)
elif (app_id == 0):
det_res = get_detection_res(dev_idx, inf_size)
return det_res
def kdp_exit_dme(dev_idx):
api.kdp_end_dme(dev_idx)
def load_reg_user_list(reg_user_list):
list_path = './data/fdr/userlist.txt'
np_list = np.loadtxt(list_path).astype(int)
if (np_list.size == 20):
reg_user_list = np_list.tolist()
return reg_user_list
def save_reg_user_list(reg_user_list):
list_path = './data/fdr/userlist.txt'
np_list = np.array(reg_user_list).astype(int)
np.savetxt(list_path,np_list,fmt='%i')
def capture_cam_frame(cap):
cv_ret, frame1 = cap.read()
frame1 = cv2.flip(frame1, 1)
return frame1
def frame_to_565_data(frame1):
frame = cv2.cvtColor(frame1, cv2.COLOR_BGR2BGR565)
frame_data = frame.reshape(FDR_IMG_SIZE)
c_char_p = ctypes.POINTER(ctypes.c_char)
frame_data = frame_data.astype(np.uint8)
data_p = frame_data.ctypes.data_as(c_char_p)
return data_p
def start_reg_mode(dev_idx, user_id):
img_size = 0
ret, img_size = api.kdp_start_sfid_mode(dev_idx, img_size, FDR_THRESH, IMG_SOURCE_W, IMG_SOURCE_H, IMG_FORMAT_RGB565)
if (ret != 0) or (img_size == 0):
print("start verify mode")
return -1
sleep(SLEEP_TIME)
global img_idx
img_idx += 1
ret = api.kdp_start_reg_user_mode(dev_idx, user_id, img_idx)
return ret
def register_user(dev_idx, frame, user_id):
data_p = frame_to_565_data(frame)
res = (ctypes.c_char * 0)()
ret, mask = api.kdp_extract_feature_generic(dev_idx, data_p, FDR_IMG_SIZE, 0, res)
if (ret):
if (ret == constants.MSG_APP_UID_EXIST):
print("> user exist <")
return ret
ret = api.kdp_register_user(dev_idx, user_id)
if (ret):
print("register user failed")
return ret
def del_user_id(dev_idx, user_id):
img_size = 0
ret, img_size = api.kdp_start_sfid_mode(dev_idx, img_size, FDR_THRESH, IMG_SOURCE_W, IMG_SOURCE_H, IMG_FORMAT_RGB565)
if (ret != 0) or (img_size == 0):
print("start verify mode")
return -1
sleep(SLEEP_TIME)
ret = api.kdp_remove_user(dev_idx, user_id)
return ret
def start_inf_mode(dev_idx):
img_size = 0
ret, img_size = api.kdp_start_sfid_mode(dev_idx, img_size, FDR_THRESH, IMG_SOURCE_W, IMG_SOURCE_H, IMG_FORMAT_RGB565)
if (ret != 0) or (img_size == 0):
print("start inf mode fail")
return -1
return ret
def verify_user_id(dev_idx, frame):
mask = api.kdp_get_res_mask(1, 1, 0, 0)
res_size = api.kdp_get_res_size(1, 1, 0, 0)
res = (ctypes.c_char * res_size)()
user_id = mask
data_p = frame_to_565_data(frame)
ret, u_id, mask_value = api.kdp_verify_user_id_generic(dev_idx, user_id, data_p, FDR_IMG_SIZE, mask, res)
fd_lm_res = ctypes.cast(ctypes.byref(res), ctypes.POINTER(constants.FDLMRes)).contents
x = fd_lm_res.fd_res.x
y = fd_lm_res.fd_res.y
w = fd_lm_res.fd_res.w
h = fd_lm_res.fd_res.h
return u_id, x, y, w, h
def isi_inference(dev_idx, img_buf, buf_len, img_id, rsp_code, window_left):
"""Performs ISI inference.
Arguments:
device_index: Connected device ID. A host can connect several devices.
img_buf: Image buffer.
buf_len: File size.
img_id: Sequence ID of the image.
rsp_code:
window_left: Number of image buffers still available for input.
"""
ret, rsp_code, window_left = api.kdp_isi_inference(
dev_idx, img_buf, buf_len, img_id, rsp_code, window_left)
if ret:
print("ISI inference failed: {}\n".format(ret))
return -1
if rsp_code:
print("ISI inference error_code: [{}] [{}]\n".format(rsp_code, window_left))
return -1
return ret, rsp_code, window_left
def isi_get_result(dev_idx, img_id, rsp_code, r_size, r_data, app_id):
"""Gets inference results.
Arguments:
dev_idx: Connected device ID. A host can connect several devices.
img_id: Sequence ID to get inference results of an image with that ID.
rsp_code:
r_size: Inference data size.
r_data: Inference result data.
app_id: ID of application to be run.
"""
ret, rsp_code, r_size = api.kdp_isi_retrieve_res(dev_idx, img_id, rsp_code, r_size, r_data)
if ret:
print("ISI get [{}] result failed: {}\n".format(img_id, ret))
return -1, rsp_code, r_size
if rsp_code:
print("ISI get [{}] result error_code: [{}] [{}]\n".format(img_id, rsp_code, r_size))
return -1, rsp_code, r_size
if r_size >= 4:
if app_id == constants.APP_AGE_GENDER: # age_gender
gender = ["Female", "Male"]
result = ctypes.cast(
ctypes.byref(r_data), ctypes.POINTER(constants.FDAgeGenderS)).contents
box_count = result.count
print("Img [{}]: {} people\n".format(img_id, box_count))
box = ctypes.cast(
ctypes.byref(result.boxes),
ctypes.POINTER(constants.FDAgeGenderRes * box_count)).contents
for idx in range(box_count):
print("[{}]: {}, {}\n".format(idx, gender[box[idx].ag_res.ismale], box[idx].ag_res.age))
else: # od, yolo
od_header_res = ctypes.cast(
ctypes.byref(r_data), ctypes.POINTER(constants.ObjectDetectionRes)).contents
box_count = od_header_res.box_count
print("image {} -> {} object(s)\n".format(img_id, box_count))
return 0, rsp_code, r_size
print("Img [{}]: result_size {} too small\n".format(img_id, r_size))
return -1, rsp_code, r_size
def isi_capture_frame(cap, frames):
"""Frame read and convert to RGB565.
Arguments:
cap: Active cv2 video capture instance.
frames: List of frames for the video capture to add to.
"""
_cv_ret, frame = cap.read()
if sys.platform == "darwin":
cap.set(cv2.CAP_PROP_FRAME_WIDTH, IMG_SOURCE_W)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, IMG_SOURCE_H)
if frame is None:
print("fail to read from cam!")
frame = cv2.flip(frame, 1)
frames.append(frame)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGR565)
frame_data = frame.reshape(ISI_IMG_SIZE)
c_char_p = ctypes.POINTER(ctypes.c_char)
frame_data = frame_data.astype(np.uint8)
data_p = frame_data.ctypes.data_as(c_char_p)
return data_p
def setup_capture(cam_id, width, height):
"""Sets up the video capture device.
Returns the video capture instance on success and None on failure.
Arguments:
width: Width of frames to capture.
height: Height of frames to capture.
"""
capture = cv2.VideoCapture(cam_id)
if not capture.isOpened():
print("Could not open video device!")
return None
capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
return capture
def start_isi(device_index, app_id, width, height):
"""Starts the ISI mode.
Returns 0 on success and -1 on failure.
Arguments:
device_index: Connected device ID. A host can connect several devices.
app_id: ID of application to be run.
size: Return size.
width: Width of the input image.
height: Height of the input image.
image_format: Format of input image.
"""
print("starting ISI mode...\n")
if (app_id == constants.APP_OD):
image_format = 0x80000060 | constants.IMAGE_FORMAT_CHANGE_ASPECT_RATIO #RGB565, no parallel mode
else:
image_format = 0x80000060 #RGB565, no parallel mode
size = 2048
ret, _, image_buf_size = api.kdp_start_isi_mode(
device_index, app_id, size, width, height, image_format, 0, 0)
if ret:
print("could not set to ISI mode: {} ..\n".format(ret))
return -1
if image_buf_size < 3:
print("ISI mode window {} too small...\n".format(image_buf_size))
return -1
print("ISI mode succeeded (window = {})...\n".format(image_buf_size))
sleep(SLEEP_TIME)
return 0
def start_isi_parallel(device_index, app_id, width, height):
"""Starts the ISI mode.
Returns 0 on success and -1 on failure.
Arguments:
device_index: Connected device ID. A host can connect several devices.
app_id: ID of application to be run.
size: Return size.
width: Width of the input image.
height: Height of the input image.
image_format: Format of input image.
"""
print("starting ISI mode...\n")
if (app_id == constants.APP_OD):
image_format = 0x88000060 | constants.IMAGE_FORMAT_CHANGE_ASPECT_RATIO #RGB565, parallel mode
else:
image_format = 0x88000060 #RGB565, parallel mode
size = 2048
ret, _, image_buf_size = api.kdp_start_isi_mode(
device_index, app_id, size, width, height, image_format, 0, 0)
if ret:
print("could not set to ISI mode: {} ..\n".format(ret))
return -1
if image_buf_size < 3:
print("ISI mode window {} too small...\n".format(image_buf_size))
return -1
print("ISI mode succeeded (window = {})...\n".format(image_buf_size))
sleep(SLEEP_TIME)
return 0
def start_isi_parallel_ext(device_index, app_id, width, height):
"""Starts the ISI mode with isi configuration.
Returns 0 on success and -1 on failure.
Arguments:
device_index: Connected device ID. A host can connect several devices.
app_id: ID of application to be run.
size: Return size.
width: Width of the input image.
height: Height of the input image.
image_format: Format of input image.
"""
print("starting ISI mode...\n")
# isi configuration
c_int_p = ctypes.POINTER(ctypes.c_int)
crop_box = np.array([0, 0, 0, 0]) # for future use
crop_box = crop_box.astype(np.int32)
crop_box = crop_box.ctypes.data_as(c_int_p)
pad_value = np.array([0, 0, 0, 0]) # for future use
pad_value = pad_value.astype(np.int32)
pad_value = pad_value.ctypes.data_as(c_int_p)
c_float_p = ctypes.POINTER(ctypes.c_float)
extra_param = np.array([0.0])
extra_param = extra_param.astype(np.float32)
extra_param = extra_param.ctypes.data_as(c_float_p)
if (app_id == constants.APP_OD):
image_format = 0x88000060 | constants.IMAGE_FORMAT_CHANGE_ASPECT_RATIO #RGB565, parallel mode
else:
image_format = 0x88000060 #RGB565, parallel mode
size = 2048
isi_cfg = constants.KDPISIConfig(app_id, size, width, height, image_format, crop_box, pad_value, extra_param)
dat_size = ctypes.sizeof(isi_cfg)
ret, _, image_buf_size = api.kdp_start_isi_mode_ext(
device_index, ctypes.cast(ctypes.byref(isi_cfg), ctypes.c_char_p), dat_size, 0, 0)
if ret:
print("could not set to ISI mode: {} ..\n".format(ret))
return -1
if image_buf_size < 3:
print("ISI mode window {} too small...\n".format(image_buf_size))
return -1
print("ISI mode succeeded (window = {})...\n".format(image_buf_size))
sleep(SLEEP_TIME)
return 0
def fill_buffer(device_index, capture, size, frames):
"""Fill up the image buffer using the capture device.
Arguments:
device_index: Connected device ID. A host can connect several devices.
capture: Active cv2 video capture instance.
size: Size of the input images.
frames: List of frames captured by the video capture instance.
"""
print("starting ISI inference ...\n")
img_id_tx = 1234
img_left = 12
buffer_depth = 0
while 1:
data_p = isi_capture_frame(capture, frames)
ret, error_code, img_left = isi_inference(
device_index, data_p, size, img_id_tx, 0, img_left)
if ret:
print("Companion inference failed")
return -1, img_id_tx, img_left, buffer_depth
if not error_code:
img_id_tx += 1
buffer_depth += 1
if not img_left:
break
return 0, img_id_tx, img_left, buffer_depth
def pipeline_inference(device_index, app_id, loops, input_size, capture,
img_id_tx, img_left, buffer_depth, frames, post_handler):
"""Send the rest of images and get the results.
Arguments:
device_index: Connected device ID. A host can connect several devices.
app_id: ID of application to be run.
loops: Number of images to get results.
input_size: Size of input image.
capture: Active cv2 video capture instance.
img_id_tx: Should be returned from fill_buffer.
img_left: Should be returned from fill_buffer.
buffer_depth: Should be returned from fill_buffer.
frames: List of frames captured by the video capture instance.
post_handler: Function to process the results of the inference.
"""
img_id_rx = 1234
ret_size = 2048
inf_res = (ctypes.c_char * ret_size)()
while loops:
_, _, result_size = isi_get_result(
device_index, img_id_rx, 0, 0, inf_res, app_id)
post_handler(inf_res, result_size, frames)
img_id_rx += 1
data_p = isi_capture_frame(capture, frames)
ret, _, img_left = isi_inference(
device_index, data_p, input_size, img_id_tx, 0, img_left)
if ret:
return ret
img_id_tx += 1
loops -= 1
# Get last 2 results
while buffer_depth:
ret, _, result_size = isi_get_result(
device_index, img_id_rx, 0, 0, inf_res, app_id)
post_handler(inf_res, result_size, frames)
img_id_rx += 1
buffer_depth -= 1
return 0
def dme_fill_buffer(device_index, capture, size, frames):
"""Send 1 image to the DME image buffers using the capture device.
Arguments:
device_index: Connected device ID. A host can connect several devices.
capture: Active cv2 video capture instance.
size: Size of the input images.
frames: List of frames captured by the video capture instance.
"""
print("starting DME inference ...\n")
inf_res = (ctypes.c_char * 256000)()
res_flag = False
mode = 1
model_id = 0
ssid = 0
img_buf = isi_capture_frame(capture, frames)
_ret, ssid, res_flag = api.kdp_dme_inference(
device_index, img_buf, size, ssid, res_flag, inf_res, mode, model_id)
return 0, ssid
def dme_pipeline_inference(device_index, app_id, loops, input_size, capture,
prev_ssid, frames, post_handler):
"""Send the rest of images and get the results.
Arguments:
device_index: Connected device ID. A host can connect several devices.
app_id: ID of application to be run.
loops: Number of images to get results.
input_size: Size of input image.
capture: Active cv2 video capture instance.
prev_ssid: Should be returned from dme_fill_buffer.
frames: List of frames captured by the video capture instance.
post_handler: Function to process the results of the inference.
"""
inf_res = (ctypes.c_char * 256000)()
res_flag = False
mode = 1
model_id = 0
ssid = 0
inf_size = 0
while loops:
img_buf = isi_capture_frame(capture, frames)
_ret, ssid, res_flag = api.kdp_dme_inference(
device_index, img_buf, input_size, ssid, res_flag, inf_res, mode, model_id)
# get status for previous session
# print("ssid prev ", ssid, prev_ssid)
while 1:
status = 0 # Must re-initialize status to 0
_ret, prev_ssid, status, inf_size = api.kdp_dme_get_status(
device_index, prev_ssid, status, inf_size, inf_res)
# print(status, inf_size)
if status == 1:
if (app_id == constants.APP_TINY_YOLO3):
npraw_data = get_detection_res(device_index, inf_size)
post_handler(device_index, npraw_data, frames)
break
prev_ssid = ssid
loops -= 1
    # Get the last result
while 1:
status = 0 # Must re-initialize status to 0
_ret, prev_ssid, status, inf_size = api.kdp_dme_get_status(
device_index, prev_ssid, status, inf_size, inf_res)
# print(status, inf_size)
if status == 1:
if (app_id == constants.APP_TINY_YOLO3):
npraw_data = get_detection_res(device_index, inf_size)
post_handler(device_index, npraw_data, frames)
break
return 0
def read_file_to_buf(image_file, image_size):
"""Reads input image into a buffer.
Arguments:
image_file: File containing the input image.
image_size: Size of the input image.
"""
buffer = (ctypes.c_char * image_size)()
length = api.read_file_to_buf(buffer, image_file, image_size)
if length <= 0:
print("reading image file, {}, failed: {}...\n".format(image_file, length))
return None
return buffer
def isi_send_first_two(dev_idx, buffer, buffer_t, size):
"""Sends two images first for inference.
Arguments:
dev_idx: Connected device ID. A host can connect several devices.
buffer: Buffer holding the image data from the first file.
buffer_t: Buffer holding the image data from the second file.
size: Size of one input image.
"""
print("starting ISI inference ...\n")
img_id_tx = 1234
img_left = 12
ret, _, img_left = isi_inference(
dev_idx, buffer, size, img_id_tx, 0, img_left)
if ret:
return ret, img_left
img_id_tx += 1
ret, _, img_left = isi_inference(
dev_idx, buffer_t, size, img_id_tx, 0, img_left)
if ret:
return ret, img_left
img_id_tx += 1
return 0, img_left
def isi_send_rest(dev_idx, app_id, buffer, buffer_t, input_size,
ret_size, img_left, test_loop):
"""Sends rest of the images for inference and results.
Arguments:
dev_idx: Connected device ID. A host can connect several devices.
app_id: ID of application to be run.
buffer: Buffer holding the image data from the first file.
buffer_t: Buffer holding the image data from the second file.
input_size: Size of one input image.
ret_size: Return size.
img_left: Number of image buffers still available for input.
test_loop: Number of loops to send two images.
"""
img_id_tx = 1236
img_id_rx = 1234
inf_res = (ctypes.c_char * ret_size)()
loop = 0
if test_loop > 3:
loop = test_loop - 2
while loop:
ret, _, img_left = isi_inference(
dev_idx, buffer, input_size, img_id_tx, 0, img_left)
if ret:
return ret, img_id_rx
img_id_tx += 1
ret, _, _ = isi_get_result(
dev_idx, img_id_rx, 0, 0, inf_res, app_id)
if ret:
return ret, img_id_rx
img_id_rx += 1
loop -= 1
# Odd loop case
if not loop:
break
ret, _, img_left = isi_inference(
dev_idx, buffer_t, input_size, img_id_tx, 0, img_left)
if ret:
return ret, img_id_rx
img_id_tx += 1
ret, _, _ = isi_get_result(
dev_idx, img_id_rx, 0, 0, inf_res, app_id)
if ret:
return ret, img_id_rx
img_id_rx += 1
loop -= 1
return 0, img_id_rx
def isi_get_last_results(dev_idx, app_id, img_id_rx, ret_size):
"""Gets results for last two images.
Arguments:
dev_idx: Connected device ID. A host can connect several devices.
app_id: ID of application to be run.
img_id_rx: Sequence ID to get inference results of an image with that ID
ret_size: Return size.
"""
inf_res = (ctypes.c_char * ret_size)()
ret, _, _ = isi_get_result(
dev_idx, img_id_rx, 0, 0, inf_res, app_id)
if ret:
return ret
img_id_rx += 1
ret, _, _ = isi_get_result(
dev_idx, img_id_rx, 0, 0, inf_res, app_id)
if ret:
return ret
img_id_rx += 1
return 0
def kdp_get_kn_number(device_index, kn_num):
"""Request for system KN number.
    Returns 0 on success and -1 on failure, along with the retrieved KN number.
Arguments:
device_index: Connected device ID. A host can connect several devices.
kn_num: ID of KN number.
"""
ret, kn_number = api.kdp_get_kn_number(device_index, 0)
return ret, kn_number
def kdp_get_model_info(device_index, from_ddr):
"""Request for request for model IDs in DDR or Flash.
Returns 0 on success and -1 on failure.
Arguments:
device_index: Connected device ID. A host can connect several devices.
from_ddr: if models are in ddr (1) or flash (0)
data_buf: model info
"""
r_data = (ctypes.c_char * 1024)()
# Get the data for model info: total_number(4 bytes) + model_id_1(4 bytes) + model_id_2(4 bytes) + ...
ret = api.kdp_get_model_info(device_index, from_ddr, r_data)
modelinfo = ctypes.cast(
ctypes.byref(r_data), ctypes.POINTER(ctypes.c_int * 256)).contents
model_info = []
model_info.append(modelinfo[0])
for i in range(modelinfo[0]):
model_info.append(modelinfo[i+1])
#print(modelinfo[i+1], model_info[i+1])
return ret, model_info
def kdp_reset_sys(dev_idx):
api.kdp_end_dme(dev_idx)
|
UTF-8
|
Python
| false | false | 42,264 |
py
| 10 |
kdp_wrapper.py
| 9 | 0.603587 | 0.586551 | 0 | 1,212 | 33.872112 | 121 |
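Tying the DME helpers above together, a capture-to-inference loop might look like the sketch below; the device index, camera id, and frame count are assumptions for illustration, and error handling is elided:

# Hypothetical end-to-end DME flow built from the wrapper functions above.
dev_idx = 0  # assumed device index
frames = []
cap = setup_capture(0, IMG_SOURCE_W, IMG_SOURCE_H)  # camera id 0 is an assumption
if cap is not None and kdp_dme_load_yolo_model(dev_idx, TEST_DME_YOLO_224_DIR) == 0:
    for _ in range(10):  # arbitrary number of frames
        dets = kdp_dme_inference(dev_idx, constants.APP_TINY_YOLO3, cap,
                                 DME_IMG_SIZE, frames)
        print(dets)
    kdp_exit_dme(dev_idx)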
TK-IT/web
| 16,406,775,073,424 |
6f7745d619d48a491e5e0659a1c63a32e3e845f6
|
6218fc9c7e1953992febbcd519f62c820a175764
|
/tkweb/apps/tkbrand/templatetags/tkbrand.py
|
f17374cbb39ca08c3622796644dce76477e6d2de
|
[
"Beerware"
] |
permissive
|
https://github.com/TK-IT/web
|
2ce920a1ed262e9482aae8143ef6473b9475a878
|
2b0626400b8f1a1f8927f9960ec0f70fac37cdf8
|
refs/heads/master
| 2023-03-07T03:14:10.645439 | 2022-11-24T12:05:10 | 2022-11-24T12:05:10 | 29,788,346 | 3 | 4 |
NOASSERTION
| false | 2023-03-01T19:35:00 | 2015-01-24T19:17:54 | 2021-11-30T16:49:43 | 2023-03-01T19:34:58 | 36,236 | 2 | 2 | 88 |
Python
| false | false |
import re
from django import template
from django.utils.safestring import mark_safe
from constance import config
from tkweb.apps.tkbrand import util
import tktitler as tk
register = template.Library()
HTML_T = (
'<span style="vertical-align: -0.038em;">T</span>'
)
HTML_ARING = (
'<span style="font-weight: bold; margin-left: -0.05em;">Å</span>'
)
HTML_AA = (
'<span style="font-weight: bold; margin-left: -0.05em;">A</span>'
'<span style="vertical-align: -0.01em; margin-left: -0.05em; ">A</span>'
)
HTML_GEKAMMER = (
'<span style="margin-left: -0.05em;">G</span>'
'<span style="display: inline-block; transform: rotate(8deg);">E</span>'
'<span style="vertical-align: -0.057em; margin-left: 0.05em;">K</span>'
'<span style="vertical-align: 0.020em; font-weight: bold;">A</span>'
'<span style="vertical-align: -0.057em;">M</span>'
'<span style="display: inline-block; transform: rotate(-8deg); font-weight: bold; margin-left: 0.060em;">M</span>'
'<span style="margin-left: 0.05em;">E</span>'
'<span style="margin-left: 0.02em;">R</span>'
)
HTML_TK = HTML_T + HTML_ARING + HTML_GEKAMMER
HTML_TKAA = HTML_T + HTML_AA + HTML_GEKAMMER
HTML_ET = (
'<span style="vertical-align: 0.057em">E</span>'
+ '<span style="vertical-align: 0.057em">T</span>'
)
HTML_TKET = HTML_TK + HTML_ET
HTML_TKETAA = HTML_TKAA + HTML_ET
HTML_TKETs = HTML_TKET + 's'
HTML_TKETsAA = HTML_TKETAA + 's'
HTML_TKETS = HTML_TKET + 'S'
HTML_TKETSAA = HTML_TKETAA + 'S'
@register.simple_tag
def TK():
return wrap_tk_html(HTML_TK)
@register.simple_tag
def TKAA():
return wrap_tk_html(HTML_TKAA)
@register.simple_tag
def TKET():
return wrap_tk_html(HTML_TKET)
@register.simple_tag
def TKETAA():
return wrap_tk_html(HTML_TKETAA)
@register.simple_tag
def TKETs():
return wrap_tk_html(HTML_TKETs)
@register.simple_tag
def TKETsAA():
return wrap_tk_html(HTML_TKETsAA)
@register.simple_tag
def TKETS():
return wrap_tk_html(HTML_TKETS)
@register.simple_tag
def TKETSAA():
return wrap_tk_html(HTML_TKETSAA)
PRIDE_COLORS = (
'''#e40303 #ff8c00 #ffed00 #008026 #004dff #b100cc #e40303 #ff8c00
#ffed00 #008026 #004dff #b100cc'''.split()
)
UKRAINE_COLORS = '#449FFF #FFD500'.split() * 6
def add_colors(colors, html):
insert_after = 'style="'
count = html.count(insert_after)
if count < len(colors):
colors = [
colors[int(i / count * len(colors))] for i in range(count)
]
else:
s = 1
extra = count - len(colors) + 1
# Repeat element at index s enough times.
colors = colors[:s] + extra * colors[s:s+1] + colors[s+1:]
assert len(colors) == count
def repl(mo):
# Insert next color from 'colors' after 'style="'
return "%scolor: %s; " % (mo.group(0), colors.pop(0))
return re.sub(re.escape(insert_after), repl, html)
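# Illustrative example (a sketch, not part of the template API): with two
# colors and four styled spans, each color is stretched over two spans:
#   add_colors(['#e40303', '#ff8c00'],
#              '<i style="">a</i><i style="">b</i><i style="">c</i><i style="">d</i>')
#   -> '<i style="color: #e40303; ">a</i><i style="color: #e40303; ">b</i>'
#      '<i style="color: #ff8c00; ">c</i><i style="color: #ff8c00; ">d</i>'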
def wrap_tk_html(html):
if config.PRIDE:
html = add_colors(PRIDE_COLORS, html)
elif config.UKRAINE:
html = add_colors(UKRAINE_COLORS, html)
return mark_safe('<span class="tk-brand">' + html + '</span>')
@register.filter
def gfyearPP(gfyear):
return util.gfyearPP(gfyear)
@register.filter
def gfyearPPslash(gfyear):
return util.gfyearPPslash(gfyear)
@register.filter
def gfyearPPslash_gallery(gfyear):
"""
For the gallery app where there is a special multi-year album
"""
if gfyear == 1960:
return "60/64"
return util.gfyearPPslash(gfyear)
@register.filter
def tk_prefix(title, arg='unicode'):
return tk.prefix(title, gfyear=config.GFYEAR, type=arg)
@register.filter
def tk_kprefix(title, arg='unicode'):
return tk.kprefix(title, gfyear=config.GFYEAR, type=arg)
@register.filter
def tk_postfix(title, arg='single'):
return tk.postfix(title, type=arg)
@register.filter
def tk_prepostfix(title, arg='longslash'):
"""
    :param str arg: postfix type passed to :func:`tktitler.prepostfix`.
    The prefix type cannot be changed.
"""
return tk.prepostfix(title, gfyear=config.GFYEAR,
prefixtype='unicode', postfixtype=arg)
@register.filter
def tk_email(title, arg='postfix'):
return tk.email(title, gfyear=config.GFYEAR, type=arg)
# For evaluation of tags in flatpages
@register.tag(name="evaluate")
def do_evaluate(parser, token):
"""
tag usage {% evaluate flatpage.content %}
"""
try:
tag_name, variable = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError("%r tag requires a single argument"
% token.contents.split()[0])
return EvaluateNode(variable)
class EvaluateNode(template.Node):
def __init__(self, variable):
self.variable = template.Variable(variable)
def render(self, context):
try:
content = self.variable.resolve(context)
content = '{% load tkbrand %}\n' + content # Always load tkbrand
t = template.Template(content)
return t.render(context)
except (template.VariableDoesNotExist, template.TemplateSyntaxError):
            return 'Error rendering %s' % self.variable
|
UTF-8
|
Python
| false | false | 5,200 |
py
| 272 |
tkbrand.py
| 182 | 0.644094 | 0.621008 | 0 | 203 | 24.605911 | 118 |
Minho-dev/Minho-Programmers
| 9,122,510,561,409 |
afbbb9d1f46ba45cc8ac923f7d3a3dede9493d92
|
de5ae93ffc92e808cf5b97553bde93f3bd2a68b5
|
/모의고사.py
|
13fe695914498bf99ff00130a66d54e3f379c7da
|
[] |
no_license
|
https://github.com/Minho-dev/Minho-Programmers
|
7958aa89bb1eb2c5b98a1e75cdf56dc3e619b3d5
|
b92f12d2a5cc4eb5d9a6d3d107e08055857e536b
|
refs/heads/master
| 2020-07-31T19:27:19.880172 | 2019-10-15T00:23:06 | 2019-10-15T00:23:06 | 210,728,212 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def solution(answers):
answer = []
a = [ 1, 2, 3, 4, 5 ] * ( len( answers ) // 5 + 1 )
b = [ 2, 1, 2, 3, 2, 4, 2, 5 ] * ( len( answers ) // 8 + 1 )
c = [ 3, 3, 1, 1, 2, 2, 4, 4, 5, 5 ] * ( len( answers ) // 10 + 1 )
a_count = 0
b_count = 0
c_count = 0
for num in range( len( answers ) ):
if answers[num] == a[num]:
a_count += 1
if answers[num] == b[num]:
b_count += 1
if answers[num] == c[num]:
c_count += 1
rank = [ [1, a_count], [2, b_count], [3, c_count] ]
rank = sorted( rank, key = lambda key: key[1], reverse = True )
if rank[0][1] == rank[2][1]:
answer = [rank[0][0], rank[1][0], rank[2][0]]
elif rank[0][1] == rank[1][1]:
answer = [ rank[0][0], rank[1][0] ]
else:
answer = [ rank[0][0] ]
return answer
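if __name__ == "__main__":
    # 모의고사 means "mock exam" (a Programmers practice problem). Quick sanity
    # check: pattern 1 ([1, 2, 3, 4, 5]) matches every answer here, so student
    # 1 wins alone.
    print(solution([1, 2, 3, 4, 5]))  # -> [1]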
|
UTF-8
|
Python
| false | false | 922 |
py
| 17 |
모의고사.py
| 17 | 0.396963 | 0.331887 | 0 | 28 | 30.928571 | 71 |
VisPro128/testing_project
| 18,348,100,319,065 |
90a3b5180ba10d56f59063ecca55895e4323ae6c
|
5b460657da6aab0062353490865544dc6eb0b579
|
/run_tests.py
|
82b671cc22879fd49c8968f99dccef9697a9ad8b
|
[] |
no_license
|
https://github.com/VisPro128/testing_project
|
c8055cb6e0d30e09ce6cdb161b8ed39e70043d63
|
a71808b73caa6d2afae2a03aea31fb446a24861c
|
refs/heads/main
| 2023-08-24T06:37:03.128626 | 2021-10-21T10:40:14 | 2021-10-21T10:40:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
os.system("coverage run --branch -m pytest tests.py")
os.system("coverage html")
|
UTF-8
|
Python
| false | false | 91 |
py
| 5 |
run_tests.py
| 3 | 0.736264 | 0.736264 | 0 | 4 | 22 | 53 |
kirill-s576/pg_json_table
| 18,047,452,589,831 |
afb74bc6e74d3bde03953d83df064b4d1908198e
|
a2cf1319dcf4962215b5ee633a0683ee1407a2d8
|
/config.py
|
62534cf0b14f37c35d5cca1357374f7d6976c115
|
[] |
no_license
|
https://github.com/kirill-s576/pg_json_table
|
b0f937845e98b3ce8a32f808c2e2ad6ffbdb337e
|
975d4a3186473c5b0fe78c040aefabb6f736b295
|
refs/heads/main
| 2023-04-18T21:14:10.486091 | 2021-05-05T11:01:01 | 2021-05-05T11:01:01 | 364,546,164 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
from dotenv import load_dotenv
from pathlib import Path
ENV_FILE_PATH = 'variables.env'
# Load environment variables from variables.env
env_path = Path(ENV_FILE_PATH)
load_dotenv(dotenv_path=env_path)
# PostgreSQL connection settings
POSTGRES_USER = os.environ.get("POSTGRES_USER", None)
POSTGRES_PASSWORD = os.environ.get("POSTGRES_PASSWORD", None)
POSTGRES_HOST = os.environ.get("POSTGRES_HOST", None)
POSTGRES_PORT = os.environ.get("POSTGRES_PORT", None)
POSTGRES_DB = os.environ.get("POSTGRES_DB", None)
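# Example (a sketch, not part of the original config): assemble a
# psycopg/SQLAlchemy-style connection URL from the settings above; the name
# DATABASE_URL is our own convention here.
DATABASE_URL = (
    f"postgresql://{POSTGRES_USER}:{POSTGRES_PASSWORD}"
    f"@{POSTGRES_HOST}:{POSTGRES_PORT}/{POSTGRES_DB}"
)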
|
UTF-8
|
Python
| false | false | 464 |
py
| 12 |
config.py
| 9 | 0.747845 | 0.747845 | 0 | 17 | 26.294118 | 61 |
ZacharyEWelch/game
| 7,335,804,175,910 |
64b7bc5f4a66506643c6296ce3ec85716efc8b4d
|
348e0d10df0bee6d40b8225b19f8ad0d5ec0f69a
|
/rawdb/genncgr.py
|
eea2b17f6d5095b769ee2d42cafb9ce0a943fe99
|
[] |
no_license
|
https://github.com/ZacharyEWelch/game
|
a0dddd6ca442bec91dce53564591440baa7132f2
|
ea1c5dab7625aa1ac8a0c66900bbdec51aa570f0
|
refs/heads/master
| 2021-01-01T20:07:13.994364 | 2017-07-30T02:23:04 | 2017-07-30T02:23:04 | 98,767,269 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from gen import *
from nds import narc, ncgr
import os
ofile = None
FNAME = "ncgr"+FEXT
def recursefs(d):
if os.path.isdir(C_DIR+d):
for f in sorted(os.listdir(C_DIR+d)):
recursefs(d+"/"+f)
else:
f = open(C_DIR+d, "rb")
count = 0
if f.read(4) == "NARC":
f.seek(0)
n = narc.NARC(f.read())
f.close()
pfile = None
dirname = os.path.dirname(ODIR+d)
for j, f in enumerate(n.gmif.files):
if f[:4] == "RGCN":
count += 1
graphic = ncgr.NCGR(f)
if not pfile:
template.mkdir(dirname)
pfile = template.open(ODIR+d+FEXT, "w", "Pokemon %s NCGR - %s"%(game.title(), d))
pfile.write("<p><a href='%s%s'>RGCN list</a></p>\n"%("../"*len(d.strip("/").split("/")), FNAME))
pfile.write("<p>File %i: %ix%i</p>"%(j, graphic.char.width, graphic.char.height))
pfile.write("<p><img src='%i.png' alt='%s %i RGCN'></p>\n"%(j, d, j))
graphic.toImage().save("%s/%i.png"%(dirname, j))
del graphic
if pfile:
pfile.close()
else:
f.close()
if count:
ofile.write("\t<tr><td><a href='ncgr%s'>%s</a></td><td>%i files</td></tr>\n"%(d+FEXT, d, count))
else:
ofile.write("\t<tr><td>%s</td><td>0 files</td></tr>\n"%d)
for game in games:
ODIR = STATIC_DIR+game+"/ncgr/"
ofile = template.open(STATIC_DIR+game+"/"+FNAME, "w", "Pokemon %s NCGR (Graphics) Files"%game.title())
ofile.write("""
<h2>Pokemon %s NCGR (Graphics) Files</h2>
<table class='filelist'>\n"""%game.title())
C_DIR = DATA_DIR+game+"/fs"
recursefs("")
ofile.write("</table>")
ofile.close()
|
UTF-8
|
Python
| false | false | 1,897 |
py
| 60 |
genncgr.py
| 57 | 0.467053 | 0.462836 | 0 | 53 | 34.811321 | 120 |
GSimas/PyThings
| 3,375,844,312,827 |
a9642d074cefa15be33dcbb3cc6481c909ddf5b6
|
4e5a152ad83224854a163e7fe9bae7c96be88f0c
|
/RandomCourses/calico-aula1.py
|
0b0e199efe64cc4eebfa658e4284b295ae06bb7d
|
[] |
no_license
|
https://github.com/GSimas/PyThings
|
4dcc82e36fc3a3d535ac08e0540eab639f489f52
|
5f2f9f3262a869e911fb642a4d344badfad66df8
|
refs/heads/master
| 2021-01-23T10:47:55.916118 | 2020-02-03T23:12:17 | 2020-02-03T23:12:17 | 93,092,905 | 5 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null |
''' Assignment code covering basic Python variables, input/print,
logical and arithmetic operations, functions, and control structures
Code sequence based on Calico UFSC python minicourse - class 1
Dependencies
Python 3.5.x (64bit)
Developer: Gustavo S.
Date: June-2017
References: https://github.com/CalicoUFSC/minicurso-python
'''
# 1) Write a function named 'factorial' that receives 'n' and returns n!
def factorial(num):
x = 1
if num == 0:
return 1
for i in range(1,num+1):
x *= i
return x
print("Factorial of 5 is: ",factorial(5))
print("Factorial of 1 is: ",factorial(1))
print("Factorial of 0 is: ",factorial(0))
#or...
def fact(num):
if num == 0:
return 1
elif num < 0:
return "Value is not valid"
else:
x = num * fact(num-1)
return x
print("Factorial (recursive) of 6 is: ",fact(6))
print("Factorial (recursive) of 0 is: ",fact(0))
print("Factorial (recursive) of -4 is: ",fact(-4))
# 2) Write a Fibonacci series function
def fibonacci(num):
x, y = 0, 1
if num < 0:
return None
if num == 0:
return 0
if num == 1:
return 1
    for i in range(2, num + 1):  # start at 2 so that fibonacci(5) == 5, not 8
l = x + y
y, x = l, y
return l
print("Fibonacci of 5 is: ", fibonacci(5))
print("Fibonacci of 0 is: ", fibonacci(0))
print("Fibonacci of -2 is: ", fibonacci(-2))
# 3) Write a maximum common divisor (GCD) function. The exercise names
# Euclid's algorithm; a brute-force version comes first, with a Euclid
# sketch after it
def mcd(a,b):
max = 0
if a >= b:
for i in range(1,a+1):
if a % i == 0 and b % i == 0:
max = i
if a < b:
for i in range(1,b+1):
if a % i == 0 and b % i == 0:
max = i
return max
print ("Maximum commond divisor of 10 and 5 is: ", mcd(10, 5))
print ("Maximum commond divisor of 17 and -2 is: ", mcd(17, -2))
print ("Maximum commond divisor of 20 and 2 is: ", mcd(20, 2))
# 4) Check if a number is prime
def is_prime(num):
    if num < 2:  # handles 0, 1 and negative numbers
        return False
if num == 2:
return True
for d in range(2, num):
if num % d == 0:
return False
return True
print("Check if 10 is prime: ", is_prime(10))
print("Check if 383 is prime: ", is_prime(383))
print("Check if -7 is prime: ", is_prime(10))
|
UTF-8
|
Python
| false | false | 2,289 |
py
| 58 |
calico-aula1.py
| 39 | 0.573613 | 0.532984 | 0 | 91 | 24.164835 | 72 |
policecar/etaoin
| 2,173,253,492,383 |
e0e6fb5beb7dfe1c217f4c70410d0652663269ae
|
a4cedd825018c061931cce2ebb084192199160f3
|
/google-ngrams/download/download_ngrams.py
|
c03e3f4e3f079673de86252aa887e8994a8802bb
|
[] |
no_license
|
https://github.com/policecar/etaoin
|
0eecde882d0372f425320998677b4a4e4df8571e
|
ea0c0dff3af840dea2ae1019e6c9b6db2986574a
|
refs/heads/master
| 2021-01-02T09:26:23.375927 | 2013-11-14T17:37:24 | 2013-11-14T17:37:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# adapted from https://github.com/piantado/ngrampy/blob/master/download.py
"""
Script to selectively download google ngram data from
http://storage.googleapis.com/books/ngrams/books/datasetsv2.html.
"""
import httplib2
from BeautifulSoup import BeautifulSoup, SoupStrainer
import re
import os
import urllib
# from IPython import embed
version = "20120701"
language = "eng-all"
# languages: eng-all, eng-us-all, eng-gb-all, fre-all, ger-all, heb-all, ita-all, rus-all, spa-all
base_url = "http://storage.googleapis.com/books/ngrams/books/"
index_page = "datasetsv2.html"
pattern = r"googlebooks-%s-([\d\w]+)-%s" % ( language, version )
# scrape relevant links from the landing /index page
http = httplib2.Http()
status, response = http.request( base_url + index_page )
for link in BeautifulSoup( response, parseOnlyThese=SoupStrainer('a') ):
if link.has_key( 'href' ):
url = link['href']
# if url matches specified language and version
match = re.search( pattern, url )
if match:
filename = os.path.basename( url )
whatgram = match.group(1)
subdir = os.path.join( language, whatgram )
# if directory doesn't exist, make it
if not os.path.exists( language ): os.mkdir( language )
if not os.path.exists( subdir ): os.mkdir( subdir )
# if file doesn't exist, download it
#TODO: add checksum /hash test or some other way to verify file integrity
filepath = os.path.join( language, whatgram, filename )
if not os.path.exists( filepath ):
try:
print "# Downloading %s to %s" % ( url, filepath )
urllib.urlretrieve( url, filepath )
except urllib.ContentTooShortError:
os.remove( filepath )
urllib.urlretrieve( url, filepath )
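# A sketch for the integrity TODO above (assuming no published checksums for
# these files, compare the on-disk size against the server's Content-Length):
#   import urllib2
#   expected = urllib2.urlopen( url ).info().getheader( 'Content-Length' )
#   if expected and os.path.getsize( filepath ) != int( expected ):
#       os.remove( filepath ) # force a re-download on the next run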
|
UTF-8
|
Python
| false | false | 2,012 |
py
| 16 |
download_ngrams.py
| 10 | 0.607853 | 0.600895 | 0 | 57 | 34.298246 | 98 |
tarnilok/javascript-python
| 14,809,047,269,613 |
d3d2a33c473f60cb6743ae9a56b3545368d08cff
|
62bcfa4f17f927d3eac1f435cb8f9a790e8fd665
|
/teamwork(reversestring).py
|
50b8fead0f1b9c4ee4763f5fab28b681c5bb4667
|
[] |
no_license
|
https://github.com/tarnilok/javascript-python
|
c835c9b35f76e4e33a64e60e5c1a7f47ba42f5ce
|
8c8164434f9e237f9c6c437e72e71e75802eb281
|
refs/heads/main
| 2023-08-14T00:12:11.907006 | 2021-10-06T21:23:40 | 2021-10-06T21:23:40 | 381,395,858 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Solution 1
# def reverser (ins) :
# return ins[::-1]
# print(reverser(input("give me a string to reverse").strip()))
# Solution 2
# def reverser2 (ins) :
# str = ""
# for i in ins:
# str = i + str
# return str
# print(reverser2(input("give me a string to reverse").strip()))
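# Solution 3 (an alternative sketch): reversed() yields characters back-to-front
# def reverser3 (ins) :
#     return "".join(reversed(ins))
# print(reverser3(input("give me a string to reverse").strip()))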
|
UTF-8
|
Python
| false | false | 305 |
py
| 16 |
teamwork(reversestring).py
| 11 | 0.577049 | 0.560656 | 0 | 15 | 19.333333 | 64 |
KaranToor/MA450
| 16,329,465,668,793 |
af7a4e76165e765480c3b3e4052458b3f008c9ed
|
23611933f0faba84fc82a1bc0a85d97cf45aba99
|
/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/iam/v1/iam_v1_messages.py
|
564ea8ef5e1279851d920900a5b744732ee51a1d
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
https://github.com/KaranToor/MA450
|
1f112d1caccebdc04702a77d5a6cee867c15f75c
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
refs/heads/master
| 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 |
Apache-2.0
| false | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | 2020-12-24T00:36:47 | 2020-12-24T00:38:08 | 46,069 | 1 | 1 | 4 |
Python
| false | false |
"""Generated message classes for iam version v1.
Manages identity and access control for Google Cloud Platform resources,
including the creation of service accounts, which you can use to authenticate
to Google and make API calls.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
package = 'iam'
class AuditData(_messages.Message):
"""Audit log information specific to Cloud IAM. This message is serialized
as an `Any` type in the `ServiceData` message of an `AuditLog` message.
Fields:
policyDelta: Policy delta between the original policy and the newly set
policy.
"""
policyDelta = _messages.MessageField('PolicyDelta', 1)
class Binding(_messages.Message):
"""Associates `members` with a `role`.
Fields:
members: Specifies the identities requesting access for a Cloud Platform
resource. `members` can have the following values: * `allUsers`: A
special identifier that represents anyone who is on the internet;
with or without a Google account. * `allAuthenticatedUsers`: A special
identifier that represents anyone who is authenticated with a Google
account or a service account. * `user:{emailid}`: An email address that
represents a specific Google account. For example, `alice@gmail.com`
or `joe@example.com`. * `serviceAccount:{emailid}`: An email address
that represents a service account. For example, `my-other-
app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address
that represents a Google group. For example, `admins@example.com`. *
`domain:{domain}`: A Google Apps domain name that represents all the
users of that domain. For example, `google.com` or `example.com`.
role: Role that is assigned to `members`. For example, `roles/viewer`,
`roles/editor`, or `roles/owner`. Required
"""
members = _messages.StringField(1, repeated=True)
role = _messages.StringField(2)
class BindingDelta(_messages.Message):
"""One delta entry for Binding. Each individual change (only one member in
each entry) to a binding will be a separate entry.
Enums:
ActionValueValuesEnum: The action that was performed on a Binding.
Required
Fields:
action: The action that was performed on a Binding. Required
member: A single identity requesting access for a Cloud Platform resource.
Follows the same format of Binding.members. Required
role: Role that is assigned to `members`. For example, `roles/viewer`,
`roles/editor`, or `roles/owner`. Required
"""
class ActionValueValuesEnum(_messages.Enum):
"""The action that was performed on a Binding. Required
Values:
ACTION_UNSPECIFIED: Unspecified.
ADD: Addition of a Binding.
REMOVE: Removal of a Binding.
"""
ACTION_UNSPECIFIED = 0
ADD = 1
REMOVE = 2
action = _messages.EnumField('ActionValueValuesEnum', 1)
member = _messages.StringField(2)
role = _messages.StringField(3)
class CreateServiceAccountKeyRequest(_messages.Message):
"""The service account key create request.
Enums:
KeyAlgorithmValueValuesEnum: Which type of key and algorithm to use for
the key. The default is currently a 4K RSA key. However this may change
in the future.
PrivateKeyTypeValueValuesEnum: The output format of the private key.
`GOOGLE_CREDENTIALS_FILE` is the default output format.
Fields:
keyAlgorithm: Which type of key and algorithm to use for the key. The
default is currently a 4K RSA key. However this may change in the
future.
privateKeyType: The output format of the private key.
`GOOGLE_CREDENTIALS_FILE` is the default output format.
"""
class KeyAlgorithmValueValuesEnum(_messages.Enum):
"""Which type of key and algorithm to use for the key. The default is
currently a 4K RSA key. However this may change in the future.
Values:
KEY_ALG_UNSPECIFIED: An unspecified key algorithm.
KEY_ALG_RSA_1024: 1k RSA Key.
KEY_ALG_RSA_2048: 2k RSA Key.
"""
KEY_ALG_UNSPECIFIED = 0
KEY_ALG_RSA_1024 = 1
KEY_ALG_RSA_2048 = 2
class PrivateKeyTypeValueValuesEnum(_messages.Enum):
"""The output format of the private key. `GOOGLE_CREDENTIALS_FILE` is the
default output format.
Values:
TYPE_UNSPECIFIED: Unspecified. Equivalent to
`TYPE_GOOGLE_CREDENTIALS_FILE`.
TYPE_PKCS12_FILE: PKCS12 format. The password for the PKCS12 file is
`notasecret`. For more information, see
https://tools.ietf.org/html/rfc7292.
TYPE_GOOGLE_CREDENTIALS_FILE: Google Credentials File format.
"""
TYPE_UNSPECIFIED = 0
TYPE_PKCS12_FILE = 1
TYPE_GOOGLE_CREDENTIALS_FILE = 2
keyAlgorithm = _messages.EnumField('KeyAlgorithmValueValuesEnum', 1)
privateKeyType = _messages.EnumField('PrivateKeyTypeValueValuesEnum', 2)
class CreateServiceAccountRequest(_messages.Message):
"""The service account create request.
Fields:
accountId: Required. The account id that is used to generate the service
account email address and a stable unique id. It is unique within a
project, must be 6-30 characters long, and match the regular expression
`[a-z]([-a-z0-9]*[a-z0-9])` to comply with RFC1035.
serviceAccount: The ServiceAccount resource to create. Currently, only the
following values are user assignable: `display_name` .
"""
accountId = _messages.StringField(1)
serviceAccount = _messages.MessageField('ServiceAccount', 2)
class Empty(_messages.Message):
"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo {
rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The
JSON representation for `Empty` is empty JSON object `{}`.
"""
class IamProjectsServiceAccountsCreateRequest(_messages.Message):
"""A IamProjectsServiceAccountsCreateRequest object.
Fields:
createServiceAccountRequest: A CreateServiceAccountRequest resource to be
passed as the request body.
name: Required. The resource name of the project associated with the
service accounts, such as `projects/my-project-123`.
"""
createServiceAccountRequest = _messages.MessageField('CreateServiceAccountRequest', 1)
name = _messages.StringField(2, required=True)
class IamProjectsServiceAccountsDeleteRequest(_messages.Message):
"""A IamProjectsServiceAccountsDeleteRequest object.
Fields:
name: The resource name of the service account in the following format:
`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. Using
`-` as a wildcard for the project will infer the project from the
account. The `account` value can be the `email` address or the
`unique_id` of the service account.
"""
name = _messages.StringField(1, required=True)
class IamProjectsServiceAccountsGetIamPolicyRequest(_messages.Message):
"""A IamProjectsServiceAccountsGetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being requested.
`resource` is usually specified as a path. For example, a Project
resource is specified as `projects/{project}`.
"""
resource = _messages.StringField(1, required=True)
class IamProjectsServiceAccountsGetRequest(_messages.Message):
"""A IamProjectsServiceAccountsGetRequest object.
Fields:
name: The resource name of the service account in the following format:
`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. Using
`-` as a wildcard for the project will infer the project from the
account. The `account` value can be the `email` address or the
`unique_id` of the service account.
"""
name = _messages.StringField(1, required=True)
class IamProjectsServiceAccountsKeysCreateRequest(_messages.Message):
"""A IamProjectsServiceAccountsKeysCreateRequest object.
Fields:
createServiceAccountKeyRequest: A CreateServiceAccountKeyRequest resource
to be passed as the request body.
name: The resource name of the service account in the following format:
`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. Using
`-` as a wildcard for the project will infer the project from the
account. The `account` value can be the `email` address or the
`unique_id` of the service account.
"""
createServiceAccountKeyRequest = _messages.MessageField('CreateServiceAccountKeyRequest', 1)
name = _messages.StringField(2, required=True)
class IamProjectsServiceAccountsKeysDeleteRequest(_messages.Message):
"""A IamProjectsServiceAccountsKeysDeleteRequest object.
Fields:
name: The resource name of the service account key in the following
format: `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/k
eys/{key}`. Using `-` as a wildcard for the project will infer the
project from the account. The `account` value can be the `email` address
or the `unique_id` of the service account.
"""
name = _messages.StringField(1, required=True)
class IamProjectsServiceAccountsKeysGetRequest(_messages.Message):
"""A IamProjectsServiceAccountsKeysGetRequest object.
Enums:
PublicKeyTypeValueValuesEnum: The output format of the public key
requested. X509_PEM is the default output format.
Fields:
name: The resource name of the service account key in the following
format: `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/k
eys/{key}`. Using `-` as a wildcard for the project will infer the
project from the account. The `account` value can be the `email` address
or the `unique_id` of the service account.
publicKeyType: The output format of the public key requested. X509_PEM is
the default output format.
"""
class PublicKeyTypeValueValuesEnum(_messages.Enum):
"""The output format of the public key requested. X509_PEM is the default
output format.
Values:
TYPE_NONE: <no description>
TYPE_X509_PEM_FILE: <no description>
TYPE_RAW_PUBLIC_KEY: <no description>
"""
TYPE_NONE = 0
TYPE_X509_PEM_FILE = 1
TYPE_RAW_PUBLIC_KEY = 2
name = _messages.StringField(1, required=True)
publicKeyType = _messages.EnumField('PublicKeyTypeValueValuesEnum', 2)
class IamProjectsServiceAccountsKeysListRequest(_messages.Message):
"""A IamProjectsServiceAccountsKeysListRequest object.
Enums:
KeyTypesValueValuesEnum: Filters the types of keys the user wants to
include in the list response. Duplicate key types are not allowed. If no
key type is provided, all keys are returned.
Fields:
keyTypes: Filters the types of keys the user wants to include in the list
response. Duplicate key types are not allowed. If no key type is
provided, all keys are returned.
name: The resource name of the service account in the following format:
`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. Using
`-` as a wildcard for the project, will infer the project from the
account. The `account` value can be the `email` address or the
`unique_id` of the service account.
"""
class KeyTypesValueValuesEnum(_messages.Enum):
"""Filters the types of keys the user wants to include in the list
response. Duplicate key types are not allowed. If no key type is provided,
all keys are returned.
Values:
KEY_TYPE_UNSPECIFIED: <no description>
USER_MANAGED: <no description>
SYSTEM_MANAGED: <no description>
"""
KEY_TYPE_UNSPECIFIED = 0
USER_MANAGED = 1
SYSTEM_MANAGED = 2
keyTypes = _messages.EnumField('KeyTypesValueValuesEnum', 1, repeated=True)
name = _messages.StringField(2, required=True)
class IamProjectsServiceAccountsListRequest(_messages.Message):
"""A IamProjectsServiceAccountsListRequest object.
Fields:
name: Required. The resource name of the project associated with the
service accounts, such as `projects/my-project-123`.
pageSize: Optional limit on the number of service accounts to include in
the response. Further accounts can subsequently be obtained by including
the ListServiceAccountsResponse.next_page_token in a subsequent request.
pageToken: Optional pagination token returned in an earlier
ListServiceAccountsResponse.next_page_token.
"""
name = _messages.StringField(1, required=True)
pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(3)
class IamProjectsServiceAccountsSetIamPolicyRequest(_messages.Message):
"""A IamProjectsServiceAccountsSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
`resource` is usually specified as a path. For example, a Project
resource is specified as `projects/{project}`.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class IamProjectsServiceAccountsSignBlobRequest(_messages.Message):
"""A IamProjectsServiceAccountsSignBlobRequest object.
Fields:
name: The resource name of the service account in the following format:
`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. Using
`-` as a wildcard for the project will infer the project from the
account. The `account` value can be the `email` address or the
`unique_id` of the service account.
signBlobRequest: A SignBlobRequest resource to be passed as the request
body.
"""
name = _messages.StringField(1, required=True)
signBlobRequest = _messages.MessageField('SignBlobRequest', 2)
class IamProjectsServiceAccountsSignJwtRequest(_messages.Message):
"""A IamProjectsServiceAccountsSignJwtRequest object.
Fields:
name: The resource name of the service account in the following format:
`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. Using
`-` as a wildcard for the project will infer the project from the
account. The `account` value can be the `email` address or the
`unique_id` of the service account.
signJwtRequest: A SignJwtRequest resource to be passed as the request
body.
"""
name = _messages.StringField(1, required=True)
signJwtRequest = _messages.MessageField('SignJwtRequest', 2)
class IamProjectsServiceAccountsTestIamPermissionsRequest(_messages.Message):
"""A IamProjectsServiceAccountsTestIamPermissionsRequest object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. `resource` is usually specified as a path. For example, a
Project resource is specified as `projects/{project}`.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class ListServiceAccountKeysResponse(_messages.Message):
"""The service account keys list response.
Fields:
keys: The public keys for the service account.
"""
keys = _messages.MessageField('ServiceAccountKey', 1, repeated=True)
class ListServiceAccountsResponse(_messages.Message):
"""The service account list response.
Fields:
accounts: The list of matching service accounts.
nextPageToken: To retrieve the next page of results, set
ListServiceAccountsRequest.page_token to this value.
"""
accounts = _messages.MessageField('ServiceAccount', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class Policy(_messages.Message):
"""Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform resources. A `Policy`
consists of a list of `bindings`. A `Binding` binds a list of `members` to a
`role`, where the members can be user accounts, Google groups, Google
domains, and service accounts. A `role` is a named list of permissions
defined by IAM. **Example** { "bindings": [ {
"role": "roles/owner", "members": [
"user:mike@example.com", "group:admins@example.com",
"domain:google.com", "serviceAccount:my-other-
app@appspot.gserviceaccount.com", ] }, {
"role": "roles/viewer", "members": ["user:sean@example.com"]
} ] } For a description of IAM and its features, see the [IAM
developer's guide](https://cloud.google.com/iam).
Fields:
bindings: Associates a list of `members` to a `role`. Multiple `bindings`
must not be specified for the same `role`. `bindings` with no members
will result in an error.
etag: `etag` is used for optimistic concurrency control as a way to help
prevent simultaneous updates of a policy from overwriting each other. It
is strongly suggested that systems make use of the `etag` in the read-
modify-write cycle to perform policy updates in order to avoid race
conditions: An `etag` is returned in the response to `getIamPolicy`, and
systems are expected to put that etag in the request to `setIamPolicy`
to ensure that their change will be applied to the same version of the
policy. If no `etag` is provided in the call to `setIamPolicy`, then
the existing policy is overwritten blindly.
version: Version of the `Policy`. The default version is 0.
"""
bindings = _messages.MessageField('Binding', 1, repeated=True)
etag = _messages.BytesField(2)
version = _messages.IntegerField(3, variant=_messages.Variant.INT32)
class PolicyDelta(_messages.Message):
"""The difference delta between two policies.
Fields:
bindingDeltas: The delta for Bindings between two policies.
"""
bindingDeltas = _messages.MessageField('BindingDelta', 1, repeated=True)
class QueryGrantableRolesRequest(_messages.Message):
"""The grantable role query request.
Fields:
fullResourceName: Required. The full resource name to query from the list
of grantable roles. The name follows the Google Cloud Platform resource
format. For example, a Cloud Platform project with id `my-project` will
be named `//cloudresourcemanager.googleapis.com/projects/my-project`.
"""
fullResourceName = _messages.StringField(1)
class QueryGrantableRolesResponse(_messages.Message):
"""The grantable role query response.
Fields:
roles: The list of matching roles.
"""
roles = _messages.MessageField('Role', 1, repeated=True)
class Role(_messages.Message):
"""A role in the Identity and Access Management API.
Fields:
deleted: A boolean attribute.
description: Optional. A human-readable description for the role.
name: The name of the role. When Role is used in CreateRole, the role
name must not be set. When Role is used in output and other input such
as UpdateRole, the role name is the complete path, e.g.,
roles/logging.viewer for curated roles and
organizations/{ORGANIZATION_ID}/roles/logging.viewer for custom roles.
title: Optional. A human-readable title for the role. Typically this is
limited to 100 UTF-8 bytes.
trashed: A boolean attribute.
"""
deleted = _messages.BooleanField(1)
description = _messages.StringField(2)
name = _messages.StringField(3)
title = _messages.StringField(4)
trashed = _messages.BooleanField(5)
class ServiceAccount(_messages.Message):
"""A service account in the Identity and Access Management API. To create a
service account, specify the `project_id` and the `account_id` for the
account. The `account_id` is unique within the project, and is used to
generate the service account email address and a stable `unique_id`. If the
account already exists, the account's resource name is returned in
util::Status's ResourceInfo.resource_name in the format of
projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}. The caller
can use the name in other methods to access the account. All other methods
can identify the service account using the format
`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. Using `-`
as a wildcard for the project will infer the project from the account. The
`account` value can be the `email` address or the `unique_id` of the service
account.
Fields:
displayName: Optional. A user-specified description of the service
account. Must be fewer than 100 UTF-8 bytes.
email: @OutputOnly The email address of the service account.
etag: Used to perform a consistent read-modify-write.
name: The resource name of the service account in the following format:
`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.
Requests using `-` as a wildcard for the project will infer the project
from the `account` and the `account` value can be the `email` address or
the `unique_id` of the service account. In responses the resource name
will always be in the format
`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.
oauth2ClientId: @OutputOnly. The OAuth2 client id for the service account.
This is used in conjunction with the OAuth2 clientconfig API to make
three legged OAuth2 (3LO) flows to access the data of Google users.
projectId: @OutputOnly The id of the project that owns the service
account.
uniqueId: @OutputOnly The unique and stable id of the service account.
"""
displayName = _messages.StringField(1)
email = _messages.StringField(2)
etag = _messages.BytesField(3)
name = _messages.StringField(4)
oauth2ClientId = _messages.StringField(5)
projectId = _messages.StringField(6)
uniqueId = _messages.StringField(7)
class ServiceAccountKey(_messages.Message):
"""Represents a service account key. A service account has two sets of key-
pairs: user-managed, and system-managed. User-managed key-pairs can be
created and deleted by users. Users are responsible for rotating these keys
periodically to ensure security of their service accounts. Users retain the
private key of these key-pairs, and Google retains ONLY the public key.
System-managed key-pairs are managed automatically by Google, and rotated
daily without user intervention. The private key never leaves Google's
servers to maximize security. Public keys for all service accounts are also
published at the OAuth2 Service Account API.
Enums:
KeyAlgorithmValueValuesEnum: Specifies the algorithm (and possibly key
size) for the key.
PrivateKeyTypeValueValuesEnum: The output format for the private key. Only
provided in `CreateServiceAccountKey` responses, not in
`GetServiceAccountKey` or `ListServiceAccountKey` responses. Google
never exposes system-managed private keys, and never retains user-
managed private keys.
Fields:
keyAlgorithm: Specifies the algorithm (and possibly key size) for the key.
name: The resource name of the service account key in the following format
`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/keys/{key
}`.
privateKeyData: The private key data. Only provided in
`CreateServiceAccountKey` responses.
privateKeyType: The output format for the private key. Only provided in
`CreateServiceAccountKey` responses, not in `GetServiceAccountKey` or
`ListServiceAccountKey` responses. Google never exposes system-managed
private keys, and never retains user-managed private keys.
publicKeyData: The public key data. Only provided in
`GetServiceAccountKey` responses.
validAfterTime: The key can be used after this timestamp.
validBeforeTime: The key can be used before this timestamp.
"""
class KeyAlgorithmValueValuesEnum(_messages.Enum):
"""Specifies the algorithm (and possibly key size) for the key.
Values:
KEY_ALG_UNSPECIFIED: An unspecified key algorithm.
KEY_ALG_RSA_1024: 1k RSA Key.
KEY_ALG_RSA_2048: 2k RSA Key.
"""
KEY_ALG_UNSPECIFIED = 0
KEY_ALG_RSA_1024 = 1
KEY_ALG_RSA_2048 = 2
class PrivateKeyTypeValueValuesEnum(_messages.Enum):
"""The output format for the private key. Only provided in
`CreateServiceAccountKey` responses, not in `GetServiceAccountKey` or
`ListServiceAccountKey` responses. Google never exposes system-managed
private keys, and never retains user-managed private keys.
Values:
TYPE_UNSPECIFIED: Unspecified. Equivalent to
`TYPE_GOOGLE_CREDENTIALS_FILE`.
TYPE_PKCS12_FILE: PKCS12 format. The password for the PKCS12 file is
`notasecret`. For more information, see
https://tools.ietf.org/html/rfc7292.
TYPE_GOOGLE_CREDENTIALS_FILE: Google Credentials File format.
"""
TYPE_UNSPECIFIED = 0
TYPE_PKCS12_FILE = 1
TYPE_GOOGLE_CREDENTIALS_FILE = 2
keyAlgorithm = _messages.EnumField('KeyAlgorithmValueValuesEnum', 1)
name = _messages.StringField(2)
privateKeyData = _messages.BytesField(3)
privateKeyType = _messages.EnumField('PrivateKeyTypeValueValuesEnum', 4)
publicKeyData = _messages.BytesField(5)
validAfterTime = _messages.StringField(6)
validBeforeTime = _messages.StringField(7)
class SetIamPolicyRequest(_messages.Message):
"""Request message for `SetIamPolicy` method.
Fields:
policy: REQUIRED: The complete policy to be applied to the `resource`. The
size of the policy is limited to a few 10s of KB. An empty policy is a
valid policy but certain Cloud Platform services (such as Projects)
might reject them.
"""
policy = _messages.MessageField('Policy', 1)
class SignBlobRequest(_messages.Message):
"""The service account sign blob request.
Fields:
bytesToSign: The bytes to sign.
"""
bytesToSign = _messages.BytesField(1)
class SignBlobResponse(_messages.Message):
"""The service account sign blob response.
Fields:
keyId: The id of the key used to sign the blob.
signature: The signed blob.
"""
keyId = _messages.StringField(1)
signature = _messages.BytesField(2)
class SignJwtRequest(_messages.Message):
"""The service account sign JWT request.
Fields:
payload: The JWT payload to sign, a JSON JWT Claim set.
"""
payload = _messages.StringField(1)
class SignJwtResponse(_messages.Message):
"""The service account sign JWT response.
Fields:
keyId: The id of the key used to sign the JWT.
signedJwt: The signed JWT.
"""
keyId = _messages.StringField(1)
signedJwt = _messages.StringField(2)
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
bearer_token: OAuth bearer token.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
pp: Pretty-print response.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
bearer_token = _messages.StringField(4)
callback = _messages.StringField(5)
fields = _messages.StringField(6)
key = _messages.StringField(7)
oauth_token = _messages.StringField(8)
pp = _messages.BooleanField(9, default=True)
prettyPrint = _messages.BooleanField(10, default=True)
quotaUser = _messages.StringField(11)
trace = _messages.StringField(12)
uploadType = _messages.StringField(13)
upload_protocol = _messages.StringField(14)
class TestIamPermissionsRequest(_messages.Message):
"""Request message for `TestIamPermissions` method.
Fields:
permissions: The set of permissions to check for the `resource`.
Permissions with wildcards (such as '*' or 'storage.*') are not allowed.
For more information see [IAM
Overview](https://cloud.google.com/iam/docs/overview#permissions).
"""
permissions = _messages.StringField(1, repeated=True)
class TestIamPermissionsResponse(_messages.Message):
"""Response message for `TestIamPermissions` method.
Fields:
permissions: A subset of `TestPermissionsRequest.permissions` that the
caller is allowed.
"""
permissions = _messages.StringField(1, repeated=True)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv',
package=u'iam')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1',
package=u'iam')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2',
package=u'iam')
|
UTF-8
|
Python
| true | false | 30,377 |
py
| 1,342 |
iam_v1_messages.py
| 977 | 0.726043 | 0.717747 | 0 | 792 | 37.354798 | 94 |
kastnerkyle/research_megarepo
| 15,178,414,457,209 |
893c345dbd1ab1cbd3f94a1e4d6e2d2db6326d80
|
2ec26d004a653c0576594e48ac13dd71f539b30a
|
/crikey/conditional_audio/kmedians.py
|
953853b6d0d382f2f6d4ef307677afa039f3a50f
|
[] |
no_license
|
https://github.com/kastnerkyle/research_megarepo
|
6aca5b2c3b2413e0def1093b23f2826e3e7e5e97
|
ab182667650fd59b99f75d4b599d7ace77a3f30b
|
refs/heads/master
| 2021-01-17T20:31:52.250050 | 2016-12-27T01:28:54 | 2016-12-27T01:28:54 | 68,341,074 | 13 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Author: Kyle Kastner
# Thanks to LD for mathematical guidance
# License: BSD 3-Clause
# See pseudocode for minibatch kmeans
# https://algorithmicthoughts.wordpress.com/2013/07/26/machine-learning-mini-batch-k-means/
# Unprincipled and hacky recentering to median at the end of function
import numpy as np
from scipy.cluster.vq import vq
def minibatch_kmedians(X, M=None, n_components=10, n_iter=100,
minibatch_size=100, random_state=None):
n_clusters = n_components
if M is not None:
assert M.shape[0] == n_components
assert M.shape[1] == X.shape[1]
if random_state is None:
random_state = np.random.RandomState(random_state)
elif not hasattr(random_state, 'shuffle'):
# Assume integer passed
random_state = np.random.RandomState(int(random_state))
if M is None:
ind = np.arange(len(X)).astype('int32')
random_state.shuffle(ind)
M = X[ind[:n_clusters]]
center_counts = np.zeros(n_clusters)
    pts = list(np.arange(0, len(X), minibatch_size)) + [len(X)]
if len(pts) == 1:
# minibatch size > dataset size case
pts = [0, None]
    minibatch_indices = list(zip(pts[:-1], pts[1:]))  # materialize so it survives multiple epochs
for i in range(n_iter):
for mb_s, mb_e in minibatch_indices:
Xi = X[mb_s:mb_e]
# Broadcasted Manhattan distance
# Could be made faster with einsum perhaps
centers = np.abs(Xi[:, None, :] - M[None]).sum(
axis=-1).argmin(axis=1)
            # Count how many points each center has absorbed so far; this
            # drives the per-center learning-rate schedule below.
            for c in centers:
                center_counts[c] += 1
scaled_lr = 1. / center_counts[centers]
Mi = M[centers]
scaled_lr = scaled_lr[:, None]
# Gradient of abs
Mi = Mi - scaled_lr * ((Xi - Mi) / np.sqrt((Xi - Mi) ** 2 + 1E-9))
M[centers] = Mi
# Reassign centers to nearest datapoint
mem, _ = vq(M, X)
M = X[mem]
return M
if __name__ != "__main_":
random_state = np.random.RandomState(1999)
Xa = random_state.randn(200, 2)
Xb = .25 * random_state.randn(200, 2) + np.array((5, 3))
X = np.vstack((Xa, Xb))
ind = np.arange(len(X))
random_state.shuffle(ind)
X = X[ind]
M1 = minibatch_kmedians(X, n_iter=1, random_state=random_state)
M2 = minibatch_kmedians(X, M1, n_iter=1000, random_state=random_state)
    print("Final cluster centers:")
    print(M2)
|
UTF-8
|
Python
| false | false | 2,472 |
py
| 85 |
kmedians.py
| 76 | 0.591424 | 0.568366 | 0 | 67 | 35.895522 | 91 |
matthewdhoffman/mattenv
| 4,681,514,388,400 |
ca2f5469dc4cb7398cd564a20f8dd571900ab0ca
|
3ca064a57101e3c31e12098e4cc7f4fd4db2417d
|
/configs/.ipython/profile_default/startup/startup.py
|
7e9336aa1f83acb6ba8816e8040f3eecfba54533
|
[] |
no_license
|
https://github.com/matthewdhoffman/mattenv
|
2e0d4bce9e5eb4cf5da132864ae4672e52e2f82d
|
c46a0bd0efc306e8a2d5cbb6591ed5bb7225be9f
|
refs/heads/master
| 2020-06-16T07:32:36.023299 | 2016-12-21T21:41:53 | 2016-12-21T21:41:53 | 75,233,900 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os, sys, re, cPickle, gzip, time, collections, IPython, h5py, pandas
from itertools import *
import numpy as np
import scipy, scipy.special, scipy.stats, scipy.weave
sys.path.append(os.path.expanduser('~/mattenv/python'))
from mdhlib import *
from matplotlib.pyplot import *
ip = IPython.get_ipython()
ip.magic('matplotlib inline')
|
UTF-8
|
Python
| false | false | 341 |
py
| 4 |
startup.py
| 2 | 0.759531 | 0.756598 | 0 | 10 | 33 | 75 |
liuyang9643/AI_toy
| 4,552,665,334,813 |
9d06c650dbb4f0cc415ee22aa0a396db984b37c3
|
aada3c89a69d3d24bd1526b60edefb7a17766fce
|
/serv/devices.py
|
bb08aba526ca14cc0c0a82a227a1b3ed9ff0b815
|
[] |
no_license
|
https://github.com/liuyang9643/AI_toy
|
d67530a2ba89f725417ab30ef6494c40cc0a530e
|
dfb1f939aeecc591c318e4e49c3de9ff7328a068
|
refs/heads/main
| 2023-05-13T16:06:53.862771 | 2021-06-04T11:26:33 | 2021-06-04T11:26:33 | 373,808,200 | 0 | 0 | null | false | 2021-06-04T11:26:34 | 2021-06-04T10:38:53 | 2021-06-04T10:49:37 | 2021-06-04T11:26:33 | 0 | 0 | 0 | 0 |
Python
| false | false |
from bson import ObjectId
from flask import Blueprint, jsonify, request
from setting import MongoDB, RET
device = Blueprint("device", __name__)
# Scan a QR code to bind a toy
@device.route("/scan_qr", methods=["POST"])
def scan_qr():
device_key = request.form.to_dict()
    # Check whether this device key has been licensed
device_info = MongoDB.devices.find_one(device_key)
    # Licensed device
if device_info:
toy_info = MongoDB.toys.find_one(device_key)
if toy_info:
RET["code"] = 2
RET["msg"] = f"请求添加{toy_info.get('toy_name')}为好友"
RET["data"] = {"toy_id": str(toy_info.get("_id"))}
return jsonify(RET)
RET["code"] = 0
RET["msg"] = "来啦老板"
RET["data"] = device_key
return jsonify(RET)
    # Unlicensed device
else:
RET["code"] = 1
RET["msg"] = "你花钱买玩具了吗?"
RET["data"] = {}
return jsonify(RET)
# Bind a toy to a user
@device.route("/bind_toy", methods=["POST"])
def bind_toy():
device_info = request.form.to_dict()
device_info["avatar"] = "toy.jpg"
device_info["bind_user"] = device_info.pop("user_id")
user_info = MongoDB.users.find_one({"_id": ObjectId(device_info["bind_user"])})
    # user_list is still missing the user's _id at this point
chat_window = MongoDB.chats.insert_one({"user_list": [], "chat_list": []})
device_info["friend_list"] = [
{
"friend_id": device_info["bind_user"],
"friend_nick": user_info.get("nickname"),
"friend_remark": device_info.pop("remark"),
"friend_avatar": user_info.get("avatar"),
"friend_chat": str(chat_window.inserted_id),
"friend_type": "app"
}
]
toy = MongoDB.toys.insert_one(device_info)
toy_id = str(toy.inserted_id)
    # Now that we have toy_id, the user_list in the chats collection can be filled in
user_list = [device_info["bind_user"], toy_id]
MongoDB.chats.update_one({"_id": chat_window.inserted_id}, {"$set": {"user_list": user_list}})
    # The user is already on the toy's friend list; should the toy be added to the user's friend list as well?
user_add_toy = {
"friend_id": toy_id,
"friend_nick": device_info.get("baby_name"),
"friend_remark": device_info.get("toy_name"),
"friend_avatar": device_info.get("avatar"),
"friend_chat": str(chat_window.inserted_id),
"friend_type": "toy"
}
MongoDB.users.update_one({"_id": ObjectId(device_info["bind_user"])},
{"$push": {"bind_toys": toy_id, "friend_list": user_add_toy}})
RET["code"] = 0
RET["msg"] = "老板再来一个呗~"
RET["data"] = {}
return jsonify(RET)
# Toy power-on
@device.route("/open_toy", methods=["POST"])
def open_toy():
device_info = request.form.to_dict()
is_device = MongoDB.devices.find_one(device_info)
    # Is this a licensed device?
if is_device:
toy = MongoDB.toys.find_one(device_info)
        # Has the toy been bound to a user yet?
if toy:
toy_ret = {
"code": 0,
"music": "Success.mp3",
"toy_id": str(toy.get("_id")),
"name": toy.get("toy_name")
}
return jsonify(toy_ret)
        # Device exists but has not been bound to a user yet
else:
toy_ret = {
"code": 1,
"music": "Nobind.mp3"
}
return jsonify(toy_ret)
    # Device not licensed (or device_key invalid); contact the distributor
else:
toy_ret = {
"code": 2,
"music": "Nolic.mp3"
}
return jsonify(toy_ret)
# Get the list of toys bound to a user
@device.route("/toy_list", methods=["POST"])
def toy_list():
user_id = request.form.get("_id")
toylist = list(MongoDB.toys.find({"bind_user": user_id}))
for index, item in enumerate(toylist):
toylist[index]["_id"] = str(item.get("_id"))
RET["code"] = 0
RET["msg"] = "获取Toy列表"
RET["data"] = toylist
return jsonify(RET)
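# Registration sketch (the Flask app object lives in another module of this
# project; the name "app" and the url_prefix below are assumptions):
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(device, url_prefix="/device")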
|
UTF-8
|
Python
| false | false | 4,243 |
py
| 17 |
devices.py
| 14 | 0.518376 | 0.515549 | 0 | 138 | 26.195652 | 98 |
google/mediapipe
| 704,374,646,052 |
95b920cadd4c1735c8e660abb7924836902a2e64
|
a64eeba4575eee849b459dab9c7000350ee636f1
|
/mediapipe/model_maker/python/core/utils/loss_functions_test.py
|
3a14567edefe4d337a8fce3b234979d4a1a5284e
|
[
"Apache-2.0",
"dtoa"
] |
permissive
|
https://github.com/google/mediapipe
|
0b6b56aff8bacc7b680c205f0788f1b49dd33f5e
|
007824594bf1d07c7c1467df03a43886f8a4b3ad
|
refs/heads/master
| 2023-09-01T16:11:21.218234 | 2023-09-01T11:55:21 | 2023-09-01T11:57:34 | 191,820,100 | 23,940 | 5,164 |
Apache-2.0
| false | 2023-09-14T09:01:36 | 2019-06-13T19:16:41 | 2023-09-14T08:55:03 | 2023-09-14T09:01:36 | 593,203 | 23,335 | 4,756 | 293 |
C++
| false | false |
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import tempfile
from typing import Dict, Optional, Sequence
from unittest import mock as unittest_mock
from absl.testing import parameterized
import tensorflow as tf
from mediapipe.model_maker.python.core.utils import loss_functions
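# For reference (Lin et al., 2017), the focal loss exercised below is
#   FL(p_t) = -(1 - p_t)**gamma * log(p_t)
# so gamma == 0 reduces to ordinary cross-entropy (checked by the first test)
# and larger gamma increasingly down-weights well-classified examples (checked
# by the monotonicity test).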
class FocalLossTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
dict(testcase_name='no_sample_weight', sample_weight=None),
dict(
testcase_name='with_sample_weight',
sample_weight=tf.constant([0.2, 0.2, 0.3, 0.1, 0.2])))
def test_focal_loss_gamma_0_is_cross_entropy(
self, sample_weight: Optional[tf.Tensor]):
y_true = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 1,
0]])
y_pred = tf.constant([[0.7, 0.1, 0.2], [0.6, 0.3, 0.1], [0.1, 0.5, 0.4],
[0.8, 0.1, 0.1], [0.4, 0.5, 0.1]])
tf_cce = tf.keras.losses.CategoricalCrossentropy(
from_logits=False,
reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE)
focal_loss = loss_functions.FocalLoss(gamma=0)
self.assertAllClose(
tf_cce(y_true, y_pred, sample_weight=sample_weight),
focal_loss(y_true, y_pred, sample_weight=sample_weight), 1e-4)
def test_focal_loss_with_sample_weight(self):
y_true = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 1,
0]])
y_pred = tf.constant([[0.7, 0.1, 0.2], [0.6, 0.3, 0.1], [0.1, 0.5, 0.4],
[0.8, 0.1, 0.1], [0.4, 0.5, 0.1]])
focal_loss = loss_functions.FocalLoss(gamma=0)
sample_weight = tf.constant([0.2, 0.2, 0.3, 0.1, 0.2])
self.assertGreater(
focal_loss(y_true=y_true, y_pred=y_pred),
focal_loss(y_true=y_true, y_pred=y_pred, sample_weight=sample_weight))
@parameterized.named_parameters(
dict(testcase_name='gt_0.1', y_pred=tf.constant([0.1, 0.9])),
dict(testcase_name='gt_0.3', y_pred=tf.constant([0.3, 0.7])),
dict(testcase_name='gt_0.5', y_pred=tf.constant([0.5, 0.5])),
dict(testcase_name='gt_0.7', y_pred=tf.constant([0.7, 0.3])),
dict(testcase_name='gt_0.9', y_pred=tf.constant([0.9, 0.1])),
)
def test_focal_loss_decreases_with_increasing_gamma(self, y_pred: tf.Tensor):
y_true = tf.constant([[1, 0]])
focal_loss_gamma_0 = loss_functions.FocalLoss(gamma=0)
loss_gamma_0 = focal_loss_gamma_0(y_true, y_pred)
focal_loss_gamma_0p5 = loss_functions.FocalLoss(gamma=0.5)
loss_gamma_0p5 = focal_loss_gamma_0p5(y_true, y_pred)
focal_loss_gamma_1 = loss_functions.FocalLoss(gamma=1)
loss_gamma_1 = focal_loss_gamma_1(y_true, y_pred)
focal_loss_gamma_2 = loss_functions.FocalLoss(gamma=2)
loss_gamma_2 = focal_loss_gamma_2(y_true, y_pred)
focal_loss_gamma_5 = loss_functions.FocalLoss(gamma=5)
loss_gamma_5 = focal_loss_gamma_5(y_true, y_pred)
self.assertGreater(loss_gamma_0, loss_gamma_0p5)
self.assertGreater(loss_gamma_0p5, loss_gamma_1)
self.assertGreater(loss_gamma_1, loss_gamma_2)
self.assertGreater(loss_gamma_2, loss_gamma_5)
@parameterized.named_parameters(
dict(testcase_name='index_0', true_class=0),
dict(testcase_name='index_1', true_class=1),
dict(testcase_name='index_2', true_class=2),
)
def test_focal_loss_class_weight_is_applied(self, true_class: int):
class_weight = [1.0, 3.0, 10.0]
y_pred = tf.constant([[1.0, 1.0, 1.0]]) / 3.0
y_true = tf.one_hot(true_class, depth=3)[tf.newaxis, :]
expected_loss = -math.log(1.0 / 3.0) * class_weight[true_class]
loss_fn = loss_functions.FocalLoss(gamma=0, class_weight=class_weight)
loss = loss_fn(y_true, y_pred)
self.assertNear(loss, expected_loss, 1e-4)
class SparseFocalLossTest(tf.test.TestCase):
def test_sparse_focal_loss_matches_focal_loss(self):
num_classes = 2
y_pred = tf.constant([[0.8, 0.2], [0.3, 0.7]])
y_true = tf.constant([1, 0])
y_true_one_hot = tf.one_hot(y_true, num_classes)
for gamma in [0.0, 0.5, 1.0]:
expected_loss_fn = loss_functions.FocalLoss(gamma=gamma)
loss_fn = loss_functions.SparseFocalLoss(
gamma=gamma, num_classes=num_classes
)
expected_loss = expected_loss_fn(y_true_one_hot, y_pred)
loss = loss_fn(y_true, y_pred)
self.assertNear(loss, expected_loss, 1e-4)
class MockPerceptualLoss(loss_functions.PerceptualLoss):
"""A mock class with implementation of abstract methods for testing."""
def __init__(
self,
use_mock_loss_op: bool = False,
feature_weight: Optional[Sequence[float]] = None,
loss_weight: Optional[loss_functions.PerceptualLossWeight] = None,
):
super().__init__(feature_weight=feature_weight, loss_weight=loss_weight)
if use_mock_loss_op:
self._loss_op = lambda x, y: tf.math.reduce_mean(x - y)
def _compute_features(self, img: tf.Tensor) -> Sequence[tf.Tensor]:
return [tf.random.normal(shape=(1, 8, 8, 3))] * 5
class PerceptualLossTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
self._img1 = tf.fill(dims=(8, 8), value=0.2)
self._img2 = tf.fill(dims=(8, 8), value=0.8)
def test_invalid_feature_weight_raise_value_error(self):
with self.assertRaisesRegex(
ValueError,
'Input feature weight length 2 is smaller than feature length 5',
):
MockPerceptualLoss(feature_weight=[1.0, 2.0])(
img1=self._img1, img2=self._img2
)
@parameterized.named_parameters(
dict(
testcase_name='default_loss_weight_and_loss_op',
use_mock_loss_op=False,
feature_weight=None,
loss_weight=None,
loss_values={
'style_loss': 0.032839,
'content_loss': 5.639870,
},
),
dict(
testcase_name='style_loss_weight_is_0_default_loss_op',
use_mock_loss_op=False,
feature_weight=None,
loss_weight=loss_functions.PerceptualLossWeight(style=0),
loss_values={
'style_loss': 0,
'content_loss': 5.639870,
},
),
dict(
testcase_name='content_loss_weight_is_0_default_loss_op',
use_mock_loss_op=False,
feature_weight=None,
loss_weight=loss_functions.PerceptualLossWeight(content=0),
loss_values={
'style_loss': 0.032839,
'content_loss': 0,
},
),
dict(
testcase_name='customized_loss_weight_default_loss_op',
use_mock_loss_op=False,
feature_weight=None,
loss_weight=loss_functions.PerceptualLossWeight(
style=1.0, content=2.0
),
loss_values={'style_loss': 0.032839, 'content_loss': 11.279739},
),
dict(
testcase_name=(
'customized_feature_weight_and_loss_weight_default_loss_op'
),
use_mock_loss_op=False,
feature_weight=[1.0, 2.0, 3.0, 4.0, 5.0],
loss_weight=loss_functions.PerceptualLossWeight(
style=1.0, content=2.0
),
loss_values={'style_loss': 0.164193, 'content_loss': 33.839218},
),
dict(
testcase_name='no_loss_change_if_extra_feature_weight_provided',
use_mock_loss_op=False,
feature_weight=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
loss_weight=loss_functions.PerceptualLossWeight(
style=1.0, content=2.0
),
loss_values={
'style_loss': 0.164193,
'content_loss': 33.839218,
},
),
dict(
testcase_name='customized_loss_weight_custom_loss_op',
use_mock_loss_op=True,
feature_weight=None,
loss_weight=loss_functions.PerceptualLossWeight(
style=1.0, content=2.0
),
loss_values={'style_loss': 0.000395, 'content_loss': -1.533469},
),
)
  def test_weighted_perceptual_loss(
self,
use_mock_loss_op: bool,
feature_weight: Sequence[float],
loss_weight: loss_functions.PerceptualLossWeight,
loss_values: Dict[str, float],
):
perceptual_loss = MockPerceptualLoss(
use_mock_loss_op=use_mock_loss_op,
feature_weight=feature_weight,
loss_weight=loss_weight,
)
loss = perceptual_loss(img1=self._img1, img2=self._img2)
self.assertEqual(list(loss.keys()), ['style_loss', 'content_loss'])
self.assertNear(loss['style_loss'], loss_values['style_loss'], 1e-4)
self.assertNear(loss['content_loss'], loss_values['content_loss'], 1e-4)
class VGGPerceptualLossTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
# Mock tempfile.gettempdir() to be unique for each test to avoid race
# condition when downloading model since these tests may run in parallel.
mock_gettempdir = unittest_mock.patch.object(
tempfile,
'gettempdir',
return_value=self.create_tempdir(),
autospec=True,
)
self.mock_gettempdir = mock_gettempdir.start()
self.addCleanup(mock_gettempdir.stop)
self._img1 = tf.fill(dims=(1, 256, 256, 3), value=0.1)
self._img2 = tf.fill(dims=(1, 256, 256, 3), value=0.9)
@parameterized.named_parameters(
dict(
testcase_name='default_loss_weight',
loss_weight=None,
loss_values={
'style_loss': 5.8363257e-06,
'content_loss': 1.7016045,
},
),
dict(
testcase_name='customized_loss_weight',
loss_weight=loss_functions.PerceptualLossWeight(
style=10.0, content=20.0
),
loss_values={
'style_loss': 5.8363257e-05,
'content_loss': 34.03208,
},
),
)
def test_vgg_perceptual_loss(self, loss_weight, loss_values):
vgg_loss = loss_functions.VGGPerceptualLoss(loss_weight=loss_weight)
loss = vgg_loss(img1=self._img1, img2=self._img2)
self.assertEqual(list(loss.keys()), ['style_loss', 'content_loss'])
self.assertNear(
loss['style_loss'],
loss_values['style_loss'],
loss_values['style_loss'] / 1e5,
)
self.assertNear(
loss['content_loss'],
loss_values['content_loss'],
loss_values['content_loss'] / 1e5,
)
class ImagePerceptualQualityLossTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
# Mock tempfile.gettempdir() to be unique for each test to avoid race
# condition when downloading model since these tests may run in parallel.
mock_gettempdir = unittest_mock.patch.object(
tempfile,
'gettempdir',
return_value=self.create_tempdir(),
autospec=True,
)
self.mock_gettempdir = mock_gettempdir.start()
self.addCleanup(mock_gettempdir.stop)
self._img1 = tf.fill(dims=(1, 256, 256, 3), value=0.1)
self._img2 = tf.fill(dims=(1, 256, 256, 3), value=0.9)
@parameterized.named_parameters(
dict(
testcase_name='default_loss_weight',
loss_weight=None,
loss_value=2.501612,
),
dict(
testcase_name='customized_loss_weight_zero_l1',
loss_weight=loss_functions.PerceptualLossWeight(
l1=0.0, style=10.0, content=20.0
),
loss_value=34.032139,
),
dict(
testcase_name='customized_loss_weight_nonzero_l1',
loss_weight=loss_functions.PerceptualLossWeight(
l1=10.0, style=10.0, content=20.0
),
loss_value=42.032139,
),
)
def test_image_perceptual_quality_loss(self, loss_weight, loss_value):
image_quality_loss = loss_functions.ImagePerceptualQualityLoss(
loss_weight=loss_weight
)
loss = image_quality_loss(img1=self._img1, img2=self._img2)
self.assertNear(loss, loss_value, 1e-4)
if __name__ == '__main__':
tf.test.main()
|
UTF-8
|
Python
| false | false | 12,605 |
py
| 2,696 |
loss_functions_test.py
| 1,815 | 0.607061 | 0.563745 | 0 | 345 | 35.536232 | 79 |
DomNelson/wf_coalescent
| 6,562,710,033,661 |
56aeae6160e58a339db772cd9786d4c1e097409e
|
6eb4a52d22ae21126f1ae8a0f61c367e1c38e9c1
|
/scripts/admixture_props.py
|
216f4f5e68b99bcefea6c1554d2b3272d8052d8d
|
[] |
no_license
|
https://github.com/DomNelson/wf_coalescent
|
b7e863d44d5a90db6c8db8f97bd535d579b9b248
|
842a3f22c075b6499b13f214adfb752b80c4e4a4
|
refs/heads/master
| 2022-04-22T04:42:45.070921 | 2020-03-30T18:25:35 | 2020-03-30T18:25:35 | 123,977,115 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys, os
sys.path.append('../../msprime')
sys.path.append('../../msprime/lib/subprojects/git-submodules/tskit/python')
import msprime
import collections
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import argparse
import pandas as pd
from tqdm import tqdm
def diploid_gravel_ancestry_variance(T, m, L, K, N):
"""
T - admixture time
m - migration rate
L - total length
K - number of chromosomes
N - effective population size
"""
A = m * (1 - m) / 2 ** (T - 1)
B_num = 2 * m * (1 - m) * (1 - 1 / (2 * N)) ** (T - 1)
B_denom = 2 * (K + 1 * (T - 2) * L)
B = B_num / B_denom
return A + B
def haploid_gravel_ancestry_variance(T, m, L, K, N):
"""
T - admixture time
m - migration rate
L - total length
K - number of chromosomes
N - effective population size
"""
A = m * (1 - m) / 2 ** (T - 1)
B_num = 2 * m * (1 - m) * (1 - 1 / (2 * N)) ** (T - 1)
B_denom = 1 * (K + 1 * (T - 2) * L)
B = B_num / B_denom
return A + B
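# Editor's note: a hedged worked example of the haploid formula above, using
# an illustrative T=10 with this script's defaults (m=0.3, L ~= 35.1 Morgans
# total, K=22 chromosomes, N=80):
#   A = 0.3 * 0.7 / 2**9                          ~= 4.1e-4
#   B = 0.42 * (1 - 1/160)**9 / (22 + 8 * 35.1)   ~= 1.3e-3
# giving a total variance of roughly 1.7e-3. Values are approximate.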
def get_positions_rates(chrom_lengths, rho):
"""
Takes a list of chromosome lengths and returns lists of positions and
rates to pass to msprime.RecombinationMap
"""
positions = []
rates = []
total_length = 0
for length in chrom_lengths:
positions.extend([int(total_length), int(total_length) + int(length) - 1])
rates.extend([rho, 0.5])
total_length += length
rates[-1] = 0
return positions, rates
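# Editor's note: a small illustration of the mapping above. For toy lengths
# [3, 2] and rho=1e-8, get_positions_rates returns
#   positions = [0, 2, 3, 4]
#   rates     = [1e-8, 0.5, 1e-8, 0]
# i.e. free recombination (rate 0.5) between chromosomes and rate 0 past the
# final position, as msprime.RecombinationMap expects.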
def get_whole_genome_positions_rates_loci(args, rho=1e-8):
all_lengths_morgans = [2.77693825, 2.633496065, 2.24483368, 2.12778391,
2.03765845, 1.929517394, 1.867959329, 1.701765192, 1.68073935,
1.789473882, 1.594854258, 1.72777271, 1.26940475, 1.16331251,
1.2554709, 1.348911043, 1.29292106, 1.18978483, 1.077960694,
1.079243479, 0.61526812, 0.72706815]
all_lengths = [x * 1e8 for x in all_lengths_morgans]
chrom_lengths = all_lengths[:args.num_chroms]
positions, rates = get_positions_rates(chrom_lengths, rho)
num_loci = positions[-1]
## HACK: This is to avoid bad edge intervals (right <= left) when
## simulating whole genomes. Possible overflow / floating point precision
## issue
if args.discretize_hack:
num_loci = int(num_loci / 100)
print("Scaling to", num_loci, "loci")
return positions, rates, num_loci
def get_ind_tracts(ts, max_time):
ind_tracts = collections.defaultdict(list)
key = lambda x: x.left
migrations = sorted(ts.migrations(), key=key)
trees = ts.trees()
t = next(trees)
for migration in migrations:
if migration.time > max_time:
continue
node = migration.node
length = migration.right - migration.left
while t.interval[1] <= migration.left:
t = next(trees)
assert t.interval[0] <= migration.left and t.interval[1] > migration.left
samples = t.get_leaves(node)
for s in samples:
ind_tracts[s].append(length)
return ind_tracts
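# Editor's note: ind_tracts maps each leaf sample to the lengths of the
# migrant tracts it inherits. Only migration records at or before max_time
# (the admixture pulse itself) contribute, so sum(ind_tracts[s]) divided by
# the sequence length gives sample s's migrant-ancestry proportion below.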
def get_ancestry_props(replicates, max_time, num_replicates):
ancestry_props = []
with tqdm(total=num_replicates, desc=str(max_time)) as pbar:
for ts in replicates:
ind_tracts = get_ind_tracts(ts, max_time)
total_length = ts.get_sequence_length()
replicate_props = []
samples = iter(ts.samples())
# while True:
# try:
# sample = next(samples)
# sample_copy = next(samples)
# except StopIteration:
# break
# len1 = sum(ind_tracts[sample])
# len2 = sum(ind_tracts[sample_copy])
# prop = (len1 + len2) / (2 * total_length)
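            # Editor's note: this loop apparently keeps one haploid genome per
            # diploid individual -- the loop variable takes the first copy of
            # each pair and next() advances to the second, which is the copy
            # actually scored (compare the commented-out diploid version above).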
for sample in samples:
sample = next(samples)
prop = sum(ind_tracts[sample])
prop = prop / total_length
replicate_props.append(prop)
ancestry_props.append(replicate_props)
pbar.update(1)
return ancestry_props
def simulate(args, model, recombination_map, admixture_time):
population_configurations = [
msprime.PopulationConfiguration(
sample_size=args.sample_size,
initial_size=args.Ne
),
msprime.PopulationConfiguration(
sample_size=0,
initial_size=args.Ne
)
]
demographic_events = [
msprime.MassMigration(
time=admixture_time,
source=0,
dest=1,
proportion=args.admixture_prop
),
msprime.MigrationRateChange(time=admixture_time, rate=0),
msprime.MassMigration(
time=admixture_time + 1,
source=1,
dest=0,
proportion=1
),
msprime.PopulationParametersChange(
time=admixture_time + 2,
initial_size=1.0,
population_id=0
)
]
replicates = msprime.simulate(
recombination_map=recombination_map,
demographic_events=demographic_events,
population_configurations=population_configurations,
model=model,
record_migrations=True,
num_replicates=args.replicates
)
return replicates
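# Editor's note: read backwards in time, the events above implement a single
# admixture pulse -- at admixture_time each lineage in pop 0 jumps to pop 1
# with probability admixture_prop (these recorded migrations are what
# get_ind_tracts counts as migrant ancestry); one generation later pop 1 is
# merged back into pop 0, and the ancestral size is then reset to 1.0,
# apparently to force rapid coalescence of the remaining ancient lineages.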
def get_output_suffix(args, admixture_time):
suffix = 'ancestry_variance'
suffix += 'Ne' + '_' + str(args.Ne) + '_'
suffix += 'admix_time' + '_' + str(admixture_time) + '_'
suffix += 'admix_prop' + '_' + str(args.admixture_prop) + '_'
suffix += 'nchroms' + '_' + str(args.num_chroms)
return suffix
def set_paper_params(args):
print("*" * 79)
print("Reproducing figure from paper - ignoring all args" +\
" except --out_dir")
print("*" * 79)
# admixture_times = [x for x in range(100, 130, 10)]
    # admixture_times = [x for x in range(1, 5)]
    admixture_times = [x for x in range(1, 20)]
admixture_times += [x for x in range(20, 50, 5)]
admixture_times += [x for x in range(50, 100, 10)]
admixture_times += [x for x in range(100, 200, 10)]
admixture_times += [x for x in range(200, 525, 25)]
variance_dir = '../results/variance/combined_haploid_mig/'
dtwf_file = variance_dir + 'ancestry_varianceNe_80_admix_time_500' +\
'_admix_prop_0.3_nchroms_22_replicates_dtwf.txt'
hudson_file = variance_dir + 'ancestry_varianceNe_80_admix_time_500' +\
'_admix_prop_0.3_nchroms_22_replicates_hudson.txt'
paper_args = argparse.Namespace(
Ne=80,
sample_size=80,
dtwf_file=dtwf_file,
hudson_file=hudson_file,
admixture_prop=0.3,
num_chroms=22,
replicates=1,
CI_width=0.95,
out_dir=args.out_dir,
discretize_hack=True,
plot=True,
)
return paper_args, admixture_times
def get_output_prefix(args, admixture_times):
    ## The original signature took out_dir but silently read the global args;
    ## taking args directly removes the hidden global dependency.
    basedir, _ = os.path.splitext(args.out_dir)
    suffix = get_output_suffix(args, admixture_time=admixture_times[-1])
    return os.path.join(basedir, suffix)
def get_simulation_variance(args, model, admixture_times, rec_map):
nrows = len(admixture_times)
ncols = args.replicates
variance_array = np.zeros([nrows, ncols])
    prefix = get_output_prefix(args, admixture_times)
for i, t in enumerate(admixture_times):
replicates = simulate(args, model, rec_map, admixture_time=t)
props_replicates = get_ancestry_props(
replicates,
max_time=t,
num_replicates=args.replicates)
for j, props in enumerate(props_replicates):
            variance_array[i, j] = np.var(props)  # / (1 - 1 / len(props))
model_df = pd.DataFrame(
variance_array,
columns=range(args.replicates),
index=admixture_times)
return model_df
def get_mean_variance_with_CIs(model_df, CI_interval=0.95):
assert 0 < CI_interval <= 1
low_CI = 0.5 - (CI_interval / 2)
high_CI = 0.5 + (CI_interval / 2)
## Long-winded but other methods don't seem to allow specifying
## percentiles
percentiles = [low_CI, high_CI]
CI_df = model_df.transpose().describe(percentiles).transpose()
## This is awful. Pandas is inconsistent in how it handles trailing
## zeros when rounding percentiles and converting to strings for
## column names. This seems to handle all cases.
low_CI_column = str(np.round(low_CI * 100, 1)).strip('0.') + '%'
high_CI_column = str(np.round(high_CI * 100, 1)).strip('0.') + '%'
try:
CI_df = CI_df[['mean', low_CI_column, high_CI_column]]
except KeyError:
low_CI_column = str(np.round(low_CI * 100, 1)) + '%'
CI_df = CI_df[['mean', low_CI_column, high_CI_column]]
return CI_df
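# Editor's note: an example of the column-name quirk handled above, assuming
# pandas' usual describe() labels: CI_interval=0.95 yields percentile columns
# '2.5%' and '97.5%' (matched by the try branch), while CI_interval=0.66
# rounds to '17.0'/'83.0', whose trailing '.0' pandas trims to '17%'/'83%' --
# hence the strip('0.') pass, with the KeyError fallback re-adding the
# untrimmed name for widths where strip() removes too much.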
def format_CIs_for_plot(CI_df):
errs = CI_df.drop(columns='mean').transpose().values
errs = errs.reshape(1, 2, -1)
errs[:, 0, :] = CI_df['mean'].transpose().values - errs[:, 0, :]
errs[:, 1, :] = errs[:, 1, :] - CI_df['mean'].transpose().values
return errs
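# Editor's note: the (1, 2, N) shape produced above appears to match the
# asymmetric yerr convention used by DataFrame.plot for a single column --
# errs[:, 0, :] holds distances from the mean down to the lower CI bound and
# errs[:, 1, :] distances up to the upper bound.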
def parse_model_name(name):
ret = name
if name == 'dtwf':
ret = 'msprime (WF)'
elif name == 'hudson':
ret = 'msprime (Hudson)'
elif 'hybrid' in name:
assert '_' in name
_, num_gens = name.split('_')
ret = 'hybrid (' + num_gens + ' WF generations)'
return ret
def main(args):
if args.paper_params:
args, admixture_times = set_paper_params(args)
else:
admixture_range = [int(x.strip()) for x in args.admixture_range.split(',')]
admixture_times = range(*admixture_range)
    prefix = get_output_prefix(args, admixture_times)
positions, rates, num_loci = get_whole_genome_positions_rates_loci(args)
rec_map = msprime.RecombinationMap(
positions, rates, num_loci=num_loci)
dfs = {}
## DTWF variance
if args.dtwf_file is not None:
df = pd.read_csv(args.dtwf_file, index_col=0)
dtwf_times = list(df.index)
else:
df = get_simulation_variance(args, 'dtwf', admixture_times, rec_map)
df.to_csv(prefix + '_replicates_dtwf.txt')
CI_df = get_mean_variance_with_CIs(df, CI_interval=args.CI_width)
errs = format_CIs_for_plot(CI_df)
dfs['dtwf'] = [CI_df, errs]
## Hudson variance
    if args.hudson_file is not None:
        df = pd.read_csv(args.hudson_file, index_col=0)
        hudson_times = list(df.index)
        ## To help compare to the proper theory curve
        ## TODO: Check other args as well
        if args.dtwf_file is not None:
            assert hudson_times == dtwf_times
        admixture_times = hudson_times
    else:
        ## Without this branch the Hudson CIs below would silently reuse the
        ## DTWF dataframe.
        df = get_simulation_variance(args, 'hudson', admixture_times, rec_map)
        df.to_csv(prefix + '_replicates_hudson.txt')
CI_df = get_mean_variance_with_CIs(df, CI_interval=args.CI_width)
errs = format_CIs_for_plot(CI_df)
dfs['hudson'] = [CI_df, errs]
## Tracts expected variance
length_in_morgans = positions[-1] / 1e8
diploid_gravel_variance = [
diploid_gravel_ancestry_variance(
T,
args.admixture_prop,
length_in_morgans,
args.num_chroms,
args.Ne
) for T in admixture_times]
haploid_gravel_variance = [
haploid_gravel_ancestry_variance(
T,
args.admixture_prop,
length_in_morgans,
args.num_chroms,
args.Ne
) for T in admixture_times]
print("Comparing vs tracts with", length_in_morgans, "Morgans")
expected_df = pd.DataFrame(index=admixture_times)
expected_df['Expected'] = haploid_gravel_variance
# expected_df['Expected (diploid)'] = diploid_gravel_variance
if args.plot:
sns.set_palette("muted", 8)
plot_file_png = prefix + '.png'
plot_file_pdf = prefix + '.pdf'
fig, ax = plt.subplots(figsize=(4.25, 3))
for model, (CI_df, errs) in dfs.items():
label = parse_model_name(model)
CI_df['mean'].plot(ax=ax, yerr=errs, capsize=1, fmt='.', legend=False,
label=label, linewidth=1, markersize=4)
expected_df.plot(ax=ax, legend=False)
ax.set_xlim(0.9, 550)
ax.set_xscale('log')
ax.set_yscale('log')
ax.legend()
ax.set_xlabel('Number of generations')
ax.set_ylabel('Variance in ancestry proportion')
fig.tight_layout()
print("Plotting to", plot_file_png)
fig.savefig(plot_file_png)
print("Plotting to", plot_file_pdf)
fig.savefig(plot_file_pdf)
import IPython; IPython.embed()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--paper_params", action='store_true')
parser.add_argument("--Ne", type=int, default=80)
parser.add_argument("--sample_size", type=int, default=80)
parser.add_argument('--dtwf_file', default=None)
parser.add_argument('--hudson_file', default=None)
parser.add_argument('--admixture_range', default="1,10")
parser.add_argument('--admixture_prop', type=float, default=0.3)
parser.add_argument('--num_chroms', type=int, default=22)
parser.add_argument('--replicates', type=int, default=1)
parser.add_argument('--CI_width', type=float, default=0.66)
parser.add_argument('--plot', action='store_true')
parser.add_argument('--discretize_hack', action='store_true')
parser.add_argument('--out_dir', default=os.path.expanduser('~/temp/'))
args = parser.parse_args()
main(args)
|
UTF-8
|
Python
| false | false | 13,909 |
py
| 25 |
admixture_props.py
| 20 | 0.582644 | 0.553311 | 0 | 435 | 30.974713 | 83 |
hschwane/offline_production
| 17,549,236,390,016 |
61b08e0db34f69032d4512b276c321a913cd2378
|
70450f0c551adf47b450468e424f4f90bebfb58d
|
/icetray/resources/test/pyparameter.py
|
2e0d11ae332fcd62bb1a7c57543f1faf0fdecd7d
|
[
"MIT"
] |
permissive
|
https://github.com/hschwane/offline_production
|
ebd878c5ac45221b0631a78d9e996dea3909bacb
|
e14a6493782f613b8bbe64217559765d5213dc1e
|
refs/heads/master
| 2023-03-23T11:22:43.118222 | 2021-03-16T13:11:22 | 2021-03-16T13:11:22 | 280,381,714 | 0 | 0 |
MIT
| true | 2020-07-17T09:20:29 | 2020-07-17T09:20:29 | 2020-07-10T08:48:54 | 2020-07-10T13:59:48 | 78,754 | 0 | 0 | 0 | null | false | false |
#!/usr/bin/env python
#
# Sample i3module in python
#
from I3Tray import I3Tray
from icecube import icetray
from icecube import dataclasses
class GetI3Int(icetray.I3Module):
def __init__(self, context):
icetray.I3Module.__init__(self, context)
self.AddParameter("obj", "Python wrapped C++ class", None)
def Configure(self):
self.parameter = self.GetParameter("obj")
tray = I3Tray()
i3int = icetray.I3Int(777)
# generate empty frames
tray.AddModule(GetI3Int, obj=i3int)
tray.Execute(1)
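# Editor's note: a minimal, hypothetical self-check -- not part of the
# original test. Assuming I3Int exposes its payload as `.value` (as the
# dataclasses bindings do), Configure could verify the round-trip with:
#
#     assert self.parameter.value == 777
#
# As written, the test only checks that configuration completes without error.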
|
UTF-8
|
Python
| false | false | 526 |
py
| 2,503 |
pyparameter.py
| 1,786 | 0.697719 | 0.669202 | 0 | 25 | 20 | 66 |