repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
noahggit/CovidMaskDetector | 10,746,008,213,761 | 54cc7f482542f5f8c653e134e31c0b608ef31713 | a236d001a8f80ce7f2d428a730a0fa2265d021a6 | /app.py | 231763763eaea0ca13ad792216c47ed3c459584a | []
| no_license | https://github.com/noahggit/CovidMaskDetector | 8e8c03a0435ec980135a94ce83fee400f9ed3537 | 75cbd353007b3dd48b4cdaa6023bf8052d13154a | refs/heads/main | 2023-02-19T22:12:28.797936 | 2021-01-22T08:26:03 | 2021-01-22T08:26:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import streamlit as st
from tensorflow.keras.models import load_model
import numpy as np
from webcam import webcam
def main():
    """Run the Streamlit live-webcam mask-detection app.

    Loads a pre-trained Keras model plus an OpenCV Haar cascade, then,
    while the "Run" checkbox is ticked, grabs webcam frames, detects
    faces and overlays a MASK / NO MASK label on each detected face.
    """
    model = load_model("mask_detection.h5")
    face_classifier = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    labels_dict = {0: " MASK", 1: " NO MASK"}
    # NOTE(review): OpenCV draws in BGR while Streamlit renders RGB, so the
    # two colours below may appear swapped on screen -- confirm visually.
    color_dict = {0: (0, 255, 0), 1: (255, 0, 0)}
    st.title("Webcam Live Feed")
    run = st.checkbox('Run')
    frame_window = st.image([])
    while run:
        captured_image = webcam()
        frame = captured_image.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # detectMultiScale(scaleFactor=1.3, minNeighbors=5)
        faces = face_classifier.detectMultiScale(gray, 1.3, 5)
        for x, y, w, h in faces:
            # Bug fix: the crop height must use h, not w -- the original
            # sliced gray[y:y+w, ...], producing a wrong ROI whenever the
            # detected box was not square.
            face_img = gray[y:y + h, x:x + w]
            resized = cv2.resize(face_img, (100, 100))
            normalized = resized / 255.0
            # Model expects a single 100x100 grayscale image: (1, 100, 100, 1).
            reshaped = np.reshape(normalized, (1, 100, 100, 1))
            result = model.predict(reshaped)
            label = np.argmax(result, axis=1)[0]
            cv2.rectangle(frame, (x, y), (x + w, y + h), color_dict[label], 2)
            # Filled banner above the face for the text label.
            cv2.rectangle(frame, (x, y - 40), (x + w, y), color_dict[label], -1)
            cv2.putText(frame, labels_dict[label], (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
        frame_window.image(frame)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 1,374 | py | 3 | app.py | 1 | 0.580058 | 0.531295 | 0 | 41 | 32.512195 | 114 |
shirley5229/pyrequest | 11,141,145,205,495 | 61fbea940196616846fde053e021a87aafe4376b | b4c7a72fc7ed2434d3aad0d28a31373498b2f3a4 | /rest_test_tms3/interface/consultation_cancelSave_test.py | 2d496386af0d83759898ff5c91dccb0c211a1181 | []
| no_license | https://github.com/shirley5229/pyrequest | ce6e940c81e6b410e09059d87ba66823d589817b | df0b3ce51507b643b2a4be24321136797d85c002 | refs/heads/master | 2021-04-12T03:05:54.961455 | 2019-04-02T01:51:44 | 2019-04-02T01:51:44 | 125,944,133 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
import json
import os,sys
from request_pub import config
from request_pub import httpRequest
from request_pub import getParams
from request_pub import getAssert
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
#需要跳转到pyrequest目录下引用db_fixture
from db_fixture import test_data
from public import log
class CancelConsultationTest(unittest.TestCase):
    """Negative-path tests for the "cancelConsultation" API endpoint."""
    @classmethod
    def setUpClass(self):
        # NOTE(review): unittest passes the class object here, so the
        # conventional parameter name would be `cls`; left unchanged.
        self.logicName = "cancelConsultation"
        self.log = log.setLog()
    def test_error2(self):
        ''' Pass the primid of a consultation that is already cancelled'''
        self.log.info(self.logicName+':传入已经是取消状态的primid')
        # config.primid03 is expected to reference an already-cancelled record.
        params=getParams.getParam_cancelConsultation(config.appDBConf,self.logicName,config.primid03)
        result=httpRequest.postRequest(config.cons_url,params)
        # code 18 / "会诊状态非法" => "consultation status is invalid".
        self.assertEqual(result['code'],"18")
        self.assertEqual(result['message'],"会诊状态非法")
    def test_null(self):
        ''' Pass an empty primid (required field missing)'''
        self.log.info(self.logicName+':必填项primid为空')
        params=getParams.getParam_cancelConsultation(config.appDBConf,self.logicName,'','')
        result=httpRequest.postRequest(config.cons_url,params)
        # code 16 / "id为空,请传输id值" => "id is empty, please supply an id".
        self.assertEqual(result['code'],"16")
        self.assertEqual(result['message'],"id为空,请传输id值")
    def test_error1(self):
        ''' Pass a primid that does not exist'''
        self.log.info(self.logicName+':传入错误的primid')
        params=getParams.getParam_cancelConsultation(config.appDBConf,self.logicName,'','1111112222')
        result=httpRequest.postRequest(config.cons_url,params)
        # code 19 / "根据id查询会诊失败" => "lookup of consultation by id failed".
        self.assertEqual(result['code'],"19")
        self.assertEqual(result['message'],"根据id查询会诊失败")
if __name__=="__main__":
    # Verbose output so each test method's docstring is shown while running.
    unittest.main(verbosity=2)
| UTF-8 | Python | false | false | 1,901 | py | 45 | consultation_cancelSave_test.py | 40 | 0.699598 | 0.686962 | 0 | 50 | 33.82 | 101 |
oucher/ec2_test | 15,865,609,193,918 | 161c74d412550a2d5dcedcdf79b054709e3bbdf5 | 033ed5ef4545ef94d5c9b4ebe68309c067db1dc3 | /marketplaces/webStore/webStore_interface.py | 0cedc93e100f1942b991314b163fca7c0998d3b8 | []
| no_license | https://github.com/oucher/ec2_test | 9843395b0ab55f35d72af51f1de72a35cc0f0a1d | 4c0fd854378caab8b712139e18120b8278718ba7 | refs/heads/master | 2019-03-13T08:14:34.432019 | 2017-10-09T06:44:51 | 2017-10-09T06:44:51 | 102,559,523 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #import this for eBay and Amazon use
from .utils import get_element_by_tag
from xml.dom.minidom import parseString
class BaseInterface(object):
_account = None
def getImgUrl(self,**args):
'''
@param **args:
{'itemID':itemID}
{'type':type,'id':[id]}
'''
pass
def uploadTrack(self,submit_list):
pass
def listOrders(self,**args):
'''
@param **args:
{'timeFrom':timeFrom,'timeTo':timeTo,'Pagenumber':Pagenumber,'PerPage':PerPage,'OrderRole':OrderRole,'OrderStatus':OrderStatus,'DetailLevel':DetailLevel}
{'created_after':created_after,'created_before':created_before,'lastupdatedafter':lastupdatedafter,'lastupdatedbefore':lastupdatedbefore,'orderstatus':orderstatus,'fulfillment_channels':fulfillment_channels,'payment_methods':payment_methods,'buyer_email':buyer_email,'seller_orderid':seller_orderid,'PerPage':PerPage
}
'''
pass
def getOrder(self,**args):
'''
@param **args:
{'OrderIDS':OrderIDS}
{'amazon_order_ids':amazon_order_ids}
'''
pass
def getToken(self,**args):
'''
@param **args:
{'SessionID':SessionID}
{'MWSAuthToken':MWSAuthToken}
'''
pass
def getTokenUrl(self,**args):
'''
@param **args:
{'SessionID':SessionID}
'''
pass
def getSessionId(self,args):
pass
def listOrdersMore(self,**args):
'''
@param **args:
{'token':token}
'''
pass
def listOrderItems(self,**args):
'''
@param **args:
{'amazon_order_id':amazon_order_id}
'''
pass
def listOrderItemsMore(self,**args):
pass
pass | UTF-8 | Python | false | false | 1,890 | py | 48 | webStore_interface.py | 44 | 0.538624 | 0.538624 | 0 | 71 | 25.633803 | 328 |
bolcom/pgcdfga | 13,889,924,255,561 | 744ccf8479690df3df1b7034e300a2dee0f7be7b | 16aee12df11be7bca2a2c1e0f742acd90cc20244 | /tests/test_pgcdfga.py | 6dbf68ac5adefd3bfef026abaf9c9ddde1311f19 | [
"Apache-2.0"
]
| permissive | https://github.com/bolcom/pgcdfga | 042cc4792bc51efca3e5abbf3246f09ecc6206a6 | 56ef5482eab560fdb56937b58d2327fbaa068afa | refs/heads/master | 2020-04-04T11:20:09.873226 | 2019-10-28T10:40:20 | 2019-10-28T10:40:20 | 155,887,184 | 2 | 0 | Apache-2.0 | false | 2021-04-21T13:02:17 | 2018-11-02T15:41:17 | 2020-04-24T14:09:03 | 2021-04-21T13:02:17 | 9,852 | 1 | 0 | 1 | Python | false | false | #!/usr/bin/env python3
# Copyright 2019 Bol.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This module holds all unit tests for the pgcdfga module
'''
import unittest
from pgcdfga import pgcdfga
class DictWithDefaultsTest(unittest.TestCase):
    """Unit tests for pgcdfga.dict_with_defaults (merge with fallbacks)."""

    def test_valid_dict_with_defaults(self):
        '''
        Merging keeps base values, fills missing keys from the defaults,
        and tolerates None on either side.
        '''
        base = {'a': 'b', 'b': 'c', 1: 2, '1': '2'}
        default = {'a': 'c', 'c': 'd', 1: 3, 2: 3, '1': '3', '2': 3}
        expected = {'a': 'b', 'b': 'c', 'c': 'd', 1: 2, 2: 3, '1': '2', '2': 3}
        merged = pgcdfga.dict_with_defaults(base, default)
        defaults_only = pgcdfga.dict_with_defaults(None, default)
        base_only = pgcdfga.dict_with_defaults(base, None)
        self.assertEqual(merged, expected)
        self.assertEqual(defaults_only, default)
        self.assertEqual(base_only, base)

    def test_invalid_dict_with_defaults(self):
        '''
        A non-dict argument on either side must raise TypeError.
        '''
        correct = {'a': 'c', 'c': 'd', 1: 3, 2: 3, '1': '3', '2': 3}
        listvalues = [1, 2, 3, 4]
        with self.assertRaises(TypeError):
            pgcdfga.dict_with_defaults(listvalues, correct)
        with self.assertRaises(TypeError):
            pgcdfga.dict_with_defaults(correct, listvalues)
class NonWordCharReTest(unittest.TestCase):
    """Unit tests for the pgcdfga.NON_WORD_CHAR_RE pattern."""

    def test_valid_non_word_char_re(self):
        '''
        The pattern's first hit is the first special character in the input.
        '''
        cases = (('123_!?', '_'), ('abc!?_', '!'), ('ABC?_!', '?'))
        for text, first_hit in cases:
            self.assertEqual(pgcdfga.NON_WORD_CHAR_RE.search(text).group(0), first_hit)

    def test_invalid_non_word_char_re(self):
        '''
        Purely alphanumeric input produces no match at all.
        '''
        self.assertEqual(pgcdfga.NON_WORD_CHAR_RE.search('1234abcdABCD'), None)
| UTF-8 | Python | false | false | 2,552 | py | 12 | test_pgcdfga.py | 8 | 0.624608 | 0.602273 | 0 | 70 | 35.457143 | 81 |
Jon-Gibson/BogleBot | 11,914,239,305,089 | 669c39798c75bbb67d8a8577bb3357e75331ea68 | b402c705acf14dac6b372bf0c2415934413b5774 | /expense.py | ccf0bc584812b76e42a3d98a2d1687bdcd5fd734 | []
| no_license | https://github.com/Jon-Gibson/BogleBot | 4675957980f71747eff24c6ec69da8a6a141fa8f | 0323a1b7f78999e7b5cddd3b109680f3d1b1e156 | refs/heads/master | 2023-04-09T19:29:00.673906 | 2021-04-18T00:40:37 | 2021-04-18T00:40:37 | 359,007,520 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from urllib.request import urlopen
import re
import html
def getStockData(stockSymbol):
    """Download the Yahoo Finance profile page for *stockSymbol*.

    Returns the response body coerced with str(); because the bytes are
    not decoded this yields a "b'...'" style string -- the regex scrapers
    in this module are written against that representation.
    """
    url = "http://finance.yahoo.com/q/pr?s=" + stockSymbol + "+profile"
    # Close the HTTP response deterministically; the original leaked it.
    with urlopen(url) as page:
        data = page.read()
    return str(data)
def getFullName(stockSymbol, stockData):
    """Return the company name scraped from the first <h1> element.

    Falls back to the ticker symbol itself when no <h1> is present.
    """
    match = re.search("<h1[^>]*?>(.*?)</h1>", stockData)
    if match is None:
        return stockSymbol
    return html.unescape(match.group(1))
def getCategory(stockSymbol, stockData):
    """Return the fund category scraped from the profile page, or "N/A"."""
    match = re.search("Category</span></span></span><span [^>]*>(.*?)</span>", stockData)
    if match is None:
        return "N/A"
    return html.unescape(match.group(1))
def getFundFamily(stockSymbol, stockData):
    """Return the fund family scraped from the profile page, or "N/A"."""
    match = re.search("Fund Family</span></span></span><span [^>]*>(.*?)</span>", stockData)
    if match is None:
        return "N/A"
    return html.unescape(match.group(1))
def getExpenseRatio(stockSymbol, stockData):
    """Return the annual report expense ratio string, or "N/A" if absent.

    Note: unlike the other scrapers in this module, the captured text is
    returned raw, without HTML-unescaping (matches original behaviour).
    """
    match = re.search("Annual Report Expense Ratio.*?</span></span></span><span [^>]*>(.*?)</span>", stockData)
    if match is None:
        return "N/A"
    return match.group(1)
def findInfo(stockSymbols):
    """Look up [symbol, name, expense ratio, category] for each symbol.

    Deduplicates and sorts the input, memoises per-symbol scraping
    results in the module-level ``cache``, and skips (with a message)
    any symbol for which no usable data was found.
    """
    global cache
    symbols = sorted(set(stockSymbols))
    expenses = []
    for symbol in symbols:
        cached = cache.get(symbol)
        if cached is None:
            page = getStockData(symbol)
            cached = [
                symbol,
                getFullName(symbol, page),
                getExpenseRatio(symbol, page),
                getCategory(symbol, page),
                getFundFamily(symbol, page)
            ]
            cache[symbol] = cached
        # Keep the row only if at least one scraped field was populated.
        if cached[2] != "N/A" or cached[3] != "N/A" or cached[4] != "N/A":
            expenses.append(cached[0:4])
        else:
            print("Skipping", symbol)
    return expenses
# NOTE(review): `global` at module level is a no-op statement; kept as-is.
global cache
# Module-level memo: symbol -> [symbol, name, expense ratio, category, family].
cache = {}
kingking888/PythonSpider-3 | 1,640,677,554,078 | bfba6962bae6c70c2f1271e47b6682187cf8d0bb | 880b663cbd015eb1f7bd610619d44dff3b20266d | /10-zhihu/begin.py | 148695e0fd9f0977bf6524adf7940b053e0df955 | []
| no_license | https://github.com/kingking888/PythonSpider-3 | 1f84cba58afa3957529bfee95e0adaedfe1413c6 | 0c3bb0e302c0b7d80472ac597f87f66cac240427 | refs/heads/master | 2020-07-04T05:05:30.310978 | 2019-08-10T07:06:32 | 2019-08-10T07:06:32 | 202,166,073 | 4 | 1 | null | true | 2019-08-13T14:50:14 | 2019-08-13T14:50:13 | 2019-08-13T13:24:45 | 2019-08-10T07:07:03 | 1,396 | 0 | 0 | 0 | null | false | false | """
@author:hp
@project:10-zhihu
@file:begin.py
@ide:PyCharm
@time:2019/4/24-13:54
"""
from scrapy import cmdline
# cmdline.execute("scrapy crawl zhihuspider".split())
# Launch the "user" spider exactly as `scrapy crawl user` would from a shell.
cmdline.execute("scrapy crawl user".split())
| UTF-8 | Python | false | false | 213 | py | 50 | begin.py | 44 | 0.732394 | 0.671362 | 0 | 10 | 20.3 | 53 |
yengsheng/SplitVRPHUROP | 15,951,508,576,737 | 906126218c1ddb29bb9f6459d70d85e47e823ae4 | 0be08827bfe94ba75293cab23cf05848b595f848 | /convert_to_tsplib.py | 1c7409a746e713da9addc0ff531b7124cf322ead | []
| no_license | https://github.com/yengsheng/SplitVRPHUROP | 7baddffbcf9c2289e5759d4fba71acd097cbe8ed | 2f9472bc88d763953c94822101ac928905439ce1 | refs/heads/master | 2023-02-17T22:18:37.713266 | 2021-01-08T05:58:10 | 2021-01-08T05:58:10 | 297,616,436 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 4 12:05:55 2020
@author: Yong Sheng
"""
import os
def convert(filename):
    """Convert a .cri SDVRP instance file to TSPLIB format.

    Input layout: line 0 is "<n_customers> <capacity>", line 1 holds one
    demand per customer, and the remaining lines are "x y" coordinates,
    depot first. All coordinates are translated so the depot sits at
    (0, 0), and the result is written next to the input file as
    ``<name>_tsplib<k>.txt`` where ``k`` is the first non-colliding index.
    """
    # Read all lines; `with` closes the handle (the original leaked it).
    with open(filename) as f:
        lines = f.readlines()
    nodes, capacity = lines[0].strip().split()
    nodes = int(nodes) + 1          # +1 so the depot counts as node 1
    demands = ['0'] + lines[1].strip().split()   # depot has zero demand
    locs = lines[2:]
    locs_stripped = []
    for raw in locs:
        locs_stripped.append([int(tok) for tok in raw.strip().split()])
    # Translate every point so the depot becomes the origin.
    init_x, init_y = locs_stripped[0]
    for point in locs_stripped[1:]:
        point[0] -= init_x
        point[1] -= init_y
    locs_stripped[0] = [0, 0]
    final = "NAME: " + filename[0:-4] + '\n' + 'BEST_KNOWN: 0\nCOMMENT: 0\nDIMENSION: ' + str(nodes) + '\nCAPACITY: ' + capacity + '\nEDGE_WEIGHT_FORMAT: FUNCTION\nEDGE_WEIGHT_TYPE: EXACT_2D\n'
    # TSPLIB bodies: 1-based node ids with coordinates, then demands.
    node_coord_section = 'NODE_COORD_SECTION\n'
    for i in range(1, nodes + 1):
        node_coord_section += str(i) + ' ' + str(locs_stripped[i-1][0]) + ' ' + str(locs_stripped[i-1][1]) + '\n'
    node_coord_section += 'DEMAND_SECTION\n'
    for i in range(1, nodes + 1):
        node_coord_section += str(i) + ' ' + demands[i-1] + '\n'
    node_coord_section += 'DEPOT_SECTION\n1\n-1\nEOF\n'
    # Pick the first free output filename: <name>_tsplib<k>.txt
    index_check = 0
    while os.path.isfile(filename[0:-4] + "_tsplib" + str(index_check) + ".txt"):
        index_check += 1
    # "x" mode still refuses to clobber a file created between the probe
    # above and this open; `with` guarantees the handle is closed.
    with open(filename[0:-4] + "_tsplib" + str(index_check) + ".txt", "x") as out:
        out.write(final)
        out.write(node_coord_section)
# Walk the SDVRP instance tree (Windows-style path) and convert every .cri
# file found; dirs[0] is the root itself, so dirs[1:] visits only the
# per-instance subdirectories.
dirs = tuple(os.walk('.\\SDVRP instances'))
for i in dirs[1:]:
    for j in i[2]:
        if j[-4:] == '.cri':
            convert(i[0] + '\\' + j)
| UTF-8 | Python | false | false | 1,648 | py | 1,205 | convert_to_tsplib.py | 12 | 0.541262 | 0.510922 | 0 | 48 | 33.291667 | 193 |
ikaruga0508/tcml | 8,950,711,846,601 | ba8621d8be744820ad5d4dcd2420c303197fce93 | 3df0a2eeefbf1289d81839efec9f4a8036b65883 | /_loader_configs.py | 2520cf455395e9c673b6f97562ff9893da048802 | []
| no_license | https://github.com/ikaruga0508/tcml | c3bfa965fef67f2cc74e356e9645e9cc0bbb5c8a | 48a1e4cb4b450587d3a5d29071ed5a8806c07eb9 | refs/heads/main | 2023-04-22T18:00:57.805486 | 2021-05-08T15:39:06 | 2021-05-08T15:39:06 | 359,476,643 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class KFoldCrossValidationConfig:
def __init__(self, X_indices_list, X_val_indices_list):
"""初始化
Args:
X_indices_list: 训练集索引矩阵
X_val_indices_list: 验证集索引矩阵
"""
assert (len(X_indices_list) == len(X_val_indices_list))
self.X_indices_list = X_indices_list
self.X_val_indices_list = X_val_indices_list
self.n_splits = len(X_indices_list)
def get_indices(self, k):
"""获得当前折的训练集索引和验证集索引
Args:
k: 当前折
Returns:
(训练集索引, 验证集索引)
"""
assert (k < self.n_splits)
return self.X_indices_list[k], self.X_val_indices_list[k]
| UTF-8 | Python | false | false | 762 | py | 12 | _loader_configs.py | 11 | 0.546407 | 0.546407 | 0 | 22 | 29.363636 | 65 |
keithrpotempa/bangazon-workforce-management-oculus-nebula | 15,418,932,614,506 | cb6cf98d278f3ad5dadf634e5dffa67a0c0ae91e | 168605919f28ea761cfd8d92ed69518f7656921f | /hrapp/models/employee_computer.py | 1cbc51b37c7ab63020aa9fe1efc35ce4f32d16d5 | []
| no_license | https://github.com/keithrpotempa/bangazon-workforce-management-oculus-nebula | 81a35fec751f0f975999b980c2dda0a52ed9515f | 4282491a92b96ac98f64672b77f9892dfbc368d5 | refs/heads/master | 2022-08-18T22:05:12.995936 | 2020-05-14T16:40:58 | 2020-05-14T16:40:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from django.urls import reverse
class EmployeeComputer(models.Model):
    """
    Join ("through") table for the many-to-many relationship between
    computers and employees, recording assignment/unassignment dates.

    Author: Joe Shep
    methods: none
    """
    # Deleting either side cascades and removes this assignment record.
    employee = models.ForeignKey("Employee", on_delete=models.CASCADE)
    computer = models.ForeignKey("Computer", on_delete=models.CASCADE)
    assign_date = models.DateField()
    # Remains empty until the computer is handed back.
    unassign_date = models.DateField(null=True, blank=True)

    class Meta:
        verbose_name = "computer and employee"
        verbose_name_plural = "computers and employees"

    def get_absolute_url(self):
        # Reverse the named detail route for this record.
        return reverse("EmployeeComputer_detail", kwargs={"pk": self.pk})
| UTF-8 | Python | false | false | 716 | py | 62 | employee_computer.py | 40 | 0.703911 | 0.703911 | 0 | 22 | 31.545455 | 92 |
Indrateja25/Learning-Python | 14,714,558,003,035 | 67d1177ed7cf1f9e96791f679c844e1c2bdf47c1 | 1548742f7beb798792cc319c0789a588fe338a26 | /dictionaries2.py | 82a0f273c7462cb374b878bafce45e2e79976a73 | []
| no_license | https://github.com/Indrateja25/Learning-Python | 0d0a8c5900e6abf70ce368acbced083202cb83ea | 8c2cbe4c556401a3bd8b27178f232419617614e9 | refs/heads/master | 2021-04-09T11:03:17.181442 | 2018-03-17T19:42:53 | 2018-03-17T19:42:53 | 125,523,906 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | stocks = {
'GOOG':256.8,
'APPL':128.9,
'FB':167.1,
'MST':212.5,
'AMZ':230.0
}
print(min(stocks))
print(min(stocks.items()))
print(max(zip(stocks.values(),stocks.keys())))
print('\n')
print(sorted(stocks))
print(sorted(stocks.items()))
print(sorted(zip(stocks.values(),stocks.keys())))
| UTF-8 | Python | false | false | 306 | py | 15 | dictionaries2.py | 14 | 0.617647 | 0.552288 | 0 | 15 | 19.4 | 49 |
salonikalra/BVCOE-Feedback | 5,403,068,868,265 | e76ec4526e0b373394987110169333296a5ce6f0 | 141d9f25727349c584b8fcebd64991f27c1232a8 | /feedbacks/admin.py | 1602416925bc05f01bde10fa57e78a8bcddd5112 | []
| no_license | https://github.com/salonikalra/BVCOE-Feedback | 2b92c01b15e9c70cacf77c86662d9f92a730d811 | abd46cd6259a56ec91dfba8b0aa01ede490ae96a | refs/heads/master | 2022-12-12T22:07:19.355436 | 2019-06-24T11:45:30 | 2019-06-24T11:45:30 | 193,562,536 | 1 | 0 | null | false | 2022-12-06T20:21:57 | 2019-06-24T18:59:08 | 2021-05-23T11:18:45 | 2022-12-06T20:21:56 | 8,885 | 0 | 0 | 8 | CSS | false | false | from django.contrib import admin
from . import models
class DepartmentAdmin(admin.ModelAdmin):
    # Show both the id and the human-readable name in the changelist.
    list_display = ['department_id', 'department_name']
class TeacherAdmin(admin.ModelAdmin):
    """Admin changelist for teachers: filter by department, search by name/id."""
    list_filter = ['department']
    list_display = ['teacher_name', 'teacher_id', 'get_subjects']
    search_fields = ['teacher_name', 'teacher_id']
    def get_subjects(self, obj):
        # Comma-join the names of all subjects linked to this teacher via
        # the reverse accessor `subject_set`.
        return ", ".join([s.subject_name for s in obj.subject_set.all()])
    get_subjects.short_description = 'subjects'
class SubjectAdmin(admin.ModelAdmin):
    """Admin changelist for subjects: filter by semester/department."""
    list_filter = ['semester', 'department']
    list_display = ['subject_code', 'subject_name', 'get_teachers']
    search_fields = ['subject_code', 'subject_name']
    def get_teachers(self, obj):
        # Comma-join the names of all teachers assigned to this subject
        # (the `teacher` many-to-many field).
        return ", ".join([t.teacher_name for t in obj.teacher.all()])
    get_teachers.short_description = 'teachers'
class feedbackAdmin(admin.ModelAdmin):
    """Admin view of feedback rows with search across related objects."""
    list_display = ['student', 'teacher', 'subject', 'total', 'average']
    # select_related avoids an N+1 query for the three FK columns above.
    list_select_related = ['student', 'teacher', 'subject']
    search_fields = ['student__user__username', 'teacher__teacher_name', 'subject__subject_name']
# Register each model with its customised admin (Semester uses the default).
admin.site.register(models.Department, DepartmentAdmin)
admin.site.register(models.Semester)
admin.site.register(models.Teacher, TeacherAdmin)
admin.site.register(models.Subject, SubjectAdmin)
admin.site.register(models.feedback, feedbackAdmin)
| UTF-8 | Python | false | false | 1,372 | py | 34 | admin.py | 16 | 0.704082 | 0.704082 | 0 | 35 | 38.2 | 97 |
hanrick2000/Leetcode-for-Fun | 2,010,044,739,334 | b37fec175d2db8d7dd06ce977466011931b0a30a | 96ded4b80a459483d3359c9e2f68f1e4c80453a2 | /lintcode/lintcode_376_Tree_pathSum.py | 6209cd7e8a4c7c5e4e078aab34852f0a6254fd2c | []
| no_license | https://github.com/hanrick2000/Leetcode-for-Fun | bd37f8a9ab92ac564e24e38edfa04f1fbaab9fbd | cce863008199c0beb0fc8839f9efbd4487240e2c | refs/heads/master | 2020-12-19T05:33:48.957554 | 2019-04-15T19:52:01 | 2019-04-15T19:52:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    # @param {TreeNode} root the root of binary tree
    # @param {int} target an integer
    # @return {int[][]} all valid paths
    def binaryTreePathSum(self, root, target):
        """Return every root-to-leaf path whose node values sum to target."""
        paths = []
        if root is None:
            return paths
        # helper mutates `paths` in place; the returned values are not needed.
        self.helper(root, target, paths, [])
        return paths

    def helper(self, root, target, res, temp):
        """DFS from ``root``, carrying the current path in ``temp``.

        Appends a copy of the path to ``res`` at each leaf where the
        remaining target is exactly met, and returns ``(res, temp)`` with
        the current node popped off the path.
        """
        if root is None:
            return res, temp
        temp.append(root.val)
        is_leaf = root.left is None and root.right is None
        if is_leaf and root.val == target:
            res.append(temp[:])
        remaining = target - root.val
        res, temp = self.helper(root.left, remaining, res, temp)
        res, temp = self.helper(root.right, remaining, res, temp)
        return res, temp[0 : -1]
tunmyatbryan/EECS118_Web_and_Database_Programming | 10,333,691,355,447 | 8f7f0b0d668fbfcb9e48ccab1c5904d681bc4cde | 212397f6ba92b23a79df9e91630e4bf5c8ca6fa2 | /new_gallery.py | b0f8f09f3cd66e3944a55e14194fd3f5127863fd | []
| no_license | https://github.com/tunmyatbryan/EECS118_Web_and_Database_Programming | fef7ee518478226ff8a57b97b7c6b6e1a5b6dcc4 | 77d321d33fee8c4cf798e3b545901ebdb74120e7 | refs/heads/main | 2023-01-05T07:36:48.482933 | 2020-10-30T22:19:12 | 2020-10-30T22:19:12 | 308,755,760 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Name - Tun Myat
#ID - 51705354
# Emit the CGI response: content-type header, blank separator line, then
# the HTML form that posts new-gallery details to new_gallery_add.py.
print("Content-type: text/html") #HTML is following
print()

_HTML_LINES = (
    "<H1>Requesting New Gallery</H1>",
    "<b>Please type the detail of new gallery<br></b>",
    "<form action='new_gallery_add.py' method = 'post'>",
    "Gallery Name: <input type='text' name='gallery_name'><br>",
    "Gallery Description: <input type='text' name='gallery_description'><br><br>",
    "<input type='submit' value='Submit'></form>",
)
for _line in _HTML_LINES:
    print(_line)
Aasthaengg/IBMdataset | 11,063,835,781,000 | 6b7ed046f6c06b74ff27cac1139d84088b7265d3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02697/s864560609.py | 44d593fa4fc9a9d8aae51d4b3dbdf5e0ea96939f | []
| no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
stdin = sys.stdin
inf = 1 << 60
mod = 1000000007
sys.setrecursionlimit(10 ** 7)
ni = lambda: int(ns())
nin = lambda y: [ni() for _ in range(y)]
na = lambda: list(map(int, stdin.readline().split()))
nan = lambda y: [na() for _ in range(y)]
nf = lambda: float(ns())
nfn = lambda y: [nf() for _ in range(y)]
nfa = lambda: list(map(float, stdin.readline().split()))
nfan = lambda y: [nfa() for _ in range(y)]
ns = lambda: stdin.readline().rstrip()
nsn = lambda y: [ns() for _ in range(y)]
ncl = lambda y: [list(ns()) for _ in range(y)]
nas = lambda: stdin.readline().split()
n, m = na()
if m == 1:
print(1, 2)
exit()
if m % 2 == 0:
l = (m + 1) // 2
r = (m + 1) // 2 + 2
cnt = 0
while cnt < m and l >= 1 and r <= (m + 1):
print(l, r)
l -= 1
r += 1
cnt += 1
l = (n - m - 1) // 2
r = (n - m - 1) // 2 + 1
while cnt < m and l >= 1 and r <= (n - m - 1):
print(m + 1 + l, m + 1 + r)
l -= 1
r += 1
cnt += 1
else:
l = m // 2
r = m // 2 + 2
cnt = 0
while cnt < m and l >= 1 and r <= m:
print(l, r)
l -= 1
r += 1
cnt += 1
l = (n - m) // 2
r = (n - m) // 2 + 1
while cnt < m and l >= 1 and r <= (n - m):
print(m + l, m + r)
l -= 1
r += 1
cnt += 1 | UTF-8 | Python | false | false | 1,391 | py | 202,060 | s864560609.py | 202,055 | 0.419123 | 0.376707 | 0 | 59 | 22.59322 | 60 |
Jarrod95/Python_Programs | 6,279,242,190,784 | 6a0c770074704cc899a0cccdffe0c24d8de7827f | b96bf8f96d33fd2e9235501f9523e70f7bb51547 | /closestTherapist.py | c7772956d842231fd7278cbe88f88364e6b1f6ba | []
| no_license | https://github.com/Jarrod95/Python_Programs | a85b2b513c7135dcc51207cd704e0e953b2472a6 | 6741619cf363457e4f376d593691304769311272 | refs/heads/master | 2023-02-06T11:34:29.483723 | 2020-12-31T04:57:53 | 2020-12-31T04:57:53 | 275,974,159 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import googlemaps
from datetime import date, timedelta
api_key = ''
gmaps = googlemaps.Client(key=api_key)
today = date.today()
#Find most recent file
extension = r'C:\Users\info\OneDrive\1. M2M Administration\EXPORTED FROM SOFTWARE\Therapist Data\Maps_Export_'
#Extract most recent therapist data up to 10 days ago
i = 0
while i < 10:
dt = date.today() - timedelta(i)
df = pd.read_csv(fr'{extension}{dt.strftime("%d-%m-%Y")}.csv', skiprows=1, skipfooter=3, engine='python')
if len(df) > 1:
break
i += 1
if i == 10:
print("Checked 10 days worth of data -- please extract more recent data")
#Drop everyone expired or unavail
df = df.dropna(subset=['Insurance Expiry Date', 'First Aid Expiry', 'Police Check Expiry', 'Membership Expiry'])
df = df[~df['Client Type'].str.match('Expired')]
df = df[~df['Client Type'].str.match('Unavailable')]
#Change Mobile column to international number
df['Mobile'] = df['Mobile'].str.replace('04', '+614', 1)
df['Mobile'] = df['Mobile'].str.replace(' ', '')
print('input state')
state = input()
print('Please input an address')
dest = input()
#drop rows that don't match the state of new client
match = df[df['State-'].str.match(state)].reset_index(drop=True)
match['Address'] = match['Street'] + ', ' + match['Suburb'] + ', ' + \
match['State-'] + ', ' + match['Postcode'].astype(str)
# Compare distances
dist = []
dur = []
for i in match['Address']:
result = gmaps.distance_matrix(i, dest, mode='driving')
duration = result['rows'][0]['elements'][0]['duration']['text']
distance = result['rows'][0]['elements'][0]['distance']['text']
print(duration + ' (' + distance + ')')
dist.append(distance)
dur.append(duration)
match['distance'] = dist
match['duration'] = dur
#Convert format to mins
match['Minutes'] = pd.eval(match['duration'].replace(['hours?', 'mins', 'min'], ['*60+', '', ''], regex=True)).astype(int)
match = match.sort_values(by=['Minutes']).reset_index(drop=True)
print(match[['First Name-', 'duration', 'distance', 'Minutes']])
best_fit = match.nsmallest(4, 'Minutes')
print('--------------------------')
print('Here are the four closest therapists:\n\n')
print(best_fit[['First Name-', 'duration', 'distance', 'Minutes']])
| UTF-8 | Python | false | false | 2,279 | py | 5 | closestTherapist.py | 2 | 0.644142 | 0.631856 | 0 | 59 | 37.627119 | 122 |
kogwang/Flask-fenli | 16,801,912,064,837 | d1df94f71d436112f94555949e73458e869bb516 | b346f7ea4a4bb05800acd731bfba9960249ee3e9 | /App/models.py | 4670084706e205a68b9d12fbee7395c787d2f681 | [
"Apache-2.0"
]
| permissive | https://github.com/kogwang/Flask-fenli | f340c977e24bc0783adaa2652e9caa594e2200cf | 6e54f2e03c107611fec986a672b47001086e1c7d | refs/heads/master | 2020-03-27T23:17:10.149182 | 2018-09-04T07:56:46 | 2018-09-04T07:56:46 | 147,306,744 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask_sqlalchemy import SQLAlchemy
# 创建对象
db = SQLAlchemy()
# 例:
class User(db.Model):
    """Example ORM model: a user with an auto-increment id, unique name
    and a password field."""
    # Auto-incrementing surrogate primary key.
    id = db.Column(db.Integer,primary_key=True,autoincrement=True)
    # Display/login name; uniqueness enforced at the database level.
    name = db.Column(db.String(20),unique=True)
    # NOTE(review): String(100) -- presumably a password hash, but nothing
    # here enforces hashing; confirm before storing real credentials.
    password = db.Column(db.String(100))
eastene/vRNN-PyCUDA | 11,587,821,803,536 | cf5453cabc702f5c4f82b002001230f794e644a9 | 7c81cea6c5607c62ec9744051cc3931696f6631d | /test/test_cases/model_tests.py | b6b6b9b9022b44b8e743d38ad2cd2131c84b0b63 | []
| no_license | https://github.com/eastene/vRNN-PyCUDA | a3e2e6386cd429bf922294de100863d749f7cc66 | 0dffad1b995ee66c99a5d452dbf9e5085882b03a | refs/heads/master | 2020-03-10T08:52:59.419694 | 2018-06-26T17:59:54 | 2018-06-26T17:59:54 | 129,296,437 | 2 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
import unittest
from time import time
from pycuda.tools import make_default_context
import pycuda.gpuarray
import pycuda.driver
import pycuda.curandom
from pycuda.tools import mark_cuda_test
import string
from src.model.LSTM import LSTM
from src.model.lstm_layer import *
from src.preprocess.nlp import *
from src.preprocess.VocabCoder import VocabCoder
from src.preprocess.BatchGenerator import BatchGenerator
class RNNTestCase(unittest.TestCase):
"""def test_init(self):
rnn = RNN(10, [1, 3, 5])
answer = "5 layers: \n"
answer += " Input layer of size 10 to 10\n"
answer += " Hidden layer of size 10 to 1\n"
answer += " Hidden layer of size 1 to 3\n"
answer += " Hidden layer of size 3 to 5\n"
answer += " Output layer of size 5 to 10\n"
self.assertEqual(rnn.__repr__(), answer)
"""
    def test_train_sample(self):
        """Smoke-test CPU training of a char-level LSTM on lorem ipsum.

        There are no assertions: the test passes if train() and run()
        complete without raising.
        """
        # Character vocabulary: lowercase letters plus space.
        vocab = string.ascii_lowercase + " "
        num_unroll = 15
        vocab_size = len(vocab)
        batch_size = 5
        num_layers = 3
        learning_rate = 0.05
        lstm = LSTM(num_unroll, vocab_size, batch_size, num_layers, learning_rate)
        text = "is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's " \
               "standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled " \
               "it to make a type specimen book. It has survived not only five centuries, but also the leap into " \
               "electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the " \
               "release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing " \
               "software like Aldus PageMaker including versions of Lorem Ipsum."
        tokens = tokenize_char(text)
        normal = normalize(tokens)
        # 1000 training iterations, then sample 10 characters from the model.
        lstm.train(list(vocab), normal, 1000)
        lstm.run(list(vocab), 10)
def test_train_sample_gpu(self):
vocab = string.ascii_lowercase + " "
num_unroll = 3
vocab_size = len(vocab)
batch_size = 2
num_layers = 4
learning_rate = 0.05
lstm = LSTM(num_unroll, vocab_size, batch_size, num_layers, learning_rate)
text = "is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's " \
"standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled " \
"it to make a type specimen book. It has survived not only five centuries, but also the leap into " \
"electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the " \
"release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing " \
"software like Aldus PageMaker including versions of Lorem Ipsum."
tokens = tokenize_char(text)
normal = normalize(tokens)
lstm.train_gpu(list(vocab), normal, 4, 2)
lstm.run(list(vocab), "This is a test of the gpu version")
def test_train_sample_gpu_async(self):
vocab = string.ascii_lowercase + " "
num_unroll = 13
vocab_size = len(vocab)
batch_size = 50
num_layers = 4
learning_rate = 0.05
lstm = LSTM(num_unroll, vocab_size, batch_size, num_layers, learning_rate)
text = "is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's " \
"standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled " \
"it to make a type specimen book. It has survived not only five centuries, but also the leap into " \
"electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the " \
"release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing " \
"software like Aldus PageMaker including versions of Lorem Ipsum."
tokens = tokenize_char(text)
normal = normalize(tokens)
print("Starting async test")
lstm.train_gpu_async(list(vocab), normal, 4)
lstm.run(list(vocab), "This is a test of the gpu version")
def test_train(self):
num_unroll = 10
vocab_size = 1000
batch_size = 5
num_layers = 3
learning_rate = 0.05
lstm = LSTM(num_unroll, vocab_size, batch_size, num_layers, learning_rate)
parameters = lstm.allocate_parameters()
caches_cache = []
X = np.zeros((vocab_size, batch_size, num_unroll + 1))
for i in range(num_unroll + 1):
for j in range(batch_size):
X[random.randrange(0, vocab_size), j, i] = 1
a0 = np.zeros((vocab_size, batch_size))
start = time()
a, y, c, caches = lstm_forward(X[:, :, :num_unroll], a0, parameters[0])
caches_cache.append(caches)
for layer in range(1, num_layers):
a, y, c, caches = lstm_forward(y, a0, parameters[layer])
caches_cache.append(caches)
loss = X[:, :, 1:] - y
gradients = lstm_backward(loss, caches_cache[len(caches_cache) - 1])
update_weights(parameters[num_layers - 1], gradients, learning_rate)
for layer in reversed(range(num_layers - 1)):
gradients = lstm_backward(gradients['dx'], caches_cache[layer])
update_weights(parameters[layer], gradients, learning_rate)
end = time()
print(end - start)
def test_train_gpu(self):
misc.init()
num_unroll = 5
vocab_size = 100
batch_size = 5
num_layers = 3
learning_rate = 0.05
lstm = LSTM(num_unroll, vocab_size, batch_size, num_layers, learning_rate)
parameters = lstm.allocate_parameters()
caches_cache = []
gpu_parameters = []
for layer_params in parameters:
gpu_parameters.append(layer_to_gpu(layer_params))
X = np.zeros((vocab_size, batch_size, num_unroll + 1))
for i in range(num_unroll + 1):
for j in range(batch_size):
X[random.randrange(0, vocab_size), j, i] = 1
a0 = np.zeros((vocab_size, batch_size))
start = time()
a, y, c, caches = lstm_forward_gpu(X[:, :, :num_unroll], a0, gpu_parameters[0])
caches_cache.append(caches)
for layer in range(1, num_layers):
a, y, c, caches = lstm_forward_gpu(y, a0, gpu_parameters[layer])
caches_cache.append(caches)
loss = X[:, :, 1:] - y.get()
gradients = lstm_backward_gpu(loss, caches_cache[len(caches_cache) - 1])
update_weights(gpu_parameters[num_layers - 1], gradients, learning_rate)
for layer in reversed(range(num_layers - 1)):
gradients = lstm_backward_gpu(gradients['dx'], caches_cache[layer])
update_weights(gpu_parameters[layer], gradients, learning_rate)
end = time()
print(end - start)
class LstmLayerTestCase(unittest.TestCase):
    # Unit-level comparisons of the CPU and GPU LSTM cell implementations.
    # NOTE(review): the CPU/GPU results are only printed, not asserted equal.
    def test_layer_to_gpu_async(self):
        # Rough timing comparison: async host->device copy vs blocking copy
        # of a large (20000 x 10000) random matrix.
        D = np.random.rand(20000,10000)
        stream = pycuda.driver.Stream()
        print(time())
        d = pycuda.gpuarray.to_gpu_async(D, stream=stream)
        print(time())
        stream.synchronize()
        print("")
        del d
        print(time())
        d = pycuda.gpuarray.to_gpu(D)
        print(time())
    @mark_cuda_test
    def test_add(self):
        # Sanity check: elementwise addition matches between host and device.
        np.random.seed(1)
        Wy = np.random.randn(2, 10)
        by = np.random.randn(2, 10)
        Wy_gpu = pycuda.gpuarray.to_gpu(Wy)
        by_gpu = pycuda.gpuarray.to_gpu(by)
        print(Wy + by)
        print((Wy_gpu + by_gpu).get())
    @mark_cuda_test
    def test_forward_prop(self):
        # One LSTM cell forward step with fixed-seed random weights,
        # run on CPU and GPU; hidden activations are printed for comparison.
        np.random.seed(1)
        x = np.random.randn(3, 10)
        a0 = np.random.randn(5, 10)
        Wf = np.random.randn(5, 5 + 3)
        bf = np.random.randn(5, 1)
        Wi = np.random.randn(5, 5 + 3)
        bi = np.random.randn(5, 1)
        Wo = np.random.randn(5, 5 + 3)
        bo = np.random.randn(5, 1)
        Wc = np.random.randn(5, 5 + 3)
        bc = np.random.randn(5, 1)
        Wy = np.random.randn(2, 5)
        by = np.random.randn(2, 1)
        parameters = {"Wf": Wf, "Wi": Wi, "Wo": Wo, "Wc": Wc, "Wy": Wy, "bf": bf, "bi": bi, "bo": bo, "bc": bc,
                      "by": by}
        Wf_gpu = pycuda.gpuarray.to_gpu(Wf)
        Wi_gpu = pycuda.gpuarray.to_gpu(Wi)
        Wo_gpu = pycuda.gpuarray.to_gpu(Wo)
        Wc_gpu = pycuda.gpuarray.to_gpu(Wc)
        Wy_gpu = pycuda.gpuarray.to_gpu(Wy)
        bf_gpu = pycuda.gpuarray.to_gpu(bf)
        bi_gpu = pycuda.gpuarray.to_gpu(bi)
        bo_gpu = pycuda.gpuarray.to_gpu(bo)
        bc_gpu = pycuda.gpuarray.to_gpu(bc)
        by_gpu = pycuda.gpuarray.to_gpu(by)
        c_gpu = pycuda.gpuarray.to_gpu(a0)
        parameters_gpu = {"Wf": Wf_gpu, "Wi": Wi_gpu, "Wo": Wo_gpu, "Wc": Wc_gpu, "Wy": Wy_gpu, "bf": bf_gpu, "bi": bi_gpu, "bo": bo_gpu, "bc": bc_gpu,
                          "by": by_gpu}
        a, y, c, caches = lstm_cell_forward(x, a0, a0, parameters)
        print("CPU DONE")
        a_gpu, y_gpu, c_gpu, caches = lstm_cell_forward_gpu(x, a0, c_gpu, parameters_gpu)
        print("GPU DONE")
        print(a)
        print(a_gpu.get())
    @mark_cuda_test
    def test_back_prop(self):
        # Forward then backward through one LSTM cell on CPU and GPU with the
        # same seeded weights; output-gate bias gradients are printed.
        np.random.seed(1)
        x = np.random.randn(3, 10)
        a0 = np.random.randn(5, 10)
        Wf = np.random.randn(5, 5 + 3)
        bf = np.random.randn(5, 1)
        Wi = np.random.randn(5, 5 + 3)
        bi = np.random.randn(5, 1)
        Wo = np.random.randn(5, 5 + 3)
        bo = np.random.randn(5, 1)
        Wc = np.random.randn(5, 5 + 3)
        bc = np.random.randn(5, 1)
        Wy = np.random.randn(2, 5)
        by = np.random.randn(2, 1)
        parameters = {"Wf": Wf, "Wi": Wi, "Wo": Wo, "Wc": Wc, "Wy": Wy, "bf": bf, "bi": bi, "bo": bo, "bc": bc,
                      "by": by}
        Wf_gpu = pycuda.gpuarray.to_gpu(Wf)
        Wi_gpu = pycuda.gpuarray.to_gpu(Wi)
        Wo_gpu = pycuda.gpuarray.to_gpu(Wo)
        Wc_gpu = pycuda.gpuarray.to_gpu(Wc)
        Wy_gpu = pycuda.gpuarray.to_gpu(Wy)
        bf_gpu = pycuda.gpuarray.to_gpu(bf)
        bi_gpu = pycuda.gpuarray.to_gpu(bi)
        bo_gpu = pycuda.gpuarray.to_gpu(bo)
        bc_gpu = pycuda.gpuarray.to_gpu(bc)
        by_gpu = pycuda.gpuarray.to_gpu(by)
        c_gpu = pycuda.gpuarray.to_gpu(a0)
        parameters_gpu = {"Wf": Wf_gpu, "Wi": Wi_gpu, "Wo": Wo_gpu, "Wc": Wc_gpu, "Wy": Wy_gpu, "bf": bf_gpu,
                          "bi": bi_gpu, "bo": bo_gpu, "bc": bc_gpu,
                          "by": by_gpu}
        a, y, c, caches = lstm_cell_forward(x, a0, a0, parameters)
        da_next = np.random.randn(5, 10)
        dc_next = np.random.randn(5, 10)
        gradients = lstm_cell_backward(da_next, dc_next, caches)
        print("CPU DONE")
        a_gpu, y_gpu, c_gpu, caches_gpu = lstm_cell_forward_gpu(x, a0, c_gpu, parameters_gpu)
        da_next_gpu = pycuda.gpuarray.to_gpu(da_next)
        dc_next_gpu = pycuda.gpuarray.to_gpu(dc_next)
        gradients_gpu = lstm_cell_backward_gpu(da_next_gpu, dc_next_gpu, caches_gpu)
        print("GPU DONE")
        print(gradients['dbo'])
        print(gradients_gpu['dbo'].get())
class CellTestCase(unittest.TestCase):
    # Compares Cell.forward_prop against its GPU counterpart on one-hot input;
    # asserts the hidden states agree within an absolute tolerance of 0.25.
    """def test_forward_prop(self):
        cell = Cell(4, 4)
        x_t = np.array([[0], [0], [0], [1]])
        y_t = [[1], [1], [1], [1]]
        self.assertListEqual(cell.forward_prop(x_t).tolist(), y_t)
    """
    @mark_cuda_test
    def test_forward_prop_gpu(self):
        vocab = 10
        batches = 2
        # Two identical cells; one has its weights moved to the device.
        cell = Cell(vocab, batches, (0,0))
        gpu_cell = Cell(vocab, batches, (0,0))
        gpu_cell.cell_to_gpu()
        # One-hot input per batch column; zero initial hidden/cell state.
        x_t = np.zeros((vocab, batches))
        h_prev = np.zeros((vocab, batches))
        c_prev = np.zeros((vocab, batches))
        x_t[5][0] = 1
        x_t[7][1] = 1
        x_t_gpu = pycuda.gpuarray.to_gpu(x_t)
        h_prev_gpu = pycuda.gpuarray.to_gpu(h_prev)
        c_prev_gpu = pycuda.gpuarray.to_gpu(c_prev)
        h, c = cell.forward_prop(c_prev, h_prev, x_t)
        print("CPU done")
        h_gpu, c_gpu = gpu_cell.forward_prop_gpu(c_prev_gpu, h_prev_gpu, x_t_gpu)
        gpu_cell.cell_from_gpu()
        print("GPU Done")
        h_gpu_mem = h_gpu.get()
        # Elementwise tolerance check between host and device hidden states.
        for j in range(batches):
            for i in range(vocab):
                self.assertLessEqual(abs(h[i][j] - h_gpu_mem[i][j]), 0.25) | UTF-8 | Python | false | false | 12,666 | py | 15 | model_tests.py | 14 | 0.576109 | 0.557082 | 0 | 339 | 36.365782 | 151
avoinea/allen.image.scale | 661,424,987,118 | cd52a95155b12db1fb7119b909de4bd76bda3223 | c136fdf7083fae2198f660c6a92c1232e982fd5a | /allen/image/scale/__init__.py | af4f4b242ba7fbe00b200f3143aa328351214b6b | []
| no_license | https://github.com/avoinea/allen.image.scale | 1cfd9b7c5a42010903b4a9a21273c7e6b785c47a | 041e0e31cf128ea6ff65f9bee2f1a49df3933f43 | refs/heads/master | 2020-04-23T03:08:59.408327 | 2010-07-10T05:19:16 | 2010-07-10T05:19:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
from PIL import Image as PilImage
from StringIO import StringIO
from zope.interface import implements
from interfaces import IThumbnail
from z3c.blobfile.image import Image
logger = logging.getLogger('allen.image.scale')
class Thumbnail(object):
""" Adapter for zope.app.file.image.Image object to generate thumbnails
"""
implements(IThumbnail)
def __init__(self, context):
self.context = context
self.quality = 100
@property
def templates(self):
return {
'album': (100, 100),
'thumbnail': (128, 128),
'normal': (192, 192),
'large': (480, 480),
'icon': (32, 32),
'link': (16, 16),
}
def get_crop_box(self, width, height):
if width == height:
return 0, 0, width, height
elif width > height:
return width/2 - height/2, 0, width/2 + height/2, height
return 0, 0, width, width
def get_crop_aspect_ratio(self, size):
img_width, img_height = self.context.getImageSize()
if img_width == img_height:
return size, size
width = height = size
sw = float(width) / img_width
sh = float(height) / img_height
if img_width > img_height:
width = int(sh * img_width + 0.5)
else:
height = int(sw * img_height + 0.5)
return width, height
def get_aspect_ratio(self, width, height):
#return proportional dimensions within desired size
img_width, img_height = self.context.getImageSize()
sw = float(width) / img_width
sh = float(height) / img_height
if sw <= sh: height = int(sw * img_height + 0.5)
else: width = int(sh * img_width + 0.5)
return width, height
def _resize(self, display, crop=False):
if display not in self.templates.keys():
display = 'thumbnail'
width, height = self.templates.get(display)
# Calculate image width, size
if crop:
width, height = self.get_crop_aspect_ratio(width)
else:
width, height = self.get_aspect_ratio(width, height)
# Resize image
newimg = StringIO()
img = PilImage.open(StringIO(self.context.data))
fmt = img.format
try:
img = img.resize((width, height), Image.ANTIALIAS)
except AttributeError:
img = img.resize((width, height))
# Crop if needed
if crop:
box = self.get_crop_box(width, height)
img = img.crop(box)
img.save(newimg, fmt, quality=self.quality)
newimg.seek(0)
return newimg.read()
def resize(self, size):
crop = False
if size == 'album':
crop = True
try:
img = self._resize(size, crop)
except IOError, err:
logger.exception(err)
img = ''
return img
| UTF-8 | Python | false | false | 2,959 | py | 4 | __init__.py | 3 | 0.559311 | 0.541061 | 0 | 97 | 29.505155 | 75 |
InsightSoftwareConsortium/ITK | 7,576,322,321,875 | b4c87ebe59ff4374313d555718f7ad66137d3208 | bed3ac926beac0f4e0293303d7b2a6031ee476c9 | /Modules/ThirdParty/pygccxml/src/pygccxml/declarations/dependencies.py | fedf793be00c34e3465e2b9348aa7935aa8e2aab | [
"IJG",
"Zlib",
"LicenseRef-scancode-proprietary-license",
"SMLNJ",
"BSD-3-Clause",
"BSD-4.3TAHOE",
"LicenseRef-scancode-free-unknown",
"Spencer-86",
"LicenseRef-scancode-llnl",
"FSFUL",
"Libpng",
"libtiff",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-hdf5",
"MIT",
"NTP",
"LicenseRef-scancode-mit-old-style",
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"MPL-2.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
]
| permissive | https://github.com/InsightSoftwareConsortium/ITK | ed9dbbc5b8b3f7511f007c0fc0eebb3ad37b88eb | 3eb8fd7cdfbc5ac2d0c2e5e776848a4cbab3d7e1 | refs/heads/master | 2023-08-31T17:21:47.754304 | 2023-08-31T00:58:51 | 2023-08-31T14:12:21 | 800,928 | 1,229 | 656 | Apache-2.0 | false | 2023-09-14T17:54:00 | 2010-07-27T15:48:04 | 2023-09-11T02:58:41 | 2023-09-14T17:53:59 | 200,420 | 1,245 | 624 | 286 | C++ | false | false | # Copyright 2014-2017 Insight Software Consortium.
# Copyright 2004-2009 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
from . import declaration
from . import typedef
from . import cpptypes
from . import type_traits
from . import class_declaration
from . import variable
from . import calldef
from . import namespace
def get_dependencies_from_decl(decl, recursive=True):
    """Collect the types and declarations *decl* depends on.

    Returns a list of :class:`dependency_info_t`.  For namespaces and
    classes the walk descends into children only when *recursive* is True.
    """
    # typedefs and variables depend exactly on their declared type
    if isinstance(decl, (typedef.typedef_t, variable.variable_t)):
        return [dependency_info_t(decl, decl.decl_type)]
    deps = []
    if isinstance(decl, namespace.namespace_t):
        if recursive:
            for child in decl.declarations:
                deps.extend(get_dependencies_from_decl(child))
    elif isinstance(decl, calldef.calldef_t):
        # callables depend on their return type, argument types and
        # declared exception types
        if decl.return_type:
            deps.append(
                dependency_info_t(decl, decl.return_type, hint="return type"))
        deps.extend(
            dependency_info_t(decl, argument.decl_type)
            for argument in decl.arguments)
        deps.extend(
            dependency_info_t(decl, exception, hint="exception")
            for exception in decl.exceptions)
    elif isinstance(decl, class_declaration.class_t):
        # classes depend on their base classes and (optionally) members
        for base in decl.bases:
            deps.append(dependency_info_t(
                decl, base.related_class, base.access_type, "base class"))
        if recursive:
            for access in class_declaration.ACCESS_TYPES.ALL:
                deps.extend(__find_out_member_dependencies(
                    decl.get_members(access), access))
    return deps
def __find_out_member_dependencies(members, access_type):
    """Gather dependencies of *members*, tagging those whose reporting
    declaration is itself one of the members with *access_type*."""
    dependencies = []
    for member in members:
        dependencies.extend(get_dependencies_from_decl(member, recursive=True))
    own_ids = set(id(member) for member in members)
    for dep in dependencies:
        if id(dep.declaration) in own_ids:
            dep.access_type = access_type
    return dependencies
class dependency_info_t(object):
    """Describes a single dependency: *declaration* relies on *depend_on_it*."""
    def __init__(self, decl, depend_on_it, access_type=None, hint=None):
        object.__init__(self)
        assert isinstance(
            depend_on_it, (class_declaration.class_t, cpptypes.type_t))
        self._decl = decl
        self._depend_on_it = depend_on_it
        self._access_type = access_type
        self._hint = hint
    @property
    def declaration(self):
        """The declaration that reported this dependency."""
        return self._decl
    @property
    def depend_on_it(self):
        """The type or declaration being depended upon."""
        return self._depend_on_it
    @property
    def access_type(self):
        """Access type (public/protected/private) of the dependency, if any."""
        return self._access_type
    @access_type.setter
    def access_type(self, access_type):
        self._access_type = access_type
    def __str__(self):
        return 'declaration "%s" depends( %s ) on "%s" ' \
            % (self.declaration, self.access_type, self.depend_on_it)
    @property
    def hint(self):
        """The declaration, that report dependency can put some additional
        inforamtion about dependency. It can be used later"""
        return self._hint
    def find_out_depend_on_it_declarations(self):
        """Resolve :attr:`depend_on_it` to the declaration(s) behind it.

        Returns an empty list when the dependency is a pure type with no
        underlying declaration.
        """
        return impl_details.dig_declarations(self.depend_on_it)
    @staticmethod
    def i_depend_on_them(decl):
        """Return the set of declarations that *decl* depends on."""
        dependents = set()
        for info in get_dependencies_from_decl(decl):
            dependents.update(
                d for d in info.find_out_depend_on_it_declarations() if d)
        # a member implicitly depends on its enclosing class
        if isinstance(decl.parent, class_declaration.class_t):
            dependents.add(decl.parent)
        return dependents
    @staticmethod
    def we_depend_on_them(decls):
        """Union of :meth:`i_depend_on_them` over every item in *decls*."""
        dependents = set()
        for decl in decls:
            dependents.update(dependency_info_t.i_depend_on_them(decl))
        return dependents
class impl_details(object):
    """Internal helpers for dependency resolution."""
    @staticmethod
    def dig_declarations(depend_on_it):
        """Extract the declaration(s) behind *depend_on_it*.

        Returns [] when the stripped type is fundamental (no declaration).
        """
        if isinstance(depend_on_it, declaration.declaration_t):
            return [depend_on_it]
        # strip aliases and type decorations before inspecting
        stripped = type_traits.base_type(
            type_traits.remove_alias(depend_on_it))
        if isinstance(stripped, cpptypes.declarated_t):
            return [stripped.declaration]
        if not isinstance(stripped, cpptypes.calldef_type_t):
            return []
        # a callable type depends on its return/argument types, and a
        # member-function type additionally on its class instance type
        dig = impl_details.dig_declarations
        found = list(dig(stripped.return_type))
        for arg_type in stripped.arguments_types:
            found.extend(dig(arg_type))
        if isinstance(stripped, cpptypes.member_function_type_t):
            found.extend(dig(stripped.class_inst))
        return found
| UTF-8 | Python | true | false | 5,418 | py | 8,579 | dependencies.py | 5,576 | 0.615356 | 0.611665 | 0 | 158 | 33.291139 | 79 |
turekj/cassom | 9,010,841,409,273 | 38557a27aaeffffe3039326d3a8371c09ba5e4b9 | 56304e0d89b726725250795ddfc1cf6a50756386 | /core/model/model.py | e7b6cdfe07745ba244b4e6c33936cc9f331a7d09 | [
"MIT"
]
| permissive | https://github.com/turekj/cassom | 3907c6232fe2f4d78d50f35e6a3a20c31f5f6b1f | 6c70e9b6ef5c390a546ea25cd854d74e73863dbd | refs/heads/master | 2021-01-13T02:26:00.178605 | 2014-10-02T08:31:15 | 2014-10-02T08:31:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from core.keyspace.keyspace_manager import KeyspaceManager
from core.model.model_fields import ModelFieldsFactory
from core.model.model_manager import ModelManager
from core.model.model_transformator import ModelTransformator
from core.table.table_manager import TableManager
from core.model.model_metadata import ModelMetadataFactory, ModelMetadata
from utilities.string.string_utilities import StringUtilities
class ModelMeta(type):
    """Metaclass that records every Model subclass and applies the
    field transformation step at class-creation time."""
    def __init__(cls, name, bases, attrs):
        super(ModelMeta, cls).__init__(name, bases, attrs)
        # The first class created (the Model base itself) owns the registry;
        # every subsequent subclass is recorded in it under its class name.
        if hasattr(cls, 'registry'):
            cls.registry[name] = cls
        else:
            cls.registry = {}
        ModelTransformator().transform_model(cls)
class Model(object):
    """Base class for persisted models.

    Class-level state shared by all models:
      * ``engine`` -- database engine, set once via :meth:`bind`.
      * ``keyspace_manager`` / ``table_manager`` -- shared schema helpers.
      * ``metadata`` / ``managers`` -- not defined here; presumably injected
        per subclass by ModelTransformator in the metaclass -- TODO confirm.
    """
    __metaclass__ = ModelMeta
    engine = None
    keyspace_manager = KeyspaceManager()
    table_manager = TableManager()
    def __init__(self):
        pass
    @staticmethod
    def bind(engine):
        # Entry point: attach the engine, then ensure the keyspace and all
        # registered tables exist and propagate the engine to the managers.
        if engine is None:
            raise AttributeError('engine is not initialized')
        Model.engine = engine
        Model.create_keyspace()
        Model.create_tables()
        Model.update_managers()
    @staticmethod
    def create_keyspace():
        # Create the engine's keyspace only if it does not exist yet.
        if not Model.keyspace_manager.check_keyspace_exists(Model.engine, Model.engine.get_keyspace()):
            Model.keyspace_manager.create_keyspace(Model.engine, Model.engine.get_keyspace())
    @staticmethod
    def create_tables():
        # Create a table for every registered model that lacks one.
        for metadata_key in Model.metadata:
            if not Model.table_manager.check_table_exists(Model.engine, metadata_key):
                Model.table_manager.create_table(Model.engine, Model.metadata[metadata_key])
    @staticmethod
    def update_managers():
        # Give every model manager access to the bound engine and table
        # manager.  (iteritems: this codebase targets Python 2.)
        for _, manager in Model.managers.iteritems():
            manager.engine = Model.engine
            manager.table_manager = Model.table_manager
    @staticmethod
    def _table_name(cls):
        # Table naming convention: CamelCase class name -> snake_case.
        return StringUtilities.convert_to_underscore(cls.__name__)
    @staticmethod
    def _manager(cls):
        # Look up the manager registered under the class's table name.
        return Model.managers[Model._table_name(cls)]
    def save(self):
        """Persist this instance through its model manager."""
        manager = Model._manager(self.__class__)
        manager.save(self)
    @classmethod
    def objects(cls):
        """Return the query manager for this model class."""
        return Model._manager(cls)
| UTF-8 | Python | false | false | 2,243 | py | 56 | model.py | 35 | 0.670085 | 0.670085 | 0 | 72 | 30.152778 | 103 |
kanhaichun/ICS4U | 14,456,859,946,067 | 6e5fc847fa9f80fb1634a70803b7df650d930d93 | aea37c205cd97404f2a564fcebaf25cd284c5586 | /Toxicbug/Jeffrey/assignment1Jeffrey.py | cf32c86c0012050aff23820fe9ff2c3abed2055b | []
| no_license | https://github.com/kanhaichun/ICS4U | b9457d001f9cdde84894f004409621023bea91ab | bf3e29364707f52fcd5f7b20c7ba6ca1d848af31 | refs/heads/master | 2020-07-31T02:15:38.211002 | 2018-04-26T17:18:25 | 2018-04-26T17:18:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Author: Jeffrey
Date: 10th January 2018
Title: division
Function: Loop through 1000 numbers, and find the numbers that are divisible by 3 or 19.
"""
# Walk the integers 0..999 and report multiples of 3 or, failing that, of 19.
# NOTE: because of the elif, a multiple of both (e.g. 57) is reported for 3 only.
for n in range(0, 1000):
    if n % 3 == 0:
        print(n, "is divisible by 3.")
    elif n % 19 == 0:
        print(n, "is multiple of 19.")
| UTF-8 | Python | false | false | 428 | py | 411 | assignment1Jeffrey.py | 378 | 0.670561 | 0.591121 | 0 | 13 | 31.461538 | 88 |
lipengddf/test | 7,267,084,710,344 | 329357af19a6b05ed7547298a19adf8eb0d782c3 | 39134e5bed6fb212697fe9d712c4ca1309e4e066 | /PycharmProjects/untitled/web.py | f09d1fce99e83654a2181e9fcc5bcd869409567e | []
| no_license | https://github.com/lipengddf/test | e13970d3bfeeef0d096ac1f92a485c796477e90f | 5b4899a95ab20c9497df5dfa67dee410e61f3399 | refs/heads/master | 2020-05-24T20:48:18.138833 | 2019-05-19T11:20:08 | 2019-05-19T11:20:08 | 187,462,326 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding:utf-8 -*-
#! the author lipeng
# from selenium import webdriver
# from time import sleep
# #定义打开的浏览器
# dr = webdriver.Firefox()
# sleep(2)
# #请求网页
# dr.get('https://qzone.qq.com/')
# sleep(2)
# dr.get('http://www.jd.com')
# sleep(2)
#回到上一次打开的网页
# dr.back()
# sleep(2)
#前进
# dr.forward()
#关闭浏览器
# dr.quit()
#获取网页标题,一般用作断言,判断请求到的标题是否符合预期结果
# print(dr.title)
#获取请求的网址
# print(dr.current_url)
#设置浏览器窗口大小
# dr.set_window_size(400,400)
#设置浏览器窗口的位置
# dr.set_window_position(400,400)
#最大化浏览器
# dr.maximize_window()
# sleep(3)
#最小化浏览器
# dr.minimize_window()
# sleep(2)
#1 、id 定位
# dr.find_element_by_id('kw').send_keys('python')
# dr.find_element_by_id('su').click()
#2、class 为了区分跟python中的class,class_name
#单个定位的时候保证class的值是唯一的
# dr.find_element_by_class_name('manv').click()
#3、name 通过name定位
# dr.find_element_by_name('wd').send_keys('python')
#4、link_text文本定位
# dr.find_element_by_link_text('视频').click()
#5、partial link text 模糊文本定位
# dr.find_element_by_link_text('hao').click()
#6、tag_name 定位 通过标签页的名称
# dr.find_element_by_tag_name('')
#7、xpath 定位 路径定位
#路径标记语言
# dr.find_element_by_xpath('//*[@id="kw"]').click()
#8、css 定位
# dr.find_element_by_css_selector('#kw').click()
#动作:1、send_keys() 输入 2、click() 点击 3、clear() 清除 4、text 文本
# from selenium import webdriver
# from time import sleep
# import os
# # #定义打开的浏览器
# dr = webdriver.Firefox()
# # #请求网页
# dr.get('https://qzone.qq.com/')
# sleep(2)
# #自动登录QQ空间
# dr.switch_to.frame('login_frame')
# sleep(2)
# dr.find_element_by_id('switcher_plogin').click()
# sleep(2)
# dr.find_element_by_id('u').send_keys('319808789')
# sleep(2)
# dr.find_element_by_id('p').send_keys('ai319808789')
# sleep(2)
# dr.find_element_by_css_selector('#login_button').click()
# sleep(2)
# #定位到退出的按钮
# dr.find_element_by_id('tb_logout').click()
# sleep(2)
#切换到alter上去,自动点击确定
# we = dr.switch_to.alert()
# #获取alter上面的文本
# print(we.text)
# #点击确定
# we.accept()
#点击取消
# we.dismiss()
#点击退出的时候会弹出框 叫alert
#定位一组,定位多个数据
# ww = dr.find_element_by_id('su')
#层级定位:先定位一个顶层元素,在定位这个元素下面的元素
# dr.get('https://www.ctrip.com')
# sleep(2)
#层级定位,多用于复杂的定位场景
# ww = dr.find_element_by_id('searchHotelLevelSelect').click().find_elements_by_class_name('option')
# from selenium import webdriver
# from time import sleep
# #定义打开的浏览器
# dr = webdriver.Firefox()
# #请求网页
# dr.get('file:///C:/Users/admin/Desktop/abc.html')
# sleep(2)
# dr.find_element_by_xpath('/html/body/input').click()
# sleep(2)
#将控制器切换至弹出框
# ww = dr.switch_to.alert()
#获取弹出框上的文本
# print(ww.text)
#点击确定
# ww.accept()
#点击取消
# ww.dismiss()
#输入数据
# ww.send_keys('你好吗?')
# from selenium import webdriver
# from time import sleep
# import os
# # #定义打开的浏览器
# dr = webdriver.Firefox()
# # #请求网页
# dr.get('https://qzone.qq.com/')
# sleep(2)
# #自动登录QQ空间
# dr.switch_to.frame('login_frame')
# sleep(2)
# #切换到框架 id ,name
# #先定义到框架
# w = dr.find_element_by_xpath('//*[@id="login"]').click()
# dr.switch_to.frame(w)
# sleep(2)
# dr.switch_to.parent_frame()
# sleep(2)
# #退出框架,退出到最初的页面
# # dr.switch_to_default_content()
# dr.find_element_by_xpath('html/body/div[3]/div/div/div[1]/div[1]/a[2]/i').click()
# #iframe 网页框架
from selenium import webdriver
from time import sleep
# import os
# Create the browser driver (Firefox).
dr = webdriver.Firefox()
# Request a page.
dr.get('https://www.douban.com/')# window #1
sleep(2)
# Print the handle (the unique id string) of the first window.
print(dr.current_window_handle)
# Clicking this link opens window #2.
dr.find_element_by_xpath('/html/body/div[1]/div[1]/ul/li[1]/a').click()
# Collect the handles of every open window.
ww = dr.window_handles
sleep(2)
# print(ww)
# Switch the driver to the most recently opened window and show its title.
dr.switch_to.window(ww[-1])
print(dr.title)
# Window-switching notes: the browser itself cannot decide which window is
# current; handles are assigned in the order the windows were opened, and
# each handle is the unique id string of that window.
# dr.switch_to_window() | UTF-8 | Python | false | false | 4,597 | py | 13 | web.py | 11 | 0.670627 | 0.649903 | 0 | 169 | 20.420118 | 100 |
yriart/python-projects | 4,320,737,103,774 | 566eb07de4c13f1a69d19bfd1cd388a67106718c | 629cc6526150d7b3e320e88f236e0843de33ba70 | /convert_to_lower.py | 84cc0cc95481b794005e59aaf2bf1a8ac098145b | []
| no_license | https://github.com/yriart/python-projects | 84546ba6a4bf69fc68d9a42db9a6b0af160f2ba7 | 3c42c13243d356ce4eec1b92b10a5928bb6c363f | refs/heads/master | 2021-01-20T11:59:40.369297 | 2015-05-18T22:56:24 | 2015-05-18T22:56:24 | 9,307,884 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # take input of a filename on command line, convert the contents of the file to lowercase.
import sys
f = sys.argv[1]
myfile = open(f).read().split()
for item in myfile:
print item.lower()
| UTF-8 | Python | false | false | 191 | py | 12 | convert_to_lower.py | 8 | 0.722513 | 0.717277 | 0 | 7 | 26.285714 | 90 |
GeorgiyDemo/FA | 12,592,844,124,041 | 479e44f088d006ee92a84f55c850e3fd87496002 | 45ab1e397b5fc69ba84c8f5dfb66c09b79bca4c6 | /Course_I/Алгоритмы Python/Part2/семинары/pract2/example/your_package/file2.py | 5bfab372e2c3ea4bcddeb215debafd0feb9af6a2 | [
"WTFPL"
]
| permissive | https://github.com/GeorgiyDemo/FA | 926016727afa1ce0ee49e6ca9c9a3c60c755b35f | 9575c43fa01c261ea1ed573df9b5686b5a6f4211 | refs/heads/master | 2023-06-28T00:35:43.166167 | 2023-06-16T14:45:00 | 2023-06-16T14:45:00 | 203,040,913 | 46 | 65 | WTFPL | false | 2022-04-09T21:16:39 | 2019-08-18T18:19:32 | 2022-02-22T02:39:45 | 2022-04-09T21:16:38 | 1,281,041 | 23 | 11 | 4 | Jupyter Notebook | false | false | def add2():
print("MEOW add2")
| UTF-8 | Python | false | false | 35 | py | 1,270 | file2.py | 702 | 0.571429 | 0.514286 | 0 | 2 | 16.5 | 22 |
hwua/openerp | 11,192,684,788,978 | 2b513aadc1032bff550d44fc0142d4bbaa347ee9 | 7746c96fc763e56d55c882824a5d078f1a62a2ab | /Recruitment/models/recruitment_email.py | 2c9ad311d80a0ccca02edb81511b5cb0003226a5 | []
| no_license | https://github.com/hwua/openerp | a744c4d0ee54cac44448ac54f4315d7dbf5df704 | d2fdf24daf83030c0fbcee052e04eee6c86eda2b | refs/heads/master | 2021-01-23T15:41:27.614700 | 2017-09-21T04:11:47 | 2017-09-21T04:11:47 | 102,717,626 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #-*- coding:utf-8
import logging
from openerp import models,fields,api,_
from openerp.exceptions import ValidationError,UserError
_logger = logging.getLogger(__name__)
# Recruitment-request mail template (the original comment said "salary
# mail template", which does not match this model).
class RecruitmentinformationTemplate(models.Model):
    """Recruitment-request model extended with mail-thread support for
    sending and receiving approval notification e-mails."""
    _name = 'recruitment.information'
    _inherit = ['recruitment.information','mail.thread']
    _description = u'招聘需求模型'
    # As soon as the mail gateway receives a message, the related model is
    # created/linked and its associated code runs; only a reply from the
    # approving user triggers the target model's follow-up signal.
    # Computed subject lines for outgoing mails (both embed the record id).
    subject = fields.Char(string=u'招聘需求',readonly=True,compute='_compute_email_subject')
    subject2 = fields.Char(string=u'招聘需求',readonly=True,compute='_compute_email_subject2')
    # Computed HTML bodies of the notification mails.
    body_html = fields.Html(string=u'消息内容',readonly=True,compute='_compute_email_body')
    body_value = fields.Html(string=u'消息内容',readonly=True,compute='_compute_email_value')
    center_state = fields.Char(default=" ",)
    # Human-readable (Chinese) label of the workflow state, kept in sync
    # with `state` by _compute_statelist below.
    statelist = fields.Char(string=u'状态',readonly=True,compute='_compute_statelist')
@api.one
@api.depends('state')
def _compute_statelist(self):
if self.state == 'draft':
self.statelist = u'草稿'
elif self.state == 'wait':
self.statelist = u'处理中'
elif self.state == 'done':
self.statelist = u'己完成'
elif self.state == 'feedback':
self.statelist = u'反馈中'
elif self.state == 'off':
self.statelist = u'关闭'
elif self.state == 'refuse':
self.statelist = u'拒绝'
else:
pass
shenqingreasonlist = fields.Char(string=u'申请理由',readonly=True,compute='_compute_shenqingreasonlist')
@api.one
@api.depends('shenqingreasonlist')
def _compute_shenqingreasonlist(self):
if self.shenqingreason == 'extends':
self.shenqingreasonlist = u'扩大编制'
elif self.shenqingreason == 'vacancy':
self.shenqingreasonlist = u'岗位空缺'
elif self.shenqingreason == 'supplement':
self.shenqingreasonlist = u'离职补充'
elif self.shenqingreason == 'reserve':
self.shenqingreasonlist = u'储备人员'
elif self.shenqingreason == 'other':
self.shenqingreasonlist = u'其他'
else:
pass
    ###################################### The e-mail subject suffix (the record id) is built by the compute methods below
@api.one
def _compute_email_subject(self):
self.subject = u'招聘需求/%s' % (str(self.id))
@api.one
def _compute_email_subject2(self):
self.subject2 = u'招聘需求/%s' % (str(self.id))
@api.one
def send_email(self,**kw):
mail_mail_obj = self.env['mail.mail']
mail = mail_mail_obj.create(kw)
mail.send()
#发送电子邮件,,,这里的函数会用到action.py文件中。提交审批用到
#调用了这个方法
def send_recruitment_email(self,email_from,email_to,email_cc=None,tag=None):
_logger.info(u'发送邮件的主题:%s' % self.subject)
_logger.info(u'从%s发送电子邮件给%s' % (email_from,email_to))
vals = {'subject':self.subject,'body_html':self.body_html,'email_from':email_from,'email_to':email_to,'reply_to':self.shenqingresuser.email}
self.send_email(**vals)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#写HTML格式化布局
    @api.one
    def _compute_email_body(self):
        """Render the recruitment request as an HTML summary table into
        ``body_html`` (used as the outgoing notification e-mail body).

        The ``%s`` placeholders are filled, in order, with: applicant,
        request date, department, position, assigned HR user, reason label,
        headcount, salary budget, duties, qualifications, required start
        date, recruiting days, deadline, state label, penalty points,
        bonus points, reason text and remarks.
        """
        self.body_html = u'''
            <html lang="en">
            <head>
                <meta charset="utf-8"/>
            </head>
            <body>
            <table border="1" cellspacing="0" cellpadding="5" width="700" style="font-size:16px;font-weight:bold;color:#000;font-family:'Microsoft YaHei';border-collapse:collapse;" >
                <tr style="text-align:center;font-weight:bold;font-size:32px;" bgcolor="#84c1ff" >
                    <td colspan="8">招聘需求</td>
                </tr>
                <tr style="text-align:center;">
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >申请人</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >申请日期</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
                </tr>
                <tr style="text-align:center;">
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >申请部门</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20">%s</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >申请岗位</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20">%s</td>
                </tr>
                <tr style="text-align:center;">
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >所属中心HR</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >申请理由</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
                </tr>
                <tr style="text-align:center;">
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >需求人数</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20">%s</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >薪酬预算</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20">%s</td>
                </tr>
                <tr style="text-align:center;">
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >主要工作职责</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >任职资格</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
                </tr>
                <tr style="text-align:center;">
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >要求到岗时间</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20">%s</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >招聘天数</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20">%s</td>
                </tr>
                <tr style="text-align:center;">
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >到期时间</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >状态</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
                </tr>
                <tr style="text-align:center;">
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >扣分值</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20">%s</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >加分值</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20">%s</td>
                </tr>
                <tr style="text-align:center;">
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >理由</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >备注</td>
                    <td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
                </tr>
            </table>
            </body>
            </html>
        ''' % (self.shenqingresuser.name,str(self.shenqingstartdate),self.shenqingdepartment_id.name,
               self.shenqinggangwei,self.zhipaiuser.name,self.shenqingreasonlist,self.xuqiurenshu,
               self.wagesyusuan,self.workduty,self.workqualifications,self.yaoqiudate,self.zhaorendays,
               self.lasttime,self.statelist,self.koufenterm,self.jiafenterm,self.reasontext,self.beizhu)
#写HTML格式化布局
@api.one
def _compute_email_value(self):
self.body_html = u'''
<html lang="en">
<head>
<meta charset="utf-8"/>
</head>
<body>
<table border="1" cellspacing="0" cellpadding="5" width="700" style="font-size:16px;font-weight:bold;color:#000;font-family:'Microsoft YaHei';border-collapse:collapse;" >
<tr style="text-align:center;font-weight:bold;font-size:32px;" bgcolor="#84c1ff" >
<td colspan="8">招聘需求</td>
</tr>
<tr style="text-align:center;">
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >申请人</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >申请日期</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
</tr>
<tr style="text-align:center;">
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >申请部门</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20">%s</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >申请岗位</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20">%s</td>
</tr>
<tr style="text-align:center;">
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >所属中心HR</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >申请理由</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
</tr>
<tr style="text-align:center;">
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >需求人数</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20">%s</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >薪酬预算</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20">%s</td>
</tr>
<tr style="text-align:center;">
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >主要工作职责</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >任职资格</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
</tr>
<tr style="text-align:center;">
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >要求到岗时间</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20">%s</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >招聘天数</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20">%s</td>
</tr>
<tr style="text-align:center;">
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >到期时间</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >状态</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
</tr>
<tr style="text-align:center;">
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >扣分值</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20">%s</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >加分值</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20">%s</td>
</tr>
<tr style="text-align:center;">
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >理由</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >备注</td>
<td style="border-right:1px solid #000;border-bottom:1px solid #000;" width="200" height="20" >%s</td>
</tr>
</table>
</body>
</html>
''' % (self.shenqingresuser.name,str(self.shenqingstartdate),self.shenqingdepartment_id.name,
self.shenqinggangwei,self.zhipaiuser.name,self.shenqingreasonlist,self.xuqiurenshu,
self.wagesyusuan,self.workduty,self.workqualifications,self.yaoqiudate,self.zhaorendays,
self.lasttime,self.statelist,self.koufenterm,self.jiafenterm,self.reasontext,self.beizhu) | UTF-8 | Python | false | false | 14,669 | py | 109 | recruitment_email.py | 55 | 0.624776 | 0.554896 | 0 | 236 | 58.245763 | 170 |
5Elements/myappp | 9,234,179,727,592 | 4ec4398a688bf702e6f224335232eb6152899709 | fb6539851d623bff4b9f47ff110b7f5339d242ac | /myapp/migrations/0002_auto_20180405_2238.py | ce6de17abb259511c561af9be55a4622e83da8b8 | []
| no_license | https://github.com/5Elements/myappp | 038c076bcacfd8d0e9affbb3f39ffa22683fc8a9 | 875fa1e0a2a14a1b3bb32566d2abcb976514f74e | refs/heads/master | 2020-03-08T16:37:43.131198 | 2018-04-05T18:01:40 | 2018-04-05T18:01:40 | 128,244,664 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.0.2 on 2018-04-05 17:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: change myapp.Tweet.tw_cat to a FloatField.

    NOTE(review): ``max_length`` has no effect on FloatField (Django ignores
    it); it is kept verbatim because applied migrations must stay immutable.
    """
    dependencies = [
        ('myapp', '0001_initial'),
    ]
    operations = [
        # Alter the existing 'tw_cat' column on the 'tweet' model.
        migrations.AlterField(
            model_name='tweet',
            name='tw_cat',
            field=models.FloatField(max_length=200),
        ),
    ]
| UTF-8 | Python | false | false | 374 | py | 3 | 0002_auto_20180405_2238.py | 2 | 0.574866 | 0.516043 | 0 | 18 | 19.777778 | 52 |
HuangZengPei/LeetCode | 18,305,150,615,804 | 8196b8ef15adab88ce8a73b33448b4068b234e7c | b729b6262f6cb096c988341829cd4ff2525a6133 | /Middle/Que114.py | 5225e50f4a68e1a02883b67ec0614f5d36dbbf05 | [
"MIT"
]
| permissive | https://github.com/HuangZengPei/LeetCode | f812014cf840a9ecd764e5850d7258aa881549b5 | d2b8a1dfe986d71d02d2568b55bad6e5b1c81492 | refs/heads/master | 2020-09-13T04:12:26.824514 | 2020-06-27T12:52:03 | 2020-06-27T12:52:03 | 222,651,348 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# 将二叉树展开为链表,使用栈保存右子树
class Solution(object):
    def flatten(self, root):
        """Flatten a binary tree into a right-leaning linked list, in place.

        Nodes end up in pre-order along the ``right`` pointers and every
        ``left`` pointer is cleared. Iterative DFS: a stack holds pending
        subtrees; the right child is pushed before the left so the left
        subtree is visited first.

        :type root: TreeNode
        :rtype: None -- the tree is modified in place.
        """
        if root is None:
            return
        pending = [root]
        tail = None  # last node already stitched into the flattened list
        while pending:
            node = pending.pop()
            if tail is not None:
                tail.left = None
                tail.right = node
            if node.right is not None:
                pending.append(node.right)
            if node.left is not None:
                pending.append(node.left)
            tail = node
artyomLisovskij/blockchain-market | 6,725,918,799,695 | a181122ee042aceda4878d4fc5b4f88929c745f7 | 3d974bf4f769d20afc8249d1fe384716c016d3d1 | /main/migrations/0001_initial.py | adc39c60a66523d25e84354928e53dc8c3a14427 | []
| no_license | https://github.com/artyomLisovskij/blockchain-market | afdbf0082311be800112f5ae7e7b5eed23c569cf | e87d45c3d71719618142f38eb6ca5825e0f44ff1 | refs/heads/master | 2021-05-06T01:13:42.355475 | 2017-12-16T16:17:55 | 2017-12-16T16:17:55 | 114,412,097 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.0 on 2017-12-16 09:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Categories',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=42)),
],
),
migrations.CreateModel(
name='EthUsers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.CharField(max_length=42)),
],
),
migrations.CreateModel(
name='Products',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.CharField(max_length=42)),
('name', models.CharField(max_length=42)),
('price', models.IntegerField()),
('last_price_date', models.DateField()),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Categories')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.EthUsers')),
],
),
migrations.CreateModel(
name='UserBought',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('products', models.ManyToManyField(to='main.Products')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.EthUsers')),
],
),
]
| UTF-8 | Python | false | false | 1,892 | py | 14 | 0001_initial.py | 8 | 0.556554 | 0.544926 | 0 | 49 | 37.612245 | 115 |
ferjavrec/imp_firebird | 15,461,882,280,747 | 10c7fa0b3edd169a10c07a3d711732a76ab027d3 | d714e7791bd1aa9f7149c054b05c631be6325844 | /nuevos_script/listar_nuevos_usuarios.py | 2bb364abf69e4a2aa26889eaa120d877c2dc5e35 | []
| no_license | https://github.com/ferjavrec/imp_firebird | 42f8ad9394778bceb5bc641771922aa4bee5dc27 | bb6c4a7ddd9d3f3db13e50635449a57f65ce9ba1 | refs/heads/master | 2016-08-11T05:51:36.277214 | 2016-02-13T03:38:00 | 2016-02-13T03:38:00 | 47,922,306 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
reload(sys);
sys.setdefaultencoding("utf8")
sys.path.append('/home/fer/tryton/trytond')
from decimal import *
import datetime
import fdb
import psycopg2
import os
# Today's date (kept for ad-hoc date filtering; not used below).
hoy = datetime.date.today()
# Absolute directory of this script, used to locate the Firebird database file.
RUTA_PROYECTO = os.path.dirname(os.path.realpath(__file__))
# Source side: legacy Firebird database (WIN1250-encoded).
con = fdb.connect(dsn=RUTA_PROYECTO+'/../database/puan.fdb', user='sysdba',
    password='masterkey', charset='WIN1250')
cur = con.cursor()
cur1 = con.cursor()
# Target side: Tryton PostgreSQL database, queried to detect already-imported parties.
conn = psycopg2.connect(host='localhost' ,dbname='puan', user='tryton',
    password='tryton')
curpg = conn.cursor()
#retorna el id del party_party
def retorna_partyid(suministroanterior):
    """Return the first party_party row (a 1-tuple holding the id) whose
    suministro_anterior equals the given key, or None when no match exists."""
    query = "select id from party_party where suministro_anterior = %s"
    curpg.execute(query, (suministroanterior,))
    return curpg.fetchone()
def crear_partys():
#en party.py anular el required en campo rangos y en campo aportes para poder importar.
sql = ("select usu.nro_cuenta, trim(usu.nombre) as nombre, "
"iif(trim(soc.nro_docum)='','0',coalesce(trim(soc.NRO_DOCUM),'0')) as nro_docum, "
"case soc.TIPO_DOCUM when '1' then 'CI' when '2' then 'DNI' "
"when '3' then 'LC' when '4' then 'LE' when '9' then 'DNI' "
"when '80' then 'CUIT' else '' end as tipodoc, "
"coalesce(trim(soc.NRO_CUIT),'') as nro_cuit, "
"case usu.CAT_IVA when 'CONS' then 'consumidor_final' when 'EXEN' "
"then 'exento' when 'INSC' then 'responsable_inscripto' when 'MONO' "
"then 'monotributo' when 'NOCA' then 'consumidor_final' else '' end as tipoiva, "
"trim(usu.calle) as calle, trim(usu.NRO_PTA) as num_pta, usu.COD_POSTAL, soc.NRO_SOCIO, "
"soc.NRO_CUIT, usu.ruta, usu.vereda, usu.numero, usu.subnro, usu.fecha_ingreso, usu.cod_estado "
"from USUARIOS usu "
"left join SOCIOS soc on usu.nro_socio=soc.nro_socio "
"where usu.fecha_ingreso > '01.10.2015' "
#"where (usu.COD_ESTADO = 'BAJ' or usu.COD_ESTADO is null) " #todos menos los prepagos
#"where (usu.COD_ESTADO = 'BAJ') " #prepagos solos
#"where usu.nombre starting with '{0}' "
#"and soc.nro_socio = '1881' "
"order by usu.nombre ")
cur.execute(sql)
for row in cur:
ruta = str(row[11]).strip()
vereda = str(row[12]).strip()
numero = str(row[13]).strip()
subnro = str(row[14]).strip()
suministro_anterior = ruta+'.'+vereda+'.'+numero+'.'+subnro
if retorna_partyid(suministro_anterior)==None:
partynombre = str(row[1]).strip()
print suministro_anterior
# Entry point: run the listing when the script is executed directly.
if __name__ == "__main__":
    crear_partys()
| UTF-8 | Python | false | false | 2,557 | py | 58 | listar_nuevos_usuarios.py | 44 | 0.652718 | 0.637075 | 0 | 77 | 32.12987 | 101 |
siddharthvaria/GI-DL | 11,218,454,614,663 | e9aea849891f25c699869356243ba071ec8daf04 | 3aabf076ca0f31c69948ccb34151488bec6e1e3e | /terra/classifier.py | 16ead567f316c48b3749cc50ebe244812ab24d0c | [
"MIT"
]
| permissive | https://github.com/siddharthvaria/GI-DL | 2e4af5dada42cefaa8e19137963b0a4675025011 | 715b5fe4426d737ed1b23ffbb812058a90433682 | refs/heads/master | 2021-09-14T16:07:41.572158 | 2018-05-15T22:39:20 | 2018-05-15T22:39:20 | 94,111,739 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from keras.models import Model, load_model
from keras.layers import Input, LSTM, Dense, RepeatVector
# autoencoder/encoder + classifier for tweets
class Classifier:
    """LSTM autoencoder plus classifier sharing one encoder layer.

    ``autoencoder`` learns to reconstruct its input sequence; ``classifier``
    reuses the same (weight-shared) encoder layer and predicts a 3-way
    softmax distribution over tweet classes.
    """

    def __init__(self, timesteps, input_dim=1, latent_dim=100,
                 elayers=3, dlayers=3, clayers=3):
        # timesteps: sequence length; input_dim: features per timestep.
        # elayers/dlayers/clayers are kept for interface compatibility; the
        # current topology is fixed at one layer per stage (the stacking
        # loops were already disabled upstream).
        ainputs = Input(shape=(timesteps, input_dim))
        cinputs = Input(shape=(timesteps, input_dim))

        # Shared encoder layer, applied to both the autoencoder and the
        # classifier inputs so its weights are trained jointly.
        encoded = LSTM(latent_dim, return_sequences=True)

        # Decoder: collapse the encoded sequence to a vector, repeat it per
        # timestep and decode back to the input dimensionality.
        decoded = RepeatVector(timesteps)(LSTM(latent_dim)(encoded(ainputs)))
        decoded = LSTM(input_dim, return_sequences=True)(decoded)
        self.autoencoder = Model(ainputs, decoded)

        # Classifier head on top of the shared encoder.
        classified = LSTM(128)(encoded(cinputs))
        # BUG FIX: the Dense layer was assigned but never applied to the
        # tensor, so Model() received a Layer object instead of an output
        # tensor. It is now called on the LSTM output.
        classified = Dense(3, activation='softmax')(classified)
        self.classifier = Model(cinputs, classified)

        self.autoencoder.compile(loss='binary_crossentropy',
                                 optimizer='adam')
        self.classifier.compile(loss='categorical_crossentropy',
                                 optimizer='adam')

    def save(self, auto_out, classif_out):
        """Persist both models' weights to the given paths."""
        self.autoencoder.save_weights(auto_out)
        self.classifier.save_weights(classif_out)

    def fit_auto(self, X):
        """Train the autoencoder to reconstruct X.
        (BUG FIX: ``self`` was missing from the signature.)"""
        self.autoencoder.fit(X, X)

    def fit_classif(self, X, y):
        """Train the classifier on (X, y).
        (BUG FIX: ``self`` was missing from the signature.)"""
        self.classifier.fit(X, y)

    def evaluate(self, X, y):
        """Return the classifier's evaluation metrics on (X, y).
        (BUG FIX: ``self`` was missing and the result was discarded.)"""
        return self.classifier.evaluate(X, y)

    def predict(self, X):
        """Return the classifier's softmax predictions for X.
        (BUG FIX: ``self`` was missing from the signature.)"""
        return self.classifier.predict(X)
# a variational version
# TODO: make variational!
class VClassifier:
    """Placeholder 'variational' counterpart of Classifier (TODO upstream:
    not actually variational yet).

    The original class defined ``__init__`` twice; only the second
    (model-loading) definition was effective, and the first referenced an
    undefined name ``timesteps`` so the builder path could never have run.
    The dead builder has been removed and the effective loading constructor
    kept as the class interface.
    """

    def __init__(self, auto, classif):
        """Load a previously saved autoencoder and classifier from disk.

        auto / classif: paths accepted by ``keras.models.load_model``.
        """
        self.autoencoder = load_model(auto)
        self.classifier = load_model(classif)

    def save(self, auto_out, classif_out):
        """Persist both models' weights to the given paths."""
        self.autoencoder.save_weights(auto_out)
        self.classifier.save_weights(classif_out)

    def fit_auto(self, X):
        """Train the autoencoder to reconstruct X.
        (BUG FIX: ``self`` was missing from the signature.)"""
        self.autoencoder.fit(X, X)

    def fit_classif(self, X, y):
        """Train the classifier on (X, y).
        (BUG FIX: ``self`` was missing from the signature.)"""
        self.classifier.fit(X, y)

    def evaluate(self, X, y):
        """Return the classifier's evaluation metrics on (X, y).
        (BUG FIX: ``self`` was missing and the result was discarded.)"""
        return self.classifier.evaluate(X, y)

    def predict(self, X):
        """Return the classifier's predictions for X.
        (BUG FIX: ``self`` was missing from the signature.)"""
        return self.classifier.predict(X)
| UTF-8 | Python | false | false | 3,968 | py | 90 | classifier.py | 85 | 0.595514 | 0.586442 | 0 | 122 | 31.45082 | 77 |
delimitrou/DeathStarBench | 17,145,509,476,209 | 3bd3718b51792fd331777af130fc9e727a2e2c40 | e95e60cb698b7b2e7622b33643df944c0362c02c | /mediaMicroservices/gen-py/media_service/CastInfoService.py | 378fd2ab5778816c783d45a800793a73d3a56879 | [
"Apache-2.0",
"GPL-1.0-or-later"
]
| permissive | https://github.com/delimitrou/DeathStarBench | 5ae251e87052a83f0daadfe2f60231ee5e999dda | ef9823b441aec5a8133304f1cc322f882f767812 | refs/heads/master | 2023-09-01T02:23:38.342474 | 2023-08-28T17:37:44 | 2023-08-28T17:37:44 | 180,669,714 | 576 | 364 | Apache-2.0 | false | 2023-09-01T17:20:13 | 2019-04-10T21:57:24 | 2023-08-29T22:48:43 | 2023-09-01T17:20:12 | 129,490 | 568 | 328 | 65 | C | false | false | #
# Autogenerated by Thrift Compiler (0.12.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
    """Abstract CastInfoService interface (Thrift-generated).

    Concrete server handlers implement these methods; ``Client`` provides
    the RPC implementation over a Thrift transport.
    """
    def WriteCastInfo(self, req_id, cast_info_id, name, gender, intro, carrier):
        """Store one cast-member record.

        Parameters:
         - req_id: request id (i64), used for tracing/correlation
         - cast_info_id: unique id (i64) of the cast member
         - name: cast member name (string)
         - gender: boolean gender flag (encoding not defined in this file)
         - intro: free-text introduction (string)
         - carrier: string-to-string map (presumably a trace-context carrier — confirm against callers)
        """
        pass
    def ReadCastInfo(self, req_id, cast_ids, carrier):
        """Fetch the cast-member records for the given list of ids.

        Parameters:
         - req_id: request id (i64), used for tracing/correlation
         - cast_ids: list of cast-member ids (i64) to look up
         - carrier: string-to-string map (presumably a trace-context carrier — confirm against callers)
        """
        pass
class Client(Iface):
    """Synchronous Thrift RPC client for CastInfoService.

    Each call serializes a ``*_args`` struct over the output protocol,
    flushes the transport, then blocks reading the matching ``*_result``
    struct; server-declared exceptions are re-raised locally.
    """
    def __init__(self, iprot, oprot=None):
        # With a single protocol, it serves as both input and output.
        self._iprot = self._oprot = iprot
        if oprot is not None:
            self._oprot = oprot
        self._seqid = 0
    def WriteCastInfo(self, req_id, cast_info_id, name, gender, intro, carrier):
        """Store one cast-member record (blocking round-trip).

        Parameters:
         - req_id: request id for tracing
         - cast_info_id: cast member id
         - name: cast member name
         - gender: boolean gender flag
         - intro: free-text introduction
         - carrier: string map (presumably a trace-context carrier)
        """
        self.send_WriteCastInfo(req_id, cast_info_id, name, gender, intro, carrier)
        self.recv_WriteCastInfo()
    def send_WriteCastInfo(self, req_id, cast_info_id, name, gender, intro, carrier):
        # Serialize the call frame: message header followed by the args struct.
        self._oprot.writeMessageBegin('WriteCastInfo', TMessageType.CALL, self._seqid)
        args = WriteCastInfo_args()
        args.req_id = req_id
        args.cast_info_id = cast_info_id
        args.name = name
        args.gender = gender
        args.intro = intro
        args.carrier = carrier
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_WriteCastInfo(self):
        # Read the reply; transport-level and declared exceptions are raised.
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = WriteCastInfo_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.se is not None:
            raise result.se
        return
    def ReadCastInfo(self, req_id, cast_ids, carrier):
        """Fetch cast-member records for the given ids (blocking round-trip).

        Parameters:
         - req_id: request id for tracing
         - cast_ids: list of cast member ids
         - carrier: string map (presumably a trace-context carrier)
        """
        self.send_ReadCastInfo(req_id, cast_ids, carrier)
        return self.recv_ReadCastInfo()
    def send_ReadCastInfo(self, req_id, cast_ids, carrier):
        # Serialize the call frame: message header followed by the args struct.
        self._oprot.writeMessageBegin('ReadCastInfo', TMessageType.CALL, self._seqid)
        args = ReadCastInfo_args()
        args.req_id = req_id
        args.cast_ids = cast_ids
        args.carrier = carrier
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_ReadCastInfo(self):
        # Read the reply; a void result here means a protocol violation.
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = ReadCastInfo_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.se is not None:
            raise result.se
        raise TApplicationException(TApplicationException.MISSING_RESULT, "ReadCastInfo failed: unknown result")
class Processor(Iface, TProcessor):
    """Server-side dispatcher for CastInfoService (Thrift-generated).

    Reads one RPC frame from the input protocol, routes it by method name
    to the wrapped ``handler`` (an Iface implementation) and writes the
    reply or exception frame back on the output protocol.
    """
    def __init__(self, handler):
        self._handler = handler
        # Method-name -> processor-method dispatch table.
        self._processMap = {}
        self._processMap["WriteCastInfo"] = Processor.process_WriteCastInfo
        self._processMap["ReadCastInfo"] = Processor.process_ReadCastInfo
    def process(self, iprot, oprot):
        """Process a single incoming message; unknown methods get an
        UNKNOWN_METHOD exception frame."""
        (name, type, seqid) = iprot.readMessageBegin()
        if name not in self._processMap:
            iprot.skip(TType.STRUCT)
            iprot.readMessageEnd()
            x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
            oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
            x.write(oprot)
            oprot.writeMessageEnd()
            oprot.trans.flush()
            return
        else:
            self._processMap[name](self, seqid, iprot, oprot)
        return True
    def process_WriteCastInfo(self, seqid, iprot, oprot):
        args = WriteCastInfo_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = WriteCastInfo_result()
        try:
            self._handler.WriteCastInfo(args.req_id, args.cast_info_id, args.name, args.gender, args.intro, args.carrier)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            # Transport failures must propagate to the server loop.
            raise
        except ServiceException as se:
            # Declared service exception: returned to the client in the result.
            msg_type = TMessageType.REPLY
            result.se = se
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            # Any other error is masked as a generic internal error.
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("WriteCastInfo", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_ReadCastInfo(self, seqid, iprot, oprot):
        args = ReadCastInfo_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = ReadCastInfo_result()
        try:
            result.success = self._handler.ReadCastInfo(args.req_id, args.cast_ids, args.carrier)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            # Transport failures must propagate to the server loop.
            raise
        except ServiceException as se:
            # Declared service exception: returned to the client in the result.
            msg_type = TMessageType.REPLY
            result.se = se
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            # Any other error is masked as a generic internal error.
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("ReadCastInfo", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class WriteCastInfo_args(object):
    """Argument struct for WriteCastInfo (Thrift-generated).

    Attributes:
     - req_id: request id (i64)
     - cast_info_id: cast member id (i64)
     - name: cast member name (string)
     - gender: boolean gender flag
     - intro: free-text introduction (string)
     - carrier: map<string, string> (presumably trace-context carrier)
    """
    def __init__(self, req_id=None, cast_info_id=None, name=None, gender=None, intro=None, carrier=None,):
        self.req_id = req_id
        self.cast_info_id = cast_info_id
        self.name = name
        self.gender = gender
        self.intro = intro
        self.carrier = carrier
    def read(self, iprot):
        """Deserialize this struct from *iprot* (field-by-field fallback when
        the C fast-path decoder is unavailable)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            # Fields are matched by id; type mismatches are skipped for
            # forward/backward compatibility.
            if fid == 1:
                if ftype == TType.I64:
                    self.req_id = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I64:
                    self.cast_info_id = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.BOOL:
                    self.gender = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.STRING:
                    self.intro = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.MAP:
                    self.carrier = {}
                    (_ktype260, _vtype261, _size259) = iprot.readMapBegin()
                    for _i263 in range(_size259):
                        _key264 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        _val265 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.carrier[_key264] = _val265
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('WriteCastInfo_args')
        if self.req_id is not None:
            oprot.writeFieldBegin('req_id', TType.I64, 1)
            oprot.writeI64(self.req_id)
            oprot.writeFieldEnd()
        if self.cast_info_id is not None:
            oprot.writeFieldBegin('cast_info_id', TType.I64, 2)
            oprot.writeI64(self.cast_info_id)
            oprot.writeFieldEnd()
        if self.name is not None:
            oprot.writeFieldBegin('name', TType.STRING, 3)
            oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
            oprot.writeFieldEnd()
        if self.gender is not None:
            oprot.writeFieldBegin('gender', TType.BOOL, 4)
            oprot.writeBool(self.gender)
            oprot.writeFieldEnd()
        if self.intro is not None:
            oprot.writeFieldBegin('intro', TType.STRING, 5)
            oprot.writeString(self.intro.encode('utf-8') if sys.version_info[0] == 2 else self.intro)
            oprot.writeFieldEnd()
        if self.carrier is not None:
            oprot.writeFieldBegin('carrier', TType.MAP, 6)
            oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.carrier))
            for kiter266, viter267 in self.carrier.items():
                oprot.writeString(kiter266.encode('utf-8') if sys.version_info[0] == 2 else kiter266)
                oprot.writeString(viter267.encode('utf-8') if sys.version_info[0] == 2 else viter267)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared, so nothing to validate.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        # Structural equality: same class and same attribute values.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Register the generated struct and attach its Thrift field spec; each
# tuple is (field-id, wire type, name, type args, default).
all_structs.append(WriteCastInfo_args)
WriteCastInfo_args.thrift_spec = (
    None,  # 0
    (1, TType.I64, 'req_id', None, None, ),  # 1
    (2, TType.I64, 'cast_info_id', None, None, ),  # 2
    (3, TType.STRING, 'name', 'UTF8', None, ),  # 3
    (4, TType.BOOL, 'gender', None, None, ),  # 4
    (5, TType.STRING, 'intro', 'UTF8', None, ),  # 5
    (6, TType.MAP, 'carrier', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ),  # 6
)
class WriteCastInfo_result(object):
    """Result struct for WriteCastInfo (Thrift-generated).

    Attributes:
     - se: ServiceException raised by the handler, or None on success
       (the method itself returns void).
    """
    def __init__(self, se=None,):
        self.se = se
    def read(self, iprot):
        """Deserialize this struct from *iprot* (field-by-field fallback when
        the C fast-path decoder is unavailable)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.se = ServiceException()
                    self.se.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        """Serialize this struct to *oprot*; the exception field is omitted
        when None."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('WriteCastInfo_result')
        if self.se is not None:
            oprot.writeFieldBegin('se', TType.STRUCT, 1)
            self.se.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared, so nothing to validate.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        # Structural equality: same class and same attribute values.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
all_structs.append(WriteCastInfo_result)
WriteCastInfo_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'se', [ServiceException, None], None, ), # 1
)
class ReadCastInfo_args(object):
"""
Attributes:
- req_id
- cast_ids
- carrier
"""
def __init__(self, req_id=None, cast_ids=None, carrier=None,):
self.req_id = req_id
self.cast_ids = cast_ids
self.carrier = carrier
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.req_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.cast_ids = []
(_etype271, _size268) = iprot.readListBegin()
for _i272 in range(_size268):
_elem273 = iprot.readI64()
self.cast_ids.append(_elem273)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.carrier = {}
(_ktype275, _vtype276, _size274) = iprot.readMapBegin()
for _i278 in range(_size274):
_key279 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val280 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.carrier[_key279] = _val280
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('ReadCastInfo_args')
if self.req_id is not None:
oprot.writeFieldBegin('req_id', TType.I64, 1)
oprot.writeI64(self.req_id)
oprot.writeFieldEnd()
if self.cast_ids is not None:
oprot.writeFieldBegin('cast_ids', TType.LIST, 2)
oprot.writeListBegin(TType.I64, len(self.cast_ids))
for iter281 in self.cast_ids:
oprot.writeI64(iter281)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.carrier is not None:
oprot.writeFieldBegin('carrier', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.carrier))
for kiter282, viter283 in self.carrier.items():
oprot.writeString(kiter282.encode('utf-8') if sys.version_info[0] == 2 else kiter282)
oprot.writeString(viter283.encode('utf-8') if sys.version_info[0] == 2 else viter283)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(ReadCastInfo_args)
ReadCastInfo_args.thrift_spec = (
None, # 0
(1, TType.I64, 'req_id', None, None, ), # 1
(2, TType.LIST, 'cast_ids', (TType.I64, None, False), None, ), # 2
(3, TType.MAP, 'carrier', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3
)
class ReadCastInfo_result(object):
"""
Attributes:
- success
- se
"""
def __init__(self, success=None, se=None,):
self.success = success
self.se = se
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype287, _size284) = iprot.readListBegin()
for _i288 in range(_size284):
_elem289 = CastInfo()
_elem289.read(iprot)
self.success.append(_elem289)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.se = ServiceException()
self.se.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('ReadCastInfo_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter290 in self.success:
iter290.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.se is not None:
oprot.writeFieldBegin('se', TType.STRUCT, 1)
self.se.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(ReadCastInfo_result)
ReadCastInfo_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [CastInfo, None], False), None, ), # 0
(1, TType.STRUCT, 'se', [ServiceException, None], None, ), # 1
)
fix_spec(all_structs)
del all_structs
| UTF-8 | Python | false | true | 21,033 | py | 800 | CastInfoService.py | 395 | 0.552227 | 0.539058 | 0 | 592 | 34.527027 | 134 |
lijyigithub/leetcode | 360,777,273,543 | 33955c806c9a25b1e04883941e3ce951ca7435b6 | dae0a181a09abe51d84267f797fed9e4da38e7d4 | /1_Two_Sum/two_sum.py | 06a6fb693a6e11dc00b0456c3710a2ea8e050cd6 | []
| no_license | https://github.com/lijyigithub/leetcode | f922ee28b187d272a03c8b0bff43a8443d0ed18f | 41db86171719fda9be621f79dfb201a52abdaf04 | refs/heads/master | 2016-06-05T13:00:16.005377 | 2016-03-24T18:20:15 | 2016-03-24T18:20:15 | 54,652,892 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution(object):
def twoSum(self, nums, target):
temp = dict()
for i in range(0, len(nums)):
d = target - nums[i]
if d in temp:
return [temp[d], i]
temp[nums[i]] = i
| UTF-8 | Python | false | false | 194 | py | 4 | two_sum.py | 2 | 0.57732 | 0.572165 | 0 | 8 | 22.875 | 32 |
LoveRL/BAEKJOON-online-judge | 6,279,242,221,298 | 7f988d0aaad112d97c465e70806f0d8295e8729b | 0eb83bd6cd229758c38e8ff097db49af7cf4b762 | /Solutions_Part1/2609번_최대공약수와 최소공배수.py | 2396be8323c5312bbf2d1574e0461175c170d059 | []
| no_license | https://github.com/LoveRL/BAEKJOON-online-judge | d1d90d7fabd6056f35e1200277c03359f1d630b1 | 89477e4ccc5a0e6a2cb771f91cf2371e7390a7d3 | refs/heads/master | 2021-01-25T22:36:57.071990 | 2020-02-26T08:46:19 | 2020-02-26T08:46:19 | 243,208,969 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | a,b=input().split()
a=int(a); b=int(b)
gcd=lambda a,b : a if not b else gcd(b, a%b)
gcd_1=gcd(a,b)
lcm=int((a*b)/gcd_1)
print(gcd_1)
print(lcm)
| UTF-8 | Python | false | false | 155 | py | 246 | 2609번_최대공약수와 최소공배수.py | 245 | 0.567742 | 0.548387 | 0 | 9 | 15.222222 | 44 |
jpraychev/portfolio | 1,039,382,093,159 | 7363049eb208ae2cfa6f0d1b1087e1ec723b7f4b | 72e8ed7d2c48bc8665c2e4142cee94672721a314 | /jraychev_website/accounts/views.py | 4815d4fb9235969dfc0d37f2458001255d50953f | []
| no_license | https://github.com/jpraychev/portfolio | 95992531c85d33eae38b0c979088d068a70a46b3 | 2509a3c37dd0d53339ad5eb943156488d5335172 | refs/heads/master | 2023-06-05T02:38:52.612773 | 2021-06-29T19:21:56 | 2021-06-29T19:21:56 | 340,250,099 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import CreateView, TemplateView, RedirectView, UpdateView, FormView
from django.contrib.auth.views import LoginView, LogoutView
from django.http import HttpResponse, Http404, HttpResponseRedirect, HttpResponsePermanentRedirect
from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.urls import reverse_lazy, reverse
from .models import CustomUser
from .forms import CustomUserCreationForm, CustomUserChangeForm
import re
from django.core.mail import send_mail
from django.contrib.auth import authenticate
from django.shortcuts import get_object_or_404
def username_valid(username):
invalid_characters = re.compile('(?![a-zA-Z0-9.@_]).')
valid_email = re.compile(r'(^[\w\-\.]+@[\w]+\.[\w]{1,5}$)')
if not invalid_characters.search(username):
if re.match(valid_email, username):
return True
return False
class SignUpView(LoginRequiredMixin, CreateView):
form_class = CustomUserCreationForm
success_url = reverse_lazy('login')
template_name = 'accounts/signup.html'
def form_valid(self, form):
# messages.success(self.request, 'You have registered successfully. You can now log in.')
user = form.save()
user.is_active = False
user.save()
return super().form_valid(form)
# user.email holds the email
send_mail(
'Subject here',
'Here is the message.',
'jraychevdjango@gmail.com',
[user.email],
fail_silently=False,
)
return HttpResponseRedirect(reverse('login'))
class ProfileView(LoginRequiredMixin, TemplateView):
template_name = 'accounts/profile.html'
login_url = reverse_lazy('login')
class ProfileUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = CustomUser
form_class = CustomUserChangeForm
template_name = 'accounts/profile_update.html'
success_url ="/"
def test_func(self):
# get requested user from URL using regex - OLD - can be removed
# regex = re.compile('(/accounts/)(\w+)(/update)')
# m = regex.match(self.request.path)
# requested_user = m.group(2)
requested_user = self.kwargs['slug']
if not self.request.user.username == requested_user:
return False
return True
class LoginView(LoginView):
template_name = 'accounts/login.html'
def post(self, request, *args, **kwargs):
form = self.form_class(request, request.POST)
username = request.POST['username']
password = request.POST['password']
# Username custom validation
if not username_valid(username):
print('Username validation failed. Redirecting...')
return render(request, self.template_name, {'form': form})
current_user = CustomUser.objects.filter(email=username).values_list('username', flat=True)
if current_user:
get_user = get_object_or_404(current_user)
user = authenticate(username=get_user, password=password)
else:
print('User does no exists')
return render(request, self.template_name, {'form': form})
if user is not None:
if user.is_active:
login(request, user)
if (self.request.GET.get('next') is None):
# messages.success(request, 'You have successfully logged in.')
return HttpResponseRedirect('/')
else:
# messages.success(request, 'You have successfully logged in.')
return HttpResponseRedirect(self.request.GET.get('next'))
else:
# messages.error(request, 'Something went wront. Contact the system administrator!')
return HttpResponseRedirect('/')
else:
# messages.error(request, 'Something went wront. Contact the system administrator!')
return HttpResponseRedirect('/')
return render(request, self.template_name, {'form': form})
class LogoutView(LoginRequiredMixin, RedirectView):
template_name = 'accounts/logout.html'
def get(self, request):
return render(request, 'accounts/logout.html')
def post(self, request):
logout(request)
# messages.success(request, 'You have successfully logged out. Log in <a href = ' + str(reverse_lazy('login')) + '> again</a>')
return HttpResponseRedirect('/')
| UTF-8 | Python | false | false | 4,628 | py | 57 | views.py | 29 | 0.649957 | 0.646932 | 0 | 126 | 35.714286 | 129 |
venamax/dataincubator | 3,040,836,885,572 | 241f175aae865b3fcabd46e95f2886556c68a7ff | 282a8f786b8bd1d437c70e7aae5bb6926d9acfd4 | /skeleton/data.py | eb8fc28554503bd1df9f7458403556e6f963f3fb | []
| no_license | https://github.com/venamax/dataincubator | 7e94791b92925826d089177dcda93b9bcfb28a37 | 88bc238abf5492a32e1bd85ac3bb4dc8b0ce486e | refs/heads/master | 2021-01-10T02:11:35.950275 | 2016-03-21T20:05:20 | 2016-03-21T20:05:20 | 54,677,844 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # flake8: noqa
"""
This file holds sample data for validation, it is visible to fellows.
Usually this is given for modelling problems involving parsing raw data.
"""
sample_jsons_for_validation = [
{"hip": "broken", "shoulder": "intact", "wrist": "missing"},
{"hip": "intact", "shoulder": "fragmented", "wrist": "intact"},
{"hip": "intact", "shoulder": "intact", "wrist": "intact"},
]
| UTF-8 | Python | false | false | 403 | py | 33 | data.py | 18 | 0.640199 | 0.637717 | 0 | 11 | 35.181818 | 72 |
DavidMachineLearning/Quadcopter-RL | 249,108,105,129 | 2ceceb5521cdb12fd844a16d1e5572084b65eeb4 | ef05e5f0a9c0722a63137bac3e5ff5856a377007 | /ActorCritic.py | 3f27a4d91c1d72f45e4a9bbab25021018dbe7017 | [
"MIT"
]
| permissive | https://github.com/DavidMachineLearning/Quadcopter-RL | a141b1ce0726dbb9e844351c428c53d4538452b5 | e78da0d1156d5631fa3946087318e0ab8d896620 | refs/heads/master | 2020-04-13T13:17:18.152381 | 2019-01-16T20:58:32 | 2019-01-16T20:58:32 | 163,225,559 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as tf
import numpy as np
class Actor:
"""Actor Model."""
def __init__(self, state_size, action_size, action_low, action_high):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
action_low (array): Min value of each action dimension
action_high (array): Max value of each action dimension
"""
self.state_size = state_size
self.action_size = action_size
self.action_low = action_low
self.action_high = action_high
self.action_range = self.action_high - self.action_low
self.model = None
self.train_fn = None
self._build_model()
def _build_model(self):
"""Build an actor (policy) network that maps states -> actions."""
# Define input layer (as a placeholder tensor)
states = tf.keras.Input(shape=(self.state_size,), name='states')
# Add hidden layers
net = tf.keras.layers.Dense(units=32,kernel_regularizer=tf.keras.regularizers.L1L2(l2=1e-6))(states)
net = tf.keras.layers.BatchNormalization()(net)
net = tf.keras.layers.Activation('relu')(net)
net = tf.keras.layers.Dense(units=64,kernel_regularizer=tf.keras.regularizers.L1L2(l2=1e-6))(net)
net = tf.keras.layers.BatchNormalization()(net)
net = tf.keras.layers.Activation('relu')(net)
net = tf.keras.layers.Dense(units=128,kernel_regularizer=tf.keras.regularizers.L1L2(l2=1e-6))(net)
net = tf.keras.layers.BatchNormalization()(net)
net = tf.keras.layers.Activation('relu')(net)
net = tf.keras.layers.Dense(units=128, activation="relu")(net)
# Add final output layer with sigmoid activation
raw_actions = tf.keras.layers.Dense(units=self.action_size, activation='sigmoid',
name='raw_actions')(net)
# Scale [0, 1] output for each action dimension to proper range
actions = tf.keras.layers.Lambda(lambda x: (x * self.action_range) + self.action_low,
name='actions')(raw_actions)
# Create Keras model
self.model = tf.keras.Model(inputs=states, outputs=actions)
# Define loss function using action value (Q value) gradients
action_gradients = tf.keras.layers.Input(shape=(self.action_size,))
loss = tf.keras.backend.mean(-action_gradients * actions)
# Define optimizer and training function
optimizer = tf.keras.optimizers.Adam(lr=6.5e-4)
updates_op = optimizer.get_updates(params=self.model.trainable_weights, loss=loss)
self.train_fn = tf.keras.backend.function(inputs=[self.model.input, action_gradients,
tf.keras.backend.learning_phase()],
outputs=[], updates=updates_op)
class Critic:
"""Critic Model."""
def __init__(self, state_size, action_size):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
"""
self.state_size = state_size
self.action_size = action_size
self.model = None
self.get_action_gradients = None
self._build_model()
def _build_model(self):
"""Build a critic network that maps (state, action) pairs -> Q-values."""
# Define input layers (as placeholder tensors)
states = tf.keras.layers.Input(shape=(self.state_size,), name='states')
actions = tf.keras.layers.Input(shape=(self.action_size,), name='actions')
# Add hidden layer(s) for state pathway
net_states = tf.keras.layers.Dense(units=32,kernel_regularizer=tf.keras.regularizers.L1L2(l2=1e-6))(states)
net_states = tf.keras.layers.BatchNormalization()(net_states)
net_states = tf.keras.layers.Activation('relu')(net_states)
net_states = tf.keras.layers.Dense(units=64, activation='relu')(net_states)
# Add hidden layer(s) for action pathway
net_actions = tf.keras.layers.Dense(units=32,kernel_regularizer=tf.keras.regularizers.L1L2(l2=1e-6))(actions)
net_actions = tf.keras.layers.BatchNormalization()(net_actions)
net_actions = tf.keras.layers.Activation('relu')(net_actions)
net_actions = tf.keras.layers.Dense(units=64, activation='relu')(net_actions)
# Combine state and action pathways
net = tf.keras.layers.Add()([net_states, net_actions])
net = tf.keras.layers.Activation('relu')(net)
# Add final output layer to prduce action values (Q values)
q_values = tf.keras.layers.Dense(units=1, name='q_values')(net)
# Create Keras model
self.model = tf.keras.Model(inputs=[states, actions], outputs=q_values)
# Define optimizer and compile model for training with built-in loss function
optimizer = tf.keras.optimizers.Adam(lr=1e-4)
self.model.compile(optimizer=optimizer, loss='mse')
# Compute action gradients (derivative of Q values w.r.t. to actions)
action_gradients = tf.keras.backend.gradients(q_values, actions)
# Define an additional function to fetch action gradients (to be used by actor model)
self.get_action_gradients = tf.keras.backend.function(inputs=[*self.model.input,
tf.keras.backend.learning_phase()],
outputs=action_gradients)
| UTF-8 | Python | false | false | 5,724 | py | 5 | ActorCritic.py | 2 | 0.616702 | 0.607792 | 0 | 118 | 47.508475 | 117 |
SwampStats/visProj | 12,721,693,170,786 | d25945c26fa759466edfae838b96f7e7e3bcc0f7 | ce2ec082608e5f0f95a615b4fd35f041e1c81c56 | /rw_visual.py | b9e351be916f19aaa88fe6b6fd2333602629f431 | []
| no_license | https://github.com/SwampStats/visProj | 226c60cebf7536d8c11a1462d44f359f2fa07fcf | 1d02341a97d0b0c6e6d893376af268b6081cff96 | refs/heads/master | 2021-05-03T07:48:59.632530 | 2018-02-18T02:58:46 | 2018-02-18T02:58:46 | 120,555,972 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import matplotlib.pyplot as plt
from random_walk import RandomWalk
# Make a randomw walk, and plot the points.
rw = RandomWalk()
rw.fill_walk()
plt.figure(figsize=(20, 12))
#plt.scatter(rw.x_values, rw.y_values, s=1)
#now try with plot
plt.plot(rw.x_values, rw.y_values, linewidth=1)
plt.show()
| UTF-8 | Python | false | false | 299 | py | 3 | rw_visual.py | 2 | 0.725753 | 0.705686 | 0 | 13 | 21.923077 | 47 |
aichi-t/leetcode | 9,517,647,537,206 | 6003fc6342e2249e4d8a1928443aac6e7d77f22e | 8244306e8c31dcd598ade6bca2e7d6568045fbc6 | /332.Coin_Change.py | a54288910db0166d1691396bb97555224781b16d | []
| no_license | https://github.com/aichi-t/leetcode | 57463acbc32975a0ec6d637170bdb615f175848e | f8d0cb0c33e365ac21c07eb25a41b49a7f7e6cce | refs/heads/master | 2020-08-01T20:04:02.439248 | 2020-06-27T00:13:56 | 2020-06-27T00:13:56 | 211,100,475 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def coinChange(coins, amount):
memo = [0] + [float('inf')]*amount
for c in coins:
for i in range(1, amount+1):
if i - c >= 0:
memo[i] = min(memo[i], memo[i-c]+1)
return memo[-1] if memo[-1] != float('inf') else -1
coins = [1, 2, 5]
amount = 11
coinChange(coins, amount)
| UTF-8 | Python | false | false | 336 | py | 53 | 332.Coin_Change.py | 52 | 0.494048 | 0.455357 | 0 | 14 | 22 | 55 |
potomatoo/TIL | 19,267,223,319,447 | 0abfadd8e93402984638d14df6527060cf40d389 | 37c3b81ad127c9e3cc26fa9168fda82460ca9bda | /SW_expert/sw_5102_노드의거리.py | 5e83df39e9645929070a5b54d6c7d574ae3925f4 | []
| no_license | https://github.com/potomatoo/TIL | 5d85b69fdaed68966db7cfe2a565b7c64ed3e816 | 395dc190fa13e5ed036e1e3c7d9e0bc2e1ee4d6c | refs/heads/master | 2021-07-08T16:19:40.410097 | 2021-04-19T02:33:40 | 2021-04-19T02:33:40 | 238,872,774 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.stdin = open('./input/input_5102.txt', 'r')
from _collections import deque
TC = int(input())
for tc in range(TC):
V, E = map(int, input().split())
G = [[] for _ in range(V+1)]
visit = [0] * (V+1)
for i in range(E):
v, u = map(int, input().split())
G[v].append(u)
G[u].append(v)
start, end = map(int, input().split())
Q = deque()
Q.append((start, 0))
visit[start] = 1
flag = True
while Q:
q, d = Q.popleft()
if q == end:
print('#{} {}'.format(tc+1, d))
flag = False
break
for w in G[q]:
if visit[w] == 1:
continue
visit[w] = 1
Q.append((w, d+1))
if flag:
print('#{} {}'.format(tc+1, 0))
| UTF-8 | Python | false | false | 796 | py | 393 | sw_5102_노드의거리.py | 380 | 0.442211 | 0.423367 | 0 | 32 | 23.71875 | 47 |
Nani446/blog-gjango-ar | 9,826,885,206,356 | 79d08e4aedd15dc0b1a815c35e8ee195beb318a9 | 57f7579d8bbbfaf6fc1bcd0daed2b732103f7ac7 | /myapp/admin.py | cad6f6a6b44da0a8b0f6f301018f54b2b84373af | []
| no_license | https://github.com/Nani446/blog-gjango-ar | 06444a34fb740e005045c844809bb1384943721c | 2020c15a3f060d8e3660118cda61c1c29670ffcb | refs/heads/main | 2023-07-19T03:31:47.788126 | 2021-09-12T20:35:18 | 2021-09-12T20:35:18 | 405,741,606 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from myapp import models
# Register your models here.
admin.site.register(models.Student)
admin.site.register(models.Teacher)
admin.site.register(models.Fach)
admin.site.register(models.Library)
admin.site.register(models.Father)
admin.site.register(models.Author)
admin.site.register(models.Book) | UTF-8 | Python | false | false | 331 | py | 14 | admin.py | 13 | 0.824773 | 0.824773 | 0 | 11 | 29.181818 | 35 |
jmathes/assbible | 10,574,209,522,792 | 933cb3f3bc3fbe5031dc5f6bd6e7d165d7cb7365 | 3a4c9c60fed3fb12066f347de3e7b8270cd6eb8a | /grabass.py | 5159f8314953596d9ab6fff69cb4d09867474780 | []
| no_license | https://github.com/jmathes/assbible | bc09c39acc5e3299873cba921c56cba4035da790 | fee74fb2c5db1df521f3421c5e770dc0ce4e0623 | refs/heads/master | 2020-07-25T21:53:54.606606 | 2012-10-29T05:47:06 | 2012-10-29T05:47:06 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import re
from pprint import pprint
bibledir = os.path.join(os.path.dirname(__file__), "bible")
asslines = {}
asscounts = {}
ass_re = re.compile(r"\b[Aa][Ss][Ss]\b|\b[Aa][Ss][Ss][Ee][Ss]\b")
assert ass_re.search("ass")
assert ass_re.search("Ass")
assert ass_re.search("aSS")
assert ass_re.search("aSS")
assert ass_re.search(" ass ")
assert ass_re.search(" asses ")
assert ass_re.search("dumb ass")
assert ass_re.search("Rejoice greatly, O daughter of Zion; shout, O daughter of Jerusalem: behold, thy King cometh unto thee: he is just, and having salvation; lowly, and riding upon an ass, and upon a colt the foal of an ass.")
assert not ass_re.search("dick")
assert not ass_re.search("pass")
assert not ass_re.search("assyria")
for bookfile in os.listdir(bibledir):
with open(os.path.join(bibledir, bookfile), 'rb') as book:
bookname = book.readline()
asscounts[bookname] = 0
line = "asdf"
while line != "":
line = book.readline()
if not ass_re.search(line):
continue
asscounts[bookname] += 1
niceline = line.replace("\n", "")
niceline = niceline.replace("\r", "")
asslines[bookname[:-2] + " " + niceline[:niceline.find(" ") - 1]] = niceline[niceline.find(" ") + 1:]
pprint(asslines)
# pprint(asscounts)
| UTF-8 | Python | false | false | 1,345 | py | 4 | grabass.py | 2 | 0.628253 | 0.624535 | 0 | 39 | 33.487179 | 228 |
Matheus-SS/Python | 8,478,265,460,740 | 1aa90b951a837fd5bfbcdb718fd8efd06096c73f | a1f7ad37bd91d2a53fdbce0c8da7f9862a5ad458 | /agenda.py | 85ccdf4be626d71b277d5e88d58614c1a6637478 | []
| no_license | https://github.com/Matheus-SS/Python | 47cc7a6292038a98e069cc6584574c5b139a11ba | e604b4433aef84ba7779ad4b47b423fa9ab4ac2f | refs/heads/master | 2020-03-26T17:02:47.735979 | 2018-08-17T15:52:49 | 2018-08-17T15:52:49 | 145,138,788 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
lista = []
print("1 - Adicionar contato")
print("2 - Remover contato")
print("3 - Atualizar contato")
print("4 - Listar contato")
print("0 - Sair")
print()
while True:
n_digitado = int(input("O que deseja fazer:"))
print()
if(n_digitado==0):
break
if(n_digitado==1):
res=input("Deseja voltar o menu digite Y senao aperte qualquer "
"outra tecla para continuar:")
if (res == 'Y' or res == 'y'):
continue
nome = input("Digite nome:")
num = int(input("Digite numero de Telefone:"))
print()
lista.append([nome,num])
if(n_digitado == 2):
print(list(enumerate(lista)))
print()
res = input("Deseja voltar o menu digite Y senao aperte qualquer "
"outra tecla para continuar:")
if (res == 'Y' or res == 'y'):
continue
x = int(input("Digite o numero que vem antes do nome "
"para que seja excluido o contato:"))
lista.pop(x)
if(n_digitado==3):
res = input("Deseja voltar o menu digite Y senao aperte qualquer "
"outra tecla para continuar:")
if (res == 'Y' or res == 'y'):
continue
y = int(input("Qual contato dejesa alterar - digite o seu indice : "))
x = int(input("Se deseja alterar o NOME digite 1,alterar NUMERO digite 2: "))
if(x==1):
z=input("Digite o nome:")
lista[y][0] = z
if (x == 2):
z = int(input("Digite o Numero:"))
lista[y][1] = z
if(n_digitado==4):
res = input("Deseja voltar o menu digite Y senao aperte qualquer "
"outra tecla para continuar:")
if (res == 'Y' or res == 'y'):
continue
print()
print(list(enumerate(lista)))
| UTF-8 | Python | false | false | 1,844 | py | 36 | agenda.py | 36 | 0.522777 | 0.5141 | 0 | 52 | 34.403846 | 85 |
imsb-uke/bias-transfer-microscopy | 14,285,061,244,696 | 17995260549df554d6701596bddb507f57533cbc | 2c0aeaacaa818f83ce035ef261ca9cad14798521 | /debiasmedimg/debiasmedimg/cyclegan/util/evaluation.py | 4b3f929884f43df949fb27dc89ff282a2f3e0303 | [
"MIT"
]
| permissive | https://github.com/imsb-uke/bias-transfer-microscopy | 54e35df5829b69d6c2b9ce5a0a0d7d4df7dc3d93 | 7cfbdc9098b3ec725397d5a45affa77863c4fc46 | refs/heads/main | 2023-06-27T15:10:16.486063 | 2021-08-03T08:52:33 | 2021-08-03T08:52:33 | 304,248,427 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from scipy.linalg import sqrtm
from skimage.metrics import structural_similarity as ssim
import tensorflow as tf
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.inception_v3 import preprocess_input
def ssim_score(input1, input2):
"""
Calculate the mean structural similarity index (SSIM) between two images
:param input1: Image 1
:param input2: Image 2 (must have the same shape as Image 1)
:return: SSIM value
"""
ssim_value = ssim(input1, input2, gaussian_weights=True, multichannel=True)
return ssim_value
def scale_images(images, new_shape):
"""
Scale images to a specified shape
:param images: Images to re-scale
:param new_shape: Shape to scale the images to
"""
images_list = list()
for image in images:
# resize with nearest neighbor interpolation
new_image = tf.image.resize(image, new_shape, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# store
images_list.append(new_image.numpy())
return np.asarray(images_list)
def get_fid(set_a, set_b):
"""
Calculate the Fréchet Inception Distance (Evaluation metric conceptualised for GANs)
:param set_a: Set of images of domain A
:param set_b: Set of images of domain B
"""
# prepare the inception v3 model
model = InceptionV3(include_top=False, pooling='avg', input_shape=(299, 299, 3))
# Prepare images
images_a = scale_images(set_a, [299, 299])
images_b = scale_images(set_b, [299, 299])
images_a = preprocess_input(images_a)
images_b = preprocess_input(images_b)
# calculate activations
act1 = model.predict(images_a)
act2 = model.predict(images_b)
# calculate mean and covariance statistics
mu1, sigma1 = act1.mean(axis=0), np.cov(act1, rowvar=False)
mu2, sigma2 = act2.mean(axis=0), np.cov(act2, rowvar=False)
# calculate sum squared difference between means
ssdiff = np.sum((mu1 - mu2) ** 2.0)
# calculate sqrt of product between cov
covmean = sqrtm(sigma1.dot(sigma2))
# check and correct imaginary numbers from sqrt
if np.iscomplexobj(covmean):
covmean = covmean.real
# calculate score
score = ssdiff + np.trace(sigma1 + sigma2 - 2.0 * covmean)
return score
| UTF-8 | Python | false | false | 2,311 | py | 45 | evaluation.py | 28 | 0.691342 | 0.667532 | 0 | 63 | 35.666667 | 100 |
svillalc/WebScrapingUoc | 463,856,500,851 | 16f93b37e94e05df7001185bb93adf5685a62ef6 | eb3857e637e84724a2332e8731af0549bb1c5370 | /scraper/scraper.py | 16a8dab4d831e6351e5966b76ab664c7d4302b6c | []
| no_license | https://github.com/svillalc/WebScrapingUoc | 113b110940f07f78daab2cd57d93d39722edb1a6 | 0dc20b377605e641ff9cf766a4d3f77891f52760 | refs/heads/master | 2022-04-21T15:32:45.116156 | 2020-04-13T16:03:52 | 2020-04-13T16:03:52 | 253,291,066 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: cp1252 -*-
from bs4 import BeautifulSoup
import requests
import pandas as pd
import math
import time
# Headers para que idealista no bloquee el scraper
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "es-ES,es;q=0.9",
"Cache-Control": "no-cache",
"dnt": "1",
"Pragma": "no-cache",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"
}
# Creamos el objeto BeautifulSoup
req = requests.get("https://www.idealista.com/alquiler-viviendas/alicante-alacant-alicante/", headers=headers)
soup = BeautifulSoup(req.text, "html.parser")
# Numero de viviendas
required0 = soup.find_all("h1")
nviviendas = []
for i in required0:
nviviendas.append(i.get_text())
numero_viviendas = int(nviviendas[0].split()[0].replace(".", ""))
print(numero_viviendas)
# Numero de paginas a partir del numero total de viviendas
# Idealista muestra 30 viviendas por pagina
numero_paginas = math.ceil(numero_viviendas / 30)
# Inicializamos las columnas del dataset
precios = []
habitaciones = []
metros = []
# Recogemos la informacion de cada pagina.
# Introducimos un sleep para no saturar el servidor
for i in range(numero_paginas):
req2 = requests.get(
'https://www.idealista.com/alquiler-viviendas/alicante-alacant-alicante/pagina-' + str(i + 1) + '.htm',
headers=headers)
print(i + 1)
soup2 = BeautifulSoup(req2.text, "html.parser")
anuncios2 = soup2.find_all("div", "item-info-container")
# Listas temporales
precios2 = []
habitaciones2 = []
metros2 = []
# Lo recorremos asi para limpiar los datos erroneos antes de incluirlos en nuestras listas
for anuncio2 in anuncios2:
precios2.append(anuncio2.find("span", "item-price h2-simulated").text)
numhabs = anuncio2.find_all("span", "item-detail")[0].text
nummetros = anuncio2.find_all("span", "item-detail")[1].text
if "hab" not in numhabs:
if "m²" not in numhabs:
nummetros, numhabs = None, None
else:
nummetros = numhabs
numhabs = None
elif "m²" not in nummetros:
nummetros = None
habitaciones2.append(numhabs)
metros2.append(nummetros)
# Anyadimos las listas temporales a nuestras listas globales
precios = precios + precios2
habitaciones = habitaciones + habitaciones2
metros = metros + metros2
# Introducimos el sleep
time.sleep(10)
# Formamos el dataset
data = {"Precio (Euros/Mes)": precios,
"Habitaciones": habitaciones,
"Tamanyo (m2)": metros}
df = pd.DataFrame(data)
# Le damos formato a las columnas y calculamos una nueva (precio por metro cuadrado)
df["Precio (Euros/Mes)"] = list(map(lambda x: x.replace(".", "").split("€")[0], df["Precio (Euros/Mes)"]))
df["Tamanyo (m2)"] = list(map(lambda x: x.split()[0], df["Tamanyo (m2)"]))
df["Euros/m2"] = round(df["Precio (Euros/Mes)"].astype(int) / df["Tamanyo (m2)"].astype(int), 2)
# Exportamos el dataset
df.to_csv("dataset.csv") | WINDOWS-1252 | Python | false | false | 3,298 | py | 3 | scraper.py | 1 | 0.663024 | 0.636308 | 0 | 96 | 33.322917 | 141 |
xiongjia/vim-fanfou | 13,039,520,714,886 | 817aaa09c52849496780b194445311f079d693a5 | 08a7def003486eb07873567a25daab69ab3fefcf | /plugin/vim_fanfou/vim_fanfou_unittest.py | 96f643120edb8879a31467876d0afb45b78f813d | [
"LicenseRef-scancode-unknown-license-reference",
"Vim"
]
| permissive | https://github.com/xiongjia/vim-fanfou | 49520b9a3554cf66e63e7f1093f1552b998774aa | 800b3181d658269985f6f642ac707198dc7f472c | refs/heads/master | 2020-05-01T11:33:14.634932 | 2014-07-26T03:41:29 | 2014-07-26T03:41:29 | 20,683,294 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
"""
vim_fanfou.vim_fanfou_unittest:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The entry of all unit tests.
NOTE:
We need the vim Python module.
Therefore, we must run these unit tests in VIM.
Steps:
1. Open "plugin/vim_fanfou.vim" in vim
2. Run :so %
3. call VimFanfouTests()
:copyright: (c) 2014 by xiong-jia.le ( lexiongjia@gmail.com )
:license: Vim license. See :help license
"""
import unittest
from . import misc_unittest
def vim_fanfou_tests():
test_loader = unittest.TestLoader()
suite = test_loader.loadTestsFromTestCase(misc_unittest.BaseTests)
unittest.TextTestRunner().run(suite)
| UTF-8 | Python | false | false | 669 | py | 16 | vim_fanfou_unittest.py | 12 | 0.641256 | 0.630792 | 0 | 25 | 25.72 | 70 |
asmrpuppy/talon_config | 8,607,114,463,525 | 67bd16fadbdd4a68a75727fd18660cab43c9014f | 2fff7c3428583e9da80c8c75579fd76e83a9746a | /misc/keys.py | 769749583cd5d054c50882bc174fa95ecfedbc40 | [
"MIT"
]
| permissive | https://github.com/asmrpuppy/talon_config | 59fb36ff4310eaa682b0aa78e6d62138631ccb16 | d30e02f30873986b899bd1f10efc7442c6bd6d22 | refs/heads/master | 2022-05-25T10:45:43.794255 | 2020-04-23T17:20:38 | 2020-04-23T17:26:46 | 270,017,394 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Basic key mapping.
Originally written by lunixbochs, version taken from the knausj_talon repo:
https://github.com/knausj85/knausj_talon/blob/d330a6eb1fbfcc13f99a732a097f220fd0c10950/code/keys.py
"""
from typing import Set, List
from talon import Module, Context, actions
from user.utils import multi_map, dictify
default_alphabet = "air bat cap drum each fine gust harp sit jury crunch look made near odd pit quench red sun trap urge vest whale plex yank zip"
# My setup has trouble with some words. Probably my accent.
modified_alphabet = (
default_alphabet.replace("air", "arch")
.replace("bat", "batch")
.replace("zip", "zen")
)
# chosen_alphabet = default_alphabet.split(" ")
chosen_alphabet = modified_alphabet.split(" ")
letters_string = "abcdefghijklmnopqrstuvwxyz"
# TODO: Use digits in number.py?
default_digits = "zero one two three four five six seven eight nine".split(" ")
ints = [str(i) for i in range(10)]
mod = Module()
mod.list("letter", desc="The spoken phonetic alphabet")
mod.list("symbol", desc="All symbols from the keyboard")
mod.list("arrow", desc="All arrow keys")
mod.list("standalone_arrow", desc="Arrow keys that can be spoken on their own")
mod.list("number", desc="All number keys")
mod.list("modifier", desc="All modifier keys")
mod.list("special", desc="All special keys")
@mod.capture
def modifiers(m) -> Set[str]:
"""Zero or more modifier keys"""
@mod.capture
def arrow(m) -> str:
"""One directional arrow key"""
@mod.capture
def arrows(m) -> str:
"""One or more arrows separate by a space"""
@mod.capture
def standalone_arrow(m) -> str:
"""One arrow that can be spoken on its own (without modifiers).
Standalone arrows are separated to avoid "up" being misrecognized.
"""
@mod.capture
def number_key(m) -> str:
"""One number key"""
@mod.capture
def number_keys(m) -> str:
"""Multiple number keys"""
@mod.capture
def letter(m) -> str:
"""One letter key"""
@mod.capture
def letters(m) -> list:
"""Multiple letter keys"""
@mod.capture
def symbol(m) -> str:
"""One symbol key"""
@mod.capture
def special(m) -> str:
"""One special key"""
@mod.capture
def any_key(m) -> str:
"""Any single key"""
@mod.capture
def keychord(m) -> str:
"""A single key with modifiers"""
@mod.capture
def character(m) -> str:
"""Any key that can be typed as a character."""
ctx = Context()
ctx.lists["self.modifier"] = {
"command": "cmd",
"control": "ctrl",
"troll": "ctrl",
"shift": "shift",
"schiff": "shift",
"sky": "shift",
"alt": "alt",
"option": "alt",
"super": "super",
}
ctx.lists["self.letter"] = dict(zip(chosen_alphabet, letters_string))
ctx.lists["self.symbol"] = multi_map(
{
("back tick", "grave"): "`",
("comma", "cam"): ",",
("dot", "period", "full stop", "point"): ".",
("semicolon", "semi"): ";",
("apostrophe", "quote"): "'",
("double quote", "dub quote", "speech mark", "speech"): '"',
# FIXME: slash and blash recognition conflicts
("forward slash", "slash"): "/",
("backslash", "blash"): "\\",
("minus", "dash"): "-",
("equals", "eek"): "=",
"plus": "+",
("question mark", "question", "quest"): "?",
"tilde": "~",
("exclamation", "bang"): "!",
("dollar sign", "dollar"): "$",
("underscore", "score"): "_",
("colon", "coal"): ":",
("asterisk", "star"): "*",
# "pound": "#",
"hash": "#",
"percent": "%",
"caret": "^",
"at sign": "@",
("ampersand", "amper"): "&",
"pipe": "|",
# Currency
"dollar": "$",
"pound": "£",
"euro": "€", # FIXME: comes out as "4"
# Brackets
("left square", "lack"): "[",
("right square", "rack"): "]",
("left paren", "lub"): "(",
("right paren", "rub"): ")",
("left brace", "lace"): "{",
("right brace", "race"): "}",
("left angle", "langle"): "<",
("right angle", "rangle"): ">",
}
)
ctx.lists["self.number"] = dict(zip(default_digits, ints))
basic_arrows = dictify(
[
#
"left",
"right",
"down",
]
)
ctx.lists["self.arrow"] = {
#
**basic_arrows,
"up": "up",
}
ctx.lists["self.standalone_arrow"] = {
#
**basic_arrows,
"pup": "up",
}
# TODO: Separate standalone arrow list, use "pup" or something similar to
# mitigate "up" misrecognition
simple_keys = dictify(
[
#
"tab",
"escape",
"enter",
"space",
"pageup",
"pagedown",
"backspace",
"delete",
"home",
"end",
]
)
alternate_keys = {
# b[ackward k]ill
"bill": "backspace",
# f[orward k]ill
"fill": "delete",
"scape": "escape",
"knock": "end",
# Explicitly don't allow "return" because it's a common programming keyword.
"slap": "enter",
# TODO: Extract compound keys, shouldn't really be here
"squares": "[ ] left",
"parens": "( ) left",
"braces": "{ } left",
"angles": "< > left",
}
keys = {**simple_keys, **alternate_keys}
ctx.lists["self.special"] = keys
@ctx.capture(rule="[{self.modifier}+]")
def modifiers(m) -> Set[str]:
try:
return set(m.modifier)
except AttributeError:
return set()
@ctx.capture(rule="{self.arrow}")
def arrow(m) -> str:
return m.arrow
@ctx.capture(rule="<self.arrow>+")
def arrows(m) -> str:
return m.arrow_list
@ctx.capture(rule="{self.standalone_arrow}")
def standalone_arrow(m) -> str:
return m.standalone_arrow
@ctx.capture(rule="numb <digits>")
def number_key(m):
return str(m.digits)
@ctx.capture(rule="numb <number>")
def number_keys(m):
return str(m.number)
@ctx.capture(rule="{self.letter}")
def letter(m):
return m.letter
@ctx.capture(rule="{self.special}")
def special(m):
return m.special
@ctx.capture(rule="{self.symbol}")
def symbol(m):
return m.symbol
@ctx.capture(
rule="(<self.arrow> | <number> | <self.letter> | <self.special> | <self.symbol>)"
)
def any_key(m) -> str:
return str(m[0])
@ctx.capture(rule="{self.modifier}+ <self.any_key>")
def keychord(m) -> str:
return "-".join(m.modifier_list + [m.any_key])
@ctx.capture(rule="{self.letter}+")
def letters(m):
return m.letter
@ctx.capture(rule="(<self.letter> | <self.symbol> | <self.number_key>)")
def character(m) -> str:
return m[0]
@mod.action_class
class Actions:
def modifier_key(modifier: str, key: str):
"""(TEMPORARY) Presses the modifier plus supplied number"""
res = "-".join([modifier, str(key)])
actions.key(res)
def uppercase_letters(chars: List[str]):
"""Inserts uppercase letters from list"""
# TODO: Do we want insert here? What if someone wants to press an
# uppercase char?
actions.insert("".join(chars).upper())
# TODO: Switch to many_keys
def many(keys: List[str]):
"""Press a list of keys in sequence."""
for key in keys:
actions.key(key)
def press_number(number: float):
"""Press each key in a number"""
# TODO: Allow leading zeros
for char in str(number):
actions.key(char)
| UTF-8 | Python | false | false | 7,339 | py | 60 | keys.py | 24 | 0.570883 | 0.566658 | 0 | 317 | 22.141956 | 146 |
terrifyzhao/educative4 | 15,152,644,660,465 | 5a781d3d8571098cefcc73c311464a7e681b7858 | cb410de77d8118ca6e43919a84402b3bdb99289a | /14_k_way_merge/4.py | d4adebfadb931fe34a43c01fb4ab786b3d43d824 | []
| no_license | https://github.com/terrifyzhao/educative4 | e4708ba4088a72bd2973ddaae33e7d4bf6fda7d6 | 4f2def0d328d160da52ffa7c8082763e34185ab2 | refs/heads/main | 2022-12-28T04:26:45.428774 | 2020-10-13T09:21:38 | 2020-10-13T09:21:38 | 302,783,542 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from heapq import *
def find_smallest_range(lists):
min_heap = []
max_num = 0
for list in lists:
max_num = max(max_num, list[0])
heappush(min_heap, (list[0], 0, list))
start = 0
end = 1000
while len(min_heap) == len(lists):
value, index, list = heappop(min_heap)
if end - start > max_num - value:
start = value
end = max_num
index += 1
if index < len(list):
heappush(min_heap, (list[index], index, list))
max_num = max(max_num, list[index])
return [start, end]
def main():
print("Smallest range is: " +
str(find_smallest_range([[1, 5, 8], [4, 12], [7, 8, 10]])))
main()
| UTF-8 | Python | false | false | 719 | py | 33 | 4.py | 32 | 0.513213 | 0.485396 | 0 | 34 | 20.147059 | 69 |
Divinreddy/michael_le_python_scripts | 11,149,735,113,358 | f867a3ec14186367de6befa65df6fa1c269b4687 | 8220f38acfb4e9cef6583175676b4a5e5acda862 | /le_print_hostNames_keys_Logs/le_print_hostNames_keys_Logs.py | ef5669e38d39bad71d3b9a6c04e333f757b268c2 | []
| no_license | https://github.com/Divinreddy/michael_le_python_scripts | 5fbf37f182fa4e0f40da428e1c6e7582378ef2e0 | a21932b68f4c5d94e645cdb33f7fe4199156990e | refs/heads/master | 2020-04-15T04:48:08.279453 | 2014-12-01T15:32:11 | 2014-12-01T15:32:11 | 164,397,017 | 0 | 1 | null | true | 2019-01-07T07:51:27 | 2019-01-07T07:51:26 | 2014-12-02T12:21:21 | 2014-12-01T15:32:23 | 72 | 0 | 0 | 0 | null | false | null | # coding: utf-8
#!/usr/bin/python
# THis script prints an alphabetically sorted list of host names, respective host keys, associated logs and log details
# The log details include: Token-based or agent-based, log name, log token and log key
# REQUIREMENTS - You must have the Account Key of the Logentries Account on which you wish to run this script.
# This script was written and tested on Python 2.7.5.
# TO RUN: python print_all_hostNames_keys_logs.py <ACCOUNT_KEY_HERE>
import urllib
import json
import sys
import os
import collections
import operator
ACCOUNT_KEY = ''
EXISTING_HOST_KEY = ''
HOST_NAMES_KEYS_DICT = {}
#gets host names
def get_host_name():
req = urllib.urlopen("http://api.logentries.com/" + ACCOUNT_KEY + '/hosts/')
response = json.load(req)
for hosts in response['list']:
HOST_NAMES_KEYS_DICT[hosts['key']] = hosts['name']
i=0
# the line below sorts the dictionary by the Values, which are the names of the Hosts.
for k, v in sorted(HOST_NAMES_KEYS_DICT.iteritems(), key=operator.itemgetter(1)):
#for k,v in HOST_NAMES_KEYS_DICT.items():
print "["+str(i) +"] " + v + ' - ' + k
i=i+1
get_log_name_and_token(k)
def get_log_name_and_token(host_key):
req = urllib.urlopen("http://api.logentries.com/" + ACCOUNT_KEY + '/hosts/' + host_key + '/')
response = json.load(req)
for log in response['list']:
if log['type']== 'agent':
print "\t"+ "AGENT path:" + log['filename'] + " Log key:" + log['key']
elif log['type']=='token':
print "\t"+"TOKEN Name:" +log['name'] + " Token:" + log['token'] + " Log key:" + log['key']
if __name__ == '__main__':
ACCOUNT_KEY = sys.argv[1]
get_host_name()
| UTF-8 | Python | false | false | 1,753 | py | 14 | le_print_hostNames_keys_Logs.py | 7 | 0.628066 | 0.623503 | 0 | 54 | 31.444444 | 120 |
habi/JahresRueckblick15 | 9,680,856,331,555 | 77847ec78bcae15c3ce63280f38bdd79c82db5c7 | e4ba193aca8160991388f6828c71b690ca1e09a9 | /geodata.py | d08d0acab3a783065c9b2fce38412d44f6abc339 | []
| no_license | https://github.com/habi/JahresRueckblick15 | 9c7d68420f95aeb165999ed6411771374f190a59 | 0541a915df5129ab007aafc2ed09e067f367acf9 | refs/heads/master | 2021-01-10T11:32:37.657126 | 2020-09-22T20:04:11 | 2020-09-22T20:04:11 | 46,274,178 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Plotting OpenPaths data on a map
'''
import numpy
import matplotlib.pyplot as plt
import matplotlib.dates as mpdates
#~ lat,lon,alt,date,device,os,version
#~ 49.9761505127,8.00768661499,86.5952682495,2014-01-18 16:17:04,"iPhone4,1",7.0.4,1.1
Lat, Lon, Altitude, Date, = numpy.loadtxt('openpaths_habi.csv', delimiter=',',
converters={3: mpdates.strpdate2num('%Y-%m-%d %H:%M:%S')},
skiprows=1, usecols=(0, 1, 2, 3), unpack=True)
plt.plot_date(Date, Altitude)
plt.xlabel('Date')
plt.ylabel('Altitude')
plt.show()
# We use [Folium](http://folium.readthedocs.org/en/latest/) to present the map...
import folium
WhichOne = 666
print 'Centering map on Lat: %s and Lon: %s' % (Lat[WhichOne], Lon[WhichOne])
whereabouts = folium.Map(location=[Lat[WhichOne], Lon[WhichOne]],
tiles='Stamen Toner')
for i in range(WhichOne-333,WhichOne+333):
whereabouts.simple_marker([Lat[i], Lon[i]], popup='Datapoint %s' % i)
whereabouts.save(path='map.html')
| UTF-8 | Python | false | false | 1,027 | py | 8 | geodata.py | 2 | 0.654333 | 0.583252 | 0 | 28 | 35.678571 | 86 |
nosarthur/cloud-harness | 7,911,329,780,773 | 06d2609968ac5b769d9519cae2e3d9c2e349d4b4 | c9909cc2f99c59105fea5af33e85903b5f1938e9 | /tests/test_user_model.py | 886180a33e564f2f41272c357ae2774f13164cbd | [
"MIT"
]
| permissive | https://github.com/nosarthur/cloud-harness | dbd32641be8e93d06f91fe9b1ba0621417f2576c | 8c4377860d35b94a89833b5700f374427b090b67 | refs/heads/master | 2022-12-14T11:32:10.659902 | 2017-08-24T04:49:24 | 2017-08-24T04:49:24 | 95,626,269 | 0 | 0 | MIT | false | 2022-12-07T23:59:58 | 2017-06-28T03:47:31 | 2018-03-07T05:41:08 | 2022-12-07T23:59:58 | 215 | 0 | 0 | 16 | JavaScript | false | false | import pytest
from app.models import User
class TestUserModel:
def test_password_setter(self):
u = User('test', 'a@b.com', password='cat')
assert u.password_hash
def test_no_password_getter(self):
u = User('test', 'a@b.com', password='cat')
with pytest.raises(AttributeError):
u.password
def test_password_verification(self):
u = User('test', 'a@b.com', password='cat')
assert u.verifyPassword('cat')
assert not u.verifyPassword('dog')
@pytest.mark.usefixtures('vanilla_app')
class TestUserDB:
def test_validate_token(self):
u = User.query.get(1)
token = u.encodeToken()
user_id = User.decodeToken(token)
assert user_id == u.id
assert User.validate('a@a.com', 'aaa') == u
| UTF-8 | Python | false | false | 803 | py | 27 | test_user_model.py | 22 | 0.608966 | 0.607721 | 0 | 29 | 26.689655 | 51 |
h4hany/yeet-the-leet | 15,496,242,016,892 | 4338f71fea86b5e5ae57048886d39b7747617ede | de01cb554c2292b0fbb79b4d5413a2f6414ea472 | /algorithms/Hard/132.palindrome-partitioning-ii.py | 8b9a40e961fb9225099fdee9e81af001f4148f04 | []
| no_license | https://github.com/h4hany/yeet-the-leet | 98292017eadd3dde98a079aafcd7648aa98701b4 | 563d779467ef5a7cc85cbe954eeaf3c1f5463313 | refs/heads/master | 2022-12-10T08:35:39.830260 | 2020-09-02T23:12:15 | 2020-09-02T23:12:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
# @lc app=leetcode id=132 lang=python3
#
# [132] Palindrome Partitioning II
#
# https://leetcode.com/problems/palindrome-partitioning-ii/description/
#
# algorithms
# Hard (30.32%)
# Total Accepted: 135.9K
# Total Submissions: 448.2K
# Testcase Example: '"aab"'
#
# Given a string s, partition s such that every substring of the partition is a
# palindrome
#
# Return the minimum cuts needed for a palindrome partitioning of s.
#
#
# Example 1:
#
#
# Input: s = "aab"
# Output: 1
# Explanation: The palindrome partitioning ["aa","b"] could be produced using 1
# cut.
#
#
# Example 2:
#
#
# Input: s = "a"
# Output: 0
#
#
# Example 3:
#
#
# Input: s = "ab"
# Output: 1
#
#
#
# Constraints:
#
#
# 1 <= s.length <= 2000
# s consists of lower-case English letters only.
#
#
#
class Solution:
def minCut(self, s: str) -> int:
| UTF-8 | Python | false | false | 860 | py | 3,103 | 132.palindrome-partitioning-ii.py | 1,763 | 0.631395 | 0.595349 | 0 | 53 | 15.056604 | 79 |
JustinLiam/DAN | 6,605,659,713,047 | ed4a35850d5f2d20cdba0a7f4ea1fd3092ed6070 | 3cfb76a1540326ad2d17042c5263697a91147a20 | /test.py | 7064ce6c9fb68b3075543a9c5ff0c9642a2a3d91 | [
"MIT"
]
| permissive | https://github.com/JustinLiam/DAN | 5f3e1568bce353f1f48f69e4b0c4cc4fd2d412ae | eb29cddad6c93e591854b115ef524643b1cd471c | refs/heads/main | 2023-07-29T22:21:45.578891 | 2021-09-09T09:46:15 | 2021-09-09T09:46:15 | 409,651,931 | 1 | 0 | MIT | true | 2021-09-23T15:44:56 | 2021-09-23T15:44:55 | 2021-09-14T09:28:54 | 2021-09-09T09:46:15 | 15,370 | 0 | 0 | 0 | null | false | false | import torch
import os
import ray
from time import time
from runner_for_test import TestRunner
from config import config
from env import Env
cfg = config()
model_path = cfg.model_path
device = cfg.device
decode_type = 'greedy'
test_size = 500
def test():
average_max_length = 0
average_mean_length = 0
average_time = 0
sum_time = 0
runner = TestRunner(metaAgentID=0, cfg=cfg, decode_type=decode_type)
checkpoint = torch.load(model_path + '/model_states.pth')
runner.model.load_state_dict(checkpoint['model'])
for i in range(test_size):
print(i)
env = Env(cfg, seed=i)
t1 = time()
with torch.no_grad():
max_length = runner.sample(env)
t2 = time()
max_length = max_length.item()
# mean_length = mean_length.item()
t = t2 - t1
average_max_length = (max_length + average_max_length * i) / (i + 1)
#average_mean_length = (mean_length + average_mean_length * i) / (i + 1)
average_time = (t + average_time * i) / (i + 1)
sum_time += t
print('average_max_length', average_max_length)
print('average_time', average_time)
print('average_max_length', average_max_length)
#print('average_mean_length', average_mean_length)
print('average_time', average_time)
print('sum_time', sum_time)
if __name__ == '__main__':
test()
| UTF-8 | Python | false | false | 1,393 | py | 13 | test.py | 12 | 0.61163 | 0.600861 | 0 | 50 | 26.86 | 80 |
Conpancol/PyHeroku | 10,256,381,912,364 | e26ee1426bd8f5355f17e726f05cf9cae78cc3eb | d9c41d8356ce6d7334598909db030b4f802c5146 | /CPFrontend/quotes/views.py | de57c9d639dd808d2f887c1c61e170a0085129dd | [
"MIT"
]
| permissive | https://github.com/Conpancol/PyHeroku | da8336c6b51fd17ee2965622fc3a6a8d1ffe8a8c | 16b157a23c77cd794d246a56cf8575766e48689c | refs/heads/master | 2020-03-22T20:24:04.797147 | 2019-11-03T02:49:22 | 2019-11-03T02:49:22 | 140,596,657 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render, redirect
from django.core.files.storage import FileSystemStorage
from django.contrib.auth.decorators import login_required
from django.forms import formset_factory
from requests.exceptions import ConnectionError
from .services.QuoteCreator import QuoteCreator
from .services.ExtQuotedMaterialCreator import ExtQuotedMaterialCreator
from .forms import QuotesForm, QuotedMaterialsForm, SelectorForm, QuotesFormOnlyinfo, QuotedMaterialForm
from .forms import SmartQuotesForm
import requests
import json
import os
from common.BackendMessage import BackendMessage
from common.MachineConfig import MachineConfigurator
from common.Instructions import Instructions
from common.FrontendTexts import FrontendTexts
view_texts = FrontendTexts('quotes')
def cleanup(filename):
try:
os.remove('.' + filename)
print("removed file: " + filename)
except Exception as error:
print(error)
@login_required(login_url='/auth/login')
def quotes_upload(request):
menu_texts = FrontendTexts('menu')
instructions = Instructions('quotes', 'upload')
uploaded_file_url = ''
try:
form = QuotesForm()
if request.method == 'POST':
form = QuotesForm(request.POST, request.FILES)
if form.is_valid():
quote = QuoteCreator()
internal_code = form.cleaned_data['internalCode']
external_code = form.cleaned_data['externalCode']
provider_code = form.cleaned_data['providerCode']
received_date = form.cleaned_data['receivedDate']
sent_date = form.cleaned_data['sentDate']
user = form.cleaned_data['user']
provider_id = form.cleaned_data['providerId']
provider_name = form.cleaned_data['providerName']
contact_name = form.cleaned_data['contactName']
incoterms = form.cleaned_data['incoterms']
note = form.cleaned_data['note']
edt = form.cleaned_data['edt']
quote.setQuoteInformation(internal_code, external_code, provider_code, provider_id, provider_name,
contact_name, received_date, sent_date, user, edt)
quote.setQuoteIncoterms(incoterms)
quote.setQuoteNote(note)
myfile = request.FILES['document']
fs = FileSystemStorage()
filename = fs.save(myfile.name, myfile)
uploaded_file_url = fs.url(filename)
result = quote.createQuotefromCSV('.' + uploaded_file_url)
# ... print(json.dumps(result))
backend_host = MachineConfigurator().getBackend()
r = requests.post(backend_host + '/auth/quotes/', json=result)
backend_message = BackendMessage(json.loads(r.text))
cleanup(uploaded_file_url)
backend_result = []
if backend_message.errorInd:
display_message = {}
display_message['internalCode'] = internal_code
display_message['externalCode'] = external_code
display_message['status'] = backend_message.getValue()
backend_result.append(display_message)
else:
backend_result = json.loads(backend_message.getValue())
return render(request, 'quotes/quote_upload.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'upload_result': backend_result})
return render(request, 'quotes/quote_upload.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'form': form,
'instructions_title': instructions.getTitle(),
'instructions_steps': instructions.getSteps()})
except ValueError as exception:
cleanup(uploaded_file_url)
print("There is a problem with the backend return value")
print(exception)
return render(request, 'quotes/quote_upload.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'error_message': 'Backend problem',
'instructions_title': instructions.getTitle(),
'instructions_steps': instructions.getSteps()
})
except ConnectionError as exception:
cleanup(uploaded_file_url)
print("Backend connection problem")
print(exception)
return render(request, 'quotes/quote_upload.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'error_message': 'Backend connection problem',
'instructions_title': instructions.getTitle(),
'instructions_steps': instructions.getSteps()
})
except Exception as exception:
cleanup(uploaded_file_url)
print(exception)
return render(request, 'quotes/quote_upload.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'error_message': 'General problem',
'instructions_title': instructions.getTitle(),
'instructions_steps': instructions.getSteps()
})
@login_required(login_url='/auth/login')
def smart_quotes_upload(request):
menu_texts = FrontendTexts('menu')
instructions = Instructions('quotes', 'upload')
uploaded_file_url = ''
try:
backend_host = MachineConfigurator().getBackend()
r = requests.get(backend_host + '/auth/providers/short')
backend_message = BackendMessage(json.loads(r.text))
providers_list = json.loads(backend_message.getValue())
form = SmartQuotesForm(providers_list)
if request.method == 'POST':
form = SmartQuotesForm(providers_list, request.POST, request.FILES)
if form.is_valid():
quote = QuoteCreator()
internal_code = form.cleaned_data['internalCode']
external_code = form.cleaned_data['externalCode']
provider_code = form.cleaned_data['providerCode']
received_date = form.cleaned_data['receivedDate']
sent_date = form.cleaned_data['sentDate']
user = form.cleaned_data['user']
contact_name = form.cleaned_data['contactName']
incoterms = form.cleaned_data['incoterms']
note = form.cleaned_data['note']
edt = form.cleaned_data['edt']
provider = int(form.cleaned_data[ 'providerId' ])
provider_choices = form.getProviderInfo()[provider - 1][1].split(' | ')
# ... print(provider_choices[0], provider_choices[1])
provider_id = provider_choices[0]
provider_name = provider_choices[1]
quote.setQuoteInformation(internal_code, external_code, provider_code, provider_id, provider_name,
contact_name, received_date, sent_date, user, edt)
quote.setQuoteIncoterms(incoterms)
quote.setQuoteNote(note)
myfile = request.FILES['document']
fs = FileSystemStorage()
filename = fs.save(myfile.name, myfile)
uploaded_file_url = fs.url(filename)
result = quote.createQuotefromCSV('.' + uploaded_file_url)
# ... print(json.dumps(result))
backend_host = MachineConfigurator().getBackend()
r = requests.post(backend_host + '/auth/quotes/', json=result)
backend_message = BackendMessage(json.loads(r.text))
cleanup(uploaded_file_url)
backend_result = []
if backend_message.errorInd:
display_message = {}
display_message['internalCode'] = internal_code
display_message['externalCode'] = external_code
display_message['status'] = backend_message.getValue()
backend_result.append(display_message)
else:
backend_result = json.loads(backend_message.getValue())
return render(request, 'quotes/quote_upload.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'upload_result': backend_result})
return render(request, 'quotes/quote_upload.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'form': form,
'instructions_title': instructions.getTitle(),
'instructions_steps': instructions.getSteps()})
except ValueError as exception:
cleanup(uploaded_file_url)
print("There is a problem with the backend return value")
print(exception)
return render(request, 'quotes/quote_upload.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'error_message': 'Backend problem',
'instructions_title': instructions.getTitle(),
'instructions_steps': instructions.getSteps()
})
except ConnectionError as exception:
cleanup(uploaded_file_url)
print("Backend connection problem")
print(exception)
return render(request, 'quotes/quote_upload.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'error_message': 'Backend connection problem',
'instructions_title': instructions.getTitle(),
'instructions_steps': instructions.getSteps()
})
except Exception as exception:
cleanup(uploaded_file_url)
print(exception)
return render(request, 'quotes/quote_upload.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'error_message': 'General problem',
'instructions_title': instructions.getTitle(),
'instructions_steps': instructions.getSteps()
})
@login_required(login_url='/auth/login')
def quoted_materials_upload(request):
menu_texts = FrontendTexts('menu')
instructions = Instructions('quotes', 'materials_upload')
uploaded_file_url = ''
try:
if request.method == 'POST':
form = QuotedMaterialsForm(request.POST, request.FILES)
if form.is_valid():
data = ExtQuotedMaterialCreator()
providerId = form.cleaned_data['providerId']
providerName = form.cleaned_data['providerName']
revision = form.cleaned_data['revision']
data.setExtendedInformation(providerId, providerName, revision)
my_file = request.FILES['document']
fs = FileSystemStorage()
filename = fs.save(my_file.name, my_file)
uploaded_file_url = fs.url(filename)
result = data.createExtQuotedMaterialsfromCSV('.' + uploaded_file_url)
# print(json.dumps(result))
backend_host = MachineConfigurator().getBackend()
r = requests.post(backend_host + '/auth/quotes/materials', json=result)
backend_message = BackendMessage(json.loads(r.text))
cleanup(uploaded_file_url)
backend_result = []
if backend_message.errorInd:
display_message = {}
display_message['itemcode'] = "-"
display_message['revision'] = "-"
display_message['status'] = "-"
backend_result.append(display_message)
else:
backend_result = json.loads(backend_message.getValue())
return render(request, 'quotes/materials_upload.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'upload_result': backend_result})
else:
form = QuotedMaterialsForm()
return render(request, 'quotes/materials_upload.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'form': form,
'instructions_title': instructions.getTitle(),
'instructions_steps': instructions.getSteps()
})
except ValueError as exception:
cleanup(uploaded_file_url)
print("There is a problem with the backend return value")
print(exception)
return render(request, 'quotes/materials_upload.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'error_message': 'Backend problem',
'instructions_title': instructions.getTitle(),
'instructions_steps': instructions.getSteps()
})
except ConnectionError as exception:
cleanup(uploaded_file_url)
print("There is a problem with the backend return value")
print(exception)
return render(request, 'quotes/materials_upload.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'error_message': 'Backend connection problem',
'instructions_title': instructions.getTitle(),
'instructions_steps': instructions.getSteps()
})
except Exception as exception:
cleanup(uploaded_file_url)
print(exception)
return render(request, 'quotes/materials_upload.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'error_message': "Frontend Error",
'instructions_title': instructions.getTitle(),
'instructions_steps': instructions.getSteps()
})
@login_required(login_url='/auth/login')
def quotes_manager(request):
menu_texts = FrontendTexts('menu')
instructions = Instructions('quotes', 'manage')
print("quotes manager")
try:
if request.method == 'POST':
selector_form = SelectorForm(request.POST)
if selector_form.is_valid():
code = selector_form.cleaned_data['code']
action = selector_form.cleaned_data['action']
if action == '1':
return redirect('edit/' + code)
elif action == '2':
return redirect('edit/materials/' + code)
else:
selector_form = SelectorForm()
return render(request, 'quotes/quote_selector.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'selector_form': selector_form,
'instructions_title': instructions.getTitle(),
'instructions_steps': instructions.getSteps()})
except ValueError as exception:
print("There is a problem with the backend return value")
print(exception)
return render(request, 'quotes/quote_selector.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'error_message': 'Backend problem',
'instructions_title': instructions.getTitle(),
'instructions_steps': instructions.getSteps()
})
except ConnectionError as exception:
print("Backend connection problem")
print(exception)
return render(request, 'quotes/quote_selector.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'error_message': 'Backend connection problem',
'instructions_title': instructions.getTitle(),
'instructions_steps': instructions.getSteps()
})
except Exception as exception:
print(exception)
return render(request, 'quotes/quote_selector.html', {'menu_text': menu_texts.getComponent(),
'view_texts': view_texts.getComponent(),
'error_message': 'System error',
'instructions_title': instructions.getTitle(),
'instructions_steps': instructions.getSteps()
})
@login_required(login_url='/auth/login')
def quotes_editor(request, code):
    """Edit the quote identified by ``code`` together with its materials.

    GET fetches the quote and its material list from the backend and
    renders a pre-filled form plus a materials formset.  POST validates
    both, PUTs the merged result to the backend, and shows the backend's
    answer as ``updated_materials``.  Errors re-render the editor with an
    ``error_message``.
    """
    menu_texts = FrontendTexts('menu')
    instructions = Instructions('quotes', 'edit')

    def _render_editor(extra):
        # Context keys shared by the GET page and every error page.
        # NOTE(review): ``view_texts`` is assumed to be a module-level
        # global - verify.
        context = {'menu_text': menu_texts.getComponent(),
                   'view_texts': view_texts.getComponent(),
                   'instructions_title': instructions.getTitle(),
                   'instructions_steps': instructions.getSteps()}
        context.update(extra)
        return render(request, 'quotes/quote_editor.html', context)

    try:
        backend_host = MachineConfigurator().getBackend()
        r = requests.post(backend_host + '/auth/quotes/' + code)
        backend_message = BackendMessage(json.loads(r.text))
        backend_result = json.loads(backend_message.getValue())
        material_data = backend_result['materialList']
        quote_form = QuotesFormOnlyinfo(initial=backend_result)
        MaterialFormSet = formset_factory(QuotedMaterialForm, extra=0)
        materials_formset = MaterialFormSet(initial=material_data)
        if request.method == 'POST':
            quote_form = QuotesFormOnlyinfo(request.POST)
            materials_formset = MaterialFormSet(request.POST)
            if quote_form.is_valid() and materials_formset.is_valid():
                # Merge the submitted forms with the backend material data
                # and push the updated quote back.
                creator = QuoteCreator()
                result = creator.editQuotewithMaterials(quote_form, materials_formset, material_data)
                print(result)
                # (Removed a loop that built an unused ``result_json`` list:
                # ``requests`` already serializes the ``json=`` argument.)
                r = requests.put(backend_host + '/auth/quotes/' + code, json=result)
                backend_message = BackendMessage(json.loads(r.text))
                backend_result = json.loads(backend_message.getValue())
                return render(request, 'quotes/quote_editor.html',
                              {'menu_text': menu_texts.getComponent(),
                               'view_texts': view_texts.getComponent(),
                               'updated_materials': backend_result})
            else:
                print("Invalid form")
        return _render_editor({'quote_form': quote_form,
                               'materials_formset': materials_formset})
    except ValueError as exception:
        print("There is a problem with the backend return value")
        print(exception)
        return _render_editor({'error_message': 'No such QUOTE exists in the DB: ' + code})
    except ConnectionError as exception:
        print("Backend connection problem")
        print(exception)
        return _render_editor({'error_message': 'Backend connection problem'})
    except Exception as exception:
        print(exception)
        return _render_editor({'error_message': 'System error'})
@login_required(login_url='/auth/login')
def quoted_materials_editor(request, code):
    """Edit the quoted materials of the quote identified by ``code``.

    GET fetches the quoted-material record from the backend and renders a
    pre-filled ``QuotedMaterialForm``.  POST validates the form, PUTs the
    merged result to the backend and shows the backend's answer as
    ``updated_materials``.  Errors re-render the page with an
    ``error_message``.
    """
    menu_texts = FrontendTexts('menu')
    instructions = Instructions('quotes', 'edit')

    def _render_materials(extra):
        # Context keys shared by the GET page and every error page.
        # NOTE(review): ``view_texts`` is assumed to be a module-level
        # global - verify.
        context = {'menu_text': menu_texts.getComponent(),
                   'view_texts': view_texts.getComponent(),
                   'instructions_title': instructions.getTitle(),
                   'instructions_steps': instructions.getSteps()}
        context.update(extra)
        return render(request, 'quotes/materials_editor.html', context)

    try:
        backend_host = MachineConfigurator().getBackend()
        r = requests.post(backend_host + '/auth/quotes/materials/' + code)
        backend_message = BackendMessage(json.loads(r.text))
        quoted_materials_data = json.loads(backend_message.getValue())
        quoted_material_form = QuotedMaterialForm(initial=quoted_materials_data)
        if request.method == 'POST':
            quoted_material_form = QuotedMaterialForm(request.POST)
            if quoted_material_form.is_valid():
                # Merge the submitted form with the backend data and push
                # the updated materials back.
                creator = QuoteCreator()
                result = creator.editQuotedMaterials(quoted_material_form, quoted_materials_data)
                # (Removed a loop that built an unused ``result_json`` list:
                # ``requests`` already serializes the ``json=`` argument.)
                r = requests.put(backend_host + '/auth/quotes/materials/' + code, json=result)
                backend_message = BackendMessage(json.loads(r.text))
                backend_result = json.loads(backend_message.getValue())
                return render(request, 'quotes/materials_editor.html',
                              {'menu_text': menu_texts.getComponent(),
                               'view_texts': view_texts.getComponent(),
                               'updated_materials': backend_result})
            else:
                print("Invalid form")
        return _render_materials({'quote_form': quoted_material_form})
    except ValueError as exception:
        print("There is a problem with the backend return value")
        print(exception)
        return _render_materials({'error_message': 'No such quoted material exists ' +
                                                   'in the DB: ' + code})
    except ConnectionError as exception:
        print("Backend connection problem")
        print(exception)
        return _render_materials({'error_message': 'Backend connection problem'})
    except Exception as exception:
        print(exception)
        return _render_materials({'error_message': 'System error'})
| UTF-8 | Python | false | false | 29,210 | py | 73 | views.py | 48 | 0.476036 | 0.475727 | 0 | 557 | 51.441652 | 119 |
beproud/bpmailer | 12,867,722,057,793 | 2063fd528693718836d0e40bba8bfd4992768e3e | 4bc1b018d091c85e0e9581b2e14e6fc2bf8c3f94 | /beproud/django/mailer/backends/console.py | 4b205330044dd798190a9138b8242819edd48b99 | []
| no_license | https://github.com/beproud/bpmailer | cf1942c8666a8f4dcaae4a82e03ff9d01c4f7776 | cea10bd2f087979d6632f71d87917787b9bd13c1 | refs/heads/master | 2023-07-11T09:06:51.924940 | 2022-07-12T04:21:14 | 2022-07-12T04:21:14 | 41,725,433 | 0 | 2 | null | false | 2022-07-12T04:21:15 | 2015-09-01T08:03:23 | 2022-03-11T07:30:56 | 2022-07-12T04:21:14 | 354 | 0 | 2 | 3 | Python | false | false | """
Email backend that writes messages to console instead of sending them.
"""
import sys
import logging
import threading
from django.conf import settings
from beproud.django.mailer.backends.base import BaseEmailBackend
logger = logging.getLogger(getattr(settings, "EMAIL_LOGGER", ""))
class EmailBackend(BaseEmailBackend):
    """Email backend that writes messages to a stream instead of sending them.

    The target stream defaults to ``sys.stdout`` and can be overridden via
    the ``stream`` keyword (useful for capturing output in tests).
    """

    def __init__(self, *args, **kwargs):
        self.stream = kwargs.pop('stream', sys.stdout)
        # Serializes concurrent senders so messages are not interleaved.
        self._lock = threading.RLock()
        super(EmailBackend, self).__init__(*args, **kwargs)

    def send_messages(self, email_messages):
        """Write all messages to the stream in a thread-safe way.

        Returns the number of messages written, or None when there was
        nothing to send.
        """
        if not email_messages:
            return
        num_sent = 0
        # ``with`` replaces the manual acquire()/try/finally/release()
        # dance (the old nested try existed only for Python 2.4 support).
        with self._lock:
            stream_created = self.open()
            for message in email_messages:
                if self._send_message_wrapper(message):
                    num_sent += 1
            if stream_created:
                self.close()
        return num_sent

    def _send_message(self, email_message):
        # Render the full message followed by a 79-char separator rule.
        self.stream.write('%s\n' % email_message.message().as_string())
        self.stream.write('-' * 79)
        self.stream.write('\n')
        self.stream.flush()  # flush after each message
| UTF-8 | Python | false | false | 1,398 | py | 23 | console.py | 12 | 0.600858 | 0.59299 | 0 | 44 | 30.772727 | 71 |
saleph/SAG_spatial_factory | 4,303,557,247,882 | 912a60e04ca9021fe2920051fb22065699d8de5c | a32a2f525ced34dac627cb135c34b55f88679aec | /DataTypes/MessageThreadCounter.py | cdc3e9112c9aa8b2d96f492b14532f6936b794d6 | []
| no_license | https://github.com/saleph/SAG_spatial_factory | 50f6eea5d021d512a37aef0277cf4f06a922873b | 88ccfa6d25aa8c279f930e3f43ceecfc079464cb | refs/heads/master | 2020-09-27T15:30:15.951510 | 2020-01-28T21:47:51 | 2020-01-28T21:47:51 | 226,546,285 | 0 | 0 | null | false | 2020-01-18T20:51:52 | 2019-12-07T16:52:52 | 2020-01-18T17:10:51 | 2020-01-18T20:51:51 | 26 | 0 | 0 | 0 | Python | false | false | class MessageThreadCounter:
def __init__(self, thread_id = None, counter = 0 ):
self.thread_id = thread_id
self.counter = counter
def increaseCounter(self):
self.counter += 1
def decreaseCounter(self) -> bool:
self.counter -= 1
return self.isCounterEmpty()
def getCounterValue(self):
return self.counter
def isCounterEmpty(self):
return self.counter == 0 | UTF-8 | Python | false | false | 435 | py | 26 | MessageThreadCounter.py | 22 | 0.616092 | 0.606897 | 0 | 18 | 23.222222 | 55 |
hubaym/waf | 1,760,936,612,804 | 9e3822df1a0543d5982538588dede3f66720d9dc | 4f50a8a320e89fa60147ae21949168f9929a24ea | /wafweb/wafwebapp/classes/archive/process.py | 04812d369d317a1eeacbe885f97b153877a33310 | []
| no_license | https://github.com/hubaym/waf | 78732b3d42054b0bc608912a37bed3e78bd750f0 | a51ec7331c0c80841de3a52b325fb42708e38a6d | refs/heads/master | 2021-04-15T03:52:02.586070 | 2016-08-24T22:18:15 | 2016-08-24T22:18:15 | 65,689,380 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from tweet2json import Tweet2Json
from twitterconnection import TwitterApi
import datetime
from mongolayer import MongoLayer
from processTweetBatch import processTweetBatch
from waflog import WafLog
twitter= TwitterApi()
mongodb = MongoLayer()
def ingestion(users, count = 1, insert = True, drop = False):
    """Pull ``count`` tweets from the authenticated timeline into MongoDB.

    Optionally drops the whole database first.  Only tweets not already
    stored are inserted (``insertIfNew``).

    NOTE(review): the ``users`` argument is never used here - verify
    whether it was meant to filter the timeline.
    """
    if drop:
        WafLog().twitterlogger.info('drop database')
        mongodb.drop()
        WafLog().twitterlogger.info(datetime.datetime.utcnow())
    if insert:
        WafLog().twitterlogger.info('insert tweets to database')
        for tweet in twitter.timeline(count):
            # Convert the raw tweet JSON to the app's storage schema.
            jsontoinsert = Tweet2Json(tweet._json)
            mongodb.insertIfNew(jsontoinsert.getDefaultJson())
        WafLog().twitterlogger.info(datetime.datetime.utcnow())
def loadNearTweet(users, drop=False):
    """Fetch each user's recent timeline and insert unseen tweets.

    Args:
        users: iterable of ``(screen_name, oldest_known_id)`` tuples;
            only the screen name is used here.
        drop: when True, the whole database is dropped first.
    """
    if drop:
        WafLog().twitterlogger.info('drop database')
        mongodb.drop()
    WafLog().twitterlogger.info(datetime.datetime.utcnow())
    for user, oldid in users:
        i = 0
        for tweet in twitter.nearPastUserTimeline(user):
            mongodb.insertIfNew(Tweet2Json(tweet._json, user).getDefaultJson())
            i += 1
            if i % 100 == 0:
                # BUG FIX: Logger.info(msg, *args) %-formats args into msg;
                # the old call passed a datetime object as the format string,
                # which broke the progress logging.
                WafLog().twitterlogger.info('%s: saved %d tweets for user %s',
                                            datetime.datetime.utcnow(), i, user)
def loadOldTweet(users, drop=False, delete=False, extraold=False):
    """Backfill older tweets for each user into MongoDB.

    Args:
        users: iterable of ``(screen_name, oldest_known_id)`` tuples.
        drop: drop the whole database before loading.
        delete: intended to clear each user's stored tweets first.
        extraold: when True, page back starting from the stored oldest id;
            otherwise start from the most recent tweet (``oldid`` ignored).
    """
    if drop:
        WafLog().twitterlogger.info('drop database')
        mongodb.drop()
    WafLog().twitterlogger.info(datetime.datetime.utcnow())
    if delete:
        # BUG FIX: ``users`` contains (name, id) tuples; the old loop bound
        # the whole tuple to ``user`` and passed it as a stray logging arg.
        for user, _ in users:
            WafLog().twitterlogger.info('drop tweets for user %s', user)
            # NOTE(review): this drops the entire database for every user,
            # not just this user's tweets - confirm this is intended.
            mongodb.drop()
        WafLog().twitterlogger.info(datetime.datetime.utcnow())
    for user, oldid in users:
        i = 0
        if not extraold:
            oldid = None
        for tweet in twitter.allTweetUserTimeline(user, oldid):
            mongodb.insertIfNew(Tweet2Json(tweet._json, user).getDefaultJson())
            i += 1
            if i % 100 == 0:
                # Lazy %-args instead of eager string interpolation.
                WafLog().twitterlogger.info('saved already %s for user %s', i, user)
if __name__ == '__main__':
    # Manual driver: each ``users`` tuple is (screen_name, oldest_known_tweet_id).
    # NOTE(review): ``languages`` and ``processit`` are set but never used
    # below - verify whether processing was meant to be invoked here.
    languages = ['en']
    users = [('SecretFlying',678946054218346495),
            ('Fly4freecom',667036797105528831),
            ('FareDealAlert',505071205221937151)]
    # Feature toggles for this run.
    getTweets =False
    extraold = False
    processit =True
    #loadNearTweet(users)
    if getTweets:
        loadOldTweet(users,drop = False, delete= False, extraold=extraold)
    print('Done')
codeashen/Python-Journey | 8,856,222,568,143 | b1119ad956acc82cde01bad18c29a99ca5ceda57 | 08afbb6a4d3856c7db4b004bd24d5b815f190951 | /python-full-stack/1-elementary-python/11-模块与包-python_package/test_01.py | 28a82772f5868dc52a6a954cae694024e2e3e7d5 | []
| no_license | https://github.com/codeashen/Python-Journey | 90734548249f469ef9d08e2d64ceb699a7607bfd | 39002b0e008fb9f536b200e95f587649e9bca6bc | refs/heads/master | 2023-04-16T06:13:32.526114 | 2022-09-17T13:51:09 | 2022-09-17T13:51:09 | 359,531,091 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding:utf-8
import re
str_data = 'hello xiaomu, this is a good day!'
result = re.search('h([a-zA-Z])s', str_data)
print(result.groups())
| UTF-8 | Python | false | false | 142 | py | 170 | test_01.py | 151 | 0.661972 | 0.65493 | 0 | 7 | 19.285714 | 46 |
achenbachsven/learningSkript | 1,571,958,031,957 | b1277e4de9bfa4022c4628722a93d9f6ec419ec2 | 533d7f6213181bc65f89f4f01e4521671deb266a | /cobras_ts/cobras_dtw.py | bbfcfbf938cad229bf89568682db62bcd11f916b | [
"BSD-3-Clause"
]
| permissive | https://github.com/achenbachsven/learningSkript | 7353b8341212da6ad8af6e5acb0ab20327d0cd57 | 7af067cbf0c8d7eed010806923f8af2e38977be2 | refs/heads/master | 2022-06-04T16:10:48.109638 | 2021-03-05T16:37:03 | 2021-03-05T16:37:03 | 245,118,795 | 0 | 0 | BSD-3-Clause | false | 2022-05-26T20:42:39 | 2020-03-05T09:21:50 | 2021-03-05T16:37:05 | 2022-05-26T20:42:38 | 2,601 | 0 | 0 | 2 | Jupyter Notebook | false | false | import numpy as np
from sklearn.cluster import SpectralClustering
from cobras_ts.superinstance_dtw import SuperInstance_DTW, get_prototype
from cobras_ts.cobras import COBRAS
class COBRAS_DTW(COBRAS):
    """COBRAS variant operating on a precomputed DTW affinity matrix.

    ``self.data`` is indexed pairwise (``data[i, j]``) and fed to spectral
    clustering with ``affinity="precomputed"``, so it is assumed to be a
    square similarity matrix (higher = more similar) - TODO confirm
    against the COBRAS base class.
    """

    def split_superinstance(self, si, k):
        """Split super-instance ``si`` into ``k`` children via spectral clustering.

        Children containing at least one training index become real
        super-instances; the rest are merged into the child whose
        representative is most similar to their prototype.
        """
        # Affinity sub-matrix restricted to the members of ``si``.
        data_to_cluster = self.data[np.ix_(si.indices, si.indices)]
        spec = SpectralClustering(k, affinity="precomputed")
        spec.fit(data_to_cluster)
        # NOTE(review): np.int is deprecated in modern NumPy - verify the
        # pinned NumPy version.
        split_labels = spec.labels_.astype(np.int)
        # NOTE(review): ``labels_to_indices`` is built but never used.
        labels_to_indices = []
        for label in set(split_labels):
            labels_to_indices.append(np.where(split_labels == label))
        training = []
        no_training = []
        for new_si_idx in set(split_labels):
            # go from super instance indices to global ones
            cur_indices = [si.indices[idx] for idx, c in enumerate(split_labels) if c == new_si_idx]
            si_train_indices = [x for x in cur_indices if x in self.train_indices]
            if len(si_train_indices) != 0:
                training.append(SuperInstance_DTW(self.data, cur_indices, self.train_indices, si))
            else:
                # No training point in this child: remember its members and
                # prototype so they can be attached to a trained child.
                no_training.append((cur_indices, get_prototype(self.data, cur_indices)))
        for indices, centroid in no_training:
            # Attach to the trained child whose representative is most
            # similar (highest affinity) to this child's prototype.
            closest_train = max(training, key=lambda x: self.data[x.representative_idx, centroid])
            closest_train.indices.extend(indices)
        si.children = training
        return training

    def create_superinstance(self, indices, parent=None):
        """Factory for the DTW-specific super-instance type."""
        return SuperInstance_DTW(self.data, indices, self.train_indices, parent)
jain7727/html | 12,455,405,186,039 | 3c2a0f13d4c2fdab094da996b2191b6248e75d56 | 5849acd68ed0ba545c767663911ee0365cad42c2 | /collections/list/list1.py | bc7a7cf9bc8158d53e081c1aa012b9b313632216 | []
| no_license | https://github.com/jain7727/html | 785e6731634732e63fc4d76d52524f8707d53bb6 | c056d6023e9fc05b8cea1afbc941bb35a63efcf5 | refs/heads/master | 2023-04-30T21:39:22.710943 | 2021-05-20T08:26:24 | 2021-05-20T08:26:24 | 369,136,223 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # lst=[1,2,3,4,5,6,7,8,9,10]
# lst=[2,5,3,9,10,7,11,5,1,8,4,6]
# sum=0
# for i in lst:
# sum+=i
# print(sum)
# print(sum(lst)) #function of sum, of all elements
# print(max(lst)) # function called max, prints max element
# print(min(lst)) # function called mix, prints min element
# d=0
# for i in lst:
# if(i>d):
# d=i
# print(d)
#
# print(len(lst)) #function called length prints total
#
# print(sorted(lst)) # function called sorted, sorts all the elements
#
# lst.sort(reverse=True)
# print(lst)
# lst=[2,5,3,9,10,7,11,5,1,8,4,6]
# for i in lst:
# if(i%2==0):
# print(i)
# # print()
# lst=[]
# lst.extend([1,2,3]) #multiple elements insertion using extend
# print(lst)
# for i in range(1,101):
# lst.append(i)
# print(lst)
lst=[2,5,3,9,10,4,7,11,5,1,8,4,6]
print(lst)
| UTF-8 | Python | false | false | 825 | py | 77 | list1.py | 77 | 0.587879 | 0.509091 | 0 | 42 | 18.595238 | 70 |
QuinnBast/SaskTel-Communication-Portal | 16,784,732,224,998 | 02b3b4af13919f1eafdcfadd40b31f13dc1906e7 | 820626d43974817219be626fbb12119a545b5d0d | /REST/config/ProductionConfig.py | 09f82d41129727823f4edd4cc42a62aa6e9f2b8c | []
| no_license | https://github.com/QuinnBast/SaskTel-Communication-Portal | 7bd3c6ea8262ff1dd42844851919d77db76c58ee | 45e113331cfa5af48e27812764316ce436f16b46 | refs/heads/master | 2020-04-02T12:06:42.626039 | 2019-04-04T03:42:31 | 2019-04-04T03:42:31 | 154,419,804 | 2 | 0 | null | false | 2019-04-04T03:42:32 | 2018-10-24T01:27:25 | 2019-04-02T01:09:40 | 2019-04-04T03:42:31 | 5,980 | 1 | 0 | 10 | JavaScript | false | false | from REST.config.Config import Config
class ProductionFlaskConfig(Config):
    """Flask/JWT settings applied in production deployments."""

    JWT_COOKIE_SECURE = True  # Cookies must be sent using HTTPS
    # NOTE(review): hard-coded placeholder secret - must be overridden
    # (e.g. from the environment) before deploying.
    JWT_SECRET_KEY = "ChangeThis"  # Secret value used to encrypt tokens
h4wldev/seed | 816,043,824,520 | e4249ad664fab7f6641589b1b53939be72b31e45 | bd9c828c41e334a743073f8c48600a85a72db214 | /seed/models/ability_model.py | 931a19d8389e0021b901add2ad8f0ea6e5144267 | [
"MIT"
]
| permissive | https://github.com/h4wldev/seed | caab268501548e09ffce256945b3264bd234158d | 858e74b002526c7100200241fb4ec36c9f9f691f | refs/heads/main | 2023-03-22T03:24:27.305840 | 2021-01-26T13:52:46 | 2021-01-26T13:52:46 | 323,279,470 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import datetime
from sqlalchemy import Column, Text, String, DateTime
from .mixin import Base, ModelMixin
class AbilityModel(Base, ModelMixin):
    """ORM model for the ``abilities`` table (keyed by ability name)."""

    __tablename__ = 'abilities'

    # BUG FIX: ('ability') is just the string 'ability'; a one-element
    # tuple needs a trailing comma.  The mixin presumably iterates these
    # attribute names when building __repr__ - TODO confirm.
    _repr_attrs = ('ability',)

    # Primary key: short ability identifier.
    ability = Column(String(20), primary_key=True)
    # Free-text description of what the ability grants.
    description = Column(Text)
    # Row creation timestamp (local time, set on insert).
    created_at = Column(DateTime, default=datetime.datetime.now)
| UTF-8 | Python | false | false | 359 | py | 70 | ability_model.py | 61 | 0.715877 | 0.710306 | 0 | 15 | 22.933333 | 64 |
fc860325/Leetcode | 2,800,318,686,258 | 83e8a6d17fa5af36536f14c69cb60d48b0fa8615 | c6c2384f3013ff0445c2cb93474b1caf035912e9 | /HW3.py | dc50eb3989aa633b1ea756aa5f10779b902a2610 | []
| no_license | https://github.com/fc860325/Leetcode | bd3c14f0a8d24a829f993267dc738a7ca957d2c4 | a6ff313e67737b9b33b8763e1cca9de7a6d24a71 | refs/heads/master | 2022-02-07T18:43:06.284220 | 2019-07-10T16:12:50 | 2019-07-10T16:12:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution(object):
def isPalindrome(self, x):
back = str(x)[:: - 1 ]
if(x >= 0 and str(x) == back):
return True
else:
return False | UTF-8 | Python | false | false | 193 | py | 6 | HW3.py | 5 | 0.455959 | 0.445596 | 0 | 7 | 25.857143 | 38 |
leadrive-public/portal | 13,580,686,631,489 | abea3550e43e9c5ac7a5433b5851ef973d6ea0eb | 827957b8f7269173732d3627b62eaef869abacab | /application/qms/blueprint.py | 299f4d966d704dd787f758f4b32b26c82bcd55f8 | []
| no_license | https://github.com/leadrive-public/portal | 04ed58b6bf55bd41195a26c1a5356718ea11595a | a72f4b9afc315124d9dd6e056d602c56fa1e7196 | refs/heads/master | 2021-01-16T01:34:53.911100 | 2020-08-06T01:56:33 | 2020-08-06T01:56:33 | 242,927,610 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import flask
import flask_login
import sqlite3
import os
import json
from . import service as service
# Blueprint for the Quality-Management-System pages, mounted under /qms.
bp = flask.Blueprint('qms', __name__, url_prefix='/qms')
def basePath():
    """Return the directory this module lives in."""
    return os.path.dirname(__file__)
@bp.route('/')
def default():
    """QMS landing page; passes the user to the template only when logged in."""
    user=flask_login.current_user
    if user.is_authenticated:
        return flask.render_template('qms_home.html', user=user)
    else:
        return flask.render_template('qms_home.html')
@bp.route('/manual')
def manual():
    """Quality-manual page."""
    user=flask_login.current_user
    return flask.render_template('qms_qm.html', user=user)
@bp.route('/procedures')
def procedures():
    """Quality-procedures page."""
    user=flask_login.current_user
    return flask.render_template('qms_qp.html', user=user)
@bp.route('/wi-daily')
def workinstructions_daily():
    """Daily work-instructions page."""
    user=flask_login.current_user
    return flask.render_template('qms_wi_daily.html', user=user)
@bp.route('/wi-eng')
def workinstructions_engineering():
    """Engineering work-instructions page."""
    user=flask_login.current_user
    return flask.render_template('qms_wi_eng.html', user=user)
@bp.route('/files/<string:file>')
def files(file):
    """Document viewer for the controlled document named by ``file``.

    NOTE: the parameter shadows a builtin name, but it is part of the URL
    rule and cannot be renamed without changing the route contract.
    """
    user=flask_login.current_user
    return flask.render_template('qms_files.html', user=user, file=file)
@bp.route('/files/<string:file>/<string:version>')
def filesWithVersion(file, version):
    """Document viewer for a specific ``version`` of document ``file``."""
    user=flask_login.current_user
    return flask.render_template('qms_files.html', user=user, file=file, version=version)
@bp.route('/service', methods=['POST','GET'])
def webService():
req = flask.request.get_json()
func = req['function']
rsp = None
if func == 'getDoc':
rsp = __service_getDoc(req)
else:
rsp = {'isSuccess': False, 'exceptionMessage': 'Function not implement'}
if rsp is None:
rsp = {'isSuccess': False, 'exceptionMessage': 'None return value.'}
# print(rsp)
return flask.jsonify(rsp)
def __service_getDoc(req):
    """Handle the ``getDoc`` service call: look up a document by its code.

    ``req`` must contain a ``code`` key; the lookup is delegated to the
    qms ``service`` module.
    """
    code=req['code']
    content=service.getDoc(code=code)
    return {'isSuccess': True, 'content': content}
| UTF-8 | Python | false | false | 1,961 | py | 85 | blueprint.py | 34 | 0.677715 | 0.677206 | 0 | 69 | 27.42029 | 89 |
jeremysherriff/HTPC-Manager | 18,794,776,911,051 | a5e052f275dc3c274b2aea0a5ee58ad1f6ca3330 | b76446925e45c3da75829cb98786125f59381b13 | /libs/sqlobject/maxdb/maxdbconnection.py | b88a79f49b446a4548078795d324139e54b84556 | [
"MIT",
"GPL-1.0-or-later"
]
| permissive | https://github.com/jeremysherriff/HTPC-Manager | f28f319f2b95a966825458dbaf1d0069cbde009a | f4972361b768f9b585b9060daa605d5d8346c1a8 | refs/heads/master2 | 2021-05-13T15:30:22.453814 | 2019-02-13T22:43:00 | 2019-02-13T22:43:00 | 116,771,272 | 1 | 0 | MIT | true | 2019-01-22T06:37:13 | 2018-01-09T05:42:23 | 2019-01-21T04:47:26 | 2019-01-22T06:26:46 | 11,816 | 0 | 0 | 3 | Python | false | null | """
Contributed by Edigram SAS, Paris France Tel:01 44 77 94 00
Ahmed MOHAMED ALI <ahmedmoali@yahoo.com> 27 April 2004
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
connection creation sample::
__connection__ = DBConnection.maxdbConnection(
host=hostname, database=dbname,
user=user_name, password=user_password, autoCommit=1, debug=1)
"""
from sqlobject.dbconnection import DBAPI
from sqlobject import col
class maxdbException(Exception):
    """Base class for MaxDB adapter errors.

    Stores the offending value; str() shows its repr.
    """
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)
class LowerBoundOfSliceIsNotSupported(maxdbException):
    """Raised when a query slice specifies a start offset (MaxDB lacks OFFSET)."""
    def __init__(self, value):
        # The value is intentionally discarded; the message stays empty.
        maxdbException.__init__(self, '')
class IncorrectIDStyleError(maxdbException) :
    """Raised when the primary-key column name does not match the expected style."""
    def __init__(self,value):
        maxdbException.__init__(
            self,
            'This primary key name is not in the expected style, '
            'please rename the column to %r or switch to another style'
            % value)
class StyleMismatchError(maxdbException):
    """Raised when a reserved primary-key name is used for an ordinary column."""
    def __init__(self, value):
        maxdbException.__init__(
            self,
            'The name %r is only permitted for primary key, change the '
            'column name or switch to another style' % value)
class PrimaryKeyNotFounded(maxdbException):
    """Raised when schema introspection finds no primary key on a table.

    (The misspelled class name is kept: callers reference it by this name.)
    """
    def __init__(self, value):
        maxdbException.__init__(
            self,
            "No primary key was defined on table %r" % value)
SAPDBMAX_ID_LENGTH=32  # MaxDB identifiers may not exceed 32 characters

class MaxdbConnection(DBAPI):
    """SQLObject DBAPI connection for SAP MaxDB via the ``sapdb`` driver.

    NOTE: this module uses Python 2 syntax (``raise Exc, arg`` below) and
    will not run on Python 3 without porting.
    """
    supportTransactions = True
    dbName = 'maxdb'
    schemes = [dbName]

    def __init__ (self, host='', port=None, user=None, password=None,
                  database=None, autoCommit=1, sqlmode='internal',
                  isolation=None, timeout=None, **kw):
        # Import here so the module loads even without the sapdb driver.
        from sapdb import dbapi
        self.module = dbapi
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.db = database
        self.autoCommit = autoCommit
        self.sqlmode = sqlmode
        self.isolation = isolation
        self.timeout = timeout
        DBAPI.__init__(self, **kw)

    @classmethod
    def _connectionFromParams(cls, auth, password, host, port, path, args):
        # NOTE(review): ``os`` is not imported anywhere in this module;
        # this line would raise NameError if reached - verify.
        path = path.replace('/', os.path.sep)
        return cls(host, port, user=auth, password=password,
                   database=path, **args)

    def _getConfigParams(self,sqlmode,auto):
        """Build the driver option dict for a (re)connect."""
        autocommit='off'
        if auto:
            autocommit='on'
        opt = {}
        opt["autocommit"] = autocommit
        opt["sqlmode"] = sqlmode
        if self.isolation:
            opt["isolation"]=self.isolation
        if self.timeout :
            opt["timeout"]=self.timeout
        return opt

    def _setAutoCommit(self, conn, auto):
        # The driver has no toggle: close and reopen with new options.
        conn.close()
        conn.__init__(self.user, self.password, self.db, self.host,
                      **self._getConfigParams(self.sqlmode, auto))

    def createSequenceName(self,table):
        """Return the id-generator sequence name for ``table``.

        The name is the table name with a ``_SEQ`` suffix; the table part
        is truncated so the whole identifier stays within MaxDB's
        32-character identifier limit.
        """
        return '%s_SEQ'%(table[:SAPDBMAX_ID_LENGTH -4])

    def makeConnection(self):
        """Open a new driver-level connection with the configured options."""
        conn = self.module.Connection(
            self.user, self.password, self.db, self.host,
            **self._getConfigParams(self.sqlmode, self.autoCommit))
        return conn

    def _queryInsertID(self, conn, soInstance, id, names, values):
        """Insert a row, drawing the id from the table's sequence when absent."""
        table = soInstance.sqlmeta.table
        idName = soInstance.sqlmeta.idName
        c = conn.cursor()
        if id is None:
            # Fetch the next id from the per-table sequence.
            c.execute('SELECT %s.NEXTVAL FROM DUAL' % (self.createSequenceName(table)))
            id = c.fetchone()[0]
        names = [idName] + names
        values = [id] + values
        q = self._insertSQL(table, names, values)
        if self.debug:
            self.printDebug(conn, q, 'QueryIns')
        c.execute(q)
        if self.debugOutput:
            self.printDebug(conn, id, 'QueryIns', 'result')
        return id

    @classmethod
    def sqlAddLimit(cls,query,limit):
        """Rewrite ``query`` to select ROWNO and append ``limit`` to the WHERE clause."""
        sql = query
        sql = sql.replace("SELECT","SELECT ROWNO, ")
        if sql.find('WHERE') != -1:
            sql = sql + ' AND ' + limit
        else:
            sql = sql + 'WHERE ' + limit
        return sql

    @classmethod
    def _queryAddLimitOffset(cls, query, start, end):
        # MaxDB has no OFFSET: only the upper bound of a slice is supported.
        if start:
            raise LowerBoundOfSliceIsNotSupported
        limit = ' ROWNO <= %d ' % (end)
        return cls.sqlAddLimit(query,limit)

    def createTable(self, soClass):
        """Create the table and its id sequence for ``soClass``."""
        #we create the table in a transaction because the addition of the
        #table and the sequence must be atomic
        #i tried to use the transaction class but i get a recursion limit error
        #t=self.transaction()
        # t.query('CREATE TABLE %s (\n%s\n)' % \
        #         (soClass.sqlmeta.table, self.createColumns(soClass)))
        #
        # t.query("CREATE SEQUENCE %s" % self.createSequenceName(soClass.sqlmeta.table))
        # t.commit()
        #so use transaction when the problem will be solved
        self.query('CREATE TABLE %s (\n%s\n)' % \
                   (soClass.sqlmeta.table, self.createColumns(soClass)))
        self.query("CREATE SEQUENCE %s"
                   % self.createSequenceName(soClass.sqlmeta.table))
        return []

    def createReferenceConstraint(self, soClass, col):
        return col.maxdbCreateReferenceConstraint()

    def createColumn(self, soClass, col):
        return col.maxdbCreateSQL()

    def createIDColumn(self, soClass):
        # Map the SQLObject id type onto the MaxDB column type.
        key_type = {int: "INT", str: "TEXT"}[soClass.sqlmeta.idType]
        return '%s %s PRIMARY KEY' % (soClass.sqlmeta.idName, key_type)

    def createIndexSQL(self, soClass, index):
        return index.maxdbCreateIndexSQL(soClass)

    def dropTable(self, tableName,cascade=False):
        """Drop the table and its id sequence (see atomicity note below)."""
        #we drop the table in a transaction because the removal of the
        #table and the sequence must be atomic
        #i tried to use the transaction class but i get a recursion limit error
        # try:
        #     t=self.transaction()
        #     t.query("DROP TABLE %s" % tableName)
        #     t.query("DROP SEQUENCE %s" % self.createSequenceName(tableName))
        #     t.commit()
        # except:
        #     t.rollback()
        #so use transaction when the problem will be solved
        self.query("DROP TABLE %s" % tableName)
        self.query("DROP SEQUENCE %s" % self.createSequenceName(tableName))

    def joinSQLType(self, join):
        return 'INT NOT NULL'

    def tableExists(self, tableName):
        """Case-insensitive existence check against the catalog."""
        for (table,) in self.queryAll("SELECT OBJECT_NAME FROM ALL_OBJECTS WHERE OBJECT_TYPE='TABLE'"):
            if table.lower() == tableName.lower():
                return True
        return False

    def addColumn(self, tableName, column):
        self.query('ALTER TABLE %s ADD %s' %
                   (tableName,
                    column.maxdbCreateSQL()))

    def delColumn(self, sqlmeta, column):
        self.query('ALTER TABLE %s DROP COLUMN %s' % (sqlmeta.table, column.dbName))

    # Catalog query: column metadata for a table.
    GET_COLUMNS = """
    SELECT COLUMN_NAME, NULLABLE, DATA_DEFAULT, DATA_TYPE,
           DATA_LENGTH, DATA_SCALE
    FROM USER_TAB_COLUMNS WHERE TABLE_NAME=UPPER('%s')"""

    # Catalog query: primary/foreign key constraints for a table.
    GET_PK_AND_FK = """
    SELECT constraint_cols.column_name, constraints.constraint_type,
           refname,reftablename
    FROM user_cons_columns constraint_cols
    INNER JOIN user_constraints constraints
    ON constraint_cols.constraint_name = constraints.constraint_name
    LEFT OUTER JOIN show_foreign_key fk
    ON constraint_cols.column_name = fk.columnname
    WHERE constraints.table_name =UPPER('%s')"""

    def columnsFromSchema(self, tableName, soClass):
        """Introspect ``tableName`` and build SQLObject column definitions.

        Raises PrimaryKeyNotFounded when the table has no primary key.
        """
        colData = self.queryAll(self.GET_COLUMNS
                                % tableName)

        results = []

        keymap = {}
        pkmap={}
        fkData = self.queryAll(self.GET_PK_AND_FK% tableName)
        # 'R' = referential (foreign key), 'P' = primary key constraint.
        for col, cons_type, refcol, reftable in fkData:
            col_name= col.lower()
            pkmap[col_name]=False
            if cons_type == 'R':
                keymap[col_name]=reftable.lower()

            elif cons_type == 'P':
                pkmap[col_name]=True

        if len(pkmap) == 0:
            raise PrimaryKeyNotFounded, tableName

        for (field, nullAllowed, default, data_type, data_len,
             data_scale) in colData:
            # id is defined as primary key --> ok
            # We let sqlobject raise error if the 'id' is used for another column
            field_name = field.lower()
            if (field_name == soClass.sqlmeta.idName) and pkmap[field_name]:
                continue

            colClass, kw = self.guessClass(data_type,data_len,data_scale)

            kw['name'] = field_name
            kw['dbName'] = field

            # NULLABLE='Y' means the column may be NULL, i.e. notNone=False.
            if nullAllowed == 'Y' :
                nullAllowed=False
            else:
                nullAllowed=True

            kw['notNone'] = nullAllowed
            if default is not None:
                kw['default'] = default

            if field_name in keymap:
                kw['foreignKey'] = keymap[field_name]

            results.append(colClass(**kw))

        return results

    _numericTypes=['INTEGER', 'INT','SMALLINT']
    _dateTypes=['DATE','TIME','TIMESTAMP']

    def guessClass(self, t, flength, fscale=None):
        """
        An internal method that tries to figure out what Col subclass
        is appropriate given whatever introspective information is
        available -- both very database-specific.
        """
        if t in self._numericTypes:
            return col.IntCol, {}
        # The type returned by the sapdb library for LONG is
        # SapDB_LongReader To get the data call the read member with
        # desired size (default =-1 means get all)
        elif t.find('LONG') != -1:
            return col.StringCol, {'length': flength,
                                   'varchar': False}
        elif t in self._dateTypes:
            return col.DateTimeCol, {}
        elif t == 'FIXED':
            return CurrencyCol,{'size':flength,
                                'precision':fscale}
        else:
            return col.Col, {}
| UTF-8 | Python | false | false | 10,919 | py | 157 | maxdbconnection.py | 145 | 0.576335 | 0.573496 | 0 | 303 | 34.036304 | 103 |
x41lakazam/di-python-2018 | 6,021,544,163,589 | b1422f5000857609fdc56f3dbff65c3b76d51061 | 75f20a330bfa1295c9d564ea58ae582e871c107b | /Homeworks/anagrams/anagram_checker.py | 89e778bb88c773ace33b3d391f464794cfda2d2b | []
| no_license | https://github.com/x41lakazam/di-python-2018 | ffd1c94347415a72f3ea85644a76628ffade7f72 | 743a5887b15a2234268d702741aff17b7e7e441d | refs/heads/master | 2020-04-24T06:04:14.942358 | 2019-07-23T16:49:02 | 2019-07-23T16:49:02 | 171,752,683 | 1 | 4 | null | false | 2019-11-02T22:41:48 | 2019-02-20T21:33:35 | 2019-07-23T16:49:19 | 2019-11-02T22:41:47 | 33,879 | 1 | 1 | 1 | Python | false | false | class AnagramChecker:
def __init__(self):
# Read the lines of the file
with open("engmix.txt", 'r') as f:
lines = f.readlines()
# Put everything in lowercase
self.clearlist = []
for line in lines:
self.clearlist.append(line.lower())
def is_valid_word(self, word):
# word = word.lower()
# for listword in self.clearlist:
# if word == listword:
# return True
# return False
return word.lower() in self.clearlist
def get_anagram(self, word):
word = word.lower()
anagrams = []
sorted_word = sorted(word)
for w in self.clearlist:
if sorted_word == sorted(w) and w != word:
anagrams.append(w)
return anagrams
| UTF-8 | Python | false | false | 815 | py | 159 | anagram_checker.py | 49 | 0.52638 | 0.52638 | 0 | 31 | 25.258065 | 54 |
jimc101/opec-tools | 4,088,808,913,119 | 573d1121098df7ed37ac3fb082c06ed2ffc15258 | bb5c056b07887d19a198a30ee1c63ac0ff349a6a | /opec/benchmarking.py | d045b5840473d23e2283544dee8b87cf19c7d1b6 | []
| no_license | https://github.com/jimc101/opec-tools | 246d66e529d293adc2ae53cbca77b718430d9955 | dffabfc3383f262b1c69b68f2529425f10875968 | refs/heads/master | 2020-12-25T02:40:15.926017 | 2014-07-02T09:29:05 | 2014-07-02T09:29:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright (C) 2013 Brockmann Consult GmbH (info@brockmann-consult.de)
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, see http://www.gnu.org/licenses/gpl.html
from opec import processor, utils
from opec.configuration import Configuration
from opec.configuration import get_default_config
from opec.data import Data
from opec.matchup_engine import MatchupEngine
from opec.output import Output
def load(filename, ref_filename=None, config=None):
"""
Returns an abstraction view on the data from the given input file.
@param filename: the source file.
@param ref_filename: the file containing the reference data; if None, the reference data is assumed to be in the source file.
@return: a 'Data' object.
"""
if config is not None:
max_cache_size = config.max_cache_size
else:
max_cache_size = None
return Data(filename, ref_filename, max_cache_size)
def create_config(filename):
"""
Returns a configuration according to the properties file.
@param filename: the path to the properties file used to load configuration from.
@return: an object of type 'Configuration'.
"""
return Configuration(properties_file_name=filename)
def calculate_statistics(model_name, ref_name, data, config=None):
"""
Calculates the statistics for the given model and reference variables located in the data file. Calculation will be
performed according to the provided configuration.
@param model_name: the name of the model variable.
@param ref_name: the name of the reference variable.
@param data: the input data object.
@param config: the optional configuration.
@return: a dictionary of statistics.
"""
if config is None:
config = get_default_config()
is_gridded = len(data.get_reference_dimensions(ref_name)) > 1
if is_gridded:
reference_values, model_values = data.get_values(ref_name, model_name)
unit = data.unit(model_name)
return processor.calculate_statistics(model_values, reference_values, model_name, ref_name, unit, config)
me = MatchupEngine(data, config)
matchups = me.find_all_matchups()
if config.remove_empty_matchups:
matchups = me.remove_empty_matchups(matchups)
if len(matchups) == 0:
print("No matchups found; maybe allow higher maximum time delta.")
return
unit = data.unit(model_name)
return calculate_statistics_from_matchups(matchups, model_name, ref_name, data, unit, config=None)
def calculate_statistics_from_matchups(matchups, model_name, ref_name, data, unit=None, config=None):
"""
Calculates the statistics for the given matchups and model and reference variable. Calculation will be
performed according to the provided configuration.
@param matchups: an iterable of 'Matchup' objects.
@param model_name: the name of the model variable.
@param ref_name: the name of the reference variable.
@param data: the input data object.
@param config: the optional configuration.
@return: a dictionary of statistics.
"""
reference_values, model_values = extract_values_from_matchups(matchups, data, model_name, ref_name)
return processor.calculate_statistics(model_values, reference_values, model_name, ref_name, unit=unit, config=config)
def calculate_statistics_from_values(model_values, ref_values, model_name=None, ref_name=None, unit=None, config=None):
"""
Calculates the statistics for two given numpy arrays; the first is considered the model data, the second is
considered the reference data. Calculation will be performed according to the provided configuration. Note that the
condition len(model_values) == len(ref_values) must hold.
@param model_values: numpy array containing the model values.
@param ref_values: numpy array containing the reference values.
@param config: the optional configuration.
@return: a dictionary of statistics.
"""
return processor.calculate_statistics(model_values, ref_values, model_name, ref_name, unit, config=config)
def get_matchups(data, config=None):
"""
Returns all matchups in the given dataset.
@param data: the source data file.
@param config: the optional configuration.
@return: all matchups.
"""
me = MatchupEngine(data, config)
return me.find_all_matchups()
def remove_matchups_with_empty_model_data(data, config, matchups):
"""
Returns all matchups from the list that have at least one valid model data entry.
@param data: the source data file.
@param config: the optional configuration.
@param matchups: the matchups to filter.
@return: the filtered matchups.
"""
me = MatchupEngine(data, config)
return me.remove_empty_matchups(matchups)
def extract_values(model_name, ref_name, data, config=None):
"""
Extracts the matchups for the given data and returns the reference values and the model values for the given
variables.
@param model_name: the name of the model variable.
@param ref_name: the name of the reference variable.
@param data: the source data file.
@param config: the optional configuration.
@return: two numpy arrays: reference_values, model_values
"""
matchups = get_matchups(data, config)
return extract_values_from_matchups(matchups, data, model_name, ref_name)
def extract_values_from_matchups(matchups, data, model_name, ref_name):
"""
Returns the reference values and the model values for the given variables in the given matchups.
@param matchups: the matchups from which to extract the data.
@param model_name: the name of the model variable.
@param ref_name: the name of the reference variable.
@return: two numpy arrays: reference_values, model_values
"""
return utils.extract_values(matchups, data, ref_name, model_name)
def write_csv(statistics, model_name, ref_name, matchups, data, target_file=None, config=None):
"""
Returns the given statistics as CSV string; writes this string if target_file is provided.
@param model_name: the model variable name.
@param ref_name: the reference variable name.
@param statistics: a dictionary containing all statistics.
@param matchups: the matchups.
@param data: the input data file.
@param target_file: the optional target file; if not None, result string will be written to that file.
@param config: the optional configuration.
@return: the given statistics as CSV string.
"""
op = Output(config=config)
op.csv(data, [(model_name, ref_name)], {(model_name, ref_name): statistics}, len(matchups), matchups=matchups, target_file=target_file)
def taylor_diagrams(statistics, target_file=None, config=None):
"""
Creates the taylor diagrams derived from the statistics and possibly writes them to the given target file.
@param statistics: an iterable of statistics to write taylor diagrams for.
@param target_file: the basename for the file where to write the taylor diagrams. If None, nothing will be written.
@param config: the optional configuration.
@return: a list of the taylor diagrams.
"""
op = Output(config=config)
t, diagrams = op.taylor(statistics, target_file)
return diagrams
def target_diagram(statistics, target_file=None, config=None):
"""
Creates the target diagram derived from the statistics and possibly writes it to the given target file.
@param statistics: An iterable of statistics to write the target diagram for.
@param target_file: the name of the target diagram file. If None, nothing will be written.
@param config: the optional configuration.
@return: the target diagram.
"""
op = Output(config=config)
return op.target_diagram(statistics, target_file)
def density_plot(model_name, ref_name, model_values, ref_values, axis_min=None, axis_max=None, target_file=None, unit=None, log_scaled=None, config=None):
"""
Creates the density plot for the given matchups and variables and possible writes it to the given target file.
@param model_name: the name of the model variable.
@param ref_name: the name of the reference variable.
@param model_name: the name of the model variable.
@param ref_name: the name of the reference variable.
@param target_file: the optional target diagram file. If None, nothing will be written.
@param config: the optional configuration.
@return: the density plot.
"""
op = Output(config=config)
if log_scaled is None and config is not None:
log_scaled = config.density_plot_log_scaled
elif log_scaled is None:
log_scaled = False
return op.density_plot(model_name, ref_name, model_values, ref_values, log_scaled, target_file, axis_min, axis_max, unit)
def write_xhtml_report(statistics_list, matchups, data, target_file, taylor_target_files=None, target_diagram_file=None, density_plot_files=None, config=None):
"""
Writes the xhtml report to the given target file.
@param statistics_list: the list of statistics to mention in the report.
@param matchups: the matchups the statistics have been calculated for.
@param target_file: the target xhtml file.
@param taylor_target_files: the optional paths to the taylor diagrams.
@param target_diagram_file: the optional paths to the target diagram.
@param density_plot_files: the optional paths to the density plots.
@param config: the optional configuration.
@return: None.
"""
op = Output(config=config)
op.xhtml(statistics_list, len(matchups), matchups, data, target_file, taylor_target_files, target_diagram_file, density_plot_files)
| UTF-8 | Python | false | false | 10,228 | py | 35 | benchmarking.py | 24 | 0.724482 | 0.723797 | 0 | 225 | 44.457778 | 159 |
enixdark/itNews | 2,319,282,382,274 | 09d803fe7784c39c23b2b9deecb654f0c1fd5096 | 47d1edfeef8c67a237c62b3caf0fbc6a5a2ddf35 | /Haskell.py | ae1196859b61a2460f530e558f77e2973b359c3e | []
| no_license | https://github.com/enixdark/itNews | c54e9219c6d8e8a82d7ebd19fa742069501ebccb | 0d10a6dc3b8b3e26d278c9249ecf975d8a5e12bb | refs/heads/master | 2021-01-19T10:57:46.926398 | 2015-09-26T09:41:40 | 2015-09-26T09:41:40 | 33,329,765 | 6 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | "Haskell":[
"https://blog.wearewizards.io/using-haskells-quickcheck-for-python"
"http://haskellbook.com/"
"http://engineering.imvu.com/2015/06/20/testable-io-in-haskell-2/"
"https://realm.io/news/swift-summit-abizer-nasir-lessons-from-haskell/"
"http://haskell-distributed.github.io/"
"http://www.brendanhay.nz/amazonka-1-0-release/"
"http://www.gilmi.xyz/post/2015/08/12/how-haskell-models-io"
"https://code.facebook.com/posts/745068642270222/fighting-spam-with-haskell/"
"https://pragprog.com/magazines/2012-08/thinking-functionally-with-haskell"
"https://www.fpcomplete.com/user/agocorona/moving-haskell-processes-between-nodes-transient-effects-iv"
] | UTF-8 | Python | false | false | 664 | py | 48 | Haskell.py | 47 | 0.76506 | 0.704819 | 0 | 12 | 54.416667 | 104 |
adrianhindes/generalizedModeling | 6,579,889,941,059 | e86b14576450d55482f6ba3190d108dde2e75a36 | 451ac0df177e15e17105fce4404511bc48bae618 | /mangrove-peat-salt/jacobian.py | e92eadc010c2bf8ffd0e10c41da9048c246ded83 | [
"MIT"
]
| permissive | https://github.com/adrianhindes/generalizedModeling | dab9c3723ecd539093b351c5e78ac34dcbae4655 | ba0e366814fa289aafde4011b95d24b647da4cb6 | refs/heads/master | 2020-05-20T20:55:33.925217 | 2019-10-07T04:08:01 | 2019-10-07T04:08:01 | 185,751,376 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 30 13:17:03 2019
@author: hindesa
return Jacobian for mangroves model given parameter set
"""
import numpy as np
def computeJac(data):
alphaM = data['alphaM']
alphaP = data['alphaP']
alphaS = data['alphaS']
betaG = data['betaG']
betaP = data['betaP']
betaP = data['betaP']
betaD = data['betaD']
betaL = data['betaL']
betaA = data['betaA']
betaR = data['betaR']
betaE = data['betaE']
hydP = data['hydP']
propM = data['propM']
propS = data['propS']
growM = data['growM']
drownHyd = data['drownHyd']
drownM = data['drownM']
stressM = data['stressM']
stressS = data['stressS']
littM = data['littM']
accSed = data['accSed']
sedHyd = data['sedHyd']
accM = data['accM']
retLitt = data['retLitt']
retHyd = data['retHyd']
volGrow = data['volGrow']
volP = data['volP']
eroM = data['eroM']
subsM = data['subsM']
subsHyd = data['subsHyd']
subsP = data['subsP']
inS = data['inS']
inM = data['inM']
outS = data['outS']
# Define Jacobian matrix elements
# Note syntax here is not strictly correct - dmdm == (dm/dt)/dm
betaG = 1-betaP
betaS = 1-betaD-betaL
betaSB = 1-betaE
betaV = 1-betaA-betaR
dmdm = betaP*propM +betaG*growM -betaS*stressM -betaD*drownM -betaL*littM
dmdp = -1*betaD*hydP*drownHyd
dmds = betaP*propS -betaS*stressS
dpdm = betaA*accM +betaR*retLitt*littM + betaV*volGrow*growM\
-betaE*eroM -betaSB*subsM
dpdp = hydP*(betaA*accSed*sedHyd + betaR*retHyd-betaSB*subsHyd)\
+betaV*volP-betaSB*subsP
dpds = 0
dsdm = inM
dsdp = 0
dsds = inS - outS
# alpha paramater array
alphas = np.array([ [alphaM, 0, 0], [0, alphaP, 0], [0, 0, alphaS]])
alphas = alphas.astype(float)
R1 = [dmdm, dmdp, dmds]
R2 = [dpdm, dpdp, dpds]
R3 = [dsdm, dsdp, dsds]
jac0 = np.array([R1,R2,R3])
jac0 = jac0.astype(float)
jac = np.matmul(alphas, jac0)
return jac | UTF-8 | Python | false | false | 2,144 | py | 46 | jacobian.py | 44 | 0.564832 | 0.548041 | 0 | 93 | 22.064516 | 77 |
ajso/recommendation-system- | 11,974,368,837,574 | bf8bb453fa1733f7aa861e602756c09040207317 | 77173d31ba70fa2e5ea8e4457c0e9d84195a2a93 | /recommender/load_movies.py | 11c06ecd4bfe87fb34a9332569573d4366ae3cc7 | []
| no_license | https://github.com/ajso/recommendation-system- | 81896547f6154ea950c3ff8598af307e6de70b09 | 8507784d36ee7f87b0130bb77f99d5876ba913da | refs/heads/master | 2022-05-17T16:13:13.731392 | 2018-12-07T12:52:28 | 2018-12-07T12:52:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys, os
import pandas as pd
import datetime
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "recommender.settings")
import django
django.setup()
from moviesreview.models import Movie
def save_movie_from_row(movie_row):
movie = Movie()
movie.id = movie_row[0]
movie.title = movie_row[1]
movie.save()
if __name__ == "__main__":
if len(sys.argv) == 2:
print ("Reading from file " + str(sys.argv[1]))
movies_df = pd.read_csv(sys.argv[1])
print (movies_df)
movies_df.apply(
save_movie_from_row,
axis=1
)
print ("There are {} movies".format(Movie.objects.count()))
else:
print ("Please, provide movie file path")
| UTF-8 | Python | false | false | 756 | py | 2 | load_movies.py | 2 | 0.580688 | 0.572751 | 0 | 36 | 20 | 71 |
robertoantony32/Activity-010 | 16,303,695,893,709 | e1dac0fd5f4715e3b6882b1174688bf84d4d670b | 9962b2cd97dfbfa0caff4d69024a2804c04e9212 | /gameplay.py | b1d2e24020bc128a976f5db56d4e64e3a9d410d1 | []
| no_license | https://github.com/robertoantony32/Activity-010 | 1b587aa4e662b941353ed564a39bbc5b730df99e | 27681653af3fe6efd644d4ec516ec936c2391d50 | refs/heads/master | 2023-06-05T05:36:32.002823 | 2021-06-28T14:30:35 | 2021-06-28T14:30:35 | 380,808,324 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame
from pygame.locals import *
from until import Until
class Gameplay:
def __init__(self):
pygame.init()
self.screen = pygame.display.set_mode((Until.WINDOWM_SIZE, Until.WINDOW_SIZE))
self.clock = pygame.time.Clock()
self.running = True
def loop(self):
while self.running:
for event in pygame.event.get():
if event.type == QUIT:
running = False
self.screen.fill(Until.BLACK)
self.update()
self.render()
pygame.display.flip()
self.clock.tick(60)
def update(self):
pass
def render(self):
pass | UTF-8 | Python | false | false | 706 | py | 13 | gameplay.py | 13 | 0.532578 | 0.529745 | 0 | 34 | 19.794118 | 86 |
raphaelhazout/Raphael5 | 9,079,560,898,077 | 19a6c28f28f4d1a0d378d71ea6530ee55e2456ca | 85a10d5fd6788bbcb743f7cfa95a1f12d80c5491 | /Package1/test_worker.py | f842ac67b0ca63111bd9d229bd8a874fb8a00a05 | []
| no_license | https://github.com/raphaelhazout/Raphael5 | 60782715acf16e2d9273038d9de0f5fdd3ad9f30 | 29c26b1ffb756ddfb5aafa62f5b12992731256c0 | refs/heads/master | 2022-11-24T21:41:36.024135 | 2022-08-02T19:01:49 | 2022-08-02T19:01:49 | 279,591,863 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from unittest import TestCase
from unittest.mock import patch
from Package1.Worker import *
class TestWorker(TestCase):
def setUp(self):
print('setUp')
self.moshe = Worker('Moshe', 'Cohen', 2000,2,17,'1 Yigal Alon, Tel Aviv','il')
print(self.moshe.full_name())
def tearDown(self):
print('tearDown')
def test_full_name(self):
res = self.moshe.full_name()
self.assertTrue(res == 'Moshe Cohen')
def test_age(self):
pass
def test_days_to_birthday(self):
pass
def test_greet(self):
pass
def test_location(self):
with patch('Package1.Worker.requests.get') as mocked_get:
mocked_get.return_value.ok=True
mocked_get.return_value.text='Success'
a = self.moshe.location()
mocked_get.assert_called_with('https://geocode.xyz/?locate=1 Yigal Alon, Tel Aviv,il &json=1')
mocked_get.assert_called_once_with('https://geocode.xyz/?locate=1 Yigal Alon, Tel Aviv,il &json=1')
self.assertEqual(a,'Success')
| UTF-8 | Python | false | false | 1,076 | py | 79 | test_worker.py | 76 | 0.615242 | 0.60223 | 0 | 35 | 29.714286 | 111 |
t-hanya/xreco | 7,808,250,567,587 | 8430ecba945ccad58a9d8ae1cb1fc016f39f96ad | 965f54a150ee950fa00d684b44a3fbbfc80d00b1 | /xreco/__init__.py | 7f024c31608112ce182288f35a1cd6bceb7cace7 | [
"MIT"
]
| permissive | https://github.com/t-hanya/xreco | 4bc10ed0d1478c0d093507f8e2613cbd980c7948 | b004d6892a49018d8684d02fda88640e07ca0b26 | refs/heads/master | 2021-04-15T06:55:05.104047 | 2018-03-26T14:40:44 | 2018-03-26T14:40:44 | 126,154,969 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
xreco module
"""
from xreco.argument_parser import ArgumentParser
| UTF-8 | Python | false | false | 95 | py | 6 | __init__.py | 4 | 0.673684 | 0.663158 | 0 | 6 | 14.833333 | 48 |
pmlmodelling/fabm-mizer | 8,254,927,186,364 | b02a669179a68fcbbaafdce3bc6142bd415b82d8 | 98125515c8b7efaacdd0ba7728ed225242e8c405 | /python/run_offline_amm7.py | b9c70aed1cb3908ad77b76a0c6295c49873b3144 | []
| no_license | https://github.com/pmlmodelling/fabm-mizer | 02b7c58d8f3ad710a78013e34f56c041c25e8798 | 3a62680301c1806f1369e8ff7ddf5d0555c131b7 | refs/heads/master | 2023-09-03T17:32:29.403712 | 2023-07-17T13:41:00 | 2023-07-17T13:41:00 | 341,277,850 | 0 | 0 | null | false | 2023-07-17T13:41:02 | 2021-02-22T17:17:05 | 2023-05-31T13:00:21 | 2023-07-17T13:41:00 | 849 | 1 | 0 | 0 | Fortran | false | false | from __future__ import print_function
import sys
import os
import glob
import datetime
import argparse
import re
import shutil
import gc
gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
import yaml
import numpy
from matplotlib import pyplot
from matplotlib.dates import datestr2num, date2num, num2date
import netCDF4
# Locations of the FABM source tree and the pyfabm build directory.
# NOTE(review): each assignment is immediately overridden by the next one;
# only the *last* value of fabm_root/build_dir takes effect (developer toggles).
fabm_root = '../../fabm'
fabm_root = '../../fabm-git'
build_dir = '../../build/pyfabm'
build_dir = '../build'
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), fabm_root, 'src/drivers/python')))
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), build_dir, 'Release'))) # Visual Studio/Windows
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), build_dir))) # Linux
# Optional analysis window applied in processLocation (converted with date2num).
# The datetime values below are immediately overridden with None, which means:
# process the full time range present in the source files.
start_time = datetime.datetime(2009, 1, 1)
stop_time = datetime.datetime(2012, 1, 1)
start_time = None
stop_time = None
import mizer
# Convert Equivalent Spherical Diameter(s) (micrometer) to individual wet mass (g).
def esd2mass(*d):
    """Convert equivalent spherical diameter(s) to wet mass.

    d: one or more equivalent spherical diameters in micrometer.
    Returns a numpy array of wet masses in g. Mass in g approximately equals
    volume in m3 multiplied by 1e6 (assumes a density of 1000 kg/m3).
    """
    radius_m = numpy.asarray(d) / 2e6              # micrometer diameter -> meter radius
    volume_m3 = 4. / 3. * numpy.pi * radius_m ** 3 # volume of a sphere in m3
    return volume_m3 * 1e6
additional_outputs = []  # extra outputs to request; not referenced elsewhere in this file
# Prey sources taken from the ERSEM output: (label, NetCDF variable name,
# (min, max) individual wet mass in g). Phyto-/microzooplankton size bounds are
# specified via equivalent spherical diameter; the mesozooplankton bounds are
# given directly in g wet mass.
preylist = []
preylist.append(('diatoms', 'P1_c', esd2mass(20., 200.)))
preylist.append(('nanophytoplankton', 'P2_c', esd2mass(2., 20.)))
preylist.append(('picophytoplankton', 'P3_c', esd2mass(.2, 2.)))
preylist.append(('microphytoplankton', 'P4_c', esd2mass(20., 200.)))
preylist.append(('mesozooplankton', 'Z4_c', (4.188e-6, 1e-3)))
preylist.append(('microzooplankton', 'Z5_c', esd2mass(20., 200.)))
preylist.append(('heterotrophic nanoflagellates', 'Z6_c', esd2mass(2., 20.)))
# Names of the temperature variable and time dimension/variable in the
# (NEMO-style) source NetCDF files.
temp_name = 'votemper'
time_name = 'time_counter'
# mizer parameters
# Configuration passed to mizer.Mizer below. Key meanings noted here are
# inferred from common mizer/size-spectrum conventions -- confirm against the
# mizer implementation before relying on them.
parameters = {
    'w_min': 1e-3,   # lower bound of the size grid (g wet mass) -- presumably
    'w_inf': 1e6,    # upper bound of the size grid (g wet mass) -- presumably
    'nclass': 100,   # number of size classes
    'T_dependence': 1,  # temperature scaling of rates enabled
    'T_ref': 13.,    # reference temperature (deg C) -- presumably
    'E_a': 0.63,     # activation energy for temperature scaling (eV) -- presumably
    'beta': 100,     # preferred predator:prey mass ratio -- presumably
    'sigma': float(numpy.log(10.)), # paper has log10 units, we use ln units
    'gamma': 156, # clearance in m3/yr for single individual of mass 1 g. Blanchard et al 2009: 640 m3/yr; Blanchard et al 2012: 64 ?UNITS? [times kappa=0.5 for time spent in pelagic]; Faking giants paper gives 10^14.076 * W^0.926 * exp(-Ea/(kT) L d-1, which is 428 L d-1 = 156 m3 yr-1
    'q': 0.82,       # mass exponent of the clearance rate -- presumably
    'alpha': 0.2,    # assimilation efficiency -- presumably
    'z0_type': 1,
    'z0pre': 0.1,    # background mortality prefactor -- presumably
    'z0exp': -0.25,  # background mortality mass exponent -- presumably
    'w_s': 1000.,
    'z_s': 0.3,
    'ks': 0.,
    'SRR': 0,
    'recruitment': 0.,
    'h': 1e9,        # maximum intake coefficient; very large = effectively unlimited -- presumably
    'fishing_type': 1,
    'w_minF': 1.25, # Blanchard et al 2012
    'F': 0.4         # fishing mortality -- presumably 1/yr; TODO confirm
}
def addVariable(nc, name, long_name, units, data=None, dimensions=None, zlib=False, contiguous=True, dtype='f4'):
    """Create and return a variable in NetCDF dataset *nc*.

    Defaults to a 1D time-series variable (dimension ``time_name``). The
    per-variable chunk cache is disabled. When non-contiguous storage is
    requested, spatial dimensions ('x', 'y') are chunked one cell at a time
    while all other dimensions are kept in a single chunk. If both nav_lon and
    nav_lat exist in *nc* and the variable is spatial, a CF 'coordinates'
    attribute is attached.
    """
    if dimensions is None:
        dimensions = (time_name,)
    if contiguous:
        chunksizes = None
    else:
        chunksizes = [1 if dim in ('x', 'y') else len(nc.dimensions[dim])
                      for dim in dimensions]
    var = nc.createVariable(name, dtype, dimensions, zlib=zlib, fill_value=-2e20, contiguous=contiguous, chunksizes=chunksizes)
    var.set_var_chunk_cache(0, 0, 0)  # disable per-variable chunk caching
    if data is not None:
        var[...] = data
    var.long_name = long_name
    var.units = units
    spatial = 'x' in dimensions and 'y' in dimensions
    if spatial and 'nav_lon' in nc.variables and 'nav_lat' in nc.variables:
        var.coordinates = 'nav_lon nav_lat'
    return var
def copyVariable(nc, ncvar, **kwargs):
    """Clone variable *ncvar* (definition, attributes and data) into dataset *nc*.

    Extra keyword arguments (e.g. zlib=) are forwarded to createVariable.
    Returns the newly created variable.
    """
    out = nc.createVariable(ncvar.name, ncvar.dtype, ncvar.dimensions,
                            fill_value=getattr(ncvar, '_FillValue', None), **kwargs)
    for attr in ncvar.ncattrs():
        if attr == '_FillValue':
            continue  # already handled through the fill_value argument above
        setattr(out, attr, getattr(ncvar, attr))
    spatial = 'x' in ncvar.dimensions and 'y' in ncvar.dimensions
    if spatial and 'nav_lon' in nc.variables and 'nav_lat' in nc.variables:
        out.coordinates = 'nav_lon nav_lat'
    out[...] = ncvar[...]
    return out
def processLocation(args):
    """Run the mizer size-spectrum model for one horizontal grid cell.

    args: tuple (path, i, j) -- source NetCDF file and the x/y cell indices.
    Returns the flat tuple consumed by saveResult: (path, i, j, times, biomass,
    landings, lfi10, lfi80, lfi500, lfi10000, bio1_25, bio10, bio80, bio1000,
    bin_masses, spectrum), or None if the model run failed.
    """
    path, i, j = args
    print('Processing %s for i=%i, j=%i...' % (path, i, j))
    # prey (currently from GOTM-ERSEM simulation) - scale to g WM/m3
    scale_factor = 10*0.001 # 10 g wet mass/g carbon * 0.001 g C/mg C
    prey = []
    for name, ncname, size_range in preylist:
        timeseries = mizer.datasources.TimeSeries(path, ncname, scale_factor=scale_factor, time_name=time_name, x=i, y=j, stop=stop_time)
        # NOTE(review): `times` keeps the time axis of the *last* prey variable;
        # assumes all prey variables share one time coordinate -- presumably
        # true for a single NEMO/ERSEM output file, TODO confirm.
        times = timeseries.times
        prey.append(mizer.Prey(name, size_range, timeseries))
    prey_collection = mizer.PreyCollection(*prey)
    prey_collection = mizer.GriddedPreyCollection(prey_collection)
    # environment
    temp = mizer.datasources.TimeSeries(path, temp_name, time_name=time_name, x=i, y=j, stop=stop_time)
    # Effective depth derived from depth-integrated biomass moments.
    depth = mizer.datasources.TimeSeries(path, 'bm_int**2/bm2_int', time_name=time_name, x=i, y=j, stop=stop_time)
    #temp = 12.
    # create mizer model
    m = mizer.Mizer(prey=prey_collection, parameters=parameters, temperature=temp, recruitment_from_prey=2, depth=depth)
    # Time-integrate
    spinup = 50  # spin-up length passed to m.run -- presumably years; TODO confirm
    # Clip the time axis to the module-level start_time/stop_time window
    # (both None by default = use the full range; dt is in days: 15-minute steps).
    istart, istop = 0, times.size
    if start_time is not None:
        istart = times.searchsorted(date2num(start_time))
    if stop_time is not None:
        istop = times.searchsorted(date2num(stop_time))
    times = times[istart:istop]
    result = m.run(times, spinup=spinup, verbose=True, save_spinup=False,dt=1/(24*4))
    if result is None:
        return
    #result.plot_spectrum()
    #result.plot_lfi_timeseries(500., 1.25)
    #result.plot_biomass_timeseries(0., 500.)
    #result.plot_timeseries('landings')
    #result.plot_annual_mean('landings', plot_change=True)
    #pyplot.show()
    # Derived time series: total biomass, landings, large-fish indices (lfi*)
    # and biomass above several mass thresholds (bio*).
    biomass = result.get_biomass_timeseries()
    landings_var, landings = result.get_timeseries('landings')
    lfi10 = result.get_lfi_timeseries(10.)
    lfi80 = result.get_lfi_timeseries(80.)
    lfi500 = result.get_lfi_timeseries(500.)
    lfi10000 = result.get_lfi_timeseries(10000.)
    bio1_25 = result.get_biomass_timeseries(1.25)
    bio10 = result.get_biomass_timeseries(10.)
    bio80 = result.get_biomass_timeseries(80.)
    bio1000 = result.get_biomass_timeseries(1000.)
    # Difference the (presumably cumulative) landings into per-interval values;
    # the first interval is set to zero.
    landings[1:] = landings[1:] - landings[:-1]
    landings[0] = 0
    return path, i, j, times, biomass, landings, lfi10, lfi80, lfi500, lfi10000, bio1_25,bio10,bio80,bio1000,m.bin_masses, result.spectrum
def ppProcessLocation(args, p):
    """Parallel Python worker entry point: delegate to processLocation.

    pp ships this function to worker processes, so the module is re-imported
    by name inside the worker and its module-level ``parameters`` is replaced
    with *p* (the possibly user-overridden parameter set) before running.
    """
    import run_offline_amm7
    run_offline_amm7.parameters = p
    return run_offline_amm7.processLocation(args)
if __name__ == '__main__':
    # Command-line interface: input/output paths plus execution back-end options.
    parser = argparse.ArgumentParser()
    parser.add_argument('source_path')
    parser.add_argument('output_path')
    parser.add_argument('--method', choices=('serial', 'multiprocessing', 'pp'), default='pp')
    parser.add_argument('--ncpus', type=int, default=None)
    parser.add_argument('--ppservers', default=None)
    parser.add_argument('--secret', default=None)
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--parameters', default=None)
    parser.add_argument('--ifirst', type=int, default=None)
    parser.add_argument('--shm', action='store_true')
    parser.add_argument('--profile', action='store_true')
    args = parser.parse_args()

    if args.parameters is not None:
        # Replace the default mizer parameters with a user-supplied YAML file.
        # Bug fix: mode 'rU' was removed in Python 3.11; universal newlines are
        # the default in text mode, so plain 'r' is equivalent.
        with open(args.parameters, 'r') as f:
            # Bug fix: yaml.load() without a Loader raises TypeError with
            # PyYAML >= 6 (and is unsafe on untrusted input). SafeLoader is
            # sufficient for a plain parameter mapping without Python tags.
            args.parameters = yaml.load(f, Loader=yaml.SafeLoader)
        parameters = args.parameters
    # Normalize --ppservers into the tuple of host names Parallel Python expects.
    if isinstance(args.ppservers, (str, u''.__class__)):
        match = re.match(r'(.*)\[(.*)\](.*)', args.ppservers)
        if match is not None:
            # Hostnames in PBS/SLURM notation, e.g., node[01-06]
            ppservers = []
            left, middle, right = match.groups()
            for item in middle.split(','):
                if '-' in item:
                    # Expand a numeric range, preserving the zero padding of the
                    # lower bound (node[01-03] -> node01, node02, node03).
                    start, stop = item.split('-')
                    for i in range(int(start), int(stop)+1):
                        ppservers.append('%s%s%s' % (left, str(i).zfill(len(start)), right))
                else:
                    ppservers.append('%s%s%s' % (left, item, right))
        else:
            # Comma-separated hostnames
            ppservers = args.ppservers.split(',')
        ppservers = tuple(ppservers)
    else:
        assert args.ppservers is None
        ppservers = ()
    if args.ncpus is None:
        # Let Parallel Python choose the number of local worker processes.
        args.ncpus = 'autodetect'
    # Build the work list: one (path, i, j) task per valid grid cell per file.
    tasks = []
    if not os.path.isdir(args.output_path):
        os.mkdir(args.output_path)
    paths = glob.glob(args.source_path)
    assert len(paths) > 0, 'no files found at %s' % args.source_path
    for path in paths:
        print('Opening %s...' % path)
        with netCDF4.Dataset(path) as nc:
            if 'mask' in nc.variables:
                mask = nc.variables['mask'][...] > 0
            else:
                # No explicit mask variable: treat cells with any positive
                # depth-integrated biomass (bm_int) over time as valid points.
                mask = (nc.variables['bm_int'][...] > 0).any(axis=0)
            for i in range(len(nc.dimensions['x'])):
                for j in range(len(nc.dimensions['y'])):
                    if mask[j, i]:
                        tasks.append((path, i, j))
    if args.ifirst is not None:
        # Resume support: skip the tasks already handled by a previous run.
        tasks = tasks[args.ifirst:]
    # Per-source-file caches of the open output dataset and its variables,
    # populated lazily by getOutput below.
    source2output = {}
    source2vars = {}
def getOutput(source, times, w, compress=False, add_biomass_per_bin=False, contiguous=False):
if source not in source2output:
output_path = os.path.join(args.output_path, os.path.basename(source))
if args.ifirst is not None:
assert os.path.isfile(output_path)
ncout = netCDF4.Dataset(output_path, 'r+')
else:
with netCDF4.Dataset(path) as nc:
print('Creating output file %s...' % output_path, end='')
ncout = netCDF4.Dataset(output_path, 'w') #, persist=True, diskless=True)
nctime_in = nc.variables[time_name]
ncout.createDimension(time_name, len(times))
ncout.createDimension('x', len(nc.dimensions['x']))
ncout.createDimension('y', len(nc.dimensions['y']))
nctime_out = ncout.createVariable(time_name, nctime_in.datatype, nctime_in.dimensions, zlib=compress, contiguous=contiguous)
nctime_out.units = nctime_in.units
dates = [dt.replace(tzinfo=None) for dt in num2date(times)]
nctime_out[...] = netCDF4.date2num(dates, nctime_out.units)
if 'nav_lon' in nc.variables:
copyVariable(ncout, nc.variables['nav_lon'], zlib=compress)
copyVariable(ncout, nc.variables['nav_lat'], zlib=compress)
vardict = {}
vardict['mask'] = ncout.createVariable('mask', 'i1', ('y', 'x'), zlib=compress, contiguous=contiguous)
vardict['mask'][...] = 0
vardict['biomass'] = addVariable(ncout, 'biomass', 'biomass', 'g WM/m2', dimensions=(time_name, 'y', 'x'), zlib=compress, contiguous=contiguous)
vardict['landings'] = addVariable(ncout, 'landings', 'landings', 'g WM', dimensions=(time_name, 'y', 'x'), zlib=compress, contiguous=contiguous)
vardict['lfi10'] = addVariable(ncout, 'lfi10', 'fraction of fish > 10 g', '-', dimensions=(time_name, 'y', 'x'), zlib=compress, contiguous=contiguous)
vardict['lfi80'] = addVariable(ncout, 'lfi80', 'fraction of fish > 80 g', '-', dimensions=(time_name, 'y', 'x'), zlib=compress, contiguous=contiguous)
vardict['lfi500'] = addVariable(ncout, 'lfi500', 'fraction of fish > 500 g', '-', dimensions=(time_name, 'y', 'x'), zlib=compress, contiguous=contiguous)
vardict['lfi10000'] = addVariable(ncout, 'lfi10000', 'fraction of fish > 10000 g', '-', dimensions=(time_name, 'y', 'x'), zlib=compress, contiguous=contiguous)
vardict['bio1_25'] = addVariable(ncout, 'bio1_25', 'fraction of fish > 1.25 g', '-', dimensions=(time_name, 'y', 'x'), zlib=compress, contiguous=contiguous)
vardict['bio10'] = addVariable(ncout, 'bio10', 'fraction of fish > 10 g', '-', dimensions=(time_name, 'y', 'x'), zlib=compress, contiguous=contiguous)
vardict['bio80'] = addVariable(ncout, 'bio80', 'fraction of fish > 80 g', '-', dimensions=(time_name, 'y', 'x'), zlib=compress, contiguous=contiguous)
vardict['bio1000'] = addVariable(ncout, 'bio1000', 'fraction of fish > 1000 g', '-', dimensions=(time_name, 'y', 'x'), zlib=compress, contiguous=contiguous)
if add_biomass_per_bin:
ncout.createDimension('bin', w.size)
addVariable(ncout, 'w', 'individual mass', 'g WM', dimensions=('bin',), zlib=compress)[:] = w
vardict['Nw'] = addVariable(ncout, 'Nw', 'biomass per bin', 'g WM/m2', dimensions=(time_name, 'y', 'x', 'bin'), zlib=compress, contiguous=contiguous)
vardict['Nw_final'] = addVariable(ncout, 'Nw_final', 'final biomass per bin', 'g WM/m2', dimensions=('y', 'x', 'bin'), zlib=compress, contiguous=contiguous)
print('done')
source2output[source] = ncout
source2vars[source] = vardict
return source2output[source], source2vars[source]
def saveResult(result, sync=True, add_biomass_per_bin=False):
source, i, j, times, biomass, landings, lfi10,lfi80, lfi500, lfi10000, bio1_25, bio10, bio80, bio1000,w, spectrum = result
ncout, vardict = getOutput(source, times, w, add_biomass_per_bin=add_biomass_per_bin)
print('saving results from %s, i=%i, j=%i (mean biomass = %.3g)' % (source, i, j, biomass.mean()))
vardict['biomass'][:, j, i] = biomass
vardict['landings'][:, j, i] = landings
vardict['lfi10'][:,j,i]= lfi10
vardict['lfi80'][:, j, i] = lfi80
vardict['lfi500'][:, j, i] = lfi500
vardict['lfi10000'][:, j, i] = lfi10000
vardict['bio1_25'][:,j,i]= bio1_25
vardict['bio10'][:, j, i] = bio10
vardict['bio80'][:, j, i] = bio80
vardict['bio1000'][:, j, i] = bio1000
vardict['mask'][j, i] = 1
if add_biomass_per_bin:
vardict['Nw'][:, j, i, :] = spectrum
vardict['Nw_final'][j, i, :] = spectrum[-1, :]
if sync:
print('Synchronizing NetCDF output to disk...', end='')
ncout.sync()
print('done')
# Dispatch all tasks using the execution backend selected on the command line:
# 'serial' (optionally profiled), 'multiprocessing', or a Parallel Python
# (pp) cluster as the fallback.
job_server = None
final_output_path = None
if args.method == 'serial':
    # Serial mode only runs the first few tasks (debug/smoke-test mode).
    def runSerial(n):
        for i in range(n):
            saveResult(processLocation(tasks[i]))
    if args.profile:
        import cProfile
        import pstats
        cProfile.run('runSerial(%s)' % min(len(tasks), 3), 'mizerprof')
        p = pstats.Stats('mizerprof')
        p.strip_dirs().sort_stats('cumulative').print_stats()
    else:
        runSerial(min(len(tasks), 3))
elif args.method == 'multiprocessing':
    # Process all EEZs using all available cores
    # Kill child process after processing a single EEZ (maxtasksperchild=1) to prevent ever increasing memory consumption.
    import multiprocessing
    pool = multiprocessing.Pool(processes=None, maxtasksperchild=1)
    #results = pool.map(processLocation, tasks)
    #for result in results:
    #    saveResult(result)
    #result = pool.map_async(processLocation, tasks, callback=saveResult)
    #result.wait()
    # imap streams results as they complete; sync is deferred to file close.
    for result in pool.imap(processLocation, tasks):
        saveResult(result, sync=False, add_biomass_per_bin=True)
else:
    # Parallel Python (pp) cluster backend.
    if args.debug:
        import logging
        logging.basicConfig( level=logging.DEBUG)
    import pp
    if args.shm:
        # Write to shared memory first, move to the real destination at the end.
        final_output_path = args.output_path
        args.output_path = '/dev/shm'
    job_server = pp.Server(ncpus=args.ncpus, ppservers=ppservers, restart=True, secret=args.secret)
    jobs = []
    for task in tasks:
        jobs.append(job_server.submit(ppProcessLocation, (task, args.parameters)))
    ijob = 0
    nfailed = 0
    # Collect results in submission order; sync/garbage-collect every 1000 jobs.
    while jobs:
        job = jobs.pop(0)
        result = job()
        sync = ijob % 1000 == 0
        if result is not None:
            print('job %i: saving result...' % ijob)
            saveResult(result, sync=sync, add_biomass_per_bin=True)
            if sync:
                gc.collect()
                print(gc.garbage)
        else:
            print('job %i: FAILED!' % ijob)
            nfailed += 1
        ijob += 1
    print('%i tasks out of %i FAILED.' % (nfailed, len(tasks)))
    job_server.print_stats()
# Close every output file, moving it out of shared memory if applicable.
for source, nc in source2output.items():
    name = os.path.basename(source)
    print('Closing %s...' % os.path.join(args.output_path, name))
    nc.close()
    if final_output_path is not None:
        target = os.path.join(final_output_path, name)
        if os.path.isfile(target):
            os.remove(target)
        shutil.move(os.path.join(args.output_path, name), target)
if job_server is not None:
    job_server.destroy()
| UTF-8 | Python | false | false | 16,973 | py | 35 | run_offline_amm7.py | 13 | 0.603193 | 0.574795 | 0 | 374 | 44.379679 | 285 |
goldfish724/kaori | 15,118,284,916,699 | 48d12e8a8b353ffe2ea81fc0b0652c03bb92b6f5 | 3f42728b17c8e780c864e847ceed96f0abb9d693 | /kaori/skills/file_uploads/__init__.py | 847ec416f3d266a7b691b113719a143bc7023788 | [
"MIT"
]
| permissive | https://github.com/goldfish724/kaori | b1b4390085a8713098e54c16ba7d57e5f744bcc5 | aa2abedd2be4484539d79340ebdc9b6e213b5381 | refs/heads/master | 2022-12-30T13:04:44.802242 | 2020-08-24T16:04:29 | 2020-08-24T16:04:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .abc import FileUploader
from .local import LocalFileUploader
from .gcloud import GCloudStorageUploader | UTF-8 | Python | false | false | 108 | py | 124 | __init__.py | 104 | 0.87037 | 0.87037 | 0 | 3 | 35.333333 | 41 |
ishangala16/Python-Basics | 15,401,752,740,922 | 41c8bd05a57113ce0eb18929dd4503431e70b50d | 3e6ef2a834f373a3f51fb2de4abe0c1ac9ce0cbd | /searchfile.py | e2f3544962588becb67780601eda7c841ba41213 | []
| no_license | https://github.com/ishangala16/Python-Basics | 5386851d139f5fefcb874607c024b61590606eec | 7bfd00e137196a7296e82dac521f9c3c5cc74c9e | refs/heads/master | 2020-07-10T14:04:57.029194 | 2019-12-27T10:02:20 | 2019-12-27T10:02:20 | 204,279,959 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | searchWord=input("enter word to search: ")
# Scan the file once, counting occurrences of searchWord plus total lines,
# words and (non-whitespace) characters.
with open('/home/fpl-i/Desktop/chagan.txt') as infile:
    words = 0
    characters=0
    count = 0
    for lineno, line in enumerate(infile,1):
        # Overlap-free substring count of the search term on this line.
        count += line.count(searchWord)
        wordslist = line.split()
        words += len(wordslist)
        characters += sum(len(word) for word
                          in wordslist)
    # NOTE(review): `lineno` is only bound if the file has at least one
    # line — an empty file would raise NameError here.
    print(lineno)
    print(words)
    print(characters)
print('Frequency of ',searchWord,'is ',count)
| UTF-8 | Python | false | false | 460 | py | 8 | searchfile.py | 8 | 0.634783 | 0.626087 | 0 | 15 | 29.066667 | 54 |
andrasormos/Ai_Statistics | 12,541,304,504,629 | 3172c81e7378eb9e2464ab40d3596a34d1c5b63c | fca1c652a0b9672db1e64cd4de9c89d6f1f28e1d | /Examiner.py | fd090e7f4ad8a04395df8b6334e55cb7a5dea330 | []
| no_license | https://github.com/andrasormos/Ai_Statistics | e12948683b1a549adeedf9ca288fdf78973362e2 | 05c15e20c6eaae46b04c933185fefa5d63e81c56 | refs/heads/master | 2020-03-31T16:37:06.476104 | 2018-10-13T23:16:15 | 2018-10-13T23:16:15 | 152,382,495 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
from random import randint
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pylab as pylab
from scipy import stats
import os.path
def guessCorrectness(guessedRightCnt, guessUpCnt, guessDownCnt, guessSkipCnt, rewardSum, guessCnt, profit, period):
    """Aggregate per-step guess statistics into per-``period`` averages.

    Walks ``len(guessCnt)`` steps, accumulating window counters; every
    ``period`` steps — and only when that window contained at least one
    up/down guess — it records:
      - correctness: right guesses as a percentage of up+down guesses,
      - mean up/down/skip counts and mean reward over the window.

    Note: ``profit`` is accepted but unused (kept for the original call
    signature).  Counters are reset only inside windows with up+down
    activity, so a window with none stalls all later windows — behaviour
    preserved from the original implementation.
    """
    correctnessList, upList, downList, skiplist, scoreList = [], [], [], [], []
    window = 0
    ups = downs = skips = score = rights = 0
    for step in range(len(guessCnt)):
        window += 1
        ups += guessUpCnt[step]
        downs += guessDownCnt[step]
        skips += guessSkipCnt[step]
        score += rewardSum[step]
        rights += guessedRightCnt[step]
        if window == period and (ups + downs) != 0:
            correctnessList.append((rights / (downs + ups)) * 100)
            upList.append(ups / period)
            downList.append(downs / period)
            skiplist.append(skips / period)
            scoreList.append(score / period)
            ups = downs = skips = score = rights = 0
            window = 0
    return correctnessList, upList, downList, skiplist, scoreList
def reduceCnt(eps, eps_period):
    """Collapse ``eps`` into sums over consecutive chunks of ``eps_period``
    entries, scaling each chunk sum by 9.

    A partial trailing chunk is dropped, matching the original behaviour.
    """
    epsList = []
    chunk_sum = 0
    filled = 0
    for value in eps:
        chunk_sum += value
        filled += 1
        if filled == eps_period:
            epsList.append(chunk_sum * 9)
            chunk_sum = 0
            filled = 0
    return epsList
# Plot one training run: price/trade actions from the action log, and (when
# present) evaluation profit from the eval log.  Saves a PNG and shows it.
log_nr = "35"
imageName = "run_" + log_nr
t_period = 100
e_period = 50
eps_period = 1
# Eval log is optional; the second figure panel is only drawn when it exists.
e_file = os.path.exists("/home/andras/PycharmProjects/TradingGame/logs/evalLog_0" + log_nr + ".csv")
a_log = pd.read_csv("/home/andras/PycharmProjects/TradingGame/logs/actionLog_0" + log_nr + ".csv", sep=",", index_col=0)
a_price = a_log.BTCPrice
a_bought = a_log.bought
a_sold = a_log.sold
priceMax = np.amax(a_price)
priceMin = np.amin(a_price)
if e_file == True:
    e_log = pd.read_csv("/home/andras/PycharmProjects/TradingGame/logs/evalLog_0" + log_nr + ".csv", sep=",", index_col=0)
    e_sumPercent = e_log.sumPercent
    e_rewardSum = e_log.rewardSum
    e_profit = e_log.profit
    e_guessedRightCnt = e_log.guessedRightCnt
    e_guessedWrongCnt = e_log.guessedWrongCnt
    e_guessSkipCnt = e_log.guessSkipCnt
    e_guessCnt = e_log.guessCnt
    e_guessUpCnt = e_log.guessUpCnt
    e_guessDownCnt = e_log.guessDownCnt
    print("e_Log Count:", len(e_profit))
    print("e_Log Period:", e_period)
    e_profit = e_log.profit
fig = plt.figure(figsize=(12, 10))
if e_file == True:
    #e_correctnessList, e_upList, e_downList, e_skiplist, e_scoreList = guessCorrectness(e_guessedRightCnt, e_guessUpCnt, e_guessDownCnt, e_guessSkipCnt, e_rewardSum, e_guessCnt, e_profit,e_period)
    #latest = e_correctnessList[-10:]
    #print("Correct:", np.mean(latest))
    # Panel 1: BTC price with buy (*green) / sell (*red) markers.
    ax1 = fig.add_subplot(211)
    ax1.plot(a_price, "-", color='b', linewidth=1)
    ax1.plot(a_bought, "*", color='g', linewidth=1)
    ax1.plot(a_sold, "*", color='r', linewidth=1)
    ax1.set_ylim([priceMin, priceMax])
    # Panel 2: evaluation profit per episode, with a zero baseline.
    ax2 = fig.add_subplot(212)
    ax2.plot(e_profit, "*", color='g', linewidth=1)
    #ax2.set_ylim([35, 65])
    plt.axhline(0, color='black', linewidth=0.5)
    #plt.title("Eval Success Percentage")
    # # AX 3 -
    # ax3 = fig.add_subplot(223)
    # ax3.plot(e_correctnessList, "-", color='g', linewidth=1)
    # ax3.set_ylim([35, 65])
    # plt.axhline(50, color='black', linewidth=0.5)
    # #plt.title("Eval Success Percentage")
    # # AX 4 -
    # ax4 = fig.add_subplot(224)
    # ax4.plot(e_upList, "-", color='g', linewidth=1)
    # #ax4.plot(e_downList, "-", color='r', linewidth=1)
    # ax4.plot(e_skiplist, "-", color='b', linewidth=1)
    # #ax4.set_ylim([-70, 70])
    # #ax4.set_xlim([0, Epoch])
    # #plt.axhline(0, color='black', linewidth=0.5)
    # #plt.title("Eval Guess Occurences")
fig.suptitle(imageName) # or plt.suptitle('Main title')
#ax1.legend()
#fig.tight_layout(rect=[0, 0.03, 1, 0.95])
# Save the figure next to the lab outputs, then display interactively.
fileName = "/home/andras/PycharmProjects/TradingGame/lab/img_" + imageName + ".png"
fig.savefig(fileName)
plt.show()
'''
321
322
323
324
325
326
''' | UTF-8 | Python | false | false | 4,247 | py | 7 | Examiner.py | 6 | 0.612197 | 0.580881 | 0 | 153 | 26.764706 | 197 |
Ping1122/JRAuto | 11,038,065,960,327 | 9a2f0324e6470955392d00f6629e9bfdf8456cbf | c42e0c34ab9962c552ed79088f37079e48ee8119 | /util/logger.py | 00a5a90cb3dd420c0c26ef3c7758133aa50a7bee | []
| no_license | https://github.com/Ping1122/JRAuto | c479ce9dab8a9f145bef2c5d953cd4b8cb7c4930 | d885631cf66f4ed5206bdcdd09e55489cb5813ab | refs/heads/master | 2020-12-14T22:03:49.235071 | 2020-03-06T18:17:35 | 2020-03-06T18:17:35 | 234,883,205 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from enum import Enum
# Severity levels for log messages.  Values 1..5 match the declaration
# order of the original functional-API Enum("Types", "error warning ...").
class Types(Enum):
    error = 1
    warning = 2
    info = 3
    debug = 4
    verbose = 5


# Prefix printed in front of each message, keyed by severity.  info and
# verbose messages carry no prefix.
typeMessageHeader = {
    Types.error: "Error: ",
    Types.warning: "Warning: ",
    Types.info: "",
    Types.verbose: "",
    Types.debug: "Debug: ",
}


def log(message, type):
    """Print *message* prefixed by the header for the given severity.

    The parameter is deliberately still named ``type`` (shadowing the
    builtin) to keep the original keyword-call signature.
    """
    print(f"{typeMessageHeader[type]}{message}")
| UTF-8 | Python | false | false | 291 | py | 142 | logger.py | 140 | 0.676976 | 0.676976 | 0 | 14 | 19.785714 | 57 |
Asadullah-Dal17/Improve-object-detection | 18,949,395,716,397 | 3c63bea26c7dfd247ef40bbebd5a31f1bc2b186d | c4dd282c3ad8f617cf270c35912bb341748a56e2 | /QR_Code_Detection/opticalFlow.py | ae73503abe6dc739e8d9601b8b78f17efa5d9cfd | []
| no_license | https://github.com/Asadullah-Dal17/Improve-object-detection | f63a08d16954d64a4a9b44b852eac71c6b7e93c4 | ba3674067367f9c16ca31aa86db3433014b9e4f2 | refs/heads/master | 2023-07-18T16:08:35.154358 | 2021-09-17T10:44:53 | 2021-09-17T10:44:53 | 405,378,013 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2 as cv
import numpy as np
# Capture from camera index 1 and grab one frame to seed the previous-frame
# buffer used by the Lucas-Kanade tracker.
cap = cv.VideoCapture(1)
_, frame = cap.read()
old_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
# Lucas-Kanade parameters: 15x15 search window, 4 pyramid levels, stop
# after 10 iterations or when movement falls below epsilon 0.03.
lk_params = dict(winSize=(15, 15),
                 maxLevel=4,
                 criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
def selected_Point(event, x, y, flags, params):
    """Mouse callback: on a left-button press, record the clicked pixel.

    Sets the module-level ``point``/``point_selected`` flags and seeds
    ``old_points`` with the click location for the optical-flow tracker.
    """
    global point, point_selected, old_points
    if event != cv.EVENT_LBUTTONDOWN:
        return
    point = (int(x), int(y))
    print(point)
    point_selected = True
    old_points = np.array([[x, y]], dtype=np.float32)
# Window + mouse-callback wiring: clicking in the 'frame' window selects the
# point to track (see selected_Point above).
cv.namedWindow('frame')
cv.setMouseCallback("frame", selected_Point)
point_selected = False
point = ()
old_points = np.array([[]])
while True:
    # NOTE(review): `ret` is never checked — a failed grab would make
    # cvtColor raise; confirm camera index 1 is always available.
    ret, frame = cap.read()
    cv.imshow('old frame ', old_gray)
    gray_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    print(old_points)
    if point_selected is True:
        # cv.circle(frame, point, 5, (155, 0, 255), -1)
        # Track the selected point from the previous gray frame into the
        # current one, then carry the new location forward.
        new_points, status, error = cv.calcOpticalFlowPyrLK(
            old_gray, gray_frame, old_points, None, **lk_params)
        old_points = new_points
        x, y = new_points.ravel()
        # cv.c(frame, (x, y), 6, (0, 255, 255), 4)
        # cv.cricle(frame, )
    old_gray = gray_frame.copy()
    cv.imshow('frame', frame)
    key = cv.waitKey(1)
    if key == ord('q'):
        break
# Fix: the original referenced `cv.destroyAllWindows` without calling it
# (a no-op attribute access), so the display windows were never closed.
cv.destroyAllWindows()
cap.release()
| UTF-8 | Python | false | false | 1,370 | py | 9 | opticalFlow.py | 8 | 0.59854 | 0.572993 | 0 | 51 | 25.862745 | 84 |
PKQ1688/rpa_verification | 15,204,184,266,486 | c40d5d71d9ded1b69275d431a969bd8dc1e2a5b0 | a7c7ec87766cb2f7837567bb9e408c0d2dfddcb7 | /rpa_ocr/__init__.py | 2c450d9520335c378fc53ddaea83e68082b96201 | [
"MIT"
]
| permissive | https://github.com/PKQ1688/rpa_verification | 843e77aed0763655beb363b3e1d7dc0fb532c6bd | 12e5402cdbe3e48db338c821baed4d41bfef278d | refs/heads/master | 2023-01-21T11:32:23.221423 | 2020-12-01T08:39:22 | 2020-12-01T08:39:22 | 317,476,129 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 -*-
# @author :adolf
# NOTE(review): assigning to module-level ``__name__`` is unusual — it
# shadows the interpreter-provided module name and would break any
# ``__name__``-based checks in this module; confirm it is intentional.
__name__ = 'rpa_ocr'
# Package version string.
__version__ = '0.2.1'
# Re-export the public API at package level.
from rpa_ocr.Identify_English.train_tools import Train
from rpa_ocr.verification_service.verification_main import ocr_pipeline_main
from rpa_ocr.Identify_English.inference import CRNNInference
| UTF-8 | Python | false | false | 276 | py | 32 | __init__.py | 26 | 0.746377 | 0.731884 | 0 | 7 | 38.428571 | 76 |
yaotian/IPProxys | 9,955,734,225,024 | d3572e81b8dd8bf87c1594f5c0c79b5a1387ab9d | db46667e51210956b10c9b5ef3ca815007287552 | /spider/ProxySpider.py | 61b2040fd9a750eba5a4bf6133d30df8f6c99a79 | []
| no_license | https://github.com/yaotian/IPProxys | 3ee32dc71fcb71dd276f027d4ea157479133f7e8 | c3aa23b281ddf8f68d9b6433141ca16f30b17fc3 | refs/heads/master | 2020-06-18T04:26:18.356336 | 2016-12-01T05:08:33 | 2016-12-01T05:08:33 | 74,949,704 | 0 | 0 | null | true | 2016-11-28T07:48:47 | 2016-11-28T07:48:46 | 2016-11-28T07:03:09 | 2016-11-28T01:23:34 | 4,901 | 0 | 0 | 0 | null | null | null | #coding:utf-8
import os
from gevent.pool import Pool
import requests
import time
from config import THREADNUM, parserList, MINNUM, UPDATE_TIME
from db.SQLiteHelper import SqliteHelper
from spider.HtmlDownLoader import Html_Downloader
from spider.HtmlPraser import Html_Parser
from validator.Validator import Validator
import logging
logger = logging.getLogger('spider')
__author__ = 'Xaxdus'
from gevent import monkey
monkey.patch_all()
'''
这个类的作用是描述爬虫的逻辑
'''
class ProxySpider(object):
    """Crawl proxy lists, validate them, persist them and refresh squid.

    NOTE(review): this file uses Python 2 print statements (see
    update_squid_conf) and gevent-based pools; it will not run under
    Python 3 as-is.
    """
    def __init__(self):
        # Gevent pool used to crawl all parser configs concurrently.
        self.crawl_pool = Pool(THREADNUM)
        # self.sqlHelper = sqlHelper
    def run(self):
        """Main loop: validate stored proxies, re-crawl when below MINNUM,
        then regenerate the squid config and sleep UPDATE_TIME seconds."""
        while True:
            logger.info("Start to run spider")
            sqlHelper = SqliteHelper()
            logger.info('Start to run validator')
            validator = Validator(sqlHelper)
            count = validator.run_db()
            logger.info('Finished to run validator, count=%s' % count)
            if count[0] < MINNUM:
                proxys = self.crawl_pool.map(self.crawl, parserList)
                # At this point proxys is a list of lists: [[{}, {}, {}], [{}, {}, {}]]
                # print proxys
                # Flatten the per-parser lists into one list:
                proxys_tmp = []
                for proxy in proxys:
                    proxys_tmp.extend(proxy)
                proxys = proxys_tmp
                logger.info('first_proxys: %s' % len(proxys))
                # Now proxys is a flat list of dicts: [{}, {}, {}, ...]
                proxys_tmp = None
                # De-duplicate by converting each dict to a hashable tuple of items:
                proxys = [
                    dict(t)
                    for t in set([tuple(proxy.items()) for proxy in proxys])
                ]
                logger.info('end_proxy: %s' % len(proxys))
                logger.info('spider proxys: %s' % type(proxys))
                proxys = validator.run_list(proxys)  # proxies that passed validation
                sqlHelper.batch_insert(sqlHelper.tableName, proxys)
                logger.info('success ip: %s' % sqlHelper.selectCount())
                sqlHelper.close()
                # Update the squid config file and restart the service
                self.update_squid_conf(proxys)
            else:
                results = sqlHelper.selectAll()
                self.update_squid_conf(results)
            logger.info('Finished to run spider')
            time.sleep(UPDATE_TIME)
    def crawl(self, parser):
        """Download every URL of one parser config and parse out proxies;
        returns a (possibly empty) list of proxy dicts."""
        proxys = []
        html_parser = Html_Parser()
        for url in parser['urls']:
            response = Html_Downloader.download(url)
            if response != None:
                proxylist = html_parser.parse(response, parser)
                if proxylist != None:
                    proxys.extend(proxylist)
        return proxys
    def update_squid_conf(self, proxys):
        """Rewrite the squid config with one cache_peer line per proxy and
        restart squid.

        NOTE(review): ``proxy[0]``/``proxy[1]`` indexes each entry like a
        tuple, while run() passes dicts for freshly-crawled proxies —
        confirm which shape this is really given (selectAll rows?).
        """
        # file_default = '/etc/squid3/squid.conf.default'
        # file_run = '/etc/squid3/squid.conf'
        file_default = '/usr/local/etc/squid.conf.default'
        file_run = '/usr/local/etc/squid.conf'
        default_conf = open(file_default, 'r').read()
        default_conf += 'cache_dir null /tmp\n'
        for index, proxy in enumerate(proxys):
            print proxys
            proxy_conf = "cache_peer " + proxy[0] + " parent " + str(proxy[1]) + " 0 no-query weighted-round-robin weight=2 connect-fail-limit=2 allow-miss max-conn=5 name=proxy-" + str(index) + "\n"
            default_conf += proxy_conf
        conf = open(file_run, 'w')
        conf.write(default_conf)
        conf.close()
        message = os.system('service squid3 restart')
        print message
if __name__ == "__main__":
spider = ProxySpider()
spider.run() | UTF-8 | Python | false | false | 3,670 | py | 1 | ProxySpider.py | 1 | 0.554207 | 0.551101 | 0 | 105 | 32.742857 | 199 |
carolinewang01/psa-analysis | 13,005,161,019,398 | 86471c67a6524ce1dd9c95e3b772f114908b83a9 | 25cbfbdc36cefd99e988c0dd5d6cd8fbbf2aa49e | /kentucky/interpretable/six-month/.ipynb_checkpoints/advance_functions-checkpoint.py | 0cf9d2b4c4a7b37cc0ab3f9992691b73bbdf0ba0 | []
| no_license | https://github.com/carolinewang01/psa-analysis | 5ec25960a75fcb452051cf0f99e45f6406f0a50d | d8d77d23ec5964c995b2756eb56675e6cae3263f | refs/heads/master | 2022-04-01T05:08:30.037675 | 2019-12-20T00:53:00 | 2019-12-20T00:53:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ### AdaBoost -- one-depth decision tree
#def Adaboost(x, y, learning_rate, estimators, seed):
#
# import numpy as np
# from sklearn.model_selection import KFold, GridSearchCV
# from sklearn.ensemble import AdaBoostClassifier
#
# ### model & parameters
# ada = AdaBoostClassifier(random_state=seed)
# cross_validation = KFold(n_splits=5, shuffle=True, random_state=seed)
# c_grid = {"n_estimators": estimators,
# "learning_rate": learning_rate}
#
# ### nested cross validation
# clf = GridSearchCV(estimator=ada, param_grid=c_grid, scoring='roc_auc',cv=cross_validation, return_train_score=True).fit(x,y)
# train_score = clf.cv_results_['mean_train_score']
# test_score = clf.cv_results_['mean_test_score']
# test_std = clf.cv_results_['std_test_score']
#
# ### scores
# best_auc = clf.best_score_
# best_std = test_std[np.where(test_score == clf.best_score_)[0][0]]
# best_param = clf.best_params_
# auc_diff = train_score[np.where(test_score == clf.best_score_)[0][0]] - clf.best_score_
#
# return best_auc, best_std, auc_diff, best_param
## GAM -- generalized additive model
def EBM(train_x, train_y, test_x, test_y, learning_rate, depth, estimators, seed):
    """Tune and evaluate an Explainable Boosting Machine (interpretable GAM).

    Runs a 5-fold grid search over ``learning_rate`` / ``depth`` /
    ``estimators`` on (train_x, train_y), refits the best configuration on
    the full training set and scores it on the holdout (test_x, test_y).

    ``train_x``/``test_x`` are pandas DataFrames that must contain a 'Race'
    column, which is dropped before fitting ('Gender' stays in as a
    feature).  Returns a dict with the winning hyper-parameters,
    cross-validation AUC statistics (mean, std, train/validation gap) and
    the holdout probabilities, predictions and AUC.
    """
    import numpy as np
    from sklearn.model_selection import KFold, GridSearchCV
    from interpret.glassbox import ExplainableBoostingClassifier
    from sklearn.metrics import roc_auc_score

    # Drop the protected 'Race' attribute from the feature matrices.
    # (The original also extracted Gender/Race value arrays that were never
    # used; removed here as dead code.)
    train_x = train_x.drop(['Race'], axis=1).values
    test_x = test_x.drop(['Race'], axis=1).values

    ### model & parameters
    gam = ExplainableBoostingClassifier(random_state=seed)
    cross_validation = KFold(n_splits=5, shuffle=True, random_state=seed)
    c_grid = {"n_estimators": estimators,
              "max_tree_splits": depth,
              "learning_rate": learning_rate}

    ### cross-validated grid search
    clf = GridSearchCV(estimator=gam, param_grid=c_grid, scoring='roc_auc',
                       cv=cross_validation, return_train_score=True).fit(train_x, train_y)
    train_score = clf.cv_results_['mean_train_score']
    test_score = clf.cv_results_['mean_test_score']
    test_std = clf.cv_results_['std_test_score']

    ### statistics of the best grid point
    best_auc = clf.best_score_
    best_std = test_std[np.where(test_score == clf.best_score_)[0][0]]
    best_param = clf.best_params_
    auc_diff = train_score[np.where(test_score == clf.best_score_)[0][0]] - clf.best_score_

    # Refit with the winning hyper-parameters and evaluate on the holdout.
    gam = ExplainableBoostingClassifier(random_state=seed,
                                        n_estimators=best_param['n_estimators'],
                                        max_tree_splits=best_param['max_tree_splits'],
                                        learning_rate=best_param['learning_rate']).fit(train_x, train_y)
    holdout_prob = gam.predict_proba(test_x)[:, 1]
    holdout_pred = gam.predict(test_x)
    holdout_auc = roc_auc_score(test_y, holdout_prob)

    return {'best_param': best_param,
            'best_validation_auc': best_auc,
            'best_validation_std': best_std,
            'best_validation_auc_diff': auc_diff,
            'holdout_test_proba': holdout_prob,
            'holdout_test_pred': holdout_pred,
            'holdout_test_auc': holdout_auc}
| UTF-8 | Python | false | false | 3,520 | py | 510 | advance_functions-checkpoint.py | 9 | 0.615341 | 0.611648 | 0 | 83 | 41.39759 | 130 |
oauthlib/oauthlib | 12,386,685,729,896 | 92f8e5b33bdef1deddaea9267ac61e365c474924 | aed49405cb8b3d0b90e9aabedd6a730efaa7cd6b | /oauthlib/oauth1/__init__.py | 9caf12a90d878efa4460811310818ee3572fcdd4 | [
"BSD-3-Clause",
"BSD-2-Clause"
]
| permissive | https://github.com/oauthlib/oauthlib | fe6e56e50487492fce6bc940bcdd639d9cca67e4 | 00f9a212004a80df790ed071a59af53a05f5e3f2 | refs/heads/master | 2023-09-02T18:30:07.113921 | 2023-08-11T16:35:16 | 2023-09-02T11:35:31 | 2,811,323 | 1,223 | 236 | BSD-3-Clause | false | 2023-09-14T09:47:49 | 2011-11-19T23:08:51 | 2023-09-13T14:29:21 | 2023-09-14T09:47:48 | 3,098 | 2,640 | 489 | 87 | Python | false | false | """
oauthlib.oauth1
~~~~~~~~~~~~~~~
This module is a wrapper for the most recent implementation of OAuth 1.0 Client
and Server classes.
"""
from .rfc5849 import (
SIGNATURE_HMAC, SIGNATURE_HMAC_SHA1, SIGNATURE_HMAC_SHA256,
SIGNATURE_HMAC_SHA512, SIGNATURE_PLAINTEXT, SIGNATURE_RSA,
SIGNATURE_RSA_SHA1, SIGNATURE_RSA_SHA256, SIGNATURE_RSA_SHA512,
SIGNATURE_TYPE_AUTH_HEADER, SIGNATURE_TYPE_BODY, SIGNATURE_TYPE_QUERY,
Client,
)
from .rfc5849.endpoints import (
AccessTokenEndpoint, AuthorizationEndpoint, RequestTokenEndpoint,
ResourceEndpoint, SignatureOnlyEndpoint, WebApplicationServer,
)
from .rfc5849.errors import (
InsecureTransportError, InvalidClientError, InvalidRequestError,
InvalidSignatureMethodError, OAuth1Error,
)
from .rfc5849.request_validator import RequestValidator
| UTF-8 | Python | false | false | 822 | py | 193 | __init__.py | 118 | 0.776156 | 0.734793 | 0 | 23 | 34.73913 | 79 |
ac259/The-GANfather | 16,956,530,899,316 | b9dced42e782d28853d86480c68c98be48e557c6 | 13582d22715a3b5c5a4630683205b725295cf782 | /DCGAN_SAGAN/python_files/test_fid.py | f0582363893a6a6129ccd2d2e6d30049ec124411 | []
| no_license | https://github.com/ac259/The-GANfather | 3cf772de70d7565b28fbb93f11e6faf20f664cc0 | ffb0891b83ca9c57bf9bc0fc3672f1cb82e2727c | refs/heads/master | 2022-12-01T02:55:12.671609 | 2020-02-28T00:27:43 | 2020-02-28T00:27:43 | 183,055,833 | 0 | 0 | null | false | 2022-11-22T04:35:52 | 2019-04-23T16:35:41 | 2020-02-28T00:27:47 | 2022-11-22T04:35:49 | 27,844 | 0 | 0 | 14 | Jupyter Notebook | false | false | from keras import layers, models
import numpy as np
import os, glob
from keras.preprocessing import image
from numpy import zeros
from numpy.random import randn
from matplotlib import pyplot
from time import gmtime, strftime
import numpy
import keras
from numpy import cov
from numpy import trace
from numpy import iscomplexobj
from numpy import asarray
from numpy.random import shuffle
from scipy.linalg import sqrtm
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_v3 import preprocess_input
from keras.datasets.mnist import load_data
from skimage.transform import resize
from keras.datasets import cifar10
def scale_images(images, new_shape):
    """Resize every image in the batch to ``new_shape`` and stack the
    results into one array.

    Uses order-0 (nearest-neighbour) interpolation, matching the original.
    """
    return asarray([resize(image, new_shape, 0) for image in images])
# calculate frechet inception distance
def calculate_fid(model, act1, act2):
    """Frechet Inception Distance between two sets of activations.

    ``act1``/``act2`` are (samples, features) arrays.  ``model`` is
    accepted but unused (kept for the original call signature).
    """
    # Gaussian statistics (mean vector, covariance matrix) of each set.
    mean1 = act1.mean(axis=0)
    mean2 = act2.mean(axis=0)
    cov1 = cov(act1, rowvar=False)
    cov2 = cov(act2, rowvar=False)
    # Squared Euclidean distance between the means.
    mean_dist = numpy.sum((mean1 - mean2) ** 2.0)
    # Matrix square root of the covariance product; numerical error can
    # introduce a tiny imaginary component, which is discarded.
    cov_sqrt = sqrtm(cov1.dot(cov2))
    if iscomplexobj(cov_sqrt):
        cov_sqrt = cov_sqrt.real
    return mean_dist + trace(cov1 + cov2 - 2.0 * cov_sqrt)
# generate points in latent space as input for the generator
def generate_latent_points(latent_dim, n_samples):
    """Sample ``n_samples`` standard-normal latent vectors of length
    ``latent_dim``, returned as a (n_samples, latent_dim) array — one
    network input per row."""
    flat = randn(latent_dim * n_samples)
    return flat.reshape(n_samples, latent_dim)
# use the generator to generate n fake examples, with class labels
def generate_fake_samples(g_model, latent_dim, n_samples):
    """Draw ``n_samples`` latent points and run them through the generator
    ``g_model`` to produce fake samples."""
    return g_model.predict(generate_latent_points(latent_dim, n_samples))
# Imports to load the model
from keras.models import load_model
from keras.models import model_from_json
import json
# Rebuild the generator from its saved JSON architecture; weights are
# loaded per-checkpoint inside the loop below.
with open('model_in_json.json','r') as f:
    model_json = json.load(f)
model = model_from_json(model_json)
# Real reference images: CIFAR-10 test split, scaled to [-1, 1].
(_, _), (images2, _) = keras.datasets.cifar10.load_data()
images2 = (images2 - 127.5) / 127.5
path = os.getcwd()
path = os.path.join(path,'GAN_weight_2')
# For all weights in the folder
# Generate the image and calculate the FIDs
for i in glob.glob(path+'/*.h5'):
    model.load_weights(i)
    # NOTE(review): `filename` is computed but never used below.
    filename = os.path.split(i)[-1]
    n_samples = 100
    latent_dim = 100
    X = generate_fake_samples(model, latent_dim, n_samples)
    #print(X.shape)
    images1 = X
    # Inception-v3 feature extractor (global-average-pooled activations).
    model_inception = InceptionV3(include_top=False, pooling='avg', input_shape=(299,299,3))
    # load cifar10 images
    shuffle(images2)
    # images1
    # NOTE(review): images2 is shuffled and truncated in place each
    # iteration, so after the first checkpoint only 100 reals remain.
    images2 = images2[:100]
    #print('Loaded', images1.shape, images2.shape)
    # convert integer to floating point values
    images1 = images1.astype('float32')
    images2 = images2.astype('float32')
    # resize images
    images1 = scale_images(images1, (299,299,3))
    images2 = scale_images(images2, (299,299,3))
    # calculate activations
    act1 = model_inception.predict(images1)
    act2 = model_inception.predict(images2)
    # calculate fid
    fid = calculate_fid(model_inception, act1, act2)
    print(f'FID is {round(fid,3)} and generator is (unknown)')
denniscallanan/gaa-api | 13,477,607,382,755 | cc379ccf233f70cf77575e8dcfbb3ee7ce45d14d | 8a3580f4816f81f1698ed71617a2ddb9563bf340 | /src/helpers.py | f2f9a7df94a654b4389d5c689f8abe2353f4f27a | []
| no_license | https://github.com/denniscallanan/gaa-api | c46b67d0e8f496c93b791dfb619fbbe41f0063cb | 6ed23baecb976e740171c72be37c7137324323f4 | refs/heads/main | 2023-03-01T12:34:04.869174 | 2021-02-10T09:30:44 | 2021-02-10T09:30:44 | 334,648,455 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def get_field_args(obj, new_field_map=None, discarded_fields=None, nested_structure=True):
    """Build a kwargs dict from ``obj``'s instance attributes.

    When ``nested_structure`` is true the attributes are read from
    ``obj.__dict__['__data__']`` (peewee-style models); otherwise from
    ``obj.__dict__`` directly.  Keys in ``discarded_fields`` are skipped,
    and keys with a non-None entry in ``new_field_map`` are renamed.
    """
    source = obj.__dict__['__data__'] if nested_structure else obj.__dict__
    rename = new_field_map or {}
    skip = discarded_fields or set()
    result = {}
    for key, value in source.items():
        if key in skip:
            continue
        target = rename.get(key)
        result[target if target is not None else key] = value
    return result
| UTF-8 | Python | false | false | 581 | py | 28 | helpers.py | 20 | 0.592083 | 0.592083 | 0 | 16 | 35.25 | 90 |
shivam221098/data-structures | 9,878,424,810,188 | 6baf2a5dc52ba55005b7cbb6fb78f28bc7ea51ac | 9794293160296c46696541047a9929d56193ca70 | /trees.py | c48c8183a68fb8793d5738e41a183493aa71ac47 | []
| no_license | https://github.com/shivam221098/data-structures | 5edb8976e07e19b34dbd5ff4e616dac83a1a9219 | 53832076f7deed78a1d7e7e3025b93de21fd0289 | refs/heads/master | 2023-07-11T13:59:50.768351 | 2021-08-10T19:02:34 | 2021-08-10T19:02:34 | 394,754,826 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Max Heap
"""
import sys
def parent(pos):
return pos // 2
def left_child(pos):
return pos * 2
def right_child(pos):
return (2 * pos) + 1
class MaxHeap:
def __init__(self, size):
self.maxsize = size
self.size = 0
self.Heap = [0] * (self.maxsize + 1)
self.Heap[0] = sys.maxsize
self.FRONT = 1
def is_leaf(self, pos):
if self.size // 2 <= pos <= self.size:
return True
return False
def swap(self, f_pos, s_pos):
self.Heap[f_pos], self.Heap[s_pos] = self.Heap[s_pos], self.Heap[f_pos]
def max_heapify(self, pos):
if not self.is_leaf(pos):
if self.Heap[pos] < self.Heap[left_child(pos)] or self.Heap[pos] < self.Heap[right_child(pos)]:
if self.Heap[left_child(pos)] > self.Heap[right_child(pos)]:
self.swap(pos, left_child(pos))
self.max_heapify(left_child(pos))
else:
self.swap(pos, right_child(pos))
self.max_heapify(right_child(pos))
def insert(self, data):
if self.size >= self.maxsize:
return
self.size += 1
self.Heap[self.size] = data
current = self.size
while self.Heap[current] > self.Heap[parent(current)]:
self.swap(current, parent(current))
current = parent(current)
def print_heap(self):
for i in range(1, (self.size // 2) + 1):
print(f'Parent: {str(self.Heap[i])}, Left child: {self.Heap[2 * i]}, Right child: {self.Heap[2 * i + 1]}')
def extract_max(self):
pop = self.Heap[self.FRONT]
self.Heap[self.FRONT] = self.Heap[self.size]
self.size -= 1
self.max_heapify(self.FRONT)
return pop
"""
Min Heap
"""
class MinHeap:
    """Array-based binary min-heap, 1-indexed (slot 0 holds a -inf sentinel)."""

    def __init__(self, size):
        self.maxsize = size              # capacity
        self.size = 0                    # number of stored elements
        self.Heap = [0] * (self.maxsize + 1)
        self.Heap[0] = sys.maxsize * -1  # sentinel below any element: stops sift-up
        self.FRONT = 1                   # index of the root / minimum

    def is_leaf(self, pos):
        """True if *pos* has no children.

        Fix: the original used ``size // 2 <= pos``, which wrongly marked
        the node at exactly ``size // 2`` as a leaf even when it has a left
        child, so min_heapify could stop early and break the heap order.
        """
        return self.size // 2 < pos <= self.size

    def swap(self, f_pos, s_pos):
        self.Heap[f_pos], self.Heap[s_pos] = self.Heap[s_pos], self.Heap[f_pos]

    def min_heapify(self, pos):
        """Sift the value at *pos* down until its subtree is a valid min-heap."""
        if self.is_leaf(pos):
            return
        left, right = 2 * pos, 2 * pos + 1
        smallest = pos
        # Fix: guard child indices — a right child may not exist even when
        # the left one does (the original compared against stale array slots).
        if left <= self.size and self.Heap[left] < self.Heap[smallest]:
            smallest = left
        if right <= self.size and self.Heap[right] < self.Heap[smallest]:
            smallest = right
        if smallest != pos:
            self.swap(pos, smallest)
            self.min_heapify(smallest)

    def insert(self, data):
        """Add *data*, sifting it up; silently ignored when the heap is full."""
        if self.size >= self.maxsize:
            return
        self.size += 1
        self.Heap[self.size] = data
        current = self.size
        # The sentinel at index 0 guarantees termination at the root.
        while self.Heap[current] < self.Heap[current // 2]:
            self.swap(current, current // 2)
            current = current // 2

    def print_heap(self):
        # NOTE(review): when size is even, the right-child slot printed for
        # the last parent is an unused array cell — behaviour preserved.
        for i in range(1, (self.size // 2) + 1):
            print(f'Parent: {str(self.Heap[i])}, Left child: {self.Heap[2 * i]}, Right child: {self.Heap[2 * i + 1]}')

    def min_heap(self):
        """Restore the heap property over the whole array (bottom-up heapify)."""
        for pos in range(self.size // 2, 0, -1):
            self.min_heapify(pos)

    def extract_min(self):
        """Remove and return the minimum; raises IndexError on an empty heap
        (the original silently returned stale data and underflowed size)."""
        if self.size < 1:
            raise IndexError('extract_min from empty heap')
        pop = self.Heap[self.FRONT]
        self.Heap[self.FRONT] = self.Heap[self.size]
        self.size -= 1
        self.min_heapify(self.FRONT)
        return pop
"""
Binary Tree
"""
class Node:
    """Binary-search-tree node: ``data`` payload plus left/right child links."""
    def __init__(self, data):
        self.data = data
        # Children start empty; insert() fills them in.
        self.left = None
        self.right = None
def insert(root, data):
    """Insert ``data`` into the BST rooted at ``root`` and return the
    (possibly new) subtree root.  Duplicate keys are ignored."""
    if root is None:
        return Node(data)
    if data > root.data:
        root.right = insert(root.right, data)
    elif data < root.data:
        root.left = insert(root.left, data)
    # data == root.data: duplicate, leave the tree unchanged.
    return root
def min_value_node(root):
    """Return the leftmost (minimum-key) node of the subtree at ``root``."""
    node = root
    while node.left is not None:
        node = node.left
    return node
def delete(root, data):
    """Remove ``data`` from the BST rooted at ``root``; returns the new
    subtree root (``None`` when the last node is removed)."""
    if root is None:
        return None
    if data < root.data:
        root.left = delete(root.left, data)
        return root
    if data > root.data:
        root.right = delete(root.right, data)
        return root
    # Found the node to remove.
    if root.left is None:
        return root.right              # zero children or right child only
    if root.right is None:
        return root.left               # left child only
    # Two children: copy the in-order successor's key, then delete it.
    successor = min_value_node(root.right)
    root.data = successor.data
    root.right = delete(root.right, successor.data)
    return root
def print_tree(root):
    """In-order traversal printing each key followed by a single space."""
    if not root:
        return
    print_tree(root.left)
    print(root.data, end=" ")
    print_tree(root.right)
if __name__ == '__main__':
    # Binary search tree demo: build, print in-order, delete a leaf, reprint.
    # NOTE(review): delete() returns the new root and the result is
    # discarded — safe here only because 0 is not the root node.
    root = Node(68)
    root = insert(root, 78)
    root = insert(root, 8)
    root = insert(root, 96)
    root = insert(root, 43)
    root = insert(root, 70)
    root = insert(root, 0)
    root = insert(root, 100)
    print_tree(root)
    print("\n")
    delete(root, 0)
    print_tree(root)
    # Max-heap demo: insert, dump parent/child layout, pop the maximum.
    print()
    heap = MaxHeap(45)
    heap.insert(3)
    heap.insert(13)
    heap.insert(0)
    heap.insert(67)
    heap.insert(69)
    heap.insert(309)
    heap.insert(90)
    heap.insert(29)
    heap.print_heap()
    print(f'Max Value: {str(heap.extract_max())}')
    # Min-heap demo: same values, pop the minimum.
    print()
    heap = MinHeap(45)
    heap.insert(3)
    heap.insert(13)
    heap.insert(0)
    heap.insert(67)
    heap.insert(69)
    heap.insert(309)
    heap.insert(90)
    heap.insert(29)
    heap.print_heap()
    print(f'Min Value: {str(heap.extract_min())}')
| UTF-8 | Python | false | false | 5,847 | py | 6 | trees.py | 6 | 0.534804 | 0.520096 | 0 | 260 | 21.488462 | 118 |
IAAA-Lab/python-quality-checker | 10,118,942,986,198 | dc30e8e047453516322696ba01363e8f01d2c497 | 63e92f5bebb2bc297e87ac4ebf77985cda7006b4 | /tests/test_url_availability.py | bac2c17a1ee297ee16d7a5970249fe7188b5d1d5 | []
| no_license | https://github.com/IAAA-Lab/python-quality-checker | 496201c7c19cee9a9ea9ea87292b68c4d6883057 | 2e5973f5707dcaccf809115485a13d19de115931 | refs/heads/master | 2022-12-10T15:43:50.335547 | 2019-08-28T13:37:38 | 2019-08-28T13:46:28 | 196,577,290 | 0 | 0 | null | false | 2022-12-08T05:55:50 | 2019-07-12T12:40:54 | 2019-08-28T13:49:24 | 2022-12-08T05:55:49 | 22 | 0 | 0 | 2 | Python | false | false | from qualitychecker import URL
# pylama:ignore=E501
available_urls = [
("http://google.com", False),
("http://datos.gob.es", True),
(
"http://servicios.ine.es/wstempus/js/es/DATOS_TABLA/t37/p198/p01/serie/02002.px?tip=AM",
False,
),
("http://www.ine.es/jaxiT3/Tabla.htm?t=24357", False),
(
"http://www.boa.aragon.es/cgi-bin/EBOA/BRSCGI?CMD=VERLST&OUTPUTMODE=XML&BASE=BOLE&DOCS=1-10000&SEC=OPENDATABOAXML&SORT=-PUBL&SEPARADOR=&MATE-C=04LIC",
False,
),
(
"https://opendata.aragon.es/sparql?default-graph-uri=&query=select+distinct+%3Fobs+%3Fx+%3Fy+where+%7B%0D%0A+%3Fobs+%3Chttp%3A%2F%2Fpurl.org%2Flinked-data%2Fsdmx%2F2009%2Fdimension%23refArea%3E+%3Chttp%3A%2F%2Fopendata.aragon.es%2Frecurso%2Fterritorio%2FMunicipio%2FPiedratajada%3E+.%0D%0A+%3Fobs+%3Fx+%3Fy++.%0D%0A%7D+%0D%0AORDER+BY+%3Fobs&format=text%2Fcsv&timeout=0&debug=on",
False,
),
]
def test_available_urls():
    """Every URL in available_urls must be reachable with the expected sslError flag."""
    for entry in available_urls:
        url, expected_ssl_error = entry
        conn = URL(url)
        assert conn.isAccesible()
        result = conn.getAccessibility()
        # isAccesible() must hand back the very same accessibility object.
        assert conn.isAccesible() is result
        assert result.sslError == expected_ssl_error
# Tuples of (url, expected HTTP status, expected reason text, expected
# sslError flag) for URLs that must NOT be accessible.
unavailable_urls = [
    (
        "http://piuhwpeognpiapondguawehgnonb.es",
        None,
        "[Errno -2] Name or service not known",
        False,
    ),
    ("https://google.com/aerpvqiwuegadsfgpaibv", 404, "Not Found", False),
    (
        "https://opendata.aragon.es/sparql?default-graph-uri=&query=select+distinct+%3Fobs+%3Fx+%3Fy+where+%7B%0D%0A+%3Fobs+%3Chttp%3A%2F%2Fpurl.org%2Flinked-data%2Fsdmx%2F2009%2Fdimension%23refArea%3E+%3Chttp%3A%2F",
        400,
        "Bad Request",
        False,
    ),
]
def test_unavailable_urls():
    """Each URL in unavailable_urls must fail with the recorded status/reason."""
    for url, status, reason, expected_ssl_error in unavailable_urls:
        conn = URL(url)
        assert not conn.isAccesible()
        result = conn.getAccessibility()
        # isAccesible() must hand back the very same accessibility object.
        assert conn.isAccesible() is result
        assert result.status == status
        assert str(result.reason) == reason
        assert result.sslError == expected_ssl_error
| UTF-8 | Python | false | false | 2,160 | py | 13 | test_url_availability.py | 6 | 0.658333 | 0.609722 | 0 | 58 | 36.241379 | 387 |
Harold1994/ProgramingCollectiveIntelligence | 11,605,001,673,526 | 99d14b7c230f54dd61484aaa8bdde4f23b4680ff | f75e27b2a778d5cae678daa3a49ddfba2e728291 | /chapter5_Optimazation/kayak.py | 93f0784d2b2d7605f7372321bdcaebdb1ed0ebb8 | []
| no_license | https://github.com/Harold1994/ProgramingCollectiveIntelligence | 8560bd4e3f82d0991f01d39ba3d712ad512f4074 | 44940566eb0bccb5c07af5c4ddc37fb558617066 | refs/heads/master | 2022-10-23T20:29:14.100504 | 2018-01-12T14:04:12 | 2018-01-12T14:04:12 | 114,741,164 | 0 | 2 | null | false | 2022-10-17T14:07:55 | 2017-12-19T08:48:08 | 2017-12-19T08:50:35 | 2018-01-12T14:04:44 | 17,245 | 0 | 2 | 1 | Python | false | false | import urllib.request
import xml.dom.minidom
import time
# Kayak developer token; replace with a real key before use.
kayakkey = 'YOURKEYHere'
def getkayaksession():
    """Open a Kayak API session and return its session id ('sid')."""
    url = 'http://www.kayak.com/k/ident/apisession?token=%s&version=1' % kayakkey
    response = urllib.request.urlopen(url).read()
    document = xml.dom.minidom.parseString(response)
    return document.getElementsByTagName('sid')[0].firstChild.data
def flightsearch(sid, origin, destination, depart_date):
    """Start a one-way flight search on Kayak and return the search id.

    sid is a session id from getkayaksession(); origin/destination are
    airport codes and depart_date the outbound date string.
    """
    url = 'http://www.kayak.com/s/apisearch?basicmode=true&oneway=y&origin=%s' % origin
    url += '&destination=%s&depart_date=%s' % (destination, depart_date)
    url += '&return_date=none&depart_time=a&return_time=a'
    url += '&travelers=1&cabin=e&action=doFlights&apimode=1'
    url += '&_sid_=%s&version=1' % (sid)
    doc = xml.dom.minidom.parseString(urllib.request.urlopen(url).read())
    # BUG FIX: the response element is named 'searchid'; the previous
    # 'serarchid' typo always produced an empty node list (IndexError).
    searchid = doc.getElementsByTagName('searchid')[0].firstChild.data
    return searchid
def flightsearchresults(sid, searchid):
    """Poll a Kayak search until it completes and return the results.

    Returns an iterator of (departure_time, arrival_time, price) tuples.
    """
    def parceprice(p):
        # Strip the leading currency symbol and thousands separators.
        return float(p[1:].replace(',', ''))
    # Poll every 2 seconds until the 'morepending' flag clears.
    while 1:
        time.sleep(2)
        url = 'http://www.kayak.com/s/basic/flight?'
        url += 'searchid=%s&c=5&apimode=1&_sid_=%s&version=1' % (searchid, sid)
        doc = xml.dom.minidom.parseString(urllib.request.urlopen(url).read())
        morepending = doc.getElementsByTagName('morepending')[0].firstChild
        if morepending is None or morepending.data == 'false':
            break
    # Fetch the full result set.
    url = 'http://www.kayak.com/s/basic/flight?'
    url += 'searchid=%s&c=999&apimode=1&_sid_=%s&version=1' % (searchid, sid)
    doc = xml.dom.minidom.parseString(urllib.request.urlopen(url).read())
    prices = doc.getElementsByTagName('price')
    departures = doc.getElementsByTagName('depart')
    arrivals = doc.getElementsByTagName('arrive')
    # BUG FIX: price nodes hold strings like '$1,234' with no space, so they
    # must be parsed with parceprice() -- the previous split(' ')[1] was a
    # copy-paste from the date/time columns and raised IndexError.
    return zip([p.firstChild.data.split(' ')[1] for p in departures],
               [p.firstChild.data.split(' ')[1] for p in arrivals],
               [parceprice(p.firstChild.data) for p in prices])
DenizCicekDS/Workspace-Clarusway | 4,776,003,666,782 | 21d550ac5d2d4d5c88ba9a383609e4ed74a6235e | b0f61695760b2b94673d1e05541c894e02d0b17c | /covid19_new.py | 8bb4492cb679c3e7141b18162de82d52973a8b3e | []
| no_license | https://github.com/DenizCicekDS/Workspace-Clarusway | bb95645ab676712cda36326663aa127cb4b76b8e | e008f632bd02a311163d4eb99f76a9ed99f6a160 | refs/heads/main | 2023-04-20T03:53:00.964281 | 2021-05-03T00:57:32 | 2021-05-03T00:57:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 16 02:16:15 2021
@author: C7169-Deniz
"""
# Intro banner explaining the eight risk groups.
print("\nThis simple program shows you, which risk group you are belonging \
\nto for Covid-19. There are 8 groups in total and the group 8 represents \
\nthe highest risk group. I wish you all the best. Stay at home, stay safe!\n")
# Answers are collected as 'y'/'n' strings; "" means "not answered yet".
cigarette_age75 = ""
severe_chronic_disease = ""
immune_weak = ""
# Each loop re-prompts until the user types exactly 'y' or 'n'.
while ((cigarette_age75 == 'y') or (cigarette_age75 == 'n')) == False:
    cigarette_age75 = input("Are you cigarette addict older than 75?\
\nPlease press y for Yes and n for No: ")
while ((severe_chronic_disease == 'y') or (severe_chronic_disease == 'n')) == False:
    severe_chronic_disease = input("Do you have any severe chronic diseases? \
\nPlease press y for Yes and n for No: ")
while ((immune_weak == 'y') or (immune_weak == 'n')) == False:
    immune_weak = input("Is your immune system too weak?\
\nPlease press y for Yes and n for No: ")
# Short aliases handed to risk_calculator(a, b, c).
a = cigarette_age75
b = severe_chronic_disease
c = immune_weak
def risk_calculator(a, b, c):
    """Print the Covid-19 risk-group description matching the answers.

    a, b, c are 'y'/'n' strings:
      a -- smoker older than 75,
      b -- has a severe chronic disease,
      c -- has a weak immune system.
    The legend of all 8 groups is printed first, then the matching group
    (Group 8 = highest risk, Group 1 = lowest).
    """
    # Legend of all eight groups (backslash-continued string literal).
    print("\n\
    ------------------------------\n\
    Group 8 = Smoking, more than 75 years old, has severe chronic disease \
    \nand has a weak immune system.\n\
    Group 7 = Smoking, more than 75 years old, has severe chronic disease \
    \nand has a STRONG immune system.\n\
    Group 6 = Smoking, more than 75 years old, has NO severe chronic disease \
    \nand has a weak immune system.\n\
    Group 5 = Smoking, more than 75 years old, has NO severe chronic disease \
    \nand has a STRONG immune system.\n\
    Group 4 = Younger than 75 or not smoker, has severe chronic disease \
    \nand has a weak immune system.\n\
    Group 3 = Younger than 75 or not smoker, has severe chronic disease \
    \nand has a STRONG immune system.\n\
    Group 2 = Younger than 75 or not smoker, has NO severe chronic disease \
    \nand has a weak immune system.\n\
    Group 1 = Younger than 75 or not smoker, has NO severe chronic disease \
    \nand has a STRONG immune system.\n\
    ------------------------------\
    ")
    # The nested if/elif tree enumerates all 8 (a, b, c) combinations.
    if (a == 'y'):
        if (b == 'y'):
            if (c == 'y'): #Group8
                print("\nYou belong the Group 8. It is the riskiest group. \
\nYou are smoking, older than 75, have chronic disease, and \
\na weak immune system. You should close the door, and read books \
\nfor the next 6 months. Which is a great opportunity to improve \
\nyourself.\n")
            elif(c == 'n'): #Group7
                print("\nYou belong the Group 7. Your immune systeym is good, \
\nbut it doesn't mean you are not under high risk. Because \
\nyou are smoking, older than 75 and have chronic disease. \
\nyou should be very careful.\n")
        elif (b == 'n'):
            if (c == 'y'): #Group6
                print("\nYou belong the Group 6. You don't have any severe chronic \
\ndisease, which is good. But still you are under great risk, \
\nbecause you are smoking, older than 75 and have a week immune \
\nsystem.\n")
            elif(c == 'n'): #Group5
                print("\nYou belong the Group 5. You don't have any severe chronic \
\ndisease, which is good, and have a strong immune system. But still \
\nyou are under risk, because you are smoking and older than 75 \
\ntherefore, you should be careful.\n")
    elif (a == 'n'):
        if (b == 'y'):
            if (c == 'y'): #Group4
                print("\nYou belong the Group 4. You are younger than 75 or not \
\nsmoking. But you have severe chronic disease and a weak immune \
\nsystem. Therefore you should be careful.\n")
            elif(c == 'n'): #Group3
                print("\nYou belong the Group 3. You are younger than 75 or not \
\nsmoking, and have a STRONG immune system. But you have severe \
\nchronic disease. Therefore, you should still be careful.\n")
        elif (b == 'n'):
            if (c == 'y'): #Group2
                print("\nYou belong the Group 2. You are younger than 75 or not \
\nsmoking, and have NO chronic disease, which is good, but you have \
\na weak immune system, which makes you vulnerable. Being careful \
\nwouldn't hurt.\n")
            elif(c == 'n'): #Group1
                print("\nYou belong the Group 1. You are younger than 75 or not \
\nsmoking, and have NO chronic disease, which is good, you also \
\nhave a STRONG immune system. But there are cases like you, \
\nand had great problems with Covid-19. Being careful doesn't hurt.\n")
    else:
        # Unreachable after the input validation loops above.
        print("Something went wrong!!!")
# Classify the collected answers and print the matching risk-group text.
risk_calculator(a, b, c)
| UTF-8 | Python | false | false | 4,954 | py | 7 | covid19_new.py | 6 | 0.577715 | 0.559144 | 0 | 102 | 47.343137 | 87 |
mandstoni/Aula2 | 6,279,242,194,328 | 8a6a7af68b6cec7a3f11f432e5d5853fa7e14944 | 7bff0a016a89949dc12c125bb44014c02b4dbf70 | /exer07.py | f5ce1aafe35aaff39617dc6a046f314453189718 | []
| no_license | https://github.com/mandstoni/Aula2 | db835d614865f4c64da45e7f3b8a7b4d3e5245b5 | 77c80ffa11ee7a218248b37f73f8fb78b5b49bcf | refs/heads/master | 2021-03-13T21:36:33.279606 | 2020-03-12T01:22:36 | 2020-03-12T01:22:36 | 246,715,261 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | print("Exemplo 07")
# Count down from 9 to 0 inclusive.
for item in reversed(range(10)):
    print(item)
| UTF-8 | Python | false | false | 69 | py | 11 | exer07.py | 11 | 0.57971 | 0.507246 | 0 | 3 | 21 | 29 |
Karagul/reportlabbookcode | 137,438,957,373 | 975b56c0ab09e7da12ee8f25a054d270145fbde1 | fc1141aabffe60455898b014fd8b4a2e8307ce85 | /chapter6_other_flowables/xpreformatted_paragraph.py | 718497674299f1423d3c1cc9b2ba9ccb2fef261f | []
| no_license | https://github.com/Karagul/reportlabbookcode | b5bff1609d62fe2bcfb17bfd7b65777121ac175c | e271348d5562f4842b9d1628ef917539a8ebcd5d | refs/heads/master | 2020-09-21T14:58:43.427964 | 2018-12-19T17:40:46 | 2018-12-19T17:40:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # xpreformatted_paragraph.py
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph
from reportlab.platypus import XPreformatted
from reportlab.lib.styles import getSampleStyleSheet
def xpreformatted_paragraph():
    """Create xpreformatted_paragraph.pdf contrasting a Paragraph with an
    XPreformatted flowable, both rendered with the 'Normal' style."""
    styles = getSampleStyleSheet()
    normal = styles["Normal"]
    story = [
        Paragraph("""<font color="blue">Hello, I'm a Paragraph</font>""",
                  style=normal),
        XPreformatted("""Hello, I'm a <font color="red">XPreformatted Paragraph</font>""",
                      style=normal),
    ]
    doc = SimpleDocTemplate("xpreformatted_paragraph.pdf",
                            pagesize=letter)
    doc.build(story)
if __name__ == '__main__':
    xpreformatted_paragraph()
GannTrader/django_resetpassword | 962,072,726,605 | de5226acb2807c805afaf5ca3c9703aa1dc7eefe | 7104988a98a8152465bd76b60cde7b79e7772c61 | /account/forms.py | e53b6a1aa0d113c1e1c0c714bce6be7100c6c017 | []
| no_license | https://github.com/GannTrader/django_resetpassword | a3cdc6a0557f0bb22e17ac4b9f9514524b36a2a9 | ab3ef49975aff9af0e49cf12a6187e3874c63cbf | refs/heads/master | 2022-12-07T17:53:37.494831 | 2020-09-02T04:08:18 | 2020-09-02T04:08:18 | 291,991,086 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms
class LoginForm(forms.Form):
    """Login form: Bootstrap-styled ('form-control') username/password pair."""
    username = forms.CharField(max_length = 255, widget = forms.TextInput(attrs = {
        'class':'form-control',
        'placeholder':'enter your name'
    }))
    password = forms.CharField(max_length = 255, widget = forms.PasswordInput(attrs = {
        'class':'form-control',
        'placeholder':'enter your password'
    }))
class PasswordResetForm(forms.Form):
    """Single e-mail field used to request a password-reset link."""
    # max_length=254 matches the RFC-compliant e-mail length Django uses.
    email = forms.EmailField(
        max_length=254,
        widget=forms.EmailInput(attrs={'autocomplete': 'email', 'class': 'form-control'})
    )
kotakasahara/omegagene | 11,020,886,117,474 | 7566cc78a627fcbd5962d146d6e06f68aaee866e | e6f8c7567dec8cd2c37077147636fe7d3d5b9177 | /toolkit/kkmdconf.py | 4a7571a6cd9a235611fbb1bac0a74c647ff63feb | []
| no_license | https://github.com/kotakasahara/omegagene | 3f8649ee42cbabf4fb326d8c567d576a11e0e376 | 6ef292e065903c28ebe138425e2ab855feb35986 | refs/heads/master | 2023-05-12T16:56:12.455951 | 2023-05-09T02:30:16 | 2023-05-09T02:30:16 | 183,242,028 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python2.7
import kkkit
import sys
import re
class MDConf(object):
    """Settings container filled in by MDConfReader.

    Holds trajectory file lists for Gromacs (.edr/.trr) and Psygene runs,
    atom selections, segment definitions and assorted file paths.
    """
    def __init__(self):
        # Free text from the --description-- block.
        self.description = ""
        # Gromacs settings.
        self.gro_init_structure = ""
        self.gro_traj_edr = []
        self.gro_traj_trr = []
        self.gro_traj_set = []
        # Atom selections and their display names.
        self.atom_selections = []
        self.selection_names = []
        # segment name -> (first, last, extra); see --segment in the reader.
        self.segments = {}
        self.straddling = {}
        # Digits used for zero-padded run ids (e.g. 4 -> run0001).
        self.n_digit = 4
        self.gro_index_file = ""
        self.gro_tpr_file = ""
        # Psygene settings.
        self.psy_traj_crd = []
        self.psy_traj_vel = []
        self.psy_traj_set = []
        self.psy_init_structure = ""
        self.kk_traj_trans = ""
        self.pseudo_atoms = []
    def add_gro_traj_set(self, pref, begin, last):
        """Register a run series: file prefix plus inclusive run-id range."""
        self.gro_traj_set.append([pref, begin, last])
    def add_gro_traj_file(self, pref):
        """Append the .edr and .trr file names derived from *pref*."""
        self.gro_traj_edr.append(pref + ".edr")
        self.gro_traj_trr.append(pref + ".trr")
    def set_gro_traj_files(self):
        """Expand every registered Gromacs run series into file names."""
        for pref, begin, last in self.gro_traj_set:
            fmt = "%0" + str(self.n_digit) + "d"
            for run in range(begin, last + 1):
                self.add_gro_traj_file(pref + (fmt % run))
    def add_psy_traj_set(self, pref, begin, last, suff_crd, suff_vel=""):
        """Register a Psygene run series with its crd (and optional vel) suffix."""
        self.psy_traj_set.append([pref, begin, last, suff_crd, suff_vel])
    def set_psy_traj_files(self):
        """Expand every registered Psygene series into crd/vel file names."""
        for pref, begin, last, suff_crd, suff_vel in self.psy_traj_set:
            fmt = "%0" + str(self.n_digit) + "d"
            for run in range(begin, last + 1):
                base = pref + (fmt % run)
                self.psy_traj_crd.append(base + "." + suff_crd)
                # Velocity files are optional.
                if suff_vel != "":
                    self.psy_traj_vel.append(base + "." + suff_vel)
    def get_segment_bynum(self, num, default=None):
        """Return (name, segment) for the segment whose range contains *num*.

        Falls back to (default, (default, default, default)) when no
        segment matches.
        """
        for segname, seg in self.segments.items():
            if seg[0] <= num <= seg[1]:
                return segname, seg
        return default, (default, default, default)
    def set_straddling(self, seg_name, xyz):
        """Record the per-axis straddling flags for *seg_name*."""
        self.straddling[seg_name] = xyz
    def set_gro_index_file(self, fn):
        self.gro_index_file = fn
    def set_gro_tpr_file(self, fn):
        self.gro_tpr_file = fn
    def write_text(self):
        """Serialize the description block and the .edr file list as text."""
        lines = ["--description--", self.description, "--description--"]
        lines.extend(self.gro_traj_edr)
        return "\n".join(lines) + "\n"
class MDConfReader(kkkit.FileI):
    """Parser that builds an MDConf from a '--tag value' style config file."""
    def __init__(self,fn):
        super(MDConfReader, self).__init__(fn)
    def read_conf(self):
        """Read the file and return a populated MDConf instance.

        Two syntaxes are recognized:
        * block fields: a '--description--' line opens a block that is
          accumulated verbatim until the same tag appears again;
        * one-line fields: '--tag value ...' parsed word by word.
        Malformed lines and unknown tags are reported on stderr.
        """
        mdconf = MDConf()
        self.open()
        # Name of the block field currently being accumulated ("" = none).
        current_field = ""
        current_text = ""
        for orig_line in self.f:
            line = kkkit.eliminate_comment(orig_line).strip()
            if line.lower() == current_field:
                # Closing tag of the current block field.
                if current_field == "--description--":
                    mdconf.description = current_text
                current_field = ""
                current_text = ""
            elif line[0:2] == "--" and line[-2:] == "--":
                # Opening tag of a block field.
                current_field = line.lower()
            elif current_field != "":
                # Inside a block field: accumulate the stripped line.
                current_text += line + "\n"
            else:
                # One-line '--tag value ...' field.
                terms = re.compile("\s+").split(line.strip())
                if len(line) == 0:
                    continue
                elif terms[0][0:2] != "--":
                    sys.stderr.write("Syntax error: Field tag must begin with '--':")
                    sys.stderr.write(line + "\n")
                elif terms[0] == "--gro-traj-set":
                    mdconf.add_gro_traj_set(terms[1],int(terms[2]),int(terms[3]))
                elif terms[0] == "--gro-traj":
                    mdconf.add_gro_traj_file(terms[1])
                elif terms[0] == "--gro-index":
                    mdconf.set_gro_index_file(terms[1])
                elif terms[0] == "--gro-tpr":
                    mdconf.set_gro_tpr_file(terms[1])
                elif terms[0] == "--gro-initial-structure":
                    mdconf.gro_init_structure = terms[1]
                elif terms[0] == "--atom-selection":
                    mdconf.atom_selections.append(" ".join(terms[1:]))
                elif terms[0] == "--selection-name":
                    mdconf.selection_names.append(" ".join(terms[1:]))
                elif terms[0] == "--segment":
                    # (first, last, terms[4]); semantics of terms[4] are not
                    # visible here -- confirm against kkkit/callers.
                    seg = (int(terms[2]), int(terms[3]), terms[4])
                    mdconf.segments[terms[1]] = seg
                elif terms[0] == "--straddling":
                    mdconf.set_straddling(terms[1], [int(x) for x in terms[2:5]])
                elif terms[0] == "--n-digit-run-id":
                    mdconf.n_digit = int(terms[1])
                elif terms[0] == "--psy-traj-set":
                    # The velocity suffix is optional (5 vs >= 6 terms).
                    if len(terms)==5:
                        mdconf.add_psy_traj_set(terms[1],int(terms[2]),int(terms[3]),terms[4])
                    elif len(terms) >= 6:
                        mdconf.add_psy_traj_set(terms[1],int(terms[2]),int(terms[3]),terms[4],terms[5])
                elif terms[0] == "--psy-initial-structure":
                    mdconf.psy_init_structure = terms[1]
                elif terms[0] == "--kk-traj-trans":
                    mdconf.kk_traj_trans = terms[1]
                elif terms[0] == "--pseudo-atom":
                    mdconf.pseudo_atoms.append([float(x) for x in terms[1:4]])
                else:
                    sys.stderr.write("Unknown tag:")
                    sys.stderr.write(terms[0])
        self.close()
        # Expand the registered run series into concrete file names.
        mdconf.set_gro_traj_files()
        return mdconf
| UTF-8 | Python | false | false | 5,728 | py | 320 | kkmdconf.py | 146 | 0.474686 | 0.462291 | 0 | 141 | 39.624113 | 103 |
Tech-Amol5278/Python_Basics | 5,798,205,851,083 | e75fb24afcea5a701137e4e41c62be9eb49e246b | d06302e2cc1d76eda354c39b516782452785c02a | /c2 - strings methods/c2_1.py | 9bfa801c00816315bf081a038fd6275b0588bb72 | []
| no_license | https://github.com/Tech-Amol5278/Python_Basics | 8ff418290075ee3476dfca40d9f9b3a10e597a60 | 9c7d98b4aac437502f99d7ded83669bee1d4d3a0 | refs/heads/master | 2022-04-18T01:58:11.387087 | 2020-04-18T11:12:09 | 2020-04-18T11:12:09 | 256,205,799 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #### Tut: 24 #############3
# string indexing
language = "python"
# index positions (positive, negative):
# p = 0, -6
# y = 1, -5
# t = 2, -4
# h = 3, -3
# o = 4, -2
# n = 5, -1
print(language[2])
print(language[-2])
###### Tut: 25 ############
# string slicing - print multiple characters of a string
# syntax - [start argument : stop argument + 1]
print(language[0:2])
print(language[2:4])
print(language[:3])
#### Tut: 27 #################################
# step argument
# slicing/selecting sub-sequences
# syntax - [start argument : stop argument + 1 : step]
print("amol"[:]) ## default step argument is 1
print("amol"[::]) ## default step argument is 1
print("amol"[::2])
# reverse the string
print("amol"[::-1])
| UTF-8 | Python | false | false | 709 | py | 162 | c2_1.py | 145 | 0.559944 | 0.51481 | 0 | 34 | 19.705882 | 57 |
jrvalcourt/feature-selection | 1,855,425,874,882 | 469d8599cd133d52104c99217ad37eaf274d1013 | 3c8eafd20fc6b47ea02bec5dd523da44e0050fb7 | /scripts/plot_varying_ks.py | 77268eefbe1d2df82775c0493688e50379d1c9e7 | []
| no_license | https://github.com/jrvalcourt/feature-selection | 0a6acede5be8885a42a2af148b429576e775b39e | b847d6044aabb28201ead1f1615b0db705fe303d | refs/heads/master | 2022-07-20T00:04:52.769160 | 2020-05-20T15:35:56 | 2020-05-20T15:35:56 | 259,767,364 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import count_and_score_kmers
import sys
import pickle
import os
import numpy as np
from guess_alphabet import candidate_breaks
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Usage: plot_varying_ks.py <test_file> <train_counts.pkl> <out_image>
    test_file = sys.argv[1]
    train_data = sys.argv[2]
    # The training-data file name encodes the maximum k between double
    # underscores, e.g. '...__k10__...' -- confirm the naming convention.
    maxK = int(os.path.basename(train_data).split('__')[1][1:])
    counts, total_kmers = pickle.load(open(train_data, 'rb'))
    results = count_and_score_kmers.score_kmers_krange([test_file], maxK, counts, total_kmers)
    plt.figure(figsize=(12,6))
    # One curve per even k from 2 to maxK; each curve is shifted by
    # ii + 0.5 so the scores line up with the characters drawn below.
    for ii, K in enumerate(np.linspace(2, maxK, num=((maxK-2)//2)+1, dtype='int32')):
        plt.plot(np.array(range(len(results[K]))) + ii + 0.5,
                 results[K],
                 label=f'k = {K}')
        breaks = candidate_breaks(results[K], K//2-1)
        # Mark candidate break positions only for the largest k.
        if K == maxK:
            plt.plot(breaks + 0.5, [2] * len(breaks), 'ko')
    plt.legend()
    # Draw each character of the analysed text under its x position.
    for ii, c in enumerate(count_and_score_kmers.yield_all_text([test_file])):
        plt.text(ii, 0, c, ha='center')
    plt.ylabel('log(p(full kmer) / (p(firsthalf) * p(secondhalf)))')
    plt.xlabel('position')
    plt.savefig(sys.argv[3], dpi=300)
| UTF-8 | Python | false | false | 1,159 | py | 9 | plot_varying_ks.py | 8 | 0.61346 | 0.591027 | 0 | 32 | 35.21875 | 94 |
almohress/djrest-wrapper | 13,511,967,160,932 | 0851002e232b44eee97d02fef4349da6ae9612c9 | 933ffbc470030abc0a443e879dee66ca1305544e | /djrest_wrapper/decorators/create_model.py | ce6af0ea587f72292bb688613be7016b0f521482 | [
"MIT"
]
| permissive | https://github.com/almohress/djrest-wrapper | 587cdbc0feae59f6b8cfea3605815fad44271e8a | 48f6e413fc9d8c8e22585133af7b344185398c4a | refs/heads/master | 2023-08-04T09:48:39.094197 | 2021-09-29T06:51:30 | 2021-09-29T06:51:30 | 387,012,178 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from functools import wraps
from rest_framework.response import Response
from rest_framework.status import HTTP_201_CREATED
def create_model(func):
    """View-method decorator implementing a generic DRF 'create' endpoint.

    Validates the request payload with the view's serializer, delegates
    creation to self.service.create_model(), runs the wrapped method for
    its side effects, and returns a 201 response shaped as
    {"<modelname>": <serialized model>}.
    """
    @wraps(func)
    def inner(self, request, *args, **kwargs):
        reqser = self.get_serializer(data=request.data)
        # Raises a DRF ValidationError (-> HTTP 400) on bad input.
        reqser.is_valid(raise_exception=True)
        # NOTE(review): this passes reqser.data (the re-serialized
        # representation); DRF convention is reqser.validated_data --
        # confirm which one the service layer expects.
        model = self.service.create_model(reqser.data)
        # The wrapped view method runs after creation; its return value
        # is discarded.
        func(self, request, *args, **kwargs)
        resser = self.get_serializer_response()(model)
        return Response(data={model.__class__.__name__.lower(): resser.data}, status=HTTP_201_CREATED)
    return inner
| UTF-8 | Python | false | false | 592 | py | 36 | create_model.py | 34 | 0.6875 | 0.677365 | 0 | 15 | 38.4 | 102 |
LIDA-training/starting_python | 16,389,595,244,086 | 64ca877c5bdc6ed2b2f481cafe3144f5cc93adef | 2a821f2c36b123670deb0596626506e8a9ef1d85 | /py1.py | a51733ce851bfbd97b0fd30ccf6c7f3b1f77b016 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/LIDA-training/starting_python | 04eb113439b33ce615d176c04e43ebe489328ba5 | 559d3748f1d47ca4a314f1d4c75635a3a20645d5 | refs/heads/master | 2022-03-01T16:21:03.432864 | 2019-10-03T10:56:05 | 2019-10-03T10:56:05 | 212,553,023 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Comment
print("Hello world")
def func(var):
if var == 1:
print("Var is 1")
else:
print("Var is not 1")
func(1)
func(0) | UTF-8 | Python | false | false | 144 | py | 3 | py1.py | 1 | 0.534722 | 0.5 | 0 | 9 | 15.111111 | 29 |
VictorCcccc/CS411_FINAL_DEMO | 1,494,648,661,427 | d350e8f00a48964fb9459d00e532bc8a5fad2614 | 5eda9290b7112020272b2be7284ad71570c121dc | /Users/migrations/0001_initial.py | 5b3015b1193f4f3182945fca31656493d8b37c52 | []
| no_license | https://github.com/VictorCcccc/CS411_FINAL_DEMO | da21570498a78d8bd0e88c3b40d361d852067834 | 1ca607a512015c3e288c8eac61e682ce467a5e4d | refs/heads/master | 2020-05-15T14:10:16.084285 | 2019-04-19T21:26:20 | 2019-04-19T21:26:20 | 182,326,324 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.1.1 on 2019-04-14 02:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Grade', models.CharField(choices=[('Freshman', 'Freshman'), ('Sophomore', 'Sophomore'), ('Junior', 'Junior'), ('Senior', 'Senior'), ('Graduate', 'Graduate')], default='Freshman', max_length=10, verbose_name='Grade')),
('image', models.ImageField(blank=True, default='avatar.jpg', upload_to='image/%Y/%m', verbose_name='image')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| UTF-8 | Python | false | false | 1,049 | py | 25 | 0001_initial.py | 13 | 0.627264 | 0.611058 | 0 | 26 | 39.346154 | 235 |
wavefrontHQ/demo-app | 8,787,503,113,610 | 87bc57d84ea1f83f2b0d5510856ab427d9c481ae | d9f6ae8293543f68fb8465eace3f3ebe855fc11c | /warehouse/warehouse/views.py | 0bc19ebb196e2ce00cd7c36a1d1736cd95a06907 | [
"Apache-2.0"
]
| permissive | https://github.com/wavefrontHQ/demo-app | 85c547124dba57097c8d314a2d0f08049934eb31 | 41d1e4d034981a58414f54c914b1bf9b071b69e4 | refs/heads/master | 2023-02-03T08:44:28.023608 | 2022-12-15T12:41:09 | 2022-12-15T12:41:09 | 228,472,322 | 8 | 18 | Apache-2.0 | false | 2023-02-02T07:52:54 | 2019-12-16T20:43:11 | 2023-01-17T15:25:16 | 2023-02-02T07:52:54 | 114,574 | 7 | 14 | 5 | Java | false | false | import logging
import random
import sys
import time
import traceback
import requests
from concurrent.futures import ThreadPoolExecutor
from rest_framework.response import Response
from rest_framework.decorators import api_view
from django.conf import settings
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
tracing = settings.OPENTRACING_TRACING
tracer = tracing.tracer
executor = ThreadPoolExecutor(max_workers=2)
@api_view(http_method_names=["GET"])
def fetch(request, order_num):
    """Simulate fetching an order from the warehouse.

    Sleeps 1s, fails with 503 on a ~0.1% simulated outage, rejects an
    empty order number with 400, queues async work on the executor, and
    for ~1/3 of requests also calls the local /check_stock endpoint
    before returning 202.
    """
    try:
        time.sleep(1)
        # ~1/1000 chance of a simulated outage.
        if random.randint(1, 1000) == 1000:
            raise RuntimeError("Random Service Unavailable!")
        if not order_num:
            raise ValueError("Invalid Order Num!")
        # Continue the current trace on a worker thread.
        executor.submit(async_fetch, tracer.active_span)
        # ~1/3 of requests also trigger a stock check against this server.
        if random.randint(1, 3) == 3:
            requests.get(
                "http://localhost:" + request.META["SERVER_PORT"] + "/check_stock")
        return Response(
            data={"status": "Order:" + order_num + " fetched from warehouse"},
            status=202)
    except RuntimeError:
        return handle_exception(tracer.active_span, sys.exc_info(), 503)
    except ValueError:
        return handle_exception(tracer.active_span, sys.exc_info(), 400)
def async_fetch(parent_span):
    """Background half of fetch(); runs on the executor under *parent_span*."""
    with tracer.scope_manager.activate(parent_span, finish_on_close=True):
        with tracer.start_active_span('async_fetch') as scope:
            try:
                time.sleep(0.5)
                # ~1/1000 chance of a simulated failure.
                if random.randint(1, 1000) == 1000:
                    raise RuntimeError("Fail to execute async_fetch")
                invoke_lambda(tracer.active_span)
                return
            except RuntimeError:
                handle_exception(scope.span, sys.exc_info())
def invoke_lambda(parent_span):
    """Simulate a client call to AWS Lambda under *parent_span*.

    The span is tagged as a client call to the 'AWSLambda' peer service so
    it appears as an external dependency in the trace.
    """
    with tracer.scope_manager.activate(parent_span, finish_on_close=True):
        with tracer.start_active_span('invoke_lambda',
                                      tags=[("span.kind", "client"),
                                            ("component", "java-aws-sdk"),
                                            ("peer.service", "AWSLambda")]) as scope:
            try:
                time.sleep(1.5)
                # ~1/1000 chance of a simulated failure.
                if random.randint(1, 1000) == 1000:
                    raise RuntimeError("Fail to invoke lambda")
                return
            except RuntimeError:
                handle_exception(scope.span, sys.exc_info())
@api_view(http_method_names=["GET"])
def check_stock(request):
    """Simulated stock-check endpoint: spawns background work, returns 202."""
    time.sleep(1)
    schedule_checking(tracer.active_span)
    return Response(status=202)
def schedule_checking(parent_span):
    """Open a 'schedule_checking' span under *parent_span* and queue async_check."""
    activated = tracer.scope_manager.activate(parent_span, finish_on_close=True)
    with activated, tracer.start_active_span('schedule_checking') as scope:
        time.sleep(1)
        executor.submit(async_check, scope.span)
def async_check(parent_span):
    """Simulated async stock check; runs on the executor under *parent_span*."""
    activated = tracer.scope_manager.activate(parent_span, finish_on_close=True)
    with activated, tracer.start_active_span('async_check'):
        time.sleep(1)
def handle_exception(active_span, exe_info, status_code=None):
    """Log an exception, tag the active span, and optionally build a response.

    active_span -- opentracing span or None; tagged with error details.
    exe_info    -- a sys.exc_info() triple.
    status_code -- when given, an error Response with that status is
                   returned; otherwise None (background-task path).
    """
    error_msg = str(exe_info[1])
    if error_msg:
        # CONSISTENCY FIX: use the module logger (logger = getLogger(__name__))
        # like the rest of this module instead of the root logger.
        logger.warning(error_msg)
    if active_span:
        active_span.set_tag('error', 'true')
        error_log = {
            'ErrorType': str(exe_info[0].__name__),
            'ErrorContent': error_msg,
            'ErrorTraceBack':
                '\n'.join(map(str.strip, traceback.format_tb(exe_info[2])))}
        print(error_log)  # NOTE(review): debug print; consider logger.debug
        active_span.log_kv(error_log)
    if not status_code:
        return
    return Response(error_msg, status=status_code)
| UTF-8 | Python | false | false | 3,687 | py | 131 | views.py | 69 | 0.602929 | 0.588283 | 0 | 106 | 33.783019 | 85 |
redspider/fkths | 6,030,134,113,088 | 6d9dc12427fcedeb1dff09fbcbb119878c764889 | 4f2f9924353c1dc541ae58dfcb56710d01d23b22 | /fkths/view/index.py | 570eed69ef46b69cc76ccd53b28e1cee8e5e74db | []
| no_license | https://github.com/redspider/fkths | 0210626818e00b5d6e1892689df9d73385fd4895 | fd9c329c49a0a95f6840bca745ac561b7e3b9cd7 | refs/heads/master | 2021-01-21T14:01:40.987405 | 2012-11-10T09:52:31 | 2012-11-10T09:52:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from fkths import app
from trex.flask import render_html
from flask import Blueprint
blueprint = Blueprint('index', __name__)
@blueprint.route('/')
@render_html()
def index():
"""
We can't stop here, this is bat country
"""
return {}
app.register_blueprint(blueprint) | UTF-8 | Python | false | false | 286 | py | 14 | index.py | 8 | 0.681818 | 0.681818 | 0 | 15 | 18.133333 | 43 |
insan333/Children-Garden | 5,239,860,135,169 | be11f613c00488cb63d38e7edcf021faee1eeabc | f786f2b40f873a4b1d1172d7b704e6529107412a | /main/views.py | daef9eacf2748219e91bd952ef501e8bfdb984f8 | []
| no_license | https://github.com/insan333/Children-Garden | a24af50ed65132652f20f4fceeee7346d3fe88ff | ffba18ad03e92deb1d5641dc193b93016db80c35 | refs/heads/main | 2023-06-16T04:34:45.923504 | 2021-07-03T13:04:00 | 2021-07-03T13:04:00 | 382,617,080 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from django.views.generic import View
from .models import Educator,Children,Children_Garden
class MainView(View):
    """Landing page listing every children's garden."""

    def get(self, request):
        # Query inside get() so each request re-evaluates; a class-level
        # queryset caches its results after the first evaluation and
        # serves stale data for the lifetime of the process.
        children_garden = Children_Garden.objects.all()
        return render(request, template_name="main/main.html", context={'children_garden': children_garden})
class Children_Garden1View(View):
    """Educators working at children's garden #1."""

    def get(self, request):
        # Per-request query avoids the stale class-level queryset cache.
        educators = Educator.objects.filter(children_garden_id=1)
        return render(request, template_name="main/children_gardens1.html", context={'educators': educators})
class Children_Garden2View(View):
    """Educators working at children's garden #2."""

    def get(self, request):
        # Per-request query avoids the stale class-level queryset cache.
        educators = Educator.objects.filter(children_garden_id=2)
        return render(request, template_name="main/children_gardens2.html", context={'educators': educators})
class Children_Garden3View(View):
    """Educators working at children's garden #3."""

    def get(self, request):
        # Per-request query avoids the stale class-level queryset cache.
        educators = Educator.objects.filter(children_garden_id=3)
        return render(request, template_name="main/children_gardens3.html", context={'educators': educators})
class Educator1View(View):
    """Children assigned to educator #1."""

    def get(self, request):
        # Per-request query avoids the stale class-level queryset cache.
        children = Children.objects.filter(educator_id=1)
        return render(request, template_name="main/educators1.html", context={'educators': children})
class Educator2View(View):
    """Children assigned to educator #2."""

    def get(self, request):
        # Per-request query avoids the stale class-level queryset cache.
        children = Children.objects.filter(educator_id=2)
        return render(request, template_name="main/educators2.html", context={'educators': children})
class Educator3View(View):
    """Children assigned to educator #3."""

    def get(self, request):
        # Per-request query avoids the stale class-level queryset cache.
        children = Children.objects.filter(educator_id=3)
        return render(request, template_name="main/educators3.html", context={'educators': children})
class Educator4View(View):
    """Children assigned to educator #4."""

    def get(self, request):
        # Per-request query avoids the stale class-level queryset cache.
        children = Children.objects.filter(educator_id=4)
        return render(request, template_name="main/educators4.html", context={'educators': children})
class Educator5View(View):
    """Children assigned to educator #5."""

    def get(self, request):
        # Per-request query avoids the stale class-level queryset cache.
        children = Children.objects.filter(educator_id=5)
        return render(request, template_name="main/educators5.html", context={'educators': children})
class Educator6View(View):
    """Children assigned to educator #6."""

    def get(self, request):
        # Per-request query avoids the stale class-level queryset cache.
        children = Children.objects.filter(educator_id=6)
        return render(request, template_name="main/educators6.html", context={'educators': children})
class Educator7View(View):
    """Children assigned to educator #7."""

    def get(self, request):
        # Per-request query avoids the stale class-level queryset cache.
        children = Children.objects.filter(educator_id=7)
        return render(request, template_name="main/educators7.html", context={'educators': children})
class Educator8View(View):
    """Children assigned to educator #8."""

    def get(self, request):
        # Per-request query avoids the stale class-level queryset cache.
        children = Children.objects.filter(educator_id=8)
        return render(request, template_name="main/educators8.html", context={'educators': children})
class Educator9View(View):
    """Children assigned to educator #9."""

    def get(self, request):
        # Per-request query avoids the stale class-level queryset cache.
        children = Children.objects.filter(educator_id=9)
        return render(request, template_name="main/educators9.html", context={'educators': children})
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.